Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig28
-rw-r--r--drivers/acpi/ac.c30
-rw-r--r--drivers/acpi/acpi_cmos_rtc.c2
-rw-r--r--drivers/acpi/acpi_extlog.c62
-rw-r--r--drivers/acpi/acpi_lpss.c82
-rw-r--r--drivers/acpi/acpi_memhotplug.c27
-rw-r--r--drivers/acpi/acpi_pad.c27
-rw-r--r--drivers/acpi/acpi_processor.c26
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h5
-rw-r--r--drivers/acpi/acpica/acdispat.h15
-rw-r--r--drivers/acpi/acpica/acevents.h11
-rw-r--r--drivers/acpi/acpica/acglobal.h388
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h10
-rw-r--r--drivers/acpi/acpica/aclocal.h11
-rw-r--r--drivers/acpi/acpica/acmacros.h16
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h5
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h16
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h5
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h4
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c4
-rw-r--r--drivers/acpi/acpica/dsinit.c61
-rw-r--r--drivers/acpi/acpica/dsmethod.c158
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c21
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c22
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c10
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c26
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c13
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c21
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c231
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c5
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c14
-rw-r--r--drivers/acpi/acpica/exutils.c82
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c7
-rw-r--r--drivers/acpi/acpica/nsload.c6
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c12
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c42
-rw-r--r--drivers/acpi/acpica/nsrepair.c31
-rw-r--r--drivers/acpi/acpica/nsrepair2.c39
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c58
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c6
-rw-r--r--drivers/acpi/acpica/psobject.c9
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c53
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c4
-rw-r--r--drivers/acpi/acpica/rscreate.c12
-rw-r--r--drivers/acpi/acpica/rsdump.c5
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c4
-rw-r--r--drivers/acpi/acpica/rsinfo.c6
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c337
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c17
-rw-r--r--drivers/acpi/acpica/tbprint.c22
-rw-r--r--drivers/acpi/acpica/tbutils.c216
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c21
-rw-r--r--drivers/acpi/acpica/utalloc.c12
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c14
-rw-r--r--drivers/acpi/acpica/utcopy.c8
-rw-r--r--drivers/acpi/acpica/utdebug.c6
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c17
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c43
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c28
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c5
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c5
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c14
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/apei-base.c1
-rw-r--r--drivers/acpi/apei/apei-internal.h1
-rw-r--r--drivers/acpi/apei/einj.c1
-rw-r--r--drivers/acpi/apei/ghes.c1
-rw-r--r--drivers/acpi/battery.c14
-rw-r--r--drivers/acpi/battery.h10
-rw-r--r--drivers/acpi/blacklist.c9
-rw-r--r--drivers/acpi/bus.c145
-rw-r--r--drivers/acpi/button.c27
-rw-r--r--drivers/acpi/container.c61
-rw-r--r--drivers/acpi/custom_method.c2
-rw-r--r--drivers/acpi/debugfs.c2
-rw-r--r--drivers/acpi/device_pm.c87
-rw-r--r--drivers/acpi/dock.c471
-rw-r--r--drivers/acpi/ec.c96
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/event.c2
-rw-r--r--drivers/acpi/fan.c15
-rw-r--r--drivers/acpi/glue.c177
-rw-r--r--drivers/acpi/hed.c2
-rw-r--r--drivers/acpi/internal.h32
-rw-r--r--drivers/acpi/numa.c17
-rw-r--r--drivers/acpi/nvs.c3
-rw-r--r--drivers/acpi/osl.c57
-rw-r--r--drivers/acpi/pci_irq.c39
-rw-r--r--drivers/acpi/pci_link.c6
-rw-r--r--drivers/acpi/pci_root.c128
-rw-r--r--drivers/acpi/pci_slot.c1
-rw-r--r--drivers/acpi/power.c5
-rw-r--r--drivers/acpi/proc.c7
-rw-r--r--drivers/acpi/processor_core.c81
-rw-r--r--drivers/acpi/processor_driver.c10
-rw-r--r--drivers/acpi/processor_idle.c37
-rw-r--r--drivers/acpi/processor_perflib.c21
-rw-r--r--drivers/acpi/processor_thermal.c11
-rw-r--r--drivers/acpi/processor_throttling.c76
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/sbs.c6
-rw-r--r--drivers/acpi/sbshc.c3
-rw-r--r--drivers/acpi/scan.c794
-rw-r--r--drivers/acpi/sleep.c46
-rw-r--r--drivers/acpi/sysfs.c6
-rw-r--r--drivers/acpi/tables.c139
-rw-r--r--drivers/acpi/thermal.c46
-rw-r--r--drivers/acpi/utils.c119
-rw-r--r--drivers/acpi/video.c154
-rw-r--r--drivers/acpi/video_detect.c12
-rw-r--r--drivers/acpi/wakeup.c1
-rw-r--r--drivers/amba/bus.c213
-rw-r--r--drivers/ata/Kconfig57
-rw-r--r--drivers/ata/Makefile8
-rw-r--r--drivers/ata/acard-ahci.c1
-rw-r--r--drivers/ata/ahci.c39
-rw-r--r--drivers/ata/ahci.h14
-rw-r--r--drivers/ata/ahci_da850.c114
-rw-r--r--drivers/ata/ahci_imx.c333
-rw-r--r--drivers/ata/ahci_platform.c291
-rw-r--r--drivers/ata/ahci_st.c245
-rw-r--r--drivers/ata/ahci_sunxi.c249
-rw-r--r--drivers/ata/ahci_xgene.c486
-rw-r--r--drivers/ata/ata_generic.c1
-rw-r--r--drivers/ata/libahci.c38
-rw-r--r--drivers/ata/libahci_platform.c541
-rw-r--r--drivers/ata/libata-acpi.c99
-rw-r--r--drivers/ata/libata-core.c142
-rw-r--r--drivers/ata/libata-eh.c18
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-zpodd.c21
-rw-r--r--drivers/ata/pata_acpi.c6
-rw-r--r--drivers/ata/pata_amd.c1
-rw-r--r--drivers/ata/pata_arasan_cf.c2
-rw-r--r--drivers/ata/pata_artop.c1
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c1
-rw-r--r--drivers/ata/pata_atp867x.c1
-rw-r--r--drivers/ata/pata_cmd640.c1
-rw-r--r--drivers/ata/pata_cmd64x.c1
-rw-r--r--drivers/ata/pata_cs5520.c1
-rw-r--r--drivers/ata/pata_cs5530.c1
-rw-r--r--drivers/ata/pata_cs5535.c1
-rw-r--r--drivers/ata/pata_cs5536.c1
-rw-r--r--drivers/ata/pata_cypress.c1
-rw-r--r--drivers/ata/pata_efar.c1
-rw-r--r--drivers/ata/pata_ep93xx.c1
-rw-r--r--drivers/ata/pata_hpt366.c1
-rw-r--r--drivers/ata/pata_hpt37x.c1
-rw-r--r--drivers/ata/pata_hpt3x2n.c1
-rw-r--r--drivers/ata/pata_hpt3x3.c1
-rw-r--r--drivers/ata/pata_imx.c26
-rw-r--r--drivers/ata/pata_it8213.c1
-rw-r--r--drivers/ata/pata_it821x.c1
-rw-r--r--drivers/ata/pata_jmicron.c1
-rw-r--r--drivers/ata/pata_legacy.c1
-rw-r--r--drivers/ata/pata_marvell.c1
-rw-r--r--drivers/ata/pata_mpiix.c1
-rw-r--r--drivers/ata/pata_netcell.c1
-rw-r--r--drivers/ata/pata_ninja32.c1
-rw-r--r--drivers/ata/pata_ns87410.c1
-rw-r--r--drivers/ata/pata_ns87415.c1
-rw-r--r--drivers/ata/pata_oldpiix.c1
-rw-r--r--drivers/ata/pata_opti.c1
-rw-r--r--drivers/ata/pata_optidma.c1
-rw-r--r--drivers/ata/pata_pcmcia.c1
-rw-r--r--drivers/ata/pata_pdc2027x.c1
-rw-r--r--drivers/ata/pata_pdc202xx_old.c1
-rw-r--r--drivers/ata/pata_piccolo.c1
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_pxa.c1
-rw-r--r--drivers/ata/pata_radisys.c1
-rw-r--r--drivers/ata/pata_rdc.c1
-rw-r--r--drivers/ata/pata_rz1000.c1
-rw-r--r--drivers/ata/pata_sc1200.c1
-rw-r--r--drivers/ata/pata_scc.c1
-rw-r--r--drivers/ata/pata_sch.c1
-rw-r--r--drivers/ata/pata_serverworks.c1
-rw-r--r--drivers/ata/pata_sil680.c1
-rw-r--r--drivers/ata/pata_sis.c1
-rw-r--r--drivers/ata/pata_sl82c105.c1
-rw-r--r--drivers/ata/pata_triflex.c1
-rw-r--r--drivers/ata/pata_via.c1
-rw-r--r--drivers/ata/pdc_adma.c1
-rw-r--r--drivers/ata/sata_dwc_460ex.c4
-rw-r--r--drivers/ata/sata_highbank.c6
-rw-r--r--drivers/ata/sata_mv.c16
-rw-r--r--drivers/ata/sata_nv.c1
-rw-r--r--drivers/ata/sata_promise.c1
-rw-r--r--drivers/ata/sata_qstor.c1
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sis.c1
-rw-r--r--drivers/ata/sata_svw.c1
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c1
-rw-r--r--drivers/ata/sata_via.c1
-rw-r--r--drivers/ata/sata_vsc.c1
-rw-r--r--drivers/atm/he.c1
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/base/Kconfig5
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/attribute_container.c1
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/base/container.c44
-rw-r--r--drivers/base/core.c20
-rw-r--r--drivers/base/cpu.c46
-rw-r--r--drivers/base/devres.c26
-rw-r--r--drivers/base/dma-buf.c43
-rw-r--r--drivers/base/firmware_class.c14
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/platform.c27
-rw-r--r--drivers/base/power/Makefile3
-rw-r--r--drivers/base/power/clock_ops.c31
-rw-r--r--drivers/base/power/common.c1
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/domain_governor.c1
-rw-r--r--drivers/base/power/generic_ops.c6
-rw-r--r--drivers/base/power/main.c280
-rw-r--r--drivers/base/power/opp.c1
-rw-r--r--drivers/base/power/power.h4
-rw-r--r--drivers/base/power/qos.c220
-rw-r--r--drivers/base/power/runtime.c164
-rw-r--r--drivers/base/power/sysfs.c97
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regcache.c13
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-irq.c12
-rw-r--r--drivers/base/regmap/regmap-mmio.c57
-rw-r--r--drivers/base/regmap/regmap-spi.c1
-rw-r--r--drivers/base/regmap/regmap-spmi.c228
-rw-r--r--drivers/base/regmap/regmap.c415
-rw-r--r--drivers/base/topology.c1
-rw-r--r--drivers/bcma/Kconfig1
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c6
-rw-r--r--drivers/bcma/driver_gpio.c137
-rw-r--r--drivers/bcma/host_pci.c3
-rw-r--r--drivers/bcma/main.c14
-rw-r--r--drivers/block/DAC960.c34
-rw-r--r--drivers/block/Kconfig5
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/aoe/aoe.h10
-rw-r--r--drivers/block/aoe/aoecmd.c153
-rw-r--r--drivers/block/ataflop.c16
-rw-r--r--drivers/block/brd.c16
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/drbd/drbd_actlog.c631
-rw-r--r--drivers/block/drbd/drbd_bitmap.c370
-rw-r--r--drivers/block/drbd/drbd_int.h1130
-rw-r--r--drivers/block/drbd/drbd_main.c2032
-rw-r--r--drivers/block/drbd/drbd_nl.c1653
-rw-r--r--drivers/block/drbd/drbd_proc.c140
-rw-r--r--drivers/block/drbd/drbd_protocol.h295
-rw-r--r--drivers/block/drbd/drbd_receiver.c2549
-rw-r--r--drivers/block/drbd/drbd_req.c468
-rw-r--r--drivers/block/drbd/drbd_req.h22
-rw-r--r--drivers/block/drbd/drbd_state.c859
-rw-r--r--drivers/block/drbd/drbd_state.h40
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_strings.h9
-rw-r--r--drivers/block/drbd/drbd_worker.c952
-rw-r--r--drivers/block/drbd/drbd_wrappers.h14
-rw-r--r--drivers/block/floppy.c94
-rw-r--r--drivers/block/loop.c29
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c364
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h16
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/null_blk.c100
-rw-r--r--drivers/block/nvme-core.c791
-rw-r--r--drivers/block/nvme-scsi.c147
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/pktcdvd.c186
-rw-r--r--drivers/block/ps3disk.c17
-rw-r--r--drivers/block/ps3vram.c12
-rw-r--r--drivers/block/rbd.c395
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/rsxx/dma.c15
-rw-r--r--drivers/block/skd_main.c67
-rw-r--r--drivers/block/swim3.c18
-rw-r--r--drivers/block/sx8.c16
-rw-r--r--drivers/block/umem.c53
-rw-r--r--drivers/block/virtio_blk.c30
-rw-r--r--drivers/block/xen-blkback/blkback.c68
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c17
-rw-r--r--drivers/block/zram/Kconfig (renamed from drivers/staging/zram/Kconfig)1
-rw-r--r--drivers/block/zram/Makefile (renamed from drivers/staging/zram/Makefile)0
-rw-r--r--drivers/block/zram/zram_drv.c (renamed from drivers/staging/zram/zram_drv.c)163
-rw-r--r--drivers/block/zram/zram_drv.h (renamed from drivers/staging/zram/zram_drv.h)32
-rw-r--r--drivers/bluetooth/Kconfig3
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btmrvl_drv.h25
-rw-r--r--drivers/bluetooth/btmrvl_main.c130
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c9
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h2
-rw-r--r--drivers/bluetooth/btsdio.c6
-rw-r--r--drivers/bluetooth/btusb.c53
-rw-r--r--drivers/bluetooth/hci_vhci.c29
-rw-r--r--drivers/bus/arm-cci.c2
-rw-r--r--drivers/bus/mvebu-mbus.c14
-rw-r--r--drivers/cdrom/Makefile1
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/Makefile3
-rw-r--r--drivers/char/agp/Kconfig5
-rw-r--r--drivers/char/agp/Makefile2
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c1
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/agp/intel-gtt.c19
-rw-r--r--drivers/char/agp/sgi-agp.c1
-rw-r--r--drivers/char/apm-emulation.c11
-rw-r--r--drivers/char/hpet.c7
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c1
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/hw_random/exynos-rng.c1
-rw-r--r--drivers/char/hw_random/n2-drv.c1
-rw-r--r--drivers/char/hw_random/nomadik-rng.c1
-rw-r--r--drivers/char/hw_random/octeon-rng.c1
-rw-r--r--drivers/char/hw_random/virtio-rng.c3
-rw-r--r--drivers/char/i8k.c2
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c8
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c52
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c2
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/msm_smd_pkt.c2
-rw-r--r--drivers/char/mwave/3780i.c1
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tile-srom.c1
-rw-r--r--drivers/char/tpm/tpm_acpi.c2
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c1
-rw-r--r--drivers/char/tpm/tpm_i2c_stm_st33.c1
-rw-r--r--drivers/char/tpm/tpm_ppi.c406
-rw-r--r--drivers/char/tpm/xen-tpmfront.c4
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/clk/Kconfig22
-rw-r--r--drivers/clk/Makefile74
-rw-r--r--drivers/clk/at91/Makefile11
-rw-r--r--drivers/clk/at91/clk-main.c187
-rw-r--r--drivers/clk/at91/clk-master.c270
-rw-r--r--drivers/clk/at91/clk-peripheral.c410
-rw-r--r--drivers/clk/at91/clk-pll.c531
-rw-r--r--drivers/clk/at91/clk-plldiv.c135
-rw-r--r--drivers/clk/at91/clk-programmable.c366
-rw-r--r--drivers/clk/at91/clk-smd.c171
-rw-r--r--drivers/clk/at91/clk-system.c135
-rw-r--r--drivers/clk/at91/clk-usb.c398
-rw-r--r--drivers/clk/at91/clk-utmi.c159
-rw-r--r--drivers/clk/at91/pmc.c395
-rw-r--r--drivers/clk/at91/pmc.h114
-rw-r--r--drivers/clk/clk-composite.c28
-rw-r--r--drivers/clk/clk-fixed-rate.c43
-rw-r--r--drivers/clk/clk-max77686.c97
-rw-r--r--drivers/clk/clk-nomadik.c3
-rw-r--r--drivers/clk/clk-si5351.c30
-rw-r--r--drivers/clk/clk-si5351.h14
-rw-r--r--drivers/clk/clk-si570.c531
-rw-r--r--drivers/clk/clk-vt8500.c2
-rw-r--r--drivers/clk/clk.c383
-rw-r--r--drivers/clk/clk.h16
-rw-r--r--drivers/clk/clkdev.c12
-rw-r--r--drivers/clk/hisilicon/Makefile5
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c242
-rw-r--r--drivers/clk/hisilicon/clk.c171
-rw-r--r--drivers/clk/hisilicon/clk.h103
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c130
-rw-r--r--drivers/clk/keystone/gate.c13
-rw-r--r--drivers/clk/keystone/pll.c24
-rw-r--r--drivers/clk/mvebu/Kconfig5
-rw-r--r--drivers/clk/mvebu/Makefile1
-rw-r--r--drivers/clk/mvebu/armada-370.c21
-rw-r--r--drivers/clk/mvebu/armada-xp.c20
-rw-r--r--drivers/clk/mvebu/clk-corediv.c223
-rw-r--r--drivers/clk/mvebu/clk-cpu.c2
-rw-r--r--drivers/clk/mvebu/dove.c19
-rw-r--r--drivers/clk/mvebu/kirkwood.c34
-rw-r--r--drivers/clk/qcom/Kconfig47
-rw-r--r--drivers/clk/qcom/Makefile14
-rw-r--r--drivers/clk/qcom/clk-branch.c159
-rw-r--r--drivers/clk/qcom/clk-branch.h56
-rw-r--r--drivers/clk/qcom/clk-pll.c222
-rw-r--r--drivers/clk/qcom/clk-pll.h66
-rw-r--r--drivers/clk/qcom/clk-rcg.c517
-rw-r--r--drivers/clk/qcom/clk-rcg.h159
-rw-r--r--drivers/clk/qcom/clk-rcg2.c291
-rw-r--r--drivers/clk/qcom/clk-regmap.c114
-rw-r--r--drivers/clk/qcom/clk-regmap.h45
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c2819
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2993
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c2694
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2321
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2625
-rw-r--r--drivers/clk/qcom/reset.c63
-rw-r--r--drivers/clk/qcom/reset.h37
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c159
-rw-r--r--drivers/clk/samsung/clk-exynos4.c858
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c699
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c650
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c81
-rw-r--r--drivers/clk/samsung/clk-pll.c2
-rw-r--r--drivers/clk/shmobile/Makefile7
-rw-r--r--drivers/clk/shmobile/clk-div6.c185
-rw-r--r--drivers/clk/shmobile/clk-emev2.c104
-rw-r--r--drivers/clk/shmobile/clk-mstp.c233
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c338
-rw-r--r--drivers/clk/sirf/Makefile5
-rw-r--r--drivers/clk/sirf/atlas6.h31
-rw-r--r--drivers/clk/sirf/clk-atlas6.c152
-rw-r--r--drivers/clk/sirf/clk-common.c (renamed from drivers/clk/clk-prima2.c)264
-rw-r--r--drivers/clk/sirf/clk-prima2.c151
-rw-r--r--drivers/clk/sirf/prima2.h25
-rw-r--r--drivers/clk/socfpga/clk.c6
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/sunxi/clk-factors.c67
-rw-r--r--drivers/clk/sunxi/clk-factors.h16
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c493
-rw-r--r--drivers/clk/tegra/Makefile7
-rw-r--r--drivers/clk/tegra/clk-divider.c2
-rw-r--r--drivers/clk/tegra/clk-id.h239
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c30
-rw-r--r--drivers/clk/tegra/clk-periph.c74
-rw-r--r--drivers/clk/tegra/clk-pll.c417
-rw-r--r--drivers/clk/tegra/clk-tegra-audio.c215
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c111
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c676
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c132
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c149
-rw-r--r--drivers/clk/tegra/clk-tegra114.c1688
-rw-r--r--drivers/clk/tegra/clk-tegra124.c1430
-rw-r--r--drivers/clk/tegra/clk-tegra20.c819
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1504
-rw-r--r--drivers/clk/tegra/clk.c214
-rw-r--r--drivers/clk/tegra/clk.h116
-rw-r--r--drivers/clk/ti/Makefile11
-rw-r--r--drivers/clk/ti/apll.c223
-rw-r--r--drivers/clk/ti/autoidle.c133
-rw-r--r--drivers/clk/ti/clk-33xx.c161
-rw-r--r--drivers/clk/ti/clk-3xxx.c401
-rw-r--r--drivers/clk/ti/clk-43xx.c118
-rw-r--r--drivers/clk/ti/clk-44xx.c316
-rw-r--r--drivers/clk/ti/clk-54xx.c255
-rw-r--r--drivers/clk/ti/clk-7xx.c332
-rw-r--r--drivers/clk/ti/clk.c167
-rw-r--r--drivers/clk/ti/clockdomain.c70
-rw-r--r--drivers/clk/ti/composite.c269
-rw-r--r--drivers/clk/ti/divider.c487
-rw-r--r--drivers/clk/ti/dpll.c558
-rw-r--r--drivers/clk/ti/fixed-factor.c66
-rw-r--r--drivers/clk/ti/gate.c249
-rw-r--r--drivers/clk/ti/interface.c125
-rw-r--r--drivers/clk/ti/mux.c246
-rw-r--r--drivers/clk/ux500/clk-prcmu.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/clk/versatile/clk-icst.h1
-rw-r--r--drivers/clk/versatile/clk-impd1.c88
-rw-r--r--drivers/clk/versatile/clk-integrator.c2
-rw-r--r--drivers/clk/versatile/clk-realview.c6
-rw-r--r--drivers/clk/versatile/clk-sp810.c2
-rw-r--r--drivers/clk/zynq/clkc.c18
-rw-r--r--drivers/clocksource/Kconfig48
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/arm_arch_timer.c1
-rw-r--r--drivers/clocksource/bcm_kona_timer.c48
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c121
-rw-r--r--drivers/clocksource/cyclone.c113
-rw-r--r--drivers/clocksource/exynos_mct.c6
-rw-r--r--drivers/clocksource/nomadik-mtu.c23
-rw-r--r--drivers/clocksource/sun4i_timer.c2
-rw-r--r--drivers/clocksource/time-armada-370-xp.c12
-rw-r--r--drivers/clocksource/time-orion.c28
-rw-r--r--drivers/clocksource/timer-keystone.c241
-rw-r--r--drivers/clocksource/timer-u300.c447
-rw-r--r--drivers/clocksource/vf_pit_timer.c2
-rw-r--r--drivers/connector/cn_proc.c18
-rw-r--r--drivers/connector/connector.c20
-rw-r--r--drivers/cpufreq/Kconfig9
-rw-r--r--drivers/cpufreq/Kconfig.arm44
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c94
-rw-r--r--drivers/cpufreq/arm_big_little.c9
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c17
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c27
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c4
-rw-r--r--drivers/cpufreq/cpufreq.c729
-rw-r--r--drivers/cpufreq/cpufreq_governor.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.h2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c135
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c1
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c1
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c17
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c22
-rw-r--r--drivers/cpufreq/e_powersaver.c1
-rw-r--r--drivers/cpufreq/elanfreq.c1
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c125
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h22
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c2
-rw-r--r--drivers/cpufreq/exynos4x12-cpufreq.c4
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c75
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c41
-rw-r--r--drivers/cpufreq/freq_table.c122
-rw-r--r--drivers/cpufreq/gx-suspmod.c4
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c1
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c135
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c5
-rw-r--r--drivers/cpufreq/intel_pstate.c169
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c2
-rw-r--r--drivers/cpufreq/longhaul.c5
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c16
-rw-r--r--drivers/cpufreq/omap-cpufreq.c35
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c20
-rw-r--r--drivers/cpufreq/powernow-k6.c152
-rw-r--r--drivers/cpufreq/powernow-k7.c6
-rw-r--r--drivers/cpufreq/powernow-k8.c21
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c22
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c6
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c18
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c35
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c72
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c2
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/cpufreq/sc520_freq.c1
-rw-r--r--drivers/cpufreq/sh-cpufreq.c5
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c4
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c4
-rw-r--r--drivers/cpufreq/spear-cpufreq.c26
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-smi.c33
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c84
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c33
-rw-r--r--drivers/cpuidle/Kconfig5
-rw-r--r--drivers/cpuidle/Kconfig.powerpc20
-rw-r--r--drivers/cpuidle/Makefile5
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c256
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c273
-rw-r--r--drivers/cpuidle/cpuidle.c108
-rw-r--r--drivers/cpuidle/driver.c2
-rw-r--r--drivers/cpuidle/governors/menu.c75
-rw-r--r--drivers/crypto/Kconfig39
-rw-r--r--drivers/crypto/Makefile33
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c1
-rw-r--r--drivers/crypto/atmel-aes.c143
-rw-r--r--drivers/crypto/atmel-sha.c103
-rw-r--r--drivers/crypto/atmel-tdes.c143
-rw-r--r--drivers/crypto/caam/caamalg.c36
-rw-r--r--drivers/crypto/ccp/Kconfig24
-rw-r--r--drivers/crypto/ccp/Makefile10
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c365
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c279
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c369
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c432
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c517
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h197
-rw-r--r--drivers/crypto/ccp/ccp-dev.c595
-rw-r--r--drivers/crypto/ccp/ccp-dev.h272
-rw-r--r--drivers/crypto/ccp/ccp-ops.c2024
-rw-r--r--drivers/crypto/ccp/ccp-pci.c361
-rw-r--r--drivers/crypto/dcp.c903
-rw-r--r--drivers/crypto/mxs-dcp.c1100
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/crypto/omap-aes.c16
-rw-r--r--drivers/crypto/omap-sham.c19
-rw-r--r--drivers/crypto/talitos.c23
-rw-r--r--drivers/devfreq/Kconfig2
-rw-r--r--drivers/devfreq/devfreq.c31
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c6
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.h110
-rw-r--r--drivers/devfreq/exynos/exynos5_bus.c2
-rw-r--r--drivers/dma/Kconfig17
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/acpi-dma.c36
-rw-r--r--drivers/dma/amba-pl08x.c4
-rw-r--r--drivers/dma/bcm2835-dma.c707
-rw-r--r--drivers/dma/cppi41.c4
-rw-r--r--drivers/dma/dmaengine.c28
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/dma/dw/core.c35
-rw-r--r--drivers/dma/edma.c6
-rw-r--r--drivers/dma/fsldma.h2
-rw-r--r--drivers/dma/imx-sdma.c24
-rw-r--r--drivers/dma/ioat/dma.c52
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c11
-rw-r--r--drivers/dma/ioat/dma_v3.c3
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c221
-rw-r--r--drivers/dma/mmp_tdma.c28
-rw-r--r--drivers/dma/moxart-dma.c699
-rw-r--r--drivers/dma/mv_xor.c24
-rw-r--r--drivers/dma/omap-dma.c4
-rw-r--r--drivers/dma/pl330.c65
-rw-r--r--drivers/dma/ppc4xx/adma.c1
-rw-r--r--drivers/dma/sirf-dma.c20
-rw-r--r--drivers/dma/ste_dma40.c8
-rw-r--r--drivers/dma/tegra20-apb-dma.c114
-rw-r--r--drivers/dma/virt-dma.h4
-rw-r--r--drivers/edac/amd64_edac.c38
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/amd8111_edac.c44
-rw-r--r--drivers/edac/e752x_edac.c30
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c10
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5100_edac.c17
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c38
-rw-r--r--drivers/edac/i7core_edac.c19
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/mce_amd.c65
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/octeon_edac-lmc.c179
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/extcon/Kconfig4
-rw-r--r--drivers/extcon/Makefile2
-rw-r--r--drivers/extcon/extcon-arizona.c12
-rw-r--r--drivers/extcon/extcon-class.c42
-rw-r--r--drivers/extcon/extcon-gpio.c4
-rw-r--r--drivers/extcon/extcon-palmas.c5
-rw-r--r--drivers/extcon/of_extcon.c64
-rw-r--r--drivers/firewire/core-device.c22
-rw-r--r--drivers/firewire/core-transaction.c6
-rw-r--r--drivers/firewire/core.h3
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firewire/ohci.c42
-rw-r--r--drivers/firewire/sbp2.c17
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/firmware/dcdbas.c2
-rw-r--r--drivers/firmware/dmi_scan.c20
-rw-r--r--drivers/firmware/efi/efi-stub-helper.c136
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/efi/efivars.c2
-rw-r--r--drivers/firmware/google/Kconfig3
-rw-r--r--drivers/firmware/google/gsmi.c7
-rw-r--r--drivers/firmware/google/memconsole.c47
-rw-r--r--drivers/fmc/fmc-core.c22
-rw-r--r--drivers/fmc/fmc-sdb.c41
-rw-r--r--drivers/fmc/fmc-write-eeprom.c2
-rw-r--r--drivers/gpio/Kconfig8
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-davinci.c185
-rw-r--r--drivers/gpio/gpio-intel-mid.c4
-rw-r--r--drivers/gpio/gpio-vr41xx.c20
-rw-r--r--drivers/gpio/gpio-xtensa.c16
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/armada/Kconfig1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c13
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c49
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c20
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/bochs/Kconfig12
-rw-r--r--drivers/gpu/drm/bochs/Makefile4
-rw-r--r--drivers/gpu/drm/bochs/bochs.h164
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c178
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c215
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c177
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c294
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c546
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c26
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c35
-rw-r--r--drivers/gpu/drm/drm_buffer.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c10
-rw-r--r--drivers/gpu/drm/drm_cache.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c25
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c33
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c289
-rw-r--r--drivers/gpu/drm/drm_edid_load.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_fops.c14
-rw-r--r--drivers/gpu/drm/drm_gem.c45
-rw-r--r--drivers/gpu/drm/drm_info.c16
-rw-r--r--drivers/gpu/drm/drm_ioctl.c12
-rw-r--r--drivers/gpu/drm/drm_irq.c119
-rw-r--r--drivers/gpu/drm/drm_memory.c15
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c315
-rw-r--r--drivers/gpu/drm/drm_panel.c100
-rw-r--r--drivers/gpu/drm/drm_pci.c38
-rw-r--r--drivers/gpu/drm/drm_platform.c12
-rw-r--r--drivers/gpu/drm/drm_prime.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c31
-rw-r--r--drivers/gpu/drm/drm_usb.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/gma500/Kconfig1
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c39
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c12
-rw-r--r--drivers/gpu/drm/gma500/mmu.c2
-rw-r--r--drivers/gpu/drm/gma500/opregion.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c19
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c19
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig33
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c73
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c288
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c53
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c129
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h175
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c123
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c69
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c56
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c29
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c39
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c221
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h335
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c49
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c49
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c27
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c146
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c37
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h64
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c149
-rw-r--r--drivers/gpu/drm/i915/intel_display.c614
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c191
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h47
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c193
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h21
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c119
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c5
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c65
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c93
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c57
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c958
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1669
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c75
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c62
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h40
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c43
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c86
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c319
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c4
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h4
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c1
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c8
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c10
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c6
-rw-r--r--drivers/gpu/drm/msm/Kconfig4
-rw-r--r--drivers/gpu/drm/msm/Makefile21
-rw-r--r--drivers/gpu/drm/msm/NOTES20
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h125
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h116
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c190
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h171
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h30
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c201
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h83
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c71
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c139
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c157
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp4/mdp4.xml.h)88
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_crtc.c)201
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c)6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c93
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_kms.c)61
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp4/mdp4_kms.h)56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_plane.c)10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h1036
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c574
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c258
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c111
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c350
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h213
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c389
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c173
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h41
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h78
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp4/mdp4_format.c)13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c144
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h97
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_irq.c203
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c132
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h64
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c1
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c172
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c42
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c148
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h68
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h47
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile8
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c23
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/vga.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/falcon.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c416
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c1408
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc138
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc542
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h473
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h704
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h766
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc382
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc540
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h916
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h1238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h1238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h1202
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h1202
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc141
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c236
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c166
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h66
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c67
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c70
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c41
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c18
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c614
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c140
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc393
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc32
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc53
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h467
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h484
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h484
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h533
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c183
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c197
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c11
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c86
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c26
-rw-r--r--drivers/gpu/drm/panel/Kconfig19
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c548
-rw-r--r--drivers/gpu/drm/qxl/Kconfig8
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c9
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c4
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c1
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c66
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c83
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c54
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c52
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c70
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/cik.c377
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c134
-rw-r--r--drivers/gpu/drm/radeon/cikd.h11
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c15
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c15
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c104
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c26
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreen_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c142
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c77
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c31
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/pptable.h4
-rw-r--r--drivers/gpu/drm/radeon/r100.c36
-rw-r--r--drivers/gpu/drm/radeon/r300.c5
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c8
-rw-r--r--drivers/gpu/drm/radeon/r420.c5
-rw-r--r--drivers/gpu/drm/radeon/r520.c3
-rw-r--r--drivers/gpu/drm/radeon/r600.c171
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c14
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c20
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c17
-rw-r--r--drivers/gpu/drm/radeon/r600d.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h29
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c87
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h66
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c48
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c158
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c96
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h21
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c200
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs400.c5
-rw-r--r--drivers/gpu/drm/radeon/rs600.c5
-rw-r--r--drivers/gpu/drm/radeon/rs690.c5
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770.c61
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c49
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h4
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h7
-rw-r--r--drivers/gpu/drm/radeon/si.c168
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c72
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/sid.h9
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c23
-rw-r--r--drivers/gpu/drm/radeon/sumo_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c21
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c10
-rw-r--r--drivers/gpu/drm/savage/savage_state.c8
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c18
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig25
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/bus.c1
-rw-r--r--drivers/gpu/drm/tegra/dc.c117
-rw-r--r--drivers/gpu/drm/tegra/dc.h9
-rw-r--r--drivers/gpu/drm/tegra/drm.c21
-rw-r--r--drivers/gpu/drm/tegra/drm.h15
-rw-r--r--drivers/gpu/drm/tegra/dsi.c971
-rw-r--r--drivers/gpu/drm/tegra/dsi.h134
-rw-r--r--drivers/gpu/drm/tegra/fb.c45
-rw-r--r--drivers/gpu/drm/tegra/gem.c183
-rw-r--r--drivers/gpu/drm/tegra/gem.h9
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c22
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c78
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.c138
-rw-r--r--drivers/gpu/drm/tegra/mipi-phy.h65
-rw-r--r--drivers/gpu/drm/tegra/output.c51
-rw-r--r--drivers/gpu/drm/tegra/rgb.c58
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c34
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c90
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c30
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c12
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c11
-rw-r--r--drivers/gpu/drm/via/via_dma.c12
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c20
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_irq.c10
-rw-r--r--drivers/gpu/drm/via/via_video.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h759
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c182
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c630
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c221
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h249
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1177
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c107
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c107
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c656
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c209
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c812
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c493
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/Makefile4
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/channel.c5
-rw-r--r--drivers/gpu/host1x/debug.c13
-rw-r--r--drivers/gpu/host1x/dev.c28
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c4
-rw-r--r--drivers/gpu/host1x/hw/host1x02_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/host1x04.c42
-rw-r--r--drivers/gpu/host1x/hw/host1x04.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x04_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x02_uclass.h6
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_channel.h121
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_sync.h243
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_uclass.h181
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c1
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/host1x/mipi.c275
-rw-r--r--drivers/gpu/host1x/syncpt.c9
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-prodikeys.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c26
-rw-r--r--drivers/hv/Makefile2
-rw-r--r--drivers/hv/channel.c56
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hv/hv_balloon.c3
-rw-r--r--drivers/hv/hv_fcopy.c414
-rw-r--r--drivers/hv/hv_kvp.c4
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hv_util.c11
-rw-r--r--drivers/hv/hyperv_vmbus.h8
-rw-r--r--drivers/hv/ring_buffer.c17
-rw-r--r--drivers/hv/vmbus_drv.c110
-rw-r--r--drivers/hwmon/Kconfig614
-rw-r--r--drivers/hwmon/Makefile5
-rw-r--r--drivers/hwmon/acpi_power_meter.c3
-rw-r--r--drivers/hwmon/adc128d818.c491
-rw-r--r--drivers/hwmon/adm1025.c4
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/hwmon/coretemp.c71
-rw-r--r--drivers/hwmon/da9055-hwmon.c4
-rw-r--r--drivers/hwmon/ds1621.c2
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc6w201.c4
-rw-r--r--drivers/hwmon/f71805f.c2
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/hwmon.c5
-rw-r--r--drivers/hwmon/ibmpowernv.c529
-rw-r--r--drivers/hwmon/iio_hwmon.c37
-rw-r--r--drivers/hwmon/it87.c108
-rw-r--r--drivers/hwmon/jz4740-hwmon.c25
-rw-r--r--drivers/hwmon/lm63.c4
-rw-r--r--drivers/hwmon/lm75.c35
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm83.c4
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/lm92.c4
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/lm95241.c93
-rw-r--r--drivers/hwmon/lm95245.c112
-rw-r--r--drivers/hwmon/ltc2945.c519
-rw-r--r--drivers/hwmon/ltc4215.c51
-rw-r--r--drivers/hwmon/ltc4222.c237
-rw-r--r--drivers/hwmon/ltc4245.c30
-rw-r--r--drivers/hwmon/ltc4260.c200
-rw-r--r--drivers/hwmon/max1619.c5
-rw-r--r--drivers/hwmon/max1668.c81
-rw-r--r--drivers/hwmon/max6639.c91
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c257
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pc87427.c4
-rw-r--r--drivers/hwmon/pcf8591.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c25
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c68
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/smm665.c2
-rw-r--r--drivers/hwmon/smsc47b397.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/tmp102.c19
-rw-r--r--drivers/hwmon/w83627ehf.c4
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83795.c4
-rw-r--r--drivers/hwmon/w83l785ts.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c3
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c1
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c1
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-acorn.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c1
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c1
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c4
-rw-r--r--drivers/i2c/busses/i2c-amd756.c1
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c1
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c3
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c1
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c3
-rw-r--r--drivers/i2c/busses/i2c-highlander.c1
-rw-r--r--drivers/i2c/busses/i2c-hydra.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c1
-rw-r--r--drivers/i2c/busses/i2c-isch.c1
-rw-r--r--drivers/i2c/busses/i2c-ismt.c1
-rw-r--r--drivers/i2c/busses/i2c-mpc.c1
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c38
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c4
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c1
-rw-r--r--drivers/i2c/busses/i2c-ocores.c1
-rw-r--r--drivers/i2c/busses/i2c-octeon.c1
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.h2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-piix4.c44
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c1
-rw-r--r--drivers/i2c/busses/i2c-powermac.c1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c22
-rw-r--r--drivers/i2c/busses/i2c-scmi.c1
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c1
-rw-r--r--drivers/i2c/busses/i2c-simtec.c1
-rw-r--r--drivers/i2c/busses/i2c-sis630.c1
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c1
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c4
-rw-r--r--drivers/i2c/busses/i2c-tegra.c13
-rw-r--r--drivers/i2c/busses/i2c-via.c1
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-xiic.c1
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/busses/scx200_i2c.c1
-rw-r--r--drivers/i2c/i2c-core.c26
-rw-r--r--drivers/i2c/i2c-smbus.c4
-rw-r--r--drivers/i2c/i2c-stub.c2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/ide/ide-acpi.c12
-rw-r--r--drivers/ide/ide-cd_verbose.c2
-rw-r--r--drivers/ide/ide-pio-blacklist.c1
-rw-r--r--drivers/idle/intel_idle.c32
-rw-r--r--drivers/iio/accel/bma180.c20
-rw-r--r--drivers/iio/adc/Kconfig33
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/max1363.c18
-rw-r--r--drivers/iio/adc/men_z188_adc.c172
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c1
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c711
-rw-r--r--drivers/iio/adc/viperboard_adc.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c1333
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c254
-rw-r--r--drivers/iio/adc/xilinx-xadc.h209
-rw-r--r--drivers/iio/buffer_cb.c7
-rw-r--r--drivers/iio/dac/ad7303.c2
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c9
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c1
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/si7005.c189
-rw-r--r--drivers/iio/imu/Kconfig4
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c12
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h38
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c1
-rw-r--r--drivers/iio/industrialio-buffer.c16
-rw-r--r--drivers/iio/industrialio-core.c51
-rw-r--r--drivers/iio/industrialio-event.c100
-rw-r--r--drivers/iio/industrialio-trigger.c11
-rw-r--r--drivers/iio/light/Kconfig26
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adjd_s311.c3
-rw-r--r--drivers/iio/light/cm32181.c16
-rw-r--r--drivers/iio/light/cm36651.c45
-rw-r--r--drivers/iio/light/hid-sensor-prox.c375
-rw-r--r--drivers/iio/light/ltr501.c445
-rw-r--r--drivers/iio/light/tcs3472.c2
-rw-r--r--drivers/iio/light/tsl2563.c16
-rw-r--r--drivers/iio/magnetometer/ak8975.c17
-rw-r--r--drivers/iio/magnetometer/mag3110.c25
-rw-r--r--drivers/iio/pressure/Kconfig16
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c376
-rw-r--r--drivers/iio/pressure/mpl3115.c2
-rw-r--r--drivers/iio/pressure/st_pressure.h1
-rw-r--r--drivers/iio/pressure/st_pressure_core.c87
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c1
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c1
-rw-r--r--drivers/infiniband/Kconfig6
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/Makefile5
-rw-r--r--drivers/infiniband/core/addr.c97
-rw-r--r--drivers/infiniband/core/cm.c52
-rw-r--r--drivers/infiniband/core/cma.c83
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/iwcm.c3
-rw-r--r--drivers/infiniband/core/sa_query.c12
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucma.c18
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/core/verbs.c101
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c3
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c40
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/main.c806
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c157
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c8
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c310
-rw-r--r--drivers/infiniband/hw/mlx5/main.c39
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c151
-rw-r--r--drivers/infiniband/hw/mlx5/user.h10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c3
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c21
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c140
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c9
-rw-r--r--drivers/infiniband/hw/usnic/Kconfig10
-rw-r--r--drivers/infiniband/hw/usnic/Makefile15
-rw-r--r--drivers/infiniband/hw/usnic/usnic.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h73
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h27
-rw-r--r--drivers/infiniband/hw/usnic/usnic_common_util.h68
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c154
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c350
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h113
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib.h118
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c682
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c761
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h117
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c341
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h29
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c765
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h72
-rw-r--r--drivers/infiniband/hw/usnic/usnic_log.h58
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c202
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.h51
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c604
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h80
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c236
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h73
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c467
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.h103
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c403
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h17
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/input/evdev.c4
-rw-r--r--drivers/input/gameport/emu10k1-gp.c1
-rw-r--r--drivers/input/gameport/fm801-gp.c1
-rw-r--r--drivers/input/input.c76
-rw-r--r--drivers/input/joystick/a3d.c1
-rw-r--r--drivers/input/joystick/adi.c1
-rw-r--r--drivers/input/joystick/cobra.c1
-rw-r--r--drivers/input/joystick/gf2k.c1
-rw-r--r--drivers/input/joystick/grip.c1
-rw-r--r--drivers/input/joystick/grip_mp.c1
-rw-r--r--drivers/input/joystick/guillemot.c1
-rw-r--r--drivers/input/joystick/iforce/iforce.h1
-rw-r--r--drivers/input/joystick/interact.c1
-rw-r--r--drivers/input/joystick/joydump.c1
-rw-r--r--drivers/input/joystick/magellan.c1
-rw-r--r--drivers/input/joystick/sidewinder.c1
-rw-r--r--drivers/input/joystick/spaceball.c1
-rw-r--r--drivers/input/joystick/spaceorb.c1
-rw-r--r--drivers/input/joystick/stinger.c1
-rw-r--r--drivers/input/joystick/tmdc.c1
-rw-r--r--drivers/input/joystick/twidjoy.c1
-rw-r--r--drivers/input/joystick/warrior.c1
-rw-r--r--drivers/input/joystick/xpad.c7
-rw-r--r--drivers/input/joystick/zhenhua.c1
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5520-keys.c3
-rw-r--r--drivers/input/keyboard/adp5588-keys.c23
-rw-r--r--drivers/input/keyboard/adp5589-keys.c9
-rw-r--r--drivers/input/keyboard/bf54x-keys.c5
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c15
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c2
-rw-r--r--drivers/input/keyboard/goldfish_events.c1
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c1
-rw-r--r--drivers/input/keyboard/hil_kbd.c1
-rw-r--r--drivers/input/keyboard/imx_keypad.c4
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c1
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c1
-rw-r--r--drivers/input/keyboard/lkkbd.c1
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/lm8333.c3
-rw-r--r--drivers/input/keyboard/matrix_keypad.c1
-rw-r--r--drivers/input/keyboard/max7359_keypad.c3
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c3
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c4
-rw-r--r--drivers/input/keyboard/newtonkbd.c1
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c3
-rw-r--r--drivers/input/keyboard/omap-keypad.c3
-rw-r--r--drivers/input/keyboard/omap4-keypad.c1
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c1
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c4
-rw-r--r--drivers/input/keyboard/qt1070.c1
-rw-r--r--drivers/input/keyboard/qt2160.c1
-rw-r--r--drivers/input/keyboard/samsung-keypad.c37
-rw-r--r--drivers/input/keyboard/sh_keysc.c5
-rw-r--r--drivers/input/keyboard/spear-keyboard.c1
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c1
-rw-r--r--drivers/input/keyboard/stowaway.c1
-rw-r--r--drivers/input/keyboard/sunkbd.c1
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c1
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c13
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c2
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c122
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c3
-rw-r--r--drivers/input/keyboard/xtkbd.c1
-rw-r--r--drivers/input/misc/Kconfig9
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ad714x.c5
-rw-r--r--drivers/input/misc/adxl34x.c3
-rw-r--r--drivers/input/misc/arizona-haptics.c19
-rw-r--r--drivers/input/misc/atlas_btns.c3
-rw-r--r--drivers/input/misc/bfin_rotary.c3
-rw-r--r--drivers/input/misc/bma150.c3
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/cobalt_btns.c1
-rw-r--r--drivers/input/misc/da9052_onkey.c30
-rw-r--r--drivers/input/misc/da9055_onkey.c1
-rw-r--r--drivers/input/misc/dm355evm_keys.c1
-rw-r--r--drivers/input/misc/gp2ap002a00f.c2
-rw-r--r--drivers/input/misc/gpio-beeper.c127
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c4
-rw-r--r--drivers/input/misc/keyspan_remote.c1
-rw-r--r--drivers/input/misc/kxtj9.c3
-rw-r--r--drivers/input/misc/max8997_haptic.c1
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c1
-rw-r--r--drivers/input/misc/mpu3050.c1
-rw-r--r--drivers/input/misc/pcap_keys.c1
-rw-r--r--drivers/input/misc/pcf50633-input.c1
-rw-r--r--drivers/input/misc/pcf8574_keypad.c1
-rw-r--r--drivers/input/misc/pcspkr.c1
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c107
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c106
-rw-r--r--drivers/input/misc/powermate.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c2
-rw-r--r--drivers/input/misc/retu-pwrbutton.c1
-rw-r--r--drivers/input/misc/rotary_encoder.c1
-rw-r--r--drivers/input/misc/sgi_btns.c1
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c1
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c46
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/misc/wm831x-on.c1
-rw-r--r--drivers/input/misc/xen-kbdfront.c4
-rw-r--r--drivers/input/misc/yealink.c1
-rw-r--r--drivers/input/mouse/alps.c214
-rw-r--r--drivers/input/mouse/alps.h7
-rw-r--r--drivers/input/mouse/appletouch.c1
-rw-r--r--drivers/input/mouse/bcm5974.c1
-rw-r--r--drivers/input/mouse/cypress_ps2.c2
-rw-r--r--drivers/input/mouse/elantech.c45
-rw-r--r--drivers/input/mouse/gpio_mouse.c3
-rw-r--r--drivers/input/mouse/logips2pp.c2
-rw-r--r--drivers/input/mouse/navpoint.c1
-rw-r--r--drivers/input/mouse/pxa930_trkball.c3
-rw-r--r--drivers/input/mouse/sermouse.c1
-rw-r--r--drivers/input/mouse/synaptics.c55
-rw-r--r--drivers/input/mouse/synaptics_usb.c1
-rw-r--r--drivers/input/mouse/vsxxxaa.c1
-rw-r--r--drivers/input/mousedev.c73
-rw-r--r--drivers/input/serio/Kconfig11
-rw-r--r--drivers/input/serio/altera_ps2.c1
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/serio/olpc_apsp.c1
-rw-r--r--drivers/input/serio/pcips2.c2
-rw-r--r--drivers/input/serio/q40kbd.c1
-rw-r--r--drivers/input/serio/rpckbd.c1
-rw-r--r--drivers/input/serio/serio_raw.c1
-rw-r--r--drivers/input/serio/xilinx_ps2.c1
-rw-r--r--drivers/input/sparse-keymap.c2
-rw-r--r--drivers/input/tablet/acecad.c1
-rw-r--r--drivers/input/tablet/aiptek.c1
-rw-r--r--drivers/input/tablet/gtco.c1
-rw-r--r--drivers/input/tablet/hanwang.c1
-rw-r--r--drivers/input/tablet/kbtab.c1
-rw-r--r--drivers/input/tablet/wacom.h1
-rw-r--r--drivers/input/tablet/wacom_sys.c23
-rw-r--r--drivers/input/tablet/wacom_wac.c169
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c2
-rw-r--r--drivers/input/touchscreen/ad7877.c3
-rw-r--r--drivers/input/touchscreen/ad7879.c5
-rw-r--r--drivers/input/touchscreen/ads7846.c86
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c3
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c3
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c2
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c4
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c_common.c2
-rw-r--r--drivers/input/touchscreen/da9034-ts.c3
-rw-r--r--drivers/input/touchscreen/dynapro.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c71
-rw-r--r--drivers/input/touchscreen/eeti_ts.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c1
-rw-r--r--drivers/input/touchscreen/elo.c1
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c1
-rw-r--r--drivers/input/touchscreen/gunze.c1
-rw-r--r--drivers/input/touchscreen/hampshire.c1
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/inexio.c1
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c1
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c1
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c1
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c1
-rw-r--r--drivers/input/touchscreen/max11801_ts.c1
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c5
-rw-r--r--drivers/input/touchscreen/mms114.c1
-rw-r--r--drivers/input/touchscreen/mtouch.c1
-rw-r--r--drivers/input/touchscreen/pcap_ts.c1
-rw-r--r--drivers/input/touchscreen/penmount.c1
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c3
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c5
-rw-r--r--drivers/input/touchscreen/st1232.c5
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c1
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c1
-rw-r--r--drivers/input/touchscreen/touchit213.c1
-rw-r--r--drivers/input/touchscreen/touchright.c1
-rw-r--r--drivers/input/touchscreen/touchwin.c1
-rw-r--r--drivers/input/touchscreen/tsc2005.c2
-rw-r--r--drivers/input/touchscreen/tsc2007.c230
-rw-r--r--drivers/input/touchscreen/tsc40.c1
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c9
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c1
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c1
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/input/touchscreen/zforce_ts.c3
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c1
-rw-r--r--drivers/iommu/Kconfig1
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c1
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/arm-smmu.c138
-rw-r--r--drivers/iommu/dmar.c135
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel-iommu.c216
-rw-r--r--drivers/iommu/intel_irq_remapping.c109
-rw-r--r--drivers/iommu/irq_remapping.c6
-rw-r--r--drivers/iommu/of_iommu.c1
-rw-r--r--drivers/iommu/omap-iommu-debug.c4
-rw-r--r--drivers/iommu/shmobile-iommu.c5
-rw-r--r--drivers/iommu/shmobile-ipmmu.c10
-rw-r--r--drivers/iommu/shmobile-ipmmu.h2
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/exynos-combiner.c15
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c104
-rw-r--r--drivers/irqchip/irq-bcm2835.c4
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c150
-rw-r--r--drivers/irqchip/irq-gic.c11
-rw-r--r--drivers/irqchip/irq-metag-ext.c2
-rw-r--r--drivers/irqchip/irq-metag.c2
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/irqchip/irq-moxart.c2
-rw-r--r--drivers/irqchip/irq-orion.c24
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c21
-rw-r--r--drivers/irqchip/irq-sirfsoc.c5
-rw-r--r--drivers/irqchip/irq-sun4i.c42
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c208
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c15
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irq-vt8500.c3
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c164
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c108
-rw-r--r--drivers/irqchip/irq-zevio.c127
-rw-r--r--drivers/irqchip/irqchip.c3
-rw-r--r--drivers/isdn/capi/Kconfig18
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c4
-rw-r--r--drivers/isdn/mISDN/Kconfig2
-rw-r--r--drivers/isdn/mISDN/socket.c5
-rw-r--r--drivers/isdn/sc/event.c2
-rw-r--r--drivers/leds/led-triggers.c15
-rw-r--r--drivers/leds/leds-lp5521.c18
-rw-r--r--drivers/leds/leds-lp5523.c30
-rw-r--r--drivers/leds/leds-lp55xx-common.c2
-rw-r--r--drivers/leds/leds-lp8501.c2
-rw-r--r--drivers/leds/leds-mc13783.c89
-rw-r--r--drivers/leds/leds-pwm.c9
-rw-r--r--drivers/leds/leds-s3c24xx.c3
-rw-r--r--drivers/leds/leds-tca6507.c207
-rw-r--r--drivers/macintosh/adb.c57
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/mailbox/omap-mbox.h2
-rw-r--r--drivers/mcb/Kconfig31
-rw-r--r--drivers/mcb/Makefile7
-rw-r--r--drivers/mcb/mcb-core.c414
-rw-r--r--drivers/mcb/mcb-internal.h118
-rw-r--r--drivers/mcb/mcb-parse.c159
-rw-r--r--drivers/mcb/mcb-pci.c114
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bcache/Kconfig8
-rw-r--r--drivers/md/bcache/Makefile5
-rw-r--r--drivers/md/bcache/alloc.c240
-rw-r--r--drivers/md/bcache/bcache.h144
-rw-r--r--drivers/md/bcache/bset.c907
-rw-r--r--drivers/md/bcache/bset.h446
-rw-r--r--drivers/md/bcache/btree.c1196
-rw-r--r--drivers/md/bcache/btree.h74
-rw-r--r--drivers/md/bcache/closure.c90
-rw-r--r--drivers/md/bcache/closure.h355
-rw-r--r--drivers/md/bcache/debug.c268
-rw-r--r--drivers/md/bcache/debug.h27
-rw-r--r--drivers/md/bcache/extents.c620
-rw-r--r--drivers/md/bcache/extents.h13
-rw-r--r--drivers/md/bcache/io.c196
-rw-r--r--drivers/md/bcache/journal.c133
-rw-r--r--drivers/md/bcache/journal.h2
-rw-r--r--drivers/md/bcache/movinggc.c24
-rw-r--r--drivers/md/bcache/request.c388
-rw-r--r--drivers/md/bcache/request.h40
-rw-r--r--drivers/md/bcache/stats.c3
-rw-r--r--drivers/md/bcache/super.c187
-rw-r--r--drivers/md/bcache/sysfs.c224
-rw-r--r--drivers/md/bcache/trace.c2
-rw-r--r--drivers/md/bcache/util.c4
-rw-r--r--drivers/md/bcache/util.h8
-rw-r--r--drivers/md/bcache/writeback.c6
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/dm-bio-record.h37
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-policy-mq.c8
-rw-r--r--drivers/md/dm-cache-target.c48
-rw-r--r--drivers/md/dm-crypt.c64
-rw-r--r--drivers/md/dm-delay.c7
-rw-r--r--drivers/md/dm-flakey.c7
-rw-r--r--drivers/md/dm-io.c34
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log-userspace-transfer.c2
-rw-r--r--drivers/md/dm-mpath.c7
-rw-r--r--drivers/md/dm-raid1.c23
-rw-r--r--drivers/md/dm-region-hash.c3
-rw-r--r--drivers/md/dm-snap-persistent.c3
-rw-r--r--drivers/md/dm-snap.c19
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-thin-metadata.c58
-rw-r--r--drivers/md/dm-thin-metadata.h21
-rw-r--r--drivers/md/dm-thin.c373
-rw-r--r--drivers/md/dm-verity.c62
-rw-r--r--drivers/md/dm.c189
-rw-r--r--drivers/md/faulty.c19
-rw-r--r--drivers/md/linear.c96
-rw-r--r--drivers/md/md.c88
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/persistent-data/Kconfig10
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c115
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h11
-rw-r--r--drivers/md/raid0.c79
-rw-r--r--drivers/md/raid1.c86
-rw-r--r--drivers/md/raid10.c197
-rw-r--r--drivers/md/raid5.c184
-rw-r--r--drivers/media/Kconfig3
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/media/dvb-frontends/Kconfig7
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/cx24117.c131
-rw-r--r--drivers/media/dvb-frontends/dib8000.c590
-rw-r--r--drivers/media/dvb-frontends/drxk.h2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c24
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c1311
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h114
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h215
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c172
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.h2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c4
-rw-r--r--drivers/media/i2c/Kconfig27
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad9389b.c277
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7511.c64
-rw-r--r--drivers/media/i2c/adv7604.c645
-rw-r--r--drivers/media/i2c/adv7842.c646
-rw-r--r--drivers/media/i2c/lm3560.c34
-rw-r--r--drivers/media/i2c/mt9m032.c16
-rw-r--r--drivers/media/i2c/mt9p031.c32
-rw-r--r--drivers/media/i2c/mt9t001.c26
-rw-r--r--drivers/media/i2c/mt9v032.c264
-rw-r--r--drivers/media/i2c/s5k5baf.c2054
-rw-r--r--drivers/media/i2c/saa6588.c50
-rw-r--r--drivers/media/i2c/saa6752hs.c (renamed from drivers/media/pci/saa7134/saa6752hs.c)19
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c9
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c4
-rw-r--r--drivers/media/i2c/tvp514x.c3
-rw-r--r--drivers/media/i2c/tvp5150.c40
-rw-r--r--drivers/media/i2c/tvp7002.c3
-rw-r--r--drivers/media/i2c/vs6624.c2
-rw-r--r--drivers/media/media-entity.c41
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c10
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c9
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c9
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c10
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c9
-rw-r--r--drivers/media/pci/saa7134/Kconfig1
-rw-r--r--drivers/media/pci/saa7134/Makefile2
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c359
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c781
-rw-r--r--drivers/media/pci/saa7134/saa7134.h66
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/platform/Kconfig10
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c32
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c36
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.h1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c35
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c148
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c13
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c18
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c2
-rw-r--r--drivers/media/platform/mem2mem_testdev.c152
-rw-r--r--drivers/media/platform/omap3isp/isp.c100
-rw-r--r--drivers/media/platform/omap3isp/isp.h6
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c5
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c3
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c3
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c3
-rw-r--r--drivers/media/platform/omap3isp/ispqueue.c2
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c18
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c106
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c124
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1327
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h69
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c279
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h42
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c (renamed from drivers/media/platform/s5p-jpeg/jpeg-hw.h)82
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h63
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h209
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c28
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c57
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c26
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c34
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c2
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c39
-rw-r--r--drivers/media/platform/sh_vou.c16
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c179
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c7
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c4
-rw-r--r--drivers/media/platform/ti-vpe/Makefile2
-rw-r--r--drivers/media/platform/ti-vpe/csc.c196
-rw-r--r--drivers/media/platform/ti-vpe/csc.h68
-rw-r--r--drivers/media/platform/ti-vpe/sc.c311
-rw-r--r--drivers/media/platform/ti-vpe/sc.h208
-rw-r--r--drivers/media/platform/ti-vpe/sc_coeff.h1342
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c44
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h12
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c327
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h187
-rw-r--r--drivers/media/platform/vsp1/Makefile3
-rw-r--r--drivers/media/platform/vsp1/vsp1.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c39
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c7
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c222
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c252
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h16
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c34
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c96
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h10
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c356
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.h41
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c17
-rw-r--r--drivers/media/radio/Kconfig43
-rw-r--r--drivers/media/radio/Makefile4
-rw-r--r--drivers/media/radio/radio-raremono.c387
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c81
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/si4713/Kconfig40
-rw-r--r--drivers/media/radio/si4713/Makefile7
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c (renamed from drivers/media/radio/radio-si4713.c)0
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c540
-rw-r--r--drivers/media/radio/si4713/si4713.c (renamed from drivers/media/radio/si4713-i2c.c)279
-rw-r--r--drivers/media/radio/si4713/si4713.h (renamed from drivers/media/radio/si4713-i2c.h)4
-rw-r--r--drivers/media/radio/tea575x.c2
-rw-r--r--drivers/media/rc/imon.c8
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c75
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/rc-main.c20
-rw-r--r--drivers/media/rc/st_rc.c13
-rw-r--r--drivers/media/tuners/Kconfig7
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/e4000.c16
-rw-r--r--drivers/media/tuners/m88ts2022.c674
-rw-r--r--drivers/media/tuners/m88ts2022.h54
-rw-r--r--drivers/media/tuners/m88ts2022_priv.h34
-rw-r--r--drivers/media/tuners/tuner-xc2028.c38
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c13
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c116
-rw-r--r--drivers/media/usb/au0828/au0828.h6
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c23
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c59
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c21
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c455
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/Makefile5
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c434
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c553
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c410
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c112
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c199
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c209
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h11
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h20
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c652
-rw-r--r--drivers/media/usb/em28xx/em28xx.h120
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/usb/pwc/pwc-if.c1
-rw-r--r--drivers/media/usb/stk1160/stk1160-ac97.c6
-rw-r--r--drivers/media/usb/tlg2300/pd-alsa.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c4
-rw-r--r--drivers/media/v4l2-core/Kconfig11
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c126
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c139
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c485
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c53
-rw-r--r--drivers/memory/Kconfig15
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/fsl_ifc.c309
-rw-r--r--drivers/memory/ti-aemif.c427
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c30
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/message/i2o/iop.c85
-rw-r--r--drivers/mfd/arizona-core.c4
-rw-r--r--drivers/mfd/da9055-i2c.c12
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max8997.c6
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/sec-core.c138
-rw-r--r--drivers/mfd/sec-irq.c97
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/wm5102-tables.c21
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/misc/Kconfig3
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c1
-rw-r--r--drivers/misc/apds9802als.c1
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/bmp085.c1
-rw-r--r--drivers/misc/carma/carma-fpga.c1
-rw-r--r--drivers/misc/ds1682.c1
-rw-r--r--drivers/misc/echo/Kconfig (renamed from drivers/staging/echo/Kconfig)0
-rw-r--r--drivers/misc/echo/Makefile (renamed from drivers/staging/echo/Makefile)0
-rw-r--r--drivers/misc/echo/echo.c (renamed from drivers/staging/echo/echo.c)0
-rw-r--r--drivers/misc/echo/echo.h (renamed from drivers/staging/echo/echo.h)0
-rw-r--r--drivers/misc/echo/fir.h (renamed from drivers/staging/echo/fir.h)0
-rw-r--r--drivers/misc/echo/oslec.h (renamed from drivers/staging/echo/oslec.h)0
-rw-r--r--drivers/misc/eeprom/at25.c1
-rw-r--r--drivers/misc/eeprom/eeprom.c3
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/eeprom/max6875.c1
-rw-r--r--drivers/misc/eeprom/sunxi_sid.c3
-rw-r--r--drivers/misc/fsa9480.c2
-rw-r--r--drivers/misc/genwqe/card_debugfs.c1
-rw-r--r--drivers/misc/genwqe/card_dev.c1
-rw-r--r--drivers/misc/hmc6352.c1
-rw-r--r--drivers/misc/isl29003.c1
-rw-r--r--drivers/misc/isl29020.c1
-rw-r--r--drivers/misc/lattice-ecp3-config.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c1
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c1
-rw-r--r--drivers/misc/lkdtm.c74
-rw-r--r--drivers/misc/mei/Kconfig9
-rw-r--r--drivers/misc/mei/Makefile6
-rw-r--r--drivers/misc/mei/amthif.c72
-rw-r--r--drivers/misc/mei/bus.c21
-rw-r--r--drivers/misc/mei/client.c297
-rw-r--r--drivers/misc/mei/client.h30
-rw-r--r--drivers/misc/mei/debugfs.c54
-rw-r--r--drivers/misc/mei/hbm.c281
-rw-r--r--drivers/misc/mei/hbm.h1
-rw-r--r--drivers/misc/mei/hw-me.c30
-rw-r--r--drivers/misc/mei/hw-me.h1
-rw-r--r--drivers/misc/mei/hw-txe-regs.h294
-rw-r--r--drivers/misc/mei/hw-txe.c1107
-rw-r--r--drivers/misc/mei/hw-txe.h74
-rw-r--r--drivers/misc/mei/hw.h20
-rw-r--r--drivers/misc/mei/init.c43
-rw-r--r--drivers/misc/mei/interrupt.c159
-rw-r--r--drivers/misc/mei/main.c23
-rw-r--r--drivers/misc/mei/mei_dev.h54
-rw-r--r--drivers/misc/mei/nfc.c14
-rw-r--r--drivers/misc/mei/pci-me.c14
-rw-r--r--drivers/misc/mei/pci-txe.c293
-rw-r--r--drivers/misc/mei/wd.c136
-rw-r--r--drivers/misc/mic/Kconfig2
-rw-r--r--drivers/misc/mic/card/mic_device.h1
-rw-r--r--drivers/misc/mic/host/mic_device.h1
-rw-r--r--drivers/misc/mic/host/mic_intr.c2
-rw-r--r--drivers/misc/mic/host/mic_virtio.c3
-rw-r--r--drivers/misc/pch_phub.c5
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sram.c127
-rw-r--r--drivers/misc/ti-st/st_core.c1
-rw-r--r--drivers/misc/ti_dac7512.c1
-rw-r--r--drivers/misc/tsl2550.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c7
-rw-r--r--drivers/mmc/card/block.c7
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mmc/core/core.c3
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/quirks.c8
-rw-r--r--drivers/mmc/core/sd.c37
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/atmel-mci.c20
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c95
-rw-r--r--drivers/mmc/host/dw_mmc.c56
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c60
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c122
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c224
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c321
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h72
-rw-r--r--drivers/mmc/host/sdhci-pci.c191
-rw-r--r--drivers/mmc/host/sdhci-pci.h78
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c1
-rw-r--r--drivers/mmc/host/sdhci.c44
-rw-r--r--drivers/mmc/host/sh_mmcif.c100
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h17
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c38
-rw-r--r--drivers/mtd/Kconfig7
-rw-r--r--drivers/mtd/afs.c3
-rw-r--r--drivers/mtd/ar7part.c3
-rw-r--r--drivers/mtd/bcm47xxpart.c27
-rw-r--r--drivers/mtd/bcm63xxpart.c3
-rw-r--r--drivers/mtd/cmdlinepart.c3
-rw-r--r--drivers/mtd/devices/docg3.c20
-rw-r--r--drivers/mtd/devices/m25p80.c226
-rw-r--r--drivers/mtd/devices/ms02-nv.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c5
-rw-r--r--drivers/mtd/devices/mtdram.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c32
-rw-r--r--drivers/mtd/maps/ixp4xx.c28
-rw-r--r--drivers/mtd/maps/lantiq-flash.c37
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtdcore.c10
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/nand/Kconfig15
-rw-r--r--drivers/mtd/nand/atmel_nand.c8
-rw-r--r--drivers/mtd/nand/au1550nd.c6
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c1
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/cmx270_nand.c1
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/davinci_nand.c93
-rw-r--r--drivers/mtd/nand/denali.c53
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c3
-rw-r--r--drivers/mtd/nand/diskonchip.c13
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c5
-rw-r--r--drivers/mtd/nand/fsmc_nand.c16
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c127
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c207
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c26
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c19
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c25
-rw-r--r--drivers/mtd/nand/nand_base.c183
-rw-r--r--drivers/mtd/nand/nand_ids.c2
-rw-r--r--drivers/mtd/nand/nuc900_nand.c54
-rw-r--r--drivers/mtd/nand/omap2.c69
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c40
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c713
-rw-r--r--drivers/mtd/nand/s3c2410.c38
-rw-r--r--drivers/mtd/nand/sh_flctl.c57
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/tmio_nand.c46
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c5
-rw-r--r--drivers/mtd/ofpart.c19
-rw-r--r--drivers/mtd/onenand/generic.c2
-rw-r--r--drivers/mtd/onenand/samsung.h2
-rw-r--r--drivers/mtd/redboot.c3
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c2
-rw-r--r--drivers/mtd/ubi/attach.c4
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/fastmap.c8
-rw-r--r--drivers/mtd/ubi/io.c54
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/Space.c29
-rw-r--r--drivers/net/arcnet/com20020_cs.c1
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c922
-rw-r--r--drivers/net/bonding/bond_3ad.h3
-rw-r--r--drivers/net/bonding/bond_alb.c51
-rw-r--r--drivers/net/bonding/bond_alb.h3
-rw-r--r--drivers/net/bonding/bond_main.c714
-rw-r--r--drivers/net/bonding/bond_netlink.c464
-rw-r--r--drivers/net/bonding/bond_options.c1194
-rw-r--r--drivers/net/bonding/bond_options.h170
-rw-r--r--drivers/net/bonding/bond_procfs.c37
-rw-r--r--drivers/net/bonding/bond_sysfs.c896
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c144
-rw-r--r--drivers/net/bonding/bonding.h98
-rw-r--r--drivers/net/caif/caif_spi_slave.c1
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/dev.c18
-rw-r--r--drivers/net/can/flexcan.c179
-rw-r--r--drivers/net/can/janz-ican3.c21
-rw-r--r--drivers/net/can/mcp251x.c121
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c273
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/mscan/mscan.h3
-rw-r--r--drivers/net/can/pch_can.c4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c3
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c3
-rw-r--r--drivers/net/can/sja1000/plx_pci.c26
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c3
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing_cs.c3
-rw-r--r--drivers/net/can/softing/softing_fw.c3
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/can/ti_hecc.c11
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c3
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/eql.c95
-rw-r--r--drivers/net/ethernet/3com/3c509.c3
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c8
-rw-r--r--drivers/net/ethernet/8390/8390.h7
-rw-r--r--drivers/net/ethernet/8390/apne.c61
-rw-r--r--drivers/net/ethernet/8390/ax88796.c23
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c120
-rw-r--r--drivers/net/ethernet/8390/etherh.c53
-rw-r--r--drivers/net/ethernet/8390/hydra.c11
-rw-r--r--drivers/net/ethernet/8390/lib8390.c77
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c9
-rw-r--r--drivers/net/ethernet/8390/ne.c96
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c54
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c63
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c48
-rw-r--r--drivers/net/ethernet/8390/stnic.c28
-rw-r--r--drivers/net/ethernet/8390/wd.c42
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c22
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c22
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c1
-rw-r--r--drivers/net/ethernet/amd/7990.c837
-rw-r--r--drivers/net/ethernet/amd/7990.h268
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c5
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h3
-rw-r--r--drivers/net/ethernet/amd/hplance.c96
-rw-r--r--drivers/net/ethernet/amd/mvme147.c36
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c20
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h3
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c101
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c58
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h71
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c65
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h52
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c31
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c34
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c262
-rw-r--r--drivers/net/ethernet/broadcom/b44.h15
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c364
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c84
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c66
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c223
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c277
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c187
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c39
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c112
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c255
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h17
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c625
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c40
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h24
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c58
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c251
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h57
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c591
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h26
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c142
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c139
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c24
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h1
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c178
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c73
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c248
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c40
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c17
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c99
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c5
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c40
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h4
-rw-r--r--drivers/net/ethernet/icplus/ipg.h1
-rw-r--r--drivers/net/ethernet/intel/Kconfig49
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h127
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c237
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h136
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c666
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c469
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h107
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c316
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c400
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c432
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1754
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c77
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c662
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h170
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c195
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h64
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h152
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c876
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c927
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h106
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2153
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h55
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c254
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h238
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h165
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h72
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h84
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h4667
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h97
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c1575
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h296
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1152
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h364
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h321
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c390
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c2353
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c772
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c89
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c108
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c303
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h65
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c120
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c88
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c84
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h96
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1276
-rw-r--r--drivers/net/ethernet/korina.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c5
-rw-r--r--drivers/net/ethernet/marvell/Kconfig7
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c28
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c443
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c152
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c30
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c12
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c41
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c37
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h8
-rw-r--r--drivers/net/ethernet/netx-eth.c3
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h3
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c17
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h197
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c426
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h45
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c28
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c35
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c144
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c127
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c186
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c266
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c227
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c133
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c5
-rw-r--r--drivers/net/ethernet/rdc/r6040.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c263
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h56
-rw-r--r--drivers/net/ethernet/s6gmac.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c549
-rw-r--r--drivers/net/ethernet/sfc/efx.c201
-rw-r--r--drivers/net/ethernet/sfc/efx.h16
-rw-r--r--drivers/net/ethernet/sfc/enum.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon.c38
-rw-r--r--drivers/net/ethernet/sfc/farch.c48
-rw-r--r--drivers/net/ethernet/sfc/filter.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c444
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c76
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h733
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c89
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h75
-rw-r--r--drivers/net/ethernet/sfc/nic.c12
-rw-r--r--drivers/net/ethernet/sfc/nic.h34
-rw-r--r--drivers/net/ethernet/sfc/ptp.c861
-rw-r--r--drivers/net/ethernet/sfc/rx.c24
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c119
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c5
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c140
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c564
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c140
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.h4
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c166
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c53
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/tile/Kconfig12
-rw-r--r--drivers/net/ethernet/tile/tilegx.c40
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c18
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c36
-rw-r--r--drivers/net/fddi/defxx.c21
-rw-r--r--drivers/net/fddi/skfp/fplustm.c27
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h96
-rw-r--r--drivers/net/fddi/skfp/h/targetos.h1
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fddi/skfp/srf.c24
-rw-r--r--drivers/net/hamradio/6pack.c3
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/mkiss.c3
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c10
-rw-r--r--drivers/net/hyperv/netvsc_drv.c56
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ieee802154/at86rf230.c13
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/irda/Kconfig11
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/au1k_ir.c4
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/irda/esi-sir.c4
-rw-r--r--drivers/net/irda/irda-usb.c1
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/litelink-sir.c4
-rw-r--r--drivers/net/irda/ma600-sir.c4
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/old_belkin-sir.c4
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir_dongle.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.h4
-rw-r--r--drivers/net/irda/stir4200.c1
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/via-ircc.h3
-rw-r--r--drivers/net/irda/vlsi_ir.c7
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macvlan.c50
-rw-r--r--drivers/net/macvtap.c77
-rw-r--r--drivers/net/mdio.c28
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c36
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c4
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c1
-rw-r--r--drivers/net/phy/mdio-octeon.c1
-rw-r--r--drivers/net/phy/mdio-sun4i.c4
-rw-r--r--drivers/net/phy/mdio_bus.c36
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/phy/phy.c440
-rw-r--r--drivers/net/phy/phy_device.c466
-rw-r--r--drivers/net/phy/spi_ks8995.c7
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_mppe.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/team/team_mode_random.c8
-rw-r--r--drivers/net/tun.c101
-rw-r--r--drivers/net/usb/Kconfig20
-rw-r--r--drivers/net/usb/Makefile3
-rw-r--r--drivers/net/usb/asix.h4
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/ax88172a.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c49
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c12
-rw-r--r--drivers/net/usb/cdc_ncm.c49
-rw-r--r--drivers/net/usb/cdc_subset.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c4
-rw-r--r--drivers/net/usb/dm9601.c1
-rw-r--r--drivers/net/usb/gl620a.c8
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/int51x1.c3
-rw-r--r--drivers/net/usb/ipheth.c1
-rw-r--r--drivers/net/usb/kalmia.c1
-rw-r--r--drivers/net/usb/kaweth.c4
-rw-r--r--drivers/net/usb/lg-vl600.c3
-rw-r--r--drivers/net/usb/mcs7830.c9
-rw-r--r--drivers/net/usb/net1080.c8
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/r8152.c918
-rw-r--r--drivers/net/usb/r815x.c256
-rw-r--r--drivers/net/usb/rndis_host.c8
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/sierra_net.c3
-rw-r--r--drivers/net/usb/smsc75xx.c8
-rw-r--r--drivers/net/usb/smsc75xx.h3
-rw-r--r--drivers/net/usb/smsc95xx.c8
-rw-r--r--drivers/net/usb/smsc95xx.h3
-rw-r--r--drivers/net/usb/sr9700.c1
-rw-r--r--drivers/net/usb/sr9800.c874
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/usb/usbnet.c61
-rw-r--r--drivers/net/usb/zaurus.c4
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/virtio_net.c263
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c24
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h1
-rw-r--r--drivers/net/vxlan.c437
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64570.h4
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hd64572.h2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c5
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo_cs.c1
-rw-r--r--drivers/net/wireless/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c160
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c677
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c791
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c408
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h157
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile14
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h222
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h126
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c385
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c106
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c422
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h401
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h392
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h579
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h1559
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h117
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h718
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h540
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h434
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c147
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c632
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c269
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c89
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h75
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c671
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c134
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c309
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h29
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.c543
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.h212
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c272
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c588
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c42
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c13
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/ath/regd.c379
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c65
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/atmel.h4
-rw-r--r--drivers/net/wireless/atmel_cs.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c5
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c27
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.c375
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c737
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c539
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h487
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c392
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c207
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1592
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h61
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h304
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c216
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.c94
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.c62
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.h57
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c827
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h83
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c194
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c67
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h14
-rw-r--r--drivers/net/wireless/cw1200/cw1200_sdio.c4
-rw-r--r--drivers/net/wireless/cw1200/fwio.c1
-rw-r--r--drivers/net/wireless/cw1200/main.c2
-rw-r--r--drivers/net/wireless/cw1200/pm.c11
-rw-r--r--drivers/net/wireless/cw1200/scan.c15
-rw-r--r--drivers/net/wireless/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/cw1200/txrx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c30
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c42
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c12
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c11
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c5
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/iwlegacy/common.c13
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c40
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c28
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c546
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c689
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h69
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c211
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c366
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h65
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c51
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c28
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c400
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c2192
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h154
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c77
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c291
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c52
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c114
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c23
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c17
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c437
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c183
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c172
-rw-r--r--drivers/net/wireless/libertas/README5
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c6
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1213
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c6
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c70
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c4
-rw-r--r--drivers/net/wireless/mwifiex/decl.h1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h41
-rw-r--r--drivers/net/wireless/mwifiex/init.c3
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h7
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c34
-rw-r--r--drivers/net/wireless/mwifiex/scan.c16
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c80
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c38
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c20
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c46
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c15
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c39
-rw-r--r--drivers/net/wireless/mwifiex/usb.c68
-rw-r--r--drivers/net/wireless/mwifiex/usb.h12
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/led.c1
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/net2280.h3
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.h3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h3
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/prismcompat.h3
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c40
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c24
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c6
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c327
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c40
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c14
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h33
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c258
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h26
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h8
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c46
-rw-r--r--drivers/net/wireless/ti/wl1251/event.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c13
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c153
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h6
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c12
-rw-r--r--drivers/net/wireless/wl3501_cs.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al2230.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al7230b.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h3
-rw-r--r--drivers/net/xen-netback/common.h26
-rw-r--r--drivers/net/xen-netback/interface.c45
-rw-r--r--drivers/net/xen-netback/netback.c537
-rw-r--r--drivers/net/xen-netback/xenbus.c3
-rw-r--r--drivers/net/xen-netfront.c192
-rw-r--r--drivers/nfc/Kconfig1
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c6
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/microread/microread.c4
-rw-r--r--drivers/nfc/microread/microread.h4
-rw-r--r--drivers/nfc/nfcmrvl/Kconfig23
-rw-r--r--drivers/nfc/nfcmrvl/Makefile9
-rw-r--r--drivers/nfc/nfcmrvl/main.c165
-rw-r--r--drivers/nfc/nfcmrvl/nfcmrvl.h48
-rw-r--r--drivers/nfc/nfcmrvl/usb.c459
-rw-r--r--drivers/nfc/nfcwilink.c3
-rw-r--r--drivers/nfc/pn533.c7
-rw-r--r--drivers/nfc/pn544/i2c.c4
-rw-r--r--drivers/nfc/pn544/mei.c4
-rw-r--r--drivers/nfc/pn544/pn544.c50
-rw-r--r--drivers/nfc/pn544/pn544.h4
-rw-r--r--drivers/nfc/port100.c1
-rw-r--r--drivers/of/Kconfig18
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c13
-rw-r--r--drivers/of/base.c571
-rw-r--r--drivers/of/device.c3
-rw-r--r--drivers/of/fdt.c145
-rw-r--r--drivers/of/irq.c6
-rw-r--r--drivers/of/of_mdio.c161
-rw-r--r--drivers/of/of_net.c12
-rw-r--r--drivers/of/of_reserved_mem.c217
-rw-r--r--drivers/of/pdt.c3
-rw-r--r--drivers/of/selftest.c129
-rw-r--r--drivers/of/testcase-data/testcases.dtsi3
-rw-r--r--drivers/of/testcase-data/tests-interrupts.dtsi58
-rw-r--r--drivers/of/testcase-data/tests-match.dtsi19
-rw-r--r--drivers/of/testcase-data/tests-phandle.dtsi42
-rw-r--r--drivers/parport/share.c3
-rw-r--r--drivers/pci/Makefile23
-rw-r--r--drivers/pci/bus.c8
-rw-r--r--drivers/pci/host-bridge.c8
-rw-r--r--drivers/pci/host/Kconfig2
-rw-r--r--drivers/pci/host/pci-imx6.c47
-rw-r--r--drivers/pci/host/pci-mvebu.c37
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c180
-rw-r--r--drivers/pci/host/pci-tegra.c53
-rw-r--r--drivers/pci/host/pcie-designware.c6
-rw-r--r--drivers/pci/hotplug/acpiphp.h16
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c533
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c3
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/pciehp.h7
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c8
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c173
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c75
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/ioapic.c1
-rw-r--r--drivers/pci/iov.c119
-rw-r--r--drivers/pci/msi.c10
-rw-r--r--drivers/pci/pci-acpi.c17
-rw-r--r--drivers/pci/pci-driver.c33
-rw-r--r--drivers/pci/pci-label.c130
-rw-r--r--drivers/pci/pci-sysfs.c17
-rw-r--r--drivers/pci/pci.c131
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c114
-rw-r--r--drivers/pci/quirks.c190
-rw-r--r--drivers/pci/remove.c17
-rw-r--r--drivers/pci/rom.c2
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-res.c37
-rw-r--r--drivers/pci/xen-pcifront.c4
-rw-r--r--drivers/pcmcia/sa11xx_base.c3
-rw-r--r--drivers/pcmcia/yenta_socket.c18
-rw-r--r--drivers/phy/Kconfig109
-rw-r--r--drivers/phy/Makefile9
-rw-r--r--drivers/phy/phy-bcm-kona-usb2.c4
-rw-r--r--drivers/phy/phy-core.c152
-rw-r--r--drivers/phy/phy-exynos-dp-video.c8
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c10
-rw-r--r--drivers/phy/phy-exynos4210-usb2.c261
-rw-r--r--drivers/phy/phy-exynos4x12-usb2.c328
-rw-r--r--drivers/phy/phy-exynos5250-sata.c251
-rw-r--r--drivers/phy/phy-exynos5250-usb2.c404
-rw-r--r--drivers/phy/phy-mvebu-sata.c10
-rw-r--r--drivers/phy/phy-omap-control.c (renamed from drivers/usb/phy/phy-omap-control.c)169
-rw-r--r--drivers/phy/phy-omap-usb2.c139
-rw-r--r--drivers/phy/phy-samsung-usb2.c228
-rw-r--r--drivers/phy/phy-samsung-usb2.h67
-rw-r--r--drivers/phy/phy-sun4i-usb.c331
-rw-r--r--drivers/phy/phy-ti-pipe3.c470
-rw-r--r--drivers/phy/phy-twl4030-usb.c16
-rw-r--r--drivers/phy/phy-xgene.c1750
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/devicetree.c4
-rw-r--r--drivers/pinctrl/mvebu/Kconfig9
-rw-r--r--drivers/pinctrl/mvebu/Makefile2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c20
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c459
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c462
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c404
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c25
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c122
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h55
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf54x.c138
-rw-r--r--drivers/pinctrl/pinctrl-adi2-bf60x.c128
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c31
-rw-r--r--drivers/pinctrl/pinctrl-adi2.h8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c49
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c42
-rw-r--r--drivers/pinctrl/pinctrl-capri.c2
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c82
-rw-r--r--drivers/pinctrl/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c10
-rw-r--r--drivers/pinctrl/pinctrl-msm.c113
-rw-r--r--drivers/pinctrl/pinctrl-msm.h5
-rw-r--r--drivers/pinctrl/pinctrl-msm8x74.c14
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c313
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.h14
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-st.c462
-rw-r--r--drivers/pinctrl/pinctrl-sunxi-pins.h12
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c6
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h6
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c40
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h4
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c1100
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c1243
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c640
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c1287
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c171
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c602
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c46
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c5
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c7
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/chrome/Kconfig14
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c298
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c101
-rw-r--r--drivers/platform/x86/Kconfig21
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c51
-rw-r--r--drivers/platform/x86/classmate-laptop.c3
-rw-r--r--drivers/platform/x86/compal-laptop.c121
-rw-r--r--drivers/platform/x86/dell-laptop.c78
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c1
-rw-r--r--drivers/platform/x86/dell-wmi.c1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c55
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c16
-rw-r--r--drivers/platform/x86/hp-wireless.c132
-rw-r--r--drivers/platform/x86/hp_accel.c9
-rw-r--r--drivers/platform/x86/ideapad-laptop.c3
-rw-r--r--drivers/platform/x86/intel-rst.c2
-rw-r--r--drivers/platform/x86/intel-smartconnect.c2
-rw-r--r--drivers/platform/x86/intel_baytrail.c224
-rw-r--r--drivers/platform/x86/intel_baytrail.h90
-rw-r--r--drivers/platform/x86/intel_menlow.c4
-rw-r--r--drivers/platform/x86/intel_oaktrail.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c87
-rw-r--r--drivers/platform/x86/mxm-wmi.c4
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/pvpanic.c3
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c6
-rw-r--r--drivers/platform/x86/tc1100-wmi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c20
-rw-r--r--drivers/platform/x86/toshiba_acpi.c5
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/platform/x86/xo15-ebook.c3
-rw-r--r--drivers/pnp/card.c1
-rw-r--r--drivers/pnp/pnpacpi/core.c33
-rw-r--r--drivers/pnp/pnpacpi/pnpacpi.h1
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c15
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c9
-rw-r--r--drivers/pnp/pnpbios/core.c12
-rw-r--r--drivers/pnp/resource.c4
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/powercap/intel_rapl.c17
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/pwm/Kconfig14
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c6
-rw-r--r--drivers/pwm/pwm-atmel.c395
-rw-r--r--drivers/pwm/pwm-ep93xx.c4
-rw-r--r--drivers/pwm/pwm-jz4740.c20
-rw-r--r--drivers/pwm/pwm-lp3943.c4
-rw-r--r--drivers/pwm/pwm-pxa.c55
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c4
-rw-r--r--drivers/pwm/sysfs.c12
-rw-r--r--drivers/rapidio/devices/tsi721.h1
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c27
-rw-r--r--drivers/regulator/88pm800.c4
-rw-r--r--drivers/regulator/88pm8607.c8
-rw-r--r--drivers/regulator/Kconfig60
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/aat2870-regulator.c1
-rw-r--r--drivers/regulator/ab3100.c4
-rw-r--r--drivers/regulator/ab8500.c126
-rw-r--r--drivers/regulator/act8865-regulator.c344
-rw-r--r--drivers/regulator/anatop-regulator.c160
-rw-r--r--drivers/regulator/arizona-ldo1.c11
-rw-r--r--drivers/regulator/arizona-micsupp.c56
-rw-r--r--drivers/regulator/as3711-regulator.c15
-rw-r--r--drivers/regulator/as3722-regulator.c35
-rw-r--r--drivers/regulator/bcm590xx-regulator.c403
-rw-r--r--drivers/regulator/core.c90
-rw-r--r--drivers/regulator/da9052-regulator.c29
-rw-r--r--drivers/regulator/da9055-regulator.c73
-rw-r--r--drivers/regulator/da9063-regulator.c17
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/db8500-prcmu.c22
-rw-r--r--drivers/regulator/dbx500-prcmu.c16
-rw-r--r--drivers/regulator/dummy.c6
-rw-r--r--drivers/regulator/fan53555.c13
-rw-r--r--drivers/regulator/fixed.c46
-rw-r--r--drivers/regulator/gpio-regulator.c51
-rw-r--r--drivers/regulator/helpers.c48
-rw-r--r--drivers/regulator/lp3971.c45
-rw-r--r--drivers/regulator/lp3972.c41
-rw-r--r--drivers/regulator/lp872x.c4
-rw-r--r--drivers/regulator/max14577.c269
-rw-r--r--drivers/regulator/max1586.c15
-rw-r--r--drivers/regulator/max77686.c15
-rw-r--r--drivers/regulator/max77693.c45
-rw-r--r--drivers/regulator/max8649.c8
-rw-r--r--drivers/regulator/max8660.c35
-rw-r--r--drivers/regulator/max8907-regulator.c16
-rw-r--r--drivers/regulator/max8925-regulator.c8
-rw-r--r--drivers/regulator/max8952.c26
-rw-r--r--drivers/regulator/max8973-regulator.c6
-rw-r--r--drivers/regulator/max8997.c22
-rw-r--r--drivers/regulator/max8998.c27
-rw-r--r--drivers/regulator/mc13892-regulator.c24
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c12
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c229
-rw-r--r--drivers/regulator/rc5t583-regulator.c13
-rw-r--r--drivers/regulator/s2mpa01.c481
-rw-r--r--drivers/regulator/s2mps11.c367
-rw-r--r--drivers/regulator/s5m8767.c256
-rw-r--r--drivers/regulator/st-pwm.c190
-rw-r--r--drivers/regulator/stw481x-vmmc.c12
-rw-r--r--drivers/regulator/ti-abb-regulator.c140
-rw-r--r--drivers/regulator/tps51632-regulator.c38
-rw-r--r--drivers/regulator/tps62360-regulator.c11
-rw-r--r--drivers/regulator/tps6507x-regulator.c29
-rw-r--r--drivers/regulator/tps65090-regulator.c13
-rw-r--r--drivers/regulator/tps65217-regulator.c17
-rw-r--r--drivers/regulator/tps65218-regulator.c285
-rw-r--r--drivers/regulator/tps6524x-regulator.c5
-rw-r--r--drivers/regulator/tps6586x-regulator.c20
-rw-r--r--drivers/regulator/tps65910-regulator.c61
-rw-r--r--drivers/regulator/tps80031-regulator.c6
-rw-r--r--drivers/regulator/twl-regulator.c4
-rw-r--r--drivers/regulator/wm831x-dcdc.c26
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c12
-rw-r--r--drivers/regulator/wm8350-regulator.c4
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-sunxi.c175
-rw-r--r--drivers/rtc/Kconfig31
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/class.c24
-rw-r--r--drivers/rtc/rtc-as3722.c19
-rw-r--r--drivers/rtc/rtc-cmos.c8
-rw-r--r--drivers/rtc/rtc-ds1305.c1
-rw-r--r--drivers/rtc/rtc-ds1742.c10
-rw-r--r--drivers/rtc/rtc-hym8563.c606
-rw-r--r--drivers/rtc/rtc-isl12057.c310
-rw-r--r--drivers/rtc/rtc-max8907.c11
-rw-r--r--drivers/rtc/rtc-mxc.c10
-rw-r--r--drivers/rtc/rtc-pcf2127.c5
-rw-r--r--drivers/rtc/rtc-rx8581.c81
-rw-r--r--drivers/rtc/rtc-s3c.c17
-rw-r--r--drivers/rtc/rtc-s5m.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c523
-rw-r--r--drivers/rtc/rtc-twl.c38
-rw-r--r--drivers/rtc/rtc-vr41xx.c50
-rw-r--r--drivers/s390/block/dasd_diag.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c48
-rw-r--r--drivers/s390/block/dasd_fba.c26
-rw-r--r--drivers/s390/block/dcssblk.c35
-rw-r--r--drivers/s390/block/scm_blk.c8
-rw-r--r--drivers/s390/block/scm_blk_cluster.c4
-rw-r--r--drivers/s390/block/xpram.c24
-rw-r--r--drivers/s390/char/con3215.c8
-rw-r--r--drivers/s390/char/con3270.c13
-rw-r--r--drivers/s390/char/raw3270.c26
-rw-r--r--drivers/s390/char/raw3270.h3
-rw-r--r--drivers/s390/char/sclp_cmd.c5
-rw-r--r--drivers/s390/char/sclp_early.c31
-rw-r--r--drivers/s390/char/vmur.c4
-rw-r--r--drivers/s390/cio/airq.c66
-rw-r--r--drivers/s390/cio/ccwgroup.c26
-rw-r--r--drivers/s390/cio/chsc.c1
-rw-r--r--drivers/s390/cio/chsc_sch.c3
-rw-r--r--drivers/s390/cio/cio.c52
-rw-r--r--drivers/s390/cio/device.c52
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c24
-rw-r--r--drivers/s390/kvm/virtio_ccw.c334
-rw-r--r--drivers/s390/net/Makefile2
-rw-r--r--drivers/s390/net/netiucv.c8
-rw-r--r--drivers/s390/net/qeth_core.h37
-rw-r--r--drivers/s390/net/qeth_core_main.c218
-rw-r--r--drivers/s390/net/qeth_core_mpc.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h150
-rw-r--r--drivers/s390/net/qeth_l2.h15
-rw-r--r--drivers/s390/net/qeth_l2_main.c626
-rw-r--r--drivers/s390/net/qeth_l2_sys.c223
-rw-r--r--drivers/s390/net/qeth_l3_main.c11
-rw-r--r--drivers/sbus/char/bbc_i2c.c1
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c1
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/jsflash.c1
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/rx.c9
-rw-r--r--drivers/scsi/aacraid/sa.c3
-rw-r--r--drivers/scsi/aacraid/src.c4
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c7
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/arm/cumana_1.c2
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/atari_scsi.c12
-rw-r--r--drivers/scsi/be2iscsi/be.h11
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c121
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h10
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c13
-rw-r--r--drivers/scsi/be2iscsi/be_main.c313
-rw-r--r--drivers/scsi/be2iscsi/be_main.h23
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c22
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c11
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c4
-rw-r--r--drivers/scsi/bfa/bfad_im.c7
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c35
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c39
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c98
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c31
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c59
-rw-r--r--drivers/scsi/dtc.c2
-rw-r--r--drivers/scsi/eata.c2
-rw-r--r--drivers/scsi/eata_pio.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c8
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth.c6
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/hpsa.c2618
-rw-r--r--drivers/scsi/hpsa.h171
-rw-r--r--drivers/scsi/hpsa_cmd.h270
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/in2000.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c382
-rw-r--r--drivers/scsi/ipr.h22
-rw-r--r--drivers/scsi/isci/host.h5
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/isci/port_config.c7
-rw-r--r--drivers/scsi/isci/request.c8
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c1
-rw-r--r--drivers/scsi/iscsi_tcp.c25
-rw-r--r--drivers/scsi/libiscsi.c232
-rw-r--r--drivers/scsi/libiscsi_tcp.c71
-rw-r--r--drivers/scsi/libsas/sas_ata.c35
-rw-r--r--drivers/scsi/libsas/sas_expander.c8
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c628
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h24
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c108
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c200
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c277
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c552
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c267
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c120
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h114
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c815
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c11
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c272
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c41
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c39
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/pas16.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c38
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c105
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c12
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h12
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c97
-rw-r--r--drivers/scsi/qla1280.c7
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c195
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c134
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h80
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c426
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c139
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c252
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c171
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c209
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c909
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h205
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c328
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h11
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c38
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h17
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h31
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c7
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c25
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c89
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c232
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c373
-rw-r--r--drivers/scsi/scsi_debug.c141
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_lib.c114
-rw-r--r--drivers/scsi/scsi_scan.c115
-rw-r--r--drivers/scsi/scsi_sysfs.c257
-rw-r--r--drivers/scsi/scsi_tgt_lib.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c6
-rw-r--r--drivers/scsi/scsi_transport_srp.c95
-rw-r--r--drivers/scsi/sd.c44
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/sd_dif.c30
-rw-r--r--drivers/scsi/ses.c38
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/scsi/t128.c2
-rw-r--r--drivers/scsi/u14-34f.c2
-rw-r--r--drivers/scsi/virtio_scsi.c15
-rw-r--r--drivers/scsi/vmw_pvscsi.c242
-rw-r--r--drivers/scsi/vmw_pvscsi.h19
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/sfi/sfi_acpi.c4
-rw-r--r--drivers/spi/Kconfig74
-rw-r--r--drivers/spi/Makefile6
-rw-r--r--drivers/spi/spi-altera.c9
-rw-r--r--drivers/spi/spi-ath79.c19
-rw-r--r--drivers/spi/spi-atmel.c811
-rw-r--r--drivers/spi/spi-au1550.c39
-rw-r--r--drivers/spi/spi-bcm2835.c11
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c475
-rw-r--r--drivers/spi/spi-bcm63xx.c54
-rw-r--r--drivers/spi/spi-bfin-sport.c1
-rw-r--r--drivers/spi/spi-bfin-v3.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c8
-rw-r--r--drivers/spi/spi-bitbang-txrx.h2
-rw-r--r--drivers/spi/spi-bitbang.c5
-rw-r--r--drivers/spi/spi-butterfly.c3
-rw-r--r--drivers/spi/spi-clps711x.c248
-rw-r--r--drivers/spi/spi-coldfire-qspi.c171
-rw-r--r--drivers/spi/spi-davinci.c61
-rw-r--r--drivers/spi/spi-dw-mmio.c76
-rw-r--r--drivers/spi/spi-dw-pci.c47
-rw-r--r--drivers/spi/spi-dw.c43
-rw-r--r--drivers/spi/spi-dw.h5
-rw-r--r--drivers/spi/spi-efm32.c46
-rw-r--r--drivers/spi/spi-ep93xx.c21
-rw-r--r--drivers/spi/spi-falcon.c17
-rw-r--r--drivers/spi/spi-fsl-dspi.c105
-rw-r--r--drivers/spi/spi-fsl-espi.c68
-rw-r--r--drivers/spi/spi-fsl-lib.c14
-rw-r--r--drivers/spi/spi-fsl-spi.c30
-rw-r--r--drivers/spi/spi-gpio.c16
-rw-r--r--drivers/spi/spi-imx.c38
-rw-r--r--drivers/spi/spi-mpc512x-psc.c61
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c1
-rw-r--r--drivers/spi/spi-mpc52xx.c17
-rw-r--r--drivers/spi/spi-mxs.c16
-rw-r--r--drivers/spi/spi-nuc900.c84
-rw-r--r--drivers/spi/spi-oc-tiny.c65
-rw-r--r--drivers/spi/spi-octeon.c80
-rw-r--r--drivers/spi/spi-omap-100k.c72
-rw-r--r--drivers/spi/spi-omap-uwire.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c103
-rw-r--r--drivers/spi/spi-orion.c84
-rw-r--r--drivers/spi/spi-pl022.c80
-rw-r--r--drivers/spi/spi-ppc4xx.c1
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c1
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c2
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c1
-rw-r--r--drivers/spi/spi-pxa2xx.c7
-rw-r--r--drivers/spi/spi-qup.c779
-rw-r--r--drivers/spi/spi-rspi.c1129
-rw-r--r--drivers/spi/spi-s3c24xx.c93
-rw-r--r--drivers/spi/spi-s3c64xx.c429
-rw-r--r--drivers/spi/spi-sc18is602.c51
-rw-r--r--drivers/spi/spi-sh-hspi.c47
-rw-r--r--drivers/spi/spi-sh-msiof.c415
-rw-r--r--drivers/spi/spi-sh-sci.c8
-rw-r--r--drivers/spi/spi-sh.c13
-rw-r--r--drivers/spi/spi-sirf.c123
-rw-r--r--drivers/spi/spi-sun4i.c478
-rw-r--r--drivers/spi/spi-sun6i.c484
-rw-r--r--drivers/spi/spi-tegra114.c193
-rw-r--r--drivers/spi/spi-tegra20-sflash.c66
-rw-r--r--drivers/spi/spi-tegra20-slink.c183
-rw-r--r--drivers/spi/spi-ti-qspi.c132
-rw-r--r--drivers/spi/spi-ti-ssp.c378
-rw-r--r--drivers/spi/spi-topcliff-pch.c70
-rw-r--r--drivers/spi/spi-txx9.c33
-rw-r--r--drivers/spi/spi-xcomm.c25
-rw-r--r--drivers/spi/spi-xilinx.c27
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c170
-rw-r--r--drivers/spi/spi.c327
-rw-r--r--drivers/spi/spidev.c23
-rw-r--r--drivers/spmi/Kconfig27
-rw-r--r--drivers/spmi/Makefile6
-rw-r--r--drivers/spmi/spmi-pmic-arb.c778
-rw-r--r--drivers/spmi/spmi.c574
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c6
-rw-r--r--drivers/ssb/driver_gpio.c306
-rw-r--r--drivers/ssb/main.c12
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/Makefile9
-rw-r--r--drivers/staging/android/Kconfig15
-rw-r--r--drivers/staging/android/android_alarm.h44
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/ashmem.h30
-rw-r--r--drivers/staging/android/binder.c271
-rw-r--r--drivers/staging/android/binder.h308
-rw-r--r--drivers/staging/android/binder_trace.h14
-rw-r--r--drivers/staging/android/ion/compat_ion.c26
-rw-r--r--drivers/staging/android/ion/ion.c156
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c24
-rw-r--r--drivers/staging/android/ion/ion_heap.c69
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c8
-rw-r--r--drivers/staging/android/ion/ion_priv.h90
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c90
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c10
-rw-r--r--drivers/staging/android/lowmemorykiller.c5
-rw-r--r--drivers/staging/android/sw_sync.h37
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/android/sync.h88
-rw-r--r--drivers/staging/android/timed_gpio.c8
-rw-r--r--drivers/staging/android/timed_output.h4
-rw-r--r--drivers/staging/android/uapi/android_alarm.h62
-rw-r--r--drivers/staging/android/uapi/ashmem.h47
-rw-r--r--drivers/staging/android/uapi/binder.h351
-rw-r--r--drivers/staging/android/uapi/sw_sync.h32
-rw-r--r--drivers/staging/android/uapi/sync.h97
-rw-r--r--drivers/staging/bcm/Adapter.h6
-rw-r--r--drivers/staging/bcm/Bcmchar.c3439
-rw-r--r--drivers/staging/bcm/Bcmnet.c2
-rw-r--r--drivers/staging/bcm/CmHost.c61
-rw-r--r--drivers/staging/bcm/DDRInit.c32
-rw-r--r--drivers/staging/bcm/InterfaceInit.c416
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c223
-rw-r--r--drivers/staging/bcm/Kconfig1
-rw-r--r--drivers/staging/bcm/Qos.c35
-rw-r--r--drivers/staging/bcm/Typedefs.h22
-rw-r--r--drivers/staging/bcm/headers.h2
-rw-r--r--drivers/staging/bcm/hostmibs.c8
-rw-r--r--drivers/staging/bcm/nvm.c30
-rw-r--r--drivers/staging/ced1401/ced_ioc.c163
-rw-r--r--drivers/staging/ced1401/ced_ioctl.h31
-rw-r--r--drivers/staging/ced1401/usb1401.c195
-rw-r--r--drivers/staging/ced1401/usb1401.h6
-rw-r--r--drivers/staging/ced1401/use14_ioc.h5
-rw-r--r--drivers/staging/ced1401/userspace/use1401.c10
-rw-r--r--drivers/staging/comedi/Kconfig24
-rw-r--r--drivers/staging/comedi/comedi_fops.c191
-rw-r--r--drivers/staging/comedi/comedidev.h56
-rw-r--r--drivers/staging/comedi/drivers.c32
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c37
-rw-r--r--drivers/staging/comedi/drivers/Makefile2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c597
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c1187
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c857
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c106
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c73
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c14
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c16
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c26
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c36
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c6
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c57
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c22
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c3
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c73
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c92
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c31
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c84
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c10
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c31
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c35
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c29
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c42
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c491
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c150
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c51
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c59
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c41
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.c99
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h84
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c7
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/dac02.c172
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c85
-rw-r--r--drivers/staging/comedi/drivers/das08.c35
-rw-r--r--drivers/staging/comedi/drivers/das16.c19
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c38
-rw-r--r--drivers/staging/comedi/drivers/das1800.c10
-rw-r--r--drivers/staging/comedi/drivers/das6402.c672
-rw-r--r--drivers/staging/comedi/drivers/das800.c43
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c83
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c23
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c29
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c39
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c73
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c8
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c40
-rw-r--r--drivers/staging/comedi/drivers/fl512.c207
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c1099
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c109
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c20
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c891
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h6
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c209
-rw-r--r--drivers/staging/comedi/drivers/me4000.c3
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c36
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c23
-rw-r--r--drivers/staging/comedi/drivers/mite.c11
-rw-r--r--drivers/staging/comedi/drivers/mite.h6
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c30
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c38
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c61
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c56
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c78
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c16
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h8
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c26
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c1484
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c1045
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c1523
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c6
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c21
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c42
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h8
-rw-r--r--drivers/staging/comedi/drivers/poc.c157
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c86
-rw-r--r--drivers/staging/comedi/drivers/rti800.c29
-rw-r--r--drivers/staging/comedi/drivers/rti802.c127
-rw-r--r--drivers/staging/comedi/drivers/s526.c34
-rw-r--r--drivers/staging/comedi/drivers/s626.c174
-rw-r--r--drivers/staging/comedi/drivers/skel.c43
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c3
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c51
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c6
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c3
-rw-r--r--drivers/staging/comedi/proc.c48
-rw-r--r--drivers/staging/comedi/range.c37
-rw-r--r--drivers/staging/crystalhd/bcm_70012_regs.h3
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c8
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c21
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h4
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h2
-rw-r--r--drivers/staging/cxt1e1/Makefile1
-rw-r--r--drivers/staging/cxt1e1/comet.c44
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c1
-rw-r--r--drivers/staging/cxt1e1/functions.c373
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c598
-rw-r--r--drivers/staging/cxt1e1/libsbew.h56
-rw-r--r--drivers/staging/cxt1e1/linux.c1650
-rw-r--r--drivers/staging/cxt1e1/musycc.c43
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c29
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c6
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h1
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c116
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h1
-rw-r--r--drivers/staging/cxt1e1/pmcc4_sysdep.h1
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c4
-rw-r--r--drivers/staging/dgap/Kconfig2
-rw-r--r--drivers/staging/dgap/Makefile6
-rw-r--r--drivers/staging/dgap/dgap.c7675
-rw-r--r--drivers/staging/dgap/dgap.h1322
-rw-r--r--drivers/staging/dgap/dgap_conf.h290
-rw-r--r--drivers/staging/dgap/dgap_downld.h69
-rw-r--r--drivers/staging/dgap/dgap_driver.c1030
-rw-r--r--drivers/staging/dgap/dgap_driver.h620
-rw-r--r--drivers/staging/dgap/dgap_fep5.c1938
-rw-r--r--drivers/staging/dgap/dgap_fep5.h253
-rw-r--r--drivers/staging/dgap/dgap_kcompat.h64
-rw-r--r--drivers/staging/dgap/dgap_parse.c1374
-rw-r--r--drivers/staging/dgap/dgap_parse.h35
-rw-r--r--drivers/staging/dgap/dgap_pci.h92
-rw-r--r--drivers/staging/dgap/dgap_sysfs.c793
-rw-r--r--drivers/staging/dgap/dgap_sysfs.h48
-rw-r--r--drivers/staging/dgap/dgap_trace.c186
-rw-r--r--drivers/staging/dgap/dgap_trace.h36
-rw-r--r--drivers/staging/dgap/dgap_tty.c3580
-rw-r--r--drivers/staging/dgap/dgap_tty.h39
-rw-r--r--drivers/staging/dgap/dgap_types.h36
-rw-r--r--drivers/staging/dgap/digi.h376
-rw-r--r--drivers/staging/dgap/downld.c798
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c11
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c4
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c27
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c3
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c53
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/dgrp/dgrp_sysfs.c4
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c22
-rw-r--r--drivers/staging/echo/TODO5
-rw-r--r--drivers/staging/et131x/et131x.c23
-rw-r--r--drivers/staging/frontier/Kconfig1
-rw-r--r--drivers/staging/frontier/alphatrack.c11
-rw-r--r--drivers/staging/frontier/tranzport.c12
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h8
-rw-r--r--drivers/staging/ft1000/ft1000.h156
-rw-r--r--drivers/staging/fwserial/fwserial.c13
-rw-r--r--drivers/staging/fwserial/fwserial.h1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c187
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h2
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c26
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c56
-rw-r--r--drivers/staging/gdm724x/netlink_k.c3
-rw-r--r--drivers/staging/gdm72xx/TODO2
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c50
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c40
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c77
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.h20
-rw-r--r--drivers/staging/gs_fpgaboot/Kconfig8
-rw-r--r--drivers/staging/gs_fpgaboot/Makefile4
-rw-r--r--drivers/staging/gs_fpgaboot/README71
-rw-r--r--drivers/staging/gs_fpgaboot/TODO7
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c422
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h56
-rw-r--r--drivers/staging/gs_fpgaboot/io.c294
-rw-r--r--drivers/staging/gs_fpgaboot/io.h90
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h28
-rw-r--r--drivers/staging/iio/Documentation/lsiio.c157
-rw-r--r--drivers/staging/iio/accel/sca3000.h26
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c172
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c2
-rw-r--r--drivers/staging/iio/adc/Kconfig2
-rw-r--r--drivers/staging/iio/adc/ad799x.h10
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c65
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c22
-rw-r--r--drivers/staging/iio/addac/adt7316.c21
-rw-r--r--drivers/staging/iio/addac/adt7316.h8
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/iio/light/tsl2583.c6
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c4
-rw-r--r--drivers/staging/imx-drm/Kconfig1
-rw-r--r--drivers/staging/imx-drm/Makefile3
-rw-r--r--drivers/staging/imx-drm/TODO5
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c894
-rw-r--r--drivers/staging/imx-drm/imx-drm.h44
-rw-r--r--drivers/staging/imx-drm/imx-fb.c47
-rw-r--r--drivers/staging/imx-drm/imx-fbdev.c74
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c713
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c150
-rw-r--r--drivers/staging/imx-drm/imx-tve.c137
-rw-r--r--drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dc.c2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c317
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c3
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c93
-rw-r--r--drivers/staging/imx-drm/ipuv3-plane.c4
-rw-r--r--drivers/staging/imx-drm/parallel-display.c127
-rw-r--r--drivers/staging/keucr/common.h8
-rw-r--r--drivers/staging/keucr/init.c16
-rw-r--r--drivers/staging/keucr/init.h6
-rw-r--r--drivers/staging/keucr/smil.h112
-rw-r--r--drivers/staging/keucr/smilecc.c58
-rw-r--r--drivers/staging/keucr/smilmain.c82
-rw-r--r--drivers/staging/keucr/smilsub.c170
-rw-r--r--drivers/staging/keucr/smscsi.c28
-rw-r--r--drivers/staging/keucr/transport.c8
-rw-r--r--drivers/staging/keucr/transport.h6
-rw-r--r--drivers/staging/keucr/usb.c6
-rw-r--r--drivers/staging/keucr/usb.h86
-rw-r--r--drivers/staging/line6/audio.c5
-rw-r--r--drivers/staging/line6/capture.c1
-rw-r--r--drivers/staging/line6/driver.c56
-rw-r--r--drivers/staging/line6/driver.h2
-rw-r--r--drivers/staging/line6/midi.c7
-rw-r--r--drivers/staging/line6/pcm.c2
-rw-r--r--drivers/staging/line6/usbdefs.h8
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h9
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h9
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h30
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h16
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c132
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c12
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c20
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c7
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c3
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c24
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c3
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c34
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c15
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h14
-rw-r--r--drivers/staging/lustre/lustre/include/ioctl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_acl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h85
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h25
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h21
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h21
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h26
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_quota.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h4
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h11
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h15
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c41
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c102
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c54
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c19
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c65
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c38
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c7
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c5
-rw-r--r--drivers/staging/lustre/lustre/libcfs/upcall_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c328
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c37
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c105
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h42
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c30
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c32
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c124
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c37
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c31
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c125
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c16
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c4
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c117
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c80
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c92
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_lvfs.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_osd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/md_attrs.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c54
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c20
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c12
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c187
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c36
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c37
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c57
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c2
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile5
-rw-r--r--drivers/staging/media/as102/as102_drv.c13
-rw-r--r--drivers/staging/media/as102/as102_drv.h8
-rw-r--r--drivers/staging/media/as102/as102_fe.c37
-rw-r--r--drivers/staging/media/as102/as102_fw.c16
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c36
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c21
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c9
-rw-r--r--drivers/staging/media/as102/as10x_cmd_stream.c12
-rw-r--r--drivers/staging/media/as102/as10x_handle.h14
-rw-r--r--drivers/staging/media/bcm2048/Kconfig13
-rw-r--r--drivers/staging/media/bcm2048/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/TODO24
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2744
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h30
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c12
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c3
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c2
-rw-r--r--drivers/staging/media/go7007/Kconfig3
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c6
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c20
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c5
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c2
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c4
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c30
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c13
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c4
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c2
-rw-r--r--drivers/staging/media/omap24xx/Kconfig35
-rw-r--r--drivers/staging/media/omap24xx/Makefile5
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam-dma.c (renamed from drivers/media/platform/omap24xxcam-dma.c)0
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.c (renamed from drivers/media/platform/omap24xxcam.c)0
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.h (renamed from drivers/media/platform/omap24xxcam.h)2
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.c (renamed from drivers/media/i2c/tcm825x.c)10
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.h (renamed from drivers/media/i2c/tcm825x.h)4
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.c (renamed from drivers/media/v4l2-core/v4l2-int-device.c)2
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.h305
-rw-r--r--drivers/staging/media/omap4iss/Kconfig12
-rw-r--r--drivers/staging/media/omap4iss/Makefile6
-rw-r--r--drivers/staging/media/omap4iss/TODO4
-rw-r--r--drivers/staging/media/omap4iss/iss.c1563
-rw-r--r--drivers/staging/media/omap4iss/iss.h236
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c1343
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.h158
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c279
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.h51
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c570
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.h67
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c849
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.h92
-rw-r--r--drivers/staging/media/omap4iss/iss_regs.h901
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c893
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.h75
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c1226
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h204
-rw-r--r--drivers/staging/media/sn9c102/Kconfig (renamed from drivers/media/usb/sn9c102/Kconfig)9
-rw-r--r--drivers/staging/media/sn9c102/Makefile (renamed from drivers/media/usb/sn9c102/Makefile)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102.h (renamed from drivers/media/usb/sn9c102/sn9c102.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102.txt592
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_config.h (renamed from drivers/media/usb/sn9c102/sn9c102_config.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_core.c (renamed from drivers/media/usb/sn9c102/sn9c102_core.c)85
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_devtable.h (renamed from drivers/media/usb/sn9c102/sn9c102_devtable.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131d.c (renamed from drivers/media/usb/sn9c102/sn9c102_hv7131d.c)15
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_hv7131r.c (renamed from drivers/media/usb/sn9c102/sn9c102_hv7131r.c)15
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mi0343.c (renamed from drivers/media/usb/sn9c102/sn9c102_mi0343.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mi0360.c (renamed from drivers/media/usb/sn9c102/sn9c102_mi0360.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_mt9v111.c (renamed from drivers/media/usb/sn9c102/sn9c102_mt9v111.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7630.c (renamed from drivers/media/usb/sn9c102/sn9c102_ov7630.c)24
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_ov7660.c (renamed from drivers/media/usb/sn9c102/sn9c102_ov7660.c)24
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas106b.c (renamed from drivers/media/usb/sn9c102/sn9c102_pas106b.c)18
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_pas202bcb.c (renamed from drivers/media/usb/sn9c102/sn9c102_pas202bcb.c)15
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_sensor.h (renamed from drivers/media/usb/sn9c102/sn9c102_sensor.h)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5110d.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5110d.c)0
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c (renamed from drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c)0
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-g723.c6
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-tw28.c2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c46
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2.c7
-rw-r--r--drivers/staging/media/solo6x10/solo6x10.h2
-rw-r--r--drivers/staging/netlogic/xlr_net.c10
-rw-r--r--drivers/staging/nokia_h4p/Kconfig9
-rw-r--r--drivers/staging/nokia_h4p/Makefile6
-rw-r--r--drivers/staging/nokia_h4p/TODO132
-rw-r--r--drivers/staging/nokia_h4p/hci_h4p.h222
-rw-r--r--drivers/staging/nokia_h4p/nokia_core.c1206
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-bcm.c148
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-csr.c149
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw-ti1273.c110
-rw-r--r--drivers/staging/nokia_h4p/nokia_fw.c208
-rw-r--r--drivers/staging/nokia_h4p/nokia_uart.c199
-rw-r--r--drivers/staging/nvec/nvec.c14
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_ps2.c2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c1017
-rw-r--r--drivers/staging/octeon/ethernet-defines.h4
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c20
-rw-r--r--drivers/staging/octeon/ethernet-mem.c11
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c23
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/octeon/ethernet.c10
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/ozwpan/ozcdev.c6
-rw-r--r--drivers/staging/ozwpan/ozhcd.c8
-rw-r--r--drivers/staging/ozwpan/ozpd.c31
-rw-r--r--drivers/staging/ozwpan/ozpd.h5
-rw-r--r--drivers/staging/ozwpan/ozproto.c59
-rw-r--r--drivers/staging/ozwpan/ozproto.h2
-rw-r--r--drivers/staging/ozwpan/ozprotocol.h6
-rw-r--r--drivers/staging/ozwpan/ozusbif.h2
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c2
-rw-r--r--drivers/staging/panel/panel.c41
-rw-r--r--drivers/staging/quickstart/quickstart.c2
-rw-r--r--drivers/staging/rtl8187se/Kconfig1
-rw-r--r--drivers/staging/rtl8187se/Module.symvers0
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.h11
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h12
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c14
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c15
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c1203
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c151
-rw-r--r--drivers/staging/rtl8187se/r8180.h750
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c894
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h21
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c59
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c10
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c62
-rw-r--r--drivers/staging/rtl8188eu/Kconfig5
-rw-r--r--drivers/staging/rtl8188eu/Makefile1
-rw-r--r--drivers/staging/rtl8188eu/TODO5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c81
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c220
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c59
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c58
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c52
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c303
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c302
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c50
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c27
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c502
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c93
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c87
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c68
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c129
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c13
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c37
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c101
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c12
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c87
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c37
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c10
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c63
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h6
-rw-r--r--drivers/staging/rtl8188eu/include/ethernet.h42
-rw-r--r--drivers/staging/rtl8188eu/include/if_ether.h111
-rw-r--r--drivers/staging/rtl8188eu/include/ioctl_cfg80211.h107
-rw-r--r--drivers/staging/rtl8188eu/include/ip.h126
-rw-r--r--drivers/staging/rtl8188eu/include/nic_spec.h44
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h23
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h34
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h10
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h11
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h24
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h32
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h49
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h139
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c254
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c9
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c35
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c44
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c17
-rw-r--r--drivers/staging/rtl8192e/dot11d.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h52
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c4
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/EndianFree.h194
-rw-r--r--drivers/staging/rtl8192u/ieee80211/aes.c468
-rw-r--r--drivers/staging/rtl8192u/ieee80211/arc4.c103
-rw-r--r--drivers/staging/rtl8192u/ieee80211/autoload.c40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/cipher.c298
-rw-r--r--drivers/staging/rtl8192u/ieee80211/compress.c64
-rw-r--r--drivers/staging/rtl8192u/ieee80211/crypto_compat.h58
-rw-r--r--drivers/staging/rtl8192u/ieee80211/digest.c108
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c173
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c107
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c5
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/internal.h81
-rw-r--r--drivers/staging/rtl8192u/ieee80211/michael_mic.c194
-rw-r--r--drivers/staging/rtl8192u/ieee80211/proc.c112
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c70
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c39
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl_crypto.h398
-rw-r--r--drivers/staging/rtl8192u/ieee80211/scatterwalk.c117
-rw-r--r--drivers/staging/rtl8192u/ieee80211/scatterwalk.h51
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c10
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c138
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c99
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h40
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTGen.h12
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTType.h379
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c50
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c29
-rw-r--r--drivers/staging/rtl8712/Kconfig2
-rw-r--r--drivers/staging/rtl8712/drv_types.h2
-rw-r--r--drivers/staging/rtl8712/ieee80211.c6
-rw-r--r--drivers/staging/rtl8712/os_intfs.c2
-rw-r--r--drivers/staging/rtl8712/osdep_service.h5
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c13
-rw-r--r--drivers/staging/rtl8821ae/Kconfig11
-rw-r--r--drivers/staging/rtl8821ae/Makefile35
-rw-r--r--drivers/staging/rtl8821ae/TODO10
-rw-r--r--drivers/staging/rtl8821ae/base.c1868
-rw-r--r--drivers/staging/rtl8821ae/base.h159
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c3976
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h205
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c1614
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h176
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h99
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c3891
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h226
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c4242
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h162
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c3780
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h179
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c4104
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h175
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c4185
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h145
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c1171
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h549
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/rtl_btc.c236
-rw-r--r--drivers/staging/rtl8821ae/btcoexist/rtl_btc.h66
-rw-r--r--drivers/staging/rtl8821ae/cam.c354
-rw-r--r--drivers/staging/rtl8821ae/cam.h56
-rw-r--r--drivers/staging/rtl8821ae/compat.h125
-rw-r--r--drivers/staging/rtl8821ae/core.c1464
-rw-r--r--drivers/staging/rtl8821ae/core.h43
-rw-r--r--drivers/staging/rtl8821ae/debug.c988
-rw-r--r--drivers/staging/rtl8821ae/debug.h227
-rw-r--r--drivers/staging/rtl8821ae/efuse.c1285
-rw-r--r--drivers/staging/rtl8821ae/efuse.h130
-rw-r--r--drivers/staging/rtl8821ae/pci.c2549
-rw-r--r--drivers/staging/rtl8821ae/pci.h353
-rw-r--r--drivers/staging/rtl8821ae/ps.c1025
-rw-r--r--drivers/staging/rtl8821ae/ps.h55
-rw-r--r--drivers/staging/rtl8821ae/rc.c309
-rw-r--r--drivers/staging/rtl8821ae/rc.h47
-rw-r--r--drivers/staging/rtl8821ae/regd.c503
-rw-r--r--drivers/staging/rtl8821ae/regd.h75
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/btc.h87
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/def.h442
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/dm.c3045
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/dm.h426
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/fw.c1349
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/fw.h321
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c519
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h169
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c2069
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h160
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hw.c3347
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/hw.h75
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/led.c239
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/led.h40
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.c5525
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/phy.h258
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c199
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h413
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c140
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h71
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/reg.h2427
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/rf.c464
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/rf.h46
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/sw.c499
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/sw.h (renamed from drivers/staging/rtl8188eu/include/h2clbk.h)34
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/table.c4002
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/table.h62
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.c1050
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.h641
-rw-r--r--drivers/staging/rtl8821ae/stats.c283
-rw-r--r--drivers/staging/rtl8821ae/stats.h46
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2534
-rw-r--r--drivers/staging/rts5139/ms.c21
-rw-r--r--drivers/staging/rts5139/ms_mg.c1
-rw-r--r--drivers/staging/rts5139/rts51x.c6
-rw-r--r--drivers/staging/rts5139/rts51x_fop.c10
-rw-r--r--drivers/staging/rts5139/rts51x_fop.h4
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c4
-rw-r--r--drivers/staging/rts5139/rts51x_transport.c4
-rw-r--r--drivers/staging/rts5139/sd.c4
-rw-r--r--drivers/staging/rts5139/sd_cprm.c1
-rw-r--r--drivers/staging/rts5139/xd.c18
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c6
-rw-r--r--drivers/staging/rts5208/rtsx_card.c71
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c8
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c4
-rw-r--r--drivers/staging/sb105x/Kconfig9
-rw-r--r--drivers/staging/sb105x/Makefile3
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h295
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c3189
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h291
-rw-r--r--drivers/staging/sb105x/sb_ser_core.h368
-rw-r--r--drivers/staging/sbe-2t3e3/2t3e3.h21
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.c17
-rw-r--r--drivers/staging/sbe-2t3e3/ctrl.h16
-rw-r--r--drivers/staging/sbe-2t3e3/dc.c7
-rw-r--r--drivers/staging/sbe-2t3e3/intr.c4
-rw-r--r--drivers/staging/sbe-2t3e3/maps.c9
-rw-r--r--drivers/staging/sbe-2t3e3/module.c2
-rw-r--r--drivers/staging/sbe-2t3e3/netdev.c6
-rw-r--r--drivers/staging/sep/sep_crypto.c2
-rw-r--r--drivers/staging/sep/sep_main.c94
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c18
-rw-r--r--drivers/staging/silicom/bp_mod.h25
-rw-r--r--drivers/staging/silicom/bpctl_mod.c629
-rw-r--r--drivers/staging/silicom/bypasslib/bp_ioctl.h8
-rw-r--r--drivers/staging/silicom/bypasslib/libbp_sd.h313
-rw-r--r--drivers/staging/slicoss/README40
-rw-r--r--drivers/staging/slicoss/TODO38
-rw-r--r--drivers/staging/slicoss/slic.h9
-rw-r--r--drivers/staging/slicoss/slicoss.c73
-rw-r--r--drivers/staging/sm7xxfb/Kconfig13
-rw-r--r--drivers/staging/sm7xxfb/Makefile1
-rw-r--r--drivers/staging/sm7xxfb/TODO9
-rw-r--r--drivers/staging/sm7xxfb/sm7xx.h779
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c1028
-rw-r--r--drivers/staging/speakup/serialio.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c6
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c2
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c23
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c15
-rw-r--r--drivers/staging/tidspbridge/dynload/tramp.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c4
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/mgr.c8
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c5
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c20
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt174
-rw-r--r--drivers/staging/unisys/Documentation/proc-entries.txt93
-rw-r--r--drivers/staging/unisys/Kconfig20
-rw-r--r--drivers/staging/unisys/MAINTAINERS6
-rw-r--r--drivers/staging/unisys/Makefile10
-rw-r--r--drivers/staging/unisys/TODO21
-rw-r--r--drivers/staging/unisys/channels/Kconfig10
-rw-r--r--drivers/staging/unisys/channels/Makefile13
-rw-r--r--drivers/staging/unisys/channels/channel.c219
-rw-r--r--drivers/staging/unisys/channels/chanstub.c70
-rw-r--r--drivers/staging/unisys/channels/chanstub.h23
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel.h661
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel_guid.h64
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlframework.h77
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h619
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/diagchannel.h427
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/iochannel.h933
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/vbuschannel.h135
-rw-r--r--drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h92
-rw-r--r--drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h310
-rw-r--r--drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h53
-rw-r--r--drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h209
-rw-r--r--drivers/staging/unisys/common-spar/include/version.h46
-rw-r--r--drivers/staging/unisys/common-spar/include/vmcallinterface.h167
-rw-r--r--drivers/staging/unisys/include/commontypes.h170
-rw-r--r--drivers/staging/unisys/include/guestlinuxdebug.h182
-rw-r--r--drivers/staging/unisys/include/guidutils.h203
-rw-r--r--drivers/staging/unisys/include/periodic_work.h40
-rw-r--r--drivers/staging/unisys/include/procobjecttree.h48
-rw-r--r--drivers/staging/unisys/include/sparstop.h30
-rw-r--r--drivers/staging/unisys/include/timskmod.h324
-rw-r--r--drivers/staging/unisys/include/timskmodutils.h75
-rw-r--r--drivers/staging/unisys/include/uisqueue.h440
-rw-r--r--drivers/staging/unisys/include/uisthread.h46
-rw-r--r--drivers/staging/unisys/include/uisutils.h348
-rw-r--r--drivers/staging/unisys/include/uniklog.h193
-rw-r--r--drivers/staging/unisys/include/vbushelper.h47
-rw-r--r--drivers/staging/unisys/uislib/Kconfig10
-rw-r--r--drivers/staging/unisys/uislib/Makefile17
-rw-r--r--drivers/staging/unisys/uislib/uislib.c2421
-rw-r--r--drivers/staging/unisys/uislib/uisqueue.c160
-rw-r--r--drivers/staging/unisys/uislib/uisthread.c85
-rw-r--r--drivers/staging/unisys/uislib/uisutils.c350
-rw-r--r--drivers/staging/unisys/virthba/Kconfig10
-rw-r--r--drivers/staging/unisys/virthba/Makefile16
-rw-r--r--drivers/staging/unisys/virthba/virthba.c1823
-rw-r--r--drivers/staging/unisys/virthba/virthba.h31
-rw-r--r--drivers/staging/unisys/virtpci/Kconfig10
-rw-r--r--drivers/staging/unisys/virtpci/Makefile13
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.c1771
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.h104
-rw-r--r--drivers/staging/unisys/visorchannel/Kconfig10
-rw-r--r--drivers/staging/unisys/visorchannel/Makefile14
-rw-r--r--drivers/staging/unisys/visorchannel/globals.h29
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel.h106
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_funcs.c674
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_main.c49
-rw-r--r--drivers/staging/unisys/visorchipset/Kconfig10
-rw-r--r--drivers/staging/unisys/visorchipset/Makefile18
-rw-r--r--drivers/staging/unisys/visorchipset/controlvm.h27
-rw-r--r--drivers/staging/unisys/visorchipset/controlvm_direct.c62
-rw-r--r--drivers/staging/unisys/visorchipset/file.c226
-rw-r--r--drivers/staging/unisys/visorchipset/file.h26
-rw-r--r--drivers/staging/unisys/visorchipset/filexfer.c506
-rw-r--r--drivers/staging/unisys/visorchipset/filexfer.h147
-rw-r--r--drivers/staging/unisys/visorchipset/globals.h45
-rw-r--r--drivers/staging/unisys/visorchipset/parser.c474
-rw-r--r--drivers/staging/unisys/visorchipset/parser.h45
-rw-r--r--drivers/staging/unisys/visorchipset/testing.h41
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h307
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c2945
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_umode.h37
-rw-r--r--drivers/staging/unisys/visorutil/Kconfig10
-rw-r--r--drivers/staging/unisys/visorutil/Makefile11
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.c141
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.h37
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.c371
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.h92
-rw-r--r--drivers/staging/unisys/visorutil/memregion.h43
-rw-r--r--drivers/staging/unisys/visorutil/memregion_direct.c223
-rw-r--r--drivers/staging/unisys/visorutil/periodic_work.c236
-rw-r--r--drivers/staging/unisys/visorutil/procobjecttree.c348
-rw-r--r--drivers/staging/unisys/visorutil/visorkmodutils.c71
-rw-r--r--drivers/staging/usbip/Kconfig4
-rw-r--r--drivers/staging/usbip/stub.h3
-rw-r--r--drivers/staging/usbip/stub_dev.c164
-rw-r--r--drivers/staging/usbip/stub_main.c45
-rw-r--r--drivers/staging/usbip/stub_rx.c28
-rw-r--r--drivers/staging/usbip/stub_tx.c16
-rw-r--r--drivers/staging/usbip/uapi/usbip.h26
-rw-r--r--drivers/staging/usbip/usbip_common.c36
-rw-r--r--drivers/staging/usbip/usbip_common.h19
-rw-r--r--drivers/staging/usbip/userspace/README8
-rw-r--r--drivers/staging/usbip/userspace/configure.ac12
-rw-r--r--drivers/staging/usbip/userspace/libsrc/Makefile.am3
-rw-r--r--drivers/staging/usbip/userspace/libsrc/list.h136
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/sysfs_utils.c31
-rw-r--r--drivers/staging/usbip/userspace/libsrc/sysfs_utils.h8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.c93
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.h41
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c303
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h7
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c397
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.h14
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_bind.c195
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_detach.c2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_list.c146
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h1
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_unbind.c129
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c21
-rw-r--r--drivers/staging/usbip/userspace/src/utils.c48
-rw-r--r--drivers/staging/usbip/vhci_hcd.c44
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c18
-rw-r--r--drivers/staging/vt6655/80211mgr.c331
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c36
-rw-r--r--drivers/staging/vt6655/desc.h15
-rw-r--r--drivers/staging/vt6655/dpc.c36
-rw-r--r--drivers/staging/vt6655/key.c8
-rw-r--r--drivers/staging/vt6655/mac.c10
-rw-r--r--drivers/staging/vt6655/mac.h2
-rw-r--r--drivers/staging/vt6655/michael.c14
-rw-r--r--drivers/staging/vt6655/michael.h6
-rw-r--r--drivers/staging/vt6655/rxtx.c62
-rw-r--r--drivers/staging/vt6655/wmgr.c30
-rw-r--r--drivers/staging/vt6656/80211hdr.h2
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c6
-rw-r--r--drivers/staging/vt6656/card.h7
-rw-r--r--drivers/staging/vt6656/device.h85
-rw-r--r--drivers/staging/vt6656/dpc.c92
-rw-r--r--drivers/staging/vt6656/int.c150
-rw-r--r--drivers/staging/vt6656/int.h45
-rw-r--r--drivers/staging/vt6656/iwctl.c10
-rw-r--r--drivers/staging/vt6656/mac.c16
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c350
-rw-r--r--drivers/staging/vt6656/power.c50
-rw-r--r--drivers/staging/vt6656/rxtx.c185
-rw-r--r--drivers/staging/vt6656/rxtx.h92
-rw-r--r--drivers/staging/vt6656/usbpipe.c451
-rw-r--r--drivers/staging/vt6656/wcmd.c77
-rw-r--r--drivers/staging/vt6656/wcmd.h4
-rw-r--r--drivers/staging/vt6656/wmgr.c8
-rw-r--r--drivers/staging/winbond/localpara.h84
-rw-r--r--drivers/staging/winbond/wb35reg.c140
-rw-r--r--drivers/staging/winbond/wb35rx.c27
-rw-r--r--drivers/staging/winbond/wbusb.c1
-rw-r--r--drivers/staging/wlags49_h2/dhf.c10
-rw-r--r--drivers/staging/wlags49_h2/hcf.c16
-rw-r--r--drivers/staging/wlags49_h2/sta_h2.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c311
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c1992
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c37
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c8
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c11
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h172
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c100
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c15
-rw-r--r--drivers/staging/xillybus/Kconfig3
-rw-r--r--drivers/staging/xillybus/xillybus_core.c6
-rw-r--r--drivers/staging/zram/zram.txt77
-rw-r--r--drivers/staging/zsmalloc/Kconfig24
-rw-r--r--drivers/staging/zsmalloc/Makefile3
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c1106
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h50
-rw-r--r--drivers/target/Kconfig2
-rw-r--r--drivers/target/iscsi/iscsi_target.c58
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c9
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/loopback/tcm_loop.c20
-rw-r--r--drivers/target/target_core_alua.c558
-rw-r--r--drivers/target/target_core_alua.h15
-rw-r--r--drivers/target/target_core_configfs.c194
-rw-r--r--drivers/target/target_core_device.c108
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c256
-rw-r--r--drivers/target/target_core_file.h9
-rw-r--r--drivers/target/target_core_iblock.c95
-rw-r--r--drivers/target/target_core_internal.h8
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_pr.h5
-rw-r--r--drivers/target/target_core_rd.c252
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_sbc.c265
-rw-r--r--drivers/target/target_core_spc.c114
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/target/target_core_transport.c103
-rw-r--r--drivers/target/target_core_ua.c1
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/thermal/Kconfig32
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/cpu_cooling.c67
-rw-r--r--drivers/thermal/imx_thermal.c54
-rw-r--r--drivers/thermal/int3403_thermal.c237
-rw-r--r--drivers/thermal/intel_powerclamp.c11
-rw-r--r--drivers/thermal/of-thermal.c849
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c1
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c12
-rw-r--r--drivers/thermal/step_wise.c6
-rw-r--r--drivers/thermal/thermal_core.c113
-rw-r--r--drivers/thermal/thermal_core.h9
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c77
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c13
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_console.c6
-rw-r--r--drivers/tty/hvc/hvc_iucv.c103
-rw-r--r--drivers/tty/hvc/hvc_opal.c8
-rw-r--r--drivers/tty/hvc/hvc_rtas.c12
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_xen.c17
-rw-r--r--drivers/tty/ipwireless/tty.c3
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/n_tty.c25
-rw-r--r--drivers/tty/serial/8250/8250_core.c37
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c46
-rw-r--r--drivers/tty/serial/Kconfig6
-rw-r--r--drivers/tty/serial/amba-pl011.c21
-rw-r--r--drivers/tty/serial/atmel_serial.c28
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c25
-rw-r--r--drivers/tty/serial/clps711x.c21
-rw-r--r--drivers/tty/serial/crisv10.c114
-rw-r--r--drivers/tty/serial/efm32-uart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c430
-rw-r--r--drivers/tty/serial/icom.c4
-rw-r--r--drivers/tty/serial/imx.c82
-rw-r--r--drivers/tty/serial/max310x.c417
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c90
-rw-r--r--drivers/tty/serial/msm_serial.c140
-rw-r--r--drivers/tty/serial/msm_serial.h9
-rw-r--r--drivers/tty/serial/omap-serial.c22
-rw-r--r--drivers/tty/serial/pch_uart.c12
-rw-r--r--drivers/tty/serial/samsung.c48
-rw-r--r--drivers/tty/serial/serial-tegra.c38
-rw-r--r--drivers/tty/serial/serial_core.c20
-rw-r--r--drivers/tty/serial/sh-sci.c502
-rw-r--r--drivers/tty/serial/sh-sci.h2
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c199
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h5
-rw-r--r--drivers/tty/serial/sunhv.c22
-rw-r--r--drivers/tty/serial/sunsab.c14
-rw-r--r--drivers/tty/serial/sunsu.c14
-rw-r--r--drivers/tty/serial/sunzilog.c14
-rw-r--r--drivers/tty/synclink.c1
-rw-r--r--drivers/tty/synclinkmp.c1
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/tty_buffer.c20
-rw-r--r--drivers/tty/tty_io.c23
-rw-r--r--drivers/tty/tty_ldsem.c15
-rw-r--r--drivers/tty/vt/vt.c22
-rw-r--r--drivers/usb/Kconfig10
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/bits.h2
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c72
-rw-r--r--drivers/usb/chipidea/core.c87
-rw-r--r--drivers/usb/chipidea/udc.c328
-rw-r--r--drivers/usb/core/Kconfig1
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/core/devio.c286
-rw-r--r--drivers/usb/core/driver.c145
-rw-r--r--drivers/usb/core/generic.c1
-rw-r--r--drivers/usb/core/hcd.c38
-rw-r--r--drivers/usb/core/hub.c118
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/message.c10
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/core/usb-acpi.c41
-rw-r--r--drivers/usb/core/usb.h11
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core_intr.c25
-rw-r--r--drivers/usb/dwc2/hcd.c11
-rw-r--r--drivers/usb/dwc2/hcd_intr.c14
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/dwc3/core.c251
-rw-r--r--drivers/usb/dwc3/core.h105
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c8
-rw-r--r--drivers/usb/dwc3/gadget.c183
-rw-r--r--drivers/usb/dwc3/gadget.h12
-rw-r--r--drivers/usb/gadget/Kconfig3
-rw-r--r--drivers/usb/gadget/at91_udc.c14
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c18
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.h2
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c58
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/f_fs.c623
-rw-r--r--drivers/usb/gadget/f_midi.c7
-rw-r--r--drivers/usb/gadget/f_subset.c2
-rw-r--r--drivers/usb/gadget/f_uac2.c4
-rw-r--r--drivers/usb/gadget/gr_udc.c10
-rw-r--r--drivers/usb/gadget/inode.c9
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/printer.c10
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c143
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c1
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/u_ether.c101
-rw-r--r--drivers/usb/gadget/u_fs.h30
-rw-r--r--drivers/usb/gadget/u_serial.c4
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c26
-rw-r--r--drivers/usb/host/ehci-platform.c182
-rw-r--r--drivers/usb/host/ehci-tegra.c18
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c13
-rw-r--r--drivers/usb/host/hwa-hc.c42
-rw-r--r--drivers/usb/host/ohci-platform.c199
-rw-r--r--drivers/usb/host/r8a66597-hcd.c4
-rw-r--r--drivers/usb/host/uhci-platform.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c6
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c226
-rw-r--r--drivers/usb/host/xhci-pci.c23
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci-ring.c214
-rw-r--r--drivers/usb/host/xhci.c57
-rw-r--r--drivers/usb/host/xhci.h46
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c10
-rw-r--r--drivers/usb/misc/usbled.c34
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c20
-rw-r--r--drivers/usb/musb/musb_cppi41.c69
-rw-r--r--drivers/usb/musb/musb_dsps.c58
-rw-r--r--drivers/usb/musb/musb_host.c33
-rw-r--r--drivers/usb/musb/musb_virthub.c26
-rw-r--r--drivers/usb/musb/omap2430.c4
-rw-r--r--drivers/usb/phy/Kconfig21
-rw-r--r--drivers/usb/phy/Makefile2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c9
-rw-r--r--drivers/usb/phy/phy-msm-usb.c95
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c310
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c361
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c6
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/phy/phy-ulpi.c2
-rw-r--r--drivers/usb/phy/phy.c8
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c21
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/generic.c61
-rw-r--r--drivers/usb/serial/iuu_phoenix.c2
-rw-r--r--drivers/usb/serial/keyspan.c30
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c4
-rw-r--r--drivers/usb/serial/kobil_sct.c5
-rw-r--r--drivers/usb/serial/mos7720.c12
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/spcp8x5.c7
-rw-r--r--drivers/usb/serial/symbolserial.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c4
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/serial/usb-serial.c17
-rw-r--r--drivers/usb/storage/Kconfig6
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/uas-detect.h96
-rw-r--r--drivers/usb/storage/uas.c687
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/usb/storage/unusual_uas.h52
-rw-r--r--drivers/usb/storage/usb.c26
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/wusbcore/devconnect.c4
-rw-r--r--drivers/usb/wusbcore/wa-hc.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.h26
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c583
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c29
-rw-r--r--drivers/vfio/vfio.c70
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c28
-rw-r--r--drivers/vfio/vfio_iommu_type1.c4
-rw-r--r--drivers/vhost/net.c76
-rw-r--r--drivers/vhost/scsi.c17
-rw-r--r--drivers/vhost/test.c8
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/Kconfig38
-rw-r--r--drivers/video/Makefile4
-rw-r--r--drivers/video/asiliantfb.c1
-rw-r--r--drivers/video/aty/aty128fb.c4
-rw-r--r--drivers/video/backlight/hp680_bl.c6
-rw-r--r--drivers/video/backlight/jornada720_bl.c15
-rw-r--r--drivers/video/backlight/jornada720_lcd.c13
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/l4f00242t03.c6
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c6
-rw-r--r--drivers/video/backlight/omap1_bl.c14
-rw-r--r--drivers/video/backlight/ot200_bl.c9
-rw-r--r--drivers/video/backlight/pwm_bl.c1
-rw-r--r--drivers/video/backlight/tosa_bl.c7
-rw-r--r--drivers/video/backlight/tosa_lcd.c6
-rw-r--r--drivers/video/console/Kconfig3
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/fbmem.c34
-rw-r--r--drivers/video/gbefb.c4
-rw-r--r--drivers/video/hyperv_fb.c88
-rw-r--r--drivers/video/i810/i810_main.c4
-rw-r--r--drivers/video/logo/Kconfig2
-rw-r--r--drivers/video/logo/logo.c6
-rw-r--r--drivers/video/mmp/core.c9
-rw-r--r--drivers/video/mx3fb.c2
-rw-r--r--drivers/video/mxsfb.c126
-rw-r--r--drivers/video/nvidia/nvidia.c1
-rw-r--r--drivers/video/ocfb.c440
-rw-r--r--drivers/video/omap2/displays-new/panel-sony-acx565akm.c44
-rw-r--r--drivers/video/omap2/dss/apply.c11
-rw-r--r--drivers/video/omap2/dss/dispc.c72
-rw-r--r--drivers/video/omap2/dss/dispc.h20
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c4
-rw-r--r--drivers/video/omap2/dss/dpi.c18
-rw-r--r--drivers/video/omap2/dss/dsi.c223
-rw-r--r--drivers/video/omap2/dss/dss.c163
-rw-r--r--drivers/video/omap2/dss/dss.h17
-rw-r--r--drivers/video/omap2/dss/dss_features.c1
-rw-r--r--drivers/video/omap2/dss/dss_features.h1
-rw-r--r--drivers/video/omap2/dss/hdmi.h16
-rw-r--r--drivers/video/omap2/dss/hdmi4.c22
-rw-r--r--drivers/video/omap2/dss/hdmi4_core.c18
-rw-r--r--drivers/video/omap2/dss/hdmi_common.c2
-rw-r--r--drivers/video/omap2/dss/hdmi_phy.c2
-rw-r--r--drivers/video/omap2/dss/hdmi_pll.c18
-rw-r--r--drivers/video/omap2/dss/hdmi_wp.c16
-rw-r--r--drivers/video/omap2/dss/overlay.c5
-rw-r--r--drivers/video/omap2/dss/sdi.c24
-rw-r--r--drivers/video/omap2/dss/venc.c3
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c19
-rw-r--r--drivers/video/output.c133
-rw-r--r--drivers/video/riva/fbdev.c1
-rw-r--r--drivers/video/sgivwfb.c889
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/tgafb.c21
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/uvesafb.c4
-rw-r--r--drivers/video/vermilion/vermilion.c1
-rw-r--r--drivers/video/xen-fbfront.c6
-rw-r--r--drivers/virtio/virtio_balloon.c16
-rw-r--r--drivers/virtio/virtio_pci.c8
-rw-r--r--drivers/virtio/virtio_ring.c12
-rw-r--r--drivers/vlynq/vlynq.c3
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c33
-rw-r--r--drivers/vme/bridges/vme_tsi148.c22
-rw-r--r--drivers/w1/masters/Kconfig3
-rw-r--r--drivers/w1/masters/ds2490.c155
-rw-r--r--drivers/w1/masters/mxc_w1.c43
-rw-r--r--drivers/w1/masters/w1-gpio.c41
-rw-r--r--drivers/w1/slaves/Kconfig5
-rw-r--r--drivers/w1/slaves/w1_therm.c21
-rw-r--r--drivers/w1/w1.c269
-rw-r--r--drivers/w1/w1.h186
-rw-r--r--drivers/w1/w1_family.c8
-rw-r--r--drivers/w1/w1_family.h13
-rw-r--r--drivers/w1/w1_int.c37
-rw-r--r--drivers/w1/w1_io.c102
-rw-r--r--drivers/w1/w1_netlink.c359
-rw-r--r--drivers/w1/w1_netlink.h33
-rw-r--r--drivers/watchdog/Kconfig96
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/acquirewdt.c21
-rw-r--r--drivers/watchdog/advantechwdt.c21
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c4
-rw-r--r--drivers/watchdog/at91sam9_wdt.c320
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c4
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c368
-rw-r--r--drivers/watchdog/booke_wdt.c8
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/da9055_wdt.c4
-rw-r--r--drivers/watchdog/davinci_wdt.c226
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/ep93xx_wdt.c19
-rw-r--r--drivers/watchdog/geodewdt.c17
-rw-r--r--drivers/watchdog/gpio_wdt.c254
-rw-r--r--drivers/watchdog/hpwdt.c14
-rw-r--r--drivers/watchdog/i6300esb.c3
-rw-r--r--drivers/watchdog/iTCO_wdt.c8
-rw-r--r--drivers/watchdog/ib700wdt.c21
-rw-r--r--drivers/watchdog/ibmasr.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c4
-rw-r--r--drivers/watchdog/indydog.c13
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c4
-rw-r--r--drivers/watchdog/it87_wdt.c41
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c15
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c111
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/nuc900_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c390
-rw-r--r--drivers/watchdog/omap_wdt.c23
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pc87413_wdt.c7
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c4
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/retu_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c212
-rw-r--r--drivers/watchdog/sc520_wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c2
-rw-r--r--drivers/watchdog/softdog.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c19
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tegra_wdt.c302
-rw-r--r--drivers/watchdog/ts72xx_wdt.c6
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c234
-rw-r--r--drivers/watchdog/w83697hf_wdt.c6
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/watchdog/wdt285.c3
-rw-r--r--drivers/watchdog/wdt_pci.c3
-rw-r--r--drivers/watchdog/wm831x_wdt.c1
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c33
-rw-r--r--drivers/xen/dbgp.c2
-rw-r--r--drivers/xen/events/Makefile5
-rw-r--r--drivers/xen/events/events_2l.c365
-rw-r--r--drivers/xen/events/events_base.c (renamed from drivers/xen/events.c)824
-rw-r--r--drivers/xen/events/events_fifo.c424
-rw-r--r--drivers/xen/events/events_internal.h150
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/grant-table.c90
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/platform-pci.c11
-rw-r--r--drivers/xen/swiotlb-xen.c22
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c13
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c10
-rw-r--r--drivers/xen/xen-acpi-pad.c31
-rw-r--r--drivers/xen/xen-acpi-processor.c4
-rw-r--r--drivers/xen/xen-selfballoon.c22
-rw-r--r--drivers/xen/xenbus/xenbus_client.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--drivers/xen/xencomm.c219
5308 files changed, 395886 insertions, 144898 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b3138fbb46a4..0a0a90f52d26 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/spmi/Kconfig"
+
source "drivers/hsi/Kconfig"
source "drivers/pps/Kconfig"
@@ -170,4 +172,6 @@ source "drivers/phy/Kconfig"
source "drivers/powercap/Kconfig"
+source "drivers/mcb/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 3cc8214f9b26..e3ced91b1784 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
+obj-$(CONFIG_SPMI) += spmi/
obj-y += hsi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
@@ -118,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-obj-$(CONFIG_ARCH_SHMOBILE) += sh/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
@@ -155,3 +156,4 @@ obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/
+obj-$(CONFIG_MCB) += mcb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4770de5707b9..c205653e9644 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -43,19 +43,6 @@ config ACPI_SLEEP
depends on SUSPEND || HIBERNATION
default y
-config ACPI_PROCFS
- bool "Deprecated /proc/acpi files"
- depends on PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated /proc/acpi/ files to exist, even when
- they have been replaced by functions in /sys.
-
- This option has no effect on /proc/acpi/ files
- and functions which do not yet exist in /sys.
-
- Say N to delete /proc/acpi/ files that have moved to /sys/
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -115,7 +102,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
tristate "Video"
- depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+ depends on X86 && BACKLIGHT_CLASS_DEVICE
depends on INPUT
select THERMAL
help
@@ -343,6 +330,19 @@ config ACPI_BGRT
data from the firmware boot splash. It will appear under
/sys/firmware/acpi/bgrt/ .
+config ACPI_REDUCED_HARDWARE_ONLY
+ bool "Hardware-reduced ACPI support only" if EXPERT
+ def_bool n
+ depends on ACPI
+ help
+ This config item changes the way the ACPI code is built. When this
+ option is selected, the kernel will use a specialized version of
+ ACPICA that ONLY supports the ACPI "reduced hardware" mode. The
+ resulting kernel will be smaller but it will also be restricted to
+ running in ACPI reduced hardware mode ONLY.
+
+ If you are unsure what to do, do not enable this option.
+
source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 3c2e4aa529c4..2c01c1da29ce 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -32,8 +32,8 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
+#include "battery.h"
#define PREFIX "ACPI: "
@@ -58,6 +58,7 @@ struct acpi_ac {
struct power_supply charger;
struct platform_device *pdev;
unsigned long long state;
+ struct notifier_block battery_nb;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
@@ -153,6 +154,26 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
return;
}
+static int acpi_ac_battery_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
+ struct acpi_bus_event *event = (struct acpi_bus_event *)data;
+
+ /*
+ * On HP Pavilion dv6-6179er AC status notifications aren't triggered
+ * when adapter is plugged/unplugged. However, battery status
+	 * notifications are triggered when battery starts charging or
+ * discharging. Re-reading AC status triggers lost AC notifications,
+ * if AC status has changed.
+ */
+ if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
+ event->type == ACPI_BATTERY_NOTIFY_STATUS)
+ acpi_ac_get_state(ac);
+
+ return NOTIFY_OK;
+}
+
static int thinkpad_e530_quirk(const struct dmi_system_id *d)
{
ac_sleep_before_get_state_ms = 1000;
@@ -216,6 +237,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
acpi_device_name(adev), acpi_device_bid(adev),
ac->state ? "on-line" : "off-line");
+ ac->battery_nb.notifier_call = acpi_ac_battery_notify;
+ register_acpi_notifier(&ac->battery_nb);
end:
if (result)
kfree(ac);
@@ -244,6 +267,8 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#else
+#define acpi_ac_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
@@ -260,6 +285,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
+ unregister_acpi_notifier(&ac->battery_nb);
kfree(ac);
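
The ac.c change above spreads the notifier wiring over three hunks: the acpi_ac_battery_notify() callback, registration in acpi_ac_probe(), and unregistration in acpi_ac_remove(). Below is a minimal sketch of the same ACPI bus notifier pattern collected in one place; the names are illustrative and not part of the patch, and only register_acpi_notifier()/unregister_acpi_notifier() and NOTIFY_OK are assumed from the kernel headers.

#include <linux/acpi.h>		/* register_acpi_notifier(), struct acpi_bus_event */
#include <linux/notifier.h>	/* struct notifier_block, NOTIFY_OK */

/* Hypothetical consumer of ACPI bus events. */
static int example_acpi_event(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/*
	 * data is a struct acpi_bus_event *; its device_class and type
	 * fields identify the source, exactly as acpi_ac_battery_notify()
	 * consumes them above.
	 */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_acpi_event,
};

static int example_probe(void)
{
	return register_acpi_notifier(&example_nb);	/* as in acpi_ac_probe() */
}

static void example_remove(void)
{
	unregister_acpi_notifier(&example_nb);		/* as in acpi_ac_remove() */
}
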
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 84190ed89c04..961b45d18a5d 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -18,8 +18,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
ACPI_MODULE_NAME("cmos rtc");
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 5d33c5415405..c4a5d87ede7e 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/cper.h>
#include <linux/ratelimit.h>
#include <linux/edac.h>
@@ -21,11 +20,9 @@
#define EXT_ELOG_ENTRY_MASK GENMASK_ULL(51, 0) /* elog entry address mask */
#define EXTLOG_DSM_REV 0x0
-#define EXTLOG_FN_QUERY 0x0
#define EXTLOG_FN_ADDR 0x1
#define FLAG_OS_OPTIN BIT(0)
-#define EXTLOG_QUERY_L1_EXIST BIT(1)
#define ELOG_ENTRY_VALID (1ULL<<63)
#define ELOG_ENTRY_LEN 0x1000
@@ -46,7 +43,7 @@ struct extlog_l1_head {
static int old_edac_report_status;
-static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
+static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295";
/* L1 table related physical address */
static u64 elog_base;
@@ -156,62 +153,27 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
return NOTIFY_STOP;
}
-static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
+static bool __init extlog_get_l1addr(void)
{
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
- struct acpi_object_list input;
- union acpi_object params[4], *obj;
u8 uuid[16];
- int i;
+ acpi_handle handle;
+ union acpi_object *obj;
acpi_str_to_uuid(extlog_dsm_uuid, uuid);
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = 16;
- params[0].buffer.pointer = uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = rev;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf)))
- return -1;
-
- *ret = 0;
- obj = (union acpi_object *)buf.pointer;
- if (obj->type == ACPI_TYPE_INTEGER) {
- *ret = obj->integer.value;
- } else if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length <= 8) {
- for (i = 0; i < obj->buffer.length; i++)
- *ret |= (obj->buffer.pointer[i] << (i * 8));
- }
- }
- kfree(buf.pointer);
-
- return 0;
-}
-
-static bool extlog_get_l1addr(void)
-{
- acpi_handle handle;
- u64 ret;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return false;
-
- if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_QUERY, &ret) ||
- !(ret & EXTLOG_QUERY_L1_EXIST))
+ if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
return false;
-
- if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_ADDR, &ret))
+ obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
+ EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
+ if (!obj) {
return false;
+ } else {
+ l1_dirbase = obj->integer.value;
+ ACPI_FREE(obj);
+ }
- l1_dirbase = ret;
/* Spec says L1 directory must be 4K aligned, bail out if it isn't */
if (l1_dirbase & ((1 << 12) - 1)) {
pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..69e29f409d4c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -33,6 +33,13 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
+#define LPSS_LTR_SNOOP_REQ BIT(15)
+#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
+#define LPSS_LTR_SNOOP_LAT_1US 0x800
+#define LPSS_LTR_SNOOP_LAT_32US 0xC00
+#define LPSS_LTR_SNOOP_LAT_SHIFT 5
+#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
+#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)
@@ -102,6 +109,16 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
.ltr_required = true,
};
+static struct lpss_shared_clock pwm_clock = {
+ .name = "pwm_clk",
+ .rate = 25000000,
+};
+
+static struct lpss_device_desc byt_pwm_dev_desc = {
+ .clk_required = true,
+ .shared_clock = &pwm_clock,
+};
+
static struct lpss_shared_clock uart_clock = {
.name = "uart_clk",
.rate = 44236800,
@@ -157,6 +174,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33C7", },
/* BayTrail LPSS devices */
+ { "80860F09", (unsigned long)&byt_pwm_dev_desc },
{ "80860F0A", (unsigned long)&byt_uart_dev_desc },
{ "80860F0E", (unsigned long)&byt_spi_dev_desc },
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
@@ -315,6 +333,17 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
return ret;
}
+static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
+{
+ return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
+static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
+ unsigned int reg)
+{
+ writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
struct acpi_device *adev;
@@ -336,7 +365,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
ret = -ENODEV;
goto out;
}
- *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+ *val = __lpss_reg_read(pdata, reg);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -389,6 +418,37 @@ static struct attribute_group lpss_attr_group = {
.name = "lpss_ltr",
};
+static void acpi_lpss_set_ltr(struct device *dev, s32 val)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ u32 ltr_mode, ltr_val;
+
+ ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
+ if (val < 0) {
+ if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
+ ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
+ __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+ }
+ return;
+ }
+ ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
+ if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
+ val = LPSS_LTR_MAX_VAL;
+ } else if (val > LPSS_LTR_MAX_VAL) {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
+ val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
+ } else {
+ ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
+ }
+ ltr_val |= val;
+ __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
+ if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
+ ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
+ __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+ }
+}
+
static int acpi_lpss_platform_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -426,9 +486,29 @@ static struct notifier_block acpi_lpss_nb = {
.notifier_call = acpi_lpss_platform_notify,
};
+static void acpi_lpss_bind(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+ return;
+
+ if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
+ dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
+ else
+ dev_err(dev, "MMIO size insufficient to access LTR\n");
+}
+
+static void acpi_lpss_unbind(struct device *dev)
+{
+ dev->power.set_latency_tolerance = NULL;
+}
+
static struct acpi_scan_handler lpss_handler = {
.ids = acpi_lpss_device_ids,
.attach = acpi_lpss_create_device,
+ .bind = acpi_lpss_bind,
+ .unbind = acpi_lpss_unbind,
};
void __init acpi_lpss_init(void)
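
For reference, the value written to LPSS_SW_LTR by acpi_lpss_set_ltr() above is essentially a pure function of the requested tolerance plus the register's preserved upper bits. The sketch below re-derives only the encoding step, assuming the LPSS_LTR_* constants added by this patch; unlike the real function it starts from zero instead of the current register contents and leaves out the LPSS_GENERAL software-LTR mode switching.

#include <linux/types.h>

/* Illustration only: mirrors the encoding branch of acpi_lpss_set_ltr(). */
static u32 lpss_encode_sw_ltr(s32 val)
{
	u32 ltr_val = 0;	/* real code: current LPSS_SW_LTR & ~LPSS_LTR_SNOOP_MASK */

	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		/* above the cutoff: clamp to the maximum value at the _32US scale */
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		/* does not fit in the value field: switch to the _32US scale */
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		/* fits as-is at the _1US scale */
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}

	return ltr_val | val;
}
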
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 551dad712ffe..b67be85ff0fc 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -180,14 +180,14 @@ static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info)
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
- return acpi_bind_one(&mem->dev, (acpi_handle)arg);
+ return acpi_bind_one(&mem->dev, arg);
}
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
- acpi_handle handle)
+ struct acpi_device *adev)
{
return walk_memory_range(acpi_meminfo_start_pfn(info),
- acpi_meminfo_end_pfn(info), (void *)handle,
+ acpi_meminfo_end_pfn(info), adev,
acpi_bind_memblk);
}
@@ -197,8 +197,7 @@ static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
return 0;
}
-static void acpi_unbind_memory_blocks(struct acpi_memory_info *info,
- acpi_handle handle)
+static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
{
walk_memory_range(acpi_meminfo_start_pfn(info),
acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk);
@@ -242,9 +241,9 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (result && result != -EEXIST)
continue;
- result = acpi_bind_memory_blocks(info, handle);
+ result = acpi_bind_memory_blocks(info, mem_device->device);
if (result) {
- acpi_unbind_memory_blocks(info, handle);
+ acpi_unbind_memory_blocks(info);
return -ENODEV;
}
@@ -285,7 +284,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(info->start_addr);
- acpi_unbind_memory_blocks(info, handle);
+ acpi_unbind_memory_blocks(info);
remove_memory(nid, info->start_addr, info->length);
list_del(&info->list);
kfree(info);
@@ -361,7 +360,19 @@ static void acpi_memory_device_remove(struct acpi_device *device)
acpi_memory_device_free(mem_device);
}
+static bool __initdata acpi_no_memhotplug;
+
void __init acpi_memory_hotplug_init(void)
{
+ if (acpi_no_memhotplug)
+ return;
+
acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}
+
+static int __init disable_acpi_memory_hotplug(char *str)
+{
+ acpi_no_memhotplug = true;
+ return 1;
+}
+__setup("acpi_no_memhotplug", disable_acpi_memory_hotplug);
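
Usage note, grounded only in the hunk above: the __setup() line registers a new boot parameter, so booting with

	acpi_no_memhotplug

on the kernel command line makes acpi_memory_hotplug_init() return before acpi_scan_add_handler_with_hotplug() is called, and ACPI memory hotplug handling is never registered.
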
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 509452a62f96..37d73024b82e 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -28,8 +28,7 @@
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <asm/mwait.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
@@ -409,28 +408,14 @@ static int acpi_pad_pur(acpi_handle handle)
return num;
}
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
- uint32_t idle_cpus)
-{
- union acpi_object params[3] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_BUFFER,},
- };
- struct acpi_object_list arg_list = {3, params};
-
- params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
- params[1].integer.value = stat;
- params[2].buffer.length = 4;
- params[2].buffer.pointer = (void *)&idle_cpus;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
static void acpi_pad_handle_notify(acpi_handle handle)
{
int num_cpus;
uint32_t idle_cpus;
+ struct acpi_buffer param = {
+ .length = 4,
+ .pointer = (void *)&idle_cpus,
+ };
mutex_lock(&isolated_cpus_lock);
num_cpus = acpi_pad_pur(handle);
@@ -440,7 +425,7 @@ static void acpi_pad_handle_notify(acpi_handle handle)
}
acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
- acpi_pad_ost(handle, 0, idle_cpus);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
mutex_unlock(&isolated_cpus_lock);
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 3c1d6b0c09a4..c29c2c3ec0ad 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -212,7 +212,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int cpu_index, device_declaration = 0;
+ int apic_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -258,18 +258,21 @@ static int acpi_processor_get_info(struct acpi_device *device)
device_declaration = 1;
pr->acpi_id = value;
}
- pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
- pr->acpi_id);
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
- if (!cpu0_initialized && (cpu_index == -1) &&
- (num_online_cpus() == 1)) {
- cpu_index = 0;
+ apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
+ if (apic_id < 0) {
+ acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
+ return -ENODEV;
}
+ pr->apic_id = apic_id;
- cpu0_initialized = 1;
-
+ cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ if (!cpu0_initialized) {
+ cpu0_initialized = 1;
+ /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ if ((cpu_index == -1) && (num_online_cpus() == 1))
+ cpu_index = 0;
+ }
pr->id = cpu_index;
/*
@@ -282,6 +285,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
if (ret)
return ret;
}
+
/*
* On some boxes several processors use the same processor bus id.
* But they are located in different scope. For example:
@@ -395,7 +399,7 @@ static int acpi_processor_add(struct acpi_device *device,
goto err;
}
- result = acpi_bind_one(dev, pr->handle);
+ result = acpi_bind_one(dev, device);
if (result)
goto err;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 438304086ff1..b7ed86a20427 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -122,6 +122,8 @@ acpi-y += \
rsaddr.o \
rscalc.o \
rscreate.o \
+ rsdump.o \
+ rsdumpinfo.o \
rsinfo.o \
rsio.o \
rsirq.o \
@@ -132,8 +134,6 @@ acpi-y += \
rsutils.o \
rsxface.o
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
-
acpi-y += \
tbfadt.o \
tbfind.o \
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8a6c4a0d22db..6f1c616910ac 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index a9fd0b872062..68a91eb0fa48 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -113,9 +113,10 @@ void acpi_db_display_handlers(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_db_generate_gpe(char *gpe_arg,
char *block_arg))
-
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+void acpi_db_execute_test(char *type_arg);
+
/*
* dbconvert - miscellaneous conversion routines
*/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 427db72a6302..d3e2cc395d7f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,20 +139,21 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_walk_state *walk_state);
/*
- * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
+ * dsload - Parser/Interpreter interface
*/
acpi_status
acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+/* dsload - pass 1 namespace load callbacks */
+
acpi_status
acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
-/*
- * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
- */
+/* dsload - pass 2 namespace load callbacks */
+
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
@@ -200,7 +201,9 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state);
/*
* dsmethod - Parser/Interpreter interface - control method parsing
*/
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node);
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+ union acpi_operand_object *obj_desc);
acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41abe552c7a3..68ec61fff188 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,9 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
-
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
- acpi_status acpi_ev_remove_global_lock_handler(void);
+acpi_status acpi_ev_remove_global_lock_handler(void);
/*
* evgpe - Low-level GPE support
@@ -133,7 +132,7 @@ acpi_status acpi_ev_gpe_initialize(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_ev_update_gpes(acpi_owner_id table_owner_id))
- acpi_status
+acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -149,7 +148,9 @@ acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);
-struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+acpi_status
+acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
+ struct acpi_gpe_xrupt_info **gpe_xrupt_block);
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index e9f1fc7f99c7..49bbc71fad54 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,11 +51,19 @@
* to simplify maintenance of the code.
*/
#ifdef DEFINE_ACPI_GLOBALS
-#define ACPI_EXTERN
-#define ACPI_INIT_GLOBAL(a,b) a=b
+#define ACPI_GLOBAL(type,name) \
+ extern type name; \
+ type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ type name=value
+
#else
-#define ACPI_EXTERN extern
-#define ACPI_INIT_GLOBAL(a,b) a
+#define ACPI_GLOBAL(type,name) \
+ extern type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ extern type name
#endif
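/*
 * Illustration, not part of the patch: with the reworked macros a single
 * header line now produces both the definition and the declaration. For
 * example
 *
 *	ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
 *
 * expands, in the one file that defines DEFINE_ACPI_GLOBALS, to
 *
 *	extern struct acpi_table_list acpi_gbl_root_table_list;
 *	struct acpi_table_list acpi_gbl_root_table_list;
 *
 * and in every other translation unit to just the extern declaration.
 * ACPI_INIT_GLOBAL() likewise expands to an extern declaration everywhere
 * except the defining file, where it becomes the definition with its
 * initializer.
 */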
#ifdef DEFINE_ACPI_GLOBALS
@@ -82,33 +90,34 @@
* 5) Allow unresolved references (invalid target name) in package objects
* 6) Enable warning messages for behavior that is not ACPI spec compliant
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
/*
- * Automatically serialize ALL control methods? Default is FALSE, meaning
- * to use the Serialized/not_serialized method flags on a per method basis.
- * Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "NotSerialized".
+ * Automatically serialize all methods that create named objects? Default
+ * is TRUE, meaning that all non_serialized methods are scanned once at
+ * table load time to determine those that create named objects. Methods
+ * that create named objects are marked Serialized in order to prevent
+ * possible run-time problems if they are entered by more than one thread.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE);
/*
* Create the predefined _OSI method in the namespace? Default is TRUE
* because ACPI CA is fully compatible with other ACPI implementations.
* Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
/*
* Optionally use default values for the ACPI register widths. Set this to
* TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -116,7 +125,25 @@ bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
* DSDT, creating the need for this option. Default is FALSE, do not copy
* the DSDT.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
+
+/*
+ * Optionally ignore an XSDT if present and use the RSDT instead.
+ * Although the ACPI specification requires that an XSDT be used instead
+ * of the RSDT, the XSDT has been found to be corrupt or ill-formed on
+ * some machines. Default behavior is to use the XSDT if present.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
+
+/*
+ * Optionally use 32-bit FADT addresses if and when there is a conflict
+ * (address mismatch) between the 32-bit and 64-bit versions of the
+ * address. Although ACPICA adheres to the ACPI specification which
+ * requires the use of the corresponding 64-bit address if it is non-zero,
+ * some machines have been found to have a corrupted non-zero 64-bit
+ * address. Default is FALSE, do not favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -124,47 +151,28 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
* this value is set to TRUE if any Windows OSI strings have been
* requested by the BIOS.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE);
/*
* Disable runtime checking and repair of values returned by control methods.
* Use only if the repair is causing a problem on a particular machine.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
/*
* Optionally do not load any SSDTs from the RSDT/XSDT during initialization.
* This can be useful for debugging ACPI problems on some machines.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_load, FALSE);
/*
* We keep track of the latest version of Windows that has been requested by
* the BIOS.
*/
-u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
-
-/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-
-struct acpi_table_fadt acpi_gbl_FADT;
-u32 acpi_current_gpe_count;
-u32 acpi_gbl_trace_flags;
-acpi_name acpi_gbl_trace_method_name;
-u8 acpi_gbl_system_awake_and_running;
-
-/*
- * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
- * that the ACPI hardware is no longer required. A flag in the FADT indicates
- * a reduced HW machine, and that flag is duplicated here for convenience.
- */
-u8 acpi_gbl_reduced_hardware;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
#endif /* DEFINE_ACPI_GLOBALS */
-/* Do not disassemble buffers to resource descriptors */
-
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
-
/*****************************************************************************
*
* ACPI Table globals
@@ -172,37 +180,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables that were
- * found in the RSDT/XSDT.
+ * Master list of all ACPI tables that were found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
+
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
+ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
#if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
#endif /* !ACPI_REDUCED_HARDWARE */
/* These addresses are calculated from the FADT Event Block addresses */
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
-
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
-
-/* DSDT information. Used to check for DSDT corruption */
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable);
-ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable);
/*
- * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
+ * Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
* 2, use only the lower 32 bits of the internal 64-bit Integer.
*/
-ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
-ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
-ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+ACPI_GLOBAL(u8, acpi_gbl_integer_bit_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_byte_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
@@ -215,36 +222,36 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles)
*/
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+ACPI_GLOBAL(struct acpi_mutex_info, acpi_gbl_mutex_info[ACPI_NUM_MUTEX]);
/*
* Global lock mutex is an actual AML mutex object
* Global lock semaphore works in conjunction with the actual global lock
* Global lock spinlock is used for "pending" handshake
*/
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
-ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
-ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
-ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_global_lock_mutex);
+ACPI_GLOBAL(acpi_semaphore, acpi_gbl_global_lock_semaphore);
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_global_lock_pending_lock);
+ACPI_GLOBAL(u16, acpi_gbl_global_lock_handle);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_acquired);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_present);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+ACPI_GLOBAL(acpi_mutex, acpi_gbl_osi_mutex);
/* Reader/Writer lock is used for namespace walk and dynamic table unload */
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+ACPI_GLOBAL(struct acpi_rw_lock, acpi_gbl_namespace_rw_lock);
/*****************************************************************************
*
@@ -254,70 +261,69 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/* Object caches */
-ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_namespace_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_state_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_ext_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache);
+
+/* System */
+
+ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE);
/* Global handlers */
-ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
-ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
-ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
-ACPI_EXTERN void *acpi_gbl_table_handler_context;
-ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
-ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
+ACPI_GLOBAL(struct acpi_global_notify_handler, acpi_gbl_global_notify[2]);
+ACPI_GLOBAL(acpi_exception_handler, acpi_gbl_exception_handler);
+ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler);
+ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler);
+ACPI_GLOBAL(void *, acpi_gbl_table_handler_context);
+ACPI_GLOBAL(struct acpi_walk_state *, acpi_gbl_breakpoint_walk);
+ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler);
+ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list);
/* Owner ID support */
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+ACPI_GLOBAL(u32, acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS]);
+ACPI_GLOBAL(u8, acpi_gbl_last_owner_id_index);
+ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
/* Initialization sequencing */
-ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed);
/* Misc */
-ACPI_EXTERN u32 acpi_gbl_original_mode;
-ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
-ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
-ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
-ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
-ACPI_EXTERN struct acpi_address_range
- *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
-
-#ifndef DEFINE_ACPI_GLOBALS
-
-/* Other miscellaneous */
-
-extern u8 acpi_gbl_shutdown;
-extern u32 acpi_gbl_startup_flags;
+ACPI_GLOBAL(u32, acpi_gbl_original_mode);
+ACPI_GLOBAL(u32, acpi_gbl_rsdp_original_location);
+ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
+ACPI_GLOBAL(u32, acpi_gbl_ps_find_count);
+ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save);
+ACPI_GLOBAL(u8, acpi_gbl_debugger_configuration);
+ACPI_GLOBAL(u8, acpi_gbl_step_to_next_call);
+ACPI_GLOBAL(u8, acpi_gbl_acpi_hardware_present);
+ACPI_GLOBAL(u8, acpi_gbl_events_initialized);
+ACPI_GLOBAL(struct acpi_interface_info *, acpi_gbl_supported_interfaces);
+ACPI_GLOBAL(struct acpi_address_range *,
+ acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]);
+
+/* Other miscellaneous, declared and initialized in utglobal */
+
extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
-extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-
-#endif
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-/* Lists for tracking memory allocations */
+/* Lists for tracking memory allocations (debug only) */
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
+ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
+ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
#endif
/*****************************************************************************
@@ -332,22 +338,23 @@ ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
#define NUM_PREDEFINED_NAMES 9
#endif
-ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_module_code_list;
+ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device);
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list);
extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
extern const struct acpi_predefined_names
acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
#ifdef ACPI_DEBUG_OUTPUT
-ACPI_EXTERN u32 acpi_gbl_current_node_count;
-ACPI_EXTERN u32 acpi_gbl_current_node_size;
-ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
-ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+ACPI_GLOBAL(u32, acpi_gbl_current_node_count);
+ACPI_GLOBAL(u32, acpi_gbl_current_node_size);
+ACPI_GLOBAL(u32, acpi_gbl_max_concurrent_node_count);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_entry_stack_pointer);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_lowest_stack_pointer);
+ACPI_GLOBAL(u32, acpi_gbl_deepest_nesting);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
#endif
/*****************************************************************************
@@ -356,11 +363,11 @@ ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
*
****************************************************************************/
-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
/* Control method single step flag */
-ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
/*****************************************************************************
*
@@ -370,8 +377,9 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
-ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
/*****************************************************************************
*
@@ -381,14 +389,15 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
#if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
- *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
-ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
-ACPI_EXTERN struct acpi_fixed_event_handler
- acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
+ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
+ACPI_GLOBAL(struct acpi_gpe_block_info *,
+ acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]);
+ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
+ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
+ACPI_GLOBAL(struct acpi_fixed_event_handler,
+ acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
+
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
@@ -400,23 +409,19 @@ extern struct acpi_fixed_event_info
*
****************************************************************************/
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
/* Event counters */
-ACPI_EXTERN u32 acpi_method_count;
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_sci_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u32, acpi_method_count);
+ACPI_GLOBAL(u32, acpi_gpe_count);
+ACPI_GLOBAL(u32, acpi_sci_count);
+ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
/* Support for dynamic control method tracing mechanism */
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
/*****************************************************************************
*
@@ -424,66 +429,81 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
*
****************************************************************************/
-ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
#ifdef ACPI_DISASSEMBLER
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
-ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
-ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
-ACPI_EXTERN u8 acpi_gbl_num_external_methods;
-ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
-ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
-ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
+ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
+ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
+ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-extern u8 acpi_gbl_method_executing;
-extern u8 acpi_gbl_abort_method;
-extern u8 acpi_gbl_db_terminate_threads;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
-ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
-ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
-ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
-ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
-ACPI_EXTERN char *acpi_gbl_db_buffer;
-ACPI_EXTERN char *acpi_gbl_db_filename;
-ACPI_EXTERN u32 acpi_gbl_db_debug_level;
-ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
+ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
+ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
+ACPI_GLOBAL(char *, acpi_gbl_db_filename);
+ACPI_GLOBAL(u32, acpi_gbl_db_debug_level);
+ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
+ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
/* These buffers should all be the same size */
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
/*
* Statistic globals
*/
-ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
-ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
-ACPI_EXTERN u32 acpi_gbl_num_nodes;
-ACPI_EXTERN u32 acpi_gbl_num_objects;
-
-ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
-ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
-ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
-ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
+ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
+ACPI_GLOBAL(u32, acpi_gbl_num_objects);
+
+ACPI_GLOBAL(u32, acpi_gbl_size_of_parse_tree);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_method_trees);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_node_entries);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_acpi_objects);
#endif /* ACPI_DEBUGGER */
/*****************************************************************************
*
+ * Application globals
+ *
+ ****************************************************************************/
+
+#ifdef ACPI_APPLICATION
+
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
+
+#endif /* ACPI_APPLICATION */
+
+/*****************************************************************************
+ *
* Info/help support
*
****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6357e932bfd9..2ad2351a9833 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 8af8c9bdeb35..b01f71ce0523 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -87,6 +87,10 @@ typedef const struct acpi_exdump_info {
#define ACPI_EXD_PACKAGE 11
#define ACPI_EXD_FIELD 12
#define ACPI_EXD_REFERENCE 13
+#define ACPI_EXD_LIST 14 /* Operand object list */
+#define ACPI_EXD_HDLR_LIST 15 /* Address Handler list */
+#define ACPI_EXD_RGN_LIST 16 /* Region list */
+#define ACPI_EXD_NODE 17 /* Namespace Node */
/* restore default alignment */
@@ -454,10 +458,6 @@ void acpi_ex_enter_interpreter(void);
void acpi_ex_exit_interpreter(void);
-void acpi_ex_reacquire_interpreter(void);
-
-void acpi_ex_relinquish_interpreter(void);
-
u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
void acpi_ex_acquire_global_lock(u32 rule);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 53ed1a8ba4f0..52a21dafb540 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1038,15 +1038,16 @@ struct acpi_external_list {
struct acpi_external_list *next;
u32 value;
u16 length;
+ u16 flags;
u8 type;
- u8 flags;
- u8 resolved;
- u8 emitted;
};
/* Values for Flags field above */
-#define ACPI_IPATH_ALLOCATED 0x01
+#define ACPI_EXT_RESOLVED_REFERENCE 0x01 /* Object was resolved during cross ref */
+#define ACPI_EXT_ORIGIN_FROM_FILE 0x02 /* External came from a file */
+#define ACPI_EXT_INTERNAL_PATH_ALLOCATED 0x04 /* Deallocate internal path on completion */
+#define ACPI_EXT_EXTERNAL_EMITTED 0x08 /* External() statement has been emitted */
struct acpi_external_file {
char *path;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 2a86c65d873b..4bceb11c7380 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,17 +63,21 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
- * printf() format helpers
+ * printf() format helpers. These macros are workarounds for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
*/
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
-
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
+#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i)
+#define ACPI_PRINTF_UINT "0x%8.8X%8.8X"
+
#else
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
+#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i)
+#define ACPI_FORMAT_TO_UINT(i) (u32) (i)
+#define ACPI_PRINTF_UINT "0x%8.8X"
#endif
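
In use, ACPI_FORMAT_UINT64() always splits the value into two 32-bit halves for a "%8.8X%8.8X" format, while ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT pair up so that a native-width value prints correctly from one call site on both 32-bit and 64-bit hosts. A short usage sketch:

u64 value = 0x1122334455667788ULL;
acpi_physical_address addr = 0x1000;

/* Always prints all 64 bits, as two 32-bit halves */
acpi_os_printf("Value: 0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(value));

/* Prints 16 hex digits on 64-bit hosts, 8 on 32-bit hosts */
acpi_os_printf("Addr:  " ACPI_PRINTF_UINT "\n", ACPI_FORMAT_TO_UINT(addr));
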
/*
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index e6138ac4a160..ee1c040f321c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index cc7ab6dd724e..22fb6449d3d6 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -193,7 +193,8 @@ struct acpi_object_method {
#define ACPI_METHOD_INTERNAL_ONLY 0x02 /* Method is implemented internally (_OSI) */
#define ACPI_METHOD_SERIALIZED 0x04 /* Method is serialized */
#define ACPI_METHOD_SERIALIZED_PENDING 0x08 /* Method is to be marked serialized */
-#define ACPI_METHOD_MODIFIED_NAMESPACE 0x10 /* Method modified the namespace */
+#define ACPI_METHOD_IGNORE_SYNC_LEVEL 0x10 /* Method was auto-serialized at table load time */
+#define ACPI_METHOD_MODIFIED_NAMESPACE 0x20 /* Method modified the namespace */
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 3fc9ca7e8aa3..dda0e6affcf1 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index aed319318835..6168b85463ed 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index f600aded7261..a48d713e9599 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
*
* Return Package types
*
- * 1) PTYPE1 packages do not contain sub-packages.
+ * 1) PTYPE1 packages do not contain subpackages.
*
 * ACPI_PTYPE1_FIXED: Fixed length, 1 or 2 object types:
* object type
@@ -63,8 +63,8 @@
* (Used for _PRW)
*
*
- * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
- * of the different types describe the contents of each of the sub-packages.
+ * 2) PTYPE2 packages contain a Variable-length number of subpackages. Each
+ * of the different types describe the contents of each of the subpackages.
*
* ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
* parent package is allowed:
@@ -560,7 +560,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
/*
* For _HPX, a single package is returned, containing a variable-length number
- * of sub-packages. Each sub-package contains a PCI record setting.
+ * of subpackages. Each subpackage contains a PCI record setting.
 * There are several different types of record settings, of different
* lengths, but all elements of all settings are Integers.
*/
@@ -698,6 +698,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+ {{"_PRP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Str, 1 Int/Str/Pkg */
+ PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1,
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+ ACPI_RTYPE_PACKAGE | ACPI_RTYPE_REFERENCE, 1, 0),
+
{{"_PRS", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index ff97430455cb..4b008e8884a1 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index fc83c0a5ca70..cf7346110bd8 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -133,6 +133,9 @@ struct acpi_init_walk_info {
u32 table_index;
u32 object_count;
u32 method_count;
+ u32 serial_method_count;
+ u32 non_serial_method_count;
+ u32 serialized_method_count;
u32 device_count;
u32 op_region_count;
u32 field_count;
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c54f42c64fe2..5fa4b2027697 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index be8180c17d7e..ceeec0b7ccb1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@ extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
extern const char *acpi_gbl_bm_decode[];
extern const char *acpi_gbl_config_decode[];
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 48a3e331b72d..5908ccec6aea 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 87c26366d1df..f3f834408441 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index afdc6df17abf..720b1cdda711 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index eb56b66444b5..8daf9de82b73 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 2d4c07322576..3661c8e90540 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,7 +105,7 @@ acpi_ds_create_external_region(acpi_status lookup_status,
* operation_region not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(op, path, ACPI_TYPE_REGION, 0);
+ acpi_dm_add_op_to_external_list(op, path, ACPI_TYPE_REGION, 0, 0);
status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
walk_state, node);
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 14424200d246..aee5e45f6d35 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,8 +83,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
(struct acpi_init_walk_info *)context;
struct acpi_namespace_node *node =
(struct acpi_namespace_node *)obj_handle;
- acpi_object_type type;
acpi_status status;
+ union acpi_operand_object *obj_desc;
ACPI_FUNCTION_ENTRY();
@@ -100,9 +100,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
/* And even then, we are only interested in a few object types */
- type = acpi_ns_get_type(obj_handle);
-
- switch (type) {
+ switch (acpi_ns_get_type(obj_handle)) {
case ACPI_TYPE_REGION:
status = acpi_ds_initialize_region(obj_handle);
@@ -117,8 +115,44 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
break;
case ACPI_TYPE_METHOD:
-
+ /*
+ * Auto-serialization support. We will examine each method that is
+ * not_serialized to determine if it creates any Named objects. If
+ * it does, it will be marked serialized to prevent problems if
+ * the method is entered by two or more threads and an attempt is
+ * made to create the same named object twice -- which results in
+ * an AE_ALREADY_EXISTS exception and method abort.
+ */
info->method_count++;
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ break;
+ }
+
+ /* Ignore if already serialized */
+
+ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
+ info->serial_method_count++;
+ break;
+ }
+
+ if (acpi_gbl_auto_serialize_methods) {
+
+ /* Parse/scan method and serialize it if necessary */
+
+ acpi_ds_auto_serialize_method(node, obj_desc);
+ if (obj_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED) {
+
+ /* Method was just converted to Serialized */
+
+ info->serial_method_count++;
+ info->serialized_method_count++;
+ break;
+ }
+ }
+
+ info->non_serial_method_count++;
break;
case ACPI_TYPE_DEVICE:
@@ -170,7 +204,6 @@ acpi_ds_initialize_objects(u32 table_index,
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
/* Set all init info to zero */
@@ -205,14 +238,16 @@ acpi_ds_initialize_objects(u32 table_index,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
+ "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
+ "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
table->signature, owner_id, info.object_count,
- info.device_count, info.method_count,
- info.op_region_count));
+ info.device_count, info.op_region_count,
+ info.method_count, info.serial_method_count,
+ info.non_serial_method_count,
+ info.serialized_method_count));
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Methods, %u Regions\n", info.method_count,
- info.op_region_count));
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n",
+ info.method_count, info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
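
The whole pre-scan added above is gated by acpi_gbl_auto_serialize_methods. A host that wants the previous behaviour can clear that global before ACPICA initialization; a minimal sketch (the global's default value and the exact initialization point are assumptions):

/* Sketch only: opt out of method auto-serialization before ACPICA
 * initialization. acpi_gbl_auto_serialize_methods is the flag tested in
 * acpi_ds_init_one_object() above; its default value is assumed here. */
acpi_gbl_auto_serialize_methods = FALSE;

status = acpi_initialize_subsystem();
if (ACPI_FAILURE(status)) {
	return (status);
}
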
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 81a78ba84311..3c7f7378b94d 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,16 +49,155 @@
#ifdef ACPI_DISASSEMBLER
#include "acdisasm.h"
#endif
+#include "acparser.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/* Local prototypes */
static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op);
+
+static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
/*******************************************************************************
*
+ * FUNCTION: acpi_ds_auto_serialize_method
+ *
+ * PARAMETERS: node - Namespace Node of the method
+ * obj_desc - Method object attached to node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse a control method's AML to determine whether it creates
+ *              named objects and therefore needs to be marked serialized.
+ *
+ * NOTE: It is a bit of overkill to mark all such methods serialized, since
+ * there is only a problem if the method actually blocks during execution.
+ * A blocking operation is, for example, a Sleep() operation, or any access
+ * to an operation region. However, it is probably not possible to easily
+ * detect whether a method will block or not, so we simply mark all suspicious
+ * methods as serialized.
+ *
+ * NOTE2: This code is essentially a generic routine for parsing a single
+ * control method.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+ union acpi_operand_object *obj_desc)
+{
+ acpi_status status;
+ union acpi_parse_object *op = NULL;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Method auto-serialization parse [%4.4s] %p\n",
+ acpi_ut_get_node_name(node), node));
+
+ /* Create/Init a root op for the method parse tree */
+
+ op = acpi_ps_alloc_op(AML_METHOD_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ acpi_ps_set_name(op, node->name.integer);
+ op->common.node = node;
+
+ /* Create and initialize a new walk state */
+
+ walk_state =
+ acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
+ if (!walk_state) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ status =
+ acpi_ds_init_aml_walk(walk_state, op, node,
+ obj_desc->method.aml_start,
+ obj_desc->method.aml_length, NULL, 0);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ return_ACPI_STATUS(status);
+ }
+
+ walk_state->descending_callback = acpi_ds_detect_named_opcodes;
+
+ /* Parse the method, scan for creation of named objects */
+
+ status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_detect_named_opcodes
+ *
+ * PARAMETERS: walk_state - Current state of the parse tree walk
+ * out_op - Unused, required for parser interface
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ * Currently used to detect methods that must be marked serialized
+ * in order to avoid problems with the creation of named objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
+{
+
+ ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);
+
+ /* We are only interested in opcodes that create a new name */
+
+ if (!
+ (walk_state->op_info->
+ flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
+ return (AE_OK);
+ }
+
+ /*
+ * At this point, we know we have a Named object opcode.
+ * Mark the method as serialized. Later code will create a mutex for
+ * this method to enforce serialization.
+ *
+ * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
+ * Sync Level mechanism for this method, even though it is now serialized.
+ * Otherwise, there can be conflicts with existing ASL code that actually
+ * uses sync levels.
+ */
+ walk_state->method_desc->method.sync_level = 0;
+ walk_state->method_desc->method.info_flags |=
+ (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
+ walk_state->method_node->name.ascii,
+ walk_state->method_node, walk_state->op_info->name,
+ walk_state->opcode));
+
+ /* Abort the parse, no need to examine this method any further */
+
+ return (AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ds_method_error
*
* PARAMETERS: status - Execution status
@@ -74,7 +213,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
******************************************************************************/
acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
{
ACPI_FUNCTION_ENTRY();
@@ -217,13 +356,19 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
/*
* The current_sync_level (per-thread) must be less than or equal to
* the sync level of the method. This mechanism provides some
- * deadlock prevention
+ * deadlock prevention.
+ *
+ * If the method was auto-serialized, we just ignore the sync level
+ * mechanism, because auto-serialization of methods can interfere
+ * with ASL code that actually uses sync levels.
*
* Top-level method invocation has no walk state at this point
*/
if (walk_state &&
- (walk_state->thread->current_sync_level >
- obj_desc->method.mutex->mutex.sync_level)) {
+ (!(obj_desc->method.
+ info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
+ && (walk_state->thread->current_sync_level >
+ obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
@@ -668,7 +813,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
method_desc->method.info_flags &=
~ACPI_METHOD_SERIALIZED_PENDING;
method_desc->method.info_flags |=
- ACPI_METHOD_SERIALIZED;
+ (ACPI_METHOD_SERIALIZED |
+ ACPI_METHOD_IGNORE_SYNC_LEVEL);
method_desc->method.sync_level = 0;
}
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index c4b0b3657237..b67522df01ac 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index b1746a68dad1..a1e7e6b6fcf7 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 5205edcf2c01..6c0759c0db47 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index ade44e49deb4..9f74795e2268 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -727,27 +727,26 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
index++;
}
- index--;
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "NumOperands %d, ArgCount %d, Index %d\n",
+ walk_state->num_operands, arg_count, index));
- /* It is the appropriate order to get objects from the Result stack */
+ /* Create the interpreter arguments, in reverse order */
+ index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
-
- /* Force the filling of the operand stack in inverse order */
-
- walk_state->operand_index = (u8) index;
+ walk_state->operand_index = (u8)index;
status = acpi_ds_create_operand(walk_state, arg, index);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
- index--;
-
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Arg #%u (%p) done, Arg1=%p\n", index, arg,
- first_arg));
+ "Created Arg #%u (%p) %u args total\n",
+ index, arg, arg_count));
+ index--;
}
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 1bbb22fd6fa0..f7f5107e754d 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 95e681a36f9c..15623da26200 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,8 +73,20 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
{
switch (pass_number) {
+ case 0:
+
+ /* Parse only - caller will setup callbacks */
+
+ walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+ ACPI_PARSE_DELETE_TREE | ACPI_PARSE_DISASSEMBLE;
+ walk_state->descending_callback = NULL;
+ walk_state->ascending_callback = NULL;
+ break;
+
case 1:
+ /* Load pass 1 */
+
walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
ACPI_PARSE_DELETE_TREE;
walk_state->descending_callback = acpi_ds_load1_begin_op;
@@ -83,6 +95,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
case 2:
+ /* Load pass 2 */
+
walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
ACPI_PARSE_DELETE_TREE;
walk_state->descending_callback = acpi_ds_load2_begin_op;
@@ -91,6 +105,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
case 3:
+ /* Execution pass */
+
#ifndef ACPI_NO_METHOD_EXECUTION
walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
ACPI_PARSE_DELETE_TREE;
@@ -181,8 +197,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(op, path, ACPI_TYPE_DEVICE,
- 0);
+ acpi_dm_add_op_to_external_list(op, path,
+ ACPI_TYPE_DEVICE, 0, 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 7f569d573027..2ac28d297305 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d67891de1b54..9d6e2c1de1f8 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index ecb12e2137ff..24f7d5ea678a 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 83cd45f4a870..c7bffff9ed32 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 4c67193a9fa7..3393a73ca0d6 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index a9cb4a1a4bb8..955f83da68a5 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index a9e76bc4ad97..caaed3c673fd 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -87,9 +87,9 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
return_ACPI_STATUS(status);
}
- gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
- if (!gpe_xrupt_block) {
- status = AE_NO_MEMORY;
+ status =
+ acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
+ if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -112,7 +112,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
unlock_and_exit:
- status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index a3e2f38aadf6..ae779c1e871d 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index d3f5e1e2a2b1..17e4bbfdb096 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -197,8 +197,9 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
* PARAMETERS: interrupt_number - Interrupt for a GPE block
+ * gpe_xrupt_block - Where the block is returned
*
- * RETURN: A GPE interrupt block
+ * RETURN: Status
*
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
* block per unique interrupt level used for GPEs. Should be
@@ -207,7 +208,9 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
-struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+acpi_status
+acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
+ struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
@@ -221,7 +224,8 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt) {
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
+ *gpe_xrupt_block = next_gpe_xrupt;
+ return_ACPI_STATUS(AE_OK);
}
next_gpe_xrupt = next_gpe_xrupt->next;
@@ -231,7 +235,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
- return_PTR(NULL);
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
gpe_xrupt->interrupt_number = interrupt_number;
@@ -250,6 +254,7 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
} else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
+
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
@@ -259,14 +264,15 @@ struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_ACPI_STATUS(status);
}
}
- return_PTR(gpe_xrupt);
+ *gpe_xrupt_block = gpe_xrupt;
+ return_ACPI_STATUS(AE_OK);
}
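
With the pointer return replaced by a status code, callers now pass the destination explicitly, exactly as the evgpeblk.c hunk earlier in this patch does:

struct acpi_gpe_xrupt_info *gpe_xrupt_block;
acpi_status status;

status = acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
if (ACPI_FAILURE(status)) {
	goto unlock_and_exit;	/* as in acpi_ev_install_gpe_block() */
}
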
/*******************************************************************************
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index e3157313eb27..78ac29351c9e 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index a5687540e9a6..5d594eb2e5ec 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 144cbb9b73bc..9957297d1580 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -314,6 +314,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
{
union acpi_operand_object *handler_obj;
union acpi_operand_object *obj_desc;
+ union acpi_operand_object *start_desc;
union acpi_operand_object **last_obj_ptr;
acpi_adr_space_setup region_setup;
void **region_context;
@@ -341,6 +342,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Find this region in the handler's list */
obj_desc = handler_obj->address_space.region_list;
+ start_desc = obj_desc;
last_obj_ptr = &handler_obj->address_space.region_list;
while (obj_desc) {
@@ -438,6 +440,15 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
last_obj_ptr = &obj_desc->region.next;
obj_desc = obj_desc->region.next;
+
+ /* Prevent infinite loop if list is corrupted */
+
+ if (obj_desc == start_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Circular handler list in region object %p",
+ region_obj));
+ return_VOID;
+ }
}
/* If we get here, the region was not in the handler's region list */
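
The start_desc guard added above is a general defensive pattern for walking a singly linked list that may have been corrupted into a cycle: remember the first node and stop if the walk ever returns to it. A generic sketch of the same pattern:

struct node {
	struct node *next;
	/* payload... */
};

void walk_list(struct node *head)
{
	struct node *start = head;	/* remember where the walk began */
	struct node *cur = head;

	while (cur) {
		/* ...process cur... */

		cur = cur->next;
		if (cur == start) {
			/* list wrapped back to the start: corrupted, bail out */
			break;
		}
	}
}
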
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 8354c4f7f10c..1b148a440d67 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 9e9e3454d893..4d8a709c1fc4 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 23a7fadca412..a734b27da061 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 39d06af5e347..e286640ad4ff 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 5713da77c665..20a1392ffe06 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -583,6 +583,18 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
+ /* Validate the parent device */
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (node->object) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
/*
* For user-installed GPE Block Devices, the gpe_block_base_number
* is always zero
@@ -666,6 +678,13 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
goto unlock_and_exit;
}
+ /* Validate the parent device */
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
/* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
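
Both hunks tighten validation in the public GPE block interfaces: the handle must name a Device object (otherwise AE_TYPE), and a block may not already be attached (AE_ALREADY_EXISTS). A rough caller sketch for acpi_install_gpe_block(); the register layout values are illustrative only:

struct acpi_generic_address block_address = {
	.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
	.address = 0x1000,		/* illustrative base address */
};
acpi_status status;

/* gpe_device must be a handle to an ACPI Device object */
status = acpi_install_gpe_block(gpe_device, &block_address,
				2,	/* register count */
				5);	/* interrupt number */
if (ACPI_FAILURE(status)) {
	/* handle error */
}
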
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 02ed75ac56cd..2d6f187939c7 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 06d216c8d43a..8ba1464efd11 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 69e4a8cc9b71..c545386fee96 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c2e6dcdad3e..95d23dabcfbb 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 81c72a4ecd82..4cfc3d3b5c97 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 4d046faac48c..973fdae00f94 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,12 +94,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
{ACPI_EXD_BUFFER, 0, NULL}
};
-static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
@@ -108,11 +109,11 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
static struct acpi_exdump_info acpi_ex_dump_device[4] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
"System Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
- "Device Notify"}
+ "Device Notify"},
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(device.handler), "Handler"}
};
static struct acpi_exdump_info acpi_ex_dump_event[2] = {
@@ -142,17 +143,18 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
};
-static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+static struct acpi_exdump_info acpi_ex_dump_region[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"},
{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(region.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
};
-static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+static struct acpi_exdump_info acpi_ex_dump_power[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
"System Level"},
@@ -161,7 +163,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
"System Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
- "Device Notify"}
+ "Device Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.handler), "Handler"}
};
static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
@@ -225,7 +228,7 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
{ACPI_EXD_REFERENCE, 0, NULL}
};
@@ -234,16 +237,16 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+ {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(address_space.next), "Next"},
+ {ACPI_EXD_RGN_LIST, ACPI_EXD_OFFSET(address_space.region_list),
"Region List"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
};
static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
@@ -252,14 +255,31 @@ static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
};
+static struct acpi_exdump_info acpi_ex_dump_extra[6] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_extra), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.method_REG), "_REG Method"},
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(extra.scope_node), "Scope Node"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.region_context),
+ "Region Context"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.aml_start), "Aml Start"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(extra.aml_length), "Aml Length"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_data[3] = {
+ {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_data), NULL},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.handler), "Handler"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.pointer), "Raw Data"}
+};
+
/* Miscellaneous tables */
-static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+static struct acpi_exdump_info acpi_ex_dump_common[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
{ACPI_EXD_TYPE, 0, NULL},
{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
"Reference Count"},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"},
+ {ACPI_EXD_LIST, ACPI_EXD_OFFSET(common.next_object), "Object List"}
};
static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
@@ -274,15 +294,17 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
"Field Bit Offset"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
"Base Byte Offset"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+ {ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
};
-static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+static struct acpi_exdump_info acpi_ex_dump_node[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
- {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
- {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+ {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"},
+ {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"}
};
/* Dispatch table, indexed by object type */
@@ -315,7 +337,9 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
acpi_ex_dump_address_handler,
NULL,
NULL,
- NULL
+ NULL,
+ acpi_ex_dump_extra,
+ acpi_ex_dump_data
};
/*******************************************************************************
@@ -340,6 +364,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
char *name;
const char *reference_name;
u8 count;
+ union acpi_operand_object *start;
+ union acpi_operand_object *data = NULL;
+ union acpi_operand_object *next;
+ struct acpi_namespace_node *node;
if (!info) {
acpi_os_printf
@@ -363,9 +391,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_TYPE:
- acpi_ex_out_string("Type",
- acpi_ut_get_object_type_name
- (obj_desc));
+ acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+ obj_desc->common.type,
+ acpi_ut_get_object_type_name(obj_desc));
break;
case ACPI_EXD_UINT8:
@@ -433,6 +461,121 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
acpi_ex_dump_reference_obj(obj_desc);
break;
+ case ACPI_EXD_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->common.next_object) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->common.next_object;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Object list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_HDLR_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->address_space.next) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->address_space.next;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Handler list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_RGN_LIST:
+
+ start = *ACPI_CAST_PTR(void *, target);
+ next = start;
+
+ acpi_os_printf("%20s : %p", name, next);
+ if (next) {
+ acpi_os_printf("(%s %2.2X)",
+ acpi_ut_get_object_type_name
+ (next), next->common.type);
+
+ while (next->region.next) {
+ if ((next->common.type ==
+ ACPI_TYPE_LOCAL_DATA) && !data) {
+ data = next;
+ }
+
+ next = next->region.next;
+ acpi_os_printf("->%p(%s %2.2X)", next,
+ acpi_ut_get_object_type_name
+ (next),
+ next->common.type);
+
+ if ((next == start) || (next == data)) {
+ acpi_os_printf
+ ("\n**** Error: Region list appears to be circular linked");
+ break;
+ }
+ }
+ }
+
+ acpi_os_printf("\n", next);
+ break;
+
+ case ACPI_EXD_NODE:
+
+ node =
+ *ACPI_CAST_PTR(struct acpi_namespace_node *,
+ target);
+
+ acpi_os_printf("%20s : %p", name, node);
+ if (node) {
+ acpi_os_printf(" [%4.4s]", node->name.ascii);
+ }
+ acpi_os_printf("\n");
+ break;
+
default:
acpi_os_printf("**** Invalid table opcode [%X] ****\n",
@@ -821,10 +964,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
}
acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
- acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
- acpi_ex_out_pointer("Attached Object",
- acpi_ns_get_attached_object(node));
- acpi_ex_out_pointer("Parent", node->parent);
+ acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+ node->type, acpi_ut_get_type_name(node->type));
acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
acpi_ex_dump_node);
@@ -1017,22 +1158,26 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
((struct acpi_namespace_node *)obj_desc)->
object);
- acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
- obj_desc)->object, flags);
- return_VOID;
+ obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ goto dump_object;
}
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
- acpi_os_printf
- ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
- obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+ acpi_os_printf("%p is not an ACPI operand object: [%s]\n",
+ obj_desc, acpi_ut_get_descriptor_name(obj_desc));
return_VOID;
}
- if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+ /* Validate the object type */
+
+ if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+ acpi_os_printf("Not a known object type: %2.2X\n",
+ obj_desc->common.type);
return_VOID;
}
+dump_object:
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
@@ -1040,6 +1185,22 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
/* Object-specific fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+
+ if (obj_desc->common.type == ACPI_TYPE_REGION) {
+ obj_desc = obj_desc->common.next_object;
+ if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+ acpi_os_printf
+ ("Secondary object is not a known object type: %2.2X\n",
+ obj_desc->common.type);
+
+ return_VOID;
+ }
+
+ acpi_os_printf("\nExtra attached Object (%p):\n", obj_desc);
+ acpi_ex_dump_object(obj_desc,
+ acpi_ex_dump_info[obj_desc->common.type]);
+ }
+
return_VOID;
}
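The exdump.c changes above extend ACPICA's table-driven object dump: each acpi_exdump_info entry pairs an ACPI_EXD_* opcode with a field offset and a label, and acpi_ex_dump_object() walks the table and formats each field accordingly. The sketch below is illustrative only and is not part of this patch; the demo_* names are made up, and the offsetof-based layout is an assumption standing in for the real ACPI_EXD_OFFSET/ACPI_EXD_TABLE_SIZE macros.

#include <stdio.h>
#include <stddef.h>

struct demo_object {
	unsigned char type;
	unsigned int length;
	void *node;
};

enum demo_opcode { DEMO_INIT, DEMO_UINT8, DEMO_UINT32, DEMO_POINTER };

struct demo_dump_info {
	enum demo_opcode opcode;
	size_t offset;		/* offset of the field within the object */
	const char *name;	/* label printed next to the value */
};

static const struct demo_dump_info demo_table[] = {
	{ DEMO_INIT, 0, NULL },	/* first entry only marks the table itself */
	{ DEMO_UINT8, offsetof(struct demo_object, type), "Type" },
	{ DEMO_UINT32, offsetof(struct demo_object, length), "Length" },
	{ DEMO_POINTER, offsetof(struct demo_object, node), "Parent Node" },
};

static void demo_dump(const struct demo_object *obj)
{
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		const char *target = (const char *)obj + demo_table[i].offset;

		switch (demo_table[i].opcode) {
		case DEMO_INIT:
			break;
		case DEMO_UINT8:
			printf("%20s : %2.2X\n", demo_table[i].name,
			       *(const unsigned char *)target);
			break;
		case DEMO_UINT32:
			printf("%20s : %8.8X\n", demo_table[i].name,
			       *(const unsigned int *)target);
			break;
		case DEMO_POINTER:
			printf("%20s : %p\n", demo_table[i].name,
			       *(void *const *)target);
			break;
		}
	}
}

The new ACPI_EXD_LIST, ACPI_EXD_HDLR_LIST and ACPI_EXD_RGN_LIST opcodes added in this patch extend the same scheme with walkers that follow the next pointers and stop when the chain loops back to its start, which is what the circular-list error messages above report.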
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index cfd875243421..68d97441432c 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 49fb742d61b9..1d1b27a96c5b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 65d93607f368..2207e624f538 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7be0205ad067..b49ea2a95f4f 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 14689dec4960..dbb03b544e8c 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index d74cea416ca0..1b8e94104407 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d6fa0fce1fc9..2ede656ee26a 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index bc042adf8804..363767cf01e5 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4459e32c683d..29e9e99f7fe3 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 5a588611ab48..ee3f872870bc 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 9d28867e60dc..cd5288a257a9 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index acd34f599313..ab060261b43e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,7 +124,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
}
if (!source_desc) {
- ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
+ ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
+ node->name.ascii, node));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 1606524312e3..3cde553bcbe1 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index be3f66973ee8..3af8de3fcea4 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index f0b09bf9887d..daf49f7ea311 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 20d809d90c5b..04bd16c08f9e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 26e371073b1a..fd11018b0168 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 6578dee2e51b..f7da64123ed5 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,7 +77,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_wait_semaphore(semaphore, 1, timeout);
@@ -87,7 +87,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
}
return_ACPI_STATUS(status);
@@ -123,7 +123,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_acquire_mutex(mutex, timeout);
@@ -133,7 +133,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
}
return_ACPI_STATUS(status);
@@ -198,7 +198,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
/* Since this thread will sleep, we must release the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
/*
* For compatibility with other ACPI implementations and to prevent
@@ -212,7 +212,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
/* And now we must get the interpreter again */
- acpi_ex_reacquire_interpreter();
+ acpi_ex_enter_interpreter();
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 99dc7b287d55..d9d72dff2a76 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,37 +100,6 @@ void acpi_ex_enter_interpreter(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_reacquire_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Reacquire the interpreter execution region from within the
- * interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjunction with
- * relinquish_interpreter
- *
- ******************************************************************************/
-
-void acpi_ex_reacquire_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter,
- * since it was not actually released by acpi_ex_relinquish_interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_enter_interpreter();
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_exit_interpreter
*
* PARAMETERS: None
@@ -139,7 +108,16 @@ void acpi_ex_reacquire_interpreter(void)
*
* DESCRIPTION: Exit the interpreter execution region. This is the top level
* routine used to exit the interpreter when all processing has
- * been completed.
+ * been completed, or when the method blocks.
+ *
+ * Cases where the interpreter is unlocked internally:
+ * 1) Method will be blocked on a Sleep() AML opcode
+ * 2) Method will be blocked on an Acquire() AML opcode
+ * 3) Method will be blocked on a Wait() AML opcode
+ * 4) Method will be blocked to acquire the global lock
+ * 5) Method will be blocked waiting to execute a serialized control
+ * method that is currently executing
+ * 6) About to invoke a user-installed opregion handler
*
******************************************************************************/
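The six cases listed in the rewritten acpi_ex_exit_interpreter() description all follow one pattern, visible in the exsystem.c hunks earlier in this patch: drop the interpreter lock, perform the operation that may block, then retake the lock. A minimal outline, not part of this patch (the function name is made up and error handling is reduced to its skeleton):

static acpi_status blocking_op_outline(acpi_semaphore semaphore, u16 timeout)
{
	acpi_status status;

	/* About to block: let other threads execute AML meanwhile */

	acpi_ex_exit_interpreter();

	status = acpi_os_wait_semaphore(semaphore, 1, timeout);

	/* Done blocking: reacquire the interpreter before touching AML state */

	acpi_ex_enter_interpreter();

	return (status);
}

With acpi_ex_relinquish_interpreter()/acpi_ex_reacquire_interpreter() removed below, the lock is now dropped and retaken unconditionally at these call sites; the old acpi_gbl_all_methods_serialized special case no longer applies there.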
@@ -160,44 +138,6 @@ void acpi_ex_exit_interpreter(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_relinquish_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Exit the interpreter execution region, from within the
- * interpreter - before attempting an operation that will possibly
- * block the running thread.
- *
- * Cases where the interpreter is unlocked internally
- * 1) Method to be blocked on a Sleep() AML opcode
- * 2) Method to be blocked on an Acquire() AML opcode
- * 3) Method to be blocked on a Wait() AML opcode
- * 4) Method to be blocked to acquire the global lock
- * 5) Method to be blocked waiting to execute a serialized control method
- * that is currently executing
- * 6) About to invoke a user-installed opregion handler
- *
- ******************************************************************************/
-
-void acpi_ex_relinquish_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_exit_interpreter();
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_truncate_for32bit_table
*
* PARAMETERS: obj_desc - Object to be truncated
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 3d36df828f52..1e66d960fc11 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 414076818d40..858fdd6be598 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 96540506058f..2e6caabba07a 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 0889a629505f..e701d8c33dbf 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 12e6cff54f78..e0fd9b4978cd 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e3828cc4361b..d590693eb54e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 3c498dc1636e..76ab5c1a814e 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index eab70d58852a..6b919127cd9d 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index b4b47db2dee2..96d007df65ec 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 15dddc10fc9b..6921c7f3d208 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 14f65f6345b9..f1249e3463be 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index fd1ff54cda19..607eb9e5150d 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 74b24c82707e..80fcfc8c9c1b 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index acd2964c2690..b55642c4ee58 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 48b9c6f12643..3d88ef4a3e0d 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 283762511b73..42d37109aa5d 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 963ceef063f8..e634a05974db 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 3a0423af968c..a3fb7e4c0809 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,9 +111,8 @@ acpi_status acpi_ns_initialize_objects(void)
info.object_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Control Methods found\n", info.method_count));
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "%u Op Regions found\n", info.op_region_count));
+ "%u Control Methods found\n%u Op Regions found\n",
+ info.method_count, info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 89ec645e7730..7c9d0181f341 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -128,12 +128,12 @@ unlock:
* parse trees.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "**** Begin Table Method Parsing and Object Initialization\n"));
+ "**** Begin Table Object Initialization\n"));
status = acpi_ds_initialize_objects(table_index, node);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "**** Completed Table Method Parsing and Object Initialization\n"));
+ "**** Completed Table Object Initialization\n"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 90a0380fb8a0..7eee0a6f02f6 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 7a736f4d1fd8..fe54a8c73b8c 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -222,13 +222,19 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
- /* Clear the entry in all cases */
+ /* Clear the Node entry in all cases */
node->object = NULL;
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+
+ /* Unlink object from front of possible object list */
+
node->object = obj_desc->common.next_object;
+
+ /* Handle possible 2-descriptor object */
+
if (node->object &&
- ((node->object)->common.type != ACPI_TYPE_LOCAL_DATA)) {
+ (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
node->object = node->object->common.next_object;
}
}
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 177857340271..e83cff31754b 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index d2855d9857c4..392910ffbed9 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 3d5391f9bcb5..68f725839eb6 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,12 +132,12 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
* Decode the type of the expected package contents
*
* PTYPE1 packages contain no subpackages
- * PTYPE2 packages contain sub-packages
+ * PTYPE2 packages contain subpackages
*/
switch (package->ret_info.type) {
case ACPI_PTYPE1_FIXED:
/*
- * The package count is fixed and there are no sub-packages
+ * The package count is fixed and there are no subpackages
*
* If package is too small, exit.
* If package is larger than expected, issue warning but continue
@@ -169,7 +169,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE1_VAR:
/*
- * The package count is variable, there are no sub-packages, and all
+ * The package count is variable, there are no subpackages, and all
* elements must be of the same type
*/
for (i = 0; i < count; i++) {
@@ -185,7 +185,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE1_OPTION:
/*
- * The package count is variable, there are no sub-packages. There are
+ * The package count is variable, there are no subpackages. There are
* a fixed number of required elements, and a variable number of
* optional elements.
*
@@ -242,7 +242,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
elements++;
count--;
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -250,7 +250,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_PKG_COUNT:
- /* First element is the (Integer) count of sub-packages to follow */
+ /* First element is the (Integer) count of subpackages to follow */
status = acpi_ns_check_object_type(info, elements,
ACPI_RTYPE_INTEGER, 0);
@@ -270,7 +270,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
count = expected_count;
elements++;
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -283,9 +283,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
- * variable number of sub-Packages.
+ * variable number of subpackages.
*
- * First, ensure that the first element is a sub-Package. If not,
+ * First, ensure that the first element is a subpackage. If not,
* the BIOS may have incorrectly returned the object as a single
* package instead of a Package of Packages (a common error if
* there is only one entry). We may be able to repair this by
@@ -310,7 +310,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
count = 1;
}
- /* Examine the sub-packages */
+ /* Examine the subpackages */
status =
acpi_ns_check_package_list(info, package, elements, count);
@@ -370,9 +370,9 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
u32 j;
/*
- * Validate each sub-Package in the parent Package
+ * Validate each subpackage in the parent Package
*
- * NOTE: assumes list of sub-packages contains no NULL elements.
+ * NOTE: assumes list of subpackages contains no NULL elements.
* Any NULL elements should have been removed by earlier call
* to acpi_ns_remove_null_elements.
*/
@@ -389,7 +389,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
return (status);
}
- /* Examine the different types of expected sub-packages */
+ /* Examine the different types of expected subpackages */
info->parent_package = sub_package;
switch (package->ret_info.type) {
@@ -450,14 +450,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_FIXED:
- /* Each sub-package has a fixed length */
+ /* Each subpackage has a fixed length */
expected_count = package->ret_info2.count;
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
for (j = 0; j < expected_count; j++) {
status =
@@ -475,14 +475,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
case ACPI_PTYPE2_MIN:
- /* Each sub-package has a variable but minimum length */
+ /* Each subpackage has a variable but minimum length */
expected_count = package->ret_info.count1;
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
status =
acpi_ns_check_package_elements(info, sub_elements,
@@ -531,7 +531,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
(*sub_elements)->integer.value = expected_count;
}
- /* Check the type of each sub-package element */
+ /* Check the type of each subpackage element */
status =
acpi_ns_check_package_elements(info,
@@ -557,10 +557,10 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
package_too_small:
- /* The sub-package count was smaller than required */
+ /* The subpackage count was smaller than required */
ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags,
- "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+ "Return SubPackage[%u] is too small - found %u elements, expected %u",
i, sub_package->package.count, expected_count));
return (AE_AML_OPERAND_VALUE);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index a05afff50eb9..7e417aa5c91e 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* this predefined name. Either one return value is expected, or none,
* for both methods and other objects.
*
- * Exit now if there is no return object. Warning if one was expected.
+ * Try to repair if there was no return object. Warn if the repair fails.
*/
if (!return_object) {
if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
- ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
- ACPI_WARN_ALWAYS,
- "Missing expected return value"));
+ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
+ ACPI_WARN_ALWAYS,
+ "Found unexpected NULL package element"));
+
+ status =
+ acpi_ns_repair_null_element(info,
+ expected_btypes,
+ package_index,
+ return_object_ptr);
+ if (ACPI_SUCCESS(status)) {
+ return (AE_OK); /* Repair was successful */
+ }
+ } else {
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
+ ACPI_WARN_ALWAYS,
+ "Missing expected return value"));
+ }
return (AE_AML_NO_RETURN_VALUE);
}
@@ -448,7 +465,7 @@ acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
* RETURN: None.
*
* DESCRIPTION: Remove all NULL package elements from packages that contain
- * a variable number of sub-packages. For these types of
+ * a variable number of subpackages. For these types of
* packages, NULL elements can be safely removed.
*
*****************************************************************************/
@@ -469,7 +486,7 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
/*
* We can safely remove all NULL elements from these package types:
* PTYPE1_VAR packages contain a variable number of simple data types.
- * PTYPE2 packages contain a variable number of sub-packages.
+ * PTYPE2 packages contain a variable number of subpackages.
*/
switch (package_type) {
case ACPI_PTYPE1_VAR:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 6a25d320b169..b09e6bef72b8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -432,8 +432,8 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
* DESCRIPTION: Repair for the _CST object:
* 1. Sort the list ascending by C state type
* 2. Ensure type cannot be zero
- * 3. A sub-package count of zero means _CST is meaningless
- * 4. Count must match the number of C state sub-packages
+ * 3. A subpackage count of zero means _CST is meaningless
+ * 4. Count must match the number of C state subpackages
*
*****************************************************************************/
@@ -611,6 +611,7 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
union acpi_operand_object **top_object_list;
union acpi_operand_object **sub_object_list;
union acpi_operand_object *obj_desc;
+ union acpi_operand_object *sub_package;
u32 element_count;
u32 index;
@@ -619,8 +620,17 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
top_object_list = package_object->package.elements;
element_count = package_object->package.count;
- for (index = 0; index < element_count; index++) {
- sub_object_list = (*top_object_list)->package.elements;
+ /* Examine each subpackage */
+
+ for (index = 0; index < element_count; index++, top_object_list++) {
+ sub_package = *top_object_list;
+ sub_object_list = sub_package->package.elements;
+
+ /* Check for minimum required element count */
+
+ if (sub_package->package.count < 4) {
+ continue;
+ }
/*
* If the BIOS has erroneously reversed the _PRT source_name (index 2)
@@ -634,15 +644,12 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
sub_object_list[2] = obj_desc;
info->return_flags |= ACPI_OBJECT_REPAIRED;
- ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ ACPI_WARN_PREDEFINED((AE_INFO,
+ info->full_pathname,
info->node_flags,
"PRT[%X]: Fixed reversed SourceName and SourceIndex",
index));
}
-
- /* Point to the next union acpi_operand_object in the top level package */
-
- top_object_list++;
}
return (AE_OK);
@@ -679,7 +686,7 @@ acpi_ns_repair_PSS(struct acpi_evaluate_info *info,
u32 i;
/*
- * Entries (sub-packages) in the _PSS Package must be sorted by power
+ * Entries (subpackages) in the _PSS Package must be sorted by power
* dissipation, in descending order. If it appears that the list is
* incorrectly sorted, sort it. We sort by cpu_frequency, since this
* should be proportional to the power.
@@ -767,9 +774,9 @@ acpi_ns_repair_TSS(struct acpi_evaluate_info *info,
*
* PARAMETERS: info - Method execution information block
* return_object - Pointer to the top-level returned object
- * start_index - Index of the first sub-package
- * expected_count - Minimum length of each sub-package
- * sort_index - Sub-package entry to sort on
+ * start_index - Index of the first subpackage
+ * expected_count - Minimum length of each subpackage
+ * sort_index - Subpackage entry to sort on
* sort_direction - Ascending or descending
* sort_key_name - Name of the sort_index field
*
@@ -805,7 +812,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
}
/*
- * NOTE: assumes list of sub-packages contains no NULL elements.
+ * NOTE: assumes list of subpackages contains no NULL elements.
* Any NULL elements should have been removed by earlier call
* to acpi_ns_remove_null_elements.
*/
@@ -832,7 +839,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
return (AE_AML_OPERAND_TYPE);
}
- /* Each sub-package must have the minimum length */
+ /* Each subpackage must have the minimum length */
if ((*outer_elements)->package.count < expected_count) {
return (AE_AML_PACKAGE_LIMIT);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 47420faef073..af1cc42a8aa1 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 4a0665b6bcc1..4a5e3f5c0ff7 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index e81f15ef659a..4758a1f2ce22 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e973e311f856..4bd558bf10d2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,7 +84,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
acpi_object_type return_type)
{
acpi_status status;
- u8 must_free = FALSE;
+ u8 free_buffer_on_error = FALSE;
ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
@@ -95,14 +95,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
}
if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
- must_free = TRUE;
+ free_buffer_on_error = TRUE;
}
/* Evaluate the object */
- status =
- acpi_evaluate_object(handle, pathname, external_params,
- return_buffer);
+ status = acpi_evaluate_object(handle, pathname,
+ external_params, return_buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -135,11 +134,15 @@ acpi_evaluate_object_typed(acpi_handle handle,
pointer)->type),
acpi_ut_get_type_name(return_type)));
- if (must_free) {
-
- /* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
-
- ACPI_FREE_BUFFER(*return_buffer);
+ if (free_buffer_on_error) {
+ /*
+ * Free a buffer created via ACPI_ALLOCATE_BUFFER.
+ * Note: We use acpi_os_free here because acpi_os_allocate was used
+ * to allocate the buffer. This purposefully bypasses the
+ * (optionally enabled) allocation tracking mechanism since we
+ * only want to track internal allocations.
+ */
+ acpi_os_free(return_buffer->pointer);
return_buffer->pointer = NULL;
}
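For context, a caller-side sketch of the path this hunk changes; it is not part of the patch. With ACPI_ALLOCATE_BUFFER, acpi_evaluate_object_typed() now releases the buffer itself via acpi_os_free() when the returned type does not match, so the caller owns buf.pointer only on success. The handle, the method name "_EXM", and the kfree() cleanup convention are assumptions made for illustration.

static u64 read_integer_example(acpi_handle handle)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	u64 value;

	status = acpi_evaluate_object_typed(handle, "_EXM", NULL, &buf,
					    ACPI_TYPE_INTEGER);
	if (ACPI_FAILURE(status))
		return 0;	/* buffer already released on type mismatch */

	obj = buf.pointer;
	value = obj->integer.value;
	kfree(buf.pointer);	/* Linux acpi_os_allocate() is kmalloc-backed */
	return value;
}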
@@ -920,19 +923,22 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
/*******************************************************************************
*
- * FUNCTION: acpi_get_data
+ * FUNCTION: acpi_get_data_full
*
* PARAMETERS: obj_handle - Namespace node
* handler - Handler used in call to attach_data
* data - Where the data is returned
+ * callback - Function to execute before returning
*
* RETURN: Status
*
- * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node
+ * and execute a callback before returning.
*
******************************************************************************/
acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+acpi_get_data_full(acpi_handle obj_handle, acpi_object_handler handler,
+ void **data, void (*callback)(void *))
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -957,10 +963,34 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
}
status = acpi_ns_get_attached_data(node, handler, data);
+ if (ACPI_SUCCESS(status) && callback) {
+ callback(*data);
+ }
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_data_full)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_data
+ *
+ * PARAMETERS: obj_handle - Namespace node
+ * handler - Handler used in call to attach_data
+ * data - Where the data is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+ return acpi_get_data_full(obj_handle, handler, data, NULL);
+}
+
ACPI_EXPORT_SYMBOL(acpi_get_data)
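The new acpi_get_data_full() lets a caller run a callback on the attached data while the namespace mutex is still held, with acpi_get_data() kept as a thin wrapper that passes a NULL callback. A minimal caller sketch under that assumption; struct my_device_data, my_attach_handler() and my_get_ref() are hypothetical names, not part of the patch:

#include <linux/acpi.h>
#include <linux/kref.h>

struct my_device_data {
	struct kref kref;
	/* driver-specific fields */
};

/* Handler previously given to acpi_attach_data(); invoked when the node is deleted */
static void my_attach_handler(acpi_handle handle, void *data) { }

/* Runs under the ACPICA namespace mutex, so the attached object cannot vanish here */
static void my_get_ref(void *data)
{
	struct my_device_data *mdd = data;

	kref_get(&mdd->kref);
}

static struct my_device_data *my_lookup(acpi_handle handle)
{
	void *data = NULL;
	acpi_status status;

	status = acpi_get_data_full(handle, my_attach_handler, &data, my_get_ref);
	if (ACPI_FAILURE(status) || !data)
		return NULL;

	return data;	/* reference was already taken by the callback */
}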
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 3a4bd3ff49a3..8c6c11ce9760 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 0e6d79e462d4..dae9401be7a2 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 91a5a69db80c..314d314340ae 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 065b44ae538f..b058e2390fdd 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -480,6 +480,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status = AE_OK;
}
+ if (status == AE_CTRL_TERMINATE) {
+ return_ACPI_STATUS(status);
+ }
+
status =
acpi_ps_complete_op(walk_state, &op,
status);
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 95dc608a66a8..a6885077d59e 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,10 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
status = walk_state->descending_callback(walk_state, op);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During name lookup/catalog"));
+ }
return_ACPI_STATUS(status);
}
@@ -230,7 +233,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
status = acpi_ps_next_parse_state(walk_state, *op, status);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_PENDING) {
- return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+ status = AE_CTRL_PARSE_PENDING;
}
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1b659e59710a..1755d2ac5656 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 9ba5301e5751..0d8d37ffd04d 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,10 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
+#ifdef ACPI_DEBUG_OUTPUT
+ const char *opcode_name = "Unknown AML opcode";
+#endif
+
ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
@@ -92,11 +96,54 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
return (&acpi_gbl_aml_op_info
[acpi_gbl_long_op_index[(u8)opcode]]);
}
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
+#include "asldefine.h"
+
+ switch (opcode) {
+ case AML_RAW_DATA_BYTE:
+ opcode_name = "-Raw Data Byte-";
+ break;
+
+ case AML_RAW_DATA_WORD:
+ opcode_name = "-Raw Data Word-";
+ break;
+
+ case AML_RAW_DATA_DWORD:
+ opcode_name = "-Raw Data Dword-";
+ break;
+
+ case AML_RAW_DATA_QWORD:
+ opcode_name = "-Raw Data Qword-";
+ break;
+
+ case AML_RAW_DATA_BUFFER:
+ opcode_name = "-Raw Data Buffer-";
+ break;
+
+ case AML_RAW_DATA_CHAIN:
+ opcode_name = "-Raw Data Buffer Chain-";
+ break;
+
+ case AML_PACKAGE_LENGTH:
+ opcode_name = "-Package Length-";
+ break;
+
+ case AML_UNASSIGNED_OPCODE:
+ opcode_name = "-Unassigned Opcode-";
+ break;
+
+ case AML_DEFAULT_ARG_OP:
+ opcode_name = "-Default Arg-";
+ break;
+
+ default:
+ break;
+ }
+#endif
/* Unknown AML opcode */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Unknown AML opcode [%4.4X]\n", opcode));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 79d9a28dedef..6d27b597394e 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 6a4b6fb39f32..32d250feea21 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 877dc0de8df3..0b64181e7720 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 91fa73a6e55e..3cd48802eede 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index abd65624754f..9cb07e1e76d9 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index fcb7a840e996..e135acaa5e1c 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index f3a9276ac665..916fd095ff34 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index b60c9cf82862..689556744b03 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -636,7 +636,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
for (index = 0; index < number_of_elements; index++) {
- /* Dereference the sub-package */
+ /* Dereference the subpackage */
package_element = *top_object_list;
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 3a2ace93e62c..75d369050657 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
*/
user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
- /* Each sub-package must be of length 4 */
+ /* Each subpackage must be of length 4 */
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
@@ -283,7 +283,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
}
/*
- * Dereference the sub-package.
+ * Dereference the subpackage.
* The sub_object_list will now point to an array of the four IRQ
* elements: [Address, Pin, Source, source_index]
*/
@@ -292,7 +292,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 1) First subobject: Dereference the PRT.Address */
obj_desc = sub_object_list[0];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].Address) Need Integer, found %s",
index,
@@ -305,7 +305,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 2) Second subobject: Dereference the PRT.Pin */
obj_desc = sub_object_list[1];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].Pin) Need Integer, found %s",
index,
@@ -394,7 +394,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* 4) Fourth subobject: Dereference the PRT.source_index */
obj_desc = sub_object_list[3];
- if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+ if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"(PRT[%u].SourceIndex) Need Integer, found %s",
index,
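The added NULL checks matter because a malformed _PRT subpackage can leave sub_object_list entries unset, so the element pointer itself may be NULL before its type is ever examined. A reduced sketch of the pattern, with a hypothetical helper name and the ACPICA-internal headers assumed:

/* Validate one _PRT subpackage element before it is dereferenced */
static acpi_status
check_prt_integer(union acpi_operand_object *element,
		  const char *field_name, u32 index)
{
	if (!element || element->common.type != ACPI_TYPE_INTEGER) {
		ACPI_ERROR((AE_INFO,
			    "(PRT[%u].%s) Need Integer", index, field_name));
		return (AE_BAD_DATA);
	}

	return (AE_OK);
}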
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 8a2d4986b0aa..c3c56b5a9788 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 46192bd53653..2f9332d5c973 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdumpinfo")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 41fed78e0de6..9d3f8a9a24bd 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,8 +132,7 @@ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
acpi_rs_convert_uart_serial_bus,
};
-#ifdef ACPI_FUTURE_USAGE
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
/* Dispatch table for resource dump functions */
@@ -168,7 +167,6 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
};
#endif
-#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
* Includes size of the descriptor header (1 byte for small descriptors,
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index ca183755a6f9..19d64873290a 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 364decc1028a..3461f7db26df 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 6053aa182093..77291293af64 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index ebc773a1b350..eab4483ff5f8 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c99cec9cefde..41eea4bc089c 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index fe49fc43e10f..9e8407223d95 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 14a7982c9961..897a5ceb0420 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 01e476988aae..877ab9202133 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 9d99f2189693..ec14588254d4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,10 +56,11 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
static void acpi_tb_convert_fadt(void);
-static void acpi_tb_validate_fadt(void);
-
static void acpi_tb_setup_fadt_registers(void);
+static u64
+acpi_tb_select_address(char *register_name, u32 address32, u64 address64);
+
/* Table for conversion of FADT to common internal format and FADT validation */
typedef struct acpi_fadt_info {
@@ -175,6 +176,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
* space_id - ACPI Space ID for this register
* byte_width - Width of this register
* address - Address of the register
+ * register_name - ASCII name of the ACPI register
*
* RETURN: None
*
@@ -220,6 +222,68 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_select_address
+ *
+ * PARAMETERS: register_name - ASCII name of the ACPI register
+ * address32 - 32-bit address of the register
+ * address64 - 64-bit address of the register
+ *
+ * RETURN: The resolved 64-bit address
+ *
+ * DESCRIPTION: Select between 32-bit and 64-bit versions of addresses within
+ * the FADT. Used for the FACS and DSDT addresses.
+ *
+ * NOTES:
+ *
+ * Check for FACS and DSDT address mismatches. An address mismatch between
+ * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
+ * DSDT/X_DSDT) could be a corrupted address field or it might indicate
+ * the presence of two FACS or two DSDT tables.
+ *
+ * November 2013:
+ * By default, as per the ACPI specification, a valid 64-bit address is
+ * used regardless of the value of the 32-bit address. However, this
+ * behavior can be overridden via the acpi_gbl_use32_bit_fadt_addresses flag.
+ *
+ ******************************************************************************/
+
+static u64
+acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
+{
+
+ if (!address64) {
+
+ /* 64-bit address is zero, use 32-bit address */
+
+ return ((u64)address32);
+ }
+
+ if (address32 && (address64 != (u64)address32)) {
+
+ /* Address mismatch between 32-bit and 64-bit versions */
+
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X %s address mismatch in FADT: "
+ "0x%8.8X/0x%8.8X%8.8X, using %u-bit address",
+ register_name, address32,
+ ACPI_FORMAT_UINT64(address64),
+ acpi_gbl_use32_bit_fadt_addresses ? 32 :
+ 64));
+
+ /* 32-bit address override */
+
+ if (acpi_gbl_use32_bit_fadt_addresses) {
+ return ((u64)address32);
+ }
+ }
+
+ /* Default is to use the 64-bit address */
+
+ return (address64);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_parse_fadt
*
* PARAMETERS: table_index - Index for the FADT
@@ -331,10 +395,6 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
acpi_tb_convert_fadt();
- /* Validate FADT values now, before we make any changes */
-
- acpi_tb_validate_fadt();
-
/* Initialize the global ACPI register structures */
acpi_tb_setup_fadt_registers();
@@ -344,66 +404,55 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
*
* FUNCTION: acpi_tb_convert_fadt
*
- * PARAMETERS: None, uses acpi_gbl_FADT
+ * PARAMETERS: none - acpi_gbl_FADT is used.
*
* RETURN: None
*
* DESCRIPTION: Converts all versions of the FADT to a common internal format.
- * Expand 32-bit addresses to 64-bit as necessary.
+ * Expand 32-bit addresses to 64-bit as necessary. Also validate
+ * important fields within the FADT.
*
- * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
- * and must contain a copy of the actual FADT.
+ * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt), and must
+ * contain a copy of the actual BIOS-provided FADT.
*
* Notes on 64-bit register addresses:
*
* After this FADT conversion, later ACPICA code will only use the 64-bit "X"
* fields of the FADT for all ACPI register addresses.
*
- * The 64-bit "X" fields are optional extensions to the original 32-bit FADT
+ * The 64-bit X fields are optional extensions to the original 32-bit FADT
* V1.0 fields. Even if they are present in the FADT, they are optional and
* are unused if the BIOS sets them to zero. Therefore, we must copy/expand
- * 32-bit V1.0 fields if the corresponding X field is zero.
+ * 32-bit V1.0 fields to the 64-bit X fields if the 64-bit X field is
+ * originally zero.
*
- * For ACPI 1.0 FADTs, all 32-bit address fields are expanded to the
- * corresponding "X" fields in the internal FADT.
+ * For ACPI 1.0 FADTs (that contain no 64-bit addresses), all 32-bit address
+ * fields are expanded to the corresponding 64-bit X fields in the internal
+ * common FADT.
*
* For ACPI 2.0+ FADTs, all valid (non-zero) 32-bit address fields are expanded
- * to the corresponding 64-bit X fields. For compatibility with other ACPI
- * implementations, we ignore the 64-bit field if the 32-bit field is valid,
- * regardless of whether the host OS is 32-bit or 64-bit.
+ * to the corresponding 64-bit X fields, if the 64-bit field is originally
+ * zero. Adhering to the ACPI specification, we completely ignore the 32-bit
+ * field if the 64-bit field is valid, regardless of whether the host OS is
+ * 32-bit or 64-bit.
+ *
+ * Possible additional checks:
+ * (acpi_gbl_FADT.pm1_event_length >= 4)
+ * (acpi_gbl_FADT.pm1_control_length >= 2)
+ * (acpi_gbl_FADT.pm_timer_length >= 4)
+ * Gpe block lengths must be multiple of 2
*
******************************************************************************/
static void acpi_tb_convert_fadt(void)
{
+ char *name;
struct acpi_generic_address *address64;
u32 address32;
+ u8 length;
u32 i;
/*
- * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
- * Later code will always use the X 64-bit field. Also, check for an
- * address mismatch between the 32-bit and 64-bit address fields
- * (FIRMWARE_CTRL/X_FIRMWARE_CTRL, DSDT/X_DSDT) which would indicate
- * the presence of two FACS or two DSDT tables.
- */
- if (!acpi_gbl_FADT.Xfacs) {
- acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
- } else if (acpi_gbl_FADT.facs &&
- (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
- ACPI_WARNING((AE_INFO,
- "32/64 FACS address mismatch in FADT - two FACS tables!"));
- }
-
- if (!acpi_gbl_FADT.Xdsdt) {
- acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
- } else if (acpi_gbl_FADT.dsdt &&
- (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
- ACPI_WARNING((AE_INFO,
- "32/64 DSDT address mismatch in FADT - two DSDT tables!"));
- }
-
- /*
* For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
* should be zero are indeed zero. This will workaround BIOSs that
* inadvertently place values in these fields.
@@ -421,119 +470,24 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.boot_flags = 0;
}
- /* Update the local FADT table header length */
-
- acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
/*
- * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
- * generic address structures as necessary. Later code will always use
- * the 64-bit address structures.
- *
- * March 2009:
- * We now always use the 32-bit address if it is valid (non-null). This
- * is not in accordance with the ACPI specification which states that
- * the 64-bit address supersedes the 32-bit version, but we do this for
- * compatibility with other ACPI implementations. Most notably, in the
- * case where both the 32 and 64 versions are non-null, we use the 32-bit
- * version. This is the only address that is guaranteed to have been
- * tested by the BIOS manufacturer.
+ * Now we can update the local FADT length to the length of the
+ * current FADT version as defined by the ACPI specification.
+ * Thus, we will have a common FADT internally.
*/
- for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
- address32 = *ACPI_ADD_PTR(u32,
- &acpi_gbl_FADT,
- fadt_info_table[i].address32);
-
- address64 = ACPI_ADD_PTR(struct acpi_generic_address,
- &acpi_gbl_FADT,
- fadt_info_table[i].address64);
-
- /*
- * If both 32- and 64-bit addresses are valid (non-zero),
- * they must match.
- */
- if (address64->address && address32 &&
- (address64->address != (u64)address32)) {
- ACPI_BIOS_ERROR((AE_INFO,
- "32/64X address mismatch in FADT/%s: "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- fadt_info_table[i].name, address32,
- ACPI_FORMAT_UINT64(address64->
- address)));
- }
-
- /* Always use 32-bit address if it is valid (non-null) */
-
- if (address32) {
- /*
- * Copy the 32-bit address to the 64-bit GAS structure. The
- * Space ID is always I/O for 32-bit legacy address fields
- */
- acpi_tb_init_generic_address(address64,
- ACPI_ADR_SPACE_SYSTEM_IO,
- *ACPI_ADD_PTR(u8,
- &acpi_gbl_FADT,
- fadt_info_table
- [i].length),
- (u64) address32,
- fadt_info_table[i].name);
- }
- }
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_validate_fadt
- *
- * PARAMETERS: table - Pointer to the FADT to be validated
- *
- * RETURN: None
- *
- * DESCRIPTION: Validate various important fields within the FADT. If a problem
- * is found, issue a message, but no status is returned.
- * Used by both the table manager and the disassembler.
- *
- * Possible additional checks:
- * (acpi_gbl_FADT.pm1_event_length >= 4)
- * (acpi_gbl_FADT.pm1_control_length >= 2)
- * (acpi_gbl_FADT.pm_timer_length >= 4)
- * Gpe block lengths must be multiple of 2
- *
- ******************************************************************************/
-
-static void acpi_tb_validate_fadt(void)
-{
- char *name;
- struct acpi_generic_address *address64;
- u8 length;
- u32 i;
+ acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
/*
- * Check for FACS and DSDT address mismatches. An address mismatch between
- * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
- * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
+ * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+ * Later ACPICA code will always use the X 64-bit field.
*/
- if (acpi_gbl_FADT.facs &&
- (acpi_gbl_FADT.Xfacs != (u64)acpi_gbl_FADT.facs)) {
- ACPI_BIOS_WARNING((AE_INFO,
- "32/64X FACS address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.facs,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
-
- acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs;
- }
-
- if (acpi_gbl_FADT.dsdt &&
- (acpi_gbl_FADT.Xdsdt != (u64)acpi_gbl_FADT.dsdt)) {
- ACPI_BIOS_WARNING((AE_INFO,
- "32/64X DSDT address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.dsdt,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
+ acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
+ acpi_gbl_FADT.facs,
+ acpi_gbl_FADT.Xfacs);
- acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt;
- }
+ acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
+ acpi_gbl_FADT.dsdt,
+ acpi_gbl_FADT.Xdsdt);
/* If Hardware Reduced flag is set, we are all done */
@@ -545,18 +499,95 @@ static void acpi_tb_validate_fadt(void)
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
/*
- * Generate pointer to the 64-bit address, get the register
- * length (width) and the register name
+ * Get the 32-bit and 64-bit addresses, as well as the register
+ * length and register name.
*/
+ address32 = *ACPI_ADD_PTR(u32,
+ &acpi_gbl_FADT,
+ fadt_info_table[i].address32);
+
address64 = ACPI_ADD_PTR(struct acpi_generic_address,
&acpi_gbl_FADT,
fadt_info_table[i].address64);
- length =
- *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
- fadt_info_table[i].length);
+
+ length = *ACPI_ADD_PTR(u8,
+ &acpi_gbl_FADT,
+ fadt_info_table[i].length);
+
name = fadt_info_table[i].name;
/*
+ * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
+ * generic address structures as necessary. Later code will always use
+ * the 64-bit address structures.
+ *
+ * November 2013:
+ * Now always use the 64-bit address if it is valid (non-zero), in
+ * accordance with the ACPI specification which states that a 64-bit
+ * address supersedes the 32-bit version. This behavior can be
+ * overridden by the acpi_gbl_use32_bit_fadt_addresses flag.
+ *
+ * During 64-bit address construction and verification,
+ * these cases are handled:
+ *
+ * Address32 zero, Address64 [don't care] - Use Address64
+ *
+ * Address32 non-zero, Address64 zero - Copy/use Address32
+ * Address32 non-zero == Address64 non-zero - Use Address64
+ * Address32 non-zero != Address64 non-zero - Warning, use Address64
+ *
+ * Override: if acpi_gbl_use32_bit_fadt_addresses is TRUE, and:
+ * Address32 non-zero != Address64 non-zero - Warning, copy/use Address32
+ *
+ * Note: space_id is always I/O for 32-bit legacy address fields
+ */
+ if (address32) {
+ if (!address64->address) {
+
+ /* 64-bit address is zero, use 32-bit address */
+
+ acpi_tb_init_generic_address(address64,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ *ACPI_ADD_PTR(u8,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].
+ length),
+ (u64)address32,
+ name);
+ } else if (address64->address != (u64)address32) {
+
+ /* Address mismatch */
+
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X address mismatch in FADT/%s: "
+ "0x%8.8X/0x%8.8X%8.8X, using %u-bit address",
+ name, address32,
+ ACPI_FORMAT_UINT64
+ (address64->address),
+ acpi_gbl_use32_bit_fadt_addresses
+ ? 32 : 64));
+
+ if (acpi_gbl_use32_bit_fadt_addresses) {
+
+ /* 32-bit address override */
+
+ acpi_tb_init_generic_address(address64,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ *ACPI_ADD_PTR
+ (u8,
+ &acpi_gbl_FADT,
+ fadt_info_table
+ [i].
+ length),
+ (u64)
+ address32,
+ name);
+ }
+ }
+ }
+
+ /*
* For each extended field, check for length mismatch between the
* legacy length field and the corresponding 64-bit X length field.
* Note: If the legacy length field is > 0xFF bits, ignore this
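acpi_tb_select_address() centralizes the FACS/DSDT selection policy spelled out in its NOTES block: the 64-bit field wins by default, the 32-bit field is used when the 64-bit one is zero, and on a mismatch the acpi_gbl_use32_bit_fadt_addresses override decides. A standalone restatement of that decision table (illustrative only, not the patched function):

/* Same policy as acpi_tb_select_address(), reduced to the bare decision */
static unsigned long long
select_fadt_address(unsigned int addr32, unsigned long long addr64,
		    int prefer_32bit_on_mismatch)
{
	if (!addr64)
		return addr32;		/* only the 32-bit field is set */

	if (addr32 && addr64 != (unsigned long long)addr32)
		return prefer_32bit_on_mismatch ? addr32 : addr64;

	return addr64;			/* 64-bit field wins by default */
}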
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e4f4f02d49e7..c12003947bd5 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 634357d51fe9..e3040947e9a0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -292,10 +292,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
new_table = acpi_os_map_memory(new_address, new_table_length);
if (!new_table) {
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
- "%4.4s %p Attempted physical table override failed",
+ "%4.4s " ACPI_PRINTF_UINT
+ " Attempted physical table override failed",
table_header->signature,
- ACPI_CAST_PTR(void,
- table_desc->address)));
+ ACPI_FORMAT_TO_UINT(table_desc->
+ address)));
return (NULL);
}
@@ -308,11 +309,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
finish_override:
- ACPI_INFO((AE_INFO,
- "%4.4s %p %s table override, new table: %p",
+ ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
+ " %s table override, new table: " ACPI_PRINTF_UINT,
table_header->signature,
- ACPI_CAST_PTR(void, table_desc->address),
- override_type, new_table));
+ ACPI_FORMAT_TO_UINT(table_desc->address),
+ override_type, ACPI_FORMAT_TO_UINT(new_table)));
/* We can now unmap/delete the original table (if fully mapped) */
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 6866e767ba90..df3bb20ea325 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -128,15 +128,17 @@ acpi_tb_print_table_header(acpi_physical_address address,
struct acpi_table_header local_header;
/*
- * The reason that the Address is cast to a void pointer is so that we
- * can use %p which will work properly on both 32-bit and 64-bit hosts.
+ * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
+ * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
+ * The %p specifier does not emit uniform output on all hosts. On some,
+ * leading zeros are not supported.
*/
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %06X",
- header->signature, ACPI_CAST_PTR(void, address),
+ ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
+ header->signature, ACPI_FORMAT_TO_UINT(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -147,8 +149,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
- ACPI_CAST_PTR(void, address),
+ ACPI_INFO((AE_INFO,
+ "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
+ ACPI_FORMAT_TO_UINT(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -162,8 +165,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
- local_header.signature, ACPI_CAST_PTR(void, address),
+ "%-4.4s " ACPI_PRINTF_UINT
+ " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+ local_header.signature, ACPI_FORMAT_TO_UINT(address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
local_header.oem_revision,
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 3d6bb83aa7e7..a4702eee91a8 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,69 +49,11 @@
ACPI_MODULE_NAME("tbutils")
/* Local prototypes */
+static acpi_status acpi_tb_validate_xsdt(acpi_physical_address address);
+
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_check_xsdt
- *
- * PARAMETERS: address - Pointer to the XSDT
- *
- * RETURN: status
- * AE_OK - XSDT is okay
- * AE_NO_MEMORY - can't map XSDT
- * AE_INVALID_TABLE_LENGTH - invalid table length
- * AE_NULL_ENTRY - XSDT has NULL entry
- *
- * DESCRIPTION: validate XSDT
-******************************************************************************/
-
-static acpi_status
-acpi_tb_check_xsdt(acpi_physical_address address)
-{
- struct acpi_table_header *table;
- u32 length;
- u64 xsdt_entry_address;
- u8 *table_entry;
- u32 table_count;
- int i;
-
- table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
- if (!table)
- return AE_NO_MEMORY;
-
- length = table->length;
- acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
- if (length < sizeof(struct acpi_table_header))
- return AE_INVALID_TABLE_LENGTH;
-
- table = acpi_os_map_memory(address, length);
- if (!table)
- return AE_NO_MEMORY;
-
- /* Calculate the number of tables described in XSDT */
- table_count =
- (u32) ((table->length -
- sizeof(struct acpi_table_header)) / sizeof(u64));
- table_entry =
- ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- for (i = 0; i < table_count; i++) {
- ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
- if (!xsdt_entry_address) {
- /* XSDT has NULL entry */
- break;
- }
- table_entry += sizeof(u64);
- }
- acpi_os_unmap_memory(table, length);
-
- if (i < table_count)
- return AE_NULL_ENTRY;
- else
- return AE_OK;
-}
-
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
@@ -383,7 +325,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
* Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
* Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
*/
- if (table_entry_size == sizeof(u32)) {
+ if (table_entry_size == ACPI_RSDT_ENTRY_SIZE) {
/*
* 32-bit platform, RSDT: Return 32-bit table entry
* 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
@@ -415,6 +357,87 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_validate_xsdt
+ *
+ * PARAMETERS: address - Physical address of the XSDT (from RSDP)
+ *
+ * RETURN: Status. AE_OK if the table appears to be valid.
+ *
+ * DESCRIPTION: Validate an XSDT to ensure that it is of minimum size and does
+ * not contain any NULL entries. A problem that is seen in the
+ * field is that the XSDT exists, but is actually useless because
+ * of one or more (or all) NULL entries.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_tb_validate_xsdt(acpi_physical_address xsdt_address)
+{
+ struct acpi_table_header *table;
+ u8 *next_entry;
+ acpi_physical_address address;
+ u32 length;
+ u32 entry_count;
+ acpi_status status;
+ u32 i;
+
+ /* Get the XSDT length */
+
+ table =
+ acpi_os_map_memory(xsdt_address, sizeof(struct acpi_table_header));
+ if (!table) {
+ return (AE_NO_MEMORY);
+ }
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+ /*
+ * Minimum XSDT length is the size of the standard ACPI header
+ * plus one physical address entry
+ */
+ if (length < (sizeof(struct acpi_table_header) + ACPI_XSDT_ENTRY_SIZE)) {
+ return (AE_INVALID_TABLE_LENGTH);
+ }
+
+ /* Map the entire XSDT */
+
+ table = acpi_os_map_memory(xsdt_address, length);
+ if (!table) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Get the number of entries and pointer to first entry */
+
+ status = AE_OK;
+ next_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
+ entry_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
+ ACPI_XSDT_ENTRY_SIZE);
+
+ /* Validate each entry (physical address) within the XSDT */
+
+ for (i = 0; i < entry_count; i++) {
+ address =
+ acpi_tb_get_root_table_entry(next_entry,
+ ACPI_XSDT_ENTRY_SIZE);
+ if (!address) {
+
+ /* Detected a NULL entry, XSDT is invalid */
+
+ status = AE_NULL_ENTRY;
+ break;
+ }
+
+ next_entry += ACPI_XSDT_ENTRY_SIZE;
+ }
+
+ /* Unmap table */
+
+ acpi_os_unmap_memory(table, length);
+ return (status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_parse_root_table
*
* PARAMETERS: rsdp - Pointer to the RSDP
@@ -438,16 +461,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
u32 table_count;
struct acpi_table_header *table;
acpi_physical_address address;
- acpi_physical_address uninitialized_var(rsdt_address);
u32 length;
u8 *table_entry;
acpi_status status;
ACPI_FUNCTION_TRACE(tb_parse_root_table);
- /*
- * Map the entire RSDP and extract the address of the RSDT or XSDT
- */
+ /* Map the entire RSDP and extract the address of the RSDT or XSDT */
+
rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
if (!rsdp) {
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -457,24 +478,22 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
ACPI_CAST_PTR(struct acpi_table_header,
rsdp));
- /* Differentiate between RSDT and XSDT root tables */
+ /* Use XSDT if present and not overridden. Otherwise, use RSDT */
- if (rsdp->revision > 1 && rsdp->xsdt_physical_address
- && !acpi_rsdt_forced) {
+ if ((rsdp->revision > 1) &&
+ rsdp->xsdt_physical_address && !acpi_gbl_do_not_use_xsdt) {
/*
- * Root table is an XSDT (64-bit physical addresses). We must use the
- * XSDT if the revision is > 1 and the XSDT pointer is present, as per
- * the ACPI specification.
+ * RSDP contains an XSDT (64-bit physical addresses). We must use
+ * the XSDT if the revision is > 1 and the XSDT pointer is present,
+ * as per the ACPI specification.
*/
address = (acpi_physical_address) rsdp->xsdt_physical_address;
- table_entry_size = sizeof(u64);
- rsdt_address = (acpi_physical_address)
- rsdp->rsdt_physical_address;
+ table_entry_size = ACPI_XSDT_ENTRY_SIZE;
} else {
/* Root table is an RSDT (32-bit physical addresses) */
address = (acpi_physical_address) rsdp->rsdt_physical_address;
- table_entry_size = sizeof(u32);
+ table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
/*
@@ -483,15 +502,25 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
- if (table_entry_size == sizeof(u64)) {
- if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
- /* XSDT has NULL entry, RSDT is used */
- address = rsdt_address;
- table_entry_size = sizeof(u32);
- ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
- "using RSDT"));
+ /*
+ * If it is present and used, validate the XSDT for access/size
+ * and ensure that all table entries are at least non-NULL
+ */
+ if (table_entry_size == ACPI_XSDT_ENTRY_SIZE) {
+ status = acpi_tb_validate_xsdt(address);
+ if (ACPI_FAILURE(status)) {
+ ACPI_BIOS_WARNING((AE_INFO,
+ "XSDT is invalid (%s), using RSDT",
+ acpi_format_exception(status)));
+
+ /* Fall back to the RSDT */
+
+ address =
+ (acpi_physical_address) rsdp->rsdt_physical_address;
+ table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
}
+
/* Map the RSDT/XSDT table header to get the full table length */
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
@@ -501,12 +530,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
acpi_tb_print_table_header(address, table);
- /* Get the length of the full table, verify length and map entire table */
-
+ /*
+ * Validate length of the table, and map entire table.
+ * Minimum length table must contain at least one entry.
+ */
length = table->length;
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
- if (length < sizeof(struct acpi_table_header)) {
+ if (length < (sizeof(struct acpi_table_header) + table_entry_size)) {
ACPI_BIOS_ERROR((AE_INFO,
"Invalid table length 0x%X in RSDT/XSDT",
length));
@@ -526,22 +557,21 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
return_ACPI_STATUS(status);
}
- /* Calculate the number of tables described in the root table */
+ /* Get the number of entries and pointer to first entry */
table_count = (u32)((table->length - sizeof(struct acpi_table_header)) /
table_entry_size);
+ table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
+
/*
* First two entries in the table array are reserved for the DSDT
* and FACS, which are not actually present in the RSDT/XSDT - they
* come from the FADT
*/
- table_entry =
- ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
acpi_gbl_root_table_list.current_table_count = 2;
- /*
- * Initialize the root table array from the RSDT/XSDT
- */
+ /* Initialize the root table array from the RSDT/XSDT */
+
for (i = 0; i < table_count; i++) {
if (acpi_gbl_root_table_list.current_table_count >=
acpi_gbl_root_table_list.max_table_count) {
@@ -584,7 +614,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
- /* Special case for FADT - get the DSDT and FACS */
+ /* Special case for FADT - validate it then get the DSDT and FACS */
if (ACPI_COMPARE_NAME
(&acpi_gbl_root_table_list.tables[i].signature,
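Both acpi_tb_validate_xsdt() and the reworked length check in acpi_tb_parse_root_table() enforce the same minimum: an ACPI table header plus at least one root-table entry. A small worked sketch of the arithmetic, with the 36-byte header size and 8-byte XSDT entry size taken from the ACPI definitions:

/* Entry-count arithmetic behind the new minimum-length checks */
static unsigned int xsdt_entry_count(unsigned int table_length)
{
	const unsigned int header = 36;	/* sizeof(struct acpi_table_header) */
	const unsigned int entry = 8;	/* ACPI_XSDT_ENTRY_SIZE */

	if (table_length < header + entry)
		return 0;		/* would be rejected: AE_INVALID_TABLE_LENGTH */

	return (table_length - header) / entry;	/* e.g. length 76 -> 5 entries */
}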
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index db826eaadd1c..a1593159d9ea 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 60b5a871833c..0909420fc776 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index e4e1468877c3..65ab8fed3d5e 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index e0a2e2779c2e..a1acec9d2ef3 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -224,10 +224,11 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
while (range_info) {
/*
- * Check if the requested Address/Length overlaps this address_range.
- * Four cases to consider:
+ * Check if the requested address/length overlaps this
+ * address range. There are four cases to consider:
*
- * 1) Input address/length is contained completely in the address range
+ * 1) Input address/length is contained completely in the
+ * address range
* 2) Input address/length overlaps range at the range start
* 3) Input address/length overlaps range at the range end
* 4) Input address/length completely encompasses the range
@@ -244,11 +245,17 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
region_node);
ACPI_WARNING((AE_INFO,
- "0x%p-0x%p %s conflicts with Region %s %d",
+ "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)",
+ acpi_ut_get_region_name(space_id),
ACPI_CAST_PTR(void, address),
ACPI_CAST_PTR(void, end_address),
- acpi_ut_get_region_name(space_id),
- pathname, overlap_count));
+ ACPI_CAST_PTR(void,
+ range_info->
+ start_address),
+ ACPI_CAST_PTR(void,
+ range_info->
+ end_address),
+ pathname));
ACPI_FREE(pathname);
}
}
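The rewritten warning now reports both ends of each conflicting range. The four overlap cases listed in the comment all reduce to the usual interval-intersection test, sketched here as a hypothetical helper:

/* Closed ranges [a_start, a_end] and [b_start, b_end] overlap, in all four cases, iff: */
static int ranges_overlap(u64 a_start, u64 a_end, u64 b_start, u64 b_end)
{
	return (a_start <= b_end) && (b_start <= a_end);
}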
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 814267f52715..efac83c606dc 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -302,9 +302,13 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
return (AE_BUFFER_OVERFLOW);
case ACPI_ALLOCATE_BUFFER:
-
- /* Allocate a new buffer */
-
+ /*
+ * Allocate a new buffer. We directly call acpi_os_allocate here to
+ * purposefully bypass the (optionally enabled) internal allocation
+ * tracking mechanism since we only want to track internal
+ * allocations. Note: The caller should use acpi_os_free to free this
+ * buffer created via ACPI_ALLOCATE_BUFFER.
+ */
buffer->pointer = acpi_os_allocate(required_length);
break;
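The comment makes the pairing explicit: a buffer produced via ACPI_ALLOCATE_BUFFER comes from acpi_os_allocate() and bypasses the internal allocation tracker, so it should be released with acpi_os_free() rather than ACPI_FREE(). A typical caller sketch under that convention (the handle and the "_STR" path are placeholders):

struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;

status = acpi_evaluate_object(handle, "_STR", NULL, &buf);
if (ACPI_SUCCESS(status)) {
	/* ... consume buf.pointer / buf.length ... */
	acpi_os_free(buf.pointer);	/* matches acpi_os_allocate() */
	buf.pointer = NULL;
}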
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 11fde93be120..3c1699740653 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 366bfec4b770..78fde0aac487 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -248,12 +248,12 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
ACPI_FUNCTION_NAME(os_acquire_object);
if (!cache) {
- return (NULL);
+ return_PTR(NULL);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
ACPI_MEM_TRACKING(cache->requests++);
@@ -276,7 +276,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
/* Clear (zero) the previously used Object */
@@ -299,15 +299,15 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ return_PTR(NULL);
}
object = ACPI_ALLOCATE_ZEROED(cache->object_size);
if (!object) {
- return (NULL);
+ return_PTR(NULL);
}
}
- return (object);
+ return_PTR(object);
}
#endif /* ACPI_USE_LOCAL_CACHE */
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index edff4e653d9a..270c16464dd9 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -535,10 +535,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_LOCAL_REFERENCE:
- /* TBD: should validate incoming handle */
+ /* An incoming reference is defined to be a namespace node */
- internal_object->reference.class = ACPI_REFCLASS_NAME;
- internal_object->reference.node =
+ internal_object->reference.class = ACPI_REFCLASS_REFOF;
+ internal_object->reference.object =
external_object->reference.handle;
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 03ae8affe48f..21a20ac5b1e1 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -194,9 +194,9 @@ acpi_debug_print(u32 requested_debug_level,
*/
acpi_os_printf("%9s-%04ld ", module_name, line_number);
-#ifdef ACPI_EXEC_APP
+#ifdef ACPI_APPLICATION
/*
- * For acpi_exec only, emit the thread ID and nesting level.
+ * For acpi_exec/iASL only, emit the thread ID and nesting level.
* Note: nesting level is really only useful during a single-thread
* execution. Otherwise, multiple threads will keep resetting the
* level.
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index b3f31dd89a45..fbfa9eca011f 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c07d2227ea42..a3516de213fa 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
union acpi_operand_object *handler_desc;
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
+ union acpi_operand_object *start_desc;
union acpi_operand_object **last_obj_ptr;
ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
@@ -235,10 +236,11 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
if (handler_desc) {
next_desc =
handler_desc->address_space.region_list;
+ start_desc = next_desc;
last_obj_ptr =
&handler_desc->address_space.region_list;
- /* Remove the region object from the handler's list */
+ /* Remove the region object from the handler list */
while (next_desc) {
if (next_desc == object) {
@@ -247,10 +249,19 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
break;
}
- /* Walk the linked list of handler */
+ /* Walk the linked list of handlers */
last_obj_ptr = &next_desc->region.next;
next_desc = next_desc->region.next;
+
+ /* Prevent infinite loop if list is corrupted */
+
+ if (next_desc == start_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Circular region list in address handler object %p",
+ handler_desc));
+ return_VOID;
+ }
}
if (handler_desc->address_space.handler_flags &
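The new start_desc bookkeeping is a cycle guard: the walk remembers the first region object on the handler's list and bails out with an error if the list ever wraps back to it, instead of spinning forever on a corrupted list. The same defensive walk over a generic singly linked list, as a standalone sketch:

struct node {
	struct node *next;
};

/* Find target in the list, giving up if the list loops back to its
 * head -- that indicates corruption rather than a normal terminator. */
static struct node *find_with_cycle_guard(struct node *head, struct node *target)
{
	struct node *cur = head;

	while (cur) {
		if (cur == target)
			return cur;

		cur = cur->next;
		if (cur == head)	/* wrapped around: corrupted list */
			return NULL;
	}
	return NULL;
}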
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 154fdcaa5830..8e544d4688cd 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 16fb90506db7..8fed1482d228 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 3cf7b597edb9..0403dcaabaf2 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 81f9a9584451..f3abeae9d2f8 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,31 +55,27 @@ ACPI_MODULE_NAME("utglobal")
* Static global variable initialization.
*
******************************************************************************/
-/*
- * We want the debug switches statically initialized so they
- * are already set when the debugger is entered.
- */
-/* Debug switch - level and trace mask */
+/* Debug output control masks */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
-/* Debug switch - layer (component) mask */
-
u32 acpi_dbg_layer = 0;
-u32 acpi_gbl_nesting_level = 0;
-
-/* Debugger globals */
-u8 acpi_gbl_db_terminate_threads = FALSE;
-u8 acpi_gbl_abort_method = FALSE;
-u8 acpi_gbl_method_executing = FALSE;
+/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-/* System flags */
+struct acpi_table_fadt acpi_gbl_FADT;
+u32 acpi_gbl_trace_flags;
+acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
+u32 acpi_current_gpe_count;
-u32 acpi_gbl_startup_flags = 0;
-
-/* System starts uninitialized */
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
-u8 acpi_gbl_shutdown = TRUE;
+/* Various state name strings */
const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
"\\_S0_",
@@ -335,7 +331,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
- acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
acpi_gbl_ns_lookup_count = 0;
acpi_gbl_ps_find_count = 0;
@@ -382,17 +377,17 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_disable_mem_tracking = FALSE;
#endif
+#ifdef ACPI_DEBUGGER
+ acpi_gbl_db_terminate_threads = FALSE;
+#endif
+
return_ACPI_STATUS(AE_OK);
}
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
-
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
-
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
-
ACPI_EXPORT_SYMBOL(acpi_gpe_count)
-
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index bfca7b4b6731..4b12880e5b11 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c5d1ac44c07d..5f56fc49021e 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 5c26ad420344..dc6e96547f18 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 909fe66e1934..d44dee6ee10a 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 02f9101b65e4..2e2bb14e1099 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 08c323245584..82717fff9ffc 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 517af700399d..dfa9009bfc87 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 8856bd37bc76..685766fc6ca8 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,31 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utosi")
+/******************************************************************************
+ *
+ * ACPICA policy for new _OSI strings:
+ *
+ * It is the stated policy of ACPICA that new _OSI strings will be integrated
+ * into this module as soon as possible after they are defined. It is strongly
+ * recommended that all ACPICA hosts mirror this policy and integrate any
+ * changes to this module as soon as possible. There are several historical
+ * reasons behind this policy:
+ *
+ * 1) New BIOSs tend to test only the case where the host responds TRUE to
+ * the latest version of Windows, which would respond to the latest/newest
+ * _OSI string. Not responding TRUE to the latest version of Windows will
+ * risk executing untested code paths throughout the DSDT and SSDTs.
+ *
+ * 2) If a new _OSI string is recognized only after a significant delay, this
+ * has the potential to cause problems on existing working machines because
+ * of the possibility that a new and different path through the ASL code
+ * will be executed.
+ *
+ * 3) New _OSI strings are tending to come out about once per year. A delay
+ * in recognizing a new string for a significant amount of time risks the
+ * release of another string which only compounds the initial problem.
+ *
+ *****************************************************************************/
/*
* Strings supported by the _OSI predefined control method (which is
* implemented internally within this module.)
@@ -74,6 +99,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
+ {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
/* Feature Group Strings */
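The new "Windows 2013" entry applies the policy spelled out in the comment block above: each released Windows _OSI string is added promptly so firmware that keys its behavior off the newest Windows version stays on tested code paths. Underneath, _OSI evaluation amounts to a string match against this static table; a simplified standalone sketch with hypothetical names and flag values (not the ACPICA implementation):

#include <string.h>

struct osi_entry {
	const char *name;
	unsigned int flags;		/* illustrative feature flag */
};

static const struct osi_entry supported_interfaces[] = {
	{ "Windows 2012", 0x1 },	/* Windows 8 / Server 2012 */
	{ "Windows 2013", 0x1 },	/* Windows 8.1 / Server 2012 R2 */
};

/* Returns 1 (reported as TRUE to the firmware) if the requested
 * interface string is in the supported table, 0 otherwise. */
static int osi_supported(const char *requested)
{
	size_t i;

	for (i = 0; i < sizeof(supported_interfaces) / sizeof(supported_interfaces[0]); i++)
		if (!strcmp(supported_interfaces[i].name, requested))
			return 1;
	return 0;
}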
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index eb3aca761369..36bec57ebd23 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 2b1ce4cd3207..db30caff130a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 2c2accb9e534..14cb6c0c8be2 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 03c4c2febd84..1cc97a752c15 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 45c0eb26b33d..77219336c7e0 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index c0027773cccb..7d0ee969d781 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,8 @@ acpi_ut_free_and_track(void *allocation,
}
acpi_os_free(debug_block);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n",
+ allocation, debug_block));
return_VOID;
}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index be322c83643a..502a8492dc83 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index f7edb88f6054..edd861102f1b 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 75efea0539c1..13380d818462 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -122,8 +122,16 @@ acpi_status __init acpi_initialize_subsystem(void)
/* If configured, initialize the AML debugger */
- ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
- return_ACPI_STATUS(status);
+#ifdef ACPI_DEBUGGER
+ status = acpi_db_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Debugger initialization"));
+ return_ACPI_STATUS(status);
+ }
+#endif
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
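The removed ACPI_DEBUGGER_EXEC() wrapper compiles its argument only when ACPI_DEBUGGER is defined, so there was no place to report a failed acpi_db_initialize(); the explicit #ifdef above logs the failure with ACPI_EXCEPTION() before propagating it. The wrapper is, roughly (hedged sketch of the conditional-execution macro):

#ifdef ACPI_DEBUGGER
#define ACPI_DEBUGGER_EXEC(a)	a
#else
#define ACPI_DEBUGGER_EXEC(a)		/* expands to nothing */
#endif

When the debugger was compiled out, the old return_ACPI_STATUS(status) simply returned whatever the preceding initialization step had left in status.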
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 312299721ba1..2a0f9e04d3a4 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3650b2183227..c4dac7150960 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -12,7 +12,7 @@ config ACPI_APEI
config ACPI_APEI_GHES
bool "APEI Generic Hardware Error Source"
- depends on ACPI_APEI && X86
+ depends on ACPI_APEI
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e55584a072c6..8678dfe5366b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 21ba34a73883..e5bcd919d4e6 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -8,7 +8,6 @@
#include <linux/cper.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
struct apei_exec_context;
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 7dcc8a824aae..1be6f5564485 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -33,7 +33,6 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <acpi/acpi.h>
#include <asm/unaligned.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 46766ef7ef5d..dab7cb7349df 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5876a49dfd38..9a2c63b20050 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,19 +36,16 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/power_supply.h>
+#include "battery.h"
+
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
-#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
-#define ACPI_BATTERY_NOTIFY_STATUS 0x80
-#define ACPI_BATTERY_NOTIFY_INFO 0x81
-#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
@@ -550,7 +547,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
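The %ld to %lu change above matches the conversion specifier to the unsigned long being filled in; the previous signed specifier was a format mismatch of the kind -Wformat reports. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long x;
	const char *buf = "1500\n";

	/* %lu is the correct specifier for unsigned long; %ld here would
	 * pair a signed conversion with an unsigned object. */
	if (sscanf(buf, "%lu\n", &x) == 1)
		printf("alarm threshold = %lu\n", x / 1000);

	return 0;
}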
@@ -737,6 +734,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
+ acpi_notifier_call_chain(device, event, acpi_battery_present(battery));
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat.dev)
power_supply_changed(&battery->bat);
@@ -842,6 +840,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
+#else
+#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
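Defining acpi_battery_resume as NULL when CONFIG_PM_SLEEP is disabled keeps the reference in the SIMPLE_DEV_PM_OPS() line above well-defined in either configuration without wrapping it in another #ifdef. The same shape in a generic driver, as a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_resume(struct device *dev)
{
	/* restore device state after a system sleep transition */
	return 0;
}
#else
#define foo_resume NULL		/* keeps the dev_pm_ops reference valid */
#endif

static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);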
diff --git a/drivers/acpi/battery.h b/drivers/acpi/battery.h
new file mode 100644
index 000000000000..6c084976987d
--- /dev/null
+++ b/drivers/acpi/battery.h
@@ -0,0 +1,10 @@
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS "battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS 0x80
+#define ACPI_BATTERY_NOTIFY_INFO 0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+
+#endif
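The new battery.h exists so that code outside battery.c (the acpi_notifier_call_chain() hook added above, for instance) can share the battery class name and notification codes. A hypothetical consumer of those codes, purely for illustration (the handler and its registration are not part of this patch):

#include <linux/types.h>
#include "battery.h"

/* Hypothetical callback reacting to the notification values that
 * battery.h exports. */
static void example_battery_event(u32 event)
{
	switch (event) {
	case ACPI_BATTERY_NOTIFY_STATUS:
		/* charge/discharge state changed */
		break;
	case ACPI_BATTERY_NOTIFY_INFO:
		/* static battery information (e.g. design values) changed */
		break;
	case ACPI_BATTERY_NOTIFY_THRESHOLD:
		/* a trip point / threshold was crossed or reprogrammed */
		break;
	default:
		break;
	}
}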
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 078c4f7fe2dd..afec4526c48a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/dmi.h>
#include "internal.h"
@@ -261,14 +260,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 15R SE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0710004055c8..e7e5844c87d0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,12 +33,11 @@
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/regulator/machine.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
#include <linux/pci.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/apei.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -52,9 +51,6 @@ struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
-#define STRUCT_TO_INT(s) (*((int*)&s))
-
-
#ifdef CONFIG_X86
static int set_copy_dsdt(const struct dmi_system_id *id)
{
@@ -115,18 +111,16 @@ int acpi_bus_get_status(struct acpi_device *device)
if (ACPI_FAILURE(status))
return -ENODEV;
- STRUCT_TO_INT(device->status) = (int) sta;
+ acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
"functional but not present;\n",
- device->pnp.bus_id,
- (u32) STRUCT_TO_INT(device->status)));
+ device->pnp.bus_id, (u32)sta));
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
- device->pnp.bus_id,
- (u32) STRUCT_TO_INT(device->status)));
+ device->pnp.bus_id, (u32)sta));
return 0;
}
EXPORT_SYMBOL(acpi_bus_get_status);
@@ -317,9 +311,7 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
#endif
-#ifdef ACPI_HOTPLUG_OST
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
-#endif
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -339,58 +331,6 @@ static void acpi_bus_osc_support(void)
Notification Handling
-------------------------------------------------------------------------- */
-static void acpi_bus_check_device(acpi_handle handle)
-{
- struct acpi_device *device;
- acpi_status status;
- struct acpi_device_status old_status;
-
- if (acpi_bus_get_device(handle, &device))
- return;
- if (!device)
- return;
-
- old_status = device->status;
-
- /*
- * Make sure this device's parent is present before we go about
- * messing with the device.
- */
- if (device->parent && !device->parent->status.present) {
- device->status = device->parent->status;
- return;
- }
-
- status = acpi_bus_get_status(device);
- if (ACPI_FAILURE(status))
- return;
-
- if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
- return;
-
- /*
- * Device Insertion/Removal
- */
- if ((device->status.present) && !(old_status.present)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device insertion detected\n"));
- /* TBD: Handle device insertion */
- } else if (!(device->status.present) && (old_status.present)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
- /* TBD: Handle device removal */
- }
-}
-
-static void acpi_bus_check_scope(acpi_handle handle)
-{
- /* Status Change? */
- acpi_bus_check_device(handle);
-
- /*
- * TBD: Enumerate child devices within this device's scope and
- * run acpi_bus_check_device()'s on them.
- */
-}
-
/**
* acpi_bus_notify
* ---------------
@@ -398,68 +338,77 @@ static void acpi_bus_check_scope(acpi_handle handle)
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *adev;
struct acpi_driver *driver;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
- type, handle));
+ acpi_status status;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
switch (type) {
-
case ACPI_NOTIFY_BUS_CHECK:
- acpi_bus_check_scope(handle);
- /*
- * TBD: We'll need to outsource certain events to non-ACPI
- * drivers via the device manager (device.c).
- */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_bus_check_device(handle);
- /*
- * TBD: We'll need to outsource certain events to non-ACPI
- * drivers via the device manager (device.c).
- */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
break;
case ACPI_NOTIFY_DEVICE_WAKE:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- /* TBD */
+ acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+ acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
break;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- /* TBD */
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a frequency mismatch\n");
break;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- /* TBD */
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a bus mode mismatch\n");
break;
case ACPI_NOTIFY_POWER_FAULT:
- /* TBD */
+ acpi_handle_err(handle, "Device has suffered a power fault\n");
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received unknown/unsupported notification [%08x]\n",
- type));
- break;
+ acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+ ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
+ goto err;
}
- acpi_bus_get_device(handle, &device);
- if (device) {
- driver = device->driver;
- if (driver && driver->ops.notify &&
- (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
- driver->ops.notify(device, type);
+ adev = acpi_bus_get_acpi_device(handle);
+ if (!adev)
+ goto err;
+
+ driver = adev->driver;
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(adev, type);
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ status = acpi_hotplug_schedule(adev, type);
+ if (ACPI_SUCCESS(status))
+ return;
+ default:
+ break;
}
+ acpi_bus_put_acpi_device(adev);
+ return;
+
+ err:
+ acpi_evaluate_ost(handle, type, ost_code, NULL);
}
/* --------------------------------------------------------------------------
@@ -576,6 +525,14 @@ void __init acpi_early_init(void)
goto error0;
}
+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
+
return;
error0:
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index c971929d75c2..db35594d4df7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -31,8 +31,7 @@
#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -81,6 +80,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#else
+#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
@@ -101,7 +102,6 @@ struct acpi_button {
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
- bool wakeup_enabled;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
input_sync(input);
pm_wakeup_event(&device->dev, 0);
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class,
+ dev_name(&device->dev),
+ event, ++button->pushed);
}
break;
default:
@@ -407,16 +411,6 @@ static int acpi_button_add(struct acpi_device *device)
lid_device = device;
}
- if (device->wakeup.flags.valid) {
- /* Button's GPE is run-wake GPE */
- acpi_enable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number);
- if (!device_may_wakeup(&device->dev)) {
- device_set_wakeup_enable(&device->dev, true);
- button->wakeup_enabled = true;
- }
- }
-
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
return 0;
@@ -433,13 +427,6 @@ static int acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
- if (device->wakeup.flags.valid) {
- acpi_disable_gpe(device->wakeup.gpe_device,
- device->wakeup.gpe_number);
- if (button->wakeup_enabled)
- device_set_wakeup_enable(&device->dev, false);
- }
-
acpi_button_remove_fs(device);
input_unregister_device(button->input);
kfree(button);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index e23151667655..63119d09b354 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -27,13 +27,10 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/acpi.h>
+#include <linux/container.h>
#include "internal.h"
-#include "internal.h"
-
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_CONTAINER_COMPONENT
ACPI_MODULE_NAME("container");
@@ -44,19 +41,69 @@ static const struct acpi_device_id container_device_ids[] = {
{"", 0},
};
-static int container_device_attach(struct acpi_device *device,
+static int acpi_container_offline(struct container_dev *cdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&cdev->dev);
+ struct acpi_device *child;
+
+ /* Check all of the dependent devices' physical companions. */
+ list_for_each_entry(child, &adev->children, node)
+ if (!acpi_scan_is_offline(child, false))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void acpi_container_release(struct device *dev)
+{
+ kfree(to_container_dev(dev));
+}
+
+static int container_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
- /* This is necessary for container hotplug to work. */
+ struct container_dev *cdev;
+ struct device *dev;
+ int ret;
+
+ if (adev->flags.is_dock_station)
+ return 0;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->offline = acpi_container_offline;
+ dev = &cdev->dev;
+ dev->bus = &container_subsys;
+ dev_set_name(dev, "%s", dev_name(&adev->dev));
+ ACPI_COMPANION_SET(dev, adev);
+ dev->release = acpi_container_release;
+ ret = device_register(dev);
+ if (ret) {
+ put_device(dev);
+ return ret;
+ }
+ adev->driver_data = dev;
return 1;
}
+static void container_device_detach(struct acpi_device *adev)
+{
+ struct device *dev = acpi_driver_data(adev);
+
+ adev->driver_data = NULL;
+ if (dev)
+ device_unregister(dev);
+}
+
static struct acpi_scan_handler container_handler = {
.ids = container_device_ids,
.attach = container_device_attach,
+ .detach = container_device_detach,
.hotplug = {
.enabled = true,
- .mode = AHM_CONTAINER,
+ .demand_offline = true,
},
};
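Two idioms in the new container code are worth noting: device_register() failure is unwound with put_device() rather than kfree(), so the release callback frees the memory once the embedded kobject's last reference drops, and the offline callback declines the request while any child is still online. The registration idiom in generic form, as a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_dev {
	struct device dev;
};

static void example_release(struct device *dev)
{
	kfree(container_of(dev, struct example_dev, dev));
}

static int example_add(struct device *parent)
{
	struct example_dev *edev;
	int ret;

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	edev->dev.parent = parent;
	edev->dev.release = example_release;
	dev_set_name(&edev->dev, "example0");

	ret = device_register(&edev->dev);
	if (ret) {
		/* device_register() initialized the kobject: drop the reference
		 * with put_device() so example_release() performs the kfree(). */
		put_device(&edev->dev);
		return ret;
	}
	return 0;
}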
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 12b62f2cdb3f..c68e72414a67 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "internal.h"
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index b55d6a20dc0e..6b1919f6bd82 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -5,7 +5,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index b3480cf7db1a..d047739f3380 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -256,6 +256,8 @@ int acpi_bus_init_power(struct acpi_device *device)
return -EINVAL;
device->power.state = ACPI_STATE_UNKNOWN;
+ if (!acpi_device_is_present(device))
+ return 0;
result = acpi_device_get_power(device, &state);
if (result)
@@ -302,15 +304,18 @@ int acpi_device_fix_up_power(struct acpi_device *device)
return ret;
}
-int acpi_bus_update_power(acpi_handle handle, int *state_p)
+int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
- struct acpi_device *device;
int state;
int result;
- result = acpi_bus_get_device(handle, &device);
- if (result)
+ if (device->power.state == ACPI_STATE_UNKNOWN) {
+ result = acpi_bus_init_power(device);
+ if (!result && state_p)
+ *state_p = device->power.state;
+
return result;
+ }
result = acpi_device_get_power(device, &state);
if (result)
@@ -338,6 +343,15 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p)
return 0;
}
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? result : acpi_device_update_power(device, state_p);
+}
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
bool acpi_bus_power_manageable(acpi_handle handle)
@@ -713,18 +727,6 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
#endif /* CONFIG_PM_SLEEP */
/**
- * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
- * @dev: Device to get the ACPI node for.
- */
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
-{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
-
- return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
-}
-
-/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
* @dev: Device to put into a low-power state.
* @adev: ACPI device node corresponding to @dev.
@@ -764,7 +766,7 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
*/
int acpi_dev_runtime_suspend(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
bool remote_wakeup;
int error;
@@ -795,7 +797,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
*/
int acpi_dev_runtime_resume(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
@@ -848,7 +850,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
*/
int acpi_dev_suspend_late(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
u32 target_state;
bool wakeup;
int error;
@@ -880,7 +882,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
*/
int acpi_dev_resume_early(struct device *dev)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
@@ -899,15 +901,30 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
int acpi_subsys_prepare(struct device *dev)
{
/*
- * Follow PCI and resume devices suspended at run time before running
- * their system suspend callbacks.
+ * Devices having power.ignore_children set may still be necessary for
+ * suspending their children in the next phase of device suspend.
*/
- pm_runtime_resume(dev);
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
+
return pm_generic_prepare(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
+ * acpi_subsys_suspend - Run the device driver's suspend callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices suspended at run time before running their
+ * system suspend callbacks.
+ */
+int acpi_subsys_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return pm_generic_suspend(dev);
+}
+
+/**
* acpi_subsys_suspend_late - Suspend device using ACPI.
* @dev: Device to suspend.
*
@@ -935,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+/**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze(struct device *dev)
+{
+ /*
+ * This used to be done in acpi_subsys_prepare() for all devices and
+ * some drivers may depend on it, so do it here. Ideally, however,
+ * runtime-suspended devices should not be touched during freeze/thaw
+ * transitions.
+ */
+ pm_runtime_resume(dev);
+ return pm_generic_freeze(dev);
+}
+
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
@@ -945,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
#endif
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
+ .suspend = acpi_subsys_suspend,
.suspend_late = acpi_subsys_suspend_late,
.resume_early = acpi_subsys_resume_early,
+ .freeze = acpi_subsys_freeze,
+ .poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_subsys_suspend_late,
.restore_early = acpi_subsys_resume_early,
#endif
@@ -971,7 +1008,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
@@ -1003,7 +1040,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
*/
void acpi_dev_pm_detach(struct device *dev, bool power_off)
{
- struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev->pm_domain = NULL;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index dcd73ccb514c..f0fc6260266b 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -1,7 +1,9 @@
/*
* dock.c - ACPI dock station driver
*
- * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ * Copyright (C) 2006, 2014, Intel Corp.
+ * Author: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -32,10 +34,8 @@
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#define PREFIX "ACPI: "
+#include "internal.h"
#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
@@ -68,15 +68,10 @@ struct dock_station {
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
-static DEFINE_MUTEX(hotplug_lock);
struct dock_dependent_device {
struct list_head list;
- acpi_handle handle;
- const struct acpi_dock_ops *hp_ops;
- void *hp_context;
- unsigned int hp_refcount;
- void (*hp_release)(void *);
+ struct acpi_device *adev;
};
#define DOCK_DOCKING 0x00000001
@@ -98,13 +93,13 @@ enum dock_callback_type {
*****************************************************************************/
/**
* add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @handle: handle of the dependent device
+ * @ds: Dock station.
+ * @adev: Dependent ACPI device object.
*
* Add the dependent device to the dock's dependent device list.
*/
-static int __init
-add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+static int add_dock_dependent_device(struct dock_station *ds,
+ struct acpi_device *adev)
{
struct dock_dependent_device *dd;
@@ -112,180 +107,120 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
if (!dd)
return -ENOMEM;
- dd->handle = handle;
+ dd->adev = adev;
INIT_LIST_HEAD(&dd->list);
list_add_tail(&dd->list, &ds->dependent_devices);
return 0;
}
-static void remove_dock_dependent_devices(struct dock_station *ds)
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+ enum dock_callback_type cb_type)
{
- struct dock_dependent_device *dd, *aux;
+ struct acpi_device *adev = dd->adev;
- list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
- list_del(&dd->list);
- kfree(dd);
- }
-}
+ acpi_lock_hp_context();
-/**
- * dock_init_hotplug - Initialize a hotplug device on a docking station.
- * @dd: Dock-dependent device.
- * @ops: Dock operations to attach to the dependent device.
- * @context: Data to pass to the @ops callbacks and @release.
- * @init: Optional initialization routine to run after setting up context.
- * @release: Optional release routine to run on removal.
- */
-static int dock_init_hotplug(struct dock_dependent_device *dd,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
-{
- int ret = 0;
+ if (!adev->hp)
+ goto out;
- mutex_lock(&hotplug_lock);
- if (WARN_ON(dd->hp_context)) {
- ret = -EEXIST;
- } else {
- dd->hp_refcount = 1;
- dd->hp_ops = ops;
- dd->hp_context = context;
- dd->hp_release = release;
- if (init)
- init(context);
- }
- mutex_unlock(&hotplug_lock);
- return ret;
-}
+ if (cb_type == DOCK_CALL_FIXUP) {
+ void (*fixup)(struct acpi_device *);
-/**
- * dock_release_hotplug - Decrement hotplug reference counter of dock device.
- * @dd: Dock-dependent device.
- *
- * Decrement the reference counter of @dd and if 0, detach its hotplug
- * operations from it, reset its context pointer and run the optional release
- * routine if present.
- */
-static void dock_release_hotplug(struct dock_dependent_device *dd)
-{
- mutex_lock(&hotplug_lock);
- if (dd->hp_context && !--dd->hp_refcount) {
- void (*release)(void *) = dd->hp_release;
- void *context = dd->hp_context;
-
- dd->hp_ops = NULL;
- dd->hp_context = NULL;
- dd->hp_release = NULL;
- if (release)
- release(context);
- }
- mutex_unlock(&hotplug_lock);
-}
+ fixup = adev->hp->fixup;
+ if (fixup) {
+ acpi_unlock_hp_context();
+ fixup(adev);
+ return;
+ }
+ } else if (cb_type == DOCK_CALL_UEVENT) {
+ void (*uevent)(struct acpi_device *, u32);
+
+ uevent = adev->hp->uevent;
+ if (uevent) {
+ acpi_unlock_hp_context();
+ uevent(adev, event);
+ return;
+ }
+ } else {
+ int (*notify)(struct acpi_device *, u32);
-static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
- enum dock_callback_type cb_type)
-{
- acpi_notify_handler cb = NULL;
- bool run = false;
-
- mutex_lock(&hotplug_lock);
-
- if (dd->hp_context) {
- run = true;
- dd->hp_refcount++;
- if (dd->hp_ops) {
- switch (cb_type) {
- case DOCK_CALL_FIXUP:
- cb = dd->hp_ops->fixup;
- break;
- case DOCK_CALL_UEVENT:
- cb = dd->hp_ops->uevent;
- break;
- default:
- cb = dd->hp_ops->handler;
- }
+ notify = adev->hp->notify;
+ if (notify) {
+ acpi_unlock_hp_context();
+ notify(adev, event);
+ return;
}
}
- mutex_unlock(&hotplug_lock);
+ out:
+ acpi_unlock_hp_context();
+}
- if (!run)
- return;
+static struct dock_station *find_dock_station(acpi_handle handle)
+{
+ struct dock_station *ds;
- if (cb)
- cb(dd->handle, event, dd->hp_context);
+ list_for_each_entry(ds, &dock_stations, sibling)
+ if (ds->handle == handle)
+ return ds;
- dock_release_hotplug(dd);
+ return NULL;
}
/**
* find_dock_dependent_device - get a device dependent on this dock
* @ds: the dock station
- * @handle: the acpi_handle of the device we want
+ * @adev: ACPI device object to find.
*
* iterate over the dependent device list for this dock. If the
* dependent device matches the handle, return.
*/
static struct dock_dependent_device *
-find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+find_dock_dependent_device(struct dock_station *ds, struct acpi_device *adev)
{
struct dock_dependent_device *dd;
list_for_each_entry(dd, &ds->dependent_devices, list)
- if (handle == dd->handle)
+ if (adev == dd->adev)
return dd;
return NULL;
}
-/*****************************************************************************
- * Dock functions *
- *****************************************************************************/
-static int __init is_battery(acpi_handle handle)
+void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle)
{
- struct acpi_device_info *info;
- int ret = 1;
-
- if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
- return 0;
- if (!(info->valid & ACPI_VALID_HID))
- ret = 0;
- else
- ret = !strcmp("PNP0C0A", info->hardware_id.string);
+ struct dock_station *ds = find_dock_station(dshandle);
- kfree(info);
- return ret;
+ if (ds && !find_dock_dependent_device(ds, adev))
+ add_dock_dependent_device(ds, adev);
}
-/* Check whether ACPI object is an ejectable battery or disk bay */
-static bool __init is_ejectable_bay(acpi_handle handle)
-{
- if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
- return true;
-
- return acpi_bay_match(handle);
-}
+/*****************************************************************************
+ * Dock functions *
+ *****************************************************************************/
/**
* is_dock_device - see if a device is on a dock station
- * @handle: acpi handle of the device
+ * @adev: ACPI device object to check.
*
* If this device is either the dock station itself,
* or is a device dependent on the dock station, then it
* is a dock device
*/
-int is_dock_device(acpi_handle handle)
+int is_dock_device(struct acpi_device *adev)
{
struct dock_station *dock_station;
if (!dock_station_count)
return 0;
- if (acpi_dock_match(handle))
+ if (acpi_dock_match(adev->handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
- if (find_dock_dependent_device(dock_station, handle))
+ if (find_dock_dependent_device(dock_station, adev))
return 1;
return 0;
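The rewritten dock_hotplug_event() holds the hotplug context lock only long enough to read the callback pointer from adev->hp, then drops it before invoking the handler, so the handler itself runs without the hotplug context lock held. Reduced to its essentials, the pattern looks like this (standalone sketch, pthreads used only for illustration):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*notify_cb)(unsigned int event);

static void dispatch_event(unsigned int event)
{
	void (*cb)(unsigned int);

	pthread_mutex_lock(&ctx_lock);
	cb = notify_cb;			/* snapshot the pointer under the lock */
	pthread_mutex_unlock(&ctx_lock);

	if (cb)
		cb(event);		/* invoke with the lock dropped */
}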
@@ -313,46 +248,6 @@ static int dock_present(struct dock_station *ds)
}
/**
- * dock_create_acpi_device - add new devices to acpi
- * @handle - handle of the device to add
- *
- * This function will create a new acpi_device for the given
- * handle if one does not exist already. This should cause
- * acpi to scan for drivers for the given devices, and call
- * matching driver's add routine.
- */
-static void dock_create_acpi_device(acpi_handle handle)
-{
- struct acpi_device *device;
- int ret;
-
- if (acpi_bus_get_device(handle, &device)) {
- /*
- * no device created for this object,
- * so we should create one.
- */
- ret = acpi_bus_scan(handle);
- if (ret)
- pr_debug("error adding bus, %x\n", -ret);
- }
-}
-
-/**
- * dock_remove_acpi_device - remove the acpi_device struct from acpi
- * @handle - the handle of the device to remove
- *
- * Tell acpi to remove the acpi_device. This should cause any loaded
- * driver to have it's remove routine called.
- */
-static void dock_remove_acpi_device(acpi_handle handle)
-{
- struct acpi_device *device;
-
- if (!acpi_bus_get_device(handle, &device))
- acpi_bus_trim(device);
-}
-
-/**
* hot_remove_dock_devices - Remove dock station devices.
* @ds: Dock station.
*/
@@ -369,7 +264,7 @@ static void hot_remove_dock_devices(struct dock_station *ds)
dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
- dock_remove_acpi_device(dd->handle);
+ acpi_bus_trim(dd->adev);
}
/**
@@ -395,12 +290,20 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
/*
- * Now make sure that an acpi_device is created for each dependent
- * device. That will cause scan handlers to be attached to device
- * objects or acpi_drivers to be stopped/started if they are present.
+ * Check if all devices have been enumerated already. If not, run
+ * acpi_bus_scan() for them and that will cause scan handlers to be
+ * attached to device objects or acpi_drivers to be stopped/started if
+ * they are present.
*/
- list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_create_acpi_device(dd->handle);
+ list_for_each_entry(dd, &ds->dependent_devices, list) {
+ struct acpi_device *adev = dd->adev;
+
+ if (!acpi_device_enumerated(adev)) {
+ int ret = acpi_bus_scan(adev->handle);
+ if (ret)
+ dev_dbg(&adev->dev, "scan error %d\n", -ret);
+ }
+ }
}
static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -504,71 +407,6 @@ static int dock_in_progress(struct dock_station *ds)
}
/**
- * register_hotplug_dock_device - register a hotplug function
- * @handle: the handle of the device
- * @ops: handlers to call after docking
- * @context: device specific data
- * @init: Optional initialization routine to run after registration
- * @release: Optional release routine to run on unregistration
- *
- * If a driver would like to perform a hotplug operation after a dock
- * event, they can register an acpi_notifiy_handler to be called by
- * the dock driver after _DCK is executed.
- */
-int register_hotplug_dock_device(acpi_handle handle,
- const struct acpi_dock_ops *ops, void *context,
- void (*init)(void *), void (*release)(void *))
-{
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
- int ret = -EINVAL;
-
- if (WARN_ON(!context))
- return -EINVAL;
-
- if (!dock_station_count)
- return -ENODEV;
-
- /*
- * make sure this handle is for a device dependent on the dock,
- * this would include the dock station itself
- */
- list_for_each_entry(dock_station, &dock_stations, sibling) {
- /*
- * An ATA bay can be in a dock and itself can be ejected
- * separately, so there are two 'dock stations' which need the
- * ops
- */
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd && !dock_init_hotplug(dd, ops, context, init, release))
- ret = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
-
-/**
- * unregister_hotplug_dock_device - remove yourself from the hotplug list
- * @handle: the acpi handle of the device
- */
-void unregister_hotplug_dock_device(acpi_handle handle)
-{
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
-
- if (!dock_station_count)
- return;
-
- list_for_each_entry(dock_station, &dock_stations, sibling) {
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd)
- dock_release_hotplug(dd);
- }
-}
-EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
-
-/**
* handle_eject_request - handle an undock request checking for error conditions
*
* Check to make sure the dock device is still present, then undock and
@@ -601,20 +439,23 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
}
/**
- * dock_notify - act upon an acpi dock notification
- * @ds: dock station
- * @event: the acpi event
+ * dock_notify - Handle ACPI dock notification.
+ * @adev: Dock station's ACPI device object.
+ * @event: Event code.
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
* and then hotplug and devices that may need hotplugging.
*/
-static void dock_notify(struct dock_station *ds, u32 event)
+int dock_notify(struct acpi_device *adev, u32 event)
{
- acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ acpi_handle handle = adev->handle;
+ struct dock_station *ds = find_dock_station(handle);
int surprise_removal = 0;
+ if (!ds)
+ return -ENODEV;
+
/*
* According to acpi spec 3.0a, if a DEVICE_CHECK notification
* is sent and _DCK is present, it is assumed to mean an undock
@@ -635,7 +476,7 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
@@ -664,49 +505,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
else
dock_event(ds, event, UNDOCK_EVENT);
break;
- default:
- acpi_handle_err(handle, "Unknown dock event %d\n", event);
}
-}
-
-static void acpi_dock_deferred_cb(void *data, u32 event)
-{
- acpi_scan_lock_acquire();
- dock_notify(data, event);
- acpi_scan_lock_release();
-}
-
-static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
-{
- if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
- && event != ACPI_NOTIFY_EJECT_REQUEST)
- return;
-
- acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
-}
-
-/**
- * find_dock_devices - find devices on the dock station
- * @handle: the handle of the device we are examining
- * @lvl: unused
- * @context: the dock station private data
- * @rv: unused
- *
- * This function is called by acpi_walk_namespace. It will
- * check to see if an object has an _EJD method. If it does, then it
- * will see if it is dependent on the dock station.
- */
-static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
- void *context, void **rv)
-{
- struct dock_station *ds = context;
- acpi_handle ejd = NULL;
-
- acpi_bus_get_ejd(handle, &ejd);
- if (ejd == ds->handle)
- add_dock_dependent_device(ds, handle);
-
- return AE_OK;
+ return 0;
}
/*
@@ -715,13 +515,11 @@ static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct acpi_device *tmp;
-
struct dock_station *dock_station = dev->platform_data;
+ struct acpi_device *adev = NULL;
- if (!acpi_bus_get_device(dock_station->handle, &tmp))
- return snprintf(buf, PAGE_SIZE, "1\n");
- return snprintf(buf, PAGE_SIZE, "0\n");
+ acpi_bus_get_device(dock_station->handle, &adev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
@@ -807,23 +605,28 @@ static struct attribute_group dock_attribute_group = {
};
/**
- * dock_add - add a new dock station
- * @handle: the dock station handle
+ * acpi_dock_add - Add a new dock station
+ * @adev: Dock station ACPI device object.
*
- * allocated and initialize a new dock station device. Find all devices
- * that are on the dock station, and register for dock event notifications.
+ * Allocate and initialize a new dock station device.
*/
-static int __init dock_add(acpi_handle handle)
+void acpi_dock_add(struct acpi_device *adev)
{
struct dock_station *dock_station, ds = { NULL, };
+ struct platform_device_info pdevinfo;
+ acpi_handle handle = adev->handle;
struct platform_device *dd;
- acpi_status status;
int ret;
- dd = platform_device_register_data(NULL, "dock", dock_station_count,
- &ds, sizeof(ds));
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = "dock";
+ pdevinfo.id = dock_station_count;
+ pdevinfo.acpi_node.companion = adev;
+ pdevinfo.data = &ds;
+ pdevinfo.size_data = sizeof(ds);
+ dd = platform_device_register_full(&pdevinfo);
if (IS_ERR(dd))
- return PTR_ERR(dd);
+ return;
dock_station = dd->dev.platform_data;
@@ -841,75 +644,29 @@ static int __init dock_add(acpi_handle handle)
dock_station->flags |= DOCK_IS_DOCK;
if (acpi_ata_match(handle))
dock_station->flags |= DOCK_IS_ATA;
- if (is_battery(handle))
+ if (acpi_device_is_battery(adev))
dock_station->flags |= DOCK_IS_BAT;
ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
goto err_unregister;
- /* Find dependent devices */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock_devices, NULL,
- dock_station, NULL);
-
/* add the dock station as a device dependent on itself */
- ret = add_dock_dependent_device(dock_station, handle);
+ ret = add_dock_dependent_device(dock_station, adev);
if (ret)
goto err_rmgroup;
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- dock_notify_handler, dock_station);
- if (ACPI_FAILURE(status)) {
- ret = -ENODEV;
- goto err_rmgroup;
- }
-
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
- return 0;
+ adev->flags.is_dock_station = true;
+ dev_info(&adev->dev, "ACPI dock station (docks/bays count: %d)\n",
+ dock_station_count);
+ return;
err_rmgroup:
- remove_dock_dependent_devices(dock_station);
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+
err_unregister:
platform_device_unregister(dd);
acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
- return ret;
-}
-
-/**
- * find_dock_and_bay - look for dock stations and bays
- * @handle: acpi handle of a device
- * @lvl: unused
- * @context: unused
- * @rv: unused
- *
- * This is called by acpi_walk_namespace to look for dock stations and bays.
- */
-static acpi_status __init
-find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- if (acpi_dock_match(handle) || is_ejectable_bay(handle))
- dock_add(handle);
-
- return AE_OK;
-}
-
-void __init acpi_dock_init(void)
-{
- if (acpi_disabled)
- return;
-
- /* look for dock stations and bays */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
-
- if (!dock_station_count) {
- pr_info(PREFIX "No dock devices found.\n");
- return;
- }
-
- pr_info(PREFIX "%s: %d docks/bays found\n",
- ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ba5b56db9d27..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -39,10 +39,9 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <asm/io.h>
#include "internal.h"
@@ -68,6 +67,8 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+ * when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -91,10 +92,6 @@ static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
-/* If we find an EC via the ECDT, we need to keep a ptr to its context */
-/* External interfaces use first EC only, so remember */
-typedef int (*acpi_ec_query_func) (void *data);
-
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -121,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
Transaction Management
@@ -387,27 +385,6 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
-/*
- * Externally callable EC access functions. For now, assume 1 EC only
- */
-int ec_burst_enable(void)
-{
- if (!first_ec)
- return -ENODEV;
- return acpi_ec_burst_enable(first_ec);
-}
-
-EXPORT_SYMBOL(ec_burst_enable);
-
-int ec_burst_disable(void)
-{
- if (!first_ec)
- return -ENODEV;
- return acpi_ec_burst_disable(first_ec);
-}
-
-EXPORT_SYMBOL(ec_burst_disable);
-
int ec_read(u8 addr, u8 *val)
{
int err;
@@ -466,6 +443,29 @@ acpi_handle ec_get_handle(void)
EXPORT_SYMBOL(ec_get_handle);
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Must be called with ec->mutex held.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (status || !value)
+ break;
+ }
+
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -489,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
+
mutex_unlock(&ec->mutex);
}
@@ -779,9 +783,9 @@ static int ec_install_handlers(struct acpi_ec *ec)
pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
+ acpi_disable_gpe(NULL, ec->gpe);
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
- acpi_disable_gpe(NULL, ec->gpe);
return -ENODEV;
}
}
@@ -847,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+ /* Clear stale _Q events if hardware might require that */
+ if (EC_FLAGS_CLEAR_ON_RESUME) {
+ mutex_lock(&ec->mutex);
+ acpi_ec_clear(ec);
+ mutex_unlock(&ec->mutex);
+ }
return ret;
}
@@ -948,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
return 0;
}
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. If too many events accumulate, these ECs stop reporting GPEs until
+ * they are manually polled (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
+ * however, other Samsung models are very likely affected as well.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
@@ -991,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
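For reference, a sketch of the quirk mechanism used above (not part of this patch; the "example" identifiers are made up): dmi_check_system() walks the table, and for every entry whose DMI_MATCH fields all match the running system it runs that entry's callback (stopping early if a callback returns non-zero), which is how EC_FLAGS_CLEAR_ON_RESUME ends up set on the affected Samsung machines.

#include <linux/dmi.h>

static bool example_quirk;

static int __init example_enable_quirk(const struct dmi_system_id *id)
{
	pr_info("example: enabling quirk for %s\n", id->ident);
	example_quirk = true;
	return 0;
}

static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.callback = example_enable_quirk,
		.ident = "Example vendor hardware",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
		},
	},
	{}
};

static int __init example_init(void)
{
	dmi_check_system(example_dmi_table);
	return 0;
}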
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 4e7b798900f2..b4c216bab22b 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@ static const struct file_operations acpi_ec_io_ops = {
.llseek = default_llseek,
};
-int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
+static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index cae3b387b867..ef2d730734dc 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index ba3da88cee45..8acf53e62966 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -29,8 +29,7 @@
#include <linux/types.h>
#include <asm/uaccess.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define PREFIX "ACPI: "
@@ -56,8 +55,16 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+static struct dev_pm_ops acpi_fan_pm = {
+ .resume = acpi_fan_resume,
+ .freeze = acpi_fan_suspend,
+ .thaw = acpi_fan_resume,
+ .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
+#else
+#define FAN_PM_OPS_PTR NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
static struct acpi_driver acpi_fan_driver = {
.name = "fan",
@@ -67,7 +74,7 @@ static struct acpi_driver acpi_fan_driver = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
},
- .drv.pm = &acpi_fan_pm,
+ .drv.pm = FAN_PM_OPS_PTR,
};
/* thermal cooling device callbacks */
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index a22a295edb69..f774c65ecb8b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -37,7 +37,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
- if (type && type->match && type->find_device) {
+ if (type && type->match && type->find_companion) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
@@ -82,109 +82,74 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
#define FIND_CHILD_MIN_SCORE 1
#define FIND_CHILD_MAX_SCORE 2
-static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_p)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
- if (adev) {
- *ret_p = handle;
- return AE_CTRL_TERMINATE;
- }
- return AE_OK;
-}
-
-static int do_find_child_checks(acpi_handle handle, bool is_bridge)
+static int find_child_checks(struct acpi_device *adev, bool check_children)
{
bool sta_present = true;
unsigned long long sta;
acpi_status status;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
if (status == AE_NOT_FOUND)
sta_present = false;
else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return -ENODEV;
- if (is_bridge) {
- void *test = NULL;
+ if (check_children && list_empty(&adev->children))
+ return -ENODEV;
- /* Check if this object has at least one child device. */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_dev_present, NULL, NULL, &test);
- if (!test)
- return -ENODEV;
- }
return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
-struct find_child_context {
- u64 addr;
- bool is_bridge;
- acpi_handle ret;
- int ret_score;
-};
-
-static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
- void *data, void **not_used)
-{
- struct find_child_context *context = data;
- unsigned long long addr;
- acpi_status status;
- int score;
-
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
- if (ACPI_FAILURE(status) || addr != context->addr)
- return AE_OK;
-
- if (!context->ret) {
- /* This is the first matching object. Save its handle. */
- context->ret = handle;
- return AE_OK;
- }
- /*
- * There is more than one matching object with the same _ADR value.
- * That really is unexpected, so we are kind of beyond the scope of the
- * spec here. We have to choose which one to return, though.
- *
- * First, check if the previously found object is good enough and return
- * its handle if so. Second, check the same for the object that we've
- * just found.
- */
- if (!context->ret_score) {
- score = do_find_child_checks(context->ret, context->is_bridge);
- if (score == FIND_CHILD_MAX_SCORE)
- return AE_CTRL_TERMINATE;
- else
- context->ret_score = score;
- }
- score = do_find_child_checks(handle, context->is_bridge);
- if (score == FIND_CHILD_MAX_SCORE) {
- context->ret = handle;
- return AE_CTRL_TERMINATE;
- } else if (score > context->ret_score) {
- context->ret = handle;
- context->ret_score = score;
- }
- return AE_OK;
-}
-
-acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
+ u64 address, bool check_children)
{
- if (parent) {
- struct find_child_context context = {
- .addr = addr,
- .is_bridge = is_bridge,
- };
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
- NULL, &context, NULL);
- return context.ret;
+ struct acpi_device *adev, *ret = NULL;
+ int ret_score = 0;
+
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(adev, &parent->children, node) {
+ unsigned long long addr;
+ acpi_status status;
+ int score;
+
+ status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR,
+ NULL, &addr);
+ if (ACPI_FAILURE(status) || addr != address)
+ continue;
+
+ if (!ret) {
+ /* This is the first matching object. Save it. */
+ ret = adev;
+ continue;
+ }
+ /*
+ * There is more than one matching device object with the same
+ * _ADR value. That really is unexpected, so we are kind of
+ * beyond the scope of the spec here. We have to choose which
+ * one to return, though.
+ *
+ * First, check if the previously found object is good enough
+ * and return it if so. Second, do the same for the object that
+ * we've just found.
+ */
+ if (!ret_score) {
+ ret_score = find_child_checks(ret, check_children);
+ if (ret_score == FIND_CHILD_MAX_SCORE)
+ return ret;
+ }
+ score = find_child_checks(adev, check_children);
+ if (score == FIND_CHILD_MAX_SCORE) {
+ return adev;
+ } else if (score > ret_score) {
+ ret = adev;
+ ret_score = score;
+ }
}
- return NULL;
+ return ret;
}
-EXPORT_SYMBOL_GPL(acpi_find_child);
+EXPORT_SYMBOL_GPL(acpi_find_child_device);
static void acpi_physnode_link_name(char *buf, unsigned int node_id)
{
@@ -195,9 +160,8 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
strcpy(buf, PHYSICAL_NODE_STRING);
}
-int acpi_bind_one(struct device *dev, acpi_handle handle)
+int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
{
- struct acpi_device *acpi_dev = NULL;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
@@ -205,14 +169,12 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
int retval = -EINVAL;
if (ACPI_COMPANION(dev)) {
- if (handle) {
+ if (acpi_dev) {
dev_warn(dev, "ACPI companion already set\n");
return -EINVAL;
} else {
acpi_dev = ACPI_COMPANION(dev);
}
- } else {
- acpi_bus_get_device(handle, &acpi_dev);
}
if (!acpi_dev)
return -EINVAL;
@@ -322,35 +284,34 @@ int acpi_unbind_one(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
-void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
-{
- struct acpi_device *adev;
-
- if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
- ACPI_COMPANION_SET(dev, adev);
-}
-EXPORT_SYMBOL_GPL(acpi_preset_companion);
-
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
- acpi_handle handle;
+ struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret && type) {
- ret = type->find_device(dev, &handle);
- if (ret) {
+ struct acpi_device *adev;
+
+ adev = type->find_companion(dev);
+ if (!adev) {
DBG("Unable to get handle for %s\n", dev_name(dev));
+ ret = -ENODEV;
goto out;
}
- ret = acpi_bind_one(dev, handle);
+ ret = acpi_bind_one(dev, adev);
if (ret)
goto out;
}
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ goto out;
if (type && type->setup)
type->setup(dev);
+ else if (adev->handler && adev->handler->bind)
+ adev->handler->bind(dev);
out:
#if ACPI_GLUE_DEBUG
@@ -369,11 +330,17 @@ static int acpi_platform_notify(struct device *dev)
static int acpi_platform_notify_remove(struct device *dev)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_bus_type *type;
+ if (!adev)
+ return 0;
+
type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
+ else if (adev->handler && adev->handler->unbind)
+ adev->handler->unbind(dev);
acpi_unbind_one(dev);
return 0;
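As a usage illustration of the new helper (a hypothetical sketch, not from this patch; example_bus_addr() is a made-up stand-in for however a bus derives the device's _ADR-style address), a bus glue layer could attach the ACPI companion like this:

#include <linux/acpi.h>

/* hypothetical helper: returns the _ADR-style address of dev on its bus */
extern u64 example_bus_addr(struct device *dev);

static void example_set_companion(struct device *dev, struct device *parent)
{
	struct acpi_device *adev;

	/* best-scoring child of the parent's companion with a matching _ADR */
	adev = acpi_find_child_device(ACPI_COMPANION(parent),
				      example_bus_addr(dev), false);
	if (adev)
		ACPI_COMPANION_SET(dev, adev);
}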
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 13b1d39d7cdf..aafe3ca829c2 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -25,8 +25,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/hed.h>
static struct acpi_device_id acpi_hed_ids[] = {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a29739c0ba79..957391306cbf 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,7 +28,6 @@ int init_acpi_device_notify(void);
int acpi_scan_init(void);
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
-void acpi_pci_root_hp_init(void);
void acpi_processor_init(void);
void acpi_platform_init(void);
int acpi_sysfs_init(void);
@@ -38,9 +37,15 @@ void acpi_container_init(void);
static inline void acpi_container_init(void) {}
#endif
#ifdef CONFIG_ACPI_DOCK
-void acpi_dock_init(void);
+void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle);
+int dock_notify(struct acpi_device *adev, u32 event);
+void acpi_dock_add(struct acpi_device *adev);
#else
-static inline void acpi_dock_init(void) {}
+static inline void register_dock_dependent_device(struct acpi_device *adev,
+ acpi_handle dshandle) {}
+static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; }
+static inline void acpi_dock_add(struct acpi_device *adev) {}
#endif
#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
void acpi_memory_hotplug_init(void);
@@ -73,6 +78,11 @@ void acpi_lpss_init(void);
static inline void acpi_lpss_init(void) {}
#endif
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
+bool acpi_queue_hotplug_work(struct work_struct *work);
+void acpi_device_hotplug(struct acpi_device *adev, u32 src);
+bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
@@ -85,9 +95,10 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
-int acpi_bind_one(struct device *dev, acpi_handle handle);
+int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
-void acpi_bus_device_eject(void *data, u32 ost_src);
+bool acpi_device_is_present(struct acpi_device *adev);
+bool acpi_device_is_battery(struct acpi_device *adev);
/* --------------------------------------------------------------------------
Power Resource
@@ -105,6 +116,8 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
+int acpi_device_update_power(struct acpi_device *device, int *state_p);
+
int acpi_wakeup_device_init(void);
void acpi_early_processor_set_pdc(void);
@@ -127,12 +140,21 @@ struct acpi_ec {
extern struct acpi_ec *first_ec;
+/* If we find an EC via the ECDT, we need to keep a ptr to its context */
+/* External interfaces use first EC only, so remember */
+typedef int (*acpi_ec_query_func) (void *data);
+
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
+int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data);
+void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index a2343a1d9e0b..24b5476449a1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/numa.h>
-#include <acpi/acpi_bus.h>
#define PREFIX "ACPI: "
@@ -61,7 +60,7 @@ int node_to_pxm(int node)
return node_to_pxm_map[node];
}
-void __acpi_map_pxm_to_node(int pxm, int node)
+static void __acpi_map_pxm_to_node(int pxm, int node)
{
if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
pxm_to_node_map[pxm] = node;
@@ -194,7 +193,7 @@ static int __init acpi_parse_slit(struct acpi_table_header *table)
return 0;
}
-void __init __attribute__ ((weak))
+void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
printk(KERN_WARNING PREFIX
@@ -315,7 +314,7 @@ int __init acpi_numa_init(void)
return 0;
}
-int acpi_get_pxm(acpi_handle h)
+static int acpi_get_pxm(acpi_handle h)
{
unsigned long long pxm;
acpi_status status;
@@ -332,14 +331,14 @@ int acpi_get_pxm(acpi_handle h)
return -1;
}
-int acpi_get_node(acpi_handle *handle)
+int acpi_get_node(acpi_handle handle)
{
- int pxm, node = NUMA_NO_NODE;
+ int pxm;
pxm = acpi_get_pxm(handle);
- if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
- node = acpi_map_pxm_to_node(pxm);
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
+ return NUMA_NO_NODE;
- return node;
+ return acpi_map_pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);
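A hedged usage sketch of the reworked acpi_get_node() (illustrative names only; assumes <linux/acpi.h>, <linux/slab.h> and <linux/topology.h>): callers get either a valid node or NUMA_NO_NODE and can fall back to the local node when the firmware provides no _PXM proximity data.

static void *example_alloc_near(struct acpi_device *adev, size_t size)
{
	int node = acpi_get_node(adev->handle);

	if (node == NUMA_NO_NODE)
		node = numa_node_id();	/* no proximity info: use local node */

	return kmalloc_node(size, GFP_KERNEL, node);
}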
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 386a9fe497b4..de4fe03873c5 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -12,7 +12,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
+
+#include "internal.h"
/* ACPI NVS regions, APEI may use it */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 54a20ff4b864..f7fd72ac69cf 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -39,7 +39,6 @@
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
@@ -49,14 +48,11 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
-#define PREFIX "ACPI: "
+
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
@@ -544,7 +540,7 @@ static u64 acpi_tables_addr;
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
u8 sum = 0;
u8 *end = buffer + length;
@@ -1172,8 +1168,7 @@ void acpi_os_wait_events_complete(void)
struct acpi_hp_work {
struct work_struct work;
- acpi_hp_callback func;
- void *data;
+ struct acpi_device *adev;
u32 src;
};
@@ -1182,25 +1177,24 @@ static void acpi_hotplug_work_fn(struct work_struct *work)
struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
acpi_os_wait_events_complete();
- hpw->func(hpw->data, hpw->src);
+ acpi_device_hotplug(hpw->adev, hpw->src);
kfree(hpw);
}
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
struct acpi_hp_work *hpw;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling function [%p(%p, %u)] for deferred execution.\n",
- func, data, src));
+ "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+ adev, src));
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
if (!hpw)
return AE_NO_MEMORY;
INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
- hpw->func = func;
- hpw->data = data;
+ hpw->adev = adev;
hpw->src = src;
/*
* We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
@@ -1215,6 +1209,10 @@ acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
return AE_OK;
}
+bool acpi_queue_hotplug_work(struct work_struct *work)
+{
+ return queue_work(kacpi_hotplug_wq, work);
+}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1282,7 +1280,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
jiffies = MAX_SCHEDULE_TIMEOUT;
else
jiffies = msecs_to_jiffies(timeout);
-
+
ret = down_timeout(sem, jiffies);
if (ret)
status = AE_TIME;
@@ -1539,17 +1537,21 @@ static int __init osi_setup(char *str)
__setup("acpi_osi=", osi_setup);
-/* enable serialization to combat AE_ALREADY_EXISTS errors */
-static int __init acpi_serialize_setup(char *str)
+/*
+ * Disable the auto-serialization of named objects creation methods.
+ *
+ * This feature is enabled by default. It marks the AML control methods
+ * that contain the opcodes to create named objects as "Serialized".
+ */
+static int __init acpi_no_auto_serialize_setup(char *str)
{
- printk(KERN_INFO PREFIX "serialize enabled\n");
-
- acpi_gbl_all_methods_serialized = TRUE;
+ acpi_gbl_auto_serialize_methods = FALSE;
+ pr_info("ACPI: auto-serialization disabled\n");
return 1;
}
-__setup("acpi_serialize", acpi_serialize_setup);
+__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check of resource interference between native drivers and ACPI
* OperationRegions (SystemIO and System Memory only).
@@ -1780,6 +1782,17 @@ static int __init acpi_no_auto_ssdt_setup(char *s)
__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
+static int __init acpi_disable_return_repair(char *s)
+{
+ printk(KERN_NOTICE PREFIX
+ "ACPI: Predefined validation mechanism disabled\n");
+ acpi_gbl_disable_auto_repair = TRUE;
+
+ return 1;
+}
+
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
+
acpi_status __init acpi_os_initialize(void)
{
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
@@ -1794,7 +1807,7 @@ acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
- kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+ kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
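The switch to alloc_ordered_workqueue() above is what serializes hotplug handling: an ordered workqueue executes at most one work item at a time, in queueing order. A generic sketch of the pattern (illustrative names, not this file's code; assumes <linux/workqueue.h>):

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* runs strictly after any previously queued item on example_wq */
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	example_wq = alloc_ordered_workqueue("example_ordered", 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}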
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 41c5e1b799ef..9c62340c2360 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -37,8 +37,6 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#define PREFIX "ACPI: "
@@ -372,6 +370,30 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
return NULL;
}
+#if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA)
+static int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+ u32 dev_gsi;
+
+ /* Interrupt Line values above 0xF are forbidden */
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+ pin_name(dev->pin), dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
+ ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ return 0;
+ }
+ return -EINVAL;
+}
+#else
+static inline int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+#endif
+
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
@@ -418,20 +440,11 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
- u32 dev_gsi;
- /* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF) &&
- (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
- dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
- pin_name(pin), dev->irq);
- acpi_register_gsi(&dev->dev, dev_gsi,
- ACPI_LEVEL_SENSITIVE,
- ACPI_ACTIVE_LOW);
- } else {
+ if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
- }
+ kfree(entry);
return 0;
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 2652a614deeb..cfd7581cc19f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -39,11 +39,9 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
+#include "internal.h"
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_link");
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 5b01bd6d5ea0..d388f13d48b4 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -35,14 +35,10 @@
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/apei.h>
+#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
@@ -51,6 +47,12 @@ static int acpi_pci_root_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_pci_root_remove(struct acpi_device *device);
+static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
+{
+ acpiphp_check_host_bridge(adev);
+ return 0;
+}
+
#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
| OSC_PCI_ASPM_SUPPORT \
| OSC_PCI_CLOCK_PM_SUPPORT \
@@ -66,7 +68,8 @@ static struct acpi_scan_handler pci_root_handler = {
.attach = acpi_pci_root_add,
.detach = acpi_pci_root_remove,
.hotplug = {
- .ignore = true,
+ .enabled = true,
+ .scan_dependent = acpi_pci_root_scan_dependent,
},
};
@@ -630,116 +633,9 @@ static void acpi_pci_root_remove(struct acpi_device *device)
void __init acpi_pci_root_init(void)
{
acpi_hest_init();
-
- if (!acpi_pci_disabled) {
- pci_acpi_crs_quirks();
- acpi_scan_add_handler(&pci_root_handler);
- }
-}
-/* Support root bridge hotplug */
-
-static void handle_root_bridge_insertion(acpi_handle handle)
-{
- struct acpi_device *device;
-
- if (!acpi_bus_get_device(handle, &device)) {
- dev_printk(KERN_DEBUG, &device->dev,
- "acpi device already exists; ignoring notify\n");
- return;
- }
-
- if (acpi_bus_scan(handle))
- acpi_handle_err(handle, "cannot add bridge to acpi list\n");
-}
-
-static void hotplug_event_root(void *data, u32 type)
-{
- acpi_handle handle = data;
- struct acpi_pci_root *root;
-
- acpi_scan_lock_acquire();
-
- root = acpi_pci_find_root(handle);
-
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- /* bus enumerate */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Bus check notify on %s\n", __func__);
- if (root)
- acpiphp_check_host_bridge(handle);
- else
- handle_root_bridge_insertion(handle);
-
- break;
-
- case ACPI_NOTIFY_DEVICE_CHECK:
- /* device check */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Device check notify on %s\n", __func__);
- if (!root)
- handle_root_bridge_insertion(handle);
- break;
-
- case ACPI_NOTIFY_EJECT_REQUEST:
- /* request device eject */
- acpi_handle_printk(KERN_DEBUG, handle,
- "Device eject notify on %s\n", __func__);
- if (!root)
- break;
-
- get_device(&root->device->dev);
-
- acpi_scan_lock_release();
-
- acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST);
+ if (acpi_pci_disabled)
return;
- default:
- acpi_handle_warn(handle,
- "notify_handler: unknown event type 0x%x\n",
- type);
- break;
- }
-
- acpi_scan_lock_release();
-}
-
-static void handle_hotplug_event_root(acpi_handle handle, u32 type,
- void *context)
-{
- acpi_hotplug_execute(hotplug_event_root, handle, type);
-}
-
-static acpi_status __init
-find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- int *count = (int *)context;
-
- if (!acpi_is_root_bridge(handle))
- return AE_OK;
-
- (*count)++;
-
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_root, NULL);
- if (ACPI_FAILURE(status))
- acpi_handle_printk(KERN_DEBUG, handle,
- "notify handler is not installed, exit status: %u\n",
- (unsigned int)status);
- else
- acpi_handle_printk(KERN_DEBUG, handle,
- "notify handler is installed\n");
-
- return AE_OK;
-}
-
-void __init acpi_pci_root_hp_init(void)
-{
- int num = 0;
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL);
- printk(KERN_DEBUG "Found %d acpi root devices\n", num);
+ pci_acpi_crs_quirks();
+ acpi_scan_add_handler_with_hotplug(&pci_root_handler, "pci_root");
}
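The root-bridge notify handler above goes away because pci_root now opts in to the core's generic hotplug machinery. A sketch of the general shape of such a scan handler (all "example" identifiers, including the ACPI ID, are hypothetical):

static int example_attach(struct acpi_device *adev,
			  const struct acpi_device_id *id)
{
	return 1;	/* positive return: the handler claims the device */
}

static void example_detach(struct acpi_device *adev)
{
}

static const struct acpi_device_id example_ids[] = {
	{"EXAM0001", 0},
	{}
};

static struct acpi_scan_handler example_handler = {
	.ids = example_ids,
	.attach = example_attach,
	.detach = example_detach,
	.hotplug = {
		.enabled = true,
	},
};

void __init example_handler_init(void)
{
	/* "example" becomes the profile name under /sys/firmware/acpi/hotplug/ */
	acpi_scan_add_handler_with_hotplug(&example_handler, "example");
}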
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d678a180ca2a..139d9e479370 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -35,6 +35,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci-acpi.h>
static bool debug;
static int check_sta_before_sun;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index c2ad391d8041..e0bcfb642b52 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -42,13 +42,10 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "sleep.h"
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 6a5b152ad4d0..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -3,12 +3,11 @@
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "sleep.h"
+#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
@@ -61,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
- (ldev && device_may_wakeup(ldev))) ?
+ device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b3171f30b319..71e2065639a6 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -10,34 +10,14 @@
#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/slab.h>
-
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
#include "internal.h"
-#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
- printk(KERN_NOTICE PREFIX "%s detected - "
- "disabling mwait for CPU C-states\n", id->ident);
- boot_option_idle_override = IDLE_NOMWAIT;
- return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
-
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -45,13 +25,13 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_apic *)entry;
if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (lapic->processor_id != acpi_id)
- return 0;
+ return -EINVAL;
*apic_id = lapic->id;
- return 1;
+ return 0;
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
@@ -61,14 +41,14 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_x2apic *)entry;
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
*apic_id = apic->local_apic_id;
- return 1;
+ return 0;
}
- return 0;
+ return -EINVAL;
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
@@ -78,16 +58,16 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
(struct acpi_madt_local_sapic *)entry;
if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
- return 0;
+ return -ENODEV;
if (device_declaration) {
if ((entry->length < 16) || (lsapic->uid != acpi_id))
- return 0;
+ return -EINVAL;
} else if (lsapic->processor_id != acpi_id)
- return 0;
+ return -EINVAL;
*apic_id = (lsapic->id << 8) | lsapic->eid;
- return 1;
+ return 0;
}
static int map_madt_entry(int type, u32 acpi_id)
@@ -117,13 +97,13 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &apic_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &apic_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &apic_id))
break;
}
entry += header->length;
@@ -324,7 +304,7 @@ static struct acpi_object_list *acpi_processor_alloc_pdc(void)
* _PDC is required for a BIOS-OS handshake for most of the newer
* ACPI processor features.
*/
-static int
+static acpi_status
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
acpi_status status = AE_OK;
@@ -380,16 +360,43 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-void __init acpi_early_processor_set_pdc(void)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+ pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
+ id->ident);
+ boot_option_idle_override = IDLE_NOMWAIT;
+ return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+static void __init processor_dmi_check(void)
{
/*
 * Check whether the system matches an entry in the DMI table. If so, OSPM
 * should not use mwait for CPU C-states.
*/
dmi_check_system(processor_idle_dmi_table);
+}
+#else
+static inline void processor_dmi_check(void) {}
+#endif
+
+void __init acpi_early_processor_set_pdc(void)
+{
+ processor_dmi_check();
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
- acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 146ab7e2b81d..7f70f3182d50 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -41,8 +41,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
@@ -224,9 +222,9 @@ static int __acpi_processor_start(struct acpi_device *device)
static int acpi_processor_start(struct device *dev)
{
- struct acpi_device *device;
+ struct acpi_device *device = ACPI_COMPANION(dev);
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
+ if (!device)
return -ENODEV;
return __acpi_processor_start(device);
@@ -234,10 +232,10 @@ static int acpi_processor_start(struct device *dev)
static int acpi_processor_stop(struct device *dev)
{
- struct acpi_device *device;
+ struct acpi_device *device = ACPI_COMPANION(dev);
struct acpi_processor *pr;
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
+ if (!device)
return 0;
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f90c56c8379e..3dca36d4ad26 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -35,6 +35,7 @@
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
+#include <acpi/processor.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -46,9 +47,6 @@
#include <asm/apic.h>
#endif
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
@@ -213,7 +211,7 @@ static int acpi_processor_suspend(void)
static void acpi_processor_resume(void)
{
- u32 resumed_bm_rld;
+ u32 resumed_bm_rld = 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
if (resumed_bm_rld == saved_bm_rld)
@@ -598,7 +596,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C2:
if (!cx->address)
break;
- cx->valid = 1;
+ cx->valid = 1;
break;
case ACPI_STATE_C3:
@@ -780,6 +778,13 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
+#endif
+
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -821,6 +826,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (unlikely(!pr))
return -EINVAL;
+#ifdef CONFIG_HOTPLUG_CPU
+ if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+ !pr->flags.has_cst &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+ return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
+#endif
+
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
@@ -917,12 +929,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
if (!cx->valid)
continue;
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- continue;
-#endif
per_cpu(acpi_cstate[count], dev->cpu) = cx;
count++;
@@ -930,8 +936,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
break;
}
- dev->state_count = count;
-
if (!count)
return -EINVAL;
@@ -972,13 +976,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
if (!cx->valid)
continue;
-#ifdef CONFIG_HOTPLUG_CPU
- if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
- !pr->flags.has_cst &&
- !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
- continue;
-#endif
-
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 60a7c28fc167..cfc8aba72f86 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -31,15 +31,12 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
-
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
@@ -159,17 +156,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
*/
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
- union acpi_object params[2] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- };
- struct acpi_object_list arg_list = {2, params};
-
- if (acpi_has_method(handle, "_OST")) {
- params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
- params[1].integer.value = status;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
- }
+ if (acpi_has_method(handle, "_OST"))
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
+ status, NULL);
}
int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index d1d2e7fb5b30..e003663b2f8e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,12 +30,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-
-#include <asm/uaccess.h>
-
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
-#include <acpi/acpi_drivers.h>
+#include <asm/uaccess.h>
#define PREFIX "ACPI: "
@@ -186,14 +183,14 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
#endif
-/* thermal coolign device callbacks */
+/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
int max_state = 0;
/*
* There exists four states according to
- * cpufreq_thermal_reduction_ptg. 0, 1, 2, 3
+ * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
*/
max_state += cpufreq_get_max_state(pr->id);
if (pr->flags.throttling)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index e7dd2c1fee79..84243c32e29c 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,14 +32,11 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
-
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/processor.h>
-
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
@@ -59,6 +56,12 @@ struct throttling_tstate {
int target_state; /* target T-state */
};
+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)
@@ -1063,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
+static long acpi_processor_throttling_fn(void *data)
+{
+ struct acpi_processor_throttling_arg *arg = data;
+ struct acpi_processor *pr = arg->pr;
+
+ return pr->throttling.acpi_processor_set_throttling(pr,
+ arg->target_state, arg->force);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
+ struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
- cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1083,14 +1094,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1099,17 +1102,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENODEV;
}
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpumask_and(online_throttling_cpus, cpu_online_mask,
- p_throttling->shared_cpu_map);
+
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1121,21 +1122,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the pr->id CPU. Exit */
- ret = -ENODEV;
- goto exit;
- }
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state, force);
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask,
+ p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1156,13 +1154,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
"on CPU %d\n", i));
continue;
}
- t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
- continue;
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state, force);
+
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg);
}
}
/*
@@ -1171,17 +1168,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
-exit:
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
+
return ret;
}
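The rework above drops the set_cpus_allowed_ptr() FIXMEs in favour of work_on_cpu(). As a sketch of the pattern (illustrative names; do_something_locally() is a made-up stand-in), the arguments are packaged into a struct and the helper runs synchronously on the target CPU:

struct example_arg {
	int value;
};

/* hypothetical per-CPU operation */
extern long do_something_locally(int value);

static long example_on_cpu(void *data)
{
	struct example_arg *arg = data;

	/* executes in process context on the CPU passed to work_on_cpu() */
	return do_something_locally(arg->value);
}

static long example_call(int cpu, int value)
{
	struct example_arg arg = { .value = value };

	return work_on_cpu(cpu, example_on_cpu, &arg);
}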
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
+ if (!memory24->address_length)
+ return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
+ if (!memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
+ if (!fixed_memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
+ if (!io->address_length)
+ return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
+ if (!fixed_io->address_length)
+ return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index d465ae6cdd00..366ca40a6f70 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -37,12 +37,12 @@
#include <linux/power_supply.h>
#include "sbshc.h"
+#include "battery.h"
#define PREFIX "ACPI: "
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
#define ACPI_SBS_FILE_INFO "info"
#define ACPI_SBS_FILE_STATE "state"
@@ -450,7 +450,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
@@ -668,6 +668,8 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
+#else
+#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index b78bc605837e..26e5b5060523 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -8,8 +8,7 @@
* the Free Software Foundation version 2.
*/
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fd39459926b1..7efe546a8c42 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -12,13 +12,12 @@
#include <linux/dmi.h>
#include <linux/nls.h>
-#include <acpi/acpi_drivers.h>
+#include <asm/pgtable.h>
#include "internal.h"
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("scan");
-#define STRUCT_TO_INT(s) (*((int*)&s))
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
@@ -27,6 +26,8 @@ extern struct acpi_device *acpi_root;
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
+#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
+
/*
* If set, devices will be hot-removed even if they cannot be put offline
* gracefully (from the kernel's standpoint).
@@ -40,6 +41,7 @@ static DEFINE_MUTEX(acpi_scan_lock);
static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
+static DEFINE_MUTEX(acpi_hp_context_lock);
struct acpi_device_bus_id{
char bus_id[15];
@@ -59,6 +61,27 @@ void acpi_scan_lock_release(void)
}
EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
+void acpi_lock_hp_context(void)
+{
+ mutex_lock(&acpi_hp_context_lock);
+}
+
+void acpi_unlock_hp_context(void)
+{
+ mutex_unlock(&acpi_hp_context_lock);
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+ struct acpi_hotplug_context *hp,
+ int (*notify)(struct acpi_device *, u32),
+ void (*uevent)(struct acpi_device *, u32))
+{
+ acpi_lock_hp_context();
+ acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+ acpi_unlock_hp_context();
+}
+EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
+
int acpi_scan_add_handler(struct acpi_scan_handler *handler)
{
if (!handler || !handler->attach)
@@ -85,6 +108,9 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
* char *modalias: "acpi:IBM0001:ACPI0001"
+ * Return: 0: no _HID and no _CID
+ * -EINVAL: output error
+ * -ENOMEM: output is truncated
*/
static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
int size)
@@ -101,8 +127,10 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
count = snprintf(&modalias[len], size, "%s:", id->id);
- if (count < 0 || count >= size)
- return -EINVAL;
+ if (count < 0)
+ return -EINVAL;
+ if (count >= size)
+ return -ENOMEM;
len += count;
size -= count;
}
@@ -111,20 +139,96 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
return len;
}
+/*
+ * Creates the uevent modalias field for ACPI enumerated devices, since other
+ * buses do not support ACPI HIDs & CIDs.
+ * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001"
+ */
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct acpi_device *acpi_dev;
+ int len;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ /* Fall back to bus specific way of modalias exporting */
+ if (list_empty(&acpi_dev->pnp.ids))
+ return -ENODEV;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+ len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len <= 0)
+ return len;
+ env->buflen += len;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
+
+/*
+ * Creates the modalias sysfs attribute for ACPI enumerated devices, since
+ * other buses do not support ACPI HIDs & CIDs.
+ * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001"
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+ struct acpi_device *acpi_dev;
+ int len;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ /* Fall back to bus specific way of modalias exporting */
+ if (list_empty(&acpi_dev->pnp.ids))
+ return -ENODEV;
+
+ len = create_modalias(acpi_dev, buf, size - 1);
+ if (len <= 0)
+ return len;
+ buf[len++] = '\n';
+ return len;
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
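A sketch of how a bus that enumerates its devices from ACPI might reuse the two helpers above for its uevent callback and its modalias sysfs attribute (hypothetical "example" bus, not code from this patch):

static int example_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return acpi_device_uevent_modalias(dev, env);
}

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return acpi_device_modalias(dev, buf, PAGE_SIZE);
}
static DEVICE_ATTR_RO(modalias);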
+
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
int len;
- /* Device has no HID and no CID or string is >1024 */
len = create_modalias(acpi_dev, buf, 1024);
if (len <= 0)
- return 0;
+ return len;
buf[len++] = '\n';
return len;
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+{
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+ if (uevent)
+ kobject_uevent(&pn->dev->kobj, KOBJ_CHANGE);
+
+ offline = false;
+ break;
+ }
+
+ mutex_unlock(&adev->physical_node_lock);
+ return offline;
+}
+
static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
@@ -195,19 +299,11 @@ static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
return AE_OK;
}
-static int acpi_scan_hot_remove(struct acpi_device *device)
+static int acpi_scan_try_to_offline(struct acpi_device *device)
{
acpi_handle handle = device->handle;
- struct device *errdev;
+ struct device *errdev = NULL;
acpi_status status;
- unsigned long long sta;
-
- /* If there is no handle, the device node has been unregistered. */
- if (!handle) {
- dev_dbg(&device->dev, "ACPI handle missing\n");
- put_device(&device->dev);
- return -EINVAL;
- }
/*
* Carry out two passes here and ignore errors in the first pass,
@@ -218,7 +314,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
*
* If the first pass is successful, the second one isn't needed, though.
*/
- errdev = NULL;
status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
NULL, acpi_bus_offline, (void *)false,
(void **)&errdev);
@@ -226,7 +321,6 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
dev_warn(errdev, "Offline disabled.\n");
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_online, NULL, NULL, NULL);
- put_device(&device->dev);
return -EPERM;
}
acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
@@ -245,20 +339,32 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
acpi_walk_namespace(ACPI_TYPE_ANY, handle,
ACPI_UINT32_MAX, acpi_bus_online,
NULL, NULL, NULL);
- put_device(&device->dev);
return -EBUSY;
}
}
+ return 0;
+}
+
+static int acpi_scan_hot_remove(struct acpi_device *device)
+{
+ acpi_handle handle = device->handle;
+ unsigned long long sta;
+ acpi_status status;
+
+ if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
+ if (!acpi_scan_is_offline(device, true))
+ return -EBUSY;
+ } else {
+ int error = acpi_scan_try_to_offline(device);
+ if (error)
+ return error;
+ }
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
acpi_bus_trim(device);
- /* Device node has been unregistered. */
- put_device(&device->dev);
- device = NULL;
-
acpi_evaluate_lck(handle, 0);
/*
* TBD: _EJD support.
@@ -285,146 +391,145 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-void acpi_bus_device_eject(void *data, u32 ost_src)
+static int acpi_scan_device_not_present(struct acpi_device *adev)
{
- struct acpi_device *device = data;
- acpi_handle handle = device->handle;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- int error;
-
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
-
- if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
-
- if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
-
- error = acpi_scan_hot_remove(device);
- if (error == -EPERM) {
- goto err_support;
- } else if (error) {
- goto err_out;
+ if (!acpi_device_enumerated(adev)) {
+ dev_warn(&adev->dev, "Still not present\n");
+ return -EALREADY;
}
+ acpi_bus_trim(adev);
+ return 0;
+}
- out:
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
- return;
+static int acpi_scan_device_check(struct acpi_device *adev)
+{
+ int error;
- err_support:
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- err_out:
- acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL);
- goto out;
+ acpi_bus_get_status(adev);
+ if (adev->status.present || adev->status.functional) {
+ /*
+ * This function is only called for device objects for which
+ * matching scan handlers exist. The only situation in which
+ * the scan handler is not attached to this device object yet
+ * is when the device has just appeared (either it wasn't
+ * present at all before or it was removed and then added
+ * again).
+ */
+ if (adev->handler) {
+ dev_warn(&adev->dev, "Already enumerated\n");
+ return -EALREADY;
+ }
+ error = acpi_bus_scan(adev->handle);
+ if (error) {
+ dev_warn(&adev->dev, "Namespace scan failure\n");
+ return error;
+ }
+ if (!adev->handler) {
+ dev_warn(&adev->dev, "Enumeration failure\n");
+ error = -ENODEV;
+ }
+ } else {
+ error = acpi_scan_device_not_present(adev);
+ }
+ return error;
}
-static void acpi_scan_bus_device_check(void *data, u32 ost_source)
+static int acpi_scan_bus_check(struct acpi_device *adev)
{
- acpi_handle handle = data;
- struct acpi_device *device = NULL;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ struct acpi_scan_handler *handler = adev->handler;
+ struct acpi_device *child;
int error;
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
-
- if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
- acpi_bus_get_device(handle, &device);
- if (device) {
- dev_warn(&device->dev, "Attempt to re-insert\n");
- goto out;
- }
+ acpi_bus_get_status(adev);
+ if (!(adev->status.present || adev->status.functional)) {
+ acpi_scan_device_not_present(adev);
+ return 0;
}
- error = acpi_bus_scan(handle);
+ if (handler && handler->hotplug.scan_dependent)
+ return handler->hotplug.scan_dependent(adev);
+
+ error = acpi_bus_scan(adev->handle);
if (error) {
- acpi_handle_warn(handle, "Namespace scan failure\n");
- goto out;
+ dev_warn(&adev->dev, "Namespace scan failure\n");
+ return error;
}
- error = acpi_bus_get_device(handle, &device);
- if (error) {
- acpi_handle_warn(handle, "Missing device node object\n");
- goto out;
+ list_for_each_entry(child, &adev->children, node) {
+ error = acpi_scan_bus_check(child);
+ if (error)
+ return error;
}
- ost_code = ACPI_OST_SC_SUCCESS;
- if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
- kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
-
- out:
- acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
+ return 0;
}
-static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
+static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
- u32 ost_status;
-
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_BUS_CHECK event: unsupported\n");
- ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
- break;
+ return acpi_scan_bus_check(adev);
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n");
- ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
- break;
+ return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
- acpi_handle_debug(handle,
- "ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n");
- ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- break;
- default:
- /* non-hotplug event; possibly handled by other handler */
- return;
+ case ACPI_OST_EC_OSPM_EJECT:
+ if (adev->handler && !adev->handler->hotplug.enabled) {
+ dev_info(&adev->dev, "Eject disabled\n");
+ return -EPERM;
+ }
+ acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ return acpi_scan_hot_remove(adev);
}
-
- acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL);
+ return -EINVAL;
}
-static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+void acpi_device_hotplug(struct acpi_device *adev, u32 src)
{
- struct acpi_scan_handler *handler = data;
- struct acpi_device *adev;
- acpi_status status;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ int error = -ENODEV;
- if (!handler->hotplug.enabled)
- return acpi_hotplug_unsupported(handle, type);
+ lock_device_hotplug();
+ mutex_lock(&acpi_scan_lock);
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- break;
- case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- break;
- case ACPI_NOTIFY_EJECT_REQUEST:
- acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (acpi_bus_get_device(handle, &adev))
- goto err_out;
+ /*
+ * The device object's ACPI handle cannot become invalid as long as we
+ * are holding acpi_scan_lock, but it might have become invalid before
+ * that lock was acquired.
+ */
+ if (adev->handle == INVALID_ACPI_HANDLE)
+ goto err_out;
- get_device(&adev->dev);
- status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type);
- if (ACPI_SUCCESS(status))
- return;
+ if (adev->flags.is_dock_station) {
+ error = dock_notify(adev, src);
+ } else if (adev->flags.hotplug_notify) {
+ error = acpi_generic_hotplug_event(adev, src);
+ if (error == -EPERM) {
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+ goto err_out;
+ }
+ } else {
+ int (*notify)(struct acpi_device *, u32);
- put_device(&adev->dev);
- goto err_out;
- default:
- /* non-hotplug event; possibly handled by other handler */
- return;
+ acpi_lock_hp_context();
+ notify = adev->hp ? adev->hp->notify : NULL;
+ acpi_unlock_hp_context();
+ /*
+ * There may be additional notify handlers for device objects
+ * without the .event() callback, so ignore them here.
+ */
+ if (notify)
+ error = notify(adev, src);
+ else
+ goto out;
}
- status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type);
- if (ACPI_SUCCESS(status))
- return;
+ if (!error)
+ ost_code = ACPI_OST_SC_SUCCESS;
err_out:
- acpi_evaluate_hotplug_ost(handle, type,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
+
+ out:
+ acpi_bus_put_acpi_device(adev);
+ mutex_unlock(&acpi_scan_lock);
+ unlock_device_hotplug();
}
static ssize_t real_power_state_show(struct device *dev,
@@ -472,17 +577,14 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
- acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
get_device(&acpi_device->dev);
- status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device,
- ACPI_OST_EC_OSPM_EJECT);
+ status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
- acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
@@ -567,6 +669,20 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%llu\n", sta);
+}
+static DEVICE_ATTR_RO(status);
+
static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -622,6 +738,12 @@ static int acpi_device_setup_files(struct acpi_device *dev)
dev->pnp.sun = (unsigned long)-1;
}
+ if (acpi_has_method(dev->handle, "_STA")) {
+ result = device_create_file(&dev->dev, &dev_attr_status);
+ if (result)
+ goto end;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -677,6 +799,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
+ if (acpi_has_method(dev->handle, "_STA"))
+ device_remove_file(&dev->dev, &dev_attr_status);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
@@ -782,8 +906,8 @@ static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen))
- return -ENOMEM;
+ if (len <= 0)
+ return len;
env->buflen += len;
return 0;
}
@@ -907,19 +1031,103 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
-static void acpi_bus_data_handler(acpi_handle handle, void *context)
+static void acpi_device_del(struct acpi_device *device)
{
- /* Intentionally empty. */
+ mutex_lock(&acpi_device_lock);
+ if (device->parent)
+ list_del(&device->node);
+
+ list_del(&device->wakeup_list);
+ mutex_unlock(&acpi_device_lock);
+
+ acpi_power_add_remove_device(device, false);
+ acpi_device_remove_files(device);
+ if (device->remove)
+ device->remove(device);
+
+ device_del(&device->dev);
}
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+static LIST_HEAD(acpi_device_del_list);
+static DEFINE_MUTEX(acpi_device_del_lock);
+
+static void acpi_device_del_work_fn(struct work_struct *work_not_used)
+{
+ for (;;) {
+ struct acpi_device *adev;
+
+ mutex_lock(&acpi_device_del_lock);
+
+ if (list_empty(&acpi_device_del_list)) {
+ mutex_unlock(&acpi_device_del_lock);
+ break;
+ }
+ adev = list_first_entry(&acpi_device_del_list,
+ struct acpi_device, del_list);
+ list_del(&adev->del_list);
+
+ mutex_unlock(&acpi_device_del_lock);
+
+ acpi_device_del(adev);
+ /*
+ * Drop references to all power resources that might have been
+ * used by the device.
+ */
+ acpi_power_transition(adev, ACPI_STATE_D3_COLD);
+ put_device(&adev->dev);
+ }
+}
+
+/**
+ * acpi_scan_drop_device - Drop an ACPI device object.
+ * @handle: Handle of an ACPI namespace node, not used.
+ * @context: Address of the ACPI device object to drop.
+ *
+ * This is invoked by acpi_ns_delete_node() during the removal of the ACPI
+ * namespace node the device object pointed to by @context is attached to.
+ *
+ * The unregistration is carried out asynchronously to avoid running
+ * acpi_device_del() under ACPICA's namespace mutex and the list is used to
+ * ensure the correct ordering (the device objects must be unregistered in the
+ * same order in which the corresponding namespace nodes are deleted).
+ */
+static void acpi_scan_drop_device(acpi_handle handle, void *context)
+{
+ static DECLARE_WORK(work, acpi_device_del_work_fn);
+ struct acpi_device *adev = context;
+
+ mutex_lock(&acpi_device_del_lock);
+
+ /*
+ * Use the ACPI hotplug workqueue which is ordered, so this work item
+ * won't run after any hotplug work items submitted subsequently. That
+ * prevents attempts to register device objects identical to those being
+ * deleted from happening concurrently (such attempts result from
+ * hotplug events handled via the ACPI hotplug workqueue). It also will
+ * run after all of the work items submitted previously, which helps
+ * those work items to ensure that they are not accessing stale device
+ * objects.
+ */
+ if (list_empty(&acpi_device_del_list))
+ acpi_queue_hotplug_work(&work);
+
+ list_add_tail(&adev->del_list, &acpi_device_del_list);
+ /* Make acpi_ns_validate_handle() return NULL for this handle. */
+ adev->handle = INVALID_ACPI_HANDLE;
+
+ mutex_unlock(&acpi_device_del_lock);
+}
+
+static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
+ void (*callback)(void *))
{
acpi_status status;
if (!device)
return -EINVAL;
- status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
+ status = acpi_get_data_full(handle, acpi_scan_drop_device,
+ (void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
handle));
@@ -927,8 +1135,32 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
}
return 0;
}
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+ return acpi_get_device_data(handle, device, NULL);
+}
EXPORT_SYMBOL(acpi_bus_get_device);
+static void get_acpi_device(void *dev)
+{
+ if (dev)
+ get_device(&((struct acpi_device *)dev)->dev);
+}
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_get_device_data(handle, &adev, get_acpi_device);
+ return adev;
+}
+
+void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+ put_device(&adev->dev);
+}
+
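
The reference-counted lookup pairs with acpi_bus_put_acpi_device(); a caller that only has a namespace handle could use it along the lines of the sketch below (illustrative only; foo_check_handle() and the dev_info() message are placeholders):

static void foo_check_handle(acpi_handle handle)
{
	struct acpi_device *adev = acpi_bus_get_acpi_device(handle);

	if (!adev)
		return;

	/* adev cannot be freed while this reference is held. */
	dev_info(&adev->dev, "device object is still registered\n");
	acpi_bus_put_acpi_device(adev);
}
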
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
@@ -939,7 +1171,7 @@ int acpi_device_add(struct acpi_device *device,
if (device->handle) {
acpi_status status;
- status = acpi_attach_data(device->handle, acpi_bus_data_handler,
+ status = acpi_attach_data(device->handle, acpi_scan_drop_device,
device);
if (ACPI_FAILURE(status)) {
acpi_handle_err(device->handle,
@@ -957,6 +1189,7 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->node);
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
+ INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
@@ -1020,37 +1253,10 @@ int acpi_device_add(struct acpi_device *device,
mutex_unlock(&acpi_device_lock);
err_detach:
- acpi_detach_data(device->handle, acpi_bus_data_handler);
+ acpi_detach_data(device->handle, acpi_scan_drop_device);
return result;
}
-static void acpi_device_unregister(struct acpi_device *device)
-{
- mutex_lock(&acpi_device_lock);
- if (device->parent)
- list_del(&device->node);
-
- list_del(&device->wakeup_list);
- mutex_unlock(&acpi_device_lock);
-
- acpi_detach_data(device->handle, acpi_bus_data_handler);
-
- acpi_power_add_remove_device(device, false);
- acpi_device_remove_files(device);
- if (device->remove)
- device->remove(device);
-
- device_del(&device->dev);
- /*
- * Transition the device to D3cold to drop the reference counts of all
- * power resources the device depends on and turn off the ones that have
- * no more references.
- */
- acpi_device_set_power(device, ACPI_STATE_D3_COLD);
- device->handle = NULL;
- put_device(&device->dev);
-}
-
/* --------------------------------------------------------------------------
Driver Management
-------------------------------------------------------------------------- */
@@ -1465,6 +1671,27 @@ bool acpi_bay_match(acpi_handle handle)
return acpi_ata_match(phandle);
}
+bool acpi_device_is_battery(struct acpi_device *adev)
+{
+ struct acpi_hardware_id *hwid;
+
+ list_for_each_entry(hwid, &adev->pnp.ids, list)
+ if (!strcmp("PNP0C0A", hwid->id))
+ return true;
+
+ return false;
+}
+
+static bool is_ejectable_bay(struct acpi_device *adev)
+{
+ acpi_handle handle = adev->handle;
+
+ if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
+ return true;
+
+ return acpi_bay_match(handle);
+}
+
/*
* acpi_dock_match - see if an acpi object has a _DCK method
*/
@@ -1530,6 +1757,20 @@ static bool acpi_ibm_smbus_match(acpi_handle handle)
return false;
}
+static bool acpi_object_is_system_bus(acpi_handle handle)
+{
+ acpi_handle tmp;
+
+ if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
+ tmp == handle)
+ return true;
+ if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
+ tmp == handle)
+ return true;
+
+ return false;
+}
+
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
int device_type)
{
@@ -1581,8 +1822,10 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_add_id(pnp, ACPI_DOCK_HID);
else if (acpi_ibm_smbus_match(handle))
acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
- else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
- acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ else if (list_empty(&pnp->ids) &&
+ acpi_object_is_system_bus(handle)) {
+ /* \_SB, \_TZ, LNXSYBUS */
+ acpi_add_id(pnp, ACPI_BUS_HID);
strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
strcpy(pnp->device_class, ACPI_BUS_CLASS);
}
@@ -1624,11 +1867,13 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->device_type = type;
device->handle = handle;
device->parent = acpi_bus_get_parent(handle);
- STRUCT_TO_INT(device->status) = sta;
+ acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
acpi_bus_get_flags(device);
device->flags.match_driver = false;
+ device->flags.initialized = true;
+ device->flags.visited = false;
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
}
@@ -1713,6 +1958,15 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return 0;
}
+bool acpi_device_is_present(struct acpi_device *adev)
+{
+ if (adev->status.present || adev->status.functional)
+ return true;
+
+ adev->flags.initialized = false;
+ return false;
+}
+
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
char *idstr,
const struct acpi_device_id **matchid)
@@ -1754,33 +2008,23 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
mutex_unlock(&acpi_scan_lock);
}
-static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+static void acpi_scan_init_hotplug(struct acpi_device *adev)
{
- struct acpi_device_pnp pnp = {};
struct acpi_hardware_id *hwid;
- struct acpi_scan_handler *handler;
-
- INIT_LIST_HEAD(&pnp.ids);
- acpi_set_pnp_ids(handle, &pnp, type);
- if (!pnp.type.hardware_id)
- goto out;
+ if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
+ acpi_dock_add(adev);
+ return;
+ }
+ list_for_each_entry(hwid, &adev->pnp.ids, list) {
+ struct acpi_scan_handler *handler;
- /*
- * This relies on the fact that acpi_install_notify_handler() will not
- * install the same notify handler routine twice for the same handle.
- */
- list_for_each_entry(hwid, &pnp.ids, list) {
handler = acpi_scan_match_handler(hwid->id, NULL);
- if (handler && !handler->hotplug.ignore) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- acpi_hotplug_notify_cb, handler);
+ if (handler) {
+ adev->flags.hotplug_notify = true;
break;
}
}
-
-out:
- acpi_free_pnp_ids(&pnp);
}
static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
@@ -1804,24 +2048,12 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
- acpi_scan_init_hotplug(handle, type);
-
- if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
- !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
- struct acpi_device_wakeup wakeup;
-
- if (acpi_has_method(handle, "_PRW")) {
- acpi_bus_extract_wakeup_device_power_package(handle,
- &wakeup);
- acpi_power_resources_list_free(&wakeup.resources);
- }
- return AE_CTRL_DEPTH;
- }
-
acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
+ acpi_scan_init_hotplug(device);
+
out:
if (!*return_value)
*return_value = device;
@@ -1840,48 +2072,57 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
handler = acpi_scan_match_handler(hwid->id, &devid);
if (handler) {
+ device->handler = handler;
ret = handler->attach(device, devid);
- if (ret > 0) {
- device->handler = handler;
+ if (ret > 0)
break;
- } else if (ret < 0) {
+
+ device->handler = NULL;
+ if (ret < 0)
break;
- }
}
}
return ret;
}
-static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
+static void acpi_bus_attach(struct acpi_device *device)
{
- struct acpi_device *device;
- unsigned long long sta_not_used;
+ struct acpi_device *child;
+ acpi_handle ejd;
int ret;
- /*
- * Ignore errors ignored by acpi_bus_check_add() to avoid terminating
- * namespace walks prematurely.
- */
- if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
- return AE_OK;
-
- if (acpi_bus_get_device(handle, &device))
- return AE_CTRL_DEPTH;
+ if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
+ register_dock_dependent_device(device, ejd);
+ acpi_bus_get_status(device);
+ /* Skip devices that are not present. */
+ if (!acpi_device_is_present(device)) {
+ device->flags.visited = false;
+ return;
+ }
if (device->handler)
- return AE_OK;
+ goto ok;
+ if (!device->flags.initialized) {
+ acpi_bus_update_power(device, NULL);
+ device->flags.initialized = true;
+ }
+ device->flags.visited = false;
ret = acpi_scan_attach_handler(device);
if (ret < 0)
- return AE_CTRL_DEPTH;
+ return;
device->flags.match_driver = true;
- if (ret > 0)
- return AE_OK;
+ if (!ret) {
+ ret = device_attach(&device->dev);
+ if (ret < 0)
+ return;
+ }
+ device->flags.visited = true;
- ret = device_attach(&device->dev);
- return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
+ ok:
+ list_for_each_entry(child, &device->children, node)
+ acpi_bus_attach(child);
}
/**
@@ -1901,75 +2142,49 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
int acpi_bus_scan(acpi_handle handle)
{
void *device = NULL;
- int error = 0;
if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add, NULL, NULL, &device);
- if (!device)
- error = -ENODEV;
- else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL)))
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_device_attach, NULL, NULL, NULL);
-
- return error;
-}
-EXPORT_SYMBOL(acpi_bus_scan);
-
-static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
-{
- struct acpi_device *device = NULL;
-
- if (!acpi_bus_get_device(handle, &device)) {
- struct acpi_scan_handler *dev_handler = device->handler;
-
- if (dev_handler) {
- if (dev_handler->detach)
- dev_handler->detach(device);
-
- device->handler = NULL;
- } else {
- device_release_driver(&device->dev);
- }
+ if (device) {
+ acpi_bus_attach(device);
+ return 0;
}
- return AE_OK;
-}
-
-static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **ret_not_used)
-{
- struct acpi_device *device = NULL;
-
- if (!acpi_bus_get_device(handle, &device))
- acpi_device_unregister(device);
-
- return AE_OK;
+ return -ENODEV;
}
+EXPORT_SYMBOL(acpi_bus_scan);
/**
- * acpi_bus_trim - Remove ACPI device node and all of its descendants
- * @start: Root of the ACPI device nodes subtree to remove.
+ * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
+ * @adev: Root of the ACPI namespace scope to walk.
*
* Must be called under acpi_scan_lock.
*/
-void acpi_bus_trim(struct acpi_device *start)
+void acpi_bus_trim(struct acpi_device *adev)
{
+ struct acpi_scan_handler *handler = adev->handler;
+ struct acpi_device *child;
+
+ list_for_each_entry_reverse(child, &adev->children, node)
+ acpi_bus_trim(child);
+
+ adev->flags.match_driver = false;
+ if (handler) {
+ if (handler->detach)
+ handler->detach(adev);
+
+ adev->handler = NULL;
+ } else {
+ device_release_driver(&adev->dev);
+ }
/*
- * Execute acpi_bus_device_detach() as a post-order callback to detach
- * all ACPI drivers from the device nodes being removed.
- */
- acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
- acpi_bus_device_detach, NULL, NULL);
- acpi_bus_device_detach(start->handle, 0, NULL, NULL);
- /*
- * Execute acpi_bus_remove() as a post-order callback to remove device
- * nodes in the given namespace scope.
+ * Most likely, the device is going away, so put it into D3cold before
+ * that.
*/
- acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
- acpi_bus_remove, NULL, NULL);
- acpi_bus_remove(start->handle, 0, NULL, NULL);
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+ adev->flags.initialized = false;
+ adev->flags.visited = false;
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -2031,7 +2246,6 @@ int __init acpi_scan_init(void)
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
- acpi_dock_init();
mutex_lock(&acpi_scan_lock);
/*
@@ -2047,14 +2261,14 @@ int __init acpi_scan_init(void)
result = acpi_bus_scan_fixed();
if (result) {
- acpi_device_unregister(acpi_root);
+ acpi_detach_data(acpi_root->handle, acpi_scan_drop_device);
+ acpi_device_del(acpi_root);
+ put_device(&acpi_root->dev);
goto out;
}
acpi_update_all_gpes();
- acpi_pci_root_hp_init();
-
out:
mutex_unlock(&acpi_scan_lock);
return result;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 721e949e606e..c40fb2e81bbc 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -18,12 +18,8 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
-
#include <asm/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#include "internal.h"
#include "sleep.h"
@@ -75,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
return 0;
}
+static bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
+ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
+ || (acpi_gbl_FADT.sleep_control.address
+ && acpi_gbl_FADT.sleep_status.address));
+}
+
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
@@ -608,15 +615,9 @@ static void acpi_sleep_suspend_setup(void)
{
int i;
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
+ if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- }
- }
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -670,11 +671,8 @@ static void acpi_hibernation_leave(void)
/* Reprogram control registers */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
- if (facs && s4_hardware_signature != facs->hardware_signature) {
- printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
- "cannot resume!\n");
- panic("ACPI S4 hardware signature mismatch");
- }
+ if (facs && s4_hardware_signature != facs->hardware_signature)
+ pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
@@ -747,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
static void acpi_sleep_hibernate_setup(void)
{
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_FAILURE(status))
+ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
return;
hibernation_set_ops(old_suspend_ordering ?
@@ -800,15 +794,10 @@ static void acpi_power_off(void)
int __init acpi_sleep_init(void)
{
- acpi_status status;
- u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
- if (acpi_disabled)
- return 0;
-
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
@@ -816,8 +805,7 @@ int __init acpi_sleep_init(void)
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 6dbc3ca45223..38cb9782d4b8 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -5,15 +5,13 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");
-#define PREFIX "ACPI: "
-
#ifdef CONFIG_ACPI_DEBUG
/*
* ACPI debug sysfs I/F, including:
@@ -226,7 +224,7 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
/* /sys/modules/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
+ byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index d67a1fe07f0e..21782290df41 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
@@ -33,8 +35,6 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
-#define PREFIX "ACPI: "
-
#define ACPI_MAX_TABLES 128
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -55,10 +55,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
- printk(KERN_INFO PREFIX
- "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
- p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -66,11 +65,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
- printk(KERN_INFO PREFIX
- "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
- p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ?
- "enabled" : "disabled");
+ pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+ p->local_apic_id, p->uid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -78,9 +75,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
- printk(KERN_INFO PREFIX
- "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
- p->id, p->address, p->global_irq_base);
+ pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
}
break;
@@ -88,18 +84,15 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_interrupt_override *p =
(struct acpi_madt_interrupt_override *)header;
- printk(KERN_INFO PREFIX
- "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
- p->bus, p->source_irq, p->global_irq,
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
+ pr_info("INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+ p->bus, p->source_irq, p->global_irq,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
if (p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
- printk(KERN_INFO PREFIX
- "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
- p->inti_flags &
+ pr_info("INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+ p->inti_flags &
~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
-
}
break;
@@ -107,11 +100,10 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_nmi_source *p =
(struct acpi_madt_nmi_source *)header;
- printk(KERN_INFO PREFIX
- "NMI_SRC (%s %s global_irq %d)\n",
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->global_irq);
+ pr_info("NMI_SRC (%s %s global_irq %d)\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->global_irq);
}
break;
@@ -119,12 +111,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic_nmi *p =
(struct acpi_madt_local_apic_nmi *)header;
- printk(KERN_INFO PREFIX
- "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
- p->processor_id,
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->lint);
+ pr_info("LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+ p->processor_id,
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->lint);
}
break;
@@ -137,12 +128,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
- printk(KERN_INFO PREFIX
- "X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
- p->uid,
- mps_inti_flags_polarity[polarity],
- mps_inti_flags_trigger[trigger],
- p->lint);
+ pr_info("X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
+ p->uid,
+ mps_inti_flags_polarity[polarity],
+ mps_inti_flags_trigger[trigger],
+ p->lint);
}
break;
@@ -150,9 +140,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic_override *p =
(struct acpi_madt_local_apic_override *)header;
- printk(KERN_INFO PREFIX
- "LAPIC_ADDR_OVR (address[%p])\n",
- (void *)(unsigned long)p->address);
+ pr_info("LAPIC_ADDR_OVR (address[%p])\n",
+ (void *)(unsigned long)p->address);
}
break;
@@ -160,10 +149,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
- printk(KERN_INFO PREFIX
- "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
- p->id, (void *)(unsigned long)p->address,
- p->global_irq_base);
+ pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
}
break;
@@ -171,10 +159,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
- printk(KERN_INFO PREFIX
- "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
- p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -182,19 +169,17 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_interrupt_source *p =
(struct acpi_madt_interrupt_source *)header;
- printk(KERN_INFO PREFIX
- "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
- mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
- mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
- p->type, p->id, p->eid, p->io_sapic_vector,
- p->global_irq);
+ pr_info("PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+ mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+ mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+ p->type, p->id, p->eid, p->io_sapic_vector,
+ p->global_irq);
}
break;
default:
- printk(KERN_WARNING PREFIX
- "Found unsupported MADT entry (type = 0x%x)\n",
- header->type);
+ pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
+ header->type);
break;
}
}
@@ -225,7 +210,7 @@ acpi_table_parse_entries(char *id,
acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
if (!table_header) {
- printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
+ pr_warn("%4.4s not present\n", id);
return -ENODEV;
}
@@ -248,7 +233,7 @@ acpi_table_parse_entries(char *id,
* infinite loop.
*/
if (entry->length == 0) {
- pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
+ pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
goto err;
}
@@ -256,8 +241,8 @@ acpi_table_parse_entries(char *id,
((unsigned long)entry + entry->length);
}
if (max_entries && count > max_entries) {
- printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
- "%i found\n", id, entry_id, count - max_entries, count);
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, entry_id, count - max_entries, count);
}
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
@@ -278,12 +263,13 @@ acpi_table_parse_madt(enum acpi_madt_type id,
/**
* acpi_table_parse - find table with @id, run @handler on it
- *
* @id: table id to find
* @handler: handler to run
*
 * Scan the ACPI System Descriptor Table (SDT) for a table matching @id,
- * run @handler on it. Return 0 if table found, return on if not.
+ * run @handler on it.
+ *
+ * Return 0 if table found, -errno if not.
*/
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
@@ -293,7 +279,7 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
if (acpi_disabled)
return -ENODEV;
- if (!handler)
+ if (!id || !handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
@@ -306,7 +292,7 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
early_acpi_os_unmap_memory(table, tbl_size);
return 0;
} else
- return 1;
+ return -ENODEV;
}
/*
@@ -321,13 +307,11 @@ static void __init check_multiple_madt(void)
acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
if (table) {
- printk(KERN_WARNING PREFIX
- "BIOS bug: multiple APIC/MADT found,"
- " using %d\n", acpi_apic_instance);
- printk(KERN_WARNING PREFIX
- "If \"acpi_apic_instance=%d\" works better, "
- "notify linux-acpi@vger.kernel.org\n",
- acpi_apic_instance ? 0 : 2);
+ pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
+ acpi_apic_instance);
+ pr_warn("If \"acpi_apic_instance=%d\" works better, "
+ "notify linux-acpi@vger.kernel.org\n",
+ acpi_apic_instance ? 0 : 2);
early_acpi_os_unmap_memory(table, tbl_size);
} else
@@ -351,7 +335,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
- return 1;
+ return -EINVAL;
check_multiple_madt();
return 0;
@@ -364,8 +348,7 @@ static int __init acpi_parse_apic_instance(char *str)
acpi_apic_instance = simple_strtoul(str, NULL, 0);
- printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
- acpi_apic_instance);
+ pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
return 0;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0d9f46b5ae6d..964068553334 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -41,10 +41,10 @@
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/device.h>
-#include <asm/uaccess.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
#define PREFIX "ACPI: "
@@ -91,6 +91,8 @@ static int psv;
module_param(psv, int, 0644);
MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
+static struct workqueue_struct *acpi_thermal_pm_queue;
+
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
@@ -102,9 +104,13 @@ static const struct acpi_device_id thermal_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
#ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev);
static int acpi_thermal_resume(struct device *dev);
+#else
+#define acpi_thermal_suspend NULL
+#define acpi_thermal_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
@@ -185,6 +191,7 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int tz_enabled;
int kelvin_offset;
+ struct work_struct thermal_check_work;
};
/* --------------------------------------------------------------------------
@@ -862,7 +869,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
-static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
@@ -1063,6 +1070,13 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
tz->kelvin_offset = 2732;
}
+static void acpi_thermal_check_fn(struct work_struct *work)
+{
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+ acpi_thermal_check(tz);
+}
+
static int acpi_thermal_add(struct acpi_device *device)
{
int result = 0;
@@ -1092,6 +1106,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
goto end;
@@ -1109,6 +1125,7 @@ static int acpi_thermal_remove(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return -EINVAL;
+ flush_workqueue(acpi_thermal_pm_queue);
tz = acpi_driver_data(device);
acpi_thermal_unregister_thermal_zone(tz);
@@ -1117,6 +1134,13 @@ static int acpi_thermal_remove(struct acpi_device *device)
}
#ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev)
+{
+ /* Make sure the previously queued thermal check work has been done */
+ flush_workqueue(acpi_thermal_pm_queue);
+ return 0;
+}
+
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
@@ -1147,7 +1171,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- acpi_thermal_check(tz);
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
return AE_OK;
}
@@ -1239,16 +1263,22 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
+ acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+ if (!acpi_thermal_pm_queue)
+ return -ENODEV;
+
result = acpi_bus_register_driver(&acpi_thermal_driver);
- if (result < 0)
+ if (result < 0) {
+ destroy_workqueue(acpi_thermal_pm_queue);
return -ENODEV;
+ }
return 0;
}
static void __exit acpi_thermal_exit(void)
{
-
 acpi_bus_unregister_driver(&acpi_thermal_driver);
+ destroy_workqueue(acpi_thermal_pm_queue);
return;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6d408bfbbb1d..0f5f78fa6545 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include "internal.h"
@@ -101,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
@@ -428,7 +422,7 @@ out:
EXPORT_SYMBOL(acpi_get_physical_device_location);
/**
- * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
+ * acpi_evaluate_ost: Evaluate _OST for hotplug operations
* @handle: ACPI device handle
* @source_event: source event code
* @status_code: status code
@@ -439,17 +433,15 @@ EXPORT_SYMBOL(acpi_get_physical_device_location);
* When the platform does not support _OST, this function has no effect.
*/
acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
- u32 status_code, struct acpi_buffer *status_buf)
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+ struct acpi_buffer *status_buf)
{
-#ifdef ACPI_HOTPLUG_OST
union acpi_object params[3] = {
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_INTEGER,},
{.type = ACPI_TYPE_BUFFER,}
};
struct acpi_object_list arg_list = {3, params};
- acpi_status status;
params[0].integer.value = source_event;
params[1].integer.value = status_code;
@@ -461,13 +453,9 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
params[2].buffer.length = 0;
}
- status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
- return status;
-#else
- return AE_OK;
-#endif
+ return acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}
-EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+EXPORT_SYMBOL(acpi_evaluate_ost);
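
For example, a hotplug path could acknowledge an eject request with the renamed helper roughly as below. This is a sketch only; foo_handle_eject() and do_eject() are placeholders for the caller and for the actual removal work.

static void foo_handle_eject(acpi_handle handle)
{
	/* Tell the platform the eject is in progress, then report the result. */
	acpi_evaluate_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
			  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);

	if (do_eject(handle))		/* do_eject() is a placeholder */
		acpi_evaluate_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
				  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
	else
		acpi_evaluate_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
				  ACPI_OST_SC_SUCCESS, NULL);
}
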
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
@@ -574,3 +562,100 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
return status;
}
+
+/**
+ * acpi_evaluate_dsm - evaluate device's _DSM method
+ * @handle: ACPI device handle
+ * @uuid: UUID of requested functions, should be 16 bytes
+ * @rev: revision number of requested function
+ * @func: requested function number
+ * @argv4: the function specific parameter
+ *
+ * Evaluate device's _DSM method with specified UUID, revision id and
+ * function number. Caller needs to free the returned object.
+ *
+ * Though ACPI defines that the fourth parameter to _DSM should be a package,
+ * some old BIOSes expect a buffer or an integer instead.
+ */
+union acpi_object *
+acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, int rev, int func,
+ union acpi_object *argv4)
+{
+ acpi_status ret;
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object params[4];
+ struct acpi_object_list input = {
+ .count = 4,
+ .pointer = params,
+ };
+
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = 16;
+ params[0].buffer.pointer = (char *)uuid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = rev;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ if (argv4) {
+ params[3] = *argv4;
+ } else {
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
+ }
+
+ ret = acpi_evaluate_object(handle, "_DSM", &input, &buf);
+ if (ACPI_SUCCESS(ret))
+ return (union acpi_object *)buf.pointer;
+
+ if (ret != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "failed to evaluate _DSM (0x%x)\n", ret);
+
+ return NULL;
+}
+EXPORT_SYMBOL(acpi_evaluate_dsm);
+
+/**
+ * acpi_check_dsm - check if _DSM method supports requested functions.
+ * @handle: ACPI device handle
+ * @uuid: UUID of requested functions, should be 16 bytes
+ * @rev: revision number of requested functions
+ * @funcs: bitmap of requested functions
+ *
+ * Evaluate device's _DSM method to check whether it supports requested
+ * functions. Currently at most 64 functions are supported, which should be
+ * enough for now.
+ */
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
+{
+ int i;
+ u64 mask = 0;
+ union acpi_object *obj;
+
+ if (funcs == 0)
+ return false;
+
+ obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
+ if (!obj)
+ return false;
+
+ /* For compatibility, old BIOSes may return an integer */
+ if (obj->type == ACPI_TYPE_INTEGER)
+ mask = obj->integer.value;
+ else if (obj->type == ACPI_TYPE_BUFFER)
+ for (i = 0; i < obj->buffer.length && i < 8; i++)
+ mask |= (u64)obj->buffer.pointer[i] << (i * 8);
+ ACPI_FREE(obj);
+
+ /*
+ * Bit 0 indicates whether there's support for any functions other than
+ * function 0 for the specified UUID and revision.
+ */
+ if ((mask & 0x1) && (mask & funcs) == funcs)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(acpi_check_dsm);
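
A typical consumer would gate an optional feature on acpi_check_dsm() and then call acpi_evaluate_dsm() for the function it needs, roughly as in the sketch below. The function name foo_probe_dsm(), the UUID bytes, and the revision/function numbers are invented for illustration.

static int foo_probe_dsm(acpi_handle handle)
{
	static const u8 my_uuid[16] = { 0 };	/* device-specific UUID bytes */
	union acpi_object *obj;

	/* Function 0 must advertise function 2 of revision 1. */
	if (!acpi_check_dsm(handle, my_uuid, 1, BIT(2)))
		return -ENODEV;

	obj = acpi_evaluate_dsm(handle, my_uuid, 1, 2, NULL);
	if (!obj)
		return -EIO;

	/* ... interpret obj->type / obj->integer.value / obj->buffer ... */
	ACPI_FREE(obj);
	return 0;
}
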
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 995e91bcb97b..48c7e8af9c96 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -37,17 +37,14 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
#include <linux/dmi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
#include <acpi/video.h>
+#include <asm/uaccess.h>
#include "internal.h"
-#define PREFIX "ACPI: "
-
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -82,11 +79,12 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
- * For Windows 8 systems: if set ture and the GPU driver has
- * registered a backlight interface, skip registering ACPI video's.
+ * For Windows 8 systems: used to decide whether the video module
+ * should skip registering its own backlight interface.
*/
-static bool use_native_backlight = false;
-module_param(use_native_backlight, bool, 0644);
+static int use_native_backlight_param = -1;
+module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
+static bool use_native_backlight_dmi = false;
static int register_count;
static struct mutex video_list_lock;
@@ -232,9 +230,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_use_native_backlight(void)
+{
+ if (use_native_backlight_param != -1)
+ return use_native_backlight_param;
+ else
+ return use_native_backlight_dmi;
+}
+
static bool acpi_video_verify_backlight_support(void)
{
- if (acpi_osi_is_win8() && use_native_backlight &&
+ if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
@@ -399,6 +405,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
+static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = true;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -443,6 +455,120 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T430s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X1 Carbon",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire 5733Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-431",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4340s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 8780w",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
{}
};
@@ -686,6 +812,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
+ u32 value;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -716,7 +843,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
- br->levels[count] = (u32) o->integer.value;
+ value = (u32) o->integer.value;
+ /* Skip duplicate entries */
+ if (count > 2 && br->levels[count - 1] == value)
+ continue;
+
+ br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 84875fd4c74f..33e3db548a29 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -40,8 +40,6 @@
#include "internal.h"
-#define PREFIX "ACPI: "
-
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
@@ -50,7 +48,7 @@ static bool acpi_video_caps_checked;
static acpi_status
acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
- void **retyurn_value)
+ void **return_value)
{
long *cap = context;
@@ -168,14 +166,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Lenovo Yoga 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
- },
- },
{ },
};
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 7bfbe40bc43b..1638401ab282 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index c4876ac9151a..9e6029105607 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -83,162 +83,6 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
-#ifdef CONFIG_PM_SLEEP
-
-static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
- struct amba_driver *adrv = to_amba_driver(dev->driver);
- struct amba_device *adev = to_amba_device(dev);
- int ret = 0;
-
- if (dev->driver && adrv->suspend)
- ret = adrv->suspend(adev, mesg);
-
- return ret;
-}
-
-static int amba_legacy_resume(struct device *dev)
-{
- struct amba_driver *adrv = to_amba_driver(dev->driver);
- struct amba_device *adev = to_amba_device(dev);
- int ret = 0;
-
- if (dev->driver && adrv->resume)
- ret = adrv->resume(adev);
-
- return ret;
-}
-
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_SUSPEND
-
-static int amba_pm_suspend(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend)
- ret = drv->pm->suspend(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
- }
-
- return ret;
-}
-
-static int amba_pm_resume(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume)
- ret = drv->pm->resume(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_SUSPEND */
-
-#define amba_pm_suspend NULL
-#define amba_pm_resume NULL
-
-#endif /* !CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-
-static int amba_pm_freeze(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze)
- ret = drv->pm->freeze(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_FREEZE);
- }
-
- return ret;
-}
-
-static int amba_pm_thaw(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw)
- ret = drv->pm->thaw(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int amba_pm_poweroff(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff)
- ret = drv->pm->poweroff(dev);
- } else {
- ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
- }
-
- return ret;
-}
-
-static int amba_pm_restore(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore)
- ret = drv->pm->restore(dev);
- } else {
- ret = amba_legacy_resume(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define amba_pm_freeze NULL
-#define amba_pm_thaw NULL
-#define amba_pm_poweroff NULL
-#define amba_pm_restore NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
#ifdef CONFIG_PM_RUNTIME
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
@@ -251,7 +95,7 @@ static int amba_pm_runtime_suspend(struct device *dev)
int ret = pm_generic_runtime_suspend(dev);
if (ret == 0 && dev->driver)
- clk_disable(pcdev->pclk);
+ clk_disable_unprepare(pcdev->pclk);
return ret;
}
@@ -262,7 +106,7 @@ static int amba_pm_runtime_resume(struct device *dev)
int ret;
if (dev->driver) {
- ret = clk_enable(pcdev->pclk);
+ ret = clk_prepare_enable(pcdev->pclk);
/* Failure is probably fatal to the system, but... */
if (ret)
return ret;
@@ -272,15 +116,13 @@ static int amba_pm_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
-
static const struct dev_pm_ops amba_pm = {
- .suspend = amba_pm_suspend,
- .resume = amba_pm_resume,
- .freeze = amba_pm_freeze,
- .thaw = amba_pm_thaw,
- .poweroff = amba_pm_poweroff,
- .restore = amba_pm_restore,
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
@@ -288,14 +130,6 @@ static const struct dev_pm_ops amba_pm = {
)
};
-#define AMBA_PM (&amba_pm)
-
-#else /* !CONFIG_PM */
-
-#define AMBA_PM NULL
-
-#endif /* !CONFIG_PM */
-
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
@@ -305,7 +139,7 @@ struct bus_type amba_bustype = {
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
- .pm = AMBA_PM,
+ .pm = &amba_pm,
};
static int __init amba_init(void)
@@ -317,36 +151,23 @@ postcore_initcall(amba_init);
static int amba_get_enable_pclk(struct amba_device *pcdev)
{
- struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk");
int ret;
- pcdev->pclk = pclk;
-
- if (IS_ERR(pclk))
- return PTR_ERR(pclk);
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
- ret = clk_prepare(pclk);
- if (ret) {
- clk_put(pclk);
- return ret;
- }
-
- ret = clk_enable(pclk);
- if (ret) {
- clk_unprepare(pclk);
- clk_put(pclk);
- }
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
return ret;
}
static void amba_put_disable_pclk(struct amba_device *pcdev)
{
- struct clk *pclk = pcdev->pclk;
-
- clk_disable(pclk);
- clk_unprepare(pclk);
- clk_put(pclk);
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
}
/*
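clk_prepare_enable() and clk_disable_unprepare() fold the prepare+enable and disable+unprepare pairs into single calls, which is what lets the error unwinding above collapse to one clk_put(). A hedged sketch of the same pairing in a hypothetical consumer (my_enable_pclk/my_disable_pclk are illustrative; "apb_pclk" is the lookup name used above):

	static int my_enable_pclk(struct device *dev, struct clk **pclk)
	{
		int ret;

		*pclk = clk_get(dev, "apb_pclk");
		if (IS_ERR(*pclk))
			return PTR_ERR(*pclk);

		ret = clk_prepare_enable(*pclk);	/* prepare + enable in one call */
		if (ret)
			clk_put(*pclk);
		return ret;
	}

	static void my_disable_pclk(struct clk *pclk)
	{
		clk_disable_unprepare(pclk);		/* must pair with clk_prepare_enable() */
		clk_put(pclk);
	}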
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4e737728aee2..20e03a7eb8b4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -11,13 +11,13 @@ config HAVE_PATA_PLATFORM
to update the PATA_PLATFORM entry.
menuconfig ATA
- tristate "Serial ATA and Parallel ATA drivers"
+ tristate "Serial ATA and Parallel ATA drivers (libata)"
depends on HAS_IOMEM
depends on BLOCK
depends on !(M32R || M68K || S390) || BROKEN
select SCSI
---help---
- If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
+ If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
any other ATA device under Linux, say Y and make sure that you know
the name of your ATA host adapter (the card inside your computer
that "speaks" the ATA protocol, also called ATA controller),
@@ -60,7 +60,7 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
- depends on ATA_ACPI
+ depends on ATA_ACPI && PM_RUNTIME
default n
help
This option adds support for SATA Zero Power Optical Disc
@@ -97,15 +97,48 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
+config AHCI_DA850
+ tristate "DaVinci DA850 AHCI SATA support"
+ depends on ARCH_DAVINCI_DA850
+ help
+ This option enables support for the DaVinci DA850 SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
+config AHCI_ST
+ tristate "ST AHCI SATA support"
+ depends on ARCH_STI
+ help
+ This option enables support for ST AHCI SATA controller.
+
+ If unsure, say N.
+
config AHCI_IMX
tristate "Freescale i.MX AHCI SATA support"
- depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+ depends on MFD_SYSCON
help
This option enables support for the Freescale i.MX SoC's
onboard AHCI SATA.
If unsure, say N.
+config AHCI_SUNXI
+ tristate "Allwinner sunxi AHCI SATA support"
+ depends on ARCH_SUNXI
+ help
+ This option enables support for the Allwinner sunxi SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
+config AHCI_XGENE
+ tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
+ depends on ARM64 || COMPILE_TEST
+ select PHY_XGENE
+ help
+ This option enables support for APM X-Gene SoC SATA host controller.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
@@ -239,6 +272,7 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
+ depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
onboard SATA.
@@ -247,6 +281,9 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ depends on PCI || ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
+ select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,
@@ -272,6 +309,7 @@ config SATA_PROMISE
config SATA_RCAR
tristate "Renesas R-Car SATA support"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
This option enables support for Renesas R-Car Serial ATA.
@@ -351,6 +389,7 @@ config PATA_AMD
config PATA_ARASAN_CF
tristate "ARASAN CompactFlash PATA Controller Support"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on DMADEVICES
select DMA_ENGINE
help
@@ -402,7 +441,7 @@ config PATA_CMD64X
config PATA_CS5520
tristate "CS5510/5520 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the Cyrix 5510/5520
companion chip used with the MediaGX/Geode processor family.
@@ -411,7 +450,7 @@ config PATA_CS5520
config PATA_CS5530
tristate "CS5530 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
@@ -420,7 +459,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86 && !X86_64
+ depends on PCI && X86_32
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -429,7 +468,7 @@ config PATA_CS5535
config PATA_CS5536
tristate "CS5536 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@@ -665,7 +704,7 @@ config PATA_RDC
config PATA_SC1200
tristate "SC1200 PATA support"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 46518c622460..44c8016e565c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,13 +4,17 @@ obj-$(CONFIG_ATA) += libata.o
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
-obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
+obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index fd665d919df2..b51605ac5974 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc2756fb6f33..5a0bf8ed649b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -61,6 +60,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@@ -121,6 +121,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_noncq] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
@@ -452,6 +459,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ */
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -564,6 +577,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
bool online;
int rc;
@@ -574,7 +588,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
@@ -589,6 +603,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -604,7 +619,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* The pseudo configuration device on SIMG4726 attached to
* ASUS P5W-DH Deluxe doesn't send signature FIS after
@@ -1151,13 +1166,13 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
goto intx;
- rc = pci_msi_vec_count(pdev);
- if (rc < 0)
+ nvec = pci_msi_vec_count(pdev);
+ if (nvec < 0)
goto intx;
/*
@@ -1165,19 +1180,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
* Message mode could be enforced. In this case assume that advantage
 * of multiple MSIs is negated and use single MSI mode instead.
*/
- if (rc < n_ports)
+ if (nvec < n_ports)
goto single_msi;
- nvec = rc;
- rc = pci_enable_msi_block(pdev, nvec);
- if (rc)
+ nvec = pci_enable_msi_range(pdev, nvec, nvec);
+ if (nvec == -ENOSPC)
+ goto single_msi;
+ else if (nvec < 0)
goto intx;
return nvec;
single_msi:
- rc = pci_enable_msi(pdev);
- if (rc)
+ if (pci_enable_msi(pdev))
goto intx;
return 1;
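pci_enable_msi_range(pdev, minvec, maxvec) returns the number of vectors actually allocated (somewhere in [minvec, maxvec]) or a negative errno. Calling it with minvec == maxvec, as above, makes the allocation all-or-nothing; -ENOSPC then means fewer vectors than requested are available, which is why that one error drops to single-MSI while any other failure goes straight to INTx. A sketch of the more permissive form, with placeholder names:

	/* accept anything from one vector up to one per port */
	nvec = pci_enable_msi_range(pdev, 1, n_ports);
	if (nvec < 0)
		return nvec;	/* no MSI available at all */
	/* nvec may be smaller than n_ports; size per-vector handling accordingly */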
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2289efdf8203..51af275b3388 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -37,6 +37,8 @@
#include <linux/clk.h>
#include <linux/libata.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000
@@ -51,6 +53,7 @@
enum {
AHCI_MAX_PORTS = 32,
+ AHCI_MAX_CLKS = 3,
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_MAX_CMDS = 32,
@@ -321,8 +324,17 @@ struct ahci_host_priv {
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
- struct clk *clk; /* Only for platforms supporting clk */
+ bool got_runtime_pm; /* Did we do pm_runtime_get? */
+ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
+ struct regulator *target_pwr; /* Optional */
+ struct phy *phy; /* If platform uses phy */
void *plat_data; /* Other platform data */
+ /*
+ * Optional ahci_start_engine override, if not set this gets set to the
+ * default ahci_start_engine during ahci_save_initial_config, this can
+ * be overridden anytime before the host is activated.
+ */
+ void (*start_engine)(struct ata_port *ap);
};
extern int ahci_ignore_sss;
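The new start_engine member lets a platform driver slip controller-specific setup in front of the point where the port's DMA engine is kicked; ahci_sunxi further down in this series installs its own hook to program AHCI_P0DMACR first. A minimal sketch of installing an override (my_start_engine is illustrative; the body shown mirrors what the default ahci_start_engine does):

	static void my_start_engine(struct ata_port *ap)
	{
		void __iomem *port_mmio = ahci_port_base(ap);

		/* controller-specific setup would go here, then start DMA */
		writel(readl(port_mmio + PORT_CMD) | PORT_CMD_START,
		       port_mmio + PORT_CMD);
	}

	/* in probe, after ahci_platform_get_resources(): */
	hpriv->start_engine = my_start_engine;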
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
new file mode 100644
index 000000000000..2c83613ce2db
--- /dev/null
+++ b/drivers/ata/ahci_da850.c
@@ -0,0 +1,114 @@
+/*
+ * DaVinci DA850 AHCI SATA platform driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+/* SATA PHY Control Register offset from AHCI base */
+#define SATA_P0PHYCR_REG 0x178
+
+#define SATA_PHY_MPY(x) ((x) << 0)
+#define SATA_PHY_LOS(x) ((x) << 6)
+#define SATA_PHY_RXCDR(x) ((x) << 10)
+#define SATA_PHY_RXEQ(x) ((x) << 13)
+#define SATA_PHY_TXSWING(x) ((x) << 19)
+#define SATA_PHY_ENPLL(x) ((x) << 31)
+
+/*
+ * The multiplier needed for 1.5GHz PLL output.
+ *
+ * NOTE: This is currently hardcoded to be suitable for 100MHz crystal
+ * frequency (which is used by the DA850 EVM board) and may need to be changed
+ * if you would like to use this driver on some other board.
+ */
+#define DA850_SATA_CLK_MULTIPLIER 7
+
+static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+ void __iomem *ahci_base)
+{
+ unsigned int val;
+
+ /* Enable SATA clock receiver */
+ val = readl(pwrdn_reg);
+ val &= ~BIT(0);
+ writel(val, pwrdn_reg);
+
+ val = SATA_PHY_MPY(DA850_SATA_CLK_MULTIPLIER + 1) | SATA_PHY_LOS(1) |
+ SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) |
+ SATA_PHY_ENPLL(1);
+
+ writel(val, ahci_base + SATA_P0PHYCR_REG);
+}
+
+static const struct ata_port_info ahci_da850_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static int ahci_da850_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct resource *res;
+ void __iomem *pwrdn_reg;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ goto disable_resources;
+
+ pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pwrdn_reg)
+ goto disable_resources;
+
+ da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static struct platform_driver ahci_da850_driver = {
+ .probe = ahci_da850_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci_da850",
+ .owner = THIS_MODULE,
+ .pm = &ahci_da850_pm_ops,
+ },
+};
+module_platform_driver(ahci_da850_driver);
+
+MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index dd4d6f74d7bd..497c7abe1c7d 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -42,13 +42,7 @@ enum ahci_imx_type {
struct imx_ahci_priv {
struct platform_device *ahci_pdev;
enum ahci_imx_type type;
-
- /* i.MX53 clock */
- struct clk *sata_gate_clk;
- /* Common clock */
- struct clk *sata_ref_clk;
struct clk *ahb_clk;
-
struct regmap *gpr;
bool no_device;
bool first_time;
@@ -58,28 +52,52 @@ static int ahci_imx_hotplug;
module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
-static int imx_sata_clock_enable(struct device *dev)
+static void ahci_imx_host_stop(struct ata_host *host);
+
+static int imx_sata_enable(struct ahci_host_priv *hpriv)
{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
int ret;
- if (imxpriv->type == AHCI_IMX53) {
- ret = clk_prepare_enable(imxpriv->sata_gate_clk);
- if (ret < 0) {
- dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
- ret);
+ if (imxpriv->no_device)
+ return 0;
+
+ if (hpriv->target_pwr) {
+ ret = regulator_enable(hpriv->target_pwr);
+ if (ret)
return ret;
- }
}
- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
- if (ret < 0) {
- dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
- ret);
- goto clk_err;
- }
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret < 0)
+ goto disable_regulator;
if (imxpriv->type == AHCI_IMX6Q) {
+ /*
+ * set PHY parameters, two steps to configure the GPR13,
+ * one write for rest of parameters, mask of first write
+ * is 0x07ffffff, and the other one write for setting
+ * the mpll_clk_en.
+ */
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+ IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+ IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+ IMX6Q_GPR13_SATA_TX_LVL_MASK |
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+ IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+ IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+ IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+ IMX6Q_GPR13_SATA_MPLL_SS_EN |
+ IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+ IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+ IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
IMX6Q_GPR13_SATA_MPLL_CLK_EN);
@@ -89,15 +107,19 @@ static int imx_sata_clock_enable(struct device *dev)
return 0;
-clk_err:
- if (imxpriv->type == AHCI_IMX53)
- clk_disable_unprepare(imxpriv->sata_gate_clk);
+disable_regulator:
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+
return ret;
}
-static void imx_sata_clock_disable(struct device *dev)
+static void imx_sata_disable(struct ahci_host_priv *hpriv)
{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+ if (imxpriv->no_device)
+ return;
if (imxpriv->type == AHCI_IMX6Q) {
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
@@ -105,10 +127,10 @@ static void imx_sata_clock_disable(struct device *dev)
!IMX6Q_GPR13_SATA_MPLL_CLK_EN);
}
- clk_disable_unprepare(imxpriv->sata_ref_clk);
+ ahci_platform_disable_clks(hpriv);
- if (imxpriv->type == AHCI_IMX53)
- clk_disable_unprepare(imxpriv->sata_gate_clk);
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
}
static void ahci_imx_error_handler(struct ata_port *ap)
@@ -118,7 +140,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
ahci_error_handler(ap);
@@ -136,7 +158,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
*/
reg_val = readl(mmio + PORT_PHY_CTL);
writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
- imx_sata_clock_disable(ap->dev);
+ imx_sata_disable(hpriv);
imxpriv->no_device = true;
}
@@ -144,7 +166,9 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
int ret = -EIO;
if (imxpriv->type == AHCI_IMX53)
@@ -156,7 +180,8 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_platform_ops,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
.error_handler = ahci_imx_error_handler,
.softreset = ahci_imx_softreset,
};
@@ -168,79 +193,6 @@ static const struct ata_port_info ahci_imx_port_info = {
.port_ops = &ahci_imx_ops,
};
-static int imx_sata_init(struct device *dev, void __iomem *mmio)
-{
- int ret = 0;
- unsigned int reg_val;
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
- ret = imx_sata_clock_enable(dev);
- if (ret < 0)
- return ret;
-
- /*
- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register HOST_TIMER1MS.
- * Configure CAP_SSS (support stagered spin up).
- * Implement the port0.
- * Get the ahb clock rate, and configure the TIMER1MS register.
- */
- reg_val = readl(mmio + HOST_CAP);
- if (!(reg_val & HOST_CAP_SSS)) {
- reg_val |= HOST_CAP_SSS;
- writel(reg_val, mmio + HOST_CAP);
- }
- reg_val = readl(mmio + HOST_PORTS_IMPL);
- if (!(reg_val & 0x1)) {
- reg_val |= 0x1;
- writel(reg_val, mmio + HOST_PORTS_IMPL);
- }
-
- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, mmio + HOST_TIMER1MS);
-
- return 0;
-}
-
-static void imx_sata_exit(struct device *dev)
-{
- imx_sata_clock_disable(dev);
-}
-
-static int imx_ahci_suspend(struct device *dev)
-{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
- /*
- * If no_device is set, The CLKs had been gated off in the
- * initialization so don't do it again here.
- */
- if (!imxpriv->no_device)
- imx_sata_clock_disable(dev);
-
- return 0;
-}
-
-static int imx_ahci_resume(struct device *dev)
-{
- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
- int ret = 0;
-
- if (!imxpriv->no_device)
- ret = imx_sata_clock_enable(dev);
-
- return ret;
-}
-
-static struct ahci_platform_data imx_sata_pdata = {
- .init = imx_sata_init,
- .exit = imx_sata_exit,
- .ata_port_info = &ahci_imx_port_info,
- .suspend = imx_ahci_suspend,
- .resume = imx_ahci_resume,
-
-};
-
static const struct of_device_id imx_ahci_of_match[] = {
{ .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
@@ -251,151 +203,124 @@ MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
static int imx_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem, *irq, res[2];
const struct of_device_id *of_id;
- enum ahci_imx_type type;
- const struct ahci_platform_data *pdata = NULL;
+ struct ahci_host_priv *hpriv;
struct imx_ahci_priv *imxpriv;
- struct device *ahci_dev;
- struct platform_device *ahci_pdev;
+ unsigned int reg_val;
int ret;
of_id = of_match_device(imx_ahci_of_match, dev);
if (!of_id)
return -EINVAL;
- type = (enum ahci_imx_type)of_id->data;
- pdata = &imx_sata_pdata;
-
imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
- if (!imxpriv) {
- dev_err(dev, "can't alloc ahci_host_priv\n");
+ if (!imxpriv)
return -ENOMEM;
- }
-
- ahci_pdev = platform_device_alloc("ahci", -1);
- if (!ahci_pdev)
- return -ENODEV;
-
- ahci_dev = &ahci_pdev->dev;
- ahci_dev->parent = dev;
imxpriv->no_device = false;
imxpriv->first_time = true;
- imxpriv->type = type;
-
+ imxpriv->type = (enum ahci_imx_type)of_id->data;
imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(imxpriv->ahb_clk)) {
dev_err(dev, "can't get ahb clock.\n");
- ret = PTR_ERR(imxpriv->ahb_clk);
- goto err_out;
+ return PTR_ERR(imxpriv->ahb_clk);
}
- if (type == AHCI_IMX53) {
- imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
- if (IS_ERR(imxpriv->sata_gate_clk)) {
- dev_err(dev, "can't get sata_gate clock.\n");
- ret = PTR_ERR(imxpriv->sata_gate_clk);
- goto err_out;
+ if (imxpriv->type == AHCI_IMX6Q) {
+ imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+ "fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev,
+ "failed to find fsl,imx6q-iomux-gpr regmap\n");
+ return PTR_ERR(imxpriv->gpr);
}
}
- imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
- if (IS_ERR(imxpriv->sata_ref_clk)) {
- dev_err(dev, "can't get sata_ref clock.\n");
- ret = PTR_ERR(imxpriv->sata_ref_clk);
- goto err_out;
- }
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->plat_data = imxpriv;
- imxpriv->ahci_pdev = ahci_pdev;
- platform_set_drvdata(pdev, imxpriv);
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ return ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mem || !irq) {
- dev_err(dev, "no mmio/irq resource\n");
- ret = -ENOMEM;
- goto err_out;
+ /*
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+ * and IP vendor specific register HOST_TIMER1MS.
+ * Configure CAP_SSS (support staggered spin up).
+ * Implement the port0.
+ * Get the ahb clock rate, and configure the TIMER1MS register.
+ */
+ reg_val = readl(hpriv->mmio + HOST_CAP);
+ if (!(reg_val & HOST_CAP_SSS)) {
+ reg_val |= HOST_CAP_SSS;
+ writel(reg_val, hpriv->mmio + HOST_CAP);
+ }
+ reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
+ if (!(reg_val & 0x1)) {
+ reg_val |= 0x1;
+ writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
- res[0] = *mem;
- res[1] = *irq;
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
- ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
- ahci_dev->of_node = dev->of_node;
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
+ if (ret)
+ imx_sata_disable(hpriv);
- if (type == AHCI_IMX6Q) {
- imxpriv->gpr = syscon_regmap_lookup_by_compatible(
- "fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev,
- "failed to find fsl,imx6q-iomux-gpr regmap\n");
- ret = PTR_ERR(imxpriv->gpr);
- goto err_out;
- }
+ return ret;
+}
- /*
- * Set PHY Paremeters, two steps to configure the GPR13,
- * one write for rest of parameters, mask of first write
- * is 0x07fffffe, and the other one write for setting
- * the mpll_clk_en happens in imx_sata_clock_enable().
- */
- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
- IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
- IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
- IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
- IMX6Q_GPR13_SATA_SPD_MODE_MASK |
- IMX6Q_GPR13_SATA_MPLL_SS_EN |
- IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
- IMX6Q_GPR13_SATA_TX_BOOST_MASK |
- IMX6Q_GPR13_SATA_TX_LVL_MASK |
- IMX6Q_GPR13_SATA_MPLL_CLK_EN |
- IMX6Q_GPR13_SATA_TX_EDGE_RATE,
- IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
- IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
- IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
- IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
- IMX6Q_GPR13_SATA_MPLL_SS_EN |
- IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
- IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
- IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
- }
+static void ahci_imx_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
- ret = platform_device_add_resources(ahci_pdev, res, 2);
- if (ret)
- goto err_out;
+ imx_sata_disable(hpriv);
+}
- ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
- if (ret)
- goto err_out;
+#ifdef CONFIG_PM_SLEEP
+static int imx_ahci_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int ret;
- ret = platform_device_add(ahci_pdev);
- if (ret) {
-err_out:
- platform_device_put(ahci_pdev);
+ ret = ahci_platform_suspend_host(dev);
+ if (ret)
return ret;
- }
+
+ imx_sata_disable(hpriv);
return 0;
}
-static int imx_ahci_remove(struct platform_device *pdev)
+static int imx_ahci_resume(struct device *dev)
{
- struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
- struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int ret;
- platform_device_unregister(ahci_pdev);
- return 0;
+ ret = imx_sata_enable(hpriv);
+ if (ret)
+ return ret;
+
+ return ahci_platform_resume_host(dev);
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove = imx_ahci_remove,
+ .remove = ata_platform_remove_one,
.driver = {
.name = "ahci-imx",
.owner = THIS_MODULE,
.of_match_table = imx_ahci_of_match,
+ .pm = &ahci_imx_pm_ops,
},
};
module_platform_driver(imx_ahci_driver);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4b231baceb09..ef67e79944f9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -12,135 +12,36 @@
* any later version.
*/
-#include <linux/clk.h>
#include <linux/kernel.h>
-#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/pm.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"
-static void ahci_host_stop(struct ata_host *host);
-
-enum ahci_type {
- AHCI, /* standard platform ahci */
- IMX53_AHCI, /* ahci on i.mx53 */
- STRICT_AHCI, /* delayed DMA engine start */
-};
-
-static struct platform_device_id ahci_devtype[] = {
- {
- .name = "ahci",
- .driver_data = AHCI,
- }, {
- .name = "imx53-ahci",
- .driver_data = IMX53_AHCI,
- }, {
- .name = "strict-ahci",
- .driver_data = STRICT_AHCI,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, ahci_devtype);
-
-struct ata_port_operations ahci_platform_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_host_stop,
-};
-EXPORT_SYMBOL_GPL(ahci_platform_ops);
-
-static struct ata_port_operations ahci_platform_retry_srst_ops = {
- .inherits = &ahci_pmp_retry_srst_ops,
- .host_stop = ahci_host_stop,
-};
-
-static const struct ata_port_info ahci_port_info[] = {
- /* by features */
- [AHCI] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
- },
- [IMX53_AHCI] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_retry_srst_ops,
- },
- [STRICT_AHCI] = {
- AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
- },
-};
-
-static struct scsi_host_template ahci_platform_sht = {
- AHCI_SHT("ahci_platform"),
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
};
static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
- const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
- struct ata_host *host;
- struct resource *mem;
- int irq;
- int n_ports;
- int i;
int rc;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(dev, "no mmio space\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
- }
-
- if (pdata && pdata->ata_port_info)
- pi = *pdata->ata_port_info;
-
- hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
- if (!hpriv) {
- dev_err(dev, "can't alloc ahci_host_priv\n");
- return -ENOMEM;
- }
-
- hpriv->flags |= (unsigned long)pi.private_data;
-
- hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
- if (!hpriv->mmio) {
- dev_err(dev, "can't map %pR\n", mem);
- return -ENOMEM;
- }
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
- hpriv->clk = clk_get(dev, NULL);
- if (IS_ERR(hpriv->clk)) {
- dev_err(dev, "can't get clock\n");
- } else {
- rc = clk_prepare_enable(hpriv->clk);
- if (rc) {
- dev_err(dev, "clock prepare enable failed");
- goto free_clk;
- }
- }
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
/*
* Some platforms might need to prepare for mmio region access,
@@ -151,69 +52,10 @@ static int ahci_probe(struct platform_device *pdev)
if (pdata && pdata->init) {
rc = pdata->init(dev, hpriv->mmio);
if (rc)
- goto disable_unprepare_clk;
- }
-
- ahci_save_initial_config(dev, hpriv,
- pdata ? pdata->force_port_map : 0,
- pdata ? pdata->mask_port_map : 0);
-
- /* prepare host */
- if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ;
-
- if (hpriv->cap & HOST_CAP_PMP)
- pi.flags |= ATA_FLAG_PMP;
-
- ahci_set_em_messages(hpriv, &pi);
-
- /* CAP.NP sometimes indicate the index of the last enabled
- * port, at other times, that of the last possible port, so
- * determining the maximum port number requires looking at
- * both CAP.NP and port_map.
- */
- n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
-
- host = ata_host_alloc_pinfo(dev, ppi, n_ports);
- if (!host) {
- rc = -ENOMEM;
- goto pdata_exit;
- }
-
- host->private_data = hpriv;
-
- if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
- host->flags |= ATA_HOST_PARALLEL_SCAN;
- else
- dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
-
- if (pi.flags & ATA_FLAG_EM)
- ahci_reset_em(host);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- ata_port_desc(ap, "mmio %pR", mem);
- ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
-
- /* set enclosure management message type */
- if (ap->flags & ATA_FLAG_EM)
- ap->em_message_type = hpriv->em_msg_type;
-
- /* disabled/not-implemented port */
- if (!(hpriv->port_map & (1 << i)))
- ap->ops = &ata_dummy_port_ops;
+ goto disable_resources;
}
- rc = ahci_reset_controller(host);
- if (rc)
- goto pdata_exit;
-
- ahci_init_controller(host);
- ahci_print_info(host, "platform");
-
- rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
- &ahci_platform_sht);
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0);
if (rc)
goto pdata_exit;
@@ -221,115 +63,19 @@ static int ahci_probe(struct platform_device *pdev)
pdata_exit:
if (pdata && pdata->exit)
pdata->exit(dev);
-disable_unprepare_clk:
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-free_clk:
- if (!IS_ERR(hpriv->clk))
- clk_put(hpriv->clk);
- return rc;
-}
-
-static void ahci_host_stop(struct ata_host *host)
-{
- struct device *dev = host->dev;
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
-
- if (pdata && pdata->exit)
- pdata->exit(dev);
-
- if (!IS_ERR(hpriv->clk)) {
- clk_disable_unprepare(hpriv->clk);
- clk_put(hpriv->clk);
- }
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ahci_suspend(struct device *dev)
-{
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->mmio;
- u32 ctl;
- int rc;
-
- if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_err(dev, "firmware update required for suspend/resume\n");
- return -EIO;
- }
-
- /*
- * AHCI spec rev1.1 section 8.3.3:
- * Software must disable interrupts prior to requesting a
- * transition of the HBA to D3 state.
- */
- ctl = readl(mmio + HOST_CTL);
- ctl &= ~HOST_IRQ_EN;
- writel(ctl, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
-
- rc = ata_host_suspend(host, PMSG_SUSPEND);
- if (rc)
- return rc;
-
- if (pdata && pdata->suspend)
- return pdata->suspend(dev);
-
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-
- return 0;
-}
-
-static int ahci_resume(struct device *dev)
-{
- struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
- struct ahci_host_priv *hpriv = host->private_data;
- int rc;
-
- if (!IS_ERR(hpriv->clk)) {
- rc = clk_prepare_enable(hpriv->clk);
- if (rc) {
- dev_err(dev, "clock prepare enable failed");
- return rc;
- }
- }
-
- if (pdata && pdata->resume) {
- rc = pdata->resume(dev);
- if (rc)
- goto disable_unprepare_clk;
- }
-
- if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
- if (rc)
- goto disable_unprepare_clk;
-
- ahci_init_controller(host);
- }
-
- ata_host_resume(host);
-
- return 0;
-
-disable_unprepare_clk:
- if (!IS_ERR(hpriv->clk))
- clk_disable_unprepare(hpriv->clk);
-
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
return rc;
}
-#endif
-static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
{ .compatible = "snps,exynos5440-ahci", },
{ .compatible = "ibm,476gtr-ahci", },
+ { .compatible = "snps,dwc-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -343,7 +89,6 @@ static struct platform_driver ahci_driver = {
.of_match_table = ahci_of_match,
.pm = &ahci_pm_ops,
},
- .id_table = ahci_devtype,
};
module_platform_driver(ahci_driver);
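With the legacy id_table variants gone, a platform AHCI probe reduces to the shared libahci_platform sequence that the new drivers in this series all follow: ahci_platform_get_resources() maps the MMIO region and looks up the optional clocks/regulator, ahci_platform_enable_resources() switches them on, ahci_platform_init_host() allocates and activates the ata_host, and ahci_platform_disable_resources() unwinds on failure. A condensed sketch (my_ahci_probe and my_port_info are placeholders):

	static int my_ahci_probe(struct platform_device *pdev)
	{
		struct ahci_host_priv *hpriv;
		int rc;

		hpriv = ahci_platform_get_resources(pdev);
		if (IS_ERR(hpriv))
			return PTR_ERR(hpriv);

		rc = ahci_platform_enable_resources(hpriv);
		if (rc)
			return rc;

		/* any SoC-specific wrapper or PHY setup goes here */

		rc = ahci_platform_init_host(pdev, hpriv, &my_port_info, 0, 0);
		if (rc)
			ahci_platform_disable_resources(hpriv);
		return rc;
	}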
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
new file mode 100644
index 000000000000..633222226c19
--- /dev/null
+++ b/drivers/ata/ahci_st.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2012 STMicroelectronics Limited
+ *
+ * Authors: Francesco Virlinzi <francesco.virlinzi@st.com>
+ * Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/ahci_platform.h>
+#include <linux/libata.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ahci.h"
+
+#define ST_AHCI_OOBR 0xbc
+#define ST_AHCI_OOBR_WE BIT(31)
+#define ST_AHCI_OOBR_CWMIN_SHIFT 24
+#define ST_AHCI_OOBR_CWMAX_SHIFT 16
+#define ST_AHCI_OOBR_CIMIN_SHIFT 8
+#define ST_AHCI_OOBR_CIMAX_SHIFT 0
+
+struct st_ahci_drv_data {
+ struct platform_device *ahci;
+ struct reset_control *pwr;
+ struct reset_control *sw_rst;
+ struct reset_control *pwr_rst;
+ struct ahci_host_priv *hpriv;
+};
+
+static void st_ahci_configure_oob(void __iomem *mmio)
+{
+ unsigned long old_val, new_val;
+
+ new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) |
+ (0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) |
+ (0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) |
+ (0x0C << ST_AHCI_OOBR_CIMAX_SHIFT);
+
+ old_val = readl(mmio + ST_AHCI_OOBR);
+ writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+ writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+ writel(new_val, mmio + ST_AHCI_OOBR);
+}
+
+static int st_ahci_deassert_resets(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ int err;
+
+ if (drv_data->pwr) {
+ err = reset_control_deassert(drv_data->pwr);
+ if (err) {
+ dev_err(dev, "unable to bring out of pwrdwn\n");
+ return err;
+ }
+ }
+
+ st_ahci_configure_oob(drv_data->hpriv->mmio);
+
+ if (drv_data->sw_rst) {
+ err = reset_control_deassert(drv_data->sw_rst);
+ if (err) {
+ dev_err(dev, "unable to bring out of sw-rst\n");
+ return err;
+ }
+ }
+
+ if (drv_data->pwr_rst) {
+ err = reset_control_deassert(drv_data->pwr_rst);
+ if (err) {
+ dev_err(dev, "unable to bring out of pwr-rst\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void st_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct device *dev = host->dev;
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ int err;
+
+ if (drv_data->pwr) {
+ err = reset_control_assert(drv_data->pwr);
+ if (err)
+ dev_err(dev, "unable to pwrdwn\n");
+ }
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+static int st_ahci_probe_resets(struct platform_device *pdev)
+{
+ struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
+ if (IS_ERR(drv_data->pwr)) {
+ dev_info(&pdev->dev, "power reset control not defined\n");
+ drv_data->pwr = NULL;
+ }
+
+ drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
+ if (IS_ERR(drv_data->sw_rst)) {
+ dev_info(&pdev->dev, "soft reset control not defined\n");
+ drv_data->sw_rst = NULL;
+ }
+
+ drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
+ if (IS_ERR(drv_data->pwr_rst)) {
+ dev_dbg(&pdev->dev, "power soft reset control not defined\n");
+ drv_data->pwr_rst = NULL;
+ }
+
+ return st_ahci_deassert_resets(&pdev->dev);
+}
+
+static struct ata_port_operations st_ahci_port_ops = {
+ .inherits = &ahci_platform_ops,
+ .host_stop = st_ahci_host_stop,
+};
+
+static const struct ata_port_info st_ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &st_ahci_port_ops,
+};
+
+static int st_ahci_probe(struct platform_device *pdev)
+{
+ struct st_ahci_drv_data *drv_data;
+ struct ahci_host_priv *hpriv;
+ int err;
+
+ drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drv_data);
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ drv_data->hpriv = hpriv;
+
+ err = st_ahci_probe_resets(pdev);
+ if (err)
+ return err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0);
+ if (err) {
+ ahci_platform_disable_resources(hpriv);
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_ahci_suspend(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = drv_data->hpriv;
+ int err;
+
+ err = ahci_platform_suspend_host(dev);
+ if (err)
+ return err;
+
+ if (drv_data->pwr) {
+ err = reset_control_assert(drv_data->pwr);
+ if (err) {
+ dev_err(dev, "unable to pwrdwn");
+ return err;
+ }
+ }
+
+ ahci_platform_disable_resources(hpriv);
+
+ return 0;
+}
+
+static int st_ahci_resume(struct device *dev)
+{
+ struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = drv_data->hpriv;
+ int err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = st_ahci_deassert_resets(dev);
+ if (err) {
+ ahci_platform_disable_resources(hpriv);
+ return err;
+ }
+
+ return ahci_platform_resume_host(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+
+static struct of_device_id st_ahci_match[] = {
+ { .compatible = "st,ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_ahci_match);
+
+static struct platform_driver st_ahci_driver = {
+ .driver = {
+ .name = "st_ahci",
+ .owner = THIS_MODULE,
+ .pm = &st_ahci_pm_ops,
+ .of_match_table = of_match_ptr(st_ahci_match),
+ },
+ .probe = st_ahci_probe,
+ .remove = ata_platform_remove_one,
+};
+module_platform_driver(st_ahci_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
+MODULE_AUTHOR("Francesco Virlinzi <francesco.virlinzi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
new file mode 100644
index 000000000000..42d3f64e74b3
--- /dev/null
+++ b/drivers/ata/ahci_sunxi.c
@@ -0,0 +1,249 @@
+/*
+ * Allwinner sunxi AHCI SATA platform driver
+ * Copyright 2013 Olliver Schinagl <oliver@schinagl.nl>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ * Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>,
+ * Daniel Wang <danielwang@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define AHCI_BISTAFR 0x00a0
+#define AHCI_BISTCR 0x00a4
+#define AHCI_BISTFCTR 0x00a8
+#define AHCI_BISTSR 0x00ac
+#define AHCI_BISTDECR 0x00b0
+#define AHCI_DIAGNR0 0x00b4
+#define AHCI_DIAGNR1 0x00b8
+#define AHCI_OOBR 0x00bc
+#define AHCI_PHYCS0R 0x00c0
+#define AHCI_PHYCS1R 0x00c4
+#define AHCI_PHYCS2R 0x00c8
+#define AHCI_TIMER1MS 0x00e0
+#define AHCI_GPARAM1R 0x00e8
+#define AHCI_GPARAM2R 0x00ec
+#define AHCI_PPARAMR 0x00f0
+#define AHCI_TESTR 0x00f4
+#define AHCI_VERSIONR 0x00f8
+#define AHCI_IDR 0x00fc
+#define AHCI_RWCR 0x00fc
+#define AHCI_P0DMACR 0x0170
+#define AHCI_P0PHYCR 0x0178
+#define AHCI_P0PHYSR 0x017c
+
+static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val &= ~(clr_val);
+ writel(reg_val, reg);
+}
+
+static void sunxi_setbits(void __iomem *reg, u32 set_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val |= set_val;
+ writel(reg_val, reg);
+}
+
+static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
+{
+ u32 reg_val;
+
+ reg_val = readl(reg);
+ reg_val &= ~(clr_val);
+ reg_val |= set_val;
+ writel(reg_val, reg);
+}
+
+static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
+{
+ return (readl(reg) >> shift) & mask;
+}
+
+static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
+{
+ u32 reg_val;
+ int timeout;
+
+ /* This magic is from the original code */
+ writel(0, reg_base + AHCI_RWCR);
+ msleep(5);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+ (0x7 << 24),
+ (0x5 << 24) | BIT(23) | BIT(18));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
+ (0x3 << 16) | (0x1f << 8) | (0x3 << 6),
+ (0x2 << 16) | (0x6 << 8) | (0x2 << 6));
+ sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
+ sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+ (0x7 << 20), (0x3 << 20));
+ sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
+ (0x1f << 5), (0x19 << 5));
+ msleep(5);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
+
+ timeout = 250; /* Power up takes approx 50 us */
+ do {
+ reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
+ if (reg_val == 0x02)
+ break;
+
+ if (--timeout == 0) {
+ dev_err(dev, "PHY power up failed.\n");
+ return -EIO;
+ }
+ udelay(1);
+ } while (1);
+
+ sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
+
+ timeout = 100; /* Calibration takes approx 10 us */
+ do {
+ reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
+ if (reg_val == 0x00)
+ break;
+
+ if (--timeout == 0) {
+ dev_err(dev, "PHY calibration failed.\n");
+ return -EIO;
+ }
+ udelay(1);
+ } while (1);
+
+ msleep(15);
+
+ writel(0x7, reg_base + AHCI_RWCR);
+
+ return 0;
+}
+
+static void ahci_sunxi_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ /* Setup DMA before DMA start */
+ sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+
+ /* Start DMA */
+ sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
+}
+
+static const struct ata_port_info ahci_sunxi_port_info = {
+ AHCI_HFLAGS(AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static int ahci_sunxi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->start_engine = ahci_sunxi_start_engine;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+ if (rc)
+ goto disable_resources;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_sunxi_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+ if (rc)
+ goto disable_resources;
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
+ ahci_sunxi_resume);
+
+static const struct of_device_id ahci_sunxi_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-ahci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
+
+static struct platform_driver ahci_sunxi_driver = {
+ .probe = ahci_sunxi_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci-sunxi",
+ .owner = THIS_MODULE,
+ .of_match_table = ahci_sunxi_of_match,
+ .pm = &ahci_sunxi_pm_ops,
+ },
+};
+module_platform_driver(ahci_sunxi_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver");
+MODULE_AUTHOR("Olliver Schinagl <oliver@schinagl.nl>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
new file mode 100644
index 000000000000..77c89bf171f1
--- /dev/null
+++ b/drivers/ata/ahci_xgene.c
@@ -0,0 +1,486 @@
+/*
+ * AppliedMicro X-Gene SoC SATA Host Controller Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ * Tuan Phan <tphan@apm.com>
+ * Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is not currently available.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include "ahci.h"
+
+/* Max # of disks per controller */
+#define MAX_AHCI_CHN_PERCTR 2
+
+/* MUX CSR */
+#define SATA_ENET_CONFIG_REG 0x00000000
+#define CFG_SATA_ENET_SELECT_MASK 0x00000001
+
+/* SATA core host controller CSR */
+#define SLVRDERRATTRIBUTES 0x00000000
+#define SLVWRERRATTRIBUTES 0x00000004
+#define MSTRDERRATTRIBUTES 0x00000008
+#define MSTWRERRATTRIBUTES 0x0000000c
+#define BUSCTLREG 0x00000014
+#define IOFMSTRWAUX 0x00000018
+#define INTSTATUSMASK 0x0000002c
+#define ERRINTSTATUS 0x00000030
+#define ERRINTSTATUSMASK 0x00000034
+
+/* SATA host AHCI CSR */
+#define PORTCFG 0x000000a4
+#define PORTADDR_SET(dst, src) \
+ (((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
+#define PORTPHY1CFG 0x000000a8
+#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
+ (((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
+#define PORTPHY2CFG 0x000000ac
+#define PORTPHY3CFG 0x000000b0
+#define PORTPHY4CFG 0x000000b4
+#define PORTPHY5CFG 0x000000b8
+#define SCTL0 0x0000012C
+#define PORTPHY5CFG_RTCHG_SET(dst, src) \
+ (((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
+#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
+ (((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
+#define PORTAXICFG 0x000000bc
+#define PORTAXICFG_OUTTRANS_SET(dst, src) \
+ (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+
+/* SATA host controller AXI CSR */
+#define INT_SLV_TMOMASK 0x00000010
+
+/* SATA diagnostic CSR */
+#define CFG_MEM_RAM_SHUTDOWN 0x00000070
+#define BLOCK_MEM_RDY 0x00000074
+
+struct xgene_ahci_context {
+ struct ahci_host_priv *hpriv;
+ struct device *dev;
+ void __iomem *csr_core; /* Core CSR address of IP */
+ void __iomem *csr_diag; /* Diag CSR address of IP */
+ void __iomem *csr_axi; /* AXI CSR address of IP */
+ void __iomem *csr_mux; /* MUX CSR address of IP */
+};
+
+static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+{
+ dev_dbg(ctx->dev, "Release memory from shutdown\n");
+ writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
+ readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
+ msleep(1); /* reset may take up to 1ms */
+ if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
+ dev_err(ctx->dev, "failed to release memory from shutdown\n");
+ return -ENODEV;
+ }
+ return 0;
+}
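
The helper above uses a single fixed 1 ms sleep before checking BLOCK_MEM_RDY. A minimal sketch of the same release-from-shutdown idiom with a bounded polling loop instead; the helper name and retry count below are hypothetical and not part of this patch.

/* Hypothetical polled variant of the release-from-shutdown sequence. */
static int xgene_ahci_init_memram_polled(struct xgene_ahci_context *ctx)
{
	int retries = 5;

	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */

	do {
		if (readl(ctx->csr_diag + BLOCK_MEM_RDY) == 0xFFFFFFFF)
			return 0;
		msleep(1); /* release may take a few milliseconds */
	} while (--retries);

	dev_err(ctx->dev, "failed to release memory from shutdown\n");
	return -ENODEV;
}
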
+
+/**
+ * xgene_ahci_read_id - Read ID data from the specified device
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * This custom read ID function is required because the HW does not
+ * support DEVSLP and the controller state machine may get stuck after
+ * processing the ID query command.
+ */
+static unsigned int xgene_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ u32 err_mask;
+ void __iomem *port_mmio = ahci_port_base(dev->link->ap);
+
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (err_mask)
+ return err_mask;
+
+ /*
+ * Mask reserved area. Word 78 (Serial ATA features supported):
+ * bit15-8: reserved
+ * bit7: NCQ autosense
+ * bit6: Software settings preservation supported
+ * bit5: reserved
+ * bit4: In-order SATA delivery supported
+ * bit3: DIPM requests supported
+ * bit2: DMA Setup FIS Auto-Activate optimization supported
+ * bit1: DMA Setup FIS non-zero buffer offsets supported
+ * bit0: reserved
+ *
+ * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
+ */
+ id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+
+ /*
+ * Due to HW errata, restart the port if no other command active.
+ * Otherwise the controller may get stuck.
+ */
+ if (!readl(port_mmio + PORT_CMD_ISSUE)) {
+ writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* Force a barrier */
+ writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* Force a barrier */
+ }
+ return 0;
+}
+
+static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
+{
+ void __iomem *mmio = ctx->hpriv->mmio;
+ u32 val;
+
+ dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
+ mmio, channel);
+ val = readl(mmio + PORTCFG);
+ val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
+ writel(val, mmio + PORTCFG);
+ readl(mmio + PORTCFG); /* Force a barrier */
+ /* Disable fixed rate */
+ writel(0x0001fffe, mmio + PORTPHY1CFG);
+ readl(mmio + PORTPHY1CFG); /* Force a barrier */
+ writel(0x5018461c, mmio + PORTPHY2CFG);
+ readl(mmio + PORTPHY2CFG); /* Force a barrier */
+ writel(0x1c081907, mmio + PORTPHY3CFG);
+ readl(mmio + PORTPHY3CFG); /* Force a barrier */
+ writel(0x1c080815, mmio + PORTPHY4CFG);
+ readl(mmio + PORTPHY4CFG); /* Force a barrier */
+ /* Set window negotiation */
+ val = readl(mmio + PORTPHY5CFG);
+ val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
+ writel(val, mmio + PORTPHY5CFG);
+ readl(mmio + PORTPHY5CFG); /* Force a barrier */
+ val = readl(mmio + PORTAXICFG);
+ val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
+ val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
+ writel(val, mmio + PORTAXICFG);
+ readl(mmio + PORTAXICFG); /* Force a barrier */
+}
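
All the PORT*CFG_*_SET() macros above share one read-modify-write shape: mask out the field, then OR in the shifted new value. A small self-contained sketch of that pattern, using the same bit layout as PORTPHY5CFG_RTCHG_SET() above; the macro name and values here are illustrative only.

#include <assert.h>
#include <stdint.h>

/* Field occupies bits [31:20], like PORTPHY5CFG_RTCHG_SET(). */
#define FIELD_MASK	0xfff00000u
#define FIELD_SHIFT	20
#define FIELD_SET(dst, src) \
	(((dst) & ~FIELD_MASK) | (((uint32_t)(src) << FIELD_SHIFT) & FIELD_MASK))

int main(void)
{
	uint32_t reg = 0x000abcde;	/* stand-in for a readl() result */

	reg = FIELD_SET(reg, 0x300);	/* update only bits [31:20] */
	assert((reg & FIELD_MASK) == (0x300u << FIELD_SHIFT));
	assert((reg & ~FIELD_MASK) == 0x000abcde); /* other bits untouched */
	return 0;
}
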
+
+/**
+ * xgene_ahci_do_hardreset - Issue the actual COMRESET
+ * @link: link to reset
+ * @deadline: deadline jiffies for the operation
+ * @online: Return value to indicate if the device is online
+ *
+ * Due to a limitation of the hardware PHY, a different set of settings is
+ * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
+ * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
+ * report disparity errors and the like. In addition, during COMRESET, errors
+ * can be reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. The following
+ * algorithm is used to properly configure the hardware PHY during COMRESET:
+ *
+ * Alg Part 1:
+ * 1. Start the PHY at Gen3 speed (default setting)
+ * 2. Issue the COMRESET
+ * 3. If no link, go to Alg Part 3
+ * 4. If link up, determine if the negotiated speed matches the PHY
+ * configured speed
+ * 5. If they match, go to Alg Part 2
+ * 6. If they do not match and this is the first attempt, configure the PHY
+ *    for the negotiated disk speed and repeat step 2
+ * 7. Go to Alg Part 2
+ *
+ * Alg Part 2:
+ * 1. On link up, if any SERR_DISPARITY or SERR_10B_8B_ERR errors are
+ *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
+ * 2. Go to Alg Part 3
+ *
+ * Alg Part 3:
+ * 1. Clear any pending errors from register PORT_SCR_ERR.
+ *
+ * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
+ * until the underlying PHY supports a method to reset the receiver line, a
+ * warning message will be printed on detection of SERR_DISPARITY or
+ * SERR_10B_8B_ERR errors.
+ */
+static int xgene_ahci_do_hardreset(struct ata_link *link,
+ unsigned long deadline, bool *online)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ int rc;
+ u32 val;
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ rc = sata_link_hardreset(link, timing, deadline, online,
+ ahci_check_ready);
+
+ val = readl(port_mmio + PORT_SCR_ERR);
+ if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+ dev_warn(ctx->dev, "link has error\n");
+
+ /* clear all errors if any pending */
+ val = readl(port_mmio + PORT_SCR_ERR);
+ writel(val, port_mmio + PORT_SCR_ERR);
+
+ return rc;
+}
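
The function above covers Alg Part 2 and Part 3 only; as the NOTE says, the Gen1/Gen2 retry of Alg Part 1 is deferred until the PHY can be reconfigured per speed. A rough sketch of what that retry could look like once it can; xgene_ahci_set_phy_speed() is hypothetical and does not exist in this patch.

/* Hypothetical Alg Part 1 retry; not implemented in this patch. */
static int xgene_ahci_hardreset_retry(struct ata_link *link,
				      unsigned long deadline, bool *online)
{
	bool retried = false;
	u32 sstatus, spd;
	int rc;

retry:
	rc = xgene_ahci_do_hardreset(link, deadline, online);
	if (rc || !*online)
		return rc;	/* no link: nothing more to do (Alg Part 3) */

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return rc;
	spd = (sstatus >> 4) & 0xf;	/* negotiated speed, 3 == Gen3 */

	if (!retried && spd != 3) {
		retried = true;
		/* xgene_ahci_set_phy_speed(link->ap, spd); -- hypothetical */
		goto retry;
	}
	return rc;
}
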
+
+static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ bool online;
+ int rc;
+ u32 portcmd_saved;
+ u32 portclb_saved;
+ u32 portclbhi_saved;
+ u32 portrxfis_saved;
+ u32 portrxfishi_saved;
+
+ /* As hardreset resets these CSRs, save them to restore later */
+ portcmd_saved = readl(port_mmio + PORT_CMD);
+ portclb_saved = readl(port_mmio + PORT_LST_ADDR);
+ portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
+ portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+ portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+
+ ahci_stop_engine(ap);
+
+ rc = xgene_ahci_do_hardreset(link, deadline, &online);
+
+ /* As controller hardreset clears them, restore them */
+ writel(portcmd_saved, port_mmio + PORT_CMD);
+ writel(portclb_saved, port_mmio + PORT_LST_ADDR);
+ writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
+ writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
+ writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
+
+ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ return rc;
+}
+
+static void xgene_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+static struct ata_port_operations xgene_ahci_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = xgene_ahci_host_stop,
+ .hardreset = xgene_ahci_hardreset,
+ .read_id = xgene_ahci_read_id,
+};
+
+static const struct ata_port_info xgene_ahci_port_info = {
+ AHCI_HFLAGS(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &xgene_ahci_ops,
+};
+
+static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
+{
+ struct xgene_ahci_context *ctx = hpriv->plat_data;
+ int i;
+ int rc;
+ u32 val;
+
+ /* Take the IP RAM out of shutdown */
+ rc = xgene_ahci_init_memram(ctx);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
+ xgene_ahci_set_phy_cfg(ctx, i);
+
+ /* AXI disable Mask */
+ writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
+ readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
+ writel(0, ctx->csr_core + INTSTATUSMASK);
+ val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
+ dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
+ INTSTATUSMASK, val);
+
+ writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
+ readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
+ writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
+ readl(ctx->csr_axi + INT_SLV_TMOMASK);
+
+ /* Enable AXI Interrupt */
+ writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
+ writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
+
+ /* Enable coherency */
+ val = readl(ctx->csr_core + BUSCTLREG);
+ val &= ~0x00000002; /* Enable write coherency */
+ val &= ~0x00000001; /* Enable read coherency */
+ writel(val, ctx->csr_core + BUSCTLREG);
+
+ val = readl(ctx->csr_core + IOFMSTRWAUX);
+ val |= (1 << 3); /* Enable read coherency */
+ val |= (1 << 9); /* Enable write coherency */
+ writel(val, ctx->csr_core + IOFMSTRWAUX);
+ val = readl(ctx->csr_core + IOFMSTRWAUX);
+ dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
+ IOFMSTRWAUX, val);
+
+ return rc;
+}
+
+static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
+{
+ u32 val;
+
+ /* Check for optional MUX resource */
+ if (IS_ERR(ctx->csr_mux))
+ return 0;
+
+ val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ val &= ~CFG_SATA_ENET_SELECT_MASK;
+ writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+ return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
+}
+
+static int xgene_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct xgene_ahci_context *ctx;
+ struct resource *res;
+ int rc;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ hpriv->plat_data = ctx;
+ ctx->hpriv = hpriv;
+ ctx->dev = dev;
+
+ /* Retrieve the IP core resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ctx->csr_core = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_core))
+ return PTR_ERR(ctx->csr_core);
+
+ /* Retrieve the IP diagnostic resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ ctx->csr_diag = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_diag))
+ return PTR_ERR(ctx->csr_diag);
+
+ /* Retrieve the IP AXI resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ ctx->csr_axi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->csr_axi))
+ return PTR_ERR(ctx->csr_axi);
+
+ /* Retrieve the optional IP mux resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ ctx->csr_mux = devm_ioremap_resource(dev, res);
+
+ dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
+ hpriv->mmio);
+
+ rc = xgene_ahci_mux_select(ctx); /* Select ATA */
+ if (rc) {
+ dev_err(dev, "SATA mux selection failed, error %d\n", rc);
+ return -ENODEV;
+ }
+
+ /* Due to an errata, the HW requires a full clock enable/disable toggle */
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_resources;
+ ahci_platform_disable_clks(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ goto disable_resources;
+
+ /* Configure the host controller */
+ xgene_ahci_hw_init(hpriv);
+
+ /*
+ * Setup DMA mask. This is preliminary until the DMA range is sorted
+ * out.
+ */
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(dev, "Unable to set dma mask\n");
+ goto disable_resources;
+ }
+
+ rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, 0, 0);
+ if (rc)
+ goto disable_resources;
+
+ dev_dbg(dev, "X-Gene SATA host controller initialized\n");
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static const struct of_device_id xgene_ahci_of_match[] = {
+ {.compatible = "apm,xgene-ahci"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
+
+static struct platform_driver xgene_ahci_driver = {
+ .probe = xgene_ahci_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "xgene-ahci",
+ .owner = THIS_MODULE,
+ .of_match_table = xgene_ahci_of_match,
+ },
+};
+
+module_platform_driver(xgene_ahci_driver);
+
+MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.4");
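
The DMA mask setup in xgene_ahci_probe() above is flagged as preliminary. A common pattern for such setups, sketched under the assumption that a 32-bit fallback is acceptable on the platform; the wrapper name is hypothetical.

static int foo_ahci_setup_dma(struct device *dev)
{
	int rc;

	/* Prefer 64-bit DMA, fall back to 32-bit if the platform can't. */
	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(dev, "Unable to set dma mask\n");
	return rc;
}
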
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 7d196656adb5..9498a7d3846f 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 36605abe5a67..6bd4f660b4e1 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -394,6 +393,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
*
* If inconsistent, config values are fixed up by this function.
*
+ * If it is not set already this function sets hpriv->start_engine to
+ * ahci_start_engine.
+ *
* LOCKING:
* None.
*/
@@ -500,6 +502,9 @@ void ahci_save_initial_config(struct device *dev,
hpriv->cap = cap;
hpriv->cap2 = cap2;
hpriv->port_map = port_map;
+
+ if (!hpriv->start_engine)
+ hpriv->start_engine = ahci_start_engine;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
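
With the two hunks above, every engine start in libahci goes through hpriv->start_engine, which defaults to ahci_start_engine() but may be overridden by a low-level driver before ahci_save_initial_config() runs, which is exactly what the ahci_sunxi and ahci_xgene drivers added earlier in this patch do from their probe paths. A hedged sketch of such an override; the foo_ prefix is hypothetical.

/* Hypothetical override; a glue driver assigns it to hpriv->start_engine
 * in its probe routine, before ahci_save_initial_config() is reached. */
static void foo_ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	/* controller-specific quirk handling would go here */

	cmd = readl(port_mmio + PORT_CMD);
	writel(cmd | PORT_CMD_START, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}
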
@@ -766,7 +771,7 @@ static void ahci_start_port(struct ata_port *ap)
/* enable DMA */
if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
@@ -1032,12 +1037,13 @@ static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size)
{
- int state;
+ unsigned int state;
int pmp;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp;
- state = simple_strtoul(buf, NULL, 0);
+ if (kstrtouint(buf, 0, &state) < 0)
+ return -EINVAL;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
@@ -1234,7 +1240,7 @@ int ahci_kick_engine(struct ata_port *ap)
/* restart engine */
out_restart:
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
@@ -1387,8 +1393,8 @@ static int ahci_bad_pmp_check_ready(struct ata_link *link)
return ata_check_ready(status);
}
-int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
@@ -1426,6 +1432,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -1443,7 +1450,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
@@ -1629,7 +1636,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+ u32 *unk = pp->rx_fis + RX_FIS_UNK;
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
@@ -2007,10 +2014,12 @@ static void ahci_thaw(struct ata_port *ap)
void ahci_error_handler(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
sata_pmp_error_handler(ap);
@@ -2031,6 +2040,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
u32 devslp, dm, dito, mdat, deto;
@@ -2094,7 +2104,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
PORT_DEVSLP_ADSE);
writel(devslp, port_mmio + PORT_DEVSLP);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
/* enable device sleep feature for the drive */
err_mask = ata_dev_set_feature(dev,
@@ -2106,6 +2116,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
static void ahci_enable_fbs(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
@@ -2134,11 +2145,12 @@ static void ahci_enable_fbs(struct ata_port *ap)
} else
dev_err(ap->host->dev, "Failed to enable FBS\n");
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
static void ahci_disable_fbs(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 fbs;
@@ -2166,7 +2178,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
pp->fbs_enabled = false;
}
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
}
static void ahci_pmp_attach(struct ata_port *ap)
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
new file mode 100644
index 000000000000..7cb3a85719c0
--- /dev/null
+++ b/drivers/ata/libahci_platform.c
@@ -0,0 +1,541 @@
+/*
+ * AHCI SATA platform library
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include "ahci.h"
+
+static void ahci_host_stop(struct ata_host *host);
+
+struct ata_port_operations ahci_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = ahci_host_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT("ahci_platform"),
+};
+
+/**
+ * ahci_platform_enable_clks - Enable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the clks found in hpriv->clks, starting at
+ * index 0. If any clk fails to enable it disables all the clks already
+ * enabled in reverse order, and then returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+{
+ int c, rc;
+
+ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
+ rc = clk_prepare_enable(hpriv->clks[c]);
+ if (rc)
+ goto disable_unprepare_clk;
+ }
+ return 0;
+
+disable_unprepare_clk:
+ while (--c >= 0)
+ clk_disable_unprepare(hpriv->clks[c]);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
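
These two helpers are also usable on their own, outside ahci_platform_enable_resources(); xgene_ahci_probe() earlier in this patch relies on that for its clock-toggle errata. A short sketch of that standalone pairing; the wrapper name is hypothetical.

/* Hypothetical wrapper: toggle all controller clocks once. */
static int foo_ahci_toggle_clks(struct ahci_host_priv *hpriv)
{
	int rc;

	rc = ahci_platform_enable_clks(hpriv);	/* unwinds itself on failure */
	if (rc)
		return rc;

	ahci_platform_disable_clks(hpriv);	/* reverse-order disable */
	return 0;
}
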
+
+/**
+ * ahci_platform_disable_clks - Disable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all the clks found in hpriv->clks, in reverse
+ * order of ahci_platform_enable_clks (starting at the end of the array).
+ */
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
+{
+ int c;
+
+ for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
+ if (hpriv->clks[c])
+ clk_disable_unprepare(hpriv->clks[c]);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
+
+/**
+ * ahci_platform_enable_resources - Enable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all ahci_platform managed resources in the
+ * following order:
+ * 1) Regulator
+ * 2) Clocks (through ahci_platform_enable_clks)
+ * 3) Phy
+ *
+ * If resource enabling fails at any point the previous enabled resources
+ * are disabled in reverse order.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc;
+
+ if (hpriv->target_pwr) {
+ rc = regulator_enable(hpriv->target_pwr);
+ if (rc)
+ return rc;
+ }
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ if (hpriv->phy) {
+ rc = phy_init(hpriv->phy);
+ if (rc)
+ goto disable_clks;
+
+ rc = phy_power_on(hpriv->phy);
+ if (rc) {
+ phy_exit(hpriv->phy);
+ goto disable_clks;
+ }
+ }
+
+ return 0;
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
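
A sketch of the intended pairing in a glue driver's resume callback, mirroring ahci_sunxi_resume() from earlier in this patch; foo_ahci_reinit_hw() stands in for whatever controller-specific re-initialization the driver needs and is hypothetical.

static int foo_ahci_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	int rc;

	rc = ahci_platform_enable_resources(hpriv); /* regulator, clks, phy */
	if (rc)
		return rc;

	rc = foo_ahci_reinit_hw(dev, hpriv->mmio); /* hypothetical helper */
	if (rc)
		goto disable_resources;

	rc = ahci_platform_resume_host(dev);
	if (rc)
		goto disable_resources;

	return 0;

disable_resources:
	ahci_platform_disable_resources(hpriv);
	return rc;
}
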
+
+/**
+ * ahci_platform_disable_resources - Disable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all ahci_platform managed resources in the
+ * following order:
+ * 1) Phy
+ * 2) Clocks (through ahci_platform_disable_clks)
+ * 3) Regulator
+ */
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
+{
+ if (hpriv->phy) {
+ phy_power_off(hpriv->phy);
+ phy_exit(hpriv->phy);
+ }
+
+ ahci_platform_disable_clks(hpriv);
+
+ if (hpriv->target_pwr)
+ regulator_disable(hpriv->target_pwr);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
+
+static void ahci_platform_put_resources(struct device *dev, void *res)
+{
+ struct ahci_host_priv *hpriv = res;
+ int c;
+
+ if (hpriv->got_runtime_pm) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ }
+
+ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
+ clk_put(hpriv->clks[c]);
+}
+
+/**
+ * ahci_platform_get_resources - Get platform resources
+ * @pdev: platform device to get resources for
+ *
+ * This function allocates an ahci_host_priv struct, and gets the following
+ * resources, storing a reference to them inside the returned struct:
+ *
+ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
+ * 2) regulator for controlling the targets power (optional)
+ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the device's devicetree node,
+ *    or, for non-devicetree enabled platforms, a single clock
+ * 4) phy (optional)
+ *
+ * RETURNS:
+ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
+ */
+struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct clk *clk;
+ int i, rc = -ENOMEM;
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+
+ hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
+ GFP_KERNEL);
+ if (!hpriv)
+ goto err_out;
+
+ devres_add(dev, hpriv);
+
+ hpriv->mmio = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(hpriv->mmio)) {
+ dev_err(dev, "no mmio space\n");
+ rc = PTR_ERR(hpriv->mmio);
+ goto err_out;
+ }
+
+ hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
+ if (IS_ERR(hpriv->target_pwr)) {
+ rc = PTR_ERR(hpriv->target_pwr);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ hpriv->target_pwr = NULL;
+ }
+
+ for (i = 0; i < AHCI_MAX_CLKS; i++) {
+ /*
+ * For now we must use clk_get(dev, NULL) for the first clock,
+ * because some platforms (da850, spear13xx) are not yet
+ * converted to use devicetree for clocks. For new platforms
+ * this is equivalent to of_clk_get(dev->of_node, 0).
+ */
+ if (i == 0)
+ clk = clk_get(dev, NULL);
+ else
+ clk = of_clk_get(dev->of_node, i);
+
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ break;
+ }
+ hpriv->clks[i] = clk;
+ }
+
+ hpriv->phy = devm_phy_get(dev, "sata-phy");
+ if (IS_ERR(hpriv->phy)) {
+ rc = PTR_ERR(hpriv->phy);
+ switch (rc) {
+ case -ENODEV:
+ case -ENOSYS:
+ /* continue normally */
+ hpriv->phy = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ goto err_out;
+
+ default:
+ dev_err(dev, "couldn't get sata-phy\n");
+ goto err_out;
+ }
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ hpriv->got_runtime_pm = true;
+
+ devres_remove_group(dev, NULL);
+ return hpriv;
+
+err_out:
+ devres_release_group(dev, NULL);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
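
Combined with ahci_platform_enable_resources() and ahci_platform_init_host() below, the minimal probe built on this helper looks roughly as follows; the foo_ names are hypothetical and the port_info mirrors the drivers added earlier in this patch.

static const struct ata_port_info foo_ahci_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static int foo_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev); /* mmio, regulator, clks, phy */
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &foo_ahci_port_info, 0, 0);
	if (rc)
		ahci_platform_disable_resources(hpriv);
	return rc;
}
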
+
+/**
+ * ahci_platform_init_host - Bring up an ahci-platform host
+ * @pdev: platform device pointer for the host
+ * @hpriv: ahci-host private data for the host
+ * @pi_template: template for the ata_port_info to use
+ * @force_port_map: param passed to ahci_save_initial_config
+ * @mask_port_map: param passed to ahci_save_initial_config
+ *
+ * This function does all the usual steps needed to bring up an
+ * ahci-platform host; note that any necessary resources (i.e. clks, phy,
+ * etc.) must be initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_init_host(struct platform_device *pdev,
+ struct ahci_host_priv *hpriv,
+ const struct ata_port_info *pi_template,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_port_info pi = *pi_template;
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ata_host *host;
+ int i, irq, n_ports, rc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ /* prepare host */
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
+
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR",
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = hpriv->em_msg_type;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_platform_sht);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_init_host);
+
+static void ahci_host_stop(struct ata_host *host)
+{
+ struct device *dev = host->dev;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ahci_platform_suspend_host - Suspend an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to suspend an
+ * ahci-platform host; note that any necessary resources (i.e. clks, phy,
+ * etc.) must be disabled after calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend_host(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(dev, "firmware update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ /*
+ * AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
+
+/**
+ * ahci_platform_resume_host - Resume an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to resume an ahci-platform
+ * host; note that any necessary resources (i.e. clks, phy, etc.) must be
+ * initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume_host(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ int rc;
+
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
+
+/**
+ * ahci_platform_suspend - Suspend an ahci-platform device
+ * @dev: the platform device to suspend
+ *
+ * This function suspends the host associated with the device and then
+ * disables all the resources of the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_suspend_host(dev);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->suspend) {
+ rc = pdata->suspend(dev);
+ if (rc)
+ goto resume_host;
+ }
+
+ ahci_platform_disable_resources(hpriv);
+
+ return 0;
+
+resume_host:
+ ahci_platform_resume_host(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend);
+
+/**
+ * ahci_platform_resume - Resume an ahci-platform device
+ * @dev: the platform device to resume
+ *
+ * This function enables all the resources of the device and then resumes
+ * the host associated with the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->resume) {
+ rc = pdata->resume(dev);
+ if (rc)
+ goto disable_resources;
+ }
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume);
+#endif
+
+MODULE_DESCRIPTION("AHCI SATA platform library");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
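
A glue driver with no controller-specific suspend/resume work can point its dev_pm_ops straight at ahci_platform_suspend()/ahci_platform_resume(). A sketch of that wiring, reusing the hypothetical foo_ahci_probe() sketched after ahci_platform_get_resources() above; the compatible string is equally hypothetical.

static const struct of_device_id foo_ahci_of_match[] = {
	{ .compatible = "vendor,foo-ahci" },	/* hypothetical binding */
	{ },
};

static SIMPLE_DEV_PM_OPS(foo_ahci_pm_ops, ahci_platform_suspend,
			 ahci_platform_resume);

static struct platform_driver foo_ahci_driver = {
	.probe	= foo_ahci_probe,
	.remove	= ata_platform_remove_one,
	.driver	= {
		.name		= "foo-ahci",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_ahci_of_match,
		.pm		= &foo_ahci_pm_ops,
	},
};
module_platform_driver(foo_ahci_driver);
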
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 4372cfa883c9..97a14fe47de1 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -20,8 +20,6 @@
#include <scsi/scsi_device.h>
#include "libata.h"
-#include <acpi/acpi_bus.h>
-
unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)");
@@ -40,6 +38,16 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
dev->gtf_cache = NULL;
}
+struct ata_acpi_hotplug_context {
+ struct acpi_hotplug_context hp;
+ union {
+ struct ata_port *ap;
+ struct ata_device *dev;
+ } data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
/**
* ata_dev_acpi_handle - provide the acpi_handle for an ata_device
* @dev: the acpi_handle returned will correspond to this device
@@ -123,18 +131,17 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
ata_port_wait_eh(ap);
}
-static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
{
- struct ata_device *dev = data;
-
+ struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+ return 0;
}
-static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
{
- struct ata_port *ap = data;
-
- ata_acpi_handle_hotplug(ap, NULL, event);
+ ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
+ return 0;
}
static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
@@ -156,59 +163,60 @@ static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
}
}
-static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
{
- ata_acpi_uevent(data, NULL, event);
+ ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
}
-static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
{
- struct ata_device *dev = data;
+ struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
- .handler = ata_acpi_dev_notify_dock,
- .uevent = ata_acpi_dev_uevent,
-};
-
-static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
- .handler = ata_acpi_ap_notify_dock,
- .uevent = ata_acpi_ap_uevent,
-};
-
/* bind acpi handle to pata port */
void ata_acpi_bind_port(struct ata_port *ap)
{
- acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
+ struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+ struct acpi_device *adev;
+ struct ata_acpi_hotplug_context *context;
- if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
+ if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
return;
- acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
+ acpi_preset_companion(&ap->tdev, host_companion, ap->port_no);
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
- /* we might be on a docking station */
- register_hotplug_dock_device(ACPI_HANDLE(&ap->tdev),
- &ata_acpi_ap_dock_ops, ap, NULL, NULL);
+ adev = ACPI_COMPANION(&ap->tdev);
+ if (!adev || adev->hp)
+ return;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return;
+
+ context->data.ap = ap;
+ acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
+ ata_acpi_ap_uevent);
}
void ata_acpi_bind_dev(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
- acpi_handle port_handle = ACPI_HANDLE(&ap->tdev);
- acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
- acpi_handle parent_handle;
+ struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
+ struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+ struct acpi_device *parent, *adev;
+ struct ata_acpi_hotplug_context *context;
u64 adr;
/*
- * For both sata/pata devices, host handle is required.
- * For pata device, port handle is also required.
+ * For both sata/pata devices, host companion device is required.
+ * For pata device, port companion device is also required.
*/
- if (libata_noacpi || !host_handle ||
- (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_handle))
+ if (libata_noacpi || !host_companion ||
+ (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion))
return;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
@@ -216,16 +224,24 @@ void ata_acpi_bind_dev(struct ata_device *dev)
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
else
adr = SATA_ADR(ap->port_no, dev->link->pmp);
- parent_handle = host_handle;
+ parent = host_companion;
} else {
adr = dev->devno;
- parent_handle = port_handle;
+ parent = port_companion;
}
- acpi_preset_companion(&dev->tdev, parent_handle, adr);
+ acpi_preset_companion(&dev->tdev, parent, adr);
+ adev = ACPI_COMPANION(&dev->tdev);
+ if (!adev || adev->hp)
+ return;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return;
- register_hotplug_dock_device(ata_dev_acpi_handle(dev),
- &ata_acpi_dev_dock_ops, dev, NULL, NULL);
+ context->data.dev = dev;
+ acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
+ ata_acpi_dev_uevent);
}
/**
@@ -837,6 +853,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ALL) {
ata_acpi_clear_gtf(dev);
if (ata_dev_enabled(dev) &&
+ ata_dev_acpi_handle(dev) &&
ata_dev_get_GTF(dev, NULL) >= 0)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1b196e..c19734d96d7e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1524,7 +1524,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
- * @dma_dir: Data tranfer direction of the command
+ * @dma_dir: Data transfer direction of the command
* @sgl: sg list for the data buffer of the command
* @n_elem: Number of sg entries
* @timeout: Timeout in msecs (0 for default)
@@ -1712,7 +1712,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
- * @dma_dir: Data tranfer direction of the command
+ * @dma_dir: Data transfer direction of the command
* @buf: Data buffer of the command
* @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -5351,22 +5352,17 @@ bool ata_link_offline(struct ata_link *link)
}
#ifdef CONFIG_PM
-static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
- unsigned int action, unsigned int ehi_flags,
- int *async)
+static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ unsigned int action, unsigned int ehi_flags,
+ bool async)
{
struct ata_link *link;
unsigned long flags;
- int rc = 0;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
- if (async) {
- *async = -EAGAIN;
- return 0;
- }
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@@ -5375,11 +5371,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_lock_irqsave(ap->lock, flags);
ap->pm_mesg = mesg;
- if (async)
- ap->pm_result = async;
- else
- ap->pm_result = &rc;
-
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
link->eh_info.action |= action;
@@ -5390,87 +5381,81 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- /* wait and check result */
if (!async) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
-
- return rc;
}
-static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
+/*
+ * On some hardware, the device fails to respond after being spun down for
+ * suspend. As the device won't be used before being resumed, we don't need
+ * to touch the device. Ask EH to skip the usual stuff and proceed directly
+ * to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+ | ATA_EHI_NO_AUTOPSY
+ | ATA_EHI_NO_RECOVERY;
+
+static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
- /*
- * On some hardware, device fails to respond after spun down
- * for suspend. As the device won't be used before being
- * resumed, we don't need to touch the device. Ask EH to skip
- * the usual stuff and proceed directly to suspend.
- *
- * http://thread.gmane.org/gmane.linux.ide/46764
- */
- unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
- ATA_EHI_NO_RECOVERY;
- return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
+ ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
- struct ata_port *ap = to_ata_port(dev);
-
- return __ata_port_suspend_common(ap, mesg, NULL);
+ ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
-static int ata_port_suspend(struct device *dev)
+static int ata_port_pm_suspend(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+
if (pm_runtime_suspended(dev))
return 0;
- return ata_port_suspend_common(dev, PMSG_SUSPEND);
+ ata_port_suspend(ap, PMSG_SUSPEND);
+ return 0;
}
-static int ata_port_do_freeze(struct device *dev)
+static int ata_port_pm_freeze(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+
if (pm_runtime_suspended(dev))
return 0;
- return ata_port_suspend_common(dev, PMSG_FREEZE);
+ ata_port_suspend(ap, PMSG_FREEZE);
+ return 0;
}
-static int ata_port_poweroff(struct device *dev)
+static int ata_port_pm_poweroff(struct device *dev)
{
- return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+ ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
+ return 0;
}
-static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
- int *async)
-{
- int rc;
+static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
+ | ATA_EHI_QUIET;
- rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
- return rc;
+static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
+{
+ ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
-static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
+static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
- struct ata_port *ap = to_ata_port(dev);
-
- return __ata_port_resume_common(ap, mesg, NULL);
+ ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
-static int ata_port_resume(struct device *dev)
+static int ata_port_pm_resume(struct device *dev)
{
- int rc;
-
- rc = ata_port_resume_common(dev, PMSG_RESUME);
- if (!rc) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return rc;
+ ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
}
/*
@@ -5499,21 +5484,23 @@ static int ata_port_runtime_idle(struct device *dev)
static int ata_port_runtime_suspend(struct device *dev)
{
- return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+ ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
+ return 0;
}
static int ata_port_runtime_resume(struct device *dev)
{
- return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+ ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
+ return 0;
}
static const struct dev_pm_ops ata_port_pm_ops = {
- .suspend = ata_port_suspend,
- .resume = ata_port_resume,
- .freeze = ata_port_do_freeze,
- .thaw = ata_port_resume,
- .poweroff = ata_port_poweroff,
- .restore = ata_port_resume,
+ .suspend = ata_port_pm_suspend,
+ .resume = ata_port_pm_resume,
+ .freeze = ata_port_pm_freeze,
+ .thaw = ata_port_pm_resume,
+ .poweroff = ata_port_pm_poweroff,
+ .restore = ata_port_pm_resume,
.runtime_suspend = ata_port_runtime_suspend,
.runtime_resume = ata_port_runtime_resume,
@@ -5525,18 +5512,17 @@ static const struct dev_pm_ops ata_port_pm_ops = {
* level. sas suspend/resume is async to allow parallel port recovery
* since sas has multiple ata_port instances per Scsi_Host.
*/
-int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+void ata_sas_port_suspend(struct ata_port *ap)
{
- return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+ ata_port_suspend_async(ap, PMSG_SUSPEND);
}
-EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
-int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+void ata_sas_port_resume(struct ata_port *ap)
{
- return __ata_port_resume_common(ap, PMSG_RESUME, async);
+ ata_port_resume_async(ap, PMSG_RESUME);
}
-EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
-
+EXPORT_SYMBOL_GPL(ata_sas_port_resume);
/**
* ata_host_suspend - suspend host
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6d8757008318..6760fc4e85b8 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -95,12 +95,13 @@ enum {
* represents timeout for that try. The first try can be soft or
* hardreset. All others are hardreset if available. In most cases
* the first reset w/ 10sec timeout should succeed. Following entries
- * are mostly for error handling, hotplug and retarded devices.
+ * are mostly for error handling, hotplug and those outlier devices that
+ * take an exceptionally long time to recover from reset.
*/
static const unsigned long ata_eh_reset_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
- 35000, /* give > 30 secs of idleness for retarded devices */
+ 35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
ULONG_MAX, /* > 1 min has elapsed, give up */
};
@@ -4069,7 +4070,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
ata_acpi_set_state(ap, ap->pm_mesg);
out:
- /* report result */
+ /* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_PM_PENDING;
@@ -4078,11 +4079,6 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
else if (ap->pflags & ATA_PFLAG_FROZEN)
ata_port_schedule_eh(ap);
- if (ap->pm_result) {
- *ap->pm_result = rc;
- ap->pm_result = NULL;
- }
-
spin_unlock_irqrestore(ap->lock, flags);
return;
@@ -4134,13 +4130,9 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
- /* report result */
+ /* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
- if (ap->pm_result) {
- *ap->pm_result = rc;
- ap->pm_result = NULL;
- }
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 20fd337a5731..7ccc084bf1df 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 88949c6d55dd..f3a65a3140d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -85,21 +85,6 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
return ODD_MECH_TYPE_UNSUPPORTED;
}
-static bool odd_can_poweroff(struct ata_device *ata_dev)
-{
- acpi_handle handle;
- struct acpi_device *acpi_dev;
-
- handle = ata_dev_acpi_handle(ata_dev);
- if (!handle)
- return false;
-
- if (acpi_bus_get_device(handle, &acpi_dev))
- return false;
-
- return acpi_device_can_poweroff(acpi_dev);
-}
-
/* Test if ODD is zero power ready by sense code */
static bool zpready(struct ata_device *dev)
{
@@ -267,13 +252,11 @@ static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
void zpodd_init(struct ata_device *dev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->tdev);
enum odd_mech_type mech_type;
struct zpodd *zpodd;
- if (dev->zpodd)
- return;
-
- if (!odd_can_poweroff(dev))
+ if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev))
return;
mech_type = zpodd_get_mech_type(dev);
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 73212c9c6d5b..5108b8744dce 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -7,16 +7,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
-#include <scsi/scsi_host.h>
-#include <acpi/acpi_bus.h>
-
+#include <linux/acpi.h>
#include <linux/libata.h>
#include <linux/ata.h>
+#include <scsi/scsi_host.h>
#define DRV_NAME "pata_acpi"
#define DRV_VERSION "0.2.3"
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d23e2b3ca0b6..1206fa6b62ca 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 73492dd4a4bc..6fac524c2f50 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -356,7 +356,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
static void dma_callback(void *dev)
{
- struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
+ struct arasan_cf_dev *acdev = dev;
complete(&acdev->dma_completion);
}
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 1581dee2967a..3aa4e655e3c6 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index d63ee8f41a4f..e9c87274a781 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 24e51056ac26..30fa4ca4cef6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 2ca5026f2c15..7e73a0f1e323 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 8fb69e5ca1b7..57f1be64dbf2 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 1275a8d4dedc..6bca3505b9e9 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index f10baabbf5db..bcde4b786807 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index f07f2296acdc..8afe854a5a50 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 997e16a3a63f..2c0986fa4bb2 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 0448860a2077..32ddcae5a360 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/libata.h>
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 810bc9964dde..3435bd6a5cc9 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3c12fd7acd41..f440892225f4 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 980b88e109fc..cad9d45749c4 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -34,7 +34,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 35b521348d31..8e76f79689d3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index a9d74eff5fc4..3ba843f5cdc0 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 4be0398c153d..b93c0f0729e7 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 85cf2861e0b7..255c5aaff3a8 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 26386f0b89a8..e0872db913d6 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
@@ -100,13 +99,9 @@ static int pata_imx_probe(struct platform_device *pdev)
struct resource *io_res;
int ret;
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (io_res == NULL)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct pata_imx_priv), GFP_KERNEL);
@@ -119,7 +114,9 @@ static int pata_imx_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -134,11 +131,10 @@ static int pata_imx_probe(struct platform_device *pdev)
ap->pio_mask = ATA_PIO0;
ap->flags |= ATA_FLAG_SLAVE_POSS;
- priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
- resource_size(io_res));
- if (!priv->host_regs) {
- dev_err(&pdev->dev, "failed to map IO/CTL base\n");
- ret = -EBUSY;
+ io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
+ if (IS_ERR(priv->host_regs)) {
+ ret = PTR_ERR(priv->host_regs);
goto err;
}
@@ -212,7 +208,9 @@ static int pata_imx_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- clk_prepare_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
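Note: the pata_imx hunks above show an error-handling idiom that recurs throughout this series: propagate the platform_get_irq() error code, let devm_ioremap_resource() validate and map the memory resource, and check the result of clk_prepare_enable() instead of ignoring it. A minimal probe sketch of that idiom follows; the foo_* names and the single clock are illustrative, not part of the patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	struct clk *clk;
	int irq, ret;

	/* platform_get_irq() returns a negative errno on failure */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* devm_ioremap_resource() checks the resource and returns ERR_PTR() */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_prepare_enable() can fail, so propagate the error */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... allocate and activate the ATA host here ... */
	return 0;
}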
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 2a8dd9527ecc..81369d187a5c 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 581e04d80367..dc3d7877f29d 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -72,7 +72,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 76e739b031b6..b1cfa0258fd3 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index be816428b430..bce2a8ca4678 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -916,7 +916,6 @@ static __init int probe_chip_type(struct legacy_probe *probe)
local_irq_restore(flags);
return BIOS;
}
- local_irq_restore(flags);
}
if (ht6560a & mask)
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index a4f5e781c8c2..6bad3df3a13c 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 1f5f28bb0bb8..f39a5379e816 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index ad1a0febd620..e3b97093ef9a 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 9513e071040d..56201a69af12 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 0c424dae56e7..6154c3ee11a5 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 16dc3a63a23d..d44df7ccfe43 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index d77b2e1054ef..319b64491b7b 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 4ea70cd22aee..fb042e0519d0 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 78ede3fd1875..bb71ea214b99 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 40254f4df584..bcc4b968c049 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 9d874c85d64d..1151f23177bb 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index c34fc50070a6..defa050e1784 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 2beb6b5045f8..0b46be117051 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 02794885de10..a5579b55e332 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -13,7 +13,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index a6f05acad61e..73259bfda1e3 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index f582ba180a7d..be3f10240dca 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 79a970f05a2e..521b2137ea3e 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 040b093617a4..caedc90855b2 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index ce2f828c17b3..96a232fffae6 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index f35f15f4d83e..f1f5b5ae3382 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -35,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index d3830c45a369..5a1cde0ea360 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 96c6a79ef606..e27f31fe1b67 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c4b0b073ba8e..73fe362d9716 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 1e8363640bf5..78d913aa93c8 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 6816911ac422..900f0e4a1faf 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 94473da68c02..7bc78e264f9e 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index c3ab9a6c3965..f6c9632bdff6 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -55,7 +55,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8ea6e6afd041..f10631beffa8 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 523524b68022..0bb2cabd2197 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -462,8 +461,7 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
int chan;
u32 tfr_reg, err_reg;
unsigned long flags;
- struct sata_dwc_device *hsdev =
- (struct sata_dwc_device *)hsdev_instance;
+ struct sata_dwc_device *hsdev = hsdev_instance;
struct ata_host *host = (struct ata_host *)hsdev->host;
struct ata_port *ap;
struct sata_dwc_device_port *hsdevp;
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 870b11eadc6d..65965cf5af06 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -142,7 +141,7 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
+ struct ecx_plat_data *pdata = hpriv->plat_data;
struct ahci_port_priv *pp = ap->private_data;
unsigned long flags;
int pmp, i;
@@ -403,6 +402,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static const unsigned long timing[] = { 5, 100, 500};
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
@@ -431,7 +431,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
break;
} while (!online && retry--);
- ahci_start_engine(ap);
+ hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..05c8a44adf8e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4104,7 +4104,6 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
@@ -4126,17 +4125,24 @@ static int mv_platform_probe(struct platform_device *pdev)
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
- hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+ hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+ port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
- if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
- dev_warn(&pdev->dev, "error getting phy");
+ if (rc != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error getting phy %d", rc);
+
+ /* Cleanup only the initialized ports */
+ hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
+ /* All the ports have been initialized */
+ hpriv->n_ports = n_ports;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4174,7 +4180,7 @@ err:
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
- for (port = 0; port < n_ports; port++) {
+ for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
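Note: the sata_mv hunk above switches to devm_phy_optional_get(), which returns NULL rather than an error when no PHY is described for a port, and only commits hpriv->n_ports once every port is initialized so the error path unwinds just the ports that were brought up. A condensed sketch of the per-port lookup; the foo_* names and fixed-size name buffer are illustrative assumptions.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int foo_get_port_phys(struct platform_device *pdev,
			     struct phy **phys, int n_ports)
{
	char name[16];
	int port;

	for (port = 0; port < n_ports; port++) {
		snprintf(name, sizeof(name), "port%d", port);
		/* NULL (no PHY described) is not an error for the optional variant */
		phys[port] = devm_phy_optional_get(&pdev->dev, name);
		if (IS_ERR(phys[port]))
			return PTR_ERR(phys[port]);	/* e.g. -EPROBE_DEFER */
		if (phys[port])
			phy_power_on(phys[port]);
	}
	return 0;
}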
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d74def823d3e..ba5f27120332 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -40,7 +40,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 97f4acb54ad6..3638887476f6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 3b0dd57984e1..9a6bd4cd29a0 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index d67fc351343c..3062f8605b29 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -157,6 +156,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 1ad2f62d34b9..b513428171b3 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index dc4f70179e7d..c630fa812624 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -39,7 +39,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 9947010afc0f..39b5de60a1f9 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -82,7 +82,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1021,8 +1020,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
idx++;
dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
- dist);
+ memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
size -= dist;
@@ -1031,8 +1029,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
- window_size / 4);
+ memcpy_fromio(psource, dimm_mmio, window_size / 4);
psource += window_size;
size -= window_size;
idx++;
@@ -1043,8 +1040,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
- size / 4);
+ memcpy_fromio(psource, dimm_mmio, size / 4);
}
}
#endif
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 6d6489118873..08f98c3ed5c8 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 87f056e54a9d..f72e84228c5c 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -36,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 44f304b3de63..29e847aac34b 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 8557adcd34ee..aa6be2698669 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -419,7 +419,6 @@ static void he_remove_one(struct pci_dev *pci_dev)
atm_dev_deregister(atm_dev);
kfree(he_dev);
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
}
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 5aca5f4c5458..9587e959ce1a 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -52,6 +52,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
+#include <linux/etherdevice.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
@@ -781,8 +782,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
- if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
- 0) {
+ if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) {
nicstar_read_eprom(card->membase,
NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
card->atmdev->esi, 6);
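Note: the nicstar change above replaces an open-coded 6-byte memcmp() with ether_addr_equal() from <linux/etherdevice.h>. For the specific all-zeroes test, the same header also provides is_zero_ether_addr(); a small illustrative helper (the function name is made up):

#include <linux/etherdevice.h>

/* True if the ESI read from EEPROM is still the all-zero placeholder. */
static bool foo_esi_is_unset(const u8 *esi)
{
	return is_zero_ether_addr(esi);	/* same as comparing with 00:00:00:00:00:00 */
}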
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 32784d18d1f7..e3fb496c7163 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1335,7 +1335,6 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_unmap_both:
kfree(card->dma_bounce);
- pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
pci_iounmap(dev, card->config_regs);
@@ -1457,7 +1456,6 @@ static void fpga_remove(struct pci_dev *dev)
pci_release_regions(dev);
pci_disable_device(dev);
- pci_set_drvdata(dev, NULL);
kfree(card);
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index e373671652b0..8fa8deab6449 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -49,7 +49,7 @@ config DEVTMPFS_MOUNT
with the commandline parameter: devtmpfs.mount=0|1.
This option does not affect initramfs based booting, here
the devtmpfs filesystem always needs to be mounted manually
- after the roots is mounted.
+ after the rootfs is mounted.
With this option enabled, it allows to bring up a system in
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
@@ -185,6 +185,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config GENERIC_CPU_AUTOPROBE
+ bool
+
config SOC_BUS
bool
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 870ecfd503af..04b314e0fa51 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -4,7 +4,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o
+ topology.o container.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index ecc1929d7f6a..b84ca8f13f9e 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -12,7 +12,6 @@
*/
#include <linux/attribute_container.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2cbc6774f4cd..24f424249d9b 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -100,6 +100,7 @@ static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
+extern void container_dev_init(void);
struct kobject *virtual_device_parent(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 59dc8086e4fa..83e910a57563 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1218,7 +1218,7 @@ err_dev:
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
+ * number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
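Note: the component.c hunks above wrap the master bind callback in an anonymous devres group, so any devm_* allocations made during bind are released as a unit when the bind fails or the master is later unbound. A minimal sketch of that idiom with a hypothetical bind callback:

#include <linux/device.h>
#include <linux/gfp.h>

static int foo_bind_with_devres_group(struct device *dev,
				      int (*bind)(struct device *dev))
{
	int ret;

	/* Open an anonymous group; everything devm-allocated from here on
	 * belongs to it. */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	ret = bind(dev);
	if (ret)
		/* Drop everything the failed bind allocated. */
		devres_release_group(dev, NULL);

	return ret;
}

The matching devres_release_group() on the unbind path (as added in take_down_master() above) frees the same group after the unbind callback has run.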
diff --git a/drivers/base/container.c b/drivers/base/container.c
new file mode 100644
index 000000000000..ecbfbe2e908f
--- /dev/null
+++ b/drivers/base/container.c
@@ -0,0 +1,44 @@
+/*
+ * System bus type for containers.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/container.h>
+
+#include "base.h"
+
+#define CONTAINER_BUS_NAME "container"
+
+static int trivial_online(struct device *dev)
+{
+ return 0;
+}
+
+static int container_offline(struct device *dev)
+{
+ struct container_dev *cdev = to_container_dev(dev);
+
+ return cdev->offline ? cdev->offline(cdev) : 0;
+}
+
+struct bus_type container_subsys = {
+ .name = CONTAINER_BUS_NAME,
+ .dev_name = CONTAINER_BUS_NAME,
+ .online = trivial_online,
+ .offline = container_offline,
+};
+
+void __init container_dev_init(void)
+{
+ int ret;
+
+ ret = subsys_system_register(&container_subsys, NULL);
+ if (ret)
+ pr_err("%s() failed: %d\n", __func__, ret);
+}
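Note: the new container bus above only registers the subsystem; callers are expected to register their own container devices on it and supply an offline callback (the ACPI container driver is the intended in-tree user). A hedged sketch of such a registration, assuming container_subsys is visible to the caller as defined non-static in the file above; the foo_* names, the device name, the release helper and the unconditional -EBUSY are illustrative only.

#include <linux/container.h>
#include <linux/slab.h>

static int foo_container_offline(struct container_dev *cdev)
{
	return -EBUSY;	/* veto offlining in this example */
}

static void foo_container_release(struct device *dev)
{
	kfree(to_container_dev(dev));
}

static int foo_register_container(struct device *parent)
{
	struct container_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->offline = foo_container_offline;
	cdev->dev.parent = parent;
	cdev->dev.bus = &container_subsys;
	cdev->dev.release = foo_container_release;
	dev_set_name(&cdev->dev, "foo0");
	return device_register(&cdev->dev);
}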
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2b567177ef78..0dd65281cc65 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -23,7 +23,6 @@
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
@@ -571,6 +570,23 @@ void device_remove_file(struct device *dev,
EXPORT_SYMBOL_GPL(device_remove_file);
/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+ else
+ return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
+
+/**
* device_create_bin_file - create sysfs binary attribute file for device.
* @dev: device.
* @attr: device binary attribute descriptor.
@@ -2003,7 +2019,6 @@ void device_shutdown(void)
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
- async_synchronize_full();
}
/*
@@ -2058,7 +2073,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
return pos;
}
-EXPORT_SYMBOL(create_syslog_header);
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
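Note: device_remove_file_self(), added above, lets a sysfs store method remove its own attribute (see kernfs_remove_self()) without deadlocking on itself; the winning caller is then responsible for any follow-up teardown. A sketch of the intended usage pattern with hypothetical names:

#include <linux/device.h>

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	/* Returns true only for the caller that wins the removal race. */
	if (device_remove_file_self(dev, attr))
		device_unregister(dev);	/* safe: our attribute is already gone */
	return count;
}
static DEVICE_ATTR_WO(delete);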
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f48370dfc908..006b1bc5297d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -15,6 +15,7 @@
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/cpufeature.h>
#include "base.h"
@@ -286,6 +287,41 @@ static void cpu_device_release(struct device *dev)
*/
}
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t n;
+ u32 i;
+
+ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ n += sprintf(&buf[n], ",%04X", i);
+ }
+ buf[n++] = '\n';
+ return n;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -306,8 +342,8 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- cpu->dev.bus->uevent = arch_cpu_uevent;
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
@@ -330,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
@@ -344,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 545c4de412c3..db4e264eecb6 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -791,6 +791,32 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into that.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = devm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
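Note: devm_kstrdup(), added above, ties the lifetime of the copied string to the device, so no kfree() is needed on any error or remove path. A small usage sketch; the "label" parameter and foo_* name are illustrative.

#include <linux/device.h>
#include <linux/gfp.h>

static int foo_store_label(struct device *dev, const char *label,
			   const char **out)
{
	*out = devm_kstrdup(dev, label, GFP_KERNEL);
	if (!*out)
		return -ENOMEM;	/* NULL is also returned for a NULL input */
	return 0;
}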
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..ea77701deda4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -251,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
- * Returns struct dma_buf_attachment * for this attachment; may return negative
- * error codes.
- *
+ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
+ * error.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
@@ -319,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* @attach: [in] attachment whose scatterlist is to be returned
* @direction: [in] direction of DMA transfer
*
- * Returns sg_table containing the scatterlist to be returned; may return NULL
- * or ERR_PTR.
- *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -334,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return ERR_PTR(-EINVAL);
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (!sg_table)
+ sg_table = ERR_PTR(-ENOMEM);
return sg_table;
}
@@ -544,6 +544,8 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * Returns NULL on error.
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
@@ -566,7 +568,9 @@ void *dma_buf_vmap(struct dma_buf *dmabuf)
BUG_ON(dmabuf->vmap_ptr);
ptr = dmabuf->ops->vmap(dmabuf);
- if (IS_ERR_OR_NULL(ptr))
+ if (WARN_ON_ONCE(IS_ERR(ptr)))
+ ptr = NULL;
+ if (!ptr)
goto out_unlock;
dmabuf->vmap_ptr = ptr;
@@ -616,36 +620,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
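Note: the dma-buf changes above make dma_buf_map_attachment() return ERR_PTR() on every failure (a NULL sg_table from the exporter is converted to -ENOMEM), so importers need only a single IS_ERR() check. A minimal importer-side sketch, names illustrative:

#include <linux/dma-buf.h>
#include <linux/err.h>

static int foo_import_sgt(struct dma_buf_attachment *attach,
			  struct sg_table **out)
{
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);	/* never NULL after this change */

	*out = sgt;
	return 0;
}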
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a97ddfa6122..d276e33880be 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -649,7 +649,9 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- fw_map_pages_buf(fw_buf);
+ if (fw_map_pages_buf(fw_buf))
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
list_del_init(&fw_buf->pending_list);
complete_all(&fw_buf->completion);
break;
@@ -900,7 +902,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
- schedule_delayed_work(&fw_priv->timeout_work, timeout);
+ queue_delayed_work(system_power_efficient_wq,
+ &fw_priv->timeout_work, timeout);
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -908,6 +911,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
+ if (!buf->data)
+ retval = -ENOMEM;
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
@@ -1570,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work)
*/
static void device_uncache_fw_images_delay(unsigned long delay)
{
- schedule_delayed_work(&fw_cache.work,
- msecs_to_jiffies(delay));
+ queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+ msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
@@ -1580,6 +1585,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
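Note: firmware_class now queues its timeout and cache-cleanup work on system_power_efficient_wq, so when the power-efficient workqueue mode is enabled the work may run on any CPU instead of being pinned to the submitting one. The general pattern, with illustrative names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void foo_timeout_fn(struct work_struct *work)
{
	/* handle the expired timeout */
}

static DECLARE_DELAYED_WORK(foo_timeout_work, foo_timeout_fn);

static void foo_arm_timeout(unsigned int delay_ms)
{
	queue_delayed_work(system_power_efficient_wq, &foo_timeout_work,
			   msecs_to_jiffies(delay_ms));
}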
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c16f0b808a17..da033d3bab3c 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -33,4 +33,5 @@ void __init driver_init(void)
platform_bus_init();
cpu_dev_init();
memory_dev_init();
+ container_dev_init();
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index bc9f43bf7e29..8f7ed9933a7c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -599,7 +599,11 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
+ if (!node_devices[nid])
+ return;
+
unregister_node(node_devices[nid]);
+ kfree(node_devices[nid]);
node_devices[nid] = NULL;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3a94b799f166..e714709704e4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -481,11 +481,10 @@ static int platform_drv_probe(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
int ret;
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_attach(_dev, true);
+ acpi_dev_pm_attach(_dev, true);
ret = drv->probe(dev);
- if (ret && ACPI_HANDLE(_dev))
+ if (ret)
acpi_dev_pm_detach(_dev, true);
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -508,8 +507,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
return ret;
}
@@ -520,8 +518,7 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
}
/**
@@ -677,7 +674,17 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
- int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+ int len;
+
+ len = of_device_get_modalias(dev, buf, PAGE_SIZE -1);
+ if (len != -ENODEV)
+ return len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
+ if (len != -ENODEV)
+ return len;
+
+ len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
@@ -699,6 +706,10 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
if (rc != -ENODEV)
return rc;
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
pdev->name);
return 0;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c0..1cb8544598d5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 9d8fde709390..b99e6c06ee67 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -33,6 +32,21 @@ struct pm_clock_entry {
};
/**
+ * pm_clk_enable - Enable a clock, reporting any errors
+ * @dev: The device for the given clock
+ * @clk: The clock being enabled.
+ */
+static inline int __pm_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret = clk_enable(clk);
+ if (ret)
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, clk, ret);
+
+ return ret;
+}
+
+/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: PM clock entry corresponding to the clock.
@@ -43,6 +57,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
+ clk_prepare(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
@@ -99,10 +114,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable_unprepare(ce->clk);
+ clk_disable(ce->clk);
- if (ce->status >= PCE_STATUS_ACQUIRED)
+ if (ce->status >= PCE_STATUS_ACQUIRED) {
+ clk_unprepare(ce->clk);
clk_put(ce->clk);
+ }
}
kfree(ce->con_id);
@@ -249,6 +266,7 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -259,8 +277,9 @@ int pm_clk_resume(struct device *dev)
list_for_each_entry(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
- clk_enable(ce->clk);
- ce->status = PCE_STATUS_ENABLED;
+ ret = __pm_clk_enable(dev, ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
}
}
@@ -376,7 +395,7 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
list_for_each_entry(ce, &psd->clock_list, node)
- clk_enable(ce->clk);
+ __pm_clk_enable(dev, ce->clk);
spin_unlock_irqrestore(&psd->lock, flags);
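Note: the clock_ops.c changes above split clock preparation from enabling: clk_prepare() (which may sleep) now happens once in pm_clk_acquire(), while the runtime suspend/resume paths use only clk_enable()/clk_disable(), which are safe in atomic context, and enable failures are reported through __pm_clk_enable(). A condensed sketch of the prepare/enable split, names illustrative:

#include <linux/clk.h>

static int foo_clk_setup(struct clk *clk)
{
	return clk_prepare(clk);	/* may sleep; done once at acquire time */
}

static int foo_clk_runtime_resume(struct clk *clk)
{
	return clk_enable(clk);		/* atomic-safe fast path */
}

static void foo_clk_runtime_suspend(struct clk *clk)
{
	clk_disable(clk);		/* atomic-safe fast path */
}

static void foo_clk_teardown(struct clk *clk)
{
	clk_unprepare(clk);		/* may sleep; matches clk_prepare() */
}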
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 5da914041305..df2e5eeaeb05 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index bfb8955c406c..6f54962aae1d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -42,7 +41,7 @@
struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
if (!__retval && __elapsed > __td->field) { \
__td->field = __elapsed; \
- dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
genpd->max_off_time_changed = true; \
__td->constraint_changed = true; \
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 28dee3053f1f..a089e3bcdfbc 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 5ee030a864f9..96a92db83cad 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.
@@ -48,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev)
return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
@@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_restore);
/**
- * pm_generic_complete - Generic routine competing a device power transition.
+ * pm_generic_complete - Generic routine completing a device power transition.
* @dev: Device to handle.
*
* Complete a device power transition during a system-wide power transition.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65a..86d5e4fb5b98 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_noirq_suspended = false;
Out:
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/
static void dpm_resume_noirq(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_noirq, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_early, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ if (!is_async(dev)) {
+ int error;
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "early");
}
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev);
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
}
/**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
callback = pm_noirq_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_noirq_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_noirq, dev);
+ return 0;
+ }
+ return __device_suspend_noirq(dev, pm_transition, false);
}
/**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state);
+ error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "noirq");
+ }
return error;
}
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
__pm_runtime_disable(dev, false);
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
callback = pm_late_early_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_late_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_late, dev);
+ return 0;
+ }
+
+ return __device_suspend_late(dev, pm_transition, false);
}
/**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
int error = 0;
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state);
+ error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "late");
-
+ }
return error;
}
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index fa4187418440..25538675d59e 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index cfc3226ec492..a21223d95926 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5c1361a9e5dd..36b9eb4862cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
s32 __dev_pm_qos_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->latency);
+ 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
/**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
int ret;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
- ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
- action, value);
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
if (ret) {
- value = pm_qos_read_value(&qos->latency);
+ value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
(unsigned long)value,
req);
}
break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_head_init(&c->list);
- c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
goto out;
/* Flush the constraints lists for the device. */
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
+static bool dev_pm_qos_invalid_request(struct device *dev,
+ struct dev_pm_qos_request *req)
+{
+ return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+ && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || dev_pm_qos_invalid_request(dev, req))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (!ret) {
+ req->dev = dev;
+ req->type = type;
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
+ return ret;
+}
+
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
- int ret = 0;
-
- if (!dev || !req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(dev_pm_qos_request_active(req),
- "%s() called for already added request\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (IS_ERR(dev->power.qos))
- ret = -ENODEV;
- else if (!dev->power.qos)
- ret = dev_pm_qos_constraints_allocate(dev);
-
- trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
- ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
-
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
-
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -ENODEV;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
- ret = blocking_notifier_chain_register(
- dev->power.qos->latency.notifiers, notifier);
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
/* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(
- dev->power.qos->latency.notifiers,
- notifier);
+ retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
- struct dev_pm_qos_request *req, s32 value)
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
- while (ancestor && !ancestor->power.ignore_children)
- ancestor = ancestor->parent;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+
+ break;
+ default:
+ ancestor = NULL;
+ }
if (ancestor)
- ret = dev_pm_qos_add_request(ancestor, req,
- DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
struct dev_pm_qos_request *req = NULL;
switch(type) {
- case DEV_PM_QOS_LATENCY:
- req = dev->power.qos->latency_req;
- dev->power.qos->latency_req = NULL;
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
- else if (dev->power.qos->latency_req)
+ else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->latency_req = req;
+ dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
- ret = pm_qos_sysfs_add_latency(dev);
+ ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
pm_runtime_put(dev);
return ret;
}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
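/*
 * A sketch of the driver-side hook that makes the latency tolerance requests
 * above useful, assuming the power.set_latency_tolerance field added by this
 * series.  The foo_* names are hypothetical; a negative value means "no
 * application constraint" and PM_QOS_LATENCY_ANY means any latency is fine.
 */
#include <linux/device.h>
#include <linux/pm_qos.h>

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	/* Program the (hypothetical) hardware LTR register accordingly. */
	if (val < 0 || val == PM_QOS_LATENCY_ANY)
		dev_dbg(dev, "using autonomous latency tolerance\n");
	else
		dev_dbg(dev, "latency tolerance set to %d us\n", val);
}

static void foo_setup_latency_tolerance(struct device *dev)
{
	/*
	 * Must be set before the device is registered so that dpm_sysfs_add()
	 * exposes power/pm_qos_latency_tolerance_us for it.
	 */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
}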
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 72e00e66ecc5..67c7938e430b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
+ callback = rpm_get_idle_cb(dev);
if (callback)
retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
* cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
* suspended after its runtime PM has been disabled.
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put(dev->parent);
}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and,
+ * if it is active, invoke its .runtime_suspend callback to bring it into a
+ * suspended state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * This function is typically invoked from a system suspend callback to make
+ * sure the device is put into a low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+ * Note that pm_runtime_status_suspended() returns false while
+ * !CONFIG_PM_RUNTIME, which means the device will be put into a low
+ * power state in that case.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the device
+ * into a low power state by a call to pm_runtime_force_suspend(). Here we reverse
+ * those actions and bring the device back to full power. We update the runtime PM
+ * status and re-enable runtime PM.
+ *
+ * This function is typically invoked from a system resume callback to make
+ * sure the device is put back into a full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
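/*
 * A minimal usage sketch, assuming a hypothetical "foo" driver whose system
 * sleep handling can be reduced to its runtime PM callbacks; foo_runtime_*
 * stand in for the driver's real runtime PM routines.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the hardware into its low power state */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* bring the hardware back to full power */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};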
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 03e089ade5ce..95b181d1ca6d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
-static ssize_t pm_qos_latency_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
}
-static ssize_t pm_qos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_latency_show, pm_qos_latency_store);
+ pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sprintf(buf, "auto\n");
+ else if (value == PM_QOS_LATENCY_ANY)
+ return sprintf(buf, "any\n");
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value)) {
+ if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ value = PM_QOS_LATENCY_ANY;
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+ pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_latency_attrs,
+ .attrs = pm_qos_latency_tolerance_attrs,
};
static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_out;
}
-
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
- if (rc) {
- if (pm_runtime_callbacks_present(dev))
- sysfs_unmerge_group(&dev->kobj,
- &pm_runtime_attr_group);
- goto err_out;
- }
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
}
return 0;
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 33414b1de201..7d1326985bee 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -134,6 +134,8 @@ struct regmap {
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d4dd77134814..29b4128da0b0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
{
unsigned int reg;
- for (reg = min; reg <= max; reg++) {
+ for (reg = min; reg <= max; reg += map->reg_stride) {
unsigned int val;
int ret;
- if (regmap_volatile(map, reg))
+ if (regmap_volatile(map, reg) ||
+ !regmap_writeable(map, reg))
continue;
ret = regcache_read(map, reg, &val);
@@ -312,10 +313,6 @@ int regcache_sync(struct regmap *map)
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
- if (map->patch[i].reg % map->reg_stride) {
- ret = -EINVAL;
- goto out;
- }
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -636,10 +633,10 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
if (*data == NULL)
return 0;
- count = cur - base;
+ count = (cur - base) / map->reg_stride;
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
- count * val_bytes, count, base, cur - 1);
+ count * val_bytes, count, base, cur - map->reg_stride);
map->cache_bypass = 1;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index c5471cd6ebb7..45d812c0ea77 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -511,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("range", 0400, map->debugfs,
map, &regmap_reg_ranges_fops);
- if (map->max_register) {
+ if (map->max_register || regmap_readable(map, 0)) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index fa6bf5279d28..ebd189529760 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/init.h>
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 763c60d3d277..edf88f20cbce 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -113,7 +113,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* OR if there is masked interrupt which hasn't been Acked,
* it'll be ignored in irq handler, then may introduce irq storm
*/
- if (d->mask_buf[i] && d->chip->ack_base) {
+ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
reg = d->chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
ret = regmap_write(map, reg, d->mask_buf[i]);
@@ -271,7 +271,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
- if (data->status_buf[i] && chip->ack_base) {
+ if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * data->irq_reg_stride);
ret = regmap_write(map, reg, data->status_buf[i]);
@@ -368,8 +368,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
- *data = d;
-
d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->status_buf)
@@ -448,7 +446,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_alloc;
}
- if (d->status_buf[i] && chip->ack_base) {
+ if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
ret = regmap_write(map, reg,
@@ -506,6 +504,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_domain;
}
+ *data = d;
+
return 0;
err_domain:
@@ -533,7 +533,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
return;
free_irq(irq, d);
- /* We should unmap the domain but... */
+ irq_domain_remove(d->domain);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 81f977510775..1e03e7f8bacb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -18,7 +18,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -26,10 +25,47 @@
struct regmap_mmio_context {
void __iomem *regs;
+ unsigned reg_bytes;
unsigned val_bytes;
+ unsigned pad_bytes;
struct clk *clk;
};
+static inline void regmap_mmio_regsize_check(size_t reg_size)
+{
+ switch (reg_size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+#ifdef CONFIG_64BIT
+ case 64:
+#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline void regmap_mmio_count_check(size_t count)
+{
+ BUG_ON(count % 2 != 0);
+}
+
static int regmap_mmio_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
@@ -38,7 +74,7 @@ static int regmap_mmio_gather_write(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -81,9 +117,13 @@ static int regmap_mmio_gather_write(void *context,
static int regmap_mmio_write(void *context, const void *data, size_t count)
{
- BUG_ON(count < 4);
+ struct regmap_mmio_context *ctx = context;
+ u32 offset = ctx->reg_bytes + ctx->pad_bytes;
+
+ regmap_mmio_count_check(count);
- return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+ return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
+ data + offset, count - offset);
}
static int regmap_mmio_read(void *context,
@@ -94,7 +134,7 @@ static int regmap_mmio_read(void *context,
u32 offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
@@ -165,8 +205,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
int min_stride;
int ret;
- if (config->reg_bits != 32)
- return ERR_PTR(-EINVAL);
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
if (config->pad_bits)
return ERR_PTR(-EINVAL);
@@ -209,6 +250,8 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->reg_bytes = config->reg_bits / 8;
+ ctx->pad_bytes = config->pad_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37f12ae7aada..0eb3097c0d76 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/init.h>
#include <linux/module.h>
#include "internal.h"
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index ac2391013db1..d7026dc33388 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -22,69 +22,235 @@
#include <linux/module.h>
#include <linux/init.h>
-static int regmap_spmi_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int regmap_spmi_base_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ while (val_size-- && !err)
+ err = spmi_register_read(context, addr++, val++);
+
+ return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ const u8 *data = val;
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ /*
+ * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+ * use it when possible.
+ */
+ if (addr == 0 && val_size) {
+ err = spmi_register_zero_write(context, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+ while (val_size) {
+ err = spmi_register_write(context, addr, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 1);
+ return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+ count - 1);
+}
+
+static struct regmap_bus regmap_spmi_base = {
+ .read = regmap_spmi_base_read,
+ .write = regmap_spmi_base_write,
+ .gather_write = regmap_spmi_base_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
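/*
 * A usage sketch, assuming a hypothetical SPMI slave driver: the Base
 * register space is byte addressed (0x00-0xFF), hence 8-bit registers and
 * values; the register address written below is made up.
 */
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spmi.h>

static int foo_spmi_probe(struct spmi_device *sdev)
{
	static const struct regmap_config foo_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0xff,
	};
	struct regmap *map;

	map = devm_regmap_init_spmi_base(sdev, &foo_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0x04, 0x01);
}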
+
+static int regmap_spmi_ext_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_readl(context, *(u16 *)reg,
- val, val_size);
+
+ addr = *(u16 *)reg;
+
+ /*
+ * Split accesses into two to take advantage of the more
+ * bandwidth-efficient 'Extended Register Read' command when possible
+ */
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_read(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_readl(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+static int regmap_spmi_ext_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
BUG_ON(reg_size != 2);
- return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size);
+
+ addr = *(u16 *)reg;
+
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_write(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_writel(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
}
-static int regmap_spmi_write(void *context, const void *data,
- size_t count)
+static int regmap_spmi_ext_write(void *context, const void *data,
+ size_t count)
{
BUG_ON(count < 2);
- return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2);
+ return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+ count - 2);
}
-static struct regmap_bus regmap_spmi = {
- .read = regmap_spmi_read,
- .write = regmap_spmi_write,
- .gather_write = regmap_spmi_gather_write,
+static struct regmap_bus regmap_spmi_ext = {
+ .read = regmap_spmi_ext_read,
+ .write = regmap_spmi_ext_write,
+ .gather_write = regmap_spmi_ext_gather_write,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
/**
- * regmap_init_spmi(): Initialize register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_spmi(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config)
{
- return regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi);
+EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
/**
- * devm_regmap_init_spmi(): Initialise managed register map
- *
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev,
+struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
const struct regmap_config *config)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config);
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi);
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c2e002100949..d0a072463a04 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -380,6 +380,28 @@ static void regmap_range_exit(struct regmap *map)
kfree(map->selector_work_buf);
}
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config)
+{
+ struct regmap **m;
+
+ map->dev = dev;
+
+ regmap_debugfs_init(map, config->name);
+
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ regmap_debugfs_exit(map);
+ return -ENOMEM;
+ }
+ *m = map;
+ devres_add(dev, m);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
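/*
 * A sketch of the intended flow, assuming a syscon-style user that maps its
 * registers before any struct device exists and attaches one later; the
 * foo_* names are hypothetical, and regmap_init_mmio() is called with a NULL
 * device, which the reworked regmap_init() above now tolerates.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static struct regmap *foo_map;

static int foo_early_init(void __iomem *base)
{
	/* Early boot: no struct device for this register block yet. */
	foo_map = regmap_init_mmio(NULL, base, &foo_regmap_config);
	return PTR_ERR_OR_ZERO(foo_map);
}

static int foo_probe(struct platform_device *pdev)
{
	/* Gain devres tracking and debugfs once a real device shows up. */
	return regmap_attach_dev(&pdev->dev, foo_map, &foo_regmap_config);
}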
+
/**
* regmap_init(): Initialise register map
*
@@ -397,7 +419,7 @@ struct regmap *regmap_init(struct device *dev,
void *bus_context,
const struct regmap_config *config)
{
- struct regmap *map, **m;
+ struct regmap *map;
int ret = -EINVAL;
enum regmap_endian reg_endian, val_endian;
int i, j;
@@ -439,6 +461,7 @@ struct regmap *regmap_init(struct device *dev,
else
map->reg_stride = 1;
map->use_single_rw = config->use_single_rw;
+ map->can_multi_write = config->can_multi_write;
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -718,7 +741,7 @@ skip_format_initialization:
new->window_start = range_cfg->window_start;
new->window_len = range_cfg->window_len;
- if (_regmap_range_add(map, new) == false) {
+ if (!_regmap_range_add(map, new)) {
dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
@@ -734,25 +757,18 @@ skip_format_initialization:
}
}
- regmap_debugfs_init(map, config->name);
-
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- /* Add a devres resource for dev_get_regmap() */
- m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
- if (!m) {
- ret = -ENOMEM;
- goto err_debugfs;
- }
- *m = map;
- devres_add(dev, m);
+ if (dev)
+ ret = regmap_attach_dev(dev, map, config);
+ if (ret != 0)
+ goto err_regcache;
return map;
-err_debugfs:
- regmap_debugfs_exit(map);
+err_regcache:
regcache_exit(map);
err_range:
regmap_range_exit(map);
@@ -1514,58 +1530,278 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
- void *wval;
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
+ if (map->bus && !map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map->lock_arg);
+ /*
+ * Some devices don't support bulk write; for
+ * them we fall back to a series of single write operations.
+ */
+ if (!map->bus || map->use_single_rw) {
+ map->lock(map->lock_arg);
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
- /* No formatting is require if val_byte is 1 */
- if (val_bytes == 1) {
- wval = (void *)val;
+ switch (val_bytes) {
+ case 1:
+ ival = *(u8 *)(val + (i * val_bytes));
+ break;
+ case 2:
+ ival = *(u16 *)(val + (i * val_bytes));
+ break;
+ case 4:
+ ival = *(u32 *)(val + (i * val_bytes));
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ ival = *(u64 *)(val + (i * val_bytes));
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = _regmap_write(map, reg + (i * map->reg_stride),
+ ival);
+ if (ret != 0)
+ goto out;
+ }
+out:
+ map->unlock(map->lock_arg);
} else {
+ void *wval;
+
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
- ret = -ENOMEM;
dev_err(map->dev, "Error in memory allocation\n");
- goto out;
+ return -ENOMEM;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
+
+ map->lock(map->lock_arg);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ map->unlock(map->lock_arg);
+
+ kfree(wval);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
+
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * The (register, newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been made page relative.
+ * The page register has already been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ void *buf;
+ int i;
+ u8 *u8;
+ size_t val_bytes = map->format.val_bytes;
+ size_t reg_bytes = map->format.reg_bytes;
+ size_t pad_bytes = map->format.pad_bytes;
+ size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+ size_t len = pair_size * num_regs;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* We have to linearise by hand. */
+
+ u8 = buf;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ int val = regs[i].def;
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+ map->format.format_reg(u8, reg, map->reg_shift);
+ u8 += reg_bytes + pad_bytes;
+ map->format.format_val(u8, val, 0);
+ u8 += val_bytes;
+ }
+ u8 = buf;
+ *u8 |= map->write_flag_mask;
+
+ ret = map->bus->write(map->bus_context, buf, len);
+
+ kfree(buf);
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ trace_regmap_hw_write_done(map->dev, reg, 1);
}
+ return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+ unsigned int reg,
+ struct regmap_range_node *range)
+{
+ unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+ return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+ struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ int i, n;
+ struct reg_default *base;
+ unsigned int this_page;
/*
- * Some devices does not support bulk write, for
- * them we have a series of single write operations.
+ * the set of registers is not necessarily in order, but
+ * since the order of writes must be preserved this algorithm
+ * chops the set each time the page changes
*/
- if (map->use_single_rw) {
- for (i = 0; i < val_count; i++) {
- ret = _regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ base = regs;
+ for (i = 0, n = 0; i < num_regs; i++, n++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ unsigned int win_page = _regmap_register_page(map, reg,
+ range);
+
+ if (i == 0)
+ this_page = win_page;
+ if (win_page != this_page) {
+ this_page = win_page;
+ ret = _regmap_raw_multi_reg_write(map, base, n);
+ if (ret != 0)
+ return ret;
+ base += n;
+ n = 0;
+ }
+ ret = _regmap_select_page(map, &base[n].reg, range, 1);
if (ret != 0)
- goto out;
+ return ret;
}
- } else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
}
+ if (n > 0)
+ return _regmap_raw_multi_reg_write(map, base, n);
+ return 0;
+}
- if (val_bytes != 1)
- kfree(wval);
+static int _regmap_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int i;
+ int ret;
+
+ if (!map->can_multi_write) {
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+ }
+
+ if (!map->format.parse_inplace)
+ return -EINVAL;
+
+ if (map->writeable_reg)
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (!map->writeable_reg(map->dev, reg))
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ if (!map->cache_bypass) {
+ for (i = 0; i < num_regs; i++) {
+ unsigned int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ ret = regcache_write(map, reg, val);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ WARN_ON(!map->bus);
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ size_t len = sizeof(struct reg_default)*num_regs;
+ struct reg_default *base = kmemdup(regs, len,
+ GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+ ret = _regmap_range_multi_paged_reg_write(map, base,
+ num_regs);
+ kfree(base);
+
+ return ret;
+ }
+ }
+ return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/*
+ * regmap_multi_reg_write(): Write multiple registers to the device
+ *
+ * where the set of register,value pairs is supplied in any order,
+ * possibly not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+ int num_regs)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
-out:
map->unlock(map->lock_arg);
+
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_bulk_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
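/*
 * A usage sketch: the pairs are written in the order given and, when the map
 * was registered with .can_multi_write and no paging is involved, they go out
 * as a single R1,V1,...,Rn,Vn transfer.  The register addresses and values
 * below are made up.
 */
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_default foo_init_seq[] = {
	{ .reg = 0x10, .def = 0x01 },
	{ .reg = 0x24, .def = 0x80 },
	{ .reg = 0x11, .def = 0x3f },
};

static int foo_apply_init_seq(struct regmap *map)
{
	return regmap_multi_reg_write(map, foo_init_seq,
				      ARRAY_SIZE(foo_init_seq));
}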
/*
- * regmap_multi_reg_write(): Write multiple registers to the device
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ * device but not the cache
*
 * where the set of registers is supplied in any order
*
@@ -1580,30 +1816,27 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
- int num_regs)
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs)
{
- int ret = 0, i;
-
- for (i = 0; i < num_regs; i++) {
- int reg = regs[i].reg;
- if (reg % map->reg_stride)
- return -EINVAL;
- }
+ int ret;
+ bool bypass;
map->lock(map->lock_arg);
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0)
- goto out;
- }
-out:
+ bypass = map->cache_bypass;
+ map->cache_bypass = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->cache_bypass = bypass;
+
map->unlock(map->lock_arg);
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
* regmap_raw_write_async(): Write raw values to one or more registers
@@ -1724,6 +1957,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
if (map->cache_only)
return -EBUSY;
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
@@ -1897,14 +2133,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
- return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- if (vol || map->cache_type == REGCACHE_NONE) {
+ if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
/*
 * Some devices do not support bulk read; for
 * them we have a series of single read operations.
@@ -1958,9 +2190,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
if (tmp != orig) {
ret = _regmap_write(map, reg, tmp);
- *change = true;
+ if (change)
+ *change = true;
} else {
- *change = false;
+ if (change)
+ *change = false;
}
return ret;
@@ -1979,11 +2213,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->unlock(map->lock_arg);
return ret;
@@ -2008,14 +2241,13 @@ EXPORT_SYMBOL_GPL(regmap_update_bits);
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->async = false;
@@ -2165,30 +2397,20 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* apply them immediately. Typically this is used to apply
* corrections to be applied to the device defaults on startup, such
* as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
*/
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs)
{
struct reg_default *p;
- int i, ret;
+ int ret;
bool bypass;
- map->lock(map->lock_arg);
-
- bypass = map->cache_bypass;
-
- map->cache_bypass = true;
- map->async = true;
-
- /* Write out first; it's useful to apply even if we fail later. */
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- regs[i].reg, regs[i].def, ret);
- goto out;
- }
- }
+ if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
+ num_regs))
+ return 0;
p = krealloc(map->patch,
sizeof(struct reg_default) * (map->patch_regs + num_regs),
@@ -2198,9 +2420,20 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
map->patch = p;
map->patch_regs += num_regs;
} else {
- ret = -ENOMEM;
+ return -ENOMEM;
}
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ map->async = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+ if (ret != 0)
+ goto out;
+
out:
map->async = false;
map->cache_bypass = bypass;
@@ -2228,6 +2461,18 @@ int regmap_get_val_bytes(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ *val = map->format.parse_val(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
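regmap_parse_val() and the reworked regmap_register_patch() above are driver-facing entry points. A hedged kernel-style sketch of how a driver might register a vendor patch under the new rules; the register addresses, values, and function name are made up, only regmap_register_patch() and struct reg_default come from the regmap API:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical vendor-provided fixups for undocumented registers. */
static const struct reg_default example_patch[] = {
        { .reg = 0x71, .def = 0x0004 },
        { .reg = 0x72, .def = 0x0180 },
};

static int example_apply_patch(struct regmap *map)
{
        /* Written to the device immediately and replayed by regcache_sync();
         * per the comment above, the caller must not race this against
         * itself or a cache sync. */
        return regmap_register_patch(map, example_patch,
                                     ARRAY_SIZE(example_patch));
}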
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 94ffee378f10..ad9d17762664 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 7c081b38ef3e..0ee48be23837 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -75,6 +75,7 @@ config BCMA_DRIVER_GMAC_CMN
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA && GPIOLIB
+ select IRQ_DOMAIN if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 0215f9ad755c..09b632ad0fe2 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -33,8 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
-struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 4d07cce9c5d9..7e11ef4cb7db 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -38,7 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
- { 0 },
+ { NULL },
};
static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
@@ -56,7 +56,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
{ "SST25VF016", 0x41, 0x1000, 512, },
{ "SST25VF032", 0x4a, 0x1000, 1024, },
{ "SST25VF064", 0x4b, 0x1000, 2048, },
- { 0 },
+ { NULL },
};
static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
@@ -67,7 +67,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
{ "AT45DB161", 0x2c, 512, 4096, },
{ "AT45DB321", 0x34, 512, 8192, },
{ "AT45DB642", 0x3c, 1024, 8192, },
- { 0 },
+ { NULL },
};
static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 45f0996a3752..25f9887a35d0 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -9,6 +9,9 @@
*/
#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
@@ -73,19 +76,136 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
- return bcma_core_irq(cc->core);
+ return irq_find_mapping(cc->irq_domain, gpio);
else
return -EINVAL;
}
+static void bcma_gpio_irq_unmask(struct irq_data *d)
+{
+ struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
+
+ bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
+ bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
+}
+
+static void bcma_gpio_irq_mask(struct irq_data *d)
+{
+ struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+
+ bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
+}
+
+static struct irq_chip bcma_gpio_irq_chip = {
+ .name = "BCMA-GPIO",
+ .irq_mask = bcma_gpio_irq_mask,
+ .irq_unmask = bcma_gpio_irq_unmask,
+};
+
+static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct bcma_drv_cc *cc = dev_id;
+ u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
+ u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
+ u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
+ generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+ bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+ int gpio, hwirq, err;
+
+ if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+ return 0;
+
+ cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, cc);
+ if (!cc->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(cc->irq_domain, gpio);
+
+ irq_set_chip_data(irq, cc);
+ irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = bcma_core_irq(cc->core);
+ err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
+ cc);
+ if (err)
+ goto err_req_irq;
+
+ bcma_chipco_gpio_intmask(cc, ~0, 0);
+ bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(cc->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+ int gpio;
+
+ if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+ return;
+
+ bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
+ free_irq(bcma_core_irq(cc->core), cc);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(cc->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(cc->irq_domain);
+}
+#else
+static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+{
+ return 0;
+}
+
+static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+{
+}
+#endif
+
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
+ int err;
chip->label = "bcma_gpio";
chip->owner = THIS_MODULE;
@@ -95,7 +215,9 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
+#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
chip->to_irq = bcma_gpio_to_irq;
+#endif
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -105,10 +227,21 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = bcma_gpio_irq_domain_init(cc);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ bcma_gpio_irq_domain_exit(cc);
+ return err;
+ }
+
+ return 0;
}
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
+ bcma_gpio_irq_domain_exit(cc);
return gpiochip_remove(&cc->gpio);
}
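With the irq_domain registered, gpio_to_irq() on a BCMA SoC GPIO now resolves through irq_find_mapping() to a real Linux IRQ. A hedged consumer-side sketch; the GPIO number, handler, and function names are illustrative:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_button_isr(int irq, void *data)
{
        /* Runs when the polarity/intmask logic above reports a level change. */
        return IRQ_HANDLED;
}

static int example_request_bcma_gpio_irq(unsigned int gpio)
{
        int irq = gpio_to_irq(gpio);    /* irq_find_mapping() via chip->to_irq */

        if (irq < 0)
                return irq;
        return request_irq(irq, example_button_isr, 0, "example-button", NULL);
}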
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6fb98b53533f..e333305363aa 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -238,7 +238,6 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_release_regions(dev);
pci_disable_device(dev);
kfree(bus);
- pci_set_drvdata(dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
@@ -270,7 +269,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
#endif /* CONFIG_PM_SLEEP */
-static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
+static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index e15430a82e90..34ea4c588d36 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -78,18 +78,6 @@ static u16 bcma_cc_core_id(struct bcma_bus *bus)
return BCMA_CORE_CHIPCOMMON;
}
-struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
-{
- struct bcma_device *core;
-
- list_for_each_entry(core, &bus->cores, list) {
- if (core->id.id == coreid)
- return core;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(bcma_find_core);
-
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
u8 unit)
{
@@ -101,6 +89,7 @@ struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
}
return NULL;
}
+EXPORT_SYMBOL_GPL(bcma_find_core_unit);
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout)
@@ -176,6 +165,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus,
"Could not register dev for core 0x%03X\n",
core->id.id);
+ put_device(&core->dev);
continue;
}
core->dev_registered = true;
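The added put_device() follows the device_register() contract: when registration fails, the embedded kobject already holds a reference that must be dropped with put_device() (which ends up in the release callback) rather than freed directly. A hedged sketch of the pattern in isolation, with an illustrative structure and release function:

#include <linux/device.h>
#include <linux/slab.h>

struct example_core {
        struct device dev;
};

static void example_core_release(struct device *dev)
{
        kfree(container_of(dev, struct example_core, dev));
}

static int example_register_core(struct example_core *core)
{
        int err;

        core->dev.release = example_core_release;
        err = device_register(&core->dev);
        if (err) {
                /* Never kfree() here: drop the kobject reference instead,
                 * which ends up in the release callback. */
                put_device(&core->dev);
                return err;
        }
        return 0;
}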
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb3950113e42..125d84505738 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6411,12 +6411,12 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
.ScatterGatherSegments[0]
.SegmentByteCount =
CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- while (Controller->V2.NewControllerInformation->PhysicalScanActive)
- {
- DAC960_ExecuteCommand(Command);
- sleep_on_timeout(&Controller->CommandWaitQueue, HZ);
- }
+ while (1) {
+ DAC960_ExecuteCommand(Command);
+ if (!Controller->V2.NewControllerInformation->PhysicalScanActive)
+ break;
+ msleep(1000);
+ }
DAC960_UserCritical("Discovery Completed\n", Controller);
}
}
@@ -7035,18 +7035,16 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
break;
}
- while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
- == HealthStatusBuffer.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == HealthStatusBuffer.NextEventSequenceNumber)
- {
- interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
- DAC960_MonitoringTimerInterval);
- if (signal_pending(current)) {
- ErrorCode = -EINTR;
- break;
- }
- }
+ ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
+ !(Controller->V2.HealthStatusBuffer->StatusChangeCounter
+ == HealthStatusBuffer.StatusChangeCounter &&
+ Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+ == HealthStatusBuffer.NextEventSequenceNumber),
+ DAC960_MonitoringTimerInterval);
+ if (ErrorCode == -ERESTARTSYS) {
+ ErrorCode = -EINTR;
+ break;
+ }
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T)))
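Both DAC960 hunks replace the racy sleep_on() family with wait_event helpers. A hedged sketch of how the return value of wait_event_interruptible_timeout() is conventionally decoded; the wait queue, condition, and timeout here are illustrative:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;

static int example_wait(void)
{
        long ret = wait_event_interruptible_timeout(example_wq, example_ready,
                                                    msecs_to_jiffies(1000));

        if (ret == -ERESTARTSYS)
                return -EINTR;          /* interrupted by a signal */
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out, condition still false */
        return 0;                       /* condition became true in time */
}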
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 86b9f37d102e..014a1cfc41c5 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -108,6 +108,8 @@ source "drivers/block/paride/Kconfig"
source "drivers/block/mtip32xx/Kconfig"
+source "drivers/block/zram/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS && 0
@@ -368,7 +370,8 @@ config BLK_DEV_RAM
For details, read <file:Documentation/blockdev/ramdisk.txt>.
To compile this driver as a module, choose M here: the
- module will be called rd.
+ module will be called brd. An alias "rd" has been defined
+ for historical reasons.
Most normal users won't need the RAM disk functionality, and can
thus say N here.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 8cc98cd0d4a8..02b688d1438d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
-obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_HD) += hd.o
@@ -43,6 +42,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
nvme-y := nvme-core.o nvme-scsi.o
skd-y := skd_main.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
struct buf {
ulong nframesout;
- ulong resid;
- ulong bv_resid;
- sector_t sector;
struct bio *bio;
- struct bio_vec *bv;
+ struct bvec_iter iter;
struct request *rq;
};
@@ -120,13 +117,10 @@ struct frame {
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
- sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
struct sk_buff *r_skb; /* response skb for async processing */
struct buf *buf;
- struct bio_vec *bv;
- ulong bcnt;
- ulong bv_off;
+ struct bvec_iter iter;
char flags;
};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..422b7d84f686 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
- f->lba = 0;
- f->bv = NULL;
+ memset(&f->iter, 0, sizeof(f->iter));
f->r_skb = NULL;
f->flags = 0;
list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
}
static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
int frag = 0;
- ulong fcnt;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ __bio_for_each_segment(bv, bio, iter, iter)
+ skb_fill_page_desc(skb, frag++, bv.bv_page,
+ bv.bv_offset, bv.bv_len);
}
static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
t->nout++;
f->waited = 0;
f->waited_total = 0;
- if (f->buf)
- f->lba = f->buf->sector;
/* set up ata header */
- ah->scnt = f->bcnt >> 9;
- put_lba(ah, f->lba);
+ ah->scnt = f->iter.bi_size >> 9;
+ put_lba(ah, f->iter.bi_sector);
if (t->d->flags & DEVFL_EXT) {
ah->aflags |= AOEAFL_EXT;
} else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
}
if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ skb_fillup(skb, f->buf->bio, f->iter);
ah->aflags |= AOEAFL_WRITE;
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
t->wpkts++;
} else {
t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
struct buf *buf;
struct sk_buff *skb;
struct sk_buff_head queue;
- ulong bcnt, fbcnt;
buf = nextbuf(d);
if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- bcnt = d->maxbcnt;
- if (bcnt == 0)
- bcnt = DEFAULTBCNT;
- if (bcnt > buf->resid)
- bcnt = buf->resid;
- fbcnt = bcnt;
- f->bv = buf->bv;
- f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
- do {
- if (fbcnt < buf->bv_resid) {
- buf->bv_resid -= fbcnt;
- buf->resid -= fbcnt;
- break;
- }
- fbcnt -= buf->bv_resid;
- buf->resid -= buf->bv_resid;
- if (buf->resid == 0) {
- d->ip.buf = NULL;
- break;
- }
- buf->bv++;
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
- } while (fbcnt);
/* initialize the headers & frame */
f->buf = buf;
- f->bcnt = bcnt;
- ata_rw_frameinit(f);
+ f->iter = buf->iter;
+ f->iter.bi_size = min_t(unsigned long,
+ d->maxbcnt ?: DEFAULTBCNT,
+ f->iter.bi_size);
+ bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+ if (!buf->iter.bi_size)
+ d->ip.buf = NULL;
/* mark all tracking fields and load out */
buf->nframesout += 1;
- buf->sector += bcnt >> 9;
+
+ ata_rw_frameinit(f);
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
skb = nf->skb;
nf->skb = f->skb;
nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
+ nf->iter = f->iter;
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
}
f->flags |= FFL_PROBE;
ifrotate(t);
- f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
ata_rw_frameinit(f);
skb = f->skb;
- for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
if (n < PAGE_SIZE)
m = n;
else
m = PAGE_SIZE;
skb_fill_page_desc(skb, frag, empty_page, 0, m);
}
- skb->len += f->bcnt;
- skb->data_len = f->bcnt;
- skb->truesize += f->bcnt;
+ skb->len += f->iter.bi_size;
+ skb->data_len = f->iter.bi_size;
+ skb->truesize += f->iter.bi_size;
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
static void
bio_pageinc(struct bio *bio)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
struct page *page;
- int i;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv->bv_page);
+ page = compound_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
static void
bio_pagedec(struct bio *bio)
{
- struct bio_vec *bv;
struct page *page;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
- bio_for_each_segment(bv, bio, i) {
- page = compound_trans_head(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ page = compound_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
- buf->resid = bio->bi_size;
- buf->sector = bio->bi_sector;
+ buf->iter = bio->bi_iter;
bio_pageinc(bio);
- buf->bv = bio_iovec(bio);
- buf->bv_resid = buf->bv->bv_len;
- WARN_ON(buf->bv_resid == 0);
}
static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
}
static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
- ulong fcnt;
- char *p;
int soff = 0;
-loop:
- fcnt = bv->bv_len - (off - bv->bv_offset);
- if (fcnt > cnt)
- fcnt = cnt;
- p = page_address(bv->bv_page) + off;
- skb_copy_bits(skb, soff, p, fcnt);
- soff += fcnt;
- cnt -= fcnt;
- if (cnt <= 0)
- return;
- bv++;
- off = bv->bv_offset;
- goto loop;
+ struct bio_vec bv;
+
+ iter.bi_size = cnt;
+
+ __bio_for_each_segment(bv, bio, iter, iter) {
+ char *p = page_address(bv.bv_page) + bv.bv_offset;
+ skb_copy_bits(skb, soff, p, bv.bv_len);
+ soff += bv.bv_len;
+ }
}
void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
- } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+ } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
- bvcpy(f->bv, f->bv_off, skb, n);
+ if (n > f->iter.bi_size) {
+ pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
+ "aoe: too-large data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ n, f->iter.bi_size);
+ clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+ break;
+ }
+ bvcpy(skb, f->buf->bio, f->iter, n);
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
aoe_freetframe(f);
- if (buf && --buf->nframesout == 0 && buf->resid == 0)
+ if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
aoe_end_buf(d, buf);
spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
{
if (buf == NULL)
return;
- buf->resid = 0;
+ buf->iter.bi_size = 0;
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
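The aoe conversion leans on the immutable-biovec iterators: each frame snapshots the buffer's bvec_iter, clamps bi_size to its per-frame byte budget, and the buffer's own iterator is advanced past that chunk. A hedged kernel-style sketch of the carve-off pattern; the helper name is illustrative:

#include <linux/kernel.h>
#include <linux/bio.h>

/* Take the next chunk of up to max_bytes from @bio at position @pos and
 * return an iterator describing just that chunk. */
static struct bvec_iter example_take_chunk(struct bio *bio,
                                           struct bvec_iter *pos,
                                           unsigned int max_bytes)
{
        struct bvec_iter chunk = *pos;

        chunk.bi_size = min_t(unsigned int, chunk.bi_size, max_bytes);
        bio_advance_iter(bio, pos, chunk.bi_size);
        return chunk;
}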
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 0e30c6e5492a..96b629e1f0c9 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -68,6 +68,8 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
@@ -301,7 +303,7 @@ module_param_array(UserSteprate, int, NULL, 0);
/* Synchronization of FDC access. */
static volatile int fdc_busy = 0;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
-static DECLARE_WAIT_QUEUE_HEAD(format_wait);
+static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
#define CHECK_CHANGE_DELAY HZ/2
@@ -608,7 +610,7 @@ static void fd_error( void )
if (IsFormatting) {
IsFormatting = 0;
FormatError = 1;
- wake_up( &format_wait );
+ complete(&format_wait);
return;
}
@@ -650,9 +652,8 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
drive, desc->track, desc->head, desc->sect_offset ));
+ wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
- while( fdc_busy ) sleep_on( &fdc_wait );
- fdc_busy = 1;
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
local_irq_restore(flags);
@@ -706,7 +707,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ReqSide = desc->head;
do_fd_action( drive );
- sleep_on( &format_wait );
+ wait_for_completion(&format_wait);
redo_fd_request();
return( FormatError ? -EIO : 0 );
@@ -1229,7 +1230,7 @@ static void fd_writetrack_done( int status )
goto err_end;
}
- wake_up( &format_wait );
+ complete(&format_wait);
return;
err_end:
@@ -1497,8 +1498,7 @@ repeat:
void do_fd_request(struct request_queue * q)
{
DPRINT(("do_fd_request for pid %d\n",current->pid));
- while( fdc_busy ) sleep_on( &fdc_wait );
- fdc_busy = 1;
+ wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
stdma_lock(floppy_irq, NULL);
atari_disable_irq( IRQ_MFP_FDC );
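fdc_busy now behaves like a tiny lock: cmpxchg() claims it atomically inside wait_event(), and the matching release (done elsewhere in the driver) is a store back to 0 followed by wake_up(). A hedged sketch of both halves with illustrative names:

#include <linux/wait.h>
#include <linux/atomic.h>

static volatile int example_busy;
static DECLARE_WAIT_QUEUE_HEAD(example_wait);

static void example_lock(void)
{
        /* Sleep until this caller is the one that flips 0 -> 1. */
        wait_event(example_wait, cmpxchg(&example_busy, 0, 1) == 0);
}

static void example_unlock(void)
{
        example_busy = 0;
        wake_up(&example_wait);
}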
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
int rw;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
sector_t sector;
- int i;
+ struct bvec_iter iter;
int err = -EIO;
- sector = bio->bi_sector;
+ sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
- discard_from_brd(brd, sector, bio->bi_size);
+ discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
if (rw == READA)
rw = READ;
- bio_for_each_segment(bvec, bio, i) {
- unsigned int len = bvec->bv_len;
- err = brd_do_bvec(brd, bvec->bv_page, len,
- bvec->bv_offset, rw, sector);
+ bio_for_each_segment(bvec, bio, iter) {
+ unsigned int len = bvec.bv_len;
+ err = brd_do_bvec(brd, bvec.bv_page, len,
+ bvec.bv_offset, rw, sector);
if (err)
break;
sector += len >> SECTOR_SHIFT;
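After the immutable-biovec conversion, bio_for_each_segment() hands out struct bio_vec by value and tracks the position in a caller-owned struct bvec_iter instead of an integer index. A hedged sketch of the new loop shape; the per-segment work is illustrative:

#include <linux/bio.h>

static unsigned int example_count_bytes(struct bio *bio)
{
        struct bio_vec bvec;            /* a copy, not a pointer into the bio */
        struct bvec_iter iter;          /* replaces the old integer index */
        unsigned int bytes = 0;

        bio_for_each_segment(bvec, bio, iter)
                bytes += bvec.bv_len;

        return bytes;                   /* equals bio->bi_iter.bi_size */
}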
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b35fc4f5237c..73894ca33956 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4092,11 +4092,9 @@ static void cciss_interrupt_mode(ctlr_info_t *h)
if (err > 0) {
dev_warn(&h->pdev->dev,
"only %d MSI-X vectors available\n", err);
- goto default_int_mode;
} else {
dev_warn(&h->pdev->dev,
"MSI-X init failed %d\n", err);
- goto default_int_mode;
}
}
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
@@ -5004,7 +5002,7 @@ reinit_after_soft_reset:
i = alloc_cciss_hba(pdev);
if (i < 0)
- return -1;
+ return -ENOMEM;
h = hba[i];
h->pdev = pdev;
@@ -5205,7 +5203,7 @@ clean_no_release_regions:
*/
pci_set_drvdata(pdev, NULL);
free_hba(h);
- return -1;
+ return -ENODEV;
}
static void cciss_shutdown(struct pci_dev *pdev)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..90ae4ba8f9ee 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -95,34 +95,36 @@ struct __packed al_transaction_on_disk {
struct update_odbm_work {
struct drbd_work w;
+ struct drbd_device *device;
unsigned int enr;
};
struct update_al_work {
struct drbd_work w;
+ struct drbd_device *device;
struct completion event;
int err;
};
-void *drbd_md_get_buffer(struct drbd_conf *mdev)
+void *drbd_md_get_buffer(struct drbd_device *device)
{
int r;
- wait_event(mdev->misc_wait,
- (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
- mdev->state.disk <= D_FAILED);
+ wait_event(device->misc_wait,
+ (r = atomic_cmpxchg(&device->md_io_in_use, 0, 1)) == 0 ||
+ device->state.disk <= D_FAILED);
- return r ? NULL : page_address(mdev->md_io_page);
+ return r ? NULL : page_address(device->md_io_page);
}
-void drbd_md_put_buffer(struct drbd_conf *mdev)
+void drbd_md_put_buffer(struct drbd_device *device)
{
- if (atomic_dec_and_test(&mdev->md_io_in_use))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->md_io_in_use))
+ wake_up(&device->misc_wait);
}
-void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int *done)
{
long dt;
@@ -134,15 +136,15 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait,
- *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ dt = wait_event_timeout(device->misc_wait,
+ *done || test_bit(FORCE_DETACH, &device->flags), dt);
if (dt == 0) {
- dev_err(DEV, "meta-data IO operation timed out\n");
- drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ drbd_err(device, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
}
}
-static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
+static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
@@ -150,82 +152,82 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct bio *bio;
int err;
- mdev->md_io.done = 0;
- mdev->md_io.error = -ENODEV;
+ device->md_io.done = 0;
+ device->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, page, size, 0) != size)
goto out;
- bio->bi_private = &mdev->md_io;
+ bio->bi_private = &device->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
- if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+ if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
;
- else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+ else if (!get_ldev_if_state(device, D_ATTACHING)) {
/* Corresponding put_ldev in drbd_md_io_complete() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
err = -ENODEV;
goto out;
}
bio_get(bio); /* one bio_put() is in the completion handler */
- atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
- if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+ atomic_inc(&device->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
+ if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+ wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (bio_flagged(bio, BIO_UPTODATE))
- err = mdev->md_io.error;
+ err = device->md_io.error;
out:
bio_put(bio);
return err;
}
-int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
int err;
- struct page *iop = mdev->md_io_page;
+ struct page *iop = device->md_io_page;
- D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
+ D_ASSERT(device, atomic_read(&device->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
- dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
+ drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
- dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
+ drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
/* we do all our meta data IO in aligned 4k blocks. */
- err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
+ err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096);
if (err) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
+ drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
}
return err;
}
-static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
struct lc_element *tmp;
- tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags))
@@ -234,47 +236,48 @@ static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsig
return NULL;
}
-static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
+static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
struct lc_element *al_ext;
struct bm_extent *bm_ext;
int wake;
- spin_lock_irq(&mdev->al_lock);
- bm_ext = find_active_resync_extent(mdev, enr);
+ spin_lock_irq(&device->al_lock);
+ bm_ext = find_active_resync_extent(device, enr);
if (bm_ext) {
wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
return NULL;
}
if (nonblock)
- al_ext = lc_try_get(mdev->act_log, enr);
+ al_ext = lc_try_get(device->act_log, enr);
else
- al_ext = lc_get(mdev->act_log, enr);
- spin_unlock_irq(&mdev->al_lock);
+ al_ext = lc_get(device->act_log, enr);
+ spin_unlock_irq(&device->al_lock);
return al_ext;
}
-bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- D_ASSERT((unsigned)(last - first) <= 1);
- D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+ D_ASSERT(device, (unsigned)(last - first) <= 1);
+ D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
/* FIXME figure out a fast path for bios crossing AL extent boundaries */
if (first != last)
return false;
- return _al_get(mdev, first, true);
+ return _al_get(device, first, true);
}
-bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
+static
+bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
@@ -283,20 +286,20 @@ bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
unsigned enr;
bool need_transaction = false;
- D_ASSERT(first <= last);
- D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+ D_ASSERT(device, first <= last);
+ D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
- wait_event(mdev->al_wait,
- (al_ext = _al_get(mdev, enr, false)) != NULL);
+ wait_event(device->al_wait,
+ (al_ext = _al_get(device, enr, false)) != NULL);
if (al_ext->lc_number != enr)
need_transaction = true;
}
return need_transaction;
}
-static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
+static int al_write_transaction(struct drbd_device *device, bool delegate);
/* When called through generic_make_request(), we must delegate
* activity log I/O to the worker thread: a further request
@@ -310,58 +313,58 @@ static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
/*
* @delegate: delegate activity log I/O to the worker thread
*/
-void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
+void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
{
bool locked = false;
- BUG_ON(delegate && current == mdev->tconn->worker.task);
+ BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);
/* Serialize multiple transactions.
* This uses test_and_set_bit, memory barrier is implicit.
*/
- wait_event(mdev->al_wait,
- mdev->act_log->pending_changes == 0 ||
- (locked = lc_try_lock_for_transaction(mdev->act_log)));
+ wait_event(device->al_wait,
+ device->act_log->pending_changes == 0 ||
+ (locked = lc_try_lock_for_transaction(device->act_log)));
if (locked) {
/* Double check: it may have been committed by someone else,
* while we have been waiting for the lock. */
- if (mdev->act_log->pending_changes) {
+ if (device->act_log->pending_changes) {
bool write_al_updates;
rcu_read_lock();
- write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates)
- al_write_transaction(mdev, delegate);
- spin_lock_irq(&mdev->al_lock);
+ al_write_transaction(device, delegate);
+ spin_lock_irq(&device->al_lock);
/* FIXME
if (err)
we need an "lc_cancel" here;
*/
- lc_committed(mdev->act_log);
- spin_unlock_irq(&mdev->al_lock);
+ lc_committed(device->act_log);
+ spin_unlock_irq(&device->al_lock);
}
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
}
}
/*
* @delegate: delegate activity log I/O to the worker thread
*/
-void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
+void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
{
- BUG_ON(delegate && current == mdev->tconn->worker.task);
+ BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);
- if (drbd_al_begin_io_prepare(mdev, i))
- drbd_al_begin_io_commit(mdev, delegate);
+ if (drbd_al_begin_io_prepare(device, i))
+ drbd_al_begin_io_commit(device, delegate);
}
-int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
- struct lru_cache *al = mdev->act_log;
+ struct lru_cache *al = device->act_log;
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
@@ -370,7 +373,7 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
unsigned available_update_slots;
unsigned enr;
- D_ASSERT(first <= last);
+ D_ASSERT(device, first <= last);
nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
available_update_slots = min(al->nr_elements - al->used,
@@ -385,7 +388,7 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
/* Is resync active in this area? */
for (enr = first; enr <= last; enr++) {
struct lc_element *tmp;
- tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+ tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
if (unlikely(tmp != NULL)) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
@@ -401,14 +404,14 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
* this has to be successful. */
for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext;
- al_ext = lc_get_cumulative(mdev->act_log, enr);
+ al_ext = lc_get_cumulative(device->act_log, enr);
if (!al_ext)
- dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
+ drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
}
return 0;
}
-void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
+void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
@@ -418,19 +421,19 @@ void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
struct lc_element *extent;
unsigned long flags;
- D_ASSERT(first <= last);
- spin_lock_irqsave(&mdev->al_lock, flags);
+ D_ASSERT(device, first <= last);
+ spin_lock_irqsave(&device->al_lock, flags);
for (enr = first; enr <= last; enr++) {
- extent = lc_find(mdev->act_log, enr);
+ extent = lc_find(device->act_log, enr);
if (!extent) {
- dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+ drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
continue;
}
- lc_put(mdev->act_log, extent);
+ lc_put(device->act_log, extent);
}
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- wake_up(&mdev->al_wait);
+ spin_unlock_irqrestore(&device->al_lock, flags);
+ wake_up(&device->al_wait);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -460,13 +463,13 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
-static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
- const unsigned int stripes = mdev->ldev->md.al_stripes;
- const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+ const unsigned int stripes = device->ldev->md.al_stripes;
+ const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
/* transaction number, modulo on-disk ring buffer wrap around */
- unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+ unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
/* ... to aligned 4k on disk block */
t = ((t % stripes) * stripe_size_4kB) + t/stripes;
@@ -475,11 +478,11 @@ static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
t *= 8;
/* ... plus offset to the on disk position */
- return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+ return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
static int
-_al_write_transaction(struct drbd_conf *mdev)
+_al_write_transaction(struct drbd_device *device)
{
struct al_transaction_on_disk *buffer;
struct lc_element *e;
@@ -489,31 +492,31 @@ _al_write_transaction(struct drbd_conf *mdev)
unsigned crc = 0;
int err = 0;
- if (!get_ldev(mdev)) {
- dev_err(DEV, "disk is %s, cannot start al transaction\n",
- drbd_disk_str(mdev->state.disk));
+ if (!get_ldev(device)) {
+ drbd_err(device, "disk is %s, cannot start al transaction\n",
+ drbd_disk_str(device->state.disk));
return -EIO;
}
/* The bitmap write may have failed, causing a state change. */
- if (mdev->state.disk < D_INCONSISTENT) {
- dev_err(DEV,
+ if (device->state.disk < D_INCONSISTENT) {
+ drbd_err(device,
"disk is %s, cannot write al transaction\n",
- drbd_disk_str(mdev->state.disk));
- put_ldev(mdev);
+ drbd_disk_str(device->state.disk));
+ put_ldev(device);
return -EIO;
}
- buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */
if (!buffer) {
- dev_err(DEV, "disk failed while waiting for md_io buffer\n");
- put_ldev(mdev);
+ drbd_err(device, "disk failed while waiting for md_io buffer\n");
+ put_ldev(device);
return -ENODEV;
}
memset(buffer, 0, sizeof(*buffer));
buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
- buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
+ buffer->tr_number = cpu_to_be32(device->al_tr_number);
i = 0;
@@ -521,8 +524,8 @@ _al_write_transaction(struct drbd_conf *mdev)
* once we set the LC_LOCKED -- from drbd_al_begin_io(),
* lc_try_lock_for_transaction() --, someone may still
* be in the process of changing it. */
- spin_lock_irq(&mdev->al_lock);
- list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+ spin_lock_irq(&device->al_lock);
+ list_for_each_entry(e, &device->act_log->to_be_changed, list) {
if (i == AL_UPDATES_PER_TRANSACTION) {
i++;
break;
@@ -530,11 +533,11 @@ _al_write_transaction(struct drbd_conf *mdev)
buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
if (e->lc_number != LC_FREE)
- drbd_bm_mark_for_writeout(mdev,
+ drbd_bm_mark_for_writeout(device,
al_extent_to_bm_page(e->lc_number));
i++;
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
buffer->n_updates = cpu_to_be16(i);
@@ -543,48 +546,48 @@ _al_write_transaction(struct drbd_conf *mdev)
buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
}
- buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
- buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
+ buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
+ buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);
mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
- mdev->act_log->nr_elements - mdev->al_tr_cycle);
+ device->act_log->nr_elements - device->al_tr_cycle);
for (i = 0; i < mx; i++) {
- unsigned idx = mdev->al_tr_cycle + i;
- extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
+ unsigned idx = device->al_tr_cycle + i;
+ extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
buffer->context[i] = cpu_to_be32(extent_nr);
}
for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
buffer->context[i] = cpu_to_be32(LC_FREE);
- mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
- if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
- mdev->al_tr_cycle = 0;
+ device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
+ if (device->al_tr_cycle >= device->act_log->nr_elements)
+ device->al_tr_cycle = 0;
- sector = al_tr_number_to_on_disk_sector(mdev);
+ sector = al_tr_number_to_on_disk_sector(device);
crc = crc32c(0, buffer, 4096);
buffer->crc32c = cpu_to_be32(crc);
- if (drbd_bm_write_hinted(mdev))
+ if (drbd_bm_write_hinted(device))
err = -EIO;
else {
bool write_al_updates;
rcu_read_lock();
- write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
rcu_read_unlock();
if (write_al_updates) {
- if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
err = -EIO;
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
} else {
- mdev->al_tr_number++;
- mdev->al_writ_cnt++;
+ device->al_tr_number++;
+ device->al_writ_cnt++;
}
}
}
- drbd_md_put_buffer(mdev);
- put_ldev(mdev);
+ drbd_md_put_buffer(device);
+ put_ldev(device);
return err;
}
@@ -593,10 +596,10 @@ _al_write_transaction(struct drbd_conf *mdev)
static int w_al_write_transaction(struct drbd_work *w, int unused)
{
struct update_al_work *aw = container_of(w, struct update_al_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = aw->device;
int err;
- err = _al_write_transaction(mdev);
+ err = _al_write_transaction(device);
aw->err = err;
complete(&aw->event);
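With the generic w->mdev back-pointer gone, each drbd work type embeds struct drbd_work and carries its own device pointer, which the callback recovers via container_of(). A hedged, self-contained sketch of that embedding pattern outside drbd; the types and fields are illustrative:

#include <linux/kernel.h>

struct example_work {
        void (*cb)(struct example_work *w);
};

struct example_update_work {
        struct example_work w;          /* must be embedded, not a pointer */
        int device_id;                  /* payload the callback needs */
};

static void example_cb(struct example_work *w)
{
        struct example_update_work *uw =
                container_of(w, struct example_update_work, w);

        /* uw->device_id is reachable without a back-pointer inside w. */
        (void)uw;
}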
@@ -606,63 +609,64 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
/* Calls from worker context (see w_restart_disk_io()) need to write the
transaction directly. Others came through generic_make_request(),
those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
+static int al_write_transaction(struct drbd_device *device, bool delegate)
{
if (delegate) {
struct update_al_work al_work;
init_completion(&al_work.event);
al_work.w.cb = w_al_write_transaction;
- al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ al_work.device = device;
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &al_work.w);
wait_for_completion(&al_work.event);
return al_work.err;
} else
- return _al_write_transaction(mdev);
+ return _al_write_transaction(device);
}
-static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
+static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
int rv;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
rv = (al_ext->refcnt == 0);
if (likely(rv))
- lc_del(mdev->act_log, al_ext);
- spin_unlock_irq(&mdev->al_lock);
+ lc_del(device->act_log, al_ext);
+ spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_al_shrink() - Removes all active extents from the activity log
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Removes all active extents from the activity log, waiting until
* the reference count of each entry has dropped to 0 first, of course.
*
- * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
+ * You need to lock device->act_log with lc_try_lock() / lc_unlock()
*/
-void drbd_al_shrink(struct drbd_conf *mdev)
+void drbd_al_shrink(struct drbd_device *device)
{
struct lc_element *al_ext;
int i;
- D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
+ D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- al_ext = lc_element_by_index(mdev->act_log, i);
+ for (i = 0; i < device->act_log->nr_elements; i++) {
+ al_ext = lc_element_by_index(device->act_log, i);
if (al_ext->lc_number == LC_FREE)
continue;
- wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
+ wait_event(device->al_wait, _try_lc_del(device, al_ext));
}
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
-int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
+int drbd_initialize_al(struct drbd_device *device, void *buffer)
{
struct al_transaction_on_disk *al = buffer;
- struct drbd_md *md = &mdev->ldev->md;
+ struct drbd_md *md = &device->ldev->md;
sector_t al_base = md->md_offset + md->al_offset;
int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
int i;
@@ -673,7 +677,7 @@ int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
for (i = 0; i < al_size_4k; i++) {
- int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+ int err = drbd_md_sync_page_io(device, device->ldev, al_base + i * 8, WRITE);
if (err)
return err;
}
@@ -683,32 +687,32 @@ int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = udw->device;
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
+ drbd_warn(device, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
return 0;
}
- drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
- put_ldev(mdev);
+ drbd_bm_write_page(device, rs_extent_to_bm_page(udw->enr));
+ put_ldev(device);
kfree(udw);
- if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
- switch (mdev->state.conn) {
+ if (drbd_bm_total_weight(device) <= device->rs_failed) {
+ switch (device->state.conn) {
case C_SYNC_SOURCE: case C_SYNC_TARGET:
case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
default:
/* nothing to do */
break;
}
}
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
return 0;
}
@@ -720,7 +724,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
-static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
+static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t sector,
int count, int success)
{
struct lc_element *e;
@@ -728,13 +732,13 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
unsigned int enr;
- D_ASSERT(atomic_read(&mdev->local_cnt));
+ D_ASSERT(device, atomic_read(&device->local_cnt));
/* I simply assume that a sector/size pair never crosses
* a 16 MB extent border. (Currently this is true...) */
enr = BM_SECT_TO_EXT(sector);
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
if (e) {
struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
if (ext->lce.lc_number == enr) {
@@ -743,12 +747,12 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ drbd_warn(device, "BAD! sector=%llus enr=%u rs_left=%d "
"rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
ext->rs_failed, count,
- drbd_conn_str(mdev->state.conn));
+ drbd_conn_str(device->state.conn));
/* We don't expect to be able to clear more bits
* than have been set when we originally counted
@@ -756,7 +760,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
* Whatever the reason (disconnect during resync,
* delayed local completion of an application write),
* try to fix it up by recounting here. */
- ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ ext->rs_left = drbd_bm_e_weight(device, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -765,16 +769,16 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
* But maybe an application write finished, and we set
* something outside the resync lru_cache in sync.
*/
- int rs_left = drbd_bm_e_weight(mdev, enr);
+ int rs_left = drbd_bm_e_weight(device, enr);
if (ext->flags != 0) {
- dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
+ drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
" -> %d[%u;00]\n",
ext->lce.lc_number, ext->rs_left,
ext->flags, enr, rs_left);
ext->flags = 0;
}
if (ext->rs_failed) {
- dev_warn(DEV, "Kicking resync_lru element enr=%u "
+ drbd_warn(device, "Kicking resync_lru element enr=%u "
"out with rs_failed=%d\n",
ext->lce.lc_number, ext->rs_failed);
}
@@ -782,9 +786,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
ext->rs_failed = success ? 0 : count;
/* we don't keep a persistent log of the resync lru,
* we can commit any change right away. */
- lc_committed(mdev->resync);
+ lc_committed(device->resync);
}
- lc_put(mdev->resync, &ext->lce);
+ lc_put(device->resync, &ext->lce);
/* no race, we are within the al_lock! */
if (ext->rs_left == ext->rs_failed) {
@@ -794,32 +798,33 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
- udw->w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
+ udw->device = device;
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &udw->w);
} else {
- dev_warn(DEV, "Could not kmalloc an udw\n");
+ drbd_warn(device, "Could not kmalloc an udw\n");
}
}
} else {
- dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
- mdev->resync_locked,
- mdev->resync->nr_elements,
- mdev->resync->flags);
+ drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
+ device->resync_locked,
+ device->resync->nr_elements,
+ device->resync->flags);
}
}
-void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
unsigned long now = jiffies;
- unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
- int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+ unsigned long last = device->rs_mark_time[device->rs_last_mark];
+ int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
- if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
- mdev->state.conn != C_PAUSED_SYNC_T &&
- mdev->state.conn != C_PAUSED_SYNC_S) {
- mdev->rs_mark_time[next] = now;
- mdev->rs_mark_left[next] = still_to_go;
- mdev->rs_last_mark = next;
+ if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
+ device->state.conn != C_PAUSED_SYNC_T &&
+ device->state.conn != C_PAUSED_SYNC_S) {
+ device->rs_mark_time[next] = now;
+ device->rs_mark_left[next] = still_to_go;
+ device->rs_last_mark = next;
}
}
}
@@ -831,7 +836,7 @@ void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
-void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
+void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size,
const char *file, const unsigned int line)
{
/* Is called from worker and receiver context _only_ */
@@ -842,15 +847,15 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
unsigned long flags;
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
+ drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return; /* no disk, no metadata, no bitmap to clear bits in */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -878,21 +883,21 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
- count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
+ count = drbd_bm_clear_bits(device, sbnr, ebnr);
if (count) {
- drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
- spin_lock_irqsave(&mdev->al_lock, flags);
- drbd_try_clear_on_disk_bm(mdev, sector, count, true);
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ drbd_advance_rs_marks(device, drbd_bm_total_weight(device));
+ spin_lock_irqsave(&device->al_lock, flags);
+ drbd_try_clear_on_disk_bm(device, sector, count, true);
+ spin_unlock_irqrestore(&device->al_lock, flags);
/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
}
out:
- put_ldev(mdev);
+ put_ldev(device);
if (wake_up)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
/*
@@ -903,7 +908,7 @@ out:
* called by tl_clear and drbd_send_dblock (==drbd_make_request).
* so this can be _any_ process.
*/
-int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size,
const char *file, const unsigned int line)
{
unsigned long sbnr, ebnr, flags;
@@ -916,15 +921,15 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
return 0;
if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "sector: %llus, size: %d\n",
+ drbd_err(device, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
}
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return 0; /* no disk, no metadata, no bitmap to set bits in */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -939,55 +944,55 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
/* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors. */
- spin_lock_irqsave(&mdev->al_lock, flags);
- count = drbd_bm_set_bits(mdev, sbnr, ebnr);
+ spin_lock_irqsave(&device->al_lock, flags);
+ count = drbd_bm_set_bits(device, sbnr, ebnr);
enr = BM_SECT_TO_EXT(sector);
- e = lc_find(mdev->resync, enr);
+ e = lc_find(device->resync, enr);
if (e)
lc_entry(e, struct bm_extent, lce)->rs_left += count;
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
out:
- put_ldev(mdev);
+ put_ldev(device);
return count;
}
static
-struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
+struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int wakeup = 0;
unsigned long rs_flags;
- spin_lock_irq(&mdev->al_lock);
- if (mdev->resync_locked > mdev->resync->nr_elements/2) {
- spin_unlock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
+ if (device->resync_locked > device->resync->nr_elements/2) {
+ spin_unlock_irq(&device->al_lock);
return NULL;
}
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (bm_ext->lce.lc_number != enr) {
- bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
- lc_committed(mdev->resync);
+ lc_committed(device->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
- mdev->resync_locked++;
+ device->resync_locked++;
set_bit(BME_NO_WRITES, &bm_ext->flags);
}
- rs_flags = mdev->resync->flags;
- spin_unlock_irq(&mdev->al_lock);
+ rs_flags = device->resync->flags;
+ spin_unlock_irq(&device->al_lock);
if (wakeup)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
if (!bm_ext) {
if (rs_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for element"
+ drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
}
@@ -995,25 +1000,25 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
return bm_ext;
}
-static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
+static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
int rv;
- spin_lock_irq(&mdev->al_lock);
- rv = lc_is_used(mdev->act_log, enr);
- spin_unlock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
+ rv = lc_is_used(device->act_log, enr);
+ spin_unlock_irq(&device->al_lock);
return rv;
}
/**
* drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
*
* This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
*/
-int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
@@ -1022,8 +1027,8 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
200 times -> 20 seconds. */
retry:
- sig = wait_event_interruptible(mdev->al_wait,
- (bm_ext = _bme_get(mdev, enr)));
+ sig = wait_event_interruptible(device->al_wait,
+ (bm_ext = _bme_get(device, enr)));
if (sig)
return -EINTR;
@@ -1031,24 +1036,24 @@ retry:
return 0;
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- sig = wait_event_interruptible(mdev->al_wait,
- !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+ sig = wait_event_interruptible(device->al_wait,
+ !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
test_bit(BME_PRIORITY, &bm_ext->flags));
if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
- spin_lock_irq(&mdev->al_lock);
- if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+ spin_lock_irq(&device->al_lock);
+ if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_locked--;
+ wake_up(&device->al_wait);
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (sig)
return -EINTR;
if (schedule_timeout_interruptible(HZ/10))
return -EINTR;
if (sa && --sa == 0)
- dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+ drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec."
"Resync stalled?\n");
goto retry;
}
@@ -1059,14 +1064,14 @@ retry:
/**
* drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
*
* Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
-int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
@@ -1074,8 +1079,8 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i;
- spin_lock_irq(&mdev->al_lock);
- if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
+ spin_lock_irq(&device->al_lock);
+ if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) {
/* in case you have very heavy scattered io, it may
* stall the syncer undefined if we give up the ref count
* when we try again and requeue.
@@ -1089,193 +1094,193 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* the lc_put here...
* we also have to wake_up
*/
- e = lc_find(mdev->resync, mdev->resync_wenr);
+ e = lc_find(device->resync, device->resync_wenr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
- mdev->resync_wenr = LC_FREE;
- if (lc_put(mdev->resync, &bm_ext->lce) == 0)
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_wenr = LC_FREE;
+ if (lc_put(device->resync, &bm_ext->lce) == 0)
+ device->resync_locked--;
+ wake_up(&device->al_wait);
} else {
- dev_alert(DEV, "LOGIC BUG\n");
+ drbd_alert(device, "LOGIC BUG\n");
}
}
/* TRY. */
- e = lc_try_get(mdev->resync, enr);
+ e = lc_try_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
if (test_bit(BME_LOCKED, &bm_ext->flags))
goto proceed;
if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
- mdev->resync_locked++;
+ device->resync_locked++;
} else {
/* we did set the BME_NO_WRITES,
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
bm_ext->lce.refcnt--;
- D_ASSERT(bm_ext->lce.refcnt > 0);
+ D_ASSERT(device, bm_ext->lce.refcnt > 0);
}
goto check_al;
} else {
/* do we rather want to try later? */
- if (mdev->resync_locked > mdev->resync->nr_elements-3)
+ if (device->resync_locked > device->resync->nr_elements-3)
goto try_again;
/* Do or do not. There is no try. -- Yoda */
- e = lc_get(mdev->resync, enr);
+ e = lc_get(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
- const unsigned long rs_flags = mdev->resync->flags;
+ const unsigned long rs_flags = device->resync->flags;
if (rs_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for element"
+ drbd_warn(device, "Have to wait for element"
" (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
- bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+ bm_ext->rs_left = drbd_bm_e_weight(device, enr);
bm_ext->rs_failed = 0;
- lc_committed(mdev->resync);
- wake_up(&mdev->al_wait);
- D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
+ lc_committed(device->resync);
+ wake_up(&device->al_wait);
+ D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
set_bit(BME_NO_WRITES, &bm_ext->flags);
- D_ASSERT(bm_ext->lce.refcnt == 1);
- mdev->resync_locked++;
+ D_ASSERT(device, bm_ext->lce.refcnt == 1);
+ device->resync_locked++;
goto check_al;
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- if (lc_is_used(mdev->act_log, al_enr+i))
+ if (lc_is_used(device->act_log, al_enr+i))
goto try_again;
}
set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
- mdev->resync_wenr = LC_FREE;
- spin_unlock_irq(&mdev->al_lock);
+ device->resync_wenr = LC_FREE;
+ spin_unlock_irq(&device->al_lock);
return 0;
try_again:
if (bm_ext)
- mdev->resync_wenr = enr;
- spin_unlock_irq(&mdev->al_lock);
+ device->resync_wenr = enr;
+ spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
-void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct lc_element *e;
struct bm_extent *bm_ext;
unsigned long flags;
- spin_lock_irqsave(&mdev->al_lock, flags);
- e = lc_find(mdev->resync, enr);
+ spin_lock_irqsave(&device->al_lock, flags);
+ e = lc_find(device->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (!bm_ext) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
+ drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
return;
}
if (bm_ext->lce.refcnt == 0) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
+ spin_unlock_irqrestore(&device->al_lock, flags);
+ drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
"but refcnt is 0!?\n",
(unsigned long long)sector, enr);
return;
}
- if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+ if (lc_put(device->resync, &bm_ext->lce) == 0) {
bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
- mdev->resync_locked--;
- wake_up(&mdev->al_wait);
+ device->resync_locked--;
+ wake_up(&device->al_wait);
}
- spin_unlock_irqrestore(&mdev->al_lock, flags);
+ spin_unlock_irqrestore(&device->al_lock, flags);
}
/**
* drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-void drbd_rs_cancel_all(struct drbd_conf *mdev)
+void drbd_rs_cancel_all(struct drbd_device *device)
{
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
- if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
- lc_reset(mdev->resync);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
+ lc_reset(device->resync);
+ put_ldev(device);
}
- mdev->resync_locked = 0;
- mdev->resync_wenr = LC_FREE;
- spin_unlock_irq(&mdev->al_lock);
- wake_up(&mdev->al_wait);
+ device->resync_locked = 0;
+ device->resync_wenr = LC_FREE;
+ spin_unlock_irq(&device->al_lock);
+ wake_up(&device->al_wait);
}
/**
* drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns 0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
-int drbd_rs_del_all(struct drbd_conf *mdev)
+int drbd_rs_del_all(struct drbd_device *device)
{
struct lc_element *e;
struct bm_extent *bm_ext;
int i;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
- if (get_ldev_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(device, D_FAILED)) {
/* ok, ->resync is there. */
- for (i = 0; i < mdev->resync->nr_elements; i++) {
- e = lc_element_by_index(mdev->resync, i);
+ for (i = 0; i < device->resync->nr_elements; i++) {
+ e = lc_element_by_index(device->resync, i);
bm_ext = lc_entry(e, struct bm_extent, lce);
if (bm_ext->lce.lc_number == LC_FREE)
continue;
- if (bm_ext->lce.lc_number == mdev->resync_wenr) {
- dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
+ if (bm_ext->lce.lc_number == device->resync_wenr) {
+ drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
" got 'synced' by application io\n",
- mdev->resync_wenr);
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+ device->resync_wenr);
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
clear_bit(BME_NO_WRITES, &bm_ext->flags);
- mdev->resync_wenr = LC_FREE;
- lc_put(mdev->resync, &bm_ext->lce);
+ device->resync_wenr = LC_FREE;
+ lc_put(device->resync, &bm_ext->lce);
}
if (bm_ext->lce.refcnt != 0) {
- dev_info(DEV, "Retrying drbd_rs_del_all() later. "
+ drbd_info(device, "Retrying drbd_rs_del_all() later. "
"refcnt=%d\n", bm_ext->lce.refcnt);
- put_ldev(mdev);
- spin_unlock_irq(&mdev->al_lock);
+ put_ldev(device);
+ spin_unlock_irq(&device->al_lock);
return -EAGAIN;
}
- D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
- D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
- lc_del(mdev->resync, &bm_ext->lce);
+ D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+ D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
+ lc_del(device->resync, &bm_ext->lce);
}
- D_ASSERT(mdev->resync->used == 0);
- put_ldev(mdev);
+ D_ASSERT(device, device->resync->used == 0);
+ put_ldev(device);
}
- spin_unlock_irq(&mdev->al_lock);
- wake_up(&mdev->al_wait);
+ spin_unlock_irq(&device->al_lock);
+ wake_up(&device->al_wait);
return 0;
}
/**
* drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @sector: The sector number.
* @size: Size of failed IO operation, in bytes.
*/
-void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
@@ -1284,11 +1289,11 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
int wake_up = 0;
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
+ drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
@@ -1316,21 +1321,21 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
- spin_lock_irq(&mdev->al_lock);
- count = drbd_bm_count_bits(mdev, sbnr, ebnr);
+ spin_lock_irq(&device->al_lock);
+ count = drbd_bm_count_bits(device, sbnr, ebnr);
if (count) {
- mdev->rs_failed += count;
+ device->rs_failed += count;
- if (get_ldev(mdev)) {
- drbd_try_clear_on_disk_bm(mdev, sector, count, false);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_try_clear_on_disk_bm(device, sector, count, false);
+ put_ldev(device);
}
/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake_up)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
}
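The resync-extent helpers converted above bracket the actual resync I/O: drbd_rs_begin_io() takes and locks the extent (it may sleep on al_wait and returns -EINTR on a signal), drbd_rs_failed_io() records bits that could not be resynced, and drbd_rs_complete_io() drops the reference again. A minimal usage sketch under the renamed API; resync_request() and read_block_for_resync() are hypothetical placeholders, only the drbd_rs_*_io() calls come from the code above:

/* Sketch: how one resync request brackets its I/O with the helpers above. */
static int resync_request(struct drbd_device *device, sector_t sector, int size)
{
	int err = drbd_rs_begin_io(device, sector);	/* may sleep on al_wait */
	if (err)
		return err;				/* -EINTR: interrupted */

	if (read_block_for_resync(device, sector, size))	/* hypothetical I/O step */
		drbd_rs_failed_io(device, sector, size);	/* account the failed bits */

	drbd_rs_complete_io(device, sector);		/* release the BME_LOCKED extent */
	return 0;
}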
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..1aa29f8fdfe1 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -113,54 +113,54 @@ struct drbd_bitmap {
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
-static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
+static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!__ratelimit(&drbd_ratelimit_state))
return;
- dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
- drbd_task_to_thread_name(mdev->tconn, current),
- func, b->bm_why ?: "?",
- drbd_task_to_thread_name(mdev->tconn, b->bm_task));
+ drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
+ current->comm, task_pid_nr(current),
+ func, b->bm_why ?: "?",
+ b->bm_task->comm, task_pid_nr(b->bm_task));
}
-void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
+void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int trylock_failed;
if (!b) {
- dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
+ drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
return;
}
trylock_failed = !mutex_trylock(&b->bm_change);
if (trylock_failed) {
- dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
- drbd_task_to_thread_name(mdev->tconn, current),
- why, b->bm_why ?: "?",
- drbd_task_to_thread_name(mdev->tconn, b->bm_task));
+ drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
+ current->comm, task_pid_nr(current),
+ why, b->bm_why ?: "?",
+ b->bm_task->comm, task_pid_nr(b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
- dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+ drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
b->bm_flags |= flags & BM_LOCKED_MASK;
b->bm_why = why;
b->bm_task = current;
}
-void drbd_bm_unlock(struct drbd_conf *mdev)
+void drbd_bm_unlock(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!b) {
- dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
+ drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
return;
}
- if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
- dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
+ if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
+ drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
b->bm_flags &= ~BM_LOCKED_MASK;
b->bm_why = NULL;
@@ -211,19 +211,19 @@ static unsigned long bm_page_to_idx(struct page *page)
/* As it is very unlikely that the same page is under IO from more than one
* context, we can get away with a bit per page and one wait queue per bitmap.
*/
-static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
+static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
-static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
+static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
- wake_up(&mdev->bitmap->bm_io_wait);
+ wake_up(&device->bitmap->bm_io_wait);
}
/* set _before_ submit_io, so it may be reset due to being changed
@@ -242,22 +242,22 @@ static void bm_set_page_need_writeout(struct page *page)
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @page_nr: the bitmap page to mark with the "hint" flag
*
* From within an activity log transaction, we mark a few pages with these
* hints, then call drbd_bm_write_hinted(), which will only write out changed
* pages which are flagged with this mark.
*/
-void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
+void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
struct page *page;
- if (page_nr >= mdev->bitmap->bm_number_of_pages) {
- dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
- page_nr, (int)mdev->bitmap->bm_number_of_pages);
+ if (page_nr >= device->bitmap->bm_number_of_pages) {
+ drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
+ page_nr, (int)device->bitmap->bm_number_of_pages);
return;
}
- page = mdev->bitmap->bm_pages[page_nr];
+ page = device->bitmap->bm_pages[page_nr];
set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}
@@ -340,7 +340,7 @@ static void bm_unmap(unsigned long *p_addr)
/*
* actually most functions herein should take a struct drbd_bitmap*, not a
- * struct drbd_conf*, but for the debug macros I like to have the mdev around
+ * struct drbd_device*, but for the debug macros I like to have the device around
* to be able to report device-specific messages.
*/
@@ -436,11 +436,11 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
/*
* called on driver init only. TODO call when a device is created.
- * allocates the drbd_bitmap, and stores it in mdev->bitmap.
+ * allocates the drbd_bitmap, and stores it in device->bitmap.
*/
-int drbd_bm_init(struct drbd_conf *mdev)
+int drbd_bm_init(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
WARN_ON(b != NULL);
b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
if (!b)
@@ -449,28 +449,28 @@ int drbd_bm_init(struct drbd_conf *mdev)
mutex_init(&b->bm_change);
init_waitqueue_head(&b->bm_io_wait);
- mdev->bitmap = b;
+ device->bitmap = b;
return 0;
}
-sector_t drbd_bm_capacity(struct drbd_conf *mdev)
+sector_t drbd_bm_capacity(struct drbd_device *device)
{
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return 0;
- return mdev->bitmap->bm_dev_capacity;
+ return device->bitmap->bm_dev_capacity;
}
/* called on driver unload. TODO: call when a device is destroyed.
*/
-void drbd_bm_cleanup(struct drbd_conf *mdev)
+void drbd_bm_cleanup(struct drbd_device *device)
{
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return;
- bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
- bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
- kfree(mdev->bitmap);
- mdev->bitmap = NULL;
+ bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
+ bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+ kfree(device->bitmap);
+ device->bitmap = NULL;
}
/*
@@ -631,9 +631,9 @@ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
+int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
@@ -643,9 +643,9 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
if (!expect(b))
return -ENOMEM;
- drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
+ drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
- dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
+ drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
(unsigned long long)capacity);
if (capacity == b->bm_dev_capacity)
@@ -678,12 +678,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
*/
words = ALIGN(bits, 64) >> LN2_BPL;
- if (get_ldev(mdev)) {
- u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
+ put_ldev(device);
if (bits > bits_on_disk) {
- dev_info(DEV, "bits = %lu\n", bits);
- dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
+ drbd_info(device, "bits = %lu\n", bits);
+ drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
err = -ENOSPC;
goto out;
}
@@ -692,10 +692,10 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
have = b->bm_number_of_pages;
if (want == have) {
- D_ASSERT(b->bm_pages != NULL);
+ D_ASSERT(device, b->bm_pages != NULL);
npages = b->bm_pages;
} else {
- if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
+ if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
npages = NULL;
else
npages = bm_realloc_pages(b, want);
@@ -742,10 +742,10 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
bm_vk_free(opages, opages_vmalloced);
if (!growing)
b->bm_set = bm_count_bits(b);
- dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
+ drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
out:
- drbd_bm_unlock(mdev);
+ drbd_bm_unlock(device);
return err;
}
@@ -757,9 +757,9 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
*
* maybe bm_set should be atomic_t ?
*/
-unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
+unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long s;
unsigned long flags;
@@ -775,20 +775,20 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
return s;
}
-unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
+unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
unsigned long s;
/* if I don't have a disk, I don't know about out-of-sync status */
- if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
- s = _drbd_bm_total_weight(mdev);
- put_ldev(mdev);
+ s = _drbd_bm_total_weight(device);
+ put_ldev(device);
return s;
}
-size_t drbd_bm_words(struct drbd_conf *mdev)
+size_t drbd_bm_words(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
if (!expect(b->bm_pages))
@@ -797,9 +797,9 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
return b->bm_words;
}
-unsigned long drbd_bm_bits(struct drbd_conf *mdev)
+unsigned long drbd_bm_bits(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return 0;
@@ -811,10 +811,10 @@ unsigned long drbd_bm_bits(struct drbd_conf *mdev)
* bitmap must be locked by drbd_bm_lock.
* currently only used from receive_bitmap.
*/
-void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
unsigned long word, bits;
unsigned int idx;
@@ -860,10 +860,10 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
/* copy number words from the bitmap starting at offset into the buffer.
* buffer[i] will be little endian unsigned long.
*/
-void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
unsigned long *buffer)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr, *bm;
size_t end, do_now;
@@ -878,7 +878,7 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
if ((offset >= b->bm_words) ||
(end > b->bm_words) ||
(number <= 0))
- dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
+ drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
(unsigned long) offset,
(unsigned long) number,
(unsigned long) b->bm_words);
@@ -897,9 +897,9 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
}
/* set all bits in the bitmap */
-void drbd_bm_set_all(struct drbd_conf *mdev)
+void drbd_bm_set_all(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@@ -913,9 +913,9 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
}
/* clear all bits in the bitmap */
-void drbd_bm_clear_all(struct drbd_conf *mdev)
+void drbd_bm_clear_all(struct drbd_device *device)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
if (!expect(b))
return;
if (!expect(b->bm_pages))
@@ -928,7 +928,7 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
}
struct bm_aio_ctx {
- struct drbd_conf *mdev;
+ struct drbd_device *device;
atomic_t in_flight;
unsigned int done;
unsigned flags;
@@ -943,7 +943,7 @@ static void bm_aio_ctx_destroy(struct kref *kref)
{
struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
- put_ldev(ctx->mdev);
+ put_ldev(ctx->device);
kfree(ctx);
}
@@ -951,8 +951,8 @@ static void bm_aio_ctx_destroy(struct kref *kref)
static void bm_async_io_complete(struct bio *bio, int error)
{
struct bm_aio_ctx *ctx = bio->bi_private;
- struct drbd_conf *mdev = ctx->mdev;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_device *device = ctx->device;
+ struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -966,7 +966,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
- dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);
+ drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
if (error) {
/* ctx error will hold the completed-last non-zero error code,
@@ -976,14 +976,14 @@ static void bm_async_io_complete(struct bio *bio, int error)
/* Not identical to on disk version of it.
* Is BM_PAGE_IO_ERROR enough? */
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
+ drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
error, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
- dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
+ dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
}
- bm_page_unlock_io(mdev, idx);
+ bm_page_unlock_io(device, idx);
if (ctx->flags & BM_AIO_COPY_PAGES)
mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
@@ -992,7 +992,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
if (atomic_dec_and_test(&ctx->in_flight)) {
ctx->done = 1;
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
}
}
@@ -1000,23 +1000,23 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
struct bio *bio = bio_alloc_drbd(GFP_NOIO);
- struct drbd_conf *mdev = ctx->mdev;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_device *device = ctx->device;
+ struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
sector_t on_disk_sector =
- mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
+ device->ldev->md.md_offset + device->ldev->md.bm_offset;
on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
len = min_t(unsigned int, PAGE_SIZE,
- (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
+ (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
/* serialize IO on this page */
- bm_page_lock_io(mdev, page_nr);
+ bm_page_lock_io(device, page_nr);
/* before memcpy and submit,
* so it can be redirtied any time */
bm_set_page_unchanged(b->bm_pages[page_nr]);
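The on_disk_sector computed above places bitmap page page_nr at a fixed offset behind the meta-data superblock, PAGE_SIZE/512 sectors per page (8 sectors for 4 KiB pages). Restated as a stand-alone sketch; the helper name is invented, the md.md_offset/md.bm_offset fields are the ones used above:

/* Sketch only: where bitmap page 'page_nr' lives on the meta-data device. */
static sector_t bm_page_on_disk_sector(struct drbd_backing_dev *ldev, unsigned int page_nr)
{
	sector_t s = ldev->md.md_offset + ldev->md.bm_offset;

	return s + ((sector_t)page_nr << (PAGE_SHIFT - 9));	/* 9 = log2(512) */
}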
@@ -1027,32 +1027,32 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = mdev->ldev->md_bdev;
- bio->bi_sector = on_disk_sector;
+ bio->bi_bdev = device->ldev->md_bdev;
+ bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
- if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+ if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
- atomic_add(len >> 9, &mdev->rs_sect_ev);
+ atomic_add(len >> 9, &device->rs_sect_ev);
}
}
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
@@ -1072,7 +1072,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
- .mdev = mdev,
+ .device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = flags,
@@ -1080,8 +1080,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
.kref = { ATOMIC_INIT(2) },
};
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
kfree(ctx);
return -ENODEV;
}
@@ -1106,14 +1106,14 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (!(flags & BM_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
- dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
+ dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
continue;
}
/* during lazy writeout,
* ignore those pages not marked for lazy writeout. */
if (lazy_writeout_upper_idx &&
!bm_test_page_lazy_writeout(b->bm_pages[i])) {
- dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
+ dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
continue;
}
}
@@ -1132,19 +1132,19 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
/* summary for global bitmap IO */
if (flags == 0)
- dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
if (ctx->error) {
- dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}
@@ -1153,16 +1153,16 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
now = jiffies;
if (rw == WRITE) {
- drbd_md_flush(mdev);
+ drbd_md_flush(device);
} else /* rw == READ */ {
b->bm_set = bm_count_bits(b);
- dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
+ drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
jiffies - now);
}
now = b->bm_set;
if (flags == 0)
- dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+ drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
@@ -1171,48 +1171,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
/**
* drbd_bm_read() - Read the whole bitmap from its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, READ, 0, 0);
+ return bm_rw(device, READ, 0, 0);
}
/**
* drbd_bm_write() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will only write pages that have changed since last IO.
*/
-int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0, 0);
+ return bm_rw(device, WRITE, 0, 0);
}
/**
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will write all pages.
*/
-int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
-}
-
-/**
- * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
- * @mdev: DRBD device.
- * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
- */
-int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
-{
- return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+ return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0);
}
/**
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Will only write pages that have changed since last IO.
* In contrast to drbd_bm_write(), this will copy the bitmap pages
@@ -1221,23 +1211,23 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
-int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
+ return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
+int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
- return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
+ return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
/**
* drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @idx: bitmap page index
*
* We don't want to special case on logical_block_size of the backend device,
@@ -1247,13 +1237,13 @@ int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
* In case this becomes an issue on systems with larger PAGE_SIZE,
* we may want to change this again to write 4k aligned 4k pieces.
*/
-int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
+int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
int err;
- if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
- dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
+ dynamic_drbd_dbg(device, "skipped bm page write for idx %u\n", idx);
return 0;
}
@@ -1262,7 +1252,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
return -ENOMEM;
*ctx = (struct bm_aio_ctx) {
- .mdev = mdev,
+ .device = device,
.in_flight = ATOMIC_INIT(1),
.done = 0,
.flags = BM_AIO_COPY_PAGES,
@@ -1270,21 +1260,21 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
.kref = { ATOMIC_INIT(2) },
};
- if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
- dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
kfree(ctx);
return -ENODEV;
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
if (ctx->error)
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
/* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
- mdev->bm_writ_cnt++;
+ device->bm_writ_cnt++;
err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@@ -1298,17 +1288,17 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
*
* this returns a bit number, NOT a sector!
*/
-static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
+static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
const int find_zero_bit)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
unsigned long bit_offset;
unsigned i;
if (bm_fo > b->bm_bits) {
- dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
+ drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
bm_fo = DRBD_END_OF_BITMAP;
} else {
while (bm_fo < b->bm_bits) {
@@ -1338,10 +1328,10 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
return bm_fo;
}
-static unsigned long bm_find_next(struct drbd_conf *mdev,
+static unsigned long bm_find_next(struct drbd_device *device,
unsigned long bm_fo, const int find_zero_bit)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
if (!expect(b))
@@ -1351,39 +1341,39 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
- i = __bm_find_next(mdev, bm_fo, find_zero_bit);
+ i = __bm_find_next(device, bm_fo, find_zero_bit);
spin_unlock_irq(&b->bm_lock);
return i;
}
-unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
- return bm_find_next(mdev, bm_fo, 0);
+ return bm_find_next(device, bm_fo, 0);
}
#if 0
/* not yet needed for anything. */
-unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
- return bm_find_next(mdev, bm_fo, 1);
+ return bm_find_next(device, bm_fo, 1);
}
#endif
/* does not spin_lock_irqsave.
* you must take drbd_bm_lock() first */
-unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
- /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 0);
+ /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
+ return __bm_find_next(device, bm_fo, 0);
}
-unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
- /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
- return __bm_find_next(mdev, bm_fo, 1);
+ /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
+ return __bm_find_next(device, bm_fo, 1);
}
/* returns number of bits actually changed.
@@ -1392,10 +1382,10 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
unsigned long e, int val)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int last_page_nr = -1U;
@@ -1403,7 +1393,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
int changed_total = 0;
if (e >= b->bm_bits) {
- dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
+ drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
s, e, b->bm_bits);
e = b->bm_bits ? b->bm_bits -1 : 0;
}
@@ -1441,11 +1431,11 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
-static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int c = 0;
if (!expect(b))
@@ -1455,24 +1445,24 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
- c = __bm_change_bits_to(mdev, s, e, val);
+ c = __bm_change_bits_to(device, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
}
/* returns number of bits changed 0 -> 1 */
-int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
- return bm_change_bits_to(mdev, s, e, 1);
+ return bm_change_bits_to(device, s, e, 1);
}
/* returns number of bits changed 1 -> 0 */
-int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
- return -bm_change_bits_to(mdev, s, e, 0);
+ return -bm_change_bits_to(device, s, e, 0);
}
/* sets all bits in full words,
@@ -1504,7 +1494,7 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
-void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
/* First set_bit from the first bit (s)
* up to the next long boundary (sl),
@@ -1514,7 +1504,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@@ -1526,7 +1516,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
spin_lock_irq(&b->bm_lock);
- __bm_change_bits_to(mdev, s, e, 1);
+ __bm_change_bits_to(device, s, e, 1);
spin_unlock_irq(&b->bm_lock);
return;
}
@@ -1537,7 +1527,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1);
+ __bm_change_bits_to(device, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@@ -1549,7 +1539,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
- bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
@@ -1565,7 +1555,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* as we did not allocate it, it is not present in bitmap->bm_pages.
*/
if (last_word)
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+ bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
@@ -1573,7 +1563,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1);
+ __bm_change_bits_to(device, el, e, 1);
spin_unlock_irq(&b->bm_lock);
}
@@ -1584,10 +1574,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* 0 ... bit not set
* -1 ... first out of bounds access, stop testing for bits!
*/
-int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
+int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr;
int i;
@@ -1598,7 +1588,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
if (bitnr < b->bm_bits) {
p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
@@ -1606,7 +1596,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
} else if (bitnr == b->bm_bits) {
i = -1;
} else { /* (bitnr > b->bm_bits) */
- dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
+ drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
i = 0;
}
@@ -1615,10 +1605,10 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
}
/* returns number of bits set in the range [s, e] */
-int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
unsigned long flags;
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
unsigned long *p_addr = NULL;
unsigned long bitnr;
unsigned int page_nr = -1U;
@@ -1635,7 +1625,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
for (bitnr = s; bitnr <= e; bitnr++) {
unsigned int idx = bm_bit_to_page_idx(b, bitnr);
if (page_nr != idx) {
@@ -1647,7 +1637,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (expect(bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
else
- dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
+ drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
@@ -1670,9 +1660,9 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* reference count of some bitmap extent element from some lru instead...
*
*/
-int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
+int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
- struct drbd_bitmap *b = mdev->bitmap;
+ struct drbd_bitmap *b = device->bitmap;
int count, s, e;
unsigned long flags;
unsigned long *p_addr, *bm;
@@ -1684,7 +1674,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
- bm_print_lock_info(mdev);
+ bm_print_lock_info(device);
s = S2W(enr);
e = min((size_t)S2W(enr+1), b->bm_words);
@@ -1697,7 +1687,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
count += hweight_long(*bm++);
bm_unmap(p_addr);
} else {
- dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
+ drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
}
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
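The drbd_int.h hunks that follow define the macros used by the conversions in drbd_actlog.c and drbd_bitmap.c above. The whole rename follows one mechanical pattern, sketched here side by side; demo_old()/demo_new() are invented for illustration and are not part of the patch:

/* old style (removed lines) */
static void demo_old(struct drbd_conf *mdev)
{
	if (!get_ldev(mdev))
		return;
	dev_err(DEV, "broken\n");			/* DEV expands to mdev's disk device */
	D_ASSERT(mdev->resync_locked == 0);		/* one-argument assert */
	put_ldev(mdev);
}

/* new style (added lines) */
static void demo_new(struct drbd_device *device)
{
	if (!get_ldev(device))
		return;
	drbd_err(device, "broken\n");			/* object passed explicitly */
	D_ASSERT(device, device->resync_locked == 0);	/* device becomes the first argument */
	put_ldev(device);
}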
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 0e06f0c5dd1e..e7093d4291f1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -45,7 +45,9 @@
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
+#include "drbd_strings.h"
#include "drbd_state.h"
+#include "drbd_protocol.h"
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
@@ -65,6 +67,7 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
+void tl_abort_disk_io(struct drbd_device *device);
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@@ -95,25 +98,60 @@ extern char usermode_helper[];
#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
-struct drbd_conf;
-struct drbd_tconn;
-
-
-/* to shorten dev_warn(DEV, "msg"); and relatives statements */
-#define DEV (disk_to_dev(mdev->vdisk))
-
-#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
- printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
-#define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
-#define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
-#define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
-#define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
-#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
-#define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
-#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
-
-#define D_ASSERT(exp) if (!(exp)) \
- dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
+struct drbd_device;
+struct drbd_connection;
+
+#define __drbd_printk_device(level, device, fmt, args...) \
+ dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
+#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
+ dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
+#define __drbd_printk_resource(level, resource, fmt, args...) \
+ printk(level "drbd %s: " fmt, (resource)->name, ## args)
+#define __drbd_printk_connection(level, connection, fmt, args...) \
+ printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
+
+void drbd_printk_with_wrong_object_type(void);
+
+#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
+ (__builtin_types_compatible_p(typeof(obj), type) || \
+ __builtin_types_compatible_p(typeof(obj), const type)), \
+ func(level, (const type)(obj), fmt, ## args)
+
+#define drbd_printk(level, obj, fmt, args...) \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_device *, \
+ __drbd_printk_device, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_resource *, \
+ __drbd_printk_resource, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_connection *, \
+ __drbd_printk_connection, level, fmt, ## args), \
+ __builtin_choose_expr( \
+ __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
+ __drbd_printk_peer_device, level, fmt, ## args), \
+ drbd_printk_with_wrong_object_type()))))
+
+#define drbd_dbg(obj, fmt, args...) \
+ drbd_printk(KERN_DEBUG, obj, fmt, ## args)
+#define drbd_alert(obj, fmt, args...) \
+ drbd_printk(KERN_ALERT, obj, fmt, ## args)
+#define drbd_err(obj, fmt, args...) \
+ drbd_printk(KERN_ERR, obj, fmt, ## args)
+#define drbd_warn(obj, fmt, args...) \
+ drbd_printk(KERN_WARNING, obj, fmt, ## args)
+#define drbd_info(obj, fmt, args...) \
+ drbd_printk(KERN_INFO, obj, fmt, ## args)
+#define drbd_emerg(obj, fmt, args...) \
+ drbd_printk(KERN_EMERG, obj, fmt, ## args)
+
+#define dynamic_drbd_dbg(device, fmt, args...) \
+ dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
+
+#define D_ASSERT(device, exp) do { \
+ if (!(exp)) \
+ drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
+ } while (0)
/**
* expect - Make an assertion
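In the drbd_printk() block above, __builtin_types_compatible_p() evaluates to a compile-time 0/1 for each supported pointer type and __builtin_choose_expr() keeps only the matching __drbd_printk_*() call; an unsupported object type ends up referencing the never-defined drbd_printk_with_wrong_object_type() and fails at link time. A minimal user-space analogue of the idiom; the demo_* structs and obj_name() are invented for illustration and are not kernel code:

#include <stdio.h>

struct demo_device   { const char *disk; };
struct demo_resource { const char *name; };

/* Pick the right member purely from the pointer type, at compile time. */
#define obj_name(obj)							\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(obj), struct demo_device *), \
		((const struct demo_device *)(obj))->disk,		\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(obj), struct demo_resource *), \
		((const struct demo_resource *)(obj))->name,		\
		(void)0 /* unsupported type: error at the use site */))

int main(void)
{
	struct demo_device d = { "drbd0" };
	struct demo_resource r = { "r0" };

	printf("%s %s\n", obj_name(&d), obj_name(&r));	/* prints: drbd0 r0 */
	return 0;
}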
@@ -123,7 +161,7 @@ struct drbd_tconn;
#define expect(exp) ({ \
bool _bool = (exp); \
if (!_bool) \
- dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
+ drbd_err(device, "ASSERTION %s FAILED in %s\n", \
#exp, __func__); \
_bool; \
})
@@ -145,14 +183,14 @@ enum {
};
extern unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+_drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
-drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
return fault_rate &&
(enable_faults & (1<<type)) &&
- _drbd_insert_fault(mdev, type);
+ _drbd_insert_fault(device, type);
#else
return 0;
#endif
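A caller typically consults drbd_insert_fault() immediately before issuing real I/O and simulates a failure instead of submitting the request. The sketch below mirrors that pattern under the assumption of a ~3.x block layer (bio_endio() still takes an error argument, generic_make_request() submits the bio); DRBD_FAULT_DT_WR stands in for one of the fault types from the enum above, and the function name is illustrative.

	static void submit_or_inject_fault(struct drbd_device *device, struct bio *bio)
	{
		if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
			bio_endio(bio, -EIO);		/* pretend the backing device failed the write */
		else
			generic_make_request(bio);	/* hand the bio to the backing device */
	}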
@@ -164,74 +202,8 @@ drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
#define div_floor(A, B) ((A)/(B))
extern struct ratelimit_state drbd_ratelimit_state;
-extern struct idr minors; /* RCU, updates: genl_lock() */
-extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
-
-/* on the wire */
-enum drbd_packet {
- /* receiver (data socket) */
- P_DATA = 0x00,
- P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
- P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
- P_BARRIER = 0x03,
- P_BITMAP = 0x04,
- P_BECOME_SYNC_TARGET = 0x05,
- P_BECOME_SYNC_SOURCE = 0x06,
- P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
- P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
- P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
- P_SYNC_PARAM = 0x0a,
- P_PROTOCOL = 0x0b,
- P_UUIDS = 0x0c,
- P_SIZES = 0x0d,
- P_STATE = 0x0e,
- P_SYNC_UUID = 0x0f,
- P_AUTH_CHALLENGE = 0x10,
- P_AUTH_RESPONSE = 0x11,
- P_STATE_CHG_REQ = 0x12,
-
- /* asender (meta socket */
- P_PING = 0x13,
- P_PING_ACK = 0x14,
- P_RECV_ACK = 0x15, /* Used in protocol B */
- P_WRITE_ACK = 0x16, /* Used in protocol C */
- P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
- P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
- P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
- P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
- P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
- P_BARRIER_ACK = 0x1c,
- P_STATE_CHG_REPLY = 0x1d,
-
- /* "new" commands, no longer fitting into the ordering scheme above */
-
- P_OV_REQUEST = 0x1e, /* data socket */
- P_OV_REPLY = 0x1f,
- P_OV_RESULT = 0x20, /* meta socket */
- P_CSUM_RS_REQUEST = 0x21, /* data socket */
- P_RS_IS_IN_SYNC = 0x22, /* meta socket */
- P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
- P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
- /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
- /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
- P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
- P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
- P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
- P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
- P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
- P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
- P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
-
- P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
- P_MAX_OPT_CMD = 0x101,
-
- /* special command ids for handshake */
-
- P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
- P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
-
- P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
-};
+extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */
extern const char *cmdname(enum drbd_packet cmd);
@@ -253,7 +225,7 @@ struct bm_xfer_ctx {
unsigned bytes[2];
};
-extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
+extern void INFO_bm_xfer_stats(struct drbd_device *device,
const char *direction, struct bm_xfer_ctx *c);
static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
@@ -275,233 +247,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
#endif
}
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
-/* This is the layout for a packet on the wire.
- * The byteorder is the network byte order.
- * (except block_id and barrier fields.
- * these are pointers to local structs
- * and have no relevance for the partner,
- * which just echoes them as received.)
- *
- * NOTE that the payload starts at a long aligned offset,
- * regardless of 32 or 64 bit arch!
- */
-struct p_header80 {
- u32 magic;
- u16 command;
- u16 length; /* bytes of data after this header */
-} __packed;
-
-/* Header for big packets, Used for data packets exceeding 64kB */
-struct p_header95 {
- u16 magic; /* use DRBD_MAGIC_BIG here */
- u16 command;
- u32 length;
-} __packed;
-
-struct p_header100 {
- u32 magic;
- u16 volume;
- u16 command;
- u32 length;
- u32 pad;
-} __packed;
-
-extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
-
-/* these defines must not be changed without changing the protocol version */
-#define DP_HARDBARRIER 1 /* depricated */
-#define DP_RW_SYNC 2 /* equals REQ_SYNC */
-#define DP_MAY_SET_IN_SYNC 4
-#define DP_UNPLUG 8 /* not used anymore */
-#define DP_FUA 16 /* equals REQ_FUA */
-#define DP_FLUSH 32 /* equals REQ_FLUSH */
-#define DP_DISCARD 64 /* equals REQ_DISCARD */
-#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
-#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
-
-struct p_data {
- u64 sector; /* 64 bits sector number */
- u64 block_id; /* to identify the request in protocol B&C */
- u32 seq_num;
- u32 dp_flags;
-} __packed;
-
-/*
- * commands which share a struct:
- * p_block_ack:
- * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
- * P_SUPERSEDED (proto C, two-primaries conflict detection)
- * p_block_req:
- * P_DATA_REQUEST, P_RS_DATA_REQUEST
- */
-struct p_block_ack {
- u64 sector;
- u64 block_id;
- u32 blksize;
- u32 seq_num;
-} __packed;
-
-struct p_block_req {
- u64 sector;
- u64 block_id;
- u32 blksize;
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-/*
- * commands with their own struct for additional fields:
- * P_CONNECTION_FEATURES
- * P_BARRIER
- * P_BARRIER_ACK
- * P_SYNC_PARAM
- * ReportParams
- */
-
-struct p_connection_features {
- u32 protocol_min;
- u32 feature_flags;
- u32 protocol_max;
-
- /* should be more than enough for future enhancements
- * for now, feature_flags and the reserved array shall be zero.
- */
-
- u32 _pad;
- u64 reserved[7];
-} __packed;
-
-struct p_barrier {
- u32 barrier; /* barrier number _handle_ only */
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-struct p_barrier_ack {
- u32 barrier;
- u32 set_size;
-} __packed;
-
-struct p_rs_param {
- u32 resync_rate;
-
- /* Since protocol version 88 and higher. */
- char verify_alg[0];
-} __packed;
-
-struct p_rs_param_89 {
- u32 resync_rate;
- /* protocol version 89: */
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
-} __packed;
-
-struct p_rs_param_95 {
- u32 resync_rate;
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
- u32 c_plan_ahead;
- u32 c_delay_target;
- u32 c_fill_target;
- u32 c_max_rate;
-} __packed;
-
-enum drbd_conn_flags {
- CF_DISCARD_MY_DATA = 1,
- CF_DRY_RUN = 2,
-};
-
-struct p_protocol {
- u32 protocol;
- u32 after_sb_0p;
- u32 after_sb_1p;
- u32 after_sb_2p;
- u32 conn_flags;
- u32 two_primaries;
-
- /* Since protocol version 87 and higher. */
- char integrity_alg[0];
-
-} __packed;
-
-struct p_uuids {
- u64 uuid[UI_EXTENDED_SIZE];
-} __packed;
-
-struct p_rs_uuid {
- u64 uuid;
-} __packed;
-
-struct p_sizes {
- u64 d_size; /* size of disk */
- u64 u_size; /* user requested size */
- u64 c_size; /* current exported size */
- u32 max_bio_size; /* Maximal size of a BIO */
- u16 queue_order_type; /* not yet implemented in DRBD*/
- u16 dds_flags; /* use enum dds_flags here. */
-} __packed;
-
-struct p_state {
- u32 state;
-} __packed;
-
-struct p_req_state {
- u32 mask;
- u32 val;
-} __packed;
-
-struct p_req_state_reply {
- u32 retcode;
-} __packed;
-
-struct p_drbd06_param {
- u64 size;
- u32 state;
- u32 blksize;
- u32 protocol;
- u32 version;
- u32 gen_cnt[5];
- u32 bit_map_gen[5];
-} __packed;
-
-struct p_block_desc {
- u64 sector;
- u32 blksize;
- u32 pad; /* to multiple of 8 Byte */
-} __packed;
-
-/* Valid values for the encoding field.
- * Bump proto version when changing this. */
-enum drbd_bitmap_code {
- /* RLE_VLI_Bytes = 0,
- * and other bit variants had been defined during
- * algorithm evaluation. */
- RLE_VLI_Bits = 2,
-};
-
-struct p_compressed_bm {
- /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
- * (encoding & 0x80): polarity (set/unset) of first runlength
- * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
- * used to pad up to head.length bytes
- */
- u8 encoding;
-
- u8 code[0];
-} __packed;
-
-struct p_delay_probe93 {
- u32 seq_num; /* sequence number to match the two probe packets */
- u32 offset; /* usecs the probe got sent after the reference time point */
-} __packed;
-
-/*
- * Bitmap packets need to fit within a single page on the sender and receiver,
- * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
- */
-#define DRBD_SOCKET_BUFFER_SIZE 4096
+extern unsigned int drbd_header_size(struct drbd_connection *connection);
/**********************************************************************/
enum drbd_thread_state {
@@ -517,9 +263,10 @@ struct drbd_thread {
struct completion stop;
enum drbd_thread_state t_state;
int (*function) (struct drbd_thread *);
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
int reset_cpu_mask;
- char name[9];
+ const char *name;
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
@@ -535,18 +282,20 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
struct drbd_work {
struct list_head list;
int (*cb)(struct drbd_work *, int cancel);
- union {
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn;
- };
+};
+
+struct drbd_device_work {
+ struct drbd_work w;
+ struct drbd_device *device;
};
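Since struct drbd_work no longer embeds an object pointer, a work callback that needs its device embeds the work in struct drbd_device_work and recovers the device with container_of(). A hedged sketch of such a callback (the function name is illustrative):

	static int example_device_work_fn(struct drbd_work *w, int cancel)
	{
		struct drbd_device_work *dw = container_of(w, struct drbd_device_work, w);
		struct drbd_device *device = dw->device;

		if (cancel)
			return 0;
		drbd_info(device, "example device work ran\n");
		return 0;
	}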
#include "drbd_interval.h"
-extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
+extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
struct drbd_request {
struct drbd_work w;
+ struct drbd_device *device;
/* if local IO is not allowed, will be NULL.
* if local IO _is_ allowed, holds the locally submitted bio clone,
@@ -579,7 +328,7 @@ struct drbd_request {
};
struct drbd_epoch {
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
struct list_head list;
unsigned int barrier_nr;
atomic_t epoch_size; /* increased on every request added. */
@@ -587,6 +336,10 @@ struct drbd_epoch {
unsigned long flags;
};
+/* Prototype declarations of functions defined in drbd_receiver.c */
+int drbdd_init(struct drbd_thread *);
+int drbd_asender(struct drbd_thread *);
+
/* drbd_epoch flag bits */
enum {
DE_HAVE_BARRIER_NUMBER,
@@ -599,11 +352,6 @@ enum epoch_event {
EV_CLEANUP = 32, /* used as flag */
};
-struct drbd_wq_barrier {
- struct drbd_work w;
- struct completion done;
-};
-
struct digest_info {
int digest_size;
void *digest;
@@ -611,6 +359,7 @@ struct digest_info {
struct drbd_peer_request {
struct drbd_work w;
+ struct drbd_peer_device *peer_device;
struct drbd_epoch *epoch; /* for writes */
struct page *pages;
atomic_t pending_bios;
@@ -663,7 +412,7 @@ enum {
#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
-/* flag bits per mdev */
+/* flag bits per device */
enum {
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
@@ -695,7 +444,7 @@ enum {
READ_BALANCE_RR,
};
-struct drbd_bitmap; /* opaque for drbd_conf */
+struct drbd_bitmap; /* opaque for drbd_device */
/* definition of bits in bm_flags to be used in drbd_bm_lock
* and drbd_bitmap_io and friends. */
@@ -769,7 +518,7 @@ struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
- struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
+ struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@@ -782,8 +531,8 @@ struct bm_io_work {
struct drbd_work w;
char *why;
enum bm_flag flags;
- int (*io_fn)(struct drbd_conf *mdev);
- void (*done)(struct drbd_conf *mdev, int rv);
+ int (*io_fn)(struct drbd_device *device);
+ void (*done)(struct drbd_device *device, int rv);
};
enum write_ordering_e {
@@ -800,7 +549,7 @@ struct fifo_buffer {
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
-/* flag bits per tconn */
+/* flag bits per connection */
enum {
NET_CONGESTED, /* The data socket is congested */
RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
@@ -822,23 +571,35 @@ enum {
DISCONNECT_SENT,
};
-struct drbd_tconn { /* is a resource from the config file */
- char *name; /* Resource name */
- struct list_head all_tconn; /* linked on global drbd_tconns */
+struct drbd_resource {
+ char *name;
struct kref kref;
- struct idr volumes; /* <tconn, vnr> to mdev mapping */
- enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ struct idr devices; /* volume number to device mapping */
+ struct list_head connections;
+ struct list_head resources;
+ struct res_opts res_opts;
+	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
+ spinlock_t req_lock;
+
unsigned susp:1; /* IO suspended by user */
unsigned susp_nod:1; /* IO suspended because no data */
unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
+
+ cpumask_var_t cpu_mask;
+};
+
+struct drbd_connection {
+ struct list_head connections;
+ struct drbd_resource *resource;
+ struct kref kref;
+ struct idr peer_devices; /* volume number to peer device mapping */
+ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned int connect_cnt; /* Inc each time a connection is established */
unsigned long flags;
struct net_conf *net_conf; /* content protected by rcu */
- struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */
wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
- struct res_opts res_opts;
struct sockaddr_storage my_addr;
int my_addr_len;
@@ -851,12 +612,10 @@ struct drbd_tconn { /* is a resource from the config file */
unsigned long last_received; /* in jiffies, either socket */
unsigned int ko_count;
- spinlock_t req_lock;
-
struct list_head transfer_log; /* all requests not yet fully processed */
struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */
+ struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
@@ -875,7 +634,6 @@ struct drbd_tconn { /* is a resource from the config file */
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
- cpumask_var_t cpu_mask;
/* sender side */
struct drbd_work_queue sender_work;
@@ -903,8 +661,15 @@ struct submit_worker {
struct list_head writes;
};
-struct drbd_conf {
- struct drbd_tconn *tconn;
+struct drbd_peer_device {
+ struct list_head peer_devices;
+ struct drbd_device *device;
+ struct drbd_connection *connection;
+};
+
+struct drbd_device {
+ struct drbd_resource *resource;
+ struct list_head peer_devices;
int vnr; /* volume number within the connection */
struct kref kref;
@@ -920,11 +685,11 @@ struct drbd_conf {
struct gendisk *vdisk;
unsigned long last_reattach_jif;
- struct drbd_work resync_work,
- unplug_work,
- go_diskless,
- md_sync_work,
- start_resync_work;
+ struct drbd_work resync_work;
+ struct drbd_work unplug_work;
+ struct drbd_work go_diskless;
+ struct drbd_work md_sync_work;
+ struct drbd_work start_resync_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
struct timer_list start_resync_timer;
@@ -1030,7 +795,7 @@ struct drbd_conf {
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
struct mutex own_state_mutex;
- struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
+ struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
char congestion_reason; /* Why we where congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -1038,7 +803,7 @@ struct drbd_conf {
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
int c_sync_rate; /* current resync rate after syncer throttle magic */
- struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, tconn->conn_update) */
+	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, resource->conf_update) */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
@@ -1049,19 +814,46 @@ struct drbd_conf {
struct submit_worker submit;
};
-static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
+static inline struct drbd_device *minor_to_device(unsigned int minor)
{
- return (struct drbd_conf *)idr_find(&minors, minor);
+ return (struct drbd_device *)idr_find(&drbd_devices, minor);
}
-static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
+static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
- return mdev->minor;
+ return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
}
-static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
+#define for_each_resource(resource, _resources) \
+ list_for_each_entry(resource, _resources, resources)
+
+#define for_each_resource_rcu(resource, _resources) \
+ list_for_each_entry_rcu(resource, _resources, resources)
+
+#define for_each_resource_safe(resource, tmp, _resources) \
+ list_for_each_entry_safe(resource, tmp, _resources, resources)
+
+#define for_each_connection(connection, resource) \
+ list_for_each_entry(connection, &resource->connections, connections)
+
+#define for_each_connection_rcu(connection, resource) \
+ list_for_each_entry_rcu(connection, &resource->connections, connections)
+
+#define for_each_connection_safe(connection, tmp, resource) \
+ list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
+
+#define for_each_peer_device(peer_device, device) \
+ list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_rcu(peer_device, device) \
+ list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_safe(peer_device, tmp, device) \
+ list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
+
+static inline unsigned int device_to_minor(struct drbd_device *device)
{
- return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
+ return device->minor;
}
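The for_each_*() iterators above walk the new object hierarchy (resource -> connections, connection -> peer devices, device -> peer devices). A hedged sketch of a read-only walk, assuming rcu_read_lock() is sufficient protection; the function name is illustrative:

	static unsigned int example_count_peer_devices(struct drbd_resource *resource)
	{
		struct drbd_connection *connection;
		struct drbd_peer_device *peer_device;
		unsigned int n = 0;
		int vnr;

		rcu_read_lock();
		for_each_connection_rcu(connection, resource)
			idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
				n++;
		rcu_read_unlock();
		return n;
	}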
/*
@@ -1075,96 +867,93 @@ enum dds_flags {
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
-extern void drbd_init_set_defaults(struct drbd_conf *mdev);
+extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
-extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
-extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
-#define drbd_calc_cpu_mask(A) ({})
#endif
-extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
+extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
unsigned int set_size);
-extern void tl_clear(struct drbd_tconn *);
-extern void drbd_free_sock(struct drbd_tconn *tconn);
-extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+extern void tl_clear(struct drbd_connection *);
+extern void drbd_free_sock(struct drbd_connection *connection);
+extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
unsigned);
-extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
-extern int drbd_send_protocol(struct drbd_tconn *tconn);
-extern int drbd_send_uuids(struct drbd_conf *mdev);
-extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
-extern int drbd_send_current_state(struct drbd_conf *mdev);
-extern int drbd_send_sync_param(struct drbd_conf *mdev);
-extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_connection *connection);
+extern int drbd_send_uuids(struct drbd_peer_device *);
+extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
+extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
+extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
+extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_peer_device *);
+extern int drbd_send_sync_param(struct drbd_peer_device *);
+extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
struct drbd_peer_request *);
-extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
struct p_block_req *rp);
-extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
struct p_data *dp, int data_size);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
sector_t sector, int blksize, u64 block_id);
-extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
-extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
+extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
struct drbd_peer_request *);
-extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
+extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
sector_t sector, int size, u64 block_id);
-extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
+extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
int size, void *digest, int digest_size,
enum drbd_packet cmd);
-extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
+extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
-extern int drbd_send_bitmap(struct drbd_conf *mdev);
-extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
-extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
+extern int drbd_send_bitmap(struct drbd_device *device);
+extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
-extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
-
-extern void conn_md_sync(struct drbd_tconn *tconn);
-extern void drbd_md_write(struct drbd_conf *mdev, void *buffer);
-extern void drbd_md_sync(struct drbd_conf *mdev);
-extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
-extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
-extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
-extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
-extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
-extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
+extern void drbd_device_cleanup(struct drbd_device *device);
+void drbd_print_uuids(struct drbd_device *device, const char *text);
+
+extern void conn_md_sync(struct drbd_connection *connection);
+extern void drbd_md_write(struct drbd_device *device, void *buffer);
+extern void drbd_md_sync(struct drbd_device *device);
+extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
+extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
+extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
+extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
+extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
-extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
+extern void drbd_md_mark_dirty(struct drbd_device *device);
#else
#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ )
-extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
+extern void drbd_md_mark_dirty_(struct drbd_device *device,
unsigned int line, const char *func);
#endif
-extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- void (*done)(struct drbd_conf *, int),
+extern void drbd_queue_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
+ void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags);
-extern int drbd_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+extern int drbd_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
-extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags);
-extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
-extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern void drbd_ldev_destroy(struct drbd_conf *mdev);
+extern int drbd_bmio_set_n_write(struct drbd_device *device);
+extern int drbd_bmio_clear_n_write(struct drbd_device *device);
+extern void drbd_ldev_destroy(struct drbd_device *device);
/* Meta data layout
*
@@ -1350,52 +1139,52 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
-extern int drbd_bm_init(struct drbd_conf *mdev);
-extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
-extern void drbd_bm_cleanup(struct drbd_conf *mdev);
-extern void drbd_bm_set_all(struct drbd_conf *mdev);
-extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+extern int drbd_bm_init(struct drbd_device *device);
+extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
+extern void drbd_bm_cleanup(struct drbd_device *device);
+extern void drbd_bm_set_all(struct drbd_device *device);
+extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int drbd_bm_set_bits(
- struct drbd_conf *mdev, unsigned long s, unsigned long e);
+ struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_clear_bits(
- struct drbd_conf *mdev, unsigned long s, unsigned long e);
+ struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
- struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+ struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
* may process the whole bitmap in one go */
-extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
+extern void _drbd_bm_set_bits(struct drbd_device *device,
const unsigned long s, const unsigned long e);
-extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
-extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
-extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
-extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
-extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
-extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
-extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
-extern size_t drbd_bm_words(struct drbd_conf *mdev);
-extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
-extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
+extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
+extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
+extern int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local);
+extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
+extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
+extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
+extern size_t drbd_bm_words(struct drbd_device *device);
+extern unsigned long drbd_bm_bits(struct drbd_device *device);
+extern sector_t drbd_bm_capacity(struct drbd_device *device);
#define DRBD_END_OF_BITMAP (~(unsigned long)0)
-extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
+extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
-extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
-extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
-extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
-extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
-extern int drbd_bm_rs_done(struct drbd_conf *mdev);
+extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
+extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
+extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
+extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
+extern int drbd_bm_rs_done(struct drbd_device *device);
/* for receive_bitmap */
-extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
+extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
-extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
+extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
size_t number, unsigned long *buffer);
-extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
-extern void drbd_bm_unlock(struct drbd_conf *mdev);
+extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
+extern void drbd_bm_unlock(struct drbd_device *device);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
@@ -1439,35 +1228,40 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
-extern int conn_lowest_minor(struct drbd_tconn *tconn);
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
-extern void drbd_minor_destroy(struct kref *kref);
+extern int conn_lowest_minor(struct drbd_connection *connection);
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr);
+extern void drbd_destroy_device(struct kref *kref);
+extern void drbd_delete_device(struct drbd_device *device);
+
+extern struct drbd_resource *drbd_create_resource(const char *name);
+extern void drbd_free_resource(struct drbd_resource *resource);
-extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
-extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
-extern void conn_destroy(struct kref *kref);
-struct drbd_tconn *conn_get_by_name(const char *name);
-extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
+extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
+extern void drbd_destroy_connection(struct kref *kref);
+extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len);
-extern void conn_free_crypto(struct drbd_tconn *tconn);
+extern struct drbd_resource *drbd_find_resource(const char *name);
+extern void drbd_destroy_resource(struct kref *kref);
+extern void conn_free_crypto(struct drbd_connection *connection);
extern int proc_details;
/* drbd_req.c */
extern void do_submit(struct work_struct *ws);
-extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
+extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
-extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
+extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
extern int drbd_msg_put_info(const char *info);
-extern void drbd_suspend_io(struct drbd_conf *mdev);
-extern void drbd_resume_io(struct drbd_conf *mdev);
+extern void drbd_suspend_io(struct drbd_device *device);
+extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
+extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
DS_ERROR_SHRINK = -3,
DS_ERROR_SPACE_MD = -2,
@@ -1478,48 +1272,47 @@ enum determine_dev_size {
DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
-drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
-extern void resync_after_online_grow(struct drbd_conf *);
-extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
-extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
+extern void resync_after_online_grow(struct drbd_device *);
+extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
+extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
enum drbd_role new_role,
int force);
-extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
-extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
-extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
+extern bool conn_try_outdate_peer(struct drbd_connection *connection);
+extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
+extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
-enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
-void drbd_resync_after_changed(struct drbd_conf *mdev);
-extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
-extern void resume_next_sg(struct drbd_conf *mdev);
-extern void suspend_other_sg(struct drbd_conf *mdev);
-extern int drbd_resync_finished(struct drbd_conf *mdev);
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
+void drbd_resync_after_changed(struct drbd_device *device);
+extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
+extern void resume_next_sg(struct drbd_device *device);
+extern void suspend_other_sg(struct drbd_device *device);
+extern int drbd_resync_finished(struct drbd_device *device);
/* maybe rather drbd_main.c ? */
-extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
-extern void drbd_md_put_buffer(struct drbd_conf *mdev);
-extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
+extern void *drbd_md_get_buffer(struct drbd_device *device);
+extern void drbd_md_put_buffer(struct drbd_device *device);
+extern int drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
-extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
+extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
-extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
+extern void drbd_rs_controller_reset(struct drbd_device *device);
-static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
+static inline void ov_out_of_sync_print(struct drbd_device *device)
{
- if (mdev->ov_last_oos_size) {
- dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
- (unsigned long long)mdev->ov_last_oos_start,
- (unsigned long)mdev->ov_last_oos_size);
+ if (device->ov_last_oos_size) {
+ drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
+ (unsigned long long)device->ov_last_oos_start,
+ (unsigned long)device->ov_last_oos_size);
}
- mdev->ov_last_oos_size=0;
+ device->ov_last_oos_size = 0;
}
-extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
- struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
@@ -1529,10 +1322,8 @@ extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
-extern int w_make_resync_request(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
-extern int w_prev_work_done(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
@@ -1542,27 +1333,24 @@ extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
-extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_peer_request(struct drbd_conf *,
+extern int drbd_receiver(struct drbd_thread *thi);
+extern int drbd_asender(struct drbd_thread *thi);
+extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
+extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
const int);
-extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
-extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
+extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
+extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
-extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
-extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
-extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void conn_flush_workqueue(struct drbd_tconn *tconn);
-extern int drbd_connected(struct drbd_conf *mdev);
-static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
-{
- conn_flush_workqueue(mdev->tconn);
-}
+extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
+extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
+extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
+extern int drbd_connected(struct drbd_peer_device *);
/* Yes, there is kernel_setsockopt, but only since 2.6.18.
* So we have our own copy of it here. */
@@ -1613,7 +1401,7 @@ static inline void drbd_tcp_quickack(struct socket *sock)
(char*)&val, sizeof(val));
}
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
@@ -1622,29 +1410,29 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
-extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
-extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
-extern int drbd_rs_del_all(struct drbd_conf *mdev);
-extern void drbd_rs_failed_io(struct drbd_conf *mdev,
+extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate);
+extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate);
+extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
+extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
+extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern void drbd_rs_cancel_all(struct drbd_device *device);
+extern int drbd_rs_del_all(struct drbd_device *device);
+extern void drbd_rs_failed_io(struct drbd_device *device,
sector_t sector, int size);
-extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
-extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
+extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
+extern void __drbd_set_in_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
-#define drbd_set_in_sync(mdev, sector, size) \
- __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
-extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+#define drbd_set_in_sync(device, sector, size) \
+ __drbd_set_in_sync(device, sector, size, __FILE__, __LINE__)
+extern int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector,
int size, const char *file, const unsigned int line);
-#define drbd_set_out_of_sync(mdev, sector, size) \
- __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void drbd_al_shrink(struct drbd_conf *mdev);
-extern int drbd_initialize_al(struct drbd_conf *, void *);
+#define drbd_set_out_of_sync(device, sector, size) \
+ __drbd_set_out_of_sync(device, sector, size, __FILE__, __LINE__)
+extern void drbd_al_shrink(struct drbd_device *device);
+extern int drbd_initialize_al(struct drbd_device *, void *);
/* drbd_nl.c */
/* state info broadcast */
@@ -1661,7 +1449,7 @@ struct sib_info {
};
};
};
-void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
+void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
/*
* inline helper functions
@@ -1690,26 +1478,27 @@ static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_r
}
static inline enum drbd_state_rv
-_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+_drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
enum drbd_state_rv rv;
read_lock(&global_state_lock);
- rv = __drbd_set_state(mdev, ns, flags, done);
+ rv = __drbd_set_state(device, ns, flags, done);
read_unlock(&global_state_lock);
return rv;
}
-static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
+static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
+ struct drbd_resource *resource = device->resource;
union drbd_state rv;
- rv.i = mdev->state.i;
- rv.susp = mdev->tconn->susp;
- rv.susp_nod = mdev->tconn->susp_nod;
- rv.susp_fen = mdev->tconn->susp_fen;
+ rv.i = device->state.i;
+ rv.susp = resource->susp;
+ rv.susp_nod = resource->susp_nod;
+ rv.susp_fen = resource->susp_fen;
return rv;
}
@@ -1722,22 +1511,22 @@ enum drbd_force_detach_flags {
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+static inline void __drbd_chk_io_error_(struct drbd_device *device,
enum drbd_force_detach_flags df,
const char *where)
{
enum drbd_io_error_p ep;
rcu_read_lock();
- ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
switch (ep) {
case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Local IO failed in %s.\n", where);
- if (mdev->state.disk > D_INCONSISTENT)
- _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
+ drbd_err(device, "Local IO failed in %s.\n", where);
+ if (device->state.disk > D_INCONSISTENT)
+ _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
@@ -1763,14 +1552,14 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
* we read meta data only once during attach,
* which will fail in case of errors.
*/
- set_bit(WAS_IO_ERROR, &mdev->flags);
+ set_bit(WAS_IO_ERROR, &device->flags);
if (df == DRBD_READ_ERROR)
- set_bit(WAS_READ_ERROR, &mdev->flags);
+ set_bit(WAS_READ_ERROR, &device->flags);
if (df == DRBD_FORCE_DETACH)
- set_bit(FORCE_DETACH, &mdev->flags);
- if (mdev->state.disk > D_FAILED) {
- _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
- dev_err(DEV,
+ set_bit(FORCE_DETACH, &device->flags);
+ if (device->state.disk > D_FAILED) {
+ _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
+ drbd_err(device,
"Local IO failed in %s. Detaching...\n", where);
}
break;
@@ -1779,21 +1568,21 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
/**
* drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @error: Error code passed to the IO completion callback
* @forcedetach: Force detach. I.e. the error happened while accessing the meta data
*
* See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
*/
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
-static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
+static inline void drbd_chk_io_error_(struct drbd_device *device,
int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- __drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ __drbd_chk_io_error_(device, forcedetach, where);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
}
}
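drbd_chk_io_error() is meant to be called from I/O completion handlers, which is why it takes resource->req_lock with interrupts disabled. A hedged sketch of such a caller, assuming a ~3.x bi_end_io signature; the handler name and the use of bi_private to carry the device are illustrative:

	static void example_local_write_endio(struct bio *bio, int error)
	{
		struct drbd_device *device = bio->bi_private;	/* stashed when the bio was prepared */

		/* a plain write error; whether we detach depends on the on_io_error policy */
		drbd_chk_io_error(device, error, DRBD_WRITE_ERROR);
		bio_put(bio);
	}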
@@ -1916,31 +1705,33 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
wake_up(&q->q_wait);
}
-static inline void wake_asender(struct drbd_tconn *tconn)
+extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
+
+static inline void wake_asender(struct drbd_connection *connection)
{
- if (test_bit(SIGNAL_ASENDER, &tconn->flags))
- force_sig(DRBD_SIG, tconn->asender.task);
+ if (test_bit(SIGNAL_ASENDER, &connection->flags))
+ force_sig(DRBD_SIG, connection->asender.task);
}
-static inline void request_ping(struct drbd_tconn *tconn)
+static inline void request_ping(struct drbd_connection *connection)
{
- set_bit(SEND_PING, &tconn->flags);
- wake_asender(tconn);
+ set_bit(SEND_PING, &connection->flags);
+ wake_asender(connection);
}
-extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
-extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
-extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
+extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
+extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
enum drbd_packet, unsigned int, void *,
unsigned int);
-extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
+extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
enum drbd_packet, unsigned int, void *,
unsigned int);
-extern int drbd_send_ping(struct drbd_tconn *tconn);
-extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
-extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
-extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
+extern int drbd_send_ping(struct drbd_connection *connection);
+extern int drbd_send_ping_ack(struct drbd_connection *connection);
+extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
@@ -1979,22 +1770,22 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
-static inline void inc_ap_pending(struct drbd_conf *mdev)
+static inline void inc_ap_pending(struct drbd_device *device)
{
- atomic_inc(&mdev->ap_pending_cnt);
+ atomic_inc(&device->ap_pending_cnt);
}
#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
- if (atomic_read(&mdev->which) < 0) \
- dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
+ if (atomic_read(&device->which) < 0) \
+ drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
func, line, \
- atomic_read(&mdev->which))
+ atomic_read(&device->which))
-#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
+#define dec_ap_pending(device) _dec_ap_pending(device, __FUNCTION__, __LINE__)
+static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
- if (atomic_dec_and_test(&mdev->ap_pending_cnt))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->ap_pending_cnt))
+ wake_up(&device->misc_wait);
ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}
@@ -2004,15 +1795,15 @@ static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int
* C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
* (or P_NEG_ACK with ID_SYNCER)
*/
-static inline void inc_rs_pending(struct drbd_conf *mdev)
+static inline void inc_rs_pending(struct drbd_device *device)
{
- atomic_inc(&mdev->rs_pending_cnt);
+ atomic_inc(&device->rs_pending_cnt);
}
-#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
+#define dec_rs_pending(device) _dec_rs_pending(device, __FUNCTION__, __LINE__)
+static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
- atomic_dec(&mdev->rs_pending_cnt);
+ atomic_dec(&device->rs_pending_cnt);
ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
@@ -2025,103 +1816,104 @@ static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int
* receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
* receive_Barrier_* we need to send a P_BARRIER_ACK
*/
-static inline void inc_unacked(struct drbd_conf *mdev)
+static inline void inc_unacked(struct drbd_device *device)
{
- atomic_inc(&mdev->unacked_cnt);
+ atomic_inc(&device->unacked_cnt);
}
-#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
-static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
+#define dec_unacked(device) _dec_unacked(device, __FUNCTION__, __LINE__)
+static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
- atomic_dec(&mdev->unacked_cnt);
+ atomic_dec(&device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
-#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
-static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
+#define sub_unacked(device, n) _sub_unacked(device, n, __FUNCTION__, __LINE__)
+static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
- atomic_sub(n, &mdev->unacked_cnt);
+ atomic_sub(n, &device->unacked_cnt);
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
- * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
+ * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
* @M: DRBD device.
*
- * You have to call put_ldev() when finished working with mdev->ldev.
+ * You have to call put_ldev() when finished working with device->ldev.
*/
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
-static inline void put_ldev(struct drbd_conf *mdev)
+static inline void put_ldev(struct drbd_device *device)
{
- int i = atomic_dec_return(&mdev->local_cnt);
+ int i = atomic_dec_return(&device->local_cnt);
/* This may be called from some endio handler,
* so we must not sleep here. */
__release(local);
- D_ASSERT(i >= 0);
+ D_ASSERT(device, i >= 0);
if (i == 0) {
- if (mdev->state.disk == D_DISKLESS)
+ if (device->state.disk == D_DISKLESS)
/* even internal references gone, safe to destroy */
- drbd_ldev_destroy(mdev);
- if (mdev->state.disk == D_FAILED) {
+ drbd_ldev_destroy(device);
+ if (device->state.disk == D_FAILED) {
/* all application IO references gone. */
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
+ if (!test_and_set_bit(GO_DISKLESS, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->go_diskless);
}
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
}
#ifndef __CHECKER__
-static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
/* never get a reference while D_DISKLESS */
- if (mdev->state.disk == D_DISKLESS)
+ if (device->state.disk == D_DISKLESS)
return 0;
- atomic_inc(&mdev->local_cnt);
- io_allowed = (mdev->state.disk >= mins);
+ atomic_inc(&device->local_cnt);
+ io_allowed = (device->state.disk >= mins);
if (!io_allowed)
- put_ldev(mdev);
+ put_ldev(device);
return io_allowed;
}
#else
-extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
+extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
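
get_ldev()/put_ldev() implement a conditional reference count on the backing device: _get_ldev_if_state() takes the reference optimistically, re-checks the disk state, and drops the reference again if the state is too low, while the final put_ldev() is where deferred teardown (the D_DISKLESS destroy and the D_FAILED go-diskless work above) happens. A simplified userspace sketch of that take-check-release shape, using C11 atomics and only modelling the destroy-on-diskless case:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum disk_state { D_DISKLESS, D_FAILED, D_INCONSISTENT, D_UP_TO_DATE };

struct dev {
	atomic_int      local_cnt;
	enum disk_state disk;
};

/* drop one reference; the last put is where deferred teardown would run */
static void put_ldev(struct dev *d)
{
	if (atomic_fetch_sub(&d->local_cnt, 1) == 1 && d->disk == D_DISKLESS)
		printf("last reference gone, destroying backing device\n");
}

/* take a reference only if the disk state is at least 'mins';
 * on a failed check the reference is dropped again immediately */
static bool get_ldev_if_state(struct dev *d, enum disk_state mins)
{
	if (d->disk == D_DISKLESS)
		return false;			/* never reference a detached disk */
	atomic_fetch_add(&d->local_cnt, 1);
	if (d->disk >= mins)
		return true;
	put_ldev(d);
	return false;
}

int main(void)
{
	struct dev d = { .disk = D_UP_TO_DATE };

	atomic_init(&d.local_cnt, 0);
	if (get_ldev_if_state(&d, D_INCONSISTENT)) {
		/* ... safe to touch the backing device here ... */
		put_ldev(&d);
	}
	return 0;
}
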
/* you must have a "get_ldev" reference */
-static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
+static inline void drbd_get_syncer_progress(struct drbd_device *device,
unsigned long *bits_left, unsigned int *per_mil_done)
{
/* this is to break it at compile time when we change that, in case we
* want to support more than (1<<32) bits on a 32bit arch. */
- typecheck(unsigned long, mdev->rs_total);
+ typecheck(unsigned long, device->rs_total);
/* note: both rs_total and rs_left are in bits, i.e. in
* units of BM_BLOCK_SIZE.
* for the percentage, we don't care. */
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- *bits_left = mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ *bits_left = device->ov_left;
else
- *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ *bits_left = drbd_bm_total_weight(device) - device->rs_failed;
/* >> 10 to prevent overflow,
* +1 to prevent division by zero */
- if (*bits_left > mdev->rs_total) {
+ if (*bits_left > device->rs_total) {
/* doh. maybe a logic bug somewhere.
* may also be just a race condition
* between this and a disconnect during sync.
* for now, just prevent in-kernel buffer overflow.
*/
smp_rmb();
- dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
- drbd_conn_str(mdev->state.conn),
- *bits_left, mdev->rs_total, mdev->rs_failed);
+ drbd_warn(device, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
+ drbd_conn_str(device->state.conn),
+ *bits_left, device->rs_total, device->rs_failed);
*per_mil_done = 0;
} else {
/* Make sure the division happens in long context.
@@ -2133,9 +1925,9 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
+ unsigned int shift = device->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
- unsigned long total = 1UL + (mdev->rs_total >> shift);
+ unsigned long total = 1UL + (device->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
*per_mil_done = tmp;
}
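
The per-mil computation above scales both operands down before multiplying so that left * 1000 cannot overflow an unsigned long: totals that fit in 32 bits are shifted by 10, larger ones by 16, and the +1 on the total guards against dividing by zero. The same arithmetic as a standalone function, with plain unsigned longs in place of the device fields:

#include <limits.h>
#include <stdio.h>

/* returns resync progress in per mille, like drbd_get_syncer_progress() */
static unsigned int per_mil_done(unsigned long bits_left, unsigned long rs_total)
{
	/* shift both values down so that "left * 1000" cannot overflow,
	 * and add 1 to the total to avoid dividing by zero */
	unsigned int shift = rs_total > UINT_MAX ? 16 : 10;
	unsigned long left  = bits_left >> shift;
	unsigned long total = 1UL + (rs_total >> shift);

	return 1000UL - left * 1000UL / total;
}

int main(void)
{
	unsigned long total = 1UL << 28;	/* ~268M bits in the bitmap */
	unsigned long left  = total / 4;	/* 75% already synced */

	printf("%u per mille\n", per_mil_done(left, total));	/* prints roughly 750 */
	return 0;
}
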
@@ -2145,22 +1937,22 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
/* this throttles on-the-fly application requests
* according to max_buffers settings;
* maybe re-implement using semaphores? */
-static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
+static inline int drbd_get_max_buffers(struct drbd_device *device)
{
struct net_conf *nc;
int mxb;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
rcu_read_unlock();
return mxb;
}
-static inline int drbd_state_is_stable(struct drbd_conf *mdev)
+static inline int drbd_state_is_stable(struct drbd_device *device)
{
- union drbd_dev_state s = mdev->state;
+ union drbd_dev_state s = device->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2194,7 +1986,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
- if (mdev->tconn->agreed_pro_version < 96)
+ if (first_peer_device(device)->connection->agreed_pro_version < 96)
return 0;
break;
@@ -2228,20 +2020,20 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
return 1;
}
-static inline int drbd_suspended(struct drbd_conf *mdev)
+static inline int drbd_suspended(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_resource *resource = device->resource;
- return tconn->susp || tconn->susp_fen || tconn->susp_nod;
+ return resource->susp || resource->susp_fen || resource->susp_nod;
}
-static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
+static inline bool may_inc_ap_bio(struct drbd_device *device)
{
- int mxb = drbd_get_max_buffers(mdev);
+ int mxb = drbd_get_max_buffers(device);
- if (drbd_suspended(mdev))
+ if (drbd_suspended(device))
return false;
- if (test_bit(SUSPEND_IO, &mdev->flags))
+ if (test_bit(SUSPEND_IO, &device->flags))
return false;
/* to avoid potential deadlock or bitmap corruption,
@@ -2249,32 +2041,32 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
* to start during "stable" states. */
/* no new io accepted when attaching or detaching the disk */
- if (!drbd_state_is_stable(mdev))
+ if (!drbd_state_is_stable(device))
return false;
/* since some older kernels don't have atomic_add_unless,
* and we are within the spinlock anyways, we have this workaround. */
- if (atomic_read(&mdev->ap_bio_cnt) > mxb)
+ if (atomic_read(&device->ap_bio_cnt) > mxb)
return false;
- if (test_bit(BITMAP_IO, &mdev->flags))
+ if (test_bit(BITMAP_IO, &device->flags))
return false;
return true;
}
-static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
+static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
bool rv = false;
- spin_lock_irq(&mdev->tconn->req_lock);
- rv = may_inc_ap_bio(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ rv = may_inc_ap_bio(device);
if (rv)
- atomic_inc(&mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ atomic_inc(&device->ap_bio_cnt);
+ spin_unlock_irq(&device->resource->req_lock);
return rv;
}
-static inline void inc_ap_bio(struct drbd_conf *mdev)
+static inline void inc_ap_bio(struct drbd_device *device)
{
/* we wait here
* as long as the device is suspended
@@ -2284,42 +2076,44 @@ static inline void inc_ap_bio(struct drbd_conf *mdev)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
+ wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
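
inc_ap_bio() uses the usual wait_event() idiom, but the condition helper inc_ap_bio_cond() both evaluates the admission test and, on success, takes the reference, all under resource->req_lock, so a reconnect cannot slip in between the check and the increment; dec_ap_bio() later wakes the sleepers. A userspace sketch of that check-and-increment-under-lock shape, with a pthread mutex and condition variable standing in for the kernel primitives and invented field names:

#include <pthread.h>
#include <stdbool.h>

struct throttle {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             in_flight;
	int             max_in_flight;
	bool            suspended;
};

/* block until a slot can be taken; the admission test and the increment
 * happen inside the same critical section, as in inc_ap_bio_cond() */
static void inc_blocking(struct throttle *t)
{
	pthread_mutex_lock(&t->lock);
	while (t->suspended || t->in_flight >= t->max_in_flight)
		pthread_cond_wait(&t->wait, &t->lock);
	t->in_flight++;
	pthread_mutex_unlock(&t->lock);
}

/* release a slot and wake any sleepers, as dec_ap_bio() does via misc_wait */
static void dec(struct throttle *t)
{
	pthread_mutex_lock(&t->lock);
	t->in_flight--;
	pthread_cond_broadcast(&t->wait);
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	static struct throttle t = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 2, false
	};

	inc_blocking(&t);	/* slot 1 of 2 */
	inc_blocking(&t);	/* slot 2 of 2; a third call would block until dec() */
	dec(&t);
	dec(&t);
	return 0;
}
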
-static inline void dec_ap_bio(struct drbd_conf *mdev)
+static inline void dec_ap_bio(struct drbd_device *device)
{
- int mxb = drbd_get_max_buffers(mdev);
- int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
+ int mxb = drbd_get_max_buffers(device);
+ int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
- D_ASSERT(ap_bio >= 0);
+ D_ASSERT(device, ap_bio >= 0);
- if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
+ if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->
+ connection->sender_work,
+ &device->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
* maybe rather introduce some type of hysteresis?
* e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
if (ap_bio < mxb)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
-static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
+static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
- return mdev->tconn->agreed_pro_version >= 97 &&
- mdev->tconn->agreed_pro_version != 100;
+ return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
+ first_peer_device(device)->connection->agreed_pro_version != 100;
}
-static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
- int changed = mdev->ed_uuid != val;
- mdev->ed_uuid = val;
+ int changed = device->ed_uuid != val;
+ device->ed_uuid = val;
return changed;
}
-static inline int drbd_queue_order_type(struct drbd_conf *mdev)
+static inline int drbd_queue_order_type(struct drbd_device *device)
{
/* sorry, we currently have no working implementation
* of distributed TCQ stuff */
@@ -2329,23 +2123,29 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
return QUEUE_ORDERED_NONE;
}
-static inline void drbd_md_flush(struct drbd_conf *mdev)
+static inline void drbd_md_flush(struct drbd_device *device)
{
int r;
- if (mdev->ldev == NULL) {
- dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
+ if (device->ldev == NULL) {
+ drbd_warn(device, "device->ldev == NULL in drbd_md_flush\n");
return;
}
- if (test_bit(MD_NO_FUA, &mdev->flags))
+ if (test_bit(MD_NO_FUA, &device->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
+ r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
- set_bit(MD_NO_FUA, &mdev->flags);
- dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
+ set_bit(MD_NO_FUA, &device->flags);
+ drbd_err(device, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
+static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
+{
+ return list_first_entry(&resource->connections,
+ struct drbd_connection, connections);
+}
+
#endif
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..331e5cc1227d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -52,16 +52,12 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
static DEFINE_MUTEX(drbd_main_mutex);
-int drbdd_init(struct drbd_thread *);
-int drbd_worker(struct drbd_thread *);
-int drbd_asender(struct drbd_thread *);
-
-int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
@@ -118,8 +114,8 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct idr minors;
-struct list_head drbd_tconns; /* list of struct drbd_tconn */
+struct idr drbd_devices;
+struct list_head drbd_resources;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache; /* peer requests */
@@ -166,15 +162,15 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real function, sparse works.
*/
-int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
int io_allowed;
- atomic_inc(&mdev->local_cnt);
- io_allowed = (mdev->state.disk >= mins);
+ atomic_inc(&device->local_cnt);
+ io_allowed = (device->state.disk >= mins);
if (!io_allowed) {
- if (atomic_dec_and_test(&mdev->local_cnt))
- wake_up(&mdev->misc_wait);
+ if (atomic_dec_and_test(&device->local_cnt))
+ wake_up(&device->misc_wait);
}
return io_allowed;
}
@@ -183,7 +179,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
/**
* tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
- * @tconn: DRBD connection.
+ * @connection: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
@@ -191,7 +187,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
* epoch of not yet barrier-acked requests, this function will cause a
* termination of the connection.
*/
-void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
@@ -199,11 +195,11 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
int expect_epoch = 0;
int expect_size = 0;
- spin_lock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
/* find oldest not yet barrier-acked write request,
* count writes in its epoch. */
- list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry(r, &connection->transfer_log, tl_requests) {
const unsigned s = r->rq_state;
if (!req) {
if (!(s & RQ_WRITE))
@@ -228,18 +224,18 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* first some paranoia code */
if (req == NULL) {
- conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
barrier_nr);
goto bail;
}
if (expect_epoch != barrier_nr) {
- conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
barrier_nr, expect_epoch);
goto bail;
}
if (expect_size != set_size) {
- conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
barrier_nr, set_size, expect_size);
goto bail;
}
@@ -248,90 +244,91 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
/* this extra list walk restart is paranoia,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
- list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
break;
- list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
_req_mod(req, BARRIER_ACKED);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
return;
bail:
- spin_unlock_irq(&tconn->req_lock);
- conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+ spin_unlock_irq(&connection->resource->req_lock);
+ conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
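
tl_release() validates a barrier ack with a single walk of the transfer log: it finds the oldest write that is still pending on the network, remembers that request's epoch, counts the writes belonging to it, and only then compares the peer-supplied barrier number and set size against what it found. A reduced sketch of that counting walk over a plain array; the RQ_NET_* bookkeeping and the early break on an epoch change are left out:

#include <stdbool.h>
#include <stdio.h>

struct req {
	bool     is_write;
	unsigned epoch;
};

/* find the oldest write's epoch and how many writes belong to it */
static bool oldest_epoch(const struct req *log, int n,
			 unsigned *epoch, unsigned *count)
{
	bool found = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!log[i].is_write)
			continue;
		if (!found) {
			found = true;
			*epoch = log[i].epoch;
			*count = 0;
		}
		if (log[i].epoch == *epoch)
			(*count)++;
	}
	return found;
}

int main(void)
{
	struct req log[] = {
		{ false, 7 }, { true, 7 }, { true, 7 }, { true, 8 },
	};
	unsigned epoch, count;

	if (oldest_epoch(log, 4, &epoch, &count))
		printf("oldest epoch %u has %u writes\n", epoch, count);	/* 7, 2 */
	return 0;
}
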
/**
* _tl_restart() - Walks the transfer log, and applies an action to all requests
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @what: The action/event to perform with all request objects
*
* @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
* RESTART_FROZEN_DISK_IO.
*/
/* must hold resource->req_lock */
-void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
struct drbd_request *req, *r;
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
_req_mod(req, what);
}
-void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
- spin_lock_irq(&tconn->req_lock);
- _tl_restart(tconn, what);
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ _tl_restart(connection, what);
+ spin_unlock_irq(&connection->resource->req_lock);
}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* This is called after the connection to the peer was lost. The storage covered
* by the requests on the transfer log gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_tconn *tconn)
+void tl_clear(struct drbd_connection *connection)
{
- tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
- * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
- * @mdev: DRBD device.
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
+ * @device: DRBD device.
*/
-void tl_abort_disk_io(struct drbd_conf *mdev)
+void tl_abort_disk_io(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req, *r;
- spin_lock_irq(&tconn->req_lock);
- list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
+ spin_lock_irq(&connection->resource->req_lock);
+ list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- if (req->w.mdev != mdev)
+ if (req->device != device)
continue;
_req_mod(req, ABORT_DISK_IO);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
unsigned long flags;
int retval;
snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
- thi->name[0], thi->tconn->name);
+ thi->name[0],
+ resource->name);
restart:
retval = thi->function(thi);
@@ -349,7 +346,7 @@ restart:
*/
if (thi->t_state == RESTARTING) {
- conn_info(tconn, "Restarting %s thread\n", thi->name);
+ drbd_info(resource, "Restarting %s thread\n", thi->name);
thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
@@ -361,29 +358,32 @@ restart:
complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- conn_info(tconn, "Terminating %s\n", current->comm);
+ drbd_info(resource, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return retval;
}
-static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *), char *name)
+static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), const char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
thi->t_state = NONE;
thi->function = func;
- thi->tconn = tconn;
- strncpy(thi->name, name, ARRAY_SIZE(thi->name));
+ thi->resource = resource;
+ thi->connection = NULL;
+ thi->name = name;
}
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_resource *resource = thi->resource;
struct task_struct *nt;
unsigned long flags;
@@ -393,17 +393,19 @@ int drbd_thread_start(struct drbd_thread *thi)
switch (thi->t_state) {
case NONE:
- conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Starting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
+ drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
- kref_get(&thi->tconn->kref);
+ kref_get(&resource->kref);
+ if (thi->connection)
+ kref_get(&thi->connection->kref);
init_completion(&thi->stop);
thi->reset_cpu_mask = 1;
@@ -412,12 +414,14 @@ int drbd_thread_start(struct drbd_thread *thi)
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd_%c_%s", thi->name[0], thi->tconn->name);
+ "drbd_%c_%s", thi->name[0], thi->resource->name);
if (IS_ERR(nt)) {
- conn_err(tconn, "Couldn't start thread\n");
+ drbd_err(resource, "Couldn't start thread\n");
- kref_put(&tconn->kref, &conn_destroy);
+ if (thi->connection)
+ kref_put(&thi->connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
module_put(THIS_MODULE);
return false;
}
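
drbd_thread_start() takes a module reference plus resource (and, if set, connection) krefs before spawning the kthread, and those references are then owned by the thread: drbd_thread_setup() drops them when the thread exits, and the error path above drops them immediately when kthread_create() fails. A compact sketch of that hand-a-reference-to-the-thread pattern, with pthreads and a plain atomic counter standing in for kref:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct resource {
	atomic_int  refs;
	const char *name;
};

static void res_get(struct resource *r)
{
	atomic_fetch_add(&r->refs, 1);
}

static void res_put(struct resource *r)
{
	if (atomic_fetch_sub(&r->refs, 1) == 1) {
		printf("destroying %s\n", r->name);
		free(r);
	}
}

/* the thread owns one reference and drops it when it finishes,
 * like the kref_put() calls at the end of drbd_thread_setup() */
static void *worker(void *arg)
{
	struct resource *r = arg;

	printf("worker running for %s\n", r->name);
	res_put(r);
	return NULL;
}

static int start_worker(struct resource *r, pthread_t *t)
{
	res_get(r);			/* reference handed over to the thread */
	if (pthread_create(t, NULL, worker, r)) {
		res_put(r);		/* creation failed: take the reference back */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct resource *r = malloc(sizeof(*r));
	pthread_t t;

	atomic_init(&r->refs, 1);	/* the caller's own reference */
	r->name = "r0";
	if (start_worker(r, &t) == 0)
		pthread_join(t, NULL);
	res_put(r);
	return 0;
}
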
@@ -429,7 +433,7 @@ int drbd_thread_start(struct drbd_thread *thi)
break;
case EXITING:
thi->t_state = RESTARTING;
- conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
/* fall through */
case RUNNING:
@@ -478,65 +482,60 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
-static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
-{
- struct drbd_thread *thi =
- task == tconn->receiver.task ? &tconn->receiver :
- task == tconn->asender.task ? &tconn->asender :
- task == tconn->worker.task ? &tconn->worker : NULL;
-
- return thi;
-}
-
-char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+int conn_lowest_minor(struct drbd_connection *connection)
{
- struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
- return thi ? thi->name : task->comm;
-}
-
-int conn_lowest_minor(struct drbd_tconn *tconn)
-{
- struct drbd_conf *mdev;
- int vnr = 0, m;
+ struct drbd_peer_device *peer_device;
+ int vnr = 0, minor = -1;
rcu_read_lock();
- mdev = idr_get_next(&tconn->volumes, &vnr);
- m = mdev ? mdev_to_minor(mdev) : -1;
+ peer_device = idr_get_next(&connection->peer_devices, &vnr);
+ if (peer_device)
+ minor = device_to_minor(peer_device->device);
rcu_read_unlock();
- return m;
+ return minor;
}
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
- * @mdev: DRBD device.
*
- * Forces all threads of a device onto the same CPU. This is beneficial for
+ * Forces all threads of a resource onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
+static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
- int ord, cpu;
+ unsigned int *resources_per_cpu, min_index = ~0;
- /* user override. */
- if (cpumask_weight(tconn->cpu_mask))
- return;
+ resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
+ if (resources_per_cpu) {
+ struct drbd_resource *resource;
+ unsigned int cpu, min = ~0;
- ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, tconn->cpu_mask);
- return;
+ rcu_read_lock();
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_cpu(cpu, resource->cpu_mask)
+ resources_per_cpu[cpu]++;
}
+ rcu_read_unlock();
+ for_each_online_cpu(cpu) {
+ if (resources_per_cpu[cpu] < min) {
+ min = resources_per_cpu[cpu];
+ min_index = cpu;
+ }
+ }
+ kfree(resources_per_cpu);
+ }
+ if (min_index == ~0) {
+ cpumask_setall(*cpu_mask);
+ return;
}
- /* should not be reached */
- cpumask_setall(tconn->cpu_mask);
+ cpumask_set_cpu(min_index, *cpu_mask);
}
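
The reworked drbd_calc_cpu_mask() no longer derives the CPU from the minor number; it counts how many resources are already pinned to each CPU and pins the new resource to the least-loaded online CPU, falling back to all CPUs only if the temporary counting array cannot be allocated. The selection itself is a plain argmin over a histogram; a small standalone sketch with made-up placements:

#include <limits.h>
#include <stdio.h>

/* pick the index with the smallest count, like the least-loaded-CPU search
 * above; returns -1 for an empty array */
static int least_loaded(const unsigned int *per_cpu, int ncpus)
{
	unsigned int min = UINT_MAX;
	int best = -1, cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (per_cpu[cpu] < min) {
			min = per_cpu[cpu];
			best = cpu;
		}
	}
	return best;
}

int main(void)
{
	/* hypothetical placement: 2 resources on CPU0, 1 on CPU1, none on CPU2/3 */
	unsigned int per_cpu[4] = { 2, 1, 0, 0 };

	printf("pin next resource to CPU %d\n", least_loaded(per_cpu, 4));	/* CPU 2 */
	return 0;
}
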
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
@@ -544,13 +543,16 @@ void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
*/
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
+ struct drbd_resource *resource = thi->resource;
struct task_struct *p = current;
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
+ set_cpus_allowed_ptr(p, resource->cpu_mask);
}
+#else
+#define drbd_calc_cpu_mask(A) ({})
#endif
/**
@@ -560,9 +562,9 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
* word aligned on 64-bit architectures. (The bitmap send and receive code
* relies on this.)
*/
-unsigned int drbd_header_size(struct drbd_tconn *tconn)
+unsigned int drbd_header_size(struct drbd_connection *connection)
{
- if (tconn->agreed_pro_version >= 100) {
+ if (connection->agreed_pro_version >= 100) {
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
return sizeof(struct p_header100);
} else {
@@ -600,44 +602,44 @@ static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cm
return sizeof(struct p_header100);
}
-static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
void *buffer, enum drbd_packet cmd, int size)
{
- if (tconn->agreed_pro_version >= 100)
+ if (connection->agreed_pro_version >= 100)
return prepare_header100(buffer, cmd, size, vnr);
- else if (tconn->agreed_pro_version >= 95 &&
+ else if (connection->agreed_pro_version >= 95 &&
size > DRBD_MAX_SIZE_H80_PACKET)
return prepare_header95(buffer, cmd, size);
else
return prepare_header80(buffer, cmd, size);
}
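
prepare_header() picks the on-the-wire header format from the negotiated protocol version: 100 and later always use the h100 layout, 95 to 99 fall back to h95 only when the payload is too large for the old h80 length field, and everything older stays on h80. The decision reduces to a small pure function; the size constant below is a placeholder, not the real DRBD_MAX_SIZE_H80_PACKET value:

#include <stdio.h>

enum header_kind { HDR80, HDR95, HDR100 };

#define MAX_SIZE_H80_PACKET 0xffff	/* placeholder cut-over value */

/* same decision structure as prepare_header() */
static enum header_kind pick_header(int agreed_pro_version, unsigned int payload_size)
{
	if (agreed_pro_version >= 100)
		return HDR100;
	if (agreed_pro_version >= 95 && payload_size > MAX_SIZE_H80_PACKET)
		return HDR95;
	return HDR80;
}

int main(void)
{
	printf("%d\n", pick_header(101, 512));		/* HDR100 */
	printf("%d\n", pick_header(96, 1 << 20));	/* HDR95: payload too big for h80 */
	printf("%d\n", pick_header(89, 512));		/* HDR80 */
	return 0;
}
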
-static void *__conn_prepare_command(struct drbd_tconn *tconn,
+static void *__conn_prepare_command(struct drbd_connection *connection,
struct drbd_socket *sock)
{
if (!sock->socket)
return NULL;
- return sock->sbuf + drbd_header_size(tconn);
+ return sock->sbuf + drbd_header_size(connection);
}
-void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
void *p;
mutex_lock(&sock->mutex);
- p = __conn_prepare_command(tconn, sock);
+ p = __conn_prepare_command(connection, sock);
if (!p)
mutex_unlock(&sock->mutex);
return p;
}
-void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
+void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
- return conn_prepare_command(mdev->tconn, sock);
+ return conn_prepare_command(peer_device->connection, sock);
}
-static int __send_command(struct drbd_tconn *tconn, int vnr,
+static int __send_command(struct drbd_connection *connection, int vnr,
struct drbd_socket *sock, enum drbd_packet cmd,
unsigned int header_size, void *data,
unsigned int size)
@@ -654,82 +656,82 @@ static int __send_command(struct drbd_tconn *tconn, int vnr,
*/
msg_flags = data ? MSG_MORE : 0;
- header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
header_size + size);
- err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
msg_flags);
if (data && !err)
- err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ err = drbd_send_all(connection, sock->socket, data, size, 0);
return err;
}
-static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
- return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+ return __send_command(connection, 0, sock, cmd, header_size, data, size);
}
-int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ err = __conn_send_command(connection, sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
enum drbd_packet cmd, unsigned int header_size,
void *data, unsigned int size)
{
int err;
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
- data, size);
+ err = __send_command(peer_device->connection, peer_device->device->vnr,
+ sock, cmd, header_size, data, size);
mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_ping(struct drbd_tconn *tconn)
+int drbd_send_ping(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}
-int drbd_send_ping_ack(struct drbd_tconn *tconn)
+int drbd_send_ping_ack(struct drbd_connection *connection)
{
struct drbd_socket *sock;
- sock = &tconn->meta;
- if (!conn_prepare_command(tconn, sock))
+ sock = &connection->meta;
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+ return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
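
The send helpers that follow share one locking convention, already visible in drbd_send_ping(): *_prepare_command() takes the socket mutex and returns a pointer into the preallocated send buffer, or NULL after having dropped the mutex again, and the matching *_send_command() transmits and releases the mutex, so callers never unlock explicitly on the success path. A stripped-down userspace rendering of that prepare/commit contract, with invented types:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sock_buf {
	pthread_mutex_t mutex;
	int             connected;
	char            sbuf[128];
};

/* lock the buffer and hand out the payload area; on failure the
 * mutex is already released again, as in conn_prepare_command() */
static void *prepare_command(struct sock_buf *s)
{
	pthread_mutex_lock(&s->mutex);
	if (!s->connected) {
		pthread_mutex_unlock(&s->mutex);
		return NULL;
	}
	return s->sbuf;
}

/* "send" the buffer and drop the mutex, as in conn_send_command() */
static int send_command(struct sock_buf *s, size_t len)
{
	printf("sending %zu bytes: %s\n", len, s->sbuf);
	pthread_mutex_unlock(&s->mutex);
	return 0;
}

int main(void)
{
	static struct sock_buf s = { PTHREAD_MUTEX_INITIALIZER, 1, "" };
	char *p = prepare_command(&s);

	if (!p)
		return 1;		/* nothing to unlock here */
	strcpy(p, "P_PING");
	return send_command(&s, strlen(p) + 1);
}
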
-int drbd_send_sync_param(struct drbd_conf *mdev)
+int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_rs_param_95 *p;
int size;
- const int apv = mdev->tconn->agreed_pro_version;
+ const int apv = peer_device->connection->agreed_pro_version;
enum drbd_packet cmd;
struct net_conf *nc;
struct disk_conf *dc;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -742,14 +744,14 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
/* initialize verify_alg and csums_alg */
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (get_ldev(mdev)) {
- dc = rcu_dereference(mdev->ldev->disk_conf);
+ if (get_ldev(peer_device->device)) {
+ dc = rcu_dereference(peer_device->device->ldev->disk_conf);
p->resync_rate = cpu_to_be32(dc->resync_rate);
p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
p->c_delay_target = cpu_to_be32(dc->c_delay_target);
p->c_fill_target = cpu_to_be32(dc->c_fill_target);
p->c_max_rate = cpu_to_be32(dc->c_max_rate);
- put_ldev(mdev);
+ put_ldev(peer_device->device);
} else {
p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
@@ -764,33 +766,33 @@ int drbd_send_sync_param(struct drbd_conf *mdev)
strcpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock();
- return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
-int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
+int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
struct drbd_socket *sock;
struct p_protocol *p;
struct net_conf *nc;
int size, cf;
- sock = &tconn->data;
- p = __conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = __conn_prepare_command(connection, sock);
if (!p)
return -EIO;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
- if (nc->tentative && tconn->agreed_pro_version < 92) {
+ if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
mutex_unlock(&sock->mutex);
- conn_err(tconn, "--dry-run is not supported by peer");
+ drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
size = sizeof(*p);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
size += strlen(nc->integrity_alg) + 1;
p->protocol = cpu_to_be32(nc->wire_protocol);
@@ -805,128 +807,131 @@ int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (tconn->agreed_pro_version >= 87)
+ if (connection->agreed_pro_version >= 87)
strcpy(p->integrity_alg, nc->integrity_alg);
rcu_read_unlock();
- return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+ return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_tconn *tconn)
+int drbd_send_protocol(struct drbd_connection *connection)
{
int err;
- mutex_lock(&tconn->data.mutex);
- err = __drbd_send_protocol(tconn, P_PROTOCOL);
- mutex_unlock(&tconn->data.mutex);
+ mutex_lock(&connection->data.mutex);
+ err = __drbd_send_protocol(connection, P_PROTOCOL);
+ mutex_unlock(&connection->data.mutex);
return err;
}
-int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
+static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_uuids *p;
int i;
- if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(device, D_NEGOTIATING))
return 0;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p) {
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ device->comm_bm_set = drbd_bm_total_weight(device);
+ p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
rcu_read_lock();
- uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
rcu_read_unlock();
- uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
- uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
+ uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
+ uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
- put_ldev(mdev);
- return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
+ put_ldev(device);
+ return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
-int drbd_send_uuids(struct drbd_conf *mdev)
+int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 0);
+ return _drbd_send_uuids(peer_device, 0);
}
-int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
+int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
- return _drbd_send_uuids(mdev, 8);
+ return _drbd_send_uuids(peer_device, 8);
}
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+void drbd_print_uuids(struct drbd_device *device, const char *text)
{
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- u64 *uuid = mdev->ldev->md.uuid;
- dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ u64 *uuid = device->ldev->md.uuid;
+ drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
(unsigned long long)uuid[UI_HISTORY_START],
(unsigned long long)uuid[UI_HISTORY_END]);
- put_ldev(mdev);
+ put_ldev(device);
} else {
- dev_info(DEV, "%s effective data uuid: %016llX\n",
+ drbd_info(device, "%s effective data uuid: %016llX\n",
text,
- (unsigned long long)mdev->ed_uuid);
+ (unsigned long long)device->ed_uuid);
}
}
-void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_rs_uuid *p;
u64 uuid;
- D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
+ D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ uuid = device->ldev->md.uuid[UI_BITMAP];
if (uuid && uuid != UUID_JUST_CREATED)
uuid = uuid + UUID_NEW_BM_OFFSET;
else
get_random_bytes(&uuid, sizeof(u64));
- drbd_uuid_set(mdev, UI_BITMAP, uuid);
- drbd_print_uuids(mdev, "updated sync UUID");
- drbd_md_sync(mdev);
+ drbd_uuid_set(device, UI_BITMAP, uuid);
+ drbd_print_uuids(device, "updated sync UUID");
+ drbd_md_sync(device);
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->uuid = cpu_to_be64(uuid);
- drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
}
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
+int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- D_ASSERT(mdev->ldev->backing_bdev);
- d_size = drbd_get_max_capacity(mdev->ldev);
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ D_ASSERT(device, device->ldev->backing_bdev);
+ d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- q_order_type = drbd_queue_order_type(mdev);
- max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
+ q_order_type = drbd_queue_order_type(device);
+ max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
- put_ldev(mdev);
+ put_ldev(device);
} else {
d_size = 0;
u_size = 0;
@@ -934,45 +939,45 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- if (mdev->tconn->agreed_pro_version <= 94)
+ if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
- else if (mdev->tconn->agreed_pro_version < 100)
+ else if (peer_device->connection->agreed_pro_version < 100)
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
- p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
- return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
* drbd_send_current_state() - Sends the drbd state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
*/
-int drbd_send_current_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
- p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
+ * @peer_device: DRBD peer device.
* @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
@@ -980,73 +985,73 @@ int drbd_send_current_state(struct drbd_conf *mdev)
* between queuing and processing of the after_state_ch work, we still
* want to send each intermediary state in the order it occurred.
*/
-int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
struct drbd_socket *sock;
struct p_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->state = cpu_to_be32(state.i); /* Within the send mutex */
- return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
struct p_req_state *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
-int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_packet cmd;
struct drbd_socket *sock;
struct p_req_state *p;
- cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
- return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
-void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
}
}
-void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
struct drbd_socket *sock;
struct p_req_state_reply *p;
- enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
+ enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (p) {
p->retcode = cpu_to_be32(retcode);
- conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
}
@@ -1067,7 +1072,7 @@ static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
-int fill_bitmap_rle_bits(struct drbd_conf *mdev,
+static int fill_bitmap_rle_bits(struct drbd_device *device,
struct p_compressed_bm *p,
unsigned int size,
struct bm_xfer_ctx *c)
@@ -1082,9 +1087,9 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* may we use this feature? */
rcu_read_lock();
- use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
rcu_read_unlock();
- if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
return 0;
if (c->bit_offset >= c->bm_bits)
@@ -1104,8 +1109,8 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* see how many plain bits we can stuff into one packet
* using RLE and VLI. */
do {
- tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
- : _drbd_bm_find_next(mdev, c->bit_offset);
+ tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
+ : _drbd_bm_find_next(device, c->bit_offset);
if (tmp == -1UL)
tmp = c->bm_bits;
rl = tmp - c->bit_offset;
@@ -1125,7 +1130,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
/* paranoia: catch zero runlength.
* can only happen if bitmap is modified while we scan it. */
if (rl == 0) {
- dev_err(DEV, "unexpected zero runlength while encoding bitmap "
+ drbd_err(device, "unexpected zero runlength while encoding bitmap "
"t:%u bo:%lu\n", toggle, c->bit_offset);
return -1;
}
@@ -1134,7 +1139,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (bits == -ENOBUFS) /* buffer full */
break;
if (bits <= 0) {
- dev_err(DEV, "error while encoding bitmap: %d\n", bits);
+ drbd_err(device, "error while encoding bitmap: %d\n", bits);
return 0;
}
@@ -1171,21 +1176,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
- struct drbd_socket *sock = &mdev->tconn->data;
- unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+ unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err;
- len = fill_bitmap_rle_bits(mdev, p,
+ len = fill_bitmap_rle_bits(device, p,
DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
dcbp_set_code(p, RLE_VLI_Bits);
- err = __send_command(mdev->tconn, mdev->vnr, sock,
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0);
c->packets[0]++;
@@ -1205,8 +1210,8 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
c->bm_words - c->word_offset);
len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
+ drbd_bm_get_lel(device, c->word_offset, num_words, p);
+ err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -1218,7 +1223,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
if (!err) {
if (len == 0) {
- INFO_bm_xfer_stats(mdev, "send", c);
+ INFO_bm_xfer_stats(device, "send", c);
return 0;
} else
return 1;
@@ -1227,128 +1232,128 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
}
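
fill_bitmap_rle_bits() compresses the bitmap as alternating run lengths: starting at the current offset it looks for the next set or next clear bit, depending on which kind of run it is in, emits the distance as one run, and flips the toggle, so long stretches of clean or dirty blocks collapse to a single number before the VLI encoding step. A minimal run-length pass over an ordinary bit array, leaving out the VLI encoding, the first-run bootstrap, and the packet size limits:

#include <stdio.h>

#define NBITS 32

static int test_bit(const unsigned int *bm, unsigned long i)
{
	return (bm[i / 32] >> (i % 32)) & 1;
}

/* find the next bit >= from whose value equals 'want' (or nbits if none),
 * playing the role of _drbd_bm_find_next()/_drbd_bm_find_next_zero() */
static unsigned long find_next(const unsigned int *bm, unsigned long nbits,
			       unsigned long from, int want)
{
	while (from < nbits && test_bit(bm, from) != want)
		from++;
	return from;
}

int main(void)
{
	/* bits 4..11 set, everything else clear */
	unsigned int bitmap[1] = { 0x00000ff0 };
	unsigned long offset = 0, nbits = NBITS;
	int toggle = 0;		/* this sketch starts with a run of clear bits */

	while (offset < nbits) {
		unsigned long next = find_next(bitmap, nbits, offset, !toggle);
		printf("run of %lu %s bits\n", next - offset, toggle ? "set" : "clear");
		offset = next;
		toggle = !toggle;
	}
	/* prints: 4 clear, 8 set, 20 clear */
	return 0;
}
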
/* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_device *device)
{
struct bm_xfer_ctx c;
int err;
- if (!expect(mdev->bitmap))
+ if (!expect(device->bitmap))
return false;
- if (get_ldev(mdev)) {
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
- dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
- drbd_bm_set_all(mdev);
- if (drbd_bm_write(mdev)) {
+ if (get_ldev(device)) {
+ if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
+ drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
+ drbd_bm_set_all(device);
+ if (drbd_bm_write(device)) {
/* write_bm did fail! Leave full sync flag set in Meta P_DATA
* but otherwise process as per normal - need to tell other
* side that a full resync is required! */
- dev_err(DEV, "Failed to write bitmap to disk!\n");
+ drbd_err(device, "Failed to write bitmap to disk!\n");
} else {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
- put_ldev(mdev);
+ put_ldev(device);
}
c = (struct bm_xfer_ctx) {
- .bm_bits = drbd_bm_bits(mdev),
- .bm_words = drbd_bm_words(mdev),
+ .bm_bits = drbd_bm_bits(device),
+ .bm_words = drbd_bm_words(device),
};
do {
- err = send_bitmap_rle_or_plain(mdev, &c);
+ err = send_bitmap_rle_or_plain(device, &c);
} while (err > 0);
return err == 0;
}
-int drbd_send_bitmap(struct drbd_conf *mdev)
+int drbd_send_bitmap(struct drbd_device *device)
{
- struct drbd_socket *sock = &mdev->tconn->data;
+ struct drbd_socket *sock = &first_peer_device(device)->connection->data;
int err = -1;
mutex_lock(&sock->mutex);
if (sock->socket)
- err = !_drbd_send_bitmap(mdev);
+ err = !_drbd_send_bitmap(device);
mutex_unlock(&sock->mutex);
return err;
}
-void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
struct drbd_socket *sock;
struct p_barrier_ack *p;
- if (tconn->cstate < C_WF_REPORT_PARAMS)
+ if (connection->cstate < C_WF_REPORT_PARAMS)
return;
- sock = &tconn->meta;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->meta;
+ p = conn_prepare_command(connection, sock);
if (!p)
return;
p->barrier = barrier_nr;
p->set_size = cpu_to_be32(set_size);
- conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
+ conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
* _drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @cmd: Packet command code.
* @sector: sector, needs to be in big endian byte order
* @blksize: size in byte, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
u64 sector, u32 blksize, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_ack *p;
- if (mdev->state.conn < C_CONNECTED)
+ if (peer_device->device->state.conn < C_CONNECTED)
return -EIO;
- sock = &mdev->tconn->meta;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->meta;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = sector;
p->block_id = block_id;
p->blksize = blksize;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_data *dp, int data_size)
{
- if (mdev->tconn->peer_integrity_tfm)
- data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
- _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ if (peer_device->connection->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
-void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct p_block_req *rp)
{
- _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device
+ * @device: DRBD device
* @cmd: packet command code
* @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(peer_req->i.sector),
cpu_to_be32(peer_req->i.size),
peer_req->block_id);
@@ -1356,32 +1361,32 @@ int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
- return _drbd_send_ack(mdev, cmd,
+ return _drbd_send_ack(peer_device, cmd,
cpu_to_be64(sector),
cpu_to_be32(blksize),
cpu_to_be64(block_id));
}
-int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = block_id;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
void *digest, int digest_size, enum drbd_packet cmd)
{
struct drbd_socket *sock;
@@ -1389,64 +1394,63 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
/* FIXME: Put the digest into the preallocated socket buffer. */
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, cmd, sizeof(*p),
- digest, digest_size);
+ return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
-int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
+int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
struct drbd_socket *sock;
struct p_block_req *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(sector);
p->block_id = ID_SYNCER /* unused */;
p->blksize = cpu_to_be32(size);
- return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
int drop_it;
- /* long elapsed = (long)(jiffies - mdev->last_received); */
+ /* long elapsed = (long)(jiffies - device->last_received); */
- drop_it = tconn->meta.socket == sock
- || !tconn->asender.task
- || get_t_state(&tconn->asender) != RUNNING
- || tconn->cstate < C_WF_REPORT_PARAMS;
+ drop_it = connection->meta.socket == sock
+ || !connection->asender.task
+ || get_t_state(&connection->asender) != RUNNING
+ || connection->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--tconn->ko_count;
+ drop_it = !--connection->ko_count;
if (!drop_it) {
- conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, tconn->ko_count);
- request_ping(tconn);
+ drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, connection->ko_count);
+ request_ping(connection);
}
- return drop_it; /* && (mdev->state == R_PRIMARY) */;
+ return drop_it; /* && (device->state == R_PRIMARY) */;
}
-static void drbd_update_congested(struct drbd_tconn *tconn)
+static void drbd_update_congested(struct drbd_connection *connection)
{
- struct sock *sk = tconn->data.socket->sk;
+ struct sock *sk = connection->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &tconn->flags);
+ set_bit(NET_CONGESTED, &connection->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
@@ -1470,26 +1474,26 @@ static void drbd_update_congested(struct drbd_tconn *tconn)
* As a workaround, we disable sendpage on pages
* with page_count == 0 or PageSlab.
*/
-static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket;
void *addr;
int err;
- socket = mdev->tconn->data.socket;
+ socket = peer_device->connection->data.socket;
addr = kmap(page) + offset;
- err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
+ err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
kunmap(page);
if (!err)
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
return err;
}
-static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
- struct socket *socket = mdev->tconn->data.socket;
+ struct socket *socket = peer_device->connection->data.socket;
mm_segment_t oldfs = get_fs();
int len = size;
int err = -EIO;
@@ -1501,10 +1505,10 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
- return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
+ return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev->tconn);
+ drbd_update_congested(peer_device->connection);
set_fs(KERNEL_DS);
do {
int sent;
@@ -1512,11 +1516,11 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev->tconn, socket))
+ if (we_should_drop_the_connection(peer_device->connection, socket))
break;
continue;
}
- dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
+ drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
if (sent < 0)
err = sent;
@@ -1524,52 +1528,55 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
}
len -= sent;
offset += sent;
- } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
+ } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->tconn->flags);
+ clear_bit(NET_CONGESTED, &peer_device->connection->flags);
if (len == 0) {
err = 0;
- mdev->send_cnt += size >> 9;
+ peer_device->device->send_cnt += size >> 9;
}
return err;
}
-static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_no_send_page(peer_device, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter)
+ ? 0 : MSG_MORE);
if (err)
return err;
}
return 0;
}
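A minimal sketch (not part of this patch) of the immutable-biovec iteration idiom that _drbd_send_bio() and _drbd_send_zc_bio() are converted to here; bio_iter_last() replaces the old i == bio->bi_vcnt - 1 test:
/* Illustrative sketch only, not part of this patch. */
static void example_iterate_bio(struct bio *bio)
{
	struct bio_vec bvec;	/* segment, now returned by value */
	struct bvec_iter iter;	/* iterator state, replaces the old index */

	bio_for_each_segment(bvec, bio, iter) {
		unsigned msg_flags = bio_iter_last(bvec, iter) ? 0 : MSG_MORE;

		/* bvec.bv_page, bvec.bv_offset and bvec.bv_len describe this
		 * segment; all but the last one get the MSG_MORE hint, which
		 * is exactly what the two senders above do. */
		(void)msg_flags;
	}
}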
-static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int err;
- err = _drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ err = _drbd_send_page(peer_device, bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len,
+ bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
}
return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
struct page *page = peer_req->pages;
@@ -1580,7 +1587,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(mdev, page, 0, l,
+ err = _drbd_send_page(peer_device, page, 0, l,
page_chain_next(page) ? MSG_MORE : 0);
if (err)
return err;
@@ -1589,9 +1596,9 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
{
- if (mdev->tconn->agreed_pro_version >= 95)
+ if (connection->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -1603,28 +1610,30 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
-int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
unsigned int dp_flags = 0;
int dgs;
int err;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
- dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
- if (mdev->state.conn >= C_SYNC_SOURCE &&
- mdev->state.conn <= C_PAUSED_SYNC_T)
+ p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ if (device->state.conn >= C_SYNC_SOURCE &&
+ device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
- if (mdev->tconn->agreed_pro_version >= 100) {
+ if (peer_device->connection->agreed_pro_version >= 100) {
if (req->rq_state & RQ_EXP_RECEIVE_ACK)
dp_flags |= DP_SEND_RECEIVE_ACK;
if (req->rq_state & RQ_EXP_WRITE_ACK)
@@ -1632,8 +1641,8 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
p->dp_flags = cpu_to_be32(dp_flags);
if (dgs)
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
@@ -1647,18 +1656,18 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* receiving side, we sure have detected corruption elsewhere.
*/
if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
- err = _drbd_send_bio(mdev, req->master_bio);
+ err = _drbd_send_bio(peer_device, req->master_bio);
else
- err = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(peer_device, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
if (memcmp(p + 1, digest, dgs)) {
- dev_warn(DEV,
+ drbd_warn(device,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
(unsigned long long)req->i.sector, req->i.size);
}
@@ -1675,18 +1684,20 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
struct drbd_peer_request *peer_req)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
int err;
int dgs;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
- dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = peer_device->connection->integrity_tfm ?
+ crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1695,27 +1706,27 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
p->seq_num = 0; /* unused */
p->dp_flags = 0;
if (dgs)
- drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
- err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
+ err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
if (!err)
- err = _drbd_send_zc_ee(mdev, peer_req);
+ err = _drbd_send_zc_ee(peer_device, peer_req);
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
return err;
}
-int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_socket *sock;
struct p_block_desc *p;
- sock = &mdev->tconn->data;
- p = drbd_prepare_command(mdev, sock);
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->blksize = cpu_to_be32(req->i.size);
- return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
+ return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
@@ -1734,7 +1745,7 @@ int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -1755,11 +1766,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == tconn->data.socket) {
+ if (sock == connection->data.socket) {
rcu_read_lock();
- tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
rcu_read_unlock();
- drbd_update_congested(tconn);
+ drbd_update_congested(connection);
}
do {
/* STRANGE
@@ -1773,7 +1784,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(tconn, sock))
+ if (we_should_drop_the_connection(connection, sock))
break;
else
continue;
@@ -1789,17 +1800,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == tconn->data.socket)
- clear_bit(NET_CONGESTED, &tconn->flags);
+ if (sock == connection->data.socket)
+ clear_bit(NET_CONGESTED, &connection->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- conn_err(tconn, "%s_sendmsg returned %d\n",
- sock == tconn->meta.socket ? "msock" : "sock",
+ drbd_err(connection, "%s_sendmsg returned %d\n",
+ sock == connection->meta.socket ? "msock" : "sock",
rv);
- conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
+ conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
@@ -1810,12 +1821,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*
* Returns 0 upon success and a negative error value otherwise.
*/
-int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
size_t size, unsigned msg_flags)
{
int err;
- err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ err = drbd_send(connection, sock, buffer, size, msg_flags);
if (err < 0)
return err;
if (err != size)
@@ -1825,16 +1836,16 @@ int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
- struct drbd_conf *mdev = bdev->bd_disk->private_data;
+ struct drbd_device *device = bdev->bd_disk->private_data;
unsigned long flags;
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- /* to have a stable mdev->state.role
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ /* to have a stable device->state.role
* and no race with updating open_cnt */
- if (mdev->state.role != R_PRIMARY) {
+ if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
else if (!allow_oos)
@@ -1842,8 +1853,8 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
}
if (!rv)
- mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ device->open_cnt++;
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -1851,17 +1862,17 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
- struct drbd_conf *mdev = gd->private_data;
+ struct drbd_device *device = gd->private_data;
mutex_lock(&drbd_main_mutex);
- mdev->open_cnt--;
+ device->open_cnt--;
mutex_unlock(&drbd_main_mutex);
}
-static void drbd_set_defaults(struct drbd_conf *mdev)
+static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
* between big endian and little endian */
- mdev->state = (union drbd_dev_state) {
+ device->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
@@ -1870,130 +1881,123 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
} };
}
-void drbd_init_set_defaults(struct drbd_conf *mdev)
+void drbd_init_set_defaults(struct drbd_device *device)
{
/* the memset(,0,) did most of this.
* note: only assignments, no allocation in here */
- drbd_set_defaults(mdev);
-
- atomic_set(&mdev->ap_bio_cnt, 0);
- atomic_set(&mdev->ap_pending_cnt, 0);
- atomic_set(&mdev->rs_pending_cnt, 0);
- atomic_set(&mdev->unacked_cnt, 0);
- atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->pp_in_use_by_net, 0);
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- atomic_set(&mdev->ap_in_flight, 0);
- atomic_set(&mdev->md_io_in_use, 0);
-
- mutex_init(&mdev->own_state_mutex);
- mdev->state_mutex = &mdev->own_state_mutex;
-
- spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->peer_seq_lock);
-
- INIT_LIST_HEAD(&mdev->active_ee);
- INIT_LIST_HEAD(&mdev->sync_ee);
- INIT_LIST_HEAD(&mdev->done_ee);
- INIT_LIST_HEAD(&mdev->read_ee);
- INIT_LIST_HEAD(&mdev->net_ee);
- INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->resync_work.list);
- INIT_LIST_HEAD(&mdev->unplug_work.list);
- INIT_LIST_HEAD(&mdev->go_diskless.list);
- INIT_LIST_HEAD(&mdev->md_sync_work.list);
- INIT_LIST_HEAD(&mdev->start_resync_work.list);
- INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
-
- mdev->resync_work.cb = w_resync_timer;
- mdev->unplug_work.cb = w_send_write_hint;
- mdev->go_diskless.cb = w_go_diskless;
- mdev->md_sync_work.cb = w_md_sync;
- mdev->bm_io_work.w.cb = w_bitmap_io;
- mdev->start_resync_work.cb = w_start_resync;
-
- mdev->resync_work.mdev = mdev;
- mdev->unplug_work.mdev = mdev;
- mdev->go_diskless.mdev = mdev;
- mdev->md_sync_work.mdev = mdev;
- mdev->bm_io_work.w.mdev = mdev;
- mdev->start_resync_work.mdev = mdev;
-
- init_timer(&mdev->resync_timer);
- init_timer(&mdev->md_sync_timer);
- init_timer(&mdev->start_resync_timer);
- init_timer(&mdev->request_timer);
- mdev->resync_timer.function = resync_timer_fn;
- mdev->resync_timer.data = (unsigned long) mdev;
- mdev->md_sync_timer.function = md_sync_timer_fn;
- mdev->md_sync_timer.data = (unsigned long) mdev;
- mdev->start_resync_timer.function = start_resync_timer_fn;
- mdev->start_resync_timer.data = (unsigned long) mdev;
- mdev->request_timer.function = request_timer_fn;
- mdev->request_timer.data = (unsigned long) mdev;
-
- init_waitqueue_head(&mdev->misc_wait);
- init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->ee_wait);
- init_waitqueue_head(&mdev->al_wait);
- init_waitqueue_head(&mdev->seq_wait);
-
- mdev->resync_wenr = LC_FREE;
- mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
- mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
-}
-
-void drbd_mdev_cleanup(struct drbd_conf *mdev)
+ drbd_set_defaults(device);
+
+ atomic_set(&device->ap_bio_cnt, 0);
+ atomic_set(&device->ap_pending_cnt, 0);
+ atomic_set(&device->rs_pending_cnt, 0);
+ atomic_set(&device->unacked_cnt, 0);
+ atomic_set(&device->local_cnt, 0);
+ atomic_set(&device->pp_in_use_by_net, 0);
+ atomic_set(&device->rs_sect_in, 0);
+ atomic_set(&device->rs_sect_ev, 0);
+ atomic_set(&device->ap_in_flight, 0);
+ atomic_set(&device->md_io_in_use, 0);
+
+ mutex_init(&device->own_state_mutex);
+ device->state_mutex = &device->own_state_mutex;
+
+ spin_lock_init(&device->al_lock);
+ spin_lock_init(&device->peer_seq_lock);
+
+ INIT_LIST_HEAD(&device->active_ee);
+ INIT_LIST_HEAD(&device->sync_ee);
+ INIT_LIST_HEAD(&device->done_ee);
+ INIT_LIST_HEAD(&device->read_ee);
+ INIT_LIST_HEAD(&device->net_ee);
+ INIT_LIST_HEAD(&device->resync_reads);
+ INIT_LIST_HEAD(&device->resync_work.list);
+ INIT_LIST_HEAD(&device->unplug_work.list);
+ INIT_LIST_HEAD(&device->go_diskless.list);
+ INIT_LIST_HEAD(&device->md_sync_work.list);
+ INIT_LIST_HEAD(&device->start_resync_work.list);
+ INIT_LIST_HEAD(&device->bm_io_work.w.list);
+
+ device->resync_work.cb = w_resync_timer;
+ device->unplug_work.cb = w_send_write_hint;
+ device->go_diskless.cb = w_go_diskless;
+ device->md_sync_work.cb = w_md_sync;
+ device->bm_io_work.w.cb = w_bitmap_io;
+ device->start_resync_work.cb = w_start_resync;
+
+ init_timer(&device->resync_timer);
+ init_timer(&device->md_sync_timer);
+ init_timer(&device->start_resync_timer);
+ init_timer(&device->request_timer);
+ device->resync_timer.function = resync_timer_fn;
+ device->resync_timer.data = (unsigned long) device;
+ device->md_sync_timer.function = md_sync_timer_fn;
+ device->md_sync_timer.data = (unsigned long) device;
+ device->start_resync_timer.function = start_resync_timer_fn;
+ device->start_resync_timer.data = (unsigned long) device;
+ device->request_timer.function = request_timer_fn;
+ device->request_timer.data = (unsigned long) device;
+
+ init_waitqueue_head(&device->misc_wait);
+ init_waitqueue_head(&device->state_wait);
+ init_waitqueue_head(&device->ee_wait);
+ init_waitqueue_head(&device->al_wait);
+ init_waitqueue_head(&device->seq_wait);
+
+ device->resync_wenr = LC_FREE;
+ device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+ device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
+}
+
+void drbd_device_cleanup(struct drbd_device *device)
{
int i;
- if (mdev->tconn->receiver.t_state != NONE)
- dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->tconn->receiver.t_state);
-
- mdev->al_writ_cnt =
- mdev->bm_writ_cnt =
- mdev->read_cnt =
- mdev->recv_cnt =
- mdev->send_cnt =
- mdev->writ_cnt =
- mdev->p_size =
- mdev->rs_start =
- mdev->rs_total =
- mdev->rs_failed = 0;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
+ if (first_peer_device(device)->connection->receiver.t_state != NONE)
+ drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
+ first_peer_device(device)->connection->receiver.t_state);
+
+ device->al_writ_cnt =
+ device->bm_writ_cnt =
+ device->read_cnt =
+ device->recv_cnt =
+ device->send_cnt =
+ device->writ_cnt =
+ device->p_size =
+ device->rs_start =
+ device->rs_total =
+ device->rs_failed = 0;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = 0;
- mdev->rs_mark_time[i] = 0;
+ device->rs_mark_left[i] = 0;
+ device->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->tconn->net_conf == NULL);
+ D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- drbd_set_my_capacity(mdev, 0);
- if (mdev->bitmap) {
+ drbd_set_my_capacity(device, 0);
+ if (device->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0, 1);
- drbd_bm_cleanup(mdev);
+ drbd_bm_resize(device, 0, 1);
+ drbd_bm_cleanup(device);
}
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- clear_bit(AL_SUSPENDED, &mdev->flags);
+ clear_bit(AL_SUSPENDED, &device->flags);
- D_ASSERT(list_empty(&mdev->active_ee));
- D_ASSERT(list_empty(&mdev->sync_ee));
- D_ASSERT(list_empty(&mdev->done_ee));
- D_ASSERT(list_empty(&mdev->read_ee));
- D_ASSERT(list_empty(&mdev->net_ee));
- D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
- D_ASSERT(list_empty(&mdev->resync_work.list));
- D_ASSERT(list_empty(&mdev->unplug_work.list));
- D_ASSERT(list_empty(&mdev->go_diskless.list));
+ D_ASSERT(device, list_empty(&device->active_ee));
+ D_ASSERT(device, list_empty(&device->sync_ee));
+ D_ASSERT(device, list_empty(&device->done_ee));
+ D_ASSERT(device, list_empty(&device->read_ee));
+ D_ASSERT(device, list_empty(&device->net_ee));
+ D_ASSERT(device, list_empty(&device->resync_reads));
+ D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
+ D_ASSERT(device, list_empty(&device->resync_work.list));
+ D_ASSERT(device, list_empty(&device->unplug_work.list));
+ D_ASSERT(device, list_empty(&device->go_diskless.list));
- drbd_set_defaults(mdev);
+ drbd_set_defaults(device);
}
@@ -2008,7 +2012,7 @@ static void drbd_destroy_mempools(void)
drbd_pp_vacant--;
}
- /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
if (drbd_md_io_bio_set)
bioset_free(drbd_md_io_bio_set);
@@ -2128,69 +2132,73 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
int rr;
- rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(device, &device->active_ee);
if (rr)
- dev_err(DEV, "%d EEs in active list found!\n", rr);
+ drbd_err(device, "%d EEs in active list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(device, &device->sync_ee);
if (rr)
- dev_err(DEV, "%d EEs in sync list found!\n", rr);
+ drbd_err(device, "%d EEs in sync list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(device, &device->read_ee);
if (rr)
- dev_err(DEV, "%d EEs in read list found!\n", rr);
+ drbd_err(device, "%d EEs in read list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
- dev_err(DEV, "%d EEs in done list found!\n", rr);
+ drbd_err(device, "%d EEs in done list found!\n", rr);
- rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(device, &device->net_ee);
if (rr)
- dev_err(DEV, "%d EEs in net list found!\n", rr);
+ drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
-void drbd_minor_destroy(struct kref *kref)
+void drbd_destroy_device(struct kref *kref)
{
- struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = container_of(kref, struct drbd_device, kref);
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
- del_timer_sync(&mdev->request_timer);
+ del_timer_sync(&device->request_timer);
/* paranoia asserts */
- D_ASSERT(mdev->open_cnt == 0);
+ D_ASSERT(device, device->open_cnt == 0);
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
- if (mdev->this_bdev)
- bdput(mdev->this_bdev);
+ if (device->this_bdev)
+ bdput(device->this_bdev);
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;
- drbd_release_all_peer_reqs(mdev);
+ drbd_release_all_peer_reqs(device);
- lc_destroy(mdev->act_log);
- lc_destroy(mdev->resync);
+ lc_destroy(device->act_log);
+ lc_destroy(device->resync);
- kfree(mdev->p_uuid);
- /* mdev->p_uuid = NULL; */
+ kfree(device->p_uuid);
+ /* device->p_uuid = NULL; */
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- kfree(mdev->rs_plan_s);
- kfree(mdev);
+ if (device->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(device);
+ __free_page(device->md_io_page);
+ put_disk(device->vdisk);
+ blk_cleanup_queue(device->rq_queue);
+ kfree(device->rs_plan_s);
+ kfree(first_peer_device(device));
+ kfree(device);
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_connection(connection, resource)
+ kref_put(&connection->kref, drbd_destroy_connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
/* One global retry thread, if we need to push back some bio and have it
@@ -2215,19 +2223,19 @@ static void do_retry(struct work_struct *ws)
spin_unlock_irq(&retry->lock);
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
unsigned long start_time = req->start_time;
bool expected;
- expected =
+ expected =
expect(atomic_read(&req->completion_ref) == 0) &&
expect(req->rq_state & RQ_POSTPONED) &&
expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
(req->rq_state & RQ_LOCAL_ABORTED) != 0);
if (!expected)
- dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
req, atomic_read(&req->completion_ref),
req->rq_state);
@@ -2251,8 +2259,8 @@ static void do_retry(struct work_struct *ws)
/* We are not just doing generic_make_request(),
* as we want to keep the start_time information. */
- inc_ap_bio(mdev);
- __drbd_make_request(mdev, bio, start_time);
+ inc_ap_bio(device);
+ __drbd_make_request(device, bio, start_time);
}
}
@@ -2266,17 +2274,38 @@ void drbd_restart_request(struct drbd_request *req)
/* Drop the extra reference that would otherwise
* have been dropped by complete_master_bio.
* do_retry() needs to grab a new one. */
- dec_ap_bio(req->w.mdev);
+ dec_ap_bio(req->device);
queue_work(retry.wq, &retry.worker);
}
+void drbd_destroy_resource(struct kref *kref)
+{
+ struct drbd_resource *resource =
+ container_of(kref, struct drbd_resource, kref);
+
+ idr_destroy(&resource->devices);
+ free_cpumask_var(resource->cpu_mask);
+ kfree(resource->name);
+ kfree(resource);
+}
+
+void drbd_free_resource(struct drbd_resource *resource)
+{
+ struct drbd_connection *connection, *tmp;
+
+ for_each_connection_safe(connection, tmp, resource) {
+ list_del(&connection->connections);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ kref_put(&resource->kref, drbd_destroy_resource);
+}
static void drbd_cleanup(void)
{
unsigned int i;
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn, *tmp;
+ struct drbd_device *device;
+ struct drbd_resource *resource, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -2296,26 +2325,19 @@ static void drbd_cleanup(void)
drbd_genl_unregister();
- idr_for_each_entry(&minors, mdev, i) {
- idr_remove(&minors, mdev_to_minor(mdev));
- idr_remove(&mdev->tconn->volumes, mdev->vnr);
- destroy_workqueue(mdev->submit.wq);
- del_gendisk(mdev->vdisk);
- /* synchronize_rcu(); No other threads running at this point */
- kref_put(&mdev->kref, &drbd_minor_destroy);
- }
+ idr_for_each_entry(&drbd_devices, device, i)
+ drbd_delete_device(device);
/* not _rcu since, no other updater anymore. Genl already unregistered */
- list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
- list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
- /* synchronize_rcu(); */
- kref_put(&tconn->kref, &conn_destroy);
+ for_each_resource_safe(resource, tmp, &drbd_resources) {
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
}
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
- idr_destroy(&minors);
+ idr_destroy(&drbd_devices);
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -2329,49 +2351,50 @@ static void drbd_cleanup(void)
*/
static int drbd_congested(void *congested_data, int bdi_bits)
{
- struct drbd_conf *mdev = congested_data;
+ struct drbd_device *device = congested_data;
struct request_queue *q;
char reason = '-';
int r = 0;
- if (!may_inc_ap_bio(mdev)) {
+ if (!may_inc_ap_bio(device)) {
/* DRBD has frozen IO */
r = bdi_bits;
reason = 'd';
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
+ if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
* currently blocked waiting for that usermode helper to
* finish.
*/
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ if (!get_ldev_if_state(device, D_UP_TO_DATE))
r |= (1 << BDI_sync_congested);
else
- put_ldev(mdev);
+ put_ldev(device);
r &= bdi_bits;
reason = 'c';
goto out;
}
- if (get_ldev(mdev)) {
- q = bdev_get_queue(mdev->ldev->backing_bdev);
+ if (get_ldev(device)) {
+ q = bdev_get_queue(device->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
- put_ldev(mdev);
+ put_ldev(device);
if (r)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) &&
+ test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
out:
- mdev->congestion_reason = reason;
+ device->congestion_reason = reason;
return r;
}
@@ -2382,45 +2405,72 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq)
init_waitqueue_head(&wq->q_wait);
}
-struct drbd_tconn *conn_get_by_name(const char *name)
+struct completion_work {
+ struct drbd_work w;
+ struct completion done;
+};
+
+static int w_complete(struct drbd_work *w, int cancel)
+{
+ struct completion_work *completion_work =
+ container_of(w, struct completion_work, w);
+
+ complete(&completion_work->done);
+ return 0;
+}
+
+void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
+{
+ struct completion_work completion_work;
+
+ completion_work.w.cb = w_complete;
+ init_completion(&completion_work.done);
+ drbd_queue_work(work_queue, &completion_work.w);
+ wait_for_completion(&completion_work.done);
+}
+
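A minimal usage sketch (not part of this patch) for the drbd_flush_workqueue() helper added above; the connection pointer is assumed to come from the caller's context:
/* Illustrative sketch only, not part of this patch. */
static void example_drain_sender_work(struct drbd_connection *connection)
{
	/* Queues a w_complete item on the connection's sender work queue
	 * and blocks until the worker thread has processed everything
	 * queued before it, i.e. the queue is drained up to this point. */
	drbd_flush_workqueue(&connection->sender_work);
}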
+struct drbd_resource *drbd_find_resource(const char *name)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
if (!name || !name[0])
return NULL;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (!strcmp(tconn->name, name)) {
- kref_get(&tconn->kref);
+ for_each_resource_rcu(resource, &drbd_resources) {
+ if (!strcmp(resource->name, name)) {
+ kref_get(&resource->kref);
goto found;
}
}
- tconn = NULL;
+ resource = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return resource;
}
-struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
rcu_read_lock();
- list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
- if (tconn->my_addr_len == my_addr_len &&
- tconn->peer_addr_len == peer_addr_len &&
- !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
- !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
- kref_get(&tconn->kref);
- goto found;
+ for_each_resource_rcu(resource, &drbd_resources) {
+ for_each_connection_rcu(connection, resource) {
+ if (connection->my_addr_len == my_addr_len &&
+ connection->peer_addr_len == peer_addr_len &&
+ !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&connection->kref);
+ goto found;
+ }
}
}
- tconn = NULL;
+ connection = NULL;
found:
rcu_read_unlock();
- return tconn;
+ return connection;
}
static int drbd_alloc_socket(struct drbd_socket *socket)
@@ -2440,29 +2490,30 @@ static void drbd_free_socket(struct drbd_socket *socket)
free_page((unsigned long) socket->rbuf);
}
-void conn_free_crypto(struct drbd_tconn *tconn)
+void conn_free_crypto(struct drbd_connection *connection)
{
- drbd_free_sock(tconn);
+ drbd_free_sock(connection);
- crypto_free_hash(tconn->csums_tfm);
- crypto_free_hash(tconn->verify_tfm);
- crypto_free_hash(tconn->cram_hmac_tfm);
- crypto_free_hash(tconn->integrity_tfm);
- crypto_free_hash(tconn->peer_integrity_tfm);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
+ crypto_free_hash(connection->csums_tfm);
+ crypto_free_hash(connection->verify_tfm);
+ crypto_free_hash(connection->cram_hmac_tfm);
+ crypto_free_hash(connection->integrity_tfm);
+ crypto_free_hash(connection->peer_integrity_tfm);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
- tconn->csums_tfm = NULL;
- tconn->verify_tfm = NULL;
- tconn->cram_hmac_tfm = NULL;
- tconn->integrity_tfm = NULL;
- tconn->peer_integrity_tfm = NULL;
- tconn->int_dig_in = NULL;
- tconn->int_dig_vv = NULL;
+ connection->csums_tfm = NULL;
+ connection->verify_tfm = NULL;
+ connection->cram_hmac_tfm = NULL;
+ connection->integrity_tfm = NULL;
+ connection->peer_integrity_tfm = NULL;
+ connection->int_dig_in = NULL;
+ connection->int_dig_vv = NULL;
}
-int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
+ struct drbd_connection *connection;
cpumask_var_t new_cpu_mask;
int err;
@@ -2475,22 +2526,24 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
- /* FIXME: Get rid of constant 32 here */
- err = bitmap_parse(res_opts->cpu_mask, 32,
+ err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) {
- conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
/* retcode = ERR_CPU_MASK_PARSE; */
goto fail;
}
}
- tconn->res_opts = *res_opts;
- if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
- cpumask_copy(tconn->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(tconn);
- tconn->receiver.reset_cpu_mask = 1;
- tconn->asender.reset_cpu_mask = 1;
- tconn->worker.reset_cpu_mask = 1;
+ resource->res_opts = *res_opts;
+ if (cpumask_empty(new_cpu_mask))
+ drbd_calc_cpu_mask(&new_cpu_mask);
+ if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(resource->cpu_mask, new_cpu_mask);
+ for_each_connection_rcu(connection, resource) {
+ connection->receiver.reset_cpu_mask = 1;
+ connection->asender.reset_cpu_mask = 1;
+ connection->worker.reset_cpu_mask = 1;
+ }
}
err = 0;
@@ -2500,146 +2553,177 @@ fail:
}
+struct drbd_resource *drbd_create_resource(const char *name)
+{
+ struct drbd_resource *resource;
+
+ resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
+ if (!resource)
+ goto fail;
+ resource->name = kstrdup(name, GFP_KERNEL);
+ if (!resource->name)
+ goto fail_free_resource;
+ if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
+ goto fail_free_name;
+ kref_init(&resource->kref);
+ idr_init(&resource->devices);
+ INIT_LIST_HEAD(&resource->connections);
+ list_add_tail_rcu(&resource->resources, &drbd_resources);
+ mutex_init(&resource->conf_update);
+ spin_lock_init(&resource->req_lock);
+ return resource;
+
+fail_free_name:
+ kfree(resource->name);
+fail_free_resource:
+ kfree(resource);
+fail:
+ return NULL;
+}
+
/* caller must be under genl_lock() */
-struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
- tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
- if (!tconn)
+ connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
+ if (!connection)
return NULL;
- tconn->name = kstrdup(name, GFP_KERNEL);
- if (!tconn->name)
+ if (drbd_alloc_socket(&connection->data))
goto fail;
-
- if (drbd_alloc_socket(&tconn->data))
- goto fail;
- if (drbd_alloc_socket(&tconn->meta))
+ if (drbd_alloc_socket(&connection->meta))
goto fail;
- if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!connection->current_epoch)
goto fail;
- if (set_resource_options(tconn, res_opts))
- goto fail;
+ INIT_LIST_HEAD(&connection->transfer_log);
- tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!tconn->current_epoch)
- goto fail;
+ INIT_LIST_HEAD(&connection->current_epoch->list);
+ connection->epochs = 1;
+ spin_lock_init(&connection->epoch_lock);
+ connection->write_ordering = WO_bdev_flush;
- INIT_LIST_HEAD(&tconn->transfer_log);
+ connection->send.seen_any_write_yet = false;
+ connection->send.current_epoch_nr = 0;
+ connection->send.current_epoch_writes = 0;
- INIT_LIST_HEAD(&tconn->current_epoch->list);
- tconn->epochs = 1;
- spin_lock_init(&tconn->epoch_lock);
- tconn->write_ordering = WO_bdev_flush;
+ resource = drbd_create_resource(name);
+ if (!resource)
+ goto fail;
- tconn->send.seen_any_write_yet = false;
- tconn->send.current_epoch_nr = 0;
- tconn->send.current_epoch_writes = 0;
+ connection->cstate = C_STANDALONE;
+ mutex_init(&connection->cstate_mutex);
+ init_waitqueue_head(&connection->ping_wait);
+ idr_init(&connection->peer_devices);
- tconn->cstate = C_STANDALONE;
- mutex_init(&tconn->cstate_mutex);
- spin_lock_init(&tconn->req_lock);
- mutex_init(&tconn->conf_update);
- init_waitqueue_head(&tconn->ping_wait);
- idr_init(&tconn->volumes);
+ drbd_init_workqueue(&connection->sender_work);
+ mutex_init(&connection->data.mutex);
+ mutex_init(&connection->meta.mutex);
- drbd_init_workqueue(&tconn->sender_work);
- mutex_init(&tconn->data.mutex);
- mutex_init(&tconn->meta.mutex);
+ drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
+ connection->receiver.connection = connection;
+ drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
+ connection->worker.connection = connection;
+ drbd_thread_init(resource, &connection->asender, drbd_asender, "asender");
+ connection->asender.connection = connection;
- drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
- drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
- drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+ kref_init(&connection->kref);
- kref_init(&tconn->kref);
- list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+ connection->resource = resource;
- return tconn;
+ if (set_resource_options(resource, res_opts))
+ goto fail_resource;
-fail:
- kfree(tconn->current_epoch);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn);
+ kref_get(&resource->kref);
+ list_add_tail_rcu(&connection->connections, &resource->connections);
+ return connection;
+fail_resource:
+ list_del(&resource->resources);
+ drbd_free_resource(resource);
+fail:
+ kfree(connection->current_epoch);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection);
return NULL;
}
-void conn_destroy(struct kref *kref)
+void drbd_destroy_connection(struct kref *kref)
{
- struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+ struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
+ struct drbd_resource *resource = connection->resource;
- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
- kfree(tconn->current_epoch);
+ if (atomic_read(&connection->current_epoch->epoch_size) != 0)
+ drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
+ kfree(connection->current_epoch);
- idr_destroy(&tconn->volumes);
+ idr_destroy(&connection->peer_devices);
- free_cpumask_var(tconn->cpu_mask);
- drbd_free_socket(&tconn->meta);
- drbd_free_socket(&tconn->data);
- kfree(tconn->name);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
- kfree(tconn);
+ drbd_free_socket(&connection->meta);
+ drbd_free_socket(&connection->data);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+ kfree(connection);
+ kref_put(&resource->kref, drbd_destroy_resource);
}
-int init_submitter(struct drbd_conf *mdev)
+static int init_submitter(struct drbd_device *device)
{
/* opencoded create_singlethread_workqueue(),
* to be able to say "drbd%d", ..., minor */
- mdev->submit.wq = alloc_workqueue("drbd%u_submit",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
- if (!mdev->submit.wq)
+ device->submit.wq = alloc_workqueue("drbd%u_submit",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
+ if (!device->submit.wq)
return -ENOMEM;
- INIT_WORK(&mdev->submit.worker, do_submit);
- spin_lock_init(&mdev->submit.lock);
- INIT_LIST_HEAD(&mdev->submit.writes);
+ INIT_WORK(&device->submit.worker, do_submit);
+ spin_lock_init(&device->submit.lock);
+ INIT_LIST_HEAD(&device->submit.writes);
return 0;
}
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
{
- struct drbd_conf *mdev;
+ struct drbd_connection *connection;
+ struct drbd_device *device;
+ struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
struct request_queue *q;
- int vnr_got = vnr;
- int minor_got = minor;
+ int id;
enum drbd_ret_code err = ERR_NOMEM;
- mdev = minor_to_mdev(minor);
- if (mdev)
+ device = minor_to_device(minor);
+ if (device)
return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
- mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
- if (!mdev)
+ device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
+ if (!device)
return ERR_NOMEM;
+ kref_init(&device->kref);
- kref_get(&tconn->kref);
- mdev->tconn = tconn;
-
- mdev->minor = minor;
- mdev->vnr = vnr;
+ kref_get(&resource->kref);
+ device->resource = resource;
+ device->minor = minor;
+ device->vnr = vnr;
- drbd_init_set_defaults(mdev);
+ drbd_init_set_defaults(device);
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
goto out_no_q;
- mdev->rq_queue = q;
- q->queuedata = mdev;
+ device->rq_queue = q;
+ q->queuedata = device;
disk = alloc_disk(1);
if (!disk)
goto out_no_disk;
- mdev->vdisk = disk;
+ device->vdisk = disk;
set_disk_ro(disk, true);
@@ -2648,14 +2732,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
disk->first_minor = minor;
disk->fops = &drbd_ops;
sprintf(disk->disk_name, "drbd%d", minor);
- disk->private_data = mdev;
+ disk->private_data = device;
- mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
+ device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
/* we have no partitions. we contain only ourselves. */
- mdev->this_bdev->bd_contains = mdev->this_bdev;
+ device->this_bdev->bd_contains = device->this_bdev;
q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = mdev;
+ q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
@@ -2664,70 +2748,125 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
+ q->queue_lock = &resource->req_lock;
- mdev->md_io_page = alloc_page(GFP_KERNEL);
- if (!mdev->md_io_page)
+ device->md_io_page = alloc_page(GFP_KERNEL);
+ if (!device->md_io_page)
goto out_no_io_page;
- if (drbd_bm_init(mdev))
+ if (drbd_bm_init(device))
goto out_no_bitmap;
- mdev->read_requests = RB_ROOT;
- mdev->write_requests = RB_ROOT;
+ device->read_requests = RB_ROOT;
+ device->write_requests = RB_ROOT;
- minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
- if (minor_got < 0) {
- if (minor_got == -ENOSPC) {
+ id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
err = ERR_MINOR_EXISTS;
drbd_msg_put_info("requested minor exists already");
}
goto out_no_minor_idr;
}
+ kref_get(&device->kref);
- vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
- if (vnr_got < 0) {
- if (vnr_got == -ENOSPC) {
- err = ERR_INVALID_REQUEST;
- drbd_msg_put_info("requested volume exists already");
+ id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
}
goto out_idr_remove_minor;
}
+ kref_get(&device->kref);
+
+ INIT_LIST_HEAD(&device->peer_devices);
+ for_each_connection(connection, resource) {
+ peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
+ if (!peer_device)
+ goto out_idr_remove_from_resource;
+ peer_device->connection = connection;
+ peer_device->device = device;
+
+ list_add(&peer_device->peer_devices, &device->peer_devices);
+ kref_get(&device->kref);
+
+ id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+ if (id < 0) {
+ if (id == -ENOSPC) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ }
+ goto out_idr_remove_from_resource;
+ }
+ kref_get(&connection->kref);
+ }
- if (init_submitter(mdev)) {
+ if (init_submitter(device)) {
err = ERR_NOMEM;
drbd_msg_put_info("unable to create submit workqueue");
goto out_idr_remove_vol;
}
add_disk(disk);
- kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
/* inherit the connection state */
- mdev->state.conn = tconn->cstate;
- if (mdev->state.conn == C_WF_REPORT_PARAMS)
- drbd_connected(mdev);
+ device->state.conn = first_connection(resource)->cstate;
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ for_each_peer_device(peer_device, device)
+ drbd_connected(peer_device);
+ }
return NO_ERROR;
out_idr_remove_vol:
- idr_remove(&tconn->volumes, vnr_got);
+ idr_remove(&connection->peer_devices, vnr);
+out_idr_remove_from_resource:
+ for_each_connection(connection, resource) {
+ peer_device = idr_find(&connection->peer_devices, vnr);
+ if (peer_device) {
+ idr_remove(&connection->peer_devices, vnr);
+ kref_put(&connection->kref, drbd_destroy_connection);
+ }
+ }
+ for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
+ list_del(&peer_device->peer_devices);
+ kfree(peer_device);
+ }
+ idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
- idr_remove(&minors, minor_got);
+ idr_remove(&drbd_devices, minor);
synchronize_rcu();
out_no_minor_idr:
- drbd_bm_cleanup(mdev);
+ drbd_bm_cleanup(device);
out_no_bitmap:
- __free_page(mdev->md_io_page);
+ __free_page(device->md_io_page);
out_no_io_page:
put_disk(disk);
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- kfree(mdev);
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&resource->kref, drbd_destroy_resource);
+ kfree(device);
return err;
}
+void drbd_delete_device(struct drbd_device *device)
+{
+ struct drbd_resource *resource = device->resource;
+ struct drbd_connection *connection;
+ int refs = 3;
+
+ for_each_connection(connection, resource) {
+ idr_remove(&connection->peer_devices, device->vnr);
+ refs++;
+ }
+ idr_remove(&resource->devices, device->vnr);
+ idr_remove(&drbd_devices, device_to_minor(device));
+ del_gendisk(device->vdisk);
+ synchronize_rcu();
+ kref_sub(&device->kref, refs, drbd_destroy_device);
+}
+
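A sketch (not part of this patch) of how the restructured objects introduced here hang together: a drbd_resource owns its devices and connections, and each drbd_peer_device pairs one device with one connection. Walking them, with locking and reference counting elided, looks roughly like this:
/* Illustrative sketch only, not part of this patch. */
static void example_walk_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;
	struct drbd_peer_device *peer_device;
	int vnr;

	for_each_connection(connection, resource) {
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			/* Invariants after this patch:
			 *   peer_device->connection == connection
			 *   device->resource == resource
			 * and device is also registered in resource->devices
			 * under device->vnr. */
			(void)device;
		}
	}
}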
int __init drbd_init(void)
{
int err;
@@ -2758,10 +2897,10 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- idr_init(&minors);
+ idr_init(&drbd_devices);
rwlock_init(&global_state_lock);
- INIT_LIST_HEAD(&drbd_tconns);
+ INIT_LIST_HEAD(&drbd_resources);
err = drbd_genl_register();
if (err) {
@@ -2819,37 +2958,39 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_tconn *tconn)
+void drbd_free_sock(struct drbd_connection *connection)
{
- if (tconn->data.socket) {
- mutex_lock(&tconn->data.mutex);
- kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
- sock_release(tconn->data.socket);
- tconn->data.socket = NULL;
- mutex_unlock(&tconn->data.mutex);
+ if (connection->data.socket) {
+ mutex_lock(&connection->data.mutex);
+ kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
+ sock_release(connection->data.socket);
+ connection->data.socket = NULL;
+ mutex_unlock(&connection->data.mutex);
}
- if (tconn->meta.socket) {
- mutex_lock(&tconn->meta.mutex);
- kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
- sock_release(tconn->meta.socket);
- tconn->meta.socket = NULL;
- mutex_unlock(&tconn->meta.mutex);
+ if (connection->meta.socket) {
+ mutex_lock(&connection->meta.mutex);
+ kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
+ sock_release(connection->meta.socket);
+ connection->meta.socket = NULL;
+ mutex_unlock(&connection->meta.mutex);
}
}
/* meta data management */
-void conn_md_sync(struct drbd_tconn *tconn)
+void conn_md_sync(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_md_sync(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_md_sync(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
@@ -2880,7 +3021,7 @@ struct meta_data_on_disk {
-void drbd_md_write(struct drbd_conf *mdev, void *b)
+void drbd_md_write(struct drbd_device *device, void *b)
{
struct meta_data_on_disk *buffer = b;
sector_t sector;
@@ -2888,39 +3029,39 @@ void drbd_md_write(struct drbd_conf *mdev, void *b)
memset(buffer, 0, sizeof(*buffer));
- buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
for (i = UI_CURRENT; i < UI_SIZE; i++)
- buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
- buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
+ buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
+ buffer->flags = cpu_to_be32(device->ldev->md.flags);
buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
- buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
- buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
- buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
+ buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
+ buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
+ buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
- buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
+ buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
- buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
- buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
+ buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
+ buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
- buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
- buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+ buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
+ buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
- D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
- sector = mdev->ldev->md.md_offset;
+ D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
+ sector = device->ldev->md.md_offset;
- if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
/* this was a try anyways ... */
- dev_err(DEV, "meta data update failed!\n");
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ drbd_err(device, "meta data update failed!\n");
+ drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
}
}
/**
* drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
- * @mdev: DRBD device.
+ * @device: DRBD device.
*/
-void drbd_md_sync(struct drbd_conf *mdev)
+void drbd_md_sync(struct drbd_device *device)
{
struct meta_data_on_disk *buffer;
@@ -2928,32 +3069,32 @@ void drbd_md_sync(struct drbd_conf *mdev)
BUILD_BUG_ON(UI_SIZE != 4);
BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
- del_timer(&mdev->md_sync_timer);
+ del_timer(&device->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ if (!test_and_clear_bit(MD_DIRTY, &device->flags))
return;
/* We use here D_FAILED and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
- if (!get_ldev_if_state(mdev, D_FAILED))
+ if (!get_ldev_if_state(device, D_FAILED))
return;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
goto out;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
- /* Update mdev->ldev->md.la_size_sect,
+ /* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
- mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
+ device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
out:
- put_ldev(mdev);
+ put_ldev(device);
}
-static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+static int check_activity_log_stripe_size(struct drbd_device *device,
struct meta_data_on_disk *on_disk,
struct drbd_md *in_core)
{
@@ -2993,12 +3134,12 @@ static int check_activity_log_stripe_size(struct drbd_conf *mdev,
return 0;
err:
- dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+ drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
al_stripes, al_stripe_size_4k);
return -EINVAL;
}
-static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
sector_t capacity = drbd_get_capacity(bdev->md_bdev);
struct drbd_md *in_core = &bdev->md;
@@ -3065,7 +3206,7 @@ static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_d
return 0;
err:
- dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+ drbd_err(device, "meta data offsets don't make sense: idx=%d "
"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
in_core->meta_dev_idx,
@@ -3080,25 +3221,25 @@ err:
/**
* drbd_md_read() - Reads in the meta data super block
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @bdev: Device from which the meta data should be read in.
*
* Return NO_ERROR on success, and an enum drbd_ret_code in case
* something goes wrong.
*
* Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
- * even before @bdev is assigned to @mdev->ldev.
+ * even before @bdev is assigned to @device->ldev.
*/
-int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
u32 magic, flags;
int i, rv = NO_ERROR;
- if (mdev->state.disk != D_DISKLESS)
+ if (device->state.disk != D_DISKLESS)
return ERR_DISK_CONFIGURED;
- buffer = drbd_md_get_buffer(mdev);
+ buffer = drbd_md_get_buffer(device);
if (!buffer)
return ERR_NOMEM;
@@ -3107,10 +3248,10 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
bdev->md.md_offset = drbd_md_ss(bdev);
- if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
- dev_err(DEV, "Error while reading metadata.\n");
+ drbd_err(device, "Error while reading metadata.\n");
rv = ERR_IO_MD_DISK;
goto err;
}
@@ -3120,7 +3261,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
(magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
/* btw: that's Activity Log clean, not "all" clean. */
- dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
rv = ERR_MD_UNCLEAN;
goto err;
}
@@ -3128,14 +3269,14 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = ERR_MD_INVALID;
if (magic != DRBD_MD_MAGIC_08) {
if (magic == DRBD_MD_MAGIC_07)
- dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
else
- dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
+ drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
goto err;
}
if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
- dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
+ drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
goto err;
}
@@ -3152,182 +3293,182 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
- if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+ if (check_activity_log_stripe_size(device, buffer, &bdev->md))
goto err;
- if (check_offsets_and_sizes(mdev, bdev))
+ if (check_offsets_and_sizes(device, bdev))
goto err;
if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
- dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
+ drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
goto err;
}
if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
- dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
+ drbd_err(device, "unexpected md_size: %u (expected %u)\n",
be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
goto err;
}
rv = NO_ERROR;
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED) {
+ spin_lock_irq(&device->resource->req_lock);
+ if (device->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
- mdev->peer_max_bio_size = peer;
+ device->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
err:
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
return rv;
}
/**
* drbd_md_mark_dirty() - Mark meta data super block as dirty
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Call this function if you change anything that should be written to
* the meta-data super block. This function sets MD_DIRTY, and starts a
* timer that ensures that within five seconds you have to call drbd_md_sync().
*/
#ifdef DEBUG
-void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
+void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
- mod_timer(&mdev->md_sync_timer, jiffies + HZ);
- mdev->last_md_mark_dirty.line = line;
- mdev->last_md_mark_dirty.func = func;
+ if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
+ mod_timer(&device->md_sync_timer, jiffies + HZ);
+ device->last_md_mark_dirty.line = line;
+ device->last_md_mark_dirty.func = func;
}
}
#else
-void drbd_md_mark_dirty(struct drbd_conf *mdev)
+void drbd_md_mark_dirty(struct drbd_device *device)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
- mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
+ if (!test_and_set_bit(MD_DIRTY, &device->flags))
+ mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
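For illustration, a minimal user-space sketch of the MD_DIRTY debounce pattern visible in the two functions above: mark-dirty sets the flag and arms a flush timer only on the first call, while the sync path disarms the timer and writes only if it is the one that clears the flag. C11 atomics and a plain bool stand in for the kernel's test_and_set_bit()/test_and_clear_bit() and timer API; all names and the 5*HZ value are taken from the patch context, everything else is a placeholder.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct md_state {
	atomic_bool dirty;      /* stands in for the MD_DIRTY device flag */
	bool timer_armed;       /* stands in for mod_timer()/del_timer() */
};

static void md_mark_dirty(struct md_state *md)
{
	/* Only the first marker after a sync arms the flush timer. */
	if (!atomic_exchange(&md->dirty, true))
		md->timer_armed = true;   /* kernel: mod_timer(..., jiffies + 5*HZ) */
}

static void md_sync(struct md_state *md)
{
	md->timer_armed = false;          /* kernel: del_timer() */
	/* Write only if we are the one clearing the dirty bit. */
	if (!atomic_exchange(&md->dirty, false))
		return;                   /* nothing to do */
	puts("writing meta data super block");
}

int main(void)
{
	struct md_state md;

	atomic_init(&md.dirty, false);
	md.timer_armed = false;

	md_mark_dirty(&md);
	md_sync(&md);   /* writes */
	md_sync(&md);   /* no-op: dirty bit already clear */
	return 0;
}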
-void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
int i;
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
- mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
+ device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
-void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
- if (mdev->state.role == R_PRIMARY)
+ if (device->state.role == R_PRIMARY)
val |= 1;
else
val &= ~((u64)1);
- drbd_set_ed_uuid(mdev, val);
+ drbd_set_ed_uuid(device, val);
}
- mdev->ldev->md.uuid[idx] = val;
- drbd_md_mark_dirty(mdev);
+ device->ldev->md.uuid[idx] = val;
+ drbd_md_mark_dirty(device);
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
-void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
unsigned long flags;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
- if (mdev->ldev->md.uuid[idx]) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[idx]) {
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
}
- __drbd_uuid_set(mdev, idx, val);
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(device, idx, val);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
* drbd_uuid_new_current() - Creates a new current UUID
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Creates a new current UUID, and rotates the old current UUID into
* the bitmap slot. Causes an incremental resync upon next connect.
*/
-void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
u64 val;
unsigned long long bm_uuid;
get_random_bytes(&val, sizeof(u64));
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
- __drbd_uuid_set(mdev, UI_CURRENT, val);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(device, UI_CURRENT, val);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- drbd_print_uuids(mdev, "new current UUID");
+ drbd_print_uuids(device, "new current UUID");
/* get it to stable storage _now_ */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
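A compressed illustration of the UUID rotation that drbd_uuid_new_current() performs above: the old current UUID moves into the bitmap slot, a fresh random value becomes current, and (as in __drbd_uuid_set()) the primary role is encoded in bit 0. This is a simplified user-space model with no locking, no history slots and no on-disk sync; rand() stands in for get_random_bytes().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

enum { UI_CURRENT, UI_BITMAP, UI_HISTORY_START, UI_HISTORY_END, UI_SIZE };

static void uuid_new_current(uint64_t uuid[UI_SIZE], bool primary)
{
	uint64_t val = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

	if (uuid[UI_BITMAP])
		fprintf(stderr, "bm UUID was already set: %llX\n",
			(unsigned long long)uuid[UI_BITMAP]);

	uuid[UI_BITMAP] = uuid[UI_CURRENT];   /* old current -> bitmap slot */
	if (primary)                          /* role lives in bit 0 */
		val |= 1;
	else
		val &= ~(uint64_t)1;
	uuid[UI_CURRENT] = val;
}

int main(void)
{
	uint64_t uuid[UI_SIZE] = { 0xAULL, 0, 0, 0 };

	uuid_new_current(uuid, true);
	printf("current=%llX bitmap=%llX\n",
	       (unsigned long long)uuid[UI_CURRENT],
	       (unsigned long long)uuid[UI_BITMAP]);
	return 0;
}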
-void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
+void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
- spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
- mdev->ldev->md.uuid[UI_BITMAP] = 0;
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+ device->ldev->md.uuid[UI_BITMAP] = 0;
} else {
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
- dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
+ drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
- mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
+ device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
- spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
- drbd_md_mark_dirty(mdev);
+ drbd_md_mark_dirty(device);
}
/**
* drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Sets all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_set_n_write(struct drbd_conf *mdev)
+int drbd_bmio_set_n_write(struct drbd_device *device)
{
int rv = -EIO;
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
- drbd_bm_set_all(mdev);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
+ drbd_bm_set_all(device);
- rv = drbd_bm_write(mdev);
+ rv = drbd_bm_write(device);
if (!rv) {
- drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ drbd_md_clear_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
+ put_ldev(device);
}
return rv;
@@ -3335,19 +3476,19 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
/**
* drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Clears all bits in the bitmap and writes the whole bitmap to stable storage.
*/
-int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
+int drbd_bmio_clear_n_write(struct drbd_device *device)
{
int rv = -EIO;
- drbd_resume_al(mdev);
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- drbd_bm_clear_all(mdev);
- rv = drbd_bm_write(mdev);
- put_ldev(mdev);
+ drbd_resume_al(device);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ drbd_bm_clear_all(device);
+ rv = drbd_bm_write(device);
+ put_ldev(device);
}
return rv;
@@ -3355,50 +3496,52 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
static int w_bitmap_io(struct drbd_work *w, int unused)
{
- struct bm_io_work *work = container_of(w, struct bm_io_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, bm_io_work.w);
+ struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
- D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
+ D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
- if (get_ldev(mdev)) {
- drbd_bm_lock(mdev, work->why, work->flags);
- rv = work->io_fn(mdev);
- drbd_bm_unlock(mdev);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_bm_lock(device, work->why, work->flags);
+ rv = work->io_fn(device);
+ drbd_bm_unlock(device);
+ put_ldev(device);
}
- clear_bit_unlock(BITMAP_IO, &mdev->flags);
- wake_up(&mdev->misc_wait);
+ clear_bit_unlock(BITMAP_IO, &device->flags);
+ wake_up(&device->misc_wait);
if (work->done)
- work->done(mdev, rv);
+ work->done(device, rv);
- clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ clear_bit(BITMAP_IO_QUEUED, &device->flags);
work->why = NULL;
work->flags = 0;
return 0;
}
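A recurring change in this hunk is recovering the owning device from an embedded work item via container_of() instead of the removed w->mdev back-pointer. A self-contained sketch of that idiom, with hypothetical structure names standing in for struct drbd_device and its bm_io_work member:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct my_device {                 /* stands in for struct drbd_device */
	const char *name;
	struct work bm_io_work;    /* work item embedded in the device */
};

static void handle_work(struct work *w)
{
	/* Recover the enclosing device from the embedded member. */
	struct my_device *dev = container_of(w, struct my_device, bm_io_work);

	printf("work for device %s\n", dev->name);
}

int main(void)
{
	struct my_device dev = { .name = "minor-0" };

	handle_work(&dev.bm_io_work);
	return 0;
}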
-void drbd_ldev_destroy(struct drbd_conf *mdev)
+void drbd_ldev_destroy(struct drbd_device *device)
{
- lc_destroy(mdev->resync);
- mdev->resync = NULL;
- lc_destroy(mdev->act_log);
- mdev->act_log = NULL;
+ lc_destroy(device->resync);
+ device->resync = NULL;
+ lc_destroy(device->act_log);
+ device->act_log = NULL;
__no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ drbd_free_bc(device->ldev);
+ device->ldev = NULL;);
- clear_bit(GO_DISKLESS, &mdev->flags);
+ clear_bit(GO_DISKLESS, &device->flags);
}
static int w_go_diskless(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, go_diskless);
- D_ASSERT(mdev->state.disk == D_FAILED);
+ D_ASSERT(device, device->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
@@ -3417,27 +3560,27 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* We still need to check if both bitmap and ldev are present, we may
* end up here after a failed attach, before ldev was even assigned.
*/
- if (mdev->bitmap && mdev->ldev) {
+ if (device->bitmap && device->ldev) {
/* An interrupted resync or similar is allowed to recount bits
* while we detach.
* Any modifications would not be expected anymore, though.
*/
- if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
"detach", BM_LOCKED_TEST_ALLOWED)) {
- if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
- drbd_md_set_flag(mdev, MDF_FULL_SYNC);
- drbd_md_sync(mdev);
+ if (test_bit(WAS_READ_ERROR, &device->flags)) {
+ drbd_md_set_flag(device, MDF_FULL_SYNC);
+ drbd_md_sync(device);
}
}
}
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
+ drbd_force_state(device, NS(disk, D_DISKLESS));
return 0;
}
/**
* drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @done: callback to be called after the bitmap IO was performed
* @why: Descriptive text of the reason for doing the IO
@@ -3447,76 +3590,77 @@ static int w_go_diskless(struct drbd_work *w, int unused)
* called from worker context. It MUST NOT be used while a previous such
* work is still pending!
*/
-void drbd_queue_bitmap_io(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- void (*done)(struct drbd_conf *, int),
+void drbd_queue_bitmap_io(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
+ void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->tconn->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
- D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
- D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
- D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
- if (mdev->bm_io_work.why)
- dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
- why, mdev->bm_io_work.why);
+ D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
+ D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
+ D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
+ if (device->bm_io_work.why)
+ drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
+ why, device->bm_io_work.why);
- mdev->bm_io_work.io_fn = io_fn;
- mdev->bm_io_work.done = done;
- mdev->bm_io_work.why = why;
- mdev->bm_io_work.flags = flags;
+ device->bm_io_work.io_fn = io_fn;
+ device->bm_io_work.done = done;
+ device->bm_io_work.why = why;
+ device->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->tconn->req_lock);
- set_bit(BITMAP_IO, &mdev->flags);
- if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
+ spin_lock_irq(&device->resource->req_lock);
+ set_bit(BITMAP_IO, &device->flags);
+ if (atomic_read(&device->ap_bio_cnt) == 0) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->bm_io_work.w);
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
/**
* drbd_bitmap_io() - Does an IO operation on the whole bitmap
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @io_fn: IO callback to be called when bitmap IO is possible
* @why: Descriptive text of the reason for doing the IO
*
 * freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
*/
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
- D_ASSERT(current != mdev->tconn->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_suspend_io(mdev);
+ drbd_suspend_io(device);
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
+ drbd_bm_lock(device, why, flags);
+ rv = io_fn(device);
+ drbd_bm_unlock(device);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return rv;
}
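As an aside, the bracketing that drbd_bitmap_io() performs above can be modelled in a few lines of user-space C: suspend application IO (unless the caller passed BM_LOCKED_SET_ALLOWED), take the bitmap lock, run the supplied io_fn, unlock, resume. A plain mutex and an int flag stand in for the bitmap lock and the SUSPEND_IO bit; the flag value and names are placeholders.

#include <pthread.h>
#include <stdio.h>

#define BM_LOCKED_SET_ALLOWED 0x1   /* placeholder flag value */

struct dev {
	pthread_mutex_t bm_lock;
	int io_suspended;
};

static int bitmap_io(struct dev *d, int (*io_fn)(struct dev *),
		     const char *why, unsigned flags)
{
	int rv;

	if (!(flags & BM_LOCKED_SET_ALLOWED))
		d->io_suspended = 1;            /* drbd_suspend_io() */

	pthread_mutex_lock(&d->bm_lock);        /* drbd_bm_lock(why, flags) */
	rv = io_fn(d);
	pthread_mutex_unlock(&d->bm_lock);      /* drbd_bm_unlock() */

	if (!(flags & BM_LOCKED_SET_ALLOWED))
		d->io_suspended = 0;            /* drbd_resume_io() */

	printf("%s -> %d\n", why, rv);
	return rv;
}

static int write_whole_bitmap(struct dev *d) { (void)d; return 0; }

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0 };

	return bitmap_io(&d, write_whole_bitmap, "size changed", 0);
}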
-void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != flag) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags |= flag;
+ if ((device->ldev->md.flags & flag) != flag) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags |= flag;
}
}
-void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
- if ((mdev->ldev->md.flags & flag) != 0) {
- drbd_md_mark_dirty(mdev);
- mdev->ldev->md.flags &= ~flag;
+ if ((device->ldev->md.flags & flag) != 0) {
+ drbd_md_mark_dirty(device);
+ device->ldev->md.flags &= ~flag;
}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
@@ -3526,23 +3670,25 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
/* must not double-queue! */
- if (list_empty(&mdev->md_sync_work.list))
- drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
+ if (list_empty(&device->md_sync_work.list))
+ drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
+ &device->md_sync_work);
}
static int w_md_sync(struct drbd_work *w, int unused)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, md_sync_work);
- dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
+ drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
- dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
- mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
+ drbd_warn(device, "last md_mark_dirty: %s:%u\n",
+ device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
#endif
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
return 0;
}
@@ -3618,18 +3764,18 @@ const char *cmdname(enum drbd_packet cmd)
/**
* drbd_wait_misc - wait for a request to make progress
- * @mdev: device associated with the request
+ * @device: device associated with the request
* @i: the struct drbd_interval embedded in struct drbd_request or
* struct drbd_peer_request
*/
-int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
struct net_conf *nc;
DEFINE_WAIT(wait);
long timeout;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -ETIMEDOUT;
@@ -3637,14 +3783,14 @@ int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
rcu_read_unlock();
- /* Indicate to wake up mdev->misc_wait on progress. */
+ /* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&device->resource->req_lock);
timeout = schedule_timeout(timeout);
- finish_wait(&mdev->misc_wait, &wait);
- spin_lock_irq(&mdev->tconn->req_lock);
- if (!timeout || mdev->state.conn < C_CONNECTED)
+ finish_wait(&device->misc_wait, &wait);
+ spin_lock_irq(&device->resource->req_lock);
+ if (!timeout || device->state.conn < C_CONNECTED)
return -ETIMEDOUT;
if (signal_pending(current))
return -ERESTARTSYS;
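The timeout computed above is nc->timeout (in tenths of a second) converted to jiffies and multiplied by ko_count, or an unbounded wait when ko_count is zero. A quick worked example of that arithmetic; the HZ value and the nc->* numbers here are illustrative, not taken from the patch.

#include <stdio.h>
#include <limits.h>

#define HZ 250                          /* assumed tick rate for the example */
#define MAX_SCHEDULE_TIMEOUT LONG_MAX

int main(void)
{
	unsigned int timeout_ds = 60;   /* nc->timeout: 6.0 s, in deciseconds */
	unsigned int ko_count   = 7;    /* nc->ko_count */
	long t = ko_count ? (long)timeout_ds * HZ / 10 * ko_count
			  : MAX_SCHEDULE_TIMEOUT;

	printf("wait %ld jiffies (%.1f s)\n", t, (double)t / HZ);
	return 0;
}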
@@ -3700,20 +3846,20 @@ _drbd_fault_str(unsigned int type) {
}
unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
+_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
(fault_devs == 0 ||
- ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
+ ((1 << device_to_minor(device)) & fault_devs) != 0) &&
(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
if (ret) {
fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "***Simulating %s failure\n",
+ drbd_warn(device, "***Simulating %s failure\n",
_drbd_fault_str(type));
}
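Before moving on to drbd_nl.c: the fault-injection decision in _drbd_insert_fault() above injects a fault when the device's minor is selected by the fault_devs bitmask (or the mask is empty, meaning all minors) and a pseudo-random draw in 1..100 falls within fault_rate. A standalone sketch of that decision, with rand() standing in for the driver's fault_random state:

#include <stdio.h>
#include <stdlib.h>

static unsigned fault_devs = 0;    /* 0 means "all minors" */
static unsigned fault_rate = 10;   /* percent */

static int insert_fault(unsigned minor)
{
	int dev_selected = fault_devs == 0 ||
			   ((1u << minor) & fault_devs) != 0;
	int dice = rand() % 100 + 1;   /* 1..100 */

	return dev_selected && dice <= fault_rate;
}

int main(void)
{
	unsigned hits = 0;

	for (int i = 0; i < 1000; i++)
		hits += insert_fault(0);
	printf("injected %u faults in 1000 tries (~%u%% requested)\n",
	       hits, fault_rate);
	return 0;
}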
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c706d50a8b06..526414bc2cab 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -32,6 +32,7 @@
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
@@ -44,8 +45,8 @@
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
@@ -102,8 +103,9 @@ static struct drbd_config_context {
/* pointer into reply buffer */
struct drbd_genlmsghdr *reply_dh;
/* resolved from attributes, if possible */
- struct drbd_conf *mdev;
- struct drbd_tconn *tconn;
+ struct drbd_device *device;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
@@ -202,62 +204,67 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
if ((adm_ctx.my_addr &&
- nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
(adm_ctx.peer_addr &&
- nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
err = -EINVAL;
goto fail;
}
}
adm_ctx.minor = d_in->minor;
- adm_ctx.mdev = minor_to_mdev(d_in->minor);
- adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+ adm_ctx.device = minor_to_device(d_in->minor);
+ if (adm_ctx.resource_name) {
+ adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name);
+ }
- if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+ if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
drbd_msg_put_info("unknown minor");
return ERR_MINOR_INVALID;
}
- if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
drbd_msg_put_info("unknown resource");
+ if (adm_ctx.resource_name)
+ return ERR_RES_NOT_KNOWN;
return ERR_INVALID_REQUEST;
}
if (flags & DRBD_ADM_NEED_CONNECTION) {
- if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ if (adm_ctx.resource) {
drbd_msg_put_info("no resource name expected");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.mdev) {
+ if (adm_ctx.device) {
drbd_msg_put_info("no minor number expected");
return ERR_INVALID_REQUEST;
}
if (adm_ctx.my_addr && adm_ctx.peer_addr)
- adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
nla_len(adm_ctx.my_addr),
nla_data(adm_ctx.peer_addr),
nla_len(adm_ctx.peer_addr));
- if (!adm_ctx.tconn) {
+ if (!adm_ctx.connection) {
drbd_msg_put_info("unknown connection");
return ERR_INVALID_REQUEST;
}
}
/* some more paranoia, if the request was over-determined */
- if (adm_ctx.mdev && adm_ctx.tconn &&
- adm_ctx.mdev->tconn != adm_ctx.tconn) {
- pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
- adm_ctx.minor, adm_ctx.resource_name,
- adm_ctx.mdev->tconn->name);
+ if (adm_ctx.device && adm_ctx.resource &&
+ adm_ctx.device->resource != adm_ctx.resource) {
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
+ adm_ctx.minor, adm_ctx.resource->name,
+ adm_ctx.device->resource->name);
drbd_msg_put_info("minor exists in different resource");
return ERR_INVALID_REQUEST;
}
- if (adm_ctx.mdev &&
+ if (adm_ctx.device &&
adm_ctx.volume != VOLUME_UNSPECIFIED &&
- adm_ctx.volume != adm_ctx.mdev->vnr) {
+ adm_ctx.volume != adm_ctx.device->vnr) {
pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
adm_ctx.minor, adm_ctx.volume,
- adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+ adm_ctx.device->vnr,
+ adm_ctx.device->resource->name);
drbd_msg_put_info("minor exists as different volume");
return ERR_INVALID_REQUEST;
}
@@ -272,9 +279,13 @@ fail:
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
- if (adm_ctx.tconn) {
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
- adm_ctx.tconn = NULL;
+ if (adm_ctx.connection) {
+ kref_put(&adm_ctx.connection->kref, drbd_destroy_connection);
+ adm_ctx.connection = NULL;
+ }
+ if (adm_ctx.resource) {
+ kref_put(&adm_ctx.resource->kref, drbd_destroy_resource);
+ adm_ctx.resource = NULL;
}
if (!adm_ctx.reply_skb)
@@ -285,34 +296,34 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
return 0;
}
-static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
char *afs;
/* FIXME: A future version will not allow this case. */
- if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
return;
- switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
case AF_INET6:
afs = "ipv6";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+ &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
break;
case AF_INET:
afs = "ipv4";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
break;
default:
afs = "ssocks";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
}
snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
-int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+int drbd_khelper(struct drbd_device *device, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -322,39 +333,39 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
NULL };
char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
- if (current == tconn->worker.task)
- set_bit(CALLBACK_PENDING, &tconn->flags);
+ if (current == connection->worker.task)
+ set_bit(CALLBACK_PENDING, &connection->flags);
- snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
- setup_khelper_env(tconn, envp);
+ snprintf(mb, 12, "minor-%d", device_to_minor(device));
+ setup_khelper_env(connection, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+ drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
- dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+ drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
- dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+ drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
- if (current == tconn->worker.task)
- clear_bit(CALLBACK_PENDING, &tconn->flags);
+ if (current == connection->worker.task)
+ clear_bit(CALLBACK_PENDING, &connection->flags);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -362,7 +373,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
return ret;
}
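drbd_khelper() above builds a fixed argv of {helper, cmd, "minor-<n>"} plus a small environment, runs the helper synchronously, and reports (ret >> 8) & 0xff, i.e. the helper's exit code. A user-space approximation of that call using fork/execve/waitpid; the helper path and command below are hypothetical, and in the real code setup_khelper_env() additionally fills in the DRBD_PEER_AF / DRBD_PEER_ADDRESS entries shown earlier.

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_helper(const char *helper, const char *cmd, const char *minor)
{
	char *const argv[] = { (char *)helper, (char *)cmd, (char *)minor, NULL };
	char *const envp[] = { "HOME=/", "TERM=linux", NULL };
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		execve(helper, argv, envp);
		_exit(127);                     /* exec failed */
	}
	if (pid < 0 || waitpid(pid, &status, 0) < 0)
		return -1;
	return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main(void)
{
	/* Hypothetical helper and command, for illustration only. */
	int rc = run_helper("/usr/lib/drbd/example-handler", "fence-peer", "minor-0");

	printf("helper exit code: %d\n", rc);
	return 0;
}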
-int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -370,23 +381,24 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
- char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+ char *resource_name = connection->resource->name;
+ char *argv[] = {usermode_helper, cmd, resource_name, NULL };
int ret;
- setup_khelper_env(tconn, envp);
- conn_md_sync(tconn);
+ setup_khelper_env(connection, envp);
+ conn_md_sync(connection);
- conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+ drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
- conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, tconn->name,
+ drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
- conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, tconn->name,
+ drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
@@ -396,18 +408,20 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
return ret;
}
-static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
enum drbd_fencing_p fp = FP_NOT_AVAIL;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (get_ldev_if_state(mdev, D_CONSISTENT)) {
- fp = max_t(enum drbd_fencing_p, fp,
- rcu_dereference(mdev->ldev->disk_conf)->fencing);
- put_ldev(mdev);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (get_ldev_if_state(device, D_CONSISTENT)) {
+ struct disk_conf *disk_conf =
+ rcu_dereference(peer_device->device->ldev->disk_conf);
+ fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
+ put_ldev(device);
}
}
rcu_read_unlock();
@@ -415,7 +429,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
return fp;
}
-bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+bool conn_try_outdate_peer(struct drbd_connection *connection)
{
unsigned int connect_cnt;
union drbd_state mask = { };
@@ -424,26 +438,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
char *ex_to_string;
int r;
- if (tconn->cstate >= C_WF_REPORT_PARAMS) {
- conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ if (connection->cstate >= C_WF_REPORT_PARAMS) {
+ drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
return false;
}
- spin_lock_irq(&tconn->req_lock);
- connect_cnt = tconn->connect_cnt;
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ connect_cnt = connection->connect_cnt;
+ spin_unlock_irq(&connection->resource->req_lock);
- fp = highest_fencing_policy(tconn);
+ fp = highest_fencing_policy(connection);
switch (fp) {
case FP_NOT_AVAIL:
- conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
+ drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
goto out;
case FP_DONT_CARE:
return true;
default: ;
}
- r = conn_khelper(tconn, "fence-peer");
+ r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
@@ -457,7 +471,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
val.pdsk = D_OUTDATED;
break;
case 5: /* peer was down */
- if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
+ if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
mask.pdsk = D_MASK;
@@ -470,70 +484,70 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
- conn_warn(tconn, "Peer is primary, outdating myself.\n");
+ drbd_warn(connection, "Peer is primary, outdating myself.\n");
mask.disk = D_MASK;
val.disk = D_OUTDATED;
break;
case 7:
if (fp != FP_STONITH)
- conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
+ drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
- conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+ drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
return false; /* Eventually leave IO frozen */
}
- conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+ drbd_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string);
out:
/* Not using
- conn_request_state(tconn, mask, val, CS_VERBOSE);
+ conn_request_state(connection, mask, val, CS_VERBOSE);
   here, because we might have been able to re-establish the connection in the
meantime. */
- spin_lock_irq(&tconn->req_lock);
- if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
- if (tconn->connect_cnt != connect_cnt)
+ spin_lock_irq(&connection->resource->req_lock);
+ if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
+ if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
while the fence-peer handler was running, ignore it */
- conn_info(tconn, "Ignoring fence-peer exit code\n");
+ drbd_info(connection, "Ignoring fence-peer exit code\n");
else
- _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ _conn_request_state(connection, mask, val, CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
- return conn_highest_pdsk(tconn) <= D_OUTDATED;
+ return conn_highest_pdsk(connection) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
- struct drbd_tconn *tconn = (struct drbd_tconn *)data;
+ struct drbd_connection *connection = (struct drbd_connection *)data;
- conn_try_outdate_peer(tconn);
+ conn_try_outdate_peer(connection);
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&connection->kref, drbd_destroy_connection);
return 0;
}
-void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
+void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
struct task_struct *opa;
- kref_get(&tconn->kref);
- opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ kref_get(&connection->kref);
+ opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
- conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
- kref_put(&tconn->kref, &conn_destroy);
+ drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
+ kref_put(&connection->kref, drbd_destroy_connection);
}
}
enum drbd_state_rv
-drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
@@ -542,16 +556,24 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
int forced = 0;
union drbd_state mask, val;
- if (new_role == R_PRIMARY)
- request_ping(mdev->tconn); /* Detect a dead peer ASAP */
+ if (new_role == R_PRIMARY) {
+ struct drbd_connection *connection;
- mutex_lock(mdev->state_mutex);
+ /* Detect dead peers as soon as possible. */
+
+ rcu_read_lock();
+ for_each_connection(connection, device->resource)
+ request_ping(connection);
+ rcu_read_unlock();
+ }
+
+ mutex_lock(device->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
- rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+ rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
@@ -562,8 +584,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
if (rv == SS_NO_UP_TO_DATE_DISK && force &&
- (mdev->state.disk < D_UP_TO_DATE &&
- mdev->state.disk >= D_INCONSISTENT)) {
+ (device->state.disk < D_UP_TO_DATE &&
+ device->state.disk >= D_INCONSISTENT)) {
mask.disk = D_MASK;
val.disk = D_UP_TO_DATE;
forced = 1;
@@ -571,10 +593,10 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
}
if (rv == SS_NO_UP_TO_DATE_DISK &&
- mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
- D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+ device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
+ D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
- if (conn_try_outdate_peer(mdev->tconn)) {
+ if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
@@ -584,8 +606,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (rv == SS_NOTHING_TO_DO)
goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
- if (!conn_try_outdate_peer(mdev->tconn) && force) {
- dev_warn(DEV, "Forced into split brain situation!\n");
+ if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
+ drbd_warn(device, "Forced into split brain situation!\n");
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
@@ -597,7 +619,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
retry at most once more in this case. */
int timeo;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
@@ -606,7 +628,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
continue;
}
if (rv < SS_SUCCESS) {
- rv = _drbd_request_state(mdev, mask, val,
+ rv = _drbd_request_state(device, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
goto out;
@@ -618,53 +640,53 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
goto out;
if (forced)
- dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
+ drbd_warn(device, "Forced to consider local data as UpToDate!\n");
/* Wait until nothing is on the fly :) */
- wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
/* FIXME also wait for all pending P_BARRIER_ACK? */
if (new_role == R_SECONDARY) {
- set_disk_ro(mdev->vdisk, true);
- if (get_ldev(mdev)) {
- mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
- put_ldev(mdev);
+ set_disk_ro(device->vdisk, true);
+ if (get_ldev(device)) {
+ device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+ put_ldev(device);
}
} else {
- mutex_lock(&mdev->tconn->conf_update);
- nc = mdev->tconn->net_conf;
+ mutex_lock(&device->resource->conf_update);
+ nc = first_peer_device(device)->connection->net_conf;
if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
- set_disk_ro(mdev->vdisk, false);
- if (get_ldev(mdev)) {
- if (((mdev->state.conn < C_CONNECTED ||
- mdev->state.pdsk <= D_FAILED)
- && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
- drbd_uuid_new_current(mdev);
+ set_disk_ro(device->vdisk, false);
+ if (get_ldev(device)) {
+ if (((device->state.conn < C_CONNECTED ||
+ device->state.pdsk <= D_FAILED)
+ && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
+ drbd_uuid_new_current(device);
- mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
- put_ldev(mdev);
+ device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
+ put_ldev(device);
}
}
/* writeout of activity log covered areas of the bitmap
* to stable storage done in after state change already */
- if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
+ if (device->state.conn >= C_WF_REPORT_PARAMS) {
/* if this was forced, we should consider sync */
if (forced)
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_current_state(first_peer_device(device));
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return rv;
}
@@ -699,9 +721,9 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
}
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
- retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+ retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
- retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+ retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -728,7 +750,7 @@ out:
* Activity log size used to be fixed 32kB,
* but is about to become configurable.
*/
-static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
+static void drbd_md_set_sector_offsets(struct drbd_device *device,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
@@ -804,35 +826,35 @@ char *ppsize(char *buf, unsigned long long size)
* drbd_adm_suspend_io/drbd_adm_resume_io,
* which are (sub) state changes triggered by admin (drbdsetup),
* and can be long lived.
- * This changes an mdev->flag, is triggered by drbd internals,
+ * This changes a device->flag, is triggered by drbd internals,
* and should be short-lived. */
-void drbd_suspend_io(struct drbd_conf *mdev)
+void drbd_suspend_io(struct drbd_device *device)
{
- set_bit(SUSPEND_IO, &mdev->flags);
- if (drbd_suspended(mdev))
+ set_bit(SUSPEND_IO, &device->flags);
+ if (drbd_suspended(device))
return;
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
+ wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}
-void drbd_resume_io(struct drbd_conf *mdev)
+void drbd_resume_io(struct drbd_device *device)
{
- clear_bit(SUSPEND_IO, &mdev->flags);
- wake_up(&mdev->misc_wait);
+ clear_bit(SUSPEND_IO, &device->flags);
+ wake_up(&device->misc_wait);
}
/**
* drbd_determine_dev_size() - Sets the right device size obeying all constraints
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
enum determine_dev_size
-drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
+drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size_sect, u_size;
- struct drbd_md *md = &mdev->ldev->md;
+ struct drbd_md *md = &device->ldev->md;
u32 prev_al_stripe_size_4k;
u32 prev_al_stripes;
sector_t size;
@@ -851,19 +873,19 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
* Suspend IO right here.
* still lock the act_log to not trigger ASSERTs there.
*/
- drbd_suspend_io(mdev);
- buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
+ drbd_suspend_io(device);
+ buffer = drbd_md_get_buffer(device); /* Lock meta-data IO */
if (!buffer) {
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return DS_ERROR;
}
/* no wait necessary anymore, actually we could assert that */
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
- prev_first_sect = drbd_md_first_sector(mdev->ldev);
- prev_size = mdev->ldev->md.md_size_sect;
- la_size_sect = mdev->ldev->md.la_size_sect;
+ prev_first_sect = drbd_md_first_sector(device->ldev);
+ prev_size = device->ldev->md.md_size_sect;
+ la_size_sect = device->ldev->md.la_size_sect;
if (rs) {
/* rs is non NULL if we should change the AL layout only */
@@ -876,18 +898,18 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
}
- drbd_md_set_sector_offsets(mdev, mdev->ldev);
+ drbd_md_set_sector_offsets(device, device->ldev);
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
+ size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
if (size < la_size_sect) {
if (rs && u_size == 0) {
/* Remove "rs &&" later. This check should always be active, but
right now the receiver expects the permissive behavior */
- dev_warn(DEV, "Implicit shrink not allowed. "
+ drbd_warn(device, "Implicit shrink not allowed. "
"Use --size=%llus for explicit shrink.\n",
(unsigned long long)size);
rv = DS_ERROR_SHRINK;
@@ -898,60 +920,60 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
goto err_out;
}
- if (drbd_get_capacity(mdev->this_bdev) != size ||
- drbd_bm_capacity(mdev) != size) {
+ if (drbd_get_capacity(device->this_bdev) != size ||
+ drbd_bm_capacity(device) != size) {
int err;
- err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
+ err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
- size = drbd_bm_capacity(mdev)>>1;
+ size = drbd_bm_capacity(device)>>1;
if (size == 0) {
- dev_err(DEV, "OUT OF MEMORY! "
+ drbd_err(device, "OUT OF MEMORY! "
"Could not allocate bitmap!\n");
} else {
- dev_err(DEV, "BM resizing failed. "
+ drbd_err(device, "BM resizing failed. "
"Leaving size unchanged at size = %lu KB\n",
(unsigned long)size);
}
rv = DS_ERROR;
}
/* racy, see comments above. */
- drbd_set_my_capacity(mdev, size);
- mdev->ldev->md.la_size_sect = size;
- dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
+ drbd_set_my_capacity(device, size);
+ device->ldev->md.la_size_sect = size;
+ drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
(unsigned long long)size>>1);
}
if (rv <= DS_ERROR)
goto err_out;
- la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
+ la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);
- md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
- || prev_size != mdev->ldev->md.md_size_sect;
+ md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
+ || prev_size != device->ldev->md.md_size_sect;
if (la_size_changed || md_moved || rs) {
u32 prev_flags;
- drbd_al_shrink(mdev); /* All extents inactive. */
+ drbd_al_shrink(device); /* All extents inactive. */
prev_flags = md->flags;
md->flags &= ~MDF_PRIMARY_IND;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
- dev_info(DEV, "Writing the whole bitmap, %s\n",
+ drbd_info(device, "Writing the whole bitmap, %s\n",
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
- drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+ drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
"size changed", BM_LOCKED_MASK);
- drbd_initialize_al(mdev, buffer);
+ drbd_initialize_al(device, buffer);
md->flags = prev_flags;
- drbd_md_write(mdev, buffer);
+ drbd_md_write(device, buffer);
if (rs)
- dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
- md->al_stripes, md->al_stripe_size_4k * 4);
+ drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
+ md->al_stripes, md->al_stripe_size_4k * 4);
}
if (size > la_size_sect)
@@ -966,30 +988,30 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
md->al_stripe_size_4k = prev_al_stripe_size_4k;
md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
- drbd_md_set_sector_offsets(mdev, mdev->ldev);
+ drbd_md_set_sector_offsets(device, device->ldev);
}
}
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
- drbd_md_put_buffer(mdev);
- drbd_resume_io(mdev);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
+ drbd_md_put_buffer(device);
+ drbd_resume_io(device);
return rv;
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
- sector_t p_size = mdev->p_size; /* partner's disk size. */
+ sector_t p_size = device->p_size; /* partner's disk size. */
sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
- if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
- dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+ if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
+ drbd_warn(device, "Resize while not connected was forced by the user!\n");
p_size = m_size;
}
@@ -1011,11 +1033,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
}
if (size == 0)
- dev_err(DEV, "Both nodes diskless!\n");
+ drbd_err(device, "Both nodes diskless!\n");
if (u_size) {
if (u_size > size)
- dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
+ drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
(unsigned long)u_size>>1, (unsigned long)size>>1);
else
size = u_size;
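The visible part of drbd_new_dev_size() above takes the partner's reported size and the local backing capacity, substitutes the local size for the peer's when a disconnected resize is forced, and finally lets a non-zero user-configured size win as long as it does not exceed the computed size. A small standalone sketch of that decision; the min() of the two node sizes in the elided middle of the function is an assumption, not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

static sector_t new_dev_size(sector_t p_size, sector_t m_size,
			     sector_t u_size, int connected, int force)
{
	sector_t size;

	if (!connected && force) {
		fprintf(stderr, "Resize while not connected was forced by the user!\n");
		p_size = m_size;        /* pretend the peer matches us */
	}

	/* Assumed from the elided middle: agree on the smaller of the two,
	 * or whichever side is known when the other is 0. */
	if (p_size && m_size)
		size = p_size < m_size ? p_size : m_size;
	else
		size = p_size ? p_size : m_size;

	if (size == 0)
		fprintf(stderr, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			fprintf(stderr, "Requested disk size is too big (%llu > %llu)\n",
				(unsigned long long)u_size >> 1,
				(unsigned long long)size >> 1);
		else
			size = u_size;
	}
	return size;
}

int main(void)
{
	/* 16 GiB local, 20 GiB peer, no user override, connected. */
	printf("%llu sectors\n",
	       (unsigned long long)new_dev_size(41943040, 33554432, 0, 1, 0));
	return 0;
}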
@@ -1026,71 +1048,71 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
/**
* drbd_check_al_size() - Ensures that the AL is of the right size
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
-static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
+static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
- if (mdev->act_log &&
- mdev->act_log->nr_elements == dc->al_extents)
+ if (device->act_log &&
+ device->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
- t = mdev->act_log;
+ t = device->act_log;
n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
- dev_err(DEV, "Cannot allocate act_log lru!\n");
+ drbd_err(device, "Cannot allocate act_log lru!\n");
return -ENOMEM;
}
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
if (t) {
for (i = 0; i < t->nr_elements; i++) {
e = lc_element_by_index(t, i);
if (e->refcnt)
- dev_err(DEV, "refcnt(%d)==%d\n",
+ drbd_err(device, "refcnt(%d)==%d\n",
e->lc_number, e->refcnt);
in_use += e->refcnt;
}
}
if (!in_use)
- mdev->act_log = n;
- spin_unlock_irq(&mdev->al_lock);
+ device->act_log = n;
+ spin_unlock_irq(&device->al_lock);
if (in_use) {
- dev_err(DEV, "Activity log still in use!\n");
+ drbd_err(device, "Activity log still in use!\n");
lc_destroy(n);
return -EBUSY;
} else {
if (t)
lc_destroy(t);
}
- drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
+ drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elemens */
return 0;
}
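drbd_check_al_size() above allocates a replacement activity-log cache, then swaps it in under the lock only if no element of the old cache is still referenced, returning -EBUSY otherwise. A simplified user-space model of that swap-if-unused step — there is no real lru_cache here, just per-element refcount bookkeeping, and all names are placeholders.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cache {
	unsigned nr_elements;
	unsigned *refcnt;          /* one counter per element */
};

static struct cache *cache_create(unsigned nr)
{
	struct cache *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->nr_elements = nr;
	c->refcnt = calloc(nr, sizeof(*c->refcnt));
	if (!c->refcnt) {
		free(c);
		return NULL;
	}
	return c;
}

static void cache_destroy(struct cache *c)
{
	if (c) {
		free(c->refcnt);
		free(c);
	}
}

/* Replace *cur by a cache with new_nr elements, unless *cur is in use. */
static int check_al_size(struct cache **cur, unsigned new_nr)
{
	struct cache *n, *t = *cur;
	unsigned in_use = 0, i;

	if (t && t->nr_elements == new_nr)
		return 0;                       /* nothing to do */

	n = cache_create(new_nr);
	if (!n)
		return -ENOMEM;

	if (t)
		for (i = 0; i < t->nr_elements; i++)
			in_use += t->refcnt[i];

	if (in_use) {
		fprintf(stderr, "Activity log still in use!\n");
		cache_destroy(n);
		return -EBUSY;
	}
	*cur = n;
	cache_destroy(t);
	return 0;
}

int main(void)
{
	struct cache *al = NULL;

	printf("resize: %d\n", check_al_size(&al, 1237));
	cache_destroy(al);
	return 0;
}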
-static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
+static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size)
{
- struct request_queue * const q = mdev->rq_queue;
+ struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
- max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+ max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
rcu_read_unlock();
- put_ldev(mdev);
+ put_ldev(device);
}
blk_queue_logical_block_size(q, 512);
@@ -1099,46 +1121,46 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
blk_queue_stack_limits(q, b);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
- dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
+ drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
q->backing_dev_info.ra_pages,
b->backing_dev_info.ra_pages);
q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
}
- put_ldev(mdev);
+ put_ldev(device);
}
}
-void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
+void drbd_reconsider_max_bio_size(struct drbd_device *device)
{
unsigned int now, new, local, peer;
- now = queue_max_hw_sectors(mdev->rq_queue) << 9;
- local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
- peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
+ now = queue_max_hw_sectors(device->rq_queue) << 9;
+ local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
+ peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
- if (get_ldev_if_state(mdev, D_ATTACHING)) {
- local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
- mdev->local_max_bio_size = local;
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_ATTACHING)) {
+ local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+ device->local_max_bio_size = local;
+ put_ldev(device);
}
local = min(local, DRBD_MAX_BIO_SIZE);
/* We may ignore peer limits if the peer is modern enough.
Because from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
- if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
- if (mdev->tconn->agreed_pro_version < 94)
- peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ if (device->state.conn >= C_WF_REPORT_PARAMS) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 94)
+ peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- else if (mdev->tconn->agreed_pro_version == 94)
+ else if (first_peer_device(device)->connection->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else if (mdev->tconn->agreed_pro_version < 100)
+ else if (first_peer_device(device)->connection->agreed_pro_version < 100)
peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
else
peer = DRBD_MAX_BIO_SIZE;
@@ -1146,57 +1168,57 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
new = min(local, peer);
- if (mdev->state.role == R_PRIMARY && new < now)
- dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
+ if (device->state.role == R_PRIMARY && new < now)
+ drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
if (new != now)
- dev_info(DEV, "max BIO size = %u\n", new);
+ drbd_info(device, "max BIO size = %u\n", new);
- drbd_setup_queue_param(mdev, new);
+ drbd_setup_queue_param(device, new);
}
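
The comments above encode a protocol-version ladder for the peer's maximum BIO size. A condensed sketch of just that selection, using the constants and accessors shown in this hunk (DRBD_MAX_SIZE_H80_PACKET, DRBD_MAX_BIO_SIZE_P95, DRBD_MAX_BIO_SIZE, first_peer_device()); the helper name is illustrative and the code assumes the surrounding DRBD definitions:

/* Illustrative helper: derive the peer's effective max BIO size from the
 * agreed protocol version, following the version ladder described above. */
static unsigned int peer_max_bio_size(struct drbd_device *device)
{
	int apv = first_peer_device(device)->connection->agreed_pro_version;

	if (device->state.conn < C_WF_REPORT_PARAMS)
		return device->peer_max_bio_size;	/* last known value */
	if (apv < 94)	/* up to 8.3.7: one BIO per peer_request */
		return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	if (apv == 94)	/* old drbd may over-report; cap at 32KiB */
		return DRBD_MAX_SIZE_H80_PACKET;
	if (apv < 100)	/* drbd 8.3.8 onwards, before 8.4.0 */
		return DRBD_MAX_BIO_SIZE_P95;
	return DRBD_MAX_BIO_SIZE;
}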
/* Starts the worker thread */
-static void conn_reconfig_start(struct drbd_tconn *tconn)
+static void conn_reconfig_start(struct drbd_connection *connection)
{
- drbd_thread_start(&tconn->worker);
- conn_flush_workqueue(tconn);
+ drbd_thread_start(&connection->worker);
+ drbd_flush_workqueue(&connection->sender_work);
}
/* if still unconfigured, stops worker again. */
-static void conn_reconfig_done(struct drbd_tconn *tconn)
+static void conn_reconfig_done(struct drbd_connection *connection)
{
bool stop_threads;
- spin_lock_irq(&tconn->req_lock);
- stop_threads = conn_all_vols_unconf(tconn) &&
- tconn->cstate == C_STANDALONE;
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ stop_threads = conn_all_vols_unconf(connection) &&
+ connection->cstate == C_STANDALONE;
+ spin_unlock_irq(&connection->resource->req_lock);
if (stop_threads) {
/* asender is implicitly stopped by receiver
* in conn_disconnect() */
- drbd_thread_stop(&tconn->receiver);
- drbd_thread_stop(&tconn->worker);
+ drbd_thread_stop(&connection->receiver);
+ drbd_thread_stop(&connection->worker);
}
}
/* Make sure IO is suspended before calling this function. */
-static void drbd_suspend_al(struct drbd_conf *mdev)
+static void drbd_suspend_al(struct drbd_device *device)
{
int s = 0;
- if (!lc_try_lock(mdev->act_log)) {
- dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
+ if (!lc_try_lock(device->act_log)) {
+ drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
return;
}
- drbd_al_shrink(mdev);
- spin_lock_irq(&mdev->tconn->req_lock);
- if (mdev->state.conn < C_CONNECTED)
- s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
- spin_unlock_irq(&mdev->tconn->req_lock);
- lc_unlock(mdev->act_log);
+ drbd_al_shrink(device);
+ spin_lock_irq(&device->resource->req_lock);
+ if (device->state.conn < C_CONNECTED)
+ s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
+ spin_unlock_irq(&device->resource->req_lock);
+ lc_unlock(device->act_log);
if (s)
- dev_info(DEV, "Suspended AL updates\n");
+ drbd_info(device, "Suspended AL updates\n");
}
@@ -1237,7 +1259,7 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int err, fifo_size;
@@ -1248,11 +1270,11 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* we also need a disk
* to change the options on */
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
@@ -1263,8 +1285,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&device->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
if (should_set_defaults(info))
set_disk_conf_defaults(new_disk_conf);
@@ -1273,6 +1295,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail_unlock;
}
if (!expect(new_disk_conf->resync_rate >= 1))
@@ -1280,29 +1303,29 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
- new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+ if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
+ new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s->size) {
+ if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
+ drbd_err(device, "kmalloc of fifo_buffer failed");
retcode = ERR_NOMEM;
goto fail_unlock;
}
}
- drbd_suspend_io(mdev);
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- drbd_al_shrink(mdev);
- err = drbd_check_al_size(mdev, new_disk_conf);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
- drbd_resume_io(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
+ drbd_al_shrink(device);
+ err = drbd_check_al_size(device, new_disk_conf);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
+ drbd_resume_io(device);
if (err) {
retcode = ERR_NOMEM;
@@ -1310,10 +1333,10 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
}
write_lock_irq(&global_state_lock);
- retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
if (retcode == NO_ERROR) {
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- drbd_resync_after_changed(mdev);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ drbd_resync_after_changed(device);
}
write_unlock_irq(&global_state_lock);
@@ -1321,42 +1344,46 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto fail_unlock;
if (new_plan) {
- old_plan = mdev->rs_plan_s;
- rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ old_plan = device->rs_plan_s;
+ rcu_assign_pointer(device->rs_plan_s, new_plan);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
if (new_disk_conf->al_updates)
- mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
- mdev->ldev->md.flags |= MDF_AL_DISABLED;
+ device->ldev->md.flags |= MDF_AL_DISABLED;
if (new_disk_conf->md_flushes)
- clear_bit(MD_NO_FUA, &mdev->flags);
+ clear_bit(MD_NO_FUA, &device->flags);
else
- set_bit(MD_NO_FUA, &mdev->flags);
+ set_bit(MD_NO_FUA, &device->flags);
- drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+ drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
- if (mdev->state.conn >= C_CONNECTED)
- drbd_send_sync_param(mdev);
+ if (device->state.conn >= C_CONNECTED) {
+ struct drbd_peer_device *peer_device;
+
+ for_each_peer_device(peer_device, device)
+ drbd_send_sync_param(peer_device);
+ }
synchronize_rcu();
kfree(old_disk_conf);
kfree(old_plan);
- mod_timer(&mdev->request_timer, jiffies + HZ);
+ mod_timer(&device->request_timer, jiffies + HZ);
goto success;
fail_unlock:
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&device->resource->conf_update);
fail:
kfree(new_disk_conf);
kfree(new_plan);
success:
- put_ldev(mdev);
+ put_ldev(device);
out:
drbd_adm_finish(info, retcode);
return 0;
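
The update path above follows the usual RCU publish-then-reclaim discipline for ldev->disk_conf and rs_plan_s: install the replacement with rcu_assign_pointer() while holding conf_update, drop the mutex, then synchronize_rcu() before kfree()-ing the old objects. A stripped-down sketch of that pattern, using only names that appear in this diff; the wrapper function itself is illustrative:

/* Illustrative sketch of the publish-then-reclaim sequence used above. */
static void update_disk_conf(struct drbd_device *device,
                             struct disk_conf *new_disk_conf)
{
	struct disk_conf *old_disk_conf;

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
	mutex_unlock(&device->resource->conf_update);

	synchronize_rcu();	/* wait out readers of the old conf */
	kfree(old_disk_conf);	/* now safe to free */
}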
@@ -1364,7 +1391,7 @@ success:
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int err;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
@@ -1385,11 +1412,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto finish;
- mdev = adm_ctx.mdev;
- conn_reconfig_start(mdev->tconn);
+ device = adm_ctx.device;
+ conn_reconfig_start(first_peer_device(device)->connection);
/* if you want to reconfigure, please tear down first */
- if (mdev->state.disk > D_DISKLESS) {
+ if (device->state.disk > D_DISKLESS) {
retcode = ERR_DISK_CONFIGURED;
goto fail;
}
@@ -1397,17 +1424,17 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* drbd_ldev_destroy is done already, we may end up here very fast,
* e.g. if someone calls attach from the on-io-error handler,
* to realize a "hot spare" feature (not that I'd recommend that) */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ wait_event(device->misc_wait, !atomic_read(&device->local_cnt));
/* make sure there is no leftover from previous force-detach attempts */
- clear_bit(FORCE_DETACH, &mdev->flags);
- clear_bit(WAS_IO_ERROR, &mdev->flags);
- clear_bit(WAS_READ_ERROR, &mdev->flags);
+ clear_bit(FORCE_DETACH, &device->flags);
+ clear_bit(WAS_IO_ERROR, &device->flags);
+ clear_bit(WAS_READ_ERROR, &device->flags);
/* and no leftover from previously aborted resync or verify, either */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
/* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1447,13 +1474,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
write_lock_irq(&global_state_lock);
- retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
write_unlock_irq(&global_state_lock);
if (retcode != NO_ERROR)
goto fail;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc) {
if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
rcu_read_unlock();
@@ -1464,9 +1491,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
- FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
+ drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
@@ -1484,9 +1511,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
(new_disk_conf->meta_dev_idx < 0) ?
- (void *)mdev : (void *)drbd_m_holder);
+ (void *)device : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
+ drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
@@ -1510,7 +1537,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Read our meta data super block early.
* This also sets other on-disk offsets. */
- retcode = drbd_md_read(mdev, nbc);
+ retcode = drbd_md_read(device, nbc);
if (retcode != NO_ERROR)
goto fail;
@@ -1520,7 +1547,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
new_disk_conf->al_extents = drbd_al_extents_max(nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
- dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
+ drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
@@ -1538,7 +1565,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
retcode = ERR_MD_DISK_TOO_SMALL;
- dev_warn(DEV, "refusing attach: md-device too small, "
+ drbd_warn(device, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
goto fail;
@@ -1547,7 +1574,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Make sure the new disk is big enough
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
- drbd_get_capacity(mdev->this_bdev)) {
+ drbd_get_capacity(device->this_bdev)) {
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1555,15 +1582,15 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
if (nbc->known_size > max_possible_sectors) {
- dev_warn(DEV, "==> truncating very big lower level device "
+ drbd_warn(device, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
if (new_disk_conf->meta_dev_idx >= 0)
- dev_warn(DEV, "==>> using internal or flexible "
+ drbd_warn(device, "==>> using internal or flexible "
"meta data may help <<==\n");
}
- drbd_suspend_io(mdev);
+ drbd_suspend_io(device);
/* also wait for the last barrier ack. */
/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
* We need a way to either ignore barrier acks for barriers sent before a device
@@ -1571,45 +1598,45 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* As barriers are counted per resource,
* we'd need to suspend io on all devices of a resource.
*/
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
+ wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
/* and for any other previously queued work */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
- rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+ rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = rv; /* FIXME: Type mismatch. */
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
if (rv < SS_SUCCESS)
goto fail;
- if (!get_ldev_if_state(mdev, D_ATTACHING))
+ if (!get_ldev_if_state(device, D_ATTACHING))
goto force_diskless;
- if (!mdev->bitmap) {
- if (drbd_bm_init(mdev)) {
+ if (!device->bitmap) {
+ if (drbd_bm_init(device)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
}
- if (mdev->state.conn < C_CONNECTED &&
- mdev->state.role == R_PRIMARY &&
- (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
- dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
- (unsigned long long)mdev->ed_uuid);
+ if (device->state.conn < C_CONNECTED &&
+ device->state.role == R_PRIMARY &&
+ (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
+ drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
+ (unsigned long long)device->ed_uuid);
retcode = ERR_DATA_NOT_CURRENT;
goto force_diskless_dec;
}
/* Since we are diskless, fix the activity log first... */
- if (drbd_check_al_size(mdev, new_disk_conf)) {
+ if (drbd_check_al_size(device, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
- dev_warn(DEV, "refusing to truncate a consistent device\n");
+ drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
+ drbd_warn(device, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1617,40 +1644,40 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (new_disk_conf->md_flushes)
- clear_bit(MD_NO_FUA, &mdev->flags);
+ clear_bit(MD_NO_FUA, &device->flags);
else
- set_bit(MD_NO_FUA, &mdev->flags);
+ set_bit(MD_NO_FUA, &device->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
- * now mdev takes over responsibility, and the state engine should
+ * now device takes over responsibility, and the state engine should
* clean it up somewhere. */
- D_ASSERT(mdev->ldev == NULL);
- mdev->ldev = nbc;
- mdev->resync = resync_lru;
- mdev->rs_plan_s = new_plan;
+ D_ASSERT(device, device->ldev == NULL);
+ device->ldev = nbc;
+ device->resync = resync_lru;
+ device->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
new_disk_conf = NULL;
new_plan = NULL;
- drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+ drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
- if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
+ set_bit(CRASHED_PRIMARY, &device->flags);
else
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ clear_bit(CRASHED_PRIMARY, &device->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+ !(device->state.role == R_PRIMARY && device->resource->susp_nod))
+ set_bit(CRASHED_PRIMARY, &device->flags);
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
- mdev->read_cnt = 0;
- mdev->writ_cnt = 0;
+ device->send_cnt = 0;
+ device->recv_cnt = 0;
+ device->read_cnt = 0;
+ device->writ_cnt = 0;
- drbd_reconsider_max_bio_size(mdev);
+ drbd_reconsider_max_bio_size(device);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
@@ -1666,50 +1693,50 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- if (mdev->state.role != R_PRIMARY &&
- drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
- set_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(USE_DEGR_WFC_T, &device->flags);
+ if (device->state.role != R_PRIMARY &&
+ drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+ !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
+ set_bit(USE_DEGR_WFC_T, &device->flags);
- dd = drbd_determine_dev_size(mdev, 0, NULL);
+ dd = drbd_determine_dev_size(device, 0, NULL);
if (dd <= DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == DS_GREW)
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ set_bit(RESYNC_AFTER_NEG, &device->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
- (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
- drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
- dev_info(DEV, "Assuming that all blocks are out of sync "
+ if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &device->flags) &&
+ drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
+ drbd_info(device, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
} else {
- if (drbd_bitmap_io(mdev, &drbd_bm_read,
+ if (drbd_bitmap_io(device, &drbd_bm_read,
"read from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
}
- if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
- drbd_suspend_al(mdev); /* IO is still suspended here... */
+ if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
+ drbd_suspend_al(device); /* IO is still suspended here... */
- spin_lock_irq(&mdev->tconn->req_lock);
- os = drbd_read_state(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ os = drbd_read_state(device);
ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
otherwise into D_CONSISTENT state.
*/
- if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
- if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
+ if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
+ if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
ns.disk = D_CONSISTENT;
else
ns.disk = D_OUTDATED;
@@ -1717,12 +1744,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
ns.disk = D_INCONSISTENT;
}
- if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
+ if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
rcu_read_lock();
if (ns.disk == D_CONSISTENT &&
- (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
+ (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1730,56 +1757,56 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
this point, because drbd_request_state() modifies these
flags. */
- if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
- mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ if (rcu_dereference(device->ldev->disk_conf)->al_updates)
+ device->ldev->md.flags &= ~MDF_AL_DISABLED;
else
- mdev->ldev->md.flags |= MDF_AL_DISABLED;
+ device->ldev->md.flags |= MDF_AL_DISABLED;
rcu_read_unlock();
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
- if (mdev->state.conn == C_CONNECTED) {
- mdev->new_state_tmp.i = ns.i;
+ if (device->state.conn == C_CONNECTED) {
+ device->new_state_tmp.i = ns.i;
ns.i = os.i;
ns.disk = D_NEGOTIATING;
/* We expect to receive up-to-date UUIDs soon.
To avoid a race in receive_state, free p_uuid while
holding req_lock. I.e. atomic with the state change */
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
+ kfree(device->p_uuid);
+ device->p_uuid = NULL;
}
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
+ spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
- mod_timer(&mdev->request_timer, jiffies + HZ);
+ mod_timer(&device->request_timer, jiffies + HZ);
- if (mdev->state.role == R_PRIMARY)
- mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
+ if (device->state.role == R_PRIMARY)
+ device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
- mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+ device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
- drbd_md_mark_dirty(mdev);
- drbd_md_sync(mdev);
+ drbd_md_mark_dirty(device);
+ drbd_md_sync(device);
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- put_ldev(mdev);
- conn_reconfig_done(mdev->tconn);
+ kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
+ put_ldev(device);
+ conn_reconfig_done(first_peer_device(device)->connection);
drbd_adm_finish(info, retcode);
return 0;
force_diskless_dec:
- put_ldev(mdev);
+ put_ldev(device);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_DISKLESS));
- drbd_md_sync(mdev);
+ drbd_force_state(device, NS(disk, D_DISKLESS));
+ drbd_md_sync(device);
fail:
- conn_reconfig_done(mdev->tconn);
+ conn_reconfig_done(first_peer_device(device)->connection);
if (nbc) {
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
@@ -1798,26 +1825,26 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static int adm_detach(struct drbd_conf *mdev, int force)
+static int adm_detach(struct drbd_device *device, int force)
{
enum drbd_state_rv retcode;
int ret;
if (force) {
- set_bit(FORCE_DETACH, &mdev->flags);
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ set_bit(FORCE_DETACH, &device->flags);
+ drbd_force_state(device, NS(disk, D_FAILED));
retcode = SS_SUCCESS;
goto out;
}
- drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
- drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
- retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
- drbd_md_put_buffer(mdev);
+ drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(device); /* make sure there is no in-flight meta-data IO */
+ retcode = drbd_request_state(device, NS(disk, D_FAILED));
+ drbd_md_put_buffer(device);
/* D_FAILED will transition to DISKLESS. */
- ret = wait_event_interruptible(mdev->misc_wait,
- mdev->state.disk != D_FAILED);
- drbd_resume_io(mdev);
+ ret = wait_event_interruptible(device->misc_wait,
+ device->state.disk != D_FAILED);
+ drbd_resume_io(device);
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
@@ -1852,24 +1879,25 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
}
}
- retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+ retcode = adm_detach(adm_ctx.device, parms.force_detach);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static bool conn_resync_running(struct drbd_tconn *tconn)
+static bool conn_resync_running(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.conn == C_SYNC_SOURCE ||
+ device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_PAUSED_SYNC_S ||
+ device->state.conn == C_PAUSED_SYNC_T) {
rv = true;
break;
}
@@ -1879,16 +1907,17 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
return rv;
}
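
With this rename, per-volume iteration changes from walking tconn->volumes (yielding struct drbd_conf) to walking connection->peer_devices (yielding struct drbd_peer_device, whose ->device member is the volume). A minimal sketch of the new idiom as conn_resync_running() and conn_ov_running() use it; the predicate checked here is illustrative only:

/* Illustrative: iterate all volumes of a connection under RCU. */
static bool conn_any_disk_failed(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.disk == D_FAILED) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}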
-static bool conn_ov_running(struct drbd_tconn *tconn)
+static bool conn_ov_running(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = false;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T) {
rv = true;
break;
}
@@ -1899,63 +1928,65 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
}
static enum drbd_ret_code
-_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int i;
- if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
- if (new_conf->wire_protocol != old_conf->wire_protocol)
+ if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
+ if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
return ERR_NEED_APV_100;
- if (new_conf->two_primaries != old_conf->two_primaries)
+ if (new_net_conf->two_primaries != old_net_conf->two_primaries)
return ERR_NEED_APV_100;
- if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+ if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
return ERR_NEED_APV_100;
}
- if (!new_conf->two_primaries &&
- conn_highest_role(tconn) == R_PRIMARY &&
- conn_highest_peer(tconn) == R_PRIMARY)
+ if (!new_net_conf->two_primaries &&
+ conn_highest_role(connection) == R_PRIMARY &&
+ conn_highest_peer(connection) == R_PRIMARY)
return ERR_NEED_ALLOW_TWO_PRI;
- if (new_conf->two_primaries &&
- (new_conf->wire_protocol != DRBD_PROT_C))
+ if (new_net_conf->two_primaries &&
+ (new_net_conf->wire_protocol != DRBD_PROT_C))
return ERR_NOT_PROTO_C;
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- if (get_ldev(mdev)) {
- enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
- put_ldev(mdev);
- if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ if (get_ldev(device)) {
+ enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ put_ldev(device);
+ if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
return ERR_STONITH_AND_PROT_A;
}
- if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+ if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
return ERR_DISCARD_IMPOSSIBLE;
}
- if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+ if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
return ERR_CONG_NOT_PROTO_A;
return NO_ERROR;
}
static enum drbd_ret_code
-check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
static enum drbd_ret_code rv;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int i;
rcu_read_lock();
- rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+ rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
rcu_read_unlock();
- /* tconn->volumes protected by genl_lock() here */
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- if (!mdev->bitmap) {
- if(drbd_bm_init(mdev))
+ /* connection->peer_devices protected by genl_lock() here */
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ if (!device->bitmap) {
+ if (drbd_bm_init(device))
return ERR_NOMEM;
}
}
@@ -1986,26 +2017,26 @@ alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
}
static enum drbd_ret_code
-alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+ rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+ rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+ rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
- if (new_conf->cram_hmac_alg[0] != 0) {
+ if (new_net_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- new_conf->cram_hmac_alg);
+ new_net_conf->cram_hmac_alg);
rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
ERR_AUTH_ALG);
@@ -2025,8 +2056,8 @@ static void free_crypto(struct crypto *crypto)
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_tconn *tconn;
- struct net_conf *old_conf, *new_conf = NULL;
+ struct drbd_connection *connection;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
int err;
int ovr; /* online verify running */
int rsr; /* re-sync running */
@@ -2038,98 +2069,103 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- tconn = adm_ctx.tconn;
+ connection = adm_ctx.connection;
- new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
- if (!new_conf) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
retcode = ERR_NOMEM;
goto out;
}
- conn_reconfig_start(tconn);
+ conn_reconfig_start(connection);
- mutex_lock(&tconn->data.mutex);
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
+ mutex_lock(&connection->data.mutex);
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = connection->net_conf;
- if (!old_conf) {
+ if (!old_net_conf) {
drbd_msg_put_info("net conf missing, try connect");
retcode = ERR_INVALID_REQUEST;
goto fail;
}
- *new_conf = *old_conf;
+ *new_net_conf = *old_net_conf;
if (should_set_defaults(info))
- set_net_conf_defaults(new_conf);
+ set_net_conf_defaults(new_net_conf);
- err = net_conf_from_attrs_for_change(new_conf, info);
+ err = net_conf_from_attrs_for_change(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- retcode = check_net_options(tconn, new_conf);
+ retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
/* re-sync running */
- rsr = conn_resync_running(tconn);
- if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+ rsr = conn_resync_running(connection);
+ if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
retcode = ERR_CSUMS_RESYNC_RUNNING;
goto fail;
}
/* online verify running */
- ovr = conn_ov_running(tconn);
- if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+ ovr = conn_ov_running(connection);
+ if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
retcode = ERR_VERIFY_RUNNING;
goto fail;
}
- retcode = alloc_crypto(&crypto, new_conf);
+ retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- rcu_assign_pointer(tconn->net_conf, new_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_hash(tconn->csums_tfm);
- tconn->csums_tfm = crypto.csums_tfm;
+ crypto_free_hash(connection->csums_tfm);
+ connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_hash(tconn->verify_tfm);
- tconn->verify_tfm = crypto.verify_tfm;
+ crypto_free_hash(connection->verify_tfm);
+ connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_hash(tconn->integrity_tfm);
- tconn->integrity_tfm = crypto.integrity_tfm;
- if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
- /* Do this without trying to take tconn->data.mutex again. */
- __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
+ crypto_free_hash(connection->integrity_tfm);
+ connection->integrity_tfm = crypto.integrity_tfm;
+ if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
+ /* Do this without trying to take connection->data.mutex again. */
+ __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
- crypto_free_hash(tconn->cram_hmac_tfm);
- tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ crypto_free_hash(connection->cram_hmac_tfm);
+ connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
synchronize_rcu();
- kfree(old_conf);
+ kfree(old_net_conf);
+
+ if (connection->cstate >= C_WF_REPORT_PARAMS) {
+ struct drbd_peer_device *peer_device;
+ int vnr;
- if (tconn->cstate >= C_WF_REPORT_PARAMS)
- drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ drbd_send_sync_param(peer_device);
+ }
goto done;
fail:
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
free_crypto(&crypto);
- kfree(new_conf);
+ kfree(new_net_conf);
done:
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -2137,10 +2173,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
- struct net_conf *old_conf, *new_conf = NULL;
+ struct drbd_peer_device *peer_device;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
struct crypto crypto = { };
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
enum drbd_ret_code retcode;
int i;
int err;
@@ -2160,106 +2197,111 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
/* No need for _rcu here. All reconfiguration is
* strictly serialized on genl_lock(). We are protected against
* concurrent reconfiguration/addition/deletion */
- list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
- if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
- !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
- retcode = ERR_LOCAL_ADDR;
- goto out;
- }
+ for_each_resource(resource, &drbd_resources) {
+ for_each_connection(connection, resource) {
+ if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
+ connection->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
+ }
- if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
- !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
- retcode = ERR_PEER_ADDR;
- goto out;
+ if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
+ connection->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
+ }
}
}
- tconn = adm_ctx.tconn;
- conn_reconfig_start(tconn);
+ connection = first_connection(adm_ctx.resource);
+ conn_reconfig_start(connection);
- if (tconn->cstate > C_STANDALONE) {
+ if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED;
goto fail;
}
/* allocation not in the IO path, drbdsetup / netlink process context */
- new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
- if (!new_conf) {
+ new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
retcode = ERR_NOMEM;
goto fail;
}
- set_net_conf_defaults(new_conf);
+ set_net_conf_defaults(new_net_conf);
- err = net_conf_from_attrs(new_conf, info);
+ err = net_conf_from_attrs(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- retcode = check_net_options(tconn, new_conf);
+ retcode = check_net_options(connection, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- retcode = alloc_crypto(&crypto, new_conf);
+ retcode = alloc_crypto(&crypto, new_net_conf);
if (retcode != NO_ERROR)
goto fail;
- ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+ ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
- conn_flush_workqueue(tconn);
+ drbd_flush_workqueue(&connection->sender_work);
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
- if (old_conf) {
+ mutex_lock(&adm_ctx.resource->conf_update);
+ old_net_conf = connection->net_conf;
+ if (old_net_conf) {
retcode = ERR_NET_CONFIGURED;
- mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&adm_ctx.resource->conf_update);
goto fail;
}
- rcu_assign_pointer(tconn->net_conf, new_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
- conn_free_crypto(tconn);
- tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
- tconn->integrity_tfm = crypto.integrity_tfm;
- tconn->csums_tfm = crypto.csums_tfm;
- tconn->verify_tfm = crypto.verify_tfm;
+ conn_free_crypto(connection);
+ connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ connection->integrity_tfm = crypto.integrity_tfm;
+ connection->csums_tfm = crypto.csums_tfm;
+ connection->verify_tfm = crypto.verify_tfm;
- tconn->my_addr_len = nla_len(adm_ctx.my_addr);
- memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
- tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
- memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
+ connection->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
+ connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
- mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&adm_ctx.resource->conf_update);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, i) {
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ struct drbd_device *device = peer_device->device;
+ device->send_cnt = 0;
+ device->recv_cnt = 0;
}
rcu_read_unlock();
- retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
drbd_adm_finish(info, retcode);
return 0;
fail:
free_crypto(&crypto);
- kfree(new_conf);
+ kfree(new_net_conf);
- conn_reconfig_done(tconn);
+ conn_reconfig_done(connection);
out:
drbd_adm_finish(info, retcode);
return 0;
}
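
All of the drbd_adm_* handlers in this file share the same shape: drbd_adm_prepare() resolves the netlink context into adm_ctx, the body sets retcode with goto-based unwinding, and drbd_adm_finish() sends the reply. A schematic skeleton of that shape, abstracted from the handlers in this diff; the handler name and its body are placeholders, not part of the patch:

/* Illustrative skeleton of a drbd_adm_* netlink handler as used in this file. */
int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* ... operate on adm_ctx.device / adm_ctx.resource, set retcode ... */

out:
	drbd_adm_finish(info, retcode);
	return 0;
}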
-static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
enum drbd_state_rv rv;
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
force ? CS_HARD : 0);
switch (rv) {
@@ -2269,18 +2311,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
return SS_SUCCESS;
case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+ rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
break;
case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
- rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
disk, D_OUTDATED), 0);
if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
- rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
CS_HARD);
}
break;
@@ -2294,18 +2336,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
* The state handling only uses drbd_thread_stop_nowait(),
* we want to really wait here until the receiver is no more.
*/
- drbd_thread_stop(&adm_ctx.tconn->receiver);
+ drbd_thread_stop(&connection->receiver);
/* Race breaker. This additional state change request may be
* necessary, if this was a forced disconnect during a receiver
* restart. We may have "killed" the receiver thread just
- * after drbdd_init() returned. Typically, we should be
+ * after drbd_receiver() returned. Typically, we should be
* C_STANDALONE already, now, and this becomes a no-op.
*/
- rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+ rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
CS_VERBOSE | CS_HARD);
if (rv2 < SS_SUCCESS)
- conn_err(tconn,
+ drbd_err(connection,
"unexpected rv2=%d in conn_try_disconnect()\n",
rv2);
}
@@ -2315,7 +2357,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct disconnect_parms parms;
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
enum drbd_state_rv rv;
enum drbd_ret_code retcode;
int err;
@@ -2326,7 +2368,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- tconn = adm_ctx.tconn;
+ connection = adm_ctx.connection;
memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
err = disconnect_parms_from_attrs(&parms, info);
@@ -2337,7 +2379,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
}
}
- rv = conn_try_disconnect(tconn, parms.force_disconnect);
+ rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */
else
@@ -2347,27 +2389,27 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-void resync_after_online_grow(struct drbd_conf *mdev)
+void resync_after_online_grow(struct drbd_device *device)
{
int iass; /* I am sync source */
- dev_info(DEV, "Resync of new storage after online grow\n");
- if (mdev->state.role != mdev->state.peer)
- iass = (mdev->state.role == R_PRIMARY);
+ drbd_info(device, "Resync of new storage after online grow\n");
+ if (device->state.role != device->state.peer)
+ iass = (device->state.role == R_PRIMARY);
else
- iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
if (iass)
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_start_resync(device, C_SYNC_SOURCE);
else
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
+ _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
bool change_al_layout = false;
@@ -2381,15 +2423,15 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- mdev = adm_ctx.mdev;
- if (!get_ldev(mdev)) {
+ device = adm_ctx.device;
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto fail;
}
memset(&rs, 0, sizeof(struct resize_parms));
- rs.al_stripes = mdev->ldev->md.al_stripes;
- rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
+ rs.al_stripes = device->ldev->md.al_stripes;
+ rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
err = resize_parms_from_attrs(&rs, info);
if (err) {
@@ -2399,24 +2441,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
}
- if (mdev->state.conn > C_CONNECTED) {
+ if (device->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail_ldev;
}
- if (mdev->state.role == R_SECONDARY &&
- mdev->state.peer == R_SECONDARY) {
+ if (device->state.role == R_SECONDARY &&
+ device->state.peer == R_SECONDARY) {
retcode = ERR_NO_PRIMARY;
goto fail_ldev;
}
- if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
+ if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
rcu_read_lock();
- u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
if (u_size != (sector_t)rs.resize_size) {
new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
@@ -2426,8 +2468,8 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
}
- if (mdev->ldev->md.al_stripes != rs.al_stripes ||
- mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
+ if (device->ldev->md.al_stripes != rs.al_stripes ||
+ device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
if (al_size_k > (16 * 1024 * 1024)) {
@@ -2440,7 +2482,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail_ldev;
}
- if (mdev->state.conn != C_CONNECTED) {
+ if (device->state.conn != C_CONNECTED) {
retcode = ERR_MD_LAYOUT_CONNECTED;
goto fail_ldev;
}
@@ -2448,24 +2490,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
change_al_layout = true;
}
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+ if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
+ device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
if (new_disk_conf) {
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&device->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = (sector_t)rs.resize_size;
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- mutex_unlock(&mdev->tconn->conf_update);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&device->resource->conf_update);
synchronize_rcu();
kfree(old_disk_conf);
}
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
- dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
- drbd_md_sync(mdev);
- put_ldev(mdev);
+ dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
+ drbd_md_sync(device);
+ put_ldev(device);
if (dd == DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto fail;
@@ -2477,12 +2519,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- if (mdev->state.conn == C_CONNECTED) {
+ if (device->state.conn == C_CONNECTED) {
if (dd == DS_GREW)
- set_bit(RESIZE_PENDING, &mdev->flags);
+ set_bit(RESIZE_PENDING, &device->flags);
- drbd_send_uuids(mdev);
- drbd_send_sizes(mdev, 1, ddsf);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_sizes(first_peer_device(device), 1, ddsf);
}
fail:
@@ -2490,14 +2532,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
return 0;
fail_ldev:
- put_ldev(mdev);
+ put_ldev(device);
goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- struct drbd_tconn *tconn;
struct res_opts res_opts;
int err;
@@ -2506,9 +2547,8 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
return retcode;
if (retcode != NO_ERROR)
goto fail;
- tconn = adm_ctx.tconn;
- res_opts = tconn->res_opts;
+ res_opts = adm_ctx.resource->res_opts;
if (should_set_defaults(info))
set_res_opts_defaults(&res_opts);
@@ -2519,7 +2559,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
- err = set_resource_options(tconn, &res_opts);
+ err = set_resource_options(adm_ctx.resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
@@ -2533,7 +2573,7 @@ fail:
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2542,29 +2582,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- drbd_flush_workqueue(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_SECONDARY, just change to
* D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
* try to start a resync handshake as sync target for full sync.
*/
- if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
- retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+ if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
+ retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
"set_n_write from invalidate", BM_LOCKED_MASK))
retcode = ERR_IO_MD_DISK;
}
} else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
- drbd_resume_io(mdev);
+ retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2582,25 +2622,25 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
if (retcode != NO_ERROR)
goto out;
- retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+ retcode = drbd_request_state(adm_ctx.device, mask, val);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
+static int drbd_bmio_set_susp_al(struct drbd_device *device)
{
int rv;
- rv = drbd_bmio_set_n_write(mdev);
- drbd_suspend_al(mdev);
+ rv = drbd_bmio_set_n_write(device);
+ drbd_suspend_al(device);
return rv;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
int retcode; /* drbd_ret_code, drbd_state_rv */
- struct drbd_conf *mdev;
+ struct drbd_device *device;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
@@ -2608,32 +2648,32 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- drbd_flush_workqueue(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
* in the bitmap. Otherwise, try to start a resync handshake
* as sync source for full sync.
*/
- if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+ if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
/* The peer will get a resync upon connect anyway. Just make that
into a full resync. */
- retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+ retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+ if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
"set_n_write from invalidate_peer",
BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
- retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
- drbd_resume_io(mdev);
+ retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2650,7 +2690,7 @@ int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
out:
drbd_adm_finish(info, retcode);
@@ -2668,8 +2708,8 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = adm_ctx.mdev->state;
+ if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx.device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2690,7 +2730,7 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2699,20 +2739,20 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ device = adm_ctx.device;
+ if (test_bit(NEW_CUR_UUID, &device->flags)) {
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
}
- drbd_suspend_io(mdev);
- retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+ drbd_suspend_io(device);
+ retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
if (retcode == SS_SUCCESS) {
- if (mdev->state.conn < C_CONNECTED)
- tl_clear(mdev->tconn);
- if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
+ if (device->state.conn < C_CONNECTED)
+ tl_clear(first_peer_device(device)->connection);
+ if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
+ tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
@@ -2724,23 +2764,28 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
+static int nla_put_drbd_cfg_context(struct sk_buff *skb,
+ struct drbd_resource *resource,
+ struct drbd_connection *connection,
+ struct drbd_device *device)
{
struct nlattr *nla;
nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
- if (vnr != VOLUME_UNSPECIFIED &&
- nla_put_u32(skb, T_ctx_volume, vnr))
- goto nla_put_failure;
- if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
- goto nla_put_failure;
- if (tconn->my_addr_len &&
- nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ if (device &&
+ nla_put_u32(skb, T_ctx_volume, device->vnr))
goto nla_put_failure;
- if (tconn->peer_addr_len &&
- nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ if (nla_put_string(skb, T_ctx_resource_name, resource->name))
goto nla_put_failure;
+ if (connection) {
+ if (connection->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
+ goto nla_put_failure;
+ if (connection->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
+ goto nla_put_failure;
+ }
nla_nest_end(skb, nla);
return 0;
@@ -2750,9 +2795,22 @@ nla_put_failure:
return -EMSGSIZE;
}
-int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+/*
+ * Return the connection of @resource if @resource has exactly one connection.
+ */
+static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
+{
+ struct list_head *connections = &resource->connections;
+
+ if (list_empty(connections) || connections->next->next != connections)
+ return NULL;
+ return list_first_entry(&resource->connections, struct drbd_connection, connections);
+}
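Aside: the test above exploits the circular list_head layout. A minimal, self-contained userspace sketch of the same "exactly one element" check; struct list_head and has_exactly_one() here are hand-rolled stand-ins made up for illustration, not kernel code.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* true only when the circular list headed by "head" has exactly one entry */
static int has_exactly_one(const struct list_head *head)
{
	/* empty:       head->next == head
	 * one entry:   head->next->next == head
	 * two or more: head->next->next != head */
	return head->next != head && head->next->next == head;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct list_head a, b;

	printf("%d\n", has_exactly_one(&head));		/* 0 */

	a.next = &head; a.prev = &head;
	head.next = &a; head.prev = &a;			/* one entry */
	printf("%d\n", has_exactly_one(&head));		/* 1 */

	b.next = &head; b.prev = &a;
	a.next = &b; head.prev = &b;			/* two entries */
	printf("%d\n", has_exactly_one(&head));		/* 0 */
	return 0;
}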
+
+int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
const struct sib_info *sib)
{
+ struct drbd_resource *resource = device->resource;
struct state_info *si = NULL; /* for sizeof(si->member); */
struct nlattr *nla;
int got_ldev;
@@ -2772,27 +2830,27 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
* always in the context of the receiving process */
exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
- got_ldev = get_ldev(mdev);
+ got_ldev = get_ldev(device);
/* We need to add connection name and volume number information still.
* Minor number is in drbd_genlmsghdr. */
- if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+ if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
goto nla_put_failure;
- if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+ if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
goto nla_put_failure;
rcu_read_lock();
if (got_ldev) {
struct disk_conf *disk_conf;
- disk_conf = rcu_dereference(mdev->ldev->disk_conf);
+ disk_conf = rcu_dereference(device->ldev->disk_conf);
err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
}
if (!err) {
struct net_conf *nc;
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc)
err = net_conf_to_skb(skb, nc, exclude_sensitive);
}
@@ -2804,38 +2862,38 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
- nla_put_u32(skb, T_current_state, mdev->state.i) ||
- nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
- nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
- nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
- nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
- nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
- nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
- nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
- nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
- nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
- nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
- nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ nla_put_u32(skb, T_current_state, device->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
goto nla_put_failure;
if (got_ldev) {
int err;
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
if (err)
goto nla_put_failure;
- if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
- nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
- nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
goto nla_put_failure;
- if (C_SYNC_SOURCE <= mdev->state.conn &&
- C_PAUSED_SYNC_T >= mdev->state.conn) {
- if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
- nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ if (C_SYNC_SOURCE <= device->state.conn &&
+ C_PAUSED_SYNC_T >= device->state.conn) {
+ if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
goto nla_put_failure;
}
}
@@ -2867,7 +2925,7 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
nla_put_failure:
err = -EMSGSIZE;
if (got_ldev)
- put_ldev(mdev);
+ put_ldev(device);
return err;
}
@@ -2882,7 +2940,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+ err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
if (err) {
nlmsg_free(adm_ctx.reply_skb);
return err;
@@ -2892,22 +2950,23 @@ out:
return 0;
}
-int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
+static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct drbd_genlmsghdr *dh;
- struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
- struct drbd_tconn *tconn = NULL;
- struct drbd_tconn *tmp;
+ struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
+ struct drbd_resource *resource = NULL;
+ struct drbd_resource *tmp;
unsigned volume = cb->args[1];
 /* Open-coded, deferred iteration:
- * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
- * idr_for_each_entry(&tconn->volumes, mdev, i) {
+ * for_each_resource_safe(resource, tmp, &drbd_resources) {
+ * connection = "first connection of resource or undefined";
+ * idr_for_each_entry(&resource->devices, device, i) {
* ...
* }
* }
- * where tconn is cb->args[0];
+ * where resource is cb->args[0];
* and i is cb->args[1];
*
* cb->args[2] indicates if we shall loop over all resources,
@@ -2916,44 +2975,44 @@ int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
* This may miss entries inserted after this dump started,
* or entries deleted before they are reached.
*
- * We need to make sure the mdev won't disappear while
+ * We need to make sure the device won't disappear while
* we are looking at it, and revalidate our iterators
* on each iteration.
*/
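Aside: a minimal userspace sketch of the cursor revalidation described above. struct res and revalidate() are made-up stand-ins for drbd_resources and the for_each_resource_rcu() loop below, and a plain singly linked list stands in for the RCU-protected list; the point is only that a pointer saved across dump calls is trusted again only after it has been found in the live list.

#include <stddef.h>

struct res { struct res *next; };

/* Trust a cursor saved across dump calls only after finding it in the live list. */
static struct res *revalidate(struct res *resources, struct res *pos)
{
	struct res *tmp;

	if (!pos)			/* first call: start at the head */
		return resources;
	for (tmp = resources; tmp; tmp = tmp->next)
		if (tmp == pos)		/* saved position still exists, resume there */
			return pos;
	return NULL;			/* position was deleted meanwhile; nothing to dump */
}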
- /* synchronize with conn_create()/conn_destroy() */
+ /* synchronize with conn_create()/drbd_destroy_connection() */
rcu_read_lock();
/* revalidate iterator position */
- list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+ for_each_resource_rcu(tmp, &drbd_resources) {
if (pos == NULL) {
/* first iteration */
pos = tmp;
- tconn = pos;
+ resource = pos;
break;
}
if (tmp == pos) {
- tconn = pos;
+ resource = pos;
break;
}
}
- if (tconn) {
-next_tconn:
- mdev = idr_get_next(&tconn->volumes, &volume);
- if (!mdev) {
- /* No more volumes to dump on this tconn.
- * Advance tconn iterator. */
- pos = list_entry_rcu(tconn->all_tconn.next,
- struct drbd_tconn, all_tconn);
- /* Did we dump any volume on this tconn yet? */
+ if (resource) {
+next_resource:
+ device = idr_get_next(&resource->devices, &volume);
+ if (!device) {
+ /* No more volumes to dump on this resource.
+ * Advance resource iterator. */
+ pos = list_entry_rcu(resource->resources.next,
+ struct drbd_resource, resources);
+ /* Did we dump any volume of this resource yet? */
if (volume != 0) {
/* If we reached the end of the list,
* or only a single resource dump was requested,
* we are done. */
- if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+ if (&pos->resources == &drbd_resources || cb->args[2])
goto out;
volume = 0;
- tconn = pos;
- goto next_tconn;
+ resource = pos;
+ goto next_resource;
}
}
@@ -2963,43 +3022,49 @@ next_tconn:
if (!dh)
goto out;
- if (!mdev) {
- /* This is a tconn without a single volume.
+ if (!device) {
+ /* This is a connection without a single volume.
 * Surprisingly enough, it may have a network
* configuration. */
- struct net_conf *nc;
+ struct drbd_connection *connection;
+
dh->minor = -1U;
dh->ret_code = NO_ERROR;
- if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
- goto cancel;
- nc = rcu_dereference(tconn->net_conf);
- if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ connection = the_only_connection(resource);
+ if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
goto cancel;
+ if (connection) {
+ struct net_conf *nc;
+
+ nc = rcu_dereference(connection->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ }
goto done;
}
- D_ASSERT(mdev->vnr == volume);
- D_ASSERT(mdev->tconn == tconn);
+ D_ASSERT(device, device->vnr == volume);
+ D_ASSERT(device, device->resource == resource);
- dh->minor = mdev_to_minor(mdev);
+ dh->minor = device_to_minor(device);
dh->ret_code = NO_ERROR;
- if (nla_put_status_info(skb, mdev, NULL)) {
+ if (nla_put_status_info(skb, device, NULL)) {
cancel:
genlmsg_cancel(skb, dh);
goto out;
}
done:
genlmsg_end(skb, dh);
- }
+ }
out:
rcu_read_unlock();
/* where to start the next iteration */
- cb->args[0] = (long)pos;
- cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+ cb->args[0] = (long)pos;
+ cb->args[1] = (pos == resource) ? volume + 1 : 0;
- /* No more tconns/volumes/minors found results in an empty skb.
+ /* No more resources/volumes/minors found results in an empty skb.
* Which will terminate the dump. */
return skb->len;
}
@@ -3019,7 +3084,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
struct nlattr *nla;
const char *resource_name;
- struct drbd_tconn *tconn;
+ struct drbd_resource *resource;
int maxtype;
/* Is this a followup call? */
@@ -3048,18 +3113,19 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
if (!nla)
return -EINVAL;
resource_name = nla_data(nla);
- tconn = conn_get_by_name(resource_name);
-
- if (!tconn)
+ if (!*resource_name)
+ return -ENODEV;
+ resource = drbd_find_resource(resource_name);
+ if (!resource)
return -ENODEV;
- kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+ kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
/* prime iterators, and set "filter" mode mark:
- * only dump this tconn. */
- cb->args[0] = (long)tconn;
+ * only dump this resource. */
+ cb->args[0] = (long)resource;
/* cb->args[1] = 0; passed in this way. */
- cb->args[2] = (long)tconn;
+ cb->args[2] = (long)resource;
dump:
return get_one_status(skb, cb);
@@ -3078,8 +3144,8 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
goto out;
tp.timeout_type =
- adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+ adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
UT_DEFAULT;
err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
@@ -3094,7 +3160,7 @@ out:
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
@@ -3104,10 +3170,10 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
/* resume from last known position, if possible */
- parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_start_sector = device->ov_start_sector;
parms.ov_stop_sector = ULLONG_MAX;
if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
int err = start_ov_parms_from_attrs(&parms, info);
@@ -3118,15 +3184,15 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
}
}
/* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
- mdev->ov_stop_sector = parms.ov_stop_sector;
+ device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ device->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
- drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
- retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
- drbd_resume_io(mdev);
+ drbd_suspend_io(device);
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
+ drbd_resume_io(device);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -3135,7 +3201,7 @@ out:
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_conf *mdev;
+ struct drbd_device *device;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
@@ -3147,7 +3213,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out_nolock;
- mdev = adm_ctx.mdev;
+ device = adm_ctx.device;
memset(&args, 0, sizeof(args));
if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
err = new_c_uuid_parms_from_attrs(&args, info);
@@ -3158,49 +3224,50 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
}
}
- mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
+ mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
- mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
- dev_info(DEV, "Preparing to skip initial sync\n");
+ if (device->state.conn == C_CONNECTED &&
+ first_peer_device(device)->connection->agreed_pro_version >= 90 &&
+ device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
+ drbd_info(device, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
- } else if (mdev->state.conn != C_STANDALONE) {
+ } else if (device->state.conn != C_STANDALONE) {
retcode = ERR_CONNECTED;
goto out_dec;
}
- drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
- drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
+ drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
+ drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
if (args.clear_bm) {
- err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+ err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
if (err) {
- dev_err(DEV, "Writing bitmap failed with %d\n",err);
+ drbd_err(device, "Writing bitmap failed with %d\n", err);
retcode = ERR_IO_MD_DISK;
}
if (skip_initial_sync) {
- drbd_send_uuids_skip_initial_sync(mdev);
- _drbd_uuid_set(mdev, UI_BITMAP, 0);
- drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ drbd_send_uuids_skip_initial_sync(first_peer_device(device));
+ _drbd_uuid_set(device, UI_BITMAP, 0);
+ drbd_print_uuids(device, "cleared bitmap UUID");
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
out_dec:
- put_ldev(mdev);
+ put_ldev(device);
out:
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
out_nolock:
drbd_adm_finish(info, retcode);
return 0;
@@ -3246,7 +3313,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (adm_ctx.tconn) {
+ if (adm_ctx.resource) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST;
drbd_msg_put_info("resource exists");
@@ -3262,7 +3329,7 @@ out:
return 0;
}
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_genlmsghdr *dh = info->userhdr;
enum drbd_ret_code retcode;
@@ -3285,41 +3352,36 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
}
/* drbd_adm_prepare made sure already
- * that mdev->tconn and mdev->vnr match the request. */
- if (adm_ctx.mdev) {
+ * that first_peer_device(device)->connection and device->vnr match the request. */
+ if (adm_ctx.device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
retcode = ERR_MINOR_EXISTS;
/* else: still NO_ERROR */
goto out;
}
- retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+ retcode = drbd_create_device(adm_ctx.resource, dh->minor, adm_ctx.volume);
out:
drbd_adm_finish(info, retcode);
return 0;
}
-static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
+static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
- if (mdev->state.disk == D_DISKLESS &&
- /* no need to be mdev->state.conn == C_STANDALONE &&
+ if (device->state.disk == D_DISKLESS &&
+ /* no need to be device->state.conn == C_STANDALONE &&
* we may want to delete a minor from a live replication group.
*/
- mdev->state.role == R_SECONDARY) {
- _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ device->state.role == R_SECONDARY) {
+ _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
CS_VERBOSE + CS_WAIT_COMPLETE);
- idr_remove(&mdev->tconn->volumes, mdev->vnr);
- idr_remove(&minors, mdev_to_minor(mdev));
- destroy_workqueue(mdev->submit.wq);
- del_gendisk(mdev->vdisk);
- synchronize_rcu();
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_delete_device(device);
return NO_ERROR;
} else
return ERR_MINOR_CONFIGURED;
}
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
@@ -3329,7 +3391,7 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- retcode = adm_delete_minor(adm_ctx.mdev);
+ retcode = adm_del_minor(adm_ctx.device);
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -3337,55 +3399,58 @@ out:
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
+ struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- struct drbd_conf *mdev;
unsigned i;
- retcode = drbd_adm_prepare(skb, info, 0);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
- if (!adm_ctx.tconn) {
- retcode = ERR_RES_NOT_KNOWN;
- goto out;
- }
-
+ resource = adm_ctx.resource;
/* demote */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+ for_each_connection(connection, resource) {
+ struct drbd_peer_device *peer_device;
+
+ idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to demote");
+ goto out;
+ }
+ }
+
+ retcode = conn_try_disconnect(connection, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to demote");
+ drbd_msg_put_info("failed to disconnect");
goto out;
}
}
- retcode = conn_try_disconnect(adm_ctx.tconn, 0);
- if (retcode < SS_SUCCESS) {
- drbd_msg_put_info("failed to disconnect");
- goto out;
- }
-
/* detach */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = adm_detach(mdev, 0);
+ idr_for_each_entry(&resource->devices, device, i) {
+ retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info("failed to detach");
goto out;
}
}
- /* If we reach this, all volumes (of this tconn) are Secondary,
+ /* If we reach this, all volumes (of this connection) are Secondary,
* Disconnected, Diskless, aka Unconfigured. Make sure all threads have
* actually stopped, state handling only does drbd_thread_stop_nowait(). */
- drbd_thread_stop(&adm_ctx.tconn->worker);
+ for_each_connection(connection, resource)
+ drbd_thread_stop(&connection->worker);
/* Now, nothing can fail anymore */
/* delete volumes */
- idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = adm_delete_minor(mdev);
+ idr_for_each_entry(&resource->devices, device, i) {
+ retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
drbd_msg_put_info("failed to delete volume");
@@ -3393,19 +3458,11 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
}
}
- /* delete connection */
- if (conn_lowest_minor(adm_ctx.tconn) < 0) {
- list_del_rcu(&adm_ctx.tconn->all_tconn);
- synchronize_rcu();
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+ list_del_rcu(&resource->resources);
+ synchronize_rcu();
+ drbd_free_resource(resource);
+ retcode = NO_ERROR;
- retcode = NO_ERROR;
- } else {
- /* "can not happen" */
- retcode = ERR_RES_IN_USE;
- drbd_msg_put_info("failed to delete connection");
- }
- goto out;
out:
drbd_adm_finish(info, retcode);
return 0;
@@ -3413,6 +3470,8 @@ out:
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_resource *resource;
+ struct drbd_connection *connection;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
@@ -3421,24 +3480,30 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto out;
- if (conn_lowest_minor(adm_ctx.tconn) < 0) {
- list_del_rcu(&adm_ctx.tconn->all_tconn);
- synchronize_rcu();
- kref_put(&adm_ctx.tconn->kref, &conn_destroy);
-
- retcode = NO_ERROR;
- } else {
+ resource = adm_ctx.resource;
+ for_each_connection(connection, resource) {
+ if (connection->cstate > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
+ goto out;
+ }
+ }
+ if (!idr_is_empty(&resource->devices)) {
retcode = ERR_RES_IN_USE;
+ goto out;
}
- if (retcode == NO_ERROR)
- drbd_thread_stop(&adm_ctx.tconn->worker);
+ list_del_rcu(&resource->resources);
+ for_each_connection(connection, resource)
+ drbd_thread_stop(&connection->worker);
+ synchronize_rcu();
+ drbd_free_resource(resource);
+ retcode = NO_ERROR;
out:
drbd_adm_finish(info, retcode);
return 0;
}
-void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
+void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
struct sk_buff *msg;
@@ -3447,8 +3512,8 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
int err = -ENOMEM;
if (sib->sib_reason == SIB_SYNC_PROGRESS) {
- if (time_after(jiffies, mdev->rs_last_bcast + HZ))
- mdev->rs_last_bcast = jiffies;
+ if (time_after(jiffies, device->rs_last_bcast + HZ))
+ device->rs_last_bcast = jiffies;
else
return;
}
@@ -3462,10 +3527,10 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
if (!d_out) /* cannot happen, but anyways. */
goto nla_put_failure;
- d_out->minor = mdev_to_minor(mdev);
+ d_out->minor = device_to_minor(device);
d_out->ret_code = NO_ERROR;
- if (nla_put_status_info(msg, mdev, sib))
+ if (nla_put_status_info(msg, device, sib))
goto nla_put_failure;
genlmsg_end(msg, d_out);
err = drbd_genl_multicast_events(msg, 0);
@@ -3478,7 +3543,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
nla_put_failure:
nlmsg_free(msg);
failed:
- dev_err(DEV, "Error %d while broadcasting event. "
+ drbd_err(device, "Error %d while broadcasting event. "
"Event seq:%u sib_reason:%u\n",
err, seq, sib->sib_reason);
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bf31d41dbaad..2f26e8ffa45b 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -46,7 +46,7 @@ const struct file_operations drbd_proc_fops = {
.release = drbd_proc_release,
};
-void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
+static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
{
/* v is in kB/sec. We don't expect TiByte/sec yet. */
if (unlikely(v >= 1000000)) {
@@ -66,14 +66,14 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
-static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
+static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq)
{
unsigned long db, dt, dbdt, rt, rs_left;
unsigned int res;
int i, x, y;
int stalled = 0;
- drbd_get_syncer_progress(mdev, &rs_left, &res);
+ drbd_get_syncer_progress(device, &rs_left, &res);
x = res/50;
y = 20-x;
@@ -85,21 +85,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, ".");
seq_printf(seq, "] ");
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
seq_printf(seq, "verified:");
else
seq_printf(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
- if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+ if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
- (unsigned long) Bit2KB(mdev->rs_total >> 10));
+ (unsigned long) Bit2KB(device->rs_total >> 10));
else
seq_printf(seq, "(%lu/%lu)K\n\t",
(unsigned long) Bit2KB(rs_left),
- (unsigned long) Bit2KB(mdev->rs_total));
+ (unsigned long) Bit2KB(device->rs_total));
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
@@ -114,14 +114,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
* least DRBD_SYNC_MARK_STEP time before it will be modified. */
/* ------------------------ ~18s average ------------------------ */
- i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
- dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+ i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS;
+ dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
stalled = 1;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */
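	/* Editorial worked example, not part of the patch: with dt = 18 s and
	 * db = 90,000 bits newly in sync, db/100+1 = 901; for rs_left =
	 * 1,800,000 bits, rs_left/901 = 1997 and rt = 18*1997/100 = 359 s,
	 * roughly the 360 s you would get from rs_left / (db/dt). Dividing
	 * rs_left by db/100 first keeps the intermediate product small enough
	 * for an unsigned long even on 32-bit machines. */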
seq_printf(seq, "finish: %lu:%02lu:%02lu",
@@ -134,11 +134,11 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
- i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
- dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+ i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+ dt = (jiffies - device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, " -- ");
@@ -147,34 +147,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
/* --------------------- long term average ---------------------- */
/* mean speed since syncer started
* we do account for PausedSync periods */
- dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
+ dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt == 0)
dt = 1;
- db = mdev->rs_total - rs_left;
+ db = device->rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
seq_printf(seq, ")");
- if (mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_VERIFY_S) {
+ if (device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_VERIFY_S) {
seq_printf(seq, " want: ");
- seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
+ seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
if (proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
- unsigned long bm_bits = drbd_bm_bits(mdev);
+ unsigned long bm_bits = drbd_bm_bits(device);
unsigned long bit_pos;
unsigned long long stop_sector = 0;
- if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T) {
- bit_pos = bm_bits - mdev->ov_left;
- if (verify_can_do_stop_sector(mdev))
- stop_sector = mdev->ov_stop_sector;
+ if (device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T) {
+ bit_pos = bm_bits - device->ov_left;
+ if (verify_can_do_stop_sector(device))
+ stop_sector = device->ov_stop_sector;
} else
- bit_pos = mdev->bm_resync_fo;
+ bit_pos = device->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
@@ -202,7 +202,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
struct net_conf *nc;
char wp;
@@ -236,72 +236,72 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
*/
rcu_read_lock();
- idr_for_each_entry(&minors, mdev, i) {
+ idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
seq_printf(seq, "\n");
prev_i = i;
- sn = drbd_conn_str(mdev->state.conn);
+ sn = drbd_conn_str(device->state.conn);
- if (mdev->state.conn == C_STANDALONE &&
- mdev->state.disk == D_DISKLESS &&
- mdev->state.role == R_SECONDARY) {
+ if (device->state.conn == C_STANDALONE &&
+ device->state.disk == D_DISKLESS &&
+ device->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
- /* reset mdev->congestion_reason */
- bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+ /* reset device->congestion_reason */
+ bdi_rw_congested(&device->rq_queue->backing_dev_info);
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
- drbd_role_str(mdev->state.role),
- drbd_role_str(mdev->state.peer),
- drbd_disk_str(mdev->state.disk),
- drbd_disk_str(mdev->state.pdsk),
+ drbd_role_str(device->state.role),
+ drbd_role_str(device->state.peer),
+ drbd_disk_str(device->state.disk),
+ drbd_disk_str(device->state.pdsk),
wp,
- drbd_suspended(mdev) ? 's' : 'r',
- mdev->state.aftr_isp ? 'a' : '-',
- mdev->state.peer_isp ? 'p' : '-',
- mdev->state.user_isp ? 'u' : '-',
- mdev->congestion_reason ?: '-',
- test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
- mdev->send_cnt/2,
- mdev->recv_cnt/2,
- mdev->writ_cnt/2,
- mdev->read_cnt/2,
- mdev->al_writ_cnt,
- mdev->bm_writ_cnt,
- atomic_read(&mdev->local_cnt),
- atomic_read(&mdev->ap_pending_cnt) +
- atomic_read(&mdev->rs_pending_cnt),
- atomic_read(&mdev->unacked_cnt),
- atomic_read(&mdev->ap_bio_cnt),
- mdev->tconn->epochs,
- write_ordering_chars[mdev->tconn->write_ordering]
+ drbd_suspended(device) ? 's' : 'r',
+ device->state.aftr_isp ? 'a' : '-',
+ device->state.peer_isp ? 'p' : '-',
+ device->state.user_isp ? 'u' : '-',
+ device->congestion_reason ?: '-',
+ test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-',
+ device->send_cnt/2,
+ device->recv_cnt/2,
+ device->writ_cnt/2,
+ device->read_cnt/2,
+ device->al_writ_cnt,
+ device->bm_writ_cnt,
+ atomic_read(&device->local_cnt),
+ atomic_read(&device->ap_pending_cnt) +
+ atomic_read(&device->rs_pending_cnt),
+ atomic_read(&device->unacked_cnt),
+ atomic_read(&device->ap_bio_cnt),
+ first_peer_device(device)->connection->epochs,
+ write_ordering_chars[first_peer_device(device)->connection->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
- drbd_bm_total_weight(mdev)));
+ drbd_bm_total_weight(device)));
}
- if (mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
- drbd_syncer_progress(mdev, seq);
-
- if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
- lc_seq_printf_stats(seq, mdev->resync);
- lc_seq_printf_stats(seq, mdev->act_log);
- put_ldev(mdev);
+ if (device->state.conn == C_SYNC_SOURCE ||
+ device->state.conn == C_SYNC_TARGET ||
+ device->state.conn == C_VERIFY_S ||
+ device->state.conn == C_VERIFY_T)
+ drbd_syncer_progress(device, seq);
+
+ if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
+ lc_seq_printf_stats(seq, device->resync);
+ lc_seq_printf_stats(seq, device->act_log);
+ put_ldev(device);
}
if (proc_details >= 2) {
- if (mdev->resync) {
- lc_seq_dump_details(seq, mdev->resync, "rs_left",
+ if (device->resync) {
+ lc_seq_dump_details(seq, device->resync, "rs_left",
resync_dump_detail);
}
}
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
new file mode 100644
index 000000000000..3c04ec0ea333
--- /dev/null
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -0,0 +1,295 @@
+#ifndef __DRBD_PROTOCOL_H
+#define __DRBD_PROTOCOL_H
+
+enum drbd_packet {
+ /* receiver (data socket) */
+ P_DATA = 0x00,
+ P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
+ P_RS_DATA_REPLY = 0x02, /* Response to P_RS_DATA_REQUEST */
+ P_BARRIER = 0x03,
+ P_BITMAP = 0x04,
+ P_BECOME_SYNC_TARGET = 0x05,
+ P_BECOME_SYNC_SOURCE = 0x06,
+ P_UNPLUG_REMOTE = 0x07, /* Used at various times to hint the peer */
+ P_DATA_REQUEST = 0x08, /* Used to ask for a data block */
+ P_RS_DATA_REQUEST = 0x09, /* Used to ask for a data block for resync */
+ P_SYNC_PARAM = 0x0a,
+ P_PROTOCOL = 0x0b,
+ P_UUIDS = 0x0c,
+ P_SIZES = 0x0d,
+ P_STATE = 0x0e,
+ P_SYNC_UUID = 0x0f,
+ P_AUTH_CHALLENGE = 0x10,
+ P_AUTH_RESPONSE = 0x11,
+ P_STATE_CHG_REQ = 0x12,
+
+	/* asender (meta socket) */
+ P_PING = 0x13,
+ P_PING_ACK = 0x14,
+ P_RECV_ACK = 0x15, /* Used in protocol B */
+ P_WRITE_ACK = 0x16, /* Used in protocol C */
+ P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
+ P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
+ P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
+ P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
+ P_BARRIER_ACK = 0x1c,
+ P_STATE_CHG_REPLY = 0x1d,
+
+ /* "new" commands, no longer fitting into the ordering scheme above */
+
+ P_OV_REQUEST = 0x1e, /* data socket */
+ P_OV_REPLY = 0x1f,
+ P_OV_RESULT = 0x20, /* meta socket */
+ P_CSUM_RS_REQUEST = 0x21, /* data socket */
+ P_RS_IS_IN_SYNC = 0x22, /* meta socket */
+ P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
+ P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
+ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
+ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
+ P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
+ P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
+ P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
+ P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
+ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
+ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
+
+ P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
+ P_MAX_OPT_CMD = 0x101,
+
+ /* special command ids for handshake */
+
+ P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
+ P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
+
+ P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
+};
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+/* This is the layout for a packet on the wire.
+ * The byteorder is the network byte order.
+ * (except block_id and barrier fields.
+ * these are pointers to local structs
+ * and have no relevance for the partner,
+ * which just echoes them as received.)
+ *
+ * NOTE that the payload starts at a long aligned offset,
+ * regardless of 32 or 64 bit arch!
+ */
+struct p_header80 {
+ u32 magic;
+ u16 command;
+ u16 length; /* bytes of data after this header */
+} __packed;
+
+/* Header for big packets, Used for data packets exceeding 64kB */
+struct p_header95 {
+ u16 magic; /* use DRBD_MAGIC_BIG here */
+ u16 command;
+ u32 length;
+} __packed;
+
+struct p_header100 {
+ u32 magic;
+ u16 volume;
+ u16 command;
+ u32 length;
+ u32 pad;
+} __packed;
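Aside: a userspace sketch of decoding the 16-byte protocol-100 header from a raw network-byte-order buffer, per the layout comment above. It uses ntohl()/ntohs() where kernel code would use be32_to_cpu()/be16_to_cpu(); struct hdr100 and parse_header100() are made up for the example.

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct hdr100 {
	uint32_t magic;
	uint16_t volume;
	uint16_t command;
	uint32_t length;	/* bytes of payload that follow the header */
};

static void parse_header100(const unsigned char *buf, struct hdr100 *h)
{
	uint32_t u32;
	uint16_t u16;

	memcpy(&u32, buf + 0, 4); h->magic   = ntohl(u32);
	memcpy(&u16, buf + 4, 2); h->volume  = ntohs(u16);
	memcpy(&u16, buf + 6, 2); h->command = ntohs(u16);
	memcpy(&u32, buf + 8, 4); h->length  = ntohl(u32);
	/* bytes 12..15 are the pad field, keeping the payload 8-byte aligned */
}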
+
+/* these defines must not be changed without changing the protocol version */
+#define DP_HARDBARRIER	      1 /* deprecated */
+#define DP_RW_SYNC 2 /* equals REQ_SYNC */
+#define DP_MAY_SET_IN_SYNC 4
+#define DP_UNPLUG 8 /* not used anymore */
+#define DP_FUA 16 /* equals REQ_FUA */
+#define DP_FLUSH 32 /* equals REQ_FLUSH */
+#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
+#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
+
+struct p_data {
+ u64 sector; /* 64 bits sector number */
+ u64 block_id; /* to identify the request in protocol B&C */
+ u32 seq_num;
+ u32 dp_flags;
+} __packed;
+
+/*
+ * commands which share a struct:
+ * p_block_ack:
+ * P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
+ * P_SUPERSEDED (proto C, two-primaries conflict detection)
+ * p_block_req:
+ * P_DATA_REQUEST, P_RS_DATA_REQUEST
+ */
+struct p_block_ack {
+ u64 sector;
+ u64 block_id;
+ u32 blksize;
+ u32 seq_num;
+} __packed;
+
+struct p_block_req {
+ u64 sector;
+ u64 block_id;
+ u32 blksize;
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+/*
+ * commands with their own struct for additional fields:
+ * P_CONNECTION_FEATURES
+ * P_BARRIER
+ * P_BARRIER_ACK
+ * P_SYNC_PARAM
+ * ReportParams
+ */
+
+struct p_connection_features {
+ u32 protocol_min;
+ u32 feature_flags;
+ u32 protocol_max;
+
+ /* should be more than enough for future enhancements
+ * for now, feature_flags and the reserved array shall be zero.
+ */
+
+ u32 _pad;
+ u64 reserved[7];
+} __packed;
+
+struct p_barrier {
+ u32 barrier; /* barrier number _handle_ only */
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+struct p_barrier_ack {
+ u32 barrier;
+ u32 set_size;
+} __packed;
+
+struct p_rs_param {
+ u32 resync_rate;
+
+ /* Since protocol version 88 and higher. */
+ char verify_alg[0];
+} __packed;
+
+struct p_rs_param_89 {
+ u32 resync_rate;
+ /* protocol version 89: */
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+} __packed;
+
+struct p_rs_param_95 {
+ u32 resync_rate;
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+ u32 c_plan_ahead;
+ u32 c_delay_target;
+ u32 c_fill_target;
+ u32 c_max_rate;
+} __packed;
+
+enum drbd_conn_flags {
+ CF_DISCARD_MY_DATA = 1,
+ CF_DRY_RUN = 2,
+};
+
+struct p_protocol {
+ u32 protocol;
+ u32 after_sb_0p;
+ u32 after_sb_1p;
+ u32 after_sb_2p;
+ u32 conn_flags;
+ u32 two_primaries;
+
+ /* Since protocol version 87 and higher. */
+ char integrity_alg[0];
+
+} __packed;
+
+struct p_uuids {
+ u64 uuid[UI_EXTENDED_SIZE];
+} __packed;
+
+struct p_rs_uuid {
+ u64 uuid;
+} __packed;
+
+struct p_sizes {
+ u64 d_size; /* size of disk */
+ u64 u_size; /* user requested size */
+ u64 c_size; /* current exported size */
+ u32 max_bio_size; /* Maximal size of a BIO */
+	u16	  queue_order_type; /* not yet implemented in DRBD */
+ u16 dds_flags; /* use enum dds_flags here. */
+} __packed;
+
+struct p_state {
+ u32 state;
+} __packed;
+
+struct p_req_state {
+ u32 mask;
+ u32 val;
+} __packed;
+
+struct p_req_state_reply {
+ u32 retcode;
+} __packed;
+
+struct p_drbd06_param {
+ u64 size;
+ u32 state;
+ u32 blksize;
+ u32 protocol;
+ u32 version;
+ u32 gen_cnt[5];
+ u32 bit_map_gen[5];
+} __packed;
+
+struct p_block_desc {
+ u64 sector;
+ u32 blksize;
+ u32 pad; /* to multiple of 8 Byte */
+} __packed;
+
+/* Valid values for the encoding field.
+ * Bump proto version when changing this. */
+enum drbd_bitmap_code {
+ /* RLE_VLI_Bytes = 0,
+ * and other bit variants had been defined during
+ * algorithm evaluation. */
+ RLE_VLI_Bits = 2,
+};
+
+struct p_compressed_bm {
+ /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
+ * (encoding & 0x80): polarity (set/unset) of first runlength
+ * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
+ * used to pad up to head.length bytes
+ */
+ u8 encoding;
+
+ u8 code[0];
+} __packed;
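Aside: the three sub-fields packed into the "encoding" byte, extracted exactly as the comment above describes them. The helper names are made up for illustration.

#include <stdint.h>

static inline unsigned bm_code(uint8_t enc)      { return enc & 0x0f; }        /* enum drbd_bitmap_code */
static inline unsigned bm_start_set(uint8_t enc) { return (enc & 0x80) != 0; } /* polarity of the first runlength */
static inline unsigned bm_pad_bits(uint8_t enc)  { return (enc >> 4) & 0x07; } /* trailing zero bits of padding */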
+
+struct p_delay_probe93 {
+ u32 seq_num; /* sequence number to match the two probe packets */
+ u32 offset; /* usecs the probe got sent after the reference time point */
+} __packed;
+
+/*
+ * Bitmap packets need to fit within a single page on the sender and receiver,
+ * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
+ */
+#define DRBD_SOCKET_BUFFER_SIZE 4096
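Aside, assuming the 16-byte struct p_header100 above: the 4096-byte limit leaves 4096 - 16 = 4080 payload bytes, i.e. at most 4080 * 8 = 32640 bitmap bits per uncompressed bitmap packet.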
+
+#endif /* __DRBD_PROTOCOL_H */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..18c76e84d540 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -44,6 +44,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"
@@ -61,11 +62,11 @@ enum finish_epoch {
FE_RECYCLED,
};
-static int drbd_do_features(struct drbd_tconn *tconn);
-static int drbd_do_auth(struct drbd_tconn *tconn);
-static int drbd_disconnected(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_connection *connection);
+static int drbd_do_auth(struct drbd_connection *connection);
+static int drbd_disconnected(struct drbd_peer_device *);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
@@ -150,7 +151,7 @@ static void page_chain_add(struct page **head,
*head = chain_first;
}
-static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+static struct page *__drbd_alloc_pages(struct drbd_device *device,
unsigned int number)
{
struct page *page = NULL;
@@ -196,41 +197,39 @@ static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
struct list_head *to_be_freed)
{
- struct drbd_peer_request *peer_req;
- struct list_head *le, *tle;
+ struct drbd_peer_request *peer_req, *tmp;
/* The EEs are always appended to the end of the list. Since
they are sent in order over the wire, they have to finish
 in order. As soon as we see the first one that has not finished, we can
 stop examining the list... */
- list_for_each_safe(le, tle, &mdev->net_ee) {
- peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
if (drbd_peer_req_has_active_page(peer_req))
break;
- list_move(le, to_be_freed);
+ list_move(&peer_req->w.list, to_be_freed);
}
}
-static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
+static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
{
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->tconn->req_lock);
- reclaim_finished_net_peer_reqs(mdev, &reclaimed);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ reclaim_finished_net_peer_reqs(device, &reclaimed);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(mdev, peer_req);
+ drbd_free_net_peer_req(device, peer_req);
}
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
*
@@ -240,9 +239,10 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
*
* Returns a page chain linked via page->private.
*/
-struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
bool retry)
{
+ struct drbd_device *device = peer_device->device;
struct page *page = NULL;
struct net_conf *nc;
DEFINE_WAIT(wait);
@@ -251,20 +251,20 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&mdev->pp_in_use) < mxb)
- page = __drbd_alloc_pages(mdev, number);
+ if (atomic_read(&device->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(device, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
- drbd_kick_lo_and_reclaim_net(mdev);
+ drbd_kick_lo_and_reclaim_net(device);
- if (atomic_read(&mdev->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(mdev, number);
+ if (atomic_read(&device->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(device, number);
if (page)
break;
}
@@ -273,7 +273,7 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
+ drbd_warn(device, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -282,17 +282,17 @@ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
finish_wait(&drbd_pp_wait, &wait);
if (page)
- atomic_add(number, &mdev->pp_in_use);
+ atomic_add(number, &device->pp_in_use);
return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
- * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
+ * Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
- atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
+ atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
int i;
if (page == NULL)
@@ -310,7 +310,7 @@ static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_ne
}
i = atomic_sub_return(i, a);
if (i < 0)
- dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
+ drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
is_net ? "pp_in_use_by_net" : "pp_in_use", i);
wake_up(&drbd_pp_wait);
}
@@ -330,25 +330,26 @@ You must not have the req_lock:
*/
struct drbd_peer_request *
-drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
- if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
+ if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "%s: allocation failed\n", __func__);
+ drbd_err(device, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
@@ -360,7 +361,7 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
peer_req->i.waiting = false;
peer_req->epoch = NULL;
- peer_req->w.mdev = mdev;
+ peer_req->peer_device = peer_device;
peer_req->pages = page;
atomic_set(&peer_req->pending_bios, 0);
peer_req->flags = 0;
@@ -377,30 +378,30 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
return NULL;
}
-void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
int is_net)
{
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(mdev, peer_req->pages, is_net);
- D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ drbd_free_pages(device, peer_req->pages, is_net);
+ D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &mdev->net_ee;
+ int is_net = list == &device->net_ee;
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(mdev, peer_req, is_net);
+ __drbd_free_peer_req(device, peer_req, is_net);
count++;
}
return count;
@@ -409,20 +410,20 @@ int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
/*
* See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
- spin_lock_irq(&mdev->tconn->req_lock);
- reclaim_finished_net_peer_reqs(mdev, &reclaimed);
- list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ reclaim_finished_net_peer_reqs(device, &reclaimed);
+ list_splice_init(&device->done_ee, &work_list);
+ spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(mdev, peer_req);
+ drbd_free_net_peer_req(device, peer_req);
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
@@ -435,14 +436,14 @@ static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
err2 = peer_req->w.cb(&peer_req->w, !!err);
if (!err)
err = err2;
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
}
- wake_up(&mdev->ee_wait);
+ wake_up(&device->ee_wait);
return err;
}
-static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+static void _drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -450,20 +451,20 @@ static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
/* avoids spin_lock/unlock
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
- prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&device->resource->req_lock);
io_schedule();
- finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->tconn->req_lock);
+ finish_wait(&device->ee_wait, &wait);
+ spin_lock_irq(&device->resource->req_lock);
}
}
-static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+static void drbd_wait_ee_list_empty(struct drbd_device *device,
struct list_head *head)
{
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_wait_ee_list_empty(device, head);
+ spin_unlock_irq(&device->resource->req_lock);
}
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -488,44 +489,44 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
return rv;
}
-static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
int rv;
- rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
+ rv = drbd_recv_short(connection->data.socket, buf, size, 0);
if (rv < 0) {
if (rv == -ECONNRESET)
- conn_info(tconn, "sock was reset by peer\n");
+ drbd_info(connection, "sock was reset by peer\n");
else if (rv != -ERESTARTSYS)
- conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ drbd_err(connection, "sock_recvmsg returned %d\n", rv);
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
- t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
- t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
+ t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
if (t)
goto out;
}
- conn_info(tconn, "sock was shut down by peer\n");
+ drbd_info(connection, "sock was shut down by peer\n");
}
if (rv != size)
- conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
out:
return rv;
}
-static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
- err = drbd_recv(tconn, buf, size);
+ err = drbd_recv(connection, buf, size);
if (err != size) {
if (err >= 0)
err = -EIO;
@@ -534,13 +535,13 @@ static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
return err;
}
-static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
int err;
- err = drbd_recv_all(tconn, buf, size);
+ err = drbd_recv_all(connection, buf, size);
if (err && !signal_pending(current))
- conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ drbd_warn(connection, "short read (expected size %d)\n", (int)size);
return err;
}
@@ -563,7 +564,7 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
}
}
-static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
+static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
const char *what;
struct socket *sock;
@@ -575,7 +576,7 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
int disconnect_on_error = 1;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
@@ -585,16 +586,16 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
connect_int = nc->connect_int;
rcu_read_unlock();
- my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
- memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+ my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &connection->my_addr, my_addr_len);
- if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
src_in6.sin6_port = 0;
else
((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
- peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
- memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
+ peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
what = "sock_create_kern";
err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
@@ -642,17 +643,17 @@ out:
disconnect_on_error = 0;
break;
default:
- conn_err(tconn, "%s failed, err = %d\n", what, err);
+ drbd_err(connection, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
return sock;
}
struct accept_wait_data {
- struct drbd_tconn *tconn;
+ struct drbd_connection *connection;
struct socket *s_listen;
struct completion door_bell;
void (*original_sk_state_change)(struct sock *sk);
@@ -670,7 +671,7 @@ static void drbd_incoming_connection(struct sock *sk)
state_change(sk);
}
-static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int err, sndbuf_size, rcvbuf_size, my_addr_len;
struct sockaddr_in6 my_addr;
@@ -679,7 +680,7 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
const char *what;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return -EIO;
@@ -688,8 +689,8 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
rcvbuf_size = nc->rcvbuf_size;
rcu_read_unlock();
- my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
- memcpy(&my_addr, &tconn->my_addr, my_addr_len);
+ my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &connection->my_addr, my_addr_len);
what = "sock_create_kern";
err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
@@ -725,8 +726,8 @@ out:
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- conn_err(tconn, "%s failed, err = %d\n", what, err);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_err(connection, "%s failed, err = %d\n", what, err);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
@@ -741,14 +742,14 @@ static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad
write_unlock_bh(&sk->sk_callback_lock);
}
-static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
int timeo, connect_int, err = 0;
struct socket *s_estab = NULL;
struct net_conf *nc;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (!nc) {
rcu_read_unlock();
return NULL;
@@ -767,8 +768,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
err = kernel_accept(ad->s_listen, &s_estab, 0);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- conn_err(tconn, "accept failed, err = %d\n", err);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_err(connection, "accept failed, err = %d\n", err);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
@@ -778,29 +779,29 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
return s_estab;
}
-static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
+static int decode_header(struct drbd_connection *, void *, struct packet_info *);
-static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
enum drbd_packet cmd)
{
- if (!conn_prepare_command(tconn, sock))
+ if (!conn_prepare_command(connection, sock))
return -EIO;
- return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
+ return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}
-static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
+static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
struct packet_info pi;
int err;
- err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
if (err != header_size) {
if (err >= 0)
err = -EIO;
return err;
}
- err = decode_header(tconn, tconn->data.rbuf, &pi);
+ err = decode_header(connection, connection->data.rbuf, &pi);
if (err)
return err;
return pi.cmd;
@@ -830,28 +831,29 @@ static int drbd_socket_okay(struct socket **sock)
}
/* Gets called if a connection is established, or if a new minor gets created
in a connection */
-int drbd_connected(struct drbd_conf *mdev)
+int drbd_connected(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
int err;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
+ atomic_set(&device->packet_seq, 0);
+ device->peer_seq = 0;
- mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
- &mdev->tconn->cstate_mutex :
- &mdev->own_state_mutex;
+ device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
+ &peer_device->connection->cstate_mutex :
+ &device->own_state_mutex;
- err = drbd_send_sync_param(mdev);
+ err = drbd_send_sync_param(peer_device);
if (!err)
- err = drbd_send_sizes(mdev, 0, 0);
+ err = drbd_send_sizes(peer_device, 0, 0);
if (!err)
- err = drbd_send_uuids(mdev);
+ err = drbd_send_uuids(peer_device);
if (!err)
- err = drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
- atomic_set(&mdev->ap_in_flight, 0);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ err = drbd_send_current_state(peer_device);
+ clear_bit(USE_DEGR_WFC_T, &device->flags);
+ clear_bit(RESIZE_PENDING, &device->flags);
+ atomic_set(&device->ap_in_flight, 0);
+ mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
return err;
}
@@ -863,59 +865,59 @@ int drbd_connected(struct drbd_conf *mdev)
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int conn_connect(struct drbd_tconn *tconn)
+static int conn_connect(struct drbd_connection *connection)
{
struct drbd_socket sock, msock;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
struct net_conf *nc;
int vnr, timeout, h, ok;
bool discard_my_data;
enum drbd_state_rv rv;
struct accept_wait_data ad = {
- .tconn = tconn,
+ .connection = connection,
.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
};
- clear_bit(DISCONNECT_SENT, &tconn->flags);
- if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &connection->flags);
+ if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
mutex_init(&sock.mutex);
- sock.sbuf = tconn->data.sbuf;
- sock.rbuf = tconn->data.rbuf;
+ sock.sbuf = connection->data.sbuf;
+ sock.rbuf = connection->data.rbuf;
sock.socket = NULL;
mutex_init(&msock.mutex);
- msock.sbuf = tconn->meta.sbuf;
- msock.rbuf = tconn->meta.rbuf;
+ msock.sbuf = connection->meta.sbuf;
+ msock.rbuf = connection->meta.rbuf;
msock.socket = NULL;
/* Assume that the peer only understands protocol 80 until we know better. */
- tconn->agreed_pro_version = 80;
+ connection->agreed_pro_version = 80;
- if (prepare_listen_socket(tconn, &ad))
+ if (prepare_listen_socket(connection, &ad))
return 0;
do {
struct socket *s;
- s = drbd_try_connect(tconn);
+ s = drbd_try_connect(connection);
if (s) {
if (!sock.socket) {
sock.socket = s;
- send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ send_first_packet(connection, &sock, P_INITIAL_DATA);
} else if (!msock.socket) {
- clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ clear_bit(RESOLVE_CONFLICTS, &connection->flags);
msock.socket = s;
- send_first_packet(tconn, &msock, P_INITIAL_META);
+ send_first_packet(connection, &msock, P_INITIAL_META);
} else {
- conn_err(tconn, "Logic error in conn_connect()\n");
+ drbd_err(connection, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
if (sock.socket && msock.socket) {
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
timeout = nc->ping_timeo * HZ / 10;
rcu_read_unlock();
schedule_timeout_interruptible(timeout);
@@ -926,15 +928,15 @@ static int conn_connect(struct drbd_tconn *tconn)
}
retry:
- s = drbd_wait_for_connect(tconn, &ad);
+ s = drbd_wait_for_connect(connection, &ad);
if (s) {
- int fp = receive_first_packet(tconn, s);
+ int fp = receive_first_packet(connection, s);
drbd_socket_okay(&sock.socket);
drbd_socket_okay(&msock.socket);
switch (fp) {
case P_INITIAL_DATA:
if (sock.socket) {
- conn_warn(tconn, "initial packet S crossed\n");
+ drbd_warn(connection, "initial packet S crossed\n");
sock_release(sock.socket);
sock.socket = s;
goto randomize;
@@ -942,9 +944,9 @@ retry:
sock.socket = s;
break;
case P_INITIAL_META:
- set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ set_bit(RESOLVE_CONFLICTS, &connection->flags);
if (msock.socket) {
- conn_warn(tconn, "initial packet M crossed\n");
+ drbd_warn(connection, "initial packet M crossed\n");
sock_release(msock.socket);
msock.socket = s;
goto randomize;
@@ -952,7 +954,7 @@ retry:
msock.socket = s;
break;
default:
- conn_warn(tconn, "Error receiving initial packet\n");
+ drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
if (prandom_u32() & 1)
@@ -960,12 +962,12 @@ randomize:
}
}
- if (tconn->cstate <= C_DISCONNECTING)
+ if (connection->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&tconn->receiver) == EXITING)
+ if (get_t_state(&connection->receiver) == EXITING)
goto out_release_sockets;
}
@@ -986,12 +988,12 @@ randomize:
msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
* sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
* first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
sock.socket->sk->sk_sndtimeo =
sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
@@ -1008,37 +1010,38 @@ randomize:
drbd_tcp_nodelay(sock.socket);
drbd_tcp_nodelay(msock.socket);
- tconn->data.socket = sock.socket;
- tconn->meta.socket = msock.socket;
- tconn->last_received = jiffies;
+ connection->data.socket = sock.socket;
+ connection->meta.socket = msock.socket;
+ connection->last_received = jiffies;
- h = drbd_do_features(tconn);
+ h = drbd_do_features(connection);
if (h <= 0)
return h;
- if (tconn->cram_hmac_tfm) {
- /* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(tconn)) {
+ if (connection->cram_hmac_tfm) {
+ /* drbd_request_state(device, NS(conn, WFAuth)); */
+ switch (drbd_do_auth(connection)) {
case -1:
- conn_err(tconn, "Authentication of peer failed\n");
+ drbd_err(connection, "Authentication of peer failed\n");
return -1;
case 0:
- conn_err(tconn, "Authentication of peer failed, trying again.\n");
+ drbd_err(connection, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- tconn->data.socket->sk->sk_sndtimeo = timeout;
- tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ connection->data.socket->sk->sk_sndtimeo = timeout;
+ connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
+ if (drbd_send_protocol(connection) == -EOPNOTSUPP)
return -1;
- set_bit(STATE_SENT, &tconn->flags);
+ set_bit(STATE_SENT, &connection->flags);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
/* Prevent a race between resync-handshake and
@@ -1048,35 +1051,35 @@ randomize:
* drbd_set_role() is finished, and any incoming drbd_set_role
* will see the STATE_SENT flag, and wait for it to be cleared.
*/
- mutex_lock(mdev->state_mutex);
- mutex_unlock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
+ mutex_unlock(device->state_mutex);
if (discard_my_data)
- set_bit(DISCARD_MY_DATA, &mdev->flags);
+ set_bit(DISCARD_MY_DATA, &device->flags);
else
- clear_bit(DISCARD_MY_DATA, &mdev->flags);
+ clear_bit(DISCARD_MY_DATA, &device->flags);
- drbd_connected(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_connected(peer_device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
- rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
- if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &tconn->flags);
+ rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &connection->flags);
return 0;
}
- drbd_thread_start(&tconn->asender);
+ drbd_thread_start(&connection->asender);
- mutex_lock(&tconn->conf_update);
+ mutex_lock(&connection->resource->conf_update);
/* The discard_my_data flag is a single-shot modifier to the next
* connection attempt, the handshake of which is now well underway.
* No need for rcu style copying of the whole struct
* just to clear a single value. */
- tconn->net_conf->discard_my_data = 0;
- mutex_unlock(&tconn->conf_update);
+ connection->net_conf->discard_my_data = 0;
+ mutex_unlock(&connection->resource->conf_update);
return h;
@@ -1090,15 +1093,15 @@ out_release_sockets:
return -1;
}
-static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
+static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
if (header_size == sizeof(struct p_header100) &&
*(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
struct p_header100 *h = header;
if (h->pad != 0) {
- conn_err(tconn, "Header padding is not zero\n");
+ drbd_err(connection, "Header padding is not zero\n");
return -EINVAL;
}
pi->vnr = be16_to_cpu(h->volume);
@@ -1117,55 +1120,57 @@ static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_i
pi->size = be16_to_cpu(h->length);
pi->vnr = 0;
} else {
- conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
be32_to_cpu(*(__be32 *)header),
- tconn->agreed_pro_version);
+ connection->agreed_pro_version);
return -EINVAL;
}
pi->data = header + header_size;
return 0;
}
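decode_header() above picks the header layout from the big-endian magic word at the front of the receive buffer. A minimal userspace sketch of that dispatch, with invented magic values and struct layouts standing in for the real p_header definitions:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented magic values; the real DRBD_MAGIC_* constants live in the shared
 * protocol headers and are not part of this file. */
#define SKETCH_MAGIC_100 0x11223344u
#define SKETCH_MAGIC_80  0x55667788u

struct hdr100 { uint32_t magic; uint16_t volume; uint16_t command; uint32_t length; };
struct hdr80  { uint32_t magic; uint16_t command; uint16_t length; };

struct pkt_info { int vnr; unsigned int cmd; unsigned int size; };

/* Select the header layout from the wire magic, as decode_header() does. */
static int decode(const unsigned char *buf, struct pkt_info *pi)
{
	uint32_t magic;

	memcpy(&magic, buf, sizeof(magic));
	magic = ntohl(magic);

	if (magic == SKETCH_MAGIC_100) {
		struct hdr100 h;

		memcpy(&h, buf, sizeof(h));
		pi->vnr  = ntohs(h.volume);
		pi->cmd  = ntohs(h.command);
		pi->size = ntohl(h.length);
	} else if (magic == SKETCH_MAGIC_80) {
		struct hdr80 h;

		memcpy(&h, buf, sizeof(h));
		pi->vnr  = 0;		/* old headers carry no volume number */
		pi->cmd  = ntohs(h.command);
		pi->size = ntohs(h.length);
	} else {
		fprintf(stderr, "Wrong magic value 0x%08x\n", magic);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct hdr100 h = { htonl(SKETCH_MAGIC_100), htons(2), htons(7), htonl(4096) };
	struct pkt_info pi;

	if (decode((const unsigned char *)&h, &pi) == 0)
		printf("vnr=%d cmd=%u size=%u\n", pi.vnr, pi.cmd, pi.size);
	return 0;
}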
-static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
- void *buffer = tconn->data.rbuf;
+ void *buffer = connection->data.rbuf;
int err;
- err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
if (err)
return err;
- err = decode_header(tconn, buffer, pi);
- tconn->last_received = jiffies;
+ err = decode_header(connection, buffer, pi);
+ connection->last_received = jiffies;
return err;
}
-static void drbd_flush(struct drbd_tconn *tconn)
+static void drbd_flush(struct drbd_connection *connection)
{
int rv;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
- if (tconn->write_ordering >= WO_bdev_flush) {
+ if (connection->write_ordering >= WO_bdev_flush) {
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (!get_ldev(mdev))
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (!get_ldev(device))
continue;
- kref_get(&mdev->kref);
+ kref_get(&device->kref);
rcu_read_unlock();
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ rv = blkdev_issue_flush(device->ldev->backing_bdev,
GFP_NOIO, NULL);
if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ drbd_info(device, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(tconn, WO_drain_io);
+ drbd_bump_write_ordering(connection, WO_drain_io);
}
- put_ldev(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
if (rv)
@@ -1177,11 +1182,11 @@ static void drbd_flush(struct drbd_tconn *tconn)
/**
* drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -1189,7 +1194,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&tconn->epoch_lock);
+ spin_lock(&connection->epoch_lock);
do {
next_epoch = NULL;
@@ -1211,22 +1216,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&tconn->epoch_lock);
- drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
- spin_lock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
+ drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
+ spin_lock(&connection->epoch_lock);
}
#if 0
/* FIXME: dec unacked on connection, once we have
* something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(epoch->tconn);
+ dec_unacked(epoch->connection);
#endif
- if (tconn->current_epoch != epoch) {
+ if (connection->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- tconn->epochs--;
+ connection->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1246,20 +1251,20 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
epoch = next_epoch;
} while (1);
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
return rv;
}
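The finish test above reduces to per-epoch counters plus a barrier flag. A rough userspace model of just that bookkeeping, without the locking, list walking, or kref handling of the real function; field names mirror the ones visible in the hunk:

#include <stdbool.h>
#include <stdio.h>

/* Simplified view of one write epoch. */
struct epoch_model {
	unsigned int epoch_size;	/* writes received in this epoch */
	unsigned int active;		/* writes not yet completed on disk */
	bool have_barrier_nr;		/* the peer's P_BARRIER for it arrived */
};

/* An epoch may be acknowledged (P_BARRIER_ACK) once nothing is in flight and
 * we know which barrier number to acknowledge, or when cleaning up anyway. */
static bool epoch_can_finish(const struct epoch_model *e, bool cleanup)
{
	return e->active == 0 && (e->have_barrier_nr || cleanup);
}

int main(void)
{
	struct epoch_model e = { .epoch_size = 3, .active = 1, .have_barrier_nr = true };

	printf("%d\n", epoch_can_finish(&e, false));	/* 0: a write is still in flight */
	e.active = 0;
	printf("%d\n", epoch_can_finish(&e, false));	/* 1: ready for the barrier ack */
	return 0;
}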
/**
* drbd_bump_write_ordering() - Fall back to another write ordering method
- * @tconn: DRBD connection.
+ * @connection: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo)
{
struct disk_conf *dc;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum write_ordering_e pwo;
int vnr;
static char *write_ordering_str[] = {
@@ -1268,29 +1273,31 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
[WO_bdev_flush] = "flush",
};
- pwo = tconn->write_ordering;
+ pwo = connection->write_ordering;
wo = min(pwo, wo);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (!get_ldev_if_state(mdev, D_ATTACHING))
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (!get_ldev_if_state(device, D_ATTACHING))
continue;
- dc = rcu_dereference(mdev->ldev->disk_conf);
+ dc = rcu_dereference(device->ldev->disk_conf);
if (wo == WO_bdev_flush && !dc->disk_flushes)
wo = WO_drain_io;
if (wo == WO_drain_io && !dc->disk_drain)
wo = WO_none;
- put_ldev(mdev);
+ put_ldev(device);
}
rcu_read_unlock();
- tconn->write_ordering = wo;
- if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
- conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
+ connection->write_ordering = wo;
+ if (pwo != connection->write_ordering || wo == WO_bdev_flush)
+ drbd_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]);
}
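drbd_bump_write_ordering() only ever moves to a weaker method, and every attached disk may force a further downgrade. A small userspace sketch of that decision, with disk_conf reduced to two booleans and the same weakest-to-strongest enum ordering that min() relies on:

#include <stdio.h>

enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush };

static const char *write_ordering_str[] = { "none", "drain", "flush" };

/* Stand-in for the relevant bits of disk_conf. */
struct fake_disk { int disk_flushes; int disk_drain; };

static enum write_ordering_e
bump_write_ordering(enum write_ordering_e pwo, enum write_ordering_e wo,
		    const struct fake_disk *disk, int n_disks)
{
	int i;

	wo = wo < pwo ? wo : pwo;	/* never upgrade */
	for (i = 0; i < n_disks; i++) {
		if (wo == WO_bdev_flush && !disk[i].disk_flushes)
			wo = WO_drain_io;
		if (wo == WO_drain_io && !disk[i].disk_drain)
			wo = WO_none;
	}
	return wo;
}

int main(void)
{
	struct fake_disk disks[] = { { 1, 1 }, { 0, 1 } };	/* second disk cannot flush */
	enum write_ordering_e wo = bump_write_ordering(WO_bdev_flush, WO_bdev_flush, disks, 2);

	printf("Method to ensure write ordering: %s\n", write_ordering_str[wo]);	/* drain */
	return 0;
}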
/**
* drbd_submit_peer_request()
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
@@ -1305,7 +1312,7 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_peer_request(struct drbd_conf *mdev,
+int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
const unsigned rw, const int fault_type)
{
@@ -1329,12 +1336,12 @@ int drbd_submit_peer_request(struct drbd_conf *mdev,
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
- dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+ drbd_err(device, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
- bio->bi_sector = sector;
- bio->bi_bdev = mdev->ldev->backing_bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = device->ldev->backing_bdev;
bio->bi_rw = rw;
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1350,10 +1357,10 @@ next_bio:
* But in case it fails anyways,
* we deal with it, and complain (below). */
if (bio->bi_vcnt == 0) {
- dev_err(DEV,
+ drbd_err(device,
"bio_add_page failed for len=%u, "
"bi_vcnt=0 (bi_sector=%llu)\n",
- len, (unsigned long long)bio->bi_sector);
+ len, (uint64_t)bio->bi_iter.bi_sector);
err = -ENOSPC;
goto fail;
}
@@ -1363,8 +1370,8 @@ next_bio:
sector += len >> 9;
--nr_pages;
}
- D_ASSERT(page == NULL);
- D_ASSERT(ds == 0);
+ D_ASSERT(device, page == NULL);
+ D_ASSERT(device, ds == 0);
atomic_set(&peer_req->pending_bios, n_bios);
do {
@@ -1372,7 +1379,7 @@ next_bio:
bios = bios->bi_next;
bio->bi_next = NULL;
- drbd_generic_make_request(mdev, fault_type, bio);
+ drbd_generic_make_request(device, fault_type, bio);
} while (bios);
return 0;
@@ -1385,36 +1392,44 @@ fail:
return err;
}
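Because bio_add_page() may refuse a page, drbd_submit_peer_request() spreads the request's page chain over as many bios as it takes. A toy userspace model of that "retry on a fresh bio" loop; the per-bio limit here is invented:

#include <stdio.h>
#include <stdlib.h>

#define MAX_VECS_PER_BIO 4	/* stands in for whatever bio_add_page() enforces */

struct fake_bio {
	int nvecs;
	struct fake_bio *next;	/* singly linked, like the bios/bi_next chain above */
};

/* Fails once the bio is "full", the way bio_add_page() may fail. */
static int fake_bio_add_page(struct fake_bio *bio)
{
	if (bio->nvecs >= MAX_VECS_PER_BIO)
		return 0;
	bio->nvecs++;
	return 1;
}

/* Spread nr_pages over as many bios as needed, mirroring the next_bio: loop. */
static int split_into_bios(int nr_pages, struct fake_bio **chain, int *n_bios)
{
	struct fake_bio *bio = NULL;

	*chain = NULL;
	*n_bios = 0;
	while (nr_pages) {
		if (!bio || !fake_bio_add_page(bio)) {
			bio = calloc(1, sizeof(*bio));
			if (!bio)
				return -1;
			bio->next = *chain;	/* prepend, as the kernel code does */
			*chain = bio;
			(*n_bios)++;
			continue;		/* retry the same page on the new bio */
		}
		nr_pages--;
	}
	return 0;
}

int main(void)
{
	struct fake_bio *chain;
	int n_bios;

	if (split_into_bios(10, &chain, &n_bios) == 0)
		printf("10 pages -> %d bios\n", n_bios);	/* 3 with a limit of 4 */
	return 0;
}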
-static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
struct drbd_interval *i = &peer_req->i;
- drbd_remove_interval(&mdev->write_requests, i);
+ drbd_remove_interval(&device->write_requests, i);
drbd_clear_interval(i);
/* Wake up any processes waiting for this peer request to complete. */
if (i->waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
-void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_wait_ee_list_empty(device, &device->active_ee);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
}
-static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+static struct drbd_peer_device *
+conn_peer_device(struct drbd_connection *connection, int volume_number)
+{
+ return idr_find(&connection->peer_devices, volume_number);
+}
+
+static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
{
int rv;
struct p_barrier *p = pi->data;
@@ -1423,16 +1438,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
/* FIXME these are unacked on connection,
* not a specific (peer)device.
*/
- tconn->current_epoch->barrier_nr = p->barrier;
- tconn->current_epoch->tconn = tconn;
- rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
+ connection->current_epoch->barrier_nr = p->barrier;
+ connection->current_epoch->connection = connection;
+ rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (tconn->write_ordering) {
+ switch (connection->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
return 0;
@@ -1443,15 +1458,15 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
if (epoch)
break;
else
- conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
+ drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- conn_wait_active_ee_empty(tconn);
- drbd_flush(tconn);
+ conn_wait_active_ee_empty(connection);
+ drbd_flush(connection);
- if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ if (atomic_read(&connection->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
@@ -1459,7 +1474,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
return 0;
default:
- conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ drbd_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering);
return -EIO;
}
@@ -1467,16 +1482,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&tconn->epoch_lock);
- if (atomic_read(&tconn->current_epoch->epoch_size)) {
- list_add(&epoch->list, &tconn->current_epoch->list);
- tconn->current_epoch = epoch;
- tconn->epochs++;
+ spin_lock(&connection->epoch_lock);
+ if (atomic_read(&connection->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &connection->current_epoch->list);
+ connection->current_epoch = epoch;
+ connection->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
return 0;
}
@@ -1484,25 +1499,26 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
static struct drbd_peer_request *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
int data_size) __must_hold(local)
{
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ struct drbd_device *device = peer_device->device;
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
struct drbd_peer_request *peer_req;
struct page *page;
int dgs, ds, err;
- void *dig_in = mdev->tconn->int_dig_in;
- void *dig_vv = mdev->tconn->int_dig_vv;
+ void *dig_in = peer_device->connection->int_dig_in;
+ void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
dgs = 0;
- if (mdev->tconn->peer_integrity_tfm) {
- dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ if (peer_device->connection->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
*/
- err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
if (err)
return NULL;
data_size -= dgs;
@@ -1516,7 +1532,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
/* even though we trust our peer,
* we sometimes have to double check. */
if (sector + (data_size>>9) > capacity) {
- dev_err(DEV, "request from peer beyond end of local disk: "
+ drbd_err(device, "request from peer beyond end of local disk: "
"capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
(unsigned long long)sector, data_size);
@@ -1526,7 +1542,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO);
if (!peer_req)
return NULL;
@@ -1538,36 +1554,36 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- err = drbd_recv_all_warn(mdev->tconn, data, len);
- if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
- dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+ err = drbd_recv_all_warn(peer_device->connection, data, len);
+ if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
+ drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
if (err) {
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
return NULL;
}
ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
+ drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
- dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
+ drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
return NULL;
}
}
- mdev->recv_cnt += data_size>>9;
+ device->recv_cnt += data_size>>9;
return peer_req;
}
/* drbd_drain_block() just takes a data block
* out of the socket input buffer, and discards it.
*/
-static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
+static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{
struct page *page;
int err = 0;
@@ -1576,35 +1592,36 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
if (!data_size)
return 0;
- page = drbd_alloc_pages(mdev, 1, 1);
+ page = drbd_alloc_pages(peer_device, 1, 1);
data = kmap(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
- err = drbd_recv_all_warn(mdev->tconn, data, len);
+ err = drbd_recv_all_warn(peer_device->connection, data, len);
if (err)
break;
data_size -= len;
}
kunmap(page);
- drbd_free_pages(mdev, page, 0);
+ drbd_free_pages(peer_device->device, page, 0);
return err;
}
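drbd_drain_block() throws the payload away through one scratch page instead of buffering it. A userspace sketch of the same chunked draining, with a plain read() standing in for drbd_recv_all_warn():

#include <stdio.h>
#include <unistd.h>

#define SCRATCH_SIZE 4096	/* stands in for a single page */

/* Read and discard 'size' bytes from fd, reusing one scratch buffer. */
static int drain_block(int fd, size_t size)
{
	char scratch[SCRATCH_SIZE];

	while (size) {
		size_t len = size < sizeof(scratch) ? size : sizeof(scratch);
		ssize_t got = read(fd, scratch, len);

		if (got <= 0)
			return -1;	/* peer went away, or a real error */
		size -= (size_t)got;
	}
	return 0;
}

int main(void)
{
	FILE *f = fopen("/dev/zero", "r");

	if (!f)
		return 1;
	printf("%d\n", drain_block(fileno(f), 10000));	/* 0: drained in 4 KiB chunks */
	fclose(f);
	return 0;
}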
-static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
+static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
sector_t sector, int data_size)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
- int dgs, err, i, expect;
- void *dig_in = mdev->tconn->int_dig_in;
- void *dig_vv = mdev->tconn->int_dig_vv;
+ int dgs, err, expect;
+ void *dig_in = peer_device->connection->int_dig_in;
+ void *dig_vv = peer_device->connection->int_dig_vv;
dgs = 0;
- if (mdev->tconn->peer_integrity_tfm) {
- dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
- err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (peer_device->connection->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
if (err)
return err;
data_size -= dgs;
@@ -1612,30 +1629,30 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
- mdev->recv_cnt += data_size>>9;
+ peer_device->device->recv_cnt += data_size>>9;
bio = req->master_bio;
- D_ASSERT(sector == bio->bi_sector);
+ D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
- bio_for_each_segment(bvec, bio, i) {
- void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
- expect = min_t(int, data_size, bvec->bv_len);
- err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
- kunmap(bvec->bv_page);
+ bio_for_each_segment(bvec, bio, iter) {
+ void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+ expect = min_t(int, data_size, bvec.bv_len);
+ err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
+ kunmap(bvec.bv_page);
if (err)
return err;
data_size -= expect;
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
+ drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
- dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
+ drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
return -EINVAL;
}
}
- D_ASSERT(data_size == 0);
+ D_ASSERT(peer_device->device, data_size == 0);
return 0;
}
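Both read_in_block() and recv_dless_read() receive the peer's digest first, recompute it over the received payload, and compare. A minimal sketch of that check, with a toy checksum standing in for the configured crypto_hash transform:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 32-bit checksum; the driver uses whatever integrity algorithm was negotiated. */
static uint32_t toy_csum(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = sum * 31 + buf[i];
	return sum;
}

/* Compare the digest sent by the peer (dig_in) with one computed locally (dig_vv). */
static int verify_payload(const unsigned char *data, size_t len, uint32_t dig_in)
{
	uint32_t dig_vv = toy_csum(data, len);

	if (memcmp(&dig_in, &dig_vv, sizeof(dig_vv))) {
		fprintf(stderr, "Digest integrity check FAILED\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char payload[] = "replicated block";
	uint32_t dig = toy_csum(payload, sizeof(payload));

	printf("%d\n", verify_payload(payload, sizeof(payload), dig));	/*  0 */
	payload[0] ^= 0xff;						/* corrupt one byte */
	printf("%d\n", verify_payload(payload, sizeof(payload), dig));	/* -1 */
	return 0;
}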
@@ -1647,64 +1664,67 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err;
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, peer_req->i.size);
- err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
+ drbd_set_in_sync(device, sector, peer_req->i.size);
+ err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, peer_req->i.size);
+ drbd_rs_failed_io(device, sector, peer_req->i.size);
- err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
}
- dec_unacked(mdev);
+ dec_unacked(device);
return err;
}
-static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
+static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
+ int data_size) __releases(local)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
- peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size);
if (!peer_req)
goto fail;
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
- inc_unacked(mdev);
+ inc_unacked(device);
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add(&peer_req->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add(&peer_req->w.list, &device->sync_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ atomic_add(data_size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
fail:
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
static struct drbd_request *
-find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+find_request(struct drbd_device *device, struct rb_root *root, u64 id,
sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
@@ -1714,36 +1734,38 @@ find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
return req;
if (!missing_ok) {
- dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
(unsigned long)id, (unsigned long long)sector);
}
return NULL;
}
-static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct drbd_request *req;
sector_t sector;
int err;
struct p_data *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->tconn->req_lock);
- req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&device->resource->req_lock);
if (unlikely(!req))
return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- err = recv_dless_read(mdev, req, sector, pi->size);
+ err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
@@ -1753,46 +1775,48 @@ static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
return err;
}
-static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
int err;
struct p_data *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
- D_ASSERT(p->block_id == ID_SYNCER);
+ D_ASSERT(device, p->block_id == ID_SYNCER);
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
* or in drbd_peer_request_endio. */
- err = recv_resync_read(mdev, sector, pi->size);
+ err = recv_resync_read(peer_device, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Can not write resync data to local disk.\n");
+ drbd_err(device, "Can not write resync data to local disk.\n");
- err = drbd_drain_block(mdev, pi->size);
+ err = drbd_drain_block(peer_device, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
}
- atomic_add(pi->size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &device->rs_sect_in);
return err;
}
-static void restart_conflicting_writes(struct drbd_conf *mdev,
+static void restart_conflicting_writes(struct drbd_device *device,
sector_t sector, int size)
{
struct drbd_interval *i;
struct drbd_request *req;
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (!i->local)
continue;
req = container_of(i, struct drbd_request, i);
@@ -1812,52 +1836,53 @@ static int e_end_block(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
int err = 0, pcmd;
if (peer_req->flags & EE_SEND_WRITE_ACK) {
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
- mdev->state.conn <= C_PAUSED_SYNC_T &&
+ pcmd = (device->state.conn >= C_SYNC_SOURCE &&
+ device->state.conn <= C_PAUSED_SYNC_T &&
peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- err = drbd_send_ack(mdev, pcmd, peer_req);
+ err = drbd_send_ack(peer_device, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ drbd_set_in_sync(device, sector, peer_req->i.size);
} else {
- err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
- dec_unacked(mdev);
+ dec_unacked(device);
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (peer_req->flags & EE_IN_INTERVAL_TREE) {
- spin_lock_irq(&mdev->tconn->req_lock);
- D_ASSERT(!drbd_interval_empty(&peer_req->i));
- drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_lock_irq(&device->resource->req_lock);
+ D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(device, peer_req);
if (peer_req->flags & EE_RESTART_REQUESTS)
- restart_conflicting_writes(mdev, sector, peer_req->i.size);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ restart_conflicting_writes(device, sector, peer_req->i.size);
+ spin_unlock_irq(&device->resource->req_lock);
} else
- D_ASSERT(drbd_interval_empty(&peer_req->i));
+ D_ASSERT(device, drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
return err;
}
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_conf *mdev = w->mdev;
struct drbd_peer_request *peer_req =
container_of(w, struct drbd_peer_request, w);
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
int err;
- err = drbd_send_ack(mdev, ack, peer_req);
- dec_unacked(mdev);
+ err = drbd_send_ack(peer_device, ack, peer_req);
+ dec_unacked(peer_device->device);
return err;
}
@@ -1869,9 +1894,11 @@ static int e_send_superseded(struct drbd_work *w, int unused)
static int e_send_retry_write(struct drbd_work *w, int unused)
{
- struct drbd_tconn *tconn = w->mdev->tconn;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_connection *connection = peer_req->peer_device->connection;
- return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ return e_send_ack(w, connection->agreed_pro_version >= 100 ?
P_RETRY_WRITE : P_SUPERSEDED);
}
@@ -1890,18 +1917,19 @@ static u32 seq_max(u32 a, u32 b)
return seq_greater(a, b) ? a : b;
}
-static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
+static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
{
+ struct drbd_device *device = peer_device->device;
unsigned int newest_peer_seq;
- if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
- spin_lock(&mdev->peer_seq_lock);
- newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
- mdev->peer_seq = newest_peer_seq;
- spin_unlock(&mdev->peer_seq_lock);
- /* wake up only if we actually changed mdev->peer_seq */
+ if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
+ spin_lock(&device->peer_seq_lock);
+ newest_peer_seq = seq_max(device->peer_seq, peer_seq);
+ device->peer_seq = newest_peer_seq;
+ spin_unlock(&device->peer_seq_lock);
+ /* wake up only if we actually changed device->peer_seq */
if (peer_seq == newest_peer_seq)
- wake_up(&mdev->seq_wait);
+ wake_up(&device->seq_wait);
}
}
@@ -1911,20 +1939,20 @@ static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
}
/* maybe change sync_ee into interval trees as well? */
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_for_each_entry(rs_req, &device->sync_ee, w.list) {
if (overlaps(peer_req->i.sector, peer_req->i.size,
rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
return rv;
}
@@ -1938,9 +1966,9 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_re
*
* Note: we don't care for Ack packets overtaking P_DATA packets.
*
- * In case packet_seq is larger than mdev->peer_seq number, there are
+ * In case packet_seq is larger than device->peer_seq number, there are
* outstanding packets on the msock. We wait for them to arrive.
- * In case we are the logically next packet, we update mdev->peer_seq
+ * In case we are the logically next packet, we update device->peer_seq
* ourselves. Correctly handles 32bit wrap around.
*
* Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
@@ -1950,19 +1978,20 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_re
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
+static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
{
+ struct drbd_device *device = peer_device->device;
DEFINE_WAIT(wait);
long timeout;
int ret = 0, tp;
- if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
+ if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
return 0;
- spin_lock(&mdev->peer_seq_lock);
+ spin_lock(&device->peer_seq_lock);
for (;;) {
- if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
- mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ if (!seq_greater(peer_seq - 1, device->peer_seq)) {
+ device->peer_seq = seq_max(device->peer_seq, peer_seq);
break;
}
@@ -1972,35 +2001,35 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s
}
rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
rcu_read_unlock();
if (!tp)
break;
/* Only need to wait if two_primaries is enabled */
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock(&mdev->peer_seq_lock);
+ prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock(&device->peer_seq_lock);
rcu_read_lock();
- timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
rcu_read_unlock();
timeout = schedule_timeout(timeout);
- spin_lock(&mdev->peer_seq_lock);
+ spin_lock(&device->peer_seq_lock);
if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
+ drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- spin_unlock(&mdev->peer_seq_lock);
- finish_wait(&mdev->seq_wait, &wait);
+ spin_unlock(&device->peer_seq_lock);
+ finish_wait(&device->seq_wait, &wait);
return ret;
}
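The peer_seq logic above depends on comparisons that stay correct across 32-bit wrap-around. A minimal sketch of the usual signed-difference idiom; whether seq_greater() is written exactly this way is not visible in these hunks:

#include <stdint.h>
#include <stdio.h>

/* A sequence number is "greater" if the wrapped difference is positive. */
static int seq_greater(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static uint32_t seq_max(uint32_t a, uint32_t b)
{
	return seq_greater(a, b) ? a : b;
}

int main(void)
{
	/* 2 comes "after" 0xfffffffe even though it is numerically smaller. */
	printf("%d\n", seq_greater(2u, 0xfffffffeu));	/* 1 */
	printf("%u\n", seq_max(2u, 0xfffffffeu));	/* 2 */
	printf("%d\n", seq_greater(0xfffffffeu, 2u));	/* 0 */
	return 0;
}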
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
+static unsigned long wire_flags_to_bio(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
@@ -2008,13 +2037,13 @@ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
-static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
unsigned int size)
{
struct drbd_interval *i;
repeat:
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
struct drbd_request *req;
struct bio_and_error m;
@@ -2025,19 +2054,19 @@ static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
continue;
req->rq_state &= ~RQ_POSTPONED;
__req_mod(req, NEG_ACKED, &m);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
- spin_lock_irq(&mdev->tconn->req_lock);
+ complete_master_bio(device, &m);
+ spin_lock_irq(&device->resource->req_lock);
goto repeat;
}
}
-static int handle_write_conflicts(struct drbd_conf *mdev,
+static int handle_write_conflicts(struct drbd_device *device,
struct drbd_peer_request *peer_req)
{
- struct drbd_tconn *tconn = mdev->tconn;
- bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ struct drbd_connection *connection = peer_req->peer_device->connection;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
sector_t sector = peer_req->i.sector;
const unsigned int size = peer_req->i.size;
struct drbd_interval *i;
@@ -2048,10 +2077,10 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* Inserting the peer request into the write_requests tree will prevent
* new conflicting local requests from being added.
*/
- drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+ drbd_insert_interval(&device->write_requests, &peer_req->i);
repeat:
- drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
if (i == &peer_req->i)
continue;
@@ -2061,7 +2090,7 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* should not happen in a two-node setup. Wait for the
* earlier peer request to complete.
*/
- err = drbd_wait_misc(mdev, i);
+ err = drbd_wait_misc(device, i);
if (err)
goto out;
goto repeat;
@@ -2079,18 +2108,18 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
(i->size >> 9) >= sector + (size >> 9);
if (!equal)
- dev_alert(DEV, "Concurrent writes detected: "
+ drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u, "
"assuming %s came first\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size,
superseded ? "local" : "remote");
- inc_unacked(mdev);
+ inc_unacked(device);
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
- list_add_tail(&peer_req->w.list, &mdev->done_ee);
- wake_asender(mdev->tconn);
+ list_add_tail(&peer_req->w.list, &device->done_ee);
+ wake_asender(connection);
err = -ENOENT;
goto out;
@@ -2099,7 +2128,7 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
container_of(i, struct drbd_request, i);
if (!equal)
- dev_alert(DEV, "Concurrent writes detected: "
+ drbd_alert(device, "Concurrent writes detected: "
"local=%llus +%u, remote=%llus +%u\n",
(unsigned long long)i->sector, i->size,
(unsigned long long)sector, size);
@@ -2117,12 +2146,10 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
* request to finish locally before submitting
* the conflicting peer request.
*/
- err = drbd_wait_misc(mdev, &req->i);
+ err = drbd_wait_misc(device, &req->i);
if (err) {
- _conn_request_state(mdev->tconn,
- NS(conn, C_TIMEOUT),
- CS_HARD);
- fail_postponed_requests(mdev, sector, size);
+ _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
+ fail_postponed_requests(device, sector, size);
goto out;
}
goto repeat;
@@ -2138,14 +2165,15 @@ static int handle_write_conflicts(struct drbd_conf *mdev,
out:
if (err)
- drbd_remove_epoch_entry_interval(mdev, peer_req);
+ drbd_remove_epoch_entry_interval(device, peer_req);
return err;
}
/* mirrored write */
-static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
@@ -2154,17 +2182,18 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
u32 dp_flags;
int err, tp;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
int err2;
- err = wait_for_and_update_peer_seq(mdev, peer_seq);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
- atomic_inc(&tconn->current_epoch->epoch_size);
- err2 = drbd_drain_block(mdev, pi->size);
+ err = wait_for_and_update_peer_seq(peer_device, peer_seq);
+ drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
+ atomic_inc(&connection->current_epoch->epoch_size);
+ err2 = drbd_drain_block(peer_device, pi->size);
if (!err)
err = err2;
return err;
@@ -2177,61 +2206,61 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
*/
sector = be64_to_cpu(p->sector);
- peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ peer_req = read_in_block(peer_device, p->block_id, sector, pi->size);
if (!peer_req) {
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
}
peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
- rw |= wire_flags_to_bio(mdev, dp_flags);
+ rw |= wire_flags_to_bio(dp_flags);
if (peer_req->pages == NULL) {
- D_ASSERT(peer_req->i.size == 0);
- D_ASSERT(dp_flags & DP_FLUSH);
+ D_ASSERT(device, peer_req->i.size == 0);
+ D_ASSERT(device, dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
peer_req->flags |= EE_MAY_SET_IN_SYNC;
- spin_lock(&tconn->epoch_lock);
- peer_req->epoch = tconn->current_epoch;
+ spin_lock(&connection->epoch_lock);
+ peer_req->epoch = connection->current_epoch;
atomic_inc(&peer_req->epoch->epoch_size);
atomic_inc(&peer_req->epoch->active);
- spin_unlock(&tconn->epoch_lock);
+ spin_unlock(&connection->epoch_lock);
rcu_read_lock();
- tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
rcu_read_unlock();
if (tp) {
peer_req->flags |= EE_IN_INTERVAL_TREE;
- err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ err = wait_for_and_update_peer_seq(peer_device, peer_seq);
if (err)
goto out_interrupted;
- spin_lock_irq(&mdev->tconn->req_lock);
- err = handle_write_conflicts(mdev, peer_req);
+ spin_lock_irq(&device->resource->req_lock);
+ err = handle_write_conflicts(device, peer_req);
if (err) {
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (err == -ENOENT) {
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
goto out_interrupted;
}
} else {
- update_peer_seq(mdev, peer_seq);
- spin_lock_irq(&mdev->tconn->req_lock);
+ update_peer_seq(peer_device, peer_seq);
+ spin_lock_irq(&device->resource->req_lock);
}
- list_add(&peer_req->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &device->active_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
+ if (device->state.conn == C_SYNC_TARGET)
+ wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
- if (mdev->tconn->agreed_pro_version < 100) {
+ if (peer_device->connection->agreed_pro_version < 100) {
rcu_read_lock();
- switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ switch (rcu_dereference(peer_device->connection->net_conf)->wire_protocol) {
case DRBD_PROT_C:
dp_flags |= DP_SEND_WRITE_ACK;
break;
@@ -2244,7 +2273,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
if (dp_flags & DP_SEND_WRITE_ACK) {
peer_req->flags |= EE_SEND_WRITE_ACK;
- inc_unacked(mdev);
+ inc_unacked(device);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
}
@@ -2252,34 +2281,34 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, P_RECV_ACK, peer_req);
+ drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req);
}
- if (mdev->state.pdsk < D_INCONSISTENT) {
+ if (device->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, &peer_req->i, true);
+ drbd_al_begin_io(device, &peer_req->i, true);
}
- err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
if (!err)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- drbd_remove_epoch_entry_interval(mdev, peer_req);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ drbd_remove_epoch_entry_interval(device, peer_req);
+ spin_unlock_irq(&device->resource->req_lock);
if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, &peer_req->i);
+ drbd_al_complete_io(device, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
- put_ldev(mdev);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
+ put_ldev(device);
+ drbd_free_peer_req(device, peer_req);
return err;
}
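
For peers speaking a protocol version older than 100, the write-ack behaviour is derived from the configured replication protocol, as the hunk above shows for protocol C; roughly, C requests a write ack once the data is on the peer's disk, B a receive ack once it is in the peer's memory, and A no explicit per-write ack. A condensed illustrative mapping (names and wording are assumptions, not taken from the driver):

#include <stdio.h>

/* Illustrative only: the driver itself sets the DP_SEND_WRITE_ACK and
 * DP_SEND_RECEIVE_ACK flags inline in receive_Data(). */
enum wire_proto { PROT_A, PROT_B, PROT_C };

static const char *ack_policy(enum wire_proto p)
{
	switch (p) {
	case PROT_C: return "write ack after the data hit the peer's disk";
	case PROT_B: return "receive ack once the data reached the peer's memory";
	default:     return "no explicit per-write ack (asynchronous)";
	}
}

int main(void)
{
	printf("A: %s\nB: %s\nC: %s\n",
	       ack_policy(PROT_A), ack_policy(PROT_B), ack_policy(PROT_C));
	return 0;
}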
@@ -2294,9 +2323,9 @@ out_interrupted:
* The current sync rate used here uses only the most recent two step marks,
* to have a short time average so we can react faster.
*/
-int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
+int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
{
- struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
unsigned long db, dt, dbdt;
struct lc_element *tmp;
int curr_events;
@@ -2304,48 +2333,48 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
unsigned int c_min_rate;
rcu_read_lock();
- c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
rcu_read_unlock();
/* feature disabled? */
if (c_min_rate == 0)
return 0;
- spin_lock_irq(&mdev->al_lock);
- tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
+ spin_lock_irq(&device->al_lock);
+ tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
if (tmp) {
struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
return 0;
}
/* Do not slow down if app IO is already waiting for this extent */
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
- atomic_read(&mdev->rs_sect_ev);
+ atomic_read(&device->rs_sect_ev);
- if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
+ if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
unsigned long rs_left;
int i;
- mdev->rs_last_events = curr_events;
+ device->rs_last_events = curr_events;
/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
* approx. */
- i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+ i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- rs_left = mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ rs_left = device->ov_left;
else
- rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+ rs_left = drbd_bm_total_weight(device) - device->rs_failed;
- dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
+ dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
if (!dt)
dt++;
- db = mdev->rs_mark_left[i] - rs_left;
+ db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
if (dbdt > c_min_rate)
@@ -2355,9 +2384,10 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
}
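
The throttle check above reduces to a throughput estimate: bitmap bits cleared since the last sync mark, converted to KiB, divided by the elapsed seconds, and compared against c_min_rate. A self-contained sketch of that arithmetic; the 4 KiB per bitmap bit is DRBD's default granularity and is an assumption here, as are the names:

#include <stdbool.h>
#include <stdio.h>

/* Rough re-statement of the rate check: estimate the current resync
 * throughput in KiB/s and only slow the resync down if it still
 * exceeds the configured minimum rate c_min_rate. */
static bool should_slow_down_resync(unsigned long bits_cleared,
				    unsigned long seconds,
				    unsigned int c_min_rate_kib)
{
	if (c_min_rate_kib == 0)	/* feature disabled */
		return false;
	if (seconds == 0)
		seconds = 1;
	return (bits_cleared * 4) / seconds > c_min_rate_kib;
}

int main(void)
{
	/* 25600 bits * 4 KiB / 10 s = 10240 KiB/s, above a 4096 KiB/s floor. */
	printf("slow down resync: %d\n",
	       should_slow_down_resync(25600, 10, 4096));
	return 0;
}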
-static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
sector_t capacity;
struct drbd_peer_request *peer_req;
@@ -2366,58 +2396,59 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
unsigned int fault_type;
struct p_block_req *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
- capacity = drbd_get_capacity(mdev->this_bdev);
+ device = peer_device->device;
+ capacity = drbd_get_capacity(device->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
- dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
+ drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
if (sector + (size>>9) > capacity) {
- dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
+ drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
return -EINVAL;
}
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
+ if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
verb = 1;
switch (pi->cmd) {
case P_DATA_REQUEST:
- drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
+ drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
break;
case P_RS_DATA_REQUEST:
case P_CSUM_RS_REQUEST:
case P_OV_REQUEST:
- drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
+ drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
break;
case P_OV_REPLY:
verb = 0;
- dec_rs_pending(mdev);
- drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
+ dec_rs_pending(device);
+ drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Can not satisfy peer's read request, "
+ drbd_err(device, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possible payload */
- return drbd_drain_block(mdev, pi->size);
+ return drbd_drain_block(peer_device, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO);
if (!peer_req) {
- put_ldev(mdev);
+ put_ldev(device);
return -ENOMEM;
}
@@ -2432,7 +2463,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
break;
case P_OV_REPLY:
@@ -2448,19 +2479,19 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
peer_req->digest = di;
peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
+ if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
goto out_free_e;
if (pi->cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
} else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
- atomic_add(size >> 9, &mdev->rs_sect_in);
+ atomic_add(size >> 9, &device->rs_sect_in);
peer_req->w.cb = w_e_end_ov_reply;
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
goto submit_for_resync;
@@ -2468,19 +2499,19 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
break;
case P_OV_REQUEST:
- if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->tconn->agreed_pro_version >= 90) {
+ if (device->ov_start_sector == ~(sector_t)0 &&
+ peer_device->connection->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
- mdev->ov_start_sector = sector;
- mdev->ov_position = sector;
- mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
- mdev->rs_total = mdev->ov_left;
+ device->ov_start_sector = sector;
+ device->ov_position = sector;
+ device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
+ device->rs_total = device->ov_left;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = device->ov_left;
+ device->rs_mark_time[i] = now;
}
- dev_info(DEV, "Online Verify start sector: %llu\n",
+ drbd_info(device, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
peer_req->w.cb = w_e_end_ov_req;
@@ -2513,57 +2544,61 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
* we would also throttle its application reads.
* In that case, throttling is done on the SyncTarget only.
*/
- if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
+ if (device->state.peer != R_PRIMARY && drbd_rs_should_slow_down(device, sector))
schedule_timeout_uninterruptible(HZ/10);
- if (drbd_rs_begin_io(mdev, sector))
+ if (drbd_rs_begin_io(device, sector))
goto out_free_e;
submit_for_resync:
- atomic_add(size >> 9, &mdev->rs_sect_ev);
+ atomic_add(size >> 9, &device->rs_sect_ev);
submit:
- inc_unacked(mdev);
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add_tail(&peer_req->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ inc_unacked(device);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
return 0;
/* don't care for the reason here */
- dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->tconn->req_lock);
+ drbd_err(device, "submit failed, triggering re-connect\n");
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
- put_ldev(mdev);
- drbd_free_peer_req(mdev, peer_req);
+ put_ldev(device);
+ drbd_free_peer_req(device, peer_req);
return -EIO;
}
-static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
+ */
+static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
enum drbd_after_sb_p after_sb_0p;
- self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
- peer = mdev->p_uuid[UI_BITMAP] & 1;
+ self = device->ldev->md.uuid[UI_BITMAP] & 1;
+ peer = device->p_uuid[UI_BITMAP] & 1;
- ch_peer = mdev->p_uuid[UI_SIZE];
- ch_self = mdev->comm_bm_set;
+ ch_peer = device->p_uuid[UI_SIZE];
+ ch_self = device->comm_bm_set;
rcu_read_lock();
- after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
rcu_read_unlock();
switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
case ASB_VIOLENTLY:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
@@ -2587,11 +2622,11 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
break;
}
/* Else fall through to one of the other strategies... */
- dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
+ drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
} else {
@@ -2607,7 +2642,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2620,13 +2655,17 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
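
In the discard-least-changes branch above, the node that modified fewer blocks while disconnected gives up its changes and becomes the sync target; an exact tie falls back to the RESOLVE_CONFLICTS flag. A minimal sketch with the same sign convention (negative: the local node syncs from the peer); names are illustrative and the less-than branch is inferred from the visible tie and greater-than cases:

#include <stdio.h>

/* ch_self/ch_peer correspond to the block counts compared in
 * drbd_asb_recover_0p(): -1 means discard local changes and sync from
 * the peer, +1 means the peer discards and syncs from us. */
static int discard_least_changes(unsigned long ch_self, unsigned long ch_peer,
				 int resolve_conflicts)
{
	if (ch_self < ch_peer)
		return -1;
	if (ch_self > ch_peer)
		return 1;
	return resolve_conflicts ? -1 : 1;	/* tie-breaker, as in the hunk */
}

int main(void)
{
	/* Local node changed 120 blocks, peer changed 4096: local side loses. */
	printf("%d\n", discard_least_changes(120, 4096, 0));
	return 0;
}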
-static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_1p - Recover after split-brain with one remaining primary
+ */
+static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_1p;
rcu_read_lock();
- after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
rcu_read_unlock();
switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2635,35 +2674,35 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
case ASB_DISCARD_ZERO_CHG:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_DISCONNECT:
break;
case ASB_CONSENSUS:
- hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == R_SECONDARY)
+ hg = drbd_asb_recover_0p(peer_device);
+ if (hg == -1 && device->state.role == R_SECONDARY)
rv = hg;
- if (hg == 1 && mdev->state.role == R_PRIMARY)
+ if (hg == 1 && device->state.role == R_PRIMARY)
rv = hg;
break;
case ASB_VIOLENTLY:
- rv = drbd_asb_recover_0p(mdev);
+ rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCARD_SECONDARY:
- return mdev->state.role == R_PRIMARY ? 1 : -1;
+ return device->state.role == R_PRIMARY ? 1 : -1;
case ASB_CALL_HELPER:
- hg = drbd_asb_recover_0p(mdev);
- if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ hg = drbd_asb_recover_0p(peer_device);
+ if (hg == -1 && device->state.role == R_PRIMARY) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
- drbd_khelper(mdev, "pri-lost-after-sb");
+ drbd_khelper(device, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Successfully gave up primary role.\n");
+ drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2673,13 +2712,17 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
-static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
+/**
+ * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries
+ */
+static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
int hg, rv = -100;
enum drbd_after_sb_p after_sb_2p;
rcu_read_lock();
- after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
rcu_read_unlock();
switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
@@ -2690,26 +2733,26 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_DISCARD_ZERO_CHG:
- dev_err(DEV, "Configuration error.\n");
+ drbd_err(device, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
- rv = drbd_asb_recover_0p(mdev);
+ rv = drbd_asb_recover_0p(peer_device);
break;
case ASB_DISCONNECT:
break;
case ASB_CALL_HELPER:
- hg = drbd_asb_recover_0p(mdev);
+ hg = drbd_asb_recover_0p(peer_device);
if (hg == -1) {
enum drbd_state_rv rv2;
/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
* we might be here in C_WF_REPORT_PARAMS which is transient.
* we do not need to wait for the after state change work either. */
- rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+ rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
if (rv2 != SS_SUCCESS) {
- drbd_khelper(mdev, "pri-lost-after-sb");
+ drbd_khelper(device, "pri-lost-after-sb");
} else {
- dev_warn(DEV, "Successfully gave up primary role.\n");
+ drbd_warn(device, "Successfully gave up primary role.\n");
rv = hg;
}
} else
@@ -2719,14 +2762,14 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
return rv;
}
-static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
+static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
u64 bits, u64 flags)
{
if (!uuid) {
- dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
+ drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
return;
}
- dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
+ drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
text,
(unsigned long long)uuid[UI_CURRENT],
(unsigned long long)uuid[UI_BITMAP],
@@ -2748,13 +2791,13 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
-1091 requires proto 91
-1096 requires proto 96
*/
-static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
+static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
{
u64 self, peer;
int i, j;
- self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
*rule_nr = 10;
if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
@@ -2773,46 +2816,46 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (self == peer) {
int rct, dc; /* roles at crash time */
- if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
+ if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
- (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
- dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_move_history(mdev);
- mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
- mdev->ldev->md.uuid[UI_BITMAP] = 0;
+ if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
+ (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
+ drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+ device->ldev->md.uuid[UI_BITMAP] = 0;
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
- mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid,
+ device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
*rule_nr = 34;
} else {
- dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
+ drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
*rule_nr = 36;
}
return 1;
}
- if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
+ if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
- (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
- dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
+ if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
+ (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
+ drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
- mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
- mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
- mdev->p_uuid[UI_BITMAP] = 0UL;
+ device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
+ device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
+ device->p_uuid[UI_BITMAP] = 0UL;
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
*rule_nr = 35;
} else {
- dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
+ drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
*rule_nr = 37;
}
@@ -2820,8 +2863,8 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
}
/* Common power [off|failure] */
- rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
- (mdev->p_uuid[UI_FLAGS] & 2);
+ rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
+ (device->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
*rule_nr = 40;
@@ -2831,72 +2874,72 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
+ dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
return dc ? -1 : 1;
}
}
*rule_nr = 50;
- peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer)
return -1;
*rule_nr = 51;
- peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
+ peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->tconn->agreed_pro_version < 96 ?
- (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
- (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
- peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+ (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
+ (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
+ peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of the peer's UUIDs. */
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
- mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
+ device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
+ device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ drbd_info(device, "Lost last syncUUID packet, corrected:\n");
+ drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
return -1;
}
}
*rule_nr = 60;
- self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- peer = mdev->p_uuid[i] & ~((u64)1);
+ peer = device->p_uuid[i] & ~((u64)1);
if (self == peer)
return -2;
}
*rule_nr = 70;
- self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
if (self == peer)
return 1;
*rule_nr = 71;
- self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->tconn->agreed_pro_version < 96 ?
- (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
- (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
- self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+ (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
+ (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
+ self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
- if (mdev->tconn->agreed_pro_version < 91)
+ if (first_peer_device(device)->connection->agreed_pro_version < 91)
return -1091;
- __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
- dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
- mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
+ drbd_info(device, "Last syncUUID did not get through, corrected:\n");
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid,
+ device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
return 1;
}
@@ -2904,24 +2947,24 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 80;
- peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
+ peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- self = mdev->ldev->md.uuid[i] & ~((u64)1);
+ self = device->ldev->md.uuid[i] & ~((u64)1);
if (self == peer)
return 2;
}
*rule_nr = 90;
- self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
- peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
+ self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
+ peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
if (self == peer && self != ((u64)0))
return 100;
*rule_nr = 100;
for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
- self = mdev->ldev->md.uuid[i] & ~((u64)1);
+ self = device->ldev->md.uuid[i] & ~((u64)1);
for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
- peer = mdev->p_uuid[j] & ~((u64)1);
+ peer = device->p_uuid[j] & ~((u64)1);
if (self == peer)
return -100;
}
@@ -2933,36 +2976,38 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
/* drbd_sync_handshake() returns the new conn state on success, or
C_MASK on failure.
*/
-static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
+static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
+ enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
+ struct drbd_device *device = peer_device->device;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
struct net_conf *nc;
int hg, rule_nr, rr_conflict, tentative;
- mydisk = mdev->state.disk;
+ mydisk = device->state.disk;
if (mydisk == D_NEGOTIATING)
- mydisk = mdev->new_state_tmp.disk;
+ mydisk = device->new_state_tmp.disk;
- dev_info(DEV, "drbd_sync_handshake:\n");
+ drbd_info(device, "drbd_sync_handshake:\n");
- spin_lock_irq(&mdev->ldev->md.uuid_lock);
- drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
- drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
- mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+ spin_lock_irq(&device->ldev->md.uuid_lock);
+ drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
+ drbd_uuid_dump(device, "peer", device->p_uuid,
+ device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
- hg = drbd_uuid_compare(mdev, &rule_nr);
- spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+ hg = drbd_uuid_compare(device, &rule_nr);
+ spin_unlock_irq(&device->ldev->md.uuid_lock);
- dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
+ drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
if (hg == -1000) {
- dev_alert(DEV, "Unrelated data, aborting!\n");
+ drbd_alert(device, "Unrelated data, aborting!\n");
return C_MASK;
}
if (hg < -1000) {
- dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
+ drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
}
@@ -2972,38 +3017,38 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
hg = mydisk > D_INCONSISTENT ? 1 : -1;
if (f)
hg = hg*2;
- dev_info(DEV, "Becoming sync %s due to disk states.\n",
+ drbd_info(device, "Becoming sync %s due to disk states.\n",
hg > 0 ? "source" : "target");
}
if (abs(hg) == 100)
- drbd_khelper(mdev, "initial-split-brain");
+ drbd_khelper(device, "initial-split-brain");
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(peer_device->connection->net_conf);
if (hg == 100 || (hg == -100 && nc->always_asbp)) {
- int pcount = (mdev->state.role == R_PRIMARY)
+ int pcount = (device->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
switch (pcount) {
case 0:
- hg = drbd_asb_recover_0p(mdev);
+ hg = drbd_asb_recover_0p(peer_device);
break;
case 1:
- hg = drbd_asb_recover_1p(mdev);
+ hg = drbd_asb_recover_1p(peer_device);
break;
case 2:
- hg = drbd_asb_recover_2p(mdev);
+ hg = drbd_asb_recover_2p(peer_device);
break;
}
if (abs(hg) < 100) {
- dev_warn(DEV, "Split-Brain detected, %d primaries, "
+ drbd_warn(device, "Split-Brain detected, %d primaries, "
"automatically solved. Sync from %s node\n",
pcount, (hg < 0) ? "peer" : "this");
if (forced) {
- dev_warn(DEV, "Doing a full sync, since"
+ drbd_warn(device, "Doing a full sync, since"
" UUIDs where ambiguous.\n");
hg = hg*2;
}
@@ -3011,13 +3056,13 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
- dev_warn(DEV, "Split-Brain detected, manually solved. "
+ drbd_warn(device, "Split-Brain detected, manually solved. "
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
@@ -3030,44 +3075,44 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
- dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
- drbd_khelper(mdev, "split-brain");
+ drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
+ drbd_khelper(device, "split-brain");
return C_MASK;
}
if (hg > 0 && mydisk <= D_INCONSISTENT) {
- dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
+ drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
return C_MASK;
}
if (hg < 0 && /* by intention we do not use mydisk here. */
- mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
+ device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
switch (rr_conflict) {
case ASB_CALL_HELPER:
- drbd_khelper(mdev, "pri-lost");
+ drbd_khelper(device, "pri-lost");
/* fall through */
case ASB_DISCONNECT:
- dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
+ drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
case ASB_VIOLENTLY:
- dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
+ drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
"assumption\n");
}
}
- if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
if (hg == 0)
- dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
+ drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
else
- dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
+ drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
abs(hg) >= 2 ? "full" : "bit-map based");
return C_MASK;
}
if (abs(hg) >= 2) {
- dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
- if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+ drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
+ if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
BM_LOCKED_SET_ALLOWED))
return C_MASK;
}
@@ -3078,9 +3123,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
rv = C_WF_BITMAP_T;
} else {
rv = C_CONNECTED;
- if (drbd_bm_total_weight(mdev)) {
- dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
- drbd_bm_total_weight(mdev));
+ if (drbd_bm_total_weight(device)) {
+ drbd_info(device, "No resync, but %lu bits in bitmap!\n",
+ drbd_bm_total_weight(device));
}
}
@@ -3101,7 +3146,7 @@ static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
return peer;
}
-static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_protocol *p = pi->data;
enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
@@ -3119,58 +3164,58 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
cf = be32_to_cpu(p->conn_flags);
p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- if (tconn->agreed_pro_version >= 87) {
+ if (connection->agreed_pro_version >= 87) {
int err;
if (pi->size > sizeof(integrity_alg))
return -EIO;
- err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ err = drbd_recv_all(connection, integrity_alg, pi->size);
if (err)
return err;
integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
if (pi->cmd != P_PROTOCOL_UPDATE) {
- clear_bit(CONN_DRY_RUN, &tconn->flags);
+ clear_bit(CONN_DRY_RUN, &connection->flags);
if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &tconn->flags);
+ set_bit(CONN_DRY_RUN, &connection->flags);
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
if (p_proto != nc->wire_protocol) {
- conn_err(tconn, "incompatible %s settings\n", "protocol");
+ drbd_err(connection, "incompatible %s settings\n", "protocol");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
goto disconnect_rcu_unlock;
}
if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
- conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
goto disconnect_rcu_unlock;
}
if (p_discard_my_data && nc->discard_my_data) {
- conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
goto disconnect_rcu_unlock;
}
if (p_two_primaries != nc->two_primaries) {
- conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
goto disconnect_rcu_unlock;
}
if (strcmp(integrity_alg, nc->integrity_alg)) {
- conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
goto disconnect_rcu_unlock;
}
@@ -3191,7 +3236,7 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (!peer_integrity_tfm) {
- conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
}
@@ -3200,20 +3245,20 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
- conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
goto disconnect;
}
}
new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
- conn_err(tconn, "Allocation of new net_conf failed\n");
+ drbd_err(connection, "Allocation of new net_conf failed\n");
goto disconnect;
}
- mutex_lock(&tconn->data.mutex);
- mutex_lock(&tconn->conf_update);
- old_net_conf = tconn->net_conf;
+ mutex_lock(&connection->data.mutex);
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = connection->net_conf;
*new_net_conf = *old_net_conf;
new_net_conf->wire_protocol = p_proto;
@@ -3222,19 +3267,19 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
new_net_conf->two_primaries = p_two_primaries;
- rcu_assign_pointer(tconn->net_conf, new_net_conf);
- mutex_unlock(&tconn->conf_update);
- mutex_unlock(&tconn->data.mutex);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
+ mutex_unlock(&connection->resource->conf_update);
+ mutex_unlock(&connection->data.mutex);
- crypto_free_hash(tconn->peer_integrity_tfm);
- kfree(tconn->int_dig_in);
- kfree(tconn->int_dig_vv);
- tconn->peer_integrity_tfm = peer_integrity_tfm;
- tconn->int_dig_in = int_dig_in;
- tconn->int_dig_vv = int_dig_vv;
+ crypto_free_hash(connection->peer_integrity_tfm);
+ kfree(connection->int_dig_in);
+ kfree(connection->int_dig_vv);
+ connection->peer_integrity_tfm = peer_integrity_tfm;
+ connection->int_dig_in = int_dig_in;
+ connection->int_dig_vv = int_dig_vv;
if (strcmp(old_net_conf->integrity_alg, integrity_alg))
- conn_info(tconn, "peer data-integrity-alg: %s\n",
+ drbd_info(connection, "peer data-integrity-alg: %s\n",
integrity_alg[0] ? integrity_alg : "(none)");
synchronize_rcu();
@@ -3247,7 +3292,7 @@ disconnect:
crypto_free_hash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
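
The net_conf update in the hunk above follows the usual copy, publish, retire pattern: allocate a copy, apply the peer's settings under the config mutex, publish it with rcu_assign_pointer(), then synchronize_rcu() before freeing the old copy so readers never see a half-written structure. A userspace analogue of that shape (simplified, no RCU, illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct conf { int wire_protocol; int two_primaries; };

/* Copy-and-publish update: readers always observe either the complete
 * old or the complete new configuration.  In the driver the publish
 * step is rcu_assign_pointer() and the old copy is freed only after
 * synchronize_rcu(). */
static struct conf *update_conf(struct conf **slot, int proto, int two_primaries)
{
	struct conf *old = *slot;
	struct conf *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	*new = *old;
	new->wire_protocol = proto;
	new->two_primaries = two_primaries;
	*slot = new;		/* publish */
	return old;		/* caller frees once no reader can hold it */
}

int main(void)
{
	struct conf *conf = calloc(1, sizeof(*conf));
	struct conf *old;

	if (!conf)
		return 1;
	conf->wire_protocol = 1;
	old = update_conf(&conf, 3, 1);
	printf("proto=%d two_primaries=%d\n", conf->wire_protocol, conf->two_primaries);
	free(old);
	free(conf);
	return 0;
}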
@@ -3256,7 +3301,8 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
+static
+struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
const char *alg, const char *name)
{
struct crypto_hash *tfm;
@@ -3266,21 +3312,21 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
- dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
+ drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
return tfm;
}
return tfm;
}
-static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
+static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
{
- void *buffer = tconn->data.rbuf;
+ void *buffer = connection->data.rbuf;
int size = pi->size;
while (size) {
int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
- s = drbd_recv(tconn, buffer, s);
+ s = drbd_recv(connection, buffer, s);
if (s <= 0) {
if (s < 0)
return s;
@@ -3304,30 +3350,32 @@ static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info
*
* (We can also end up here if drbd is misconfigured.)
*/
-static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
+static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
{
- conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
cmdname(pi->cmd), pi->vnr);
- return ignore_remaining_packet(tconn, pi);
+ return ignore_remaining_packet(connection, pi);
}
-static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
- const int apv = tconn->agreed_pro_version;
+ const int apv = connection->agreed_pro_version;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -3336,7 +3384,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
if (pi->size > exp_max_sz) {
- dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
+ drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
pi->size, exp_max_sz);
return -EIO;
}
@@ -3347,33 +3395,33 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
} else if (apv <= 94) {
header_size = sizeof(struct p_rs_param_89);
data_size = pi->size - header_size;
- D_ASSERT(data_size == 0);
+ D_ASSERT(device, data_size == 0);
} else {
header_size = sizeof(struct p_rs_param_95);
data_size = pi->size - header_size;
- D_ASSERT(data_size == 0);
+ D_ASSERT(device, data_size == 0);
}
/* initialize verify_alg and csums_alg */
p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- err = drbd_recv_all(mdev->tconn, p, header_size);
+ err = drbd_recv_all(peer_device->connection, p, header_size);
if (err)
return err;
- mutex_lock(&mdev->tconn->conf_update);
- old_net_conf = mdev->tconn->net_conf;
- if (get_ldev(mdev)) {
+ mutex_lock(&connection->resource->conf_update);
+ old_net_conf = peer_device->connection->net_conf;
+ if (get_ldev(device)) {
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
- put_ldev(mdev);
- mutex_unlock(&mdev->tconn->conf_update);
- dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(device);
+ mutex_unlock(&connection->resource->conf_update);
+ drbd_err(device, "Allocation of new disk_conf failed\n");
return -ENOMEM;
}
- old_disk_conf = mdev->ldev->disk_conf;
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
@@ -3382,37 +3430,37 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (apv >= 88) {
if (apv == 88) {
if (data_size > SHARED_SECRET_MAX || data_size == 0) {
- dev_err(DEV, "verify-alg of wrong size, "
+ drbd_err(device, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
err = -EIO;
goto reconnect;
}
- err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
if (err)
goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
- D_ASSERT(p->verify_alg[data_size-1] == 0);
+ D_ASSERT(device, p->verify_alg[data_size-1] == 0);
p->verify_alg[data_size-1] = 0;
} else /* apv >= 89 */ {
/* we still expect NUL terminated strings */
/* but just in case someone tries to be evil */
- D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
- D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
+ D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
+ D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
p->verify_alg[SHARED_SECRET_MAX-1] = 0;
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
- dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
- verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
+ verify_tfm = drbd_crypto_alloc_digest_safe(device,
p->verify_alg, "verify-alg");
if (IS_ERR(verify_tfm)) {
verify_tfm = NULL;
@@ -3421,12 +3469,12 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
}
if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
- dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
+ if (device->state.conn == C_WF_REPORT_PARAMS) {
+ drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
- csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
+ csums_tfm = drbd_crypto_alloc_digest_safe(device,
p->csums_alg, "csums-alg");
if (IS_ERR(csums_tfm)) {
csums_tfm = NULL;
@@ -3441,11 +3489,11 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s->size) {
+ if (fifo_size != device->rs_plan_s->size) {
new_plan = fifo_alloc(fifo_size);
if (!new_plan) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
- put_ldev(mdev);
+ drbd_err(device, "kmalloc of fifo_buffer failed");
+ put_ldev(device);
goto disconnect;
}
}
@@ -3454,7 +3502,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (verify_tfm || csums_tfm) {
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
- dev_err(DEV, "Allocation of new net_conf failed\n");
+ drbd_err(device, "Allocation of new net_conf failed\n");
goto disconnect;
}
@@ -3463,32 +3511,32 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->tconn->verify_tfm);
- mdev->tconn->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ crypto_free_hash(peer_device->connection->verify_tfm);
+ peer_device->connection->verify_tfm = verify_tfm;
+ drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->tconn->csums_tfm);
- mdev->tconn->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ crypto_free_hash(peer_device->connection->csums_tfm);
+ peer_device->connection->csums_tfm = csums_tfm;
+ drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
- rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ rcu_assign_pointer(connection->net_conf, new_net_conf);
}
}
if (new_disk_conf) {
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- put_ldev(mdev);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ put_ldev(device);
}
if (new_plan) {
- old_plan = mdev->rs_plan_s;
- rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ old_plan = device->rs_plan_s;
+ rcu_assign_pointer(device->rs_plan_s, new_plan);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
if (new_net_conf)
kfree(old_net_conf);
@@ -3499,30 +3547,30 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
reconnect:
if (new_disk_conf) {
- put_ldev(mdev);
+ put_ldev(device);
kfree(new_disk_conf);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
return -EIO;
disconnect:
kfree(new_plan);
if (new_disk_conf) {
- put_ldev(mdev);
+ put_ldev(device);
kfree(new_disk_conf);
}
- mutex_unlock(&mdev->tconn->conf_update);
+ mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
-static void warn_if_differ_considerably(struct drbd_conf *mdev,
+static void warn_if_differ_considerably(struct drbd_device *device,
const char *s, sector_t a, sector_t b)
{
sector_t d;
@@ -3530,54 +3578,56 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
return;
d = (a > b) ? (a - b) : (b - a);
if (d > (a>>3) || d > (b>>3))
- dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
+ drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
(unsigned long long)a, (unsigned long long)b);
}
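
Worked example for the helper above: the warning fires once the difference exceeds one eighth of either value. For 1000 vs. 1200 sectors the difference is 200, which is larger than 1000>>3 = 125, so the sizes are reported as considerably different; 1000 vs. 1100 (difference 100) stays below both thresholds and is silent.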
-static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_sizes *p = pi->data;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
- mdev->p_size = p_size;
+ device->p_size = p_size;
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
rcu_read_lock();
- my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
- warn_if_differ_considerably(mdev, "lower level device sizes",
- p_size, drbd_get_max_capacity(mdev->ldev));
- warn_if_differ_considerably(mdev, "user requested size",
+ warn_if_differ_considerably(device, "lower level device sizes",
+ p_size, drbd_get_max_capacity(device->ldev));
+ warn_if_differ_considerably(device, "user requested size",
p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
- if (mdev->state.conn == C_WF_REPORT_PARAMS)
+ if (device->state.conn == C_WF_REPORT_PARAMS)
p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
- dev_err(DEV, "The peer's disk size is too small!\n");
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
- put_ldev(mdev);
+ if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
+ drbd_get_capacity(device->this_bdev) &&
+ device->state.disk >= D_OUTDATED &&
+ device->state.conn < C_CONNECTED) {
+ drbd_err(device, "The peer's disk size is too small!\n");
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
+ put_ldev(device);
return -EIO;
}
@@ -3586,145 +3636,147 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
- dev_err(DEV, "Allocation of new disk_conf failed\n");
- put_ldev(mdev);
+ drbd_err(device, "Allocation of new disk_conf failed\n");
+ put_ldev(device);
return -ENOMEM;
}
- mutex_lock(&mdev->tconn->conf_update);
- old_disk_conf = mdev->ldev->disk_conf;
+ mutex_lock(&connection->resource->conf_update);
+ old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = p_usize;
- rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
- mutex_unlock(&mdev->tconn->conf_update);
+ rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
kfree(old_disk_conf);
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ drbd_info(device, "Peer sets u_size to %lu sectors\n",
(unsigned long)my_usize);
}
- put_ldev(mdev);
+ put_ldev(device);
}
ddsf = be16_to_cpu(p->dds_flags);
- if (get_ldev(mdev)) {
- dd = drbd_determine_dev_size(mdev, ddsf, NULL);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ dd = drbd_determine_dev_size(device, ddsf, NULL);
+ put_ldev(device);
if (dd == DS_ERROR)
return -EIO;
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
} else {
/* I am diskless, need to accept the peer's size. */
- drbd_set_my_capacity(mdev, p_size);
+ drbd_set_my_capacity(device, p_size);
}
- mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
- drbd_reconsider_max_bio_size(mdev);
+ device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
+ drbd_reconsider_max_bio_size(device);
- if (get_ldev(mdev)) {
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+ if (get_ldev(device)) {
+ if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
+ device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
ldsc = 1;
}
- put_ldev(mdev);
+ put_ldev(device);
}
- if (mdev->state.conn > C_WF_REPORT_PARAMS) {
+ if (device->state.conn > C_WF_REPORT_PARAMS) {
if (be64_to_cpu(p->c_size) !=
- drbd_get_capacity(mdev->this_bdev) || ldsc) {
+ drbd_get_capacity(device->this_bdev) || ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
- drbd_send_sizes(mdev, 0, ddsf);
+ drbd_send_sizes(peer_device, 0, ddsf);
}
- if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
- (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
- if (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.disk >= D_INCONSISTENT) {
+ if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
+ (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
+ if (device->state.pdsk >= D_INCONSISTENT &&
+ device->state.disk >= D_INCONSISTENT) {
if (ddsf & DDSF_NO_RESYNC)
- dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+ drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
else
- resync_after_online_grow(mdev);
+ resync_after_online_grow(device);
} else
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ set_bit(RESYNC_AFTER_NEG, &device->flags);
}
}
return 0;
}
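Nearly every per-volume receive handler in this patch opens with the same converted prologue: the volume number from the packet header is resolved to a drbd_peer_device, and the drbd_device is reached through its ->device member. A minimal sketch of that prologue, using only helpers that appear in the hunks above (the function name receive_example is hypothetical; handlers without a config_unknown_volume fallback simply return -EIO):

static int receive_example(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;

	/* resolve pi->vnr to the per-volume objects */
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return config_unknown_volume(connection, pi);
	device = peer_device->device;

	/* per-device work now uses 'device'; per-link work uses
	 * peer_device->connection instead of the old mdev->tconn */
	return 0;
}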
-static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
if (!p_uuid) {
- dev_err(DEV, "kmalloc of p_uuid failed\n");
+ drbd_err(device, "kmalloc of p_uuid failed\n");
return false;
}
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
- kfree(mdev->p_uuid);
- mdev->p_uuid = p_uuid;
+ kfree(device->p_uuid);
+ device->p_uuid = p_uuid;
- if (mdev->state.conn < C_CONNECTED &&
- mdev->state.disk < D_INCONSISTENT &&
- mdev->state.role == R_PRIMARY &&
- (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
- dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
- (unsigned long long)mdev->ed_uuid);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ if (device->state.conn < C_CONNECTED &&
+ device->state.disk < D_INCONSISTENT &&
+ device->state.role == R_PRIMARY &&
+ (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
+ drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
+ (unsigned long long)device->ed_uuid);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
int skip_initial_sync =
- mdev->state.conn == C_CONNECTED &&
- mdev->tconn->agreed_pro_version >= 90 &&
- mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
+ device->state.conn == C_CONNECTED &&
+ peer_device->connection->agreed_pro_version >= 90 &&
+ device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
- dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
- drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+ drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
+ drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
"clear_n_write from receive_uuids",
BM_LOCKED_TEST_ALLOWED);
- _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
- _drbd_uuid_set(mdev, UI_BITMAP, 0);
- _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+ _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, UI_BITMAP, 0);
+ _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
updated_uuids = 1;
}
- put_ldev(mdev);
- } else if (mdev->state.disk < D_INCONSISTENT &&
- mdev->state.role == R_PRIMARY) {
+ put_ldev(device);
+ } else if (device->state.disk < D_INCONSISTENT &&
+ device->state.role == R_PRIMARY) {
/* I am a diskless primary, the peer just created a new current UUID
for me. */
- updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
}
/* Before we test for the disk state, we should wait until a possibly
ongoing cluster-wide state change has finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- mutex_lock(mdev->state_mutex);
- mutex_unlock(mdev->state_mutex);
- if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
- updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+ mutex_lock(device->state_mutex);
+ mutex_unlock(device->state_mutex);
+ if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
+ updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
if (updated_uuids)
- drbd_print_uuids(mdev, "receiver updated UUIDs to");
+ drbd_print_uuids(device, "receiver updated UUIDs to");
return 0;
}
@@ -3760,38 +3812,40 @@ static union drbd_state convert_state(union drbd_state ps)
return ms;
}
-static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
- mutex_is_locked(mdev->state_mutex)) {
- drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
+ if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
+ mutex_is_locked(device->state_mutex)) {
+ drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
- rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
- drbd_send_sr_reply(mdev, rv);
+ rv = drbd_change_state(device, CS_VERBOSE, mask, val);
+ drbd_send_sr_reply(peer_device, rv);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
return 0;
}
-static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state *p = pi->data;
union drbd_state mask, val;
@@ -3800,46 +3854,48 @@ static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
- mutex_is_locked(&tconn->cstate_mutex)) {
- conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
+ mutex_is_locked(&connection->cstate_mutex)) {
+ conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
return 0;
}
mask = convert_state(mask);
val = convert_state(val);
- rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
- conn_send_sr_reply(tconn, rv);
+ rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(connection, rv);
return 0;
}
-static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
- return config_unknown_volume(tconn, pi);
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return config_unknown_volume(connection, pi);
+ device = peer_device->device;
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
if (peer_state.disk == D_NEGOTIATING) {
- real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
- dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
+ real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
+ drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
retry:
- os = ns = drbd_read_state(mdev);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ os = ns = drbd_read_state(device);
+ spin_unlock_irq(&device->resource->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
@@ -3871,8 +3927,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
* Maybe we should finish it up, too? */
else if (os.conn >= C_SYNC_SOURCE &&
peer_state.conn == C_CONNECTED) {
- if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
- drbd_resync_finished(mdev);
+ if (drbd_bm_total_weight(device) <= device->rs_failed)
+ drbd_resync_finished(device);
return 0;
}
}
@@ -3880,8 +3936,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
/* explicit verify finished notification, stop sector reached. */
if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
return 0;
}
@@ -3900,8 +3956,8 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
if (peer_state.conn == C_AHEAD)
ns.conn = C_BEHIND;
- if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
+ get_ldev_if_state(device, D_NEGOTIATING)) {
int cr; /* consider resync */
/* if we established a new connection */
@@ -3913,7 +3969,7 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --overwrite-data */
- cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+ cr |= test_bit(CONSIDER_RESYNC, &device->flags);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
@@ -3921,55 +3977,55 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
peer_state.conn <= C_WF_BITMAP_T));
if (cr)
- ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
+ ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
- put_ldev(mdev);
+ put_ldev(device);
if (ns.conn == C_MASK) {
ns.conn = C_CONNECTED;
- if (mdev->state.disk == D_NEGOTIATING) {
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ if (device->state.disk == D_NEGOTIATING) {
+ drbd_force_state(device, NS(disk, D_FAILED));
} else if (peer_state.disk == D_NEGOTIATING) {
- dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
+ drbd_err(device, "Disk attach process on the peer node was aborted.\n");
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
return -EIO;
- D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
}
}
- spin_lock_irq(&mdev->tconn->req_lock);
- if (os.i != drbd_read_state(mdev).i)
+ spin_lock_irq(&device->resource->req_lock);
+ if (os.i != drbd_read_state(device).i)
goto retry;
- clear_bit(CONSIDER_RESYNC, &mdev->flags);
+ clear_bit(CONSIDER_RESYNC, &device->flags);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
- ns.disk = mdev->new_state_tmp.disk;
+ ns.disk = device->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
- test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ test_bit(NEW_CUR_UUID, &device->flags)) {
/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->tconn->req_lock);
- dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev->tconn);
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ spin_unlock_irq(&device->resource->req_lock);
+ drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
+ tl_clear(peer_device->connection);
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
+ conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
return -EIO;
}
- rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = drbd_read_state(mdev);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ rv = _drbd_set_state(device, ns, cs_flags, NULL);
+ ns = drbd_read_state(device);
+ spin_unlock_irq(&device->resource->req_lock);
if (rv < SS_SUCCESS) {
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
@@ -3979,47 +4035,49 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
/* we want resync, peer has not yet decided to sync... */
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
+ drbd_send_uuids(peer_device);
+ drbd_send_current_state(peer_device);
}
}
- clear_bit(DISCARD_MY_DATA, &mdev->flags);
+ clear_bit(DISCARD_MY_DATA, &device->flags);
- drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
+ drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
return 0;
}
-static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_rs_uuid *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- wait_event(mdev->misc_wait,
- mdev->state.conn == C_WF_SYNC_UUID ||
- mdev->state.conn == C_BEHIND ||
- mdev->state.conn < C_CONNECTED ||
- mdev->state.disk < D_NEGOTIATING);
+ wait_event(device->misc_wait,
+ device->state.conn == C_WF_SYNC_UUID ||
+ device->state.conn == C_BEHIND ||
+ device->state.conn < C_CONNECTED ||
+ device->state.disk < D_NEGOTIATING);
- /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
+ /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
/* Here the _drbd_uuid_ functions are right, current should
_not_ be rotated into the history */
- if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
- _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
- _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
+ if (get_ldev_if_state(device, D_NEGOTIATING)) {
+ _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
+ _drbd_uuid_set(device, UI_BITMAP, 0UL);
- drbd_print_uuids(mdev, "updated sync uuid");
- drbd_start_resync(mdev, C_SYNC_TARGET);
+ drbd_print_uuids(device, "updated sync uuid");
+ drbd_start_resync(device, C_SYNC_TARGET);
- put_ldev(mdev);
+ put_ldev(device);
} else
- dev_err(DEV, "Ignoring SyncUUID packet!\n");
+ drbd_err(device, "Ignoring SyncUUID packet!\n");
return 0;
}
@@ -4031,27 +4089,27 @@ static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
unsigned long *p, struct bm_xfer_ctx *c)
{
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
- drbd_header_size(mdev->tconn);
+ drbd_header_size(peer_device->connection);
unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
c->bm_words - c->word_offset);
unsigned int want = num_words * sizeof(*p);
int err;
if (want != size) {
- dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
+ drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv_all(mdev->tconn, p, want);
+ err = drbd_recv_all(peer_device->connection, p, want);
if (err)
return err;
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
+ drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -4083,7 +4141,7 @@ static int dcbp_get_pad_bits(struct p_compressed_bm *p)
* code upon failure.
*/
static int
-recv_bm_rle_bits(struct drbd_conf *mdev,
+recv_bm_rle_bits(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
@@ -4112,14 +4170,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
if (toggle) {
e = s + rl -1;
if (e >= c->bm_bits) {
- dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
+ drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
return -EIO;
}
- _drbd_bm_set_bits(mdev, s, e);
+ _drbd_bm_set_bits(peer_device->device, s, e);
}
if (have < bits) {
- dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
+ drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
have, bits, look_ahead,
(unsigned int)(bs.cur.b - p->code),
(unsigned int)bs.buf_len);
@@ -4152,28 +4210,28 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-decode_bitmap_c(struct drbd_conf *mdev,
+decode_bitmap_c(struct drbd_peer_device *peer_device,
struct p_compressed_bm *p,
struct bm_xfer_ctx *c,
unsigned int len)
{
if (dcbp_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
+ return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
- dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+ drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
+ conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
-void INFO_bm_xfer_stats(struct drbd_conf *mdev,
+void INFO_bm_xfer_stats(struct drbd_device *device,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
unsigned int plain =
header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
@@ -4197,7 +4255,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
r = 1000;
r = 1000 - r;
- dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
+ drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
"total %u; compression: %u.%u%%\n",
direction,
c->bytes[1], c->packets[1],
@@ -4213,129 +4271,133 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on failure, 1 if we successfully received it. */
-static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct bm_xfer_ctx c;
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
+ drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
c = (struct bm_xfer_ctx) {
- .bm_bits = drbd_bm_bits(mdev),
- .bm_words = drbd_bm_words(mdev),
+ .bm_bits = drbd_bm_bits(device),
+ .bm_words = drbd_bm_words(device),
};
for(;;) {
if (pi->cmd == P_BITMAP)
- err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
struct p_compressed_bm *p = pi->data;
- if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
- dev_err(DEV, "ReportCBitmap packet too large\n");
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
+ drbd_err(device, "ReportCBitmap packet too large\n");
err = -EIO;
goto out;
}
if (pi->size <= sizeof(*p)) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
err = -EIO;
goto out;
}
- err = drbd_recv_all(mdev->tconn, p, pi->size);
+ err = drbd_recv_all(peer_device->connection, p, pi->size);
if (err)
goto out;
- err = decode_bitmap_c(mdev, p, &c, pi->size);
+ err = decode_bitmap_c(peer_device, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
err = -EIO;
goto out;
}
c.packets[pi->cmd == P_BITMAP]++;
- c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- err = drbd_recv_header(mdev->tconn, pi);
+ err = drbd_recv_header(peer_device->connection, pi);
if (err)
goto out;
}
- INFO_bm_xfer_stats(mdev, "receive", &c);
+ INFO_bm_xfer_stats(device, "receive", &c);
- if (mdev->state.conn == C_WF_BITMAP_T) {
+ if (device->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- err = drbd_send_bitmap(mdev);
+ err = drbd_send_bitmap(device);
if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
- rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- D_ASSERT(rv == SS_SUCCESS);
- } else if (mdev->state.conn != C_WF_BITMAP_S) {
+ rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ D_ASSERT(device, rv == SS_SUCCESS);
+ } else if (device->state.conn != C_WF_BITMAP_S) {
/* admin may have requested C_DISCONNECTING,
* other threads may have noticed network errors */
- dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
- drbd_conn_str(mdev->state.conn));
+ drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
+ drbd_conn_str(device->state.conn));
}
err = 0;
out:
- drbd_bm_unlock(mdev);
- if (!err && mdev->state.conn == C_WF_BITMAP_S)
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_bm_unlock(device);
+ if (!err && device->state.conn == C_WF_BITMAP_S)
+ drbd_start_resync(device, C_SYNC_SOURCE);
return err;
}
-static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
{
- conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
pi->cmd, pi->size);
- return ignore_remaining_packet(tconn, pi);
+ return ignore_remaining_packet(connection, pi);
}
-static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(tconn->data.socket);
+ drbd_tcp_quickack(connection->data.socket);
return 0;
}
-static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
+static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_desc *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- switch (mdev->state.conn) {
+ switch (device->state.conn) {
case C_WF_SYNC_UUID:
case C_WF_BITMAP_T:
case C_BEHIND:
break;
default:
- dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
- drbd_conn_str(mdev->state.conn));
+ drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
+ drbd_conn_str(device->state.conn));
}
- drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+ drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
return 0;
}
@@ -4343,7 +4405,7 @@ static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
struct data_cmd {
int expect_payload;
size_t pkt_size;
- int (*fn)(struct drbd_tconn *, struct packet_info *);
+ int (*fn)(struct drbd_connection *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -4373,43 +4435,43 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
-static void drbdd(struct drbd_tconn *tconn)
+static void drbdd(struct drbd_connection *connection)
{
struct packet_info pi;
size_t shs; /* sub header size */
int err;
- while (get_t_state(&tconn->receiver) == RUNNING) {
+ while (get_t_state(&connection->receiver) == RUNNING) {
struct data_cmd *cmd;
- drbd_thread_current_set_cpu(&tconn->receiver);
- if (drbd_recv_header(tconn, &pi))
+ drbd_thread_current_set_cpu(&connection->receiver);
+ if (drbd_recv_header(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
- conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ drbd_err(connection, "Unexpected data packet %s (0x%04x)",
cmdname(pi.cmd), pi.cmd);
goto err_out;
}
shs = cmd->pkt_size;
if (pi.size > shs && !cmd->expect_payload) {
- conn_err(tconn, "No payload expected %s l:%d\n",
+ drbd_err(connection, "No payload expected %s l:%d\n",
cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- err = drbd_recv_all_warn(tconn, pi.data, shs);
+ err = drbd_recv_all_warn(connection, pi.data, shs);
if (err)
goto err_out;
pi.size -= shs;
}
- err = cmd->fn(tconn, &pi);
+ err = cmd->fn(connection, &pi);
if (err) {
- conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
cmdname(pi.cmd), err, pi.size);
goto err_out;
}
@@ -4417,27 +4479,16 @@ static void drbdd(struct drbd_tconn *tconn)
return;
err_out:
- conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
-}
-
-void conn_flush_workqueue(struct drbd_tconn *tconn)
-{
- struct drbd_wq_barrier barr;
-
- barr.w.cb = w_prev_work_done;
- barr.w.tconn = tconn;
- init_completion(&barr.done);
- drbd_queue_work(&tconn->sender_work, &barr.w);
- wait_for_completion(&barr.done);
+ conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
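drbdd() above dispatches each received packet through the drbd_cmd_handler[] table; only the connection pointer type changes, not the logic. A condensed sketch of the dispatch step, with the error paths collapsed into goto err_out as in the function above. Note also that the old conn_flush_workqueue() helper is dropped in this hunk; later hunks flush the sender queue directly via drbd_flush_workqueue(&connection->sender_work).

	struct data_cmd *cmd = &drbd_cmd_handler[pi.cmd];

	if (pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)
		goto err_out;               /* unknown packet type */
	shs = cmd->pkt_size;
	if (pi.size > shs && !cmd->expect_payload)
		goto err_out;               /* payload not expected for this command */
	if (shs) {
		if (drbd_recv_all_warn(connection, pi.data, shs))
			goto err_out;       /* read the fixed-size sub-header */
		pi.size -= shs;             /* what remains is the payload */
	}
	err = cmd->fn(connection, &pi);     /* e.g. receive_sizes(connection, &pi) */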
-static void conn_disconnect(struct drbd_tconn *tconn)
+static void conn_disconnect(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum drbd_conns oc;
int vnr;
- if (tconn->cstate == C_STANDALONE)
+ if (connection->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -4445,54 +4496,56 @@ static void conn_disconnect(struct drbd_tconn *tconn)
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&tconn->asender);
- drbd_free_sock(tconn);
+ drbd_thread_stop(&connection->asender);
+ drbd_free_sock(connection);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_disconnected(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_disconnected(peer_device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
- if (!list_empty(&tconn->current_epoch->list))
- conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ if (!list_empty(&connection->current_epoch->list))
+ drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&tconn->current_epoch->epoch_size, 0);
- tconn->send.seen_any_write_yet = false;
+ atomic_set(&connection->current_epoch->epoch_size, 0);
+ connection->send.seen_any_write_yet = false;
- conn_info(tconn, "Connection closed\n");
+ drbd_info(connection, "Connection closed\n");
- if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
- conn_try_outdate_peer_async(tconn);
+ if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(connection);
- spin_lock_irq(&tconn->req_lock);
- oc = tconn->cstate;
+ spin_lock_irq(&connection->resource->req_lock);
+ oc = connection->cstate;
if (oc >= C_UNCONNECTED)
- _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
if (oc == C_DISCONNECTING)
- conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+ conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}
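The per-volume walk in conn_disconnect() above shows the converted iteration idiom: connection->peer_devices replaces tconn->volumes, and the device is pinned with a kref while the RCU read lock is dropped around the (possibly sleeping) per-device call. Schematically:

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);            /* keep the device alive */
		rcu_read_unlock();                  /* drbd_disconnected() may sleep */
		drbd_disconnected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();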
-static int drbd_disconnected(struct drbd_conf *mdev)
+static int drbd_disconnected(struct drbd_peer_device *peer_device)
{
+ struct drbd_device *device = peer_device->device;
unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->tconn->req_lock);
- _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
- _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ _drbd_wait_ee_list_empty(device, &device->active_ee);
+ _drbd_wait_ee_list_empty(device, &device->sync_ee);
+ _drbd_wait_ee_list_empty(device, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -4504,42 +4557,42 @@ static int drbd_disconnected(struct drbd_conf *mdev)
* resync_LRU. The resync_LRU tracks the whole operation including
* the disk-IO, while the rs_pending_cnt only tracks the blocks
* on the fly. */
- drbd_rs_cancel_all(mdev);
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- wake_up(&mdev->misc_wait);
+ drbd_rs_cancel_all(device);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
+ wake_up(&device->misc_wait);
- del_timer_sync(&mdev->resync_timer);
- resync_timer_fn((unsigned long)mdev);
+ del_timer_sync(&device->resync_timer);
+ resync_timer_fn((unsigned long)device);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
* to be "canceled" */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&peer_device->connection->sender_work);
- drbd_finish_peer_reqs(mdev);
+ drbd_finish_peer_reqs(device);
/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
might have queued work again. The one before drbd_finish_peer_reqs() is
necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
- drbd_flush_workqueue(mdev);
+ drbd_flush_workqueue(&peer_device->connection->sender_work);
/* need to do it again, drbd_finish_peer_reqs() may have populated it
* again via drbd_try_clear_on_disk_bm(). */
- drbd_rs_cancel_all(mdev);
+ drbd_rs_cancel_all(device);
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
+ kfree(device->p_uuid);
+ device->p_uuid = NULL;
- if (!drbd_suspended(mdev))
- tl_clear(mdev->tconn);
+ if (!drbd_suspended(device))
+ tl_clear(peer_device->connection);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
/* serialize with bitmap writeout triggered by the state change,
* if any. */
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -4548,20 +4601,20 @@ static int drbd_disconnected(struct drbd_conf *mdev)
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(device, &device->net_ee);
if (i)
- dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
- i = atomic_read(&mdev->pp_in_use_by_net);
+ drbd_info(device, "net_ee not empty, killed %u entries\n", i);
+ i = atomic_read(&device->pp_in_use_by_net);
if (i)
- dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
- i = atomic_read(&mdev->pp_in_use);
+ drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
+ i = atomic_read(&device->pp_in_use);
if (i)
- dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
+ drbd_info(device, "pp_in_use = %d, expected 0\n", i);
- D_ASSERT(list_empty(&mdev->read_ee));
- D_ASSERT(list_empty(&mdev->active_ee));
- D_ASSERT(list_empty(&mdev->sync_ee));
- D_ASSERT(list_empty(&mdev->done_ee));
+ D_ASSERT(device, list_empty(&device->read_ee));
+ D_ASSERT(device, list_empty(&device->active_ee));
+ D_ASSERT(device, list_empty(&device->sync_ee));
+ D_ASSERT(device, list_empty(&device->done_ee));
return 0;
}
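drbd_disconnected() flushes the connection's sender work queue twice around drbd_finish_peer_reqs(): once so that still-queued worker callbacks are cancelled before the peer requests are reaped, and once more because reaping may itself queue new work. Stripped to that skeleton (the surrounding per-device teardown omitted):

	drbd_flush_workqueue(&peer_device->connection->sender_work);
	drbd_finish_peer_reqs(device);
	/* finishing peer requests may have queued work again */
	drbd_flush_workqueue(&peer_device->connection->sender_work);
	/* ... and may have repopulated the resync LRU */
	drbd_rs_cancel_all(device);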
@@ -4575,19 +4628,19 @@ static int drbd_disconnected(struct drbd_conf *mdev)
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_features(struct drbd_tconn *tconn)
+static int drbd_send_features(struct drbd_connection *connection)
{
struct drbd_socket *sock;
struct p_connection_features *p;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -4597,36 +4650,36 @@ static int drbd_send_features(struct drbd_tconn *tconn)
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_features(struct drbd_tconn *tconn)
+static int drbd_do_features(struct drbd_connection *connection)
{
- /* ASSERT current == tconn->receiver ... */
+ /* ASSERT current == connection->receiver ... */
struct p_connection_features *p;
const int expect = sizeof(struct p_connection_features);
struct packet_info pi;
int err;
- err = drbd_send_features(tconn);
+ err = drbd_send_features(connection);
if (err)
return 0;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err)
return 0;
if (pi.cmd != P_CONNECTION_FEATURES) {
- conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
return -1;
}
if (pi.size != expect) {
- conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
expect, pi.size);
return -1;
}
p = pi.data;
- err = drbd_recv_all_warn(tconn, p, expect);
+ err = drbd_recv_all_warn(connection, p, expect);
if (err)
return 0;
@@ -4639,15 +4692,15 @@ static int drbd_do_features(struct drbd_tconn *tconn)
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- conn_info(tconn, "Handshake successful: "
- "Agreed network protocol version %d\n", tconn->agreed_pro_version);
+ drbd_info(connection, "Handshake successful: "
+ "Agreed network protocol version %d\n", connection->agreed_pro_version);
return 1;
incompat:
- conn_err(tconn, "incompatible DRBD dialects: "
+ drbd_err(connection, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4655,10 +4708,10 @@ static int drbd_do_features(struct drbd_tconn *tconn)
}
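Feature negotiation keeps its logic; only the logging macros and the struct name change. The agreed protocol version is the lower of our maximum and the peer's maximum, after non-overlapping ranges have been rejected. In short (as in drbd_do_features() above; the symmetric half of the range check sits outside the visible hunk):

	if (PRO_VERSION_MIN > p->protocol_max)
		goto incompat;          /* version ranges do not overlap */
	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);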
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_tconn *tconn)
+static int drbd_do_auth(struct drbd_connection *connection)
{
- conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
- conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+ drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
+ drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
return -1;
}
#else
@@ -4670,7 +4723,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_tconn *tconn)
+static int drbd_do_auth(struct drbd_connection *connection)
{
struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -4689,69 +4742,69 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
/* FIXME: Put the challenge/response into the preallocated socket buffer. */
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
key_len = strlen(nc->shared_secret);
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
- desc.tfm = tconn->cram_hmac_tfm;
+ desc.tfm = connection->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
+ rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- sock = &tconn->data;
- if (!conn_prepare_command(tconn, sock)) {
+ sock = &connection->data;
+ if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
- rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_CHALLENGE) {
- conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
if (pi.size > CHALLENGE_LEN * 2) {
- conn_err(tconn, "expected AuthChallenge payload too big.\n");
+ drbd_err(connection, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- conn_err(tconn, "kmalloc of peers_ch failed\n");
+ drbd_err(connection, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ err = drbd_recv_all_warn(connection, peers_ch, pi.size);
if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- conn_err(tconn, "kmalloc of response failed\n");
+ drbd_err(connection, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
@@ -4761,40 +4814,40 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- if (!conn_prepare_command(tconn, sock)) {
+ if (!conn_prepare_command(connection, sock)) {
rv = 0;
goto fail;
}
- rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
response, resp_size);
if (!rv)
goto fail;
- err = drbd_recv_header(tconn, &pi);
+ err = drbd_recv_header(connection, &pi);
if (err) {
rv = 0;
goto fail;
}
if (pi.cmd != P_AUTH_RESPONSE) {
- conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
if (pi.size != resp_size) {
- conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ drbd_err(connection, "expected AuthResponse payload of wrong size\n");
rv = 0;
goto fail;
}
- err = drbd_recv_all_warn(tconn, response , resp_size);
+ err = drbd_recv_all_warn(connection, response , resp_size);
if (err) {
rv = 0;
goto fail;
@@ -4802,7 +4855,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- conn_err(tconn, "kmalloc of right_response failed\n");
+ drbd_err(connection, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4811,7 +4864,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
+ drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4819,7 +4872,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
rv = !memcmp(response, right_response, resp_size);
if (rv)
- conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
resp_size);
else
rv = -1;
@@ -4833,163 +4886,169 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
}
#endif
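drbd_do_auth() above implements a CRAM-style challenge/response over the data socket: each side HMACs the peer's random challenge with the shared secret and compares the peer's reply against a locally computed expectation. A schematic outline using only identifiers from the hunks above (error handling and the scatterlist setup, which falls outside the visible hunks, are omitted):

	/* 1. key the tfm with the shared secret from net_conf */
	crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
	/* 2. send CHALLENGE_LEN random bytes as P_AUTH_CHALLENGE */
	get_random_bytes(my_challenge, CHALLENGE_LEN);
	/* 3. HMAC the peer's challenge (peers_ch) and return it as P_AUTH_RESPONSE */
	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	/* 4. HMAC our own challenge into right_response and compare it with the
	 *    peer's P_AUTH_RESPONSE; equal digests mean authentication succeeded */
	rv = !memcmp(response, right_response, resp_size);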
-int drbdd_init(struct drbd_thread *thi)
+int drbd_receiver(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
int h;
- conn_info(tconn, "receiver (re)started\n");
+ drbd_info(connection, "receiver (re)started\n");
do {
- h = conn_connect(tconn);
+ h = conn_connect(connection);
if (h == 0) {
- conn_disconnect(tconn);
+ conn_disconnect(connection);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- conn_warn(tconn, "Discarding network configuration.\n");
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ drbd_warn(connection, "Discarding network configuration.\n");
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
if (h > 0)
- drbdd(tconn);
+ drbdd(connection);
- conn_disconnect(tconn);
+ conn_disconnect(connection);
- conn_info(tconn, "receiver terminated\n");
+ drbd_info(connection, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
-static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
- set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
+ set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
} else {
- set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
- conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
+ drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
- wake_up(&tconn->ping_wait);
+ wake_up(&connection->ping_wait);
return 0;
}
-static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
- D_ASSERT(tconn->agreed_pro_version < 100);
- return got_conn_RqSReply(tconn, pi);
+ if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
+ D_ASSERT(device, connection->agreed_pro_version < 100);
+ return got_conn_RqSReply(connection, pi);
}
if (retcode >= SS_SUCCESS) {
- set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+ set_bit(CL_ST_CHG_SUCCESS, &device->flags);
} else {
- set_bit(CL_ST_CHG_FAIL, &mdev->flags);
- dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
+ set_bit(CL_ST_CHG_FAIL, &device->flags);
+ drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
- wake_up(&mdev->state_wait);
+ wake_up(&device->state_wait);
return 0;
}
-static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
{
- return drbd_send_ping_ack(tconn);
+ return drbd_send_ping_ack(connection);
}
-static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
{
/* restore idle timeout */
- tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
- wake_up(&tconn->ping_wait);
+ connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
+ wake_up(&connection->ping_wait);
return 0;
}
-static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, sector);
- drbd_set_in_sync(mdev, sector, blksize);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, sector);
+ drbd_set_in_sync(device, sector, blksize);
/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
- mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
- put_ldev(mdev);
+ device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
+ put_ldev(device);
}
- dec_rs_pending(mdev);
- atomic_add(blksize >> 9, &mdev->rs_sect_in);
+ dec_rs_pending(device);
+ atomic_add(blksize >> 9, &device->rs_sect_in);
return 0;
}
static int
-validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
struct rb_root *root, const char *func,
enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->tconn->req_lock);
- req = find_request(mdev, root, id, sector, missing_ok, func);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_request(device, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return 0;
}
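validate_req_change_req_state() above is the common tail for the ACK handlers that follow: look the request up in the per-device tree under resource->req_lock, feed the event to the request state machine, and complete the master bio outside the lock. The core, as converted:

	spin_lock_irq(&device->resource->req_lock);
	req = find_request(device, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&device->resource->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);                   /* e.g. what == NEG_ACKED */
	spin_unlock_irq(&device->resource->req_lock);

	if (m.bio)
		complete_master_bio(device, &m);    /* finish the upper-layer bio */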
-static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- drbd_set_in_sync(mdev, sector, blksize);
- dec_rs_pending(mdev);
+ drbd_set_in_sync(device, sector, blksize);
+ dec_rs_pending(device);
return 0;
}
switch (pi->cmd) {
@@ -5012,33 +5071,35 @@ static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
BUG();
}
- return validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->write_requests, __func__,
+ return validate_req_change_req_state(device, p->block_id, sector,
+ &device->write_requests, __func__,
what, false);
}
-static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
int err;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (p->block_id == ID_SYNCER) {
- dec_rs_pending(mdev);
- drbd_rs_failed_io(mdev, sector, size);
+ dec_rs_pending(device);
+ drbd_rs_failed_io(device, sector, size);
return 0;
}
- err = validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->write_requests, __func__,
+ err = validate_req_change_req_state(device, p->block_id, sector,
+ &device->write_requests, __func__,
NEG_ACKED, true);
if (err) {
/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
@@ -5046,80 +5107,86 @@ static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
request is no longer in the collision hash. */
/* In Protocol B we might already have got a P_RECV_ACK
but then get a P_NEG_ACK afterwards. */
- drbd_set_out_of_sync(mdev, sector, size);
+ drbd_set_out_of_sync(device, sector, size);
}
return 0;
}
-static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
+ drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
- return validate_req_change_req_state(mdev, p->block_id, sector,
- &mdev->read_requests, __func__,
+ return validate_req_change_req_state(device, p->block_id, sector,
+ &device->read_requests, __func__,
NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
sector_t sector;
int size;
struct p_block_ack *p = pi->data;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, sector);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_rs_complete_io(device, sector);
switch (pi->cmd) {
case P_NEG_RS_DREPLY:
- drbd_rs_failed_io(mdev, sector, size);
+ drbd_rs_failed_io(device, sector, size);
case P_RS_CANCEL:
break;
default:
BUG();
}
- put_ldev(mdev);
+ put_ldev(device);
}
return 0;
}
-static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
{
struct p_barrier_ack *p = pi->data;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
- tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
+ tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+
+ if (device->state.conn == C_AHEAD &&
+ atomic_read(&device->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
+ device->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&device->start_resync_timer);
}
}
rcu_read_unlock();
@@ -5127,90 +5194,94 @@ static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
return 0;
}
-static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
+ struct drbd_device *device;
struct p_block_ack *p = pi->data;
- struct drbd_work *w;
+ struct drbd_device_work *dw;
sector_t sector;
int size;
- mdev = vnr_to_mdev(tconn, pi->vnr);
- if (!mdev)
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
return -EIO;
+ device = peer_device->device;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- update_peer_seq(mdev, be32_to_cpu(p->seq_num));
+ update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_out_of_sync_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(device, sector, size);
else
- ov_out_of_sync_print(mdev);
+ ov_out_of_sync_print(device);
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return 0;
- drbd_rs_complete_io(mdev, sector);
- dec_rs_pending(mdev);
+ drbd_rs_complete_io(device, sector);
+ dec_rs_pending(device);
- --mdev->ov_left;
+ --device->ov_left;
/* let's advance progress step marks only for every other megabyte */
- if ((mdev->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(mdev, mdev->ov_left);
-
- if (mdev->ov_left == 0) {
- w = kmalloc(sizeof(*w), GFP_NOIO);
- if (w) {
- w->cb = w_ov_finished;
- w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->sender_work, w);
+ if ((device->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(device, device->ov_left);
+
+ if (device->ov_left == 0) {
+ dw = kmalloc(sizeof(*dw), GFP_NOIO);
+ if (dw) {
+ dw->w.cb = w_ov_finished;
+ dw->device = device;
+ drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
} else {
- dev_err(DEV, "kmalloc(w) failed.");
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ drbd_err(device, "kmalloc(dw) failed.");
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
}
}
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
-static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
{
return 0;
}
-static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
+static int connection_finish_peer_reqs(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr, not_empty = 0;
do {
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
flush_signals(current);
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ kref_get(&device->kref);
rcu_read_unlock();
- if (drbd_finish_peer_reqs(mdev)) {
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ if (drbd_finish_peer_reqs(device)) {
+ kref_put(&device->kref, drbd_destroy_device);
return 1;
}
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
- set_bit(SIGNAL_ASENDER, &tconn->flags);
+ set_bit(SIGNAL_ASENDER, &connection->flags);
- spin_lock_irq(&tconn->req_lock);
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- not_empty = !list_empty(&mdev->done_ee);
+ spin_lock_irq(&connection->resource->req_lock);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ not_empty = !list_empty(&device->done_ee);
if (not_empty)
break;
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
rcu_read_unlock();
} while (not_empty);
@@ -5219,7 +5290,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
struct asender_cmd {
size_t pkt_size;
- int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
+ int (*fn)(struct drbd_connection *connection, struct packet_info *);
};
static struct asender_cmd asender_tbl[] = {
@@ -5244,13 +5315,13 @@ static struct asender_cmd asender_tbl[] = {
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
struct asender_cmd *cmd = NULL;
struct packet_info pi;
int rv;
- void *buf = tconn->meta.rbuf;
+ void *buf = connection->meta.rbuf;
int received = 0;
- unsigned int header_size = drbd_header_size(tconn);
+ unsigned int header_size = drbd_header_size(connection);
int expect = header_size;
bool ping_timeout_active = false;
struct net_conf *nc;
@@ -5259,45 +5330,45 @@ int drbd_asender(struct drbd_thread *thi)
rv = sched_setscheduler(current, SCHED_RR, &param);
if (rv < 0)
- conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
+ drbd_err(connection, "drbd_asender: ERROR set priority, ret=%d\n", rv);
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
ping_timeo = nc->ping_timeo;
tcp_cork = nc->tcp_cork;
ping_int = nc->ping_int;
rcu_read_unlock();
- if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
- if (drbd_send_ping(tconn)) {
- conn_err(tconn, "drbd_send_ping has failed\n");
+ if (test_and_clear_bit(SEND_PING, &connection->flags)) {
+ if (drbd_send_ping(connection)) {
+ drbd_err(connection, "drbd_send_ping has failed\n");
goto reconnect;
}
- tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ connection->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
ping_timeout_active = true;
}
/* TODO: conditionally cork; it may hurt latency if we cork without
much to send */
if (tcp_cork)
- drbd_tcp_cork(tconn->meta.socket);
- if (tconn_finish_peer_reqs(tconn)) {
- conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ drbd_tcp_cork(connection->meta.socket);
+ if (connection_finish_peer_reqs(connection)) {
+ drbd_err(connection, "connection_finish_peer_reqs() failed\n");
goto reconnect;
}
/* but unconditionally uncork unless disabled */
if (tcp_cork)
- drbd_tcp_uncork(tconn->meta.socket);
+ drbd_tcp_uncork(connection->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
flush_signals(current);
@@ -5315,51 +5386,51 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
- if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ if (test_bit(DISCONNECT_SENT, &connection->flags)) {
long t;
rcu_read_lock();
- t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
rcu_read_unlock();
- t = wait_event_timeout(tconn->ping_wait,
- tconn->cstate < C_WF_REPORT_PARAMS,
+ t = wait_event_timeout(connection->ping_wait,
+ connection->cstate < C_WF_REPORT_PARAMS,
t);
if (t)
break;
}
- conn_err(tconn, "meta connection shut down by peer.\n");
+ drbd_err(connection, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(tconn->last_received,
- jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(connection->last_received,
+ jiffies - connection->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- conn_err(tconn, "PingAck did not arrive in time.\n");
+ drbd_err(connection, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &tconn->flags);
+ set_bit(SEND_PING, &connection->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ drbd_err(connection, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (decode_header(tconn, tconn->meta.rbuf, &pi))
+ if (decode_header(connection, connection->meta.rbuf, &pi))
goto reconnect;
cmd = &asender_tbl[pi.cmd];
if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
- conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
expect = header_size + cmd->pkt_size;
if (pi.size != expect - header_size) {
- conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
pi.cmd, pi.size);
goto reconnect;
}
@@ -5367,21 +5438,21 @@ int drbd_asender(struct drbd_thread *thi)
if (received == expect) {
bool err;
- err = cmd->fn(tconn, &pi);
+ err = cmd->fn(connection, &pi);
if (err) {
- conn_err(tconn, "%pf failed\n", cmd->fn);
+ drbd_err(connection, "%pf failed\n", cmd->fn);
goto reconnect;
}
- tconn->last_received = jiffies;
+ connection->last_received = jiffies;
if (cmd == &asender_tbl[P_PING_ACK]) {
/* restore idle timeout */
- tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ connection->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
ping_timeout_active = false;
}
- buf = tconn->meta.rbuf;
+ buf = connection->meta.rbuf;
received = 0;
expect = header_size;
cmd = NULL;
@@ -5390,16 +5461,16 @@ int drbd_asender(struct drbd_thread *thi)
if (0) {
reconnect:
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
- conn_md_sync(tconn);
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(connection);
}
if (0) {
disconnect:
- conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
}
- clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ clear_bit(SIGNAL_ASENDER, &connection->flags);
- conn_info(tconn, "asender terminated\n");
+ drbd_info(connection, "asender terminated\n");
return 0;
}
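Every ack handler converted above repeats the same volume lookup: resolve pi->vnr to a peer_device on the connection, bail out with -EIO if the volume is unknown, then work on peer_device->device. A minimal sketch of that shared pattern, using only the names visible in the hunks above (the handler name got_SomeAck is invented for illustration and is not part of the patch):

static int got_SomeAck(struct drbd_connection *connection, struct packet_info *pi)
{
	struct drbd_peer_device *peer_device;
	struct drbd_device *device;

	/* map the volume number from the packet header to its peer_device */
	peer_device = conn_peer_device(connection, pi->vnr);
	if (!peer_device)
		return -EIO;
	/* the device object replaces the old per-volume mdev */
	device = peer_device->device;

	/* packet-specific work happens here, operating on device */
	return 0;
}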
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..3779c8d2875b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -31,37 +31,37 @@
#include "drbd_req.h"
-static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
+static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_data_dir(req->master_bio);
int cpu;
cpu = part_stat_lock();
- part_round_stats(cpu, &mdev->vdisk->part0);
- part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
- part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
+ part_round_stats(cpu, &device->vdisk->part0);
+ part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
+ part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
(void) cpu; /* The macro invocations above want the cpu argument, I do not like
the compiler warning about cpu only assigned but never used... */
- part_inc_in_flight(&mdev->vdisk->part0, rw);
+ part_inc_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
/* Update disk stats when completing request upwards */
-static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
+static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
int rw = bio_data_dir(req->master_bio);
unsigned long duration = jiffies - req->start_time;
int cpu;
cpu = part_stat_lock();
- part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
- part_round_stats(cpu, &mdev->vdisk->part0);
- part_dec_in_flight(&mdev->vdisk->part0, rw);
+ part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
+ part_round_stats(cpu, &device->vdisk->part0);
+ part_dec_in_flight(&device->vdisk->part0, rw);
part_stat_unlock();
}
-static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+static struct drbd_request *drbd_req_new(struct drbd_device *device,
struct bio *bio_src)
{
struct drbd_request *req;
@@ -72,13 +72,13 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
drbd_req_make_private_bio(req, bio_src);
req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->w.mdev = mdev;
+ req->device = device;
req->master_bio = bio_src;
req->epoch = 0;
drbd_clear_interval(&req->i);
- req->i.sector = bio_src->bi_sector;
- req->i.size = bio_src->bi_size;
+ req->i.sector = bio_src->bi_iter.bi_sector;
+ req->i.size = bio_src->bi_iter.bi_size;
req->i.local = true;
req->i.waiting = false;
@@ -95,14 +95,14 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
const unsigned s = req->rq_state;
if ((req->master_bio && !(s & RQ_POSTPONED)) ||
atomic_read(&req->completion_ref) ||
(s & RQ_LOCAL_PENDING) ||
((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
- dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
+ drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
s, atomic_read(&req->completion_ref));
return;
}
@@ -132,10 +132,10 @@ void drbd_req_destroy(struct kref *kref)
*/
if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+ drbd_set_out_of_sync(device, req->i.sector, req->i.size);
if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->i.sector, req->i.size);
+ drbd_set_in_sync(device, req->i.sector, req->i.size);
}
/* one might be tempted to move the drbd_al_complete_io
@@ -149,11 +149,11 @@ void drbd_req_destroy(struct kref *kref)
* we would forget to resync the corresponding extent.
*/
if (s & RQ_IN_ACT_LOG) {
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_al_complete_io(mdev, &req->i);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_al_complete_io(device, &req->i);
+ put_ldev(device);
} else if (__ratelimit(&drbd_ratelimit_state)) {
- dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+ drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
"but my Disk seems to have failed :(\n",
(unsigned long long) req->i.sector, req->i.size);
}
@@ -163,41 +163,42 @@ void drbd_req_destroy(struct kref *kref)
mempool_free(req, drbd_request_mempool);
}
-static void wake_all_senders(struct drbd_tconn *tconn) {
- wake_up(&tconn->sender_work.q_wait);
+static void wake_all_senders(struct drbd_connection *connection)
+{
+ wake_up(&connection->sender_work.q_wait);
}
/* must hold resource->req_lock */
-void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_connection *connection)
{
/* no point closing an epoch, if it is empty, anyways. */
- if (tconn->current_tle_writes == 0)
+ if (connection->current_tle_writes == 0)
return;
- tconn->current_tle_writes = 0;
- atomic_inc(&tconn->current_tle_nr);
- wake_all_senders(tconn);
+ connection->current_tle_writes = 0;
+ atomic_inc(&connection->current_tle_nr);
+ wake_all_senders(connection);
}
-void complete_master_bio(struct drbd_conf *mdev,
+void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
bio_endio(m->bio, m->error);
- dec_ap_bio(mdev);
+ dec_ap_bio(device);
}
static void drbd_remove_request_interval(struct rb_root *root,
struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct drbd_interval *i = &req->i;
drbd_remove_interval(root, i);
/* Wake up any processes waiting for this request to complete. */
if (i->waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
}
/* Helper for __req_mod().
@@ -210,7 +211,7 @@ static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
int rw;
int error, ok;
@@ -226,12 +227,12 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
(s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
(s & RQ_COMPLETION_SUSP)) {
- dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
+ drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
}
if (!req->master_bio) {
- dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
+ drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
}
@@ -259,9 +260,9 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
struct rb_root *root;
if (rw == WRITE)
- root = &mdev->write_requests;
+ root = &device->write_requests;
else
- root = &mdev->read_requests;
+ root = &device->read_requests;
drbd_remove_request_interval(root, req);
}
@@ -273,11 +274,11 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* and reset the transfer log epoch write_cnt.
*/
if (rw == WRITE &&
- req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
- start_new_tl_epoch(mdev->tconn);
+ req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
+ start_new_tl_epoch(first_peer_device(device)->connection);
/* Update disk stats */
- _drbd_end_io_acct(mdev, req);
+ _drbd_end_io_acct(device, req);
/* If READ failed,
* have it be pushed back to the retry work queue,
@@ -305,8 +306,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
- struct drbd_conf *mdev = req->w.mdev;
- D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+ struct drbd_device *device = req->device;
+ D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
if (!atomic_sub_and_test(put, &req->completion_ref))
return 0;
@@ -328,12 +329,12 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int clear, int set)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
unsigned s = req->rq_state;
int c_put = 0;
int k_put = 0;
- if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
+ if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
set |= RQ_COMPLETION_SUSP;
/* apply */
@@ -351,7 +352,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
atomic_inc(&req->completion_ref);
if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
- inc_ap_pending(mdev);
+ inc_ap_pending(device);
atomic_inc(&req->completion_ref);
}
@@ -362,7 +363,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_get(&req->kref); /* wait for the DONE */
if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
- atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
+ atomic_add(req->i.size >> 9, &device->ap_in_flight);
if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
atomic_inc(&req->completion_ref);
@@ -373,7 +374,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
++c_put;
if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
- D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
/* local completion may still come in later,
* we need to keep the req object around. */
kref_get(&req->kref);
@@ -388,7 +389,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
}
if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
- dec_ap_pending(mdev);
+ dec_ap_pending(device);
++c_put;
}
@@ -397,7 +398,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
if (req->rq_state & RQ_NET_SENT)
- atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+ atomic_sub(req->i.size >> 9, &device->ap_in_flight);
++k_put;
}
@@ -409,14 +410,14 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int at_least = k_put + !!c_put;
int refcount = atomic_read(&req->kref.refcount);
if (refcount < at_least)
- dev_err(DEV,
+ drbd_err(device,
"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
s, req->rq_state, refcount, at_least);
}
/* If we made progress, retry conflicting peer requests, if any. */
if (req->i.waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
if (c_put)
k_put += drbd_req_put_completion_ref(req, m, c_put);
@@ -424,18 +425,18 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_sub(&req->kref, k_put, drbd_req_destroy);
}
-static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
char b[BDEVNAME_SIZE];
if (!__ratelimit(&drbd_ratelimit_state))
return;
- dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
(unsigned long long)req->i.sector,
req->i.size >> 9,
- bdevname(mdev->ldev->backing_bdev, b));
+ bdevname(device->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@@ -453,7 +454,7 @@ static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *re
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct net_conf *nc;
int p, rv = 0;
@@ -462,7 +463,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
switch (what) {
default:
- dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
+ drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
break;
/* does not happen...
@@ -474,9 +475,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case TO_BE_SENT: /* via network */
/* reached via __drbd_make_request
* and from w_read_retry_remote */
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
p = nc->wire_protocol;
rcu_read_unlock();
req->rq_state |=
@@ -487,15 +488,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case TO_BE_SUBMITTED: /* locally */
/* reached via __drbd_make_request */
- D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
+ D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
- mdev->writ_cnt += req->i.size >> 9;
+ device->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->i.size >> 9;
+ device->read_cnt += req->i.size >> 9;
mod_rq_state(req, m, RQ_LOCAL_PENDING,
RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
@@ -506,15 +507,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case WRITE_COMPLETED_WITH_ERROR:
- drbd_report_io_error(mdev, req);
- __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ drbd_report_io_error(device, req);
+ __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
case READ_COMPLETED_WITH_ERROR:
- drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
- drbd_report_io_error(mdev, req);
- __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+ drbd_report_io_error(device, req);
+ __drbd_chk_io_error(device, DRBD_READ_ERROR);
/* fall through. */
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
@@ -532,16 +533,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* So we can verify the handle in the answer packet.
* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
- D_ASSERT(drbd_interval_empty(&req->i));
- drbd_insert_interval(&mdev->read_requests, &req->i);
+ D_ASSERT(device, drbd_interval_empty(&req->i));
+ drbd_insert_interval(&device->read_requests, &req->i);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ set_bit(UNPLUG_REMOTE, &device->flags);
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
- D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case QUEUE_FOR_NET_WRITE:
@@ -550,8 +552,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* Corresponding drbd_remove_request_interval is in
* drbd_req_complete() */
- D_ASSERT(drbd_interval_empty(&req->i));
- drbd_insert_interval(&mdev->write_requests, &req->i);
+ D_ASSERT(device, drbd_interval_empty(&req->i));
+ drbd_insert_interval(&device->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@@ -570,28 +572,30 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ set_bit(UNPLUG_REMOTE, &device->flags);
/* queue work item to send data */
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
p = nc->max_epoch_size;
rcu_read_unlock();
- if (mdev->tconn->current_tle_writes >= p)
- start_new_tl_epoch(mdev->tconn);
+ if (first_peer_device(device)->connection->current_tle_writes >= p)
+ start_new_tl_epoch(first_peer_device(device)->connection);
break;
case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
@@ -639,15 +643,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* If this request had been marked as RQ_POSTPONED before,
* it will actually not be completed, but "restarted",
* resubmitted from the retry worker context. */
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
break;
case WRITE_ACKED_BY_PEER_AND_SIS:
req->rq_state |= RQ_NET_SIS;
case WRITE_ACKED_BY_PEER:
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
/* protocol C; successfully written on peer.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
@@ -655,25 +659,25 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
goto ack_common;
case RECV_ACKED_BY_PEER:
- D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
* see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
ack_common:
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
case POSTPONE_WRITE:
- D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
/* If this node has already detected the write conflict, the
* worker will be waiting on misc_wait. Wake it up once this
* request has completed locally.
*/
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_POSTPONED;
if (req->i.waiting)
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
/* Do not clear RQ_NET_PENDING. This request will make further
* progress via restart_conflicting_writes() or
* fail_postponed_requests(). Hopefully. */
@@ -701,9 +705,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
- get_ldev(mdev); /* always succeeds in this call path */
+ get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
break;
case RESEND:
@@ -719,12 +724,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
Throwing them out of the TL here by pretending we got a BARRIER_ACK.
During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
- /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+ /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
* in that case we must not set RQ_NET_PENDING. */
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
- drbd_queue_work(&mdev->tconn->sender_work, &req->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */
break;
@@ -740,7 +746,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
+ drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
}
/* Allowed to complete requests, even while suspended.
* As this is called for all requests within a matching epoch,
@@ -751,12 +757,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case DATA_RECEIVED:
- D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
case QUEUE_AS_DRBD_BARRIER:
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
};
@@ -771,27 +777,27 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
-static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
+static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
- if (mdev->state.disk == D_UP_TO_DATE)
+ if (device->state.disk == D_UP_TO_DATE)
return true;
- if (mdev->state.disk != D_INCONSISTENT)
+ if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
- D_ASSERT(sector < nr_sectors);
- D_ASSERT(esector < nr_sectors);
+ nr_sectors = drbd_get_capacity(device->this_bdev);
+ D_ASSERT(device, sector < nr_sectors);
+ D_ASSERT(device, esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
+ return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
-static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
+static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
struct backing_dev_info *bdi;
@@ -799,11 +805,11 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
- return atomic_read(&mdev->local_cnt) >
- atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ return atomic_read(&device->local_cnt) >
+ atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
case RB_32K_STRIPING: /* stripe_shift = 15 */
case RB_64K_STRIPING:
case RB_128K_STRIPING:
@@ -813,7 +819,7 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
stripe_shift = (rbm - RB_32K_STRIPING + 15);
return (sector >> (stripe_shift - 9)) & 1;
case RB_ROUND_ROBIN:
- return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ return test_and_change_bit(READ_BALANCE_RR, &device->flags);
case RB_PREFER_REMOTE:
return true;
case RB_PREFER_LOCAL:
@@ -834,73 +840,73 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
static void complete_conflicting_writes(struct drbd_request *req)
{
DEFINE_WAIT(wait);
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct drbd_interval *i;
sector_t sector = req->i.sector;
int size = req->i.size;
- i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
return;
for (;;) {
- prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
- i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+ i = drbd_find_overlap(&device->write_requests, sector, size);
if (!i)
break;
/* Indicate to wake up device->misc_wait on progress. */
i->waiting = true;
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
schedule();
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
}
- finish_wait(&mdev->misc_wait, &wait);
+ finish_wait(&device->misc_wait, &wait);
}
/* called within req_lock and rcu_read_lock() */
-static void maybe_pull_ahead(struct drbd_conf *mdev)
+static void maybe_pull_ahead(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct net_conf *nc;
bool congested = false;
enum drbd_on_congestion on_congestion;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
+ nc = rcu_dereference(connection->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK;
rcu_read_unlock();
if (on_congestion == OC_BLOCK ||
- tconn->agreed_pro_version < 96)
+ connection->agreed_pro_version < 96)
return;
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
- * sure mdev->act_log is there.
+ * sure device->act_log is there.
*/
- if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ if (!get_ldev_if_state(device, D_UP_TO_DATE))
return;
if (nc->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
+ atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
+ drbd_info(device, "Congestion-fill threshold reached\n");
congested = true;
}
- if (mdev->act_log->used >= nc->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
+ if (device->act_log->used >= nc->cong_extents) {
+ drbd_info(device, "Congestion-extents threshold reached\n");
congested = true;
}
if (congested) {
/* start a new epoch for non-mirrored writes */
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
if (on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
else /*nc->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
}
- put_ldev(mdev);
+ put_ldev(device);
}
/* If this returns false, and req->private_bio is still set,
@@ -914,19 +920,19 @@ static void maybe_pull_ahead(struct drbd_conf *mdev)
*/
static bool do_remote_read(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
enum drbd_read_balancing rbm;
if (req->private_bio) {
- if (!drbd_may_do_local_read(mdev,
+ if (!drbd_may_do_local_read(device,
req->i.sector, req->i.size)) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
}
- if (mdev->state.pdsk != D_UP_TO_DATE)
+ if (device->state.pdsk != D_UP_TO_DATE)
return false;
if (req->private_bio == NULL)
@@ -936,17 +942,17 @@ static bool do_remote_read(struct drbd_request *req)
* protocol, pending requests etc. */
rcu_read_lock();
- rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
rcu_read_unlock();
if (rbm == RB_PREFER_LOCAL && req->private_bio)
return false; /* submit locally */
- if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
+ if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
return true;
}
@@ -959,11 +965,11 @@ static bool do_remote_read(struct drbd_request *req)
* which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
int remote, send_oos;
- remote = drbd_should_do_remote(mdev->state);
- send_oos = drbd_should_send_out_of_sync(mdev->state);
+ remote = drbd_should_do_remote(device->state);
+ send_oos = drbd_should_send_out_of_sync(device->state);
/* Need to replicate writes. Unless it is an empty flush,
* which is better mapped to a DRBD P_BARRIER packet,
@@ -973,7 +979,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+ D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -982,12 +988,12 @@ static int drbd_process_write_request(struct drbd_request *req)
if (!remote && !send_oos)
return 0;
- D_ASSERT(!(remote && send_oos));
+ D_ASSERT(device, !(remote && send_oos));
if (remote) {
_req_mod(req, TO_BE_SENT);
_req_mod(req, QUEUE_FOR_NET_WRITE);
- } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
+ } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
_req_mod(req, QUEUE_FOR_SEND_OOS);
return remote;
@@ -996,36 +1002,36 @@ static int drbd_process_write_request(struct drbd_request *req)
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio *bio = req->private_bio;
const int rw = bio_rw(bio);
- bio->bi_bdev = mdev->ldev->backing_bdev;
+ bio->bi_bdev = device->ldev->backing_bdev;
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
* In case the last activity log transaction failed to get on
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
- if (get_ldev(mdev)) {
- if (drbd_insert_fault(mdev,
+ if (get_ldev(device)) {
+ if (drbd_insert_fault(device,
rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
- put_ldev(mdev);
+ put_ldev(device);
} else
bio_endio(bio, -EIO);
}
-static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
- spin_lock(&mdev->submit.lock);
- list_add_tail(&req->tl_requests, &mdev->submit.writes);
- spin_unlock(&mdev->submit.lock);
- queue_work(mdev->submit.wq, &mdev->submit.worker);
+ spin_lock(&device->submit.lock);
+ list_add_tail(&req->tl_requests, &device->submit.writes);
+ spin_unlock(&device->submit.lock);
+ queue_work(device->submit.wq, &device->submit.worker);
}
/* returns the new drbd_request pointer, if the caller is expected to
@@ -1033,36 +1039,36 @@ static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
* request on the submitter thread.
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
-struct drbd_request *
-drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+static struct drbd_request *
+drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
/* allocate outside of all locks; */
- req = drbd_req_new(mdev, bio);
+ req = drbd_req_new(device, bio);
if (!req) {
- dec_ap_bio(mdev);
+ dec_ap_bio(device);
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
- dev_err(DEV, "could not kmalloc() req\n");
+ drbd_err(device, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
req->start_time = start_time;
- if (!get_ldev(mdev)) {
+ if (!get_ldev(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
}
/* Update disk stats */
- _drbd_start_io_acct(mdev, req);
+ _drbd_start_io_acct(device, req);
if (rw == WRITE && req->private_bio && req->i.size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
- if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
- drbd_queue_write(mdev, req);
+ && !test_bit(AL_SUSPENDED, &device->flags)) {
+ if (!drbd_al_begin_io_fastpath(device, &req->i)) {
+ drbd_queue_write(device, req);
return NULL;
}
req->rq_state |= RQ_IN_ACT_LOG;
@@ -1071,13 +1077,13 @@ drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long star
return req;
}
-static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
+static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
const int rw = bio_rw(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
if (rw == WRITE) {
/* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here.
@@ -1087,17 +1093,17 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
/* check for congestion, and potentially stop sending
* full data updates, but start sending "dirty bits" only. */
- maybe_pull_ahead(mdev);
+ maybe_pull_ahead(device);
}
- if (drbd_suspended(mdev)) {
+ if (drbd_suspended(device)) {
/* push back and retry: */
req->rq_state |= RQ_POSTPONED;
if (req->private_bio) {
bio_put(req->private_bio);
req->private_bio = NULL;
- put_ldev(mdev);
+ put_ldev(device);
}
goto out;
}
@@ -1111,15 +1117,15 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
}
/* which transfer log epoch does this belong to? */
- req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
+ req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
if (likely(req->i.size!=0)) {
if (rw == WRITE)
- mdev->tconn->current_tle_writes++;
+ first_peer_device(device)->connection->current_tle_writes++;
- list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
+ list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
}
if (rw == WRITE) {
@@ -1139,13 +1145,13 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
/* needs to be marked within the same spinlock */
_req_mod(req, TO_BE_SUBMITTED);
/* but we need to give up the spinlock to submit */
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
drbd_submit_req_private_bio(req);
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
} else if (no_remote) {
nodata:
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
(unsigned long long)req->i.sector, req->i.size >> 9);
/* A write may have been queued for send_oos, however.
* So we can not simply free it, we must go through drbd_req_put_completion_ref() */
@@ -1154,21 +1160,21 @@ nodata:
out:
if (drbd_req_put_completion_ref(req, &m, 1))
kref_put(&req->kref, drbd_req_destroy);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
}
-void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time)
{
- struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
+ struct drbd_request *req = drbd_request_prepare(device, bio, start_time);
if (IS_ERR_OR_NULL(req))
return;
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
-static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
+static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
struct drbd_request *req, *tmp;
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
@@ -1176,19 +1182,19 @@ static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
if (rw == WRITE /* rw != WRITE should not even end up here! */
&& req->private_bio && req->i.size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
- if (!drbd_al_begin_io_fastpath(mdev, &req->i))
+ && !test_bit(AL_SUSPENDED, &device->flags)) {
+ if (!drbd_al_begin_io_fastpath(device, &req->i))
continue;
req->rq_state |= RQ_IN_ACT_LOG;
}
list_del_init(&req->tl_requests);
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
}
-static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
+static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *incoming,
struct list_head *pending)
{
@@ -1196,9 +1202,9 @@ static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
int wake = 0;
int err;
- spin_lock_irq(&mdev->al_lock);
+ spin_lock_irq(&device->al_lock);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
- err = drbd_al_begin_io_nonblock(mdev, &req->i);
+ err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -EBUSY)
wake = 1;
if (err)
@@ -1206,30 +1212,30 @@ static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
req->rq_state |= RQ_IN_ACT_LOG;
list_move_tail(&req->tl_requests, pending);
}
- spin_unlock_irq(&mdev->al_lock);
+ spin_unlock_irq(&device->al_lock);
if (wake)
- wake_up(&mdev->al_wait);
+ wake_up(&device->al_wait);
return !list_empty(pending);
}
void do_submit(struct work_struct *ws)
{
- struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
+ struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
LIST_HEAD(incoming);
LIST_HEAD(pending);
struct drbd_request *req, *tmp;
for (;;) {
- spin_lock(&mdev->submit.lock);
- list_splice_tail_init(&mdev->submit.writes, &incoming);
- spin_unlock(&mdev->submit.lock);
+ spin_lock(&device->submit.lock);
+ list_splice_tail_init(&device->submit.writes, &incoming);
+ spin_unlock(&device->submit.lock);
- submit_fast_path(mdev, &incoming);
+ submit_fast_path(device, &incoming);
if (list_empty(&incoming))
break;
- wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
+ wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending));
/* Maybe more was queued, while we prepared the transaction?
* Try to stuff them into this transaction as well.
* Be strictly non-blocking here, no wait_event, we already
@@ -1243,17 +1249,17 @@ void do_submit(struct work_struct *ws)
/* It is ok to look outside the lock,
* it's only an optimization anyways */
- if (list_empty(&mdev->submit.writes))
+ if (list_empty(&device->submit.writes))
break;
- spin_lock(&mdev->submit.lock);
- list_splice_tail_init(&mdev->submit.writes, &more_incoming);
- spin_unlock(&mdev->submit.lock);
+ spin_lock(&device->submit.lock);
+ list_splice_tail_init(&device->submit.writes, &more_incoming);
+ spin_unlock(&device->submit.lock);
if (list_empty(&more_incoming))
break;
- made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
+ made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending);
list_splice_tail_init(&more_pending, &pending);
list_splice_tail_init(&more_incoming, &incoming);
@@ -1261,18 +1267,18 @@ void do_submit(struct work_struct *ws)
if (!made_progress)
break;
}
- drbd_al_begin_io_commit(mdev, false);
+ drbd_al_begin_io_commit(device, false);
list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
list_del_init(&req->tl_requests);
- drbd_send_and_submit(mdev, req);
+ drbd_send_and_submit(device, req);
}
}
}
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
- struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+ struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned long start_time;
start_time = jiffies;
@@ -1280,10 +1286,10 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+ D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
- inc_ap_bio(mdev);
- __drbd_make_request(mdev, bio, start_time);
+ inc_ap_bio(device);
+ __drbd_make_request(device, bio, start_time);
}
/* This is called by bio_add_page().
@@ -1300,32 +1306,32 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
- struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+ struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned int bio_size = bvm->bi_size;
int limit = DRBD_MAX_BIO_SIZE;
int backing_limit;
- if (bio_size && get_ldev(mdev)) {
+ if (bio_size && get_ldev(device)) {
unsigned int max_hw_sectors = queue_max_hw_sectors(q);
struct request_queue * const b =
- mdev->ldev->backing_bdev->bd_disk->queue;
+ device->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
- put_ldev(mdev);
+ put_ldev(device);
if ((limit >> 9) > max_hw_sectors)
limit = max_hw_sectors << 9;
}
return limit;
}
-struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
{
/* Walk the transfer log,
* and find the oldest not yet completed request */
struct drbd_request *r;
- list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ list_for_each_entry(r, &connection->transfer_log, tl_requests) {
if (atomic_read(&r->completion_ref))
return r;
}
@@ -1334,21 +1340,21 @@ struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
void request_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req; /* oldest request */
struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
- if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+ nc = rcu_dereference(connection->net_conf);
+ if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
ent = nc->timeout * HZ/10 * nc->ko_count;
- if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
- dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
- put_ldev(mdev);
+ if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
+ put_ldev(device);
}
rcu_read_unlock();
@@ -1359,11 +1365,11 @@ void request_timer_fn(unsigned long data)
now = jiffies;
- spin_lock_irq(&tconn->req_lock);
- req = find_oldest_request(tconn);
+ spin_lock_irq(&device->resource->req_lock);
+ req = find_oldest_request(connection);
if (!req) {
- spin_unlock_irq(&tconn->req_lock);
- mod_timer(&mdev->request_timer, now + et);
+ spin_unlock_irq(&device->resource->req_lock);
+ mod_timer(&device->request_timer, now + et);
return;
}
@@ -1385,17 +1391,17 @@ void request_timer_fn(unsigned long data)
*/
if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) &&
- !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
+ drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
+ if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device &&
time_after(now, req->start_time + dt) &&
- !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
- dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
- __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
+ !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
+ drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
- spin_unlock_irq(&tconn->req_lock);
- mod_timer(&mdev->request_timer, nt);
+ spin_unlock_irq(&connection->resource->req_lock);
+ mod_timer(&device->request_timer, nt);
}
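The drbd_req.c hunks above consistently swap the old mdev->tconn back-pointer for two accessors: first_peer_device(device)->connection for anything network related, and device->resource->req_lock for the shared spinlock. A small illustrative helper built only from those names (queue_on_sender is an invented name, not part of the patch):

static void queue_on_sender(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;

	spin_lock_irq(&device->resource->req_lock);
	/* transfer log and sender work queue now live on the connection */
	list_add_tail(&req->tl_requests, &connection->transfer_log);
	drbd_queue_work(&connection->sender_work, &req->w);
	spin_unlock_irq(&device->resource->req_lock);
}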
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..c684c963538e 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,23 +269,23 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
struct bio *bio;
int error;
};
-extern void start_new_tl_epoch(struct drbd_tconn *tconn);
+extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
-extern void complete_master_bio(struct drbd_conf *mdev,
+extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
-extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);
@@ -294,14 +294,14 @@ extern void drbd_restart_request(struct drbd_request *req);
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
/* __req_mod possibly frees req, do not touch req after that! */
rv = __req_mod(req, what, &m);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return rv;
}
@@ -314,16 +314,16 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
return rv;
}
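req_mod() above shows a recurring pattern in this file: mutate the request while holding the resource's req_lock, but run the completion only after the lock has been dropped. A rough stand-alone analogue, with a pthread mutex standing in for the spinlock and all names made up:

#include <pthread.h>
#include <stdio.h>

struct completion_note {
	const char *what;	/* stands in for struct bio_and_error */
	int error;
};

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

static void modify_request(struct completion_note *m)
{
	/* decide, under the lock, whether a completion is owed */
	m->what = "master bio";
	m->error = 0;
}

int main(void)
{
	struct completion_note m = { 0 };

	pthread_mutex_lock(&req_lock);
	modify_request(&m);		/* __req_mod() analogue */
	pthread_mutex_unlock(&req_lock);

	if (m.what)			/* complete_master_bio() analogue, outside the lock */
		printf("completing %s, error=%d\n", m.what, m.error);
	return 0;
}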
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 216d47b7e88b..1a84345a3868 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -27,13 +27,12 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
-/* in drbd_main.c */
-extern void tl_abort_disk_io(struct drbd_conf *mdev);
-
struct after_state_chg_work {
struct drbd_work w;
+ struct drbd_device *device;
union drbd_state os;
union drbd_state ns;
enum chg_state_flags flags;
@@ -50,12 +49,12 @@ enum sanitize_state_warnings {
};
static int w_after_state_ch(struct drbd_work *w, int unused);
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags);
-static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
-static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
+static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
+static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
@@ -63,17 +62,18 @@ static inline bool is_susp(union drbd_state s)
return s.susp || s.susp_nod || s.susp_fen;
}
-bool conn_all_vols_unconf(struct drbd_tconn *tconn)
+bool conn_all_vols_unconf(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
bool rv = true;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (mdev->state.disk != D_DISKLESS ||
- mdev->state.conn != C_STANDALONE ||
- mdev->state.role != R_SECONDARY) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (device->state.disk != D_DISKLESS ||
+ device->state.conn != C_STANDALONE ||
+ device->state.role != R_SECONDARY) {
rv = false;
break;
}
@@ -102,99 +102,111 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
return R_PRIMARY;
}
-enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
+enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
enum drbd_role role = R_UNKNOWN;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- role = max_role(role, mdev->state.role);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ role = max_role(role, device->state.role);
+ }
rcu_read_unlock();
return role;
}
-enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
+enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
enum drbd_role peer = R_UNKNOWN;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- peer = max_role(peer, mdev->state.peer);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ peer = max_role(peer, device->state.peer);
+ }
rcu_read_unlock();
return peer;
}
-enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_DISKLESS;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = max_t(enum drbd_disk_state, ds, device->state.disk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_MASK;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = min_t(enum drbd_disk_state, ds, device->state.disk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
+enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
enum drbd_disk_state ds = D_DISKLESS;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
+ }
rcu_read_unlock();
return ds;
}
-enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
+enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
enum drbd_conns conn = C_MASK;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ conn = min_t(enum drbd_conns, conn, device->state.conn);
+ }
rcu_read_unlock();
return conn;
}
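The conn_highest_*/conn_lowest_* helpers all perform the same fold over the connection's peer devices, with one extra dereference (peer_device->device) compared to the old volumes idr. A compilable toy version of that fold; the types and the enum ordering are invented so a plain numeric max works, unlike the real max_role():

#include <stdio.h>

enum role { R_UNKNOWN, R_SECONDARY, R_PRIMARY };

struct device { enum role role; };
struct peer_device { struct device *device; };

static enum role max_role(enum role a, enum role b)
{
	return a > b ? a : b;	/* simplification of the real helper */
}

int main(void)
{
	struct device d0 = { R_SECONDARY }, d1 = { R_PRIMARY };
	struct peer_device peers[] = { { &d0 }, { &d1 } };
	enum role role = R_UNKNOWN;

	for (unsigned i = 0; i < sizeof(peers) / sizeof(peers[0]); i++) {
		struct device *device = peers[i].device;	/* the extra hop vs. the old code */
		role = max_role(role, device->role);
	}
	printf("highest role: %d\n", role);
	return 0;
}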
-static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
+static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
bool rv = true;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- if (mdev->state.conn == C_WF_REPORT_PARAMS) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
rv = false;
break;
}
@@ -206,11 +218,11 @@ static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old (current) state.
* @ns: new (wanted) state.
*/
-static int cl_wide_st_chg(struct drbd_conf *mdev,
+static int cl_wide_st_chg(struct drbd_device *device,
union drbd_state os, union drbd_state ns)
{
return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
@@ -232,72 +244,72 @@ apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
}
enum drbd_state_rv
-drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
union drbd_state mask, union drbd_state val)
{
unsigned long flags;
union drbd_state ns;
enum drbd_state_rv rv;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- ns = apply_mask_val(drbd_read_state(mdev), mask, val);
- rv = _drbd_set_state(mdev, ns, f, NULL);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(device), mask, val);
+ rv = _drbd_set_state(device, ns, f, NULL);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
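drbd_change_state() builds the target state with apply_mask_val(), which works because union drbd_state aliases named bitfields with a flat integer (the .i member used throughout this file), so a masked update is a two-line bit operation. The real helper lives just above this hunk; the sketch below uses an invented field layout.

#include <stdio.h>

union state {
	struct {
		unsigned role:2;
		unsigned conn:5;
		unsigned disk:4;
	};
	unsigned int i;
};

static union state apply_mask_val(union state os, union state mask, union state val)
{
	os.i &= ~mask.i;	/* clear the bits we are allowed to change */
	os.i |= val.i;		/* set their new values */
	return os;
}

int main(void)
{
	union state os = { .role = 1, .conn = 10, .disk = 8 };
	union state mask = { 0 }, val = { 0 };

	mask.conn = 0x1f;	/* request a connection-state change only */
	val.conn = 21;

	union state ns = apply_mask_val(os, mask, val);
	printf("role=%u conn=%u disk=%u\n",
	       (unsigned)ns.role, (unsigned)ns.conn, (unsigned)ns.disk);
	return 0;
}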
/**
* drbd_force_state() - Impose a change which happens outside our control on our state
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*/
-void drbd_force_state(struct drbd_conf *mdev,
+void drbd_force_state(struct drbd_device *device,
union drbd_state mask, union drbd_state val)
{
- drbd_change_state(mdev, CS_HARD, mask, val);
+ drbd_change_state(device, CS_HARD, mask, val);
}
static enum drbd_state_rv
-_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+_req_st_cond(struct drbd_device *device, union drbd_state mask,
union drbd_state val)
{
union drbd_state os, ns;
unsigned long flags;
enum drbd_state_rv rv;
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
return SS_CW_FAILED_BY_PEER;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
- if (!cl_wide_st_chg(mdev, os, ns))
+ if (!cl_wide_st_chg(device, os, ns))
rv = SS_CW_NO_NEED;
if (rv == SS_UNKNOWN_ERROR) {
- rv = is_valid_state(mdev, ns);
+ rv = is_valid_state(device, ns);
if (rv >= SS_SUCCESS) {
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
}
}
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
return rv;
}
/**
* drbd_req_state() - Perform an eventually cluster wide state change
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
@@ -306,7 +318,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
* _drbd_request_state().
*/
static enum drbd_state_rv
-drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+drbd_req_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
struct completion done;
@@ -317,68 +329,68 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
init_completion(&done);
if (f & CS_SERIALIZE)
- mutex_lock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS) {
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
goto abort;
}
- if (cl_wide_st_chg(mdev, os, ns)) {
- rv = is_valid_state(mdev, ns);
+ if (cl_wide_st_chg(device, os, ns)) {
+ rv = is_valid_state(device, ns);
if (rv == SS_SUCCESS)
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- if (drbd_send_state_req(mdev, mask, val)) {
+ if (drbd_send_state_req(first_peer_device(device), mask, val)) {
rv = SS_CW_FAILED_BY_PEER;
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- wait_event(mdev->state_wait,
- (rv = _req_st_cond(mdev, mask, val)));
+ wait_event(device->state_wait,
+ (rv = _req_st_cond(device, mask, val)));
if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
goto abort;
}
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- ns = apply_mask_val(drbd_read_state(mdev), mask, val);
- rv = _drbd_set_state(mdev, ns, f, &done);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(device), mask, val);
+ rv = _drbd_set_state(device, ns, f, &done);
} else {
- rv = _drbd_set_state(mdev, ns, f, &done);
+ rv = _drbd_set_state(device, ns, f, &done);
}
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(current != mdev->tconn->worker.task);
+ D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
wait_for_completion(&done);
}
abort:
if (f & CS_SERIALIZE)
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return rv;
}
/**
* _drbd_request_state() - Request a state change (with flags)
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
* @f: flags
@@ -387,20 +399,20 @@ abort:
* flag, or when logging of failed state change requests is not desired.
*/
enum drbd_state_rv
-_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+_drbd_request_state(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
enum drbd_state_rv rv;
- wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+ wait_event(device->state_wait,
+ (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);
return rv;
}
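_drbd_request_state() simply retries drbd_req_state() until the result is no longer SS_IN_TRANSIENT_STATE; wait_event() supplies the sleeping. A user-space caricature of that retry loop, with the request function and the return codes mocked:

#include <stdio.h>

enum rv { SS_SUCCESS = 1, SS_IN_TRANSIENT_STATE = -13 };	/* values invented here */

static int attempts;

static enum rv mock_req_state(void)
{
	/* pretend the cluster is busy for the first two attempts */
	return ++attempts < 3 ? SS_IN_TRANSIENT_STATE : SS_SUCCESS;
}

int main(void)
{
	enum rv rv;

	/* wait_event(state_wait, (rv = drbd_req_state(...)) != SS_IN_TRANSIENT_STATE)
	 * boils down to: sleep until the condition holds; here we just loop. */
	do {
		rv = mock_req_state();
	} while (rv == SS_IN_TRANSIENT_STATE);

	printf("settled after %d attempts, rv=%d\n", attempts, rv);
	return 0;
}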
-static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+static void print_st(struct drbd_device *device, char *name, union drbd_state ns)
{
- dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
+ drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
name,
drbd_conn_str(ns.conn),
drbd_role_str(ns.role),
@@ -416,14 +428,14 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
);
}
-void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+void print_st_err(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum drbd_state_rv err)
{
if (err == SS_IN_TRANSIENT_STATE)
return;
- dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
- print_st(mdev, " state", os);
- print_st(mdev, "wanted", ns);
+ drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
+ print_st(device, " state", os);
+ print_st(device, "wanted", ns);
}
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
@@ -457,7 +469,7 @@ static long print_state_change(char *pb, union drbd_state os, union drbd_state n
return pbp - pb;
}
-static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
+static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
@@ -479,10 +491,10 @@ static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, un
ns.user_isp);
if (pbp != pb)
- dev_info(DEV, "%s\n", pb);
+ drbd_info(device, "%s\n", pb);
}
-static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
+static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
@@ -496,17 +508,17 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
is_susp(ns));
if (pbp != pb)
- conn_info(tconn, "%s\n", pb);
+ drbd_info(connection, "%s\n", pb);
}
/**
* is_valid_state() - Returns an SS_ error code if ns is not valid
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: State to consider.
*/
static enum drbd_state_rv
-is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+is_valid_state(struct drbd_device *device, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
@@ -516,24 +528,24 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
rcu_read_lock();
fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ put_ldev(device);
}
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
if (nc) {
if (!nc->two_primaries && ns.role == R_PRIMARY) {
if (ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
- else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
+ else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
rv = SS_O_VOL_PEER_PRI;
}
}
if (rv <= 0)
/* already found a reason to abort */;
- else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ else if (ns.role == R_SECONDARY && device->open_cnt)
rv = SS_DEVICE_IN_USE;
else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
@@ -567,7 +579,7 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
rv = SS_NO_VERIFY_ALG;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- mdev->tconn->agreed_pro_version < 88)
+ first_peer_device(device)->connection->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED;
else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
@@ -589,12 +601,12 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
* is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
* This function limits state transitions that may be declined by DRBD. I.e.
* user requests (aka soft transitions).
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: new state.
* @os: old state.
*/
static enum drbd_state_rv
-is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
+is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
enum drbd_state_rv rv = SS_SUCCESS;
@@ -622,7 +634,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
/* While establishing a connection only allow cstate to change.
Delay/refuse role changes, detach, attach etc... */
Delay/refuse role changes, detach, attach etc... */
- if (test_bit(STATE_SENT, &tconn->flags) &&
+ if (test_bit(STATE_SENT, &connection->flags) &&
!(os.conn == C_WF_REPORT_PARAMS ||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
rv = SS_IN_TRANSIENT_STATE;
@@ -703,7 +715,7 @@ is_valid_transition(union drbd_state os, union drbd_state ns)
return rv;
}
-static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
static const char *msg_table[] = {
[NO_WARNING] = "",
@@ -715,12 +727,12 @@ static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_
};
if (warn != NO_WARNING)
- dev_warn(DEV, "%s\n", msg_table[warn]);
+ drbd_warn(device, "%s\n", msg_table[warn]);
}
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old state.
* @ns: new state.
* @warn_sync_abort:
@@ -728,7 +740,7 @@ static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_
* When we lose the connection, we have to set the state of the peer's disk (pdsk)
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
@@ -738,11 +750,11 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
*warn = NO_WARNING;
fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
rcu_read_lock();
- fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
rcu_read_unlock();
- put_ldev(mdev);
+ put_ldev(device);
}
/* Implications from connection to peer and peer_isp */
@@ -768,17 +780,17 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* Connection breaks down before we finished "Negotiating" */
if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
- if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
- ns.disk = mdev->new_state_tmp.disk;
- ns.pdsk = mdev->new_state_tmp.pdsk;
+ get_ldev_if_state(device, D_NEGOTIATING)) {
+ if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
+ ns.disk = device->new_state_tmp.disk;
+ ns.pdsk = device->new_state_tmp.pdsk;
} else {
if (warn)
*warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
- put_ldev(mdev);
+ put_ldev(device);
}
/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
@@ -873,7 +885,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
- if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
+ if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
@@ -892,42 +904,42 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
return ns;
}
-void drbd_resume_al(struct drbd_conf *mdev)
+void drbd_resume_al(struct drbd_device *device)
{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
- dev_info(DEV, "Resumed AL updates\n");
+ if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
+ drbd_info(device, "Resumed AL updates\n");
}
/* helper for __drbd_set_state */
-static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{
- if (mdev->tconn->agreed_pro_version < 90)
- mdev->ov_start_sector = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- mdev->ov_position = 0;
+ if (first_peer_device(device)->connection->agreed_pro_version < 90)
+ device->ov_start_sector = 0;
+ device->rs_total = drbd_bm_bits(device);
+ device->ov_position = 0;
if (cs == C_VERIFY_T) {
/* starting online verify from an arbitrary position
* does not fit well into the existing protocol.
* on C_VERIFY_T, we initialize ov_left and friends
* implicitly in receive_DataRequest once the
* first P_OV_REQUEST is received */
- mdev->ov_start_sector = ~(sector_t)0;
+ device->ov_start_sector = ~(sector_t)0;
} else {
- unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - 1);
- mdev->rs_total = 1;
+ unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
+ if (bit >= device->rs_total) {
+ device->ov_start_sector =
+ BM_BIT_TO_SECT(device->rs_total - 1);
+ device->rs_total = 1;
} else
- mdev->rs_total -= bit;
- mdev->ov_position = mdev->ov_start_sector;
+ device->rs_total -= bit;
+ device->ov_position = device->ov_start_sector;
}
- mdev->ov_left = mdev->rs_total;
+ device->ov_left = device->rs_total;
}
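set_ov_position() converts between sectors and bitmap bits with BM_SECT_TO_BIT()/BM_BIT_TO_SECT() and clamps the verify start to the last covered bit. A small sketch of that clamping logic, assuming 512-byte sectors and one bitmap bit per 4 KiB; the real macros and constants live in drbd_int.h and may differ.

#include <stdio.h>

typedef unsigned long long sector_t;

/* assumption for this sketch: one bit covers 4 KiB = 8 sectors */
#define BM_SECT_TO_BIT(s)	((s) >> 3)
#define BM_BIT_TO_SECT(b)	((sector_t)(b) << 3)

int main(void)
{
	unsigned long rs_total = 1024;		/* bits the bitmap covers */
	sector_t ov_start_sector = 9000;	/* requested verify start, past the end */

	unsigned long bit = BM_SECT_TO_BIT(ov_start_sector);
	if (bit >= rs_total) {			/* clamp to the last covered bit */
		ov_start_sector = BM_BIT_TO_SECT(rs_total - 1);
		rs_total = 1;
	} else {
		rs_total -= bit;
	}

	printf("start sector %llu, %lu bits left to verify\n",
	       ov_start_sector, rs_total);
	return 0;
}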
/**
* __drbd_set_state() - Set a new DRBD state
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @ns: new state.
* @flags: Flags
* @done: Optional completion, that will get completed after the after_state_ch() finished
@@ -935,7 +947,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
* Caller needs to hold req_lock, and global_state_lock. Do not call directly.
*/
enum drbd_state_rv
-__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+__drbd_set_state(struct drbd_device *device, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
union drbd_state os;
@@ -944,9 +956,9 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
struct after_state_chg_work *ascw;
bool did_remote, should_do_remote;
- os = drbd_read_state(mdev);
+ os = drbd_read_state(device);
- ns = sanitize_state(mdev, ns, &ssw);
+ ns = sanitize_state(device, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -958,32 +970,33 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
/* pre-state-change checks ; only look at ns */
/* See drbd_state_sw_errors in drbd_strings.c */
- rv = is_valid_state(mdev, ns);
+ rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
/* If the old state was illegal as well, then let
this happen...*/
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ if (is_valid_state(device, os) == rv)
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
} else
- rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
}
if (rv < SS_SUCCESS) {
if (flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
+ print_st_err(device, os, ns, rv);
return rv;
}
- print_sanitize_warnings(mdev, ssw);
+ print_sanitize_warnings(device, ssw);
- drbd_pr_state_change(mdev, os, ns, flags);
+ drbd_pr_state_change(device, os, ns, flags);
/* Display changes to the susp* flags that were caused by the call to
sanitize_state(). Only display it here if we were not called from
_conn_request_state() */
if (!(flags & CS_DC_SUSP))
- conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
+ conn_pr_state_change(first_peer_device(device)->connection, os, ns,
+ (flags & ~CS_DC_MASK) | CS_DC_SUSP);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -991,55 +1004,55 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
* after_state_ch works run, where we put_ldev again. */
if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
- atomic_inc(&mdev->local_cnt);
+ atomic_inc(&device->local_cnt);
- did_remote = drbd_should_do_remote(mdev->state);
- mdev->state.i = ns.i;
- should_do_remote = drbd_should_do_remote(mdev->state);
- mdev->tconn->susp = ns.susp;
- mdev->tconn->susp_nod = ns.susp_nod;
- mdev->tconn->susp_fen = ns.susp_fen;
+ did_remote = drbd_should_do_remote(device->state);
+ device->state.i = ns.i;
+ should_do_remote = drbd_should_do_remote(device->state);
+ device->resource->susp = ns.susp;
+ device->resource->susp_nod = ns.susp_nod;
+ device->resource->susp_fen = ns.susp_fen;
/* put replicated vs not-replicated requests in separate epochs */
if (did_remote != should_do_remote)
- start_new_tl_epoch(mdev->tconn);
+ start_new_tl_epoch(first_peer_device(device)->connection);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
- drbd_print_uuids(mdev, "attached to UUIDs");
+ drbd_print_uuids(device, "attached to UUIDs");
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
- no_peer_wf_report_params(mdev->tconn))
- clear_bit(STATE_SENT, &mdev->tconn->flags);
+ no_peer_wf_report_params(first_peer_device(device)->connection))
+ clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags);
- wake_up(&mdev->misc_wait);
- wake_up(&mdev->state_wait);
- wake_up(&mdev->tconn->ping_wait);
+ wake_up(&device->misc_wait);
+ wake_up(&device->state_wait);
+ wake_up(&first_peer_device(device)->connection->ping_wait);
/* Aborted verify run, or we reached the stop sector.
* Log the last position, unless end-of-device. */
if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
ns.conn <= C_CONNECTED) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- if (mdev->ov_left)
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
+ device->ov_start_sector =
+ BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
+ if (device->ov_left)
+ drbd_info(device, "Online Verify reached sector %llu\n",
+ (unsigned long long)device->ov_start_sector);
}
if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
- dev_info(DEV, "Syncer continues.\n");
- mdev->rs_paused += (long)jiffies
- -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+ drbd_info(device, "Syncer continues.\n");
+ device->rs_paused += (long)jiffies
+ -(long)device->rs_mark_time[device->rs_last_mark];
if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
+ mod_timer(&device->resync_timer, jiffies);
}
if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
(ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
- dev_info(DEV, "Resync suspended\n");
- mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
+ drbd_info(device, "Resync suspended\n");
+ device->rs_mark_time[device->rs_last_mark] = jiffies;
}
if (os.conn == C_CONNECTED &&
@@ -1047,77 +1060,77 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
unsigned long now = jiffies;
int i;
- set_ov_position(mdev, ns.conn);
- mdev->rs_start = now;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->ov_last_oos_size = 0;
- mdev->ov_last_oos_start = 0;
+ set_ov_position(device, ns.conn);
+ device->rs_start = now;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
+ device->ov_last_oos_size = 0;
+ device->ov_last_oos_start = 0;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = device->ov_left;
+ device->rs_mark_time[i] = now;
}
- drbd_rs_controller_reset(mdev);
+ drbd_rs_controller_reset(device);
if (ns.conn == C_VERIFY_S) {
- dev_info(DEV, "Starting Online Verify from sector %llu\n",
- (unsigned long long)mdev->ov_position);
- mod_timer(&mdev->resync_timer, jiffies);
+ drbd_info(device, "Starting Online Verify from sector %llu\n",
+ (unsigned long long)device->ov_position);
+ mod_timer(&device->resync_timer, jiffies);
}
}
- if (get_ldev(mdev)) {
- u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ if (get_ldev(device)) {
+ u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
mdf &= ~MDF_AL_CLEAN;
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ if (test_bit(CRASHED_PRIMARY, &device->flags))
mdf |= MDF_CRASHED_PRIMARY;
- if (mdev->state.role == R_PRIMARY ||
- (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ if (device->state.role == R_PRIMARY ||
+ (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
mdf |= MDF_PRIMARY_IND;
- if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ if (device->state.conn > C_WF_REPORT_PARAMS)
mdf |= MDF_CONNECTED_IND;
- if (mdev->state.disk > D_INCONSISTENT)
+ if (device->state.disk > D_INCONSISTENT)
mdf |= MDF_CONSISTENT;
- if (mdev->state.disk > D_OUTDATED)
+ if (device->state.disk > D_OUTDATED)
mdf |= MDF_WAS_UP_TO_DATE;
- if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
mdf |= MDF_PEER_OUT_DATED;
- if (mdf != mdev->ldev->md.flags) {
- mdev->ldev->md.flags = mdf;
- drbd_md_mark_dirty(mdev);
+ if (mdf != device->ldev->md.flags) {
+ device->ldev->md.flags = mdf;
+ drbd_md_mark_dirty(device);
}
if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
- drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
- put_ldev(mdev);
+ drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
+ put_ldev(device);
}
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
+ set_bit(CONSIDER_RESYNC, &device->flags);
/* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
- drbd_thread_stop_nowait(&mdev->tconn->receiver);
+ drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
/* Now the receiver finished cleaning up itself, it should die */
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
- drbd_thread_stop_nowait(&mdev->tconn->receiver);
+ drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
/* Upon network failure, we need to restart the receiver. */
if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
- drbd_thread_restart_nowait(&mdev->tconn->receiver);
+ drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver);
/* Resume AL writing if we get a connection */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- drbd_resume_al(mdev);
- mdev->tconn->connect_cnt++;
+ drbd_resume_al(device);
+ first_peer_device(device)->connection->connect_cnt++;
}
/* remember last attach time so request_timer_fn() won't
@@ -1125,7 +1138,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
* previously frozen IO */
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
ns.disk > D_NEGOTIATING)
- mdev->last_reattach_jif = jiffies;
+ device->last_reattach_jif = jiffies;
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
@@ -1133,11 +1146,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
ascw->ns = ns;
ascw->flags = flags;
ascw->w.cb = w_after_state_ch;
- ascw->w.mdev = mdev;
+ ascw->device = device;
ascw->done = done;
- drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &ascw->w);
} else {
- dev_err(DEV, "Could not kmalloc an ascw\n");
+ drbd_err(device, "Could not kmalloc an ascw\n");
}
return rv;
@@ -1147,66 +1161,65 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
{
struct after_state_chg_work *ascw =
container_of(w, struct after_state_chg_work, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = ascw->device;
- after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & CS_WAIT_COMPLETE) {
- D_ASSERT(ascw->done != NULL);
+ after_state_ch(device, ascw->os, ascw->ns, ascw->flags);
+ if (ascw->flags & CS_WAIT_COMPLETE)
complete(ascw->done);
- }
kfree(ascw);
return 0;
}
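Embedding the drbd_device pointer in after_state_chg_work (instead of the removed w.mdev field) works because the callback recovers the containing struct with container_of(). A free-standing illustration of that pattern, with a local container_of definition and invented types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int (*cb)(struct work *w); };

struct after_state_chg_work {
	struct work w;
	const char *device_name;	/* stands in for struct drbd_device * */
};

static int w_after_state_ch(struct work *w)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);

	printf("running deferred state-change work for %s\n", ascw->device_name);
	return 0;
}

int main(void)
{
	struct after_state_chg_work ascw = {
		.w = { .cb = w_after_state_ch },
		.device_name = "drbd0",
	};

	/* the worker thread would do this after dequeuing the item */
	return ascw.w.cb(&ascw.w);
}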
-static void abw_start_sync(struct drbd_conf *mdev, int rv)
+static void abw_start_sync(struct drbd_device *device, int rv)
{
if (rv) {
- dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+ drbd_err(device, "Writing the bitmap failed not starting resync.\n");
+ _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
return;
}
- switch (mdev->state.conn) {
+ switch (device->state.conn) {
case C_STARTING_SYNC_T:
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
break;
case C_STARTING_SYNC_S:
- drbd_start_resync(mdev, C_SYNC_SOURCE);
+ drbd_start_resync(device, C_SYNC_SOURCE);
break;
}
}
-int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
- D_ASSERT(current == mdev->tconn->worker.task);
+ D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
- /* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
+ /* open coded non-blocking drbd_suspend_io(device); */
+ set_bit(SUSPEND_IO, &device->flags);
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
+ drbd_bm_lock(device, why, flags);
+ rv = io_fn(device);
+ drbd_bm_unlock(device);
- drbd_resume_io(mdev);
+ drbd_resume_io(device);
return rv;
}
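drbd_bitmap_io_from_worker() brackets the supplied io_fn: it sets a suspend-IO flag and takes the bitmap lock on the way in, then unlocks and resumes IO on the way out. A toy bracketing of a callback between setup and teardown steps, with every flag and lock mocked in user space:

#include <stdio.h>
#include <stdbool.h>

static bool io_suspended;
static bool bitmap_locked;

static int write_bitmap(void)		/* stands in for io_fn, e.g. a bitmap writeout */
{
	printf("writing bitmap (io_suspended=%d, bitmap_locked=%d)\n",
	       io_suspended, bitmap_locked);
	return 0;
}

static int bitmap_io_from_worker(int (*io_fn)(void), const char *why)
{
	int rv;

	io_suspended = true;		/* set_bit(SUSPEND_IO, ...) analogue */
	bitmap_locked = true;		/* drbd_bm_lock(..., why, ...) analogue */
	printf("bitmap locked: %s\n", why);

	rv = io_fn();

	bitmap_locked = false;		/* drbd_bm_unlock() analogue */
	io_suspended = false;		/* drbd_resume_io() analogue */
	return rv;
}

int main(void)
{
	return bitmap_io_from_worker(write_bitmap, "demote");
}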
/**
* after_state_ch() - Perform after state change actions that may sleep
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @os: old state.
* @ns: new state.
* @flags: Flags
*/
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags)
{
+ struct drbd_resource *resource = device->resource;
struct sib_info sib;
sib.sib_reason = SIB_STATE_CHANGE;
@@ -1214,63 +1227,63 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
sib.ns = ns;
if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (mdev->p_uuid)
- mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+ clear_bit(CRASHED_PRIMARY, &device->flags);
+ if (device->p_uuid)
+ device->p_uuid[UI_FLAGS] &= ~((u64)2);
}
/* Inform userspace about the change... */
- drbd_bcast_event(mdev, &sib);
+ drbd_bcast_event(device, &sib);
if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
- drbd_khelper(mdev, "pri-on-incon-degr");
+ drbd_khelper(device, "pri-on-incon-degr");
/* Here we have the actions that are performed after a
state change. This function might sleep */
if (ns.susp_nod) {
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
enum drbd_req_event what = NOTHING;
- spin_lock_irq(&tconn->req_lock);
- if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
+ spin_lock_irq(&device->resource->req_lock);
+ if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- conn_lowest_disk(tconn) > D_NEGOTIATING)
+ conn_lowest_disk(connection) > D_NEGOTIATING)
what = RESTART_FROZEN_DISK_IO;
- if (tconn->susp_nod && what != NOTHING) {
- _tl_restart(tconn, what);
- _conn_request_state(tconn,
+ if (resource->susp_nod && what != NOTHING) {
+ _tl_restart(connection, what);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_nod = 1 } },
(union drbd_state) { { .susp_nod = 0 } },
CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
if (ns.susp_fen) {
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
- spin_lock_irq(&tconn->req_lock);
- if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
+ spin_lock_irq(&device->resource->req_lock);
+ if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */
- struct drbd_conf *odev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, odev, vnr)
- clear_bit(NEW_CUR_UUID, &odev->flags);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
- _tl_restart(tconn, RESEND);
- _conn_request_state(tconn,
+ _tl_restart(connection, RESEND);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
}
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
}
/* Became sync source. With protocol >= 96, we still need to send out
@@ -1279,9 +1292,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* which is unexpected. */
if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
- mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
- drbd_gen_and_send_sync_uuid(mdev);
- put_ldev(mdev);
+ first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) {
+ drbd_gen_and_send_sync_uuid(first_peer_device(device));
+ put_ldev(device);
}
/* Do not change the order of the if above and the two below... */
@@ -1289,20 +1302,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
/* we probably will start a resync soon.
* make sure those things are properly reset. */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- drbd_rs_cancel_all(mdev);
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ atomic_set(&device->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(device);
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_state(first_peer_device(device), ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
* at the time this work was queued. */
if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
- mdev->state.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ device->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
"send_bitmap (WFBitMapS)",
BM_LOCKED_TEST_ALLOWED);
@@ -1313,80 +1326,80 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&& (ns.pdsk < D_INCONSISTENT ||
ns.pdsk == D_UNKNOWN ||
ns.pdsk == D_OUTDATED)) {
- if (get_ldev(mdev)) {
+ if (get_ldev(device)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- if (drbd_suspended(mdev)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
+ device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ if (drbd_suspended(device)) {
+ set_bit(NEW_CUR_UUID, &device->flags);
} else {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
+ drbd_uuid_new_current(device);
+ drbd_send_uuids(first_peer_device(device));
}
}
- put_ldev(mdev);
+ put_ldev(device);
}
}
- if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
+ device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ drbd_uuid_new_current(device);
+ drbd_send_uuids(first_peer_device(device));
}
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
* No harm done if the bitmap still changes,
* redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote diskless peer", BM_LOCKED_SET_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
/* Write out all changed bits on demote.
* Though, no need to do that just yet
* if there is a resync going on still */
if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
- mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ device->state.conn <= C_CONNECTED && get_ldev(device)) {
/* No changes to the bitmap expected this time, so assert that,
* even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ drbd_bitmap_io_from_worker(device, &drbd_bm_write,
"demote", BM_LOCKED_TEST_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
/* Last part of the attaching process ... */
if (ns.conn >= C_CONNECTED &&
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
- drbd_send_sizes(mdev, 0, 0); /* to start sync... */
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
+ drbd_send_sizes(first_peer_device(device), 0, 0); /* to start sync... */
+ drbd_send_uuids(first_peer_device(device));
+ drbd_send_state(first_peer_device(device), ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
(ns.aftr_isp || ns.peer_isp || ns.user_isp))
- suspend_other_sg(mdev);
+ suspend_other_sg(device);
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
/* no other bitmap changes expected during this phase */
- drbd_queue_bitmap_io(mdev,
+ drbd_queue_bitmap_io(device,
&drbd_bmio_set_n_write, &abw_start_sync,
"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
@@ -1399,15 +1412,15 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* our cleanup here with the transition to D_DISKLESS.
* But it is still not safe to dereference ldev here, since
* we might come from a failed Attach before ldev was set. */
- if (mdev->ldev) {
+ if (device->ldev) {
rcu_read_lock();
- eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
rcu_read_unlock();
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);
if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
+ drbd_khelper(device, "local-io-error");
/* Immediately allow completion of all application IO,
* that waits for completion from the local disk,
@@ -1422,76 +1435,76 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
- tl_abort_disk_io(mdev);
+ if (test_and_clear_bit(FORCE_DETACH, &device->flags))
+ tl_abort_disk_io(device);
/* current state still has to be D_FAILED,
* there is only one way out: to D_DISKLESS,
* and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
+ if (device->state.disk != D_FAILED)
+ drbd_err(device,
"ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
+ drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
- drbd_rs_cancel_all(mdev);
+ drbd_rs_cancel_all(device);
/* In case we want to get something to stable storage still,
* this may be the last chance.
* Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
+ put_ldev(device);
}
- /* second half of local IO error, failure to attach,
- * or administrative detach,
- * after local_cnt references have reached zero again */
- if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (device->state.disk != D_DISKLESS)
+ drbd_err(device,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(device->state.disk));
if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
- put_ldev(mdev);
+ put_ldev(device);
}
/* Notify peer that I had a local IO error, and did not detach. */
if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
if (ns.conn == C_CONNECTED)
- resync_after_online_grow(mdev);
+ resync_after_online_grow(device);
}
/* A resync finished or aborted, wake paused devices... */
if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
(os.peer_isp && !ns.peer_isp) ||
(os.user_isp && !ns.user_isp))
- resume_next_sg(mdev);
+ resume_next_sg(device);
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev, ns);
+ drbd_send_state(first_peer_device(device), ns);
/* Verify finished, or reached stop sector. Peer did not know about
* the stop sector, and we may even have changed the stop sector during
* verify to interrupt/stop early. Send the new state. */
if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
- && verify_can_do_stop_sector(mdev))
- drbd_send_state(mdev, ns);
+ && verify_can_do_stop_sector(device))
+ drbd_send_state(first_peer_device(device), ns);
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1500,56 +1513,57 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* any bitmap writeout anymore.
* No harm done if some bits change during this phase.
*/
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
+ drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
- put_ldev(mdev);
+ put_ldev(device);
}
if (ns.disk == D_DISKLESS &&
ns.conn == C_STANDALONE &&
ns.role == R_SECONDARY) {
if (os.aftr_isp != ns.aftr_isp)
- resume_next_sg(mdev);
+ resume_next_sg(device);
}
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
struct after_conn_state_chg_work {
struct drbd_work w;
enum drbd_conns oc;
union drbd_state ns_min;
- union drbd_state ns_max; /* new, max state, over all mdevs */
+ union drbd_state ns_max; /* new, max state, over all devices */
enum chg_state_flags flags;
+ struct drbd_connection *connection;
};
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
struct after_conn_state_chg_work *acscw =
container_of(w, struct after_conn_state_chg_work, w);
- struct drbd_tconn *tconn = w->tconn;
+ struct drbd_connection *connection = acscw->connection;
enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
kfree(acscw);
/* Upon network configuration, we need to start the receiver */
if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
- drbd_thread_start(&tconn->receiver);
+ drbd_thread_start(&connection->receiver);
if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
struct net_conf *old_conf;
- mutex_lock(&tconn->conf_update);
- old_conf = tconn->net_conf;
- tconn->my_addr_len = 0;
- tconn->peer_addr_len = 0;
- rcu_assign_pointer(tconn->net_conf, NULL);
- conn_free_crypto(tconn);
- mutex_unlock(&tconn->conf_update);
+ mutex_lock(&connection->resource->conf_update);
+ old_conf = connection->net_conf;
+ connection->my_addr_len = 0;
+ connection->peer_addr_len = 0;
+ rcu_assign_pointer(connection->net_conf, NULL);
+ conn_free_crypto(connection);
+ mutex_unlock(&connection->resource->conf_update);
synchronize_rcu();
kfree(old_conf);
@@ -1559,45 +1573,47 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
/* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) {
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ if (test_bit(NEW_CUR_UUID, &device->flags)) {
+ drbd_uuid_new_current(device);
+ clear_bit(NEW_CUR_UUID, &device->flags);
}
}
rcu_read_unlock();
- spin_lock_irq(&tconn->req_lock);
- _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
- _conn_request_state(tconn,
+ spin_lock_irq(&connection->resource->req_lock);
+ _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
+ _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE);
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
}
}
- kref_put(&tconn->kref, &conn_destroy);
+ kref_put(&connection->kref, drbd_destroy_connection);
- conn_md_sync(tconn);
+ conn_md_sync(connection);
return 0;
}
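The deferred connection work drops the reference it was handed with kref_put(..., drbd_destroy_connection) once it is done. A minimal reference-count sketch of why that pairing matters; the kref is modeled with a plain counter and is not the kernel API:

#include <stdio.h>

struct connection {
	int refcount;
	const char *name;
};

static void destroy_connection(struct connection *c)
{
	printf("destroying %s\n", c->name);
}

static void put_connection(struct connection *c)
{
	if (--c->refcount == 0)		/* last reference gone: release */
		destroy_connection(c);
}

int main(void)
{
	struct connection conn = { .refcount = 1, .name = "peer0" };

	conn.refcount++;		/* taken when the work item was queued */
	/* ... deferred work runs here ... */
	put_connection(&conn);		/* the kref_put() at the end of the work */

	put_connection(&conn);		/* owner's final reference */
	return 0;
}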
-void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{
enum chg_state_flags flags = ~0;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr, first_vol = 1;
union drbd_dev_state os, cs = {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
- .conn = tconn->cstate,
+ .conn = connection->cstate,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
} };
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- os = mdev->state;
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ os = device->state;
if (first_vol) {
cs = os;
@@ -1628,18 +1644,19 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
}
static enum drbd_state_rv
-conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
union drbd_state ns, os;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
int vnr;
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- os = drbd_read_state(mdev);
- ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ os = drbd_read_state(device);
+ ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
@@ -1648,30 +1665,29 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
continue;
rv = is_valid_transition(os, ns);
- if (rv < SS_SUCCESS)
- break;
- if (!(flags & CS_HARD)) {
- rv = is_valid_state(mdev, ns);
+ if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
+ rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) {
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_soft_transition(os, ns, tconn);
+ if (is_valid_state(device, os) == rv)
+ rv = is_valid_soft_transition(os, ns, connection);
} else
- rv = is_valid_soft_transition(os, ns, tconn);
+ rv = is_valid_soft_transition(os, ns, connection);
}
- if (rv < SS_SUCCESS)
+
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
+ print_st_err(device, os, ns, rv);
break;
+ }
}
rcu_read_unlock();
- if (rv < SS_SUCCESS && flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
-
return rv;
}
void
-conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
union drbd_state ns, os, ns_max = { };
@@ -1682,7 +1698,7 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
.disk = D_MASK,
.pdsk = D_MASK
} };
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
enum drbd_state_rv rv;
int vnr, number_of_volumes = 0;
@@ -1690,27 +1706,28 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
/* remember last connect time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw
* previously frozen IO */
- if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
- tconn->last_reconnect_jif = jiffies;
+ if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+ connection->last_reconnect_jif = jiffies;
- tconn->cstate = val.conn;
+ connection->cstate = val.conn;
}
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
number_of_volumes++;
- os = drbd_read_state(mdev);
+ os = drbd_read_state(device);
ns = apply_mask_val(os, mask, val);
- ns = sanitize_state(mdev, ns, NULL);
+ ns = sanitize_state(device, ns, NULL);
if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
ns.disk = os.disk;
- rv = __drbd_set_state(mdev, ns, flags, NULL);
+ rv = __drbd_set_state(device, ns, flags, NULL);
if (rv < SS_SUCCESS)
BUG();
- ns.i = mdev->state.i;
+ ns.i = device->state.i;
ns_max.role = max_role(ns.role, ns_max.role);
ns_max.peer = max_role(ns.peer, ns_max.peer);
ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
@@ -1735,39 +1752,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
} };
}
- ns_min.susp = ns_max.susp = tconn->susp;
- ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
- ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+ ns_min.susp = ns_max.susp = connection->resource->susp;
+ ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
+ ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;
*pns_min = ns_min;
*pns_max = ns_max;
}
static enum drbd_state_rv
-_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
enum drbd_state_rv rv;
- if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+ if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+ if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
return SS_CW_FAILED_BY_PEER;
- rv = conn_is_valid_transition(tconn, mask, val, 0);
- if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+ rv = conn_is_valid_transition(connection, mask, val, 0);
+ if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
rv = SS_UNKNOWN_ERROR; /* continue waiting */
return rv;
}
enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv = SS_SUCCESS;
struct after_conn_state_chg_work *acscw;
- enum drbd_conns oc = tconn->cstate;
+ enum drbd_conns oc = connection->cstate;
union drbd_state ns_max, ns_min, os;
bool have_mutex = false;
@@ -1777,7 +1794,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
goto abort;
}
- rv = conn_is_valid_transition(tconn, mask, val, flags);
+ rv = conn_is_valid_transition(connection, mask, val, flags);
if (rv < SS_SUCCESS)
goto abort;
@@ -1787,38 +1804,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
/* This will be a cluster-wide state change.
* Need to give up the spinlock, grab the mutex,
* then send the state change request, ... */
- spin_unlock_irq(&tconn->req_lock);
- mutex_lock(&tconn->cstate_mutex);
+ spin_unlock_irq(&connection->resource->req_lock);
+ mutex_lock(&connection->cstate_mutex);
have_mutex = true;
- set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
- if (conn_send_state_req(tconn, mask, val)) {
+ set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
+ if (conn_send_state_req(connection, mask, val)) {
/* sending failed. */
- clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
rv = SS_CW_FAILED_BY_PEER;
/* need to re-acquire the spin lock, though */
goto abort_unlocked;
}
if (val.conn == C_DISCONNECTING)
- set_bit(DISCONNECT_SENT, &tconn->flags);
+ set_bit(DISCONNECT_SENT, &connection->flags);
/* ... and re-acquire the spinlock.
* If _conn_rq_cond() returned >= SS_SUCCESS, we must call
* conn_set_state() within the same spinlock. */
- spin_lock_irq(&tconn->req_lock);
- wait_event_lock_irq(tconn->ping_wait,
- (rv = _conn_rq_cond(tconn, mask, val)),
- tconn->req_lock);
- clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ spin_lock_irq(&connection->resource->req_lock);
+ wait_event_lock_irq(connection->ping_wait,
+ (rv = _conn_rq_cond(connection, mask, val)),
+ connection->resource->req_lock);
+ clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (rv < SS_SUCCESS)
goto abort;
}
- conn_old_common_state(tconn, &os, &flags);
+ conn_old_common_state(connection, &os, &flags);
flags |= CS_DC_SUSP;
- conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
- conn_pr_state_change(tconn, os, ns_max, flags);
+ conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
+ conn_pr_state_change(connection, os, ns_max, flags);
acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
if (acscw) {
@@ -1827,39 +1844,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
acscw->ns_max = ns_max;
acscw->flags = flags;
acscw->w.cb = w_after_conn_state_ch;
- kref_get(&tconn->kref);
- acscw->w.tconn = tconn;
- drbd_queue_work(&tconn->sender_work, &acscw->w);
+ kref_get(&connection->kref);
+ acscw->connection = connection;
+ drbd_queue_work(&connection->sender_work, &acscw->w);
} else {
- conn_err(tconn, "Could not kmalloc an acscw\n");
+ drbd_err(connection, "Could not kmalloc an acscw\n");
}
abort:
if (have_mutex) {
/* mutex_unlock() "... must not be used in interrupt context.",
* so give up the spinlock, then re-acquire it */
- spin_unlock_irq(&tconn->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
abort_unlocked:
- mutex_unlock(&tconn->cstate_mutex);
- spin_lock_irq(&tconn->req_lock);
+ mutex_unlock(&connection->cstate_mutex);
+ spin_lock_irq(&connection->resource->req_lock);
}
if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
- conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
- conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
- conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+ drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
+ drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+ drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
}
return rv;
}
enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags)
{
enum drbd_state_rv rv;
- spin_lock_irq(&tconn->req_lock);
- rv = _conn_request_state(tconn, mask, val, flags);
- spin_unlock_irq(&tconn->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
+ rv = _conn_request_state(connection, mask, val, flags);
+ spin_unlock_irq(&connection->resource->req_lock);
return rv;
}
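The drbd_state.c conversions above all follow one pattern: per-volume objects are reached through the connection's peer_devices idr rather than tconn->volumes, and the request lock has moved from the connection into the shared resource (connection->resource->req_lock). A minimal sketch of the post-rename iteration, using only names that appear in the hunks above; the function itself is hypothetical and not part of the patch:

/*
 * Illustrative sketch (assumes drbd_int.h): walk the volumes of a
 * connection after the drbd_tconn -> drbd_connection rename.
 */
static void example_walk_volumes(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		/* per-volume work; for example, skip diskless volumes */
		if (device->state.disk == D_DISKLESS)
			continue;
	}
	rcu_read_unlock();
}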
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index a3c361bbc4b6..cc41605ba21c 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -1,8 +1,8 @@
#ifndef DRBD_STATE_H
#define DRBD_STATE_H
-struct drbd_conf;
-struct drbd_tconn;
+struct drbd_device;
+struct drbd_connection;
/**
* DOC: DRBD State macros
@@ -107,36 +107,36 @@ union drbd_dev_state {
unsigned int i;
};
-extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+extern enum drbd_state_rv drbd_change_state(struct drbd_device *device,
enum chg_state_flags f,
union drbd_state mask,
union drbd_state val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+extern void drbd_force_state(struct drbd_device *, union drbd_state,
union drbd_state);
-extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
union drbd_state,
union drbd_state,
enum chg_state_flags);
-extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
enum chg_state_flags,
struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state,
+extern void print_st_err(struct drbd_device *, union drbd_state,
union drbd_state, int);
enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
-extern void drbd_resume_al(struct drbd_conf *mdev);
-extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+extern void drbd_resume_al(struct drbd_device *device);
+extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/**
* drbd_request_state() - Request a state change
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
*
@@ -144,18 +144,18 @@ extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
* quite verbose in case the state change is not possible, and all those
* state changes are globally serialized.
*/
-static inline int drbd_request_state(struct drbd_conf *mdev,
+static inline int drbd_request_state(struct drbd_device *device,
union drbd_state mask,
union drbd_state val)
{
- return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+ return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
-enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
-enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
-enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_role(struct drbd_connection *connection);
+enum drbd_role conn_highest_peer(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
+enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);
#endif
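The header now expresses the same entry points in terms of drbd_device and drbd_connection. A hedged usage sketch, reusing the mask/val initializer style from the w_after_conn_state_ch() hunk earlier in this patch (the wrapper function is hypothetical, not part of the patch):

/*
 * Illustrative only: conn_request_state() takes the resource req_lock
 * itself; the susp_fen mask/val pair is copied from the hunk above.
 */
static void example_clear_susp_fen(struct drbd_connection *connection)
{
	conn_request_state(connection,
			   (union drbd_state) { { .susp_fen = 1 } },
			   (union drbd_state) { { .susp_fen = 0 } },
			   CS_VERBOSE);
}

For per-device changes, drbd_request_state(device, mask, val) remains the inline wrapper around _drbd_request_state() with CS_VERBOSE + CS_ORDERED, as declared above.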
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 58e08ff2b2ce..80b0f63c7075 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -24,6 +24,7 @@
*/
#include <linux/drbd.h>
+#include "drbd_strings.h"
static const char *drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
diff --git a/drivers/block/drbd/drbd_strings.h b/drivers/block/drbd/drbd_strings.h
new file mode 100644
index 000000000000..f9923cc88afb
--- /dev/null
+++ b/drivers/block/drbd/drbd_strings.h
@@ -0,0 +1,9 @@
+#ifndef __DRBD_STRINGS_H
+#define __DRBD_STRINGS_H
+
+extern const char *drbd_conn_str(enum drbd_conns);
+extern const char *drbd_role_str(enum drbd_role);
+extern const char *drbd_disk_str(enum drbd_disk_state);
+extern const char *drbd_set_st_err_str(enum drbd_state_rv);
+
+#endif /* __DRBD_STRINGS_H */
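The new header only declares the string helpers that drbd_strings.c defines; including it from drbd_strings.c (see the hunk above) lets the compiler check the definitions against the prototypes. A caller elsewhere in the driver uses them as in the _conn_request_state() error path earlier in this patch, for example:

	drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));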
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..2c4ce42c3657 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -21,7 +21,7 @@
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+*/
#include <linux/module.h>
#include <linux/drbd.h>
@@ -36,10 +36,11 @@
#include <linux/scatterlist.h>
#include "drbd_int.h"
+#include "drbd_protocol.h"
#include "drbd_req.h"
-static int w_make_ov_request(struct drbd_work *w, int cancel);
-
+static int make_ov_request(struct drbd_device *, int);
+static int make_resync_request(struct drbd_device *, int);
/* endio handlers:
* drbd_md_io_complete (defined here)
@@ -67,10 +68,10 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
- struct drbd_conf *mdev;
+ struct drbd_device *device;
md_io = (struct drbd_md_io *)bio->bi_private;
- mdev = container_of(md_io, struct drbd_conf, md_io);
+ device = container_of(md_io, struct drbd_device, md_io);
md_io->error = error;
@@ -83,35 +84,36 @@ void drbd_md_io_complete(struct bio *bio, int error)
* Make sure we first drop the reference, and only then signal
* completion, or we may (in drbd_al_read_log()) cycle so fast into the
* next drbd_md_sync_page_io(), that we trigger the
- * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
*/
- drbd_md_put_buffer(mdev);
+ drbd_md_put_buffer(device);
md_io->done = 1;
- wake_up(&mdev->misc_wait);
+ wake_up(&device->misc_wait);
bio_put(bio);
- if (mdev->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
- put_ldev(mdev);
+ if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
+ put_ldev(device);
}
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
+static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- mdev->read_cnt += peer_req->i.size >> 9;
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ device->read_cnt += peer_req->i.size >> 9;
list_del(&peer_req->w.list);
- if (list_empty(&mdev->read_ee))
- wake_up(&mdev->ee_wait);
+ if (list_empty(&device->read_ee))
+ wake_up(&device->ee_wait);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
- __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ __drbd_chk_io_error(device, DRBD_READ_ERROR);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
- drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
- put_ldev(mdev);
+ drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
+ put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
@@ -119,7 +121,8 @@ void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(lo
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct drbd_interval i;
int do_wake;
u64 block_id;
@@ -133,35 +136,35 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
block_id = peer_req->block_id;
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
- mdev->writ_cnt += peer_req->i.size >> 9;
- list_move_tail(&peer_req->w.list, &mdev->done_ee);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
+ device->writ_cnt += peer_req->i.size >> 9;
+ list_move_tail(&peer_req->w.list, &device->done_ee);
/*
* Do not remove from the write_requests tree here: we did not send the
* Ack yet and did not wake possibly waiting conflicting requests.
* Removed from the tree from "drbd_process_done_ee" within the
- * appropriate w.cb (e_end_block/e_end_resync_block) or from
+ * appropriate dw.cb (e_end_block/e_end_resync_block) or from
* _drbd_clear_done_ee.
*/
- do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
+ do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
- __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
if (block_id == ID_SYNCER)
- drbd_rs_complete_io(mdev, i.sector);
+ drbd_rs_complete_io(device, i.sector);
if (do_wake)
- wake_up(&mdev->ee_wait);
+ wake_up(&device->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, &i);
+ drbd_al_complete_io(device, &i);
- wake_asender(mdev->tconn);
- put_ldev(mdev);
+ wake_asender(peer_device->connection);
+ put_ldev(device);
}
/* writes on behalf of the partner, or resync writes,
@@ -170,17 +173,17 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
void drbd_peer_request_endio(struct bio *bio, int error)
{
struct drbd_peer_request *peer_req = bio->bi_private;
- struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_device *device = peer_req->peer_device->device;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
if (error && __ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "%s: error=%d s=%llus\n",
+ drbd_warn(device, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
(unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
(unsigned long long)peer_req->i.sector);
/* strange behavior of some lower level drivers...
@@ -207,13 +210,13 @@ void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
- struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_device *device = req->device;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
if (!error && !uptodate) {
- dev_warn(DEV, "p %s: setting error to -EIO\n",
+ drbd_warn(device, "p %s: setting error to -EIO\n",
bio_data_dir(bio) == WRITE ? "write" : "read");
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
@@ -252,7 +255,7 @@ void drbd_request_endio(struct bio *bio, int error)
*/
if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+ drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
if (!error)
panic("possible random memory corruption caused by delayed completion of aborted local request\n");
@@ -272,17 +275,16 @@ void drbd_request_endio(struct bio *bio, int error)
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ spin_lock_irqsave(&device->resource->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- put_ldev(mdev);
+ spin_unlock_irqrestore(&device->resource->req_lock, flags);
+ put_ldev(device);
if (m.bio)
- complete_master_bio(mdev, &m);
+ complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
- struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
@@ -309,12 +311,12 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
crypto_hash_final(&desc, digest);
}
-void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
- struct bio_vec *bvec;
- int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
desc.tfm = tfm;
desc.flags = 0;
@@ -322,8 +324,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- bio_for_each_segment(bvec, bio, i) {
- sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+ bio_for_each_segment(bvec, bio, iter) {
+ sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
crypto_hash_final(&desc, digest);
@@ -333,7 +335,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int digest_size;
void *digest;
int err = 0;
@@ -344,89 +347,92 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
- drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
/* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(mdev);
- err = drbd_send_drequest_csum(mdev, sector, size,
+ inc_rs_pending(device);
+ err = drbd_send_drequest_csum(peer_device, sector, size,
digest, digest_size,
P_CSUM_RS_REQUEST);
kfree(digest);
} else {
- dev_err(DEV, "kmalloc() of digest failed.\n");
+ drbd_err(device, "kmalloc() of digest failed.\n");
err = -ENOMEM;
}
out:
if (peer_req)
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
+ drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
+static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
+ struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
return -EIO;
- if (drbd_rs_should_slow_down(mdev, sector))
+ if (drbd_rs_should_slow_down(device, sector))
goto defer;
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
- peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
+ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
size, GFP_TRY);
if (!peer_req)
goto defer;
peer_req->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add(&peer_req->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add(&peer_req->w.list, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
- atomic_add(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ atomic_add(size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->tconn->req_lock);
+ spin_lock_irq(&device->resource->req_lock);
list_del(&peer_req->w.list);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ spin_unlock_irq(&device->resource->req_lock);
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
defer:
- put_ldev(mdev);
+ put_ldev(device);
return -EAGAIN;
}
int w_resync_timer(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- switch (mdev->state.conn) {
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, resync_work);
+
+ switch (device->state.conn) {
case C_VERIFY_S:
- w_make_ov_request(w, cancel);
+ make_ov_request(device, cancel);
break;
case C_SYNC_TARGET:
- w_make_resync_request(w, cancel);
+ make_resync_request(device, cancel);
break;
}
@@ -435,10 +441,11 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
- if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
+ if (list_empty(&device->resync_work.list))
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -485,7 +492,7 @@ struct fifo_buffer *fifo_alloc(int fifo_size)
return fb;
}
-static int drbd_rs_controller(struct drbd_conf *mdev)
+static int drbd_rs_controller(struct drbd_device *device)
{
struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
@@ -498,22 +505,22 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
int max_sect;
struct fifo_buffer *plan;
- sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
- mdev->rs_in_flight -= sect_in;
+ sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */
+ device->rs_in_flight -= sect_in;
- dc = rcu_dereference(mdev->ldev->disk_conf);
- plan = rcu_dereference(mdev->rs_plan_s);
+ dc = rcu_dereference(device->ldev->disk_conf);
+ plan = rcu_dereference(device->rs_plan_s);
steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
- if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
+ if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
want = dc->c_fill_target ? dc->c_fill_target :
sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
- correction = want - mdev->rs_in_flight - plan->total;
+ correction = want - device->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
@@ -533,25 +540,25 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
req_sect = max_sect;
/*
- dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
- sect_in, mdev->rs_in_flight, want, correction,
- steps, cps, mdev->rs_planed, curr_corr, req_sect);
+ drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
+ sect_in, device->rs_in_flight, want, correction,
+ steps, cps, device->rs_planed, curr_corr, req_sect);
*/
return req_sect;
}
-static int drbd_rs_number_requests(struct drbd_conf *mdev)
+static int drbd_rs_number_requests(struct drbd_device *device)
{
int number;
rcu_read_lock();
- if (rcu_dereference(mdev->rs_plan_s)->size) {
- number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
- mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+ if (rcu_dereference(device->rs_plan_s)->size) {
+ number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9);
+ device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
- mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
- number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
+ device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
+ number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
rcu_read_unlock();
@@ -560,12 +567,11 @@ static int drbd_rs_number_requests(struct drbd_conf *mdev)
return number;
}
-int w_make_resync_request(struct drbd_work *w, int cancel)
+static int make_resync_request(struct drbd_device *device, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
unsigned long bit;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
int max_bio_size;
int number, rollback_i, size;
int align, queued, sndbuf;
@@ -574,61 +580,61 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
if (unlikely(cancel))
return 0;
- if (mdev->rs_total == 0) {
+ if (device->rs_total == 0) {
/* empty resync? */
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
return 0;
}
- if (!get_ldev(mdev)) {
- /* Since we only need to access mdev->rsync a
- get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
+ if (!get_ldev(device)) {
+ /* Since we only need to access device->rsync a
+ get_ldev_if_state(device,D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
all */
- dev_err(DEV, "Disk broke down during resync!\n");
+ drbd_err(device, "Disk broke down during resync!\n");
return 0;
}
- max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
- number = drbd_rs_number_requests(mdev);
+ max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
+ number = drbd_rs_number_requests(device);
if (number == 0)
goto requeue;
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
- mutex_lock(&mdev->tconn->data.mutex);
- if (mdev->tconn->data.socket) {
- queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
- sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
+ mutex_lock(&first_peer_device(device)->connection->data.mutex);
+ if (first_peer_device(device)->connection->data.socket) {
+ queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued;
+ sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf;
} else {
queued = 1;
sndbuf = 0;
}
- mutex_unlock(&mdev->tconn->data.mutex);
+ mutex_unlock(&first_peer_device(device)->connection->data.mutex);
if (queued > sndbuf / 2)
goto requeue;
next_sector:
size = BM_BLOCK_SIZE;
- bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
+ bit = drbd_bm_find_next(device, device->bm_resync_fo);
if (bit == DRBD_END_OF_BITMAP) {
- mdev->bm_resync_fo = drbd_bm_bits(mdev);
- put_ldev(mdev);
+ device->bm_resync_fo = drbd_bm_bits(device);
+ put_ldev(device);
return 0;
}
sector = BM_BIT_TO_SECT(bit);
- if (drbd_rs_should_slow_down(mdev, sector) ||
- drbd_try_rs_begin_io(mdev, sector)) {
- mdev->bm_resync_fo = bit;
+ if (drbd_rs_should_slow_down(device, sector) ||
+ drbd_try_rs_begin_io(device, sector)) {
+ device->bm_resync_fo = bit;
goto requeue;
}
- mdev->bm_resync_fo = bit + 1;
+ device->bm_resync_fo = bit + 1;
- if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
- drbd_rs_complete_io(mdev, sector);
+ if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
+ drbd_rs_complete_io(device, sector);
goto next_sector;
}
@@ -657,7 +663,7 @@ next_sector:
* obscure reason; ( b == 0 ) would get the out-of-band
* only accidentally right because of the "oddly sized"
* adjustment below */
- if (drbd_bm_test_bit(mdev, bit+1) != 1)
+ if (drbd_bm_test_bit(device, bit+1) != 1)
break;
bit++;
size += BM_BLOCK_SIZE;
@@ -668,20 +674,21 @@ next_sector:
/* if we merged some,
* reset the offset to start the next drbd_bm_find_next from */
if (size > BM_BLOCK_SIZE)
- mdev->bm_resync_fo = bit + 1;
+ device->bm_resync_fo = bit + 1;
#endif
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
- switch (read_for_csum(mdev, sector, size)) {
+ if (first_peer_device(device)->connection->agreed_pro_version >= 89 &&
+ first_peer_device(device)->connection->csums_tfm) {
+ switch (read_for_csum(first_peer_device(device), sector, size)) {
case -EIO: /* Disk failure */
- put_ldev(mdev);
+ put_ldev(device);
return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
- drbd_rs_complete_io(mdev, sector);
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ drbd_rs_complete_io(device, sector);
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
i = rollback_i;
goto requeue;
case 0:
@@ -693,50 +700,49 @@ next_sector:
} else {
int err;
- inc_rs_pending(mdev);
- err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ inc_rs_pending(device);
+ err = drbd_send_drequest(first_peer_device(device), P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
- dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
- dec_rs_pending(mdev);
- put_ldev(mdev);
+ drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
+ dec_rs_pending(device);
+ put_ldev(device);
return err;
}
}
}
- if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
+ if (device->bm_resync_fo >= drbd_bm_bits(device)) {
/* last syncer _request_ was sent,
* but the P_RS_DATA_REPLY not yet received. sync will end (and
* next sync group will resume), as soon as we receive the last
* resync data block, and the last bit is cleared.
* until then resync "work" is "inactive" ...
*/
- put_ldev(mdev);
+ put_ldev(device);
return 0;
}
requeue:
- mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- put_ldev(mdev);
+ device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
+ mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
+ put_ldev(device);
return 0;
}
-static int w_make_ov_request(struct drbd_work *w, int cancel)
+static int make_ov_request(struct drbd_device *device, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
int number, i, size;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ const sector_t capacity = drbd_get_capacity(device->this_bdev);
bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
- number = drbd_rs_number_requests(mdev);
+ number = drbd_rs_number_requests(device);
- sector = mdev->ov_position;
+ sector = device->ov_position;
for (i = 0; i < number; i++) {
if (sector >= capacity)
return 1;
@@ -745,116 +751,121 @@ static int w_make_ov_request(struct drbd_work *w, int cancel)
* w_e_end_ov_reply().
* We need to send at least one request out. */
stop_sector_reached = i > 0
- && verify_can_do_stop_sector(mdev)
- && sector >= mdev->ov_stop_sector;
+ && verify_can_do_stop_sector(device)
+ && sector >= device->ov_stop_sector;
if (stop_sector_reached)
break;
size = BM_BLOCK_SIZE;
- if (drbd_rs_should_slow_down(mdev, sector) ||
- drbd_try_rs_begin_io(mdev, sector)) {
- mdev->ov_position = sector;
+ if (drbd_rs_should_slow_down(device, sector) ||
+ drbd_try_rs_begin_io(device, sector)) {
+ device->ov_position = sector;
goto requeue;
}
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- inc_rs_pending(mdev);
- if (drbd_send_ov_request(mdev, sector, size)) {
- dec_rs_pending(mdev);
+ inc_rs_pending(device);
+ if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
+ dec_rs_pending(device);
return 0;
}
sector += BM_SECT_PER_BIT;
}
- mdev->ov_position = sector;
+ device->ov_position = sector;
requeue:
- mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
+ device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
if (i == 0 || !stop_sector_reached)
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
+ mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
int w_ov_finished(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- kfree(w);
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ struct drbd_device_work *dw =
+ container_of(w, struct drbd_device_work, w);
+ struct drbd_device *device = dw->device;
+ kfree(dw);
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
return 0;
}
static int w_resync_finished(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
- kfree(w);
+ struct drbd_device_work *dw =
+ container_of(w, struct drbd_device_work, w);
+ struct drbd_device *device = dw->device;
+ kfree(dw);
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
return 0;
}
-static void ping_peer(struct drbd_conf *mdev)
+static void ping_peer(struct drbd_device *device)
{
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
- clear_bit(GOT_PING_ACK, &tconn->flags);
- request_ping(tconn);
- wait_event(tconn->ping_wait,
- test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
+ clear_bit(GOT_PING_ACK, &connection->flags);
+ request_ping(connection);
+ wait_event(connection->ping_wait,
+ test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}
-int drbd_resync_finished(struct drbd_conf *mdev)
+int drbd_resync_finished(struct drbd_device *device)
{
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
- struct drbd_work *w;
+ struct drbd_device_work *dw;
char *khelper_cmd = NULL;
int verify_done = 0;
/* Remove all elements from the resync LRU. Since future actions
* might set bits in the (main) bitmap, then the entries in the
* resync LRU would be wrong. */
- if (drbd_rs_del_all(mdev)) {
+ if (drbd_rs_del_all(device)) {
/* In case this is not possible now, most probably because
* there are P_RS_DATA_REPLY Packets lingering on the worker's
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
schedule_timeout_interruptible(HZ / 10);
- w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
- if (w) {
- w->cb = w_resync_finished;
- w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->sender_work, w);
+ dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
+ if (dw) {
+ dw->w.cb = w_resync_finished;
+ dw->device = device;
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &dw->w);
return 1;
}
- dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
+ drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
}
- dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
+ dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
-
- db = mdev->rs_total;
+
+ db = device->rs_total;
/* adjust for verify start and stop sectors, respective reached position */
- if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
- db -= mdev->ov_left;
+ if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
+ db -= device->ov_left;
dbdt = Bit2KB(db/dt);
- mdev->rs_paused /= HZ;
+ device->rs_paused /= HZ;
- if (!get_ldev(mdev))
+ if (!get_ldev(device))
goto out;
- ping_peer(mdev);
+ ping_peer(device);
- spin_lock_irq(&mdev->tconn->req_lock);
- os = drbd_read_state(mdev);
+ spin_lock_irq(&device->resource->req_lock);
+ os = drbd_read_state(device);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -866,41 +877,41 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns = os;
ns.conn = C_CONNECTED;
- dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
+ drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
verify_done ? "Online verify" : "Resync",
- dt + mdev->rs_paused, mdev->rs_paused, dbdt);
+ dt + device->rs_paused, device->rs_paused, dbdt);
- n_oos = drbd_bm_total_weight(mdev);
+ n_oos = drbd_bm_total_weight(device);
if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
if (n_oos) {
- dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
+ drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
n_oos, Bit2KB(1));
khelper_cmd = "out-of-sync";
}
} else {
- D_ASSERT((n_oos - mdev->rs_failed) == 0);
+ D_ASSERT(device, (n_oos - device->rs_failed) == 0);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
- if (mdev->tconn->csums_tfm && mdev->rs_total) {
- const unsigned long s = mdev->rs_same_csum;
- const unsigned long t = mdev->rs_total;
+ if (first_peer_device(device)->connection->csums_tfm && device->rs_total) {
+ const unsigned long s = device->rs_same_csum;
+ const unsigned long t = device->rs_total;
const int ratio =
(t == 0) ? 0 :
(t < 100000) ? ((s*100)/t) : (s/(t/100));
- dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
+ drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
"transferred %luK total %luK\n",
ratio,
- Bit2KB(mdev->rs_same_csum),
- Bit2KB(mdev->rs_total - mdev->rs_same_csum),
- Bit2KB(mdev->rs_total));
+ Bit2KB(device->rs_same_csum),
+ Bit2KB(device->rs_total - device->rs_same_csum),
+ Bit2KB(device->rs_total));
}
}
- if (mdev->rs_failed) {
- dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
+ if (device->rs_failed) {
+ drbd_info(device, " %lu failed blocks\n", device->rs_failed);
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
ns.disk = D_INCONSISTENT;
@@ -914,179 +925,181 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.pdsk = D_UP_TO_DATE;
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
- if (mdev->p_uuid) {
+ if (device->p_uuid) {
int i;
for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
- _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
- drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
- _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, i, device->p_uuid[i]);
+ drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
+ _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
} else {
- dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
+ drbd_err(device, "device->p_uuid is NULL! BUG\n");
}
}
if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
/* for verify runs, we don't update uuids here,
* so there would be nothing to report. */
- drbd_uuid_set_bm(mdev, 0UL);
- drbd_print_uuids(mdev, "updated UUIDs");
- if (mdev->p_uuid) {
+ drbd_uuid_set_bm(device, 0UL);
+ drbd_print_uuids(device, "updated UUIDs");
+ if (device->p_uuid) {
/* Now the two UUID sets are equal, update what we
* know of the peer. */
int i;
for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
- mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+ device->p_uuid[i] = device->ldev->md.uuid[i];
}
}
}
- _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
+ _drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->tconn->req_lock);
- put_ldev(mdev);
+ spin_unlock_irq(&device->resource->req_lock);
+ put_ldev(device);
out:
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- mdev->rs_paused = 0;
+ device->rs_total = 0;
+ device->rs_failed = 0;
+ device->rs_paused = 0;
/* reset start sector, if we reached end of device */
- if (verify_done && mdev->ov_left == 0)
- mdev->ov_start_sector = 0;
+ if (verify_done && device->ov_left == 0)
+ device->ov_start_sector = 0;
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
if (khelper_cmd)
- drbd_khelper(mdev, khelper_cmd);
+ drbd_khelper(device, khelper_cmd);
return 1;
}
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
- atomic_add(i, &mdev->pp_in_use_by_net);
- atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->tconn->req_lock);
- list_add_tail(&peer_req->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->tconn->req_lock);
+ atomic_add(i, &device->pp_in_use_by_net);
+ atomic_sub(i, &device->pp_in_use);
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->net_ee);
+ spin_unlock_irq(&device->resource->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
}
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
+ err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
+ drbd_err(device, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)peer_req->i.sector);
- err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(mdev);
+ dec_unacked(device);
- move_to_net_ee_or_free(mdev, peer_req);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block() failed\n");
+ drbd_err(device, "drbd_send_block() failed\n");
return err;
}
/**
* w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
- if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev_if_state(device, D_FAILED)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
- if (mdev->state.conn == C_AHEAD) {
- err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ if (device->state.conn == C_AHEAD) {
+ err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
- inc_rs_pending(mdev);
- err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ if (likely(device->state.pdsk >= D_INCONSISTENT)) {
+ inc_rs_pending(device);
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Not sending RSDataReply, "
+ drbd_err(device, "Not sending RSDataReply, "
"partner DISKLESS!\n");
err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
+ drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)peer_req->i.sector);
- err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
}
- dec_unacked(mdev);
+ dec_unacked(device);
- move_to_net_ee_or_free(mdev, peer_req);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block() failed\n");
+ drbd_err(device, "drbd_send_block() failed\n");
return err;
}
int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct digest_info *di;
int digest_size;
void *digest = NULL;
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
di = peer_req->digest;
@@ -1095,47 +1108,48 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
- if (mdev->tconn->csums_tfm) {
- digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
- D_ASSERT(digest_size == di->digest_size);
+ if (peer_device->connection->csums_tfm) {
+ digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
- drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
- mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
- err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
+ device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+ err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
} else {
- inc_rs_pending(mdev);
+ inc_rs_pending(device);
peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
}
} else {
- err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
+ drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
- dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, peer_req);
+ dec_unacked(device);
+ move_to_net_ee_or_free(device, peer_req);
if (unlikely(err))
- dev_err(DEV, "drbd_send_block/ack() failed\n");
+ drbd_err(device, "drbd_send_block/ack() failed\n");
return err;
}
int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
@@ -1145,7 +1159,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1153,7 +1167,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
}
if (likely(!(peer_req->flags & EE_WAS_ERROR)))
- drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
@@ -1162,36 +1176,37 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
peer_req = NULL;
- inc_rs_pending(mdev);
- err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ inc_rs_pending(device);
+ err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
if (err)
- dec_rs_pending(mdev);
+ dec_rs_pending(device);
kfree(digest);
out:
if (peer_req)
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return err;
}
-void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
{
- if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
- mdev->ov_last_oos_size += size>>9;
+ if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
+ device->ov_last_oos_size += size>>9;
} else {
- mdev->ov_last_oos_start = sector;
- mdev->ov_last_oos_size = size>>9;
+ device->ov_last_oos_start = sector;
+ device->ov_last_oos_size = size>>9;
}
- drbd_set_out_of_sync(mdev, sector, size);
+ drbd_set_out_of_sync(device, sector, size);
}
int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_device *peer_device = peer_req->peer_device;
+ struct drbd_device *device = peer_device->device;
struct digest_info *di;
void *digest;
sector_t sector = peer_req->i.sector;
@@ -1201,27 +1216,27 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
bool stop_sector_reached = false;
if (unlikely(cancel)) {
- drbd_free_peer_req(mdev, peer_req);
- dec_unacked(mdev);
+ drbd_free_peer_req(device, peer_req);
+ dec_unacked(device);
return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
- if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, peer_req->i.sector);
- put_ldev(mdev);
+ if (get_ldev(device)) {
+ drbd_rs_complete_io(device, peer_req->i.sector);
+ put_ldev(device);
}
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
+ digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
+ drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
- D_ASSERT(digest_size == di->digest_size);
+ D_ASSERT(device, digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
@@ -1232,102 +1247,95 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_alloc_pages due to pp_in_use > max_buffers. */
- drbd_free_peer_req(mdev, peer_req);
+ drbd_free_peer_req(device, peer_req);
if (!eq)
- drbd_ov_out_of_sync_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(device, sector, size);
else
- ov_out_of_sync_print(mdev);
+ ov_out_of_sync_print(device);
- err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
- dec_unacked(mdev);
+ dec_unacked(device);
- --mdev->ov_left;
+ --device->ov_left;
/* let's advance progress step marks only for every other megabyte */
- if ((mdev->ov_left & 0x200) == 0x200)
- drbd_advance_rs_marks(mdev, mdev->ov_left);
+ if ((device->ov_left & 0x200) == 0x200)
+ drbd_advance_rs_marks(device, device->ov_left);
- stop_sector_reached = verify_can_do_stop_sector(mdev) &&
- (sector + (size>>9)) >= mdev->ov_stop_sector;
+ stop_sector_reached = verify_can_do_stop_sector(device) &&
+ (sector + (size>>9)) >= device->ov_stop_sector;
- if (mdev->ov_left == 0 || stop_sector_reached) {
- ov_out_of_sync_print(mdev);
- drbd_resync_finished(mdev);
+ if (device->ov_left == 0 || stop_sector_reached) {
+ ov_out_of_sync_print(device);
+ drbd_resync_finished(device);
}
return err;
}
-int w_prev_work_done(struct drbd_work *w, int cancel)
-{
- struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
-
- complete(&b->done);
- return 0;
-}
-
/* FIXME
* We need to track the number of pending barrier acks,
* and to be able to wait for them.
* See also comment in drbd_adm_attach before drbd_suspend_io.
*/
-int drbd_send_barrier(struct drbd_tconn *tconn)
+static int drbd_send_barrier(struct drbd_connection *connection)
{
struct p_barrier *p;
struct drbd_socket *sock;
- sock = &tconn->data;
- p = conn_prepare_command(tconn, sock);
+ sock = &connection->data;
+ p = conn_prepare_command(connection, sock);
if (!p)
return -EIO;
- p->barrier = tconn->send.current_epoch_nr;
+ p->barrier = connection->send.current_epoch_nr;
p->pad = 0;
- tconn->send.current_epoch_writes = 0;
+ connection->send.current_epoch_writes = 0;
- return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
+ return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
int w_send_write_hint(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, unplug_work);
struct drbd_socket *sock;
if (cancel)
return 0;
- sock = &mdev->tconn->data;
- if (!drbd_prepare_command(mdev, sock))
+ sock = &first_peer_device(device)->connection->data;
+ if (!drbd_prepare_command(first_peer_device(device), sock))
return -EIO;
- return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+ return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
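
Most of the churn in these DRBD hunks is mechanical: the old struct drbd_conf ("mdev") is renamed to struct drbd_device, and a device now reaches its connection through a peer-device object, hence the recurring first_peer_device(device)->connection chains. The fragment below is only an illustrative model of that containment; the real structures live in drbd_int.h, carry far more state, and first_peer_device() walks a real list rather than a single pointer.

/* Illustrative model of the renamed DRBD object hierarchy; simplified to a
 * single peer device per device. */
#include <stdio.h>

struct drbd_connection {
	int agreed_pro_version;
};

struct drbd_device;

struct drbd_peer_device {
	struct drbd_connection *connection;
	struct drbd_device *device;
};

struct drbd_device {
	struct drbd_peer_device *peer_device;	/* simplification: just one */
};

static struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return device->peer_device;
}

int main(void)
{
	struct drbd_connection conn = { .agreed_pro_version = 101 };
	struct drbd_device dev;
	struct drbd_peer_device pd = { .connection = &conn, .device = &dev };

	dev.peer_device = &pd;

	/* the access pattern used throughout the patch */
	printf("protocol %d\n",
	       first_peer_device(&dev)->connection->agreed_pro_version);
	return 0;
}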
-static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
{
- if (!tconn->send.seen_any_write_yet) {
- tconn->send.seen_any_write_yet = true;
- tconn->send.current_epoch_nr = epoch;
- tconn->send.current_epoch_writes = 0;
+ if (!connection->send.seen_any_write_yet) {
+ connection->send.seen_any_write_yet = true;
+ connection->send.current_epoch_nr = epoch;
+ connection->send.current_epoch_writes = 0;
}
}
-static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
{
/* re-init if first write on this connection */
- if (!tconn->send.seen_any_write_yet)
+ if (!connection->send.seen_any_write_yet)
return;
- if (tconn->send.current_epoch_nr != epoch) {
- if (tconn->send.current_epoch_writes)
- drbd_send_barrier(tconn);
- tconn->send.current_epoch_nr = epoch;
+ if (connection->send.current_epoch_nr != epoch) {
+ if (connection->send.current_epoch_writes)
+ drbd_send_barrier(connection);
+ connection->send.current_epoch_nr = epoch;
}
}
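
re_init_if_first_write() and maybe_send_barrier() together implement a simple rule: a P_BARRIER is sent only when the epoch number changes and the epoch being closed actually contained writes. A self-contained sketch of that decision might look like this; send_barrier() is a stand-in for drbd_send_barrier(), and the send-state fields are the ones from the hunk.

/* Userspace sketch of the epoch/barrier bookkeeping. */
#include <stdbool.h>
#include <stdio.h>

struct send_state {
	bool seen_any_write_yet;
	unsigned int current_epoch_nr;
	unsigned int current_epoch_writes;
};

static void send_barrier(struct send_state *s)
{
	printf("barrier for epoch %u (%u writes)\n",
	       s->current_epoch_nr, s->current_epoch_writes);
	s->current_epoch_writes = 0;
}

static void re_init_if_first_write(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet) {
		s->seen_any_write_yet = true;
		s->current_epoch_nr = epoch;
		s->current_epoch_writes = 0;
	}
}

static void maybe_send_barrier(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet)
		return;
	if (s->current_epoch_nr != epoch) {
		if (s->current_epoch_writes)
			send_barrier(s);
		s->current_epoch_nr = epoch;
	}
}

int main(void)
{
	struct send_state s = { false, 0, 0 };
	unsigned int epochs[] = { 1, 1, 2, 3 };

	for (unsigned int i = 0; i < 4; i++) {
		re_init_if_first_write(&s, epochs[i]);
		maybe_send_barrier(&s, epochs[i]);
		s.current_epoch_writes++;	/* as w_send_dblock() does */
	}
	return 0;
}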
int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1335,13 +1343,13 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
return 0;
}
- /* this time, no tconn->send.current_epoch_writes++;
+ /* this time, no connection->send.current_epoch_writes++;
* If it was sent, it was the closing barrier for the last
* replicated epoch, before we went into AHEAD mode.
* No more barriers will be sent, until we leave AHEAD mode again. */
- maybe_send_barrier(tconn, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
- err = drbd_send_out_of_sync(mdev, req);
+ err = drbd_send_out_of_sync(first_peer_device(device), req);
req_mod(req, OOS_HANDED_TO_NETWORK);
return err;
@@ -1349,15 +1357,14 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
/**
* w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1365,11 +1372,11 @@ int w_send_dblock(struct drbd_work *w, int cancel)
return 0;
}
- re_init_if_first_write(tconn, req->epoch);
- maybe_send_barrier(tconn, req->epoch);
- tconn->send.current_epoch_writes++;
+ re_init_if_first_write(connection, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
+ connection->send.current_epoch_writes++;
- err = drbd_send_dblock(mdev, req);
+ err = drbd_send_dblock(first_peer_device(device), req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
return err;
@@ -1377,15 +1384,14 @@ int w_send_dblock(struct drbd_work *w, int cancel)
/**
* w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
- * @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
- struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_device *device = req->device;
+ struct drbd_connection *connection = first_peer_device(device)->connection;
int err;
if (unlikely(cancel)) {
@@ -1395,9 +1401,9 @@ int w_send_read_req(struct drbd_work *w, int cancel)
/* Even read requests may close a write epoch,
* if there was any yet. */
- maybe_send_barrier(tconn, req->epoch);
+ maybe_send_barrier(connection, req->epoch);
- err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ err = drbd_send_drequest(first_peer_device(device), P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
@@ -1408,21 +1414,21 @@ int w_send_read_req(struct drbd_work *w, int cancel)
int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device = req->device;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, &req->i, false);
+ drbd_al_begin_io(device, &req->i, false);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
+ req->private_bio->bi_bdev = device->ldev->backing_bdev;
generic_make_request(req->private_bio);
return 0;
}
-static int _drbd_may_sync_now(struct drbd_conf *mdev)
+static int _drbd_may_sync_now(struct drbd_device *device)
{
- struct drbd_conf *odev = mdev;
+ struct drbd_device *odev = device;
int resync_after;
while (1) {
@@ -1433,7 +1439,7 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
rcu_read_unlock();
if (resync_after == -1)
return 1;
- odev = minor_to_mdev(resync_after);
+ odev = minor_to_device(resync_after);
if (!odev)
return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
@@ -1446,17 +1452,17 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
/**
* _drbd_pause_after() - Pause resync on all devices that may not resync now
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Called from process context only (admin command and after_state_ch).
*/
-static int _drbd_pause_after(struct drbd_conf *mdev)
+static int _drbd_pause_after(struct drbd_device *device)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
- idr_for_each_entry(&minors, odev, i) {
+ idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
@@ -1470,17 +1476,17 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
/**
* _drbd_resume_next() - Resume resync on all devices that may resync now
- * @mdev: DRBD device.
+ * @device: DRBD device.
*
* Called from process context only (admin command and worker).
*/
-static int _drbd_resume_next(struct drbd_conf *mdev)
+static int _drbd_resume_next(struct drbd_device *device)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
- idr_for_each_entry(&minors, odev, i) {
+ idr_for_each_entry(&drbd_devices, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
@@ -1494,24 +1500,24 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
return rv;
}
-void resume_next_sg(struct drbd_conf *mdev)
+void resume_next_sg(struct drbd_device *device)
{
write_lock_irq(&global_state_lock);
- _drbd_resume_next(mdev);
+ _drbd_resume_next(device);
write_unlock_irq(&global_state_lock);
}
-void suspend_other_sg(struct drbd_conf *mdev)
+void suspend_other_sg(struct drbd_device *device)
{
write_lock_irq(&global_state_lock);
- _drbd_pause_after(mdev);
+ _drbd_pause_after(device);
write_unlock_irq(&global_state_lock);
}
/* caller must hold global_state_lock */
-enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
{
- struct drbd_conf *odev;
+ struct drbd_device *odev;
int resync_after;
if (o_minor == -1)
@@ -1520,9 +1526,9 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
return ERR_RESYNC_AFTER;
/* check for loops */
- odev = minor_to_mdev(o_minor);
+ odev = minor_to_device(o_minor);
while (1) {
- if (odev == mdev)
+ if (odev == device)
return ERR_RESYNC_AFTER_CYCLE;
/* You are free to depend on diskless, non-existing,
@@ -1542,35 +1548,35 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
return NO_ERROR;
/* follow the dependency chain */
- odev = minor_to_mdev(resync_after);
+ odev = minor_to_device(resync_after);
}
}
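
drbd_resync_after_valid() walks the resync-after chain (each device may name another minor it must wait for) and rejects a configuration that would loop back to the device being configured. A minimal userspace version of that walk, using an array of minors instead of minor_to_device() and omitting the range and diskless checks, could look like this; the return codes mirror the hunk, everything else is a simplification.

/* Simplified cycle check over a "resync after" dependency chain. */
#include <stdio.h>

#define MAX_MINORS 8
enum { NO_ERROR = 0, ERR_RESYNC_AFTER_CYCLE = -1 };

static int resync_after[MAX_MINORS];	/* -1 means "depends on nothing" */

static int resync_after_valid(int minor, int o_minor)
{
	int odev = o_minor;

	if (o_minor == -1)
		return NO_ERROR;

	while (1) {
		if (odev == minor)
			return ERR_RESYNC_AFTER_CYCLE;
		if (resync_after[odev] == -1)
			return NO_ERROR;	/* end of the chain */
		odev = resync_after[odev];	/* follow the dependency chain */
	}
}

int main(void)
{
	for (int i = 0; i < MAX_MINORS; i++)
		resync_after[i] = -1;
	resync_after[1] = 2;	/* minor 1 syncs after minor 2 */
	resync_after[2] = 3;	/* minor 2 syncs after minor 3 */

	printf("0 after 1: %d\n", resync_after_valid(0, 1));	/* NO_ERROR */
	printf("3 after 1: %d\n", resync_after_valid(3, 1));	/* cycle */
	return 0;
}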
/* caller must hold global_state_lock */
-void drbd_resync_after_changed(struct drbd_conf *mdev)
+void drbd_resync_after_changed(struct drbd_device *device)
{
int changes;
do {
- changes = _drbd_pause_after(mdev);
- changes |= _drbd_resume_next(mdev);
+ changes = _drbd_pause_after(device);
+ changes |= _drbd_resume_next(device);
} while (changes);
}
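
drbd_resync_after_changed() just repeats the pause and resume passes until neither reports a change, i.e. until the pause state of all devices reaches a fixed point. Stripped of DRBD specifics, the loop has this shape (the two passes here are dummies, chosen only so the program terminates visibly):

/* Generic fixed-point loop in the shape of drbd_resync_after_changed(). */
#include <stdio.h>

static int value = 5;

static int pause_pass(void)  { if (value > 3) { value--; return 1; } return 0; }
static int resume_pass(void) { if (value < 3) { value++; return 1; } return 0; }

int main(void)
{
	int changes;

	do {
		changes  = pause_pass();	/* non-zero if anything changed */
		changes |= resume_pass();
	} while (changes);

	printf("settled at %d\n", value);
	return 0;
}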
-void drbd_rs_controller_reset(struct drbd_conf *mdev)
+void drbd_rs_controller_reset(struct drbd_device *device)
{
struct fifo_buffer *plan;
- atomic_set(&mdev->rs_sect_in, 0);
- atomic_set(&mdev->rs_sect_ev, 0);
- mdev->rs_in_flight = 0;
+ atomic_set(&device->rs_sect_in, 0);
+ atomic_set(&device->rs_sect_ev, 0);
+ device->rs_in_flight = 0;
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
It is valid since all other updates also lead to an completely
It is valid since all other updates also lead to a completely
empty fifo */
rcu_read_lock();
- plan = rcu_dereference(mdev->rs_plan_s);
+ plan = rcu_dereference(device->rs_plan_s);
plan->total = 0;
fifo_set(plan, 0);
rcu_read_unlock();
@@ -1578,101 +1584,104 @@ void drbd_rs_controller_reset(struct drbd_conf *mdev)
void start_resync_timer_fn(unsigned long data)
{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_device *device = (struct drbd_device *) data;
- drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
+ drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+ &device->start_resync_work);
}
int w_start_resync(struct drbd_work *w, int cancel)
{
- struct drbd_conf *mdev = w->mdev;
+ struct drbd_device *device =
+ container_of(w, struct drbd_device, start_resync_work);
- if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
- dev_warn(DEV, "w_start_resync later...\n");
- mdev->start_resync_timer.expires = jiffies + HZ/10;
- add_timer(&mdev->start_resync_timer);
+ if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
+ drbd_warn(device, "w_start_resync later...\n");
+ device->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&device->start_resync_timer);
return 0;
}
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ drbd_start_resync(device, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
return 0;
}
/**
* drbd_start_resync() - Start the resync process
- * @mdev: DRBD device.
+ * @device: DRBD device.
* @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
*
* This function might bring you directly into one of the
* C_PAUSED_SYNC_* states.
*/
-void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
+void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
{
union drbd_state ns;
int r;
- if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
- dev_err(DEV, "Resync already running!\n");
+ if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
+ drbd_err(device, "Resync already running!\n");
return;
}
- if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
+ if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
we check that we might make the data inconsistent. */
- r = drbd_khelper(mdev, "before-resync-target");
+ r = drbd_khelper(device, "before-resync-target");
r = (r >> 8) & 0xff;
if (r > 0) {
- dev_info(DEV, "before-resync-target handler returned %d, "
+ drbd_info(device, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
} else /* C_SYNC_SOURCE */ {
- r = drbd_khelper(mdev, "before-resync-source");
+ r = drbd_khelper(device, "before-resync-source");
r = (r >> 8) & 0xff;
if (r > 0) {
if (r == 3) {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ drbd_info(device, "before-resync-source handler returned %d, "
"ignoring. Old userland tools?", r);
} else {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ drbd_info(device, "before-resync-source handler returned %d, "
"dropping connection.\n", r);
- conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ conn_request_state(first_peer_device(device)->connection,
+ NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
}
}
}
- if (current == mdev->tconn->worker.task) {
+ if (current == first_peer_device(device)->connection->worker.task) {
/* The worker should not sleep waiting for state_mutex,
that can take long */
- if (!mutex_trylock(mdev->state_mutex)) {
- set_bit(B_RS_H_DONE, &mdev->flags);
- mdev->start_resync_timer.expires = jiffies + HZ/5;
- add_timer(&mdev->start_resync_timer);
+ if (!mutex_trylock(device->state_mutex)) {
+ set_bit(B_RS_H_DONE, &device->flags);
+ device->start_resync_timer.expires = jiffies + HZ/5;
+ add_timer(&device->start_resync_timer);
return;
}
} else {
- mutex_lock(mdev->state_mutex);
+ mutex_lock(device->state_mutex);
}
- clear_bit(B_RS_H_DONE, &mdev->flags);
+ clear_bit(B_RS_H_DONE, &device->flags);
write_lock_irq(&global_state_lock);
/* Did some connection breakage or IO error race with us? */
- if (mdev->state.conn < C_CONNECTED
- || !get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (device->state.conn < C_CONNECTED
+ || !get_ldev_if_state(device, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
- mutex_unlock(mdev->state_mutex);
+ mutex_unlock(device->state_mutex);
return;
}
- ns = drbd_read_state(mdev);
+ ns = drbd_read_state(device);
- ns.aftr_isp = !_drbd_may_sync_now(mdev);
+ ns.aftr_isp = !_drbd_may_sync_now(device);
ns.conn = side;
@@ -1681,43 +1690,43 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
else /* side == C_SYNC_SOURCE */
ns.pdsk = D_INCONSISTENT;
- r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = drbd_read_state(mdev);
+ r = __drbd_set_state(device, ns, CS_VERBOSE, NULL);
+ ns = drbd_read_state(device);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
if (r == SS_SUCCESS) {
- unsigned long tw = drbd_bm_total_weight(mdev);
+ unsigned long tw = drbd_bm_total_weight(device);
unsigned long now = jiffies;
int i;
- mdev->rs_failed = 0;
- mdev->rs_paused = 0;
- mdev->rs_same_csum = 0;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->rs_total = tw;
- mdev->rs_start = now;
+ device->rs_failed = 0;
+ device->rs_paused = 0;
+ device->rs_same_csum = 0;
+ device->rs_last_events = 0;
+ device->rs_last_sect_ev = 0;
+ device->rs_total = tw;
+ device->rs_start = now;
for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = tw;
- mdev->rs_mark_time[i] = now;
+ device->rs_mark_left[i] = tw;
+ device->rs_mark_time[i] = now;
}
- _drbd_pause_after(mdev);
+ _drbd_pause_after(device);
}
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
/* reset rs_last_bcast when a resync or verify is started,
* to deal with potential jiffies wrap. */
- mdev->rs_last_bcast = jiffies - HZ;
+ device->rs_last_bcast = jiffies - HZ;
- dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
+ drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
- (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
- (unsigned long) mdev->rs_total);
+ (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
+ (unsigned long) device->rs_total);
if (side == C_SYNC_TARGET)
- mdev->bm_resync_fo = 0;
+ device->bm_resync_fo = 0;
/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
* with w_send_oos, or the sync target will get confused as to
@@ -1726,10 +1735,12 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
- if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
- drbd_gen_and_send_sync_uuid(mdev);
+ if (side == C_SYNC_SOURCE &&
+ first_peer_device(device)->connection->agreed_pro_version < 96)
+ drbd_gen_and_send_sync_uuid(first_peer_device(device));
- if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
+ if (first_peer_device(device)->connection->agreed_pro_version < 95 &&
+ device->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
@@ -1745,33 +1756,33 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
int timeo;
rcu_read_lock();
- nc = rcu_dereference(mdev->tconn->net_conf);
+ nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock();
schedule_timeout_interruptible(timeo);
}
- drbd_resync_finished(mdev);
+ drbd_resync_finished(device);
}
- drbd_rs_controller_reset(mdev);
- /* ns.conn may already be != mdev->state.conn,
+ drbd_rs_controller_reset(device);
+ /* ns.conn may already be != device->state.conn,
* we may have been paused in between, or become paused until
* the timer triggers.
* No matter, that is handled in resync_timer_fn() */
if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
+ mod_timer(&device->resync_timer, jiffies);
- drbd_md_sync(mdev);
+ drbd_md_sync(device);
}
- put_ldev(mdev);
- mutex_unlock(mdev->state_mutex);
+ put_ldev(device);
+ mutex_unlock(device->state_mutex);
}
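
drbd_start_resync() must not let the worker thread sleep on state_mutex, so when it runs in worker context it only tries the lock and, on contention, re-arms start_resync_timer to retry a little later. The same trylock-or-defer pattern in plain pthreads; retry_later() stands in for re-arming the timer, and the whole program is illustrative only.

/* Trylock-or-defer: never block the worker, reschedule the work instead. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;

static void retry_later(void)
{
	printf("busy, retrying later\n");	/* stand-in for add_timer() */
}

static void start_resync(int called_from_worker)
{
	if (called_from_worker) {
		if (pthread_mutex_trylock(&state_mutex) != 0) {
			retry_later();
			return;
		}
	} else {
		pthread_mutex_lock(&state_mutex);
	}

	printf("state transition done under the lock\n");
	pthread_mutex_unlock(&state_mutex);
}

int main(void)
{
	start_resync(1);	/* worker path: trylock succeeds here */

	pthread_mutex_lock(&state_mutex);
	start_resync(1);	/* lock held elsewhere: defers instead of blocking */
	pthread_mutex_unlock(&state_mutex);
	return 0;
}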
/* If the resource already closed the current epoch, but we did not
* (because we have not yet seen new requests), we should send the
* corresponding barrier now. Must be checked within the same spinlock
* that is used to check for new requests. */
-bool need_to_send_barrier(struct drbd_tconn *connection)
+static bool need_to_send_barrier(struct drbd_connection *connection)
{
if (!connection->send.seen_any_write_yet)
return false;
@@ -1795,7 +1806,7 @@ bool need_to_send_barrier(struct drbd_tconn *connection)
return true;
}
-bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
+static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
list_splice_init(&queue->q, work_list);
@@ -1803,7 +1814,7 @@ bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_li
return !list_empty(work_list);
}
-bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
+static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
{
spin_lock_irq(&queue->q_lock);
if (!list_empty(&queue->q))
@@ -1812,7 +1823,7 @@ bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_lis
return !list_empty(work_list);
}
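
dequeue_work_batch() and dequeue_work_item() follow the usual work-queue rule: hold the queue lock only long enough to detach either the whole list or one entry, then process the detached items with the lock dropped. A userspace sketch of the batch variant, with list_splice_init() replaced by moving a plain singly linked list head:

/* "Splice the whole queue under the lock" idiom; plain list, not list_head. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int id;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *queue;		/* protected by q_lock */

static void queue_work(int id)
{
	struct work *w = malloc(sizeof(*w));

	w->id = id;
	pthread_mutex_lock(&q_lock);
	w->next = queue;		/* LIFO for brevity */
	queue = w;
	pthread_mutex_unlock(&q_lock);
}

static struct work *dequeue_work_batch(void)
{
	struct work *batch;

	pthread_mutex_lock(&q_lock);
	batch = queue;			/* detach everything at once */
	queue = NULL;
	pthread_mutex_unlock(&q_lock);
	return batch;
}

int main(void)
{
	queue_work(1);
	queue_work(2);

	for (struct work *w = dequeue_work_batch(); w; ) {
		struct work *next = w->next;

		printf("work %d handled with the lock dropped\n", w->id);
		free(w);
		w = next;
	}
	return 0;
}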
-void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
{
DEFINE_WAIT(wait);
struct net_conf *nc;
@@ -1842,7 +1853,7 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
for (;;) {
int send_barrier;
prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
- spin_lock_irq(&connection->req_lock);
+ spin_lock_irq(&connection->resource->req_lock);
spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
/* dequeue single item only,
* we still use drbd_queue_work_front() in some places */
@@ -1850,11 +1861,11 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
list_move(connection->sender_work.q.next, work_list);
spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
if (!list_empty(work_list) || signal_pending(current)) {
- spin_unlock_irq(&connection->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
break;
}
send_barrier = need_to_send_barrier(connection);
- spin_unlock_irq(&connection->req_lock);
+ spin_unlock_irq(&connection->resource->req_lock);
if (send_barrier) {
drbd_send_barrier(connection);
connection->send.current_epoch_nr++;
@@ -1883,9 +1894,9 @@ void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
int drbd_worker(struct drbd_thread *thi)
{
- struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_connection *connection = thi->connection;
struct drbd_work *w = NULL;
- struct drbd_conf *mdev;
+ struct drbd_peer_device *peer_device;
LIST_HEAD(work_list);
int vnr;
@@ -1895,12 +1906,12 @@ int drbd_worker(struct drbd_thread *thi)
/* as long as we use drbd_queue_work_front(),
* we may only dequeue single work items here, not batches. */
if (list_empty(&work_list))
- wait_for_work(tconn, &work_list);
+ wait_for_work(connection, &work_list);
if (signal_pending(current)) {
flush_signals(current);
if (get_t_state(thi) == RUNNING) {
- conn_warn(tconn, "Worker got an unexpected signal\n");
+ drbd_warn(connection, "Worker got an unexpected signal\n");
continue;
}
break;
@@ -1912,10 +1923,10 @@ int drbd_worker(struct drbd_thread *thi)
while (!list_empty(&work_list)) {
w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
- if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+ if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
continue;
- if (tconn->cstate >= C_WF_REPORT_PARAMS)
- conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ if (connection->cstate >= C_WF_REPORT_PARAMS)
+ conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
@@ -1925,16 +1936,17 @@ int drbd_worker(struct drbd_thread *thi)
list_del_init(&w->list);
w->cb(w, 1);
}
- dequeue_work_batch(&tconn->sender_work, &work_list);
+ dequeue_work_batch(&connection->sender_work, &work_list);
} while (!list_empty(&work_list));
rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
- kref_get(&mdev->kref);
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
+ kref_get(&device->kref);
rcu_read_unlock();
- drbd_mdev_cleanup(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
+ drbd_device_cleanup(device);
+ kref_put(&device->kref, drbd_destroy_device);
rcu_read_lock();
}
rcu_read_unlock();
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 328f18e4b4ee..3db9ebaf64f6 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -9,12 +9,12 @@
extern char *drbd_sec_holder;
/* sets the number of 512 byte sectors of our virtual device */
-static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
+static inline void drbd_set_my_capacity(struct drbd_device *device,
sector_t size)
{
- /* set_capacity(mdev->this_bdev->bd_disk, size); */
- set_capacity(mdev->vdisk, size);
- mdev->this_bdev->bd_inode->i_size = (loff_t)size << 9;
+ /* set_capacity(device->this_bdev->bd_disk, size); */
+ set_capacity(device->vdisk, size);
+ device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
}
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
@@ -27,20 +27,20 @@ extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
*/
-static inline void drbd_generic_make_request(struct drbd_conf *mdev,
+static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_bdev) {
printk(KERN_ERR "drbd%d: drbd_generic_make_request: "
"bio->bi_bdev == NULL\n",
- mdev_to_minor(mdev));
+ device_to_minor(device));
dump_stack();
bio_endio(bio, -ENODEV);
return;
}
- if (drbd_insert_fault(mdev, fault_type))
+ if (drbd_insert_fault(device, fault_type))
bio_endio(bio, -EIO);
else
generic_make_request(bio);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..8f5565bf34cd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -961,17 +961,31 @@ static void empty(void)
{
}
-static DECLARE_WORK(floppy_work, NULL);
+static void (*floppy_work_fn)(void);
+
+static void floppy_work_workfn(struct work_struct *work)
+{
+ floppy_work_fn();
+}
+
+static DECLARE_WORK(floppy_work, floppy_work_workfn);
static void schedule_bh(void (*handler)(void))
{
WARN_ON(work_pending(&floppy_work));
- PREPARE_WORK(&floppy_work, (work_func_t)handler);
+ floppy_work_fn = handler;
queue_work(floppy_wq, &floppy_work);
}
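
PREPARE_WORK() is on its way out, so floppy.c stops re-pointing the work item at a new handler and instead keeps one fixed work function that dispatches through a plain function pointer (floppy_work_fn, fd_timer_fn). The same trampoline pattern in ordinary C; here the handler is invoked directly as a stand-in for queue_work() plus workqueue execution, and the workfn drops the struct work_struct argument the kernel version takes.

/* Trampoline replacing PREPARE_WORK(): fixed entry point, dynamic target. */
#include <stdio.h>

static void (*floppy_work_fn)(void);	/* set before the work is queued */

static void floppy_work_workfn(void)
{
	floppy_work_fn();
}

static void fd_watchdog(void)  { printf("watchdog ran\n"); }
static void floppy_ready(void) { printf("ready ran\n"); }

static void schedule_bh(void (*handler)(void))
{
	floppy_work_fn = handler;	/* instead of PREPARE_WORK(..., handler) */
	floppy_work_workfn();		/* stand-in for queue_work() + execution */
}

int main(void)
{
	schedule_bh(fd_watchdog);
	schedule_bh(floppy_ready);
	return 0;
}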
-static DECLARE_DELAYED_WORK(fd_timer, NULL);
+static void (*fd_timer_fn)(void) = NULL;
+
+static void fd_timer_workfn(struct work_struct *work)
+{
+ fd_timer_fn();
+}
+
+static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
@@ -982,7 +996,7 @@ static void cancel_activity(void)
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(struct work_struct *arg)
+static void fd_watchdog(void)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -993,7 +1007,7 @@ static void fd_watchdog(struct work_struct *arg)
reset_fdc();
} else {
cancel_delayed_work(&fd_timer);
- PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ fd_timer_fn = fd_watchdog;
queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
@@ -1005,7 +1019,8 @@ static void main_command_interrupt(void)
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long expires, work_func_t function)
+static int fd_wait_for_completion(unsigned long expires,
+ void (*function)(void))
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1016,7 +1031,7 @@ static int fd_wait_for_completion(unsigned long expires, work_func_t function)
if (time_before(jiffies, expires)) {
cancel_delayed_work(&fd_timer);
- PREPARE_DELAYED_WORK(&fd_timer, function);
+ fd_timer_fn = function;
queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
@@ -1334,8 +1349,7 @@ static int fdc_dtr(void)
* Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
*/
FDCS->dtr = raw_cmd->rate & 3;
- return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (work_func_t)floppy_ready);
+ return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1440,7 +1454,7 @@ static void setup_rw_floppy(void)
int flags;
int dflags;
unsigned long ready_date;
- work_func_t function;
+ void (*function)(void);
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1454,9 +1468,9 @@ static void setup_rw_floppy(void)
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (work_func_t)floppy_start;
+ function = floppy_start;
} else
- function = (work_func_t)setup_rw_floppy;
+ function = setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1486,7 +1500,7 @@ static void setup_rw_floppy(void)
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog(NULL);
+ fd_watchdog();
}
static int blind_seek;
@@ -1863,7 +1877,7 @@ static int start_motor(void (*function)(void))
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (work_func_t)function);
+ function);
}
static void floppy_ready(void)
@@ -2351,7 +2365,7 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
int size;
struct req_iterator iter;
char *base;
@@ -2360,10 +2374,10 @@ static int buffer_chain_size(void)
size = 0;
rq_for_each_segment(bv, current_req, iter) {
- if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+ if (page_address(bv.bv_page) + bv.bv_offset != base + size)
break;
- size += bv->bv_len;
+ size += bv.bv_len;
}
return size >> 9;
@@ -2389,7 +2403,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
- struct bio_vec *bv;
+ struct bio_vec bv;
char *buffer;
char *dma_buffer;
int size;
@@ -2427,10 +2441,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
if (!remaining)
break;
- size = bv->bv_len;
+ size = bv.bv_len;
SUPBOUND(size, remaining);
- buffer = page_address(bv->bv_page) + bv->bv_offset;
+ buffer = page_address(bv.bv_page) + bv.bv_offset;
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
@@ -3691,9 +3705,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3763,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
* a disk in the drive, and whether that disk is writable.
*/
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+ int drive;
+ struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
{
- complete((struct completion *)bio->bi_private);
+ struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+ int drive = cbdata->drive;
+
+ if (err) {
+ pr_info("floppy: error %d while reading block 0", err);
+ set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ }
+ complete(&cbdata->complete);
}
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
struct bio bio;
struct bio_vec bio_vec;
- struct completion complete;
struct page *page;
+ struct rb0_cbdata cbdata;
size_t size;
page = alloc_page(GFP_NOIO);
@@ -3769,23 +3798,26 @@ static int __floppy_read_block_0(struct block_device *bdev)
if (!size)
size = 1024;
+ cbdata.drive = drive;
+
bio_init(&bio);
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_size = size;
+ bio.bi_iter.bi_size = size;
bio.bi_bdev = bdev;
- bio.bi_sector = 0;
+ bio.bi_iter.bi_sector = 0;
bio.bi_flags = (1 << BIO_QUIET);
- init_completion(&complete);
- bio.bi_private = &complete;
- bio.bi_end_io = floppy_rb0_complete;
+ bio.bi_private = &cbdata;
+ bio.bi_end_io = floppy_rb0_cb;
submit_bio(READ, &bio);
process_fd_request();
- wait_for_completion(&complete);
+
+ init_completion(&cbdata.complete);
+ wait_for_completion(&cbdata.complete);
__free_page(page);
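
The reworked __floppy_read_block_0() hands a small rb0_cbdata structure to the bio as bi_private, so the end_io callback knows which drive it ran for and which completion to signal. The pattern, a caller-owned context that an asynchronous callback fills in and then completes, can be sketched in userspace with a tiny pthread-based completion; struct completion here is only a stand-in for the kernel API, and the helper thread plays the role of the finished I/O.

/* Callback context + completion, in the spirit of floppy_rb0_cb(). */
#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct rb0_cbdata {			/* private data handed to the callback */
	int drive;
	int error;
	struct completion complete;
};

static void rb0_cb(void *private, int err)	/* plays the end_io role */
{
	struct rb0_cbdata *cbdata = private;

	cbdata->error = err;			/* record the outcome per drive */
	complete(&cbdata->complete);
}

static void *io_thread(void *arg)
{
	rb0_cb(arg, 0);				/* pretend the read finished OK */
	return NULL;
}

int main(void)
{
	struct rb0_cbdata cbdata = { .drive = 0 };
	pthread_t t;

	init_completion(&cbdata.complete);
	pthread_create(&t, NULL, io_thread, &cbdata);	/* "submit_bio()" */
	wait_for_completion(&cbdata.complete);
	pthread_join(t, NULL);
	printf("drive %d finished, error %d\n", cbdata.drive, cbdata.error);
	return 0;
}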
@@ -3827,7 +3859,7 @@ static int floppy_revalidate(struct gendisk *disk)
UDRS->generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
- res = __floppy_read_block_0(opened_bdev[drive]);
+ res = __floppy_read_block_0(opened_bdev[drive], drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..66e8c3b94ef3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct page *page = NULL;
- int i, ret = 0;
+ int ret = 0;
if (lo->transfer != transfer_none) {
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
do_lo_send = do_lo_send_direct_write;
}
- bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_send(lo, bvec, pos, page);
+ bio_for_each_segment(bvec, bio, iter) {
+ ret = do_lo_send(lo, &bvec, pos, page);
if (ret < 0)
break;
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
if (page) {
kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
ssize_t s;
- int i;
- bio_for_each_segment(bvec, bio, i) {
- s = do_lo_receive(lo, bvec, bsize, pos);
+ bio_for_each_segment(bvec, bio, iter) {
+ s = do_lo_receive(lo, &bvec, bsize, pos);
if (s < 0)
return s;
- if (s != bvec->bv_len) {
+ if (s != bvec.bv_len) {
zero_fill_bio(bio);
break;
}
- pos += bvec->bv_len;
+ pos += bvec.bv_len;
}
return 0;
}
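
These loop.c hunks are part of the immutable-biovec conversion: bio_for_each_segment() now returns a struct bio_vec by value through a bvec_iter instead of a pointer into the bio, so callers switch from bvec->bv_len to bvec.bv_len and pass &bvec where a pointer is still needed. A stripped-down illustration of by-value segment iteration; struct seg and for_each_seg() are invented stand-ins for struct bio_vec and the real iterator.

/* By-value segment iteration, in the spirit of the immutable-biovec change. */
#include <stdio.h>

struct seg {
	unsigned int len;
	unsigned int offset;
};

#define for_each_seg(sv, segs, n, i) \
	for ((i) = 0; (i) < (n) && (((sv) = (segs)[i]), 1); (i)++)

static void handle_seg(const struct seg *sv)	/* callee still takes a pointer */
{
	printf("segment: %u bytes at offset %u\n", sv->len, sv->offset);
}

int main(void)
{
	struct seg segs[] = { { 4096, 0 }, { 2048, 512 } };
	struct seg sv;			/* by value, like "struct bio_vec bvec" */
	unsigned int i;
	unsigned long pos = 0;

	for_each_seg(sv, segs, 2, i) {
		handle_seg(&sv);	/* pass &sv where a pointer is expected */
		pos += sv.len;		/* field access via '.', not '->' */
	}
	printf("total %lu bytes\n", pos);
	return 0;
}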
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
loff_t pos;
int ret;
- pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+ pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
ret = file->f_op->fallocate(file, mode, pos,
- bio->bi_size);
+ bio->bi_iter.bi_size);
if (unlikely(ret && ret != -EINVAL &&
ret != -EOPNOTSUPP))
ret = -EIO;
@@ -798,7 +799,7 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 7bc363f1ee82..eb59b1241366 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -915,7 +915,7 @@ static int mg_probe(struct platform_device *plat_dev)
/* disk reset */
if (prv_data->dev_attr == MG_STORAGE_DEV) {
- /* If POR seq. not yet finised, wait */
+ /* If POR seq. not yet finished, wait */
err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
if (err)
goto probe_err_3b;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..59c5abe32f06 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -41,10 +41,31 @@
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
-#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
-#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
-#define HW_PORT_PRIV_DMA_SZ \
- (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
+#define AHCI_RX_FIS_SZ 0x100
+#define AHCI_RX_FIS_OFFSET 0x0
+#define AHCI_IDFY_SZ ATA_SECT_SIZE
+#define AHCI_IDFY_OFFSET 0x400
+#define AHCI_SECTBUF_SZ ATA_SECT_SIZE
+#define AHCI_SECTBUF_OFFSET 0x800
+#define AHCI_SMARTBUF_SZ ATA_SECT_SIZE
+#define AHCI_SMARTBUF_OFFSET 0xC00
+/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
+#define BLOCK_DMA_ALLOC_SZ 4096
+
+/* DMA region containing command table (should be 8192 bytes) */
+#define AHCI_CMD_SLOT_SZ sizeof(struct mtip_cmd_hdr)
+#define AHCI_CMD_TBL_SZ (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
+#define AHCI_CMD_TBL_OFFSET 0x0
+
+/* DMA region per command (contains header and SGL) */
+#define AHCI_CMD_TBL_HDR_SZ 0x80
+#define AHCI_CMD_TBL_HDR_OFFSET 0x0
+#define AHCI_CMD_TBL_SGL_SZ (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
+#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
+#define CMD_DMA_ALLOC_SZ (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
+
#define HOST_CAP_NZDMA (1 << 19)
#define HOST_HSORG 0xFC
@@ -231,38 +252,45 @@ static void mtip_async_complete(struct mtip_port *port,
void *data,
int status)
{
- struct mtip_cmd *command;
+ struct mtip_cmd *cmd;
struct driver_data *dd = data;
- int cb_status = status ? -EIO : 0;
+ int unaligned, cb_status = status ? -EIO : 0;
+ void (*func)(void *, int);
if (unlikely(!dd) || unlikely(!port))
return;
- command = &port->commands[tag];
+ cmd = &port->commands[tag];
if (unlikely(status == PORT_IRQ_TF_ERR)) {
dev_warn(&port->dd->pdev->dev,
"Command tag %d failed due to TFE\n", tag);
}
+ /* Clear the active flag */
+ atomic_set(&port->commands[tag].active, 0);
+
/* Upper layer callback */
- if (likely(command->async_callback))
- command->async_callback(command->async_data, cb_status);
+ func = cmd->async_callback;
+ if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) {
- command->async_callback = NULL;
- command->comp_func = NULL;
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ cmd->sg,
+ cmd->scatter_ents,
+ cmd->direction);
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
+ func(cmd->async_data, cb_status);
+ unaligned = cmd->unaligned;
- /* Clear the allocated and active bits for the command */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
+ /* Clear the allocated bit for the command */
+ release_slot(port, tag);
- up(&port->cmd_slot);
+ if (unlikely(unaligned))
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
+ }
}
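
Both the completion path and the timeout path in mtip32xx may now race to finish the same command, so each claims the callback with cmpxchg(&cmd->async_callback, func, 0) and only the winner unmaps the DMA list and calls back into the block layer. A userspace sketch of that claim using C11 atomics; the command structure and callback are reduced to the bare minimum for the example.

/* "Claim the callback exactly once" via compare-and-swap. */
#include <stdatomic.h>
#include <stdio.h>

typedef void (*callback_t)(void *data, int status);

struct cmd {
	_Atomic(callback_t) async_callback;
	void *async_data;
};

static void upper_layer_done(void *data, int status)
{
	printf("completed '%s' with status %d\n", (char *)data, status);
}

/* Called from both the IRQ completion path and the timeout path. */
static void try_complete(struct cmd *cmd, int status)
{
	callback_t func = atomic_load(&cmd->async_callback);

	/* only the path that swaps func -> NULL runs the callback */
	if (func && atomic_compare_exchange_strong(&cmd->async_callback,
						   &func, (callback_t)0))
		func(cmd->async_data, status);
	else
		printf("lost the race, nothing to do (status %d)\n", status);
}

int main(void)
{
	struct cmd cmd = { .async_data = "tag 7" };

	atomic_store(&cmd.async_callback, upper_layer_done);
	try_complete(&cmd, 0);		/* completion path wins */
	try_complete(&cmd, -5);		/* timeout path finds it already claimed */
	return 0;
}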
/*
@@ -639,11 +667,12 @@ static void mtip_timeout_function(unsigned long int data)
{
struct mtip_port *port = (struct mtip_port *) data;
struct host_to_dev_fis *fis;
- struct mtip_cmd *command;
- int tag, cmdto_cnt = 0;
+ struct mtip_cmd *cmd;
+ int unaligned, tag, cmdto_cnt = 0;
unsigned int bit, group;
unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
+ void (*func)(void *, int);
if (unlikely(!port))
return;
@@ -673,8 +702,8 @@ static void mtip_timeout_function(unsigned long int data)
group = tag >> 5;
bit = tag & 0x1F;
- command = &port->commands[tag];
- fis = (struct host_to_dev_fis *) command->command;
+ cmd = &port->commands[tag];
+ fis = (struct host_to_dev_fis *) cmd->command;
set_bit(tag, tagaccum);
cmdto_cnt++;
@@ -688,27 +717,30 @@ static void mtip_timeout_function(unsigned long int data)
*/
writel(1 << bit, port->completed[group]);
- /* Call the async completion callback. */
- if (likely(command->async_callback))
- command->async_callback(command->async_data,
- -EIO);
- command->async_callback = NULL;
- command->comp_func = NULL;
+ /* Clear the active flag for the command */
+ atomic_set(&port->commands[tag].active, 0);
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&port->dd->pdev->dev,
- command->sg,
- command->scatter_ents,
- command->direction);
+ func = cmd->async_callback;
+ if (func &&
+ cmpxchg(&cmd->async_callback, func, 0) == func) {
- /*
- * Clear the allocated bit and active tag for the
- * command.
- */
- atomic_set(&port->commands[tag].active, 0);
- release_slot(port, tag);
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&port->dd->pdev->dev,
+ cmd->sg,
+ cmd->scatter_ents,
+ cmd->direction);
- up(&port->cmd_slot);
+ func(cmd->async_data, -EIO);
+ unaligned = cmd->unaligned;
+
+ /* Clear the allocated bit for the command. */
+ release_slot(port, tag);
+
+ if (unaligned)
+ up(&port->cmd_slot_unal);
+ else
+ up(&port->cmd_slot);
+ }
}
}
@@ -899,8 +931,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
- "Drive indicates rebuild has failed.\n");
+ "Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
fail_reason = "rebuild failed";
}
@@ -1566,6 +1599,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
}
#endif
+ /* Check security locked state */
+ if (port->identify[128] & 0x4)
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ else
+ clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
/* Demux ID.DRAT & ID.RZAT to determine trim support */
if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
@@ -1887,6 +1926,10 @@ static void mtip_dump_identify(struct mtip_port *port)
strlcpy(cbuf, (char *)(port->identify+27), 41);
dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
+ dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
+ port->identify[128],
+ port->identify[128] & 0x4 ? "(LOCKED)" : "");
+
if (mtip_hw_get_capacity(port->dd, &sectors))
dev_info(&port->dd->pdev->dev,
"Capacity: %llu sectors (%llu MB)\n",
@@ -3313,6 +3356,118 @@ st_out:
}
/*
+ * DMA region teardown
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ * None
+ */
+static void mtip_dma_free(struct driver_data *dd)
+{
+ int i;
+ struct mtip_port *port = dd->port;
+
+ if (port->block1)
+ dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ port->block1, port->block1_dma);
+
+ if (port->command_list) {
+ dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+ port->command_list, port->command_list_dma);
+ }
+
+ for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+ if (port->commands[i].command)
+ dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ port->commands[i].command,
+ port->commands[i].command_dma);
+ }
+}
+
+/*
+ * DMA region setup
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ * -ENOMEM Not enough free DMA region space to initialize driver
+ */
+static int mtip_dma_alloc(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ int i, rv = 0;
+ u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+ /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
+ port->block1 =
+ dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ &port->block1_dma, GFP_KERNEL);
+ if (!port->block1)
+ return -ENOMEM;
+ memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
+
+ /* Allocate dma memory for command list */
+ port->command_list =
+ dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+ &port->command_list_dma, GFP_KERNEL);
+ if (!port->command_list) {
+ dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+ port->block1, port->block1_dma);
+ port->block1 = NULL;
+ port->block1_dma = 0;
+ return -ENOMEM;
+ }
+ memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
+
+ /* Setup all pointers into first DMA region */
+ port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
+ port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET;
+ port->identify = port->block1 + AHCI_IDFY_OFFSET;
+ port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET;
+ port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET;
+ port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET;
+ port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
+ port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
+
+ /* Setup per command SGL DMA region */
+
+ /* Point the command headers at the command tables */
+ for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+ port->commands[i].command =
+ dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+ &port->commands[i].command_dma, GFP_KERNEL);
+ if (!port->commands[i].command) {
+ rv = -ENOMEM;
+ mtip_dma_free(dd);
+ return rv;
+ }
+ memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
+
+ port->commands[i].command_header = port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * i);
+ port->commands[i].command_header_dma =
+ dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * i);
+
+ if (host_cap_64)
+ port->commands[i].command_header->ctbau =
+ __force_bit2int cpu_to_le32(
+ (port->commands[i].command_dma >> 16) >> 16);
+
+ port->commands[i].command_header->ctba =
+ __force_bit2int cpu_to_le32(
+ port->commands[i].command_dma & 0xFFFFFFFF);
+
+ sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+
+ /* Mark command as currently inactive */
+ atomic_set(&dd->port->commands[i].active, 0);
+ }
+ return 0;
+}
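
Instead of one huge coherent allocation per port, the driver now allocates a small "block1" region and derives the RX FIS, IDENTIFY, log and SMART buffers from it by fixed offsets, plus one separately allocated region per command. The carving itself is plain base-plus-offset arithmetic applied to both the CPU and the DMA address; a malloc-based illustration using the offset macros defined in the patch (the bus address is faked, since there is no dmam_alloc_coherent() in userspace):

/* Base-plus-offset carving of one allocation, as mtip_dma_alloc() does. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define AHCI_RX_FIS_OFFSET	0x0
#define AHCI_IDFY_OFFSET	0x400
#define AHCI_SECTBUF_OFFSET	0x800
#define AHCI_SMARTBUF_OFFSET	0xC00
#define BLOCK_DMA_ALLOC_SZ	4096

int main(void)
{
	unsigned char *block1 = malloc(BLOCK_DMA_ALLOC_SZ);
	unsigned long long block1_dma = 0xd0000000ULL;	/* pretend bus address */

	if (!block1)
		return 1;
	memset(block1, 0, BLOCK_DMA_ALLOC_SZ);

	/* every sub-buffer is the same offset into both address spaces */
	void *rxfis     = block1 + AHCI_RX_FIS_OFFSET;
	void *identify  = block1 + AHCI_IDFY_OFFSET;
	void *log_buf   = block1 + AHCI_SECTBUF_OFFSET;
	void *smart_buf = block1 + AHCI_SMARTBUF_OFFSET;

	printf("identify at %p, dma 0x%llx\n",
	       identify, block1_dma + AHCI_IDFY_OFFSET);
	printf("rxfis %p, log %p, smart %p\n", rxfis, log_buf, smart_buf);

	free(block1);
	return 0;
}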
+
+/*
* Called once for each card.
*
* @dd Pointer to the driver data structure.
@@ -3370,83 +3525,10 @@ static int mtip_hw_init(struct driver_data *dd)
dd->port->mmio = dd->mmio + PORT_OFFSET;
dd->port->dd = dd;
- /* Allocate memory for the command list. */
- dd->port->command_list =
- dmam_alloc_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- &dd->port->command_list_dma,
- GFP_KERNEL);
- if (!dd->port->command_list) {
- dev_err(&dd->pdev->dev,
- "Memory allocation: command list\n");
- rv = -ENOMEM;
+ /* DMA allocations */
+ rv = mtip_dma_alloc(dd);
+ if (rv < 0)
goto out1;
- }
-
- /* Clear the memory we have allocated. */
- memset(dd->port->command_list,
- 0,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
-
- /* Setup the addresse of the RX FIS. */
- dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
- dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
-
- /* Setup the address of the command tables. */
- dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
- dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
-
- /* Setup the address of the identify data. */
- dd->port->identify = dd->port->command_table +
- HW_CMD_TBL_AR_SZ;
- dd->port->identify_dma = dd->port->command_tbl_dma +
- HW_CMD_TBL_AR_SZ;
-
- /* Setup the address of the sector buffer - for some non-ncq cmds */
- dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
- dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
-
- /* Setup the address of the log buf - for read log command */
- dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
- dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
-
- /* Setup the address of the smart buf - for smart read data command */
- dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
- dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
-
-
- /* Point the command headers at the command tables. */
- for (i = 0; i < num_command_slots; i++) {
- dd->port->commands[i].command_header =
- dd->port->command_list +
- (sizeof(struct mtip_cmd_hdr) * i);
- dd->port->commands[i].command_header_dma =
- dd->port->command_list_dma +
- (sizeof(struct mtip_cmd_hdr) * i);
-
- dd->port->commands[i].command =
- dd->port->command_table + (HW_CMD_TBL_SZ * i);
- dd->port->commands[i].command_dma =
- dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
-
- if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
- dd->port->commands[i].command_header->ctbau =
- __force_bit2int cpu_to_le32(
- (dd->port->commands[i].command_dma >> 16) >> 16);
- dd->port->commands[i].command_header->ctba =
- __force_bit2int cpu_to_le32(
- dd->port->commands[i].command_dma & 0xFFFFFFFF);
-
- /*
- * If this is not done, a bug is reported by the stock
- * FC11 i386. Due to the fact that it has lots of kernel
- * debugging enabled.
- */
- sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
-
- /* Mark all commands as currently inactive.*/
- atomic_set(&dd->port->commands[i].active, 0);
- }
/* Setup the pointers to the extended s_active and CI registers. */
for (i = 0; i < dd->slot_groups; i++) {
@@ -3594,12 +3676,8 @@ out3:
out2:
mtip_deinit_port(dd->port);
+ mtip_dma_free(dd);
- /* Free the command/command header memory. */
- dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- dd->port->command_list,
- dd->port->command_list_dma);
out1:
/* Free the memory allocated for the for structure. */
kfree(dd->port);
@@ -3622,7 +3700,8 @@ static int mtip_hw_exit(struct driver_data *dd)
* saves its state.
*/
if (!dd->sr) {
- if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+ if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
if (mtip_standby_immediate(dd->port))
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
@@ -3641,11 +3720,9 @@ static int mtip_hw_exit(struct driver_data *dd)
irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
- /* Free the command/command header memory. */
- dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
- dd->port->command_list,
- dd->port->command_list_dma);
+ /* Free dma regions */
+ mtip_dma_free(dd);
+
/* Free the memory allocated for the for structure. */
kfree(dd->port);
dd->port = NULL;
@@ -3962,8 +4039,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
{
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
- struct bio_vec *bvec;
- int i, nents = 0;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ int nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +4071,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
bio_sectors(bio)));
return;
}
@@ -4006,7 +4084,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
dd->unal_qdepth) {
- if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ if (bio->bi_iter.bi_sector % 8 != 0)
+ /* Unaligned on 4k boundaries */
unaligned = 1;
else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
unaligned = 1;
@@ -4025,17 +4104,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg[nents],
- bvec->bv_page,
- bvec->bv_len,
- bvec->bv_offset);
+ bvec.bv_page,
+ bvec.bv_len,
+ bvec.bv_offset);
nents++;
}
/* Issue the read/write. */
mtip_hw_submit_io(dd,
- bio->bi_sector,
+ bio->bi_iter.bi_sector,
bio_sectors(bio),
nents,
tag,
@@ -4145,6 +4224,7 @@ skip_create_disk:
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
+ blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
/*
* write back cache is not supported in the device. FUA depends on
@@ -4430,7 +4510,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
- cpu_to_node(smp_processor_id()), smp_processor_id());
+ cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
@@ -4547,7 +4627,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (rv) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
- goto block_initialize_err;
+ goto msi_initialize_err;
}
/* Initialize the block layer. */
@@ -4577,6 +4657,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
block_initialize_err:
pci_disable_msi(pdev);
+
+msi_initialize_err:
if (dd->isr_workq) {
flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 9be7a1582ad3..ffb955e7ccb9 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
/* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS 8
+#define MTIP_MAX_UNALIGNED_SLOTS 2
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
@@ -69,7 +69,7 @@
* Maximum number of scatter gather entries
* a single command may have.
*/
-#define MTIP_MAX_SG 128
+#define MTIP_MAX_SG 504
/*
* Maximum number of slot groups (Command Issue & s_active registers)
@@ -92,7 +92,7 @@
/* Driver name and version strings */
#define MTIP_DRV_NAME "mtip32xx"
-#define MTIP_DRV_VERSION "1.2.6os3"
+#define MTIP_DRV_VERSION "1.3.1"
/* Maximum number of minor device numbers per device. */
#define MTIP_MAX_MINORS 16
@@ -391,15 +391,13 @@ struct mtip_port {
*/
dma_addr_t rxfis_dma;
/*
- * Pointer to the beginning of the command table memory as used
- * by the driver.
+ * Pointer to the DMA region for RX Fis, Identify, RLE10, and SMART
*/
- void *command_table;
+ void *block1;
/*
- * Pointer to the beginning of the command table memory as used
- * by the DMA.
+ * DMA address of region for RX Fis, Identify, RLE10, and SMART
*/
- dma_addr_t command_tbl_dma;
+ dma_addr_t block1_dma;
/*
* Pointer to the beginning of the identify data memory as used
* by the driver.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_segment(bvec, req, iter) {
flags = 0;
- if (!rq_iter_last(req, iter))
+ if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
- result = sock_send_bvec(nbd, bvec, flags);
+ nbd->disk->disk_name, req, bvec.bv_len);
+ result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
nbd->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(nbd, bvec);
+ result = sock_recv_bvec(nbd, &bvec);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return req;
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- nbd->disk->disk_name, req, bvec->bv_len);
+ nbd->disk->disk_name, req, bvec.bv_len);
}
}
return req;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 83a598ebb65a..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
+ end_cmd(rq->special);
}
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
-}
-
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,11 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
+ if (bs > PAGE_SIZE) {
+ pr_warn("null_blk: invalid block size\n");
+ pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+ bs = PAGE_SIZE;
}
-#endif
if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
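null_blk's timer completion path now reverses the drained lock-free list so commands finish in submission order. A small sketch of that drain pattern, using the cq/ll_list names from the patch context:

	/* Sketch: drain an llist and restore FIFO order before completing.
	 * llist_del_all() returns the nodes in LIFO (push) order. */
	struct llist_node *entry = llist_del_all(&cq->list);

	entry = llist_reverse_order(entry);
	while (entry) {
		struct nullb_cmd *cmd =
			llist_entry(entry, struct nullb_cmd, ll_list);

		entry = entry->next;	/* read next before end_cmd() frees cmd */
		end_cmd(cmd);
	}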
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..da085ff10d25 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ u32 result;
+ int status;
+ void *ctx;
+};
/*
* An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
+ char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
+ u16 qid;
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
unsigned long timeout;
+ int aborted;
};
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
info[cmdid].fn = handler;
info[cmdid].ctx = ctx;
info[cmdid].timeout = jiffies + timeout;
+ info[cmdid].aborted = 0;
return cmdid;
}
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
static void special_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
return;
if (ctx == CMD_CTX_FLUSH)
return;
+ if (ctx == CMD_CTX_ABORT) {
+ ++dev->abort_limit;
+ return;
+ }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(&dev->pci_dev->dev,
"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}
+static void async_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -441,104 +471,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
return total_len;
}
-struct nvme_bio_pair {
- struct bio b1, b2, *parent;
- struct bio_vec *bv1, *bv2;
- int err;
- atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
- struct nvme_bio_pair *bp = bio->bi_private;
-
- if (err)
- bp->err = err;
-
- if (atomic_dec_and_test(&bp->cnt)) {
- bio_endio(bp->parent, bp->err);
- kfree(bp->bv1);
- kfree(bp->bv2);
- kfree(bp);
- }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
- int len, int offset)
-{
- struct nvme_bio_pair *bp;
-
- BUG_ON(len > bio->bi_size);
- BUG_ON(idx > bio->bi_vcnt);
-
- bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
- if (!bp)
- return NULL;
- bp->err = 0;
-
- bp->b1 = *bio;
- bp->b2 = *bio;
-
- bp->b1.bi_size = len;
- bp->b2.bi_size -= len;
- bp->b1.bi_vcnt = idx;
- bp->b2.bi_idx = idx;
- bp->b2.bi_sector += len >> 9;
-
- if (offset) {
- bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv1)
- goto split_fail_1;
-
- bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
- GFP_ATOMIC);
- if (!bp->bv2)
- goto split_fail_2;
-
- memcpy(bp->bv1, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
- memcpy(bp->bv2, bio->bi_io_vec,
- bio->bi_max_vecs * sizeof(struct bio_vec));
-
- bp->b1.bi_io_vec = bp->bv1;
- bp->b2.bi_io_vec = bp->bv2;
- bp->b2.bi_io_vec[idx].bv_offset += offset;
- bp->b2.bi_io_vec[idx].bv_len -= offset;
- bp->b1.bi_io_vec[idx].bv_len = offset;
- bp->b1.bi_vcnt++;
- } else
- bp->bv1 = bp->bv2 = NULL;
-
- bp->b1.bi_private = bp;
- bp->b2.bi_private = bp;
-
- bp->b1.bi_end_io = nvme_bio_pair_endio;
- bp->b2.bi_end_io = nvme_bio_pair_endio;
-
- bp->parent = bio;
- atomic_set(&bp->cnt, 2);
-
- return bp;
-
- split_fail_2:
- kfree(bp->bv1);
- split_fail_1:
- kfree(bp);
- return NULL;
-}
-
static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
- int idx, int len, int offset)
+ int len)
{
- struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
- if (!bp)
+ struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+ if (!split)
return -ENOMEM;
+ bio_chain(split, bio);
+
if (bio_list_empty(&nvmeq->sq_cong))
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, &bp->b1);
- bio_list_add(&nvmeq->sq_cong, &bp->b2);
+ bio_list_add(&nvmeq->sq_cong, split);
+ bio_list_add(&nvmeq->sq_cong, bio);
return 0;
}
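The hunk above drops the hand-rolled nvme_bio_pair machinery in favor of the generic bio_split()/bio_chain() helpers. A minimal sketch of the pattern; requeue_bio() is a placeholder for the driver's sq_cong handling:

	/* Sketch: split a bio at a byte boundary and chain the halves so the
	 * parent bio completes only after both pieces have completed. */
	struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);

	if (!split)
		return -ENOMEM;
	bio_chain(split, bio);		/* parent waits for the split part */

	requeue_bio(split);		/* resubmit both halves */
	requeue_bio(bio);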
@@ -550,41 +495,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
- struct bio_vec *bvec, *bvprv = NULL;
+ struct bio_vec bvec, bvprv;
+ struct bvec_iter iter;
struct scatterlist *sg = NULL;
- int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+ int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+ int first = 1;
if (nvmeq->dev->stripe_size)
split_len = nvmeq->dev->stripe_size -
- ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+ ((bio->bi_iter.bi_sector << 9) &
+ (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- bio_for_each_segment(bvec, bio, i) {
- if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
- sg->length += bvec->bv_len;
+ bio_for_each_segment(bvec, bio, iter) {
+ if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+ sg->length += bvec.bv_len;
} else {
- if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- return nvme_split_and_submit(bio, nvmeq, i,
- length, 0);
+ if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+ return nvme_split_and_submit(bio, nvmeq,
+ length);
sg = sg ? sg + 1 : iod->sg;
- sg_set_page(sg, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset);
+ sg_set_page(sg, bvec.bv_page,
+ bvec.bv_len, bvec.bv_offset);
nsegs++;
}
- if (split_len - length < bvec->bv_len)
- return nvme_split_and_submit(bio, nvmeq, i, split_len,
- split_len - length);
- length += bvec->bv_len;
+ if (split_len - length < bvec.bv_len)
+ return nvme_split_and_submit(bio, nvmeq, split_len);
+ length += bvec.bv_len;
bvprv = bvec;
+ first = 0;
}
iod->nents = nsegs;
sg_mark_end(sg);
if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- BUG_ON(length != bio->bi_size);
+ BUG_ON(length != bio->bi_iter.bi_size);
return length;
}
@@ -608,8 +556,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
iod->npages = 0;
range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +622,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
}
result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
iod->private = bio;
@@ -723,7 +671,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@ -775,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return 0;
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
@@ -886,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
return cmdinfo.status;
}
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd,
+ struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+ int cmdid;
+
+ cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmdinfo->status = -EINTR;
+ cmd->common.command_id = cmdid;
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
+}
+
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+ struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+ return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ ADMIN_TIMEOUT);
+}
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
int status;
@@ -1002,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @queue: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+ int a_cmdid;
+ struct nvme_command cmd;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (!nvmeq->qid || info[cmdid].aborted) {
+ if (work_busy(&dev->reset_work))
+ return;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "I/O %d QID %d timeout, reset controller\n", cmdid,
+ nvmeq->qid);
+ dev->reset_workfn = nvme_reset_failed_dev;
+ queue_work(nvme_workq, &dev->reset_work);
+ return;
+ }
+
+ if (!dev->abort_limit)
+ return;
+
+ a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ ADMIN_TIMEOUT);
+ if (a_cmdid < 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = cmdid;
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+ cmd.abort.command_id = a_cmdid;
+
+ --dev->abort_limit;
+ info[cmdid].aborted = 1;
+ info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
+ nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
* nvme_cancel_ios - Cancel outstanding I/Os
* @queue: The queue to cancel I/Os on
* @timeout: True to only cancel I/Os which have timed out
@@ -1024,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ if (timeout && nvmeq->dev->initialized) {
+ nvme_abort_cmd(cmdid, nvmeq);
+ continue;
+ }
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
}
@@ -1046,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--) {
nvme_free_queue(dev->queues[i]);
dev->queue_count--;
dev->queues[i] = NULL;
}
}
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq - queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
+ int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
if (nvmeq->q_suspended) {
spin_unlock_irq(&nvmeq->q_lock);
- return;
+ return 1;
}
nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -1073,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
+ return 0;
+}
+
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, false);
spin_unlock_irq(&nvmeq->q_lock);
}
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+
+ if (!nvmeq)
+ return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+ /* Don't tell the adapter to delete the admin queue.
+ * Don't tell a removed adapter to delete IO queues. */
+ if (qid && readl(&dev->bar->csts) != -1) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+ nvme_clear_queue(nvmeq);
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
@@ -1107,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
+ snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+ dev->instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
@@ -1134,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
+ nvme_irq_check, nvme_irq, IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+ IRQF_SHARED, name, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1149,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
@@ -1169,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
@@ -1287,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
return result;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, 0);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
}
@@ -1569,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case SG_IO:
+ return nvme_sg_io32(ns, arg);
+ }
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_get(&dev->kref);
+ return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_put(&dev->kref, nvme_free_dev);
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1596,13 +1682,24 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
static int nvme_kthread(void *data)
{
- struct nvme_dev *dev;
+ struct nvme_dev *dev, *next;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
+ list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+ dev->initialized) {
+ if (work_busy(&dev->reset_work))
+ continue;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "Failed status, reset controller\n");
+ dev->reset_workfn = nvme_reset_failed_dev;
+ queue_work(nvme_workq, &dev->reset_work);
+ continue;
+ }
for (i = 0; i < dev->queue_count; i++) {
struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
@@ -1623,33 +1720,6 @@ static int nvme_kthread(void *data)
return 0;
}
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
static void nvme_config_discard(struct nvme_ns *ns)
{
u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1683,7 +1753,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(NVME_MINORS);
+ disk = alloc_disk(0);
if (!disk)
goto out_free_queue;
ns->ns_id = nsid;
@@ -1696,12 +1766,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -1717,15 +1787,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1741,11 +1802,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
- return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, vecs, nr_io_queues, size, q_depth;
@@ -1772,33 +1834,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[0].vector, adminq);
- vecs = nr_io_queues;
- for (i = 0; i < vecs; i++)
+ for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
- for (;;) {
- result = pci_enable_msix(pdev, dev->entry, vecs);
- if (result <= 0)
- break;
- vecs = result;
- }
-
- if (result < 0) {
- vecs = nr_io_queues;
- if (vecs > 32)
- vecs = 32;
- for (;;) {
- result = pci_enable_msi_block(pdev, vecs);
- if (result == 0) {
- for (i = 0; i < vecs; i++)
- dev->entry[i].vector = i + pdev->irq;
- break;
- } else if (result < 0) {
- vecs = 1;
- break;
- }
- vecs = result;
+ vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
+ if (vecs < 0) {
+ vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
+ if (vecs < 0) {
+ vecs = 1;
+ } else {
+ for (i = 0; i < vecs; i++)
+ dev->entry[i].vector = i + pdev->irq;
}
}
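The interrupt setup above replaces an open-coded pci_enable_msix() retry loop with the range allocators, which negotiate the vector count internally. A compact sketch of the same fallback order (MSI-X, then MSI, then the single legacy vector); the entry array and the cap of 32 mirror the patch:

	/* Sketch: allocate 1..nr_io_queues vectors, preferring MSI-X. */
	int vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);

	if (vecs < 0) {
		vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
		if (vecs < 0) {
			vecs = 1;		/* fall back to the legacy IRQ */
		} else {
			int i;

			for (i = 0; i < vecs; i++)	/* MSI vectors are consecutive */
				dev->entry[i].vector = i + pdev->irq;
		}
	}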
@@ -1810,9 +1857,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
- dev->queues[0]->q_suspended = 1;
+ adminq->q_suspended = 1;
goto free_queues;
}
@@ -1821,9 +1868,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
nvme_free_queue(nvmeq);
dev->queue_count--;
@@ -1864,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return 0;
free_queues:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 1);
return result;
}
@@ -1876,6 +1923,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = dev->pci_dev;
int res;
unsigned nn, i;
struct nvme_ns *ns;
@@ -1885,8 +1933,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
@@ -1899,13 +1946,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
+ dev->abort_limit = ctrl->acl + 1;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
- (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (pdev->device == 0x0953) && ctrl->vs[3])
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
@@ -1953,16 +2001,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto disable;
- pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
-
- dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ if (readl(&dev->bar->csts) == -1) {
+ result = -ENODEV;
+ goto unmap;
+ }
+ dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
+ unmap:
+ iounmap(dev->bar);
+ dev->bar = NULL;
disable:
pci_release_regions(pdev);
disable_pci:
@@ -1980,37 +2033,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
+ pci_release_regions(dev->pci_dev);
}
- pci_release_regions(dev->pci_dev);
if (pci_is_enabled(dev->pci_dev))
pci_disable_device(dev->pci_dev);
}
+struct nvme_delq_ctx {
+ struct task_struct *waiter;
+ struct kthread_worker *worker;
+ atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+ dq->waiter = current;
+ mb();
+
+ for (;;) {
+ set_current_state(TASK_KILLABLE);
+ if (!atomic_read(&dq->refcount))
+ break;
+ if (!schedule_timeout(ADMIN_TIMEOUT) ||
+ fatal_signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+
+ nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_queue(dev, 0);
+
+ send_sig(SIGKILL, dq->worker->task, 1);
+ flush_kthread_worker(dq->worker);
+ return;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_dec(&dq->refcount);
+ if (dq->waiter)
+ wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_inc(&dq->refcount);
+ return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+ struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+ nvme_clear_queue(nvmeq);
+ nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+ kthread_work_func_t fn)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ init_kthread_work(&nvmeq->cmdinfo.work, fn);
+ return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+ nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ int status = nvmeq->cmdinfo.status;
+
+ if (!status)
+ status = nvme_delete_cq(nvmeq);
+ if (status)
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+ nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ allow_signal(SIGKILL);
+ if (nvme_delete_sq(nvmeq))
+ nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i;
+ DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ struct nvme_delq_ctx dq;
+ struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+ &worker, "nvme%d", dev->instance);
+
+ if (IS_ERR(kworker_task)) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to create queue del task\n");
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ return;
+ }
+
+ dq.waiter = NULL;
+ atomic_set(&dq.refcount, 0);
+ dq.worker = &worker;
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (nvme_suspend_queue(nvmeq))
+ continue;
+ nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+ nvmeq->cmdinfo.worker = dq.worker;
+ init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+ queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+ }
+ nvme_wait_dq(&dq, dev);
+ kthread_stop(kworker_task);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_disable_queue(dev, i);
+ dev->initialized = 0;
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
- if (dev->bar)
+ if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_suspend_queue(nvmeq);
+ nvme_clear_queue(nvmeq);
+ }
+ } else {
+ nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
+ nvme_disable_queue(dev, 0);
+ }
nvme_dev_unmap(dev);
}
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ if (ns->disk->flags & GENHD_FL_UP)
+ del_gendisk(ns->disk);
+ if (!blk_queue_dying(ns->queue))
+ blk_cleanup_queue(ns->queue);
}
}
@@ -2067,14 +2266,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ put_disk(ns->disk);
+ kfree(ns);
+ }
+}
+
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- nvme_dev_remove(dev);
- nvme_dev_shutdown(dev);
- nvme_free_queues(dev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
+
+ nvme_free_namespaces(dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2138,6 +2345,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
disable:
+ nvme_disable_queue(dev, 0);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
@@ -2146,6 +2354,77 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
}
+static int nvme_remove_dead_ctrl(void *arg)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_get_drvdata(pdev))
+ pci_stop_and_remove_bus_device(pdev);
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+ int i;
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+ nvme_dev_remove(dev);
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+ int ret;
+
+ ret = nvme_dev_start(dev);
+ if (ret && ret != -EBUSY)
+ return ret;
+ if (ret == -EBUSY) {
+ spin_lock(&dev_list_lock);
+ dev->reset_workfn = nvme_remove_disks;
+ queue_work(nvme_workq, &dev->reset_work);
+ spin_unlock(&dev_list_lock);
+ }
+ dev->initialized = 1;
+ return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+ nvme_dev_shutdown(dev);
+ if (nvme_dev_resume(dev)) {
+ dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ kref_get(&dev->kref);
+ if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+ dev->instance))) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to start controller remove task\n");
+ kref_put(&dev->kref, nvme_free_dev);
+ }
+ }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+ nvme_dev_reset(dev);
+}
+
+static void nvme_reset_workfn(struct work_struct *work)
+{
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+ dev->reset_workfn(work);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2164,8 +2443,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
+ dev->reset_workfn = nvme_reset_failed_dev;
+ INIT_WORK(&dev->reset_work, nvme_reset_workfn);
dev->pci_dev = pdev;
-
+ pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
goto free;
@@ -2181,6 +2462,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2195,15 +2477,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
- kref_init(&dev->kref);
+ dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2214,10 +2497,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_shutdown(dev);
+}
+
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
+ nvme_dev_remove(dev);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev, 0);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2241,13 +2542,12 @@ static int nvme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- int ret;
- ret = nvme_dev_start(ndev);
- /* XXX: should remove gendisks if resume fails */
- if (ret)
- nvme_free_queues(ndev);
- return ret;
+ if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+ ndev->reset_workfn = nvme_reset_failed_dev;
+ queue_work(nvme_workq, &ndev->reset_work);
+ }
+ return 0;
}
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2274,6 +2574,7 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
+ .shutdown = nvme_shutdown,
.driver = {
.pm = &nvme_dev_pm_ops,
},
@@ -2288,9 +2589,14 @@ static int __init nvme_init(void)
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
+ result = -ENOMEM;
+ nvme_workq = create_singlethread_workqueue("nvme");
+ if (!nvme_workq)
+ goto kill_kthread;
+
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
- goto kill_kthread;
+ goto kill_workq;
else if (result > 0)
nvme_major = result;
@@ -2301,6 +2607,8 @@ static int __init nvme_init(void)
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+ destroy_workqueue(nvme_workq);
kill_kthread:
kthread_stop(nvme_thread);
return result;
@@ -2310,6 +2618,7 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ destroy_workqueue(nvme_workq);
kthread_stop(nvme_thread);
}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+ compat_uint_t iov_base;
+ compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+ sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+ sg_iovec32_t __user *iov32 = dxferp;
+ int i;
+
+ for (i = 0; i < iovec_count; i++) {
+ u32 base, len;
+
+ if (get_user(base, &iov32[i].iov_base) ||
+ get_user(len, &iov32[i].iov_len) ||
+ put_user(compat_ptr(base), &iov[i].iov_base) ||
+ put_user(len, &iov[i].iov_len))
+ return -EFAULT;
+ }
+
+ if (put_user(iov, &sgio->dxferp))
+ return -EFAULT;
+ return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+ sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+ sg_io_hdr_t __user *sgio;
+ u16 iovec_count;
+ u32 data;
+ void __user *dxferp;
+ int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return -EINVAL;
+
+ if (get_user(iovec_count, &sgio32->iovec_count))
+ return -EFAULT;
+
+ {
+ void __user *top = compat_alloc_user_space(0);
+ void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+ (iovec_count * sizeof(sg_iovec_t)));
+ if (new > top)
+ return -EINVAL;
+
+ sgio = new;
+ }
+
+ /* Ok, now construct. */
+ if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+ (2 * sizeof(int)) +
+ (2 * sizeof(unsigned char)) +
+ (1 * sizeof(unsigned short)) +
+ (1 * sizeof(unsigned int))))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->dxferp))
+ return -EFAULT;
+ dxferp = compat_ptr(data);
+ if (iovec_count) {
+ if (sg_build_iovec(sgio, dxferp, iovec_count))
+ return -EFAULT;
+ } else {
+ if (put_user(dxferp, &sgio->dxferp))
+ return -EFAULT;
+ }
+
+ {
+ unsigned char __user *cmdp;
+ unsigned char __user *sbp;
+
+ if (get_user(data, &sgio32->cmdp))
+ return -EFAULT;
+ cmdp = compat_ptr(data);
+
+ if (get_user(data, &sgio32->sbp))
+ return -EFAULT;
+ sbp = compat_ptr(data);
+
+ if (put_user(cmdp, &sgio->cmdp) ||
+ put_user(sbp, &sgio->sbp))
+ return -EFAULT;
+ }
+
+ if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+ 3 * sizeof(int)))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->usr_ptr))
+ return -EFAULT;
+ if (put_user(compat_ptr(data), &sgio->usr_ptr))
+ return -EFAULT;
+
+ err = nvme_sg_io(ns, sgio);
+ if (err >= 0) {
+ void __user *datap;
+
+ if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+ sizeof(int)) ||
+ get_user(datap, &sgio->usr_ptr) ||
+ put_user((u32)(unsigned long)datap,
+ &sgio32->usr_ptr) ||
+ copy_in_user(&sgio32->status, &sgio->status,
+ (4 * sizeof(unsigned char)) +
+ (2 * sizeof(unsigned short)) +
+ (3 * sizeof(int))))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
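The compat SG_IO path above is mostly about widening 32-bit user pointers into a native sg_io_hdr on the compat stack. The core conversion step, shown in isolation with the cmdp field as the example (hdr32/hdr stand for the 32-bit and native headers):

	/* Sketch: widen a 32-bit user pointer from a compat structure. */
	compat_uint_t data;
	unsigned char __user *cmdp;

	if (get_user(data, &hdr32->cmdp))
		return -EFAULT;
	cmdp = compat_ptr(data);	/* now usable as a native __user pointer */
	if (put_user(cmdp, &hdr->cmdp))
		return -EFAULT;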
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 4a27b1de5fcb..2ce3dfd7e6b9 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -581,7 +581,7 @@ static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count,
if (hdr.magic != PG_MAGIC)
return -EINVAL;
- if (hdr.dlen > PG_MAX_DATA)
+ if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
return -EINVAL;
if ((count - hs) > PG_MAX_DATA)
return -EINVAL;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..a2af73db187b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
for (;;) {
tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_sector)
+ if (s <= tmp->bio->bi_iter.bi_sector)
next = n->rb_left;
else
next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
n = next;
}
- if (s > tmp->bio->bi_sector) {
+ if (s > tmp->bio->bi_iter.bi_sector) {
tmp = pkt_rbtree_next(tmp);
if (!tmp)
return NULL;
}
- BUG_ON(s > tmp->bio->bi_sector);
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
return tmp;
}
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
{
struct rb_node **p = &pd->bio_queue.rb_node;
struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_sector;
+ sector_t s = node->bio->bi_iter.bi_sector;
struct pkt_rb_node *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_sector)
+ if (s < tmp->bio->bi_iter.bi_sector)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -706,7 +706,9 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
WRITE : READ, __GFP_WAIT);
if (cgc->buflen) {
- if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+ ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ __GFP_WAIT);
+ if (ret)
goto out;
}
@@ -857,7 +859,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
spin_lock(&pd->iosched.lock);
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_sector == pd->iosched.last_write))
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +891,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
continue;
if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads += bio->bi_size >> 10;
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +982,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_sector, err);
+ (unsigned long long)bio->bi_iter.bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1026,8 +1030,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1058,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
- bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1150,8 +1155,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
pkt->bio->bi_rw = REQ_WRITE;
- pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_iter.bi_sector = new_sector;
+ pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1218,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = get_zone(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1252,14 +1257,14 @@ try_next_bio:
pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- pkt_dbg(2, pd, "found zone=%llx\n",
- (unsigned long long)get_zone(bio->bi_sector, pd));
- if (get_zone(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1298,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2340,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
pkt_bio_finished(pd);
}
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct pktcdvd_device *pd;
- char b[BDEVNAME_SIZE];
+ struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+ struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+ psd->pd = pd;
+ psd->bio = bio;
+ cloned_bio->bi_bdev = pd->bdev;
+ cloned_bio->bi_private = psd;
+ cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+ pd->stats.secs_r += bio_sectors(bio);
+ pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd = q->queuedata;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
struct pkt_rb_node *node;
- pd = q->queuedata;
- if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
- goto end_io;
- }
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
- struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- pkt_notice(pd, "WRITE for ro device (%llu)\n",
- (unsigned long long)bio->bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- pkt_err(pd, "wrong bio size\n");
- goto end_io;
- }
-
- blk_queue_bounce(q, &bio);
-
- zone = get_zone(bio->bi_sector, pd);
- pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
- (unsigned long long)bio->bi_sector,
- (unsigned long long)bio_end_sector(bio));
-
- /* Check if we have to split the bio */
- {
- struct bio_pair *bp;
- sector_t last_zone;
- int first_sectors;
-
- last_zone = get_zone(bio_end_sector(bio) - 1, pd);
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
- first_sectors = last_zone - bio->bi_sector;
- bp = bio_split(bio, first_sectors);
- BUG_ON(!bp);
- pkt_make_request(q, &bp->bio1);
- pkt_make_request(q, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
- }
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
/*
* If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2376,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
@@ -2476,6 +2436,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
*/
wake_up(&pd->wqueue);
}
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct pktcdvd_device *pd;
+ char b[BDEVNAME_SIZE];
+ struct bio *split;
+
+ pd = q->queuedata;
+ if (!pd) {
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
+ goto end_io;
+ }
+
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ (unsigned long long)bio_end_sector(bio));
+
+ /*
+ * Clone READ bios so we can have our own bi_end_io callback.
+ */
+ if (bio_data_dir(bio) == READ) {
+ pkt_make_request_read(pd, bio);
+ return;
+ }
+
+ if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
+ goto end_io;
+ }
+
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+ pkt_err(pd, "wrong bio size\n");
+ goto end_io;
+ }
+
+ blk_queue_bounce(q, &bio);
+
+ do {
+ sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+ sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+ if (last_zone != zone) {
+ BUG_ON(last_zone != zone + pd->settings.size);
+
+ split = bio_split(bio, last_zone -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ pkt_make_request_write(q, split);
+ } while (split != bio);
+
return;
end_io:
bio_io_error(bio);
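pkt_make_request_read() keeps the long-standing trick of cloning READ bios so the driver gets its own completion callback; the refactor only moves it into a helper. A minimal sketch of the clone-and-redirect pattern using the bio_clone()/bi_end_io calling convention of this kernel; the function names are illustrative:

	/* Sketch: clone a bio, aim it at the lower device, and take over
	 * completion so driver accounting runs before the original ends. */
	static void stacked_end_io(struct bio *clone, int err)
	{
		struct bio *orig = clone->bi_private;

		bio_put(clone);
		bio_endio(orig, err);		/* complete the caller's bio last */
	}

	static void submit_cloned_read(struct block_device *lower, struct bio *bio)
	{
		struct bio *clone = bio_clone(bio, GFP_NOIO);

		clone->bi_bdev = lower;
		clone->bi_private = bio;
		clone->bi_end_io = stacked_end_io;
		generic_make_request(clone);
	}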
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
{
unsigned int offset = 0;
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core,
- "%s:%u: bio %u: %u segs %u sectors from %lu\n",
- __func__, __LINE__, i, bio_segments(iter.bio),
- bio_sectors(iter.bio), iter.bio->bi_sector);
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ __func__, __LINE__, i, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
- size = bvec->bv_len;
- buf = bvec_kmap_irq(bvec, &flags);
+ size = bvec.bv_len;
+ buf = bvec_kmap_irq(&bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
- flush_kernel_dcache_page(bvec->bv_page);
+ flush_kernel_dcache_page(bvec.bv_page);
bvec_kunmap_irq(buf, &flags);
i++;
}
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
#ifdef DEBUG
unsigned int n = 0;
- struct bio_vec *bv;
+ struct bio_vec bv;
struct req_iterator iter;
rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
- loff_t offset = bio->bi_sector << 9;
+ loff_t offset = bio->bi_iter.bi_sector << 9;
int error = 0;
- struct bio_vec *bvec;
- unsigned int i;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *next;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
- char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
- size_t len = bvec->bv_len, retlen;
+ char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+ size_t len = bvec.bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..34898d53395b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -41,6 +41,7 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include "rbd_types.h"
@@ -89,9 +90,9 @@ static int atomic_dec_return_safe(atomic_t *v)
}
#define RBD_DRV_NAME "rbd"
-#define RBD_DRV_NAME_LONG "rbd (rados block device)"
-#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
+#define RBD_MINORS_PER_MAJOR 256
+#define RBD_SINGLE_MAJOR_PART_SHIFT 4
#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
@@ -323,6 +324,7 @@ struct rbd_device {
int dev_id; /* blkdev unique id */
int major; /* blkdev assigned major */
+ int minor;
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
@@ -386,6 +388,17 @@ static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static struct kmem_cache *rbd_segment_name_cache;
+static int rbd_major;
+static DEFINE_IDA(rbd_dev_id_ida);
+
+/*
+ * Default to false for now, as single-major requires a userspace rbd
+ * utility of version >= 0.75.
+ */
+static bool single_major = false;
+module_param(single_major, bool, S_IRUGO);
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+
static int rbd_img_request_submit(struct rbd_img_request *img_request);
static void rbd_dev_device_release(struct device *dev);
@@ -394,18 +407,52 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
+static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
+ size_t count);
+static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
+ size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
+static int rbd_dev_id_to_minor(int dev_id)
+{
+ return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
+}
+
+static int minor_to_rbd_dev_id(int minor)
+{
+ return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
+}
+
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
+static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
+static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
&bus_attr_remove.attr,
+ &bus_attr_add_single_major.attr,
+ &bus_attr_remove_single_major.attr,
NULL,
};
-ATTRIBUTE_GROUPS(rbd_bus);
+
+static umode_t rbd_bus_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ if (!single_major &&
+ (attr == &bus_attr_add_single_major.attr ||
+ attr == &bus_attr_remove_single_major.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group rbd_bus_group = {
+ .attrs = rbd_bus_attrs,
+ .is_visible = rbd_bus_is_visible,
+};
+__ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
.name = "rbd",
@@ -1041,9 +1088,9 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
name_format = "%s.%012llx";
if (rbd_dev->image_format == 2)
name_format = "%s.%016llx";
- ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
+ ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
rbd_dev->header.object_prefix, segment);
- if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
+ if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
@@ -1109,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
*/
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
unsigned long flags;
void *buf;
- int i;
int pos = 0;
while (chain) {
- bio_for_each_segment(bv, chain, i) {
- if (pos + bv->bv_len > start_ofs) {
+ bio_for_each_segment(bv, chain, iter) {
+ if (pos + bv.bv_len > start_ofs) {
int remainder = max(start_ofs - pos, 0);
- buf = bvec_kmap_irq(bv, &flags);
+ buf = bvec_kmap_irq(&bv, &flags);
memset(buf + remainder, 0,
- bv->bv_len - remainder);
- flush_dcache_page(bv->bv_page);
+ bv.bv_len - remainder);
+ flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(buf, &flags);
}
- pos += bv->bv_len;
+ pos += bv.bv_len;
}
chain = chain->bi_next;
@@ -1173,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
unsigned int len,
gfp_t gfpmask)
{
- struct bio_vec *bv;
- unsigned int resid;
- unsigned short idx;
- unsigned int voff;
- unsigned short end_idx;
- unsigned short vcnt;
struct bio *bio;
- /* Handle the easy case for the caller */
-
- if (!offset && len == bio_src->bi_size)
- return bio_clone(bio_src, gfpmask);
-
- if (WARN_ON_ONCE(!len))
- return NULL;
- if (WARN_ON_ONCE(len > bio_src->bi_size))
- return NULL;
- if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
- return NULL;
-
- /* Find first affected segment... */
-
- resid = offset;
- bio_for_each_segment(bv, bio_src, idx) {
- if (resid < bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- voff = resid;
-
- /* ...and the last affected segment */
-
- resid += len;
- __bio_for_each_segment(bv, bio_src, end_idx, idx) {
- if (resid <= bv->bv_len)
- break;
- resid -= bv->bv_len;
- }
- vcnt = end_idx - idx + 1;
-
- /* Build the clone */
-
- bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ bio = bio_clone(bio_src, gfpmask);
if (!bio)
return NULL; /* ENOMEM */
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
- bio->bi_rw = bio_src->bi_rw;
- bio->bi_flags |= 1 << BIO_CLONED;
-
- /*
- * Copy over our part of the bio_vec, then update the first
- * and last (or only) entries.
- */
- memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
- vcnt * sizeof (struct bio_vec));
- bio->bi_io_vec[0].bv_offset += voff;
- if (vcnt > 1) {
- bio->bi_io_vec[0].bv_len -= voff;
- bio->bi_io_vec[vcnt - 1].bv_len = resid;
- } else {
- bio->bi_io_vec[0].bv_len = len;
- }
-
- bio->bi_vcnt = vcnt;
- bio->bi_size = len;
- bio->bi_idx = 0;
+ bio_advance(bio, offset);
+ bio->bi_iter.bi_size = len;
return bio;
}
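With immutable biovecs the byte-range clone above no longer copies and patches individual bio_vecs: a full clone shares the segment array, bio_advance() walks the clone's bi_iter past the leading bytes, and shrinking bi_iter.bi_size trims the tail. A hedged restatement of the resulting helper, with bounds checks left to the caller as in the patch:

    #include <linux/bio.h>

    /* Clone the byte range [offset, offset + len) of @src. */
    static struct bio *clone_sub_bio(struct bio *src, unsigned int offset,
                                     unsigned int len, gfp_t gfp)
    {
        struct bio *clone = bio_clone(src, gfp);

        if (!clone)
            return NULL;                        /* ENOMEM */

        bio_advance(clone, offset);             /* skip the leading bytes */
        clone->bi_iter.bi_size = len;           /* trim the trailing bytes */
        return clone;
    }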
@@ -1271,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
/* Build up a chain of clone bios up to the limit */
- if (!bi || off >= bi->bi_size || !len)
+ if (!bi || off >= bi->bi_iter.bi_size || !len)
return NULL; /* Nothing to clone */
end = &chain;
@@ -1283,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
}
- bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
goto out_err; /* ENOMEM */
@@ -1292,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
end = &bio->bi_next;
off += bi_size;
- if (off == bi->bi_size) {
+ if (off == bi->bi_iter.bi_size) {
bi = bi->bi_next;
off = 0;
}
@@ -1761,11 +1748,8 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_oid_len = strlen(obj_request->object_name);
- rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
- memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
-
- osd_req->r_file_layout = rbd_dev->layout; /* struct */
+ osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
return osd_req;
}
@@ -1802,11 +1786,8 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_oid_len = strlen(obj_request->object_name);
- rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
- memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
-
- osd_req->r_file_layout = rbd_dev->layout; /* struct */
+ osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
return osd_req;
}
@@ -2128,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
rbd_assert(img_request->obj_request_count > 0);
rbd_assert(which != BAD_WHICH);
rbd_assert(which < img_request->obj_request_count);
- rbd_assert(which >= img_request->next_completion);
spin_lock_irq(&img_request->completion_lock);
if (which != img_request->next_completion)
@@ -2186,7 +2166,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
if (type == OBJ_REQUEST_BIO) {
bio_list = data_desc;
- rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ rbd_assert(img_offset ==
+ bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
} else {
rbd_assert(type == OBJ_REQUEST_PAGES);
pages = data_desc;
@@ -2866,7 +2847,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
* Request sync osd watch/unwatch. The value of "start" determines
* whether a watch request is being initiated or torn down.
*/
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
+static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
@@ -2941,6 +2922,22 @@ out_cancel:
return ret;
}
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
+{
+ return __rbd_dev_header_watch_sync(rbd_dev, true);
+}
+
+static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ ret = __rbd_dev_header_watch_sync(rbd_dev, false);
+ if (ret) {
+ rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
+ ret);
+ }
+}
+
/*
* Synchronous osd object method call. Returns the number of bytes
* returned in the outbound buffer, or a negative error code.
@@ -3388,14 +3385,18 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
u64 segment_size;
/* create gendisk info */
- disk = alloc_disk(RBD_MINORS_PER_MAJOR);
+ disk = alloc_disk(single_major ?
+ (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
+ RBD_MINORS_PER_MAJOR);
if (!disk)
return -ENOMEM;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
disk->major = rbd_dev->major;
- disk->first_minor = 0;
+ disk->first_minor = rbd_dev->minor;
+ if (single_major)
+ disk->flags |= GENHD_FL_EXT_DEVT;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
@@ -3467,7 +3468,14 @@ static ssize_t rbd_major_show(struct device *dev,
return sprintf(buf, "%d\n", rbd_dev->major);
return sprintf(buf, "(none)\n");
+}
+
+static ssize_t rbd_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ return sprintf(buf, "%d\n", rbd_dev->minor);
}
static ssize_t rbd_client_id_show(struct device *dev,
@@ -3589,6 +3597,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
+static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
@@ -3602,6 +3611,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
&dev_attr_features.attr,
&dev_attr_major.attr,
+ &dev_attr_minor.attr,
&dev_attr_client_id.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
@@ -4372,21 +4382,29 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
device_unregister(&rbd_dev->dev);
}
-static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
-
/*
* Get a unique rbd identifier for the given new rbd_dev, and add
- * the rbd_dev to the global list. The minimum rbd id is 1.
+ * the rbd_dev to the global list.
*/
-static void rbd_dev_id_get(struct rbd_device *rbd_dev)
+static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
- rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
+ int new_dev_id;
+
+ new_dev_id = ida_simple_get(&rbd_dev_id_ida,
+ 0, minor_to_rbd_dev_id(1 << MINORBITS),
+ GFP_KERNEL);
+ if (new_dev_id < 0)
+ return new_dev_id;
+
+ rbd_dev->dev_id = new_dev_id;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
spin_unlock(&rbd_dev_list_lock);
- dout("rbd_dev %p given dev id %llu\n", rbd_dev,
- (unsigned long long) rbd_dev->dev_id);
+
+ dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
+
+ return 0;
}
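Switching the device-id allocator from a high-water-mark atomic counter to an IDA means released ids are reused and the id space can be capped so that an id shifted by RBD_SINGLE_MAJOR_PART_SHIFT still fits in the minor range. A hedged sketch of the allocate/free pairing, mirroring the bound used in the patch:

    #include <linux/idr.h>
    #include <linux/kdev_t.h>       /* MINORBITS */
    #include <linux/gfp.h>

    static DEFINE_IDA(example_id_ida);

    static int example_id_get(void)
    {
        /* ids in [0, 1 << (MINORBITS - 4)), so id << 4 is still a valid minor */
        return ida_simple_get(&example_id_ida, 0,
                              1 << (MINORBITS - 4), GFP_KERNEL);
    }

    static void example_id_put(int id)
    {
        ida_simple_remove(&example_id_ida, id);
    }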
/*
@@ -4395,49 +4413,13 @@ static void rbd_dev_id_get(struct rbd_device *rbd_dev)
*/
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
- struct list_head *tmp;
- int rbd_id = rbd_dev->dev_id;
- int max_id;
-
- rbd_assert(rbd_id > 0);
-
- dout("rbd_dev %p released dev id %llu\n", rbd_dev,
- (unsigned long long) rbd_dev->dev_id);
spin_lock(&rbd_dev_list_lock);
list_del_init(&rbd_dev->node);
-
- /*
- * If the id being "put" is not the current maximum, there
- * is nothing special we need to do.
- */
- if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
- spin_unlock(&rbd_dev_list_lock);
- return;
- }
-
- /*
- * We need to update the current maximum id. Search the
- * list to find out what it is. We're more likely to find
- * the maximum at the end, so search the list backward.
- */
- max_id = 0;
- list_for_each_prev(tmp, &rbd_dev_list) {
- struct rbd_device *rbd_dev;
-
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_dev->dev_id > max_id)
- max_id = rbd_dev->dev_id;
- }
spin_unlock(&rbd_dev_list_lock);
- /*
- * The max id could have been updated by rbd_dev_id_get(), in
- * which case it now accurately reflects the new maximum.
- * Be careful not to overwrite the maximum value in that
- * case.
- */
- atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
- dout(" max dev id has been reset\n");
+ ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+
+ dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
/*
@@ -4860,20 +4842,29 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
- /* generate unique id: find highest unique id, add one */
- rbd_dev_id_get(rbd_dev);
+ /* Get an id and fill in device name. */
+
+ ret = rbd_dev_id_get(rbd_dev);
+ if (ret)
+ return ret;
- /* Fill in the device name, now that we have its id. */
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
- /* Get our block major device number. */
+ /* Record our major and minor device numbers. */
- ret = register_blkdev(0, rbd_dev->name);
- if (ret < 0)
- goto err_out_id;
- rbd_dev->major = ret;
+ if (!single_major) {
+ ret = register_blkdev(0, rbd_dev->name);
+ if (ret < 0)
+ goto err_out_id;
+
+ rbd_dev->major = ret;
+ rbd_dev->minor = 0;
+ } else {
+ rbd_dev->major = rbd_major;
+ rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
+ }
/* Set up the blkdev mapping. */
@@ -4905,7 +4896,8 @@ err_out_mapping:
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
- unregister_blkdev(rbd_dev->major, rbd_dev->name);
+ if (!single_major)
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
@@ -4961,7 +4953,6 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
int ret;
- int tmp;
/*
* Get the id from the image id object. Unless there's an
@@ -4980,7 +4971,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
goto err_out_format;
if (mapping) {
- ret = rbd_dev_header_watch_sync(rbd_dev, true);
+ ret = rbd_dev_header_watch_sync(rbd_dev);
if (ret)
goto out_header_name;
}
@@ -5007,12 +4998,8 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
- if (mapping) {
- tmp = rbd_dev_header_watch_sync(rbd_dev, false);
- if (tmp)
- rbd_warn(rbd_dev, "unable to tear down "
- "watch request (%d)\n", tmp);
- }
+ if (mapping)
+ rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
@@ -5026,9 +5013,9 @@ err_out_format:
return ret;
}
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct ceph_options *ceph_opts = NULL;
@@ -5090,6 +5077,12 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = rbd_dev_device_setup(rbd_dev);
if (rc) {
+ /*
+ * rbd_dev_header_unwatch_sync() can't be moved into
+ * rbd_dev_image_release() without refactoring, see
+ * commit 1f3ef78861ac.
+ */
+ rbd_dev_header_unwatch_sync(rbd_dev);
rbd_dev_image_release(rbd_dev);
goto err_out_module;
}
@@ -5110,6 +5103,23 @@ err_out_module:
return (ssize_t)rc;
}
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ if (single_major)
+ return -EINVAL;
+
+ return do_rbd_add(bus, buf, count);
+}
+
+static ssize_t rbd_add_single_major(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ return do_rbd_add(bus, buf, count);
+}
+
static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
@@ -5117,8 +5127,8 @@ static void rbd_dev_device_release(struct device *dev)
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_dev_mapping_clear(rbd_dev);
- unregister_blkdev(rbd_dev->major, rbd_dev->name);
- rbd_dev->major = 0;
+ if (!single_major)
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
}
@@ -5149,9 +5159,9 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
}
}
-static ssize_t rbd_remove(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t do_rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct list_head *tmp;
@@ -5191,16 +5201,14 @@ static ssize_t rbd_remove(struct bus_type *bus,
if (ret < 0 || already)
return ret;
- ret = rbd_dev_header_watch_sync(rbd_dev, false);
- if (ret)
- rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
-
+ rbd_dev_header_unwatch_sync(rbd_dev);
/*
* flush remaining watch callbacks - these must be complete
* before the osd_client is shutdown
*/
dout("%s: flushing notifies", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+
/*
* Don't free anything from rbd_dev->disk until after all
* notifies are completely processed. Otherwise
@@ -5214,6 +5222,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
return count;
}
+static ssize_t rbd_remove(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ if (single_major)
+ return -EINVAL;
+
+ return do_rbd_remove(bus, buf, count);
+}
+
+static ssize_t rbd_remove_single_major(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ return do_rbd_remove(bus, buf, count);
+}
+
/*
* create control files in sysfs
* /sys/bus/rbd/...
@@ -5259,7 +5284,7 @@ static int rbd_slab_init(void)
rbd_assert(!rbd_segment_name_cache);
rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
- MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+ CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
if (rbd_segment_name_cache)
return 0;
out_err:
@@ -5295,24 +5320,45 @@ static int __init rbd_init(void)
if (!libceph_compatible(NULL)) {
rbd_warn(NULL, "libceph incompatibility (quitting)");
-
return -EINVAL;
}
+
rc = rbd_slab_init();
if (rc)
return rc;
+
+ if (single_major) {
+ rbd_major = register_blkdev(0, RBD_DRV_NAME);
+ if (rbd_major < 0) {
+ rc = rbd_major;
+ goto err_out_slab;
+ }
+ }
+
rc = rbd_sysfs_init();
if (rc)
- rbd_slab_exit();
+ goto err_out_blkdev;
+
+ if (single_major)
+ pr_info("loaded (major %d)\n", rbd_major);
else
- pr_info("loaded " RBD_DRV_NAME_LONG "\n");
+ pr_info("loaded\n");
+
+ return 0;
+err_out_blkdev:
+ if (single_major)
+ unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_slab:
+ rbd_slab_exit();
return rc;
}
static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
+ if (single_major)
+ unregister_blkdev(rbd_major, RBD_DRV_NAME);
rbd_slab_exit();
}
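Registering the shared major in rbd_init() also turns the error path into the usual goto-label unwind, where each label releases exactly the resources acquired before the failing step, in reverse order. A minimal sketch of that shape; the setup_*/teardown_* helpers are placeholders, not rbd functions:

    static int __init example_init(void)
    {
        int rc;

        rc = setup_slab();
        if (rc)
            return rc;

        rc = setup_blkdev();
        if (rc)
            goto err_out_slab;

        rc = setup_sysfs();
        if (rc)
            goto err_out_blkdev;

        return 0;

    err_out_blkdev:
        teardown_blkdev();
    err_out_slab:
        teardown_slab();
        return rc;
    }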
@@ -5322,9 +5368,8 @@ module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
-MODULE_DESCRIPTION("rados block device");
-
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
+MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
if (!card)
goto req_err;
- if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+ if (bio_end_sector(bio) > get_capacity(card->gendisk))
goto req_err;
if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
goto req_err;
}
- if (bio->bi_size == 0) {
+ if (bio->bi_iter.bi_size == 0) {
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
goto req_err;
}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_sector << 9, bio->bi_size);
+ (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
bio_dma_done_cb, bio_meta);
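The rsxx capacity check is now written with bio_end_sector(), which is just the bio's starting sector plus its length in sectors. A tiny hedged sketch of an equivalent bounds test:

    #include <linux/bio.h>

    static bool bio_fits_device(struct bio *bio, sector_t capacity)
    {
        /* bio_end_sector(bio) == bio->bi_iter.bi_sector + bio_sectors(bio) */
        return bio_end_sector(bio) <= capacity;
    }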
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
void *cb_data)
{
struct list_head dma_list[RSXX_MAX_TARGETS];
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long long addr8;
unsigned int laddr;
unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
int st;
int i;
- addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
atomic_set(n_dmas, 0);
for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
}
if (bio->bi_rw & REQ_DISCARD) {
- bv_len = bio->bi_size;
+ bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
bv_len -= RSXX_HW_BLK_SIZE;
}
} else {
- bio_for_each_segment(bvec, bio, i) {
- bv_len = bvec->bv_len;
- bv_off = bvec->bv_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ bv_len = bvec.bv_len;
+ bv_off = bvec.bv_offset;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
st = rsxx_queue_dma(card, &dma_list[tgt],
bio_data_dir(bio),
dma_off, dma_len,
- laddr, bvec->bv_page,
+ laddr, bvec.bv_page,
bv_off, cb, cb_data);
if (st)
goto bvec_err;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index eb6e1e0e8db2..a69dd93d1bd5 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3910,18 +3910,22 @@ static void skd_release_msix(struct skd_device *skdev)
struct skd_msix_entry *qentry;
int i;
- if (skdev->msix_entries == NULL)
- return;
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
+ if (skdev->msix_entries) {
+ for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
+ skdev = qentry->rsp;
+
+ if (qentry->have_irq)
+ devm_free_irq(&skdev->pdev->dev,
+ qentry->vector, qentry->rsp);
+ }
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
+ kfree(skdev->msix_entries);
}
- pci_disable_msix(skdev->pdev);
- kfree(skdev->msix_entries);
+
+ if (skdev->msix_count)
+ pci_disable_msix(skdev->pdev);
+
skdev->msix_count = 0;
skdev->msix_entries = NULL;
}
@@ -3929,12 +3933,10 @@ static void skd_release_msix(struct skd_device *skdev)
static int skd_acquire_msix(struct skd_device *skdev)
{
int i, rc;
- struct pci_dev *pdev;
- struct msix_entry *entries = NULL;
+ struct pci_dev *pdev = skdev->pdev;
+ struct msix_entry *entries;
struct skd_msix_entry *qentry;
- pdev = skdev->pdev;
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
GFP_KERNEL);
if (!entries)
@@ -3943,40 +3945,26 @@ static int skd_acquire_msix(struct skd_device *skdev)
for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
entries[i].entry = i;
- rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc < 0)
+ rc = pci_enable_msix_range(pdev, entries,
+ SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT);
+ if (rc < 0) {
+ pr_err("(%s): failed to enable MSI-X %d\n",
+ skd_name(skdev), rc);
goto msix_out;
- if (rc) {
- if (rc < SKD_MIN_MSIX_COUNT) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
- goto msix_out;
- }
- pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, rc);
-
- skdev->msix_count = rc;
- rc = pci_enable_msix(pdev, entries, skdev->msix_count);
- if (rc) {
- pr_err("(%s): failed to enable MSI-X "
- "support (%d) %d\n",
- skd_name(skdev), skdev->msix_count, rc);
- goto msix_out;
- }
}
+
+ skdev->msix_count = rc;
skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
skdev->msix_count, GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
- skdev->msix_count = 0;
pr_err("(%s): msix table allocation error\n",
skd_name(skdev));
goto msix_out;
}
- qentry = skdev->msix_entries;
for (i = 0; i < skdev->msix_count; i++) {
+ qentry = &skdev->msix_entries[i];
qentry->vector = entries[i].vector;
qentry->entry = entries[i].entry;
qentry->rsp = NULL;
@@ -3985,11 +3973,10 @@ static int skd_acquire_msix(struct skd_device *skdev)
skdev->name, __func__, __LINE__,
pci_name(pdev), skdev->name,
i, qentry->vector, qentry->entry);
- qentry++;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+ for (i = 0; i < skdev->msix_count; i++) {
qentry = &skdev->msix_entries[i];
snprintf(qentry->isr_name, sizeof(qentry->isr_name),
"%s%d-msix %s", DRV_NAME, skdev->devno,
@@ -4045,8 +4032,8 @@ RETRY_IRQ_TYPE:
case SKD_IRQ_MSI:
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
DRV_NAME, skdev->devno);
- rc = pci_enable_msi(pdev);
- if (!rc) {
+ rc = pci_enable_msi_range(pdev, 1, 1);
+ if (rc > 0) {
rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
skdev->isr_name, skdev);
if (rc) {
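pci_enable_msix_range() collapses the old try-then-retry sequence into one call: it requests up to maxvec vectors, accepts anything down to minvec, and returns either the number of vectors actually enabled or a negative error, so the caller no longer loops on pci_enable_msix()'s partial-success return. A hedged sketch of the calling convention:

    #include <linux/pci.h>

    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries,
                                   int minvec, int maxvec)
    {
        int i, nvec;

        for (i = 0; i < maxvec; i++)
            entries[i].entry = i;

        nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
        if (nvec < 0)
            return nvec;    /* not even minvec vectors were available */

        return nvec;        /* number of vectors actually enabled */
    }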
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 20e061c3e023..c74f7b56e7c4 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -840,14 +841,17 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
spin_lock_irqsave(&swim3_lock, flags);
if (fs->state != idle && fs->state != available) {
++fs->wanted;
- while (fs->state != available) {
+ /* this will enable irqs in order to sleep */
+ if (!interruptible)
+ wait_event_lock_irq(fs->wait,
+ fs->state == available,
+ swim3_lock);
+ else if (wait_event_interruptible_lock_irq(fs->wait,
+ fs->state == available,
+ swim3_lock)) {
+ --fs->wanted;
spin_unlock_irqrestore(&swim3_lock, flags);
- if (interruptible && signal_pending(current)) {
- --fs->wanted;
- return -EINTR;
- }
- interruptible_sleep_on(&fs->wait);
- spin_lock_irqsave(&swim3_lock, flags);
+ return -EINTR;
}
--fs->wanted;
}
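The swim3 change retires the racy sleep_on() pattern: wait_event_lock_irq() (and its interruptible variant) re-checks the condition under the spinlock, drops the lock only while actually sleeping, and reacquires it before returning, so a wakeup can no longer slip in between unlocking and sleeping. A hedged sketch of the non-interruptible form with illustrative names:

    #include <linux/wait.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static DECLARE_WAIT_QUEUE_HEAD(example_wait);
    static int example_busy;

    static void wait_until_idle(void)
    {
        spin_lock_irq(&example_lock);
        /* sleeps with example_lock released; returns with it held again */
        wait_event_lock_irq(example_wait, example_busy == 0, example_lock);
        spin_unlock_irq(&example_lock);
    }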
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 3fb6ab4c8b4e..d5e2d12b9d9e 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1744,20 +1744,6 @@ static void carm_remove_one (struct pci_dev *pdev)
kfree(host);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
-static int __init carm_init(void)
-{
- return pci_register_driver(&carm_driver);
-}
-
-static void __exit carm_exit(void)
-{
- pci_unregister_driver(&carm_driver);
-}
-
-module_init(carm_init);
-module_exit(carm_exit);
-
-
+module_pci_driver(carm_driver);
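module_pci_driver() expands to the module_init()/module_exit() pair whose bodies only register and unregister the given pci_driver, which is exactly what the removed functions did by hand. Roughly, the macro stands in for:

    static int __init carm_init(void)
    {
        return pci_register_driver(&carm_driver);
    }
    module_init(carm_init);

    static void __exit carm_exit(void)
    {
        pci_unregister_driver(&carm_driver);
    }
    module_exit(carm_exit);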
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
* have been written
*/
struct bio *bio, *currentbio, **biotail;
- int current_idx;
- sector_t current_sector;
+ struct bvec_iter current_iter;
struct request_queue *queue;
@@ -118,7 +117,7 @@ struct cardinfo {
struct mm_dma_desc *desc;
int cnt, headcnt;
struct bio *bio, **biotail;
- int idx;
+ struct bvec_iter iter;
} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
dma_addr_t dma_handle;
int offset;
struct bio *bio;
- struct bio_vec *vec;
- int idx;
+ struct bio_vec vec;
int rw;
- int len;
bio = card->currentbio;
if (!bio && card->bio) {
card->currentbio = card->bio;
- card->current_idx = card->bio->bi_idx;
- card->current_sector = card->bio->bi_sector;
+ card->current_iter = card->bio->bi_iter;
card->bio = card->bio->bi_next;
if (card->bio == NULL)
card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
}
if (!bio)
return 0;
- idx = card->current_idx;
rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
- vec = bio_iovec_idx(bio, idx);
- len = vec->bv_len;
+ vec = bio_iter_iovec(bio, card->current_iter);
+
dma_handle = pci_map_page(card->dev,
- vec->bv_page,
- vec->bv_offset,
- len,
+ vec.bv_page,
+ vec.bv_offset,
+ vec.bv_len,
(rw == READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
desc = &p->desc[p->cnt];
p->cnt++;
if (p->bio == NULL)
- p->idx = idx;
+ p->iter = card->current_iter;
if ((p->biotail) != &bio->bi_next) {
*(p->biotail) = bio;
p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
desc->data_dma_handle = dma_handle;
desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
- desc->local_addr = cpu_to_le64(card->current_sector << 9);
- desc->transfer_size = cpu_to_le32(len);
+ desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+ desc->transfer_size = cpu_to_le32(vec.bv_len);
offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
- card->current_sector += (len >> 9);
- idx++;
- card->current_idx = idx;
- if (idx >= bio->bi_vcnt)
+
+ bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+ if (!card->current_iter.bi_size)
card->currentbio = NULL;
return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
struct mm_dma_desc *desc = &page->desc[page->headcnt];
int control = le32_to_cpu(desc->sem_control_bits);
int last = 0;
- int idx;
+ struct bio_vec vec;
if (!(control & DMASCR_DMA_COMPLETE)) {
control = dma_status;
last = 1;
}
+
page->headcnt++;
- idx = page->idx;
- page->idx++;
- if (page->idx >= bio->bi_vcnt) {
+ vec = bio_iter_iovec(bio, page->iter);
+ bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+ if (!page->iter.bi_size) {
page->bio = bio->bi_next;
if (page->bio)
- page->idx = page->bio->bi_idx;
+ page->iter = page->bio->bi_iter;
}
pci_unmap_page(card->dev, desc->data_dma_handle,
- bio_iovec_idx(bio, idx)->bv_len,
+ vec.bv_len,
(control & DMASCR_TRANSFER_READ) ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
- (unsigned long long)bio->bi_sector, bio->bi_size);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
spin_lock_irq(&card->lock);
*card->biotail = bio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..6d8a87f252de 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -158,6 +158,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
unsigned long flags;
unsigned int num;
const bool last = (req->cmd_flags & REQ_END) != 0;
+ int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
@@ -198,11 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
}
spin_lock_irqsave(&vblk->vq_lock, flags);
- if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+ err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
+ if (err) {
virtqueue_kick(vblk->vq);
spin_unlock_irqrestore(&vblk->vq_lock, flags);
blk_mq_stop_hw_queue(hctx);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ /* Out of mem doesn't actually happen, since we fall back
+ * to direct descriptors */
+ if (err == -ENOMEM || err == -ENOSPC)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
if (last)
@@ -479,23 +485,26 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
.ops = &virtio_mq_ops,
.nr_hw_queues = 1,
- .queue_depth = 64,
+ .queue_depth = 0, /* Set in virtblk_probe */
.numa_node = NUMA_NO_NODE,
.flags = BLK_MQ_F_SHOULD_MERGE,
};
+module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
-static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
struct request *rq, unsigned int nr)
{
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = rq->special;
sg_init_table(vbr->sg, vblk->sg_elems);
+ return 0;
}
static int virtblk_probe(struct virtio_device *vdev)
@@ -551,6 +560,13 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vq;
}
+ /* Default queue sizing is to fill the ring. */
+ if (!virtio_mq_reg.queue_depth) {
+ virtio_mq_reg.queue_depth = vblk->vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ virtio_mq_reg.queue_depth /= 2;
+ }
virtio_mq_reg.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * sg_elems;
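The virtio-blk rework splits completion in two: the virtqueue interrupt handler only calls blk_mq_complete_request(), and blk-mq later runs the driver's .complete callback (virtblk_request_done here) to finish the request outside the hard-IRQ path. A hedged sketch of how those pieces hook together; the names are illustrative and the other mandatory blk_mq_ops members are omitted:

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    static void example_softirq_done(struct request *req)
    {
        /* finish the request here; runs from blk-mq, not from the IRQ handler */
    }

    static void example_irq_path(struct request *req)
    {
        blk_mq_complete_request(req);       /* defer the heavy lifting */
    }

    static struct blk_mq_ops example_mq_ops = {
        /* .queue_rq, .map_queue, ... */
        .complete = example_softirq_done,
    };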
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -625,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: The fact that we might try to wake up pending_free_wq
+ * before drain_complete (in case there's a drain going on) is
+ * not a problem with our current implementation, because we
+ * can ensure there's no thread waiting on pending_free_wq while
+ * a drain is in progress; it does have to be taken into account
+ * if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
@@ -1257,7 +1277,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio->bi_sector = preq.sector_number;
+ bio->bi_iter.bi_sector = preq.sector_number;
}
preq.sector_number += seg[i].nsec;
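xen-blkback now drains by counting outstanding requests explicitly: submission bumps an atomic inflight counter, and the completion path drops it, signalling drain_complete when the count reaches zero while a drain is pending, instead of inferring idleness from the blkif refcount. A hedged sketch of that counter/completion pairing with illustrative names:

    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct example_backend {
        atomic_t inflight;
        atomic_t drain;
        struct completion drain_complete;
    };

    static void example_submit(struct example_backend *be)
    {
        atomic_inc(&be->inflight);
        /* ... map grants and submit the bios ... */
    }

    static void example_io_done(struct example_backend *be)
    {
        /* the last in-flight request wakes a pending drain, if any */
        if (atomic_dec_and_test(&be->inflight) && atomic_read(&be->drain))
            complete(&be->drain_complete);
    }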
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c4a4c9006288..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1356,7 +1356,7 @@ static int blkfront_probe(struct xenbus_device *dev,
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
+ if (xen_has_pv_and_legacy_disk_devices()) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
for (i = 0; i < pending; i++) {
offset = (i * segs * PAGE_SIZE) >> 9;
size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
- (unsigned int)(bio->bi_size >> 9) - offset);
+ (unsigned int)bio_sectors(bio) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
bio_trim(cloned_bio, offset, size);
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
@@ -2079,7 +2082,7 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_disk_devices())
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
index 983314c41349..3450be850399 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,6 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..cb0f9ced6a93 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/block/zram/Makefile
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3277d9838f4e..51c557cfd92b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
+ return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}
static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+ (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
return sprintf(buf, "%llu\n", val);
}
+/* flag operations needs meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
@@ -171,13 +172,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
u64 start, end, bound;
/* unaligned request */
- if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ if (unlikely(bio->bi_iter.bi_sector &
+ (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
- if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
return 0;
- start = bio->bi_sector;
- end = start + (bio->bi_size >> SECTOR_SHIFT);
+ start = bio->bi_iter.bi_sector;
+ end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
@@ -227,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_table;
}
+ rwlock_init(&meta->tb_lock);
+ mutex_init(&meta->buffer_lock);
return meta;
free_table:
@@ -279,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
flush_dcache_page(page);
}
+/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -292,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
*/
if (zram_test_flag(meta, index, ZRAM_ZERO)) {
zram_clear_flag(meta, index, ZRAM_ZERO);
- zram->stats.pages_zero--;
+ atomic_dec(&zram->stats.pages_zero);
}
return;
}
if (unlikely(size > max_zpage_size))
- zram->stats.bad_compress--;
+ atomic_dec(&zram->stats.bad_compress);
zs_free(meta->mem_pool, handle);
if (size <= PAGE_SIZE / 2)
- zram->stats.good_compress--;
+ atomic_dec(&zram->stats.good_compress);
atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
- zram->stats.pages_stored--;
+ atomic_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
meta->table[index].size = 0;
@@ -318,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
size_t clen = PAGE_SIZE;
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
+ unsigned long handle;
+ u16 size;
+
+ read_lock(&meta->tb_lock);
+ handle = meta->table[index].handle;
+ size = meta->table[index].size;
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
clear_page(mem);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (meta->table[index].size == PAGE_SIZE)
+ if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
- ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
- mem, &clen);
+ ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
zs_unmap_object(meta->mem_pool, handle);
+ read_unlock(&meta->tb_lock);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -352,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
+ read_lock(&meta->tb_lock);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
handle_zero_page(bvec);
return 0;
}
+ read_unlock(&meta->tb_lock);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -399,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ bool locked = false;
page = bvec->bv_page;
src = meta->compress_buffer;
@@ -418,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
+ mutex_lock(&meta->buffer_lock);
+ locked = true;
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -432,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
-
- zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
+ write_unlock(&zram->meta->tb_lock);
+
+ atomic_inc(&zram->stats.pages_zero);
ret = 0;
goto out;
}
- /*
- * zram_slot_free_notify could miss free so that let's
- * double check.
- */
- if (unlikely(meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO)))
- zram_free_page(zram, index);
-
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
-
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
@@ -463,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (unlikely(clen > max_zpage_size)) {
- zram->stats.bad_compress++;
+ atomic_inc(&zram->stats.bad_compress);
clen = PAGE_SIZE;
src = NULL;
if (is_partial_io(bvec))
@@ -493,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* Free memory associated with this sector
* before overwriting unused sectors.
*/
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
meta->table[index].handle = handle;
meta->table[index].size = clen;
+ write_unlock(&zram->meta->tb_lock);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_size);
- zram->stats.pages_stored++;
+ atomic_inc(&zram->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
- zram->stats.good_compress++;
+ atomic_inc(&zram->stats.good_compress);
out:
+ if (locked)
+ mutex_unlock(&meta->buffer_lock);
if (is_partial_io(bvec))
kfree(uncmem);
@@ -513,36 +527,15 @@ out:
return ret;
}
-static void handle_pending_slot_free(struct zram *zram)
-{
- struct zram_slot_free *free_rq;
-
- spin_lock(&zram->slot_free_lock);
- while (zram->slot_free_rq) {
- free_rq = zram->slot_free_rq;
- zram->slot_free_rq = free_rq->next;
- zram_free_page(zram, free_rq->index);
- kfree(free_rq);
- }
- spin_unlock(&zram->slot_free_lock);
-}
-
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
int ret;
- if (rw == READ) {
- down_read(&zram->lock);
- handle_pending_slot_free(zram);
+ if (rw == READ)
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- up_read(&zram->lock);
- } else {
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
+ else
ret = zram_bvec_write(zram, bvec, index, offset);
- up_write(&zram->lock);
- }
return ret;
}
@@ -552,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
size_t index;
struct zram_meta *meta;
- flush_work(&zram->free_work);
-
down_write(&zram->init_lock);
if (!zram->init_done) {
up_write(&zram->init_lock);
@@ -621,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
+ if (!meta)
+ return -ENOMEM;
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
@@ -680,9 +673,10 @@ out:
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
- int i, offset;
+ int offset;
u32 index;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
switch (rw) {
case READ:
@@ -693,36 +687,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
break;
}
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+ index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_iter.bi_sector &
+ (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- bio_for_each_segment(bvec, bio, i) {
+ bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
- if (bvec->bv_len > max_transfer_size) {
+ if (bvec.bv_len > max_transfer_size) {
/*
* zram_bvec_rw() can only make operation on a single
* zram page. Split the bio vector.
*/
struct bio_vec bv;
- bv.bv_page = bvec->bv_page;
+ bv.bv_page = bvec.bv_page;
bv.bv_len = max_transfer_size;
- bv.bv_offset = bvec->bv_offset;
+ bv.bv_offset = bvec.bv_offset;
if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
goto out;
- bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+ if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
< 0)
goto out;
- update_position(&index, &offset, bvec);
+ update_position(&index, &offset, &bvec);
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -759,40 +754,19 @@ error:
bio_io_error(bio);
}
-static void zram_slot_free(struct work_struct *work)
-{
- struct zram *zram;
-
- zram = container_of(work, struct zram, free_work);
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
- up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
- spin_lock(&zram->slot_free_lock);
- free_rq->next = zram->slot_free_rq;
- zram->slot_free_rq = free_rq;
- spin_unlock(&zram->slot_free_lock);
-}
-
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
- struct zram_slot_free *free_rq;
+ struct zram_meta *meta;
zram = bdev->bd_disk->private_data;
- atomic64_inc(&zram->stats.notify_free);
-
- free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
- if (!free_rq)
- return;
+ meta = zram->meta;
- free_rq->index = index;
- add_slot_free(zram, free_rq);
- schedule_work(&zram->free_work);
+ write_lock(&meta->tb_lock);
+ zram_free_page(zram, index);
+ write_unlock(&meta->tb_lock);
+ atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
@@ -836,13 +810,8 @@ static int create_device(struct zram *zram, int device_id)
{
int ret = -ENOMEM;
- init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- INIT_WORK(&zram->free_work, zram_slot_free);
- spin_lock_init(&zram->slot_free_lock);
- zram->slot_free_rq = NULL;
-
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 97a3acf6ab76..ad8aa35bae00 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
/*
* Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@ struct table {
u8 flags;
} __aligned(4);
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
struct zram_stats {
atomic64_t compr_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
@@ -81,33 +76,23 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- u32 pages_zero; /* no. of zero filled pages */
- u32 pages_stored; /* no. of pages currently stored */
- u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 bad_compress; /* % of pages with compression ratio>=75% */
+ atomic_t pages_zero; /* no. of zero filled pages */
+ atomic_t pages_stored; /* no. of pages currently stored */
+ atomic_t good_compress; /* % of pages with compression ratio<=50% */
+ atomic_t bad_compress; /* % of pages with compression ratio>=75% */
};
struct zram_meta {
+ rwlock_t tb_lock; /* protect table */
void *compress_workmem;
void *compress_buffer;
struct table *table;
struct zs_pool *mem_pool;
-};
-
-struct zram_slot_free {
- unsigned long index;
- struct zram_slot_free *next;
+ struct mutex buffer_lock; /* protect compress buffers */
};
struct zram {
struct zram_meta *meta;
- struct rw_semaphore lock; /* protect compression buffers, table,
- * 32bit stat counters against concurrent
- * notifications, reads and writes */
-
- struct work_struct free_work; /* handle pending free request */
- struct zram_slot_free *slot_free_rq; /* list head of free request */
-
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -118,7 +103,6 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
- spinlock_t slot_free_lock;
struct zram_stats stats;
};
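
As a rough illustration of the sector arithmetic in the __zram_make_request() hunk above (assuming the usual 4 KiB PAGE_SIZE and 512-byte sectors, so SECTORS_PER_PAGE_SHIFT is 3 and SECTORS_PER_PAGE is 8; the sector value below is made up):

	/* hypothetical example: sector 11 of the zram device */
	sector_t sector = 11;
	u32 index  = sector >> 3;        /* 11 >> 3 = 1, i.e. the second zram page   */
	int offset = (sector & 7) << 9;  /* (11 & 7) << 9 = 1536 bytes into the page */
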
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11a6104a1e4f..f5ce64e03fd7 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -241,5 +241,6 @@ config BT_WILINK
core driver to communicate with the BT core of the combo chip.
Say Y here to compile support for Texas Instrument's WiLink7 driver
- into the kernel or say M to compile it as module.
+ into the kernel or say M to compile it as module (btwilink).
+
endmenu
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index dceb85f8d9a8..106d1d8e16ad 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -83,6 +83,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
{ USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
@@ -97,6 +98,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x0cf3, 0x3121) },
{ USB_DEVICE(0x0cf3, 0xe003) },
+ { USB_DEVICE(0x0489, 0xe05f) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -126,6 +128,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
@@ -140,6 +143,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index f9d183387f45..7399303d7d99 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,8 +23,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
-#include <linux/ctype.h>
-#include <linux/firmware.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
@@ -43,8 +41,6 @@ struct btmrvl_thread {
struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
- struct device *dev;
- const char *cal_data;
u8 dev_type;
@@ -90,12 +86,12 @@ struct btmrvl_private {
#define MRVL_VENDOR_PKT 0xFE
-/* Bluetooth commands */
-#define BT_CMD_AUTO_SLEEP_MODE 0x23
-#define BT_CMD_HOST_SLEEP_CONFIG 0x59
-#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
-#define BT_CMD_MODULE_CFG_REQ 0x5B
-#define BT_CMD_LOAD_CONFIG_DATA 0x61
+/* Vendor specific Bluetooth commands */
+#define BT_CMD_AUTO_SLEEP_MODE 0xFC23
+#define BT_CMD_HOST_SLEEP_CONFIG 0xFC59
+#define BT_CMD_HOST_SLEEP_ENABLE 0xFC5A
+#define BT_CMD_MODULE_CFG_REQ 0xFC5B
+#define BT_CMD_LOAD_CONFIG_DATA 0xFC61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
@@ -104,6 +100,11 @@ struct btmrvl_private {
#define MODULE_SHUTDOWN_REQ 0xF2
+/* Vendor specific Bluetooth events */
+#define BT_EVENT_AUTO_SLEEP_MODE 0x23
+#define BT_EVENT_HOST_SLEEP_CONFIG 0x59
+#define BT_EVENT_HOST_SLEEP_ENABLE 0x5A
+#define BT_EVENT_MODULE_CFG_REQ 0x5B
#define BT_EVENT_POWER_STATE 0x20
/* Bluetooth Power States */
@@ -111,8 +112,6 @@ struct btmrvl_private {
#define BT_PS_DISABLE 0x03
#define BT_PS_SLEEP 0x01
-#define OGF 0x3F
-
/* Host Sleep states */
#define HS_ACTIVATED 0x01
#define HS_DEACTIVATED 0x00
@@ -121,7 +120,7 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
-#define BT_CMD_DATA_SIZE 32
+#define BT_CAL_HDR_LEN 4
#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
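
The new 0xFCxx BT_CMD_* values above are full HCI opcodes rather than bare OCFs: vendor commands live in OGF 0x3F, which occupies the top six bits of the 16-bit opcode, so the packing that hci_opcode_pack() used to do at send time is now folded into the constants. A quick worked example of that arithmetic, using the defines above:

	/* opcode = (ogf << 10) | ocf; OGF 0x3F, OCF 0x23 */
	u16 opcode = (0x3F << 10) | 0x23;   /* 0xFC00 | 0x23 = 0xFC23 */
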
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 5cf31c4fe6d1..1e0320af00c6 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -19,7 +19,7 @@
**/
#include <linux/module.h>
-
+#include <linux/of.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -50,12 +50,10 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf, ogf;
+ u16 opcode;
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
- ocf = hci_opcode_ocf(opcode);
- ogf = hci_opcode_ogf(opcode);
if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
@@ -63,9 +61,8 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
- if (ogf == OGF) {
- BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
- ogf, ocf);
+ if (hci_opcode_ogf(opcode) == 0x3F) {
+ BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
kfree_skb(skb);
return false;
}
@@ -89,7 +86,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
switch (event->data[0]) {
- case BT_CMD_AUTO_SLEEP_MODE:
+ case BT_EVENT_AUTO_SLEEP_MODE:
if (!event->data[2]) {
if (event->data[1] == BT_PS_ENABLE)
adapter->psmode = 1;
@@ -102,7 +99,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
break;
- case BT_CMD_HOST_SLEEP_CONFIG:
+ case BT_EVENT_HOST_SLEEP_CONFIG:
if (!event->data[3])
BT_DBG("gpio=%x, gap=%x", event->data[1],
event->data[2]);
@@ -110,7 +107,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
BT_DBG("HSCFG command failed");
break;
- case BT_CMD_HOST_SLEEP_ENABLE:
+ case BT_EVENT_HOST_SLEEP_ENABLE:
if (!event->data[1]) {
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
@@ -121,7 +118,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
break;
- case BT_CMD_MODULE_CFG_REQ:
+ case BT_EVENT_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
BT_DBG("EVENT:%s",
@@ -166,7 +163,7 @@ exit:
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
-static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
const void *param, u8 len)
{
struct sk_buff *skb;
@@ -179,7 +176,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
}
hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+ hdr->opcode = cpu_to_le16(opcode);
hdr->plen = len;
if (len)
@@ -417,127 +414,62 @@ static int btmrvl_open(struct hci_dev *hdev)
return 0;
}
-/*
- * This function parses provided calibration data input. It should contain
- * hex bytes separated by space or new line character. Here is an example.
- * 00 1C 01 37 FF FF FF FF 02 04 7F 01
- * CE BA 00 00 00 2D C6 C0 00 00 00 00
- * 00 F0 00 00
- */
-static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+static int btmrvl_download_cal_data(struct btmrvl_private *priv,
+ u8 *data, int len)
{
- const u8 *s = src;
- u8 *d = dst;
int ret;
- u8 tmp[3];
-
- tmp[2] = '\0';
- while ((s - src) <= len - 2) {
- if (isspace(*s)) {
- s++;
- continue;
- }
-
- if (isxdigit(*s)) {
- if ((d - dst) >= dst_size) {
- BT_ERR("calibration data file too big!!!");
- return -EINVAL;
- }
-
- memcpy(tmp, s, 2);
-
- ret = kstrtou8(tmp, 16, d++);
- if (ret < 0)
- return ret;
-
- s += 2;
- } else {
- return -EINVAL;
- }
- }
- if (d == dst)
- return -EINVAL;
-
- return 0;
-}
-
-static int btmrvl_load_cal_data(struct btmrvl_private *priv,
- u8 *config_data)
-{
- int i, ret;
- u8 data[BT_CMD_DATA_SIZE];
data[0] = 0x00;
data[1] = 0x00;
data[2] = 0x00;
- data[3] = BT_CMD_DATA_SIZE - 4;
-
- /* Swap cal-data bytes. Each four bytes are swapped. Considering 4
- * byte SDIO header offset, mapping of input and output bytes will be
- * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
- * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
- for (i = 4; i < BT_CMD_DATA_SIZE; i++)
- data[i] = config_data[(i / 4) * 8 - 1 - i];
+ data[3] = len;
print_hex_dump_bytes("Calibration data: ",
- DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+ DUMP_PREFIX_OFFSET, data, BT_CAL_HDR_LEN + len);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
- BT_CMD_DATA_SIZE);
+ BT_CAL_HDR_LEN + len);
if (ret)
BT_ERR("Failed to download caibration data\n");
return 0;
}
-static int
-btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+static int btmrvl_cal_data_dt(struct btmrvl_private *priv)
{
- u8 cal_data[BT_CAL_DATA_SIZE];
+ struct device_node *dt_node;
+ u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
+ const char name[] = "btmrvl_caldata";
+ const char property[] = "btmrvl,caldata";
int ret;
- ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+ dt_node = of_find_node_by_name(NULL, name);
+ if (!dt_node)
+ return -ENODEV;
+
+ ret = of_property_read_u8_array(dt_node, property,
+ cal_data + BT_CAL_HDR_LEN,
+ BT_CAL_DATA_SIZE);
if (ret)
return ret;
- ret = btmrvl_load_cal_data(priv, cal_data);
+ BT_DBG("Use cal data from device tree");
+ ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE);
if (ret) {
- BT_ERR("Fail to load calibrate data");
+ BT_ERR("Fail to download calibrate data");
return ret;
}
return 0;
}
-static int btmrvl_cal_data_config(struct btmrvl_private *priv)
-{
- const struct firmware *cfg;
- int ret;
- const char *cal_data = priv->btmrvl_dev.cal_data;
-
- if (!cal_data)
- return 0;
-
- ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
- if (ret < 0) {
- BT_DBG("Failed to get %s file, skipping cal data download",
- cal_data);
- return 0;
- }
-
- ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
- release_firmware(cfg);
- return ret;
-}
-
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
- if (btmrvl_cal_data_config(priv))
- BT_ERR("Set cal data failed");
+ btmrvl_cal_data_dt(priv);
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index fabcf5bb48af..1b52c9f5230d 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -18,6 +18,7 @@
* this warranty disclaimer.
**/
+#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mmc/sdio_ids.h>
@@ -101,7 +102,6 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
@@ -109,7 +109,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -117,7 +116,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
- .cal_data = "mrvl/sd8797_caldata.conf",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -125,7 +123,6 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
- .cal_data = NULL,
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
@@ -1007,7 +1004,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
struct btmrvl_sdio_device *data = (void *) id->driver_data;
card->helper = data->helper;
card->firmware = data->firmware;
- card->cal_data = data->cal_data;
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
}
@@ -1036,8 +1032,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
}
card->priv = priv;
- priv->btmrvl_dev.dev = &card->func->dev;
- priv->btmrvl_dev.cal_data = card->cal_data;
/* Initialize the interface specific function pointers */
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1220,5 +1214,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 6872d9ecac07..43d35a609ca9 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -85,7 +85,6 @@ struct btmrvl_sdio_card {
u32 ioport;
const char *helper;
const char *firmware;
- const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
u8 rx_unit;
@@ -95,7 +94,6 @@ struct btmrvl_sdio_card {
struct btmrvl_sdio_device {
const char *helper;
const char *firmware;
- const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
};
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index b61440aaee65..83f6437dd91d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -73,6 +73,7 @@ struct btsdio_data {
#define REG_CL_INTRD 0x13 /* Interrupt Clear */
#define REG_EN_INTRD 0x14 /* Interrupt Enable */
#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */
+#define REG_MD_SET 0x20 /* Bluetooth Mode Set */
static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
{
@@ -212,7 +213,7 @@ static int btsdio_open(struct hci_dev *hdev)
}
if (data->func->class == SDIO_CLASS_BT_B)
- sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL);
+ sdio_writeb(data->func, 0x00, REG_MD_SET, NULL);
sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL);
@@ -333,6 +334,9 @@ static int btsdio_probe(struct sdio_func *func,
hdev->flush = btsdio_flush;
hdev->send = btsdio_send_frame;
+ if (func->vendor == 0x0104 && func->device == 0x00c5)
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
err = hci_register_dev(hdev);
if (err < 0) {
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3980fd18f6ea..baeaaed299e4 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -150,6 +150,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
@@ -164,6 +165,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -224,6 +226,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth device */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ } /* Terminating entry */
};
@@ -962,6 +965,45 @@ static int btusb_setup_bcm92035(struct hci_dev *hdev)
return 0;
}
+static int btusb_setup_csr(struct hci_dev *hdev)
+{
+ struct hci_rp_read_local_version *rp;
+ struct sk_buff *skb;
+ int ret;
+
+ BT_DBG("%s", hdev->name);
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
+ return -PTR_ERR(skb);
+ }
+
+ rp = (struct hci_rp_read_local_version *) skb->data;
+
+ if (!rp->status) {
+ if (le16_to_cpu(rp->manufacturer) != 10) {
+ /* Clear the reset quirk since this is not an actual
+ * early Bluetooth 1.1 device from CSR.
+ */
+ clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ /* These fake CSR controllers have all a broken
+ * stored link key handling and so just disable it.
+ */
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY,
+ &hdev->quirks);
+ }
+ }
+
+ ret = -bt_to_errno(rp->status);
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
struct intel_version {
u8 status;
u8 hw_platform;
@@ -1436,8 +1478,10 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
- if (id->driver_info & BTUSB_INTEL)
+ if (id->driver_info & BTUSB_INTEL) {
+ usb_enable_autosuspend(data->udev);
hdev->setup = btusb_setup_intel;
+ }
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -1460,10 +1504,15 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_CSR) {
struct usb_device *udev = data->udev;
+ u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice);
/* Old firmware would otherwise execute USB reset */
- if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
+ if (bcdDevice < 0x117)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ /* Fake CSR devices with broken commands */
+ if (bcdDevice <= 0x100)
+ hdev->setup = btusb_setup_csr;
}
if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7b167385a1c4..1ef6990a5c7e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -141,22 +141,28 @@ static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
- const char __user *buf, size_t count)
+ const struct iovec *iov,
+ unsigned long count)
{
+ size_t len = iov_length(iov, count);
struct sk_buff *skb;
__u8 pkt_type, dev_type;
+ unsigned long i;
int ret;
- if (count < 2 || count > HCI_MAX_FRAME_SIZE)
+ if (len < 2 || len > HCI_MAX_FRAME_SIZE)
return -EINVAL;
- skb = bt_skb_alloc(count, GFP_KERNEL);
+ skb = bt_skb_alloc(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- if (copy_from_user(skb_put(skb, count), buf, count)) {
- kfree_skb(skb);
- return -EFAULT;
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(skb_put(skb, iov[i].iov_len),
+ iov[i].iov_base, iov[i].iov_len)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
}
pkt_type = *((__u8 *) skb->data);
@@ -205,7 +211,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EINVAL;
}
- return (ret < 0) ? ret : count;
+ return (ret < 0) ? ret : len;
}
static inline ssize_t vhci_put_user(struct vhci_data *data,
@@ -272,12 +278,13 @@ static ssize_t vhci_read(struct file *file,
return ret;
}
-static ssize_t vhci_write(struct file *file,
- const char __user *buf, size_t count, loff_t *pos)
+static ssize_t vhci_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long count, loff_t pos)
{
+ struct file *file = iocb->ki_filp;
struct vhci_data *data = file->private_data;
- return vhci_get_user(data, buf, count);
+ return vhci_get_user(data, iov, count);
}
static unsigned int vhci_poll(struct file *file, poll_table *wait)
@@ -342,7 +349,7 @@ static int vhci_release(struct inode *inode, struct file *file)
static const struct file_operations vhci_fops = {
.owner = THIS_MODULE,
.read = vhci_read,
- .write = vhci_write,
+ .aio_write = vhci_write,
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b6739cb78e32..962fd35cbd8d 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -979,7 +979,7 @@ static int cci_probe(void)
nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
if (!ports)
return -ENOMEM;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 2394e9753ef5..2ac754e18bcf 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -588,12 +588,6 @@ static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
.show_cpu_target = mvebu_sdram_debug_show_orion,
};
-/*
- * The driver doesn't yet have a DT binding because the details of
- * this DT binding still need to be sorted out. However, as a
- * preparation, we already use of_device_id to match a SoC description
- * string against the SoC specific details of this driver.
- */
static const struct of_device_id of_mvebu_mbus_ids[] = {
{ .compatible = "marvell,armada370-mbus",
.data = &armada_370_xp_mbus_data, },
@@ -734,11 +728,11 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
{
const struct of_device_id *of_id;
- for (of_id = of_mvebu_mbus_ids; of_id->compatible; of_id++)
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
if (!strcmp(of_id->compatible, soc))
break;
- if (!of_id->compatible) {
+ if (!of_id->compatible[0]) {
pr_err("could not find a matching SoC family\n");
return -ENODEV;
}
@@ -876,14 +870,14 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
mem->start = reg[0];
- mem->end = mem->start + reg[1];
+ mem->end = mem->start + reg[1] - 1;
mem->flags = IORESOURCE_MEM;
}
ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
if (!ret) {
io->start = reg[0];
- io->end = io->start + reg[1];
+ io->end = io->start + reg[1] - 1;
io->flags = IORESOURCE_IO;
}
}
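
The aperture fix above reflects the convention that a struct resource end address is inclusive, so a window of size bytes starting at start ends at start + size - 1. A small sketch with made-up numbers:

	/* hypothetical 128 MiB PCIe memory aperture at 0xe0000000 */
	mem->start = 0xe0000000;
	mem->end   = 0xe0000000 + 0x08000000 - 1;   /* 0xe7ffffff, inclusive */
	mem->flags = IORESOURCE_MEM;
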
diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
index ecf85fda0fc1..8ffde4f8ab9a 100644
--- a/drivers/cdrom/Makefile
+++ b/drivers/cdrom/Makefile
@@ -10,5 +10,4 @@ obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
obj-$(CONFIG_PARIDE_PCD) += cdrom.o
obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
-obj-$(CONFIG_VIOCD) += viocd.o cdrom.o
obj-$(CONFIG_GDROM) += gdrom.o cdrom.o
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5980cb9af857..51e75ad96422 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -561,11 +561,11 @@ static int gdrom_set_interrupt_handlers(void)
int err;
err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
- IRQF_DISABLED, "gdrom_command", &gd);
+ 0, "gdrom_command", &gd);
if (err)
return err;
err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
- IRQF_DISABLED, "gdrom_dma", &gd);
+ 0, "gdrom_dma", &gd);
if (err)
free_irq(HW_EVENT_GDROM_CMD, &gd);
return err;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d208a7..a324f9303e36 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
-obj-$(CONFIG_VIOTAPE) += viotape.o
obj-$(CONFIG_IBM_BSR) += bsr.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
@@ -50,7 +49,7 @@ obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
-obj-$(CONFIG_AGP) += agp/
+obj-y += agp/
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index d8b1b576556c..c528f96ee204 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -68,6 +68,7 @@ config AGP_AMD64
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
+ select INTEL_GTT
help
This option gives you AGP support for the GLX component of X
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -155,3 +156,7 @@ config AGP_SGI_TIOCA
This option gives you AGP GART support for the SGI TIO chipset
for IA64 processors.
+config INTEL_GTT
+ tristate
+ depends on X86 && PCI
+
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 8eb56e273e75..604489bcdbf9 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
-obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
+obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 1b192395a90c..8121b4c70ede 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index f39437addb58..0fbccce1cee9 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -29,7 +29,6 @@
*/
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a7c276585a9f..f9b9ca5d31b7 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,9 +14,6 @@
#include "intel-agp.h"
#include <drm/intel-gtt.h>
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
static int intel_fetch_size(void)
{
int i;
@@ -806,8 +803,6 @@ static int agp_intel_probe(struct pci_dev *pdev,
found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
- if (!err)
- intel_agp_enabled = 1;
return err;
}
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ad5da1ffcbe9..9a024f899dd4 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
@@ -94,6 +93,7 @@ static struct _intel_private {
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
unsigned int num_entries,
struct sg_table *st)
@@ -168,6 +168,7 @@ static void i8xx_destroy_pages(struct page *page)
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
+#endif
#define I810_GTT_ORDER 4
static int i810_setup(void)
@@ -208,6 +209,7 @@ static void i810_cleanup(void)
free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -288,6 +290,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
}
kfree(curr);
}
+#endif
static int intel_gtt_setup_scratch_page(void)
{
@@ -645,7 +648,9 @@ static int intel_gtt_init(void)
return -ENOMEM;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
global_cache_flush(); /* FIXME: ? */
+#endif
intel_private.stolen_size = intel_gtt_stolen_size();
@@ -666,6 +671,7 @@ static int intel_gtt_init(void)
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
@@ -684,6 +690,7 @@ static int intel_fake_agp_fetch_size(void)
return 0;
}
+#endif
static void i830_cleanup(void)
{
@@ -795,6 +802,7 @@ static int i830_setup(void)
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
agp_bridge->gatt_table_real = NULL;
@@ -819,6 +827,7 @@ static int intel_fake_agp_configure(void)
return 0;
}
+#endif
static bool i830_check_flags(unsigned int flags)
{
@@ -857,6 +866,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
struct page **pages,
@@ -922,6 +932,7 @@ out_err:
mem->is_flushed = true;
return ret;
}
+#endif
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
@@ -935,6 +946,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
}
EXPORT_SYMBOL(intel_gtt_clear_range);
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
@@ -976,6 +988,7 @@ static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
/* always return NULL for other allocation types for now */
return NULL;
}
+#endif
static int intel_alloc_chipset_flush_resource(void)
{
@@ -1129,6 +1142,7 @@ static int i9xx_setup(void)
return 0;
}
+#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
@@ -1150,6 +1164,7 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
};
+#endif
static const struct intel_gtt_driver i81x_gtt_driver = {
.gen = 1,
@@ -1367,11 +1382,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
intel_private.refcount++;
+#if IS_ENABLED(CONFIG_AGP_INTEL)
if (bridge) {
bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
+#endif
intel_private.bridge_dev = pci_dev_get(bridge_pdev);
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 05b8d0241bde..3051c73bc383 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/agp_backend.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 46118f845948..dd9dfa15e9d1 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -531,6 +531,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
{
struct apm_user *as;
int err;
+ unsigned long apm_event;
/* short-cut emergency suspends */
if (atomic_read(&userspace_notification_inhibit))
@@ -538,6 +539,9 @@ static int apm_suspend_notifier(struct notifier_block *nb,
switch (event) {
case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ apm_event = (event == PM_SUSPEND_PREPARE) ?
+ APM_USER_SUSPEND : APM_USER_HIBERNATION;
/*
* Queue an event to all "writer" users that we want
* to suspend and need their ack.
@@ -550,7 +554,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
as->writer && as->suser) {
as->suspend_state = SUSPEND_PENDING;
atomic_inc(&suspend_acks_pending);
- queue_add_event(&as->queue, APM_USER_SUSPEND);
+ queue_add_event(&as->queue, apm_event);
}
}
@@ -601,11 +605,14 @@ static int apm_suspend_notifier(struct notifier_block *nb,
return notifier_from_errno(err);
case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ apm_event = (event == PM_POST_SUSPEND) ?
+ APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
/*
* Anyone on the APM queues will think we're still suspended.
* Send a message so everyone knows we're now awake again.
*/
- queue_event(APM_NORMAL_RESUME);
+ queue_event(apm_event);
/*
* Finally, wake up anyone who is sleeping on the suspend.
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5d9c31dfc905..d5d4cd82b9f7 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -34,15 +34,12 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
-
+#include <linux/acpi.h>
+#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <linux/hpet.h>
-
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 43577ca780e3..8c3b255e629a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -8,7 +8,6 @@
*/
#include <linux/hw_random.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a0f7724852eb..b9495a8c05c6 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -37,7 +37,6 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 402ccfb625c5..9f8277cc44b4 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -22,7 +22,6 @@
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index f9beed54d0c8..432232eefe05 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/preempt.h>
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 232b87fb5fc9..00e9d2d46634 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index f2885dbe1849..b5cc3420c659 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/hw_random.h>
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index c12398d1517c..2ce0e225e58c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
virtqueue_kick(vq);
}
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e210f858d3cb..d915707d2ba1 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
*
* Hwmon integration:
- * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2013 Guenter Roeck <linux@roeck-us.net>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a22a7a502740..f5e4cd7617f6 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -201,7 +201,7 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
}
bt->state = BT_STATE_IDLE; /* start here */
bt->complete = BT_STATE_IDLE; /* end here */
- bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
@@ -613,7 +613,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
HOST2BMC(42); /* Sequence number */
HOST2BMC(3); /* Cmd == Soft reset */
BT_CONTROL(BT_H2B_ATN);
- bt->timeout = BT_RESET_DELAY * 1000000;
+ bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
BT_STATE_CHANGE(BT_STATE_RESET3,
SI_SM_CALL_WITH_DELAY);
@@ -651,14 +651,14 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
bt_init_data(bt, bt->io);
if ((i == 8) && !BT_CAP[2]) {
bt->BT_CAP_outreqs = BT_CAP[3];
- bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
bt->BT_CAP_retries = BT_CAP[7];
} else
printk(KERN_WARNING "IPMI BT: using default values\n");
if (!bt->BT_CAP_outreqs)
bt->BT_CAP_outreqs = 1;
printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
- bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
bt->timeout = bt->BT_CAP_req2rsp;
return SI_SM_CALL_WITHOUT_DELAY;
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index e53fc24c6af3..6a4bdc18955a 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 5000000
-#define OBF_RETRY_TIMEOUT 5000000
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 15e4a6031934..b7efd3c1a882 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,7 +61,6 @@
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
@@ -1358,7 +1357,7 @@ static int std_irq_setup(struct smi_info *info)
if (info->si_type == SI_BT) {
rv = request_irq(info->irq,
si_bt_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (!rv)
@@ -1368,7 +1367,7 @@ static int std_irq_setup(struct smi_info *info)
} else
rv = request_irq(info->irq,
si_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (rv) {
@@ -1849,11 +1848,15 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- if (!add_smi(info)) {
- if (try_smi_init(info))
- cleanup_one_si(info);
- } else {
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
+ goto out;
+ }
+ rv = try_smi_init(info);
+ if (rv) {
+ cleanup_one_si(info);
+ goto out;
}
} else {
/* remove */
@@ -2067,6 +2070,7 @@ struct SPMITable {
static int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
+ int rv;
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
@@ -2141,10 +2145,11 @@ static int try_init_spmi(struct SPMITable *spmi)
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
+ rv = add_smi(info);
+ if (rv)
kfree(info);
- return 0;
+ return rv;
}
static void spmi_find_bmc(void)
@@ -2178,6 +2183,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
+ int rv;
acpi_dev = pnp_acpi_device(dev);
if (!acpi_dev)
@@ -2259,10 +2265,11 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
res, info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
- goto err_free;
+ rv = add_smi(info);
+ if (rv)
+ kfree(info);
- return 0;
+ return rv;
err_free:
kfree(info);
@@ -2566,16 +2573,20 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
&pdev->resource[0], info->io.regsize, info->io.regspacing,
info->irq);
- if (add_smi(info))
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
+ pci_disable_device(pdev);
+ }
- return 0;
+ return rv;
}
static void ipmi_pci_remove(struct pci_dev *pdev)
{
struct smi_info *info = pci_get_drvdata(pdev);
cleanup_one_si(info);
+ pci_disable_device(pdev);
}
static struct pci_device_id ipmi_pci_devices[] = {
@@ -2670,9 +2681,10 @@ static int ipmi_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, info);
- if (add_smi(info)) {
+ ret = add_smi(info);
+ if (ret) {
kfree(info);
- return -EBUSY;
+ return ret;
}
#endif
return 0;
@@ -2711,6 +2723,7 @@ static struct platform_driver ipmi_driver = {
static int ipmi_parisc_probe(struct parisc_device *dev)
{
struct smi_info *info;
+ int rv;
info = smi_info_alloc();
@@ -2736,9 +2749,10 @@ static int ipmi_parisc_probe(struct parisc_device *dev)
dev_set_drvdata(&dev->dev, info);
- if (add_smi(info)) {
+ rv = add_smi(info);
+ if (rv) {
kfree(info);
- return -EBUSY;
+ return rv;
}
return 0;
@@ -2773,7 +2787,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
- smi_info->si_sm, 100);
+ smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index faed92971907..c8e77afa8b96 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -80,7 +80,7 @@ enum smic_states {
#define SMIC_MAX_ERROR_RETRIES 3
/* Timeouts in microseconds. */
-#define SMIC_RETRY_TIMEOUT 2000000
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
/* SMIC Flags Register Bits */
#define SMIC_RX_DATA_READY 0x80
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 92c5937f80c3..917403fe10da 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -99,6 +99,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
ssize_t read, sz;
char *ptr;
+ if (p != *ppos)
+ return 0;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
@@ -157,6 +160,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
unsigned long copied;
void *ptr;
+ if (p != *ppos)
+ return -EFBIG;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
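
The added p != *ppos checks above most likely guard against offset truncation (my reading, not stated in the hunk): the loff_t file position is copied into a phys_addr-sized local, and an offset that does not survive that round trip is not a physical address the driver should touch. A minimal sketch of the pattern:

	/* hypothetical: detect a file offset that doesn't fit in phys_addr_t */
	phys_addr_t p = *ppos;
	if (p != *ppos)
		return 0;   /* truncated offset: nothing sensible to read here */
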
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index 8eca55deb3a3..ba82a06d9684 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -182,7 +182,7 @@ static int smd_pkt_write(struct file *file, const char __user *buf,
if (count > MAX_BUF_SIZE)
return -EINVAL;
- DBG("writting %d bytes\n", count);
+ DBG("writing %d bytes\n", count);
smd_pkt_devp = file->private_data;
if (!smd_pkt_devp || !smd_pkt_devp->ch)
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 881c9e595939..28740046bc83 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -50,7 +50,6 @@
#include <linux/unistd.h>
#include <linux/delay.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/sched.h> /* cond_resched() */
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 0e506bad1986..bd377472dcfb 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/kernel.h> /* printk() */
#include <linux/slab.h> /* kmalloc() */
#include <linux/fs.h> /* everything... */
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 64420b3396a2..b9a57fa4b710 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -23,7 +23,7 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
+#include <linux/acpi.h>
#include "tpm.h"
#include "tpm_eventlog.h"
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 52b9b2b2f300..472af4bb1b61 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -21,7 +21,6 @@
*
*
*/
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/wait.h>
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5b0dd8ef74c0..3b7bf2162898 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -38,7 +38,6 @@
#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/interrupt.h>
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 2db4419831be..b3ea223585bd 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -1,16 +1,6 @@
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include "tpm.h"
-static const u8 tpm_ppi_uuid[] = {
- 0xA6, 0xFA, 0xDD, 0x3D,
- 0x1B, 0x36,
- 0xB4, 0x4E,
- 0xA4, 0x24,
- 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
-};
-static char *tpm_device_name = "TPM";
-
#define TPM_PPI_REVISION_ID 1
#define TPM_PPI_FN_VERSION 1
#define TPM_PPI_FN_SUBREQ 2
@@ -24,250 +14,178 @@ static char *tpm_device_name = "TPM";
#define PPI_VS_REQ_END 255
#define PPI_VERSION_LEN 3
+static const u8 tpm_ppi_uuid[] = {
+ 0xA6, 0xFA, 0xDD, 0x3D,
+ 0x1B, 0x36,
+ 0xB4, 0x4E,
+ 0xA4, 0x24,
+ 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
+};
+
+static char tpm_ppi_version[PPI_VERSION_LEN + 1];
+static acpi_handle tpm_ppi_handle;
+
static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
void **return_value)
{
- acpi_status status = AE_OK;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
- if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
- if (strstr(buffer.pointer, context) != NULL) {
- *return_value = handle;
- status = AE_CTRL_TERMINATE;
- }
- kfree(buffer.pointer);
+ if (!acpi_check_dsm(handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_VERSION))
+ return AE_OK;
+
+ /* Cache version string */
+ obj = acpi_evaluate_dsm_typed(handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
+ NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ strlcpy(tpm_ppi_version, obj->string.pointer,
+ PPI_VERSION_LEN + 1);
+ ACPI_FREE(obj);
}
- return status;
+ *return_value = handle;
+
+ return AE_CTRL_TERMINATE;
}
-static inline void ppi_assign_params(union acpi_object params[4],
- u64 function_num)
+static inline union acpi_object *
+tpm_eval_dsm(int func, acpi_object_type type, union acpi_object *argv4)
{
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(tpm_ppi_uuid);
- params[0].buffer.pointer = (char *)tpm_ppi_uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = TPM_PPI_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = function_num;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
+ BUG_ON(!tpm_ppi_handle);
+ return acpi_evaluate_dsm_typed(tpm_ppi_handle, tpm_ppi_uuid,
+ TPM_PPI_REVISION_ID, func, argv4, type);
}
static ssize_t tpm_show_ppi_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *obj;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- obj = (union acpi_object *)output.pointer;
- status = scnprintf(buf, PAGE_SIZE, "%s\n", obj->string.pointer);
- kfree(output.pointer);
- return status;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", tpm_ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
struct device_attribute *attr, char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *ret_obj;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_GETREQ);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
+ ssize_t size = -EINVAL;
+ union acpi_object *obj;
+
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETREQ, ACPI_TYPE_PACKAGE, NULL);
+ if (!obj)
return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
/*
* output.pointer should be of package type, including two integers.
* The first is function return code, 0 means success and 1 means
* error. The second is pending TPM operation requested by the OS, 0
* means none and >0 means operation value.
*/
- ret_obj = ((union acpi_object *)output.pointer)->package.elements;
- if (ret_obj->type == ACPI_TYPE_INTEGER) {
- if (ret_obj->integer.value) {
- status = -EFAULT;
- goto cleanup;
- }
- ret_obj++;
- if (ret_obj->type == ACPI_TYPE_INTEGER)
- status = scnprintf(buf, PAGE_SIZE, "%llu\n",
- ret_obj->integer.value);
+ if (obj->package.count == 2 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
else
- status = -EINVAL;
- } else {
- status = -EINVAL;
+ size = scnprintf(buf, PAGE_SIZE, "%llu\n",
+ obj->package.elements[1].integer.value);
}
-cleanup:
- kfree(output.pointer);
- return status;
+
+ ACPI_FREE(obj);
+
+ return size;
}
static ssize_t tpm_store_ppi_request(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object obj;
u32 req;
u64 ret;
+ int func = TPM_PPI_FN_SUBREQ;
+ union acpi_object *obj, tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
/*
* the function to submit TPM operation request to pre-os environment
* is updated with function index from SUBREQ to SUBREQ2 since PPI
* version 1.1
*/
- if (strcmp(version, "1.1") < 0)
- params[2].integer.value = TPM_PPI_FN_SUBREQ;
- else
- params[2].integer.value = TPM_PPI_FN_SUBREQ2;
+ if (acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_SUBREQ2))
+ func = TPM_PPI_FN_SUBREQ2;
+
/*
* PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOS
* accept buffer/string/integer type, but some BIOS accept buffer/
* string/package type. For PPI version 1.0 and 1.1, use buffer type
* for compatibility, and use package type since 1.2 according to spec.
*/
- if (strcmp(version, "1.2") < 0) {
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = sizeof(req);
- sscanf(buf, "%d", &req);
- params[3].buffer.pointer = (char *)&req;
+ if (strcmp(tpm_ppi_version, "1.2") < 0) {
+ if (sscanf(buf, "%d", &req) != 1)
+ return -EINVAL;
+ argv4.type = ACPI_TYPE_BUFFER;
+ argv4.buffer.length = sizeof(req);
+ argv4.buffer.pointer = (u8 *)&req;
+ } else {
+ tmp.type = ACPI_TYPE_INTEGER;
+ if (sscanf(buf, "%llu", &tmp.integer.value) != 1)
+ return -EINVAL;
+ }
+
+ obj = tpm_eval_dsm(func, ACPI_TYPE_INTEGER, &argv4);
+ if (!obj) {
+ return -ENXIO;
} else {
- params[3].package.count = 1;
- obj.type = ACPI_TYPE_INTEGER;
- sscanf(buf, "%llu", &obj.integer.value);
- params[3].package.elements = &obj;
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
}
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- ret = ((union acpi_object *)output.pointer)->integer.value;
if (ret == 0)
- status = (acpi_status)count;
- else if (ret == 1)
- status = -EPERM;
- else
- status = -EFAULT;
- kfree(output.pointer);
- return status;
+ return (acpi_status)count;
+
+ return (ret == 1) ? -EPERM : -EFAULT;
}
static ssize_t tpm_show_ppi_transition_action(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
u32 ret;
- char *info[] = {
+ acpi_status status;
+ union acpi_object *obj = NULL;
+ union acpi_object tmp = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
+ .buffer.pointer = NULL
+ };
+
+ static char *info[] = {
"None",
"Shutdown",
"Reboot",
"OS Vendor-specific",
"Error",
};
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
/*
* The PPI spec defines the fourth _DSM argument as an empty package, but
* some platforms (e.g. Capella with PPI 1.0) expect an integer, string,
* or buffer, so pass an empty buffer for compatibility when the PPI
* version is below 1.2.
*/
- if (strcmp(version, "1.2") < 0) {
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 0;
- params[3].buffer.pointer = NULL;
+ if (strcmp(tpm_ppi_version, "1.2") < 0)
+ obj = &tmp;
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETACT, ACPI_TYPE_INTEGER, obj);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
}
- params[2].integer.value = TPM_PPI_FN_GETACT;
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
- ret = ((union acpi_object *)output.pointer)->integer.value;
+
if (ret < ARRAY_SIZE(info) - 1)
status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
else
status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
info[ARRAY_SIZE(info)-1]);
- kfree(output.pointer);
return status;
}
@@ -275,27 +193,14 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object *ret_obj;
- u64 req;
-
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_GETRSP);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
+ acpi_status status = -EINVAL;
+ union acpi_object *obj, *ret_obj;
+ u64 req, res;
+
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETRSP, ACPI_TYPE_PACKAGE, NULL);
+ if (!obj)
return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
/*
* parameter output.pointer should be of package type, including
* 3 integers. The first means function return code, the second means
@@ -303,115 +208,82 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
* the most recent TPM operation request. Only if the first is 0, and
* the second integer is not 0, the response makes sense.
*/
- ret_obj = ((union acpi_object *)output.pointer)->package.elements;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
+ ret_obj = obj->package.elements;
+ if (obj->package.count < 3 ||
+ ret_obj[0].type != ACPI_TYPE_INTEGER ||
+ ret_obj[1].type != ACPI_TYPE_INTEGER ||
+ ret_obj[2].type != ACPI_TYPE_INTEGER)
goto cleanup;
- }
- if (ret_obj->integer.value) {
+
+ if (ret_obj[0].integer.value) {
status = -EFAULT;
goto cleanup;
}
- ret_obj++;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
- goto cleanup;
- }
- if (ret_obj->integer.value) {
- req = ret_obj->integer.value;
- ret_obj++;
- if (ret_obj->type != ACPI_TYPE_INTEGER) {
- status = -EINVAL;
- goto cleanup;
- }
- if (ret_obj->integer.value == 0)
+
+ req = ret_obj[1].integer.value;
+ res = ret_obj[2].integer.value;
+ if (req) {
+ if (res == 0)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0: Success");
- else if (ret_obj->integer.value == 0xFFFFFFF0)
+ else if (res == 0xFFFFFFF0)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0xFFFFFFF0: User Abort");
- else if (ret_obj->integer.value == 0xFFFFFFF1)
+ else if (res == 0xFFFFFFF1)
status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
"0xFFFFFFF1: BIOS Failure");
- else if (ret_obj->integer.value >= 1 &&
- ret_obj->integer.value <= 0x00000FFF)
+ else if (res >= 1 && res <= 0x00000FFF)
status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, ret_obj->integer.value,
- "Corresponding TPM error");
+ req, res, "Corresponding TPM error");
else
status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, ret_obj->integer.value,
- "Error");
+ req, res, "Error");
} else {
status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- ret_obj->integer.value, "No Recent Request");
+ req, "No Recent Request");
}
+
cleanup:
- kfree(output.pointer);
+ ACPI_FREE(obj);
return status;
}
static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
{
- char *str = buf;
- char version[PPI_VERSION_LEN + 1];
- acpi_handle handle;
- acpi_status status;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4];
- union acpi_object obj;
int i;
u32 ret;
- char *info[] = {
+ char *str = buf;
+ union acpi_object *obj, tmp;
+ union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ static char *info[] = {
"Not implemented",
"BIOS only",
"Blocked for OS by BIOS",
"User required",
"User not required",
};
- input.count = 4;
- ppi_assign_params(params, TPM_PPI_FN_VERSION);
- input.pointer = params;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, ppi_callback, NULL,
- tpm_device_name, &handle);
- if (ACPI_FAILURE(status))
- return -ENXIO;
- status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
- ACPI_TYPE_STRING);
- if (ACPI_FAILURE(status))
- return -ENOMEM;
-
- strlcpy(version,
- ((union acpi_object *)output.pointer)->string.pointer,
- PPI_VERSION_LEN + 1);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- if (strcmp(version, "1.2") < 0)
+ if (!acpi_check_dsm(tpm_ppi_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+ 1 << TPM_PPI_FN_GETOPR))
return -EPERM;
- params[2].integer.value = TPM_PPI_FN_GETOPR;
- params[3].package.count = 1;
- obj.type = ACPI_TYPE_INTEGER;
- params[3].package.elements = &obj;
+ tmp.integer.type = ACPI_TYPE_INTEGER;
for (i = start; i <= end; i++) {
- obj.integer.value = i;
- status = acpi_evaluate_object_typed(handle, "_DSM",
- &input, &output, ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status))
+ tmp.integer.value = i;
+ obj = tpm_eval_dsm(TPM_PPI_FN_GETOPR, ACPI_TYPE_INTEGER, &argv);
+ if (!obj) {
return -ENOMEM;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
- ret = ((union acpi_object *)output.pointer)->integer.value;
if (ret > 0 && ret < ARRAY_SIZE(info))
str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
i, ret, info[ret]);
- kfree(output.pointer);
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
}
+
return str - buf;
}
@@ -453,6 +325,12 @@ static struct attribute_group ppi_attr_grp = {
int tpm_add_ppi(struct kobject *parent)
{
+ /* Cache TPM ACPI handle and version string */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ ppi_callback, NULL, NULL, &tpm_ppi_handle);
+ if (tpm_ppi_handle == NULL)
+ return -ENODEV;
+
return sysfs_create_group(parent, &ppi_attr_grp);
}
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 92b097064df5..2064b4527040 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -17,6 +17,7 @@
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
+#include <xen/platform_pci.h>
struct tpm_private {
struct tpm_chip *chip;
@@ -378,6 +379,9 @@ static int __init xen_tpmfront_init(void)
if (!xen_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
} else {
/* Fall back to copying a page */
struct page *page = alloc_page(GFP_KERNEL);
- char *src = buf->ops->map(pipe, buf, 1);
- char *dst;
+ char *src;
if (!page)
return -ENOMEM;
- dst = kmap(page);
offset = sd->pos & ~PAGE_MASK;
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- memcpy(dst + offset, src + buf->offset, len);
-
- kunmap(page);
+ src = buf->ops->map(pipe, buf, 1);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
buf->ops->unmap(pipe, buf, src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 5c51115081b3..7641965d208d 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -23,16 +23,6 @@ config COMMON_CLK
menu "Common Clock Framework"
depends on COMMON_CLK
-config COMMON_CLK_DEBUG
- bool "DebugFS representation of clock tree"
- select DEBUG_FS
- ---help---
- Creates a directory hierarchy in debugfs for visualizing the clk
- tree structure. Each directory contains read-only members
- that export information specific to that clk node: clk_rate,
- clk_flags, clk_prepare_count, clk_enable_count &
- clk_notifier_count.
-
config COMMON_CLK_WM831X
tristate "Clock driver for WM831x/2x PMICs"
depends on MFD_WM831X
@@ -64,6 +54,16 @@ config COMMON_CLK_SI5351
This driver supports Silicon Labs 5351A/B/C programmable clock
generators.
+config COMMON_CLK_SI570
+ tristate "Clock driver for SiLabs 570 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports Silicon Labs 570/571/598/599 programmable
+ clock generators.
+
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS11 MFD"
depends on MFD_SEC_CORE
@@ -107,6 +107,8 @@ config COMMON_CLK_KEYSTONE
Supports clock drivers for Keystone based SoCs. These SoCs have a
local power sleep control module that gates the clocks to the IPs
and PLLs.
+source "drivers/clk/qcom/Kconfig"
+
endmenu
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 7a10bc9a23e7..a367a9831717 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -9,40 +9,44 @@ obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
obj-$(CONFIG_COMMON_CLK) += clk-composite.o
-# SoCs specific
-obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
-obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
-obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
-obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
-obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
-obj-$(CONFIG_ARCH_MXS) += mxs/
-obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
-obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_U300) += clk-u300.o
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
-obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o
-obj-$(CONFIG_PLAT_ORION) += mvebu/
+# hardware specific clock types
+# please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
+obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
+obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
+obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
+obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
+obj-$(CONFIG_ARCH_U300) += clk-u300.o
+obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
+obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
+obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_AT91) += at91/
+obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
ifeq ($(CONFIG_COMMON_CLK), y)
-obj-$(CONFIG_ARCH_MMP) += mmp/
+obj-$(CONFIG_ARCH_MMP) += mmp/
endif
-obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
-obj-$(CONFIG_ARCH_ZYNQ) += zynq/
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
-obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
-obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
-
-obj-$(CONFIG_X86) += x86/
-
-# Chip specific
-obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
-obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
-obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
-obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
-obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
-obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
+obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
+obj-$(CONFIG_ARCH_SIRF) += sirf/
+obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
+obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_ARCH_ZYNQ) += zynq/
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
new file mode 100644
index 000000000000..46c1d3d0d66b
--- /dev/null
+++ b/drivers/clk/at91/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for at91 specific clk
+#
+
+obj-y += pmc.o
+obj-y += clk-main.o clk-pll.o clk-plldiv.o clk-master.o
+obj-y += clk-system.o clk-peripheral.o clk-programmable.o
+
+obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
+obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
+obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
new file mode 100644
index 000000000000..8e9e8cc0412d
--- /dev/null
+++ b/drivers/clk/at91/clk-main.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
+#define SLOW_CLOCK_FREQ 32768
+#define MAINF_DIV 16
+#define MAINFRDY_TIMEOUT (((MAINF_DIV + 1) * USEC_PER_SEC) / \
+ SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MIN_WAIT (USEC_PER_SEC / SLOW_CLOCK_FREQ)
+#define MAINF_LOOP_MAX_WAIT MAINFRDY_TIMEOUT
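+/*
+ * CKGR_MCFR.MAINF counts main clock cycles over MAINF_DIV slow clock
+ * periods, so wait up to (MAINF_DIV + 1) slow clock periods (about
+ * 518 us at 32768 Hz) for MAINRDY, polling at least once per period.
+ */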
+
+struct clk_main {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned long rate;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_main(hw) container_of(hw, struct clk_main, hw)
+
+static irqreturn_t clk_main_irq_handler(int irq, void *dev_id)
+{
+ struct clk_main *clkmain = (struct clk_main *)dev_id;
+
+ wake_up(&clkmain->wait);
+ disable_irq_nosync(clkmain->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_main_prepare(struct clk_hw *hw)
+{
+ struct clk_main *clkmain = to_clk_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+ unsigned long halt_time, timeout;
+ u32 tmp;
+
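+ /*
+ * The MOSCS interrupt is requested with IRQ_NOAUTOEN and disabled
+ * again by its handler, so re-arm it before each sleep on the wait
+ * queue until the main oscillator is reported stable.
+ */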
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
+ enable_irq(clkmain->irq);
+ wait_event(clkmain->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+ }
+
+ if (clkmain->rate)
+ return 0;
+
+ timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR);
+ if (tmp & AT91_PMC_MAINRDY)
+ return 0;
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+ } while (time_before(halt_time, timeout));
+
+ return 0;
+}
+
+static int clk_main_is_prepared(struct clk_hw *hw)
+{
+ struct clk_main *clkmain = to_clk_main(hw);
+
+ return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
+}
+
+static unsigned long clk_main_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct clk_main *clkmain = to_clk_main(hw);
+ struct at91_pmc *pmc = clkmain->pmc;
+
+ if (clkmain->rate)
+ return clkmain->rate;
+
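+ /*
+ * MAINF in CKGR_MCFR is the number of main clock cycles counted
+ * over MAINF_DIV (16) slow clock periods, so with the slow clock
+ * as parent: main rate = MAINF * parent_rate / MAINF_DIV.
+ */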
+ tmp = pmc_read(pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINF;
+ clkmain->rate = (tmp * parent_rate) / MAINF_DIV;
+
+ return clkmain->rate;
+}
+
+static const struct clk_ops main_ops = {
+ .prepare = clk_main_prepare,
+ .is_prepared = clk_main_is_prepared,
+ .recalc_rate = clk_main_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_main(struct at91_pmc *pmc,
+ unsigned int irq,
+ const char *name,
+ const char *parent_name,
+ unsigned long rate)
+{
+ int ret;
+ struct clk_main *clkmain;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name)
+ return ERR_PTR(-EINVAL);
+
+ if (!rate && !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
+ if (!clkmain)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &main_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = parent_name ? 0 : CLK_IS_ROOT;
+
+ clkmain->hw.init = &init;
+ clkmain->rate = rate;
+ clkmain->pmc = pmc;
+ clkmain->irq = irq;
+ init_waitqueue_head(&clkmain->wait);
+ irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
+ ret = request_irq(clkmain->irq, clk_main_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-main", clkmain);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &clkmain->hw);
+ if (IS_ERR(clk)) {
+ free_irq(clkmain->irq, clkmain);
+ kfree(clkmain);
+ }
+
+ return clk;
+}
+
+
+
+static void __init
+of_at91_clk_main_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ unsigned int irq;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 rate = 0;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &rate);
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_main(pmc, irq, name, parent_name, rate);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_main_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
new file mode 100644
index 000000000000..c1af80bcdf20
--- /dev/null
+++ b/drivers/clk/at91/clk-master.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define MASTER_SOURCE_MAX 4
+
+#define MASTER_PRES_MASK 0x7
+#define MASTER_PRES_MAX MASTER_PRES_MASK
+#define MASTER_DIV_SHIFT 8
+#define MASTER_DIV_MASK 0x3
+
+struct clk_master_characteristics {
+ struct clk_range output;
+ u32 divisors[4];
+ u8 have_div3_pres;
+};
+
+struct clk_master_layout {
+ u32 mask;
+ u8 pres_shift;
+};
+
+#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
+
+struct clk_master {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ const struct clk_master_layout *layout;
+ const struct clk_master_characteristics *characteristics;
+};
+
+static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
+{
+ struct clk_master *master = (struct clk_master *)dev_id;
+
+ wake_up(&master->wait);
+ disable_irq_nosync(master->irq);
+
+ return IRQ_HANDLED;
+}
+static int clk_master_prepare(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
+ enable_irq(master->irq);
+ wait_event(master->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+ }
+
+ return 0;
+}
+
+static int clk_master_is_prepared(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+}
+
+static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u8 pres;
+ u8 div;
+ unsigned long rate = parent_rate;
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+ const struct clk_master_layout *layout = master->layout;
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ u32 tmp;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
+ pmc_unlock(pmc);
+
+ pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
+ div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+
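+ /*
+ * On SoCs that provide it, the maximum prescaler encoding selects
+ * a divide-by-3 instead of a power-of-two shift; otherwise PRES
+ * is a plain right shift of the parent rate.
+ */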
+ if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
+ rate /= 3;
+ else
+ rate >>= pres;
+
+ rate /= characteristics->divisors[div];
+
+ if (rate < characteristics->output.min)
+ pr_warn("master clk is underclocked");
+ else if (rate > characteristics->output.max)
+ pr_warn("master clk is overclocked");
+
+ return rate;
+}
+
+static u8 clk_master_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct at91_pmc *pmc = master->pmc;
+
+ return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
+}
+
+static const struct clk_ops master_ops = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_recalc_rate,
+ .get_parent = clk_master_get_parent,
+};
+
+static struct clk * __init
+at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, int num_parents,
+ const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics)
+{
+ int ret;
+ struct clk_master *master;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !irq || !name || !num_parents || !parent_names)
+ return ERR_PTR(-EINVAL);
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &master_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = 0;
+
+ master->hw.init = &init;
+ master->layout = layout;
+ master->characteristics = characteristics;
+ master->pmc = pmc;
+ master->irq = irq;
+ init_waitqueue_head(&master->wait);
+ irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
+ ret = request_irq(master->irq, clk_master_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-master", master);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &master->hw);
+ if (IS_ERR(clk))
+ kfree(master);
+
+ return clk;
+}
+
+
+static const struct clk_master_layout at91rm9200_master_layout = {
+ .mask = 0x31F,
+ .pres_shift = 2,
+};
+
+static const struct clk_master_layout at91sam9x5_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+};
+
+
+static struct clk_master_characteristics * __init
+of_at91_clk_master_get_characteristics(struct device_node *np)
+{
+ struct clk_master_characteristics *characteristics;
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output))
+ goto out_free_characteristics;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors",
+ characteristics->divisors, 4);
+
+ characteristics->have_div3_pres =
+ of_property_read_bool(np, "atmel,master-clk-have-div3-pres");
+
+ return characteristics;
+
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_master_layout *layout)
+{
+ struct clk *clk;
+ int num_parents;
+ int i;
+ unsigned int irq;
+ const char *parent_names[MASTER_SOURCE_MAX];
+ const char *name = np->name;
+ struct clk_master_characteristics *characteristics;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_master_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ goto out_free_characteristics;
+
+ clk = at91_clk_register_master(pmc, irq, name, num_parents,
+ parent_names, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
+}
+
+void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
+}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
new file mode 100644
index 000000000000..597fed423d7d
--- /dev/null
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define PERIPHERAL_MAX 64
+
+#define PERIPHERAL_AT91RM9200 0
+#define PERIPHERAL_AT91SAM9X5 1
+
+#define PERIPHERAL_ID_MIN 2
+#define PERIPHERAL_ID_MAX 31
+#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
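+/*
+ * Peripheral IDs 2..31 are controlled through PMC_PCER/PCDR/PCSR and
+ * IDs 32..63 through the PCER1/PCDR1/PCSR1 variants, so only the low
+ * five bits of the ID select the bit position within a register.
+ */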
+
+#define PERIPHERAL_RSHIFT_MASK 0x3
+#define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
+
+#define PERIPHERAL_MAX_SHIFT 4
+
+struct clk_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u32 id;
+};
+
+#define to_clk_peripheral(hw) container_of(hw, struct clk_peripheral, hw)
+
+struct clk_sam9x5_peripheral {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ struct clk_range range;
+ u32 id;
+ u32 div;
+ bool auto_div;
+};
+
+#define to_clk_sam9x5_peripheral(hw) \
+ container_of(hw, struct clk_sam9x5_peripheral, hw)
+
+static int clk_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCER;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 0;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCER1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ return 0;
+}
+
+static void clk_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCDR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCDR1;
+ pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+}
+
+static int clk_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_peripheral *periph = to_clk_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int offset = AT91_PMC_PCSR;
+ u32 id = periph->id;
+
+ if (id < PERIPHERAL_ID_MIN)
+ return 1;
+ if (id > PERIPHERAL_ID_MAX)
+ offset = AT91_PMC_PCSR1;
+ return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
+}
+
+static const struct clk_ops peripheral_ops = {
+ .enable = clk_peripheral_enable,
+ .disable = clk_peripheral_disable,
+ .is_enabled = clk_peripheral_is_enabled,
+};
+
+static struct clk * __init
+at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id)
+{
+ struct clk_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->pmc = pmc;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+
+ return clk;
+}
+
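+/*
+ * If no divider has been programmed yet, pick the smallest power-of-two
+ * divider that keeps the peripheral clock within its declared maximum
+ * rate (periph->range.max), when such a limit exists.
+ */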
+static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
+{
+ struct clk *parent;
+ unsigned long parent_rate;
+ int shift = 0;
+
+ if (!periph->auto_div)
+ return;
+
+ if (periph->range.max) {
+ parent = clk_get_parent_by_index(periph->hw.clk, 0);
+ parent_rate = __clk_get_rate(parent);
+ if (!parent_rate)
+ return;
+
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift <= periph->range.max)
+ break;
+ }
+ }
+
+ periph->auto_div = false;
+ periph->div = shift;
+}
+
+static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 0;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_DIV(periph->div) |
+ AT91_PMC_PCR_EN);
+ return 0;
+}
+
+static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return;
+
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID) |
+ AT91_PMC_PCR_CMD);
+}
+
+static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ int ret;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return 1;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
+ pmc_unlock(pmc);
+
+ return ret;
+}
+
+static unsigned long
+clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ struct at91_pmc *pmc = periph->pmc;
+ u32 tmp;
+
+ if (periph->id < PERIPHERAL_ID_MIN)
+ return parent_rate;
+
+ pmc_lock(pmc);
+ pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID));
+ tmp = pmc_read(pmc, AT91_PMC_PCR);
+ pmc_unlock(pmc);
+
+ if (tmp & AT91_PMC_PCR_EN) {
+ periph->div = PERIPHERAL_RSHIFT(tmp);
+ periph->auto_div = false;
+ } else {
+ clk_sam9x5_peripheral_autodiv(periph);
+ }
+
+ return parent_rate >> periph->div;
+}
+
+static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int shift = 0;
+ unsigned long best_rate;
+ unsigned long best_diff;
+ unsigned long cur_rate = *parent_rate;
+ unsigned long cur_diff;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+ return *parent_rate;
+
+ if (periph->range.max) {
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate <= periph->range.max)
+ break;
+ }
+ }
+
+ if (rate >= cur_rate)
+ return cur_rate;
+
+ best_diff = cur_rate - rate;
+ best_rate = cur_rate;
+ for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ cur_rate = *parent_rate >> shift;
+ if (cur_rate < rate)
+ cur_diff = rate - cur_rate;
+ else
+ cur_diff = cur_rate - rate;
+
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_rate = cur_rate;
+ }
+
+ if (!best_diff || cur_rate < rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ int shift;
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ if (parent_rate == rate)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (periph->range.max && rate > periph->range.max)
+ return -EINVAL;
+
+ for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+ if (parent_rate >> shift == rate) {
+ periph->auto_div = false;
+ periph->div = shift;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops sam9x5_peripheral_ops = {
+ .enable = clk_sam9x5_peripheral_enable,
+ .disable = clk_sam9x5_peripheral_disable,
+ .is_enabled = clk_sam9x5_peripheral_is_enabled,
+ .recalc_rate = clk_sam9x5_peripheral_recalc_rate,
+ .round_rate = clk_sam9x5_peripheral_round_rate,
+ .set_rate = clk_sam9x5_peripheral_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u32 id,
+ const struct clk_range *range)
+{
+ struct clk_sam9x5_peripheral *periph;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!pmc || !name || !parent_name)
+ return ERR_PTR(-EINVAL);
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (!periph)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &sam9x5_peripheral_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ periph->id = id;
+ periph->hw.init = &init;
+ periph->div = 0;
+ periph->pmc = pmc;
+ periph->auto_div = true;
+ periph->range = *range;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ kfree(periph);
+ else
+ clk_sam9x5_peripheral_autodiv(periph);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
+{
+ int num;
+ u32 id;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name;
+ struct device_node *periphclknp;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ for_each_child_of_node(np, periphclknp) {
+ if (of_property_read_u32(periphclknp, "reg", &id))
+ continue;
+
+ if (id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = periphclknp->name;
+
+ if (type == PERIPHERAL_AT91RM9200) {
+ clk = at91_clk_register_peripheral(pmc, name,
+ parent_name, id);
+ } else {
+ struct clk_range range = CLK_RANGE(0, 0);
+
+ of_at91_get_clk_range(periphclknp,
+ "atmel,clk-output-range",
+ &range);
+
+ clk = at91_clk_register_sam9x5_peripheral(pmc, name,
+ parent_name,
+ id, &range);
+ }
+
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(periphclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
+}
+
+void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
+}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
new file mode 100644
index 000000000000..cf6ed023504c
--- /dev/null
+++ b/drivers/clk/at91/clk-pll.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define PLL_STATUS_MASK(id) (1 << (1 + (id)))
+#define PLL_REG(id) (AT91_CKGR_PLLAR + ((id) * 4))
+#define PLL_DIV_MASK 0xff
+#define PLL_DIV_MAX PLL_DIV_MASK
+#define PLL_DIV(reg) ((reg) & PLL_DIV_MASK)
+#define PLL_MUL(reg, layout) (((reg) >> (layout)->mul_shift) & \
+ (layout)->mul_mask)
+#define PLL_ICPR_SHIFT(id) ((id) * 16)
+#define PLL_ICPR_MASK(id) (0xffff << PLL_ICPR_SHIFT(id))
+#define PLL_MAX_COUNT 0x3ff
+#define PLL_COUNT_SHIFT 8
+#define PLL_OUT_SHIFT 14
+#define PLL_MAX_ID 1
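+/*
+ * Two PLLs are handled: id 0 is PLLA (CKGR_PLLAR) and id 1 the register
+ * that follows it (PLLB). Each PLL has a lock bit in PMC_SR and a 16-bit
+ * charge pump field in PMC_PLLICPR.
+ */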
+
+struct clk_pll_characteristics {
+ struct clk_range input;
+ int num_output;
+ struct clk_range *output;
+ u16 *icpll;
+ u8 *out;
+};
+
+struct clk_pll_layout {
+ u32 pllr_mask;
+ u16 mul_mask;
+ u8 mul_shift;
+};
+
+#define to_clk_pll(hw) container_of(hw, struct clk_pll, hw)
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+ u8 div;
+ u8 range;
+ u16 mul;
+ const struct clk_pll_layout *layout;
+ const struct clk_pll_characteristics *characteristics;
+};
+
+static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
+{
+ struct clk_pll *pll = (struct clk_pll *)dev_id;
+
+ wake_up(&pll->wait);
+ disable_irq_nosync(pll->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+ u8 id = pll->id;
+ u32 mask = PLL_STATUS_MASK(id);
+ int offset = PLL_REG(id);
+ u8 out = 0;
+ u32 pllr, icpr;
+ u8 div;
+ u16 mul;
+
+ pllr = pmc_read(pmc, offset);
+ div = PLL_DIV(pllr);
+ mul = PLL_MUL(pllr, layout);
+
+ if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
+ (div == pll->div && mul == pll->mul))
+ return 0;
+
+ if (characteristics->out)
+ out = characteristics->out[pll->range];
+ if (characteristics->icpll) {
+ icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
+ icpr |= (characteristics->icpll[pll->range] <<
+ PLL_ICPR_SHIFT(id));
+ pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
+ }
+
+ pllr &= ~layout->pllr_mask;
+ pllr |= layout->pllr_mask &
+ (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
+ (out << PLL_OUT_SHIFT) |
+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
+ pmc_write(pmc, offset, pllr);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
+ enable_irq(pll->irq);
+ wait_event(pll->wait,
+ pmc_read(pmc, AT91_PMC_SR) & mask);
+ }
+
+ return 0;
+}
+
+static int clk_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) &
+ PLL_STATUS_MASK(pll->id));
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct at91_pmc *pmc = pll->pmc;
+ const struct clk_pll_layout *layout = pll->layout;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
+
+ pmc_write(pmc, offset, tmp);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ const struct clk_pll_layout *layout = pll->layout;
+ struct at91_pmc *pmc = pll->pmc;
+ int offset = PLL_REG(pll->id);
+ u32 tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ u8 div = PLL_DIV(tmp);
+ u16 mul = PLL_MUL(tmp, layout);
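+
+ /* fout = fin * (MUL + 1) / DIV; a zero DIV or MUL means the PLL is disabled. */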
+ if (!div || !mul)
+ return 0;
+
+ return (parent_rate * (mul + 1)) / div;
+}
+
+static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div, u32 *mul,
+ u32 *index)
+{
+ unsigned long maxrate;
+ unsigned long minrate;
+ unsigned long divrate;
+ unsigned long bestdiv = 1;
+ unsigned long bestmul;
+ unsigned long tmpdiv;
+ unsigned long roundup;
+ unsigned long rounddown;
+ unsigned long remainder;
+ unsigned long bestremainder;
+ unsigned long maxmul;
+ unsigned long maxdiv;
+ unsigned long mindiv;
+ int i = 0;
+ const struct clk_pll_layout *layout = pll->layout;
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+
+ /* Minimum divider = 1 */
+ /* Maximum multiplier = max_mul */
+ maxmul = layout->mul_mask + 1;
+ maxrate = (parent_rate * maxmul) / 1;
+
+ /* Maximum divider = max_div */
+ /* Minimum multiplier = 2 */
+ maxdiv = PLL_DIV_MAX;
+ minrate = (parent_rate * 2) / maxdiv;
+
+ if (parent_rate < characteristics->input.min ||
+ parent_rate > characteristics->input.max)
+ return -ERANGE;
+
+ if (parent_rate < minrate || parent_rate > maxrate)
+ return -ERANGE;
+
+ for (i = 0; i < characteristics->num_output; i++) {
+ if (parent_rate >= characteristics->output[i].min &&
+ parent_rate <= characteristics->output[i].max)
+ break;
+ }
+
+ if (i >= characteristics->num_output)
+ return -ERANGE;
+
+ bestmul = rate / parent_rate;
+ rounddown = rate % parent_rate;
+ roundup = parent_rate - rounddown;
+ bestremainder = roundup < rounddown ? roundup : rounddown;
+
+ if (!bestremainder) {
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+ return rate;
+ }
+
+ maxdiv = 255 / (bestmul + 1);
+ if (parent_rate / maxdiv < characteristics->input.min)
+ maxdiv = parent_rate / characteristics->input.min;
+ mindiv = parent_rate / characteristics->input.max;
+ if (parent_rate % characteristics->input.max)
+ mindiv++;
+
+ for (tmpdiv = mindiv; tmpdiv < maxdiv; tmpdiv++) {
+ divrate = parent_rate / tmpdiv;
+
+ rounddown = rate % divrate;
+ roundup = divrate - rounddown;
+ remainder = roundup < rounddown ? roundup : rounddown;
+
+ if (remainder < bestremainder) {
+ bestremainder = remainder;
+ bestmul = rate / divrate;
+ bestdiv = tmpdiv;
+ }
+
+ if (!remainder)
+ break;
+ }
+
+ rate = (parent_rate / bestdiv) * bestmul;
+
+ if (div)
+ *div = bestdiv;
+ if (mul)
+ *mul = bestmul;
+ if (index)
+ *index = i;
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
+ NULL, NULL, NULL);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ long ret;
+ u32 div;
+ u32 mul;
+ u32 index;
+
+ ret = clk_pll_get_best_div_mul(pll, rate, parent_rate,
+ &div, &mul, &index);
+ if (ret < 0)
+ return ret;
+
+ pll->range = index;
+ pll->div = div;
+ pll->mul = mul;
+
+ return 0;
+}
+
+static const struct clk_ops pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .is_prepared = clk_pll_is_prepared,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
+ const char *parent_name, u8 id,
+ const struct clk_pll_layout *layout,
+ const struct clk_pll_characteristics *characteristics)
+{
+ struct clk_pll *pll;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ int ret;
+ int offset = PLL_REG(id);
+ u32 tmp;
+
+ if (id > PLL_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->id = id;
+ pll->hw.init = &init;
+ pll->layout = layout;
+ pll->characteristics = characteristics;
+ pll->pmc = pmc;
+ pll->irq = irq;
+ tmp = pmc_read(pmc, offset) & layout->pllr_mask;
+ pll->div = PLL_DIV(tmp);
+ pll->mul = PLL_MUL(tmp, layout);
+ init_waitqueue_head(&pll->wait);
+ irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
+ ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
+ id ? "clk-pllb" : "clk-plla", pll);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+
+static const struct clk_pll_layout at91rm9200_pll_layout = {
+ .pllr_mask = 0x7FFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x7FF,
+};
+
+static const struct clk_pll_layout at91sam9g45_pll_layout = {
+ .pllr_mask = 0xFFFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0xFF,
+};
+
+static const struct clk_pll_layout at91sam9g20_pllb_layout = {
+ .pllr_mask = 0x3FFFFF,
+ .mul_shift = 16,
+ .mul_mask = 0x3F,
+};
+
+static const struct clk_pll_layout sama5d3_pll_layout = {
+ .pllr_mask = 0x1FFFFFF,
+ .mul_shift = 18,
+ .mul_mask = 0x7F,
+};
+
+
+static struct clk_pll_characteristics * __init
+of_at91_clk_pll_get_characteristics(struct device_node *np)
+{
+ int i;
+ int offset;
+ u32 tmp;
+ int num_output;
+ u32 num_cells;
+ struct clk_range input;
+ struct clk_range *output;
+ u8 *out = NULL;
+ u16 *icpll = NULL;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input))
+ return NULL;
+
+ if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells",
+ &num_cells))
+ return NULL;
+
+ if (num_cells < 2 || num_cells > 4)
+ return NULL;
+
+ if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ return NULL;
+ num_output = tmp / (sizeof(u32) * num_cells);
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ output = kzalloc(sizeof(*output) * num_output, GFP_KERNEL);
+ if (!output)
+ goto out_free_characteristics;
+
+ if (num_cells > 2) {
+ out = kzalloc(sizeof(*out) * num_output, GFP_KERNEL);
+ if (!out)
+ goto out_free_output;
+ }
+
+ if (num_cells > 3) {
+ icpll = kzalloc(sizeof(*icpll) * num_output, GFP_KERNEL);
+ if (!icpll)
+ goto out_free_output;
+ }
+
+ for (i = 0; i < num_output; i++) {
+ offset = i * num_cells;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset, &tmp))
+ goto out_free_output;
+ output[i].min = tmp;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 1, &tmp))
+ goto out_free_output;
+ output[i].max = tmp;
+
+ if (num_cells == 2)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 2, &tmp))
+ goto out_free_output;
+ out[i] = tmp;
+
+ if (num_cells == 3)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 3, &tmp))
+ goto out_free_output;
+ icpll[i] = tmp;
+ }
+
+ characteristics->input = input;
+ characteristics->num_output = num_output;
+ characteristics->output = output;
+ characteristics->out = out;
+ characteristics->icpll = icpll;
+ return characteristics;
+
+out_free_output:
+ kfree(icpll);
+ kfree(out);
+ kfree(output);
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_pll_layout *layout)
+{
+ u32 id;
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_property_read_u32(np, "reg", &id))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
+ characteristics);
+ if (IS_ERR(clk))
+ goto out_free_characteristics;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
+}
+
+void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
+}
+
+void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
+}
+
+void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
+}
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
new file mode 100644
index 000000000000..ea226562bb40
--- /dev/null
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define to_clk_plldiv(hw) container_of(hw, struct clk_plldiv, hw)
+
+struct clk_plldiv {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+
+ if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
+ return parent_rate / 2;
+
+ return parent_rate;
+}
+
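+/*
+ * PLLADIV2 in PMC_MCKR only allows dividing the PLLA output by two, so
+ * rates are rounded to whichever of parent_rate and parent_rate / 2 is
+ * closest to the requested rate.
+ */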
+static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+
+ if (rate > *parent_rate)
+ return *parent_rate;
+ div = *parent_rate / 2;
+ if (rate < div)
+ return div;
+
+ if (rate - div < *parent_rate - rate)
+ return div;
+
+ return *parent_rate;
+}
+
+static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_plldiv *plldiv = to_clk_plldiv(hw);
+ struct at91_pmc *pmc = plldiv->pmc;
+ u32 tmp;
+
+ if (parent_rate != rate && (parent_rate / 2) != rate)
+ return -EINVAL;
+
+ pmc_lock(pmc);
+ tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
+ if ((parent_rate / 2) == rate)
+ tmp |= AT91_PMC_PLLADIV2;
+ pmc_write(pmc, AT91_PMC_MCKR, tmp);
+ pmc_unlock(pmc);
+
+ return 0;
+}
+
+static const struct clk_ops plldiv_ops = {
+ .recalc_rate = clk_plldiv_recalc_rate,
+ .round_rate = clk_plldiv_round_rate,
+ .set_rate = clk_plldiv_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct clk_plldiv *plldiv;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ plldiv = kzalloc(sizeof(*plldiv), GFP_KERNEL);
+ if (!plldiv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &plldiv_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ plldiv->hw.init = &init;
+ plldiv->pmc = pmc;
+
+ clk = clk_register(NULL, &plldiv->hw);
+
+ if (IS_ERR(clk))
+ kfree(plldiv);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91_clk_register_plldiv(pmc, name, parent_name);
+
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+}
+
+void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_plldiv_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
new file mode 100644
index 000000000000..fd792b203eaf
--- /dev/null
+++ b/drivers/clk/at91/clk-programmable.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "pmc.h"
+
+#define PROG_SOURCE_MAX 5
+#define PROG_ID_MAX 7
+
+#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
+#define PROG_PRES_MASK 0x7
+#define PROG_MAX_RM9200_CSS 3
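+/*
+ * Each programmable clock PCKx has a ready bit at PMC_SR bit (8 + id);
+ * the clock source (CSS) and prescaler (PRES) field positions differ
+ * between SoC families, hence the per-layout css_mask and pres_shift.
+ */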
+
+struct clk_programmable_layout {
+ u8 pres_shift;
+ u8 css_mask;
+ u8 have_slck_mck;
+};
+
+struct clk_programmable {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+ u8 id;
+ u8 css;
+ u8 pres;
+ u8 slckmck;
+ const struct clk_programmable_layout *layout;
+};
+
+#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
+
+
+static irqreturn_t clk_programmable_irq_handler(int irq, void *dev_id)
+{
+ struct clk_programmable *prog = (struct clk_programmable *)dev_id;
+
+ wake_up(&prog->wait);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_programmable_prepare(struct clk_hw *hw)
+{
+ u32 tmp;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+ u8 id = prog->id;
+ u32 mask = PROG_STATUS_MASK(id);
+
+ tmp = prog->css | (prog->pres << layout->pres_shift);
+ if (layout->have_slck_mck && prog->slckmck)
+ tmp |= AT91_PMC_CSSMCK_MCK;
+
+ pmc_write(pmc, AT91_PMC_PCKR(id), tmp);
+
+ while (!(pmc_read(pmc, AT91_PMC_SR) & mask))
+ wait_event(prog->wait, pmc_read(pmc, AT91_PMC_SR) & mask);
+
+ return 0;
+}
+
+static int clk_programmable_is_ready(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_PCKR(prog->id));
+}
+
+static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+ prog->pres = (tmp >> layout->pres_shift) & PROG_PRES_MASK;
+
+ return parent_rate >> prog->pres;
+}
+
+static long clk_programmable_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long best_rate = *parent_rate;
+ unsigned long best_diff;
+ unsigned long new_diff;
+ unsigned long cur_rate;
+ int shift = 0;
+
+ if (rate > *parent_rate)
+ return *parent_rate;
+ else
+ best_diff = *parent_rate - rate;
+
+ if (!best_diff)
+ return best_rate;
+
+ for (shift = 1; shift < PROG_PRES_MASK; shift++) {
+ cur_rate = *parent_rate >> shift;
+
+ if (cur_rate > rate)
+ new_diff = cur_rate - rate;
+ else
+ new_diff = rate - cur_rate;
+
+ if (!new_diff)
+ return cur_rate;
+
+ if (new_diff < best_diff) {
+ best_diff = new_diff;
+ best_rate = cur_rate;
+ }
+
+ if (rate > cur_rate)
+ break;
+ }
+
+ return best_rate;
+}
+
+static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
+ if (index > layout->css_mask) {
+ if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
+ prog->css = 0;
+ prog->slckmck = 1;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ prog->css = index;
+ return 0;
+}
+
+static u8 clk_programmable_get_parent(struct clk_hw *hw)
+{
+ u32 tmp;
+ u8 ret;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+
+ tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+ prog->css = tmp & layout->css_mask;
+ ret = prog->css;
+ if (layout->have_slck_mck) {
+ prog->slckmck = !!(tmp & AT91_PMC_CSSMCK_MCK);
+ if (prog->slckmck && !ret)
+ ret = PROG_MAX_RM9200_CSS + 1;
+ }
+
+ return ret;
+}
+
+static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ unsigned long best_rate = parent_rate;
+ unsigned long best_diff;
+ unsigned long new_diff;
+ unsigned long cur_rate;
+ int shift = 0;
+
+ if (rate > parent_rate)
+ return parent_rate;
+ else
+ best_diff = parent_rate - rate;
+
+ if (!best_diff) {
+ prog->pres = shift;
+ return 0;
+ }
+
+ for (shift = 1; shift < PROG_PRES_MASK; shift++) {
+ cur_rate = parent_rate >> shift;
+
+ if (cur_rate > rate)
+ new_diff = cur_rate - rate;
+ else
+ new_diff = rate - cur_rate;
+
+ if (!new_diff)
+ break;
+
+ if (new_diff < best_diff) {
+ best_diff = new_diff;
+ best_rate = cur_rate;
+ }
+
+ if (rate > cur_rate)
+ break;
+ }
+
+ prog->pres = shift;
+ return 0;
+}
+
+static const struct clk_ops programmable_ops = {
+ .prepare = clk_programmable_prepare,
+ .is_prepared = clk_programmable_is_ready,
+ .recalc_rate = clk_programmable_recalc_rate,
+ .round_rate = clk_programmable_round_rate,
+ .get_parent = clk_programmable_get_parent,
+ .set_parent = clk_programmable_set_parent,
+ .set_rate = clk_programmable_set_rate,
+};
+
+static struct clk * __init
+at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id,
+ const struct clk_programmable_layout *layout)
+{
+ int ret;
+ struct clk_programmable *prog;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+ char irq_name[11];
+
+ if (id > PROG_ID_MAX)
+ return ERR_PTR(-EINVAL);
+
+ prog = kzalloc(sizeof(*prog), GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &programmable_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ prog->id = id;
+ prog->layout = layout;
+ prog->hw.init = &init;
+ prog->pmc = pmc;
+ prog->irq = irq;
+ init_waitqueue_head(&prog->wait);
+ irq_set_status_flags(prog->irq, IRQ_NOAUTOEN);
+ snprintf(irq_name, sizeof(irq_name), "clk-prog%d", id);
+ ret = request_irq(prog->irq, clk_programmable_irq_handler,
+ IRQF_TRIGGER_HIGH, irq_name, prog);
+ if (ret) {
+ kfree(prog);
+ return ERR_PTR(ret);
+ }
+
+ clk = clk_register(NULL, &prog->hw);
+ if (IS_ERR(clk))
+ kfree(prog);
+
+ return clk;
+}
+
+static const struct clk_programmable_layout at91rm9200_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 0,
+};
+
+static const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+ .pres_shift = 2,
+ .css_mask = 0x3,
+ .have_slck_mck = 1,
+};
+
+static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+ .pres_shift = 4,
+ .css_mask = 0x7,
+ .have_slck_mck = 0,
+};
+
+static void __init
+of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
+ const struct clk_programmable_layout *layout)
+{
+ int num;
+ u32 id;
+ int i;
+ unsigned int irq;
+ struct clk *clk;
+ int num_parents;
+ const char *parent_names[PROG_SOURCE_MAX];
+ const char *name;
+ struct device_node *progclknp;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; ++i) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ num = of_get_child_count(np);
+ if (!num || num > (PROG_ID_MAX + 1))
+ return;
+
+ for_each_child_of_node(np, progclknp) {
+ if (of_property_read_u32(progclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = progclknp->name;
+
+ irq = irq_of_parse_and_map(progclknp, 0);
+ if (!irq)
+ continue;
+
+ clk = at91_clk_register_programmable(pmc, irq, name,
+ parent_names, num_parents,
+ id, layout);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(progclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
+}
+
+void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
+}
+
+void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
+}
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
new file mode 100644
index 000000000000..144d47ecfe63
--- /dev/null
+++ b/drivers/clk/at91/clk-smd.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define SMD_SOURCE_MAX 2
+
+#define SMD_DIV_SHIFT 8
+#define SMD_MAX_DIV 0xf
+
+struct at91sam9x5_clk_smd {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_smd(hw) \
+ container_of(hw, struct at91sam9x5_clk_smd, hw)
+
+static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 smddiv;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_SMD);
+ smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
+ return parent_rate / (smddiv + 1);
+}
+
+static long at91sam9x5_clk_smd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div > SMD_MAX_DIV)
+ return *parent_rate / (SMD_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
+ if (index)
+ tmp |= AT91_PMC_SMDS;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+
+ return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
+}
+
+static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
+ struct at91_pmc *pmc = smd->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
+ tmp |= (div - 1) << SMD_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_smd_ops = {
+ .recalc_rate = at91sam9x5_clk_smd_recalc_rate,
+ .round_rate = at91sam9x5_clk_smd_round_rate,
+ .get_parent = at91sam9x5_clk_smd_get_parent,
+ .set_parent = at91sam9x5_clk_smd_set_parent,
+ .set_rate = at91sam9x5_clk_smd_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_smd *smd;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ smd = kzalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_smd_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ smd->hw.init = &init;
+ smd->pmc = pmc;
+
+ clk = clk_register(NULL, &smd->hw);
+ if (IS_ERR(clk))
+ kfree(smd);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[SMD_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
+ num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
new file mode 100644
index 000000000000..8f7c0434a09f
--- /dev/null
+++ b/drivers/clk/at91/clk-system.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define SYSTEM_MAX_ID 31
+
+#define SYSTEM_MAX_NAME_SZ 32
+
+#define to_clk_system(hw) container_of(hw, struct clk_system, hw)
+struct clk_system {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u8 id;
+};
+
+static int clk_system_enable(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ pmc_write(pmc, AT91_PMC_SCER, 1 << sys->id);
+ return 0;
+}
+
+static void clk_system_disable(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
+}
+
+static int clk_system_is_enabled(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+ struct at91_pmc *pmc = sys->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id));
+}
+
+static const struct clk_ops system_ops = {
+ .enable = clk_system_enable,
+ .disable = clk_system_disable,
+ .is_enabled = clk_system_is_enabled,
+};
+
+static struct clk * __init
+at91_clk_register_system(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, u8 id)
+{
+ struct clk_system *sys;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ if (!parent_name || id > SYSTEM_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ sys = kzalloc(sizeof(*sys), GFP_KERNEL);
+ if (!sys)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &system_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ /*
+ * CLK_IGNORE_UNUSED is used to keep ddrck from being switched off.
+ * TODO: implement a driver for the at91 DDR controller (see
+ * drivers/memory) that requests and enables the ddrck clock; once
+ * that is done the CLK_IGNORE_UNUSED flag can be removed.
+ */
+ init.flags = CLK_IGNORE_UNUSED;
+
+ sys->id = id;
+ sys->hw.init = &init;
+ sys->pmc = pmc;
+
+ clk = clk_register(NULL, &sys->hw);
+ if (IS_ERR(clk))
+ kfree(sys);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ int num;
+ u32 id;
+ struct clk *clk;
+ const char *name;
+ struct device_node *sysclknp;
+ const char *parent_name;
+
+ num = of_get_child_count(np);
+ if (num > (SYSTEM_MAX_ID + 1))
+ return;
+
+ for_each_child_of_node(np, sysclknp) {
+ if (of_property_read_u32(sysclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = sysclknp->name;
+
+ parent_name = of_clk_get_parent_name(sysclknp, 0);
+
+ clk = at91_clk_register_system(pmc, name, parent_name, id);
+ if (IS_ERR(clk))
+ continue;
+
+ of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
+ }
+}
+
+void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_sys_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
new file mode 100644
index 000000000000..7d1d26a4bd04
--- /dev/null
+++ b/drivers/clk/at91/clk-usb.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include "pmc.h"
+
+#define USB_SOURCE_MAX 2
+
+#define SAM9X5_USB_DIV_SHIFT 8
+#define SAM9X5_USB_MAX_DIV 0xf
+
+#define RM9200_USB_DIV_SHIFT 28
+#define RM9200_USB_DIV_TAB_SIZE 4
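+
+/*
+ * On at91rm9200 the USBDIV field indexes a table of four divisor values
+ * provided by the "atmel,clk-divisors" DT property rather than encoding
+ * the divisor directly.
+ */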
+
+struct at91sam9x5_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+};
+
+#define to_at91sam9x5_clk_usb(hw) \
+ container_of(hw, struct at91sam9x5_clk_usb, hw)
+
+struct at91rm9200_clk_usb {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ u32 divisors[4];
+};
+
+#define to_at91rm9200_clk_usb(hw) \
+ container_of(hw, struct at91rm9200_clk_usb, hw)
+
+static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ u8 usbdiv;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB);
+ usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
+ return parent_rate / (usbdiv + 1);
+}
+
+static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long div;
+ unsigned long bestrate;
+ unsigned long tmp;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = *parent_rate / rate;
+ if (div >= SAM9X5_USB_MAX_DIV)
+ return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
+
+ bestrate = *parent_rate / div;
+ tmp = *parent_rate / (div + 1);
+ if (bestrate - rate > rate - tmp)
+ bestrate = tmp;
+
+ return bestrate;
+}
+
+static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ if (index > 1)
+ return -EINVAL;
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
+ if (index)
+ tmp |= AT91_PMC_USBS;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+ return 0;
+}
+
+static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
+}
+
+static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
+ return -EINVAL;
+
+ tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
+ tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_PMC_USB, tmp);
+
+ return 0;
+}
+
+static const struct clk_ops at91sam9x5_usb_ops = {
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .get_parent = at91sam9x5_clk_usb_get_parent,
+ .set_parent = at91sam9x5_clk_usb_set_parent,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
+ return 0;
+}
+
+static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ pmc_write(pmc, AT91_PMC_USB,
+ pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
+}
+
+static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
+}
+
+static const struct clk_ops at91sam9n12_usb_ops = {
+ .enable = at91sam9n12_clk_usb_enable,
+ .disable = at91sam9n12_clk_usb_disable,
+ .is_enabled = at91sam9n12_clk_usb_is_enabled,
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+ .round_rate = at91sam9x5_clk_usb_round_rate,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9x5_usb_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static struct clk * __init
+at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name)
+{
+ struct at91sam9x5_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91sam9n12_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ u32 tmp;
+ u8 usbdiv;
+
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
+ usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
+ if (usb->divisors[usbdiv])
+ return parent_rate / usb->divisors[usbdiv];
+
+ return 0;
+}
+
+static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ unsigned long bestrate = 0;
+ int bestdiff = -1;
+ unsigned long tmprate;
+ int tmpdiff;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (!usb->divisors[i])
+ continue;
+ tmprate = *parent_rate / usb->divisors[i];
+ if (tmprate < rate)
+ tmpdiff = rate - tmprate;
+ else
+ tmpdiff = tmprate - rate;
+
+ if (bestdiff < 0 || bestdiff > tmpdiff) {
+ bestrate = tmprate;
+ bestdiff = tmpdiff;
+ }
+
+ if (!bestdiff)
+ break;
+ }
+
+ return bestrate;
+}
+
+static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 tmp;
+ int i;
+ struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
+ struct at91_pmc *pmc = usb->pmc;
+ unsigned long div = parent_rate / rate;
+
+ if (parent_rate % rate)
+ return -EINVAL;
+ for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
+ if (usb->divisors[i] == div) {
+ tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
+ ~AT91_PMC_USBDIV;
+ tmp |= i << RM9200_USB_DIV_SHIFT;
+ pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct clk_ops at91rm9200_usb_ops = {
+ .recalc_rate = at91rm9200_clk_usb_recalc_rate,
+ .round_rate = at91rm9200_clk_usb_round_rate,
+ .set_rate = at91rm9200_clk_usb_set_rate,
+};
+
+static struct clk * __init
+at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ const char *parent_name, const u32 *divisors)
+{
+ struct at91rm9200_clk_usb *usb;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &at91rm9200_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+ memcpy(usb->divisors, divisors, sizeof(usb->divisors));
+
+ clk = clk_register(NULL, &usb->hw);
+ if (IS_ERR(clk))
+ kfree(usb);
+
+ return clk;
+}
+
+void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ int i;
+ int num_parents;
+ const char *parent_names[USB_SOURCE_MAX];
+ const char *name = np->name;
+
+ num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
+ return;
+
+ for (i = 0; i < num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ if (!parent_names[i])
+ return;
+ }
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 divisors[4] = {0, 0, 0, 0};
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
+ if (!divisors[0])
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
new file mode 100644
index 000000000000..ae3263bc1476
--- /dev/null
+++ b/drivers/clk/at91/clk-utmi.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "pmc.h"
+
+#define UTMI_FIXED_MUL 40
+
+struct clk_utmi {
+ struct clk_hw hw;
+ struct at91_pmc *pmc;
+ unsigned int irq;
+ wait_queue_head_t wait;
+};
+
+#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
+
+static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
+{
+ struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
+
+ wake_up(&utmi->wait);
+ disable_irq_nosync(utmi->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int clk_utmi_prepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
+ AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+
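+ /* Enable the LOCKU interrupt and sleep until the UTMI PLL locks */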
+ while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
+ enable_irq(utmi->irq);
+ wait_event(utmi->wait,
+ pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+ }
+
+ return 0;
+}
+
+static int clk_utmi_is_prepared(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+
+ return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+}
+
+static void clk_utmi_unprepare(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+ struct at91_pmc *pmc = utmi->pmc;
+ u32 tmp = at91_pmc_read(AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
+
+ pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+}
+
+static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /* The UTMI clock rate is a fixed multiple of the parent rate */
+ return parent_rate * UTMI_FIXED_MUL;
+}
+
+static const struct clk_ops utmi_ops = {
+ .prepare = clk_utmi_prepare,
+ .unprepare = clk_utmi_unprepare,
+ .is_prepared = clk_utmi_is_prepared,
+ .recalc_rate = clk_utmi_recalc_rate,
+};
+
+static struct clk * __init
+at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
+ const char *name, const char *parent_name)
+{
+ int ret;
+ struct clk_utmi *utmi;
+ struct clk *clk = NULL;
+ struct clk_init_data init;
+
+ utmi = kzalloc(sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &utmi_ops;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.flags = CLK_SET_RATE_GATE;
+
+ utmi->hw.init = &init;
+ utmi->pmc = pmc;
+ utmi->irq = irq;
+ init_waitqueue_head(&utmi->wait);
+ irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
+ ret = request_irq(utmi->irq, clk_utmi_irq_handler,
+ IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
+ if (ret) {
+ kfree(utmi);
+ return ERR_PTR(ret);
+ }
+
+ clk = clk_register(NULL, &utmi->hw);
+ if (IS_ERR(clk))
+ kfree(utmi);
+
+ return clk;
+}
+
+static void __init
+of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
+{
+ unsigned int irq;
+ struct clk *clk;
+ const char *parent_name;
+ const char *name = np->name;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq)
+ return;
+
+ clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
+ if (IS_ERR(clk))
+ return;
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc)
+{
+ of_at91_clk_utmi_setup(np, pmc);
+}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
new file mode 100644
index 000000000000..6a61477a57e0
--- /dev/null
+++ b/drivers/clk/at91/pmc.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include <asm/proc-fns.h>
+
+#include "pmc.h"
+
+void __iomem *at91_pmc_base;
+EXPORT_SYMBOL_GPL(at91_pmc_base);
+
+void at91sam9_idle(void)
+{
+ at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+ cpu_do_idle();
+}
+
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range)
+{
+ u32 min, max;
+ int ret;
+
+ ret = of_property_read_u32_index(np, propname, 0, &min);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, propname, 1, &max);
+ if (ret)
+ return ret;
+
+ if (range) {
+ range->min = min;
+ range->max = max;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
+
+static void pmc_irq_mask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
+}
+
+static void pmc_irq_unmask(struct irq_data *d)
+{
+ struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+ pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
+}
+
+static int pmc_irq_set_type(struct irq_data *d, unsigned type)
+{
+ if (type != IRQ_TYPE_LEVEL_HIGH) {
+ pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip pmc_irq = {
+ .name = "PMC",
+ .irq_disable = pmc_irq_mask,
+ .irq_mask = pmc_irq_mask,
+ .irq_unmask = pmc_irq_unmask,
+ .irq_set_type = pmc_irq_set_type,
+};
+
+static struct lock_class_key pmc_lock_class;
+
+static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct at91_pmc *pmc = h->host_data;
+
+ irq_set_lockdep_class(virq, &pmc_lock_class);
+
+ irq_set_chip_and_handler(virq, &pmc_irq,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, pmc);
+
+ return 0;
+}
+
+static int pmc_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct at91_pmc *pmc = d->host_data;
+ const struct at91_pmc_caps *caps = pmc->caps;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+
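+ /* Reject interrupt lines this PMC variant does not provide */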
+ if (!(caps->available_irqs & (1 << *out_hwirq)))
+ return -EINVAL;
+
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static struct irq_domain_ops pmc_irq_ops = {
+ .map = pmc_irq_map,
+ .xlate = pmc_irq_domain_xlate,
+};
+
+static irqreturn_t pmc_irq_handler(int irq, void *data)
+{
+ struct at91_pmc *pmc = (struct at91_pmc *)data;
+ unsigned long sr;
+ int n;
+
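+ /* Handle only status bits that are both pending and unmasked */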
+ sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
+ if (!sr)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &sr, BITS_PER_LONG)
+ generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
+
+ return IRQ_HANDLED;
+}
+
+static const struct at91_pmc_caps at91rm9200_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_PCK3RDY,
+};
+
+static const struct at91_pmc_caps at91sam9260_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9g45_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY,
+};
+
+static const struct at91_pmc_caps at91sam9n12_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
+ AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps at91sam9x5_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
+ AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
+};
+
+static const struct at91_pmc_caps sama5d3_caps = {
+ .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
+ AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
+ AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
+ AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
+ AT91_PMC_CFDEV,
+};
+
+static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
+ void __iomem *regbase, int virq,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+
+ if (!regbase || !virq || !caps)
+ return NULL;
+
+ at91_pmc_base = regbase;
+
+ pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return NULL;
+
+ spin_lock_init(&pmc->lock);
+ pmc->regbase = regbase;
+ pmc->virq = virq;
+ pmc->caps = caps;
+
+ pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
+
+ if (!pmc->irqdomain)
+ goto out_free_pmc;
+
+ pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
+ if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+ goto out_remove_irqdomain;
+
+ return pmc;
+
+out_remove_irqdomain:
+ irq_domain_remove(pmc->irqdomain);
+out_free_pmc:
+ kfree(pmc);
+
+ return NULL;
+}
+
+static const struct of_device_id pmc_clk_ids[] __initconst = {
+ /* Main clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-main",
+ .data = of_at91rm9200_clk_main_setup,
+ },
+ /* PLL clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-pll",
+ .data = of_at91rm9200_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-pll",
+ .data = of_at91sam9g45_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g20-clk-pllb",
+ .data = of_at91sam9g20_clk_pllb_setup,
+ },
+ {
+ .compatible = "atmel,sama5d3-clk-pll",
+ .data = of_sama5d3_clk_pll_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-plldiv",
+ .data = of_at91sam9x5_clk_plldiv_setup,
+ },
+ /* Master clock */
+ {
+ .compatible = "atmel,at91rm9200-clk-master",
+ .data = of_at91rm9200_clk_master_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-master",
+ .data = of_at91sam9x5_clk_master_setup,
+ },
+ /* System clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-system",
+ .data = of_at91rm9200_clk_sys_setup,
+ },
+ /* Peripheral clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-peripheral",
+ .data = of_at91rm9200_clk_periph_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-peripheral",
+ .data = of_at91sam9x5_clk_periph_setup,
+ },
+ /* Programmable clocks */
+ {
+ .compatible = "atmel,at91rm9200-clk-programmable",
+ .data = of_at91rm9200_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-clk-programmable",
+ .data = of_at91sam9g45_clk_prog_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-programmable",
+ .data = of_at91sam9x5_clk_prog_setup,
+ },
+ /* UTMI clock */
+#if defined(CONFIG_HAVE_AT91_UTMI)
+ {
+ .compatible = "atmel,at91sam9x5-clk-utmi",
+ .data = of_at91sam9x5_clk_utmi_setup,
+ },
+#endif
+ /* USB clock */
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+ {
+ .compatible = "atmel,at91rm9200-clk-usb",
+ .data = of_at91rm9200_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-clk-usb",
+ .data = of_at91sam9x5_clk_usb_setup,
+ },
+ {
+ .compatible = "atmel,at91sam9n12-clk-usb",
+ .data = of_at91sam9n12_clk_usb_setup,
+ },
+#endif
+ /* SMD clock */
+#if defined(CONFIG_HAVE_AT91_SMD)
+ {
+ .compatible = "atmel,at91sam9x5-clk-smd",
+ .data = of_at91sam9x5_clk_smd_setup,
+ },
+#endif
+ { /*sentinel*/ }
+};
+
+static void __init of_at91_pmc_setup(struct device_node *np,
+ const struct at91_pmc_caps *caps)
+{
+ struct at91_pmc *pmc;
+ struct device_node *childnp;
+ void (*clk_setup)(struct device_node *, struct at91_pmc *);
+ const struct of_device_id *clk_id;
+ void __iomem *regbase = of_iomap(np, 0);
+ int virq;
+
+ if (!regbase)
+ return;
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (!virq)
+ return;
+
+ pmc = at91_pmc_init(np, regbase, virq, caps);
+ if (!pmc)
+ return;
+ for_each_child_of_node(np, childnp) {
+ clk_id = of_match_node(pmc_clk_ids, childnp);
+ if (!clk_id)
+ continue;
+ clk_setup = clk_id->data;
+ clk_setup(childnp, pmc);
+ }
+}
+
+static void __init of_at91rm9200_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91rm9200_caps);
+}
+CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
+ of_at91rm9200_pmc_setup);
+
+static void __init of_at91sam9260_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9260_caps);
+}
+CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
+ of_at91sam9260_pmc_setup);
+
+static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9g45_caps);
+}
+CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
+ of_at91sam9g45_pmc_setup);
+
+static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9n12_caps);
+}
+CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
+ of_at91sam9n12_pmc_setup);
+
+static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &at91sam9x5_caps);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
+ of_at91sam9x5_pmc_setup);
+
+static void __init of_sama5d3_pmc_setup(struct device_node *np)
+{
+ of_at91_pmc_setup(np, &sama5d3_caps);
+}
+CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
+ of_sama5d3_pmc_setup);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
new file mode 100644
index 000000000000..441350983ccb
--- /dev/null
+++ b/drivers/clk/at91/pmc.h
@@ -0,0 +1,114 @@
+/*
+ * drivers/clk/at91/pmc.h
+ *
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PMC_H_
+#define __PMC_H_
+
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/spinlock.h>
+
+struct clk_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
+
+struct at91_pmc_caps {
+ u32 available_irqs;
+};
+
+struct at91_pmc {
+ void __iomem *regbase;
+ int virq;
+ spinlock_t lock;
+ const struct at91_pmc_caps *caps;
+ struct irq_domain *irqdomain;
+};
+
+static inline void pmc_lock(struct at91_pmc *pmc)
+{
+ spin_lock(&pmc->lock);
+}
+
+static inline void pmc_unlock(struct at91_pmc *pmc)
+{
+ spin_unlock(&pmc->lock);
+}
+
+static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
+{
+ return readl(pmc->regbase + offset);
+}
+
+static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
+{
+ writel(value, pmc->regbase + offset);
+}
+
+int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range);
+
+extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_sama5d3_clk_pll_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+
+#if defined(CONFIG_HAVE_AT91_UTMI)
+extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_USB_CLK)
+extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#if defined(CONFIG_HAVE_AT91_SMD)
+extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
+ struct at91_pmc *pmc);
+#endif
+
+#endif /* __PMC_H_ */
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index a33f46f20a41..57a078e06efe 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -55,6 +55,30 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
+static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ rate_hw->clk = hw->clk;
+ return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
+ mux_hw->clk = hw->clk;
+ return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+ best_parent_p);
+ } else {
+ pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
+ return 0;
+ }
+}
+
static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
@@ -147,6 +171,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->mux_ops = mux_ops;
clk_composite_ops->get_parent = clk_composite_get_parent;
clk_composite_ops->set_parent = clk_composite_set_parent;
+ if (mux_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (rate_hw && rate_ops) {
@@ -170,6 +196,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->rate_hw = rate_hw;
composite->rate_ops = rate_ops;
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
+ if (rate_ops->determine_rate)
+ clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (gate_hw && gate_ops) {
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 1ed591ab8b1d..0fc56ab6e844 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -34,22 +34,31 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
return to_clk_fixed_rate(hw)->fixed_rate;
}
+static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
+ unsigned long parent_accuracy)
+{
+ return to_clk_fixed_rate(hw)->fixed_accuracy;
+}
+
const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
+ .recalc_accuracy = clk_fixed_rate_recalc_accuracy,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
/**
- * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * clk_register_fixed_rate_with_accuracy - register fixed-rate clock with the
+ * clock framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
*/
-struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate)
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy)
{
struct clk_fixed_rate *fixed;
struct clk *clk;
@@ -70,16 +79,33 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
/* struct clk_fixed_rate assignments */
fixed->fixed_rate = fixed_rate;
+ fixed->fixed_accuracy = fixed_accuracy;
fixed->hw.init = &init;
/* register the clock */
clk = clk_register(dev, &fixed->hw);
-
if (IS_ERR(clk))
kfree(fixed);
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
+
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
+{
+ return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
+ flags, fixed_rate, 0);
+}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
#ifdef CONFIG_OF
@@ -91,13 +117,18 @@ void of_fixed_clk_setup(struct device_node *node)
struct clk *clk;
const char *clk_name = node->name;
u32 rate;
+ u32 accuracy = 0;
if (of_property_read_u32(node, "clock-frequency", &rate))
return;
+ of_property_read_u32(node, "clock-accuracy", &accuracy);
+
of_property_read_string(node, "clock-output-names", &clk_name);
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+ clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+ CLK_IS_ROOT, rate,
+ accuracy);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 9f57bc37cd60..3d7e8dd8fd58 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -66,7 +66,7 @@ static void max77686_clk_unprepare(struct clk_hw *hw)
MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask);
}
-static int max77686_clk_is_enabled(struct clk_hw *hw)
+static int max77686_clk_is_prepared(struct clk_hw *hw)
{
struct max77686_clk *max77686 = to_max77686_clk(hw);
int ret;
@@ -81,10 +81,17 @@ static int max77686_clk_is_enabled(struct clk_hw *hw)
return val & max77686->mask;
}
+static unsigned long max77686_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 32768;
+}
+
static struct clk_ops max77686_clk_ops = {
.prepare = max77686_clk_prepare,
.unprepare = max77686_clk_unprepare,
- .is_enabled = max77686_clk_is_enabled,
+ .is_prepared = max77686_clk_is_prepared,
+ .recalc_rate = max77686_recalc_rate,
};
static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
@@ -105,38 +112,38 @@ static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
},
};
-static int max77686_clk_register(struct device *dev,
+static struct clk *max77686_clk_register(struct device *dev,
struct max77686_clk *max77686)
{
struct clk *clk;
struct clk_hw *hw = &max77686->hw;
clk = clk_register(dev, hw);
-
if (IS_ERR(clk))
- return -ENOMEM;
+ return clk;
max77686->lookup = kzalloc(sizeof(struct clk_lookup), GFP_KERNEL);
if (!max77686->lookup)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
max77686->lookup->con_id = hw->init->name;
max77686->lookup->clk = clk;
clkdev_add(max77686->lookup);
- return 0;
+ return clk;
}
static int max77686_clk_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max77686_clk **max77686_clks;
+ struct max77686_clk *max77686_clks[MAX77686_CLKS_NUM];
+ struct clk **clocks;
int i, ret;
- max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *)
+ clocks = devm_kzalloc(&pdev->dev, sizeof(struct clk *)
* MAX77686_CLKS_NUM, GFP_KERNEL);
- if (!max77686_clks)
+ if (!clocks)
return -ENOMEM;
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
@@ -151,47 +158,63 @@ static int max77686_clk_probe(struct platform_device *pdev)
max77686_clks[i]->mask = 1 << i;
max77686_clks[i]->hw.init = &max77686_clks_init[i];
- ret = max77686_clk_register(&pdev->dev, max77686_clks[i]);
+ clocks[i] = max77686_clk_register(&pdev->dev, max77686_clks[i]);
+ if (IS_ERR(clocks[i])) {
+ ret = PTR_ERR(clocks[i]);
+ dev_err(&pdev->dev, "failed to register %s\n",
+ max77686_clks[i]->hw.init->name);
+ goto err_clocks;
+ }
+ }
+
+ platform_set_drvdata(pdev, clocks);
+
+ if (iodev->dev->of_node) {
+ struct clk_onecell_data *of_data;
+
+ of_data = devm_kzalloc(&pdev->dev,
+ sizeof(*of_data), GFP_KERNEL);
+ if (!of_data) {
+ ret = -ENOMEM;
+ goto err_clocks;
+ }
+
+ of_data->clks = clocks;
+ of_data->clk_num = MAX77686_CLKS_NUM;
+ ret = of_clk_add_provider(iodev->dev->of_node,
+ of_clk_src_onecell_get, of_data);
if (ret) {
- switch (i) {
- case MAX77686_CLK_AP:
- dev_err(&pdev->dev, "Fail to register CLK_AP\n");
- goto err_clk_ap;
- break;
- case MAX77686_CLK_CP:
- dev_err(&pdev->dev, "Fail to register CLK_CP\n");
- goto err_clk_cp;
- break;
- case MAX77686_CLK_PMIC:
- dev_err(&pdev->dev, "Fail to register CLK_PMIC\n");
- goto err_clk_pmic;
- }
+ dev_err(&pdev->dev, "failed to register OF clock provider\n");
+ goto err_clocks;
}
}
- platform_set_drvdata(pdev, max77686_clks);
+ return 0;
- goto out;
+err_clocks:
+ for (--i; i >= 0; --i) {
+ clkdev_drop(max77686_clks[i]->lookup);
+ clk_unregister(max77686_clks[i]->hw.clk);
+ }
-err_clk_pmic:
- clkdev_drop(max77686_clks[MAX77686_CLK_CP]->lookup);
- kfree(max77686_clks[MAX77686_CLK_CP]->hw.clk);
-err_clk_cp:
- clkdev_drop(max77686_clks[MAX77686_CLK_AP]->lookup);
- kfree(max77686_clks[MAX77686_CLK_AP]->hw.clk);
-err_clk_ap:
-out:
return ret;
}
static int max77686_clk_remove(struct platform_device *pdev)
{
- struct max77686_clk **max77686_clks = platform_get_drvdata(pdev);
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct clk **clocks = platform_get_drvdata(pdev);
int i;
+ if (iodev->dev->of_node)
+ of_clk_del_provider(iodev->dev->of_node);
+
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
- clkdev_drop(max77686_clks[i]->lookup);
- kfree(max77686_clks[i]->hw.clk);
+ struct clk_hw *hw = __clk_get_hw(clocks[i]);
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
+
+ clkdev_drop(max77686->lookup);
+ clk_unregister(clocks[i]);
}
return 0;
}
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5296bd..05e04ce0f148 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {
static int __init nomadik_src_clk_init_debugfs(void)
{
+ /* Vital for multiplatform */
+ if (!src_base)
+ return -ENODEV;
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index c50e83744b0a..3b2a66f78755 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1111,11 +1111,11 @@ static const struct of_device_id si5351_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, si5351_dt_ids);
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client,
+ enum si5351_variant variant)
{
struct device_node *child, *np = client->dev.of_node;
struct si5351_platform_data *pdata;
- const struct of_device_id *match;
struct property *prop;
const __be32 *p;
int num = 0;
@@ -1124,15 +1124,10 @@ static int si5351_dt_parse(struct i2c_client *client)
if (np == NULL)
return 0;
- match = of_match_node(si5351_dt_ids, np);
- if (match == NULL)
- return -EINVAL;
-
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->variant = (enum si5351_variant)match->data;
pdata->clk_xtal = of_clk_get(np, 0);
if (!IS_ERR(pdata->clk_xtal))
clk_put(pdata->clk_xtal);
@@ -1163,7 +1158,7 @@ static int si5351_dt_parse(struct i2c_client *client)
pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
break;
case 1:
- if (pdata->variant != SI5351_VARIANT_C) {
+ if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for pll %d\n",
val, num);
@@ -1187,7 +1182,7 @@ static int si5351_dt_parse(struct i2c_client *client)
}
if (num >= 8 ||
- (pdata->variant == SI5351_VARIANT_A3 && num >= 3)) {
+ (variant == SI5351_VARIANT_A3 && num >= 3)) {
dev_err(&client->dev, "invalid clkout %d\n", num);
return -EINVAL;
}
@@ -1226,7 +1221,7 @@ static int si5351_dt_parse(struct i2c_client *client)
SI5351_CLKOUT_SRC_XTAL;
break;
case 3:
- if (pdata->variant != SI5351_VARIANT_C) {
+ if (variant != SI5351_VARIANT_C) {
dev_err(&client->dev,
"invalid parent %d for clkout %d\n",
val, num);
@@ -1298,7 +1293,7 @@ static int si5351_dt_parse(struct i2c_client *client)
return 0;
}
#else
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
{
return 0;
}
@@ -1307,6 +1302,7 @@ static int si5351_dt_parse(struct i2c_client *client)
static int si5351_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ enum si5351_variant variant = (enum si5351_variant)id->driver_data;
struct si5351_platform_data *pdata;
struct si5351_driver_data *drvdata;
struct clk_init_data init;
@@ -1315,7 +1311,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
u8 num_parents, num_clocks;
int ret, n;
- ret = si5351_dt_parse(client);
+ ret = si5351_dt_parse(client, variant);
if (ret)
return ret;
@@ -1331,7 +1327,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, drvdata);
drvdata->client = client;
- drvdata->variant = pdata->variant;
+ drvdata->variant = variant;
drvdata->pxtal = pdata->clk_xtal;
drvdata->pclkin = pdata->clk_clkin;
@@ -1568,10 +1564,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
}
static const struct i2c_device_id si5351_i2c_ids[] = {
- { "si5351a", 0 },
- { "si5351a-msop", 0 },
- { "si5351b", 0 },
- { "si5351c", 0 },
+ { "si5351a", SI5351_VARIANT_A },
+ { "si5351a-msop", SI5351_VARIANT_A3 },
+ { "si5351b", SI5351_VARIANT_B },
+ { "si5351c", SI5351_VARIANT_C },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
diff --git a/drivers/clk/clk-si5351.h b/drivers/clk/clk-si5351.h
index c0dbf2676872..4d0746b50c32 100644
--- a/drivers/clk/clk-si5351.h
+++ b/drivers/clk/clk-si5351.h
@@ -153,4 +153,18 @@
#define SI5351_XTAL_ENABLE (1<<6)
#define SI5351_MULTISYNTH_ENABLE (1<<4)
+/**
+ * enum si5351_variant - SiLabs Si5351 chip variant
+ * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input)
+ * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input)
+ * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input)
+ * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input)
+ */
+enum si5351_variant {
+ SI5351_VARIANT_A = 1,
+ SI5351_VARIANT_A3 = 2,
+ SI5351_VARIANT_B = 3,
+ SI5351_VARIANT_C = 4,
+};
+
#endif
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
new file mode 100644
index 000000000000..4bbbe32585ec
--- /dev/null
+++ b/drivers/clk/clk-si570.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for Silicon Labs Si570/Si571 Programmable XO/VCXO
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ * Copyright (C) 2011 Guenter Roeck.
+ * Copyright (C) 2011 - 2013 Xilinx Inc.
+ *
+ * Author: Guenter Roeck <guenter.roeck@ericsson.com>
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Si570 registers */
+#define SI570_REG_HS_N1 7
+#define SI570_REG_N1_RFREQ0 8
+#define SI570_REG_RFREQ1 9
+#define SI570_REG_RFREQ2 10
+#define SI570_REG_RFREQ3 11
+#define SI570_REG_RFREQ4 12
+#define SI570_REG_CONTROL 135
+#define SI570_REG_FREEZE_DCO 137
+#define SI570_DIV_OFFSET_7PPM 6
+
+#define HS_DIV_SHIFT 5
+#define HS_DIV_MASK 0xe0
+#define HS_DIV_OFFSET 4
+#define N1_6_2_MASK 0x1f
+#define N1_1_0_MASK 0xc0
+#define RFREQ_37_32_MASK 0x3f
+
+#define SI570_MIN_FREQ 10000000L
+#define SI570_MAX_FREQ 1417500000L
+#define SI598_MAX_FREQ 525000000L
+
+#define FDCO_MIN 4850000000LL
+#define FDCO_MAX 5670000000LL
+
+#define SI570_CNTRL_RECALL (1 << 0)
+#define SI570_CNTRL_FREEZE_M (1 << 5)
+#define SI570_CNTRL_NEWFREQ (1 << 6)
+
+#define SI570_FREEZE_DCO (1 << 4)
+
+/**
+ * struct clk_si570:
+ * @hw: Clock hw struct
+ * @regmap: Device's regmap
+ * @div_offset: Register offset for dividers
+ * @max_freq: Maximum frequency for this device
+ * @fxtal: Factory xtal frequency
+ * @n1: Clock divider N1
+ * @hs_div: Clock divider HSDIV
+ * @rfreq: Clock multiplier RFREQ
+ * @frequency: Current output frequency
+ * @i2c_client: I2C client pointer
+ */
+struct clk_si570 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int div_offset;
+ u64 max_freq;
+ u64 fxtal;
+ unsigned int n1;
+ unsigned int hs_div;
+ u64 rfreq;
+ u64 frequency;
+ struct i2c_client *i2c_client;
+};
+#define to_clk_si570(_hw) container_of(_hw, struct clk_si570, hw)
+
+enum clk_si570_variant {
+ si57x,
+ si59x
+};
+
+/**
+ * si570_get_divs() - Read clock dividers from HW
+ * @data: Pointer to struct clk_si570
+ * @rfreq: Fractional multiplier (output)
+ * @n1: Divider N1 (output)
+ * @hs_div: Divider HSDIV (output)
+ * Returns 0 on success, negative errno otherwise.
+ *
+ * Retrieve clock dividers and multipliers from the HW.
+ */
+static int si570_get_divs(struct clk_si570 *data, u64 *rfreq,
+ unsigned int *n1, unsigned int *hs_div)
+{
+ int err;
+ u8 reg[6];
+ u64 tmp;
+
+ err = regmap_bulk_read(data->regmap, SI570_REG_HS_N1 + data->div_offset,
+ reg, ARRAY_SIZE(reg));
+ if (err)
+ return err;
+
+ *hs_div = ((reg[0] & HS_DIV_MASK) >> HS_DIV_SHIFT) + HS_DIV_OFFSET;
+ *n1 = ((reg[0] & N1_6_2_MASK) << 2) + ((reg[1] & N1_1_0_MASK) >> 6) + 1;
+ /* Handle invalid cases */
+ if (*n1 > 1)
+ *n1 &= ~1;
+
+ tmp = reg[1] & RFREQ_37_32_MASK;
+ tmp = (tmp << 8) + reg[2];
+ tmp = (tmp << 8) + reg[3];
+ tmp = (tmp << 8) + reg[4];
+ tmp = (tmp << 8) + reg[5];
+ *rfreq = tmp;
+
+ return 0;
+}
+
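For reference, the bit packing above can be exercised outside the driver; the following standalone sketch decodes a pair of made-up register bytes (the values are illustrative, not read from a real part):

#include <stdio.h>

int main(void)
{
	unsigned char reg0 = 0xa1, reg1 = 0x42;	/* hypothetical readback of registers 7 and 8 */
	unsigned int hs_div, n1;

	hs_div = ((reg0 & 0xe0) >> 5) + 4;	/* HS_DIV field plus offset */
	n1 = ((reg0 & 0x1f) << 2) + ((reg1 & 0xc0) >> 6) + 1;
	if (n1 > 1)				/* N1 values above 1 must be even */
		n1 &= ~1u;

	printf("HSDIV=%u N1=%u\n", hs_div, n1);	/* prints HSDIV=9 N1=6 */
	return 0;
}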
+/**
+ * si570_get_defaults() - Get default values
+ * @data: Driver data structure
+ * @fout: Factory frequency output
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int si570_get_defaults(struct clk_si570 *data, u64 fout)
+{
+ int err;
+ u64 fdco;
+
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_RECALL);
+
+ err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div);
+ if (err)
+ return err;
+
+ /*
+ * Accept optional precision loss to avoid arithmetic overflows.
+ * Acceptable per Silicon Labs Application Note AN334.
+ */
+ fdco = fout * data->n1 * data->hs_div;
+ if (fdco >= (1LL << 36))
+ data->fxtal = div64_u64(fdco << 24, data->rfreq >> 4);
+ else
+ data->fxtal = div64_u64(fdco << 28, data->rfreq);
+
+ data->frequency = fout;
+
+ return 0;
+}
+
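The recovered crystal frequency follows from fOUT = fXTAL * RFREQ / 2^28 / (HSDIV * N1); the first branch gives up four bits of RFREQ precision so that the shift cannot overflow 64 bits once fOUT * N1 * HSDIV reaches 2^36. A standalone sketch of the same arithmetic, with made-up factory values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical factory programming: 156.25 MHz out, HSDIV=9, N1=4. */
	uint64_t fout = 156250000ULL, rfreq = 0x2f40135a9ULL;	/* made-up 38-bit fixed-point value */
	unsigned int n1 = 4, hs_div = 9;
	uint64_t fdco = fout * n1 * hs_div, fxtal;

	if (fdco >= (1ULL << 36))		/* fdco << 28 would overflow 64 bits */
		fxtal = (fdco << 24) / (rfreq >> 4);
	else
		fxtal = (fdco << 28) / rfreq;

	printf("fxtal ~ %llu Hz\n", (unsigned long long)fxtal);
	return 0;
}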
+/**
+ * si570_update_rfreq() - Update clock multiplier
+ * @data: Driver data structure
+ * Passes on regmap_bulk_write() return value.
+ */
+static int si570_update_rfreq(struct clk_si570 *data)
+{
+ u8 reg[5];
+
+ reg[0] = ((data->n1 - 1) << 6) |
+ ((data->rfreq >> 32) & RFREQ_37_32_MASK);
+ reg[1] = (data->rfreq >> 24) & 0xff;
+ reg[2] = (data->rfreq >> 16) & 0xff;
+ reg[3] = (data->rfreq >> 8) & 0xff;
+ reg[4] = data->rfreq & 0xff;
+
+ return regmap_bulk_write(data->regmap, SI570_REG_N1_RFREQ0 +
+ data->div_offset, reg, ARRAY_SIZE(reg));
+}
+
+/**
+ * si570_calc_divs() - Calculate clock dividers
+ * @frequency: Target frequency
+ * @data: Driver data structure
+ * @out_rfreq: RFREQ fractional multiplier (output)
+ * @out_n1: Clock divider N1 (output)
+ * @out_hs_div: Clock divider HSDIV (output)
+ * Returns 0 on success, negative errno otherwise.
+ *
+ * Calculate the clock dividers (@out_hs_div, @out_n1) and clock multiplier
+ * (@out_rfreq) for a given target @frequency.
+ */
+static int si570_calc_divs(unsigned long frequency, struct clk_si570 *data,
+ u64 *out_rfreq, unsigned int *out_n1, unsigned int *out_hs_div)
+{
+ int i;
+ unsigned int n1, hs_div;
+ u64 fdco, best_fdco = ULLONG_MAX;
+ static const uint8_t si570_hs_div_values[] = { 11, 9, 7, 6, 5, 4 };
+
+ for (i = 0; i < ARRAY_SIZE(si570_hs_div_values); i++) {
+ hs_div = si570_hs_div_values[i];
+ /* Calculate lowest possible value for n1 */
+ n1 = div_u64(div_u64(FDCO_MIN, hs_div), frequency);
+ if (!n1 || (n1 & 1))
+ n1++;
+ while (n1 <= 128) {
+ fdco = (u64)frequency * (u64)hs_div * (u64)n1;
+ if (fdco > FDCO_MAX)
+ break;
+ if (fdco >= FDCO_MIN && fdco < best_fdco) {
+ *out_n1 = n1;
+ *out_hs_div = hs_div;
+ *out_rfreq = div64_u64(fdco << 28, data->fxtal);
+ best_fdco = fdco;
+ }
+ n1 += (n1 == 1 ? 1 : 2);
+ }
+ }
+
+ if (best_fdco == ULLONG_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
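For a 100 MHz target, the search above settles on HSDIV = 5 and N1 = 10, i.e. a 5 GHz DCO setting, because that is the lowest fDCO inside the 4.85-5.67 GHz window. The same walk can be reproduced in a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define FDCO_MIN 4850000000ULL
#define FDCO_MAX 5670000000ULL

int main(void)
{
	static const unsigned int hs_div_values[] = { 11, 9, 7, 6, 5, 4 };
	unsigned long frequency = 100000000UL;		/* 100 MHz target */
	uint64_t fdco, best_fdco = UINT64_MAX;
	unsigned int n1, hs_div, best_n1 = 0, best_hs = 0;
	size_t i;

	for (i = 0; i < sizeof(hs_div_values) / sizeof(hs_div_values[0]); i++) {
		hs_div = hs_div_values[i];
		n1 = FDCO_MIN / hs_div / frequency;	/* lowest candidate N1 */
		if (!n1 || (n1 & 1))
			n1++;
		while (n1 <= 128) {
			fdco = (uint64_t)frequency * hs_div * n1;
			if (fdco > FDCO_MAX)
				break;
			if (fdco >= FDCO_MIN && fdco < best_fdco) {
				best_fdco = fdco;
				best_n1 = n1;
				best_hs = hs_div;
			}
			n1 += (n1 == 1) ? 1 : 2;
		}
	}

	/* Expect HSDIV=5 N1=10 fDCO=5000000000 for this target. */
	printf("HSDIV=%u N1=%u fDCO=%llu\n", best_hs, best_n1,
	       (unsigned long long)best_fdco);
	return 0;
}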
+static unsigned long si570_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int err;
+ u64 rfreq, rate;
+ unsigned int n1, hs_div;
+ struct clk_si570 *data = to_clk_si570(hw);
+
+ err = si570_get_divs(data, &rfreq, &n1, &hs_div);
+ if (err) {
+ dev_err(&data->i2c_client->dev, "unable to recalc rate\n");
+ return data->frequency;
+ }
+
+ rfreq = div_u64(rfreq, hs_div * n1);
+ rate = (data->fxtal * rfreq) >> 28;
+
+ return rate;
+}
+
+static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ int err;
+ u64 rfreq;
+ unsigned int n1, hs_div;
+ struct clk_si570 *data = to_clk_si570(hw);
+
+ if (!rate)
+ return 0;
+
+ if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ data->frequency) < 35) {
+ rfreq = div64_u64((data->rfreq * rate) +
+ div64_u64(data->frequency, 2), data->frequency);
+ n1 = data->n1;
+ hs_div = data->hs_div;
+
+ } else {
+ err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ if (err) {
+ dev_err(&data->i2c_client->dev,
+ "unable to round rate\n");
+ return 0;
+ }
+ }
+
+ return rate;
+}
+
+/**
+ * si570_set_frequency() - Adjust output frequency
+ * @data: Driver data structure
+ * @frequency: Target frequency
+ * Returns 0 on success.
+ *
+ * Update output frequency for big frequency changes (> 3,500 ppm).
+ */
+static int si570_set_frequency(struct clk_si570 *data, unsigned long frequency)
+{
+ int err;
+
+ err = si570_calc_divs(frequency, data, &data->rfreq, &data->n1,
+ &data->hs_div);
+ if (err)
+ return err;
+
+ /*
+ * The DCO reg should be accessed with a read-modify-write operation
+ * per AN334
+ */
+ regmap_write(data->regmap, SI570_REG_FREEZE_DCO, SI570_FREEZE_DCO);
+ regmap_write(data->regmap, SI570_REG_HS_N1 + data->div_offset,
+ ((data->hs_div - HS_DIV_OFFSET) << HS_DIV_SHIFT) |
+ (((data->n1 - 1) >> 2) & N1_6_2_MASK));
+ si570_update_rfreq(data);
+ regmap_write(data->regmap, SI570_REG_FREEZE_DCO, 0);
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_NEWFREQ);
+
+ /* Applying a new frequency can take up to 10ms */
+ usleep_range(10000, 12000);
+
+ return 0;
+}
+
+/**
+ * si570_set_frequency_small() - Adjust output frequency
+ * @data: Driver data structure
+ * @frequency: Target frequency
+ * Returns 0 on success.
+ *
+ * Update output frequency for small frequency changes (< 3,500 ppm).
+ */
+static int si570_set_frequency_small(struct clk_si570 *data,
+ unsigned long frequency)
+{
+ /*
+ * This is a re-implementation of DIV_ROUND_CLOSEST
+ * using the div64_u64 function in lieu of letting the compiler
+ * insert EABI calls
+ */
+ data->rfreq = div64_u64((data->rfreq * frequency) +
+ div_u64(data->frequency, 2), data->frequency);
+ regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_FREEZE_M);
+ si570_update_rfreq(data);
+ regmap_write(data->regmap, SI570_REG_CONTROL, 0);
+
+ /* Applying a new frequency (small change) can take up to 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_si570 *data = to_clk_si570(hw);
+ struct i2c_client *client = data->i2c_client;
+ int err;
+
+ if (rate < SI570_MIN_FREQ || rate > data->max_freq) {
+ dev_err(&client->dev,
+ "requested frequency %lu Hz is out of range\n", rate);
+ return -EINVAL;
+ }
+
+ if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ data->frequency) < 35)
+ err = si570_set_frequency_small(data, rate);
+ else
+ err = si570_set_frequency(data, rate);
+
+ if (err)
+ return err;
+
+ data->frequency = rate;
+
+ return 0;
+}
+
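Whether the small-change path applies is decided by the same test used in si570_round_rate(): scaling the relative delta by 10,000 turns the "< 35" comparison into a 3,500 ppm (0.35 %) window, as AN334 prescribes. A standalone sketch of the check, with made-up rates:

#include <stdio.h>
#include <stdint.h>

static int is_small_change(uint64_t cur, uint64_t req)
{
	uint64_t delta = (req > cur) ? req - cur : cur - req;

	/* delta/cur * 10000 < 35  <=>  relative change below 3500 ppm */
	return (delta * 10000ULL) / cur < 35;
}

int main(void)
{
	printf("%d\n", is_small_change(100000000ULL, 100300000ULL));	/* 3000 ppm -> 1 */
	printf("%d\n", is_small_change(100000000ULL, 101000000ULL));	/* 10000 ppm -> 0 */
	return 0;
}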
+static const struct clk_ops si570_clk_ops = {
+ .recalc_rate = si570_recalc_rate,
+ .round_rate = si570_round_rate,
+ .set_rate = si570_set_rate,
+};
+
+static bool si570_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI570_REG_CONTROL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool si570_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI570_REG_HS_N1 ... (SI570_REG_RFREQ4 + SI570_DIV_OFFSET_7PPM):
+ case SI570_REG_CONTROL:
+ case SI570_REG_FREEZE_DCO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config si570_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 137,
+ .writeable_reg = si570_regmap_is_writeable,
+ .volatile_reg = si570_regmap_is_volatile,
+};
+
+static int si570_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_si570 *data;
+ struct clk_init_data init;
+ struct clk *clk;
+ u32 initial_fout, factory_fout, stability;
+ int err;
+ enum clk_si570_variant variant = id->driver_data;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &si570_clk_ops;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+ data->hw.init = &init;
+ data->i2c_client = client;
+
+ if (variant == si57x) {
+ err = of_property_read_u32(client->dev.of_node,
+ "temperature-stability", &stability);
+ if (err) {
+ dev_err(&client->dev,
+ "'temperature-stability' property missing\n");
+ return err;
+ }
+ /* adjust register offsets for 7ppm devices */
+ if (stability == 7)
+ data->div_offset = SI570_DIV_OFFSET_7PPM;
+
+ data->max_freq = SI570_MAX_FREQ;
+ } else {
+ data->max_freq = SI598_MAX_FREQ;
+ }
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &init.name))
+ init.name = client->dev.of_node->name;
+
+ err = of_property_read_u32(client->dev.of_node, "factory-fout",
+ &factory_fout);
+ if (err) {
+ dev_err(&client->dev, "'factory-fout' property missing\n");
+ return err;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ i2c_set_clientdata(client, data);
+ err = si570_get_defaults(data, factory_fout);
+ if (err)
+ return err;
+
+ clk = devm_clk_register(&client->dev, &data->hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return PTR_ERR(clk);
+ }
+ err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get,
+ clk);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ /* Read the requested initial output frequency from device tree */
+ if (!of_property_read_u32(client->dev.of_node, "clock-frequency",
+ &initial_fout)) {
+ err = clk_set_rate(clk, initial_fout);
+ if (err) {
+ of_clk_del_provider(client->dev.of_node);
+ return err;
+ }
+ }
+
+ /* Display a message indicating that we've successfully registered */
+ dev_info(&client->dev, "registered, current frequency %llu Hz\n",
+ data->frequency);
+
+ return 0;
+}
+
+static int si570_remove(struct i2c_client *client)
+{
+ of_clk_del_provider(client->dev.of_node);
+ return 0;
+}
+
+static const struct i2c_device_id si570_id[] = {
+ { "si570", si57x },
+ { "si571", si57x },
+ { "si598", si59x },
+ { "si599", si59x },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si570_id);
+
+static const struct of_device_id clk_si570_of_match[] = {
+ { .compatible = "silabs,si570" },
+ { .compatible = "silabs,si571" },
+ { .compatible = "silabs,si598" },
+ { .compatible = "silabs,si599" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_si570_of_match);
+
+static struct i2c_driver si570_driver = {
+ .driver = {
+ .name = "si570",
+ .of_match_table = clk_si570_of_match,
+ },
+ .probe = si570_probe,
+ .remove = si570_remove,
+ .id_table = si570_id,
+};
+module_i2c_driver(si570_driver);
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com");
+MODULE_DESCRIPTION("Si570 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 7fd5c5e9e25d..37e928846ec5 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -641,7 +641,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
return pll_freq;
}
-const struct clk_ops vtwm_pll_ops = {
+static const struct clk_ops vtwm_pll_ops = {
.round_rate = vtwm_pll_round_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2cf2ea6b77a1..c42e608af6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include "clk.h"
+
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -92,7 +94,7 @@ static void clk_enable_unlock(unsigned long flags)
/*** debugfs support ***/
-#ifdef CONFIG_COMMON_CLK_DEBUG
+#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
static struct dentry *rootdir;
@@ -104,10 +106,11 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
if (!c)
return;
- seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
+ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
level * 3 + 1, "",
30 - level * 3, c->name,
- c->enable_count, c->prepare_count, clk_get_rate(c));
+ c->enable_count, c->prepare_count, clk_get_rate(c),
+ clk_get_accuracy(c));
seq_printf(s, "\n");
}
@@ -129,8 +132,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk *c;
- seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
- seq_printf(s, "---------------------------------------------------------------------\n");
+ seq_printf(s, " clock enable_cnt prepare_cnt rate accuracy\n");
+ seq_printf(s, "---------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -167,6 +170,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
+ seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
@@ -248,6 +252,11 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
if (!d)
goto err_out;
+ d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
+ (u32 *)&clk->accuracy);
+ if (!d)
+ goto err_out;
+
d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
(u32 *)&clk->flags);
if (!d)
@@ -272,7 +281,8 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
goto out;
err_out:
- debugfs_remove(clk->dentry);
+ debugfs_remove_recursive(clk->dentry);
+ clk->dentry = NULL;
out:
return ret;
}
@@ -342,6 +352,21 @@ out:
return ret;
}
+ /**
+ * clk_debug_unregister - remove a clk node from the debugfs clk tree
+ * @clk: the clk being removed from the debugfs clk tree
+ *
+ * Dynamically removes a clk and all its child clk nodes from the
+ * debugfs clk tree if clk->dentry points to debugfs created by
+ * clk_debug_register in __clk_init.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_unregister(struct clk *clk)
+{
+ debugfs_remove_recursive(clk->dentry);
+}
+
/**
* clk_debug_reparent - reparent clk node in the debugfs clk tree
* @clk: the clk being reparented
@@ -432,6 +457,9 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
+static inline void clk_debug_unregister(struct clk *clk)
+{
+}
#endif
/* caller must hold prepare_lock */
@@ -547,16 +575,19 @@ struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
+EXPORT_SYMBOL_GPL(__clk_get_hw);
u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
+EXPORT_SYMBOL_GPL(__clk_get_num_parents);
struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
+EXPORT_SYMBOL_GPL(__clk_get_parent);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
@@ -570,6 +601,7 @@ struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
else
return clk->parents[index];
}
+EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
@@ -601,6 +633,15 @@ unsigned long __clk_get_rate(struct clk *clk)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(__clk_get_rate);
+
+unsigned long __clk_get_accuracy(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+
+ return clk->accuracy;
+}
unsigned long __clk_get_flags(struct clk *clk)
{
@@ -649,6 +690,7 @@ bool __clk_is_enabled(struct clk *clk)
out:
return !!ret;
}
+EXPORT_SYMBOL_GPL(__clk_is_enabled);
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
@@ -740,6 +782,7 @@ out:
return best;
}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
/*** clk api ***/
@@ -1016,6 +1059,59 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
}
/**
+ * __clk_recalc_accuracies
+ * @clk: first clk in the subtree
+ *
+ * Walks the subtree of clks starting with clk and recalculates accuracies as
+ * it goes. Note that if a clk does not implement the .recalc_accuracy
+ * callback then it is assumed that the clock will take on the accuracy of its
+ * parent.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void __clk_recalc_accuracies(struct clk *clk)
+{
+ unsigned long parent_accuracy = 0;
+ struct clk *child;
+
+ if (clk->parent)
+ parent_accuracy = clk->parent->accuracy;
+
+ if (clk->ops->recalc_accuracy)
+ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
+ parent_accuracy);
+ else
+ clk->accuracy = parent_accuracy;
+
+ hlist_for_each_entry(child, &clk->children, child_node)
+ __clk_recalc_accuracies(child);
+}
+
+/**
+ * clk_get_accuracy - return the accuracy of clk
+ * @clk: the clk whose accuracy is being returned
+ *
+ * Simply returns the cached accuracy of the clk, unless
+ * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
+ * issued.
+ * If clk is NULL then returns 0.
+ */
+long clk_get_accuracy(struct clk *clk)
+{
+ unsigned long accuracy;
+
+ clk_prepare_lock();
+ if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+ __clk_recalc_accuracies(clk);
+
+ accuracy = __clk_get_accuracy(clk);
+ clk_prepare_unlock();
+
+ return accuracy;
+}
+EXPORT_SYMBOL_GPL(clk_get_accuracy);
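A consumer can read the propagated figure through the new accessor; a minimal sketch, assuming clk was obtained via clk_get() and treating 0 as "perfect or unknown", per the fallback in __clk_init():

#include <linux/clk.h>
#include <linux/printk.h>

static void example_report_accuracy(struct clk *clk)
{
	long acc = clk_get_accuracy(clk);	/* 0 for orphans or "perfect" clocks */

	pr_info("clock accuracy: %ld\n", acc);
}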
+
+/**
* __clk_recalc_rates
* @clk: first clk in the subtree
* @msg: notification type (see include/linux/clk.h)
@@ -1129,10 +1225,9 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
clk->parent = new_parent;
}
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
unsigned long flags;
- int ret = 0;
struct clk *old_parent = clk->parent;
/*
@@ -1163,6 +1258,34 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
clk_reparent(clk, parent);
clk_enable_unlock(flags);
+ return old_parent;
+}
+
+static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
+ struct clk *old_parent)
+{
+ /*
+ * Finish the migration of prepare state and undo the changes done
+ * for preventing a race with clk_enable().
+ */
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(old_parent);
+ __clk_unprepare(old_parent);
+ }
+
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent;
+
+ old_parent = __clk_set_parent_before(clk, parent);
+
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
@@ -1180,18 +1303,8 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
return ret;
}
- /*
- * Finish the migration of prepare state and undo the changes done
- * for preventing a race with clk_enable().
- */
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(old_parent);
- __clk_unprepare(old_parent);
- }
+ __clk_set_parent_after(clk, parent, old_parent);
- /* update debugfs with new clk tree topology */
- clk_debug_reparent(clk, parent);
return 0;
}
@@ -1376,17 +1489,32 @@ static void clk_change_rate(struct clk *clk)
struct clk *child;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
+ bool skip_set_rate = false;
+ struct clk *old_parent;
old_rate = clk->rate;
- /* set parent */
- if (clk->new_parent && clk->new_parent != clk->parent)
- __clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
-
- if (clk->parent)
+ if (clk->new_parent)
+ best_parent_rate = clk->new_parent->rate;
+ else if (clk->parent)
best_parent_rate = clk->parent->rate;
- if (clk->ops->set_rate)
+ if (clk->new_parent && clk->new_parent != clk->parent) {
+ old_parent = __clk_set_parent_before(clk, clk->new_parent);
+
+ if (clk->ops->set_rate_and_parent) {
+ skip_set_rate = true;
+ clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
+ best_parent_rate,
+ clk->new_parent_index);
+ } else if (clk->ops->set_parent) {
+ clk->ops->set_parent(clk->hw, clk->new_parent_index);
+ }
+
+ __clk_set_parent_after(clk, clk->new_parent, old_parent);
+ }
+
+ if (!skip_set_rate && clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
if (clk->ops->recalc_rate)
@@ -1551,6 +1679,7 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
clk_debug_reparent(clk, new_parent);
+ __clk_recalc_accuracies(clk);
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
@@ -1621,11 +1750,13 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
/* do the re-parent */
ret = __clk_set_parent(clk, parent, p_index);
- /* propagate rate recalculation accordingly */
- if (ret)
+ /* propagate rate and accuracy recalculation accordingly */
+ if (ret) {
__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
- else
+ } else {
__clk_recalc_rates(clk, POST_RATE_CHANGE);
+ __clk_recalc_accuracies(clk);
+ }
out:
clk_prepare_unlock();
@@ -1678,6 +1809,14 @@ int __clk_init(struct device *dev, struct clk *clk)
goto out;
}
+ if (clk->ops->set_rate_and_parent &&
+ !(clk->ops->set_parent && clk->ops->set_rate)) {
+ pr_warn("%s: %s must implement .set_parent & .set_rate\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
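Put differently, hardware that can switch rate and parent atomically provides .set_rate_and_parent on top of the two individual callbacks, never instead of them. A minimal ops sketch with placeholder callbacks (names and bodies are illustrative, not taken from this patch):

#include <linux/clk-provider.h>

static unsigned long example_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate;			/* placeholder */
}

static int example_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return 0;				/* placeholder */
}

static int example_set_parent(struct clk_hw *hw, u8 index)
{
	return 0;				/* placeholder */
}

static int example_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate, u8 index)
{
	return 0;				/* placeholder: one combined register write */
}

static const struct clk_ops example_ops = {
	.recalc_rate		= example_recalc_rate,
	.set_rate		= example_set_rate,
	.set_parent		= example_set_parent,
	.set_rate_and_parent	= example_set_rate_and_parent,
};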
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
WARN(!clk->parent_names[i],
@@ -1730,6 +1869,21 @@ int __clk_init(struct device *dev, struct clk *clk)
hlist_add_head(&clk->child_node, &clk_orphan_list);
/*
+ * Set clk's accuracy. The preferred method is to use
+ * .recalc_accuracy. For simple clocks and lazy developers the default
+ * fallback is to use the parent's accuracy. If a clock doesn't have a
+ * parent (or is orphaned) then accuracy is set to zero (perfect
+ * clock).
+ */
+ if (clk->ops->recalc_accuracy)
+ clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
+ __clk_get_accuracy(clk->parent));
+ else if (clk->parent)
+ clk->accuracy = clk->parent->accuracy;
+ else
+ clk->accuracy = 0;
+
+ /*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -1743,6 +1897,7 @@ int __clk_init(struct device *dev, struct clk *clk)
else
clk->rate = 0;
+ clk_debug_register(clk);
/*
* walk the list of orphan clocks and reparent any that are children of
* this clock
@@ -1773,8 +1928,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (clk->ops->init)
clk->ops->init(clk->hw);
- clk_debug_register(clk);
-
+ kref_init(&clk->ref);
out:
clk_prepare_unlock();
@@ -1810,6 +1964,10 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
clk->flags = hw->init->flags;
clk->parent_names = hw->init->parent_names;
clk->num_parents = hw->init->num_parents;
+ if (dev && dev->driver)
+ clk->owner = dev->driver->owner;
+ else
+ clk->owner = NULL;
ret = __clk_init(dev, clk);
if (ret)
@@ -1830,6 +1988,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
goto fail_name;
}
clk->ops = hw->init->ops;
+ if (dev && dev->driver)
+ clk->owner = dev->driver->owner;
clk->hw = hw;
clk->flags = hw->init->flags;
clk->num_parents = hw->init->num_parents;
@@ -1904,13 +2064,104 @@ fail_out:
}
EXPORT_SYMBOL_GPL(clk_register);
+/*
+ * Free memory allocated for a clock.
+ * Caller must hold prepare_lock.
+ */
+static void __clk_release(struct kref *ref)
+{
+ struct clk *clk = container_of(ref, struct clk, ref);
+ int i = clk->num_parents;
+
+ kfree(clk->parents);
+ while (--i >= 0)
+ kfree(clk->parent_names[i]);
+
+ kfree(clk->parent_names);
+ kfree(clk->name);
+ kfree(clk);
+}
+
+/*
+ * Empty clk_ops for unregistered clocks. These are used temporarily
+ * after clk_unregister() was called on a clock and until the last clock
+ * consumer calls clk_put() and the struct clk object is freed.
+ */
+static int clk_nodrv_prepare_enable(struct clk_hw *hw)
+{
+ return -ENXIO;
+}
+
+static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return -ENXIO;
+}
+
+static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
+{
+ return -ENXIO;
+}
+
+static const struct clk_ops clk_nodrv_ops = {
+ .enable = clk_nodrv_prepare_enable,
+ .disable = clk_nodrv_disable_unprepare,
+ .prepare = clk_nodrv_prepare_enable,
+ .unprepare = clk_nodrv_disable_unprepare,
+ .set_rate = clk_nodrv_set_rate,
+ .set_parent = clk_nodrv_set_parent,
+};
+
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
- *
- * Currently unimplemented.
*/
-void clk_unregister(struct clk *clk) {}
+void clk_unregister(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
+ return;
+
+ clk_prepare_lock();
+
+ if (clk->ops == &clk_nodrv_ops) {
+ pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+ goto out;
+ }
+ /*
+ * Assign empty clock ops for consumers that might still hold
+ * a reference to this clock.
+ */
+ flags = clk_enable_lock();
+ clk->ops = &clk_nodrv_ops;
+ clk_enable_unlock(flags);
+
+ if (!hlist_empty(&clk->children)) {
+ struct clk *child;
+
+ /* Reparent all children to the orphan list. */
+ hlist_for_each_entry(child, &clk->children, child_node)
+ clk_set_parent(child, NULL);
+ }
+
+ clk_debug_unregister(clk);
+
+ hlist_del_init(&clk->child_node);
+
+ if (clk->prepare_count)
+ pr_warn("%s: unregistering prepared clock: %s\n",
+ __func__, clk->name);
+
+ kref_put(&clk->ref, __clk_release);
+out:
+ clk_prepare_unlock();
+}
EXPORT_SYMBOL_GPL(clk_unregister);
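With a real implementation in place, provider modules can finally pair clk_register() with clk_unregister() on teardown; consumers that still hold a reference are switched to clk_nodrv_ops and get -ENXIO instead of touching freed hardware. A hypothetical remove path (driver-specific names are made up):

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

static int example_clk_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);	/* stored by the probe */

	clk_unregister(clk);	/* stale consumers now see clk_nodrv_ops */
	return 0;
}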
static void devm_clk_release(struct device *dev, void *res)
@@ -1970,6 +2221,32 @@ void devm_clk_unregister(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
+/*
+ * clkdev helpers
+ */
+int __clk_get(struct clk *clk)
+{
+ if (clk) {
+ if (!try_module_get(clk->owner))
+ return 0;
+
+ kref_get(&clk->ref);
+ }
+ return 1;
+}
+
+void __clk_put(struct clk *clk)
+{
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
+ return;
+
+ clk_prepare_lock();
+ kref_put(&clk->ref, __clk_release);
+ clk_prepare_unlock();
+
+ module_put(clk->owner);
+}
+
/*** clk rate change notifiers ***/
/**
@@ -2104,13 +2381,22 @@ struct of_clk_provider {
void *data;
};
-extern struct of_device_id __clk_of_table[];
-
static const struct of_device_id __clk_of_table_sentinel
__used __section(__clk_of_table_end);
static LIST_HEAD(of_clk_providers);
-static DEFINE_MUTEX(of_clk_lock);
+static DEFINE_MUTEX(of_clk_mutex);
+
+/* of_clk_provider list locking helpers */
+void of_clk_lock(void)
+{
+ mutex_lock(&of_clk_mutex);
+}
+
+void of_clk_unlock(void)
+{
+ mutex_unlock(&of_clk_mutex);
+}
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
@@ -2154,9 +2440,9 @@ int of_clk_add_provider(struct device_node *np,
cp->data = data;
cp->get = clk_src_get;
- mutex_lock(&of_clk_lock);
+ mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
- mutex_unlock(&of_clk_lock);
+ mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %s\n", np->full_name);
return 0;
@@ -2171,7 +2457,7 @@ void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
- mutex_lock(&of_clk_lock);
+ mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
@@ -2180,24 +2466,33 @@ void of_clk_del_provider(struct device_node *np)
break;
}
}
- mutex_unlock(&of_clk_lock);
+ mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-ENOENT);
/* Check if we have such a provider in our array */
- mutex_lock(&of_clk_lock);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
clk = provider->get(clkspec, provider->data);
if (!IS_ERR(clk))
break;
}
- mutex_unlock(&of_clk_lock);
+
+ return clk;
+}
+
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+ struct clk *clk;
+
+ mutex_lock(&of_clk_mutex);
+ clk = __of_clk_get_from_provider(clkspec);
+ mutex_unlock(&of_clk_mutex);
return clk;
}
@@ -2245,7 +2540,7 @@ void __init of_clk_init(const struct of_device_id *matches)
struct device_node *np;
if (!matches)
- matches = __clk_of_table;
+ matches = &__clk_of_table;
for_each_matching_node_and_match(np, matches, &match) {
of_clk_init_cb_t clk_init_cb = match->data;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
new file mode 100644
index 000000000000..795cc9f0dac0
--- /dev/null
+++ b/drivers/clk/clk.h
@@ -0,0 +1,16 @@
+/*
+ * linux/drivers/clk/clk.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+void of_clk_lock(void);
+void of_clk_unlock(void);
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 442a31363873..48f67218247c 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -21,6 +21,8 @@
#include <linux/clkdev.h>
#include <linux/of.h>
+#include "clk.h"
+
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
@@ -39,7 +41,13 @@ struct clk *of_clk_get(struct device_node *np, int index)
if (rc)
return ERR_PTR(rc);
- clk = of_clk_get_from_provider(&clkspec);
+ of_clk_lock();
+ clk = __of_clk_get_from_provider(&clkspec);
+
+ if (!IS_ERR(clk) && !__clk_get(clk))
+ clk = ERR_PTR(-ENOENT);
+
+ of_clk_unlock();
of_node_put(clkspec.np);
return clk;
}
@@ -157,7 +165,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
if (dev) {
clk = of_clk_get_by_name(dev->of_node, con_id);
- if (!IS_ERR(clk) && __clk_get(clk))
+ if (!IS_ERR(clk))
return clk;
}
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
new file mode 100644
index 000000000000..a049108341fc
--- /dev/null
+++ b/drivers/clk/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Hisilicon Clock specific Makefile
+#
+
+obj-y += clk.o clkgate-separated.o clk-hi3620.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
new file mode 100644
index 000000000000..f24ad6a3a797
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -0,0 +1,242 @@
+/*
+ * Hisilicon Hi3620 clock driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/hi3620-clock.h>
+
+#include "clk.h"
+
+/* clock parent list */
+static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+/* share axi parent */
+static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+ "armpll3", "armpll5", };
+static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+ "armpll3", "armpll5", };
+static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+
+
+/* fixed rate clocks */
+static struct hisi_fixed_rate_clock hi3620_fixed_rate_clks[] __initdata = {
+ { HI3620_OSC32K, "osc32k", NULL, CLK_IS_ROOT, 32768, },
+ { HI3620_OSC26M, "osc26m", NULL, CLK_IS_ROOT, 26000000, },
+ { HI3620_PCLK, "pclk", NULL, CLK_IS_ROOT, 26000000, },
+ { HI3620_PLL_ARM0, "armpll0", NULL, CLK_IS_ROOT, 1600000000, },
+ { HI3620_PLL_ARM1, "armpll1", NULL, CLK_IS_ROOT, 1600000000, },
+ { HI3620_PLL_PERI, "armpll2", NULL, CLK_IS_ROOT, 1440000000, },
+ { HI3620_PLL_USB, "armpll3", NULL, CLK_IS_ROOT, 1440000000, },
+ { HI3620_PLL_HDMI, "armpll4", NULL, CLK_IS_ROOT, 1188000000, },
+ { HI3620_PLL_GPU, "armpll5", NULL, CLK_IS_ROOT, 1300000000, },
+};
+
+/* fixed factor clocks */
+static struct hisi_fixed_factor_clock hi3620_fixed_factor_clks[] __initdata = {
+ { HI3620_RCLK_TCXO, "rclk_tcxo", "osc26m", 1, 4, 0, },
+ { HI3620_RCLK_CFGAXI, "rclk_cfgaxi", "armpll2", 1, 30, 0, },
+ { HI3620_RCLK_PICO, "rclk_pico", "hsic_div", 1, 40, 0, },
+};
+
+static struct hisi_mux_clock hi3620_mux_clks[] __initdata = {
+ { HI3620_TIMER0_MUX, "timer0_mux", timer0_mux_p, ARRAY_SIZE(timer0_mux_p), CLK_SET_RATE_PARENT, 0, 15, 2, 0, },
+ { HI3620_TIMER1_MUX, "timer1_mux", timer1_mux_p, ARRAY_SIZE(timer1_mux_p), CLK_SET_RATE_PARENT, 0, 17, 2, 0, },
+ { HI3620_TIMER2_MUX, "timer2_mux", timer2_mux_p, ARRAY_SIZE(timer2_mux_p), CLK_SET_RATE_PARENT, 0, 19, 2, 0, },
+ { HI3620_TIMER3_MUX, "timer3_mux", timer3_mux_p, ARRAY_SIZE(timer3_mux_p), CLK_SET_RATE_PARENT, 0, 21, 2, 0, },
+ { HI3620_TIMER4_MUX, "timer4_mux", timer4_mux_p, ARRAY_SIZE(timer4_mux_p), CLK_SET_RATE_PARENT, 0x18, 0, 2, 0, },
+ { HI3620_TIMER5_MUX, "timer5_mux", timer5_mux_p, ARRAY_SIZE(timer5_mux_p), CLK_SET_RATE_PARENT, 0x18, 2, 2, 0, },
+ { HI3620_TIMER6_MUX, "timer6_mux", timer6_mux_p, ARRAY_SIZE(timer6_mux_p), CLK_SET_RATE_PARENT, 0x18, 4, 2, 0, },
+ { HI3620_TIMER7_MUX, "timer7_mux", timer7_mux_p, ARRAY_SIZE(timer7_mux_p), CLK_SET_RATE_PARENT, 0x18, 6, 2, 0, },
+ { HI3620_TIMER8_MUX, "timer8_mux", timer8_mux_p, ARRAY_SIZE(timer8_mux_p), CLK_SET_RATE_PARENT, 0x18, 8, 2, 0, },
+ { HI3620_TIMER9_MUX, "timer9_mux", timer9_mux_p, ARRAY_SIZE(timer9_mux_p), CLK_SET_RATE_PARENT, 0x18, 10, 2, 0, },
+ { HI3620_UART0_MUX, "uart0_mux", uart0_mux_p, ARRAY_SIZE(uart0_mux_p), CLK_SET_RATE_PARENT, 0x100, 7, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART1_MUX, "uart1_mux", uart1_mux_p, ARRAY_SIZE(uart1_mux_p), CLK_SET_RATE_PARENT, 0x100, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART2_MUX, "uart2_mux", uart2_mux_p, ARRAY_SIZE(uart2_mux_p), CLK_SET_RATE_PARENT, 0x100, 9, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART3_MUX, "uart3_mux", uart3_mux_p, ARRAY_SIZE(uart3_mux_p), CLK_SET_RATE_PARENT, 0x100, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_UART4_MUX, "uart4_mux", uart4_mux_p, ARRAY_SIZE(uart4_mux_p), CLK_SET_RATE_PARENT, 0x100, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI0_MUX, "spi0_mux", spi0_mux_p, ARRAY_SIZE(spi0_mux_p), CLK_SET_RATE_PARENT, 0x100, 12, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI1_MUX, "spi1_mux", spi1_mux_p, ARRAY_SIZE(spi1_mux_p), CLK_SET_RATE_PARENT, 0x100, 13, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SPI2_MUX, "spi2_mux", spi2_mux_p, ARRAY_SIZE(spi2_mux_p), CLK_SET_RATE_PARENT, 0x100, 14, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SAXI_MUX, "saxi_mux", saxi_mux_p, ARRAY_SIZE(saxi_mux_p), CLK_SET_RATE_PARENT, 0x100, 15, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_PWM0_MUX, "pwm0_mux", pwm0_mux_p, ARRAY_SIZE(pwm0_mux_p), CLK_SET_RATE_PARENT, 0x104, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_PWM1_MUX, "pwm1_mux", pwm1_mux_p, ARRAY_SIZE(pwm1_mux_p), CLK_SET_RATE_PARENT, 0x104, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_SD_MUX, "sd_mux", sd_mux_p, ARRAY_SIZE(sd_mux_p), CLK_SET_RATE_PARENT, 0x108, 4, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC1_MUX, "mmc1_mux", mmc1_mux_p, ARRAY_SIZE(mmc1_mux_p), CLK_SET_RATE_PARENT, 0x108, 9, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC1_MUX2, "mmc1_mux2", mmc1_mux2_p, ARRAY_SIZE(mmc1_mux2_p), CLK_SET_RATE_PARENT, 0x108, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_G2D_MUX, "g2d_mux", g2d_mux_p, ARRAY_SIZE(g2d_mux_p), CLK_SET_RATE_PARENT, 0x10c, 5, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VENC_MUX, "venc_mux", venc_mux_p, ARRAY_SIZE(venc_mux_p), CLK_SET_RATE_PARENT, 0x10c, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VDEC_MUX, "vdec_mux", vdec_mux_p, ARRAY_SIZE(vdec_mux_p), CLK_SET_RATE_PARENT, 0x110, 5, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_VPP_MUX, "vpp_mux", vpp_mux_p, ARRAY_SIZE(vpp_mux_p), CLK_SET_RATE_PARENT, 0x110, 11, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_EDC0_MUX, "edc0_mux", edc0_mux_p, ARRAY_SIZE(edc0_mux_p), CLK_SET_RATE_PARENT, 0x114, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_LDI0_MUX, "ldi0_mux", ldi0_mux_p, ARRAY_SIZE(ldi0_mux_p), CLK_SET_RATE_PARENT, 0x114, 13, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3620_EDC1_MUX, "edc1_mux", edc1_mux_p, ARRAY_SIZE(edc1_mux_p), CLK_SET_RATE_PARENT, 0x118, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_LDI1_MUX, "ldi1_mux", ldi1_mux_p, ARRAY_SIZE(ldi1_mux_p), CLK_SET_RATE_PARENT, 0x118, 14, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3620_RCLK_HSIC, "rclk_hsic", rclk_hsic_p, ARRAY_SIZE(rclk_hsic_p), CLK_SET_RATE_PARENT, 0x130, 2, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC2_MUX, "mmc2_mux", mmc2_mux_p, ARRAY_SIZE(mmc2_mux_p), CLK_SET_RATE_PARENT, 0x140, 4, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3620_MMC3_MUX, "mmc3_mux", mmc3_mux_p, ARRAY_SIZE(mmc3_mux_p), CLK_SET_RATE_PARENT, 0x140, 9, 1, CLK_MUX_HIWORD_MASK, },
+};
+
+static struct hisi_divider_clock hi3620_div_clks[] __initdata = {
+ { HI3620_SHAREAXI_DIV, "saxi_div", "saxi_mux", 0, 0x100, 0, 5, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_CFGAXI_DIV, "cfgaxi_div", "saxi_div", 0, 0x100, 5, 2, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_SD_DIV, "sd_div", "sd_mux", 0, 0x108, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC1_DIV, "mmc1_div", "mmc1_mux", 0, 0x108, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_HSIC_DIV, "hsic_div", "rclk_hsic", 0, 0x130, 0, 2, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC2_DIV, "mmc2_div", "mmc2_mux", 0, 0x140, 0, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+ { HI3620_MMC3_DIV, "mmc3_div", "mmc3_mux", 0, 0x140, 5, 4, CLK_DIVIDER_HIWORD_MASK, NULL, },
+};
+
+static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = {
+ { HI3620_TIMERCLK01, "timerclk01", "timer_rclk01", CLK_SET_RATE_PARENT, 0x20, 0, 0, },
+ { HI3620_TIMER_RCLK01, "timer_rclk01", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 1, 0, },
+ { HI3620_TIMERCLK23, "timerclk23", "timer_rclk23", CLK_SET_RATE_PARENT, 0x20, 2, 0, },
+ { HI3620_TIMER_RCLK23, "timer_rclk23", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x20, 3, 0, },
+ { HI3620_RTCCLK, "rtcclk", "pclk", CLK_SET_RATE_PARENT, 0x20, 5, 0, },
+ { HI3620_KPC_CLK, "kpc_clk", "pclk", CLK_SET_RATE_PARENT, 0x20, 6, 0, },
+ { HI3620_GPIOCLK0, "gpioclk0", "pclk", CLK_SET_RATE_PARENT, 0x20, 8, 0, },
+ { HI3620_GPIOCLK1, "gpioclk1", "pclk", CLK_SET_RATE_PARENT, 0x20, 9, 0, },
+ { HI3620_GPIOCLK2, "gpioclk2", "pclk", CLK_SET_RATE_PARENT, 0x20, 10, 0, },
+ { HI3620_GPIOCLK3, "gpioclk3", "pclk", CLK_SET_RATE_PARENT, 0x20, 11, 0, },
+ { HI3620_GPIOCLK4, "gpioclk4", "pclk", CLK_SET_RATE_PARENT, 0x20, 12, 0, },
+ { HI3620_GPIOCLK5, "gpioclk5", "pclk", CLK_SET_RATE_PARENT, 0x20, 13, 0, },
+ { HI3620_GPIOCLK6, "gpioclk6", "pclk", CLK_SET_RATE_PARENT, 0x20, 14, 0, },
+ { HI3620_GPIOCLK7, "gpioclk7", "pclk", CLK_SET_RATE_PARENT, 0x20, 15, 0, },
+ { HI3620_GPIOCLK8, "gpioclk8", "pclk", CLK_SET_RATE_PARENT, 0x20, 16, 0, },
+ { HI3620_GPIOCLK9, "gpioclk9", "pclk", CLK_SET_RATE_PARENT, 0x20, 17, 0, },
+ { HI3620_GPIOCLK10, "gpioclk10", "pclk", CLK_SET_RATE_PARENT, 0x20, 18, 0, },
+ { HI3620_GPIOCLK11, "gpioclk11", "pclk", CLK_SET_RATE_PARENT, 0x20, 19, 0, },
+ { HI3620_GPIOCLK12, "gpioclk12", "pclk", CLK_SET_RATE_PARENT, 0x20, 20, 0, },
+ { HI3620_GPIOCLK13, "gpioclk13", "pclk", CLK_SET_RATE_PARENT, 0x20, 21, 0, },
+ { HI3620_GPIOCLK14, "gpioclk14", "pclk", CLK_SET_RATE_PARENT, 0x20, 22, 0, },
+ { HI3620_GPIOCLK15, "gpioclk15", "pclk", CLK_SET_RATE_PARENT, 0x20, 23, 0, },
+ { HI3620_GPIOCLK16, "gpioclk16", "pclk", CLK_SET_RATE_PARENT, 0x20, 24, 0, },
+ { HI3620_GPIOCLK17, "gpioclk17", "pclk", CLK_SET_RATE_PARENT, 0x20, 25, 0, },
+ { HI3620_GPIOCLK18, "gpioclk18", "pclk", CLK_SET_RATE_PARENT, 0x20, 26, 0, },
+ { HI3620_GPIOCLK19, "gpioclk19", "pclk", CLK_SET_RATE_PARENT, 0x20, 27, 0, },
+ { HI3620_GPIOCLK20, "gpioclk20", "pclk", CLK_SET_RATE_PARENT, 0x20, 28, 0, },
+ { HI3620_GPIOCLK21, "gpioclk21", "pclk", CLK_SET_RATE_PARENT, 0x20, 29, 0, },
+ { HI3620_DPHY0_CLK, "dphy0_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 15, 0, },
+ { HI3620_DPHY1_CLK, "dphy1_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 16, 0, },
+ { HI3620_DPHY2_CLK, "dphy2_clk", "osc26m", CLK_SET_RATE_PARENT, 0x30, 17, 0, },
+ { HI3620_USBPHY_CLK, "usbphy_clk", "rclk_pico", CLK_SET_RATE_PARENT, 0x30, 24, 0, },
+ { HI3620_ACP_CLK, "acp_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x30, 28, 0, },
+ { HI3620_TIMERCLK45, "timerclk45", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 3, 0, },
+ { HI3620_TIMERCLK67, "timerclk67", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 4, 0, },
+ { HI3620_TIMERCLK89, "timerclk89", "rclk_tcxo", CLK_SET_RATE_PARENT, 0x40, 5, 0, },
+ { HI3620_PWMCLK0, "pwmclk0", "pwm0_mux", CLK_SET_RATE_PARENT, 0x40, 7, 0, },
+ { HI3620_PWMCLK1, "pwmclk1", "pwm1_mux", CLK_SET_RATE_PARENT, 0x40, 8, 0, },
+ { HI3620_UARTCLK0, "uartclk0", "uart0_mux", CLK_SET_RATE_PARENT, 0x40, 16, 0, },
+ { HI3620_UARTCLK1, "uartclk1", "uart1_mux", CLK_SET_RATE_PARENT, 0x40, 17, 0, },
+ { HI3620_UARTCLK2, "uartclk2", "uart2_mux", CLK_SET_RATE_PARENT, 0x40, 18, 0, },
+ { HI3620_UARTCLK3, "uartclk3", "uart3_mux", CLK_SET_RATE_PARENT, 0x40, 19, 0, },
+ { HI3620_UARTCLK4, "uartclk4", "uart4_mux", CLK_SET_RATE_PARENT, 0x40, 20, 0, },
+ { HI3620_SPICLK0, "spiclk0", "spi0_mux", CLK_SET_RATE_PARENT, 0x40, 21, 0, },
+ { HI3620_SPICLK1, "spiclk1", "spi1_mux", CLK_SET_RATE_PARENT, 0x40, 22, 0, },
+ { HI3620_SPICLK2, "spiclk2", "spi2_mux", CLK_SET_RATE_PARENT, 0x40, 23, 0, },
+ { HI3620_I2CCLK0, "i2cclk0", "pclk", CLK_SET_RATE_PARENT, 0x40, 24, 0, },
+ { HI3620_I2CCLK1, "i2cclk1", "pclk", CLK_SET_RATE_PARENT, 0x40, 25, 0, },
+ { HI3620_SCI_CLK, "sci_clk", "osc26m", CLK_SET_RATE_PARENT, 0x40, 26, 0, },
+ { HI3620_I2CCLK2, "i2cclk2", "pclk", CLK_SET_RATE_PARENT, 0x40, 28, 0, },
+ { HI3620_I2CCLK3, "i2cclk3", "pclk", CLK_SET_RATE_PARENT, 0x40, 29, 0, },
+ { HI3620_DDRC_PER_CLK, "ddrc_per_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 9, 0, },
+ { HI3620_DMAC_CLK, "dmac_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 10, 0, },
+ { HI3620_USB2DVC_CLK, "usb2dvc_clk", "rclk_cfgaxi", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3620_SD_CLK, "sd_clk", "sd_div", CLK_SET_RATE_PARENT, 0x50, 20, 0, },
+ { HI3620_MMC_CLK1, "mmc_clk1", "mmc1_mux2", CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+ { HI3620_MMC_CLK2, "mmc_clk2", "mmc2_div", CLK_SET_RATE_PARENT, 0x50, 22, 0, },
+ { HI3620_MMC_CLK3, "mmc_clk3", "mmc3_div", CLK_SET_RATE_PARENT, 0x50, 23, 0, },
+ { HI3620_MCU_CLK, "mcu_clk", "acp_clk", CLK_SET_RATE_PARENT, 0x50, 24, 0, },
+};
+
+static void __init hi3620_clk_init(struct device_node *np)
+{
+ void __iomem *base;
+
+ if (np) {
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("failed to map Hi3620 clock registers\n");
+ return;
+ }
+ } else {
+ pr_err("failed to find Hi3620 clock node in DTS\n");
+ return;
+ }
+
+ hisi_clk_init(np, HI3620_NR_CLKS);
+
+ hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks,
+ ARRAY_SIZE(hi3620_fixed_rate_clks),
+ base);
+ hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks,
+ ARRAY_SIZE(hi3620_fixed_factor_clks),
+ base);
+ hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks),
+ base);
+ hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks),
+ base);
+ hisi_clk_register_gate_sep(hi3620_seperated_gate_clks,
+ ARRAY_SIZE(hi3620_seperated_gate_clks),
+ base);
+}
+CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init);
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
new file mode 100644
index 000000000000..a3a7152c92d9
--- /dev/null
+++ b/drivers/clk/hisilicon/clk.c
@@ -0,0 +1,171 @@
+/*
+ * Hisilicon clock driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+static DEFINE_SPINLOCK(hisi_clk_lock);
+static struct clk **clk_table;
+static struct clk_onecell_data clk_data;
+
+void __init hisi_clk_init(struct device_node *np, int nr_clks)
+{
+ clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL);
+ if (!clk_table) {
+ pr_err("%s: could not allocate clock lookup table\n", __func__);
+ return;
+ }
+ clk_data.clks = clk_table;
+ clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_fixed_rate(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ clks[i].fixed_rate);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ }
+}
+
+void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_fixed_factor(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags, clks[i].mult,
+ clks[i].div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ }
+}
+
+void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_mux(NULL, clks[i].name, clks[i].parent_names,
+ clks[i].num_parents, clks[i].flags,
+ base + clks[i].offset, clks[i].shift,
+ clks[i].width, clks[i].mux_flags,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
+
+void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_divider_table(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].shift, clks[i].width,
+ clks[i].div_flags,
+ clks[i].table,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
+
+void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
+ int nums, void __iomem *base)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = hisi_register_clkgate_sep(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].bit_idx,
+ clks[i].gate_flags,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+
+ if (clks[i].alias)
+ clk_register_clkdev(clk, clks[i].alias, NULL);
+
+ clk_table[clks[i].id] = clk;
+ }
+}
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
new file mode 100644
index 000000000000..4a6beebefb7a
--- /dev/null
+++ b/drivers/clk/hisilicon/clk.h
@@ -0,0 +1,103 @@
+/*
+ * Hisilicon Hi3620 clock gate driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __HISI_CLK_H
+#define __HISI_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+struct hisi_fixed_rate_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long fixed_rate;
+};
+
+struct hisi_fixed_factor_clock {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long mult;
+ unsigned long div;
+ unsigned long flags;
+};
+
+struct hisi_mux_clock {
+ unsigned int id;
+ const char *name;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ const char *alias;
+};
+
+struct hisi_divider_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ struct clk_div_table *table;
+ const char *alias;
+};
+
+struct hisi_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 bit_idx;
+ u8 gate_flags;
+ const char *alias;
+};
+
+struct clk *hisi_register_clkgate_sep(struct device *, const char *,
+ const char *, unsigned long,
+ void __iomem *, u8,
+ u8, spinlock_t *);
+
+void __init hisi_clk_init(struct device_node *, int);
+void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_mux(struct hisi_mux_clock *, int,
+ void __iomem *);
+void __init hisi_clk_register_divider(struct hisi_divider_clock *,
+ int, void __iomem *);
+void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *,
+ int, void __iomem *);
+#endif /* __HISI_CLK_H */
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
new file mode 100644
index 000000000000..b03d5a7246f9
--- /dev/null
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -0,0 +1,130 @@
+/*
+ * Hisilicon clock separated gate driver
+ *
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ * Xin Li <li.xin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+/* clock separated gate register offset */
+#define CLKGATE_SEPERATED_ENABLE 0x0
+#define CLKGATE_SEPERATED_DISABLE 0x4
+#define CLKGATE_SEPERATED_STATUS 0x8
+
+struct clkgate_separated {
+ struct clk_hw hw;
+ void __iomem *enable; /* enable register */
+ u8 bit_idx; /* bits in enable/disable register */
+ u8 flags;
+ spinlock_t *lock;
+};
+
+static int clkgate_separated_enable(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ unsigned long flags = 0;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ if (sclk->lock)
+ spin_lock_irqsave(sclk->lock, flags);
+ reg = BIT(sclk->bit_idx);
+ writel_relaxed(reg, sclk->enable);
+ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ if (sclk->lock)
+ spin_unlock_irqrestore(sclk->lock, flags);
+ return 0;
+}
+
+static void clkgate_separated_disable(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ unsigned long flags = 0;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ if (sclk->lock)
+ spin_lock_irqsave(sclk->lock, flags);
+ reg = BIT(sclk->bit_idx);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ if (sclk->lock)
+ spin_unlock_irqrestore(sclk->lock, flags);
+}
+
+static int clkgate_separated_is_enabled(struct clk_hw *hw)
+{
+ struct clkgate_separated *sclk;
+ u32 reg;
+
+ sclk = container_of(hw, struct clkgate_separated, hw);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg &= BIT(sclk->bit_idx);
+
+ return reg ? 1 : 0;
+}
+
+static struct clk_ops clkgate_separated_ops = {
+ .enable = clkgate_separated_enable,
+ .disable = clkgate_separated_disable,
+ .is_enabled = clkgate_separated_is_enabled,
+};
+
+struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clkgate_separated *sclk;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+ if (!sclk) {
+ pr_err("%s: fail to allocate separated gated clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clkgate_separated_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->bit_idx = bit_idx;
+ sclk->flags = clk_gate_flags;
+ sclk->hw.init = &init;
+
+ clk = clk_register(dev, &sclk->hw);
+ if (IS_ERR(clk))
+ kfree(sclk);
+ return clk;
+}
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 1f333bcfc22e..86f1e362eafb 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,
init.name = name;
init.ops = &clk_psc_ops;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
@@ -223,8 +224,7 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
data->domain_base = of_iomap(node, i);
if (!data->domain_base) {
pr_err("%s: domain ioremap failed\n", __func__);
- iounmap(data->control_base);
- goto out;
+ goto unmap_ctrl;
}
of_property_read_u32(node, "domain-id", &data->domain_id);
@@ -237,16 +237,21 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
pr_err("%s: Parent clock not found\n", __func__);
- goto out;
+ goto unmap_domain;
}
clk = clk_register_psc(NULL, clk_name, parent_name, data, lock);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return;
}
pr_err("%s: error registering clk %s\n", __func__, node->name);
+
+unmap_domain:
+ iounmap(data->domain_base);
+unmap_ctrl:
+ iounmap(data->control_base);
out:
kfree(data);
return;
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 47a1bd9f1726..0dd8a4b12747 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -24,6 +24,8 @@
#define MAIN_PLLM_HIGH_MASK 0x7f000
#define PLLM_HIGH_SHIFT 6
#define PLLD_MASK 0x3f
+#define CLKOD_MASK 0x780000
+#define CLKOD_SHIFT 19
/**
* struct clk_pll_data - pll data structure
@@ -41,7 +43,10 @@
* @pllm_upper_mask: multiplier upper mask
* @pllm_upper_shift: multiplier upper shift
* @plld_mask: divider mask
- * @postdiv: Post divider
+ * @clkod_mask: output divider mask
+ * @clkod_shift: output divider shift
+ * @postdiv: Fixed post divider
*/
struct clk_pll_data {
bool has_pllctrl;
@@ -53,6 +58,8 @@ struct clk_pll_data {
u32 pllm_upper_mask;
u32 pllm_upper_shift;
u32 plld_mask;
+ u32 clkod_mask;
+ u32 clkod_shift;
u32 postdiv;
};
@@ -90,7 +97,13 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
mult |= ((val & pll_data->pllm_upper_mask)
>> pll_data->pllm_upper_shift);
prediv = (val & pll_data->plld_mask);
- postdiv = pll_data->postdiv;
+
+ if (!pll_data->has_pllctrl)
+ /* read the post divider from the OD bits */
+ postdiv = ((val & pll_data->clkod_mask) >>
+ pll_data->clkod_shift) + 1;
+ else
+ postdiv = pll_data->postdiv;
rate /= (prediv + 1);
rate = (rate * (mult + 1));
@@ -155,8 +168,11 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
}
parent_name = of_clk_get_parent_name(node, 0);
- if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv))
- goto out;
+ if (of_property_read_u32(node, "fixed-postdiv", &pll_data->postdiv)) {
+ /* assume the PLL has output divider register bits */
+ pll_data->clkod_mask = CLKOD_MASK;
+ pll_data->clkod_shift = CLKOD_SHIFT;
+ }
i = of_property_match_string(node, "reg-names", "control");
pll_data->pll_ctl0 = of_iomap(node, i);
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 0b0f3e729cf7..c339b829d3e3 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -4,15 +4,20 @@ config MVEBU_CLK_COMMON
config MVEBU_CLK_CPU
bool
+config MVEBU_CLK_COREDIV
+ bool
+
config ARMADA_370_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
+ select MVEBU_CLK_COREDIV
config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
+ select MVEBU_CLK_COREDIV
config DOVE_CLK
bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 1c7e70c63fb2..21bbfb4a9f42 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_MVEBU_CLK_COMMON) += common.o
obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
+obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d12a7a..bef198a83863 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
};
-static void __init a370_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
- a370_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
{ }
};
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, a370_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+ mvebu_coreclk_setup(np, &a370_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, a370_gating_desc);
}
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
- a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c4475aa8..b3094315a3c0 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static void __init axp_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
- axp_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ }
};
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, axp_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+ mvebu_coreclk_setup(np, &axp_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, axp_gating_desc);
}
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
- axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
new file mode 100644
index 000000000000..7162615bcdcd
--- /dev/null
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -0,0 +1,223 @@
+/*
+ * MVEBU Core divider clock
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "common.h"
+
+#define CORE_CLK_DIV_RATIO_MASK 0xff
+#define CORE_CLK_DIV_RATIO_RELOAD BIT(8)
+#define CORE_CLK_DIV_ENABLE_OFFSET 24
+#define CORE_CLK_DIV_RATIO_OFFSET 0x8
+
+struct clk_corediv_desc {
+ unsigned int mask;
+ unsigned int offset;
+ unsigned int fieldbit;
+};
+
+struct clk_corediv {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct clk_corediv_desc desc;
+ spinlock_t lock;
+};
+
+static struct clk_onecell_data clk_data;
+
+static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
+ { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
+};
+
+#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
+
+static int clk_corediv_is_enabled(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET;
+
+ return !!(readl(corediv->reg) & enable_mask);
+}
+
+static int clk_corediv_enable(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ reg = readl(corediv->reg);
+ reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ writel(reg, corediv->reg);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+
+ return 0;
+}
+
+static void clk_corediv_disable(struct clk_hw *hwclk)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ reg = readl(corediv->reg);
+ reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
+ writel(reg, corediv->reg);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+}
+
+static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ u32 reg, div;
+
+ reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ div = (reg >> desc->offset) & desc->mask;
+ return parent_rate / div;
+}
+
+static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
+ u32 div;
+
+ div = *parent_rate / rate;
+ if (div < 4)
+ div = 4;
+ else if (div > 6)
+ div = 8;
+
+ return *parent_rate / div;
+}
+
+static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_corediv *corediv = to_corediv_clk(hwclk);
+ struct clk_corediv_desc *desc = &corediv->desc;
+ unsigned long flags = 0;
+ u32 reg, div;
+
+ div = parent_rate / rate;
+
+ spin_lock_irqsave(&corediv->lock, flags);
+
+ /* Write new divider to the divider ratio register */
+ reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+ reg &= ~(desc->mask << desc->offset);
+ reg |= (div & desc->mask) << desc->offset;
+ writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
+
+ /* Set reload-force for this clock */
+ reg = readl(corediv->reg) | BIT(desc->fieldbit);
+ writel(reg, corediv->reg);
+
+ /* Now trigger the clock update */
+ reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD;
+ writel(reg, corediv->reg);
+
+ /*
+ * Wait for clocks to settle down, and then clear all the
+ * ratios request and the reload request.
+ */
+ udelay(1000);
+ reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD);
+ writel(reg, corediv->reg);
+ udelay(1000);
+
+ spin_unlock_irqrestore(&corediv->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops corediv_ops = {
+ .enable = clk_corediv_enable,
+ .disable = clk_corediv_disable,
+ .is_enabled = clk_corediv_is_enabled,
+ .recalc_rate = clk_corediv_recalc_rate,
+ .round_rate = clk_corediv_round_rate,
+ .set_rate = clk_corediv_set_rate,
+};
+
+static void __init mvebu_corediv_clk_init(struct device_node *node)
+{
+ struct clk_init_data init;
+ struct clk_corediv *corediv;
+ struct clk **clks;
+ void __iomem *base;
+ const char *parent_name;
+ const char *clk_name;
+ int i;
+
+ base = of_iomap(node, 0);
+ if (WARN_ON(!base))
+ return;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc);
+
+ /* clks holds the clock array */
+ clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!clks))
+ goto err_unmap;
+ /* corediv holds the clock-specific data array */
+ corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
+ GFP_KERNEL);
+ if (WARN_ON(!corediv))
+ goto err_free_clks;
+
+ spin_lock_init(&corediv->lock);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name);
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+ init.name = clk_name;
+ init.ops = &corediv_ops;
+ init.flags = 0;
+
+ corediv[i].desc = mvebu_corediv_desc[i];
+ corediv[i].reg = base;
+ corediv[i].hw.init = &init;
+
+ clks[i] = clk_register(NULL, &corediv[i].hw);
+ WARN_ON(IS_ERR(clks[i]));
+ }
+
+ clk_data.clks = clks;
+ of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+ return;
+
+err_free_clks:
+ kfree(clks);
+err_unmap:
+ iounmap(base);
+}
+CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock",
+ mvebu_corediv_clk_init);
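Editor's note: a quick sanity check of the rounding policy in clk_corediv_round_rate() above, assuming a hypothetical 2 GHz parent (the parent rate is invented purely for the arithmetic; the clamp to 4..6, else 8, is taken from the code):

	2000 MHz / 1000 MHz -> div 2,  clamped up to 4 -> rounded rate 500 MHz
	2000 MHz /  400 MHz -> div 5,  within 4..6     -> rounded rate 400 MHz
	2000 MHz /  300 MHz -> div 6,  within 4..6     -> rounded rate ~333 MHz
	2000 MHz /  200 MHz -> div 10, clamped to 8    -> rounded rate 250 MHz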
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 1466865b0743..8ebf757d29e2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -101,7 +101,7 @@ static const struct clk_ops cpu_ops = {
.set_rate = clk_cpu_set_rate,
};
-void __init of_cpu_clk_setup(struct device_node *node)
+static void __init of_cpu_clk_setup(struct device_node *node)
{
struct cpu_clk *cpuclk;
void __iomem *clock_complex_base = of_iomap(node, 0);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e3f242..b8c2424ac926 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
};
-static void __init dove_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
{ }
};
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, dove_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+ mvebu_coreclk_setup(np, &dove_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, dove_gating_desc);
}
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
- dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55f29f9..ddb666a86500 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
- kirkwood_coreclk_init);
-
static const struct coreclk_soc_desc mv88f6180_coreclks = {
.get_tclk_freq = kirkwood_get_tclk_freq,
.get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
- mv88f6180_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
{ }
};
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+ if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+ mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+ else
+ mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
}
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
- kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+ kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+ kirkwood_clk_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
new file mode 100644
index 000000000000..995bcfa021a4
--- /dev/null
+++ b/drivers/clk/qcom/Kconfig
@@ -0,0 +1,47 @@
+config COMMON_CLK_QCOM
+ tristate "Support for Qualcomm's clock controllers"
+ depends on OF
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+
+config MSM_GCC_8660
+ tristate "MSM8660 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MSM_GCC_8960
+ tristate "MSM8960 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8960 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8960
+ tristate "MSM8960 Multimedia Clock Controller"
+ select MSM_GCC_8960
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8960 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8974
+ tristate "MSM8974 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8974 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
+config MSM_MMCC_8974
+ tristate "MSM8974 Multimedia Clock Controller"
+ select MSM_GCC_8974
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8974 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
new file mode 100644
index 000000000000..f60db2ef1aee
--- /dev/null
+++ b/drivers/clk/qcom/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
+
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += reset.o
+
+obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
+obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
new file mode 100644
index 000000000000..6b4d2bcb1a53
--- /dev/null
+++ b/drivers/clk/qcom/clk-branch.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-branch.h"
+
+static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
+{
+ u32 val;
+
+ if (!br->hwcg_reg)
+ return 0;
+
+ regmap_read(br->clkr.regmap, br->hwcg_reg, &val);
+
+ return !!(val & BIT(br->hwcg_bit));
+}
+
+static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
+{
+ bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ u32 val;
+
+ regmap_read(br->clkr.regmap, br->halt_reg, &val);
+
+ val &= BIT(br->halt_bit);
+ if (invert)
+ val = !val;
+
+ return !!val == !enabling;
+}
+
+#define BRANCH_CLK_OFF BIT(31)
+#define BRANCH_NOC_FSM_STATUS_SHIFT 28
+#define BRANCH_NOC_FSM_STATUS_MASK 0x7
+#define BRANCH_NOC_FSM_STATUS_ON (0x2 << BRANCH_NOC_FSM_STATUS_SHIFT)
+
+static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
+{
+ u32 val;
+ u32 mask;
+
+ mask = BRANCH_NOC_FSM_STATUS_MASK << BRANCH_NOC_FSM_STATUS_SHIFT;
+ mask |= BRANCH_CLK_OFF;
+
+ regmap_read(br->clkr.regmap, br->halt_reg, &val);
+
+ if (enabling) {
+ val &= mask;
+ return (val & BRANCH_CLK_OFF) == 0 ||
+ val == BRANCH_NOC_FSM_STATUS_ON;
+ } else {
+ return val & BRANCH_CLK_OFF;
+ }
+}
+
+static int clk_branch_wait(const struct clk_branch *br, bool enabling,
+ bool (check_halt)(const struct clk_branch *, bool))
+{
+ bool voted = br->halt_check & BRANCH_VOTED;
+ const char *name = __clk_get_name(br->clkr.hw.clk);
+
+ /* Skip checking halt bit if the clock is in hardware gated mode */
+ if (clk_branch_in_hwcg_mode(br))
+ return 0;
+
+ if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) {
+ udelay(10);
+ } else if (br->halt_check == BRANCH_HALT_ENABLE ||
+ br->halt_check == BRANCH_HALT ||
+ (enabling && voted)) {
+ int count = 200;
+
+ while (count-- > 0) {
+ if (check_halt(br, enabling))
+ return 0;
+ udelay(1);
+ }
+ WARN(1, "%s status stuck at 'o%s'", name,
+ enabling ? "ff" : "n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int clk_branch_toggle(struct clk_hw *hw, bool en,
+ bool (check_halt)(const struct clk_branch *, bool))
+{
+ struct clk_branch *br = to_clk_branch(hw);
+ int ret;
+
+ if (en) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ } else {
+ clk_disable_regmap(hw);
+ }
+
+ return clk_branch_wait(br, en, check_halt);
+}
+
+static int clk_branch_enable(struct clk_hw *hw)
+{
+ return clk_branch_toggle(hw, true, clk_branch_check_halt);
+}
+
+static void clk_branch_disable(struct clk_hw *hw)
+{
+ clk_branch_toggle(hw, false, clk_branch_check_halt);
+}
+
+const struct clk_ops clk_branch_ops = {
+ .enable = clk_branch_enable,
+ .disable = clk_branch_disable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch_ops);
+
+static int clk_branch2_enable(struct clk_hw *hw)
+{
+ return clk_branch_toggle(hw, true, clk_branch2_check_halt);
+}
+
+static void clk_branch2_disable(struct clk_hw *hw)
+{
+ clk_branch_toggle(hw, false, clk_branch2_check_halt);
+}
+
+const struct clk_ops clk_branch2_ops = {
+ .enable = clk_branch2_enable,
+ .disable = clk_branch2_disable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_ops);
+
+const struct clk_ops clk_branch_simple_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch_simple_ops);
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
new file mode 100644
index 000000000000..284df3f3c55f
--- /dev/null
+++ b/drivers/clk/qcom/clk-branch.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_BRANCH_H__
+#define __QCOM_CLK_BRANCH_H__
+
+#include <linux/clk-provider.h>
+
+#include "clk-regmap.h"
+
+/**
+ * struct clk_branch - gating clock with status bit and dynamic hardware gating
+ *
+ * @hwcg_reg: dynamic hardware clock gating register
+ * @hwcg_bit: ORed with @hwcg_reg to enable dynamic hardware clock gating
+ * @halt_reg: halt register
+ * @halt_bit: ANDed with @halt_reg to test for clock halted
+ * @halt_check: type of halt checking to perform
+ * @clkr: handle between common and hardware-specific interfaces
+ *
+ * Clock which can gate its output.
+ */
+struct clk_branch {
+ u32 hwcg_reg;
+ u32 halt_reg;
+ u8 hwcg_bit;
+ u8 halt_bit;
+ u8 halt_check;
+#define BRANCH_VOTED BIT(7) /* Delay on disable */
+#define BRANCH_HALT 0 /* pol: 1 = halt */
+#define BRANCH_HALT_VOTED (BRANCH_HALT | BRANCH_VOTED)
+#define BRANCH_HALT_ENABLE 1 /* pol: 0 = halt */
+#define BRANCH_HALT_ENABLE_VOTED (BRANCH_HALT_ENABLE | BRANCH_VOTED)
+#define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_branch_ops;
+extern const struct clk_ops clk_branch2_ops;
+extern const struct clk_ops clk_branch_simple_ops;
+
+#define to_clk_branch(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
+
+#endif
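Editor's note: a branch clock built on these ops is normally declared as static data and registered through the regmap helpers that appear later in this series. A minimal sketch; the register offsets, bit positions and clock names are hypothetical, only the struct fields, flags and ops come from the code above:

static struct clk_branch example_uart1_apps_clk = {
	.halt_reg = 0x0684,		/* hypothetical status register */
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x0684,	/* hypothetical enable register */
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "example_uart1_apps_clk",
			.parent_names = (const char *[]){ "example_uart1_src" },
			.num_parents = 1,
			.ops = &clk_branch2_ops,
			.flags = CLK_SET_RATE_PARENT,
		},
	},
};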
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
new file mode 100644
index 000000000000..0f927c538613
--- /dev/null
+++ b/drivers/clk/qcom/clk-pll.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-pll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_LOCK_COUNT_SHIFT 8
+#define PLL_LOCK_COUNT_MASK 0x3f
+#define PLL_BIAS_COUNT_SHIFT 14
+#define PLL_BIAS_COUNT_MASK 0x3f
+#define PLL_VOTE_FSM_ENA BIT(20)
+#define PLL_VOTE_FSM_RESET BIT(21)
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ int ret;
+ u32 mask, val;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &val);
+ if (ret)
+ return ret;
+
+ /* Skip if already enabled or in FSM mode */
+ if ((val & mask) == mask || val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ /* Disable PLL bypass mode. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_BYPASSNL,
+ PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_RESET_N,
+ PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Wait until PLL is locked. */
+ udelay(50);
+
+ /* Enable PLL output. */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
+ PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ u32 mask;
+ u32 val;
+
+ regmap_read(pll->clkr.regmap, pll->mode_reg, &val);
+ /* Skip if in FSM mode */
+ if (val & PLL_VOTE_FSM_ENA)
+ return;
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, pll->mode_reg, mask, 0);
+}
+
+static unsigned long
+clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ u32 l, m, n;
+ unsigned long rate;
+ u64 tmp;
+
+ regmap_read(pll->clkr.regmap, pll->l_reg, &l);
+ regmap_read(pll->clkr.regmap, pll->m_reg, &m);
+ regmap_read(pll->clkr.regmap, pll->n_reg, &n);
+
+ l &= 0x3ff;
+ m &= 0x7ffff;
+ n &= 0x7ffff;
+
+ rate = parent_rate * l;
+ if (n) {
+ tmp = parent_rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate += tmp;
+ }
+ return rate;
+}
+
+const struct clk_ops clk_pll_ops = {
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pll_ops);
+
+static int wait_for_pll(struct clk_pll *pll)
+{
+ u32 val;
+ int count;
+ int ret;
+ const char *name = __clk_get_name(pll->clkr.hw.clk);
+
+ /* Wait for pll to enable. */
+ for (count = 200; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, pll->status_reg, &val);
+ if (ret)
+ return ret;
+ if (val & BIT(pll->status_bit))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s didn't enable after voting for it!\n", name);
+ return -ETIMEDOUT;
+}
+
+static int clk_pll_vote_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_pll *p = to_clk_pll(__clk_get_hw(__clk_get_parent(hw->clk)));
+
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+
+ return wait_for_pll(p);
+}
+
+const struct clk_ops clk_pll_vote_ops = {
+ .enable = clk_pll_vote_enable,
+ .disable = clk_disable_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
+
+static void
+clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap)
+{
+ u32 val;
+ u32 mask;
+
+ /* De-assert reset to FSM */
+ regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
+
+ /* Program bias count and lock count */
+ val = 1 << PLL_BIAS_COUNT_SHIFT;
+ mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
+ mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
+ regmap_update_bits(regmap, pll->mode_reg, mask, val);
+
+ /* Enable PLL FSM voting */
+ regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_ENA,
+ PLL_VOTE_FSM_ENA);
+}
+
+static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ u32 val;
+ u32 mask;
+
+ regmap_write(regmap, pll->l_reg, config->l);
+ regmap_write(regmap, pll->m_reg, config->m);
+ regmap_write(regmap, pll->n_reg, config->n);
+
+ val = config->vco_val;
+ val |= config->pre_div_val;
+ val |= config->post_div_val;
+ val |= config->mn_ena_mask;
+ val |= config->main_output_mask;
+ val |= config->aux_output_mask;
+
+ mask = config->vco_mask;
+ mask |= config->pre_div_mask;
+ mask |= config->post_div_mask;
+ mask |= config->mn_ena_mask;
+ mask |= config->main_output_mask;
+ mask |= config->aux_output_mask;
+
+ regmap_update_bits(regmap, pll->config_reg, mask, val);
+}
+
+void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config, bool fsm_mode)
+{
+ clk_pll_configure(pll, regmap, config);
+ if (fsm_mode)
+ clk_pll_set_fsm_mode(pll, regmap);
+}
+EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
new file mode 100644
index 000000000000..0775a99ca768
--- /dev/null
+++ b/drivers/clk/qcom/clk-pll.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_PLL_H__
+#define __QCOM_CLK_PLL_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+/**
+ * struct clk_pll - phase locked loop (PLL)
+ * @l_reg: L register
+ * @m_reg: M register
+ * @n_reg: N register
+ * @config_reg: config register
+ * @mode_reg: mode register
+ * @status_reg: status register
+ * @status_bit: ANDed with @status_reg to determine if PLL is enabled
+ * @clkr: handle between common and hardware-specific interfaces
+ */
+struct clk_pll {
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 config_reg;
+ u32 mode_reg;
+ u32 status_reg;
+ u8 status_bit;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_pll_ops;
+extern const struct clk_ops clk_pll_vote_ops;
+
+#define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)
+
+struct pll_config {
+ u16 l;
+ u32 m;
+ u32 n;
+ u32 vco_val;
+ u32 vco_mask;
+ u32 pre_div_val;
+ u32 pre_div_mask;
+ u32 post_div_val;
+ u32 post_div_mask;
+ u32 mn_ena_mask;
+ u32 main_output_mask;
+ u32 aux_output_mask;
+};
+
+void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config, bool fsm_mode);
+
+#endif
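Editor's note: as with the branch clocks, a PLL is expected to be declared as static data. A sketch with invented register offsets and names; only the field names and ops come from struct clk_pll above:

static struct clk_pll example_pll8 = {
	.l_reg = 0x3144,		/* all offsets hypothetical */
	.m_reg = 0x3148,
	.n_reg = 0x314c,
	.config_reg = 0x3154,
	.mode_reg = 0x3140,
	.status_reg = 0x3158,
	.status_bit = 16,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_pll8",
		.parent_names = (const char *[]){ "example_xo" },
		.num_parents = 1,
		.ops = &clk_pll_ops,
	},
};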
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
new file mode 100644
index 000000000000..abfc2b675aea
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+
+static u32 ns_to_src(struct src_sel *s, u32 ns)
+{
+ ns >>= s->src_sel_shift;
+ ns &= SRC_SEL_MASK;
+ return ns;
+}
+
+static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns)
+{
+ u32 mask;
+
+ mask = SRC_SEL_MASK;
+ mask <<= s->src_sel_shift;
+ ns &= ~mask;
+
+ ns |= src << s->src_sel_shift;
+ return ns;
+}
+
+static u8 clk_rcg_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 ns;
+ int i;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = ns_to_src(&rcg->s, ns);
+ for (i = 0; i < num_parents; i++)
+ if (ns == rcg->s.parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
+{
+ bank &= BIT(rcg->mux_sel_bit);
+ return !!bank;
+}
+
+static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 ns, ctl;
+ int bank;
+ int i;
+ struct src_sel *s;
+
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ bank = reg_to_bank(rcg, ctl);
+ s = &rcg->s[bank];
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = ns_to_src(s, ns);
+
+ for (i = 0; i < num_parents; i++)
+ if (ns == s->parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 ns;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ ns = src_to_ns(&rcg->s, rcg->s.parent_map[index], ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ return 0;
+}
+
+static u32 md_to_m(struct mn *mn, u32 md)
+{
+ md >>= mn->m_val_shift;
+ md &= BIT(mn->width) - 1;
+ return md;
+}
+
+static u32 ns_to_pre_div(struct pre_div *p, u32 ns)
+{
+ ns >>= p->pre_div_shift;
+ ns &= BIT(p->pre_div_width) - 1;
+ return ns;
+}
+
+static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns)
+{
+ u32 mask;
+
+ mask = BIT(p->pre_div_width) - 1;
+ mask <<= p->pre_div_shift;
+ ns &= ~mask;
+
+ ns |= pre_div << p->pre_div_shift;
+ return ns;
+}
+
+static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
+{
+ u32 mask, mask_w;
+
+ mask_w = BIT(mn->width) - 1;
+ mask = (mask_w << mn->m_val_shift) | mask_w;
+ md &= ~mask;
+
+ if (n) {
+ m <<= mn->m_val_shift;
+ md |= m;
+ md |= ~n & mask_w;
+ }
+
+ return md;
+}
+
+static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m)
+{
+ ns = ~ns >> mn->n_val_shift;
+ ns &= BIT(mn->width) - 1;
+ return ns + m;
+}
+
+static u32 reg_to_mnctr_mode(struct mn *mn, u32 val)
+{
+ val >>= mn->mnctr_mode_shift;
+ val &= MNCTR_MODE_MASK;
+ return val;
+}
+
+static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns)
+{
+ u32 mask;
+
+ mask = BIT(mn->width) - 1;
+ mask <<= mn->n_val_shift;
+ ns &= ~mask;
+
+ if (n) {
+ n = n - m;
+ n = ~n;
+ n &= BIT(mn->width) - 1;
+ n <<= mn->n_val_shift;
+ ns |= n;
+ }
+
+ return ns;
+}
+
+static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
+{
+ u32 mask;
+
+ mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift;
+ mask |= BIT(mn->mnctr_en_bit);
+ val &= ~mask;
+
+ if (n) {
+ val |= BIT(mn->mnctr_en_bit);
+ val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift;
+ }
+
+ return val;
+}
+
+static void configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
+{
+ u32 ns, md, ctl, *regp;
+ int bank, new_bank;
+ struct mn *mn;
+ struct pre_div *p;
+ struct src_sel *s;
+ bool enabled;
+ u32 md_reg;
+ u32 bank_reg;
+ bool banked_mn = !!rcg->mn[1].width;
+ struct clk_hw *hw = &rcg->clkr.hw;
+
+ enabled = __clk_is_enabled(hw->clk);
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+
+ if (banked_mn) {
+ regp = &ctl;
+ bank_reg = rcg->clkr.enable_reg;
+ } else {
+ regp = &ns;
+ bank_reg = rcg->ns_reg;
+ }
+
+ bank = reg_to_bank(rcg, *regp);
+ new_bank = enabled ? !bank : bank;
+
+ if (banked_mn) {
+ mn = &rcg->mn[new_bank];
+ md_reg = rcg->md_reg[new_bank];
+
+ ns |= BIT(mn->mnctr_reset_bit);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ regmap_read(rcg->clkr.regmap, md_reg, &md);
+ md = mn_to_md(mn, f->m, f->n, md);
+ regmap_write(rcg->clkr.regmap, md_reg, md);
+
+ ns = mn_to_ns(mn, f->m, f->n, ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ ctl = mn_to_reg(mn, f->m, f->n, ctl);
+ regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
+
+ ns &= ~BIT(mn->mnctr_reset_bit);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+ } else {
+ p = &rcg->p[new_bank];
+ ns = pre_div_to_ns(p, f->pre_div - 1, ns);
+ }
+
+ s = &rcg->s[new_bank];
+ ns = src_to_ns(s, s->parent_map[f->src], ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ if (enabled) {
+ *regp ^= BIT(rcg->mux_sel_bit);
+ regmap_write(rcg->clkr.regmap, bank_reg, *regp);
+ }
+}
+
+static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ u32 ns, ctl, md, reg;
+ int bank;
+ struct freq_tbl f = { 0 };
+ bool banked_mn = !!rcg->mn[1].width;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ reg = banked_mn ? ctl : ns;
+
+ bank = reg_to_bank(rcg, reg);
+
+ if (banked_mn) {
+ regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
+ f.m = md_to_m(&rcg->mn[bank], md);
+ f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
+ } else {
+ f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
+ }
+ f.src = index;
+
+ configure_bank(rcg, &f);
+
+ return 0;
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ *          parent_rate     m
+ *   rate = ----------- x  ---
+ *            pre_div       n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div)
+{
+ if (pre_div)
+ rate /= pre_div + 1;
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 pre_div, m = 0, n = 0, ns, md, mode = 0;
+ struct mn *mn = &rcg->mn;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ pre_div = ns_to_pre_div(&rcg->p, ns);
+
+ if (rcg->mn.width) {
+ regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
+ m = md_to_m(mn, md);
+ n = ns_m_to_n(mn, ns, m);
+ /* MN counter mode is in hw.enable_reg sometimes */
+ if (rcg->clkr.enable_reg != rcg->ns_reg)
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &mode);
+ else
+ mode = ns;
+ mode = reg_to_mnctr_mode(mn, mode);
+ }
+
+ return calc_rate(parent_rate, m, n, mode, pre_div);
+}
+
+static unsigned long
+clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ u32 m, n, pre_div, ns, md, mode, reg;
+ int bank;
+ struct mn *mn;
+ bool banked_mn = !!rcg->mn[1].width;
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+
+ if (banked_mn)
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &reg);
+ else
+ reg = ns;
+
+ bank = reg_to_bank(rcg, reg);
+
+ if (banked_mn) {
+ mn = &rcg->mn[bank];
+ regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
+ m = md_to_m(mn, md);
+ n = ns_m_to_n(mn, ns, m);
+ mode = reg_to_mnctr_mode(mn, reg);
+ return calc_rate(parent_rate, m, n, mode, 0);
+ } else {
+ pre_div = ns_to_pre_div(&rcg->p[bank], ns);
+ return calc_rate(parent_rate, 0, 0, 0, pre_div);
+ }
+}
+
+static const
+struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ return NULL;
+}
+
+static long _freq_tbl_determine_rate(struct clk_hw *hw,
+ const struct freq_tbl *f, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ unsigned long clk_flags;
+
+ f = find_freq(f, rate);
+ if (!f)
+ return -EINVAL;
+
+ clk_flags = __clk_get_flags(hw->clk);
+ *p = clk_get_parent_by_index(hw->clk, f->src);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ rate = rate * f->pre_div;
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = __clk_get_rate(*p);
+ }
+ *p_rate = rate;
+
+ return f->freq;
+}
+
+static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ const struct freq_tbl *f;
+ u32 ns, md, ctl;
+ struct mn *mn = &rcg->mn;
+ u32 mask = 0;
+ unsigned int reset_reg;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ if (rcg->mn.reset_in_cc)
+ reset_reg = rcg->clkr.enable_reg;
+ else
+ reset_reg = rcg->ns_reg;
+
+ if (rcg->mn.width) {
+ mask = BIT(mn->mnctr_reset_bit);
+ regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, mask);
+
+ regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
+ md = mn_to_md(mn, f->m, f->n, md);
+ regmap_write(rcg->clkr.regmap, rcg->md_reg, md);
+
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ /* MN counter mode is in hw.enable_reg sometimes */
+ if (rcg->clkr.enable_reg != rcg->ns_reg) {
+ regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
+ ctl = mn_to_reg(mn, f->m, f->n, ctl);
+ regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
+ } else {
+ ns = mn_to_reg(mn, f->m, f->n, ns);
+ }
+ ns = mn_to_ns(mn, f->m, f->n, ns);
+ } else {
+ regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
+ }
+
+ ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns);
+ regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
+
+ regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, 0);
+
+ return 0;
+}
+
+static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+ const struct freq_tbl *f;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ configure_bank(rcg, f);
+
+ return 0;
+}
+
+static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_dyn_rcg_set_rate(hw, rate);
+}
+
+static int clk_dyn_rcg_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_dyn_rcg_set_rate(hw, rate);
+}
+
+const struct clk_ops clk_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_determine_rate,
+ .set_rate = clk_rcg_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_ops);
+
+const struct clk_ops clk_dyn_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+ .disable = clk_disable_regmap,
+ .get_parent = clk_dyn_rcg_get_parent,
+ .set_parent = clk_dyn_rcg_set_parent,
+ .recalc_rate = clk_dyn_rcg_recalc_rate,
+ .determine_rate = clk_dyn_rcg_determine_rate,
+ .set_rate = clk_dyn_rcg_set_rate,
+ .set_rate_and_parent = clk_dyn_rcg_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_dyn_rcg_ops);
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
new file mode 100644
index 000000000000..1d6b6dece328
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_RCG_H__
+#define __QCOM_CLK_RCG_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct freq_tbl {
+ unsigned long freq;
+ u8 src;
+ u8 pre_div;
+ u16 m;
+ u16 n;
+};
+
+/**
+ * struct mn - M/N:D counter
+ * @mnctr_en_bit: bit to enable mn counter
+ * @mnctr_reset_bit: bit to assert mn counter reset
+ * @mnctr_mode_shift: lowest bit of mn counter mode field
+ * @n_val_shift: lowest bit of n value field
+ * @m_val_shift: lowest bit of m value field
+ * @width: number of bits in m/n/d values
+ * @reset_in_cc: true if the mnctr_reset_bit is in the CC register
+ */
+struct mn {
+ u8 mnctr_en_bit;
+ u8 mnctr_reset_bit;
+ u8 mnctr_mode_shift;
+#define MNCTR_MODE_DUAL 0x2
+#define MNCTR_MODE_MASK 0x3
+ u8 n_val_shift;
+ u8 m_val_shift;
+ u8 width;
+ bool reset_in_cc;
+};
+
+/**
+ * struct pre_div - pre-divider
+ * @pre_div_shift: lowest bit of pre divider field
+ * @pre_div_width: number of bits in predivider
+ */
+struct pre_div {
+ u8 pre_div_shift;
+ u8 pre_div_width;
+};
+
+/**
+ * struct src_sel - source selector
+ * @src_sel_shift: lowest bit of source selection field
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ */
+struct src_sel {
+ u8 src_sel_shift;
+#define SRC_SEL_MASK 0x7
+ const u8 *parent_map;
+};
+
+/**
+ * struct clk_rcg - root clock generator
+ *
+ * @ns_reg: NS register
+ * @md_reg: MD register
+ * @mn: mn counter
+ * @p: pre divider
+ * @s: source selector
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg {
+ u32 ns_reg;
+ u32 md_reg;
+
+ struct mn mn;
+ struct pre_div p;
+ struct src_sel s;
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_rcg_ops;
+
+#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+/**
+ * struct clk_dyn_rcg - root clock generator with glitch free mux
+ *
+ * @mux_sel_bit: bit to switch glitch free mux
+ * @ns_reg: NS register
+ * @md_reg: MD0 and MD1 register
+ * @mn: mn counter (banked)
+ * @p: pre divider (banked)
+ * @s: source selector (banked)
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_dyn_rcg {
+ u32 ns_reg;
+ u32 md_reg[2];
+
+ u8 mux_sel_bit;
+
+ struct mn mn[2];
+ struct pre_div p[2];
+ struct src_sel s[2];
+
+ const struct freq_tbl *freq_tbl;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_dyn_rcg_ops;
+
+#define to_clk_dyn_rcg(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_dyn_rcg, clkr)
+
+/**
+ * struct clk_rcg2 - root clock generator
+ *
+ * @cmd_rcgr: corresponds to *_CMD_RCGR
+ * @mnd_width: number of bits in m/n/d values
+ * @hid_width: number of bits in half integer divider
+ * @parent_map: map from software's parent index to hardware's src_sel field
+ * @freq_tbl: frequency table
+ * @clkr: regmap clock handle
+ *
+ */
+struct clk_rcg2 {
+ u32 cmd_rcgr;
+ u8 mnd_width;
+ u8 hid_width;
+ const u8 *parent_map;
+ const struct freq_tbl *freq_tbl;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
+
+extern const struct clk_ops clk_rcg2_ops;
+
+#endif
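Editor's note: a root clock generator is described by a frequency table plus a struct clk_rcg2. A hypothetical sketch (register offset, parent map, frequencies and M/N values are invented for illustration; the field layout comes from the structs above):

static const u8 example_parent_map[] = { 0, 2 };	/* XO, PLL -> src_sel values */

static const struct freq_tbl ftbl_example_uart[] = {
	/* freq, src, pre_div, m, n */
	{ 19200000, 0, 1, 0, 0 },	/* bypass from XO */
	{ 64000000, 1, 1, 16, 75 },	/* fractional rate via the MND counter */
	{ }
};

static struct clk_rcg2 example_uart_src = {
	.cmd_rcgr = 0x2044,		/* hypothetical *_CMD_RCGR offset */
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = example_parent_map,
	.freq_tbl = ftbl_example_uart,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_uart_src",
		.parent_names = (const char *[]){ "example_xo", "example_pll" },
		.num_parents = 2,
		.ops = &clk_rcg2_ops,
	},
};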
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
new file mode 100644
index 000000000000..00f878a04d3f
--- /dev/null
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include <asm/div64.h>
+
+#include "clk-rcg.h"
+
+#define CMD_REG 0x0
+#define CMD_UPDATE BIT(0)
+#define CMD_ROOT_EN BIT(1)
+#define CMD_DIRTY_CFG BIT(4)
+#define CMD_DIRTY_N BIT(5)
+#define CMD_DIRTY_M BIT(6)
+#define CMD_DIRTY_D BIT(7)
+#define CMD_ROOT_OFF BIT(31)
+
+#define CFG_REG 0x4
+#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_SEL_SHIFT 8
+#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
+#define CFG_MODE_SHIFT 12
+#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
+#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
+
+#define M_REG 0x8
+#define N_REG 0xc
+#define D_REG 0x10
+
+static int clk_rcg2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cmd;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+
+ return (cmd & CMD_ROOT_OFF) == 0;
+}
+
+static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 cfg;
+ int i, ret;
+
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int update_config(struct clk_rcg2 *rcg)
+{
+ int count, ret;
+ u32 cmd;
+ struct clk_hw *hw = &rcg->clkr.hw;
+ const char *name = __clk_get_name(hw->clk);
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_UPDATE, CMD_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
+ if (ret)
+ return ret;
+ if (!(cmd & CMD_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ WARN(1, "%s: rcg didn't update its configuration.", name);
+ return 0;
+}
+
+static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+ CFG_SRC_SEL_MASK,
+ rcg->parent_map[index] << CFG_SRC_SEL_SHIFT);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+/*
+ * Calculate m/n:d rate
+ *
+ *          parent_rate     m
+ *   rate = ----------- x  ---
+ *            hid_div       n
+ */
+static unsigned long
+calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+{
+ if (hid_div) {
+ rate *= 2;
+ rate /= hid_div + 1;
+ }
+
+ if (mode) {
+ u64 tmp = rate;
+ tmp *= m;
+ do_div(tmp, n);
+ rate = tmp;
+ }
+
+ return rate;
+}
+
+static unsigned long
+clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
+ m &= mask;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+
+ return calc_rate(parent_rate, m, n, mode, hid_div);
+}
+
+static const
+struct freq_tbl *find_freq(const struct freq_tbl *f, unsigned long rate)
+{
+ if (!f)
+ return NULL;
+
+ for (; f->freq; f++)
+ if (rate <= f->freq)
+ return f;
+
+ return NULL;
+}
+
+static long _freq_tbl_determine_rate(struct clk_hw *hw,
+ const struct freq_tbl *f, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ unsigned long clk_flags;
+
+ f = find_freq(f, rate);
+ if (!f)
+ return -EINVAL;
+
+ clk_flags = __clk_get_flags(hw->clk);
+ *p = clk_get_parent_by_index(hw->clk, f->src);
+ if (clk_flags & CLK_SET_RATE_PARENT) {
+ if (f->pre_div) {
+ rate /= 2;
+ rate *= f->pre_div + 1;
+ }
+
+ if (f->n) {
+ u64 tmp = rate;
+ tmp = tmp * f->n;
+ do_div(tmp, f->m);
+ rate = tmp;
+ }
+ } else {
+ rate = __clk_get_rate(*p);
+ }
+ *p_rate = rate;
+
+ return f->freq;
+}
+
+static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *p_rate, struct clk **p)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+}
+
+static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f;
+ u32 cfg, mask;
+ int ret;
+
+ f = find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ if (rcg->mnd_width && f->n) {
+ mask = BIT(rcg->mnd_width) - 1;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG,
+ mask, f->m);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG,
+ mask, ~(f->n - f->m));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + D_REG,
+ mask, ~f->n);
+ if (ret)
+ return ret;
+ }
+
+ mask = BIT(rcg->hid_width) - 1;
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+ if (rcg->mnd_width && f->n)
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask,
+ cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
+static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_set_rate(hw, rate);
+}
+
+static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_set_rate(hw, rate);
+}
+
+const struct clk_ops clk_rcg2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_set_rate,
+ .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_ops);
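
To show how clk_rcg2_ops is meant to be consumed, here is a hedged sketch of a clock declaration in the style a provider driver might use. The struct clk_rcg2 field names (cmd_rcgr, mnd_width, hid_width, parent_map, freq_tbl, clkr) match how they are dereferenced in the code above; the P_* indices, parent map, register offset and rates are invented for illustration:

	static const struct freq_tbl ftbl_example_clk_src[] = {
		{  19200000, P_XO,   1, 0, 0 },	/* source passthrough */
		{ 100000000, P_PLL0, 5, 0, 0 },	/* divide-by-3 of an assumed 300 MHz PLL */
		{ }
	};

	static struct clk_rcg2 example_clk_src = {
		.cmd_rcgr = 0x1000,		/* invented register offset */
		.mnd_width = 0,			/* no M/N counter on this RCG */
		.hid_width = 5,
		.parent_map = example_parent_map,
		.freq_tbl = ftbl_example_clk_src,
		.clkr.hw.init = &(struct clk_init_data){
			.name = "example_clk_src",
			.parent_names = (const char *[]){ "xo", "pll0_vote" },
			.num_parents = 2,
			.ops = &clk_rcg2_ops,
		},
	};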
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
new file mode 100644
index 000000000000..a58ba39a900c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap.h"
+
+/**
+ * clk_is_enabled_regmap - standard is_enabled() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their is_enabled operation, saving some code.
+ */
+int clk_is_enabled_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rclk->regmap, rclk->enable_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ if (rclk->enable_is_inverted)
+ return (val & rclk->enable_mask) == 0;
+ else
+ return (val & rclk->enable_mask) != 0;
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_regmap);
+
+/**
+ * clk_enable_regmap - standard enable() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their enable() operation, saving some code.
+ */
+int clk_enable_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+
+ if (rclk->enable_is_inverted)
+ val = 0;
+ else
+ val = rclk->enable_mask;
+
+ return regmap_update_bits(rclk->regmap, rclk->enable_reg,
+ rclk->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(clk_enable_regmap);
+
+/**
+ * clk_disable_regmap - standard disable() for regmap users
+ *
+ * @hw: clk to operate on
+ *
+ * Clocks that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their struct clk_regmap and then use
+ * this as their disable() operation, saving some code.
+ */
+void clk_disable_regmap(struct clk_hw *hw)
+{
+ struct clk_regmap *rclk = to_clk_regmap(hw);
+ unsigned int val;
+
+ if (rclk->enable_is_inverted)
+ val = rclk->enable_mask;
+ else
+ val = 0;
+
+ regmap_update_bits(rclk->regmap, rclk->enable_reg, rclk->enable_mask,
+ val);
+}
+EXPORT_SYMBOL_GPL(clk_disable_regmap);
+
+/**
+ * devm_clk_register_regmap - register a clk_regmap clock
+ *
+ * @dev: device that is registering this clock
+ * @rclk: clk_regmap clock to register
+ *
+ * Clocks that use regmap for their register I/O should register their
+ * clk_regmap struct via this function so that the regmap is initialized
+ * and so that the clock is registered with the common clock framework.
+ */
+struct clk *devm_clk_register_regmap(struct device *dev,
+ struct clk_regmap *rclk)
+{
+ if (dev && dev_get_regmap(dev, NULL))
+ rclk->regmap = dev_get_regmap(dev, NULL);
+ else if (dev && dev->parent)
+ rclk->regmap = dev_get_regmap(dev->parent, NULL);
+
+ return devm_clk_register(dev, &rclk->hw);
+}
+EXPORT_SYMBOL_GPL(devm_clk_register_regmap);
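
As a usage sketch (all names and offsets invented for illustration), a plain gate clock can point its clk_ops at these helpers and register through devm_clk_register_regmap(), which picks the regmap up from the device or its parent:

	static const struct clk_ops example_gate_ops = {
		.enable = clk_enable_regmap,
		.disable = clk_disable_regmap,
		.is_enabled = clk_is_enabled_regmap,
	};

	static struct clk_regmap example_gate = {
		.enable_reg = 0x2000,		/* invented offset */
		.enable_mask = BIT(4),
		.hw.init = &(struct clk_init_data){
			.name = "example_gate",
			.parent_names = (const char *[]){ "example_src" },
			.num_parents = 1,
			.ops = &example_gate_ops,
		},
	};

	/* in probe(), once the provider's regmap is attached to the device: */
	clk = devm_clk_register_regmap(&pdev->dev, &example_gate);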
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
new file mode 100644
index 000000000000..491a63d537df
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_H__
+#define __QCOM_CLK_REGMAP_H__
+
+#include <linux/clk-provider.h>
+
+struct regmap;
+
+/**
+ * struct clk_regmap - regmap supporting clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @regmap: regmap to use for regmap helpers and/or by providers
+ * @enable_reg: register when using regmap enable/disable ops
+ * @enable_mask: mask when using regmap enable/disable ops
+ * @enable_is_inverted: flag to indicate that the enable_mask bits are set
+ *			to disable (and cleared to enable) the clock when
+ *			using clk_enable_regmap and friends
+ */
+struct clk_regmap {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int enable_reg;
+ unsigned int enable_mask;
+ bool enable_is_inverted;
+};
+#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+int clk_is_enabled_regmap(struct clk_hw *hw);
+int clk_enable_regmap(struct clk_hw *hw);
+void clk_disable_regmap(struct clk_hw *hw);
+struct clk *
+devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
+
+#endif
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
new file mode 100644
index 000000000000..bc0b7f1fcfbe
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -0,0 +1,2819 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8660.h>
+#include <dt-bindings/reset/qcom,gcc-msm8660.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll8 = {
+ .l_reg = 0x3144,
+ .m_reg = 0x3148,
+ .n_reg = 0x314c,
+ .config_reg = 0x3154,
+ .mode_reg = 0x3140,
+ .status_reg = 0x3158,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll8",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll8_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll8_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+#define P_PXO 0
+#define P_PLL8 1
+#define P_CXO 2
+
+static const u8 gcc_pxo_pll8_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+ "pxo",
+ "pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+ [P_CXO] = 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+ "pxo",
+ "pll8_vote",
+ "cxo",
+};
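
The P_* defines are indices into the parent name arrays above, while the *_map arrays translate a freq_tbl src field into the mux select value actually written to the hardware; the two numbering schemes are deliberately distinct:

	/*
	 * Example lookup using the tables above:
	 *   f->src = P_PLL8 (1)          -> parent index 1, i.e. "pll8_vote"
	 *   gcc_pxo_pll8_map[P_PLL8] = 3 -> value written to the source select field
	 */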
+
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
+ { 1843200, P_PLL8, 2, 6, 625 },
+ { 3686400, P_PLL8, 2, 12, 625 },
+ { 7372800, P_PLL8, 2, 24, 625 },
+ { 14745600, P_PLL8, 2, 48, 625 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 40000000, P_PLL8, 1, 5, 48 },
+ { 46400000, P_PLL8, 1, 29, 240 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { 56000000, P_PLL8, 1, 7, 48 },
+ { 58982400, P_PLL8, 1, 96, 625 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { }
+};
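
Each row is { freq, src, pre_div, m, n } and yields source / pre_div * m / n. Assuming PLL8 runs at 384 MHz (not stated in this patch, but consistent with the divide-only PLL8 entries in clk_tbl_gp further down), the first row checks out as:

	/*
	 * { 1843200, P_PLL8, 2, 6, 625 }:
	 *   384000000 / 2       = 192000000
	 *   192000000 * 6 / 625 =   1843200   -> matches the freq column
	 */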
+
+static struct clk_rcg gsbi1_uart_src = {
+ .ns_reg = 0x29d4,
+ .md_reg = 0x29d0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi1_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+ .ns_reg = 0x29f4,
+ .md_reg = 0x29f0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi2_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_uart_src = {
+ .ns_reg = 0x2a14,
+ .md_reg = 0x2a10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi3_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+ .ns_reg = 0x2a34,
+ .md_reg = 0x2a30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi4_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+ .ns_reg = 0x2a54,
+ .md_reg = 0x2a50,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi5_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+ .ns_reg = 0x2a74,
+ .md_reg = 0x2a70,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi6_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+ .ns_reg = 0x2a94,
+ .md_reg = 0x2a90,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi7_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_uart_src = {
+ .ns_reg = 0x2ab4,
+ .md_reg = 0x2ab0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_clk",
+ .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_uart_src = {
+ .ns_reg = 0x2ad4,
+ .md_reg = 0x2ad0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_clk",
+ .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_uart_src = {
+ .ns_reg = 0x2af4,
+ .md_reg = 0x2af0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_clk",
+ .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_uart_src = {
+ .ns_reg = 0x2b14,
+ .md_reg = 0x2b10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_clk",
+ .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_uart_src = {
+ .ns_reg = 0x2b34,
+ .md_reg = 0x2b30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_clk",
+ .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 1100000, P_PXO, 1, 2, 49 },
+ { 5400000, P_PXO, 1, 1, 5 },
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+};
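
Entries with m = n = 0, such as the 27 MHz row, bypass the M/N counter and take the source straight through the pre-divider; the table therefore also documents PXO as a 27 MHz reference:

	/*
	 * { 27000000, P_PXO, 1, 0, 0 }:
	 *   n == 0 -> M/N counter bypassed, rate = PXO / 1 = 27 MHz
	 */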
+
+static struct clk_rcg gsbi1_qup_src = {
+ .ns_reg = 0x29cc,
+ .md_reg = 0x29c8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_clk",
+ .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+ .ns_reg = 0x29ec,
+ .md_reg = 0x29e8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_clk",
+ .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_qup_src = {
+ .ns_reg = 0x2a0c,
+ .md_reg = 0x2a08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_clk",
+ .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+ .ns_reg = 0x2a2c,
+ .md_reg = 0x2a28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_clk",
+ .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+ .ns_reg = 0x2a4c,
+ .md_reg = 0x2a48,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_clk",
+ .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+ .ns_reg = 0x2a6c,
+ .md_reg = 0x2a68,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_clk",
+ .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+ .ns_reg = 0x2a8c,
+ .md_reg = 0x2a88,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_clk",
+ .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_qup_src = {
+ .ns_reg = 0x2aac,
+ .md_reg = 0x2aa8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_clk",
+ .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_qup_src = {
+ .ns_reg = 0x2acc,
+ .md_reg = 0x2ac8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_clk",
+ .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_qup_src = {
+ .ns_reg = 0x2aec,
+ .md_reg = 0x2ae8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_clk",
+ .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_qup_src = {
+ .ns_reg = 0x2b0c,
+ .md_reg = 0x2b08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_clk",
+ .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_qup_src = {
+ .ns_reg = 0x2b2c,
+ .md_reg = 0x2b28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_clk",
+ .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_gp[] = {
+ { 9600000, P_CXO, 2, 0, 0 },
+ { 13500000, P_PXO, 2, 0, 0 },
+ { 19200000, P_CXO, 1, 0, 0 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg gp0_src = {
+ .ns_reg = 0x2d24,
+ .md_reg = 0x2d00,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp0_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_clk",
+ .parent_names = (const char *[]){ "gp0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp1_src = {
+ .ns_reg = 0x2d44,
+ .md_reg = 0x2d40,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp1_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_clk",
+ .parent_names = (const char *[]){ "gp1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp2_src = {
+ .ns_reg = 0x2d64,
+ .md_reg = 0x2d60,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_clk",
+ .parent_names = (const char *[]){ "gp2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch pmem_clk = {
+ .hwcg_reg = 0x25a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x25a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmem_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg prng_src = {
+ .ns_reg = 0x2e80,
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .clkr.hw = {
+ .init = &(struct clk_init_data){
+ .name = "prng_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch prng_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_clk",
+ .parent_names = (const char *[]){ "prng_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+ { 144000, P_PXO, 3, 2, 125 },
+ { 400000, P_PLL8, 4, 1, 240 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 17070000, P_PLL8, 1, 2, 45 },
+ { 20210000, P_PLL8, 1, 1, 19 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { }
+};
+
+static struct clk_rcg sdc1_src = {
+ .ns_reg = 0x282c,
+ .md_reg = 0x2828,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc1_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_clk",
+ .parent_names = (const char *[]){ "sdc1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc2_src = {
+ .ns_reg = 0x284c,
+ .md_reg = 0x2848,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc2_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_clk",
+ .parent_names = (const char *[]){ "sdc2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc3_src = {
+ .ns_reg = 0x286c,
+ .md_reg = 0x2868,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc3_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_clk",
+ .parent_names = (const char *[]){ "sdc3_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc4_src = {
+ .ns_reg = 0x288c,
+ .md_reg = 0x2888,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc4_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_clk",
+ .parent_names = (const char *[]){ "sdc4_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc5_src = {
+ .ns_reg = 0x28ac,
+ .md_reg = 0x28a8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc5_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_clk",
+ .parent_names = (const char *[]){ "sdc5_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+ { 105000, P_PXO, 1, 1, 256 },
+ { }
+};
+
+static struct clk_rcg tsif_ref_src = {
+ .ns_reg = 0x2710,
+ .md_reg = 0x270c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_tsif_ref,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch tsif_ref_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk",
+ .parent_names = (const char *[]){ "tsif_ref_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_src = {
+ .ns_reg = 0x290c,
+ .md_reg = 0x2908,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_clk",
+ .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs1_xcvr_fs_src = {
+ .ns_reg = 0x2968,
+ .md_reg = 0x2964,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+
+static struct clk_branch usb_fs1_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_clk",
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x296c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_fs1_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs2_xcvr_fs_src = {
+ .ns_reg = 0x2988,
+ .md_reg = 0x2984,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+
+static struct clk_branch usb_fs2_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x298c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_system_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x29c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x29e0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2a00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2a20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2a40,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2a60,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2a80,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2aa0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2ac0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_h_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2ae0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2b00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2b20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tsif_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2700,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2960,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2980,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2900,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc1_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2820,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc2_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2840,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc3_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x2860,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc4_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2880,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc5_h_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x28a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm1_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 12,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm1_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm1_pbus_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 11,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm1_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch modem_ahb1_h_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "modem_ahb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch modem_ahb2_h_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 7,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "modem_ahb2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb0_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_ssbi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+ .hwcg_reg = 0x27e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "rpm_msg_ram_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8660_clks[] = {
+ [PLL8] = &pll8.clkr,
+ [PLL8_VOTE] = &pll8_vote,
+ [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+ [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+ [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+ [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+ [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+ [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+ [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+ [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+ [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+ [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+ [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+ [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+ [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+ [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+ [GSBI8_UART_SRC] = &gsbi8_uart_src.clkr,
+ [GSBI8_UART_CLK] = &gsbi8_uart_clk.clkr,
+ [GSBI9_UART_SRC] = &gsbi9_uart_src.clkr,
+ [GSBI9_UART_CLK] = &gsbi9_uart_clk.clkr,
+ [GSBI10_UART_SRC] = &gsbi10_uart_src.clkr,
+ [GSBI10_UART_CLK] = &gsbi10_uart_clk.clkr,
+ [GSBI11_UART_SRC] = &gsbi11_uart_src.clkr,
+ [GSBI11_UART_CLK] = &gsbi11_uart_clk.clkr,
+ [GSBI12_UART_SRC] = &gsbi12_uart_src.clkr,
+ [GSBI12_UART_CLK] = &gsbi12_uart_clk.clkr,
+ [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+ [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+ [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+ [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+ [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+ [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+ [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+ [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+ [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+ [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+ [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+ [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+ [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+ [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+ [GSBI8_QUP_SRC] = &gsbi8_qup_src.clkr,
+ [GSBI8_QUP_CLK] = &gsbi8_qup_clk.clkr,
+ [GSBI9_QUP_SRC] = &gsbi9_qup_src.clkr,
+ [GSBI9_QUP_CLK] = &gsbi9_qup_clk.clkr,
+ [GSBI10_QUP_SRC] = &gsbi10_qup_src.clkr,
+ [GSBI10_QUP_CLK] = &gsbi10_qup_clk.clkr,
+ [GSBI11_QUP_SRC] = &gsbi11_qup_src.clkr,
+ [GSBI11_QUP_CLK] = &gsbi11_qup_clk.clkr,
+ [GSBI12_QUP_SRC] = &gsbi12_qup_src.clkr,
+ [GSBI12_QUP_CLK] = &gsbi12_qup_clk.clkr,
+ [GP0_SRC] = &gp0_src.clkr,
+ [GP0_CLK] = &gp0_clk.clkr,
+ [GP1_SRC] = &gp1_src.clkr,
+ [GP1_CLK] = &gp1_clk.clkr,
+ [GP2_SRC] = &gp2_src.clkr,
+ [GP2_CLK] = &gp2_clk.clkr,
+ [PMEM_CLK] = &pmem_clk.clkr,
+ [PRNG_SRC] = &prng_src.clkr,
+ [PRNG_CLK] = &prng_clk.clkr,
+ [SDC1_SRC] = &sdc1_src.clkr,
+ [SDC1_CLK] = &sdc1_clk.clkr,
+ [SDC2_SRC] = &sdc2_src.clkr,
+ [SDC2_CLK] = &sdc2_clk.clkr,
+ [SDC3_SRC] = &sdc3_src.clkr,
+ [SDC3_CLK] = &sdc3_clk.clkr,
+ [SDC4_SRC] = &sdc4_src.clkr,
+ [SDC4_CLK] = &sdc4_clk.clkr,
+ [SDC5_SRC] = &sdc5_src.clkr,
+ [SDC5_CLK] = &sdc5_clk.clkr,
+ [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+ [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+ [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+ [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+ [USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+ [USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+ [USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+ [USB_FS2_XCVR_FS_SRC] = &usb_fs2_xcvr_fs_src.clkr,
+ [USB_FS2_XCVR_FS_CLK] = &usb_fs2_xcvr_fs_clk.clkr,
+ [USB_FS2_SYSTEM_CLK] = &usb_fs2_system_clk.clkr,
+ [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+ [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+ [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+ [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+ [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+ [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+ [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+ [GSBI8_H_CLK] = &gsbi8_h_clk.clkr,
+ [GSBI9_H_CLK] = &gsbi9_h_clk.clkr,
+ [GSBI10_H_CLK] = &gsbi10_h_clk.clkr,
+ [GSBI11_H_CLK] = &gsbi11_h_clk.clkr,
+ [GSBI12_H_CLK] = &gsbi12_h_clk.clkr,
+ [TSIF_H_CLK] = &tsif_h_clk.clkr,
+ [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+ [USB_FS2_H_CLK] = &usb_fs2_h_clk.clkr,
+ [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+ [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+ [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+ [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+ [SDC4_H_CLK] = &sdc4_h_clk.clkr,
+ [SDC5_H_CLK] = &sdc5_h_clk.clkr,
+ [ADM0_CLK] = &adm0_clk.clkr,
+ [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+ [ADM1_CLK] = &adm1_clk.clkr,
+ [ADM1_PBUS_CLK] = &adm1_pbus_clk.clkr,
+ [MODEM_AHB1_H_CLK] = &modem_ahb1_h_clk.clkr,
+ [MODEM_AHB2_H_CLK] = &modem_ahb2_h_clk.clkr,
+ [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+ [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+ [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+ [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
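+/*
+ * Reset map entries are { register offset, reset bit }; entries that list
+ * only an offset assert bit 0 of that register.
+ */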
+static const struct qcom_reset_map gcc_msm8660_resets[] = {
+ [AFAB_CORE_RESET] = { 0x2080, 7 },
+ [SCSS_SYS_RESET] = { 0x20b4, 1 },
+ [SCSS_SYS_POR_RESET] = { 0x20b4 },
+ [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+ [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+ [AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+ [AFAB_EBI1_S_RESET] = { 0x20c0, 7 },
+ [SFAB_CORE_RESET] = { 0x2120, 7 },
+ [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+ [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+ [SFAB_ADM0_M2_RESET] = { 0x21e4, 7 },
+ [ADM0_C2_RESET] = { 0x220c, 4 },
+ [ADM0_C1_RESET] = { 0x220c, 3 },
+ [ADM0_C0_RESET] = { 0x220c, 2 },
+ [ADM0_PBUS_RESET] = { 0x220c, 1 },
+ [ADM0_RESET] = { 0x220c },
+ [SFAB_ADM1_M0_RESET] = { 0x2220, 7 },
+ [SFAB_ADM1_M1_RESET] = { 0x2224, 7 },
+ [SFAB_ADM1_M2_RESET] = { 0x2228, 7 },
+ [MMFAB_ADM1_M3_RESET] = { 0x2240, 7 },
+ [ADM1_C3_RESET] = { 0x226c, 5 },
+ [ADM1_C2_RESET] = { 0x226c, 4 },
+ [ADM1_C1_RESET] = { 0x226c, 3 },
+ [ADM1_C0_RESET] = { 0x226c, 2 },
+ [ADM1_PBUS_RESET] = { 0x226c, 1 },
+ [ADM1_RESET] = { 0x226c },
+ [IMEM0_RESET] = { 0x2280, 7 },
+ [SFAB_LPASS_Q6_RESET] = { 0x23a0, 7 },
+ [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+ [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+ [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+ [DFAB_CORE_RESET] = { 0x24ac, 7 },
+ [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+ [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+ [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+ [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+ [DFAB_ARB0_RESET] = { 0x2560, 7 },
+ [DFAB_ARB1_RESET] = { 0x2564, 7 },
+ [PPSS_PROC_RESET] = { 0x2594, 1 },
+ [PPSS_RESET] = { 0x2594 },
+ [PMEM_RESET] = { 0x25a0, 7 },
+ [DMA_BAM_RESET] = { 0x25c0, 7 },
+ [SIC_RESET] = { 0x25e0, 7 },
+ [SPS_TIC_RESET] = { 0x2600, 7 },
+ [CFBP0_RESET] = { 0x2650, 7 },
+ [CFBP1_RESET] = { 0x2654, 7 },
+ [CFBP2_RESET] = { 0x2658, 7 },
+ [EBI2_RESET] = { 0x2664, 7 },
+ [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+ [CFPB_MASTER_RESET] = { 0x26a0, 7 },
+ [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+ [CFPB_SPLITTER_RESET] = { 0x26e0, 7 },
+ [TSIF_RESET] = { 0x2700, 7 },
+ [CE1_RESET] = { 0x2720, 7 },
+ [CE2_RESET] = { 0x2740, 7 },
+ [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+ [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+ [RPM_PROC_RESET] = { 0x27c0, 7 },
+ [RPM_BUS_RESET] = { 0x27c4, 7 },
+ [RPM_MSG_RAM_RESET] = { 0x27e0, 7 },
+ [PMIC_ARB0_RESET] = { 0x2800, 7 },
+ [PMIC_ARB1_RESET] = { 0x2804, 7 },
+ [PMIC_SSBI2_RESET] = { 0x280c, 12 },
+ [SDC1_RESET] = { 0x2830 },
+ [SDC2_RESET] = { 0x2850 },
+ [SDC3_RESET] = { 0x2870 },
+ [SDC4_RESET] = { 0x2890 },
+ [SDC5_RESET] = { 0x28b0 },
+ [USB_HS1_RESET] = { 0x2910 },
+ [USB_HS2_XCVR_RESET] = { 0x2934, 1 },
+ [USB_HS2_RESET] = { 0x2934 },
+ [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+ [USB_FS1_RESET] = { 0x2974 },
+ [USB_FS2_XCVR_RESET] = { 0x2994, 1 },
+ [USB_FS2_RESET] = { 0x2994 },
+ [GSBI1_RESET] = { 0x29dc },
+ [GSBI2_RESET] = { 0x29fc },
+ [GSBI3_RESET] = { 0x2a1c },
+ [GSBI4_RESET] = { 0x2a3c },
+ [GSBI5_RESET] = { 0x2a5c },
+ [GSBI6_RESET] = { 0x2a7c },
+ [GSBI7_RESET] = { 0x2a9c },
+ [GSBI8_RESET] = { 0x2abc },
+ [GSBI9_RESET] = { 0x2adc },
+ [GSBI10_RESET] = { 0x2afc },
+ [GSBI11_RESET] = { 0x2b1c },
+ [GSBI12_RESET] = { 0x2b3c },
+ [SPDM_RESET] = { 0x2b6c },
+ [SEC_CTRL_RESET] = { 0x2b80, 7 },
+ [TLMM_H_RESET] = { 0x2ba0, 7 },
+ [TLMM_RESET] = { 0x2ba4, 7 },
+ [MARRM_PWRON_RESET] = { 0x2bd4, 1 },
+ [MARM_RESET] = { 0x2bd4 },
+ [MAHB1_RESET] = { 0x2be4, 7 },
+ [SFAB_MSS_S_RESET] = { 0x2c00, 7 },
+ [MAHB2_RESET] = { 0x2c20, 7 },
+ [MODEM_SW_AHB_RESET] = { 0x2c48, 1 },
+ [MODEM_RESET] = { 0x2c48 },
+ [SFAB_MSS_MDM1_RESET] = { 0x2c4c, 1 },
+ [SFAB_MSS_MDM0_RESET] = { 0x2c4c },
+ [MSS_SLP_RESET] = { 0x2c60, 7 },
+ [MSS_MARM_SAW_RESET] = { 0x2c68, 1 },
+ [MSS_WDOG_RESET] = { 0x2c68 },
+ [TSSC_RESET] = { 0x2ca0, 7 },
+ [PDM_RESET] = { 0x2cc0, 12 },
+ [SCSS_CORE0_RESET] = { 0x2d60, 1 },
+ [SCSS_CORE0_POR_RESET] = { 0x2d60 },
+ [SCSS_CORE1_RESET] = { 0x2d80, 1 },
+ [SCSS_CORE1_POR_RESET] = { 0x2d80 },
+ [MPM_RESET] = { 0x2da4, 1 },
+ [EBI1_1X_DIV_RESET] = { 0x2dec, 9 },
+ [EBI1_RESET] = { 0x2dec, 7 },
+ [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+ [USB_PHY0_RESET] = { 0x2e20 },
+ [USB_PHY1_RESET] = { 0x2e40 },
+ [PRNG_RESET] = { 0x2e80, 12 },
+};
+
+static const struct regmap_config gcc_msm8660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x363c,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8660_match_table[] = {
+ { .compatible = "qcom,gcc-msm8660" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8660_match_table);
+
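+/*
+ * Controller context: the reset controller state, the clock provider data
+ * and a flexible array of clk pointers sized at probe time.
+ */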
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8660_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8660_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8660_clks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8660_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8660_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8660_resets);
+ reset->regmap = regmap;
+ reset->reset_map = gcc_msm8660_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8660_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8660_driver = {
+ .probe = gcc_msm8660_probe,
+ .remove = gcc_msm8660_remove,
+ .driver = {
+ .name = "gcc-msm8660",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8660_match_table,
+ },
+};
+
+static int __init gcc_msm8660_init(void)
+{
+ return platform_driver_register(&gcc_msm8660_driver);
+}
+core_initcall(gcc_msm8660_init);
+
+static void __exit gcc_msm8660_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8660_driver);
+}
+module_exit(gcc_msm8660_exit);
+
+MODULE_DESCRIPTION("GCC MSM 8660 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8660");
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
new file mode 100644
index 000000000000..fd446ab2fd98
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -0,0 +1,2993 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8960.h>
+#include <dt-bindings/reset/qcom,gcc-msm8960.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll3 = {
+ .l_reg = 0x3164,
+ .m_reg = 0x3168,
+ .n_reg = 0x316c,
+ .config_reg = 0x3174,
+ .mode_reg = 0x3160,
+ .status_reg = 0x3178,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll3",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll pll8 = {
+ .l_reg = 0x3144,
+ .m_reg = 0x3148,
+ .n_reg = 0x314c,
+ .config_reg = 0x3154,
+ .mode_reg = 0x3140,
+ .status_reg = 0x3158,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll8",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
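+/*
+ * pll8_vote and pll14_vote toggle this master's bit in the shared PLL
+ * enable voting register (0x34c0) rather than the PLL mode register, so
+ * the PLL stays on while any voter still needs it.
+ */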
+static struct clk_regmap pll8_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll8_vote",
+ .parent_names = (const char *[]){ "pll8" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll pll14 = {
+ .l_reg = 0x31c4,
+ .m_reg = 0x31c8,
+ .n_reg = 0x31cc,
+ .config_reg = 0x31d4,
+ .mode_reg = 0x31c0,
+ .status_reg = 0x31d8,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll14",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap pll14_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll14_vote",
+ .parent_names = (const char *[]){ "pll14" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
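+/*
+ * Parent indices used in the frequency tables below; the *_map arrays
+ * translate each index to the hardware source-select field value.
+ */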
+#define P_PXO 0
+#define P_PLL8 1
+#define P_CXO 2
+
+static const u8 gcc_pxo_pll8_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+ "pxo",
+ "pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 3,
+ [P_CXO] = 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+ "pxo",
+ "pll8_vote",
+ "cxo",
+};
+
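+/* Frequency table entries are { freq, parent (P_*), pre_div, m, n }. */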
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
+ { 1843200, P_PLL8, 2, 6, 625 },
+ { 3686400, P_PLL8, 2, 12, 625 },
+ { 7372800, P_PLL8, 2, 24, 625 },
+ { 14745600, P_PLL8, 2, 48, 625 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 40000000, P_PLL8, 1, 5, 48 },
+ { 46400000, P_PLL8, 1, 29, 240 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { 56000000, P_PLL8, 1, 7, 48 },
+ { 58982400, P_PLL8, 1, 96, 625 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { }
+};
+
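+/*
+ * Each root clock generator (RCG) selects a source, pre-divider and M/N
+ * counter via its NS/MD register pair; the corresponding branch clock
+ * gates the generated output to the peripheral.
+ */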
+static struct clk_rcg gsbi1_uart_src = {
+ .ns_reg = 0x29d4,
+ .md_reg = 0x29d0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x29d4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi1_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+ .ns_reg = 0x29f4,
+ .md_reg = 0x29f0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x29f4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi2_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_uart_src = {
+ .ns_reg = 0x2a14,
+ .md_reg = 0x2a10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_uart_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2a14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi3_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+ .ns_reg = 0x2a34,
+ .md_reg = 0x2a30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x2a34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi4_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+ .ns_reg = 0x2a54,
+ .md_reg = 0x2a50,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x2a54,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi5_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+ .ns_reg = 0x2a74,
+ .md_reg = 0x2a70,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2a74,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi6_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+ .ns_reg = 0x2a94,
+ .md_reg = 0x2a90,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2a94,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_uart_clk",
+ .parent_names = (const char *[]){
+ "gsbi7_uart_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_uart_src = {
+ .ns_reg = 0x2ab4,
+ .md_reg = 0x2ab0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2ab4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_uart_clk",
+ .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_uart_src = {
+ .ns_reg = 0x2ad4,
+ .md_reg = 0x2ad0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2ad4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_uart_clk",
+ .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_uart_src = {
+ .ns_reg = 0x2af4,
+ .md_reg = 0x2af0,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_uart_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2af4,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_uart_clk",
+ .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_uart_src = {
+ .ns_reg = 0x2b14,
+ .md_reg = 0x2b10,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2b14,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_uart_clk",
+ .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_uart_src = {
+ .ns_reg = 0x2b34,
+ .md_reg = 0x2b30,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_uart,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_uart_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x2b34,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_uart_clk",
+ .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 1100000, P_PXO, 1, 2, 49 },
+ { 5400000, P_PXO, 1, 1, 5 },
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+ .ns_reg = 0x29cc,
+ .md_reg = 0x29c8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x29cc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_qup_clk",
+ .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+ .ns_reg = 0x29ec,
+ .md_reg = 0x29e8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x29ec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_qup_clk",
+ .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi3_qup_src = {
+ .ns_reg = 0x2a0c,
+ .md_reg = 0x2a08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_qup_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2a0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_qup_clk",
+ .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+ .ns_reg = 0x2a2c,
+ .md_reg = 0x2a28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x2a2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_qup_clk",
+ .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+ .ns_reg = 0x2a4c,
+ .md_reg = 0x2a48,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x2a4c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_qup_clk",
+ .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+ .ns_reg = 0x2a6c,
+ .md_reg = 0x2a68,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x2a6c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_qup_clk",
+ .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+ .ns_reg = 0x2a8c,
+ .md_reg = 0x2a88,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2a8c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_qup_clk",
+ .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi8_qup_src = {
+ .ns_reg = 0x2aac,
+ .md_reg = 0x2aa8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2aac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_qup_clk",
+ .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi9_qup_src = {
+ .ns_reg = 0x2acc,
+ .md_reg = 0x2ac8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x2acc,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_qup_clk",
+ .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi10_qup_src = {
+ .ns_reg = 0x2aec,
+ .md_reg = 0x2ae8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_qup_clk = {
+ .halt_reg = 0x2fd0,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x2aec,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_qup_clk",
+ .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi11_qup_src = {
+ .ns_reg = 0x2b0c,
+ .md_reg = 0x2b08,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2b0c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_qup_clk",
+ .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gsbi12_qup_src = {
+ .ns_reg = 0x2b2c,
+ .md_reg = 0x2b28,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_gsbi_qup,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_qup_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2b2c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_qup_clk",
+ .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_gp[] = {
+ { 9600000, P_CXO, 2, 0, 0 },
+ { 13500000, P_PXO, 2, 0, 0 },
+ { 19200000, P_CXO, 1, 0, 0 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg gp0_src = {
+ .ns_reg = 0x2d24,
+ .md_reg = 0x2d00,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_PARENT_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp0_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2d24,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_clk",
+ .parent_names = (const char *[]){ "gp0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp1_src = {
+ .ns_reg = 0x2d44,
+ .md_reg = 0x2d40,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp1_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x2d44,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_clk",
+ .parent_names = (const char *[]){ "gp1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg gp2_src = {
+ .ns_reg = 0x2d64,
+ .md_reg = 0x2d60,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_cxo_map,
+ },
+ .freq_tbl = clk_tbl_gp,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_src",
+ .parent_names = gcc_pxo_pll8_cxo,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch gp2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2d64,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gp2_clk",
+ .parent_names = (const char *[]){ "gp2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch pmem_clk = {
+ .hwcg_reg = 0x25a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x25a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmem_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg prng_src = {
+ .ns_reg = 0x2e80,
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch prng_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "prng_clk",
+ .parent_names = (const char *[]){ "prng_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+ { 144000, P_PXO, 3, 2, 125 },
+ { 400000, P_PLL8, 4, 1, 240 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 17070000, P_PLL8, 1, 2, 45 },
+ { 20210000, P_PLL8, 1, 1, 19 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 64000000, P_PLL8, 3, 1, 2 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 192000000, P_PLL8, 2, 0, 0 },
+ { }
+};
+
+static struct clk_rcg sdc1_src = {
+ .ns_reg = 0x282c,
+ .md_reg = 0x2828,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc1_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x282c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_clk",
+ .parent_names = (const char *[]){ "sdc1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc2_src = {
+ .ns_reg = 0x284c,
+ .md_reg = 0x2848,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc2_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x284c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_clk",
+ .parent_names = (const char *[]){ "sdc2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc3_src = {
+ .ns_reg = 0x286c,
+ .md_reg = 0x2868,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc3_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x286c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_clk",
+ .parent_names = (const char *[]){ "sdc3_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc4_src = {
+ .ns_reg = 0x288c,
+ .md_reg = 0x2888,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc4_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x288c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_clk",
+ .parent_names = (const char *[]){ "sdc4_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg sdc5_src = {
+ .ns_reg = 0x28ac,
+ .md_reg = 0x28a8,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_sdc,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch sdc5_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x28ac,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_clk",
+ .parent_names = (const char *[]){ "sdc5_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+ { 105000, P_PXO, 1, 1, 256 },
+ { }
+};
+
+static struct clk_rcg tsif_ref_src = {
+ .ns_reg = 0x2710,
+ .md_reg = 0x270c,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 16,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_tsif_ref,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch tsif_ref_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x2710,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk",
+ .parent_names = (const char *[]){ "tsif_ref_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+ { 60000000, P_PLL8, 1, 5, 32 },
+ { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_src = {
+ .ns_reg = 0x290c,
+ .md_reg = 0x2908,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x290c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_xcvr_clk",
+ .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_hsic_xcvr_fs_src = {
+ .ns_reg = 0x2928,
+ .md_reg = 0x2924,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
+
+static struct clk_branch usb_hsic_xcvr_fs_clk = {
+ .halt_reg = 0x2fc8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x2928,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_xcvr_fs_clk",
+ .parent_names = usb_hsic_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x292c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_hsic_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_hsic_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_hsic_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pll14_vote" },
+ .num_parents = 1,
+ .name = "usb_hsic_hsic_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_hsio_cal_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2b48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_hsio_cal_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs1_xcvr_fs_src = {
+ .ns_reg = 0x2968,
+ .md_reg = 0x2964,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+
+static struct clk_branch usb_fs1_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2968,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_xcvr_fs_clk",
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x296c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = usb_fs1_xcvr_fs_src_p,
+ .num_parents = 1,
+ .name = "usb_fs1_system_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg usb_fs2_xcvr_fs_src = {
+ .ns_reg = 0x2988,
+ .md_reg = 0x2984,
+ .mn = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_map,
+ },
+ .freq_tbl = clk_tbl_usb,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_src",
+ .parent_names = gcc_pxo_pll8,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_GATE,
+ },
+ }
+};
+
+static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+
+static struct clk_branch usb_fs2_xcvr_fs_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x2988,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_xcvr_fs_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_system_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x298c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_system_clk",
+ .parent_names = usb_fs2_xcvr_fs_src_p,
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch ce1_core_clk = {
+ .hwcg_reg = 0x2724,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2724,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_core_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ce1_h_clk = {
+ .halt_reg = 0x2fd4,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2720,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dma_bam_h_clk = {
+ .hwcg_reg = 0x25c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x25c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "dma_bam_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi1_h_clk = {
+ .hwcg_reg = 0x29c0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x29c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+ .hwcg_reg = 0x29e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x29e0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi3_h_clk = {
+ .hwcg_reg = 0x2a00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fcc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2a00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+ .hwcg_reg = 0x2a20,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 27,
+ .clkr = {
+ .enable_reg = 0x2a20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+ .hwcg_reg = 0x2a40,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x2a40,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi6_h_clk = {
+ .hwcg_reg = 0x2a60,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2a60,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi6_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi7_h_clk = {
+ .hwcg_reg = 0x2a80,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x2a80,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi7_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi8_h_clk = {
+ .hwcg_reg = 0x2aa0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2aa0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi8_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi9_h_clk = {
+ .hwcg_reg = 0x2ac0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2ac0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi9_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi10_h_clk = {
+ .hwcg_reg = 0x2ae0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd0,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x2ae0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi10_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi11_h_clk = {
+ .hwcg_reg = 0x2b00,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2b00,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi11_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gsbi12_h_clk = {
+ .hwcg_reg = 0x2b20,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2b20,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gsbi12_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tsif_h_clk = {
+ .hwcg_reg = 0x2700,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd4,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x2700,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "tsif_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x2960,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_fs2_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x2980,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_fs2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+ .hwcg_reg = 0x2900,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2900,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hs1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch usb_hsic_h_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 28,
+ .clkr = {
+ .enable_reg = 0x2920,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc1_h_clk = {
+ .hwcg_reg = 0x2820,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x2820,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc2_h_clk = {
+ .hwcg_reg = 0x2840,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x2840,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc2_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc3_h_clk = {
+ .hwcg_reg = 0x2860,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x2860,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc3_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc4_h_clk = {
+ .hwcg_reg = 0x2880,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x2880,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc4_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch sdc5_h_clk = {
+ .hwcg_reg = 0x28a0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fc8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x28a0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "sdc5_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+ .hwcg_reg = 0x2208,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fdc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "adm0_pbus_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb0_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_arb1_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "pmic_ssbi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+ .hwcg_reg = 0x27e0,
+ .hwcg_bit = 6,
+ .halt_reg = 0x2fd8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x3080,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "rpm_msg_ram_h_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_msm8960_clks[] = {
+ [PLL3] = &pll3.clkr,
+ [PLL8] = &pll8.clkr,
+ [PLL8_VOTE] = &pll8_vote,
+ [PLL14] = &pll14.clkr,
+ [PLL14_VOTE] = &pll14_vote,
+ [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+ [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+ [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+ [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+ [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+ [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+ [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+ [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+ [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+ [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+ [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+ [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+ [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+ [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+ [GSBI8_UART_SRC] = &gsbi8_uart_src.clkr,
+ [GSBI8_UART_CLK] = &gsbi8_uart_clk.clkr,
+ [GSBI9_UART_SRC] = &gsbi9_uart_src.clkr,
+ [GSBI9_UART_CLK] = &gsbi9_uart_clk.clkr,
+ [GSBI10_UART_SRC] = &gsbi10_uart_src.clkr,
+ [GSBI10_UART_CLK] = &gsbi10_uart_clk.clkr,
+ [GSBI11_UART_SRC] = &gsbi11_uart_src.clkr,
+ [GSBI11_UART_CLK] = &gsbi11_uart_clk.clkr,
+ [GSBI12_UART_SRC] = &gsbi12_uart_src.clkr,
+ [GSBI12_UART_CLK] = &gsbi12_uart_clk.clkr,
+ [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+ [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+ [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+ [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+ [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+ [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+ [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+ [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+ [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+ [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+ [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+ [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+ [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+ [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+ [GSBI8_QUP_SRC] = &gsbi8_qup_src.clkr,
+ [GSBI8_QUP_CLK] = &gsbi8_qup_clk.clkr,
+ [GSBI9_QUP_SRC] = &gsbi9_qup_src.clkr,
+ [GSBI9_QUP_CLK] = &gsbi9_qup_clk.clkr,
+ [GSBI10_QUP_SRC] = &gsbi10_qup_src.clkr,
+ [GSBI10_QUP_CLK] = &gsbi10_qup_clk.clkr,
+ [GSBI11_QUP_SRC] = &gsbi11_qup_src.clkr,
+ [GSBI11_QUP_CLK] = &gsbi11_qup_clk.clkr,
+ [GSBI12_QUP_SRC] = &gsbi12_qup_src.clkr,
+ [GSBI12_QUP_CLK] = &gsbi12_qup_clk.clkr,
+ [GP0_SRC] = &gp0_src.clkr,
+ [GP0_CLK] = &gp0_clk.clkr,
+ [GP1_SRC] = &gp1_src.clkr,
+ [GP1_CLK] = &gp1_clk.clkr,
+ [GP2_SRC] = &gp2_src.clkr,
+ [GP2_CLK] = &gp2_clk.clkr,
+ [PMEM_A_CLK] = &pmem_clk.clkr,
+ [PRNG_SRC] = &prng_src.clkr,
+ [PRNG_CLK] = &prng_clk.clkr,
+ [SDC1_SRC] = &sdc1_src.clkr,
+ [SDC1_CLK] = &sdc1_clk.clkr,
+ [SDC2_SRC] = &sdc2_src.clkr,
+ [SDC2_CLK] = &sdc2_clk.clkr,
+ [SDC3_SRC] = &sdc3_src.clkr,
+ [SDC3_CLK] = &sdc3_clk.clkr,
+ [SDC4_SRC] = &sdc4_src.clkr,
+ [SDC4_CLK] = &sdc4_clk.clkr,
+ [SDC5_SRC] = &sdc5_src.clkr,
+ [SDC5_CLK] = &sdc5_clk.clkr,
+ [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+ [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+ [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+ [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+ [USB_HSIC_XCVR_FS_SRC] = &usb_hsic_xcvr_fs_src.clkr,
+ [USB_HSIC_XCVR_FS_CLK] = &usb_hsic_xcvr_fs_clk.clkr,
+ [USB_HSIC_SYSTEM_CLK] = &usb_hsic_system_clk.clkr,
+ [USB_HSIC_HSIC_CLK] = &usb_hsic_hsic_clk.clkr,
+ [USB_HSIC_HSIO_CAL_CLK] = &usb_hsic_hsio_cal_clk.clkr,
+ [USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+ [USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+ [USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+ [USB_FS2_XCVR_FS_SRC] = &usb_fs2_xcvr_fs_src.clkr,
+ [USB_FS2_XCVR_FS_CLK] = &usb_fs2_xcvr_fs_clk.clkr,
+ [USB_FS2_SYSTEM_CLK] = &usb_fs2_system_clk.clkr,
+ [CE1_CORE_CLK] = &ce1_core_clk.clkr,
+ [CE1_H_CLK] = &ce1_h_clk.clkr,
+ [DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+ [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+ [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+ [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+ [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+ [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+ [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+ [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+ [GSBI8_H_CLK] = &gsbi8_h_clk.clkr,
+ [GSBI9_H_CLK] = &gsbi9_h_clk.clkr,
+ [GSBI10_H_CLK] = &gsbi10_h_clk.clkr,
+ [GSBI11_H_CLK] = &gsbi11_h_clk.clkr,
+ [GSBI12_H_CLK] = &gsbi12_h_clk.clkr,
+ [TSIF_H_CLK] = &tsif_h_clk.clkr,
+ [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+ [USB_FS2_H_CLK] = &usb_fs2_h_clk.clkr,
+ [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+ [USB_HSIC_H_CLK] = &usb_hsic_h_clk.clkr,
+ [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+ [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+ [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+ [SDC4_H_CLK] = &sdc4_h_clk.clkr,
+ [SDC5_H_CLK] = &sdc5_h_clk.clkr,
+ [ADM0_CLK] = &adm0_clk.clkr,
+ [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+ [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+ [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+ [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+ [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
+
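+/*
+ * Reset table entries are { register offset, bit }. Entries that give
+ * only the offset rely on zero-initialization and therefore assert
+ * bit 0 of that register.
+ */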
+static const struct qcom_reset_map gcc_msm8960_resets[] = {
+ [SFAB_MSS_Q6_SW_RESET] = { 0x2040, 7 },
+ [SFAB_MSS_Q6_FW_RESET] = { 0x2044, 7 },
+ [QDSS_STM_RESET] = { 0x2060, 6 },
+ [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+ [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+ [AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+ [AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+ [AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+ [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+ [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+ [SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+ [ADM0_C2_RESET] = { 0x220c, 4 },
+ [ADM0_C1_RESET] = { 0x220c, 3 },
+ [ADM0_C0_RESET] = { 0x220c, 2 },
+ [ADM0_PBUS_RESET] = { 0x220c, 1 },
+ [ADM0_RESET] = { 0x220c },
+ [QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+ [QDSS_POR_RESET] = { 0x2260, 4 },
+ [QDSS_TSCTR_RESET] = { 0x2260, 3 },
+ [QDSS_HRESET_RESET] = { 0x2260, 2 },
+ [QDSS_AXI_RESET] = { 0x2260, 1 },
+ [QDSS_DBG_RESET] = { 0x2260 },
+ [PCIE_A_RESET] = { 0x22c0, 7 },
+ [PCIE_AUX_RESET] = { 0x22c8, 7 },
+ [PCIE_H_RESET] = { 0x22d0, 7 },
+ [SFAB_PCIE_M_RESET] = { 0x22d4, 1 },
+ [SFAB_PCIE_S_RESET] = { 0x22d4 },
+ [SFAB_MSS_M_RESET] = { 0x2340, 7 },
+ [SFAB_USB3_M_RESET] = { 0x2360, 7 },
+ [SFAB_RIVA_M_RESET] = { 0x2380, 7 },
+ [SFAB_LPASS_RESET] = { 0x23a0, 7 },
+ [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+ [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+ [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+ [SFAB_SATA_S_RESET] = { 0x2480, 7 },
+ [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+ [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+ [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+ [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+ [DFAB_ARB0_RESET] = { 0x2560, 7 },
+ [DFAB_ARB1_RESET] = { 0x2564, 7 },
+ [PPSS_PROC_RESET] = { 0x2594, 1 },
+ [PPSS_RESET] = { 0x2594 },
+ [DMA_BAM_RESET] = { 0x25c0, 7 },
+ [SIC_TIC_RESET] = { 0x2600, 7 },
+ [SLIMBUS_H_RESET] = { 0x2620, 7 },
+ [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+ [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+ [TSIF_H_RESET] = { 0x2700, 7 },
+ [CE1_H_RESET] = { 0x2720, 7 },
+ [CE1_CORE_RESET] = { 0x2724, 7 },
+ [CE1_SLEEP_RESET] = { 0x2728, 7 },
+ [CE2_H_RESET] = { 0x2740, 7 },
+ [CE2_CORE_RESET] = { 0x2744, 7 },
+ [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+ [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+ [RPM_PROC_RESET] = { 0x27c0, 7 },
+ [PMIC_SSBI2_RESET] = { 0x270c, 12 },
+ [SDC1_RESET] = { 0x2830 },
+ [SDC2_RESET] = { 0x2850 },
+ [SDC3_RESET] = { 0x2870 },
+ [SDC4_RESET] = { 0x2890 },
+ [SDC5_RESET] = { 0x28b0 },
+ [DFAB_A2_RESET] = { 0x28c0, 7 },
+ [USB_HS1_RESET] = { 0x2910 },
+ [USB_HSIC_RESET] = { 0x2934 },
+ [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+ [USB_FS1_RESET] = { 0x2974 },
+ [USB_FS2_XCVR_RESET] = { 0x2994, 1 },
+ [USB_FS2_RESET] = { 0x2994 },
+ [GSBI1_RESET] = { 0x29dc },
+ [GSBI2_RESET] = { 0x29fc },
+ [GSBI3_RESET] = { 0x2a1c },
+ [GSBI4_RESET] = { 0x2a3c },
+ [GSBI5_RESET] = { 0x2a5c },
+ [GSBI6_RESET] = { 0x2a7c },
+ [GSBI7_RESET] = { 0x2a9c },
+ [GSBI8_RESET] = { 0x2abc },
+ [GSBI9_RESET] = { 0x2adc },
+ [GSBI10_RESET] = { 0x2afc },
+ [GSBI11_RESET] = { 0x2b1c },
+ [GSBI12_RESET] = { 0x2b3c },
+ [SPDM_RESET] = { 0x2b6c },
+ [TLMM_H_RESET] = { 0x2ba0, 7 },
+ [SFAB_MSS_S_RESET] = { 0x2c00, 7 },
+ [MSS_SLP_RESET] = { 0x2c60, 7 },
+ [MSS_Q6SW_JTAG_RESET] = { 0x2c68, 7 },
+ [MSS_Q6FW_JTAG_RESET] = { 0x2c6c, 7 },
+ [MSS_RESET] = { 0x2c64 },
+ [SATA_H_RESET] = { 0x2c80, 7 },
+ [SATA_RXOOB_RESE] = { 0x2c8c, 7 },
+ [SATA_PMALIVE_RESET] = { 0x2c90, 7 },
+ [SATA_SFAB_M_RESET] = { 0x2c98, 7 },
+ [TSSC_RESET] = { 0x2ca0, 7 },
+ [PDM_RESET] = { 0x2cc0, 12 },
+ [MPM_H_RESET] = { 0x2da0, 7 },
+ [MPM_RESET] = { 0x2da4 },
+ [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+ [PRNG_RESET] = { 0x2e80, 12 },
+ [RIVA_RESET] = { 0x35e0 },
+};
+
+static const struct regmap_config gcc_msm8960_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3660,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8960_match_table[] = {
+ { .compatible = "qcom,gcc-msm8960" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8960_match_table);
+
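+/*
+ * Groups the reset controller, the clk_onecell_data and the registered
+ * clk pointers so probe() can set everything up from one allocation.
+ */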
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8960_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8960_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8960_clks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
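+ /* Register each clock, skipping any table slots left unpopulated */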
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8960_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8960_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8960_resets);
+ reset->regmap = regmap;
+ reset->reset_map = gcc_msm8960_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8960_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8960_driver = {
+ .probe = gcc_msm8960_probe,
+ .remove = gcc_msm8960_remove,
+ .driver = {
+ .name = "gcc-msm8960",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8960_match_table,
+ },
+};
+
+static int __init gcc_msm8960_init(void)
+{
+ return platform_driver_register(&gcc_msm8960_driver);
+}
+core_initcall(gcc_msm8960_init);
+
+static void __exit gcc_msm8960_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8960_driver);
+}
+module_exit(gcc_msm8960_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8960");
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
new file mode 100644
index 000000000000..51d457e2b959
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -0,0 +1,2694 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8974.h>
+#include <dt-bindings/reset/qcom,gcc-msm8974.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
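+/*
+ * P_* values index a clock's parent_names array; the per-clock
+ * parent_map tables translate them into hardware mux selector values.
+ * P_GPLL0 and P_GPLL1 may share the value 1 because they are used
+ * with different parent lists.
+ */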
+#define P_XO 0
+#define P_GPLL0 1
+#define P_GPLL1 1
+
+static const u8 gcc_xo_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_GPLL0] = 1,
+};
+
+static const char *gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0_vote",
+};
+
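+/*
+ * Frequency table entries are { rate, source, pre-divider, M, N }.
+ * The register encodes the pre-divider as (2 * div - 1), so F()
+ * converts it here; this also lets half-integer dividers such as
+ * 12.5 map onto integer register values.
+ */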
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x0004,
+ .m_reg = 0x0008,
+ .n_reg = 0x000c,
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0150,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x0190,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0120,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+ .config_reg = 0x0054,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x1480,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk[] = {
+ F(125000000, P_GPLL0, 1, 5, 24),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x03d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb30_master_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0660,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x064c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0760,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x074c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0860,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x084c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x08e0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x08cc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_uart1_6_apps_clk[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63160000, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x068c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x070c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x078c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x080c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x088c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x090c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x09a0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x098c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0a20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0aa0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0a8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0b20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0ba0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0b8c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0c20,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x0c0c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x09cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x0a4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x0acc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x0b4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x0bcc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x0c4c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ce1_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ce1_clk_src = {
+ .cmd_rcgr = 0x1050,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_ce1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ce1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ce2_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ce2_clk_src = {
+ .cmd_rcgr = 0x1090,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_ce2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ce2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(6750000, P_GPLL0, 1, 1, 89),
+ F(8000000, P_GPLL0, 15, 1, 5),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 1, 2, 75),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x1904,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x1944,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x1984,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x0cd0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_4_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x04d0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x0510,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x0550,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x0590,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk[] = {
+ F(105000, P_XO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x0d90,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x03e8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x0490,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
+ F(480000000, P_GPLL1, 1, 0, 0),
+ { }
+};
+
+static u8 usb_hsic_clk_src_map[] = {
+ [P_XO] = 0,
+ [P_GPLL1] = 4,
+};
+
+static struct clk_rcg2 usb_hsic_clk_src = {
+ .cmd_rcgr = 0x0440,
+ .hid_width = 5,
+ .parent_map = usb_hsic_clk_src_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_clk_src",
+ .parent_names = (const char *[]){
+ "xo",
+ "gpll1_vote",
+ },
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_io_cal_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hsic_io_cal_clk_src = {
+ .cmd_rcgr = 0x0458,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_io_cal_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_io_cal_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_system_clk[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hsic_system_clk_src = {
+ .cmd_rcgr = 0x041c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hsic_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hsic_system_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap gcc_mmss_gpll0_clk_src = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_gpll0_vote",
+ .parent_names = (const char *[]){
+ "gpll0_vote",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ },
+};
+
+static struct clk_branch gcc_bam_dma_ahb_clk = {
+ .halt_reg = 0x0d44,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bam_dma_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0648,
+ .clkr = {
+ .enable_reg = 0x0648,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x0644,
+ .clkr = {
+ .enable_reg = 0x0644,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x06c8,
+ .clkr = {
+ .enable_reg = 0x06c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x06c4,
+ .clkr = {
+ .enable_reg = 0x06c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0748,
+ .clkr = {
+ .enable_reg = 0x0748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0744,
+ .clkr = {
+ .enable_reg = 0x0744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x07c8,
+ .clkr = {
+ .enable_reg = 0x07c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x07c4,
+ .clkr = {
+ .enable_reg = 0x07c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0848,
+ .clkr = {
+ .enable_reg = 0x0848,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0844,
+ .clkr = {
+ .enable_reg = 0x0844,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x08c8,
+ .clkr = {
+ .enable_reg = 0x08c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x08c4,
+ .clkr = {
+ .enable_reg = 0x08c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0684,
+ .clkr = {
+ .enable_reg = 0x0684,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0704,
+ .clkr = {
+ .enable_reg = 0x0704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0784,
+ .clkr = {
+ .enable_reg = 0x0784,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0804,
+ .clkr = {
+ .enable_reg = 0x0804,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0884,
+ .clkr = {
+ .enable_reg = 0x0884,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0904,
+ .clkr = {
+ .enable_reg = 0x0904,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x05c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x0988,
+ .clkr = {
+ .enable_reg = 0x0988,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x0984,
+ .clkr = {
+ .enable_reg = 0x0984,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x0a08,
+ .clkr = {
+ .enable_reg = 0x0a08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x0a04,
+ .clkr = {
+ .enable_reg = 0x0a04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x0a88,
+ .clkr = {
+ .enable_reg = 0x0a88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x0a84,
+ .clkr = {
+ .enable_reg = 0x0a84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x0b08,
+ .clkr = {
+ .enable_reg = 0x0b08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x0b04,
+ .clkr = {
+ .enable_reg = 0x0b04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x0b88,
+ .clkr = {
+ .enable_reg = 0x0b88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x0b84,
+ .clkr = {
+ .enable_reg = 0x0b84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup5_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x0c08,
+ .clkr = {
+ .enable_reg = 0x0c08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x0c04,
+ .clkr = {
+ .enable_reg = 0x0c04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup6_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x09c4,
+ .clkr = {
+ .enable_reg = 0x09c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x0a44,
+ .clkr = {
+ .enable_reg = 0x0a44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x0ac4,
+ .clkr = {
+ .enable_reg = 0x0ac4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+ .halt_reg = 0x0b44,
+ .clkr = {
+ .enable_reg = 0x0b44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+ .halt_reg = 0x0bc4,
+ .clkr = {
+ .enable_reg = 0x0bc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart5_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart5_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+ .halt_reg = 0x0c44,
+ .clkr = {
+ .enable_reg = 0x0c44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart6_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart6_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x0e04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x104c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x1048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x1050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .parent_names = (const char *[]){
+ "ce1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_ahb_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_axi_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce2_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce2_clk",
+ .parent_names = (const char *[]){
+ "ce2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x1900,
+ .clkr = {
+ .enable_reg = 0x1900,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x1940,
+ .clkr = {
+ .enable_reg = 0x1940,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x1980,
+ .clkr = {
+ .enable_reg = 0x1980,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6_axi_clk = {
+ .halt_reg = 0x11c0,
+ .clkr = {
+ .enable_reg = 0x11c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_axi_clk",
+ .parent_names = (const char *[]){
+ "system_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x024c,
+ .clkr = {
+ .enable_reg = 0x024c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch gcc_ocmem_noc_cfg_ahb_clk = {
+ .halt_reg = 0x0248,
+ .clkr = {
+ .enable_reg = 0x0248,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ocmem_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x0280,
+ .clkr = {
+ .enable_reg = 0x0280,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "config_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x0284,
+ .clkr = {
+ .enable_reg = 0x0284,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .flags = CLK_IS_ROOT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x0ccc,
+ .clkr = {
+ .enable_reg = 0x0ccc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x0cc4,
+ .clkr = {
+ .enable_reg = 0x0cc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x0d04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x04c8,
+ .clkr = {
+ .enable_reg = 0x04c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x04c4,
+ .clkr = {
+ .enable_reg = 0x04c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x0508,
+ .clkr = {
+ .enable_reg = 0x0508,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x0504,
+ .clkr = {
+ .enable_reg = 0x0504,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x0548,
+ .clkr = {
+ .enable_reg = 0x0548,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x0544,
+ .clkr = {
+ .enable_reg = 0x0544,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x0588,
+ .clkr = {
+ .enable_reg = 0x0588,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x0584,
+ .clkr = {
+ .enable_reg = 0x0584,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+ .halt_reg = 0x0108,
+ .clkr = {
+ .enable_reg = 0x0108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x0d84,
+ .clkr = {
+ .enable_reg = 0x0d84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x0d88,
+ .clkr = {
+ .enable_reg = 0x0d88,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){
+ "tsif_ref_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x04ac,
+ .clkr = {
+ .enable_reg = 0x04ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2b_phy_sleep_clk = {
+ .halt_reg = 0x04b4,
+ .clkr = {
+ .enable_reg = 0x04b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2b_phy_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x03c8,
+ .clkr = {
+ .enable_reg = 0x03c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x03d0,
+ .clkr = {
+ .enable_reg = 0x03d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x03cc,
+ .clkr = {
+ .enable_reg = 0x03cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x0488,
+ .clkr = {
+ .enable_reg = 0x0488,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x0484,
+ .clkr = {
+ .enable_reg = 0x0484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_ahb_clk = {
+ .halt_reg = 0x0408,
+ .clkr = {
+ .enable_reg = 0x0408,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_clk = {
+ .halt_reg = 0x0410,
+ .clkr = {
+ .enable_reg = 0x0410,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_clk = {
+ .halt_reg = 0x0414,
+ .clkr = {
+ .enable_reg = 0x0414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_io_cal_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_io_cal_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
+ .halt_reg = 0x0418,
+ .clkr = {
+ .enable_reg = 0x0418,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_io_cal_sleep_clk",
+ .parent_names = (const char *[]){
+ "sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hsic_system_clk = {
+ .halt_reg = 0x040c,
+ .clkr = {
+ .enable_reg = 0x040c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hsic_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hsic_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
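+/*
+ * Clock table indexed by the DT binding identifiers (GPLL0, GCC_*_CLK, ...);
+ * of_clk_src_onecell_get() uses the same indices, so consumers reference
+ * clocks by these values in their clock cells.
+ */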
+static struct clk_regmap *gcc_msm8974_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_VOTE] = &gpll0_vote,
+ [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+ [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+ [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+ [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+ [CE1_CLK_SRC] = &ce1_clk_src.clkr,
+ [CE2_CLK_SRC] = &ce2_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [USB_HSIC_CLK_SRC] = &usb_hsic_clk_src.clkr,
+ [USB_HSIC_IO_CAL_CLK_SRC] = &usb_hsic_io_cal_clk_src.clkr,
+ [USB_HSIC_SYSTEM_CLK_SRC] = &usb_hsic_system_clk_src.clkr,
+ [GCC_BAM_DMA_AHB_CLK] = &gcc_bam_dma_ahb_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+ [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+ [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CE2_AHB_CLK] = &gcc_ce2_ahb_clk.clkr,
+ [GCC_CE2_AXI_CLK] = &gcc_ce2_axi_clk.clkr,
+ [GCC_CE2_CLK] = &gcc_ce2_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_LPASS_Q6_AXI_CLK] = &gcc_lpass_q6_axi_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_OCMEM_NOC_CFG_AHB_CLK] = &gcc_ocmem_noc_cfg_ahb_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB2B_PHY_SLEEP_CLK] = &gcc_usb2b_phy_sleep_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_USB_HSIC_AHB_CLK] = &gcc_usb_hsic_ahb_clk.clkr,
+ [GCC_USB_HSIC_CLK] = &gcc_usb_hsic_clk.clkr,
+ [GCC_USB_HSIC_IO_CAL_CLK] = &gcc_usb_hsic_io_cal_clk.clkr,
+ [GCC_USB_HSIC_IO_CAL_SLEEP_CLK] = &gcc_usb_hsic_io_cal_sleep_clk.clkr,
+ [GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK_SRC] = &gcc_mmss_gpll0_clk_src,
+};
+
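+/*
+ * Reset table indexed by the DT reset binding identifiers; each entry gives
+ * the offset of the block's BCR register, with an explicit bit index where
+ * the reset bit is not bit 0.
+ */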
+static const struct qcom_reset_map gcc_msm8974_resets[] = {
+ [GCC_SYSTEM_NOC_BCR] = { 0x0100 },
+ [GCC_CONFIG_NOC_BCR] = { 0x0140 },
+ [GCC_PERIPH_NOC_BCR] = { 0x0180 },
+ [GCC_IMEM_BCR] = { 0x0200 },
+ [GCC_MMSS_BCR] = { 0x0240 },
+ [GCC_QDSS_BCR] = { 0x0300 },
+ [GCC_USB_30_BCR] = { 0x03c0 },
+ [GCC_USB3_PHY_BCR] = { 0x03fc },
+ [GCC_USB_HS_HSIC_BCR] = { 0x0400 },
+ [GCC_USB_HS_BCR] = { 0x0480 },
+ [GCC_USB2A_PHY_BCR] = { 0x04a8 },
+ [GCC_USB2B_PHY_BCR] = { 0x04b0 },
+ [GCC_SDCC1_BCR] = { 0x04c0 },
+ [GCC_SDCC2_BCR] = { 0x0500 },
+ [GCC_SDCC3_BCR] = { 0x0540 },
+ [GCC_SDCC4_BCR] = { 0x0580 },
+ [GCC_BLSP1_BCR] = { 0x05c0 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x0640 },
+ [GCC_BLSP1_UART1_BCR] = { 0x0680 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x06c0 },
+ [GCC_BLSP1_UART2_BCR] = { 0x0700 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x0740 },
+ [GCC_BLSP1_UART3_BCR] = { 0x0780 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x07c0 },
+ [GCC_BLSP1_UART4_BCR] = { 0x0800 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x0840 },
+ [GCC_BLSP1_UART5_BCR] = { 0x0880 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x08c0 },
+ [GCC_BLSP1_UART6_BCR] = { 0x0900 },
+ [GCC_BLSP2_BCR] = { 0x0940 },
+ [GCC_BLSP2_QUP1_BCR] = { 0x0980 },
+ [GCC_BLSP2_UART1_BCR] = { 0x09c0 },
+ [GCC_BLSP2_QUP2_BCR] = { 0x0a00 },
+ [GCC_BLSP2_UART2_BCR] = { 0x0a40 },
+ [GCC_BLSP2_QUP3_BCR] = { 0x0a80 },
+ [GCC_BLSP2_UART3_BCR] = { 0x0ac0 },
+ [GCC_BLSP2_QUP4_BCR] = { 0x0b00 },
+ [GCC_BLSP2_UART4_BCR] = { 0x0b40 },
+ [GCC_BLSP2_QUP5_BCR] = { 0x0b80 },
+ [GCC_BLSP2_UART5_BCR] = { 0x0bc0 },
+ [GCC_BLSP2_QUP6_BCR] = { 0x0c00 },
+ [GCC_BLSP2_UART6_BCR] = { 0x0c40 },
+ [GCC_PDM_BCR] = { 0x0cc0 },
+ [GCC_BAM_DMA_BCR] = { 0x0d40 },
+ [GCC_TSIF_BCR] = { 0x0d80 },
+ [GCC_TCSR_BCR] = { 0x0dc0 },
+ [GCC_BOOT_ROM_BCR] = { 0x0e00 },
+ [GCC_MSG_RAM_BCR] = { 0x0e40 },
+ [GCC_TLMM_BCR] = { 0x0e80 },
+ [GCC_MPM_BCR] = { 0x0ec0 },
+ [GCC_SEC_CTRL_BCR] = { 0x0f40 },
+ [GCC_SPMI_BCR] = { 0x0fc0 },
+ [GCC_SPDM_BCR] = { 0x1000 },
+ [GCC_CE1_BCR] = { 0x1040 },
+ [GCC_CE2_BCR] = { 0x1080 },
+ [GCC_BIMC_BCR] = { 0x1100 },
+ [GCC_MPM_NON_AHB_RESET] = { 0x0ec4, 2 },
+ [GCC_MPM_AHB_RESET] = { 0x0ec4, 1 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x1240 },
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x1248 },
+ [GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x1280 },
+ [GCC_PNOC_BUS_TIMEOUT1_BCR] = { 0x1288 },
+ [GCC_PNOC_BUS_TIMEOUT2_BCR] = { 0x1290 },
+ [GCC_PNOC_BUS_TIMEOUT3_BCR] = { 0x1298 },
+ [GCC_PNOC_BUS_TIMEOUT4_BCR] = { 0x12a0 },
+ [GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x12c0 },
+ [GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x12c8 },
+ [GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x12d0 },
+ [GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x12d8 },
+ [GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x12e0 },
+ [GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x12e8 },
+ [GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x12f0 },
+ [GCC_DEHR_BCR] = { 0x1300 },
+ [GCC_RBCPR_BCR] = { 0x1380 },
+ [GCC_MSS_RESTART] = { 0x1680 },
+ [GCC_LPASS_RESTART] = { 0x16c0 },
+ [GCC_WCSS_RESTART] = { 0x1700 },
+ [GCC_VENUS_RESTART] = { 0x1740 },
+};
+
+static const struct regmap_config gcc_msm8974_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1fc0,
+ .fast_io = true,
+};
+
+static const struct of_device_id gcc_msm8974_match_table[] = {
+ { .compatible = "qcom,gcc-msm8974" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8974_match_table);
+
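+/*
+ * Per-device bundle of the reset controller, the onecell clock data and the
+ * registered clock pointers, allocated as a single block in probe().
+ */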
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int gcc_msm8974_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &gcc_msm8974_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(gcc_msm8974_clocks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ /* Temporary until RPM clocks supported */
+ clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* Should move to DT node? */
+ clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+ CLK_IS_ROOT, 32768);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
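+ /* Register each clk_regmap; NULL slots in the table are skipped. */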
+ for (i = 0; i < num_clks; i++) {
+ if (!gcc_msm8974_clocks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, gcc_msm8974_clocks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(gcc_msm8974_resets);
+ reset->regmap = regmap;
+ reset->reset_map = gcc_msm8974_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int gcc_msm8974_remove(struct platform_device *pdev)
+{
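+/*
+ * The boot ROM, CE and PRNG branches below are enabled through the shared
+ * vote register at 0x1484 (one bit per client), so BRANCH_HALT_VOTED is used:
+ * dropping this vote does not guarantee the clock halts while others vote.
+ */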
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver gcc_msm8974_driver = {
+ .probe = gcc_msm8974_probe,
+ .remove = gcc_msm8974_remove,
+ .driver = {
+ .name = "gcc-msm8974",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_msm8974_match_table,
+ },
+};
+
+static int __init gcc_msm8974_init(void)
+{
+ return platform_driver_register(&gcc_msm8974_driver);
+}
+core_initcall(gcc_msm8974_init);
+
+static void __exit gcc_msm8974_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8974_driver);
+}
+module_exit(gcc_msm8974_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MSM8974 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8974");
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
new file mode 100644
index 000000000000..f9b59c7e48e9
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -0,0 +1,2321 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/reset/qcom,mmcc-msm8960.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_PXO 0
+#define P_PLL8 1
+#define P_PLL2 2
+#define P_PLL3 3
+
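+/*
+ * Map the P_* source identifiers used in the frequency tables to the
+ * hardware source-select values programmed into the NS registers.
+ */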
+static u8 mmcc_pxo_pll8_pll2_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 2,
+ [P_PLL2] = 1,
+};
+
+static const char *mmcc_pxo_pll8_pll2[] = {
+ "pxo",
+ "pll8_vote",
+ "pll2",
+};
+
+static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
+ [P_PXO] = 0,
+ [P_PLL8] = 2,
+ [P_PLL2] = 1,
+ [P_PLL3] = 3,
+};
+
+static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+ "pxo",
+ "pll2",
+ "pll8_vote",
+ "pll3",
+};
+
+static struct clk_pll pll2 = {
+ .l_reg = 0x320,
+ .m_reg = 0x324,
+ .n_reg = 0x328,
+ .config_reg = 0x32c,
+ .mode_reg = 0x31c,
+ .status_reg = 0x334,
+ .status_bit = 16,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll2",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
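+/*
+ * Frequency tables list the target rate, the P_* source, and the divider
+ * settings (pre-divider and, where used, M/N counter values) that produce it.
+ */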
+static struct freq_tbl clk_tbl_cam[] = {
+ { 6000000, P_PLL8, 4, 1, 16 },
+ { 8000000, P_PLL8, 4, 1, 12 },
+ { 12000000, P_PLL8, 4, 1, 8 },
+ { 16000000, P_PLL8, 4, 1, 6 },
+ { 19200000, P_PLL8, 4, 1, 5 },
+ { 24000000, P_PLL8, 4, 1, 4 },
+ { 32000000, P_PLL8, 4, 1, 3 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 64000000, P_PLL8, 3, 1, 2 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { }
+};
+
+static struct clk_rcg camclk0_src = {
+ .ns_reg = 0x0148,
+ .md_reg = 0x0144,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0140,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch camclk0_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x0140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk0_clk",
+ .parent_names = (const char *[]){ "camclk0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_rcg camclk1_src = {
+ .ns_reg = 0x015c,
+ .md_reg = 0x0158,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0154,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch camclk1_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk1_clk",
+ .parent_names = (const char *[]){ "camclk1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_rcg camclk2_src = {
+ .ns_reg = 0x0228,
+ .md_reg = 0x0224,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_cam,
+ .clkr = {
+ .enable_reg = 0x0220,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk2_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch camclk2_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0220,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camclk2_clk",
+ .parent_names = (const char *[]){ "camclk2_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_csi[] = {
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { }
+};
+
+static struct clk_rcg csi0_src = {
+ .ns_reg = 0x0048,
+ .md_reg = 0x0044,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi0_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi0_src" },
+ .num_parents = 1,
+ .name = "csi0_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi0_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0040,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi0_src" },
+ .num_parents = 1,
+ .name = "csi0_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg csi1_src = {
+ .ns_reg = 0x0010,
+ .md_reg = 0x0028,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi1_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi1_src" },
+ .num_parents = 1,
+ .name = "csi1_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi1_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x0024,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi1_src" },
+ .num_parents = 1,
+ .name = "csi1_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg csi2_src = {
+ .ns_reg = 0x0234,
+ .md_reg = 0x022c,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csi,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi2_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch csi2_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi2_src" },
+ .num_parents = 1,
+ .name = "csi2_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csi2_phy_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x022c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "csi2_src" },
+ .num_parents = 1,
+ .name = "csi2_phy_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
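+/*
+ * The pixel/RDI clocks are three-input muxes built from two cascaded
+ * single-bit selects: s_reg/s_mask chooses between csi0 and csi1, and
+ * s2_reg/s2_mask chooses between that result and csi2.
+ */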
+struct clk_pix_rdi {
+ u32 s_reg;
+ u32 s_mask;
+ u32 s2_reg;
+ u32 s2_mask;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_pix_rdi(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_pix_rdi, clkr)
+
+static int pix_rdi_set_parent(struct clk_hw *hw, u8 index)
+{
+ int i;
+ int ret = 0;
+ u32 val;
+ struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
+ struct clk *clk = hw->clk;
+ int num_parents = __clk_get_num_parents(hw->clk);
+
+ /*
+ * These clocks select three inputs via two muxes. One mux selects
+ * between csi0 and csi1 and the second mux selects between that mux's
+ * output and csi2. Both the current and the new selection of each mux
+ * must be clocking for the switch to succeed, so simply turn on all
+ * three sources; that is easier than working out which source must be
+ * running at which point in the sequence.
+ */
+ for (i = 0; i < num_parents; i++) {
+ ret = clk_prepare_enable(clk_get_parent_by_index(clk, i));
+ if (ret)
+ goto err;
+ }
+
+ if (index == 2)
+ val = rdi->s2_mask;
+ else
+ val = 0;
+ regmap_update_bits(rdi->clkr.regmap, rdi->s2_reg, rdi->s2_mask, val);
+ /*
+ * Wait at least 6 cycles of the slowest clock
+ * for the glitch-free MUX to fully switch sources.
+ */
+ udelay(1);
+
+ if (index == 1)
+ val = rdi->s_mask;
+ else
+ val = 0;
+ regmap_update_bits(rdi->clkr.regmap, rdi->s_reg, rdi->s_mask, val);
+ /*
+ * Wait at least 6 cycles of the slowest clock
+ * for the glitch-free MUX to fully switch sources.
+ */
+ udelay(1);
+
+err:
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(clk_get_parent_by_index(clk, i));
+
+ return ret;
+}
+
+static u8 pix_rdi_get_parent(struct clk_hw *hw)
+{
+ u32 val;
+ struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
+
+ regmap_read(rdi->clkr.regmap, rdi->s2_reg, &val);
+ if (val & rdi->s2_mask)
+ return 2;
+
+ regmap_read(rdi->clkr.regmap, rdi->s_reg, &val);
+ if (val & rdi->s_mask)
+ return 1;
+
+ return 0;
+}
+
+static const struct clk_ops clk_ops_pix_rdi = {
+ .enable = clk_enable_regmap,
+ .disable = clk_disable_regmap,
+ .set_parent = pix_rdi_set_parent,
+ .get_parent = pix_rdi_get_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static const char *pix_rdi_parents[] = {
+ "csi0_clk",
+ "csi1_clk",
+ "csi2_clk",
+};
+
+static struct clk_pix_rdi csi_pix_clk = {
+ .s_reg = 0x0058,
+ .s_mask = BIT(25),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(13),
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_pix_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_pix1_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(8),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(9),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_pix1_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi_clk = {
+ .s_reg = 0x0058,
+ .s_mask = BIT(12),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(12),
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi1_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(0),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(1),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi1_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct clk_pix_rdi csi_rdi2_clk = {
+ .s_reg = 0x0238,
+ .s_mask = BIT(4),
+ .s2_reg = 0x0238,
+ .s2_mask = BIT(5),
+ .clkr = {
+ .enable_reg = 0x0238,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_rdi2_clk",
+ .parent_names = pix_rdi_parents,
+ .num_parents = 3,
+ .ops = &clk_ops_pix_rdi,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_csiphytimer[] = {
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { }
+};
+
+static struct clk_rcg csiphytimer_src = {
+ .ns_reg = 0x0168,
+ .md_reg = 0x0164,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 8,
+ .reset_in_cc = true,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_csiphytimer,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "csiphytimer_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static const char *csixphy_timer_src[] = { "csiphytimer_src" };
+
+static struct clk_branch csiphy0_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 17,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy0_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csiphy1_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy1_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch csiphy2_timer_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 30,
+ .clkr = {
+ .enable_reg = 0x0160,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = csixphy_timer_src,
+ .num_parents = 1,
+ .name = "csiphy2_timer_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gfx2d[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54857000, P_PLL8, 1, 7 },
+ { 64000000, P_PLL8, 1, 6 },
+ { 76800000, P_PLL8, 1, 5 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 128000000, P_PLL8, 1, 3 },
+ { 145455000, P_PLL2, 2, 11 },
+ { 160000000, P_PLL2, 1, 5 },
+ { 177778000, P_PLL2, 2, 9 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228571000, P_PLL2, 2, 7 },
+ { }
+};
+
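+/*
+ * clk_dyn_rcg describes a root clock generator with two banked M/N and
+ * source-select fields; the inactive bank is reprogrammed and mux_sel_bit
+ * then switches banks, so rates change glitch-free while the clock runs.
+ */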
+static struct clk_dyn_rcg gfx2d0_src = {
+ .ns_reg = 0x0070,
+ .md_reg[0] = 0x0064,
+ .md_reg[1] = 0x0068,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 20,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx2d,
+ .clkr = {
+ .enable_reg = 0x0060,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx2d0_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_clk",
+ .parent_names = (const char *[]){ "gfx2d0_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_dyn_rcg gfx2d1_src = {
+ .ns_reg = 0x007c,
+ .md_reg[0] = 0x0078,
+ .md_reg[1] = 0x006c,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 20,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx2d,
+ .clkr = {
+ .enable_reg = 0x0074,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx2d1_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_clk",
+ .parent_names = (const char *[]){ "gfx2d1_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_gfx3d[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54857000, P_PLL8, 1, 7 },
+ { 64000000, P_PLL8, 1, 6 },
+ { 76800000, P_PLL8, 1, 5 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 128000000, P_PLL8, 1, 3 },
+ { 145455000, P_PLL2, 2, 11 },
+ { 160000000, P_PLL2, 1, 5 },
+ { 177778000, P_PLL2, 2, 9 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228571000, P_PLL2, 2, 7 },
+ { 266667000, P_PLL2, 1, 3 },
+ { 300000000, P_PLL3, 1, 4 },
+ { 320000000, P_PLL2, 2, 5 },
+ { 400000000, P_PLL2, 1, 2 },
+ { }
+};
+
+static struct clk_dyn_rcg gfx3d_src = {
+ .ns_reg = 0x008c,
+ .md_reg[0] = 0x0084,
+ .md_reg[1] = 0x0088,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 25,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 18,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 24,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 14,
+ .m_val_shift = 4,
+ .width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_pll3_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_pll3_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_gfx3d,
+ .clkr = {
+ .enable_reg = 0x0080,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_src",
+ .parent_names = mmcc_pxo_pll8_pll2_pll3,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk",
+ .parent_names = (const char *[]){ "gfx3d_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_ijpeg[] = {
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 36570000, P_PLL8, 1, 2, 21 },
+ { 54860000, P_PLL8, 7, 0, 0 },
+ { 96000000, P_PLL8, 4, 0, 0 },
+ { 109710000, P_PLL8, 1, 2, 7 },
+ { 128000000, P_PLL8, 3, 0, 0 },
+ { 153600000, P_PLL8, 1, 2, 5 },
+ { 200000000, P_PLL2, 4, 0, 0 },
+ { 228571000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { 320000000, P_PLL2, 1, 2, 5 },
+ { }
+};
+
+static struct clk_rcg ijpeg_src = {
+ .ns_reg = 0x00a0,
+ .md_reg = 0x009c,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_ijpeg,
+ .clkr = {
+ .enable_reg = 0x0098,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 24,
+ .clkr = {
+ .enable_reg = 0x0098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_clk",
+ .parent_names = (const char *[]){ "ijpeg_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_jpegd[] = {
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 160000000, P_PLL2, 5 },
+ { 200000000, P_PLL2, 4 },
+ { }
+};
+
+static struct clk_rcg jpegd_src = {
+ .ns_reg = 0x00ac,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_jpegd,
+ .clkr = {
+ .enable_reg = 0x00a4,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch jpegd_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x00a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_clk",
+ .parent_names = (const char *[]){ "jpegd_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_mdp[] = {
+ { 9600000, P_PLL8, 1, 1, 40 },
+ { 13710000, P_PLL8, 1, 1, 28 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 29540000, P_PLL8, 1, 1, 13 },
+ { 34910000, P_PLL8, 1, 1, 11 },
+ { 38400000, P_PLL8, 1, 1, 10 },
+ { 59080000, P_PLL8, 1, 2, 13 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 85330000, P_PLL8, 1, 2, 9 },
+ { 96000000, P_PLL8, 1, 1, 4 },
+ { 128000000, P_PLL8, 1, 1, 3 },
+ { 160000000, P_PLL2, 1, 1, 5 },
+ { 177780000, P_PLL2, 1, 2, 9 },
+ { 200000000, P_PLL2, 1, 1, 4 },
+ { 228571000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { }
+};
+
+static struct clk_dyn_rcg mdp_src = {
+ .ns_reg = 0x00d0,
+ .md_reg[0] = 0x00c4,
+ .md_reg[1] = 0x00c8,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 31,
+ .mnctr_mode_shift = 9,
+ .n_val_shift = 22,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 30,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 14,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 3,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 11,
+ .freq_tbl = clk_tbl_mdp,
+ .clkr = {
+ .enable_reg = 0x00c0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch mdp_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_clk",
+ .parent_names = (const char *[]){ "mdp_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_lut_clk = {
+ .halt_reg = 0x01e8,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x016c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "mdp_clk" },
+ .num_parents = 1,
+ .name = "mdp_lut_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_vsync_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x0058,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_vsync_clk",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_rot[] = {
+ { 27000000, P_PXO, 1 },
+ { 29540000, P_PLL8, 13 },
+ { 32000000, P_PLL8, 12 },
+ { 38400000, P_PLL8, 10 },
+ { 48000000, P_PLL8, 8 },
+ { 54860000, P_PLL8, 7 },
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 100000000, P_PLL2, 8 },
+ { 114290000, P_PLL2, 7 },
+ { 133330000, P_PLL2, 6 },
+ { 160000000, P_PLL2, 5 },
+ { 200000000, P_PLL2, 4 },
+ { }
+};
+
+static struct clk_dyn_rcg rot_src = {
+ .ns_reg = 0x00e8,
+ .p[0] = {
+ .pre_div_shift = 22,
+ .pre_div_width = 4,
+ },
+ .p[1] = {
+ .pre_div_shift = 26,
+ .pre_div_width = 4,
+ },
+ .s[0] = {
+ .src_sel_shift = 16,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 19,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 30,
+ .freq_tbl = clk_tbl_rot,
+ .clkr = {
+ .enable_reg = 0x00e0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch rot_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x00e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_clk",
+ .parent_names = (const char *[]){ "rot_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+#define P_HDMI_PLL 1
+
+static u8 mmcc_pxo_hdmi_map[] = {
+ [P_PXO] = 0,
+ [P_HDMI_PLL] = 2,
+};
+
+static const char *mmcc_pxo_hdmi[] = {
+ "pxo",
+ "hdmi_pll",
+};
+
+static struct freq_tbl clk_tbl_tv[] = {
+ { 25200000, P_HDMI_PLL, 1, 0, 0 },
+ { 27000000, P_HDMI_PLL, 1, 0, 0 },
+ { 27030000, P_HDMI_PLL, 1, 0, 0 },
+ { 74250000, P_HDMI_PLL, 1, 0, 0 },
+ { 108000000, P_HDMI_PLL, 1, 0, 0 },
+ { 148500000, P_HDMI_PLL, 1, 0, 0 },
+ { }
+};
+
+static struct clk_rcg tv_src = {
+ .ns_reg = 0x00f4,
+ .md_reg = 0x00f0,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 14,
+ .pre_div_width = 2,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_hdmi_map,
+ },
+ .freq_tbl = clk_tbl_tv,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "tv_src",
+ .parent_names = mmcc_pxo_hdmi,
+ .num_parents = 2,
+ .ops = &clk_rcg_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const char *tv_src_name[] = { "tv_src" };
+
+static struct clk_branch tv_enc_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "tv_enc_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch tv_dac_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "tv_dac_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdp_tv_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "mdp_tv_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_tv_clk = {
+ .halt_reg = 0x01d4,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x00ec,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = tv_src_name,
+ .num_parents = 1,
+ .name = "hdmi_tv_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_app_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 25,
+ .clkr = {
+ .enable_reg = 0x005c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hdmi_app_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vcodec[] = {
+ { 27000000, P_PXO, 1, 0 },
+ { 32000000, P_PLL8, 1, 12 },
+ { 48000000, P_PLL8, 1, 8 },
+ { 54860000, P_PLL8, 1, 7 },
+ { 96000000, P_PLL8, 1, 4 },
+ { 133330000, P_PLL2, 1, 6 },
+ { 200000000, P_PLL2, 1, 4 },
+ { 228570000, P_PLL2, 2, 7 },
+ { 266670000, P_PLL2, 1, 3 },
+ { }
+};
+
+static struct clk_dyn_rcg vcodec_src = {
+ .ns_reg = 0x0100,
+ .md_reg[0] = 0x00fc,
+ .md_reg[1] = 0x0128,
+ .mn[0] = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 31,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 11,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 10,
+ .mnctr_reset_bit = 30,
+ .mnctr_mode_shift = 11,
+ .n_val_shift = 19,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 27,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .mux_sel_bit = 13,
+ .freq_tbl = clk_tbl_vcodec,
+ .clkr = {
+ .enable_reg = 0x00f8,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vcodec_clk = {
+ .halt_reg = 0x01d0,
+ .halt_bit = 29,
+ .clkr = {
+ .enable_reg = 0x00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_clk",
+ .parent_names = (const char *[]){ "vcodec_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vpe[] = {
+ { 27000000, P_PXO, 1 },
+ { 34909000, P_PLL8, 11 },
+ { 38400000, P_PLL8, 10 },
+ { 64000000, P_PLL8, 6 },
+ { 76800000, P_PLL8, 5 },
+ { 96000000, P_PLL8, 4 },
+ { 100000000, P_PLL2, 8 },
+ { 160000000, P_PLL2, 5 },
+ { }
+};
+
+static struct clk_rcg vpe_src = {
+ .ns_reg = 0x0118,
+ .p = {
+ .pre_div_shift = 12,
+ .pre_div_width = 4,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_vpe,
+ .clkr = {
+ .enable_reg = 0x0110,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vpe_clk = {
+ .halt_reg = 0x01c8,
+ .halt_bit = 28,
+ .clkr = {
+ .enable_reg = 0x0110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_clk",
+ .parent_names = (const char *[]){ "vpe_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct freq_tbl clk_tbl_vfe[] = {
+ { 13960000, P_PLL8, 1, 2, 55 },
+ { 27000000, P_PXO, 1, 0, 0 },
+ { 36570000, P_PLL8, 1, 2, 21 },
+ { 38400000, P_PLL8, 2, 1, 5 },
+ { 45180000, P_PLL8, 1, 2, 17 },
+ { 48000000, P_PLL8, 2, 1, 4 },
+ { 54860000, P_PLL8, 1, 1, 7 },
+ { 64000000, P_PLL8, 2, 1, 3 },
+ { 76800000, P_PLL8, 1, 1, 5 },
+ { 96000000, P_PLL8, 2, 1, 2 },
+ { 109710000, P_PLL8, 1, 2, 7 },
+ { 128000000, P_PLL8, 1, 1, 3 },
+ { 153600000, P_PLL8, 1, 2, 5 },
+ { 200000000, P_PLL2, 2, 1, 2 },
+ { 228570000, P_PLL2, 1, 2, 7 },
+ { 266667000, P_PLL2, 1, 1, 3 },
+ { 320000000, P_PLL2, 1, 2, 5 },
+ { }
+};
+
+static struct clk_rcg vfe_src = {
+ .ns_reg = 0x0108,
+ .mn = {
+ .mnctr_en_bit = 5,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 6,
+ .n_val_shift = 16,
+ .m_val_shift = 8,
+ .width = 8,
+ },
+ .p = {
+ .pre_div_shift = 10,
+ .pre_div_width = 1,
+ },
+ .s = {
+ .src_sel_shift = 0,
+ .parent_map = mmcc_pxo_pll8_pll2_map,
+ },
+ .freq_tbl = clk_tbl_vfe,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_src",
+ .parent_names = mmcc_pxo_pll8_pll2,
+ .num_parents = 3,
+ .ops = &clk_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch vfe_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_clk",
+ .parent_names = (const char *[]){ "vfe_src" },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch vfe_csi_clk = {
+ .halt_reg = 0x01cc,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x0104,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "vfe_src" },
+ .num_parents = 1,
+ .name = "vfe_csi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gmem_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmem_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 11,
+ .halt_reg = 0x01d8,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mmss_imem_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 15,
+ .halt_reg = 0x01d8,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_imem_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch jpegd_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_b_clk = {
+ .hwcg_reg = 0x0114,
+ .hwcg_bit = 22,
+ .halt_reg = 0x01e8,
+ .halt_bit = 25,
+ .clkr = {
+ .enable_reg = 0x0114,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_b_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_a_clk = {
+ .hwcg_reg = 0x0114,
+ .hwcg_bit = 24,
+ .halt_reg = 0x01e8,
+ .halt_bit = 26,
+ .clkr = {
+ .enable_reg = 0x0114,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_a_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 13,
+ .halt_reg = 0x01d8,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vfe_axi_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 0,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mdp_axi_clk = {
+ .hwcg_reg = 0x0018,
+ .hwcg_bit = 16,
+ .halt_reg = 0x01d8,
+ .halt_bit = 8,
+ .clkr = {
+ .enable_reg = 0x0018,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rot_axi_clk = {
+ .hwcg_reg = 0x0020,
+ .hwcg_bit = 25,
+ .halt_reg = 0x01d8,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x0020,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vpe_axi_clk = {
+ .hwcg_reg = 0x0020,
+ .hwcg_bit = 27,
+ .halt_reg = 0x01d8,
+ .halt_bit = 1,
+ .clkr = {
+ .enable_reg = 0x0020,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_axi_clk = {
+ .hwcg_reg = 0x0244,
+ .hwcg_bit = 24,
+ .halt_reg = 0x0240,
+ .halt_bit = 30,
+ .clkr = {
+ .enable_reg = 0x0244,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_axi_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch amp_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "amp_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch csi_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 16,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "csi_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch dsi_m_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dsi_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 20,
+ .halt_reg = 0x01dc,
+ .halt_bit = 21,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch dsi2_m_ahb_clk = {
+ .halt_reg = 0x01d8,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch dsi2_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 15,
+ .halt_reg = 0x01dc,
+ .halt_bit = 20,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx2d0_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 28,
+ .halt_reg = 0x01dc,
+ .halt_bit = 2,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d0_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx2d1_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 29,
+ .halt_reg = 0x01dc,
+ .halt_bit = 3,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx2d1_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 27,
+ .halt_reg = 0x01dc,
+ .halt_bit = 4,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_m_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 21,
+ .halt_reg = 0x01dc,
+ .halt_bit = 5,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_m_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch hdmi_s_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 22,
+ .halt_reg = 0x01dc,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_s_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch ijpeg_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 9,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "ijpeg_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch mmss_imem_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 12,
+ .halt_reg = 0x01dc,
+ .halt_bit = 10,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_imem_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch jpegd_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 7,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "jpegd_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch mdp_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 11,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch rot_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 13,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "rot_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT
+ },
+ },
+};
+
+static struct clk_branch smmu_ahb_clk = {
+ .hwcg_reg = 0x0008,
+ .hwcg_bit = 26,
+ .halt_reg = 0x01dc,
+ .halt_bit = 22,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch tv_enc_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 23,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "tv_enc_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vcodec_ahb_clk = {
+ .hwcg_reg = 0x0038,
+ .hwcg_bit = 26,
+ .halt_reg = 0x01dc,
+ .halt_bit = 12,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "vcodec_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vfe_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "vfe_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_branch vpe_ahb_clk = {
+ .halt_reg = 0x01dc,
+ .halt_bit = 15,
+ .clkr = {
+ .enable_reg = 0x0008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "vpe_ahb_clk",
+ .ops = &clk_branch_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ },
+};
+
+static struct clk_regmap *mmcc_msm8960_clks[] = {
+ [TV_ENC_AHB_CLK] = &tv_enc_ahb_clk.clkr,
+ [AMP_AHB_CLK] = &amp_ahb_clk.clkr,
+ [DSI2_S_AHB_CLK] = &dsi2_s_ahb_clk.clkr,
+ [JPEGD_AHB_CLK] = &jpegd_ahb_clk.clkr,
+ [GFX2D0_AHB_CLK] = &gfx2d0_ahb_clk.clkr,
+ [DSI_S_AHB_CLK] = &dsi_s_ahb_clk.clkr,
+ [DSI2_M_AHB_CLK] = &dsi2_m_ahb_clk.clkr,
+ [VPE_AHB_CLK] = &vpe_ahb_clk.clkr,
+ [SMMU_AHB_CLK] = &smmu_ahb_clk.clkr,
+ [HDMI_M_AHB_CLK] = &hdmi_m_ahb_clk.clkr,
+ [VFE_AHB_CLK] = &vfe_ahb_clk.clkr,
+ [ROT_AHB_CLK] = &rot_ahb_clk.clkr,
+ [VCODEC_AHB_CLK] = &vcodec_ahb_clk.clkr,
+ [MDP_AHB_CLK] = &mdp_ahb_clk.clkr,
+ [DSI_M_AHB_CLK] = &dsi_m_ahb_clk.clkr,
+ [CSI_AHB_CLK] = &csi_ahb_clk.clkr,
+ [MMSS_IMEM_AHB_CLK] = &mmss_imem_ahb_clk.clkr,
+ [IJPEG_AHB_CLK] = &ijpeg_ahb_clk.clkr,
+ [HDMI_S_AHB_CLK] = &hdmi_s_ahb_clk.clkr,
+ [GFX3D_AHB_CLK] = &gfx3d_ahb_clk.clkr,
+ [GFX2D1_AHB_CLK] = &gfx2d1_ahb_clk.clkr,
+ [JPEGD_AXI_CLK] = &jpegd_axi_clk.clkr,
+ [GMEM_AXI_CLK] = &gmem_axi_clk.clkr,
+ [MDP_AXI_CLK] = &mdp_axi_clk.clkr,
+ [MMSS_IMEM_AXI_CLK] = &mmss_imem_axi_clk.clkr,
+ [IJPEG_AXI_CLK] = &ijpeg_axi_clk.clkr,
+ [GFX3D_AXI_CLK] = &gfx3d_axi_clk.clkr,
+ [VCODEC_AXI_CLK] = &vcodec_axi_clk.clkr,
+ [VFE_AXI_CLK] = &vfe_axi_clk.clkr,
+ [VPE_AXI_CLK] = &vpe_axi_clk.clkr,
+ [ROT_AXI_CLK] = &rot_axi_clk.clkr,
+ [VCODEC_AXI_A_CLK] = &vcodec_axi_a_clk.clkr,
+ [VCODEC_AXI_B_CLK] = &vcodec_axi_b_clk.clkr,
+ [CSI0_SRC] = &csi0_src.clkr,
+ [CSI0_CLK] = &csi0_clk.clkr,
+ [CSI0_PHY_CLK] = &csi0_phy_clk.clkr,
+ [CSI1_SRC] = &csi1_src.clkr,
+ [CSI1_CLK] = &csi1_clk.clkr,
+ [CSI1_PHY_CLK] = &csi1_phy_clk.clkr,
+ [CSI2_SRC] = &csi2_src.clkr,
+ [CSI2_CLK] = &csi2_clk.clkr,
+ [CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+ [CSI_PIX_CLK] = &csi_pix_clk.clkr,
+ [CSI_RDI_CLK] = &csi_rdi_clk.clkr,
+ [MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
+ [HDMI_APP_CLK] = &hdmi_app_clk.clkr,
+ [CSI_PIX1_CLK] = &csi_pix1_clk.clkr,
+ [CSI_RDI2_CLK] = &csi_rdi2_clk.clkr,
+ [CSI_RDI1_CLK] = &csi_rdi1_clk.clkr,
+ [GFX2D0_SRC] = &gfx2d0_src.clkr,
+ [GFX2D0_CLK] = &gfx2d0_clk.clkr,
+ [GFX2D1_SRC] = &gfx2d1_src.clkr,
+ [GFX2D1_CLK] = &gfx2d1_clk.clkr,
+ [GFX3D_SRC] = &gfx3d_src.clkr,
+ [GFX3D_CLK] = &gfx3d_clk.clkr,
+ [IJPEG_SRC] = &ijpeg_src.clkr,
+ [IJPEG_CLK] = &ijpeg_clk.clkr,
+ [JPEGD_SRC] = &jpegd_src.clkr,
+ [JPEGD_CLK] = &jpegd_clk.clkr,
+ [MDP_SRC] = &mdp_src.clkr,
+ [MDP_CLK] = &mdp_clk.clkr,
+ [MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+ [ROT_SRC] = &rot_src.clkr,
+ [ROT_CLK] = &rot_clk.clkr,
+ [TV_ENC_CLK] = &tv_enc_clk.clkr,
+ [TV_DAC_CLK] = &tv_dac_clk.clkr,
+ [HDMI_TV_CLK] = &hdmi_tv_clk.clkr,
+ [MDP_TV_CLK] = &mdp_tv_clk.clkr,
+ [TV_SRC] = &tv_src.clkr,
+ [VCODEC_SRC] = &vcodec_src.clkr,
+ [VCODEC_CLK] = &vcodec_clk.clkr,
+ [VFE_SRC] = &vfe_src.clkr,
+ [VFE_CLK] = &vfe_clk.clkr,
+ [VFE_CSI_CLK] = &vfe_csi_clk.clkr,
+ [VPE_SRC] = &vpe_src.clkr,
+ [VPE_CLK] = &vpe_clk.clkr,
+ [CAMCLK0_SRC] = &camclk0_src.clkr,
+ [CAMCLK0_CLK] = &camclk0_clk.clkr,
+ [CAMCLK1_SRC] = &camclk1_src.clkr,
+ [CAMCLK1_CLK] = &camclk1_clk.clkr,
+ [CAMCLK2_SRC] = &camclk2_src.clkr,
+ [CAMCLK2_CLK] = &camclk2_clk.clkr,
+ [CSIPHYTIMER_SRC] = &csiphytimer_src.clkr,
+ [CSIPHY2_TIMER_CLK] = &csiphy2_timer_clk.clkr,
+ [CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
+ [CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
+ [PLL2] = &pll2.clkr,
+};
+
+static const struct qcom_reset_map mmcc_msm8960_resets[] = {
+ [VPE_AXI_RESET] = { 0x0208, 15 },
+ [IJPEG_AXI_RESET] = { 0x0208, 14 },
+ [MPD_AXI_RESET] = { 0x0208, 13 },
+ [VFE_AXI_RESET] = { 0x0208, 9 },
+ [SP_AXI_RESET] = { 0x0208, 8 },
+ [VCODEC_AXI_RESET] = { 0x0208, 7 },
+ [ROT_AXI_RESET] = { 0x0208, 6 },
+ [VCODEC_AXI_A_RESET] = { 0x0208, 5 },
+ [VCODEC_AXI_B_RESET] = { 0x0208, 4 },
+ [FAB_S3_AXI_RESET] = { 0x0208, 3 },
+ [FAB_S2_AXI_RESET] = { 0x0208, 2 },
+ [FAB_S1_AXI_RESET] = { 0x0208, 1 },
+ [FAB_S0_AXI_RESET] = { 0x0208 },
+ [SMMU_GFX3D_ABH_RESET] = { 0x020c, 31 },
+ [SMMU_VPE_AHB_RESET] = { 0x020c, 30 },
+ [SMMU_VFE_AHB_RESET] = { 0x020c, 29 },
+ [SMMU_ROT_AHB_RESET] = { 0x020c, 28 },
+ [SMMU_VCODEC_B_AHB_RESET] = { 0x020c, 27 },
+ [SMMU_VCODEC_A_AHB_RESET] = { 0x020c, 26 },
+ [SMMU_MDP1_AHB_RESET] = { 0x020c, 25 },
+ [SMMU_MDP0_AHB_RESET] = { 0x020c, 24 },
+ [SMMU_JPEGD_AHB_RESET] = { 0x020c, 23 },
+ [SMMU_IJPEG_AHB_RESET] = { 0x020c, 22 },
+ [SMMU_GFX2D0_AHB_RESET] = { 0x020c, 21 },
+ [SMMU_GFX2D1_AHB_RESET] = { 0x020c, 20 },
+ [APU_AHB_RESET] = { 0x020c, 18 },
+ [CSI_AHB_RESET] = { 0x020c, 17 },
+ [TV_ENC_AHB_RESET] = { 0x020c, 15 },
+ [VPE_AHB_RESET] = { 0x020c, 14 },
+ [FABRIC_AHB_RESET] = { 0x020c, 13 },
+ [GFX2D0_AHB_RESET] = { 0x020c, 12 },
+ [GFX2D1_AHB_RESET] = { 0x020c, 11 },
+ [GFX3D_AHB_RESET] = { 0x020c, 10 },
+ [HDMI_AHB_RESET] = { 0x020c, 9 },
+ [MSSS_IMEM_AHB_RESET] = { 0x020c, 8 },
+ [IJPEG_AHB_RESET] = { 0x020c, 7 },
+ [DSI_M_AHB_RESET] = { 0x020c, 6 },
+ [DSI_S_AHB_RESET] = { 0x020c, 5 },
+ [JPEGD_AHB_RESET] = { 0x020c, 4 },
+ [MDP_AHB_RESET] = { 0x020c, 3 },
+ [ROT_AHB_RESET] = { 0x020c, 2 },
+ [VCODEC_AHB_RESET] = { 0x020c, 1 },
+ [VFE_AHB_RESET] = { 0x020c, 0 },
+ [DSI2_M_AHB_RESET] = { 0x0210, 31 },
+ [DSI2_S_AHB_RESET] = { 0x0210, 30 },
+ [CSIPHY2_RESET] = { 0x0210, 29 },
+ [CSI_PIX1_RESET] = { 0x0210, 28 },
+ [CSIPHY0_RESET] = { 0x0210, 27 },
+ [CSIPHY1_RESET] = { 0x0210, 26 },
+ [DSI2_RESET] = { 0x0210, 25 },
+ [VFE_CSI_RESET] = { 0x0210, 24 },
+ [MDP_RESET] = { 0x0210, 21 },
+ [AMP_RESET] = { 0x0210, 20 },
+ [JPEGD_RESET] = { 0x0210, 19 },
+ [CSI1_RESET] = { 0x0210, 18 },
+ [VPE_RESET] = { 0x0210, 17 },
+ [MMSS_FABRIC_RESET] = { 0x0210, 16 },
+ [VFE_RESET] = { 0x0210, 15 },
+ [GFX2D0_RESET] = { 0x0210, 14 },
+ [GFX2D1_RESET] = { 0x0210, 13 },
+ [GFX3D_RESET] = { 0x0210, 12 },
+ [HDMI_RESET] = { 0x0210, 11 },
+ [MMSS_IMEM_RESET] = { 0x0210, 10 },
+ [IJPEG_RESET] = { 0x0210, 9 },
+ [CSI0_RESET] = { 0x0210, 8 },
+ [DSI_RESET] = { 0x0210, 7 },
+ [VCODEC_RESET] = { 0x0210, 6 },
+ [MDP_TV_RESET] = { 0x0210, 4 },
+ [MDP_VSYNC_RESET] = { 0x0210, 3 },
+ [ROT_RESET] = { 0x0210, 2 },
+ [TV_HDMI_RESET] = { 0x0210, 1 },
+ [TV_ENC_RESET] = { 0x0210 },
+ [CSI2_RESET] = { 0x0214, 2 },
+ [CSI_RDI1_RESET] = { 0x0214, 1 },
+ [CSI_RDI2_RESET] = { 0x0214 },
+};
+
+static const struct regmap_config mmcc_msm8960_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x334,
+ .fast_io = true,
+};
+
+static const struct of_device_id mmcc_msm8960_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8960" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8960_match_table);
+
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int mmcc_msm8960_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &mmcc_msm8960_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(mmcc_msm8960_clks);
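+	/* One allocation covers the qcom_cc container plus its trailing clk pointer array */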
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!mmcc_msm8960_clks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, mmcc_msm8960_clks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+	reset->rcdev.ops = &qcom_reset_ops;
+	reset->rcdev.owner = THIS_MODULE;
+	reset->rcdev.nr_resets = ARRAY_SIZE(mmcc_msm8960_resets);
+	reset->regmap = regmap;
+	reset->reset_map = mmcc_msm8960_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int mmcc_msm8960_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver mmcc_msm8960_driver = {
+ .probe = mmcc_msm8960_probe,
+ .remove = mmcc_msm8960_remove,
+ .driver = {
+ .name = "mmcc-msm8960",
+ .owner = THIS_MODULE,
+ .of_match_table = mmcc_msm8960_match_table,
+ },
+};
+
+module_platform_driver(mmcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8960");
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
new file mode 100644
index 000000000000..c95774514b81
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -0,0 +1,2625 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
+#include <dt-bindings/reset/qcom,mmcc-msm8974.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO 0
+#define P_MMPLL0 1
+#define P_EDPLINK 1
+#define P_MMPLL1 2
+#define P_HDMIPLL 2
+#define P_GPLL0 3
+#define P_EDPVCO 3
+#define P_GPLL1 4
+#define P_DSI0PLL 4
+#define P_MMPLL2 4
+#define P_MMPLL3 4
+#define P_DSI1PLL 5
+
+static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+};
+
+static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+};
+
+static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_HDMIPLL] = 4,
+ [P_GPLL0] = 5,
+ [P_DSI0PLL] = 2,
+ [P_DSI1PLL] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "hdmipll",
+ "mmss_gpll0_vote",
+ "dsi0pll",
+ "dsi1pll",
+};
+
+static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_MMPLL2] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "mmpll2",
+};
+
+static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_MMPLL3] = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "mmpll3",
+};
+
+static const u8 mmcc_xo_mmpll0_1_gpll1_0_map[] = {
+ [P_XO] = 0,
+ [P_MMPLL0] = 1,
+ [P_MMPLL1] = 2,
+ [P_GPLL0] = 5,
+ [P_GPLL1] = 4,
+};
+
+static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
+ "xo",
+ "mmpll0_vote",
+ "mmpll1_vote",
+ "mmss_gpll0_vote",
+ "gpll1_vote",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
+ [P_XO] = 0,
+ [P_EDPLINK] = 4,
+ [P_HDMIPLL] = 3,
+ [P_EDPVCO] = 5,
+ [P_DSI0PLL] = 1,
+ [P_DSI1PLL] = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp[] = {
+ "xo",
+ "edp_link_clk",
+ "hdmipll",
+ "edp_vco_div",
+ "dsi0pll",
+ "dsi1pll",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+ [P_XO] = 0,
+ [P_EDPLINK] = 4,
+ [P_HDMIPLL] = 3,
+ [P_GPLL0] = 5,
+ [P_DSI0PLL] = 1,
+ [P_DSI1PLL] = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+ "xo",
+ "edp_link_clk",
+ "hdmipll",
+ "gpll0_vote",
+ "dsi0pll",
+ "dsi1pll",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
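+/* h is the pre-divider and may be fractional (e.g. 2.5); the freq_tbl stores it encoded as 2*h - 1 */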
+
+static struct clk_pll mmpll0 = {
+ .l_reg = 0x0004,
+ .m_reg = 0x0008,
+ .n_reg = 0x000c,
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap mmpll0_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0_vote",
+ .parent_names = (const char *[]){ "mmpll0" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+ .config_reg = 0x0054,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap mmpll1_vote = {
+ .enable_reg = 0x0100,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1_vote",
+ .parent_names = (const char *[]){ "mmpll1" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll mmpll2 = {
+ .l_reg = 0x4104,
+ .m_reg = 0x4108,
+ .n_reg = 0x410c,
+ .config_reg = 0x4114,
+ .mode_reg = 0x4100,
+ .status_reg = 0x411c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_pll mmpll3 = {
+ .l_reg = 0x0084,
+ .m_reg = 0x0088,
+ .n_reg = 0x008c,
+ .config_reg = 0x0094,
+ .mode_reg = 0x0080,
+ .status_reg = 0x009c,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_rcg2 mmss_ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_ahb_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F( 19200000, P_XO, 1, 0, 0),
+ F( 37500000, P_GPLL0, 16, 0, 0),
+ F( 50000000, P_GPLL0, 12, 0, 0),
+ F( 75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
+};
+
+static struct clk_rcg2 mmss_axi_clk_src = {
+ .cmd_rcgr = 0x5040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mmss_axi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmss_axi_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F( 19200000, P_XO, 1, 0, 0),
+ F( 37500000, P_GPLL0, 16, 0, 0),
+ F( 50000000, P_GPLL0, 12, 0, 0),
+ F( 75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+};
+
+static struct clk_rcg2 ocmemnoc_clk_src = {
+ .cmd_rcgr = 0x5090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_ocmemnoc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ocmemnoc_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_csi0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(80000000, P_GPLL0, 7.5, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(109090000, P_GPLL0, 5.5, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(85710000, P_GPLL0, 7, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_MMPLL0, 6, 0, 0),
+ F(160000000, P_MMPLL0, 5, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_dsi_hdmi_gpll0_map,
+ .freq_tbl = ftbl_mdss_mdp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg1_clk_src = {
+ .cmd_rcgr = 0x3520,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 jpeg2_clk_src = {
+ .cmd_rcgr = 0x3540,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_pclk0_clk[] = {
+ F(125000000, P_DSI0PLL, 2, 0, 0),
+ F(250000000, P_DSI0PLL, 1, 0, 0),
+ { }
+};
+
+static struct freq_tbl ftbl_mdss_pclk1_clk[] = {
+ F(125000000, P_DSI1PLL, 2, 0, 0),
+ F(250000000, P_DSI1PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_pclk0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_pclk1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(133330000, P_MMPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_3_gpll0_map,
+ .freq_tbl = ftbl_venus0_vcodec0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_3_gpll0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_cci_cci_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(24000, P_XO, 16, 1, 50),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(12000000, P_GPLL0, 10, 1, 5),
+ F(13000000, P_GPLL0, 4, 13, 150),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_gpll1_0_map,
+ .freq_tbl = ftbl_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_gpll1_0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_1_gpll1_0_map,
+ .freq_tbl = ftbl_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_1_gpll1_0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0, 10, 1, 10),
+ F(8000000, P_GPLL0, 15, 1, 5),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 12.5, 1, 3),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_MMPLL0, 5, 1, 5),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(64000000, P_MMPLL0, 12.5, 0, 0),
+ F(66670000, P_GPLL0, 9, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_mclk0_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_MMPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+ F(133330000, P_GPLL0, 4.5, 0, 0),
+ F(266670000, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_camss_vfe_cpp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_byte0_clk[] = {
+ F(93750000, P_DSI0PLL, 8, 0, 0),
+ F(187500000, P_DSI0PLL, 4, 0, 0),
+ { }
+};
+
+static struct freq_tbl ftbl_mdss_byte1_clk[] = {
+ F(93750000, P_DSI1PLL, 8, 0, 0),
+ F(187500000, P_DSI1PLL, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_byte0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_byte1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edpaux_clk_src = {
+ .cmd_rcgr = 0x20e0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_edpaux_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edpaux_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+ F(135000000, P_EDPLINK, 2, 0, 0),
+ F(270000000, P_EDPLINK, 11, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edplink_clk_src = {
+ .cmd_rcgr = 0x20c0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_edplink_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edplink_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_edppixel_clk[] = {
+ F(175000000, P_EDPVCO, 2, 0, 0),
+ F(350000000, P_EDPVCO, 11, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 edppixel_clk_src = {
+ .cmd_rcgr = 0x20a0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_map,
+ .freq_tbl = ftbl_mdss_edppixel_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "edppixel_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_extpclk_clk[] = {
+ F(25200000, P_HDMIPLL, 1, 0, 0),
+ F(27000000, P_HDMIPLL, 1, 0, 0),
+ F(27030000, P_HDMIPLL, 1, 0, 0),
+ F(65000000, P_HDMIPLL, 1, 0, 0),
+ F(74250000, P_HDMIPLL, 1, 0, 0),
+ F(108000000, P_HDMIPLL, 1, 0, 0),
+ F(148500000, P_HDMIPLL, 1, 0, 0),
+ F(268500000, P_HDMIPLL, 1, 0, 0),
+ F(297000000, P_HDMIPLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+ .freq_tbl = ftbl_mdss_extpclk_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "extpclk_clk_src",
+ .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+ .cmd_rcgr = 0x2100,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_hdmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+ .freq_tbl = ftbl_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch camss_cci_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_cci_clk = {
+ .halt_reg = 0x3344,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_cci_clk",
+ .parent_names = (const char *[]){
+ "cci_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phy_clk = {
+ .halt_reg = 0x30c4,
+ .clkr = {
+ .enable_reg = 0x30c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phy_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_names = (const char *[]){
+ "csi0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phy_clk = {
+ .halt_reg = 0x3134,
+ .clkr = {
+ .enable_reg = 0x3134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phy_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_names = (const char *[]){
+ "csi1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phy_clk = {
+ .halt_reg = 0x3194,
+ .clkr = {
+ .enable_reg = 0x3194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phy_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_names = (const char *[]){
+ "csi2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3phy_clk = {
+ .halt_reg = 0x31f4,
+ .clkr = {
+ .enable_reg = 0x31f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3phy_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_names = (const char *[]){
+ "csi3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_names = (const char *[]){
+ "vfe1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp0_clk = {
+ .halt_reg = 0x3444,
+ .clkr = {
+ .enable_reg = 0x3444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk",
+ .parent_names = (const char *[]){
+ "camss_gp0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp1_clk = {
+ .halt_reg = 0x3474,
+ .clkr = {
+ .enable_reg = 0x3474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk",
+ .parent_names = (const char *[]){
+ "camss_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg0_clk",
+ .parent_names = (const char *[]){
+ "jpeg0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg1_clk = {
+ .halt_reg = 0x35ac,
+ .clkr = {
+ .enable_reg = 0x35ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg1_clk",
+ .parent_names = (const char *[]){
+ "jpeg1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg2_clk = {
+ .halt_reg = 0x35b0,
+ .clkr = {
+ .enable_reg = 0x35b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg2_clk",
+ .parent_names = (const char *[]){
+ "jpeg2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ocmemnoc_clk = {
+ .halt_reg = 0x35bc,
+ .clkr = {
+ .enable_reg = 0x35bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_jpeg_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_names = (const char *[]){
+ "mclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_names = (const char *[]){
+ "mclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_names = (const char *[]){
+ "mclk2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_names = (const char *[]){
+ "mclk3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy0_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy0_csi0phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi0phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy1_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy1_csi1phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi1phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_phy2_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_phy2_csi2phytimer_clk",
+ .parent_names = (const char *[]){
+ "csi2phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_cpp_clk",
+ .parent_names = (const char *[]){
+ "cpp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe0_clk",
+ .parent_names = (const char *[]){
+ "vfe0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe1_clk",
+ .parent_names = (const char *[]){
+ "vfe1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_axi_clk = {
+ .halt_reg = 0x36bc,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vfe_ocmemnoc_clk = {
+ .halt_reg = 0x36c0,
+ .clkr = {
+ .enable_reg = 0x36c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vfe_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_names = (const char *[]){
+ "byte1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edpaux_clk = {
+ .halt_reg = 0x2334,
+ .clkr = {
+ .enable_reg = 0x2334,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edpaux_clk",
+ .parent_names = (const char *[]){
+ "edpaux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edplink_clk = {
+ .halt_reg = 0x2330,
+ .clkr = {
+ .enable_reg = 0x2330,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edplink_clk",
+ .parent_names = (const char *[]){
+ "edplink_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_edppixel_clk = {
+ .halt_reg = 0x232c,
+ .clkr = {
+ .enable_reg = 0x232c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_edppixel_clk",
+ .parent_names = (const char *[]){
+ "edppixel_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_names = (const char *[]){
+ "esc1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+ .halt_reg = 0x2324,
+ .clkr = {
+ .enable_reg = 0x2324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_extpclk_clk",
+ .parent_names = (const char *[]){
+ "extpclk_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+ .halt_reg = 0x230c,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+ .halt_reg = 0x2338,
+ .clkr = {
+ .enable_reg = 0x2338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_clk",
+ .parent_names = (const char *[]){
+ "hdmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+ .halt_reg = 0x2320,
+ .clkr = {
+ .enable_reg = 0x2320,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_names = (const char *[]){
+ "pclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_misc_ahb_clk = {
+ .halt_reg = 0x502c,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_misc_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_ahb_clk = {
+ .halt_reg = 0x5024,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_bto_ahb_clk = {
+ .halt_reg = 0x5028,
+ .clkr = {
+ .enable_reg = 0x5028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_bto_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmssnoc_axi_clk = {
+ .halt_reg = 0x506c,
+ .clkr = {
+ .enable_reg = 0x506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmssnoc_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_s0_axi_clk = {
+ .halt_reg = 0x5064,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_s0_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemcx_ahb_clk = {
+ .halt_reg = 0x405c,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemcx_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemcx_ocmemnoc_clk = {
+ .halt_reg = 0x4058,
+ .clkr = {
+ .enable_reg = 0x4058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemcx_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_ocmemgx_clk = {
+ .halt_reg = 0x402c,
+ .clkr = {
+ .enable_reg = 0x402c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_ocmemgx_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ocmemnoc_clk = {
+ .halt_reg = 0x50b4,
+ .clkr = {
+ .enable_reg = 0x50b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxili_gfx3d_clk = {
+ .halt_reg = 0x4028,
+ .clkr = {
+ .enable_reg = 0x4028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxili_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxilicx_ahb_clk = {
+ .halt_reg = 0x403c,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxilicx_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch oxilicx_axi_clk = {
+ .halt_reg = 0x4038,
+ .clkr = {
+ .enable_reg = 0x4038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "oxilicx_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ahb_clk = {
+ .halt_reg = 0x1030,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ahb_clk",
+ .parent_names = (const char *[]){
+ "mmss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_axi_clk = {
+ .halt_reg = 0x1034,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_axi_clk",
+ .parent_names = (const char *[]){
+ "mmss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_ocmemnoc_clk = {
+ .halt_reg = 0x1038,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_ocmemnoc_clk",
+ .parent_names = (const char *[]){
+ "ocmemnoc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch venus0_vcodec0_clk = {
+ .halt_reg = 0x1028,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "venus0_vcodec0_clk",
+ .parent_names = (const char *[]){
+ "vcodec0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct pll_config mmpll1_config = {
+ .l = 60,
+ .m = 25,
+ .n = 32,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x3 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+};
+
+static const struct pll_config mmpll3_config = {
+ .l = 48,
+ .m = 7,
+ .n = 16,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x3 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_regmap *mmcc_msm8974_clocks[] = {
+ [MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
+ [MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
+ [OCMEMNOC_CLK_SRC] = &ocmemnoc_clk_src.clkr,
+ [MMPLL0] = &mmpll0.clkr,
+ [MMPLL0_VOTE] = &mmpll0_vote,
+ [MMPLL1] = &mmpll1.clkr,
+ [MMPLL1_VOTE] = &mmpll1_vote,
+ [MMPLL2] = &mmpll2.clkr,
+ [MMPLL3] = &mmpll3.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
+ [JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [EDPAUX_CLK_SRC] = &edpaux_clk_src.clkr,
+ [EDPLINK_CLK_SRC] = &edplink_clk_src.clkr,
+ [EDPPIXEL_CLK_SRC] = &edppixel_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+ [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+ [CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+ [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+ [CAMSS_JPEG_JPEG1_CLK] = &camss_jpeg_jpeg1_clk.clkr,
+ [CAMSS_JPEG_JPEG2_CLK] = &camss_jpeg_jpeg2_clk.clkr,
+ [CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+ [CAMSS_JPEG_JPEG_OCMEMNOC_CLK] = &camss_jpeg_jpeg_ocmemnoc_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+ [CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+ [CAMSS_PHY2_CSI2PHYTIMER_CLK] = &camss_phy2_csi2phytimer_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+ [CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+ [CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+ [CAMSS_VFE_VFE1_CLK] = &camss_vfe_vfe1_clk.clkr,
+ [CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+ [CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+ [CAMSS_VFE_VFE_OCMEMNOC_CLK] = &camss_vfe_vfe_ocmemnoc_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_EDPAUX_CLK] = &mdss_edpaux_clk.clkr,
+ [MDSS_EDPLINK_CLK] = &mdss_edplink_clk.clkr,
+ [MDSS_EDPPIXEL_CLK] = &mdss_edppixel_clk.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+ [MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
+ [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+ [MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
+ [MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
+ [MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+ [MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+ [OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+ [OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+ [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+ [OCMEMNOC_CLK] = &ocmemnoc_clk.clkr,
+ [OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+ [OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+ [OXILICX_AXI_CLK] = &oxilicx_axi_clk.clkr,
+ [VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+ [VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+ [VENUS0_OCMEMNOC_CLK] = &venus0_ocmemnoc_clk.clkr,
+ [VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+};
+
+static const struct qcom_reset_map mmcc_msm8974_resets[] = {
+ [SPDM_RESET] = { 0x0200 },
+ [SPDM_RM_RESET] = { 0x0300 },
+ [VENUS0_RESET] = { 0x1020 },
+ [MDSS_RESET] = { 0x2300 },
+ [CAMSS_PHY0_RESET] = { 0x3020 },
+ [CAMSS_PHY1_RESET] = { 0x3050 },
+ [CAMSS_PHY2_RESET] = { 0x3080 },
+ [CAMSS_CSI0_RESET] = { 0x30b0 },
+ [CAMSS_CSI0PHY_RESET] = { 0x30c0 },
+ [CAMSS_CSI0RDI_RESET] = { 0x30d0 },
+ [CAMSS_CSI0PIX_RESET] = { 0x30e0 },
+ [CAMSS_CSI1_RESET] = { 0x3120 },
+ [CAMSS_CSI1PHY_RESET] = { 0x3130 },
+ [CAMSS_CSI1RDI_RESET] = { 0x3140 },
+ [CAMSS_CSI1PIX_RESET] = { 0x3150 },
+ [CAMSS_CSI2_RESET] = { 0x3180 },
+ [CAMSS_CSI2PHY_RESET] = { 0x3190 },
+ [CAMSS_CSI2RDI_RESET] = { 0x31a0 },
+ [CAMSS_CSI2PIX_RESET] = { 0x31b0 },
+ [CAMSS_CSI3_RESET] = { 0x31e0 },
+ [CAMSS_CSI3PHY_RESET] = { 0x31f0 },
+ [CAMSS_CSI3RDI_RESET] = { 0x3200 },
+ [CAMSS_CSI3PIX_RESET] = { 0x3210 },
+ [CAMSS_ISPIF_RESET] = { 0x3220 },
+ [CAMSS_CCI_RESET] = { 0x3340 },
+ [CAMSS_MCLK0_RESET] = { 0x3380 },
+ [CAMSS_MCLK1_RESET] = { 0x33b0 },
+ [CAMSS_MCLK2_RESET] = { 0x33e0 },
+ [CAMSS_MCLK3_RESET] = { 0x3410 },
+ [CAMSS_GP0_RESET] = { 0x3440 },
+ [CAMSS_GP1_RESET] = { 0x3470 },
+ [CAMSS_TOP_RESET] = { 0x3480 },
+ [CAMSS_MICRO_RESET] = { 0x3490 },
+ [CAMSS_JPEG_RESET] = { 0x35a0 },
+ [CAMSS_VFE_RESET] = { 0x36a0 },
+ [CAMSS_CSI_VFE0_RESET] = { 0x3700 },
+ [CAMSS_CSI_VFE1_RESET] = { 0x3710 },
+ [OXILI_RESET] = { 0x4020 },
+ [OXILICX_RESET] = { 0x4030 },
+ [OCMEMCX_RESET] = { 0x4050 },
+ [MMSS_RBCRP_RESET] = { 0x4080 },
+ [MMSSNOCAHB_RESET] = { 0x5020 },
+ [MMSSNOCAXI_RESET] = { 0x5060 },
+ [OCMEMNOC_RESET] = { 0x50b0 },
+};
+
+static const struct regmap_config mmcc_msm8974_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5104,
+ .fast_io = true,
+};
+
+static const struct of_device_id mmcc_msm8974_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8974" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
+
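+/*
+ * Bundles the reset controller, the clk_onecell_data and the trailing
+ * array of registered clocks into a single allocation.
+ */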
+struct qcom_cc {
+ struct qcom_reset_controller reset;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+static int mmcc_msm8974_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int i, ret;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct clk_onecell_data *data;
+ struct clk **clks;
+ struct regmap *regmap;
+ size_t num_clks;
+ struct qcom_reset_controller *reset;
+ struct qcom_cc *cc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &mmcc_msm8974_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ num_clks = ARRAY_SIZE(mmcc_msm8974_clocks);
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
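+ /* Program MMPLL1 and MMPLL3 with their rate configuration before use */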
+ clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
+ clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+
+ for (i = 0; i < num_clks; i++) {
+ if (!mmcc_msm8974_clocks[i])
+ continue;
+ clk = devm_clk_register_regmap(dev, mmcc_msm8974_clocks[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret)
+ return ret;
+
+ reset = &cc->reset;
+ reset->rcdev.of_node = dev->of_node;
+ reset->rcdev.ops = &qcom_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = ARRAY_SIZE(mmcc_msm8974_resets);
+ reset->regmap = regmap;
+ reset->reset_map = mmcc_msm8974_resets;
+ platform_set_drvdata(pdev, &reset->rcdev);
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ of_clk_del_provider(dev->of_node);
+
+ return ret;
+}
+
+static int mmcc_msm8974_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ reset_controller_unregister(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver mmcc_msm8974_driver = {
+ .probe = mmcc_msm8974_probe,
+ .remove = mmcc_msm8974_remove,
+ .driver = {
+ .name = "mmcc-msm8974",
+ .owner = THIS_MODULE,
+ .of_match_table = mmcc_msm8974_match_table,
+ },
+};
+module_platform_driver(mmcc_msm8974_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8974 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8974");
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
new file mode 100644
index 000000000000..6c977d3a8590
--- /dev/null
+++ b/drivers/clk/qcom/reset.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+
+#include "reset.h"
+
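+/*
+ * Pulse the reset line: assert the reset, wait briefly for the signal
+ * to propagate, then deassert it again.
+ */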
+static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ rcdev->ops->assert(rcdev, id);
+ udelay(1);
+ rcdev->ops->deassert(rcdev, id);
+ return 0;
+}
+
+static int
+qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct qcom_reset_controller *rst;
+ const struct qcom_reset_map *map;
+ u32 mask;
+
+ rst = to_qcom_reset_controller(rcdev);
+ map = &rst->reset_map[id];
+ mask = BIT(map->bit);
+
+ return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+}
+
+static int
+qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct qcom_reset_controller *rst;
+ const struct qcom_reset_map *map;
+ u32 mask;
+
+ rst = to_qcom_reset_controller(rcdev);
+ map = &rst->reset_map[id];
+ mask = BIT(map->bit);
+
+ return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+}
+
+struct reset_control_ops qcom_reset_ops = {
+ .reset = qcom_reset,
+ .assert = qcom_reset_assert,
+ .deassert = qcom_reset_deassert,
+};
+EXPORT_SYMBOL_GPL(qcom_reset_ops);
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
new file mode 100644
index 000000000000..0e11e2130f97
--- /dev/null
+++ b/drivers/clk/qcom/reset.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_RESET_H__
+#define __QCOM_CLK_RESET_H__
+
+#include <linux/reset-controller.h>
+
+struct qcom_reset_map {
+ unsigned int reg;
+ u8 bit;
+};
+
+struct regmap;
+
+struct qcom_reset_controller {
+ const struct qcom_reset_map *reset_map;
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+};
+
+#define to_qcom_reset_controller(r) \
+ container_of(r, struct qcom_reset_controller, rcdev)
+
+extern struct reset_control_ops qcom_reset_ops;
+
+#endif
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 68e515d093d8..884187fbfe00 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -14,9 +14,17 @@
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <dt-bindings/clk/exynos-audss-clk.h>
+enum exynos_audss_clk_type {
+ TYPE_EXYNOS4210,
+ TYPE_EXYNOS5250,
+ TYPE_EXYNOS5420,
+};
+
static DEFINE_SPINLOCK(lock);
static struct clk **clk_table;
static void __iomem *reg_base;
@@ -26,10 +34,6 @@ static struct clk_onecell_data clk_data;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ASS_CLK_SRC, 0},
@@ -61,31 +65,69 @@ static struct syscore_ops exynos_audss_clk_syscore_ops = {
};
#endif /* CONFIG_PM_SLEEP */
+static const struct of_device_id exynos_audss_clk_of_match[] = {
+ { .compatible = "samsung,exynos4210-audss-clock",
+ .data = (void *)TYPE_EXYNOS4210, },
+ { .compatible = "samsung,exynos5250-audss-clock",
+ .data = (void *)TYPE_EXYNOS5250, },
+ { .compatible = "samsung,exynos5420-audss-clock",
+ .data = (void *)TYPE_EXYNOS5420, },
+ {},
+};
+
/* register exynos_audss clocks */
-static void __init exynos_audss_clk_init(struct device_node *np)
+static int exynos_audss_clk_probe(struct platform_device *pdev)
{
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: failed to map audss registers\n", __func__);
- return;
+ int i, ret = 0;
+ struct resource *res;
+ const char *mout_audss_p[] = {"fin_pll", "fout_epll"};
+ const char *mout_i2s_p[] = {"mout_audss", "cdclk0", "sclk_audio0"};
+ const char *sclk_pcm_p = "sclk_pcm0";
+ struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
+ const struct of_device_id *match;
+ enum exynos_audss_clk_type variant;
+
+ match = of_match_node(exynos_audss_clk_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ variant = (enum exynos_audss_clk_type)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(&pdev->dev, "failed to map audss registers\n");
+ return PTR_ERR(reg_base);
}
- clk_table = kzalloc(sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
+ clk_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
GFP_KERNEL);
- if (!clk_table) {
- pr_err("%s: could not allocate clk lookup table\n", __func__);
- return;
- }
+ if (!clk_table)
+ return -ENOMEM;
clk_data.clks = clk_table;
- clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-
+ if (variant == TYPE_EXYNOS5420)
+ clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
+ else
+ clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS - 1;
+
+ pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ pll_in = devm_clk_get(&pdev->dev, "pll_in");
+ if (!IS_ERR(pll_ref))
+ mout_audss_p[0] = __clk_get_name(pll_ref);
+ if (!IS_ERR(pll_in))
+ mout_audss_p[1] = __clk_get_name(pll_in);
clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
+ cdclk = devm_clk_get(&pdev->dev, "cdclk");
+ sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
+ if (!IS_ERR(cdclk))
+ mout_i2s_p[1] = __clk_get_name(cdclk);
+ if (!IS_ERR(sclk_audio))
+ mout_i2s_p[2] = __clk_get_name(sclk_audio);
clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
@@ -119,17 +161,88 @@ static void __init exynos_audss_clk_init(struct device_node *np)
"sclk_pcm", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
+ sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
+ if (!IS_ERR(sclk_pcm_in))
+ sclk_pcm_p = __clk_get_name(sclk_pcm_in);
clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
- "div_pcm0", CLK_SET_RATE_PARENT,
+ sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
+ if (variant == TYPE_EXYNOS5420) {
+ clk_table[EXYNOS_ADMA] = clk_register_gate(NULL, "adma",
+ "dout_srp", CLK_SET_RATE_PARENT,
+ reg_base + ASS_CLK_GATE, 9, 0, &lock);
+ }
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (IS_ERR(clk_table[i])) {
+ dev_err(&pdev->dev, "failed to register clock %d\n", i);
+ ret = PTR_ERR(clk_table[i]);
+ goto unregister;
+ }
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ &clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add clock provider\n");
+ goto unregister;
+ }
+
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif
- pr_info("Exynos: Audss: clock setup completed\n");
+ dev_info(&pdev->dev, "setup completed\n");
+
+ return 0;
+
+unregister:
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return ret;
}
-CLK_OF_DECLARE(exynos4210_audss_clk, "samsung,exynos4210-audss-clock",
- exynos_audss_clk_init);
-CLK_OF_DECLARE(exynos5250_audss_clk, "samsung,exynos5250-audss-clock",
- exynos_audss_clk_init);
+
+static int exynos_audss_clk_remove(struct platform_device *pdev)
+{
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver exynos_audss_clk_driver = {
+ .driver = {
+ .name = "exynos-audss-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = exynos_audss_clk_of_match,
+ },
+ .probe = exynos_audss_clk_probe,
+ .remove = exynos_audss_clk_remove,
+};
+
+static int __init exynos_audss_clk_init(void)
+{
+ return platform_driver_register(&exynos_audss_clk_driver);
+}
+core_initcall(exynos_audss_clk_init);
+
+static void __exit exynos_audss_clk_exit(void)
+{
+ platform_driver_unregister(&exynos_audss_clk_driver);
+}
+module_exit(exynos_audss_clk_exit);
+
+MODULE_AUTHOR("Padmavathi Venna <padma.v@samsung.com>");
+MODULE_DESCRIPTION("Exynos Audio Subsystem Clock Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:exynos-audss-clk");
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1a7c1b929c69..010f071af883 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for all Exynos4 SoCs.
*/
+#include <dt-bindings/clock/exynos4.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -130,68 +131,6 @@ enum exynos4_plls {
};
/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms. The clocks are categorized into three
- * sections: core, sclk gate and bus interface gate clocks.
- *
- * When adding a new clock to this list, it is advised to choose a clock
- * category and add it to the end of that category. That is because the the
- * device tree source file is referring to these ids and any change in the
- * sequence number of existing clocks will require corresponding change in the
- * device tree files. This limitation would go away when pre-processor support
- * for dtc would be available.
- */
-enum exynos4_clks {
- none,
-
- /* core clocks */
- xxti, xusbxti, fin_pll, fout_apll, fout_mpll, fout_epll, fout_vpll,
- sclk_apll, sclk_mpll, sclk_epll, sclk_vpll, arm_clk, aclk200, aclk100,
- aclk160, aclk133, mout_mpll_user_t, mout_mpll_user_c, mout_core,
- mout_apll, /* 20 */
-
- /* gate for special clocks (sclk) */
- sclk_fimc0 = 128, sclk_fimc1, sclk_fimc2, sclk_fimc3, sclk_cam0,
- sclk_cam1, sclk_csis0, sclk_csis1, sclk_hdmi, sclk_mixer, sclk_dac,
- sclk_pixel, sclk_fimd0, sclk_mdnie0, sclk_mdnie_pwm0, sclk_mipi0,
- sclk_audio0, sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_mmc4,
- sclk_sata, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_uart4,
- sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- sclk_slimbus, sclk_fimd1, sclk_mipi1, sclk_pcm1, sclk_pcm2, sclk_i2s1,
- sclk_i2s2, sclk_mipihsi, sclk_mfc, sclk_pcm0, sclk_g3d, sclk_pwm_isp,
- sclk_spi0_isp, sclk_spi1_isp, sclk_uart_isp, sclk_fimg2d,
-
- /* gate clocks */
- fimc0 = 256, fimc1, fimc2, fimc3, csis0, csis1, jpeg, smmu_fimc0,
- smmu_fimc1, smmu_fimc2, smmu_fimc3, smmu_jpeg, vp, mixer, tvenc, hdmi,
- smmu_tv, mfc, smmu_mfcl, smmu_mfcr, g3d, g2d, rotator, mdma, smmu_g2d,
- smmu_rotator, smmu_mdma, fimd0, mie0, mdnie0, dsim0, smmu_fimd0, fimd1,
- mie1, dsim1, smmu_fimd1, pdma0, pdma1, pcie_phy, sata_phy, tsi, sdmmc0,
- sdmmc1, sdmmc2, sdmmc3, sdmmc4, sata, sromc, usb_host, usb_device, pcie,
- onenand, nfcon, smmu_pcie, gps, smmu_gps, uart0, uart1, uart2, uart3,
- uart4, i2c0, i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc,
- spi0, spi1, spi2, i2s1, i2s2, pcm0, i2s0, pcm1, pcm2, pwm, slimbus,
- spdif, ac97, modemif, chipid, sysreg, hdmi_cec, mct, wdt, rtc, keyif,
- audss, mipi_hsi, mdma2, pixelasyncm0, pixelasyncm1, fimc_lite0,
- fimc_lite1, ppmuispx, ppmuispmx, fimc_isp, fimc_drc, fimc_fd, mcuisp,
- gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
- mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
- asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
- spi1_isp_sclk, uart_isp_sclk, tmu_apbif,
-
- /* mux clocks */
- mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
- mout_cam1, mout_csis0, mout_csis1, mout_g3d0, mout_g3d1, mout_g3d,
- aclk400_mcuisp,
-
- /* div clocks */
- div_isp0 = 450, div_isp1, div_mcuisp0, div_mcuisp1, div_aclk200,
- div_aclk400_mcuisp,
-
- nr_clks,
-};
-
-/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
@@ -347,255 +286,256 @@ PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
- FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
- FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XXTI, "xxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XUSBXTI, "xusbxti", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
- FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
};
/* list of mux clocks supported in all exynos4 soc's */
static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
- MUX_FA(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT, 0, "mout_apll"),
- MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
- MUX(none, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
- MUX(none, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
- MUX_F(mout_g3d1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
+ MUX(0, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+ MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
+ MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+ MUX_F(CLK_MOUT_G3D1, "mout_g3d1", sclk_evpll_p, SRC_G3D, 4, 1,
CLK_SET_RATE_PARENT, 0),
- MUX_F(mout_g3d, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1,
+ MUX_F(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
- MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
- MUX(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
- MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
+ MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
+ MUX(0, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
+ MUX(CLK_SCLK_EPLL, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
+ MUX(0, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
};
/* list of mux clocks supported in exynos4210 soc */
static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+ MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
};
static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
- MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
- MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
- MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
- MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
- MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
- MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
- MUX(none, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
- MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
- MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
- MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
- MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
- MUX(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
- MUX(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
- MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
- MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
- MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
- MUX(mout_fimc3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4),
- MUX(mout_cam0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4),
- MUX(mout_cam1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4),
- MUX(mout_csis0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4),
- MUX(mout_csis1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4),
- MUX(none, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1),
- MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1,
+ MUX(0, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
+ MUX(0, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
+ MUX(0, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
+ MUX(0, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
+ MUX(0, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
+ MUX(0, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
+ MUX(0, "mout_g2d1", sclk_evpll_p, E4210_SRC_IMAGE, 4, 1),
+ MUX(0, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
+ MUX(0, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
+ MUX(0, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
+ MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
+ MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+ MUX(CLK_SCLK_VPLL, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
+ MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
+ MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
+ MUX(CLK_MOUT_FIMC3, "mout_fimc3", group1_p4210, SRC_CAM, 12, 4),
+ MUX(CLK_MOUT_CAM0, "mout_cam0", group1_p4210, SRC_CAM, 16, 4),
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group1_p4210, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_CSIS0, "mout_csis0", group1_p4210, SRC_CAM, 24, 4),
+ MUX(CLK_MOUT_CSIS1, "mout_csis1", group1_p4210, SRC_CAM, 28, 4),
+ MUX(0, "mout_mfc0", sclk_ampll_p4210, SRC_MFC, 0, 1),
+ MUX_F(CLK_MOUT_G3D0, "mout_g3d0", sclk_ampll_p4210, SRC_G3D, 0, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4),
- MUX(none, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4),
- MUX(none, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4),
- MUX(none, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4),
- MUX(none, "mout_mmc4", group1_p4210, SRC_FSYS, 16, 4),
- MUX(none, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1),
- MUX(none, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4),
- MUX(none, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4),
- MUX(none, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4),
- MUX(none, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4),
- MUX(none, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4),
- MUX(none, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4),
- MUX(none, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
- MUX(none, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
- MUX(none, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
+ MUX(0, "mout_fimd0", group1_p4210, SRC_LCD0, 0, 4),
+ MUX(0, "mout_mipi0", group1_p4210, SRC_LCD0, 12, 4),
+ MUX(0, "mout_audio0", mout_audio0_p4210, SRC_MAUDIO, 0, 4),
+ MUX(0, "mout_mmc0", group1_p4210, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", group1_p4210, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", group1_p4210, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", group1_p4210, SRC_FSYS, 12, 4),
+ MUX(0, "mout_mmc4", group1_p4210, SRC_FSYS, 16, 4),
+ MUX(0, "mout_sata", sclk_ampll_p4210, SRC_FSYS, 24, 1),
+ MUX(0, "mout_uart0", group1_p4210, SRC_PERIL0, 0, 4),
+ MUX(0, "mout_uart1", group1_p4210, SRC_PERIL0, 4, 4),
+ MUX(0, "mout_uart2", group1_p4210, SRC_PERIL0, 8, 4),
+ MUX(0, "mout_uart3", group1_p4210, SRC_PERIL0, 12, 4),
+ MUX(0, "mout_uart4", group1_p4210, SRC_PERIL0, 16, 4),
+ MUX(0, "mout_audio1", mout_audio1_p4210, SRC_PERIL1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p4210, SRC_PERIL1, 4, 4),
+ MUX(0, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
+ MUX(0, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
+ MUX(0, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
};
/* list of mux clocks supported in exynos4x12 soc */
static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
- MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+ MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p4x12,
SRC_CPU, 24, 1),
- MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
- MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
- MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
+ MUX(0, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
+ MUX(0, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
+ MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p4x12,
SRC_TOP1, 12, 1),
- MUX(none, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12,
+ MUX(0, "mout_user_aclk266_gps", mout_user_aclk266_gps_p4x12,
SRC_TOP1, 16, 1),
- MUX(aclk200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1),
- MUX(aclk400_mcuisp, "aclk400_mcuisp", mout_user_aclk400_mcuisp_p4x12,
- SRC_TOP1, 24, 1),
- MUX(none, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1),
- MUX(none, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1),
- MUX(none, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1),
- MUX(none, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4),
- MUX(none, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4),
- MUX(none, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1),
- MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
- MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
- MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
- MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
- MUX(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
- MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
- MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
- MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
- MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
- MUX(mout_fimc3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4),
- MUX(mout_cam0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4),
- MUX(mout_cam1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4),
- MUX(mout_csis0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4),
- MUX(mout_csis1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4),
- MUX(none, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1),
- MUX_F(mout_g3d0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1,
+ MUX(CLK_ACLK200, "aclk200", mout_user_aclk200_p4x12, SRC_TOP1, 20, 1),
+ MUX(CLK_ACLK400_MCUISP, "aclk400_mcuisp",
+ mout_user_aclk400_mcuisp_p4x12, SRC_TOP1, 24, 1),
+ MUX(0, "mout_aclk200", aclk_p4412, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk100", aclk_p4412, SRC_TOP0, 16, 1),
+ MUX(0, "mout_aclk160", aclk_p4412, SRC_TOP0, 20, 1),
+ MUX(0, "mout_aclk133", aclk_p4412, SRC_TOP0, 24, 1),
+ MUX(0, "mout_mdnie0", group1_p4x12, SRC_LCD0, 4, 4),
+ MUX(0, "mout_mdnie_pwm0", group1_p4x12, SRC_LCD0, 8, 4),
+ MUX(0, "mout_sata", sclk_ampll_p4x12, SRC_FSYS, 24, 1),
+ MUX(0, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
+ MUX(0, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
+ MUX(0, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
+ MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+ MUX(CLK_SCLK_VPLL, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
+ MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
+ MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
+ MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
+ MUX(CLK_MOUT_FIMC3, "mout_fimc3", group1_p4x12, SRC_CAM, 12, 4),
+ MUX(CLK_MOUT_CAM0, "mout_cam0", group1_p4x12, SRC_CAM, 16, 4),
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group1_p4x12, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_CSIS0, "mout_csis0", group1_p4x12, SRC_CAM, 24, 4),
+ MUX(CLK_MOUT_CSIS1, "mout_csis1", group1_p4x12, SRC_CAM, 28, 4),
+ MUX(0, "mout_mfc0", sclk_ampll_p4x12, SRC_MFC, 0, 1),
+ MUX_F(CLK_MOUT_G3D0, "mout_g3d0", sclk_ampll_p4x12, SRC_G3D, 0, 1,
CLK_SET_RATE_PARENT, 0),
- MUX(none, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4),
- MUX(none, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4),
- MUX(none, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4),
- MUX(none, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4),
- MUX(none, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4),
- MUX(none, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1),
- MUX(none, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4),
- MUX(none, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4),
- MUX(none, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4),
- MUX(none, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4),
- MUX(none, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4),
- MUX(none, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4),
- MUX(none, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4),
- MUX(none, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4),
- MUX(none, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4),
- MUX(none, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4),
- MUX(none, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
- MUX(none, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
- MUX(none, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
- MUX(none, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
- MUX(none, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
- MUX(none, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
+ MUX(0, "mout_fimd0", group1_p4x12, SRC_LCD0, 0, 4),
+ MUX(0, "mout_mipi0", group1_p4x12, SRC_LCD0, 12, 4),
+ MUX(0, "mout_audio0", mout_audio0_p4x12, SRC_MAUDIO, 0, 4),
+ MUX(0, "mout_mmc0", group1_p4x12, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", group1_p4x12, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", group1_p4x12, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", group1_p4x12, SRC_FSYS, 12, 4),
+ MUX(0, "mout_mmc4", group1_p4x12, SRC_FSYS, 16, 4),
+ MUX(0, "mout_mipihsi", aclk_p4412, SRC_FSYS, 24, 1),
+ MUX(0, "mout_uart0", group1_p4x12, SRC_PERIL0, 0, 4),
+ MUX(0, "mout_uart1", group1_p4x12, SRC_PERIL0, 4, 4),
+ MUX(0, "mout_uart2", group1_p4x12, SRC_PERIL0, 8, 4),
+ MUX(0, "mout_uart3", group1_p4x12, SRC_PERIL0, 12, 4),
+ MUX(0, "mout_uart4", group1_p4x12, SRC_PERIL0, 16, 4),
+ MUX(0, "mout_audio1", mout_audio1_p4x12, SRC_PERIL1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p4x12, SRC_PERIL1, 4, 4),
+ MUX(0, "mout_spi0", group1_p4x12, SRC_PERIL1, 16, 4),
+ MUX(0, "mout_spi1", group1_p4x12, SRC_PERIL1, 20, 4),
+ MUX(0, "mout_spi2", group1_p4x12, SRC_PERIL1, 24, 4),
+ MUX(0, "mout_pwm_isp", group1_p4x12, E4X12_SRC_ISP, 0, 4),
+ MUX(0, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
+ MUX(0, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
+ MUX(0, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
+ MUX(0, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
+ MUX(0, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
+ MUX(0, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
};
/* list of divider clocks supported in all exynos4 soc's */
static struct samsung_div_clock exynos4_div_clks[] __initdata = {
- DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
- DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
- DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
- DIV(none, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
- DIV(none, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
- DIV(none, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4),
- DIV(none, "div_cam0", "mout_cam0", DIV_CAM, 16, 4),
- DIV(none, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
- DIV(none, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
- DIV(none, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
- DIV(sclk_mfc, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
- DIV_F(none, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
+ DIV(0, "div_core", "mout_core", DIV_CPU0, 0, 3),
+ DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(0, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
+ DIV(0, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
+ DIV(0, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
+ DIV(0, "div_fimc3", "mout_fimc3", DIV_CAM, 12, 4),
+ DIV(0, "div_cam0", "mout_cam0", DIV_CAM, 16, 4),
+ DIV(0, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+ DIV(0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
+ DIV(0, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
+ DIV(CLK_SCLK_MFC, "sclk_mfc", "mout_mfc", DIV_MFC, 0, 4),
+ DIV_F(0, "div_g3d", "mout_g3d", DIV_G3D, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(0, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
+ DIV(0, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
+ DIV(0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
+ DIV(CLK_SCLK_PCM0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8),
+ DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV(0, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV(CLK_SCLK_PIXEL, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4),
+ DIV(CLK_ACLK100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4),
+ DIV(CLK_ACLK160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3),
+ DIV(CLK_ACLK133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3),
+ DIV(0, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3),
+ DIV(CLK_SCLK_SLIMBUS, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4),
+ DIV(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8),
+ DIV(CLK_SCLK_PCM2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8),
+ DIV(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6),
+ DIV(CLK_SCLK_I2S2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6),
+ DIV(0, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4),
+ DIV_F(0, "div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV(none, "div_fimd0", "mout_fimd0", DIV_LCD0, 0, 4),
- DIV(none, "div_mipi0", "mout_mipi0", DIV_LCD0, 16, 4),
- DIV(none, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
- DIV(sclk_pcm0, "sclk_pcm0", "sclk_audio0", DIV_MAUDIO, 4, 8),
- DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
- DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
- DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
- DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
- DIV(sclk_pixel, "sclk_pixel", "sclk_vpll", DIV_TV, 0, 4),
- DIV(aclk100, "aclk100", "mout_aclk100", DIV_TOP, 4, 4),
- DIV(aclk160, "aclk160", "mout_aclk160", DIV_TOP, 8, 3),
- DIV(aclk133, "aclk133", "mout_aclk133", DIV_TOP, 12, 3),
- DIV(none, "div_onenand", "mout_onenand1", DIV_TOP, 16, 3),
- DIV(sclk_slimbus, "sclk_slimbus", "sclk_epll", DIV_PERIL3, 4, 4),
- DIV(sclk_pcm1, "sclk_pcm1", "sclk_audio1", DIV_PERIL4, 4, 8),
- DIV(sclk_pcm2, "sclk_pcm2", "sclk_audio2", DIV_PERIL4, 20, 8),
- DIV(sclk_i2s1, "sclk_i2s1", "sclk_audio1", DIV_PERIL5, 0, 6),
- DIV(sclk_i2s2, "sclk_i2s2", "sclk_audio2", DIV_PERIL5, 8, 6),
- DIV(none, "div_mmc4", "mout_mmc4", DIV_FSYS3, 0, 4),
- DIV(none, "div_mmc_pre4", "div_mmc4", DIV_FSYS3, 8, 8),
- DIV(none, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
- DIV(none, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
- DIV(none, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
- DIV(none, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
- DIV(none, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4),
- DIV(none, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
- DIV(none, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8),
- DIV(none, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
- DIV(none, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8),
- DIV(none, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
- DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
- DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
- DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
- DIV(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
- DIV(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
+ DIV(0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+ DIV(0, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+ DIV(0, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
+ DIV(0, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
+ DIV(0, "div_uart4", "mout_uart4", DIV_PERIL0, 16, 4),
+ DIV(0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+ DIV(0, "div_spi_pre0", "div_spi0", DIV_PERIL1, 8, 8),
+ DIV(0, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+ DIV(0, "div_spi_pre1", "div_spi1", DIV_PERIL1, 24, 8),
+ DIV(0, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
+ DIV(0, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
+ DIV(0, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
+ DIV(0, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
+ DIV(CLK_ARM_CLK, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
+ DIV(CLK_SCLK_APLL, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV_F(0, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
+ DIV_F(0, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8,
+ DIV_F(0, "div_mmc_pre1", "div_mmc1", DIV_FSYS1, 24, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8,
+ DIV_F(0, "div_mmc_pre2", "div_mmc2", DIV_FSYS2, 8, 8,
CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
+ DIV_F(0, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
CLK_SET_RATE_PARENT, 0),
};
/* list of divider clocks supported in exynos4210 soc */
static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
- DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
- DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
- DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
- DIV(none, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4),
- DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
- DIV_F(none, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4,
+ DIV(CLK_ACLK200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
+ DIV(0, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
+ DIV(0, "div_mipi1", "mout_mipi1", E4210_DIV_LCD1, 16, 4),
+ DIV(0, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV_F(0, "div_mipi_pre1", "div_mipi1", E4210_DIV_LCD1, 20, 4,
CLK_SET_RATE_PARENT, 0),
};
/* list of divider clocks supported in exynos4x12 soc */
static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
- DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
- DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
- DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
- DIV(none, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4),
- DIV(none, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4),
- DIV(div_aclk200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3),
- DIV(none, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3),
- DIV(div_aclk400_mcuisp, "div_aclk400_mcuisp", "mout_aclk400_mcuisp",
+ DIV(0, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
+ DIV(0, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
+ DIV(0, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
+ DIV(0, "div_mipihsi", "mout_mipihsi", DIV_FSYS0, 20, 4),
+ DIV(0, "div_jpeg", "mout_jpeg", E4X12_DIV_CAM1, 0, 4),
+ DIV(CLK_DIV_ACLK200, "div_aclk200", "mout_aclk200", DIV_TOP, 0, 3),
+ DIV(0, "div_aclk266_gps", "mout_aclk266_gps", DIV_TOP, 20, 3),
+ DIV(CLK_DIV_ACLK400_MCUISP, "div_aclk400_mcuisp", "mout_aclk400_mcuisp",
DIV_TOP, 24, 3),
- DIV(none, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4),
- DIV(none, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4),
- DIV(none, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8),
- DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
- DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
- DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
- DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+ DIV(0, "div_pwm_isp", "mout_pwm_isp", E4X12_DIV_ISP, 0, 4),
+ DIV(0, "div_spi0_isp", "mout_spi0_isp", E4X12_DIV_ISP, 4, 4),
+ DIV(0, "div_spi0_isp_pre", "div_spi0_isp", E4X12_DIV_ISP, 8, 8),
+ DIV(0, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
+ DIV(0, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
+ DIV(0, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+ DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
CLK_GET_RATE_NOCACHE, 0),
- DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+ DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
CLK_GET_RATE_NOCACHE, 0),
- DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+ DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+ DIV_F(CLK_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
4, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+ DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
8, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
/* list of gate clocks supported in all exynos4 soc's */
@@ -605,333 +545,341 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
* the device name and clock alias names specified below for some
* of the clocks can be removed.
*/
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0, 0),
- GATE(jpeg, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0),
- GATE(mie0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0),
- GATE(dsim0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0),
- GATE(fimd1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0),
- GATE(mie1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0),
- GATE(dsim1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0),
- GATE(smmu_fimd1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0, 0),
- GATE(tsi, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
- GATE(sromc, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
- GATE(sclk_g3d, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0,
+ 0),
+ GATE(CLK_JPEG, "jpeg", "aclk160", GATE_IP_CAM, 6, 0, 0),
+ GATE(CLK_MIE0, "mie0", "aclk160", GATE_IP_LCD0, 1, 0, 0),
+ GATE(CLK_DSIM0, "dsim0", "aclk160", GATE_IP_LCD0, 3, 0, 0),
+ GATE(CLK_FIMD1, "fimd1", "aclk160", E4210_GATE_IP_LCD1, 0, 0, 0),
+ GATE(CLK_MIE1, "mie1", "aclk160", E4210_GATE_IP_LCD1, 1, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk160", E4210_GATE_IP_LCD1, 3, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk160", E4210_GATE_IP_LCD1, 4, 0,
+ 0),
+ GATE(CLK_TSI, "tsi", "aclk133", GATE_IP_FSYS, 4, 0, 0),
+ GATE(CLK_SROMC, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
+ GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(usb_device, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
- GATE(onenand, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
- GATE(nfcon, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
- GATE(gps, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
- GATE(smmu_gps, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
- GATE(slimbus, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
- GATE(sclk_cam0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
+ GATE(CLK_USB_DEVICE, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_ONENAND, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
+ GATE(CLK_NFCON, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
+ GATE(CLK_GPS, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
+ GATE(CLK_SMMU_GPS, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
+ GATE(CLK_SLIMBUS, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
+ GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5,
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1", GATE_SCLK_CAM, 5,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi0, "sclk_mipi0", "div_mipi_pre0",
+ GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi_pre0",
SRC_MASK_LCD0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0,
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0", SRC_MASK_MAUDIO, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(vp, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
- GATE(mixer, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
- GATE(hdmi, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
- GATE(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
- GATE(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
- GATE(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
- GATE(sclk_fimc0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
+ GATE(CLK_VP, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(CLK_SDMMC4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
+ GATE(CLK_USB_HOST, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SCLK_FIMC0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
+ GATE(CLK_SCLK_FIMC1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
+ GATE(CLK_SCLK_FIMC2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimc3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
+ GATE(CLK_SCLK_FIMC3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_csis0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
+ GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_csis1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
+ GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimd0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
+ GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
+ GATE(CLK_SCLK_MMC3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
+ GATE(CLK_SCLK_MMC4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
+ GATE(CLK_SCLK_UART0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
+ GATE(CLK_SCLK_UART1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
+ GATE(CLK_SCLK_UART2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
+ GATE(CLK_SCLK_UART3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
+ GATE(CLK_SCLK_UART4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
+ GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
CLK_SET_RATE_PARENT, 0),
- GATE(fimc0, "fimc0", "aclk160", GATE_IP_CAM, 0,
+ GATE(CLK_FIMC0, "fimc0", "aclk160", GATE_IP_CAM, 0,
0, 0),
- GATE(fimc1, "fimc1", "aclk160", GATE_IP_CAM, 1,
+ GATE(CLK_FIMC1, "fimc1", "aclk160", GATE_IP_CAM, 1,
0, 0),
- GATE(fimc2, "fimc2", "aclk160", GATE_IP_CAM, 2,
+ GATE(CLK_FIMC2, "fimc2", "aclk160", GATE_IP_CAM, 2,
0, 0),
- GATE(fimc3, "fimc3", "aclk160", GATE_IP_CAM, 3,
+ GATE(CLK_FIMC3, "fimc3", "aclk160", GATE_IP_CAM, 3,
0, 0),
- GATE(csis0, "csis0", "aclk160", GATE_IP_CAM, 4,
+ GATE(CLK_CSIS0, "csis0", "aclk160", GATE_IP_CAM, 4,
0, 0),
- GATE(csis1, "csis1", "aclk160", GATE_IP_CAM, 5,
+ GATE(CLK_CSIS1, "csis1", "aclk160", GATE_IP_CAM, 5,
0, 0),
- GATE(smmu_fimc0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
+ GATE(CLK_SMMU_FIMC0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
0, 0),
- GATE(smmu_fimc1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
+ GATE(CLK_SMMU_FIMC1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
0, 0),
- GATE(smmu_fimc2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
+ GATE(CLK_SMMU_FIMC2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
0, 0),
- GATE(smmu_fimc3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
+ GATE(CLK_SMMU_FIMC3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
0, 0),
- GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
- GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
- GATE(smmu_tv, "smmu_tv", "aclk160", GATE_IP_TV, 4,
+ GATE(CLK_PIXELASYNCM0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
+ GATE(CLK_PIXELASYNCM1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
+ GATE(CLK_SMMU_TV, "smmu_tv", "aclk160", GATE_IP_TV, 4,
0, 0),
- GATE(mfc, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
+ GATE(CLK_MFC, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
0, 0),
- GATE(fimd0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
+ GATE(CLK_FIMD0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
0, 0),
- GATE(smmu_fimd0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
+ GATE(CLK_SMMU_FIMD0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
0, 0),
- GATE(pdma0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
+ GATE(CLK_PDMA0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
0, 0),
- GATE(pdma1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
+ GATE(CLK_PDMA1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
0, 0),
- GATE(sdmmc0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
+ GATE(CLK_SDMMC0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
0, 0),
- GATE(sdmmc1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
+ GATE(CLK_SDMMC1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
0, 0),
- GATE(sdmmc2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
+ GATE(CLK_SDMMC2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
0, 0),
- GATE(sdmmc3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
+ GATE(CLK_SDMMC3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
0, 0),
- GATE(uart0, "uart0", "aclk100", GATE_IP_PERIL, 0,
+ GATE(CLK_UART0, "uart0", "aclk100", GATE_IP_PERIL, 0,
0, 0),
- GATE(uart1, "uart1", "aclk100", GATE_IP_PERIL, 1,
+ GATE(CLK_UART1, "uart1", "aclk100", GATE_IP_PERIL, 1,
0, 0),
- GATE(uart2, "uart2", "aclk100", GATE_IP_PERIL, 2,
+ GATE(CLK_UART2, "uart2", "aclk100", GATE_IP_PERIL, 2,
0, 0),
- GATE(uart3, "uart3", "aclk100", GATE_IP_PERIL, 3,
+ GATE(CLK_UART3, "uart3", "aclk100", GATE_IP_PERIL, 3,
0, 0),
- GATE(uart4, "uart4", "aclk100", GATE_IP_PERIL, 4,
+ GATE(CLK_UART4, "uart4", "aclk100", GATE_IP_PERIL, 4,
0, 0),
- GATE(i2c0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
+ GATE(CLK_I2C0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
0, 0),
- GATE(i2c1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
+ GATE(CLK_I2C1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
0, 0),
- GATE(i2c2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
+ GATE(CLK_I2C2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
0, 0),
- GATE(i2c3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
+ GATE(CLK_I2C3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
0, 0),
- GATE(i2c4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
+ GATE(CLK_I2C4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
0, 0),
- GATE(i2c5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
+ GATE(CLK_I2C5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
0, 0),
- GATE(i2c6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
+ GATE(CLK_I2C6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
0, 0),
- GATE(i2c7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
+ GATE(CLK_I2C7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
0, 0),
- GATE(i2c_hdmi, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
+ GATE(CLK_I2C_HDMI, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
0, 0),
- GATE(spi0, "spi0", "aclk100", GATE_IP_PERIL, 16,
+ GATE(CLK_SPI0, "spi0", "aclk100", GATE_IP_PERIL, 16,
0, 0),
- GATE(spi1, "spi1", "aclk100", GATE_IP_PERIL, 17,
+ GATE(CLK_SPI1, "spi1", "aclk100", GATE_IP_PERIL, 17,
0, 0),
- GATE(spi2, "spi2", "aclk100", GATE_IP_PERIL, 18,
+ GATE(CLK_SPI2, "spi2", "aclk100", GATE_IP_PERIL, 18,
0, 0),
- GATE(i2s1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
+ GATE(CLK_I2S1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
0, 0),
- GATE(i2s2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
+ GATE(CLK_I2S2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
0, 0),
- GATE(pcm1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
+ GATE(CLK_PCM1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
0, 0),
- GATE(pcm2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
+ GATE(CLK_PCM2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
0, 0),
- GATE(spdif, "spdif", "aclk100", GATE_IP_PERIL, 26,
+ GATE(CLK_SPDIF, "spdif", "aclk100", GATE_IP_PERIL, 26,
0, 0),
- GATE(ac97, "ac97", "aclk100", GATE_IP_PERIL, 27,
+ GATE(CLK_AC97, "ac97", "aclk100", GATE_IP_PERIL, 27,
0, 0),
};
/* list of gate clocks supported in exynos4210 soc */
static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
- GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
- GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
- GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
- GATE(mdma, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0),
- GATE(smmu_g2d, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
- GATE(smmu_mdma, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0, 0),
- GATE(pcie_phy, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
- GATE(sata_phy, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
- GATE(sata, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
- GATE(pcie, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
- GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
- GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
- GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+ GATE(CLK_TVENC, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
+ GATE(CLK_ROTATOR, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(CLK_MDMA, "mdma", "aclk200", E4210_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
+ GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0,
+ 0),
+ GATE(CLK_PCIE_PHY, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
+ GATE(CLK_SATA_PHY, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
+ GATE(CLK_SATA, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
+ GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
CLK_IGNORE_UNUSED, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
+ 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk200",
E4210_GATE_IP_IMAGE, 4, 0, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "div_mipi_pre1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "div_mipi_pre1",
E4210_SRC_MASK_LCD1, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_sata, "sclk_sata", "div_sata",
+ GATE(CLK_SCLK_SATA, "sclk_sata", "div_sata",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
- GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
- GATE(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15,
+ GATE(CLK_SCLK_MIXER, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
+ GATE(CLK_SCLK_DAC, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "aclk100", GATE_IP_PERIL, 15,
0, 0),
- GATE(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
+ GATE(CLK_MCT, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
0, 0),
- GATE(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
+ GATE(CLK_WDT, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
0, 0),
- GATE(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
+ GATE(CLK_RTC, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
0, 0),
- GATE(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
+ GATE(CLK_KEYIF, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
0, 0),
- GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE(tmu_apbif, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0,
+ 0),
};
/* list of gate clocks supported in exynos4x12 soc */
static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
- GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
- GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
- GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
- GATE(mdma2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
- GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
- GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
- GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+ GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
+ GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
+ GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
+ GATE(CLK_MDMA2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0,
+ 0),
+ GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
CLK_IGNORE_UNUSED, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
- GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
+ 0),
+ GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_mdnie0",
SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mdnie_pwm0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0",
+ GATE(CLK_SCLK_MDNIE_PWM0, "sclk_mdnie_pwm0", "div_mdnie_pwm_pre0",
SRC_MASK_LCD0, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipihsi, "sclk_mipihsi", "div_mipihsi",
+ GATE(CLK_SCLK_MIPIHSI, "sclk_mipihsi", "div_mipihsi",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk200",
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk200",
E4X12_GATE_IP_IMAGE, 4, 0, 0),
- GATE(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
+ GATE(CLK_MCT, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
0, 0),
- GATE(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
+ GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
0, 0),
- GATE(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
- GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
+ GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
+ GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
+ GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1_isp, "sclk_spi1_isp", "div_spi1_isp_pre",
+ GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart_isp, "sclk_uart_isp", "div_uart_isp",
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
- GATE(pwm_isp_sclk, "pwm_isp_sclk", "sclk_pwm_isp",
+ GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
E4X12_GATE_IP_ISP, 0, 0, 0),
- GATE(spi0_isp_sclk, "spi0_isp_sclk", "sclk_spi0_isp",
+ GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
E4X12_GATE_IP_ISP, 1, 0, 0),
- GATE(spi1_isp_sclk, "spi1_isp_sclk", "sclk_spi1_isp",
+ GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
E4X12_GATE_IP_ISP, 2, 0, 0),
- GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
+ GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
- GATE(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
- GATE(pcm0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
+ GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
+ GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
0, 0),
- GATE(i2s0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
+ GATE(CLK_I2S0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
0, 0),
- GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
+ GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
+ GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
+ GATE(CLK_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
+ GATE(CLK_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
+ GATE(CLK_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
+ GATE(CLK_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
+ GATE(CLK_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
+ GATE(CLK_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
+ GATE(CLK_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
+ GATE(CLK_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+ GATE(CLK_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+ GATE(CLK_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+ GATE(CLK_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+ GATE(CLK_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+ GATE(CLK_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+ GATE(CLK_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+ GATE(CLK_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+ GATE(CLK_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+ GATE(CLK_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
+ GATE(CLK_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
+ GATE(CLK_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+ GATE(CLK_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+ GATE(CLK_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+ GATE(CLK_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+ GATE(CLK_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+ GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
- GATE(tmu_apbif, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
+ 0),
};
static struct samsung_clock_alias exynos4_aliases[] __initdata = {
- ALIAS(mout_core, NULL, "moutcore"),
- ALIAS(arm_clk, NULL, "armclk"),
- ALIAS(sclk_apll, NULL, "mout_apll"),
+ ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
+ ALIAS(CLK_ARM_CLK, NULL, "armclk"),
+ ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
};
static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
- ALIAS(sclk_mpll, NULL, "mout_mpll"),
+ ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
};
static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
- ALIAS(mout_mpll_user_c, NULL, "mout_mpll"),
+ ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
};
/*
@@ -977,7 +925,7 @@ static void __init exynos4_clk_register_finpll(unsigned long xom)
finpll_f = clk_get_rate(clk);
}
- fclk.id = fin_pll;
+ fclk.id = CLK_FIN_PLL;
fclk.name = "fin_pll";
fclk.parent_name = NULL;
fclk.flags = CLK_IS_ROOT;
@@ -1067,24 +1015,24 @@ static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
};
static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_4508, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
- APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_4508, fout_mpll, "fout_mpll", "fin_pll",
+ [apll] = PLL_A(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
- [epll] = PLL_A(pll_4600, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
- EPLL_CON0, "fout_epll", NULL),
- [vpll] = PLL_A(pll_4650c, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ [epll] = PLL_A(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, "fout_epll", NULL),
+ [vpll] = PLL_A(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
};
static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
- [apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
APLL_LOCK, APLL_CON0, NULL),
- [mpll] = PLL(pll_35xx, fout_mpll, "fout_mpll", "fin_pll",
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
E4X12_MPLL_LOCK, E4X12_MPLL_CON0, NULL),
- [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
EPLL_LOCK, EPLL_CON0, NULL),
- [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "fin_pll",
+ [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
VPLL_LOCK, VPLL_CON0, NULL),
};
@@ -1098,11 +1046,11 @@ static void __init exynos4_clk_init(struct device_node *np,
panic("%s: failed to map registers\n", __func__);
if (exynos4_soc == EXYNOS4210)
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save));
else
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
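The conversion above replaces the driver-local clock ID enum with the shared CLK_* constants from the dt-bindings header, so the clock provider and device tree sources agree on one numbering. As a rough, hypothetical sketch of how those IDs end up being consumed (the "ciu" clock-names entry and example_probe are illustrative and not taken from this patch): a board's device tree would reference a clock by its binding ID, e.g. clocks = <&clock CLK_SCLK_MMC0>; clock-names = "ciu";, and the consumer driver then obtains it through the ordinary clk API:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *sclk;
		int ret;

		/* Looked up via the hypothetical "ciu" clock-names entry;
		 * the numeric CLK_SCLK_MMC0 ID is resolved by the clock
		 * provider registered in this driver.
		 */
		sclk = devm_clk_get(&pdev->dev, "ciu");
		if (IS_ERR(sclk))
			return PTR_ERR(sclk);

		ret = clk_prepare_enable(sclk);
		if (ret)
			return ret;

		return 0;
	}

Consumers written this way are unaffected by the renumbering itself; only the source of the numeric IDs changes, from the private enum to the shared header.
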
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index e52359cf9b6f..ff4beebe1f0b 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for Exynos5250 SoC.
*/
+#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -36,6 +37,7 @@
#define GPLL_CON0 0x10150
#define SRC_TOP0 0x10210
#define SRC_TOP2 0x10218
+#define SRC_TOP3 0x1021c
#define SRC_GSCL 0x10220
#define SRC_DISP1_0 0x1022c
#define SRC_MAU 0x10240
@@ -66,6 +68,7 @@
#define DIV_PERIC4 0x10568
#define DIV_PERIC5 0x1056c
#define GATE_IP_GSCL 0x10920
+#define GATE_IP_DISP1 0x10928
#define GATE_IP_MFC 0x1092c
#define GATE_IP_GEN 0x10934
#define GATE_IP_FSYS 0x10944
@@ -75,7 +78,6 @@
#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
-#define GATE_IP_DISP1 0x10928
/* list of PLLs to be registered */
enum exynos5250_plls {
@@ -84,52 +86,6 @@ enum exynos5250_plls {
};
/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms. The clocks are categorized into three
- * sections: core, sclk gate and bus interface gate clocks.
- *
- * When adding a new clock to this list, it is advised to choose a clock
- * category and add it to the end of that category. That is because the the
- * device tree source file is referring to these ids and any change in the
- * sequence number of existing clocks will require corresponding change in the
- * device tree files. This limitation would go away when pre-processor support
- * for dtc would be available.
- */
-enum exynos5250_clks {
- none,
-
- /* core clocks */
- fin_pll, fout_apll, fout_mpll, fout_bpll, fout_gpll, fout_cpll,
- fout_epll, fout_vpll,
-
- /* gate for special clocks (sclk) */
- sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
- sclk_fimd1, sclk_mipi1, sclk_dp, sclk_hdmi, sclk_pixel, sclk_audio0,
- sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
- sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
- sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- div_i2s1, div_i2s2, sclk_hdmiphy,
-
- /* gate clocks */
- gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
- smmu_gscl1, smmu_gscl2, smmu_gscl3, mfc, smmu_mfcl, smmu_mfcr, rotator,
- jpeg, mdma1, smmu_rotator, smmu_jpeg, smmu_mdma1, pdma0, pdma1, sata,
- usbotg, mipi_hsi, sdmmc0, sdmmc1, sdmmc2, sdmmc3, sromc, usb2, usb3,
- sata_phyctrl, sata_phyi2c, uart0, uart1, uart2, uart3, uart4, i2c0,
- i2c1, i2c2, i2c3, i2c4, i2c5, i2c6, i2c7, i2c_hdmi, adc, spi0, spi1,
- spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
- hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
- tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
- smmu_mdma0,
-
- /* mux clocks */
- mout_hdmi = 1024,
-
- nr_clks,
-};
-
-/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
@@ -139,6 +95,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CORE1,
SRC_TOP0,
SRC_TOP2,
+ SRC_TOP3,
SRC_GSCL,
SRC_DISP1_0,
SRC_MAU,
@@ -182,7 +139,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = {
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
+PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
@@ -191,311 +148,432 @@ PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi27m" };
PNAME(mout_vpll_p) = { "mout_vpllsrc", "fout_vpll" };
PNAME(mout_cpll_p) = { "fin_pll", "fout_cpll" };
PNAME(mout_epll_p) = { "fin_pll", "fout_epll" };
-PNAME(mout_mpll_user_p) = { "fin_pll", "sclk_mpll" };
-PNAME(mout_bpll_user_p) = { "fin_pll", "sclk_bpll" };
-PNAME(mout_aclk166_p) = { "sclk_cpll", "sclk_mpll_user" };
-PNAME(mout_aclk200_p) = { "sclk_mpll_user", "sclk_bpll_user" };
+PNAME(mout_mpll_user_p) = { "fin_pll", "mout_mpll" };
+PNAME(mout_bpll_user_p) = { "fin_pll", "mout_bpll" };
+PNAME(mout_aclk166_p) = { "mout_cpll", "mout_mpll_user" };
+PNAME(mout_aclk200_p) = { "mout_mpll_user", "mout_bpll_user" };
+PNAME(mout_aclk200_sub_p) = { "fin_pll", "div_aclk200" };
+PNAME(mout_aclk266_sub_p) = { "fin_pll", "div_aclk266" };
+PNAME(mout_aclk333_sub_p) = { "fin_pll", "div_aclk333" };
PNAME(mout_hdmi_p) = { "div_hdmi_pixel", "sclk_hdmiphy" };
-PNAME(mout_usb3_p) = { "sclk_mpll_user", "sclk_cpll" };
+PNAME(mout_usb3_p) = { "mout_mpll_user", "mout_cpll" };
PNAME(mout_group1_p) = { "fin_pll", "fin_pll", "sclk_hdmi27m",
"sclk_dptxphy", "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio0_p) = { "cdclk0", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio1_p) = { "cdclk1", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_audio2_p) = { "cdclk2", "fin_pll", "sclk_hdmi27m", "sclk_dptxphy",
- "sclk_uhostphy", "sclk_hdmiphy",
- "sclk_mpll_user", "sclk_epll", "sclk_vpll",
- "sclk_cpll" };
+ "sclk_uhostphy", "fin_pll",
+ "mout_mpll_user", "mout_epll", "mout_vpll",
+ "mout_cpll", "none", "none",
+ "none", "none", "none",
+ "none" };
PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
"spdif_extclk" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
- FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(0, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
};
static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
- FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
+ FFACTOR(0, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
+ FFACTOR(0, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
};
static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+ MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
};
static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
- MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
- MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
- MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
- MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
- MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
- MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
- MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
- MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
- MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
- MUX(none, "sclk_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
- MUX(none, "sclk_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
- MUX(none, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
- MUX(none, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
- MUX(none, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
- MUX(none, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
- MUX(none, "mout_cam0", mout_group1_p, SRC_GSCL, 16, 4),
- MUX(none, "mout_cam1", mout_group1_p, SRC_GSCL, 20, 4),
- MUX(none, "mout_gscl_wa", mout_group1_p, SRC_GSCL, 24, 4),
- MUX(none, "mout_gscl_wb", mout_group1_p, SRC_GSCL, 28, 4),
- MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
- MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
- MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
- MUX(mout_hdmi, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
- MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
- MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
- MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
- MUX(none, "mout_mmc2", mout_group1_p, SRC_FSYS, 8, 4),
- MUX(none, "mout_mmc3", mout_group1_p, SRC_FSYS, 12, 4),
- MUX(none, "mout_sata", mout_aclk200_p, SRC_FSYS, 24, 1),
- MUX(none, "mout_usb3", mout_usb3_p, SRC_FSYS, 28, 1),
- MUX(none, "mout_jpeg", mout_group1_p, SRC_GEN, 0, 4),
- MUX(none, "mout_uart0", mout_group1_p, SRC_PERIC0, 0, 4),
- MUX(none, "mout_uart1", mout_group1_p, SRC_PERIC0, 4, 4),
- MUX(none, "mout_uart2", mout_group1_p, SRC_PERIC0, 8, 4),
- MUX(none, "mout_uart3", mout_group1_p, SRC_PERIC0, 12, 4),
- MUX(none, "mout_pwm", mout_group1_p, SRC_PERIC0, 24, 4),
- MUX(none, "mout_audio1", mout_audio1_p, SRC_PERIC1, 0, 4),
- MUX(none, "mout_audio2", mout_audio2_p, SRC_PERIC1, 4, 4),
- MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIC1, 8, 2),
- MUX(none, "mout_spi0", mout_group1_p, SRC_PERIC1, 16, 4),
- MUX(none, "mout_spi1", mout_group1_p, SRC_PERIC1, 20, 4),
- MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
+ /*
+ * NOTE: Following table is sorted by (clock domain, register address,
+ * bitfield shift) triplet in ascending order. When adding new entries,
+ * please make sure that the order is kept, to avoid merge conflicts
+ * and make further work with defined data easier.
+ */
+
+ /*
+ * CMU_CPU
+ */
+ MUX_FA(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0, "mout_apll"),
+ MUX_A(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+
+ /*
+ * CMU_CORE
+ */
+ MUX_A(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+
+ /*
+ * CMU_TOP
+ */
+ MUX(0, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1),
+ MUX(0, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1),
+ MUX(0, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1),
+
+ MUX(0, "mout_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
+ MUX(0, "mout_epll", mout_epll_p, SRC_TOP2, 12, 1),
+ MUX(0, "mout_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
+ MUX(0, "mout_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1),
+ MUX(0, "mout_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1),
+
+ MUX(0, "mout_aclk200_disp1_sub", mout_aclk200_sub_p, SRC_TOP3, 4, 1),
+ MUX(0, "mout_aclk266_gscl_sub", mout_aclk266_sub_p, SRC_TOP3, 8, 1),
+ MUX(0, "mout_aclk333_sub", mout_aclk333_sub_p, SRC_TOP3, 24, 1),
+
+ MUX(0, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4),
+ MUX(0, "mout_cam0", mout_group1_p, SRC_GSCL, 16, 4),
+ MUX(0, "mout_cam1", mout_group1_p, SRC_GSCL, 20, 4),
+ MUX(0, "mout_gscl_wa", mout_group1_p, SRC_GSCL, 24, 4),
+ MUX(0, "mout_gscl_wb", mout_group1_p, SRC_GSCL, 28, 4),
+
+ MUX(0, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
+ MUX(0, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
+ MUX(0, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+
+ MUX(0, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
+
+ MUX(0, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
+ MUX(0, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
+ MUX(0, "mout_mmc2", mout_group1_p, SRC_FSYS, 8, 4),
+ MUX(0, "mout_mmc3", mout_group1_p, SRC_FSYS, 12, 4),
+ MUX(0, "mout_sata", mout_aclk200_p, SRC_FSYS, 24, 1),
+ MUX(0, "mout_usb3", mout_usb3_p, SRC_FSYS, 28, 1),
+
+ MUX(0, "mout_jpeg", mout_group1_p, SRC_GEN, 0, 4),
+
+ MUX(0, "mout_uart0", mout_group1_p, SRC_PERIC0, 0, 4),
+ MUX(0, "mout_uart1", mout_group1_p, SRC_PERIC0, 4, 4),
+ MUX(0, "mout_uart2", mout_group1_p, SRC_PERIC0, 8, 4),
+ MUX(0, "mout_uart3", mout_group1_p, SRC_PERIC0, 12, 4),
+ MUX(0, "mout_pwm", mout_group1_p, SRC_PERIC0, 24, 4),
+
+ MUX(0, "mout_audio1", mout_audio1_p, SRC_PERIC1, 0, 4),
+ MUX(0, "mout_audio2", mout_audio2_p, SRC_PERIC1, 4, 4),
+ MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIC1, 8, 2),
+ MUX(0, "mout_spi0", mout_group1_p, SRC_PERIC1, 16, 4),
+ MUX(0, "mout_spi1", mout_group1_p, SRC_PERIC1, 20, 4),
+ MUX(0, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
+
+ /*
+ * CMU_CDREX
+ */
+ MUX(0, "mout_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
+
+ MUX(0, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
+ MUX(0, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
};
static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
- DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
- DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
- DIV(none, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3),
- DIV(none, "aclk266", "sclk_mpll_user", DIV_TOP0, 16, 3),
- DIV(none, "aclk166", "mout_aclk166", DIV_TOP0, 8, 3),
- DIV(none, "aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
- DIV(none, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
- DIV(none, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
- DIV(none, "div_cam0", "mout_cam0", DIV_GSCL, 16, 4),
- DIV(none, "div_cam1", "mout_cam1", DIV_GSCL, 20, 4),
- DIV(none, "div_gscl_wa", "mout_gscl_wa", DIV_GSCL, 24, 4),
- DIV(none, "div_gscl_wb", "mout_gscl_wb", DIV_GSCL, 28, 4),
- DIV(none, "div_fimd1", "mout_fimd1", DIV_DISP1_0, 0, 4),
- DIV(none, "div_mipi1", "mout_mipi1", DIV_DISP1_0, 16, 4),
- DIV(none, "div_dp", "mout_dp", DIV_DISP1_0, 24, 4),
- DIV(none, "div_jpeg", "mout_jpeg", DIV_GEN, 4, 4),
- DIV(none, "div_audio0", "mout_audio0", DIV_MAU, 0, 4),
- DIV(none, "div_pcm0", "sclk_audio0", DIV_MAU, 4, 8),
- DIV(none, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
- DIV(none, "div_usb3", "mout_usb3", DIV_FSYS0, 24, 4),
- DIV(none, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
- DIV(none, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
- DIV(none, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
- DIV(none, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
- DIV(none, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
- DIV(none, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
- DIV(none, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
- DIV(none, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
- DIV(none, "div_spi0", "mout_spi0", DIV_PERIC1, 0, 4),
- DIV(none, "div_spi1", "mout_spi1", DIV_PERIC1, 16, 4),
- DIV(none, "div_spi2", "mout_spi2", DIV_PERIC2, 0, 4),
- DIV(none, "div_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
- DIV(none, "div_audio1", "mout_audio1", DIV_PERIC4, 0, 4),
- DIV(none, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
- DIV(none, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
- DIV(none, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
- DIV(div_i2s1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
- DIV(div_i2s2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
- DIV(sclk_pixel, "div_hdmi_pixel", "sclk_vpll", DIV_DISP1_0, 28, 4),
- DIV_A(none, "armclk", "div_arm", DIV_CPU0, 28, 3, "armclk"),
- DIV_F(none, "div_mipi1_pre", "div_mipi1",
+ /*
+ * NOTE: The following table is sorted by the (clock domain, register
+ * address, bitfield shift) triplet, in ascending order. When adding new
+ * entries, please keep that order, to avoid merge conflicts and to make
+ * further work with the defined data easier.
+ */
+
+ /*
+ * CMU_CPU
+ */
+ DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV_A(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+
+ /*
+ * CMU_TOP
+ */
+ DIV(0, "div_aclk66", "div_aclk66_pre", DIV_TOP0, 0, 3),
+ DIV(0, "div_aclk166", "mout_aclk166", DIV_TOP0, 8, 3),
+ DIV(0, "div_aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
+ DIV(0, "div_aclk266", "mout_mpll_user", DIV_TOP0, 16, 3),
+ DIV(0, "div_aclk333", "mout_aclk333", DIV_TOP0, 20, 3),
+
+ DIV(0, "div_aclk66_pre", "mout_mpll_user", DIV_TOP1, 24, 3),
+
+ DIV(0, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4),
+ DIV(0, "div_cam0", "mout_cam0", DIV_GSCL, 16, 4),
+ DIV(0, "div_cam1", "mout_cam1", DIV_GSCL, 20, 4),
+ DIV(0, "div_gscl_wa", "mout_gscl_wa", DIV_GSCL, 24, 4),
+ DIV(0, "div_gscl_wb", "mout_gscl_wb", DIV_GSCL, 28, 4),
+
+ DIV(0, "div_fimd1", "mout_fimd1", DIV_DISP1_0, 0, 4),
+ DIV(0, "div_mipi1", "mout_mipi1", DIV_DISP1_0, 16, 4),
+ DIV_F(0, "div_mipi1_pre", "div_mipi1",
DIV_DISP1_0, 20, 4, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre0", "div_mmc0",
+ DIV(0, "div_dp", "mout_dp", DIV_DISP1_0, 24, 4),
+ DIV(CLK_SCLK_PIXEL, "div_hdmi_pixel", "mout_vpll", DIV_DISP1_0, 28, 4),
+
+ DIV(0, "div_jpeg", "mout_jpeg", DIV_GEN, 4, 4),
+
+ DIV(0, "div_audio0", "mout_audio0", DIV_MAU, 0, 4),
+ DIV(CLK_DIV_PCM0, "div_pcm0", "sclk_audio0", DIV_MAU, 4, 8),
+
+ DIV(0, "div_sata", "mout_sata", DIV_FSYS0, 20, 4),
+ DIV(0, "div_usb3", "mout_usb3", DIV_FSYS0, 24, 4),
+
+ DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ DIV_F(0, "div_mmc_pre0", "div_mmc0",
DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre1", "div_mmc1",
+ DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV_F(0, "div_mmc_pre1", "div_mmc1",
DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre2", "div_mmc2",
+
+ DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+ DIV_F(0, "div_mmc_pre2", "div_mmc2",
DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_mmc_pre3", "div_mmc3",
+ DIV(0, "div_mmc3", "mout_mmc3", DIV_FSYS2, 16, 4),
+ DIV_F(0, "div_mmc_pre3", "div_mmc3",
DIV_FSYS2, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre0", "div_spi0",
+
+ DIV(0, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4),
+ DIV(0, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4),
+ DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
+ DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
+
+ DIV(0, "div_spi0", "mout_spi0", DIV_PERIC1, 0, 4),
+ DIV_F(0, "div_spi_pre0", "div_spi0",
DIV_PERIC1, 8, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre1", "div_spi1",
+ DIV(0, "div_spi1", "mout_spi1", DIV_PERIC1, 16, 4),
+ DIV_F(0, "div_spi_pre1", "div_spi1",
DIV_PERIC1, 24, 8, CLK_SET_RATE_PARENT, 0),
- DIV_F(none, "div_spi_pre2", "div_spi2",
+
+ DIV(0, "div_spi2", "mout_spi2", DIV_PERIC2, 0, 4),
+ DIV_F(0, "div_spi_pre2", "div_spi2",
DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
+
+ DIV(0, "div_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
+
+ DIV(0, "div_audio1", "mout_audio1", DIV_PERIC4, 0, 4),
+ DIV(0, "div_pcm1", "sclk_audio1", DIV_PERIC4, 4, 8),
+ DIV(0, "div_audio2", "mout_audio2", DIV_PERIC4, 16, 4),
+ DIV(0, "div_pcm2", "sclk_audio2", DIV_PERIC4, 20, 8),
+
+ DIV(CLK_DIV_I2S1, "div_i2s1", "sclk_audio1", DIV_PERIC5, 0, 6),
+ DIV(CLK_DIV_I2S2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
};
static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
- GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
- GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
- GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
- GATE(gscl3, "gscl3", "aclk266", GATE_IP_GSCL, 3, 0, 0),
- GATE(gscl_wa, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
- GATE(gscl_wb, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
- GATE(smmu_gscl0, "smmu_gscl0", "aclk266", GATE_IP_GSCL, 7, 0, 0),
- GATE(smmu_gscl1, "smmu_gscl1", "aclk266", GATE_IP_GSCL, 8, 0, 0),
- GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
- GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
- GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
- GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
- GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk166", GATE_IP_GEN, 7, 0, 0),
- GATE(smmu_mdma1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
- GATE(pdma0, "pdma0", "aclk200", GATE_IP_FSYS, 1, 0, 0),
- GATE(pdma1, "pdma1", "aclk200", GATE_IP_FSYS, 2, 0, 0),
- GATE(sata, "sata", "aclk200", GATE_IP_FSYS, 6, 0, 0),
- GATE(usbotg, "usbotg", "aclk200", GATE_IP_FSYS, 7, 0, 0),
- GATE(mipi_hsi, "mipi_hsi", "aclk200", GATE_IP_FSYS, 8, 0, 0),
- GATE(sdmmc0, "sdmmc0", "aclk200", GATE_IP_FSYS, 12, 0, 0),
- GATE(sdmmc1, "sdmmc1", "aclk200", GATE_IP_FSYS, 13, 0, 0),
- GATE(sdmmc2, "sdmmc2", "aclk200", GATE_IP_FSYS, 14, 0, 0),
- GATE(sdmmc3, "sdmmc3", "aclk200", GATE_IP_FSYS, 15, 0, 0),
- GATE(sromc, "sromc", "aclk200", GATE_IP_FSYS, 17, 0, 0),
- GATE(usb2, "usb2", "aclk200", GATE_IP_FSYS, 18, 0, 0),
- GATE(usb3, "usb3", "aclk200", GATE_IP_FSYS, 19, 0, 0),
- GATE(sata_phyctrl, "sata_phyctrl", "aclk200", GATE_IP_FSYS, 24, 0, 0),
- GATE(sata_phyi2c, "sata_phyi2c", "aclk200", GATE_IP_FSYS, 25, 0, 0),
- GATE(uart0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0),
- GATE(uart1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0),
- GATE(uart2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0),
- GATE(uart3, "uart3", "aclk66", GATE_IP_PERIC, 3, 0, 0),
- GATE(uart4, "uart4", "aclk66", GATE_IP_PERIC, 4, 0, 0),
- GATE(i2c0, "i2c0", "aclk66", GATE_IP_PERIC, 6, 0, 0),
- GATE(i2c1, "i2c1", "aclk66", GATE_IP_PERIC, 7, 0, 0),
- GATE(i2c2, "i2c2", "aclk66", GATE_IP_PERIC, 8, 0, 0),
- GATE(i2c3, "i2c3", "aclk66", GATE_IP_PERIC, 9, 0, 0),
- GATE(i2c4, "i2c4", "aclk66", GATE_IP_PERIC, 10, 0, 0),
- GATE(i2c5, "i2c5", "aclk66", GATE_IP_PERIC, 11, 0, 0),
- GATE(i2c6, "i2c6", "aclk66", GATE_IP_PERIC, 12, 0, 0),
- GATE(i2c7, "i2c7", "aclk66", GATE_IP_PERIC, 13, 0, 0),
- GATE(i2c_hdmi, "i2c_hdmi", "aclk66", GATE_IP_PERIC, 14, 0, 0),
- GATE(adc, "adc", "aclk66", GATE_IP_PERIC, 15, 0, 0),
- GATE(spi0, "spi0", "aclk66", GATE_IP_PERIC, 16, 0, 0),
- GATE(spi1, "spi1", "aclk66", GATE_IP_PERIC, 17, 0, 0),
- GATE(spi2, "spi2", "aclk66", GATE_IP_PERIC, 18, 0, 0),
- GATE(i2s1, "i2s1", "aclk66", GATE_IP_PERIC, 20, 0, 0),
- GATE(i2s2, "i2s2", "aclk66", GATE_IP_PERIC, 21, 0, 0),
- GATE(pcm1, "pcm1", "aclk66", GATE_IP_PERIC, 22, 0, 0),
- GATE(pcm2, "pcm2", "aclk66", GATE_IP_PERIC, 23, 0, 0),
- GATE(pwm, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
- GATE(spdif, "spdif", "aclk66", GATE_IP_PERIC, 26, 0, 0),
- GATE(ac97, "ac97", "aclk66", GATE_IP_PERIC, 27, 0, 0),
- GATE(hsi2c0, "hsi2c0", "aclk66", GATE_IP_PERIC, 28, 0, 0),
- GATE(hsi2c1, "hsi2c1", "aclk66", GATE_IP_PERIC, 29, 0, 0),
- GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
- GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
- GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk66",
- GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
- GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
- GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
- GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
- GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
- GATE(tzpc3, "tzpc3", "aclk66", GATE_IP_PERIS, 9, 0, 0),
- GATE(tzpc4, "tzpc4", "aclk66", GATE_IP_PERIS, 10, 0, 0),
- GATE(tzpc5, "tzpc5", "aclk66", GATE_IP_PERIS, 11, 0, 0),
- GATE(tzpc6, "tzpc6", "aclk66", GATE_IP_PERIS, 12, 0, 0),
- GATE(tzpc7, "tzpc7", "aclk66", GATE_IP_PERIS, 13, 0, 0),
- GATE(tzpc8, "tzpc8", "aclk66", GATE_IP_PERIS, 14, 0, 0),
- GATE(tzpc9, "tzpc9", "aclk66", GATE_IP_PERIS, 15, 0, 0),
- GATE(hdmi_cec, "hdmi_cec", "aclk66", GATE_IP_PERIS, 16, 0, 0),
- GATE(mct, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0),
- GATE(wdt, "wdt", "aclk66", GATE_IP_PERIS, 19, 0, 0),
- GATE(rtc, "rtc", "aclk66", GATE_IP_PERIS, 20, 0, 0),
- GATE(tmu, "tmu", "aclk66", GATE_IP_PERIS, 21, 0, 0),
- GATE(cmu_top, "cmu_top", "aclk66",
- GATE_IP_PERIS, 3, CLK_IGNORE_UNUSED, 0),
- GATE(cmu_core, "cmu_core", "aclk66",
- GATE_IP_PERIS, 4, CLK_IGNORE_UNUSED, 0),
- GATE(cmu_mem, "cmu_mem", "aclk66",
- GATE_IP_PERIS, 5, CLK_IGNORE_UNUSED, 0),
- GATE(sclk_cam_bayer, "sclk_cam_bayer", "div_cam_bayer",
+ /*
+ * NOTE: The following table is sorted by the (clock domain, register
+ * address, bitfield shift) triplet, in ascending order. When adding new
+ * entries, please keep that order, to avoid merge conflicts and to make
+ * further work with the defined data easier.
+ */
+
+ /*
+ * CMU_ACP
+ */
+ GATE(CLK_MDMA0, "mdma0", "div_aclk266", GATE_IP_ACP, 1, 0, 0),
+ GATE(CLK_G2D, "g2d", "div_aclk200", GATE_IP_ACP, 3, 0, 0),
+ GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "div_aclk266", GATE_IP_ACP, 5, 0, 0),
+
+ /*
+ * CMU_TOP
+ */
+ GATE(CLK_SCLK_CAM_BAYER, "sclk_cam_bayer", "div_cam_bayer",
SRC_MASK_GSCL, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam0, "sclk_cam0", "div_cam0",
+ GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0",
SRC_MASK_GSCL, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_cam1, "sclk_cam1", "div_cam1",
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
SRC_MASK_GSCL, 20, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wa, "sclk_gscl_wa", "div_gscl_wa",
+ GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "div_gscl_wa",
SRC_MASK_GSCL, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wb, "sclk_gscl_wb", "div_gscl_wb",
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "div_gscl_wb",
SRC_MASK_GSCL, 28, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1",
+
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "div_fimd1",
SRC_MASK_DISP1_0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "div_mipi1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "div_mipi1",
SRC_MASK_DISP1_0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_dp, "sclk_dp", "div_dp",
+ GATE(CLK_SCLK_DP, "sclk_dp", "div_dp",
SRC_MASK_DISP1_0, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi",
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
SRC_MASK_DISP1_0, 20, 0, 0),
- GATE(sclk_audio0, "sclk_audio0", "div_audio0",
+
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
SRC_MASK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0",
+
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0",
SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1",
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1",
SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2",
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2",
SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3",
+ GATE(CLK_SCLK_MMC3, "sclk_mmc3", "div_mmc_pre3",
SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_sata, "sclk_sata", "div_sata",
+ GATE(CLK_SCLK_SATA, "sclk_sata", "div_sata",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usb3, "sclk_usb3", "div_usb3",
+ GATE(CLK_SCLK_USB3, "sclk_usb3", "div_usb3",
SRC_MASK_FSYS, 28, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_jpeg, "sclk_jpeg", "div_jpeg",
+
+ GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
SRC_MASK_GEN, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart0, "sclk_uart0", "div_uart0",
+
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "sclk_uart1", "div_uart1",
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "sclk_uart2", "div_uart2",
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "sclk_uart3", "div_uart3",
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
SRC_MASK_PERIC0, 12, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pwm, "sclk_pwm", "div_pwm",
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "div_pwm",
SRC_MASK_PERIC0, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio1, "sclk_audio1", "div_audio1",
+
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
SRC_MASK_PERIC1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_audio2, "sclk_audio2", "div_audio2",
+ GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
SRC_MASK_PERIC1, 4, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif",
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
SRC_MASK_PERIC1, 4, 0, 0),
- GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0",
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi_pre0",
SRC_MASK_PERIC1, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1",
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi_pre1",
SRC_MASK_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2",
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi_pre2",
SRC_MASK_PERIC1, 24, CLK_SET_RATE_PARENT, 0),
- GATE(fimd1, "fimd1", "aclk200", GATE_IP_DISP1, 0, 0, 0),
- GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
- GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
- GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
- GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
- GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
+
+ GATE(CLK_GSCL0, "gscl0", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 0, 0,
+ 0),
+ GATE(CLK_GSCL1, "gscl1", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 1, 0,
+ 0),
+ GATE(CLK_GSCL2, "gscl2", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 2, 0,
+ 0),
+ GATE(CLK_GSCL3, "gscl3", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 3, 0,
+ 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 7, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 8, 0, 0),
+ GATE(CLK_SMMU_GSCL2, "smmu_gscl2", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 9, 0, 0),
+ GATE(CLK_SMMU_GSCL3, "smmu_gscl3", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 10, 0, 0),
+
+ GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
+ 0),
+ GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
+ 0),
+ GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
+ 0),
+ GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
+ 0),
+ GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
+ 0),
+
+ GATE(CLK_MFC, "mfc", "mout_aclk333_sub", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "mout_aclk333_sub", GATE_IP_MFC, 1, 0,
+ 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "mout_aclk333_sub", GATE_IP_MFC, 2, 0,
+ 0),
+
+ GATE(CLK_ROTATOR, "rotator", "div_aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "div_aclk166", GATE_IP_GEN, 2, 0, 0),
+ GATE(CLK_MDMA1, "mdma1", "div_aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "div_aclk266", GATE_IP_GEN, 6, 0,
+ 0),
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "div_aclk166", GATE_IP_GEN, 7, 0, 0),
+ GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "div_aclk266", GATE_IP_GEN, 9, 0, 0),
+
+ GATE(CLK_PDMA0, "pdma0", "div_aclk200", GATE_IP_FSYS, 1, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "div_aclk200", GATE_IP_FSYS, 2, 0, 0),
+ GATE(CLK_SATA, "sata", "div_aclk200", GATE_IP_FSYS, 6, 0, 0),
+ GATE(CLK_USBOTG, "usbotg", "div_aclk200", GATE_IP_FSYS, 7, 0, 0),
+ GATE(CLK_MIPI_HSI, "mipi_hsi", "div_aclk200", GATE_IP_FSYS, 8, 0, 0),
+ GATE(CLK_SDMMC0, "sdmmc0", "div_aclk200", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SDMMC1, "sdmmc1", "div_aclk200", GATE_IP_FSYS, 13, 0, 0),
+ GATE(CLK_SDMMC2, "sdmmc2", "div_aclk200", GATE_IP_FSYS, 14, 0, 0),
+ GATE(CLK_SDMMC3, "sdmmc3", "div_aclk200", GATE_IP_FSYS, 15, 0, 0),
+ GATE(CLK_SROMC, "sromc", "div_aclk200", GATE_IP_FSYS, 17, 0, 0),
+ GATE(CLK_USB2, "usb2", "div_aclk200", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_USB3, "usb3", "div_aclk200", GATE_IP_FSYS, 19, 0, 0),
+ GATE(CLK_SATA_PHYCTRL, "sata_phyctrl", "div_aclk200",
+ GATE_IP_FSYS, 24, 0, 0),
+ GATE(CLK_SATA_PHYI2C, "sata_phyi2c", "div_aclk200", GATE_IP_FSYS, 25, 0,
+ 0),
+
+ GATE(CLK_UART0, "uart0", "div_aclk66", GATE_IP_PERIC, 0, 0, 0),
+ GATE(CLK_UART1, "uart1", "div_aclk66", GATE_IP_PERIC, 1, 0, 0),
+ GATE(CLK_UART2, "uart2", "div_aclk66", GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "div_aclk66", GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_UART4, "uart4", "div_aclk66", GATE_IP_PERIC, 4, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "div_aclk66", GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "div_aclk66", GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "div_aclk66", GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "div_aclk66", GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "div_aclk66", GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "div_aclk66", GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "div_aclk66", GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "div_aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "div_aclk66", GATE_IP_PERIC, 14, 0, 0),
+ GATE(CLK_ADC, "adc", "div_aclk66", GATE_IP_PERIC, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "div_aclk66", GATE_IP_PERIC, 16, 0, 0),
+ GATE(CLK_SPI1, "spi1", "div_aclk66", GATE_IP_PERIC, 17, 0, 0),
+ GATE(CLK_SPI2, "spi2", "div_aclk66", GATE_IP_PERIC, 18, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "div_aclk66", GATE_IP_PERIC, 20, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "div_aclk66", GATE_IP_PERIC, 21, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "div_aclk66", GATE_IP_PERIC, 22, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "div_aclk66", GATE_IP_PERIC, 23, 0, 0),
+ GATE(CLK_PWM, "pwm", "div_aclk66", GATE_IP_PERIC, 24, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "div_aclk66", GATE_IP_PERIC, 26, 0, 0),
+ GATE(CLK_AC97, "ac97", "div_aclk66", GATE_IP_PERIC, 27, 0, 0),
+ GATE(CLK_HSI2C0, "hsi2c0", "div_aclk66", GATE_IP_PERIC, 28, 0, 0),
+ GATE(CLK_HSI2C1, "hsi2c1", "div_aclk66", GATE_IP_PERIC, 29, 0, 0),
+ GATE(CLK_HSI2C2, "hsi2c2", "div_aclk66", GATE_IP_PERIC, 30, 0, 0),
+ GATE(CLK_HSI2C3, "hsi2c3", "div_aclk66", GATE_IP_PERIC, 31, 0, 0),
+
+ GATE(CLK_CHIPID, "chipid", "div_aclk66", GATE_IP_PERIS, 0, 0, 0),
+ GATE(CLK_SYSREG, "sysreg", "div_aclk66",
+ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PMU, "pmu", "div_aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED,
+ 0),
+ GATE(CLK_CMU_TOP, "cmu_top", "div_aclk66",
+ GATE_IP_PERIS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_CORE, "cmu_core", "div_aclk66",
+ GATE_IP_PERIS, 4, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_MEM, "cmu_mem", "div_aclk66",
+ GATE_IP_PERIS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC0, "tzpc0", "div_aclk66", GATE_IP_PERIS, 6, 0, 0),
+ GATE(CLK_TZPC1, "tzpc1", "div_aclk66", GATE_IP_PERIS, 7, 0, 0),
+ GATE(CLK_TZPC2, "tzpc2", "div_aclk66", GATE_IP_PERIS, 8, 0, 0),
+ GATE(CLK_TZPC3, "tzpc3", "div_aclk66", GATE_IP_PERIS, 9, 0, 0),
+ GATE(CLK_TZPC4, "tzpc4", "div_aclk66", GATE_IP_PERIS, 10, 0, 0),
+ GATE(CLK_TZPC5, "tzpc5", "div_aclk66", GATE_IP_PERIS, 11, 0, 0),
+ GATE(CLK_TZPC6, "tzpc6", "div_aclk66", GATE_IP_PERIS, 12, 0, 0),
+ GATE(CLK_TZPC7, "tzpc7", "div_aclk66", GATE_IP_PERIS, 13, 0, 0),
+ GATE(CLK_TZPC8, "tzpc8", "div_aclk66", GATE_IP_PERIS, 14, 0, 0),
+ GATE(CLK_TZPC9, "tzpc9", "div_aclk66", GATE_IP_PERIS, 15, 0, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk66", GATE_IP_PERIS, 16, 0, 0),
+ GATE(CLK_MCT, "mct", "div_aclk66", GATE_IP_PERIS, 18, 0, 0),
+ GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0),
+ GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
+ GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
};
static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
@@ -521,20 +599,41 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
{ },
};
+static struct samsung_pll_rate_table apll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_35XX_RATE(rate, m, p, s) */
+ PLL_35XX_RATE(1700000000, 425, 6, 0),
+ PLL_35XX_RATE(1600000000, 200, 3, 0),
+ PLL_35XX_RATE(1500000000, 250, 4, 0),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 6, 0),
+ PLL_35XX_RATE(1200000000, 200, 4, 0),
+ PLL_35XX_RATE(1100000000, 275, 6, 0),
+ PLL_35XX_RATE(1000000000, 125, 3, 0),
+ PLL_35XX_RATE(900000000, 150, 4, 0),
+ PLL_35XX_RATE(800000000, 100, 3, 0),
+ PLL_35XX_RATE(700000000, 175, 3, 1),
+ PLL_35XX_RATE(600000000, 200, 4, 1),
+ PLL_35XX_RATE(500000000, 125, 3, 1),
+ PLL_35XX_RATE(400000000, 100, 3, 1),
+ PLL_35XX_RATE(300000000, 200, 4, 2),
+ PLL_35XX_RATE(200000000, 100, 3, 2),
+};
+
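The apll_24mhz_tbl entries above encode each target frequency as PLL35xx (m, p, s) coefficients. Assuming the usual PLL35xx relation rate = fin * m / (p * 2^s) with a 24 MHz fin_pll (the table is only attached when _get_rate("fin_pll") == 24 * MHZ, further down in exynos5250_clk_init), every row reproduces its nominal rate exactly. A minimal, self-contained sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Recompute a PLL35xx output rate from its (m, p, s) coefficients,
 * assuming rate = fin * m / (p << s); illustrative only. */
static uint64_t pll35xx_rate(uint64_t fin, unsigned int m, unsigned int p,
			     unsigned int s)
{
	return fin * m / ((uint64_t)p << s);
}

int main(void)
{
	const uint64_t fin = 24000000;	/* fin_pll at 24 MHz */

	/* Spot-check rows of apll_24mhz_tbl */
	printf("%llu\n", (unsigned long long)pll35xx_rate(fin, 425, 6, 0)); /* 1700000000 */
	printf("%llu\n", (unsigned long long)pll35xx_rate(fin, 175, 3, 1)); /*  700000000 */
	printf("%llu\n", (unsigned long long)pll35xx_rate(fin, 100, 3, 2)); /*  200000000 */
	return 0;
}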
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_35xx, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
- APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_35xx, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
- MPLL_CON0, "fout_mpll", NULL),
- [bpll] = PLL(pll_35xx, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ [apll] = PLL_A(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON0, "fout_mpll", NULL),
+ [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
- [gpll] = PLL(pll_35xx, fout_gpll, "fout_gpll", "fin_pll", GPLL_LOCK,
+ [gpll] = PLL(pll_35xx, CLK_FOUT_GPLL, "fout_gpll", "fin_pll", GPLL_LOCK,
GPLL_CON0, NULL),
- [cpll] = PLL(pll_35xx, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+ [cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
CPLL_CON0, NULL),
- [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
EPLL_CON0, NULL),
- [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
VPLL_LOCK, VPLL_CON0, NULL),
};
@@ -556,7 +655,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs),
NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
@@ -565,8 +664,10 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_mux(exynos5250_pll_pmux_clks,
ARRAY_SIZE(exynos5250_pll_pmux_clks));
- if (_get_rate("fin_pll") == 24 * MHZ)
+ if (_get_rate("fin_pll") == 24 * MHZ) {
exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
+ exynos5250_plls[apll].rate_table = apll_24mhz_tbl;
+ }
if (_get_rate("mout_vpllsrc") == 24 * MHZ)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
@@ -585,6 +686,6 @@ static void __init exynos5250_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5250_gate_clks));
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
- _get_rate("armclk"));
+ _get_rate("div_arm2"));
}
CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 48c4a9350b91..ab4f2f7d88ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -10,6 +10,7 @@
* Common Clock Framework support for Exynos5420 SoC.
*/
+#include <dt-bindings/clock/exynos5420.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -107,48 +108,6 @@ enum exynos5420_plls {
nr_plls /* number of PLLs */
};
-enum exynos5420_clks {
- none,
-
- /* core clocks */
- fin_pll, fout_apll, fout_cpll, fout_dpll, fout_epll, fout_rpll,
- fout_ipll, fout_spll, fout_vpll, fout_mpll, fout_bpll, fout_kpll,
-
- /* gate for special clocks (sclk) */
- sclk_uart0 = 128, sclk_uart1, sclk_uart2, sclk_uart3, sclk_mmc0,
- sclk_mmc1, sclk_mmc2, sclk_spi0, sclk_spi1, sclk_spi2, sclk_i2s1,
- sclk_i2s2, sclk_pcm1, sclk_pcm2, sclk_spdif, sclk_hdmi, sclk_pixel,
- sclk_dp1, sclk_mipi1, sclk_fimd1, sclk_maudio0, sclk_maupcm0,
- sclk_usbd300, sclk_usbd301, sclk_usbphy300, sclk_usbphy301, sclk_unipro,
- sclk_pwm, sclk_gscl_wa, sclk_gscl_wb, sclk_hdmiphy,
-
- /* gate clocks */
- aclk66_peric = 256, uart0, uart1, uart2, uart3, i2c0, i2c1, i2c2, i2c3,
- i2c4, i2c5, i2c6, i2c7, i2c_hdmi, tsadc, spi0, spi1, spi2, keyif, i2s1,
- i2s2, pcm1, pcm2, pwm, spdif, i2c8, i2c9, i2c10, aclk66_psgen = 300,
- chipid, sysreg, tzpc0, tzpc1, tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7,
- tzpc8, tzpc9, hdmi_cec, seckey, mct, wdt, rtc, tmu, tmu_gpu,
- pclk66_gpio = 330, aclk200_fsys2 = 350, mmc0, mmc1, mmc2, sromc, ufs,
- aclk200_fsys = 360, tsi, pdma0, pdma1, rtic, usbh20, usbd300, usbd301,
- aclk400_mscl = 380, mscl0, mscl1, mscl2, smmu_mscl0, smmu_mscl1,
- smmu_mscl2, aclk333 = 400, mfc, smmu_mfcl, smmu_mfcr,
- aclk200_disp1 = 410, dsim1, dp1, hdmi, aclk300_disp1 = 420, fimd1,
- smmu_fimd1, aclk166 = 430, mixer, aclk266 = 440, rotator, mdma1,
- smmu_rotator, smmu_mdma1, aclk300_jpeg = 450, jpeg, jpeg2, smmu_jpeg,
- aclk300_gscl = 460, smmu_gscl0, smmu_gscl1, gscl_wa, gscl_wb, gscl0,
- gscl1, clk_3aa, aclk266_g2d = 470, sss, slim_sss, mdma0,
- aclk333_g2d = 480, g2d, aclk333_432_gscl = 490, smmu_3aa, smmu_fimcl0,
- smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d, smmu_mixer,
-
- /* mux clocks */
- mout_hdmi = 640,
-
- /* divider clocks */
- dout_pixel = 768,
-
- nr_clks,
-};
-
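The enum block removed above was the driver-local source of clock IDs; after this change the CLK_* constants used throughout the file (CLK_FIN_PLL, CLK_MOUT_HDMI, CLK_SCLK_UART0, ...) come from the dt-bindings header included at the top of this patch, so the clock driver and device tree sources share one numbering. A hedged sketch of what such a header contains, reusing the numbering visible in the removed enum (fin_pll = 1, mout_hdmi = 640, dout_pixel = 768) rather than verified values:

/* Illustrative excerpt only; see include/dt-bindings/clock/exynos5420.h
 * for the authoritative definitions. */
#define CLK_FIN_PLL		1	/* was fin_pll in the removed enum */
#define CLK_MOUT_HDMI		640	/* was mout_hdmi = 640 */
#define CLK_DOUT_PIXEL		768	/* was dout_pixel = 768 */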
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -298,225 +257,226 @@ PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
- FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
- FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
- FRATE(none, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
- FRATE(none, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
- FRATE(none, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
};
static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
+ FFACTOR(0, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
};
static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
- MUX(none, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
- MUX(none, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
- MUX(none, "mout_apll", apll_p, SRC_CPU, 0, 1),
- MUX(none, "mout_cpu", cpu_p, SRC_CPU, 16, 1),
- MUX(none, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
- MUX(none, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1),
+ MUX(0, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
+ MUX(0, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
+ MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1),
+ MUX(0, "mout_cpu", cpu_p, SRC_CPU, 16, 1),
+ MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1),
+ MUX(0, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1),
- MUX(none, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
+ MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1),
- MUX_A(none, "mout_aclk400_mscl", group1_p,
+ MUX_A(0, "mout_aclk400_mscl", group1_p,
SRC_TOP0, 4, 2, "aclk400_mscl"),
- MUX(none, "mout_aclk200", group1_p, SRC_TOP0, 8, 2),
- MUX(none, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2),
- MUX(none, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2),
-
- MUX(none, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2),
- MUX(none, "mout_aclk66", group1_p, SRC_TOP1, 8, 2),
- MUX(none, "mout_aclk266", group1_p, SRC_TOP1, 20, 2),
- MUX(none, "mout_aclk166", group1_p, SRC_TOP1, 24, 2),
- MUX(none, "mout_aclk333", group1_p, SRC_TOP1, 28, 2),
-
- MUX(none, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2),
- MUX(none, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2),
- MUX(none, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1),
- MUX(none, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2),
- MUX(none, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2),
- MUX(none, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2),
-
- MUX(none, "mout_user_aclk400_mscl", user_aclk400_mscl_p,
+ MUX(0, "mout_aclk200", group1_p, SRC_TOP0, 8, 2),
+ MUX(0, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2),
+ MUX(0, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2),
+
+ MUX(0, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2),
+ MUX(0, "mout_aclk66", group1_p, SRC_TOP1, 8, 2),
+ MUX(0, "mout_aclk266", group1_p, SRC_TOP1, 20, 2),
+ MUX(0, "mout_aclk166", group1_p, SRC_TOP1, 24, 2),
+ MUX(0, "mout_aclk333", group1_p, SRC_TOP1, 28, 2),
+
+ MUX(0, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2),
+ MUX(0, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2),
+ MUX(0, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1),
+ MUX(0, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2),
+ MUX(0, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2),
+ MUX(0, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2),
+
+ MUX(0, "mout_user_aclk400_mscl", user_aclk400_mscl_p,
SRC_TOP3, 4, 1),
- MUX_A(none, "mout_aclk200_disp1", aclk200_disp1_p,
+ MUX_A(0, "mout_aclk200_disp1", aclk200_disp1_p,
SRC_TOP3, 8, 1, "aclk200_disp1"),
- MUX(none, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p,
+ MUX(0, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p,
SRC_TOP3, 12, 1),
- MUX(none, "mout_user_aclk200_fsys", user_aclk200_fsys_p,
+ MUX(0, "mout_user_aclk200_fsys", user_aclk200_fsys_p,
SRC_TOP3, 28, 1),
- MUX(none, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p,
+ MUX(0, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p,
SRC_TOP4, 0, 1),
- MUX(none, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1),
- MUX(none, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1),
- MUX(none, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1),
- MUX(none, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1),
-
- MUX(none, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1),
- MUX(none, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1),
- MUX(none, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1),
- MUX_A(none, "mout_user_aclk_g3d", user_aclk_g3d_p,
+ MUX(0, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1),
+ MUX(0, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1),
+ MUX(0, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1),
+ MUX(0, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1),
+
+ MUX(0, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1),
+ MUX(0, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1),
+ MUX(0, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1),
+ MUX_A(0, "mout_user_aclk_g3d", user_aclk_g3d_p,
SRC_TOP5, 16, 1, "aclkg3d"),
- MUX(none, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p,
+ MUX(0, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p,
SRC_TOP5, 20, 1),
- MUX(none, "mout_user_aclk300_disp1", user_aclk300_disp1_p,
+ MUX(0, "mout_user_aclk300_disp1", user_aclk300_disp1_p,
SRC_TOP5, 24, 1),
- MUX(none, "mout_user_aclk300_gscl", user_aclk300_gscl_p,
+ MUX(0, "mout_user_aclk300_gscl", user_aclk300_gscl_p,
SRC_TOP5, 28, 1),
- MUX(none, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1),
- MUX(none, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1),
- MUX(none, "sclk_spll", spll_p, SRC_TOP6, 8, 1),
- MUX(none, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1),
- MUX(none, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1),
- MUX(none, "sclk_epll", epll_p, SRC_TOP6, 20, 1),
- MUX(none, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1),
- MUX(none, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1),
-
- MUX(none, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1),
- MUX(none, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1),
- MUX(none, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p,
+ MUX(0, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1),
+ MUX(0, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1),
+ MUX(0, "sclk_spll", spll_p, SRC_TOP6, 8, 1),
+ MUX(0, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1),
+ MUX(0, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1),
+ MUX(0, "sclk_epll", epll_p, SRC_TOP6, 20, 1),
+ MUX(0, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1),
+ MUX(0, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1),
+
+ MUX(0, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1),
+ MUX(0, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1),
+ MUX(0, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p,
SRC_TOP10, 12, 1),
- MUX(none, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1),
+ MUX(0, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1),
- MUX(none, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p,
+ MUX(0, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p,
SRC_TOP11, 0, 1),
- MUX(none, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1),
- MUX(none, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1),
- MUX(none, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1),
- MUX(none, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1),
-
- MUX(none, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1),
- MUX(none, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1),
- MUX(none, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1),
- MUX(none, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1),
- MUX(none, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p,
+ MUX(0, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1),
+ MUX(0, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1),
+ MUX(0, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1),
+ MUX(0, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1),
+
+ MUX(0, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1),
+ MUX(0, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1),
+ MUX(0, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1),
+ MUX(0, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1),
+ MUX(0, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p,
SRC_TOP12, 24, 1),
- MUX(none, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1),
+ MUX(0, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1),
/* DISP1 Block */
- MUX(none, "mout_fimd1", group3_p, SRC_DISP10, 4, 1),
- MUX(none, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
- MUX(none, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
- MUX(none, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
- MUX(mout_hdmi, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+ MUX(0, "mout_fimd1", group3_p, SRC_DISP10, 4, 1),
+ MUX(0, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
+ MUX(0, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
+ MUX(0, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
/* MAU Block */
- MUX(none, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
+ MUX(0, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
/* FSYS Block */
- MUX(none, "mout_usbd301", group2_p, SRC_FSYS, 4, 3),
- MUX(none, "mout_mmc0", group2_p, SRC_FSYS, 8, 3),
- MUX(none, "mout_mmc1", group2_p, SRC_FSYS, 12, 3),
- MUX(none, "mout_mmc2", group2_p, SRC_FSYS, 16, 3),
- MUX(none, "mout_usbd300", group2_p, SRC_FSYS, 20, 3),
- MUX(none, "mout_unipro", group2_p, SRC_FSYS, 24, 3),
+ MUX(0, "mout_usbd301", group2_p, SRC_FSYS, 4, 3),
+ MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 8, 3),
+ MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 12, 3),
+ MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 16, 3),
+ MUX(0, "mout_usbd300", group2_p, SRC_FSYS, 20, 3),
+ MUX(0, "mout_unipro", group2_p, SRC_FSYS, 24, 3),
/* PERIC Block */
- MUX(none, "mout_uart0", group2_p, SRC_PERIC0, 4, 3),
- MUX(none, "mout_uart1", group2_p, SRC_PERIC0, 8, 3),
- MUX(none, "mout_uart2", group2_p, SRC_PERIC0, 12, 3),
- MUX(none, "mout_uart3", group2_p, SRC_PERIC0, 16, 3),
- MUX(none, "mout_pwm", group2_p, SRC_PERIC0, 24, 3),
- MUX(none, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3),
- MUX(none, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3),
- MUX(none, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3),
- MUX(none, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3),
- MUX(none, "mout_spi0", group2_p, SRC_PERIC1, 20, 3),
- MUX(none, "mout_spi1", group2_p, SRC_PERIC1, 24, 3),
- MUX(none, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
+ MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 4, 3),
+ MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 8, 3),
+ MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 12, 3),
+ MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 16, 3),
+ MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 3),
+ MUX(0, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3),
+ MUX(0, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3),
+ MUX(0, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3),
+ MUX(0, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3),
+ MUX(0, "mout_spi0", group2_p, SRC_PERIC1, 20, 3),
+ MUX(0, "mout_spi1", group2_p, SRC_PERIC1, 24, 3),
+ MUX(0, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
};
static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
- DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
- DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(none, "armclk2", "div_arm", DIV_CPU0, 28, 3),
- DIV(none, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3),
- DIV(none, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
-
- DIV(none, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
- DIV(none, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
- DIV(none, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
- DIV(none, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
- DIV(none, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
-
- DIV(none, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
+ DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
+ DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
+ DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3),
+ DIV(0, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3),
+ DIV(0, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
+
+ DIV(0, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
+ DIV(0, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
+ DIV(0, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
+ DIV(0, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
+ DIV(0, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
+
+ DIV(0, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
DIV_TOP1, 0, 3),
- DIV(none, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
- DIV(none, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
- DIV(none, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
- DIV(none, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
-
- DIV(none, "dout_aclk333_g2d", "mout_aclk333_g2d", DIV_TOP2, 8, 3),
- DIV(none, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
- DIV(none, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
- DIV(none, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
- DIV_A(none, "dout_aclk300_disp1", "mout_aclk300_disp1",
+ DIV(0, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
+ DIV(0, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
+ DIV(0, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
+ DIV(0, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
+
+ DIV(0, "dout_aclk333_g2d", "mout_aclk333_g2d", DIV_TOP2, 8, 3),
+ DIV(0, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
+ DIV(0, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
+ DIV(0, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
+ DIV_A(0, "dout_aclk300_disp1", "mout_aclk300_disp1",
DIV_TOP2, 24, 3, "aclk300_disp1"),
- DIV(none, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
+ DIV(0, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
/* DISP1 Block */
- DIV(none, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
- DIV(none, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
- DIV(none, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
- DIV(dout_pixel, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+ DIV(0, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
+ DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
+ DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
+ DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
/* Audio Block */
- DIV(none, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
- DIV(none, "dout_maupcm0", "dout_maudio0", DIV_MAU, 24, 8),
+ DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
+ DIV(0, "dout_maupcm0", "dout_maudio0", DIV_MAU, 24, 8),
/* USB3.0 */
- DIV(none, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 12, 4),
- DIV(none, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4),
- DIV(none, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 20, 4),
- DIV(none, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4),
+ DIV(0, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 12, 4),
+ DIV(0, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4),
+ DIV(0, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 20, 4),
+ DIV(0, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4),
/* MMC */
- DIV(none, "dout_mmc0", "mout_mmc0", DIV_FSYS1, 0, 10),
- DIV(none, "dout_mmc1", "mout_mmc1", DIV_FSYS1, 10, 10),
- DIV(none, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10),
+ DIV(0, "dout_mmc0", "mout_mmc0", DIV_FSYS1, 0, 10),
+ DIV(0, "dout_mmc1", "mout_mmc1", DIV_FSYS1, 10, 10),
+ DIV(0, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10),
- DIV(none, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8),
+ DIV(0, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8),
/* UART and PWM */
- DIV(none, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4),
- DIV(none, "dout_uart1", "mout_uart1", DIV_PERIC0, 12, 4),
- DIV(none, "dout_uart2", "mout_uart2", DIV_PERIC0, 16, 4),
- DIV(none, "dout_uart3", "mout_uart3", DIV_PERIC0, 20, 4),
- DIV(none, "dout_pwm", "mout_pwm", DIV_PERIC0, 28, 4),
+ DIV(0, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4),
+ DIV(0, "dout_uart1", "mout_uart1", DIV_PERIC0, 12, 4),
+ DIV(0, "dout_uart2", "mout_uart2", DIV_PERIC0, 16, 4),
+ DIV(0, "dout_uart3", "mout_uart3", DIV_PERIC0, 20, 4),
+ DIV(0, "dout_pwm", "mout_pwm", DIV_PERIC0, 28, 4),
/* SPI */
- DIV(none, "dout_spi0", "mout_spi0", DIV_PERIC1, 20, 4),
- DIV(none, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
- DIV(none, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
+ DIV(0, "dout_spi0", "mout_spi0", DIV_PERIC1, 20, 4),
+ DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
+ DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
/* PCM */
- DIV(none, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
- DIV(none, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8),
+ DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
+ DIV(0, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8),
/* Audio - I2S */
- DIV(none, "dout_i2s1", "dout_audio1", DIV_PERIC3, 6, 6),
- DIV(none, "dout_i2s2", "dout_audio2", DIV_PERIC3, 12, 6),
- DIV(none, "dout_audio0", "mout_audio0", DIV_PERIC3, 20, 4),
- DIV(none, "dout_audio1", "mout_audio1", DIV_PERIC3, 24, 4),
- DIV(none, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4),
+ DIV(0, "dout_i2s1", "dout_audio1", DIV_PERIC3, 6, 6),
+ DIV(0, "dout_i2s2", "dout_audio2", DIV_PERIC3, 12, 6),
+ DIV(0, "dout_audio0", "mout_audio0", DIV_PERIC3, 20, 4),
+ DIV(0, "dout_audio1", "mout_audio1", DIV_PERIC3, 24, 4),
+ DIV(0, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4),
/* SPI Pre-Ratio */
- DIV(none, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8),
- DIV(none, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8),
- DIV(none, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
+ DIV(0, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8),
+ DIV(0, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8),
+ DIV(0, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
};
static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
/* TODO: Re-verify the CG bits for all the gate clocks */
- GATE_A(mct, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, "mct"),
+ GATE_A(CLK_MCT, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0,
+ "mct"),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
@@ -545,217 +505,227 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
/* sclk */
- GATE(sclk_uart0, "sclk_uart0", "dout_uart0",
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0",
GATE_TOP_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart1, "sclk_uart1", "dout_uart1",
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_uart1",
GATE_TOP_SCLK_PERIC, 1, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart2, "sclk_uart2", "dout_uart2",
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_uart2",
GATE_TOP_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_uart3, "sclk_uart3", "dout_uart3",
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_uart3",
GATE_TOP_SCLK_PERIC, 3, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi0, "sclk_spi0", "dout_pre_spi0",
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_pre_spi0",
GATE_TOP_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi1, "sclk_spi1", "dout_pre_spi1",
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_pre_spi1",
GATE_TOP_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spi2, "sclk_spi2", "dout_pre_spi2",
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_pre_spi2",
GATE_TOP_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_spdif, "sclk_spdif", "mout_spdif",
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
GATE_TOP_SCLK_PERIC, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pwm, "sclk_pwm", "dout_pwm",
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "dout_pwm",
GATE_TOP_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pcm1, "sclk_pcm1", "dout_pcm1",
+ GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_pcm1",
GATE_TOP_SCLK_PERIC, 15, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pcm2, "sclk_pcm2", "dout_pcm2",
+ GATE(CLK_SCLK_PCM2, "sclk_pcm2", "dout_pcm2",
GATE_TOP_SCLK_PERIC, 16, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_i2s1, "sclk_i2s1", "dout_i2s1",
+ GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_i2s1",
GATE_TOP_SCLK_PERIC, 17, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_i2s2, "sclk_i2s2", "dout_i2s2",
+ GATE(CLK_SCLK_I2S2, "sclk_i2s2", "dout_i2s2",
GATE_TOP_SCLK_PERIC, 18, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc0, "sclk_mmc0", "dout_mmc0",
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_mmc0",
GATE_TOP_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc1, "sclk_mmc1", "dout_mmc1",
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_mmc1",
GATE_TOP_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mmc2, "sclk_mmc2", "dout_mmc2",
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_mmc2",
GATE_TOP_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbphy301, "sclk_usbphy301", "dout_usbphy301",
+ GATE(CLK_SCLK_USBPHY301, "sclk_usbphy301", "dout_usbphy301",
GATE_TOP_SCLK_FSYS, 7, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbphy300, "sclk_usbphy300", "dout_usbphy300",
+ GATE(CLK_SCLK_USBPHY300, "sclk_usbphy300", "dout_usbphy300",
GATE_TOP_SCLK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd300, "sclk_usbd300", "dout_usbd300",
+ GATE(CLK_SCLK_USBD300, "sclk_usbd300", "dout_usbd300",
GATE_TOP_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd301, "sclk_usbd301", "dout_usbd301",
+ GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301",
GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_usbd301, "sclk_unipro", "dout_unipro",
+ GATE(CLK_SCLK_USBD301, "sclk_unipro", "dout_unipro",
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wa, "sclk_gscl_wa", "aclK333_432_gscl",
+ GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 6, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_gscl_wb, "sclk_gscl_wb", "aclk333_432_gscl",
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 7, CLK_SET_RATE_PARENT, 0),
/* Display */
- GATE(sclk_fimd1, "sclk_fimd1", "dout_fimd1",
+ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "dout_fimd1",
GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_mipi1, "sclk_mipi1", "dout_mipi1",
+ GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "dout_mipi1",
GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_hdmi, "sclk_hdmi", "mout_hdmi",
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
GATE_TOP_SCLK_DISP1, 9, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_pixel, "sclk_pixel", "dout_hdmi_pixel",
+ GATE(CLK_SCLK_PIXEL, "sclk_pixel", "dout_hdmi_pixel",
GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_dp1, "sclk_dp1", "dout_dp1",
+ GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
/* Maudio Block */
- GATE(sclk_maudio0, "sclk_maudio0", "dout_maudio0",
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(sclk_maupcm0, "sclk_maupcm0", "dout_maupcm0",
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
/* FSYS */
- GATE(tsi, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
- GATE(pdma0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
- GATE(pdma1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0),
- GATE(ufs, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0),
- GATE(rtic, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0),
- GATE(mmc0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0),
- GATE(mmc1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0),
- GATE(mmc2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0),
- GATE(sromc, "sromc", "aclk200_fsys2",
+ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0),
+ GATE(CLK_UFS, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0),
+ GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0),
+ GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0),
+ GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0),
+ GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0),
+ GATE(CLK_SROMC, "sromc", "aclk200_fsys2",
GATE_BUS_FSYS0, 19, CLK_IGNORE_UNUSED, 0),
- GATE(usbh20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0),
- GATE(usbd300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0),
- GATE(usbd301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0),
+ GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0),
+ GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0),
+ GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0),
/* UART */
- GATE(uart0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0),
- GATE(uart1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0),
- GATE_A(uart2, "uart2", "aclk66_peric",
+ GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0),
+ GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0),
+ GATE_A(CLK_UART2, "uart2", "aclk66_peric",
GATE_BUS_PERIC, 6, CLK_IGNORE_UNUSED, 0, "uart2"),
- GATE(uart3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0),
+ GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0),
/* I2C */
- GATE(i2c0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0),
- GATE(i2c1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0),
- GATE(i2c2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0),
- GATE(i2c3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0),
- GATE(i2c4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0),
- GATE(i2c5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0),
- GATE(i2c6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0),
- GATE(i2c7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0),
- GATE(i2c_hdmi, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0, 0),
- GATE(tsadc, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0),
+ GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0,
+ 0),
+ GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0),
/* SPI */
- GATE(spi0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0),
- GATE(spi1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0),
- GATE(spi2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0),
- GATE(keyif, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+ GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0),
+ GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0),
+ GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
/* I2S */
- GATE(i2s1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0),
- GATE(i2s2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0),
/* PCM */
- GATE(pcm1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0),
- GATE(pcm2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0),
/* PWM */
- GATE(pwm, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0),
/* SPDIF */
- GATE(spdif, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0),
- GATE(i2c8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0),
- GATE(i2c9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0),
- GATE(i2c10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0),
+ GATE(CLK_I2C8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0),
+ GATE(CLK_I2C9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0),
+ GATE(CLK_I2C10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0),
- GATE(chipid, "chipid", "aclk66_psgen",
+ GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
GATE_BUS_PERIS0, 12, CLK_IGNORE_UNUSED, 0),
- GATE(sysreg, "sysreg", "aclk66_psgen",
+ GATE(CLK_SYSREG, "sysreg", "aclk66_psgen",
GATE_BUS_PERIS0, 13, CLK_IGNORE_UNUSED, 0),
- GATE(tzpc0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0),
- GATE(tzpc1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0),
- GATE(tzpc2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0),
- GATE(tzpc3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0),
- GATE(tzpc4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0),
- GATE(tzpc5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0),
- GATE(tzpc6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0),
- GATE(tzpc7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0),
- GATE(tzpc8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0),
- GATE(tzpc9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0),
-
- GATE(hdmi_cec, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0, 0),
- GATE(seckey, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
- GATE(wdt, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0),
- GATE(rtc, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0),
- GATE(tmu, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0),
- GATE(tmu_gpu, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0),
-
- GATE(gscl0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
- GATE(gscl1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
- GATE(clk_3aa, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
-
- GATE(smmu_3aa, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0, 0),
- GATE(smmu_fimcl0, "smmu_fimcl0", "aclk333_432_gscl",
+ GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0),
+ GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0),
+ GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0),
+ GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0),
+ GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0),
+ GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0),
+ GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0),
+ GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0),
+ GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0),
+ GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0),
+
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0,
+ 0),
+ GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+ GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0),
+ GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0),
+ GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0),
+ GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0),
+
+ GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
+ GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
+ GATE(CLK_CLK_3AA, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
+
+ GATE(CLK_SMMU_3AA, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0,
+ 0),
+ GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "aclk333_432_gscl",
GATE_IP_GSCL1, 3, 0, 0),
- GATE(smmu_fimcl1, "smmu_fimcl1", "aclk333_432_gscl",
+ GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "aclk333_432_gscl",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(smmu_gscl0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0, 0),
- GATE(smmu_gscl1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0, 0),
- GATE(gscl_wa, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
- GATE(gscl_wb, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
- GATE(smmu_fimcl3, "smmu_fimcl3,", "aclk333_432_gscl",
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0,
+ 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0,
+ 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
+	GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "aclk333_432_gscl",
GATE_IP_GSCL1, 16, 0, 0),
- GATE(fimc_lite3, "fimc_lite3", "aclk333_432_gscl",
+ GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
- GATE(fimd1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
- GATE(dsim1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
- GATE(dp1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(smmu_fimd1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0, 0),
-
- GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
-
- GATE(g3d, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0),
-
- GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
- GATE(jpeg, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
- GATE(jpeg2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
- GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
- GATE(smmu_rotator, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
- GATE(smmu_jpeg, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0),
- GATE(smmu_mdma1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
-
- GATE(mscl0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
- GATE(mscl1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
- GATE(mscl2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
- GATE(smmu_mscl0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, 0),
- GATE(smmu_mscl1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, 0),
- GATE(smmu_mscl2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, 0),
- GATE(smmu_mixer, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, 0),
+ GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
+ GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0,
+ 0),
+
+ GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
+ GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
+ GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+
+ GATE(CLK_G3D, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0),
+
+ GATE(CLK_ROTATOR, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
+ GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0),
+ GATE(CLK_MDMA1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
+ GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0),
+ GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0),
+ GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0),
+
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0,
+ 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0,
+ 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0,
+ 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0,
+ 0),
};
static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
- [apll] = PLL(pll_2550, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ [apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
- [cpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
- MPLL_CON0, NULL),
- [dpll] = PLL(pll_2550, fout_dpll, "fout_dpll", "fin_pll", DPLL_LOCK,
+ [cpll] = PLL(pll_2550, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [dpll] = PLL(pll_2550, CLK_FOUT_DPLL, "fout_dpll", "fin_pll", DPLL_LOCK,
DPLL_CON0, NULL),
- [epll] = PLL(pll_2650, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ [epll] = PLL(pll_2650, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
EPLL_CON0, NULL),
- [rpll] = PLL(pll_2650, fout_rpll, "fout_rpll", "fin_pll", RPLL_LOCK,
+ [rpll] = PLL(pll_2650, CLK_FOUT_RPLL, "fout_rpll", "fin_pll", RPLL_LOCK,
RPLL_CON0, NULL),
- [ipll] = PLL(pll_2550, fout_ipll, "fout_ipll", "fin_pll", IPLL_LOCK,
+ [ipll] = PLL(pll_2550, CLK_FOUT_IPLL, "fout_ipll", "fin_pll", IPLL_LOCK,
IPLL_CON0, NULL),
- [spll] = PLL(pll_2550, fout_spll, "fout_spll", "fin_pll", SPLL_LOCK,
+ [spll] = PLL(pll_2550, CLK_FOUT_SPLL, "fout_spll", "fin_pll", SPLL_LOCK,
SPLL_CON0, NULL),
- [vpll] = PLL(pll_2550, fout_vpll, "fout_vpll", "fin_pll", VPLL_LOCK,
+ [vpll] = PLL(pll_2550, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", VPLL_LOCK,
VPLL_CON0, NULL),
- [mpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ [mpll] = PLL(pll_2550, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
MPLL_CON0, NULL),
- [bpll] = PLL(pll_2550, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ [bpll] = PLL(pll_2550, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
- [kpll] = PLL(pll_2550, fout_kpll, "fout_kpll", "fin_pll", KPLL_LOCK,
+ [kpll] = PLL(pll_2550, CLK_FOUT_KPLL, "fout_kpll", "fin_pll", KPLL_LOCK,
KPLL_CON0, NULL),
};
@@ -777,7 +747,7 @@ static void __init exynos5420_clk_init(struct device_node *np)
panic("%s: unable to determine soc\n", __func__);
}
- samsung_clk_init(np, reg_base, nr_clks,
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS,
exynos5420_clk_regs, ARRAY_SIZE(exynos5420_clk_regs),
NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index f8658945bfd2..cbc15b56891d 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -9,6 +9,7 @@
* Common Clock Framework support for Exynos5440 SoC.
*/
+#include <dt-bindings/clock/exynos5440.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -22,79 +23,65 @@
#define CPU_CLK_STATUS 0xfc
#define MISC_DOUT1 0x558
-/*
- * Let each supported clock get a unique id. This id is used to lookup the clock
- * for device tree based platforms.
- */
-enum exynos5440_clks {
- none, xtal, arm_clk,
-
- spi_baud = 16, pb0_250, pr0_250, pr1_250, b_250, b_125, b_200, sata,
- usb, gmac0, cs250, pb0_250_o, pr0_250_o, pr1_250_o, b_250_o, b_125_o,
- b_200_o, sata_o, usb_o, gmac0_o, cs250_o,
-
- nr_clks,
-};
-
/* parent clock name list */
PNAME(mout_armclk_p) = { "cplla", "cpllb" };
PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
- FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xtal", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
- FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
- FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
- FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
- FRATE(none, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
- FRATE(none, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "ppll", NULL, CLK_IS_ROOT, 1000000000),
+ FRATE(0, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(0, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
+ FRATE(0, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
+ FRATE(0, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
};
/* fixed factor clocks */
static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
- FFACTOR(none, "div250", "ppll", 1, 4, 0),
- FFACTOR(none, "div200", "ppll", 1, 5, 0),
- FFACTOR(none, "div125", "div250", 1, 2, 0),
+ FFACTOR(0, "div250", "ppll", 1, 4, 0),
+ FFACTOR(0, "div200", "ppll", 1, 5, 0),
+ FFACTOR(0, "div125", "div250", 1, 2, 0),
};
/* mux clocks */
static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
- MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
- MUX_A(arm_clk, "arm_clk", mout_armclk_p,
+ MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
+ MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
CPU_CLK_STATUS, 0, 1, "armclk"),
};
/* divider clocks */
static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
- DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
+ DIV(CLK_SPI_BAUD, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
};
/* gate clocks */
static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
- GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(b_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(b_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(b_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(sata, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
- GATE(usb, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
- GATE(gmac0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
- GATE(cs250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
- GATE(pb0_250_o, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
- GATE(pr0_250_o, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
- GATE(pr1_250_o, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
- GATE(b_250_o, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
- GATE(b_125_o, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
- GATE(b_200_o, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
- GATE(sata_o, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
- GATE(usb_o, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
- GATE(gmac0_o, "gmac0_o", "gmac", CLKEN_OV_VAL, 14, 0, 0),
- GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
+ GATE(CLK_PB0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(CLK_PR0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(CLK_PR1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(CLK_B_250, "b_250", "div250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(CLK_B_125, "b_125", "div125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(CLK_B_200, "b_200", "div200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(CLK_SATA, "sata", "div200", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(CLK_USB, "usb", "div200", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(CLK_GMAC0, "gmac0", "div200", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(CLK_CS250, "cs250", "div250", CLKEN_OV_VAL, 19, 0, 0),
+ GATE(CLK_PB0_250_O, "pb0_250_o", "pb0_250", CLKEN_OV_VAL, 3, 0, 0),
+ GATE(CLK_PR0_250_O, "pr0_250_o", "pr0_250", CLKEN_OV_VAL, 4, 0, 0),
+ GATE(CLK_PR1_250_O, "pr1_250_o", "pr1_250", CLKEN_OV_VAL, 5, 0, 0),
+ GATE(CLK_B_250_O, "b_250_o", "b_250", CLKEN_OV_VAL, 9, 0, 0),
+ GATE(CLK_B_125_O, "b_125_o", "b_125", CLKEN_OV_VAL, 10, 0, 0),
+ GATE(CLK_B_200_O, "b_200_o", "b_200", CLKEN_OV_VAL, 11, 0, 0),
+ GATE(CLK_SATA_O, "sata_o", "sata", CLKEN_OV_VAL, 12, 0, 0),
+ GATE(CLK_USB_O, "usb_o", "usb", CLKEN_OV_VAL, 13, 0, 0),
+ GATE(CLK_GMAC0_O, "gmac0_o", "gmac", CLKEN_OV_VAL, 14, 0, 0),
+ GATE(CLK_CS250_O, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
};
static struct of_device_id ext_clk_match[] __initdata = {
@@ -114,7 +101,7 @@ static void __init exynos5440_clk_init(struct device_node *np)
return;
}
- samsung_clk_init(np, reg_base, nr_clks, NULL, 0, NULL, 0);
+ samsung_clk_init(np, reg_base, CLK_NR_CLKS, NULL, 0, NULL, 0);
samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 529e11dc2c6b..81e6d2f49aa0 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -375,7 +375,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
break;
default:
break;
- };
+ }
/* Set new configuration. */
__raw_writel(con1, pll->con_reg + 0x4);
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
new file mode 100644
index 000000000000..9ecef140dba7
--- /dev/null
+++ b/drivers/clk/shmobile/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
+obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o
+obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-mstp.o
+# for empty built-in.o
+obj-n := dummy
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
new file mode 100644
index 000000000000..aac4756ec52e
--- /dev/null
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -0,0 +1,185 @@
+/*
+ * r8a7790 Common Clock Framework support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define CPG_DIV6_CKSTP BIT(8)
+#define CPG_DIV6_DIV(d) ((d) & 0x3f)
+#define CPG_DIV6_DIV_MASK 0x3f
+
+/**
+ * struct div6_clock - CPG 6 bit divider clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: IO-remapped register
+ * @div: divisor value (1-64)
+ */
+struct div6_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ unsigned int div;
+};
+
+#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
+
+static int cpg_div6_clock_enable(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+ return 0;
+}
+
+static void cpg_div6_clock_disable(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ /* DIV6 clocks require the divisor field to be non-zero when stopping
+ * the clock.
+ */
+ clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK),
+ clock->reg);
+}
+
+static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+
+ return !(clk_readl(clock->reg) & CPG_DIV6_CKSTP);
+}
+
+static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ unsigned int div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+
+ return parent_rate / div;
+}
+
+static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned int div;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ return clamp_t(unsigned int, div, 1, 64);
+}
+
+static long cpg_div6_clock_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int div = cpg_div6_clock_calc_div(rate, *parent_rate);
+
+ return *parent_rate / div;
+}
+
+static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
+
+ clock->div = div;
+
+ /* Only program the new divisor if the clock isn't stopped. */
+ if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP))
+ clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+ return 0;
+}
+
+static const struct clk_ops cpg_div6_clock_ops = {
+ .enable = cpg_div6_clock_enable,
+ .disable = cpg_div6_clock_disable,
+ .is_enabled = cpg_div6_clock_is_enabled,
+ .recalc_rate = cpg_div6_clock_recalc_rate,
+ .round_rate = cpg_div6_clock_round_rate,
+ .set_rate = cpg_div6_clock_set_rate,
+};
+
+static void __init cpg_div6_clock_init(struct device_node *np)
+{
+ struct clk_init_data init;
+ struct div6_clock *clock;
+ const char *parent_name;
+ const char *name;
+ struct clk *clk;
+ int ret;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ pr_err("%s: failed to allocate %s DIV6 clock\n",
+ __func__, np->name);
+ return;
+ }
+
+ /* Remap the clock register and read the divisor. Disabling the
+ * clock overwrites the divisor, so we need to cache its value for the
+ * enable operation.
+ */
+ clock->reg = of_iomap(np, 0);
+ if (clock->reg == NULL) {
+ pr_err("%s: failed to map %s DIV6 clock register\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ clock->div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+
+ /* Parse the DT properties. */
+ ret = of_property_read_string(np, "clock-output-names", &name);
+ if (ret < 0) {
+ pr_err("%s: failed to get %s DIV6 clock output name\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (parent_name == NULL) {
+ pr_err("%s: failed to get %s DIV6 clock parent name\n",
+ __func__, np->name);
+ goto error;
+ }
+
+ /* Register the clock. */
+ init.name = name;
+ init.ops = &cpg_div6_clock_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->hw.init = &init;
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
+ __func__, np->name, PTR_ERR(clk));
+ goto error;
+ }
+
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+
+ return;
+
+error:
+ if (clock->reg)
+ iounmap(clock->reg);
+ kfree(clock);
+}
+CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
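For reference, the divisor selection above is DIV_ROUND_CLOSEST clamped to the 1-64 range the 6-bit field can encode, with div - 1 written to the register. A standalone sketch of that arithmetic (the parent and requested rates are illustrative assumptions, not taken from any board):

#include <stdio.h>

/* Mirrors cpg_div6_clock_calc_div(): round to nearest, clamp to 1..64. */
static unsigned int div6_calc_div(unsigned long rate, unsigned long parent_rate)
{
	unsigned long div = (parent_rate + rate / 2) / rate;

	if (div < 1)
		div = 1;
	if (div > 64)
		div = 64;
	return (unsigned int)div;
}

int main(void)
{
	unsigned long parent = 800000000UL;	/* assumed 800 MHz parent */
	unsigned long want = 48000000UL;	/* assumed 48 MHz request */
	unsigned int div = div6_calc_div(want, parent);

	/* CPG_DIV6_DIV stores div - 1 in the low six bits. */
	printf("div=%u field=0x%02x actual=%lu Hz\n", div, div - 1, parent / div);
	return 0;
}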
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c
new file mode 100644
index 000000000000..6c7c929c7765
--- /dev/null
+++ b/drivers/clk/shmobile/clk-emev2.c
@@ -0,0 +1,104 @@
+/*
+ * EMMA Mobile EV2 common clock framework support
+ *
+ * Copyright (C) 2013 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+/* EMEV2 SMU registers */
+#define USIAU0_RSTCTRL 0x094
+#define USIBU1_RSTCTRL 0x0ac
+#define USIBU2_RSTCTRL 0x0b0
+#define USIBU3_RSTCTRL 0x0b4
+#define STI_RSTCTRL 0x124
+#define STI_CLKSEL 0x688
+
+static DEFINE_SPINLOCK(lock);
+
+/* not pretty, but hey */
+void __iomem *smu_base;
+
+static void __init emev2_smu_write(unsigned long value, int offs)
+{
+ BUG_ON(!smu_base || (offs >= PAGE_SIZE));
+ writel_relaxed(value, smu_base + offs);
+}
+
+static const struct of_device_id smu_id[] __initconst = {
+ { .compatible = "renesas,emev2-smu", },
+ {},
+};
+
+static void __init emev2_smu_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, smu_id);
+ BUG_ON(!np);
+ smu_base = of_iomap(np, 0);
+ BUG_ON(!smu_base);
+ of_node_put(np);
+
+	/* set up the STI timer to run at 32.768 kHz and deassert reset */
+ emev2_smu_write(0, STI_CLKSEL);
+ emev2_smu_write(1, STI_RSTCTRL);
+
+ /* deassert reset for UART0->UART3 */
+ emev2_smu_write(2, USIAU0_RSTCTRL);
+ emev2_smu_write(2, USIBU1_RSTCTRL);
+ emev2_smu_write(2, USIBU2_RSTCTRL);
+ emev2_smu_write(2, USIBU3_RSTCTRL);
+}
+
+static void __init emev2_smu_clkdiv_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_divider(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 8, 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_clkdiv, "renesas,emev2-smu-clkdiv",
+ emev2_smu_clkdiv_init);
+
+static void __init emev2_smu_gclk_init(struct device_node *np)
+{
+ u32 reg[2];
+ struct clk *clk;
+ const char *parent_name = of_clk_get_parent_name(np, 0);
+ if (WARN_ON(of_property_read_u32_array(np, "reg", reg, 2)))
+ return;
+ if (!smu_base)
+ emev2_smu_init();
+ clk = clk_register_gate(NULL, np->name, parent_name, 0,
+ smu_base + reg[0], reg[1], 0, &lock);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, np->name, NULL);
+ pr_debug("## %s %s %p\n", __func__, np->name, clk);
+}
+CLK_OF_DECLARE(emev2_smu_gclk, "renesas,emev2-smu-gclk", emev2_smu_gclk_init);
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
new file mode 100644
index 000000000000..42d5912b1d25
--- /dev/null
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -0,0 +1,233 @@
+/*
+ * R-Car MSTP clocks
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+/*
+ * MSTP clocks. We can't use standard gate clocks as we need to poll on the
+ * status register when enabling the clock.
+ */
+
+#define MSTP_MAX_CLOCKS 32
+
+/**
+ * struct mstp_clock_group - MSTP gating clocks group
+ *
+ * @data: clocks in this group
+ * @smstpcr: module stop control register
+ * @mstpsr: module stop status register (optional)
+ * @lock: protects writes to SMSTPCR
+ */
+struct mstp_clock_group {
+ struct clk_onecell_data data;
+ void __iomem *smstpcr;
+ void __iomem *mstpsr;
+ spinlock_t lock;
+};
+
+/**
+ * struct mstp_clock - MSTP gating clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @bit_index: control bit index
+ * @group: MSTP clocks group
+ */
+struct mstp_clock {
+ struct clk_hw hw;
+ u32 bit_index;
+ struct mstp_clock_group *group;
+};
+
+#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+
+static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct mstp_clock_group *group = clock->group;
+ u32 bitmask = BIT(clock->bit_index);
+ unsigned long flags;
+ unsigned int i;
+ u32 value;
+
+ spin_lock_irqsave(&group->lock, flags);
+
+ value = clk_readl(group->smstpcr);
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+ clk_writel(value, group->smstpcr);
+
+ spin_unlock_irqrestore(&group->lock, flags);
+
+ if (!enable || !group->mstpsr)
+ return 0;
+
+ for (i = 1000; i > 0; --i) {
+ if (!(clk_readl(group->mstpsr) & bitmask))
+ break;
+ cpu_relax();
+ }
+
+ if (!i) {
+ pr_err("%s: failed to enable %p[%d]\n", __func__,
+ group->smstpcr, clock->bit_index);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int cpg_mstp_clock_enable(struct clk_hw *hw)
+{
+ return cpg_mstp_clock_endisable(hw, true);
+}
+
+static void cpg_mstp_clock_disable(struct clk_hw *hw)
+{
+ cpg_mstp_clock_endisable(hw, false);
+}
+
+static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct mstp_clock_group *group = clock->group;
+ u32 value;
+
+ if (group->mstpsr)
+ value = clk_readl(group->mstpsr);
+ else
+ value = clk_readl(group->smstpcr);
+
+ return !!(value & BIT(clock->bit_index));
+}
+
+static const struct clk_ops cpg_mstp_clock_ops = {
+ .enable = cpg_mstp_clock_enable,
+ .disable = cpg_mstp_clock_disable,
+ .is_enabled = cpg_mstp_clock_is_enabled,
+};
+
+static struct clk * __init
+cpg_mstp_clock_register(const char *name, const char *parent_name,
+ unsigned int index, struct mstp_clock_group *group)
+{
+ struct clk_init_data init;
+ struct mstp_clock *clock;
+ struct clk *clk;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ pr_err("%s: failed to allocate MSTP clock.\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &cpg_mstp_clock_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->bit_index = index;
+ clock->group = group;
+ clock->hw.init = &init;
+
+ clk = clk_register(NULL, &clock->hw);
+
+ if (IS_ERR(clk))
+ kfree(clock);
+
+ return clk;
+}
+
+static void __init cpg_mstp_clocks_init(struct device_node *np)
+{
+ struct mstp_clock_group *group;
+ struct clk **clks;
+ unsigned int i;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ clks = kmalloc(MSTP_MAX_CLOCKS * sizeof(*clks), GFP_KERNEL);
+ if (group == NULL || clks == NULL) {
+ kfree(group);
+ kfree(clks);
+ pr_err("%s: failed to allocate group\n", __func__);
+ return;
+ }
+
+ spin_lock_init(&group->lock);
+ group->data.clks = clks;
+
+ group->smstpcr = of_iomap(np, 0);
+ group->mstpsr = of_iomap(np, 1);
+
+ if (group->smstpcr == NULL) {
+ pr_err("%s: failed to remap SMSTPCR\n", __func__);
+ kfree(group);
+ kfree(clks);
+ return;
+ }
+
+ for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
+ const char *parent_name;
+ const char *name;
+ u32 clkidx;
+ int ret;
+
+ /* Skip clocks with no name. */
+ ret = of_property_read_string_index(np, "clock-output-names",
+ i, &name);
+ if (ret < 0 || strlen(name) == 0)
+ continue;
+
+ parent_name = of_clk_get_parent_name(np, i);
+ ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
+ &clkidx);
+ if (parent_name == NULL || ret < 0)
+ break;
+
+ if (clkidx >= MSTP_MAX_CLOCKS) {
+ pr_err("%s: invalid clock %s %s index %u)\n",
+ __func__, np->name, name, clkidx);
+ continue;
+ }
+
+ clks[clkidx] = cpg_mstp_clock_register(name, parent_name,
+ clkidx, group);
+ if (!IS_ERR(clks[clkidx])) {
+ group->data.clk_num = max(group->data.clk_num,
+ clkidx + 1);
+ /*
+ * Register a clkdev to let board code retrieve the
+ * clock by name and register aliases for non-DT
+ * devices.
+ *
+ * FIXME: Remove this when all devices that require a
+ * clock will be instantiated from DT.
+ */
+ clk_register_clkdev(clks[clkidx], name, NULL);
+ } else {
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clks[clkidx]));
+ }
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
+}
+CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
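From a consumer's point of view the status polling above stays hidden behind the normal clk API; a hypothetical device driver (the device and its clock binding are assumptions for illustration) would simply do:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer of an MSTP gate clock. */
static int example_enable_module_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* May return -ETIMEDOUT if the MSTPSR status bit never clears. */
	ret = clk_prepare_enable(clk);
	if (ret)
		dev_err(dev, "failed to enable module clock: %d\n", ret);

	return ret;
}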
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
new file mode 100644
index 000000000000..99c27b1c625b
--- /dev/null
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -0,0 +1,338 @@
+/*
+ * rcar_gen2 Core CPG Clocks
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct rcar_gen2_cpg {
+ struct clk_onecell_data data;
+ spinlock_t lock;
+ void __iomem *reg;
+};
+
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_SDCKCR 0x00000074
+#define CPG_PLL0CR 0x000000d8
+#define CPG_FRQCRC 0x000000e0
+#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
+#define CPG_FRQCRC_ZFC_SHIFT 8
+
+/* -----------------------------------------------------------------------------
+ * Z Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int val;
+
+ val = (clk_readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK)
+ >> CPG_FRQCRC_ZFC_SHIFT;
+ mult = 32 - val;
+
+ return div_u64((u64)parent_rate * mult, 32);
+}
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long prate = *parent_rate;
+ unsigned int mult;
+
+ if (!prate)
+ prate = 1;
+
+ mult = div_u64((u64)rate * 32, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return *parent_rate / 32 * mult;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val, kick;
+ unsigned int i;
+
+ mult = div_u64((u64)rate * 32, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ val = clk_readl(zclk->reg);
+ val &= ~CPG_FRQCRC_ZFC_MASK;
+ val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
+ clk_writel(val, zclk->reg);
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = clk_readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ clk_writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+	 * Since this value might be dependent on the external xtal rate, pll1
+ * rate or even the other emulation clocks rate, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
+{
+ static const char *parent_name = "pll0";
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "z";
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = cpg->reg + CPG_FRQCRC;
+ zclk->kick_reg = cpg->reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+/* -----------------------------------------------------------------------------
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x 1 x172/2 x208/2 x106
+ * 0 0 1 15 x 1 x172/2 x208/2 x88
+ * 0 1 0 20 x 1 x130/2 x156/2 x80
+ * 0 1 1 20 x 1 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+struct cpg_pll_config {
+ unsigned int extal_div;
+ unsigned int pll1_mult;
+ unsigned int pll3_mult;
+};
+
+static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
+ { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
+ { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+};
+
+/* SDHI divisors */
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_sd01_div_table[] = {
+ { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
+ { 10, 36 }, { 11, 48 }, { 12, 10 }, { 0, 0 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static u32 cpg_mode __initdata;
+
+static struct clk * __init
+rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
+ const struct cpg_pll_config *config,
+ const char *name)
+{
+ const struct clk_div_table *table = NULL;
+ const char *parent_name;
+ unsigned int shift;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+
+ if (!strcmp(name, "main")) {
+ parent_name = of_clk_get_parent_name(np, 0);
+ div = config->extal_div;
+ } else if (!strcmp(name, "pll0")) {
+ /* PLL0 is a configurable multiplier clock. Register it as a
+ * fixed factor clock for now as there's no generic multiplier
+ * clock implementation and we currently have no need to change
+ * the multiplier value.
+ */
+ u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ parent_name = "main";
+ mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
+ } else if (!strcmp(name, "pll1")) {
+ parent_name = "main";
+ mult = config->pll1_mult / 2;
+ } else if (!strcmp(name, "pll3")) {
+ parent_name = "main";
+ mult = config->pll3_mult;
+ } else if (!strcmp(name, "lb")) {
+ parent_name = "pll1_div2";
+ div = cpg_mode & BIT(18) ? 36 : 24;
+ } else if (!strcmp(name, "qspi")) {
+ parent_name = "pll1_div2";
+ div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
+ ? 8 : 10;
+ } else if (!strcmp(name, "sdh")) {
+ parent_name = "pll1_div2";
+ table = cpg_sdh_div_table;
+ shift = 8;
+ } else if (!strcmp(name, "sd0")) {
+ parent_name = "pll1_div2";
+ table = cpg_sd01_div_table;
+ shift = 4;
+ } else if (!strcmp(name, "sd1")) {
+ parent_name = "pll1_div2";
+ table = cpg_sd01_div_table;
+ shift = 0;
+ } else if (!strcmp(name, "z")) {
+ return cpg_z_clk_register(cpg);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!table)
+ return clk_register_fixed_factor(NULL, name, parent_name, 0,
+ mult, div);
+ else
+ return clk_register_divider_table(NULL, name, parent_name, 0,
+ cpg->reg + CPG_SDCKCR, shift,
+ 4, 0, table, &cpg->lock);
+}
+
+static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
+{
+ const struct cpg_pll_config *config;
+ struct rcar_gen2_cpg *cpg;
+ struct clk **clks;
+ unsigned int i;
+ int num_clks;
+
+ num_clks = of_property_count_strings(np, "clock-output-names");
+ if (num_clks < 0) {
+ pr_err("%s: failed to count clocks\n", __func__);
+ return;
+ }
+
+ cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL);
+ if (cpg == NULL || clks == NULL) {
+ /* We're leaking memory on purpose, there's no point in cleaning
+ * up as the system won't boot anyway.
+ */
+ pr_err("%s: failed to allocate cpg\n", __func__);
+ return;
+ }
+
+ spin_lock_init(&cpg->lock);
+
+ cpg->data.clks = clks;
+ cpg->data.clk_num = num_clks;
+
+ cpg->reg = of_iomap(np, 0);
+ if (WARN_ON(cpg->reg == NULL))
+ return;
+
+ config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ for (i = 0; i < num_clks; ++i) {
+ const char *name;
+ struct clk *clk;
+
+ of_property_read_string_index(np, "clock-output-names", i,
+ &name);
+
+ clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
+ if (IS_ERR(clk))
+ pr_err("%s: failed to register %s %s clock (%ld)\n",
+ __func__, np->name, name, PTR_ERR(clk));
+ else
+ cpg->data.clks[i] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
+ rcar_gen2_cpg_clocks_init);
+
+void __init rcar_gen2_clocks_init(u32 mode)
+{
+ cpg_mode = mode;
+
+ of_clk_init(NULL);
+}
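The CPG_PLL_CONFIG_INDEX macro above packs the MD14/MD13/MD19 mode pins into a 0-7 table index (MD14 contributes 4, MD13 contributes 2, MD19 contributes 1). A quick standalone check, using an assumed mode value rather than one read from hardware:

#include <stdio.h>

#define BIT(n)				(1U << (n))
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
					 (((md) & BIT(13)) >> 12) | \
					 (((md) & BIT(19)) >> 19))

int main(void)
{
	unsigned int mode = BIT(13);	/* assumed: MD13 set, MD14 and MD19 clear */

	/* Expect index 2: the 20 MHz EXTAL, PLL1 x156/2, PLL3 x80 row. */
	printf("config index = %u\n", CPG_PLL_CONFIG_INDEX(mode));
	return 0;
}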
diff --git a/drivers/clk/sirf/Makefile b/drivers/clk/sirf/Makefile
new file mode 100644
index 000000000000..36b8e203f6e7
--- /dev/null
+++ b/drivers/clk/sirf/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for sirf specific clk
+#
+
+obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o
diff --git a/drivers/clk/sirf/atlas6.h b/drivers/clk/sirf/atlas6.h
new file mode 100644
index 000000000000..376217f3bf8f
--- /dev/null
+++ b/drivers/clk/sirf/atlas6.h
@@ -0,0 +1,31 @@
+#define SIRFSOC_CLKC_CLK_EN0 0x0000
+#define SIRFSOC_CLKC_CLK_EN1 0x0004
+#define SIRFSOC_CLKC_REF_CFG 0x0020
+#define SIRFSOC_CLKC_CPU_CFG 0x0024
+#define SIRFSOC_CLKC_MEM_CFG 0x0028
+#define SIRFSOC_CLKC_MEMDIV_CFG 0x002C
+#define SIRFSOC_CLKC_SYS_CFG 0x0030
+#define SIRFSOC_CLKC_IO_CFG 0x0034
+#define SIRFSOC_CLKC_DSP_CFG 0x0038
+#define SIRFSOC_CLKC_GFX_CFG 0x003c
+#define SIRFSOC_CLKC_MM_CFG 0x0040
+#define SIRFSOC_CLKC_GFX2D_CFG 0x0040
+#define SIRFSOC_CLKC_LCD_CFG 0x0044
+#define SIRFSOC_CLKC_MMC01_CFG 0x0048
+#define SIRFSOC_CLKC_MMC23_CFG 0x004C
+#define SIRFSOC_CLKC_MMC45_CFG 0x0050
+#define SIRFSOC_CLKC_NAND_CFG 0x0054
+#define SIRFSOC_CLKC_NANDDIV_CFG 0x0058
+#define SIRFSOC_CLKC_PLL1_CFG0 0x0080
+#define SIRFSOC_CLKC_PLL2_CFG0 0x0084
+#define SIRFSOC_CLKC_PLL3_CFG0 0x0088
+#define SIRFSOC_CLKC_PLL1_CFG1 0x008c
+#define SIRFSOC_CLKC_PLL2_CFG1 0x0090
+#define SIRFSOC_CLKC_PLL3_CFG1 0x0094
+#define SIRFSOC_CLKC_PLL1_CFG2 0x0098
+#define SIRFSOC_CLKC_PLL2_CFG2 0x009c
+#define SIRFSOC_CLKC_PLL3_CFG2 0x00A0
+#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
+#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
+#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
+#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
new file mode 100644
index 000000000000..f9f4a15a64ab
--- /dev/null
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -0,0 +1,152 @@
+/*
+ * Clock tree for CSR SiRFatlasVI
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "atlas6.h"
+#include "clk-common.c"
+
+static struct clk_dmn clk_mmc01 = {
+ .regofs = SIRFSOC_CLKC_MMC01_CFG,
+ .enable_bit = 59,
+ .hw = {
+ .init = &clk_mmc01_init,
+ },
+};
+
+static struct clk_dmn clk_mmc23 = {
+ .regofs = SIRFSOC_CLKC_MMC23_CFG,
+ .enable_bit = 60,
+ .hw = {
+ .init = &clk_mmc23_init,
+ },
+};
+
+static struct clk_dmn clk_mmc45 = {
+ .regofs = SIRFSOC_CLKC_MMC45_CFG,
+ .enable_bit = 61,
+ .hw = {
+ .init = &clk_mmc45_init,
+ },
+};
+
+static struct clk_init_data clk_nand_init = {
+ .name = "nand",
+ .ops = &dmn_ops,
+ .parent_names = dmn_clk_parents,
+ .num_parents = ARRAY_SIZE(dmn_clk_parents),
+};
+
+static struct clk_dmn clk_nand = {
+ .regofs = SIRFSOC_CLKC_NAND_CFG,
+ .enable_bit = 34,
+ .hw = {
+ .init = &clk_nand_init,
+ },
+};
+
+enum atlas6_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, gfx2d, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, cphif, maxclk,
+};
+
+static __initdata struct clk_hw *atlas6_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_gfx2d.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+ &clk_cphif.hw,
+};
+
+static struct clk *atlas6_clks[maxclk];
+
+static void __init atlas6_clk_init(struct device_node *np)
+{
+ struct device_node *rscnp;
+ int i;
+
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
+ if (!sirfsoc_rsc_vbase)
+ panic("unable to map rsc registers\n");
+ of_node_put(rscnp);
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
+
+	/* These are always available (RTC and 26 MHz OSC) */
+ atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
+ CLK_IS_ROOT, 32768);
+ atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
+ CLK_IS_ROOT, 26000000);
+
+ for (i = pll1; i < maxclk; i++) {
+ atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
+ BUG_ON(!atlas6_clks[i]);
+ }
+ clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(atlas6_clks[io], NULL, "io");
+ clk_register_clkdev(atlas6_clks[mem], NULL, "mem");
+ clk_register_clkdev(atlas6_clks[mem], NULL, "osc");
+
+ clk_data.clks = atlas6_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(atlas6_clk, "sirf,atlas6-clkc", atlas6_clk_init);
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/sirf/clk-common.c
index 6c15e3316137..7dde6a82f514 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -1,51 +1,18 @@
/*
- * Clock tree for CSR SiRFprimaII
+ * common clks module for all SiRF SoCs
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0014
-#define SIRFSOC_CLKC_CPU_CFG 0x0018
-#define SIRFSOC_CLKC_MEM_CFG 0x001c
-#define SIRFSOC_CLKC_SYS_CFG 0x0020
-#define SIRFSOC_CLKC_IO_CFG 0x0024
-#define SIRFSOC_CLKC_DSP_CFG 0x0028
-#define SIRFSOC_CLKC_GFX_CFG 0x002c
-#define SIRFSOC_CLKC_MM_CFG 0x0030
-#define SIRFSOC_CLKC_LCD_CFG 0x0034
-#define SIRFSOC_CLKC_MMC_CFG 0x0038
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
-#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
-#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
-
-static void *sirfsoc_clk_vbase, *sirfsoc_rsc_vbase;
-
#define KHZ 1000
#define MHZ (KHZ * KHZ)
+static void *sirfsoc_clk_vbase;
+static void *sirfsoc_rsc_vbase;
+static struct clk_onecell_data clk_data;
+
/*
* SiRFprimaII clock controller
* - 2 oscillators: osc-26MHz, rtc-32.768KHz
@@ -127,6 +94,7 @@ static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long fin, nf, nr, od;
+ u64 dividend;
/*
* fout = fin * nf / (nr * od);
@@ -147,7 +115,10 @@ static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
nr = BIT(6);
od = 1;
- return fin * nf / (nr * od);
+ dividend = (u64)fin * nf;
+ do_div(dividend, nr * od);
+
+ return (long)dividend;
}
static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
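The do_div() change above matters on 32-bit ARM: with the 26 MHz oscillator input, fin * nf overflows 32 bits once nf exceeds roughly 165, so the intermediate product has to be widened to 64 bits. A small illustration (the nf value is an arbitrary assumption):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fin = 26000000;	/* 26 MHz oscillator */
	uint32_t nf = 200;		/* assumed multiplier value */

	/* The 32-bit product wraps; the widened product is what do_div() sees. */
	printf("32-bit product: %u\n", fin * nf);
	printf("64-bit product: %llu\n", (unsigned long long)fin * nf);
	return 0;
}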
@@ -186,6 +157,30 @@ static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+	 * The SiRF SoC has no CPU clock control,
+	 * so bypass to its parent PLL.
+ */
+ struct clk *parent_clk = clk_get_parent(hw->clk);
+ struct clk *pll_parent_clk = clk_get_parent(parent_clk);
+ unsigned long pll_parent_rate = clk_get_rate(pll_parent_clk);
+ return pll_clk_round_rate(__clk_get_hw(parent_clk), rate, &pll_parent_rate);
+}
+
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /*
+	 * The SiRF SoC has no CPU clock control,
+	 * so return the parent PLL rate.
+ */
+ struct clk *parent_clk = clk_get_parent(hw->clk);
+ return __clk_get_rate(parent_clk);
+}
+
static struct clk_ops std_pll_ops = {
.recalc_rate = pll_clk_recalc_rate,
.round_rate = pll_clk_round_rate,
@@ -403,6 +398,42 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int ret1, ret2;
+ struct clk *cur_parent;
+
+ if (rate == clk_get_rate(clk_pll1.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
+ return ret1;
+ }
+
+ if (rate == clk_get_rate(clk_pll2.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
+ return ret1;
+ }
+
+ if (rate == clk_get_rate(clk_pll3.hw.clk)) {
+ ret1 = clk_set_parent(hw->clk, clk_pll3.hw.clk);
+ return ret1;
+ }
+
+ cur_parent = clk_get_parent(hw->clk);
+
+ /* switch to tmp pll before setting parent clock's rate */
+ if (cur_parent == clk_pll1.hw.clk) {
+ ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
+ BUG_ON(ret1);
+ }
+
+ ret2 = clk_set_rate(clk_pll1.hw.clk, rate);
+
+ ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
+
+ return ret2 ? ret2 : ret1;
+}
+
static struct clk_ops msi_ops = {
.set_rate = dmn_clk_set_rate,
.round_rate = dmn_clk_round_rate,
@@ -457,6 +488,9 @@ static struct clk_dmn clk_io = {
static struct clk_ops cpu_ops = {
.set_parent = dmn_clk_set_parent,
.get_parent = dmn_clk_get_parent,
+ .set_rate = cpu_clk_set_rate,
+ .round_rate = cpu_clk_round_rate,
+ .recalc_rate = cpu_clk_recalc_rate,
};
static struct clk_init_data clk_cpu_init = {
@@ -532,6 +566,11 @@ static struct clk_dmn clk_mm = {
},
};
+/*
+ * for atlas6, gfx2d holds the bit of prima2's clk_mm
+ */
+#define clk_gfx2d clk_mm
+
static struct clk_init_data clk_lcd_init = {
.name = "lcd",
.ops = &dmn_ops,
@@ -569,14 +608,6 @@ static struct clk_init_data clk_mmc01_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
static struct clk_init_data clk_mmc23_init = {
.name = "mmc23",
.ops = &dmn_ops,
@@ -584,14 +615,6 @@ static struct clk_init_data clk_mmc23_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
static struct clk_init_data clk_mmc45_init = {
.name = "mmc45",
.ops = &dmn_ops,
@@ -599,14 +622,6 @@ static struct clk_init_data clk_mmc45_init = {
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
/*
* peripheral controllers in io domain
*/
@@ -667,6 +682,20 @@ static struct clk_ops ios_ops = {
.disable = std_clk_disable,
};
+static struct clk_init_data clk_cphif_init = {
+ .name = "cphif",
+ .ops = &ios_ops,
+ .parent_names = std_clk_io_parents,
+ .num_parents = ARRAY_SIZE(std_clk_io_parents),
+};
+
+static struct clk_std clk_cphif = {
+ .enable_bit = 20,
+ .hw = {
+ .init = &clk_cphif_init,
+ },
+};
+
static struct clk_init_data clk_dmac0_init = {
.name = "dmac0",
.ops = &ios_ops,
@@ -695,20 +724,6 @@ static struct clk_std clk_dmac1 = {
},
};
-static struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_nand = {
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
static struct clk_init_data clk_audio_init = {
.name = "audio",
.ops = &ios_ops,
@@ -970,7 +985,7 @@ static const char *std_clk_sys_parents[] = {
};
static struct clk_init_data clk_security_init = {
- .name = "mf",
+ .name = "security",
.ops = &ios_ops,
.parent_names = std_clk_sys_parents,
.num_parents = ARRAY_SIZE(std_clk_sys_parents),
@@ -1014,96 +1029,3 @@ static struct clk_std clk_usb1 = {
.init = &clk_usb1_init,
},
};
-
-enum prima2_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, maxclk,
-};
-
-static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_mm.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
-};
-
-static struct clk *prima2_clks[maxclk];
-static struct clk_onecell_data clk_data;
-
-static void __init sirfsoc_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- /* These are always available (RTC and 26MHz OSC)*/
- prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
- CLK_IS_ROOT, 32768);
- prima2_clks[osc]= clk_register_fixed_rate(NULL, "osc", NULL,
- CLK_IS_ROOT, 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(IS_ERR(prima2_clks[i]));
- }
- clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
- clk_register_clkdev(prima2_clks[io], NULL, "io");
- clk_register_clkdev(prima2_clks[mem], NULL, "mem");
-
- clk_data.clks = prima2_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(sirfsoc_clk, "sirf,prima2-clkc", sirfsoc_clk_init);
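The new cpu clock ops make the CPU rate reachable through the generic clk API; since the SiRF init code registers a clkdev alias named "cpu", legacy non-DT code could drive it roughly as below (a hedged sketch; the 800 MHz target is an assumption):

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical caller using the "cpu" clkdev alias registered by the clock init. */
static int example_set_cpu_rate(void)
{
	struct clk *cpu = clk_get_sys("cpu", NULL);
	int ret;

	if (IS_ERR(cpu))
		return PTR_ERR(cpu);

	/* Retunes pll1 (via a temporary reparent) unless a PLL already matches. */
	ret = clk_set_rate(cpu, 800000000);	/* assumed 800 MHz target */
	clk_put(cpu);
	return ret;
}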
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
new file mode 100644
index 000000000000..7adc5c70c7ff
--- /dev/null
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -0,0 +1,151 @@
+/*
+ * Clock tree for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "prima2.h"
+#include "clk-common.c"
+
+static struct clk_dmn clk_mmc01 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 59,
+ .hw = {
+ .init = &clk_mmc01_init,
+ },
+};
+
+static struct clk_dmn clk_mmc23 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 60,
+ .hw = {
+ .init = &clk_mmc23_init,
+ },
+};
+
+static struct clk_dmn clk_mmc45 = {
+ .regofs = SIRFSOC_CLKC_MMC_CFG,
+ .enable_bit = 61,
+ .hw = {
+ .init = &clk_mmc45_init,
+ },
+};
+
+static struct clk_init_data clk_nand_init = {
+ .name = "nand",
+ .ops = &ios_ops,
+ .parent_names = std_clk_io_parents,
+ .num_parents = ARRAY_SIZE(std_clk_io_parents),
+};
+
+static struct clk_std clk_nand = {
+ .enable_bit = 34,
+ .hw = {
+ .init = &clk_nand_init,
+ },
+};
+
+enum prima2_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, cphif, maxclk,
+};
+
+static __initdata struct clk_hw *prima2_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_mm.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+ &clk_cphif.hw,
+};
+
+static struct clk *prima2_clks[maxclk];
+
+static void __init prima2_clk_init(struct device_node *np)
+{
+ struct device_node *rscnp;
+ int i;
+
+ rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
+ sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
+ if (!sirfsoc_rsc_vbase)
+ panic("unable to map rsc registers\n");
+ of_node_put(rscnp);
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
+
+	/* These are always available (RTC and 26 MHz OSC) */
+ prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
+ CLK_IS_ROOT, 32768);
+ prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
+ CLK_IS_ROOT, 26000000);
+
+ for (i = pll1; i < maxclk; i++) {
+ prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
+ BUG_ON(!prima2_clks[i]);
+ }
+ clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(prima2_clks[io], NULL, "io");
+ clk_register_clkdev(prima2_clks[mem], NULL, "mem");
+ clk_register_clkdev(prima2_clks[mem], NULL, "osc");
+
+ clk_data.clks = prima2_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+CLK_OF_DECLARE(prima2_clk, "sirf,prima2-clkc", prima2_clk_init);
diff --git a/drivers/clk/sirf/prima2.h b/drivers/clk/sirf/prima2.h
new file mode 100644
index 000000000000..01bc3854a058
--- /dev/null
+++ b/drivers/clk/sirf/prima2.h
@@ -0,0 +1,25 @@
+#define SIRFSOC_CLKC_CLK_EN0 0x0000
+#define SIRFSOC_CLKC_CLK_EN1 0x0004
+#define SIRFSOC_CLKC_REF_CFG 0x0014
+#define SIRFSOC_CLKC_CPU_CFG 0x0018
+#define SIRFSOC_CLKC_MEM_CFG 0x001c
+#define SIRFSOC_CLKC_SYS_CFG 0x0020
+#define SIRFSOC_CLKC_IO_CFG 0x0024
+#define SIRFSOC_CLKC_DSP_CFG 0x0028
+#define SIRFSOC_CLKC_GFX_CFG 0x002c
+#define SIRFSOC_CLKC_MM_CFG 0x0030
+#define SIRFSOC_CLKC_LCD_CFG 0x0034
+#define SIRFSOC_CLKC_MMC_CFG 0x0038
+#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
+#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
+#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
+#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
+#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
+#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
+#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
+#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
+#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
+#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
+#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
+#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
+#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 81dd31a686df..5983a26a8c5f 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -121,9 +121,7 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
int rc;
u32 fixed_div;
- rc = of_property_read_u32(node, "reg", &reg);
- if (WARN_ON(rc))
- return NULL;
+ of_property_read_u32(node, "reg", &reg);
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
@@ -292,7 +290,7 @@ static void __init socfpga_gate_clk_init(struct device_node *node,
socfpga_clk->shift = div_reg[1];
socfpga_clk->width = div_reg[2];
} else {
- socfpga_clk->div_reg = 0;
+ socfpga_clk->div_reg = NULL;
}
of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 958aa3ad1d60..dffd4ce6c8b5 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -116,7 +116,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-struct clk_ops clk_frac_ops = {
+static struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 88523f91d9b7..9e232644f07e 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -30,17 +30,9 @@
* parent - fixed parent. No clk_set_parent support
*/
-struct clk_factors {
- struct clk_hw hw;
- void __iomem *reg;
- struct clk_factors_config *config;
- void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
- spinlock_t *lock;
-};
-
#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-#define SETMASK(len, pos) (((-1U) >> (31-len)) << (pos))
+#define SETMASK(len, pos) (((1U << (len)) - 1) << (pos))
#define CLRMASK(len, pos) (~(SETMASK(len, pos)))
#define FACTOR_GET(bit, len, reg) (((reg) & SETMASK(len, bit)) >> (bit))
@@ -88,7 +80,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u8 n, k, m, p;
+ u8 n = 0, k = 0, m = 0, p = 0;
u32 reg;
struct clk_factors *factors = to_clk_factors(hw);
struct clk_factors_config *config = factors->config;
@@ -120,61 +112,8 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static const struct clk_ops clk_factors_ops = {
+const struct clk_ops clk_factors_ops = {
.recalc_rate = clk_factors_recalc_rate,
.round_rate = clk_factors_round_rate,
.set_rate = clk_factors_set_rate,
};
-
-/**
- * clk_register_factors - register a factors clock with
- * the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust factors
- * @config: shift and width of factors n, k, m and p
- * @get_factors: function to calculate the factors for a given frequency
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_factors(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags, void __iomem *reg,
- struct clk_factors_config *config,
- void (*get_factors)(u32 *rate, u32 parent,
- u8 *n, u8 *k, u8 *m, u8 *p),
- spinlock_t *lock)
-{
- struct clk_factors *factors;
- struct clk *clk;
- struct clk_init_data init;
-
- /* allocate the factors */
- factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
- if (!factors) {
- pr_err("%s: could not allocate factors clk\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- init.name = name;
- init.ops = &clk_factors_ops;
- init.flags = flags;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
- /* struct clk_factors assignments */
- factors->reg = reg;
- factors->config = config;
- factors->lock = lock;
- factors->hw.init = &init;
- factors->get_factors = get_factors;
-
- /* register the clock */
- clk = clk_register(dev, &factors->hw);
-
- if (IS_ERR(clk))
- kfree(factors);
-
- return clk;
-}
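A quick standalone check of the corrected SETMASK() macro above (illustrative only, not part of the patch; the OLD_SETMASK/NEW_SETMASK names are invented here purely for comparison). For a 2-bit field at bit position 4 the old formulation produced a mask one bit too wide:

#include <assert.h>

#define OLD_SETMASK(len, pos) (((-1U) >> (31-len)) << (pos))
#define NEW_SETMASK(len, pos) (((1U << (len)) - 1) << (pos))

int main(void)
{
	assert(OLD_SETMASK(2, 4) == 0x70);	/* bits 6:4 - one bit too many */
	assert(NEW_SETMASK(2, 4) == 0x30);	/* bits 5:4 - exactly the field */
	return 0;
}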
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index f49851cc4380..02e1a43ebac7 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -17,11 +17,13 @@ struct clk_factors_config {
u8 pwidth;
};
-struct clk *clk_register_factors(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags, void __iomem *reg,
- struct clk_factors_config *config,
- void (*get_factors) (u32 *rate, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p),
- spinlock_t *lock);
+struct clk_factors {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct clk_factors_config *config;
+ void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+ spinlock_t *lock;
+};
+
+extern const struct clk_ops clk_factors_ops;
#endif
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9bbd03514540..abb6c5ac8a10 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -23,6 +23,9 @@
static DEFINE_SPINLOCK(clk_lock);
+/* Maximum number of parents our clocks have */
+#define SUNXI_MAX_PARENTS 5
+
/**
* sun4i_osc_clk_setup() - Setup function for gatable oscillator
*/
@@ -37,18 +40,16 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
const char *clk_name = node->name;
u32 rate;
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return;
+
/* allocate fixed-rate and gate clock structs */
fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
if (!fixed)
return;
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate) {
- kfree(fixed);
- return;
- }
-
- if (of_property_read_u32(node, "clock-frequency", &rate))
- return;
+ if (!gate)
+ goto err_free_fixed;
/* set up gate and fixed rate properties */
gate->reg = of_iomap(node, 0);
@@ -63,10 +64,18 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
&gate->hw, &clk_gate_ops,
CLK_IS_ROOT);
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
+ if (IS_ERR(clk))
+ goto err_free_gate;
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+
+ return;
+
+err_free_gate:
+ kfree(gate);
+err_free_fixed:
+ kfree(fixed);
}
CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
@@ -209,6 +218,40 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
}
/**
+ * sun4i_get_pll5_factors() - calculates n, k factors for PLL5
+ * PLL5 rate is calculated as follows
+ * rate = parent_rate * n * (k + 1)
+ * parent_rate is always 24Mhz
+ */
+
+static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div;
+
+ /* Normalize value to a parent_rate multiple (24M) */
+ div = *freq / parent_rate;
+ *freq = parent_rate * div;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ if (div < 31)
+ *k = 0;
+ else if (div / 2 < 31)
+ *k = 1;
+ else if (div / 3 < 31)
+ *k = 2;
+ else
+ *k = 3;
+
+ *n = DIV_ROUND_UP(div, (*k+1));
+}
+
+
+
+/**
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -252,10 +295,96 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
/**
+ * sun4i_get_mod0_factors() - calculates m, p factors for MOD0-style clocks
+ * The MOD0 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div, calcm, calcp;
+
+ /* These clocks can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency */
+ if (*freq > parent_rate)
+ *freq = parent_rate;
+
+ div = parent_rate / *freq;
+
+ if (div < 16)
+ calcp = 0;
+ else if (div / 2 < 16)
+ calcp = 1;
+ else if (div / 4 < 16)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+
+ *freq = (parent_rate >> calcp) / calcm;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm - 1;
+ *p = calcp;
+}
+
+
+
+/**
+ * sun7i_a20_get_out_factors() - calculates m, p factors for CLK_OUT_A/B
+ * CLK_OUT rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div, calcm, calcp;
+
+ /* These clocks can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency */
+ if (*freq > parent_rate)
+ *freq = parent_rate;
+
+ div = parent_rate / *freq;
+
+ if (div < 32)
+ calcp = 0;
+ else if (div / 2 < 32)
+ calcp = 1;
+ else if (div / 4 < 32)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+
+ *freq = (parent_rate >> calcp) / calcm;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm - 1;
+ *p = calcp;
+}
+
+
+
+/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
+#define SUNXI_FACTORS_MUX_MASK 0x3
+
struct factors_data {
+ int enable;
+ int mux;
struct clk_factors_config *table;
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
};
@@ -280,6 +409,13 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
.mwidth = 2,
};
+static struct clk_factors_config sun4i_pll5_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+};
+
static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
@@ -287,40 +423,143 @@ static struct clk_factors_config sun4i_apb1_config = {
.pwidth = 2,
};
+/* user manual says "n" but it's really "p" */
+static struct clk_factors_config sun4i_mod0_config = {
+ .mshift = 0,
+ .mwidth = 4,
+ .pshift = 16,
+ .pwidth = 2,
+};
+
+/* user manual says "n" but it's really "p" */
+static struct clk_factors_config sun7i_a20_out_config = {
+ .mshift = 8,
+ .mwidth = 5,
+ .pshift = 20,
+ .pwidth = 2,
+};
+
static const struct factors_data sun4i_pll1_data __initconst = {
+ .enable = 31,
.table = &sun4i_pll1_config,
.getter = sun4i_get_pll1_factors,
};
static const struct factors_data sun6i_a31_pll1_data __initconst = {
+ .enable = 31,
.table = &sun6i_a31_pll1_config,
.getter = sun6i_a31_get_pll1_factors,
};
+static const struct factors_data sun4i_pll5_data __initconst = {
+ .enable = 31,
+ .table = &sun4i_pll5_config,
+ .getter = sun4i_get_pll5_factors,
+};
+
static const struct factors_data sun4i_apb1_data __initconst = {
.table = &sun4i_apb1_config,
.getter = sun4i_get_apb1_factors,
};
-static void __init sunxi_factors_clk_setup(struct device_node *node,
- struct factors_data *data)
+static const struct factors_data sun4i_mod0_data __initconst = {
+ .enable = 31,
+ .mux = 24,
+ .table = &sun4i_mod0_config,
+ .getter = sun4i_get_mod0_factors,
+};
+
+static const struct factors_data sun7i_a20_out_data __initconst = {
+ .enable = 31,
+ .mux = 24,
+ .table = &sun7i_a20_out_config,
+ .getter = sun7i_a20_get_out_factors,
+};
+
+static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
+ const struct factors_data *data)
{
struct clk *clk;
+ struct clk_factors *factors;
+ struct clk_gate *gate = NULL;
+ struct clk_mux *mux = NULL;
+ struct clk_hw *gate_hw = NULL;
+ struct clk_hw *mux_hw = NULL;
const char *clk_name = node->name;
- const char *parent;
+ const char *parents[SUNXI_MAX_PARENTS];
void *reg;
+ int i = 0;
reg = of_iomap(node, 0);
- parent = of_clk_get_parent_name(node, 0);
+ /* if we have a mux, we will have >1 parents */
+ while (i < SUNXI_MAX_PARENTS &&
+ (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+ i++;
+
+ /* Nodes should be providing the name via clock-output-names
+ * but originally our dts didn't, and so we used node->name.
+ * The new, better nodes look like clk@deadbeef, so we pull the
+ * name just in this case */
+ if (!strcmp("clk", clk_name)) {
+ of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ }
+
+ factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
+ if (!factors)
+ return NULL;
+
+ /* Add a gate if this factor clock can be gated */
+ if (data->enable) {
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(factors);
+ return NULL;
+ }
+
+ /* set up gate properties */
+ gate->reg = reg;
+ gate->bit_idx = data->enable;
+ gate->lock = &clk_lock;
+ gate_hw = &gate->hw;
+ }
+
+ /* Add a mux if this factor clock can be muxed */
+ if (data->mux) {
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+ if (!mux) {
+ kfree(factors);
+ kfree(gate);
+ return NULL;
+ }
+
+ /* set up mux properties */
+ mux->reg = reg;
+ mux->shift = data->mux;
+ mux->mask = SUNXI_FACTORS_MUX_MASK;
+ mux->lock = &clk_lock;
+ mux_hw = &mux->hw;
+ }
- clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
- data->table, data->getter, &clk_lock);
+ /* set up factors properties */
+ factors->reg = reg;
+ factors->config = data->table;
+ factors->get_factors = data->getter;
+ factors->lock = &clk_lock;
+
+ clk = clk_register_composite(NULL, clk_name,
+ parents, i,
+ mux_hw, &clk_mux_ops,
+ &factors->hw, &clk_factors_ops,
+ gate_hw, &clk_gate_ops, 0);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
+
+ return clk;
}
@@ -352,13 +591,14 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
{
struct clk *clk;
const char *clk_name = node->name;
- const char *parents[5];
+ const char *parents[SUNXI_MAX_PARENTS];
void *reg;
int i = 0;
reg = of_iomap(node, 0);
- while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+ while (i < SUNXI_MAX_PARENTS &&
+ (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
clk = clk_register_mux(NULL, clk_name, parents, i,
@@ -555,11 +795,186 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
+
+
+/**
+ * sunxi_divs_clk_setup() helper data
+ */
+
+#define SUNXI_DIVS_MAX_QTY 2
+#define SUNXI_DIVISOR_WIDTH 2
+
+struct divs_data {
+ const struct factors_data *factors; /* data for the factor clock */
+ struct {
+ u8 fixed; /* is it a fixed divisor? if not... */
+ struct clk_div_table *table; /* is it a table based divisor? */
+ u8 shift; /* otherwise it's a normal divisor with this shift */
+ u8 pow; /* is it power-of-two based? */
+ u8 gate; /* is it independently gateable? */
+ } div[SUNXI_DIVS_MAX_QTY];
+};
+
+static struct clk_div_table pll6_sata_tbl[] = {
+ { .val = 0, .div = 6, },
+ { .val = 1, .div = 12, },
+ { .val = 2, .div = 18, },
+ { .val = 3, .div = 24, },
+ { } /* sentinel */
+};
+
+static const struct divs_data pll5_divs_data __initconst = {
+ .factors = &sun4i_pll5_data,
+ .div = {
+ { .shift = 0, .pow = 0, }, /* M, DDR */
+ { .shift = 16, .pow = 1, }, /* P, other */
+ }
+};
+
+static const struct divs_data pll6_divs_data __initconst = {
+ .factors = &sun4i_pll5_data,
+ .div = {
+ { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
+ { .fixed = 2 }, /* P, other */
+ }
+};
+
+/**
+ * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
+ *
+ * These clocks look something like this
+ * ________________________
+ * | ___divisor 1---|----> to consumer
+ * parent >--| pll___/___divisor 2---|----> to consumer
+ * | \_______________|____> to consumer
+ * |________________________|
+ */
+
+static void __init sunxi_divs_clk_setup(struct device_node *node,
+ struct divs_data *data)
+{
+ struct clk_onecell_data *clk_data;
+ const char *parent = node->name;
+ const char *clk_name;
+ struct clk **clks, *pclk;
+ struct clk_hw *gate_hw, *rate_hw;
+ const struct clk_ops *rate_ops;
+ struct clk_gate *gate = NULL;
+ struct clk_fixed_factor *fix_factor;
+ struct clk_divider *divider;
+ void *reg;
+ int i = 0;
+ int flags, clkflags;
+
+ /* Set up factor clock that we will be dividing */
+ pclk = sunxi_factors_clk_setup(node, data->factors);
+
+ reg = of_iomap(node, 0);
+
+ clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clks = kzalloc((SUNXI_DIVS_MAX_QTY+1) * sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ goto free_clkdata;
+
+ clk_data->clks = clks;
+
+ /* It's not a good idea to have automatic reparenting changing
+ * our RAM clock! */
+ clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
+
+ for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) {
+ if (of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name) != 0)
+ break;
+
+ gate_hw = NULL;
+ rate_hw = NULL;
+ rate_ops = NULL;
+
+ /* If this leaf clock can be gated, create a gate */
+ if (data->div[i].gate) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_clks;
+
+ gate->reg = reg;
+ gate->bit_idx = data->div[i].gate;
+ gate->lock = &clk_lock;
+
+ gate_hw = &gate->hw;
+ }
+
+ /* Leaves can be fixed or configurable divisors */
+ if (data->div[i].fixed) {
+ fix_factor = kzalloc(sizeof(*fix_factor), GFP_KERNEL);
+ if (!fix_factor)
+ goto free_gate;
+
+ fix_factor->mult = 1;
+ fix_factor->div = data->div[i].fixed;
+
+ rate_hw = &fix_factor->hw;
+ rate_ops = &clk_fixed_factor_ops;
+ } else {
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ goto free_gate;
+
+ flags = data->div[i].pow ? CLK_DIVIDER_POWER_OF_TWO : 0;
+
+ divider->reg = reg;
+ divider->shift = data->div[i].shift;
+ divider->width = SUNXI_DIVISOR_WIDTH;
+ divider->flags = flags;
+ divider->lock = &clk_lock;
+ divider->table = data->div[i].table;
+
+ rate_hw = &divider->hw;
+ rate_ops = &clk_divider_ops;
+ }
+
+ /* Wrap the (potential) gate and the divisor on a composite
+ * clock to unify them */
+ clks[i] = clk_register_composite(NULL, clk_name, &parent, 1,
+ NULL, NULL,
+ rate_hw, rate_ops,
+ gate_hw, &clk_gate_ops,
+ clkflags);
+
+ WARN_ON(IS_ERR(clk_data->clks[i]));
+ clk_register_clkdev(clks[i], clk_name, NULL);
+ }
+
+ /* The last clock available on the getter is the parent */
+ clks[i++] = pclk;
+
+ /* Adjust to the real max */
+ clk_data->clk_num = i;
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ return;
+
+free_gate:
+ kfree(gate);
+free_clks:
+ kfree(clks);
+free_clkdata:
+ kfree(clk_data);
+}
+
+
+
/* Matches for factors clocks */
static const struct of_device_id clk_factors_match[] __initconst = {
{.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
{.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
+ {.compatible = "allwinner,sun4i-mod0-clk", .data = &sun4i_mod0_data,},
+ {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
{}
};
@@ -572,6 +987,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
{}
};
+/* Matches for divided outputs */
+static const struct of_device_id clk_divs_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-pll5-clk", .data = &pll5_divs_data,},
+ {.compatible = "allwinner,sun4i-pll6-clk", .data = &pll6_divs_data,},
+ {}
+};
+
/* Matches for mux clocks */
static const struct of_device_id clk_mux_match[] __initconst = {
{.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
@@ -616,7 +1038,32 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
}
}
-static void __init sunxi_init_clocks(struct device_node *np)
+/**
+ * System clock protection
+ *
+ * By enabling these critical clocks, we prevent their accidental gating
+ * by the framework
+ */
+static void __init sunxi_clock_protect(void)
+{
+ struct clk *clk;
+
+ /* memory bus clock - sun5i+ */
+ clk = clk_get(NULL, "mbus");
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
+ /* DDR clock - sun4i+ */
+ clk = clk_get(NULL, "pll5_ddr");
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+}
+
+static void __init sunxi_init_clocks(void)
{
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
@@ -624,11 +1071,17 @@ static void __init sunxi_init_clocks(struct device_node *np)
/* Register divider clocks */
of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
+ /* Register divided output clocks */
+ of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
+
/* Register mux clocks */
of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
/* Register gate clocks */
of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
+
+ /* Enable core system clocks */
+ sunxi_clock_protect();
}
CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sunxi_init_clocks);
CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sunxi_init_clocks);
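To make the MOD0 factor computation above concrete, here is a standalone walk-through of the same arithmetic (a userspace sketch with hypothetical numbers, not part of the patch). Requesting 400 kHz from a 24 MHz parent yields p = 2 and m = 14, i.e. (24000000 >> 2) / (14 + 1) = 400000:

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void get_mod0_factors(unsigned int *freq, unsigned int parent_rate,
			     unsigned char *m, unsigned char *p)
{
	unsigned char div, calcm, calcp;

	/* this clock can only divide its parent */
	if (*freq > parent_rate)
		*freq = parent_rate;

	div = parent_rate / *freq;

	if (div < 16)
		calcp = 0;
	else if (div / 2 < 16)
		calcp = 1;
	else if (div / 4 < 16)
		calcp = 2;
	else
		calcp = 3;

	calcm = DIV_ROUND_UP(div, 1 << calcp);
	*freq = (parent_rate >> calcp) / calcm;

	*m = calcm - 1;
	*p = calcp;
}

int main(void)
{
	unsigned int freq = 400000;
	unsigned char m, p;

	get_mod0_factors(&freq, 24000000, &m, &p);
	assert(freq == 400000 && m == 14 && p == 2);
	return 0;
}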
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index f49fac2d193a..f7dfb72884a4 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -6,7 +6,12 @@ obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
obj-y += clk-super.o
-
+obj-y += clk-tegra-audio.o
+obj-y += clk-tegra-periph.o
+obj-y += clk-tegra-pmc.o
+obj-y += clk-tegra-fixed.o
+obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f37e3a..290f9c1a3749 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
return 0;
if (divider_ux1 > get_max_div(divider))
- return -EINVAL;
+ return get_max_div(divider);
return divider_ux1;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
new file mode 100644
index 000000000000..c39613c519af
--- /dev/null
+++ b/drivers/clk/tegra/clk-id.h
@@ -0,0 +1,239 @@
+/*
+ * This header provides IDs for clocks common between several Tegra SoCs
+ */
+#ifndef _TEGRA_CLK_ID_H
+#define _TEGRA_CLK_ID_H
+
+enum clk_id {
+ tegra_clk_actmon,
+ tegra_clk_adx,
+ tegra_clk_adx1,
+ tegra_clk_afi,
+ tegra_clk_amx,
+ tegra_clk_amx1,
+ tegra_clk_apbdma,
+ tegra_clk_apbif,
+ tegra_clk_audio0,
+ tegra_clk_audio0_2x,
+ tegra_clk_audio0_mux,
+ tegra_clk_audio1,
+ tegra_clk_audio1_2x,
+ tegra_clk_audio1_mux,
+ tegra_clk_audio2,
+ tegra_clk_audio2_2x,
+ tegra_clk_audio2_mux,
+ tegra_clk_audio3,
+ tegra_clk_audio3_2x,
+ tegra_clk_audio3_mux,
+ tegra_clk_audio4,
+ tegra_clk_audio4_2x,
+ tegra_clk_audio4_mux,
+ tegra_clk_blink,
+ tegra_clk_bsea,
+ tegra_clk_bsev,
+ tegra_clk_cclk_g,
+ tegra_clk_cclk_lp,
+ tegra_clk_cilab,
+ tegra_clk_cilcd,
+ tegra_clk_cile,
+ tegra_clk_clk_32k,
+ tegra_clk_clk72Mhz,
+ tegra_clk_clk_m,
+ tegra_clk_clk_m_div2,
+ tegra_clk_clk_m_div4,
+ tegra_clk_clk_out_1,
+ tegra_clk_clk_out_1_mux,
+ tegra_clk_clk_out_2,
+ tegra_clk_clk_out_2_mux,
+ tegra_clk_clk_out_3,
+ tegra_clk_clk_out_3_mux,
+ tegra_clk_cml0,
+ tegra_clk_cml1,
+ tegra_clk_csi,
+ tegra_clk_csite,
+ tegra_clk_csus,
+ tegra_clk_cve,
+ tegra_clk_dam0,
+ tegra_clk_dam1,
+ tegra_clk_dam2,
+ tegra_clk_d_audio,
+ tegra_clk_dds,
+ tegra_clk_dfll_ref,
+ tegra_clk_dfll_soc,
+ tegra_clk_disp1,
+ tegra_clk_disp2,
+ tegra_clk_dp2,
+ tegra_clk_dpaux,
+ tegra_clk_dsia,
+ tegra_clk_dsialp,
+ tegra_clk_dsia_mux,
+ tegra_clk_dsib,
+ tegra_clk_dsiblp,
+ tegra_clk_dsib_mux,
+ tegra_clk_dtv,
+ tegra_clk_emc,
+ tegra_clk_entropy,
+ tegra_clk_epp,
+ tegra_clk_epp_8,
+ tegra_clk_extern1,
+ tegra_clk_extern2,
+ tegra_clk_extern3,
+ tegra_clk_fuse,
+ tegra_clk_fuse_burn,
+ tegra_clk_gpu,
+ tegra_clk_gr2d,
+ tegra_clk_gr2d_8,
+ tegra_clk_gr3d,
+ tegra_clk_gr3d_8,
+ tegra_clk_hclk,
+ tegra_clk_hda,
+ tegra_clk_hda2codec_2x,
+ tegra_clk_hda2hdmi,
+ tegra_clk_hdmi,
+ tegra_clk_hdmi_audio,
+ tegra_clk_host1x,
+ tegra_clk_host1x_8,
+ tegra_clk_i2c1,
+ tegra_clk_i2c2,
+ tegra_clk_i2c3,
+ tegra_clk_i2c4,
+ tegra_clk_i2c5,
+ tegra_clk_i2c6,
+ tegra_clk_i2cslow,
+ tegra_clk_i2s0,
+ tegra_clk_i2s0_sync,
+ tegra_clk_i2s1,
+ tegra_clk_i2s1_sync,
+ tegra_clk_i2s2,
+ tegra_clk_i2s2_sync,
+ tegra_clk_i2s3,
+ tegra_clk_i2s3_sync,
+ tegra_clk_i2s4,
+ tegra_clk_i2s4_sync,
+ tegra_clk_isp,
+ tegra_clk_isp_8,
+ tegra_clk_ispb,
+ tegra_clk_kbc,
+ tegra_clk_kfuse,
+ tegra_clk_la,
+ tegra_clk_mipi,
+ tegra_clk_mipi_cal,
+ tegra_clk_mpe,
+ tegra_clk_mselect,
+ tegra_clk_msenc,
+ tegra_clk_ndflash,
+ tegra_clk_ndflash_8,
+ tegra_clk_ndspeed,
+ tegra_clk_ndspeed_8,
+ tegra_clk_nor,
+ tegra_clk_owr,
+ tegra_clk_pcie,
+ tegra_clk_pclk,
+ tegra_clk_pll_a,
+ tegra_clk_pll_a_out0,
+ tegra_clk_pll_c,
+ tegra_clk_pll_c2,
+ tegra_clk_pll_c3,
+ tegra_clk_pll_c4,
+ tegra_clk_pll_c_out1,
+ tegra_clk_pll_d,
+ tegra_clk_pll_d2,
+ tegra_clk_pll_d2_out0,
+ tegra_clk_pll_d_out0,
+ tegra_clk_pll_dp,
+ tegra_clk_pll_e_out0,
+ tegra_clk_pll_m,
+ tegra_clk_pll_m_out1,
+ tegra_clk_pll_p,
+ tegra_clk_pll_p_out1,
+ tegra_clk_pll_p_out2,
+ tegra_clk_pll_p_out2_int,
+ tegra_clk_pll_p_out3,
+ tegra_clk_pll_p_out4,
+ tegra_clk_pll_p_out5,
+ tegra_clk_pll_ref,
+ tegra_clk_pll_re_out,
+ tegra_clk_pll_re_vco,
+ tegra_clk_pll_u,
+ tegra_clk_pll_u_12m,
+ tegra_clk_pll_u_480m,
+ tegra_clk_pll_u_48m,
+ tegra_clk_pll_u_60m,
+ tegra_clk_pll_x,
+ tegra_clk_pll_x_out0,
+ tegra_clk_pwm,
+ tegra_clk_rtc,
+ tegra_clk_sata,
+ tegra_clk_sata_cold,
+ tegra_clk_sata_oob,
+ tegra_clk_sbc1,
+ tegra_clk_sbc1_8,
+ tegra_clk_sbc2,
+ tegra_clk_sbc2_8,
+ tegra_clk_sbc3,
+ tegra_clk_sbc3_8,
+ tegra_clk_sbc4,
+ tegra_clk_sbc4_8,
+ tegra_clk_sbc5,
+ tegra_clk_sbc5_8,
+ tegra_clk_sbc6,
+ tegra_clk_sbc6_8,
+ tegra_clk_sclk,
+ tegra_clk_sdmmc1,
+ tegra_clk_sdmmc1_8,
+ tegra_clk_sdmmc2,
+ tegra_clk_sdmmc2_8,
+ tegra_clk_sdmmc3,
+ tegra_clk_sdmmc3_8,
+ tegra_clk_sdmmc4,
+ tegra_clk_sdmmc4_8,
+ tegra_clk_se,
+ tegra_clk_soc_therm,
+ tegra_clk_sor0,
+ tegra_clk_sor0_lvds,
+ tegra_clk_spdif,
+ tegra_clk_spdif_2x,
+ tegra_clk_spdif_in,
+ tegra_clk_spdif_in_sync,
+ tegra_clk_spdif_mux,
+ tegra_clk_spdif_out,
+ tegra_clk_timer,
+ tegra_clk_trace,
+ tegra_clk_tsec,
+ tegra_clk_tsensor,
+ tegra_clk_tvdac,
+ tegra_clk_tvo,
+ tegra_clk_uarta,
+ tegra_clk_uartb,
+ tegra_clk_uartc,
+ tegra_clk_uartd,
+ tegra_clk_uarte,
+ tegra_clk_usb2,
+ tegra_clk_usb3,
+ tegra_clk_usbd,
+ tegra_clk_vcp,
+ tegra_clk_vde,
+ tegra_clk_vde_8,
+ tegra_clk_vfir,
+ tegra_clk_vi,
+ tegra_clk_vi_8,
+ tegra_clk_vi_9,
+ tegra_clk_vic03,
+ tegra_clk_vim2_clk,
+ tegra_clk_vimclk_sync,
+ tegra_clk_vi_sensor,
+ tegra_clk_vi_sensor2,
+ tegra_clk_vi_sensor_8,
+ tegra_clk_xusb_dev,
+ tegra_clk_xusb_dev_src,
+ tegra_clk_xusb_falcon_src,
+ tegra_clk_xusb_fs_src,
+ tegra_clk_xusb_host,
+ tegra_clk_xusb_host_src,
+ tegra_clk_xusb_hs_src,
+ tegra_clk_xusb_ss,
+ tegra_clk_xusb_ss_src,
+ tegra_clk_max,
+};
+
+#endif /* _TEGRA_CLK_ID_H */
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index bafee9895a24..507015314827 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -36,8 +36,6 @@ static DEFINE_SPINLOCK(periph_ref_lock);
#define read_rst(gate) \
readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
-#define write_rst_set(val, gate) \
- writel_relaxed(val, gate->clk_base + (gate->regs->rst_set_reg))
#define write_rst_clr(val, gate) \
writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
@@ -123,26 +121,6 @@ static void clk_periph_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&periph_ref_lock, flags);
}
-void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert)
-{
- if (gate->flags & TEGRA_PERIPH_NO_RESET)
- return;
-
- if (assert) {
- /*
- * If peripheral is in the APB bus then read the APB bus to
- * flush the write operation in apb bus. This will avoid the
- * peripheral access after disabling clock
- */
- if (gate->flags & TEGRA_PERIPH_ON_APB)
- tegra_read_chipid();
-
- write_rst_set(periph_clk_to_bit(gate), gate);
- } else {
- write_rst_clr(periph_clk_to_bit(gate), gate);
- }
-}
-
const struct clk_ops tegra_clk_periph_gate_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
@@ -151,12 +129,16 @@ const struct clk_ops tegra_clk_periph_gate_ops = {
struct clk *tegra_clk_register_periph_gate(const char *name,
const char *parent_name, u8 gate_flags, void __iomem *clk_base,
- unsigned long flags, int clk_num,
- struct tegra_clk_periph_regs *pregs, int *enable_refcnt)
+ unsigned long flags, int clk_num, int *enable_refcnt)
{
struct tegra_clk_periph_gate *gate;
struct clk *clk;
struct clk_init_data init;
+ struct tegra_clk_periph_regs *pregs;
+
+ pregs = get_reg_bank(clk_num);
+ if (!pregs)
+ return ERR_PTR(-EINVAL);
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) {
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index b2309d37a963..356e9b804421 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -111,46 +111,6 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
-void tegra_periph_reset_deassert(struct clk *c)
-{
- struct clk_hw *hw = __clk_get_hw(c);
- struct tegra_clk_periph *periph = to_clk_periph(hw);
- struct tegra_clk_periph_gate *gate;
-
- if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
- gate = to_clk_periph_gate(hw);
- if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
- WARN_ON(1);
- return;
- }
- } else {
- gate = &periph->gate;
- }
-
- tegra_periph_reset(gate, 0);
-}
-EXPORT_SYMBOL(tegra_periph_reset_deassert);
-
-void tegra_periph_reset_assert(struct clk *c)
-{
- struct clk_hw *hw = __clk_get_hw(c);
- struct tegra_clk_periph *periph = to_clk_periph(hw);
- struct tegra_clk_periph_gate *gate;
-
- if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
- gate = to_clk_periph_gate(hw);
- if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
- WARN_ON(1);
- return;
- }
- } else {
- gate = &periph->gate;
- }
-
- tegra_periph_reset(gate, 1);
-}
-EXPORT_SYMBOL(tegra_periph_reset_assert);
-
const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
@@ -162,7 +122,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.disable = clk_periph_disable,
};
-const struct clk_ops tegra_clk_periph_nodiv_ops = {
+static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.is_enabled = clk_periph_is_enabled,
@@ -170,27 +130,50 @@ const struct clk_ops tegra_clk_periph_nodiv_ops = {
.disable = clk_periph_disable,
};
+const struct clk_ops tegra_clk_periph_no_gate_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .recalc_rate = clk_periph_recalc_rate,
+ .round_rate = clk_periph_round_rate,
+ .set_rate = clk_periph_set_rate,
+};
+
static struct clk *_tegra_clk_register_periph(const char *name,
const char **parent_names, int num_parents,
struct tegra_clk_periph *periph,
- void __iomem *clk_base, u32 offset, bool div,
+ void __iomem *clk_base, u32 offset,
unsigned long flags)
{
struct clk *clk;
struct clk_init_data init;
+ struct tegra_clk_periph_regs *bank;
+ bool div = !(periph->gate.flags & TEGRA_PERIPH_NO_DIV);
+
+ if (periph->gate.flags & TEGRA_PERIPH_NO_DIV) {
+ flags |= CLK_SET_RATE_PARENT;
+ init.ops = &tegra_clk_periph_nodiv_ops;
+ } else if (periph->gate.flags & TEGRA_PERIPH_NO_GATE)
+ init.ops = &tegra_clk_periph_no_gate_ops;
+ else
+ init.ops = &tegra_clk_periph_ops;
init.name = name;
- init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
+ bank = get_reg_bank(periph->gate.clk_num);
+ if (!bank)
+ return ERR_PTR(-EINVAL);
+
/* Data in .init is copied by clk_register(), so stack variable OK */
periph->hw.init = &init;
periph->magic = TEGRA_CLK_PERIPH_MAGIC;
periph->mux.reg = clk_base + offset;
periph->divider.reg = div ? (clk_base + offset) : NULL;
periph->gate.clk_base = clk_base;
+ periph->gate.regs = bank;
+ periph->gate.enable_refcnt = periph_clk_enb_refcnt;
clk = clk_register(NULL, &periph->hw);
if (IS_ERR(clk))
@@ -209,7 +192,7 @@ struct clk *tegra_clk_register_periph(const char *name,
u32 offset, unsigned long flags)
{
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, true, flags);
+ periph, clk_base, offset, flags);
}
struct clk *tegra_clk_register_periph_nodiv(const char *name,
@@ -217,6 +200,7 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_clk_periph *periph, void __iomem *clk_base,
u32 offset)
{
+ periph->gate.flags |= TEGRA_PERIPH_NO_DIV;
return _tegra_clk_register_periph(name, parent_names, num_parents,
- periph, clk_base, offset, false, CLK_SET_RATE_PARENT);
+ periph, clk_base, offset, CLK_SET_RATE_PARENT);
}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 197074a57754..0d20241e0770 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -77,7 +77,23 @@
#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
#define PLLE_SS_CTRL 0x68
-#define PLLE_SS_DISABLE (7 << 10)
+#define PLLE_SS_CNTL_BYPASS_SS BIT(10)
+#define PLLE_SS_CNTL_INTERP_RESET BIT(11)
+#define PLLE_SS_CNTL_SSC_BYP BIT(12)
+#define PLLE_SS_CNTL_CENTER BIT(14)
+#define PLLE_SS_CNTL_INVERT BIT(15)
+#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
+ PLLE_SS_CNTL_SSC_BYP)
+#define PLLE_SS_MAX_MASK 0x1ff
+#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_INC_MASK (0xff << 16)
+#define PLLE_SS_INC_VAL (0x1 << 16)
+#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
+#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_COEFFICIENTS_MASK \
+ (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
+#define PLLE_SS_COEFFICIENTS_VAL \
+ (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
#define PLLE_AUX_PLLP_SEL BIT(2)
#define PLLE_AUX_ENABLE_SWCTL BIT(4)
@@ -121,6 +137,36 @@
#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
+#define PLLSS_MISC_KCP 0
+#define PLLSS_MISC_KVCO 0
+#define PLLSS_MISC_SETUP 0
+#define PLLSS_EN_SDM 0
+#define PLLSS_EN_SSC 0
+#define PLLSS_EN_DITHER2 0
+#define PLLSS_EN_DITHER 1
+#define PLLSS_SDM_RESET 0
+#define PLLSS_CLAMP 0
+#define PLLSS_SDM_SSC_MAX 0
+#define PLLSS_SDM_SSC_MIN 0
+#define PLLSS_SDM_SSC_STEP 0
+#define PLLSS_SDM_DIN 0
+#define PLLSS_MISC_DEFAULT ((PLLSS_MISC_KCP << 25) | \
+ (PLLSS_MISC_KVCO << 24) | \
+ PLLSS_MISC_SETUP)
+#define PLLSS_CFG_DEFAULT ((PLLSS_EN_SDM << 31) | \
+ (PLLSS_EN_SSC << 30) | \
+ (PLLSS_EN_DITHER2 << 29) | \
+ (PLLSS_EN_DITHER << 28) | \
+ (PLLSS_SDM_RESET) << 27 | \
+ (PLLSS_CLAMP << 22))
+#define PLLSS_CTRL1_DEFAULT \
+ ((PLLSS_SDM_SSC_MAX << 16) | PLLSS_SDM_SSC_MIN)
+#define PLLSS_CTRL2_DEFAULT \
+ ((PLLSS_SDM_SSC_STEP << 16) | PLLSS_SDM_DIN)
+#define PLLSS_LOCK_OVERRIDE BIT(24)
+#define PLLSS_REF_SRC_SEL_SHIFT 25
+#define PLLSS_REF_SRC_SEL_MASK (3 << PLLSS_REF_SRC_SEL_SHIFT)
+
#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
@@ -134,7 +180,7 @@
#define mask(w) ((1 << (w)) - 1)
#define divm_mask(p) mask(p->params->div_nmp->divm_width)
#define divn_mask(p) mask(p->params->div_nmp->divn_width)
-#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
+#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
mask(p->params->div_nmp->divp_width))
#define divm_max(p) (divm_mask(p))
@@ -154,10 +200,10 @@ static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
{
u32 val;
- if (!(pll->flags & TEGRA_PLL_USE_LOCK))
+ if (!(pll->params->flags & TEGRA_PLL_USE_LOCK))
return;
- if (!(pll->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
+ if (!(pll->params->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
return;
val = pll_readl_misc(pll);
@@ -171,13 +217,13 @@ static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
u32 val, lock_mask;
void __iomem *lock_addr;
- if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
+ if (!(pll->params->flags & TEGRA_PLL_USE_LOCK)) {
udelay(pll->params->lock_delay);
return 0;
}
lock_addr = pll->clk_base;
- if (pll->flags & TEGRA_PLL_LOCK_MISC)
+ if (pll->params->flags & TEGRA_PLL_LOCK_MISC)
lock_addr += pll->params->misc_reg;
else
lock_addr += pll->params->base_reg;
@@ -204,7 +250,7 @@ static int clk_pll_is_enabled(struct clk_hw *hw)
struct tegra_clk_pll *pll = to_clk_pll(hw);
u32 val;
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
@@ -223,12 +269,12 @@ static void _clk_pll_enable(struct clk_hw *hw)
clk_pll_enable_lock(pll);
val = pll_readl_base(pll);
- if (pll->flags & TEGRA_PLL_BYPASS)
+ if (pll->params->flags & TEGRA_PLL_BYPASS)
val &= ~PLL_BASE_BYPASS;
val |= PLL_BASE_ENABLE;
pll_writel_base(val, pll);
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
@@ -241,12 +287,12 @@ static void _clk_pll_disable(struct clk_hw *hw)
u32 val;
val = pll_readl_base(pll);
- if (pll->flags & TEGRA_PLL_BYPASS)
+ if (pll->params->flags & TEGRA_PLL_BYPASS)
val &= ~PLL_BASE_BYPASS;
val &= ~PLL_BASE_ENABLE;
pll_writel_base(val, pll);
- if (pll->flags & TEGRA_PLLM) {
+ if (pll->params->flags & TEGRA_PLLM) {
val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
@@ -326,7 +372,7 @@ static int _get_table_rate(struct clk_hw *hw,
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table *sel;
- for (sel = pll->freq_table; sel->input_rate != 0; sel++)
+ for (sel = pll->params->freq_table; sel->input_rate != 0; sel++)
if (sel->input_rate == parent_rate &&
sel->output_rate == rate)
break;
@@ -389,12 +435,11 @@ static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
(1 << p_div) > divp_max(pll)
|| cfg->output_rate > pll->params->vco_max) {
- pr_err("%s: Failed to set %s rate %lu\n",
- __func__, __clk_get_name(hw->clk), rate);
- WARN_ON(1);
return -EINVAL;
}
+ cfg->output_rate >>= p_div;
+
if (pll->params->pdiv_tohw) {
ret = _p_div_to_hw(hw, 1 << p_div);
if (ret < 0)
@@ -414,7 +459,7 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
struct tegra_clk_pll_params *params = pll->params;
struct div_nmp *div_nmp = params->div_nmp;
- if ((pll->flags & TEGRA_PLLM) &&
+ if ((params->flags & TEGRA_PLLM) &&
(pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) &
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) {
val = pll_override_readl(params->pmc_divp_reg, pll);
@@ -450,7 +495,7 @@ static void _get_pll_mnp(struct tegra_clk_pll *pll,
struct tegra_clk_pll_params *params = pll->params;
struct div_nmp *div_nmp = params->div_nmp;
- if ((pll->flags & TEGRA_PLLM) &&
+ if ((params->flags & TEGRA_PLLM) &&
(pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) &
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) {
val = pll_override_readl(params->pmc_divp_reg, pll);
@@ -479,11 +524,11 @@ static void _update_pll_cpcon(struct tegra_clk_pll *pll,
val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
- if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ if (pll->params->flags & TEGRA_PLL_SET_LFCON) {
val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
if (cfg->n >= PLLDU_LFCON_SET_DIVN)
val |= 1 << PLL_MISC_LFCON_SHIFT;
- } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ } else if (pll->params->flags & TEGRA_PLL_SET_DCCON) {
val &= ~(1 << PLL_MISC_DCCON_SHIFT);
if (rate >= (pll->params->vco_max >> 1))
val |= 1 << PLL_MISC_DCCON_SHIFT;
@@ -505,7 +550,7 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
_update_pll_mnp(pll, cfg);
- if (pll->flags & TEGRA_PLL_HAS_CPCON)
+ if (pll->params->flags & TEGRA_PLL_HAS_CPCON)
_update_pll_cpcon(pll, cfg, rate);
if (state) {
@@ -524,11 +569,11 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags = 0;
int ret = 0;
- if (pll->flags & TEGRA_PLL_FIXED) {
- if (rate != pll->fixed_rate) {
+ if (pll->params->flags & TEGRA_PLL_FIXED) {
+ if (rate != pll->params->fixed_rate) {
pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
__func__, __clk_get_name(hw->clk),
- pll->fixed_rate, rate);
+ pll->params->fixed_rate, rate);
return -EINVAL;
}
return 0;
@@ -536,6 +581,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
_calc_rate(hw, &cfg, rate, parent_rate)) {
+ pr_err("%s: Failed to set %s rate %lu\n", __func__,
+ __clk_get_name(hw->clk), rate);
WARN_ON(1);
return -EINVAL;
}
@@ -559,18 +606,16 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
- if (pll->flags & TEGRA_PLL_FIXED)
- return pll->fixed_rate;
+ if (pll->params->flags & TEGRA_PLL_FIXED)
+ return pll->params->fixed_rate;
/* PLLM is used for memory; we do not change rate */
- if (pll->flags & TEGRA_PLLM)
+ if (pll->params->flags & TEGRA_PLLM)
return __clk_get_rate(hw->clk);
if (_get_table_rate(hw, &cfg, rate, *prate) &&
- _calc_rate(hw, &cfg, rate, *prate)) {
- WARN_ON(1);
+ _calc_rate(hw, &cfg, rate, *prate))
return -EINVAL;
- }
return cfg.output_rate;
}
@@ -586,17 +631,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
val = pll_readl_base(pll);
- if ((pll->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
+ if ((pll->params->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
return parent_rate;
- if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
+ if ((pll->params->flags & TEGRA_PLL_FIXED) &&
+ !(val & PLL_BASE_OVERRIDE)) {
struct tegra_clk_pll_freq_table sel;
- if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate,
+ parent_rate)) {
pr_err("Clock %s has unknown fixed frequency\n",
__clk_get_name(hw->clk));
BUG();
}
- return pll->fixed_rate;
+ return pll->params->fixed_rate;
}
_get_pll_mnp(pll, &cfg);
@@ -664,7 +711,7 @@ static int clk_plle_enable(struct clk_hw *hw)
u32 val;
int err;
- if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
clk_pll_disable(hw);
@@ -680,7 +727,7 @@ static int clk_plle_enable(struct clk_hw *hw)
return err;
}
- if (pll->flags & TEGRA_PLLE_CONFIGURE) {
+ if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
/* configure dividers */
val = pll_readl_base(pll);
val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
@@ -744,7 +791,7 @@ const struct clk_ops tegra_clk_plle_ops = {
.enable = clk_plle_enable,
};
-#ifdef CONFIG_ARCH_TEGRA_114_SOC
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
unsigned long parent_rate)
@@ -755,6 +802,48 @@ static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
return 1;
}
+static unsigned long _clip_vco_min(unsigned long vco_min,
+ unsigned long parent_rate)
+{
+ return DIV_ROUND_UP(vco_min, parent_rate) * parent_rate;
+}
+
+static int _setup_dynamic_ramp(struct tegra_clk_pll_params *pll_params,
+ void __iomem *clk_base,
+ unsigned long parent_rate)
+{
+ u32 val;
+ u32 step_a, step_b;
+
+ switch (parent_rate) {
+ case 12000000:
+ case 13000000:
+ case 26000000:
+ step_a = 0x2B;
+ step_b = 0x0B;
+ break;
+ case 16800000:
+ step_a = 0x1A;
+ step_b = 0x09;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ default:
+ pr_err("%s: Unexpected reference rate %lu\n",
+ __func__, parent_rate);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ val = step_a << pll_params->stepa_shift;
+ val |= step_b << pll_params->stepb_shift;
+ writel_relaxed(val, clk_base + pll_params->dyn_ramp_reg);
+
+ return 0;
+}
+
static int clk_pll_iddq_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
@@ -1173,7 +1262,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
unsigned long flags = 0;
unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
- if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
if (pll->lock)
@@ -1217,6 +1306,18 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
if (ret < 0)
goto out;
+ val = pll_readl(PLLE_SS_CTRL, pll);
+ val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
+ val &= ~PLLE_SS_COEFFICIENTS_MASK;
+ val |= PLLE_SS_COEFFICIENTS_VAL;
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ udelay(1);
+ val &= ~PLLE_SS_CNTL_INTERP_RESET;
+ pll_writel(val, PLLE_SS_CTRL, pll);
+ udelay(1);
+
/* TODO: enable hw control of xusb brick pll */
out:
@@ -1248,9 +1349,8 @@ static void clk_plle_tegra114_disable(struct clk_hw *hw)
#endif
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
- void __iomem *pmc, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ void __iomem *pmc, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
@@ -1261,10 +1361,7 @@ static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
pll->clk_base = clk_base;
pll->pmc = pmc;
- pll->freq_table = freq_table;
pll->params = pll_params;
- pll->fixed_rate = fixed_rate;
- pll->flags = pll_flags;
pll->lock = lock;
if (!pll_params->div_nmp)
@@ -1293,17 +1390,15 @@ static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1317,17 +1412,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1339,8 +1432,8 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
return clk;
}
-#ifdef CONFIG_ARCH_TEGRA_114_SOC
-const struct clk_ops tegra_clk_pllxc_ops = {
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+static const struct clk_ops tegra_clk_pllxc_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1349,7 +1442,7 @@ const struct clk_ops tegra_clk_pllxc_ops = {
.set_rate = clk_pllxc_set_rate,
};
-const struct clk_ops tegra_clk_pllm_ops = {
+static const struct clk_ops tegra_clk_pllm_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1358,7 +1451,7 @@ const struct clk_ops tegra_clk_pllm_ops = {
.set_rate = clk_pllm_set_rate,
};
-const struct clk_ops tegra_clk_pllc_ops = {
+static const struct clk_ops tegra_clk_pllc_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
@@ -1367,7 +1460,7 @@ const struct clk_ops tegra_clk_pllc_ops = {
.set_rate = clk_pllc_set_rate,
};
-const struct clk_ops tegra_clk_pllre_ops = {
+static const struct clk_ops tegra_clk_pllre_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_iddq_enable,
.disable = clk_pll_iddq_disable,
@@ -1376,7 +1469,7 @@ const struct clk_ops tegra_clk_pllre_ops = {
.set_rate = clk_pllre_set_rate,
};
-const struct clk_ops tegra_clk_plle_tegra114_ops = {
+static const struct clk_ops tegra_clk_plle_tegra114_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_plle_tegra114_enable,
.disable = clk_plle_tegra114_disable,
@@ -1386,21 +1479,46 @@ const struct clk_ops tegra_clk_plle_tegra114_ops = {
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
- struct clk *clk;
+ struct clk *clk, *parent;
+ unsigned long parent_rate;
+ int err;
+ u32 val, val_iddq;
+
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
if (!pll_params->pdiv_tohw)
return ERR_PTR(-EINVAL);
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ err = _setup_dynamic_ramp(pll_params, clk_base, parent_rate);
+ if (err)
+ return ERR_PTR(err);
+
+ val = readl_relaxed(clk_base + pll_params->base_reg);
+ val_iddq = readl_relaxed(clk_base + pll_params->iddq_reg);
+
+ if (val & PLL_BASE_ENABLE)
+ WARN_ON(val_iddq & BIT(pll_params->iddq_bit_idx));
+ else {
+ val_iddq |= BIT(pll_params->iddq_bit_idx);
+ writel_relaxed(val_iddq, clk_base + pll_params->iddq_reg);
+ }
+
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1414,19 +1532,19 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock, unsigned long parent_rate)
{
u32 val;
struct tegra_clk_pll *pll;
struct clk *clk;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_LOCK_MISC;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_LOCK_MISC;
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1461,23 +1579,32 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
- struct clk *clk;
+ struct clk *clk, *parent;
+ unsigned long parent_rate;
if (!pll_params->pdiv_tohw)
return ERR_PTR(-EINVAL);
- pll_flags |= TEGRA_PLL_BYPASS;
- pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
- pll_flags |= TEGRA_PLLM;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll_params->flags |= TEGRA_PLLM;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1491,10 +1618,8 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct clk *parent, *clk;
@@ -1507,20 +1632,21 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
return ERR_PTR(-EINVAL);
parent = __clk_lookup(parent_name);
- if (IS_ERR(parent)) {
+ if (!parent) {
WARN(1, "parent clk %s of %s must be registered first\n",
name, parent_name);
return ERR_PTR(-EINVAL);
}
- pll_flags |= TEGRA_PLL_BYPASS;
- pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
- freq_table, lock);
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- parent_rate = __clk_get_rate(parent);
-
/*
* Most of PLLC register fields are shadowed, and can not be read
* directly from PLL h/w. Hence, actual PLLC boot state is unknown.
@@ -1567,17 +1693,15 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
struct clk *tegra_clk_register_plle_tegra114(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
- unsigned long fixed_rate,
struct tegra_clk_pll_params *pll_params,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock)
{
struct tegra_clk_pll *pll;
struct clk *clk;
u32 val, val_aux;
- pll = _tegra_init_pll(clk_base, NULL, fixed_rate, pll_params,
- TEGRA_PLL_HAS_LOCK_ENABLE, freq_table, lock);
+ pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
@@ -1587,11 +1711,13 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
val_aux = pll_readl(pll_params->aux_reg, pll);
if (val & PLL_BASE_ENABLE) {
- if (!(val_aux & PLLE_AUX_PLLRE_SEL))
+ if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
+ (val_aux & PLLE_AUX_PLLP_SEL))
WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val & PLLE_AUX_PLLP_SEL) ? "pllp_out0" : "pll_ref");
+ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
+ "pll_re_vco");
} else {
- val_aux |= PLLE_AUX_PLLRE_SEL;
+ val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
pll_writel(val, pll_params->aux_reg, pll);
}
@@ -1603,3 +1729,92 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
return clk;
}
#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+static const struct clk_ops tegra_clk_pllss_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_iddq_enable,
+ .disable = clk_pll_iddq_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_ramp_round_rate,
+ .set_rate = clk_pllxc_set_rate,
+};
+
+struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk, *parent;
+ struct tegra_clk_pll_freq_table cfg;
+ unsigned long parent_rate;
+ u32 val;
+ int i;
+
+ if (!pll_params->div_nmp)
+ return ERR_PTR(-EINVAL);
+
+ parent = __clk_lookup(parent_name);
+ if (!parent) {
+ WARN(1, "parent clk %s of %s must be registered first\n",
+ name, parent_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll_params->flags = TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_USE_LOCK;
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ val = pll_readl_base(pll);
+ val &= ~PLLSS_REF_SRC_SEL_MASK;
+ pll_writel_base(val, pll);
+
+ parent_rate = __clk_get_rate(parent);
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ /* initialize PLL to minimum rate */
+
+ cfg.m = _pll_fixed_mdiv(pll_params, parent_rate);
+ cfg.n = cfg.m * pll_params->vco_min / parent_rate;
+
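+	/* walk pdiv_tohw to its terminator and use its last post-divider entry */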
+ for (i = 0; pll_params->pdiv_tohw[i].pdiv; i++)
+ ;
+ if (!i) {
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cfg.p = pll_params->pdiv_tohw[i-1].hw_val;
+
+ _update_pll_mnp(pll, &cfg);
+
+ pll_writel_misc(PLLSS_MISC_DEFAULT, pll);
+ pll_writel(PLLSS_CFG_DEFAULT, pll_params->ext_misc_reg[0], pll);
+ pll_writel(PLLSS_CTRL1_DEFAULT, pll_params->ext_misc_reg[1], pll);
+ pll_writel(PLLSS_CTRL1_DEFAULT, pll_params->ext_misc_reg[2], pll);
+
+ val = pll_readl_base(pll);
+ if (val & PLL_BASE_ENABLE) {
+ if (val & BIT(pll_params->iddq_bit_idx)) {
+ WARN(1, "%s is on but IDDQ set\n", name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
+ }
+ } else
+ val |= BIT(pll_params->iddq_bit_idx);
+
+ val &= ~PLLSS_LOCK_OVERRIDE;
+ pll_writel_base(val, pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllss_ops);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+#endif
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
new file mode 100644
index 000000000000..5c38aab2c5b8
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PLLA_OUT 0xb4
+
+struct tegra_sync_source_initdata {
+ char *name;
+ unsigned long rate;
+ unsigned long max_rate;
+ int clk_id;
+};
+
+#define SYNC(_name) \
+ {\
+ .name = #_name,\
+ .rate = 24000000,\
+ .max_rate = 24000000,\
+ .clk_id = tegra_clk_ ## _name,\
+ }
+
+struct tegra_audio_clk_initdata {
+ char *gate_name;
+ char *mux_name;
+ u32 offset;
+ int gate_clk_id;
+ int mux_clk_id;
+};
+
+#define AUDIO(_name, _offset) \
+ {\
+ .gate_name = #_name,\
+ .mux_name = #_name"_mux",\
+ .offset = _offset,\
+ .gate_clk_id = tegra_clk_ ## _name,\
+ .mux_clk_id = tegra_clk_ ## _name ## _mux,\
+ }
+
+struct tegra_audio2x_clk_initdata {
+ char *parent;
+ char *gate_name;
+ char *name_2x;
+ char *div_name;
+ int clk_id;
+ int clk_num;
+ u8 div_offset;
+};
+
+#define AUDIO2X(_name, _num, _offset) \
+ {\
+ .parent = #_name,\
+ .gate_name = #_name"_2x",\
+ .name_2x = #_name"_doubler",\
+ .div_name = #_name"_div",\
+ .clk_id = tegra_clk_ ## _name ## _2x,\
+ .clk_num = _num,\
+ .div_offset = _offset,\
+ }
+
+static DEFINE_SPINLOCK(clk_doubler_lock);
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+
+static struct tegra_sync_source_initdata sync_source_clks[] __initdata = {
+ SYNC(spdif_in_sync),
+ SYNC(i2s0_sync),
+ SYNC(i2s1_sync),
+ SYNC(i2s2_sync),
+ SYNC(i2s3_sync),
+ SYNC(i2s4_sync),
+ SYNC(vimclk_sync),
+};
+
+static struct tegra_audio_clk_initdata audio_clks[] = {
+ AUDIO(audio0, AUDIO_SYNC_CLK_I2S0),
+ AUDIO(audio1, AUDIO_SYNC_CLK_I2S1),
+ AUDIO(audio2, AUDIO_SYNC_CLK_I2S2),
+ AUDIO(audio3, AUDIO_SYNC_CLK_I2S3),
+ AUDIO(audio4, AUDIO_SYNC_CLK_I2S4),
+ AUDIO(spdif, AUDIO_SYNC_CLK_SPDIF),
+};
+
+static struct tegra_audio2x_clk_initdata audio2x_clks[] = {
+ AUDIO2X(audio0, 113, 24),
+ AUDIO2X(audio1, 114, 25),
+ AUDIO2X(audio2, 115, 26),
+ AUDIO2X(audio3, 116, 27),
+ AUDIO2X(audio4, 117, 28),
+ AUDIO2X(spdif, 118, 29),
+};
+
+void __init tegra_audio_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_a_params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
+ /* PLLA */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_a, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base,
+ pmc_base, 0, pll_a_params, NULL);
+ *dt_clk = clk;
+ }
+
+ /* PLLA_OUT0 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_a_out0, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ *dt_clk = clk;
+ }
+
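+	/* audio sync sources */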
+ for (i = 0; i < ARRAY_SIZE(sync_source_clks); i++) {
+ struct tegra_sync_source_initdata *data;
+
+ data = &sync_source_clks[i];
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_sync_source(data->name,
+ data->rate, data->max_rate);
+ *dt_clk = clk;
+ }
+
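+	/* audio sync clock muxes and gates */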
+ for (i = 0; i < ARRAY_SIZE(audio_clks); i++) {
+ struct tegra_audio_clk_initdata *data;
+
+ data = &audio_clks[i];
+ dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks);
+
+ if (!dt_clk)
+ continue;
+ clk = clk_register_mux(NULL, data->mux_name, mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
+ clk_base + data->offset, 0, 3, 0,
+ NULL);
+ *dt_clk = clk;
+
+ dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
+ 0, clk_base + data->offset, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ *dt_clk = clk;
+ }
+
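+	/* audio doubler (2x) clocks: fixed-factor doubler, divider, then gate */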
+ for (i = 0; i < ARRAY_SIZE(audio2x_clks); i++) {
+ struct tegra_audio2x_clk_initdata *data;
+
+ data = &audio2x_clks[i];
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_fixed_factor(NULL, data->name_2x,
+ data->parent, CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider(data->div_name,
+ data->name_2x, clk_base + AUDIO_SYNC_DOUBLER,
+ 0, 0, data->div_offset, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate(data->gate_name,
+ data->div_name, TEGRA_PERIPH_NO_RESET,
+ clk_base, CLK_SET_RATE_PARENT, data->clk_num,
+ periph_clk_enb_refcnt);
+ *dt_clk = clk;
+ }
+}
+
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
new file mode 100644
index 000000000000..f3b773833429
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_SHIFT 28
+#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
+
+int __init tegra_osc_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks,
+ unsigned long *input_freqs, int num,
+ unsigned long *osc_freq,
+ unsigned long *pll_ref_freq)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ u32 val, pll_ref_div;
+ unsigned osc_idx;
+
+ val = readl_relaxed(clk_base + OSC_CTRL);
+ osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT;
+
+ if (osc_idx < num)
+ *osc_freq = input_freqs[osc_idx];
+ else
+ *osc_freq = 0;
+
+ if (!*osc_freq) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
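+	/* clk_m */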
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, tegra_clks);
+ if (!dt_clk)
+ return 0;
+
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ *osc_freq);
+ *dt_clk = clk;
+
+ /* pll_ref */
+ val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
+ pll_ref_div = 1 << val;
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_ref, tegra_clks);
+ if (!dt_clk)
+ return 0;
+
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ 0, 1, pll_ref_div);
+ *dt_clk = clk;
+
+ if (pll_ref_freq)
+ *pll_ref_freq = *osc_freq / pll_ref_div;
+
+ return 0;
+}
+
+void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* clk_32k */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_32k, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL,
+ CLK_IS_ROOT, 32768);
+ *dt_clk = clk;
+ }
+
+ /* clk_m_div2 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ *dt_clk = clk;
+ }
+
+ /* clk_m_div4 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ *dt_clk = clk;
+ }
+}
+
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
new file mode 100644
index 000000000000..1fa5c3f33b20
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_ADX 0x638
+#define CLK_SOURCE_ADX1 0x670
+#define CLK_SOURCE_AMX 0x63c
+#define CLK_SOURCE_AMX1 0x674
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_TRACE 0x634
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_I2C6 0x65c
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MSENC 0x1f0
+#define CLK_SOURCE_TSEC 0x1f4
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CILAB 0x614
+#define CLK_SOURCE_CILCD 0x618
+#define CLK_SOURCE_CILE 0x61c
+#define CLK_SOURCE_DSIALP 0x620
+#define CLK_SOURCE_DSIBLP 0x624
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_DFLL_REF 0x62c
+#define CLK_SOURCE_DFLL_SOC 0x630
+#define CLK_SOURCE_SOC_THERM 0x644
+#define CLK_SOURCE_XUSB_HOST_SRC 0x600
+#define CLK_SOURCE_XUSB_FALCON_SRC 0x604
+#define CLK_SOURCE_XUSB_FS_SRC 0x608
+#define CLK_SOURCE_XUSB_SS_SRC 0x610
+#define CLK_SOURCE_XUSB_DEV_SRC 0x60c
+#define CLK_SOURCE_ISP 0x144
+#define CLK_SOURCE_SOR0 0x414
+#define CLK_SOURCE_DPAUX 0x418
+#define CLK_SOURCE_ENTROPY 0x628
+#define CLK_SOURCE_VI_SENSOR2 0x658
+#define CLK_SOURCE_HDMI_AUDIO 0x668
+#define CLK_SOURCE_VIC03 0x678
+#define CLK_SOURCE_CLK72MHZ 0x66c
+
+#define MASK(x) (BIT(x) - 1)
+
+#define MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+ NULL)
+
+#define MUX_FLAGS(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, flags,\
+ NULL)
+
+#define MUX8(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+ NULL)
+
+#define MUX8_NOGATE_LOCK(_name, _parents, _offset, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\
+ _parents##_idx, 0, _lock)
+
+#define INT(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define INT_FLAGS(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id, flags)\
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, flags, NULL)
+
+#define INT8(_name, _parents, _offset,\
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define UART(_name, _parents, _offset,\
+ _clk_num, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 1, TEGRA_DIVIDER_UART| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, 0, _clk_id,\
+ _parents##_idx, 0, NULL)
+
+#define I2C(_name, _parents, _offset,\
+ _clk_num, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ 30, MASK(2), 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP,\
+ _clk_num, 0, _clk_id, _parents##_idx, 0, NULL)
+
+#define XUSB(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT| \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags,\
+ _clk_id, _parents##_idx, 0, NULL)
+
+#define AUDIO(_name, _offset, _clk_num,\
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, mux_d_audio_clk, \
+ _offset, 16, 0xE01F, 0, 0, 8, 1, \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, _gate_flags, \
+ _clk_id, mux_d_audio_clk_idx, 0, NULL)
+
+#define NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_mask, _clk_num, \
+ _gate_flags, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ _mux_shift, _mux_mask, 0, 0, 0, 0, 0,\
+ _clk_num, (_gate_flags) | TEGRA_PERIPH_NO_DIV,\
+ _clk_id, _parents##_idx, 0, _lock)
+
+#define GATE(_name, _parent_name, \
+ _clk_num, _gate_flags, _clk_id, _flags) \
+ { \
+ .name = _name, \
+ .clk_id = _clk_id, \
+ .p.parent_name = _parent_name, \
+ .periph = TEGRA_CLK_PERIPH(0, 0, 0, 0, 0, 0, 0, \
+ _clk_num, _gate_flags, 0, NULL), \
+ .flags = _flags \
+ }
+
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLP_OUTC 0x67c
+
+#define PLL_BASE_LOCK BIT(27)
+#define PLL_MISC_LOCK_ENABLE 18
+
+static DEFINE_SPINLOCK(PLLP_OUTA_lock);
+static DEFINE_SPINLOCK(PLLP_OUTB_lock);
+static DEFINE_SPINLOCK(PLLP_OUTC_lock);
+static DEFINE_SPINLOCK(sor0_lock);
+
+#define MUX_I2S_SPDIF(_id) \
+static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
+ #_id, "pll_p",\
+ "clk_m"};
+MUX_I2S_SPDIF(audio0)
+MUX_I2S_SPDIF(audio1)
+MUX_I2S_SPDIF(audio2)
+MUX_I2S_SPDIF(audio3)
+MUX_I2S_SPDIF(audio4)
+MUX_I2S_SPDIF(audio)
+
+#define mux_pllaout0_audio0_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio1_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio2_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio3_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio4_2x_pllp_clkm_idx NULL
+#define mux_pllaout0_audio_2x_pllp_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm_clkm[] = {
+ "pll_p", "pll_c", "pll_m", "clk_m"
+};
+#define mux_pllp_pllc_pllm_clkm_idx NULL
+
+static const char *mux_pllp_pllc_pllm[] = { "pll_p", "pll_c", "pll_m" };
+#define mux_pllp_pllc_pllm_idx NULL
+
+static const char *mux_pllp_pllc_clk32_clkm[] = {
+ "pll_p", "pll_c", "clk_32k", "clk_m"
+};
+#define mux_pllp_pllc_clk32_clkm_idx NULL
+
+static const char *mux_plla_pllc_pllp_clkm[] = {
+ "pll_a_out0", "pll_c", "pll_p", "clk_m"
+};
+#define mux_plla_pllc_pllp_clkm_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc2_c_c3_pllm_clkm[] = {
+ "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_m", "clk_m"
+};
+static u32 mux_pllp_pllc2_c_c3_pllm_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
+static const char *mux_pllp_clkm[] = {
+ "pll_p", "clk_m"
+};
+static u32 mux_pllp_clkm_idx[] = {
+ [0] = 0, [1] = 3,
+};
+
+static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
+ "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc2_c_c3_pllp_plla_idx mux_pllp_pllc2_c_c3_pllm_clkm_idx
+
+static const char *mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
+ "pll_p", "pll_m", "pll_d_out0", "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m"
+};
+#define mux_pllp_pllm_plld_plla_pllc_plld2_clkm_idx NULL
+
+static const char *mux_pllm_pllc_pllp_plla[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a_out0"
+};
+#define mux_pllm_pllc_pllp_plla_idx mux_pllp_pllc_pllm_clkm_idx
+
+static const char *mux_pllp_pllc_clkm[] = {
+ "pll_p", "pll_c", "pll_m"
+};
+static u32 mux_pllp_pllc_clkm_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3,
+};
+
+static const char *mux_pllp_pllc_clkm_clk32[] = {
+ "pll_p", "pll_c", "clk_m", "clk_32k"
+};
+#define mux_pllp_pllc_clkm_clk32_idx NULL
+
+static const char *mux_plla_clk32_pllp_clkm_plle[] = {
+ "pll_a_out0", "clk_32k", "pll_p", "clk_m", "pll_e_out0"
+};
+#define mux_plla_clk32_pllp_clkm_plle_idx NULL
+
+static const char *mux_clkm_pllp_pllc_pllre[] = {
+ "clk_m", "pll_p", "pll_c", "pll_re_out"
+};
+static u32 mux_clkm_pllp_pllc_pllre_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 5,
+};
+
+static const char *mux_clkm_48M_pllp_480M[] = {
+ "clk_m", "pll_u_48M", "pll_p", "pll_u_480M"
+};
+#define mux_clkm_48M_pllp_480M_idx NULL
+
+static const char *mux_clkm_pllre_clk32_480M_pllc_ref[] = {
+ "clk_m", "pll_re_out", "clk_32k", "pll_u_480M", "pll_c", "pll_ref"
+};
+static u32 mux_clkm_pllre_clk32_480M_pllc_ref_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 4, [5] = 7,
+};
+
+static const char *mux_d_audio_clk[] = {
+ "pll_a_out0", "pll_p", "clk_m", "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
+};
+static u32 mux_d_audio_clk_idx[] = {
+ [0] = 0, [1] = 0x8000, [2] = 0xc000, [3] = 0xE000, [4] = 0xE001,
+ [5] = 0xE002, [6] = 0xE003, [7] = 0xE004, [8] = 0xE005, [9] = 0xE007,
+};
+
+static const char *mux_pllp_plld_pllc_clkm[] = {
+ "pll_p", "pll_d_out0", "pll_c", "clk_m"
+};
+#define mux_pllp_plld_pllc_clkm_idx NULL
+static const char *mux_pllm_pllc_pllp_plla_clkm_pllc4[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a_out0", "clk_m", "pll_c4",
+};
+static u32 mux_pllm_pllc_pllp_plla_clkm_pllc4_idx[] = {
+ [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 6, [5] = 7,
+};
+
+static const char *mux_pllp_clkm1[] = {
+ "pll_p", "clk_m",
+};
+#define mux_pllp_clkm1_idx NULL
+
+static const char *mux_pllp3_pllc_clkm[] = {
+ "pll_p_out3", "pll_c", "pll_c2", "clk_m",
+};
+#define mux_pllp3_pllc_clkm_idx NULL
+
+static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
+ "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
+};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
+
+static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
+ "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
+};
+static u32 mux_pllm_pllc2_c_c3_pllp_plla_pllc4_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6, [6] = 7,
+};
+
+static const char *mux_clkm_plldp_sor0lvds[] = {
+ "clk_m", "pll_dp", "sor0_lvds",
+};
+#define mux_clkm_plldp_sor0lvds_idx NULL
+
+static struct tegra_periph_init_data periph_clks[] = {
+ AUDIO("d_audio", CLK_SOURCE_D_AUDIO, 106, TEGRA_PERIPH_ON_APB, tegra_clk_d_audio),
+ AUDIO("dam0", CLK_SOURCE_DAM0, 108, TEGRA_PERIPH_ON_APB, tegra_clk_dam0),
+ AUDIO("dam1", CLK_SOURCE_DAM1, 109, TEGRA_PERIPH_ON_APB, tegra_clk_dam1),
+ AUDIO("dam2", CLK_SOURCE_DAM2, 110, TEGRA_PERIPH_ON_APB, tegra_clk_dam2),
+ I2C("i2c1", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, tegra_clk_i2c1),
+ I2C("i2c2", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, tegra_clk_i2c2),
+ I2C("i2c3", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, tegra_clk_i2c3),
+ I2C("i2c4", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, tegra_clk_i2c4),
+ I2C("i2c5", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, tegra_clk_i2c5),
+ INT("vde", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VDE, 61, 0, tegra_clk_vde),
+ INT("vi", mux_pllm_pllc_pllp_plla, CLK_SOURCE_VI, 20, 0, tegra_clk_vi),
+ INT("epp", mux_pllm_pllc_pllp_plla, CLK_SOURCE_EPP, 19, 0, tegra_clk_epp),
+ INT("host1x", mux_pllm_pllc_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x),
+ INT("mpe", mux_pllm_pllc_pllp_plla, CLK_SOURCE_MPE, 60, 0, tegra_clk_mpe),
+ INT("2d", mux_pllm_pllc_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d),
+ INT("3d", mux_pllm_pllc_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d),
+ INT8("vde", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_VDE, 61, 0, tegra_clk_vde_8),
+ INT8("vi", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI, 20, 0, tegra_clk_vi_8),
+ INT8("vi", mux_pllm_pllc2_c_c3_pllp_plla_pllc4, CLK_SOURCE_VI, 20, 0, tegra_clk_vi_9),
+ INT8("epp", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_EPP, 19, 0, tegra_clk_epp_8),
+ INT8("msenc", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_MSENC, 91, TEGRA_PERIPH_WAR_1005168, tegra_clk_msenc),
+ INT8("tsec", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_TSEC, 83, 0, tegra_clk_tsec),
+ INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
+ INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
+ INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
+ INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
+ INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
+ INT_FLAGS("mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, 0, tegra_clk_mselect, CLK_IGNORE_UNUSED),
+ MUX("i2s0", mux_pllaout0_audio0_2x_pllp_clkm, CLK_SOURCE_I2S0, 30, TEGRA_PERIPH_ON_APB, tegra_clk_i2s0),
+ MUX("i2s1", mux_pllaout0_audio1_2x_pllp_clkm, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, tegra_clk_i2s1),
+ MUX("i2s2", mux_pllaout0_audio2_2x_pllp_clkm, CLK_SOURCE_I2S2, 18, TEGRA_PERIPH_ON_APB, tegra_clk_i2s2),
+ MUX("i2s3", mux_pllaout0_audio3_2x_pllp_clkm, CLK_SOURCE_I2S3, 101, TEGRA_PERIPH_ON_APB, tegra_clk_i2s3),
+ MUX("i2s4", mux_pllaout0_audio4_2x_pllp_clkm, CLK_SOURCE_I2S4, 102, TEGRA_PERIPH_ON_APB, tegra_clk_i2s4),
+ MUX("spdif_out", mux_pllaout0_audio_2x_pllp_clkm, CLK_SOURCE_SPDIF_OUT, 10, TEGRA_PERIPH_ON_APB, tegra_clk_spdif_out),
+ MUX("spdif_in", mux_pllp_pllc_pllm, CLK_SOURCE_SPDIF_IN, 10, TEGRA_PERIPH_ON_APB, tegra_clk_spdif_in),
+ MUX("pwm", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_PWM, 17, TEGRA_PERIPH_ON_APB, tegra_clk_pwm),
+ MUX("adx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX, 154, TEGRA_PERIPH_ON_APB, tegra_clk_adx),
+ MUX("amx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX, 153, TEGRA_PERIPH_ON_APB, tegra_clk_amx),
+ MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
+ MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
+ MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
+ MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
+ MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
+ MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
+ MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+ MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
+ MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
+ MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
+ MUX("nor", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NOR, 42, 0, tegra_clk_nor),
+ MUX("mipi", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_MIPI, 50, TEGRA_PERIPH_ON_APB, tegra_clk_mipi),
+ MUX("vi_sensor", mux_pllm_pllc_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor),
+ MUX("cilab", mux_pllp_pllc_clkm, CLK_SOURCE_CILAB, 144, 0, tegra_clk_cilab),
+ MUX("cilcd", mux_pllp_pllc_clkm, CLK_SOURCE_CILCD, 145, 0, tegra_clk_cilcd),
+ MUX("cile", mux_pllp_pllc_clkm, CLK_SOURCE_CILE, 146, 0, tegra_clk_cile),
+ MUX("dsialp", mux_pllp_pllc_clkm, CLK_SOURCE_DSIALP, 147, 0, tegra_clk_dsialp),
+ MUX("dsiblp", mux_pllp_pllc_clkm, CLK_SOURCE_DSIBLP, 148, 0, tegra_clk_dsiblp),
+ MUX("tsensor", mux_pllp_pllc_clkm_clk32, CLK_SOURCE_TSENSOR, 100, TEGRA_PERIPH_ON_APB, tegra_clk_tsensor),
+ MUX("actmon", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_ACTMON, 119, 0, tegra_clk_actmon),
+ MUX("dfll_ref", mux_pllp_clkm, CLK_SOURCE_DFLL_REF, 155, TEGRA_PERIPH_ON_APB, tegra_clk_dfll_ref),
+ MUX("dfll_soc", mux_pllp_clkm, CLK_SOURCE_DFLL_SOC, 155, TEGRA_PERIPH_ON_APB, tegra_clk_dfll_soc),
+ MUX("i2cslow", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_I2CSLOW, 81, TEGRA_PERIPH_ON_APB, tegra_clk_i2cslow),
+ MUX("sbc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1),
+ MUX("sbc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2),
+ MUX("sbc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3),
+ MUX("sbc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC4, 68, TEGRA_PERIPH_ON_APB, tegra_clk_sbc4),
+ MUX("sbc5", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC5, 104, TEGRA_PERIPH_ON_APB, tegra_clk_sbc5),
+ MUX("sbc6", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC6, 105, TEGRA_PERIPH_ON_APB, tegra_clk_sbc6),
+ MUX("cve", mux_pllp_plld_pllc_clkm, CLK_SOURCE_CVE, 49, 0, tegra_clk_cve),
+ MUX("tvo", mux_pllp_plld_pllc_clkm, CLK_SOURCE_TVO, 49, 0, tegra_clk_tvo),
+ MUX("tvdac", mux_pllp_plld_pllc_clkm, CLK_SOURCE_TVDAC, 53, 0, tegra_clk_tvdac),
+ MUX("ndflash", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NDFLASH, 13, TEGRA_PERIPH_ON_APB, tegra_clk_ndflash),
+ MUX("ndspeed", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NDSPEED, 80, TEGRA_PERIPH_ON_APB, tegra_clk_ndspeed),
+ MUX("sata_oob", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA_OOB, 123, TEGRA_PERIPH_ON_APB, tegra_clk_sata_oob),
+ MUX("sata", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA, 124, TEGRA_PERIPH_ON_APB, tegra_clk_sata),
+ MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
+ MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
+ MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+ MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+ MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+ MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+ MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
+ MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
+ MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
+ MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
+ MUX8("sbc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC4, 68, TEGRA_PERIPH_ON_APB, tegra_clk_sbc4_8),
+ MUX8("sbc5", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC5, 104, TEGRA_PERIPH_ON_APB, tegra_clk_sbc5_8),
+ MUX8("sbc6", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC6, 105, TEGRA_PERIPH_ON_APB, tegra_clk_sbc6_8),
+ MUX8("ndflash", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDFLASH, 13, TEGRA_PERIPH_ON_APB, tegra_clk_ndflash_8),
+ MUX8("ndspeed", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDSPEED, 80, TEGRA_PERIPH_ON_APB, tegra_clk_ndspeed_8),
+ MUX8("hdmi", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_HDMI, 51, 0, tegra_clk_hdmi),
+ MUX8("extern1", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, 0, tegra_clk_extern1),
+ MUX8("extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, 0, tegra_clk_extern2),
+ MUX8("extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, 0, tegra_clk_extern3),
+ MUX8("soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm),
+ MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
+ MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8),
+ MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy),
+ MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
+ MUX8("clk72mhz", mux_pllp3_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz),
+ MUX8_NOGATE_LOCK("sor0_lvds", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_SOR0, tegra_clk_sor0_lvds, &sor0_lock),
+ MUX_FLAGS("csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite, CLK_IGNORE_UNUSED),
+ NODIV("disp1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1, NULL),
+ NODIV("disp2", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2, NULL),
+ NODIV("sor0", mux_clkm_plldp_sor0lvds, CLK_SOURCE_SOR0, 14, 3, 182, 0, tegra_clk_sor0, &sor0_lock),
+ UART("uarta", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, tegra_clk_uarta),
+ UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
+ UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
+ UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
+ UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
+ XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
+ XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
+ XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
+ XUSB("xusb_ss_src", mux_clkm_pllre_clk32_480M_pllc_ref, CLK_SOURCE_XUSB_SS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_ss_src),
+ XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
+};
+
+static struct tegra_periph_init_data gate_clks[] = {
+ GATE("rtc", "clk_32k", 4, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_rtc, 0),
+ GATE("timer", "clk_m", 5, 0, tegra_clk_timer, 0),
+ GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
+ GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
+ GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
+ GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
+ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+ GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
+ GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
+ GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
+ GATE("hda2hdmi", "clk_m", 128, TEGRA_PERIPH_ON_APB, tegra_clk_hda2hdmi, 0),
+ GATE("bsea", "clk_m", 62, 0, tegra_clk_bsea, 0),
+ GATE("bsev", "clk_m", 63, 0, tegra_clk_bsev, 0),
+ GATE("mipi-cal", "clk_m", 56, 0, tegra_clk_mipi_cal, 0),
+ GATE("usbd", "clk_m", 22, 0, tegra_clk_usbd, 0),
+ GATE("usb2", "clk_m", 58, 0, tegra_clk_usb2, 0),
+ GATE("usb3", "clk_m", 59, 0, tegra_clk_usb3, 0),
+ GATE("csi", "pll_p_out3", 52, 0, tegra_clk_csi, 0),
+ GATE("afi", "clk_m", 72, 0, tegra_clk_afi, 0),
+ GATE("csus", "clk_m", 92, TEGRA_PERIPH_NO_RESET, tegra_clk_csus, 0),
+ GATE("dds", "clk_m", 150, TEGRA_PERIPH_ON_APB, tegra_clk_dds, 0),
+ GATE("dp2", "clk_m", 152, TEGRA_PERIPH_ON_APB, tegra_clk_dp2, 0),
+ GATE("dtv", "clk_m", 79, TEGRA_PERIPH_ON_APB, tegra_clk_dtv, 0),
+ GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
+ GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
+ GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
+ GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
+ GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
+ GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
+ GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
+ GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
+ GATE("vim2_clk", "clk_m", 11, 0, tegra_clk_vim2_clk, 0),
+ GATE("pcie", "clk_m", 70, 0, tegra_clk_pcie, 0),
+ GATE("dpaux", "clk_m", 181, 0, tegra_clk_dpaux, 0),
+ GATE("gpu", "pll_ref", 184, 0, tegra_clk_gpu, 0),
+};
+
+struct pll_out_data {
+ char *div_name;
+ char *pll_out_name;
+ u32 offset;
+ int clk_id;
+ u8 div_shift;
+ u8 div_flags;
+ u8 rst_shift;
+ spinlock_t *lock;
+};
+
+#define PLL_OUT(_num, _offset, _div_shift, _div_flags, _rst_shift, _id) \
+ {\
+ .div_name = "pll_p_out" #_num "_div",\
+ .pll_out_name = "pll_p_out" #_num,\
+ .offset = _offset,\
+ .div_shift = _div_shift,\
+ .div_flags = _div_flags | TEGRA_DIVIDER_FIXED |\
+ TEGRA_DIVIDER_ROUND_UP,\
+ .rst_shift = _rst_shift,\
+ .clk_id = tegra_clk_ ## _id,\
+ .lock = &_offset ##_lock,\
+ }
+
+static struct pll_out_data pllp_out_clks[] = {
+ PLL_OUT(1, PLLP_OUTA, 8, 0, 0, pll_p_out1),
+ PLL_OUT(2, PLLP_OUTA, 24, 0, 16, pll_p_out2),
+ PLL_OUT(2, PLLP_OUTA, 24, TEGRA_DIVIDER_INT, 16, pll_p_out2_int),
+ PLL_OUT(3, PLLP_OUTB, 8, 0, 0, pll_p_out3),
+ PLL_OUT(4, PLLP_OUTB, 24, 0, 16, pll_p_out4),
+ PLL_OUT(5, PLLP_OUTC, 24, 0, 16, pll_p_out5),
+};
+
+static void __init periph_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ int i;
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ for (i = 0; i < ARRAY_SIZE(periph_clks); i++) {
+ struct tegra_clk_periph_regs *bank;
+ struct tegra_periph_init_data *data;
+
+ data = periph_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ bank = get_reg_bank(data->periph.gate.clk_num);
+ if (!bank)
+ continue;
+
+ data->periph.gate.regs = bank;
+ clk = tegra_clk_register_periph(data->name,
+ data->p.parent_names, data->num_parents,
+ &data->periph, clk_base, data->offset,
+ data->flags);
+ *dt_clk = clk;
+ }
+}
+
+static void __init gate_clk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ int i;
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ for (i = 0; i < ARRAY_SIZE(gate_clks); i++) {
+ struct tegra_periph_init_data *data;
+
+ data = gate_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_periph_gate(data->name,
+ data->p.parent_name, data->periph.gate.flags,
+ clk_base, data->flags,
+ data->periph.gate.clk_num,
+ periph_clk_enb_refcnt);
+ *dt_clk = clk;
+ }
+}
+
+static void __init init_pllp(void __iomem *clk_base, void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_p, tegra_clks);
+ if (dt_clk) {
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base,
+ pmc_base, 0, pll_params, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ *dt_clk = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pllp_out_clks); i++) {
+ struct pll_out_data *data;
+
+ data = pllp_out_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->clk_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = tegra_clk_register_divider(data->div_name, "pll_p",
+ clk_base + data->offset, 0, data->div_flags,
+ data->div_shift, 8, 1, data->lock);
+ clk = tegra_clk_register_pll_out(data->pll_out_name,
+ data->div_name, clk_base + data->offset,
+ data->rst_shift + 1, data->rst_shift,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ data->lock);
+ *dt_clk = clk;
+ }
+}
+
+void __init tegra_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params)
+{
+ init_pllp(clk_base, pmc_base, tegra_clks, pll_params);
+ periph_clk_init(clk_base, tegra_clks);
+ gate_clk_init(clk_base, tegra_clks);
+}
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
new file mode 100644
index 000000000000..08b21c1ee867
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+#define PMC_BLINK_TIMER 0x40
+
+struct pmc_clk_init_data {
+ char *mux_name;
+ char *gate_name;
+ const char **parents;
+ int num_parents;
+ int mux_id;
+ int gate_id;
+ char *dev_name;
+ u8 mux_shift;
+ u8 gate_shift;
+};
+
+#define PMC_CLK(_num, _mux_shift, _gate_shift)\
+ {\
+ .mux_name = "clk_out_" #_num "_mux",\
+ .gate_name = "clk_out_" #_num,\
+ .parents = clk_out ##_num ##_parents,\
+ .num_parents = ARRAY_SIZE(clk_out ##_num ##_parents),\
+ .mux_id = tegra_clk_clk_out_ ##_num ##_mux,\
+ .gate_id = tegra_clk_clk_out_ ##_num,\
+ .dev_name = "extern" #_num,\
+ .mux_shift = _mux_shift,\
+ .gate_shift = _gate_shift,\
+ }
+
+static DEFINE_SPINLOCK(clk_out_lock);
+
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1",
+};
+
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2",
+};
+
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3",
+};
+
+static struct pmc_clk_init_data pmc_clks[] = {
+ PMC_CLK(1, 6, 2),
+ PMC_CLK(2, 14, 10),
+ PMC_CLK(3, 22, 18),
+};
+
+void __init tegra_pmc_clk_init(void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+ int i;
+
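+	/* clk_out_1, clk_out_2 and clk_out_3 muxes and gates */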
+ for (i = 0; i < ARRAY_SIZE(pmc_clks); i++) {
+ struct pmc_clk_init_data *data;
+
+ data = pmc_clks + i;
+
+ dt_clk = tegra_lookup_dt_id(data->mux_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_mux(NULL, data->mux_name, data->parents,
+ data->num_parents, CLK_SET_RATE_NO_REPARENT,
+ pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift,
+ 3, 0, &clk_out_lock);
+ *dt_clk = clk;
+
+ dt_clk = tegra_lookup_dt_id(data->gate_id, tegra_clks);
+ if (!dt_clk)
+ continue;
+
+ clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
+ 0, pmc_base + PMC_CLK_OUT_CNTRL,
+ data->gate_shift, 0, &clk_out_lock);
+ *dt_clk = clk;
+ clk_register_clkdev(clk, data->dev_name, data->gate_name);
+ }
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_blink, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ *dt_clk = clk;
+}
+
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
new file mode 100644
index 000000000000..feb3201c85ce
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLX_MISC2 0x514
+#define PLLX_MISC3 0x518
+
+#define CCLKG_BURST_POLICY 0x368
+#define CCLKLP_BURST_POLICY 0x370
+#define SCLK_BURST_POLICY 0x028
+#define SYSTEM_CLK_RATE 0x030
+
+static DEFINE_SPINLOCK(sysrate_lock);
+
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x" };
+
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p", "pll_p_out4", "unused",
+ "unused", "pll_x", "pll_x_out0" };
+
+static void __init tegra_sclk_init(void __iomem *clk_base,
+ struct tegra_clk *tegra_clks)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* SCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ *dt_clk = clk;
+ }
+
+ /* HCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_hclk, tegra_clks);
+ if (dt_clk) {
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div",
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ clk_base + SYSTEM_CLK_RATE,
+ 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ *dt_clk = clk;
+ }
+
+ /* PCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pclk, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ 3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ *dt_clk = clk;
+}
+
+void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
+ void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *params)
+{
+ struct clk *clk;
+ struct clk **dt_clk;
+
+ /* CCLKG */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_g, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ *dt_clk = clk;
+ }
+
+ /* CCLKLP */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_lp, tegra_clks);
+ if (dt_clk) {
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ TEGRA_DIVIDER_2, 4, 8, 9, NULL);
+ *dt_clk = clk;
+ }
+
+ tegra_sclk_init(clk_base, tegra_clks);
+
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+ /* PLLX */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_x, tegra_clks);
+ if (!dt_clk)
+ return;
+
+ clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
+ pmc_base, CLK_IGNORE_UNUSED, params, NULL);
+ *dt_clk = clk;
+
+ /* PLLX_OUT0 */
+
+ dt_clk = tegra_lookup_dt_id(tegra_clk_pll_x_out0, tegra_clks);
+ if (!dt_clk)
+ return;
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ *dt_clk = clk;
+#endif
+}
+
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 9467da7dee49..80431f0fb268 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -23,30 +23,15 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clk/tegra.h>
+#include <dt-bindings/clock/tegra114-car.h>
#include "clk.h"
+#include "clk-id.h"
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00C
#define RST_DFLL_DVCO 0x2F4
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35C
-#define RST_DEVICES_X 0x28C
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
#define CPU_FINETRIM_SELECT 0x4d4 /* override default prop dlys */
#define CPU_FINETRIM_DR 0x4d8 /* rise->rise prop dly A */
#define CPU_FINETRIM_R 0x4e4 /* rise->rise prop dly inc A */
-#define RST_DEVICES_NUM 5
/* RST_DFLL_DVCO bitfields */
#define DVFS_DFLL_RESET_SHIFT 0
@@ -73,25 +58,7 @@
#define CPU_FINETRIM_R_FCPU_6_SHIFT 10 /* ftop */
#define CPU_FINETRIM_R_FCPU_6_MASK (0x3 << CPU_FINETRIM_R_FCPU_6_SHIFT)
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_X 0x280
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_SET_X 0x284
-#define CLK_OUT_ENB_CLR_X 0x288
-#define CLK_OUT_ENB_NUM 6
+#define TEGRA114_CLK_PERIPH_BANKS 5
#define PLLC_BASE 0x80
#define PLLC_MISC2 0x88
@@ -139,25 +106,6 @@
#define PLLE_AUX 0x48c
#define PLLC_OUT 0x84
#define PLLM_OUT 0x94
-#define PLLP_OUTA 0xa4
-#define PLLP_OUTB 0xa8
-#define PLLA_OUT 0xb4
-
-#define AUDIO_SYNC_CLK_I2S0 0x4a0
-#define AUDIO_SYNC_CLK_I2S1 0x4a4
-#define AUDIO_SYNC_CLK_I2S2 0x4a8
-#define AUDIO_SYNC_CLK_I2S3 0x4ac
-#define AUDIO_SYNC_CLK_I2S4 0x4b0
-#define AUDIO_SYNC_CLK_SPDIF 0x4b4
-
-#define AUDIO_SYNC_DOUBLER 0x49c
-
-#define PMC_CLK_OUT_CNTRL 0x1a8
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_BLINK_TIMER 0x40
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_SHIFT 28
@@ -166,9 +114,6 @@
#define PLLXC_SW_MAX_P 6
#define CCLKG_BURST_POLICY 0x368
-#define CCLKLP_BURST_POLICY 0x370
-#define SCLK_BURST_POLICY 0x028
-#define SYSTEM_CLK_RATE 0x030
#define UTMIP_PLL_CFG2 0x488
#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
@@ -196,91 +141,8 @@
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
-#define CLK_SOURCE_I2S0 0x1d8
-#define CLK_SOURCE_I2S1 0x100
-#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_I2S3 0x3bc
-#define CLK_SOURCE_I2S4 0x3c0
-#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
-#define CLK_SOURCE_PWM 0x110
-#define CLK_SOURCE_ADX 0x638
-#define CLK_SOURCE_AMX 0x63c
-#define CLK_SOURCE_HDA 0x428
-#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
-#define CLK_SOURCE_SBC5 0x3c8
-#define CLK_SOURCE_SBC6 0x3cc
-#define CLK_SOURCE_SATA_OOB 0x420
-#define CLK_SOURCE_SATA 0x424
-#define CLK_SOURCE_NDSPEED 0x3f8
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_VDE 0x1c8
#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_TRACE 0x634
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
-#define CLK_SOURCE_I2C1 0x124
-#define CLK_SOURCE_I2C2 0x198
-#define CLK_SOURCE_I2C3 0x1b8
-#define CLK_SOURCE_I2C4 0x3c4
-#define CLK_SOURCE_I2C5 0x128
-#define CLK_SOURCE_UARTA 0x178
-#define CLK_SOURCE_UARTB 0x17c
-#define CLK_SOURCE_UARTC 0x1a0
-#define CLK_SOURCE_UARTD 0x1c0
-#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_UARTA_DBG 0x178
-#define CLK_SOURCE_UARTB_DBG 0x17c
-#define CLK_SOURCE_UARTC_DBG 0x1a0
-#define CLK_SOURCE_UARTD_DBG 0x1c0
-#define CLK_SOURCE_UARTE_DBG 0x1c4
-#define CLK_SOURCE_3D 0x158
-#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_VI_SENSOR 0x1a8
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_MSENC 0x1f0
-#define CLK_SOURCE_TSEC 0x1f4
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_HDMI 0x18c
-#define CLK_SOURCE_DISP1 0x138
-#define CLK_SOURCE_DISP2 0x13c
-#define CLK_SOURCE_CILAB 0x614
-#define CLK_SOURCE_CILCD 0x618
-#define CLK_SOURCE_CILE 0x61c
-#define CLK_SOURCE_DSIALP 0x620
-#define CLK_SOURCE_DSIBLP 0x624
-#define CLK_SOURCE_TSENSOR 0x3b8
-#define CLK_SOURCE_D_AUDIO 0x3d0
-#define CLK_SOURCE_DAM0 0x3d8
-#define CLK_SOURCE_DAM1 0x3dc
-#define CLK_SOURCE_DAM2 0x3e0
-#define CLK_SOURCE_ACTMON 0x3e8
-#define CLK_SOURCE_EXTERN1 0x3ec
-#define CLK_SOURCE_EXTERN2 0x3f0
-#define CLK_SOURCE_EXTERN3 0x3f4
-#define CLK_SOURCE_I2CSLOW 0x3fc
-#define CLK_SOURCE_SE 0x42c
-#define CLK_SOURCE_MSELECT 0x3b4
-#define CLK_SOURCE_DFLL_REF 0x62c
-#define CLK_SOURCE_DFLL_SOC 0x630
-#define CLK_SOURCE_SOC_THERM 0x644
-#define CLK_SOURCE_XUSB_HOST_SRC 0x600
-#define CLK_SOURCE_XUSB_FALCON_SRC 0x604
-#define CLK_SOURCE_XUSB_FS_SRC 0x608
#define CLK_SOURCE_XUSB_SS_SRC 0x610
-#define CLK_SOURCE_XUSB_DEV_SRC 0x60c
#define CLK_SOURCE_EMC 0x19c
/* PLLM override registers */
@@ -298,19 +160,13 @@ static struct cpu_clk_suspend_context {
} tegra114_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_d2_lock);
static DEFINE_SPINLOCK(pll_u_lock);
-static DEFINE_SPINLOCK(pll_div_lock);
static DEFINE_SPINLOCK(pll_re_lock);
-static DEFINE_SPINLOCK(clk_doubler_lock);
-static DEFINE_SPINLOCK(clk_out_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
static struct div_nmp pllxc_nmp = {
.divm_shift = 0,
@@ -370,6 +226,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.stepb_shift = 9,
.pdiv_tohw = pllxc_p,
.div_nmp = &pllxc_nmp,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllcx_nmp = {
@@ -417,6 +275,8 @@ static struct tegra_clk_pll_params pll_c2_params = {
.ext_misc_reg[0] = 0x4f0,
.ext_misc_reg[1] = 0x4f4,
.ext_misc_reg[2] = 0x4f8,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_c3_params = {
@@ -437,6 +297,8 @@ static struct tegra_clk_pll_params pll_c3_params = {
.ext_misc_reg[0] = 0x504,
.ext_misc_reg[1] = 0x508,
.ext_misc_reg[2] = 0x50c,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllm_nmp = {
@@ -483,6 +345,8 @@ static struct tegra_clk_pll_params pll_m_params = {
.div_nmp = &pllm_nmp,
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllp_nmp = {
@@ -516,6 +380,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
+ .fixed_rate = 408000000,
};
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
@@ -543,6 +410,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
@@ -579,6 +448,9 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -594,6 +466,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.div_nmp = &pllp_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct pdiv_map pllu_p[] = {
@@ -634,6 +509,9 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
.div_nmp = &pllu_nmp,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
@@ -667,12 +545,15 @@ static struct tegra_clk_pll_params pll_x_params = {
.stepb_shift = 24,
.pdiv_tohw = pllxc_p,
.div_nmp = &pllxc_nmp,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
/* PLLE special case: use cpcon field to store cml divider value */
{336000000, 100000000, 100, 21, 16, 11},
{312000000, 100000000, 200, 26, 24, 13},
+ {12000000, 100000000, 200, 1, 24, 13},
{0, 0, 0, 0, 0, 0},
};
@@ -699,6 +580,9 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 300,
.div_nmp = &plle_nmp,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
static struct div_nmp pllre_nmp = {
@@ -725,53 +609,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
.iddq_reg = PLLRE_MISC,
.iddq_bit_idx = PLLRE_IDDQ_BIT,
.div_nmp = &pllre_nmp,
-};
-
-/* Peripheral clock registers */
-
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
-};
-
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
-};
-
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
-};
-
-static struct tegra_clk_periph_regs periph_v_regs = {
- .enb_reg = CLK_OUT_ENB_V,
- .enb_set_reg = CLK_OUT_ENB_SET_V,
- .enb_clr_reg = CLK_OUT_ENB_CLR_V,
- .rst_reg = RST_DEVICES_V,
- .rst_set_reg = RST_DEVICES_SET_V,
- .rst_clr_reg = RST_DEVICES_CLR_V,
-};
-
-static struct tegra_clk_periph_regs periph_w_regs = {
- .enb_reg = CLK_OUT_ENB_W,
- .enb_set_reg = CLK_OUT_ENB_SET_W,
- .enb_clr_reg = CLK_OUT_ENB_CLR_W,
- .rst_reg = RST_DEVICES_W,
- .rst_set_reg = RST_DEVICES_SET_W,
- .rst_clr_reg = RST_DEVICES_CLR_W,
+ .flags = TEGRA_PLL_USE_LOCK,
};
/* possible OSC frequencies in Hz */
@@ -787,120 +625,6 @@ static unsigned long tegra114_input_freq[] = {
#define MASK(x) (BIT(x) - 1)
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_MUX_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id, flags)\
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, flags)
-
-#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 29, MASK(3), 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_INT_FLAGS(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id, flags)\
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, flags)
-
-#define TEGRA_INIT_DATA_INT8(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs,\
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs,\
- _clk_num, periph_clk_enb_refcnt, 0, _clk_id, \
- _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_I2C(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- 30, MASK(2), 0, 0, 16, 0, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, 0, _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_mask, _clk_num, _regs, \
- _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset,\
- _mux_shift, _mux_mask, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_XUSB(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parents, _offset, \
- 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id, _parents##_idx, 0)
-
-#define TEGRA_INIT_DATA_AUDIO(_name, _con_id, _dev_id, _offset, _clk_num,\
- _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, mux_d_audio_clk, \
- _offset, 16, 0xE01F, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags , _clk_id, \
- mux_d_audio_clk_idx, 0)
-
-enum tegra114_clk {
- rtc = 4, timer = 5, uarta = 6, sdmmc2 = 9, i2s1 = 11, i2c1 = 12,
- ndflash = 13, sdmmc1 = 14, sdmmc4 = 15, pwm = 17, i2s2 = 18, epp = 19,
- gr_2d = 21, usbd = 22, isp = 23, gr_3d = 24, disp2 = 26, disp1 = 27,
- host1x = 28, vcp = 29, i2s0 = 30, apbdma = 34, kbc = 36, kfuse = 40,
- sbc1 = 41, nor = 42, sbc2 = 44, sbc3 = 46, i2c5 = 47, dsia = 48,
- mipi = 50, hdmi = 51, csi = 52, i2c2 = 54, uartc = 55, mipi_cal = 56,
- emc, usb2, usb3, vde = 61, bsea = 62, bsev = 63, uartd = 65,
- i2c3 = 67, sbc4 = 68, sdmmc3 = 69, owr = 71, csite = 73,
- la = 76, trace = 77, soc_therm = 78, dtv = 79, ndspeed = 80,
- i2cslow = 81, dsib = 82, tsec = 83, xusb_host = 89, msenc = 91,
- csus = 92, mselect = 99, tsensor = 100, i2s3 = 101, i2s4 = 102,
- i2c4 = 103, sbc5 = 104, sbc6 = 105, d_audio, apbif = 107, dam0, dam1,
- dam2, hda2codec_2x = 111, audio0_2x = 113, audio1_2x, audio2_2x,
- audio3_2x, audio4_2x, spdif_2x, actmon = 119, extern1 = 120,
- extern2 = 121, extern3 = 122, hda = 125, se = 127, hda2hdmi = 128,
- cilab = 144, cilcd = 145, cile = 146, dsialp = 147, dsiblp = 148,
- dds = 150, dp2 = 152, amx = 153, adx = 154, xusb_ss = 156, uartb = 192,
- vfir, spdif_in, spdif_out, vi, vi_sensor, fuse, fuse_burn, clk_32k,
- clk_m, clk_m_div2, clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_c2,
- pll_c3, pll_m, pll_m_out1, pll_p, pll_p_out1, pll_p_out2, pll_p_out3,
- pll_p_out4, pll_a, pll_a_out0, pll_d, pll_d_out0, pll_d2, pll_d2_out0,
- pll_u, pll_u_480M, pll_u_60M, pll_u_48M, pll_u_12M, pll_x, pll_x_out0,
- pll_re_vco, pll_re_out, pll_e_out0, spdif_in_sync, i2s0_sync,
- i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync, vimclk_sync, audio0,
- audio1, audio2, audio3, audio4, spdif, clk_out_1, clk_out_2, clk_out_3,
- blink, xusb_host_src = 252, xusb_falcon_src, xusb_fs_src, xusb_ss_src,
- xusb_dev_src, xusb_dev, xusb_hs_src, sclk, hclk, pclk, cclk_g, cclk_lp,
- dfll_ref = 264, dfll_soc,
-
- /* Mux clocks */
-
- audio0_mux = 300, audio1_mux, audio2_mux, audio3_mux, audio4_mux,
- spdif_mux, clk_out_1_mux, clk_out_2_mux, clk_out_3_mux, dsia_mux,
- dsib_mux, clk_max,
-};
-
struct utmi_clk_param {
/* Oscillator Frequency in KHz */
u32 osc_frequency;
@@ -934,122 +658,11 @@ static const struct utmi_clk_param utmi_parameters[] = {
/* peripheral mux definitions */
-#define MUX_I2S_SPDIF(_id) \
-static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
- #_id, "pll_p",\
- "clk_m"};
-MUX_I2S_SPDIF(audio0)
-MUX_I2S_SPDIF(audio1)
-MUX_I2S_SPDIF(audio2)
-MUX_I2S_SPDIF(audio3)
-MUX_I2S_SPDIF(audio4)
-MUX_I2S_SPDIF(audio)
-
-#define mux_pllaout0_audio0_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio1_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio2_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio3_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio4_2x_pllp_clkm_idx NULL
-#define mux_pllaout0_audio_2x_pllp_clkm_idx NULL
-
-static const char *mux_pllp_pllc_pllm_clkm[] = {
- "pll_p", "pll_c", "pll_m", "clk_m"
-};
-#define mux_pllp_pllc_pllm_clkm_idx NULL
-
-static const char *mux_pllp_pllc_pllm[] = { "pll_p", "pll_c", "pll_m" };
-#define mux_pllp_pllc_pllm_idx NULL
-
-static const char *mux_pllp_pllc_clk32_clkm[] = {
- "pll_p", "pll_c", "clk_32k", "clk_m"
-};
-#define mux_pllp_pllc_clk32_clkm_idx NULL
-
-static const char *mux_plla_pllc_pllp_clkm[] = {
- "pll_a_out0", "pll_c", "pll_p", "clk_m"
-};
-#define mux_plla_pllc_pllp_clkm_idx mux_pllp_pllc_pllm_clkm_idx
-
-static const char *mux_pllp_pllc2_c_c3_pllm_clkm[] = {
- "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_m", "clk_m"
-};
-static u32 mux_pllp_pllc2_c_c3_pllm_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
-
-static const char *mux_pllp_clkm[] = {
- "pll_p", "clk_m"
-};
-static u32 mux_pllp_clkm_idx[] = {
- [0] = 0, [1] = 3,
-};
-
-static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
- "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
-};
-#define mux_pllm_pllc2_c_c3_pllp_plla_idx mux_pllp_pllc2_c_c3_pllm_clkm_idx
-
-static const char *mux_pllp_pllm_plld_plla_pllc_plld2_clkm[] = {
- "pll_p", "pll_m", "pll_d_out0", "pll_a_out0", "pll_c",
- "pll_d2_out0", "clk_m"
-};
-#define mux_pllp_pllm_plld_plla_pllc_plld2_clkm_idx NULL
-
-static const char *mux_pllm_pllc_pllp_plla[] = {
- "pll_m", "pll_c", "pll_p", "pll_a_out0"
-};
-#define mux_pllm_pllc_pllp_plla_idx mux_pllp_pllc_pllm_clkm_idx
-
-static const char *mux_pllp_pllc_clkm[] = {
- "pll_p", "pll_c", "pll_m"
-};
-static u32 mux_pllp_pllc_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 3,
-};
-
-static const char *mux_pllp_pllc_clkm_clk32[] = {
- "pll_p", "pll_c", "clk_m", "clk_32k"
-};
-#define mux_pllp_pllc_clkm_clk32_idx NULL
-
-static const char *mux_plla_clk32_pllp_clkm_plle[] = {
- "pll_a_out0", "clk_32k", "pll_p", "clk_m", "pll_e_out0"
-};
-#define mux_plla_clk32_pllp_clkm_plle_idx NULL
-
-static const char *mux_clkm_pllp_pllc_pllre[] = {
- "clk_m", "pll_p", "pll_c", "pll_re_out"
-};
-static u32 mux_clkm_pllp_pllc_pllre_idx[] = {
- [0] = 0, [1] = 1, [2] = 3, [3] = 5,
-};
-
-static const char *mux_clkm_48M_pllp_480M[] = {
- "clk_m", "pll_u_48M", "pll_p", "pll_u_480M"
-};
-#define mux_clkm_48M_pllp_480M_idx NULL
-
-static const char *mux_clkm_pllre_clk32_480M_pllc_ref[] = {
- "clk_m", "pll_re_out", "clk_32k", "pll_u_480M", "pll_c", "pll_ref"
-};
-static u32 mux_clkm_pllre_clk32_480M_pllc_ref_idx[] = {
- [0] = 0, [1] = 1, [2] = 3, [3] = 3, [4] = 4, [5] = 7,
-};
-
static const char *mux_plld_out0_plld2_out0[] = {
"pll_d_out0", "pll_d2_out0",
};
#define mux_plld_out0_plld2_out0_idx NULL
-static const char *mux_d_audio_clk[] = {
- "pll_a_out0", "pll_p", "clk_m", "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
-};
-static u32 mux_d_audio_clk_idx[] = {
- [0] = 0, [1] = 0x8000, [2] = 0xc000, [3] = 0xE000, [4] = 0xE001,
- [5] = 0xE002, [6] = 0xE003, [7] = 0xE004, [8] = 0xE005, [9] = 0xE007,
-};
-
static const char *mux_pllmcp_clkm[] = {
"pll_m_out0", "pll_c_out0", "pll_p_out0", "clk_m", "pll_m_ud",
};
@@ -1064,8 +677,253 @@ static const struct clk_div_table pll_re_div_table[] = {
{ .val = 0, .div = 0 },
};
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_rtc] = { .dt_id = TEGRA114_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+ [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
+ [tegra_clk_epp_8] = { .dt_id = TEGRA114_CLK_EPP, .present = true },
+ [tegra_clk_gr2d_8] = { .dt_id = TEGRA114_CLK_GR2D, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA114_CLK_USBD, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA114_CLK_ISP, .present = true },
+ [tegra_clk_gr3d_8] = { .dt_id = TEGRA114_CLK_GR3D, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA114_CLK_DISP2, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA114_CLK_DISP1, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA114_CLK_HOST1X, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA114_CLK_VCP, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA114_CLK_APBDMA, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA114_CLK_KBC, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA114_CLK_KFUSE, .present = true },
+ [tegra_clk_sbc1_8] = { .dt_id = TEGRA114_CLK_SBC1, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA114_CLK_NOR, .present = true },
+ [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
+ [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
+ [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA114_CLK_I2C2, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA114_CLK_UARTC, .present = true },
+ [tegra_clk_mipi_cal] = { .dt_id = TEGRA114_CLK_MIPI_CAL, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA114_CLK_EMC, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA114_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA114_CLK_USB3, .present = true },
+ [tegra_clk_vde_8] = { .dt_id = TEGRA114_CLK_VDE, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA114_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
+ [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
+ [tegra_clk_trace] = { .dt_id = TEGRA114_CLK_TRACE, .present = true },
+ [tegra_clk_soc_therm] = { .dt_id = TEGRA114_CLK_SOC_THERM, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
+ [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
+ [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
+ [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
+ [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA114_CLK_CSUS, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA114_CLK_MSELECT, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA114_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA114_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA114_CLK_I2S4, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA114_CLK_I2C4, .present = true },
+ [tegra_clk_sbc5_8] = { .dt_id = TEGRA114_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6_8] = { .dt_id = TEGRA114_CLK_SBC6, .present = true },
+ [tegra_clk_d_audio] = { .dt_id = TEGRA114_CLK_D_AUDIO, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA114_CLK_APBIF, .present = true },
+ [tegra_clk_dam0] = { .dt_id = TEGRA114_CLK_DAM0, .present = true },
+ [tegra_clk_dam1] = { .dt_id = TEGRA114_CLK_DAM1, .present = true },
+ [tegra_clk_dam2] = { .dt_id = TEGRA114_CLK_DAM2, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA114_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA114_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA114_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA114_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA114_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA114_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA114_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA114_CLK_ACTMON, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA114_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA114_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA114_CLK_EXTERN3, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA114_CLK_HDA, .present = true },
+ [tegra_clk_se] = { .dt_id = TEGRA114_CLK_SE, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA114_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_cilab] = { .dt_id = TEGRA114_CLK_CILAB, .present = true },
+ [tegra_clk_cilcd] = { .dt_id = TEGRA114_CLK_CILCD, .present = true },
+ [tegra_clk_cile] = { .dt_id = TEGRA114_CLK_CILE, .present = true },
+ [tegra_clk_dsialp] = { .dt_id = TEGRA114_CLK_DSIALP, .present = true },
+ [tegra_clk_dsiblp] = { .dt_id = TEGRA114_CLK_DSIBLP, .present = true },
+ [tegra_clk_dds] = { .dt_id = TEGRA114_CLK_DDS, .present = true },
+ [tegra_clk_dp2] = { .dt_id = TEGRA114_CLK_DP2, .present = true },
+ [tegra_clk_amx] = { .dt_id = TEGRA114_CLK_AMX, .present = true },
+ [tegra_clk_adx] = { .dt_id = TEGRA114_CLK_ADX, .present = true },
+ [tegra_clk_xusb_ss] = { .dt_id = TEGRA114_CLK_XUSB_SS, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA114_CLK_UARTB, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA114_CLK_VFIR, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA114_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA114_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_vi_8] = { .dt_id = TEGRA114_CLK_VI, .present = true },
+ [tegra_clk_vi_sensor_8] = { .dt_id = TEGRA114_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA114_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA114_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA114_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA114_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA114_CLK_PLL_REF, .present = true },
+ [tegra_clk_pll_c] = { .dt_id = TEGRA114_CLK_PLL_C, .present = true },
+ [tegra_clk_pll_c_out1] = { .dt_id = TEGRA114_CLK_PLL_C_OUT1, .present = true },
+ [tegra_clk_pll_c2] = { .dt_id = TEGRA114_CLK_PLL_C2, .present = true },
+ [tegra_clk_pll_c3] = { .dt_id = TEGRA114_CLK_PLL_C3, .present = true },
+ [tegra_clk_pll_m] = { .dt_id = TEGRA114_CLK_PLL_M, .present = true },
+ [tegra_clk_pll_m_out1] = { .dt_id = TEGRA114_CLK_PLL_M_OUT1, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA114_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA114_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2_int] = { .dt_id = TEGRA114_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA114_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA114_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA114_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA114_CLK_PLL_A_OUT0, .present = true },
+ [tegra_clk_pll_d] = { .dt_id = TEGRA114_CLK_PLL_D, .present = true },
+ [tegra_clk_pll_d_out0] = { .dt_id = TEGRA114_CLK_PLL_D_OUT0, .present = true },
+ [tegra_clk_pll_d2] = { .dt_id = TEGRA114_CLK_PLL_D2, .present = true },
+ [tegra_clk_pll_d2_out0] = { .dt_id = TEGRA114_CLK_PLL_D2_OUT0, .present = true },
+ [tegra_clk_pll_u] = { .dt_id = TEGRA114_CLK_PLL_U, .present = true },
+ [tegra_clk_pll_u_480m] = { .dt_id = TEGRA114_CLK_PLL_U_480M, .present = true },
+ [tegra_clk_pll_u_60m] = { .dt_id = TEGRA114_CLK_PLL_U_60M, .present = true },
+ [tegra_clk_pll_u_48m] = { .dt_id = TEGRA114_CLK_PLL_U_48M, .present = true },
+ [tegra_clk_pll_u_12m] = { .dt_id = TEGRA114_CLK_PLL_U_12M, .present = true },
+ [tegra_clk_pll_x] = { .dt_id = TEGRA114_CLK_PLL_X, .present = true },
+ [tegra_clk_pll_x_out0] = { .dt_id = TEGRA114_CLK_PLL_X_OUT0, .present = true },
+ [tegra_clk_pll_re_vco] = { .dt_id = TEGRA114_CLK_PLL_RE_VCO, .present = true },
+ [tegra_clk_pll_re_out] = { .dt_id = TEGRA114_CLK_PLL_RE_OUT, .present = true },
+ [tegra_clk_pll_e_out0] = { .dt_id = TEGRA114_CLK_PLL_E_OUT0, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA114_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA114_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA114_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA114_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA114_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA114_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA114_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA114_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA114_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA114_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA114_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA114_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA114_CLK_SPDIF, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA114_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA114_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA114_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA114_CLK_BLINK, .present = true },
+ [tegra_clk_xusb_host_src] = { .dt_id = TEGRA114_CLK_XUSB_HOST_SRC, .present = true },
+ [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA114_CLK_XUSB_FALCON_SRC, .present = true },
+ [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA114_CLK_XUSB_FS_SRC, .present = true },
+ [tegra_clk_xusb_ss_src] = { .dt_id = TEGRA114_CLK_XUSB_SS_SRC, .present = true },
+ [tegra_clk_xusb_dev_src] = { .dt_id = TEGRA114_CLK_XUSB_DEV_SRC, .present = true },
+ [tegra_clk_xusb_dev] = { .dt_id = TEGRA114_CLK_XUSB_DEV, .present = true },
+ [tegra_clk_xusb_hs_src] = { .dt_id = TEGRA114_CLK_XUSB_HS_SRC, .present = true },
+ [tegra_clk_sclk] = { .dt_id = TEGRA114_CLK_SCLK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA114_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA114_CLK_PCLK, .present = true },
+ [tegra_clk_cclk_g] = { .dt_id = TEGRA114_CLK_CCLK_G, .present = true },
+ [tegra_clk_cclk_lp] = { .dt_id = TEGRA114_CLK_CCLK_LP, .present = true },
+ [tegra_clk_dfll_ref] = { .dt_id = TEGRA114_CLK_DFLL_REF, .present = true },
+ [tegra_clk_dfll_soc] = { .dt_id = TEGRA114_CLK_DFLL_SOC, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA114_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA114_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA114_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA114_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA114_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA114_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true },
+ [tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true },
+};
+
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "clk_m", .dt_id = TEGRA114_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA114_CLK_PLL_REF },
+ { .con_id = "clk_32k", .dt_id = TEGRA114_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA114_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA114_CLK_CLK_M_DIV4 },
+ { .con_id = "pll_c", .dt_id = TEGRA114_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA114_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_c2", .dt_id = TEGRA114_CLK_PLL_C2 },
+ { .con_id = "pll_c3", .dt_id = TEGRA114_CLK_PLL_C3 },
+ { .con_id = "pll_p", .dt_id = TEGRA114_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA114_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA114_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA114_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA114_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA114_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA114_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA114_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA114_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA114_CLK_PLL_U },
+ { .con_id = "pll_u_480M", .dt_id = TEGRA114_CLK_PLL_U_480M },
+ { .con_id = "pll_u_60M", .dt_id = TEGRA114_CLK_PLL_U_60M },
+ { .con_id = "pll_u_48M", .dt_id = TEGRA114_CLK_PLL_U_48M },
+ { .con_id = "pll_u_12M", .dt_id = TEGRA114_CLK_PLL_U_12M },
+ { .con_id = "pll_d", .dt_id = TEGRA114_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA114_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA114_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA114_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA114_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA114_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_re_vco", .dt_id = TEGRA114_CLK_PLL_RE_VCO },
+ { .con_id = "pll_re_out", .dt_id = TEGRA114_CLK_PLL_RE_OUT },
+ { .con_id = "pll_e_out0", .dt_id = TEGRA114_CLK_PLL_E_OUT0 },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA114_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA114_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA114_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA114_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA114_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA114_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA114_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA114_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA114_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA114_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA114_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA114_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA114_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA114_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA114_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA114_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA114_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA114_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA114_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA114_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA114_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA114_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA114_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA114_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA114_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA114_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA114_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA114_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA114_CLK_FUSE },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA114_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA114_CLK_TIMER },
+};
+
+static struct clk **clks;
static unsigned long osc_freq;
static unsigned long pll_ref_freq;
@@ -1086,16 +944,14 @@ static int __init tegra114_osc_clk_init(void __iomem *clk_base)
/* clk_m */
clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
osc_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
+ clks[TEGRA114_CLK_CLK_M] = clk;
/* pll_ref */
val = (val >> OSC_CTRL_PLL_REF_DIV_SHIFT) & 3;
pll_ref_div = 1 << val;
clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ clks[TEGRA114_CLK_PLL_REF] = clk;
pll_ref_freq = osc_freq / pll_ref_div;
@@ -1109,20 +965,17 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
/* clk_32k */
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
+ clks[TEGRA114_CLK_CLK_32K] = clk;
/* clk_m_div2 */
clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "clk_m_div2", NULL);
- clks[clk_m_div2] = clk;
+ clks[TEGRA114_CLK_CLK_M_DIV2] = clk;
/* clk_m_div4 */
clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
CLK_SET_RATE_PARENT, 1, 4);
- clk_register_clkdev(clk, "clk_m_div4", NULL);
- clks[clk_m_div4] = clk;
+ clks[TEGRA114_CLK_CLK_M_DIV4] = clk;
}
@@ -1208,63 +1061,6 @@ static __init void tegra114_utmi_param_configure(void __iomem *clk_base)
writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
}
-static void __init _clip_vco_min(struct tegra_clk_pll_params *pll_params)
-{
- pll_params->vco_min =
- DIV_ROUND_UP(pll_params->vco_min, pll_ref_freq) * pll_ref_freq;
-}
-
-static int __init _setup_dynamic_ramp(struct tegra_clk_pll_params *pll_params,
- void __iomem *clk_base)
-{
- u32 val;
- u32 step_a, step_b;
-
- switch (pll_ref_freq) {
- case 12000000:
- case 13000000:
- case 26000000:
- step_a = 0x2B;
- step_b = 0x0B;
- break;
- case 16800000:
- step_a = 0x1A;
- step_b = 0x09;
- break;
- case 19200000:
- step_a = 0x12;
- step_b = 0x08;
- break;
- default:
- pr_err("%s: Unexpected reference rate %lu\n",
- __func__, pll_ref_freq);
- WARN_ON(1);
- return -EINVAL;
- }
-
- val = step_a << pll_params->stepa_shift;
- val |= step_b << pll_params->stepb_shift;
- writel_relaxed(val, clk_base + pll_params->dyn_ramp_reg);
-
- return 0;
-}
-
-static void __init _init_iddq(struct tegra_clk_pll_params *pll_params,
- void __iomem *clk_base)
-{
- u32 val, val_iddq;
-
- val = readl_relaxed(clk_base + pll_params->base_reg);
- val_iddq = readl_relaxed(clk_base + pll_params->iddq_reg);
-
- if (val & BIT(30))
- WARN_ON(val_iddq & BIT(pll_params->iddq_bit_idx));
- else {
- val_iddq |= BIT(pll_params->iddq_bit_idx);
- writel_relaxed(val_iddq, clk_base + pll_params->iddq_reg);
- }
-}
-
static void __init tegra114_pll_init(void __iomem *clk_base,
void __iomem *pmc)
{
@@ -1272,104 +1068,34 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
struct clk *clk;
/* PLLC */
- _clip_vco_min(&pll_c_params);
- if (_setup_dynamic_ramp(&pll_c_params, clk_base) >= 0) {
- _init_iddq(&pll_c_params, clk_base);
- clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
- pmc, 0, 0, &pll_c_params, TEGRA_PLL_USE_LOCK,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
-
- /* PLLC_OUT1 */
- clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
- clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
- clk_base + PLLC_OUT, 1, 0,
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
- }
+ clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
+ pmc, 0, &pll_c_params, NULL);
+ clks[TEGRA114_CLK_PLL_C] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clks[TEGRA114_CLK_PLL_C_OUT1] = clk;
/* PLLC2 */
- _clip_vco_min(&pll_c2_params);
- clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0, 0,
- &pll_c2_params, TEGRA_PLL_USE_LOCK,
- pll_cx_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c2", NULL);
- clks[pll_c2] = clk;
+ clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
+ &pll_c2_params, NULL);
+ clks[TEGRA114_CLK_PLL_C2] = clk;
/* PLLC3 */
- _clip_vco_min(&pll_c3_params);
- clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0, 0,
- &pll_c3_params, TEGRA_PLL_USE_LOCK,
- pll_cx_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c3", NULL);
- clks[pll_c3] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc, 0,
- 408000000, &pll_p_params,
- TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
- pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP | TEGRA_DIVIDER_INT, 24,
- 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
+ &pll_c3_params, NULL);
+ clks[TEGRA114_CLK_PLL_C3] = clk;
/* PLLM */
- _clip_vco_min(&pll_m_params);
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLL_USE_LOCK,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA114_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -1378,41 +1104,20 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA114_CLK_PLL_M_OUT1] = clk;
/* PLLM_UD */
clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
CLK_SET_RATE_PARENT, 1, 1);
- /* PLLX */
- _clip_vco_min(&pll_x_params);
- if (_setup_dynamic_ramp(&pll_x_params, clk_base) >= 0) {
- _init_iddq(&pll_x_params, clk_base);
- clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
- pmc, CLK_IGNORE_UNUSED, 0, &pll_x_params,
- TEGRA_PLL_USE_LOCK, pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
- }
-
- /* PLLX_OUT0 */
- clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
- CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_x_out0", NULL);
- clks[pll_x_out0] = clk;
-
/* PLLU */
val = readl(clk_base + pll_u_params.base_reg);
val &= ~BIT(24); /* disable PLLU_OVERRIDE */
writel(val, clk_base + pll_u_params.base_reg);
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
- 0, &pll_u_params, TEGRA_PLLU |
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_u_freq_table, &pll_u_lock);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, &pll_u_lock);
+ clks[TEGRA114_CLK_PLL_U] = clk;
tegra114_utmi_param_configure(clk_base);
@@ -1420,731 +1125,97 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
22, 0, &pll_u_lock);
- clk_register_clkdev(clk, "pll_u_480M", NULL);
- clks[pll_u_480M] = clk;
+ clks[TEGRA114_CLK_PLL_U_480M] = clk;
/* PLLU_60M */
clk = clk_register_fixed_factor(NULL, "pll_u_60M", "pll_u",
CLK_SET_RATE_PARENT, 1, 8);
- clk_register_clkdev(clk, "pll_u_60M", NULL);
- clks[pll_u_60M] = clk;
+ clks[TEGRA114_CLK_PLL_U_60M] = clk;
/* PLLU_48M */
clk = clk_register_fixed_factor(NULL, "pll_u_48M", "pll_u",
CLK_SET_RATE_PARENT, 1, 10);
- clk_register_clkdev(clk, "pll_u_48M", NULL);
- clks[pll_u_48M] = clk;
+ clks[TEGRA114_CLK_PLL_U_48M] = clk;
/* PLLU_12M */
clk = clk_register_fixed_factor(NULL, "pll_u_12M", "pll_u",
CLK_SET_RATE_PARENT, 1, 40);
- clk_register_clkdev(clk, "pll_u_12M", NULL);
- clks[pll_u_12M] = clk;
+ clks[TEGRA114_CLK_PLL_U_12M] = clk;
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
- 0, &pll_d_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d_lock);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, &pll_d_lock);
+ clks[TEGRA114_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA114_CLK_PLL_D_OUT0] = clk;
/* PLLD2 */
clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc, 0,
- 0, &pll_d2_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK, pll_d_freq_table, &pll_d2_lock);
- clk_register_clkdev(clk, "pll_d2", NULL);
- clks[pll_d2] = clk;
+ &pll_d2_params, &pll_d2_lock);
+ clks[TEGRA114_CLK_PLL_D2] = clk;
/* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d2_out0", NULL);
- clks[pll_d2_out0] = clk;
-
- /* PLLA */
- clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc, 0,
- 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
-
- /* PLLA_OUT0 */
- clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
- clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
- clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA114_CLK_PLL_D2_OUT0] = clk;
/* PLLRE */
- _clip_vco_min(&pll_re_vco_params);
clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
- 0, 0, &pll_re_vco_params, TEGRA_PLL_USE_LOCK,
- NULL, &pll_re_lock, pll_ref_freq);
- clk_register_clkdev(clk, "pll_re_vco", NULL);
- clks[pll_re_vco] = clk;
+ 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq);
+ clks[TEGRA114_CLK_PLL_RE_VCO] = clk;
clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0,
clk_base + PLLRE_BASE, 16, 4, 0,
pll_re_div_table, &pll_re_lock);
- clk_register_clkdev(clk, "pll_re_out", NULL);
- clks[pll_re_out] = clk;
+ clks[TEGRA114_CLK_PLL_RE_OUT] = clk;
/* PLLE */
- clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_re_vco",
- clk_base, 0, 100000000, &pll_e_params,
- pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e_out0", NULL);
- clks[pll_e_out0] = clk;
-}
-
-static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",
-};
-
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
-};
-
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
-};
-
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
-};
-
-static void __init tegra114_audio_clk_init(void __iomem *clk_base)
-{
- struct clk *clk;
-
- /* spdif_in_sync */
- clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
- 24000000);
- clk_register_clkdev(clk, "spdif_in_sync", NULL);
- clks[spdif_in_sync] = clk;
-
- /* i2s0_sync */
- clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s0_sync", NULL);
- clks[i2s0_sync] = clk;
-
- /* i2s1_sync */
- clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s1_sync", NULL);
- clks[i2s1_sync] = clk;
-
- /* i2s2_sync */
- clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s2_sync", NULL);
- clks[i2s2_sync] = clk;
-
- /* i2s3_sync */
- clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s3_sync", NULL);
- clks[i2s3_sync] = clk;
-
- /* i2s4_sync */
- clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s4_sync", NULL);
- clks[i2s4_sync] = clk;
-
- /* vimclk_sync */
- clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "vimclk_sync", NULL);
- clks[vimclk_sync] = clk;
-
- /* audio0 */
- clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
- NULL);
- clks[audio0_mux] = clk;
- clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S0, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio0", NULL);
- clks[audio0] = clk;
-
- /* audio1 */
- clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
- NULL);
- clks[audio1_mux] = clk;
- clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S1, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio1", NULL);
- clks[audio1] = clk;
-
- /* audio2 */
- clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
- NULL);
- clks[audio2_mux] = clk;
- clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S2, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio2", NULL);
- clks[audio2] = clk;
-
- /* audio3 */
- clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
- NULL);
- clks[audio3_mux] = clk;
- clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S3, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio3", NULL);
- clks[audio3] = clk;
-
- /* audio4 */
- clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
- NULL);
- clks[audio4_mux] = clk;
- clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S4, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio4", NULL);
- clks[audio4] = clk;
-
- /* spdif */
- clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
- NULL);
- clks[spdif_mux] = clk;
- clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "spdif", NULL);
- clks[spdif] = clk;
-
- /* audio0_2x */
- clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 113, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio0_2x", NULL);
- clks[audio0_2x] = clk;
-
- /* audio1_2x */
- clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 114, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio1_2x", NULL);
- clks[audio1_2x] = clk;
-
- /* audio2_2x */
- clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 115, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio2_2x", NULL);
- clks[audio2_2x] = clk;
-
- /* audio3_2x */
- clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 116, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio3_2x", NULL);
- clks[audio3_2x] = clk;
-
- /* audio4_2x */
- clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 117, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio4_2x", NULL);
- clks[audio4_2x] = clk;
-
- /* spdif_2x */
- clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1,
- 0, &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 118,
- &periph_v_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "spdif_2x", NULL);
- clks[spdif_2x] = clk;
-}
-
-static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
-{
- struct clk *clk;
-
- /* clk_out_1 */
- clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
- &clk_out_lock);
- clks[clk_out_1_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern1", "clk_out_1");
- clks[clk_out_1] = clk;
-
- /* clk_out_2 */
- clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
- &clk_out_lock);
- clks[clk_out_2_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern2", "clk_out_2");
- clks[clk_out_2] = clk;
-
- /* clk_out_3 */
- clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
- &clk_out_lock);
- clks[clk_out_3_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern3", "clk_out_3");
- clks[clk_out_3] = clk;
-
- /* blink */
- /* clear the blink timer register to directly output clk_32k */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
-
+ clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_ref",
+ clk_base, 0, &pll_e_params, NULL);
+ clks[TEGRA114_CLK_PLL_E_OUT0] = clk;
}
-static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
- "pll_p", "pll_p_out2", "unused",
- "clk_32k", "pll_m_out1" };
-
-static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
- "pll_p", "pll_p_out4", "unused",
- "unused", "pll_x" };
-
-static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
- "pll_p", "pll_p_out4", "unused",
- "unused", "pll_x", "pll_x_out0" };
-
-static void __init tegra114_super_clk_init(void __iomem *clk_base)
+static __init void tegra114_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base)
{
struct clk *clk;
+ u32 val;
- /* CCLKG */
- clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
- ARRAY_SIZE(cclk_g_parents),
- CLK_SET_RATE_PARENT,
- clk_base + CCLKG_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk_g", NULL);
- clks[cclk_g] = clk;
-
- /* CCLKLP */
- clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
- ARRAY_SIZE(cclk_lp_parents),
- CLK_SET_RATE_PARENT,
- clk_base + CCLKLP_BURST_POLICY,
- 0, 4, 8, 9, NULL);
- clk_register_clkdev(clk, "cclk_lp", NULL);
- clks[cclk_lp] = clk;
-
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT,
- clk_base + SCLK_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
- 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
- 3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
-}
-
-static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", mux_pllaout0_audio0_2x_pllp_clkm, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", mux_pllaout0_audio1_2x_pllp_clkm, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", mux_pllaout0_audio2_2x_pllp_clkm, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", mux_pllaout0_audio3_2x_pllp_clkm, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
- TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", mux_pllaout0_audio4_2x_pllp_clkm, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", mux_pllaout0_audio_2x_pllp_clkm, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", mux_pllp_pllc_pllm, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("pwm", NULL, "pwm", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_PWM, 17, &periph_l_regs, TEGRA_PERIPH_ON_APB, pwm),
- TEGRA_INIT_DATA_MUX("adx", NULL, "adx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX, 154, &periph_w_regs, TEGRA_PERIPH_ON_APB, adx),
- TEGRA_INIT_DATA_MUX("amx", NULL, "amx", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX, 153, &periph_w_regs, TEGRA_PERIPH_ON_APB, amx),
- TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda),
- TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, TEGRA_PERIPH_ON_APB, hda2codec_2x),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "tegra11-spi.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "tegra11-spi.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "tegra11-spi.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "tegra11-spi.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("sbc5", NULL, "tegra11-spi.4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
- TEGRA_INIT_DATA_MUX("sbc6", NULL, "tegra11-spi.5", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
- TEGRA_INIT_DATA_MUX8("ndflash", NULL, "tegra_nand", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX8("ndspeed", NULL, "tegra_nand_speed", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_MUX_FLAGS("csite", NULL, "csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite, CLK_IGNORE_UNUSED),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
- TEGRA_INIT_DATA_MUX("trace", NULL, "trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, &periph_u_regs, TEGRA_PERIPH_ON_APB, trace),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_I2C("i2c1", "div-clk", "tegra11-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, i2c1),
- TEGRA_INIT_DATA_I2C("i2c2", "div-clk", "tegra11-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, i2c2),
- TEGRA_INIT_DATA_I2C("i2c3", "div-clk", "tegra11-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, i2c3),
- TEGRA_INIT_DATA_I2C("i2c4", "div-clk", "tegra11-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, i2c4),
- TEGRA_INIT_DATA_I2C("i2c5", "div-clk", "tegra11-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, i2c5),
- TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
- TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
- TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
- TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
- TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, &periph_l_regs, 0, gr_3d),
- TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr_2d),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_INT8("vi", "vi", "tegra_camera", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_INT8("epp", NULL, "epp", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_INT8("msenc", NULL, "msenc", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_MSENC, 91, &periph_u_regs, TEGRA_PERIPH_WAR_1005168, msenc),
- TEGRA_INIT_DATA_INT8("tsec", NULL, "tsec", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_TSEC, 83, &periph_u_regs, 0, tsec),
- TEGRA_INIT_DATA_INT8("host1x", NULL, "host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA_MUX("cilab", "cilab", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILAB, 144, &periph_w_regs, 0, cilab),
- TEGRA_INIT_DATA_MUX("cilcd", "cilcd", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILCD, 145, &periph_w_regs, 0, cilcd),
- TEGRA_INIT_DATA_MUX("cile", "cile", "tegra_camera", mux_pllp_pllc_clkm, CLK_SOURCE_CILE, 146, &periph_w_regs, 0, cile),
- TEGRA_INIT_DATA_MUX("dsialp", "dsialp", "tegradc.0", mux_pllp_pllc_clkm, CLK_SOURCE_DSIALP, 147, &periph_w_regs, 0, dsialp),
- TEGRA_INIT_DATA_MUX("dsiblp", "dsiblp", "tegradc.1", mux_pllp_pllc_clkm, CLK_SOURCE_DSIBLP, 148, &periph_w_regs, 0, dsiblp),
- TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllp_pllc_clkm_clk32, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
- TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
- TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
- TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
- TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
- TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllp_pllc_clk32_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
- TEGRA_INIT_DATA_INT8("se", NULL, "se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, TEGRA_PERIPH_ON_APB, se),
- TEGRA_INIT_DATA_INT_FLAGS("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect, CLK_IGNORE_UNUSED),
- TEGRA_INIT_DATA_MUX("dfll_ref", "ref", "t114_dfll", mux_pllp_clkm, CLK_SOURCE_DFLL_REF, 155, &periph_w_regs, TEGRA_PERIPH_ON_APB, dfll_ref),
- TEGRA_INIT_DATA_MUX("dfll_soc", "soc", "t114_dfll", mux_pllp_clkm, CLK_SOURCE_DFLL_SOC, 155, &periph_w_regs, TEGRA_PERIPH_ON_APB, dfll_soc),
- TEGRA_INIT_DATA_MUX8("soc_therm", NULL, "soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, &periph_u_regs, TEGRA_PERIPH_ON_APB, soc_therm),
- TEGRA_INIT_DATA_XUSB("xusb_host_src", "host_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, &periph_w_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_host_src),
- TEGRA_INIT_DATA_XUSB("xusb_falcon_src", "falcon_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_falcon_src),
- TEGRA_INIT_DATA_XUSB("xusb_fs_src", "fs_src", "tegra_xhci", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_fs_src),
- TEGRA_INIT_DATA_XUSB("xusb_ss_src", "ss_src", "tegra_xhci", mux_clkm_pllre_clk32_480M_pllc_ref, CLK_SOURCE_XUSB_SS_SRC, 143, &periph_w_regs, TEGRA_PERIPH_NO_RESET, xusb_ss_src),
- TEGRA_INIT_DATA_XUSB("xusb_dev_src", "dev_src", "tegra_xhci", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, &periph_u_regs, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, xusb_dev_src),
- TEGRA_INIT_DATA_AUDIO("d_audio", "d_audio", "tegra30-ahub", CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, TEGRA_PERIPH_ON_APB, d_audio),
- TEGRA_INIT_DATA_AUDIO("dam0", NULL, "tegra30-dam.0", CLK_SOURCE_DAM0, 108, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam0),
- TEGRA_INIT_DATA_AUDIO("dam1", NULL, "tegra30-dam.1", CLK_SOURCE_DAM1, 109, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam1),
- TEGRA_INIT_DATA_AUDIO("dam2", NULL, "tegra30-dam.2", CLK_SOURCE_DAM2, 110, &periph_v_regs, TEGRA_PERIPH_ON_APB, dam2),
-};
-
-static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, &periph_l_regs, 0, disp2),
-};
+ /* xusb_hs_src */
+ val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
+ val |= BIT(25); /* always select PLLU_60M */
+ writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);

-static __init void tegra114_periph_clk_init(void __iomem *clk_base)
-{
- struct tegra_periph_init_data *data;
- struct clk *clk;
- int i;
- u32 val;
+ clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
+ 1, 1);
+ clks[TEGRA114_CLK_XUSB_HS_SRC] = clk;

- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base,
- 0, 34, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_ON_APB |
- TEGRA_PERIPH_NO_RESET, clk_base,
- 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_ON_APB |
- TEGRA_PERIPH_NO_RESET, clk_base,
- 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[kbc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
- 0, 5, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kfuse */
- clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 40,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[kfuse] = clk;
-
- /* fuse */
- clk = tegra_clk_register_periph_gate("fuse", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[fuse] = clk;
-
- /* fuse_burn */
- clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 39,
- &periph_h_regs, periph_clk_enb_refcnt);
- clks[fuse_burn] = clk;
-
- /* apbif */
- clk = tegra_clk_register_periph_gate("apbif", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 107,
- &periph_v_regs, periph_clk_enb_refcnt);
- clks[apbif] = clk;
-
- /* hda2hdmi */
- clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 128,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[hda2hdmi] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0,
- 29, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base,
- 0, 62, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base,
- 0, 63, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[bsev] = clk;
-
- /* mipi-cal */
- clk = tegra_clk_register_periph_gate("mipi-cal", "clk_m", 0, clk_base,
- 0, 56, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[mipi_cal] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base,
- 0, 22, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base,
- 0, 58, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base,
- 0, 59, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[usb3] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0,
- 23, &periph_l_regs,
- periph_clk_enb_refcnt);
- clks[isp] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET, clk_base, 0, 92,
- &periph_u_regs, periph_clk_enb_refcnt);
- clks[csus] = clk;
-
- /* dds */
- clk = tegra_clk_register_periph_gate("dds", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 150,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[dds] = clk;
-
- /* dp2 */
- clk = tegra_clk_register_periph_gate("dp2", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 152,
- &periph_w_regs, periph_clk_enb_refcnt);
- clks[dp2] = clk;
-
- /* dtv */
- clk = tegra_clk_register_periph_gate("dtv", "clk_m",
- TEGRA_PERIPH_ON_APB, clk_base, 0, 79,
- &periph_u_regs, periph_clk_enb_refcnt);
- clks[dtv] = clk;
-
- /* dsia */
+ /* dsia mux */
clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
ARRAY_SIZE(mux_plld_out0_plld2_out0),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
- clks[dsia_mux] = clk;
- clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
- 0, 48, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[dsia] = clk;
+ clks[TEGRA114_CLK_DSIA_MUX] = clk;

- /* dsib */
+ /* dsib mux */
clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
ARRAY_SIZE(mux_plld_out0_plld2_out0),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
- clks[dsib_mux] = clk;
- clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
- 0, 82, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[dsib] = clk;
+ clks[TEGRA114_CLK_DSIB_MUX] = clk;

- /* xusb_hs_src */
- val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
- val |= BIT(25); /* always select PLLU_60M */
- writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
-
- clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
- 1, 1);
- clks[xusb_hs_src] = clk;
-
- /* xusb_host */
- clk = tegra_clk_register_periph_gate("xusb_host", "xusb_host_src", 0,
- clk_base, 0, 89, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[xusb_host] = clk;
-
- /* xusb_ss */
- clk = tegra_clk_register_periph_gate("xusb_ss", "xusb_ss_src", 0,
- clk_base, 0, 156, &periph_w_regs,
- periph_clk_enb_refcnt);
- clks[xusb_host] = clk;
-
- /* xusb_dev */
- clk = tegra_clk_register_periph_gate("xusb_dev", "xusb_dev_src", 0,
- clk_base, 0, 95, &periph_u_regs,
- periph_clk_enb_refcnt);
- clks[xusb_dev] = clk;
-
- /* emc */
+ /* emc mux */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
- CLK_IGNORE_UNUSED, 57, &periph_h_regs,
- periph_clk_enb_refcnt);
- clks[emc] = clk;
-
- for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
- data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
- clks[data->clk_id] = clk;
- }
- for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
- data = &tegra_periph_nodiv_clk_list[i];
- clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names, data->num_parents,
- &data->periph, clk_base, data->offset);
- clks[data->clk_id] = clk;
- }
+ tegra_periph_clk_init(clk_base, pmc_base, tegra114_clks,
+ &pll_p_params);
}

/* Tegra114 CPU clock and reset control functions */
@@ -2207,28 +1278,37 @@ static const struct of_device_id pmc_match[] __initconst = {
* breaks
*/
static struct tegra_clk_init_table init_table[] __initdata = {
- {uarta, pll_p, 408000000, 0},
- {uartb, pll_p, 408000000, 0},
- {uartc, pll_p, 408000000, 0},
- {uartd, pll_p, 408000000, 0},
- {pll_a, clk_max, 564480000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {extern1, pll_a_out0, 0, 1},
- {clk_out_1_mux, extern1, 0, 1},
- {clk_out_1, clk_max, 0, 1},
- {i2s0, pll_a_out0, 11289600, 0},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {i2s3, pll_a_out0, 11289600, 0},
- {i2s4, pll_a_out0, 11289600, 0},
- {dfll_soc, pll_p, 51000000, 1},
- {dfll_ref, pll_p, 51000000, 1},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+ {TEGRA114_CLK_UARTA, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTB, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTC, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_UARTD, TEGRA114_CLK_PLL_P, 408000000, 0},
+ {TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA114_CLK_EXTERN1, TEGRA114_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA114_CLK_CLK_OUT_1_MUX, TEGRA114_CLK_EXTERN1, 0, 1},
+ {TEGRA114_CLK_CLK_OUT_1, TEGRA114_CLK_CLK_MAX, 0, 1},
+ {TEGRA114_CLK_I2S0, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S1, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S2, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S3, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_I2S4, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA114_CLK_HOST1X, TEGRA114_CLK_PLL_P, 136000000, 0},
+ {TEGRA114_CLK_DFLL_SOC, TEGRA114_CLK_PLL_P, 51000000, 1},
+ {TEGRA114_CLK_DFLL_REF, TEGRA114_CLK_PLL_P, 51000000, 1},
+ {TEGRA114_CLK_DISP1, TEGRA114_CLK_PLL_P, 0, 0},
+ {TEGRA114_CLK_DISP2, TEGRA114_CLK_PLL_P, 0, 0},
+ {TEGRA114_CLK_GR2D, TEGRA114_CLK_PLL_C2, 300000000, 0},
+ {TEGRA114_CLK_GR3D, TEGRA114_CLK_PLL_C2, 300000000, 0},
+ {TEGRA114_CLK_DSIALP, TEGRA114_CLK_PLL_P, 68000000, 0},
+ {TEGRA114_CLK_DSIBLP, TEGRA114_CLK_PLL_P, 68000000, 0},
+
+ /* This MUST be the last entry. */
+ {TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0},
};

static void __init tegra114_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA114_CLK_CLK_MAX);
}
@@ -2359,7 +1439,6 @@ EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
static void __init tegra114_clock_init(struct device_node *np)
{
struct device_node *node;
- int i;
clk_base = of_iomap(np, 0);
if (!clk_base) {
@@ -2381,29 +1460,24 @@ static void __init tegra114_clock_init(struct device_node *np)
return;
}

+ clks = tegra_clk_init(clk_base, TEGRA114_CLK_CLK_MAX,
+ TEGRA114_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
if (tegra114_osc_clk_init(clk_base) < 0)
return;

tegra114_fixed_clk_init(clk_base);
tegra114_pll_init(clk_base, pmc_base);
- tegra114_periph_clk_init(clk_base);
- tegra114_audio_clk_init(clk_base);
- tegra114_pmc_clk_init(pmc_base);
- tegra114_super_clk_init(clk_base);
-
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err
- ("Tegra114 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra114_periph_clk_init(clk_base, pmc_base);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra114_clks);
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
+ &pll_x_params);
+
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));

tegra_clk_apply_init_table = tegra114_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
new file mode 100644
index 000000000000..166e02f16c8a
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -0,0 +1,1430 @@
+/*
+ * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/clk/tegra.h>
+#include <dt-bindings/clock/tegra124-car.h>
+
+#include "clk.h"
+#include "clk-id.h"
+
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_XUSB_SS_SRC 0x610
+
+#define PLLC_BASE 0x80
+#define PLLC_OUT 0x84
+#define PLLC_MISC2 0x88
+#define PLLC_MISC 0x8c
+#define PLLC2_BASE 0x4e8
+#define PLLC2_MISC 0x4ec
+#define PLLC3_BASE 0x4fc
+#define PLLC3_MISC 0x500
+#define PLLM_BASE 0x90
+#define PLLM_OUT 0x94
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLX_MISC2 0x514
+#define PLLX_MISC3 0x518
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_AUX 0x48c
+#define PLLRE_BASE 0x4c4
+#define PLLRE_MISC 0x4c8
+#define PLLDP_BASE 0x590
+#define PLLDP_MISC 0x594
+#define PLLC4_BASE 0x5a4
+#define PLLC4_MISC 0x5a8
+
+#define PLLC_IDDQ_BIT 26
+#define PLLRE_IDDQ_BIT 16
+#define PLLSS_IDDQ_BIT 19
+
+#define PLL_BASE_LOCK BIT(27)
+#define PLLE_MISC_LOCK BIT(11)
+#define PLLRE_MISC_LOCK BIT(24)
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLC_MISC_LOCK_ENABLE 24
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+#define PLLRE_MISC_LOCK_ENABLE 30
+#define PLLSS_MISC_LOCK_ENABLE 30
+
+#define PLLXC_SW_MAX_P 6
+
+#define PMC_PLLM_WB0_OVERRIDE 0x1dc
+#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+
+#define UTMIPLL_HW_PWRDN_CFG0 0x52c
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
+#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
+#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
+
+/* Tegra CPU clock and reset control regs */
+#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 clk_csite_src;
+} tegra124_cpu_clk_sctx;
+#endif
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static unsigned long osc_freq;
+static unsigned long pll_ref_freq;
+
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(pll_d2_lock);
+static DEFINE_SPINLOCK(pll_e_lock);
+static DEFINE_SPINLOCK(pll_re_lock);
+static DEFINE_SPINLOCK(pll_u_lock);
+
+/* possible OSC frequencies in Hz */
+static unsigned long tegra124_input_freq[] = {
+ [0] = 13000000,
+ [1] = 16800000,
+ [4] = 19200000,
+ [5] = 38400000,
+ [8] = 12000000,
+ [9] = 48000000,
+ [12] = 260000000,
+};
+
+static const char *mux_plld_out0_plld2_out0[] = {
+ "pll_d_out0", "pll_d2_out0",
+};
+#define mux_plld_out0_plld2_out0_idx NULL
+
+static const char *mux_pllmcp_clkm[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+};
+#define mux_pllmcp_clkm_idx NULL
+
+static struct div_nmp pllxc_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 4,
+};
+
+static struct pdiv_map pllxc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 5, .hw_val = 4 },
+ { .pdiv = 6, .hw_val = 5 },
+ { .pdiv = 8, .hw_val = 6 },
+ { .pdiv = 10, .hw_val = 7 },
+ { .pdiv = 12, .hw_val = 8 },
+ { .pdiv = 16, .hw_val = 9 },
+ { .pdiv = 12, .hw_val = 10 },
+ { .pdiv = 16, .hw_val = 11 },
+ { .pdiv = 20, .hw_val = 12 },
+ { .pdiv = 24, .hw_val = 13 },
+ { .pdiv = 32, .hw_val = 14 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ {12000000, 1000000000, 83, 0, 1}, /* actual: 996.0 MHz */
+ {13000000, 1000000000, 76, 0, 1}, /* actual: 988.0 MHz */
+ {16800000, 1000000000, 59, 0, 1}, /* actual: 991.2 MHz */
+ {19200000, 1000000000, 52, 0, 1}, /* actual: 998.4 MHz */
+ {26000000, 1000000000, 76, 1, 1}, /* actual: 988.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 700000000,
+ .vco_max = 3000000000UL,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLX_MISC3,
+ .iddq_bit_idx = 3,
+ .max_p = 6,
+ .dyn_ramp_reg = PLLX_MISC2,
+ .stepa_shift = 16,
+ .stepb_shift = 24,
+ .pdiv_tohw = pllxc_p,
+ .div_nmp = &pllxc_nmp,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 624000000, 104, 1, 2},
+ { 12000000, 600000000, 100, 1, 2},
+ { 13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 12000000,
+ .input_max = 800000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLC_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLC_MISC,
+ .iddq_bit_idx = PLLC_IDDQ_BIT,
+ .max_p = PLLXC_SW_MAX_P,
+ .dyn_ramp_reg = PLLC_MISC2,
+ .stepa_shift = 17,
+ .stepb_shift = 9,
+ .pdiv_tohw = pllxc_p,
+ .div_nmp = &pllxc_nmp,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllcx_nmp = {
+ .divm_shift = 0,
+ .divm_width = 2,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
+static struct pdiv_map pllc_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 6, .hw_val = 4 },
+ { .pdiv = 8, .hw_val = 5 },
+ { .pdiv = 12, .hw_val = 6 },
+ { .pdiv = 16, .hw_val = 7 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = {
+ {12000000, 600000000, 100, 1, 2},
+ {13000000, 600000000, 92, 1, 2}, /* actual: 598.0 MHz */
+ {16800000, 600000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ {19200000, 600000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ {26000000, 600000000, 92, 2, 2}, /* actual: 598.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_c2_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC2_BASE,
+ .misc_reg = PLLC2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .div_nmp = &pllcx_nmp,
+ .max_p = 7,
+ .ext_misc_reg[0] = 0x4f0,
+ .ext_misc_reg[1] = 0x4f4,
+ .ext_misc_reg[2] = 0x4f8,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_params pll_c3_params = {
+ .input_min = 12000000,
+ .input_max = 48000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000,
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC3_BASE,
+ .misc_reg = PLLC3_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .pdiv_tohw = pllc_p,
+ .div_nmp = &pllcx_nmp,
+ .max_p = 7,
+ .ext_misc_reg[0] = 0x504,
+ .ext_misc_reg[1] = 0x508,
+ .ext_misc_reg[2] = 0x50c,
+ .freq_table = pll_cx_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllss_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 20,
+ .divp_width = 4,
+};
+
+static struct pdiv_map pll12g_ssd_esd_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 3, .hw_val = 2 },
+ { .pdiv = 4, .hw_val = 3 },
+ { .pdiv = 5, .hw_val = 4 },
+ { .pdiv = 6, .hw_val = 5 },
+ { .pdiv = 8, .hw_val = 6 },
+ { .pdiv = 10, .hw_val = 7 },
+ { .pdiv = 12, .hw_val = 8 },
+ { .pdiv = 16, .hw_val = 9 },
+ { .pdiv = 12, .hw_val = 10 },
+ { .pdiv = 16, .hw_val = 11 },
+ { .pdiv = 20, .hw_val = 12 },
+ { .pdiv = 24, .hw_val = 13 },
+ { .pdiv = 32, .hw_val = 14 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_c4_freq_table[] = {
+ { 12000000, 600000000, 100, 1, 1},
+ { 13000000, 600000000, 92, 1, 1}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 1}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 1}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 1}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_c4_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLC4_BASE,
+ .misc_reg = PLLC4_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLC4_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x5ac,
+ .ext_misc_reg[1] = 0x5b0,
+ .ext_misc_reg[2] = 0x5b4,
+ .freq_table = pll_c4_freq_table,
+};
+
+static struct pdiv_map pllm_p[] = {
+ { .pdiv = 1, .hw_val = 0 },
+ { .pdiv = 2, .hw_val = 1 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ {12000000, 800000000, 66, 1, 1}, /* actual: 792.0 MHz */
+ {13000000, 800000000, 61, 1, 1}, /* actual: 793.0 MHz */
+ {16800000, 800000000, 47, 1, 1}, /* actual: 789.6 MHz */
+ {19200000, 800000000, 41, 1, 1}, /* actual: 787.2 MHz */
+ {26000000, 800000000, 61, 2, 1}, /* actual: 793.0 MHz */
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct div_nmp pllm_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .override_divm_shift = 0,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .override_divn_shift = 8,
+ .divp_shift = 20,
+ .divp_width = 1,
+ .override_divp_shift = 27,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 12000000,
+ .input_max = 500000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 50 MHz */
+ .vco_min = 400000000,
+ .vco_max = 1066000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .max_p = 2,
+ .pdiv_tohw = pllm_p,
+ .div_nmp = &pllm_nmp,
+ .pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
+ .pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ {336000000, 100000000, 100, 21, 16, 11},
+ {312000000, 100000000, 200, 26, 24, 13},
+ {13000000, 100000000, 200, 1, 26, 13},
+ {12000000, 100000000, 200, 1, 24, 13},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct div_nmp plle_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 24,
+ .divp_width = 4,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 75000000,
+ .vco_min = 1600000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .aux_reg = PLLE_AUX,
+ .lock_mask = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &plle_nmp,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
+};
+
+static const struct clk_div_table pll_re_div_table[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 0, .div = 0 },
+};
+
+static struct div_nmp pllre_nmp = {
+ .divm_shift = 0,
+ .divm_width = 8,
+ .divn_shift = 8,
+ .divn_width = 8,
+ .divp_shift = 16,
+ .divp_width = 4,
+};
+
+static struct tegra_clk_pll_params pll_re_vco_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 300000000,
+ .vco_max = 600000000,
+ .base_reg = PLLRE_BASE,
+ .misc_reg = PLLRE_MISC,
+ .lock_mask = PLLRE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLRE_MISC,
+ .iddq_bit_idx = PLLRE_IDDQ_BIT,
+ .div_nmp = &pllre_nmp,
+ .flags = TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp pllp_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 10,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ {12000000, 408000000, 408, 12, 0, 8},
+ {13000000, 408000000, 408, 13, 0, 8},
+ {16800000, 408000000, 340, 14, 0, 8},
+ {19200000, 408000000, 340, 16, 0, 8},
+ {26000000, 408000000, 408, 26, 0, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &pllp_nmp,
+ .freq_table = pll_p_freq_table,
+ .fixed_rate = 408000000,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ {9600000, 282240000, 147, 5, 0, 4},
+ {9600000, 368640000, 192, 5, 0, 4},
+ {9600000, 240000000, 200, 8, 0, 8},
+
+ {28800000, 282240000, 245, 25, 0, 8},
+ {28800000, 368640000, 320, 25, 0, 8},
+ {28800000, 240000000, 200, 24, 0, 8},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 200000000,
+ .vco_max = 700000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .div_nmp = &pllp_nmp,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+};
+
+static struct div_nmp plld_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 11,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ {12000000, 216000000, 864, 12, 4, 12},
+ {13000000, 216000000, 864, 13, 4, 12},
+ {16800000, 216000000, 720, 14, 4, 12},
+ {19200000, 216000000, 720, 16, 4, 12},
+ {26000000, 216000000, 864, 26, 4, 12},
+
+ {12000000, 594000000, 594, 12, 1, 12},
+ {13000000, 594000000, 594, 13, 1, 12},
+ {16800000, 594000000, 495, 14, 1, 12},
+ {19200000, 594000000, 495, 16, 1, 12},
+ {26000000, 594000000, 594, 26, 1, 12},
+
+ {12000000, 1000000000, 1000, 12, 1, 12},
+ {13000000, 1000000000, 1000, 13, 1, 12},
+ {19200000, 1000000000, 625, 12, 1, 12},
+ {26000000, 1000000000, 1000, 26, 1, 12},
+
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 500000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+ .div_nmp = &plld_nmp,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+};
+
+static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
+ { 12000000, 594000000, 99, 1, 2},
+ { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+ { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params tegra124_pll_d2_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLD2_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x570,
+ .ext_misc_reg[1] = 0x574,
+ .ext_misc_reg[2] = 0x578,
+ .max_p = 15,
+ .freq_table = tegra124_pll_d2_freq_table,
+};
+
+static struct tegra_clk_pll_freq_table pll_dp_freq_table[] = {
+ { 12000000, 600000000, 100, 1, 1},
+ { 13000000, 600000000, 92, 1, 1}, /* actual: 598.0 MHz */
+ { 16800000, 600000000, 71, 1, 1}, /* actual: 596.4 MHz */
+ { 19200000, 600000000, 62, 1, 1}, /* actual: 595.2 MHz */
+ { 26000000, 600000000, 92, 2, 1}, /* actual: 598.0 MHz */
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_params pll_dp_params = {
+ .input_min = 12000000,
+ .input_max = 1000000000,
+ .cf_min = 12000000,
+ .cf_max = 19200000, /* s/w policy, h/w capability 38 MHz */
+ .vco_min = 600000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLDP_BASE,
+ .misc_reg = PLLDP_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+ .iddq_reg = PLLDP_BASE,
+ .iddq_bit_idx = PLLSS_IDDQ_BIT,
+ .pdiv_tohw = pll12g_ssd_esd_p,
+ .div_nmp = &pllss_nmp,
+ .ext_misc_reg[0] = 0x598,
+ .ext_misc_reg[1] = 0x59c,
+ .ext_misc_reg[2] = 0x5a0,
+ .max_p = 5,
+ .freq_table = pll_dp_freq_table,
+};
+
+static struct pdiv_map pllu_p[] = {
+ { .pdiv = 1, .hw_val = 1 },
+ { .pdiv = 2, .hw_val = 0 },
+ { .pdiv = 0, .hw_val = 0 },
+};
+
+static struct div_nmp pllu_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 10,
+ .divp_shift = 20,
+ .divp_width = 1,
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ {12000000, 480000000, 960, 12, 2, 12},
+ {13000000, 480000000, 960, 13, 2, 12},
+ {16800000, 480000000, 400, 7, 2, 5},
+ {19200000, 480000000, 200, 4, 2, 3},
+ {26000000, 480000000, 960, 26, 2, 12},
+ {0, 0, 0, 0, 0, 0},
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 480000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_mask = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+ .pdiv_tohw = pllu_p,
+ .div_nmp = &pllu_nmp,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+};
+
+struct utmi_clk_param {
+ /* Oscillator Frequency in KHz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+ {.osc_frequency = 13000000, .enable_delay_count = 0x02,
+ .stable_count = 0x33, .active_delay_count = 0x05,
+ .xtal_freq_count = 0x7F},
+ {.osc_frequency = 19200000, .enable_delay_count = 0x03,
+ .stable_count = 0x4B, .active_delay_count = 0x06,
+ .xtal_freq_count = 0xBB},
+ {.osc_frequency = 12000000, .enable_delay_count = 0x02,
+ .stable_count = 0x2F, .active_delay_count = 0x04,
+ .xtal_freq_count = 0x76},
+ {.osc_frequency = 26000000, .enable_delay_count = 0x04,
+ .stable_count = 0x66, .active_delay_count = 0x09,
+ .xtal_freq_count = 0xFE},
+ {.osc_frequency = 16800000, .enable_delay_count = 0x03,
+ .stable_count = 0x41, .active_delay_count = 0x0A,
+ .xtal_freq_count = 0xA4},
+};
+
+static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_ispb] = { .dt_id = TEGRA124_CLK_ISPB, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+ [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
+ [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA124_CLK_KBC, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA124_CLK_KFUSE, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA124_CLK_SBC1, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA124_CLK_NOR, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
+ [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
+ [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA124_CLK_I2C2, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA124_CLK_UARTC, .present = true },
+ [tegra_clk_mipi_cal] = { .dt_id = TEGRA124_CLK_MIPI_CAL, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA124_CLK_EMC, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA124_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA124_CLK_USB3, .present = true },
+ [tegra_clk_vde_8] = { .dt_id = TEGRA124_CLK_VDE, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA124_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA124_CLK_BSEV, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+ [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
+ [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA124_CLK_CSITE, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA124_CLK_LA, .present = true },
+ [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
+ [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
+ [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
+ [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
+ [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
+ [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA124_CLK_CSUS, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA124_CLK_MSELECT, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA124_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA124_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA124_CLK_I2S4, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA124_CLK_I2C4, .present = true },
+ [tegra_clk_sbc5] = { .dt_id = TEGRA124_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6] = { .dt_id = TEGRA124_CLK_SBC6, .present = true },
+ [tegra_clk_d_audio] = { .dt_id = TEGRA124_CLK_D_AUDIO, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA124_CLK_APBIF, .present = true },
+ [tegra_clk_dam0] = { .dt_id = TEGRA124_CLK_DAM0, .present = true },
+ [tegra_clk_dam1] = { .dt_id = TEGRA124_CLK_DAM1, .present = true },
+ [tegra_clk_dam2] = { .dt_id = TEGRA124_CLK_DAM2, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA124_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA124_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA124_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA124_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA124_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA124_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA124_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA124_CLK_ACTMON, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA124_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA124_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA124_CLK_EXTERN3, .present = true },
+ [tegra_clk_sata_oob] = { .dt_id = TEGRA124_CLK_SATA_OOB, .present = true },
+ [tegra_clk_sata] = { .dt_id = TEGRA124_CLK_SATA, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA124_CLK_HDA, .present = true },
+ [tegra_clk_se] = { .dt_id = TEGRA124_CLK_SE, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA124_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_sata_cold] = { .dt_id = TEGRA124_CLK_SATA_COLD, .present = true },
+ [tegra_clk_cilab] = { .dt_id = TEGRA124_CLK_CILAB, .present = true },
+ [tegra_clk_cilcd] = { .dt_id = TEGRA124_CLK_CILCD, .present = true },
+ [tegra_clk_cile] = { .dt_id = TEGRA124_CLK_CILE, .present = true },
+ [tegra_clk_dsialp] = { .dt_id = TEGRA124_CLK_DSIALP, .present = true },
+ [tegra_clk_dsiblp] = { .dt_id = TEGRA124_CLK_DSIBLP, .present = true },
+ [tegra_clk_entropy] = { .dt_id = TEGRA124_CLK_ENTROPY, .present = true },
+ [tegra_clk_dds] = { .dt_id = TEGRA124_CLK_DDS, .present = true },
+ [tegra_clk_dp2] = { .dt_id = TEGRA124_CLK_DP2, .present = true },
+ [tegra_clk_amx] = { .dt_id = TEGRA124_CLK_AMX, .present = true },
+ [tegra_clk_adx] = { .dt_id = TEGRA124_CLK_ADX, .present = true },
+ [tegra_clk_xusb_ss] = { .dt_id = TEGRA124_CLK_XUSB_SS, .present = true },
+ [tegra_clk_i2c6] = { .dt_id = TEGRA124_CLK_I2C6, .present = true },
+ [tegra_clk_vim2_clk] = { .dt_id = TEGRA124_CLK_VIM2_CLK, .present = true },
+ [tegra_clk_hdmi_audio] = { .dt_id = TEGRA124_CLK_HDMI_AUDIO, .present = true },
+ [tegra_clk_clk72Mhz] = { .dt_id = TEGRA124_CLK_CLK72MHZ, .present = true },
+ [tegra_clk_vic03] = { .dt_id = TEGRA124_CLK_VIC03, .present = true },
+ [tegra_clk_adx1] = { .dt_id = TEGRA124_CLK_ADX1, .present = true },
+ [tegra_clk_dpaux] = { .dt_id = TEGRA124_CLK_DPAUX, .present = true },
+ [tegra_clk_sor0] = { .dt_id = TEGRA124_CLK_SOR0, .present = true },
+ [tegra_clk_sor0_lvds] = { .dt_id = TEGRA124_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_gpu] = { .dt_id = TEGRA124_CLK_GPU, .present = true },
+ [tegra_clk_amx1] = { .dt_id = TEGRA124_CLK_AMX1, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA124_CLK_UARTB, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA124_CLK_VFIR, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA124_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA124_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_vi_9] = { .dt_id = TEGRA124_CLK_VI, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA124_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA124_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA124_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA124_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA124_CLK_PLL_REF, .present = true },
+ [tegra_clk_pll_c] = { .dt_id = TEGRA124_CLK_PLL_C, .present = true },
+ [tegra_clk_pll_c_out1] = { .dt_id = TEGRA124_CLK_PLL_C_OUT1, .present = true },
+ [tegra_clk_pll_c2] = { .dt_id = TEGRA124_CLK_PLL_C2, .present = true },
+ [tegra_clk_pll_c3] = { .dt_id = TEGRA124_CLK_PLL_C3, .present = true },
+ [tegra_clk_pll_m] = { .dt_id = TEGRA124_CLK_PLL_M, .present = true },
+ [tegra_clk_pll_m_out1] = { .dt_id = TEGRA124_CLK_PLL_M_OUT1, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA124_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA124_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA124_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA124_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA124_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA124_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA124_CLK_PLL_A_OUT0, .present = true },
+ [tegra_clk_pll_d] = { .dt_id = TEGRA124_CLK_PLL_D, .present = true },
+ [tegra_clk_pll_d_out0] = { .dt_id = TEGRA124_CLK_PLL_D_OUT0, .present = true },
+ [tegra_clk_pll_d2] = { .dt_id = TEGRA124_CLK_PLL_D2, .present = true },
+ [tegra_clk_pll_d2_out0] = { .dt_id = TEGRA124_CLK_PLL_D2_OUT0, .present = true },
+ [tegra_clk_pll_u] = { .dt_id = TEGRA124_CLK_PLL_U, .present = true },
+ [tegra_clk_pll_u_480m] = { .dt_id = TEGRA124_CLK_PLL_U_480M, .present = true },
+ [tegra_clk_pll_u_60m] = { .dt_id = TEGRA124_CLK_PLL_U_60M, .present = true },
+ [tegra_clk_pll_u_48m] = { .dt_id = TEGRA124_CLK_PLL_U_48M, .present = true },
+ [tegra_clk_pll_u_12m] = { .dt_id = TEGRA124_CLK_PLL_U_12M, .present = true },
+ [tegra_clk_pll_x] = { .dt_id = TEGRA124_CLK_PLL_X, .present = true },
+ [tegra_clk_pll_x_out0] = { .dt_id = TEGRA124_CLK_PLL_X_OUT0, .present = true },
+ [tegra_clk_pll_re_vco] = { .dt_id = TEGRA124_CLK_PLL_RE_VCO, .present = true },
+ [tegra_clk_pll_re_out] = { .dt_id = TEGRA124_CLK_PLL_RE_OUT, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA124_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA124_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA124_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA124_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA124_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA124_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA124_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA124_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA124_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA124_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA124_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA124_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA124_CLK_SPDIF, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA124_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA124_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA124_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA124_CLK_BLINK, .present = true },
+ [tegra_clk_xusb_host_src] = { .dt_id = TEGRA124_CLK_XUSB_HOST_SRC, .present = true },
+ [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA124_CLK_XUSB_FALCON_SRC, .present = true },
+ [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA124_CLK_XUSB_FS_SRC, .present = true },
+ [tegra_clk_xusb_ss_src] = { .dt_id = TEGRA124_CLK_XUSB_SS_SRC, .present = true },
+ [tegra_clk_xusb_dev_src] = { .dt_id = TEGRA124_CLK_XUSB_DEV_SRC, .present = true },
+ [tegra_clk_xusb_dev] = { .dt_id = TEGRA124_CLK_XUSB_DEV, .present = true },
+ [tegra_clk_xusb_hs_src] = { .dt_id = TEGRA124_CLK_XUSB_HS_SRC, .present = true },
+ [tegra_clk_sclk] = { .dt_id = TEGRA124_CLK_SCLK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA124_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA124_CLK_PCLK, .present = true },
+ [tegra_clk_cclk_g] = { .dt_id = TEGRA124_CLK_CCLK_G, .present = true },
+ [tegra_clk_cclk_lp] = { .dt_id = TEGRA124_CLK_CCLK_LP, .present = true },
+ [tegra_clk_dfll_ref] = { .dt_id = TEGRA124_CLK_DFLL_REF, .present = true },
+ [tegra_clk_dfll_soc] = { .dt_id = TEGRA124_CLK_DFLL_SOC, .present = true },
+ [tegra_clk_vi_sensor2] = { .dt_id = TEGRA124_CLK_VI_SENSOR2, .present = true },
+ [tegra_clk_pll_p_out5] = { .dt_id = TEGRA124_CLK_PLL_P_OUT5, .present = true },
+ [tegra_clk_pll_c4] = { .dt_id = TEGRA124_CLK_PLL_C4, .present = true },
+ [tegra_clk_pll_dp] = { .dt_id = TEGRA124_CLK_PLL_DP, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA124_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA124_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA124_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA124_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA124_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA124_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
+ [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
+ [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
+};
+
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "clk_m", .dt_id = TEGRA124_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA124_CLK_PLL_REF },
+ { .con_id = "clk_32k", .dt_id = TEGRA124_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA124_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA124_CLK_CLK_M_DIV4 },
+ { .con_id = "pll_c", .dt_id = TEGRA124_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA124_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_c2", .dt_id = TEGRA124_CLK_PLL_C2 },
+ { .con_id = "pll_c3", .dt_id = TEGRA124_CLK_PLL_C3 },
+ { .con_id = "pll_p", .dt_id = TEGRA124_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA124_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA124_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA124_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA124_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA124_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA124_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA124_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA124_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA124_CLK_PLL_U },
+ { .con_id = "pll_u_480M", .dt_id = TEGRA124_CLK_PLL_U_480M },
+ { .con_id = "pll_u_60M", .dt_id = TEGRA124_CLK_PLL_U_60M },
+ { .con_id = "pll_u_48M", .dt_id = TEGRA124_CLK_PLL_U_48M },
+ { .con_id = "pll_u_12M", .dt_id = TEGRA124_CLK_PLL_U_12M },
+ { .con_id = "pll_d", .dt_id = TEGRA124_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA124_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA124_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA124_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA124_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA124_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_re_vco", .dt_id = TEGRA124_CLK_PLL_RE_VCO },
+ { .con_id = "pll_re_out", .dt_id = TEGRA124_CLK_PLL_RE_OUT },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA124_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA124_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA124_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA124_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA124_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA124_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA124_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA124_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA124_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA124_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA124_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA124_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA124_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA124_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA124_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA124_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA124_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA124_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA124_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA124_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA124_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA124_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA124_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA124_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA124_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA124_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA124_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA124_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA124_CLK_FUSE },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA124_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA124_CLK_TIMER },
+};
+
+static struct clk **clks;
+
+static void tegra124_utmi_param_configure(void __iomem *clk_base)
+{
+ u32 reg;
+ int i;
+
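+ /* Find the UTMI parameter set that matches the current oscillator frequency */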
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (osc_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
+ osc_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ /* FIXME: arclk_rst.h says this is wrong; it should be 1 ms -> 0x50. Verify. */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
+ active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
+ enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
+ xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ /* Setup HW control of UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+
+ udelay(1);
+
+ /*
+  * Set up the SW override of UTMIPLL, assuming the USB2.0 ports are
+  * assigned to USB2
+  */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
+ reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ /* Enable HW control UTMIPLL */
+ reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
+}
+
+static __init void tegra124_periph_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base)
+{
+ struct clk *clk;
+ u32 val;
+
+ /* xusb_hs_src */
+ val = readl(clk_base + CLK_SOURCE_XUSB_SS_SRC);
+ val |= BIT(25); /* always select PLLU_60M */
+ writel(val, clk_base + CLK_SOURCE_XUSB_SS_SRC);
+
+ clk = clk_register_fixed_factor(NULL, "xusb_hs_src", "pll_u_60M", 0,
+ 1, 1);
+ clks[TEGRA124_CLK_XUSB_HS_SRC] = clk;
+
+ /* dsia mux */
+ clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
+ clks[TEGRA124_CLK_DSIA_MUX] = clk;
+
+ /* dsib mux */
+ clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
+ clks[TEGRA124_CLK_DSIB_MUX] = clk;
+
+ /* emc mux */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 29, 3, 0, NULL);
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &pll_e_lock);
+ clk_register_clkdev(clk, "cml0", NULL);
+ clks[TEGRA124_CLK_CML0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &pll_e_lock);
+ clk_register_clkdev(clk, "cml1", NULL);
+ clks[TEGRA124_CLK_CML1] = clk;
+
+ tegra_periph_clk_init(clk_base, pmc_base, tegra124_clks, &pll_p_params);
+}
+
+static void __init tegra124_pll_init(void __iomem *clk_base,
+ void __iomem *pmc)
+{
+ u32 val;
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pllxc("pll_c", "pll_ref", clk_base,
+ pmc, 0, &pll_c_params, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[TEGRA124_CLK_PLL_C] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[TEGRA124_CLK_PLL_C_OUT1] = clk;
+
+ /* PLLC2 */
+ clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
+ &pll_c2_params, NULL);
+ clk_register_clkdev(clk, "pll_c2", NULL);
+ clks[TEGRA124_CLK_PLL_C2] = clk;
+
+ /* PLLC3 */
+ clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
+ &pll_c3_params, NULL);
+ clk_register_clkdev(clk, "pll_c3", NULL);
+ clks[TEGRA124_CLK_PLL_C3] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[TEGRA124_CLK_PLL_M] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
+
+ /* PLLM_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m",
+ CLK_SET_RATE_PARENT, 1, 1);
+
+ /* PLLU */
+ val = readl(clk_base + pll_u_params.base_reg);
+ val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+ writel(val, clk_base + pll_u_params.base_reg);
+
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
+ &pll_u_params, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[TEGRA124_CLK_PLL_U] = clk;
+
+ tegra124_utmi_param_configure(clk_base);
+
+ /* PLLU_480M */
+ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
+ CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
+ 22, 0, &pll_u_lock);
+ clk_register_clkdev(clk, "pll_u_480M", NULL);
+ clks[TEGRA124_CLK_PLL_U_480M] = clk;
+
+ /* PLLU_60M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_60M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 8);
+ clk_register_clkdev(clk, "pll_u_60M", NULL);
+ clks[TEGRA124_CLK_PLL_U_60M] = clk;
+
+ /* PLLU_48M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_48M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 10);
+ clk_register_clkdev(clk, "pll_u_48M", NULL);
+ clks[TEGRA124_CLK_PLL_U_48M] = clk;
+
+ /* PLLU_12M */
+ clk = clk_register_fixed_factor(NULL, "pll_u_12M", "pll_u",
+ CLK_SET_RATE_PARENT, 1, 40);
+ clk_register_clkdev(clk, "pll_u_12M", NULL);
+ clks[TEGRA124_CLK_PLL_U_12M] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
+ &pll_d_params, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[TEGRA124_CLK_PLL_D] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[TEGRA124_CLK_PLL_D_OUT0] = clk;
+
+ /* PLLRE */
+ clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
+ 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq);
+ clk_register_clkdev(clk, "pll_re_vco", NULL);
+ clks[TEGRA124_CLK_PLL_RE_VCO] = clk;
+
+ clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0,
+ clk_base + PLLRE_BASE, 16, 4, 0,
+ pll_re_div_table, &pll_re_lock);
+ clk_register_clkdev(clk, "pll_re_out", NULL);
+ clks[TEGRA124_CLK_PLL_RE_OUT] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle_tegra114("pll_e", "pll_ref",
+ clk_base, 0, &pll_e_params, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[TEGRA124_CLK_PLL_E] = clk;
+
+ /* PLLC4 */
+ clk = tegra_clk_register_pllss("pll_c4", "pll_ref", clk_base, 0,
+ &pll_c4_params, NULL);
+ clk_register_clkdev(clk, "pll_c4", NULL);
+ clks[TEGRA124_CLK_PLL_C4] = clk;
+
+ /* PLLDP */
+ clk = tegra_clk_register_pllss("pll_dp", "pll_ref", clk_base, 0,
+ &pll_dp_params, NULL);
+ clk_register_clkdev(clk, "pll_dp", NULL);
+ clks[TEGRA124_CLK_PLL_DP] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pllss("pll_d2", "pll_ref", clk_base, 0,
+ &tegra124_pll_d2_params, NULL);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[TEGRA124_CLK_PLL_D2] = clk;
+
+ /* PLLD2_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 1);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
+}
+
+/* Tegra124 CPU clock and reset control functions */
+static void tegra124_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base + CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* spin until the CPU is held in reset */
+}
+
+static void tegra124_disable_cpu_clock(u32 cpu)
+{
+ /* the flow controller handles this as part of the power sequence */
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void tegra124_cpu_clock_suspend(void)
+{
+ /* switch CoreSight (CSITE) to clk_m, saving the original source */
+ tegra124_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_SOURCE_CSITE);
+ writel(3 << 30, clk_base + CLK_SOURCE_CSITE);
+}
+
+static void tegra124_cpu_clock_resume(void)
+{
+ writel(tegra124_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra124_cpu_car_ops = {
+ .wait_for_reset = tegra124_wait_cpu_in_reset,
+ .disable_clock = tegra124_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra124_cpu_clock_suspend,
+ .resume = tegra124_cpu_clock_resume,
+#endif
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra124-pmc" },
+ {},
+};
+
+static struct tegra_clk_init_table init_table[] __initdata = {
+ {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0},
+ {TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA124_CLK_EXTERN1, TEGRA124_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA124_CLK_CLK_OUT_1_MUX, TEGRA124_CLK_EXTERN1, 0, 1},
+ {TEGRA124_CLK_CLK_OUT_1, TEGRA124_CLK_CLK_MAX, 0, 1},
+ {TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
+ {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+ {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
+ {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
+ {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
+ {TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0},
+ {TEGRA124_CLK_PLL_C_OUT1, TEGRA124_CLK_CLK_MAX, 100000000, 0},
+ {TEGRA124_CLK_SBC4, TEGRA124_CLK_PLL_P, 12000000, 1},
+ {TEGRA124_CLK_TSEC, TEGRA124_CLK_PLL_C3, 0, 0},
+ {TEGRA124_CLK_MSENC, TEGRA124_CLK_PLL_C3, 0, 0},
+ /* This MUST be the last entry. */
+ {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static void __init tegra124_clock_apply_init_table(void)
+{
+ tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+static void __init tegra124_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra124 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ WARN_ON(1);
+ return;
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ WARN_ON(1);
+ return;
+ }
+
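+ /* The last argument is the number of peripheral clock register banks (six on Tegra124) */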
+ clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+ if (!clks)
+ return;
+
+ if (tegra_osc_clk_init(clk_base, tegra124_clks, tegra124_input_freq,
+ ARRAY_SIZE(tegra124_input_freq), &osc_freq, &pll_ref_freq) < 0)
+ return;
+
+ tegra_fixed_clk_init(tegra124_clks);
+ tegra124_pll_init(clk_base, pmc_base);
+ tegra124_periph_clk_init(clk_base, pmc_base);
+ tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra124_clks);
+
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
+ &pll_x_params);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+
+ tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+
+ tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 056f649d0d89..dace2b1b5ae6 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -22,30 +22,10 @@
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
+#include <dt-bindings/clock/tegra20-car.h>
#include "clk.h"
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00c
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_NUM 3
-
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_NUM 3
+#include "clk-id.h"
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
@@ -67,6 +47,8 @@
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define TEGRA20_CLK_PERIPH_BANKS 3
+
#define PLLS_BASE 0xf0
#define PLLS_MISC 0xf4
#define PLLC_BASE 0x80
@@ -114,34 +96,15 @@
#define CLK_SOURCE_I2S1 0x100
#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
#define CLK_SOURCE_PWM 0x110
#define CLK_SOURCE_SPI 0x114
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
#define CLK_SOURCE_XIO 0x120
#define CLK_SOURCE_TWC 0x12c
#define CLK_SOURCE_IDE 0x144
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_CVE 0x140
-#define CLK_SOURCE_TVO 0x188
-#define CLK_SOURCE_TVDAC 0x194
#define CLK_SOURCE_HDMI 0x18c
#define CLK_SOURCE_DISP1 0x138
#define CLK_SOURCE_DISP2 0x13c
#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
#define CLK_SOURCE_I2C1 0x124
#define CLK_SOURCE_I2C2 0x198
#define CLK_SOURCE_I2C3 0x1b8
@@ -151,24 +114,10 @@
#define CLK_SOURCE_UARTC 0x1a0
#define CLK_SOURCE_UARTD 0x1c0
#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_3D 0x158
-#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_MPE 0x170
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_VDE 0x1c8
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_VI_SENSOR 0x1a8
#define CLK_SOURCE_EMC 0x19c
#define AUDIO_SYNC_CLK 0x38
-#define PMC_CTRL 0x0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_BLINK_TIMER 0x40
-
/* Tegra CPU clock and reset control regs */
#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
@@ -188,64 +137,32 @@ static struct cpu_clk_suspend_context {
} tegra20_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
-static DEFINE_SPINLOCK(pll_div_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
-
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
- _regs, _clk_num, periph_clk_enb_refcnt, \
+ _clk_num, \
_gate_flags, _clk_id)
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id)
-
-#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+#define TEGRA_INIT_DATA_DIV16(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, \
_clk_id)
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, _clk_num, _regs, \
+#define TEGRA_INIT_DATA_NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, \
_gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, \
+ _clk_num, _gate_flags, \
_clk_id)
-/* IDs assigned here must be in sync with DT bindings definition
- * for Tegra20 clocks .
- */
-enum tegra20_clk {
- cpu, ac97 = 3, rtc, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1,
- ndflash, sdmmc1, sdmmc4, twc, pwm, i2s2, epp, gr2d = 21, usbd, isp,
- gr3d, ide, disp2, disp1, host1x, vcp, cache2 = 31, mem, ahbdma, apbdma,
- kbc = 36, stat_mon, pmc, fuse, kfuse, sbc1, nor, spi, sbc2, xio, sbc3,
- dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
- usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
- pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
- iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev2, cdev1,
- uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
- osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
- pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
- pll_p, pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_s, pll_u,
- pll_x, cop, audio, pll_ref, twd, clk_max,
-};
-
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct clk **clks;
static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
{ 12000000, 600000000, 600, 12, 0, 8 },
@@ -383,6 +300,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_m_params = {
@@ -397,6 +316,8 @@ static struct tegra_clk_pll_params pll_m_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_p_params = {
@@ -411,6 +332,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON,
+ .fixed_rate = 216000000,
};
static struct tegra_clk_pll_params pll_a_params = {
@@ -425,6 +349,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_d_params = {
@@ -439,6 +365,8 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct pdiv_map pllu_p[] = {
@@ -460,6 +388,8 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -474,6 +404,8 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -488,34 +420,162 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 0,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
-/* Peripheral clock registers */
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
-};
-
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "pll_c", .dt_id = TEGRA20_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA20_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_p", .dt_id = TEGRA20_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA20_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA20_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA20_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA20_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA20_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA20_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA20_CLK_PLL_X },
+ { .con_id = "pll_u", .dt_id = TEGRA20_CLK_PLL_U },
+ { .con_id = "pll_d", .dt_id = TEGRA20_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA20_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA20_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA20_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_e", .dt_id = TEGRA20_CLK_PLL_E },
+ { .con_id = "cclk", .dt_id = TEGRA20_CLK_CCLK },
+ { .con_id = "sclk", .dt_id = TEGRA20_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA20_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA20_CLK_PCLK },
+ { .con_id = "fuse", .dt_id = TEGRA20_CLK_FUSE },
+ { .con_id = "twd", .dt_id = TEGRA20_CLK_TWD },
+ { .con_id = "audio", .dt_id = TEGRA20_CLK_AUDIO },
+ { .con_id = "audio_2x", .dt_id = TEGRA20_CLK_AUDIO_2X },
+ { .dev_id = "tegra20-ac97", .dt_id = TEGRA20_CLK_AC97 },
+ { .dev_id = "tegra-apbdma", .dt_id = TEGRA20_CLK_APBDMA },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA20_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA20_CLK_TIMER },
+ { .dev_id = "tegra-kbc", .dt_id = TEGRA20_CLK_KBC },
+ { .con_id = "csus", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_CSUS },
+ { .con_id = "vcp", .dev_id = "tegra-avp", .dt_id = TEGRA20_CLK_VCP },
+ { .con_id = "bsea", .dev_id = "tegra-avp", .dt_id = TEGRA20_CLK_BSEA },
+ { .con_id = "bsev", .dev_id = "tegra-aes", .dt_id = TEGRA20_CLK_BSEV },
+ { .con_id = "emc", .dt_id = TEGRA20_CLK_EMC },
+ { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA20_CLK_USBD },
+ { .dev_id = "tegra-ehci.1", .dt_id = TEGRA20_CLK_USB2 },
+ { .dev_id = "tegra-ehci.2", .dt_id = TEGRA20_CLK_USB3 },
+ { .dev_id = "dsi", .dt_id = TEGRA20_CLK_DSI },
+ { .con_id = "csi", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_CSI },
+ { .con_id = "isp", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_ISP },
+ { .con_id = "pex", .dt_id = TEGRA20_CLK_PEX },
+ { .con_id = "afi", .dt_id = TEGRA20_CLK_AFI },
+ { .con_id = "cdev1", .dt_id = TEGRA20_CLK_CDEV1 },
+ { .con_id = "cdev2", .dt_id = TEGRA20_CLK_CDEV2 },
+ { .con_id = "clk_32k", .dt_id = TEGRA20_CLK_CLK_32K },
+ { .con_id = "blink", .dt_id = TEGRA20_CLK_BLINK },
+ { .con_id = "clk_m", .dt_id = TEGRA20_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA20_CLK_PLL_REF },
+ { .dev_id = "tegra20-i2s.0", .dt_id = TEGRA20_CLK_I2S1 },
+ { .dev_id = "tegra20-i2s.1", .dt_id = TEGRA20_CLK_I2S2 },
+ { .con_id = "spdif_out", .dev_id = "tegra20-spdif", .dt_id = TEGRA20_CLK_SPDIF_OUT },
+ { .con_id = "spdif_in", .dev_id = "tegra20-spdif", .dt_id = TEGRA20_CLK_SPDIF_IN },
+ { .dev_id = "spi_tegra.0", .dt_id = TEGRA20_CLK_SBC1 },
+ { .dev_id = "spi_tegra.1", .dt_id = TEGRA20_CLK_SBC2 },
+ { .dev_id = "spi_tegra.2", .dt_id = TEGRA20_CLK_SBC3 },
+ { .dev_id = "spi_tegra.3", .dt_id = TEGRA20_CLK_SBC4 },
+ { .dev_id = "spi", .dt_id = TEGRA20_CLK_SPI },
+ { .dev_id = "xio", .dt_id = TEGRA20_CLK_XIO },
+ { .dev_id = "twc", .dt_id = TEGRA20_CLK_TWC },
+ { .dev_id = "ide", .dt_id = TEGRA20_CLK_IDE },
+ { .dev_id = "tegra_nand", .dt_id = TEGRA20_CLK_NDFLASH },
+ { .dev_id = "vfir", .dt_id = TEGRA20_CLK_VFIR },
+ { .dev_id = "csite", .dt_id = TEGRA20_CLK_CSITE },
+ { .dev_id = "la", .dt_id = TEGRA20_CLK_LA },
+ { .dev_id = "tegra_w1", .dt_id = TEGRA20_CLK_OWR },
+ { .dev_id = "mipi", .dt_id = TEGRA20_CLK_MIPI },
+ { .dev_id = "vde", .dt_id = TEGRA20_CLK_VDE },
+ { .con_id = "vi", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_VI },
+ { .dev_id = "epp", .dt_id = TEGRA20_CLK_EPP },
+ { .dev_id = "mpe", .dt_id = TEGRA20_CLK_MPE },
+ { .dev_id = "host1x", .dt_id = TEGRA20_CLK_HOST1X },
+ { .dev_id = "3d", .dt_id = TEGRA20_CLK_GR3D },
+ { .dev_id = "2d", .dt_id = TEGRA20_CLK_GR2D },
+ { .dev_id = "tegra-nor", .dt_id = TEGRA20_CLK_NOR },
+ { .dev_id = "sdhci-tegra.0", .dt_id = TEGRA20_CLK_SDMMC1 },
+ { .dev_id = "sdhci-tegra.1", .dt_id = TEGRA20_CLK_SDMMC2 },
+ { .dev_id = "sdhci-tegra.2", .dt_id = TEGRA20_CLK_SDMMC3 },
+ { .dev_id = "sdhci-tegra.3", .dt_id = TEGRA20_CLK_SDMMC4 },
+ { .dev_id = "cve", .dt_id = TEGRA20_CLK_CVE },
+ { .dev_id = "tvo", .dt_id = TEGRA20_CLK_TVO },
+ { .dev_id = "tvdac", .dt_id = TEGRA20_CLK_TVDAC },
+ { .con_id = "vi_sensor", .dev_id = "tegra_camera", .dt_id = TEGRA20_CLK_VI_SENSOR },
+ { .dev_id = "hdmi", .dt_id = TEGRA20_CLK_HDMI },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.0", .dt_id = TEGRA20_CLK_I2C1 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.1", .dt_id = TEGRA20_CLK_I2C2 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.2", .dt_id = TEGRA20_CLK_I2C3 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.3", .dt_id = TEGRA20_CLK_DVC },
+ { .dev_id = "tegra-pwm", .dt_id = TEGRA20_CLK_PWM },
+ { .dev_id = "tegra_uart.0", .dt_id = TEGRA20_CLK_UARTA },
+ { .dev_id = "tegra_uart.1", .dt_id = TEGRA20_CLK_UARTB },
+ { .dev_id = "tegra_uart.2", .dt_id = TEGRA20_CLK_UARTC },
+ { .dev_id = "tegra_uart.3", .dt_id = TEGRA20_CLK_UARTD },
+ { .dev_id = "tegra_uart.4", .dt_id = TEGRA20_CLK_UARTE },
+ { .dev_id = "tegradc.0", .dt_id = TEGRA20_CLK_DISP1 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA20_CLK_DISP2 },
};
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
+static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_spdif_out] = { .dt_id = TEGRA20_CLK_SPDIF_OUT, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA20_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA20_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA20_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA20_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA20_CLK_SDMMC4, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA20_CLK_LA, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA20_CLK_CSITE, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA20_CLK_VFIR, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA20_CLK_MIPI, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA20_CLK_NOR, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA20_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA20_CLK_TIMER, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA20_CLK_KBC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA20_CLK_CSUS, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA20_CLK_VCP, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA20_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA20_CLK_BSEV, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA20_CLK_USBD, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA20_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA20_CLK_USB3, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA20_CLK_CSI, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA20_CLK_ISP, .present = true },
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA20_CLK_CLK_32K, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA20_CLK_BLINK, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA20_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA20_CLK_PCLK, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA20_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA20_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA20_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA20_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA20_CLK_PLL_P, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA20_CLK_OWR, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA20_CLK_SBC1, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA20_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA20_CLK_SBC3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA20_CLK_SBC4, .present = true },
+ [tegra_clk_vde] = { .dt_id = TEGRA20_CLK_VDE, .present = true },
+ [tegra_clk_vi] = { .dt_id = TEGRA20_CLK_VI, .present = true },
+ [tegra_clk_epp] = { .dt_id = TEGRA20_CLK_EPP, .present = true },
+ [tegra_clk_mpe] = { .dt_id = TEGRA20_CLK_MPE, .present = true },
+ [tegra_clk_host1x] = { .dt_id = TEGRA20_CLK_HOST1X, .present = true },
+ [tegra_clk_gr2d] = { .dt_id = TEGRA20_CLK_GR2D, .present = true },
+ [tegra_clk_gr3d] = { .dt_id = TEGRA20_CLK_GR3D, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA20_CLK_NDFLASH, .present = true },
+ [tegra_clk_cve] = { .dt_id = TEGRA20_CLK_CVE, .present = true },
+ [tegra_clk_tvo] = { .dt_id = TEGRA20_CLK_TVO, .present = true },
+ [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@@ -577,10 +637,8 @@ static void tegra20_pll_init(void)
/* PLLC */
clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, NULL, 0,
- 0, &pll_c_params, TEGRA_PLL_HAS_CPCON,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
+ &pll_c_params, NULL);
+ clks[TEGRA20_CLK_PLL_C] = clk;
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
@@ -589,71 +647,13 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, NULL, 0,
- 216000000, &pll_p_params, TEGRA_PLL_FIXED |
- TEGRA_PLL_HAS_CPCON, pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 24, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0,
- TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
- 24, 8, 1, &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clks[TEGRA20_CLK_PLL_C_OUT1] = clk;
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLL_HAS_CPCON,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA20_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -662,42 +662,32 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
/* PLLX */
clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, NULL, 0,
- 0, &pll_x_params, TEGRA_PLL_HAS_CPCON,
- pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
+ &pll_x_params, NULL);
+ clks[TEGRA20_CLK_PLL_X] = clk;
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, NULL, 0,
- 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
- pll_u_freq_table, NULL);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, NULL);
+ clks[TEGRA20_CLK_PLL_U] = clk;
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, NULL, 0,
- 0, &pll_d_params, TEGRA_PLL_HAS_CPCON,
- pll_d_freq_table, NULL);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, NULL);
+ clks[TEGRA20_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA20_CLK_PLL_D_OUT0] = clk;
/* PLLA */
clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, NULL, 0,
- 0, &pll_a_params, TEGRA_PLL_HAS_CPCON,
- pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
+ &pll_a_params, NULL);
+ clks[TEGRA20_CLK_PLL_A] = clk;
/* PLLA_OUT0 */
clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
@@ -706,15 +696,12 @@ static void tegra20_pll_init(void)
clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA20_CLK_PLL_A_OUT0] = clk;
/* PLLE */
clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
- 0, 100000000, &pll_e_params,
- 0, pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e", NULL);
- clks[pll_e] = clk;
+ 0, &pll_e_params, NULL);
+ clks[TEGRA20_CLK_PLL_E] = clk;
}
static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -732,40 +719,17 @@ static void tegra20_super_clk_init(void)
clk = tegra_clk_register_super_mux("cclk", cclk_parents,
ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk", NULL);
- clks[cclk] = clk;
+ clks[TEGRA20_CLK_CCLK] = clk;
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + CLK_SYSTEM_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
- clk_base + CLK_SYSTEM_RATE, 7,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + CLK_SYSTEM_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
- clk_base + CLK_SYSTEM_RATE, 3,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
+ clks[TEGRA20_CLK_SCLK] = clk;
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
- clk_register_clkdev(clk, "twd", NULL);
- clks[twd] = clk;
+ clks[TEGRA20_CLK_TWD] = clk;
}
static const char *audio_parents[] = {"spdif_in", "i2s1", "i2s2", "unused",
@@ -784,18 +748,16 @@ static void __init tegra20_audio_clk_init(void)
clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
clk_base + AUDIO_SYNC_CLK, 4,
CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio", NULL);
- clks[audio] = clk;
+ clks[TEGRA20_CLK_AUDIO] = clk;
/* audio_2x */
clk = clk_register_fixed_factor(NULL, "audio_doubler", "audio",
CLK_SET_RATE_PARENT, 2, 1);
clk = tegra_clk_register_periph_gate("audio_2x", "audio_doubler",
TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 89, &periph_u_regs,
+ CLK_SET_RATE_PARENT, 89,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio_2x", NULL);
- clks[audio_2x] = clk;
+ clks[TEGRA20_CLK_AUDIO_2X] = clk;
}
@@ -803,68 +765,36 @@ static const char *i2s1_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
"clk_m"};
static const char *i2s2_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
"clk_m"};
-static const char *spdif_out_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
- "clk_m"};
-static const char *spdif_in_parents[] = {"pll_p", "pll_c", "pll_m"};
static const char *pwm_parents[] = {"pll_p", "pll_c", "audio", "clk_m",
"clk_32k"};
static const char *mux_pllpcm_clkm[] = {"pll_p", "pll_c", "pll_m", "clk_m"};
-static const char *mux_pllmcpa[] = {"pll_m", "pll_c", "pll_c", "pll_a"};
static const char *mux_pllpdc_clkm[] = {"pll_p", "pll_d_out0", "pll_c",
"clk_m"};
static const char *mux_pllmcp_clkm[] = {"pll_m", "pll_c", "pll_p", "clk_m"};
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra20-i2s.0", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra20-i2s.1", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra20-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra20-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("spi", NULL, "spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, &periph_h_regs, TEGRA_PERIPH_ON_APB, spi),
- TEGRA_INIT_DATA_MUX("xio", NULL, "xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, &periph_h_regs, 0, xio),
- TEGRA_INIT_DATA_MUX("twc", NULL, "twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, &periph_l_regs, TEGRA_PERIPH_ON_APB, twc),
- TEGRA_INIT_DATA_MUX("ide", NULL, "ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, &periph_l_regs, 0, ide),
- TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, 0, ndflash),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, 0, csite),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, 0, la),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_MUX("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_MUX("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_MUX("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_MUX("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
- TEGRA_INIT_DATA_MUX("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_MUX("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
- TEGRA_INIT_DATA_MUX("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
- TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
- TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
- TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
- TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
- TEGRA_INIT_DATA_DIV16("dvc", "div-clk", "tegra-i2c.3", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, dvc),
- TEGRA_INIT_DATA_MUX("hdmi", NULL, "hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA("pwm", NULL, "tegra-pwm", pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, TEGRA_PERIPH_ON_APB, pwm),
+ TEGRA_INIT_DATA_MUX("i2s1", i2s1_parents, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S1),
+ TEGRA_INIT_DATA_MUX("i2s2", i2s2_parents, CLK_SOURCE_I2S2, 18, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S2),
+ TEGRA_INIT_DATA_MUX("spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_SPI),
+ TEGRA_INIT_DATA_MUX("xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, 0, TEGRA20_CLK_XIO),
+ TEGRA_INIT_DATA_MUX("twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_TWC),
+ TEGRA_INIT_DATA_MUX("ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, 0, TEGRA20_CLK_IDE),
+ TEGRA_INIT_DATA_DIV16("dvc", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_DVC),
+ TEGRA_INIT_DATA_DIV16("i2c1", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C1),
+ TEGRA_INIT_DATA_DIV16("i2c2", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C2),
+ TEGRA_INIT_DATA_DIV16("i2c3", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2C3),
+ TEGRA_INIT_DATA_MUX("hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, 0, TEGRA20_CLK_HDMI),
+ TEGRA_INIT_DATA("pwm", NULL, NULL, pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, 17, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_PWM),
};
static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, &periph_l_regs, TEGRA_PERIPH_ON_APB, uarta),
- TEGRA_INIT_DATA_NODIV("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, uartb),
- TEGRA_INIT_DATA_NODIV("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, &periph_h_regs, TEGRA_PERIPH_ON_APB, uartc),
- TEGRA_INIT_DATA_NODIV("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, &periph_u_regs, TEGRA_PERIPH_ON_APB, uartd),
- TEGRA_INIT_DATA_NODIV("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, &periph_u_regs, TEGRA_PERIPH_ON_APB, uarte),
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, &periph_l_regs, 0, disp2),
+ TEGRA_INIT_DATA_NODIV("uarta", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTA),
+ TEGRA_INIT_DATA_NODIV("uartb", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTB),
+ TEGRA_INIT_DATA_NODIV("uartc", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTC),
+ TEGRA_INIT_DATA_NODIV("uartd", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTD),
+ TEGRA_INIT_DATA_NODIV("uarte", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_UARTE),
+ TEGRA_INIT_DATA_NODIV("disp1", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, 0, TEGRA20_CLK_DISP1),
+ TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
static void __init tegra20_periph_clk_init(void)
@@ -876,69 +806,13 @@ static void __init tegra20_periph_clk_init(void)
/* ac97 */
clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
TEGRA_PERIPH_ON_APB,
- clk_base, 0, 3, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra20-ac97");
- clks[ac97] = clk;
+ clk_base, 0, 3, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_AC97] = clk;
/* apbdma */
clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
- 0, 34, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-apbdma");
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_NO_RESET,
- clk_base, 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
- 0, 5, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-kbc");
- clks[kbc] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET,
- clk_base, 0, 92, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csus", "tengra_camera");
- clks[csus] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0,
- clk_base, 0, 29, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "vcp", "tegra-avp");
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0,
- clk_base, 0, 62, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsea", "tegra-avp");
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0,
- clk_base, 0, 63, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsev", "tegra-aes");
- clks[bsev] = clk;
+ 0, 34, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_APBDMA] = clk;
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -947,130 +821,52 @@ static void __init tegra20_periph_clk_init(void)
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "emc", NULL);
- clks[emc] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
- 22, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
- 58, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.1");
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
- 59, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.2");
- clks[usb3] = clk;
+ 57, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_EMC] = clk;
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
- 48, &periph_h_regs, periph_clk_enb_refcnt);
+ 48, periph_clk_enb_refcnt);
clk_register_clkdev(clk, NULL, "dsi");
- clks[dsi] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csi", "tegra_camera");
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "isp", "tegra_camera");
- clks[isp] = clk;
+ clks[TEGRA20_CLK_DSI] = clk;
/* pex */
clk = tegra_clk_register_periph_gate("pex", "clk_m", 0, clk_base, 0, 70,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pex", NULL);
- clks[pex] = clk;
-
- /* afi */
- clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "afi", NULL);
- clks[afi] = clk;
-
- /* pcie_xclk */
- clk = tegra_clk_register_periph_gate("pcie_xclk", "clk_m", 0, clk_base,
- 0, 74, &periph_u_regs,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pcie_xclk", NULL);
- clks[pcie_xclk] = clk;
+ clks[TEGRA20_CLK_PEX] = clk;
/* cdev1 */
clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
26000000);
clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
- clk_base, 0, 94, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "cdev1", NULL);
- clks[cdev1] = clk;
+ clk_base, 0, 94, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_CDEV1] = clk;
/* cdev2 */
clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
26000000);
clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
- clk_base, 0, 93, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "cdev2", NULL);
- clks[cdev2] = clk;
+ clk_base, 0, 93, periph_clk_enb_refcnt);
+ clks[TEGRA20_CLK_CDEV2] = clk;
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
+ clk = tegra_clk_register_periph(data->name, data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset, data->flags);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
data = &tegra_periph_nodiv_clk_list[i];
clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names,
+ data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
-}
-
-
-static void __init tegra20_fixed_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
-}
-
-static void __init tegra20_pmc_clk_init(void)
-{
- struct clk *clk;
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
+ tegra_periph_clk_init(clk_base, pmc_base, tegra20_clks, &pll_p_params);
}
static void __init tegra20_osc_clk_init(void)
@@ -1084,15 +880,13 @@ static void __init tegra20_osc_clk_init(void)
/* clk_m */
clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
CLK_IGNORE_UNUSED, input_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
+ clks[TEGRA20_CLK_CLK_M] = clk;
/* pll_ref */
pll_ref_div = tegra20_get_pll_ref_div();
clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ clks[TEGRA20_CLK_PLL_REF] = clk;
}
/* Tegra20 CPU clock and reset control functions */
@@ -1226,49 +1020,49 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
};
static struct tegra_clk_init_table init_table[] __initdata = {
- {pll_p, clk_max, 216000000, 1},
- {pll_p_out1, clk_max, 28800000, 1},
- {pll_p_out2, clk_max, 48000000, 1},
- {pll_p_out3, clk_max, 72000000, 1},
- {pll_p_out4, clk_max, 24000000, 1},
- {pll_c, clk_max, 600000000, 1},
- {pll_c_out1, clk_max, 120000000, 1},
- {sclk, pll_c_out1, 0, 1},
- {hclk, clk_max, 0, 1},
- {pclk, clk_max, 60000000, 1},
- {csite, clk_max, 0, 1},
- {emc, clk_max, 0, 1},
- {cclk, clk_max, 0, 1},
- {uarta, pll_p, 0, 0},
- {uartb, pll_p, 0, 0},
- {uartc, pll_p, 0, 0},
- {uartd, pll_p, 0, 0},
- {uarte, pll_p, 0, 0},
- {pll_a, clk_max, 56448000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {cdev1, clk_max, 0, 1},
- {blink, clk_max, 32768, 1},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {sdmmc1, pll_p, 48000000, 0},
- {sdmmc3, pll_p, 48000000, 0},
- {sdmmc4, pll_p, 48000000, 0},
- {spi, pll_p, 20000000, 0},
- {sbc1, pll_p, 100000000, 0},
- {sbc2, pll_p, 100000000, 0},
- {sbc3, pll_p, 100000000, 0},
- {sbc4, pll_p, 100000000, 0},
- {host1x, pll_c, 150000000, 0},
- {disp1, pll_p, 600000000, 0},
- {disp2, pll_p, 600000000, 0},
- {gr2d, pll_c, 300000000, 0},
- {gr3d, pll_c, 300000000, 0},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
+ {TEGRA20_CLK_PLL_P, TEGRA20_CLK_CLK_MAX, 216000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT1, TEGRA20_CLK_CLK_MAX, 28800000, 1},
+ {TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1},
+ {TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1},
+ {TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1},
+ {TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 120000000, 1},
+ {TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1},
+ {TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1},
+ {TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTC, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_UARTE, TEGRA20_CLK_PLL_P, 0, 0},
+ {TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 1},
+ {TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA20_CLK_CDEV1, TEGRA20_CLK_CLK_MAX, 0, 1},
+ {TEGRA20_CLK_BLINK, TEGRA20_CLK_CLK_MAX, 32768, 1},
+ {TEGRA20_CLK_I2S1, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA20_CLK_I2S2, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA20_CLK_SDMMC1, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SDMMC3, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SDMMC4, TEGRA20_CLK_PLL_P, 48000000, 0},
+ {TEGRA20_CLK_SPI, TEGRA20_CLK_PLL_P, 20000000, 0},
+ {TEGRA20_CLK_SBC1, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC2, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC3, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_SBC4, TEGRA20_CLK_PLL_P, 100000000, 0},
+ {TEGRA20_CLK_HOST1X, TEGRA20_CLK_PLL_C, 150000000, 0},
+ {TEGRA20_CLK_DISP1, TEGRA20_CLK_PLL_P, 600000000, 0},
+ {TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0},
+ {TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0},
+ {TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0},
+ {TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry */
};
static void __init tegra20_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
}
/*
@@ -1277,11 +1071,11 @@ static void __init tegra20_clock_apply_init_table(void)
* table under two names.
*/
static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
- TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
- TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
- TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_USBD, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_CCLK, NULL, "cpu"),
+ TEGRA_CLK_DUPLICATE(TEGRA20_CLK_CLK_MAX, NULL, NULL), /* Must be the last entry */
};
static const struct of_device_id pmc_match[] __initconst = {
@@ -1291,7 +1085,6 @@ static const struct of_device_id pmc_match[] __initconst = {
static void __init tegra20_clock_init(struct device_node *np)
{
- int i;
struct device_node *node;
clk_base = of_iomap(np, 0);
@@ -1312,30 +1105,24 @@ static void __init tegra20_clock_init(struct device_node *np)
BUG();
}
+ clks = tegra_clk_init(clk_base, TEGRA20_CLK_CLK_MAX,
+ TEGRA20_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
tegra20_osc_clk_init();
- tegra20_pmc_clk_init();
- tegra20_fixed_clk_init();
+ tegra_fixed_clk_init(tegra20_clks);
tegra20_pll_init();
tegra20_super_clk_init();
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra20_clks, NULL);
tegra20_periph_clk_init();
tegra20_audio_clk_init();
+ tegra_pmc_clk_init(pmc_base, tegra20_clks);
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err("Tegra20 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- BUG();
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
-
- tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index dbe7c8003c5c..8b10c38b6e3c 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -23,42 +23,9 @@
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
#include <linux/tegra-powergate.h>
-
+#include <dt-bindings/clock/tegra30-car.h>
#include "clk.h"
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00c
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35c
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
-#define RST_DEVICES_NUM 5
-
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_NUM 5
+#include "clk-id.h"
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
@@ -92,6 +59,8 @@
#define SYSTEM_CLK_RATE 0x030
+#define TEGRA30_CLK_PERIPH_BANKS 5
+
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
#define PLLM_BASE 0x90
@@ -132,88 +101,21 @@
#define AUDIO_SYNC_CLK_I2S4 0x4b0
#define AUDIO_SYNC_CLK_SPDIF 0x4b4
-#define PMC_CLK_OUT_CNTRL 0x1a8
-
-#define CLK_SOURCE_I2S0 0x1d8
-#define CLK_SOURCE_I2S1 0x100
-#define CLK_SOURCE_I2S2 0x104
-#define CLK_SOURCE_I2S3 0x3bc
-#define CLK_SOURCE_I2S4 0x3c0
#define CLK_SOURCE_SPDIF_OUT 0x108
-#define CLK_SOURCE_SPDIF_IN 0x10c
#define CLK_SOURCE_PWM 0x110
#define CLK_SOURCE_D_AUDIO 0x3d0
#define CLK_SOURCE_DAM0 0x3d8
#define CLK_SOURCE_DAM1 0x3dc
#define CLK_SOURCE_DAM2 0x3e0
-#define CLK_SOURCE_HDA 0x428
-#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
-#define CLK_SOURCE_SBC1 0x134
-#define CLK_SOURCE_SBC2 0x118
-#define CLK_SOURCE_SBC3 0x11c
-#define CLK_SOURCE_SBC4 0x1b4
-#define CLK_SOURCE_SBC5 0x3c8
-#define CLK_SOURCE_SBC6 0x3cc
-#define CLK_SOURCE_SATA_OOB 0x420
-#define CLK_SOURCE_SATA 0x424
-#define CLK_SOURCE_NDFLASH 0x160
-#define CLK_SOURCE_NDSPEED 0x3f8
-#define CLK_SOURCE_VFIR 0x168
-#define CLK_SOURCE_SDMMC1 0x150
-#define CLK_SOURCE_SDMMC2 0x154
-#define CLK_SOURCE_SDMMC3 0x1bc
-#define CLK_SOURCE_SDMMC4 0x164
-#define CLK_SOURCE_VDE 0x1c8
-#define CLK_SOURCE_CSITE 0x1d4
-#define CLK_SOURCE_LA 0x1f8
-#define CLK_SOURCE_OWR 0x1cc
-#define CLK_SOURCE_NOR 0x1d0
-#define CLK_SOURCE_MIPI 0x174
-#define CLK_SOURCE_I2C1 0x124
-#define CLK_SOURCE_I2C2 0x198
-#define CLK_SOURCE_I2C3 0x1b8
-#define CLK_SOURCE_I2C4 0x3c4
-#define CLK_SOURCE_I2C5 0x128
-#define CLK_SOURCE_UARTA 0x178
-#define CLK_SOURCE_UARTB 0x17c
-#define CLK_SOURCE_UARTC 0x1a0
-#define CLK_SOURCE_UARTD 0x1c0
-#define CLK_SOURCE_UARTE 0x1c4
-#define CLK_SOURCE_VI 0x148
-#define CLK_SOURCE_VI_SENSOR 0x1a8
-#define CLK_SOURCE_3D 0x158
#define CLK_SOURCE_3D2 0x3b0
#define CLK_SOURCE_2D 0x15c
-#define CLK_SOURCE_EPP 0x16c
-#define CLK_SOURCE_MPE 0x170
-#define CLK_SOURCE_HOST1X 0x180
-#define CLK_SOURCE_CVE 0x140
-#define CLK_SOURCE_TVO 0x188
-#define CLK_SOURCE_DTV 0x1dc
#define CLK_SOURCE_HDMI 0x18c
-#define CLK_SOURCE_TVDAC 0x194
-#define CLK_SOURCE_DISP1 0x138
-#define CLK_SOURCE_DISP2 0x13c
#define CLK_SOURCE_DSIB 0xd0
-#define CLK_SOURCE_TSENSOR 0x3b8
-#define CLK_SOURCE_ACTMON 0x3e8
-#define CLK_SOURCE_EXTERN1 0x3ec
-#define CLK_SOURCE_EXTERN2 0x3f0
-#define CLK_SOURCE_EXTERN3 0x3f4
-#define CLK_SOURCE_I2CSLOW 0x3fc
#define CLK_SOURCE_SE 0x42c
-#define CLK_SOURCE_MSELECT 0x3b4
#define CLK_SOURCE_EMC 0x19c
#define AUDIO_SYNC_DOUBLER 0x49c
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_BLINK_TIMER 0x40
-
#define UTMIP_PLL_CFG2 0x488
#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
@@ -266,89 +168,41 @@ static struct cpu_clk_suspend_context {
} tegra30_cpu_clk_sctx;
#endif
-static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
-
static void __iomem *clk_base;
static void __iomem *pmc_base;
static unsigned long input_freq;
-static DEFINE_SPINLOCK(clk_doubler_lock);
-static DEFINE_SPINLOCK(clk_out_lock);
-static DEFINE_SPINLOCK(pll_div_lock);
static DEFINE_SPINLOCK(cml_lock);
static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(sysrate_lock);
-
-#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
- _regs, _clk_num, periph_clk_enb_refcnt, \
- _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 29, 3, 0, 0, 8, 1, 0, _regs, _clk_num, \
- periph_clk_enb_refcnt, _gate_flags, _clk_id)
-
-#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
- _clk_num, _regs, _gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
- _clk_id)
-#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
- _clk_num, _regs, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- 30, 2, 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs, \
- _clk_num, periph_clk_enb_refcnt, 0, _clk_id)
+#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 29, 3, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _clk_num, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _parents, _offset, \
+ _clk_num, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT | \
+ TEGRA_DIVIDER_ROUND_UP, _clk_num, \
+ _gate_flags, _clk_id)
-#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, _clk_num, _regs, \
+#define TEGRA_INIT_DATA_NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, \
_gate_flags, _clk_id) \
- TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
- _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
- _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0,\
+ _clk_num, _gate_flags, \
_clk_id)
-/*
- * IDs assigned here must be in sync with DT bindings definition
- * for Tegra30 clocks.
- */
-enum tegra30_clk {
- cpu, rtc = 4, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1, ndflash,
- sdmmc1, sdmmc4, pwm = 17, i2s2, epp, gr2d = 21, usbd, isp, gr3d,
- disp2 = 26, disp1, host1x, vcp, i2s0, cop_cache, mc, ahbdma, apbdma,
- kbc = 36, statmon, pmc, kfuse = 40, sbc1, nor, sbc2 = 44, sbc3 = 46,
- i2c5, dsia, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
- usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
- pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
- dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
- cdev2, cdev1, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
- i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
- atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
- spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
- se = 127, hda2hdmi, sata_cold, uartb = 160, vfir, spdif_in, spdif_out,
- vi, vi_sensor, fuse, fuse_burn, cve, tvo, clk_32k, clk_m, clk_m_div2,
- clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_m, pll_m_out1, pll_p,
- pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_a, pll_a_out0,
- pll_d, pll_d_out0, pll_d2, pll_d2_out0, pll_u, pll_x, pll_x_out0, pll_e,
- spdif_in_sync, i2s0_sync, i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync,
- vimclk_sync, audio0, audio1, audio2, audio3, audio4, spdif, clk_out_1,
- clk_out_2, clk_out_3, sclk, blink, cclk_g, cclk_lp, twd, cml0, cml1,
- hclk, pclk, clk_out_1_mux = 300, clk_max
-};
-
-static struct clk *clks[clk_max];
-static struct clk_onecell_data clk_data;
+static struct clk **clks;
/*
* Structure defining the fields for USB UTMI clocks Parameters.
@@ -564,6 +418,8 @@ static struct tegra_clk_pll_params pll_c_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_c_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct div_nmp pllm_nmp = {
@@ -593,6 +449,9 @@ static struct tegra_clk_pll_params pll_m_params = {
.div_nmp = &pllm_nmp,
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE,
+ .freq_table = pll_m_freq_table,
+ .flags = TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_p_params = {
@@ -607,6 +466,9 @@ static struct tegra_clk_pll_params pll_p_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_p_freq_table,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+ .fixed_rate = 408000000,
};
static struct tegra_clk_pll_params pll_a_params = {
@@ -621,6 +483,8 @@ static struct tegra_clk_pll_params pll_a_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_a_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_d_params = {
@@ -635,6 +499,10 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
+
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -649,6 +517,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
+ .freq_table = pll_d_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_u_params = {
@@ -664,6 +535,8 @@ static struct tegra_clk_pll_params pll_u_params = {
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.pdiv_tohw = pllu_p,
+ .freq_table = pll_u_freq_table,
+ .flags = TEGRA_PLLU | TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON,
};
static struct tegra_clk_pll_params pll_x_params = {
@@ -678,6 +551,9 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_x_freq_table,
+ .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_DCCON |
+ TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -692,116 +568,299 @@ static struct tegra_clk_pll_params pll_e_params = {
.lock_mask = PLLE_MISC_LOCK,
.lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
.lock_delay = 300,
+ .freq_table = pll_e_freq_table,
+ .flags = TEGRA_PLLE_CONFIGURE | TEGRA_PLL_FIXED,
+ .fixed_rate = 100000000,
};
-/* Peripheral clock registers */
-static struct tegra_clk_periph_regs periph_l_regs = {
- .enb_reg = CLK_OUT_ENB_L,
- .enb_set_reg = CLK_OUT_ENB_SET_L,
- .enb_clr_reg = CLK_OUT_ENB_CLR_L,
- .rst_reg = RST_DEVICES_L,
- .rst_set_reg = RST_DEVICES_SET_L,
- .rst_clr_reg = RST_DEVICES_CLR_L,
+static unsigned long tegra30_input_freq[] = {
+ [0] = 13000000,
+ [1] = 16800000,
+ [4] = 19200000,
+ [5] = 38400000,
+ [8] = 12000000,
+ [9] = 48000000,
+ [12] = 260000000,
};
-static struct tegra_clk_periph_regs periph_h_regs = {
- .enb_reg = CLK_OUT_ENB_H,
- .enb_set_reg = CLK_OUT_ENB_SET_H,
- .enb_clr_reg = CLK_OUT_ENB_CLR_H,
- .rst_reg = RST_DEVICES_H,
- .rst_set_reg = RST_DEVICES_SET_H,
- .rst_clr_reg = RST_DEVICES_CLR_H,
+static struct tegra_devclk devclks[] __initdata = {
+ { .con_id = "pll_c", .dt_id = TEGRA30_CLK_PLL_C },
+ { .con_id = "pll_c_out1", .dt_id = TEGRA30_CLK_PLL_C_OUT1 },
+ { .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
+ { .con_id = "pll_p_out1", .dt_id = TEGRA30_CLK_PLL_P_OUT1 },
+ { .con_id = "pll_p_out2", .dt_id = TEGRA30_CLK_PLL_P_OUT2 },
+ { .con_id = "pll_p_out3", .dt_id = TEGRA30_CLK_PLL_P_OUT3 },
+ { .con_id = "pll_p_out4", .dt_id = TEGRA30_CLK_PLL_P_OUT4 },
+ { .con_id = "pll_m", .dt_id = TEGRA30_CLK_PLL_M },
+ { .con_id = "pll_m_out1", .dt_id = TEGRA30_CLK_PLL_M_OUT1 },
+ { .con_id = "pll_x", .dt_id = TEGRA30_CLK_PLL_X },
+ { .con_id = "pll_x_out0", .dt_id = TEGRA30_CLK_PLL_X_OUT0 },
+ { .con_id = "pll_u", .dt_id = TEGRA30_CLK_PLL_U },
+ { .con_id = "pll_d", .dt_id = TEGRA30_CLK_PLL_D },
+ { .con_id = "pll_d_out0", .dt_id = TEGRA30_CLK_PLL_D_OUT0 },
+ { .con_id = "pll_d2", .dt_id = TEGRA30_CLK_PLL_D2 },
+ { .con_id = "pll_d2_out0", .dt_id = TEGRA30_CLK_PLL_D2_OUT0 },
+ { .con_id = "pll_a", .dt_id = TEGRA30_CLK_PLL_A },
+ { .con_id = "pll_a_out0", .dt_id = TEGRA30_CLK_PLL_A_OUT0 },
+ { .con_id = "pll_e", .dt_id = TEGRA30_CLK_PLL_E },
+ { .con_id = "spdif_in_sync", .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC },
+ { .con_id = "i2s0_sync", .dt_id = TEGRA30_CLK_I2S0_SYNC },
+ { .con_id = "i2s1_sync", .dt_id = TEGRA30_CLK_I2S1_SYNC },
+ { .con_id = "i2s2_sync", .dt_id = TEGRA30_CLK_I2S2_SYNC },
+ { .con_id = "i2s3_sync", .dt_id = TEGRA30_CLK_I2S3_SYNC },
+ { .con_id = "i2s4_sync", .dt_id = TEGRA30_CLK_I2S4_SYNC },
+ { .con_id = "vimclk_sync", .dt_id = TEGRA30_CLK_VIMCLK_SYNC },
+ { .con_id = "audio0", .dt_id = TEGRA30_CLK_AUDIO0 },
+ { .con_id = "audio1", .dt_id = TEGRA30_CLK_AUDIO1 },
+ { .con_id = "audio2", .dt_id = TEGRA30_CLK_AUDIO2 },
+ { .con_id = "audio3", .dt_id = TEGRA30_CLK_AUDIO3 },
+ { .con_id = "audio4", .dt_id = TEGRA30_CLK_AUDIO4 },
+ { .con_id = "spdif", .dt_id = TEGRA30_CLK_SPDIF },
+ { .con_id = "audio0_2x", .dt_id = TEGRA30_CLK_AUDIO0_2X },
+ { .con_id = "audio1_2x", .dt_id = TEGRA30_CLK_AUDIO1_2X },
+ { .con_id = "audio2_2x", .dt_id = TEGRA30_CLK_AUDIO2_2X },
+ { .con_id = "audio3_2x", .dt_id = TEGRA30_CLK_AUDIO3_2X },
+ { .con_id = "audio4_2x", .dt_id = TEGRA30_CLK_AUDIO4_2X },
+ { .con_id = "spdif_2x", .dt_id = TEGRA30_CLK_SPDIF_2X },
+ { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA30_CLK_EXTERN3 },
+ { .con_id = "blink", .dt_id = TEGRA30_CLK_BLINK },
+ { .con_id = "cclk_g", .dt_id = TEGRA30_CLK_CCLK_G },
+ { .con_id = "cclk_lp", .dt_id = TEGRA30_CLK_CCLK_LP },
+ { .con_id = "sclk", .dt_id = TEGRA30_CLK_SCLK },
+ { .con_id = "hclk", .dt_id = TEGRA30_CLK_HCLK },
+ { .con_id = "pclk", .dt_id = TEGRA30_CLK_PCLK },
+ { .con_id = "twd", .dt_id = TEGRA30_CLK_TWD },
+ { .con_id = "emc", .dt_id = TEGRA30_CLK_EMC },
+ { .con_id = "clk_32k", .dt_id = TEGRA30_CLK_CLK_32K },
+ { .con_id = "clk_m_div2", .dt_id = TEGRA30_CLK_CLK_M_DIV2 },
+ { .con_id = "clk_m_div4", .dt_id = TEGRA30_CLK_CLK_M_DIV4 },
+ { .con_id = "cml0", .dt_id = TEGRA30_CLK_CML0 },
+ { .con_id = "cml1", .dt_id = TEGRA30_CLK_CML1 },
+ { .con_id = "clk_m", .dt_id = TEGRA30_CLK_CLK_M },
+ { .con_id = "pll_ref", .dt_id = TEGRA30_CLK_PLL_REF },
+ { .con_id = "csus", .dev_id = "tengra_camera", .dt_id = TEGRA30_CLK_CSUS },
+ { .con_id = "vcp", .dev_id = "tegra-avp", .dt_id = TEGRA30_CLK_VCP },
+ { .con_id = "bsea", .dev_id = "tegra-avp", .dt_id = TEGRA30_CLK_BSEA },
+ { .con_id = "bsev", .dev_id = "tegra-aes", .dt_id = TEGRA30_CLK_BSEV },
+ { .con_id = "dsia", .dev_id = "tegradc.0", .dt_id = TEGRA30_CLK_DSIA },
+ { .con_id = "csi", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_CSI },
+ { .con_id = "isp", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_ISP },
+ { .con_id = "pcie", .dev_id = "tegra-pcie", .dt_id = TEGRA30_CLK_PCIE },
+ { .con_id = "afi", .dev_id = "tegra-pcie", .dt_id = TEGRA30_CLK_AFI },
+ { .con_id = "fuse", .dt_id = TEGRA30_CLK_FUSE },
+ { .con_id = "fuse_burn", .dev_id = "fuse-tegra", .dt_id = TEGRA30_CLK_FUSE_BURN },
+ { .con_id = "apbif", .dev_id = "tegra30-ahub", .dt_id = TEGRA30_CLK_APBIF },
+ { .con_id = "hda2hdmi", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2HDMI },
+ { .dev_id = "tegra-apbdma", .dt_id = TEGRA30_CLK_APBDMA },
+ { .dev_id = "rtc-tegra", .dt_id = TEGRA30_CLK_RTC },
+ { .dev_id = "timer", .dt_id = TEGRA30_CLK_TIMER },
+ { .dev_id = "tegra-kbc", .dt_id = TEGRA30_CLK_KBC },
+ { .dev_id = "fsl-tegra-udc", .dt_id = TEGRA30_CLK_USBD },
+ { .dev_id = "tegra-ehci.1", .dt_id = TEGRA30_CLK_USB2 },
+ { .dev_id = "tegra-ehci.2", .dt_id = TEGRA30_CLK_USB2 },
+ { .dev_id = "kfuse-tegra", .dt_id = TEGRA30_CLK_KFUSE },
+ { .dev_id = "tegra_sata_cold", .dt_id = TEGRA30_CLK_SATA_COLD },
+ { .dev_id = "dtv", .dt_id = TEGRA30_CLK_DTV },
+ { .dev_id = "tegra30-i2s.0", .dt_id = TEGRA30_CLK_I2S0 },
+ { .dev_id = "tegra30-i2s.1", .dt_id = TEGRA30_CLK_I2S1 },
+ { .dev_id = "tegra30-i2s.2", .dt_id = TEGRA30_CLK_I2S2 },
+ { .dev_id = "tegra30-i2s.3", .dt_id = TEGRA30_CLK_I2S3 },
+ { .dev_id = "tegra30-i2s.4", .dt_id = TEGRA30_CLK_I2S4 },
+ { .con_id = "spdif_out", .dev_id = "tegra30-spdif", .dt_id = TEGRA30_CLK_SPDIF_OUT },
+ { .con_id = "spdif_in", .dev_id = "tegra30-spdif", .dt_id = TEGRA30_CLK_SPDIF_IN },
+ { .con_id = "d_audio", .dev_id = "tegra30-ahub", .dt_id = TEGRA30_CLK_D_AUDIO },
+ { .dev_id = "tegra30-dam.0", .dt_id = TEGRA30_CLK_DAM0 },
+ { .dev_id = "tegra30-dam.1", .dt_id = TEGRA30_CLK_DAM1 },
+ { .dev_id = "tegra30-dam.2", .dt_id = TEGRA30_CLK_DAM2 },
+ { .con_id = "hda", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA },
+ { .con_id = "hda2codec", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X },
+ { .dev_id = "spi_tegra.0", .dt_id = TEGRA30_CLK_SBC1 },
+ { .dev_id = "spi_tegra.1", .dt_id = TEGRA30_CLK_SBC2 },
+ { .dev_id = "spi_tegra.2", .dt_id = TEGRA30_CLK_SBC3 },
+ { .dev_id = "spi_tegra.3", .dt_id = TEGRA30_CLK_SBC4 },
+ { .dev_id = "spi_tegra.4", .dt_id = TEGRA30_CLK_SBC5 },
+ { .dev_id = "spi_tegra.5", .dt_id = TEGRA30_CLK_SBC6 },
+ { .dev_id = "tegra_sata_oob", .dt_id = TEGRA30_CLK_SATA_OOB },
+ { .dev_id = "tegra_sata", .dt_id = TEGRA30_CLK_SATA },
+ { .dev_id = "tegra_nand", .dt_id = TEGRA30_CLK_NDFLASH },
+ { .dev_id = "tegra_nand_speed", .dt_id = TEGRA30_CLK_NDSPEED },
+ { .dev_id = "vfir", .dt_id = TEGRA30_CLK_VFIR },
+ { .dev_id = "csite", .dt_id = TEGRA30_CLK_CSITE },
+ { .dev_id = "la", .dt_id = TEGRA30_CLK_LA },
+ { .dev_id = "tegra_w1", .dt_id = TEGRA30_CLK_OWR },
+ { .dev_id = "mipi", .dt_id = TEGRA30_CLK_MIPI },
+ { .dev_id = "tegra-tsensor", .dt_id = TEGRA30_CLK_TSENSOR },
+ { .dev_id = "i2cslow", .dt_id = TEGRA30_CLK_I2CSLOW },
+ { .dev_id = "vde", .dt_id = TEGRA30_CLK_VDE },
+ { .con_id = "vi", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_VI },
+ { .dev_id = "epp", .dt_id = TEGRA30_CLK_EPP },
+ { .dev_id = "mpe", .dt_id = TEGRA30_CLK_MPE },
+ { .dev_id = "host1x", .dt_id = TEGRA30_CLK_HOST1X },
+ { .dev_id = "3d", .dt_id = TEGRA30_CLK_GR3D },
+ { .dev_id = "3d2", .dt_id = TEGRA30_CLK_GR3D2 },
+ { .dev_id = "2d", .dt_id = TEGRA30_CLK_GR2D },
+ { .dev_id = "se", .dt_id = TEGRA30_CLK_SE },
+ { .dev_id = "mselect", .dt_id = TEGRA30_CLK_MSELECT },
+ { .dev_id = "tegra-nor", .dt_id = TEGRA30_CLK_NOR },
+ { .dev_id = "sdhci-tegra.0", .dt_id = TEGRA30_CLK_SDMMC1 },
+ { .dev_id = "sdhci-tegra.1", .dt_id = TEGRA30_CLK_SDMMC2 },
+ { .dev_id = "sdhci-tegra.2", .dt_id = TEGRA30_CLK_SDMMC3 },
+ { .dev_id = "sdhci-tegra.3", .dt_id = TEGRA30_CLK_SDMMC4 },
+ { .dev_id = "cve", .dt_id = TEGRA30_CLK_CVE },
+ { .dev_id = "tvo", .dt_id = TEGRA30_CLK_TVO },
+ { .dev_id = "tvdac", .dt_id = TEGRA30_CLK_TVDAC },
+ { .dev_id = "actmon", .dt_id = TEGRA30_CLK_ACTMON },
+ { .con_id = "vi_sensor", .dev_id = "tegra_camera", .dt_id = TEGRA30_CLK_VI_SENSOR },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.0", .dt_id = TEGRA30_CLK_I2C1 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.1", .dt_id = TEGRA30_CLK_I2C2 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.2", .dt_id = TEGRA30_CLK_I2C3 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.3", .dt_id = TEGRA30_CLK_I2C4 },
+ { .con_id = "div-clk", .dev_id = "tegra-i2c.4", .dt_id = TEGRA30_CLK_I2C5 },
+ { .dev_id = "tegra_uart.0", .dt_id = TEGRA30_CLK_UARTA },
+ { .dev_id = "tegra_uart.1", .dt_id = TEGRA30_CLK_UARTB },
+ { .dev_id = "tegra_uart.2", .dt_id = TEGRA30_CLK_UARTC },
+ { .dev_id = "tegra_uart.3", .dt_id = TEGRA30_CLK_UARTD },
+ { .dev_id = "tegra_uart.4", .dt_id = TEGRA30_CLK_UARTE },
+ { .dev_id = "hdmi", .dt_id = TEGRA30_CLK_HDMI },
+ { .dev_id = "extern1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .dev_id = "extern2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .dev_id = "extern3", .dt_id = TEGRA30_CLK_EXTERN3 },
+ { .dev_id = "pwm", .dt_id = TEGRA30_CLK_PWM },
+ { .dev_id = "tegradc.0", .dt_id = TEGRA30_CLK_DISP1 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA30_CLK_DISP2 },
+ { .dev_id = "tegradc.1", .dt_id = TEGRA30_CLK_DSIB },
};
-static struct tegra_clk_periph_regs periph_u_regs = {
- .enb_reg = CLK_OUT_ENB_U,
- .enb_set_reg = CLK_OUT_ENB_SET_U,
- .enb_clr_reg = CLK_OUT_ENB_CLR_U,
- .rst_reg = RST_DEVICES_U,
- .rst_set_reg = RST_DEVICES_SET_U,
- .rst_clr_reg = RST_DEVICES_CLR_U,
-};
+static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_clk_32k] = { .dt_id = TEGRA30_CLK_CLK_32K, .present = true },
+ [tegra_clk_clk_m] = { .dt_id = TEGRA30_CLK_CLK_M, .present = true },
+ [tegra_clk_clk_m_div2] = { .dt_id = TEGRA30_CLK_CLK_M_DIV2, .present = true },
+ [tegra_clk_clk_m_div4] = { .dt_id = TEGRA30_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_pll_ref] = { .dt_id = TEGRA30_CLK_PLL_REF, .present = true },
+ [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC, .present = true },
+ [tegra_clk_i2s0_sync] = { .dt_id = TEGRA30_CLK_I2S0_SYNC, .present = true },
+ [tegra_clk_i2s1_sync] = { .dt_id = TEGRA30_CLK_I2S1_SYNC, .present = true },
+ [tegra_clk_i2s2_sync] = { .dt_id = TEGRA30_CLK_I2S2_SYNC, .present = true },
+ [tegra_clk_i2s3_sync] = { .dt_id = TEGRA30_CLK_I2S3_SYNC, .present = true },
+ [tegra_clk_i2s4_sync] = { .dt_id = TEGRA30_CLK_I2S4_SYNC, .present = true },
+ [tegra_clk_vimclk_sync] = { .dt_id = TEGRA30_CLK_VIMCLK_SYNC, .present = true },
+ [tegra_clk_audio0] = { .dt_id = TEGRA30_CLK_AUDIO0, .present = true },
+ [tegra_clk_audio1] = { .dt_id = TEGRA30_CLK_AUDIO1, .present = true },
+ [tegra_clk_audio2] = { .dt_id = TEGRA30_CLK_AUDIO2, .present = true },
+ [tegra_clk_audio3] = { .dt_id = TEGRA30_CLK_AUDIO3, .present = true },
+ [tegra_clk_audio4] = { .dt_id = TEGRA30_CLK_AUDIO4, .present = true },
+ [tegra_clk_spdif] = { .dt_id = TEGRA30_CLK_SPDIF, .present = true },
+ [tegra_clk_audio0_mux] = { .dt_id = TEGRA30_CLK_AUDIO0_MUX, .present = true },
+ [tegra_clk_audio1_mux] = { .dt_id = TEGRA30_CLK_AUDIO1_MUX, .present = true },
+ [tegra_clk_audio2_mux] = { .dt_id = TEGRA30_CLK_AUDIO2_MUX, .present = true },
+ [tegra_clk_audio3_mux] = { .dt_id = TEGRA30_CLK_AUDIO3_MUX, .present = true },
+ [tegra_clk_audio4_mux] = { .dt_id = TEGRA30_CLK_AUDIO4_MUX, .present = true },
+ [tegra_clk_spdif_mux] = { .dt_id = TEGRA30_CLK_SPDIF_MUX, .present = true },
+ [tegra_clk_audio0_2x] = { .dt_id = TEGRA30_CLK_AUDIO0_2X, .present = true },
+ [tegra_clk_audio1_2x] = { .dt_id = TEGRA30_CLK_AUDIO1_2X, .present = true },
+ [tegra_clk_audio2_2x] = { .dt_id = TEGRA30_CLK_AUDIO2_2X, .present = true },
+ [tegra_clk_audio3_2x] = { .dt_id = TEGRA30_CLK_AUDIO3_2X, .present = true },
+ [tegra_clk_audio4_2x] = { .dt_id = TEGRA30_CLK_AUDIO4_2X, .present = true },
+ [tegra_clk_spdif_2x] = { .dt_id = TEGRA30_CLK_SPDIF_2X, .present = true },
+ [tegra_clk_clk_out_1] = { .dt_id = TEGRA30_CLK_CLK_OUT_1, .present = true },
+ [tegra_clk_clk_out_2] = { .dt_id = TEGRA30_CLK_CLK_OUT_2, .present = true },
+ [tegra_clk_clk_out_3] = { .dt_id = TEGRA30_CLK_CLK_OUT_3, .present = true },
+ [tegra_clk_blink] = { .dt_id = TEGRA30_CLK_BLINK, .present = true },
+ [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_1_MUX, .present = true },
+ [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_2_MUX, .present = true },
+ [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_3_MUX, .present = true },
+ [tegra_clk_hclk] = { .dt_id = TEGRA30_CLK_HCLK, .present = true },
+ [tegra_clk_pclk] = { .dt_id = TEGRA30_CLK_PCLK, .present = true },
+ [tegra_clk_i2s0] = { .dt_id = TEGRA30_CLK_I2S0, .present = true },
+ [tegra_clk_i2s1] = { .dt_id = TEGRA30_CLK_I2S1, .present = true },
+ [tegra_clk_i2s2] = { .dt_id = TEGRA30_CLK_I2S2, .present = true },
+ [tegra_clk_i2s3] = { .dt_id = TEGRA30_CLK_I2S3, .present = true },
+ [tegra_clk_i2s4] = { .dt_id = TEGRA30_CLK_I2S4, .present = true },
+ [tegra_clk_spdif_in] = { .dt_id = TEGRA30_CLK_SPDIF_IN, .present = true },
+ [tegra_clk_hda] = { .dt_id = TEGRA30_CLK_HDA, .present = true },
+ [tegra_clk_hda2codec_2x] = { .dt_id = TEGRA30_CLK_HDA2CODEC_2X, .present = true },
+ [tegra_clk_sbc1] = { .dt_id = TEGRA30_CLK_SBC1, .present = true },
+ [tegra_clk_sbc2] = { .dt_id = TEGRA30_CLK_SBC2, .present = true },
+ [tegra_clk_sbc3] = { .dt_id = TEGRA30_CLK_SBC3, .present = true },
+ [tegra_clk_sbc4] = { .dt_id = TEGRA30_CLK_SBC4, .present = true },
+ [tegra_clk_sbc5] = { .dt_id = TEGRA30_CLK_SBC5, .present = true },
+ [tegra_clk_sbc6] = { .dt_id = TEGRA30_CLK_SBC6, .present = true },
+ [tegra_clk_ndflash] = { .dt_id = TEGRA30_CLK_NDFLASH, .present = true },
+ [tegra_clk_ndspeed] = { .dt_id = TEGRA30_CLK_NDSPEED, .present = true },
+ [tegra_clk_vfir] = { .dt_id = TEGRA30_CLK_VFIR, .present = true },
+ [tegra_clk_la] = { .dt_id = TEGRA30_CLK_LA, .present = true },
+ [tegra_clk_csite] = { .dt_id = TEGRA30_CLK_CSITE, .present = true },
+ [tegra_clk_owr] = { .dt_id = TEGRA30_CLK_OWR, .present = true },
+ [tegra_clk_mipi] = { .dt_id = TEGRA30_CLK_MIPI, .present = true },
+ [tegra_clk_tsensor] = { .dt_id = TEGRA30_CLK_TSENSOR, .present = true },
+ [tegra_clk_i2cslow] = { .dt_id = TEGRA30_CLK_I2CSLOW, .present = true },
+ [tegra_clk_vde] = { .dt_id = TEGRA30_CLK_VDE, .present = true },
+ [tegra_clk_vi] = { .dt_id = TEGRA30_CLK_VI, .present = true },
+ [tegra_clk_epp] = { .dt_id = TEGRA30_CLK_EPP, .present = true },
+ [tegra_clk_mpe] = { .dt_id = TEGRA30_CLK_MPE, .present = true },
+ [tegra_clk_host1x] = { .dt_id = TEGRA30_CLK_HOST1X, .present = true },
+ [tegra_clk_gr2d] = { .dt_id = TEGRA30_CLK_GR2D, .present = true },
+ [tegra_clk_gr3d] = { .dt_id = TEGRA30_CLK_GR3D, .present = true },
+ [tegra_clk_mselect] = { .dt_id = TEGRA30_CLK_MSELECT, .present = true },
+ [tegra_clk_nor] = { .dt_id = TEGRA30_CLK_NOR, .present = true },
+ [tegra_clk_sdmmc1] = { .dt_id = TEGRA30_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc2] = { .dt_id = TEGRA30_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc3] = { .dt_id = TEGRA30_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc4] = { .dt_id = TEGRA30_CLK_SDMMC4, .present = true },
+ [tegra_clk_cve] = { .dt_id = TEGRA30_CLK_CVE, .present = true },
+ [tegra_clk_tvo] = { .dt_id = TEGRA30_CLK_TVO, .present = true },
+ [tegra_clk_tvdac] = { .dt_id = TEGRA30_CLK_TVDAC, .present = true },
+ [tegra_clk_actmon] = { .dt_id = TEGRA30_CLK_ACTMON, .present = true },
+ [tegra_clk_vi_sensor] = { .dt_id = TEGRA30_CLK_VI_SENSOR, .present = true },
+ [tegra_clk_i2c1] = { .dt_id = TEGRA30_CLK_I2C1, .present = true },
+ [tegra_clk_i2c2] = { .dt_id = TEGRA30_CLK_I2C2, .present = true },
+ [tegra_clk_i2c3] = { .dt_id = TEGRA30_CLK_I2C3, .present = true },
+ [tegra_clk_i2c4] = { .dt_id = TEGRA30_CLK_I2C4, .present = true },
+ [tegra_clk_i2c5] = { .dt_id = TEGRA30_CLK_I2C5, .present = true },
+ [tegra_clk_uarta] = { .dt_id = TEGRA30_CLK_UARTA, .present = true },
+ [tegra_clk_uartb] = { .dt_id = TEGRA30_CLK_UARTB, .present = true },
+ [tegra_clk_uartc] = { .dt_id = TEGRA30_CLK_UARTC, .present = true },
+ [tegra_clk_uartd] = { .dt_id = TEGRA30_CLK_UARTD, .present = true },
+ [tegra_clk_uarte] = { .dt_id = TEGRA30_CLK_UARTE, .present = true },
+ [tegra_clk_extern1] = { .dt_id = TEGRA30_CLK_EXTERN1, .present = true },
+ [tegra_clk_extern2] = { .dt_id = TEGRA30_CLK_EXTERN2, .present = true },
+ [tegra_clk_extern3] = { .dt_id = TEGRA30_CLK_EXTERN3, .present = true },
+ [tegra_clk_disp1] = { .dt_id = TEGRA30_CLK_DISP1, .present = true },
+ [tegra_clk_disp2] = { .dt_id = TEGRA30_CLK_DISP2, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA30_CLK_APBDMA, .present = true },
+ [tegra_clk_rtc] = { .dt_id = TEGRA30_CLK_RTC, .present = true },
+ [tegra_clk_timer] = { .dt_id = TEGRA30_CLK_TIMER, .present = true },
+ [tegra_clk_kbc] = { .dt_id = TEGRA30_CLK_KBC, .present = true },
+ [tegra_clk_csus] = { .dt_id = TEGRA30_CLK_CSUS, .present = true },
+ [tegra_clk_vcp] = { .dt_id = TEGRA30_CLK_VCP, .present = true },
+ [tegra_clk_bsea] = { .dt_id = TEGRA30_CLK_BSEA, .present = true },
+ [tegra_clk_bsev] = { .dt_id = TEGRA30_CLK_BSEV, .present = true },
+ [tegra_clk_usbd] = { .dt_id = TEGRA30_CLK_USBD, .present = true },
+ [tegra_clk_usb2] = { .dt_id = TEGRA30_CLK_USB2, .present = true },
+ [tegra_clk_usb3] = { .dt_id = TEGRA30_CLK_USB3, .present = true },
+ [tegra_clk_csi] = { .dt_id = TEGRA30_CLK_CSI, .present = true },
+ [tegra_clk_isp] = { .dt_id = TEGRA30_CLK_ISP, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA30_CLK_KFUSE, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA30_CLK_FUSE, .present = true },
+ [tegra_clk_fuse_burn] = { .dt_id = TEGRA30_CLK_FUSE_BURN, .present = true },
+ [tegra_clk_apbif] = { .dt_id = TEGRA30_CLK_APBIF, .present = true },
+ [tegra_clk_hda2hdmi] = { .dt_id = TEGRA30_CLK_HDA2HDMI, .present = true },
+ [tegra_clk_sata_cold] = { .dt_id = TEGRA30_CLK_SATA_COLD, .present = true },
+ [tegra_clk_sata_oob] = { .dt_id = TEGRA30_CLK_SATA_OOB, .present = true },
+ [tegra_clk_sata] = { .dt_id = TEGRA30_CLK_SATA, .present = true },
+ [tegra_clk_dtv] = { .dt_id = TEGRA30_CLK_DTV, .present = true },
+ [tegra_clk_pll_p] = { .dt_id = TEGRA30_CLK_PLL_P, .present = true },
+ [tegra_clk_pll_p_out1] = { .dt_id = TEGRA30_CLK_PLL_P_OUT1, .present = true },
+ [tegra_clk_pll_p_out2] = { .dt_id = TEGRA30_CLK_PLL_P_OUT2, .present = true },
+ [tegra_clk_pll_p_out3] = { .dt_id = TEGRA30_CLK_PLL_P_OUT3, .present = true },
+ [tegra_clk_pll_p_out4] = { .dt_id = TEGRA30_CLK_PLL_P_OUT4, .present = true },
+ [tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
+ [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
-static struct tegra_clk_periph_regs periph_v_regs = {
- .enb_reg = CLK_OUT_ENB_V,
- .enb_set_reg = CLK_OUT_ENB_SET_V,
- .enb_clr_reg = CLK_OUT_ENB_CLR_V,
- .rst_reg = RST_DEVICES_V,
- .rst_set_reg = RST_DEVICES_SET_V,
- .rst_clr_reg = RST_DEVICES_CLR_V,
};
-static struct tegra_clk_periph_regs periph_w_regs = {
- .enb_reg = CLK_OUT_ENB_W,
- .enb_set_reg = CLK_OUT_ENB_SET_W,
- .enb_clr_reg = CLK_OUT_ENB_CLR_W,
- .rst_reg = RST_DEVICES_W,
- .rst_set_reg = RST_DEVICES_SET_W,
- .rst_clr_reg = RST_DEVICES_CLR_W,
-};
-
-static void tegra30_clk_measure_input_freq(void)
-{
- u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
- u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
- u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
-
- switch (auto_clk_control) {
- case OSC_CTRL_OSC_FREQ_12MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 12000000;
- break;
- case OSC_CTRL_OSC_FREQ_13MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 13000000;
- break;
- case OSC_CTRL_OSC_FREQ_19_2MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 19200000;
- break;
- case OSC_CTRL_OSC_FREQ_26MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 26000000;
- break;
- case OSC_CTRL_OSC_FREQ_16_8MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
- input_freq = 16800000;
- break;
- case OSC_CTRL_OSC_FREQ_38_4MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
- input_freq = 38400000;
- break;
- case OSC_CTRL_OSC_FREQ_48MHZ:
- BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
- input_freq = 48000000;
- break;
- default:
- pr_err("Unexpected auto clock control value %d",
- auto_clk_control);
- BUG();
- return;
- }
-}
-
-static unsigned int tegra30_get_pll_ref_div(void)
-{
- u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
- OSC_CTRL_PLL_REF_DIV_MASK;
-
- switch (pll_ref_div) {
- case OSC_CTRL_PLL_REF_DIV_1:
- return 1;
- case OSC_CTRL_PLL_REF_DIV_2:
- return 2;
- case OSC_CTRL_PLL_REF_DIV_4:
- return 4;
- default:
- pr_err("Invalid pll ref divider %d", pll_ref_div);
- BUG();
- }
- return 0;
-}
-
static void tegra30_utmi_param_configure(void)
{
u32 reg;
@@ -863,11 +922,8 @@ static void __init tegra30_pll_init(void)
/* PLLC */
clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_c_params,
- TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
- pll_c_freq_table, NULL);
- clk_register_clkdev(clk, "pll_c", NULL);
- clks[pll_c] = clk;
+ &pll_c_params, NULL);
+ clks[TEGRA30_CLK_PLL_C] = clk;
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
@@ -876,73 +932,13 @@ static void __init tegra30_pll_init(void)
clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
0, NULL);
- clk_register_clkdev(clk, "pll_c_out1", NULL);
- clks[pll_c_out1] = clk;
-
- /* PLLP */
- clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc_base, 0,
- 408000000, &pll_p_params,
- TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_p_freq_table, NULL);
- clk_register_clkdev(clk, "pll_p", NULL);
- clks[pll_p] = clk;
-
- /* PLLP_OUT1 */
- clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
- clk_base + PLLP_OUTA, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out1", NULL);
- clks[pll_p_out1] = clk;
-
- /* PLLP_OUT2 */
- clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
- clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
- clk_base + PLLP_OUTA, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out2", NULL);
- clks[pll_p_out2] = clk;
-
- /* PLLP_OUT3 */
- clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
- clk_base + PLLP_OUTB, 1, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out3", NULL);
- clks[pll_p_out3] = clk;
-
- /* PLLP_OUT4 */
- clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
- clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
- TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
- &pll_div_lock);
- clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
- clk_base + PLLP_OUTB, 17, 16,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
- &pll_div_lock);
- clk_register_clkdev(clk, "pll_p_out4", NULL);
- clks[pll_p_out4] = clk;
+ clks[TEGRA30_CLK_PLL_C_OUT1] = clk;
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
- &pll_m_params, TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
- pll_m_freq_table, NULL);
- clk_register_clkdev(clk, "pll_m", NULL);
- clks[pll_m] = clk;
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ &pll_m_params, NULL);
+ clks[TEGRA30_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
@@ -951,78 +947,44 @@ static void __init tegra30_pll_init(void)
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_m_out1", NULL);
- clks[pll_m_out1] = clk;
+ clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
/* PLLX */
clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_x_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
- pll_x_freq_table, NULL);
- clk_register_clkdev(clk, "pll_x", NULL);
- clks[pll_x] = clk;
+ &pll_x_params, NULL);
+ clks[TEGRA30_CLK_PLL_X] = clk;
/* PLLX_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_x_out0", NULL);
- clks[pll_x_out0] = clk;
+ clks[TEGRA30_CLK_PLL_X_OUT0] = clk;
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON,
- pll_u_freq_table,
- NULL);
- clk_register_clkdev(clk, "pll_u", NULL);
- clks[pll_u] = clk;
+ &pll_u_params, NULL);
+ clks[TEGRA30_CLK_PLL_U] = clk;
tegra30_utmi_param_configure();
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_d_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
- pll_d_freq_table, &pll_d_lock);
- clk_register_clkdev(clk, "pll_d", NULL);
- clks[pll_d] = clk;
+ &pll_d_params, &pll_d_lock);
+ clks[TEGRA30_CLK_PLL_D] = clk;
/* PLLD_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d_out0", NULL);
- clks[pll_d_out0] = clk;
+ clks[TEGRA30_CLK_PLL_D_OUT0] = clk;
/* PLLD2 */
clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc_base, 0,
- 0, &pll_d2_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
- pll_d_freq_table, NULL);
- clk_register_clkdev(clk, "pll_d2", NULL);
- clks[pll_d2] = clk;
+ &pll_d2_params, NULL);
+ clks[TEGRA30_CLK_PLL_D2] = clk;
/* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "pll_d2_out0", NULL);
- clks[pll_d2_out0] = clk;
-
- /* PLLA */
- clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc_base,
- 0, 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
- clk_register_clkdev(clk, "pll_a", NULL);
- clks[pll_a] = clk;
-
- /* PLLA_OUT0 */
- clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
- clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
- 8, 8, 1, NULL);
- clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
- clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
- CLK_SET_RATE_PARENT, 0, NULL);
- clk_register_clkdev(clk, "pll_a_out0", NULL);
- clks[pll_a_out0] = clk;
+ clks[TEGRA30_CLK_PLL_D2_OUT0] = clk;
/* PLLE */
clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
@@ -1030,258 +992,8 @@ static void __init tegra30_pll_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
- CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
- TEGRA_PLLE_CONFIGURE, pll_e_freq_table, NULL);
- clk_register_clkdev(clk, "pll_e", NULL);
- clks[pll_e] = clk;
-}
-
-static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
- "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",};
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1", };
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2", };
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3", };
-
-static void __init tegra30_audio_clk_init(void)
-{
- struct clk *clk;
-
- /* spdif_in_sync */
- clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
- 24000000);
- clk_register_clkdev(clk, "spdif_in_sync", NULL);
- clks[spdif_in_sync] = clk;
-
- /* i2s0_sync */
- clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s0_sync", NULL);
- clks[i2s0_sync] = clk;
-
- /* i2s1_sync */
- clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s1_sync", NULL);
- clks[i2s1_sync] = clk;
-
- /* i2s2_sync */
- clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s2_sync", NULL);
- clks[i2s2_sync] = clk;
-
- /* i2s3_sync */
- clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s3_sync", NULL);
- clks[i2s3_sync] = clk;
-
- /* i2s4_sync */
- clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "i2s4_sync", NULL);
- clks[i2s4_sync] = clk;
-
- /* vimclk_sync */
- clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
- clk_register_clkdev(clk, "vimclk_sync", NULL);
- clks[vimclk_sync] = clk;
-
- /* audio0 */
- clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S0, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio0", NULL);
- clks[audio0] = clk;
-
- /* audio1 */
- clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S1, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio1", NULL);
- clks[audio1] = clk;
-
- /* audio2 */
- clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S2, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio2", NULL);
- clks[audio2] = clk;
-
- /* audio3 */
- clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S3, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio3", NULL);
- clks[audio3] = clk;
-
- /* audio4 */
- clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
- clk_base + AUDIO_SYNC_CLK_I2S4, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "audio4", NULL);
- clks[audio4] = clk;
-
- /* spdif */
- clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
- clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
- clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
- CLK_GATE_SET_TO_DISABLE, NULL);
- clk_register_clkdev(clk, "spdif", NULL);
- clks[spdif] = clk;
-
- /* audio0_2x */
- clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 113, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio0_2x", NULL);
- clks[audio0_2x] = clk;
-
- /* audio1_2x */
- clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 114, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio1_2x", NULL);
- clks[audio1_2x] = clk;
-
- /* audio2_2x */
- clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 115, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio2_2x", NULL);
- clks[audio2_2x] = clk;
-
- /* audio3_2x */
- clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 116, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio3_2x", NULL);
- clks[audio3_2x] = clk;
-
- /* audio4_2x */
- clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 117, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "audio4_2x", NULL);
- clks[audio4_2x] = clk;
-
- /* spdif_2x */
- clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
- CLK_SET_RATE_PARENT, 2, 1);
- clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
- clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1, 0,
- &clk_doubler_lock);
- clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
- TEGRA_PERIPH_NO_RESET, clk_base,
- CLK_SET_RATE_PARENT, 118, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "spdif_2x", NULL);
- clks[spdif_2x] = clk;
-}
-
-static void __init tegra30_pmc_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_out_1 */
- clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
- &clk_out_lock);
- clks[clk_out_1_mux] = clk;
- clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern1", "clk_out_1");
- clks[clk_out_1] = clk;
-
- /* clk_out_2 */
- clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
- &clk_out_lock);
- clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern2", "clk_out_2");
- clks[clk_out_2] = clk;
-
- /* clk_out_3 */
- clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents),
- CLK_SET_RATE_NO_REPARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
- &clk_out_lock);
- clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
- pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
- &clk_out_lock);
- clk_register_clkdev(clk, "extern3", "clk_out_3");
- clks[clk_out_3] = clk;
-
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- clks[blink] = clk;
-
+ CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
+ clks[TEGRA30_CLK_PLL_E] = clk;
}
static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -1332,8 +1044,7 @@ static void __init tegra30_super_clk_init(void)
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "cclk_g", NULL);
- clks[cclk_g] = clk;
+ clks[TEGRA30_CLK_CCLK_G] = clk;
/*
* Clock input to cclk_lp divided from pll_p using
@@ -1369,8 +1080,7 @@ static void __init tegra30_super_clk_init(void)
clk_base + CCLKLP_BURST_POLICY,
TEGRA_DIVIDER_2, 4, 8, 9,
NULL);
- clk_register_clkdev(clk, "cclk_lp", NULL);
- clks[cclk_lp] = clk;
+ clks[TEGRA30_CLK_CCLK_LP] = clk;
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
@@ -1378,142 +1088,44 @@ static void __init tegra30_super_clk_init(void)
CLK_SET_RATE_PARENT,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
- clk_register_clkdev(clk, "sclk", NULL);
- clks[sclk] = clk;
-
- /* HCLK */
- clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
- clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
- clk_base + SYSTEM_CLK_RATE, 7,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "hclk", NULL);
- clks[hclk] = clk;
-
- /* PCLK */
- clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
- clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
- &sysrate_lock);
- clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
- clk_base + SYSTEM_CLK_RATE, 3,
- CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
- clk_register_clkdev(clk, "pclk", NULL);
- clks[pclk] = clk;
+ clks[TEGRA30_CLK_SCLK] = clk;
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "twd", NULL);
- clks[twd] = clk;
+ clks[TEGRA30_CLK_TWD] = clk;
+
+ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra30_clks, NULL);
}
static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
"clk_m" };
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
-static const char *i2s0_parents[] = { "pll_a_out0", "audio0_2x", "pll_p",
- "clk_m" };
-static const char *i2s1_parents[] = { "pll_a_out0", "audio1_2x", "pll_p",
- "clk_m" };
-static const char *i2s2_parents[] = { "pll_a_out0", "audio2_2x", "pll_p",
- "clk_m" };
-static const char *i2s3_parents[] = { "pll_a_out0", "audio3_2x", "pll_p",
- "clk_m" };
-static const char *i2s4_parents[] = { "pll_a_out0", "audio4_2x", "pll_p",
- "clk_m" };
static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
"clk_m" };
-static const char *spdif_in_parents[] = { "pll_p", "pll_c", "pll_m" };
-static const char *mux_pllpc_clk32k_clkm[] = { "pll_p", "pll_c", "clk_32k",
- "clk_m" };
-static const char *mux_pllpc_clkm_clk32k[] = { "pll_p", "pll_c", "clk_m",
- "clk_32k" };
static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
-static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
- "clk_m" };
-static const char *mux_pllp_clkm[] = { "pll_p", "unused", "unused", "clk_m" };
static const char *mux_pllpmdacd2_clkm[] = { "pll_p", "pll_m", "pll_d_out0",
"pll_a_out0", "pll_c",
"pll_d2_out0", "clk_m" };
-static const char *mux_plla_clk32k_pllp_clkm_plle[] = { "pll_a_out0",
- "clk_32k", "pll_p",
- "clk_m", "pll_e" };
static const char *mux_plld_out0_plld2_out0[] = { "pll_d_out0",
"pll_d2_out0" };
+static const char *pwm_parents[] = { "pll_p", "pll_c", "clk_32k", "clk_m" };
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
- TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", i2s0_parents, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
- TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
- TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
- TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", i2s3_parents, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
- TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", i2s4_parents, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
- TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
- TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
- TEGRA_INIT_DATA_MUX("d_audio", "d_audio", "tegra30-ahub", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, 0, d_audio),
- TEGRA_INIT_DATA_MUX("dam0", NULL, "tegra30-dam.0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, &periph_v_regs, 0, dam0),
- TEGRA_INIT_DATA_MUX("dam1", NULL, "tegra30-dam.1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, &periph_v_regs, 0, dam1),
- TEGRA_INIT_DATA_MUX("dam2", NULL, "tegra30-dam.2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, &periph_v_regs, 0, dam2),
- TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, 0, hda),
- TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, 0, hda2codec_2x),
- TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
- TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
- TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
- TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
- TEGRA_INIT_DATA_MUX("sbc5", NULL, "spi_tegra.4", mux_pllpcm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
- TEGRA_INIT_DATA_MUX("sbc6", NULL, "spi_tegra.5", mux_pllpcm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
- TEGRA_INIT_DATA_MUX("sata_oob", NULL, "tegra_sata_oob", mux_pllpcm_clkm, CLK_SOURCE_SATA_OOB, 123, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata_oob),
- TEGRA_INIT_DATA_MUX("sata", NULL, "tegra_sata", mux_pllpcm_clkm, CLK_SOURCE_SATA, 124, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata),
- TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, TEGRA_PERIPH_ON_APB, ndflash),
- TEGRA_INIT_DATA_MUX("ndspeed", NULL, "tegra_nand_speed", mux_pllpcm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
- TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
- TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite),
- TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
- TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
- TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
- TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllpc_clkm_clk32k, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
- TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllpc_clk32k_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
- TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
- TEGRA_INIT_DATA_INT("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
- TEGRA_INIT_DATA_INT("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
- TEGRA_INIT_DATA_INT("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
- TEGRA_INIT_DATA_INT("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
- TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
- TEGRA_INIT_DATA_INT("3d2", NULL, "3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, &periph_v_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d2),
- TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
- TEGRA_INIT_DATA_INT("se", NULL, "se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, 0, se),
- TEGRA_INIT_DATA_MUX("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect),
- TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
- TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
- TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
- TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
- TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
- TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
- TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
- TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
- TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllpc_clk32k_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
- TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
- TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
- TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
- TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
- TEGRA_INIT_DATA_DIV16("i2c4", "div-clk", "tegra-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2c4),
- TEGRA_INIT_DATA_DIV16("i2c5", "div-clk", "tegra-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c5),
- TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
- TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
- TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
- TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
- TEGRA_INIT_DATA_UART("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 66, &periph_u_regs, uarte),
- TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
- TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
- TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
- TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
- TEGRA_INIT_DATA("pwm", NULL, "pwm", mux_pllpc_clk32k_clkm, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, 0, pwm),
+ TEGRA_INIT_DATA_MUX("spdif_out", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, TEGRA_PERIPH_ON_APB, TEGRA30_CLK_SPDIF_OUT),
+ TEGRA_INIT_DATA_MUX("d_audio", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, 0, TEGRA30_CLK_D_AUDIO),
+ TEGRA_INIT_DATA_MUX("dam0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, 0, TEGRA30_CLK_DAM0),
+ TEGRA_INIT_DATA_MUX("dam1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, 0, TEGRA30_CLK_DAM1),
+ TEGRA_INIT_DATA_MUX("dam2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, 0, TEGRA30_CLK_DAM2),
+ TEGRA_INIT_DATA_INT("3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, TEGRA_PERIPH_MANUAL_RESET, TEGRA30_CLK_GR3D2),
+ TEGRA_INIT_DATA_INT("se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, 0, TEGRA30_CLK_SE),
+ TEGRA_INIT_DATA_MUX8("hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, 0, TEGRA30_CLK_HDMI),
+ TEGRA_INIT_DATA("pwm", NULL, NULL, pwm_parents, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, 17, TEGRA_PERIPH_ON_APB, TEGRA30_CLK_PWM),
};
static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
- TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP1, 29, 3, 27, &periph_l_regs, 0, disp1),
- TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP2, 29, 3, 26, &periph_l_regs, 0, disp2),
- TEGRA_INIT_DATA_NODIV("dsib", NULL, "tegradc.1", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, &periph_u_regs, 0, dsib),
+ TEGRA_INIT_DATA_NODIV("dsib", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, 0, TEGRA30_CLK_DSIB),
};
static void __init tegra30_periph_clk_init(void)
@@ -1522,170 +1134,20 @@ static void __init tegra30_periph_clk_init(void)
struct clk *clk;
int i;
- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base, 0, 34,
- &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-apbdma");
- clks[apbdma] = clk;
-
- /* rtc */
- clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 4, &periph_l_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "rtc-tegra");
- clks[rtc] = clk;
-
- /* timer */
- clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base, 0,
- 5, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "timer");
- clks[timer] = clk;
-
- /* kbc */
- clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 36, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-kbc");
- clks[kbc] = clk;
-
- /* csus */
- clk = tegra_clk_register_periph_gate("csus", "clk_m",
- TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
- clk_base, 0, 92, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csus", "tengra_camera");
- clks[csus] = clk;
-
- /* vcp */
- clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0, 29,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "vcp", "tegra-avp");
- clks[vcp] = clk;
-
- /* bsea */
- clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base, 0,
- 62, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsea", "tegra-avp");
- clks[bsea] = clk;
-
- /* bsev */
- clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base, 0,
- 63, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "bsev", "tegra-aes");
- clks[bsev] = clk;
-
- /* usbd */
- clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
- 22, &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
- clks[usbd] = clk;
-
- /* usb2 */
- clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
- 58, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.1");
- clks[usb2] = clk;
-
- /* usb3 */
- clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
- 59, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra-ehci.2");
- clks[usb3] = clk;
-
/* dsia */
clk = tegra_clk_register_periph_gate("dsia", "pll_d_out0", 0, clk_base,
- 0, 48, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "dsia", "tegradc.0");
- clks[dsia] = clk;
-
- /* csi */
- clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
- 0, 52, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "csi", "tegra_camera");
- clks[csi] = clk;
-
- /* isp */
- clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
- &periph_l_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "isp", "tegra_camera");
- clks[isp] = clk;
+ 0, 48, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_DSIA] = clk;
/* pcie */
clk = tegra_clk_register_periph_gate("pcie", "clk_m", 0, clk_base, 0,
- 70, &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pcie", "tegra-pcie");
- clks[pcie] = clk;
+ 70, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_PCIE] = clk;
/* afi */
clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
- &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "afi", "tegra-pcie");
- clks[afi] = clk;
-
- /* pciex */
- clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
- 74, &periph_u_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "pciex", "tegra-pcie");
- clks[pciex] = clk;
-
- /* kfuse */
- clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 40, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "kfuse-tegra");
- clks[kfuse] = clk;
-
- /* fuse */
- clk = tegra_clk_register_periph_gate("fuse", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 39, &periph_h_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "fuse", "fuse-tegra");
- clks[fuse] = clk;
-
- /* fuse_burn */
- clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 39, &periph_h_regs,
periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "fuse_burn", "fuse-tegra");
- clks[fuse_burn] = clk;
-
- /* apbif */
- clk = tegra_clk_register_periph_gate("apbif", "clk_m", 0,
- clk_base, 0, 107, &periph_v_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "apbif", "tegra30-ahub");
- clks[apbif] = clk;
-
- /* hda2hdmi */
- clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 128, &periph_w_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "hda2hdmi", "tegra30-hda");
- clks[hda2hdmi] = clk;
-
- /* sata_cold */
- clk = tegra_clk_register_periph_gate("sata_cold", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 129, &periph_w_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "tegra_sata_cold");
- clks[sata_cold] = clk;
-
- /* dtv */
- clk = tegra_clk_register_periph_gate("dtv", "clk_m",
- TEGRA_PERIPH_ON_APB,
- clk_base, 0, 79, &periph_u_regs,
- periph_clk_enb_refcnt);
- clk_register_clkdev(clk, NULL, "dtv");
- clks[dtv] = clk;
+ clks[TEGRA30_CLK_AFI] = clk;
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1694,84 +1156,37 @@ static void __init tegra30_periph_clk_init(void)
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, &periph_h_regs, periph_clk_enb_refcnt);
- clk_register_clkdev(clk, "emc", NULL);
- clks[emc] = clk;
+ 57, periph_clk_enb_refcnt);
+ clks[TEGRA30_CLK_EMC] = clk;
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &cml_lock);
+ clks[TEGRA30_CLK_CML0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &cml_lock);
+ clks[TEGRA30_CLK_CML1] = clk;
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->parent_names,
+ clk = tegra_clk_register_periph(data->name, data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset, data->flags);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
data = &tegra_periph_nodiv_clk_list[i];
clk = tegra_clk_register_periph_nodiv(data->name,
- data->parent_names,
+ data->p.parent_names,
data->num_parents, &data->periph,
clk_base, data->offset);
- clk_register_clkdev(clk, data->con_id, data->dev_id);
clks[data->clk_id] = clk;
}
-}
-
-static void __init tegra30_fixed_clk_init(void)
-{
- struct clk *clk;
-
- /* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
- clk_register_clkdev(clk, "clk_32k", NULL);
- clks[clk_32k] = clk;
- /* clk_m_div2 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- clk_register_clkdev(clk, "clk_m_div2", NULL);
- clks[clk_m_div2] = clk;
-
- /* clk_m_div4 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- clk_register_clkdev(clk, "clk_m_div4", NULL);
- clks[clk_m_div4] = clk;
-
- /* cml0 */
- clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
- 0, 0, &cml_lock);
- clk_register_clkdev(clk, "cml0", NULL);
- clks[cml0] = clk;
-
- /* cml1 */
- clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
- 1, 0, &cml_lock);
- clk_register_clkdev(clk, "cml1", NULL);
- clks[cml1] = clk;
-}
-
-static void __init tegra30_osc_clk_init(void)
-{
- struct clk *clk;
- unsigned int pll_ref_div;
-
- tegra30_clk_measure_input_freq();
-
- /* clk_m */
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
- input_freq);
- clk_register_clkdev(clk, "clk_m", NULL);
- clks[clk_m] = clk;
-
- /* pll_ref */
- pll_ref_div = tegra30_get_pll_ref_div();
- clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
- CLK_SET_RATE_PARENT, 1, pll_ref_div);
- clk_register_clkdev(clk, "pll_ref", NULL);
- clks[pll_ref] = clk;
+ tegra_periph_clk_init(clk_base, pmc_base, tegra30_clks, &pll_p_params);
}
/* Tegra30 CPU clock and reset control functions */
@@ -1913,48 +1328,49 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
};
static struct tegra_clk_init_table init_table[] __initdata = {
- {uarta, pll_p, 408000000, 0},
- {uartb, pll_p, 408000000, 0},
- {uartc, pll_p, 408000000, 0},
- {uartd, pll_p, 408000000, 0},
- {uarte, pll_p, 408000000, 0},
- {pll_a, clk_max, 564480000, 1},
- {pll_a_out0, clk_max, 11289600, 1},
- {extern1, pll_a_out0, 0, 1},
- {clk_out_1_mux, extern1, 0, 0},
- {clk_out_1, clk_max, 0, 1},
- {blink, clk_max, 0, 1},
- {i2s0, pll_a_out0, 11289600, 0},
- {i2s1, pll_a_out0, 11289600, 0},
- {i2s2, pll_a_out0, 11289600, 0},
- {i2s3, pll_a_out0, 11289600, 0},
- {i2s4, pll_a_out0, 11289600, 0},
- {sdmmc1, pll_p, 48000000, 0},
- {sdmmc2, pll_p, 48000000, 0},
- {sdmmc3, pll_p, 48000000, 0},
- {pll_m, clk_max, 0, 1},
- {pclk, clk_max, 0, 1},
- {csite, clk_max, 0, 1},
- {emc, clk_max, 0, 1},
- {mselect, clk_max, 0, 1},
- {sbc1, pll_p, 100000000, 0},
- {sbc2, pll_p, 100000000, 0},
- {sbc3, pll_p, 100000000, 0},
- {sbc4, pll_p, 100000000, 0},
- {sbc5, pll_p, 100000000, 0},
- {sbc6, pll_p, 100000000, 0},
- {host1x, pll_c, 150000000, 0},
- {disp1, pll_p, 600000000, 0},
- {disp2, pll_p, 600000000, 0},
- {twd, clk_max, 0, 1},
- {gr2d, pll_c, 300000000, 0},
- {gr3d, pll_c, 300000000, 0},
- {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+ {TEGRA30_CLK_UARTA, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTB, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTD, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_UARTE, TEGRA30_CLK_PLL_P, 408000000, 0},
+ {TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 1},
+ {TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 1},
+ {TEGRA30_CLK_EXTERN1, TEGRA30_CLK_PLL_A_OUT0, 0, 1},
+ {TEGRA30_CLK_CLK_OUT_1_MUX, TEGRA30_CLK_EXTERN1, 0, 0},
+ {TEGRA30_CLK_CLK_OUT_1, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_BLINK, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_I2S0, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S1, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S2, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S3, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_I2S4, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0},
+ {TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0},
+ {TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC3, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC4, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC5, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0},
+ {TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0},
+ {TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0},
+ {TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0},
+ {TEGRA30_CLK_TWD, TEGRA30_CLK_CLK_MAX, 0, 1},
+ {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
+ {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
};
static void __init tegra30_clock_apply_init_table(void)
{
- tegra_init_from_table(init_table, clks, clk_max);
+ tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
}
/*
@@ -1963,19 +1379,18 @@ static void __init tegra30_clock_apply_init_table(void)
* table under two names.
*/
static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
- TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
- TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
- TEGRA_CLK_DUPLICATE(bsev, "tegra-avp", "bsev"),
- TEGRA_CLK_DUPLICATE(bsev, "nvavp", "bsev"),
- TEGRA_CLK_DUPLICATE(vde, "tegra-aes", "vde"),
- TEGRA_CLK_DUPLICATE(bsea, "tegra-aes", "bsea"),
- TEGRA_CLK_DUPLICATE(bsea, "nvavp", "bsea"),
- TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
- TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
- TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
- TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
- TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_USBD, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEV, "tegra-avp", "bsev"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEV, "nvavp", "bsev"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_VDE, "tegra-aes", "vde"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEA, "tegra-aes", "bsea"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_BSEA, "nvavp", "bsea"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CML1, "tegra_sata_cml", NULL),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CML0, "tegra_pcie", "cml"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_VCP, "nvavp", "vcp"),
+ TEGRA_CLK_DUPLICATE(TEGRA30_CLK_CLK_MAX, NULL, NULL), /* MUST be the last entry */
};
static const struct of_device_id pmc_match[] __initconst = {
@@ -1986,7 +1401,6 @@ static const struct of_device_id pmc_match[] __initconst = {
static void __init tegra30_clock_init(struct device_node *np)
{
struct device_node *node;
- int i;
clk_base = of_iomap(np, 0);
if (!clk_base) {
@@ -2006,29 +1420,27 @@ static void __init tegra30_clock_init(struct device_node *np)
BUG();
}
- tegra30_osc_clk_init();
- tegra30_fixed_clk_init();
+ clks = tegra_clk_init(clk_base, TEGRA30_CLK_CLK_MAX,
+ TEGRA30_CLK_PERIPH_BANKS);
+ if (!clks)
+ return;
+
+ if (tegra_osc_clk_init(clk_base, tegra30_clks, tegra30_input_freq,
+ ARRAY_SIZE(tegra30_input_freq), &input_freq, NULL) < 0)
+ return;
+
+ tegra_fixed_clk_init(tegra30_clks);
tegra30_pll_init();
tegra30_super_clk_init();
tegra30_periph_clk_init();
- tegra30_audio_clk_init();
- tegra30_pmc_clk_init();
-
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- if (IS_ERR(clks[i])) {
- pr_err("Tegra30 clk %d: register failed with %ld\n",
- i, PTR_ERR(clks[i]));
- BUG();
- }
- if (!clks[i])
- clks[i] = ERR_PTR(-EINVAL);
- }
+ tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks, &pll_a_params);
+ tegra_pmc_clk_init(pmc_base, tegra30_clks);
- tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ tegra_add_of_provider(np);
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 86581ac1fd69..c0a7d7723510 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,13 +18,175 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
+#include <linux/reset-controller.h>
+#include <linux/tegra-soc.h>
#include "clk.h"
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_SET_X 0x284
+#define CLK_OUT_ENB_CLR_X 0x288
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DFLL_DVCO 0x2F4
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_SET_X 0x290
+#define RST_DEVICES_CLR_X 0x294
+
/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
+int *periph_clk_enb_refcnt;
+static int periph_banks;
+static struct clk **clks;
+static int clk_num;
+static struct clk_onecell_data clk_data;
+
+static struct tegra_clk_periph_regs periph_regs[] = {
+ [0] = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+ },
+ [1] = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+ },
+ [2] = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+ },
+ [3] = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+ },
+ [4] = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+ },
+ [5] = {
+ .enb_reg = CLK_OUT_ENB_X,
+ .enb_set_reg = CLK_OUT_ENB_SET_X,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_X,
+ .rst_reg = RST_DEVICES_X,
+ .rst_set_reg = RST_DEVICES_SET_X,
+ .rst_clr_reg = RST_DEVICES_CLR_X,
+ },
+};
+
+static void __iomem *clk_base;
+
+static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ /*
+	 * If the peripheral is on the APB bus, we must read back from the APB
+	 * bus to flush the write operation, so that the peripheral is not
+	 * accessed after its clock has been disabled. Since the reset driver
+	 * has no knowledge of which reset IDs represent which devices, simply
+	 * do this all the time.
+ */
+ tegra_read_chipid();
+
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_set_reg);
+
+ return 0;
+}
+
+static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ writel_relaxed(BIT(id % 32),
+ clk_base + periph_regs[id / 32].rst_clr_reg);
+
+ return 0;
+}
+
+struct tegra_clk_periph_regs *get_reg_bank(int clkid)
+{
+ int reg_bank = clkid / 32;
+
+ if (reg_bank < periph_banks)
+ return &periph_regs[reg_bank];
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
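
/*
 * Worked example of the mapping (illustrative, not part of the change): the
 * "emc" gate registered from clk-tegra30.c uses clock number 57, which
 * previously passed &periph_h_regs explicitly. With the table above,
 * get_reg_bank(57) returns &periph_regs[57 / 32], i.e. &periph_regs[1]
 * (the H bank), and the enable/reset bit is BIT(57 % 32), i.e. BIT(25),
 * so the explicit register-bank argument can be dropped from the call sites.
 */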
+
+struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
+{
+ clk_base = regs;
+
+ if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
+ return NULL;
+
+ periph_clk_enb_refcnt = kzalloc(32 * banks *
+ sizeof(*periph_clk_enb_refcnt), GFP_KERNEL);
+ if (!periph_clk_enb_refcnt)
+ return NULL;
+
+ periph_banks = banks;
+
+ clks = kzalloc(num * sizeof(struct clk *), GFP_KERNEL);
+ if (!clks)
+ kfree(periph_clk_enb_refcnt);
+
+ clk_num = num;
+
+ return clks;
+}
+
void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max)
{
@@ -74,6 +236,58 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
}
}
+static struct reset_control_ops rst_ops = {
+ .assert = tegra_clk_rst_assert,
+ .deassert = tegra_clk_rst_deassert,
+};
+
+static struct reset_controller_dev rst_ctlr = {
+ .ops = &rst_ops,
+ .owner = THIS_MODULE,
+ .of_reset_n_cells = 1,
+};
+
+void __init tegra_add_of_provider(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < clk_num; i++) {
+ if (IS_ERR(clks[i])) {
+			pr_err("Tegra clk %d: register failed with %ld\n",
+			       i, PTR_ERR(clks[i]));
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ clk_data.clks = clks;
+ clk_data.clk_num = clk_num;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+ rst_ctlr.nr_resets = clk_num * 32;
+ reset_controller_register(&rst_ctlr);
+}
+
+void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++, dev_clks++)
+ clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
+ dev_clks->dev_id);
+}
+
+struct clk ** __init tegra_lookup_dt_id(int clk_id,
+ struct tegra_clk *tegra_clk)
+{
+ if (tegra_clk[clk_id].present)
+ return &clks[tegra_clk[clk_id].dt_id];
+ else
+ return NULL;
+}
+
tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
void __init tegra_clocks_apply_init_table(void)
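
Since tegra_add_of_provider() now also registers a reset controller whose reset IDs are simply the peripheral clock numbers (bank = id / 32, bit = id % 32, as implemented by tegra_clk_rst_assert()/tegra_clk_rst_deassert() above), peripheral drivers can go through the common reset framework. A minimal consumer sketch follows; the device, the "mc" reset name and the DT wiring are hypothetical, while devm_reset_control_get(), reset_control_assert() and reset_control_deassert() are the standard reset framework calls:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_hw_reset(struct device *dev)
{
	/* "mc" is a hypothetical entry in the device's reset-names property */
	struct reset_control *rst = devm_reset_control_get(dev, "mc");

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Writes BIT(id % 32) to RST_DEVICES_SET_x, then to RST_DEVICES_CLR_x */
	reset_control_assert(rst);
	udelay(2);
	reset_control_deassert(rst);

	return 0;
}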
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 07cfacd91686..16ec8d6bb87f 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -37,6 +37,8 @@ struct tegra_clk_sync_source {
container_of(_hw, struct tegra_clk_sync_source, hw)
extern const struct clk_ops tegra_clk_sync_source_ops;
+extern int *periph_clk_enb_refcnt;
+
struct clk *tegra_clk_register_sync_source(const char *name,
unsigned long fixed_rate, unsigned long max_rate);
@@ -188,12 +190,15 @@ struct tegra_clk_pll_params {
u32 ext_misc_reg[3];
u32 pmc_divnm_reg;
u32 pmc_divp_reg;
+ u32 flags;
int stepa_shift;
int stepb_shift;
int lock_delay;
int max_p;
struct pdiv_map *pdiv_tohw;
struct div_nmp *div_nmp;
+ struct tegra_clk_pll_freq_table *freq_table;
+ unsigned long fixed_rate;
};
/**
@@ -233,10 +238,7 @@ struct tegra_clk_pll {
struct clk_hw hw;
void __iomem *clk_base;
void __iomem *pmc;
- u32 flags;
- unsigned long fixed_rate;
spinlock_t *lock;
- struct tegra_clk_pll_freq_table *freq_table;
struct tegra_clk_pll_params *params;
};
@@ -258,56 +260,49 @@ extern const struct clk_ops tegra_clk_pll_ops;
extern const struct clk_ops tegra_clk_plle_ops;
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
- struct tegra_clk_pll_params *pll_params, u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
- unsigned long flags, unsigned long fixed_rate,
+ unsigned long flags,
struct tegra_clk_pll_params *pll_params,
- u32 pll_flags,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock, unsigned long parent_rate);
struct clk *tegra_clk_register_plle_tegra114(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
- unsigned long fixed_rate,
struct tegra_clk_pll_params *pll_params,
- struct tegra_clk_pll_freq_table *freq_table,
spinlock_t *lock);
+struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
+
/**
* struct tegra_clk_pll_out - PLL divider down clock
*
@@ -395,13 +390,13 @@ struct tegra_clk_periph_gate {
#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
#define TEGRA_PERIPH_ON_APB BIT(2)
#define TEGRA_PERIPH_WAR_1005168 BIT(3)
+#define TEGRA_PERIPH_NO_DIV BIT(4)
+#define TEGRA_PERIPH_NO_GATE BIT(5)
-void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
extern const struct clk_ops tegra_clk_periph_gate_ops;
struct clk *tegra_clk_register_periph_gate(const char *name,
const char *parent_name, u8 gate_flags, void __iomem *clk_base,
- unsigned long flags, int clk_num,
- struct tegra_clk_periph_regs *pregs, int *enable_refcnt);
+ unsigned long flags, int clk_num, int *enable_refcnt);
/**
* struct clk-periph - peripheral clock
@@ -443,26 +438,26 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
#define TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, _mux_flags, \
_div_shift, _div_width, _div_frac_width, \
- _div_flags, _clk_num, _enb_refcnt, _regs, \
- _gate_flags, _table) \
+ _div_flags, _clk_num,\
+ _gate_flags, _table, _lock) \
{ \
.mux = { \
.flags = _mux_flags, \
.shift = _mux_shift, \
.mask = _mux_mask, \
.table = _table, \
+ .lock = _lock, \
}, \
.divider = { \
.flags = _div_flags, \
.shift = _div_shift, \
.width = _div_width, \
.frac_width = _div_frac_width, \
+ .lock = _lock, \
}, \
.gate = { \
.flags = _gate_flags, \
.clk_num = _clk_num, \
- .enable_refcnt = _enb_refcnt, \
- .regs = _regs, \
}, \
.mux_ops = &clk_mux_ops, \
.div_ops = &tegra_clk_frac_div_ops, \
@@ -472,7 +467,10 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_periph_init_data {
const char *name;
int clk_id;
- const char **parent_names;
+ union {
+ const char **parent_names;
+ const char *parent_name;
+ } p;
int num_parents;
struct tegra_clk_periph periph;
u32 offset;
@@ -483,20 +481,19 @@ struct tegra_periph_init_data {
#define TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_mask, _mux_flags, _div_shift, \
- _div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id, _table,\
- _flags) \
+ _div_width, _div_frac_width, _div_flags, \
+ _clk_num, _gate_flags, _clk_id, _table, \
+ _flags, _lock) \
{ \
.name = _name, \
.clk_id = _clk_id, \
- .parent_names = _parent_names, \
+ .p.parent_names = _parent_names, \
.num_parents = ARRAY_SIZE(_parent_names), \
.periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, \
_mux_flags, _div_shift, \
_div_width, _div_frac_width, \
_div_flags, _clk_num, \
- _enb_refcnt, _regs, \
- _gate_flags, _table), \
+ _gate_flags, _table, _lock), \
.offset = _offset, \
.con_id = _con_id, \
.dev_id = _dev_id, \
@@ -505,13 +502,13 @@ struct tegra_periph_init_data {
#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, _mux_width, _mux_flags, _div_shift, \
- _div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ _div_width, _div_frac_width, _div_flags, \
+ _clk_num, _gate_flags, _clk_id) \
TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
_mux_shift, BIT(_mux_width) - 1, _mux_flags, \
_div_shift, _div_width, _div_frac_width, _div_flags, \
- _regs, _clk_num, _enb_refcnt, _gate_flags, _clk_id,\
- NULL, 0)
+ _clk_num, _gate_flags, _clk_id,\
+ NULL, 0, NULL)
/**
* struct clk_super_mux - super clock
@@ -581,12 +578,49 @@ struct tegra_clk_duplicate {
}, \
}
+struct tegra_clk {
+ int dt_id;
+ bool present;
+};
+
+struct tegra_devclk {
+ int dt_id;
+ char *dev_id;
+ char *con_id;
+};
+
void tegra_init_from_table(struct tegra_clk_init_table *tbl,
struct clk *clks[], int clk_max);
void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max);
+struct tegra_clk_periph_regs *get_reg_bank(int clkid);
+struct clk **tegra_clk_init(void __iomem *clk_base, int num, int periph_banks);
+
+struct clk **tegra_lookup_dt_id(int clk_id, struct tegra_clk *tegra_clk);
+
+void tegra_add_of_provider(struct device_node *np);
+void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
+
+void tegra_audio_clk_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
+void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
+ struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
+void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks);
+void tegra_fixed_clk_init(struct tegra_clk *tegra_clks);
+int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *tegra_clks,
+ unsigned long *input_freqs, int num,
+ unsigned long *osc_freq,
+ unsigned long *pll_ref_freq);
+void tegra_super_clk_gen4_init(void __iomem *clk_base,
+ void __iomem *pmc_base, struct tegra_clk *tegra_clks,
+ struct tegra_clk_pll_params *pll_params);
+
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
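
The per-SoC bookkeeping that used to be open-coded (one enum value and one clk_register_clkdev() call per clock) is now expressed through the struct tegra_clk and struct tegra_devclk types added above: the first maps a driver-internal index to the DT binding ID and records whether the clock exists on the SoC, the second lists the clkdev lookups to register. The tegra30_clks and devclks tables referenced from clk-tegra30.c follow this shape; the sketch below is illustrative only, with assumed indices and entries (the real tables live elsewhere in this series):

#include <linux/init.h>
#include "clk.h"	/* struct tegra_clk, struct tegra_devclk */

/* Illustrative sketch, not the actual tegra30 tables */
static struct tegra_clk tegra30_clks_sketch[] __initdata = {
	[0] = { .dt_id = TEGRA30_CLK_UARTA, .present = true },
	[1] = { .dt_id = TEGRA30_CLK_PLL_P, .present = true },
};

static struct tegra_devclk devclks_sketch[] __initdata = {
	{ .dev_id = "tegra_uart.0", .dt_id = TEGRA30_CLK_UARTA },
	{ .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
};

tegra_lookup_dt_id() hands back &clks[tegra_clk[idx].dt_id] only when .present is set, and tegra_register_devclks() walks the devclk table calling clk_register_clkdev() for each entry, replacing the per-clock clk_register_clkdev() calls removed from clk-tegra30.c.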
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
new file mode 100644
index 000000000000..4319d4031aa3
--- /dev/null
+++ b/drivers/clk/ti/Makefile
@@ -0,0 +1,11 @@
+ifneq ($(CONFIG_OF),)
+obj-y += clk.o autoidle.o clockdomain.o
+clk-common = dpll.o composite.o divider.o gate.o \
+ fixed-factor.o mux.o apll.o
+obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o
+obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o
+obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o
+obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o
+endif
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
new file mode 100644
index 000000000000..b986f61f5a77
--- /dev/null
+++ b/drivers/clk/ti/apll.c
@@ -0,0 +1,223 @@
+/*
+ * OMAP APLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#define APLL_FORCE_LOCK 0x1
+#define APLL_AUTO_IDLE 0x2
+#define MAX_APLL_WAIT_TRIES 1000000
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int dra7_apll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int r = 0, i = 0;
+ struct dpll_data *ad;
+ const char *clk_name;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+ if (!ad)
+ return -EINVAL;
+
+ clk_name = __clk_get_name(clk->hw.clk);
+
+ state <<= __ffs(ad->idlest_mask);
+
+	/* Check if the APLL is already locked */
+ v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+
+ if ((v & ad->idlest_mask) == state)
+ return r;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+
+ state <<= __ffs(ad->idlest_mask);
+
+ while (1) {
+ v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+ if ((v & ad->idlest_mask) == state)
+ break;
+ if (i > MAX_APLL_WAIT_TRIES)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_APLL_WAIT_TRIES) {
+ pr_warn("clock: %s failed transition to '%s'\n",
+ clk_name, (state) ? "locked" : "bypassed");
+ } else {
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk_name, (state) ? "locked" : "bypassed", i);
+
+ r = 0;
+ }
+
+ return r;
+}
+
+static void dra7_apll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ state <<= __ffs(ad->idlest_mask);
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+}
+
+static int dra7_apll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+ v &= ad->enable_mask;
+
+ v >>= __ffs(ad->enable_mask);
+
+ return v == APLL_AUTO_IDLE ? 0 : 1;
+}
+
+static u8 dra7_init_apll_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static const struct clk_ops apll_ck_ops = {
+ .enable = &dra7_apll_enable,
+ .disable = &dra7_apll_disable,
+ .is_enabled = &dra7_apll_is_enabled,
+ .get_parent = &dra7_init_apll_parent,
+};
+
+static void __init omap_clk_register_apll(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk_hw->dpll_data;
+ struct clk *clk;
+
+ ad->clk_ref = of_clk_get(node, 0);
+ ad->clk_bypass = of_clk_get(node, 1);
+
+ if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
+ pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
+
+ clk = clk_register(NULL, &clk_hw->hw);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ kfree(clk_hw);
+}
+
+static void __init of_dra7_apll_setup(struct device_node *node)
+{
+ struct dpll_data *ad = NULL;
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ int i;
+
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!ad || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = ad;
+ clk_hw->hw.init = init;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ init->name = node->name;
+ init->ops = &apll_ck_ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents < 1) {
+ pr_err("dra7 apll %s must have parent(s)\n", node->name);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < init->num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ init->parent_names = parent_names;
+
+ ad->control_reg = ti_clk_get_reg_addr(node, 0);
+ ad->idlest_reg = ti_clk_get_reg_addr(node, 1);
+
+ if (!ad->control_reg || !ad->idlest_reg)
+ goto cleanup;
+
+ ad->idlest_mask = 0x1;
+ ad->enable_mask = 0x3;
+
+ omap_clk_register_apll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(parent_names);
+ kfree(ad);
+ kfree(clk_hw);
+ kfree(init);
+}
+CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
new file mode 100644
index 000000000000..8912ff80af34
--- /dev/null
+++ b/drivers/clk/ti/autoidle.c
@@ -0,0 +1,133 @@
+/*
+ * TI clock autoidle support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+struct clk_ti_autoidle {
+ void __iomem *reg;
+ u8 shift;
+ u8 flags;
+ const char *name;
+ struct list_head node;
+};
+
+#define AUTOIDLE_LOW 0x1
+
+static LIST_HEAD(autoidle_clks);
+
+static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val &= ~(1 << clk->shift);
+ else
+ val |= (1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val |= (1 << clk->shift);
+ else
+ val &= ~(1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+/**
+ * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks
+ *
+ * Enables hardware autoidle for all registered DT clocks that support
+ * the feature.
+ */
+void of_ti_clk_allow_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ ti_allow_autoidle(c);
+}
+
+/**
+ * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks
+ *
+ * Disables hardware autoidle for all registered DT clocks that support
+ * the feature.
+ */
+void of_ti_clk_deny_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ ti_deny_autoidle(c);
+}
+
+/**
+ * of_ti_clk_autoidle_setup - sets up hardware autoidle for a clock
+ * @node: pointer to the clock device node
+ *
+ * Checks if a clock has hardware autoidle support or not (check
+ * for presence of 'ti,autoidle-shift' property in the device tree
+ * node) and sets up the hardware autoidle feature for the clock
+ * if available. If autoidle is available, the clock is also added
+ * to the autoidle list for later processing. Returns 0 on success,
+ * negative error value on failure.
+ */
+int __init of_ti_clk_autoidle_setup(struct device_node *node)
+{
+ u32 shift;
+ struct clk_ti_autoidle *clk;
+
+ /* Check if this clock has autoidle support or not */
+ if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
+ return 0;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+
+ if (!clk)
+ return -ENOMEM;
+
+ clk->shift = shift;
+ clk->name = node->name;
+ clk->reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!clk->reg) {
+ kfree(clk);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
+ clk->flags |= AUTOIDLE_LOW;
+
+ list_add(&clk->node, &autoidle_clks);
+
+ return 0;
+}
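
The autoidle list built above is passive until platform code toggles it. A rough usage sketch follows; the two example_* functions are made up, and the include line assumes the three of_ti_clk_*autoidle* helpers are exposed via linux/clk/ti.h like the other TI clock hooks in this series:

#include <linux/clk/ti.h>
#include <linux/of.h>
#include <linux/printk.h>

static void example_clk_of_setup(struct device_node *node)
{
	/* ... register the clock described by this node first ... */

	/* Record its autoidle bit if "ti,autoidle-shift" is present */
	if (of_ti_clk_autoidle_setup(node))
		pr_warn("failed to set up autoidle for %s\n", node->name);
}

static void example_pm_toggle(bool allow)
{
	/* e.g. deny autoidle around suspend paths, allow it again afterwards */
	if (allow)
		of_ti_clk_allow_autoidle_all();
	else
		of_ti_clk_deny_autoidle_all();
}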
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
new file mode 100644
index 000000000000..776ee4594bd4
--- /dev/null
+++ b/drivers/clk/ti/clk-33xx.c
@@ -0,0 +1,161 @@
+/*
+ * AM33XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am33xx_clks[] = {
+ DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+ DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+ DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+ DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+ DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK("cpu0", NULL, "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_div2_ck", "dpll_ddr_m2_div2_ck"),
+ DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+ DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+ DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+ DT_CLK(NULL, "cefuse_fck", "cefuse_fck"),
+ DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+ DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+ DT_CLK("481cc000.d_can", NULL, "dcan0_fck"),
+ DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+ DT_CLK("481d0000.d_can", NULL, "dcan1_fck"),
+ DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+ DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+ DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+ DT_CLK(NULL, "mmu_fck", "mmu_fck"),
+ DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+ DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+ DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+ DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "rng_fck", "rng_fck"),
+ DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+ DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+ DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+ DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+ DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+ DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+ DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+ DT_CLK(NULL, "usbotg_fck", "usbotg_fck"),
+ DT_CLK(NULL, "ieee5000_fck", "ieee5000_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "l4_rtc_gclk", "l4_rtc_gclk"),
+ DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+ DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+ DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+ DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+ DT_CLK(NULL, "l4fw_gclk", "l4fw_gclk"),
+ DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+ DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+ DT_CLK(NULL, "sysclk_div_ck", "sysclk_div_ck"),
+ DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+ DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "lcd_gclk", "lcd_gclk"),
+ DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+ DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+ DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+ DT_CLK(NULL, "sysclkout_pre_ck", "sysclkout_pre_ck"),
+ DT_CLK(NULL, "clkout2_div_ck", "clkout2_div_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "dbg_sysclk_ck", "dbg_sysclk_ck"),
+ DT_CLK(NULL, "dbg_clka_ck", "dbg_clka_ck"),
+ DT_CLK(NULL, "stm_pmd_clock_mux_ck", "stm_pmd_clock_mux_ck"),
+ DT_CLK(NULL, "trace_pmd_clk_mux_ck", "trace_pmd_clk_mux_ck"),
+ DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+ DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+ DT_CLK(NULL, "clkout2_ck", "clkout2_ck"),
+ DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "dpll_ddr_m2_ck",
+ "dpll_mpu_m2_ck",
+ "l3_gclk",
+ "l4hs_gclk",
+ "l4fw_gclk",
+ "l4ls_gclk",
+ /* Required for external peripherals like audio codecs */
+ "clkout2_ck",
+};
+
+int __init am33xx_dt_clk_init(void)
+{
+ struct clk *clk1, *clk2;
+
+ ti_dt_clocks_register(am33xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ /*
+ * TRM ERRATA: the default parent (TCLKIN) of timers 3 and 6 may not be
+ * physically present. In that case enabling the clock via hwmod fails
+ * with the default parent, while the timer probe assumes the clock is
+ * already enabled, which leads to a crash when the timer 3/6 registers
+ * are accessed in probe. Work around this by reparenting both timers
+ * to the master oscillator clock.
+ */
+
+ clk1 = clk_get_sys(NULL, "sys_clkin_ck");
+ clk2 = clk_get_sys(NULL, "timer3_fck");
+ clk_set_parent(clk2, clk1);
+
+ clk2 = clk_get_sys(NULL, "timer6_fck");
+ clk_set_parent(clk2, clk1);
+ /*
+ * The on-chip 32K RC oscillator is not an accurate clock source per the
+ * design/spec: a timer expected to expire after 60 s can fire after
+ * roughly 40 s instead, which no use case expects. Switch the WDT1
+ * clock source to the PRCM 32 kHz clock.
+ */
+ clk1 = clk_get_sys(NULL, "wdt1_fck");
+ clk2 = clk_get_sys(NULL, "clkdiv32k_ick");
+ clk_set_parent(clk1, clk2);
+
+ return 0;
+}
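The timer and watchdog fixups above use the generic clk consumer API: clk_get_sys() looks a clock up by name and clk_set_parent() switches its parent. Below is a minimal sketch of the same re-parenting with explicit error checking; the helper name reparent_to_sys_clkin() and its standalone form are illustrative assumptions, not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical helper: reparent one named clock to sys_clkin_ck. */
static int __init reparent_to_sys_clkin(const char *child)
{
	struct clk *parent, *clk;
	int ret;

	parent = clk_get_sys(NULL, "sys_clkin_ck");
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	clk = clk_get_sys(NULL, child);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_parent(clk, parent);
	if (ret)
		pr_err("failed to reparent %s to sys_clkin_ck: %d\n",
		       child, ret);

	return ret;
}

With such a helper, the two timer fixups above would reduce to reparent_to_sys_clkin("timer3_fck") and reparent_to_sys_clkin("timer6_fck").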
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
new file mode 100644
index 000000000000..d3230234f07b
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -0,0 +1,401 @@
+/*
+ * OMAP3 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+
+static struct ti_dt_clk omap3xxx_clks[] = {
+ DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
+ DT_CLK(NULL, "omap_32k_fck", "omap_32k_fck"),
+ DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
+ DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_38_4m_ck", "virt_38_4m_ck"),
+ DT_CLK(NULL, "osc_sys_ck", "osc_sys_ck"),
+ DT_CLK("twl", "fck", "osc_sys_ck"),
+ DT_CLK(NULL, "sys_ck", "sys_ck"),
+ DT_CLK(NULL, "omap_96m_alwon_fck", "omap_96m_alwon_fck"),
+ DT_CLK("etb", "emu_core_alwon_ck", "emu_core_alwon_ck"),
+ DT_CLK(NULL, "sys_altclk", "sys_altclk"),
+ DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+ DT_CLK(NULL, "sys_clkout1", "sys_clkout1"),
+ DT_CLK(NULL, "dpll1_ck", "dpll1_ck"),
+ DT_CLK(NULL, "dpll1_x2_ck", "dpll1_x2_ck"),
+ DT_CLK(NULL, "dpll1_x2m2_ck", "dpll1_x2m2_ck"),
+ DT_CLK(NULL, "dpll3_ck", "dpll3_ck"),
+ DT_CLK(NULL, "core_ck", "core_ck"),
+ DT_CLK(NULL, "dpll3_x2_ck", "dpll3_x2_ck"),
+ DT_CLK(NULL, "dpll3_m2_ck", "dpll3_m2_ck"),
+ DT_CLK(NULL, "dpll3_m2x2_ck", "dpll3_m2x2_ck"),
+ DT_CLK(NULL, "dpll3_m3_ck", "dpll3_m3_ck"),
+ DT_CLK(NULL, "dpll3_m3x2_ck", "dpll3_m3x2_ck"),
+ DT_CLK(NULL, "dpll4_ck", "dpll4_ck"),
+ DT_CLK(NULL, "dpll4_x2_ck", "dpll4_x2_ck"),
+ DT_CLK(NULL, "omap_96m_fck", "omap_96m_fck"),
+ DT_CLK(NULL, "cm_96m_fck", "cm_96m_fck"),
+ DT_CLK(NULL, "omap_54m_fck", "omap_54m_fck"),
+ DT_CLK(NULL, "omap_48m_fck", "omap_48m_fck"),
+ DT_CLK(NULL, "omap_12m_fck", "omap_12m_fck"),
+ DT_CLK(NULL, "dpll4_m2_ck", "dpll4_m2_ck"),
+ DT_CLK(NULL, "dpll4_m2x2_ck", "dpll4_m2x2_ck"),
+ DT_CLK(NULL, "dpll4_m3_ck", "dpll4_m3_ck"),
+ DT_CLK(NULL, "dpll4_m3x2_ck", "dpll4_m3x2_ck"),
+ DT_CLK(NULL, "dpll4_m4_ck", "dpll4_m4_ck"),
+ DT_CLK(NULL, "dpll4_m4x2_ck", "dpll4_m4x2_ck"),
+ DT_CLK(NULL, "dpll4_m5_ck", "dpll4_m5_ck"),
+ DT_CLK(NULL, "dpll4_m5x2_ck", "dpll4_m5x2_ck"),
+ DT_CLK(NULL, "dpll4_m6_ck", "dpll4_m6_ck"),
+ DT_CLK(NULL, "dpll4_m6x2_ck", "dpll4_m6x2_ck"),
+ DT_CLK("etb", "emu_per_alwon_ck", "emu_per_alwon_ck"),
+ DT_CLK(NULL, "clkout2_src_ck", "clkout2_src_ck"),
+ DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
+ DT_CLK(NULL, "corex2_fck", "corex2_fck"),
+ DT_CLK(NULL, "dpll1_fck", "dpll1_fck"),
+ DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+ DT_CLK(NULL, "arm_fck", "arm_fck"),
+ DT_CLK("etb", "emu_mpu_alwon_ck", "emu_mpu_alwon_ck"),
+ DT_CLK(NULL, "l3_ick", "l3_ick"),
+ DT_CLK(NULL, "l4_ick", "l4_ick"),
+ DT_CLK(NULL, "rm_ick", "rm_ick"),
+ DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
+ DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
+ DT_CLK(NULL, "core_96m_fck", "core_96m_fck"),
+ DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
+ DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
+ DT_CLK(NULL, "i2c3_fck", "i2c3_fck"),
+ DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
+ DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
+ DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
+ DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
+ DT_CLK(NULL, "core_48m_fck", "core_48m_fck"),
+ DT_CLK(NULL, "mcspi4_fck", "mcspi4_fck"),
+ DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
+ DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
+ DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
+ DT_CLK(NULL, "uart2_fck", "uart2_fck"),
+ DT_CLK(NULL, "uart1_fck", "uart1_fck"),
+ DT_CLK(NULL, "core_12m_fck", "core_12m_fck"),
+ DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
+ DT_CLK(NULL, "hdq_fck", "hdq_fck"),
+ DT_CLK(NULL, "core_l3_ick", "core_l3_ick"),
+ DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
+ DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
+ DT_CLK(NULL, "core_l4_ick", "core_l4_ick"),
+ DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
+ DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
+ DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
+ DT_CLK(NULL, "hdq_ick", "hdq_ick"),
+ DT_CLK("omap2_mcspi.4", "ick", "mcspi4_ick"),
+ DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
+ DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
+ DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi4_ick", "mcspi4_ick"),
+ DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
+ DT_CLK("omap_i2c.3", "ick", "i2c3_ick"),
+ DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
+ DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
+ DT_CLK(NULL, "i2c3_ick", "i2c3_ick"),
+ DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
+ DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
+ DT_CLK(NULL, "uart2_ick", "uart2_ick"),
+ DT_CLK(NULL, "uart1_ick", "uart1_ick"),
+ DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
+ DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
+ DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
+ DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
+ DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
+ DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
+ DT_CLK(NULL, "dss2_alwon_fck", "dss2_alwon_fck"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "dummy_ck"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "dummy_ck"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "dummy_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "dummy_ck"),
+ DT_CLK(NULL, "init_60m_fclk", "dummy_ck"),
+ DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
+ DT_CLK(NULL, "aes2_ick", "aes2_ick"),
+ DT_CLK(NULL, "wkup_32k_fck", "wkup_32k_fck"),
+ DT_CLK(NULL, "gpio1_dbck", "gpio1_dbck"),
+ DT_CLK(NULL, "sha12_ick", "sha12_ick"),
+ DT_CLK(NULL, "wdt2_fck", "wdt2_fck"),
+ DT_CLK("omap_wdt", "ick", "wdt2_ick"),
+ DT_CLK(NULL, "wdt2_ick", "wdt2_ick"),
+ DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
+ DT_CLK(NULL, "gpio1_ick", "gpio1_ick"),
+ DT_CLK(NULL, "omap_32ksync_ick", "omap_32ksync_ick"),
+ DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
+ DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
+ DT_CLK(NULL, "per_96m_fck", "per_96m_fck"),
+ DT_CLK(NULL, "per_48m_fck", "per_48m_fck"),
+ DT_CLK(NULL, "uart3_fck", "uart3_fck"),
+ DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
+ DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
+ DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
+ DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
+ DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
+ DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
+ DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
+ DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
+ DT_CLK(NULL, "per_32k_alwon_fck", "per_32k_alwon_fck"),
+ DT_CLK(NULL, "gpio6_dbck", "gpio6_dbck"),
+ DT_CLK(NULL, "gpio5_dbck", "gpio5_dbck"),
+ DT_CLK(NULL, "gpio4_dbck", "gpio4_dbck"),
+ DT_CLK(NULL, "gpio3_dbck", "gpio3_dbck"),
+ DT_CLK(NULL, "gpio2_dbck", "gpio2_dbck"),
+ DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
+ DT_CLK(NULL, "per_l4_ick", "per_l4_ick"),
+ DT_CLK(NULL, "gpio6_ick", "gpio6_ick"),
+ DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
+ DT_CLK(NULL, "gpio4_ick", "gpio4_ick"),
+ DT_CLK(NULL, "gpio3_ick", "gpio3_ick"),
+ DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
+ DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+ DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick"),
+ DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+ DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+ DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+ DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
+ DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
+ DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
+ DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
+ DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
+ DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
+ DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
+ DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp2_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
+ DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
+ DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+ DT_CLK("etb", "emu_src_ck", "emu_src_ck"),
+ DT_CLK(NULL, "emu_src_ck", "emu_src_ck"),
+ DT_CLK(NULL, "pclk_fck", "pclk_fck"),
+ DT_CLK(NULL, "pclkx2_fck", "pclkx2_fck"),
+ DT_CLK(NULL, "atclk_fck", "atclk_fck"),
+ DT_CLK(NULL, "traceclk_src_fck", "traceclk_src_fck"),
+ DT_CLK(NULL, "traceclk_fck", "traceclk_fck"),
+ DT_CLK(NULL, "secure_32k_fck", "secure_32k_fck"),
+ DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+ DT_CLK(NULL, "cpufreq_ck", "dpll1_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap34xx_omap36xx_clks[] = {
+ DT_CLK(NULL, "aes1_ick", "aes1_ick"),
+ DT_CLK("omap_rng", "ick", "rng_ick"),
+ DT_CLK("omap3-rom-rng", "ick", "rng_ick"),
+ DT_CLK(NULL, "sha11_ick", "sha11_ick"),
+ DT_CLK(NULL, "des1_ick", "des1_ick"),
+ DT_CLK(NULL, "cam_mclk", "cam_mclk"),
+ DT_CLK(NULL, "cam_ick", "cam_ick"),
+ DT_CLK(NULL, "csi2_96m_fck", "csi2_96m_fck"),
+ DT_CLK(NULL, "security_l3_ick", "security_l3_ick"),
+ DT_CLK(NULL, "pka_ick", "pka_ick"),
+ DT_CLK(NULL, "icr_ick", "icr_ick"),
+ DT_CLK("omap-aes", "ick", "aes2_ick"),
+ DT_CLK("omap-sham", "ick", "sha12_ick"),
+ DT_CLK(NULL, "des2_ick", "des2_ick"),
+ DT_CLK(NULL, "mspro_ick", "mspro_ick"),
+ DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
+ DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
+ DT_CLK(NULL, "sr1_fck", "sr1_fck"),
+ DT_CLK(NULL, "sr2_fck", "sr2_fck"),
+ DT_CLK(NULL, "sr_l4_ick", "sr_l4_ick"),
+ DT_CLK(NULL, "security_l4_ick2", "security_l4_ick2"),
+ DT_CLK(NULL, "wkup_l4_ick", "wkup_l4_ick"),
+ DT_CLK(NULL, "dpll2_fck", "dpll2_fck"),
+ DT_CLK(NULL, "iva2_ck", "iva2_ck"),
+ DT_CLK(NULL, "modem_fck", "modem_fck"),
+ DT_CLK(NULL, "sad2d_ick", "sad2d_ick"),
+ DT_CLK(NULL, "mad2d_ick", "mad2d_ick"),
+ DT_CLK(NULL, "mspro_fck", "mspro_fck"),
+ DT_CLK(NULL, "dpll2_ck", "dpll2_ck"),
+ DT_CLK(NULL, "dpll2_m2_ck", "dpll2_m2_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es2"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es2"),
+ DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es2"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es2"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es2"),
+ DT_CLK(NULL, "usim_fck", "usim_fck"),
+ DT_CLK(NULL, "usim_ick", "usim_ick"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap3430es1_clks[] = {
+ DT_CLK(NULL, "gfx_l3_ck", "gfx_l3_ck"),
+ DT_CLK(NULL, "gfx_l3_fck", "gfx_l3_fck"),
+ DT_CLK(NULL, "gfx_l3_ick", "gfx_l3_ick"),
+ DT_CLK(NULL, "gfx_cg1_ck", "gfx_cg1_ck"),
+ DT_CLK(NULL, "gfx_cg2_ck", "gfx_cg2_ck"),
+ DT_CLK(NULL, "d2d_26m_fck", "d2d_26m_fck"),
+ DT_CLK(NULL, "fshostusb_fck", "fshostusb_fck"),
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es1"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es1"),
+ DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es1"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es1"),
+ DT_CLK(NULL, "fac_ick", "fac_ick"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es1"),
+ DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es1"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick_3430es1"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es1"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "virt_16_8m_ck", "virt_16_8m_ck"),
+ DT_CLK(NULL, "dpll5_ck", "dpll5_ck"),
+ DT_CLK(NULL, "dpll5_m2_ck", "dpll5_m2_ck"),
+ DT_CLK(NULL, "sgx_fck", "sgx_fck"),
+ DT_CLK(NULL, "sgx_ick", "sgx_ick"),
+ DT_CLK(NULL, "cpefuse_fck", "cpefuse_fck"),
+ DT_CLK(NULL, "ts_fck", "ts_fck"),
+ DT_CLK(NULL, "usbtll_fck", "usbtll_fck"),
+ DT_CLK(NULL, "usbtll_ick", "usbtll_ick"),
+ DT_CLK("omap_hsmmc.2", "ick", "mmchs3_ick"),
+ DT_CLK(NULL, "mmchs3_ick", "mmchs3_ick"),
+ DT_CLK(NULL, "mmchs3_fck", "mmchs3_fck"),
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es2"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick_3430es2"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es2"),
+ DT_CLK(NULL, "usbhost_120m_fck", "usbhost_120m_fck"),
+ DT_CLK(NULL, "usbhost_48m_fck", "usbhost_48m_fck"),
+ DT_CLK(NULL, "usbhost_ick", "usbhost_ick"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk am35xx_clks[] = {
+ DT_CLK(NULL, "ipss_ick", "ipss_ick"),
+ DT_CLK(NULL, "rmii_ck", "rmii_ck"),
+ DT_CLK(NULL, "pclk_ck", "pclk_ck"),
+ DT_CLK(NULL, "emac_ick", "emac_ick"),
+ DT_CLK(NULL, "emac_fck", "emac_fck"),
+ DT_CLK("davinci_emac.0", NULL, "emac_ick"),
+ DT_CLK("davinci_mdio.0", NULL, "emac_fck"),
+ DT_CLK("vpfe-capture", "master", "vpfe_ick"),
+ DT_CLK("vpfe-capture", "slave", "vpfe_fck"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_am35xx"),
+ DT_CLK(NULL, "hsotgusb_fck", "hsotgusb_fck_am35xx"),
+ DT_CLK(NULL, "hecc_ck", "hecc_ck"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick_am35xx"),
+ DT_CLK(NULL, "uart4_fck", "uart4_fck_am35xx"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_clks[] = {
+ DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
+ DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "sdrc_ick",
+ "gpmc_fck",
+ "omapctrl_ick",
+};
+
+enum {
+ OMAP3_SOC_AM35XX,
+ OMAP3_SOC_OMAP3430_ES1,
+ OMAP3_SOC_OMAP3430_ES2_PLUS,
+ OMAP3_SOC_OMAP3630,
+ OMAP3_SOC_TI81XX,
+};
+
+static int __init omap3xxx_dt_clk_init(int soc_type)
+{
+ if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap3xxx_clks);
+
+ if (soc_type == OMAP3_SOC_AM35XX)
+ ti_dt_clocks_register(am35xx_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3630 || soc_type == OMAP3_SOC_AM35XX ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap36xx_am35xx_omap3430es2plus_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES1)
+ ti_dt_clocks_register(omap3430es1_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+ soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap36xx_omap3430es2plus_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+ soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap34xx_omap36xx_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap36xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 100000) % 10,
+ (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
+
+ if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+ omap3_clk_lock_dpll5();
+
+ return 0;
+}
+
+int __init omap3430_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3430_ES2_PLUS);
+}
+
+int __init omap3630_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3630);
+}
+
+int __init am35xx_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
+}
+
+int __init ti81xx_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
+}
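The DT_CLK() tables above map a (device name, connection id) pair onto a clock node name so that legacy clk_get() lookups keep resolving once the clock data comes from DT. As an illustration only (not part of the patch), a driver bound to the omap_hsmmc.1 platform device would pick up "mmchs2_ick" through the alias registered above roughly as sketched here; the probe function name and its error handling are assumptions.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: "ick" for omap_hsmmc.1 resolves to "mmchs2_ick"
 * via the DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick") alias above. */
static int example_hsmmc_probe(struct platform_device *pdev)
{
	struct clk *ick;
	int ret;

	ick = clk_get(&pdev->dev, "ick");
	if (IS_ERR(ick))
		return PTR_ERR(ick);

	ret = clk_prepare_enable(ick);
	if (ret)
		clk_put(ick);

	return ret;
}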
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
new file mode 100644
index 000000000000..67c8de572c50
--- /dev/null
+++ b/drivers/clk/ti/clk-43xx.c
@@ -0,0 +1,118 @@
+/*
+ * AM43XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am43xx_clks[] = {
+ DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+ DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+ DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+ DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+ DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+ DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+ DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+ DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+ DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+ DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+ DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+ DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+ DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+ DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+ DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+ DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+ DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+ DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+ DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+ DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+ DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+ DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+ DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+ DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+ DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+ DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+ DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+ DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+ DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+ DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+ DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+ DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+ DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+ DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+ DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+ DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "sysclk_div", "sysclk_div"),
+ DT_CLK(NULL, "disp_clk", "disp_clk"),
+ DT_CLK(NULL, "clk_32k_mosc_ck", "clk_32k_mosc_ck"),
+ DT_CLK(NULL, "clk_32k_tpm_ck", "clk_32k_tpm_ck"),
+ DT_CLK(NULL, "dpll_extdev_ck", "dpll_extdev_ck"),
+ DT_CLK(NULL, "dpll_extdev_m2_ck", "dpll_extdev_m2_ck"),
+ DT_CLK(NULL, "mux_synctimer32k_ck", "mux_synctimer32k_ck"),
+ DT_CLK(NULL, "synctimer_32kclk", "synctimer_32kclk"),
+ DT_CLK(NULL, "timer8_fck", "timer8_fck"),
+ DT_CLK(NULL, "timer9_fck", "timer9_fck"),
+ DT_CLK(NULL, "timer10_fck", "timer10_fck"),
+ DT_CLK(NULL, "timer11_fck", "timer11_fck"),
+ DT_CLK(NULL, "cpsw_50m_clkdiv", "cpsw_50m_clkdiv"),
+ DT_CLK(NULL, "cpsw_5m_clkdiv", "cpsw_5m_clkdiv"),
+ DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_m4_ck", "dpll_ddr_m4_ck"),
+ DT_CLK(NULL, "dpll_per_clkdcoldo", "dpll_per_clkdcoldo"),
+ DT_CLK(NULL, "dll_aging_clk_div", "dll_aging_clk_div"),
+ DT_CLK(NULL, "div_core_25m_ck", "div_core_25m_ck"),
+ DT_CLK(NULL, "func_12m_clk", "func_12m_clk"),
+ DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"),
+ DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"),
+ { .node_name = NULL },
+};
+
+int __init am43xx_dt_clk_init(void)
+{
+ ti_dt_clocks_register(am43xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
new file mode 100644
index 000000000000..ae00218b5da3
--- /dev/null
+++ b/drivers/clk/ti/clk-44xx.c
@@ -0,0 +1,316 @@
+/*
+ * OMAP4 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ 960000000
+
+static struct ti_dt_clk omap44xx_clks[] = {
+ DT_CLK(NULL, "extalt_clkin_ck", "extalt_clkin_ck"),
+ DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+ DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+ DT_CLK(NULL, "pad_slimbus_core_clks_ck", "pad_slimbus_core_clks_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+ DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "tie_low_clock_ck", "tie_low_clock_ck"),
+ DT_CLK(NULL, "utmi_phy_clkout_ck", "utmi_phy_clkout_ck"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+ DT_CLK(NULL, "xclk60motg_ck", "xclk60motg_ck"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux_ck", "abe_dpll_bypass_clk_mux_ck"),
+ DT_CLK(NULL, "abe_dpll_refclk_mux_ck", "abe_dpll_refclk_mux_ck"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "core_hsd_byp_clk_mux_ck", "core_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_m6x2_ck", "dpll_core_m6x2_ck"),
+ DT_CLK(NULL, "dbgclk_mux_ck", "dbgclk_mux_ck"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "ddrphy_ck", "ddrphy_ck"),
+ DT_CLK(NULL, "dpll_core_m5x2_ck", "dpll_core_m5x2_ck"),
+ DT_CLK(NULL, "div_core_ck", "div_core_ck"),
+ DT_CLK(NULL, "div_iva_hs_clk", "div_iva_hs_clk"),
+ DT_CLK(NULL, "div_mpu_hs_clk", "div_mpu_hs_clk"),
+ DT_CLK(NULL, "dpll_core_m4x2_ck", "dpll_core_m4x2_ck"),
+ DT_CLK(NULL, "dll_clk_div_ck", "dll_clk_div_ck"),
+ DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+ DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_m7x2_ck", "dpll_core_m7x2_ck"),
+ DT_CLK(NULL, "iva_hsd_byp_clk_mux_ck", "iva_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+ DT_CLK(NULL, "dpll_iva_m4x2_ck", "dpll_iva_m4x2_ck"),
+ DT_CLK(NULL, "dpll_iva_m5x2_ck", "dpll_iva_m5x2_ck"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "per_hs_clk_div_ck", "per_hs_clk_div_ck"),
+ DT_CLK(NULL, "per_hsd_byp_clk_mux_ck", "per_hsd_byp_clk_mux_ck"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+ DT_CLK(NULL, "dpll_per_m4x2_ck", "dpll_per_m4x2_ck"),
+ DT_CLK(NULL, "dpll_per_m5x2_ck", "dpll_per_m5x2_ck"),
+ DT_CLK(NULL, "dpll_per_m6x2_ck", "dpll_per_m6x2_ck"),
+ DT_CLK(NULL, "dpll_per_m7x2_ck", "dpll_per_m7x2_ck"),
+ DT_CLK(NULL, "usb_hs_clk_div_ck", "usb_hs_clk_div_ck"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo_ck", "dpll_usb_clkdcoldo_ck"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "ducati_clk_mux_ck", "ducati_clk_mux_ck"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_24mc_fclk", "func_24mc_fclk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_48mc_fclk", "func_48mc_fclk"),
+ DT_CLK(NULL, "func_64m_fclk", "func_64m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "init_60m_fclk", "init_60m_fclk"),
+ DT_CLK(NULL, "l3_div_ck", "l3_div_ck"),
+ DT_CLK(NULL, "l4_div_ck", "l4_div_ck"),
+ DT_CLK(NULL, "lp_clk_div_ck", "lp_clk_div_ck"),
+ DT_CLK(NULL, "l4_wkup_clk_mux_ck", "l4_wkup_clk_mux_ck"),
+ DT_CLK("smp_twd", NULL, "mpu_periphclk"),
+ DT_CLK(NULL, "ocp_abe_iclk", "ocp_abe_iclk"),
+ DT_CLK(NULL, "per_abe_24m_fclk", "per_abe_24m_fclk"),
+ DT_CLK(NULL, "per_abe_nc_fclk", "per_abe_nc_fclk"),
+ DT_CLK(NULL, "syc_clk_div_ck", "syc_clk_div_ck"),
+ DT_CLK(NULL, "aes1_fck", "aes1_fck"),
+ DT_CLK(NULL, "aes2_fck", "aes2_fck"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "func_dmic_abe_gfclk"),
+ DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+ DT_CLK(NULL, "dss_tv_clk", "dss_tv_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_fck", "dss_fck"),
+ DT_CLK("omapdss_dss", "ick", "dss_fck"),
+ DT_CLK(NULL, "fdif_fck", "fdif_fck"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "sgx_clk_mux", "sgx_clk_mux"),
+ DT_CLK(NULL, "hsi_fck", "hsi_fck"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "func_mcasp_abe_gfclk"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "func_mcbsp1_gfclk"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "func_mcbsp2_gfclk"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "func_mcbsp3_gfclk"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "mcbsp4_sync_mux_ck"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "per_mcbsp4_gfclk"),
+ DT_CLK(NULL, "hsmmc1_fclk", "hsmmc1_fclk"),
+ DT_CLK(NULL, "hsmmc2_fclk", "hsmmc2_fclk"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "ocp2scp_usb_phy_phy_48m"),
+ DT_CLK(NULL, "sha2md5_fck", "sha2md5_fck"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "slimbus1_fclk_1"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "slimbus1_fclk_0"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "slimbus1_fclk_2"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "slimbus2_fclk_1"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "slimbus2_fclk_0"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "slimbus2_slimbus_clk"),
+ DT_CLK(NULL, "smartreflex_core_fck", "smartreflex_core_fck"),
+ DT_CLK(NULL, "smartreflex_iva_fck", "smartreflex_iva_fck"),
+ DT_CLK(NULL, "smartreflex_mpu_fck", "smartreflex_mpu_fck"),
+ DT_CLK(NULL, "dmt1_clk_mux", "dmt1_clk_mux"),
+ DT_CLK(NULL, "cm2_dm10_mux", "cm2_dm10_mux"),
+ DT_CLK(NULL, "cm2_dm11_mux", "cm2_dm11_mux"),
+ DT_CLK(NULL, "cm2_dm2_mux", "cm2_dm2_mux"),
+ DT_CLK(NULL, "cm2_dm3_mux", "cm2_dm3_mux"),
+ DT_CLK(NULL, "cm2_dm4_mux", "cm2_dm4_mux"),
+ DT_CLK(NULL, "timer5_sync_mux", "timer5_sync_mux"),
+ DT_CLK(NULL, "timer6_sync_mux", "timer6_sync_mux"),
+ DT_CLK(NULL, "timer7_sync_mux", "timer7_sync_mux"),
+ DT_CLK(NULL, "timer8_sync_mux", "timer8_sync_mux"),
+ DT_CLK(NULL, "cm2_dm9_mux", "cm2_dm9_mux"),
+ DT_CLK(NULL, "usb_host_fs_fck", "usb_host_fs_fck"),
+ DT_CLK("usbhs_omap", "fs_fck", "usb_host_fs_fck"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "usb_host_hs_func48mclk"),
+ DT_CLK(NULL, "usb_host_hs_fck", "usb_host_hs_fck"),
+ DT_CLK("usbhs_omap", "hs_fck", "usb_host_hs_fck"),
+ DT_CLK(NULL, "otg_60m_gfclk", "otg_60m_gfclk"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "usb_otg_hs_xclk"),
+ DT_CLK(NULL, "usb_otg_hs_ick", "usb_otg_hs_ick"),
+ DT_CLK("musb-omap2430", "ick", "usb_otg_hs_ick"),
+ DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+ DT_CLK(NULL, "usb_tll_hs_ick", "usb_tll_hs_ick"),
+ DT_CLK("usbhs_omap", "usbtll_ick", "usb_tll_hs_ick"),
+ DT_CLK("usbhs_tll", "usbtll_ick", "usb_tll_hs_ick"),
+ DT_CLK(NULL, "usim_ck", "usim_ck"),
+ DT_CLK(NULL, "usim_fclk", "usim_fclk"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "pmd_stm_clock_mux_ck"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "pmd_trace_clk_mux_ck"),
+ DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+ DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+ DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+ DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+ DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+ DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+ DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+ DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+ DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+ DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+ DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+ DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+ DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+ DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+ DT_CLK(NULL, "auxclk4_src_ck", "auxclk4_src_ck"),
+ DT_CLK(NULL, "auxclk4_ck", "auxclk4_ck"),
+ DT_CLK(NULL, "auxclkreq4_ck", "auxclkreq4_ck"),
+ DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"),
+ DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"),
+ DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"),
+ DT_CLK("50000000.gpmc", "fck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("usbhs_tll", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("omap_timer.1", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.2", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.3", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.4", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.9", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.10", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.11", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("omap_timer.5", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.6", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.7", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("omap_timer.8", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4a318000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK("40138000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013a000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013c000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK("4013e000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+ DT_CLK(NULL, "cpufreq_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "bandgap_fclk", "bandgap_fclk"),
+ DT_CLK(NULL, "div_ts_ck", "div_ts_ck"),
+ DT_CLK(NULL, "bandgap_ts_fclk", "bandgap_ts_fclk"),
+ { .node_name = NULL },
+};
+
+int __init omap4xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap44xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ /*
+ * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+ * domain can transition to retention state when not in use.
+ */
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP4_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ /*
+ * On OMAP4460 the ABE DPLL fails to turn on if it is in an idle
+ * low-power state when the ABE clock domain is enabled. Work around
+ * this by locking the ABE DPLL at boot. Lock it in any case to avoid
+ * issues with audio.
+ */
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_refclk_mux_ck");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP4_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ return 0;
+}
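omap4xxx_dt_clk_init() locks the USB and ABE DPLLs by re-parenting the ABE reference mux to sys_32k_ck and then requesting the default rates defined earlier in the file. A minimal sketch of that parent-then-rate sequence with explicit lookup checks is shown below; the helper name omap4_lock_abe_dpll() is hypothetical and the sketch is not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical helper mirroring the ABE DPLL setup done above; it uses
 * the OMAP4_DPLL_ABE_DEFFREQ default-frequency macro from this file. */
static int __init omap4_lock_abe_dpll(void)
{
	struct clk *ref, *parent, *dpll;
	int ret;

	ref = clk_get_sys(NULL, "abe_dpll_refclk_mux_ck");
	parent = clk_get_sys(NULL, "sys_32k_ck");
	dpll = clk_get_sys(NULL, "dpll_abe_ck");
	if (IS_ERR(ref) || IS_ERR(parent) || IS_ERR(dpll))
		return -ENODEV;

	/* The parent must be switched before the rate (lock) request. */
	ret = clk_set_parent(ref, parent);
	if (!ret)
		ret = clk_set_rate(dpll, OMAP4_DPLL_ABE_DEFFREQ);
	if (ret)
		pr_err("%s: failed to configure ABE DPLL: %d\n",
		       __func__, ret);

	return ret;
}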
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
new file mode 100644
index 000000000000..0ef9f581286b
--- /dev/null
+++ b/drivers/clk/ti/clk-54xx.c
@@ -0,0 +1,255 @@
+/*
+ * OMAP5 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#define OMAP5_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP543x TRM, section "3.6.3.9.5 DPLL_USB Preferred Settings"
+ * states it must be at 960MHz
+ */
+#define OMAP5_DPLL_USB_DEFFREQ 960000000
+
+static struct ti_dt_clk omap54xx_clks[] = {
+ DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+ DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+ DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin"),
+ DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+ DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "abe_iclk", "abe_iclk"),
+ DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_h21x2_ck", "dpll_core_h21x2_ck"),
+ DT_CLK(NULL, "c2c_fclk", "c2c_fclk"),
+ DT_CLK(NULL, "c2c_iclk", "c2c_iclk"),
+ DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+ DT_CLK(NULL, "dpll_core_h11x2_ck", "dpll_core_h11x2_ck"),
+ DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+ DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+ DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+ DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+ DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+ DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+ DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+ DT_CLK(NULL, "dpll_iva_h11x2_ck", "dpll_iva_h11x2_ck"),
+ DT_CLK(NULL, "dpll_iva_h12x2_ck", "dpll_iva_h12x2_ck"),
+ DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+ DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+ DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+ DT_CLK(NULL, "dpll_unipro1_ck", "dpll_unipro1_ck"),
+ DT_CLK(NULL, "dpll_unipro1_clkdcoldo", "dpll_unipro1_clkdcoldo"),
+ DT_CLK(NULL, "dpll_unipro1_m2_ck", "dpll_unipro1_m2_ck"),
+ DT_CLK(NULL, "dpll_unipro2_ck", "dpll_unipro2_ck"),
+ DT_CLK(NULL, "dpll_unipro2_clkdcoldo", "dpll_unipro2_clkdcoldo"),
+ DT_CLK(NULL, "dpll_unipro2_m2_ck", "dpll_unipro2_m2_ck"),
+ DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "dss_syc_gfclk_div", "dss_syc_gfclk_div"),
+ DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+ DT_CLK(NULL, "gpu_l3_iclk", "gpu_l3_iclk"),
+ DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+ DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+ DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+ DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+ DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+ DT_CLK(NULL, "lli_txphy_clk", "lli_txphy_clk"),
+ DT_CLK(NULL, "lli_txphy_ls_clk", "lli_txphy_ls_clk"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "mmc1_32khz_clk"),
+ DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "usb_host_hs_hsic480m_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "usb_host_hs_hsic60m_p3_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "usb_otg_ss_refclk960m"),
+ DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+ DT_CLK(NULL, "dmic_gfclk", "dmic_gfclk"),
+ DT_CLK(NULL, "fdif_fclk", "fdif_fclk"),
+ DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+ DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+ DT_CLK(NULL, "hsi_fclk", "hsi_fclk"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+ DT_CLK(NULL, "mcasp_gfclk", "mcasp_gfclk"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "mcbsp1_gfclk"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "mcbsp2_gfclk"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "mcbsp3_gfclk"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk", "mmc1_fclk"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+ DT_CLK(NULL, "mmc2_fclk", "mmc2_fclk"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+ DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+ DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+ DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+ DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+ DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+ DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+ DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+ DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+ DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+ DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+ DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+ DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+ DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("omap_timer.1", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.2", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.3", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.4", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.9", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.10", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.11", "sys_ck", "sys_clkin"),
+ DT_CLK("omap_timer.5", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.6", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.7", "sys_ck", "dss_syc_gfclk_div"),
+ DT_CLK("omap_timer.8", "sys_ck", "dss_syc_gfclk_div"),
+ { .node_name = NULL },
+};
+
+int __init omap5xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap54xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_m2_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ/2);
+ if (rc)
+ pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
new file mode 100644
index 000000000000..9977653f2d63
--- /dev/null
+++ b/drivers/clk/ti/clk-7xx.c
@@ -0,0 +1,332 @@
+/*
+ * DRA7 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#define DRA7_DPLL_ABE_DEFFREQ 361267200
+#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
+
+
+static struct ti_dt_clk dra7xx_clks[] = {
+ DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
+ DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
+ DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"),
+ DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"),
+ DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"),
+ DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"),
+ DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"),
+ DT_CLK(NULL, "pciesref_acs_clk_ck", "pciesref_acs_clk_ck"),
+ DT_CLK(NULL, "ref_clkin0_ck", "ref_clkin0_ck"),
+ DT_CLK(NULL, "ref_clkin1_ck", "ref_clkin1_ck"),
+ DT_CLK(NULL, "ref_clkin2_ck", "ref_clkin2_ck"),
+ DT_CLK(NULL, "ref_clkin3_ck", "ref_clkin3_ck"),
+ DT_CLK(NULL, "rmii_clk_ck", "rmii_clk_ck"),
+ DT_CLK(NULL, "sdvenc_clkin_ck", "sdvenc_clkin_ck"),
+ DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+ DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+ DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+ DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_20000000_ck", "virt_20000000_ck"),
+ DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+ DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+ DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+ DT_CLK(NULL, "sys_clkin1", "sys_clkin1"),
+ DT_CLK(NULL, "sys_clkin2", "sys_clkin2"),
+ DT_CLK(NULL, "usb_otg_clkin_ck", "usb_otg_clkin_ck"),
+ DT_CLK(NULL, "video1_clkin_ck", "video1_clkin_ck"),
+ DT_CLK(NULL, "video1_m2_clkin_ck", "video1_m2_clkin_ck"),
+ DT_CLK(NULL, "video2_clkin_ck", "video2_clkin_ck"),
+ DT_CLK(NULL, "video2_m2_clkin_ck", "video2_m2_clkin_ck"),
+ DT_CLK(NULL, "abe_dpll_sys_clk_mux", "abe_dpll_sys_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+ DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+ DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+ DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+ DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+ DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+ DT_CLK(NULL, "abe_clk", "abe_clk"),
+ DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+ DT_CLK(NULL, "abe_giclk_div", "abe_giclk_div"),
+ DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+ DT_CLK(NULL, "abe_sys_clk_div", "abe_sys_clk_div"),
+ DT_CLK(NULL, "adc_gfclk_mux", "adc_gfclk_mux"),
+ DT_CLK(NULL, "dpll_pcie_ref_ck", "dpll_pcie_ref_ck"),
+ DT_CLK(NULL, "dpll_pcie_ref_m2ldo_ck", "dpll_pcie_ref_m2ldo_ck"),
+ DT_CLK(NULL, "apll_pcie_ck", "apll_pcie_ck"),
+ DT_CLK(NULL, "apll_pcie_clkvcoldo", "apll_pcie_clkvcoldo"),
+ DT_CLK(NULL, "apll_pcie_clkvcoldo_div", "apll_pcie_clkvcoldo_div"),
+ DT_CLK(NULL, "apll_pcie_m2_ck", "apll_pcie_m2_ck"),
+ DT_CLK(NULL, "sys_clk1_dclk_div", "sys_clk1_dclk_div"),
+ DT_CLK(NULL, "sys_clk2_dclk_div", "sys_clk2_dclk_div"),
+ DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+ DT_CLK(NULL, "per_abe_x1_dclk_div", "per_abe_x1_dclk_div"),
+ DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+ DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+ DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+ DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+ DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+ DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+ DT_CLK(NULL, "mpu_dclk_div", "mpu_dclk_div"),
+ DT_CLK(NULL, "dsp_dpll_hs_clk_div", "dsp_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_dsp_ck", "dpll_dsp_ck"),
+ DT_CLK(NULL, "dpll_dsp_m2_ck", "dpll_dsp_m2_ck"),
+ DT_CLK(NULL, "dsp_gclk_div", "dsp_gclk_div"),
+ DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+ DT_CLK(NULL, "dpll_iva_m2_ck", "dpll_iva_m2_ck"),
+ DT_CLK(NULL, "iva_dclk", "iva_dclk"),
+ DT_CLK(NULL, "dpll_gpu_ck", "dpll_gpu_ck"),
+ DT_CLK(NULL, "dpll_gpu_m2_ck", "dpll_gpu_m2_ck"),
+ DT_CLK(NULL, "gpu_dclk", "gpu_dclk"),
+ DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+ DT_CLK(NULL, "core_dpll_out_dclk_div", "core_dpll_out_dclk_div"),
+ DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+ DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+ DT_CLK(NULL, "emif_phy_dclk_div", "emif_phy_dclk_div"),
+ DT_CLK(NULL, "dpll_gmac_ck", "dpll_gmac_ck"),
+ DT_CLK(NULL, "dpll_gmac_m2_ck", "dpll_gmac_m2_ck"),
+ DT_CLK(NULL, "gmac_250m_dclk_div", "gmac_250m_dclk_div"),
+ DT_CLK(NULL, "video2_dclk_div", "video2_dclk_div"),
+ DT_CLK(NULL, "video1_dclk_div", "video1_dclk_div"),
+ DT_CLK(NULL, "hdmi_dclk_div", "hdmi_dclk_div"),
+ DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+ DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+ DT_CLK(NULL, "func_96m_aon_dclk_div", "func_96m_aon_dclk_div"),
+ DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+ DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+ DT_CLK(NULL, "l3init_480m_dclk_div", "l3init_480m_dclk_div"),
+ DT_CLK(NULL, "usb_otg_dclk_div", "usb_otg_dclk_div"),
+ DT_CLK(NULL, "sata_dclk_div", "sata_dclk_div"),
+ DT_CLK(NULL, "dpll_pcie_ref_m2_ck", "dpll_pcie_ref_m2_ck"),
+ DT_CLK(NULL, "pcie2_dclk_div", "pcie2_dclk_div"),
+ DT_CLK(NULL, "pcie_dclk_div", "pcie_dclk_div"),
+ DT_CLK(NULL, "emu_dclk_div", "emu_dclk_div"),
+ DT_CLK(NULL, "secure_32k_dclk_div", "secure_32k_dclk_div"),
+ DT_CLK(NULL, "eve_dpll_hs_clk_div", "eve_dpll_hs_clk_div"),
+ DT_CLK(NULL, "dpll_eve_ck", "dpll_eve_ck"),
+ DT_CLK(NULL, "dpll_eve_m2_ck", "dpll_eve_m2_ck"),
+ DT_CLK(NULL, "eve_dclk_div", "eve_dclk_div"),
+ DT_CLK(NULL, "clkoutmux0_clk_mux", "clkoutmux0_clk_mux"),
+ DT_CLK(NULL, "clkoutmux1_clk_mux", "clkoutmux1_clk_mux"),
+ DT_CLK(NULL, "clkoutmux2_clk_mux", "clkoutmux2_clk_mux"),
+ DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+ DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+ DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+ DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+ DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+ DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+ DT_CLK(NULL, "dpll_ddr_h11x2_ck", "dpll_ddr_h11x2_ck"),
+ DT_CLK(NULL, "dpll_dsp_x2_ck", "dpll_dsp_x2_ck"),
+ DT_CLK(NULL, "dpll_dsp_m3x2_ck", "dpll_dsp_m3x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_x2_ck", "dpll_gmac_x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h11x2_ck", "dpll_gmac_h11x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h12x2_ck", "dpll_gmac_h12x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_h13x2_ck", "dpll_gmac_h13x2_ck"),
+ DT_CLK(NULL, "dpll_gmac_m3x2_ck", "dpll_gmac_m3x2_ck"),
+ DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+ DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+ DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+ DT_CLK(NULL, "dpll_per_h13x2_ck", "dpll_per_h13x2_ck"),
+ DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+ DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+ DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+ DT_CLK(NULL, "eve_clk", "eve_clk"),
+ DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+ DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+ DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+ DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+ DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+ DT_CLK(NULL, "gmii_m_clk_div", "gmii_m_clk_div"),
+ DT_CLK(NULL, "hdmi_clk2_div", "hdmi_clk2_div"),
+ DT_CLK(NULL, "hdmi_div_clk", "hdmi_div_clk"),
+ DT_CLK(NULL, "hdmi_dpll_clk_mux", "hdmi_dpll_clk_mux"),
+ DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+ DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+ DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+ DT_CLK(NULL, "mlb_clk", "mlb_clk"),
+ DT_CLK(NULL, "mlbp_clk", "mlbp_clk"),
+ DT_CLK(NULL, "per_abe_x1_gfclk2_div", "per_abe_x1_gfclk2_div"),
+ DT_CLK(NULL, "timer_sys_clk_div", "timer_sys_clk_div"),
+ DT_CLK(NULL, "video1_clk2_div", "video1_clk2_div"),
+ DT_CLK(NULL, "video1_div_clk", "video1_div_clk"),
+ DT_CLK(NULL, "video1_dpll_clk_mux", "video1_dpll_clk_mux"),
+ DT_CLK(NULL, "video2_clk2_div", "video2_clk2_div"),
+ DT_CLK(NULL, "video2_div_clk", "video2_div_clk"),
+ DT_CLK(NULL, "video2_dpll_clk_mux", "video2_dpll_clk_mux"),
+ DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+ DT_CLK(NULL, "dss_hdmi_clk", "dss_hdmi_clk"),
+ DT_CLK(NULL, "dss_video1_clk", "dss_video1_clk"),
+ DT_CLK(NULL, "dss_video2_clk", "dss_video2_clk"),
+ DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+ DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+ DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+ DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+ DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+ DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+ DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+ DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+ DT_CLK(NULL, "mmc1_clk32k", "mmc1_clk32k"),
+ DT_CLK(NULL, "mmc2_clk32k", "mmc2_clk32k"),
+ DT_CLK(NULL, "mmc3_clk32k", "mmc3_clk32k"),
+ DT_CLK(NULL, "mmc4_clk32k", "mmc4_clk32k"),
+ DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "usb_otg_ss1_refclk960m"),
+ DT_CLK(NULL, "usb_otg_ss2_refclk960m", "usb_otg_ss2_refclk960m"),
+ DT_CLK(NULL, "usb_phy1_always_on_clk32k", "usb_phy1_always_on_clk32k"),
+ DT_CLK(NULL, "usb_phy2_always_on_clk32k", "usb_phy2_always_on_clk32k"),
+ DT_CLK(NULL, "usb_phy3_always_on_clk32k", "usb_phy3_always_on_clk32k"),
+ DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"),
+ DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"),
+ DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"),
+ DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"),
+ DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"),
+ DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+ DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+ DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1_gfclk_mux"),
+ DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+ DT_CLK(NULL, "mcasp1_ahclkr_mux", "mcasp1_ahclkr_mux"),
+ DT_CLK(NULL, "mcasp1_ahclkx_mux", "mcasp1_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "mcasp1_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp2_ahclkr_mux", "mcasp2_ahclkr_mux"),
+ DT_CLK(NULL, "mcasp2_ahclkx_mux", "mcasp2_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "mcasp2_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp3_ahclkx_mux", "mcasp3_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "mcasp3_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp4_ahclkx_mux", "mcasp4_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "mcasp4_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp5_ahclkx_mux", "mcasp5_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "mcasp5_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp6_ahclkx_mux", "mcasp6_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "mcasp6_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "mcasp7_ahclkx_mux"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "mcasp7_aux_gfclk_mux"),
+ DT_CLK(NULL, "mcasp8_ahclk_mux", "mcasp8_ahclk_mux"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "mcasp8_aux_gfclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+ DT_CLK(NULL, "mmc1_fclk_div", "mmc1_fclk_div"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+ DT_CLK(NULL, "mmc2_fclk_div", "mmc2_fclk_div"),
+ DT_CLK(NULL, "mmc3_gfclk_mux", "mmc3_gfclk_mux"),
+ DT_CLK(NULL, "mmc3_gfclk_div", "mmc3_gfclk_div"),
+ DT_CLK(NULL, "mmc4_gfclk_mux", "mmc4_gfclk_mux"),
+ DT_CLK(NULL, "mmc4_gfclk_div", "mmc4_gfclk_div"),
+ DT_CLK(NULL, "qspi_gfclk_mux", "qspi_gfclk_mux"),
+ DT_CLK(NULL, "qspi_gfclk_div", "qspi_gfclk_div"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+ DT_CLK(NULL, "timer13_gfclk_mux", "timer13_gfclk_mux"),
+ DT_CLK(NULL, "timer14_gfclk_mux", "timer14_gfclk_mux"),
+ DT_CLK(NULL, "timer15_gfclk_mux", "timer15_gfclk_mux"),
+ DT_CLK(NULL, "timer16_gfclk_mux", "timer16_gfclk_mux"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+ DT_CLK(NULL, "uart10_gfclk_mux", "uart10_gfclk_mux"),
+ DT_CLK(NULL, "uart1_gfclk_mux", "uart1_gfclk_mux"),
+ DT_CLK(NULL, "uart2_gfclk_mux", "uart2_gfclk_mux"),
+ DT_CLK(NULL, "uart3_gfclk_mux", "uart3_gfclk_mux"),
+ DT_CLK(NULL, "uart4_gfclk_mux", "uart4_gfclk_mux"),
+ DT_CLK(NULL, "uart5_gfclk_mux", "uart5_gfclk_mux"),
+ DT_CLK(NULL, "uart6_gfclk_mux", "uart6_gfclk_mux"),
+ DT_CLK(NULL, "uart7_gfclk_mux", "uart7_gfclk_mux"),
+ DT_CLK(NULL, "uart8_gfclk_mux", "uart8_gfclk_mux"),
+ DT_CLK(NULL, "uart9_gfclk_mux", "uart9_gfclk_mux"),
+ DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"),
+ DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"),
+ DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"),
+ DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+ DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+ DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+ DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+ DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+ DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+ DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+ DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+ DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+ DT_CLK("omap_wdt", "ick", "dummy_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin2"),
+ DT_CLK("48820000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48822000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48824000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK("48826000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
+ { .node_name = NULL },
+};
+
+int __init dra7xx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck;
+
+ ti_dt_clocks_register(dra7xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
+ sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
+ dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
+
+ rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
+ if (!rc)
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
+
+ return rc;
+}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
new file mode 100644
index 000000000000..b1a6f7144f3f
--- /dev/null
+++ b/drivers/clk/ti/clk.c
@@ -0,0 +1,167 @@
+/*
+ * TI clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int ti_dt_clk_memmap_index;
+struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+/**
+ * ti_dt_clocks_register - register DT alias clocks during boot
+ * @oclks: list of clocks to register
+ *
+ * Register alias or non-standard DT clock entries during boot. By
+ * default, DT clocks are found based on their node name. If any
+ * additional con-id / dev-id -> clock mapping is required, use this
+ * function to list these.
+ */
+void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+{
+ struct ti_dt_clk *c;
+ struct device_node *node;
+ struct clk *clk;
+ struct of_phandle_args clkspec;
+
+ for (c = oclks; c->node_name != NULL; c++) {
+ node = of_find_node_by_name(NULL, c->node_name);
+ clkspec.np = node;
+ clk = of_clk_get_from_provider(&clkspec);
+
+ if (!IS_ERR(clk)) {
+ c->lk.clk = clk;
+ clkdev_add(&c->lk);
+ } else {
+ pr_warn("failed to lookup clock node %s\n",
+ c->node_name);
+ }
+ }
+}
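+
+/*
+ * Usage sketch (illustrative only; "my_soc_clks" is a hypothetical
+ * table, see the dra7xx_clks table above for a real per-SoC example):
+ *
+ *  static struct ti_dt_clk my_soc_clks[] __initdata = {
+ *          DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+ *          DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ *          { .node_name = NULL },
+ *  };
+ *
+ *  ti_dt_clocks_register(my_soc_clks);
+ *
+ * Each entry is looked up by its DT node name and exported as a clkdev
+ * alias for the given dev-id / con-id pair.
+ */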
+
+struct clk_init_item {
+ struct device_node *node;
+ struct clk_hw *hw;
+ ti_of_clk_init_cb_t func;
+ struct list_head link;
+};
+
+static LIST_HEAD(retry_list);
+
+/**
+ * ti_clk_retry_init - retries a failed clock init at a later phase
+ * @node: device node for the clock
+ * @hw: partially initialized clk_hw struct for the clock
+ * @func: init function to be called for the clock
+ *
+ * Adds a failed clock init to the retry list. The retry list is parsed
+ * once all the other clocks have been initialized.
+ */
+int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+ ti_of_clk_init_cb_t func)
+{
+ struct clk_init_item *retry;
+
+ pr_debug("%s: adding to retry list...\n", node->name);
+ retry = kzalloc(sizeof(*retry), GFP_KERNEL);
+ if (!retry)
+ return -ENOMEM;
+
+ retry->node = node;
+ retry->func = func;
+ retry->hw = hw;
+ list_add(&retry->link, &retry_list);
+
+ return 0;
+}
+
+/**
+ * ti_clk_get_reg_addr - get register address for a clock register
+ * @node: device node for the clock
+ * @index: register index from the clock node
+ *
+ * Builds a clock register address from device tree information. The
+ * returned value is not a real I/O address but a struct clk_omap_reg
+ * (memmap index plus register offset) packed into a pointer-sized cookie.
+ */
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
+{
+ struct clk_omap_reg *reg;
+ u32 val;
+ u32 tmp;
+
+ reg = (struct clk_omap_reg *)&tmp;
+ reg->index = ti_dt_clk_memmap_index;
+
+ if (of_property_read_u32_index(node, "reg", index, &val)) {
+ pr_err("%s must have reg[%d]!\n", node->name, index);
+ return NULL;
+ }
+
+ reg->offset = val;
+
+ return (void __iomem *)tmp;
+}
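+
+/*
+ * Sketch of how a platform's ti_clk_ll_ops implementation can decode
+ * the cookie returned above (clk_memmaps[] is a hypothetical table of
+ * iomem bases, one per memmap index; the real mapping is owned by the
+ * platform code providing ti_clk_ll_ops):
+ *
+ *  static u32 my_clk_readl(void __iomem *reg)
+ *  {
+ *          struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+ *
+ *          return readl(clk_memmaps[r->index] + r->offset);
+ *  }
+ *
+ * Only the memmap index and register offset are stored, so the struct
+ * fits in the pointer-sized cookie used as a register "address".
+ */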
+
+/**
+ * ti_dt_clk_init_provider - init master clock provider
+ * @parent: master node
+ * @index: internal index for clk_reg_ops
+ *
+ * Initializes a master clock IP block and its child clock nodes.
+ * The memory map index is used for accessing the register space of the
+ * IP block and all the clocks under it.
+ */
+void ti_dt_clk_init_provider(struct device_node *parent, int index)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct device_node *clocks;
+ of_clk_init_cb_t clk_init_cb;
+ struct clk_init_item *retry;
+ struct clk_init_item *tmp;
+
+ ti_dt_clk_memmap_index = index;
+
+ /* get clocks for this parent */
+ clocks = of_get_child_by_name(parent, "clocks");
+ if (!clocks) {
+ pr_err("%s missing 'clocks' child node.\n", parent->name);
+ return;
+ }
+
+ for_each_child_of_node(clocks, np) {
+ match = of_match_node(&__clk_of_table, np);
+ if (!match)
+ continue;
+ clk_init_cb = (of_clk_init_cb_t)match->data;
+ pr_debug("%s: initializing: %s\n", __func__, np->name);
+ clk_init_cb(np);
+ }
+
+ list_for_each_entry_safe(retry, tmp, &retry_list, link) {
+ pr_debug("retry-init: %s\n", retry->node->name);
+ retry->func(retry->hw, retry->node);
+ list_del(&retry->link);
+ kfree(retry);
+ }
+}
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
new file mode 100644
index 000000000000..f1e0038d76ac
--- /dev/null
+++ b/drivers/clk/ti/clockdomain.c
@@ -0,0 +1,70 @@
+/*
+ * OMAP clockdomain support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static void __init of_ti_clockdomain_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_hw *clk_hw;
+ const char *clkdm_name = node->name;
+ int i;
+ int num_clks;
+
+ num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+
+ for (i = 0; i < num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+ pr_warn("can't setup clkdm for basic clk %s\n",
+ __clk_get_name(clk));
+ continue;
+ }
+ clk_hw = __clk_get_hw(clk);
+ to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+ omap2_init_clk_clkdm(clk_hw);
+ }
+}
+
+static struct of_device_id ti_clkdm_match_table[] __initdata = {
+ { .compatible = "ti,clockdomain" },
+ { }
+};
+
+/**
+ * ti_dt_clockdomains_setup - setup device tree clockdomains
+ *
+ * Initializes clockdomain nodes for a SoC. This parses through all the
+ * nodes with compatible = "ti,clockdomain", and adds the clockdomain
+ * info for all the clocks listed under these. This function shall be
+ * called after the rest of the DT clock init has completed and all
+ * clock nodes have been registered.
+ */
+void __init ti_dt_clockdomains_setup(void)
+{
+ struct device_node *np;
+ for_each_matching_node(np, ti_clkdm_match_table) {
+ of_ti_clockdomain_setup(np);
+ }
+}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
new file mode 100644
index 000000000000..19d8980ba458
--- /dev/null
+++ b/drivers/clk/ti/composite.c
@@ -0,0 +1,269 @@
+/*
+ * TI composite clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return -EINVAL;
+}
+
+static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return -EINVAL;
+}
+
+static const struct clk_ops ti_composite_divider_ops = {
+ .recalc_rate = &ti_composite_recalc_rate,
+ .round_rate = &ti_composite_round_rate,
+ .set_rate = &ti_composite_set_rate,
+};
+
+static const struct clk_ops ti_composite_gate_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+struct component_clk {
+ int num_parents;
+ const char **parent_names;
+ struct device_node *node;
+ int type;
+ struct clk_hw *hw;
+ struct list_head link;
+};
+
+static const char * __initconst component_clk_types[] = {
+ "gate", "divider", "mux"
+};
+
+static LIST_HEAD(component_clks);
+
+static struct device_node *_get_component_node(struct device_node *node, int i)
+{
+ int rc;
+ struct of_phandle_args clkspec;
+
+ rc = of_parse_phandle_with_args(node, "clocks", "#clock-cells", i,
+ &clkspec);
+ if (rc)
+ return NULL;
+
+ return clkspec.np;
+}
+
+static struct component_clk *_lookup_component(struct device_node *node)
+{
+ struct component_clk *comp;
+
+ list_for_each_entry(comp, &component_clks, link) {
+ if (comp->node == node)
+ return comp;
+ }
+ return NULL;
+}
+
+struct clk_hw_omap_comp {
+ struct clk_hw hw;
+ struct device_node *comp_nodes[CLK_COMPONENT_TYPE_MAX];
+ struct component_clk *comp_clks[CLK_COMPONENT_TYPE_MAX];
+};
+
+static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
+{
+ if (!clk)
+ return NULL;
+
+ if (!clk->comp_clks[idx])
+ return NULL;
+
+ return clk->comp_clks[idx]->hw;
+}
+
+#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
+
+static void __init ti_clk_register_composite(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
+ struct component_clk *comp;
+ int num_parents = 0;
+ const char **parent_names = NULL;
+ int i;
+
+ /* Check for presence of each component clock */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_nodes[i])
+ continue;
+
+ comp = _lookup_component(cclk->comp_nodes[i]);
+ if (!comp) {
+ pr_debug("component %s not ready for %s, retry\n",
+ cclk->comp_nodes[i]->name, node->name);
+ if (!ti_clk_retry_init(node, hw,
+ ti_clk_register_composite))
+ return;
+
+ goto cleanup;
+ }
+ if (cclk->comp_clks[comp->type] != NULL) {
+ pr_err("duplicate component types for %s (%s)!\n",
+ node->name, component_clk_types[comp->type]);
+ goto cleanup;
+ }
+
+ cclk->comp_clks[comp->type] = comp;
+
+ /* Mark this node as found */
+ cclk->comp_nodes[i] = NULL;
+ }
+
+ /* All components exist, proceed with registration */
+ for (i = CLK_COMPONENT_TYPE_MAX - 1; i >= 0; i--) {
+ comp = cclk->comp_clks[i];
+ if (!comp)
+ continue;
+ if (comp->num_parents) {
+ num_parents = comp->num_parents;
+ parent_names = comp->parent_names;
+ break;
+ }
+ }
+
+ if (!num_parents) {
+ pr_err("%s: no parents found for %s!\n", __func__, node->name);
+ goto cleanup;
+ }
+
+ clk = clk_register_composite(NULL, node->name,
+ parent_names, num_parents,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_MUX),
+ &ti_clk_mux_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_DIVIDER),
+ &ti_composite_divider_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_GATE),
+ &ti_composite_gate_ops, 0);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+ /* Free component clock list entries */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_clks[i])
+ continue;
+ list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]);
+ }
+
+ kfree(cclk);
+}
+
+static void __init of_ti_composite_clk_setup(struct device_node *node)
+{
+ int num_clks;
+ int i;
+ struct clk_hw_omap_comp *cclk;
+
+ /* Number of component clocks to be put inside this clock */
+ num_clks = of_clk_get_parent_count(node);
+
+ if (num_clks < 1) {
+ pr_err("composite clk %s must have component(s)\n", node->name);
+ return;
+ }
+
+ cclk = kzalloc(sizeof(*cclk), GFP_KERNEL);
+ if (!cclk)
+ return;
+
+ /* Get device node pointers for each component clock */
+ for (i = 0; i < num_clks; i++)
+ cclk->comp_nodes[i] = _get_component_node(node, i);
+
+ ti_clk_register_composite(&cclk->hw, node);
+}
+CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
+ of_ti_composite_clk_setup);
+
+/**
+ * ti_clk_add_component - add a component clock to the pool
+ * @node: device node of the component clock
+ * @hw: hardware clock definition for the component clock
+ * @type: type of the component clock
+ *
+ * Adds a component clock to the list of available components, so that
+ * it can be registered by a composite clock.
+ */
+int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
+ int type)
+{
+ int num_parents;
+ const char **parent_names;
+ struct component_clk *clk;
+ int i;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (num_parents < 1) {
+ pr_err("component-clock %s must have parent(s)\n", node->name);
+ return -EINVAL;
+ }
+
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(parent_names);
+ return -ENOMEM;
+ }
+
+ clk->num_parents = num_parents;
+ clk->parent_names = parent_names;
+ clk->hw = hw;
+ clk->node = node;
+ clk->type = type;
+ list_add(&clk->link, &component_clks);
+
+ return 0;
+}
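+
+/*
+ * Illustrative DT wiring (node names and register offsets are made up;
+ * the compatibles match the CLK_OF_DECLARE entries in this file and in
+ * divider.c/gate.c):
+ *
+ *  mmc_gate: mmc_gate {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,composite-gate-clock";
+ *          clocks = <&func_clk>;
+ *          reg = <0x0328>;
+ *  };
+ *
+ *  mmc_div: mmc_div {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,composite-divider-clock";
+ *          clocks = <&func_clk>;
+ *          reg = <0x0328>;
+ *          ti,max-div = <4>;
+ *  };
+ *
+ *  mmc_fck: mmc_fck {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,composite-clock";
+ *          clocks = <&mmc_gate>, <&mmc_div>;
+ *  };
+ *
+ * The gate and divider nodes add themselves to the component pool via
+ * ti_clk_add_component(); the "ti,composite-clock" node collects its
+ * components from the "clocks" phandles and registers the final clock
+ * with clk_register_composite(), retrying if a component is not yet
+ * available.
+ */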
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
new file mode 100644
index 000000000000..a15e445570b2
--- /dev/null
+++ b/drivers/clk/ti/divider.c
@@ -0,0 +1,487 @@
+/*
+ * TI Divider Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define div_mask(d) ((1 << ((d)->width)) - 1)
+
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+ unsigned int maxdiv = 0;
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div > maxdiv)
+ maxdiv = clkt->div;
+ return maxdiv;
+}
+
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div_mask(divider);
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << div_mask(divider);
+ if (divider->table)
+ return _get_table_maxdiv(divider->table);
+ return div_mask(divider) + 1;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (divider->table)
+ return _get_table_div(divider->table, val);
+ return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ return 0;
+}
+
+static unsigned int _get_val(struct clk_divider *divider, u8 div)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (divider->table)
+ return _get_table_val(divider->table, div);
+ return div - 1;
+}
+
+static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, val;
+
+ val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider);
+
+ div = _get_div(divider, val);
+ if (!div) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ __clk_get_name(hw->clk));
+ return parent_rate;
+ }
+
+ return parent_rate / div;
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: The maximum number which
+ * divided by m is r
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+static bool _is_valid_table_div(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return true;
+ return false;
+}
+
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return is_power_of_2(div);
+ if (divider->table)
+ return _is_valid_table_div(divider->table, div);
+ return true;
+}
+
+static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = _get_maxdiv(divider);
+
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = DIV_ROUND_UP(parent_rate, rate);
+ bestdiv = bestdiv == 0 ? 1 : bestdiv;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 1; i <= maxdiv; i++) {
+ if (!_is_valid_div(divider, i))
+ continue;
+ if (rate * i == parent_rate_saved) {
+ /*
+ * This is the ideal case: the requested rate can be
+ * divided down from the parent clock without changing the
+ * parent rate, so return this divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ MULT_ROUND_UP(rate, i));
+ now = parent_rate / i;
+ if (now <= rate && now > best) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = _get_maxdiv(divider);
+ *best_parent_rate =
+ __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ }
+
+ return bestdiv;
+}
+
+static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div;
+ div = ti_clk_divider_bestdiv(hw, rate, prate);
+
+ return *prate / div;
+}
+
+static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div, value;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = parent_rate / rate;
+ value = _get_val(divider, div);
+
+ if (value > div_mask(divider))
+ value = div_mask(divider);
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider) << (divider->shift + 16);
+ } else {
+ val = ti_clk_ll_ops->clk_readl(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ }
+ val |= value << divider->shift;
+ ti_clk_ll_ops->clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops ti_clk_divider_ops = {
+ .recalc_rate = ti_clk_divider_recalc_rate,
+ .round_rate = ti_clk_divider_round_rate,
+ .set_rate = ti_clk_divider_set_rate,
+};
+
+static struct clk *_register_divider(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ pr_err("%s: could not allocate divider clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &ti_clk_divider_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
+
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
+
+static struct clk_div_table
+__init *ti_clk_get_div_table(struct device_node *node)
+{
+ struct clk_div_table *table;
+ const __be32 *divspec;
+ u32 val;
+ u32 num_div;
+ u32 valid_div;
+ int i;
+
+ divspec = of_get_property(node, "ti,dividers", &num_div);
+
+ if (!divspec)
+ return NULL;
+
+ num_div /= 4;
+
+ valid_div = 0;
+
+ /* Determine required size for divider table */
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val)
+ valid_div++;
+ }
+
+ if (!valid_div) {
+ pr_err("no valid dividers for %s table\n", node->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ valid_div = 0;
+
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val) {
+ table[valid_div].div = val;
+ table[valid_div].val = i;
+ valid_div++;
+ }
+ }
+
+ return table;
+}
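+
+/*
+ * Example (hypothetical property): ti,dividers = <1>, <2>, <0>, <4>;
+ * produces a three-entry table mapping register value 0 to divide-by-1,
+ * 1 to divide-by-2 and 3 to divide-by-4; the zero at index 2 marks an
+ * unused register value and is skipped.
+ */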
+
+static int _get_divider_width(struct device_node *node,
+ const struct clk_div_table *table,
+ u8 flags)
+{
+ u32 min_div;
+ u32 max_div;
+ u32 val = 0;
+ u32 div;
+
+ if (!table) {
+ /* Clk divider table not provided, determine min/max divs */
+ if (of_property_read_u32(node, "ti,min-div", &min_div))
+ min_div = 1;
+
+ if (of_property_read_u32(node, "ti,max-div", &max_div)) {
+ pr_err("no max-div for %s!\n", node->name);
+ return -EINVAL;
+ }
+
+ /* Determine bit width for the field */
+ if (flags & CLK_DIVIDER_ONE_BASED)
+ val = 1;
+
+ div = min_div;
+
+ while (div < max_div) {
+ if (flags & CLK_DIVIDER_POWER_OF_TWO)
+ div <<= 1;
+ else
+ div++;
+ val++;
+ }
+ } else {
+ div = 0;
+
+ while (table[div].div) {
+ val = table[div].val;
+ div++;
+ }
+ }
+
+ return fls(val);
+}
+
+static int __init ti_clk_divider_populate(struct device_node *node,
+ void __iomem **reg, const struct clk_div_table **table,
+ u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+{
+ u32 val;
+
+ *reg = ti_clk_get_reg_addr(node, 0);
+ if (!*reg)
+ return -EINVAL;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ *shift = val;
+ else
+ *shift = 0;
+
+ *flags = 0;
+ *div_flags = 0;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ *div_flags |= CLK_DIVIDER_ONE_BASED;
+
+ if (of_property_read_bool(node, "ti,index-power-of-two"))
+ *div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ *flags |= CLK_SET_RATE_PARENT;
+
+ *table = ti_clk_get_div_table(node);
+
+ if (IS_ERR(*table))
+ return PTR_ERR(*table);
+
+ *width = _get_divider_width(node, *table, *div_flags);
+
+ return 0;
+}
+
+/**
+ * of_ti_divider_clk_setup - Setup function for simple div rate clock
+ * @node: device node for this clock
+ *
+ * Sets up a basic divider clock.
+ */
+static void __init of_ti_divider_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *parent_name;
+ void __iomem *reg;
+ u8 clk_divider_flags = 0;
+ u8 width = 0;
+ u8 shift = 0;
+ const struct clk_div_table *table = NULL;
+ u32 flags = 0;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ if (ti_clk_divider_populate(node, &reg, &table, &flags,
+ &clk_divider_flags, &width, &shift))
+ goto cleanup;
+
+ clk = _register_divider(NULL, node->name, parent_name, flags, reg,
+ shift, width, clk_divider_flags, table, NULL);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ return;
+ }
+
+cleanup:
+ kfree(table);
+}
+CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
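+
+/*
+ * Illustrative DT node for the binding above (the node name and values
+ * are made up; the properties are the ones parsed by
+ * ti_clk_divider_populate()):
+ *
+ *  dpll_usb_m2_ck: dpll_usb_m2_ck {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,divider-clock";
+ *          clocks = <&dpll_usb_ck>;
+ *          ti,max-div = <127>;
+ *          reg = <0x0190>;
+ *          ti,index-starts-at-one;
+ *  };
+ */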
+
+static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
+{
+ struct clk_divider *div;
+ u32 val;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
+ &div->flags, &div->width, &div->shift) < 0)
+ goto cleanup;
+
+ if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
+ return;
+
+cleanup:
+ kfree(div->table);
+ kfree(div);
+}
+CLK_OF_DECLARE(ti_composite_divider_clk, "ti,composite-divider-clock",
+ of_ti_composite_divider_clk_setup);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
new file mode 100644
index 000000000000..7e498a44f97d
--- /dev/null
+++ b/drivers/clk/ti/dpll.c
@@ -0,0 +1,558 @@
+/*
+ * OMAP DPLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define DPLL_HAS_AUTOIDLE 0x1
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static const struct clk_ops dpll_m4xen_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap4_dpll_regm4xen_recalc,
+ .round_rate = &omap4_dpll_regm4xen_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+#endif
+
+static const struct clk_ops dpll_core_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static const struct clk_ops dpll_no_gate_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static const struct clk_ops omap3_dpll_per_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll4_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_x2_ck_ops = {
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+/**
+ * ti_clk_register_dpll - low level registration of a DPLL clock
+ * @hw: hardware clock definition for the clock
+ * @node: device node for the clock
+ *
+ * Finalizes the DPLL registration process. In case of a failure (clk-ref or
+ * clk-bypass is missing), the clock is added to the retry list and
+ * the initialization is retried at a later stage.
+ */
+static void __init ti_clk_register_dpll(struct clk_hw *hw,
+ struct device_node *node)
+{
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *dd = clk_hw->dpll_data;
+ struct clk *clk;
+
+ dd->clk_ref = of_clk_get(node, 0);
+ dd->clk_bypass = of_clk_get(node, 1);
+
+ if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
+ pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+ return;
+
+ goto cleanup;
+ }
+
+ /* register the clock */
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ omap2_init_clk_hw_omap_clocks(clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(clk_hw->hw.init->parent_names);
+ kfree(clk_hw->hw.init);
+ kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+/**
+ * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * @node: device node for this clock
+ * @ops: clk_ops for this clock
+ * @hw_ops: clk_hw_ops for this clock
+ *
+ * Initializes a DPLL x 2 clock from device tree data.
+ */
+static void ti_clk_register_dpll_x2(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *name = node->name;
+ const char *parent_name;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s must have parent\n", node->name);
+ return;
+ }
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->ops = hw_ops;
+ clk_hw->hw.init = &init;
+
+ init.name = name;
+ init.ops = ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ /* register the clock */
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (IS_ERR(clk)) {
+ kfree(clk_hw);
+ } else {
+ omap2_init_clk_hw_omap_clocks(clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+}
+#endif
+
+/**
+ * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
+ * @node: device node containing the DPLL info
+ * @ops: ops for the DPLL
+ * @ddt: DPLL data template to use
+ * @init_flags: flags for controlling init types
+ *
+ * Initializes a DPLL clock from device tree data.
+ */
+static void __init of_ti_dpll_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct dpll_data *ddt,
+ u8 init_flags)
+{
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ struct dpll_data *dd = NULL;
+ int i;
+ u8 dpll_mode = 0;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!dd || !clk_hw || !init)
+ goto cleanup;
+
+ memcpy(dd, ddt, sizeof(*dd));
+
+ clk_hw->dpll_data = dd;
+ clk_hw->ops = &clkhwops_omap3_dpll;
+ clk_hw->hw.init = init;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ init->name = node->name;
+ init->ops = ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents < 1) {
+ pr_err("%s must have parent(s)\n", node->name);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < init->num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ init->parent_names = parent_names;
+
+ dd->control_reg = ti_clk_get_reg_addr(node, 0);
+ dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
+ dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
+
+ if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg)
+ goto cleanup;
+
+ if (init_flags & DPLL_HAS_AUTOIDLE) {
+ dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
+ if (!dd->autoidle_reg)
+ goto cleanup;
+ }
+
+ if (of_property_read_bool(node, "ti,low-power-stop"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
+
+ if (of_property_read_bool(node, "ti,low-power-bypass"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
+
+ if (of_property_read_bool(node, "ti,lock"))
+ dpll_mode |= 1 << DPLL_LOCKED;
+
+ if (dpll_mode)
+ dd->modes = dpll_mode;
+
+ ti_clk_register_dpll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(dd);
+ kfree(parent_names);
+ kfree(init);
+ kfree(clk_hw);
+}
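+
+/*
+ * Illustrative DT node for one of the DPLL compatibles declared below
+ * (node name and register offsets are made up): the reg entries are
+ * control, idlest, mult-div1 and, when DPLL_HAS_AUTOIDLE is set,
+ * autoidle, and the two clocks are clk-ref and clk-bypass in that
+ * order:
+ *
+ *  dpll_core_ck: dpll_core_ck {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,omap4-dpll-core-clock";
+ *          clocks = <&sys_clkin_ck>, <&core_byp_mux_ck>;
+ *          reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
+ *  };
+ */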
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
+{
+ ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
+ of_ti_omap4_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_SOC_AM33XX
+static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
+{
+ ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+}
+CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
+ of_ti_am3_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_omap3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
+ of_ti_omap3_dpll_setup);
+
+static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 16,
+ .div1_mask = 0x7f << 8,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
+ of_ti_omap3_core_dpll_setup);
+
+static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf00000,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
+ of_ti_omap3_per_dpll_setup);
+
+static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 128,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .dco_mask = 0xe << 20,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
+ of_ti_omap3_per_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_omap4_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
+ of_ti_omap4_dpll_setup);
+
+static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
+ of_ti_omap4_core_dpll_setup);
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .m4xen_mask = 0x800,
+ .lpmode_mask = 1 << 10,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
+ of_ti_omap4_m4xen_dpll_setup);
+
+static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0xff,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
+ of_ti_omap4_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
+ of_ti_am3_no_gate_dpll_setup);
+
+static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 2,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
+ of_ti_am3_jtype_dpll_setup);
+
+static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
+ "ti,am3-dpll-no-gate-j-type-clock",
+ of_ti_am3_no_gate_jtype_dpll_setup);
+
+static void __init of_ti_am3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
+
+static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
+ of_ti_am3_core_dpll_setup);
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
new file mode 100644
index 000000000000..c2c8a287408c
--- /dev/null
+++ b/drivers/clk/ti/fixed-factor.c
@@ -0,0 +1,66 @@
+/*
+ * TI Fixed Factor Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/**
+ * of_ti_fixed_factor_clk_setup - Setup function for TI fixed factor clock
+ * @node: device node for this clock
+ *
+ * Sets up a simple fixed factor clock based on device tree info.
+ */
+static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 div, mult;
+ u32 flags = 0;
+
+ if (of_property_read_u32(node, "ti,clock-div", &div)) {
+ pr_err("%s must have a clock-div property\n", node->name);
+ return;
+ }
+
+ if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
+ pr_err("%s must have a clock-mult property\n", node->name);
+ return;
+ }
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
+ mult, div);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ }
+}
+CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+ of_ti_fixed_factor_clk_setup);
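+
+/*
+ * Illustrative DT node (name and values are made up; the two required
+ * properties are the ones checked above):
+ *
+ *  func_12m_fclk: func_12m_fclk {
+ *          #clock-cells = <0>;
+ *          compatible = "ti,fixed-factor-clock";
+ *          clocks = <&dpll_per_m2x2_ck>;
+ *          ti,clock-div = <16>;
+ *          ti,clock-mult = <1>;
+ *  };
+ */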
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
new file mode 100644
index 000000000000..3e2999d11d15
--- /dev/null
+++ b/drivers/clk/ti/gate.c
@@ -0,0 +1,249 @@
+/*
+ * OMAP gate clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk);
+
+static const struct clk_ops omap_gate_clkdm_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clkops_enable_clkdm,
+ .disable = &omap2_clkops_disable_clkdm,
+};
+
+static const struct clk_ops omap_gate_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap36xx_gate_clk_enable_with_hsdiv_restore,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+/**
+ * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
+ * from the HSDivider PWRDN problem. Implements Errata ID: i556.
+ * @clk: DPLL output struct clk
+ *
+ * 3630 only: the dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
+ * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with the reset
+ * value after their respective PWRDN bits are set. Any dummy write
+ * (any value different from the read value) to the
+ * corresponding CM_CLKSEL register will refresh the dividers.
+ */
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+{
+ struct clk_divider *parent;
+ struct clk_hw *parent_hw;
+ u32 dummy_v, orig_v;
+ int ret;
+
+ /* Clear PWRDN bit of HSDIVIDER */
+ ret = omap2_dflt_clk_enable(clk);
+
+ /* Parent is the x2 node, get parent of parent for the m2 div */
+ parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+ parent = to_clk_divider(parent_hw);
+
+ /* Restore the dividers */
+ if (!ret) {
+ orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
+ dummy_v = orig_v;
+
+ /* Write any other value different from the Read value */
+ dummy_v ^= (1 << parent->shift);
+ ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);
+
+ /* Write the original divider */
+ ti_clk_ll_ops->clk_writel(orig_v, parent->reg);
+ }
+
+ return ret;
+}
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 val;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->hw.init = &init;
+
+ init.name = clk_name;
+ init.ops = ops;
+
+ if (ops != &omap_gate_clkdm_clk_ops) {
+ clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!clk_hw->enable_reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ clk_hw->enable_bit = val;
+ }
+
+ clk_hw->ops = hw_ops;
+
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ if (of_clk_get_parent_count(node) != 1) {
+ pr_err("%s must have 1 parent\n", clk_name);
+ goto cleanup;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ if (of_property_read_bool(node, "ti,set-bit-to-disable"))
+ clk_hw->flags |= INVERT_ENABLE;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw);
+}
+
+static void __init
+_of_ti_composite_gate_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk_hw_omap *gate;
+ u32 val = 0;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return;
+
+ gate->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!gate->enable_reg)
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &val);
+
+ gate->enable_bit = val;
+ gate->ops = hw_ops;
+ gate->flags = MEMMAP_ADDRESSING;
+
+ if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
+ return;
+
+cleanup:
+ kfree(gate);
+}
+
+static void __init
+of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, NULL);
+}
+CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
+ of_ti_composite_no_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_composite_interface_clk, "ti,composite-interface-clock",
+ of_ti_composite_interface_clk_setup);
+#endif
+
+static void __init of_ti_composite_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_composite_gate_clk, "ti,composite-gate-clock",
+ of_ti_composite_gate_clk_setup);
+
+
+static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_clkdm_gate_clk, "ti,clkdm-gate-clock",
+ of_ti_clkdm_gate_clk_setup);
+
+static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops,
+ &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_hsdiv_gate_clk, "ti,hsdiv-gate-clock",
+ of_ti_hsdiv_gate_clk_setup);
+
+static void __init of_ti_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_gate_clk, "ti,gate-clock", of_ti_gate_clk_setup);
+
+static void __init of_ti_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_wait_gate_clk, "ti,wait-gate-clock",
+ of_ti_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_am35xx_ipss_module_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_gate_clk, "ti,am35xx-gate-clock",
+ of_ti_am35xx_gate_clk_setup);
+
+static void __init of_ti_dss_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_omap3430es2_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_gate_clk, "ti,dss-gate-clock",
+ of_ti_dss_gate_clk_setup);
+#endif
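
For reference, the i556 workaround above reduces to reading the parent divider's CM_CLKSEL register, writing any different value, and writing the original back. A minimal standalone sketch, assuming plain readl()/writel() accessors instead of the driver's ti_clk_ll_ops hooks; hsdiv_refresh(), reg and shift are illustrative names only:

static void hsdiv_refresh(void __iomem *reg, u8 shift)
{
	u32 orig = readl(reg);

	/* any value different from the current one reloads the divider */
	writel(orig ^ (1 << shift), reg);
	/* restore the original divider selection */
	writel(orig, reg);
}
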
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
new file mode 100644
index 000000000000..320a2b168bb2
--- /dev/null
+++ b/drivers/clk/ti/interface.c
@@ -0,0 +1,125 @@
+/*
+ * OMAP interface clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static const struct clk_ops ti_interface_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *parent_name;
+ u32 val;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->hw.init = &init;
+ clk_hw->ops = ops;
+ clk_hw->flags = MEMMAP_ADDRESSING;
+
+ clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+ if (!clk_hw->enable_reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ clk_hw->enable_bit = val;
+
+ init.name = node->name;
+ init.ops = &ti_interface_clk_ops;
+ init.flags = 0;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%s must have a parent\n", node->name);
+ goto cleanup;
+ }
+
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+
+ clk = clk_register(NULL, &clk_hw->hw);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ omap2_init_clk_hw_omap_clocks(clk);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw);
+}
+
+static void __init of_ti_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_interface_clk, "ti,omap3-interface-clock",
+ of_ti_interface_clk_setup);
+
+static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk);
+}
+CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
+ of_ti_no_wait_interface_clk_setup);
+
+static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_hsotgusb_wait);
+}
+CLK_OF_DECLARE(ti_hsotgusb_interface_clk, "ti,omap3-hsotgusb-interface-clock",
+ of_ti_hsotgusb_interface_clk_setup);
+
+static void __init of_ti_dss_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_interface_clk, "ti,omap3-dss-interface-clock",
+ of_ti_dss_interface_clk_setup);
+
+static void __init of_ti_ssi_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait);
+}
+CLK_OF_DECLARE(ti_ssi_interface_clk, "ti,omap3-ssi-interface-clock",
+ of_ti_ssi_interface_clk_setup);
+
+static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
+ of_ti_am35xx_interface_clk_setup);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
new file mode 100644
index 000000000000..0197a478720c
--- /dev/null
+++ b/drivers/clk/ti/mux.c
@@ -0,0 +1,246 @@
+/*
+ * TI Multiplexer Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 val;
+
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or
+ * numeric. e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges
+ * from 0x1 to 0x7 (index starts at one).
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = ti_clk_ll_ops->clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ if (mux->table) {
+ int i;
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->table[i] == val)
+ return i;
+ return -EINVAL;
+ }
+
+ if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ val = ffs(val) - 1;
+
+ if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ val--;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ if (mux->table) {
+ index = mux->table[index];
+ } else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = (1 << ffs(index));
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ if (mux->flags & CLK_MUX_HIWORD_MASK) {
+ val = mux->mask << (mux->shift + 16);
+ } else {
+ val = ti_clk_ll_ops->clk_readl(mux->reg);
+ val &= ~(mux->mask << mux->shift);
+ }
+ val |= index << mux->shift;
+ ti_clk_ll_ops->clk_writel(val, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops ti_clk_mux_ops = {
+ .get_parent = ti_clk_mux_get_parent,
+ .set_parent = ti_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk *_register_mux(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg,
+ u8 shift, u32 mask, u8 clk_mux_flags,
+ u32 *table, spinlock_t *lock)
+{
+ struct clk_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the mux */
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ pr_err("%s: could not allocate mux clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &ti_clk_mux_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* struct clk_mux assignments */
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = table;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+
+ if (IS_ERR(clk))
+ kfree(mux);
+
+ return clk;
+}
+
+/**
+ * of_mux_clk_setup - Setup function for simple mux rate clock
+ * @node: DT node for the clock
+ *
+ * Sets up a basic clock multiplexer.
+ */
+static void of_mux_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ void __iomem *reg;
+ int num_parents;
+ const char **parent_names;
+ int i;
+ u8 clk_mux_flags = 0;
+ u32 mask = 0;
+ u32 shift = 0;
+ u32 flags = 0;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ pr_err("mux-clock %s must have parents\n", node->name);
+ return;
+ }
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ for (i = 0; i < num_parents; i++)
+ parent_names[i] = of_clk_get_parent_name(node, i);
+
+ reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!reg)
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &shift);
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ clk_mux_flags |= CLK_MUX_INDEX_ONE;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ /* Generate bit-mask based on parent info */
+ mask = num_parents;
+ if (!(clk_mux_flags & CLK_MUX_INDEX_ONE))
+ mask--;
+
+ mask = (1 << fls(mask)) - 1;
+
+ clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
+ reg, shift, mask, clk_mux_flags, NULL, NULL);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+ kfree(parent_names);
+}
+CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
+
+static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
+{
+ struct clk_mux *mux;
+ int num_parents;
+ u32 val;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ mux->reg = ti_clk_get_reg_addr(node, 0);
+
+ if (!mux->reg)
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ mux->shift = val;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ mux->flags |= CLK_MUX_INDEX_ONE;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (num_parents < 2) {
+ pr_err("%s must have parents\n", node->name);
+ goto cleanup;
+ }
+
+ mux->mask = num_parents - 1;
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ if (!ti_clk_add_component(node, &mux->hw, CLK_COMPONENT_TYPE_MUX))
+ return;
+
+cleanup:
+ kfree(mux);
+}
+CLK_OF_DECLARE(ti_composite_mux_clk_setup, "ti,composite-mux-clock",
+ of_ti_composite_mux_clk_setup);
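
The bit-mask derivation used by both setup paths above can be summarised as: take the highest selectable index (num_parents - 1, or num_parents when the selector starts at one) and widen it to the next power of two minus one so the mask covers the whole register field. A standalone sketch with a hypothetical helper name:

static u32 mux_field_mask(int num_parents, bool index_starts_at_one)
{
	u32 max_index = num_parents;

	if (!index_starts_at_one)
		max_index--;			/* e.g. 5 parents -> indices 0..4 */

	return (1 << fls(max_index)) - 1;	/* 0..4 -> mask 0x7 */
}
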
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 293a28854417..e2d63bc47436 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -36,7 +36,7 @@ static int clk_prcmu_prepare(struct clk_hw *hw)
if (!ret)
clk->is_prepared = 1;
- return ret;;
+ return ret;
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index f5e4c21b301f..8cbfcf88fae3 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -119,6 +119,7 @@ static const struct clk_ops icst_ops = {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
+ const char *name,
void __iomem *base)
{
struct clk *clk;
@@ -130,7 +131,7 @@ struct clk *icst_clk_register(struct device *dev,
pr_err("could not allocate ICST clock!\n");
return ERR_PTR(-ENOMEM);
}
- init.name = "icst";
+ init.name = name;
init.ops = &icst_ops;
init.flags = CLK_IS_ROOT;
init.parent_names = NULL;
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index dad51b6ffd00..be99dd0da785 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -15,4 +15,5 @@ struct clk_icst_desc {
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
+ const char *name,
void __iomem *base);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 369139af2a3b..844f8d711a12 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -1,6 +1,6 @@
/*
* Clock driver for the ARM Integrator/IM-PD1 board
- * Copyright (C) 2012 Linus Walleij
+ * Copyright (C) 2012-2013 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,20 +18,28 @@
#include "clk-icst.h"
struct impd1_clk {
- struct clk *vcoclk;
+ char *vco1name;
+ struct clk *vco1clk;
+ char *vco2name;
+ struct clk *vco2clk;
+ struct clk *mmciclk;
+ char *uartname;
struct clk *uartclk;
- struct clk_lookup *clks[3];
+ char *spiname;
+ struct clk *spiclk;
+ char *scname;
+ struct clk *scclk;
+ struct clk_lookup *clks[6];
};
+/* One entry for each connected IM-PD1 LM */
static struct impd1_clk impd1_clks[4];
/*
- * There are two VCO's on the IM-PD1 but only one is used by the
- * kernel, that is why we are only implementing the control of
- * IMPD1_OSC1 here.
+ * There are two VCOs on the IM-PD1
*/
-static const struct icst_params impd1_vco_params = {
+static const struct icst_params impd1_vco1_params = {
.ref = 24000000, /* 24 MHz */
.vco_max = ICST525_VCO_MAX_3V,
.vco_min = ICST525_VCO_MIN,
@@ -44,11 +52,29 @@ static const struct icst_params impd1_vco_params = {
};
static const struct clk_icst_desc impd1_icst1_desc = {
- .params = &impd1_vco_params,
+ .params = &impd1_vco1_params,
.vco_offset = IMPD1_OSC1,
.lock_offset = IMPD1_LOCK,
};
+static const struct icst_params impd1_vco2_params = {
+ .ref = 24000000, /* 24 MHz */
+ .vco_max = ICST525_VCO_MAX_3V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 12,
+ .vd_max = 519,
+ .rd_min = 3,
+ .rd_max = 120,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct clk_icst_desc impd1_icst2_desc = {
+ .params = &impd1_vco2_params,
+ .vco_offset = IMPD1_OSC2,
+ .lock_offset = IMPD1_LOCK,
+};
+
/**
* integrator_impd1_clk_init() - set up the integrator clock tree
* @base: base address of the logic module (LM)
@@ -66,16 +92,39 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
}
imc = &impd1_clks[id];
- clk = icst_clk_register(NULL, &impd1_icst1_desc, base);
- imc->vcoclk = clk;
+ imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
+ clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base);
+ imc->vco1clk = clk;
imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
- /* UART reference clock */
- clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
- 14745600);
+ /* VCO2 is also called "CLK2" */
+ imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
+ clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base);
+ imc->vco2clk = clk;
+
+ /* MMCI uses CLK2 right off */
+ imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:00700", id);
+
+ /* UART reference clock divides CLK2 by a fixed factor 4 */
+ imc->uartname = kasprintf(GFP_KERNEL, "lm%x-uartclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->uartname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 4);
imc->uartclk = clk;
- imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
- imc->clks[2] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
+ imc->clks[2] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
+ imc->clks[3] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
+
+ /* SPI PL022 clock divides CLK2 by a fixed factor 64 */
+ imc->spiname = kasprintf(GFP_KERNEL, "lm%x-spiclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->spiname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 64);
+ imc->clks[4] = clkdev_alloc(clk, NULL, "lm%x:00300", id);
+
+ /* Smart Card clock divides CLK2 by a fixed factor 4 */
+ imc->scname = kasprintf(GFP_KERNEL, "lm%x-scclk", id);
+ clk = clk_register_fixed_factor(NULL, imc->scname, imc->vco2name,
+ CLK_IGNORE_UNUSED, 1, 4);
+ imc->scclk = clk;
+ imc->clks[5] = clkdev_alloc(clk, NULL, "lm%x:00600", id);
for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
clkdev_add(imc->clks[i]);
@@ -92,6 +141,13 @@ void integrator_impd1_clk_exit(unsigned int id)
for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
clkdev_drop(imc->clks[i]);
+ clk_unregister(imc->spiclk);
clk_unregister(imc->uartclk);
- clk_unregister(imc->vcoclk);
+ clk_unregister(imc->vco2clk);
+ clk_unregister(imc->vco1clk);
+ kfree(imc->scname);
+ kfree(imc->spiname);
+ kfree(imc->uartname);
+ kfree(imc->vco2name);
+ kfree(imc->vco1name);
}
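
The derived IM-PD1 clocks above all use clk_register_fixed_factor(), whose resulting rate is simply parent_rate * mult / div; with VCO2 (CLK2) as parent this gives CLK2/4 for the UART and Smart Card clocks and CLK2/64 for the PL022 SPI clock. A trivial sketch of that arithmetic (hypothetical helper name):

static unsigned long impd1_fixed_factor_rate(unsigned long parent_rate,
					     unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;	/* uart/sc: 1/4, spi: 1/64 */
}
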
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
index 08593b4ee2c9..bda8967e09c2 100644
--- a/drivers/clk/versatile/clk-integrator.c
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -78,7 +78,7 @@ void __init integrator_clk_init(bool is_cp)
clk_register_clkdev(clk, NULL, "sp804");
/* ICST VCO clock used on the Integrator/CP CLCD */
- clk = icst_clk_register(NULL, &cp_icst_desc,
+ clk = icst_clk_register(NULL, &cp_icst_desc, "icst",
__io_address(INTEGRATOR_HDR_BASE));
clk_register_clkdev(clk, NULL, "clcd");
}
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index cda07e70a408..747e7b31117c 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -84,9 +84,11 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
/* ICST VCO clock */
if (is_pb1176)
- clk = icst_clk_register(NULL, &realview_osc0_desc, sysbase);
+ clk = icst_clk_register(NULL, &realview_osc0_desc,
+ "osc0", sysbase);
else
- clk = icst_clk_register(NULL, &realview_osc4_desc, sysbase);
+ clk = icst_clk_register(NULL, &realview_osc4_desc,
+ "osc4", sysbase);
clk_register_clkdev(clk, NULL, "dev:clcd");
clk_register_clkdev(clk, NULL, "issp:clcd");
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index bf9b15a585e1..c6e86a9a2aa3 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -123,7 +123,7 @@ static const struct clk_ops clk_sp810_timerclken_ops = {
.set_parent = clk_sp810_timerclken_set_parent,
};
-struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
+static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
void *data)
{
struct clk_sp810 *sp810 = data;
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 10772aa72e4e..09dd0173ea0a 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -102,9 +102,10 @@ static const char *swdt_ext_clk_input_names[] __initdata = {"swdt_ext_clk"};
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
- const char **parents)
+ const char **parents, int enable)
{
struct clk *clk;
+ u32 enable_reg;
char *mux_name;
char *div0_name;
char *div1_name;
@@ -147,6 +148,12 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
clks[fclk] = clk_register_gate(NULL, clk_name,
div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
+ enable_reg = readl(fclk_gate_reg) & 1;
+ if (enable && !enable_reg) {
+ if (clk_prepare_enable(clks[fclk]))
+ pr_warn("%s: FCLK%u enable failed\n", __func__,
+ fclk - fclk0);
+ }
kfree(mux_name);
kfree(div0_name);
kfree(div1_name);
@@ -213,6 +220,7 @@ static void __init zynq_clk_setup(struct device_node *np)
int ret;
struct clk *clk;
char *clk_name;
+ unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
const char *cpu_parents[4];
const char *periph_parents[4];
@@ -238,6 +246,8 @@ static void __init zynq_clk_setup(struct device_node *np)
periph_parents[2] = clk_output_name[armpll];
periph_parents[3] = clk_output_name[ddrpll];
+ of_property_read_u32(np, "fclk-enable", &fclk_enable);
+
/* ps_clk */
ret = of_property_read_u32(np, "ps-clk-frequency", &tmp);
if (ret) {
@@ -340,10 +350,12 @@ static void __init zynq_clk_setup(struct device_node *np)
clk_prepare_enable(clks[dci]);
/* Peripheral clocks */
- for (i = fclk0; i <= fclk3; i++)
+ for (i = fclk0; i <= fclk3; i++) {
+ int enable = !!(fclk_enable & BIT(i - fclk0));
zynq_clk_register_fclk(i, clk_output_name[i],
SLCR_FPGA0_CLK_CTRL + 0x10 * (i - fclk0),
- periph_parents);
+ periph_parents, enable);
+ }
zynq_clk_register_periph_clk(lqspi, 0, clk_output_name[lqspi], NULL,
SLCR_LQSPI_CLK_CTRL, periph_parents, 0);
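
The FCLK loop above turns the optional "fclk-enable" device-tree property into a per-clock boot-enable request, one bit per FCLK. A trivial sketch of that mapping (hypothetical helper name):

static bool zynq_fclk_requested(u32 fclk_enable, unsigned int n)
{
	return fclk_enable & BIT(n);	/* bit n of "fclk-enable" -> FCLKn */
}
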
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cd6950fd8caf..52e9329e3c51 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -140,3 +140,51 @@ config VF_PIT_TIMER
bool
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+
+config SYS_SUPPORTS_SH_CMT
+ bool
+
+config SYS_SUPPORTS_SH_MTU2
+ bool
+
+config SYS_SUPPORTS_SH_TMU
+ bool
+
+config SYS_SUPPORTS_EM_STI
+ bool
+
+config SH_TIMER_CMT
+ bool "Renesas CMT timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_CMT
+ help
+ This enables build of a clocksource and clockevent driver for
+ the Compare Match Timer (CMT) hardware available in 16/32/48-bit
+ variants on a wide range of Mobile and Automotive SoCs from Renesas.
+
+config SH_TIMER_MTU2
+ bool "Renesas MTU2 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_MTU2
+ help
+ This enables build of a clockevent driver for the Multi-Function
+ Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
+ This hardware comes with 16-bit timer registers.
+
+config SH_TIMER_TMU
+ bool "Renesas TMU timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_SH_TMU
+ help
+ This enables build of a clocksource and clockevent driver for
+ the 32-bit Timer Unit (TMU) hardware available on a wide range of
+ SoCs from Renesas.
+
+config EM_TIMER_STI
+ bool "Renesas STI timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ default SYS_SUPPORTS_EM_STI
+ help
+ This enables build of a clocksource and clockevent driver for
+ the 48-bit System Timer (STI) hardware available on SoCs
+ such as EMEV2 from former NEC Electronics.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 358358d87b6d..f3fe4cb4974b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
-obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
@@ -21,12 +20,13 @@ obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
+obj-$(CONFIG_ARCH_U300) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
+obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
@@ -37,3 +37,4 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
+obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 95fb944e15ee..57e823c44d2a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -277,6 +277,7 @@ static void __arch_timer_setup(unsigned type,
clk->set_next_event = arch_timer_set_next_event_phys;
}
} else {
+ clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
clk->cpumask = cpu_all_mask;
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 5176e761166b..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -17,6 +17,7 @@
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach/time.h>
@@ -98,24 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static void __init kona_timers_init(struct device_node *node)
-{
- u32 freq;
-
- if (!of_property_read_u32(node, "clock-frequency", &freq))
- arch_timer_rate = freq;
- else
- panic("clock-frequency not set in the .dts file");
-
- /* Setup IRQ numbers */
- timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
- /* Setup IO addresses */
- timers.tmr_regs = of_iomap(node, 0);
-
- kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
@@ -190,7 +173,34 @@ static struct irqaction kona_timer_irq = {
static void __init kona_timer_init(struct device_node *node)
{
- kona_timers_init(node);
+ u32 freq;
+ struct clk *external_clk;
+
+ if (!of_device_is_available(node)) {
+ pr_info("Kona Timer v1 marked as disabled in device tree\n");
+ return;
+ }
+
+ external_clk = of_clk_get_by_name(node, NULL);
+
+ if (!IS_ERR(external_clk)) {
+ arch_timer_rate = clk_get_rate(external_clk);
+ clk_prepare_enable(external_clk);
+ } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+ arch_timer_rate = freq;
+ } else {
+ pr_err("Kona Timer v1 unable to determine clock-frequency");
+ return;
+ }
+
+ /* Setup IRQ numbers */
+ timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+ /* Setup IO addresses */
+ timers.tmr_regs = of_iomap(node, 0);
+
+ kona_timer_disable_and_clear(timers.tmr_regs);
+
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 63f176de0d02..49fbe2847c84 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
@@ -52,6 +53,8 @@
#define TTC_CNT_CNTRL_DISABLE_MASK 0x1
#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */
+#define TTC_CLK_CNTRL_PSV_MASK 0x1e
+#define TTC_CLK_CNTRL_PSV_SHIFT 1
/*
* Setup the timers to use pre-scaling, using a fixed value for now that will
@@ -63,6 +66,8 @@
#define CLK_CNTRL_PRESCALE_EN 1
#define CNT_CNTRL_RESET (1 << 4)
+#define MAX_F_ERR 50
+
/**
* struct ttc_timer - This definition defines local timer structure
*
@@ -82,6 +87,8 @@ struct ttc_timer {
container_of(x, struct ttc_timer, clk_rate_change_nb)
struct ttc_timer_clocksource {
+ u32 scale_clk_ctrl_reg_old;
+ u32 scale_clk_ctrl_reg_new;
struct ttc_timer ttc;
struct clocksource cs;
};
@@ -229,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
struct ttc_timer_clocksource, ttc);
switch (event) {
- case POST_RATE_CHANGE:
+ case PRE_RATE_CHANGE:
+ {
+ u32 psv;
+ unsigned long factor, rate_low, rate_high;
+
+ if (ndata->new_rate > ndata->old_rate) {
+ factor = DIV_ROUND_CLOSEST(ndata->new_rate,
+ ndata->old_rate);
+ rate_low = ndata->old_rate;
+ rate_high = ndata->new_rate;
+ } else {
+ factor = DIV_ROUND_CLOSEST(ndata->old_rate,
+ ndata->new_rate);
+ rate_low = ndata->new_rate;
+ rate_high = ndata->old_rate;
+ }
+
+ if (!is_power_of_2(factor))
+ return NOTIFY_BAD;
+
+ if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
+ return NOTIFY_BAD;
+
+ factor = __ilog2_u32(factor);
+
/*
- * Do whatever is necessary to maintain a proper time base
- *
- * I cannot find a way to adjust the currently used clocksource
- * to the new frequency. __clocksource_updatefreq_hz() sounds
- * good, but does not work. Not sure what's that missing.
- *
- * This approach works, but triggers two clocksource switches.
- * The first after unregister to clocksource jiffies. And
- * another one after the register to the newly registered timer.
- *
- * Alternatively we could 'waste' another HW timer to ping pong
- * between clock sources. That would also use one register and
- * one unregister call, but only trigger one clocksource switch
- * for the cost of another HW timer used by the OS.
+ * store timer clock ctrl register so we can restore it in case
+ * of an abort.
*/
- clocksource_unregister(&ttccs->cs);
- clocksource_register_hz(&ttccs->cs,
- ndata->new_rate / PRESCALE);
- /* fall through */
- case PRE_RATE_CHANGE:
+ ttccs->scale_clk_ctrl_reg_old =
+ __raw_readl(ttccs->ttc.base_addr +
+ TTC_CLK_CNTRL_OFFSET);
+
+ psv = (ttccs->scale_clk_ctrl_reg_old &
+ TTC_CLK_CNTRL_PSV_MASK) >>
+ TTC_CLK_CNTRL_PSV_SHIFT;
+ if (ndata->new_rate < ndata->old_rate)
+ psv -= factor;
+ else
+ psv += factor;
+
+ /* prescaler within legal range? */
+ if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
+ return NOTIFY_BAD;
+
+ ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
+ ~TTC_CLK_CNTRL_PSV_MASK;
+ ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
+
+
+ /* scale down: adjust divider in post-change notification */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_DONE;
+
+ /* scale up: adjust divider now - before frequency change */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ break;
+ }
+ case POST_RATE_CHANGE:
+ /* scale up: pre-change notification did the adjustment */
+ if (ndata->new_rate > ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* scale down: adjust divider now - after frequency change */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ break;
+
case ABORT_RATE_CHANGE:
+ /* we have to undo the adjustment in case we scale up */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* restore original register value */
+ __raw_writel(ttccs->scale_clk_ctrl_reg_old,
+ ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+ /* fall through */
default:
return NOTIFY_DONE;
}
+
+ return NOTIFY_DONE;
}
static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
@@ -321,25 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
switch (event) {
case POST_RATE_CHANGE:
- {
- unsigned long flags;
-
- /*
- * clockevents_update_freq should be called with IRQ disabled on
- * the CPU the timer provides events for. The timer we use is
- * common to both CPUs, not sure if we need to run on both
- * cores.
- */
- local_irq_save(flags);
- clockevents_update_freq(&ttcce->ce,
- ndata->new_rate / PRESCALE);
- local_irq_restore(flags);
-
/* update cached frequency */
ttc->freq = ndata->new_rate;
+ clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
+
/* fall through */
- }
case PRE_RATE_CHANGE:
case ABORT_RATE_CHANGE:
default:
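
The TTC clocksource notifier above only accepts parent-rate changes whose ratio is an exact power of two (within MAX_F_ERR), and then moves the 4-bit prescaler field by log2 of that ratio so the counted rate stays constant. A standalone sketch of that arithmetic, assuming the usual kernel helpers (hypothetical function name, range checks on the PSV field omitted):

static int ttc_new_psv(unsigned long old_rate, unsigned long new_rate,
		       u32 old_psv)
{
	unsigned long lo = min(old_rate, new_rate);
	unsigned long hi = max(old_rate, new_rate);
	unsigned long factor = DIV_ROUND_CLOSEST(hi, lo);
	long err = (long)hi - (long)(factor * lo);

	if (!is_power_of_2(factor) || abs(err) > 50)	/* 50 == MAX_F_ERR */
		return -EINVAL;

	if (new_rate > old_rate)
		return old_psv + ilog2(factor);	/* faster parent: divide more */
	return old_psv - ilog2(factor);		/* slower parent: divide less */
}
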
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
deleted file mode 100644
index 9e0998f22885..000000000000
--- a/drivers/clocksource/cyclone.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#include <linux/clocksource.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/timex.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/io.h>
-
-#include <asm/mach_timer.h>
-
-#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
-#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
-#define CYCLONE_MPCS_OFFSET 0x51A8 /* offset to select register */
-#define CYCLONE_MPMC_OFFSET 0x51D0 /* offset to count register */
-#define CYCLONE_TIMER_FREQ 99780000 /* 100Mhz, but not really */
-#define CYCLONE_TIMER_MASK CLOCKSOURCE_MASK(32) /* 32 bit mask */
-
-int use_cyclone = 0;
-static void __iomem *cyclone_ptr;
-
-static cycle_t read_cyclone(struct clocksource *cs)
-{
- return (cycle_t)readl(cyclone_ptr);
-}
-
-static struct clocksource clocksource_cyclone = {
- .name = "cyclone",
- .rating = 250,
- .read = read_cyclone,
- .mask = CYCLONE_TIMER_MASK,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_cyclone_clocksource(void)
-{
- unsigned long base; /* saved value from CBAR */
- unsigned long offset;
- u32 __iomem* volatile cyclone_timer; /* Cyclone MPMC0 register */
- u32 __iomem* reg;
- int i;
-
- /* make sure we're on a summit box: */
- if (!use_cyclone)
- return -ENODEV;
-
- printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
-
- /* find base address: */
- offset = CYCLONE_CBAR_ADDR;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
- return -ENODEV;
- }
- /* even on 64bit systems, this is only 32bits: */
- base = readl(reg);
- iounmap(reg);
- if (!base) {
- printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
- return -ENODEV;
- }
-
- /* setup PMCC: */
- offset = base + CYCLONE_PMCC_OFFSET;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
- return -ENODEV;
- }
- writel(0x00000001,reg);
- iounmap(reg);
-
- /* setup MPCS: */
- offset = base + CYCLONE_MPCS_OFFSET;
- reg = ioremap_nocache(offset, sizeof(reg));
- if (!reg) {
- printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
- return -ENODEV;
- }
- writel(0x00000001,reg);
- iounmap(reg);
-
- /* map in cyclone_timer: */
- offset = base + CYCLONE_MPMC_OFFSET;
- cyclone_timer = ioremap_nocache(offset, sizeof(u64));
- if (!cyclone_timer) {
- printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
- return -ENODEV;
- }
-
- /* quick test to make sure its ticking: */
- for (i = 0; i < 3; i++){
- u32 old = readl(cyclone_timer);
- int stall = 100;
-
- while (stall--)
- barrier();
-
- if (readl(cyclone_timer) == old) {
- printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
- iounmap(cyclone_timer);
- cyclone_timer = NULL;
- return -ENODEV;
- }
- }
- cyclone_ptr = cyclone_timer;
-
- return clocksource_register_hz(&clocksource_cyclone,
- CYCLONE_TIMER_FREQ);
-}
-
-arch_initcall(init_cyclone_clocksource);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 62b0de6a1837..c2e390efbdca 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -71,6 +71,10 @@ enum {
MCT_L1_IRQ,
MCT_L2_IRQ,
MCT_L3_IRQ,
+ MCT_L4_IRQ,
+ MCT_L5_IRQ,
+ MCT_L6_IRQ,
+ MCT_L7_IRQ,
MCT_NR_IRQS,
};
@@ -406,7 +410,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
- sprintf(mevt->name, "mct_tick%d", cpu);
+ snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
evt->cpumask = cpumask_of(cpu);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 152a3f3875ee..a709cfa49d85 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -20,7 +20,6 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
@@ -103,7 +102,7 @@ static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
return 0;
}
-void nmdk_clkevt_reset(void)
+static void nmdk_clkevt_reset(void)
{
if (clkevt_periodic) {
/* Timer: configure load and background-load, and fire it up */
@@ -144,7 +143,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
}
}
-void nmdk_clksrc_reset(void)
+static void nmdk_clksrc_reset(void)
{
/* Disable */
writel(0, mtu_base + MTU_CR(0));
@@ -192,8 +191,8 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-static void __init __nmdk_timer_init(void __iomem *base, int irq,
- struct clk *pclk, struct clk *clk)
+static void __init nmdk_timer_init(void __iomem *base, int irq,
+ struct clk *pclk, struct clk *clk)
{
unsigned long rate;
@@ -245,18 +244,6 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
register_current_timer_delay(&mtu_delay_timer);
}
-void __init nmdk_timer_init(void __iomem *base, int irq)
-{
- struct clk *clk0, *pclk0;
-
- pclk0 = clk_get_sys("mtu0", "apb_pclk");
- BUG_ON(IS_ERR(pclk0));
- clk0 = clk_get_sys("mtu0", NULL);
- BUG_ON(IS_ERR(clk0));
-
- __nmdk_timer_init(base, irq, pclk0, clk0);
-}
-
static void __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
@@ -280,7 +267,7 @@ static void __init nmdk_timer_of_init(struct device_node *node)
if (irq <= 0)
panic("Can't parse IRQ");
- __nmdk_timer_init(base, irq, pclk, clk);
+ nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index bf497afba9ad..efb17c3ee120 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -196,5 +196,5 @@ static void __init sun4i_timer_init(struct device_node *node)
clockevents_config_and_register(&sun4i_clockevent, rate,
TIMER_SYNC_TICKS, 0xffffffff);
}
-CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index ee8691b89944..0451e62fac7a 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -85,12 +85,6 @@ static u32 ticks_per_jiffy;
static struct clock_event_device __percpu *armada_370_xp_evt;
-static void timer_ctrl_clrset(u32 clr, u32 set)
-{
- writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
- timer_base + TIMER_CTRL_OFF);
-}
-
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
@@ -245,7 +239,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
clr = TIMER0_25MHZ;
enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
}
- timer_ctrl_clrset(clr, set);
+ atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
local_timer_ctrl_clrset(clr, set);
/*
@@ -263,7 +257,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+ atomic_io_modify(timer_base + TIMER_CTRL_OFF,
+ TIMER0_RELOAD_EN | enable_mask,
+ TIMER0_RELOAD_EN | enable_mask);
/*
* Set scale and timer for sched_clock.
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 20066222f3f2..0b3ce0399c51 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -35,20 +35,6 @@
#define ORION_ONESHOT_MAX 0xfffffffe
static void __iomem *timer_base;
-static DEFINE_SPINLOCK(timer_ctrl_lock);
-
-/*
- * Thread-safe access to TIMER_CTRL register
- * (shared with watchdog timer)
- */
-void orion_timer_ctrl_clrset(u32 clr, u32 set)
-{
- spin_lock(&timer_ctrl_lock);
- writel((readl(timer_base + TIMER_CTRL) & ~clr) | set,
- timer_base + TIMER_CTRL);
- spin_unlock(&timer_ctrl_lock);
-}
-EXPORT_SYMBOL(orion_timer_ctrl_clrset);
/*
* Free-running clocksource handling.
@@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta,
{
/* setup and enable one-shot timer */
writel(delta, timer_base + TIMER1_VAL);
- orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);
return 0;
}
@@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode,
/* setup and enable periodic timer at 1/HZ intervals */
writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
- orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN,
+ TIMER1_RELOAD_EN | TIMER1_EN);
} else {
/* disable timer */
- orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER1_RELOAD_EN | TIMER1_EN, 0);
}
}
@@ -131,7 +121,9 @@ static void __init orion_timer_init(struct device_node *np)
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
writel(~0, timer_base + TIMER0_RELOAD);
- orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN);
+ atomic_io_modify(timer_base + TIMER_CTRL,
+ TIMER0_RELOAD_EN | TIMER0_EN,
+ TIMER0_RELOAD_EN | TIMER0_EN);
clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
new file mode 100644
index 000000000000..0250354f7e55
--- /dev/null
+++ b/drivers/clocksource/timer-keystone.c
@@ -0,0 +1,241 @@
+/*
+ * Keystone broadcast clock-event
+ *
+ * Copyright 2013 Texas Instruments, Inc.
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME "timer-keystone"
+
+/* Timer register offsets */
+#define TIM12 0x10
+#define TIM34 0x14
+#define PRD12 0x18
+#define PRD34 0x1c
+#define TCR 0x20
+#define TGCR 0x24
+#define INTCTLSTAT 0x44
+
+/* Timer register bitfields */
+#define TCR_ENAMODE_MASK 0xC0
+#define TCR_ENAMODE_ONESHOT_MASK 0x40
+#define TCR_ENAMODE_PERIODIC_MASK 0x80
+
+#define TGCR_TIM_UNRESET_MASK 0x03
+#define INTCTLSTAT_ENINT_MASK 0x01
+
+/**
+ * struct keystone_timer: holds timer's data
+ * @base: timer memory base address
+ * @hz_period: cycles per HZ period
+ * @event_dev: event device based on timer
+ */
+static struct keystone_timer {
+ void __iomem *base;
+ unsigned long hz_period;
+ struct clock_event_device event_dev;
+} timer;
+
+static inline u32 keystone_timer_readl(unsigned long rg)
+{
+ return readl_relaxed(timer.base + rg);
+}
+
+static inline void keystone_timer_writel(u32 val, unsigned long rg)
+{
+ writel_relaxed(val, timer.base + rg);
+}
+
+/**
+ * keystone_timer_barrier: write memory barrier
+ * Use an explicit barrier to avoid using the non-relaxed readl/writel
+ * variants, because in our case the non-relaxed variants hide the true
+ * places where a barrier is needed.
+ */
+static inline void keystone_timer_barrier(void)
+{
+ __iowmb();
+}
+
+/**
+ * keystone_timer_config: configures the timer to work in oneshot/periodic modes.
+ * @period: number of cycles to configure for
+ * @mode: mode to configure
+ */
+static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+{
+ u32 tcr;
+ u32 off;
+
+ tcr = keystone_timer_readl(TCR);
+ off = tcr & ~(TCR_ENAMODE_MASK);
+
+ /* set enable mode */
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ tcr |= TCR_ENAMODE_ONESHOT_MASK;
+ break;
+ case CLOCK_EVT_MODE_PERIODIC:
+ tcr |= TCR_ENAMODE_PERIODIC_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ /* disable timer */
+ keystone_timer_writel(off, TCR);
+ /* here we have to be sure the timer has been disabled */
+ keystone_timer_barrier();
+
+ /* reset counter to zero, set new period */
+ keystone_timer_writel(0, TIM12);
+ keystone_timer_writel(0, TIM34);
+ keystone_timer_writel(period & 0xffffffff, PRD12);
+ keystone_timer_writel(period >> 32, PRD34);
+
+ /*
+ * enable timer
+ * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
+ * have been written.
+ */
+ keystone_timer_barrier();
+ keystone_timer_writel(tcr, TCR);
+ return 0;
+}
+
+static void keystone_timer_disable(void)
+{
+ u32 tcr;
+
+ tcr = keystone_timer_readl(TCR);
+
+ /* disable timer */
+ tcr &= ~(TCR_ENAMODE_MASK);
+ keystone_timer_writel(tcr, TCR);
+}
+
+static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static int keystone_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ return keystone_timer_config(cycles, evt->mode);
+}
+
+static void keystone_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_ONESHOT:
+ keystone_timer_disable();
+ break;
+ default:
+ break;
+ }
+}
+
+static void __init keystone_timer_init(struct device_node *np)
+{
+ struct clock_event_device *event_dev = &timer.event_dev;
+ unsigned long rate;
+ struct clk *clk;
+ int irq, error;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ pr_err("%s: failed to map interrupts\n", __func__);
+ return;
+ }
+
+ timer.base = of_iomap(np, 0);
+ if (!timer.base) {
+ pr_err("%s: failed to map registers\n", __func__);
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to get clock\n", __func__);
+ iounmap(timer.base);
+ return;
+ }
+
+ error = clk_prepare_enable(clk);
+ if (error) {
+ pr_err("%s: failed to enable clock\n", __func__);
+ goto err;
+ }
+
+ rate = clk_get_rate(clk);
+
+ /* disable, use internal clock source */
+ keystone_timer_writel(0, TCR);
+ /* here we have to be sure the timer has been disabled */
+ keystone_timer_barrier();
+
+ /* reset timer as 64-bit, no pre-scaler, plus features are disabled */
+ keystone_timer_writel(0, TGCR);
+
+ /* unreset timer */
+ keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR);
+
+ /* init counter to zero */
+ keystone_timer_writel(0, TIM12);
+ keystone_timer_writel(0, TIM34);
+
+ timer.hz_period = DIV_ROUND_UP(rate, HZ);
+
+ /* enable timer interrupts */
+ keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT);
+
+ error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER,
+ TIMER_NAME, event_dev);
+ if (error) {
+ pr_err("%s: failed to setup irq\n", __func__);
+ goto err;
+ }
+
+ /* setup clockevent */
+ event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ event_dev->set_next_event = keystone_set_next_event;
+ event_dev->set_mode = keystone_set_mode;
+ event_dev->cpumask = cpu_all_mask;
+ event_dev->owner = THIS_MODULE;
+ event_dev->name = TIMER_NAME;
+ event_dev->irq = irq;
+
+ clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
+
+ pr_info("keystone timer clock @%lu Hz\n", rate);
+ return;
+err:
+ clk_put(clk);
+ iounmap(timer.base);
+}
+
+CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
+ keystone_timer_init);
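
For reference, two small pieces of arithmetic from the driver above, pulled out as standalone sketches (hypothetical helper names): the per-jiffy period derived from the input clock rate, and the split of a 64-bit period across the 32-bit PRD12/PRD34 register pair.

static unsigned long keystone_cycles_per_jiffy(unsigned long rate)
{
	return DIV_ROUND_UP(rate, HZ);	/* e.g. 204800000 Hz, HZ=100 -> 2048000 */
}

static void keystone_split_period(u64 period, u32 *lo, u32 *hi)
{
	*lo = period & 0xffffffff;	/* low half, written to PRD12 */
	*hi = period >> 32;		/* high half, written to PRD34 */
}
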
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
new file mode 100644
index 000000000000..e63d469661fd
--- /dev/null
+++ b/drivers/clocksource/timer-u300.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Timer COH 901 328, runs the OS timer interrupt.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+/* Generic stuff */
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+
+/*
+ * APP side special timer registers
+ * This timer contains four timers which can fire an interrupt each.
+ * OS (operating system) timer @ 32768 Hz
+ * DD (device driver) timer @ 1 kHz
+ * GP1 (general purpose 1) timer @ 1 MHz
+ * GP2 (general purpose 2) timer @ 1 MHz
+ */
+
+/* Reset OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_ROST (0x0000)
+#define U300_TIMER_APP_ROST_TIMER_RESET (0x00000000)
+/* Enable OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_EOST (0x0004)
+#define U300_TIMER_APP_EOST_TIMER_ENABLE (0x00000000)
+/* Disable OS Timer 32bit (-/W) */
+#define U300_TIMER_APP_DOST (0x0008)
+#define U300_TIMER_APP_DOST_TIMER_DISABLE (0x00000000)
+/* OS Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SOSTM (0x000c)
+#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT (0x00000001)
+/* OS Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_OSTS (0x0010)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_OSTS_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_OSTS_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND (0x00000080)
+/* OS Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_OSTCC (0x0014)
+/* OS Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_OSTTC (0x0018)
+/* OS Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_OSTIE (0x001c)
+#define U300_TIMER_APP_OSTIE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_OSTIE_IRQ_ENABLE (0x00000001)
+/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_OSTIA (0x0020)
+#define U300_TIMER_APP_OSTIA_IRQ_ACK (0x00000080)
+
+/* Reset DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_RDDT (0x0040)
+#define U300_TIMER_APP_RDDT_TIMER_RESET (0x00000000)
+/* Enable DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_EDDT (0x0044)
+#define U300_TIMER_APP_EDDT_TIMER_ENABLE (0x00000000)
+/* Disable DD Timer 32bit (-/W) */
+#define U300_TIMER_APP_DDDT (0x0048)
+#define U300_TIMER_APP_DDDT_TIMER_DISABLE (0x00000000)
+/* DD Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SDDTM (0x004c)
+#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT (0x00000001)
+/* DD Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_DDTS (0x0050)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_DDTS_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_DDTS_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND (0x00000080)
+/* DD Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_DDTCC (0x0054)
+/* DD Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_DDTTC (0x0058)
+/* DD Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_DDTIE (0x005c)
+#define U300_TIMER_APP_DDTIE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_DDTIE_IRQ_ENABLE (0x00000001)
+/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_DDTIA (0x0060)
+#define U300_TIMER_APP_DDTIA_IRQ_ACK (0x00000080)
+
+/* Reset GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_RGPT1 (0x0080)
+#define U300_TIMER_APP_RGPT1_TIMER_RESET (0x00000000)
+/* Enable GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_EGPT1 (0x0084)
+#define U300_TIMER_APP_EGPT1_TIMER_ENABLE (0x00000000)
+/* Disable GP1 Timer 32bit (-/W) */
+#define U300_TIMER_APP_DGPT1 (0x0088)
+#define U300_TIMER_APP_DGPT1_TIMER_DISABLE (0x00000000)
+/* GP1 Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SGPT1M (0x008c)
+#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT (0x00000001)
+/* GP1 Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT1S (0x0090)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_GPT1S_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_GPT1S_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND (0x00000080)
+/* GP1 Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT1CC (0x0094)
+/* GP1 Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_GPT1TC (0x0098)
+/* GP1 Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT1IE (0x009c)
+#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE (0x00000001)
+/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT1IA (0x00a0)
+#define U300_TIMER_APP_GPT1IA_IRQ_ACK (0x00000080)
+
+/* Reset GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_RGPT2 (0x00c0)
+#define U300_TIMER_APP_RGPT2_TIMER_RESET (0x00000000)
+/* Enable GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_EGPT2 (0x00c4)
+#define U300_TIMER_APP_EGPT2_TIMER_ENABLE (0x00000000)
+/* Disable GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_DGPT2 (0x00c8)
+#define U300_TIMER_APP_DGPT2_TIMER_DISABLE (0x00000000)
+/* GP2 Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SGPT2M (0x00cc)
+#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT (0x00000001)
+/* GP2 Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2S (0x00d0)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK (0x0000000F)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE (0x00000001)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE (0x00000002)
+#define U300_TIMER_APP_GPT2S_ENABLE_IND (0x00000010)
+#define U300_TIMER_APP_GPT2S_MODE_MASK (0x00000020)
+#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS (0x00000000)
+#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT (0x00000020)
+#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND (0x00000040)
+#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND (0x00000080)
+/* GP2 Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2CC (0x00d4)
+/* GP2 Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_GPT2TC (0x00d8)
+/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IE (0x00dc)
+#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE (0x00000000)
+#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE (0x00000001)
+/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IA (0x00e0)
+#define U300_TIMER_APP_GPT2IA_IRQ_ACK (0x00000080)
+
+/* Clock request control register - all four timers */
+#define U300_TIMER_APP_CRC (0x100)
+#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE (0x00000001)
+
+static void __iomem *u300_timer_base;
+
+struct u300_clockevent_data {
+ struct clock_event_device cevd;
+ unsigned ticks_per_jiffy;
+};
+
+/*
+ * The u300_set_mode() function is always called first; if the
+ * oneshot timer is active, the oneshot scheduling function
+ * u300_set_next_event() is called immediately after.
+ */
+static void u300_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct u300_clockevent_data *cevdata =
+ container_of(evt, struct u300_clockevent_data, cevd);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Set the periodic mode to a certain number of ticks per
+ * jiffy.
+ */
+ writel(cevdata->ticks_per_jiffy,
+ u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /*
+ * Set continuous mode, so the timer keeps triggering
+ * interrupts.
+ */
+ writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable timer interrupts */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Then enable the OS timer again */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Just break; here? */
+ /*
+ * The actual event will be programmed by the next event hook,
+ * so we just set a dummy value somewhere at the end of the
+ * universe here.
+ */
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /*
+ * Expire far in the future, u300_set_next_event() will be
+ * called soon...
+ */
+ writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /* We run one shot per tick here! */
+ writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable interrupts for this timer */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Enable timer */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /* Disable interrupts on GP1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ /* Ignore this call */
+ break;
+ }
+}
+
+/*
+ * The app timer in one shot mode obviously has to be reprogrammed
+ * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
+ * the interrupt disable + timer disable commands with a reset command;
+ * it will fail miserably. Apparently (and I found this the hard way)
+ * the timer is very sensitive to the instruction order, though you don't
+ * get that impression from the data sheet.
+ */
+static int u300_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+
+{
+ /* Disable interrupts on GPT1 */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Disable GP1 while we're reprogramming it. */
+ writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DGPT1);
+ /* Reset the General Purpose timer 1. */
+ writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT1);
+ /* IRQ in n * cycles */
+ writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
+ /*
+ * We run one shot per tick here! (This reconfiguration is necessary;
+ * the timer will tilt if you don't do it!)
+ */
+ writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+ u300_timer_base + U300_TIMER_APP_SGPT1M);
+ /* Enable timer interrupts */
+ writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+ u300_timer_base + U300_TIMER_APP_GPT1IE);
+ /* Then enable the OS timer again */
+ writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT1);
+ return 0;
+}
+
+static struct u300_clockevent_data u300_clockevent_data = {
+ /* Use general purpose timer 1 as clock event */
+ .cevd = {
+ .name = "GPT1",
+ /* Reasonably fast and accurate clock event */
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = u300_set_next_event,
+ .set_mode = u300_set_mode,
+ },
+};
+
+/* Clock event timer interrupt handler */
+static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &u300_clockevent_data.cevd;
+ /* ACK/Clear timer IRQ for the APP GPT1 Timer */
+
+ writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
+ u300_timer_base + U300_TIMER_APP_GPT1IA);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction u300_timer_irq = {
+ .name = "U300 Timer Tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = u300_timer_interrupt,
+};
+
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by OMAP implementation.)
+ */
+
+static u64 notrace u300_read_sched_clock(void)
+{
+ return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static unsigned long u300_read_current_timer(void)
+{
+ return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static struct delay_timer u300_delay_timer;
+
+/*
+ * This sets up the system timers, clock source and clock event.
+ */
+static void __init u300_timer_init_of(struct device_node *np)
+{
+ unsigned int irq;
+ struct clk *clk;
+ unsigned long rate;
+
+ u300_timer_base = of_iomap(np, 0);
+ if (!u300_timer_base)
+ panic("could not ioremap system timer\n");
+
+ /* Get the IRQ for the GP1 timer */
+ irq = irq_of_parse_and_map(np, 2);
+ if (!irq)
+ panic("no IRQ for system timer\n");
+
+ pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
+
+ /* Clock the interrupt controller */
+ clk = of_clk_get(np, 0);
+ BUG_ON(IS_ERR(clk));
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+
+ u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+
+ sched_clock_register(u300_read_sched_clock, 32, rate);
+
+ u300_delay_timer.read_current_timer = &u300_read_current_timer;
+ u300_delay_timer.freq = rate;
+ register_current_timer_delay(&u300_delay_timer);
+
+ /*
+ * Disable the "OS" and "DD" timers - these are designed for Symbian!
+ * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
+ */
+ writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
+ u300_timer_base + U300_TIMER_APP_CRC);
+ writel(U300_TIMER_APP_ROST_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_ROST);
+ writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DOST);
+ writel(U300_TIMER_APP_RDDT_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RDDT);
+ writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
+ u300_timer_base + U300_TIMER_APP_DDDT);
+
+ /* Reset the General Purpose timer 1. */
+ writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT1);
+
+ /* Set up the IRQ handler */
+ setup_irq(irq, &u300_timer_irq);
+
+ /* Reset the General Purpose timer 2 */
+ writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
+ u300_timer_base + U300_TIMER_APP_RGPT2);
+ /* Set this timer to run around forever */
+ writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
+ /* Set continuous mode so it wraps around */
+ writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
+ u300_timer_base + U300_TIMER_APP_SGPT2M);
+ /* Disable timer interrupts */
+ writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
+ u300_timer_base + U300_TIMER_APP_GPT2IE);
+ /* Then enable the GP2 timer to use as a free running us counter */
+ writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
+ u300_timer_base + U300_TIMER_APP_EGPT2);
+
+ /* Use general purpose timer 2 as clock source */
+ if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+ "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+ pr_err("timer: failed to initialize U300 clock source\n");
+
+ /* Configure and register the clockevent */
+ clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
+ 1, 0xffffffff);
+
+ /*
+ * TODO: init and register the rest of the timers too, they can be
+ * used by hrtimers!
+ */
+}
+
+CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
+ u300_timer_init_of);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 02821b06a39e..a918bc481c52 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
static u64 pit_read_sched_clock(void)
{
- return __raw_readl(clksrc_base + PITCVAL);
+ return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
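Not part of the patch: the PIT current-value register (PITCVAL) counts down, so the raw read decreases over time; complementing it yields a monotonically increasing value, which is what sched_clock() expects. A minimal sketch of the equivalent relationship, assuming a 32-bit counter that starts at 0xffffffff:

/* Sketch only, not part of the patch. */
static u32 elapsed_from_downcounter(u32 current_val)
{
	/* ~x == 0xffffffff - x for a 32-bit value */
	return ~current_val;
}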
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 18c5b9b16645..148d707a1d43 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -95,7 +95,7 @@ void proc_fork_connector(struct task_struct *task)
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exec_connector(struct task_struct *task)
@@ -122,7 +122,7 @@ void proc_exec_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_id_connector(struct task_struct *task, int which_id)
@@ -163,7 +163,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_sid_connector(struct task_struct *task)
@@ -190,7 +190,7 @@ void proc_sid_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -225,7 +225,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_comm_connector(struct task_struct *task)
@@ -253,7 +253,7 @@ void proc_comm_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_coredump_connector(struct task_struct *task)
@@ -280,7 +280,7 @@ void proc_coredump_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exit_connector(struct task_struct *task)
@@ -309,7 +309,7 @@ void proc_exit_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
/*
@@ -343,7 +343,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}
/**
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a36749f1e44a..77afe7487d34 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -50,7 +50,7 @@ static int cn_already_initialized;
*
* Sequence number is incremented with each message to be sent.
*
- * If we expect reply to our message then the sequence number in
+ * If we expect a reply to our message then the sequence number in
* received message MUST be the same as in original message, and
* acknowledge number MUST be the same + 1.
*
@@ -62,8 +62,11 @@ static int cn_already_initialized;
* the acknowledgement number in the original message + 1, then it is
* a new message.
*
+ * The message is sent to the portid if given, to the group if given, to both
+ * if both are given, or, if both are zero, the group is looked up and the
+ * message is sent there.
*/
-int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
+ gfp_t gfp_mask)
{
struct cn_callback_entry *__cbq;
unsigned int size;
@@ -74,7 +77,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
u32 group = 0;
int found = 0;
- if (!__group) {
+ if (portid || __group) {
+ group = __group;
+ } else {
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list,
callback_entry) {
@@ -88,11 +93,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!found)
return -ENODEV;
- } else {
- group = __group;
}
- if (!netlink_has_listeners(dev->nls, group))
+ if (!portid && !netlink_has_listeners(dev->nls, group))
return -ESRCH;
size = sizeof(*msg) + msg->len;
@@ -113,7 +116,10 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
NETLINK_CB(skb).dst_group = group;
- return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
+ if (group)
+ return netlink_broadcast(dev->nls, skb, portid, group,
+ gfp_mask);
+ return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
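Not part of the patch: a minimal caller-side sketch of the extended cn_netlink_send() signature documented above. The broadcast call mirrors the cn_proc.c conversion earlier in this diff; the unicast portid is purely illustrative.

/* Sketch only: how callers use the new portid argument. */
static void example_cn_send(struct cn_msg *msg, u32 listener_portid)
{
	/* Broadcast to the CN_IDX_PROC multicast group, as before */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);

	/* Unicast to a single listener identified by its netlink portid */
	cn_netlink_send(msg, listener_portid, 0, GFP_KERNEL);
}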
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 38093e272377..1fbe11f2a146 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -20,6 +20,10 @@ if CPU_FREQ
config CPU_FREQ_GOV_COMMON
bool
+config CPU_FREQ_BOOST_SW
+ bool
+ depends on THERMAL
+
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
default y
@@ -181,7 +185,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
- depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+ depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+ select PM_OPP
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -195,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
endmenu
menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ce52ed949249..9fb627046e17 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,9 +2,11 @@
# ARM CPU Frequency scaling drivers
#
+# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+ depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -15,6 +17,14 @@ config ARM_DT_BL_CPUFREQ
This enables probing via DT for Generic CPUfreq driver for ARM
big.LITTLE platform. This gets frequency tables from DT.
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
+
config ARM_EXYNOS_CPUFREQ
bool
@@ -54,7 +64,8 @@ config ARM_EXYNOS5250_CPUFREQ
config ARM_EXYNOS5440_CPUFREQ
bool "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
- depends on HAVE_CLK && PM_OPP && OF
+ depends on HAVE_CLK && OF
+ select PM_OPP
default y
help
This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -64,6 +75,21 @@ config ARM_EXYNOS5440_CPUFREQ
If in doubt, say N.
+config ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ bool "EXYNOS Frequency Overclocking - Software"
+ depends on ARM_EXYNOS_CPUFREQ
+ select CPU_FREQ_BOOST_SW
+ select EXYNOS_THERMAL
+ help
+ This driver supports software managed overclocking (BOOST).
+ It allows usage of special frequencies for Samsung Exynos
+ processors if thermal conditions are appropriate.
+
+ It requires, for safe operation, a thermal framework with properly
+ defined trip points.
+
+ If in doubt, say N.
+
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK
@@ -79,11 +105,11 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
config ARM_IMX6Q_CPUFREQ
- tristate "Freescale i.MX6Q cpufreq support"
- depends on SOC_IMX6Q
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
depends on REGULATOR_ANATOP
help
- This adds cpufreq driver support for Freescale i.MX6Q SOC.
+ This adds cpufreq driver support for Freescale i.MX6 series SoCs.
If in doubt, say N.
@@ -224,11 +250,3 @@ config ARM_TEGRA_CPUFREQ
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- select ARM_BIG_LITTLE_CPUFREQ
- depends on ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index caf41ebea184..822ca03a87f7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -80,7 +80,6 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
@@ -133,49 +132,16 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
-static ssize_t _store_boost(const char *buf, size_t count)
+static int _store_boost(int val)
{
- int ret;
- unsigned long val = 0;
-
- if (!boost_supported)
- return -EINVAL;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret || (val > 1))
- return -EINVAL;
-
- if ((val && boost_enabled) || (!val && !boost_enabled))
- return count;
-
get_online_cpus();
-
boost_set_msrs(val, cpu_online_mask);
-
put_online_cpus();
-
- boost_enabled = val;
pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
- return count;
-}
-
-static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- return _store_boost(buf, count);
-}
-
-static ssize_t show_global_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- return sprintf(buf, "%u\n", boost_enabled);
+ return 0;
}
-static struct global_attr global_boost = __ATTR(boost, 0644,
- show_global_boost,
- store_global_boost);
-
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
@@ -186,15 +152,32 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(freqdomain_cpus);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_boost(const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val = 0;
+
+ if (!acpi_cpufreq_driver.boost_supported)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret || (val > 1))
+ return -EINVAL;
+
+ _store_boost((int) val);
+
+ return count;
+}
+
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
size_t count)
{
- return _store_boost(buf, count);
+ return store_boost(buf, count);
}
static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%u\n", boost_enabled);
+ return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}
cpufreq_freq_attr_rw(cpb);
@@ -554,7 +537,7 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- boost_set_msrs(boost_enabled, cpumask);
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
break;
case CPU_DOWN_PREPARE:
@@ -872,7 +855,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
- cpufreq_frequency_table_put_attr(policy->cpu);
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
@@ -911,6 +893,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
+ .set_boost = _store_boost,
};
static void __init acpi_cpufreq_boost_init(void)
@@ -921,33 +904,22 @@ static void __init acpi_cpufreq_boost_init(void)
if (!msrs)
return;
- boost_supported = true;
- boost_enabled = boost_state(0);
-
+ acpi_cpufreq_driver.boost_supported = true;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
get_online_cpus();
/* Force all MSRs to the same value */
- boost_set_msrs(boost_enabled, cpu_online_mask);
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
+ cpu_online_mask);
register_cpu_notifier(&boost_nb);
put_online_cpus();
- } else
- global_boost.attr.mode = 0444;
-
- /* We create the boost file in any case, though for systems without
- * hardware support it will be read-only and hardwired to return 0.
- */
- if (cpufreq_sysfs_create_file(&(global_boost.attr)))
- pr_warn(PFX "could not register global boost sysfs file\n");
- else
- pr_debug("registered global boost sysfs file\n");
+ }
}
-static void __exit acpi_cpufreq_boost_exit(void)
+static void acpi_cpufreq_boost_exit(void)
{
- cpufreq_sysfs_remove_file(&(global_boost.attr));
-
if (msrs) {
unregister_cpu_notifier(&boost_nb);
@@ -993,13 +965,13 @@ static int __init acpi_cpufreq_init(void)
*iter = &cpb;
}
#endif
+ acpi_cpufreq_boost_init();
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
- if (ret)
+ if (ret) {
free_acpi_perf_data();
- else
- acpi_cpufreq_boost_init();
-
+ acpi_cpufreq_boost_exit();
+ }
return ret;
}
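Not part of the patch: a sketch of how another cpufreq driver would opt into the core-managed boost interface that acpi-cpufreq is converted to above. The .set_boost hook and the boost_supported/boost_enabled fields are taken from this diff; my_set_boost() and its body are hypothetical.

/* Sketch only: opting a driver into the generic boost interface. */
static int my_set_boost(int state)
{
	/* Program the hardware boost state here (hypothetical) */
	return 0;
}

static struct cpufreq_driver my_boost_driver = {
	.name		= "example-cpufreq",
	.set_boost	= my_set_boost,
	/*
	 * boost_supported and boost_enabled are filled in at init time,
	 * as acpi_cpufreq_boost_init() does above.
	 */
};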
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 5519933813ea..bad2ed317ba2 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -446,9 +446,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
}
if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
} else {
/* Assumption: during init, we are always running on A15 */
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
@@ -478,7 +481,6 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
- cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
@@ -488,7 +490,8 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
.flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = bL_cpufreq_set_target,
.get = bL_cpufreq_get_rate,
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 7c03dd84f66a..a1c79f549edb 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -21,17 +21,8 @@
#include <linux/export.h>
#include <linux/slab.h>
-static struct clk *cpuclk;
static struct cpufreq_frequency_table *freq_table;
-static unsigned int at32_get_speed(unsigned int cpu)
-{
- /* No SMP support */
- if (cpu)
- return 0;
- return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
-}
-
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
@@ -39,7 +30,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int old_freq, new_freq;
- old_freq = at32_get_speed(0);
+ old_freq = policy->cur;
new_freq = freq_table[index].frequency;
if (!ref_freq) {
@@ -50,7 +41,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, new_freq);
- clk_set_rate(cpuclk, new_freq * 1000);
+ clk_set_rate(policy->clk, new_freq * 1000);
if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, new_freq);
@@ -61,6 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
+ static struct clk *cpuclk;
int retval, steps, i;
if (policy->cpu != 0)
@@ -103,6 +95,7 @@ static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
frequency /= 2;
}
+ policy->clk = cpuclk;
freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
retval = cpufreq_table_validate_and_show(policy, freq_table);
@@ -123,7 +116,7 @@ static struct cpufreq_driver at32_driver = {
.init = at32_cpufreq_driver_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = at32_set_target,
- .get = at32_get_speed,
+ .get = cpufreq_generic_get,
.flags = CPUFREQ_STICKY,
};
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index e9e63fc9c2c9..a9f8e5bd0716 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -195,7 +195,6 @@ static struct cpufreq_driver bfin_driver = {
.target_index = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index d4585ce2346c..1bf6bbac3e03 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -13,7 +13,9 @@
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */
@@ -29,11 +32,7 @@ static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
-
-static unsigned int cpu0_get_speed(unsigned int cpu)
-{
- return clk_get_rate(cpu_clk) / 1000;
-}
+static struct thermal_cooling_device *cdev;
static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
@@ -44,7 +43,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz < 0)
+ if (freq_Hz <= 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
@@ -100,6 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = cpu_clk;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
@@ -107,9 +107,8 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpu0_set_target,
- .get = cpu0_get_speed,
+ .get = cpufreq_generic_get,
.init = cpu0_cpufreq_init,
- .exit = cpufreq_generic_exit,
.name = "generic_cpu0",
.attr = cpufreq_generic_attr,
};
@@ -201,6 +200,17 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_free_table;
}
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
+ if (IS_ERR(cdev))
+ pr_err("running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+ }
+
of_node_put(np);
return 0;
@@ -213,6 +223,7 @@ out_put_node:
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_cooling_unregister(cdev);
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a05b876f375e..bc447b9003c3 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
freqs.old, freqs.new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Disable IRQs */
/* local_irq_save(flags); */
@@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* Enable IRQs */
/* local_irq_restore(flags); */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
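Not part of the patch: the transition helpers used in the nforce2 conversion above are introduced later in this diff, in cpufreq.c. A minimal driver-side sketch of the pattern, where hw_set_frequency() stands in for a hypothetical hardware call:

/* Sketch only: wrapping a frequency change in the new transition helpers. */
static int example_target(struct cpufreq_policy *policy,
			  struct cpufreq_freqs *freqs)
{
	int ret;

	cpufreq_freq_transition_begin(policy, freqs);
	ret = hw_set_frequency(freqs->new);	/* hypothetical */
	cpufreq_freq_transition_end(policy, freqs, ret);

	return ret;
}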
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d19f7c06010..abda6609d3e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>
@@ -39,13 +39,14 @@ static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
-static DEFINE_MUTEX(cpufreq_governor_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);
-#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
static inline bool has_target(void)
{
@@ -176,6 +177,26 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ if (!policy || IS_ERR(policy->clk)) {
+ pr_err("%s: No %s associated to cpu: %d\n",
+ __func__, policy ? "clk" : "policy", cpu);
+ return 0;
+ }
+
+ return clk_get_rate(policy->clk) / 1000;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ return per_cpu(cpufreq_cpu_data, cpu);
+}
+
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -240,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- pr_debug("saving %lu as reference value for loops_per_jiffy; "
- "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+ pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+ l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
- (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- pr_debug("scaling loops_per_jiffy to %lu "
- "for frequency %u kHz\n", loops_per_jiffy, ci->new);
+ pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+ loops_per_jiffy, ci->new);
}
}
#else
@@ -268,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
- state, freqs->new);
+ state, freqs->new);
switch (state) {
@@ -280,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((policy) && (policy->cpu == freqs->cpu) &&
(policy->cur) && (policy->cur != freqs->old)) {
- pr_debug("Warning: CPU frequency is"
- " %u, cpufreq assumed %u kHz.\n",
- freqs->old, policy->cur);
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
freqs->old = policy->cur;
}
}
@@ -293,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
- pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
- (unsigned long)freqs->cpu);
+ pr_debug("FREQ: %lu - CPU: %lu\n",
+ (unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
@@ -312,18 +331,92 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
* function. It is called twice on all CPU frequency changes that have
* external effects.
*/
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
for_each_cpu(freqs->cpu, policy->cpus)
__cpufreq_notify_transition(policy, freqs, state);
}
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
+
+/* Do post notifications when there are chances that transition has failed */
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+ if (!transition_failed)
+ return;
+
+ swap(freqs->old, freqs->new);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs)
+{
+wait:
+ wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+ spin_lock(&policy->transition_lock);
+
+ if (unlikely(policy->transition_ongoing)) {
+ spin_unlock(&policy->transition_lock);
+ goto wait;
+ }
+
+ policy->transition_ongoing = true;
+
+ spin_unlock(&policy->transition_lock);
+
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ if (unlikely(WARN_ON(!policy->transition_ongoing)))
+ return;
+
+ cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+ policy->transition_ongoing = false;
+
+ wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
+static ssize_t show_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = sscanf(buf, "%d", &enable);
+ if (ret != 1 || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+ pr_err("%s: Cannot %s BOOST!\n",
+ __func__, enable ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: cpufreq BOOST %s\n",
+ __func__, enable ? "enabled" : "disabled");
+
+ return count;
+}
+define_one_global_rw(boost);
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
@@ -824,18 +917,25 @@ err_out_kobj_put:
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
+ struct cpufreq_governor *gov = NULL;
struct cpufreq_policy new_policy;
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+ /* Update governor of new_policy to the governor used before hotplug */
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ if (gov)
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, policy->cpu);
+ else
+ gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+ new_policy.governor = gov;
+
/* Use the default policy if its valid. */
if (cpufreq_driver->setpolicy)
- cpufreq_parse_governor(policy->governor->name,
- &new_policy.policy, NULL);
-
- /* assure that the starting sequence is run in cpufreq_set_policy */
- policy->governor = NULL;
+ cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
@@ -872,8 +972,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
up_write(&policy->rwsem);
if (has_target()) {
- if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
- (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
pr_err("%s: Failed to start governor\n", __func__);
return ret;
}
@@ -894,6 +997,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ policy->governor = NULL;
+
return policy;
}
@@ -913,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
return policy;
@@ -929,6 +1036,9 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+
down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
@@ -964,21 +1074,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
up_write(&policy->rwsem);
- cpufreq_frequency_table_update_policy_cpu(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
- bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
+ bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
struct cpufreq_policy *tpolicy;
- struct cpufreq_governor *gov;
#endif
if (cpu_is_offline(cpu))
@@ -1017,9 +1125,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* Restore the saved policy when doing light-weight init and fall back
* to the full init if that fails.
*/
- policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+ policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
- frozen = false;
+ recover_policy = false;
policy = cpufreq_policy_alloc();
if (!policy)
goto nomem_out;
@@ -1031,12 +1139,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* the creation of a brand new one. So we need to perform this update
* by invoking update_policy_cpu().
*/
- if (frozen && cpu != policy->cpu)
+ if (recover_policy && cpu != policy->cpu)
update_policy_cpu(policy, cpu);
else
policy->cpu = cpu;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
init_completion(&policy->kobj_unregister);
@@ -1051,14 +1158,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
- if (cpufreq_driver->get) {
- policy->cur = cpufreq_driver->get(policy->cpu);
- if (!policy->cur) {
- pr_err("%s: ->get() failed\n", __func__);
- goto err_get_freq;
- }
- }
-
/* related cpus should atleast have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1068,32 +1167,74 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- if (!frozen) {
+ if (!recover_policy) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
}
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_START, policy);
-
-#ifdef CONFIG_HOTPLUG_CPU
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (!frozen) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
+ /*
+ * Sometimes boot loaders set the CPU frequency to a value outside of the
+ * frequency table known to the cpufreq core. In such cases the CPU might be
+ * unstable if it has to run at that frequency for a long duration of time,
+ * so it is better to set it to a frequency which is specified in the
+ * freq-table. This also makes cpufreq stats inconsistent, as cpufreq-stats
+ * would fail to register because the current frequency of the CPU isn't
+ * found in the freq-table.
+ *
+ * Because we don't want this change to affect the boot process badly, we go
+ * for the next freq which is >= policy->cur ('cur' must be set by now,
+ * otherwise we will end up setting the freq to the lowest in the table, as
+ * 'cur' is initialized to zero).
+ *
+ * We are passing target-freq as "policy->cur - 1" otherwise
+ * __cpufreq_driver_target() would simply fail, as policy->cur will be
+ * equal to target-freq.
+ */
+ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ && has_target()) {
+ /* Are we running at unknown frequency ? */
+ ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+ if (ret == -EINVAL) {
+ /* Warn user and fix it */
+ pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ ret = __cpufreq_driver_target(policy, policy->cur - 1,
+ CPUFREQ_RELATION_L);
+
+ /*
+ * Reaching here within a few seconds after boot may not
+ * mean that the system will remain stable at the "unknown"
+ * frequency for a longer duration. Hence, a BUG_ON().
+ */
+ BUG_ON(ret);
+ pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ }
+ }
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+
+ if (!recover_policy) {
ret = cpufreq_add_dev_interface(policy, dev);
if (ret)
goto err_out_unregister;
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
}
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1102,10 +1243,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
- if (!frozen) {
+ if (!recover_policy) {
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
+ up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1115,16 +1257,16 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
return 0;
err_out_unregister:
+err_get_freq:
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
- if (frozen) {
+ if (recover_policy) {
/* Do not leave stale fallback data behind. */
per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
cpufreq_policy_put_kobj(policy);
@@ -1148,7 +1290,7 @@ nomem_out:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- return __cpufreq_add_dev(dev, sif, false);
+ return __cpufreq_add_dev(dev, sif);
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1163,7 +1305,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
- pr_err("%s: Failed to move kobj: %d", __func__, ret);
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
down_write(&policy->rwsem);
cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1179,8 +1321,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
+ struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
@@ -1194,7 +1335,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy = per_cpu(cpufreq_cpu_data, cpu);
/* Save the policy somewhere when doing a light-weight tear-down */
- if (frozen)
+ if (cpufreq_suspended)
per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1212,37 +1353,34 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
}
}
-#ifdef CONFIG_HOTPLUG_CPU
if (!cpufreq_driver->setpolicy)
strncpy(per_cpu(cpufreq_cpu_governor, cpu),
policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
up_read(&policy->rwsem);
if (cpu != policy->cpu) {
- if (!frozen)
- sysfs_remove_link(&dev->kobj, "cpufreq");
+ sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
- if (!frozen) {
+ if (!cpufreq_suspended)
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
- }
+ __func__, new_cpu, cpu);
}
+ } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+ cpufreq_driver->stop_cpu(policy);
}
return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
+ struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int ret;
@@ -1272,12 +1410,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
pr_err("%s: Failed to exit governor\n",
- __func__);
+ __func__);
return ret;
}
}
- if (!frozen)
+ if (!cpufreq_suspended)
cpufreq_policy_put_kobj(policy);
/*
@@ -1293,16 +1431,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
list_del(&policy->policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (!frozen)
+ if (!cpufreq_suspended)
cpufreq_policy_free(policy);
- } else {
- if (has_target()) {
- if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
- (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
- pr_err("%s: Failed to start governor\n",
- __func__);
- return ret;
- }
+ } else if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
}
}
@@ -1323,10 +1461,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+ ret = __cpufreq_remove_dev_prepare(dev, sif);
if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif, false);
+ ret = __cpufreq_remove_dev_finish(dev, sif);
return ret;
}
@@ -1357,8 +1495,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
struct cpufreq_freqs freqs;
unsigned long flags;
- pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
- "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+ pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+ old_freq, new_freq);
freqs.old = old_freq;
freqs.new = new_freq;
@@ -1367,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
@@ -1447,23 +1585,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
- if (cpufreq_disabled() || !cpufreq_driver)
- return -ENOENT;
-
- BUG_ON(!policy);
-
- if (!down_read_trylock(&cpufreq_rwsem))
- return 0;
-
- down_read(&policy->rwsem);
-
- ret_freq = __cpufreq_get(cpu);
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(cpu);
+ up_read(&policy->rwsem);
- up_read(&policy->rwsem);
- up_read(&cpufreq_rwsem);
+ cpufreq_cpu_put(policy);
+ }
return ret_freq;
}
@@ -1476,83 +1607,103 @@ static struct subsys_interface cpufreq_interface = {
.remove_dev = cpufreq_remove_dev,
};
+/*
+ * In case the platform wants some specific frequency to be configured
+ * during suspend.
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (!policy->suspend_freq) {
+ pr_err("%s: suspend_freq can't be zero\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+ policy->suspend_freq);
+
+ ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+ CPUFREQ_RELATION_H);
+ if (ret)
+ pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+ __func__, policy->suspend_freq, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
/**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
*
- * This function is only executed for the boot processor. The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle, because some of the devices (i2c, regulators, etc.) they use for
+ * changing frequency are suspended quickly after this point.
*/
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
{
- int ret = 0;
-
- int cpu = smp_processor_id();
struct cpufreq_policy *policy;
- pr_debug("suspending cpu %u\n", cpu);
+ if (!cpufreq_driver)
+ return;
- /* If there's no policy for the boot CPU, we have nothing to do. */
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- return 0;
+ if (!has_target())
+ return;
- if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(policy);
- if (ret)
- printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
- "step on CPU %u\n", policy->cpu);
+ pr_debug("%s: Suspending Governors\n", __func__);
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ else if (cpufreq_driver->suspend
+ && cpufreq_driver->suspend(policy))
+ pr_err("%s: Failed to suspend driver: %p\n", __func__,
+ policy);
}
- cpufreq_cpu_put(policy);
- return ret;
+ cpufreq_suspended = true;
}
/**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
*
- * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- * restored. It will verify that the current freq is in sync with
- * what we believe it to be. This is a bit later than when it
- * should be, but nonethteless it's better than calling
- * cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU. The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
*/
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
{
- int ret = 0;
-
- int cpu = smp_processor_id();
struct cpufreq_policy *policy;
- pr_debug("resuming cpu %u\n", cpu);
+ if (!cpufreq_driver)
+ return;
- /* If there's no policy for the boot CPU, we have nothing to do. */
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
+ if (!has_target())
return;
- if (cpufreq_driver->resume) {
- ret = cpufreq_driver->resume(policy);
- if (ret) {
- printk(KERN_ERR "cpufreq: resume failed in ->resume "
- "step on CPU %u\n", policy->cpu);
- goto fail;
- }
- }
+ pr_debug("%s: Resuming Governors\n", __func__);
- schedule_work(&policy->update);
+ cpufreq_suspended = false;
-fail:
- cpufreq_cpu_put(policy);
-}
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ pr_err("%s: Failed to resume driver: %p\n", __func__,
+ policy);
+ else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+ || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
-static struct syscore_ops cpufreq_syscore_ops = {
- .suspend = cpufreq_bp_suspend,
- .resume = cpufreq_bp_resume,
-};
+ /*
+ * Schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the
+ * last policy in the list. It will verify that the current freq is in
+ * sync with what we believe it to be.
+ */
+ if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+ schedule_work(&policy->update);
+ }
+}
/**
* cpufreq_get_current_driver - return current driver's name
@@ -1668,7 +1819,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
target_freq = policy->min;
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
- policy->cpu, target_freq, relation, old_target_freq);
+ policy->cpu, target_freq, relation, old_target_freq);
/*
* This might look like a redundant call as we are checking it again
@@ -1713,29 +1864,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
freqs.flags = 0;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
- __func__, policy->cpu, freqs.old,
- freqs.new);
+ __func__, policy->cpu, freqs.old, freqs.new);
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
}
retval = cpufreq_driver->target_index(policy, index);
if (retval)
pr_err("%s: Failed to change cpu frequency: %d\n",
- __func__, retval);
+ __func__, retval);
- if (notify) {
- /*
- * Notify with old freq in case we failed to change
- * frequency
- */
- if (retval)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_POSTCHANGE);
- }
+ if (notify)
+ cpufreq_freq_transition_end(policy, &freqs, retval);
}
out:
@@ -1778,17 +1918,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
struct cpufreq_governor *gov = NULL;
#endif
+ /* Don't start any governor operations if we are entering suspend */
+ if (cpufreq_suspended)
+ return 0;
+
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
if (!gov)
return -EINVAL;
else {
- printk(KERN_WARNING "%s governor failed, too long"
- " transition latency of HW, fallback"
- " to %s governor\n",
- policy->governor->name,
- gov->name);
+ pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ policy->governor->name, gov->name);
policy->governor = gov;
}
}
@@ -1798,7 +1939,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
return -EINVAL;
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
- policy->cpu, event);
+ policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1865,9 +2006,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
-#ifdef CONFIG_HOTPLUG_CPU
int cpu;
-#endif
if (!governor)
return;
@@ -1875,14 +2014,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
if (cpufreq_disabled())
return;
-#ifdef CONFIG_HOTPLUG_CPU
for_each_present_cpu(cpu) {
if (cpu_online(cpu))
continue;
if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
}
-#endif
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
@@ -1927,22 +2064,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
- int ret = 0, failed = 1;
+ struct cpufreq_governor *old_gov;
+ int ret;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
- new_policy->min, new_policy->max);
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_policy->cpu, new_policy->min, new_policy->max);
memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
- if (new_policy->min > policy->max || new_policy->max < policy->min) {
- ret = -EINVAL;
- goto error_out;
- }
+ if (new_policy->min > policy->max || new_policy->max < policy->min)
+ return -EINVAL;
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* adjust if necessary - all reasons */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -1958,7 +2094,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
ret = cpufreq_driver->verify(new_policy);
if (ret)
- goto error_out;
+ return ret;
/* notification of the new policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -1968,63 +2104,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = new_policy->max;
pr_debug("new min and max freqs are %u - %u kHz\n",
- policy->min, policy->max);
+ policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
policy->policy = new_policy->policy;
pr_debug("setting range\n");
- ret = cpufreq_driver->setpolicy(new_policy);
- } else {
- if (new_policy->governor != policy->governor) {
- /* save old, working values */
- struct cpufreq_governor *old_gov = policy->governor;
-
- pr_debug("governor switch\n");
-
- /* end old governor */
- if (policy->governor) {
- __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- up_write(&policy->rwsem);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
- }
+ return cpufreq_driver->setpolicy(new_policy);
+ }
- /* start new governor */
- policy->governor = new_policy->governor;
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
- failed = 0;
- } else {
- up_write(&policy->rwsem);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
- }
- }
+ if (new_policy->governor == policy->governor)
+ goto out;
- if (failed) {
- /* new governor failed, so re-start old one */
- pr_debug("starting governor %s failed\n",
- policy->governor->name);
- if (old_gov) {
- policy->governor = old_gov;
- __cpufreq_governor(policy,
- CPUFREQ_GOV_POLICY_INIT);
- __cpufreq_governor(policy,
- CPUFREQ_GOV_START);
- }
- ret = -EINVAL;
- goto error_out;
- }
- /* might be a policy change, too, so fall through */
- }
- pr_debug("governor: change or update limits\n");
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ pr_debug("governor switch\n");
+
+ /* save old, working values */
+ old_gov = policy->governor;
+ /* end old governor */
+ if (old_gov) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
}
-error_out:
- return ret;
+ /* start new governor */
+ policy->governor = new_policy->governor;
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+ goto out;
+
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* new governor failed, so re-start old one */
+ pr_debug("starting governor %s failed\n", policy->governor->name);
+ if (old_gov) {
+ policy->governor = old_gov;
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ }
+
+ return -EINVAL;
+
+ out:
+ pr_debug("governor: change or update limits\n");
+ return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
@@ -2058,10 +2184,15 @@ int cpufreq_update_policy(unsigned int cpu)
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
+ if (WARN_ON(!new_policy.cur)) {
+ ret = -EIO;
+ goto no_policy;
+ }
+
if (!policy->cur) {
- pr_debug("Driver did not initialize current freq");
+ pr_debug("Driver did not initialize current freq\n");
policy->cur = new_policy.cur;
} else {
if (policy->cur != new_policy.cur && has_target())
@@ -2085,30 +2216,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
- bool frozen = false;
dev = get_cpu_device(cpu);
if (dev) {
-
- if (action & CPU_TASKS_FROZEN)
- frozen = true;
-
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
- __cpufreq_add_dev(dev, NULL, frozen);
- cpufreq_update_policy(cpu);
+ __cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev_prepare(dev, NULL, frozen);
+ __cpufreq_remove_dev_prepare(dev, NULL);
break;
case CPU_POST_DEAD:
- __cpufreq_remove_dev_finish(dev, NULL, frozen);
+ __cpufreq_remove_dev_finish(dev, NULL);
break;
case CPU_DOWN_FAILED:
- __cpufreq_add_dev(dev, NULL, frozen);
+ __cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -2120,6 +2245,73 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
};
/*********************************************************************
+ * BOOST *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(int state)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_policy *policy;
+ int ret = -EINVAL;
+
+ list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (freq_table) {
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
+ }
+ policy->user_policy.max = policy->max;
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (cpufreq_driver->boost_enabled == state)
+ return 0;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ ret = cpufreq_driver->set_boost(state);
+ if (ret) {
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_supported(void)
+{
+ if (likely(cpufreq_driver))
+ return cpufreq_driver->boost_supported;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+
+int cpufreq_boost_enabled(void)
+{
+ return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
+
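For reference, an editorial sketch (not part of the patch) of how a table-based driver would opt into the new boost support: the boost OPP is tagged CPUFREQ_BOOST_FREQ (as exynos4x12 does further down) and is skipped by cpufreq_frequency_table_cpuinfo() until boost is enabled through the new global sysfs attribute; with ->set_boost left unset, the core falls back to cpufreq_boost_set_sw(). The sketch_* names are hypothetical:

static struct cpufreq_frequency_table sketch_freq_table[] = {
	{ CPUFREQ_BOOST_FREQ, 1500 * 1000 },	/* hidden until boost is enabled */
	{ 0, 1200 * 1000 },
	{ 0, 1000 * 1000 },
	{ 0, CPUFREQ_TABLE_END },
};

static struct cpufreq_driver sketch_boost_driver = {
	.verify		 = cpufreq_generic_frequency_table_verify,
	.target_index	 = sketch_target,
	.init		 = sketch_init,
	.name		 = "sketch_boost",
	.attr		 = cpufreq_generic_attr,
	.boost_supported = true,	/* no ->set_boost: core uses cpufreq_boost_set_sw() */
};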
+/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
@@ -2143,7 +2335,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target))
+ driver_data->target) ||
+ (driver_data->setpolicy && (driver_data->target_index ||
+ driver_data->target)))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2159,9 +2353,25 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ if (cpufreq_boost_supported()) {
+ /*
+ * Check if driver provides function to enable boost -
+ * if not, use cpufreq_boost_set_sw as default
+ */
+ if (!cpufreq_driver->set_boost)
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+ ret = cpufreq_sysfs_create_file(&boost.attr);
+ if (ret) {
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+ goto err_null_driver;
+ }
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
- goto err_null_driver;
+ goto err_boost_unreg;
if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
@@ -2177,7 +2387,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
/* if all ->init() calls failed, unregister */
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
- driver_data->name);
+ driver_data->name);
goto err_if_unreg;
}
}
@@ -2188,6 +2398,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return 0;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@@ -2214,6 +2427,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
subsys_interface_unregister(&cpufreq_interface);
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
down_write(&cpufreq_rwsem);
@@ -2235,7 +2451,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e6be63561fa6..ba43991ba98a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -119,8 +119,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
{
int i;
+ mutex_lock(&cpufreq_governor_lock);
if (!policy->governor_enabled)
- return;
+ goto out_unlock;
if (!all_cpus) {
/*
@@ -135,6 +136,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
}
+
+out_unlock:
+ mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index b5f2b8618949..bfb9ae14142c 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -257,6 +257,8 @@ static ssize_t show_sampling_rate_min_gov_pol \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
}
+extern struct mutex cpufreq_governor_lock;
+
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 4cf0d2805cb2..ecaaebf969fc 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -151,64 +151,54 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
return -1;
}
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
-static void cpufreq_stats_free_table(unsigned int cpu)
+static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
- struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (stat) {
- pr_debug("%s: Free stat table\n", __func__);
- kfree(stat->time_in_state);
- kfree(stat);
- per_cpu(cpufreq_stats_table, cpu) = NULL;
- }
+ if (!stat)
+ return;
+
+ pr_debug("%s: Free stat table\n", __func__);
+
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ kfree(stat->time_in_state);
+ kfree(stat);
+ per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
}
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+static void cpufreq_stats_free_table(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- if (!cpufreq_frequency_get_table(cpu))
- goto put_ref;
-
- if (!policy_is_shared(policy)) {
- pr_debug("%s: Free sysfs stat\n", __func__);
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
- }
+ if (cpufreq_frequency_get_table(policy->cpu))
+ __cpufreq_stats_free_table(policy);
-put_ref:
cpufreq_cpu_put(policy);
}
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
unsigned int i, j, count = 0, ret = 0;
struct cpufreq_stats *stat;
- struct cpufreq_policy *current_policy;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *table;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (unlikely(!table))
+ return 0;
+
if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY;
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if ((stat) == NULL)
return -ENOMEM;
- current_policy = cpufreq_cpu_get(cpu);
- if (current_policy == NULL) {
- ret = -EINVAL;
- goto error_get_fail;
- }
-
- ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
if (ret)
goto error_out;
@@ -231,7 +221,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
if (!stat->time_in_state) {
ret = -ENOMEM;
- goto error_out;
+ goto error_alloc;
}
stat->freq_table = (unsigned int *)(stat->time_in_state + count);
@@ -251,16 +241,32 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
- cpufreq_cpu_put(current_policy);
return 0;
+error_alloc:
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
error_out:
- cpufreq_cpu_put(current_policy);
-error_get_fail:
kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL;
return ret;
}
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ /*
+ * "likely(!policy)" because normally cpufreq_stats will be registered
+ * before cpufreq driver
+ */
+ policy = cpufreq_cpu_get(cpu);
+ if (likely(!policy))
+ return;
+
+ __cpufreq_stats_create_table(policy);
+
+ cpufreq_cpu_put(policy);
+}
+
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -277,25 +283,20 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
- int ret;
+ int ret = 0;
struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *table;
- unsigned int cpu = policy->cpu;
if (val == CPUFREQ_UPDATE_POLICY_CPU) {
cpufreq_stats_update_policy_cpu(policy);
return 0;
}
- if (val != CPUFREQ_NOTIFY)
- return 0;
- table = cpufreq_frequency_get_table(cpu);
- if (!table)
- return 0;
- ret = cpufreq_stats_create_table(policy, table);
- if (ret)
- return ret;
- return 0;
+ if (val == CPUFREQ_CREATE_POLICY)
+ ret = __cpufreq_stats_create_table(policy);
+ else if (val == CPUFREQ_REMOVE_POLICY)
+ __cpufreq_stats_free_table(policy);
+
+ return ret;
}
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
@@ -334,29 +335,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
return 0;
}
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- cpufreq_stats_free_sysfs(cpu);
- break;
- case CPU_DEAD:
- cpufreq_stats_free_table(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
- .notifier_call = cpufreq_stat_cpu_callback,
- .priority = 1,
-};
-
static struct notifier_block notifier_policy_block = {
.notifier_call = cpufreq_stat_notifier_policy
};
@@ -376,14 +354,14 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
- register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_create_table(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
return ret;
@@ -399,11 +377,8 @@ static void __exit cpufreq_stats_exit(void)
CPUFREQ_POLICY_NOTIFIER);
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
cpufreq_stats_free_table(cpu);
- cpufreq_stats_free_sysfs(cpu);
- }
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
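An editorial aside, not part of the patch: with the hotcpu notifier gone, policy lifetime is now visible to any interested code through the CPUFREQ_CREATE_POLICY/CPUFREQ_REMOVE_POLICY events, the same mechanism cpufreq_stats uses above. A minimal sketch of such a listener (sketch_* names are hypothetical):

static int sketch_policy_notifier(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		pr_debug("policy created for CPU %u\n", policy->cpu);
	else if (val == CPUFREQ_REMOVE_POLICY)
		pr_debug("policy removed for CPU %u\n", policy->cpu);

	return 0;
}

static struct notifier_block sketch_policy_nb = {
	.notifier_call = sketch_policy_notifier,
};

/* registered with cpufreq_register_notifier(&sketch_policy_nb,
 *					      CPUFREQ_POLICY_NOTIFIER) */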
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index 86559040c54c..d4573032cbbc 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 26d940d40b1d..13c3361437f7 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 5e8a854381b7..28a16dc6e02e 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -58,14 +58,6 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
return 0;
}
-static unsigned int davinci_getspeed(unsigned int cpu)
-{
- if (cpu)
- return 0;
-
- return clk_get_rate(cpufreq.armclk) / 1000;
-}
-
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -73,7 +65,7 @@ static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
unsigned int old_freq, new_freq;
int ret = 0;
- old_freq = davinci_getspeed(0);
+ old_freq = policy->cur;
new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
@@ -116,6 +108,8 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
+ policy->clk = cpufreq.armclk;
+
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
@@ -126,12 +120,11 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver davinci_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = davinci_verify_speed,
.target_index = davinci_target,
- .get = davinci_getspeed,
+ .get = cpufreq_generic_get,
.init = davinci_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "davinci",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 0e67ab96321a..412a78bb0c94 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -26,32 +26,18 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
-static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
-{
- int i = 0;
- unsigned long freq = clk_get_rate(armss_clk) / 1000;
-
- /* The value is rounded to closest frequency in the defined table. */
- while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
- if (freq < freq_table[i].frequency +
- (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
- return freq_table[i].frequency;
- i++;
- }
-
- return freq_table[i].frequency;
-}
-
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = armss_clk;
return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = dbx500_cpufreq_target,
- .get = dbx500_cpufreq_getspeed,
+ .get = cpufreq_generic_get,
.init = dbx500_cpufreq_init,
.name = "DBX500",
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 9012b8bb6b64..a0d2a423cea9 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -382,7 +382,6 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
/* Bye */
- cpufreq_frequency_table_put_attr(policy->cpu);
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
return 0;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index de08acff5101..c987e94708f5 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -198,7 +198,6 @@ static struct cpufreq_driver elanfreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "elanfreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f3c22874da75..f99cfe24e7bc 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -16,24 +16,15 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
-#include <linux/suspend.h>
+#include <linux/platform_device.h>
#include <plat/cpu.h>
#include "exynos-cpufreq.h"
static struct exynos_dvfs_info *exynos_info;
-
static struct regulator *arm_regulator;
-
static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
-
-static unsigned int exynos_getspeed(unsigned int cpu)
-{
- return clk_get_rate(exynos_info->cpu_clk) / 1000;
-}
static int exynos_cpufreq_get_index(unsigned int freq)
{
@@ -138,101 +129,33 @@ out:
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- int ret = 0;
-
- mutex_lock(&cpufreq_lock);
-
- if (frequency_locked)
- goto out;
-
- ret = exynos_cpufreq_scale(freq_table[index].frequency);
-
-out:
- mutex_unlock(&cpufreq_lock);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
+ return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
}
-#endif
-
-/**
- * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- * context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
- unsigned long pm_event, void *v)
-{
- int ret;
-
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- mutex_lock(&cpufreq_lock);
- frequency_locked = true;
- mutex_unlock(&cpufreq_lock);
-
- ret = exynos_cpufreq_scale(locking_frequency);
- if (ret < 0)
- return NOTIFY_BAD;
-
- break;
-
- case PM_POST_SUSPEND:
- mutex_lock(&cpufreq_lock);
- frequency_locked = false;
- mutex_unlock(&cpufreq_lock);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block exynos_cpufreq_nb = {
- .notifier_call = exynos_cpufreq_pm_notifier,
-};
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ policy->clk = exynos_info->cpu_clk;
+ policy->suspend_freq = locking_frequency;
return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = exynos_target,
- .get = exynos_getspeed,
+ .get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
.attr = cpufreq_generic_attr,
+#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ .boost_supported = true,
+#endif
#ifdef CONFIG_PM
- .suspend = exynos_cpufreq_suspend,
- .resume = exynos_cpufreq_resume,
+ .suspend = cpufreq_generic_suspend,
#endif
};
-static int __init exynos_cpufreq_init(void)
+static int exynos_cpufreq_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
@@ -263,22 +186,24 @@ static int __init exynos_cpufreq_init(void)
goto err_vdd_arm;
}
- locking_frequency = exynos_getspeed(0);
-
- register_pm_notifier(&exynos_cpufreq_nb);
+ /* Done here as we want to capture boot frequency */
+ locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
- if (cpufreq_register_driver(&exynos_driver)) {
- pr_err("%s: failed to register cpufreq driver\n", __func__);
- goto err_cpufreq;
- }
-
- return 0;
-err_cpufreq:
- unregister_pm_notifier(&exynos_cpufreq_nb);
+ if (!cpufreq_register_driver(&exynos_driver))
+ return 0;
+ pr_err("%s: failed to register cpufreq driver\n", __func__);
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
return -EINVAL;
}
-late_initcall(exynos_cpufreq_init);
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+ .driver = {
+ .name = "exynos-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = exynos_cpufreq_probe,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
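An editorial aside, not part of the patch: now that the Exynos driver probes as a platform driver, some platform or machine init code is expected to create the matching "exynos-cpufreq" device. A hypothetical registration sketch:

static int __init sketch_register_exynos_cpufreq(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(sketch_register_exynos_cpufreq);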
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index 7f25cee8cec2..3ddade8a5125 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -67,3 +67,25 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
return -EOPNOTSUPP;
}
#endif
+
+#include <plat/cpu.h>
+#include <mach/map.h>
+
+#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200)
+#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400)
+
+#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500)
+#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504)
+#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600)
+#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604)
+
+#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
+#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
+
+#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000)
+#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100)
+#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400)
+#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500)
+#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504)
+#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600)
+#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604)
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index dfd1643b0b2f..40d84c43d8f4 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
-#include <mach/regs-clock.h>
-
#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index efad5e657f6f..7c11ace3b3fc 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
-#include <mach/regs-clock.h>
-
#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
@@ -32,7 +30,7 @@ static unsigned int exynos4x12_volt_table[] = {
};
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {L0, CPUFREQ_ENTRY_INVALID},
+ {CPUFREQ_BOOST_FREQ, 1500 * 1000},
{L1, 1400 * 1000},
{L2, 1300 * 1000},
{L3, 1200 * 1000},
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 8feda86fe42c..5f90b82a4082 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -18,7 +18,6 @@
#include <linux/cpufreq.h>
#include <mach/map.h>
-#include <mach/regs-clock.h>
#include "exynos-cpufreq.h"
@@ -102,12 +101,12 @@ static void set_clkdiv(unsigned int div_index)
cpu_relax();
}
-static void set_apll(unsigned int new_index,
- unsigned int old_index)
+static void set_apll(unsigned int index)
{
- unsigned int tmp, pdiv;
+ unsigned int tmp;
+ unsigned int freq = apll_freq_5250[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -116,24 +115,9 @@ static void set_apll(unsigned int new_index,
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
-
- __raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
+ clk_set_rate(mout_apll, freq * 1000);
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_5250[new_index].mps;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- cpu_relax();
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- } while (!(tmp & (0x1 << 29)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -141,55 +125,17 @@ static void set_apll(unsigned int new_index,
tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
tmp &= (0x7 << 16);
} while (tmp != (0x1 << 16));
-
-}
-
-static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
}
static void exynos5250_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos5250_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- set_clkdiv(new_index);
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_5250[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
-
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- set_apll(new_index, old_index);
- }
+ set_clkdiv(new_index);
+ set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos5250_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS5_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_5250[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS5_APLL_CON0);
- /* 2. Change the system clock divider values */
- set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- set_apll(new_index, old_index);
- /* 2. Change the system clock divider values */
- set_clkdiv(new_index);
- }
+ set_apll(new_index);
+ set_clkdiv(new_index);
}
}
@@ -222,7 +168,6 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos5250_volt_table;
info->freq_table = exynos5250_freq_table;
info->set_freq = exynos5250_set_frequency;
- info->need_apll_change = exynos5250_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 76bef8b078cb..a6b8214d7b77 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -100,7 +100,6 @@ struct exynos_dvfs_data {
struct resource *mem;
int irq;
struct clk *cpu_clk;
- unsigned int cur_frequency;
unsigned int latency;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_count;
@@ -165,7 +164,7 @@ static int init_div_table(void)
return 0;
}
-static void exynos_enable_dvfs(void)
+static void exynos_enable_dvfs(unsigned int cur_frequency)
{
unsigned int tmp, i, cpu;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
@@ -184,18 +183,18 @@ static void exynos_enable_dvfs(void)
/* Set initial performance index */
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- if (freq_table[i].frequency == dvfs_info->cur_frequency)
+ if (freq_table[i].frequency == cur_frequency)
break;
if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
/* Assign the highest frequency */
i = 0;
- dvfs_info->cur_frequency = freq_table[i].frequency;
+ cur_frequency = freq_table[i].frequency;
}
dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
- dvfs_info->cur_frequency);
+ cur_frequency);
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
@@ -209,11 +208,6 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
-static unsigned int exynos_getspeed(unsigned int cpu)
-{
- return dvfs_info->cur_frequency;
-}
-
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int tmp;
@@ -222,10 +216,10 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
mutex_lock(&cpufreq_lock);
- freqs.old = dvfs_info->cur_frequency;
+ freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Set the target frequency in all C0_3_PSTATE register */
for_each_cpu(i, policy->cpus) {
@@ -250,7 +244,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
goto skip_work;
mutex_lock(&cpufreq_lock);
- freqs.old = dvfs_info->cur_frequency;
+ freqs.old = policy->cur;
cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
@@ -260,12 +254,11 @@ static void exynos_cpufreq_work(struct work_struct *work)
if (likely(index < dvfs_info->freq_count)) {
freqs.new = freq_table[index].frequency;
- dvfs_info->cur_frequency = freqs.new;
} else {
dev_crit(dvfs_info->dev, "New frequency out of range\n");
- freqs.new = dvfs_info->cur_frequency;
+ freqs.new = freqs.old;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
cpufreq_cpu_put(policy);
mutex_unlock(&cpufreq_lock);
@@ -307,17 +300,18 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ policy->clk = dvfs_info->cpu_clk;
return cpufreq_generic_init(policy, dvfs_info->freq_table,
dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = exynos_target,
- .get = exynos_getspeed,
+ .get = cpufreq_generic_get,
.init = exynos_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
.attr = cpufreq_generic_attr,
};
@@ -335,6 +329,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
int ret = -EINVAL;
struct device_node *np;
struct resource res;
+ unsigned int cur_frequency;
np = pdev->dev.of_node;
if (!np)
@@ -391,13 +386,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_free_table;
}
- dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
- if (!dvfs_info->cur_frequency) {
+ cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
+ if (!cur_frequency) {
dev_err(dvfs_info->dev, "Failed to get clock rate\n");
ret = -EINVAL;
goto err_free_table;
}
- dvfs_info->cur_frequency /= 1000;
+ cur_frequency /= 1000;
INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
@@ -414,7 +409,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_free_table;
}
- exynos_enable_dvfs();
+ exynos_enable_dvfs(cur_frequency);
ret = cpufreq_register_driver(&exynos_driver);
if (ret) {
dev_err(dvfs_info->dev,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3458d27f63b4..65a477075b3f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -32,6 +32,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
continue;
}
+ if (!cpufreq_boost_enabled()
+ && table[i].driver_data == CPUFREQ_BOOST_FREQ)
+ continue;
+
pr_debug("table entry %u: %u kHz, %u driver_data\n",
i, freq, table[i].driver_data);
if (freq < min_freq)
@@ -87,8 +91,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
/*
- * Generic routine to verify policy & frequency table, requires driver to call
- * cpufreq_frequency_table_get_attr() prior to it.
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
@@ -178,25 +182,57 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
+int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq)
+{
+ struct cpufreq_frequency_table *table;
+ int i;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!table)) {
+ pr_debug("%s: Unable to find frequency table\n", __func__);
+ return -ENOENT;
+ }
+
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (table[i].frequency == freq)
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
+
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
-static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
+ bool show_boost)
{
unsigned int i = 0;
- unsigned int cpu = policy->cpu;
ssize_t count = 0;
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *table = policy->freq_table;
- if (!per_cpu(cpufreq_show_table, cpu))
+ if (!table)
return -ENODEV;
- table = per_cpu(cpufreq_show_table, cpu);
-
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
+ /*
+ * show_boost = true and driver_data = BOOST freq
+ * display BOOST freqs
+ *
+ * show_boost = false and driver_data = BOOST freq
+ * show_boost = true and driver_data != BOOST freq
+ * continue - do not display anything
+ *
+ * show_boost = false and driver_data != BOOST freq
+ * display NON BOOST freqs
+ */
+ if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
+ continue;
+
count += sprintf(&buf[count], "%d ", table[i].frequency);
}
count += sprintf(&buf[count], "\n");
@@ -205,38 +241,42 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
}
-struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
- .attr = { .name = "scaling_available_frequencies",
- .mode = 0444,
- },
- .show = show_available_freqs,
-};
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+#define cpufreq_attr_available_freq(_name) \
+struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
+__ATTR_RO(_name##_frequencies)
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
-
-/*
- * if you use these, you must assure that the frequency table is valid
- * all the time between get_attr and put_attr!
+/**
+ * scaling_available_frequencies_show - show available normal frequencies for
+ * the specified CPU
*/
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
- unsigned int cpu)
+static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
{
- pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
- per_cpu(cpufreq_show_table, cpu) = table;
+ return show_available_freqs(policy, buf, false);
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
+cpufreq_attr_available_freq(scaling_available);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
-void cpufreq_frequency_table_put_attr(unsigned int cpu)
+/**
+ * scaling_boost_frequencies_show - show available boost frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
{
- pr_debug("clearing show_table for cpu %u\n", cpu);
- per_cpu(cpufreq_show_table, cpu) = NULL;
+ return show_available_freqs(policy, buf, true);
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+cpufreq_attr_available_freq(scaling_boost);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
+
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_CPU_FREQ_BOOST_SW
+ &cpufreq_freq_attr_scaling_boost_freqs,
+#endif
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -244,24 +284,18 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
int ret = cpufreq_frequency_table_cpuinfo(policy, table);
if (!ret)
- cpufreq_frequency_table_get_attr(table, policy->cpu);
+ policy->freq_table = table;
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
-{
- pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
- policy->cpu, policy->last_cpu);
- per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
- policy->last_cpu);
- per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
-}
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
- return per_cpu(cpufreq_show_table, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ return policy ? policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
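An editorial aside, not part of the patch: the new cpufreq_frequency_table_get_index() helper maps a frequency back to its slot in policy->freq_table. A small usage sketch (the sketch_* name is hypothetical):

static int sketch_index_of_cur(struct cpufreq_policy *policy)
{
	int idx = cpufreq_frequency_table_get_index(policy, policy->cur);

	if (idx < 0)
		pr_debug("%u kHz not found in the frequency table\n",
			 policy->cur);
	return idx;
}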
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index d83e8266a58e..1d723dc8880c 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
freqs.new = new_khz;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
local_irq_save(flags);
if (new_khz != stock_freq) {
@@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
gx_params->pci_suscfg = suscfg;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 53c6ac637e10..a22b5d182e0e 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -332,7 +332,6 @@ acpi_cpufreq_cpu_exit (
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
- cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(&data->acpi_data,
policy->cpu);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 4b3f18e5f36b..e27fca86fe4f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -35,10 +35,8 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
-static unsigned int imx6q_get_speed(unsigned int cpu)
-{
- return clk_get_rate(arm_clk) / 1000;
-}
+static u32 *imx6_soc_volt;
+static u32 soc_opp_count;
static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
@@ -69,23 +67,22 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* scaling up? scale voltage before frequency */
if (new_freq > old_freq) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+ return ret;
+ }
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
return ret;
}
-
- /*
- * Need to increase vddpu and vddsoc for safety
- * if we are about to run at 1.2 GHz.
- */
- if (new_freq == FREQ_1P2_GHZ / 1000) {
- regulator_set_voltage_tol(pu_reg,
- PU_SOC_VOLTAGE_HIGH, 0);
- regulator_set_voltage_tol(soc_reg,
- PU_SOC_VOLTAGE_HIGH, 0);
- }
}
/*
@@ -120,12 +117,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
"failed to scale vddarm down: %d\n", ret);
ret = 0;
}
-
- if (old_freq == FREQ_1P2_GHZ / 1000) {
- regulator_set_voltage_tol(pu_reg,
- PU_SOC_VOLTAGE_NORMAL, 0);
- regulator_set_voltage_tol(soc_reg,
- PU_SOC_VOLTAGE_NORMAL, 0);
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+ ret = 0;
+ }
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ ret = 0;
}
}
@@ -134,15 +134,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = arm_clk;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
- .get = imx6q_get_speed,
+ .get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
- .exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
};
@@ -153,6 +154,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
+ const struct property *prop;
+ const __be32 *val;
+ u32 nr, i, j;
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
@@ -187,12 +191,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_node;
}
- /* We expect an OPP table supplied by platform */
+ /*
+ * We expect an OPP table supplied by platform.
+ * Just in case the platform did not supply the OPP
+ * table, try to get it from the device tree.
+ */
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
- ret = num;
- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
- goto put_node;
+ ret = of_init_opp_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_node;
+ }
+
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto put_node;
+ }
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
@@ -201,10 +218,62 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_node;
}
+ /* Make imx6_soc_volt array's size same as arm opp number */
+ imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+ if (imx6_soc_volt == NULL) {
+ ret = -ENOMEM;
+ goto free_freq_table;
+ }
+
+ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+ if (!prop || !prop->value)
+ goto soc_opp_out;
+
+ /*
+ * The property is a list of <freq-kHz volt-uV> tuples, one
+ * frequency/voltage pair per OPP.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2 || (nr / 2) < num)
+ goto soc_opp_out;
+
+ for (j = 0; j < num; j++) {
+ val = prop->value;
+ for (i = 0; i < nr / 2; i++) {
+ unsigned long freq = be32_to_cpup(val++);
+ unsigned long volt = be32_to_cpup(val++);
+ if (freq_table[j].frequency == freq) {
+ imx6_soc_volt[soc_opp_count++] = volt;
+ break;
+ }
+ }
+ }
+
+soc_opp_out:
+ /* use fixed soc opp volt if no valid soc opp info found in dtb */
+ if (soc_opp_count != num) {
+ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+ for (j = 0; j < num; j++)
+ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+ }
+
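/*
 * Editorial note, not part of the patch: a matching fsl,soc-operating-points
 * property would look roughly like the fragment below (frequency in kHz,
 * voltage in uV; the values are illustrative, not from any real board):
 *
 *	fsl,soc-operating-points = <
 *		1200000 1275000
 *		 996000 1250000
 *		 792000 1175000
 *	>;
 */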
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
/*
+ * Calculate the ramp time for max voltage change in the
+ * VDDSOC and VDDPU regulators.
+ */
+ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ /*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
@@ -221,18 +290,6 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
if (ret > 0)
transition_latency += ret * 1000;
- /* Count vddpu and vddsoc latency in for 1.2 GHz support */
- if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
- ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
- PU_SOC_VOLTAGE_HIGH);
- if (ret > 0)
- transition_latency += ret * 1000;
- ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
- PU_SOC_VOLTAGE_HIGH);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
-
ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
if (ret) {
dev_err(cpu_dev, "failed register driver: %d\n", ret);
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 7d8ab000d317..e5122f1bfe78 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -122,7 +122,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
return 0;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
@@ -143,7 +143,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
*/
set_cpus_allowed(current, cpus_allowed);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
@@ -190,6 +190,7 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver integrator_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = integrator_verify_policy,
.target = integrator_set_target,
.get = integrator_get,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d51f17ed691e..099967302bf2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,11 +34,15 @@
#define SAMPLE_COUNT 3
-#define BYT_RATIOS 0x66a
+#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
+#define BYT_TURBO_RATIOS 0x66c
-#define FRAC_BITS 8
+
+#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
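/*
 * Editorial note, not part of the patch: with FRAC_BITS dropping from 8 to
 * 6, one integer unit is 64 in fixed point, e.g.:
 *
 *	int_tofp(100) == 100 << 6 == 6400
 *	fp_toint(6400) == 6400 >> 6 == 100
 */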
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -54,6 +58,7 @@ struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ unsigned long long tsc;
int freq;
};
@@ -64,6 +69,12 @@ struct pstate_data {
int turbo_pstate;
};
+struct vid_data {
+ int32_t min;
+ int32_t max;
+ int32_t ratio;
+};
+
struct _pid {
int setpoint;
int32_t integral;
@@ -82,14 +93,13 @@ struct cpudata {
struct timer_list timer;
struct pstate_data pstate;
+ struct vid_data vid;
struct _pid pid;
- int min_pstate_count;
-
u64 prev_aperf;
u64 prev_mperf;
- int sample_ptr;
- struct sample samples[SAMPLE_COUNT];
+ unsigned long long prev_tsc;
+ struct sample sample;
};
static struct cpudata **all_cpu_data;
@@ -106,7 +116,8 @@ struct pstate_funcs {
int (*get_max)(void);
int (*get_min)(void);
int (*get_turbo)(void);
- void (*set)(int pstate);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
@@ -142,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
pid->setpoint = setpoint;
pid->deadband = deadband;
pid->integral = int_tofp(integral);
- pid->last_err = setpoint - busy;
+ pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
@@ -348,7 +359,7 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return value & 0xFF;
+ return (value >> 8) & 0xFF;
}
static int byt_get_max_pstate(void)
@@ -358,6 +369,49 @@ static int byt_get_max_pstate(void)
return (value >> 16) & 0xFF;
}
+static int byt_get_turbo_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_TURBO_RATIOS, value);
+ return value & 0x3F;
+}
+
+static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+ int32_t vid_fp;
+ u32 vid;
+
+ val = pstate << 8;
+ if (limits.no_turbo)
+ val |= (u64)1 << 32;
+
+ vid_fp = cpudata->vid.min + mul_fp(
+ int_tofp(pstate - cpudata->pstate.min_pstate),
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+ vid = fp_toint(vid_fp);
+
+ val |= vid;
+
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static void byt_get_vid(struct cpudata *cpudata)
+{
+ u64 value;
+
+ rdmsrl(BYT_VIDS, value);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+ cpudata->vid.ratio = div_fp(
+ cpudata->vid.max - cpudata->vid.min,
+ int_tofp(cpudata->pstate.max_pstate -
+ cpudata->pstate.min_pstate));
+}
+
+
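/*
 * Editorial worked example, not part of the patch and with illustrative
 * numbers: byt_get_vid() above sets up a linear VID/P-state interpolation.
 * With min_pstate = 8 (VID 32) and max_pstate = 16 (VID 64), vid.ratio
 * comes out to 4 VID steps per P-state, so byt_set_pstate() for P-state 12
 * programs VID 32 + 4 * 4 = 48 into MSR_IA32_PERF_CTL.
 */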
static int core_get_min_pstate(void)
{
u64 value;
@@ -384,7 +438,7 @@ static int core_get_turbo_pstate(void)
return ret;
}
-static void core_set_pstate(int pstate)
+static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -392,7 +446,7 @@ static void core_set_pstate(int pstate)
if (limits.no_turbo)
val |= (u64)1 << 32;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
@@ -424,8 +478,9 @@ static struct cpu_defaults byt_params = {
.funcs = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
- .get_turbo = byt_get_max_pstate,
- .set = core_set_pstate,
+ .get_turbo = byt_get_turbo_pstate,
+ .set = byt_set_pstate,
+ .get_vid = byt_get_vid,
},
};
@@ -462,7 +517,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu->pstate.current_pstate = pstate;
- pstate_funcs.set(pstate);
+ pstate_funcs.set(cpu, pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -488,6 +543,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+
/*
* goto max pstate so we don't slow up boot if we are built-in if we are
* a module we will take care of it during normal operation
@@ -498,30 +556,47 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
- u64 core_pct;
- core_pct = div64_u64(int_tofp(sample->aperf * 100),
- sample->mperf);
- sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+ int32_t core_pct;
+ int32_t c0_pct;
+
+ core_pct = div_fp(int_tofp((sample->aperf)),
+ int_tofp((sample->mperf)));
+ core_pct = mul_fp(core_pct, int_tofp(100));
+ FP_ROUNDUP(core_pct);
+
+ c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
- sample->core_pct_busy = core_pct;
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
+
+ sample->core_pct_busy = mul_fp(core_pct, c0_pct);
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
- cpu->samples[cpu->sample_ptr].aperf = aperf;
- cpu->samples[cpu->sample_ptr].mperf = mperf;
- cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
- cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+ tsc = native_read_tsc();
+
+ aperf = aperf >> FRAC_BITS;
+ mperf = mperf >> FRAC_BITS;
+ tsc = tsc >> FRAC_BITS;
+
+ cpu->sample.aperf = aperf;
+ cpu->sample.mperf = mperf;
+ cpu->sample.tsc = tsc;
+ cpu->sample.aperf -= cpu->prev_aperf;
+ cpu->sample.mperf -= cpu->prev_mperf;
+ cpu->sample.tsc -= cpu->prev_tsc;
- intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+ intel_pstate_calc_busy(cpu, &cpu->sample);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -537,10 +612,11 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate;
- core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+ core_busy = cpu->sample.core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ return FP_ROUNDUP(core_busy);
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
@@ -556,6 +632,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
ctl = pid_calc(pid, busy_scaled);
steps = abs(ctl);
+
if (ctl < 0)
intel_pstate_pstate_increase(cpu, steps);
else
@@ -565,17 +642,20 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
+ struct sample *sample;
intel_pstate_sample(cpu);
+
+ sample = &cpu->sample;
+
intel_pstate_adjust_busy_pstate(cpu);
- if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
- cpu->min_pstate_count++;
- if (!(cpu->min_pstate_count % 5)) {
- intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
- }
- } else
- cpu->min_pstate_count = 0;
+ trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ fp_toint(intel_pstate_get_scaled_busy(cpu)),
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->freq);
intel_pstate_set_sample_time(cpu);
}
@@ -647,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
cpu = all_cpu_data[cpu_num];
if (!cpu)
return 0;
- sample = &cpu->samples[cpu->sample_ptr];
+ sample = &cpu->sample;
return sample->freq;
}
@@ -691,14 +771,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
return 0;
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- int cpu = policy->cpu;
+ int cpu_num = policy->cpu;
+ struct cpudata *cpu = all_cpu_data[cpu_num];
- del_timer(&all_cpu_data[cpu]->timer);
- kfree(all_cpu_data[cpu]);
- all_cpu_data[cpu] = NULL;
- return 0;
+ pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+
+ del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+ kfree(all_cpu_data[cpu_num]);
+ all_cpu_data[cpu_num] = NULL;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -736,7 +819,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.setpolicy = intel_pstate_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
- .exit = intel_pstate_cpu_exit,
+ .stop_cpu = intel_pstate_stop_cpu,
.name = "intel_pstate",
};
@@ -782,6 +865,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_min = funcs->get_min;
pstate_funcs.get_turbo = funcs->get_turbo;
pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
@@ -890,6 +974,7 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+
return rc;
out:
get_online_cpus();
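
The intel_pstate hunks above move the busy calculation to 6-bit fixed-point math (FRAC_BITS, int_tofp/fp_toint, mul_fp/div_fp) and add a Baytrail VID interpolation in byt_set_pstate()/byt_get_vid(). The program below is a standalone userspace sketch of that arithmetic only, not kernel code: the APERF/MPERF/TSC deltas and the VID range are invented values, and clamp32() is a local stand-in for the kernel's clamp_t().

/*
 * Standalone sketch of the fixed-point busy calculation and the
 * Baytrail VID interpolation shown in the intel_pstate diff above.
 * Userspace model only: sample counters and VID bounds are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

static int32_t clamp32(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Invented deltas standing in for APERF/MPERF/TSC since last sample */
	uint64_t aperf = 180000, mperf = 200000, tsc = 400000;

	/* core_pct = 100 * aperf/mperf; c0_pct = mperf/tsc, both in fixed point */
	int32_t core_pct = mul_fp(div_fp(int_tofp(aperf), int_tofp(mperf)),
				  int_tofp(100));
	int32_t c0_pct = div_fp(int_tofp(mperf), int_tofp(tsc));
	int32_t busy = mul_fp(core_pct, c0_pct);

	printf("core_pct = %d%%, busy = %d%%\n",
	       fp_toint(core_pct), fp_toint(busy));

	/* VID interpolation as in byt_set_pstate(); VID range is invented */
	int min_pstate = 10, max_pstate = 22, pstate = 16;
	int32_t vid_min = int_tofp(0x20), vid_max = int_tofp(0x40);
	int32_t ratio = div_fp(vid_max - vid_min,
			       int_tofp(max_pstate - min_pstate));
	int32_t vid_fp = vid_min + mul_fp(int_tofp(pstate - min_pstate), ratio);

	vid_fp = clamp32(vid_fp, vid_min, vid_max);
	printf("pstate %d -> vid 0x%x\n", pstate, (unsigned)fp_toint(vid_fp));
	return 0;
}

With the invented sample above this prints a core percentage near 90% scaled down by the ~50% C0 residency, which is exactly the effect the new tsc term adds to core_pct_busy.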
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0767a4e29dfe..3d114bc5a97a 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -97,11 +97,11 @@ static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = kirkwood_cpufreq_get_cpu_frequency,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 45bafddfd8ea..5c440f87ba8a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -269,7 +269,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
}
}
/* Report true CPU frequency */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
if (!bm_timeout)
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -913,7 +913,6 @@ static struct cpufreq_driver longhaul_driver = {
.target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "longhaul",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index a43609218105..a3588d61d933 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -24,8 +24,6 @@
static uint nowait;
-static struct clk *cpuclk;
-
static void (*saved_cpu_wait) (void);
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
@@ -44,11 +42,6 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
return 0;
}
-static unsigned int loongson2_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(cpuclk);
-}
-
/*
* Here we notify other drivers of the proposed change and the final change.
*/
@@ -69,13 +62,14 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
- clk_set_rate(cpuclk, freq);
+ clk_set_rate(policy->clk, freq);
return 0;
}
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
+ static struct clk *cpuclk;
int i;
unsigned long rate;
int ret;
@@ -104,13 +98,13 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
+ policy->clk = cpuclk;
return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
- clk_put(cpuclk);
+ clk_put(policy->clk);
return 0;
}
@@ -119,7 +113,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
- .get = loongson2_cpufreq_get,
+ .get = cpufreq_generic_get,
.exit = loongson2_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index a0acd0bfba40..5f69c9aa703c 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -36,21 +36,9 @@
static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
-static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
-static unsigned int omap_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu >= NR_CPUS)
- return 0;
-
- rate = clk_get_rate(mpu_clk) / 1000;
- return rate;
-}
-
static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
int r, ret;
@@ -58,11 +46,11 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
- old_freq = omap_getspeed(policy->cpu);
+ old_freq = policy->cur;
new_freq = freq_table[index].frequency;
freq = new_freq * 1000;
- ret = clk_round_rate(mpu_clk, freq);
+ ret = clk_round_rate(policy->clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
@@ -100,7 +88,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
}
}
- ret = clk_set_rate(mpu_clk, new_freq * 1000);
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
if (mpu_reg && (new_freq < old_freq)) {
@@ -108,7 +96,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
- clk_set_rate(mpu_clk, old_freq * 1000);
+ clk_set_rate(policy->clk, old_freq * 1000);
return r;
}
}
@@ -126,9 +114,9 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
{
int result;
- mpu_clk = clk_get(NULL, "cpufreq_ck");
- if (IS_ERR(mpu_clk))
- return PTR_ERR(mpu_clk);
+ policy->clk = clk_get(NULL, "cpufreq_ck");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
if (!freq_table) {
result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -149,23 +137,22 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
freq_table_free();
fail:
- clk_put(mpu_clk);
+ clk_put(policy->clk);
return result;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
- clk_put(mpu_clk);
+ clk_put(policy->clk);
return 0;
}
static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
- .get = omap_getspeed,
+ .get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 3d1cba9fd5f9..74f593e70e19 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -237,7 +237,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
- .exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 0426008380d8..6a2b7d3e85a7 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -234,7 +234,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
- cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index e2b4f40ff69a..728a2d879499 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -213,8 +213,9 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
cpu, target_freq,
(pcch_virt_addr + pcc_cpu_data->input_offset));
+ freqs.old = policy->cur;
freqs.new = target_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -228,25 +229,20 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
+ iowrite16(0, &pcch_hdr->status);
+
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
+ spin_unlock(&pcc_lock);
+
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
cpu, status);
- goto cmd_incomplete;
+ return -EINVAL;
}
- iowrite16(0, &pcch_hdr->status);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
- spin_unlock(&pcc_lock);
return 0;
-
-cmd_incomplete:
- freqs.new = freqs.old;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- iowrite16(0, &pcch_hdr->status);
- spin_unlock(&pcc_lock);
- return -EINVAL;
}
static int pcc_get_offset(int cpu)
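
The pcc-cpufreq hunk above is representative of the series-wide switch from cpufreq_notify_transition(PRECHANGE/POSTCHANGE) to paired cpufreq_freq_transition_begin()/cpufreq_freq_transition_end() calls, where the third argument of _end() reports whether the change failed and replaces the manual "freqs.new = freqs.old" rollback. The sketch below is a userspace mock of that calling convention only; the struct and the two helpers are illustrative stand-ins, not the kernel API.

/*
 * Userspace mock of the transition begin/end calling convention used
 * throughout this series. Types and helpers are stand-ins for
 * illustration; they are not the kernel implementation.
 */
#include <stdio.h>
#include <stdbool.h>

struct freqs { unsigned int old_khz, new_khz; };

static void transition_begin(struct freqs *f)
{
	printf("PRECHANGE: %u -> %u kHz\n", f->old_khz, f->new_khz);
}

static void transition_end(struct freqs *f, bool failed)
{
	if (failed)	/* _end() itself rolls the notification back */
		f->new_khz = f->old_khz;
	printf("POSTCHANGE: now at %u kHz%s\n", f->new_khz,
	       failed ? " (transition failed)" : "");
}

static int set_hardware_freq(unsigned int khz)
{
	return khz > 800000 ? -1 : 0;	/* pretend >800 MHz always fails */
}

int main(void)
{
	struct freqs f = { .old_khz = 600000, .new_khz = 1000000 };
	int status;

	transition_begin(&f);
	status = set_hardware_freq(f.new_khz);
	/* One call replaces the old cmd_incomplete rollback path above */
	transition_end(&f, status != 0);

	return status ? 1 : 0;
}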
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 643e7952cad3..62c6f2e5afce 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -26,41 +26,108 @@
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
+static unsigned int param_busfreq = 0;
+static unsigned int param_max_multiplier = 0;
+
+module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
+MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
+
+module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
static struct cpufreq_frequency_table clock_ratio[] = {
- {45, /* 000 -> 4.5x */ 0},
+ {60, /* 110 -> 6.0x */ 0},
+ {55, /* 011 -> 5.5x */ 0},
{50, /* 001 -> 5.0x */ 0},
+ {45, /* 000 -> 4.5x */ 0},
{40, /* 010 -> 4.0x */ 0},
- {55, /* 011 -> 5.5x */ 0},
- {20, /* 100 -> 2.0x */ 0},
- {30, /* 101 -> 3.0x */ 0},
- {60, /* 110 -> 6.0x */ 0},
{35, /* 111 -> 3.5x */ 0},
+ {30, /* 101 -> 3.0x */ 0},
+ {20, /* 100 -> 2.0x */ 0},
{0, CPUFREQ_TABLE_END}
};
+static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
+static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
+
+static const struct {
+ unsigned freq;
+ unsigned mult;
+} usual_frequency_table[] = {
+ { 400000, 40 }, // 100 * 4
+ { 450000, 45 }, // 100 * 4.5
+ { 475000, 50 }, // 95 * 5
+ { 500000, 50 }, // 100 * 5
+ { 506250, 45 }, // 112.5 * 4.5
+ { 533500, 55 }, // 97 * 5.5
+ { 550000, 55 }, // 100 * 5.5
+ { 562500, 50 }, // 112.5 * 5
+ { 570000, 60 }, // 95 * 6
+ { 600000, 60 }, // 100 * 6
+ { 618750, 55 }, // 112.5 * 5.5
+ { 660000, 55 }, // 120 * 5.5
+ { 675000, 60 }, // 112.5 * 6
+ { 720000, 60 }, // 120 * 6
+};
+
+#define FREQ_RANGE 3000
/**
* powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
*
- * Returns the current setting of the frequency multiplier. Core clock
+ * Returns the current setting of the frequency multiplier. Core clock
* speed is frequency of the Front-Side Bus multiplied with this value.
*/
static int powernow_k6_get_cpu_multiplier(void)
{
- u64 invalue = 0;
+ unsigned long invalue = 0;
u32 msrval;
+ local_irq_disable();
+
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue = inl(POWERNOW_IOPORT + 0x8);
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
- return clock_ratio[(invalue >> 5)&7].driver_data;
+ local_irq_enable();
+
+ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
}
+static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
+{
+ unsigned long outvalue, invalue;
+ unsigned long msrval;
+ unsigned long cr0;
+
+ /* we now need to transform best_i to the BVC format, see AMD#23446 */
+
+ /*
+ * The processor doesn't respond to inquiry cycles while changing the
+ * frequency, so we must disable cache.
+ */
+ local_irq_disable();
+ cr0 = read_cr0();
+ write_cr0(cr0 | X86_CR0_CD);
+ wbinvd();
+
+ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ invalue = invalue & 0x1f;
+ outvalue = outvalue | invalue;
+ outl(outvalue, (POWERNOW_IOPORT + 0x8));
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ write_cr0(cr0);
+ local_irq_enable();
+}
/**
* powernow_k6_target - set the PowerNow! multiplier
@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
- unsigned long outvalue = 0, invalue = 0;
- unsigned long msrval;
struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
@@ -83,37 +148,65 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].driver_data;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
- /* we now need to transform best_i to the BVC format, see AMD#23446 */
-
- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
-
- msrval = POWERNOW_IOPORT + 0x1;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
- invalue = inl(POWERNOW_IOPORT + 0x8);
- invalue = invalue & 0xf;
- outvalue = outvalue | invalue;
- outl(outvalue , (POWERNOW_IOPORT + 0x8));
- msrval = POWERNOW_IOPORT + 0x0;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+ powernow_k6_set_cpu_multiplier(best_i);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
-
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
+ unsigned khz;
if (policy->cpu != 0)
return -ENODEV;
- /* get frequencies */
- max_multiplier = powernow_k6_get_cpu_multiplier();
- busfreq = cpu_khz / max_multiplier;
+ max_multiplier = 0;
+ khz = cpu_khz;
+ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
+ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
+ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
+ khz = usual_frequency_table[i].freq;
+ max_multiplier = usual_frequency_table[i].mult;
+ break;
+ }
+ }
+ if (param_max_multiplier) {
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == param_max_multiplier) {
+ max_multiplier = param_max_multiplier;
+ goto have_max_multiplier;
+ }
+ }
+ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ return -EINVAL;
+ }
+
+ if (!max_multiplier) {
+ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
+ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ return -EOPNOTSUPP;
+ }
+
+have_max_multiplier:
+ param_max_multiplier = max_multiplier;
+
+ if (param_busfreq) {
+ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
+ busfreq = param_busfreq / 10;
+ goto have_busfreq;
+ }
+ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ return -EINVAL;
+ }
+
+ busfreq = khz / max_multiplier;
+have_busfreq:
+ param_busfreq = busfreq * 10;
/* table init */
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -125,7 +218,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
}
/* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 200000;
+ policy->cpuinfo.transition_latency = 500000;
return cpufreq_table_validate_and_show(policy, clock_ratio);
}
@@ -138,7 +231,6 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
if (i == max_multiplier)
powernow_k6_target(policy, i);
}
- cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
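
The powernow-k6 rework above derives the bus frequency by snapping cpu_khz onto a table of common clock/multiplier combinations (within FREQ_RANGE) and translates table indices to the BVC register encoding through index_to_register[]/register_to_index[]. The program below is a standalone sketch of that lookup logic using the same tables; the cpu_khz value is invented and no MSR or I/O-port access is modelled.

/*
 * Standalone sketch of the powernow-k6 frequency snapping and
 * multiplier-index mapping from the diff above.
 */
#include <stdio.h>

#define FREQ_RANGE 3000

static const unsigned clock_ratio[] = { 60, 55, 50, 45, 40, 35, 30, 20 };
static const unsigned char index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
static const unsigned char register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };

static const struct { unsigned freq, mult; } usual_frequency_table[] = {
	{ 400000, 40 }, { 450000, 45 }, { 475000, 50 }, { 500000, 50 },
	{ 506250, 45 }, { 533500, 55 }, { 550000, 55 }, { 562500, 50 },
	{ 570000, 60 }, { 600000, 60 }, { 618750, 55 }, { 660000, 55 },
	{ 675000, 60 }, { 720000, 60 },
};

int main(void)
{
	unsigned khz = 501200;	/* invented cpu_khz reading */
	unsigned max_multiplier = 0, busfreq;
	size_t i;

	/* Snap the measured clock to a known freq/multiplier pair */
	for (i = 0; i < sizeof(usual_frequency_table) / sizeof(usual_frequency_table[0]); i++) {
		if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
		    khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
			khz = usual_frequency_table[i].freq;
			max_multiplier = usual_frequency_table[i].mult;
			break;
		}
	}
	if (!max_multiplier) {
		printf("unknown frequency %u kHz\n", khz);
		return 1;
	}

	busfreq = khz / max_multiplier;	/* FSB in 10 kHz units */
	printf("cpu %u kHz -> FSB %u0 kHz, multiplier %u.%ux\n",
	       khz, busfreq, max_multiplier / 10, max_multiplier % 10);

	/* Table index <-> BVC register field round trip */
	for (i = 0; i < 8; i++)
		printf("index %zu (%u.%ux) -> BVC %u -> index %u\n",
		       i, clock_ratio[i] / 10, clock_ratio[i] % 10,
		       (unsigned)index_to_register[i],
		       (unsigned)register_to_index[index_to_register[i]]);

	return 0;
}

The round-trip loop shows why two tables are needed: the driver table is sorted fastest-first for cpufreq, while the BVC field in the PowerNow port uses the hardware ordering, so best_i must be translated before it is written out in powernow_k6_set_cpu_multiplier().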
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 946708a1d745..f911645c3f6d 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,7 +269,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
freqs.new = powernow_table[index].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
/* Now do the magic poking into the MSRs. */
@@ -290,7 +290,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
if (have_a0 == 1)
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
return 0;
}
@@ -664,8 +664,6 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
static int powernow_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
-
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
acpi_processor_unregister_performance(acpi_processor_perf, 0);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0023c7d40a51..770a9e1b3468 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -963,15 +963,10 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
policy = cpufreq_cpu_get(smp_processor_id());
cpufreq_cpu_put(policy);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
+ cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
- if (res)
- freqs.new = freqs.old;
- else
- freqs.new = find_khz_freq_from_fid(data->currfid);
+ cpufreq_freq_transition_end(policy, &freqs, res);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return res;
}
@@ -1081,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
- int rc;
+ int rc, cpu;
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
@@ -1145,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
- per_cpu(powernow_data, pol->cpu) = data;
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
return 0;
@@ -1160,17 +1157,17 @@ err_out:
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
if (!data)
return -EINVAL;
powernow_k8_cpu_exit_acpi(data);
- cpufreq_frequency_table_put_attr(pol->cpu);
-
kfree(data->powernow_table);
kfree(data);
- per_cpu(powernow_data, pol->cpu) = NULL;
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = NULL;
return 0;
}
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3f7be46d2b27..3bd9123e7026 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -21,15 +21,14 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <sysdev/fsl_soc.h>
/**
* struct cpu_data - per CPU data struct
- * @clk: the clk of CPU
* @parent: the parent node of cpu clock
* @table: frequency table
*/
struct cpu_data {
- struct clk *clk;
struct device_node *parent;
struct cpufreq_frequency_table *table;
};
@@ -81,13 +80,6 @@ static inline const struct cpumask *cpu_core_mask(int cpu)
}
#endif
-static unsigned int corenet_cpufreq_get_speed(unsigned int cpu)
-{
- struct cpu_data *data = per_cpu(cpu_data, cpu);
-
- return clk_get_rate(data->clk) / 1000;
-}
-
/* reduce the duplicated frequencies in frequency table */
static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
int count)
@@ -158,8 +150,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_np;
}
- data->clk = of_clk_get(np, 0);
- if (IS_ERR(data->clk)) {
+ policy->clk = of_clk_get(np, 0);
+ if (IS_ERR(policy->clk)) {
pr_err("%s: no clock information\n", __func__);
goto err_nomem2;
}
@@ -214,7 +206,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency =
+ (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
of_node_put(np);
return 0;
@@ -237,7 +230,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
unsigned int cpu;
- cpufreq_frequency_table_put_attr(policy->cpu);
of_node_put(data->parent);
kfree(data->table);
kfree(data);
@@ -255,7 +247,7 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
parent = of_clk_get(data->parent, data->table[index].driver_data);
- return clk_set_parent(data->clk, parent);
+ return clk_set_parent(policy->clk, parent);
}
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
@@ -265,7 +257,7 @@ static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.exit = __exit_p(corenet_cpufreq_cpu_exit),
.verify = cpufreq_generic_frequency_table_verify,
.target_index = corenet_cpufreq_target,
- .get = corenet_cpufreq_get_speed,
+ .get = cpufreq_generic_get,
.attr = cpufreq_generic_attr,
};
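
One small functional change in the corenet hunk above is easy to miss: the transition latency is no longer CPUFREQ_ETERNAL but 12 platform-clock cycles expressed in nanoseconds. The snippet below is a quick standalone check of that arithmetic with an assumed platform clock; fsl_get_sys_freq() is replaced by a made-up constant.

/* Standalone check of the corenet transition-latency formula above,
 * using an assumed 600 MHz platform clock in place of fsl_get_sys_freq(). */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long sys_freq = 600000000ULL;	/* assumed, in Hz */
	unsigned long long latency_ns = (12 * NSEC_PER_SEC) / sys_freq;

	printf("12 cycles @ %llu Hz = %llu ns transition latency\n",
	       sys_freq, latency_ns);
	return 0;
}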
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e42ca9c31cea..af7b1cabd1e7 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -141,7 +141,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 0a0f4369636a..e24269ab4e9b 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -423,10 +423,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa_set_target,
.init = pxa_cpufreq_init,
- .exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 93840048dd11..a01275900389 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -201,10 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
- .exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 8d904a00027b..826b8be23099 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -481,7 +481,7 @@ err_hclk:
}
static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .flags = 0,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 72b2cc8a5a85..f84ed10755b5 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -22,8 +22,6 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <mach/hardware.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -55,7 +53,7 @@ static inline int within_khz(unsigned long a, unsigned long b)
* specified in @cfg. The values are stored in @cfg for later use
* by the relevant set routine if the request settings can be reached.
*/
-int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
{
unsigned int hdiv, pdiv;
unsigned long hclk, fclk, armclk;
@@ -242,7 +240,7 @@ static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
return ret;
}
-struct s3c_cpufreq_info s3c2440_cpufreq_info = {
+static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
.max = {
.fclk = 400000000,
.hclk = 133333333,
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 485088253358..a3dc192d21f9 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -217,7 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
/* start the frequency change */
- cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs.freqs);
/* If hclk is staying the same, then we do not need to
* re-write the IO or the refresh timings whilst we are changing
@@ -261,7 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
local_irq_restore(flags);
/* notify everyone we've done this */
- cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
s3c_freq_dbg("%s: finished\n", __func__);
return 0;
@@ -355,11 +355,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
return -EINVAL;
}
-static unsigned int s3c_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(clk_arm) / 1000;
-}
-
struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
{
struct clk *clk;
@@ -373,6 +368,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = clk_arm;
return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
@@ -408,7 +404,7 @@ static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
- suspend_freq = s3c_cpufreq_get(0) * 1000;
+ suspend_freq = clk_get_rate(clk_arm);
return 0;
}
@@ -448,9 +444,9 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
#endif
static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.target = s3c_cpufreq_target,
- .get = s3c_cpufreq_get,
+ .get = cpufreq_generic_get,
.init = s3c_cpufreq_init,
.suspend = s3c_cpufreq_suspend,
.resume = s3c_cpufreq_resume,
@@ -509,7 +505,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
return 0;
}
-int __init s3c_cpufreq_auto_io(void)
+static int __init s3c_cpufreq_auto_io(void)
{
int ret;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 67e302eeefec..c4226de079ab 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
#include <linux/regulator/consumer.h>
#include <linux/module.h>
-static struct clk *armclk;
static struct regulator *vddarm;
static unsigned long regulator_latency;
@@ -54,14 +53,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
-static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
-{
- if (cpu != 0)
- return 0;
-
- return clk_get_rate(armclk) / 1000;
-}
-
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -69,7 +60,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int old_freq, new_freq;
int ret;
- old_freq = clk_get_rate(armclk) / 1000;
+ old_freq = clk_get_rate(policy->clk) / 1000;
new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
@@ -86,7 +77,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
}
#endif
- ret = clk_set_rate(armclk, new_freq * 1000);
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
new_freq, ret);
@@ -101,7 +92,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
new_freq, ret);
- if (clk_set_rate(armclk, old_freq * 1000) < 0)
+ if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
return ret;
@@ -110,7 +101,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
#endif
pr_debug("Set actual frequency %lukHz\n",
- clk_get_rate(armclk) / 1000);
+ clk_get_rate(policy->clk) / 1000);
return 0;
}
@@ -169,11 +160,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
- PTR_ERR(armclk));
- return PTR_ERR(armclk);
+ PTR_ERR(policy->clk));
+ return PTR_ERR(policy->clk);
}
#ifdef CONFIG_REGULATOR
@@ -193,7 +184,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
unsigned long r;
/* Check for frequencies we can generate */
- r = clk_round_rate(armclk, freq->frequency * 1000);
+ r = clk_round_rate(policy->clk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
pr_debug("%dkHz unsupported by clock\n",
@@ -203,7 +194,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
/* If we have no regulator then assume startup
* frequency is the maximum we can support. */
- if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
freq->frequency = CPUFREQ_ENTRY_INVALID;
freq++;
@@ -219,17 +210,17 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
- clk_put(armclk);
+ clk_put(policy->clk);
}
return ret;
}
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
- .flags = 0,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s3c64xx_cpufreq_set_target,
- .get = s3c64xx_cpufreq_get_speed,
+ .get = cpufreq_generic_get,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
};
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index e3973dae28a7..72421534fff5 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -18,12 +18,10 @@
#include <linux/cpufreq.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
-static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static DEFINE_MUTEX(set_freq_lock);
@@ -164,14 +162,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
-static unsigned int s5pv210_getspeed(unsigned int cpu)
-{
- if (cpu)
- return 0;
-
- return clk_get_rate(cpu_clk) / 1000;
-}
-
static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
@@ -193,7 +183,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
goto exit;
}
- old_freq = s5pv210_getspeed(0);
+ old_freq = policy->cur;
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
@@ -444,18 +434,6 @@ exit:
return ret;
}
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
-}
-#endif
-
static int check_mem_type(void __iomem *dmc_reg)
{
unsigned long val;
@@ -471,9 +449,9 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
unsigned long mem_type;
int ret;
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
dmc0_clk = clk_get(NULL, "sclk_dmc0");
if (IS_ERR(dmc0_clk)) {
@@ -511,41 +489,16 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+ policy->suspend_freq = SLEEP_FREQ;
return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
- clk_put(cpu_clk);
+ clk_put(policy->clk);
return ret;
}
-static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- int ret;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
- if (ret < 0)
- return NOTIFY_BAD;
-
- /* Disable updation of cpu frequency */
- no_cpufreq_access = true;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Enable updation of cpu frequency */
- no_cpufreq_access = false;
- cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -560,22 +513,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
}
static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
- .get = s5pv210_getspeed,
+ .get = cpufreq_generic_get,
.init = s5pv210_cpu_init,
.name = "s5pv210",
#ifdef CONFIG_PM
- .suspend = s5pv210_cpufreq_suspend,
- .resume = s5pv210_cpufreq_resume,
+ .suspend = cpufreq_generic_suspend,
+ .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
#endif
};
-static struct notifier_block s5pv210_cpufreq_notifier = {
- .notifier_call = s5pv210_cpufreq_notifier_event,
-};
-
static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};
@@ -595,7 +544,6 @@ static int __init s5pv210_cpufreq_init(void)
return PTR_ERR(int_regulator);
}
- register_pm_notifier(&s5pv210_cpufreq_notifier);
register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
return cpufreq_register_driver(&s5pv210_driver);
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 623da742f8e7..728eab77e8e0 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -201,7 +201,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 2c2b2e601d13..546376719d8f 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -312,7 +312,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 6adb354e359c..69371bf0886d 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -93,7 +93,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
- .exit = cpufreq_generic_exit,
.name = "sc520_freq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 387af12503a6..86628e22b2a3 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ cpufreq_freq_transition_begin(policy, &freqs);
set_cpus_allowed_ptr(current, &cpus_allowed);
clk_set_rate(cpuclk, freq);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);
@@ -143,7 +143,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
- cpufreq_frequency_table_put_attr(cpu);
clk_put(cpuclk);
return 0;
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 62aa23e219d4..b73feeb666f9 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -301,10 +301,8 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver) {
- cpufreq_frequency_table_put_attr(policy->cpu);
+ if (cpufreq_us2e_driver)
us2e_freq_target(policy, 0);
- }
return 0;
}
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 724ffbd7105d..9bb42ba50efa 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -156,10 +156,8 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver) {
- cpufreq_frequency_table_put_attr(policy->cpu);
+ if (cpufreq_us3_driver)
us3_freq_target(policy, 0);
- }
return 0;
}
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d02ccd19c9c4..4cfdcff8a310 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -30,11 +31,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
-static unsigned int spear_cpufreq_get(unsigned int cpu)
-{
- return clk_get_rate(spear_cpufreq.clk) / 1000;
-}
-
static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
{
struct clk *sys_pclk;
@@ -138,7 +134,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
}
newfreq = clk_round_rate(srcclk, newfreq * mult);
- if (newfreq < 0) {
+ if (newfreq <= 0) {
pr_err("clk_round_rate failed for cpu src clock\n");
return newfreq;
}
@@ -156,22 +152,22 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
+ policy->clk = spear_cpufreq.clk;
return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
spear_cpufreq.transition_latency);
}
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
- .get = spear_cpufreq_get,
+ .get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .exit = cpufreq_generic_exit,
.attr = cpufreq_generic_attr,
};
-static int spear_cpufreq_driver_init(void)
+static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
const struct property *prop;
@@ -239,7 +235,15 @@ out_put_node:
of_node_put(np);
return ret;
}
-late_initcall(spear_cpufreq_driver_init);
+
+static struct platform_driver spear_cpufreq_platdrv = {
+ .driver = {
+ .name = "spear-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
MODULE_DESCRIPTION("SPEAr CPUFreq driver");
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 4e1daca5ce3b..6723f0390f20 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -406,8 +406,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
if (!per_cpu(centrino_model, cpu))
return -ENODEV;
- cpufreq_frequency_table_put_attr(cpu);
-
per_cpu(centrino_model, cpu) = NULL;
return 0;
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 7639b2be2a90..394ac159312a 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -311,7 +311,6 @@ static struct cpufreq_driver speedstep_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = cpufreq_generic_exit,
.get = speedstep_get,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0f5326d6f79f..db5d274dc13a 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -141,38 +141,6 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
}
/**
- * speedstep_get_state - set the SpeedStep state
- * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
- *
- */
-static int speedstep_get_state(void)
-{
- u32 function = GET_SPEEDSTEP_STATE;
- u32 result, state, edi, command, dummy;
-
- command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
-
- pr_debug("trying to determine current setting with command %x "
- "at port %x\n", command, smi_port);
-
- __asm__ __volatile__(
- "push %%ebp\n"
- "out %%al, (%%dx)\n"
- "pop %%ebp\n"
- : "=a" (result),
- "=b" (state), "=D" (edi),
- "=c" (dummy), "=d" (dummy), "=S" (dummy)
- : "a" (command), "b" (function), "c" (0),
- "d" (smi_port), "S" (0), "D" (0)
- );
-
- pr_debug("state is %x, result is %x\n", state, result);
-
- return state & 1;
-}
-
-
-/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
@@ -312,7 +280,6 @@ static struct cpufreq_driver speedstep_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
.attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index b7309c37033d..63f00598a251 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -26,7 +26,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/suspend.h>
static struct cpufreq_frequency_table freq_table[] = {
{ .frequency = 216000 },
@@ -47,21 +46,6 @@ static struct clk *pll_x_clk;
static struct clk *pll_p_clk;
static struct clk *emc_clk;
-static unsigned long target_cpu_speed[NUM_CPUS];
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
-static unsigned int tegra_getspeed(unsigned int cpu)
-{
- unsigned long rate;
-
- if (cpu >= NUM_CPUS)
- return 0;
-
- rate = clk_get_rate(cpu_clk) / 1000;
- return rate;
-}
-
static int tegra_cpu_clk_set_rate(unsigned long rate)
{
int ret;
@@ -103,9 +87,6 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
{
int ret = 0;
- if (tegra_getspeed(0) == rate)
- return ret;
-
/*
* Vote on memory bus frequency based on cpu frequency
* This sets the minimum frequency, display or avp may request higher
@@ -125,60 +106,11 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
return ret;
}
-static unsigned long tegra_cpu_highest_speed(void)
-{
- unsigned long rate = 0;
- int i;
-
- for_each_online_cpu(i)
- rate = max(rate, target_cpu_speed[i]);
- return rate;
-}
-
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int freq;
- int ret = 0;
-
- mutex_lock(&tegra_cpu_lock);
-
- if (is_suspended)
- goto out;
-
- freq = freq_table[index].frequency;
-
- target_cpu_speed[policy->cpu] = freq;
-
- ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
-
-out:
- mutex_unlock(&tegra_cpu_lock);
- return ret;
+ return tegra_update_cpu_speed(policy, freq_table[index].frequency);
}
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
- void *dummy)
-{
- mutex_lock(&tegra_cpu_lock);
- if (event == PM_SUSPEND_PREPARE) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(0);
- is_suspended = true;
- pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
- freq_table[0].frequency);
- tegra_update_cpu_speed(policy, freq_table[0].frequency);
- cpufreq_cpu_put(policy);
- } else if (event == PM_POST_SUSPEND) {
- is_suspended = false;
- }
- mutex_unlock(&tegra_cpu_lock);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
- .notifier_call = tegra_pm_notify,
-};
-
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
int ret;
@@ -189,8 +121,6 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
-
/* FIXME: what's the actual transition time? */
ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (ret) {
@@ -199,28 +129,30 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- if (policy->cpu == 0)
- register_pm_notifier(&tegra_cpu_pm_notifier);
-
+ policy->clk = cpu_clk;
+ policy->suspend_freq = freq_table[0].frequency;
return 0;
}
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
}
static struct cpufreq_driver tegra_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra_target,
- .get = tegra_getspeed,
+ .get = cpufreq_generic_get,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
.attr = cpufreq_generic_attr,
+#ifdef CONFIG_PM
+ .suspend = cpufreq_generic_suspend,
+#endif
};
static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 653ae2955b55..13be802b6170 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -33,42 +34,34 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
return 0;
}
-static unsigned int ucv2_getspeed(unsigned int cpu)
-{
- struct clk *mclk = clk_get(NULL, "MAIN_CLK");
-
- if (cpu)
- return 0;
- return clk_get_rate(mclk)/1000;
-}
-
static int ucv2_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- unsigned int cur = ucv2_getspeed(0);
struct cpufreq_freqs freqs;
- struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+ int ret;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
- if (!clk_set_rate(mclk, target_freq * 1000)) {
- freqs.old = cur;
- freqs.new = target_freq;
- }
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = clk_set_rate(policy->mclk, target_freq * 1000);
+ cpufreq_freq_transition_end(policy, &freqs, ret);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return 0;
+ return ret;
}
static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
+
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->clk = clk_get(NULL, "MAIN_CLK");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
return 0;
}
@@ -76,7 +69,7 @@ static struct cpufreq_driver ucv2_driver = {
.flags = CPUFREQ_STICKY,
.verify = ucv2_verify_speed,
.target = ucv2_target,
- .get = ucv2_getspeed,
+ .get = cpufreq_generic_get,
.init = ucv2_cpu_init,
.name = "UniCore-II",
};
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index b3fb81d7cf04..f04e25f6c98d 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -35,6 +35,11 @@ depends on ARM
source "drivers/cpuidle/Kconfig.arm"
endmenu
+menu "POWERPC CPU Idle Drivers"
+depends on PPC
+source "drivers/cpuidle/Kconfig.powerpc"
+endmenu
+
endif
config ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/drivers/cpuidle/Kconfig.powerpc b/drivers/cpuidle/Kconfig.powerpc
new file mode 100644
index 000000000000..66c3a09574e9
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.powerpc
@@ -0,0 +1,20 @@
+#
+# POWERPC CPU Idle Drivers
+#
+config PSERIES_CPUIDLE
+ bool "Cpuidle driver for pSeries platforms"
+ depends on CPU_IDLE
+ depends on PPC_PSERIES
+ default y
+ help
+ Select this option to enable processor idle state management
+ through cpuidle subsystem.
+
+config POWERNV_CPUIDLE
+ bool "Cpuidle driver for powernv platforms"
+ depends on CPU_IDLE
+ depends on PPC_POWERNV
+ default y
+ help
+ Select this option to enable processor idle state management
+ through cpuidle subsystem.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 527be28e5c1e..f71ae1b373c5 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -13,3 +13,8 @@ obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
+
+###############################################################################
+# POWERPC drivers
+obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
+obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index e952936418d0..cb6654bfad77 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu)
struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
- __smp_call_function_single(cpu, csd, 0);
+ smp_call_function_single_async(cpu, csd);
}
/**
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
new file mode 100644
index 000000000000..719f6fb5b1c3
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -0,0 +1,256 @@
+/*
+ * cpuidle-powernv - idle state cpuidle driver.
+ * Adapted from drivers/cpuidle/cpuidle-pseries
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/clockchips.h>
+#include <linux/of.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/runlatch.h>
+
+/* Flags and constants used in PowerNV platform */
+
+#define MAX_POWERNV_IDLE_STATES 8
+#define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */
+#define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */
+
+struct cpuidle_driver powernv_idle_driver = {
+ .name = "powernv_idle",
+ .owner = THIS_MODULE,
+};
+
+static int max_idle_state;
+static struct cpuidle_state *cpuidle_state_table;
+
+static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ ppc64_runlatch_off();
+ while (!need_resched()) {
+ HMT_low();
+ HMT_very_low();
+ }
+
+ HMT_medium();
+ ppc64_runlatch_on();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
+ return index;
+}
+
+static int nap_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ ppc64_runlatch_off();
+ power7_idle();
+ ppc64_runlatch_on();
+ return index;
+}
+
+static int fastsleep_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long old_lpcr = mfspr(SPRN_LPCR);
+ unsigned long new_lpcr;
+
+ if (unlikely(system_state < SYSTEM_RUNNING))
+ return index;
+
+ new_lpcr = old_lpcr;
+ new_lpcr &= ~(LPCR_MER | LPCR_PECE); /* lpcr[mer] must be 0 */
+
+ /* exit powersave upon external interrupt, but not decrementer
+ * interrupt.
+ */
+ new_lpcr |= LPCR_PECE0;
+
+ mtspr(SPRN_LPCR, new_lpcr);
+ power7_sleep();
+
+ mtspr(SPRN_LPCR, old_lpcr);
+
+ return index;
+}
+
+/*
+ * Idle states available on the PowerNV platform.
+ */
+static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
+ { /* Snooze */
+ .name = "snooze",
+ .desc = "snooze",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &snooze_loop },
+};
+
+static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev =
+ per_cpu(cpuidle_devices, hotcpu);
+
+ if (dev && cpuidle_get_driver()) {
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_hotplug_notifier = {
+ .notifier_call = powernv_cpuidle_add_cpu_notifier,
+};
+
+/*
+ * powernv_cpuidle_driver_init()
+ */
+static int powernv_cpuidle_driver_init(void)
+{
+ int idle_state;
+ struct cpuidle_driver *drv = &powernv_idle_driver;
+
+ drv->state_count = 0;
+
+ for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+ /* Is the state not enabled? */
+ if (cpuidle_state_table[idle_state].enter == NULL)
+ continue;
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[idle_state];
+
+ drv->state_count += 1;
+ }
+
+ return 0;
+}
+
+static int powernv_add_idle_states(void)
+{
+ struct device_node *power_mgt;
+ struct property *prop;
+ int nr_idle_states = 1; /* Snooze */
+ int dt_idle_states;
+ u32 *flags;
+ int i;
+
+ /* Currently we have snooze statically defined */
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("opal: PowerMgmt Node not found\n");
+ return nr_idle_states;
+ }
+
+ prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL);
+ if (!prop) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return nr_idle_states;
+ }
+
+ dt_idle_states = prop->length / sizeof(u32);
+ flags = (u32 *) prop->value;
+
+ for (i = 0; i < dt_idle_states; i++) {
+
+ if (flags[i] & IDLE_USE_INST_NAP) {
+ /* Add NAP state */
+ strcpy(powernv_states[nr_idle_states].name, "Nap");
+ strcpy(powernv_states[nr_idle_states].desc, "Nap");
+ powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID;
+ powernv_states[nr_idle_states].exit_latency = 10;
+ powernv_states[nr_idle_states].target_residency = 100;
+ powernv_states[nr_idle_states].enter = &nap_loop;
+ nr_idle_states++;
+ }
+
+ if (flags[i] & IDLE_USE_INST_SLEEP) {
+ /* Add FASTSLEEP state */
+ strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+ strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
+ powernv_states[nr_idle_states].flags =
+ CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
+ powernv_states[nr_idle_states].exit_latency = 300;
+ powernv_states[nr_idle_states].target_residency = 1000000;
+ powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ nr_idle_states++;
+ }
+ }
+
+ return nr_idle_states;
+}
+
+/*
+ * powernv_idle_probe()
+ * Choose the state table for the platform and let the device tree
+ * supply any additional idle states.
+ */
+static int powernv_idle_probe(void)
+{
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ cpuidle_state_table = powernv_states;
+ /* Device tree can indicate more idle states */
+ max_idle_state = powernv_add_idle_states();
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int __init powernv_processor_idle_init(void)
+{
+ int retval;
+
+ retval = powernv_idle_probe();
+ if (retval)
+ return retval;
+
+ powernv_cpuidle_driver_init();
+ retval = cpuidle_register(&powernv_idle_driver, NULL);
+ if (retval) {
+ printk(KERN_DEBUG "Registration of powernv driver failed.\n");
+ return retval;
+ }
+
+ register_cpu_notifier(&setup_hotplug_notifier);
+ printk(KERN_DEBUG "powernv_idle_driver registered\n");
+ return 0;
+}
+
+device_initcall(powernv_processor_idle_init);
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
new file mode 100644
index 000000000000..6f7b01956885
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -0,0 +1,273 @@
+/*
+ * cpuidle-pseries - idle state cpuidle driver.
+ * Adapted from drivers/idle/intel_idle.c and
+ * drivers/acpi/processor_idle.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#include <asm/paca.h>
+#include <asm/reg.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/runlatch.h>
+#include <asm/plpar_wrappers.h>
+
+struct cpuidle_driver pseries_idle_driver = {
+ .name = "pseries_idle",
+ .owner = THIS_MODULE,
+};
+
+static int max_idle_state;
+static struct cpuidle_state *cpuidle_state_table;
+
+static inline void idle_loop_prolog(unsigned long *in_purr)
+{
+ ppc64_runlatch_off();
+ *in_purr = mfspr(SPRN_PURR);
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline void idle_loop_epilog(unsigned long in_purr)
+{
+ u64 wait_cycles;
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+ get_lppaca()->idle = 0;
+
+ if (irqs_disabled())
+ local_irq_enable();
+ ppc64_runlatch_on();
+}
+
+static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while (!need_resched()) {
+ HMT_low();
+ HMT_very_low();
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+static void check_and_cede_processor(void)
+{
+ /*
+ * Ensure our interrupt state is properly tracked,
+ * also checks if no interrupt has occurred while we
+ * were soft-disabled
+ */
+ if (prep_irq_for_idle()) {
+ cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+ }
+}
+
+static int dedicated_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+ get_lppaca()->donate_dedicated_cpu = 1;
+
+ HMT_medium();
+ check_and_cede_processor();
+
+ get_lppaca()->donate_dedicated_cpu = 0;
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+static int shared_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+
+ idle_loop_prolog(&in_purr);
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ check_and_cede_processor();
+
+ idle_loop_epilog(in_purr);
+
+ return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state dedicated_states[] = {
+ { /* Snooze */
+ .name = "snooze",
+ .desc = "snooze",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &snooze_loop },
+ { /* CEDE */
+ .name = "CEDE",
+ .desc = "CEDE",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 100,
+ .enter = &dedicated_cede_loop },
+};
+
+/*
+ * States for shared partition case.
+ */
+static struct cpuidle_state shared_states[] = {
+ { /* Shared Cede */
+ .name = "Shared Cede",
+ .desc = "Shared Cede",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &shared_cede_loop },
+};
+
+static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev =
+ per_cpu(cpuidle_devices, hotcpu);
+
+ if (dev && cpuidle_get_driver()) {
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_hotplug_notifier = {
+ .notifier_call = pseries_cpuidle_add_cpu_notifier,
+};
+
+/*
+ * pseries_cpuidle_driver_init()
+ */
+static int pseries_cpuidle_driver_init(void)
+{
+ int idle_state;
+ struct cpuidle_driver *drv = &pseries_idle_driver;
+
+ drv->state_count = 0;
+
+ for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
+ /* Is the state not enabled? */
+ if (cpuidle_state_table[idle_state].enter == NULL)
+ continue;
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[idle_state];
+
+ drv->state_count += 1;
+ }
+
+ return 0;
+}
+
+/*
+ * pseries_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int pseries_idle_probe(void)
+{
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (lppaca_shared_proc(get_lppaca())) {
+ cpuidle_state_table = shared_states;
+ max_idle_state = ARRAY_SIZE(shared_states);
+ } else {
+ cpuidle_state_table = dedicated_states;
+ max_idle_state = ARRAY_SIZE(dedicated_states);
+ }
+ } else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int __init pseries_processor_idle_init(void)
+{
+ int retval;
+
+ retval = pseries_idle_probe();
+ if (retval)
+ return retval;
+
+ pseries_cpuidle_driver_init();
+ retval = cpuidle_register(&pseries_idle_driver, NULL);
+ if (retval) {
+ printk(KERN_DEBUG "Registration of pseries driver failed.\n");
+ return retval;
+ }
+
+ register_cpu_notifier(&setup_hotplug_notifier);
+ printk(KERN_DEBUG "pseries_idle_driver registered\n");
+ return 0;
+}
+
+device_initcall(pseries_processor_idle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a55e68f2cfc8..8236746e46bb 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -65,6 +65,26 @@ int cpuidle_play_dead(void)
}
/**
+ * cpuidle_enabled - check if the cpuidle framework is ready
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ *
+ * Return 0 on success, otherwise:
+ * -ENODEV : the cpuidle framework is not available
+ * -EBUSY : the cpuidle framework is not initialized
+ */
+int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+ if (off || !initialized)
+ return -ENODEV;
+
+ if (!drv || !dev || !dev->enabled)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
* cpuidle_enter_state - enter the state and update stats
* @dev: cpuidle device for this cpu
* @drv: cpuidle driver for this cpu
@@ -85,7 +105,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
- local_irq_enable();
+ if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+ local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
if (diff > INT_MAX)
@@ -108,61 +129,48 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
/**
- * cpuidle_idle_call - the main idle loop
+ * cpuidle_select - ask the cpuidle framework to choose an idle state
*
- * NOTE: no locks or semaphores should be used here
- * return non-zero on failure
+ * @drv: the cpuidle driver
+ * @dev: the cpuidle device
+ *
+ * Returns the index of the idle state.
*/
-int cpuidle_idle_call(void)
+int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv;
- int next_state, entered_state;
- bool broadcast;
-
- if (off || !initialized)
- return -ENODEV;
-
- /* check if the device is ready */
- if (!dev || !dev->enabled)
- return -EBUSY;
-
- drv = cpuidle_get_cpu_driver(dev);
-
- /* ask the governor for the next state */
- next_state = cpuidle_curr_governor->select(drv, dev);
- if (need_resched()) {
- dev->last_residency = 0;
- /* give the governor an opportunity to reflect on the outcome */
- if (cpuidle_curr_governor->reflect)
- cpuidle_curr_governor->reflect(dev, next_state);
- local_irq_enable();
- return 0;
- }
-
- trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
- broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
-
- if (broadcast)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
-
- if (cpuidle_state_is_coupled(dev, drv, next_state))
- entered_state = cpuidle_enter_state_coupled(dev, drv,
- next_state);
- else
- entered_state = cpuidle_enter_state(dev, drv, next_state);
-
- if (broadcast)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ return cpuidle_curr_governor->select(drv, dev);
+}
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+/**
+ * cpuidle_enter - enter into the specified idle state
+ *
+ * @drv: the cpuidle driver tied with the cpu
+ * @dev: the cpuidle device
+ * @index: the index in the idle state table
+ *
+ * Returns the index in the idle state table, or < 0 in case of error.
+ * The error code depends on the backend driver
+ */
+int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ int index)
+{
+ if (cpuidle_state_is_coupled(dev, drv, index))
+ return cpuidle_enter_state_coupled(dev, drv, index);
+ return cpuidle_enter_state(dev, drv, index);
+}
- /* give the governor an opportunity to reflect on the outcome */
+/**
+ * cpuidle_reflect - tell the underlying governor which state we were in
+ *
+ * @dev : the cpuidle device
+ * @index: the index in the idle state table
+ *
+ */
+void cpuidle_reflect(struct cpuidle_device *dev, int index)
+{
if (cpuidle_curr_governor->reflect)
- cpuidle_curr_governor->reflect(dev, entered_state);
-
- return 0;
+ cpuidle_curr_governor->reflect(dev, index);
}
/**
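For context, a minimal sketch of how the split cpuidle_enabled()/cpuidle_select()/cpuidle_enter()/cpuidle_reflect() API introduced above is meant to be consumed by a generic idle loop; the call site and the fallback shown here are assumptions for illustration, not part of this patch:

	static void example_cpuidle_idle_call(void)
	{
		struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
		int next_state, entered_state;

		/* Bail out if the cpuidle framework or this device is not ready */
		if (cpuidle_enabled(drv, dev)) {
			arch_cpu_idle();	/* hypothetical fallback path */
			return;
		}

		next_state = cpuidle_select(drv, dev);		/* ask the governor */
		entered_state = cpuidle_enter(drv, dev, next_state);
		cpuidle_reflect(dev, entered_state);		/* report the outcome */
	}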
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 06dbe7c86199..136d6a283e0a 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -209,7 +209,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
state->exit_latency = 0;
state->target_residency = 0;
state->power_usage = -1;
- state->flags = 0;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
state->enter = poll_idle;
state->disabled = false;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index cf7f2f0e4ef5..71b523293354 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -122,9 +122,8 @@ struct menu_device {
int last_state_idx;
int needs_update;
- unsigned int expected_us;
+ unsigned int next_timer_us;
unsigned int predicted_us;
- unsigned int exit_us;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
@@ -257,7 +256,7 @@ again:
stddev = int_sqrt(stddev);
if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
|| stddev <= 20) {
- if (data->expected_us > avg)
+ if (data->next_timer_us > avg)
data->predicted_us = avg;
return;
}
@@ -289,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
- int multiplier;
+ unsigned int interactivity_req;
struct timespec t;
if (data->needs_update) {
@@ -298,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
data->last_state_idx = 0;
- data->exit_us = 0;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0))
@@ -306,13 +304,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
/* determine the expected residency time, round up */
t = ktime_to_timespec(tick_nohz_get_sleep_length());
- data->expected_us =
+ data->next_timer_us =
t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
- data->bucket = which_bucket(data->expected_us);
-
- multiplier = performance_multiplier();
+ data->bucket = which_bucket(data->next_timer_us);
/*
* if the correction factor is 0 (eg first time init or cpu hotplug
@@ -326,17 +322,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = div_round64((uint64_t)data->expected_us *
+ data->predicted_us = div_round64((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
get_typical_interval(data);
/*
+ * Performance multiplier defines a minimum predicted idle
+ * duration / latency ratio. Adjust the latency limit if
+ * necessary.
+ */
+ interactivity_req = data->predicted_us / performance_multiplier();
+ if (latency_req > interactivity_req)
+ latency_req = interactivity_req;
+
+ /*
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
*/
- if (data->expected_us > 5 &&
+ if (data->next_timer_us > 5 &&
!drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
@@ -355,11 +360,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
continue;
if (s->exit_latency > latency_req)
continue;
- if (s->exit_latency * multiplier > data->predicted_us)
- continue;
data->last_state_idx = i;
- data->exit_us = s->exit_latency;
}
return data->last_state_idx;
@@ -390,36 +392,47 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
- unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
unsigned int new_factor;
/*
- * Ugh, this idle state doesn't support residency measurements, so we
- * are basically lost in the dark. As a compromise, assume we slept
- * for the whole expected time.
+ * Try to figure out how much time passed between entry to low
+ * power state and occurrence of the wakeup event.
+ *
+ * If the entered idle state didn't support residency measurements,
+ * we are basically lost in the dark about how much time passed.
+ * As a compromise, assume we slept for the whole expected time.
+ *
+ * Any measured amount of time will include the exit latency.
+ * Since we are interested in when the wakeup began, not when it
+ * was completed, we must subtract the exit latency. However, if
+ * the measured amount of time is less than the exit latency,
+ * assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
- last_idle_us = data->expected_us;
+ if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
+ /* Use timer value as is */
+ measured_us = data->next_timer_us;
+ } else {
+ /* Use measured value */
+ measured_us = cpuidle_get_last_residency(dev);
- measured_us = last_idle_us;
-
- /*
- * We correct for the exit latency; we are assuming here that the
- * exit latency happens after the event that we're interested in.
- */
- if (measured_us > data->exit_us)
- measured_us -= data->exit_us;
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
+ measured_us = data->next_timer_us;
+ }
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
- if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
- new_factor += RESOLUTION * measured_us / data->expected_us;
+ if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
+ new_factor += RESOLUTION * measured_us / data->next_timer_us;
else
/*
* we were idle so long that we count it as a perfect
@@ -439,7 +452,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = last_idle_us;
+ data->intervals[data->interval_ptr++] = measured_us;
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
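To make the residency accounting described above concrete, here is a standalone, illustrative-only sketch of the measured_us derivation; the names mirror the menu governor, but this helper is not part of the patch:

	static unsigned int example_measured_us(unsigned int last_residency,
						unsigned int exit_latency,
						unsigned int next_timer_us,
						bool time_valid)
	{
		unsigned int measured_us;

		/* No residency data: assume we slept until the next timer */
		if (!time_valid)
			return next_timer_us;

		measured_us = last_residency;
		if (measured_us > exit_latency)
			measured_us -= exit_latency;	/* wakeup began before the exit path */
		if (measured_us > next_timer_us)
			measured_us = next_timer_us;	/* keep the correction factor <= unity */

		return measured_us;
	}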
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f4fd837bcb82..13857f5d28f7 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -289,16 +289,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
-config CRYPTO_DEV_DCP
- tristate "Support for the DCP engine"
- depends on ARCH_MXS && OF
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES
- select CRYPTO_CBC
- help
- This options enables support for the hardware crypto-acceleration
- capabilities of the DCP co-processor
-
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
@@ -399,4 +389,33 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_CCP
+ bool "Support for AMD Cryptographic Coprocessor"
+ depends on X86 && PCI
+ default n
+ help
+ The AMD Cryptographic Coprocessor provides hardware support
+ for encryption, hashing and related operations.
+
+if CRYPTO_DEV_CCP
+ source "drivers/crypto/ccp/Kconfig"
+endif
+
+config CRYPTO_DEV_MXS_DCP
+ tristate "Support for Freescale MXS DCP"
+ depends on ARCH_MXS
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ help
+ The Freescale i.MX23/i.MX28 SoCs have a SHA1/SHA256 and AES128
+ CBC/ECB co-processor (DCP) on the die.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mxs-dcp.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b4946ddd2550..0bc6aa0a54d7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,24 +1,25 @@
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
-obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
+obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-y := n2_core.o n2_asm.o
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
-obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
-obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
-obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
-obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index efaf6302405f..37f9cc98ba17 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -724,7 +724,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- dev_set_drvdata(core_dev->device, NULL);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index c1efd910d97b..d7c9e317423c 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -39,6 +40,7 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
+#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
@@ -747,59 +749,50 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- dma_cap_mask_t mask_in, mask_out;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ AES_IDATAR(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ AES_ODATAR(0);
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
- if (pdata && pdata->dma_slave->txdata.dma_dev &&
- pdata->dma_slave->rxdata.dma_dev) {
-
- /* Try to grab 2 DMA channels */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_aes_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- goto err_dma_in;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- AES_IDATAR(0);
- dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- dma_cap_zero(mask_out);
- dma_cap_set(DMA_SLAVE, mask_out);
- dd->dma_lch_out.chan = dma_request_channel(mask_out,
- atmel_aes_filter, &pdata->dma_slave->txdata);
-
- if (!dd->dma_lch_out.chan)
- goto err_dma_out;
-
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
- dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
- AES_ODATAR(0);
- dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
- dd->dma_lch_out.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.device_fc = false;
-
- return 0;
- } else {
- return -ENODEV;
- }
+ return 0;
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
return err;
}
@@ -1261,6 +1254,47 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_aes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
+
+static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else
+static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
@@ -1272,6 +1306,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
+ pdata = atmel_aes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ err = PTR_ERR(pdata);
+ goto aes_dd_err;
+ }
+ }
+
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto aes_dd_err;
}
@@ -1358,7 +1400,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel AES\n");
+ dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
+ dma_chan_name(aes_dd->dma_lch_in.chan),
+ dma_chan_name(aes_dd->dma_lch_out.chan));
return 0;
@@ -1424,6 +1468,7 @@ static struct platform_driver atmel_aes_driver = {
.driver = {
.name = "atmel_aes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_aes_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index eaed8bf183bc..0618be06b9fb 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -1263,32 +1264,29 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
int err = -ENOMEM;
dma_cap_mask_t mask_in;
- if (pdata && pdata->dma_slave->rxdata.dma_dev) {
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
+ /* Try to grab DMA channel */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- return err;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- SHA_REG_DIN(0);
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- return 0;
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
+ atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan) {
+ dev_warn(dd->dev, "no DMA channel available\n");
+ return err;
}
- return -ENODEV;
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ SHA_REG_DIN(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ return 0;
}
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
@@ -1326,6 +1324,48 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_sha_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-sha" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
+
+static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
@@ -1402,13 +1442,23 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (sha_dd->caps.has_dma) {
pdata = pdev->dev.platform_data;
if (!pdata) {
- dev_err(&pdev->dev, "platform data not available\n");
+ pdata = atmel_sha_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto err_pdata;
}
err = atmel_sha_dma_init(sha_dd, pdata);
if (err)
goto err_sha_dma;
+
+ dev_info(dev, "using %s for DMA transfers\n",
+ dma_chan_name(sha_dd->dma_lch_in.chan));
}
spin_lock(&atmel_sha.lock);
@@ -1419,7 +1469,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (err)
goto err_algs;
- dev_info(dev, "Atmel SHA1/SHA256\n");
+ dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
+ sha_dd->caps.has_sha224 ? "/SHA224" : "",
+ sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
return 0;
@@ -1483,6 +1535,7 @@ static struct platform_driver atmel_sha_driver = {
.driver = {
.name = "atmel_sha",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_sha_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 4a99564a08e6..6cde5b530c69 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
@@ -716,59 +717,50 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- dma_cap_mask_t mask_in, mask_out;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Try to grab 2 DMA channels */
+ dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ TDES_IDATA1R;
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+ atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ TDES_ODATA1R;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
- if (pdata && pdata->dma_slave->txdata.dma_dev &&
- pdata->dma_slave->rxdata.dma_dev) {
-
- /* Try to grab 2 DMA channels */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_channel(mask_in,
- atmel_tdes_filter, &pdata->dma_slave->rxdata);
-
- if (!dd->dma_lch_in.chan)
- goto err_dma_in;
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
- dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
- TDES_IDATA1R;
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_in.dma_conf.device_fc = false;
-
- dma_cap_zero(mask_out);
- dma_cap_set(DMA_SLAVE, mask_out);
- dd->dma_lch_out.chan = dma_request_channel(mask_out,
- atmel_tdes_filter, &pdata->dma_slave->txdata);
-
- if (!dd->dma_lch_out.chan)
- goto err_dma_out;
-
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
- dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
- TDES_ODATA1R;
- dd->dma_lch_out.dma_conf.src_maxburst = 1;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.dst_maxburst = 1;
- dd->dma_lch_out.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.device_fc = false;
-
- return 0;
- } else {
- return -ENODEV;
- }
+ return 0;
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
+ dev_warn(dd->dev, "no DMA channel available\n");
return err;
}
@@ -1317,6 +1309,47 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
}
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_tdes_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g46-tdes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
+
+static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct crypto_platform_data *pdata;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->dma_slave = devm_kzalloc(&pdev->dev,
+ sizeof(*(pdata->dma_slave)),
+ GFP_KERNEL);
+ if (!pdata->dma_slave) {
+ dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ devm_kfree(&pdev->dev, pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
@@ -1399,13 +1432,24 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (tdes_dd->caps.has_dma) {
pdata = pdev->dev.platform_data;
if (!pdata) {
- dev_err(&pdev->dev, "platform data not available\n");
+ pdata = atmel_tdes_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = PTR_ERR(pdata);
+ goto err_pdata;
+ }
+ }
+ if (!pdata->dma_slave) {
err = -ENXIO;
goto err_pdata;
}
err = atmel_tdes_dma_init(tdes_dd, pdata);
if (err)
goto err_tdes_dma;
+
+ dev_info(dev, "using %s, %s for DMA transfers\n",
+ dma_chan_name(tdes_dd->dma_lch_in.chan),
+ dma_chan_name(tdes_dd->dma_lch_out.chan));
}
spin_lock(&atmel_tdes.lock);
@@ -1487,6 +1531,7 @@ static struct platform_driver atmel_tdes_driver = {
.driver = {
.name = "atmel_tdes",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
},
};
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4cf5dec826e1..b71f2fd749df 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -467,24 +467,10 @@ static int aead_setkey(struct crypto_aead *aead,
static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- unsigned int authkeylen;
- unsigned int enckeylen;
+ struct crypto_authenc_keys keys;
int ret = 0;
- param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < enckeylen)
- goto badkey;
-
- authkeylen = keylen - enckeylen;
-
- if (keylen > CAAM_MAX_KEY_SIZE)
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
/* Pick class 2 key length from algorithm submask */
@@ -492,25 +478,29 @@ static int aead_setkey(struct crypto_aead *aead,
OP_ALG_ALGSEL_SHIFT] * 2;
ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+ if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
- keylen, enckeylen, authkeylen);
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, key, authkeylen);
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+ memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
@@ -518,15 +508,15 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + enckeylen, 1);
+ ctx->split_key_pad_len + keys.enckeylen, 1);
#endif
- ctx->enckeylen = enckeylen;
+ ctx->enckeylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
- enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, DMA_TO_DEVICE);
}
return ret;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
new file mode 100644
index 000000000000..7639ffc36c68
--- /dev/null
+++ b/drivers/crypto/ccp/Kconfig
@@ -0,0 +1,24 @@
+config CRYPTO_DEV_CCP_DD
+ tristate "Cryptographic Coprocessor device driver"
+ depends on CRYPTO_DEV_CCP
+ default m
+ select HW_RANDOM
+ help
+ Provides the interface to use the AMD Cryptographic Coprocessor
+ which can be used to accelerate or offload cryptographic operations
+ such as SHA, AES and more. If you choose 'M' here, this module
+ will be called ccp.
+
+config CRYPTO_DEV_CCP_CRYPTO
+ tristate "Encryption and hashing acceleration support"
+ depends on CRYPTO_DEV_CCP_DD
+ default m
+ select CRYPTO_ALGAPI
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ help
+ Support for using the cryptographic API with the AMD Cryptographic
+ Coprocessor. This module supports acceleration and offload of SHA
+ and AES algorithms. If you choose 'M' here, this module will be
+ called ccp_crypto.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 000000000000..d3505a018720
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
+ccp-objs := ccp-dev.o ccp-ops.o
+ccp-objs += ccp-pci.o
+
+obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
+ccp-crypto-objs := ccp-crypto-main.o \
+ ccp-crypto-aes.o \
+ ccp-crypto-aes-cmac.o \
+ ccp-crypto-aes-xts.o \
+ ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
new file mode 100644
index 000000000000..8e162ad82085
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -0,0 +1,365 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+ int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ if (ret)
+ goto e_free;
+
+ if (rctx->hash_rem) {
+ /* Save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else
+ rctx->buf_count = 0;
+
+ /* Update result area if supplied */
+ if (req->result)
+ memcpy(req->result, rctx->iv, digest_size);
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg, *cmac_key_sg = NULL;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int need_pad, sg_count;
+ gfp_t gfp;
+ u64 len;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (nbytes)
+ rctx->null_msg = 0;
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+
+ if (!final && (len <= block_size)) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+
+ return 0;
+ }
+
+ rctx->src = req->src;
+ rctx->nbytes = nbytes;
+
+ rctx->final = final;
+ rctx->hash_rem = final ? 0 : len & (block_size - 1);
+ rctx->hash_cnt = len - rctx->hash_rem;
+ if (!final && !rctx->hash_rem) {
+ /* CCP can't do zero length final, so keep some data around */
+ rctx->hash_cnt -= block_size;
+ rctx->hash_rem = block_size;
+ }
+
+ if (final && (rctx->null_msg || (len & (block_size - 1))))
+ need_pad = 1;
+ else
+ need_pad = 0;
+
+ sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));
+
+ /* Build the data scatterlist table - allocate enough entries for all
+ * possible data pieces (buffer, input data, padding)
+ */
+ sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+ if (ret)
+ return ret;
+
+ sg = NULL;
+ if (rctx->buf_count) {
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ }
+
+ if (nbytes)
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+
+ if (need_pad) {
+ int pad_length = block_size - (len & (block_size - 1));
+
+ rctx->hash_cnt += pad_length;
+
+ memset(rctx->pad, 0, sizeof(rctx->pad));
+ rctx->pad[0] = 0x80;
+ sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+ }
+ if (sg) {
+ sg_mark_end(sg);
+ sg = rctx->data_sg.sgl;
+ }
+
+ /* Initialize the K1/K2 scatterlist */
+ if (final)
+ cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
+ : &ctx->u.aes.k1_sg;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
+ rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.aes.iv = &rctx->iv_sg;
+ rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
+ rctx->cmd.u.aes.src = sg;
+ rctx->cmd.u.aes.src_len = rctx->hash_cnt;
+ rctx->cmd.u.aes.dst = NULL;
+ rctx->cmd.u.aes.cmac_key = cmac_key_sg;
+ rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
+ rctx->cmd.u.aes.cmac_final = final;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_cmac_init(struct ahash_request *req)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ rctx->null_msg = 1;
+
+ return 0;
+}
+
+static int ccp_aes_cmac_update(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, req->nbytes, 0);
+}
+
+static int ccp_aes_cmac_final(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, 0, 1);
+}
+
+static int ccp_aes_cmac_finup(struct ahash_request *req)
+{
+ return ccp_do_cmac_update(req, req->nbytes, 1);
+}
+
+static int ccp_aes_cmac_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = ccp_aes_cmac_init(req);
+ if (ret)
+ return ret;
+
+ return ccp_aes_cmac_finup(req);
+}
+
+static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct ccp_crypto_ahash_alg *alg =
+ ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+ u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
+ u64 rb_hi = 0x00, rb_lo = 0x87;
+ __be64 *gk;
+ int ret;
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->u.aes.type = CCP_AES_TYPE_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->u.aes.type = CCP_AES_TYPE_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->u.aes.type = CCP_AES_TYPE_256;
+ break;
+ default:
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->u.aes.mode = alg->mode;
+
+ /* Set to zero until complete */
+ ctx->u.aes.key_len = 0;
+
+ /* Set the key for the AES cipher used to generate the keys */
+ ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
+ if (ret)
+ return ret;
+
+ /* Encrypt a block of zeroes - use key area in context */
+ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+ crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
+ ctx->u.aes.key);
+
+ /* Generate K1 and K2 */
+ k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
+ k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));
+
+ k1_hi = (k0_hi << 1) | (k0_lo >> 63);
+ k1_lo = k0_lo << 1;
+ if (ctx->u.aes.key[0] & 0x80) {
+ k1_hi ^= rb_hi;
+ k1_lo ^= rb_lo;
+ }
+ gk = (__be64 *)ctx->u.aes.k1;
+ *gk = cpu_to_be64(k1_hi);
+ gk++;
+ *gk = cpu_to_be64(k1_lo);
+
+ k2_hi = (k1_hi << 1) | (k1_lo >> 63);
+ k2_lo = k1_lo << 1;
+ if (ctx->u.aes.k1[0] & 0x80) {
+ k2_hi ^= rb_hi;
+ k2_lo ^= rb_lo;
+ }
+ gk = (__be64 *)ctx->u.aes.k2;
+ *gk = cpu_to_be64(k2_hi);
+ gk++;
+ *gk = cpu_to_be64(k2_lo);
+
+ ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
+ sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
+ sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));
+
+ /* Save the supplied key */
+ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+ memcpy(ctx->u.aes.key, key, key_len);
+ ctx->u.aes.key_len = key_len;
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return ret;
+}
+
+static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_cipher *cipher_tfm;
+
+ ctx->complete = ccp_aes_cmac_complete;
+ ctx->u.aes.key_len = 0;
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
+
+ cipher_tfm = crypto_alloc_cipher("aes", 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(cipher_tfm)) {
+ pr_warn("could not load aes cipher driver\n");
+ return PTR_ERR(cipher_tfm);
+ }
+ ctx->u.aes.tfm_cipher = cipher_tfm;
+
+ return 0;
+}
+
+static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.aes.tfm_cipher)
+ crypto_free_cipher(ctx->u.aes.tfm_cipher);
+ ctx->u.aes.tfm_cipher = NULL;
+}
+
+int ccp_register_aes_cmac_algs(struct list_head *head)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+ ccp_alg->mode = CCP_AES_MODE_CMAC;
+
+ alg = &ccp_alg->alg;
+ alg->init = ccp_aes_cmac_init;
+ alg->update = ccp_aes_cmac_update;
+ alg->final = ccp_aes_cmac_final;
+ alg->finup = ccp_aes_cmac_finup;
+ alg->digest = ccp_aes_cmac_digest;
+ alg->setkey = ccp_aes_cmac_setkey;
+
+ halg = &alg->halg;
+ halg->digestsize = AES_BLOCK_SIZE;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = AES_BLOCK_SIZE;
+ base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_priority = CCP_CRA_PRIORITY;
+ base->cra_type = &crypto_ahash_type;
+ base->cra_init = ccp_aes_cmac_cra_init;
+ base->cra_exit = ccp_aes_cmac_cra_exit;
+ base->cra_module = THIS_MODULE;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
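As background for the setkey handler above, a standalone sketch of the CMAC subkey doubling it performs: K1 is derived by doubling the encryption of the all-zero block, and K2 by doubling K1, reducing with the Rb constant 0x87 as in the driver. This helper is illustrative only and not part of the driver:

	static void example_cmac_double(const u8 in[16], u8 out[16])
	{
		u8 carry = 0;
		int i;

		/* Shift the big-endian 128-bit block left by one bit */
		for (i = 15; i >= 0; i--) {
			u8 byte = in[i];

			out[i] = (byte << 1) | carry;
			carry = byte >> 7;
		}

		/* Reduce with Rb = 0x87 when the shifted-out bit was set */
		if (in[0] & 0x80)
			out[15] ^= 0x87;
	}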
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
new file mode 100644
index 000000000000..0237ab58f242
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -0,0 +1,279 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+struct ccp_aes_xts_def {
+ const char *name;
+ const char *drv_name;
+};
+
+static struct ccp_aes_xts_def aes_xts_algs[] = {
+ {
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-ccp",
+ },
+};
+
+struct ccp_unit_size_map {
+ unsigned int size;
+ u32 value;
+};
+
+static struct ccp_unit_size_map unit_size_map[] = {
+ {
+ .size = 4096,
+ .value = CCP_XTS_AES_UNIT_SIZE_4096,
+ },
+ {
+ .size = 2048,
+ .value = CCP_XTS_AES_UNIT_SIZE_2048,
+ },
+ {
+ .size = 1024,
+ .value = CCP_XTS_AES_UNIT_SIZE_1024,
+ },
+ {
+ .size = 512,
+ .value = CCP_XTS_AES_UNIT_SIZE_512,
+ },
+ {
+ .size = 256,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 128,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 64,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 32,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+ {
+ .size = 16,
+ .value = CCP_XTS_AES_UNIT_SIZE_16,
+ },
+ {
+ .size = 1,
+ .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ },
+};
+
+static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+ /* Only support a 128-bit AES key with a 128-bit tweak key,
+ * otherwise use the fallback
+ */
+ switch (key_len) {
+ case AES_KEYSIZE_128 * 2:
+ memcpy(ctx->u.aes.key, key, key_len);
+ break;
+ }
+ ctx->u.aes.key_len = key_len / 2;
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
+ key_len);
+}
+
+static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ unsigned int encrypt)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ unsigned int unit;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (req->nbytes & (AES_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ if (!req->info)
+ return -EINVAL;
+
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+ if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+ break;
+
+ if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ /* Use the fallback to process the request for any
+ * unsupported unit sizes or key sizes
+ */
+ ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
+ ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+
+ return ret;
+ }
+
+ memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ : CCP_AES_ACTION_DECRYPT;
+ rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.xts.iv = &rctx->iv_sg;
+ rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
+ rctx->cmd.u.xts.src = req->src;
+ rctx->cmd.u.xts.src_len = req->nbytes;
+ rctx->cmd.u.xts.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_xts_crypt(req, 1);
+}
+
+static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_xts_crypt(req, 0);
+}
+
+static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *fallback_tfm;
+
+ ctx->complete = ccp_aes_xts_complete;
+ ctx->u.aes.key_len = 0;
+
+ fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warn("could not load fallback driver %s\n",
+ tfm->__crt_alg->cra_name);
+ return PTR_ERR(fallback_tfm);
+ }
+ ctx->u.aes.tfm_ablkcipher = fallback_tfm;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
+ fallback_tfm->base.crt_ablkcipher.reqsize;
+
+ return 0;
+}
+
+static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.aes.tfm_ablkcipher)
+ crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
+ ctx->u.aes.tfm_ablkcipher = NULL;
+}
+
+
+static int ccp_register_aes_xts_alg(struct list_head *head,
+ const struct ccp_aes_xts_def *def)
+{
+ struct ccp_crypto_ablkcipher_alg *ccp_alg;
+ struct crypto_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ alg->cra_blocksize = AES_BLOCK_SIZE;
+ alg->cra_ctxsize = sizeof(struct ccp_ctx);
+ alg->cra_priority = CCP_CRA_PRIORITY;
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
+ alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
+ alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
+ alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
+ alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
+ alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
+ alg->cra_init = ccp_aes_xts_cra_init;
+ alg->cra_exit = ccp_aes_xts_cra_exit;
+ alg->cra_module = THIS_MODULE;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ alg->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_aes_xts_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
+ ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
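
The unit-size selection in ccp_aes_xts_crypt() above picks the first (largest) entry in unit_size_map whose size evenly divides req->nbytes; request lengths that only match a __LAST entry, or keys other than 128-bit, are routed to the fallback cipher. Below is a small user-space sketch of that selection logic; the table lists only the sizes the map treats as hardware-supported, and pick_unit_size is a hypothetical name.

#include <stdio.h>

/* Sketch of the power-of-two unit-size match: nbytes & (size - 1) == 0
 * means nbytes is a multiple of size. Returning 0 stands in for the
 * CCP_XTS_AES_UNIT_SIZE__LAST / fallback path.
 */
static const unsigned int xts_unit_sizes[] = { 4096, 2048, 1024, 512, 16 };

static unsigned int pick_unit_size(unsigned int nbytes)
{
	unsigned int i;

	for (i = 0; i < sizeof(xts_unit_sizes) / sizeof(xts_unit_sizes[0]); i++)
		if (!(nbytes & (xts_unit_sizes[i] - 1)))
			return xts_unit_sizes[i];

	return 0;
}

int main(void)
{
	printf("8192 -> %u\n", pick_unit_size(8192));	/* 4096 */
	printf("3072 -> %u\n", pick_unit_size(3072));	/* 1024 */
	printf("  48 -> %u\n", pick_unit_size(48));	/* 16 */
	printf("  24 -> %u\n", pick_unit_size(24));	/* 0: fallback */
	return 0;
}
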
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
new file mode 100644
index 000000000000..e46490db0f63
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -0,0 +1,369 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
+ memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct ccp_crypto_ablkcipher_alg *alg =
+ ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ ctx->u.aes.type = CCP_AES_TYPE_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->u.aes.type = CCP_AES_TYPE_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->u.aes.type = CCP_AES_TYPE_256;
+ break;
+ default:
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->u.aes.mode = alg->mode;
+ ctx->u.aes.key_len = key_len;
+
+ memcpy(ctx->u.aes.key, key, key_len);
+ sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+ return 0;
+}
+
+static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct scatterlist *iv_sg = NULL;
+ unsigned int iv_len = 0;
+ int ret;
+
+ if (!ctx->u.aes.key_len)
+ return -EINVAL;
+
+ if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
+ (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
+ if (!req->info)
+ return -EINVAL;
+
+ memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+ iv_sg = &rctx->iv_sg;
+ iv_len = AES_BLOCK_SIZE;
+ sg_init_one(iv_sg, rctx->iv, iv_len);
+ }
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action =
+ (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
+ rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+ rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+ rctx->cmd.u.aes.iv = iv_sg;
+ rctx->cmd.u.aes.iv_len = iv_len;
+ rctx->cmd.u.aes.src = req->src;
+ rctx->cmd.u.aes.src_len = req->nbytes;
+ rctx->cmd.u.aes.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_aes_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_crypt(req, true);
+}
+
+static int ccp_aes_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_crypt(req, false);
+}
+
+static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->complete = ccp_aes_complete;
+ ctx->u.aes.key_len = 0;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+ return 0;
+}
+
+static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
+ int ret)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+ /* Restore the original pointer */
+ req->info = rctx->rfc3686_info;
+
+ return ccp_aes_complete(async_req, ret);
+}
+
+static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ key_len -= CTR_RFC3686_NONCE_SIZE;
+ memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);
+
+ return ccp_aes_setkey(tfm, key, key_len);
+}
+
+static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ u8 *iv;
+
+ /* Initialize the CTR block */
+ iv = rctx->rfc3686_iv;
+ memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
+
+ iv += CTR_RFC3686_NONCE_SIZE;
+ memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+
+ iv += CTR_RFC3686_IV_SIZE;
+ *(__be32 *)iv = cpu_to_be32(1);
+
+ /* Point to the new IV */
+ rctx->rfc3686_info = req->info;
+ req->info = rctx->rfc3686_iv;
+
+ return ccp_aes_crypt(req, encrypt);
+}
+
+static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_rfc3686_crypt(req, true);
+}
+
+static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ return ccp_aes_rfc3686_crypt(req, false);
+}
+
+static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->complete = ccp_aes_rfc3686_complete;
+ ctx->u.aes.key_len = 0;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+ return 0;
+}
+
+static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_aes_defaults = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = ccp_aes_cra_init,
+ .cra_exit = ccp_aes_cra_exit,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = ccp_aes_setkey,
+ .encrypt = ccp_aes_encrypt,
+ .decrypt = ccp_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+};
+
+static struct crypto_alg ccp_aes_rfc3686_defaults = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ccp_ctx),
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = ccp_aes_rfc3686_cra_init,
+ .cra_exit = ccp_aes_rfc3686_cra_exit,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = ccp_aes_rfc3686_setkey,
+ .encrypt = ccp_aes_rfc3686_encrypt,
+ .decrypt = ccp_aes_rfc3686_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ },
+};
+
+struct ccp_aes_def {
+ enum ccp_aes_mode mode;
+ const char *name;
+ const char *driver_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_aes_def aes_algs[] = {
+ {
+ .mode = CCP_AES_MODE_ECB,
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = 0,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CBC,
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CFB,
+ .name = "cfb(aes)",
+ .driver_name = "cfb-aes-ccp",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_OFB,
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-ccp",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CTR,
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-ccp",
+ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+ {
+ .mode = CCP_AES_MODE_CTR,
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-ccp",
+ .blocksize = 1,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .alg_defaults = &ccp_aes_rfc3686_defaults,
+ },
+};
+
+static int ccp_register_aes_alg(struct list_head *head,
+ const struct ccp_aes_def *def)
+{
+ struct ccp_crypto_ablkcipher_alg *ccp_alg;
+ struct crypto_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ ccp_alg->mode = def->mode;
+
+ /* Copy the defaults and override as necessary */
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ alg->cra_blocksize = def->blocksize;
+ alg->cra_ablkcipher.ivsize = def->ivsize;
+
+ ret = crypto_register_alg(alg);
+ if (ret) {
+ pr_err("%s ablkcipher algorithm registration error (%d)\n",
+ alg->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_aes_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ ret = ccp_register_aes_alg(head, &aes_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
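
The rfc3686(ctr(aes)) variant above builds a full 16-byte counter block from the 4-byte nonce carried in the key, the 8-byte per-request IV, and a big-endian block counter that starts at 1, as RFC 3686 specifies. A stand-alone sketch of that layout (the function name is illustrative, not from the patch):

#include <stdint.h>
#include <string.h>

#define RFC3686_NONCE_SIZE	4
#define RFC3686_IV_SIZE		8

/* Counter block: nonce (4) || IV (8) || counter (4, big-endian, starts at 1) */
static void rfc3686_build_ctr_block(uint8_t block[16],
				    const uint8_t nonce[RFC3686_NONCE_SIZE],
				    const uint8_t iv[RFC3686_IV_SIZE])
{
	memcpy(block, nonce, RFC3686_NONCE_SIZE);
	memcpy(block + RFC3686_NONCE_SIZE, iv, RFC3686_IV_SIZE);
	block[12] = 0;
	block[13] = 0;
	block[14] = 0;
	block[15] = 1;
}
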
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
new file mode 100644
index 000000000000..2636f044789d
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -0,0 +1,432 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/ccp.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+
+#include "ccp-crypto.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
+
+
+/* List heads for the supported algorithms */
+static LIST_HEAD(hash_algs);
+static LIST_HEAD(cipher_algs);
+
+/* For any tfm, requests for that tfm on the same CPU must be returned
+ * in the order received. With multiple queues available, the CCP can
+ * process more than one cmd at a time. Therefore we must maintain
+ * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
+ * combination.
+ */
+struct ccp_crypto_cpu_queue {
+ struct list_head cmds;
+ struct list_head *backlog;
+ unsigned int cmd_count;
+};
+#define CCP_CRYPTO_MAX_QLEN 50
+
+struct ccp_crypto_percpu_queue {
+ struct ccp_crypto_cpu_queue __percpu *cpu_queue;
+};
+static struct ccp_crypto_percpu_queue req_queue;
+
+struct ccp_crypto_cmd {
+ struct list_head entry;
+
+ struct ccp_cmd *cmd;
+
+ /* Save the crypto_tfm and crypto_async_request addresses
+ * separately to avoid any reference to a possibly invalid
+ * crypto_async_request structure after invoking the request
+ * callback
+ */
+ struct crypto_async_request *req;
+ struct crypto_tfm *tfm;
+
+ /* Used for held command processing to determine state */
+ int ret;
+
+ int cpu;
+};
+
+struct ccp_crypto_cpu {
+ struct work_struct work;
+ struct completion completion;
+ struct ccp_crypto_cmd *crypto_cmd;
+ int err;
+};
+
+
+static inline bool ccp_crypto_success(int err)
+{
+ if (err && (err != -EINPROGRESS) && (err != -EBUSY))
+ return false;
+
+ return true;
+}
+
+/*
+ * ccp_crypto_cmd_complete must be called while running on the appropriate
+ * cpu and the caller must have done a get_cpu to disable preemption
+ */
+static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
+ struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ struct ccp_crypto_cmd *held = NULL, *tmp;
+
+ *backlog = NULL;
+
+ cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+
+ /* Held cmds will be after the current cmd in the queue, so start
+ * searching for a cmd with a matching tfm for submission.
+ */
+ tmp = crypto_cmd;
+ list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+ if (crypto_cmd->tfm != tmp->tfm)
+ continue;
+ held = tmp;
+ break;
+ }
+
+ /* Process the backlog:
+ * Because cmds can be executed from any point in the cmd list,
+ * special precautions have to be taken when handling the backlog.
+ */
+ if (cpu_queue->backlog != &cpu_queue->cmds) {
+ /* Skip over this cmd if it is the next backlog cmd */
+ if (cpu_queue->backlog == &crypto_cmd->entry)
+ cpu_queue->backlog = crypto_cmd->entry.next;
+
+ *backlog = container_of(cpu_queue->backlog,
+ struct ccp_crypto_cmd, entry);
+ cpu_queue->backlog = cpu_queue->backlog->next;
+
+ /* Skip over this cmd if it is now the next backlog cmd */
+ if (cpu_queue->backlog == &crypto_cmd->entry)
+ cpu_queue->backlog = crypto_cmd->entry.next;
+ }
+
+ /* Remove the cmd entry from the list of cmds */
+ cpu_queue->cmd_count--;
+ list_del(&crypto_cmd->entry);
+
+ return held;
+}
+
+static void ccp_crypto_complete_on_cpu(struct work_struct *work)
+{
+ struct ccp_crypto_cpu *cpu_work =
+ container_of(work, struct ccp_crypto_cpu, work);
+ struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
+ struct ccp_crypto_cmd *held, *next, *backlog;
+ struct crypto_async_request *req = crypto_cmd->req;
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
+ int cpu, ret;
+
+ cpu = get_cpu();
+
+ if (cpu_work->err == -EINPROGRESS) {
+ /* Only propagate the -EINPROGRESS if necessary */
+ if (crypto_cmd->ret == -EBUSY) {
+ crypto_cmd->ret = -EINPROGRESS;
+ req->complete(req, -EINPROGRESS);
+ }
+
+ goto e_cpu;
+ }
+
+ /* Operation has completed - update the queue before invoking
+ * the completion callbacks and retrieve the next cmd (cmd with
+ * a matching tfm) that can be submitted to the CCP.
+ */
+ held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
+ if (backlog) {
+ backlog->ret = -EINPROGRESS;
+ backlog->req->complete(backlog->req, -EINPROGRESS);
+ }
+
+ /* Transition the state from -EBUSY to -EINPROGRESS first */
+ if (crypto_cmd->ret == -EBUSY)
+ req->complete(req, -EINPROGRESS);
+
+ /* Completion callbacks */
+ ret = cpu_work->err;
+ if (ctx->complete)
+ ret = ctx->complete(req, ret);
+ req->complete(req, ret);
+
+ /* Submit the next cmd */
+ while (held) {
+ ret = ccp_enqueue_cmd(held->cmd);
+ if (ccp_crypto_success(ret))
+ break;
+
+ /* Error occurred, report it and get the next entry */
+ held->req->complete(held->req, ret);
+
+ next = ccp_crypto_cmd_complete(held, &backlog);
+ if (backlog) {
+ backlog->ret = -EINPROGRESS;
+ backlog->req->complete(backlog->req, -EINPROGRESS);
+ }
+
+ kfree(held);
+ held = next;
+ }
+
+ kfree(crypto_cmd);
+
+e_cpu:
+ put_cpu();
+
+ complete(&cpu_work->completion);
+}
+
+static void ccp_crypto_complete(void *data, int err)
+{
+ struct ccp_crypto_cmd *crypto_cmd = data;
+ struct ccp_crypto_cpu cpu_work;
+
+ INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
+ init_completion(&cpu_work.completion);
+ cpu_work.crypto_cmd = crypto_cmd;
+ cpu_work.err = err;
+
+ schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
+
+ /* Keep the completion call synchronous */
+ wait_for_completion(&cpu_work.completion);
+}
+
+static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ struct ccp_crypto_cmd *active = NULL, *tmp;
+ int cpu, ret;
+
+ cpu = get_cpu();
+ crypto_cmd->cpu = cpu;
+
+ cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+
+ /* Check if the cmd can/should be queued */
+ if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ ret = -EBUSY;
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ goto e_cpu;
+ }
+
+ /* Look for an entry with the same tfm. If there is a cmd
+ * with the same tfm in the list for this cpu then the current
+ * cmd cannot be submitted to the CCP yet.
+ */
+ list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
+ if (crypto_cmd->tfm != tmp->tfm)
+ continue;
+ active = tmp;
+ break;
+ }
+
+ ret = -EINPROGRESS;
+ if (!active) {
+ ret = ccp_enqueue_cmd(crypto_cmd->cmd);
+ if (!ccp_crypto_success(ret))
+ goto e_cpu;
+ }
+
+ if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+ ret = -EBUSY;
+ if (cpu_queue->backlog == &cpu_queue->cmds)
+ cpu_queue->backlog = &crypto_cmd->entry;
+ }
+ crypto_cmd->ret = ret;
+
+ cpu_queue->cmd_count++;
+ list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
+
+e_cpu:
+ put_cpu();
+
+ return ret;
+}
+
+/**
+ * ccp_crypto_enqueue_request - queue a crypto async request for processing
+ * by the CCP
+ *
+ * @req: crypto_async_request struct to be processed
+ * @cmd: ccp_cmd struct to be sent to the CCP
+ */
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_crypto_cmd *crypto_cmd;
+ gfp_t gfp;
+ int ret;
+
+ gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+ crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
+ if (!crypto_cmd)
+ return -ENOMEM;
+
+ /* The tfm pointer must be saved and not referenced from the
+ * crypto_async_request (req) pointer because it is used after the
+ * completion callback for the request, and the req pointer
+ * might not be valid anymore.
+ */
+ crypto_cmd->cmd = cmd;
+ crypto_cmd->req = req;
+ crypto_cmd->tfm = req->tfm;
+
+ cmd->callback = ccp_crypto_complete;
+ cmd->data = crypto_cmd;
+
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ cmd->flags |= CCP_CMD_MAY_BACKLOG;
+ else
+ cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
+
+ ret = ccp_crypto_enqueue_cmd(crypto_cmd);
+ if (!ccp_crypto_success(ret))
+ kfree(crypto_cmd);
+
+ return ret;
+}
+
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+ struct scatterlist *sg_add)
+{
+ struct scatterlist *sg, *sg_last = NULL;
+
+ for (sg = table->sgl; sg; sg = sg_next(sg))
+ if (!sg_page(sg))
+ break;
+ BUG_ON(!sg);
+
+ for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
+ sg_set_page(sg, sg_page(sg_add), sg_add->length,
+ sg_add->offset);
+ sg_last = sg;
+ }
+ BUG_ON(sg_add);
+
+ return sg_last;
+}
+
+static int ccp_register_algs(void)
+{
+ int ret;
+
+ ret = ccp_register_aes_algs(&cipher_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_aes_cmac_algs(&hash_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_aes_xts_algs(&cipher_algs);
+ if (ret)
+ return ret;
+
+ ret = ccp_register_sha_algs(&hash_algs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ccp_unregister_algs(void)
+{
+ struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
+ struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+
+ list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
+ crypto_unregister_ahash(&ahash_alg->alg);
+ list_del(&ahash_alg->entry);
+ kfree(ahash_alg);
+ }
+
+ list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
+ crypto_unregister_alg(&ablk_alg->alg);
+ list_del(&ablk_alg->entry);
+ kfree(ablk_alg);
+ }
+}
+
+static int ccp_init_queues(void)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ int cpu;
+
+ req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
+ if (!req_queue.cpu_queue)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
+ INIT_LIST_HEAD(&cpu_queue->cmds);
+ cpu_queue->backlog = &cpu_queue->cmds;
+ cpu_queue->cmd_count = 0;
+ }
+
+ return 0;
+}
+
+static void ccp_fini_queue(void)
+{
+ struct ccp_crypto_cpu_queue *cpu_queue;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
+ BUG_ON(!list_empty(&cpu_queue->cmds));
+ }
+ free_percpu(req_queue.cpu_queue);
+}
+
+static int ccp_crypto_init(void)
+{
+ int ret;
+
+ ret = ccp_init_queues();
+ if (ret)
+ return ret;
+
+ ret = ccp_register_algs();
+ if (ret) {
+ ccp_unregister_algs();
+ ccp_fini_queue();
+ }
+
+ return ret;
+}
+
+static void ccp_crypto_exit(void)
+{
+ ccp_unregister_algs();
+ ccp_fini_queue();
+}
+
+module_init(ccp_crypto_init);
+module_exit(ccp_crypto_exit);
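
A usage note on the return-code contract of ccp_crypto_enqueue_request() above: -EINPROGRESS means the cmd was submitted to the CCP, -EBUSY means it was accepted but backlogged, and anything else means it was never queued; the final status is always delivered through the completion callback, with an extra -EINPROGRESS notification when a backlogged cmd advances. A hedged, kernel-style sketch of a synchronous caller, in the same spirit as ccp_sync_hash() in the SHA code, follows; the ccp_sync_* names are not from the patch, and callback registration and completion initialization are omitted.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

/* Illustrative sketch only -- not part of the patch */
struct ccp_sync_result {
	struct completion completion;
	int err;
};

/* Completion callback: ignore the backlog-advance notification and record
 * the final status (mirrors ccp_sync_hash_complete() in the SHA code).
 */
static void ccp_sync_complete(struct crypto_async_request *req, int err)
{
	struct ccp_sync_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

/* Interpret the enqueue return code and wait for the callback if queued */
static int ccp_sync_wait(struct ccp_sync_result *result, int enqueue_ret)
{
	if ((enqueue_ret != -EINPROGRESS) && (enqueue_ret != -EBUSY))
		return enqueue_ret;	/* the request was never queued */

	wait_for_completion(&result->completion);

	return result->err;
}
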
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
new file mode 100644
index 000000000000..3867290b3531
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -0,0 +1,517 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+
+struct ccp_sha_result {
+ struct completion completion;
+ int err;
+};
+
+static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
+{
+ struct ccp_sha_result *result = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ result->err = err;
+ complete(&result->completion);
+}
+
+static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
+ struct scatterlist *sg, unsigned int len)
+{
+ struct ccp_sha_result result;
+ struct ahash_request *req;
+ int ret;
+
+ init_completion(&result.completion);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ccp_sync_hash_complete, &result);
+ ahash_request_set_crypt(req, sg, buf, len);
+
+ ret = crypto_ahash_digest(req);
+ if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+ ret = wait_for_completion_interruptible(&result.completion);
+ if (!ret)
+ ret = result.err;
+ }
+
+ ahash_request_free(req);
+
+ return ret;
+}
+
+static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist sg[2];
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
+ sg_set_buf(&sg[1], rctx->ctx, digest_size);
+
+ return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
+ block_size + digest_size);
+}
+
+static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+ if (ret)
+ goto e_free;
+
+ if (rctx->hash_rem) {
+ /* Save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else
+ rctx->buf_count = 0;
+
+ /* Update result area if supplied */
+ if (req->result)
+ memcpy(req->result, rctx->ctx, digest_size);
+
+ /* If we're doing an HMAC, we need to perform that on the final op */
+ if (rctx->final && ctx->u.sha.key_len)
+ ret = ccp_sha_finish_hmac(async_req);
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int sg_count;
+ gfp_t gfp;
+ u64 len;
+ int ret;
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+
+ if (!final && (len <= block_size)) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+
+ return 0;
+ }
+
+ rctx->src = req->src;
+ rctx->nbytes = nbytes;
+
+ rctx->final = final;
+ rctx->hash_rem = final ? 0 : len & (block_size - 1);
+ rctx->hash_cnt = len - rctx->hash_rem;
+ if (!final && !rctx->hash_rem) {
+ /* CCP can't do zero length final, so keep some data around */
+ rctx->hash_cnt -= block_size;
+ rctx->hash_rem = block_size;
+ }
+
+ /* Initialize the context scatterlist */
+ sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
+
+ sg = NULL;
+ if (rctx->buf_count && nbytes) {
+ /* Build the data scatterlist table - allocate enough entries
+ * for both data pieces (buffer and input data)
+ */
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ sg_count = sg_nents(req->src) + 1;
+ ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+ if (ret)
+ return ret;
+
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+ sg_mark_end(sg);
+
+ sg = rctx->data_sg.sgl;
+ } else if (rctx->buf_count) {
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+
+ sg = &rctx->buf_sg;
+ } else if (nbytes) {
+ sg = req->src;
+ }
+
+ rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_SHA;
+ rctx->cmd.u.sha.type = rctx->type;
+ rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
+ rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+ rctx->cmd.u.sha.src = sg;
+ rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+ rctx->cmd.u.sha.final = rctx->final;
+ rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
+
+ rctx->first = 0;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_crypto_ahash_alg *alg =
+ ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
+ rctx->type = alg->type;
+ rctx->first = 1;
+
+ if (ctx->u.sha.key_len) {
+ /* Buffer the HMAC key for first update */
+ memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
+ rctx->buf_count = block_size;
+ }
+
+ return 0;
+}
+
+static int ccp_sha_update(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, req->nbytes, 0);
+}
+
+static int ccp_sha_final(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, 0, 1);
+}
+
+static int ccp_sha_finup(struct ahash_request *req)
+{
+ return ccp_do_sha_update(req, req->nbytes, 1);
+}
+
+static int ccp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = ccp_sha_init(req);
+ if (ret)
+ return ret;
+
+ return ccp_sha_finup(req);
+}
+
+static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct scatterlist sg;
+ unsigned int block_size =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digest_size = crypto_ahash_digestsize(tfm);
+ int i, ret;
+
+ /* Set to zero until complete */
+ ctx->u.sha.key_len = 0;
+
+ /* Clear key area to provide zero padding for keys smaller
+ * than the block size
+ */
+ memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));
+
+ if (key_len > block_size) {
+ /* Must hash the input key */
+ sg_init_one(&sg, key, key_len);
+ ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ key_len = digest_size;
+ } else
+ memcpy(ctx->u.sha.key, key, key_len);
+
+ for (i = 0; i < block_size; i++) {
+ ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
+ ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
+ }
+
+ ctx->u.sha.key_len = key_len;
+
+ return 0;
+}
+
+static int ccp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+
+ ctx->complete = ccp_sha_complete;
+ ctx->u.sha.key_len = 0;
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));
+
+ return 0;
+}
+
+static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
+ struct crypto_ahash *hmac_tfm;
+
+ hmac_tfm = crypto_alloc_ahash(alg->child_alg,
+ CRYPTO_ALG_TYPE_AHASH, 0);
+ if (IS_ERR(hmac_tfm)) {
+ pr_warn("could not load driver %s need for HMAC support\n",
+ alg->child_alg);
+ return PTR_ERR(hmac_tfm);
+ }
+
+ ctx->u.sha.hmac_tfm = hmac_tfm;
+
+ return ccp_sha_cra_init(tfm);
+}
+
+static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->u.sha.hmac_tfm)
+ crypto_free_ahash(ctx->u.sha.hmac_tfm);
+
+ ccp_sha_cra_exit(tfm);
+}
+
+static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0,
+};
+
+static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+struct ccp_sha_def {
+ const char *name;
+ const char *drv_name;
+ const __be32 *init;
+ enum ccp_sha_type type;
+ u32 digest_size;
+ u32 block_size;
+};
+
+static struct ccp_sha_def sha_algs[] = {
+ {
+ .name = "sha1",
+ .drv_name = "sha1-ccp",
+ .init = sha1_init,
+ .type = CCP_SHA_TYPE_1,
+ .digest_size = SHA1_DIGEST_SIZE,
+ .block_size = SHA1_BLOCK_SIZE,
+ },
+ {
+ .name = "sha224",
+ .drv_name = "sha224-ccp",
+ .init = sha224_init,
+ .type = CCP_SHA_TYPE_224,
+ .digest_size = SHA224_DIGEST_SIZE,
+ .block_size = SHA224_BLOCK_SIZE,
+ },
+ {
+ .name = "sha256",
+ .drv_name = "sha256-ccp",
+ .init = sha256_init,
+ .type = CCP_SHA_TYPE_256,
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE,
+ },
+};
+
+static int ccp_register_hmac_alg(struct list_head *head,
+ const struct ccp_sha_def *def,
+ const struct ccp_crypto_ahash_alg *base_alg)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ /* Copy the base algorithm and only change what's necessary */
+ *ccp_alg = *base_alg;
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
+
+ alg = &ccp_alg->alg;
+ alg->setkey = ccp_sha_setkey;
+
+ halg = &alg->halg;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
+ def->drv_name);
+ base->cra_init = ccp_hmac_sha_cra_init;
+ base->cra_exit = ccp_hmac_sha_cra_exit;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return ret;
+}
+
+static int ccp_register_sha_alg(struct list_head *head,
+ const struct ccp_sha_def *def)
+{
+ struct ccp_crypto_ahash_alg *ccp_alg;
+ struct ahash_alg *alg;
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ ccp_alg->init = def->init;
+ ccp_alg->type = def->type;
+
+ alg = &ccp_alg->alg;
+ alg->init = ccp_sha_init;
+ alg->update = ccp_sha_update;
+ alg->final = ccp_sha_final;
+ alg->finup = ccp_sha_finup;
+ alg->digest = ccp_sha_digest;
+
+ halg = &alg->halg;
+ halg->digestsize = def->digest_size;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->drv_name);
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = def->block_size;
+ base->cra_ctxsize = sizeof(struct ccp_ctx);
+ base->cra_priority = CCP_CRA_PRIORITY;
+ base->cra_type = &crypto_ahash_type;
+ base->cra_init = ccp_sha_cra_init;
+ base->cra_exit = ccp_sha_cra_exit;
+ base->cra_module = THIS_MODULE;
+
+ ret = crypto_register_ahash(alg);
+ if (ret) {
+ pr_err("%s ahash algorithm registration error (%d)\n",
+ base->cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ ret = ccp_register_hmac_alg(head, def, ccp_alg);
+
+ return ret;
+}
+
+int ccp_register_sha_algs(struct list_head *head)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ ret = ccp_register_sha_alg(head, &sha_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
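
The HMAC support above is split across ccp_sha_setkey() (which builds the ipad/opad blocks), ccp_sha_init() (which prepends the ipad block to the message) and ccp_sha_finish_hmac() (which hashes opad || inner digest), together implementing HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). A compact sketch of that two-pass composition follows, with hash_fn standing in for the underlying SHA transform; all names and size bounds here are illustrative.

#include <stdint.h>
#include <string.h>

#define HMAC_MAX_BLOCK	64	/* SHA-1/224/256 block size */
#define HMAC_MAX_DIGEST	32	/* SHA-256 digest size */
#define HMAC_MAX_MSG	1024	/* bound chosen for this sketch only */

typedef void (*hash_fn)(uint8_t *digest, const uint8_t *msg, size_t len);

/* Two-pass HMAC: the key is assumed to be already hashed/zero-padded to
 * block_size, as done in ccp_sha_setkey().
 */
static void hmac_sketch(hash_fn hash, size_t block_size, size_t digest_size,
			const uint8_t key[HMAC_MAX_BLOCK],
			const uint8_t *msg, size_t msg_len, uint8_t *out)
{
	uint8_t buf[HMAC_MAX_BLOCK + HMAC_MAX_MSG];
	uint8_t inner[HMAC_MAX_DIGEST];
	size_t i;

	if (msg_len > HMAC_MAX_MSG)
		return;		/* a real implementation streams the data */

	/* inner pass: H((K ^ ipad) || m) */
	for (i = 0; i < block_size; i++)
		buf[i] = key[i] ^ 0x36;
	memcpy(buf + block_size, msg, msg_len);
	hash(inner, buf, block_size + msg_len);

	/* outer pass: H((K ^ opad) || inner digest) */
	for (i = 0; i < block_size; i++)
		buf[i] = key[i] ^ 0x5c;
	memcpy(buf + block_size, inner, digest_size);
	hash(out, buf, block_size + digest_size);
}
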
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
new file mode 100644
index 000000000000..b222231b6169
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -0,0 +1,197 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_CRYPTO_H__
+#define __CCP_CRYPTO_H__
+
+
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/ccp.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+
+#define CCP_CRA_PRIORITY 300
+
+struct ccp_crypto_ablkcipher_alg {
+ struct list_head entry;
+
+ u32 mode;
+
+ struct crypto_alg alg;
+};
+
+struct ccp_crypto_ahash_alg {
+ struct list_head entry;
+
+ const __be32 *init;
+ u32 type;
+ u32 mode;
+
+ /* Child algorithm used for HMAC, CMAC, etc */
+ char child_alg[CRYPTO_MAX_ALG_NAME];
+
+ struct ahash_alg alg;
+};
+
+static inline struct ccp_crypto_ablkcipher_alg *
+ ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+}
+
+static inline struct ccp_crypto_ahash_alg *
+ ccp_crypto_ahash_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct ahash_alg *ahash_alg;
+
+ ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+ return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
+}
+
+
+/***** AES related defines *****/
+struct ccp_aes_ctx {
+ /* Fallback cipher for XTS with unsupported unit sizes */
+ struct crypto_ablkcipher *tfm_ablkcipher;
+
+ /* Cipher used to generate CMAC K1/K2 keys */
+ struct crypto_cipher *tfm_cipher;
+
+ enum ccp_engine engine;
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+
+ struct scatterlist key_sg;
+ unsigned int key_len;
+ u8 key[AES_MAX_KEY_SIZE];
+
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
+
+ /* CMAC key structures */
+ struct scatterlist k1_sg;
+ struct scatterlist k2_sg;
+ unsigned int kn_len;
+ u8 k1[AES_BLOCK_SIZE];
+ u8 k2[AES_BLOCK_SIZE];
+};
+
+struct ccp_aes_req_ctx {
+ struct scatterlist iv_sg;
+ u8 iv[AES_BLOCK_SIZE];
+
+ /* Fields used for RFC3686 requests */
+ u8 *rfc3686_info;
+ u8 rfc3686_iv[AES_BLOCK_SIZE];
+
+ struct ccp_cmd cmd;
+};
+
+struct ccp_aes_cmac_req_ctx {
+ unsigned int null_msg;
+ unsigned int final;
+
+ struct scatterlist *src;
+ unsigned int nbytes;
+
+ u64 hash_cnt;
+ unsigned int hash_rem;
+
+ struct sg_table data_sg;
+
+ struct scatterlist iv_sg;
+ u8 iv[AES_BLOCK_SIZE];
+
+ struct scatterlist buf_sg;
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+
+ struct scatterlist pad_sg;
+ unsigned int pad_count;
+ u8 pad[AES_BLOCK_SIZE];
+
+ struct ccp_cmd cmd;
+};
+
+/***** SHA related defines *****/
+#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
+#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+struct ccp_sha_ctx {
+ unsigned int key_len;
+ u8 key[MAX_SHA_BLOCK_SIZE];
+ u8 ipad[MAX_SHA_BLOCK_SIZE];
+ u8 opad[MAX_SHA_BLOCK_SIZE];
+ struct crypto_ahash *hmac_tfm;
+};
+
+struct ccp_sha_req_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+ unsigned int final;
+
+ struct scatterlist *src;
+ unsigned int nbytes;
+
+ u64 hash_cnt;
+ unsigned int hash_rem;
+
+ struct sg_table data_sg;
+
+ struct scatterlist ctx_sg;
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ struct scatterlist buf_sg;
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+
+ /* HMAC support field */
+ struct scatterlist pad_sg;
+
+ /* CCP driver command */
+ struct ccp_cmd cmd;
+};
+
+/***** Common Context Structure *****/
+struct ccp_ctx {
+ int (*complete)(struct crypto_async_request *req, int ret);
+
+ union {
+ struct ccp_aes_ctx aes;
+ struct ccp_sha_ctx sha;
+ } u;
+};
+
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+ struct ccp_cmd *cmd);
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+ struct scatterlist *sg_add);
+
+int ccp_register_aes_algs(struct list_head *head);
+int ccp_register_aes_cmac_algs(struct list_head *head);
+int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_sha_algs(struct list_head *head);
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
new file mode 100644
index 000000000000..c3bc21264600
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -0,0 +1,595 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/cpu.h>
+#include <asm/cpu_device_id.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
+
+
+static struct ccp_device *ccp_dev;
+static inline struct ccp_device *ccp_get_device(void)
+{
+ return ccp_dev;
+}
+
+static inline void ccp_add_device(struct ccp_device *ccp)
+{
+ ccp_dev = ccp;
+}
+
+static inline void ccp_del_device(struct ccp_device *ccp)
+{
+ ccp_dev = NULL;
+}
+
+/**
+ * ccp_enqueue_cmd - queue an operation for processing by the CCP
+ *
+ * @cmd: ccp_cmd struct to be processed
+ *
+ * Queue a cmd to be processed by the CCP. If queueing the cmd
+ * would exceed the defined length of the cmd queue, the cmd will
+ * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, in which
+ * case the return code will be -EBUSY.
+ *
+ * The callback routine specified in the ccp_cmd struct will be
+ * called to notify the caller of completion (if the cmd was not
+ * backlogged) or advancement out of the backlog. If the cmd has
+ * advanced out of the backlog the "err" value of the callback
+ * will be -EINPROGRESS. Any other "err" value during callback is
+ * the result of the operation.
+ *
+ * The cmd has been successfully queued if:
+ * the return code is -EINPROGRESS or
+ * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
+ */
+int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+{
+ struct ccp_device *ccp = ccp_get_device();
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ if (!ccp)
+ return -ENODEV;
+
+ /* Caller must supply a callback routine */
+ if (!cmd->callback)
+ return -EINVAL;
+
+ cmd->ccp = ccp;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ i = ccp->cmd_q_count;
+
+ if (ccp->cmd_count >= MAX_CMD_QLEN) {
+ ret = -EBUSY;
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -EINPROGRESS;
+ ccp->cmd_count++;
+ list_add_tail(&cmd->entry, &ccp->cmd);
+
+ /* Find an idle queue */
+ if (!ccp->suspending) {
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ if (ccp->cmd_q[i].active)
+ continue;
+
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* If we found an idle queue, wake it up */
+ if (i < ccp->cmd_q_count)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
+
+static void ccp_do_cmd_backlog(struct work_struct *work)
+{
+ struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+ struct ccp_device *ccp = cmd->ccp;
+ unsigned long flags;
+ unsigned int i;
+
+ cmd->callback(cmd->data, -EINPROGRESS);
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->cmd_count++;
+ list_add_tail(&cmd->entry, &ccp->cmd);
+
+ /* Find an idle queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ if (ccp->cmd_q[i].active)
+ continue;
+
+ break;
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* If we found an idle queue, wake it up */
+ if (i < ccp->cmd_q_count)
+ wake_up_process(ccp->cmd_q[i].kthread);
+}
+
+static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
+{
+ struct ccp_device *ccp = cmd_q->ccp;
+ struct ccp_cmd *cmd = NULL;
+ struct ccp_cmd *backlog = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ cmd_q->active = 0;
+
+ if (ccp->suspending) {
+ cmd_q->suspended = 1;
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+ wake_up_interruptible(&ccp->suspend_queue);
+
+ return NULL;
+ }
+
+ if (ccp->cmd_count) {
+ cmd_q->active = 1;
+
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+
+ ccp->cmd_count--;
+ }
+
+ if (!list_empty(&ccp->backlog)) {
+ backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
+ entry);
+ list_del(&backlog->entry);
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ if (backlog) {
+ INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
+ schedule_work(&backlog->work);
+ }
+
+ return cmd;
+}
+
+static void ccp_do_cmd_complete(struct work_struct *work)
+{
+ struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+
+ cmd->callback(cmd->data, cmd->ret);
+}
+
+static int ccp_cmd_queue_thread(void *data)
+{
+ struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
+ struct ccp_cmd *cmd;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ cmd = ccp_dequeue_cmd(cmd_q);
+ if (!cmd)
+ continue;
+
+ __set_current_state(TASK_RUNNING);
+
+ /* Execute the command */
+ cmd->ret = ccp_run_cmd(cmd_q, cmd);
+
+ /* Schedule the completion callback */
+ INIT_WORK(&cmd->work, ccp_do_cmd_complete);
+ schedule_work(&cmd->work);
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /*
+ * Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+ /* Zero is returned if no data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
+/**
+ * ccp_alloc_struct - allocate and initialize the ccp_device struct
+ *
+ * @dev: device struct of the CCP
+ */
+struct ccp_device *ccp_alloc_struct(struct device *dev)
+{
+ struct ccp_device *ccp;
+
+ ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
+ if (ccp == NULL) {
+ dev_err(dev, "unable to allocate device struct\n");
+ return NULL;
+ }
+ ccp->dev = dev;
+
+ INIT_LIST_HEAD(&ccp->cmd);
+ INIT_LIST_HEAD(&ccp->backlog);
+
+ spin_lock_init(&ccp->cmd_lock);
+ mutex_init(&ccp->req_mutex);
+ mutex_init(&ccp->ksb_mutex);
+ ccp->ksb_count = KSB_COUNT;
+ ccp->ksb_start = 0;
+
+ return ccp;
+}
+
+/**
+ * ccp_init - initialize the CCP device
+ *
+ * @ccp: ccp_device struct
+ */
+int ccp_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+ if (!dma_pool) {
+ dev_err(dev, "unable to allocate dma pool\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+
+ /* Reserve 2 KSB regions for the queue */
+ cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+ cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+ ccp->ksb_count -= 2;
+
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
+ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->int_ok = 1 << (i * 2);
+ cmd_q->int_err = 1 << ((i * 2) + 1);
+
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queues used to wait for KSB space and suspend */
+ init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "ccp-q%u", cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ /* Register the RNG */
+ ccp->hwrng.name = "ccp-rng";
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret) {
+ dev_err(dev, "error registering hwrng (%d)\n", ret);
+ goto e_kthread;
+ }
+
+ /* Make the device struct available before enabling interrupts */
+ ccp_add_device(ccp);
+
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ return 0;
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+/**
+ * ccp_destroy - tear down the CCP device
+ *
+ * @ccp: ccp_device struct
+ */
+void ccp_destroy(struct ccp_device *ccp)
+{
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int qim, i;
+
+ /* Remove general access to the device struct */
+ ccp_del_device(ccp);
+
+ /* Unregister the RNG */
+ hwrng_unregister(&ccp->hwrng);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ /* Build queue interrupt mask (two interrupt masks per queue) */
+ qim = 0;
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+ }
+
+ /* Disable and clear interrupts */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+/**
+ * ccp_irq_handler - handle interrupts generated by the CCP device
+ *
+ * @irq: the irq associated with the interrupt
+ * @data: the data value supplied when the irq was created
+ */
+irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
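+/**
+ * ccp_queues_suspended - check if all the command queues are suspended
+ *
+ * @ccp: ccp_device struct
+ */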
+bool ccp_queues_suspended(struct ccp_device *ccp)
+{
+ unsigned int suspended = 0;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].suspended)
+ suspended++;
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ return ccp->cmd_q_count == suspended;
+}
+#endif
+
+static const struct x86_cpu_id ccp_support[] = {
+	{ X86_VENDOR_AMD, 22, },
+	{ },	/* x86_match_cpu() requires a zero-terminated table */
+};
+
+static int __init ccp_mod_init(void)
+{
+ struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
+ int ret;
+
+ if (!x86_match_cpu(ccp_support))
+ return -ENODEV;
+
+ switch (cpuinfo->x86) {
+ case 22:
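+		/* Only family 22 (16h) models 48-63 (30h-3fh) are supported */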
+ if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
+ return -ENODEV;
+
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
+
+ /* Don't leave the driver loaded if init failed */
+ if (!ccp_get_device()) {
+ ccp_pci_exit();
+ return -ENODEV;
+ }
+
+		return 0;
+	}
+
+ return -ENODEV;
+}
+
+static void __exit ccp_mod_exit(void)
+{
+ struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
+
+ switch (cpuinfo->x86) {
+ case 22:
+ ccp_pci_exit();
+ break;
+ }
+}
+
+module_init(ccp_mod_init);
+module_exit(ccp_mod_exit);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
new file mode 100644
index 000000000000..7ec536e702ec
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -0,0 +1,272 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_DEV_H__
+#define __CCP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+
+
+#define IO_OFFSET 0x20000
+
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 5
+#define MAX_CMD_QLEN 100
+
+#define TRNG_RETRIES 10
+
+
+/****** Register Mappings ******/
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define DEL_CMD_Q_JOB 0x124
+#define DEL_Q_ACTIVE 0x00000200
+#define DEL_Q_ID_SHIFT 6
+
+#define CMD_REQ0 0x180
+#define CMD_REQ_INCR 0x04
+
+#define CMD_Q_STATUS_BASE 0x210
+#define CMD_Q_INT_STATUS_BASE 0x214
+#define CMD_Q_STATUS_INCR 0x20
+
+#define CMD_Q_CACHE 0x228
+#define CMD_Q_CACHE_INC 0x20
+
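+/* Extract the error code and the free command slot count from a queue
+ * status register value
+ */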
+#define CMD_Q_ERROR(__qs)	((__qs) & 0x0000003f)
+#define CMD_Q_DEPTH(__qs)	(((__qs) >> 12) & 0x0000000f)
+
+/****** REQ0 Related Values ******/
+#define REQ0_WAIT_FOR_WRITE 0x00000004
+#define REQ0_INT_ON_COMPLETE 0x00000002
+#define REQ0_STOP_ON_COMPLETE 0x00000001
+
+#define REQ0_CMD_Q_SHIFT 9
+#define REQ0_JOBID_SHIFT 3
+
+/****** REQ1 Related Values ******/
+#define REQ1_PROTECT_SHIFT 27
+#define REQ1_ENGINE_SHIFT 23
+#define REQ1_KEY_KSB_SHIFT 2
+
+#define REQ1_EOM 0x00000002
+#define REQ1_INIT 0x00000001
+
+/* AES Related Values */
+#define REQ1_AES_TYPE_SHIFT 21
+#define REQ1_AES_MODE_SHIFT 18
+#define REQ1_AES_ACTION_SHIFT 17
+#define REQ1_AES_CFB_SIZE_SHIFT 10
+
+/* XTS-AES Related Values */
+#define REQ1_XTS_AES_SIZE_SHIFT 10
+
+/* SHA Related Values */
+#define REQ1_SHA_TYPE_SHIFT 21
+
+/* RSA Related Values */
+#define REQ1_RSA_MOD_SIZE_SHIFT 10
+
+/* Pass-Through Related Values */
+#define REQ1_PT_BW_SHIFT 12
+#define REQ1_PT_BS_SHIFT 10
+
+/* ECC Related Values */
+#define REQ1_ECC_AFFINE_CONVERT 0x00200000
+#define REQ1_ECC_FUNCTION_SHIFT 18
+
+/****** REQ4 Related Values ******/
+#define REQ4_KSB_SHIFT 18
+#define REQ4_MEMTYPE_SHIFT 16
+
+/****** REQ6 Related Values ******/
+#define REQ6_MEMTYPE_SHIFT 16
+
+
+/****** Key Storage Block ******/
+#define KSB_START 77
+#define KSB_END 127
+#define KSB_COUNT (KSB_END - KSB_START + 1)
+#define CCP_KSB_BITS 256
+#define CCP_KSB_BYTES 32
+
+#define CCP_JOBID_MASK 0x0000003f
+
+#define CCP_DMAPOOL_MAX_SIZE 64
+#define CCP_DMAPOOL_ALIGN (1 << 5)
+
+#define CCP_REVERSE_BUF_SIZE 64
+
+#define CCP_AES_KEY_KSB_COUNT 1
+#define CCP_AES_CTX_KSB_COUNT 1
+
+#define CCP_XTS_AES_KEY_KSB_COUNT 1
+#define CCP_XTS_AES_CTX_KSB_COUNT 1
+
+#define CCP_SHA_KSB_COUNT 1
+
+#define CCP_RSA_MAX_WIDTH 4096
+
+#define CCP_PASSTHRU_BLOCKSIZE 256
+#define CCP_PASSTHRU_MASKSIZE 32
+#define CCP_PASSTHRU_KSB_COUNT 1
+
+#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
+#define CCP_ECC_MAX_OPERANDS 6
+#define CCP_ECC_MAX_OUTPUTS 3
+#define CCP_ECC_SRC_BUF_SIZE 448
+#define CCP_ECC_DST_BUF_SIZE 192
+#define CCP_ECC_OPERAND_SIZE 64
+#define CCP_ECC_OUTPUT_SIZE 64
+#define CCP_ECC_RESULT_OFFSET 60
+#define CCP_ECC_RESULT_SUCCESS 0x0001
+
+
+struct ccp_device;
+struct ccp_cmd;
+
+struct ccp_cmd_queue {
+ struct ccp_device *ccp;
+
+ /* Queue identifier */
+ u32 id;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+ /* Queue reserved KSB regions */
+ u32 ksb_key;
+ u32 ksb_ctx;
+
+ /* Queue processing thread */
+ struct task_struct *kthread;
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Number of free command slots available */
+ unsigned int free_slots;
+
+ /* Interrupt masks */
+ u32 int_ok;
+ u32 int_err;
+
+ /* Register addresses for queue */
+ void __iomem *reg_status;
+ void __iomem *reg_int_status;
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+
+ /* Interrupt wait queue */
+ wait_queue_head_t int_queue;
+ unsigned int int_rcvd;
+} ____cacheline_aligned;
+
+struct ccp_device {
+ struct device *dev;
+
+ /*
+ * Bus specific device information
+ */
+ void *dev_specific;
+ int (*get_irq)(struct ccp_device *ccp);
+ void (*free_irq)(struct ccp_device *ccp);
+
+ /*
+ * I/O area used for device communication. The register mapping
+ * starts at an offset into the mapped bar.
+ * The CMD_REQx registers and the Delete_Cmd_Queue_Job register
+ * need to be protected while a command queue thread is accessing
+ * them.
+ */
+ struct mutex req_mutex ____cacheline_aligned;
+ void __iomem *io_map;
+ void __iomem *io_regs;
+
+ /*
+ * Master lists that all cmds are queued on. Because there can be
+ * more than one CCP command queue that can process a cmd a separate
+	 * backlog list is needed so that the backlog completion call
+ * completes before the cmd is available for execution.
+ */
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+ struct list_head backlog;
+
+ /*
+ * The command queues. These represent the queues available on the
+	 * CCP that can process cmds
+ */
+ struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
+ unsigned int cmd_q_count;
+
+ /*
+ * Support for the CCP True RNG
+ */
+ struct hwrng hwrng;
+ unsigned int hwrng_retries;
+
+ /*
+ * A counter used to generate job-ids for cmds submitted to the CCP
+ */
+ atomic_t current_id ____cacheline_aligned;
+
+ /*
+ * The CCP uses key storage blocks (KSB) to maintain context for certain
+ * operations. To prevent multiple cmds from using the same KSB range
+ * a command queue reserves a KSB range for the duration of the cmd.
+	 * Each queue will, however, reserve 2 KSB blocks for operations that
+	 * only require single KSB entries (e.g. AES context/iv and key) in order
+ * to avoid allocation contention. This will reserve at most 10 KSB
+ * entries, leaving 40 KSB entries available for dynamic allocation.
+ */
+ struct mutex ksb_mutex ____cacheline_aligned;
+ DECLARE_BITMAP(ksb, KSB_COUNT);
+ wait_queue_head_t ksb_queue;
+ unsigned int ksb_avail;
+ unsigned int ksb_count;
+ u32 ksb_start;
+
+ /* Suspend support */
+ unsigned int suspending;
+ wait_queue_head_t suspend_queue;
+};
+
+
+int ccp_pci_init(void);
+void ccp_pci_exit(void);
+
+struct ccp_device *ccp_alloc_struct(struct device *dev);
+int ccp_init(struct ccp_device *ccp);
+void ccp_destroy(struct ccp_device *ccp);
+bool ccp_queues_suspended(struct ccp_device *ccp);
+
+irqreturn_t ccp_irq_handler(int irq, void *data);
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
new file mode 100644
index 000000000000..71ed3ade7e12
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -0,0 +1,2024 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-dev.h"
+
+
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+ dma_addr_t address;
+ unsigned int offset;
+ unsigned int length;
+ enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+ struct device *dev;
+ struct dma_pool *dma_pool;
+ unsigned int length;
+
+ u8 *address;
+ struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+ struct scatterlist *sg;
+ unsigned int nents;
+ unsigned int length;
+
+ struct scatterlist *dma_sg;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+
+ unsigned int sg_used;
+
+ u64 bytes_left;
+};
+
+struct ccp_data {
+ struct ccp_sg_workarea sg_wa;
+ struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+ enum ccp_memtype type;
+ union {
+ struct ccp_dma_info dma;
+ u32 ksb;
+ } u;
+};
+
+struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+ enum ccp_sha_type type;
+ u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+ u32 mod_size;
+ u32 input_len;
+};
+
+struct ccp_passthru_op {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+ enum ccp_ecc_function function;
+};
+
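+/* Describes a single operation to be submitted to the CCP: the values
+ * here are used to build the CMD_REQ register contents
+ */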
+struct ccp_op {
+ struct ccp_cmd_queue *cmd_q;
+
+ u32 jobid;
+ u32 ioc;
+ u32 soc;
+ u32 ksb_key;
+ u32 ksb_ctx;
+ u32 init;
+ u32 eom;
+
+ struct ccp_mem src;
+ struct ccp_mem dst;
+
+ union {
+ struct ccp_aes_op aes;
+ struct ccp_xts_aes_op xts;
+ struct ccp_sha_op sha;
+ struct ccp_rsa_op rsa;
+ struct ccp_passthru_op passthru;
+ struct ccp_ecc_op ecc;
+ } u;
+};
+
+/* The CCP cannot perform zero-length sha operations so the caller
+ * is required to buffer data for the final operation. However, a
+ * sha operation for a message with a total length of zero is valid
+ * so the known digest values are used to supply the result.
+ */
+static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
+ 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
+ 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
+ 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
+};
+
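+/* Only the low 16 bits of the upper dword of a DMA address are used,
+ * i.e. the CCP is passed (at most) 48-bit addresses
+ */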
+static u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+ return lower_32_bits(info->address + info->offset);
+}
+
+static u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+ return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
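+/* Write the prepared CMD_REQ1-CMD_REQx values, then write CMD_REQ0 to
+ * start the job. If an interrupt on completion was requested, wait for
+ * it and delete the affected job(s) from the queue on error or on
+ * stop-on-complete.
+ */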
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+ struct ccp_cmd_queue *cmd_q = op->cmd_q;
+ struct ccp_device *ccp = cmd_q->ccp;
+ void __iomem *cr_addr;
+ u32 cr0, cmd;
+ unsigned int i;
+ int ret = 0;
+
+ /* We could read a status register to see how many free slots
+ * are actually available, but reading that register resets it
+ * and you could lose some error information.
+ */
+ cmd_q->free_slots--;
+
+ cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+ | (op->jobid << REQ0_JOBID_SHIFT)
+ | REQ0_WAIT_FOR_WRITE;
+
+ if (op->soc)
+ cr0 |= REQ0_STOP_ON_COMPLETE
+ | REQ0_INT_ON_COMPLETE;
+
+ if (op->ioc || !cmd_q->free_slots)
+ cr0 |= REQ0_INT_ON_COMPLETE;
+
+ /* Start at CMD_REQ1 */
+ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+ mutex_lock(&ccp->req_mutex);
+
+ /* Write CMD_REQ1 through CMD_REQx first */
+ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+ iowrite32(*(cr + i), cr_addr);
+
+ /* Tell the CCP to start */
+ wmb();
+ iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+ mutex_unlock(&ccp->req_mutex);
+
+ if (cr0 & REQ0_INT_ON_COMPLETE) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ /* On error delete all related jobs from the queue */
+ cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+ if (!ret)
+ ret = -EIO;
+ } else if (op->soc) {
+ /* Delete just head job from the queue on SoC */
+ cmd = DEL_Q_ACTIVE
+ | (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+ }
+
+ cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 0;
+ }
+
+ return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+ | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+ | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+ | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+ | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+ | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+ | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+ | REQ1_INIT;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->eom) {
+ cr[0] |= REQ1_EOM;
+ cr[4] = lower_32_bits(op->u.sha.msg_bits);
+ cr[5] = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ cr[4] = 0;
+ cr[5] = 0;
+ }
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+ | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->u.rsa.input_len - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+ | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+ | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ cr[1] = op->src.u.dma.length - 1;
+ else
+ cr[1] = op->dst.u.dma.length - 1;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ } else {
+ cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+ cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+ } else {
+ cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+ cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ }
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = REQ1_ECC_AFFINE_CONVERT
+ | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+ | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
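+/* Reserve a contiguous range of KSB entries, sleeping until space is
+ * freed if necessary; returns 0 if the wait is interrupted
+ */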
+static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
+{
+ int start;
+
+ for (;;) {
+ mutex_lock(&ccp->ksb_mutex);
+
+ start = (u32)bitmap_find_next_zero_area(ccp->ksb,
+ ccp->ksb_count,
+ ccp->ksb_start,
+ count, 0);
+ if (start <= ccp->ksb_count) {
+ bitmap_set(ccp->ksb, start, count);
+
+ mutex_unlock(&ccp->ksb_mutex);
+ break;
+ }
+
+ ccp->ksb_avail = 0;
+
+ mutex_unlock(&ccp->ksb_mutex);
+
+ /* Wait for KSB entries to become available */
+ if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
+ return 0;
+ }
+
+ return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
+ unsigned int count)
+{
+ if (!start)
+ return;
+
+ mutex_lock(&ccp->ksb_mutex);
+
+ bitmap_clear(ccp->ksb, start - KSB_START, count);
+
+ ccp->ksb_avail = 1;
+
+ mutex_unlock(&ccp->ksb_mutex);
+
+ wake_up_interruptible_all(&ccp->ksb_queue);
+}
+
+static u32 ccp_gen_jobid(struct ccp_device *ccp)
+{
+ return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
+}
+
+static void ccp_sg_free(struct ccp_sg_workarea *wa)
+{
+ if (wa->dma_count)
+ dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+
+ wa->dma_count = 0;
+}
+
+static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
+ struct scatterlist *sg, u64 len,
+ enum dma_data_direction dma_dir)
+{
+ memset(wa, 0, sizeof(*wa));
+
+ wa->sg = sg;
+ if (!sg)
+ return 0;
+
+ wa->nents = sg_nents(sg);
+ wa->length = sg->length;
+ wa->bytes_left = len;
+ wa->sg_used = 0;
+
+ if (len == 0)
+ return 0;
+
+ if (dma_dir == DMA_NONE)
+ return 0;
+
+ wa->dma_sg = sg;
+ wa->dma_dev = dev;
+ wa->dma_dir = dma_dir;
+ wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
+ if (!wa->dma_count)
+ return -ENOMEM;
+
+	return 0;
+}
+
+static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
+{
+ unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+
+ if (!wa->sg)
+ return;
+
+ wa->sg_used += nbytes;
+ wa->bytes_left -= nbytes;
+ if (wa->sg_used == wa->sg->length) {
+ wa->sg = sg_next(wa->sg);
+ wa->sg_used = 0;
+ }
+}
+
+static void ccp_dm_free(struct ccp_dm_workarea *wa)
+{
+ if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
+ if (wa->address)
+ dma_pool_free(wa->dma_pool, wa->address,
+ wa->dma.address);
+ } else {
+ if (wa->dma.address)
+ dma_unmap_single(wa->dev, wa->dma.address, wa->length,
+ wa->dma.dir);
+ kfree(wa->address);
+ }
+
+ wa->address = NULL;
+ wa->dma.address = 0;
+}
+
+static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+ struct ccp_cmd_queue *cmd_q,
+ unsigned int len,
+ enum dma_data_direction dir)
+{
+ memset(wa, 0, sizeof(*wa));
+
+ if (!len)
+ return 0;
+
+ wa->dev = cmd_q->ccp->dev;
+ wa->length = len;
+
+ if (len <= CCP_DMAPOOL_MAX_SIZE) {
+ wa->dma_pool = cmd_q->dma_pool;
+
+ wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
+ &wa->dma.address);
+ if (!wa->address)
+ return -ENOMEM;
+
+ wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
+
+ memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
+ } else {
+ wa->address = kzalloc(len, GFP_KERNEL);
+ if (!wa->address)
+ return -ENOMEM;
+
+ wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+ dir);
+ if (!wa->dma.address)
+ return -ENOMEM;
+
+ wa->dma.length = len;
+ }
+ wa->dma.dir = dir;
+
+ return 0;
+}
+
+static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
+{
+ WARN_ON(!wa->address);
+
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 0);
+}
+
+static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
+{
+ WARN_ON(!wa->address);
+
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 1);
+}
+
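+/* Copy data from the scatterlist into the workarea with the byte order
+ * reversed (big endian in, little endian out), optionally sign-extending
+ * partial chunks
+ */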
+static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+ struct scatterlist *sg,
+ unsigned int len, unsigned int se_len,
+ bool sign_extend)
+{
+ unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ u8 buffer[CCP_REVERSE_BUF_SIZE];
+
+ BUG_ON(se_len > sizeof(buffer));
+
+ sg_offset = len;
+ dm_offset = 0;
+ nbytes = len;
+ while (nbytes) {
+ ksb_len = min_t(unsigned int, nbytes, se_len);
+ sg_offset -= ksb_len;
+
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
+ for (i = 0; i < ksb_len; i++)
+ wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
+
+ dm_offset += ksb_len;
+ nbytes -= ksb_len;
+
+ if ((ksb_len != se_len) && sign_extend) {
+ /* Must sign-extend to nearest sign-extend length */
+ if (wa->address[dm_offset - 1] & 0x80)
+ memset(wa->address + dm_offset, 0xff,
+ se_len - ksb_len);
+ }
+ }
+}
+
+static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
+ struct scatterlist *sg,
+ unsigned int len)
+{
+ unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ u8 buffer[CCP_REVERSE_BUF_SIZE];
+
+ sg_offset = 0;
+ dm_offset = len;
+ nbytes = len;
+ while (nbytes) {
+ ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
+ dm_offset -= ksb_len;
+
+ for (i = 0; i < ksb_len; i++)
+ buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
+
+ sg_offset += ksb_len;
+ nbytes -= ksb_len;
+ }
+}
+
+static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
+{
+ ccp_dm_free(&data->dm_wa);
+ ccp_sg_free(&data->sg_wa);
+}
+
+static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
+ struct scatterlist *sg, u64 sg_len,
+ unsigned int dm_len,
+ enum dma_data_direction dir)
+{
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
+ dir);
+ if (ret)
+ goto e_err;
+
+ ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
+ if (ret)
+ goto e_err;
+
+ return 0;
+
+e_err:
+ ccp_free_data(data, cmd_q);
+
+ return ret;
+}
+
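+/* Copy between an sg list and the bounce (dm) buffer; used when an sg
+ * element does not contain a full block of data on its own
+ */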
+static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
+{
+ struct ccp_sg_workarea *sg_wa = &data->sg_wa;
+ struct ccp_dm_workarea *dm_wa = &data->dm_wa;
+ unsigned int buf_count, nbytes;
+
+ /* Clear the buffer if setting it */
+ if (!from)
+ memset(dm_wa->address, 0, dm_wa->length);
+
+ if (!sg_wa->sg)
+ return 0;
+
+ /* Perform the copy operation
+ * nbytes will always be <= UINT_MAX because dm_wa->length is
+ * an unsigned int
+ */
+ nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
+ scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
+ nbytes, from);
+
+ /* Update the structures and generate the count */
+ buf_count = 0;
+ while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
+ nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+ dm_wa->length - buf_count);
+ nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
+
+ buf_count += nbytes;
+ ccp_update_sg_workarea(sg_wa, nbytes);
+ }
+
+ return buf_count;
+}
+
+static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
+{
+ return ccp_queue_buf(data, 0);
+}
+
+static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
+{
+ return ccp_queue_buf(data, 1);
+}
+
+static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+ struct ccp_op *op, unsigned int block_size,
+ bool blocksize_op)
+{
+ unsigned int sg_src_len, sg_dst_len, op_len;
+
+ /* The CCP can only DMA from/to one address each per operation. This
+ * requires that we find the smallest DMA area between the source
+ * and destination. The resulting len values will always be <= UINT_MAX
+ * because the dma length is an unsigned int.
+ */
+ sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+ sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
+
+ if (dst) {
+ sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+ sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
+ op_len = min(sg_src_len, sg_dst_len);
+ } else
+ op_len = sg_src_len;
+
+ /* The data operation length will be at least block_size in length
+ * or the smaller of available sg room remaining for the source or
+ * the destination
+ */
+ op_len = max(op_len, block_size);
+
+ /* Unless we have to buffer data, there's no reason to wait */
+ op->soc = 0;
+
+ if (sg_src_len < block_size) {
+ /* Not enough data in the sg element, so it
+ * needs to be buffered into a blocksize chunk
+ */
+ int cp_len = ccp_fill_queue_buf(src);
+
+ op->soc = 1;
+ op->src.u.dma.address = src->dm_wa.dma.address;
+ op->src.u.dma.offset = 0;
+ op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
+ } else {
+ /* Enough data in the sg element, but we need to
+ * adjust for any previously copied data
+ */
+ op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+ op->src.u.dma.offset = src->sg_wa.sg_used;
+ op->src.u.dma.length = op_len & ~(block_size - 1);
+
+ ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
+ }
+
+ if (dst) {
+ if (sg_dst_len < block_size) {
+ /* Not enough room in the sg element or we're on the
+ * last piece of data (when using padding), so the
+ * output needs to be buffered into a blocksize chunk
+ */
+ op->soc = 1;
+ op->dst.u.dma.address = dst->dm_wa.dma.address;
+ op->dst.u.dma.offset = 0;
+ op->dst.u.dma.length = op->src.u.dma.length;
+ } else {
+ /* Enough room in the sg element, but we need to
+ * adjust for any previously used area
+ */
+ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+ op->dst.u.dma.offset = dst->sg_wa.sg_used;
+ op->dst.u.dma.length = op->src.u.dma.length;
+ }
+ }
+}
+
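+/* After an operation: clear the init flag and either drain the output
+ * bounce buffer or advance past the consumed destination sg data
+ */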
+static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
+ struct ccp_op *op)
+{
+ op->init = 0;
+
+ if (dst) {
+ if (op->dst.u.dma.address == dst->dm_wa.dma.address)
+ ccp_empty_queue_buf(dst);
+ else
+ ccp_update_sg_workarea(&dst->sg_wa,
+ op->dst.u.dma.length);
+ }
+}
+
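+/* Use a passthru operation to copy a workarea to or from a KSB entry */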
+static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap, bool from)
+{
+ struct ccp_op op;
+
+ memset(&op, 0, sizeof(op));
+
+ op.cmd_q = cmd_q;
+ op.jobid = jobid;
+ op.eom = 1;
+
+ if (from) {
+ op.soc = 1;
+ op.src.type = CCP_MEMTYPE_KSB;
+ op.src.u.ksb = ksb;
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = wa->dma.address;
+ op.dst.u.dma.length = wa->length;
+ } else {
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = wa->dma.address;
+ op.src.u.dma.length = wa->length;
+ op.dst.type = CCP_MEMTYPE_KSB;
+ op.dst.u.ksb = ksb;
+ }
+
+ op.u.passthru.byte_swap = byte_swap;
+
+ return ccp_perform_passthru(&op);
+}
+
+static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap)
+{
+ return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
+}
+
+static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+ u32 byte_swap)
+{
+ return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
+}
+
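+/* AES CMAC mode: load the key and IV into the queue's reserved KSB
+ * entries, stream the message a block at a time (pushing the K1/K2
+ * key into the KSB before the final block), then read the resulting
+ * MAC back out of the KSB
+ */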
+static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src;
+ struct ccp_op op;
+ unsigned int dm_offset;
+ int ret;
+
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+ return -EINVAL;
+
+ if (aes->src_len & (AES_BLOCK_SIZE - 1))
+ return -EINVAL;
+
+ if (aes->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->key || !aes->iv || !aes->src)
+ return -EINVAL;
+
+ if (aes->cmac_final) {
+ if (aes->cmac_key_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->cmac_key)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = 1;
+ op.u.aes.type = aes->type;
+ op.u.aes.mode = aes->mode;
+ op.u.aes.action = aes->action;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - aes->key_len;
+ ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Send data to the CCP AES engine */
+ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
+ if (aes->cmac_final && !src.sg_wa.bytes_left) {
+ op.eom = 1;
+
+ /* Push the K1/K2 key to the CCP now */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
+ op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+ aes->cmac_key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+ }
+
+ ret = ccp_perform_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ ccp_process_data(&src, NULL, &op);
+ }
+
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_src;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ unsigned int dm_offset;
+ bool in_place = false;
+ int ret;
+
+ if (aes->mode == CCP_AES_MODE_CMAC)
+ return ccp_run_aes_cmac_cmd(cmd_q, cmd);
+
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+ return -EINVAL;
+
+ if (((aes->mode == CCP_AES_MODE_ECB) ||
+ (aes->mode == CCP_AES_MODE_CBC) ||
+ (aes->mode == CCP_AES_MODE_CFB)) &&
+ (aes->src_len & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (!aes->key || !aes->src || !aes->dst)
+ return -EINVAL;
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ if (aes->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!aes->iv)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
+ op.u.aes.type = aes->type;
+ op.u.aes.mode = aes->mode;
+ op.u.aes.action = aes->action;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - aes->key_len;
+ ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+		/* Load the AES context - convert to little endian */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(aes->src) == sg_virt(aes->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+ AES_BLOCK_SIZE,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
+ AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP AES engine */
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+ op.eom = 1;
+
+ /* Since we don't retrieve the AES context in ECB
+ * mode we have to wait for the operation to complete
+ * on the last piece of data
+ */
+ if (aes->mode == CCP_AES_MODE_ECB)
+ op.soc = 1;
+ }
+
+ ret = ccp_perform_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ }
+
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ }
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_xts_aes_engine *xts = &cmd->u.xts;
+ struct ccp_dm_workarea key, ctx;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ unsigned int unit_size, dm_offset;
+ bool in_place = false;
+ int ret;
+
+ switch (xts->unit_size) {
+ case CCP_XTS_AES_UNIT_SIZE_16:
+ unit_size = 16;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_512:
+ unit_size = 512;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_1024:
+ unit_size = 1024;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_2048:
+ unit_size = 2048;
+ break;
+ case CCP_XTS_AES_UNIT_SIZE_4096:
+ unit_size = 4096;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (xts->key_len != AES_KEYSIZE_128)
+ return -EINVAL;
+
+ if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if (xts->iv_len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!xts->key || !xts->iv || !xts->src || !xts->dst)
+ return -EINVAL;
+
+ BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
+
+ ret = -EIO;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = cmd_q->ksb_key;
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.init = 1;
+ op.u.xts.action = xts->action;
+ op.u.xts.unit_size = xts->unit_size;
+
+ /* All supported key sizes fit in a single (32-byte) KSB entry
+ * and must be in little endian format. Use the 256-bit byte
+ * swap passthru option to convert from big endian to little
+ * endian.
+ */
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+ CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_key;
+ }
+
+ /* The AES context fits in a single (32-byte) KSB entry and
+ * for XTS is already in little endian format so no byte swapping
+ * is needed.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_key;
+
+ ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(xts->src) == sg_virt(xts->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
+ unit_size,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
+ unit_size, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP AES engine */
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, unit_size, true);
+ if (!src.sg_wa.bytes_left)
+ op.eom = 1;
+
+ ret = ccp_perform_xts_aes(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_process_data(&src, &dst, &op);
+ }
+
+ /* Retrieve the AES context - convert from LE to BE using
+ * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ /* ...but we only need AES_BLOCK_SIZE bytes */
+ dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+e_key:
+ ccp_dm_free(&key);
+
+ return ret;
+}
+
+static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_sha_engine *sha = &cmd->u.sha;
+ struct ccp_dm_workarea ctx;
+ struct ccp_data src;
+ struct ccp_op op;
+ int ret;
+
+ if (sha->ctx_len != CCP_SHA_CTXSIZE)
+ return -EINVAL;
+
+ if (!sha->ctx)
+ return -EINVAL;
+
+ if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!sha->src_len) {
+ const u8 *sha_zero;
+
+ /* Not final, just return */
+ if (!sha->final)
+ return 0;
+
+ /* CCP can't do a zero length sha operation so the caller
+ * must buffer the data.
+ */
+ if (sha->msg_bits)
+ return -EINVAL;
+
+ /* A sha operation for a message with a total length of zero,
+ * return known result.
+ */
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ sha_zero = ccp_sha1_zero;
+ break;
+ case CCP_SHA_TYPE_224:
+ sha_zero = ccp_sha224_zero;
+ break;
+ case CCP_SHA_TYPE_256:
+ sha_zero = ccp_sha256_zero;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
+ sha->ctx_len, 1);
+
+ return 0;
+ }
+
+ if (!sha->src)
+ return -EINVAL;
+
+ BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_ctx = cmd_q->ksb_ctx;
+ op.u.sha.type = sha->type;
+ op.u.sha.msg_bits = sha->msg_bits;
+
+ /* The SHA context fits in a single (32-byte) KSB entry and
+ * must be in little endian format. Use the 256-bit byte swap
+ * passthru option to convert from big endian to little endian.
+ */
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+ }
+
+ /* Send data to the CCP SHA engine */
+ ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
+ CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
+
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
+ if (sha->final && !src.sg_wa.bytes_left)
+ op.eom = 1;
+
+ ret = ccp_perform_sha(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_data;
+ }
+
+ ccp_process_data(&src, NULL, &op);
+ }
+
+ /* Retrieve the SHA context - convert from LE to BE using
+	 * 32-byte (256-bit) byteswapping
+ */
+ ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_data;
+ }
+
+ ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+
+e_data:
+ ccp_free_data(&src, cmd_q);
+
+e_ctx:
+ ccp_dm_free(&ctx);
+
+ return ret;
+}
+
+static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_rsa_engine *rsa = &cmd->u.rsa;
+ struct ccp_dm_workarea exp, src;
+ struct ccp_data dst;
+ struct ccp_op op;
+ unsigned int ksb_count, i_len, o_len;
+ int ret;
+
+ if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ return -EINVAL;
+
+ if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
+ return -EINVAL;
+
+ /* The RSA modulus must precede the message being acted upon, so
+ * it must be copied to a DMA area where the message and the
+ * modulus can be concatenated. Therefore the input buffer
+ * length required is twice the output buffer length (which
+ * must be a multiple of 256-bits).
+ */
+ o_len = ((rsa->key_size + 255) / 256) * 32;
+ i_len = o_len * 2;
+
+ ksb_count = o_len / CCP_KSB_BYTES;
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
+ if (!op.ksb_key)
+ return -EIO;
+
+ /* The RSA exponent may span multiple (32-byte) KSB entries and must
+ * be in little endian format. Reverse copy each 32-byte chunk
+ * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
+ * and each byte within that chunk and do not perform any byte swap
+ * operations on the passthru operation.
+ */
+ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ksb;
+
+ ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
+ true);
+ ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+
+ /* Concatenate the modulus and the message. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
+ if (ret)
+ goto e_exp;
+
+ ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
+ true);
+ src.address += o_len; /* Adjust the address for the copy operation */
+ ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
+ true);
+ src.address -= o_len; /* Reset the address to original value */
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
+ o_len, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = i_len;
+ op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = o_len;
+
+ op.u.rsa.mod_size = rsa->key_size;
+ op.u.rsa.input_len = i_len;
+
+ ret = ccp_perform_rsa(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);
+
+e_dst:
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_dm_free(&src);
+
+e_exp:
+ ccp_dm_free(&exp);
+
+e_ksb:
+ ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+
+ return ret;
+}
+
+static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_passthru_engine *pt = &cmd->u.passthru;
+ struct ccp_dm_workarea mask;
+ struct ccp_data src, dst;
+ struct ccp_op op;
+ bool in_place = false;
+ unsigned int i;
+ int ret;
+
+ if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!pt->src || !pt->dst)
+ return -EINVAL;
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+ return -EINVAL;
+ if (!pt->mask)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ /* Load the mask */
+ op.ksb_key = cmd_q->ksb_key;
+
+ ret = ccp_init_dm_workarea(&mask, cmd_q,
+ CCP_PASSTHRU_KSB_COUNT *
+ CCP_KSB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_mask;
+ }
+ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+ * and copy the src workarea to the dst workarea.
+ */
+ if (sg_virt(pt->src) == sg_virt(pt->dst))
+ in_place = true;
+
+ ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
+ CCP_PASSTHRU_MASKSIZE,
+ in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ if (ret)
+ goto e_mask;
+
+ if (in_place)
+ dst = src;
+ else {
+ ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
+ CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+ }
+
+ /* Send data to the CCP Passthru engine
+ * Because the CCP engine works on a single source and destination
+ * dma address at a time, each entry in the source scatterlist
+ * (after the dma_map_sg call) must be less than or equal to the
+ * (remaining) length in the destination scatterlist entry and the
+ * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
+ */
+ dst.sg_wa.sg_used = 0;
+ for (i = 1; i <= src.sg_wa.dma_count; i++) {
+ if (!dst.sg_wa.sg ||
+ (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+ ret = -EINVAL;
+ goto e_dst;
+ }
+
+ if (i == src.sg_wa.dma_count) {
+ op.eom = 1;
+ op.soc = 1;
+ }
+
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
+
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
+		op.dst.u.dma.offset = dst.sg_wa.sg_used;
+		op.dst.u.dma.length = op.src.u.dma.length;
+
+ ret = ccp_perform_passthru(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ dst.sg_wa.sg_used += src.sg_wa.sg->length;
+ if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+ dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
+ dst.sg_wa.sg_used = 0;
+ }
+ src.sg_wa.sg = sg_next(src.sg_wa.sg);
+ }
+
+e_dst:
+ if (!in_place)
+ ccp_free_data(&dst, cmd_q);
+
+e_src:
+ ccp_free_data(&src, cmd_q);
+
+e_mask:
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ ccp_dm_free(&mask);
+
+ return ret;
+}
+
+static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+ struct ccp_dm_workarea src, dst;
+ struct ccp_op op;
+ int ret;
+ u8 *save;
+
+ if (!ecc->u.mm.operand_1 ||
+ (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
+ if (!ecc->u.mm.operand_2 ||
+ (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (!ecc->u.mm.result ||
+ (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ /* Concatenate the modulus and the operands. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted and placed in a
+ * fixed length buffer.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ /* Save the workarea address since it is updated in order to perform
+ * the concatenation
+ */
+ save = src.address;
+
+ /* Copy the ECC modulus */
+ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Copy the first operand */
+ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
+ ecc->u.mm.operand_1_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
+ /* Copy the second operand */
+ ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
+ ecc->u.mm.operand_2_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ }
+
+ /* Restore the workarea address */
+ src.address = save;
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = src.length;
+ op.dst.u.dma.address = dst.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = dst.length;
+
+ op.u.ecc.function = cmd->u.ecc.function;
+
+ ret = ccp_perform_ecc(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ecc->ecc_result = le16_to_cpup(
+ (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+ if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+ ret = -EIO;
+ goto e_dst;
+ }
+
+ /* Save the ECC result */
+ ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);
+
+e_dst:
+ ccp_dm_free(&dst);
+
+e_src:
+ ccp_dm_free(&src);
+
+ return ret;
+}
+
+static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+ struct ccp_dm_workarea src, dst;
+ struct ccp_op op;
+ int ret;
+ u8 *save;
+
+ if (!ecc->u.pm.point_1.x ||
+ (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.point_1.y ||
+ (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+ if (!ecc->u.pm.point_2.x ||
+ (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.point_2.y ||
+ (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+ } else {
+ if (!ecc->u.pm.domain_a ||
+ (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
+ if (!ecc->u.pm.scalar ||
+ (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+ }
+
+ if (!ecc->u.pm.result.x ||
+ (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
+ !ecc->u.pm.result.y ||
+ (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ /* Concatenate the modulus and the operands. Both the modulus and
+ * the operands must be in little endian format. Since the input
+ * is in big endian format it must be converted and placed in a
+ * fixed length buffer.
+ */
+ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ /* Save the workarea address since it is updated in order to perform
+ * the concatenation
+ */
+ save = src.address;
+
+ /* Copy the ECC modulus */
+ ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Copy the first point X and Y coordinate */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
+ ecc->u.pm.point_1.x_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
+ ecc->u.pm.point_1.y_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+	/* Set the first point Z coordinate to 1 */
+ *(src.address) = 0x01;
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+ /* Copy the second point X and Y coordinate */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
+ ecc->u.pm.point_2.x_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
+ ecc->u.pm.point_2.y_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ /* Set the second point Z coordinate to 1 */
+ *(src.address) = 0x01;
+ src.address += CCP_ECC_OPERAND_SIZE;
+ } else {
+ /* Copy the Domain "a" parameter */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
+ ecc->u.pm.domain_a_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+
+ if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
+ /* Copy the scalar value */
+ ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
+ ecc->u.pm.scalar_len,
+ CCP_ECC_OPERAND_SIZE, true);
+ src.address += CCP_ECC_OPERAND_SIZE;
+ }
+ }
+
+ /* Restore the workarea address */
+ src.address = save;
+
+ /* Prepare the output area for the operation */
+ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto e_src;
+
+ op.soc = 1;
+ op.src.u.dma.address = src.dma.address;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = src.length;
+ op.dst.u.dma.address = dst.dma.address;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = dst.length;
+
+ op.u.ecc.function = cmd->u.ecc.function;
+
+ ret = ccp_perform_ecc(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_dst;
+ }
+
+ ecc->ecc_result = le16_to_cpup(
+ (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+ if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+ ret = -EIO;
+ goto e_dst;
+ }
+
+ /* Save the workarea address since it is updated as we walk through
+ * to copy the point math result
+ */
+ save = dst.address;
+
+ /* Save the ECC result X and Y coordinates */
+ ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
+ CCP_ECC_MODULUS_BYTES);
+ dst.address += CCP_ECC_OUTPUT_SIZE;
+ ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
+ CCP_ECC_MODULUS_BYTES);
+ dst.address += CCP_ECC_OUTPUT_SIZE;
+
+ /* Restore the workarea address */
+ dst.address = save;
+
+e_dst:
+ ccp_dm_free(&dst);
+
+e_src:
+ ccp_dm_free(&src);
+
+ return ret;
+}
+
+static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+
+ ecc->ecc_result = 0;
+
+ if (!ecc->mod ||
+ (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
+ return -EINVAL;
+
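+ /* Modular math goes to the MM handler, point math to the PM handler. */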
+ switch (ecc->function) {
+ case CCP_ECC_FUNCTION_MMUL_384BIT:
+ case CCP_ECC_FUNCTION_MADD_384BIT:
+ case CCP_ECC_FUNCTION_MINV_384BIT:
+ return ccp_run_ecc_mm_cmd(cmd_q, cmd);
+
+ case CCP_ECC_FUNCTION_PADD_384BIT:
+ case CCP_ECC_FUNCTION_PMUL_384BIT:
+ case CCP_ECC_FUNCTION_PDBL_384BIT:
+ return ccp_run_ecc_pm_cmd(cmd_q, cmd);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+ int ret;
+
+ cmd->engine_error = 0;
+ cmd_q->cmd_error = 0;
+ cmd_q->int_rcvd = 0;
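+ /* Snapshot the queue's available command slot count from the status register */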
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ switch (cmd->engine) {
+ case CCP_ENGINE_AES:
+ ret = ccp_run_aes_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_XTS_AES_128:
+ ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_SHA:
+ ret = ccp_run_sha_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_RSA:
+ ret = ccp_run_rsa_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_PASSTHRU:
+ ret = ccp_run_passthru_cmd(cmd_q, cmd);
+ break;
+ case CCP_ENGINE_ECC:
+ ret = ccp_run_ecc_cmd(cmd_q, cmd);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
new file mode 100644
index 000000000000..93319f9db753
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -0,0 +1,361 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+#define IO_BAR 2
+#define MSIX_VECTORS 2
+
+struct ccp_msix {
+ u32 vector;
+ char name[16];
+};
+
+struct ccp_pci {
+ int msix_count;
+ struct ccp_msix msix[MSIX_VECTORS];
+};
+
+static int ccp_get_msix_irqs(struct ccp_device *ccp)
+{
+ struct ccp_pci *ccp_pci = ccp->dev_specific;
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct msix_entry msix_entry[MSIX_VECTORS];
+ unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
+ int v, ret;
+
+ for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
+ msix_entry[v].entry = v;
+
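+ /*
+  * pci_enable_msix() returns a positive count when fewer vectors are
+  * available than requested; retry with that count until it either
+  * succeeds (0) or fails (< 0).
+  */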
+ while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
+ v = ret;
+ if (ret)
+ return ret;
+
+ ccp_pci->msix_count = v;
+ for (v = 0; v < ccp_pci->msix_count; v++) {
+ /* Set the interrupt names and request the irqs */
+ snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
+ ccp_pci->msix[v].vector = msix_entry[v].vector;
+ ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+ 0, ccp_pci->msix[v].name, dev);
+ if (ret) {
+ dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
+ ret);
+ goto e_irq;
+ }
+ }
+
+ return 0;
+
+e_irq:
+ while (v--)
+ free_irq(ccp_pci->msix[v].vector, dev);
+
+ pci_disable_msix(pdev);
+
+ ccp_pci->msix_count = 0;
+
+ return ret;
+}
+
+static int ccp_get_msi_irq(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+ if (ret) {
+ dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
+ goto e_msi;
+ }
+
+ return 0;
+
+e_msi:
+ pci_disable_msi(pdev);
+
+ return ret;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ int ret;
+
+ ret = ccp_get_msix_irqs(ccp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = ccp_get_msi_irq(ccp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+ struct ccp_pci *ccp_pci = ccp->dev_specific;
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+ if (ccp_pci->msix_count) {
+ while (ccp_pci->msix_count--)
+ free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
+ dev);
+ pci_disable_msix(pdev);
+ } else {
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ }
+}
+
+static int ccp_find_mmio_area(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ resource_size_t io_len;
+ unsigned long io_flags;
+ int bar;
+
+ io_flags = pci_resource_flags(pdev, IO_BAR);
+ io_len = pci_resource_len(pdev, IO_BAR);
+ if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
+ return IO_BAR;
+
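+ /*
+  * The default BAR did not qualify; scan the remaining standard BARs
+  * for a memory region large enough to hold the register block.
+  */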
+ for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
+ io_flags = pci_resource_flags(pdev, bar);
+ io_len = pci_resource_len(pdev, bar);
+ if ((io_flags & IORESOURCE_MEM) &&
+ (io_len >= (IO_OFFSET + 0x800)))
+ return bar;
+ }
+
+ return -EIO;
+}
+
+static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ccp_device *ccp;
+ struct ccp_pci *ccp_pci;
+ struct device *dev = &pdev->dev;
+ unsigned int bar;
+ int ret;
+
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(dev);
+ if (!ccp)
+ goto e_err;
+
+ ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
+ if (!ccp_pci) {
+ ret = -ENOMEM;
+ goto e_free1;
+ }
+ ccp->dev_specific = ccp_pci;
+ ccp->get_irq = ccp_get_irqs;
+ ccp->free_irq = ccp_free_irqs;
+
+ ret = pci_request_regions(pdev, "ccp");
+ if (ret) {
+ dev_err(dev, "pci_request_regions failed (%d)\n", ret);
+ goto e_free2;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device failed (%d)\n", ret);
+ goto e_regions;
+ }
+
+ pci_set_master(pdev);
+
+ ret = ccp_find_mmio_area(ccp);
+ if (ret < 0)
+ goto e_device;
+ bar = ret;
+
+ ret = -EIO;
+ ccp->io_map = pci_iomap(pdev, bar, 0);
+ if (ccp->io_map == NULL) {
+ dev_err(dev, "pci_iomap failed\n");
+ goto e_device;
+ }
+ ccp->io_regs = ccp->io_map + IO_OFFSET;
+
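+ /*
+  * Prefer a 48-bit DMA mask (presumably the device's addressing limit)
+  * and fall back to a 32-bit mask if the platform cannot provide it.
+  */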
+ ret = dma_set_mask(dev, DMA_BIT_MASK(48));
+ if (ret == 0) {
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev,
+ "dma_set_coherent_mask failed (%d)\n",
+ ret);
+ goto e_bar0;
+ }
+ } else {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask failed (%d)\n", ret);
+ goto e_bar0;
+ }
+ }
+
+ dev_set_drvdata(dev, ccp);
+
+ ret = ccp_init(ccp);
+ if (ret)
+ goto e_bar0;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_bar0:
+ pci_iounmap(pdev, ccp->io_map);
+
+e_device:
+ pci_disable_device(pdev);
+
+e_regions:
+ pci_release_regions(pdev);
+
+e_free2:
+ kfree(ccp_pci);
+
+e_free1:
+ kfree(ccp);
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static void ccp_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+
+ if (!ccp)
+ return;
+
+ ccp_destroy(ccp);
+
+ pci_iounmap(pdev, ccp->io_map);
+
+ pci_disable_device(pdev);
+
+ pci_release_regions(pdev);
+
+ kfree(ccp);
+
+ dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+static int ccp_pci_resume(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
+ }
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ return 0;
+}
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = {
+ { PCI_VDEVICE(AMD, 0x1537), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ccp_pci_table);
+
+static struct pci_driver ccp_pci_driver = {
+ .name = "AMD Cryptographic Coprocessor",
+ .id_table = ccp_pci_table,
+ .probe = ccp_pci_probe,
+ .remove = ccp_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = ccp_pci_suspend,
+ .resume = ccp_pci_resume,
+#endif
+};
+
+int ccp_pci_init(void)
+{
+ return pci_register_driver(&ccp_pci_driver);
+}
+
+void ccp_pci_exit(void)
+{
+ pci_unregister_driver(&ccp_pci_driver);
+}
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
deleted file mode 100644
index 247ab8048f5b..000000000000
--- a/drivers/crypto/dcp.c
+++ /dev/null
@@ -1,903 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for DCP cryptographic accelerator.
- *
- * Copyright (c) 2013
- * Author: Tobias Rauter <tobias.rauter@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <linux/miscdevice.h>
-
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-
-
-/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK*/
-#define DBS_IOCTL_BASE 'd'
-#define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
-#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
-
-/* DCP channel used for AES */
-#define USED_CHANNEL 1
-/* Ring Buffers' maximum size */
-#define DCP_MAX_PKG 20
-
-/* Control Register */
-#define DCP_REG_CTRL 0x000
-#define DCP_CTRL_SFRST (1<<31)
-#define DCP_CTRL_CLKGATE (1<<30)
-#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
-#define DCP_CTRL_SHA_PRESENT (1<<28)
-#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
-#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
-#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
-#define DCP_CTRL_CH_IRQ_E_0 0x01
-#define DCP_CTRL_CH_IRQ_E_1 0x02
-#define DCP_CTRL_CH_IRQ_E_2 0x04
-#define DCP_CTRL_CH_IRQ_E_3 0x08
-
-/* Status register */
-#define DCP_REG_STAT 0x010
-#define DCP_STAT_OTP_KEY_READY (1<<28)
-#define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F)
-#define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F)
-#define DCP_STAT_IRQ(stat) (stat&0x0F)
-#define DCP_STAT_CHAN_0 (0x01)
-#define DCP_STAT_CHAN_1 (0x02)
-#define DCP_STAT_CHAN_2 (0x04)
-#define DCP_STAT_CHAN_3 (0x08)
-
-/* Channel Control Register */
-#define DCP_REG_CHAN_CTRL 0x020
-#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
-#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
-#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
-#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
-#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
-#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
-#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
-#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
-#define DCP_CHAN_CTRL_ENABLE_3 (0x08)
-
-/*
- * Channel Registers:
- * The DCP has 4 channels. Each of this channels
- * has 4 registers (command pointer, semaphore, status and options).
- * The address of register REG of channel CHAN is obtained by
- * dcp_chan_reg(REG, CHAN)
- */
-#define DCP_REG_CHAN_PTR 0x00000100
-#define DCP_REG_CHAN_SEMA 0x00000110
-#define DCP_REG_CHAN_STAT 0x00000120
-#define DCP_REG_CHAN_OPT 0x00000130
-
-#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0 0x010000
-#define DCP_CHAN_STAT_NO_CHAIN 0x020000
-#define DCP_CHAN_STAT_CONTEXT_ERROR 0x030000
-#define DCP_CHAN_STAT_PAYLOAD_ERROR 0x040000
-#define DCP_CHAN_STAT_INVALID_MODE 0x050000
-#define DCP_CHAN_STAT_PAGEFAULT 0x40
-#define DCP_CHAN_STAT_DST 0x20
-#define DCP_CHAN_STAT_SRC 0x10
-#define DCP_CHAN_STAT_PACKET 0x08
-#define DCP_CHAN_STAT_SETUP 0x04
-#define DCP_CHAN_STAT_MISMATCH 0x02
-
-/* hw packet control*/
-
-#define DCP_PKT_PAYLOAD_KEY (1<<11)
-#define DCP_PKT_OTP_KEY (1<<10)
-#define DCP_PKT_CIPHER_INIT (1<<9)
-#define DCP_PKG_CIPHER_ENCRYPT (1<<8)
-#define DCP_PKT_CIPHER_ENABLE (1<<5)
-#define DCP_PKT_DECR_SEM (1<<1)
-#define DCP_PKT_CHAIN (1<<2)
-#define DCP_PKT_IRQ 1
-
-#define DCP_PKT_MODE_CBC (1<<4)
-#define DCP_PKT_KEYSELECT_OTP (0xFF<<8)
-
-/* cipher flags */
-#define DCP_ENC 0x0001
-#define DCP_DEC 0x0002
-#define DCP_ECB 0x0004
-#define DCP_CBC 0x0008
-#define DCP_CBC_INIT 0x0010
-#define DCP_NEW_KEY 0x0040
-#define DCP_OTP_KEY 0x0080
-#define DCP_AES 0x1000
-
-/* DCP Flags */
-#define DCP_FLAG_BUSY 0x01
-#define DCP_FLAG_PRODUCING 0x02
-
-/* clock defines */
-#define CLOCK_ON 1
-#define CLOCK_OFF 0
-
-struct dcp_dev_req_ctx {
- int mode;
-};
-
-struct dcp_op {
- unsigned int flags;
- u8 key[AES_KEYSIZE_128];
- int keylen;
-
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *fallback;
-
- uint32_t stat;
- uint32_t pkt1;
- uint32_t pkt2;
- struct ablkcipher_walk walk;
-};
-
-struct dcp_dev {
- struct device *dev;
- void __iomem *dcp_regs_base;
-
- int dcp_vmi_irq;
- int dcp_irq;
-
- spinlock_t queue_lock;
- struct crypto_queue queue;
-
- uint32_t pkt_produced;
- uint32_t pkt_consumed;
-
- struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
- dma_addr_t hw_phys_pkg;
-
- /* [KEY][IV] Both with 16 Bytes */
- u8 *payload_base;
- dma_addr_t payload_base_dma;
-
-
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
- struct timer_list watchdog;
-
- unsigned long flags;
-
- struct dcp_op *ctx;
-
- struct miscdevice dcp_bootstream_misc;
-};
-
-struct dcp_hw_packet {
- uint32_t next;
- uint32_t pkt1;
- uint32_t pkt2;
- uint32_t src;
- uint32_t dst;
- uint32_t size;
- uint32_t payload;
- uint32_t stat;
-};
-
-static struct dcp_dev *global_dev;
-
-static inline u32 dcp_chan_reg(u32 reg, int chan)
-{
- return reg + (chan) * 0x40;
-}
-
-static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + reg);
-}
-
-static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x04));
-}
-
-static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x08));
-}
-
-static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
-{
- writel(data, dev->dcp_regs_base + (reg | 0x0C));
-}
-
-static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
-{
- return readl(dev->dcp_regs_base + reg);
-}
-
-static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
-{
- dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
- dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
- dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
-}
-
-static int dcp_dma_map(struct dcp_dev *dev,
- struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
-{
- dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
- /* align to length = 16 */
- pkt->size = walk->nbytes - (walk->nbytes % 16);
-
- pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
- pkt->size, DMA_TO_DEVICE);
-
- if (pkt->src == 0) {
- dev_err(dev->dev, "Unable to map src");
- return -ENOMEM;
- }
-
- pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
- pkt->size, DMA_FROM_DEVICE);
-
- if (pkt->dst == 0) {
- dev_err(dev->dev, "Unable to map dst");
- dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
- uint8_t last)
-{
- struct dcp_op *ctx = dev->ctx;
- pkt->pkt1 = ctx->pkt1;
- pkt->pkt2 = ctx->pkt2;
-
- pkt->payload = (u32) dev->payload_base_dma;
- pkt->stat = 0;
-
- if (ctx->flags & DCP_CBC_INIT) {
- pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
- ctx->flags &= ~DCP_CBC_INIT;
- }
-
- mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
- pkt->pkt1 |= DCP_PKT_IRQ;
- if (!last)
- pkt->pkt1 |= DCP_PKT_CHAIN;
-
- dev->pkt_produced++;
-
- dcp_write(dev, 1,
- dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
-}
-
-static void dcp_op_proceed(struct dcp_dev *dev)
-{
- struct dcp_op *ctx = dev->ctx;
- struct dcp_hw_packet *pkt;
-
- while (ctx->walk.nbytes) {
- int err = 0;
-
- pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
- err = dcp_dma_map(dev, &ctx->walk, pkt);
- if (err) {
- dev->ctx->stat |= err;
- /* start timer to wait for already set up calls */
- mod_timer(&dev->watchdog,
- jiffies + msecs_to_jiffies(500));
- break;
- }
-
-
- err = ctx->walk.nbytes - pkt->size;
- ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);
-
- dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
- /* we have to wait if no space is left in buffer */
- if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
- break;
- }
- clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
-}
-
-static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
-{
- struct dcp_op *ctx = dev->ctx;
-
- if (ctx->flags & DCP_NEW_KEY) {
- memcpy(dev->payload_base, ctx->key, ctx->keylen);
- ctx->flags &= ~DCP_NEW_KEY;
- }
-
- ctx->pkt1 = 0;
- ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
- ctx->pkt1 |= DCP_PKT_DECR_SEM;
-
- if (ctx->flags & DCP_OTP_KEY)
- ctx->pkt1 |= DCP_PKT_OTP_KEY;
- else
- ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;
-
- if (ctx->flags & DCP_ENC)
- ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;
-
- ctx->pkt2 = 0;
- if (ctx->flags & DCP_CBC)
- ctx->pkt2 |= DCP_PKT_MODE_CBC;
-
- dev->pkt_produced = 0;
- dev->pkt_consumed = 0;
-
- ctx->stat = 0;
- dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
- dcp_write(dev, (u32) dev->hw_phys_pkg,
- dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));
-
- set_bit(DCP_FLAG_PRODUCING, &dev->flags);
-
- if (use_walk) {
- ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
- ctx->req->src, ctx->req->nbytes);
- ablkcipher_walk_phys(ctx->req, &ctx->walk);
- dcp_op_proceed(dev);
- } else {
- dcp_op_one(dev, dev->hw_pkg[0], 1);
- clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
- }
-}
-
-static void dcp_done_task(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *)data;
- struct dcp_hw_packet *last_packet;
- int fin;
- fin = 0;
-
- for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
- last_packet->stat == 1;
- last_packet =
- dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {
-
- dcp_dma_unmap(dev, last_packet);
- last_packet->stat = 0;
- fin++;
- }
- /* the last call of this function already consumed this IRQ's packet */
- if (fin == 0)
- return;
-
- dev_dbg(dev->dev,
- "Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
- dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);
-
- last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
- if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
- if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
- dcp_op_proceed(dev);
- return;
- }
-
- while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
- dcp_dma_unmap(dev,
- dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
- }
-
- if (dev->ctx->flags & DCP_OTP_KEY) {
- /* we used the miscdevice, no walk to finish */
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
- return;
- }
-
- ablkcipher_walk_complete(&dev->ctx->walk);
- dev->ctx->req->base.complete(&dev->ctx->req->base,
- dev->ctx->stat);
- dev->ctx->req = NULL;
- /* in case there are other requests in the queue */
- tasklet_schedule(&dev->queue_task);
-}
-
-static void dcp_watchdog(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *)data;
- dev->ctx->stat |= dcp_read(dev,
- dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
-
- dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);
-
- if (!dev->ctx->stat)
- dev->ctx->stat = -ETIMEDOUT;
-
- dcp_done_task(data);
-}
-
-
-static irqreturn_t dcp_common_irq(int irq, void *context)
-{
- u32 msk;
- struct dcp_dev *dev = (struct dcp_dev *) context;
-
- del_timer(&dev->watchdog);
-
- msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
- dcp_clear(dev, msk, DCP_REG_STAT);
- if (msk == 0)
- return IRQ_NONE;
-
- dev->ctx->stat |= dcp_read(dev,
- dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
-
- if (msk & DCP_STAT_CHAN_1)
- tasklet_schedule(&dev->done_task);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t dcp_vmi_irq(int irq, void *context)
-{
- return dcp_common_irq(irq, context);
-}
-
-static irqreturn_t dcp_irq(int irq, void *context)
-{
- return dcp_common_irq(irq, context);
-}
-
-static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
-{
- dev->ctx = ctx;
-
- if ((ctx->flags & DCP_CBC) && ctx->req->info) {
- ctx->flags |= DCP_CBC_INIT;
- memcpy(dev->payload_base + AES_KEYSIZE_128,
- ctx->req->info, AES_KEYSIZE_128);
- }
-
- dcp_op_start(dev, 1);
-}
-
-static void dcp_queue_task(unsigned long data)
-{
- struct dcp_dev *dev = (struct dcp_dev *) data;
- struct crypto_async_request *async_req, *backlog;
- struct crypto_ablkcipher *tfm;
- struct dcp_op *ctx;
- struct dcp_dev_req_ctx *rctx;
- struct ablkcipher_request *req;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->queue_lock, flags);
-
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
-
- spin_unlock_irqrestore(&dev->queue_lock, flags);
-
- if (!async_req)
- goto ret_nothing_done;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ablkcipher_request_cast(async_req);
- tfm = crypto_ablkcipher_reqtfm(req);
- rctx = ablkcipher_request_ctx(req);
- ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!req->src || !req->dst)
- goto ret_nothing_done;
-
- ctx->flags |= rctx->mode;
- ctx->req = req;
-
- dcp_crypt(dev, ctx);
-
- return;
-
-ret_nothing_done:
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
-}
-
-
-static int dcp_cra_init(struct crypto_tfm *tfm)
-{
- const char *name = tfm->__crt_alg->cra_name;
- struct dcp_op *ctx = crypto_tfm_ctx(tfm);
-
- tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);
-
- ctx->fallback = crypto_alloc_ablkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
-
- if (IS_ERR(ctx->fallback)) {
- dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
- name);
- return PTR_ERR(ctx->fallback);
- }
-
- return 0;
-}
-
-static void dcp_cra_exit(struct crypto_tfm *tfm)
-{
- struct dcp_op *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_ablkcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-}
-
-/* async interface */
-static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int len)
-{
- struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
- unsigned int ret = 0;
- ctx->keylen = len;
- ctx->flags = 0;
- if (len == AES_KEYSIZE_128) {
- if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
- memcpy(ctx->key, key, len);
- ctx->flags |= DCP_NEW_KEY;
- }
- return 0;
- }
-
- ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->fallback->base.crt_flags |=
- (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
- if (ret) {
- struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
-
- tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm_aux->crt_flags |=
- (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
- return ret;
-}
-
-static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
-{
- struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
- struct dcp_dev *dev = global_dev;
- unsigned long flags;
- int err = 0;
-
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
- return -EINVAL;
-
- rctx->mode = mode;
-
- spin_lock_irqsave(&dev->queue_lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- spin_unlock_irqrestore(&dev->queue_lock, flags);
-
- flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);
-
- if (!(flags & DCP_FLAG_BUSY))
- tasklet_schedule(&dev->queue_task);
-
- return err;
-}
-
-static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_op *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- int err = 0;
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
- return err;
- }
-
- return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
-}
-
-static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_op *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- int err = 0;
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
- return err;
- }
- return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
-}
-
-static struct crypto_alg algs[] = {
- {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "dcp-cbc-aes",
- .cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_KEYSIZE_128,
- .cra_type = &crypto_ablkcipher_type,
- .cra_priority = 300,
- .cra_u.ablkcipher = {
- .min_keysize = AES_KEYSIZE_128,
- .max_keysize = AES_KEYSIZE_128,
- .setkey = dcp_aes_setkey,
- .encrypt = dcp_aes_cbc_encrypt,
- .decrypt = dcp_aes_cbc_decrypt,
- .ivsize = AES_KEYSIZE_128,
- }
-
- },
-};
-
-/* DCP bootstream verification interface: uses OTP key for crypto */
-static int dcp_bootstream_open(struct inode *inode, struct file *file)
-{
- file->private_data = container_of((file->private_data),
- struct dcp_dev, dcp_bootstream_misc);
- return 0;
-}
-
-static long dcp_bootstream_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
- void __user *argp = (void __user *)arg;
- int ret;
-
- if (dev == NULL)
- return -EBADF;
-
- if (cmd != DBS_ENC && cmd != DBS_DEC)
- return -EINVAL;
-
- if (copy_from_user(dev->payload_base, argp, 16))
- return -EFAULT;
-
- if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
- return -EAGAIN;
-
- dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
- if (!dev->ctx) {
- dev_err(dev->dev,
- "cannot allocate context for OTP crypto");
- clear_bit(DCP_FLAG_BUSY, &dev->flags);
- return -ENOMEM;
- }
-
- dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
- dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
- dev->hw_pkg[0]->src = dev->payload_base_dma;
- dev->hw_pkg[0]->dst = dev->payload_base_dma;
- dev->hw_pkg[0]->size = 16;
-
- dcp_op_start(dev, 0);
-
- while (test_bit(DCP_FLAG_BUSY, &dev->flags))
- cpu_relax();
-
- ret = dev->ctx->stat;
- if (!ret && copy_to_user(argp, dev->payload_base, 16))
- ret = -EFAULT;
-
- kfree(dev->ctx);
-
- return ret;
-}
-
-static const struct file_operations dcp_bootstream_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = dcp_bootstream_ioctl,
- .open = dcp_bootstream_open,
-};
-
-static int dcp_probe(struct platform_device *pdev)
-{
- struct dcp_dev *dev = NULL;
- struct resource *r;
- int i, ret, j;
-
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- global_dev = dev;
- dev->dev = &pdev->dev;
-
- platform_set_drvdata(pdev, dev);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(dev->dcp_regs_base))
- return PTR_ERR(dev->dcp_regs_base);
-
- dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
- udelay(10);
- dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);
-
- dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
- DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
- DCP_REG_CTRL);
-
- dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);
-
- for (i = 0; i < 4; i++)
- dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));
-
- dcp_clear(dev, -1, DCP_REG_STAT);
-
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
- return -EIO;
- }
- dev->dcp_vmi_irq = r->start;
- ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
- "dcp", dev);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't request_irq (0)\n");
- return -EIO;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!r) {
- dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
- return -EIO;
- }
- dev->dcp_irq = r->start;
- ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
- dev);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't request_irq (1)\n");
- return -EIO;
- }
-
- dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- &dev->hw_phys_pkg,
- GFP_KERNEL);
- if (!dev->hw_pkg[0]) {
- dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
- return -ENOMEM;
- }
-
- for (i = 1; i < DCP_MAX_PKG; i++) {
- dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
- + i * sizeof(struct dcp_hw_packet);
- dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
- }
- dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;
-
-
- dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
- &dev->payload_base_dma, GFP_KERNEL);
- if (!dev->payload_base) {
- dev_err(&pdev->dev, "Could not allocate memory for key\n");
- ret = -ENOMEM;
- goto err_free_hw_packet;
- }
- tasklet_init(&dev->queue_task, dcp_queue_task,
- (unsigned long) dev);
- tasklet_init(&dev->done_task, dcp_done_task,
- (unsigned long) dev);
- spin_lock_init(&dev->queue_lock);
-
- crypto_init_queue(&dev->queue, 10);
-
- init_timer(&dev->watchdog);
- dev->watchdog.function = &dcp_watchdog;
- dev->watchdog.data = (unsigned long)dev;
-
- dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR,
- dev->dcp_bootstream_misc.name = "dcpboot",
- dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops,
- ret = misc_register(&dev->dcp_bootstream_misc);
- if (ret != 0) {
- dev_err(dev->dev, "Unable to register misc device\n");
- goto err_free_key_iv;
- }
-
- for (i = 0; i < ARRAY_SIZE(algs); i++) {
- algs[i].cra_priority = 300;
- algs[i].cra_ctxsize = sizeof(struct dcp_op);
- algs[i].cra_module = THIS_MODULE;
- algs[i].cra_init = dcp_cra_init;
- algs[i].cra_exit = dcp_cra_exit;
- if (crypto_register_alg(&algs[i])) {
- dev_err(&pdev->dev, "register algorithm failed\n");
- ret = -ENOMEM;
- goto err_unregister;
- }
- }
- dev_notice(&pdev->dev, "DCP crypto enabled.!\n");
-
- return 0;
-
-err_unregister:
- for (j = 0; j < i; j++)
- crypto_unregister_alg(&algs[j]);
-err_free_key_iv:
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
-err_free_hw_packet:
- dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
- sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
- dev->hw_phys_pkg);
-
- return ret;
-}
-
-static int dcp_remove(struct platform_device *pdev)
-{
- struct dcp_dev *dev;
- int j;
- dev = platform_get_drvdata(pdev);
-
- misc_deregister(&dev->dcp_bootstream_misc);
-
- for (j = 0; j < ARRAY_SIZE(algs); j++)
- crypto_unregister_alg(&algs[j]);
-
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
-
- dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
- dev->payload_base_dma);
-
- dma_free_coherent(&pdev->dev,
- DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
- dev->hw_pkg[0], dev->hw_phys_pkg);
-
- return 0;
-}
-
-static struct of_device_id fs_dcp_of_match[] = {
- { .compatible = "fsl-dcp"},
- {},
-};
-
-static struct platform_driver fs_dcp_driver = {
- .probe = dcp_probe,
- .remove = dcp_remove,
- .driver = {
- .name = "fsl-dcp",
- .owner = THIS_MODULE,
- .of_match_table = fs_dcp_of_match
- }
-};
-
-module_platform_driver(fs_dcp_driver);
-
-
-MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
-MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
new file mode 100644
index 000000000000..a6db7fa6f891
--- /dev/null
+++ b/drivers/crypto/mxs-dcp.c
@@ -0,0 +1,1100 @@
+/*
+ * Freescale i.MX23/i.MX28 Data Co-Processor driver
+ *
+ * Copyright (C) 2013 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stmp_device.h>
+
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define DCP_MAX_CHANS 4
+#define DCP_BUF_SZ PAGE_SIZE
+
+/* DCP DMA descriptor. */
+struct dcp_dma_desc {
+ uint32_t next_cmd_addr;
+ uint32_t control0;
+ uint32_t control1;
+ uint32_t source;
+ uint32_t destination;
+ uint32_t size;
+ uint32_t payload;
+ uint32_t status;
+};
+
+/* Coherent aligned block for bounce buffering. */
+struct dcp_coherent_block {
+ uint8_t aes_in_buf[DCP_BUF_SZ];
+ uint8_t aes_out_buf[DCP_BUF_SZ];
+ uint8_t sha_in_buf[DCP_BUF_SZ];
+
+ uint8_t aes_key[2 * AES_KEYSIZE_128];
+ uint8_t sha_digest[SHA256_DIGEST_SIZE];
+
+ struct dcp_dma_desc desc[DCP_MAX_CHANS];
+};
+
+struct dcp {
+ struct device *dev;
+ void __iomem *base;
+
+ uint32_t caps;
+
+ struct dcp_coherent_block *coh;
+
+ struct completion completion[DCP_MAX_CHANS];
+ struct mutex mutex[DCP_MAX_CHANS];
+ struct task_struct *thread[DCP_MAX_CHANS];
+ struct crypto_queue queue[DCP_MAX_CHANS];
+};
+
+enum dcp_chan {
+ DCP_CHAN_HASH_SHA = 0,
+ DCP_CHAN_CRYPTO = 2,
+};
+
+struct dcp_async_ctx {
+ /* Common context */
+ enum dcp_chan chan;
+ uint32_t fill;
+
+ /* SHA Hash-specific context */
+ struct mutex mutex;
+ uint32_t alg;
+ unsigned int hot:1;
+
+ /* Crypto-specific context */
+ unsigned int enc:1;
+ unsigned int ecb:1;
+ struct crypto_ablkcipher *fallback;
+ unsigned int key_len;
+ uint8_t key[AES_KEYSIZE_128];
+};
+
+struct dcp_sha_req_ctx {
+ unsigned int init:1;
+ unsigned int fini:1;
+};
+
+/*
+ * There can be only one instance of the MXS DCP at a time, due to
+ * the design of the Linux Crypto API.
+ */
+static struct dcp *global_sdcp;
+static DEFINE_MUTEX(global_mutex);
+
+/* DCP register layout. */
+#define MXS_DCP_CTRL 0x00
+#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES (1 << 23)
+#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING (1 << 22)
+
+#define MXS_DCP_STAT 0x10
+#define MXS_DCP_STAT_CLR 0x18
+#define MXS_DCP_STAT_IRQ_MASK 0xf
+
+#define MXS_DCP_CHANNELCTRL 0x20
+#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK 0xff
+
+#define MXS_DCP_CAPABILITY1 0x40
+#define MXS_DCP_CAPABILITY1_SHA256 (4 << 16)
+#define MXS_DCP_CAPABILITY1_SHA1 (1 << 16)
+#define MXS_DCP_CAPABILITY1_AES128 (1 << 0)
+
+#define MXS_DCP_CONTEXT 0x50
+
+#define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40))
+#define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40))
+
+/* DMA descriptor bits. */
+#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
+#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
+#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
+#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
+#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
+#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
+#define MXS_DCP_CONTROL0_ENABLE_CIPHER (1 << 5)
+#define MXS_DCP_CONTROL0_DECR_SEMAPHORE (1 << 1)
+#define MXS_DCP_CONTROL0_INTERRUPT (1 << 0)
+
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256 (2 << 16)
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1 (0 << 16)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC (1 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
+
+static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = actx->chan;
+ uint32_t stat;
+ int ret;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+
+ dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
+ DMA_TO_DEVICE);
+
+ reinit_completion(&sdcp->completion[chan]);
+
+ /* Clear status register. */
+ writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
+
+ /* Load the DMA descriptor. */
+ writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
+
+ /* Increment the semaphore to start the DMA transfer. */
+ writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
+
+ ret = wait_for_completion_timeout(&sdcp->completion[chan],
+ msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
+ chan, readl(sdcp->base + MXS_DCP_STAT));
+ return -ETIMEDOUT;
+ }
+
+ stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
+ if (stat & 0xff) {
+ dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
+ chan, stat);
+ return -EINVAL;
+ }
+
+ dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/*
+ * Encryption (AES128)
+ */
+static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ int ret;
+
+ dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+ dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+ DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+ /* Fill in the DMA descriptor. */
+ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ MXS_DCP_CONTROL0_INTERRUPT |
+ MXS_DCP_CONTROL0_ENABLE_CIPHER;
+
+ /* Payload contains the key. */
+ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+
+ if (actx->enc)
+ desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
+ if (init)
+ desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
+
+ desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
+
+ if (actx->ecb)
+ desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
+ else
+ desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+
+ desc->next_cmd_addr = 0;
+ desc->source = src_phys;
+ desc->destination = dst_phys;
+ desc->size = actx->fill;
+ desc->payload = key_phys;
+ desc->status = 0;
+
+ ret = mxs_dcp_start_dma(actx);
+
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ const int nents = sg_nents(req->src);
+
+ const int out_off = DCP_BUF_SZ;
+ uint8_t *in_buf = sdcp->coh->aes_in_buf;
+ uint8_t *out_buf = sdcp->coh->aes_out_buf;
+
+ uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+ uint32_t dst_off = 0;
+
+ uint8_t *key = sdcp->coh->aes_key;
+
+ int ret = 0;
+ int split = 0;
+ unsigned int i, len, clen, rem = 0;
+ int init = 0;
+
+ actx->fill = 0;
+
+ /* Copy the key from the temporary location. */
+ memcpy(key, actx->key, actx->key_len);
+
+ if (!actx->ecb) {
+ /* Copy the CBC IV just past the key. */
+ memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+ /* CBC needs the INIT set. */
+ init = 1;
+ } else {
+ memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
+ }
+
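+ /*
+  * Walk the source scatterlist, bouncing the data through the coherent
+  * in/out buffers in DCP_BUF_SZ chunks and scattering the results back
+  * into the destination list.
+  */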
+ for_each_sg(req->src, src, nents, i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+
+ do {
+ if (actx->fill + len > out_off)
+ clen = out_off - actx->fill;
+ else
+ clen = len;
+
+ memcpy(in_buf + actx->fill, src_buf, clen);
+ len -= clen;
+ src_buf += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer or this is the last SG,
+ * submit the buffer.
+ */
+ if (actx->fill == out_off || sg_is_last(src)) {
+ ret = mxs_dcp_run_aes(actx, init);
+ if (ret)
+ return ret;
+ init = 0;
+
+ out_tmp = out_buf;
+ while (dst && actx->fill) {
+ if (!split) {
+ dst_buf = sg_virt(dst);
+ dst_off = 0;
+ }
+ rem = min(sg_dma_len(dst) - dst_off,
+ actx->fill);
+
+ memcpy(dst_buf + dst_off, out_tmp, rem);
+ out_tmp += rem;
+ dst_off += rem;
+ actx->fill -= rem;
+
+ if (dst_off == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ split = 0;
+ } else {
+ split = 1;
+ }
+ }
+ }
+ } while (len);
+ }
+
+ return ret;
+}
+
+static int dcp_chan_thread_aes(void *data)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = DCP_CHAN_CRYPTO;
+
+ struct crypto_async_request *backlog;
+ struct crypto_async_request *arq;
+
+ int ret;
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&sdcp->mutex[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+ mutex_unlock(&sdcp->mutex[chan]);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ if (arq) {
+ ret = mxs_dcp_aes_block_crypt(arq);
+ arq->complete(arq, ret);
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int ret;
+
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+
+ if (enc)
+ ret = crypto_ablkcipher_encrypt(req);
+ else
+ ret = crypto_ablkcipher_decrypt(req);
+
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+
+ return ret;
+}
+
+static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct crypto_async_request *arq = &req->base;
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+ int ret;
+
+ if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ return mxs_dcp_block_fallback(req, enc);
+
+ actx->enc = enc;
+ actx->ecb = ecb;
+ actx->chan = DCP_CHAN_CRYPTO;
+
+ mutex_lock(&sdcp->mutex[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+ mutex_unlock(&sdcp->mutex[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+
+ return -EINPROGRESS;
+}
+
+static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 0, 1);
+}
+
+static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 1, 1);
+}
+
+static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 0, 0);
+}
+
+static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return mxs_dcp_aes_enqueue(req, 1, 0);
+}
+
+static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+ unsigned int ret;
+
+ /*
+ * AES 128 is supported by the hardware, so store the key into the
+ * temporary buffer and exit. We must use the temporary buffer here,
+ * since there can still be an operation in progress.
+ */
+ actx->key_len = len;
+ if (len == AES_KEYSIZE_128) {
+ memcpy(actx->key, key, len);
+ return 0;
+ }
+
+ /* Check if the key size is supported by the kernel at all. */
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ /*
+ * If the requested AES key size is not supported by the hardware,
+ * but is supported by the in-kernel software implementation, we use
+ * the software fallback.
+ */
+ actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ actx->fallback->base.crt_flags |=
+ tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+
+ ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
+ if (!ret)
+ return 0;
+
+ tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |=
+ actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;
+
+ return ret;
+}
+
+static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *blk;
+
+ blk = crypto_alloc_ablkcipher(name, 0, flags);
+ if (IS_ERR(blk))
+ return PTR_ERR(blk);
+
+ actx->fallback = blk;
+ tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
+ return 0;
+}
+
+static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+{
+ struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ablkcipher(actx->fallback);
+ actx->fallback = NULL;
+}
+
+/*
+ * Hashing (SHA1/SHA256)
+ */
+static int mxs_dcp_run_sha(struct ahash_request *req)
+{
+ struct dcp *sdcp = global_sdcp;
+ int ret;
+
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ dma_addr_t digest_phys = dma_map_single(sdcp->dev,
+ sdcp->coh->sha_digest,
+ SHA256_DIGEST_SIZE,
+ DMA_FROM_DEVICE);
+
+ dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+
+ /* Fill in the DMA descriptor. */
+ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ MXS_DCP_CONTROL0_INTERRUPT |
+ MXS_DCP_CONTROL0_ENABLE_HASH;
+ if (rctx->init)
+ desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
+
+ desc->control1 = actx->alg;
+ desc->next_cmd_addr = 0;
+ desc->source = buf_phys;
+ desc->destination = 0;
+ desc->size = actx->fill;
+ desc->payload = 0;
+ desc->status = 0;
+
+ /* Set HASH_TERM bit for last transfer block. */
+ if (rctx->fini) {
+ desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
+ desc->payload = digest_phys;
+ }
+
+ ret = mxs_dcp_start_dma(actx);
+
+ dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct ahash_request *req = ahash_request_cast(arq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const int nents = sg_nents(req->src);
+
+ uint8_t *digest = sdcp->coh->sha_digest;
+ uint8_t *in_buf = sdcp->coh->sha_in_buf;
+
+ uint8_t *src_buf;
+
+ struct scatterlist *src;
+
+ unsigned int i, len, clen;
+ int ret;
+
+ int fin = rctx->fini;
+ if (fin)
+ rctx->fini = 0;
+
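+ /*
+  * Gather the request data into the coherent bounce buffer, flushing it
+  * to the hardware each time it fills up.
+  */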
+ for_each_sg(req->src, src, nents, i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+
+ do {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ memcpy(in_buf + actx->fill, src_buf, clen);
+ len -= clen;
+ src_buf += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
+ } while (len);
+ }
+
+ if (fin) {
+ rctx->fini = 1;
+
+ /* Submit whatever is left. */
+ ret = mxs_dcp_run_sha(req);
+ if (ret || !req->result)
+ return ret;
+ actx->fill = 0;
+
+ /* The DCP writes the digest bytes in reverse order; flip them back. */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = digest[halg->digestsize - i - 1];
+ }
+
+ return 0;
+}
+
+static int dcp_chan_thread_sha(void *data)
+{
+ struct dcp *sdcp = global_sdcp;
+ const int chan = DCP_CHAN_HASH_SHA;
+
+ struct crypto_async_request *backlog;
+ struct crypto_async_request *arq;
+
+ struct dcp_sha_req_ctx *rctx;
+
+ struct ahash_request *req;
+ int ret, fini;
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&sdcp->mutex[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+ mutex_unlock(&sdcp->mutex[chan]);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ if (arq) {
+ req = ahash_request_cast(arq);
+ rctx = ahash_request_ctx(req);
+
+ ret = dcp_sha_req_to_buf(arq);
+ fini = rctx->fini;
+ arq->complete(arq, ret);
+ if (!fini)
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int dcp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+
+ /*
+ * Start hashing session. The code below only inits the
+ * hashing session context, nothing more.
+ */
+ memset(actx, 0, sizeof(*actx));
+
+ if (strcmp(halg->base.cra_name, "sha1") == 0)
+ actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
+ else
+ actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
+
+ actx->fill = 0;
+ actx->hot = 0;
+ actx->chan = DCP_CHAN_HASH_SHA;
+
+ mutex_init(&actx->mutex);
+
+ return 0;
+}
+
+static int dcp_sha_update_fx(struct ahash_request *req, int fini)
+{
+ struct dcp *sdcp = global_sdcp;
+
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+ int ret;
+
+ /*
+ * Ignore requests that have no data in them and are not
+ * the trailing requests in the stream of requests.
+ */
+ if (!req->nbytes && !fini)
+ return 0;
+
+ mutex_lock(&actx->mutex);
+
+ rctx->fini = fini;
+
+ if (!actx->hot) {
+ actx->hot = 1;
+ rctx->init = 1;
+ }
+
+ mutex_lock(&sdcp->mutex[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+ mutex_unlock(&sdcp->mutex[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+ mutex_unlock(&actx->mutex);
+
+ return -EINPROGRESS;
+}
+
+static int dcp_sha_update(struct ahash_request *req)
+{
+ return dcp_sha_update_fx(req, 0);
+}
+
+static int dcp_sha_final(struct ahash_request *req)
+{
+ ahash_request_set_crypt(req, NULL, req->result, 0);
+ req->nbytes = 0;
+ return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_finup(struct ahash_request *req)
+{
+ return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = dcp_sha_init(req);
+ if (ret)
+ return ret;
+
+ return dcp_sha_finup(req);
+}
+
+static int dcp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct dcp_sha_req_ctx));
+ return 0;
+}
+
+static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+/* AES 128 ECB and AES 128 CBC */
+static struct crypto_alg dcp_aes_algs[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = mxs_dcp_aes_fallback_init,
+ .cra_exit = mxs_dcp_aes_fallback_exit,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt
+ },
+ },
+ }, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = mxs_dcp_aes_fallback_init,
+ .cra_exit = mxs_dcp_aes_fallback_exit,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mxs_dcp_aes_setkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
+ },
+};
+
+/* SHA1 */
+static struct ahash_alg dcp_sha1_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .finup = dcp_sha_finup,
+ .digest = dcp_sha_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 63,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = dcp_sha_cra_init,
+ .cra_exit = dcp_sha_cra_exit,
+ },
+ },
+};
+
+/* SHA256 */
+static struct ahash_alg dcp_sha256_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .finup = dcp_sha_finup,
+ .digest = dcp_sha_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-dcp",
+ .cra_priority = 400,
+ .cra_alignmask = 63,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = dcp_sha_cra_init,
+ .cra_exit = dcp_sha_cra_exit,
+ },
+ },
+};
+
+static irqreturn_t mxs_dcp_irq(int irq, void *context)
+{
+ struct dcp *sdcp = context;
+ uint32_t stat;
+ int i;
+
+ stat = readl(sdcp->base + MXS_DCP_STAT);
+ stat &= MXS_DCP_STAT_IRQ_MASK;
+ if (!stat)
+ return IRQ_NONE;
+
+ /* Clear the interrupts. */
+ writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
+
+ /* Complete the DMA requests that finished. */
+ for (i = 0; i < DCP_MAX_CHANS; i++)
+ if (stat & (1 << i))
+ complete(&sdcp->completion[i]);
+
+ return IRQ_HANDLED;
+}
+
+static int mxs_dcp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dcp *sdcp = NULL;
+ int i, ret;
+
+ struct resource *iores;
+ int dcp_vmi_irq, dcp_irq;
+
+ mutex_lock(&global_mutex);
+ if (global_sdcp) {
+ dev_err(dev, "Only one DCP instance allowed!\n");
+ ret = -ENODEV;
+ goto err_mutex;
+ }
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dcp_vmi_irq = platform_get_irq(pdev, 0);
+ dcp_irq = platform_get_irq(pdev, 1);
+ if (dcp_vmi_irq < 0 || dcp_irq < 0) {
+ ret = -EINVAL;
+ goto err_mutex;
+ }
+
+ sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
+ if (!sdcp) {
+ ret = -ENOMEM;
+ goto err_mutex;
+ }
+
+ sdcp->dev = dev;
+ sdcp->base = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(sdcp->base)) {
+ ret = PTR_ERR(sdcp->base);
+ goto err_mutex;
+ }
+
+ ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
+ "dcp-vmi-irq", sdcp);
+ if (ret) {
+ dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
+ goto err_mutex;
+ }
+
+ ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
+ "dcp-irq", sdcp);
+ if (ret) {
+ dev_err(dev, "Failed to claim DCP IRQ!\n");
+ goto err_mutex;
+ }
+
+ /* Allocate coherent helper block. */
+ sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
+ if (!sdcp->coh) {
+ dev_err(dev, "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_mutex;
+ }
+
+ /* Restart the DCP block. */
+ stmp_reset_block(sdcp->base);
+
+ /* Initialize control register. */
+ writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
+ MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
+ sdcp->base + MXS_DCP_CTRL);
+
+ /* Enable all DCP DMA channels. */
+ writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
+ sdcp->base + MXS_DCP_CHANNELCTRL);
+
+ /*
+ * We do not enable context switching. Give the context buffer a
+ * pointer to an illegal address so if context switching is
+ * inadvertently enabled, the DCP will return an error instead of
+ * trashing good memory. The DCP DMA cannot access ROM, so any ROM
+ * address will do.
+ */
+ writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
+ for (i = 0; i < DCP_MAX_CHANS; i++)
+ writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
+ writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
+
+ global_sdcp = sdcp;
+
+ platform_set_drvdata(pdev, sdcp);
+
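+ /* Set up the per-channel locks, completions and request queues. */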
+ for (i = 0; i < DCP_MAX_CHANS; i++) {
+ mutex_init(&sdcp->mutex[i]);
+ init_completion(&sdcp->completion[i]);
+ crypto_init_queue(&sdcp->queue[i], 50);
+ }
+
+ /* Create the SHA and AES handler threads. */
+ sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
+ NULL, "mxs_dcp_chan/sha");
+ if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
+ dev_err(dev, "Error starting SHA thread!\n");
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ goto err_free_coherent;
+ }
+
+ sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
+ NULL, "mxs_dcp_chan/aes");
+ if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
+ dev_err(dev, "Error starting AES thread!\n");
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
+ goto err_destroy_sha_thread;
+ }
+
+ /* Register the various crypto algorithms. */
+ sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
+ ret = crypto_register_algs(dcp_aes_algs,
+ ARRAY_SIZE(dcp_aes_algs));
+ if (ret) {
+ /* Failed to register algorithm. */
+ dev_err(dev, "Failed to register AES crypto!\n");
+ goto err_destroy_aes_thread;
+ }
+ }
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
+ ret = crypto_register_ahash(&dcp_sha1_alg);
+ if (ret) {
+ dev_err(dev, "Failed to register %s hash!\n",
+ dcp_sha1_alg.halg.base.cra_name);
+ goto err_unregister_aes;
+ }
+ }
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
+ ret = crypto_register_ahash(&dcp_sha256_alg);
+ if (ret) {
+ dev_err(dev, "Failed to register %s hash!\n",
+ dcp_sha256_alg.halg.base.cra_name);
+ goto err_unregister_sha1;
+ }
+ }
+
+ return 0;
+
+err_unregister_sha1:
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+ crypto_unregister_ahash(&dcp_sha1_alg);
+
+err_unregister_aes:
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+ crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+err_destroy_aes_thread:
+ kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+err_destroy_sha_thread:
+ kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_free_coherent:
+ kfree(sdcp->coh);
+err_mutex:
+ mutex_unlock(&global_mutex);
+ return ret;
+}
+
+static int mxs_dcp_remove(struct platform_device *pdev)
+{
+ struct dcp *sdcp = platform_get_drvdata(pdev);
+
+ kfree(sdcp->coh);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
+ crypto_unregister_ahash(&dcp_sha256_alg);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+ crypto_unregister_ahash(&dcp_sha1_alg);
+
+ if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+ crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+ kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+ platform_set_drvdata(pdev, NULL);
+
+ mutex_lock(&global_mutex);
+ global_sdcp = NULL;
+ mutex_unlock(&global_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id mxs_dcp_dt_ids[] = {
+ { .compatible = "fsl,imx23-dcp", .data = NULL, },
+ { .compatible = "fsl,imx28-dcp", .data = NULL, },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
+
+static struct platform_driver mxs_dcp_driver = {
+ .probe = mxs_dcp_probe,
+ .remove = mxs_dcp_remove,
+ .driver = {
+ .name = "mxs-dcp",
+ .owner = THIS_MODULE,
+ .of_match_table = mxs_dcp_dt_ids,
+ },
+};
+
+module_platform_driver(mxs_dcp_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale MXS DCP Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-dcp");
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
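+/*
+ * The 842 engine works on physical addresses. Buffers may live in vmalloc
+ * space, where __pa() is not valid, so translate those through their
+ * backing page instead.
+ */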
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a9ccbf14096e..dde41f1df608 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -784,6 +784,7 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
struct omap_aes_dev *dd = NULL;
+ int err;
/* Find AES device, currently picks the first device */
spin_lock_bh(&list_lock);
@@ -792,7 +793,13 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
}
spin_unlock_bh(&list_lock);
- pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_get_sync(dd->dev);
+ if (err < 0) {
+ dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return err;
+ }
+
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
return 0;
@@ -1182,7 +1189,12 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->phys_base = res.start;
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ goto err_res;
+ }
omap_aes_dma_stop(dd);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e45aaaf0db30..a727a6a59653 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -789,10 +789,13 @@ static int omap_sham_update_cpu(struct omap_sham_dev *dd)
dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
ctx->bufcnt, ctx->digcnt, final);
- bufcnt = ctx->bufcnt;
- ctx->bufcnt = 0;
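+ /*
+ * Only hand data to the hardware when the request is final, or when
+ * the buffer is full and more data is still pending; otherwise keep
+ * buffering and report success.
+ */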
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
+ }
- return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
+ return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
@@ -1103,6 +1106,9 @@ static int omap_sham_update(struct ahash_request *req)
return 0;
}
+ if (dd->polling_mode)
+ ctx->flags |= BIT(FLAGS_CPU);
+
return omap_sham_enqueue(req, OP_UPDATE);
}
@@ -1970,7 +1976,8 @@ err_algs:
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
pm_runtime_disable(dev);
- dma_release_channel(dd->dma_lch);
+ if (dd->dma_lch)
+ dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
@@ -1994,7 +2001,9 @@ static int omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
- dma_release_channel(dd->dma_lch);
+
+ if (dd->dma_lch)
+ dma_release_channel(dd->dma_lch);
return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b44f4ddc565c..5967667e1a8f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -338,20 +338,29 @@ DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
- int tail = priv->chan[ch].tail;
+ int tail, iter;
dma_addr_t cur_desc;
- cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
+ cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
+ cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
- while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
- tail = (tail + 1) & (priv->fifo_len - 1);
- if (tail == priv->chan[ch].tail) {
+ if (!cur_desc) {
+ dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
+ return 0;
+ }
+
+ tail = priv->chan[ch].tail;
+
+ iter = tail;
+ while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
+ iter = (iter + 1) & (priv->fifo_len - 1);
+ if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return 0;
}
}
- return priv->chan[ch].fifo[tail].desc->hdr;
+ return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
@@ -2486,8 +2495,6 @@ static int talitos_remove(struct platform_device *ofdev)
iounmap(priv->reg);
- dev_set_drvdata(dev, NULL);
-
kfree(priv);
return 0;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 31f3adba4cf3..7d2f43550700 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -67,7 +67,7 @@ comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
- depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412
+ depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
select ARCH_HAS_OPP
select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a0b2f7e0eedb..2042ec3656ba 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -91,26 +91,35 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
*/
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
- int lev, prev_lev;
+ int lev, prev_lev, ret = 0;
unsigned long cur_time;
- lev = devfreq_get_freq_level(devfreq, freq);
- if (lev < 0)
- return lev;
-
cur_time = jiffies;
- devfreq->time_in_state[lev] +=
+
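+ /*
+ * The interval since the last update was spent at the previous
+ * frequency, so charge it to prev_lev before moving to the new level.
+ */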
+ prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+ if (prev_lev < 0) {
+ ret = prev_lev;
+ goto out;
+ }
+
+ devfreq->time_in_state[prev_lev] +=
cur_time - devfreq->last_stat_updated;
- if (freq != devfreq->previous_freq) {
- prev_lev = devfreq_get_freq_level(devfreq,
- devfreq->previous_freq);
+
+ lev = devfreq_get_freq_level(devfreq, freq);
+ if (lev < 0) {
+ ret = lev;
+ goto out;
+ }
+
+ if (lev != prev_lev) {
devfreq->trans_table[(prev_lev *
devfreq->profile->max_state) + lev]++;
devfreq->total_trans++;
}
- devfreq->last_stat_updated = cur_time;
- return 0;
+out:
+ devfreq->last_stat_updated = cur_time;
+ return ret;
}
/**
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index cede6f71cd63..e07b0c68c715 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -30,9 +30,9 @@
extern unsigned int exynos_result_of_asv;
#endif
-#include <mach/regs-clock.h>
+#include <mach/map.h>
-#include <plat/map-s5p.h>
+#include "exynos4_bus.h"
#define MAX_SAFEVOLT 1200000 /* 1.2V */
@@ -116,7 +116,7 @@ static struct bus_opp_table exynos4210_busclk_table[] = {
};
/*
- * MIF is the main control knob clock for exynox4x12 MIF/INT
+ * MIF is the main control knob clock for Exynos4x12 MIF/INT
* clock and voltage of both mif/int are controlled.
*/
static struct bus_opp_table exynos4x12_mifclk_table[] = {
diff --git a/drivers/devfreq/exynos/exynos4_bus.h b/drivers/devfreq/exynos/exynos4_bus.h
new file mode 100644
index 000000000000..94c73c18d28c
--- /dev/null
+++ b/drivers/devfreq/exynos/exynos4_bus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS4 BUS header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __DEVFREQ_EXYNOS4_BUS_H
+#define __DEVFREQ_EXYNOS4_BUS_H __FILE__
+
+#include <mach/map.h>
+
+#define EXYNOS4_CLKDIV_LEFTBUS (S5P_VA_CMU + 0x04500)
+#define EXYNOS4_CLKDIV_STAT_LEFTBUS (S5P_VA_CMU + 0x04600)
+
+#define EXYNOS4_CLKDIV_RIGHTBUS (S5P_VA_CMU + 0x08500)
+#define EXYNOS4_CLKDIV_STAT_RIGHTBUS (S5P_VA_CMU + 0x08600)
+
+#define EXYNOS4_CLKDIV_TOP (S5P_VA_CMU + 0x0C510)
+#define EXYNOS4_CLKDIV_CAM (S5P_VA_CMU + 0x0C520)
+#define EXYNOS4_CLKDIV_MFC (S5P_VA_CMU + 0x0C528)
+
+#define EXYNOS4_CLKDIV_STAT_TOP (S5P_VA_CMU + 0x0C610)
+#define EXYNOS4_CLKDIV_STAT_MFC (S5P_VA_CMU + 0x0C628)
+
+#define EXYNOS4210_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x0C930)
+#define EXYNOS4212_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x04930)
+
+#define EXYNOS4_CLKDIV_DMC0 (S5P_VA_CMU + 0x10500)
+#define EXYNOS4_CLKDIV_DMC1 (S5P_VA_CMU + 0x10504)
+#define EXYNOS4_CLKDIV_STAT_DMC0 (S5P_VA_CMU + 0x10600)
+#define EXYNOS4_CLKDIV_STAT_DMC1 (S5P_VA_CMU + 0x10604)
+
+#define EXYNOS4_DMC_PAUSE_CTRL (S5P_VA_CMU + 0x11094)
+#define EXYNOS4_DMC_PAUSE_ENABLE (1 << 0)
+
+#define EXYNOS4_CLKDIV_DMC0_ACP_SHIFT (0)
+#define EXYNOS4_CLKDIV_DMC0_ACP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT (4)
+#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT (8)
+#define EXYNOS4_CLKDIV_DMC0_DPHY_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMC_SHIFT (12)
+#define EXYNOS4_CLKDIV_DMC0_DMC_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT (16)
+#define EXYNOS4_CLKDIV_DMC0_DMCD_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT (20)
+#define EXYNOS4_CLKDIV_DMC0_DMCP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT (24)
+#define EXYNOS4_CLKDIV_DMC0_COPY2_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT)
+#define EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT (28)
+#define EXYNOS4_CLKDIV_DMC0_CORETI_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT)
+
+#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT (0)
+#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK (0xf << EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_C2C_SHIFT (4)
+#define EXYNOS4_CLKDIV_DMC1_C2C_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2C_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_PWI_SHIFT (8)
+#define EXYNOS4_CLKDIV_DMC1_PWI_MASK (0xf << EXYNOS4_CLKDIV_DMC1_PWI_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT (12)
+#define EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT (16)
+#define EXYNOS4_CLKDIV_DMC1_DVSEM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT)
+#define EXYNOS4_CLKDIV_DMC1_DPM_SHIFT (24)
+#define EXYNOS4_CLKDIV_DMC1_DPM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DPM_SHIFT)
+
+#define EXYNOS4_CLKDIV_MFC_SHIFT (0)
+#define EXYNOS4_CLKDIV_MFC_MASK (0x7 << EXYNOS4_CLKDIV_MFC_SHIFT)
+
+#define EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT (0)
+#define EXYNOS4_CLKDIV_TOP_ACLK200_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT (4)
+#define EXYNOS4_CLKDIV_TOP_ACLK100_MASK (0xF << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT (8)
+#define EXYNOS4_CLKDIV_TOP_ACLK160_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT (12)
+#define EXYNOS4_CLKDIV_TOP_ACLK133_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT (16)
+#define EXYNOS4_CLKDIV_TOP_ONENAND_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT (20)
+#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT)
+#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT (24)
+#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT)
+
+#define EXYNOS4_CLKDIV_BUS_GDLR_SHIFT (0)
+#define EXYNOS4_CLKDIV_BUS_GDLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GDLR_SHIFT)
+#define EXYNOS4_CLKDIV_BUS_GPLR_SHIFT (4)
+#define EXYNOS4_CLKDIV_BUS_GPLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GPLR_SHIFT)
+
+#define EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT (0)
+#define EXYNOS4_CLKDIV_CAM_FIMC0_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT (4)
+#define EXYNOS4_CLKDIV_CAM_FIMC1_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT (8)
+#define EXYNOS4_CLKDIV_CAM_FIMC2_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT)
+#define EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT (12)
+#define EXYNOS4_CLKDIV_CAM_FIMC3_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT)
+
+#define EXYNOS4_CLKDIV_CAM1 (S5P_VA_CMU + 0x0C568)
+
+#define EXYNOS4_CLKDIV_STAT_CAM1 (S5P_VA_CMU + 0x0C668)
+
+#define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT (0)
+#define EXYNOS4_CLKDIV_CAM1_JPEG_MASK (0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT)
+
+#endif /* __DEVFREQ_EXYNOS4_BUS_H */
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index a60da3c1c48e..6eef1f7397c6 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -152,7 +152,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, volt);
+ dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);
mutex_lock(&data->lock);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c823daaf9043..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -292,9 +292,11 @@ config MMP_TDMA
bool "MMP Two-Channel DMA support"
depends on ARCH_MMP
select DMA_ENGINE
+ select MMP_SRAM
help
Support the MMP Two-Channel DMA engine.
This engine is used for MMP Audio DMA and pxa910 SQU.
+ It needs the SRAM driver under mach-mmp.
Say Y here if you enabled MMP ADMA, otherwise say N.
@@ -304,6 +306,12 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config DMA_BCM2835
+ tristate "BCM2835 DMA engine support"
+ depends on (ARCH_BCM2835 || MACH_BCM2708)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config TI_CPPI41
tristate "AM33xx CPPI41 DMA support"
depends on ARCH_OMAP
@@ -334,6 +342,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config MOXART_DMA
+ tristate "MOXART DMA support"
+ depends on ARCH_MOXART
+ select DMA_ENGINE
+ select DMA_OF
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the MOXA ART SoC DMA controller.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da97e429..a029d0f4a1be 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index e69b03c0fa50..1e506afa33f5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock);
* @adev: ACPI device to match with
* @adma: struct acpi_dma of the given DMA controller
*
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
* In order to match a device from DSDT table to the corresponding CSRT device
* we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
*/
static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
*
* We are using this table to get the request line range of the specific DMA
* controller to be used later.
- *
*/
static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
{
@@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
* @data pointer to controller specific data to be used by
* translation function
*
- * Returns 0 on success or appropriate errno value on error.
- *
* Allocated memory should be freed with appropriate acpi_dma_controller_free()
* call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
* @dev: struct device of DMA controller
*
* Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int acpi_dma_controller_free(struct device *dev)
{
@@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
* Managed acpi_dma_controller_register(). DMA controller registered by this
* function are automatically freed on driver detach. See
* acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
*/
int devm_acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* @adma: struct acpi_dma of DMA controller
* @dma_spec: dma specifier to update
*
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
* According to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
* Descriptor":
* DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* mapping is done in a controller-specific OS driver.
* That's why we can safely adjust slave_id when the appropriate controller is
* found.
+ *
+ * Return:
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
*/
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
* @dev: struct device to get DMA request from
* @index: index of FixedDMA descriptor for @dev
*
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index)
@@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
* translate the names "tx" and "rx" here based on the most common case where
* the first FixedDMA descriptor is TX and second is RX.
*
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
const char *name)
@@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
* @adma: pointer to ACPI DMA controller data
*
* A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
*/
struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
struct acpi_dma *adma)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index ec4ee5c1fe9d..8114731a1c62 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,7 @@
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
return false;
}
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
/*
* Just check that the device is there and active
@@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
pl08x->pd->num_slave_channels, true);
- if (ret <= 0) {
+ if (ret < 0) {
dev_warn(&pl08x->adev->dev,
"%s failed to enumerate slave channels - %d\n",
__func__, ret);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644
index 000000000000..a03602164e3e
--- /dev/null
+++ b/drivers/dma/bcm2835-dma.c
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author: Florian Meier <florian.meier@koalo.de>
+ * Copyright 2013
+ *
+ * Based on
+ * OMAP DMAengine support by Russell King
+ *
+ * BCM2708 DMA Driver
+ * Copyright (C) 2010 Broadcom
+ *
+ * Raspberry Pi PCM I2S ALSA Driver
+ * Copyright (c) by Phil Poole 2013
+ *
+ * MARVELL MMP Peripheral DMA Driver
+ * Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+ struct dma_device ddev;
+ spinlock_t lock;
+ void __iomem *base;
+ struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+ uint32_t info;
+ uint32_t src;
+ uint32_t dst;
+ uint32_t length;
+ uint32_t stride;
+ uint32_t next;
+ uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+
+ struct dma_slave_config cfg;
+ bool cyclic;
+ unsigned int dreq;
+
+ int ch;
+ struct bcm2835_desc *desc;
+
+ void __iomem *chan_base;
+ int irq_number;
+};
+
+struct bcm2835_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction dir;
+
+ unsigned int control_block_size;
+ struct bcm2835_dma_cb *control_block_base;
+ dma_addr_t control_block_base_phys;
+
+ unsigned int frames;
+ size_t size;
+};
+
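+/* Per-channel register offsets */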
+#define BCM2835_DMA_CS 0x00
+#define BCM2835_DMA_ADDR 0x04
+#define BCM2835_DMA_SOURCE_AD 0x0c
+#define BCM2835_DMA_DEST_AD 0x10
+#define BCM2835_DMA_NEXTCB 0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE BIT(0)
+#define BCM2835_DMA_INT BIT(2)
+#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR BIT(8)
+#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN BIT(0)
+#define BCM2835_DMA_D_INC BIT(4)
+#define BCM2835_DMA_D_DREQ BIT(6)
+#define BCM2835_DMA_S_INC BIT(8)
+#define BCM2835_DMA_S_DREQ BIT(10)
+
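+/* Route a peripheral DREQ number into the PER_MAP field of the CB info word */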
+#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8 1
+#define BCM2835_DMA_DATA_TYPE_S16 2
+#define BCM2835_DMA_DATA_TYPE_S32 4
+#define BCM2835_DMA_DATA_TYPE_S128 16
+
+#define BCM2835_DMA_BULK_MASK BIT(0)
+#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+ struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+ struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+ dma_free_coherent(desc->vd.tx.chan->device->dev,
+ desc->control_block_size,
+ desc->control_block_base,
+ desc->control_block_base_phys);
+ kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+ unsigned long cs;
+ long int timeout = 10000;
+
+ cs = readl(chan_base + BCM2835_DMA_CS);
+ if (!(cs & BCM2835_DMA_ACTIVE))
+ return 0;
+
+ /* Write 0 to the active bit - Pause the DMA */
+ writel(0, chan_base + BCM2835_DMA_CS);
+
+ /* Wait for any current AXI transfer to complete */
+ while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+ cpu_relax();
+ cs = readl(chan_base + BCM2835_DMA_CS);
+ }
+
+ /* We'll un-pause when we set up our next DMA */
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ if (!(cs & BCM2835_DMA_ACTIVE))
+ return 0;
+
+ /* Terminate the control block chain */
+ writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+ /* Abort the whole DMA */
+ writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+ chan_base + BCM2835_DMA_CS);
+
+ return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+ struct bcm2835_desc *d;
+
+ if (!vd) {
+ c->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+ writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+ writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+ struct bcm2835_chan *c = data;
+ struct bcm2835_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Acknowledge interrupt */
+ writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+ d = c->desc;
+
+ if (d) {
+ /* TODO Only works for cyclic DMA */
+ vchan_cyclic_callback(&d->vd);
+ }
+
+ /* Keep the DMA engine running */
+ writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ dev_dbg(c->vc.chan.device->dev,
+ "Allocating DMA channel %d\n", c->ch);
+
+ return request_irq(c->irq_number,
+ bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_free_chan_resources(&c->vc);
+ free_irq(c->irq_number, c);
+
+ dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+ return d->size;
+}
+
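+/*
+ * Work out how many bytes are left in the descriptor when the hardware is
+ * currently at @addr: the frame containing @addr contributes only its
+ * untransferred tail, every following frame contributes its full length.
+ */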
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+ unsigned int i;
+ size_t size;
+
+ for (size = i = 0; i < d->frames; i++) {
+ struct bcm2835_dma_cb *control_block =
+ &d->control_block_base[i];
+ size_t this_size = control_block->length;
+ dma_addr_t dma;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ dma = control_block->dst;
+ else
+ dma = control_block->src;
+
+ if (size)
+ size += this_size;
+ else if (addr >= dma && addr < dma + this_size)
+ size += dma + this_size - addr;
+ }
+
+ return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ txstate->residue =
+ bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+ } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+ struct bcm2835_desc *d = c->desc;
+ dma_addr_t pos;
+
+ if (d->dir == DMA_MEM_TO_DEV)
+ pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+ else if (d->dir == DMA_DEV_TO_MEM)
+ pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+ else
+ pos = 0;
+
+ txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+ } else {
+ txstate->residue = 0;
+ }
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ unsigned long flags;
+
+ c->cyclic = true; /* Nothing else is implemented */
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc)
+ bcm2835_dma_start_desc(c);
+
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct bcm2835_desc *d;
+ dma_addr_t dev_addr;
+ unsigned int es, sync_type;
+ unsigned int frame;
+
+ /* Grab configuration */
+ if (!is_slave_direction(direction)) {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (direction == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ sync_type = BCM2835_DMA_S_DREQ;
+ } else {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ sync_type = BCM2835_DMA_D_DREQ;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = BCM2835_DMA_DATA_TYPE_S32;
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->dir = direction;
+ d->frames = buf_len / period_len;
+
+ /* Allocate memory for control blocks */
+ d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+ d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+ d->control_block_size, &d->control_block_base_phys,
+ GFP_NOWAIT);
+
+ if (!d->control_block_base) {
+ kfree(d);
+ return NULL;
+ }
+
+ /*
+ * Iterate over all frames, create a control block
+ * for each frame and link them together.
+ */
+ for (frame = 0; frame < d->frames; frame++) {
+ struct bcm2835_dma_cb *control_block =
+ &d->control_block_base[frame];
+
+ /* Set up addresses */
+ if (d->dir == DMA_DEV_TO_MEM) {
+ control_block->info = BCM2835_DMA_D_INC;
+ control_block->src = dev_addr;
+ control_block->dst = buf_addr + frame * period_len;
+ } else {
+ control_block->info = BCM2835_DMA_S_INC;
+ control_block->src = buf_addr + frame * period_len;
+ control_block->dst = dev_addr;
+ }
+
+ /* Enable interrupt */
+ control_block->info |= BCM2835_DMA_INT_EN;
+
+ /* Setup synchronization */
+ if (sync_type != 0)
+ control_block->info |= sync_type;
+
+ /* Setup DREQ channel */
+ if (c->dreq != 0)
+ control_block->info |=
+ BCM2835_DMA_PER_MAP(c->dreq);
+
+ /* Length of a frame */
+ control_block->length = period_len;
+ d->size += control_block->length;
+
+ /*
+ * Next block is the next frame.
+ * This DMA engine driver currently only supports cyclic DMA.
+ * Therefore, wrap around at number of frames.
+ */
+ control_block->next = d->control_block_base_phys +
+ sizeof(struct bcm2835_dma_cb)
+ * ((frame + 1) % d->frames);
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+ struct dma_slave_config *cfg)
+{
+ if ((cfg->direction == DMA_DEV_TO_MEM &&
+ cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (cfg->direction == DMA_MEM_TO_DEV &&
+ cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ !is_slave_direction(cfg->direction)) {
+ return -EINVAL;
+ }
+
+ c->cfg = *cfg;
+
+ return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+ struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+ int timeout = 10000;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+ * after bcm2835_dma_abort() returns (even if it does, it will see
+ * c->desc is NULL and exit).
+ */
+ if (c->desc) {
+ c->desc = NULL;
+ bcm2835_dma_abort(c->chan_base);
+
+ /* Wait for stopping */
+ while (--timeout) {
+ if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+ BCM2835_DMA_ACTIVE))
+ break;
+
+ cpu_relax();
+ }
+
+ if (!timeout)
+ dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return bcm2835_dma_slave_config(c,
+ (struct dma_slave_config *)arg);
+
+ case DMA_TERMINATE_ALL:
+ return bcm2835_dma_terminate_all(c);
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+ struct bcm2835_chan *c;
+
+ c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->vc.desc_free = bcm2835_dma_desc_free;
+ vchan_init(&c->vc, &d->ddev);
+ INIT_LIST_HEAD(&c->node);
+
+ d->ddev.chancnt++;
+
+ c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+ c->ch = chan_id;
+ c->irq_number = irq;
+
+ return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+ { .compatible = "brcm,bcm2835-dma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+ struct of_dma *ofdma)
+{
+ struct bcm2835_dmadev *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&d->ddev);
+ if (!chan)
+ return NULL;
+
+ /* Set DREQ from param */
+ to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+ return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+ struct bcm2835_dmadev *od;
+ struct resource *res;
+ void __iomem *base;
+ int rc;
+ int i;
+ int irq;
+ uint32_t chans_available;
+
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ pdev->dev.dma_parms = &od->dma_parms;
+ dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ od->base = base;
+
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+ od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+ od->ddev.device_tx_status = bcm2835_dma_tx_status;
+ od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+ od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+ od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+ od->ddev.device_control = bcm2835_dma_control;
+ od->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&od->ddev.channels);
+ spin_lock_init(&od->lock);
+
+ platform_set_drvdata(pdev, od);
+
+ /* Request DMA channel mask from device tree */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "brcm,dma-channel-mask",
+ &chans_available)) {
+ dev_err(&pdev->dev, "Failed to get channel mask\n");
+ rc = -EINVAL;
+ goto err_no_dma;
+ }
+
+ /*
+ * Do not use the FIQ and BULK channels,
+ * because they are used by the GPU.
+ */
+ chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+
+ if (chans_available & (1 << i)) {
+ rc = bcm2835_dma_chan_init(od, i, irq);
+ if (rc)
+ goto err_no_dma;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+ /* Device-tree DMA controller registration */
+ rc = of_dma_controller_register(pdev->dev.of_node,
+ bcm2835_dma_xlate, od);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register DMA controller\n");
+ goto err_no_dma;
+ }
+
+ rc = dma_async_device_register(&od->ddev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to register slave DMA engine device: %d\n", rc);
+ goto err_no_dma;
+ }
+
+ dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+ return 0;
+
+err_no_dma:
+ bcm2835_dma_free(od);
+ return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+ struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->ddev);
+ bcm2835_dma_free(od);
+
+ return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+ .probe = bcm2835_dma_probe,
+ .remove = bcm2835_dma_remove,
+ .driver = {
+ .name = "bcm2835-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ },
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c29dacff66fa..c18aebf7d5aa 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_chans;
irq = irq_of_parse_and_map(dev->of_node, 0);
- if (!irq)
+ if (!irq) {
+ ret = -EINVAL;
goto err_irq;
+ }
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 92caad629d99..ed610b497518 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -535,6 +535,34 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
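+/**
+ * dma_get_any_slave_channel - try to get any slave-capable channel of a device
+ * @device: DMA device to pick a channel from
+ *
+ * Return: the acquired channel, or NULL if no channel is available.
+ */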
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ int err;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ chan = private_candidate(&mask, device, NULL, NULL);
+ if (chan) {
+ err = dma_chan_get(chan);
+ if (err) {
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ chan = NULL;
+ }
+ }
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
/**
* __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 9dfcaf5c1288..05b6dea770a4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel),
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
-static char test_device[20];
+static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
struct dmatest_params {
unsigned int buf_size;
char channel[20];
- char device[20];
+ char device[32];
unsigned int threads_per_chan;
unsigned int max_channels;
unsigned int iterations;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7516be4677cf..13ac3f240e79 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 ctllo;
- /* Software emulation of LLP mode relies on interrupts to continue
- * multi block transfer. */
+ /*
+ * Software emulation of LLP mode relies on interrupts to continue
+ * multi block transfer.
+ */
ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
&dwc->flags);
if (was_soft_llp) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start new LLP transfer "
- "inside ongoing one\n");
+ "BUG: Attempted to start new LLP transfer inside ongoing one\n");
return;
}
@@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
- dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
- (unsigned long long)llp);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
/* Initial residue value */
@@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
unlikely(status_xfer & dwc->mask)) {
int i;
- dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
- "interrupt, stopping DMA transfer\n",
- status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
spin_lock_irqsave(&dwc->lock, flags);
@@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
u32 ctllo;
dev_vdbg(chan2dev(chan),
- "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
- (unsigned long long)dest, (unsigned long long)src,
- len, flags);
+ "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+ &dest, &src, len, flags);
if (unlikely(!len)) {
dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
/* Let's make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
- dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
- "period %zu periods %d\n", (unsigned long long)buf_addr,
- buf_len, period_len, periods);
+ dev_dbg(chan2dev(&dwc->chan),
+ "cyclic prepared buf %pad len %zu period %zu periods %d\n",
+ &buf_addr, buf_len, period_len, periods);
cdesc->periods = periods;
dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
- /* Decode maximum block size for given channel. The
+ /*
+ * Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
- * up to 0x0a for 4095. */
+ * up to 0x0a for 4095.
+ */
dwc->block_size =
(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
dwc->nollp =
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 2539ea0cbc63..cd8da451d199 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
echan->alloced = true;
echan->slot[0] = echan->ch_num;
- dev_info(dev, "allocated channel for %u:%u\n",
- EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+ dev_dbg(dev, "allocated channel for %u:%u\n",
+ EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
@@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+ dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 1ffc24484d23..d56e83599825 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -41,7 +41,7 @@
* channel is allowed to transfer before the DMA engine pauses
* the current channel and switches to the next channel
*/
-#define FSL_DMA_MR_BWC 0x08000000
+#define FSL_DMA_MR_BWC 0x0A000000
/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE 0x00000080
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c75679d42028..19041cefabb1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -323,6 +323,7 @@ struct sdma_engine {
struct clk *clk_ipg;
struct clk *clk_ahb;
spinlock_t channel_0_lock;
+ u32 script_number;
struct sdma_script_start_addrs *script_addrs;
const struct sdma_driver_data *drvdata;
};
@@ -448,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -724,6 +726,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->app_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_app_addr;
break;
+ case IMX_DMATYPE_SSI_DUAL:
+ per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+ break;
case IMX_DMATYPE_SSI_SP:
case IMX_DMATYPE_MMC:
case IMX_DMATYPE_SDHC:
@@ -1238,6 +1244,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1253,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
s32 *saddr_arr = (u32 *)sdma->script_addrs;
int i;
- for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ /* use the default firmware in ROM if missing external firmware */
+ if (!sdma->script_number)
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+ for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
}
@@ -1272,6 +1283,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
goto err_firmware;
if (header->ram_code_start + header->ram_code_size > fw->size)
goto err_firmware;
+ switch (header->version_major) {
+ case 1:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ break;
+ case 2:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+ break;
+ default:
+ dev_err(sdma->dev, "unknown firmware version\n");
+ goto err_firmware;
+ }
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efcc..4e3549a16132 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+ msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca9..e982f00a9843 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e976..8d1058085eeb 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e62..b9b38a1cf92f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
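The ioat changes above replace tasklet_disable()/tasklet_enable() with an IOAT_RUN state bit and a shared ioat_stop() teardown, and the ordering is the point: clear the run bit first so neither the interrupt handler nor the cleanup tasklet can re-arm anything, then flush interrupts, timers and tasklet runs. A hedged sketch of that shutdown order on an illustrative channel structure (not the real ioat types):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/timer.h>

struct demo_chan {
	unsigned long state;		/* bit 0 acts as the RUN flag */
	unsigned int irq;
	struct timer_list timer;
	struct tasklet_struct cleanup;
};

static void demo_chan_stop(struct demo_chan *c)
{
	clear_bit(0, &c->state);	/* 1. stop irq/tasklet from re-arming */
	synchronize_irq(c->irq);	/* 2. flush in-flight interrupts */
	del_timer_sync(&c->timer);	/* 3. flush in-flight timers */
	tasklet_kill(&c->cleanup);	/* 4. flush in-flight tasklet runs */
}

The cleanup handlers above re-check the RUN bit before rewriting IOAT_CHANCTRL_RUN for the same reason: once the bit is cleared, nothing may re-arm the hardware.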
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index e26075408e9b..a1f911aaf220 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
dma_addr_t addr, src = 0, dst = 0;
int num = sglen, i;
- if (sgl == 0)
+ if (sgl == NULL)
return NULL;
for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
static struct platform_driver k3_pdma_driver = {
.driver = {
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8869500ab92b..b439679f4126 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,38 +33,37 @@
#define DTADR 0x0208
#define DCMD 0x020c
-#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
-#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
-#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
-#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
-#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
-#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
-#define DCSR_EORINTR (1 << 9) /* The end of Receive */
-
-#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
- (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
-#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
+#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
+#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
+#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR BIT(9) /* The end of Receive */
+
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
-#define DDADR_STOP (1 << 0) /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
-#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
-#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
-#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
+#define DDADR_STOP BIT(0) /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx) \
+ container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) \
+ container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) \
+ container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) \
+ container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN,
- phy->base + reg);
+ writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
u32 reg;
- if (phy) {
- reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN,
- phy->base + reg);
- }
+ if (!phy)
+ return;
+
+ reg = (phy->idx << 2) + DCSR;
+ writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
u32 dint = readl(phy->base + DINT);
u32 reg = (phy->idx << 2) + DCSR;
- if (dint & BIT(phy->idx)) {
- /* clear irq */
- dcsr = readl(phy->base + reg);
- writel(dcsr, phy->base + reg);
- if ((dcsr & DCSR_BUSERR) && (phy->vchan))
- dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
- return 0;
- }
- return -EAGAIN;
+ if (!(dint & BIT(phy->idx)))
+ return -EAGAIN;
+
+ /* clear irq */
+ dcsr = readl(phy->base + reg);
+ writel(dcsr, phy->base + reg);
+ if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+ return 0;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
struct mmp_pdma_phy *phy = dev_id;
- if (clear_chan_irq(phy) == 0) {
- tasklet_schedule(&phy->vchan->tasklet);
- return IRQ_HANDLED;
- } else
+ if (clear_chan_irq(phy) != 0)
return IRQ_NONE;
+
+ tasklet_schedule(&phy->vchan->tasklet);
+ return IRQ_HANDLED;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
if (irq_num)
return IRQ_HANDLED;
- else
- return IRQ_NONE;
+
+ return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
*/
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
- if (prio != ((i & 0xf) >> 2))
+ if (prio != (i & 0xf) >> 2)
continue;
phy = &pdev->phy[i];
if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->desc_pool)
return 1;
- chan->desc_pool =
- dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
- sizeof(struct mmp_pdma_desc_sw),
- __alignof__(struct mmp_pdma_desc_sw), 0);
+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+ chan->dev,
+ sizeof(struct mmp_pdma_desc_sw),
+ __alignof__(struct mmp_pdma_desc_sw),
+ 0);
if (!chan->desc_pool) {
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
+
mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
- struct list_head *list)
+ struct list_head *list)
{
struct mmp_pdma_desc_sw *desc, *_desc;
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
- dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len, unsigned long flags)
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction dir,
- unsigned long flags, void *context)
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
return NULL;
}
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
- struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+ dma_addr_t buf_addr, size_t len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
goto fail;
}
- new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
- (DCMD_LENGTH & period_len);
+ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len));
new->desc.dsadr = dma_src;
new->desc.dtadr = dma_dst;
@@ -677,12 +684,11 @@ fail:
}
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
struct dma_slave_config *cfg = (void *)arg;
unsigned long flags;
- int ret = 0;
u32 maxburst = 0, addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
return -ENOSYS;
}
- return ret;
+ return 0;
}
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
return 0;
}
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
- int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
struct mmp_pdma_phy *phy = &pdev->phy[idx];
struct mmp_pdma_chan *chan;
int ret;
- chan = devm_kzalloc(pdev->dev,
- sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+ GFP_KERNEL);
if (chan == NULL)
return -ENOMEM;
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
phy->base = pdev->base;
if (irq) {
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_chan_handler, 0, "pdma", phy);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+ "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
INIT_LIST_HEAD(&chan->chain_running);
/* register virt channel to dma engine */
- list_add_tail(&chan->chan.device_node,
- &pdev->device.channels);
+ list_add_tail(&chan->chan.device_node, &pdev->device.channels);
return 0;
}
@@ -893,33 +898,15 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mmp_pdma_device *d = ofdma->of_dma_data;
- struct dma_chan *chan, *candidate;
-
-retry:
- candidate = NULL;
+ struct dma_chan *chan;
- /* walk the list of channels registered with the current instance and
- * find one that is currently unused */
- list_for_each_entry(chan, &d->device.channels, device_node)
- if (chan->client_count == 0) {
- candidate = chan;
- break;
- }
-
- if (!candidate)
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
return NULL;
- /* dma_get_slave_channel will return NULL if we lost a race between
- * the lookup and the reservation */
- chan = dma_get_slave_channel(candidate);
+ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
- if (chan) {
- struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
- c->drcmr = dma_spec->args[0];
- return chan;
- }
-
- goto retry;
+ return chan;
}
static int mmp_pdma_probe(struct platform_device *op)
@@ -934,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
if (!pdev)
return -ENOMEM;
+
pdev->dev = &op->dev;
spin_lock_init(&pdev->phy_lock);
@@ -945,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op)
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
- of_property_read_u32(pdev->dev->of_node,
- "#dma-channels", &dma_channels);
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
else if (pdata && pdata->dma_channels)
dma_channels = pdata->dma_channels;
else
@@ -958,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op)
irq_num++;
}
- pdev->phy = devm_kzalloc(pdev->dev,
- dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ pdev->phy = devm_kcalloc(pdev->dev,
+ dma_channels, sizeof(struct mmp_pdma_chan),
+ GFP_KERNEL);
if (pdev->phy == NULL)
return -ENOMEM;
@@ -968,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
if (irq_num != dma_channels) {
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
- ret = devm_request_irq(pdev->dev, irq,
- mmp_pdma_int_handler, 0, "pdma", pdev);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+ "pdma", pdev);
if (ret)
return ret;
}
@@ -1045,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
if (chan->device->dev->driver != &mmp_pdma_driver.driver)
return false;
- c->drcmr = *(unsigned int *) param;
+ c->drcmr = *(unsigned int *)param;
return true;
}
@@ -1053,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
module_platform_driver(mmp_pdma_driver);
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
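The xlate rework above drops the open-coded retry loop in favour of dma_get_any_slave_channel(), which picks a free channel on the controller and handles the lookup/reservation race internally. A hedged sketch of the pattern, assuming ofdma->of_dma_data points at the controller's struct dma_device (in the real driver it is the mmp_pdma_device wrapper):

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

static struct dma_chan *demo_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct dma_device *dmadev = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(dmadev);
	if (!chan)
		return NULL;

	/* the first DT cell carries the request line (drcmr in mmp_pdma) */
	/* to_demo_chan(chan)->request_line = dma_spec->args[0]; */
	return chan;
}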
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3ddacc14a736..33f96aaa80c7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -121,11 +121,13 @@ struct mmp_tdma_chan {
int idx;
enum mmp_tdma_type type;
int irq;
- unsigned long reg_base;
+ void __iomem *reg_base;
size_t buf_len;
size_t period_len;
size_t pos;
+
+ struct gen_pool *pool;
};
#define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
{
- unsigned int tdcr;
+ unsigned int tdcr = 0;
mmp_tdma_disable_chan(tdmac);
@@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
- gpool = sram_get_gpool("asram");
+ gpool = tdmac->pool;
if (tdmac->desc_arr)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
@@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
- gpool = sram_get_gpool("asram");
+ gpool = tdmac->pool;
if (!gpool)
return NULL;
@@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev)
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
- int idx, int irq, int type)
+ int idx, int irq,
+ int type, struct gen_pool *pool)
{
struct mmp_tdma_chan *tdmac;
@@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->chan.device = &tdev->device;
tdmac->idx = idx;
tdmac->type = type;
- tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
+ tdmac->reg_base = tdev->base + idx * 4;
+ tdmac->pool = pool;
tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
+ struct gen_pool *pool;
of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
if (of_id)
@@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&tdev->device.channels);
+ if (pdev->dev.of_node)
+ pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+ else
+ pool = sram_get_gpool("asram");
+ if (!pool) {
+ dev_err(&pdev->dev, "asram pool not available\n");
+ return -ENOMEM;
+ }
+
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
/* initialize channel parameters */
for (i = 0; i < chan_num; i++) {
irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
- ret = mmp_tdma_chan_init(tdev, i, irq, type);
+ ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
if (ret)
return ret;
}
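mmp_tdma now takes its descriptor memory from a gen_pool handed to mmp_tdma_chan_init(), looked up either from the DT "asram" phandle or from the legacy sram_get_gpool() call. A hedged sketch of carving a descriptor array out of such a pool (the helper names are illustrative):

#include <linux/genalloc.h>

static void *demo_desc_alloc(struct gen_pool *pool, size_t size,
			     phys_addr_t *phys)
{
	unsigned long va = gen_pool_alloc(pool, size);

	if (!va)
		return NULL;
	*phys = gen_pool_virt_to_phys(pool, va);
	return (void *)va;
}

static void demo_desc_free(struct gen_pool *pool, void *va, size_t size)
{
	gen_pool_free(pool, (unsigned long)va, size);
}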
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 000000000000..3258e484e4f6
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL 4
+
+#define REG_OFF_ADDRESS_SOURCE 0
+#define REG_OFF_ADDRESS_DEST 4
+#define REG_OFF_CYCLES 8
+#define REG_OFF_CTRL 12
+#define REG_OFF_CHAN_SIZE 16
+
+#define APB_DMA_ENABLE BIT(0)
+#define APB_DMA_FIN_INT_STS BIT(1)
+#define APB_DMA_FIN_INT_EN BIT(2)
+#define APB_DMA_BURST_MODE BIT(3)
+#define APB_DMA_ERR_INT_STS BIT(4)
+#define APB_DMA_ERR_INT_EN BIT(5)
+
+/*
+ * Unset: APB
+ * Set: AHB
+ */
+#define APB_DMA_SOURCE_SELECT 0x40
+#define APB_DMA_DEST_SELECT 0x80
+
+#define APB_DMA_SOURCE 0x100
+#define APB_DMA_DEST 0x1000
+
+#define APB_DMA_SOURCE_MASK 0x700
+#define APB_DMA_DEST_MASK 0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4 (Burst=1)
+ * 010: +2 (Burst=0), +8 (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4 (Burst=1)
+ * 110: -2 (Burst=0), -8 (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0 0
+#define APB_DMA_SOURCE_INC_1_4 0x100
+#define APB_DMA_SOURCE_INC_2_8 0x200
+#define APB_DMA_SOURCE_INC_4_16 0x300
+#define APB_DMA_SOURCE_DEC_1_4 0x500
+#define APB_DMA_SOURCE_DEC_2_8 0x600
+#define APB_DMA_SOURCE_DEC_4_16 0x700
+#define APB_DMA_DEST_INC_0 0
+#define APB_DMA_DEST_INC_1_4 0x1000
+#define APB_DMA_DEST_INC_2_8 0x2000
+#define APB_DMA_DEST_INC_4_16 0x3000
+#define APB_DMA_DEST_DEC_1_4 0x5000
+#define APB_DMA_DEST_DEC_2_8 0x6000
+#define APB_DMA_DEST_DEC_4_16 0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0: No request / Grant signal
+ * 1-15: Request / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO 0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
+#define APB_DMA_DEST_REQ_NO 0x10000
+#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
+
+#define APB_DMA_DATA_WIDTH 0x100000
+#define APB_DMA_DATA_WIDTH_MASK 0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4 0
+#define APB_DMA_DATA_WIDTH_2 0x100000
+#define APB_DMA_DATA_WIDTH_1 0x200000
+
+#define APB_DMA_CYCLES_MASK 0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8 0x00
+#define MOXART_DMA_DATA_TYPE_S16 0x01
+#define MOXART_DMA_DATA_TYPE_S32 0x02
+
+struct moxart_sg {
+ dma_addr_t addr;
+ uint32_t len;
+};
+
+struct moxart_desc {
+ enum dma_transfer_direction dma_dir;
+ dma_addr_t dev_addr;
+ unsigned int sglen;
+ unsigned int dma_cycles;
+ struct virt_dma_desc vd;
+ uint8_t es;
+ struct moxart_sg sg[0];
+};
+
+struct moxart_chan {
+ struct virt_dma_chan vc;
+
+ void __iomem *base;
+ struct moxart_desc *desc;
+
+ struct dma_slave_config cfg;
+
+ bool allocated;
+ bool error;
+ int ch_num;
+ unsigned int line_reqno;
+ unsigned int sgidx;
+};
+
+struct moxart_dmadev {
+ struct dma_device dma_slave;
+ struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+ struct moxart_dmadev *mdc;
+ struct of_phandle_args *dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+ [MOXART_DMA_DATA_TYPE_S8] = 1,
+ [MOXART_DMA_DATA_TYPE_S16] = 2,
+ [MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+ struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+ u32 ctrl;
+
+ dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+
+ if (ch->desc)
+ ch->desc = NULL;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+
+ vchan_get_all_descriptors(&ch->vc, &head);
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+ vchan_dma_desc_free_list(&ch->vc, &head);
+
+ return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ u32 ctrl;
+
+ ch->cfg = *cfg;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl |= APB_DMA_BURST_MODE;
+ ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+ ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+ switch (ch->cfg.src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl |= APB_DMA_DATA_WIDTH_1;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_1_4;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_1_4;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl |= APB_DMA_DATA_WIDTH_2;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_2_8;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_2_8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ ctrl &= ~APB_DMA_DATA_WIDTH;
+ if (ch->cfg.direction != DMA_MEM_TO_DEV)
+ ctrl |= APB_DMA_DEST_INC_4_16;
+ else
+ ctrl |= APB_DMA_SOURCE_INC_4_16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+ ctrl &= ~APB_DMA_DEST_SELECT;
+ ctrl |= APB_DMA_SOURCE_SELECT;
+ ctrl |= (ch->line_reqno << 16 &
+ APB_DMA_DEST_REQ_NO_MASK);
+ } else {
+ ctrl |= APB_DMA_DEST_SELECT;
+ ctrl &= ~APB_DMA_SOURCE_SELECT;
+ ctrl |= (ch->line_reqno << 24 &
+ APB_DMA_SOURCE_REQ_NO_MASK);
+ }
+
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+
+ return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ case DMA_RESUME:
+ return -EINVAL;
+ case DMA_TERMINATE_ALL:
+ moxart_terminate_all(chan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct moxart_desc *d;
+ enum dma_slave_buswidth dev_width;
+ dma_addr_t dev_addr;
+ struct scatterlist *sgent;
+ unsigned int es;
+ unsigned int i;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+ __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = ch->cfg.src_addr;
+ dev_width = ch->cfg.src_addr_width;
+ } else {
+ dev_addr = ch->cfg.dst_addr;
+ dev_width = ch->cfg.dst_addr_width;
+ }
+
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = MOXART_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = MOXART_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = MOXART_DMA_DATA_TYPE_S32;
+ break;
+ default:
+ dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+ __func__, dev_width);
+ return NULL;
+ }
+
+ d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dma_dir = dir;
+ d->dev_addr = dev_addr;
+ d->es = es;
+
+ for_each_sg(sgl, sgent, sg_len, i) {
+ d->sg[i].addr = sg_dma_address(sgent);
+ d->sg[i].len = sg_dma_len(sgent);
+ }
+
+ d->sglen = sg_len;
+
+ ch->error = 0;
+
+ return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct moxart_dmadev *mdc = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct moxart_chan *ch;
+
+ chan = dma_get_any_slave_channel(&mdc->dma_slave);
+ if (!chan)
+ return NULL;
+
+ ch = to_moxart_dma_chan(chan);
+ ch->line_reqno = dma_spec->args[0];
+
+ return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+ __func__, ch->ch_num);
+ ch->allocated = 1;
+
+ return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+ vchan_free_chan_resources(&ch->vc);
+
+ dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+ __func__, ch->ch_num);
+ ch->allocated = 0;
+}
+
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+ dma_addr_t dst_addr)
+{
+ writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+ writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+ struct moxart_desc *d = ch->desc;
+ unsigned int sglen_div = es_bytes[d->es];
+
+ d->dma_cycles = len >> sglen_div;
+
+ /*
+ * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes (when width is APB_DMA_DATA_WIDTH_4).
+ */
+ writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+ __func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+ u32 ctrl;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+ ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+ struct moxart_desc *d = ch->desc;
+ struct moxart_sg *sg = ch->desc->sg + idx;
+
+ if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+ moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+ else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+ moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+ moxart_set_transfer_params(ch, sg->len);
+
+ moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&ch->vc);
+
+ if (!vd) {
+ ch->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ ch->desc = to_moxart_dma_desc(&vd->tx);
+ ch->sgidx = 0;
+
+ moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ if (vchan_issue_pending(&ch->vc) && !ch->desc)
+ moxart_dma_start_desc(chan);
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+ unsigned int completed_sgs)
+{
+ unsigned int i;
+ size_t size;
+
+ for (size = i = completed_sgs; i < d->sglen; i++)
+ size += d->sg[i].len;
+
+ return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+ size_t size;
+ unsigned int completed_cycles, cycles;
+
+ size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+ cycles = readl(ch->base + REG_OFF_CYCLES);
+ completed_cycles = (ch->desc->dma_cycles - cycles);
+ size -= completed_cycles << es_bytes[ch->desc->es];
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+ return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct moxart_chan *ch = to_moxart_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ struct moxart_desc *d;
+ enum dma_status ret;
+ unsigned long flags;
+
+ /*
+ * dma_cookie_status() assigns initial residue value.
+ */
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ vd = vchan_find_desc(&ch->vc, cookie);
+ if (vd) {
+ d = to_moxart_dma_desc(&vd->tx);
+ txstate->residue = moxart_dma_desc_size(d, 0);
+ } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+ txstate->residue = moxart_dma_desc_size_in_flight(ch);
+ }
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+ if (ch->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+ dma->device_prep_slave_sg = moxart_prep_slave_sg;
+ dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+ dma->device_free_chan_resources = moxart_free_chan_resources;
+ dma->device_issue_pending = moxart_issue_pending;
+ dma->device_tx_status = moxart_tx_status;
+ dma->device_control = moxart_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+ struct moxart_dmadev *mc = devid;
+ struct moxart_chan *ch = &mc->slave_chans[0];
+ unsigned int i;
+ unsigned long flags;
+ u32 ctrl;
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ if (!ch->allocated)
+ continue;
+
+ ctrl = readl(ch->base + REG_OFF_CTRL);
+
+ dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+ __func__, ch, ch->base, ctrl);
+
+ if (ctrl & APB_DMA_FIN_INT_STS) {
+ ctrl &= ~APB_DMA_FIN_INT_STS;
+ if (ch->desc) {
+ spin_lock_irqsave(&ch->vc.lock, flags);
+ if (++ch->sgidx < ch->desc->sglen) {
+ moxart_dma_start_sg(ch, ch->sgidx);
+ } else {
+ vchan_cookie_complete(&ch->desc->vd);
+ moxart_dma_start_desc(&ch->vc.chan);
+ }
+ spin_unlock_irqrestore(&ch->vc.lock, flags);
+ }
+ }
+
+ if (ctrl & APB_DMA_ERR_INT_STS) {
+ ctrl &= ~APB_DMA_ERR_INT_STS;
+ ch->error = 1;
+ }
+
+ writel(ctrl, ch->base + REG_OFF_CTRL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *dma_base_addr;
+ int ret, i;
+ unsigned int irq;
+ struct moxart_chan *ch;
+ struct moxart_dmadev *mdc;
+
+ mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+ if (!mdc) {
+ dev_err(dev, "can't allocate DMA container\n");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == NO_IRQ) {
+ dev_err(dev, "no IRQ resource\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dma_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dma_base_addr))
+ return PTR_ERR(dma_base_addr);
+
+ dma_cap_zero(mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+ moxart_dma_init(&mdc->dma_slave, dev);
+
+ ch = &mdc->slave_chans[0];
+ for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+ ch->ch_num = i;
+ ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+ ch->allocated = 0;
+
+ ch->vc.desc_free = moxart_dma_desc_free;
+ vchan_init(&ch->vc, &mdc->dma_slave);
+
+ dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+ __func__, i, ch->ch_num, ch->base);
+ }
+
+ platform_set_drvdata(pdev, mdc);
+
+ ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+ "moxart-dma-engine", mdc);
+ if (ret) {
+ dev_err(dev, "devm_request_irq failed\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(&mdc->dma_slave);
+ if (ret) {
+ dev_err(dev, "dma_async_device_register failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+ if (ret) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&mdc->dma_slave);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+ return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&m->dma_slave);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+ { .compatible = "moxa,moxart-dma" },
+ { }
+};
+
+static struct platform_driver moxart_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_dma_match,
+ },
+};
+
+static int moxart_init(void)
+{
+ return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+ platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
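The new moxart driver plugs into the generic dmaengine slave API, so a peripheral driver consumes it like any other slave DMA controller. A hedged consumer-side sketch (the "tx" channel name, the addresses and the error handling are illustrative only):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int demo_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			 dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}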
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2f66cf4e54fe..362e7c49f2e1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+ dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
return omap_request_dma(c->dma_sig, "DMA engine",
omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c90edecee463..73fa9b7a10ab 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
/* DMA-Engine Channel */
struct dma_chan chan;
- /* List of to be xfered descriptors */
+ /* List of submitted descriptors */
+ struct list_head submitted_list;
+ /* List of issued descriptors */
struct list_head work_list;
/* List of completed descriptors */
struct list_head completed_list;
@@ -578,12 +580,16 @@ struct dma_pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
+ /* Holds info about sg limitations */
+ struct device_dma_parameters dma_parms;
+
/* Pool of descriptors available for the DMAC's channels */
struct list_head desc_pool;
/* To protect desc_pool manipulation */
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
+ unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
};
@@ -606,11 +612,6 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
-struct dma_pl330_filter_args {
- struct dma_pl330_dmac *pdmac;
- unsigned int chan_id;
-};
-
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
- struct dma_pl330_filter_args *fargs = param;
-
- if (chan->device != &fargs->pdmac->ddma)
- return false;
-
- return (chan->chan_id == fargs->chan_id);
-}
-
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
{
int count = dma_spec->args_count;
struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
- struct dma_pl330_filter_args fargs;
- dma_cap_mask_t cap;
-
- if (!pdmac)
- return NULL;
+ unsigned int chan_id;
if (count != 1)
return NULL;
- fargs.pdmac = pdmac;
- fargs.chan_id = dma_spec->args[0];
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
- dma_cap_set(DMA_CYCLIC, cap);
+ chan_id = dma_spec->args[0];
+ if (chan_id >= pdmac->num_peripherals)
+ return NULL;
- return dma_request_channel(cap, pl330_dt_filter, &fargs);
+ return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
+ list_for_each_entry(desc, &pch->submitted_list, node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
list_for_each_entry(desc, &pch->work_list , node) {
desc->status = FREE;
dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
dma_cookie_complete(&desc->txd);
}
+ list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
static void pl330_issue_pending(struct dma_chan *chan)
{
- pl330_tasklet((unsigned long) to_pchan(chan));
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+ list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long)pch);
}
/*
@@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_assign(&desc->txd);
- list_move_tail(&desc->node, &pch->work_list);
+ list_move_tail(&desc->node, &pch->submitted_list);
}
cookie = dma_cookie_assign(&last->txd);
- list_add_tail(&last->node, &pch->work_list);
+ list_add_tail(&last->node, &pch->submitted_list);
spin_unlock_irqrestore(&pch->lock, flags);
return cookie;
@@ -2960,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+ pdmac->num_peripherals = num_chan;
+
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pdmac->peripherals) {
ret = -ENOMEM;
@@ -2974,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
else
pch->chan.private = adev->dev.of_node;
+ INIT_LIST_HEAD(&pch->submitted_list);
INIT_LIST_HEAD(&pch->work_list);
INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
@@ -3021,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+
+ adev->dev.dma_parms = &pdmac->dma_parms;
+
/*
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
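pl330 now keeps two descriptor queues: tx_submit() parks work on submitted_list, and only pl330_issue_pending() moves it to work_list, so the hardware does not start until the client calls dma_async_issue_pending(). A minimal sketch of that two-stage hand-off (structure and function names are hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_pchan {
	spinlock_t lock;
	struct list_head submitted_list;	/* filled by tx_submit() */
	struct list_head work_list;		/* consumed by the tasklet */
};

static void demo_issue_pending(struct demo_pchan *pch)
{
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	/* everything submitted so far becomes eligible for the hardware */
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

Terminate-all has to drain both lists, which is why the hunk above also walks submitted_list before splicing it back into the descriptor pool.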
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8bba298535b0..ce7a8d7564ba 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4114,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
regs = ioremap(res.start, resource_size(&res));
if (!regs) {
dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+ ret = -ENOMEM;
goto err_regs_alloc;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 6aec3ad814d3..d4d3a3109b16 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+#define SIRFSOC_DMA_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
static int sirfsoc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma->device_tx_status = sirfsoc_dma_tx_status;
dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+ dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_SLAVE, dma->cap_mask);
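With device_slave_caps wired up, clients can ask the channel what it supports instead of hard-coding buswidths. A hedged sketch of such a query using the dma_get_slave_caps() helper from the same dmaengine API (the function name demo_supports_4byte is illustrative):

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static bool demo_supports_4byte(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
}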
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index b8c031b7de4e..bf18c786ed40 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
@@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
@@ -2409,6 +2411,7 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
+#define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
@@ -2446,6 +2449,9 @@ static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
cfg.use_fixed_channel = true;
}
+ if (D40_DT_FLAGS_HIGH_PRIO(flags))
+ cfg.high_priority = true;
+
return dma_request_channel(cap, stedma40_filter, &cfg);
}
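The dma_tasklet() change above snapshots whether the descriptor asked for a completion callback (DMA_PREP_INTERRUPT) while the descriptor is still owned under the channel lock, and only invokes the callback after the lock is dropped, when the descriptor may already have been recycled. A hedged sketch of that snapshot-then-call pattern (types are illustrative):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct demo_desc {
	unsigned long flags;		/* may carry DMA_PREP_INTERRUPT */
	void (*callback)(void *param);
	void *callback_param;
};

static void demo_complete(spinlock_t *lock, struct demo_desc *d)
{
	bool callback_active;
	void (*callback)(void *param);
	void *param;
	unsigned long irqflags;

	spin_lock_irqsave(lock, irqflags);
	callback_active = !!(d->flags & DMA_PREP_INTERRUPT);
	callback = d->callback;
	param = d->callback_param;
	/* the descriptor may be freed or reused once the lock is dropped */
	spin_unlock_irqrestore(lock, irqflags);

	if (callback_active && callback)
		callback(param);
}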
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 73654e33f13b..03ad64ecaaf0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1,7 +1,7 @@
/*
* DMA driver for Nvidia's Tegra20 APB DMA controller.
*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,11 +29,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/clk/tegra.h>
#include "dmaengine.h"
@@ -99,6 +100,11 @@
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
+/* Tegra148 specific registers */
+#define TEGRA_APBDMA_CHAN_WCOUNT 0x20
+
+#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24
+
/*
* If any burst is in flight and DMA paused then this is the time to complete
* on-flight burst and update DMA status register.
@@ -108,21 +114,22 @@
/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
-/* DMA channel register space size */
-#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
-
struct tegra_dma;
/*
* tegra_dma_chip_data Tegra chip specific DMA data
* @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size/stride.
* @max_dma_count: Maximum DMA transfer count supported by DMA controller.
* @support_channel_pause: Support channel wise pause of dma.
+ * @support_separate_wcount_reg: Support separate word count register.
*/
struct tegra_dma_chip_data {
int nr_channels;
+ int channel_reg_size;
int max_dma_count;
bool support_channel_pause;
+ bool support_separate_wcount_reg;
};
/* DMA channel registers */
@@ -132,6 +139,7 @@ struct tegra_dma_channel_regs {
unsigned long apb_ptr;
unsigned long ahb_seq;
unsigned long apb_seq;
+ unsigned long wcount;
};
/*
@@ -199,6 +207,7 @@ struct tegra_dma_channel {
void *callback_param;
/* Channel-slave specific configuration */
+ unsigned int slave_id;
struct dma_slave_config dma_sconfig;
struct tegra_dma_channel_regs channel_reg;
};
@@ -208,6 +217,7 @@ struct tegra_dma {
struct dma_device dma_dev;
struct device *dev;
struct clk *dma_clk;
+ struct reset_control *rst;
spinlock_t global_lock;
void __iomem *base_addr;
const struct tegra_dma_chip_data *chip_data;
@@ -339,6 +349,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ if (!tdc->slave_id)
+ tdc->slave_id = sconfig->slave_id;
tdc->config_init = true;
return 0;
}
@@ -421,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
/* Start DMA */
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -460,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
/* Safe to program new configuration */
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+ nsg_req->ch_regs.wcount);
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
@@ -713,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
struct tegra_dma_desc *dma_desc;
unsigned long flags;
unsigned long status;
+ unsigned long wcount;
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
@@ -733,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
tdc->isr_handler(tdc, true);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
}
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+ else
+ wcount = status;
was_busy = tdc->busy;
tegra_dma_stop(tdc);
@@ -741,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
sgreq = list_first_entry(&tdc->pending_sg_req,
typeof(*sgreq), node);
sgreq->dma_desc->bytes_transferred +=
- get_current_xferred_count(tdc, sgreq, status);
+ get_current_xferred_count(tdc, sgreq, wcount);
}
tegra_dma_resume(tdc);
@@ -903,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
return -EINVAL;
}
+static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
+ struct tegra_dma_channel_regs *ch_regs, u32 len)
+{
+ u32 len_field = (len - 4) & 0xFFFC;
+
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ ch_regs->wcount = len_field;
+ else
+ ch_regs->csr |= len_field;
+}
+
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags,
@@ -941,7 +974,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -986,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
sg_req->ch_regs.apb_ptr = apb_ptr;
sg_req->ch_regs.ahb_ptr = mem;
- sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.csr = csr;
+ tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
sg_req->ch_regs.apb_seq = apb_seq;
sg_req->ch_regs.ahb_seq = ahb_seq;
sg_req->configured = false;
@@ -1085,7 +1119,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
csr |= TEGRA_APBDMA_CSR_FLOW;
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1115,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
sg_req->ch_regs.apb_ptr = apb_ptr;
sg_req->ch_regs.ahb_ptr = mem;
- sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.csr = csr;
+ tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
sg_req->ch_regs.apb_seq = apb_seq;
sg_req->ch_regs.ahb_seq = ahb_seq;
sg_req->configured = false;
@@ -1205,32 +1240,69 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
kfree(sg_req);
}
clk_disable_unprepare(tdma->dma_clk);
+
+ tdc->slave_id = 0;
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_dma *tdma = ofdma->of_dma_data;
+ struct dma_chan *chan;
+ struct tegra_dma_channel *tdc;
+
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_dma_chan(chan);
+ tdc->slave_id = dma_spec->args[0];
+
+ return chan;
}
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.nr_channels = 16,
+ .channel_reg_size = 0x20,
.max_dma_count = 1024UL * 64,
.support_channel_pause = false,
+ .support_separate_wcount_reg = false,
};
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
+ .channel_reg_size = 0x20,
.max_dma_count = 1024UL * 64,
.support_channel_pause = false,
+ .support_separate_wcount_reg = false,
};
/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
.nr_channels = 32,
+ .channel_reg_size = 0x20,
+ .max_dma_count = 1024UL * 64,
+ .support_channel_pause = true,
+ .support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+ .nr_channels = 32,
+ .channel_reg_size = 0x40,
.max_dma_count = 1024UL * 64,
.support_channel_pause = true,
+ .support_separate_wcount_reg = true,
};
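The tegra_dma_chip_data structure these initializers fill is defined earlier in the file and is not part of this hunk; a rough sketch inferred from the fields used above (field types are assumptions):

	/* Sketch only; member types inferred, not taken from the source. */
	#include <stdbool.h>

	struct tegra_dma_chip_data {
		int nr_channels;
		int channel_reg_size;             /* new: per-channel register stride */
		int max_dma_count;
		bool support_channel_pause;
		bool support_separate_wcount_reg; /* new: Tegra148 WCOUNT register */
	};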
static const struct of_device_id tegra_dma_of_match[] = {
{
+ .compatible = "nvidia,tegra148-apbdma",
+ .data = &tegra148_dma_chip_data,
+ }, {
.compatible = "nvidia,tegra114-apbdma",
.data = &tegra114_dma_chip_data,
}, {
@@ -1282,6 +1354,12 @@ static int tegra_dma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->dma_clk);
}
+ tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
+ if (IS_ERR(tdma->rst)) {
+ dev_err(&pdev->dev, "Error: Missing reset\n");
+ return PTR_ERR(tdma->rst);
+ }
+
spin_lock_init(&tdma->global_lock);
pm_runtime_enable(&pdev->dev);
@@ -1302,9 +1380,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
}
/* Reset DMA controller */
- tegra_periph_reset_assert(tdma->dma_clk);
+ reset_control_assert(tdma->rst);
udelay(2);
- tegra_periph_reset_deassert(tdma->dma_clk);
+ reset_control_deassert(tdma->rst);
/* Enable global DMA registers */
tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
@@ -1318,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
struct tegra_dma_channel *tdc = &tdma->channels[i];
tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
- i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+ i * cdata->channel_reg_size;
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!res) {
@@ -1376,10 +1454,20 @@ static int tegra_dma_probe(struct platform_device *pdev)
goto err_irq;
}
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ tegra_dma_of_xlate, tdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Tegra20 APB DMA OF registration failed %d\n", ret);
+ goto err_unregister_dma_dev;
+ }
+
dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
cdata->nr_channels);
return 0;
+err_unregister_dma_dev:
+ dma_async_device_unregister(&tdma->dma_dev);
err_irq:
while (--i >= 0) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 85c19d63f9fb..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+ cookie = vd->tx.cookie;
dma_cookie_complete(&vd->tx);
dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
- vd, vd->tx.cookie);
+ vd, cookie);
list_add_tail(&vd->node, &vc->desc_completed);
tasklet_schedule(&vc->task);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 98e14ee4833c..f8bf00010d45 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1239,9 +1239,17 @@ static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
if (num_dcts_intlv == 2) {
select = (sys_addr >> 8) & 0x3;
channel = select ? 0x3 : 0;
- } else if (num_dcts_intlv == 4)
- channel = (sys_addr >> 8) & 0x7;
-
+ } else if (num_dcts_intlv == 4) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ switch (intlv_addr) {
+ case 0x4:
+ channel = (sys_addr >> 8) & 0x3;
+ break;
+ case 0x5:
+ channel = (sys_addr >> 9) & 0x3;
+ break;
+ }
+ }
return channel;
}
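For the new 4-DCT interleave cases above, the channel is simply taken from two address bits whose position depends on the interleave address select; a small standalone check with an illustrative address (not from the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t sys_addr = 0x300;              /* bits [9:8] = 0x3, bits [10:9] = 0x1 */

		assert(((sys_addr >> 8) & 0x3) == 0x3); /* dct_sel_interleave_addr == 0x4 */
		assert(((sys_addr >> 9) & 0x3) == 0x1); /* dct_sel_interleave_addr == 0x5 */
		return 0;
	}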
@@ -1799,6 +1807,17 @@ static struct amd64_family_type family_types[] = {
.read_dct_pci_cfg = f10_read_dct_pci_cfg,
}
},
+ [F16_M30H_CPUS] = {
+ .ctl_name = "F16h_M30h",
+ .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
};
/*
@@ -2578,6 +2597,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x16:
+ if (pvt->model == 0x30) {
+ fam_type = &family_types[F16_M30H_CPUS];
+ pvt->ops = &family_types[F16_M30H_CPUS].ops;
+ break;
+ }
fam_type = &family_types[F16_CPUS];
pvt->ops = &family_types[F16_CPUS].ops;
break;
@@ -2830,6 +2854,14 @@ static const struct pci_device_id amd64_pci_table[] = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
{0, }
};
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 6dc1fcc25afb..d903e0c21144 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -168,6 +168,8 @@
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
/*
* Function 1 - Address Map
@@ -300,6 +302,7 @@ enum amd_families {
F15_CPUS,
F15_M30H_CPUS,
F16_CPUS,
+ F16_M30H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index ddd890052ce2..2b63f7c2d6d2 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -350,6 +350,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
+ int ret = -ENODEV;
dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
dev_info->err_dev, NULL);
@@ -359,16 +360,15 @@ static int amd8111_dev_probe(struct pci_dev *dev,
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
- return -ENODEV;
+ goto err;
}
if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, dev_info->err_dev,
dev_info->ctl_name);
- return -ENODEV;
+ goto err_dev_put;
}
/*
@@ -381,8 +381,10 @@ static int amd8111_dev_probe(struct pci_dev *dev,
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
NULL, 0, 0,
NULL, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev)
- return -ENOMEM;
+ if (!dev_info->edac_dev) {
+ ret = -ENOMEM;
+ goto err_dev_put;
+ }
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->dev->dev;
@@ -399,8 +401,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
if (edac_device_add_device(dev_info->edac_dev) > 0) {
printk(KERN_ERR "failed to add edac_dev for %s\n",
dev_info->ctl_name);
- edac_device_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
+ goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_dev on AMD8111 "
@@ -409,6 +410,13 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->ctl_name);
return 0;
+
+err_edac_free_ctl:
+ edac_device_free_ctl_info(dev_info->edac_dev);
+err_dev_put:
+ pci_dev_put(dev_info->dev);
+err:
+ return ret;
}
static void amd8111_dev_remove(struct pci_dev *dev)
@@ -437,6 +445,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
+ int ret = -ENODEV;
pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
pci_info->err_dev, NULL);
@@ -446,16 +455,15 @@ static int amd8111_pci_probe(struct pci_dev *dev,
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
- return -ENODEV;
+ goto err;
}
if (pci_enable_device(pci_info->dev)) {
- pci_dev_put(pci_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, name %s\n",
PCI_VENDOR_ID_AMD, pci_info->err_dev,
pci_info->ctl_name);
- return -ENODEV;
+ goto err_dev_put;
}
/*
@@ -465,8 +473,10 @@ static int amd8111_pci_probe(struct pci_dev *dev,
*/
pci_info->edac_idx = edac_pci_alloc_index();
pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
- if (!pci_info->edac_dev)
- return -ENOMEM;
+ if (!pci_info->edac_dev) {
+ ret = -ENOMEM;
+ goto err_dev_put;
+ }
pci_info->edac_dev->pvt_info = pci_info;
pci_info->edac_dev->dev = &pci_info->dev->dev;
@@ -483,8 +493,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
printk(KERN_ERR "failed to add edac_pci for %s\n",
pci_info->ctl_name);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- return -ENODEV;
+ goto err_edac_free_ctl;
}
printk(KERN_INFO "added one edac_pci on AMD8111 "
@@ -493,6 +502,13 @@ static int amd8111_pci_probe(struct pci_dev *dev,
pci_info->ctl_name);
return 0;
+
+err_edac_free_ctl:
+ edac_pci_free_ctl_info(pci_info->edac_dev);
+err_dev_put:
+ pci_dev_put(pci_info->dev);
+err:
+ return ret;
}
static void amd8111_pci_remove(struct pci_dev *dev)
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 92d54fa65f93..b2d71388172b 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -209,7 +209,6 @@ enum e752x_chips {
*/
struct e752x_pvt {
- struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
struct pci_dev *dev_d0f1;
u32 tolm;
@@ -891,7 +890,7 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
info->buf_ferr);
if (info->dram_ferr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
info->dram_ferr, info->dram_ferr);
pci_write_config_dword(dev, E752X_FERR_GLOBAL,
@@ -936,7 +935,7 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
info->buf_nerr);
if (info->dram_nerr)
- pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+ pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
info->dram_nerr, info->dram_nerr);
pci_write_config_dword(dev, E752X_NERR_GLOBAL,
@@ -1177,38 +1176,33 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev,
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
struct e752x_pvt *pvt)
{
- struct pci_dev *dev;
-
- pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev, pvt->bridge_ck);
+ pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev, NULL);
- if (pvt->bridge_ck == NULL) {
- pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ if (pvt->dev_d0f1 == NULL) {
+ pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
- pci_dev_get(pvt->bridge_ck);
+ pci_dev_get(pvt->dev_d0f1);
}
- if (pvt->bridge_ck == NULL) {
+ if (pvt->dev_d0f1 == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
- dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
e752x_devs[dev_idx].ctl_dev,
NULL);
- if (dev == NULL)
+ if (pvt->dev_d0f0 == NULL)
goto fail;
- pvt->dev_d0f0 = dev;
- pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
-
return 0;
fail:
- pci_dev_put(pvt->bridge_ck);
+ pci_dev_put(pvt->dev_d0f1);
return 1;
}
@@ -1385,7 +1379,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
fail:
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
return -ENODEV;
@@ -1419,7 +1412,6 @@ static void e752x_remove_one(struct pci_dev *pdev)
pvt = (struct e752x_pvt *)mci->pvt_info;
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
- pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362acf5c..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if ((int)l != l)
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index fa1326e5a4b0..022a70273ada 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -464,6 +464,8 @@ static void i3200_remove_one(struct pci_dev *pdev)
iounmap(priv->window);
edac_mc_free(mci);
+
+ pci_disable_device(pdev);
}
static const struct pci_device_id i3200_pci_tbl[] = {
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 36a38ee94fa8..6247d186177e 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -869,16 +869,13 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
chan, rank, 0);
dimm->nr_pages = npages;
- if (npages) {
- dimm->grain = 32;
- dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
- DEV_X4 : DEV_X8;
- dimm->mtype = MEM_RDDR2;
- dimm->edac_mode = EDAC_SECDED;
- snprintf(dimm->label, sizeof(dimm->label),
- "DIMM%u",
- i5100_rank_to_slot(mci, chan, rank));
- }
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
chan, rank, (long)PAGES_TO_MiB(npages));
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index e080cbfa8fc9..5381e98d9c0c 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1408,6 +1408,8 @@ static void i5400_remove_one(struct pci_dev *pdev)
/* retrieve references to resources, and free those resources */
i5400_put_devices(mci);
+ pci_disable_device(pdev);
+
edac_mc_free(mci);
}
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index d63f4798f7d0..57e96a3350f0 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
+ pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
+ if (!pvt->pci_dev_16_1_fsb_addr_map)
+ pvt->pci_dev_16_1_fsb_addr_map =
+ pci_dev_get(pdev);
break;
case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
+ if (!pvt->pci_dev_16_2_fsb_err_regs)
+ pvt->pci_dev_16_2_fsb_err_regs =
+ pci_dev_get(pdev);
break;
}
}
+ if (!pvt->pci_dev_16_1_fsb_addr_map ||
+ !pvt->pci_dev_16_2_fsb_err_regs) {
+ /* At least one device was not found */
+ i7300_printk(KERN_ERR,
+ "'system address,Process Bus' device not found:"
+ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+ goto error;
+ }
+
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 87533ca7752e..8bc83b99974b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+ }
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
+ !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
+ }
if (!pdev) {
if (*prev) {
@@ -1703,7 +1708,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err;
+ char *optype, *err;
enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
bool uncorrected_error = m->mcgstatus & 1ll << 61;
@@ -1716,15 +1721,11 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
- if (ripv) {
- type = "FATAL";
+ if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
- } else {
- type = "NON_FATAL";
+ else
tp_event = HW_EVENT_ERR_UNCORRECTED;
- }
} else {
- type = "CORRECTED";
tp_event = HW_EVENT_ERR_CORRECTED;
}
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 80573df0a4d7..8d0450b9b9af 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -406,8 +406,6 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
edac_dbg(0, "\n");
- ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
-
if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 30f7309446a6..51b9caa0b024 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -741,6 +741,36 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (amd_filter_mce(m))
return NOTIFY_STOP;
+ pr_emerg(HW_ERR "%s\n", decode_error_status(m));
+
+ pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
+ m->extcpu,
+ c->x86, c->x86_model, c->x86_mask,
+ m->bank,
+ ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
+ ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
+ ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
+ ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
+ ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
+
+ if (c->x86 == 0x15 || c->x86 == 0x16)
+ pr_cont("|%s|%s",
+ ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
+ ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+
+ /* do the two bits[14:13] together */
+ ecc = (m->status >> 45) & 0x3;
+ if (ecc)
+ pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
+
+ pr_cont("]: 0x%016llx\n", m->status);
+
+ if (m->status & MCI_STATUS_ADDRV)
+ pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
+
+ if (!fam_ops)
+ goto err_code;
+
switch (m->bank) {
case 0:
decode_mc0_mce(m);
@@ -774,33 +804,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
}
- pr_emerg(HW_ERR "Error Status: %s\n", decode_error_status(m));
-
- pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
- m->extcpu,
- c->x86, c->x86_model, c->x86_mask,
- m->bank,
- ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
- ((m->status & MCI_STATUS_UC) ? "UE" : "CE"),
- ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"),
- ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
- ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
-
- if (c->x86 == 0x15 || c->x86 == 0x16)
- pr_cont("|%s|%s",
- ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
- ((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
-
- /* do the two bits[14:13] together */
- ecc = (m->status >> 45) & 0x3;
- if (ecc)
- pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
-
- pr_cont("]: 0x%016llx\n", m->status);
-
- if (m->status & MCI_STATUS_ADDRV)
- pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr);
-
+ err_code:
amd_decode_err_code(m->status & 0xffff);
return NOTIFY_STOP;
@@ -816,10 +820,7 @@ static int __init mce_amd_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (c->x86 < 0xf || c->x86 > 0x16)
- return 0;
+ return -ENODEV;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
if (!fam_ops)
@@ -874,7 +875,7 @@ static int __init mce_amd_init(void)
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
kfree(fam_ops);
- return -EINVAL;
+ fam_ops = NULL;
}
pr_info("MCE: In-kernel MCE decoding enabled.\n");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8f9182179a7c..f4aec2e6ef56 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -357,7 +357,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_pci_isr,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
@@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
- mpc85xx_l2_isr, IRQF_DISABLED,
+ mpc85xx_l2_isr, 0,
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
@@ -1133,7 +1133,7 @@ static int mpc85xx_mc_err_probe(struct platform_device *op)
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
res = devm_request_irq(&op->dev, pdata->irq,
mpc85xx_mc_isr,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"[EDAC] MC err", mci);
if (res < 0) {
printk(KERN_ERR "%s: Unable to request irq %d for "
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 93412d6b3af1..4bd10f94f068 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -5,12 +5,16 @@
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (c) 2013 by Cisco Systems, Inc.
+ * All rights reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
+#include <linux/ctype.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
@@ -20,6 +24,18 @@
#define OCTEON_MAX_MC 4
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+struct octeon_lmc_pvt {
+ unsigned long inject;
+ unsigned long error_type;
+ unsigned long dimm;
+ unsigned long rank;
+ unsigned long bank;
+ unsigned long row;
+ unsigned long col;
+};
+
static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
{
union cvmx_lmcx_mem_cfg0 cfg0;
@@ -55,14 +71,31 @@ static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
{
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
union cvmx_lmcx_int int_reg;
bool do_clear = false;
char msg[64];
- int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ if (!pvt->inject)
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ else {
+ if (pvt->error_type == 1)
+ int_reg.s.sec_err = 1;
+ if (pvt->error_type == 2)
+ int_reg.s.ded_err = 1;
+ }
+
if (int_reg.s.sec_err || int_reg.s.ded_err) {
union cvmx_lmcx_fadr fadr;
- fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ if (likely(!pvt->inject))
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ else {
+ fadr.cn61xx.fdimm = pvt->dimm;
+ fadr.cn61xx.fbunk = pvt->rank;
+ fadr.cn61xx.fbank = pvt->bank;
+ fadr.cn61xx.frow = pvt->row;
+ fadr.cn61xx.fcol = pvt->col;
+ }
snprintf(msg, sizeof(msg),
"DIMM %d rank %d bank %d row %d col %d",
fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
@@ -82,8 +115,128 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
int_reg.s.ded_err = -1; /* Done, re-arm */
do_clear = true;
}
- if (do_clear)
- cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+
+ if (do_clear) {
+ if (likely(!pvt->inject))
+ cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+ else
+ pvt->inject = 0;
+ }
+}
+
+/************************ MC SYSFS parts ***********************************/
+
+/* Only a couple naming differences per template, so very similar */
+#define TEMPLATE_SHOW(reg) \
+static ssize_t octeon_mc_inject_##reg##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *data) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct octeon_lmc_pvt *pvt = mci->pvt_info; \
+ return sprintf(data, "%016llu\n", (u64)pvt->reg); \
+}
+
+#define TEMPLATE_STORE(reg) \
+static ssize_t octeon_mc_inject_##reg##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *data, size_t count) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct octeon_lmc_pvt *pvt = mci->pvt_info; \
+ if (isdigit(*data)) { \
+ if (!kstrtoul(data, 0, &pvt->reg)) \
+ return count; \
+ } \
+ return 0; \
+}
+
+TEMPLATE_SHOW(inject);
+TEMPLATE_STORE(inject);
+TEMPLATE_SHOW(dimm);
+TEMPLATE_STORE(dimm);
+TEMPLATE_SHOW(bank);
+TEMPLATE_STORE(bank);
+TEMPLATE_SHOW(rank);
+TEMPLATE_STORE(rank);
+TEMPLATE_SHOW(row);
+TEMPLATE_STORE(row);
+TEMPLATE_SHOW(col);
+TEMPLATE_STORE(col);
+
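For readability, the first pair of invocations above expands to roughly the following (a sketch of the preprocessor output, relying on the kernel types already in scope in this file):

	static ssize_t octeon_mc_inject_inject_show(struct device *dev,
						    struct device_attribute *attr,
						    char *data)
	{
		struct mem_ctl_info *mci = to_mci(dev);
		struct octeon_lmc_pvt *pvt = mci->pvt_info;

		return sprintf(data, "%016llu\n", (u64)pvt->inject);
	}

	static ssize_t octeon_mc_inject_inject_store(struct device *dev,
						     struct device_attribute *attr,
						     const char *data, size_t count)
	{
		struct mem_ctl_info *mci = to_mci(dev);
		struct octeon_lmc_pvt *pvt = mci->pvt_info;

		if (isdigit(*data)) {
			if (!kstrtoul(data, 0, &pvt->inject))
				return count;
		}
		return 0;
	}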
+static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data,
+ size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
+
+ if (!strncmp(data, "single", 6))
+ pvt->error_type = 1;
+ else if (!strncmp(data, "double", 6))
+ pvt->error_type = 2;
+
+ return count;
+}
+
+static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct octeon_lmc_pvt *pvt = mci->pvt_info;
+ if (pvt->error_type == 1)
+ return sprintf(data, "single");
+ else if (pvt->error_type == 2)
+ return sprintf(data, "double");
+
+ return 0;
+}
+
+static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
+static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
+static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
+static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
+static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
+static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_row_show, octeon_mc_inject_row_store);
+static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
+ octeon_mc_inject_col_show, octeon_mc_inject_col_store);
+
+
+static int octeon_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_error_type);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_dimm);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_rank);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_bank);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_row);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_col);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
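A hedged userspace sketch of how these injection attributes could be driven once the memory controller is registered; only the attribute names come from the patch, while the sysfs path and mc index are assumptions that vary per system:

	/* Assumed location: /sys/devices/system/edac/mc/mc0/<attr> */
	#include <stdio.h>

	static int write_attr(const char *name, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/edac/mc/mc0/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		write_attr("error_type", "single"); /* or "double" */
		write_attr("dimm", "0");
		write_attr("rank", "0");
		write_attr("inject", "1");          /* consumed on the next poll */
		return 0;
	}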
static int octeon_lmc_edac_probe(struct platform_device *pdev)
@@ -92,6 +245,8 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
struct edac_mc_layer layers[1];
int mc = pdev->id;
+ opstate_init();
+
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = 1;
layers[0].is_virt_csrow = false;
@@ -105,7 +260,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
- mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
@@ -122,6 +277,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (octeon_set_mc_sysfs_attributes(mci)) {
+ dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+ return -ENXIO;
+ }
+
+
cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
cfg0.s.intr_ded_ena = 0; /* We poll */
cfg0.s.intr_sec_ena = 0;
@@ -137,7 +298,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
- mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
if (!mci)
return -ENXIO;
@@ -154,6 +315,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (octeon_set_mc_sysfs_attributes(mci)) {
+ dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+ return -ENXIO;
+ }
+
+
en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
en.s.intr_ded_ena = 0; /* We poll */
en.s.intr_sec_ena = 0;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 54e2abe671f7..347c7a1c2725 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1263,7 +1263,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
struct pci_dev *pdev = NULL;
u8 bus = 0;
- sbridge_printk(KERN_INFO,
+ sbridge_printk(KERN_DEBUG,
"Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index bdb5a00f1dfa..be56e8ac95e6 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -14,10 +14,6 @@ if EXTCON
comment "Extcon Device Drivers"
-config OF_EXTCON
- def_tristate y
- depends on OF
-
config EXTCON_GPIO
tristate "GPIO extcon support"
depends on GPIOLIB
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 43eccc0e3448..bf7861ec0906 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,8 +2,6 @@
# Makefile for external connector class (extcon) devices
#
-obj-$(CONFIG_OF_EXTCON) += of_extcon.o
-
obj-$(CONFIG_EXTCON) += extcon-class.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c20602f601ee..98a14f6143a7 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -222,27 +222,19 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct snd_soc_dapm_context *dapm = arizona->dapm;
int ret;
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_force_enable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
}
}
@@ -304,16 +296,12 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 76322330cbd7..7ab21aa6eaa1 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -31,6 +31,7 @@
#include <linux/extcon.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/of.h>
/*
* extcon_cable_name suggests the standard cable names for commonly used
@@ -818,6 +819,47 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
+#ifdef CONFIG_OF
+/*
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree
+ * @dev - instance to the given device
+ * @index - index into list of extcon_dev
+ *
+ * return the instance of extcon device
+ */
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
+{
+ struct device_node *node;
+ struct extcon_dev *edev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device does not have a device node entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ node = of_parse_phandle(dev->of_node, "extcon", index);
+ if (!node) {
+ dev_err(dev, "failed to get phandle in %s node\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ edev = extcon_get_extcon_dev(node->name);
+ if (!edev) {
+ dev_err(dev, "unable to get extcon device : %s\n", node->name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return edev;
+}
+#else
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
+
static int __init extcon_class_init(void)
{
return create_extcon_class();
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index a63a6b21c9ad..13d522255d81 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -176,9 +176,7 @@ static int gpio_extcon_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops gpio_extcon_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
-};
+static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 2aea4bcdd7f3..ddff2b72f0a8 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -271,10 +271,7 @@ static int palmas_usb_resume(struct device *dev)
};
#endif
-static const struct dev_pm_ops palmas_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(palmas_usb_suspend,
- palmas_usb_resume)
-};
+static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
diff --git a/drivers/extcon/of_extcon.c b/drivers/extcon/of_extcon.c
deleted file mode 100644
index 72173ecbb311..000000000000
--- a/drivers/extcon/of_extcon.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * OF helpers for External connector (extcon) framework
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- * Kishon Vijay Abraham I <kishon@ti.com>
- *
- * Copyright (C) 2013 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/extcon.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/extcon/of_extcon.h>
-
-/*
- * of_extcon_get_extcon_dev - Get the name of extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
- *
- * return the instance of extcon device
- */
-struct extcon_dev *of_extcon_get_extcon_dev(struct device *dev, int index)
-{
- struct device_node *node;
- struct extcon_dev *edev;
- struct platform_device *extcon_parent_dev;
-
- if (!dev->of_node) {
- dev_dbg(dev, "device does not have a device node entry\n");
- return ERR_PTR(-EINVAL);
- }
-
- node = of_parse_phandle(dev->of_node, "extcon", index);
- if (!node) {
- dev_dbg(dev, "failed to get phandle in %s node\n",
- dev->of_node->full_name);
- return ERR_PTR(-ENODEV);
- }
-
- extcon_parent_dev = of_find_device_by_node(node);
- if (!extcon_parent_dev) {
- dev_dbg(dev, "unable to find device by node\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- edev = extcon_get_extcon_dev(dev_name(&extcon_parent_dev->dev));
- if (!edev) {
- dev_dbg(dev, "unable to get extcon device : %s\n",
- dev_name(&extcon_parent_dev->dev));
- return ERR_PTR(-ENODEV);
- }
-
- return edev;
-}
-EXPORT_SYMBOL_GPL(of_extcon_get_extcon_dev);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa409abe2..2c6d5e118ac1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
+static void fw_device_workfn(struct work_struct *work)
+{
+ struct fw_device *device = container_of(to_delayed_work(work),
+ struct fw_device, work);
+ device->workfn(work);
+}
+
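The same store-a-workfn-and-dispatch pattern replaces PREPARE_DELAYED_WORK throughout this patch (and again in sbp2.c below). A standalone userspace sketch of the idea, using a hand-rolled container_of and plain function pointers instead of the kernel workqueue API:

	#include <assert.h>
	#include <stddef.h>

	struct work { int pending; };

	struct fw_device_like {
		void (*workfn)(struct work *);
		struct work work;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static int ran;
	static void do_update(struct work *w) { (void)w; ran = 1; }

	/* Fixed trampoline: looks up the per-object workfn and calls it. */
	static void trampoline(struct work *w)
	{
		struct fw_device_like *dev =
			container_of(w, struct fw_device_like, work);

		dev->workfn(w);
	}

	int main(void)
	{
		struct fw_device_like dev = { .workfn = do_update };

		trampoline(&dev.work);	/* stands in for the workqueue calling fw_device_workfn */
		assert(ran == 1);
		return 0;
	}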
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
- INIT_DELAYED_WORK(&device->work, fw_device_init);
+ device->workfn = fw_device_init;
+ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 0e799516a2ab..eb6935c8ad94 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -523,11 +523,11 @@ static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);
const struct fw_address_region fw_high_memory_region =
- { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
+ { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
static const struct fw_address_region low_memory_region =
- { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
+ { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };
#if 0
const struct fw_address_region fw_private_region =
@@ -1217,7 +1217,7 @@ static void handle_low_memory(struct fw_card *card, struct fw_request *request,
}
static struct fw_address_handler low_memory = {
- .length = 0x000100000000ULL,
+ .length = FW_MAX_PHYSICAL_RANGE,
.address_callback = handle_low_memory,
};
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 515a42c786d0..c98764aeeec6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,6 +237,9 @@ static inline bool is_next_generation(int new_generation, int old_generation)
#define LOCAL_BUS 0xffc0
+/* arbitrarily chosen maximum range for physical DMA: 128 TB */
+#define FW_MAX_PHYSICAL_RANGE (128ULL << 40)
+
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
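A quick standalone check of the new constant and of the PhyUpperBound value derived from it later in ohci.c (the register appears to take the bound in 64 KiB units, judging by the old 0x00010000 value used for the 4 GB limit):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t range = 128ULL << 40;              /* FW_MAX_PHYSICAL_RANGE */

		assert(range == 0x0000800000000000ULL);     /* 128 TB */
		assert((range >> 16) == 0x80000000ULL);     /* written to OHCI1394_PhyUpperBound */
		assert(((4ULL << 30) >> 16) == 0x00010000); /* old 4 GB bound, for comparison */
		return 0;
	}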
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 6b895986dc22..4af0a7bad7f2 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fwnet_transmit_packet_failed(ptask);
-
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
errors_skipped = 0;
last_rcode = rcode;
- } else
+ } else {
errors_skipped++;
+ }
+ fwnet_transmit_packet_failed(ptask);
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6aa8a86cb83b..8db663219560 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -303,10 +302,7 @@ static const struct {
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
- QUIRK_PHY_LCTRL_TIMEOUT},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -370,6 +365,10 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
+static bool param_remote_dma;
+module_param_named(remote_dma, param_remote_dma, bool, 0444);
+MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
+
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
if (likely(!(param_debug &
@@ -2050,10 +2049,10 @@ static void bus_reset_work(struct work_struct *work)
be32_to_cpu(ohci->next_header));
}
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
-#endif
+ if (param_remote_dma) {
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ }
spin_unlock_irq(&ohci->lock);
@@ -2295,9 +2294,6 @@ static int ohci_enable(struct fw_card *card,
* TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
* cannot actually use the phy at that time. These need tens of
* milliseconds pause between LPS write and first phy access too.
- *
- * But do not wait for 50msec on Agere/LSI cards. Their phy
- * arbitration state machine may time out during such a long wait.
*/
reg_write(ohci, OHCI1394_HCControlSet,
@@ -2305,11 +2301,8 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+ for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
-
- for (lps = 0, i = 0; !lps && i < 150; i++) {
- msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
@@ -2363,7 +2356,7 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_FairnessControl, 0);
card->priority_budget_implemented = ohci->pri_req_max != 0;
- reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+ reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
@@ -2587,13 +2580,13 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
- return 0;
-#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, ret = 0;
+ if (param_remote_dma)
+ return 0;
+
/*
* FIXME: Make sure this bitmask is cleared when we clear the busReset
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
@@ -2622,7 +2615,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
-#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
@@ -3720,9 +3712,11 @@ static int pci_probe(struct pci_dev *dev,
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
"added OHCI v%x.%x device as card %d, "
- "%d IR + %d IT contexts, quirks 0x%x\n",
+ "%d IR + %d IT contexts, quirks 0x%x%s\n",
version >> 16, version & 0xff, ohci->card.index,
- ohci->n_ir, ohci->n_it, ohci->quirks);
+ ohci->n_ir, ohci->n_it, ohci->quirks,
+ reg_read(ohci, OHCI1394_PhyUpperBound) ?
+ ", >4 GB phys DMA" : "");
return 0;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029daf98c..7aef911fdc71 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 074787281c94..41983883cef4 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -108,9 +108,12 @@ config DMI_SYSFS
under /sys/firmware/dmi when this option is enabled and
loaded.
+config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ bool
+
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86
+ depends on X86 && ACPI
default n
help
This option enables the kernel to find the region of memory
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 1b5e8e46226d..7160c43c59fc 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -584,7 +584,7 @@ static struct platform_driver dcdbas_driver = {
.remove = dcdbas_remove,
};
-static const struct platform_device_info dcdbas_dev_info __initdata = {
+static const struct platform_device_info dcdbas_dev_info __initconst = {
.name = DRIVER_NAME,
.id = -1,
.dma_mask = DMA_BIT_MASK(32),
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c7e81ff8f3ef..17afc51f3054 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -116,7 +116,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
{
u8 *buf;
- buf = dmi_ioremap(dmi_base, dmi_len);
+ buf = dmi_early_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
@@ -124,7 +124,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
add_device_randomness(buf, dmi_len);
- dmi_iounmap(buf, dmi_len);
+ dmi_early_unmap(buf, dmi_len);
return 0;
}
@@ -527,18 +527,18 @@ void __init dmi_scan_machine(void)
* needed during early boot. This also means we can
* iounmap the space when we're done with it.
*/
- p = dmi_ioremap(efi.smbios, 32);
+ p = dmi_early_remap(efi.smbios, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
- dmi_iounmap(p, 32);
+ dmi_early_unmap(p, 32);
if (!dmi_present(buf)) {
dmi_available = 1;
goto out;
}
- } else {
- p = dmi_ioremap(0xF0000, 0x10000);
+ } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
+ p = dmi_early_remap(0xF0000, 0x10000);
if (p == NULL)
goto error;
@@ -554,12 +554,12 @@ void __init dmi_scan_machine(void)
memcpy_fromio(buf + 16, q, 16);
if (!dmi_present(buf)) {
dmi_available = 1;
- dmi_iounmap(p, 0x10000);
+ dmi_early_unmap(p, 0x10000);
goto out;
}
memcpy(buf, buf + 16, 16);
}
- dmi_iounmap(p, 0x10000);
+ dmi_early_unmap(p, 0x10000);
}
error:
pr_info("DMI not present or invalid.\n");
@@ -831,13 +831,13 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
if (!dmi_available)
return -1;
- buf = ioremap(dmi_base, dmi_len);
+ buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
dmi_table(buf, dmi_len, dmi_num, decode, private_data);
- iounmap(buf);
+ dmi_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(dmi_walk);
diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
index b6bffbfd3be7..ff50aeebf0d9 100644
--- a/drivers/firmware/efi/efi-stub-helper.c
+++ b/drivers/firmware/efi/efi-stub-helper.c
@@ -16,18 +16,6 @@ struct file_info {
u64 size;
};
-
-
-
-static void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
-
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- efi_call_phys2(out->output_string, out, str);
-}
-
static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
{
char *s8;
@@ -65,20 +53,23 @@ again:
* allocation which may be in a new descriptor region.
*/
*map_size += sizeof(*m);
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, *map_size, (void **)&m);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ *map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
- map_size, m, &key, desc_size, &desc_version);
+ *desc_size = 0;
+ key = 0;
+ status = efi_call_early(get_memory_map, map_size, m,
+ &key, desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
goto again;
}
if (status != EFI_SUCCESS)
- efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+ efi_call_early(free_pool, m);
+
if (key_ptr && status == EFI_SUCCESS)
*key_ptr = key;
if (desc_ver && status == EFI_SUCCESS)
@@ -158,7 +149,7 @@ again:
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
@@ -170,8 +161,7 @@ again:
*addr = max_addr;
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
-
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -231,7 +221,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
if ((start + size) > end)
continue;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &start);
if (status == EFI_SUCCESS) {
@@ -243,7 +233,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+ efi_call_early(free_pool, map);
fail:
return status;
}
@@ -257,7 +247,7 @@ static void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
return;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+ efi_call_early(free_pages, addr, nr_pages);
}
@@ -276,9 +266,7 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
{
struct file_info *files;
unsigned long file_addr;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
u64 file_size_total;
- efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_status_t status;
int nr_files;
@@ -319,10 +307,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA,
- nr_files * sizeof(*files),
- (void **)&files);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
goto fail;
@@ -331,13 +317,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
str = cmd_line;
for (i = 0; i < nr_files; i++) {
struct file_info *file;
- efi_file_handle_t *h;
- efi_file_info_t *info;
efi_char16_t filename_16[256];
- unsigned long info_sz;
- efi_guid_t info_guid = EFI_FILE_INFO_ID;
efi_char16_t *p;
- u64 file_sz;
str = strstr(str, option_string);
if (!str)
@@ -368,71 +349,18 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- efi_boot_services_t *boottime;
-
- boottime = sys_table_arg->boottime;
-
- status = efi_call_phys3(boottime->handle_protocol,
- image->device_handle, &fs_proto,
- (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- goto free_files;
- }
-
- status = efi_call_phys2(io->open_volume, io, &fh);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open volume\n");
+ status = efi_open_volume(sys_table_arg, image,
+ (void **)&fh);
+ if (status != EFI_SUCCESS)
goto free_files;
- }
}
- status = efi_call_phys5(fh->open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ status = efi_file_size(sys_table_arg, fh, filename_16,
+ (void **)&file->handle, &file->size);
+ if (status != EFI_SUCCESS)
goto close_handles;
- }
- file->handle = h;
-
- info_sz = 0;
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, NULL);
- if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
- goto close_handles;
- }
-
-grow:
- status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
- EFI_LOADER_DATA, info_sz,
- (void **)&info);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
- goto close_handles;
- }
-
- status = efi_call_phys4(h->get_info, h, &info_guid,
- &info_sz, info);
- if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_phys1(sys_table_arg->boottime->free_pool,
- info);
- goto grow;
- }
-
- file_sz = info->file_size;
- efi_call_phys1(sys_table_arg->boottime->free_pool, info);
-
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to get file info\n");
- goto close_handles;
- }
-
- file->size = file_sz;
- file_size_total += file_sz;
+ file_size_total += file->size;
}
if (file_size_total) {
@@ -468,10 +396,10 @@ grow:
chunksize = EFI_READ_CHUNK_SIZE;
else
chunksize = size;
- status = efi_call_phys3(fh->read,
- files[j].handle,
- &chunksize,
- (void *)addr);
+
+ status = efi_file_read(fh, files[j].handle,
+ &chunksize,
+ (void *)addr);
if (status != EFI_SUCCESS) {
efi_printk(sys_table_arg, "Failed to read file\n");
goto free_file_total;
@@ -480,12 +408,12 @@ grow:
size -= chunksize;
}
- efi_call_phys1(fh->close, files[j].handle);
+ efi_file_close(fh, files[j].handle);
}
}
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -497,9 +425,9 @@ free_file_total:
close_handles:
for (k = j; k < i; k++)
- efi_call_phys1(fh->close, files[k].handle);
+ efi_file_close(fh, files[k].handle);
free_files:
- efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+ efi_call_early(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -545,7 +473,7 @@ static efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+ status = efi_call_early(allocate_pages,
EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
nr_pages, &efi_addr);
new_addr = efi_addr;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4753bac65279..af20f1712337 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -233,7 +233,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
- {NULL_GUID, NULL, 0},
+ {NULL_GUID, NULL, NULL},
};
static __init int match_config_table(efi_guid_t *guid,
@@ -313,5 +313,8 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
}
pr_cont("\n");
early_iounmap(config_tables, efi.systab->nr_tables * sz);
+
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
return 0;
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 3dc248239197..50ea412a25e6 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -227,7 +227,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
memcpy(&entry->var, new_var, count);
err = efivar_entry_set(entry, new_var->Attributes,
- new_var->DataSize, new_var->Data, false);
+ new_var->DataSize, new_var->Data, NULL);
if (err) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
return -EIO;
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 2f21b0bfe653..29c8cdda82a1 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -12,8 +12,7 @@ menu "Google Firmware Drivers"
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
- depends on ACPI && DMI
- select EFI
+ depends on ACPI && DMI && EFI
select EFI_VARS
help
Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index e5a67b24587a..f1ab05ea56bb 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -892,13 +892,6 @@ static __init int gsmi_init(void)
goto out_remove_sysfs_files;
}
- ret = efivars_sysfs_init();
- if (ret) {
- printk(KERN_INFO "gsmi: Failed to create efivars files\n");
- efivars_unregister(&efivars);
- goto out_remove_sysfs_files;
- }
-
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_register(&panic_notifier_list,
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 2a90ba613613..2f569aaed4c7 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -15,6 +15,7 @@
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/dmi.h>
+#include <linux/io.h>
#include <asm/bios_ebda.h>
#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
@@ -41,15 +42,25 @@ struct biosmemcon_ebda {
};
} __packed;
-static char *memconsole_baseaddr;
+static u32 memconsole_baseaddr;
static size_t memconsole_length;
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
- return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
- memconsole_length);
+ char *memconsole;
+ ssize_t ret;
+
+ memconsole = ioremap_cache(memconsole_baseaddr, memconsole_length);
+ if (!memconsole) {
+ pr_err("memconsole: ioremap_cache failed\n");
+ return -ENOMEM;
+ }
+ ret = memory_read_from_buffer(buf, count, &pos, memconsole,
+ memconsole_length);
+ iounmap(memconsole);
+ return ret;
}
static struct bin_attribute memconsole_bin_attr = {
@@ -58,43 +69,42 @@ static struct bin_attribute memconsole_bin_attr = {
};
-static void found_v1_header(struct biosmemcon_ebda *hdr)
+static void __init found_v1_header(struct biosmemcon_ebda *hdr)
{
- printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr);
- printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
+ pr_info("BIOS console v1 EBDA structure found at %p\n", hdr);
+ pr_info("BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num = %d\n",
hdr->v1.buffer_addr, hdr->v1.start,
hdr->v1.end, hdr->v1.num_chars);
memconsole_length = hdr->v1.num_chars;
- memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+ memconsole_baseaddr = hdr->v1.buffer_addr;
}
-static void found_v2_header(struct biosmemcon_ebda *hdr)
+static void __init found_v2_header(struct biosmemcon_ebda *hdr)
{
- printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr);
- printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
+ pr_info("BIOS console v2 EBDA structure found at %p\n", hdr);
+ pr_info("BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num_bytes = %d\n",
hdr->v2.buffer_addr, hdr->v2.start,
hdr->v2.end, hdr->v2.num_bytes);
memconsole_length = hdr->v2.end - hdr->v2.start;
- memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr
- + hdr->v2.start);
+ memconsole_baseaddr = hdr->v2.buffer_addr + hdr->v2.start;
}
/*
* Search through the EBDA for the BIOS Memory Console, and
* set the global variables to point to it. Return true if found.
*/
-static bool found_memconsole(void)
+static bool __init found_memconsole(void)
{
unsigned int address;
size_t length, cur;
address = get_bios_ebda();
if (!address) {
- printk(KERN_INFO "BIOS EBDA non-existent.\n");
+ pr_info("BIOS EBDA non-existent.\n");
return false;
}
@@ -122,7 +132,7 @@ static bool found_memconsole(void)
}
}
- printk(KERN_INFO "BIOS console EBDA structure not found!\n");
+ pr_info("BIOS console EBDA structure not found!\n");
return false;
}
@@ -139,8 +149,6 @@ MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
static int __init memconsole_init(void)
{
- int ret;
-
if (!dmi_check_system(memconsole_dmi_table))
return -ENODEV;
@@ -148,10 +156,7 @@ static int __init memconsole_init(void)
return -ENODEV;
memconsole_bin_attr.size = memconsole_length;
-
- ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
-
- return ret;
+ return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
static void __exit memconsole_exit(void)
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 24d52497524d..353fc546fb08 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -99,10 +99,23 @@ static ssize_t fmc_read_eeprom(struct file *file, struct kobject *kobj,
return count;
}
+static ssize_t fmc_write_eeprom(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct fmc_device *fmc;
+
+ dev = container_of(kobj, struct device, kobj);
+ fmc = container_of(dev, struct fmc_device, dev);
+ return fmc->op->write_ee(fmc, off, buf, count);
+}
+
static struct bin_attribute fmc_eeprom_attr = {
- .attr = { .name = "eeprom", .mode = S_IRUGO, },
+ .attr = { .name = "eeprom", .mode = S_IRUGO | S_IWUSR, },
.size = 8192, /* more or less standard */
.read = fmc_read_eeprom,
+ .write = fmc_write_eeprom,
};
/*
@@ -154,7 +167,7 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
ret = -EINVAL;
break;
}
- if (fmc->flags == FMC_DEVICE_NO_MEZZANINE) {
+ if (fmc->flags & FMC_DEVICE_NO_MEZZANINE) {
dev_info(fmc->hwdev, "absent mezzanine in slot %d\n",
fmc->slot_id);
continue;
@@ -189,9 +202,6 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
for (i = 0; i < n; i++) {
fmc = devarray[i];
- if (fmc->flags == FMC_DEVICE_NO_MEZZANINE)
- continue; /* dev_info already done above */
-
fmc->nr_slots = n; /* each slot must know how many are there */
fmc->devarray = devarray;
@@ -263,8 +273,6 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
kfree(devs[0]->devarray);
for (i = 0; i < n; i++) {
- if (devs[i]->flags == FMC_DEVICE_NO_MEZZANINE)
- continue;
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 79adc39221ea..4603fdb74465 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -150,23 +150,36 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
}
EXPORT_SYMBOL(fmc_reprogram);
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+ int i = len - 1;
+
+ memcpy(buf, str, len);
+ while (i >= 0 && buf[i] == ' ')
+ buf[i--] = '\0';
+ return buf;
+}
+
+#define __sdb_string(buf, field) ({ \
+ BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
+ __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
+ })
+
static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
const struct sdb_array *arr)
{
+ unsigned long base = arr->baseaddr;
int i, j, n = arr->len, level = arr->level;
- const struct sdb_array *ap;
+ char buf[64];
for (i = 0; i < n; i++) {
- unsigned long base;
union sdb_record *r;
struct sdb_product *p;
struct sdb_component *c;
r = &arr->record[i];
c = &r->dev.sdb_component;
p = &c->product;
- base = 0;
- for (ap = arr; ap; ap = ap->parent)
- base += ap->baseaddr;
+
dev_info(&fmc->dev, "SDB: ");
for (j = 0; j < level; j++)
@@ -193,8 +206,8 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
p->name,
__be64_to_cpu(c->addr_first) + base);
if (IS_ERR(arr->subtree[i])) {
- printk(KERN_CONT "(bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
+ dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
+ PTR_ERR(arr->subtree[i]));
break;
}
__fmc_show_sdb_tree(fmc, arr->subtree[i]);
@@ -203,10 +216,20 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
printk(KERN_CONT "integration\n");
break;
case sdb_type_repo_url:
- printk(KERN_CONT "repo-url\n");
+ printk(KERN_CONT "Synthesis repository: %s\n",
+ __sdb_string(buf, r->repo_url.repo_url));
break;
case sdb_type_synthesis:
- printk(KERN_CONT "synthesis-info\n");
+ printk(KERN_CONT "Bitstream '%s' ",
+ __sdb_string(buf, r->synthesis.syn_name));
+ printk(KERN_CONT "synthesized %08x by %s ",
+ __be32_to_cpu(r->synthesis.date),
+ __sdb_string(buf, r->synthesis.user_name));
+ printk(KERN_CONT "(%s version %x), ",
+ __sdb_string(buf, r->synthesis.tool_name),
+ __be32_to_cpu(r->synthesis.tool_version));
+ printk(KERN_CONT "commit %pm\n",
+ r->synthesis.commit_id);
break;
case sdb_type_empty:
printk(KERN_CONT "empty\n");
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index ee5b47904130..9bb2cbd5c9f2 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -27,7 +27,7 @@ FMC_PARAM_BUSID(fwe_drv);
/* The "file=" is like the generic "gateware=" used elsewhere */
static char *fwe_file[FMC_MAX_CARDS];
static int fwe_file_n;
-module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444);
+module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
int write)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 50cc557abe41..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -115,6 +115,13 @@ config GPIO_CLPS711X
help
Say yes here to support GPIO on CLPS711X SoCs.
+config GPIO_DAVINCI
+ bool "TI Davinci/Keystone GPIO support"
+ default y if ARCH_DAVINCI
+ depends on ARM && (ARCH_DAVINCI || ARCH_KEYSTONE)
+ help
+ Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
+
config GPIO_GENERIC_PLATFORM
tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
select GPIO_GENERIC
@@ -396,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0248471402e4..5d50179ece16 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
-obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
+obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 84be70157ad6..7629b4f12b7f 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -16,8 +16,13 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/irqchip/chained_irq.h>
struct davinci_gpio_regs {
u32 dir;
@@ -82,14 +87,14 @@ static inline int __davinci_direction(struct gpio_chip *chip,
u32 mask = 1 << offset;
spin_lock_irqsave(&d->lock, flags);
- temp = __raw_readl(&g->dir);
+ temp = readl_relaxed(&g->dir);
if (out) {
temp &= ~mask;
- __raw_writel(mask, value ? &g->set_data : &g->clr_data);
+ writel_relaxed(mask, value ? &g->set_data : &g->clr_data);
} else {
temp |= mask;
}
- __raw_writel(temp, &g->dir);
+ writel_relaxed(temp, &g->dir);
spin_unlock_irqrestore(&d->lock, flags);
return 0;
@@ -118,7 +123,7 @@ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
struct davinci_gpio_controller *d = chip2controller(chip);
struct davinci_gpio_regs __iomem *g = d->regs;
- return (1 << offset) & __raw_readl(&g->in_data);
+ return (1 << offset) & readl_relaxed(&g->in_data);
}
/*
@@ -130,7 +135,41 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct davinci_gpio_controller *d = chip2controller(chip);
struct davinci_gpio_regs __iomem *g = d->regs;
- __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
+ writel_relaxed((1 << offset), value ? &g->set_data : &g->clr_data);
+}
+
+static struct davinci_gpio_platform_data *
+davinci_gpio_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct davinci_gpio_platform_data *pdata;
+ int ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ ret = of_property_read_u32(dn, "ti,ngpio", &val);
+ if (ret)
+ goto of_err;
+
+ pdata->ngpio = val;
+
+ ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", &val);
+ if (ret)
+ goto of_err;
+
+ pdata->gpio_unbanked = val;
+
+ return pdata;
+
+of_err:
+ dev_err(&pdev->dev, "Populating pdata from DT failed: err %d\n", ret);
+ return NULL;
}
static int davinci_gpio_probe(struct platform_device *pdev)
@@ -143,12 +182,14 @@ static int davinci_gpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
- pdata = dev->platform_data;
+ pdata = davinci_gpio_get_pdata(pdev);
if (!pdata) {
dev_err(dev, "No platform data found\n");
return -EINVAL;
}
+ dev->platform_data = pdata;
+
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
@@ -160,8 +201,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (WARN_ON(DAVINCI_N_GPIO < ngpio))
- ngpio = DAVINCI_N_GPIO;
+ if (WARN_ON(ARCH_NR_GPIOS < ngpio))
+ ngpio = ARCH_NR_GPIOS;
chips = devm_kzalloc(dev,
ngpio * sizeof(struct davinci_gpio_controller),
@@ -194,6 +235,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (chips[i].chip.ngpio > 32)
chips[i].chip.ngpio = 32;
+#ifdef CONFIG_OF_GPIO
+ chips[i].chip.of_node = dev->of_node;
+#endif
spin_lock_init(&chips[i].lock);
regs = gpio2regs(base);
@@ -227,8 +271,8 @@ static void gpio_irq_disable(struct irq_data *d)
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
u32 mask = (u32) irq_data_get_irq_handler_data(d);
- __raw_writel(mask, &g->clr_falling);
- __raw_writel(mask, &g->clr_rising);
+ writel_relaxed(mask, &g->clr_falling);
+ writel_relaxed(mask, &g->clr_rising);
}
static void gpio_irq_enable(struct irq_data *d)
@@ -242,9 +286,9 @@ static void gpio_irq_enable(struct irq_data *d)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (status & IRQ_TYPE_EDGE_FALLING)
- __raw_writel(mask, &g->set_falling);
+ writel_relaxed(mask, &g->set_falling);
if (status & IRQ_TYPE_EDGE_RISING)
- __raw_writel(mask, &g->set_rising);
+ writel_relaxed(mask, &g->set_rising);
}
static int gpio_irq_type(struct irq_data *d, unsigned trigger)
@@ -278,34 +322,28 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
mask <<= 16;
/* temporarily mask (level sensitive) parent IRQ */
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
while (1) {
u32 status;
- int n;
- int res;
+ int bit;
/* ack any irqs */
- status = __raw_readl(&g->intstat) & mask;
+ status = readl_relaxed(&g->intstat) & mask;
if (!status)
break;
- __raw_writel(status, &g->intstat);
+ writel_relaxed(status, &g->intstat);
/* now demux them to the right lowlevel handler */
- n = d->irq_base;
- if (irq & 1) {
- n += 16;
- status >>= 16;
- }
while (status) {
- res = ffs(status);
- n += res;
- generic_handle_irq(n - 1);
- status >>= res;
+ bit = __ffs(status);
+ status &= ~BIT(bit);
+ generic_handle_irq(
+ irq_find_mapping(d->irq_domain,
+ d->chip.base + bit));
}
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
/* now it may re-trigger */
}
@@ -313,10 +351,10 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
{
struct davinci_gpio_controller *d = chip2controller(chip);
- if (d->irq_base >= 0)
- return d->irq_base + offset;
+ if (d->irq_domain)
+ return irq_create_mapping(d->irq_domain, d->chip.base + offset);
else
- return -ENODEV;
+ return -ENXIO;
}
static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
@@ -346,14 +384,35 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
+ writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
? &g->set_falling : &g->clr_falling);
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
+ writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_RISING)
? &g->set_rising : &g->clr_rising);
return 0;
}
+static int
+davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct davinci_gpio_regs __iomem *g = gpio2regs(hw);
+
+ irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
+ "davinci_gpio");
+ irq_set_irq_type(irq, IRQ_TYPE_NONE);
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_handler_data(irq, (void *)__gpio_mask(hw));
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops davinci_gpio_irq_ops = {
+ .map = davinci_gpio_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
/*
* NOTE: for suspend/resume, probably best to make a platform_device with
* suspend_late/resume_early calls hooking into results of the set_wake()
@@ -373,6 +432,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
+ struct irq_domain *irq_domain = NULL;
ngpio = pdata->ngpio;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -396,6 +456,22 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ if (!pdata->gpio_unbanked) {
+ irq = irq_alloc_descs(-1, 0, ngpio, 0);
+ if (irq < 0) {
+ dev_err(dev, "Couldn't allocate IRQ numbers\n");
+ return irq;
+ }
+
+ irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+ &davinci_gpio_irq_ops,
+ chips);
+ if (!irq_domain) {
+ dev_err(dev, "Couldn't register an IRQ domain\n");
+ return -ENODEV;
+ }
+ }
+
/*
* Arrange gpio_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
@@ -404,9 +480,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
*/
for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
chips[bank].chip.to_irq = gpio_to_irq_banked;
- chips[bank].irq_base = pdata->gpio_unbanked
- ? -EINVAL
- : (pdata->intc_irq_num + gpio);
+ chips[bank].irq_domain = irq_domain;
}
/*
@@ -432,8 +506,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* default trigger: both edges */
g = gpio2regs(0);
- __raw_writel(~0, &g->set_falling);
- __raw_writel(~0, &g->set_rising);
+ writel_relaxed(~0, &g->set_falling);
+ writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
@@ -449,15 +523,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
* then chain through our own handler.
*/
- for (gpio = 0, irq = gpio_to_irq(0), bank = 0;
- gpio < ngpio;
- bank++, bank_irq++) {
- unsigned i;
-
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
/* disabled by default, enabled only as needed */
g = gpio2regs(gpio);
- __raw_writel(~0, &g->clr_falling);
- __raw_writel(~0, &g->clr_rising);
+ writel_relaxed(~0, &g->clr_falling);
+ writel_relaxed(~0, &g->clr_rising);
/* set up all irqs in this bank */
irq_set_chained_handler(bank_irq, gpio_irq_handler);
@@ -469,14 +539,6 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
*/
irq_set_handler_data(bank_irq, &chips[gpio / 32]);
- for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
- irq_set_chip(irq, &gpio_irqchip);
- irq_set_chip_data(irq, (__force void *)g);
- irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
- irq_set_handler(irq, handle_simple_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
-
binten |= BIT(bank);
}
@@ -485,18 +547,25 @@ done:
* BINTEN -- per-bank interrupt enable. genirq would also let these
* bits be set/cleared dynamically.
*/
- __raw_writel(binten, gpio_base + BINTEN);
-
- printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
+ writel_relaxed(binten, gpio_base + BINTEN);
return 0;
}
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id davinci_gpio_ids[] = {
+ { .compatible = "ti,dm6441-gpio", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_gpio_ids);
+#endif
+
static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
- .name = "davinci_gpio",
- .owner = THIS_MODULE,
+ .name = "davinci_gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 9902732a382d..66cbcc108e62 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -81,6 +81,7 @@ static DEFINE_SPINLOCK(giu_lock);
static unsigned long giu_flags;
static void __iomem *giu_base;
+static struct gpio_chip vr41xx_gpio_chip;
#define giu_read(offset) readw(giu_base + (offset))
#define giu_write(offset, value) writew((value), giu_base + (offset))
@@ -135,12 +136,31 @@ static void unmask_giuint_low(struct irq_data *d)
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
+static unsigned int startup_giuint(struct irq_data *data)
+{
+ if (gpio_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ dev_err(vr41xx_gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ data->hwirq);
+ /* Satisfy the .enable semantics by unmasking the line */
+ unmask_giuint_low(data);
+ return 0;
+}
+
+static void shutdown_giuint(struct irq_data *data)
+{
+ mask_giuint_low(data);
+ gpio_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
+}
+
static struct irq_chip giuint_low_irq_chip = {
.name = "GIUINTL",
.irq_ack = ack_giuint_low,
.irq_mask = mask_giuint_low,
.irq_mask_ack = mask_ack_giuint_low,
.irq_unmask = unmask_giuint_low,
+ .irq_startup = startup_giuint,
+ .irq_shutdown = shutdown_giuint,
};
static void ack_giuint_high(struct irq_data *d)
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
#error GPIO32 option is not enabled for your xtensa core variant
#endif
+#if XCHAL_HAVE_CP
+
static inline unsigned long enable_cp(unsigned long *cpenable)
{
unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
local_irq_restore(flags);
}
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
return 1; /* input only */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f86427591167..8e7fa4dbaed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -20,6 +20,10 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+config DRM_MIPI_DSI
+ bool
+ depends on DRM
+
config DRM_USB
tristate
depends on DRM
@@ -188,6 +192,10 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
+source "drivers/gpu/drm/bochs/Kconfig"
+
source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc08b845f965..292a79d64146 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,6 +18,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-usb-y := drm_usb.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
@@ -56,6 +58,8 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
+obj-y += panel/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 40d371521fe1..50ae88ad4d76 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -5,6 +5,7 @@ config DRM_ARMADA
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 62d0ff3efddf..32982da82694 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
{
struct armada_private *priv = dev->dev_private;
- /*
- * Yes, we really must jump through these hoops just to store a
- * _pointer_ to something into the kfifo. This is utterly insane
- * and idiotic, because it kfifo requires the _data_ pointed to by
- * the pointer const, not the pointer itself. Not only that, but
- * you have to pass a pointer _to_ the pointer you want stored.
- */
- const struct drm_framebuffer *silly_api_alert = fb;
- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
schedule_work(&priv->fb_unref_work);
}
@@ -128,6 +120,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
}
+ platform_set_drvdata(dev->platformdev, dev);
dev->dev_private = priv;
/* Get the implementation specific driver data. */
@@ -381,7 +374,7 @@ static int armada_drm_probe(struct platform_device *pdev)
static int armada_drm_remove(struct platform_device *pdev)
{
- drm_platform_exit(&armada_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 7b33e14e44aa..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index af0b868a9dfd..50535fd5a88d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -189,53 +189,6 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
-{
- struct ast_private *ast = dev->dev_private;
- uint32_t dclk, jreg;
- uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
-
- dram_bus_width = ast->dram_bus_width;
- mclk = ast->mclk;
-
- if (ast->chip == AST2100 ||
- ast->chip == AST1100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2150 ||
- ast->dram_bus_width == 16)
- dram_efficency = 600;
- else if (ast->chip == AST2300)
- dram_efficency = 400;
-
- dram_bandwidth = mclk * dram_bus_width * 2 / 8;
- actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
-
- if (ast->chip == AST1180)
- dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
- else {
- jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
- if ((jreg & 0x08) && (ast->chip == AST2000))
- dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
- else if ((jreg & 0x08) && (bpp == 8))
- dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
- else
- dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
- }
-
- if (ast->chip == AST2100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2300 ||
- ast->chip == AST1180) {
- if (dclk > 200)
- dclk = 200;
- } else {
- if (dclk > 165)
- dclk = 165;
- }
-
- return dclk;
-}
-
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
@@ -449,7 +402,7 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-void ast_bo_unref(struct ast_bo **bo)
+static void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7fc9f7272b56..cca063b11083 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -404,7 +404,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
}
}
-void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = dev->dev_private;
@@ -415,7 +415,7 @@ void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
switch (crtc->fb->bits_per_pixel) {
@@ -427,7 +427,7 @@ bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
return true;
}
-void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
{
struct ast_private *ast = crtc->dev->dev_private;
u32 addr;
@@ -623,7 +623,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
.destroy = ast_crtc_destroy,
};
-int ast_crtc_init(struct drm_device *dev)
+static int ast_crtc_init(struct drm_device *dev)
{
struct ast_crtc *crtc;
int i;
@@ -710,7 +710,7 @@ static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
.mode_set = ast_encoder_mode_set,
};
-int ast_encoder_init(struct drm_device *dev)
+static int ast_encoder_init(struct drm_device *dev)
{
struct ast_encoder *ast_encoder;
@@ -777,7 +777,7 @@ static const struct drm_connector_funcs ast_connector_funcs = {
.destroy = ast_connector_destroy,
};
-int ast_connector_init(struct drm_device *dev)
+static int ast_connector_init(struct drm_device *dev)
{
struct ast_connector *ast_connector;
struct drm_connector *connector;
@@ -810,7 +810,7 @@ int ast_connector_init(struct drm_device *dev)
}
/* allocate cursor cache and pin at start of VRAM */
-int ast_cursor_init(struct drm_device *dev)
+static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
int size;
@@ -847,7 +847,7 @@ fail:
return ret;
}
-void ast_cursor_fini(struct drm_device *dev)
+static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ttm_bo_kunmap(&ast->cache_kmap);
@@ -965,7 +965,7 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-void ast_show_cursor(struct drm_crtc *crtc)
+static void ast_show_cursor(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
u8 jreg;
@@ -976,7 +976,7 @@ void ast_show_cursor(struct drm_crtc *crtc)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
}
-void ast_hide_cursor(struct drm_crtc *crtc)
+static void ast_hide_cursor(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 32aecb34dbce..4ea9b17ac17a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -80,7 +80,7 @@ static int ast_ttm_global_init(struct ast_private *ast)
return 0;
}
-void
+static void
ast_ttm_global_release(struct ast_private *ast)
{
if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &ast_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func ast_tt_backend_func = {
};
-struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
new file mode 100644
index 000000000000..5f8b0c2b9a44
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -0,0 +1,12 @@
+config DRM_BOCHS
+ tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
+ depends on DRM && PCI
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_TTM
+ help
+ Choose this option for qemu guests using the standard VGA device (-vga std).
+ If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
new file mode 100644
index 000000000000..844a55614920
--- /dev/null
+++ b/drivers/gpu/drm/bochs/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+
+obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
new file mode 100644
index 000000000000..741965c001a6
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -0,0 +1,164 @@
+#include <linux/io.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_page_alloc.h>
+
+/* ---------------------------------------------------------------------- */
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+#define VBE_DISPI_ID5 0xB0C5
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+/* ---------------------------------------------------------------------- */
+
+enum bochs_types {
+ BOCHS_QEMU_STDVGA,
+ BOCHS_UNKNOWN,
+};
+
+struct bochs_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct bochs_device {
+ /* hw */
+ void __iomem *mmio;
+ int ioports;
+ void __iomem *fb_map;
+ unsigned long fb_base;
+ unsigned long fb_size;
+
+ /* mode */
+ u16 xres;
+ u16 yres;
+ u16 yres_virtual;
+ u32 stride;
+ u32 bpp;
+
+ /* drm */
+ struct drm_device *dev;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ bool mode_config_initialized;
+
+ /* ttm */
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool initialized;
+ } ttm;
+
+ /* fbdev */
+ struct {
+ struct bochs_framebuffer gfb;
+ struct drm_fb_helper helper;
+ int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+ bool initialized;
+ } fb;
+};
+
+#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
+
+struct bochs_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+
+static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct bochs_bo, bo);
+}
+
+static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct bochs_bo, gem);
+}
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* bochs_hw.c */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+void bochs_hw_fini(struct drm_device *dev);
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode);
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr);
+
+/* bochs_mm.c */
+int bochs_mm_init(struct bochs_device *bochs);
+void bochs_mm_fini(struct bochs_device *bochs);
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int bochs_gem_init_object(struct drm_gem_object *obj);
+void bochs_gem_free_object(struct drm_gem_object *obj);
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_unpin(struct bochs_bo *bo);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
+
+/* bochs_kms.c */
+int bochs_kms_init(struct bochs_device *bochs);
+void bochs_kms_fini(struct bochs_device *bochs);
+
+/* bochs_fbdev.c */
+int bochs_fbdev_init(struct bochs_device *bochs);
+void bochs_fbdev_fini(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
new file mode 100644
index 000000000000..395bba261c9a
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -0,0 +1,178 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "bochs.h"
+
+static bool enable_fbdev = true;
+module_param_named(fbdev, enable_fbdev, bool, 0444);
+MODULE_PARM_DESC(fbdev, "register fbdev device");
+
+/* ---------------------------------------------------------------------- */
+/* drm interface */
+
+static int bochs_unload(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ bochs_fbdev_fini(bochs);
+ bochs_kms_fini(bochs);
+ bochs_mm_fini(bochs);
+ bochs_hw_fini(dev);
+ kfree(bochs);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+static int bochs_load(struct drm_device *dev, unsigned long flags)
+{
+ struct bochs_device *bochs;
+ int ret;
+
+ bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ if (bochs == NULL)
+ return -ENOMEM;
+ dev->dev_private = bochs;
+ bochs->dev = dev;
+
+ ret = bochs_hw_init(dev, flags);
+ if (ret)
+ goto err;
+
+ ret = bochs_mm_init(bochs);
+ if (ret)
+ goto err;
+
+ ret = bochs_kms_init(bochs);
+ if (ret)
+ goto err;
+
+ if (enable_fbdev)
+ bochs_fbdev_init(bochs);
+
+ return 0;
+
+err:
+ bochs_unload(dev);
+ return ret;
+}
+
+static const struct file_operations bochs_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = bochs_mmap,
+};
+
+static struct drm_driver bochs_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ .load = bochs_load,
+ .unload = bochs_unload,
+ .fops = &bochs_fops,
+ .name = "bochs-drm",
+ .desc = "bochs dispi vga interface (qemu stdvga)",
+ .date = "20130925",
+ .major = 1,
+ .minor = 0,
+ .gem_free_object = bochs_gem_free_object,
+ .dumb_create = bochs_dumb_create,
+ .dumb_map_offset = bochs_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+};
+
+/* ---------------------------------------------------------------------- */
+/* pci interface */
+
+static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+ remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+ kfree(ap);
+
+ return 0;
+}
+
+static int bochs_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = bochs_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
+
+ return drm_get_pci_dev(pdev, ent, &bochs_driver);
+}
+
+static void bochs_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = {
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = 0x1af4,
+ .subdevice = 0x1100,
+ .driver_data = BOCHS_QEMU_STDVGA,
+ },
+ {
+ .vendor = 0x1234,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_UNKNOWN,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver bochs_pci_driver = {
+ .name = "bochs-drm",
+ .id_table = bochs_pci_tbl,
+ .probe = bochs_pci_probe,
+ .remove = bochs_pci_remove,
+};
+
+/* ---------------------------------------------------------------------- */
+/* module init/exit */
+
+static int __init bochs_init(void)
+{
+ return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+}
+
+static void __exit bochs_exit(void)
+{
+ drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+}
+
+module_init(bochs_init);
+module_exit(bochs_exit);
+
+MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
new file mode 100644
index 000000000000..4da5206b7cc9
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -0,0 +1,215 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static struct fb_ops bochsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int bochsfb_create_object(struct bochs_device *bochs,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = bochs->dev;
+ struct drm_gem_object *gobj;
+ u32 size;
+ int ret = 0;
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = bochs_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int bochsfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct bochs_device *bochs =
+ container_of(helper, struct bochs_device, fb.helper);
+ struct drm_device *dev = bochs->dev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj = NULL;
+ struct bochs_bo *bo = NULL;
+ int size, ret;
+
+ if (sizes->surface_bpp != 32)
+ return -EINVAL;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ /* alloc, pin & map bo */
+ ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_bochs_bo(gobj);
+
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+ &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+
+ /* init fb device */
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = &bochs->fb.helper;
+
+ ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bochs->fb.size = size;
+
+ /* setup helper */
+ fb = &bochs->fb.gfb.base;
+ bochs->fb.helper.fb = fb;
+ bochs->fb.helper.fbdev = info;
+
+ strcpy(info->fix.id, "bochsdrmfb");
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &bochsfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#if 0
+ /* FIXME: get this right for mmap(/dev/fb0) */
+ info->fix.smem_start = bochs_bo_mmap_offset(bo);
+ info->fix.smem_len = size;
+#endif
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bochs_fbdev_destroy(struct bochs_device *bochs)
+{
+ struct bochs_framebuffer *gfb = &bochs->fb.gfb;
+ struct fb_info *info;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (bochs->fb.helper.fbdev) {
+ info = bochs->fb.helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ drm_fb_helper_fini(&bochs->fb.helper);
+ drm_framebuffer_unregister_private(&gfb->base);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+ .gamma_set = bochs_fb_gamma_set,
+ .gamma_get = bochs_fb_gamma_get,
+ .fb_probe = bochsfb_create,
+};
+
+int bochs_fbdev_init(struct bochs_device *bochs)
+{
+ int ret;
+
+ bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+ spin_lock_init(&bochs->fb.dirty_lock);
+
+ ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
+ 1, 1);
+ if (ret)
+ return ret;
+
+ drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ drm_helper_disable_unused_functions(bochs->dev);
+ drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+ bochs->fb.initialized = true;
+ return 0;
+}
+
+void bochs_fbdev_fini(struct bochs_device *bochs)
+{
+ if (!bochs->fb.initialized)
+ return;
+
+ bochs_fbdev_destroy(bochs);
+ bochs->fb.initialized = false;
+}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
new file mode 100644
index 000000000000..dbe619e6aab4
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
+{
+ if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+ return;
+
+ if (bochs->mmio) {
+ int offset = ioport - 0x3c0 + 0x400;
+ writeb(val, bochs->mmio + offset);
+ } else {
+ outb(val, ioport);
+ }
+}
+
+static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
+{
+ u16 ret = 0;
+
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ ret = readw(bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ ret = inw(VBE_DISPI_IOPORT_DATA);
+ }
+ return ret;
+}
+
+static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
+{
+ if (bochs->mmio) {
+ int offset = 0x500 + (reg << 1);
+ writew(val, bochs->mmio + offset);
+ } else {
+ outw(reg, VBE_DISPI_IOPORT_INDEX);
+ outw(val, VBE_DISPI_IOPORT_DATA);
+ }
+}
+
+int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ unsigned long addr, size, mem, ioaddr, iosize;
+ u16 id;
+
+ if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
+ (pdev->resource[2].flags & IORESOURCE_MEM)) {
+ /* mmio bar with vga and bochs registers present */
+ if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request mmio region\n");
+ return -EBUSY;
+ }
+ ioaddr = pci_resource_start(pdev, 2);
+ iosize = pci_resource_len(pdev, 2);
+ bochs->mmio = ioremap(ioaddr, iosize);
+ if (bochs->mmio == NULL) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+ } else {
+ ioaddr = VBE_DISPI_IOPORT_INDEX;
+ iosize = 2;
+ if (!request_region(ioaddr, iosize, "bochs-drm")) {
+ DRM_ERROR("Cannot request ioports\n");
+ return -EBUSY;
+ }
+ bochs->ioports = 1;
+ }
+
+ id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
+ mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
+ * 64 * 1024;
+ if ((id & 0xfff0) != VBE_DISPI_ID0) {
+ DRM_ERROR("ID mismatch\n");
+ return -ENODEV;
+ }
+
+ if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
+ return -ENODEV;
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ if (addr == 0)
+ return -ENODEV;
+ if (size != mem) {
+ DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n",
+ size, mem);
+ size = min(size, mem);
+ }
+
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+ DRM_ERROR("Cannot request framebuffer\n");
+ return -EBUSY;
+ }
+
+ bochs->fb_map = ioremap(addr, size);
+ if (bochs->fb_map == NULL) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ bochs->fb_base = addr;
+ bochs->fb_size = size;
+
+ DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
+ DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
+ size / 1024, addr,
+ bochs->ioports ? "ioports" : "mmio",
+ ioaddr);
+ return 0;
+}
+
+void bochs_hw_fini(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+
+ if (bochs->mmio)
+ iounmap(bochs->mmio);
+ if (bochs->ioports)
+ release_region(VBE_DISPI_IOPORT_INDEX, 2);
+ if (bochs->fb_map)
+ iounmap(bochs->fb_map);
+ pci_release_regions(dev->pdev);
+}
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+ struct drm_display_mode *mode)
+{
+ bochs->xres = mode->hdisplay;
+ bochs->yres = mode->vdisplay;
+ bochs->bpp = 32;
+ bochs->stride = mode->hdisplay * (bochs->bpp / 8);
+ bochs->yres_virtual = bochs->fb_size / bochs->stride;
+
+ DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
+ bochs->xres, bochs->yres, bochs->bpp,
+ bochs->yres_virtual);
+
+ bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
+ bochs->yres_virtual);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0);
+
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
+ VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+void bochs_hw_setbase(struct bochs_device *bochs,
+ int x, int y, u64 addr)
+{
+ unsigned long offset = (unsigned long)addr +
+ y * bochs->stride +
+ x * (bochs->bpp / 8);
+ int vy = offset / bochs->stride;
+ int vx = (offset % bochs->stride) * 8 / bochs->bpp;
+
+ DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
+ x, y, addr, offset, vx, vy);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+}
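
bochs_hw_setbase() above maps a byte offset in video memory back onto the pixel coordinates the VBE X/Y offset registers expect. A worked example, assuming the 1024x768 XRGB8888 default mode from bochs_kms.c and a second buffer placed 3 MiB into VRAM (the helper name is illustrative, not part of the patch):

static void bochs_pan_to_second_buffer(struct bochs_device *bochs)
{
	unsigned long stride = 1024 * 4;        /* 1024 px * 4 bytes = 4096 */
	unsigned long offset = 3 * 1024 * 1024; /* buffer starts 3 MiB in   */
	int vy = offset / stride;               /* 3145728 / 4096 = 768     */
	int vx = (offset % stride) * 8 / 32;    /* remainder 0 -> column 0  */

	/* pan the scanout to line 768, column 0 */
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
}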
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
new file mode 100644
index 000000000000..62ec7d4b3816
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static int defx = 1024;
+static int defy = 768;
+
+module_param(defx, int, 0444);
+module_param(defy, int, 0444);
+MODULE_PARM_DESC(defx, "default x resolution");
+MODULE_PARM_DESC(defy, "default y resolution");
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ default:
+ return;
+ }
+}
+
+static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+ struct bochs_framebuffer *bochs_fb;
+ struct bochs_bo *bo;
+ u64 gpu_addr = 0;
+ int ret;
+
+ if (old_fb) {
+ bochs_fb = to_bochs_framebuffer(old_fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret) {
+ DRM_ERROR("failed to reserve old_fb bo\n");
+ } else {
+ bochs_bo_unpin(bo);
+ ttm_bo_unreserve(&bo->bo);
+ }
+ }
+
+ if (WARN_ON(crtc->fb == NULL))
+ return -EINVAL;
+
+ bochs_fb = to_bochs_framebuffer(crtc->fb);
+ bo = gem_to_bochs_bo(bochs_fb->obj);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ttm_bo_unreserve(&bo->bo);
+ return ret;
+ }
+
+ ttm_bo_unreserve(&bo->bo);
+ bochs_hw_setbase(bochs, x, y, gpu_addr);
+ return 0;
+}
+
+static int bochs_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+
+ bochs_hw_setmode(bochs, mode);
+ bochs_crtc_mode_set_base(crtc, x, y, old_fb);
+ return 0;
+}
+
+static void bochs_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+ .gamma_set = bochs_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
+ .dpms = bochs_crtc_dpms,
+ .mode_fixup = bochs_crtc_mode_fixup,
+ .mode_set = bochs_crtc_mode_set,
+ .mode_set_base = bochs_crtc_mode_set_base,
+ .prepare = bochs_crtc_prepare,
+ .commit = bochs_crtc_commit,
+ .load_lut = bochs_crtc_load_lut,
+};
+
+static void bochs_crtc_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_crtc *crtc = &bochs->crtc;
+
+ drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_crtc_helper_add(crtc, &bochs_helper_funcs);
+}
+
+static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void bochs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void bochs_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void bochs_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
+ .dpms = bochs_encoder_dpms,
+ .mode_fixup = bochs_encoder_mode_fixup,
+ .mode_set = bochs_encoder_mode_set,
+ .prepare = bochs_encoder_prepare,
+ .commit = bochs_encoder_commit,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void bochs_encoder_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_encoder *encoder = &bochs->encoder;
+
+ encoder->possible_crtcs = 0x1;
+ drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
+}
+
+
+int bochs_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, 8192, 8192);
+ drm_set_preferred_mode(connector, defx, defy);
+ return count;
+}
+
+static int bochs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct bochs_device *bochs =
+ container_of(connector, struct bochs_device, connector);
+ unsigned long size = mode->hdisplay * mode->vdisplay * 4;
+
+ /*
+ * Make sure we can fit two framebuffers into video memory.
+ * This allows up to 1600x1200 with 16 MB (default size).
+ * If you want more try this:
+ * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
+ */
+ if (size * 2 > bochs->fb_size)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+bochs_connector_best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status bochs_connector_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+ .get_modes = bochs_connector_get_modes,
+ .mode_valid = bochs_connector_mode_valid,
+ .best_encoder = bochs_connector_best_encoder,
+};
+
+struct drm_connector_funcs bochs_connector_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = bochs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+};
+
+static void bochs_connector_init(struct drm_device *dev)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct drm_connector *connector = &bochs->connector;
+
+ drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector,
+ &bochs_connector_connector_helper_funcs);
+}
+
+
+int bochs_kms_init(struct bochs_device *bochs)
+{
+ drm_mode_config_init(bochs->dev);
+ bochs->mode_config_initialized = true;
+
+ bochs->dev->mode_config.max_width = 8192;
+ bochs->dev->mode_config.max_height = 8192;
+
+ bochs->dev->mode_config.fb_base = bochs->fb_base;
+ bochs->dev->mode_config.preferred_depth = 24;
+ bochs->dev->mode_config.prefer_shadow = 0;
+
+ bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+
+ bochs_crtc_init(bochs->dev);
+ bochs_encoder_init(bochs->dev);
+ bochs_connector_init(bochs->dev);
+ drm_mode_connector_attach_encoder(&bochs->connector,
+ &bochs->encoder);
+
+ return 0;
+}
+
+void bochs_kms_fini(struct bochs_device *bochs)
+{
+ if (bochs->mode_config_initialized) {
+ drm_mode_config_cleanup(bochs->dev);
+ bochs->mode_config_initialized = false;
+ }
+}
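
The "two framebuffers" check in bochs_connector_mode_valid() above is easy to verify by hand: with the default 16 MiB of VRAM, two 1600x1200 XRGB8888 buffers need 2 * 1600 * 1200 * 4 = 15,360,000 bytes and are accepted, while 1920x1200 would need 18,432,000 bytes and gets MODE_BAD. The same arithmetic as a small, illustrative helper:

static bool bochs_mode_fits(unsigned long vram, int hdisplay, int vdisplay)
{
	unsigned long size = hdisplay * vdisplay * 4;	/* XRGB8888 */

	return size * 2 <= vram;	/* room for two buffers */
}

/* bochs_mode_fits(16 * 1024 * 1024, 1600, 1200) -> true  (15,360,000 bytes) */
/* bochs_mode_fits(16 * 1024 * 1024, 1920, 1200) -> false (18,432,000 bytes) */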
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
new file mode 100644
index 000000000000..ce6858765b37
--- /dev/null
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -0,0 +1,546 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct bochs_device, ttm.bdev);
+}
+
+static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &bochs->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &bochs_ttm_mem_global_init;
+ global_ref->release = &bochs_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ bochs->ttm.bo_global_ref.mem_glob =
+ bochs->ttm.mem_global_ref.object;
+ global_ref = &bochs->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ return r;
+ }
+
+ return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+ if (bochs->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&bochs->ttm.mem_global_ref);
+ bochs->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct bochs_bo *bo;
+
+ bo = container_of(tbo, struct bochs_bo, bo);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &bochs_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ if (!bochs_ttm_bo_is_bochs_bo(bo))
+ return;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
+ *pl = bochsbo->placement;
+}
+
+static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct bochs_bo *bochsbo = bochs_bo(bo);
+
+ return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+}
+
+static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct bochs_device *bochs = bochs_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = bochs->fb_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int bochs_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
+static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func bochs_tt_backend_func = {
+ .destroy = &bochs_ttm_backend_destroy,
+};
+
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &bochs_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+struct ttm_bo_driver bochs_bo_driver = {
+ .ttm_tt_create = bochs_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bochs_bo_init_mem_type,
+ .evict_flags = bochs_bo_evict_flags,
+ .move = bochs_bo_move,
+ .verify_access = bochs_bo_verify_access,
+ .io_mem_reserve = &bochs_ttm_io_mem_reserve,
+ .io_mem_free = &bochs_ttm_io_mem_free,
+};
+
+int bochs_mm_init(struct bochs_device *bochs)
+{
+ struct ttm_bo_device *bdev = &bochs->ttm.bdev;
+ int ret;
+
+ ret = bochs_ttm_global_init(bochs);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&bochs->ttm.bdev,
+ bochs->ttm.bo_global_ref.ref.object,
+ &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ bochs->fb_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ bochs->ttm.initialized = true;
+ return 0;
+}
+
+void bochs_mm_fini(struct bochs_device *bochs)
+{
+ if (!bochs->ttm.initialized)
+ return;
+
+ ttm_bo_device_release(&bochs->ttm.bdev);
+ bochs_ttm_global_release(bochs);
+ bochs->ttm.initialized = false;
+}
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM) {
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+ | TTM_PL_FLAG_VRAM;
+ }
+ if (domain & TTM_PL_FLAG_SYSTEM) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ if (!c) {
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ }
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ bochs_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = bochs_bo_gpu_offset(bo);
+ return 0;
+}
+
+int bochs_bo_unpin(struct bochs_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct bochs_device *bochs;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ bochs = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int bochs_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct bochs_bo **pbochsbo)
+{
+ struct bochs_device *bochs = dev->dev_private;
+ struct bochs_bo *bochsbo;
+ size_t acc_size;
+ int ret;
+
+ bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
+ if (!bochsbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &bochsbo->gem, size);
+ if (ret) {
+ kfree(bochsbo);
+ return ret;
+ }
+
+ bochsbo->bo.bdev = &bochs->ttm.bdev;
+ bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+ bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
+ sizeof(struct bochs_bo));
+
+ ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
+ ttm_bo_type_device, &bochsbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, bochs_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pbochsbo = bochsbo;
+ return 0;
+}
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct bochs_bo *bochsbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = ALIGN(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &bochsbo->gem;
+ return 0;
+}
+
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = bochs_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+static void bochs_bo_unref(struct bochs_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void bochs_gem_free_object(struct drm_gem_object *obj)
+{
+ struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
+
+ if (!bochs_bo)
+ return;
+ bochs_bo_unref(&bochs_bo);
+}
+
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct bochs_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_bochs_bo(obj);
+ *offset = bochs_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
+ if (bochs_fb->obj)
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs bochs_fb_funcs = {
+ .destroy = bochs_user_framebuffer_destroy,
+};
+
+int bochs_framebuffer_init(struct drm_device *dev,
+ struct bochs_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+bochs_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct bochs_framebuffer *bochs_fb;
+ int ret;
+
+ DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+ mode_cmd->width, mode_cmd->height,
+ (mode_cmd->pixel_format) & 0xff,
+ (mode_cmd->pixel_format >> 8) & 0xff,
+ (mode_cmd->pixel_format >> 16) & 0xff,
+ (mode_cmd->pixel_format >> 24) & 0xff);
+
+ if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-ENOENT);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
+ if (!bochs_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(bochs_fb);
+ return ERR_PTR(ret);
+ }
+ return &bochs_fb->base;
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+ .fb_create = bochs_user_framebuffer_create,
+};
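
Userspace reaches bochs_dumb_create() above through the generic dumb-buffer ioctl. A userspace-side sketch, assuming an already-open DRM file descriptor fd and libdrm's drmIoctl(); error handling is trimmed and the function name is made up for illustration:

#include <stdint.h>
#include <xf86drm.h>

/* Allocate a 1024x768 XRGB8888 dumb buffer; bochs_dumb_create() computes
 * pitch = 1024 * 4 = 4096 and size = 4096 * 768 = 3 MiB for us. */
static uint32_t create_scanout(int fd)
{
	struct drm_mode_create_dumb creq = {
		.width  = 1024,
		.height = 768,
		.bpp    = 32,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) < 0)
		return 0;

	return creq.handle;	/* GEM handle; row pitch is in creq.pitch */
}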
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index b6aded73838b..117d3eca5e37 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -222,7 +222,7 @@ void cirrus_fbdev_fini(struct cirrus_device *cdev);
void cirrus_driver_irq_preinstall(struct drm_device *dev);
int cirrus_driver_irq_postinstall(struct drm_device *dev);
void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
/* cirrus_kms.c */
int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
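
With DRM_IRQ_ARGS gone, the cirrus handler is declared with the plain kernel interrupt prototype above. A stub-level sketch of the matching definition (not the driver's real handler body):

irqreturn_t cirrus_driver_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;	/* drm core passes the device as cookie */

	(void)dev;			/* the real handler reads and acks hw status */
	return IRQ_NONE;
}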
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index b27e95666fab..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
+ info->fix.smem_start = cdev->dev->mode_config.fb_base;
+ info->fix.smem_len = cdev->mc.vram_size;
+
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 78e76f24343d..4b0170cf53fd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,7 +255,7 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-void cirrus_bo_unref(struct cirrus_bo **bo)
+static void cirrus_bo_unref(struct cirrus_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index adabc3daaa5b..530f78f84dee 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -102,7 +102,7 @@ static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
u32 addr;
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 |= 0x11;
break;
case 16:
- sr07 |= 0xc1;
- hdr = 0xc0;
+ sr07 |= 0x17;
+ hdr = 0xc1;
break;
case 24:
sr07 |= 0x15;
@@ -453,7 +453,7 @@ static void cirrus_encoder_commit(struct drm_encoder *encoder)
{
}
-void cirrus_encoder_destroy(struct drm_encoder *encoder)
+static void cirrus_encoder_destroy(struct drm_encoder *encoder)
{
struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
drm_encoder_cleanup(encoder);
@@ -492,7 +492,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
}
-int cirrus_vga_get_modes(struct drm_connector *connector)
+static int cirrus_vga_get_modes(struct drm_connector *connector)
{
int count;
@@ -509,7 +509,7 @@ static int cirrus_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 75becdeac07d..8b37c25ff9bd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -80,7 +80,7 @@ static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
return 0;
}
-void
+static void
cirrus_ttm_global_release(struct cirrus_device *cirrus)
{
if (cirrus->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &cirrus_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
};
-struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
@@ -375,26 +375,6 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
return 0;
}
-int cirrus_bo_unpin(struct cirrus_bo *bo)
-{
- int i, ret;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
-}
-
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
int i, ret;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index e301d653d97e..dde205cef384 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -53,7 +53,7 @@
*/
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
- DRM_AGP_KERN *kern;
+ struct agp_kern_info *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -198,17 +198,15 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
- DRM_AGP_MEM *memory;
+ struct agp_memory *memory;
unsigned long pages;
u32 type;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+ if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
-
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
@@ -393,14 +391,16 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
+ *
+ * Note that final cleanup of the kmalloced structure is directly done in
+ * drm_pci_agp_destroy.
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
- if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+ if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
return NULL;
- memset((void *)head, 0, sizeof(*head));
head->bridge = agp_find_bridge(dev->pdev);
if (!head->bridge) {
if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
@@ -439,7 +439,7 @@ void drm_agp_clear(struct drm_device *dev)
{
struct drm_agp_mem *entry, *tempe;
- if (!drm_core_has_AGP(dev) || !dev->agp)
+ if (!dev->agp)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -460,35 +460,20 @@ void drm_agp_clear(struct drm_device *dev)
}
/**
- * drm_agp_destroy - Destroy AGP head
- * @dev: DRM device
- *
- * Destroy resources that were previously allocated via drm_agp_initp. Caller
- * must ensure to clean up all AGP resources before calling this. See
- * drm_agp_clear().
- *
- * Call this to destroy AGP heads allocated via drm_agp_init().
- */
-void drm_agp_destroy(struct drm_agp_head *agp)
-{
- kfree(agp);
-}
-
-/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
-DRM_AGP_MEM *
+struct agp_memory *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
- DRM_AGP_MEM *mem;
+ struct agp_memory *mem;
int ret, i;
DRM_DEBUG("\n");
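
Two hunks above replace an open-coded kmalloc() plus memset() with kzalloc(), which allocates and zero-fills in one call. The transformation pattern, shown for reference:

	/* before */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	memset(entry, 0, sizeof(*entry));

	/* after: same behaviour, one call */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;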
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 39a718340319..0406110f83ed 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -114,7 +114,7 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
for (idx = 0; idx < nr_pages; ++idx) {
- if (DRM_COPY_FROM_USER(buf->data[idx],
+ if (copy_from_user(buf->data[idx],
user_data + idx * PAGE_SIZE,
min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
DRM_ERROR("Failed to copy user data (%p) to drm buffer"
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 471e051d295e..edec31fe3fed 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -261,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
struct drm_agp_mem *entry;
int valid = 0;
- if (!drm_core_has_AGP(dev)) {
+ if (!dev->agp) {
kfree(map);
return -EINVAL;
}
@@ -303,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
break;
}
- case _DRM_GEM:
- DRM_ERROR("tried to addmap GEM object\n");
- break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
kfree(map);
@@ -483,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
@@ -1396,7 +1390,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
spin_unlock(&dev->count_lock);
if (request->count >= dma->buf_count) {
- if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+ if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index bb8f58012189..534cb89b160d 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -32,6 +32,12 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
static void
drm_clflush_page(struct page *page)
{
@@ -44,7 +50,7 @@ drm_clflush_page(struct page *page)
page_virtual = kmap_atomic(page);
for (i = 0; i < PAGE_SIZE; i += size)
- clflush(page_virtual + i);
+ clflushopt(page_virtual + i);
kunmap_atomic(page_virtual);
}
@@ -133,7 +139,7 @@ drm_clflush_virt_range(char *addr, unsigned long length)
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
- clflush(end - 1);
+ clflushopt(end - 1);
mb();
return;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d6cf77c472e7..3b7d32da1604 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -675,6 +675,29 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_cleanup);
/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ return index;
+
+ index++;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL(drm_crtc_index);
+
+/**
* drm_mode_probed_add - add a mode to a connector's probed mode list
* @connector: connector the new mode
* @mode: mode data
@@ -2767,10 +2790,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
- drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
- drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
}
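
drm_crtc_index(), added above, gives each CRTC a stable position in the device's crtc_list, which is exactly what possible_crtcs bitmasks are keyed on. A sketch of the encoder/CRTC compatibility test this enables (the helper name is made up; compare the open-coded loop removed from drm_crtc_helper.c just below):

static bool encoder_can_use_crtc(struct drm_encoder *encoder,
				 struct drm_crtc *crtc)
{
	/* bit n of possible_crtcs corresponds to the n-th registered CRTC */
	return encoder->possible_crtcs & (1 << drm_crtc_index(crtc));
}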
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 01361aba033b..f7a81209beb3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -324,35 +324,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?\n");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
-}
-
/*
* Check the CRTC we're going to map each output to vs. its current
* CRTC. If they don't match, we have to disable the output and the CRTC
@@ -536,7 +507,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
- drm_calc_timestamping_constants(crtc);
+ drm_calc_timestamping_constants(crtc, &crtc->hwmode);
/* FIXME: add subpixel order */
done:
@@ -593,7 +564,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* Caller must hold mode config lock.
*
* Setup a new configuration, provided by the upper layers (either an ioctl call
- * from userspace or internally e.g. from the fbdev suppport code) in @set, and
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index d9137e49c4e8..345be03c23db 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -315,9 +315,6 @@ long drm_ioctl(struct file *filp,
if (drm_device_is_unplugged(dev))
return -ENODEV;
- atomic_inc(&dev->ioctl_count);
- ++file_priv->ioctl_count;
-
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
@@ -410,7 +407,6 @@ long drm_ioctl(struct file *filp,
if (kdata != stack_kdata)
kfree(kdata);
- atomic_dec(&dev->ioctl_count);
if (retcode)
DRM_DEBUG("ret = %d\n", retcode);
return retcode;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 8835dcddfac3..b924306b8477 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -605,347 +605,347 @@ static const struct drm_display_mode edid_cea_modes[] = {
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, },
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, },
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, },
+ .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, },
+ .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, },
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, },
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, },
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, },
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, },
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
};
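The new .picture_aspect_ratio entries let later code (such as the AVI-infoframe helpers) report the CEA-defined aspect ratio of a duplicated mode instead of guessing it from the resolution. A minimal sanity-check sketch, using only names visible above (the helper function itself is hypothetical):

static void edid_cea_aspect_example(void)
{
	/* VIC 47 (1280x720@120Hz) is a 16:9 mode per CEA-861, so the entry
	 * patched above should now carry that ratio. */
	const struct drm_display_mode *m = &edid_cea_modes[47 - 1];

	WARN_ON(m->picture_aspect_ratio != HDMI_PICTURE_ASPECT_16_9);
}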
/*
@@ -2562,25 +2562,40 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
return modes;
}
-static int
-do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+static struct drm_display_mode *
+drm_display_mode_from_vic_index(struct drm_connector *connector,
+ const u8 *video_db, u8 video_len,
+ u8 video_index)
{
struct drm_device *dev = connector->dev;
- const u8 *mode;
+ struct drm_display_mode *newmode;
u8 cea_mode;
- int modes = 0;
- for (mode = db; mode < db + len; mode++) {
- cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
- if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev,
- &edid_cea_modes[cea_mode]);
- if (newmode) {
- newmode->vrefresh = 0;
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
+ if (video_db == NULL || video_index >= video_len)
+ return NULL;
+
+ /* CEA modes are numbered 1..127 */
+ cea_mode = (video_db[video_index] & 127) - 1;
+ if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+ return NULL;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode->vrefresh = 0;
+
+ return newmode;
+}
+
+static int
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+ int i, modes = 0;
+
+ for (i = 0; i < len; i++) {
+ struct drm_display_mode *mode;
+ mode = drm_display_mode_from_vic_index(connector, db, len, i);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
}
}
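The new helper centralizes the Short Video Descriptor decoding that was previously open-coded in this loop. As a rough sketch of what happens to one raw byte from the CEA Video Data Block (the value is made up for illustration):

static u8 svd_decode_example(void)
{
	u8 svd = 0x90;			/* native bit set, VIC 16 */

	/* Strip the native bit and convert the 1-based VIC to a 0-based
	 * index into edid_cea_modes; an out-of-range value makes
	 * drm_display_mode_from_vic_index() return NULL instead. */
	return (svd & 127) - 1;		/* 15 */
}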
@@ -2674,21 +2689,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
const u8 *video_db, u8 video_len, u8 video_index)
{
- struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
int modes = 0;
- u8 cea_mode;
-
- if (video_db == NULL || video_index >= video_len)
- return 0;
-
- /* CEA modes are numbered 1..127 */
- cea_mode = (video_db[video_index] & 127) - 1;
- if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
- return 0;
if (structure & (1 << 0)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
@@ -2696,7 +2703,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 6)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
@@ -2704,7 +2713,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 8)) {
- newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+ newmode = drm_display_mode_from_vic_index(connector, video_db,
+ video_len,
+ video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
@@ -2728,7 +2739,7 @@ static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
const u8 *video_db, u8 video_len)
{
- int modes = 0, offset = 0, i, multi_present = 0;
+ int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
u16 structure_all;
@@ -2774,32 +2785,84 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
}
offset += 1 + vic_len;
- if (!(multi_present == 1 || multi_present == 2))
- goto out;
+ if (multi_present == 1)
+ multi_len = 2;
+ else if (multi_present == 2)
+ multi_len = 4;
+ else
+ multi_len = 0;
- if ((multi_present == 1 && len < (9 + offset)) ||
- (multi_present == 2 && len < (11 + offset)))
+ if (len < (8 + offset + hdmi_3d_len - 1))
goto out;
- if ((multi_present == 1 && hdmi_3d_len < 2) ||
- (multi_present == 2 && hdmi_3d_len < 4))
+ if (hdmi_3d_len < multi_len)
goto out;
- /* 3D_Structure_ALL */
- structure_all = (db[8 + offset] << 8) | db[9 + offset];
+ if (multi_present == 1 || multi_present == 2) {
+ /* 3D_Structure_ALL */
+ structure_all = (db[8 + offset] << 8) | db[9 + offset];
- /* check if 3D_MASK is present */
- if (multi_present == 2)
- mask = (db[10 + offset] << 8) | db[11 + offset];
- else
- mask = 0xffff;
-
- for (i = 0; i < 16; i++) {
- if (mask & (1 << i))
- modes += add_3d_struct_modes(connector,
- structure_all,
- video_db,
- video_len, i);
+ /* check if 3D_MASK is present */
+ if (multi_present == 2)
+ mask = (db[10 + offset] << 8) | db[11 + offset];
+ else
+ mask = 0xffff;
+
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i))
+ modes += add_3d_struct_modes(connector,
+ structure_all,
+ video_db,
+ video_len, i);
+ }
+ }
+
+ offset += multi_len;
+
+ for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
+ int vic_index;
+ struct drm_display_mode *newmode = NULL;
+ unsigned int newflag = 0;
+ bool detail_present;
+
+ detail_present = ((db[8 + offset + i] & 0x0f) > 7);
+
+ if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
+ break;
+
+ /* 2D_VIC_order_X */
+ vic_index = db[8 + offset + i] >> 4;
+
+ /* 3D_Structure_X */
+ switch (db[8 + offset + i] & 0x0f) {
+ case 0:
+ newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
+ break;
+ case 6:
+ newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+ break;
+ case 8:
+ /* 3D_Detail_X */
+ if ((db[9 + offset + i] >> 4) == 1)
+ newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ break;
+ }
+
+ if (newflag != 0) {
+ newmode = drm_display_mode_from_vic_index(connector,
+ video_db,
+ video_len,
+ vic_index);
+
+ if (newmode) {
+ newmode->flags |= newflag;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (detail_present)
+ i++;
}
out:
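For readers following the offsets, each entry of the per-VIC 3D list parsed by this loop is one byte (two when 3D_Structure_X is 8 or above): the high nibble is 2D_VIC_order_X, an index into the Video Data Block, and the low nibble is 3D_Structure_X, with 3D_Detail_X in the high nibble of the optional second byte. A hedged restatement of the mapping used above, as a stand-alone helper (illustration only):

static unsigned int hdmi_3d_entry_to_flag(const u8 *db, int offset, int i)
{
	switch (db[8 + offset + i] & 0x0f) {		/* 3D_Structure_X */
	case 0:
		return DRM_MODE_FLAG_3D_FRAME_PACKING;
	case 6:
		return DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
	case 8:
		/* 3D_Detail_X == 1 means horizontal sub-sampling */
		if ((db[9 + offset + i] >> 4) == 1)
			return DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
		/* fall through */
	default:
		return 0;
	}
}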
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9081172ef057..1b4c7a5442c5 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -141,7 +141,7 @@ static int edid_size(const u8 *edid, int data_size)
return (edid[0x7e] + 1) * EDID_LENGTH;
}
-static u8 *edid_load(struct drm_connector *connector, const char *name,
+static void *edid_load(struct drm_connector *connector, const char *name,
const char *connector_name)
{
const struct firmware *fw = NULL;
@@ -263,7 +263,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
if (*last == '\n')
*last = '\0';
- edid = (struct edid *) edid_load(connector, edidname, connector_name);
+ edid = edid_load(connector, edidname, connector_name);
if (IS_ERR_OR_NULL(edid))
return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a19401aff80..98a03639b413 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -359,6 +359,11 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;
+ /* Sometimes user space wants everything disabled, so don't steal the
+ * display if there's a master. */
+ if (dev->primary->master)
+ return false;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
@@ -368,6 +373,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
if (bound < crtcs_bound)
return false;
+
return true;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c5b929c3f77a..309023f12d7f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -232,7 +232,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_put_pid;
}
- priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->always_authenticated = capable(CAP_SYS_ADMIN);
priv->authenticated = priv->always_authenticated;
@@ -320,7 +319,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
pci_dev_put(pci_dev);
}
if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+ struct pci_bus *b = list_entry(pci_root_buses.next,
+ struct pci_bus, node);
if (b)
dev->hose = b->sysdata;
}
@@ -392,9 +392,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
dev->sigdata.lock = NULL;
dev->context_flag = 0;
@@ -578,12 +575,7 @@ int drm_release(struct inode *inode, struct file *filp)
*/
if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
- retcode = -EBUSY;
- } else
- retcode = drm_lastclose(dev);
+ retcode = drm_lastclose(dev);
if (drm_device_is_unplugged(dev))
drm_put_dev(dev);
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4761adedad2a..5bbad873c798 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -91,19 +91,19 @@
int
drm_gem_init(struct drm_device *dev)
{
- struct drm_gem_mm *mm;
+ struct drm_vma_offset_manager *vma_offset_manager;
mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
- if (!mm) {
+ vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
- dev->mm_private = mm;
- drm_vma_offset_manager_init(&mm->vma_manager,
+ dev->vma_offset_manager = vma_offset_manager;
+ drm_vma_offset_manager_init(vma_offset_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
@@ -113,11 +113,10 @@ drm_gem_init(struct drm_device *dev)
void
drm_gem_destroy(struct drm_device *dev)
{
- struct drm_gem_mm *mm = dev->mm_private;
- drm_vma_offset_manager_destroy(&mm->vma_manager);
- kfree(mm);
- dev->mm_private = NULL;
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+ kfree(dev->vma_offset_manager);
+ dev->vma_offset_manager = NULL;
}
/**
@@ -129,11 +128,12 @@ int drm_gem_object_init(struct drm_device *dev,
{
struct file *filp;
+ drm_gem_private_object_init(dev, obj, size);
+
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
- drm_gem_private_object_init(dev, obj, size);
obj->filp = filp;
return 0;
@@ -175,11 +175,6 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
mutex_unlock(&filp->prime.lock);
}
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
/**
* Called after the last handle to the object has been closed
*
@@ -195,13 +190,6 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
- /*
- * The object name held a reference to this object, drop
- * that now.
- *
- * This cannot be the last reference, since the handle holds one too.
- */
- kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
@@ -374,9 +362,8 @@ void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
+ drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
@@ -398,9 +385,8 @@ int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
@@ -602,9 +588,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
goto err;
obj->name = ret;
-
- /* Allocate a reference for the name table. */
- drm_gem_object_reference(obj);
}
args->name = (uint64_t) obj->name;
@@ -833,7 +816,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_gem_mm *mm = dev->mm_private;
struct drm_gem_object *obj;
struct drm_vma_offset_node *node;
int ret = 0;
@@ -843,7 +825,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mutex_lock(&dev->struct_mutex);
- node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
vma_pages(vma));
if (!node) {
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 7d5a152eeb02..7473035dd28b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -186,14 +186,14 @@ int drm_clients_info(struct seq_file *m, void *data)
struct drm_file *priv;
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "a dev pid uid magic ioctls\n\n");
+ seq_printf(m, "a dev pid uid magic\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
- seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+ seq_printf(m, "%c %3d %5d %5d %10u\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
pid_vnr(priv->pid),
from_kuid_munged(seq_user_ns(m), priv->uid),
- priv->magic, priv->ioctl_count);
+ priv->magic);
}
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -234,14 +234,18 @@ int drm_vma_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
+ unsigned long vma_count = 0;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
- atomic_read(&dev->vma_count),
- high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+ list_for_each_entry(pt, &dev->vmalist, head)
+ vma_count++;
+
+ seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+ vma_count, high_memory,
+ (void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
+ case DRM_CAP_CURSOR_WIDTH:
+ if (dev->mode_config.cursor_width)
+ req->value = dev->mode_config.cursor_width;
+ else
+ req->value = 64;
+ break;
+ case DRM_CAP_CURSOR_HEIGHT:
+ if (dev->mode_config.cursor_height)
+ req->value = dev->mode_config.cursor_height;
+ else
+ req->value = 64;
+ break;
default:
return -EINVAL;
}
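Older kernels return -EINVAL for the two new capabilities, so user space still has to provide the historical 64x64 fallback itself. A hedged libdrm-side sketch (assuming the matching DRM_CAP_CURSOR_* values are present in the installed uapi headers):

#include <stdint.h>
#include <xf86drm.h>

/* Query the preferred cursor size, falling back to 64x64 when the
 * running kernel does not know the capability yet. */
static void get_cursor_size(int fd, uint64_t *w, uint64_t *h)
{
	if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, w) < 0)
		*w = 64;
	if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, h) < 0)
		*h = 64;
}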
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 64c34d5876ff..c2676b5908d9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -368,7 +368,7 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vblank[i].queue);
+ wake_up(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
@@ -436,45 +436,41 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
- * drm_calc_timestamping_constants - Calculate and
- * store various constants which are later needed by
- * vblank and swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos().
- * They are derived from crtc's true scanout timing,
- * so they take things like panel scaling or other
- * adjustments into account.
+ * drm_calc_timestamping_constants - Calculate vblank timestamp constants
*
* @crtc drm_crtc whose timestamp constants should be updated.
+ * @mode display mode containing the scanout timings
*
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from crtc's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
*/
-void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
- u64 dotclock;
-
- /* Dot clock in Hz: */
- dotclock = (u64) crtc->hwmode.clock * 1000;
-
- /* Fields of interlaced scanout modes are only half a frame duration.
- * Double the dotclock to get half the frame-/line-/pixelduration.
- */
- if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
- dotclock *= 2;
+ int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ int dotclock = mode->crtc_clock;
/* Valid dotclock? */
if (dotclock > 0) {
- int frame_size;
- /* Convert scanline length in pixels and video dot clock to
- * line duration, frame duration and pixel duration in
- * nanoseconds:
+ int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+ /*
+ * Convert scanline length in pixels and video
+ * dot clock to line duration, frame duration
+ * and pixel duration in nanoseconds:
*/
- pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
- linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
- 1000000000), dotclock);
- frame_size = crtc->hwmode.crtc_htotal *
- crtc->hwmode.crtc_vtotal;
- framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
- dotclock);
+ pixeldur_ns = 1000000 / dotclock;
+ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+ framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+ /*
+ * Fields of interlaced scanout modes are only half a frame duration.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ framedur_ns /= 2;
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
@@ -484,11 +480,11 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
crtc->framedur_ns = framedur_ns;
DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
- crtc->base.id, crtc->hwmode.crtc_htotal,
- crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ crtc->base.id, mode->crtc_htotal,
+ mode->crtc_vtotal, mode->crtc_vdisplay);
DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
- crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
- (int) linedur_ns, (int) pixeldur_ns);
+ crtc->base.id, dotclock, framedur_ns,
+ linedur_ns, pixeldur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
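To make the integer arithmetic concrete, here is a stand-alone worked example (a user-space sketch, not kernel code) for a common 1920x1080@60 mode, assuming crtc_clock = 148500 kHz, crtc_htotal = 2200 and crtc_vtotal = 1125:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int dotclock = 148500;			/* mode->crtc_clock, in kHz */
	int htotal = 2200, vtotal = 1125;

	int pixeldur_ns = 1000000 / dotclock;				 /* 6 ns (truncated) */
	int linedur_ns  = (int)((uint64_t)htotal * 1000000 / dotclock); /* 14814 ns */
	int framedur_ns = (int)((uint64_t)htotal * vtotal * 1000000 / dotclock);
									 /* 16666666 ns, i.e. ~60 Hz */

	/* For an interlaced mode the kernel code halves framedur_ns, since
	 * each field lasts only half a frame. */
	printf("%d %d %d\n", pixeldur_ns, linedur_ns, framedur_ns);
	return 0;
}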
@@ -521,6 +517,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ * @mode: mode which defines the scanout timings
*
* Returns negative value on error, failure or if not supported in current
* video mode:
@@ -540,14 +537,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags,
- struct drm_crtc *refcrtc)
+ const struct drm_crtc *refcrtc,
+ const struct drm_display_mode *mode)
{
ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
- struct drm_display_mode *mode;
- int vbl_status, vtotal, vdisplay;
+ int vbl_status;
int vpos, hpos, i;
- s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
if (crtc < 0 || crtc >= dev->num_crtcs) {
@@ -561,10 +558,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- mode = &refcrtc->hwmode;
- vtotal = mode->crtc_vtotal;
- vdisplay = mode->crtc_vdisplay;
-
/* Durations of frames, lines, pixels in nanoseconds. */
framedur_ns = refcrtc->framedur_ns;
linedur_ns = refcrtc->linedur_ns;
@@ -573,7 +566,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* If mode timing undefined, just return as no-op:
* Happens during initial modesetting of a crtc.
*/
- if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ if (framedur_ns == 0) {
DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
return -EAGAIN;
}
@@ -590,7 +583,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
&hpos, &stime, &etime);
/*
@@ -611,18 +604,18 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
- if (duration_ns <= (s64) *max_error)
+ if (duration_ns <= *max_error)
break;
}
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
- crtc, (int) duration_ns/1000, *max_error/1000, i);
+ crtc, duration_ns/1000, *max_error/1000, i);
}
/* Return upper bound of timestamp precision error. */
- *max_error = (int) duration_ns;
+ *max_error = duration_ns;
/* Check if in vblank area:
* vpos is >=0 in video scanout area, but negative
@@ -635,25 +628,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* since start of scanout at first display scanline. delta_ns
* can be negative if start of scanout hasn't happened yet.
*/
- delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
-
- /* Is vpos outside nominal vblank area, but less than
- * 1/100 of a frame height away from start of vblank?
- * If so, assume this isn't a massively delayed vblank
- * interrupt, but a vblank interrupt that fired a few
- * microseconds before true start of vblank. Compensate
- * by adding a full frame duration to the final timestamp.
- * Happens, e.g., on ATI R500, R600.
- *
- * We only do this if DRM_CALLED_FROM_VBLIRQ.
- */
- if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
- ((vdisplay - vpos) < vtotal / 100)) {
- delta_ns = delta_ns - framedur_ns;
-
- /* Signal this correction as "applied". */
- vbl_status |= 0x8;
- }
+ delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
if (!drm_timestamp_monotonic)
etime = ktime_sub(etime, mono_time_offset);
@@ -673,7 +648,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
crtc, (int)vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
- (int)duration_ns/1000, i);
+ duration_ns/1000, i);
vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
if (invbl)
@@ -960,7 +935,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
- jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
+ jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
@@ -980,7 +955,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
- DRM_WAKEUP(&dev->vblank[crtc].queue);
+ wake_up(&dev->vblank[crtc].queue);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1244,7 +1219,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
dev->vblank[crtc].last_wait = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled));
@@ -1363,7 +1338,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
crtc, (int) diff_ns);
}
- DRM_WAKEUP(&dev->vblank[crtc].queue);
+ wake_up(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 64e44fad8ae8..00c67c0f2381 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -82,19 +82,19 @@ static void *agp_remap(unsigned long offset, unsigned long size,
}
/** Wrapper around agp_free_memory() */
-void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(struct agp_memory * handle, int pages)
{
agp_free_memory(handle);
}
/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory * handle, unsigned int start)
{
return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
+int drm_unbind_agp(struct agp_memory * handle)
{
return agp_unbind_memory(handle);
}
@@ -110,8 +110,7 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
@@ -120,8 +119,7 @@ EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
@@ -133,8 +131,7 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
if (!map->handle || !map->size)
return;
- if (drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
new file mode 100644
index 000000000000..b155ee2ffa17
--- /dev/null
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -0,0 +1,315 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <video/mipi_display.h>
+
+static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+};
+
+static struct bus_type mipi_dsi_bus_type = {
+ .name = "mipi-dsi",
+ .match = mipi_dsi_device_match,
+ .pm = &mipi_dsi_device_pm_ops,
+};
+
+static void mipi_dsi_dev_release(struct device *dev)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ of_node_put(dev->of_node);
+ kfree(dsi);
+}
+
+static const struct device_type mipi_dsi_device_type = {
+ .release = mipi_dsi_dev_release,
+};
+
+static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+{
+ struct mipi_dsi_device *dsi;
+
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return ERR_PTR(-ENOMEM);
+
+ dsi->host = host;
+ dsi->dev.bus = &mipi_dsi_bus_type;
+ dsi->dev.parent = host->dev;
+ dsi->dev.type = &mipi_dsi_device_type;
+
+ device_initialize(&dsi->dev);
+
+ return dsi;
+}
+
+static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_host *host = dsi->host;
+
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+
+ return device_add(&dsi->dev);
+}
+
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = host->dev;
+ int ret;
+ u32 reg;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "device node %s has no valid reg property: %d\n",
+ node->full_name, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (reg > 3) {
+ dev_err(dev, "device node %s has invalid reg property: %u\n",
+ node->full_name, reg);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dsi = mipi_dsi_device_alloc(host);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to allocate DSI device %s: %ld\n",
+ node->full_name, PTR_ERR(dsi));
+ return dsi;
+ }
+
+ dsi->dev.of_node = of_node_get(node);
+ dsi->channel = reg;
+
+ ret = mipi_dsi_device_add(dsi);
+ if (ret) {
+ dev_err(dev, "failed to add DSI device %s: %d\n",
+ node->full_name, ret);
+ kfree(dsi);
+ return ERR_PTR(ret);
+ }
+
+ return dsi;
+}
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host)
+{
+ struct device_node *node;
+
+ for_each_available_child_of_node(host->dev->of_node, node)
+ of_mipi_dsi_device_add(host, node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_host_register);
+
+static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ device_unregister(&dsi->dev);
+
+ return 0;
+}
+
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+}
+EXPORT_SYMBOL(mipi_dsi_host_unregister);
+
+/**
+ * mipi_dsi_attach - attach a DSI device to its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->attach)
+ return -ENOSYS;
+
+ return ops->attach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_attach);
+
+/**
+ * mipi_dsi_detach - detach a DSI device from its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->detach)
+ return -ENOSYS;
+
+ return ops->detach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_detach);
+
+/**
+ * mipi_dsi_dcs_write - send DCS write command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @data: pointer to the command followed by parameters
+ * @len: length of @data
+ */
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+ const void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .tx_buf = data,
+ .tx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ switch (len) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+ break;
+ case 2:
+ msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+ break;
+ default:
+ msg.type = MIPI_DSI_DCS_LONG_WRITE;
+ break;
+ }
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write);
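As a hedged usage sketch (the driver name and the panel-specific delay are made up; the MIPI_DCS_* codes come from <video/mipi_display.h>), a peripheral driver could use the one-byte short-write path like this:

#include <drm/drm_mipi_dsi.h>
#include <linux/delay.h>
#include <video/mipi_display.h>

/* Illustration only: wake the panel and switch the display on. */
static int example_panel_enable(struct mipi_dsi_device *dsi)
{
	u8 exit_sleep = MIPI_DCS_EXIT_SLEEP_MODE;
	u8 display_on = MIPI_DCS_SET_DISPLAY_ON;
	int ret;

	ret = mipi_dsi_dcs_write(dsi, dsi->channel, &exit_sleep, 1);
	if (ret < 0)
		return ret;

	msleep(120);	/* typical sleep-out delay; panel specific */

	return mipi_dsi_dcs_write(dsi, dsi->channel, &display_on, 1);
}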
+
+/**
+ * mipi_dsi_dcs_read - send DCS read request command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @cmd: DCS read command
+ * @data: pointer to read buffer
+ * @len: length of @data
+ *
+ * Returns the number of bytes read on success or a negative error code on failure.
+ */
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+ u8 cmd, void *data, size_t len)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
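A matching hedged sketch for the read path (MIPI_DCS_GET_POWER_MODE is the standard one-byte power-mode query; error handling is deliberately minimal):

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Illustration only: read back the panel's power mode register. */
static int example_panel_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
{
	ssize_t ret;

	ret = mipi_dsi_dcs_read(dsi, dsi->channel, MIPI_DCS_GET_POWER_MODE,
				mode, 1);
	return ret < 0 ? ret : 0;
}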
+
+static int mipi_dsi_drv_probe(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->probe(dsi);
+}
+
+static int mipi_dsi_drv_remove(struct device *dev)
+{
+ struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ return drv->remove(dsi);
+}
+
+/**
+ * mipi_dsi_driver_register - register a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+{
+ drv->driver.bus = &mipi_dsi_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mipi_dsi_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mipi_dsi_drv_remove;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_register);
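Putting the pieces together, a minimal (entirely hypothetical) peripheral driver would register itself like this; the compatible string and callbacks are placeholders:

#include <drm/drm_mipi_dsi.h>
#include <linux/module.h>
#include <linux/of.h>

static int example_dsi_probe(struct mipi_dsi_device *dsi)
{
	/* configure the panel behind this virtual channel, then attach */
	return mipi_dsi_attach(dsi);
}

static int example_dsi_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

static const struct of_device_id example_dsi_of_match[] = {
	{ .compatible = "vendor,example-dsi-panel" },	/* placeholder */
	{ }
};
MODULE_DEVICE_TABLE(of, example_dsi_of_match);

static struct mipi_dsi_driver example_dsi_driver = {
	.probe = example_dsi_probe,
	.remove = example_dsi_remove,
	.driver = {
		.name = "example-dsi-panel",
		.of_match_table = example_dsi_of_match,
	},
};

static int __init example_dsi_init(void)
{
	return mipi_dsi_driver_register(&example_dsi_driver);
}
module_init(example_dsi_init);

static void __exit example_dsi_exit(void)
{
	mipi_dsi_driver_unregister(&example_dsi_driver);
}
module_exit(example_dsi_exit);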
+
+/**
+ * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_unregister);
+
+static int __init mipi_dsi_bus_init(void)
+{
+ return bus_register(&mipi_dsi_bus_type);
+}
+postcore_initcall(mipi_dsi_bus_init);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI DSI Bus");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644
index 000000000000..2ef988e037b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+ INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_add_tail(&panel->list, &panel_list);
+ mutex_unlock(&panel_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+ mutex_lock(&panel_lock);
+ list_del_init(&panel->list);
+ mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+ if (panel->connector)
+ return -EBUSY;
+
+ panel->connector = connector;
+ panel->drm = connector->dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+ panel->connector = NULL;
+ panel->drm = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+ struct drm_panel *panel;
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (panel->dev->of_node == np) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM panel infrastructure");
+MODULE_LICENSE("GPL and additional rights");
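A hedged sketch of how the two sides of this interface meet (all names are placeholders): the panel driver registers a drm_panel whose dev->of_node identifies it, and the display-controller driver later looks it up by that node and attaches it to its connector.

#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>

static struct drm_panel example_panel;

/* Panel-driver side, typically called from its probe routine. */
static int example_panel_register(struct device *dev)
{
	drm_panel_init(&example_panel);
	example_panel.dev = dev;	/* of_drm_find_panel() matches on dev->of_node */
	/* a real driver also sets example_panel.funcs here */
	return drm_panel_add(&example_panel);
}

/* Display-controller side, given the panel's device-tree node. */
static int example_attach_panel(struct drm_connector *connector,
				struct device_node *panel_node)
{
	struct drm_panel *panel = of_drm_find_panel(panel_node);

	if (!panel)
		return -EPROBE_DEFER;	/* panel driver not bound yet */

	return drm_panel_attach(panel, connector);
}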
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 02679793c9e2..f7af69bcf3f4 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -262,16 +262,11 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
return 0;
}
-static int drm_pci_agp_init(struct drm_device *dev)
+static void drm_pci_agp_init(struct drm_device *dev)
{
- if (drm_core_has_AGP(dev)) {
+ if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- return -EINVAL;
- }
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
@@ -279,15 +274,14 @@ static int drm_pci_agp_init(struct drm_device *dev)
1024 * 1024);
}
}
- return 0;
}
-static void drm_pci_agp_destroy(struct drm_device *dev)
+void drm_pci_agp_destroy(struct drm_device *dev)
{
- if (drm_core_has_AGP(dev) && dev->agp) {
+ if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_agp_clear(dev);
- drm_agp_destroy(dev->agp);
+ kfree(dev->agp);
dev->agp = NULL;
}
}
@@ -299,8 +293,6 @@ static struct drm_bus drm_pci_bus = {
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
- .agp_init = drm_pci_agp_init,
- .agp_destroy = drm_pci_agp_destroy,
};
/**
@@ -338,17 +330,25 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
+ drm_pci_agp_init(dev);
+
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_pci;
+ goto err_agp;
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
+	/* No locking needed since shadow-attach is single-threaded: it may only
+	 * be called from the per-driver module init hook. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+
return 0;
-err_pci:
+err_agp:
+ drm_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
drm_dev_free(dev);
@@ -375,7 +375,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
@@ -383,6 +382,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
+ INIT_LIST_HEAD(&driver->legacy_dev_list);
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
@@ -452,6 +452,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return -1;
}
+void drm_pci_agp_destroy(struct drm_device *dev) {}
#endif
EXPORT_SYMBOL(drm_pci_init);
@@ -465,8 +466,11 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(pdriver);
} else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ legacy_dev_list) {
+ list_del(&dev->legacy_dev_list);
drm_put_dev(dev);
+ }
}
DRM_INFO("Module unloaded\n");
}
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index fc24fee8ec83..21fc82006b78 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -147,18 +147,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
- INIT_LIST_HEAD(&driver->device_list);
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);
-
-void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_platform_exit);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 56805c39c906..bb516fdd195d 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -471,7 +471,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 66dd3a001cf1..98a33c580ca1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -99,13 +99,19 @@ void drm_ut_debug_printk(unsigned int request_level,
const char *function_name,
const char *format, ...)
{
+ struct va_format vaf;
va_list args;
if (drm_debug & request_level) {
- if (function_name)
- printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
va_start(args, format);
- vprintk(format, args);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (function_name)
+ printk(KERN_DEBUG "[%s:%s], %pV", prefix,
+ function_name, &vaf);
+ else
+ printk(KERN_DEBUG "%pV", &vaf);
va_end(args);
}
}
@@ -521,16 +527,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
mutex_lock(&drm_global_mutex);
- if (dev->driver->bus->agp_init) {
- ret = dev->driver->bus->agp_init(dev);
- if (ret)
- goto out_unlock;
- }
-
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
- goto err_agp;
+ goto out_unlock;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
@@ -557,8 +557,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_unload;
}
- list_add_tail(&dev->driver_item, &dev->driver->device_list);
-
ret = 0;
goto out_unlock;
@@ -571,9 +569,6 @@ err_render_node:
drm_unplug_minor(dev->render);
err_control_node:
drm_unplug_minor(dev->control);
-err_agp:
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
out_unlock:
mutex_unlock(&drm_global_mutex);
return ret;
@@ -597,8 +592,8 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- if (dev->driver->bus->agp_destroy)
- dev->driver->bus->agp_destroy(dev);
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
drm_vblank_cleanup(dev);
@@ -608,7 +603,5 @@ void drm_dev_unregister(struct drm_device *dev)
drm_unplug_minor(dev->control);
drm_unplug_minor(dev->render);
drm_unplug_minor(dev->primary);
-
- list_del(&dev->driver_item);
}
EXPORT_SYMBOL(drm_dev_unregister);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index b179b70e7853..0f8cb1ae7607 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,4 +1,5 @@
#include <drm/drmP.h>
+#include <drm/drm_usb.h>
#include <linux/usb.h>
#include <linux/module.h>
@@ -63,7 +64,6 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
int res;
DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 93e95d7efd57..24e045c4f531 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -101,7 +101,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Find the right map
*/
- if (!drm_core_has_AGP(dev))
+ if (!dev->agp)
goto vm_fault_error;
if (!dev->agp || !dev->agp->cant_use_aperture)
@@ -220,7 +220,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
map = vma->vm_private_data;
@@ -266,9 +265,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
}
kfree(map);
}
@@ -408,7 +404,6 @@ void drm_vm_open_locked(struct drm_device *dev,
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_inc(&dev->vma_count);
vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
if (vma_entry) {
@@ -436,7 +431,6 @@ void drm_vm_close_locked(struct drm_device *dev,
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
@@ -595,7 +589,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
switch (map->type) {
#if !defined(__arm__)
case _DRM_AGP:
- if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+ if (dev->agp && dev->agp->cant_use_aperture) {
/*
* On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index ebc01503d50e..6f3400f3978a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -36,9 +36,9 @@ enum exynos_crtc_mode {
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
- * array when irq interrupt occured. the reason of using this pipe is that
+ * array when an irq interrupt occurred. the reason for using this pipe is that
* drm framework doesn't support multiple irq yet.
- * we can refer to the crtc to current hardware interrupt occured through
+ * we can refer to the crtc on which the current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
* @mode: store the crtc mode value
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 59827cc5e770..c786cd4f457b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -224,7 +224,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_buf_detach;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 22b8f5eced80..c204b4e3356e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -14,6 +14,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <linux/anon_inodes.h>
+
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -119,6 +121,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_vblank_offdelay = VBLANK_OFF_DELAY;
+ platform_set_drvdata(dev->platformdev, dev);
+
return 0;
err_drm_device:
@@ -150,9 +154,14 @@ static int exynos_drm_unload(struct drm_device *dev)
return 0;
}
+static const struct file_operations exynos_drm_gem_fops = {
+ .mmap = exynos_drm_gem_mmap_buffer,
+};
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
+ struct file *anon_filp;
int ret;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -162,11 +171,27 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
+ if (ret)
+ goto err_file_priv_free;
+
+ anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
+ NULL, 0);
+ if (IS_ERR(anon_filp)) {
+ ret = PTR_ERR(anon_filp);
+ goto err_subdrv_close;
}
+ anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
+ file_priv->anon_filp = anon_filp;
+
+ return ret;
+
+err_subdrv_close:
+ exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
+ kfree(file_priv);
+ file->driver_priv = NULL;
return ret;
}
@@ -179,6 +204,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct drm_exynos_file_private *file_priv;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
@@ -204,6 +230,9 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+ file_priv = file->driver_priv;
+ if (file_priv->anon_filp)
+ fput(file_priv->anon_filp);
kfree(file->driver_priv);
file->driver_priv = NULL;
@@ -305,7 +334,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
- drm_platform_exit(&exynos_drm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index eaa19668bf00..a8f9dba2a816 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -226,6 +226,7 @@ struct exynos_drm_ipp_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
struct exynos_drm_ipp_private *ipp_priv;
+ struct file *anon_filp;
};
/*
@@ -236,7 +237,6 @@ struct drm_exynos_file_private {
* otherwise default one.
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
- * @da_space_order: order to device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -254,7 +254,6 @@ struct exynos_drm_private {
unsigned long da_start;
unsigned long da_space_size;
- unsigned long da_space_order;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8adfc8f1e08f..30d76b2ff9c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -345,7 +345,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
fimc_write(cfg, EXYNOS_CIWDOFST);
- dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
return true;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a61878bf5dcd..a20440ce32e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -347,7 +347,7 @@ static void fimd_wait_for_vblank(struct device *dev)
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
- DRM_HZ/20))
+ HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
@@ -706,7 +706,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
- DRM_WAKEUP(&ctx->wait_vsync_queue);
+ wake_up(&ctx->wait_vsync_queue);
}
out:
return IRQ_HANDLED;
@@ -954,7 +954,7 @@ static int fimd_probe(struct platform_device *pdev)
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
- DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 7bccedca487a..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
@@ -1126,7 +1126,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
* G2D interrupt event once current command list execution is
* finished.
* Otherwise only ACF bit should be set to INTEN register so
- * that one interrupt is occured after all command lists
+ * that only one interrupt occurs after all command lists
* have been completed.
*/
if (node->event) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1ade191d84f4..42d2904d88c7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -338,46 +338,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
- struct file *filp)
-{
- struct drm_file *file_priv;
-
- /* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
- if (file_priv->filp == filp)
- return file_priv;
-
- WARN_ON(1);
-
- return ERR_PTR(-EFAULT);
-}
-
-static int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- struct drm_file *file_priv;
unsigned long vm_size;
int ret;
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
- /* restore it to driver's fops. */
- filp->f_op = fops_get(drm_dev->driver->fops);
-
- file_priv = exynos_drm_find_drm_file(drm_dev, filp);
- if (IS_ERR(file_priv))
- return PTR_ERR(file_priv);
-
- /* restore it to drm_file. */
- filp->private_data = file_priv;
-
update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = vma->vm_end - vma->vm_start;
@@ -411,15 +387,13 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
return 0;
}
-static const struct file_operations exynos_drm_gem_fops = {
- .mmap = exynos_drm_gem_mmap_buffer,
-};
-
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_exynos_file_private *exynos_file_priv;
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
+ struct file *anon_filp;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM)) {
@@ -427,47 +401,25 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
+ mutex_lock(&dev->struct_mutex);
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- /*
- * We have to use gem object and its fops for specific mmaper,
- * but vm_mmap() can deliver only filp. So we have to change
- * filp->f_op and filp->private_data temporarily, then restore
- * again. So it is important to keep lock until restoration the
- * settings to prevent others from misuse of filp->f_op or
- * filp->private_data.
- */
- mutex_lock(&dev->struct_mutex);
-
- /*
- * Set specific mmper's fops. And it will be restored by
- * exynos_drm_gem_mmap_buffer to dev->driver->fops.
- * This is used to call specific mapper temporarily.
- */
- file_priv->filp->f_op = &exynos_drm_gem_fops;
-
- /*
- * Set gem object to private_data so that specific mmaper
- * can get the gem object. And it will be restored by
- * exynos_drm_gem_mmap_buffer to drm_file.
- */
- file_priv->filp->private_data = obj;
+ exynos_file_priv = file_priv->driver_priv;
+ anon_filp = exynos_file_priv->anon_filp;
+ anon_filp->private_data = obj;
- addr = vm_mmap(file_priv->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+ addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR_VALUE(addr)) {
- /* check filp->f_op, filp->private_data are restored */
- if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
- file_priv->filp->f_op = fops_get(dev->driver->fops);
- file_priv->filp->private_data = file_priv;
- }
mutex_unlock(&dev->struct_mutex);
return (int)addr;
}
@@ -652,7 +604,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
int ret;
/*
- * alocate memory to be used for framebuffer.
+ * allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
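
The gem.c hunks stop temporarily swapping filp->f_op and filp->private_data around vm_mmap() and instead route the mapping through a per-open anonymous file whose private_data carries the GEM object. A hedged sketch of how such an anon file can be created at open() time; the "exynos_gem" name and the helper below are illustrative, while anon_filp itself is the field the hunks above read from drm_exynos_file_private:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,	/* the handler made non-static above */
};

static int exynos_drm_gem_anon_file_create(struct drm_exynos_file_private *file_priv)
{
	struct file *anon_filp;

	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
				       NULL, 0);
	if (IS_ERR(anon_filp))
		return PTR_ERR(anon_filp);

	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
	file_priv->anon_filp = anon_filp;
	return 0;
}
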
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 702ec3abe85c..1592c0ba7de8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -60,7 +60,7 @@ struct exynos_drm_gem_buf {
* @vma: a pointer to vm_area.
* @flags: indicate memory type to allocated buffer and cache attruibute.
*
- * P.S. this object would be transfered to user as kms_bo.handle so
+ * P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
@@ -122,6 +122,9 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+ struct vm_area_struct *vma);
+
/* map user space allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index cd6aebd53bd0..fa75059a6104 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1301,13 +1301,13 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
- dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
ctx->id, status);
return IRQ_NONE;
}
if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
- dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+ dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
ctx->id, status);
buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index fb8db0378274..b32b291f88ff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -36,12 +36,10 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
priv->da_start = EXYNOS_DEV_ADDR_START;
if (!priv->da_space_size)
priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
- if (!priv->da_space_order)
- priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size,
- priv->da_space_order);
+ priv->da_space_size);
+
if (IS_ERR(mapping))
return PTR_ERR(mapping);
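
arm_iommu_create_mapping() lost its "order" argument, so the mapping is now described by bus type, base device address and size alone. A small sketch of the updated call, wrapped in an illustrative helper and using the address-space constants from the header patched below:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *exynos_iommu_mapping_create(void)
{
	/* the old size/order pair collapsed into a single size argument */
	return arm_iommu_create_mapping(&platform_bus_type,
					EXYNOS_DEV_ADDR_START,
					EXYNOS_DEV_ADDR_SIZE);
}

Devices are then bound with arm_iommu_attach_device() and the mapping dropped with arm_iommu_release_mapping(), as before.
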
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 598e60f57d4b..72376d41c512 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,6 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x0
#ifdef CONFIG_DRM_EXYNOS_IOMMU
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 824e0705c8d3..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -335,7 +334,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
} else {
/*
* Getting ippdrv capability by ipp_id.
- * some deivce not supported wb, output interface.
+ * some devices do not support the wb or output interface.
* so, user application detect correct ipp driver
* using this ioctl.
*/
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL means that all events should be deleted.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 4cadbea7dbde..ab1634befc05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
- * @priv: IPP private infomation.
+ * @priv: IPP private information.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@@ -92,7 +92,7 @@ struct drm_exynos_ipp_buf_info {
};
/*
- * A structure of wb setting infomation.
+ * A structure of wb setting information.
*
* @enable: enable flag for wb.
* @refresh: HZ of the refresh rate.
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded ,RGB YBCR selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
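
The hdmi changes drop the driver-local packet-type enum and struct hdmi_infoframe in favour of the shared union hdmi_infoframe from <linux/hdmi.h>; the generic header fields live under the .any member. A minimal sketch of the pattern used in the hunks above (HDMI_AVI_VERSION and HDMI_AVI_LENGTH are the driver's own defines):

#include <linux/hdmi.h>

static void fill_avi_header(union hdmi_infoframe *frame)
{
	frame->any.type = HDMI_INFOFRAME_TYPE_AVI;	/* 0x82, from <linux/hdmi.h> */
	frame->any.version = HDMI_AVI_VERSION;
	frame->any.length = HDMI_AVI_LENGTH;
}

static u32 avi_header_sum(const union hdmi_infoframe *frame)
{
	/* checksum seed over the three header bytes, as in hdmi_reg_infoframe() */
	return frame->any.type + frame->any.version + frame->any.length;
}
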
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 63bc5f92fbb3..2dfa48c76f54 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -868,7 +868,7 @@ static void mixer_wait_for_vblank(void *ctx)
*/
if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
!atomic_read(&mixer_ctx->wait_vsync_event),
- DRM_HZ/20))
+ HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
@@ -1019,7 +1019,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
- DRM_WAKEUP(&ctx->wait_vsync_queue);
+ wake_up(&ctx->wait_vsync_queue);
}
}
@@ -1209,7 +1209,7 @@ static int mixer_probe(struct platform_device *pdev)
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
- DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 508cf99a292d..17f928ec84ea 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -10,7 +10,6 @@ config DRM_GMA500
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index d5ef1a5793c8..de6f62a6ceb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -326,7 +326,7 @@ int psbfb_sync(struct fb_info *info)
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long _end = jiffies + DRM_HZ;
+ unsigned long _end = jiffies + HZ;
int busy = 0;
unsigned long flags;
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 143eba3309c5..ea7dfc59d796 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,13 +26,13 @@
#include "intel_bios.h"
#include "power.h"
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static void do_gma_backlight_set(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = dev->dev_private;
backlight_update_status(dev_priv->backlight_device);
-#endif
}
+#endif
void gma_backlight_enable(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f88a1815d87c..0490ce36b53f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -483,7 +483,7 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
if (send_bytes > 16)
return -1;
- msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
@@ -493,9 +493,10 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -523,7 +524,7 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
uint8_t ack;
int ret;
- msg[0] = AUX_NATIVE_READ << 4;
+ msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
@@ -538,12 +539,12 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
return -EPROTO;
if (ret < 0)
return ret;
- ack = reply[0];
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ ack = reply[0] >> 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
@@ -569,12 +570,12 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
+ msg[0] = DP_AUX_I2C_READ << 4;
else
- msg[0] = AUX_I2C_WRITE << 4;
+ msg[0] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
+ msg[0] |= DP_AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
@@ -606,16 +607,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return ret;
}
- switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
udelay(100);
continue;
default:
@@ -624,16 +625,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
@@ -677,7 +678,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
return ret;
}
-void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
adjusted_mode->hdisplay = fixed_mode->hdisplay;
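
With the driver-local AUX_* defines gone, the reply code is taken from the upper nibble of the first byte returned on the AUX channel and matched against the DP_AUX_* constants from <drm/drm_dp_helper.h>, which is what the added ">> 4" shifts are for. A small sketch of the decode; the helper name is illustrative, and raw is the byte as this hardware returns it:

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_dp_helper.h>

static int cdv_decode_native_reply(u8 raw)
{
	u8 ack = raw >> 4;	/* reply code sits in bits 7:4 of the raw byte */

	switch (ack & DP_AUX_NATIVE_REPLY_MASK) {
	case DP_AUX_NATIVE_REPLY_ACK:
		return 0;
	case DP_AUX_NATIVE_REPLY_DEFER:
		return -EAGAIN;		/* caller retries after a short delay */
	case DP_AUX_NATIVE_REPLY_NACK:
	default:
		return -EIO;
	}
}
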
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 24e8af3d22bf..386de2c9dc86 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* If we didn't get a handle then turn the cursor off */
if (!handle) {
temp = CURSOR_MODE_DISABLE;
+ mutex_lock(&dev->struct_mutex);
if (gma_power_begin(dev, false)) {
REG_WRITE(control, temp);
@@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gma_crtc->cursor_obj = NULL;
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
+ mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "Buffer is too small\n");
@@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
}
gma_crtc->cursor_obj = obj;
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
unref_cursor:
drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
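
The cursor hunks take dev->struct_mutex around the GEM lookup and the register writes and turn the early return into a goto, so the mutex is dropped on every exit path. A condensed sketch of that flow, not the full cursor path:

#include <drm/drmP.h>

static int cursor_update_locked(struct drm_device *dev,
				struct drm_file *file_priv, u32 handle)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;		/* never return with struct_mutex held */
	}

	/* ... validate the size, pin and program the cursor registers ... */

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
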
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 49bac41beefb..c3e67ba94446 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -520,7 +520,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
driver->has_clflush = 0;
- if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+ if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index ad0d6de938f3..13ec6283bf59 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -22,7 +22,6 @@
*
*/
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b59e6588c343..5ad6a03e477e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -212,8 +212,8 @@ enum {
#define PSB_HIGH_REG_OFFS 0x0600
#define PSB_NUM_VBLANKS 2
-#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
-#define PSB_LID_DELAY (DRM_HZ / 10)
+#define PSB_WATCHDOG_DELAY (HZ * 2)
+#define PSB_LID_DELAY (HZ / 10)
#define MDFLD_PNW_B0 0x04
#define MDFLD_PNW_C0 0x08
@@ -232,7 +232,7 @@ enum {
#define MDFLD_DSR_RR 45
#define MDFLD_DPU_ENABLE (1 << 31)
#define MDFLD_DSR_FULLSCREEN (1 << 30)
-#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR)
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -769,7 +769,7 @@ extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
*psb_irq.c
*/
-extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t psb_irq_handler(int irq, void *arg);
extern int psb_irq_enable_dpst(struct drm_device *dev);
extern int psb_irq_disable_dpst(struct drm_device *dev);
extern void psb_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index bde27fdb41bf..dc2c8eb030fa 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -250,11 +250,6 @@ extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
-extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
- *dev, struct
- drm_mode_fb_cmd
- *mode_cmd,
- void *mm_private);
extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index ba4830342d34..f883f9e4c524 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -200,7 +200,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
mid_pipe_event_handler(dev, 1);
}
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -253,7 +253,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
- DRM_READMEMORYBARRIER();
+ rmb();
if (!handled)
return IRQ_NONE;
@@ -450,21 +450,6 @@ int psb_irq_disable_dpst(struct drm_device *dev)
return 0;
}
-#ifdef PSB_FIXME
-static int psb_vblank_do_wait(struct drm_device *dev,
- unsigned int *sequence, atomic_t *counter)
-{
- unsigned int cur_vblank;
- int ret = 0;
- DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(counter))
- - *sequence) <= (1 << 23)));
- *sequence = cur_vblank;
-
- return ret;
-}
-#endif
-
/*
* It is used to enable VBLANK interrupt
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index debb7f190c06..d0b45ffa1126 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -32,7 +32,7 @@ void sysirq_uninit(struct drm_device *dev);
void psb_irq_preinstall(struct drm_device *dev);
int psb_irq_postinstall(struct drm_device *dev);
void psb_irq_uninstall(struct drm_device *dev);
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t psb_irq_handler(int irq, void *arg);
int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec) {
+ kfree(priv);
+ return -ENODEV;
+ }
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
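
The tda998x changes add error handling for the CEC dummy client: in this kernel generation i2c_new_dummy() returns NULL (not an ERR_PTR) on failure, and the client must be released with i2c_unregister_device() when the encoder is destroyed. A minimal sketch of that lifecycle; the helper names are illustrative:

#include <linux/errno.h>
#include <linux/i2c.h>

static int tda998x_cec_create(struct tda998x_priv *priv,
			      struct i2c_client *client)
{
	priv->cec = i2c_new_dummy(client->adapter, 0x34);
	if (!priv->cec)			/* NULL on failure in this API */
		return -ENODEV;
	return 0;
}

static void tda998x_cec_destroy(struct tda998x_priv *priv)
{
	if (priv->cec)
		i2c_unregister_device(priv->cec);
}
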
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 249fdff305c6..aeace37415aa 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1193,6 +1193,10 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
+ /* Our userspace depends upon the agp mapping support. */
+ if (!dev->agp)
+ return -EINVAL;
+
pci_set_master(dev->pdev);
return 0;
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d8180d22cedd..441ccf8f5bdc 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_USE_AGP |
DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 6199d0b5b958..bea2d67196fb 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -1,8 +1,10 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
- depends on AGP
- depends on AGP_INTEL
+ depends on X86 && PCI
+ depends on (AGP || AGP=n)
+ select INTEL_GTT
+ select AGP_INTEL if AGP
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
@@ -12,7 +14,6 @@ config DRM_I915
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
- select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
@@ -35,15 +36,14 @@ config DRM_I915
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
+ default y
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain. Note that this causes
- the driver to bind to PCI devices, which precludes loading things
- like intelfb.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ If in doubt, say "Y".
config DRM_I915_FBDEV
- bool "Enable legacy fbdev support for the modesettting intel driver"
+ bool "Enable legacy fbdev support for the modesetting intel driver"
depends on DRM_I915
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
@@ -55,9 +55,12 @@ config DRM_I915_FBDEV
support. Note that this support also provide the linux console
support on top of the intel modesetting driver.
+ If in doubt, say "Y".
+
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
+ default n
help
Choose this option if you have prerelease Intel hardware and want the
i915 driver to support it by default. You can enable such support at
@@ -65,3 +68,15 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
+
+config DRM_I915_UMS
+ bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option if you still need userspace modesetting.
+
+ Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41838eaa799c..9fd44f5f3b3b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,6 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
- i915_debugfs.o \
i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
@@ -38,7 +37,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_ringbuffer.o \
intel_overlay.o \
intel_sprite.o \
- intel_opregion.o \
intel_sideband.o \
intel_uncore.o \
dvo_ch7xxx.o \
@@ -51,10 +49,12 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_ACPI) += intel_acpi.o
+i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index c4a255be6979..954acb2c7021 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -87,49 +87,6 @@ struct ns2501_priv {
* when switching the resolution.
*/
-static void enable_dvo(struct intel_dvo_device *dvo)
-{
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
-
- DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
-
- ns->dvoc = I915_READ(DVO_C);
- ns->pll_a = I915_READ(_DPLL_A);
- ns->srcdim = I915_READ(DVOC_SRCDIM);
- ns->fw_blc = I915_READ(FW_BLC);
-
- I915_WRITE(DVOC, 0x10004084);
- I915_WRITE(_DPLL_A, 0xd0820000);
- I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
- I915_WRITE(FW_BLC, 0x1080304);
-
- I915_WRITE(DVOC, 0x90004084);
-}
-
-/*
- * Restore the I915 registers modified by the above
- * trigger function.
- */
-static void restore_dvo(struct intel_dvo_device *dvo)
-{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
- struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-
- I915_WRITE(DVOC, ns->dvoc);
- I915_WRITE(_DPLL_A, ns->pll_a);
- I915_WRITE(DVOC_SRCDIM, ns->srcdim);
- I915_WRITE(FW_BLC, ns->fw_blc);
-}
-
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
@@ -300,7 +257,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *adjusted_mode)
{
bool ok;
- bool restore = false;
+ int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
DRM_DEBUG_KMS
@@ -476,20 +433,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
ns->reg_8_shadow |= NS2501_8_BPAS;
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-
- if (!ok) {
- if (restore)
- restore_dvo(dvo);
- enable_dvo(dvo);
- restore = true;
- }
- } while (!ok);
- /*
- * Restore the old i915 registers before
- * forcing the ns2501 on.
- */
- if (restore)
- restore_dvo(dvo);
+ } while (!ok && retries--);
}
/* set the NS2501 power state */
@@ -510,7 +454,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
- bool restore = false;
+ int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
@@ -537,16 +481,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
- if (!ok) {
- if (restore)
- restore_dvo(dvo);
- enable_dvo(dvo);
- restore = true;
- }
- } while (!ok);
-
- if (restore)
- restore_dvo(dvo);
+ } while (!ok && retries--);
}
}
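
The ns2501 hunks drop the enable_dvo()/restore_dvo() register-poking fallback and simply bound the I2C write-retry loops. The resulting control flow as a sketch; ns2501_writeb(), the register name and the private types come from the driver's own file, and the retry count of 10 matches the hunks:

static void ns2501_write_shadow(struct intel_dvo_device *dvo,
				struct ns2501_priv *ns)
{
	bool ok;
	int retries = 10;	/* bounded, unlike the old endless loop */

	do {
		ok = true;
		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
		/* ... further ns2501_writeb() calls ... */
	} while (!ok && retries--);
}
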
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ed45a984230..b2b46c52294c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,8 +40,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#if defined(CONFIG_DEBUG_FS)
-
enum {
ACTIVE_LIST,
INACTIVE_LIST,
@@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
+ struct task_struct *task;
memset(&stats, 0, sizeof(stats));
idr_for_each(&file->object_idr, per_file_stats, &stats);
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
- get_pid_task(file->pid, PIDTYPE_PID)->comm,
+ task ? task->comm : "<unknown>",
stats.count,
stats.total,
stats.active,
stats.inactive,
stats.unbound);
+ rcu_read_unlock();
}
mutex_unlock(&dev->struct_mutex);
@@ -564,10 +572,12 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, i)
i915_ring_seqno_info(m, ring);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -585,6 +595,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
int i;
@@ -711,6 +722,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
i915_ring_seqno_info(m, ring);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -904,9 +916,11 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
crstanddelay = I915_READ16(CRSTANDVID);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -919,7 +933,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
+
+ intel_runtime_pm_get(dev_priv);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -945,9 +961,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
@@ -970,7 +986,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
cagf *= GT_FREQUENCY_MULTIPLIER;
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1018,23 +1034,24 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+ val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq, val));
+ vlv_gpu_freq(dev_priv, val));
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+ val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq, val));
+ vlv_gpu_freq(dev_priv, val));
seq_printf(m, "current GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- (freq_sts >> 8) & 0xff));
+ vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
}
- return 0;
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
@@ -1048,6 +1065,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -1055,6 +1073,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
+ intel_runtime_pm_put(dev_priv);
+
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1076,12 +1096,14 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1099,11 +1121,13 @@ static int ironlake_drpc_info(struct seq_file *m)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
rstdbyctl = I915_READ(RSTDBYCTL);
crstandvid = I915_READ16(CRSTANDVID);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1154,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m)
return 0;
}
+static int vlv_drpc_info(struct seq_file *m)
+{
+
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, rcctl1;
+ unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "Turbo enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+ seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+
+ return 0;
+}
+
+
static int gen6_drpc_info(struct seq_file *m)
{
@@ -1167,6 +1235,7 @@ static int gen6_drpc_info(struct seq_file *m)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
forcewake_count = dev_priv->uncore.forcewake_count;
@@ -1192,6 +1261,8 @@ static int gen6_drpc_info(struct seq_file *m)
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1256,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_VALLEYVIEW(dev))
+ return vlv_drpc_info(m);
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
return gen6_drpc_info(m);
else
return ironlake_drpc_info(m);
@@ -1268,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!I915_HAS_FBC(dev)) {
+ if (!HAS_FBC(dev)) {
seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
@@ -1330,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
return 0;
}
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
seq_puts(m, "enabled\n");
else
seq_puts(m, "disabled\n");
@@ -1406,6 +1479,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1422,6 +1496,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
@@ -1437,8 +1512,10 @@ static int i915_gfxec(struct seq_file *m, void *unused)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -1565,13 +1642,21 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned forcewake_count;
+ unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.forcewake_count;
+ if (IS_VALLEYVIEW(dev)) {
+ fw_rendercount = dev_priv->uncore.fw_rendercount;
+ fw_mediacount = dev_priv->uncore.fw_mediacount;
+ } else
+ forcewake_count = dev_priv->uncore.forcewake_count;
spin_unlock_irq(&dev_priv->uncore.lock);
- seq_printf(m, "forcewake count = %u\n", forcewake_count);
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+ seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+ } else
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
return 0;
}
@@ -1610,6 +1695,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1641,6 +1727,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1701,16 +1788,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_info(m, dev);
else if (INTEL_INFO(dev)->gen >= 6)
gen6_ppgtt_info(m, dev);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1735,28 +1825,28 @@ static int i915_dpio_info(struct seq_file *m, void *data)
seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
- seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
- seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
+ seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
+ seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
- seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
- seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
+ seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
+ seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
- seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
- seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
+ seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
+ seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
- seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
- seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
+ seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
+ seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
- vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
+ vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1784,6 +1874,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
u32 psrperf = 0;
bool enabled = false;
+ intel_runtime_pm_get(dev_priv);
+
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
@@ -1796,6 +1888,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -1845,6 +1938,76 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0;
}
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ default:
+ WARN_ON(1);
+ return "?";
+ }
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->name,
+ power_well->count);
+
+ for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+ power_domain++) {
+ if (!(BIT(power_domain) & power_well->domains))
+ continue;
+
+ seq_printf(m, " %-23s %d\n",
+ power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -1857,6 +2020,9 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
struct drm_i915_private *dev_priv = info->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+ return -ENODEV;
+
spin_lock_irq(&pipe_crc->lock);
if (pipe_crc->opened) {
@@ -2005,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
info->dev = dev;
ent = debugfs_create_file(info->name, S_IRUGO, root, info,
&i915_pipe_crc_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, info);
}
@@ -2347,7 +2513,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- u32 val;
+ u32 val = 0; /* shut up gcc */
int ret;
if (pipe_crc->source == source)
@@ -2742,7 +2908,7 @@ i915_drop_caches_set(void *data, u64 val)
struct i915_vma *vma, *x;
int ret;
- DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+ DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -2810,8 +2976,7 @@ i915_max_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.max_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
else
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2841,9 +3006,9 @@ i915_max_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go above the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
dev_priv->rps.max_delay = val;
- gen6_set_rps(dev, val);
+ valleyview_set_rps(dev, val);
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val;
@@ -2876,8 +3041,7 @@ i915_min_freq_get(void *data, u64 *val)
return ret;
if (IS_VALLEYVIEW(dev))
- *val = vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.min_delay);
+ *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
else
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2907,7 +3071,7 @@ i915_min_freq_set(void *data, u64 val)
* Turbo will still be enabled, but won't go below the set value.
*/
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
dev_priv->rps.min_delay = val;
valleyview_set_rps(dev, val);
} else {
@@ -2938,8 +3102,11 @@ i915_cache_sharing_get(void *data, u64 *val)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -2960,6 +3127,7 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
+ intel_runtime_pm_get(dev_priv);
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
/* Update the cache sharing policy here as well */
@@ -2968,6 +3136,7 @@ i915_cache_sharing_set(void *data, u64 val)
snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -2983,7 +3152,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_get(dev_priv);
+ intel_runtime_pm_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
return 0;
}
@@ -2996,7 +3166,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -3016,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
S_IRUSR,
root, dev,
&i915_forcewake_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
@@ -3034,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root,
S_IRUGO | S_IWUSR,
root, dev,
fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
return drm_add_fake_info_node(minor, ent, fops);
}
@@ -3079,6 +3250,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -3102,10 +3274,10 @@ static const struct i915_debugfs_files {
void intel_display_crc_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ enum pipe pipe;
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+ for_each_pipe(pipe) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
pipe_crc->opened = false;
spin_lock_init(&pipe_crc->lock);
@@ -3164,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(info_list, 1, minor);
}
}
-
-#endif /* CONFIG_DEBUG_FS */
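
Most of the debugfs hunks follow one pattern: bracket the register reads with intel_runtime_pm_get()/intel_runtime_pm_put() so the device stays awake while the file is generated, and convert early returns into a goto so the reference is always dropped. A condensed sketch of that shape; the register and helper names are the ones used in the hunks above:

#include <linux/seq_file.h>
#include "i915_drv.h"

static int i915_example_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	intel_runtime_pm_get(dev_priv);	/* keep the GPU awake for the MMIO read */

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "SNPCR: 0x%08x\n", snpcr);
out:
	intel_runtime_pm_put(dev_priv);	/* always paired with the get above */
	return ret;
}
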
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5c648425c1e0..15a74f979b4b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,6 +42,8 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
@@ -791,7 +793,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
@@ -828,7 +830,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
@@ -1016,8 +1018,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
- DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ if (copy_to_user(param->value, &value, sizeof(int))) {
+ DRM_ERROR("copy_to_user failed\n");
return -EFAULT;
}
@@ -1411,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-#ifdef CONFIG_DRM_I915_FBDEV
+#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -1484,6 +1486,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return -ENODEV;
}
+ /* UMS needs agp support. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
+ return -EINVAL;
+
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@@ -1494,7 +1500,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->backlight.lock);
+ spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
@@ -1639,8 +1645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (HAS_POWER_WELL(dev))
- intel_power_domains_init(dev);
+ intel_power_domains_init(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
@@ -1664,11 +1669,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
+ intel_init_runtime_pm(dev_priv);
+
return 0;
out_power_well:
- if (HAS_POWER_WELL(dev))
- intel_power_domains_remove(dev);
+ intel_power_domains_remove(dev);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1679,6 +1685,7 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1704,25 +1711,27 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ ret = i915_gem_suspend(dev);
+ if (ret) {
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ return ret;
+ }
+
+ intel_fini_runtime_pm(dev_priv);
+
intel_gpu_ips_teardown();
- if (HAS_POWER_WELL(dev)) {
- /* The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload. */
- intel_display_set_init_power(dev, true);
- intel_power_domains_remove(dev);
- }
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev, true);
+ intel_power_domains_remove(dev);
i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
- ret = i915_gem_suspend(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1777,7 +1786,6 @@ int i915_driver_unload(struct drm_device *dev)
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
- drm_mm_takedown(&dev_priv->gtt.base.mm);
drm_vblank_cleanup(dev);
@@ -1910,6 +1918,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5b7b7e06cb3a..ec7bb0fc71bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0600);
+module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck,
"(default: true)");
int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
@@ -155,7 +155,6 @@ MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
static struct drm_driver driver;
-extern int intel_agp_enabled;
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@ -173,6 +172,7 @@ static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
@@ -192,6 +192,7 @@ static const struct intel_device_info intel_i915gm_info = {
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
@@ -204,6 +205,7 @@ static const struct intel_device_info intel_i945gm_info = {
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING,
};
@@ -265,6 +267,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
};
@@ -280,6 +283,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1
@@ -292,7 +296,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
- .has_fbc = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
@@ -307,6 +310,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
@@ -315,6 +319,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
@@ -332,12 +337,10 @@ static const struct intel_device_info intel_haswell_m_info = {
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
- .has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_broadwell_d_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -346,7 +349,6 @@ static const struct intel_device_info intel_broadwell_d_info = {
};
static const struct intel_device_info intel_broadwell_m_info = {
- .is_preliminary = 1,
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -401,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pch;
+ struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
@@ -422,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- while (pch) {
- struct pci_dev *curr = pch;
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -459,29 +458,27 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
- } else {
- goto check_next;
- }
- pci_dev_put(pch);
+ } else
+ continue;
+
break;
}
-check_next:
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
- pci_dev_put(curr);
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found?\n");
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
- return 0;
+ return false;
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915_preliminary_hw_support);
- return 0;
+ return false;
}
if (i915_semaphores >= 0)
@@ -493,7 +490,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return false;
#endif
- return 1;
+ return true;
}
static int i915_drm_freeze(struct drm_device *dev)
@@ -501,6 +498,8 @@ static int i915_drm_freeze(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ intel_runtime_pm_get(dev_priv);
+
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
@@ -688,6 +687,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_runtime_pm_put(dev_priv);
return error;
}
@@ -762,14 +763,14 @@ int i915_reset(struct drm_device *dev)
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
- DRM_ERROR("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
+ DRM_INFO("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
ret = 0;
}
}
if (ret) {
- DRM_ERROR("Failed to reset chip.\n");
+ DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -790,12 +791,9 @@ int i915_reset(struct drm_device *dev)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
- bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
- if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
- DRM_ERROR("HW contexts didn't survive reset\n");
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -831,17 +829,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
- * implementation for gen3 (and only gen3) that used legacy drm maps
- * (gasp!) to share buffers between X and the client. Hence we need to
- * keep around the fake agp stuff for gen3, even when kms is enabled. */
- if (intel_info->gen != 3) {
- driver.driver_features &=
- ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
- } else if (!intel_agp_enabled) {
- DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
- return -ENODEV;
- }
+ driver.driver_features &= ~(DRIVER_USE_AGP);
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -915,6 +903,49 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
+static int i915_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+
+ DRM_DEBUG_KMS("Suspending device\n");
+
+ i915_gem_release_all_mmaps(dev_priv);
+
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ dev_priv->pm.suspended = true;
+
+ /*
+	 * Current versions of firmware that depend on this opregion
+ * notification have repurposed the D1 definition to mean
+ * "runtime suspended" vs. what you would normally expect (D3)
+ * to distinguish it from notifications that might be sent
+ * via the suspend path.
+ */
+ intel_opregion_notify_adapter(dev, PCI_D1);
+
+ return 0;
+}
+
+static int i915_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_RUNTIME_PM(dev));
+
+ DRM_DEBUG_KMS("Resuming device\n");
+
+ intel_opregion_notify_adapter(dev, PCI_D0);
+ dev_priv->pm.suspended = false;
+
+ return 0;
+}
+
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.resume = i915_pm_resume,
@@ -922,6 +953,8 @@ static const struct dev_pm_ops i915_pm_ops = {
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
.restore = i915_pm_resume,
+ .runtime_suspend = i915_runtime_suspend,
+ .runtime_resume = i915_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -949,7 +982,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
@@ -1024,14 +1057,24 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET))
+ if (!(driver.driver_features & DRIVER_MODESET)) {
driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+ /* Silently fail loading to not upset userspace. */
+ return 0;
+#endif
+ }
return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
+#ifndef CONFIG_DRM_I915_UMS
+ if (!(driver.driver_features & DRIVER_MODESET))
+ return; /* Never loaded a driver. */
+#endif
+
drm_pci_exit(&driver, &i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 90fcccba17b0..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,6 +89,18 @@ enum port {
};
#define port_name(p) ((p) + 'A')
+#define I915_NUM_PHYS_VLV 1
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1
+};
+
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
@@ -101,6 +113,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -310,13 +323,14 @@ struct drm_i915_error_state {
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
- u64 bbaddr;
+ u64 bbaddr[I915_NUM_RINGS];
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
+ bool valid;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
@@ -351,6 +365,7 @@ struct drm_i915_error_state {
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
+struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
@@ -358,7 +373,7 @@ struct dpll;
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
- void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
@@ -413,11 +428,20 @@ struct drm_i915_display_funcs {
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
+
+ int (*setup_backlight)(struct intel_connector *connector);
+ uint32_t (*get_backlight)(struct intel_connector *connector);
+ void (*set_backlight)(struct intel_connector *connector,
+ uint32_t level);
+ void (*disable_backlight)(struct intel_connector *connector);
+ void (*enable_backlight)(struct intel_connector *connector);
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ int fw_engine);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ int fw_engine);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -442,6 +466,9 @@ struct intel_uncore {
unsigned fifo_count;
unsigned forcewake_count;
+ unsigned fw_rendercount;
+ unsigned fw_mediacount;
+
struct delayed_work force_wake_work;
};
@@ -669,7 +696,6 @@ struct i915_fbc {
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
- int interval;
} *fbc_work;
enum no_fbc_reason {
@@ -708,7 +734,6 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -761,8 +786,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
- u32 saveBLC_PWM_CTL_B;
- u32 saveBLC_PWM_CTL2_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -932,21 +955,29 @@ struct intel_ilk_power_mgmt {
/* Power well structure for haswell */
struct i915_power_well {
+ const char *name;
+ bool always_on;
/* power well enable/disable usage count */
int count;
+ unsigned long domains;
+ void *data;
+ void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+ bool enable);
+ bool (*is_enabled)(struct drm_device *dev,
+ struct i915_power_well *power_well);
};
-#define I915_MAX_POWER_WELLS 1
-
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
+ int power_well_count;
struct mutex lock;
- struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+ int domain_use_count[POWER_DOMAIN_NUM];
+ struct i915_power_well *power_wells;
};
struct i915_dri1_state {
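The reworked struct i915_power_well above drops the fixed one-entry array in favour of a driver-supplied table: each well now carries a name, an always_on flag, a bitmask of the display power domains it feeds and set/is_enabled callbacks, while i915_power_domains keeps per-domain use counts. A minimal user-space sketch of that table-plus-bitmask lookup, using simplified stand-in types and a hypothetical two-well table rather than the driver's real one:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's types; illustration only. */
enum power_domain { DOMAIN_PIPE_A, DOMAIN_PIPE_B, DOMAIN_VGA, DOMAIN_AUDIO, DOMAIN_NUM };
#define BIT(x) (1UL << (x))

struct power_well {
        const char *name;
        bool always_on;
        int count;              /* enable/disable usage count */
        unsigned long domains;  /* bitmask of enum power_domain */
};

/* Hypothetical table: one always-on well plus one switchable display well. */
static struct power_well wells[] = {
        { .name = "always-on", .always_on = true,
          .domains = BIT(DOMAIN_PIPE_A) | BIT(DOMAIN_VGA) },
        { .name = "display",   .always_on = false,
          .domains = BIT(DOMAIN_PIPE_B) | BIT(DOMAIN_AUDIO) },
};

/* Roughly how a get path might walk the table: every well serving the
 * requested domain is referenced, and a switchable well is only powered
 * up when its own use count goes from zero to one. */
static void power_domain_get(enum power_domain domain)
{
        for (size_t i = 0; i < sizeof(wells) / sizeof(wells[0]); i++) {
                if (!(wells[i].domains & BIT(domain)))
                        continue;
                if (!wells[i].always_on && wells[i].count++ == 0)
                        printf("powering up well \"%s\"\n", wells[i].name);
        }
}

int main(void)
{
        power_domain_get(DOMAIN_AUDIO);  /* powers up the "display" well */
        power_domain_get(DOMAIN_PIPE_A); /* only touches the always-on well */
        return 0;
}

The point of the bitmask is that one display power domain can be served by several wells, and each well keeps its own reference count independent of the per-domain counts.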
@@ -1077,34 +1108,30 @@ struct i915_gpu_error {
unsigned long missed_irq_rings;
/**
- * State variable and reset counter controlling the reset flow
+ * State variable controlling the reset flow and count
*
- * Upper bits are for the reset counter. This counter is used by the
- * wait_seqno code to race-free noticed that a reset event happened and
- * that it needs to restart the entire ioctl (since most likely the
- * seqno it waited for won't ever signal anytime soon).
+	 * This is a counter which gets incremented when a reset is triggered,
+	 * and again when the reset has been handled. So odd values (lowest bit set)
+	 * mean that a reset is in progress, and even values mean that the
+	 * (reset_counter >> 1):th reset was successfully completed.
+ *
+	 * If the reset is not completed successfully, the I915_WEDGED bit is
+	 * set, meaning that the hardware is terminally sour and there is no
+ * recovery. All waiters on the reset_queue will be woken when
+ * that happens.
+ *
+	 * This counter is used by the wait_seqno code to notice that a reset
+	 * event happened and that it needs to restart the entire ioctl (since most
+ * likely the seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
- *
- * Lowest bit controls the reset state machine: Set means a reset is in
- * progress. This state will (presuming we don't have any bugs) decay
- * into either unset (successful reset) or the special WEDGED value (hw
- * terminally sour). All waiters on the reset_queue will be woken when
- * that happens.
*/
atomic_t reset_counter;
- /**
- * Special values/flags for reset_counter
- *
- * Note that the code relies on
- * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
- * being true.
- */
#define I915_RESET_IN_PROGRESS_FLAG 1
-#define I915_WEDGED 0xffffffff
+#define I915_WEDGED (1 << 31)
/**
* Waitqueue to signal when the reset has completed. Used by clients
@@ -1158,6 +1185,11 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
+ struct {
+ u16 pwm_freq_hz;
+ bool active_low_pwm;
+ } backlight;
+
/* MIPI DSI */
struct {
u16 panel_id;
@@ -1184,7 +1216,7 @@ struct intel_wm_level {
uint32_t fbc_val;
};
-struct hsw_wm_values {
+struct ilk_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
@@ -1262,6 +1294,10 @@ struct i915_package_c8 {
} regsave;
};
+struct i915_runtime_pm {
+ bool suspended;
+};
+
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1366,15 +1402,9 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
- unsigned int sprite_scaling_enabled;
- /* backlight */
- struct {
- int level;
- bool enabled;
- spinlock_t lock; /* bl registers and the above bl fields */
- struct backlight_device *device;
- } backlight;
+ /* backlight registers and fields in struct intel_panel */
+ spinlock_t backlight_lock;
/* LVDS info */
bool no_aux_handshake;
@@ -1426,6 +1456,7 @@ typedef struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
+ int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
bool render_reclock_avail;
@@ -1470,7 +1501,6 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
@@ -1492,11 +1522,13 @@ typedef struct drm_i915_private {
uint16_t cur_latency[5];
/* current hardware state */
- struct hsw_wm_values hw;
+ struct ilk_wm_values hw;
} wm;
struct i915_package_c8 pc8;
+ struct i915_runtime_pm pm;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@@ -1799,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -1813,15 +1853,15 @@ struct drm_i915_file_private {
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
+#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1911,7 +1951,6 @@ extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
@@ -1987,6 +2026,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -2063,12 +2103,17 @@ int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
- & I915_RESET_IN_PROGRESS_FLAG);
+ & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) == I915_WEDGED;
+ return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+ return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
void i915_gem_reset(struct drm_device *dev);
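The new reset_counter encoding (described in the i915_drv.h comment earlier in this patch) packs three things into one atomic: the low bit says a reset is in progress, bit 31 is the terminal I915_WEDGED flag, and the number of completed resets is recovered by i915_reset_count(). A stand-alone sketch of the same arithmetic, mirroring the three inline helpers above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RESET_IN_PROGRESS_FLAG 1u
#define WEDGED                 (1u << 31)

static bool reset_in_progress(uint32_t c) { return c & (RESET_IN_PROGRESS_FLAG | WEDGED); }
static bool terminally_wedged(uint32_t c) { return c & WEDGED; }
static uint32_t reset_count(uint32_t c)   { return ((c & ~WEDGED) + 1) / 2; }

int main(void)
{
        uint32_t c = 0;                  /* fresh device, no reset yet */
        assert(!reset_in_progress(c) && reset_count(c) == 0);

        c++;                             /* reset triggered: counter goes odd */
        assert(reset_in_progress(c));

        c++;                             /* reset handled: counter even again */
        assert(!reset_in_progress(c) && reset_count(c) == 1);

        c |= WEDGED;                     /* unrecoverable: wedged forever */
        assert(terminally_wedged(c) && reset_in_progress(c));
        return 0;
}

Because I915_WEDGED is folded into the in-progress test, a wedged GPU permanently looks like a reset that never finishes, which is exactly what the lock-free wait paths rely on.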
@@ -2180,7 +2225,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
}
/* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
@@ -2339,8 +2384,8 @@ extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
struct intel_encoder;
-extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
+extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
@@ -2349,6 +2394,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
@@ -2398,6 +2444,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2413,8 +2461,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2429,6 +2477,8 @@ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@ -2437,9 +2487,30 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+ ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+ ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+ ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+ (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+ ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+ ((reg) >= 0x30000 && (reg) < 0x40000))
+
+#define FORCEWAKE_RENDER (1 << 0)
+#define FORCEWAKE_MEDIA (1 << 1)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
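The FORCEWAKE_VLV_*_RANGE_OFFSET macros added above are how a Valleyview register offset is classified into the render or media forcewake engine before an MMIO access. A stand-alone sketch of that classification, reusing the same ranges; treating everything outside both ranges as needing no forcewake is an assumption made here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA  (1 << 1)

/* Same ranges as the macros in the hunk above. */
static int vlv_render_range(uint32_t reg)
{
        return (reg >= 0x2000  && reg < 0x4000)  ||
               (reg >= 0x5000  && reg < 0x8000)  ||
               (reg >= 0xB000  && reg < 0x12000) ||
               (reg >= 0x2E000 && reg < 0x30000);
}

static int vlv_media_range(uint32_t reg)
{
        return (reg >= 0x12000 && reg < 0x14000) ||
               (reg >= 0x22000 && reg < 0x24000) ||
               (reg >= 0x30000 && reg < 0x40000);
}

int main(void)
{
        uint32_t regs[] = { 0x2030, 0x12080, 0x45000 };

        for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
                int fw_engine = 0;

                if (vlv_render_range(regs[i]))
                        fw_engine = FORCEWAKE_RENDER;
                else if (vlv_media_range(regs[i]))
                        fw_engine = FORCEWAKE_MEDIA;

                printf("reg 0x%05x -> fw_engine %d\n", regs[i], fw_engine);
        }
        return 0;
}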
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76d3d1ab73c6..00c836154725 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1015,9 +1015,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ const bool irq_test_in_progress =
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
struct timespec before, now;
DEFINE_WAIT(wait);
- long timeout_jiffies;
+ unsigned long timeout_expire;
int ret;
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+ timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1035,8 +1037,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
msecs_to_jiffies(100));
}
- if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
- WARN_ON(!ring->irq_get(ring)))
+ if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
getrawmonotonic(&before);
for (;;) {
struct timer_list timer;
- unsigned long expire;
prepare_to_wait(&ring->irq_queue, &wait,
interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1070,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
break;
}
- if (timeout_jiffies <= 0) {
+ if (timeout && time_after_eq(jiffies, timeout_expire)) {
ret = -ETIME;
break;
}
timer.function = NULL;
if (timeout || missed_irq(dev_priv, ring)) {
+ unsigned long expire;
+
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+ expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
io_schedule();
- if (timeout)
- timeout_jiffies = expire - jiffies;
-
if (timer.function) {
del_singleshot_timer_sync(&timer);
destroy_timer_on_stack(&timer);
@@ -1095,7 +1094,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
getrawmonotonic(&now);
trace_i915_gem_request_wait_end(ring, seqno);
- ring->irq_put(ring);
+ if (!irq_test_in_progress)
+ ring->irq_put(ring);
finish_wait(&ring->irq_queue, &wait);
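The __wait_seqno change above replaces a relative timeout_jiffies value, which had to be recomputed after every wake-up, with a single absolute deadline (timeout_expire) tested via time_after_eq(). The same pattern in a self-contained form, using a POSIX monotonic clock in place of jiffies (the function names and the 50 ms timeout are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Absolute deadline = now + timeout, computed exactly once up front. */
static struct timespec deadline_after_ms(long ms)
{
        struct timespec t;

        clock_gettime(CLOCK_MONOTONIC, &t);
        t.tv_sec  += ms / 1000;
        t.tv_nsec += (ms % 1000) * 1000000L;
        if (t.tv_nsec >= 1000000000L) {
                t.tv_sec++;
                t.tv_nsec -= 1000000000L;
        }
        return t;
}

static bool deadline_passed(const struct timespec *d)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > d->tv_sec ||
               (now.tv_sec == d->tv_sec && now.tv_nsec >= d->tv_nsec);
}

int main(void)
{
        struct timespec deadline = deadline_after_ms(50);
        struct timespec nap = { 0, 5 * 1000000L };   /* stands in for one sleep/wake cycle */
        int wakeups = 0;

        /* Each iteration is one wake-up of the waiter; no per-iteration
         * "time remaining" bookkeeping is needed. */
        while (!deadline_passed(&deadline)) {
                nanosleep(&nap, NULL);
                wakeups++;
        }

        printf("woke %d times before the deadline expired\n", wakeups);
        return 0;
}

Computing the deadline once removes the drift and underflow problems of carrying a shrinking relative timeout across repeated sleeps.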
@@ -1380,6 +1380,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int ret = 0;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+ intel_runtime_pm_get(dev_priv);
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
@@ -1427,8 +1429,10 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- return VM_FAULT_SIGBUS;
+ if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1447,38 @@ out:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- return VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
+ break;
case -ENOMEM:
- return VM_FAULT_OOM;
+ ret = VM_FAULT_OOM;
+ break;
case -ENOSPC:
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ break;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ break;
}
+
+ intel_runtime_pm_put(dev_priv);
+ return ret;
+}
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct i915_vma *vma;
+
+ /*
+ * Only the global gtt is relevant for gtt memory mappings, so restrict
+ * list traversal to objects bound into the global address space. Note
+ * that the active list should be empty, but better safe than sorry.
+ */
+ WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+ list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+ i915_gem_release_mmap(vma->obj);
+ list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+ i915_gem_release_mmap(vma->obj);
}
/**
@@ -2303,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+ DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
@@ -2361,16 +2388,6 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
- while (!list_empty(&ring->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&ring->request_list,
- struct drm_i915_gem_request,
- list);
-
- i915_gem_free_request(request);
- }
-
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2380,6 +2397,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_object_move_to_inactive(obj);
}
+
+ /*
+ * We must free the requests after all the corresponding objects have
+	 * been moved off the active lists, which is the same order the normal
+	 * retire_requests function uses. This is important if objects hold
+	 * implicit references on things like ppgtt address spaces through
+ * the request.
+ */
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ i915_gem_free_request(request);
+ }
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2760,7 +2794,6 @@ int i915_vma_unbind(struct i915_vma *vma)
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_unpin_pages(obj);
list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
@@ -2768,7 +2801,6 @@ int i915_vma_unbind(struct i915_vma *vma)
obj->map_and_fenceable = true;
drm_mm_remove_node(&vma->node);
-
i915_gem_vma_destroy(vma);
/* Since the unbound list is global, only move to that list if
@@ -2776,6 +2808,12 @@ int i915_vma_unbind(struct i915_vma *vma)
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	/* And finally, now that the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
return 0;
}
@@ -3068,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev)
}
if (avail == NULL)
- return NULL;
+ goto deadlock;
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3078,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev)
return reg;
}
- return NULL;
+deadlock:
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(dev))
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-EDEADLK);
}
/**
@@ -3123,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
}
} else if (enable) {
reg = i915_find_fence_reg(dev);
- if (reg == NULL)
- return -EDEADLK;
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
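i915_find_fence_reg() now reports why no fence register was available, returning ERR_PTR(-EAGAIN) while pending page flips may still release one and ERR_PTR(-EDEADLK) otherwise, and the caller forwards that with PTR_ERR() instead of hardcoding -EDEADLK. A user-space sketch of the error-pointer idiom, with the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers re-implemented here in simplified form only for illustration:

#include <errno.h>
#include <stdio.h>

/* Simplified user-space versions of the kernel helpers, for illustration. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err)     { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)   { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

struct fence_reg { int id; };

static struct fence_reg regs[2];
static int pending_flip = 1;

/* Mirrors the shape of the reworked lookup: hand out a free register,
 * or an error pointer that says why none is available right now. */
static struct fence_reg *find_fence_reg(int have_free)
{
        if (have_free)
                return &regs[0];

        return ERR_PTR(pending_flip ? -EAGAIN : -EDEADLK);
}

int main(void)
{
        struct fence_reg *reg = find_fence_reg(0);

        if (IS_ERR(reg)) {
                printf("no fence register available: %ld\n", PTR_ERR(reg));
                return (int)-PTR_ERR(reg);
        }

        printf("got fence register %d\n", reg->id);
        return 0;
}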
@@ -4179,6 +4222,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
+ intel_runtime_pm_get(dev_priv);
+
trace_i915_gem_object_destroy(obj);
if (obj->phys_obj)
@@ -4223,6 +4268,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
kfree(obj->bit_17);
i915_gem_object_free(obj);
+
+ intel_runtime_pm_put(dev_priv);
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4479,7 +4526,13 @@ i915_gem_init_hw(struct drm_device *dev)
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
- i915_gem_context_init(dev);
+ ret = i915_gem_context_init(dev);
+ if (ret) {
+ i915_gem_cleanup_ringbuffer(dev);
+ DRM_ERROR("Context initialization failed %d\n", ret);
+ return ret;
+ }
+
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b0f42b9ca037..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -247,36 +247,34 @@ err_destroy:
return ret;
}
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (!HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_contexts_disabled = true;
- DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
- return;
- }
+ if (!HAS_HW_CONTEXTS(dev))
+ return 0;
/* If called from reset, or thaw... we've been here already */
- if (dev_priv->hw_contexts_disabled ||
- dev_priv->ring[RCS].default_context)
- return;
+ if (dev_priv->ring[RCS].default_context)
+ return 0;
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
- dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
- return;
+ return -E2BIG;
}
- if (create_default_context(dev_priv)) {
- dev_priv->hw_contexts_disabled = true;
- DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
- return;
+ ret = create_default_context(dev_priv);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+ ret);
+ return ret;
}
DRM_DEBUG_DRIVER("HW context support initialized\n");
+ return 0;
}
void i915_gem_context_fini(struct drm_device *dev)
@@ -284,7 +282,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(dev))
return;
/* The only known way to stop the gpu from accessing the hw context is
@@ -327,16 +325,16 @@ i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
- ctx = NULL;
- if (!dev_priv->hw_contexts_disabled)
- ctx = i915_gem_context_get(file->driver_priv, id);
+ if (!HAS_HW_CONTEXTS(dev))
+ return ERR_PTR(-ENOENT);
+
+ ctx = i915_gem_context_get(file->driver_priv, id);
if (ctx == NULL)
return ERR_PTR(-ENOENT);
@@ -502,8 +500,6 @@ static int do_switch(struct i915_hw_context *to)
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
* @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -517,7 +513,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *to;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(ring->dev))
return 0;
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
@@ -542,7 +538,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
@@ -551,7 +546,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- if (dev_priv->hw_contexts_disabled)
+ if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8f3adc7d0dc8..2ca280f9ee53 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -27,8 +27,10 @@
*/
#include <drm/drmP.h>
-#include "i915_drv.h"
#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
#include "i915_trace.h"
static bool
@@ -53,6 +55,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
+ int pass = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -119,14 +122,24 @@ none:
/* Can we unpin some objects such as idle hw contexts,
* or pending flips?
*/
- ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
- if (ret)
- return ret;
+ if (nonblocking)
+ return -ENOSPC;
/* Only idle the GPU and repeat the search once */
- i915_gem_retire_requests(dev);
- nonblocking = true;
- goto search_again;
+ if (pass++ == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ goto search_again;
+ }
+
+ /* If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
+ */
+ return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
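The eviction rework above turns the old idle-then-set-nonblocking trick into an explicit pass counter: pass 0 idles the GPU, retires requests and rescans; a second miss returns -EAGAIN when page-flip completions might still unpin space, otherwise -ENOSPC. A compact stand-alone sketch of that control flow, with stub helpers standing in for the real driver calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real driver helpers. */
static bool scan_for_space(void)       { return false; } /* nothing evictable found */
static int gpu_idle(void)              { return 0; }
static void retire_requests(void)      { }
static bool has_pending_fb_unpin(void) { return true; }

static int evict_something(bool nonblocking)
{
        int pass = 0;

search_again:
        if (scan_for_space())
                return 0;                       /* found room, done */

        if (nonblocking)
                return -ENOSPC;

        /* Only idle the GPU and repeat the search once. */
        if (pass++ == 0) {
                int ret = gpu_idle();

                if (ret)
                        return ret;
                retire_requests();
                goto search_again;
        }

        /* Second miss: let userspace retry if flips may still free space. */
        return has_pending_fb_unpin() ? -EAGAIN : -ENOSPC;
}

int main(void)
{
        printf("evict_something() = %d\n", evict_something(false));
        return 0;
}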
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3ba9a8cd687..d269ecf46e26 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -46,7 +46,7 @@ struct eb_vmas {
};
static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_vmas *eb = NULL;
@@ -252,7 +252,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
- int ret = -EINVAL;
+ int ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
@@ -287,7 +287,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- int ret = -EINVAL;
+ int ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
@@ -335,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
- int ret = -EINVAL;
+ int ret;
/* we've already hold a reference to all valid objects */
target_vma = eb_get_vma(eb, reloc->target_handle);
@@ -344,7 +344,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base;
- target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+ target_offset = target_vma->node.start;
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -365,7 +365,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
- return ret;
+ return -EINVAL;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
@@ -376,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
- return ret;
+ return -EINVAL;
}
target_obj->pending_read_domains |= reloc->read_domains;
@@ -396,14 +396,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
- return ret;
+ return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
- return ret;
+ return -EINVAL;
}
/* We can't wait for rendering with pagefaults disabled */
@@ -491,8 +491,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
}
static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
struct i915_vma *vma;
int ret = 0;
@@ -901,6 +900,24 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+ const u32 ctx_id)
+{
+ struct i915_ctx_hang_stats *hs;
+
+ hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+ if (IS_ERR(hs))
+ return PTR_ERR(hs);
+
+ if (hs->banned) {
+ DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
@@ -980,8 +997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
- struct i915_ctx_hang_stats *hs;
- u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+ const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
int ret, mode, i;
@@ -1108,6 +1124,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
@@ -1118,7 +1136,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args, vm);
+ ret = i915_gem_validate_context(dev, file, ctx_id);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -1141,7 +1165,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb, vm);
+ ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1170,17 +1194,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
- if (IS_ERR(hs)) {
- ret = PTR_ERR(hs);
- goto err;
- }
-
- if (hs->banned) {
- ret = -EIO;
- goto err;
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1242,6 +1255,10 @@ err:
pre_mutex_err:
kfree(cliprects);
+
+ /* intel_gpu_busy should also get a ref, so it will free when the device
+ * is really idle. */
+ intel_runtime_pm_put(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3540569948db..d278be110805 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -240,10 +240,16 @@ static int gen8_ppgtt_enable(struct drm_device *dev)
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
if (ret)
- return ret;
+ goto err_out;
}
}
return 0;
+
+err_out:
+ for_each_ring(ring, dev_priv, j)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+ return ret;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -293,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
- page_addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
- true);
+ pt_vaddr[act_pte] =
+ gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -318,6 +324,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
+ drm_mm_takedown(&vm->mm);
+
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
@@ -381,6 +389,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
@@ -573,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+ pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
- dma_addr_t page_addr;
+ if (pt_vaddr == NULL)
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+ pt_vaddr[act_pte] =
+ vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+ cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
+ pt_vaddr = NULL;
act_pt++;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
act_pte = 0;
-
}
}
- kunmap_atomic(pt_vaddr);
+ if (pt_vaddr)
+ kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
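Both insert_entries paths above (gen8 and gen6) now map a page-table page lazily: kmap_atomic() only happens once a PTE in that page actually needs writing, and the mapping is dropped whenever act_pte wraps into the next page table, so nothing stays mapped across iterations that never write. A stand-alone sketch of that loop shape, with malloc/free standing in for kmap_atomic/kunmap_atomic and a deliberately tiny page-table size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTES_PER_PAGE 4   /* deliberately tiny so the wrap is visible */

/* Stand-ins for kmap_atomic()/kunmap_atomic() on page-table pages. */
static uint64_t *map_pt(int pt)
{
        printf("map page table %d\n", pt);
        return calloc(PTES_PER_PAGE, sizeof(uint64_t));
}

static void unmap_pt(uint64_t *vaddr)
{
        printf("unmap\n");
        free(vaddr);
}

int main(void)
{
        uint64_t *pt_vaddr = NULL;
        int act_pt = 0;
        int act_pte = 1;                /* start mid-page, like first_entry % PTES_PER_PAGE */

        for (int page = 0; page < 6; page++) {
                if (pt_vaddr == NULL)   /* map lazily, only when a PTE needs writing */
                        pt_vaddr = map_pt(act_pt);

                pt_vaddr[act_pte] = 0x1000u * page;     /* "pte_encode" of the next address */

                if (++act_pte == PTES_PER_PAGE) {
                        unmap_pt(pt_vaddr);
                        pt_vaddr = NULL;                /* next PTE maps the next table */
                        act_pt++;
                        act_pte = 0;
                }
        }

        if (pt_vaddr)                   /* last, partially filled table */
                unmap_pt(pt_vaddr);
        return 0;
}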
@@ -632,6 +644,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+ ppgtt->base.start = 0;
+ ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
@@ -828,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
- false);
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1124,7 +1138,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
- list_add(&vma->vma_link, &obj->vma_list);
}
dev_priv->gtt.base.start = start;
@@ -1400,6 +1413,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+ drm_mm_takedown(&vm->mm);
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
@@ -1425,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
+ if (unlikely(dev_priv->gtt.do_idle_maps))
+ DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d284d892ed94..28d24caa49f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+ * One more attempt but this time requesting region from
+ * base + 1, as we have seen that this resolves the region
+ * conflict with the PCI Bus.
+		 * This is a BIOS w/a: some BIOSes wrap the stolen region in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
@@ -201,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
if (dev_priv->gtt.stolen_size == 0)
return 0;
@@ -250,7 +270,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
}
sg = st->sgl;
- sg->offset = offset;
+ sg->offset = 0;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
@@ -420,6 +440,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+ i915_gem_object_pin_pages(obj);
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 79dcb8f896c6..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
@@ -239,6 +242,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
unsigned ring)
{
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+ if (!error->ring[ring].valid)
+ return;
+
err_printf(m, "%s command stream:\n", ring_str(ring));
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -247,12 +253,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(dev)->gen >= 4) {
+ err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
- if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ }
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
@@ -294,7 +299,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
- struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
if (!error) {
@@ -329,7 +333,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for_each_ring(ring, dev_priv, i)
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
@@ -386,8 +390,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- obj = error->ring[i].ctx;
- if (obj) {
+ if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -668,7 +671,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
obj = ring->scratch.obj;
- if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ if (obj != NULL &&
+ acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
@@ -725,8 +729,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS)
- error->bbaddr = I915_READ64(BB_ADDR);
+ error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+ if (INTEL_INFO(dev)->gen >= 8)
+ error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
@@ -775,11 +780,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for_each_ring(ring, dev_priv, i) {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+ if (ring->dev == NULL)
+ continue;
+
+ error->ring[i].valid = true;
+
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f13d5edc39d5..d554169ac592 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@ -600,7 +599,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
* Cook up a vblank counter by also checking the pixel
* counter against vblank start.
*/
- return ((high1 << 8) | low) + (pixel >= vbl_start);
+ return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
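
The new '& 0xffffff' keeps the cooked-up value within what the mask implies is a 24-bit hardware frame counter, so the +1 correction for 'pixel >= vbl_start' wraps instead of growing a 25th bit. A small arithmetic sketch; cook_vblank_count() is a made-up helper name:

#include <assert.h>
#include <stdint.h>

/* Combine the split frame counter with the "already past vblank start"
 * correction, wrapping at 24 bits. */
static uint32_t cook_vblank_count(uint32_t high1, uint32_t low, int past_vbl_start)
{
	uint32_t count = (high1 << 8) | low;	/* 24-bit hardware value */

	return (count + (past_vbl_start ? 1 : 0)) & 0xffffff;
}

int main(void)
{
	/* At the counter's maximum, the +1 correction wraps to 0
	 * instead of producing 0x1000000. */
	assert(cook_vblank_count(0xffff, 0xff, 1) == 0);
	assert(cook_vblank_count(0x1234, 0x56, 0) == 0x123456);
	return 0;
}
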
@@ -619,63 +618,30 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
-static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
int reg;
- if (IS_VALLEYVIEW(dev)) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = VLV_ISR;
- } else if (IS_GEN2(dev)) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = ISR;
- } else if (INTEL_INFO(dev)->gen < 5) {
- status = pipe == PIPE_A ?
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
- reg = ISR;
- } else if (INTEL_INFO(dev)->gen < 7) {
- status = pipe == PIPE_A ?
- DE_PIPEA_VBLANK :
- DE_PIPEB_VBLANK;
-
+ if (INTEL_INFO(dev)->gen >= 8) {
+ status = GEN8_PIPE_VBLANK;
+ reg = GEN8_DE_PIPE_ISR(pipe);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ status = DE_PIPE_VBLANK_IVB(pipe);
reg = DEISR;
} else {
- switch (pipe) {
- default:
- case PIPE_A:
- status = DE_PIPEA_VBLANK_IVB;
- break;
- case PIPE_B:
- status = DE_PIPEB_VBLANK_IVB;
- break;
- case PIPE_C:
- status = DE_PIPEC_VBLANK_IVB;
- break;
- }
-
+ status = DE_PIPE_VBLANK(pipe);
reg = DEISR;
}
- if (IS_GEN2(dev))
- return __raw_i915_read16(dev_priv, reg) & status;
- else
- return __raw_i915_read32(dev_priv, reg) & status;
+ return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -698,6 +664,12 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
vbl_start = mode->crtc_vblank_start;
vbl_end = mode->crtc_vblank_end;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
/*
@@ -722,17 +694,63 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- /*
- * The scanline counter increments at the leading edge
- * of hsync, ie. it completely misses the active portion
- * of the line. Fix up the counter at both edges of vblank
- * to get a more accurate picture whether we're in vblank
- * or not.
- */
- in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
- if ((in_vbl && position == vbl_start - 1) ||
- (!in_vbl && position == vbl_end - 1))
- position = (position + 1) % vtotal;
+ if (HAS_DDI(dev)) {
+ /*
+ * On HSW HDMI outputs there seems to be a 2 line
+ * difference, whereas eDP has the normal 1 line
+ * difference that earlier platforms have. External
+ * DP is unknown. For now just check for the 2 line
+ * difference case on all output types on HSW+.
+ *
+ * This might misinterpret the scanline counter being
+ * one line too far along on eDP, but that's less
+ * dangerous than the alternative since that would lead
+ * the vblank timestamp code astray when it sees a
+ * scanline count before vblank_start during a vblank
+ * interrupt.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && (position == vbl_start - 2 ||
+ position == vbl_start - 1)) ||
+ (!in_vbl && (position == vbl_end - 2 ||
+ position == vbl_end - 1)))
+ position = (position + 2) % vtotal;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ /*
+ * The scanline counter increments at the leading edge
+ * of hsync, ie. it completely misses the active portion
+ * of the line. Fix up the counter at both edges of vblank
+ * to get a more accurate picture whether we're in vblank
+ * or not.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && position == vbl_start - 1) ||
+ (!in_vbl && position == vbl_end - 1))
+ position = (position + 1) % vtotal;
+ } else {
+ /*
+ * ISR vblank status bits don't work the way we'd want
+ * them to work on non-PCH platforms (for
+ * ilk_pipe_in_vblank_locked()), and there doesn't
+ * appear to be any other way to determine if we're currently
+ * in vblank.
+ *
+ * Instead let's assume that we're already in vblank if
+ * we got called from the vblank interrupt and the
+ * scanline counter value indicates that we're on the
+ * line just prior to vblank start. This should result
+ * in the correct answer, unless the vblank interrupt
+ * delivery really got delayed for almost exactly one
+ * full frame/field.
+ */
+ if (flags & DRM_CALLED_FROM_VBLIRQ &&
+ position == vbl_start - 1) {
+ position = (position + 1) % vtotal;
+
+ /* Signal this correction as "applied". */
+ ret |= 0x8;
+ }
+ }
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
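
All three branches above apply the same kind of modular nudge to the raw scanline value; they differ only in how many lines the counter lags (2 on HSW/DDI, 1 on PCH, and 1 on older platforms when called from the vblank interrupt) and in how "in vblank" is decided. A distilled standalone sketch of that correction; fixup_scanline() and the lines_off parameter are illustrative, not driver API:

#include <assert.h>

/* Nudge a raw scanline reading so that the last line(s) before
 * vblank are reported as already being inside vblank. */
static int fixup_scanline(int position, int vbl_start, int vbl_end,
			  int vtotal, int in_vbl, int lines_off)
{
	int d;

	for (d = 1; d <= lines_off; d++) {
		if ((in_vbl && position == vbl_start - d) ||
		    (!in_vbl && position == vbl_end - d))
			return (position + lines_off) % vtotal;
	}
	return position;
}

int main(void)
{
	/* PCH-style 1-line nudge: last line before vblank reads as vblank. */
	assert(fixup_scanline(767, 768, 806, 806, 1, 1) == 768);
	/* HSW-style 2-line nudge at the end of the frame wraps to line 0. */
	assert(fixup_scanline(804, 768, 806, 806, 0, 2) == 0);
	return 0;
}
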
@@ -809,7 +827,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- crtc);
+ crtc,
+ &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -1015,10 +1034,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
- if (new_delay < (int)dev_priv->rps.min_delay)
- new_delay = dev_priv->rps.min_delay;
- if (new_delay > (int)dev_priv->rps.max_delay)
- new_delay = dev_priv->rps.max_delay;
+ new_delay = clamp_t(int, new_delay,
+ dev_priv->rps.min_delay, dev_priv->rps.max_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
if (IS_VALLEYVIEW(dev_priv->dev))
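
clamp_t(int, new_delay, min_delay, max_delay) is just the removed pair of comparisons folded into one expression, with the upper bound still applied last. An open-coded equivalent in plain C:

#include <assert.h>

/* Open-coded equivalent of clamp_t(int, val, lo, hi): the upper bound
 * is applied last, matching both the removed if-pair and the kernel macro. */
static int clamp_int(int val, int lo, int hi)
{
	if (val < lo)
		val = lo;
	if (val > hi)
		val = hi;
	return val;
}

int main(void)
{
	assert(clamp_int(5, 10, 20) == 10);	/* below the range */
	assert(clamp_int(25, 10, 20) == 20);	/* above the range */
	assert(clamp_int(15, 10, 20) == 15);	/* already in range */
	return 0;
}
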
@@ -1235,9 +1252,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
- WARN(((hpd[i] & hotplug_trigger) &&
- dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
- "Received HPD interrupt although disabled\n");
+ WARN_ONCE(hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -1474,6 +1492,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+ if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
@@ -1993,7 +2014,7 @@ static void i915_error_work_func(struct work_struct *work)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
- atomic_set(&error->reset_counter, I915_WEDGED);
+ atomic_set_mask(I915_WEDGED, &error->reset_counter);
}
/*
@@ -2761,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
return;
if (HAS_PCH_IBX(dev)) {
- mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
- SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
} else {
- mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
@@ -2824,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB);
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
- extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
dev_priv->irq_mask = ~display_mask;
@@ -2953,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
@@ -3140,10 +3159,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
* Returns true when a page flip has completed.
*/
static bool i8xx_handle_vblank(struct drm_device *dev,
- int pipe, u16 iir)
+ int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+ u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
if (!drm_handle_vblank(dev, pipe))
return false;
@@ -3151,7 +3170,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
return false;
- intel_prepare_page_flip(dev, pipe);
+ intel_prepare_page_flip(dev, plane);
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
@@ -3220,9 +3239,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
notify_ring(dev, &dev_priv->ring[RCS]);
for_each_pipe(pipe) {
+ int plane = pipe;
+ if (HAS_FBC(dev))
+ plane = !plane;
+
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, pipe, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+ i8xx_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -3418,7 +3441,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
for_each_pipe(pipe) {
int plane = pipe;
- if (IS_MOBILE(dev))
+ if (HAS_FBC(dev))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3655,7 +3678,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
hotplug_status);
intel_hpd_irq_handler(dev, hotplug_trigger,
- IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+ IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
+
+ if (IS_G4X(dev) &&
+ (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
+ dp_aux_irq_handler(dev);
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
@@ -3893,8 +3920,8 @@ void hsw_pc8_disable_interrupts(struct drm_device *dev)
dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
- ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
- ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+ ironlake_disable_display_irq(dev_priv, 0xffffffff);
+ ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
@@ -3908,34 +3935,26 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- uint32_t val, expected;
+ uint32_t val;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
- expected = ~DE_PCH_EVENT_IVB;
- WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+ WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
- val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
- expected = ~SDE_HOTPLUG_MASK_CPT;
- WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
- val, expected);
+ val = I915_READ(SDEIMR);
+ WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
val = I915_READ(GTIMR);
- expected = 0xffffffff;
- WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+ WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
val = I915_READ(GEN6_PMIMR);
- expected = 0xffffffff;
- WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
- expected);
+ WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
dev_priv->pc8.irqs_disabled = false;
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
- ibx_enable_display_interrupt(dev_priv,
- ~dev_priv->pc8.regsave.sdeimr &
- ~SDE_HOTPLUG_MASK_CPT);
+ ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ee2742122a02..a48b7cad6f11 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -193,10 +193,13 @@
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
-#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -212,10 +215,24 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
-#define MI_ARB_ENABLE (1<<0)
-#define MI_ARB_DISABLE (0<<0)
-
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -235,7 +252,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
-#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -246,30 +263,13 @@
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_PPGTT_HSW (1<<8)
-#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
-#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
-#define MI_SEMAPHORE_UPDATE (1<<21)
-#define MI_SEMAPHORE_COMPARE (1<<20)
-#define MI_SEMAPHORE_REGISTER (1<<18)
-#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
-#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
-#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
-#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
-#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
-#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
-#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
-#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
-#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
-#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
-#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
-#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
-#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@@ -354,6 +354,7 @@
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
+#define IOSF_PORT_BUNIT 0x3
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
@@ -361,12 +362,21 @@
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
+#define IOSF_PORT_FLISDSI 0x1B
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC 0x11
+
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
+#define PUNIT_REG_DSPFREQ 0x36
+#define DSPFREQSTAT_SHIFT 30
+#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
+#define DSPFREQGUAR_SHIFT 14
+#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
@@ -429,6 +439,7 @@
#define DSI_PLL_N1_DIV_MASK (3 << 16)
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
/*
* DPIO - a special bus for various display related registers to hide behind
@@ -447,15 +458,13 @@
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_CMNRST (1<<0)
-#define _DPIO_TX3_SWING_CTL4_A 0x690
-#define _DPIO_TX3_SWING_CTL4_B 0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
- _DPIO_TX3_SWING_CTL4_B)
+#define DPIO_PHY(pipe) ((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
/*
* Per pipe/PLL DPIO regs
*/
-#define _DPIO_DIV_A 0x800c
+#define _VLV_PLL_DW3_CH0 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_POST_DIV_DAC 0
#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
@@ -468,10 +477,10 @@
#define DPIO_ENABLE_CALIBRATION (1<<11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
-#define _DPIO_DIV_B 0x802c
-#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+#define _VLV_PLL_DW3_CH1 0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
-#define _DPIO_REFSFR_A 0x8014
+#define _VLV_PLL_DW5_CH0 0x8014
#define DPIO_REFSEL_OVERRIDE 27
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
@@ -479,118 +488,112 @@
#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
-#define _DPIO_REFSFR_B 0x8034
-#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+#define _VLV_PLL_DW5_CH1 0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
-#define _DPIO_CORE_CLK_A 0x801c
-#define _DPIO_CORE_CLK_B 0x803c
-#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+#define _VLV_PLL_DW7_CH0 0x801c
+#define _VLV_PLL_DW7_CH1 0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
-#define _DPIO_IREF_CTL_A 0x8040
-#define _DPIO_IREF_CTL_B 0x8060
-#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+#define _VLV_PLL_DW8_CH0 0x8040
+#define _VLV_PLL_DW8_CH1 0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
-#define DPIO_IREF_BCAST 0xc044
-#define _DPIO_IREF_A 0x8044
-#define _DPIO_IREF_B 0x8064
-#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+#define VLV_PLL_DW9_BCAST 0xc044
+#define _VLV_PLL_DW9_CH0 0x8044
+#define _VLV_PLL_DW9_CH1 0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
-#define _DPIO_PLL_CML_A 0x804c
-#define _DPIO_PLL_CML_B 0x806c
-#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+#define _VLV_PLL_DW10_CH0 0x8048
+#define _VLV_PLL_DW10_CH1 0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
-#define _DPIO_LPF_COEFF_A 0x8048
-#define _DPIO_LPF_COEFF_B 0x8068
-#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+#define _VLV_PLL_DW11_CH0 0x804c
+#define _VLV_PLL_DW11_CH1 0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
-#define DPIO_CALIBRATION 0x80ac
+/* Spec for ref block start counts at DW10 */
+#define VLV_REF_DW13 0x80ac
-#define DPIO_FASTCLK_DISABLE 0x8100
+#define VLV_CMN_DW0 0x8100
/*
* Per DDI channel DPIO regs
*/
-#define _DPIO_PCS_TX_0 0x8200
-#define _DPIO_PCS_TX_1 0x8400
+#define _VLV_PCS_DW0_CH0 0x8200
+#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
-#define _DPIO_PCS_CLK_0 0x8204
-#define _DPIO_PCS_CLK_1 0x8404
+#define _VLV_PCS_DW1_CH0 0x8204
+#define _VLV_PCS_DW1_CH1 0x8404
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
-#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
-
-#define _DPIO_PCS_CTL_OVR1_A 0x8224
-#define _DPIO_PCS_CTL_OVR1_B 0x8424
-#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
- _DPIO_PCS_CTL_OVR1_B)
-
-#define _DPIO_PCS_STAGGER0_A 0x822c
-#define _DPIO_PCS_STAGGER0_B 0x842c
-#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
- _DPIO_PCS_STAGGER0_B)
-
-#define _DPIO_PCS_STAGGER1_A 0x8230
-#define _DPIO_PCS_STAGGER1_B 0x8430
-#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
- _DPIO_PCS_STAGGER1_B)
-
-#define _DPIO_PCS_CLOCKBUF0_A 0x8238
-#define _DPIO_PCS_CLOCKBUF0_B 0x8438
-#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
- _DPIO_PCS_CLOCKBUF0_B)
-
-#define _DPIO_PCS_CLOCKBUF8_A 0x825c
-#define _DPIO_PCS_CLOCKBUF8_B 0x845c
-#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
- _DPIO_PCS_CLOCKBUF8_B)
-
-#define _DPIO_TX_SWING_CTL2_A 0x8288
-#define _DPIO_TX_SWING_CTL2_B 0x8488
-#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
- _DPIO_TX_SWING_CTL2_B)
-
-#define _DPIO_TX_SWING_CTL3_A 0x828c
-#define _DPIO_TX_SWING_CTL3_B 0x848c
-#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
- _DPIO_TX_SWING_CTL3_B)
-
-#define _DPIO_TX_SWING_CTL4_A 0x8290
-#define _DPIO_TX_SWING_CTL4_B 0x8490
-#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
- _DPIO_TX_SWING_CTL4_B)
-
-#define _DPIO_TX_OCALINIT_0 0x8294
-#define _DPIO_TX_OCALINIT_1 0x8494
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0 0x8220
+#define _VLV_PCS_DW8_CH1 0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0 0x0220
+#define _VLV_PCS23_DW8_CH0 0x0420
+#define _VLV_PCS01_DW8_CH1 0x2620
+#define _VLV_PCS23_DW8_CH1 0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0 0x8224
+#define _VLV_PCS_DW9_CH1 0x8424
+#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _VLV_PCS_DW11_CH0 0x822c
+#define _VLV_PCS_DW11_CH1 0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS_DW12_CH0 0x8230
+#define _VLV_PCS_DW12_CH1 0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0 0x8238
+#define _VLV_PCS_DW14_CH1 0x8438
+#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0 0x825c
+#define _VLV_PCS_DW23_CH1 0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0 0x8288
+#define _VLV_TX_DW2_CH1 0x8488
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0 0x828c
+#define _VLV_TX_DW3_CH1 0x848c
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0 0x8290
+#define _VLV_TX_DW4_CH1 0x8490
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0 0x690
+#define _VLV_TX3_DW4_CH1 0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0 0x8294
+#define _VLV_TX_DW5_CH1 0x8494
#define DPIO_TX_OCALINIT_EN (1<<31)
-#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
- _DPIO_TX_OCALINIT_1)
-
-#define _DPIO_TX_CTL_0 0x82ac
-#define _DPIO_TX_CTL_1 0x84ac
-#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
-
-#define _DPIO_TX_LANE_0 0x82b8
-#define _DPIO_TX_LANE_1 0x84b8
-#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
-
-#define _DPIO_DATA_CHANNEL1 0x8220
-#define _DPIO_DATA_CHANNEL2 0x8420
-#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
-
-#define _DPIO_PORT0_PCS0 0x0220
-#define _DPIO_PORT0_PCS1 0x0420
-#define _DPIO_PORT1_PCS2 0x2620
-#define _DPIO_PORT1_PCS3 0x2820
-#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
-#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
-#define DPIO_DATA_CHANNEL1 0x8220
-#define DPIO_DATA_CHANNEL2 0x8420
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0 0x82ac
+#define _VLV_TX_DW11_CH1 0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0 0x82b8
+#define _VLV_TX_DW14_CH1 0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
/*
* Fence registers
@@ -732,6 +735,8 @@
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
#define RING_BBSTATE(base) ((base)+0x110)
+#define RING_BBADDR(base) ((base)+0x140)
+#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
@@ -922,7 +927,6 @@
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL_EN (1<<0)
@@ -999,6 +1003,7 @@
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
@@ -1026,14 +1031,14 @@
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
-#define FBC_CTL_FENCENO (1<<0)
+#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND 0x0320c
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
-#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 0x03214
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
@@ -2117,9 +2122,13 @@
* Please check the detailed lore in the commit message for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
@@ -2130,6 +2139,11 @@
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
+#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
+#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
+#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
+
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
@@ -3421,42 +3435,6 @@
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
-/* define the fifo size on Ironlake */
-#define ILK_DISPLAY_FIFO 128
-#define ILK_DISPLAY_MAXWM 64
-#define ILK_DISPLAY_DFTWM 8
-#define ILK_CURSOR_FIFO 32
-#define ILK_CURSOR_MAXWM 16
-#define ILK_CURSOR_DFTWM 8
-
-#define ILK_DISPLAY_SR_FIFO 512
-#define ILK_DISPLAY_MAX_SRWM 0x1ff
-#define ILK_DISPLAY_DFT_SRWM 0x3f
-#define ILK_CURSOR_SR_FIFO 64
-#define ILK_CURSOR_MAX_SRWM 0x3f
-#define ILK_CURSOR_DFT_SRWM 8
-
-#define ILK_FIFO_LINE_SIZE 64
-
-/* define the WM info on Sandybridge */
-#define SNB_DISPLAY_FIFO 128
-#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
-#define SNB_DISPLAY_DFTWM 8
-#define SNB_CURSOR_FIFO 32
-#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
-#define SNB_CURSOR_DFTWM 8
-
-#define SNB_DISPLAY_SR_FIFO 512
-#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
-#define SNB_DISPLAY_DFT_SRWM 0x3f
-#define SNB_CURSOR_SR_FIFO 64
-#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
-#define SNB_CURSOR_DFT_SRWM 8
-
-#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
-
-#define SNB_FIFO_LINE_SIZE 64
-
/* the address where we get all kinds of latency value */
#define SSKPD 0x5d10
@@ -3600,8 +3578,6 @@
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
-#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
- (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
@@ -3787,7 +3763,7 @@
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1<<31)
-#define SP_GEAMMA_ENABLE (1<<30)
+#define SP_GAMMA_ENABLE (1<<30)
#define SP_PIXFORMAT_MASK (0xf<<26)
#define SP_FORMAT_YUV422 (0<<26)
#define SP_FORMAT_BGR565 (5<<26)
@@ -4139,6 +4115,8 @@
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_ARB_CTL2 0x45004
+#define DISP_DATA_PARTITION_5_6 (1<<6)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
@@ -4159,6 +4137,10 @@
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+/* GEN8 chicken */
+#define HDC_CHICKEN0 0x7300
+#define HDC_FORCE_NON_COHERENT (1<<4)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
@@ -4843,6 +4825,8 @@
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define VLV_GTLC_PW_STATUS 0x130094
+#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -4851,12 +4835,16 @@
#define FORCEWAKE_MT_ENABLE (1<<5)
#define GTFIFODBG 0x120000
-#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_SBDROPERR (1<<6)
+#define GT_FIFO_BLOBDROPERR (1<<5)
+#define GT_FIFO_SB_READ_ABORTERR (1<<4)
+#define GT_FIFO_DROPERR (1<<3)
#define GT_FIFO_OVFERR (1<<2)
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
-#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GTFIFOCTL 0x120008
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define HSW_IDICR 0x9008
@@ -4890,6 +4878,7 @@
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 98790c7cccb1..8150fdc08d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
@@ -203,46 +202,27 @@ static void i915_save_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_display_reg(dev);
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_PWM_CTL =
- I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
dev_priv->regfile.saveBLC_HIST_CTL =
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
- dev_priv->regfile.saveBLC_PWM_CTL_B =
- I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
dev_priv->regfile.saveBLC_HIST_CTL_B =
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
- dev_priv->regfile.saveBLC_PWM_CTL2_B =
- I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
@@ -257,7 +237,7 @@ static void i915_save_display(struct drm_device *dev)
}
/* Only regfile.save FBC state on the platform that supports FBC */
- if (I915_HAS_FBC(dev)) {
+ if (HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
@@ -278,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask = 0xffffffff;
- unsigned long flags;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
@@ -287,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_display_reg(dev);
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
- /* LVDS state */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
@@ -305,13 +278,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
- /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
- * otherwise we get blank eDP screen after S3 on some machines
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -319,21 +285,12 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
- dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
- dev_priv->regfile.saveBLC_PWM_CTL2);
- I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
- dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
- dev_priv->regfile.saveBLC_PWM_CTL2);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -341,11 +298,9 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
/* only restore FBC info on the platform that supports FBC */
intel_disable_fbc(dev);
- if (I915_HAS_FBC(dev)) {
+ if (HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cef38fd320a7..33bcae314bf8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+ u32 ret;
if (!intel_enable_rc6(dev))
return 0;
+ intel_runtime_pm_get(dev_priv);
+
/* On VLV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
u32 clkctl2;
@@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!clkctl2) {
WARN(!clkctl2, "bogus CZ count value");
- return 0;
+ ret = 0;
+ goto out;
}
units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
raw_time = I915_READ(reg) * units;
- return DIV_ROUND_UP_ULL(raw_time, div);
+ ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return ret;
}
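
With units = 128 and div = 100000 the surrounding math converts counter ticks of 1.28us into milliseconds; the VLV branch only substitutes a tick length derived from the CZ clock (its remaining constants are not visible in this hunk, so they are left out below). A standalone sketch of the common case:

#include <stdint.h>
#include <stdio.h>

/* Plain-expression version of the kernel's DIV_ROUND_UP_ULL(). */
#define DIV_ROUND_UP_ULL(x, y)	(((x) + (y) - 1) / (y))

/* Residency counter ticks of 1.28us each -> milliseconds. */
static uint64_t rc6_ticks_to_ms(uint64_t raw)
{
	return DIV_ROUND_UP_ULL(raw * 128ULL, 100000ULL);
}

int main(void)
{
	/* 781250 ticks * 1.28us = 1.0 s = 1000 ms */
	printf("%llu ms\n", (unsigned long long)rc6_ticks_to_ms(781250));
	return 0;
}
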
static ssize_t
@@ -183,13 +191,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
int slice = (int)(uintptr_t)attr->private;
int ret;
+ if (!HAS_HW_CONTEXTS(drm_dev))
+ return -ENXIO;
+
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
- if (dev_priv->hw_contexts_disabled)
- return -ENXIO;
-
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
@@ -259,7 +267,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+ ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
}
@@ -276,8 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +298,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +325,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +349,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
+ dev_priv->rps.max_delay = val;
+
if (dev_priv->rps.cur_delay > val) {
- if (IS_VALLEYVIEW(dev_priv->dev))
- valleyview_set_rps(dev_priv->dev, val);
+ if (IS_VALLEYVIEW(dev))
+ valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.max_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
@@ -367,7 +374,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
- ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+ ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +401,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
- val = vlv_freq_opcode(dev_priv->mem_freq, val);
+ val = vlv_freq_opcode(dev_priv, val);
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +418,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return -EINVAL;
}
+ dev_priv->rps.min_delay = val;
+
if (dev_priv->rps.cur_delay < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
- gen6_set_rps(dev_priv->dev, val);
+ gen6_set_rps(dev, val);
}
- dev_priv->rps.min_delay = val;
-
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
@@ -449,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 967da4772c44..caa18e855815 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
+ /* Backlight */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ } else {
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ }
+
return;
}
@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
+ /* Backlight */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ } else {
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ }
+
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index dfff0907f70e..d96eee1ae9c5 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -6,14 +6,10 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/vga_switcheroo.h>
-#include <acpi/acpi_drivers.h>
-
#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
-
-#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
static struct intel_dsm_priv {
@@ -28,61 +24,6 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
-static int intel_dsm(acpi_handle handle, int func)
-{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *obj;
- u32 result;
- int ret = 0;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(intel_dsm_guid);
- params[0].buffer.pointer = (char *)intel_dsm_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = INTEL_DSM_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
- return ret;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- result = 0;
- switch (obj->type) {
- case ACPI_TYPE_INTEGER:
- result = obj->integer.value;
- break;
-
- case ACPI_TYPE_BUFFER:
- if (obj->buffer.length == 4) {
- result = (obj->buffer.pointer[0] |
- (obj->buffer.pointer[1] << 8) |
- (obj->buffer.pointer[2] << 16) |
- (obj->buffer.pointer[3] << 24));
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
- if (result == 0x80000002)
- ret = -ENODEV;
-
- kfree(output.pointer);
- return ret;
-}
-
static char *intel_dsm_port_name(u8 id)
{
switch (id) {
@@ -137,83 +78,56 @@ static char *intel_dsm_mux_type(u8 type)
static void intel_dsm_platform_mux_info(void)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *pkg;
- int i, ret;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(intel_dsm_guid);
- params[0].buffer.pointer = (char *)intel_dsm_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = INTEL_DSM_REVISION_ID;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
- &output);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
- goto out;
+ int i;
+ union acpi_object *pkg, *connector_count;
+
+ pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+ INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!pkg) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+ return;
}
- pkg = (union acpi_object *)output.pointer;
-
- if (pkg->type == ACPI_TYPE_PACKAGE) {
- union acpi_object *connector_count = &pkg->package.elements[0];
- DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
- (unsigned long long)connector_count->integer.value);
- for (i = 1; i < pkg->package.count; i++) {
- union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id =
- &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
- DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
- (unsigned long long)connector_id->integer.value);
- DRM_DEBUG_DRIVER(" port id: %s\n",
- intel_dsm_port_name(info->buffer.pointer[0]));
- DRM_DEBUG_DRIVER(" display mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[1]));
- DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[2]));
- DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
- intel_dsm_mux_type(info->buffer.pointer[3]));
- }
+ connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id = &obj->package.elements[0];
+ union acpi_object *info = &obj->package.elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
}
-out:
- kfree(output.pointer);
+ ACPI_FREE(pkg);
}
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
- int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM")) {
+ if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+ 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
- ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
- return false;
- }
-
intel_dsm_priv.dhandle = dhandle;
-
intel_dsm_platform_mux_info();
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e4fba39631a5..f22041973f3a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
}
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ const struct bdb_lfp_backlight_data *backlight_data;
+ const struct bdb_lfp_backlight_data_entry *entry;
+
+ backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ if (!backlight_data)
+ return;
+
+ if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+ DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
+ return;
+ }
+
+ entry = &backlight_data->data[panel_type];
+
+ dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ entry->min_brightness,
+ backlight_data->level[panel_type]);
+}
+
/* Try to find sdvo panel data */
static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -327,12 +355,12 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
{
switch (INTEL_INFO(dev)->gen) {
case 2:
- return alternate ? 66 : 48;
+ return alternate ? 66667 : 48000;
case 3:
case 4:
- return alternate ? 100 : 96;
+ return alternate ? 100000 : 96000;
default:
- return alternate ? 100 : 120;
+ return alternate ? 100000 : 120000;
}
}
@@ -796,7 +824,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
!HAS_PCH_SPLIT(dev));
- DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+ DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
@@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev)
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
+ parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
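Two independent things happen in this file: parse_lfp_backlight() pulls in the new BDB_LVDS_BACKLIGHT block (the matching struct definitions follow in intel_bios.h below), and the SSC reference frequency moves from MHz to kHz. A standalone illustration of the unit change, not driver code, using values from the intel_bios_ssc_frequency() hunk above:

	#include <stdio.h>

	int main(void)
	{
		int lvds_ssc_freq = 66667;	/* gen2 "alternate": was 66 (MHz), now kHz */

		/* comparisons later in intel_display.c change from "== 100" to "== 100000" */
		if (lvds_ssc_freq == 100000)
			printf("100 MHz SSC reference\n");

		/* consumers that want kHz no longer multiply by 1000 */
		printf("SSC reference clock: %d kHz (%.3f MHz)\n",
		       lvds_ssc_freq, lvds_ssc_freq / 1000.0);
		return 0;
	}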
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index f580a2b0ddd3..282de5e9f39d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -39,7 +39,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -142,7 +142,7 @@ struct bdb_general_features {
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -225,7 +225,7 @@ struct old_child_dev_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
@@ -238,7 +238,7 @@ struct common_child_dev_config {
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
-} __attribute__((packed));
+} __packed;
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
@@ -279,7 +279,7 @@ struct bdb_general_definitions {
* sizeof(child_device_config);
*/
union child_device_config devices[0];
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_options {
u8 panel_type;
@@ -293,7 +293,7 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -303,12 +303,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -325,7 +325,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -353,7 +353,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -361,17 +361,33 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
+
+struct bdb_lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct bdb_lfp_backlight_data_entry data[16];
+ u8 level[16];
+} __packed;
struct aimdb_header {
char signature[16];
@@ -379,12 +395,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -395,12 +411,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -416,7 +432,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
@@ -462,7 +478,7 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
@@ -487,14 +503,14 @@ struct edp_power_seq {
u16 t9;
u16 t10;
u16 t11_t12;
-} __attribute__ ((packed));
+} __packed;
struct edp_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
-} __attribute__ ((packed));
+} __packed;
struct bdb_edp {
struct edp_power_seq power_seqs[16];
@@ -505,7 +521,7 @@ struct bdb_edp {
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
-} __attribute__ ((packed));
+} __packed;
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@@ -733,6 +749,6 @@ struct bdb_mipi {
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
-} __attribute__((packed));
+} __packed;
#endif /* _I830_BIOS_H_ */
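Apart from the new backlight structures, this header only swaps the long-hand GCC attribute for the kernel's __packed shorthand, so no structure layouts change; the shorthand is defined in the compiler headers roughly as:

	#define __packed	__attribute__((packed))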
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5b1b9b23adf..e2e39e65f109 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -222,8 +222,9 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b69dc3e66c16..234ac5f7bc5a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = {
};
static const u32 bdw_ddi_translations_edp[] = {
- 0x00FFFFFF, 0x00000012, /* DP parameters */
+ 0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00FFFFFF, 0x00020011,
@@ -696,25 +696,25 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
*n2_out = best.n2;
*p_out = best.p;
*r2_out = best.r2;
-
- DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, *p_out, *n2_out, *r2_out);
}
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
+/*
+ * Tries to find a PLL for the CRTC. If it finds one, it increases the refcount
+ * and stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
+ * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
+ * enable the PLL.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
- uint32_t reg, val;
int clock = intel_crtc->config.port_clock;
- /* TODO: reuse PLLs when possible (compare values) */
-
intel_ddi_put_crtc_pll(crtc);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -736,66 +736,145 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
return false;
}
- /* We don't need to turn any PLL on because we'll use LCPLL. */
- return true;
-
} else if (type == INTEL_OUTPUT_HDMI) {
+ uint32_t reg, val;
unsigned p, n2, r2;
- if (plls->wrpll1_refcount == 0) {
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ if (val == I915_READ(WRPLL_CTL1)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL1;
+ } else if (val == I915_READ(WRPLL_CTL2)) {
+ DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ reg = WRPLL_CTL2;
+ } else if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
- plls->wrpll1_refcount++;
reg = WRPLL_CTL1;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
- plls->wrpll2_refcount++;
reg = WRPLL_CTL2;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
- WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
- "WRPLL already enabled\n");
-
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+ DRM_DEBUG_KMS("WRPLL: %d kHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, p, n2, r2);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
+ if (reg == WRPLL_CTL1) {
+ plls->wrpll1_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else {
+ plls->wrpll2_refcount++;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ }
} else if (type == INTEL_OUTPUT_ANALOG) {
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
pipe_name(pipe));
plls->spll_refcount++;
- reg = SPLL_CTL;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
} else {
DRM_ERROR("SPLL already in use\n");
return false;
}
- WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
- "SPLL already enabled\n");
-
- val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
-
} else {
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
}
- I915_WRITE(reg, val);
- udelay(20);
-
return true;
}
+/*
+ * To be called after intel_ddi_pll_select(). That one selects the PLL to be
+ * used; this one actually enables the PLL.
+ */
+void intel_ddi_pll_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int clock = crtc->config.port_clock;
+ uint32_t reg, cur_val, new_val;
+ int refcount;
+ const char *pll_name;
+ uint32_t enable_bit = (1 << 31);
+ unsigned int p, n2, r2;
+
+ BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
+ BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
+
+ switch (crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_LCPLL_2700:
+ case PORT_CLK_SEL_LCPLL_1350:
+ case PORT_CLK_SEL_LCPLL_810:
+ /*
+ * LCPLL should always be enabled at this point of the mode set
+ * sequence, so nothing to do.
+ */
+ return;
+
+ case PORT_CLK_SEL_SPLL:
+ pll_name = "SPLL";
+ reg = SPLL_CTL;
+ refcount = plls->spll_refcount;
+ new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SSC;
+ break;
+
+ case PORT_CLK_SEL_WRPLL1:
+ case PORT_CLK_SEL_WRPLL2:
+ if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
+ pll_name = "WRPLL1";
+ reg = WRPLL_CTL1;
+ refcount = plls->wrpll1_refcount;
+ } else {
+ pll_name = "WRPLL2";
+ reg = WRPLL_CTL2;
+ refcount = plls->wrpll2_refcount;
+ }
+
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
+
+ break;
+
+ case PORT_CLK_SEL_NONE:
+ WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
+ return;
+ default:
+ WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
+ return;
+ }
+
+ cur_val = I915_READ(reg);
+
+ WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
+ if (refcount == 1) {
+ WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
+ I915_WRITE(reg, new_val);
+ POSTING_READ(reg);
+ udelay(20);
+ } else {
+ WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
+ }
+}
+
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1121,9 +1200,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
}
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1166,8 +1243,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}
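The net effect of splitting intel_ddi_pll_mode_set() is that selection (refcounting the PLL, recording it in ddi_pll_sel, and reusing a WRPLL whose computed WRPLL_CTL value already matches the register) is separated from actually programming and enabling the hardware. The caller sequence, as the haswell_crtc_mode_set() hunk further down shows, becomes:

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;
	intel_ddi_pll_enable(intel_crtc);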
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2bde35d34eb9..9b8a7c7ea7fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,8 +90,8 @@ intel_fdi_link_freq(struct drm_device *dev)
static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -103,8 +103,8 @@ static const intel_limit_t intel_limits_i8xx_dac = {
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -116,8 +116,8 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 930000, .max = 1400000 },
- .n = { .min = 3, .max = 16 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
@@ -329,6 +329,8 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
@@ -430,6 +432,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
@@ -443,6 +447,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
@@ -748,10 +754,10 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return intel_crtc->config.cpu_transcoder;
}
-static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 frame, frame_reg = PIPEFRAME(pipe);
+ u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
frame = I915_READ(frame_reg);
@@ -772,8 +778,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
- if (INTEL_INFO(dev)->gen >= 5) {
- ironlake_wait_for_vblank(dev, pipe);
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ g4x_wait_for_vblank(dev, pipe);
return;
}
@@ -1086,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
bool cur_state;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -1205,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
}
}
-static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
bool enabled;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
- return;
- }
+ WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1361,6 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev)
if (!IS_VALLEYVIEW(dev))
return;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+}
+
+static void intel_reset_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /*
+ * Enable the CRI clock source so we can get at the display and the
+ * reference clock for VGA hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_INTEGRATED_CRI_CLK_VLV);
+
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1487,25 +1508,35 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /* Leave integrated clock source enabled */
+ /*
+ * Leave integrated clock source and reference clock enabled for pipe B.
+ * The latter is needed for VGA hotplug / manual detection.
+ */
if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV;
+ val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
}
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport)
{
u32 port_mask;
- if (!port)
+ switch (dport->port) {
+ case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
- else
+ break;
+ case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
+ break;
+ default:
+ BUG();
+ }
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- 'B' + port, I915_READ(DPLL(0)));
+ port_name(dport->port), I915_READ(DPLL(0)));
}
/**
@@ -2083,8 +2114,8 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
@@ -2174,8 +2205,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPSURF(plane),
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2233,7 +2264,12 @@ void intel_display_handle_reset(struct drm_device *dev)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
mutex_lock(&crtc->mutex);
- if (intel_crtc->active)
+ /*
+ * FIXME: Once we have proper support for primary planes (and
+ * disabling them without disabling the entire crtc) allow again
+ * a NULL crtc->fb.
+ */
+ if (intel_crtc->active && crtc->fb)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
@@ -2350,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
}
+ intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -2944,6 +2982,30 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return pending;
}
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+
+ /* Note that we don't need to be called with mode_config.lock here
+ * as our list of CRTC objects is static for the lifetime of the
+ * device and so cannot disappear as we iterate. Similarly, we can
+ * happily treat the predicates as racy, atomic checks as userspace
+ * cannot claim and pin a new fb without at least acquiring the
+ * struct_mutex and so serialising with us.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ if (atomic_read(&crtc->unpin_work_count) == 0)
+ continue;
+
+ if (crtc->unpin_work)
+ intel_wait_for_vblank(dev, crtc->pipe);
+
+ return true;
+ }
+
+ return false;
+}
+
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3399,9 +3461,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->rps.hw_lock);
/* Quoting Art Runyan: "it's not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
- * mailbox." Therefore we need to defer waiting on the state
- * change.
- * TODO: need to fix this for state checker
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
*/
} else {
I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3428,9 +3489,10 @@ void hsw_disable_ips(struct intel_crtc *crtc)
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
- } else
+ } else {
I915_WRITE(IPS_CTL, 0);
- POSTING_READ(IPS_CTL);
+ POSTING_READ(IPS_CTL);
+ }
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
@@ -3465,7 +3527,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (intel_crtc->config.ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
@@ -3910,6 +3972,174 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ /* Obtain SKU information */
+ mutex_lock(&dev_priv->dpio_lock);
+ hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, cmd;
+
+ if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ cmd = 2;
+ else if (cdclk == 266)
+ cmd = 1;
+ else
+ cmd = 0;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK;
+ val |= (cmd << DSPFREQGUAR_SHIFT);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (cdclk == 400) {
+ u32 divider, vco;
+
+ vco = valleyview_get_vco(dev_priv);
+ divider = ((vco << 1) / cdclk) - 1;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust cdclk divider */
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val &= ~0xf;
+ val |= divider;
+ vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+ }
+
+ mutex_lock(&dev_priv->dpio_lock);
+ /* adjust self-refresh exit latency value */
+ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val &= ~0x7f;
+
+ /*
+ * For high bandwidth configs, we set a higher latency in the bunit
+ * so that the core display fetch happens in time to avoid underruns.
+ */
+ if (cdclk == 400)
+ val |= 4500 / 250; /* 4.5 usec */
+ else
+ val |= 3000 / 250; /* 3.0 usec */
+ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+ intel_i2c_reset(dev);
+}
+
+static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+ int cur_cdclk, vco;
+ int divider;
+
+ vco = valleyview_get_vco(dev_priv);
+
+ mutex_lock(&dev_priv->dpio_lock);
+ divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider &= 0xf;
+
+ cur_cdclk = (vco << 1) / (divider + 1);
+
+ return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+ int max_pixclk)
+{
+ int cur_cdclk;
+
+ cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+ /*
+ * Really only a few cases to deal with, as only 4 CDclks are supported:
+ * 200MHz
+ * 267MHz
+ * 320MHz
+ * 400MHz
+ * So we check to see whether we're above 90% of the lower bin and
+ * adjust if needed.
+ */
+ if (max_pixclk > 288000) {
+ return 400;
+ } else if (max_pixclk > 240000) {
+ return 320;
+ } else
+ return 266;
+ /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+}
+
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+ unsigned modeset_pipes,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = 0;
+
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (modeset_pipes & (1 << intel_crtc->pipe))
+ max_pixclk = max(max_pixclk,
+ pipe_config->adjusted_mode.crtc_clock);
+ else if (intel_crtc->base.enabled)
+ max_pixclk = max(max_pixclk,
+ intel_crtc->config.adjusted_mode.crtc_clock);
+ }
+
+ return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+ unsigned *prepare_pipes,
+ unsigned modeset_pipes,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+ pipe_config);
+ int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+ if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+ return;
+
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head)
+ if (intel_crtc->base.enabled)
+ *prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+ int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+ int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+ if (req_cdclk != cur_cdclk)
+ valleyview_set_cdclk(dev, req_cdclk);
+}
+
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
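The CDclk thresholds added above are simply 90% of the next-lower bin: 288000 kHz is 90% of 320 MHz and 240000 kHz is 90% of roughly 267 MHz. A standalone sketch (not driver code) that mirrors the bin selection, handy for sanity-checking a few pixel clocks:

	#include <stdio.h>

	/* same rule as valleyview_calc_cdclk(): pixel clock in kHz, CDclk in MHz */
	static int example_calc_cdclk(int max_pixclk)
	{
		if (max_pixclk > 288000)
			return 400;
		else if (max_pixclk > 240000)
			return 320;
		else
			return 266;
	}

	int main(void)
	{
		/* 200 MHz -> 266, 250 MHz -> 320, 300 MHz -> 400 */
		printf("%d %d %d\n", example_calc_cdclk(200000),
		       example_calc_cdclk(250000), example_calc_cdclk(300000));
		return 0;
	}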
@@ -4570,9 +4800,8 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
} else if (!IS_GEN2(dev)) {
refclk = 96000;
} else {
@@ -4634,24 +4863,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4720,15 +4949,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_pllb_recal_opamp(dev_priv, pipe);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4742,50 +4971,54 @@ static void vlv_update_pll(struct intel_crtc *crtc)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- /* Enable DPIO clock input */
+ /*
+ * Enable DPIO clock input. We should never disable the reference
+ * clock for pipe B, since VGA hotplug / manual detection depends
+ * on it.
+ */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
/* We should never disable this, set it here for state tracking */
@@ -5230,6 +5463,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ return;
+
tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
@@ -5261,7 +5497,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
int refclk = 100000;
mutex_lock(&dev_priv->dpio_lock);
- mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+ mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@ -5718,9 +5954,9 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
}
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
dev_priv->vbt.lvds_ssc_freq);
- return dev_priv->vbt.lvds_ssc_freq * 1000;
+ return dev_priv->vbt.lvds_ssc_freq;
}
return 120000;
@@ -5982,7 +6218,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100) ||
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (intel_crtc->config.sdvo_tv_clock)
@@ -6323,7 +6559,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
- WARN((val & ~DE_PCH_EVENT_IVB) != val,
+ WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
@@ -6402,7 +6638,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6672,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6447,6 +6683,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
struct drm_device *dev = dev_priv->dev;
uint32_t val;
+ WARN_ON(!HAS_PC8(dev));
+
if (dev_priv->pc8.enabled)
return;
@@ -6463,6 +6701,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
+
+ intel_runtime_pm_put(dev_priv);
}
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
@@ -6492,12 +6732,16 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
if (dev_priv->pc8.disable_count != 1)
return;
+ WARN_ON(!HAS_PC8(dev));
+
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
DRM_DEBUG_KMS("Disabling package C8+\n");
+ intel_runtime_pm_get(dev_priv);
+
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
@@ -6704,8 +6948,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int ret;
- if (!intel_ddi_pll_mode_set(crtc))
+ if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
+ intel_ddi_pll_enable(intel_crtc);
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -6796,8 +7041,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (intel_display_power_enabled(dev, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
- pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
- (I915_READ(IPS_CTL) & IPS_ENABLE);
+ if (IS_HASWELL(dev))
+ pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+ (I915_READ(IPS_CTL) & IPS_ENABLE);
pipe_config->pixel_multiplier = 1;
@@ -7689,7 +7935,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->vbt.lvds_ssc_freq * 1000;
+ return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev))
return 120000;
else if (!IS_GEN2(dev))
@@ -7752,12 +7998,17 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
i9xx_clock(refclk, &clock);
} else {
- bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+ u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
- clock.p2 = 14;
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -8335,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ring->id == RCS)
len += 6;
+ /*
+ * BSpec MI_DISPLAY_FLIP for IVB:
+ * "The full packet must be contained within the same cache line."
+ *
+ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+ * cacheline; if we ever start emitting more commands before
+ * the MI_DISPLAY_FLIP we may need to first emit everything else,
+ * then do the cacheline alignment, and finally emit the
+ * MI_DISPLAY_FLIP.
+ */
+ ret = intel_ring_cacheline_align(ring);
+ if (ret)
+ goto err_unpin;
+
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
@@ -8493,28 +8758,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
.load_lut = intel_crtc_load_lut,
};
-static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev;
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
- WARN(!crtc, "checking null crtc?\n");
-
- dev = crtc->dev;
-
- list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
- if (tmp == crtc)
- break;
- crtc_mask <<= 1;
- }
-
- if (encoder->possible_crtcs & crtc_mask)
- return true;
- return false;
-}
-
/**
* intel_modeset_update_staged_output_state
*
@@ -9122,7 +9365,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pch_pfit.size);
}
- PIPE_CONF_CHECK_I(ips_enabled);
+ /* BDW+ don't expose a synchronous way to read the state */
+ if (IS_HASWELL(dev))
+ PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -9368,21 +9613,19 @@ static int __intel_set_mode(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_display_mode *saved_mode, *saved_hwmode;
+ struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
- saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
+ saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
- saved_hwmode = saved_mode + 1;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
- *saved_hwmode = crtc->hwmode;
*saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
@@ -9402,6 +9645,21 @@ static int __intel_set_mode(struct drm_crtc *crtc,
"[modeset]");
}
+ /*
+ * See if the config requires any additional preparation, e.g.
+ * to adjust global state with pipes off. We need to do this
+ * here so we can get the modeset_pipe updated config for the new
+ * mode set on this crtc. For other crtcs we need to use the
+ * adjusted_mode bits in the crtc directly.
+ */
+ if (IS_VALLEYVIEW(dev)) {
+ valleyview_modeset_global_pipes(dev, &prepare_pipes,
+ modeset_pipes, pipe_config);
+
+ /* may have added more to prepare_pipes than we should */
+ prepare_pipes &= ~disable_pipes;
+ }
+
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -9418,6 +9676,14 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
+
+ /*
+ * Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc,
+ &pipe_config->adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
@@ -9441,23 +9707,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
dev_priv->display.crtc_enable(&intel_crtc->base);
- if (modeset_pipes) {
- /* Store real post-adjustment hardware mode. */
- crtc->hwmode = pipe_config->adjusted_mode;
-
- /* Calculate and store various constants which
- * are later needed by vblank and swap-completion
- * timestamping. They are derived from true hwmode.
- */
- drm_calc_timestamping_constants(crtc);
- }
-
/* FIXME: add subpixel order */
done:
- if (ret && crtc->enabled) {
- crtc->hwmode = *saved_hwmode;
+ if (ret && crtc->enabled)
crtc->mode = *saved_mode;
- }
out:
kfree(pipe_config);
@@ -9679,8 +9932,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Make sure the new CRTC will work with the encoder */
- if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
- new_crtc)) {
+ if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
return -EINVAL;
}
connector->encoder->new_crtc = to_intel_crtc(new_crtc);
@@ -9694,17 +9947,21 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
-
- goto next_encoder;
+ num_connectors++;
}
}
- encoder->new_crtc = NULL;
-next_encoder:
+
+ if (num_connectors == 0)
+ encoder->new_crtc = NULL;
+ else if (num_connectors > 1)
+ return -EINVAL;
+
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9775,6 +10032,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+ /*
+ * In the fastboot case this may be our only check of the
+ * state after boot. It would be better to only do it on
+ * the first update, but we don't have a nice way of doing that
+ * (and really, set_config isn't used much for high freq page
+ * flipping, so increasing its cost here shouldn't be a big
+ * deal).
+ */
+ if (i915_fastboot && ret == 0)
+ intel_modeset_check_state(set->crtc->dev);
}
if (ret) {
@@ -9835,7 +10102,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
/* PCH refclock must be enabled first */
- assert_pch_refclk_enabled(dev_priv);
+ ibx_assert_pch_refclk_enabled(dev_priv);
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
@@ -9903,8 +10170,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
dev_priv->num_shared_dpll = 0;
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
- DRM_DEBUG_KMS("%i shared PLLs initialized\n",
- dev_priv->num_shared_dpll);
}
static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -9926,10 +10191,13 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->lut_b[i] = i;
}
- /* Swap pipes & planes for FBC on pre-965 */
+ /*
+ * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+ * are hooked to plane B. Hence we want plane A feeding pipe B.
+ */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
@@ -10018,6 +10286,28 @@ static bool has_edp_a(struct drm_device *dev)
return true;
}
+const char *intel_output_name(int output)
+{
+ static const char *names[] = {
+ [INTEL_OUTPUT_UNUSED] = "Unused",
+ [INTEL_OUTPUT_ANALOG] = "Analog",
+ [INTEL_OUTPUT_DVO] = "DVO",
+ [INTEL_OUTPUT_SDVO] = "SDVO",
+ [INTEL_OUTPUT_LVDS] = "LVDS",
+ [INTEL_OUTPUT_TVOUT] = "TV",
+ [INTEL_OUTPUT_HDMI] = "HDMI",
+ [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
+ [INTEL_OUTPUT_EDP] = "eDP",
+ [INTEL_OUTPUT_DSI] = "DSI",
+ [INTEL_OUTPUT_UNKNOWN] = "Unknown",
+ };
+
+ if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
+ return "Invalid";
+
+ return names[output];
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10412,8 +10702,11 @@ static void intel_init_display(struct drm_device *dev)
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- } else if (IS_VALLEYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.modeset_global_resources =
+ valleyview_modeset_global_resources;
dev_priv->display.write_eld = ironlake_write_eld;
+ }
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10440,6 +10733,8 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
}
+
+ intel_panel_init_backlight_funcs(dev);
}
/*
@@ -10476,17 +10771,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
-/*
- * Some machines (Dell XPS13) suffer broken backlight controls if
- * BLM_PCH_PWM_ENABLE is set.
- */
-static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
- DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-}
-
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -10555,11 +10839,6 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
- /* Dell XPS13 HD Sandy Bridge */
- { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
- /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
- { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -10603,18 +10882,11 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
intel_prepare_ddi(dev);
intel_init_clock_gating(dev);
- /* Enable the CRI clock source so we can get at the display */
- if (IS_VALLEYVIEW(dev))
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_INTEGRATED_CRI_CLK_VLV);
-
- intel_init_dpio(dev);
+ intel_reset_dpio(dev);
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
@@ -10676,6 +10948,9 @@ void intel_modeset_init(struct drm_device *dev)
}
}
+ intel_init_dpio(dev);
+ intel_reset_dpio(dev);
+
intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
@@ -10879,7 +11154,7 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (HAS_POWER_WELL(dev) &&
+ if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
@@ -11023,7 +11298,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (IS_HASWELL(dev))
+ if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
if (force_restore) {
@@ -11101,12 +11376,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* destroy backlight, if any, before the connectors */
- intel_panel_destroy_backlight(dev);
-
- /* destroy the sysfs files before encoders/connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ /* destroy the backlight and sysfs files before encoders/connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ intel_panel_destroy_backlight(connector);
drm_sysfs_connector_remove(connector);
+ }
drm_mode_config_cleanup(dev);
@@ -11161,6 +11435,7 @@ struct intel_display_error_state {
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
+ bool power_domain_on;
u32 source;
} pipe[I915_MAX_PIPES];
@@ -11175,6 +11450,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
+ bool power_domain_on;
enum transcoder cpu_transcoder;
u32 conf;
@@ -11208,11 +11484,13 @@ intel_display_capture_error_state(struct drm_device *dev)
if (error == NULL)
return NULL;
- if (HAS_POWER_WELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+ error->pipe[i].power_domain_on =
+ intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+ if (!error->pipe[i].power_domain_on)
continue;
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11248,8 +11526,10 @@ intel_display_capture_error_state(struct drm_device *dev)
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!intel_display_power_enabled(dev,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+ error->transcoder[i].power_domain_on =
+ intel_display_power_enabled_sw(dev,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!error->transcoder[i].power_domain_on)
continue;
error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@ -11279,11 +11559,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
return;
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (HAS_POWER_WELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
+ err_printf(m, " Power: %s\n",
+ error->pipe[i].power_domain_on ? "on" : "off");
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, "Plane [%d]:\n", i);
@@ -11309,6 +11591,8 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
for (i = 0; i < error->num_transcoders; i++) {
err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " Power: %s\n",
+ error->transcoder[i].power_domain_on ? "on" : "off");
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 30c627c7b7ba..2688f6d64bb9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -142,7 +142,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 8) / 10;
}
-static int
+static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,29 +537,33 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
+ int retry;
if (WARN_ON(send_bytes > 16))
return -E2BIG;
intel_dp_check_edp(intel_dp);
- msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
- break;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
+ return send_bytes;
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else
return -EIO;
}
- return send_bytes;
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
/* Write a single byte to the aux channel in native mode */
@@ -581,12 +585,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
int reply_bytes;
uint8_t ack;
int ret;
+ int retry;
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
intel_dp_check_edp(intel_dp);
- msg[0] = AUX_NATIVE_READ << 4;
+ msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
@@ -594,23 +599,26 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
- ack = reply[0];
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ ack = reply[0] >> 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else
return -EIO;
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
static int
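Both native AUX paths above now share the same bounded retry shape instead of looping forever: at most 7 attempts, an approximately 400-500us sleep after a DEFER reply, and the reply code shifted down from the high nibble of the first reply byte before it is masked against the DP_AUX_* definitions. Schematically (names as in the hunks above):

	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;

		ack >>= 4;				/* reply code lives in bits 7:4 */
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			break;				/* success */
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) != DP_AUX_NATIVE_REPLY_DEFER)
			return -EIO;			/* NACK or malformed reply */
		usleep_range(400, 500);			/* sink asked us to try again */
	}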
@@ -633,12 +641,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
+ msg[0] = DP_AUX_I2C_READ << 4;
else
- msg[0] = AUX_I2C_WRITE << 4;
+ msg[0] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
+ msg[0] |= DP_AUX_I2C_MOT << 4;
msg[1] = address >> 8;
msg[2] = address;
@@ -675,17 +683,17 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
goto out;
}
- switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
ret = -EREMOTEIO;
goto out;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
* could check the DPCD for I2C bit rate capabilities,
@@ -706,18 +714,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
goto out;
}
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
ret = reply_bytes - 1;
goto out;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
ret = -EREMOTEIO;
goto out;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
@@ -1037,6 +1045,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
}
+
+ DRM_DEBUG_KMS("Wait complete\n");
}
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@ -1092,6 +1102,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
if (ironlake_edp_have_panel_vdd(intel_dp))
return;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("Turning eDP VDD on\n");
if (!ironlake_edp_have_panel_power(intel_dp))
@@ -1140,7 +1152,11 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
- msleep(intel_dp->panel_power_down_delay);
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ msleep(intel_dp->panel_power_cycle_delay);
+
+ intel_runtime_pm_put(dev_priv);
}
}
@@ -1248,6 +1264,9 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
intel_dp->want_panel_vdd = false;
ironlake_wait_panel_off(intel_dp);
+
+ /* We got a reference when we enabled the VDD. */
+ intel_runtime_pm_put(dev_priv);
}
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1627,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
- IS_BROADWELL(dev) ? 0 : link_entry_time |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@@ -1845,34 +1864,36 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
struct edp_power_seq power_seq;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
- vlv_wait_port_ready(dev_priv, port);
+ vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1882,24 +1903,24 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1941,18 +1962,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@ -2050,7 +2059,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
@@ -2127,14 +2136,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
}
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
return 0;
@@ -2646,7 +2655,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
- intel_dp_link_down(intel_dp);
break;
}
@@ -2899,13 +2907,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- intel_dp_link_down(intel_dp);
return;
}
/* Now read the DPCD to see if it's actually running */
if (!intel_dp_get_dpcd(intel_dp)) {
- intel_dp_link_down(intel_dp);
return;
}
@@ -3020,18 +3026,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
return status;
}
- switch (intel_dig_port->port) {
- case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS;
- break;
- case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS;
- break;
- default:
- return connector_status_unknown;
+ if (IS_VALLEYVIEW(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ } else {
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ return connector_status_unknown;
+ }
}
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
@@ -3082,9 +3104,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
struct edid *edid = NULL;
+ intel_runtime_pm_get(dev_priv);
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
@@ -3096,7 +3121,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = g4x_dp_detect(intel_dp);
if (status != connector_status_connected)
- return status;
+ goto out;
intel_dp_probe_oui(intel_dp);
@@ -3112,7 +3137,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- return connector_status_connected;
+ status = connector_status_connected;
+
+out:
+ intel_runtime_pm_put(dev_priv);
+ return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
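Note on the intel_dp.c hunks above: they bound the previously open-ended AUX retry loops at 7 attempts, switch the DEFER back-off from udelay(100) to usleep_range(400, 500), and read the native reply code from the high nibble of the ack byte using the DP_AUX_* definitions. A minimal, self-contained sketch of that bounded-retry pattern follows; dp_aux_xfer_t, dp_aux_retry_xfer and MAX_DEFER_RETRIES are illustrative names, not part of the patch.

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

#define DP_AUX_NATIVE_REPLY_ACK		0x0
#define DP_AUX_NATIVE_REPLY_NACK	0x1
#define DP_AUX_NATIVE_REPLY_DEFER	0x2
#define DP_AUX_NATIVE_REPLY_MASK	0x3

#define MAX_DEFER_RETRIES 7	/* same bound as the patch uses */

/* Hypothetical transfer hook: returns bytes received, < 0 on error. */
typedef int (*dp_aux_xfer_t)(const uint8_t *msg, int msg_bytes,
			     uint8_t *reply, int reply_bytes);

static int dp_aux_retry_xfer(dp_aux_xfer_t xfer, const uint8_t *msg,
			     int msg_bytes, uint8_t *reply, int reply_bytes)
{
	int retry, ret;

	for (retry = 0; retry < MAX_DEFER_RETRIES; retry++) {
		ret = xfer(msg, msg_bytes, reply, reply_bytes);
		if (ret < 0)
			return ret;

		/* The native reply code sits in the high nibble of byte 0. */
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			return ret;
		case DP_AUX_NATIVE_REPLY_DEFER:
			usleep(400);	/* sink is busy, back off briefly */
			continue;
		default:		/* NACK or anything unexpected */
			return -EIO;
		}
	}

	/* Too many DEFERs: give up instead of looping forever. */
	return -EIO;
}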
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 79f91f26e288..fbfaaba5cc3b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -65,8 +65,8 @@
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
DIV_ROUND_UP((US), 1000), 0)
-#define KHz(x) (1000*x)
-#define MHz(x) KHz(1000*x)
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
/*
* Display related stuff
@@ -155,7 +155,19 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *downclock_mode;
int fitting_mode;
+
+ /* backlight */
+ struct {
+ bool present;
+ u32 level;
+ u32 max;
+ bool enabled;
+ bool combination_mode; /* gen 2/4 only */
+ bool active_low_pwm;
+ struct backlight_device *device;
+ } backlight;
};
struct intel_connector {
@@ -443,7 +455,7 @@ struct intel_hdmi {
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len);
+ const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@@ -490,9 +502,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
- return 0;
+ return DPIO_CH0;
case PORT_C:
- return 1;
+ return DPIO_CH1;
default:
BUG();
}
@@ -601,7 +613,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc);
+void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -612,6 +625,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -638,7 +653,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
@@ -690,11 +706,10 @@ void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
-void i915_disable_vga_mem(struct drm_device *dev);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
-
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -808,9 +823,13 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
-void intel_panel_destroy_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
+extern struct drm_display_mode *intel_find_panel_downclock(
+ struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector);
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -830,6 +849,8 @@ int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+ enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
@@ -844,6 +865,10 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
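Note on the KHz()/MHz() hunk above: it only adds parentheses around the macro argument, but that matters as soon as the argument is an expression rather than a literal. A two-line illustration (the values are made up):

#define KHz_OLD(x) (1000*x)		/* old form */
#define KHz_NEW(x) (1000 * (x))		/* form introduced by the patch */

/* KHz_OLD(2 + 3) expands to (1000*2 + 3)	-> 2003, wrong	 */
/* KHz_NEW(2 + 3) expands to (1000 * (2 + 3))	-> 5000, correct */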
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index d257b093ca68..fabbf0d895cf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -37,49 +37,18 @@
static const struct intel_dsi_device intel_dsi_devices[] = {
};
-
-static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
- u32 mask)
-{
- u32 tmp = vlv_cck_read(dev_priv, reg);
- tmp &= ~mask;
- tmp |= val;
- vlv_cck_write(dev_priv, reg, tmp);
-}
-
-static void band_gap_wa(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
- /* Enable bandgap fix in GOP driver */
- vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
- msleep(20);
-
- /* Turn Display Trunk on */
- vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
- msleep(20);
-
- vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
- msleep(20);
-
- vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
- msleep(20);
- vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ udelay(150);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
mutex_unlock(&dev_priv->dpio_lock);
-
- /* Need huge delay, otherwise clock is not stable */
- msleep(100);
}
static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
@@ -132,14 +101,47 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
vlv_enable_dsi_pll(encoder);
}
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+ usleep_range(2000, 2500);
+ I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+ usleep_range(2000, 2500);
+}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
DRM_DEBUG_KMS("\n");
+
+ if (intel_dsi->dev.dev_ops->panel_reset)
+ intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+ /* put device in ready state */
+ intel_dsi_device_ready(encoder);
+
+ if (intel_dsi->dev.dev_ops->send_otp_cmds)
+ intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
@@ -147,41 +149,28 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- temp = I915_READ(MIPI_DEVICE_READY(pipe));
- if ((temp & DEVICE_READY) == 0) {
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
- } else if (temp & ULPS_STATE_MASK) {
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
- /*
- * We need to ensure that there is a minimum of 1 ms time
- * available before clearing the UPLS exit state.
- */
- msleep(2);
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
- }
-
if (is_cmd_mode(intel_dsi))
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
-
- if (is_vid_mode(intel_dsi)) {
+ else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON);
msleep(100);
/* assert ip_tg_enable signal */
- temp = I915_READ(MIPI_PORT_CTRL(pipe));
+ temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
+ temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
- intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ if (intel_dsi->dev.dev_ops->enable)
+ intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
}
static void intel_dsi_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
@@ -189,8 +178,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
-
if (is_vid_mode(intel_dsi)) {
dpi_send_cmd(intel_dsi, SHUTDOWN);
msleep(10);
@@ -203,20 +190,54 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
msleep(2);
}
- temp = I915_READ(MIPI_DEVICE_READY(pipe));
- if (temp & DEVICE_READY) {
- temp &= ~DEVICE_READY;
- temp &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
- }
+ /* If the disable packets are sent before the shutdown packet, a "send
+ * turn on packet" error can be observed during a subsequent enable
+ * sequence. */
+ if (intel_dsi->dev.dev_ops->disable)
+ intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
DRM_DEBUG_KMS("\n");
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ val = I915_READ(MIPI_PORT_CTRL(pipe));
+ I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
+ == 0x00000), 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+ usleep_range(2000, 2500);
+
vlv_disable_dsi_pll(encoder);
}
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi_clear_device_ready(encoder);
+
+ if (intel_dsi->dev.dev_ops->disable_panel_power)
+ intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
@@ -251,8 +272,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
/* XXX: read flags, set to adjusted_mode */
}
-static int intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -352,11 +374,8 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- /* Update the DSI PLL */
- vlv_enable_dsi_pll(intel_encoder);
-
/* XXX: Location of the call */
- band_gap_wa(dev_priv);
+ band_gap_reset(dev_priv);
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
@@ -373,11 +392,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
- I915_WRITE(MIPI_DPHY_PARAM(pipe),
- 0x3c << EXIT_ZERO_COUNT_SHIFT |
- 0x1f << TRAIL_COUNT_SHIFT |
- 0xc5 << CLK_ZERO_COUNT_SHIFT |
- 0x1f << PREPARE_COUNT_SHIFT);
+ I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
@@ -425,9 +440,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
}
- I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+ I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
+ I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
+ I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
/* dphy stuff */
@@ -442,29 +457,31 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
*/
- I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+ I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+ I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
- 0xa << LP_HS_SSW_CNT_SHIFT |
- 0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+ intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
- intel_dsi->video_mode_format);
+ intel_dsi->video_frmt_cfg_bits |
+ intel_dsi->video_mode_format);
}
static enum drm_connector_status

diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index c7765f33d524..b4a27cec882f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,6 +39,13 @@ struct intel_dsi_device {
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
+ void (*panel_reset)(struct intel_dsi_device *dsi);
+
+ void (*disable_panel_power)(struct intel_dsi_device *dsi);
+
+ /* one time programmable commands if needed */
+ void (*send_otp_cmds)(struct intel_dsi_device *dsi);
+
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
@@ -89,6 +96,20 @@ struct intel_dsi {
/* eot for MIPI_EOT_DISABLE register */
u32 eot_disable;
+
+ u32 port_bits;
+ u32 bw_timer;
+ u32 dphy_reg;
+ u32 video_frmt_cfg_bits;
+ u16 lp_byte_clk;
+
+ /* timeouts in byte clocks */
+ u16 lp_rx_timeout;
+ u16 turn_arnd_val;
+ u16 rst_timer_val;
+ u16 hs_to_lp_count;
+ u16 clk_lp_to_hs_count;
+ u16 clk_hs_to_lp_count;
};
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 44279b2ade88..ba79ec19da3b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -50,6 +50,8 @@ static const u32 lfsr_converts[] = {
71, 35 /* 91 - 92 */
};
+#ifdef DSI_CLK_FROM_RR
+
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
@@ -121,7 +123,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
- dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+ dsi_clk = dsi_bit_clock_hz / 1000;
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
@@ -129,64 +131,37 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
return dsi_clk;
}
-#ifdef MNP_FROM_TABLE
-
-struct dsi_clock_table {
- u32 freq;
- u8 m;
- u8 p;
-};
-
-static const struct dsi_clock_table dsi_clk_tbl[] = {
- {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
- {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
- {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
- {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
- {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
- {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
- {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
- {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
- {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
- {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
- {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
- {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
- {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
- {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
- {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
- {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
- {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
- {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
- {1000, 80, 2}, /* dsi clock frequency in Mhz*/
-};
+#else
-static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
+ int pixel_format, int lane_count)
{
- unsigned int i;
- u8 m;
- u8 n;
- u8 p;
- u32 m_seed;
-
- if (dsi_clk < 300 || dsi_clk > 1000)
- return -ECHRNG;
+ u32 dsi_clk_khz;
+ u32 bpp;
- for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) {
- if (dsi_clk_tbl[i].freq > dsi_clk)
- break;
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
}
- m = dsi_clk_tbl[i].m;
- p = dsi_clk_tbl[i].p;
- m_seed = lfsr_converts[m - 62];
- n = 1;
- dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
- dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
- m_seed << DSI_PLL_M1_DIV_SHIFT;
+ /* DSI data rate = pixel clock * bits per pixel / lane count;
+ * the pixel clock is in kHz, so the resulting DSI clock is in kHz too */
+ dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
- return 0;
+ return dsi_clk_khz;
}
-#else
+#endif
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
@@ -194,36 +169,47 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
u32 ref_clk;
u32 error;
u32 tmp_error;
- u32 target_dsi_clk;
- u32 calc_dsi_clk;
+ int target_dsi_clk;
+ int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
u32 m_seed;
- if (dsi_clk < 300 || dsi_clk > 1150) {
+ /* dsi_clk is expected in KHZ */
+ if (dsi_clk < 300000 || dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
ref_clk = 25000;
- target_dsi_clk = dsi_clk * 1000;
+ target_dsi_clk = dsi_clk;
error = 0xFFFFFFFF;
+ tmp_error = 0xFFFFFFFF;
calc_m = 0;
calc_p = 0;
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
-
+ /* Find the optimal m and p divisors with
+ * minimal error relative to the required clock */
calc_dsi_clk = (m * ref_clk) / p;
- if (calc_dsi_clk >= target_dsi_clk) {
- tmp_error = calc_dsi_clk - target_dsi_clk;
- if (tmp_error < error) {
- error = tmp_error;
- calc_m = m;
- calc_p = p;
- }
+ if (calc_dsi_clk == target_dsi_clk) {
+ calc_m = m;
+ calc_p = p;
+ error = 0;
+ break;
+ } else
+ tmp_error = abs(target_dsi_clk - calc_dsi_clk);
+
+ if (tmp_error < error) {
+ error = tmp_error;
+ calc_m = m;
+ calc_p = p;
}
}
+
+ if (error == 0)
+ break;
}
m_seed = lfsr_converts[calc_m - 62];
@@ -235,8 +221,6 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
return 0;
}
-#endif
-
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
@@ -251,9 +235,8 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
- dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
- intel_dsi->video_mode_format,
- intel_dsi->lane_count, !intel_dsi->eot_disable);
+ dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
if (ret) {
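Note on the reworked dsi_calc_mnp() above: it now takes the target DSI clock in kHz and scans m = 62..92, p = 2..6 for the pair whose m * ref_clk / p lands closest to that target, stopping early on an exact hit. A small stand-alone sketch of the same divisor search, with an arbitrary example target (not taken from the patch):

#include <stdio.h>

int main(void)
{
	const unsigned int ref_clk = 25000;		/* kHz, as in the patch */
	const unsigned int target_dsi_clk = 513000;	/* kHz, arbitrary example */
	unsigned int m, p, best_m = 0, best_p = 0;
	unsigned int error = ~0u;

	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {
			unsigned int calc = m * ref_clk / p;
			unsigned int e = calc > target_dsi_clk ?
					 calc - target_dsi_clk :
					 target_dsi_clk - calc;

			if (e < error) {
				error = e;
				best_m = m;
				best_p = p;
			}
		}
	}

	printf("m=%u p=%u -> %u kHz (error %u kHz)\n",
	       best_m, best_p, best_m * ref_clk / best_p, error);
	return 0;
}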
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 3c7736546856..eeff998e52ef 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -234,8 +234,9 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_dvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
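Note on the mode_valid changes: several connectors in this series (DP, DSI, DVO, HDMI, LVDS) switch their mode_valid hook from int to enum drm_mode_status, which is what the DRM core actually interprets. The general shape of such a hook, sketched with simplified stand-ins for the DRM types and made-up limits:

/* Simplified stand-ins for the DRM definitions, for illustration only. */
enum drm_mode_status { MODE_OK = 0, MODE_CLOCK_HIGH, MODE_PANEL };

struct display_mode {
	int clock;	/* kHz */
	int hdisplay;
};

static enum drm_mode_status example_mode_valid(const struct display_mode *mode)
{
	if (mode->clock > 225000)	/* made-up port clock limit */
		return MODE_CLOCK_HIGH;
	if (mode->hdisplay > 4096)	/* made-up panel width limit */
		return MODE_PANEL;
	return MODE_OK;
}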
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 895fcb4fbd94..39eac9937a4a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -57,18 +57,14 @@ static struct fb_ops intelfb_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static int intelfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int intelfb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
- struct device *device = &dev->pdev->dev;
int size, ret;
/* we don't do packed 24bpp */
@@ -94,8 +90,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out;
}
- mutex_lock(&dev->struct_mutex);
-
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
@@ -103,7 +97,50 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unref;
}
- info = framebuffer_alloc(0, device);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+ if (ret)
+ goto out_unpin;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+out:
+ return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (!intel_fb->obj) {
+ DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ ret = intelfb_alloc(helper, sizes);
+ if (ret)
+ goto out_unlock;
+ } else {
+ DRM_DEBUG_KMS("re-using BIOS fb\n");
+ sizes->fb_width = intel_fb->base.width;
+ sizes->fb_height = intel_fb->base.height;
+ }
+
+ obj = intel_fb->obj;
+ size = obj->base.size;
+
+ info = framebuffer_alloc(0, &dev->pdev->dev);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
@@ -111,10 +148,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->par = helper;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
- if (ret)
- goto out_unpin;
-
fb = &ifbdev->ifb.base;
ifbdev->helper.fb = fb;
@@ -170,17 +203,15 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
-
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
i915_gem_object_unpin(obj);
-out_unref:
drm_gem_object_unreference(&obj->base);
+out_unlock:
mutex_unlock(&dev->struct_mutex);
-out:
return ret;
}
@@ -297,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
fb_set_suspend(info, state);
}
-MODULE_LICENSE("GPL and additional rights");
-
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
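Note on the intel_fbdev.c rework above: buffer allocation is split out of intelfb_create() so that a framebuffer already configured by the BIOS can be reused instead of always allocating a new one. Schematically, with illustrative names only (this is not the driver code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct example_fb {
	void *obj;		/* backing storage, NULL if nothing was handed over */
	int width, height;
};

static int example_alloc_fb(struct example_fb *fb, int w, int h)
{
	fb->obj = malloc((size_t)w * h * 4);	/* dummy 32bpp backing store */
	if (!fb->obj)
		return -ENOMEM;
	fb->width = w;
	fb->height = h;
	return 0;
}

static int example_create(struct example_fb *fb, int want_w, int want_h)
{
	int ret;

	if (!fb->obj) {
		/* No pre-existing (BIOS) framebuffer: allocate a fresh one. */
		ret = example_alloc_fb(fb, want_w, want_h);
		if (ret)
			return ret;
	} else {
		/* Reuse what firmware left behind and adopt its dimensions. */
		want_w = fb->width;
		want_h = fb->height;
	}

	printf("using a %dx%d framebuffer\n", want_w, want_h);
	return 0;
}

int main(void)
{
	struct example_fb fb = { 0 };
	int ret = example_create(&fb, 1024, 768);

	free(fb.obj);
	return ret ? 1 : 0;
}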
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 03f9ca70530c..ee3181ebcc92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -130,9 +130,9 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
static void g4x_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -167,9 +167,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -205,9 +205,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -246,9 +246,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -284,9 +284,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
- const uint8_t *frame, ssize_t len)
+ const void *frame, ssize_t len)
{
- uint32_t *data = (uint32_t *)frame;
+ const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (IS_G4X(dev))
+ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -853,8 +853,9 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
return 225000;
}
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
@@ -898,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
- && HAS_PCH_SPLIT(dev)) {
+ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1081,7 +1082,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
@@ -1090,41 +1091,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
- val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
- 0x2b245f5f);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
- 0x5578b83a);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
- 0x0c782040);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
- 0x2b247878);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
- 0x00002000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
- DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
/* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
- 0x00760018);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
- 0x00400888);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, port);
+ vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1134,7 +1127,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
if (!IS_VALLEYVIEW(dev))
@@ -1142,24 +1135,22 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
-
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
- 0x00002000);
- vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
- DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1169,13 +1160,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
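Note on the write_infoframe hunks above: taking const void *frame lets callers pass any packed infoframe structure without casting away const, while the implementation still walks the buffer as 32-bit words for the DIP data registers. A small user-space sketch of that access pattern, with the register write mocked as a printf (names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void write_words(const void *frame, size_t len)
{
	const uint32_t *data = frame;	/* no cast needed from const void * */
	size_t i;

	for (i = 0; i < len; i += 4)
		printf("DIP data word %zu: 0x%08x\n", i / 4, *data++);
}

int main(void)
{
	uint32_t raw[2];

	memset(raw, 0xab, sizeof(raw));	/* stand-in for a packed infoframe */
	write_words(raw, sizeof(raw));
	return 0;
}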
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2ca17b14b6c1..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -82,20 +82,11 @@ static int get_disp_clk_div(struct drm_i915_private *dev_priv,
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
{
- int vco_freq[] = { 800, 1600, 2000, 2400 };
- int gmbus_freq = 0, cdclk_div, hpll_freq;
+ int vco, gmbus_freq = 0, cdclk_div;
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
- /* Skip setting the gmbus freq if BIOS has already programmed it */
- if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
- return;
-
- /* Obtain SKU information */
- mutex_lock(&dev_priv->dpio_lock);
- hpll_freq =
- vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
- mutex_unlock(&dev_priv->dpio_lock);
+ vco = valleyview_get_vco(dev_priv);
/* Get the CDCLK divide ratio */
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
@@ -106,7 +97,7 @@ static void gmbus_set_freq(struct drm_i915_private *dev_priv)
* in fact 1MHz is the correct frequency.
*/
if (cdclk_div)
- gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+ gmbus_freq = (vco << 1) / cdclk_div;
if (WARN_ON(gmbus_freq == 0))
return;
@@ -267,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
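Note on the intel_i2c.c change above: the GMBUS frequency is now derived from the HPLL VCO returned by valleyview_get_vco() instead of a hard-coded SKU table; the arithmetic is simply gmbus_freq = (vco << 1) / cdclk_div. For example (the numbers below are made up, not real SKU data):

#include <stdio.h>

int main(void)
{
	int vco = 800;		/* hypothetical VCO figure */
	int cdclk_div = 4;	/* hypothetical CDCLK divide ratio */
	int gmbus_freq = (vco << 1) / cdclk_div;

	printf("gmbus_freq = %d\n", gmbus_freq);	/* prints 400 */
	return 0;
}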
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c3b4da7895ed..8bcb93a2a9f6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -256,8 +256,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
POSTING_READ(lvds_encoder->reg);
}
-static int intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -446,9 +447,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
if (dev_priv->modeset_restore == MODESET_DONE)
goto exit;
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, true);
- drm_modeset_unlock_all(dev);
+ /*
+ * Some old platforms' BIOSes love to wreak havoc while the lid is closed.
+ * We try to detect this here and undo any damage. The split for PCH
+ * platforms is rather conservative and a bit arbitrary, except that on
+ * those platforms VGA disabling requires actual legacy VGA I/O access,
+ * and as part of the cleanup in the hw state restore we also redisable
+ * the vga plane.
+ */
+ if (!HAS_PCH_SPLIT(dev)) {
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+ }
dev_priv->modeset_restore = MODESET_DONE;
@@ -744,57 +755,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-/**
- * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @connector: LVDS connector
- *
- * Find the reduced downclock for LVDS in EDID.
- */
-static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan;
- int temp_downclock;
-
- temp_downclock = fixed_mode->clock;
- list_for_each_entry(scan, &connector->probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found for the LVDS. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- }
- }
- }
- if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
- /* We found the downclock for LVDS. */
- dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
- DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- fixed_mode->clock, temp_downclock);
- }
-}
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
@@ -1072,8 +1032,22 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
- intel_find_lvds_downclock(dev, fixed_mode,
- connector);
+ intel_connector->panel.downclock_mode =
+ intel_find_panel_downclock(dev,
+ fixed_mode, connector);
+ if (intel_connector->panel.downclock_mode !=
+ NULL && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = true;
+ dev_priv->lvds_downclock =
+ intel_connector->panel.
+ downclock_mode->clock;
+ DRM_DEBUG_KMS("LVDS downclock is found"
+ " in EDID. Normal clock %dKhz, "
+ "downclock %dKhz\n",
+ fixed_mode->clock,
+ dev_priv->lvds_downclock);
+ }
goto out;
}
}
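Note on the intel_lvds.c change above: the LVDS-specific downclock search is replaced by the shared intel_find_panel_downclock() helper declared in intel_drv.h. Its core is the same predicate the deleted code used: a probed mode is a downclock candidate when every timing field matches the fixed mode and only the pixel clock is lower. As a sketch with a simplified mode struct (not the DRM one):

struct mode {
	int clock;	/* kHz */
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

static int is_downclock_candidate(const struct mode *scan,
				  const struct mode *fixed)
{
	return scan->hdisplay    == fixed->hdisplay &&
	       scan->hsync_start == fixed->hsync_start &&
	       scan->hsync_end   == fixed->hsync_end &&
	       scan->htotal      == fixed->htotal &&
	       scan->vdisplay    == fixed->vdisplay &&
	       scan->vsync_start == fixed->vsync_start &&
	       scan->vsync_end   == fixed->vsync_end &&
	       scan->vtotal      == fixed->vtotal &&
	       scan->clock < fixed->clock;
}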
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6d69a9bad865..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -28,7 +28,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <acpi/video.h>
#include <drm/drmP.h>
@@ -64,7 +63,7 @@ struct opregion_header {
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
@@ -86,7 +85,7 @@ struct opregion_acpi {
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
@@ -94,7 +93,7 @@ struct opregion_swsci {
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
-} __attribute__((packed));
+} __packed;
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
@@ -115,7 +114,7 @@ struct opregion_asle {
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
-} __attribute__((packed));
+} __packed;
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
@@ -227,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -261,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
@@ -396,13 +398,8 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct intel_connector *intel_connector = NULL;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
- u32 ret = 0;
- bool found = false;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -414,38 +411,20 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLC_BACKLIGHT_FAILED;
mutex_lock(&dev->mode_config.mutex);
+
/*
- * Could match the OpRegion connector here instead, but we'd also need
- * to verify the connector could handle a backlight call.
+ * Update backlight on all connectors that support backlight (usually
+ * only one).
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc == crtc) {
- found = true;
- break;
- }
-
- if (!found) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- intel_connector = to_intel_connector(connector);
-
- if (!intel_connector) {
- ret = ASLC_BACKLIGHT_FAILED;
- goto out;
- }
-
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+ intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
-out:
mutex_unlock(&dev->mode_config.mutex);
- return ret;
+
+ return 0;
}
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
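Note on the swsci() change above: it raises the driver sleep-timeout cap to MAX_DSLP (1500 ms) and clamps whatever the BIOS requests rather than just warning. The pattern is a plain bounded fallback; in the sketch below the function name is invented and the zero check stands in for the "too small" branch that lies outside the visible hunk:

#define MAX_DSLP 1500	/* ms, same cap as the patch */

static unsigned int sanitize_dslp(unsigned int dslp)
{
	if (dslp == 0)
		return 50;		/* spec default of 2 ms is too small in practice */
	if (dslp > MAX_DSLP)
		return MAX_DSLP;	/* don't trust an excessive BIOS request */
	return dslp;
}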
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990fbab3..a759ecdb7a6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
u32 pfit_control;
/* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
+ if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
return -1;
pfit_control = I915_READ(PFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e6f782d1c669..079ea38f14d9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -325,214 +325,170 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+ u32 val)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
- if (IS_GEN4(dev))
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+ WARN_ON(panel->backlight.max == 0);
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ return panel->backlight.max - val;
+ }
- return 0;
+ return val;
}
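The inversion hook above mirrors the requested value around the panel's maximum duty cycle whenever inversion is forced by the module parameter or by a platform quirk. A small self-contained sketch of that logic, assuming plain host C and a made-up PWM maximum:

#include <stdio.h>

/*
 * invert < 0            -> never invert
 * invert > 0 or quirk   -> value is mirrored to (max - val)
 * otherwise             -> value passes through unchanged
 */
static unsigned int compute_brightness(int invert, int quirk,
                                       unsigned int max, unsigned int val)
{
        if (invert < 0)
                return val;
        if (invert > 0 || quirk)
                return max - val;
        return val;
}

int main(void)
{
        unsigned int max = 4648;        /* hypothetical backlight.max */

        printf("default:  %u\n", compute_brightness(0, 0, max, 1000));
        printf("quirked:  %u\n", compute_brightness(0, 1, max, 1000));
        printf("forced:   %u\n", compute_brightness(1, 0, max, 1000));
        return 0;
}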
-/* XXX: query mode clock or hardware clock and program max PWM appropriately
- * when it's 0.
- */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
+static u32 bdw_get_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
-
- WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
- /* Restore the CTL value if it lost, e.g. GPU reset */
-
- if (HAS_PCH_SPLIT(dev_priv->dev)) {
- val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL2 = val;
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL2;
- I915_WRITE(BLC_PWM_PCH_CTL2, val);
- }
- } else if (IS_VALLEYVIEW(dev)) {
- val = I915_READ(VLV_BLC_PWM_CTL(pipe));
- if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL = val;
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(VLV_BLC_PWM_CTL2(pipe));
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
- dev_priv->regfile.saveBLC_PWM_CTL2);
- }
+ return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
- if (!val)
- val = 0x0f42ffff;
- } else {
- val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL = val;
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 =
- I915_READ(BLC_PWM_CTL2);
- } else if (val == 0) {
- val = dev_priv->regfile.saveBLC_PWM_CTL;
- I915_WRITE(BLC_PWM_CTL, val);
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->regfile.saveBLC_PWM_CTL2);
- }
- }
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- return val;
+ return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 intel_panel_get_max_backlight(struct drm_device *dev,
- enum pipe pipe)
+static u32 i9xx_get_backlight(struct intel_connector *connector)
{
- u32 max;
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
- max = i915_read_blc_pwm_ctl(dev, pipe);
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
- if (HAS_PCH_SPLIT(dev)) {
- max >>= 16;
- } else {
- if (INTEL_INFO(dev)->gen < 4)
- max >>= 17;
- else
- max >>= 16;
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
}
- DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-
- return max;
+ return val;
}
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev,
- enum pipe pipe, u32 val)
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (i915_panel_invert_brightness < 0)
- return val;
+ return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
- if (i915_panel_invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- u32 max = intel_panel_get_max_backlight(dev, pipe);
- if (max)
- return max - val;
- }
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
- return val;
+ return _vlv_get_backlight(dev, pipe);
}
-static u32 intel_panel_get_backlight(struct drm_device *dev,
- enum pipe pipe)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
- int reg;
-
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
- if (IS_BROADWELL(dev)) {
- val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
- } else if (HAS_PCH_SPLIT(dev)) {
- val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- } else {
- if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL(pipe);
- else
- reg = BLC_PWM_CTL;
-
- val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_INFO(dev)->gen < 4)
- val >>= 1;
- if (is_backlight_combination_mode(dev)) {
- u8 lbpc;
-
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- }
- }
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- val = intel_panel_compute_brightness(dev, pipe, val);
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
-static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}
-static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+ u32 tmp;
+
+ tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
}
-static void intel_panel_actually_set_backlight(struct drm_device *dev,
- enum pipe pipe, u32 level)
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp;
- int reg;
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
- DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
- level = intel_panel_compute_brightness(dev, pipe, level);
+ WARN_ON(panel->backlight.max == 0);
- if (IS_BROADWELL(dev))
- return intel_bdw_panel_set_backlight(dev, level);
- else if (HAS_PCH_SPLIT(dev))
- return intel_pch_panel_set_backlight(dev, level);
-
- if (is_backlight_combination_mode(dev)) {
- u32 max = intel_panel_get_max_backlight(dev, pipe);
+ if (panel->backlight.combination_mode) {
u8 lbpc;
- /* we're screwed, but keep behaviour backwards compatible */
- if (!max)
- max = 1;
-
- lbpc = level * 0xfe / max + 1;
+ lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
- if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL(pipe);
- else
- reg = BLC_PWM_CTL;
-
- tmp = I915_READ(reg);
- if (INTEL_INFO(dev)->gen < 4)
+ if (IS_GEN4(dev)) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
level <<= 1;
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, tmp | level);
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
+
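A short worked example of the combination-mode split above (self-contained host C; the max and level values are hypothetical): the requested level is divided into a coarse LBPC byte written to PCI config space and a fine PWM duty cycle, so that lbpc * pwm roughly reproduces the original request.

#include <stdio.h>

int main(void)
{
        unsigned int max = 20000;       /* hypothetical panel->backlight.max */
        unsigned int level = 12345;     /* hypothetical requested level */

        /* Coarse part, same formula as i9xx_set_backlight(): 1..255 */
        unsigned int lbpc = level * 0xfe / max + 1;
        /* Fine part: whatever is left goes into the PWM duty cycle */
        unsigned int pwm = level / lbpc;

        printf("level %u -> lbpc %u, pwm %u (recombined ~%u)\n",
               level, lbpc, pwm, lbpc * pwm);
        return 0;
}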
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ level = intel_panel_compute_brightness(connector, level);
+ dev_priv->display.set_backlight(connector, level);
}
/* set backlight brightness to level in range [0..max] */
@@ -541,45 +497,89 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
unsigned long flags;
- if (pipe == INVALID_PIPE)
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- freq = intel_panel_get_max_backlight(dev, pipe);
- if (!freq) {
- /* we are screwed, bail out */
- goto out;
- }
+ WARN_ON(panel->backlight.max == 0);
- /* scale to hardware, but be careful to not overflow */
+ /* scale to hardware max, but be careful to not overflow */
+ freq = panel->backlight.max;
if (freq < max)
level = level * freq / max;
else
level = freq / max * level;
- dev_priv->backlight.level = level;
- if (dev_priv->backlight.device)
- dev_priv->backlight.device->props.brightness = level;
+ panel->backlight.level = level;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness = level;
- if (dev_priv->backlight.enabled)
- intel_panel_actually_set_backlight(dev, pipe, level);
-out:
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
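A brief sketch of the range scaling above (self-contained host C with hypothetical values): when the hardware maximum is smaller than the caller's range the multiplication is done first to keep precision, otherwise the division is done first so the 32-bit product cannot overflow.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the freq < max ? multiply-first : divide-first choice above. */
static uint32_t scale_level(uint32_t level, uint32_t max, uint32_t freq)
{
        if (freq < max)
                return level * freq / max;      /* multiply first for precision */
        else
                return freq / max * level;      /* divide first to avoid overflow */
}

int main(void)
{
        /* Hypothetical: user range 0..255, one small and one huge PWM maximum. */
        printf("%u\n", (unsigned int)scale_level(200, 255, 100));
        printf("%u\n", (unsigned int)scale_level(200, 255, 40000000));
        /* In the second case, 200 * 40000000 would not fit in 32 bits. */
        return 0;
}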
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+ intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
- if (pipe == INVALID_PIPE)
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
/*
@@ -593,150 +593,215 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return;
}
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- dev_priv->backlight.enabled = false;
- intel_panel_actually_set_backlight(dev, pipe, 0);
+ panel->backlight.enabled = false;
+ dev_priv->display.disable_backlight(connector);
- if (INTEL_INFO(dev)->gen >= 4) {
- uint32_t reg, tmp;
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL2;
- else if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL2(pipe);
- else
- reg = BLC_PWM_CTL2;
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
- I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
- if (HAS_PCH_SPLIT(dev)) {
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- tmp &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
- }
- }
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* BDW always uses the pch pwm controls. */
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
-void intel_panel_enable_backlight(struct intel_connector *connector)
+static void pch_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- unsigned long flags;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- if (pipe == INVALID_PIPE)
- return;
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ DRM_DEBUG_KMS("pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ POSTING_READ(BLC_PWM_CPU_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ pch_ctl2 = panel->backlight.max << 16;
+ I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ POSTING_READ(BLC_PWM_PCH_CTL1);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
- if (dev_priv->backlight.level == 0) {
- dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
- pipe);
- if (dev_priv->backlight.device)
- dev_priv->backlight.device->props.brightness =
- dev_priv->backlight.level;
+ ctl = I915_READ(BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ WARN(1, "backlight already enabled\n");
+ I915_WRITE(BLC_PWM_CTL, 0);
}
- if (INTEL_INFO(dev)->gen >= 4) {
- uint32_t reg, tmp;
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL2;
- else if (IS_VALLEYVIEW(dev))
- reg = VLV_BLC_PWM_CTL2(pipe);
- else
- reg = BLC_PWM_CTL2;
+ ctl = freq << 17;
+ if (panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
- tmp = I915_READ(reg);
+ I915_WRITE(BLC_PWM_CTL, ctl);
+ POSTING_READ(BLC_PWM_CTL);
- /* Note that this can also get called through dpms changes. And
- * we don't track the backlight dpms state, hence check whether
- * we have to do anything first. */
- if (tmp & BLM_PWM_ENABLE)
- goto set_level;
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
- if (INTEL_INFO(dev)->num_pipes == 3)
- tmp &= ~BLM_PIPE_SELECT_IVB;
- else
- tmp &= ~BLM_PIPE_SELECT;
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2, freq;
- if (cpu_transcoder == TRANSCODER_EDP)
- tmp |= BLM_TRANSCODER_EDP;
- else
- tmp |= BLM_PIPE(cpu_transcoder);
- tmp &= ~BLM_PWM_ENABLE;
-
- I915_WRITE(reg, tmp);
- POSTING_READ(reg);
- I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-
- if (IS_BROADWELL(dev)) {
- /*
- * Broadwell requires PCH override to drive the PCH
- * backlight pin. The above will configure the CPU
- * backlight pin, which we don't plan to use.
- */
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
- } else if (HAS_PCH_SPLIT(dev) &&
- !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- tmp |= BLM_PCH_PWM_ENABLE;
- tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
- }
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
}
-set_level:
- /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
- * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
- * registers are set.
- */
- dev_priv->backlight.enabled = true;
- intel_panel_actually_set_backlight(dev, pipe,
- dev_priv->backlight.level);
+ freq = panel->backlight.max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ I915_WRITE(BLC_PWM_CTL, ctl);
+
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ POSTING_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
}
-/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
-static void intel_panel_init_backlight_regs(struct drm_device *dev)
+static void vlv_enable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 ctl, ctl2;
- if (IS_VALLEYVIEW(dev)) {
- enum pipe pipe;
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ WARN(1, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
- for_each_pipe(pipe) {
- u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ ctl = panel->backlight.max << 16;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
- /* Skip if the modulation freq is already set */
- if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
- continue;
+ /* XXX: combine this into above write? */
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
- cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
- cur_val);
- }
- }
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
-static void intel_panel_init_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
- intel_panel_init_backlight_regs(dev);
+ if (panel->backlight.level == 0) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ panel->backlight.level;
+ }
+
+ dev_priv->display.enable_backlight(connector);
+ panel->backlight.enabled = true;
- dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
- dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
enum drm_connector_status
@@ -762,7 +827,7 @@ intel_panel_detect(struct drm_device *dev)
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static int intel_panel_update_status(struct backlight_device *bd)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
@@ -776,85 +841,362 @@ static int intel_panel_update_status(struct backlight_device *bd)
return 0;
}
-static int intel_panel_get_brightness(struct backlight_device *bd)
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- enum pipe pipe;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->mode_config.mutex);
- pipe = intel_get_pipe_from_connector(connector);
+ ret = intel_panel_get_backlight(connector);
mutex_unlock(&dev->mode_config.mutex);
- if (pipe == INVALID_PIPE)
- return 0;
+ intel_runtime_pm_put(dev_priv);
- return intel_panel_get_backlight(connector->base.dev, pipe);
+ return ret;
}
-static const struct backlight_ops intel_panel_bl_ops = {
- .update_status = intel_panel_update_status,
- .get_brightness = intel_panel_get_brightness,
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_connector *connector)
+static int intel_backlight_device_register(struct intel_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
- unsigned long flags;
- intel_panel_init_backlight(dev);
-
- if (WARN_ON(dev_priv->backlight.device))
+ if (WARN_ON(panel->backlight.device))
return -ENODEV;
+ BUG_ON(panel->backlight.max == 0);
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = dev_priv->backlight.level;
+ props.brightness = panel->backlight.level;
+ props.max_brightness = panel->backlight.max;
- spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- props.max_brightness = intel_panel_get_max_backlight(dev, 0);
- spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
- if (props.max_brightness == 0) {
- DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
- return -ENODEV;
- }
- dev_priv->backlight.device =
+ /*
+ * Note: using the same name independent of the connector prevents
+ * registration of multiple backlight devices in the driver.
+ */
+ panel->backlight.device =
backlight_device_register("intel_backlight",
- connector->kdev,
- to_intel_connector(connector),
- &intel_panel_bl_ops, &props);
+ connector->base.kdev,
+ connector,
+ &intel_backlight_device_ops, &props);
- if (IS_ERR(dev_priv->backlight.device)) {
+ if (IS_ERR(panel->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(dev_priv->backlight.device));
- dev_priv->backlight.device = NULL;
+ PTR_ERR(panel->backlight.device));
+ panel->backlight.device = NULL;
return -ENODEV;
}
return 0;
}
-void intel_panel_destroy_backlight(struct drm_device *dev)
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = bdw_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight.device) {
- backlight_device_unregister(dev_priv->backlight.device);
- dev_priv->backlight.device = NULL;
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+ pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ panel->backlight.max = pch_ctl2 >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = pch_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.max = ctl >> 17;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2, val;
+
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(BLC_PWM_CTL);
+ panel->backlight.max = ctl >> 16;
+ if (panel->backlight.combination_mode)
+ panel->backlight.max *= 0xff;
+
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = i9xx_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe;
+ u32 ctl, ctl2, val;
+
+ for_each_pipe(pipe) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+ /* Skip if the modulation freq is already set */
+ if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+ continue;
+
+ cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ cur_val);
}
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ panel->backlight.max = ctl >> 16;
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = _vlv_get_backlight(dev, PIPE_A);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
}
-#else
+
int intel_panel_setup_backlight(struct drm_connector *connector)
{
- intel_panel_init_backlight(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+ unsigned long flags;
+ int ret;
+
+ /* set level and max in panel struct */
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ ret = dev_priv->display.setup_backlight(intel_connector);
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+ if (ret) {
+ DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+ drm_get_connector_name(connector));
+ return ret;
+ }
+
+ intel_backlight_device_register(intel_connector);
+
+ panel->backlight.present = true;
+
+ DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+ "sysfs interface %sregistered\n",
+ panel->backlight.enabled ? "enabled" : "disabled",
+ panel->backlight.level, panel->backlight.max,
+ panel->backlight.device ? "" : "not ");
+
return 0;
}
-void intel_panel_destroy_backlight(struct drm_device *dev)
+void intel_panel_destroy_backlight(struct drm_connector *connector)
{
- return;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_panel *panel = &intel_connector->panel;
+
+ panel->backlight.present = false;
+ intel_backlight_device_unregister(intel_connector);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode: panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Finds the reduced downclock mode for LVDS/eDP in the EDID probed modes
+ * and returns a duplicate of it, or NULL if none is found.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *scan, *tmp_mode;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ tmp_mode = NULL;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a probed mode has the same resolution as the fixed panel
+ * mode but a lower refresh rate, we have found the reduced
+ * downclock. In that case the different FPx0/1 dividers can be
+ * programmed to dynamically select between the low and high
+ * frequency.
+ */
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock was already found, but keep
+ * looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ tmp_mode = scan;
+ }
+ }
+ }
+
+ if (temp_downclock < fixed_mode->clock)
+ return drm_mode_duplicate(dev, tmp_mode);
+ else
+ return NULL;
+}
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_BROADWELL(dev)) {
+ dev_priv->display.setup_backlight = bdw_setup_backlight;
+ dev_priv->display.enable_backlight = bdw_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = bdw_set_backlight;
+ dev_priv->display.get_backlight = bdw_get_backlight;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.setup_backlight = pch_setup_backlight;
+ dev_priv->display.enable_backlight = pch_enable_backlight;
+ dev_priv->display.disable_backlight = pch_disable_backlight;
+ dev_priv->display.set_backlight = pch_set_backlight;
+ dev_priv->display.get_backlight = pch_get_backlight;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.setup_backlight = vlv_setup_backlight;
+ dev_priv->display.enable_backlight = vlv_enable_backlight;
+ dev_priv->display.disable_backlight = vlv_disable_backlight;
+ dev_priv->display.set_backlight = vlv_set_backlight;
+ dev_priv->display.get_backlight = vlv_get_backlight;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.setup_backlight = i965_setup_backlight;
+ dev_priv->display.enable_backlight = i965_enable_backlight;
+ dev_priv->display.disable_backlight = i965_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ } else {
+ dev_priv->display.setup_backlight = i9xx_setup_backlight;
+ dev_priv->display.enable_backlight = i9xx_enable_backlight;
+ dev_priv->display.disable_backlight = i9xx_disable_backlight;
+ dev_priv->display.set_backlight = i9xx_set_backlight;
+ dev_priv->display.get_backlight = i9xx_get_backlight;
+ }
}
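The table of hooks installed above is a plain function-pointer dispatch: the platform is inspected once at init time and every later backlight operation goes through dev_priv->display without further platform checks. A hypothetical, self-contained miniature of that pattern (not the driver's real structures):

#include <stdio.h>

struct backlight_funcs {
        int  (*setup)(void);
        void (*set)(unsigned int level);
};

static int  vlv_setup(void)             { puts("vlv setup"); return 0; }
static void vlv_set(unsigned int level) { printf("vlv set %u\n", level); }
static int  pch_setup(void)             { puts("pch setup"); return 0; }
static void pch_set(unsigned int level) { printf("pch set %u\n", level); }

int main(void)
{
        int is_valleyview = 1;          /* stand-in for platform detection */
        struct backlight_funcs funcs;

        /* Chosen once, like intel_panel_init_backlight_funcs() does. */
        if (is_valleyview) {
                funcs.setup = vlv_setup;
                funcs.set = vlv_set;
        } else {
                funcs.setup = pch_setup;
                funcs.set = pch_set;
        }

        /* Callers never check the platform again. */
        if (funcs.setup() == 0)
                funcs.set(100);
        return 0;
}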
-#endif
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
@@ -871,4 +1213,8 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+ if (panel->downclock_mode)
+ drm_mode_destroy(intel_connector->base.dev,
+ panel->downclock_mode);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c29c173221..e1fc35a72656 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,7 +30,9 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
@@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
- u32 fbc_ctl, fbc_ctl2;
+ u32 fbc_ctl;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN2(dev))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ if (IS_GEN4(dev)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+ }
/* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
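A quick arithmetic check of the pitch encoding introduced above (self-contained host C, hypothetical pitch value): FBC_CTL takes the compressed framebuffer pitch in 32-byte units on gen2 and 64-byte units on later parts, minus one.

#include <stdio.h>

int main(void)
{
        unsigned int cfb_pitch = 2048;  /* hypothetical pitch in bytes */

        unsigned int gen2_field = (cfb_pitch / 32) - 1;         /* 63 */
        unsigned int later_field = (cfb_pitch / 64) - 1;        /* 31 */

        printf("pitch %u -> gen2 field %u, gen3+ field %u\n",
               cfb_pitch, gen2_field, later_field);
        return 0;
}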
@@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev)
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
@@ -191,7 +197,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
+
+ /* The blitter is part of the media power well on VLV. This
+ * parameter has no impact on other platforms for now. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -202,10 +212,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -214,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -222,12 +232,11 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev))
+ dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
@@ -265,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -295,7 +304,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
sandybridge_blit_fbc_update(dev);
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
@@ -322,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
* the prior work.
*/
if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
+ dev_priv->display.enable_fbc(work->crtc);
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -360,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL;
}
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
@@ -374,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
- dev_priv->display.enable_fbc(crtc, interval);
+ dev_priv->display.enable_fbc(crtc);
return;
}
work->crtc = crtc;
work->fb = crtc->fb;
- work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
@@ -454,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
- if (!I915_HAS_FBC(dev)) {
+ if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
@@ -530,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
- if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
- intel_crtc->plane != 0) {
+ if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+ intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
@@ -595,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_disable_fbc(dev);
}
- intel_enable_fbc(crtc, 500);
+ intel_enable_fbc(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
@@ -817,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
@@ -850,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
@@ -943,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
2,
I915_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i845_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
@@ -958,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -1567,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
- wm_info = &i855_wm_info;
+ wm_info = &i830_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1615,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1667,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
}
}
-static void i830_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1689,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
- &i830_wm_info,
+ &i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1700,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
}
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has it's own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- } else if (INTEL_INFO(dev)->gen >= 6) {
- /* enable FBC WM (except on ILK, where it must remain off) */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
- display_wm, SNB_DISPLAY_MAX_SRWM, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
- cursor_wm, SNB_CURSOR_MAX_SRWM, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- const struct drm_display_mode *adjusted_mode;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
- clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &ironlake_display_wm_info,
- dev_priv->wm.pri_latency[0] * 100,
- &ironlake_cursor_wm_info,
- dev_priv->wm.cur_latency[0] * 100,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &ironlake_display_wm_info,
- dev_priv->wm.pri_latency[0] * 100,
- &ironlake_cursor_wm_info,
- dev_priv->wm.cur_latency[0] * 100,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-static void sandybridge_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB support 3 levels of watermark.
- *
- * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
- * and disabled in the descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.pri_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static void ivybridge_update_wm(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_A;
- }
-
- if (g4x_compute_wm0(dev, PIPE_B,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_B;
- }
-
- if (g4x_compute_wm0(dev, PIPE_C,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << PIPE_C;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB support 3 levels of watermark.
- *
- * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
- * and disabled in the descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- dev_priv->wm.pri_latency[1] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- dev_priv->wm.pri_latency[2] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3, note we have to correct the cursor latency */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.pri_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
- !ironlake_compute_srwm(dev, 3, enabled,
- dev_priv->wm.cur_latency[3] * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
@@ -2185,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
-struct hsw_pipe_wm_parameters {
+struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate;
@@ -2194,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
struct intel_plane_wm_parameters cur;
};
-struct hsw_wm_maximums {
+struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
uint16_t cur;
@@ -2212,7 +1728,7 @@ struct intel_wm_config {
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
@@ -2241,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
@@ -2264,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
@@ -2278,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
@@ -2383,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
- struct hsw_wm_maximums *max)
+ struct ilk_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
}
static bool ilk_validate_wm_level(int level,
- const struct hsw_wm_maximums *max,
+ const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
{
bool ret;
@@ -2434,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
- const struct hsw_pipe_wm_parameters *p,
+ const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2482,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2530,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
static int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
@@ -2582,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
-static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
- struct hsw_pipe_wm_parameters *p,
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+ struct ilk_pipe_wm_parameters *p,
struct intel_wm_config *config)
{
struct drm_device *dev = crtc->dev;
@@ -2593,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
p->active = intel_crtc_active(crtc);
if (p->active) {
- p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
@@ -2620,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
- const struct hsw_pipe_wm_parameters *params,
+ const struct ilk_pipe_wm_parameters *params,
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
@@ -2632,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
.sprites_enabled = params->spr.enabled,
.sprites_scaled = params->spr.scaled,
};
- struct hsw_wm_maximums max;
+ struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+ max_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (params->spr.scaled)
+ max_level = 0;
+
for (level = 0; level <= max_level; level++)
ilk_compute_wm_level(dev_priv, level, params,
&pipe_wm->wm[level]);
- pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
/* At least LP0 must be valid */
return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
@@ -2676,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
* Merge all low power watermarks for all active pipes.
*/
static void ilk_wm_merge(struct drm_device *dev,
- const struct hsw_wm_maximums *max,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
int level, max_level = ilk_wm_max_level(dev);
- merged->fbc_wm_enabled = true;
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ config->num_pipes_active > 1)
+ return;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2701,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
wm->fbc_val = 0;
}
}
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ /*
+ * FIXME this is racy. FBC might get enabled later.
+ * What we should check here is whether FBC can be
+ * enabled sometime later.
+ */
+ if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+ for (level = 2; level <= max_level; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2709,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
-static void hsw_compute_wm_results(struct drm_device *dev,
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ return 2 * level;
+ else
+ return dev_priv->wm.pri_latency[level];
+}
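To make the platform split above concrete, a small illustration (the latency value 12 is an assumption for the example, not taken from this patch):

/* ilk_wm_lp_latency(dev, 3):
 *   HSW/BDW:      returns 2 * 3 == 6
 *   ILK/SNB/IVB:  returns dev_priv->wm.pri_latency[3], e.g. 12 if that is
 *                 what intel_read_wm_latency() reported at init
 */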
+
+static void ilk_compute_wm_results(struct drm_device *dev,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
- struct hsw_wm_values *results)
+ struct ilk_wm_values *results)
{
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2731,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
break;
results->wm_lp[wm_lp - 1] = WM3_LP_EN |
- ((level * 2) << WM1_LP_LATENCY_SHIFT) |
+ (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
@@ -2742,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT;
- results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ WARN_ON(wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+ } else
+ results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
/* LP0 register values */
@@ -2765,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
@@ -2800,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
#define WM_DIRTY_DDB (1 << 25)
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
- const struct hsw_wm_values *old,
- const struct hsw_wm_values *new)
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
{
unsigned int dirty = 0;
enum pipe pipe;
@@ -2851,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
return dirty;
}
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+ previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+ previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+ previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+ I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
+
/*
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
- struct hsw_wm_values *results)
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
{
- struct hsw_wm_values *previous = &dev_priv->wm.hw;
+ struct drm_device *dev = dev_priv->dev;
+ struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
- dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+ dirty = ilk_compute_wm_dirty(dev, previous, results);
if (!dirty)
return;
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
- I915_WRITE(WM3_LP_ILK, 0);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
- I915_WRITE(WM2_LP_ILK, 0);
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
- I915_WRITE(WM1_LP_ILK, 0);
+ _ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2888,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- val = I915_READ(WM_MISC);
- if (results->partitioning == INTEL_DDB_PART_1_2)
- val &= ~WM_MISC_DATA_PARTITION_5_6;
- else
- val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ val = I915_READ(WM_MISC);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~WM_MISC_DATA_PARTITION_5_6;
+ else
+ val |= WM_MISC_DATA_PARTITION_5_6;
+ I915_WRITE(WM_MISC, val);
+ } else {
+ val = I915_READ(DISP_ARB_CTL2);
+ if (results->partitioning == INTEL_DDB_PART_1_2)
+ val &= ~DISP_DATA_PARTITION_5_6;
+ else
+ val |= DISP_DATA_PARTITION_5_6;
+ I915_WRITE(DISP_ARB_CTL2, val);
+ }
}
if (dirty & WM_DIRTY_FBC) {
@@ -2905,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(DISP_ARB_CTL, val);
}
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
- if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
- if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
- if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
-static void haswell_update_wm(struct drm_crtc *crtc)
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_maximums max;
- struct hsw_pipe_wm_parameters params = {};
- struct hsw_wm_values results = {};
+ struct ilk_wm_maximums max;
+ struct ilk_pipe_wm_parameters params = {};
+ struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
- hsw_compute_wm_parameters(crtc, &params, &config);
+ ilk_compute_wm_parameters(crtc, &params, &config);
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
@@ -2945,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc)
intel_crtc->wm.active = pipe_wm;
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, &max, &lp_wm_1_2);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, &max, &lp_wm_5_6);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
- best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -2961,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
- hsw_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(dev_priv, &results);
}
-static void haswell_update_sprite_wm(struct drm_plane *plane,
+static void ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
+ struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
intel_plane->wm.enabled = enabled;
@@ -2978,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(crtc);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (!intel_crtc_active(crtc)) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
- int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- if (!enabled)
- return;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
- pipe_name(pipe));
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[1] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[2] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+ intel_wait_for_vblank(dev, intel_plane->pipe);
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- dev_priv->wm.spr_latency[3] * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
- pipe_name(pipe));
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
+ ilk_update_wm(crtc);
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_pipe_wm *active = &intel_crtc->wm.active;
enum pipe pipe = intel_crtc->pipe;
@@ -3158,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
if (intel_crtc_active(crtc)) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3190,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct hsw_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3204,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev))
+ hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -3430,26 +2891,19 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
- limits = 0;
-
- if (*val >= dev_priv->rps.max_delay)
- *val = dev_priv->rps.max_delay;
- limits |= dev_priv->rps.max_delay << 24;
-
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (*val <= dev_priv->rps.min_delay) {
- *val = dev_priv->rps.min_delay;
+ limits = dev_priv->rps.max_delay << 24;
+ if (val <= dev_priv->rps.min_delay)
limits |= dev_priv->rps.min_delay << 16;
- }
return limits;
}
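A minimal sketch of what the returned limits word encodes, reconstructed from the shifts above (the decode helper is hypothetical, for illustration only):

static inline void decode_rps_limits(u32 limits, u8 *max_delay, u8 *min_delay)
{
	*max_delay = (limits >> 24) & 0xff;	/* upper limit, always programmed */
	*min_delay = (limits >> 16) & 0xff;	/* 0 until val reaches rps.min_delay */
}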
@@ -3549,7 +3003,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits = gen6_rps_limits(dev_priv, &val);
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
@@ -3572,7 +3025,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ gen6_rps_limits(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->info->is_valleyview)
+ if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->info->is_valleyview)
+ if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
@@ -3607,48 +3065,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
- u32 pval;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
- if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
- pval >>= 8;
-
- if (pval != dev_priv->rps.cur_delay)
- DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
- dev_priv->rps.cur_delay,
- vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
- dev_priv->rps.cur_delay = pval;
-}
-
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- gen6_rps_limits(dev_priv, &val);
-
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
- vlv_update_rps_cur_delay(dev_priv);
-
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.cur_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
- vlv_gpu_freq(dev_priv->mem_freq, val), val);
+ vlv_gpu_freq(dev_priv, val), val);
if (val == dev_priv->rps.cur_delay)
return;
@@ -3657,7 +3085,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
dev_priv->rps.cur_delay = val;
- trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+ trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3775,7 +3203,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3832,7 +3260,7 @@ static void gen8_enable_rps(struct drm_device *dev)
gen6_enable_rps_interrupts(dev);
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -3862,7 +3290,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
@@ -3954,7 +3382,7 @@ static void gen6_enable_rps(struct drm_device *dev)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void gen6_update_ring_freq(struct drm_device *dev)
@@ -4065,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -4114,9 +3544,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- valleyview_setup_pctx(dev);
-
- gen6_gt_force_wake_get(dev_priv);
+ /* If VLV, Forcewake all wells, else re-direct to regular path */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -4140,7 +3569,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
- I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4148,65 +3577,47 @@ static void valleyview_enable_rps(struct drm_device *dev)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
+ rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
intel_print_rc6_info(dev, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.cur_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.max_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
dev_priv->rps.max_delay);
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.min_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
dev_priv->rps.min_delay);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv->mem_freq,
- dev_priv->rps.rpe_delay),
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
gen6_enable_rps_interrupts(dev);
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
@@ -4984,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -5019,6 +4432,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5052,9 +4479,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+
+ ilk_init_lp_watermarks(dev);
/*
* Based on the document from hardware guys the following bits
@@ -5161,9 +4587,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5304,28 +4728,40 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
- /* WaSwitchSolVfFArbitrationPriority */
+ /* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
- /* WaPsrDPAMaskVBlankInSRD */
+ /* WaPsrDPAMaskVBlankInSRD:bdw */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
- /* WaPsrDPRSUnmaskVBlankInSRD */
+ /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(i) {
 I915_WRITE(CHICKEN_PIPESL_1(i),
 	   I915_READ(CHICKEN_PIPESL_1(i)) |
 	   DPRS_MASK_VBLANK_SRD);
}
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ I915_WRITE(HDC_CHICKEN0,
+ I915_READ(HDC_CHICKEN0) |
+ _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+
+ /* WaVSRefCountFullforceMissDisable:bdw */
+ /* WaDSRefCountFullforceMissDisable:bdw */
+ I915_WRITE(GEN7_FF_THREAD_MODE,
+ I915_READ(GEN7_FF_THREAD_MODE) &
+ ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5374,9 +4810,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t snpcr;
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5463,6 +4897,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 6) & 3) {
+ case 0:
+ dev_priv->mem_freq = 800;
+ break;
+ case 1:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 2:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 3:
+ dev_priv->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5642,50 +5096,133 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-static bool is_always_on_power_domain(struct drm_device *dev,
- enum intel_display_power_domain domain)
-{
- unsigned long always_on_domains;
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
- BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
-
- if (IS_BROADWELL(dev)) {
- always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
- } else if (IS_HASWELL(dev)) {
- always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
- } else {
- WARN_ON(1);
- return true;
- }
-
- return BIT(domain) & always_on_domains;
-}
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
+static bool hsw_power_well_enabled(struct drm_device *dev,
+ struct i915_power_well *power_well)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+
+ power_domains = &dev_priv->power_domains;
+
+ return power_domains->domain_use_count[domain];
+}
+
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
- if (!HAS_POWER_WELL(dev))
- return true;
+ power_domains = &dev_priv->power_domains;
- if (is_always_on_power_domain(dev, domain))
- return true;
+ is_enabled = true;
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+ mutex_lock(&power_domains->lock);
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->is_enabled(dev, power_well)) {
+ is_enabled = false;
+ break;
+ }
+ }
+ mutex_unlock(&power_domains->lock);
+
+ return is_enabled;
+}
+
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long irqflags;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+}
+
+static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe p;
+ unsigned long irqflags;
+
+ /*
+ * After this, the registers on the pipes that are part of the power
+ * well will become zero, so we have to adjust our counters according to
+ * that.
+ *
+ * FIXME: Should we do this in general in drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->vblank[p].last = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+static void hsw_set_power_well(struct drm_device *dev,
+ struct i915_power_well *power_well, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
- unsigned long irqflags;
uint32_t tmp;
WARN_ON(dev_priv->pc8.enabled);
@@ -5706,42 +5243,14 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
DRM_ERROR("Timeout enabling power well\n");
}
- if (IS_BROADWELL(dev)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
- dev_priv->de_irq_mask[PIPE_B]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
- ~dev_priv->de_irq_mask[PIPE_B] |
- GEN8_PIPE_VBLANK);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
- dev_priv->de_irq_mask[PIPE_C]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
- ~dev_priv->de_irq_mask[PIPE_C] |
- GEN8_PIPE_VBLANK);
- POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ hsw_power_well_post_enable(dev_priv);
} else {
if (enable_requested) {
- enum pipe p;
-
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
- /*
- * After this, the registers on the pipes that are part
- * of the power well will become zero, so we have to
- * adjust our counters according to that.
- *
- * FIXME: Should we do this in general in
- * drm_vblank_post_modeset?
- */
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for_each_pipe(p)
- if (p != PIPE_A)
- dev->vblank[p].last = 0;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ hsw_power_well_post_disable(dev_priv);
}
}
}
@@ -5751,9 +5260,9 @@ static void __intel_power_well_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!power_well->count++) {
+ if (!power_well->count++ && power_well->set) {
hsw_disable_package_c8(dev_priv);
- __intel_set_power_well(dev, true);
+ power_well->set(dev, power_well, true);
}
}
@@ -5763,8 +5272,10 @@ static void __intel_power_well_put(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well) {
- __intel_set_power_well(dev, false);
+
+ if (!--power_well->count && power_well->set &&
+ i915_disable_power_well) {
+ power_well->set(dev, power_well, false);
hsw_enable_package_c8(dev_priv);
}
}
@@ -5774,17 +5285,18 @@ void intel_display_power_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
-
- if (!HAS_POWER_WELL(dev))
- return;
-
- if (is_always_on_power_domain(dev, domain))
- return;
+ struct i915_power_well *power_well;
+ int i;
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
- __intel_power_well_get(dev, &power_domains->power_wells[0]);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains)
+ __intel_power_well_get(dev, power_well);
+
+ power_domains->domain_use_count[domain]++;
+
mutex_unlock(&power_domains->lock);
}
@@ -5793,17 +5305,19 @@ void intel_display_power_put(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
-
- if (!HAS_POWER_WELL(dev))
- return;
-
- if (is_always_on_power_domain(dev, domain))
- return;
+ struct i915_power_well *power_well;
+ int i;
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
- __intel_power_well_put(dev, &power_domains->power_wells[0]);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ __intel_power_well_put(dev, power_well);
+
mutex_unlock(&power_domains->lock);
}
@@ -5819,10 +5333,7 @@ void i915_request_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
-
- mutex_lock(&hsw_pwr->lock);
- __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
- mutex_unlock(&hsw_pwr->lock);
+ intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
@@ -5836,24 +5347,71 @@ void i915_release_power_well(void)
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
-
- mutex_lock(&hsw_pwr->lock);
- __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
- mutex_unlock(&hsw_pwr->lock);
+ intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ },
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ },
+ {
+ .name = "display",
+ .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+ .is_enabled = hsw_power_well_enabled,
+ .set = hsw_set_power_well,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ },
+ {
+ .name = "display",
+ .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+ .is_enabled = hsw_power_well_enabled,
+ .set = hsw_set_power_well,
+ },
+};
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
int intel_power_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
mutex_init(&power_domains->lock);
- hsw_pwr = power_domains;
- power_well = &power_domains->power_wells[0];
- power_well->count = 0;
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
return 0;
}
@@ -5868,15 +5426,13 @@ static void intel_power_domains_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
-
- if (!HAS_POWER_WELL(dev))
- return;
+ int i;
mutex_lock(&power_domains->lock);
-
- power_well = &power_domains->power_wells[0];
- __intel_set_power_well(dev, power_well->count > 0);
-
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->set)
+ power_well->set(dev, power_well, power_well->count > 0);
+ }
mutex_unlock(&power_domains->lock);
}
@@ -5890,13 +5446,13 @@ void intel_power_domains_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_POWER_WELL(dev))
- return;
-
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev, true);
intel_power_domains_resume(dev);
+ if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+ return;
+
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@ -5914,31 +5470,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
hsw_enable_package_c8(dev_priv);
}
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
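A sketch of how callers would pair the new runtime PM helpers (hypothetical usage, not taken from this patch; the autosuspend delay referenced is the 10s value configured in intel_init_runtime_pm() below):

static void example_touch_hw(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);		/* wakes the device synchronously */
	/* ... access hardware registers ... */
	intel_runtime_pm_put(dev_priv);		/* marks busy; autosuspend idles it later */
}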
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ dev_priv->pm.suspended = false;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_FBC(dev)) {
+ if (INTEL_INFO(dev)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- dev_priv->display.enable_fbc =
- gen7_enable_fbc;
- else
- dev_priv->display.enable_fbc =
- ironlake_enable_fbc;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
+ } else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- /* 855GM needs testing */
}
/* For cxsr */
@@ -5951,58 +5562,27 @@ void intel_init_pm(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
intel_setup_wm_latency(dev);
- if (IS_GEN5(dev)) {
- if (dev_priv->wm.pri_latency[1] &&
- dev_priv->wm.spr_latency[1] &&
- dev_priv->wm.cur_latency[1])
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+ (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+ dev_priv->display.update_wm = ilk_update_wm;
+ dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ }
+
+ if (IS_GEN5(dev))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
- } else if (IS_GEN6(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_GEN6(dev))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- } else if (IS_IVYBRIDGE(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = ivybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_IVYBRIDGE(dev))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- } else if (IS_HASWELL(dev)) {
- if (dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] &&
- dev_priv->wm.cur_latency[0]) {
- dev_priv->display.update_wm = haswell_update_wm;
- dev_priv->display.update_sprite_wm =
- haswell_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
+ else if (IS_HASWELL(dev))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- } else if (INTEL_INFO(dev)->gen == 8) {
+ else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
- } else
- dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
@@ -6036,21 +5616,21 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
+ } else if (IS_GEN2(dev)) {
+ if (INTEL_INFO(dev)->num_pipes == 1) {
+ dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
+ } else {
+ dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ if (IS_I85X(dev) || IS_I865G(dev))
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ else
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ } else {
+ DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
}
@@ -6101,59 +5681,48 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(int ddr_freq, int val)
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int mult, base;
+ int div;
- switch (ddr_freq) {
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
case 800:
- mult = 20;
- base = 120;
+ div = 10;
break;
case 1066:
- mult = 22;
- base = 133;
+ div = 12;
break;
case 1333:
- mult = 21;
- base = 125;
+ div = 16;
break;
default:
return -1;
}
- return ((val - 0xbd) * mult) + base;
+ return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(int ddr_freq, int val)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mult, base;
+ int mul;
- switch (ddr_freq) {
+ /* 4 x czclk */
+ switch (dev_priv->mem_freq) {
case 800:
- mult = 20;
- base = 120;
+ mul = 10;
break;
case 1066:
- mult = 22;
- base = 133;
+ mul = 12;
break;
case 1333:
- mult = 21;
- base = 125;
+ mul = 16;
break;
default:
return -1;
}
- val /= mult;
- val -= base / mult;
- val += 0xbd;
-
- if (val > 0xea)
- val = 0xea;
-
- return val;
+ return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
void intel_pm_setup(struct drm_device *dev)
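The reworked vlv_gpu_freq()/vlv_freq_opcode() pair above drops the per-DDR-speed multiplier/base tables in favour of a single DIV_ROUND_CLOSEST() expression keyed off dev_priv->mem_freq. A minimal standalone sketch of just that arithmetic (the divisors and the 0xbd/6 offsets are copied from the hunk; the harness and the sample opcode are illustrative only):

#include <stdio.h>

/* Userspace stand-in for the kernel macro (positive operands only). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* mem_freq = 1066 selects div/mul = 12 in the patch. */
static int vlv_gpu_freq_sketch(int mem_freq, int div, int val)
{
	return DIV_ROUND_CLOSEST(mem_freq * (val + 6 - 0xbd), 4 * div);
}

static int vlv_freq_opcode_sketch(int mem_freq, int mul, int mhz)
{
	return DIV_ROUND_CLOSEST(4 * mul * mhz, mem_freq) + 0xbd - 6;
}

int main(void)
{
	int opcode = 0xc8;	/* arbitrary Punit frequency opcode */
	int mhz = vlv_gpu_freq_sketch(1066, 12, opcode);

	/* Round-trips back to 0xc8 for this value. */
	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n",
	       opcode, mhz, vlv_freq_opcode_sketch(1066, 12, mhz));
	return 0;
}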
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c2f09d456300..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
if (!ring->fbc_dirty)
return 0;
- ret = intel_ring_begin(ring, 4);
+ ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
- intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, MSG_FBC_REND_STATE);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_advance(ring);
ring->fbc_dirty = false;
@@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (flush_domains)
+ if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
@@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
int ret = 0;
u32 head;
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -661,19 +663,22 @@ gen6_add_request(struct intel_ring_buffer *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *useless;
- int i, ret;
+ int i, ret, num_dwords = 4;
- ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
- MBOX_UPDATE_DWORDS) +
- 4);
+ if (i915_semaphore_is_enabled(dev))
+ num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
- for_each_ring(useless, dev_priv, i) {
- u32 mbox_reg = ring->signal_mbox[i];
- if (mbox_reg != GEN6_NOSYNC)
- update_mboxes(ring, mbox_reg);
+ if (i915_semaphore_is_enabled(dev)) {
+ for_each_ring(useless, dev_priv, i) {
+ u32 mbox_reg = ring->signal_mbox[i];
+ if (mbox_reg != GEN6_NOSYNC)
+ update_mboxes(ring, mbox_reg);
+ }
}
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
@@ -1030,11 +1035,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- /* It looks like we need to prevent the gt from suspending while waiting
- * for an notifiy irq, otherwise irqs seem to get lost on at least the
- * blt/bsd rings on ivb. */
- gen6_gt_force_wake_get(dev_priv);
-
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1066,8 +1066,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1611,8 +1609,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
- int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+ int bytes)
{
int ret;
@@ -1628,7 +1626,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
return ret;
}
- ring->space -= bytes;
return 0;
}
@@ -1643,12 +1640,38 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
+ ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
- return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+ ring->space -= num_dwords * sizeof(uint32_t);
+ return 0;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+ int ret;
+
+ if (num_dwords == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, num_dwords);
+ if (ret)
+ return ret;
+
+ while (num_dwords--)
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return 0;
}
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
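The new intel_ring_cacheline_align() above pads the ring tail out to a 64-byte cacheline with MI_NOOPs before emitting commands. A standalone sketch of just the dword arithmetic, with made-up tail offsets:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the expression in intel_ring_cacheline_align(): 64-byte
 * cachelines, 4 bytes per dword.  With this formula an already aligned
 * tail still yields 16, i.e. a full cacheline of NOOPs rather than 0. */
static int noop_dwords(uint32_t tail)
{
	return (64 - (tail & 63)) / sizeof(uint32_t);
}

int main(void)
{
	uint32_t tails[] = { 0x1000, 0x1004, 0x103c };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("tail 0x%04x -> %d NOOP dwords\n",
		       (unsigned)tails[i], noop_dwords(tails[i]));
	return 0;
}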
@@ -1838,7 +1861,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && flush)
+ if (IS_GEN7(dev) && !invalidate && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4fe252..0b243ce33714 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a583e8f718a7..95bdfb3c431c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -413,23 +413,34 @@ static const struct _sdvo_cmd_name {
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- int i;
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
- DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(intel_sdvo), cmd);
- for (i = 0; i < args_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
- if (i == ARRAY_SIZE(sdvo_cmd_names))
- DRM_LOG_KMS("(%02X)", cmd);
- DRM_LOG_KMS("\n");
+ if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+ BUF_PRINT("(%02X)", cmd);
+ }
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
static const char *cmd_status_names[] = {
@@ -512,9 +523,10 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
{
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
- int i;
+ int i, pos = 0;
+#define BUF_LEN 256
+ char buffer[BUF_LEN];
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
/*
* The documentation states that all commands will be
@@ -551,10 +563,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
}
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ BUF_PRINT("(%s)", cmd_status_names[status]);
else
- DRM_LOG_KMS("(??? %d)", status);
+ BUF_PRINT("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -565,13 +580,17 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- DRM_LOG_KMS("\n");
+ BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
log_fail:
- DRM_LOG_KMS("... failed\n");
+ DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
return false;
}
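Both SDVO debug hunks above stop emitting a stream of DRM_LOG_KMS() fragments and instead accumulate the whole transaction into a local buffer that is printed with one DRM_DEBUG_KMS() call. A small userspace sketch of that accumulate-then-print pattern; BUF_LEN and BUF_PRINT follow the patch, while the byte values and the max_t() stand-in are illustrative:

#include <stdio.h>

#define BUF_LEN 256
/* userspace stand-in for the kernel's max_t() */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	char buffer[BUF_LEN];
	int pos = 0, i;
	unsigned char payload[3] = { 0x10, 0x20, 0x30 };

	/* snprintf() returns what it would have written, so pos can run past
	 * BUF_LEN; clamping the size argument at 0 keeps later calls from
	 * being handed a negative size cast to a huge size_t. */
#define BUF_PRINT(args...) \
	(pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args))

	for (i = 0; i < 3; i++)
		BUF_PRINT("%02X ", payload[i]);
	BUF_PRINT("(%s)", "SDVO_CMD_EXAMPLE");

	printf("W: %s\n", buffer);	/* stand-in for DRM_DEBUG_KMS() */
	return 0;
}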
@@ -933,7 +952,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned if_index, uint8_t tx_rate,
- uint8_t *data, unsigned length)
+ const uint8_t *data, unsigned length)
{
uint8_t set_buf_index[2] = { if_index, 0 };
uint8_t hbuf_size, tmp[8];
@@ -1517,8 +1536,9 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
intel_modeset_check_state(connector->dev);
}
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 770bdd6ecd9f..2e2d4eb4a00d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -59,7 +59,7 @@ struct intel_sdvo_caps {
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
-} __attribute__((packed));
+} __packed;
/* Note: SDVO detailed timing flags match EDID misc flags. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
@@ -94,12 +94,12 @@ struct intel_sdvo_dtd {
u8 v_sync_off_high;
u8 reserved;
} part2;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_preferred_input_timing_args {
u16 clock;
@@ -108,7 +108,7 @@ struct intel_sdvo_preferred_input_timing_args {
u8 interlace:1;
u8 scaled:1;
u8 pad:6;
-} __attribute__((packed));
+} __packed;
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
@@ -162,7 +162,7 @@ struct intel_sdvo_get_trained_inputs_response {
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
@@ -219,7 +219,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
unsigned int ambient_light_interrupt:1;
unsigned int hdmi_audio_encrypt_change:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
/**
* Selects which input is affected by future input commands.
@@ -232,7 +232,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
struct intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
/**
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
@@ -370,7 +370,7 @@ struct intel_sdvo_tv_format {
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:3;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_TV_FORMAT 0x28
@@ -401,7 +401,7 @@ struct intel_sdvo_sdtv_resolution_request {
unsigned int secam_l:1;
unsigned int secam_60:1;
unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_320x200:1;
@@ -426,7 +426,7 @@ struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_1024x768:1;
unsigned int res_1280x1024:1;
unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
/* Get supported resolution with square pixel aspect ratio that can be
scaled for the requested HDTV format */
@@ -463,7 +463,7 @@ struct intel_sdvo_hdtv_resolution_request {
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_640x480:1;
@@ -517,7 +517,7 @@ struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_1280x768:1;
unsigned int pad5:7;
-} __attribute__((packed));
+} __packed;
/* Get supported power state returns info for encoder and monitor, rely on
last SetTargetInput and SetTargetOutput calls */
@@ -557,13 +557,13 @@ struct sdvo_panel_power_sequencing {
unsigned int t4_high:2;
unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
u8 max_value;
u8 default_value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
@@ -573,14 +573,14 @@ struct sdvo_get_ambient_light_reply {
u16 trip_low;
u16 trip_high;
u16 value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
u16 trip_low;
u16 trip_high;
unsigned int enable:1;
unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
/* Set display power state */
#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
@@ -608,7 +608,7 @@ struct intel_sdvo_enhancements_reply {
unsigned int dither:1;
unsigned int tv_chroma_filter:1;
unsigned int tv_luma_filter:1;
-} __attribute__((packed));
+} __packed;
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
@@ -630,7 +630,7 @@ struct intel_sdvo_enhancements_reply {
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
@@ -671,7 +671,7 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
-} __attribute__((packed));
+} __packed;
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
@@ -727,4 +727,4 @@ struct intel_sdvo_enhancements_arg {
struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
-} __attribute__ ((packed));
+} __packed;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9944d8135e87..0954f132726e 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -90,6 +90,22 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
mutex_unlock(&dev_priv->dpio_lock);
}
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ PUNIT_OPCODE_REG_READ, reg, &val);
+
+ return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+ PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
@@ -160,27 +176,18 @@ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
-static u32 vlv_get_phy_port(enum pipe pipe)
-{
- u32 port = IOSF_PORT_DPIO;
-
- WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
-
- return port;
-}
-
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_WRITE, reg, &val);
}
@@ -242,3 +249,17 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
return;
}
}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val = 0;
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+ DPIO_OPCODE_REG_READ, reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+ DPIO_OPCODE_REG_WRITE, reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b9fabf826f7d..716a3c9c0751 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -104,6 +104,12 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
break;
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ sprctl |= SP_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
@@ -135,8 +141,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
- sprsurf_offset);
+ I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+ sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -152,7 +158,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+ I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
@@ -224,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
sprctl = I915_READ(SPRCTL(pipe));
@@ -257,6 +262,12 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
BUG();
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ sprctl |= SPRITE_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
@@ -279,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
- /*
- * IVB workaround: must disable low power watermarks for at least
- * one frame before enabling scaling. LP watermarks can be re-enabled
- * when scaling is disabled.
- */
- if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled |= 1 << pipe;
-
- if (!scaling_was_enabled) {
- intel_update_watermarks(crtc);
- intel_wait_for_vblank(dev, pipe);
- }
+ if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- } else
- dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -317,13 +315,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+ I915_WRITE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
-
- /* potentially re-enable LP watermarks */
- if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(crtc);
}
static void
@@ -333,23 +327,22 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
- bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+ I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
- dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
-
- /* potentially re-enable LP watermarks */
- if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
- intel_update_watermarks(crtc);
}
static int
@@ -453,6 +446,12 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
BUG();
}
+ /*
+ * Enable gamma to match primary/cursor plane behaviour.
+ * FIXME should be user controllable via properties.
+ */
+ dvscntr |= DVS_GAMMA_ENABLE;
+
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
@@ -470,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_h--;
dvsscale = 0;
- if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+ if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -490,8 +489,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+ I915_WRITE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -507,9 +506,15 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+ I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_wait_for_vblank(dev, pipe);
+
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
@@ -643,6 +648,15 @@ format_is_yuv(uint32_t format)
}
}
+static bool colorkey_enabled(struct intel_plane *intel_plane)
+{
+ struct drm_intel_sprite_colorkey key;
+
+ intel_plane->get_colorkey(&intel_plane->base, &key);
+
+ return key.flags != I915_SET_COLORKEY_NONE;
+}
+
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -828,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
- disable_primary = drm_rect_equals(&dst, &clip);
+ disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
WARN_ON(disable_primary && !visible && intel_crtc->active);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 25cbe073c388..87df68f5f504 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -64,7 +64,8 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -89,7 +90,8 @@ static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, ECOBUS);
}
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
u32 forcewake_ack;
@@ -121,12 +123,12 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
u32 gtfifodbg;
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+ if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
@@ -134,7 +136,8 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@ -147,12 +150,19 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
+ /* On VLV, FIFO will be shared by both SW and HW.
+ * So, we need to read the FREE_ENTRIES every time */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ dev_priv->uncore.fifo_count =
+ __raw_i915_read32(dev_priv, GTFIFOCTL) &
+ GT_FIFO_FREE_ENTRIES_MASK;
+
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
- u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
- fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
@@ -171,38 +181,112 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
- if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for media to ack.\n");
+ }
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
+
}
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
{
- __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+ int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void gen6_force_wake_work(struct work_struct *work)
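vlv_force_wake_get()/vlv_force_wake_put() above keep separate reference counts per forcewake domain so the render and media wells can be held independently. A compressed sketch of that refcount discipline, with plain globals standing in for the uncore spinlock state and wake_hw()/sleep_hw() as made-up stand-ins for the register writes:

#include <assert.h>
#include <stdio.h>

#define FW_RENDER (1 << 0)
#define FW_MEDIA  (1 << 1)

static int rendercount, mediacount;

static void wake_hw(int domain)  { printf("wake 0x%x\n", domain); }
static void sleep_hw(int domain) { printf("sleep 0x%x\n", domain); }

/* Only the 0 -> 1 transition touches the hardware... */
static void fw_get(int engines)
{
	if ((engines & FW_RENDER) && rendercount++ == 0)
		wake_hw(FW_RENDER);
	if ((engines & FW_MEDIA) && mediacount++ == 0)
		wake_hw(FW_MEDIA);
}

/* ...and only the 1 -> 0 transition releases it again. */
static void fw_put(int engines)
{
	if (engines & FW_RENDER) {
		assert(rendercount > 0);
		if (--rendercount == 0)
			sleep_hw(FW_RENDER);
	}
	if (engines & FW_MEDIA) {
		assert(mediacount > 0);
		if (--mediacount == 0)
			sleep_hw(FW_MEDIA);
	}
}

int main(void)
{
	fw_get(FW_RENDER | FW_MEDIA);
	fw_get(FW_RENDER);		/* nested: no second hardware wake */
	fw_put(FW_RENDER);
	fw_put(FW_RENDER | FW_MEDIA);
	return 0;
}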
@@ -213,7 +297,7 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -248,6 +332,11 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
+ /* clear out old GT FIFO errors */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ __raw_i915_write32(dev_priv, GTFIFODBG,
+ __raw_i915_read32(dev_priv, GTFIFODBG));
+
intel_uncore_forcewake_reset(dev);
}
@@ -256,8 +345,6 @@ void intel_uncore_sanitize(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_val;
- intel_uncore_forcewake_reset(dev);
-
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -281,29 +368,40 @@ void intel_uncore_sanitize(struct drm_device *dev)
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
+ intel_runtime_pm_get(dev_priv);
+
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ return vlv_force_wake_get(dev_priv, fw_engine);
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
* see gen6_gt_force_wake_get()
*/
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
+ /* Redirect to VLV specific routine */
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ return vlv_force_wake_put(dev_priv, fw_engine);
+
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
@@ -312,6 +410,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
/* We give fast paths for the really cool registers */
@@ -346,6 +446,13 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
}
}
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
+}
+
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
@@ -379,16 +486,51 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
- dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
REG_READ_FOOTER; \
}
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ unsigned *fwcount; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
+ fwengine = FORCEWAKE_RENDER; \
+ fwcount = &dev_priv->uncore.fw_rendercount; \
+ } \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
+ fwengine = FORCEWAKE_MEDIA; \
+ fwcount = &dev_priv->uncore.fw_mediacount; \
+ } \
+ if (fwengine != 0) { \
+ if ((*fwcount)++ == 0) \
+ (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+ fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (--(*fwcount) == 0) \
+ (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+ fwengine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
@@ -402,6 +544,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
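The __vlv_read() accessors above choose a forcewake domain from the register offset instead of always waking both wells. A toy sketch of that range-based dispatch; the offsets and range bounds below are placeholders, not the values behind the FORCEWAKE_VLV_*_RANGE_OFFSET() macros defined elsewhere in the driver:

#include <stdio.h>
#include <stdint.h>

enum fw_domain { FW_NONE = 0, FW_RENDER, FW_MEDIA };

/* Placeholder ranges only, for illustration. */
static enum fw_domain domain_for_reg(uint32_t reg)
{
	if (reg >= 0x2000 && reg < 0x4000)
		return FW_RENDER;
	if (reg >= 0x22000 && reg < 0x24000)
		return FW_MEDIA;
	return FW_NONE;		/* read without waking a power well */
}

int main(void)
{
	uint32_t regs[] = { 0x2030, 0x22030, 0x130090 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("reg 0x%06x -> domain %d\n",
		       (unsigned)regs[i], domain_for_reg(regs[i]));
	return 0;
}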
@@ -413,12 +556,15 @@ __gen4_read(64)
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+#define REG_WRITE_FOOTER \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __gen5_write(x) \
@@ -427,7 +573,7 @@ gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
REG_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __gen6_write(x) \
@@ -438,11 +584,12 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
#define __hsw_write(x) \
@@ -453,13 +600,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
@@ -486,16 +634,18 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
- bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+ bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ FORCEWAKE_ALL); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
if (__needs_put) { \
- dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ FORCEWAKE_ALL); \
} \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ REG_WRITE_FOOTER; \
}
__gen8_write(8)
@@ -524,6 +674,7 @@ __gen4_write(64)
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
+#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
void intel_uncore_init(struct drm_device *dev)
@@ -534,8 +685,8 @@ void intel_uncore_init(struct drm_device *dev)
gen6_force_wake_work);
if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
- dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
@@ -552,9 +703,9 @@ void intel_uncore_init(struct drm_device *dev)
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
+ __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
+ __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
@@ -601,10 +752,18 @@ void intel_uncore_init(struct drm_device *dev)
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
}
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_readb = vlv_read8;
+ dev_priv->uncore.funcs.mmio_readw = vlv_read16;
+ dev_priv->uncore.funcs.mmio_readl = vlv_read32;
+ dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ } else {
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
@@ -646,7 +805,7 @@ static const struct register_whitelist {
uint32_t size;
uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -687,6 +846,43 @@ int i915_reg_read_ioctl(struct drm_device *dev,
return 0;
}
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+ if (IS_ERR(hs)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(hs);
+ }
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
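i915_get_reset_stats_ioctl() added above lets userspace query per-context hang statistics. A hypothetical caller-side sketch: the DRM_IOCTL_I915_GET_RESET_STATS request name and the uapi struct layout are assumed to match the fields the handler fills in (they are not part of this hunk), so treat this as illustrative only:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_reset_stats stats;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = 0;	/* assumed DEFAULT_CONTEXT_ID; that path needs CAP_SYS_ADMIN */

	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
		printf("resets=%u active=%u pending=%u\n",
		       stats.reset_count, stats.batch_active, stats.batch_pending);

	close(fd);
	return 0;
}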
@@ -770,12 +966,12 @@ static int gen6_do_reset(struct drm_device *dev)
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
else
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
/* Restore fifo count */
- dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
@@ -793,15 +989,6 @@ int intel_gpu_reset(struct drm_device *dev)
}
}
-void intel_uncore_clear_errors(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* XXX needs spinlock around caller's grouping */
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 087db33f6cff..c3bf059ba720 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -1075,10 +1075,10 @@ static int mga_dma_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i],
+ if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i],
+ if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index ca4bc54ea214..fe453213600a 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -186,14 +186,14 @@ extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
extern void mga_driver_irq_preinstall(struct drm_device *dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
extern void mga_driver_irq_uninstall(struct drm_device *dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
+#define mga_flush_write_combine() wmb()
#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 709e90db8c40..86b4bb804852 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include <drm/mga_drm.h>
+#include "mga_drv.h"
typedef struct drm32_mga_init {
int func;
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 2b0ceb8dc11b..1b071b8ff9dc 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -47,7 +47,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
}
-irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t mga_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
MGA_WRITE(MGA_PRIMEND, prim_end);
atomic_inc(&dev_priv->last_fence_retired);
- DRM_WAKEUP(&dev_priv->fence_queue);
+ wake_up(&dev_priv->fence_queue);
handled = 1;
}
@@ -128,7 +128,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using fences.
*/
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- *sequence) <= (1 << 23)));
@@ -151,7 +151,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fence_queue);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 37cc2fb4eadd..314685b7f41f 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1029,7 +1029,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 801731aeab61..9f9780b7ddf0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev)
{
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- mgag200_bo_unpin(mdev->cursor.pixels_1);
- mgag200_bo_unpin(mdev->cursor.pixels_2);
+ if (mdev->cursor.pixels_1->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_1);
+ if (mdev->cursor.pixels_2->pin_count)
+ mgag200_bo_unpin(mdev->cursor.pixels_2);
}
int mga_crtc_cursor_set(struct drm_crtc *crtc,
@@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width,
uint32_t height)
{
- struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
+ struct drm_device *dev = crtc->dev;
struct mga_device *mdev = (struct mga_device *)dev->dev_private;
struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 964f58cee5ea..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!in_interrupt())
+ if (drm_can_sleep())
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
@@ -282,6 +282,11 @@ int mgag200_fbdev_init(struct mga_device *mdev)
{
struct mga_fbdev *mfbdev;
int ret;
+ int bpp_sel = 32;
+
+ /* prefer 16bpp on low end gpus with limited VRAM */
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ bpp_sel = 16;
mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
if (!mfbdev)
@@ -301,7 +306,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
- drm_fb_helper_initial_config(&mfbdev->helper, 32);
+ drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1120cb1db6d..26868e5c55b0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -217,7 +217,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- dev->mode_config.preferred_depth = 24;
+ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ dev->mode_config.preferred_depth = 16;
+ else
+ dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
r = mgag200_modeset_init(mdev);
@@ -310,7 +313,7 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-void mgag200_bo_unref(struct mgag200_bo **bo)
+static void mgag200_bo_unref(struct mgag200_bo **bo)
{
struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee6ed633b7b1..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -691,7 +691,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
CRTCEXT0 has to be programmed last to trigger an update and make the
new addr variable take effect.
*/
-void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct mga_device *mdev = crtc->dev->dev_private;
u32 addr;
@@ -1398,7 +1398,7 @@ static void mga_encoder_commit(struct drm_encoder *encoder)
{
}
-void mga_encoder_destroy(struct drm_encoder *encoder)
+static void mga_encoder_destroy(struct drm_encoder *encoder)
{
struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
drm_encoder_cleanup(encoder);
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
@@ -1558,7 +1558,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 07b192fe15c6..adb5166a5dfd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -80,7 +80,7 @@ static int mgag200_ttm_global_init(struct mga_device *ast)
return 0;
}
-void
+static void
mgag200_ttm_global_release(struct mga_device *ast)
{
if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &mgag200_bo_ttm_destroy)
return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
};
-struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f39ab7554fc9..c69d1e07a3a6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,8 +2,8 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_MSM
- depends on ARCH_MSM8960
+ depends on MSM_IOMMU
+ depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e5fa12b0d21e..4f977a593bea 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -12,18 +12,27 @@ msm-y := \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
- mdp4/mdp4_crtc.o \
- mdp4/mdp4_dtv_encoder.o \
- mdp4/mdp4_format.o \
- mdp4/mdp4_irq.o \
- mdp4/mdp4_kms.o \
- mdp4/mdp4_plane.o \
+ hdmi/hdmi_phy_8x74.o \
+ mdp/mdp_format.o \
+ mdp/mdp_kms.o \
+ mdp/mdp4/mdp4_crtc.o \
+ mdp/mdp4/mdp4_dtv_encoder.o \
+ mdp/mdp4/mdp4_irq.o \
+ mdp/mdp4/mdp4_kms.o \
+ mdp/mdp4/mdp4_plane.o \
+ mdp/mdp5/mdp5_crtc.o \
+ mdp/mdp5/mdp5_encoder.o \
+ mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_plane.o \
+ mdp/mdp5/mdp5_smp.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
msm_gpu.o \
+ msm_iommu.o \
msm_ringbuffer.o
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
index e036f6c1db94..9c4255b98021 100644
--- a/drivers/gpu/drm/msm/NOTES
+++ b/drivers/gpu/drm/msm/NOTES
@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different
display controller blocks at play:
+ MDP3 - ?? seems to be what is on geeksphone peak device
+ MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
- + MDSS - snapdragon 800
+ + MDP5 - snapdragon 800
(I don't have a completely clear picture on which display controller
maps to which part #)
@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors
may have their own irqs which they install themselves. For this reason
the display controller is the "master" device.
+For MDP5, the mapping is:
+
+ plane -> PIPE{RGBn,VIGn} \
+ crtc -> LM (layer mixer) |-> MDP "device"
+ encoder -> INTF /
+ connector -> HDMI/DSI/eDP/etc --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc. (I.e. the
+register interface is the same, just with different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
Each connector probably ends up being a separate device, just for the
logistics of finding/mapping io region, irq, etc. Ideally we would
have a better way than just stashing the platform device in a global
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9588098741b5..85d615e7d62f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6,
};
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
enum sq_tex_clamp {
SQ_TEX_WRAP = 0,
SQ_TEX_MIRROR = 1,
@@ -238,6 +245,92 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+#define REG_A2XX_MH_MMU_CONFIG 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+
+#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+
+#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END 0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL 0x00000394
+
#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
@@ -276,20 +369,6 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
-#define REG_A2XX_CP_ST_BASE 0x0000044d
-
-#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
-
-#define REG_A2XX_CP_IB1_BASE 0x00000458
-
-#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
-
-#define REG_A2XX_CP_IB2_BASE 0x0000045a
-
-#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
-
-#define REG_A2XX_CP_STAT 0x0000047f
-
#define REG_A2XX_RBBM_STATUS 0x000005d0
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
+
#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
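
The generated helpers above all follow the same pattern: each bitfield gets a
__MASK/__SHIFT pair plus an inline packing function, and complete register values
are built by OR-ing the packed fields together. A minimal illustration for the new
MH_MMU_CONFIG definitions (assuming a struct msm_gpu *gpu in scope; the chosen
behaviors are arbitrary examples, not a recommended configuration):

	/* illustrative only: pack a few MH_MMU_CONFIG fields and write the reg */
	uint32_t cfg = A2XX_MH_MMU_CONFIG_MMU_ENABLE |
		A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_FLT);

	gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, cfg);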
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index d4afdf657559..a7be56163d23 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -292,6 +293,8 @@ enum a3xx_tex_type {
#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
+
#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
@@ -304,6 +307,8 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
+
#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
-#define REG_A3XX_UNKNOWN_20E8 0x000020e8
+#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
-#define REG_A3XX_UNKNOWN_20E9 0x000020e9
+#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
-#define REG_A3XX_UNKNOWN_20EA 0x000020ea
+#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
-#define REG_A3XX_UNKNOWN_20EB 0x000020eb
+#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
-#define REG_A3XX_UNKNOWN_2101 0x00002101
+#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
}
-#define REG_A3XX_UNKNOWN_2105 0x00002105
+#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
-#define REG_A3XX_UNKNOWN_2106 0x00002106
+#define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106
-#define REG_A3XX_UNKNOWN_2107 0x00002107
+#define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107
#define REG_A3XX_RB_STENCILREFMASK 0x00002108
#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
}
-#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
-#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
-#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+ return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
}
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
}
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
{
return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
-#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
-#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00
static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
-#define REG_A3XX_UNKNOWN_0C81 0x00000c81
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000
#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
-#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
-#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
-#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
{
- return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
}
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
-static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
{
- return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
}
#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+
+#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
+
#define REG_A3XX_TEX_SAMP_0 0x00000000
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
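
The renamed RB_FRAME_BUFFER_DIMENSION register packs width and height into one
dword using the same helper pattern. Purely as an illustration of how the renamed
fields compose (this register is normally programmed from the command stream rather
than by the kernel; the 1920x1080 values are arbitrary):

	/* illustrative packing of the frame buffer dimensions into one dword */
	uint32_t dim = A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(1920) |
		A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(1080);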
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 035bd13dc8bd..461df93e825e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -15,6 +15,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef CONFIG_MSM_OCMEM
+# include <mach/ocmem.h>
+#endif
+
#include "a3xx_gpu.h"
#define A3XX_INT0_MASK \
@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
static int a3xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
uint32_t *ptr, len;
int i, ret;
@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ } else if (adreno_is_a330v2(adreno_gpu)) {
+ /*
+ * Most of the VBIF registers on 8974v2 have the correct
+ * values at power on, so we won't modify those if we don't
+ * need to
+ */
+ /* Enable 1k sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
} else if (adreno_is_a330(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
/* Set up AOOO: */
- gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
- gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
/* Enable 1K sort: */
- gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
/* Disable VBIF clock gating. This is to enable AXI running
* higher frequency than GPU:
@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
-
- /* Set the OCMEM base address for A330 */
-//TODO:
-// if (adreno_is_a330(adreno_gpu)) {
-// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
-// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
-// }
+ if (adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+ else if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+ if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+ /* Set the OCMEM base address for A330, etc */
+ if (a3xx_gpu->ocmem_hdl) {
+ gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+ }
/* Turn on performance counters: */
gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->pm4->data);
len = adreno_gpu->pm4->size / 4;
- DBG("loading PM4 ucode version: %u", ptr[0]);
+ DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->pfp->data);
len = adreno_gpu->pfp->size / 4;
- DBG("loading PFP ucode version: %u", ptr[0]);
+ DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
-
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* NOTE: this (value taken from the downstream android driver)
+ * includes some bits outside of the known bitfields. But
+ * A330 has this "MERCIU queue" thing too, which might
+ * explain a new bitfield or reshuffling:
+ */
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+ }
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
return 0;
}
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
static void a3xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+ if (a3xx_gpu->ocmem_base)
+ ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+#endif
+
put_device(&a3xx_gpu->pdev->dev);
kfree(a3xx_gpu);
}
@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = {
.hw_init = a3xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
- .recover = adreno_recover,
+ .recover = a3xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = {
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct platform_device *pdev = a3xx_pdev;
struct adreno_platform_config *config;
@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
- gpu = &a3xx_gpu->base.base;
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
get_device(&pdev->dev);
a3xx_gpu->pdev = pdev;
@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ gpu->bus_scale_table = config->bus_scale_table;
+#endif
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
- ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
- &funcs, config->rev);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
if (ret)
goto fail;
- return &a3xx_gpu->base.base;
+ /* if needed, allocate gmem: */
+ if (adreno_is_a330(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+ /* TODO this is different/missing upstream: */
+ struct ocmem_buf *ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+ a3xx_gpu->ocmem_hdl = ocmem_hdl;
+ a3xx_gpu->ocmem_base = ocmem_hdl->addr;
+ adreno_gpu->gmem = ocmem_hdl->len;
+ DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+ a3xx_gpu->ocmem_base);
+#endif
+ }
+
+ if (!gpu->mmu) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ dev_err(dev->dev, "No memory protection without IOMMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
fail:
if (a3xx_gpu)
@@ -436,19 +518,59 @@ fail:
* The a3xx device:
*/
+#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+# include <mach/kgsl.h>
+#endif
+
static int a3xx_probe(struct platform_device *pdev)
{
static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
- /* TODO */
+ struct device_node *child, *node = pdev->dev.of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(node, "qcom,chipid", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+ return ret;
+ }
+
+ config.rev = ADRENO_REV((val >> 24) & 0xff,
+ (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+
+ /* find clock rates: */
+ config.fast_rate = 0;
+ config.slow_rate = ~0;
+ for_each_child_of_node(node, child) {
+ if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
+ struct device_node *pwrlvl;
+ for_each_child_of_node(child, pwrlvl) {
+ ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+ return ret;
+ }
+ config.fast_rate = max(config.fast_rate, val);
+ config.slow_rate = min(config.slow_rate, val);
+ }
+ }
+ }
+
+ if (!config.fast_rate) {
+ dev_err(&pdev->dev, "could not find clk rates\n");
+ return -ENXIO;
+ }
+
#else
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
uint32_t version = socinfo_get_version();
if (cpu_is_apq8064ab()) {
config.fast_rate = 450000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+ } else if (cpu_is_apq8064()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
config.bus_freq = 4;
@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev)
else
config.rev = ADRENO_REV(3, 2, 0, 0);
+ } else if (cpu_is_msm8960ab()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 320000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MINOR(version) == 0)
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ else
+ config.rev = ADRENO_REV(3, 2, 1, 1);
+
} else if (cpu_is_msm8930()) {
config.fast_rate = 400000000;
config.slow_rate = 27000000;
@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev)
config.rev = ADRENO_REV(3, 0, 5, 0);
}
+# ifdef CONFIG_MSM_BUS_SCALING
+ config.bus_scale_table = pdata->bus_scale_table;
+# endif
#endif
pdev->dev.platform_data = &config;
a3xx_pdev = pdev;
@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver a3xx_driver = {
.probe = a3xx_probe,
.remove = a3xx_remove,
- .driver.name = "kgsl-3d0",
+ .driver = {
+ .name = "kgsl-3d0",
+ .of_match_table = dt_match,
+ },
};
void __init a3xx_register(void)
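
For reference, the OF probe path above decodes the 32-bit "qcom,chipid" property
byte-wise into core/major/minor/patchid. Worked out on a hypothetical value (not
taken from any particular board, just to show the decode):

	/* hypothetical chipid, purely to illustrate the decode in a3xx_probe() */
	u32 val = 0x03030002;

	config.rev = ADRENO_REV((val >> 24) & 0xff,	/* core    = 3 */
			(val >> 16) & 0xff,		/* major   = 3 */
			(val >> 8) & 0xff,		/* minor   = 0 */
			val & 0xff);			/* patchid = 2 */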
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 32c398c2d00a..bb9a8ca0507b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -24,6 +24,10 @@
struct a3xx_gpu {
struct adreno_gpu base;
struct platform_device *pdev;
+
+ /* if OCMEM is used for GMEM: */
+ uint32_t ocmem_base;
+ void *ocmem_hdl;
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 33dcc606c7c5..d6e6ce2d1abd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -115,96 +116,6 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1,
};
-enum adreno_mmu_clnt_beh {
- BEH_NEVR = 0,
- BEH_TRAN_RNG = 1,
- BEH_TRAN_FLT = 2,
-};
-
-#define REG_AXXX_MH_MMU_CONFIG 0x00000040
-#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
-#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
-static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
-static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
-static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
- return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
-}
-
-#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
-
-#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
-
-#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
-
-#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
-
-#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
-
-#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
-
-#define REG_AXXX_MH_MMU_MPU_END 0x00000047
-
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
}
#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
}
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
+
+#define REG_AXXX_CP_ST_BASE 0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT 0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
+
+#define REG_AXXX_CP_IB1_BASE 0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_AXXX_CP_IB2_BASE 0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_AXXX_CP_STAT 0x0000047f
+
#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
+
#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+
#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a0b9d8a95b16..d321099abdd4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,6 +17,7 @@
#include "adreno_gpu.h"
#include "msm_gem.h"
+#include "msm_mmu.h"
struct adreno_info {
struct adreno_rev rev;
@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a300_pfp.fw",
.gmem = SZ_512K,
}, {
- .rev = ADRENO_REV(3, 3, 0, 0),
+ .rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330,
.name = "A330",
.pm4fw = "a330_pm4.fw",
@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = {
},
};
+MODULE_FIRMWARE("a300_pm4.fw");
+MODULE_FIRMWARE("a300_pfp.fw");
+MODULE_FIRMWARE("a330_pm4.fw");
+MODULE_FIRMWARE("a330_pfp.fw");
+
#define RB_SIZE SZ_32K
#define RB_BLKSIZE 16
@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->info->revn;
return 0;
case MSM_PARAM_GMEM_SIZE:
- *value = adreno_gpu->info->gmem;
+ *value = adreno_gpu->gmem;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
struct adreno_rev rev)
{
+ struct msm_mmu *mmu;
int i, ret;
/* identify gpu: */
@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
rev.core, rev.major, rev.minor, rev.patchid);
gpu->funcs = funcs;
+ gpu->gmem = gpu->info->gmem;
gpu->rev = rev;
ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
- ret = msm_iommu_attach(drm, gpu->base.iommu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
+ mmu = gpu->base.mmu;
+ if (mmu) {
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ return ret;
+ }
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
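
The BLKSZ change above applies the same encoding that BUFSZ already used: both
CP_RB_CNTL fields take log2 of a size expressed in quad-words (8-byte units), not a
raw byte count. Worked out for the constants used here, the patched call is just:

	/* RB_SIZE   = 32K bytes -> 32768 / 8 = 4096 quad-words -> ilog2 = 12
	 * RB_BLKSIZE = 16 bytes ->    16 / 8 =    2 quad-words -> ilog2 = 1
	 */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |	/* 12 for 32K */
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));		/*  1 for 16B */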
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index f73abfba7c22..ca11ea4da165 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -51,6 +51,7 @@ struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
const struct adreno_info *info;
+ uint32_t gmem; /* actual gmem size */
uint32_t revn; /* numeric revision name */
const struct adreno_gpu_funcs *funcs;
@@ -70,6 +71,9 @@ struct adreno_gpu {
struct adreno_platform_config {
struct adreno_rev rev;
uint32_t fast_rate, slow_rate, bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
+#endif
};
#define ADRENO_IDLE_TIMEOUT (20 * 1000)
@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu)
return gpu->revn == 330;
}
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+ return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 259ad709b0cc..ae992c71703f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -66,13 +67,15 @@ enum vgt_event_type {
enum pc_di_primtype {
DI_PT_NONE = 0,
- DI_PT_POINTLIST = 1,
+ DI_PT_POINTLIST_A2XX = 1,
DI_PT_LINELIST = 2,
DI_PT_LINESTRIP = 3,
DI_PT_TRILIST = 4,
DI_PT_TRIFAN = 5,
DI_PT_TRISTRIP = 6,
+ DI_PT_LINELOOP = 7,
DI_PT_RECTLIST = 8,
+ DI_PT_POINTLIST_A3XX = 9,
DI_PT_QUADLIST = 13,
DI_PT_QUADSTRIP = 14,
DI_PT_POLYGON = 15,
@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets {
CP_WAIT_FOR_IDLE = 38,
CP_WAIT_REG_MEM = 60,
CP_WAIT_REG_EQ = 82,
- CP_WAT_REG_GTE = 83,
+ CP_WAIT_REG_GTE = 83,
CP_WAIT_UNTIL_READ = 92,
CP_WAIT_IB_PFD_COMPLETE = 93,
CP_REG_RMW = 33,
@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets {
CP_CONTEXT_UPDATE = 94,
CP_INTERRUPT = 64,
CP_IM_STORE = 44,
- CP_SET_BIN_BASE_OFFSET = 75,
CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95,
CP_LOAD_STATE = 48,
@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets {
CP_COND_INDIRECT_BUFFER_PFD = 50,
CP_INDIRECT_BUFFER_PFE = 63,
CP_SET_BIN = 76,
+ CP_TEST_TWO_MEMS = 113,
+ CP_WAIT_FOR_ME = 19,
+ IN_IB_PREFETCH_END = 23,
+ IN_SUBBLK_PREFETCH = 31,
+ IN_INSTR_PREFETCH = 32,
+ IN_INSTR_MATCH = 71,
+ IN_CONST_PREFETCH = 73,
+ IN_INCR_UPDT_STATE = 85,
+ IN_INCR_UPDT_CONST = 86,
+ IN_INCR_UPDT_INSTR = 87,
};
enum adreno_state_block {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 6d4c62bf70dc..87be647e3825 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index d1df38bf5747..747a6ef4211f 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 0030a111302d..48e03acf19bf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 50d11df35b21..6f1588aa9071 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+irqreturn_t hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref)
}
/* initialize connector */
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
{
struct hdmi *hdmi = NULL;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = hdmi_pdev;
struct hdmi_platform_config *config;
- int ret;
+ int i, ret;
if (!pdev) {
dev_err(dev->dev, "no hdmi device\n");
@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->dev = dev;
hdmi->pdev = pdev;
+ hdmi->config = config;
hdmi->encoder = encoder;
/* not sure about which phy maps to which msm.. I am probably missing some */
@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+ hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
goto fail;
}
- hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
- if (IS_ERR(hdmi->mvs))
- hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
- if (IS_ERR(hdmi->mvs)) {
- ret = PTR_ERR(hdmi->mvs);
- dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
- goto fail;
+ BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ struct regulator *reg;
+
+ reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_regs[i] = reg;
}
- hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
- if (IS_ERR(hdmi->mpp0))
- hdmi->mpp0 = NULL;
+ BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ struct regulator *reg;
- hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
- if (IS_ERR(hdmi->clk)) {
- ret = PTR_ERR(hdmi->clk);
- dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
- goto fail;
+ reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_regs[i] = reg;
}
- hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
- if (IS_ERR(hdmi->m_pclk)) {
- ret = PTR_ERR(hdmi->m_pclk);
- dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
- goto fail;
+ BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_clks[i] = clk;
}
- hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
- if (IS_ERR(hdmi->s_pclk)) {
- ret = PTR_ERR(hdmi->s_pclk);
- dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
- goto fail;
+ BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_clks[i] = clk;
}
hdmi->i2c = hdmi_i2c_init(hdmi);
@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- hdmi->irq = platform_get_irq(pdev, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
- goto fail;
- }
+ if (!config->shared_irq) {
+ hdmi->irq = platform_get_irq(pdev, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
- ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
- NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "hdmi_isr", hdmi);
- if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
- hdmi->irq, ret);
- goto fail;
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+ NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
}
encoder->bridge = hdmi->bridge;
@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
- return 0;
+ return hdmi;
fail:
if (hdmi) {
@@ -211,37 +240,100 @@ fail:
hdmi_destroy(&hdmi->refcount);
}
- return ret;
+ return ERR_PTR(ret);
}
/*
* The hdmi device:
*/
+#include <linux/of_gpio.h>
+
static int hdmi_dev_probe(struct platform_device *pdev)
{
static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF
- /* TODO */
+ struct device_node *of_node = pdev->dev.of_node;
+
+ int get_gpio(const char *name)
+ {
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+ dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+ return gpio;
+ }
+
+ /* TODO actually use DT.. */
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+
+ config.phy_init = hdmi_phy_8x74_init;
+ config.mmio_name = "core_physical";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
+ config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
+ config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
+ config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
+ config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
+ config.shared_irq = true;
+
#else
+ static const char *hpd_clk_names[] = {
+ "core_clk", "master_iface_clk", "slave_iface_clk",
+ };
if (cpu_is_apq8064()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 70;
config.ddc_data_gpio = 71;
config.hpd_gpio = 72;
- config.pmic_gpio = 13 + NR_GPIO_IRQS;
- } else if (cpu_is_msm8960()) {
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = 13 + NR_GPIO_IRQS;
+ } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
+ static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 100;
config.ddc_data_gpio = 101;
config.hpd_gpio = 102;
- config.pmic_gpio = -1;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
} else if (cpu_is_msm8x60()) {
+ static const char *hpd_reg_names[] = {
+ "8901_hdmi_mvs", "8901_mpp0"
+ };
config.phy_init = hdmi_phy_8x60_init;
+ config.mmio_name = "hdmi_msm_hdmi_addr";
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 170;
config.ddc_data_gpio = 171;
config.hpd_gpio = 172;
- config.pmic_gpio = -1;
+ config.mux_en_gpio = -1;
+ config.mux_sel_gpio = -1;
}
#endif
pdev->dev.platform_data = &config;
@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
.remove = hdmi_dev_remove,
- .driver.name = "hdmi_msm",
+ .driver = {
+ .name = "hdmi_msm",
+ .of_match_table = dt_match,
+ },
};
void __init hdmi_register(void)
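
The hunks above replace hdmi.c's three hard-coded clock handles (clk/m_pclk/s_pclk) and its two fixed regulators with name tables carried in hdmi_platform_config, so the 8960, 8x60 and 8x74 variants differ only in data. A minimal sketch of that bulk-lookup pattern follows; the names, struct and function are illustrative assumptions, not the driver's actual tables:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

static const char * const example_clk_names[] = { "iface_clk", "core_clk" };

/* look up every named clock; devm_* keeps cleanup automatic on probe failure */
static int example_get_clks(struct device *dev, struct clk **clks)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_clk_names); i++) {
		struct clk *clk = devm_clk_get(dev, example_clk_names[i]);

		if (IS_ERR(clk)) {
			dev_err(dev, "failed to get clk %s: %ld\n",
				example_clk_names[i], PTR_ERR(clk));
			return PTR_ERR(clk);
		}
		clks[i] = clk;
	}
	return 0;
}
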
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 2c2ec566394c..41b29add70b1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -28,6 +28,7 @@
struct hdmi_phy;
+struct hdmi_platform_config;
struct hdmi {
struct kref refcount;
@@ -35,14 +36,14 @@ struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- void __iomem *mmio;
+ const struct hdmi_platform_config *config;
- struct regulator *mvs; /* HDMI_5V */
- struct regulator *mpp0; /* External 5V */
+ void __iomem *mmio;
- struct clk *clk;
- struct clk *m_pclk;
- struct clk *s_pclk;
+ struct regulator *hpd_regs[2];
+ struct regulator *pwr_regs[2];
+ struct clk *hpd_clks[3];
+ struct clk *pwr_clks[2];
struct hdmi_phy *phy;
struct i2c_adapter *i2c;
@@ -60,7 +61,29 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+ const char *mmio_name;
+
+ /* regulators that need to be on for hpd: */
+ const char **hpd_reg_names;
+ int hpd_reg_cnt;
+
+ /* regulators that need to be on for screen pwr: */
+ const char **pwr_reg_names;
+ int pwr_reg_cnt;
+
+ /* clks that need to be on for hpd: */
+ const char **hpd_clk_names;
+ int hpd_clk_cnt;
+
+ /* clks that need to be on for screen pwr (ie pixel clk): */
+ const char **pwr_clk_names;
+ int pwr_clk_cnt;
+
+ /* gpio's: */
+ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+
+ /* older devices had their own irq, on mdp5+ it is shared w/ mdp: */
+ bool shared_irq;
};
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
@@ -106,6 +129,7 @@ struct hdmi_phy {
struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 4e939f82918c..e2636582cfd7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state
#define REG_HDMI_HDCP_RESET 0x00000130
#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+#define REG_HDMI_VENSPEC_INFO0 0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1 0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2 0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3 0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4 0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5 0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6 0x00000184
+
#define REG_HDMI_AUDIO_CFG 0x000001d0
#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
}
+#define REG_HDMI_DDC_ARBITRATION 0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
+
#define REG_HDMI_DDC_INT_CTRL 0x00000214
#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
}
+#define REG_HDMI_CEC_STATUS 0x00000298
+
+#define REG_HDMI_CEC_INT 0x0000029c
+
+#define REG_HDMI_CEC_ADDR 0x000002a0
+
+#define REG_HDMI_CEC_TIME 0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER 0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA 0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER 0x000002b0
+
#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
#define REG_HDMI_PHY_CTRL 0x000002d4
#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
#define HDMI_PHY_CTRL_SW_RESET 0x00000004
#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
-#define REG_HDMI_AUD_INT 0x000002cc
-#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
-#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
-#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
-#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+#define REG_HDMI_CEC_WR_RANGE 0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE 0x000002e0
+
+#define REG_HDMI_VERSION 0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL 0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
#define REG_HDMI_8x60_PHY_REG0 0x00000300
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define REG_HDMI_8x74_ANA_CFG0 0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1 0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0 0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1 0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0 0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1 0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2 0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3 0x00000048
+
#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 5a8ee3473cf5..7d10e55403c6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -21,6 +21,7 @@ struct hdmi_bridge {
struct drm_bridge base;
struct hdmi *hdmi;
+ bool power_on;
unsigned long int pixclock;
};
@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge)
kfree(hdmi_bridge);
}
+static void power_on(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DBG("pixclock: %lu", hdmi_bridge->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
+ if (ret) {
+ dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* TODO do we need to wait for final vblank somewhere before
+ * cutting the clocks?
+ */
+ mdelay(16 + 4);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
struct hdmi_phy *phy = hdmi->phy;
DBG("power up");
+
+ if (!hdmi_bridge->power_on) {
+ power_on(bridge);
+ hdmi_bridge->power_on = true;
+ }
+
phy->funcs->powerup(phy, hdmi_bridge->pixclock);
hdmi_set_mode(hdmi, true);
}
@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
DBG("power down");
hdmi_set_mode(hdmi, false);
phy->funcs->powerdown(phy);
+
+ if (hdmi_bridge->power_on) {
+ power_off(bridge);
+ hdmi_bridge->power_on = false;
+ }
}
static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 823eee521a31..7dedfdd12075 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -17,19 +17,20 @@
#include <linux/gpio.h>
+#include "msm_kms.h"
#include "hdmi.h"
struct hdmi_connector {
struct drm_connector base;
struct hdmi *hdmi;
+ struct work_struct hpd_work;
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
static int gpio_config(struct hdmi *hdmi, bool on)
{
struct drm_device *dev = hdmi->dev;
- struct hdmi_platform_config *config =
- hdmi->pdev->dev.platform_data;
+ const struct hdmi_platform_config *config = hdmi->config;
int ret;
if (on) {
@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on)
"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
goto error1;
}
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+
ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
goto error2;
}
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+
ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_HPD", config->hpd_gpio, ret);
goto error3;
}
- if (config->pmic_gpio != -1) {
- ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+ gpio_direction_input(config->hpd_gpio);
+ gpio_set_value_cansleep(config->hpd_gpio, 1);
+
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
- "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+ "HDMI_MUX_EN", config->mux_en_gpio, ret);
goto error4;
}
- gpio_set_value_cansleep(config->pmic_gpio, 0);
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
+ goto error5;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
DBG("gpio on");
} else {
@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_free(config->ddc_data_gpio);
gpio_free(config->hpd_gpio);
- if (config->pmic_gpio != -1) {
- gpio_set_value_cansleep(config->pmic_gpio, 1);
- gpio_free(config->pmic_gpio);
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
}
DBG("gpio off");
}
return 0;
+error5:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
error4:
gpio_free(config->hpd_gpio);
error3:
@@ -88,10 +114,11 @@ error1:
static int hpd_enable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev;
struct hdmi_phy *phy = hdmi->phy;
uint32_t hpd_ctrl;
- int ret;
+ int i, ret;
ret = gpio_config(hdmi, true);
if (ret) {
@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail;
}
- ret = clk_prepare_enable(hdmi->clk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
- goto fail;
- }
-
- ret = clk_prepare_enable(hdmi->m_pclk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
- goto fail;
- }
-
- ret = clk_prepare_enable(hdmi->s_pclk);
- if (ret) {
- dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
}
- if (hdmi->mpp0)
- ret = regulator_enable(hdmi->mpp0);
- if (!ret)
- ret = regulator_enable(hdmi->mvs);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
}
hdmi_set_mode(hdmi, false);
@@ -156,26 +174,26 @@ fail:
static int hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev;
- int ret = 0;
+ int i, ret = 0;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
hdmi_set_mode(hdmi, false);
- if (hdmi->mpp0)
- ret = regulator_disable(hdmi->mpp0);
- if (!ret)
- ret = regulator_disable(hdmi->mvs);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
- goto fail;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
}
- clk_disable_unprepare(hdmi->clk);
- clk_disable_unprepare(hdmi->m_pclk);
- clk_disable_unprepare(hdmi->s_pclk);
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
if (ret) {
@@ -189,9 +207,19 @@ fail:
return ret;
}
+static void
+hotplug_work(struct work_struct *work)
+{
+ struct hdmi_connector *hdmi_connector =
+ container_of(work, struct hdmi_connector, hpd_work);
+ struct drm_connector *connector = &hdmi_connector->base;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
void hdmi_connector_irq(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
struct hdmi *hdmi = hdmi_connector->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
- drm_helper_hpd_irq_event(connector->dev);
-
/* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
if (!detected)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ queue_work(priv->wq, &hdmi_connector->hpd_work);
}
}
@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect(
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
uint32_t hpd_int_status;
int retry = 20;
@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect(
* let that trick us into thinking the monitor is gone:
*/
while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+ /* the hdmi debounce logic seems to get stuck sometimes;
+ * read the gpio directly to get a second opinion:
+ */
+ if (gpio_get_value(config->hpd_gpio)) {
+ DBG("gpio tells us we are connected!");
+ hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
+ break;
+ }
mdelay(10);
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
DBG("status=%08x", hpd_int_status);
@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = connector->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual, requested;
@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
actual = kms->funcs->round_pixclk(kms,
requested, hdmi_connector->hdmi->encoder);
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
}
hdmi_connector->hdmi = hdmi_reference(hdmi);
+ INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base;
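
The connector changes above stop calling drm_helper_hpd_irq_event() from the interrupt path and instead queue a work item, since the helper can sleep (it re-probes connectors and reads EDID). A sketch of that hand-off under an assumed example struct (INIT_WORK() would be done at init time, as the hunk adds in hdmi_connector_init()):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <drm/drm_crtc_helper.h>

struct example_hpd {
	struct work_struct hpd_work;	/* INIT_WORK(&hpd->hpd_work, example_hpd_work) at init */
	struct drm_device *dev;
};

static void example_hpd_work(struct work_struct *work)
{
	struct example_hpd *hpd = container_of(work, struct example_hpd, hpd_work);

	/* process context: may sleep while re-probing connectors */
	drm_helper_hpd_irq_event(hpd->dev);
}

static void example_hpd_irq(struct example_hpd *hpd)
{
	/* hard-irq context: ack the hardware elsewhere, just defer the heavy work */
	schedule_work(&hpd->hpd_work);
}
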
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644
index 000000000000..59fa6cdacb2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x74 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+ void __iomem *mmio;
+};
+#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
+
+
+static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
+{
+ msm_writel(data, phy->mmio + reg);
+}
+
+//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
+//{
+// return msm_readl(phy->mmio + reg);
+//}
+
+static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ kfree(phy_8x74);
+}
+
+static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ struct hdmi *hdmi = phy_8x74->hdmi;
+ unsigned int val;
+
+ /* NOTE that HDMI_PHY_CTRL is in core mmio, not phy mmio: */
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b);
+ phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20);
+}
+
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+ phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
+ .destroy = hdmi_phy_8x74_destroy,
+ .reset = hdmi_phy_8x74_reset,
+ .powerup = hdmi_phy_8x74_powerup,
+ .powerdown = hdmi_phy_8x74_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x74 *phy_8x74;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
+ if (!phy_8x74) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x74->base;
+
+ phy->funcs = &hdmi_phy_8x74_funcs;
+
+ phy_8x74->hdmi = hdmi;
+
+ /* for 8x74, the phy mmio is mapped separately: */
+ phy_8x74->mmio = msm_ioremap(hdmi->pdev,
+ "phy_physical", "HDMI_8x74");
+ if (IS_ERR(phy_8x74->mmio)) {
+ ret = PTR_ERR(phy_8x74->mmio);
+ goto fail;
+ }
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x74_destroy(phy);
+ return ERR_PTR(ret);
+}
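
hdmi_phy_8x74_reset() above toggles each reset bit with the polarity taken from the corresponding *_LOW flag, which is why every step is an if/else pair. The same logic can be folded into one helper; this is a hypothetical refactoring sketch, not part of the patch, and it assumes the driver's hdmi_write()/REG_HDMI_PHY_CTRL from hdmi.h:

static void example_toggle_reset(struct hdmi *hdmi, u32 val,
		u32 reset_bit, u32 low_bit, bool assert)
{
	/* a set *_LOW flag means the reset line is active-low, so asserting
	 * it clears the bit instead of setting it (and vice versa) */
	bool set = !!(val & low_bit) ^ assert;

	hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
			set ? (val | reset_bit) : (val & ~reset_bit));
}

/* e.g. assert, msleep(100), then deassert:
 *	example_toggle_reset(hdmi, val, HDMI_PHY_CTRL_SW_RESET,
 *			HDMI_PHY_CTRL_SW_RESET_LOW, true);
 */
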
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbde4f6339b9..d591567173c4 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 9908ffe1c3ad..416a26e1e58d 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mdp4_bpc {
- BPC1 = 0,
- BPC5 = 1,
- BPC6 = 2,
- BPC8 = 3,
-};
-
-enum mdp4_bpc_alpha {
- BPC1A = 0,
- BPC4A = 1,
- BPC6A = 2,
- BPC8A = 3,
-};
-
-enum mdp4_alpha_type {
- FG_CONST = 0,
- BG_CONST = 1,
- FG_PIXEL = 2,
- BG_PIXEL = 3,
-};
-
enum mdp4_pipe {
VG1 = 0,
VG2 = 1,
@@ -79,15 +60,6 @@ enum mdp4_mixer {
MIXER2 = 2,
};
-enum mdp4_mixer_stage_id {
- STAGE_UNUSED = 0,
- STAGE_BASE = 1,
- STAGE0 = 2,
- STAGE1 = 3,
- STAGE2 = 4,
- STAGE3 = 5,
-};
-
enum mdp4_intf {
INTF_LCDC_DTV = 0,
INTF_DSI_VIDEO = 1,
@@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
}
@@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va
#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
}
#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
}
@@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
}
@@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
}
@@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
}
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
}
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
}
@@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
}
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
{
return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 019d530187ff..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,44 +58,100 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from the PoV of the KMS API) hold a ref
+ * to, which we may not yet be scanning out (we may still be scanning
+ * out the previous fb in the page_flip case, while waiting for gpu
+ * rendering to complete):
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
- struct mdp4_irq vblank;
- struct mdp4_irq err;
+ struct mdp_irq vblank;
+ struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -210,9 +237,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != mdp4_crtc->enabled) {
if (enabled) {
mdp4_enable(mdp4_kms);
- mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
} else {
- mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
}
mdp4_crtc->enabled = enabled;
@@ -232,7 +259,7 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp4_kms *mdp4_kms = get_kms(crtc);
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
- static const enum mdp4_mixer_stage_id stages[] = {
+ static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
/* statically (for now) map planes to mixer stage (z-order): */
@@ -262,8 +289,8 @@ static void blend_setup(struct drm_crtc *crtc)
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
- const struct mdp4_format *format =
- to_mdp4_format(msm_framebuffer_format(plane->fb));
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(plane->fb));
alpha[idx-1] = format->alpha_enable;
}
mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -571,14 +623,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.load_lut = mdp4_crtc_load_lut,
};
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
- mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
pending = atomic_xchg(&mdp4_crtc->pending, 0);
@@ -593,7 +645,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
}
}
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 5e0dcae70ab5..067ed03b35fe 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -15,8 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <mach/clk.h>
-
#include "mdp4_kms.h"
#include "drm_crtc.h"
@@ -37,7 +35,7 @@ struct mdp4_dtv_encoder {
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
#ifdef CONFIG_MSM_BUS_SCALING
@@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
new file mode 100644
index 000000000000..c740ccd1cc67
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), false);
+}
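
The new mdp4_irq.c above routes everything through the shared mdp_irq layer: clients register an mdp_irq carrying an irqmask, and mdp_dispatch_irqs() (added elsewhere in this series, not shown here) calls back every handler whose mask intersects the latched status. An illustrative sketch of that dispatch idea, with assumed names that only approximate the shared code:

#include <linux/list.h>
#include <linux/types.h>

struct example_irq {
	struct list_head node;
	uint32_t irqmask;
	void (*irq)(struct example_irq *irq, uint32_t irqstatus);
};

/* walk the registered handlers and invoke any whose mask matches */
static void example_dispatch_irqs(struct list_head *handlers, uint32_t status)
{
	struct example_irq *handler;

	list_for_each_entry(handler, handlers, node)
		if (handler->irqmask & status)
			handler->irq(handler, status & handler->irqmask);
}
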
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 8972ac35a43d..272e707c9487 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,13 +17,14 @@
#include "msm_drv.h"
+#include "msm_mmu.h"
#include "mdp4_kms.h"
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
static int mdp4_hw_init(struct msm_kms *kms)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev;
uint32_t version, major, minor, dmap_cfg, vg_cfg;
unsigned long clk;
@@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms)
pm_runtime_get_sync(dev->dev);
+ mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
major = FIELD(version, MDP4_VERSION_MAJOR);
minor = FIELD(version, MDP4_VERSION_MINOR);
- DBG("found MDP version v%d.%d", major, minor);
+ DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
@@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i;
@@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp4_destroy(struct msm_kms *kms)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
kfree(mdp4_kms);
}
-static const struct msm_kms_funcs kms_funcs = {
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
.hw_init = mdp4_hw_init,
.irq_preinstall = mdp4_irq_preinstall,
.irq_postinstall = mdp4_irq_postinstall,
@@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
- .get_format = mdp4_get_format,
+ .get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.preclose = mdp4_preclose,
.destroy = mdp4_destroy,
+ },
+ .set_irqmask = mdp4_set_irqmask,
};
int mdp4_disable(struct mdp4_kms *mdp4_kms)
@@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
+ struct hdmi *hdmi;
int ret;
/*
@@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
priv->encoders[priv->num_encoders++] = encoder;
- ret = hdmi_init(dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI\n");
+ hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(hdmi)) {
+ ret = PTR_ERR(hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail;
}
@@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- kms = &mdp4_kms->base;
- kms->funcs = &kms_funcs;
+ mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+ kms = &mdp4_kms->base.base;
mdp4_kms->dev = dev;
@@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
- if (!config->iommu) {
- dev_err(dev->dev, "no iommu\n");
- ret = -ENXIO;
- goto fail;
- }
-
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
+ mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdp4_disable(mdp4_kms);
mdelay(16);
- ret = msm_iommu_attach(dev, config->iommu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- if (ret)
- goto fail;
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ }
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
- mdp4_kms->id = msm_register_iommu(dev, config->iommu);
+ mdp4_kms->id = msm_register_mmu(dev, mmu);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
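
The hunk above replaces the old mandatory msm_iommu_attach() call with the new msm_mmu abstraction: when the platform config describes an IOMMU, the driver builds one with msm_iommu_new(), attaches the scanout ports through mmu->funcs->attach(), and registers the result with msm_register_mmu(); with no IOMMU it registers a NULL mmu and scanout falls back to physically contiguous buffers. A rough sketch of the interface this hunk assumes is below; the real definitions live in msm_mmu.h, which is not part of this excerpt, so the member names are inferred from the calls above:

/* sketch only: shape of the msm_mmu abstraction used by mdp4_kms_init() */
struct msm_mmu;

struct msm_mmu_funcs {
	/* attach the listed client ports (e.g. iommu_ports[]) to the domain */
	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
	/* map/unmap/destroy ops elided in this sketch */
};

struct msm_mmu {
	const struct msm_mmu_funcs *funcs;
};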
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index eb015c834087..66a4d31aec80 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -18,29 +18,13 @@
#ifndef __MDP4_KMS_H__
#define __MDP4_KMS_H__
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
#include "mdp4.xml.h"
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration. We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
- struct list_head node;
- uint32_t irqmask;
- bool registered;
- void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
struct mdp4_kms {
- struct msm_kms base;
+ struct mdp_kms base;
struct drm_device *dev;
@@ -59,11 +43,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
- /* irq handling: */
- bool in_irq;
- struct list_head irq_list; /* list of mdp4_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
- struct mdp4_irq error_handler;
+ struct mdp_irq error_handler;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
@@ -73,16 +53,6 @@ struct mdp4_platform_config {
uint32_t max_clk;
};
-struct mdp4_format {
- struct msm_format base;
- enum mdp4_bpc bpc_r, bpc_g, bpc_b;
- enum mdp4_bpc_alpha bpc_a;
- uint8_t unpack[4];
- bool alpha_enable, unpack_tight;
- uint8_t cpp, unpack_count;
-};
-#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
-
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{
msm_writel(data, mdp4_kms->mmio + reg);
@@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
}
static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
- enum mdp4_mixer_stage_id stage)
+ enum mdp_mixer_stage_id stage)
{
uint32_t mixer_cfg = 0;
@@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
- uint32_t max_formats);
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
+static inline
+uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe_id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
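
With this header change, struct mdp4_kms no longer embeds struct msm_kms directly: it embeds struct mdp_kms (defined in mdp/mdp_kms.h, not shown in this excerpt), which in turn provides the msm_kms base. That is why the .c hunks above convert with to_mdp4_kms(to_mdp_kms(kms)) and why mdp4_kms_init() now hands &mdp4_kms->base.base to the core. A minimal sketch of that chain, with a hypothetical helper name chosen only for illustration:

/* sketch: the two container_of steps implied by the new embedding */
static inline struct mdp4_kms *msm_kms_to_mdp4(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);  /* msm_kms  -> mdp_kms  */
	return to_mdp4_kms(mdp_kms);                /* mdp_kms  -> mdp4_kms */
}
/* going the other way, as mdp4_kms_init() does:  kms = &mdp4_kms->base.base; */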
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0f0af243f6fc..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -34,7 +34,7 @@ struct mdp4_plane {
static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
- return to_mdp4_kms(priv->kms);
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static int mdp4_plane_update(struct drm_plane *plane,
@@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- const struct mdp4_format *format;
+ const struct mdp_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
@@ -170,12 +170,12 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
- format = to_mdp4_format(msm_framebuffer_format(fb));
+ format = to_mdp_format(msm_framebuffer_format(fb));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
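
The DST_XY change above is the one behavioural-looking fix in this hunk: the old code filled REG_MDP4_PIPE_DST_XY using the SRC_XY field helpers. Assuming both registers pack X into the low 16 bits and Y into the high 16 bits (as the analogous MDP5 helpers in the new header below do), the generated macros expand to the same mask-and-shift arithmetic, for example:

/* illustration only, under the layout assumption stated above:
 *   MDP4_PIPE_SRC_XY_X(5) == (5 <<  0) & 0x0000ffff == 0x00000005
 *   MDP4_PIPE_SRC_XY_Y(3) == (3 << 16) & 0xffff0000 == 0x00030000
 * and the DST_XY variants would produce the same bits, so the switch is
 * about writing the register with its own field names rather than a
 * functional change.
 */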
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
new file mode 100644
index 000000000000..0aa51517f826
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -0,0 +1,1036 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf {
+ INTF_DSI = 1,
+ INTF_HDMI = 3,
+ INTF_LCDC = 5,
+ INTF_eDP = 9,
+};
+
+enum mdp5_intfnum {
+ NO_INTF = 0,
+ INTF0 = 1,
+ INTF1 = 2,
+ INTF2 = 3,
+ INTF3 = 4,
+};
+
+enum mdp5_pipe {
+ SSPP_VIG0 = 0,
+ SSPP_VIG1 = 1,
+ SSPP_VIG2 = 2,
+ SSPP_RGB0 = 3,
+ SSPP_RGB1 = 4,
+ SSPP_RGB2 = 5,
+ SSPP_DMA0 = 6,
+ SSPP_DMA1 = 7,
+};
+
+enum mdp5_ctl_mode {
+ MODE_NONE = 0,
+ MODE_ROT0 = 1,
+ MODE_ROT1 = 2,
+ MODE_WB0 = 3,
+ MODE_WB1 = 4,
+ MODE_WFD = 5,
+};
+
+enum mdp5_pack_3d {
+ PACK_3D_FRAME_INT = 0,
+ PACK_3D_H_ROW_INT = 1,
+ PACK_3D_V_ROW_INT = 2,
+ PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_chroma_samp_type {
+ CHROMA_RGB = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp5_scale_filter {
+ SCALE_FILTER_NEAREST = 0,
+ SCALE_FILTER_BIL = 1,
+ SCALE_FILTER_PCMN = 2,
+ SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+ BWC_LOSSLESS = 0,
+ BWC_Q_HIGH = 1,
+ BWC_Q_MED = 2,
+};
+
+enum mdp5_client_id {
+ CID_UNUSED = 0,
+ CID_VIG0_Y = 1,
+ CID_VIG0_CR = 2,
+ CID_VIG0_CB = 3,
+ CID_VIG1_Y = 4,
+ CID_VIG1_CR = 5,
+ CID_VIG1_CB = 6,
+ CID_VIG2_Y = 7,
+ CID_VIG2_CR = 8,
+ CID_VIG2_CB = 9,
+ CID_DMA0_Y = 10,
+ CID_DMA0_CR = 11,
+ CID_DMA0_CB = 12,
+ CID_DMA1_Y = 13,
+ CID_DMA1_CR = 14,
+ CID_DMA1_CB = 15,
+ CID_RGB0 = 16,
+ CID_RGB1 = 17,
+ CID_RGB2 = 18,
+ CID_MAX = 19,
+};
+
+enum mdp5_igc_type {
+ IGC_VIG = 0,
+ IGC_RGB = 1,
+ IGC_DMA = 2,
+ IGC_DSPP = 3,
+};
+
+#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
+#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
+#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
+#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
+#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
+#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
+#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
+#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
+#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
+#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
+#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
+#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
+#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
+#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
+#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
+#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
+#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
+#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
+#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
+#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
+#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
+#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
+#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
+#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
+#define MDP5_IRQ_INTF0_VSYNC 0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
+#define MDP5_IRQ_INTF1_VSYNC 0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
+#define MDP5_IRQ_INTF2_VSYNC 0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
+#define MDP5_IRQ_INTF3_VSYNC 0x80000000
+#define REG_MDP5_HW_VERSION 0x00000000
+
+#define REG_MDP5_HW_INTR_STATUS 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+
+#define REG_MDP5_MDP_VERSION 0x00000100
+#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
+#define MDP5_MDP_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL 0x00000104
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN 0x00000110
+
+#define REG_MDP5_INTR_STATUS 0x00000114
+
+#define REG_MDP5_INTR_CLEAR 0x00000118
+
+#define REG_MDP5_HIST_INTR_EN 0x0000011c
+
+#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+ switch (idx) {
+ case IGC_VIG: return 0x00000300;
+ case IGC_RGB: return 0x00000310;
+ case IGC_DMA: return 0x00000320;
+ case IGC_DSPP: return 0x00000400;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+ return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+ return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+ return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+ return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+ return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+
+
+#endif /* MDP5_XML */
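
mdp5.xml.h follows the same rules-ng-ng conventions as the mdp4 header: the REG_* helpers compute the (possibly indexed) register offset, and the field helpers mask-and-shift values so they can simply be OR'd together. A small usage sketch, not part of the generated file — mdp5_write() is the mdp5 register accessor used later in mdp5_crtc.c, and the function and parameter names here are illustrative:

/* sketch: programming an interface's hsync timing with the helpers above */
static void intf_set_hsync(struct mdp5_kms *mdp5_kms, uint32_t intf,
		uint32_t pulse_width, uint32_t period)
{
	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
			MDP5_INTF_HSYNC_CTL_PULSEW(pulse_width) |
			MDP5_INTF_HSYNC_CTL_PERIOD(period));
}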
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644
index 000000000000..f2794021f086
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp5_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ struct drm_plane *planes[8];
+ int id;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* the fb that we logically (from PoV of KMS API) hold a ref
+ * to, which we may not yet be scanning out (we may still be
+ * scanning out the previous fb, in case of a page_flip, while
+ * waiting for gpu rendering to complete):
+ */
+ struct drm_framebuffer *fb;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ atomic_or(pending, &mdp5_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+ uint32_t i, flush = 0;
+
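+ /* CTL FLUSH takes a bitmask of blocks whose double-buffered
+ * (shadow) registers should be latched into hw at the next
+ * vblank; collect the bits for our attached pipes and mixer:
+ */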
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane) {
+ enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
+ flush |= pipe2flush(pipe);
+ }
+ }
+ flush |= mixer2flush(mdp5_crtc->id);
+ flush |= MDP5_CTL_FLUSH_CTL;
+
+ DBG("%s: flush=%08x", mdp5_crtc->name, flush);
+
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp5_crtc->fb;
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp5_crtc->base.fb = new_fb;
+ mdp5_crtc->fb = new_fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp5_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
+ mdp5_crtc->scanout_fb);
+
+ mdp5_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags, i;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp5_crtc->event;
+ if (event) {
+ /* if regular vblank case (!file) or if cancel-flip from
+ * preclose on file that requested flip, then send the
+ * event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ mdp5_crtc->event = NULL;
+ drm_send_vblank_event(dev, mdp5_crtc->id, event);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+ struct drm_plane *plane = mdp5_crtc->planes[i];
+ if (plane)
+ mdp5_plane_complete_flip(plane);
+ }
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(cb, struct mdp5_crtc, pageflip_cb);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct drm_framebuffer *fb = mdp5_crtc->fb;
+
+ if (!fb)
+ return;
+
+ drm_framebuffer_reference(fb);
+ mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
+ update_scanout(crtc, fb);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_fb_work);
+ struct drm_device *dev = mdp5_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
+
+ kfree(mdp5_crtc);
+}
+
+static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp5_crtc->name, mode);
+
+ if (enabled != mdp5_crtc->enabled) {
+ if (enabled) {
+ mdp5_enable(mdp5_kms);
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+ } else {
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ mdp5_disable(mdp5_kms);
+ }
+ mdp5_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int id = mdp5_crtc->id;
+
+ /*
+ * Hard-coded setup for now until I figure out how the
+ * layer-mixer works
+ */
+
+ /* LM[id]: */
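+ /* fg alpha comes from the constant (0xff below), bg alpha from
+ * the inverted fg pixel alpha:
+ */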
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
+ MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
+ MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+
+ /* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m..
+ * but we do want to be setting CTL[m].LAYER[n]. Not sure what
+ * the point of having CTL[m].LAYER[o] (for o!=n) is.. maybe it
+ * is used when chaining up mixers for high resolution displays?
+ */
+
+ /* CTL[id]: */
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
+ MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
+ MDP5_CTL_LAYER_REG_BORDER_COLOR);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+}
+
+static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ int ret;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp5_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp5_crtc->name, ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+ MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s", mdp5_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp5_enable(get_kms(crtc));
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clk's that we got in prepare: */
+ mdp5_disable(get_kms(crtc));
+}
+
+static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_plane *plane = mdp5_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+ int ret;
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
+}
+
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+ unsigned long flags;
+
+ if (mdp5_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp5_crtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ update_fb(crtc, new_fb);
+
+ return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+}
+
+static int mdp5_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = mdp5_crtc_page_flip,
+ .set_property = mdp5_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+ .dpms = mdp5_crtc_dpms,
+ .mode_fixup = mdp5_crtc_mode_fixup,
+ .mode_set = mdp5_crtc_mode_set,
+ .prepare = mdp5_crtc_prepare,
+ .commit = mdp5_crtc_commit,
+ .mode_set_base = mdp5_crtc_mode_set_base,
+ .load_lut = mdp5_crtc_load_lut,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+ pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+ }
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ DBG("cancel: %p", file);
+ complete_flip(crtc, file);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ static const enum mdp5_intfnum intfnum[] = {
+ INTF0, INTF1, INTF2, INTF3,
+ };
+ uint32_t intf_sel;
+
+ /* now that we know what irq's we want: */
+ mdp5_crtc->err.irqmask = intf2err(intf);
+ mdp5_crtc->vblank.irqmask = intf2vblank(intf);
+
+ /* when called from modeset_init(), skip the rest until later: */
+ if (!mdp5_kms)
+ return;
+
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+ switch (intf) {
+ case 0:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
+ MDP5_CTL_OP_MODE(MODE_NONE) |
+ MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+
+ crtc_flush(crtc);
+}
+
+static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
+ struct drm_plane *plane)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+
+ if (mdp5_crtc->planes[pipe_id] == plane)
+ return;
+
+ mdp5_crtc->planes[pipe_id] = plane;
+ blend_setup(crtc);
+ if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
+ crtc_flush(crtc);
+}
+
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp5_plane_pipe(plane), plane);
+}
+
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+ set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp5_crtc *mdp5_crtc;
+ int ret;
+
+ mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+ if (!mdp5_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp5_crtc->base;
+
+ mdp5_crtc->plane = plane;
+ mdp5_crtc->id = id;
+
+ mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+ mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+
+ snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
+ pipe2name(mdp5_plane_pipe(plane)), id);
+
+ ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret)
+ goto fail;
+
+ INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
+
+ drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+ mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp5_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644
index 000000000000..edec7bfaa952
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_encoder {
+ struct drm_encoder base;
+ int intf;
+ enum mdp5_intf intf_id;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_encoder *mdp5_encoder)
+{
+ mdp5_encoder->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_encoder *mdp5_encoder)
+{
+ if (mdp5_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp5_encoder->bsc);
+ mdp5_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
+{
+ if (mdp5_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
+#endif
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ bs_fini(mdp5_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+ .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp5_encoder->enabled)
+ return;
+
+ if (enabled) {
+ bs_set(mdp5_encoder, 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ } else {
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ bs_set(mdp5_encoder, 0);
+ }
+
+ mdp5_encoder->enabled = enabled;
+}
+
+static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf;
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+ uint32_t format;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+ format = 0x213f; /* get this from panel? */
+
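+ /* horizontal positions below are in pixels measured from the
+ * start of hsync; vertical values are in pixel-clock counts
+ * (lines * htotal), offset by the hsync skew:
+ */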
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+ MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+ MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+ MDP5_INTF_ACTIVE_HCTL_START(0) |
+ MDP5_INTF_ACTIVE_HCTL_END(0));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+}
+
+static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_encoder_commit(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
+ mdp5_encoder->intf_id);
+ mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+ .dpms = mdp5_encoder_dpms,
+ .mode_fixup = mdp5_encoder_mode_fixup,
+ .mode_set = mdp5_encoder_mode_set,
+ .prepare = mdp5_encoder_prepare,
+ .commit = mdp5_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_encoder *mdp5_encoder;
+ int ret;
+
+ mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+ if (!mdp5_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp5_encoder->intf = intf;
+ mdp5_encoder->intf_id = intf_id;
+ encoder = &mdp5_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+ bs_init(mdp5_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644
index 000000000000..353d494a497f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+ error_handler->irq = mdp5_irq_error_handler;
+ error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+ MDP5_IRQ_INTF1_UNDER_RUN |
+ MDP5_IRQ_INTF2_UNDER_RUN |
+ MDP5_IRQ_INTF3_UNDER_RUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+}
+
+static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ uint32_t intr;
+
+ intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
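+ /* demux the top-level interrupt between the MDP core and the HDMI block: */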
+ if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+ mdp5_irq_mdp(mdp_kms);
+
+ if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
+ hdmi_irq(0, mdp5_kms->hdmi);
+
+ return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), true);
+ return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644
index 000000000000..ee8446c1b5f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ uint32_t version, major, minor;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ mdp5_enable(mdp5_kms);
+ version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ mdp5_disable(mdp5_kms);
+
+ major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+ minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+ DBG("found MDP5 version v%d.%d", major, minor);
+
+ if ((major != 1) || ((minor != 0) && (minor != 2))) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp5_kms->rev = minor;
+
+ /* Magic unknown register writes:
+ *
+ * W VBIF:0x004 00000001 (mdss_mdp.c:839)
+ * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
+ * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
+ * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
+ * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
+ *
+ * Downstream fbdev driver gets these register offsets/values
+ * from DT.. not really sure what these registers are, or whether
+ * the values differ for different boards/SoC's, etc. I guess
+ * they are the golden registers.
+ *
+ * Not setting these does not seem to cause any problem. But
+ * we may be getting lucky with the bootloader initializing
+ * them for us. OTOH, if we can always count on the bootloader
+ * setting the golden registers, then perhaps we don't need to
+ * care.
+ */
+
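+ /* route no interfaces and clear the CTL operation modes until
+ * modeset programs them:
+ */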
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static void mdp5_destroy(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ kfree(mdp5_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp5_hw_init,
+ .irq_preinstall = mdp5_irq_preinstall,
+ .irq_postinstall = mdp5_irq_postinstall,
+ .irq_uninstall = mdp5_irq_uninstall,
+ .irq = mdp5_irq,
+ .enable_vblank = mdp5_enable_vblank,
+ .disable_vblank = mdp5_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp5_round_pixclk,
+ .preclose = mdp5_preclose,
+ .destroy = mdp5_destroy,
+ },
+ .set_irqmask = mdp5_set_irqmask,
+};
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp5_kms->ahb_clk);
+ clk_disable_unprepare(mdp5_kms->axi_clk);
+ clk_disable_unprepare(mdp5_kms->core_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp5_kms->ahb_clk);
+ clk_prepare_enable(mdp5_kms->axi_clk);
+ clk_prepare_enable(mdp5_kms->core_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe crtcs[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ };
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ int i, ret;
+
+ /* construct CRTCs: */
+ for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = mdp5_plane_init(dev, crtcs[i], true);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+
+ crtc = mdp5_crtc_init(dev, plane, i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
+ pipe2name(crtcs[i]), ret);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* Construct encoder for HDMI: */
+ encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+
+ /* NOTE: the vsync and error irq's are actually associated with
+ * the INTF/encoder.. the easiest way to deal with this (ie. what
+ * we do now) is assume a fixed relationship between crtc's and
+ * encoders. I'm not sure if there is ever a need to more freely
+ * assign crtcs to encoders, but if there is then we need to take
+ * care of error and vblank irq's that the crtc has registered,
+ * and also update user-requested vblank_mask.
+ */
+ encoder->possible_crtcs = BIT(0);
+ mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ /* Construct bridge/connector for HDMI: */
+ mdp5_kms->hdmi = hdmi_init(dev, encoder);
+ if (IS_ERR(mdp5_kms->hdmi)) {
+ ret = PTR_ERR(mdp5_kms->hdmi);
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ *clkp = clk;
+ return 0;
+}
+
+struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp5_platform_config *config = mdp5_get_config(pdev);
+ struct mdp5_kms *mdp5_kms;
+ struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
+ int ret;
+
+ mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ kms = &mdp5_kms->base.base;
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
+ goto fail;
+ }
+
+ mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_kms->vbif)) {
+ ret = PTR_ERR(mdp5_kms->vbif);
+ goto fail;
+ }
+
+ mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp5_kms->vdd)) {
+ ret = PTR_ERR(mdp5_kms->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdp5_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
+ get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
+ get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
+ get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
+ get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+ if (ret)
+ goto fail;
+
+ ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ mdp5_disable(mdp5_kms);
+ mdelay(16);
+
+ if (config->iommu) {
+ mmu = msm_iommu_new(dev, config->iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ }
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+ } else {
+ dev_info(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ mmu = NULL;
+ }
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp5_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
+{
+ static struct mdp5_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#endif
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644
index 000000000000..c8b1a2522c25
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp5.xml.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+
+ /* for tracking smp allocation amongst pipes: */
+ mdp5_smp_state_t smp_state;
+ struct mdp5_client_smp_state smp_client_state[CID_MAX];
+ int smp_blk_cnt;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *src_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *vsync_clk;
+
+ struct hdmi *hdmi;
+
+ struct mdp_irq error_handler;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+ int smp_blk_cnt;
+};
+
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+ return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+ static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+ NAME(VIG0), NAME(VIG1), NAME(VIG2),
+ NAME(RGB0), NAME(RGB1), NAME(RGB2),
+ NAME(DMA0), NAME(DMA1),
+#undef NAME
+ };
+ return names[pipe];
+}
+
+static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ default: return 0;
+ }
+}
+
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_RGB0:
+ case SSPP_RGB1:
+ case SSPP_RGB2:
+ return 1;
+ default:
+ return 3;
+ }
+}
+
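+/* map (pipe, plane#) to the SMP client-id; pipes that fetch multi-planar
+ * (YUV) formats get per-plane clients, while RGB pipes get a single client:
+ */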
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ default: return CID_UNUSED;
+ }
+}
+
+static inline uint32_t mixer2flush(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2err(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
+ case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
+ case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
+ case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2vblank(int intf)
+{
+ switch (intf) {
+ case 0: return MDP5_IRQ_INTF0_VSYNC;
+ case 1: return MDP5_IRQ_INTF1_VSYNC;
+ case 2: return MDP5_IRQ_INTF2_VSYNC;
+ case 3: return MDP5_IRQ_INTF3_VSYNC;
+ default: return 0;
+ }
+}
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms);
+int mdp5_enable(struct mdp5_kms *mdp5_kms);
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline
+uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
+ uint32_t max_formats)
+{
+ /* TODO when we have YUV, we need to filter supported formats
+ * based on pipe id..
+ */
+ return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void mdp5_plane_complete_flip(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane);
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+ enum mdp5_intf intf_id);
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+ enum mdp5_intf intf_id);
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000000..0ac8bb5e7e85
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+
+struct mdp5_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp5_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp5_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ mdp5_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp5_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp5_plane_disable(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i;
+
+ DBG("%s: disable", mdp5_plane->name);
+
+ /* update our SMP request to zero (release all our blks): */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+
+ /* TODO detaching now will cause us not to get the last
+ * vblank and mdp5_smp_commit().. so other planes will
+ * still see smp blocks previously allocated to us as
+ * in-use..
+ */
+ if (plane->crtc)
+ mdp5_crtc_detach(plane->crtc, plane);
+
+ return 0;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ mdp5_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+int mdp5_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = mdp5_plane_update,
+ .disable_plane = mdp5_plane_disable,
+ .destroy = mdp5_plane_destroy,
+ .set_property = mdp5_plane_set_property,
+};
+
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
+ uint32_t iova[4];
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
+ msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
+ }
+ for (; i < 4; i++)
+ iova[i] = 0;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+
+ plane->fb = fb;
+}
+
+/* NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
+ uint32_t nplanes, uint32_t width)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ int i, hsub, nlines, nblks, ret;
+
+ hsub = drm_format_horz_chroma_subsampling(format);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(format, i);
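+ /* chroma planes (i > 0) are horizontally subsampled, so their
+ * fetch stride shrinks by hsub:
+ */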
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+
+ /* for hw rev v1.00 */
+ if (mdp5_kms->rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
+ ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ /* in success case, return total # of blocks allocated: */
+ return nblks;
+}
+
+static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ uint32_t val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+
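+ /* program the three request-priority watermarks at 1/4, 2/4 and
+ * 3/4 of the fetched SMP entries:
+ */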
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+
+}
+
+int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ uint32_t phasex_step = 0, phasey_step = 0;
+ uint32_t hdecm = 0, vdecm = 0;
+ int i, nblks;
+
+ nplanes = drm_format_num_planes(fb->pixel_format);
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /*
+ * Calculate and request required # of smp blocks:
+ */
+ nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
+ if (nblks < 0)
+ return nblks;
+
+ /*
+ * Currently we update the hw for allocations/requests immediately,
+ * but once atomic modeset/pageflip is in place, the allocation
+ * would move into atomic->check_plane_state(), while updating the
+ * hw would remain here:
+ */
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+
+ if (src_w != crtc_w) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
+ /* TODO calc phasex_step, hdecm */
+ }
+
+ if (src_h != crtc_h) {
+ config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
+ /* TODO calc phasey_step, vdecm */
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_plane_set_scanout(plane, fb);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+
+ set_fifo_thresholds(plane, nblks);
+
+ /* TODO detach from old crtc (if we had more than one) */
+ mdp5_crtc_attach(crtc, plane);
+
+ return 0;
+}
+
+void mdp5_plane_complete_flip(struct drm_plane *plane)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+}
+
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ return mdp5_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum mdp5_pipe pipe, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->pipe = pipe;
+ mdp5_plane->name = pipe2name(pipe);
+
+ mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats));
+
+ drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ private_plane);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644
index 000000000000..2d0236b963a6
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+/* SMP - Shared Memory Pool
+ *
+ * These are shared between all the clients, where each plane in a
+ * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * For each block, it can be either free, or pending/in-use by a
+ * client. The updates happen in three steps:
+ *
+ * 1) mdp5_smp_request():
+ * When plane scanout is set up, calculate the number of
+ * blocks needed per client, and request them. Blocks not inuse or
+ * pending by any other client are added to client's pending
+ * set.
+ *
+ * 2) mdp5_smp_configure():
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * are configured for the union(pending, inuse)
+ *
+ * 3) mdp5_smp_commit():
+ * After next vblank, copy pending -> inuse. Optionally update
+ * MDP5_SMP_ALLOC registers if there are newly unused blocks
+ *
+ * On the next vblank after changes have been committed to hw, the
+ * client's pending blocks become its in-use blocks (and no-longer
+ * in-use blocks become available to other clients).
+ *
+ * btw, hurray for confusing overloaded acronyms! :-/
+ *
+ * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
+ * should happen at (or before) atomic->check(). And we'd need
+ * an API to discard previous requests if the update is aborted
+ * or is test-only.
+ *
+ * TODO would perhaps be nice to have debugfs to dump out kernel
+ * inuse and pending state of all clients..
+ */
+
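+/* Roughly, as used from mdp5_plane.c / mdp5_crtc.c, each client of a
+ * plane's pipe goes through the three steps like:
+ *
+ *   mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), nblks);   step #1
+ *   mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));        step #2
+ *   ... on the next vblank ...
+ *   mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));           step #3
+ */
+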
+static DEFINE_SPINLOCK(smp_lock);
+
+
+/* step #1: update # of blocks pending for the client: */
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, int nblks)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int i, ret = 0, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+
+ avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ if (nblks > avail) {
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ cur_nblks = bitmap_weight(ps->pending, cnt);
+ if (nblks > cur_nblks) {
+ /* grow the existing pending reservation: */
+ for (i = cur_nblks; i < nblks; i++) {
+ int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ set_bit(blk, ps->pending);
+ set_bit(blk, mdp5_kms->smp_state);
+ }
+ } else {
+ /* shrink the existing pending reservation: */
+ for (i = cur_nblks; i > nblks; i--) {
+ int blk = find_first_bit(ps->pending, cnt);
+ clear_bit(blk, ps->pending);
+ /* don't clear in global smp_state until _commit() */
+ }
+ }
+
+fail:
+ spin_unlock_irqrestore(&smp_lock, flags);
+ return ret;
+}
+
+static void update_smp_state(struct mdp5_kms *mdp5_kms,
+ enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+{
+ int cnt = mdp5_kms->smp_blk_cnt;
+ uint32_t blk, val;
+
+ for_each_set_bit(blk, *assigned, cnt) {
+ int idx = blk / 3;
+ int fld = blk % 3;
+
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+
+ switch (fld) {
+ case 0:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ break;
+ case 1:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ break;
+ case 2:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ }
+}
+
+/* step #2: configure hw for union(pending, inuse): */
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t assigned;
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(mdp5_kms, cid, &assigned);
+}
+
+/* step #3: after vblank, copy pending -> inuse: */
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+ struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+ int cnt = mdp5_kms->smp_blk_cnt;
+ mdp5_smp_state_t released;
+
+ /*
+ * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
+ released, cnt);
+ spin_unlock_irqrestore(&smp_lock, flags);
+
+ update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
+}
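
The three-step scheme described in the comment block at the top of mdp5_smp.c can be exercised on its own. Below is a minimal, self-contained userspace sketch of the same pending/in-use bookkeeping; it is an illustration only, not driver code: the ALLOC register writes are replaced by printf(), the availability check mirrors the one above, and all names (struct client, request/configure/commit) are made up for the example.

/* userspace model of the 3-step SMP bookkeeping; illustrative only */
#include <stdio.h>
#include <stdint.h>

#define NBLKS 22                            /* MAX_SMP_BLOCKS */

struct client {
    uint32_t inuse;
    uint32_t pending;
};

static uint32_t pool;                       /* blocks owned by any client */

/* step 1: grow/shrink this client's pending set */
static int request(struct client *c, int nblks)
{
    int cur = __builtin_popcount(c->pending);
    int avail = NBLKS - __builtin_popcount(pool);

    if (nblks > avail)
        return -1;                          /* -ENOSPC */

    for (; cur < nblks; cur++) {
        int blk = __builtin_ctz(~pool);     /* first free block */
        c->pending |= 1u << blk;
        pool |= 1u << blk;
    }
    for (; cur > nblks; cur--) {
        int blk = __builtin_ctz(c->pending);  /* first pending block */
        c->pending &= ~(1u << blk);
        /* pool is only trimmed at commit() time */
    }
    return 0;
}

/* step 2: program ALLOC for union(pending, inuse), before FLUSH */
static void configure(struct client *c, int cid)
{
    printf("ALLOC <- cid=%d blocks=%#x\n", cid, c->inuse | c->pending);
}

/* step 3: after vblank, release no-longer-used blocks, pending -> inuse */
static void commit(struct client *c, int cid)
{
    uint32_t released = c->inuse & ~c->pending;

    pool &= ~released;
    if (released)
        printf("ALLOC <- cid=UNUSED blocks=%#x\n", released);
    c->inuse = c->pending;
}

int main(void)
{
    struct client vig0_y = { 0, 0 };

    request(&vig0_y, 3);        /* plane setup: 3 blocks */
    configure(&vig0_y, 1);
    commit(&vig0_y, 1);
    request(&vig0_y, 1);        /* smaller scanout: shrink to 1 */
    configure(&vig0_y, 1);
    commit(&vig0_y, 1);         /* 2 blocks go back to the pool */
    return 0;
}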
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644
index 000000000000..0ab739e1a1dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include "msm_drv.h"
+
+#define MAX_SMP_BLOCKS 22
+#define SMP_BLK_SIZE 4096
+#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+struct mdp5_client_smp_state {
+ mdp5_smp_state_t inuse;
+ mdp5_smp_state_t pending;
+};
+
+struct mdp5_kms;
+
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+
+
+#endif /* __MDP5_SMP_H__ */
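
The per-plane block count is not computed in this header (that happens in the plane code), but SMP_BLK_SIZE gives a sense of scale. The sketch below is a rough estimate only: the two-lines-per-component fetch depth and the helper name are assumptions for the example, not part of this patch.

#include <linux/kernel.h>       /* DIV_ROUND_UP */
#include "mdp5_smp.h"

static inline int est_smp_blocks(int width, int cpp, int nlines)
{
    return DIV_ROUND_UP(width * cpp * nlines, SMP_BLK_SIZE);
}

/* e.g. est_smp_blocks(1920, 4, 2) == 4 blocks for one ARGB8888 client,
 * comfortably below MAX_SMP_BLOCKS (22).
 */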
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
new file mode 100644
index 000000000000..a9629b85b983
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -0,0 +1,78 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+};
+
+enum mdp_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mdp_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 17330b0927b2..e0a6ffbe6ab4 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -17,7 +17,7 @@
#include "msm_drv.h"
-#include "mdp4_kms.h"
+#include "mdp_kms.h"
#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
.base = { .pixel_format = DRM_FORMAT_ ## name }, \
@@ -34,7 +34,7 @@
#define BPC0A 0
-static const struct mdp4_format formats[] = {
+static const struct mdp_format formats[] = {
/* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
@@ -44,12 +44,11 @@ static const struct mdp4_format formats[] = {
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
};
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
- uint32_t max_formats)
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
{
uint32_t i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp4_format *f = &formats[i];
+ const struct mdp_format *f = &formats[i];
if (i == max_formats)
break;
@@ -60,11 +59,11 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
return i;
}
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- const struct mdp4_format *f = &formats[i];
+ const struct mdp_format *f = &formats[i];
if (f->base.pixel_format == format)
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644
index 000000000000..3be48f7c36be
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+ struct mdp_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+ struct mdp_irq *irq;
+ uint32_t irqmask = mdp_kms->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &mdp_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+ struct mdp_irq *handler, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp_kms->in_irq = false;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ if (enable)
+ mdp_kms->vblank_mask |= mask;
+ else
+ mdp_kms->vblank_mask &= ~mask;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp_irq_wait *wait =
+ container_of(irq, struct mdp_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ struct mdp_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp_irq_register(mdp_kms, &wait.irq);
+ wait_event(wait_event, (wait.count <= 0));
+ mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp_kms->irq_list);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
new file mode 100644
index 000000000000..99557b5ad4fd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+ struct msm_kms_funcs base;
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+ struct msm_kms base;
+
+ const struct mdp_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+ const struct mdp_kms_funcs *funcs)
+{
+ mdp_kms->funcs = funcs;
+ INIT_LIST_HEAD(&mdp_kms->irq_list);
+ msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+ struct msm_format base;
+ enum mdp_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+
+#endif /* __MDP_KMS_H__ */
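
A short usage sketch of the irq helpers declared above: a long-lived handler registered with mdp_irq_register() stays on irq_list until it is unregistered, while mdp_irq_wait() registers a one-shot waiter, sleeps until the requested mask fires once, and removes it again. The handler body and the irqmask values below are placeholders, not part of this patch.

#include "mdp_kms.h"

static void example_error_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
    DRM_ERROR("errors: %08x\n", irqstatus);
}

static struct mdp_irq example_error_handler = {
    .irq = example_error_irq,
    .irqmask = 0x3,                     /* placeholder underrun bits */
};

static void example(struct mdp_kms *mdp_kms, uint32_t done_irqmask)
{
    /* long-lived: dispatched from mdp_dispatch_irqs() until removed */
    mdp_irq_register(mdp_kms, &example_error_handler);

    /* one-shot: block until done_irqmask has fired once */
    mdp_irq_wait(mdp_kms, done_irqmask);

    mdp_irq_unregister(mdp_kms, &example_error_handler);
}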
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
deleted file mode 100644
index 5c6b7fca4edd..000000000000
--- a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-
-struct mdp4_irq_wait {
- struct mdp4_irq irq;
- int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
-{
- struct mdp4_irq *irq;
- uint32_t irqmask = mdp4_kms->vblank_mask;
-
- BUG_ON(!spin_is_locked(&list_lock));
-
- list_for_each_entry(irq, &mdp4_kms->irq_list, node)
- irqmask |= irq->irqmask;
-
- mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
-}
-
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
- unsigned long flags;
- spin_lock_irqsave(&list_lock, flags);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
-{
- DRM_ERROR("errors: %08x\n", irqstatus);
-}
-
-void mdp4_irq_preinstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
-}
-
-int mdp4_irq_postinstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
- INIT_LIST_HEAD(&mdp4_kms->irq_list);
-
- error_handler->irq = mdp4_irq_error_handler;
- error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
- MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-
- mdp4_irq_register(mdp4_kms, error_handler);
-
- return 0;
-}
-
-void mdp4_irq_uninstall(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
-}
-
-irqreturn_t mdp4_irq(struct msm_kms *kms)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct mdp4_irq *handler, *n;
- unsigned long flags;
- unsigned int id;
- uint32_t status;
-
- status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
- mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
-
- VERB("status=%08x", status);
-
- for (id = 0; id < priv->num_crtcs; id++)
- if (status & mdp4_crtc_vblank(priv->crtcs[id]))
- drm_handle_vblank(dev, id);
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->in_irq = true;
- list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
- if (handler->irqmask & status) {
- spin_unlock_irqrestore(&list_lock, flags);
- handler->irq(handler, handler->irqmask & status);
- spin_lock_irqsave(&list_lock, flags);
- }
- }
- mdp4_kms->in_irq = false;
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-
- return IRQ_HANDLED;
-}
-
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-
- return 0;
-}
-
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
- mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
- update_irq(mdp4_kms);
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
- struct mdp4_irq_wait *wait =
- container_of(irq, struct mdp4_irq_wait, irq);
- wait->count--;
- wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
- struct mdp4_irq_wait wait = {
- .irq = {
- .irq = wait_irq,
- .irqmask = irqmask,
- },
- .count = 1,
- };
- mdp4_irq_register(mdp4_kms, &wait.irq);
- wait_event(wait_event, (wait.count <= 0));
- mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
- unsigned long flags;
- bool needs_update = false;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (!irq->registered) {
- irq->registered = true;
- list_add(&irq->node, &mdp4_kms->irq_list);
- needs_update = !mdp4_kms->in_irq;
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-
- if (needs_update)
- update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
- unsigned long flags;
- bool needs_update = false;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (irq->registered) {
- irq->registered = false;
- list_del(&irq->node);
- needs_update = !mdp4_kms->in_irq;
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-
- if (needs_update)
- update_irq_unlocked(mdp4_kms);
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 86537692e45c..e6adafc7eff3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_kms.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.output_poll_changed = msm_fb_output_poll_changed,
};
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
- unsigned long iova, int flags, void *arg)
-{
- DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
-}
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_iommus++;
+ int idx = priv->num_mmus++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
return -EINVAL;
- priv->iommus[idx] = iommu;
-
- iommu_set_fault_handler(iommu, msm_fault_handler, dev);
-
- /* need to iommu_attach_device() somewhere?? on resume?? */
+ priv->mmus[idx] = mmu;
return idx;
}
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
- const char **names, int cnt)
-{
- int i, ret;
-
- for (i = 0; i < cnt; i++) {
- /* TODO maybe some day msm iommu won't require this hack: */
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (!ctx)
- continue;
- ret = iommu_attach_device(iommu, ctx);
- if (ret) {
- dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
- return ret;
- }
- }
- return 0;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -82,6 +52,10 @@ module_param(reglog, bool, 0600);
#define reglog 0
#endif
+static char *vram;
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
+
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
@@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
+ if (priv->vram.paddr) {
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ drm_mm_takedown(&priv->vram.mm);
+ dma_free_attrs(dev->dev, priv->vram.size, NULL,
+ priv->vram.paddr, &attrs);
+ }
+
dev->dev_private = NULL;
kfree(priv);
@@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev)
return 0;
}
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const static struct of_device_id match_types[] = { {
+ .compatible = "qcom,mdss_mdp",
+ .data = (void *)5,
+ }, {
+ /* end node */
+ } };
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ match = of_match_node(match_types, dev->of_node);
+ if (match)
+ return (int)match->data;
+#endif
+ return 4;
+}
+
static int msm_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
@@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
- kms = mdp4_kms_init(dev);
+ /* if we have no IOMMU, then we need to use carveout allocator.
+ * Grab the entire CMA chunk carved out in early startup in
+ * mach-msm:
+ */
+ if (!iommu_present(&platform_bus_type)) {
+ DEFINE_DMA_ATTRS(attrs);
+ unsigned long size;
+ void *p;
+
+ DBG("using %s VRAM carveout", vram);
+ size = memparse(vram, NULL);
+ priv->vram.size = size;
+
+ drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+
+ /* note that for no-kernel-mapping, the vaddr returned
+ * is bogus, but non-null if allocation succeeded:
+ */
+ p = dma_alloc_attrs(dev->dev, size,
+ &priv->vram.paddr, 0, &attrs);
+ if (!p) {
+ dev_err(dev->dev, "failed to allocate VRAM\n");
+ priv->vram.paddr = 0;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ (uint32_t)priv->vram.paddr,
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
+ switch (get_mdp_ver(pdev)) {
+ case 4:
+ kms = mdp4_kms_init(dev);
+ break;
+ case 5:
+ kms = mdp5_kms_init(dev);
+ break;
+ default:
+ kms = ERR_PTR(-ENODEV);
+ break;
+ }
+
if (IS_ERR(kms)) {
/*
* NOTE: once we have GPU support, having no kms should not
@@ -326,7 +372,7 @@ static void msm_lastclose(struct drm_device *dev)
}
}
-static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
@@ -415,7 +461,7 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
@@ -778,12 +824,13 @@ static const struct dev_pm_ops msm_pm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return drm_platform_init(&msm_driver, pdev);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
- drm_platform_exit(&msm_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
@@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = {
{ }
};
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss_mdp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
.driver = {
.owner = THIS_MODULE,
.name = "msm",
+ .of_match_table = dt_match,
.pm = &msm_pm_ops,
},
.id_table = msm_id,
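
The carveout path added above is driven by the new vram module parameter: memparse() accepts k/m/g suffixes, so on a device without an IOMMU the driver would be loaded with something like msm.vram=16m (the size is only an example). A condensed, illustrative sketch of that path, with error handling trimmed and a made-up function name:

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static int example_vram_init(struct drm_device *dev, struct drm_mm *mm,
        dma_addr_t *paddr, const char *opt)
{
    DEFINE_DMA_ATTRS(attrs);
    unsigned long size = memparse(opt, NULL);   /* opt e.g. "16m" */
    void *p;

    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

    /* with NO_KERNEL_MAPPING the returned vaddr is not usable; it is
     * only checked to see whether the allocation succeeded:
     */
    p = dma_alloc_attrs(dev->dev, size, paddr, GFP_KERNEL, &attrs);
    if (!p)
        return -ENOMEM;

    /* the allocator layered on top is managed at page granularity */
    drm_mm_init(mm, 0, (size >> PAGE_SHIFT) - 1);
    return 0;
}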
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d39f0862b19e..3d63269c5b29 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,6 +31,15 @@
#include <linux/types.h>
#include <asm/sizes.h>
+
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
@@ -44,6 +53,7 @@
struct msm_kms;
struct msm_gpu;
+struct msm_mmu;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -76,9 +86,9 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
- /* registered IOMMU domains: */
- unsigned int num_iommus;
- struct iommu_domain *iommus[NUM_DOMAINS];
+ /* registered MMUs: */
+ unsigned int num_mmus;
+ struct msm_mmu *mmus[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[8];
@@ -94,6 +104,16 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+
+ /* VRAM carveout, used when no IOMMU: */
+ struct {
+ unsigned long size;
+ dma_addr_t paddr;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ } vram;
};
struct msm_format {
@@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
-/* As there are different display controller blocks depending on the
- * snapdragon version, the kms support is split out and the appropriate
- * implementation is loaded at runtime. The kms module is responsible
- * for constructing the appropriate planes/crtcs/encoders/connectors.
- */
-struct msm_kms_funcs {
- /* hw initialization: */
- int (*hw_init)(struct msm_kms *kms);
- /* irq handling: */
- void (*irq_preinstall)(struct msm_kms *kms);
- int (*irq_postinstall)(struct msm_kms *kms);
- void (*irq_uninstall)(struct msm_kms *kms);
- irqreturn_t (*irq)(struct msm_kms *kms);
- int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- /* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
- long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
- struct drm_encoder *encoder);
- /* cleanup: */
- void (*preclose)(struct msm_kms *kms, struct drm_file *file);
- void (*destroy)(struct msm_kms *kms);
-};
-
-struct msm_kms {
- const struct msm_kms_funcs *funcs;
-};
-
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
- const char **names, int cnt);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
@@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
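
Because priv->vram.mm hands out drm_mm_node positions in pages rather than bytes, turning an allocated node back into a bus address means shifting by PAGE_SHIFT and adding the carveout base. This mirrors the physaddr() helper added to msm_gem.c later in this patch; the helper name here is illustrative only.

#include "msm_drv.h"

static inline dma_addr_t vram_node_to_paddr(struct msm_drm_private *priv,
        struct drm_mm_node *node)
{
    return priv->vram.paddr + ((dma_addr_t)node->start << PAGE_SHIFT);
}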
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0286c0eeb10c..81bafdf19ab3 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -16,6 +16,7 @@
*/
#include "msm_drv.h"
+#include "msm_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e587d251c590..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -22,7 +22,45 @@
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
+#include "msm_mmu.h"
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+ priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+ int npages)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ dma_addr_t paddr;
+ struct page **p;
+ int ret, i;
+
+ p = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+ npages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ drm_free_large(p);
+ return ERR_PTR(ret);
+ }
+
+ paddr = physaddr(obj);
+ for (i = 0; i < npages; i++) {
+ p[i] = phys_to_page(paddr);
+ paddr += PAGE_SIZE;
+ }
+
+ return p;
+}
/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
- struct page **p = drm_gem_get_pages(obj, 0);
+ struct page **p;
int npages = obj->size >> PAGE_SHIFT;
+ if (iommu_present(&platform_bus_type))
+ p = drm_gem_get_pages(obj, 0);
+ else
+ p = get_pages_vram(obj, npages);
+
if (IS_ERR(p)) {
dev_err(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ if (iommu_present(&platform_bus_type))
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ else
+ drm_mm_remove_node(msm_obj->vram_node);
+
msm_obj->pages = NULL;
}
}
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct page **pages;
unsigned long pfn;
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(msm_obj->pages[pgoff]);
+ pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
- struct sg_table *sgt, unsigned int len, int prot)
-{
- struct scatterlist *sg;
- unsigned int da = iova;
- unsigned int i, j;
- int ret;
-
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
-
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
- struct sg_table *sgt, unsigned int len)
-{
- struct scatterlist *sg;
- unsigned int da = iova;
- int i;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- break;
-
- VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
- da += bytes;
- }
-}
-
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
- uint32_t offset = (uint32_t)mmap_offset(obj);
- struct page **pages;
- pages = get_pages(obj);
+ struct msm_mmu *mmu = priv->mmus[id];
+ struct page **pages = get_pages(obj);
+
if (IS_ERR(pages))
return PTR_ERR(pages);
- // XXX ideally we would not map buffers writable when not needed...
- ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+
+ if (iommu_present(&platform_bus_type)) {
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+ obj->size, IOMMU_READ | IOMMU_WRITE);
+ msm_obj->domain[id].iova = offset;
+ } else {
+ msm_obj->domain[id].iova = physaddr(obj);
+ }
}
if (!ret)
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id;
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- if (msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
uint32_t offset = (uint32_t)mmap_offset(obj);
- unmap_range(priv->iommus[id], offset,
- msm_obj->sgt, obj->size);
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
+ unsigned sz;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev,
return -EINVAL;
}
- msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ sz = sizeof(*msm_obj);
+ if (!iommu_present(&platform_bus_type))
+ sz += sizeof(struct drm_mm_node);
+
+ msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
+ if (!iommu_present(&platform_bus_type))
+ msm_obj->vram_node = (void *)&msm_obj[1];
+
msm_obj->flags = flags;
msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = NULL;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,15 +632,19 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
+ if (iommu_present(&platform_bus_type)) {
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ } else {
+ drm_gem_private_object_init(dev, obj, size);
+ }
return obj;
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct drm_gem_object *obj;
int ret, npages;
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!iommu_present(&platform_bus_type)) {
+ dev_err(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
size = PAGE_ALIGN(size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
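
From a caller's point of view the allocation path looks the same with or without an IOMMU; only the backing store (shmem pages vs. carveout) and the meaning of the returned iova change. A usage sketch, with error handling trimmed and an illustrative function name:

#include "msm_drv.h"

static int example_alloc_and_pin(struct drm_device *dev, uint32_t size,
        int kms_id, struct drm_gem_object **bo)
{
    uint32_t iova;
    int ret;

    mutex_lock(&dev->struct_mutex);
    *bo = msm_gem_new(dev, size, MSM_BO_WC);
    mutex_unlock(&dev->struct_mutex);
    if (IS_ERR(*bo))
        return PTR_ERR(*bo);

    /* with an IOMMU this maps the sg list via mmu->funcs->map();
     * without one it simply returns physaddr() inside the carveout:
     */
    ret = msm_gem_get_iova(*bo, kms_id, &iova);
    if (ret)
        drm_gem_object_unreference_unlocked(*bo);
    return ret;
}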
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f4f23a578d9d..3246bb46c4f2 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -57,6 +57,11 @@ struct msm_gem_object {
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
+
+ /* For physically contiguous buffers. Used when we don't have
+ * an IOMMU.
+ */
+ struct drm_mm_node *vram_node;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4583d61556f5..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -17,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_mmu.h"
/*
@@ -25,20 +26,10 @@
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
-#include <mach/kgsl.h>
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+static void bs_init(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
- struct kgsl_device_platform_data *pdata;
-
- if (!pdev) {
- dev_err(dev->dev, "could not find dtv pdata\n");
- return;
- }
-
- pdata = pdev->dev.platform_data;
- if (pdata->bus_scale_table) {
- gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+ if (gpu->bus_scale_table) {
+ gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
DBG("bus scale client: %08x", gpu->bsc);
}
}
@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx)
}
}
#else
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
@@ -307,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -340,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -363,6 +351,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
+ struct iommu_domain *iommu;
int i, ret;
gpu->dev = drm;
@@ -428,13 +417,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- gpu->iommu = iommu_domain_alloc(&platform_bus_type);
- if (!gpu->iommu) {
- dev_err(drm->dev, "failed to allocate IOMMU\n");
- ret = -ENOMEM;
- goto fail;
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (iommu) {
+ dev_info(drm->dev, "%s: using IOMMU\n", name);
+ gpu->mmu = msm_iommu_new(drm, iommu);
+ } else {
+ dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_iommu(drm, gpu->iommu);
+ gpu->id = msm_register_mmu(drm, gpu->mmu);
/* Create ringbuffer: */
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
@@ -452,7 +442,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- bs_init(gpu, pdev);
+ bs_init(gpu);
return 0;
@@ -474,6 +464,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->iommu)
- iommu_domain_free(gpu->iommu);
+ if (gpu->mmu)
+ gpu->mmu->funcs->destroy(gpu->mmu);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 8cd829e520bb..458db8c64c28 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -78,14 +78,18 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
struct clk *ebi1_clk, *grp_clks[5];
uint32_t fast_rate, slow_rate, bus_freq;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bus_scale_table;
uint32_t bsc;
+#endif
/* Hang Detction: */
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644
index 000000000000..92b745986231
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return 0;
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct drm_device *dev = mmu->dev;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ ret = iommu_attach_device(iommu->domain, ctx);
+ if (ret) {
+ dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return unmapped;
+
+ VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ iommu_domain_free(iommu->domain);
+ kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_iommu_attach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+{
+ struct msm_iommu *iommu;
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return ERR_PTR(-ENOMEM);
+
+ iommu->domain = domain;
+ msm_mmu_init(&iommu->base, dev, &funcs);
+ iommu_set_fault_handler(domain, msm_fault_handler, dev);
+
+ return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644
index 000000000000..06437745bc2c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* misc: */
+ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ /* cleanup: */
+ void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+
+static inline void msm_kms_init(struct msm_kms *kms,
+ const struct msm_kms_funcs *funcs)
+{
+ kms->funcs = funcs;
+}
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#endif /* __MSM_KMS_H__ */
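
The core driver only ever reaches the display hardware through this vtable; the shared IRQ handler in msm_drv.c, for example, simply forwards to whichever backend was selected at load time. A simplified sketch of that dispatch (condensed from msm_irq(); priv->kms is the backend pointer):

#include "msm_drv.h"
#include "msm_kms.h"

static irqreturn_t example_irq(int irq, void *arg)
{
    struct drm_device *dev = arg;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    BUG_ON(!kms);
    return kms->funcs->irq(kms);
}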
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644
index 000000000000..030324482b4a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+struct msm_mmu_funcs {
+ int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ unsigned len);
+ void (*destroy)(struct msm_mmu *mmu);
+};
+
+struct msm_mmu {
+ const struct msm_mmu_funcs *funcs;
+ struct drm_device *dev;
+};
+
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+ const struct msm_mmu_funcs *funcs)
+{
+ mmu->dev = dev;
+ mmu->funcs = funcs;
+}
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+
+#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7cf787d697b1..637c29a33127 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,7 +11,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
@@ -19,7 +19,6 @@ config DRM_NOUVEAU
# Similar to i915, we need to select ACPI_VIDEO and it's dependencies
select BACKLIGHT_LCD_SUPPORT if ACPI && X86
select BACKLIGHT_CLASS_DEVICE if ACPI && X86
- select VIDEO_OUTPUT_CONTROL if ACPI && X86
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index b3fa1ba191b7..d310c195bdfe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,6 +41,7 @@ nouveau-y += core/subdev/bios/init.o
nouveau-y += core/subdev/bios/mxm.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/ramcfg.o
nouveau-y += core/subdev/bios/rammap.o
nouveau-y += core/subdev/bios/timing.o
nouveau-y += core/subdev/bios/therm.o
@@ -71,7 +72,10 @@ nouveau-y += core/subdev/devinit/nv10.o
nouveau-y += core/subdev/devinit/nv1a.o
nouveau-y += core/subdev/devinit/nv20.o
nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/devinit/nv84.o
+nouveau-y += core/subdev/devinit/nv98.o
nouveau-y += core/subdev/devinit/nva3.o
+nouveau-y += core/subdev/devinit/nvaf.o
nouveau-y += core/subdev/devinit/nvc0.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
@@ -137,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
nouveau-y += core/subdev/mc/nv50.o
nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
@@ -232,6 +237,7 @@ nouveau-y += core/engine/fifo/nv50.o
nouveau-y += core/engine/fifo/nv84.o
nouveau-y += core/engine/fifo/nvc0.o
nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/nv108.o
nouveau-y += core/engine/graph/ctxnv40.o
nouveau-y += core/engine/graph/ctxnv50.o
nouveau-y += core/engine/graph/ctxnvc0.o
@@ -242,6 +248,7 @@ nouveau-y += core/engine/graph/ctxnvd7.o
nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxnv108.o
nouveau-y += core/engine/graph/nv04.o
nouveau-y += core/engine/graph/nv10.o
nouveau-y += core/engine/graph/nv20.o
@@ -260,6 +267,7 @@ nouveau-y += core/engine/graph/nvd7.o
nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/nv108.o
nouveau-y += core/engine/mpeg/nv31.o
nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv44.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
index c8bed4a26833..1f6954ae9dd3 100644
--- a/drivers/gpu/drm/nouveau/core/core/engine.c
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -42,11 +42,24 @@ nouveau_engine_create_(struct nouveau_object *parent,
if (ret)
return ret;
- if ( parent &&
- !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
- if (!enable)
- nv_warn(engine, "disabled, %s=1 to enable\n", iname);
- return -ENODEV;
+ if (parent) {
+ struct nouveau_device *device = nv_device(parent);
+ int engidx = nv_engidx(nv_object(engine));
+
+ if (device->disable_mask & (1ULL << engidx)) {
+ if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+ nv_debug(engine, "engine disabled by hw/fw\n");
+ return -ENODEV;
+ }
+
+ nv_warn(engine, "ignoring hw/fw engine disable\n");
+ }
+
+ if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+ if (!enable)
+ nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+ return -ENODEV;
+ }
}
INIT_LIST_HEAD(&engine->contexts);
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 993df09ad643..ac3291f781f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -105,9 +105,6 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000100)
- return -ENODEV;
-
ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
"PCE0", "copy0", &priv);
*pobject = nv_object(priv);
@@ -133,9 +130,6 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000200)
- return -ENODEV;
-
ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
"PCE1", "copy1", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 30f1ef1edcc5..748a61eb3c6f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -88,9 +88,6 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000100)
- return -ENODEV;
-
ret = nouveau_engine_create(parent, engine, oclass, true,
"PCE0", "copy0", &priv);
*pobject = nv_object(priv);
@@ -112,9 +109,6 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_copy_priv *priv;
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000200)
- return -ENODEV;
-
ret = nouveau_engine_create(parent, engine, oclass, true,
"PCE1", "copy1", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index dbd2dde7b7e7..32113b08c4d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -49,12 +49,12 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
@@ -67,12 +67,12 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 6e03dd6abeea..744f15d7e131 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -51,12 +51,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -68,12 +68,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -87,12 +87,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -106,12 +106,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -125,12 +125,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
@@ -144,12 +144,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -163,12 +163,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -182,12 +182,12 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index dcde53b9f07f..27ba61fb2710 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -52,12 +52,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -109,12 +109,12 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 7b8662ef4f59..fd47ace67543 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -52,12 +52,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -110,12 +110,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
@@ -130,12 +130,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index c8c41e93695e..08b88591ed60 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -57,12 +57,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -80,12 +80,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -103,12 +103,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -126,12 +126,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv41_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -149,12 +149,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv40_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -172,12 +172,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv47_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -195,12 +195,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -218,12 +218,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv49_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -241,12 +241,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -264,12 +264,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -287,12 +287,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv44_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -310,12 +310,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -333,12 +333,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,12 +356,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -379,12 +379,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -402,12 +402,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db3fc7be856a..81d5c26643d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -65,12 +65,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv50_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -90,12 +90,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -118,12 +118,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -146,12 +146,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -174,12 +174,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -202,12 +202,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -230,12 +230,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -258,12 +258,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv84_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -286,12 +286,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -314,12 +314,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaa_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
@@ -342,12 +342,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -372,12 +372,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -401,12 +401,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nva3_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
@@ -430,12 +430,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvaf_fb_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index dbc5e33de94f..b7d66b59f43d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -65,14 +65,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -97,14 +97,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -129,14 +129,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -160,14 +160,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -192,14 +192,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -224,14 +224,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -255,14 +255,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
@@ -287,14 +287,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -318,14 +318,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 3900104976fc..987edbc30a09 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -65,14 +65,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -98,14 +98,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -131,14 +131,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -164,14 +164,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
@@ -199,29 +199,27 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
- device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
- device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-#if 0
- device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
-#endif
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass;
-#if 0
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+#if 0
device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a0bc8a89b699..7cf8b1348632 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -31,9 +31,45 @@ struct nv04_disp_priv {
struct nouveau_disp base;
};
+static int
+nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv04_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV04_DISP_MTHD_HEAD);
+ u32 line;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
+ args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
+ args->vblanke = args->vtotal - 1;
+
+ args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
+ args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+ args->hblanke = args->htotal - 1;
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+ args->time[1] = ktime_to_ns(ktime_get());
+ args->hline = (line & 0xffff0000) >> 16;
+ args->vline = (line & 0x0000ffff);
+ return 0;
+}
+
+#define HEAD_MTHD(n) (n), (n) + 0x01
+
+static struct nouveau_omthds
+nv04_disp_omthds[] = {
+ { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
+ {}
+};
+
static struct nouveau_oclass
nv04_disp_sclass[] = {
- { NV04_DISP_CLASS, &nouveau_object_ofuncs },
+ { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index c168ae3eaa97..9ad722e4e087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -541,6 +541,35 @@ nv50_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+int
+nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+ blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+ blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+ total = nv_rd32(priv, 0x610afc + (head * 0x540));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
static void
nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
{
@@ -675,6 +704,7 @@ nv50_disp_base_ofuncs = {
static struct nouveau_omthds
nv50_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
{ DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
@@ -1112,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 ctrl = nv_rd32(priv, 0x610794 + soff);
u32 datarate;
switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1ae6ceb56704..d31d426ea1f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -43,6 +43,10 @@ struct nv50_disp_priv {
} pior;
};
+#define HEAD_MTHD(n) (n), (n) + 0x03
+
+int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+
#define DAC_MTHD(n) (n), (n) + 0x03
int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
@@ -132,13 +136,12 @@ void nv50_disp_intr(struct nouveau_subdev *);
extern struct nouveau_omthds nv84_disp_base_omthds[];
-extern struct nouveau_omthds nva3_disp_base_omthds[];
-
extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_omthds nvd0_disp_base_omthds[];
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d8c74c0883a1..ef9ce300a496 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -41,6 +41,7 @@ nv84_disp_sclass[] = {
struct nouveau_omthds
nv84_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a66f949c1f84..a518543c00ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -41,6 +41,7 @@ nv94_disp_sclass[] = {
static struct nouveau_omthds
nv94_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index b75413169eae..6ad6dcece43b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -39,8 +39,9 @@ nva3_disp_sclass[] = {
{}
};
-struct nouveau_omthds
+static struct nouveau_omthds
nva3_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
{ SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 378a015091d2..1c5e4e8b2c82 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -440,6 +440,36 @@ nvd0_disp_curs_ofuncs = {
* Base display object
******************************************************************************/
+static int
+nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv04_display_scanoutpos *args = data;
+ const int head = (mthd & NV50_DISP_MTHD_HEAD);
+ u32 blanke, blanks, total;
+
+ if (size < sizeof(*args) || head >= priv->head.nr)
+ return -EINVAL;
+
+ total = nv_rd32(priv, 0x640414 + (head * 0x300));
+ blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+ blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+
+ args->vblanke = (blanke & 0xffff0000) >> 16;
+ args->hblanke = (blanke & 0x0000ffff);
+ args->vblanks = (blanks & 0xffff0000) >> 16;
+ args->hblanks = (blanks & 0x0000ffff);
+ args->vtotal = ( total & 0xffff0000) >> 16;
+ args->htotal = ( total & 0x0000ffff);
+
+ args->time[0] = ktime_to_ns(ktime_get());
+ args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+ args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ return 0;
+}
+
static void
nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
{
@@ -573,9 +603,24 @@ nvd0_disp_base_ofuncs = {
.fini = nvd0_disp_base_fini,
};
+struct nouveau_omthds
+nvd0_disp_base_omthds[] = {
+ { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
+ { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
+ {},
+};
+
static struct nouveau_oclass
nvd0_disp_base_oclass[] = {
- { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -967,9 +1012,6 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index fb1fe6ae5e74..ab63f32c00b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -41,7 +41,7 @@ nve0_disp_sclass[] = {
static struct nouveau_oclass
nve0_disp_base_oclass[] = {
- { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -54,9 +54,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 42aa6b97dbea..05fee10e0c97 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -41,7 +41,7 @@ nvf0_disp_sclass[] = {
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
- { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
{}
};
@@ -54,9 +54,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int heads = nv_rd32(parent, 0x022448);
int ret;
- if (nv_rd32(parent, 0x022500) & 0x00000001)
- return -ENODEV;
-
ret = nouveau_disp_create(parent, engine, oclass, heads,
"PDISP", "display", &priv);
*pobject = nv_object(priv);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
index 5a1c68474597..8836c3cb99c3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -138,10 +138,15 @@ nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
bool
nv_lockvgac(void *obj, bool lock)
{
+ struct nouveau_device *dev = nv_device(obj);
+
bool locked = !nv_rdvgac(obj, 0, 0x1f);
u8 data = lock ? 0x99 : 0x57;
- nv_wrvgac(obj, 0, 0x1f, data);
- if (nv_device(obj)->chipset == 0x11) {
+ if (dev->card_type < NV_50)
+ nv_wrvgac(obj, 0, 0x1f, data);
+ else
+ nv_wrvgac(obj, 0, 0x3f, data);
+ if (dev->chipset == 0x11) {
if (!(nv_rd32(obj, 0x001084) & 0x10000000))
nv_wrvgac(obj, 1, 0x1f, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index e03fc8e4dc1d..5e077e4ed7f6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
nv_wr32(falcon, falcon->addr + addr, data);
}
+static void *
+vmemdup(const void *src, size_t len)
+{
+ void *p = vmalloc(len);
+
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+
int
_nouveau_falcon_init(struct nouveau_object *object)
{
@@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
ret = request_firmware(&fw, name, &device->pdev->dev);
if (ret == 0) {
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
falcon->data.data = NULL;
falcon->data.size = 0;
@@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
return ret;
}
- falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.data = vmemdup(fw->data, fw->size);
falcon->data.size = fw->size;
release_firmware(fw);
if (!falcon->data.data)
@@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
return ret;
}
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
release_firmware(fw);
if (!falcon->code.data)
@@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
if (!suspend) {
nouveau_gpuobj_ref(NULL, &falcon->core);
if (falcon->external) {
- kfree(falcon->data.data);
- kfree(falcon->code.data);
+ vfree(falcon->data.data);
+ vfree(falcon->code.data);
falcon->code.data = NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
new file mode 100644
index 000000000000..09362a51ba57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nve0.h"
+
+struct nouveau_oclass *
+nv108_fifo_oclass = &(struct nve0_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_ctor,
+ .dtor = nve0_fifo_dtor,
+ .init = nve0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+ .channels = 1024,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 9ac94d4e5646..b22a33f0702d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -33,6 +33,7 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 04f412922d2d..54c1b5b471cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -33,10 +33,12 @@
#include <subdev/timer.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
-#include <engine/fifo.h>
+
+#include "nve0.h"
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
@@ -56,8 +58,8 @@ static const struct {
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
struct nve0_fifo_engn {
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
+ struct nouveau_gpuobj *runlist[2];
+ int cur_runlist;
};
struct nve0_fifo_priv {
@@ -86,7 +88,7 @@ struct nve0_fifo_chan {
******************************************************************************/
static void
-nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nve0_fifo_engn *engn = &priv->engine[engine];
@@ -95,8 +97,8 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
int i, p;
mutex_lock(&nv_subdev(priv)->mutex);
- cur = engn->playlist[engn->cur_playlist];
- engn->cur_playlist = !engn->cur_playlist;
+ cur = engn->runlist[engn->cur_runlist];
+ engn->cur_runlist = !engn->cur_runlist;
for (i = 0, p = 0; i < priv->base.max; i++) {
u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
@@ -110,8 +112,8 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
- nv_error(priv, "playlist %d update timeout\n", engine);
+ if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
+ nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -278,7 +280,7 @@ nve0_fifo_chan_init(struct nouveau_object *object)
nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(priv, chan->engine);
+ nve0_fifo_runlist_update(priv, chan->engine);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
return 0;
}
@@ -291,7 +293,7 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
u32 chid = chan->base.chid;
nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
- nve0_fifo_playlist_update(priv, chan->engine);
+ nve0_fifo_runlist_update(priv, chan->engine);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
@@ -375,54 +377,189 @@ nve0_fifo_cclass = {
* PFIFO engine
******************************************************************************/
-static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+static const struct nouveau_enum nve0_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_engine[] = {
+ { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
+ { 0x03, "IFB" },
+ { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
+ { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
+ { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
+ { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
+ { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
+ { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
+ { 0x13, "PERF" },
+ { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
+ { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
+ { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
+ { 0x17, "PMU" },
+ { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
+ { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
{}
};
static const struct nouveau_enum nve0_fifo_fault_reason[] = {
- { 0x00, "PT_NOT_PRESENT" },
- { 0x01, "PT_TOO_SHORT" },
- { 0x02, "PAGE_NOT_PRESENT" },
- { 0x03, "VM_LIMIT_EXCEEDED" },
- { 0x04, "NO_CHANNEL" },
- { 0x05, "PAGE_SYSTEM_ONLY" },
- { 0x06, "PAGE_READ_ONLY" },
- { 0x0a, "COMPRESSED_SYSRAM" },
- { 0x0c, "INVALID_STORAGE_TYPE" },
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "BOTH_PTES_VALID" },
+ { 0x0f, "INFO_TYPE_POISONED" },
{}
};
static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "MSPDEC" },
+ { 0x0c, "MSPPP" },
+ { 0x0d, "MSVLD" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "GR_COPY" },
+ { 0x19, "CE2" },
+ { 0x1a, "XV" },
+ { 0x1b, "MMU_NB" },
+ { 0x1c, "MSENC" },
+ { 0x1d, "DFALCON" },
+ { 0x1e, "SKED" },
+ { 0x1f, "AFALCON" },
{}
};
static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+ { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+ { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+ { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+ { 0x0c, "RAST" },
+ { 0x0d, "GCC" },
+ { 0x0e, "GPCCS" },
+ { 0x0f, "PROP_0" },
+ { 0x10, "PROP_1" },
+ { 0x11, "PROP_2" },
+ { 0x12, "PROP_3" },
+ { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+ { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+ { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+ { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+ { 0x1f, "GPM" },
+ { 0x20, "LTP_UTLB_0" },
+ { 0x21, "LTP_UTLB_1" },
+ { 0x22, "LTP_UTLB_2" },
+ { 0x23, "LTP_UTLB_3" },
+ { 0x24, "GPC_RGG_UTLB" },
{}
};
-static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
- { 0x00200000, "ILLEGAL_MTHD" },
- { 0x00800000, "EMPTY_SUBC" },
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+ { 0x00000001, "MEMREQ" },
+ { 0x00000002, "MEMACK_TIMEOUT" },
+ { 0x00000004, "MEMACK_EXTRA" },
+ { 0x00000008, "MEMDAT_TIMEOUT" },
+ { 0x00000010, "MEMDAT_EXTRA" },
+ { 0x00000020, "MEMFLUSH" },
+ { 0x00000040, "MEMOP" },
+ { 0x00000080, "LBCONNECT" },
+ { 0x00000100, "LBREQ" },
+ { 0x00000200, "LBACK_TIMEOUT" },
+ { 0x00000400, "LBACK_EXTRA" },
+ { 0x00000800, "LBDAT_TIMEOUT" },
+ { 0x00001000, "LBDAT_EXTRA" },
+ { 0x00002000, "GPFIFO" },
+ { 0x00004000, "GPPTR" },
+ { 0x00008000, "GPENTRY" },
+ { 0x00010000, "GPCRC" },
+ { 0x00020000, "PBPTR" },
+ { 0x00040000, "PBENTRY" },
+ { 0x00080000, "PBCRC" },
+ { 0x00100000, "XBARCONNECT" },
+ { 0x00200000, "METHOD" },
+ { 0x00400000, "METHODCRC" },
+ { 0x00800000, "DEVICE" },
+ { 0x02000000, "SEMAPHORE" },
+ { 0x04000000, "ACQUIRE" },
+ { 0x08000000, "PRI" },
+ { 0x20000000, "NO_CTXSW_SEG" },
+ { 0x40000000, "PBSEG" },
+ { 0x80000000, "SIGNATURE" },
{}
};
static void
-nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+{
+ u32 intr = nv_rd32(priv, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ nv_error(priv, "SCHED_ERROR [");
+ nouveau_enum_print(nve0_fifo_sched_reason, code);
+ pr_cont("]\n");
+}
+
+static void
+nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00256c);
+ nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
+ nv_wr32(priv, 0x00256c, stat);
+}
+
+static void
+nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+{
+ u32 stat = nv_rd32(priv, 0x00259c);
+ nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+}
+
+static void
+nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
- const struct nouveau_enum *en;
- struct nouveau_engine *engine;
+ struct nouveau_engine *engine = NULL;
struct nouveau_object *engctx = NULL;
+ const struct nouveau_enum *en;
+ const char *name = "unknown";
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
pr_cont("] from ");
- en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
if (stat & 0x00000040) {
pr_cont("/");
nouveau_enum_print(nve0_fifo_fault_hubclient, client);
@@ -432,14 +569,22 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
}
if (en && en->data2) {
- engine = nouveau_engine(priv, en->data2);
- if (engine)
- engctx = nouveau_engctx_get(engine, inst);
-
+ if (en->data2 == NVDEV_SUBDEV_BAR) {
+ nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+ name = "BAR1";
+ } else
+ if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
+ nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+ name = "BAR3";
+ } else {
+ engine = nouveau_engine(priv, en->data2);
+ if (engine) {
+ engctx = nouveau_engctx_get(engine, inst);
+ name = nouveau_client_name(engctx);
+ }
+ }
}
-
- pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
- nouveau_client_name(engctx));
+ pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
nouveau_engctx_put(engctx);
}
@@ -471,7 +616,7 @@ out:
}
static void
-nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -487,11 +632,11 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
}
if (show) {
- nv_error(priv, "SUBFIFO%d:", unit);
- nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ nv_error(priv, "PBDMA%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
pr_cont("\n");
nv_error(priv,
- "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+ "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
unit, chid,
nouveau_client_name_for_fifo_chid(&priv->base, chid),
subc, mthd, data);
@@ -508,19 +653,56 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
+ if (stat & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x00252c);
+ nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000010) {
+ nv_error(priv, "PIO_ERROR\n");
+ nv_wr32(priv, 0x002100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
if (stat & 0x00000100) {
- nv_warn(priv, "unknown status 0x00000100\n");
+ nve0_fifo_intr_sched(priv);
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
+ if (stat & 0x00010000) {
+ nve0_fifo_intr_chsw(priv);
+ nv_wr32(priv, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x00800000) {
+ nv_error(priv, "FB_FLUSH_TIMEOUT\n");
+ nv_wr32(priv, 0x002100, 0x00800000);
+ stat &= ~0x00800000;
+ }
+
+ if (stat & 0x01000000) {
+ nv_error(priv, "LB_ERROR\n");
+ nv_wr32(priv, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x08000000) {
+ nve0_fifo_intr_dropped_fault(priv);
+ nv_wr32(priv, 0x002100, 0x08000000);
+ stat &= ~0x08000000;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nve0_fifo_isr_vm_fault(priv, i);
+ nve0_fifo_intr_fault(priv, i);
u &= ~(1 << i);
}
@@ -529,22 +711,28 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x20000000) {
- u32 units = nv_rd32(priv, 0x0025a0);
- u32 u = units;
+ u32 mask = nv_rd32(priv, 0x0025a0);
+ u32 temp = mask;
- while (u) {
- int i = ffs(u) - 1;
- nve0_fifo_isr_subfifo_intr(priv, i);
- u &= ~(1 << i);
+ while (temp) {
+ u32 unit = ffs(temp) - 1;
+ nve0_fifo_intr_pbdma(priv, unit);
+ temp &= ~(1 << unit);
}
- nv_wr32(priv, 0x0025a0, units);
+ nv_wr32(priv, 0x0025a0, mask);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- nv_warn(priv, "unknown status 0x40000000\n");
- nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+ u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+
+ while (mask) {
+ u32 engn = ffs(mask) - 1;
+ /* runlist event, not currently used */
+ mask &= ~(1 << engn);
+ }
+
stat &= ~0x40000000;
}
@@ -575,53 +763,52 @@ nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
}
-static int
-nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+int
+nve0_fifo_fini(struct nouveau_object *object, bool suspend)
{
- struct nve0_fifo_priv *priv;
- int ret, i;
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret;
- ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
- *pobject = nv_object(priv);
+ ret = nouveau_fifo_fini(&priv->base, suspend);
if (ret)
return ret;
- for (i = 0; i < FIFO_ENGINE_NR; i++) {
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].playlist[0]);
- if (ret)
- return ret;
+ /* allow mmu fault interrupts, even when we're not using fifo */
+ nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+ return 0;
+}
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
- 0, &priv->engine[i].playlist[1]);
- if (ret)
- return ret;
- }
+int
+nve0_fifo_init(struct nouveau_object *object)
+{
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret, i;
- ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
- ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
- if (ret)
- return ret;
+ /* enable all available PBDMA units */
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+ nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
- priv->base.uevent->enable = nve0_fifo_uevent_enable;
- priv->base.uevent->disable = nve0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ /* PBDMA[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
- nv_subdev(priv)->unit = 0x00000100;
- nv_subdev(priv)->intr = nve0_fifo_intr;
- nv_engine(priv)->cclass = &nve0_fifo_cclass;
- nv_engine(priv)->sclass = nve0_fifo_sclass;
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(priv, 0x002a00, 0xffffffff);
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0x3fffffff);
return 0;
}
-static void
+void
nve0_fifo_dtor(struct nouveau_object *object)
{
struct nve0_fifo_priv *priv = (void *)object;
@@ -631,50 +818,69 @@ nve0_fifo_dtor(struct nouveau_object *object)
nouveau_gpuobj_ref(NULL, &priv->user.mem);
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
}
nouveau_fifo_destroy(&priv->base);
}
-static int
-nve0_fifo_init(struct nouveau_object *object)
+int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nve0_fifo_priv *priv = (void *)object;
+ struct nve0_fifo_impl *impl = (void *)oclass;
+ struct nve0_fifo_priv *priv;
int ret, i;
- ret = nouveau_fifo_init(&priv->base);
+ ret = nouveau_fifo_create(parent, engine, oclass, 0,
+ impl->channels - 1, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- /* enable all available PSUBFIFOs */
- nv_wr32(priv, 0x000204, 0xffffffff);
- priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
- nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+ for (i = 0; i < FIFO_ENGINE_NR; i++) {
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[0]);
+ if (ret)
+ return ret;
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+ 0, &priv->engine[i].runlist[1]);
+ if (ret)
+ return ret;
}
- nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+ ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ return ret;
- nv_wr32(priv, 0x002a00, 0xffffffff);
- nv_wr32(priv, 0x002100, 0xffffffff);
- nv_wr32(priv, 0x002140, 0x3fffffff);
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ priv->base.uevent->enable = nve0_fifo_uevent_enable;
+ priv->base.uevent->disable = nve0_fifo_uevent_disable;
+ priv->base.uevent->priv = priv;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nve0_fifo_intr;
+ nv_engine(priv)->cclass = &nve0_fifo_cclass;
+ nv_engine(priv)->sclass = nve0_fifo_sclass;
return 0;
}
struct nouveau_oclass *
-nve0_fifo_oclass = &(struct nouveau_oclass) {
- .handle = NV_ENGINE(FIFO, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
+nve0_fifo_oclass = &(struct nve0_fifo_impl) {
+ .base.handle = NV_ENGINE(FIFO, 0xe0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_fifo_ctor,
.dtor = nve0_fifo_dtor,
.init = nve0_fifo_init,
- .fini = _nouveau_fifo_fini,
+ .fini = nve0_fifo_fini,
},
-};
+ .channels = 4096,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
new file mode 100644
index 000000000000..014344ebee66
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FIFO_NVE0_H__
+#define __NVKM_FIFO_NVE0_H__
+
+#include <engine/fifo.h>
+
+int nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nve0_fifo_dtor(struct nouveau_object *);
+int nve0_fifo_init(struct nouveau_object *);
+
+struct nve0_fifo_impl {
+ struct nouveau_oclass base;
+ u32 channels;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
new file mode 100644
index 000000000000..a86bd3352bf8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+static struct nvc0_graph_init
+nv108_grctx_init_icmd[] = {
+ { 0x001000, 1, 0x01, 0x00000004 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x0000a9, 1, 0x01, 0x0000ffff },
+ { 0x000038, 1, 0x01, 0x0fac6881 },
+ { 0x00003d, 1, 0x01, 0x00000001 },
+ { 0x0000e8, 8, 0x01, 0x00000400 },
+ { 0x000078, 8, 0x01, 0x00000300 },
+ { 0x000050, 1, 0x01, 0x00000011 },
+ { 0x000058, 8, 0x01, 0x00000008 },
+ { 0x000208, 8, 0x01, 0x00000001 },
+ { 0x000081, 1, 0x01, 0x00000001 },
+ { 0x000085, 1, 0x01, 0x00000004 },
+ { 0x000088, 1, 0x01, 0x00000400 },
+ { 0x000090, 1, 0x01, 0x00000300 },
+ { 0x000098, 1, 0x01, 0x00001001 },
+ { 0x0000e3, 1, 0x01, 0x00000001 },
+ { 0x0000da, 1, 0x01, 0x00000001 },
+ { 0x0000f8, 1, 0x01, 0x00000003 },
+ { 0x0000fa, 1, 0x01, 0x00000001 },
+ { 0x00009f, 4, 0x01, 0x0000ffff },
+ { 0x0000b1, 1, 0x01, 0x00000001 },
+ { 0x0000ad, 1, 0x01, 0x0000013e },
+ { 0x0000e1, 1, 0x01, 0x00000010 },
+ { 0x000290, 16, 0x01, 0x00000000 },
+ { 0x0003b0, 16, 0x01, 0x00000000 },
+ { 0x0002a0, 16, 0x01, 0x00000000 },
+ { 0x000420, 16, 0x01, 0x00000000 },
+ { 0x0002b0, 16, 0x01, 0x00000000 },
+ { 0x000430, 16, 0x01, 0x00000000 },
+ { 0x0002c0, 16, 0x01, 0x00000000 },
+ { 0x0004d0, 16, 0x01, 0x00000000 },
+ { 0x000720, 16, 0x01, 0x00000000 },
+ { 0x0008c0, 16, 0x01, 0x00000000 },
+ { 0x000890, 16, 0x01, 0x00000000 },
+ { 0x0008e0, 16, 0x01, 0x00000000 },
+ { 0x0008a0, 16, 0x01, 0x00000000 },
+ { 0x0008f0, 16, 0x01, 0x00000000 },
+ { 0x00094c, 1, 0x01, 0x000000ff },
+ { 0x00094d, 1, 0x01, 0xffffffff },
+ { 0x00094e, 1, 0x01, 0x00000002 },
+ { 0x0002ec, 1, 0x01, 0x00000001 },
+ { 0x0002f2, 2, 0x01, 0x00000001 },
+ { 0x0002f5, 1, 0x01, 0x00000001 },
+ { 0x0002f7, 1, 0x01, 0x00000001 },
+ { 0x000303, 1, 0x01, 0x00000001 },
+ { 0x0002e6, 1, 0x01, 0x00000001 },
+ { 0x000466, 1, 0x01, 0x00000052 },
+ { 0x000301, 1, 0x01, 0x3f800000 },
+ { 0x000304, 1, 0x01, 0x30201000 },
+ { 0x000305, 1, 0x01, 0x70605040 },
+ { 0x000306, 1, 0x01, 0xb8a89888 },
+ { 0x000307, 1, 0x01, 0xf8e8d8c8 },
+ { 0x00030a, 1, 0x01, 0x00ffff00 },
+ { 0x00030b, 1, 0x01, 0x0000001a },
+ { 0x00030c, 1, 0x01, 0x00000001 },
+ { 0x000318, 1, 0x01, 0x00000001 },
+ { 0x000340, 1, 0x01, 0x00000000 },
+ { 0x000375, 1, 0x01, 0x00000001 },
+ { 0x00037d, 1, 0x01, 0x00000006 },
+ { 0x0003a0, 1, 0x01, 0x00000002 },
+ { 0x0003aa, 1, 0x01, 0x00000001 },
+ { 0x0003a9, 1, 0x01, 0x00000001 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000383, 1, 0x01, 0x00000011 },
+ { 0x000360, 1, 0x01, 0x00000040 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x00037a, 1, 0x01, 0x00000012 },
+ { 0x000619, 1, 0x01, 0x00000003 },
+ { 0x000811, 1, 0x01, 0x00000003 },
+ { 0x000812, 1, 0x01, 0x00000004 },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000815, 1, 0x01, 0x0000000b },
+ { 0x000800, 6, 0x01, 0x00000001 },
+ { 0x000632, 1, 0x01, 0x00000001 },
+ { 0x000633, 1, 0x01, 0x00000002 },
+ { 0x000634, 1, 0x01, 0x00000003 },
+ { 0x000635, 1, 0x01, 0x00000004 },
+ { 0x000654, 1, 0x01, 0x3f800000 },
+ { 0x000657, 1, 0x01, 0x3f800000 },
+ { 0x000655, 2, 0x01, 0x3f800000 },
+ { 0x0006cd, 1, 0x01, 0x3f800000 },
+ { 0x0007f5, 1, 0x01, 0x3f800000 },
+ { 0x0007dc, 1, 0x01, 0x39291909 },
+ { 0x0007dd, 1, 0x01, 0x79695949 },
+ { 0x0007de, 1, 0x01, 0xb9a99989 },
+ { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007e8, 1, 0x01, 0x00003210 },
+ { 0x0007e9, 1, 0x01, 0x00007654 },
+ { 0x0007ea, 1, 0x01, 0x00000098 },
+ { 0x0007ec, 1, 0x01, 0x39291909 },
+ { 0x0007ed, 1, 0x01, 0x79695949 },
+ { 0x0007ee, 1, 0x01, 0xb9a99989 },
+ { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
+ { 0x0007f0, 1, 0x01, 0x00003210 },
+ { 0x0007f1, 1, 0x01, 0x00007654 },
+ { 0x0007f2, 1, 0x01, 0x00000098 },
+ { 0x0005a5, 1, 0x01, 0x00000001 },
+ { 0x000980, 128, 0x01, 0x00000000 },
+ { 0x000468, 1, 0x01, 0x00000004 },
+ { 0x00046c, 1, 0x01, 0x00000001 },
+ { 0x000470, 96, 0x01, 0x00000000 },
+ { 0x000510, 16, 0x01, 0x3f800000 },
+ { 0x000520, 1, 0x01, 0x000002b6 },
+ { 0x000529, 1, 0x01, 0x00000001 },
+ { 0x000530, 16, 0x01, 0xffff0000 },
+ { 0x000585, 1, 0x01, 0x0000003f },
+ { 0x000576, 1, 0x01, 0x00000003 },
+ { 0x00057b, 1, 0x01, 0x00000059 },
+ { 0x000586, 1, 0x01, 0x00000040 },
+ { 0x000582, 2, 0x01, 0x00000080 },
+ { 0x0005c2, 1, 0x01, 0x00000001 },
+ { 0x000638, 2, 0x01, 0x00000001 },
+ { 0x00063a, 1, 0x01, 0x00000002 },
+ { 0x00063b, 2, 0x01, 0x00000001 },
+ { 0x00063d, 1, 0x01, 0x00000002 },
+ { 0x00063e, 1, 0x01, 0x00000001 },
+ { 0x0008b8, 8, 0x01, 0x00000001 },
+ { 0x000900, 8, 0x01, 0x00000001 },
+ { 0x000908, 8, 0x01, 0x00000002 },
+ { 0x000910, 16, 0x01, 0x00000001 },
+ { 0x000920, 8, 0x01, 0x00000002 },
+ { 0x000928, 8, 0x01, 0x00000001 },
+ { 0x000662, 1, 0x01, 0x00000001 },
+ { 0x000648, 9, 0x01, 0x00000001 },
+ { 0x000658, 1, 0x01, 0x0000000f },
+ { 0x0007ff, 1, 0x01, 0x0000000a },
+ { 0x00066a, 1, 0x01, 0x40000000 },
+ { 0x00066b, 1, 0x01, 0x10000000 },
+ { 0x00066c, 2, 0x01, 0xffff0000 },
+ { 0x0007af, 2, 0x01, 0x00000008 },
+ { 0x0007f6, 1, 0x01, 0x00000001 },
+ { 0x00080b, 1, 0x01, 0x00000002 },
+ { 0x0006b2, 1, 0x01, 0x00000055 },
+ { 0x0007ad, 1, 0x01, 0x00000003 },
+ { 0x000937, 1, 0x01, 0x00000001 },
+ { 0x000971, 1, 0x01, 0x00000008 },
+ { 0x000972, 1, 0x01, 0x00000040 },
+ { 0x000973, 1, 0x01, 0x0000012c },
+ { 0x00097c, 1, 0x01, 0x00000040 },
+ { 0x000979, 1, 0x01, 0x00000003 },
+ { 0x000975, 1, 0x01, 0x00000020 },
+ { 0x000976, 1, 0x01, 0x00000001 },
+ { 0x000977, 1, 0x01, 0x00000020 },
+ { 0x000978, 1, 0x01, 0x00000001 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x00095e, 1, 0x01, 0x20164010 },
+ { 0x00095f, 1, 0x01, 0x00000020 },
+ { 0x000a0d, 1, 0x01, 0x00000006 },
+ { 0x00097d, 1, 0x01, 0x00000020 },
+ { 0x000683, 1, 0x01, 0x00000006 },
+ { 0x000685, 1, 0x01, 0x003fffff },
+ { 0x000687, 1, 0x01, 0x003fffff },
+ { 0x0006a0, 1, 0x01, 0x00000005 },
+ { 0x000840, 1, 0x01, 0x00400008 },
+ { 0x000841, 1, 0x01, 0x08000080 },
+ { 0x000842, 1, 0x01, 0x00400008 },
+ { 0x000843, 1, 0x01, 0x08000080 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ab, 1, 0x01, 0x00000002 },
+ { 0x0006ac, 1, 0x01, 0x00000080 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x0006bb, 1, 0x01, 0x000000cf },
+ { 0x0006ce, 1, 0x01, 0x2a712488 },
+ { 0x000739, 1, 0x01, 0x4085c000 },
+ { 0x00073a, 1, 0x01, 0x00000080 },
+ { 0x000786, 1, 0x01, 0x80000100 },
+ { 0x00073c, 1, 0x01, 0x00010100 },
+ { 0x00073d, 1, 0x01, 0x02800000 },
+ { 0x000787, 1, 0x01, 0x000000cf },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x000836, 1, 0x01, 0x00000001 },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x000833, 1, 0x01, 0x04444480 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c1b0, 8, 0x01, 0x0000000f },
+ { 0x00c1b8, 1, 0x01, 0x0fac6881 },
+ { 0x00c1b9, 1, 0x01, 0x00fac688 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000002 },
+ { 0x0006aa, 1, 0x01, 0x00000001 },
+ { 0x0006ad, 2, 0x01, 0x00000100 },
+ { 0x0006b1, 1, 0x01, 0x00000011 },
+ { 0x00078c, 1, 0x01, 0x00000008 },
+ { 0x000792, 1, 0x01, 0x00000001 },
+ { 0x000794, 3, 0x01, 0x00000001 },
+ { 0x000797, 1, 0x01, 0x000000cf },
+ { 0x00079a, 1, 0x01, 0x00000002 },
+ { 0x0007a1, 1, 0x01, 0x00000001 },
+ { 0x0007a3, 3, 0x01, 0x00000001 },
+ { 0x000831, 1, 0x01, 0x00000004 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000008 },
+ { 0x000039, 3, 0x01, 0x00000000 },
+ { 0x000380, 1, 0x01, 0x00000001 },
+ { 0x000366, 2, 0x01, 0x00000000 },
+ { 0x000368, 1, 0x01, 0x00000fff },
+ { 0x000370, 2, 0x01, 0x00000000 },
+ { 0x000372, 1, 0x01, 0x000fffff },
+ { 0x000813, 1, 0x01, 0x00000006 },
+ { 0x000814, 1, 0x01, 0x00000008 },
+ { 0x000957, 1, 0x01, 0x00000003 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x000a04, 1, 0x01, 0x000000ff },
+ { 0x000a0b, 1, 0x01, 0x00000040 },
+ { 0x00097f, 1, 0x01, 0x00000100 },
+ { 0x000a02, 1, 0x01, 0x00000001 },
+ { 0x000809, 1, 0x01, 0x00000007 },
+ { 0x00c221, 1, 0x01, 0x00000040 },
+ { 0x00c401, 1, 0x01, 0x00000001 },
+ { 0x00c402, 1, 0x01, 0x00010001 },
+ { 0x00c403, 2, 0x01, 0x00000001 },
+ { 0x00c40e, 1, 0x01, 0x00000020 },
+ { 0x00c500, 1, 0x01, 0x00000003 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ { 0x001000, 1, 0x01, 0x00000001 },
+ { 0x000b07, 1, 0x01, 0x00000002 },
+ { 0x000b08, 2, 0x01, 0x00000100 },
+ { 0x000b0a, 1, 0x01, 0x00000001 },
+ { 0x01e100, 1, 0x01, 0x00000001 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_a197[] = {
+ { 0x000800, 1, 0x04, 0x00000000 },
+ { 0x000840, 1, 0x04, 0x00000000 },
+ { 0x000880, 1, 0x04, 0x00000000 },
+ { 0x0008c0, 1, 0x04, 0x00000000 },
+ { 0x000900, 1, 0x04, 0x00000000 },
+ { 0x000940, 1, 0x04, 0x00000000 },
+ { 0x000980, 1, 0x04, 0x00000000 },
+ { 0x0009c0, 1, 0x04, 0x00000000 },
+ { 0x000804, 1, 0x04, 0x00000000 },
+ { 0x000844, 1, 0x04, 0x00000000 },
+ { 0x000884, 1, 0x04, 0x00000000 },
+ { 0x0008c4, 1, 0x04, 0x00000000 },
+ { 0x000904, 1, 0x04, 0x00000000 },
+ { 0x000944, 1, 0x04, 0x00000000 },
+ { 0x000984, 1, 0x04, 0x00000000 },
+ { 0x0009c4, 1, 0x04, 0x00000000 },
+ { 0x000808, 1, 0x04, 0x00000400 },
+ { 0x000848, 1, 0x04, 0x00000400 },
+ { 0x000888, 1, 0x04, 0x00000400 },
+ { 0x0008c8, 1, 0x04, 0x00000400 },
+ { 0x000908, 1, 0x04, 0x00000400 },
+ { 0x000948, 1, 0x04, 0x00000400 },
+ { 0x000988, 1, 0x04, 0x00000400 },
+ { 0x0009c8, 1, 0x04, 0x00000400 },
+ { 0x00080c, 1, 0x04, 0x00000300 },
+ { 0x00084c, 1, 0x04, 0x00000300 },
+ { 0x00088c, 1, 0x04, 0x00000300 },
+ { 0x0008cc, 1, 0x04, 0x00000300 },
+ { 0x00090c, 1, 0x04, 0x00000300 },
+ { 0x00094c, 1, 0x04, 0x00000300 },
+ { 0x00098c, 1, 0x04, 0x00000300 },
+ { 0x0009cc, 1, 0x04, 0x00000300 },
+ { 0x000810, 1, 0x04, 0x000000cf },
+ { 0x000850, 1, 0x04, 0x00000000 },
+ { 0x000890, 1, 0x04, 0x00000000 },
+ { 0x0008d0, 1, 0x04, 0x00000000 },
+ { 0x000910, 1, 0x04, 0x00000000 },
+ { 0x000950, 1, 0x04, 0x00000000 },
+ { 0x000990, 1, 0x04, 0x00000000 },
+ { 0x0009d0, 1, 0x04, 0x00000000 },
+ { 0x000814, 1, 0x04, 0x00000040 },
+ { 0x000854, 1, 0x04, 0x00000040 },
+ { 0x000894, 1, 0x04, 0x00000040 },
+ { 0x0008d4, 1, 0x04, 0x00000040 },
+ { 0x000914, 1, 0x04, 0x00000040 },
+ { 0x000954, 1, 0x04, 0x00000040 },
+ { 0x000994, 1, 0x04, 0x00000040 },
+ { 0x0009d4, 1, 0x04, 0x00000040 },
+ { 0x000818, 1, 0x04, 0x00000001 },
+ { 0x000858, 1, 0x04, 0x00000001 },
+ { 0x000898, 1, 0x04, 0x00000001 },
+ { 0x0008d8, 1, 0x04, 0x00000001 },
+ { 0x000918, 1, 0x04, 0x00000001 },
+ { 0x000958, 1, 0x04, 0x00000001 },
+ { 0x000998, 1, 0x04, 0x00000001 },
+ { 0x0009d8, 1, 0x04, 0x00000001 },
+ { 0x00081c, 1, 0x04, 0x00000000 },
+ { 0x00085c, 1, 0x04, 0x00000000 },
+ { 0x00089c, 1, 0x04, 0x00000000 },
+ { 0x0008dc, 1, 0x04, 0x00000000 },
+ { 0x00091c, 1, 0x04, 0x00000000 },
+ { 0x00095c, 1, 0x04, 0x00000000 },
+ { 0x00099c, 1, 0x04, 0x00000000 },
+ { 0x0009dc, 1, 0x04, 0x00000000 },
+ { 0x000820, 1, 0x04, 0x00000000 },
+ { 0x000860, 1, 0x04, 0x00000000 },
+ { 0x0008a0, 1, 0x04, 0x00000000 },
+ { 0x0008e0, 1, 0x04, 0x00000000 },
+ { 0x000920, 1, 0x04, 0x00000000 },
+ { 0x000960, 1, 0x04, 0x00000000 },
+ { 0x0009a0, 1, 0x04, 0x00000000 },
+ { 0x0009e0, 1, 0x04, 0x00000000 },
+ { 0x001c00, 1, 0x04, 0x00000000 },
+ { 0x001c10, 1, 0x04, 0x00000000 },
+ { 0x001c20, 1, 0x04, 0x00000000 },
+ { 0x001c30, 1, 0x04, 0x00000000 },
+ { 0x001c40, 1, 0x04, 0x00000000 },
+ { 0x001c50, 1, 0x04, 0x00000000 },
+ { 0x001c60, 1, 0x04, 0x00000000 },
+ { 0x001c70, 1, 0x04, 0x00000000 },
+ { 0x001c80, 1, 0x04, 0x00000000 },
+ { 0x001c90, 1, 0x04, 0x00000000 },
+ { 0x001ca0, 1, 0x04, 0x00000000 },
+ { 0x001cb0, 1, 0x04, 0x00000000 },
+ { 0x001cc0, 1, 0x04, 0x00000000 },
+ { 0x001cd0, 1, 0x04, 0x00000000 },
+ { 0x001ce0, 1, 0x04, 0x00000000 },
+ { 0x001cf0, 1, 0x04, 0x00000000 },
+ { 0x001c04, 1, 0x04, 0x00000000 },
+ { 0x001c14, 1, 0x04, 0x00000000 },
+ { 0x001c24, 1, 0x04, 0x00000000 },
+ { 0x001c34, 1, 0x04, 0x00000000 },
+ { 0x001c44, 1, 0x04, 0x00000000 },
+ { 0x001c54, 1, 0x04, 0x00000000 },
+ { 0x001c64, 1, 0x04, 0x00000000 },
+ { 0x001c74, 1, 0x04, 0x00000000 },
+ { 0x001c84, 1, 0x04, 0x00000000 },
+ { 0x001c94, 1, 0x04, 0x00000000 },
+ { 0x001ca4, 1, 0x04, 0x00000000 },
+ { 0x001cb4, 1, 0x04, 0x00000000 },
+ { 0x001cc4, 1, 0x04, 0x00000000 },
+ { 0x001cd4, 1, 0x04, 0x00000000 },
+ { 0x001ce4, 1, 0x04, 0x00000000 },
+ { 0x001cf4, 1, 0x04, 0x00000000 },
+ { 0x001c08, 1, 0x04, 0x00000000 },
+ { 0x001c18, 1, 0x04, 0x00000000 },
+ { 0x001c28, 1, 0x04, 0x00000000 },
+ { 0x001c38, 1, 0x04, 0x00000000 },
+ { 0x001c48, 1, 0x04, 0x00000000 },
+ { 0x001c58, 1, 0x04, 0x00000000 },
+ { 0x001c68, 1, 0x04, 0x00000000 },
+ { 0x001c78, 1, 0x04, 0x00000000 },
+ { 0x001c88, 1, 0x04, 0x00000000 },
+ { 0x001c98, 1, 0x04, 0x00000000 },
+ { 0x001ca8, 1, 0x04, 0x00000000 },
+ { 0x001cb8, 1, 0x04, 0x00000000 },
+ { 0x001cc8, 1, 0x04, 0x00000000 },
+ { 0x001cd8, 1, 0x04, 0x00000000 },
+ { 0x001ce8, 1, 0x04, 0x00000000 },
+ { 0x001cf8, 1, 0x04, 0x00000000 },
+ { 0x001c0c, 1, 0x04, 0x00000000 },
+ { 0x001c1c, 1, 0x04, 0x00000000 },
+ { 0x001c2c, 1, 0x04, 0x00000000 },
+ { 0x001c3c, 1, 0x04, 0x00000000 },
+ { 0x001c4c, 1, 0x04, 0x00000000 },
+ { 0x001c5c, 1, 0x04, 0x00000000 },
+ { 0x001c6c, 1, 0x04, 0x00000000 },
+ { 0x001c7c, 1, 0x04, 0x00000000 },
+ { 0x001c8c, 1, 0x04, 0x00000000 },
+ { 0x001c9c, 1, 0x04, 0x00000000 },
+ { 0x001cac, 1, 0x04, 0x00000000 },
+ { 0x001cbc, 1, 0x04, 0x00000000 },
+ { 0x001ccc, 1, 0x04, 0x00000000 },
+ { 0x001cdc, 1, 0x04, 0x00000000 },
+ { 0x001cec, 1, 0x04, 0x00000000 },
+ { 0x001cfc, 2, 0x04, 0x00000000 },
+ { 0x001d10, 1, 0x04, 0x00000000 },
+ { 0x001d20, 1, 0x04, 0x00000000 },
+ { 0x001d30, 1, 0x04, 0x00000000 },
+ { 0x001d40, 1, 0x04, 0x00000000 },
+ { 0x001d50, 1, 0x04, 0x00000000 },
+ { 0x001d60, 1, 0x04, 0x00000000 },
+ { 0x001d70, 1, 0x04, 0x00000000 },
+ { 0x001d80, 1, 0x04, 0x00000000 },
+ { 0x001d90, 1, 0x04, 0x00000000 },
+ { 0x001da0, 1, 0x04, 0x00000000 },
+ { 0x001db0, 1, 0x04, 0x00000000 },
+ { 0x001dc0, 1, 0x04, 0x00000000 },
+ { 0x001dd0, 1, 0x04, 0x00000000 },
+ { 0x001de0, 1, 0x04, 0x00000000 },
+ { 0x001df0, 1, 0x04, 0x00000000 },
+ { 0x001d04, 1, 0x04, 0x00000000 },
+ { 0x001d14, 1, 0x04, 0x00000000 },
+ { 0x001d24, 1, 0x04, 0x00000000 },
+ { 0x001d34, 1, 0x04, 0x00000000 },
+ { 0x001d44, 1, 0x04, 0x00000000 },
+ { 0x001d54, 1, 0x04, 0x00000000 },
+ { 0x001d64, 1, 0x04, 0x00000000 },
+ { 0x001d74, 1, 0x04, 0x00000000 },
+ { 0x001d84, 1, 0x04, 0x00000000 },
+ { 0x001d94, 1, 0x04, 0x00000000 },
+ { 0x001da4, 1, 0x04, 0x00000000 },
+ { 0x001db4, 1, 0x04, 0x00000000 },
+ { 0x001dc4, 1, 0x04, 0x00000000 },
+ { 0x001dd4, 1, 0x04, 0x00000000 },
+ { 0x001de4, 1, 0x04, 0x00000000 },
+ { 0x001df4, 1, 0x04, 0x00000000 },
+ { 0x001d08, 1, 0x04, 0x00000000 },
+ { 0x001d18, 1, 0x04, 0x00000000 },
+ { 0x001d28, 1, 0x04, 0x00000000 },
+ { 0x001d38, 1, 0x04, 0x00000000 },
+ { 0x001d48, 1, 0x04, 0x00000000 },
+ { 0x001d58, 1, 0x04, 0x00000000 },
+ { 0x001d68, 1, 0x04, 0x00000000 },
+ { 0x001d78, 1, 0x04, 0x00000000 },
+ { 0x001d88, 1, 0x04, 0x00000000 },
+ { 0x001d98, 1, 0x04, 0x00000000 },
+ { 0x001da8, 1, 0x04, 0x00000000 },
+ { 0x001db8, 1, 0x04, 0x00000000 },
+ { 0x001dc8, 1, 0x04, 0x00000000 },
+ { 0x001dd8, 1, 0x04, 0x00000000 },
+ { 0x001de8, 1, 0x04, 0x00000000 },
+ { 0x001df8, 1, 0x04, 0x00000000 },
+ { 0x001d0c, 1, 0x04, 0x00000000 },
+ { 0x001d1c, 1, 0x04, 0x00000000 },
+ { 0x001d2c, 1, 0x04, 0x00000000 },
+ { 0x001d3c, 1, 0x04, 0x00000000 },
+ { 0x001d4c, 1, 0x04, 0x00000000 },
+ { 0x001d5c, 1, 0x04, 0x00000000 },
+ { 0x001d6c, 1, 0x04, 0x00000000 },
+ { 0x001d7c, 1, 0x04, 0x00000000 },
+ { 0x001d8c, 1, 0x04, 0x00000000 },
+ { 0x001d9c, 1, 0x04, 0x00000000 },
+ { 0x001dac, 1, 0x04, 0x00000000 },
+ { 0x001dbc, 1, 0x04, 0x00000000 },
+ { 0x001dcc, 1, 0x04, 0x00000000 },
+ { 0x001ddc, 1, 0x04, 0x00000000 },
+ { 0x001dec, 1, 0x04, 0x00000000 },
+ { 0x001dfc, 1, 0x04, 0x00000000 },
+ { 0x001f00, 1, 0x04, 0x00000000 },
+ { 0x001f08, 1, 0x04, 0x00000000 },
+ { 0x001f10, 1, 0x04, 0x00000000 },
+ { 0x001f18, 1, 0x04, 0x00000000 },
+ { 0x001f20, 1, 0x04, 0x00000000 },
+ { 0x001f28, 1, 0x04, 0x00000000 },
+ { 0x001f30, 1, 0x04, 0x00000000 },
+ { 0x001f38, 1, 0x04, 0x00000000 },
+ { 0x001f40, 1, 0x04, 0x00000000 },
+ { 0x001f48, 1, 0x04, 0x00000000 },
+ { 0x001f50, 1, 0x04, 0x00000000 },
+ { 0x001f58, 1, 0x04, 0x00000000 },
+ { 0x001f60, 1, 0x04, 0x00000000 },
+ { 0x001f68, 1, 0x04, 0x00000000 },
+ { 0x001f70, 1, 0x04, 0x00000000 },
+ { 0x001f78, 1, 0x04, 0x00000000 },
+ { 0x001f04, 1, 0x04, 0x00000000 },
+ { 0x001f0c, 1, 0x04, 0x00000000 },
+ { 0x001f14, 1, 0x04, 0x00000000 },
+ { 0x001f1c, 1, 0x04, 0x00000000 },
+ { 0x001f24, 1, 0x04, 0x00000000 },
+ { 0x001f2c, 1, 0x04, 0x00000000 },
+ { 0x001f34, 1, 0x04, 0x00000000 },
+ { 0x001f3c, 1, 0x04, 0x00000000 },
+ { 0x001f44, 1, 0x04, 0x00000000 },
+ { 0x001f4c, 1, 0x04, 0x00000000 },
+ { 0x001f54, 1, 0x04, 0x00000000 },
+ { 0x001f5c, 1, 0x04, 0x00000000 },
+ { 0x001f64, 1, 0x04, 0x00000000 },
+ { 0x001f6c, 1, 0x04, 0x00000000 },
+ { 0x001f74, 1, 0x04, 0x00000000 },
+ { 0x001f7c, 2, 0x04, 0x00000000 },
+ { 0x001f88, 1, 0x04, 0x00000000 },
+ { 0x001f90, 1, 0x04, 0x00000000 },
+ { 0x001f98, 1, 0x04, 0x00000000 },
+ { 0x001fa0, 1, 0x04, 0x00000000 },
+ { 0x001fa8, 1, 0x04, 0x00000000 },
+ { 0x001fb0, 1, 0x04, 0x00000000 },
+ { 0x001fb8, 1, 0x04, 0x00000000 },
+ { 0x001fc0, 1, 0x04, 0x00000000 },
+ { 0x001fc8, 1, 0x04, 0x00000000 },
+ { 0x001fd0, 1, 0x04, 0x00000000 },
+ { 0x001fd8, 1, 0x04, 0x00000000 },
+ { 0x001fe0, 1, 0x04, 0x00000000 },
+ { 0x001fe8, 1, 0x04, 0x00000000 },
+ { 0x001ff0, 1, 0x04, 0x00000000 },
+ { 0x001ff8, 1, 0x04, 0x00000000 },
+ { 0x001f84, 1, 0x04, 0x00000000 },
+ { 0x001f8c, 1, 0x04, 0x00000000 },
+ { 0x001f94, 1, 0x04, 0x00000000 },
+ { 0x001f9c, 1, 0x04, 0x00000000 },
+ { 0x001fa4, 1, 0x04, 0x00000000 },
+ { 0x001fac, 1, 0x04, 0x00000000 },
+ { 0x001fb4, 1, 0x04, 0x00000000 },
+ { 0x001fbc, 1, 0x04, 0x00000000 },
+ { 0x001fc4, 1, 0x04, 0x00000000 },
+ { 0x001fcc, 1, 0x04, 0x00000000 },
+ { 0x001fd4, 1, 0x04, 0x00000000 },
+ { 0x001fdc, 1, 0x04, 0x00000000 },
+ { 0x001fe4, 1, 0x04, 0x00000000 },
+ { 0x001fec, 1, 0x04, 0x00000000 },
+ { 0x001ff4, 1, 0x04, 0x00000000 },
+ { 0x001ffc, 2, 0x04, 0x00000000 },
+ { 0x002040, 1, 0x04, 0x00000011 },
+ { 0x002080, 1, 0x04, 0x00000020 },
+ { 0x0020c0, 1, 0x04, 0x00000030 },
+ { 0x002100, 1, 0x04, 0x00000040 },
+ { 0x002140, 1, 0x04, 0x00000051 },
+ { 0x00200c, 1, 0x04, 0x00000001 },
+ { 0x00204c, 1, 0x04, 0x00000001 },
+ { 0x00208c, 1, 0x04, 0x00000001 },
+ { 0x0020cc, 1, 0x04, 0x00000001 },
+ { 0x00210c, 1, 0x04, 0x00000001 },
+ { 0x00214c, 1, 0x04, 0x00000001 },
+ { 0x002010, 1, 0x04, 0x00000000 },
+ { 0x002050, 1, 0x04, 0x00000000 },
+ { 0x002090, 1, 0x04, 0x00000001 },
+ { 0x0020d0, 1, 0x04, 0x00000002 },
+ { 0x002110, 1, 0x04, 0x00000003 },
+ { 0x002150, 1, 0x04, 0x00000004 },
+ { 0x000380, 1, 0x04, 0x00000000 },
+ { 0x0003a0, 1, 0x04, 0x00000000 },
+ { 0x0003c0, 1, 0x04, 0x00000000 },
+ { 0x0003e0, 1, 0x04, 0x00000000 },
+ { 0x000384, 1, 0x04, 0x00000000 },
+ { 0x0003a4, 1, 0x04, 0x00000000 },
+ { 0x0003c4, 1, 0x04, 0x00000000 },
+ { 0x0003e4, 1, 0x04, 0x00000000 },
+ { 0x000388, 1, 0x04, 0x00000000 },
+ { 0x0003a8, 1, 0x04, 0x00000000 },
+ { 0x0003c8, 1, 0x04, 0x00000000 },
+ { 0x0003e8, 1, 0x04, 0x00000000 },
+ { 0x00038c, 1, 0x04, 0x00000000 },
+ { 0x0003ac, 1, 0x04, 0x00000000 },
+ { 0x0003cc, 1, 0x04, 0x00000000 },
+ { 0x0003ec, 1, 0x04, 0x00000000 },
+ { 0x000700, 1, 0x04, 0x00000000 },
+ { 0x000710, 1, 0x04, 0x00000000 },
+ { 0x000720, 1, 0x04, 0x00000000 },
+ { 0x000730, 1, 0x04, 0x00000000 },
+ { 0x000704, 1, 0x04, 0x00000000 },
+ { 0x000714, 1, 0x04, 0x00000000 },
+ { 0x000724, 1, 0x04, 0x00000000 },
+ { 0x000734, 1, 0x04, 0x00000000 },
+ { 0x000708, 1, 0x04, 0x00000000 },
+ { 0x000718, 1, 0x04, 0x00000000 },
+ { 0x000728, 1, 0x04, 0x00000000 },
+ { 0x000738, 1, 0x04, 0x00000000 },
+ { 0x002800, 128, 0x04, 0x00000000 },
+ { 0x000a00, 1, 0x04, 0x00000000 },
+ { 0x000a20, 1, 0x04, 0x00000000 },
+ { 0x000a40, 1, 0x04, 0x00000000 },
+ { 0x000a60, 1, 0x04, 0x00000000 },
+ { 0x000a80, 1, 0x04, 0x00000000 },
+ { 0x000aa0, 1, 0x04, 0x00000000 },
+ { 0x000ac0, 1, 0x04, 0x00000000 },
+ { 0x000ae0, 1, 0x04, 0x00000000 },
+ { 0x000b00, 1, 0x04, 0x00000000 },
+ { 0x000b20, 1, 0x04, 0x00000000 },
+ { 0x000b40, 1, 0x04, 0x00000000 },
+ { 0x000b60, 1, 0x04, 0x00000000 },
+ { 0x000b80, 1, 0x04, 0x00000000 },
+ { 0x000ba0, 1, 0x04, 0x00000000 },
+ { 0x000bc0, 1, 0x04, 0x00000000 },
+ { 0x000be0, 1, 0x04, 0x00000000 },
+ { 0x000a04, 1, 0x04, 0x00000000 },
+ { 0x000a24, 1, 0x04, 0x00000000 },
+ { 0x000a44, 1, 0x04, 0x00000000 },
+ { 0x000a64, 1, 0x04, 0x00000000 },
+ { 0x000a84, 1, 0x04, 0x00000000 },
+ { 0x000aa4, 1, 0x04, 0x00000000 },
+ { 0x000ac4, 1, 0x04, 0x00000000 },
+ { 0x000ae4, 1, 0x04, 0x00000000 },
+ { 0x000b04, 1, 0x04, 0x00000000 },
+ { 0x000b24, 1, 0x04, 0x00000000 },
+ { 0x000b44, 1, 0x04, 0x00000000 },
+ { 0x000b64, 1, 0x04, 0x00000000 },
+ { 0x000b84, 1, 0x04, 0x00000000 },
+ { 0x000ba4, 1, 0x04, 0x00000000 },
+ { 0x000bc4, 1, 0x04, 0x00000000 },
+ { 0x000be4, 1, 0x04, 0x00000000 },
+ { 0x000a08, 1, 0x04, 0x00000000 },
+ { 0x000a28, 1, 0x04, 0x00000000 },
+ { 0x000a48, 1, 0x04, 0x00000000 },
+ { 0x000a68, 1, 0x04, 0x00000000 },
+ { 0x000a88, 1, 0x04, 0x00000000 },
+ { 0x000aa8, 1, 0x04, 0x00000000 },
+ { 0x000ac8, 1, 0x04, 0x00000000 },
+ { 0x000ae8, 1, 0x04, 0x00000000 },
+ { 0x000b08, 1, 0x04, 0x00000000 },
+ { 0x000b28, 1, 0x04, 0x00000000 },
+ { 0x000b48, 1, 0x04, 0x00000000 },
+ { 0x000b68, 1, 0x04, 0x00000000 },
+ { 0x000b88, 1, 0x04, 0x00000000 },
+ { 0x000ba8, 1, 0x04, 0x00000000 },
+ { 0x000bc8, 1, 0x04, 0x00000000 },
+ { 0x000be8, 1, 0x04, 0x00000000 },
+ { 0x000a0c, 1, 0x04, 0x00000000 },
+ { 0x000a2c, 1, 0x04, 0x00000000 },
+ { 0x000a4c, 1, 0x04, 0x00000000 },
+ { 0x000a6c, 1, 0x04, 0x00000000 },
+ { 0x000a8c, 1, 0x04, 0x00000000 },
+ { 0x000aac, 1, 0x04, 0x00000000 },
+ { 0x000acc, 1, 0x04, 0x00000000 },
+ { 0x000aec, 1, 0x04, 0x00000000 },
+ { 0x000b0c, 1, 0x04, 0x00000000 },
+ { 0x000b2c, 1, 0x04, 0x00000000 },
+ { 0x000b4c, 1, 0x04, 0x00000000 },
+ { 0x000b6c, 1, 0x04, 0x00000000 },
+ { 0x000b8c, 1, 0x04, 0x00000000 },
+ { 0x000bac, 1, 0x04, 0x00000000 },
+ { 0x000bcc, 1, 0x04, 0x00000000 },
+ { 0x000bec, 1, 0x04, 0x00000000 },
+ { 0x000a10, 1, 0x04, 0x00000000 },
+ { 0x000a30, 1, 0x04, 0x00000000 },
+ { 0x000a50, 1, 0x04, 0x00000000 },
+ { 0x000a70, 1, 0x04, 0x00000000 },
+ { 0x000a90, 1, 0x04, 0x00000000 },
+ { 0x000ab0, 1, 0x04, 0x00000000 },
+ { 0x000ad0, 1, 0x04, 0x00000000 },
+ { 0x000af0, 1, 0x04, 0x00000000 },
+ { 0x000b10, 1, 0x04, 0x00000000 },
+ { 0x000b30, 1, 0x04, 0x00000000 },
+ { 0x000b50, 1, 0x04, 0x00000000 },
+ { 0x000b70, 1, 0x04, 0x00000000 },
+ { 0x000b90, 1, 0x04, 0x00000000 },
+ { 0x000bb0, 1, 0x04, 0x00000000 },
+ { 0x000bd0, 1, 0x04, 0x00000000 },
+ { 0x000bf0, 1, 0x04, 0x00000000 },
+ { 0x000a14, 1, 0x04, 0x00000000 },
+ { 0x000a34, 1, 0x04, 0x00000000 },
+ { 0x000a54, 1, 0x04, 0x00000000 },
+ { 0x000a74, 1, 0x04, 0x00000000 },
+ { 0x000a94, 1, 0x04, 0x00000000 },
+ { 0x000ab4, 1, 0x04, 0x00000000 },
+ { 0x000ad4, 1, 0x04, 0x00000000 },
+ { 0x000af4, 1, 0x04, 0x00000000 },
+ { 0x000b14, 1, 0x04, 0x00000000 },
+ { 0x000b34, 1, 0x04, 0x00000000 },
+ { 0x000b54, 1, 0x04, 0x00000000 },
+ { 0x000b74, 1, 0x04, 0x00000000 },
+ { 0x000b94, 1, 0x04, 0x00000000 },
+ { 0x000bb4, 1, 0x04, 0x00000000 },
+ { 0x000bd4, 1, 0x04, 0x00000000 },
+ { 0x000bf4, 1, 0x04, 0x00000000 },
+ { 0x000c00, 1, 0x04, 0x00000000 },
+ { 0x000c10, 1, 0x04, 0x00000000 },
+ { 0x000c20, 1, 0x04, 0x00000000 },
+ { 0x000c30, 1, 0x04, 0x00000000 },
+ { 0x000c40, 1, 0x04, 0x00000000 },
+ { 0x000c50, 1, 0x04, 0x00000000 },
+ { 0x000c60, 1, 0x04, 0x00000000 },
+ { 0x000c70, 1, 0x04, 0x00000000 },
+ { 0x000c80, 1, 0x04, 0x00000000 },
+ { 0x000c90, 1, 0x04, 0x00000000 },
+ { 0x000ca0, 1, 0x04, 0x00000000 },
+ { 0x000cb0, 1, 0x04, 0x00000000 },
+ { 0x000cc0, 1, 0x04, 0x00000000 },
+ { 0x000cd0, 1, 0x04, 0x00000000 },
+ { 0x000ce0, 1, 0x04, 0x00000000 },
+ { 0x000cf0, 1, 0x04, 0x00000000 },
+ { 0x000c04, 1, 0x04, 0x00000000 },
+ { 0x000c14, 1, 0x04, 0x00000000 },
+ { 0x000c24, 1, 0x04, 0x00000000 },
+ { 0x000c34, 1, 0x04, 0x00000000 },
+ { 0x000c44, 1, 0x04, 0x00000000 },
+ { 0x000c54, 1, 0x04, 0x00000000 },
+ { 0x000c64, 1, 0x04, 0x00000000 },
+ { 0x000c74, 1, 0x04, 0x00000000 },
+ { 0x000c84, 1, 0x04, 0x00000000 },
+ { 0x000c94, 1, 0x04, 0x00000000 },
+ { 0x000ca4, 1, 0x04, 0x00000000 },
+ { 0x000cb4, 1, 0x04, 0x00000000 },
+ { 0x000cc4, 1, 0x04, 0x00000000 },
+ { 0x000cd4, 1, 0x04, 0x00000000 },
+ { 0x000ce4, 1, 0x04, 0x00000000 },
+ { 0x000cf4, 1, 0x04, 0x00000000 },
+ { 0x000c08, 1, 0x04, 0x00000000 },
+ { 0x000c18, 1, 0x04, 0x00000000 },
+ { 0x000c28, 1, 0x04, 0x00000000 },
+ { 0x000c38, 1, 0x04, 0x00000000 },
+ { 0x000c48, 1, 0x04, 0x00000000 },
+ { 0x000c58, 1, 0x04, 0x00000000 },
+ { 0x000c68, 1, 0x04, 0x00000000 },
+ { 0x000c78, 1, 0x04, 0x00000000 },
+ { 0x000c88, 1, 0x04, 0x00000000 },
+ { 0x000c98, 1, 0x04, 0x00000000 },
+ { 0x000ca8, 1, 0x04, 0x00000000 },
+ { 0x000cb8, 1, 0x04, 0x00000000 },
+ { 0x000cc8, 1, 0x04, 0x00000000 },
+ { 0x000cd8, 1, 0x04, 0x00000000 },
+ { 0x000ce8, 1, 0x04, 0x00000000 },
+ { 0x000cf8, 1, 0x04, 0x00000000 },
+ { 0x000c0c, 1, 0x04, 0x3f800000 },
+ { 0x000c1c, 1, 0x04, 0x3f800000 },
+ { 0x000c2c, 1, 0x04, 0x3f800000 },
+ { 0x000c3c, 1, 0x04, 0x3f800000 },
+ { 0x000c4c, 1, 0x04, 0x3f800000 },
+ { 0x000c5c, 1, 0x04, 0x3f800000 },
+ { 0x000c6c, 1, 0x04, 0x3f800000 },
+ { 0x000c7c, 1, 0x04, 0x3f800000 },
+ { 0x000c8c, 1, 0x04, 0x3f800000 },
+ { 0x000c9c, 1, 0x04, 0x3f800000 },
+ { 0x000cac, 1, 0x04, 0x3f800000 },
+ { 0x000cbc, 1, 0x04, 0x3f800000 },
+ { 0x000ccc, 1, 0x04, 0x3f800000 },
+ { 0x000cdc, 1, 0x04, 0x3f800000 },
+ { 0x000cec, 1, 0x04, 0x3f800000 },
+ { 0x000cfc, 1, 0x04, 0x3f800000 },
+ { 0x000d00, 1, 0x04, 0xffff0000 },
+ { 0x000d08, 1, 0x04, 0xffff0000 },
+ { 0x000d10, 1, 0x04, 0xffff0000 },
+ { 0x000d18, 1, 0x04, 0xffff0000 },
+ { 0x000d20, 1, 0x04, 0xffff0000 },
+ { 0x000d28, 1, 0x04, 0xffff0000 },
+ { 0x000d30, 1, 0x04, 0xffff0000 },
+ { 0x000d38, 1, 0x04, 0xffff0000 },
+ { 0x000d04, 1, 0x04, 0xffff0000 },
+ { 0x000d0c, 1, 0x04, 0xffff0000 },
+ { 0x000d14, 1, 0x04, 0xffff0000 },
+ { 0x000d1c, 1, 0x04, 0xffff0000 },
+ { 0x000d24, 1, 0x04, 0xffff0000 },
+ { 0x000d2c, 1, 0x04, 0xffff0000 },
+ { 0x000d34, 1, 0x04, 0xffff0000 },
+ { 0x000d3c, 1, 0x04, 0xffff0000 },
+ { 0x000e00, 1, 0x04, 0x00000000 },
+ { 0x000e10, 1, 0x04, 0x00000000 },
+ { 0x000e20, 1, 0x04, 0x00000000 },
+ { 0x000e30, 1, 0x04, 0x00000000 },
+ { 0x000e40, 1, 0x04, 0x00000000 },
+ { 0x000e50, 1, 0x04, 0x00000000 },
+ { 0x000e60, 1, 0x04, 0x00000000 },
+ { 0x000e70, 1, 0x04, 0x00000000 },
+ { 0x000e80, 1, 0x04, 0x00000000 },
+ { 0x000e90, 1, 0x04, 0x00000000 },
+ { 0x000ea0, 1, 0x04, 0x00000000 },
+ { 0x000eb0, 1, 0x04, 0x00000000 },
+ { 0x000ec0, 1, 0x04, 0x00000000 },
+ { 0x000ed0, 1, 0x04, 0x00000000 },
+ { 0x000ee0, 1, 0x04, 0x00000000 },
+ { 0x000ef0, 1, 0x04, 0x00000000 },
+ { 0x000e04, 1, 0x04, 0xffff0000 },
+ { 0x000e14, 1, 0x04, 0xffff0000 },
+ { 0x000e24, 1, 0x04, 0xffff0000 },
+ { 0x000e34, 1, 0x04, 0xffff0000 },
+ { 0x000e44, 1, 0x04, 0xffff0000 },
+ { 0x000e54, 1, 0x04, 0xffff0000 },
+ { 0x000e64, 1, 0x04, 0xffff0000 },
+ { 0x000e74, 1, 0x04, 0xffff0000 },
+ { 0x000e84, 1, 0x04, 0xffff0000 },
+ { 0x000e94, 1, 0x04, 0xffff0000 },
+ { 0x000ea4, 1, 0x04, 0xffff0000 },
+ { 0x000eb4, 1, 0x04, 0xffff0000 },
+ { 0x000ec4, 1, 0x04, 0xffff0000 },
+ { 0x000ed4, 1, 0x04, 0xffff0000 },
+ { 0x000ee4, 1, 0x04, 0xffff0000 },
+ { 0x000ef4, 1, 0x04, 0xffff0000 },
+ { 0x000e08, 1, 0x04, 0xffff0000 },
+ { 0x000e18, 1, 0x04, 0xffff0000 },
+ { 0x000e28, 1, 0x04, 0xffff0000 },
+ { 0x000e38, 1, 0x04, 0xffff0000 },
+ { 0x000e48, 1, 0x04, 0xffff0000 },
+ { 0x000e58, 1, 0x04, 0xffff0000 },
+ { 0x000e68, 1, 0x04, 0xffff0000 },
+ { 0x000e78, 1, 0x04, 0xffff0000 },
+ { 0x000e88, 1, 0x04, 0xffff0000 },
+ { 0x000e98, 1, 0x04, 0xffff0000 },
+ { 0x000ea8, 1, 0x04, 0xffff0000 },
+ { 0x000eb8, 1, 0x04, 0xffff0000 },
+ { 0x000ec8, 1, 0x04, 0xffff0000 },
+ { 0x000ed8, 1, 0x04, 0xffff0000 },
+ { 0x000ee8, 1, 0x04, 0xffff0000 },
+ { 0x000ef8, 1, 0x04, 0xffff0000 },
+ { 0x000d40, 1, 0x04, 0x00000000 },
+ { 0x000d48, 1, 0x04, 0x00000000 },
+ { 0x000d50, 1, 0x04, 0x00000000 },
+ { 0x000d58, 1, 0x04, 0x00000000 },
+ { 0x000d44, 1, 0x04, 0x00000000 },
+ { 0x000d4c, 1, 0x04, 0x00000000 },
+ { 0x000d54, 1, 0x04, 0x00000000 },
+ { 0x000d5c, 1, 0x04, 0x00000000 },
+ { 0x001e00, 1, 0x04, 0x00000001 },
+ { 0x001e20, 1, 0x04, 0x00000001 },
+ { 0x001e40, 1, 0x04, 0x00000001 },
+ { 0x001e60, 1, 0x04, 0x00000001 },
+ { 0x001e80, 1, 0x04, 0x00000001 },
+ { 0x001ea0, 1, 0x04, 0x00000001 },
+ { 0x001ec0, 1, 0x04, 0x00000001 },
+ { 0x001ee0, 1, 0x04, 0x00000001 },
+ { 0x001e04, 1, 0x04, 0x00000001 },
+ { 0x001e24, 1, 0x04, 0x00000001 },
+ { 0x001e44, 1, 0x04, 0x00000001 },
+ { 0x001e64, 1, 0x04, 0x00000001 },
+ { 0x001e84, 1, 0x04, 0x00000001 },
+ { 0x001ea4, 1, 0x04, 0x00000001 },
+ { 0x001ec4, 1, 0x04, 0x00000001 },
+ { 0x001ee4, 1, 0x04, 0x00000001 },
+ { 0x001e08, 1, 0x04, 0x00000002 },
+ { 0x001e28, 1, 0x04, 0x00000002 },
+ { 0x001e48, 1, 0x04, 0x00000002 },
+ { 0x001e68, 1, 0x04, 0x00000002 },
+ { 0x001e88, 1, 0x04, 0x00000002 },
+ { 0x001ea8, 1, 0x04, 0x00000002 },
+ { 0x001ec8, 1, 0x04, 0x00000002 },
+ { 0x001ee8, 1, 0x04, 0x00000002 },
+ { 0x001e0c, 1, 0x04, 0x00000001 },
+ { 0x001e2c, 1, 0x04, 0x00000001 },
+ { 0x001e4c, 1, 0x04, 0x00000001 },
+ { 0x001e6c, 1, 0x04, 0x00000001 },
+ { 0x001e8c, 1, 0x04, 0x00000001 },
+ { 0x001eac, 1, 0x04, 0x00000001 },
+ { 0x001ecc, 1, 0x04, 0x00000001 },
+ { 0x001eec, 1, 0x04, 0x00000001 },
+ { 0x001e10, 1, 0x04, 0x00000001 },
+ { 0x001e30, 1, 0x04, 0x00000001 },
+ { 0x001e50, 1, 0x04, 0x00000001 },
+ { 0x001e70, 1, 0x04, 0x00000001 },
+ { 0x001e90, 1, 0x04, 0x00000001 },
+ { 0x001eb0, 1, 0x04, 0x00000001 },
+ { 0x001ed0, 1, 0x04, 0x00000001 },
+ { 0x001ef0, 1, 0x04, 0x00000001 },
+ { 0x001e14, 1, 0x04, 0x00000002 },
+ { 0x001e34, 1, 0x04, 0x00000002 },
+ { 0x001e54, 1, 0x04, 0x00000002 },
+ { 0x001e74, 1, 0x04, 0x00000002 },
+ { 0x001e94, 1, 0x04, 0x00000002 },
+ { 0x001eb4, 1, 0x04, 0x00000002 },
+ { 0x001ed4, 1, 0x04, 0x00000002 },
+ { 0x001ef4, 1, 0x04, 0x00000002 },
+ { 0x001e18, 1, 0x04, 0x00000001 },
+ { 0x001e38, 1, 0x04, 0x00000001 },
+ { 0x001e58, 1, 0x04, 0x00000001 },
+ { 0x001e78, 1, 0x04, 0x00000001 },
+ { 0x001e98, 1, 0x04, 0x00000001 },
+ { 0x001eb8, 1, 0x04, 0x00000001 },
+ { 0x001ed8, 1, 0x04, 0x00000001 },
+ { 0x001ef8, 1, 0x04, 0x00000001 },
+ { 0x003400, 128, 0x04, 0x00000000 },
+ { 0x00030c, 1, 0x04, 0x00000001 },
+ { 0x001944, 1, 0x04, 0x00000000 },
+ { 0x001514, 1, 0x04, 0x00000000 },
+ { 0x000d68, 1, 0x04, 0x0000ffff },
+ { 0x00121c, 1, 0x04, 0x0fac6881 },
+ { 0x000fac, 1, 0x04, 0x00000001 },
+ { 0x001538, 1, 0x04, 0x00000001 },
+ { 0x000fe0, 2, 0x04, 0x00000000 },
+ { 0x000fe8, 1, 0x04, 0x00000014 },
+ { 0x000fec, 1, 0x04, 0x00000040 },
+ { 0x000ff0, 1, 0x04, 0x00000000 },
+ { 0x00179c, 1, 0x04, 0x00000000 },
+ { 0x001228, 1, 0x04, 0x00000400 },
+ { 0x00122c, 1, 0x04, 0x00000300 },
+ { 0x001230, 1, 0x04, 0x00010001 },
+ { 0x0007f8, 1, 0x04, 0x00000000 },
+ { 0x0015b4, 1, 0x04, 0x00000001 },
+ { 0x0015cc, 1, 0x04, 0x00000000 },
+ { 0x001534, 1, 0x04, 0x00000000 },
+ { 0x000fb0, 1, 0x04, 0x00000000 },
+ { 0x0015d0, 1, 0x04, 0x00000000 },
+ { 0x00153c, 1, 0x04, 0x00000000 },
+ { 0x0016b4, 1, 0x04, 0x00000003 },
+ { 0x000fbc, 4, 0x04, 0x0000ffff },
+ { 0x000df8, 2, 0x04, 0x00000000 },
+ { 0x001948, 1, 0x04, 0x00000000 },
+ { 0x001970, 1, 0x04, 0x00000001 },
+ { 0x00161c, 1, 0x04, 0x000009f0 },
+ { 0x000dcc, 1, 0x04, 0x00000010 },
+ { 0x00163c, 1, 0x04, 0x00000000 },
+ { 0x0015e4, 1, 0x04, 0x00000000 },
+ { 0x001160, 32, 0x04, 0x25e00040 },
+ { 0x001880, 32, 0x04, 0x00000000 },
+ { 0x000f84, 2, 0x04, 0x00000000 },
+ { 0x0017c8, 2, 0x04, 0x00000000 },
+ { 0x0017d0, 1, 0x04, 0x000000ff },
+ { 0x0017d4, 1, 0x04, 0xffffffff },
+ { 0x0017d8, 1, 0x04, 0x00000002 },
+ { 0x0017dc, 1, 0x04, 0x00000000 },
+ { 0x0015f4, 2, 0x04, 0x00000000 },
+ { 0x001434, 2, 0x04, 0x00000000 },
+ { 0x000d74, 1, 0x04, 0x00000000 },
+ { 0x000dec, 1, 0x04, 0x00000001 },
+ { 0x0013a4, 1, 0x04, 0x00000000 },
+ { 0x001318, 1, 0x04, 0x00000001 },
+ { 0x001644, 1, 0x04, 0x00000000 },
+ { 0x000748, 1, 0x04, 0x00000000 },
+ { 0x000de8, 1, 0x04, 0x00000000 },
+ { 0x001648, 1, 0x04, 0x00000000 },
+ { 0x0012a4, 1, 0x04, 0x00000000 },
+ { 0x001120, 4, 0x04, 0x00000000 },
+ { 0x001118, 1, 0x04, 0x00000000 },
+ { 0x00164c, 1, 0x04, 0x00000000 },
+ { 0x001658, 1, 0x04, 0x00000000 },
+ { 0x001910, 1, 0x04, 0x00000290 },
+ { 0x001518, 1, 0x04, 0x00000000 },
+ { 0x00165c, 1, 0x04, 0x00000001 },
+ { 0x001520, 1, 0x04, 0x00000000 },
+ { 0x001604, 1, 0x04, 0x00000000 },
+ { 0x001570, 1, 0x04, 0x00000000 },
+ { 0x0013b0, 2, 0x04, 0x3f800000 },
+ { 0x00020c, 1, 0x04, 0x00000000 },
+ { 0x001670, 1, 0x04, 0x30201000 },
+ { 0x001674, 1, 0x04, 0x70605040 },
+ { 0x001678, 1, 0x04, 0xb8a89888 },
+ { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
+ { 0x00166c, 1, 0x04, 0x00000000 },
+ { 0x001680, 1, 0x04, 0x00ffff00 },
+ { 0x0012d0, 1, 0x04, 0x00000003 },
+ { 0x0012d4, 1, 0x04, 0x00000002 },
+ { 0x001684, 2, 0x04, 0x00000000 },
+ { 0x000dac, 2, 0x04, 0x00001b02 },
+ { 0x000db4, 1, 0x04, 0x00000000 },
+ { 0x00168c, 1, 0x04, 0x00000000 },
+ { 0x0015bc, 1, 0x04, 0x00000000 },
+ { 0x00156c, 1, 0x04, 0x00000000 },
+ { 0x00187c, 1, 0x04, 0x00000000 },
+ { 0x001110, 1, 0x04, 0x00000001 },
+ { 0x000dc0, 3, 0x04, 0x00000000 },
+ { 0x001234, 1, 0x04, 0x00000000 },
+ { 0x001690, 1, 0x04, 0x00000000 },
+ { 0x0012ac, 1, 0x04, 0x00000001 },
+ { 0x0002c4, 1, 0x04, 0x00000000 },
+ { 0x000790, 5, 0x04, 0x00000000 },
+ { 0x00077c, 1, 0x04, 0x00000000 },
+ { 0x001000, 1, 0x04, 0x00000010 },
+ { 0x0010fc, 1, 0x04, 0x00000000 },
+ { 0x001290, 1, 0x04, 0x00000000 },
+ { 0x000218, 1, 0x04, 0x00000010 },
+ { 0x0012d8, 1, 0x04, 0x00000000 },
+ { 0x0012dc, 1, 0x04, 0x00000010 },
+ { 0x000d94, 1, 0x04, 0x00000001 },
+ { 0x00155c, 2, 0x04, 0x00000000 },
+ { 0x001564, 1, 0x04, 0x00000fff },
+ { 0x001574, 2, 0x04, 0x00000000 },
+ { 0x00157c, 1, 0x04, 0x000fffff },
+ { 0x001354, 1, 0x04, 0x00000000 },
+ { 0x001610, 1, 0x04, 0x00000012 },
+ { 0x001608, 2, 0x04, 0x00000000 },
+ { 0x00260c, 1, 0x04, 0x00000000 },
+ { 0x0007ac, 1, 0x04, 0x00000000 },
+ { 0x00162c, 1, 0x04, 0x00000003 },
+ { 0x000210, 1, 0x04, 0x00000000 },
+ { 0x000320, 1, 0x04, 0x00000000 },
+ { 0x000324, 6, 0x04, 0x3f800000 },
+ { 0x000750, 1, 0x04, 0x00000000 },
+ { 0x000760, 1, 0x04, 0x39291909 },
+ { 0x000764, 1, 0x04, 0x79695949 },
+ { 0x000768, 1, 0x04, 0xb9a99989 },
+ { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x000770, 1, 0x04, 0x30201000 },
+ { 0x000774, 1, 0x04, 0x70605040 },
+ { 0x000778, 1, 0x04, 0x00009080 },
+ { 0x000780, 1, 0x04, 0x39291909 },
+ { 0x000784, 1, 0x04, 0x79695949 },
+ { 0x000788, 1, 0x04, 0xb9a99989 },
+ { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
+ { 0x0007d0, 1, 0x04, 0x30201000 },
+ { 0x0007d4, 1, 0x04, 0x70605040 },
+ { 0x0007d8, 1, 0x04, 0x00009080 },
+ { 0x00037c, 1, 0x04, 0x00000001 },
+ { 0x000740, 2, 0x04, 0x00000000 },
+ { 0x002600, 1, 0x04, 0x00000000 },
+ { 0x001918, 1, 0x04, 0x00000000 },
+ { 0x00191c, 1, 0x04, 0x00000900 },
+ { 0x001920, 1, 0x04, 0x00000405 },
+ { 0x001308, 1, 0x04, 0x00000001 },
+ { 0x001924, 1, 0x04, 0x00000000 },
+ { 0x0013ac, 1, 0x04, 0x00000000 },
+ { 0x00192c, 1, 0x04, 0x00000001 },
+ { 0x00193c, 1, 0x04, 0x00002c1c },
+ { 0x000d7c, 1, 0x04, 0x00000000 },
+ { 0x000f8c, 1, 0x04, 0x00000000 },
+ { 0x0002c0, 1, 0x04, 0x00000001 },
+ { 0x001510, 1, 0x04, 0x00000000 },
+ { 0x001940, 1, 0x04, 0x00000000 },
+ { 0x000ff4, 2, 0x04, 0x00000000 },
+ { 0x00194c, 2, 0x04, 0x00000000 },
+ { 0x001968, 1, 0x04, 0x00000000 },
+ { 0x001590, 1, 0x04, 0x0000003f },
+ { 0x0007e8, 4, 0x04, 0x00000000 },
+ { 0x00196c, 1, 0x04, 0x00000011 },
+ { 0x0002e4, 1, 0x04, 0x0000b001 },
+ { 0x00036c, 2, 0x04, 0x00000000 },
+ { 0x00197c, 1, 0x04, 0x00000000 },
+ { 0x000fcc, 2, 0x04, 0x00000000 },
+ { 0x0002d8, 1, 0x04, 0x00000040 },
+ { 0x001980, 1, 0x04, 0x00000080 },
+ { 0x001504, 1, 0x04, 0x00000080 },
+ { 0x001984, 1, 0x04, 0x00000000 },
+ { 0x000300, 1, 0x04, 0x00000001 },
+ { 0x0013a8, 1, 0x04, 0x00000000 },
+ { 0x0012ec, 1, 0x04, 0x00000000 },
+ { 0x001310, 1, 0x04, 0x00000000 },
+ { 0x001314, 1, 0x04, 0x00000001 },
+ { 0x001380, 1, 0x04, 0x00000000 },
+ { 0x001384, 4, 0x04, 0x00000001 },
+ { 0x001394, 1, 0x04, 0x00000000 },
+ { 0x00139c, 1, 0x04, 0x00000000 },
+ { 0x001398, 1, 0x04, 0x00000000 },
+ { 0x001594, 1, 0x04, 0x00000000 },
+ { 0x001598, 4, 0x04, 0x00000001 },
+ { 0x000f54, 3, 0x04, 0x00000000 },
+ { 0x0019bc, 1, 0x04, 0x00000000 },
+ { 0x000f9c, 2, 0x04, 0x00000000 },
+ { 0x0012cc, 1, 0x04, 0x00000000 },
+ { 0x0012e8, 1, 0x04, 0x00000000 },
+ { 0x00130c, 1, 0x04, 0x00000001 },
+ { 0x001360, 8, 0x04, 0x00000000 },
+ { 0x00133c, 2, 0x04, 0x00000001 },
+ { 0x001344, 1, 0x04, 0x00000002 },
+ { 0x001348, 2, 0x04, 0x00000001 },
+ { 0x001350, 1, 0x04, 0x00000002 },
+ { 0x001358, 1, 0x04, 0x00000001 },
+ { 0x0012e4, 1, 0x04, 0x00000000 },
+ { 0x00131c, 4, 0x04, 0x00000000 },
+ { 0x0019c0, 1, 0x04, 0x00000000 },
+ { 0x001140, 1, 0x04, 0x00000000 },
+ { 0x0019c4, 1, 0x04, 0x00000000 },
+ { 0x0019c8, 1, 0x04, 0x00001500 },
+ { 0x00135c, 1, 0x04, 0x00000000 },
+ { 0x000f90, 1, 0x04, 0x00000000 },
+ { 0x0019e0, 8, 0x04, 0x00000001 },
+ { 0x0019cc, 1, 0x04, 0x00000001 },
+ { 0x0015b8, 1, 0x04, 0x00000000 },
+ { 0x001a00, 1, 0x04, 0x00001111 },
+ { 0x001a04, 7, 0x04, 0x00000000 },
+ { 0x000d6c, 2, 0x04, 0xffff0000 },
+ { 0x0010f8, 1, 0x04, 0x00001010 },
+ { 0x000d80, 5, 0x04, 0x00000000 },
+ { 0x000da0, 1, 0x04, 0x00000000 },
+ { 0x0007a4, 2, 0x04, 0x00000000 },
+ { 0x001508, 1, 0x04, 0x80000000 },
+ { 0x00150c, 1, 0x04, 0x40000000 },
+ { 0x001668, 1, 0x04, 0x00000000 },
+ { 0x000318, 2, 0x04, 0x00000008 },
+ { 0x000d9c, 1, 0x04, 0x00000001 },
+ { 0x000ddc, 1, 0x04, 0x00000002 },
+ { 0x000374, 1, 0x04, 0x00000000 },
+ { 0x000378, 1, 0x04, 0x00000020 },
+ { 0x0007dc, 1, 0x04, 0x00000000 },
+ { 0x00074c, 1, 0x04, 0x00000055 },
+ { 0x001420, 1, 0x04, 0x00000003 },
+ { 0x0017bc, 2, 0x04, 0x00000000 },
+ { 0x0017c4, 1, 0x04, 0x00000001 },
+ { 0x001008, 1, 0x04, 0x00000008 },
+ { 0x00100c, 1, 0x04, 0x00000040 },
+ { 0x001010, 1, 0x04, 0x0000012c },
+ { 0x000d60, 1, 0x04, 0x00000040 },
+ { 0x00075c, 1, 0x04, 0x00000003 },
+ { 0x001018, 1, 0x04, 0x00000020 },
+ { 0x00101c, 1, 0x04, 0x00000001 },
+ { 0x001020, 1, 0x04, 0x00000020 },
+ { 0x001024, 1, 0x04, 0x00000001 },
+ { 0x001444, 3, 0x04, 0x00000000 },
+ { 0x000360, 1, 0x04, 0x20164010 },
+ { 0x000364, 1, 0x04, 0x00000020 },
+ { 0x000368, 1, 0x04, 0x00000000 },
+ { 0x000de4, 1, 0x04, 0x00000000 },
+ { 0x000204, 1, 0x04, 0x00000006 },
+ { 0x000208, 1, 0x04, 0x00000000 },
+ { 0x0002cc, 2, 0x04, 0x003fffff },
+ { 0x001220, 1, 0x04, 0x00000005 },
+ { 0x000fdc, 1, 0x04, 0x00000000 },
+ { 0x000f98, 1, 0x04, 0x00400008 },
+ { 0x001284, 1, 0x04, 0x08000080 },
+ { 0x001450, 1, 0x04, 0x00400008 },
+ { 0x001454, 1, 0x04, 0x08000080 },
+ { 0x000214, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk40xx[] = {
+ { 0x404004, 8, 0x04, 0x00000000 },
+ { 0x404024, 1, 0x04, 0x0000e000 },
+ { 0x404028, 8, 0x04, 0x00000000 },
+ { 0x4040a8, 8, 0x04, 0x00000000 },
+ { 0x4040c8, 1, 0x04, 0xf800008f },
+ { 0x4040d0, 6, 0x04, 0x00000000 },
+ { 0x4040e8, 1, 0x04, 0x00001000 },
+ { 0x4040f8, 1, 0x04, 0x00000000 },
+ { 0x404100, 10, 0x04, 0x00000000 },
+ { 0x404130, 2, 0x04, 0x00000000 },
+ { 0x404138, 1, 0x04, 0x20000040 },
+ { 0x404150, 1, 0x04, 0x0000002e },
+ { 0x404154, 1, 0x04, 0x00000400 },
+ { 0x404158, 1, 0x04, 0x00000200 },
+ { 0x404164, 1, 0x04, 0x00000055 },
+ { 0x40417c, 2, 0x04, 0x00000000 },
+ { 0x404194, 1, 0x04, 0x01000700 },
+ { 0x4041a0, 4, 0x04, 0x00000000 },
+ { 0x404200, 1, 0x04, 0x0000a197 },
+ { 0x404204, 1, 0x04, 0x0000a1c0 },
+ { 0x404208, 1, 0x04, 0x0000a140 },
+ { 0x40420c, 1, 0x04, 0x0000902d },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk58xx[] = {
+ { 0x405800, 1, 0x04, 0x0f8000bf },
+ { 0x405830, 1, 0x04, 0x02180648 },
+ { 0x405834, 1, 0x04, 0x08000000 },
+ { 0x405838, 1, 0x04, 0x00000000 },
+ { 0x405854, 1, 0x04, 0x00000000 },
+ { 0x405870, 4, 0x04, 0x00000001 },
+ { 0x405a00, 2, 0x04, 0x00000000 },
+ { 0x405a18, 1, 0x04, 0x00000000 },
+ { 0x405a1c, 1, 0x04, 0x000000ff },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk64xx[] = {
+ { 0x4064a8, 1, 0x04, 0x00000000 },
+ { 0x4064ac, 1, 0x04, 0x00003fff },
+ { 0x4064b0, 3, 0x04, 0x00000000 },
+ { 0x4064c0, 1, 0x04, 0x802000f0 },
+ { 0x4064c4, 1, 0x04, 0x0192ffff },
+ { 0x4064c8, 1, 0x04, 0x00c20200 },
+ { 0x4064cc, 9, 0x04, 0x00000000 },
+ { 0x4064fc, 1, 0x04, 0x0000022a },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk78xx[] = {
+ { 0x407804, 1, 0x04, 0x00000063 },
+ { 0x40780c, 1, 0x04, 0x0a418820 },
+ { 0x407810, 1, 0x04, 0x062080e6 },
+ { 0x407814, 1, 0x04, 0x020398a4 },
+ { 0x407818, 1, 0x04, 0x0e629062 },
+ { 0x40781c, 1, 0x04, 0x0a418820 },
+ { 0x407820, 1, 0x04, 0x000000e6 },
+ { 0x4078bc, 1, 0x04, 0x00000103 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk88xx[] = {
+ { 0x408800, 1, 0x04, 0x32802a3c },
+ { 0x408804, 1, 0x04, 0x00000040 },
+ { 0x408808, 1, 0x04, 0x1003e005 },
+ { 0x408840, 1, 0x04, 0x0000000b },
+ { 0x408900, 1, 0x04, 0xb080b801 },
+ { 0x408904, 1, 0x04, 0x62000001 },
+ { 0x408908, 1, 0x04, 0x02c8102f },
+ { 0x408980, 1, 0x04, 0x0000011d },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_gpc_0[] = {
+ { 0x418380, 1, 0x04, 0x00000016 },
+ { 0x418400, 1, 0x04, 0x38005e00 },
+ { 0x418404, 1, 0x04, 0x71e0ffff },
+ { 0x41840c, 1, 0x04, 0x00001008 },
+ { 0x418410, 1, 0x04, 0x0fff0fff },
+ { 0x418414, 1, 0x04, 0x02200fff },
+ { 0x418450, 6, 0x04, 0x00000000 },
+ { 0x418468, 1, 0x04, 0x00000001 },
+ { 0x41846c, 2, 0x04, 0x00000000 },
+ { 0x418600, 1, 0x04, 0x0000007f },
+ { 0x418684, 1, 0x04, 0x0000001f },
+ { 0x418700, 1, 0x04, 0x00000002 },
+ { 0x418704, 2, 0x04, 0x00000080 },
+ { 0x41870c, 2, 0x04, 0x00000000 },
+ { 0x418800, 1, 0x04, 0x7006863a },
+ { 0x418808, 1, 0x04, 0x00000000 },
+ { 0x41880c, 1, 0x04, 0x00000030 },
+ { 0x418810, 1, 0x04, 0x00000000 },
+ { 0x418828, 1, 0x04, 0x00000044 },
+ { 0x418830, 1, 0x04, 0x10000001 },
+ { 0x4188d8, 1, 0x04, 0x00000008 },
+ { 0x4188e0, 1, 0x04, 0x01000000 },
+ { 0x4188e8, 5, 0x04, 0x00000000 },
+ { 0x4188fc, 1, 0x04, 0x20100058 },
+ { 0x41891c, 1, 0x04, 0x00ff00ff },
+ { 0x418924, 1, 0x04, 0x00000000 },
+ { 0x418928, 1, 0x04, 0x00ffff00 },
+ { 0x41892c, 1, 0x04, 0x0000ff00 },
+ { 0x418b00, 1, 0x04, 0x0000001e },
+ { 0x418b08, 1, 0x04, 0x0a418820 },
+ { 0x418b0c, 1, 0x04, 0x062080e6 },
+ { 0x418b10, 1, 0x04, 0x020398a4 },
+ { 0x418b14, 1, 0x04, 0x0e629062 },
+ { 0x418b18, 1, 0x04, 0x0a418820 },
+ { 0x418b1c, 1, 0x04, 0x000000e6 },
+ { 0x418bb8, 1, 0x04, 0x00000103 },
+ { 0x418c08, 1, 0x04, 0x00000001 },
+ { 0x418c10, 8, 0x04, 0x00000000 },
+ { 0x418c40, 1, 0x04, 0xffffffff },
+ { 0x418c6c, 1, 0x04, 0x00000001 },
+ { 0x418c80, 1, 0x04, 0x2020000c },
+ { 0x418c8c, 1, 0x04, 0x00000001 },
+ { 0x418d24, 1, 0x04, 0x00000000 },
+ { 0x419000, 1, 0x04, 0x00000780 },
+ { 0x419004, 2, 0x04, 0x00000000 },
+ { 0x419014, 1, 0x04, 0x00000004 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_tpc[] = {
+ { 0x419848, 1, 0x04, 0x00000000 },
+ { 0x419864, 1, 0x04, 0x00000129 },
+ { 0x419888, 1, 0x04, 0x00000000 },
+ { 0x419a00, 1, 0x04, 0x000100f0 },
+ { 0x419a04, 1, 0x04, 0x00000001 },
+ { 0x419a08, 1, 0x04, 0x00000421 },
+ { 0x419a0c, 1, 0x04, 0x00120000 },
+ { 0x419a10, 1, 0x04, 0x00000000 },
+ { 0x419a14, 1, 0x04, 0x00000200 },
+ { 0x419a1c, 1, 0x04, 0x0000c000 },
+ { 0x419a20, 1, 0x04, 0x00000800 },
+ { 0x419a30, 1, 0x04, 0x00000001 },
+ { 0x419ac4, 1, 0x04, 0x0037f440 },
+ { 0x419c00, 1, 0x04, 0x0000001a },
+ { 0x419c04, 1, 0x04, 0x80000006 },
+ { 0x419c08, 1, 0x04, 0x00000002 },
+ { 0x419c20, 1, 0x04, 0x00000000 },
+ { 0x419c24, 1, 0x04, 0x00084210 },
+ { 0x419c28, 1, 0x04, 0x3efbefbe },
+ { 0x419ce8, 1, 0x04, 0x00000000 },
+ { 0x419cf4, 1, 0x04, 0x00000203 },
+ { 0x419e04, 1, 0x04, 0x00000000 },
+ { 0x419e08, 1, 0x04, 0x0000001d },
+ { 0x419e0c, 1, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x0013eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 2, 0x04, 0x00000000 },
+ { 0x419e58, 1, 0x04, 0x00000001 },
+ { 0x419e5c, 3, 0x04, 0x00000000 },
+ { 0x419e68, 1, 0x04, 0x00000002 },
+ { 0x419e6c, 12, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x00001f8f },
+ { 0x419eb0, 1, 0x04, 0x0db00da0 },
+ { 0x419eb8, 1, 0x04, 0x00000000 },
+ { 0x419ec8, 1, 0x04, 0x0001304f },
+ { 0x419f30, 4, 0x04, 0x00000000 },
+ { 0x419f40, 1, 0x04, 0x00000018 },
+ { 0x419f44, 3, 0x04, 0x00000000 },
+ { 0x419f58, 1, 0x04, 0x00000020 },
+ { 0x419f70, 1, 0x04, 0x00000000 },
+ { 0x419f78, 1, 0x04, 0x000001eb },
+ { 0x419f7c, 1, 0x04, 0x00000404 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk[] = {
+ { 0x41be24, 1, 0x04, 0x00000006 },
+ { 0x41bec0, 1, 0x04, 0x10000000 },
+ { 0x41bec4, 1, 0x04, 0x00037f7f },
+ { 0x41bee4, 1, 0x04, 0x00000000 },
+ { 0x41bef0, 1, 0x04, 0x000003ff },
+ { 0x41bf00, 1, 0x04, 0x0a418820 },
+ { 0x41bf04, 1, 0x04, 0x062080e6 },
+ { 0x41bf08, 1, 0x04, 0x020398a4 },
+ { 0x41bf0c, 1, 0x04, 0x0e629062 },
+ { 0x41bf10, 1, 0x04, 0x0a418820 },
+ { 0x41bf14, 1, 0x04, 0x000000e6 },
+ { 0x41bfd0, 1, 0x04, 0x00900103 },
+ { 0x41bfe0, 1, 0x04, 0x00400001 },
+ { 0x41bfe4, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static void
+nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+ u32 magic[GPC_MAX][2];
+ u32 offset;
+ int gpc;
+
+ mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+ mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+ mmio_list(0x40800c, 0x00000000, 8, 1);
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000030, 0, 0);
+ mmio_list(0x418808, 0x00000000, 8, 0);
+ mmio_list(0x41880c, 0x80000030, 0, 0);
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+
+ mmio_list(0x405830, 0x02180648, 0, 0);
+ mmio_list(0x4064c4, 0x0192ffff, 0, 0);
+
+ for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+ mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
+ mmio_list(0x17e920, 0x00090d08, 0, 0);
+}
+
+static struct nvc0_graph_init *
+nv108_grctx_init_hub[] = {
+ nvc0_grctx_init_base,
+ nv108_grctx_init_unk40xx,
+ nvf0_grctx_init_unk44xx,
+ nve4_grctx_init_unk46xx,
+ nve4_grctx_init_unk47xx,
+ nv108_grctx_init_unk58xx,
+ nvf0_grctx_init_unk5bxx,
+ nvf0_grctx_init_unk60xx,
+ nv108_grctx_init_unk64xx,
+ nv108_grctx_init_unk78xx,
+ nve4_grctx_init_unk80xx,
+ nv108_grctx_init_unk88xx,
+ NULL
+};
+
+struct nvc0_graph_init *
+nv108_grctx_init_gpc[] = {
+ nv108_grctx_init_gpc_0,
+ nvc0_grctx_init_gpc_1,
+ nv108_grctx_init_tpc,
+ nv108_grctx_init_unk,
+ NULL
+};
+
+struct nvc0_graph_init
+nv108_grctx_init_mthd_magic[] = {
+ { 0x3410, 1, 0x04, 0x8e0e2006 },
+ { 0x3414, 1, 0x04, 0x00000038 },
+ {}
+};
+
+static struct nvc0_graph_mthd
+nv108_grctx_init_mthd[] = {
+ { 0xa197, nv108_grctx_init_a197, },
+ { 0x902d, nvc0_grctx_init_902d, },
+ { 0x902d, nv108_grctx_init_mthd_magic, },
+ {}
+};
+
+struct nouveau_oclass *
+nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = nve4_grctx_generate_main,
+ .mods = nv108_grctx_generate_mods,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nv108_grctx_init_hub,
+ .gpc = nv108_grctx_init_gpc,
+ .icmd = nv108_grctx_init_icmd,
+ .mthd = nv108_grctx_init_mthd,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dcb2ebb8c29d..44012c3da538 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -50,7 +50,7 @@ nvf0_grctx_init_unk40xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk44xx[] = {
{ 0x404404, 12, 0x04, 0x00000000 },
{ 0x404438, 1, 0x04, 0x00000000 },
@@ -62,7 +62,7 @@ nvf0_grctx_init_unk44xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk5bxx[] = {
{ 0x405b00, 1, 0x04, 0x00000000 },
{ 0x405b10, 1, 0x04, 0x00001000 },
@@ -70,7 +70,7 @@ nvf0_grctx_init_unk5bxx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_grctx_init_unk60xx[] = {
{ 0x406020, 1, 0x04, 0x034103c1 },
{ 0x406028, 4, 0x04, 0x00000001 },
@@ -286,7 +286,6 @@ nvf0_grctx_init_hub[] = {
nvf0_grctx_init_unk64xx,
nve4_grctx_init_unk80xx,
nvf0_grctx_init_unk88xx,
- nvd9_grctx_init_rop,
NULL
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
index 5d24b6de16cc..e148961b8075 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
@@ -38,7 +38,7 @@ queue_put:
cmpu b32 $r8 $r9
bra ne #queue_put_next
mov $r15 E_CMD_OVERFLOW
- call #error
+ call(error)
ret
// store cmd/data on queue
@@ -92,18 +92,16 @@ queue_get_done:
// Out: $r15 value
//
nv_rd32:
- mov $r11 0x728
- shl b32 $r11 6
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
nv_rd32_wait:
- iord $r12 I[$r11 + 0x000]
+ nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
xbit $r12 $r12 31
bra ne #nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
- call #wait_doneo
- iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ call(wait_doneo)
+ nv_iord($r15, NV_PGRAPH_FECS_MMIO_RDVAL, 0)
ret
// nv_wr32 - write 32-bit value to nv register
@@ -112,37 +110,17 @@ nv_rd32:
// $r15 value
//
nv_wr32:
- mov $r11 0x728
- shl b32 $r11 6
- iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_WRVAL, 0, $r15)
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
bset $r12 30 // MMIO_CTRL_WRITE
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
nv_wr32_wait:
- iord $r12 I[$r11 + 0x000]
+ nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
xbit $r12 $r12 31
bra ne #nv_wr32_wait
ret
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
- mov $r8 0x430
- shl b32 $r8 6
- bset $r15 31
- iowr I[$r8 + 0x000] $r15
- ret
-
-// clear watchdog timer
-watchdog_clear:
- mov $r8 0x430
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r0
- ret
-
// wait_donez - wait on FUC_DONE bit to become clear
//
// In : $r10 bit to wait on
@@ -163,13 +141,9 @@ wait_donez:
//
wait_doneo:
trace_set(T_WAIT);
- mov $r8 0x818
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r10
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(6), 0, $r10)
wait_doneo_e:
- mov $r8 0x400
- shl b32 $r8 6
- iord $r8 I[$r8 + 0x000]
+ nv_iord($r8, NV_PGRAPH_FECS_SIGNAL, 0)
xbit $r8 $r8 $r10
bra e #wait_doneo_e
trace_clr(T_WAIT)
@@ -209,21 +183,18 @@ mmctx_size:
//
mmctx_xfer:
trace_set(T_MMCTX)
- mov $r8 0x710
- shl b32 $r8 6
clear b32 $r9
or $r11 $r11
bra e #mmctx_base_disabled
- iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_BASE, 0, $r11)
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
bra e #mmctx_multi_disabled
- iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
- iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE, 0, $r14)
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_MASK, 0, $r15)
bset $r9 1 // MULTI_EN
mmctx_multi_disabled:
- add b32 $r8 0x100
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
@@ -231,20 +202,20 @@ mmctx_xfer:
xbit $r14 $r10 1
shl b32 $r14 17
or $r11 $r14 // START_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
// loop over the mmio list, and send requests to the hw
mmctx_exec_loop:
// wait for space in mmctx queue
mmctx_wait_free:
- iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r14, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
and $r14 0x1f
bra e #mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
or $r14 $r9
- iowr I[$r8 + 0x300] $r14
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_QUEUE, 0, $r14)
add b32 $r12 4
cmpu b32 $r12 $r13
bra ne #mmctx_exec_loop
@@ -253,22 +224,22 @@ mmctx_xfer:
bra ne #mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
and $r11 0x1f
cmpu b32 $r11 0x10
bra ne #mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
- call #wait_donez
+ call(wait_donez)
bra #mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
bset $r11 12 // QLIMIT = 0x10
bset $r11 18 // STOP_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
mmctx_stop_wait:
// wait for STOP_TRIGGER to clear
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
xbit $r11 $r11 18
bra ne #mmctx_stop_wait
mmctx_done:
@@ -280,28 +251,24 @@ mmctx_xfer:
strand_wait:
push $r10
mov $r10 2
- call #wait_donez
+ call(wait_donez)
pop $r10
ret
// unknown - call before issuing strand commands
//
strand_pre:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xc
- iowr I[$r8] $r9
- call #strand_wait
+ mov $r9 NV_PGRAPH_FECS_STRAND_CMD_ENABLE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+ call(strand_wait)
ret
// unknown - call after issuing strand commands
//
strand_post:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xd
- iowr I[$r8] $r9
- call #strand_wait
+ mov $r9 NV_PGRAPH_FECS_STRAND_CMD_DISABLE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+ call(strand_wait)
ret
// Selects strand set?!
@@ -309,18 +276,14 @@ strand_post:
// In: $r14 id
//
strand_set:
- mov $r10 0x4ffc
- sethi $r10 0x20000
- sub b32 $r11 $r10 0x500
mov $r12 0xf
- iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
- mov $r12 0xb
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call #strand_wait
- iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
- mov $r12 0xa
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call #strand_wait
+ nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r14)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
ret
// Initialise strand context data
@@ -332,30 +295,27 @@ strand_set:
//
strand_ctx_init:
trace_set(T_STRINIT)
- call #strand_pre
+ call(strand_pre)
mov $r14 3
- call #strand_set
- mov $r10 0x46fc
- sethi $r10 0x20000
- add b32 $r11 $r10 0x400
- iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
- mov $r12 1
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call #strand_wait
+ call(strand_set)
+
+ clear b32 $r12
+ nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_SEEK
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
sub b32 $r12 $r0 1
- iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
- mov $r12 2
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call #strand_wait
- call #strand_post
+ nv_iowr(NV_PGRAPH_FECS_STRAND_DATA, 0x3f, $r12)
+ mov $r12 NV_PGRAPH_FECS_STRAND_CMD_GET_INFO
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+ call(strand_wait)
+ call(strand_post)
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
// about it later then.
- mov $r8 0x880
- shl b32 $r8 6
- iord $r9 I[$r8 + 0x000] // STRANDS
- add b32 $r8 0x2200
+ nv_mkio($r8, NV_PGRAPH_FECS_STRAND_SAVE_SWBASE, 0x00)
+ nv_iord($r9, NV_PGRAPH_FECS_STRANDS_CNT, 0x00)
shr b32 $r14 $r15 8
ctx_init_strand_loop:
iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 5547c1b3f4f2..96cbcea3b2c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -58,12 +58,9 @@ mmio_list_base:
//
error:
push $r14
- mov $r14 -0x67ec // 0x9814
- sethi $r14 0x400000
- call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
- add b32 $r14 0x41c
+ nv_wr32(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), $r15)
mov $r15 1
- call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ nv_wr32(NV_PGRAPH_FECS_INTR_UP_SET, $r15)
pop $r14
ret
@@ -84,46 +81,40 @@ init:
mov $sp $r0
// enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+ mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_ACCESS, 0, $r2)
// setup i0 handler, and route all interrupts to it
mov $r1 #ih
mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE, 0, $r0)
// enable fifo interrupt
- mov $r2 4
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+ mov $r2 NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET, 0, $r2)
// enable interrupts
bset $flags ie0
// figure out which GPC we are, and how many TPCs we have
- mov $r1 0x608
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x000] // UNITS
+ nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
mov $r3 1
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
st b32 D[$r0 + #tpc_count] $r2
st b32 D[$r0 + #tpc_mask] $r3
- add b32 $r1 0x400
- iord $r2 I[$r1 + 0x000] // MYINDEX
+ nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
st b32 D[$r0 + #gpc_id] $r2
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// figure out which, and how many, UNKs are actually present
- mov $r14 0x0c30
- sethi $r14 0x500000
+ imm32($r14, 0x500c30)
clear b32 $r2
clear b32 $r3
clear b32 $r4
init_unk_loop:
- call #nv_rd32
+ call(nv_rd32)
cmp b32 $r15 0
bra z #init_unk_next
mov $r15 1
@@ -146,23 +137,21 @@ init:
// set mmctx base addresses now so we don't have to do it later,
// they don't currently ever change
- mov $r4 0x700
- shl b32 $r4 6
shr b32 $r5 $r2 8
- iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
- iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE, 0, $r5)
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE, 0, $r5)
// calculate GPC mmio context size
ld b32 $r14 D[$r0 + #gpc_mmio_list_head]
ld b32 $r15 D[$r0 + #gpc_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size
ld b32 $r14 D[$r0 + #tpc_mmio_list_head]
ld b32 $r15 D[$r0 + #tpc_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
ld b32 $r14 D[$r0 + #tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
@@ -172,7 +161,7 @@ init:
// calculate per-UNK mmio context size
ld b32 $r14 D[$r0 + #unk_mmio_list_head]
ld b32 $r15 D[$r0 + #unk_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
ld b32 $r14 D[$r0 + #unk_count]
mulu $r14 $r15
add b32 $r2 $r14
@@ -180,9 +169,8 @@ init:
#endif
// round up base/size to 256 byte boundary (for strand SWBASE)
- add b32 $r4 0x1300
shr b32 $r3 2
- iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT, 0, $r3) // wtf for?!
shr b32 $r2 8
shr b32 $r3 6
add b32 $r2 1
@@ -192,7 +180,7 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
- call #strand_ctx_init
+ call(strand_ctx_init)
add b32 $r3 $r15
// save context size, and tell HUB we're done
@@ -208,7 +196,7 @@ main:
bset $flags $p0
sleep $p0
mov $r13 #cmd_queue
- call #queue_get
+ call(queue_get)
bra $p1 #main
// 0x0000-0x0003 are all context transfers
@@ -224,13 +212,13 @@ main:
or $r1 $r14
mov $flags $r1
// transfer context data
- call #ctx_xfer
+ call(ctx_xfer)
bra #main
main_not_ctx_xfer:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call #error
+ call(error)
bra #main
// interrupt handler
@@ -247,22 +235,20 @@ ih:
clear b32 $r0
// incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
+ nv_iord($r10, NV_PGRAPH_GPCX_GPCCS_INTR, 0)
+ and $r11 $r10 NV_PGRAPH_GPCX_GPCCS_INTR_FIFO
bra e #ih_no_fifo
// queue incoming fifo command for later processing
- mov $r11 0x1900
mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
+ nv_iord($r14, NV_PGRAPH_GPCX_GPCCS_FIFO_CMD, 0)
+ nv_iord($r15, NV_PGRAPH_GPCX_GPCCS_FIFO_DATA, 0)
+ call(queue_put)
mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_FIFO_ACK, 0, $r14)
// ack, and wake up main()
ih_no_fifo:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ACK, 0, $r10)
pop $r15
pop $r14
@@ -283,9 +269,7 @@ hub_barrier_done:
mov $r15 1
ld b32 $r14 D[$r0 + #gpc_id]
shl b32 $r15 $r14
- mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
- sethi $r14 0x400000
- call #nv_wr32
+ nv_wr32(0x409418, $r15) // 0x409418 - HUB_BAR_SET
ret
// Disables various things, waits a bit, and re-enables them..
@@ -295,16 +279,15 @@ hub_barrier_done:
// funny things happen.
//
ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x020
- iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
- mov $r15 8
+ mov $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
+ mov $r14 8
ctx_redswitch_delay:
- sub b32 $r15 1
+ sub b32 $r14 1
bra ne #ctx_redswitch_delay
- mov $r15 0xa20
- iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11
+ or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
ret
// Transfer GPC context data between GPU and storage area
@@ -317,46 +300,37 @@ ctx_redswitch:
//
ctx_xfer:
// set context base address
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r15// MEM_BASE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
bra not $p1 #ctx_xfer_not_load
- call #ctx_redswitch
+ call(ctx_redswitch)
ctx_xfer_not_load:
// strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+ call(strand_pre)
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT, 0x3f, $r2)
+ xbit $r2 $flags $p1 // SAVE/LOAD
+ add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
+ nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 2 // first
- mov $r11 0x0000
- sethi $r11 0x500000
+ imm32($r11,0x500000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
mov $r14 0 // not multi
- call #mmctx_xfer
+ call(mmctx_xfer)
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
#if !NV_PGRAPH_GPCX_UNK__SIZE
or $r10 4 // last
#endif
- mov $r11 0x4000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ imm32($r11, 0x504000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
@@ -364,14 +338,13 @@ ctx_xfer:
ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
ld b32 $r15 D[$r0 + #tpc_mask]
mov $r14 0x800 // stride = 0x800
- call #mmctx_xfer
+ call(mmctx_xfer)
#if NV_PGRAPH_GPCX_UNK__SIZE > 0
// per-UNK mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
- mov $r11 0x3000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_UNK0
+ imm32($r11, 0x503000)
ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_UNK0
@@ -379,11 +352,11 @@ ctx_xfer:
ld b32 $r13 D[$r0 + #unk_mmio_list_tail]
ld b32 $r15 D[$r0 + #unk_mask]
mov $r14 0x200 // stride = 0x200
- call #mmctx_xfer
+ call(mmctx_xfer)
#endif
// wait for strands to finish
- call #strand_wait
+ call(strand_wait)
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
@@ -391,14 +364,10 @@ ctx_xfer:
bra $p1 #ctx_xfer_post
bra not $p2 #ctx_xfer_done
ctx_xfer_post:
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xd
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call #strand_wait
+ call(strand_post)
// mark completion in HUB's barrier
ctx_xfer_done:
- call #hub_barrier_done
+ call(hub_barrier_done)
ret
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
new file mode 100644
index 000000000000..bd30262d635b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define NV_PGRAPH_GPCX_UNK__SIZE 0x00000001
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grgpc_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "gpc.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grgpc_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "gpc.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
new file mode 100644
index 000000000000..27dc1280dc10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
@@ -0,0 +1,473 @@
+uint32_t nv108_grgpc_data[] = {
+/* 0x0000: gpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0004: gpc_mmio_list_tail */
+/* 0x0004: tpc_mmio_list_head */
+ 0x0000006c,
+/* 0x0008: tpc_mmio_list_tail */
+/* 0x0008: unk_mmio_list_head */
+ 0x0000006c,
+/* 0x000c: unk_mmio_list_tail */
+ 0x0000006c,
+/* 0x0010: gpc_id */
+ 0x00000000,
+/* 0x0014: tpc_count */
+ 0x00000000,
+/* 0x0018: tpc_mask */
+ 0x00000000,
+/* 0x001c: unk_count */
+ 0x00000000,
+/* 0x0020: unk_mask */
+ 0x00000000,
+/* 0x0024: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv108_grgpc_code[] = {
+ 0x03140ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e02,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0xffb2e0f9,
+ 0x4098148e,
+ 0x00008f7e,
+ 0xffb2010f,
+ 0x409c1c8e,
+ 0x00008f7e,
+ 0x00f8e0fc,
+/* 0x0314: init */
+ 0x04fe04bd,
+ 0x40020200,
+ 0x02f61200,
+ 0x4104bd00,
+ 0x10fe0465,
+ 0x07004000,
+ 0xbd0000f6,
+ 0x40040204,
+ 0x02f60400,
+ 0xf404bd00,
+ 0x00821031,
+ 0x22cf0182,
+ 0xf0010300,
+ 0x32bb1f24,
+ 0x0132b604,
+ 0xb50502b5,
+ 0x00820603,
+ 0x22cf0186,
+ 0x0402b500,
+ 0x500c308e,
+ 0x34bd24bd,
+/* 0x036a: init_unk_loop */
+ 0x657e44bd,
+ 0xf6b00000,
+ 0x0e0bf400,
+ 0xf2bb010f,
+ 0x054ffd04,
+/* 0x037f: init_unk_next */
+ 0xb60130b6,
+ 0xe0b60120,
+ 0x0126b004,
+/* 0x038b: init_unk_done */
+ 0xb5e21bf4,
+ 0x04b50703,
+ 0x01008208,
+ 0x0022cf02,
+ 0x259534bd,
+ 0xc0008008,
+ 0x0005f601,
+ 0x008004bd,
+ 0x05f601c1,
+ 0x9804bd00,
+ 0x0f98000e,
+ 0x01207e01,
+ 0x002fbb00,
+ 0x98003fbb,
+ 0x0f98010e,
+ 0x01207e02,
+ 0x050e9800,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x020e9800,
+ 0x7e030f98,
+ 0x98000120,
+ 0xeffd070e,
+ 0x002ebb00,
+ 0xb6003ebb,
+ 0x00800235,
+ 0x03f601d3,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x7e2fb208,
+ 0xbb000268,
+ 0x0080003f,
+ 0x03f60201,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+/* 0x0429: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e24,
+ 0xf401f400,
+ 0xf404e4b0,
+ 0x81fe1d18,
+ 0xbd060201,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x04fc7e00,
+ 0xd40ef400,
+/* 0x0458: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0xf87e01f5,
+ 0x0ef40002,
+/* 0x0465: ih */
+ 0xfe80f9c7,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x240d1f0b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x04a2: ih_no_fifo */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x04c2: hub_barrier_done */
+ 0x010f01f8,
+ 0xbb040e98,
+ 0xffb204fe,
+ 0x4094188e,
+ 0x00008f7e,
+/* 0x04d6: ctx_redswitch */
+ 0x200f00f8,
+ 0x01850080,
+ 0xbd000ff6,
+/* 0x04e3: ctx_redswitch_delay */
+ 0xb6080e04,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x85008002,
+ 0x000ff601,
+ 0x00f804bd,
+/* 0x04fc: ctx_xfer */
+ 0x02810080,
+ 0xbd000ff6,
+ 0x0711f404,
+ 0x0004d67e,
+/* 0x050c: ctx_xfer_not_load */
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x5000008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x01acf000,
+ 0x5040008b,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0x004e060f,
+ 0x013d7e08,
+ 0x01acf000,
+ 0x8b04a5f0,
+ 0x98503000,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98020c98,
+ 0x0f98030d,
+ 0x02004e08,
+ 0x00013d7e,
+ 0x00020a7e,
+ 0xf40601f4,
+/* 0x0596: ctx_xfer_post */
+ 0x277e0712,
+/* 0x059a: ctx_xfer_done */
+ 0xc27e0002,
+ 0x00f80004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index f2b0dea80116..0e7b01efae8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -37,14 +37,14 @@ uint32_t nvc0_grgpc_data[] = {
};
uint32_t nvc0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -68,184 +68,214 @@ uint32_t nvc0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -259,167 +289,199 @@ uint32_t nvc0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe042617,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0xd517f104,
+ 0x0010fe04,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x40b7003e,
- 0x35b61300,
- 0x0043d002,
- 0xb60825b6,
- 0x20b60635,
- 0x0130b601,
- 0xb60824b6,
- 0x2fb90834,
- 0x7121f502,
- 0x003fbb02,
- 0x010007f1,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
+ 0x2ebb00ef,
+ 0x003ebb00,
+ 0xf10235b6,
+ 0xf0d30007,
+ 0x03d00103,
+ 0xb604bd00,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x02d321f5,
+ 0xf1003fbb,
+ 0xf0010007,
+ 0x03d00203,
+ 0xbd04bd00,
+ 0x1f29f024,
+ 0x080007f1,
0xd00203f0,
- 0x04bd0003,
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
-/* 0x03e9: main */
- 0x0031f404,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef404ad,
-/* 0x0419: main_not_ctx_xfer */
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf402fe21,
-/* 0x0426: ih */
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0xcf04bdf0,
- 0xabc4800a,
- 0x1d0bf404,
- 0x1900b7f1,
- 0xcf1cd7f0,
- 0xbfcf40be,
+ 0x04bd0002,
+/* 0x0498: main */
+ 0xf40031f4,
+ 0xd7f00028,
+ 0x3921f41c,
+ 0xb0f401f4,
+ 0x18f404e4,
+ 0x0181fe1e,
+ 0xbd0627f0,
+ 0x0412fd20,
+ 0xfd01e4b6,
+ 0x18fe051e,
+ 0x8d21f500,
+ 0xd30ef405,
+/* 0x04c8: main_not_ctx_xfer */
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef4037e,
+/* 0x04d5: ih */
+ 0xfe80f9c6,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xa7f104bd,
+ 0xa3f00200,
+ 0x00aacf00,
+ 0xf404abc4,
+ 0xd7f02c0b,
+ 0x00e7f11c,
+ 0x00e3f01a,
+ 0xf100eecf,
+ 0xf01900f7,
+ 0xffcf00f3,
0x0421f400,
- 0x0400b0b7,
- 0xd001e7f0,
-/* 0x045e: ih_no_fifo */
- 0x0ad000be,
- 0xfcf0fc40,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
-/* 0x0479: hub_barrier_done */
- 0x01f7f001,
- 0xbb040e98,
- 0xe7f104fe,
- 0xe3f09418,
- 0x8d21f440,
-/* 0x048e: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x20f7f006,
- 0xf000efd0,
-/* 0x049e: ctx_redswitch_delay */
- 0xf2b608f7,
- 0xfd1bf401,
- 0x0a20f7f1,
- 0xf800efd0,
-/* 0x04ad: ctx_xfer */
- 0x0417f100,
- 0x0614b60a,
- 0xf4001fd0,
- 0x21f50711,
-/* 0x04be: ctx_xfer_not_load */
- 0x17f1048e,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1021521,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x02a5f001,
- 0xf000b7f0,
- 0x0c9850b3,
- 0x0fc4b604,
- 0x9800bcbb,
- 0x0d98000c,
- 0x00e7f001,
- 0x016621f5,
+ 0xf101e7f0,
+ 0xf01d0007,
+ 0x0ed00003,
+/* 0x0523: ih_no_fifo */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+/* 0x0547: hub_barrier_done */
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb04,
+ 0xf102ffb9,
+ 0xf09418e7,
+ 0x21f440e3,
+/* 0x055f: ctx_redswitch */
+ 0xf000f89d,
+ 0x07f120f7,
+ 0x03f08500,
+ 0x000fd001,
+ 0xe7f004bd,
+/* 0x0571: ctx_redswitch_delay */
+ 0x01e2b608,
+ 0xf1fd1bf4,
+ 0xf10800f5,
+ 0xf10200f5,
+ 0xf0850007,
+ 0x0fd00103,
+ 0xf804bd00,
+/* 0x058d: ctx_xfer */
+ 0x0007f100,
+ 0x0203f081,
+ 0xbd000fd0,
+ 0x0711f404,
+ 0x055f21f5,
+/* 0x05a0: ctx_xfer_not_load */
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xf001acf0,
- 0xb7f104a5,
- 0xb3f04000,
+ 0xb7f102a5,
+ 0xb3f00000,
0x040c9850,
0xbb0fc4b6,
0x0c9800bc,
- 0x020d9801,
- 0xf1060f98,
- 0xf50800e7,
- 0xf5016621,
- 0xf4021521,
- 0x12f40601,
-/* 0x0535: ctx_xfer_post */
- 0xfc17f114,
- 0x0213f04a,
- 0xd00d27f0,
- 0x21f50012,
-/* 0x0546: ctx_xfer_done */
- 0x21f50215,
- 0x00f80479,
+ 0x010d9800,
+ 0xf500e7f0,
+ 0xf0016f21,
+ 0xa5f001ac,
+ 0x00b7f104,
+ 0x50b3f040,
+ 0xb6040c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0x98020d98,
+ 0xe7f1060f,
+ 0x21f50800,
+ 0x21f5016f,
+ 0x01f4025e,
+ 0x0712f406,
+/* 0x0618: ctx_xfer_post */
+ 0x027f21f5,
+/* 0x061c: ctx_xfer_done */
+ 0x054721f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
index dd346c2a1624..84dd32db28a0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
@@ -41,14 +41,14 @@ uint32_t nvd7_grgpc_data[] = {
};
uint32_t nvd7_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvd7_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvd7_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f00800,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 7ff5ef6b0804..b6da800ee9c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -41,14 +41,14 @@ uint32_t nve0_grgpc_data[] = {
};
uint32_t nve0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nve0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nve0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40126b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40126,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0080007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f00800,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
index f870507be880..6316ebaf5d9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
@@ -41,14 +41,14 @@ uint32_t nvf0_grgpc_data[] = {
};
uint32_t nvf0_grgpc_code[] = {
- 0x03180ef5,
+ 0x03a10ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvf0_grgpc_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f03700,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f03700,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f037,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf0370007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f03700,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvf0_grgpc_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0xe0f900f8,
- 0x9814e7f1,
- 0xf440e3f0,
- 0xe0b78d21,
- 0xf7f0041c,
- 0x8d21f401,
- 0x00f8e0fc,
-/* 0x0318: init */
- 0x04fe04bd,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe047017,
- 0x17f10010,
- 0x10d00400,
- 0x0427f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x14b60608,
- 0x0012cf06,
+ 0xf102ffb9,
+ 0xf09814e7,
+ 0x21f440e3,
+ 0x01f7f09d,
+ 0xf102ffb9,
+ 0xf09c1ce7,
+ 0x21f440e3,
+ 0xf8e0fc9d,
+/* 0x03a1: init */
+ 0xfe04bd00,
+ 0x27f00004,
+ 0x0007f102,
+ 0x0003f012,
+ 0xbd0002d0,
+ 0x1f17f104,
+ 0x0010fe05,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0000,
+ 0xf10427f0,
+ 0xf0040007,
+ 0x02d00003,
+ 0xf404bd00,
+ 0x27f11031,
+ 0x23f08200,
+ 0x0022cf01,
0xf00137f0,
0x32bb1f24,
0x0132b604,
0x80050280,
- 0x10b70603,
- 0x12cf0400,
- 0x04028000,
- 0x0c30e7f1,
- 0xbd50e3f0,
- 0xbd34bd24,
-/* 0x0371: init_unk_loop */
- 0x6821f444,
- 0xf400f6b0,
- 0xf7f00f0b,
- 0x04f2bb01,
- 0xb6054ffd,
-/* 0x0386: init_unk_next */
- 0x20b60130,
- 0x04e0b601,
- 0xf40226b0,
-/* 0x0392: init_unk_done */
- 0x0380e21b,
- 0x08048007,
- 0x010027f1,
- 0xcf0223f0,
- 0x34bd0022,
- 0x070047f1,
- 0x950644b6,
- 0x45d00825,
- 0x4045d000,
- 0x98000e98,
- 0x21f5010f,
- 0x2fbb0147,
- 0x003fbb00,
- 0x98010e98,
- 0x21f5020f,
- 0x0e980147,
- 0x00effd05,
- 0xbb002ebb,
- 0x0e98003e,
- 0x030f9802,
- 0x014721f5,
- 0xfd070e98,
+ 0x27f10603,
+ 0x23f08600,
+ 0x0022cf01,
+ 0xf1040280,
+ 0xf00c30e7,
+ 0x24bd50e3,
+ 0x44bd34bd,
+/* 0x0410: init_unk_loop */
+ 0xb06821f4,
+ 0x0bf400f6,
+ 0x01f7f00f,
+ 0xfd04f2bb,
+ 0x30b6054f,
+/* 0x0425: init_unk_next */
+ 0x0120b601,
+ 0xb004e0b6,
+ 0x1bf40226,
+/* 0x0431: init_unk_done */
+ 0x070380e2,
+ 0xf1080480,
+ 0xf0010027,
+ 0x22cf0223,
+ 0x9534bd00,
+ 0x07f10825,
+ 0x03f0c000,
+ 0x0005d001,
+ 0x07f104bd,
+ 0x03f0c100,
+ 0x0005d001,
+ 0x0e9804bd,
+ 0x010f9800,
+ 0x015021f5,
+ 0xbb002fbb,
+ 0x0e98003f,
+ 0x020f9801,
+ 0x015021f5,
+ 0xfd050e98,
0x2ebb00ef,
0x003ebb00,
- 0x130040b7,
- 0xd00235b6,
- 0x25b60043,
- 0x0635b608,
- 0xb60120b6,
- 0x24b60130,
- 0x0834b608,
- 0xf5022fb9,
- 0xbb027121,
- 0x07f1003f,
- 0x03f00100,
- 0x0003d002,
- 0x24bd04bd,
- 0xf11f29f0,
- 0xf0300007,
- 0x02d00203,
-/* 0x0433: main */
+ 0x98020e98,
+ 0x21f5030f,
+ 0x0e980150,
+ 0x00effd07,
+ 0xbb002ebb,
+ 0x35b6003e,
+ 0x0007f102,
+ 0x0103f0d3,
+ 0xbd0003d0,
+ 0x0825b604,
+ 0xb60635b6,
+ 0x30b60120,
+ 0x0824b601,
+ 0xb90834b6,
+ 0x21f5022f,
+ 0x3fbb02d3,
+ 0x0007f100,
+ 0x0203f001,
+ 0xbd0003d0,
+ 0xf024bd04,
+ 0x07f11f29,
+ 0x03f03000,
+ 0x0002d002,
+/* 0x04e2: main */
+ 0x31f404bd,
+ 0x0028f400,
+ 0xf424d7f0,
+ 0x01f43921,
+ 0x04e4b0f4,
+ 0xfe1e18f4,
+ 0x27f00181,
+ 0xfd20bd06,
+ 0xe4b60412,
+ 0x051efd01,
+ 0xf50018fe,
+ 0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+ 0xef94d30e,
+ 0x01f5f010,
+ 0x037e21f5,
+/* 0x051f: ih */
+ 0xf9c60ef4,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+/* 0x0591: hub_barrier_done */
+ 0x01f7f001,
+ 0xbb040e98,
+ 0xffb904fe,
+ 0x18e7f102,
+ 0x40e3f094,
+ 0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+ 0x20f7f000,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+ 0xb608e7f0,
+ 0x1bf401e2,
+ 0x00f5f1fd,
+ 0x00f5f108,
+ 0x0007f102,
+ 0x0103f085,
+ 0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+ 0xf100f804,
+ 0xf0810007,
+ 0x0fd00203,
0xf404bd00,
- 0x28f40031,
- 0x24d7f000,
- 0xf43921f4,
- 0xe4b0f401,
- 0x1e18f404,
- 0xf00181fe,
- 0x20bd0627,
- 0xb60412fd,
- 0x1efd01e4,
- 0x0018fe05,
- 0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
- 0x94d30ef4,
- 0xf5f010ef,
- 0xfe21f501,
- 0xc60ef402,
-/* 0x0470: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x800acf04,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf24,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
-/* 0x04a8: ih_no_fifo */
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x04c3: hub_barrier_done */
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb04,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
-/* 0x04d8: ctx_redswitch */
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
-/* 0x04f7: ctx_xfer */
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x1521f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x000c9800,
- 0xf0010d98,
- 0x21f500e7,
- 0xacf00166,
- 0x00b7f101,
- 0x50b3f040,
- 0xb6040c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0x98020d98,
- 0xe7f1060f,
- 0x21f50800,
- 0xacf00166,
- 0x04a5f001,
- 0x3000b7f1,
+ 0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+ 0x21f505a9,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xb6012cf0,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x02a5f001,
+ 0x0000b7f1,
0x9850b3f0,
0xc4b6040c,
0x00bcbb0f,
- 0x98020c98,
- 0x0f98030d,
- 0x00e7f108,
- 0x6621f502,
- 0x1521f501,
- 0x0601f402,
-/* 0x05a3: ctx_xfer_post */
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x021521f5,
-/* 0x05b4: ctx_xfer_done */
- 0x04c321f5,
- 0x000000f8,
+ 0x98000c98,
+ 0xe7f0010d,
+ 0x6f21f500,
+ 0x01acf001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6040c,
+ 0x00bcbb0f,
+ 0x98010c98,
+ 0x0f98020d,
+ 0x00e7f106,
+ 0x6f21f508,
+ 0x01acf001,
+ 0xf104a5f0,
+ 0xf03000b7,
+ 0x0c9850b3,
+ 0x0fc4b604,
+ 0x9800bcbb,
+ 0x0d98020c,
+ 0x080f9803,
+ 0x0200e7f1,
+ 0x016f21f5,
+ 0x025e21f5,
+ 0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+ 0x21f50712,
+/* 0x068a: ctx_xfer_done */
+ 0x21f5027f,
+ 0x00f80591,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index b82d2ae89917..c8ddb8d71b91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -68,60 +68,57 @@ error:
//
init:
clear b32 $r0
- mov $sp $r0
mov $xdbase $r0
+ // setup stack
+ nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
+ extr $r1 $r1 9:17
+ shl b32 $r1 8
+ mov $sp $r1
+
// enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+ mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
+ nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
// setup i0 handler, and route all interrupts to it
mov $r1 #ih
mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
- // route HUB_CHANNEL_SWITCH to fuc interrupt 8
- mov $r3 0x404
- shl b32 $r3 6
- mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
- iowr I[$r3 + 0x000] $r2
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
+
+ // route HUB_CHSW_PULSE to fuc interrupt 8
+ mov $r2 0x2003 // { HUB_CHSW_PULSE, ZERO } -> intr 8
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
// not sure what these are, route them because NVIDIA does, and
// the IRQ handler will signal the host if we ever get one.. we
// may find out if/why we need to handle these if so..
//
- mov $r2 0x2004
- iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
- mov $r2 0x200b
- iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
- mov $r2 0x200c
- iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+ mov $r2 0x2004 // { 0x04, ZERO } -> intr 9
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
+ mov $r2 0x200b // { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
+ mov $r2 0x200c // { 0x0c, ZERO } -> intr 15
+ nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
// enable all INTR_UP interrupts
- mov $r2 0xc24
- shl b32 $r2 6
- not b32 $r3 $r0
- iowr I[$r2] $r3
+ sub b32 $r3 $r0 1
+ nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
- // enable fifo, ctxsw, 9, 10, 15 interrupts
- mov $r2 -0x78fc // 0x8704
- sethi $r2 0
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+ // enable fifo, ctxsw, 9, fwmthd, 15 interrupts
+ imm32($r2, 0x8704)
+ nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
// fifo level triggered, rest edge
- sub b32 $r1 0x100
- mov $r2 4
- iowr I[$r1] $r2
+ mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
+ nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
// enable interrupts
bset $flags ie0
// fetch enabled GPC/ROP counts
- mov $r14 -0x69fc // 0x409604
- sethi $r14 0x400000
- call #nv_rd32
+ nv_rd32($r14, 0x409604)
extr $r1 $r15 16:20
st b32 D[$r0 + #rop_count] $r1
and $r15 0x1f
@@ -131,37 +128,40 @@ init:
mov $r1 1
shl b32 $r1 $r15
sub b32 $r1 1
- mov $r2 0x40c
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1
- iowr I[$r2 + 0x100] $r1
+ nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
+ nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
// context size calculation, reserve first 256 bytes for use by fuc
mov $r1 256
+ //
+ mov $r15 2
+ call(ctx_4170s)
+ call(ctx_4170w)
+ mov $r15 0x10
+ call(ctx_86c)
+
// calculate size of mmio context data
ld b32 $r14 D[$r0 + #hub_mmio_list_head]
ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
- call #mmctx_size
+ call(mmctx_size)
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
- mov $r3 0x700
- shl b32 $r3 6
shr b32 $r4 $r1 8
- iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
- iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
add b32 $r3 0x1300
add b32 $r1 $r15
shr b32 $r15 2
- iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
+ nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
// strands, base offset needs to be aligned to 256 bytes
shr b32 $r1 8
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
- call #strand_ctx_init
+ call(strand_ctx_init)
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
@@ -173,30 +173,29 @@ init:
// in GPCn_CC_SCRATCH[1]
//
ld b32 $r3 D[$r0 + #gpc_count]
- mov $r4 0x2000
- sethi $r4 0x500000
+ imm32($r4, 0x502000)
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
- call #nv_wr32 // CC_SCRATCH[1] = ctx offset
+ call(nv_wr32) // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x10c
clear b32 $r15
- call #nv_wr32
+ call(nv_wr32)
add b32 $r14 $r4 0x104
- call #nv_wr32 // ENTRY
+ call(nv_wr32) // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
- call #nv_wr32 // CTRL
+ call(nv_wr32) // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
- call #nv_rd32
+ call(nv_rd32)
xbit $r15 $r15 31
bra e #init_gpc_wait
add b32 $r14 $r4 0x804
- call #nv_rd32
+ call(nv_rd32)
add b32 $r1 $r15
// next!
@@ -204,6 +203,12 @@ init:
sub b32 $r3 1
bra ne #init_gpc
+ //
+ mov $r15 0
+ call(ctx_86c)
+ mov $r15 0
+ call(ctx_4170s)
+
// save context size, and tell host we're ready
nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
clear b32 $r1
@@ -218,17 +223,15 @@ main:
bset $flags $p0
sleep $p0
mov $r13 #cmd_queue
- call #queue_get
+ call(queue_get)
bra $p1 #main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
bra ne #main_not_ctx_switch
trace_set(T_AUTO)
- mov $r1 0xb00
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x100] // CHAN_NEXT
- iord $r1 I[$r1 + 0x000] // CHAN_CUR
+ nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
+ nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
xbit $r3 $r1 31
bra e #chsw_no_prev
@@ -239,12 +242,12 @@ main:
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_SAVE);
pop $r2
trace_set(T_LOAD);
bset $flags $p1
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_LOAD);
bra #chsw_done
chsw_prev_no_next:
@@ -252,25 +255,21 @@ main:
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
pop $r2
- mov $r1 0xb00
- shl b32 $r1 6
- iowr I[$r1] $r2
+ nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
bra #chsw_done
chsw_no_prev:
xbit $r3 $r2 31
bra e #chsw_done
bset $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
// ack the context switch request
chsw_done:
- mov $r1 0xb0c
- shl b32 $r1 6
- mov $r2 1
- iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ mov $r2 NV_PGRAPH_FECS_CHSW_ACK
+ nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
trace_clr(T_AUTO)
bra #main
@@ -279,7 +278,7 @@ main:
cmpu b32 $r14 0x0001
bra ne #main_not_ctx_chan
mov b32 $r2 $r15
- call #ctx_chan
+ call(ctx_chan)
bra #main_done
// request to store current channel context?
@@ -289,14 +288,14 @@ main:
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
- call #ctx_xfer
+ call(ctx_xfer)
trace_clr(T_SAVE)
bra #main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call #error
+ call(error)
bra #main
main_done:
@@ -319,41 +318,46 @@ ih:
clear b32 $r0
// incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
+ nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
bra e #ih_no_fifo
// queue incoming fifo command for later processing
- mov $r11 0x1900
mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
+ nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
+ nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
+ call(queue_put)
add b32 $r11 0x400
mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+ nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
// context switch request?
ih_no_fifo:
- and $r11 $r10 0x00000100
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
bra e #ih_no_ctxsw
// enqueue a context switch for later processing
mov $r13 #cmd_queue
mov $r14 0x4001
- call #queue_put
+ call(queue_put)
- // anything we didn't handle, bring it to the host's attention
+ // firmware method?
ih_no_ctxsw:
- mov $r11 0x104
+ and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
+ bra e #ih_no_fwmthd
+ // none that we handle; ack, and fall through to unhandled
+ mov $r11 0x100
+ nv_wr32(0x400144, $r11)
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_fwmthd:
+ mov $r11 0x104 // FIFO | CHSW
not b32 $r11
and $r11 $r10 $r11
bra e #ih_no_other
- mov $r10 0xc1c
- shl b32 $r10 6
- iowr I[$r10] $r11 // INTR_UP_SET
+ nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
// ack, and wake up main()
ih_no_other:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
+ nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
pop $r15
pop $r14
@@ -370,12 +374,10 @@ ih:
#if CHIPSET < GK100
// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
ctx_4160s:
- mov $r14 0x4160
- sethi $r14 0x400000
mov $r15 1
- call #nv_wr32
+ nv_wr32(0x404160, $r15)
ctx_4160s_wait:
- call #nv_rd32
+ nv_rd32($r15, 0x404160)
xbit $r15 $r15 4
bra e #ctx_4160s_wait
ret
@@ -384,10 +386,8 @@ ctx_4160s:
// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
// still function with it set however...
ctx_4160c:
- mov $r14 0x4160
- sethi $r14 0x400000
clear b32 $r15
- call #nv_wr32
+ nv_wr32(0x404160, $r15)
ret
#endif
@@ -396,18 +396,14 @@ ctx_4160c:
// In: $r15 value to set 0x404170 to
//
ctx_4170s:
- mov $r14 0x4170
- sethi $r14 0x400000
or $r15 0x10
- call #nv_wr32
+ nv_wr32(0x404170, $r15)
ret
// Waits for a ctx_4170s() call to complete
//
ctx_4170w:
- mov $r14 0x4170
- sethi $r14 0x400000
- call #nv_rd32
+ nv_rd32($r15, 0x404170)
and $r15 0x10
bra ne #ctx_4170w
ret
@@ -419,16 +415,18 @@ ctx_4170w:
// funny things happen.
//
ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x270
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
+ nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
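+ // brief delay loop before also enabling ROP/MAIN below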
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
bra ne #ctx_redswitch_delay
- mov $r15 0x770
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
+ or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
+ nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
ret
// Not a clue what this is for, except that unless the value is 0x10, the
@@ -437,15 +435,18 @@ ctx_redswitch:
// In: $r15 value to set to (0x00/0x10 are used)
//
ctx_86c:
- mov $r14 0x86c
- shl b32 $r14 6
- iowr I[$r14] $r15 // HUB(0x86c) = val
- mov $r14 -0x75ec
- sethi $r14 0x400000
- call #nv_wr32 // ROP(0xa14) = val
- mov $r14 -0x5794
- sethi $r14 0x410000
- call #nv_wr32 // GPC(0x86c) = val
+ nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
+ nv_wr32(0x408a14, $r15)
+ nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
+ ret
+
+// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
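+// issues the command, then busy-waits until MEM_CMD reads back as zero (complete)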
+ctx_mem:
+ nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
+ ctx_mem_wait:
+ nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
+ or $r15 $r15
+ bra ne #ctx_mem_wait
ret
// ctx_load - loads a channel's ctxctl data, and selects its vm
@@ -457,23 +458,14 @@ ctx_load:
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa24
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r0 // 0x409a24
- mov $r3 0xb00
- shl b32 $r3 6
- iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
- mov $r1 0xa0c
- shl b32 $r1 6
- mov $r4 7
- iowr I[$r1 + 0x000] $r2 // MEM_CHAN
- iowr I[$r1 + 0x100] $r4 // MEM_CMD
- ctx_chan_wait_0:
- iord $r4 I[$r1 + 0x100]
- and $r4 0x1f
- bra ne #ctx_chan_wait_0
- iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+ call(wait_donez)
+ clear b32 $r15
+ nv_iowr(0x409a24, 0, $r15)
+ nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
+ nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
+ mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
+ call(ctx_mem)
+ nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
// load channel header, fetch PGRAPH context pointer
mov $xtargets $r0
@@ -482,14 +474,10 @@ ctx_load:
add b32 $r2 2
trace_set(T_LCHAN)
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_BASE
- mov $r1 0xa20
- shl b32 $r1 6
- mov $r2 0x0002
- sethi $r2 0x80000000
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
+ imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
+ or $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
+ nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
mov $r1 0x10 // chan + 0x0210
mov $r2 #xfer_data
sethi $r2 0x00020000 // 16 bytes
@@ -507,13 +495,9 @@ ctx_load:
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1 // MEM_BASE
- mov $r2 1
- mov $r1 0xa20
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
+ mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
+ nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
@@ -532,21 +516,15 @@ ctx_load:
//
ctx_chan:
#if CHIPSET < GK100
- call #ctx_4160s
+ call(ctx_4160s)
#endif
- call #ctx_load
+ call(ctx_load)
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
- ctx_chan_wait:
- iord $r2 I[$r1 + 0x000]
- or $r2 $r2
- bra ne #ctx_chan_wait
+ call(wait_donez)
+ mov $r15 5 // MEM_CMD 5 ???
+ call(ctx_mem)
#if CHIPSET < GK100
- call #ctx_4160c
+ call(ctx_4160c)
#endif
ret
@@ -562,9 +540,7 @@ ctx_chan:
ctx_mmio_exec:
// set transfer base to be the mmio list
ld b32 $r3 D[$r0 + #chan_mmio_address]
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
clear b32 $r3
ctx_mmio_loop:
@@ -580,7 +556,7 @@ ctx_mmio_exec:
ctx_mmio_pull:
ld b32 $r14 D[$r4 + #xfer_data + 0x00]
ld b32 $r15 D[$r4 + #xfer_data + 0x04]
- call #nv_wr32
+ call(nv_wr32)
// next!
add b32 $r3 8
@@ -590,7 +566,7 @@ ctx_mmio_exec:
// set transfer base back to the current context
ctx_mmio_done:
ld b32 $r3 D[$r0 + #ctx_current]
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
+ nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
// disable the mmio list now, we don't need/want to execute it again
st b32 D[$r0 + #chan_mmio_count] $r0
@@ -610,12 +586,10 @@ ctx_mmio_exec:
//
ctx_xfer:
// according to mwk, some kind of wait for idle
- mov $r15 0xc00
- shl b32 $r15 6
mov $r14 4
- iowr I[$r15 + 0x200] $r14
+ nv_iowr(0x409c08, 0, $r14)
ctx_xfer_idle:
- iord $r14 I[$r15 + 0x000]
+ nv_iord($r14, 0x409c00, 0)
and $r14 0x2000
bra ne #ctx_xfer_idle
@@ -623,50 +597,42 @@ ctx_xfer:
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
- call #ctx_86c
+ call(ctx_86c)
#if CHIPSET < GK100
- call #ctx_4160s
+ call(ctx_4160s)
#endif
bra not $p1 #ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
- call #ctx_4170s
- call #ctx_4170w
- call #ctx_redswitch
+ call(ctx_4170s)
+ call(ctx_4170w)
+ call(ctx_redswitch)
clear b32 $r15
- call #ctx_4170s
- call #ctx_load
+ call(ctx_4170s)
+ call(ctx_load)
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
ld b32 $r1 D[$r0 + #ctx_current]
- mov $r2 0x414
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
- mov $r14 -0x5b00
- sethi $r14 0x410000
- mov b32 $r15 $r1
- call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
- add b32 $r14 4
+
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
+
+ nv_wr32(0x41a500, $r1) // GPC_BCAST_WRCMD_DATA = ctx pointer
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
- call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ nv_wr32(0x41a504, $r15) // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+ call(strand_pre)
+ clear b32 $r2
+ nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
+ xbit $r2 $flags $p1 // SAVE/LOAD
+ add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
+ nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
// mmio context
xbit $r10 $flags $p1 // direction
@@ -675,48 +641,42 @@ ctx_xfer:
ld b32 $r12 D[$r0 + #hub_mmio_list_head]
ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
mov $r14 0 // not multi
- call #mmctx_xfer
+ call(mmctx_xfer)
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
- call #wait_doneo
+ call(wait_doneo)
// wait for strand xfer to complete
- call #strand_wait
+ call(strand_wait)
// post-op
bra $p1 #ctx_xfer_post
mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1] $r2 // MEM_CMD
- ctx_xfer_post_save_wait:
- iord $r2 I[$r1]
- or $r2 $r2
- bra ne #ctx_xfer_post_save_wait
+ call(wait_donez)
+ mov $r15 5 // MEM_CMD 5 ???
+ call(ctx_mem)
bra $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r15 2
- call #ctx_4170s
+ call(ctx_4170s)
clear b32 $r15
- call #ctx_86c
- call #strand_post
- call #ctx_4170w
+ call(ctx_86c)
+ call(strand_post)
+ call(ctx_4170w)
clear b32 $r15
- call #ctx_4170s
+ call(ctx_4170s)
bra not $p1 #ctx_xfer_no_post_mmio
ld b32 $r1 D[$r0 + #chan_mmio_count]
or $r1 $r1
bra e #ctx_xfer_no_post_mmio
- call #ctx_mmio_exec
+ call(ctx_mmio_exec)
ctx_xfer_no_post_mmio:
#if CHIPSET < GK100
- call #ctx_4160c
+ call(ctx_4160c)
#endif
ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
new file mode 100644
index 000000000000..7c5d25630fa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grhub_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "hub.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grhub_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "hub.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
new file mode 100644
index 000000000000..4750984bf380
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -0,0 +1,916 @@
+uint32_t nv108_grhub_data[] = {
+/* 0x0000: hub_mmio_list_head */
+ 0x00000300,
+/* 0x0004: hub_mmio_list_tail */
+ 0x00000304,
+/* 0x0008: gpc_count */
+ 0x00000000,
+/* 0x000c: rop_count */
+ 0x00000000,
+/* 0x0010: cmd_queue */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0058: ctx_current */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: chan_data */
+/* 0x0100: chan_mmio_count */
+ 0x00000000,
+/* 0x0104: chan_mmio_address */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0200: xfer_data */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0300: hub_mmio_list_base */
+ 0x0417e91c,
+};
+
+uint32_t nv108_grhub_code[] = {
+ 0x030e0ef5,
+/* 0x0004: queue_put */
+ 0x9800d898,
+ 0x86f001d9,
+ 0xf489a408,
+ 0x020f0b1b,
+ 0x0002f87e,
+/* 0x001a: queue_put_next */
+ 0x98c400f8,
+ 0x0384b607,
+ 0xb6008dbb,
+ 0x8eb50880,
+ 0x018fb500,
+ 0xf00190b6,
+ 0xd9b50f94,
+/* 0x0037: queue_get */
+ 0xf400f801,
+ 0xd8980131,
+ 0x01d99800,
+ 0x0bf489a4,
+ 0x0789c421,
+ 0xbb0394b6,
+ 0x90b6009d,
+ 0x009e9808,
+ 0xb6019f98,
+ 0x84f00180,
+ 0x00d8b50f,
+/* 0x0063: queue_get_done */
+ 0xf80132f4,
+/* 0x0065: nv_rd32 */
+ 0xf0ecb200,
+ 0x00801fc9,
+ 0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+ 0x8c04bd00,
+ 0xcf01ca00,
+ 0xccc800cc,
+ 0xf61bf41f,
+ 0xec7e060a,
+ 0x008f0000,
+ 0xffcf01cb,
+/* 0x008f: nv_wr32 */
+ 0x8000f800,
+ 0xf601cc00,
+ 0x04bd000f,
+ 0xc9f0ecb2,
+ 0x1ec9f01f,
+ 0x01ca0080,
+ 0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+ 0xca008c04,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f61b,
+/* 0x00b8: wait_donez */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x00cf: wait_donez_ne */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf61bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x00ec: wait_doneo */
+ 0x99f094bd,
+ 0x37008000,
+ 0x0009f602,
+ 0x008004bd,
+ 0x0af60206,
+/* 0x0103: wait_doneo_e */
+ 0x8804bd00,
+ 0xcf010000,
+ 0x8aff0088,
+ 0xf60bf488,
+ 0x99f094bd,
+ 0x17008000,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0x1bf4efa4,
+ 0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+ 0xf094bd00,
+ 0x00800199,
+ 0x09f60237,
+ 0xbd04bd00,
+ 0x05bbfd94,
+ 0x800f0bf4,
+ 0xf601c400,
+ 0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0xc6008018,
+ 0x000ef601,
+ 0x008004bd,
+ 0x0ff601c7,
+ 0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+ 0xabc80199,
+ 0x10b4b600,
+ 0xc80cb9f0,
+ 0xe4b601ae,
+ 0x05befd11,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+ 0xc5008e04,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f60b,
+ 0x05e9fd00,
+ 0x01c80080,
+ 0xbd000ef6,
+ 0x04c0b604,
+ 0x1bf4cda4,
+ 0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+ 0x8b1c1bf4,
+ 0xcf01c500,
+ 0xb4f000bb,
+ 0x10b4b01f,
+ 0x0af31bf4,
+ 0x00b87e02,
+ 0x250ef400,
+/* 0x01d8: mmctx_stop */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x12b9f00c,
+ 0x01c50080,
+ 0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+ 0xc5008b04,
+ 0x00bbcf01,
+ 0xf412bbc8,
+/* 0x01fa: mmctx_done */
+ 0x94bdf61b,
+ 0x800199f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x020a: strand_wait */
+ 0xa0f900f8,
+ 0xb87e020a,
+ 0xa0fc0000,
+/* 0x0216: strand_pre */
+ 0x0c0900f8,
+ 0x024afc80,
+ 0xbd0009f6,
+ 0x020a7e04,
+/* 0x0227: strand_post */
+ 0x0900f800,
+ 0x4afc800d,
+ 0x0009f602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0238: strand_set */
+ 0xfc800f0c,
+ 0x0cf6024f,
+ 0x0c04bd00,
+ 0x4afc800b,
+ 0x000cf602,
+ 0xfc8004bd,
+ 0x0ef6024f,
+ 0x0c04bd00,
+ 0x4afc800a,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x00f80002,
+/* 0x0268: strand_ctx_init */
+ 0x99f094bd,
+ 0x37008003,
+ 0x0009f602,
+ 0x167e04bd,
+ 0x030e0002,
+ 0x0002387e,
+ 0xfc80c4bd,
+ 0x0cf60247,
+ 0x0c04bd00,
+ 0x4afc8001,
+ 0x000cf602,
+ 0x0a7e04bd,
+ 0x0c920002,
+ 0x46fc8001,
+ 0x000cf602,
+ 0x020c04bd,
+ 0x024afc80,
+ 0xbd000cf6,
+ 0x020a7e04,
+ 0x02277e00,
+ 0x42008800,
+ 0x20008902,
+ 0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+ 0xf608fe95,
+ 0x8ef6008e,
+ 0x808acf40,
+ 0xb606a5b6,
+ 0xeabb01a0,
+ 0x0480b600,
+ 0xf40192b6,
+ 0xe4b6e81b,
+ 0xf2efbc08,
+ 0x99f094bd,
+ 0x17008003,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x02f8: error */
+ 0x02050080,
+ 0xbd000ff6,
+ 0x80010f04,
+ 0xf6030700,
+ 0x04bd000f,
+/* 0x030e: init */
+ 0x04bd00f8,
+ 0x410007fe,
+ 0x11cf4200,
+ 0x0911e700,
+ 0x0814b601,
+ 0x020014fe,
+ 0x12004002,
+ 0xbd0002f6,
+ 0x05c94104,
+ 0xbd0010fe,
+ 0x07004024,
+ 0xbd0002f6,
+ 0x20034204,
+ 0x01010080,
+ 0xbd0002f6,
+ 0x20044204,
+ 0x01010480,
+ 0xbd0002f6,
+ 0x200b4204,
+ 0x01010880,
+ 0xbd0002f6,
+ 0x200c4204,
+ 0x01011c80,
+ 0xbd0002f6,
+ 0x01039204,
+ 0x03090080,
+ 0xbd0003f6,
+ 0x87044204,
+ 0xf6040040,
+ 0x04bd0002,
+ 0x00400402,
+ 0x0002f603,
+ 0x31f404bd,
+ 0x96048e10,
+ 0x00657e40,
+ 0xc7feb200,
+ 0x01b590f1,
+ 0x1ff4f003,
+ 0x01020fb5,
+ 0x041fbb01,
+ 0x800112b6,
+ 0xf6010300,
+ 0x04bd0001,
+ 0x01040080,
+ 0xbd0001f6,
+ 0x01004104,
+ 0x627e020f,
+ 0x717e0006,
+ 0x100f0006,
+ 0x0006b37e,
+ 0x98000e98,
+ 0x207e010f,
+ 0x14950001,
+ 0xc0008008,
+ 0x0004f601,
+ 0x008004bd,
+ 0x04f601c1,
+ 0xb704bd00,
+ 0xbb130030,
+ 0xf5b6001f,
+ 0xd3008002,
+ 0x000ff601,
+ 0x15b604bd,
+ 0x0110b608,
+ 0xb20814b6,
+ 0x02687e1f,
+ 0x001fbb00,
+ 0x84020398,
+/* 0x041f: init_gpc */
+ 0xb8502000,
+ 0x0008044e,
+ 0x8f7e1fb2,
+ 0x4eb80000,
+ 0xbd00010c,
+ 0x008f7ef4,
+ 0x044eb800,
+ 0x8f7e0001,
+ 0x4eb80000,
+ 0x0f000100,
+ 0x008f7e02,
+ 0x004eb800,
+/* 0x044e: init_gpc_wait */
+ 0x657e0008,
+ 0xffc80000,
+ 0xf90bf41f,
+ 0x08044eb8,
+ 0x00657e00,
+ 0x001fbb00,
+ 0x800040b7,
+ 0xf40132b6,
+ 0x000fb41b,
+ 0x0006b37e,
+ 0x627e000f,
+ 0x00800006,
+ 0x01f60201,
+ 0xbd04bd00,
+ 0x1f19f014,
+ 0x02300080,
+ 0xbd0001f6,
+/* 0x0491: main */
+ 0x0031f404,
+ 0x0d0028f4,
+ 0x00377e10,
+ 0xf401f400,
+ 0x4001e4b1,
+ 0x00c71bf5,
+ 0x99f094bd,
+ 0x37008004,
+ 0x0009f602,
+ 0x008104bd,
+ 0x11cf02c0,
+ 0xc1008200,
+ 0x0022cf02,
+ 0xf41f13c8,
+ 0x23c8770b,
+ 0x550bf41f,
+ 0x12b220f9,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0231f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x20fc04bd,
+ 0x99f094bd,
+ 0x37008006,
+ 0x0009f602,
+ 0x31f404bd,
+ 0x08367e01,
+ 0xf094bd00,
+ 0x00800699,
+ 0x09f60217,
+ 0xf404bd00,
+/* 0x0522: chsw_prev_no_next */
+ 0x20f92f0e,
+ 0x32f412b2,
+ 0x0232f401,
+ 0x0008367e,
+ 0x008020fc,
+ 0x02f602c0,
+ 0xf404bd00,
+/* 0x053e: chsw_no_prev */
+ 0x23c8130e,
+ 0x0d0bf41f,
+ 0xf40131f4,
+ 0x367e0232,
+/* 0x054e: chsw_done */
+ 0x01020008,
+ 0x02c30080,
+ 0xbd0002f6,
+ 0xf094bd04,
+ 0x00800499,
+ 0x09f60217,
+ 0xf504bd00,
+/* 0x056b: main_not_ctx_switch */
+ 0xb0ff2a0e,
+ 0x1bf401e4,
+ 0x7ef2b20c,
+ 0xf40007d6,
+/* 0x057a: main_not_ctx_chan */
+ 0xe4b0400e,
+ 0x2c1bf402,
+ 0x99f094bd,
+ 0x37008007,
+ 0x0009f602,
+ 0x32f404bd,
+ 0x0232f401,
+ 0x0008367e,
+ 0x99f094bd,
+ 0x17008007,
+ 0x0009f602,
+ 0x0ef404bd,
+/* 0x05a9: main_not_ctx_save */
+ 0x10ef9411,
+ 0x7e01f5f0,
+ 0xf50002f8,
+/* 0x05b7: main_done */
+ 0xbdfede0e,
+ 0x1f29f024,
+ 0x02300080,
+ 0xbd0002f6,
+ 0xcc0ef504,
+/* 0x05c9: ih */
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0x004a04bd,
+ 0x00aacf02,
+ 0xf404abc4,
+ 0x100d230b,
+ 0xcf1a004e,
+ 0x004f00ee,
+ 0x00ffcf19,
+ 0x0000047e,
+ 0x0400b0b7,
+ 0x0040010e,
+ 0x000ef61d,
+/* 0x060a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x4e100d0c,
+ 0x047e4001,
+/* 0x061a: ih_no_ctxsw */
+ 0xabe40000,
+ 0x0bf40400,
+ 0x01004b10,
+ 0x448ebfb2,
+ 0x8f7e4001,
+/* 0x062e: ih_no_fwmthd */
+ 0x044b0000,
+ 0xffb0bd01,
+ 0x0bf4b4ab,
+ 0x0700800c,
+ 0x000bf603,
+/* 0x0642: ih_no_other */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0x0032f480,
+/* 0x0662: ctx_4170s */
+ 0xf5f001f8,
+ 0x8effb210,
+ 0x7e404170,
+ 0xf800008f,
+/* 0x0671: ctx_4170w */
+ 0x41708e00,
+ 0x00657e40,
+ 0xf0ffb200,
+ 0x1bf410f4,
+/* 0x0683: ctx_redswitch */
+ 0x4e00f8f3,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0x8010e5f0,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x069a: ctx_redswitch_delay */
+ 0xf2b6080f,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x01850080,
+ 0xbd000ef6,
+/* 0x06b3: ctx_86c */
+ 0x8000f804,
+ 0xf6022300,
+ 0x04bd000f,
+ 0x148effb2,
+ 0x8f7e408a,
+ 0xffb20000,
+ 0x41a88c8e,
+ 0x00008f7e,
+/* 0x06d2: ctx_mem */
+ 0x008000f8,
+ 0x0ff60284,
+/* 0x06db: ctx_mem_wait */
+ 0x8f04bd00,
+ 0xcf028400,
+ 0xfffd00ff,
+ 0xf61bf405,
+/* 0x06ea: ctx_load */
+ 0x94bd00f8,
+ 0x800599f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0xb87e0c0a,
+ 0xf4bd0000,
+ 0x02890080,
+ 0xbd000ff6,
+ 0xc1008004,
+ 0x0002f602,
+ 0x008004bd,
+ 0x02f60283,
+ 0x0f04bd00,
+ 0x06d27e07,
+ 0xc0008000,
+ 0x0002f602,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0x800899f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0002f6,
+ 0x0000d204,
+ 0x25f08000,
+ 0x88008002,
+ 0x0002f602,
+ 0x100104bd,
+ 0xf0020042,
+ 0x12fa0223,
+ 0xbd03f805,
+ 0x0899f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0x81019804,
+ 0x981814b6,
+ 0x25b68002,
+ 0x0512fd08,
+ 0xbd1601b5,
+ 0x0999f094,
+ 0x02370080,
+ 0xbd0009f6,
+ 0x81008004,
+ 0x0001f602,
+ 0x010204bd,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x01004104,
+ 0xfa0613f0,
+ 0x03f80501,
+ 0x99f094bd,
+ 0x17008009,
+ 0x0009f602,
+ 0x94bd04bd,
+ 0x800599f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x07d6: ctx_chan */
+ 0xea7e00f8,
+ 0x0c0a0006,
+ 0x0000b87e,
+ 0xd27e050f,
+ 0x00f80006,
+/* 0x07e8: ctx_mmio_exec */
+ 0x80410398,
+ 0xf6028100,
+ 0x04bd0003,
+/* 0x07f6: ctx_mmio_loop */
+ 0x34c434bd,
+ 0x0e1bf4ff,
+ 0xf0020045,
+ 0x35fa0653,
+/* 0x0807: ctx_mmio_pull */
+ 0x9803f805,
+ 0x4f98804e,
+ 0x008f7e81,
+ 0x0830b600,
+ 0xf40112b6,
+/* 0x081a: ctx_mmio_done */
+ 0x0398df1b,
+ 0x81008016,
+ 0x0003f602,
+ 0x00b504bd,
+ 0x01004140,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x0836: ctx_xfer */
+ 0x040e00f8,
+ 0x03020080,
+ 0xbd000ef6,
+/* 0x0841: ctx_xfer_idle */
+ 0x00008e04,
+ 0x00eecf03,
+ 0x2000e4f1,
+ 0xf4f51bf4,
+ 0x02f40611,
+/* 0x0855: ctx_xfer_pre */
+ 0x7e100f0c,
+ 0xf40006b3,
+/* 0x085e: ctx_xfer_pre_load */
+ 0x020f1b11,
+ 0x0006627e,
+ 0x0006717e,
+ 0x0006837e,
+ 0x627ef4bd,
+ 0xea7e0006,
+/* 0x0876: ctx_xfer_exec */
+ 0x01980006,
+ 0x8024bd16,
+ 0xf6010500,
+ 0x04bd0002,
+ 0x008e1fb2,
+ 0x8f7e41a5,
+ 0xfcf00000,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0xffb205f2,
+ 0x41a5048e,
+ 0x00008f7e,
+ 0x0002167e,
+ 0xfc8024bd,
+ 0x02f60247,
+ 0xf004bd00,
+ 0x20b6012c,
+ 0x4afc8003,
+ 0x0002f602,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x0c98000b,
+ 0x010d9800,
+ 0x3d7e000e,
+ 0x080a0001,
+ 0x0000ec7e,
+ 0x00020a7e,
+ 0x0a1201f4,
+ 0x00b87e0c,
+ 0x7e050f00,
+ 0xf40006d2,
+/* 0x08f2: ctx_xfer_post */
+ 0x020f2d02,
+ 0x0006627e,
+ 0xb37ef4bd,
+ 0x277e0006,
+ 0x717e0002,
+ 0xf4bd0006,
+ 0x0006627e,
+ 0x981011f4,
+ 0x11fd4001,
+ 0x070bf405,
+ 0x0007e87e,
+/* 0x091c: ctx_xfer_no_post_mmio */
+/* 0x091c: ctx_xfer_done */
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index b59f694c0423..132f684b1946 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvc0_grhub_data[] = {
};
uint32_t nvc0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvc0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvc0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvc0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0xb521f502,
+ 0xc721f507,
+ 0x10f7f007,
+ 0x081421f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x081421f5,
+ 0xf500f7f0,
+ 0xf107b521,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvc0_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd082f,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x2f21f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082f21f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1082f21,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09e821f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09e821,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09e821f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd082f,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x7821f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4160s */
- 0xf101f800,
- 0xf04160e7,
- 0xf7f040e3,
- 0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
- 0xc86821f4,
- 0x0bf404ff,
-/* 0x0643: ctx_4160c */
- 0xf100f8fa,
+/* 0x077f: ctx_4160s */
+ 0xf001f800,
+ 0xffb901f7,
+ 0x60e7f102,
+ 0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+ 0xf19d21f4,
0xf04160e7,
- 0xf4bd40e3,
- 0xf88d21f4,
-/* 0x0651: ctx_4170s */
- 0x70e7f100,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf404ffc8,
+ 0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+ 0xffb9f4bd,
+ 0x60e7f102,
0x40e3f041,
- 0xf410f5f0,
- 0x00f88d21,
-/* 0x0660: ctx_4170w */
- 0x4170e7f1,
- 0xf440e3f0,
- 0xf4f06821,
- 0xf31bf410,
-/* 0x0672: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x70f7f106,
- 0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
- 0xb608f7f0,
- 0x1bf401f2,
- 0x70f7f1fd,
- 0x00efd007,
-/* 0x0692: ctx_86c */
- 0xe7f100f8,
- 0xe4b6086c,
- 0x00efd006,
- 0x8a14e7f1,
- 0xf440e3f0,
- 0xe7f18d21,
- 0xe3f0a86c,
- 0x8d21f441,
-/* 0x06b2: ctx_load */
+ 0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+ 0x10f5f000,
+ 0xf102ffb9,
+ 0xf04170e7,
+ 0x21f440e3,
+/* 0x07c7: ctx_4170w */
+ 0xf100f89d,
+ 0xf04170e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf410f4f0,
+ 0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+ 0x0200e7f1,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x0007f110,
+ 0x0103f085,
+ 0xbd000ed0,
+ 0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x07f10100,
+ 0x03f08500,
+ 0x000ed001,
+ 0x00f804bd,
+/* 0x0814: ctx_86c */
+ 0x1b0007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0xf102ffb9,
+ 0xf08a14e7,
+ 0x21f440e3,
+ 0x02ffb99d,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f89d21,
+/* 0x083c: ctx_mem */
+ 0x840007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+ 0x8400f7f1,
+ 0xcf02f3f0,
+ 0xfffd00ff,
+ 0xf31bf405,
+/* 0x085a: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
0x09d00203,
0xf004bd00,
0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0xf094bd02,
+ 0xf1f4bdd0,
+ 0xf0890007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf0830007,
+ 0x02d00203,
+ 0xf004bd00,
+ 0x21f507f7,
+ 0x07f1083c,
+ 0x03f0c000,
+ 0x0002d002,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0xf10899f0,
+ 0xf00f0007,
+ 0x09d00203,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf1000027,
+ 0xf0800023,
+ 0x07f10225,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f004bd,
+ 0x0027f110,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
0x07f10899,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x17f104bd,
- 0x14b60a04,
- 0x0012d006,
- 0x0a2017f1,
- 0xf00614b6,
- 0x23f10227,
- 0x12d08000,
- 0x1017f000,
- 0x020027f1,
- 0xfa0223f0,
- 0x03f80512,
+ 0x019804bd,
+ 0x1814b681,
+ 0xb6800298,
+ 0x12fd0825,
+ 0x16018005,
0x99f094bd,
- 0x0007f108,
- 0x0203f017,
+ 0x0007f109,
+ 0x0203f00f,
0xbd0009d0,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd160180,
- 0x0999f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0x0a0427f1,
- 0xd00624b6,
- 0x27f00021,
- 0x2017f101,
- 0x0614b60a,
- 0xf10012d0,
- 0xf0010017,
- 0x01fa0613,
- 0xbd03f805,
- 0x0999f094,
- 0x170007f1,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0001d0,
+ 0x0127f004,
+ 0x880007f1,
0xd00203f0,
- 0x04bd0009,
+ 0x04bd0002,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80501,
0x99f094bd,
- 0x0007f105,
+ 0x0007f109,
0x0203f017,
0xbd0009d0,
-/* 0x07bb: ctx_chan */
- 0xf500f804,
- 0xf5062b21,
- 0xf006b221,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf5fa1bf4,
- 0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
- 0x41039800,
- 0x0a0427f1,
- 0xd00624b6,
- 0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+ 0xf094bd04,
+ 0x07f10599,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0978: ctx_chan */
+ 0x077f21f5,
+ 0x085a21f5,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+ 0x9800f807,
+ 0x07f14103,
+ 0x03f08100,
+ 0x0003d002,
+ 0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
- 0xb68d21f4,
+ 0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
- 0xd0160398,
- 0x00800023,
- 0x0017f140,
- 0x0613f001,
- 0xf80601fa,
-/* 0x082f: ctx_xfer */
- 0xf100f803,
- 0xb60c00f7,
- 0xe7f006f4,
- 0x80fed004,
-/* 0x083c: ctx_xfer_idle */
- 0xf100fecf,
- 0xf42000e4,
- 0x11f4f91b,
- 0x1102f406,
-/* 0x084c: ctx_xfer_pre */
- 0xf510f7f0,
- 0xf5069221,
- 0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
- 0xf7f01c11,
- 0x5121f502,
- 0x6021f506,
- 0x7221f506,
- 0xf5f4bd06,
- 0xf5065121,
-/* 0x0873: ctx_xfer_exec */
- 0x9806b221,
- 0x27f11601,
- 0x24b60414,
- 0x0020d006,
- 0xa500e7f1,
- 0xb941e3f0,
- 0x21f4021f,
- 0x04e0b68d,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0xf18d21f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00c,
- 0x021521f5,
- 0x47fc27f1,
- 0xd00223f0,
- 0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+ 0xf1160398,
+ 0xf0810007,
+ 0x03d00203,
+ 0x8004bd00,
+ 0x17f14000,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x09e8: ctx_xfer */
+ 0xf104e7f0,
+ 0xf0020007,
+ 0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+ 0xf104bd00,
+ 0xf00000e7,
+ 0xeecf03e3,
+ 0x00e4f100,
+ 0xf21bf420,
+ 0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x1421f510,
+ 0x7f21f508,
+ 0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf507b521,
+ 0xf507c721,
+ 0xbd07dc21,
+ 0xb521f5f4,
+ 0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+ 0x16019808,
+ 0x07f124bd,
+ 0x03f00500,
+ 0x0002d001,
+ 0x1fb904bd,
+ 0x00e7f102,
+ 0x41e3f0a5,
+ 0xf09d21f4,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xb905f2fd,
+ 0xe7f102ff,
+ 0xe3f0a504,
+ 0x9d21f441,
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
0x0320b601,
- 0xf00012d0,
- 0xa5f001ac,
- 0x00b7f006,
- 0x98000c98,
- 0xe7f0010d,
- 0x6621f500,
- 0x08a7f001,
- 0x010921f5,
- 0x021521f5,
- 0xf02201f4,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
- 0xf7f03202,
- 0x5121f502,
- 0xf5f4bd06,
- 0xf5069221,
- 0xf5023421,
- 0xbd066021,
- 0x5121f5f4,
- 0x1011f406,
- 0xfd400198,
- 0x0bf40511,
- 0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
- 0x4321f507,
-/* 0x0935: ctx_xfer_done */
- 0x0000f806,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0xb7f006a5,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xa7f0016f,
+ 0x1021f508,
+ 0x5e21f501,
+ 0x1301f402,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd07b521,
+ 0x1421f5f4,
+ 0x7f21f508,
+ 0xc721f502,
+ 0xf5f4bd07,
+ 0xf407b521,
+ 0x01981011,
+ 0x0511fd40,
+ 0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+ 0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+ 0xf807a421,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index a1b9f763996a..84af82418987 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvd7_grhub_data[] = {
};
uint32_t nvd7_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvd7_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvd7_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvd7_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0xb521f502,
+ 0xc721f507,
+ 0x10f7f007,
+ 0x081421f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x081421f5,
+ 0xf500f7f0,
+ 0xf107b521,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvd7_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd082f,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x2f21f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082f21f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1082f21,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09e821f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09e821,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09e821f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd082f,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x7821f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xe821f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4160s */
- 0xf101f800,
- 0xf04160e7,
- 0xf7f040e3,
- 0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
- 0xc86821f4,
- 0x0bf404ff,
-/* 0x0643: ctx_4160c */
- 0xf100f8fa,
+/* 0x077f: ctx_4160s */
+ 0xf001f800,
+ 0xffb901f7,
+ 0x60e7f102,
+ 0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+ 0xf19d21f4,
0xf04160e7,
- 0xf4bd40e3,
- 0xf88d21f4,
-/* 0x0651: ctx_4170s */
- 0x70e7f100,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf404ffc8,
+ 0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+ 0xffb9f4bd,
+ 0x60e7f102,
0x40e3f041,
- 0xf410f5f0,
- 0x00f88d21,
-/* 0x0660: ctx_4170w */
- 0x4170e7f1,
- 0xf440e3f0,
- 0xf4f06821,
- 0xf31bf410,
-/* 0x0672: ctx_redswitch */
- 0xe7f100f8,
- 0xe4b60614,
- 0x70f7f106,
- 0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
- 0xb608f7f0,
- 0x1bf401f2,
- 0x70f7f1fd,
- 0x00efd007,
-/* 0x0692: ctx_86c */
- 0xe7f100f8,
- 0xe4b6086c,
- 0x00efd006,
- 0x8a14e7f1,
- 0xf440e3f0,
- 0xe7f18d21,
- 0xe3f0a86c,
- 0x8d21f441,
-/* 0x06b2: ctx_load */
+ 0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+ 0x10f5f000,
+ 0xf102ffb9,
+ 0xf04170e7,
+ 0x21f440e3,
+/* 0x07c7: ctx_4170w */
+ 0xf100f89d,
+ 0xf04170e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0xf410f4f0,
+ 0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+ 0x0200e7f1,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x0007f110,
+ 0x0103f085,
+ 0xbd000ed0,
+ 0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x07f10100,
+ 0x03f08500,
+ 0x000ed001,
+ 0x00f804bd,
+/* 0x0814: ctx_86c */
+ 0x1b0007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0xf102ffb9,
+ 0xf08a14e7,
+ 0x21f440e3,
+ 0x02ffb99d,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f89d21,
+/* 0x083c: ctx_mem */
+ 0x840007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+ 0x8400f7f1,
+ 0xcf02f3f0,
+ 0xfffd00ff,
+ 0xf31bf405,
+/* 0x085a: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
0x09d00203,
0xf004bd00,
0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0xf094bd02,
+ 0xf1f4bdd0,
+ 0xf0890007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf0c10007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf0830007,
+ 0x02d00203,
+ 0xf004bd00,
+ 0x21f507f7,
+ 0x07f1083c,
+ 0x03f0c000,
+ 0x0002d002,
+ 0x0bfe04bd,
+ 0x1f2af000,
+ 0xb60424b6,
+ 0x94bd0220,
+ 0xf10899f0,
+ 0xf00f0007,
+ 0x09d00203,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x02d00203,
+ 0xf104bd00,
+ 0xf1000027,
+ 0xf0800023,
+ 0x07f10225,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f004bd,
+ 0x0027f110,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
0x07f10899,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x17f104bd,
- 0x14b60a04,
- 0x0012d006,
- 0x0a2017f1,
- 0xf00614b6,
- 0x23f10227,
- 0x12d08000,
- 0x1017f000,
- 0x020027f1,
- 0xfa0223f0,
- 0x03f80512,
+ 0x019804bd,
+ 0x1814b681,
+ 0xb6800298,
+ 0x12fd0825,
+ 0x16018005,
0x99f094bd,
- 0x0007f108,
- 0x0203f017,
+ 0x0007f109,
+ 0x0203f00f,
0xbd0009d0,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd160180,
- 0x0999f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0x0a0427f1,
- 0xd00624b6,
- 0x27f00021,
- 0x2017f101,
- 0x0614b60a,
- 0xf10012d0,
- 0xf0010017,
- 0x01fa0613,
- 0xbd03f805,
- 0x0999f094,
- 0x170007f1,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0001d0,
+ 0x0127f004,
+ 0x880007f1,
0xd00203f0,
- 0x04bd0009,
+ 0x04bd0002,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80501,
0x99f094bd,
- 0x0007f105,
+ 0x0007f109,
0x0203f017,
0xbd0009d0,
-/* 0x07bb: ctx_chan */
- 0xf500f804,
- 0xf5062b21,
- 0xf006b221,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf5fa1bf4,
- 0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
- 0x41039800,
- 0x0a0427f1,
- 0xd00624b6,
- 0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+ 0xf094bd04,
+ 0x07f10599,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0978: ctx_chan */
+ 0x077f21f5,
+ 0x085a21f5,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+ 0x9800f807,
+ 0x07f14103,
+ 0x03f08100,
+ 0x0003d002,
+ 0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
- 0xb68d21f4,
+ 0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
- 0xd0160398,
- 0x00800023,
- 0x0017f140,
- 0x0613f001,
- 0xf80601fa,
-/* 0x082f: ctx_xfer */
- 0xf100f803,
- 0xb60c00f7,
- 0xe7f006f4,
- 0x80fed004,
-/* 0x083c: ctx_xfer_idle */
- 0xf100fecf,
- 0xf42000e4,
- 0x11f4f91b,
- 0x1102f406,
-/* 0x084c: ctx_xfer_pre */
- 0xf510f7f0,
- 0xf5069221,
- 0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
- 0xf7f01c11,
- 0x5121f502,
- 0x6021f506,
- 0x7221f506,
- 0xf5f4bd06,
- 0xf5065121,
-/* 0x0873: ctx_xfer_exec */
- 0x9806b221,
- 0x27f11601,
- 0x24b60414,
- 0x0020d006,
- 0xa500e7f1,
- 0xb941e3f0,
- 0x21f4021f,
- 0x04e0b68d,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0xf18d21f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00c,
- 0x021521f5,
- 0x47fc27f1,
- 0xd00223f0,
- 0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+ 0xf1160398,
+ 0xf0810007,
+ 0x03d00203,
+ 0x8004bd00,
+ 0x17f14000,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x09e8: ctx_xfer */
+ 0xf104e7f0,
+ 0xf0020007,
+ 0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+ 0xf104bd00,
+ 0xf00000e7,
+ 0xeecf03e3,
+ 0x00e4f100,
+ 0xf21bf420,
+ 0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x1421f510,
+ 0x7f21f508,
+ 0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf507b521,
+ 0xf507c721,
+ 0xbd07dc21,
+ 0xb521f5f4,
+ 0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+ 0x16019808,
+ 0x07f124bd,
+ 0x03f00500,
+ 0x0002d001,
+ 0x1fb904bd,
+ 0x00e7f102,
+ 0x41e3f0a5,
+ 0xf09d21f4,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xb905f2fd,
+ 0xe7f102ff,
+ 0xe3f0a504,
+ 0x9d21f441,
+ 0x026a21f5,
+ 0x07f124bd,
+ 0x03f047fc,
+ 0x0002d002,
+ 0x2cf004bd,
0x0320b601,
- 0xf00012d0,
- 0xa5f001ac,
- 0x00b7f006,
- 0x98000c98,
- 0xe7f0010d,
- 0x6621f500,
- 0x08a7f001,
- 0x010921f5,
- 0x021521f5,
- 0xf02201f4,
- 0x21f40ca7,
- 0x1017f1c9,
- 0x0614b60a,
- 0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
- 0x12cf0012,
- 0x0522fd00,
- 0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
- 0xf7f03202,
- 0x5121f502,
- 0xf5f4bd06,
- 0xf5069221,
- 0xf5023421,
- 0xbd066021,
- 0x5121f5f4,
- 0x1011f406,
- 0xfd400198,
- 0x0bf40511,
- 0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
- 0x4321f507,
-/* 0x0935: ctx_xfer_done */
- 0x0000f806,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0xb7f006a5,
+ 0x000c9800,
+ 0xf0010d98,
+ 0x21f500e7,
+ 0xa7f0016f,
+ 0x1021f508,
+ 0x5e21f501,
+ 0x1301f402,
+ 0xf40ca7f0,
+ 0xf7f0d021,
+ 0x3c21f505,
+ 0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd07b521,
+ 0x1421f5f4,
+ 0x7f21f508,
+ 0xc721f502,
+ 0xf5f4bd07,
+ 0xf407b521,
+ 0x01981011,
+ 0x0511fd40,
+ 0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+ 0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+ 0xf807a421,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index eb7bc0e9576e..1c179bdd48cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nve0_grhub_data[] = {
};
uint32_t nve0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nve0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f00f00,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f00f00,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f00f,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf00f0007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f00f00,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x0f0007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nve0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nve0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0x7f21f502,
+ 0x9121f507,
+ 0x10f7f007,
+ 0x07de21f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x07de21f5,
+ 0xf500f7f0,
+ 0xf1077f21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nve0_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x0f0007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x0f0007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd0801,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f00f00,
- 0x0009d002,
- 0x31f404bd,
- 0x0121f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x080121f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1080121,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09aa21f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x0f0007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09aa21,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09aa21f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd0801,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x4221f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf00f0007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f008,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x080007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4170s */
- 0xf101f800,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+ 0xf001f800,
+ 0xffb910f5,
+ 0x70e7f102,
+ 0x40e3f041,
+ 0xf89d21f4,
+/* 0x0791: ctx_4170w */
+ 0x70e7f100,
+ 0x40e3f041,
+ 0xb96821f4,
+ 0xf4f002ff,
+ 0xf01bf410,
+/* 0x07a6: ctx_redswitch */
0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0xf110e5f0,
+ 0xf0850007,
+ 0x0ed00103,
+ 0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+ 0xf2b608f7,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+/* 0x07de: ctx_86c */
+ 0x07f100f8,
+ 0x03f01b00,
+ 0x000fd002,
+ 0xffb904bd,
+ 0x14e7f102,
+ 0x40e3f08a,
+ 0xb99d21f4,
+ 0xe7f102ff,
+ 0xe3f0a86c,
+ 0x9d21f441,
+/* 0x0806: ctx_mem */
+ 0x07f100f8,
+ 0x03f08400,
+ 0x000fd002,
+/* 0x0812: ctx_mem_wait */
+ 0xf7f104bd,
+ 0xf3f08400,
+ 0x00ffcf02,
+ 0xf405fffd,
0x00f8f31b,
-/* 0x064c: ctx_redswitch */
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
-/* 0x066c: ctx_86c */
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f00f,
0xbd0009d0,
0x0ca7f004,
- 0xf1c921f4,
- 0xb60a2417,
- 0x10d00614,
- 0x0037f100,
- 0x0634b60b,
- 0xf14032d0,
- 0xb60a0c17,
- 0x47f00614,
- 0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
- 0xcf4014d0,
- 0x44f04014,
- 0xfa1bf41f,
- 0xfe0032d0,
- 0x2af0000b,
- 0x0424b61f,
- 0xbd0220b6,
+ 0xbdd021f4,
+ 0x0007f1f4,
+ 0x0203f089,
+ 0xbd000fd0,
+ 0x0007f104,
+ 0x0203f0c1,
+ 0xbd0002d0,
+ 0x0007f104,
+ 0x0203f083,
+ 0xbd0002d0,
+ 0x07f7f004,
+ 0x080621f5,
+ 0xc00007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
+ 0x99f094bd,
+ 0x0007f108,
+ 0x0203f00f,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0002d0,
+ 0x0027f104,
+ 0x0023f100,
+ 0x0225f080,
+ 0x880007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf11017f0,
+ 0xf0020027,
+ 0x12fa0223,
+ 0xbd03f805,
0x0899f094,
- 0x0f0007f1,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00200,
- 0x0512fa02,
- 0x94bd03f8,
- 0xf10899f0,
- 0xf0170007,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0x800512fd,
+ 0x94bd1601,
+ 0xf10999f0,
+ 0xf00f0007,
0x09d00203,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01800512,
- 0xf094bd16,
- 0x07f10999,
- 0x03f00f00,
- 0x0009d002,
- 0x27f104bd,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f001,
- 0xf80501fa,
- 0xf094bd03,
- 0x07f10999,
- 0x03f01700,
- 0x0009d002,
- 0x94bd04bd,
- 0xf10599f0,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x01d00203,
+ 0xf004bd00,
+ 0x07f10127,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f104bd,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0xf10999f0,
0xf0170007,
0x09d00203,
- 0xf804bd00,
-/* 0x0795: ctx_chan */
- 0x8c21f500,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x07ac: ctx_chan_wait */
- 0xfd0012cf,
- 0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
- 0x9800f8fa,
- 0x27f14103,
- 0x24b60a04,
- 0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+ 0xbd04bd00,
+ 0x0599f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x0942: ctx_chan */
+ 0x21f500f8,
+ 0xa7f00824,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+ 0x41039800,
+ 0x810007f1,
+ 0xd00203f0,
+ 0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
- 0x0830b68d,
+ 0x0830b69d,
0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
0x0398df1b,
- 0x0023d016,
- 0xf1400080,
- 0xf0010017,
- 0x01fa0613,
- 0xf803f806,
-/* 0x0801: ctx_xfer */
- 0x00f7f100,
- 0x06f4b60c,
- 0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
- 0xfecf80fe,
- 0x00e4f100,
- 0xf91bf420,
- 0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
- 0xf7f00d02,
- 0x6c21f510,
- 0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
- 0xf502f7f0,
- 0xf5062b21,
- 0xf5063a21,
- 0xbd064c21,
- 0x2b21f5f4,
- 0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
- 0x16019806,
- 0x041427f1,
- 0xd00624b6,
- 0xe7f10020,
- 0xe3f0a500,
- 0x021fb941,
- 0xb68d21f4,
- 0xfcf004e0,
- 0x022cf001,
- 0xfd0124b6,
- 0x21f405f2,
- 0xfc17f18d,
- 0x0213f04a,
- 0xd00c27f0,
- 0x21f50012,
- 0x27f10215,
- 0x23f047fc,
- 0x0020d002,
+ 0x0007f116,
+ 0x0203f081,
+ 0xbd0003d0,
+ 0x40008004,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x09aa: ctx_xfer */
+ 0xe7f000f8,
+ 0x0007f104,
+ 0x0303f002,
+ 0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+ 0x00e7f104,
+ 0x03e3f000,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f21b,
+ 0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x7f21f502,
+ 0x9121f507,
+ 0xa621f507,
+ 0xf5f4bd07,
+ 0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+ 0x98082421,
+ 0x24bd1601,
+ 0x050007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1021fb9,
+ 0xf0a500e7,
+ 0x21f441e3,
+ 0x01fcf09d,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x02ffb905,
+ 0xa504e7f1,
+ 0xf441e3f0,
+ 0x21f59d21,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xb6012cf0,
- 0x12d00320,
- 0x01acf000,
- 0xf006a5f0,
- 0x0c9800b7,
- 0x010d9800,
- 0xf500e7f0,
- 0xf0016621,
- 0x21f508a7,
- 0x21f50109,
- 0x01f40215,
- 0x0ca7f022,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
- 0xfd0012cf,
- 0x1bf40522,
- 0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
- 0xf502f7f0,
- 0xbd062b21,
- 0x6c21f5f4,
- 0x3421f506,
- 0x3a21f502,
- 0xf5f4bd06,
- 0xf4062b21,
- 0x01981011,
- 0x0511fd40,
- 0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
- 0xf807b721,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf508a7f0,
+ 0xf5011021,
+ 0xf4025e21,
+ 0xa7f01301,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x7f21f502,
+ 0xf5f4bd07,
+ 0xf507de21,
+ 0xf5027f21,
+ 0xbd079121,
+ 0x7f21f5f4,
+ 0x1011f407,
+ 0xfd400198,
+ 0x0bf40511,
+ 0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+ 0x0000f809,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index 438506d14749..229c0ae37228 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -206,14 +206,14 @@ uint32_t nvf0_grhub_data[] = {
};
uint32_t nvf0_grhub_code[] = {
- 0x031b0ef5,
+ 0x039b0ef5,
/* 0x0004: queue_put */
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
- 0x00f802fe,
+ 0x00f8037e,
/* 0x001c: queue_put_next */
0xb60798c4,
0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvf0_grhub_code[] = {
/* 0x0066: queue_get_done */
0x00f80132,
/* 0x0068: nv_rd32 */
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010921f5,
- 0xf840bfcf,
-/* 0x008d: nv_wr32 */
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
-/* 0x00bd: watchdog_clear */
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
-/* 0x00c9: wait_donez */
- 0xf094bd00,
- 0x07f10099,
- 0x03f03700,
- 0x0009d002,
- 0x07f104bd,
- 0x03f00600,
- 0x000ad002,
-/* 0x00e6: wait_donez_ne */
- 0x87f104bd,
- 0x83f00000,
- 0x0088cf01,
- 0xf4888aff,
- 0x94bdf31b,
- 0xf10099f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0109: wait_doneo */
- 0xf094bd00,
+ 0xf002ecb9,
+ 0x07f11fc9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x007a: nv_rd32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0xa7f0f31b,
+ 0x1021f506,
+ 0x00f7f101,
+ 0x01f3f0cb,
+ 0xf800ffcf,
+/* 0x009d: nv_wr32 */
+ 0x0007f100,
+ 0x0103f0cc,
+ 0xbd000fd0,
+ 0x02ecb904,
+ 0xf01fc9f0,
+ 0x07f11ec9,
+ 0x03f0ca00,
+ 0x000cd001,
+/* 0x00be: nv_wr32_wait */
+ 0xc7f104bd,
+ 0xc3f0ca00,
+ 0x00cccf01,
+ 0xf41fccc8,
+ 0x00f8f31b,
+/* 0x00d0: wait_donez */
+ 0x99f094bd,
+ 0x0007f100,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x1bf4888a,
+ 0xf094bdf3,
0x07f10099,
- 0x03f03700,
+ 0x03f01700,
0x0009d002,
- 0x87f104bd,
- 0x84b60818,
- 0x008ad006,
-/* 0x0124: wait_doneo_e */
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
+ 0x00f804bd,
+/* 0x0110: wait_doneo */
0x99f094bd,
0x0007f100,
- 0x0203f017,
+ 0x0203f037,
0xbd0009d0,
-/* 0x0147: mmctx_size */
- 0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
- 0x00e89894,
- 0xb61a85b6,
- 0x84b60180,
- 0x0098bb02,
- 0xb804e0b6,
- 0x1bf404ef,
- 0x029fb9eb,
-/* 0x0166: mmctx_xfer */
- 0x94bd00f8,
- 0xf10199f0,
- 0xf0370007,
- 0x09d00203,
- 0xf104bd00,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
-/* 0x018c: mmctx_base_disabled */
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
- 0xb70199f0,
- 0xc8010080,
+ 0x0007f104,
+ 0x0203f006,
+ 0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+ 0x0087f104,
+ 0x0183f000,
+ 0xff0088cf,
+ 0x0bf4888a,
+ 0xf094bdf3,
+ 0x07f10099,
+ 0x03f01700,
+ 0x0009d002,
+ 0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+ 0xe89894bd,
+ 0x1a85b600,
+ 0xb60180b6,
+ 0x98bb0284,
+ 0x04e0b600,
+ 0xf404efb8,
+ 0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+ 0xbd00f802,
+ 0x0199f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0xbbfd94bd,
+ 0x120bf405,
+ 0xc40007f1,
+ 0xd00103f0,
+ 0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+ 0xfd0099f0,
+ 0x0bf405ee,
+ 0x0007f11e,
+ 0x0103f0c6,
+ 0xbd000ed0,
+ 0x0007f104,
+ 0x0103f0c7,
+ 0xbd000fd0,
+ 0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+ 0xb600abc8,
+ 0xb9f010b4,
+ 0x01aec80c,
+ 0xfd11e4b6,
+ 0x07f105be,
+ 0x03f0c500,
+ 0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+ 0xe7f104bd,
+ 0xe3f0c500,
+ 0x00eecf01,
+ 0xf41fe4f0,
+ 0xce98f30b,
+ 0x05e9fd00,
+ 0xc80007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+ 0xb804c0b6,
+ 0x1bf404cd,
+ 0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+ 0xf11f1bf4,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x1fb4f000,
+ 0xf410b4b0,
+ 0xa7f0f01b,
+ 0xd021f402,
+/* 0x0223: mmctx_stop */
+ 0xc82b0ef4,
0xb4b600ab,
0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
-/* 0x01ea: mmctx_stop */
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
- 0x008bcf00,
- 0xf412bbc8,
-/* 0x0202: mmctx_done */
- 0x94bdfa1b,
- 0xf10199f0,
- 0xf0170007,
- 0x09d00203,
- 0xf804bd00,
-/* 0x0215: strand_wait */
- 0xf0a0f900,
- 0x21f402a7,
- 0xf8a0fcc9,
-/* 0x0221: strand_pre */
- 0xfc87f100,
- 0x0283f04a,
- 0xd00c97f0,
- 0x21f50089,
- 0x00f80215,
-/* 0x0234: strand_post */
- 0x4afc87f1,
- 0xf00283f0,
- 0x89d00d97,
- 0x1521f500,
-/* 0x0247: strand_set */
- 0xf100f802,
- 0xf04ffca7,
- 0xaba202a3,
- 0xc7f00500,
- 0x00acd00f,
- 0xd00bc7f0,
- 0x21f500bc,
- 0xaed00215,
- 0x0ac7f000,
- 0xf500bcd0,
- 0xf8021521,
-/* 0x0271: strand_ctx_init */
- 0xf094bd00,
- 0x07f10399,
- 0x03f03700,
+ 0xf112b9f0,
+ 0xf0c50007,
+ 0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+ 0xf104bd00,
+ 0xf0c500b7,
+ 0xbbcf01b3,
+ 0x12bbc800,
+/* 0x024b: mmctx_done */
+ 0xbdf31bf4,
+ 0x0199f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x025e: strand_wait */
+ 0xa0f900f8,
+ 0xf402a7f0,
+ 0xa0fcd021,
+/* 0x026a: strand_pre */
+ 0x97f000f8,
+ 0xfc07f10c,
+ 0x0203f04a,
+ 0xbd0009d0,
+ 0x5e21f504,
+/* 0x027f: strand_post */
+ 0xf000f802,
+ 0x07f10d97,
+ 0x03f04afc,
0x0009d002,
0x21f504bd,
- 0xe7f00221,
- 0x4721f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x1521f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x1521f500,
- 0x3421f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+ 0x00f8025e,
+/* 0x0294: strand_set */
+ 0xf10fc7f0,
+ 0xf04ffc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f10bc7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x07f104bd,
+ 0x03f04ffc,
+ 0x000ed002,
+ 0xc7f004bd,
+ 0xfc07f10a,
+ 0x0203f04a,
+ 0xbd000cd0,
+ 0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+ 0xbd00f802,
+ 0x0399f094,
+ 0x370007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+ 0x026a21f5,
+ 0xf503e7f0,
+ 0xbd029421,
+ 0xfc07f1c4,
+ 0x0203f047,
+ 0xbd000cd0,
+ 0x01c7f004,
+ 0x4afc07f1,
+ 0xd00203f0,
+ 0x04bd000c,
+ 0x025e21f5,
+ 0xf1010c92,
+ 0xf046fc07,
+ 0x0cd00203,
+ 0xf004bd00,
+ 0x07f102c7,
+ 0x03f04afc,
+ 0x000cd002,
+ 0x21f504bd,
+ 0x21f5025e,
+ 0x87f1027f,
+ 0x83f04200,
+ 0x0097f102,
+ 0x0293f020,
+ 0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
0x8ed008fe,
0x408ed000,
0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvf0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
0x07f100f8,
0x03f00500,
0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvf0_grhub_code[] = {
0x0007f101,
0x0303f007,
0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
0xbd00f804,
- 0x0004fe04,
- 0xf10007fe,
- 0xf0120017,
- 0x12d00227,
- 0xb117f100,
- 0x0010fe05,
- 0x040017f1,
- 0xf1c010d0,
- 0xb6040437,
- 0x27f10634,
- 0x32d02003,
- 0x0427f100,
- 0x0132d020,
+ 0x0007fe04,
+ 0x420017f1,
+ 0xcf0013f0,
+ 0x11e70011,
+ 0x14b60109,
+ 0x0014fe08,
+ 0xf10227f0,
+ 0xf0120007,
+ 0x02d00003,
+ 0xf104bd00,
+ 0xfe06c817,
+ 0x24bd0010,
+ 0x070007f1,
+ 0xd00003f0,
+ 0x04bd0002,
+ 0x200327f1,
+ 0x010007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200427f1,
+ 0x010407f1,
+ 0xd00103f0,
+ 0x04bd0002,
0x200b27f1,
- 0xf10232d0,
- 0xd0200c27,
- 0x27f10732,
- 0x24b60c24,
- 0x0003b906,
- 0xf10023d0,
+ 0x010807f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0x200c27f1,
+ 0x011c07f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1010392,
+ 0xf0090007,
+ 0x03d00303,
+ 0xf104bd00,
0xf0870427,
- 0x12d00023,
- 0x0012b700,
- 0x0427f001,
- 0xf40012d0,
- 0xe7f11031,
- 0xe3f09604,
- 0x6821f440,
- 0x8090f1c7,
- 0xf4f00301,
- 0x020f801f,
- 0xbb0117f0,
- 0x12b6041f,
- 0x0c27f101,
- 0x0624b604,
- 0xd00021d0,
- 0x17f14021,
- 0x0e980100,
- 0x010f9800,
- 0x014721f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x7121f502,
- 0x001fbb02,
- 0xf1020398,
- 0xf0200047,
-/* 0x03f6: init_gpc */
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x010c4ea0,
- 0x21f4f4bd,
- 0x044ea08d,
- 0x8d21f401,
- 0x01004ea0,
- 0xf402f7f0,
- 0x4ea08d21,
-/* 0x041e: init_gpc_wait */
- 0x21f40800,
- 0x1fffc868,
- 0xa0fa0bf4,
- 0xf408044e,
- 0x1fbb6821,
- 0x0040b700,
- 0x0132b680,
- 0xf1be1bf4,
+ 0x07f10023,
+ 0x03f00400,
+ 0x0002d000,
+ 0x27f004bd,
+ 0x0007f104,
+ 0x0003f003,
+ 0xbd0002d0,
+ 0x1031f404,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xfeb96821,
+ 0x90f1c702,
+ 0xf0030180,
+ 0x0f801ff4,
+ 0x0117f002,
+ 0xb6041fbb,
+ 0x07f10112,
+ 0x03f00300,
+ 0x0001d001,
+ 0x07f104bd,
+ 0x03f00400,
+ 0x0001d001,
+ 0x17f104bd,
+ 0xf7f00100,
+ 0x7f21f502,
+ 0x9121f507,
+ 0x10f7f007,
+ 0x07de21f5,
+ 0x98000e98,
+ 0x21f5010f,
+ 0x14950150,
+ 0x0007f108,
+ 0x0103f0c0,
+ 0xbd0004d0,
+ 0x0007f104,
+ 0x0103f0c1,
+ 0xbd0004d0,
+ 0x0030b704,
+ 0x001fbb13,
+ 0xf102f5b6,
+ 0xf0d30007,
+ 0x0fd00103,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0xf5021fb9,
+ 0xbb02d321,
+ 0x0398001f,
+ 0x0047f102,
+ 0x5043f020,
+/* 0x04f4: init_gpc */
+ 0x08044ea0,
+ 0xf4021fb9,
+ 0x4ea09d21,
+ 0xf4bd010c,
+ 0xa09d21f4,
+ 0xf401044e,
+ 0x4ea09d21,
+ 0xf7f00100,
+ 0x9d21f402,
+ 0x08004ea0,
+/* 0x051c: init_gpc_wait */
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x00f7f0be,
+ 0x07de21f5,
+ 0xf500f7f0,
+ 0xf1077f21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nvf0_grhub_code[] = {
0x300007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
0xf40031f4,
0xd7f00028,
0x3921f410,
0xb1f401f4,
0xf54001e4,
- 0xbd00de1b,
+ 0xbd00e91b,
0x0499f094,
0x370007f1,
0xd00203f0,
0x04bd0009,
- 0x0b0017f1,
- 0xcf0614b6,
- 0x11cf4012,
- 0x1f13c800,
- 0x00870bf5,
- 0xf41f23c8,
- 0x20f9620b,
- 0xbd0212b9,
- 0x0799f094,
- 0x370007f1,
- 0xd00203f0,
- 0x04bd0009,
- 0xf40132f4,
- 0x21f50231,
- 0x94bd0801,
+ 0xc00017f1,
+ 0xcf0213f0,
+ 0x27f10011,
+ 0x23f0c100,
+ 0x0022cf02,
+ 0xf51f13c8,
+ 0xc800890b,
+ 0x0bf41f23,
+ 0xb920f962,
+ 0x94bd0212,
0xf10799f0,
- 0xf0170007,
+ 0xf0370007,
0x09d00203,
- 0xfc04bd00,
- 0xf094bd20,
- 0x07f10699,
- 0x03f03700,
- 0x0009d002,
- 0x31f404bd,
- 0x0121f501,
- 0xf094bd08,
- 0x07f10699,
+ 0xf404bd00,
+ 0x31f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
0x03f01700,
0x0009d002,
- 0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x080121f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
-/* 0x0517: chsw_no_prev */
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
-/* 0x0527: chsw_done */
- 0xf1080121,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
+ 0x20fc04bd,
0x99f094bd,
- 0x0007f104,
+ 0x0007f106,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0131f404,
+ 0x09aa21f5,
+ 0x99f094bd,
+ 0x0007f106,
0x0203f017,
0xbd0009d0,
- 0x130ef504,
-/* 0x0549: main_not_ctx_switch */
- 0x01e4b0ff,
- 0xb90d1bf4,
- 0x21f502f2,
- 0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
- 0x02e4b046,
- 0xbd321bf4,
- 0x0799f094,
- 0x370007f1,
+ 0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+ 0x12b920f9,
+ 0x0132f402,
+ 0xf50232f4,
+ 0xfc09aa21,
+ 0x0007f120,
+ 0x0203f0c0,
+ 0xbd0002d0,
+ 0x130ef404,
+/* 0x062c: chsw_no_prev */
+ 0xf41f23c8,
+ 0x31f40d0b,
+ 0x0232f401,
+ 0x09aa21f5,
+/* 0x063c: chsw_done */
+ 0xf10127f0,
+ 0xf0c30007,
+ 0x02d00203,
+ 0xbd04bd00,
+ 0x0499f094,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xf40132f4,
- 0x21f50232,
- 0x94bd0801,
+ 0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b90d1b,
+ 0x4221f502,
+ 0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+ 0xf402e4b0,
+ 0x94bd321b,
0xf10799f0,
- 0xf0170007,
+ 0xf0370007,
0x09d00203,
0xf404bd00,
-/* 0x058e: main_not_ctx_save */
- 0xef94110e,
- 0x01f5f010,
- 0x02fe21f5,
- 0xfec00ef5,
-/* 0x059c: main_done */
- 0x29f024bd,
- 0x0007f11f,
- 0x0203f030,
- 0xbd0002d0,
- 0xab0ef504,
-/* 0x05b1: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x0acf04bd,
- 0x04abc480,
- 0xf11d0bf4,
- 0xf01900b7,
- 0xbecf10d7,
- 0x00bfcf40,
+ 0x32f40132,
+ 0xaa21f502,
+ 0xf094bd09,
+ 0x07f10799,
+ 0x03f01700,
+ 0x0009d002,
+ 0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+ 0x10ef9411,
+ 0xf501f5f0,
+ 0xf5037e21,
+/* 0x06b3: main_done */
+ 0xbdfeb50e,
+ 0x1f29f024,
+ 0x300007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xfea00ef5,
+/* 0x06c8: ih */
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x00a7f104,
+ 0x00a3f002,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x10d7f030,
+ 0x1a00e7f1,
+ 0xcf00e3f0,
+ 0xf7f100ee,
+ 0xf3f01900,
+ 0x00ffcf00,
0xb70421f4,
0xf00400b0,
- 0xbed001e7,
-/* 0x05e9: ih_no_fifo */
- 0x00abe400,
- 0x0d0bf401,
- 0xf110d7f0,
- 0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
- 0xb7f10421,
- 0xb0bd0104,
- 0xf4b4abff,
- 0xa7f10d0b,
- 0xa4b60c1c,
- 0x00abd006,
-/* 0x0610: ih_no_other */
- 0xfc400ad0,
+ 0x07f101e7,
+ 0x03f01d00,
+ 0x000ed000,
+/* 0x071a: ih_no_fifo */
+ 0xabe404bd,
+ 0x0bf40100,
+ 0x10d7f00d,
+ 0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+ 0xe40421f4,
+ 0xf40400ab,
+ 0xb7f1140b,
+ 0xbfb90100,
+ 0x44e7f102,
+ 0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+ 0xf19d21f4,
+ 0xbd0104b7,
+ 0xb4abffb0,
+ 0xf10f0bf4,
+ 0xf0070007,
+ 0x0bd00303,
+/* 0x075b: ih_no_other */
+ 0xf104bd00,
+ 0xf0010007,
+ 0x0ad00003,
+ 0xfc04bd00,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x062b: ctx_4170s */
- 0xf101f800,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+ 0xf001f800,
+ 0xffb910f5,
+ 0x70e7f102,
+ 0x40e3f041,
+ 0xf89d21f4,
+/* 0x0791: ctx_4170w */
+ 0x70e7f100,
+ 0x40e3f041,
+ 0xb96821f4,
+ 0xf4f002ff,
+ 0xf01bf410,
+/* 0x07a6: ctx_redswitch */
0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
+ 0xe5f00200,
+ 0x20e5f040,
+ 0xf110e5f0,
+ 0xf0850007,
+ 0x0ed00103,
+ 0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+ 0xf2b608f7,
+ 0xfd1bf401,
+ 0x0400e5f1,
+ 0x0100e5f1,
+ 0x850007f1,
+ 0xd00103f0,
+ 0x04bd000e,
+/* 0x07de: ctx_86c */
+ 0x07f100f8,
+ 0x03f02300,
+ 0x000fd002,
+ 0xffb904bd,
+ 0x14e7f102,
+ 0x40e3f08a,
+ 0xb99d21f4,
+ 0xe7f102ff,
+ 0xe3f0a88c,
+ 0x9d21f441,
+/* 0x0806: ctx_mem */
+ 0x07f100f8,
+ 0x03f08400,
+ 0x000fd002,
+/* 0x0812: ctx_mem_wait */
+ 0xf7f104bd,
+ 0xf3f08400,
+ 0x00ffcf02,
+ 0xf405fffd,
0x00f8f31b,
-/* 0x064c: ctx_redswitch */
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
-/* 0x066c: ctx_86c */
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f037,
0xbd0009d0,
0x0ca7f004,
- 0xf1c921f4,
- 0xb60a2417,
- 0x10d00614,
- 0x0037f100,
- 0x0634b60b,
- 0xf14032d0,
- 0xb60a0c17,
- 0x47f00614,
- 0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
- 0xcf4014d0,
- 0x44f04014,
- 0xfa1bf41f,
- 0xfe0032d0,
- 0x2af0000b,
- 0x0424b61f,
- 0xbd0220b6,
+ 0xbdd021f4,
+ 0x0007f1f4,
+ 0x0203f089,
+ 0xbd000fd0,
+ 0x0007f104,
+ 0x0203f0c1,
+ 0xbd0002d0,
+ 0x0007f104,
+ 0x0203f083,
+ 0xbd0002d0,
+ 0x07f7f004,
+ 0x080621f5,
+ 0xc00007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
+ 0x99f094bd,
+ 0x0007f108,
+ 0x0203f037,
+ 0xbd0009d0,
+ 0x0007f104,
+ 0x0203f081,
+ 0xbd0002d0,
+ 0x0027f104,
+ 0x0023f100,
+ 0x0225f080,
+ 0x880007f1,
+ 0xd00203f0,
+ 0x04bd0002,
+ 0xf11017f0,
+ 0xf0020027,
+ 0x12fa0223,
+ 0xbd03f805,
0x0899f094,
- 0x370007f1,
+ 0x170007f1,
0xd00203f0,
0x04bd0009,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00200,
- 0x0512fa02,
- 0x94bd03f8,
- 0xf10899f0,
- 0xf0170007,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0x800512fd,
+ 0x94bd1601,
+ 0xf10999f0,
+ 0xf0370007,
0x09d00203,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01800512,
- 0xf094bd16,
- 0x07f10999,
- 0x03f03700,
- 0x0009d002,
- 0x27f104bd,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f001,
- 0xf80501fa,
- 0xf094bd03,
- 0x07f10999,
- 0x03f01700,
- 0x0009d002,
- 0x94bd04bd,
- 0xf10599f0,
+ 0xf104bd00,
+ 0xf0810007,
+ 0x01d00203,
+ 0xf004bd00,
+ 0x07f10127,
+ 0x03f08800,
+ 0x0002d002,
+ 0x17f104bd,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0xf10999f0,
0xf0170007,
0x09d00203,
- 0xf804bd00,
-/* 0x0795: ctx_chan */
- 0x8c21f500,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x07ac: ctx_chan_wait */
- 0xfd0012cf,
- 0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
- 0x9800f8fa,
- 0x27f14103,
- 0x24b60a04,
- 0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+ 0xbd04bd00,
+ 0x0599f094,
+ 0x170007f1,
+ 0xd00203f0,
+ 0x04bd0009,
+/* 0x0942: ctx_chan */
+ 0x21f500f8,
+ 0xa7f00824,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+ 0x41039800,
+ 0x810007f1,
+ 0xd00203f0,
+ 0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
- 0x0830b68d,
+ 0x0830b69d,
0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
0x0398df1b,
- 0x0023d016,
- 0xf1400080,
- 0xf0010017,
- 0x01fa0613,
- 0xf803f806,
-/* 0x0801: ctx_xfer */
- 0x00f7f100,
- 0x06f4b60c,
- 0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
- 0xfecf80fe,
- 0x00e4f100,
- 0xf91bf420,
- 0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
- 0xf7f00d02,
- 0x6c21f510,
- 0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
- 0xf502f7f0,
- 0xf5062b21,
- 0xf5063a21,
- 0xbd064c21,
- 0x2b21f5f4,
- 0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
- 0x16019806,
- 0x041427f1,
- 0xd00624b6,
- 0xe7f10020,
- 0xe3f0a500,
- 0x021fb941,
- 0xb68d21f4,
- 0xfcf004e0,
- 0x022cf001,
- 0xfd0124b6,
- 0x21f405f2,
- 0xfc17f18d,
- 0x0213f04a,
- 0xd00c27f0,
- 0x21f50012,
- 0x27f10215,
- 0x23f047fc,
- 0x0020d002,
+ 0x0007f116,
+ 0x0203f081,
+ 0xbd0003d0,
+ 0x40008004,
+ 0x010017f1,
+ 0xfa0613f0,
+ 0x03f80601,
+/* 0x09aa: ctx_xfer */
+ 0xe7f000f8,
+ 0x0007f104,
+ 0x0303f002,
+ 0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+ 0x00e7f104,
+ 0x03e3f000,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f21b,
+ 0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x7f21f502,
+ 0x9121f507,
+ 0xa621f507,
+ 0xf5f4bd07,
+ 0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+ 0x98082421,
+ 0x24bd1601,
+ 0x050007f1,
+ 0xd00103f0,
+ 0x04bd0002,
+ 0xf1021fb9,
+ 0xf0a500e7,
+ 0x21f441e3,
+ 0x01fcf09d,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x02ffb905,
+ 0xa504e7f1,
+ 0xf441e3f0,
+ 0x21f59d21,
+ 0x24bd026a,
+ 0x47fc07f1,
+ 0xd00203f0,
+ 0x04bd0002,
0xb6012cf0,
- 0x12d00320,
- 0x01acf000,
- 0xf006a5f0,
- 0x0c9800b7,
- 0x010d9800,
- 0xf500e7f0,
- 0xf0016621,
- 0x21f508a7,
- 0x21f50109,
- 0x01f40215,
- 0x0ca7f022,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
- 0xfd0012cf,
- 0x1bf40522,
- 0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
- 0xf502f7f0,
- 0xbd062b21,
- 0x6c21f5f4,
- 0x3421f506,
- 0x3a21f502,
- 0xf5f4bd06,
- 0xf4062b21,
- 0x01981011,
- 0x0511fd40,
- 0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
- 0xf807b721,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x07f10320,
+ 0x03f04afc,
+ 0x0002d002,
+ 0xacf004bd,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98000c,
+ 0x00e7f001,
+ 0x016f21f5,
+ 0xf508a7f0,
+ 0xf5011021,
+ 0xf4025e21,
+ 0xa7f01301,
+ 0xd021f40c,
+ 0xf505f7f0,
+ 0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x7f21f502,
+ 0xf5f4bd07,
+ 0xf507de21,
+ 0xf5027f21,
+ 0xbd079121,
+ 0x7f21f5f4,
+ 0x1011f407,
+ 0xfd400198,
+ 0x0bf40511,
+ 0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+ 0x0000f809,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index 33a5a82eccbd..6ffe28307dbd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -28,28 +28,135 @@
#define GF117 0xd7
#define GK100 0xe0
#define GK110 0xf0
+#define GK208 0x108
+#define NV_PGRAPH_FECS_INTR_ACK 0x409004
+#define NV_PGRAPH_FECS_INTR 0x409008
+#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400
+#define NV_PGRAPH_FECS_INTR_CHSW 0x00000100
+#define NV_PGRAPH_FECS_INTR_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE 0x40900c
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL 0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_EDGE 0x00000000
+#define NV_PGRAPH_FECS_INTR_EN_SET 0x409010
+#define NV_PGRAPH_FECS_INTR_EN_SET_FIFO 0x00000004
+#define NV_PGRAPH_FECS_INTR_ROUTE 0x40901c
+#define NV_PGRAPH_FECS_ACCESS 0x409048
+#define NV_PGRAPH_FECS_ACCESS_FIFO 0x00000002
+#define NV_PGRAPH_FECS_FIFO_DATA 0x409064
+#define NV_PGRAPH_FECS_FIFO_CMD 0x409068
+#define NV_PGRAPH_FECS_FIFO_ACK 0x409074
+#define NV_PGRAPH_FECS_CAPS 0x409108
#define NV_PGRAPH_FECS_SIGNAL 0x409400
+#define NV_PGRAPH_FECS_IROUTE 0x409404
+#define NV_PGRAPH_FECS_BAR_MASK0 0x40940c
+#define NV_PGRAPH_FECS_BAR_MASK1 0x409410
+#define NV_PGRAPH_FECS_BAR 0x409414
+#define NV_PGRAPH_FECS_BAR_SET 0x409418
+#define NV_PGRAPH_FECS_RED_SWITCH 0x409614
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP 0x00000400
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC 0x00000200
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN 0x00000100
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP 0x00000040
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC 0x00000020
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN 0x00000010
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_GPC 0x00000002
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_MAIN 0x00000001
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE 0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE 0x409704
+#define NV_PGRAPH_FECS_MMCTX_BASE 0x409710
+#define NV_PGRAPH_FECS_MMCTX_CTRL 0x409714
+#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE 0x409718
+#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK 0x40971c
+#define NV_PGRAPH_FECS_MMCTX_QUEUE 0x409720
+#define NV_PGRAPH_FECS_MMIO_CTRL 0x409728
+#define NV_PGRAPH_FECS_MMIO_RDVAL 0x40972c
+#define NV_PGRAPH_FECS_MMIO_WRVAL 0x409730
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT 0x40974c
#if CHIPSET < GK110
#define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800)
#define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x409820)
#define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C 0x40986c
#else
#define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x409800)
#define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C 0x40988c
#define NV_PGRAPH_FECS_CC_SCRATCH_SET(n) ((n) * 4 + 0x4098c0)
#endif
+#define NV_PGRAPH_FECS_STRANDS_CNT 0x409880
+#define NV_PGRAPH_FECS_STRAND_SAVE_SWBASE 0x409908
+#define NV_PGRAPH_FECS_STRAND_LOAD_SWBASE 0x40990c
+#define NV_PGRAPH_FECS_STRAND_WORDS 0x409910
+#define NV_PGRAPH_FECS_STRAND_DATA 0x409918
+#define NV_PGRAPH_FECS_STRAND_SELECT 0x40991c
+#define NV_PGRAPH_FECS_STRAND_CMD 0x409928
+#define NV_PGRAPH_FECS_STRAND_CMD_SEEK 0x00000001
+#define NV_PGRAPH_FECS_STRAND_CMD_GET_INFO 0x00000002
+#define NV_PGRAPH_FECS_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_FECS_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER 0x0000000a
+#define NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER 0x0000000b
+#define NV_PGRAPH_FECS_STRAND_CMD_ENABLE 0x0000000c
+#define NV_PGRAPH_FECS_STRAND_CMD_DISABLE 0x0000000d
+#define NV_PGRAPH_FECS_STRAND_FILTER 0x40993c
+#define NV_PGRAPH_FECS_MEM_BASE 0x409a04
+#define NV_PGRAPH_FECS_MEM_CHAN 0x409a0c
+#define NV_PGRAPH_FECS_MEM_CMD 0x409a10
+#define NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN 0x00000007
+#define NV_PGRAPH_FECS_MEM_TARGET 0x409a20
+#define NV_PGRAPH_FECS_MEM_TARGET_UNK31 0x80000000
+#define NV_PGRAPH_FECS_MEM_TARGET_AS 0x0000001f
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VM 0x00000001
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM 0x00000002
+#define NV_PGRAPH_FECS_CHAN_ADDR 0x409b00
+#define NV_PGRAPH_FECS_CHAN_NEXT 0x409b04
+#define NV_PGRAPH_FECS_CHSW 0x409b0c
+#define NV_PGRAPH_FECS_CHSW_ACK 0x00000001
#define NV_PGRAPH_FECS_INTR_UP_SET 0x409c1c
+#define NV_PGRAPH_FECS_INTR_UP_EN 0x409c24
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ACK 0x41a004
+#define NV_PGRAPH_GPCX_GPCCS_INTR 0x41a008
+#define NV_PGRAPH_GPCX_GPCCS_INTR_FIFO 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET 0x41a010
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE 0x41a01c
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS 0x41a048
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO 0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_DATA 0x41a064
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD 0x41a068
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK 0x41a074
+#define NV_PGRAPH_GPCX_GPCCS_UNITS 0x41a608
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH 0x41a614
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11 0x00000800
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE 0x00000200
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER 0x00000020
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_PAUSE 0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_MYINDEX 0x41a618
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE 0x41a700
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE 0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT 0x41a74c
#if CHIPSET < GK110
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a820)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a86c
#else
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n) ((n) * 4 + 0x41a800)
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n) ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C 0x41a88c
#define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n) ((n) * 4 + 0x41a8c0)
#endif
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT 0x41a91c
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD 0x41a928
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE 0x00000003
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD 0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE 0x41aa04
#define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
#define queue_init .skip 72 // (2 * 4) + ((8 * 4) * 2)
@@ -65,24 +172,50 @@
#define T_LCHAN 8
#define T_LCTXH 9
-#define nv_mkmm(rv,r) /*
-*/ movw rv ((r) & 0x0000fffc) /*
-*/ sethi rv ((r) & 0x00ff0000)
+#if CHIPSET < GK208
+#define imm32(reg,val) /*
+*/ movw reg ((val) & 0x0000ffff) /*
+*/ sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/ mov reg (val)
+#endif
+
#define nv_mkio(rv,r,i) /*
-*/ nv_mkmm(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+*/ imm32(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+
+#define hash #
+#define fn(a) a
+#if CHIPSET < GK208
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
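
The block above is the GK208 split: on pre-GK208 falcons imm32() builds a 32-bit constant from two 16-bit halves (movw for the low half, sethi for the high half), whereas the GK208 variant loads it with a single mov; call() likewise becomes lcall on GK208. The nv_mkio() macro below then packs a priv-register offset and an index into a falcon I/O-space address. A rough C model of that packing, for illustration only (mkio() is an invented name, not part of the patch):

/* illustrative C model of the nv_mkio() address packing */
static unsigned int mkio(unsigned int r, unsigned int i)
{
	/* keep register bits [11:2], shift them into the I/O index field,
	 * and place the sub-slot index in the low bits */
	return ((r & 0xffc) << 6) | (i << 2);
}
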
#define nv_iord(rv,r,i) /*
*/ nv_mkio(rv,r,i) /*
*/ iord rv I[rv]
+
#define nv_iowr(r,i,rv) /*
*/ nv_mkio($r0,r,i) /*
*/ iowr I[$r0] rv /*
*/ clear b32 $r0
+#define nv_rd32(reg,addr) /*
+*/ imm32($r14, addr) /*
+*/ call(nv_rd32) /*
+*/ mov b32 reg $r15
+
+#define nv_wr32(addr,reg) /*
+*/ mov b32 $r15 reg /*
+*/ imm32($r14, addr) /*
+*/ call(nv_wr32)
+
#define trace_set(bit) /*
*/ clear b32 $r9 /*
*/ bset $r9 bit /*
*/ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9)
+
#define trace_clr(bit) /*
*/ clear b32 $r9 /*
*/ bset $r9 bit /*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
new file mode 100644
index 000000000000..e1af65ead379
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv108_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0xa140, &nouveau_object_ofuncs },
+ { 0xa197, &nouveau_object_ofuncs },
+ { 0xa1c0, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static struct nvc0_graph_init
+nv108_graph_init_regs[] = {
+ { 0x400080, 1, 0x04, 0x003083c2 },
+ { 0x400088, 1, 0x04, 0x0001bfe7 },
+ { 0x40008c, 1, 0x04, 0x00000000 },
+ { 0x400090, 1, 0x04, 0x00000030 },
+ { 0x40013c, 1, 0x04, 0x003901f7 },
+ { 0x400140, 1, 0x04, 0x00000100 },
+ { 0x400144, 1, 0x04, 0x00000000 },
+ { 0x400148, 1, 0x04, 0x00000110 },
+ { 0x400138, 1, 0x04, 0x00000000 },
+ { 0x400130, 2, 0x04, 0x00000000 },
+ { 0x400124, 1, 0x04, 0x00000002 },
+ {}
+};
+
+struct nvc0_graph_init
+nv108_graph_init_unk58xx[] = {
+ { 0x405844, 1, 0x04, 0x00ffffff },
+ { 0x405850, 1, 0x04, 0x00000000 },
+ { 0x405900, 1, 0x04, 0x00000000 },
+ { 0x405908, 1, 0x04, 0x00000000 },
+ { 0x405928, 1, 0x04, 0x00000000 },
+ { 0x40592c, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_gpc[] = {
+ { 0x418408, 1, 0x04, 0x00000000 },
+ { 0x4184a0, 3, 0x04, 0x00000000 },
+ { 0x418604, 1, 0x04, 0x00000000 },
+ { 0x418680, 1, 0x04, 0x00000000 },
+ { 0x418714, 1, 0x04, 0x00000000 },
+ { 0x418384, 2, 0x04, 0x00000000 },
+ { 0x418814, 3, 0x04, 0x00000000 },
+ { 0x418b04, 1, 0x04, 0x00000000 },
+ { 0x4188c8, 2, 0x04, 0x00000000 },
+ { 0x4188d0, 1, 0x04, 0x00010000 },
+ { 0x4188d4, 1, 0x04, 0x00000201 },
+ { 0x418910, 1, 0x04, 0x00010001 },
+ { 0x418914, 1, 0x04, 0x00000301 },
+ { 0x418918, 1, 0x04, 0x00800000 },
+ { 0x418980, 1, 0x04, 0x77777770 },
+ { 0x418984, 3, 0x04, 0x77777777 },
+ { 0x418c04, 1, 0x04, 0x00000000 },
+ { 0x418c64, 2, 0x04, 0x00000000 },
+ { 0x418c88, 1, 0x04, 0x00000000 },
+ { 0x418cb4, 2, 0x04, 0x00000000 },
+ { 0x418d00, 1, 0x04, 0x00000000 },
+ { 0x418d28, 2, 0x04, 0x00000000 },
+ { 0x418f00, 1, 0x04, 0x00000400 },
+ { 0x418f08, 1, 0x04, 0x00000000 },
+ { 0x418f20, 2, 0x04, 0x00000000 },
+ { 0x418e00, 1, 0x04, 0x00000000 },
+ { 0x418e08, 1, 0x04, 0x00000000 },
+ { 0x418e1c, 2, 0x04, 0x00000000 },
+ { 0x41900c, 1, 0x04, 0x00000000 },
+ { 0x419018, 1, 0x04, 0x00000000 },
+ {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_tpc[] = {
+ { 0x419d0c, 1, 0x04, 0x00000000 },
+ { 0x419d10, 1, 0x04, 0x00000014 },
+ { 0x419ab0, 1, 0x04, 0x00000000 },
+ { 0x419ac8, 1, 0x04, 0x00000000 },
+ { 0x419ab8, 1, 0x04, 0x000000e7 },
+ { 0x419abc, 2, 0x04, 0x00000000 },
+ { 0x419ab4, 1, 0x04, 0x00000000 },
+ { 0x419aa8, 2, 0x04, 0x00000000 },
+ { 0x41980c, 1, 0x04, 0x00000010 },
+ { 0x419844, 1, 0x04, 0x00000000 },
+ { 0x419850, 1, 0x04, 0x00000004 },
+ { 0x419854, 2, 0x04, 0x00000000 },
+ { 0x419c98, 1, 0x04, 0x00000000 },
+ { 0x419ca8, 1, 0x04, 0x00000000 },
+ { 0x419cb0, 1, 0x04, 0x01000000 },
+ { 0x419cb4, 1, 0x04, 0x00000000 },
+ { 0x419cb8, 1, 0x04, 0x00b08bea },
+ { 0x419c84, 1, 0x04, 0x00010384 },
+ { 0x419cbc, 1, 0x04, 0x281b3646 },
+ { 0x419cc0, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x00000230 },
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ { 0x419c0c, 1, 0x04, 0x00000000 },
+ { 0x419e00, 1, 0x04, 0x00000080 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x00000000 },
+ { 0x419f00, 1, 0x04, 0x00000000 },
+ { 0x419ed0, 1, 0x04, 0x00003234 },
+ { 0x419f74, 1, 0x04, 0x00015555 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static int
+nv108_graph_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_graph_priv *priv = (void *)object;
+ static const struct {
+ u32 addr;
+ u32 data;
+ } magic[] = {
+ { 0x020520, 0xfffffffc },
+ { 0x020524, 0xfffffffe },
+ { 0x020524, 0xfffffffc },
+ { 0x020524, 0xfffffff8 },
+ { 0x020524, 0xffffffe0 },
+ { 0x020530, 0xfffffffe },
+ { 0x02052c, 0xfffffffa },
+ { 0x02052c, 0xfffffff0 },
+ { 0x02052c, 0xffffffc0 },
+ { 0x02052c, 0xffffff00 },
+ { 0x02052c, 0xfffffc00 },
+ { 0x02052c, 0xfffcfc00 },
+ { 0x02052c, 0xfff0fc00 },
+ { 0x02052c, 0xff80fc00 },
+ { 0x020528, 0xfffffffe },
+ { 0x020528, 0xfffffffc },
+ };
+ int i;
+
+ nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
+ nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(magic); i++) {
+ nv_wr32(priv, magic[i].addr, magic[i].data);
+ nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
+ }
+
+ return nouveau_graph_fini(&priv->base, suspend);
+}
+
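The magic[] loop above issues each workaround write and then waits for bit 31 of the same register to read back as zero, which is what the nv_wait(priv, addr, 0x80000000, 0x00000000) mask/value pair asks for. A self-contained sketch of the same write-then-poll pattern, with invented names and a crude timeout, purely for illustration (the driver itself goes through nv_wr32()/nv_wait()):

#include <linux/io.h>
#include <linux/types.h>

/* hypothetical illustration of the write-then-poll pattern */
static bool poke_and_wait(void __iomem *mmio, u32 addr, u32 data)
{
	int tries = 1000000;

	writel(data, mmio + addr);		/* issue the magic write */
	while (readl(mmio + addr) & 0x80000000)	/* bit 31 = still busy */
		if (--tries == 0)
			return false;		/* give up: timed out */
	return true;
}
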
+static struct nvc0_graph_init *
+nv108_graph_init_mmio[] = {
+ nv108_graph_init_regs,
+ nvf0_graph_init_unk40xx,
+ nvc0_graph_init_unk44xx,
+ nvc0_graph_init_unk78xx,
+ nvc0_graph_init_unk60xx,
+ nvd9_graph_init_unk64xx,
+ nv108_graph_init_unk58xx,
+ nvc0_graph_init_unk80xx,
+ nvf0_graph_init_unk70xx,
+ nvf0_graph_init_unk5bxx,
+ nv108_graph_init_gpc,
+ nv108_graph_init_tpc,
+ nve4_graph_init_unk,
+ nve4_graph_init_unk88xx,
+ NULL
+};
+
+#include "fuc/hubnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_fecs_ucode = {
+ .code.data = nv108_grhub_code,
+ .code.size = sizeof(nv108_grhub_code),
+ .data.data = nv108_grhub_data,
+ .data.size = sizeof(nv108_grhub_data),
+};
+
+#include "fuc/gpcnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_gpccs_ucode = {
+ .code.data = nv108_grgpc_code,
+ .code.size = sizeof(nv108_grgpc_code),
+ .data.data = nv108_grgpc_data,
+ .data.size = sizeof(nv108_grgpc_data),
+};
+
+struct nouveau_oclass *
+nv108_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0x08),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = nve4_graph_init,
+ .fini = nv108_graph_fini,
+ },
+ .cclass = &nv108_grctx_oclass,
+ .sclass = nv108_graph_sclass,
+ .mmio = nv108_graph_init_mmio,
+ .fecs.ucode = &nv108_graph_fecs_ucode,
+ .gpccs.ucode = &nv108_graph_gpccs_ucode,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 03de5175dd9f..7a367c402978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -304,12 +304,28 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
return timeout ? -EBUSY : 0;
}
-static const struct nouveau_enum nv50_mp_exec_error_names[] = {
- { 3, "STACK_UNDERFLOW", NULL },
- { 4, "QUADON_ACTIVE", NULL },
- { 8, "TIMEOUT", NULL },
- { 0x10, "INVALID_OPCODE", NULL },
- { 0x40, "BREAKPOINT", NULL },
+static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
+ { 0x01, "STACK_UNDERFLOW" },
+ { 0x02, "STACK_MISMATCH" },
+ { 0x04, "QUADON_ACTIVE" },
+ { 0x08, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x20, "PM_OVERFLOW" },
+ { 0x40, "BREAKPOINT" },
+ {}
+};
+
+static const struct nouveau_bitfield nv50_mpc_traps[] = {
+ { 0x0000001, "LOCAL_LIMIT_READ" },
+ { 0x0000010, "LOCAL_LIMIT_WRITE" },
+ { 0x0000040, "STACK_LIMIT" },
+ { 0x0000100, "GLOBAL_LIMIT_READ" },
+ { 0x0001000, "GLOBAL_LIMIT_WRITE" },
+ { 0x0010000, "MP0" },
+ { 0x0020000, "MP1" },
+ { 0x0040000, "GLOBAL_LIMIT_RED" },
+ { 0x0400000, "GLOBAL_LIMIT_ATOM" },
+ { 0x4000000, "MP2" },
{}
};
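
The switch from a nouveau_enum table to nouveau_bitfield here matches how the MP exec status register behaves: it is a mask of independently set error bits rather than a single error code, so every set bit should be named. nouveau_bitfield_print() walks the table and prints the name of each bit present, whereas nouveau_enum_print() matches one exact value; a status of 0x18, for instance, would decode as TIMEOUT and INVALID_OPCODE. A minimal sketch of that style of decoder, assuming a simplified table shape rather than nouveau's actual structs:

/* simplified sketch of bitfield decoding; not nouveau's implementation */
struct bitname { unsigned int mask; const char *name; };

static void print_bits(const struct bitname *b, unsigned int value)
{
	for (; b->name; b++)
		if (value & b->mask)
			pr_cont(" %s", b->name);
}
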
@@ -396,6 +412,60 @@ static const struct nouveau_bitfield nv50_graph_intr_name[] = {
{}
};
+static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
+ { 0x00000004, "SURF_WIDTH_OVERRUN" },
+ { 0x00000008, "SURF_HEIGHT_OVERRUN" },
+ { 0x00000010, "DST2D_FAULT" },
+ { 0x00000020, "ZETA_FAULT" },
+ { 0x00000040, "RT_FAULT" },
+ { 0x00000080, "CUDA_FAULT" },
+ { 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
+ { 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
+ { 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
+ { 0x00000800, "DST2D_LINEAR_MISMATCH" },
+ { 0x00001000, "RT_LINEAR_MISMATCH" },
+ {}
+};
+
+static void
+nv50_priv_prop_trap(struct nv50_graph_priv *priv,
+ u32 ustatus_addr, u32 ustatus, u32 tp)
+{
+ u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
+ u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
+ u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
+ u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+ u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+ u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+ u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+ tp, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+ tp, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ tp, e14, e10);
+ }
+ ustatus &= ~0x00000080;
+ }
+ if (ustatus) {
+ nv_error(priv, "TRAP_PROP - TP %d -", tp);
+ nouveau_bitfield_print(nv50_graph_trap_prop, ustatus);
+ pr_cont(" - Address %02x%08x\n", e14, e10);
+ }
+ nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ tp, e0c, e18, e1c, e20, e24);
+}
+
static void
nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
{
@@ -420,8 +490,8 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
oplow = nv_rd32(priv, addr + 0x70);
ophigh = nv_rd32(priv, addr + 0x74);
nv_error(priv, "TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_enum_print(nv50_mp_exec_error_names, status);
+ "TP %d MP %d:", tpid, i);
+ nouveau_bitfield_print(nv50_mp_exec_errors, status);
pr_cont(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
@@ -468,60 +538,19 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
nv50_priv_mp_trap(priv, i, display);
ustatus &= ~0x04030000;
}
- break;
- case 8: /* TPDMA error */
- {
- u32 e0c = nv_rd32(priv, ustatus_addr + 4);
- u32 e10 = nv_rd32(priv, ustatus_addr + 8);
- u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
- u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
- u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
- u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
- u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
+ if (ustatus && display) {
+ nv_error(priv, "%s - TP%d:", name, i);
+ nouveau_bitfield_print(nv50_mpc_traps, ustatus);
+ pr_cont("\n");
+ ustatus = 0;
}
break;
+ case 8: /* PROP error */
+ if (display)
+ nv50_priv_prop_trap(
+ priv, ustatus_addr, ustatus, i);
+ ustatus = 0;
+ break;
}
if (ustatus) {
if (display)
@@ -727,11 +756,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
status &= ~0x080;
}
- /* TPDMA: Handles TP-initiated uncached memory accesses:
+ /* PROP: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
- "TRAP_TPDMA");
+ "TRAP_PROP");
nv_wr32(priv, 0x400108, 0x100);
status &= ~0x100;
}
@@ -760,7 +789,7 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
u32 mthd = (addr & 0x00001ffc);
u32 data = nv_rd32(priv, 0x400708);
u32 class = nv_rd32(priv, 0x400814);
- u32 show = stat;
+ u32 show = stat, show_bitfield = stat;
int chid;
engctx = nouveau_engctx_get(engine, inst);
@@ -778,21 +807,26 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_error(priv, "DATA_ERROR ");
nouveau_enum_print(nv50_data_error_names, ecode);
pr_cont("\n");
+ show_bitfield &= ~0x00100000;
}
if (stat & 0x00200000) {
if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
engctx))
show &= ~0x00200000;
+ show_bitfield &= ~0x00200000;
}
nv_wr32(priv, 0x400100, stat);
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_error(priv, "%s", "");
- nouveau_bitfield_print(nv50_graph_intr_name, show);
- pr_cont("\n");
+ show &= show_bitfield;
+ if (show) {
+ nv_error(priv, "%s", "");
+ nouveau_bitfield_print(nv50_graph_intr_name, show);
+ pr_cont("\n");
+ }
nv_error(priv,
"ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, nouveau_client_name(engctx),
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 5c8a63dc506a..a73ab209ea88 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -901,6 +901,9 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
return 0;
+ } else
+ if (!oclass->fecs.ucode) {
+ return -ENOSYS;
}
/* load HUB microcode */
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ea17a80ad7fc..b0ab6de270b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -205,6 +205,11 @@ extern struct nvc0_graph_init nve4_graph_init_regs[];
extern struct nvc0_graph_init nve4_graph_init_unk[];
extern struct nvc0_graph_init nve4_graph_init_unk88xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk40xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk70xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_graph_init_tpc[];
+
int nvc0_grctx_generate(struct nvc0_graph_priv *);
void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
@@ -266,6 +271,11 @@ extern struct nvc0_graph_init nve4_grctx_init_unk80xx[];
extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
+
+extern struct nouveau_oclass *nv108_grctx_oclass;
#define mmio_data(s,a,p) do { \
info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index 2f0ac7832234..b1acb9939d95 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -41,7 +41,7 @@ nvf0_graph_sclass[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk40xx[] = {
{ 0x40415c, 1, 0x04, 0x00000000 },
{ 0x404170, 1, 0x04, 0x00000000 },
@@ -60,7 +60,7 @@ nvf0_graph_init_unk58xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk70xx[] = {
{ 0x407010, 1, 0x04, 0x00000000 },
{ 0x407040, 1, 0x04, 0x80440424 },
@@ -68,7 +68,7 @@ nvf0_graph_init_unk70xx[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_unk5bxx[] = {
{ 0x405b44, 1, 0x04, 0x00000000 },
{ 0x405b50, 1, 0x04, 0x00000000 },
@@ -114,7 +114,7 @@ nvf0_graph_init_gpc[] = {
{}
};
-static struct nvc0_graph_init
+struct nvc0_graph_init
nvf0_graph_init_tpc[] = {
{ 0x419d0c, 1, 0x04, 0x00000000 },
{ 0x419d10, 1, 0x04, 0x00000014 },
@@ -243,6 +243,6 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
.cclass = &nvf0_grctx_oclass,
.sclass = nvf0_graph_sclass,
.mmio = nvf0_graph_init_mmio,
- .fecs.ucode = 0 ? &nvf0_graph_fecs_ucode : NULL,
+ .fecs.ucode = &nvf0_graph_fecs_ucode,
.gpccs.ucode = &nvf0_graph_gpccs_ucode,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 560c3593dae7..e71a4325e670 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -230,9 +230,26 @@ struct nve0_channel_ind_class {
#define NV04_DISP_CLASS 0x00000046
+#define NV04_DISP_MTHD 0x00000000
+#define NV04_DISP_MTHD_HEAD 0x00000001
+
+#define NV04_DISP_SCANOUTPOS 0x00000000
+
struct nv04_display_class {
};
+struct nv04_display_scanoutpos {
+ s64 time[2];
+ u32 vblanks;
+ u32 vblanke;
+ u32 vtotal;
+ u32 vline;
+ u32 hblanks;
+ u32 hblanke;
+ u32 htotal;
+ u32 hline;
+};
+
/* 5070: NV50_DISP
* 8270: NV84_DISP
* 8370: NVA0_DISP
@@ -252,6 +269,11 @@ struct nv04_display_class {
#define NVE0_DISP_CLASS 0x00009170
#define NVF0_DISP_CLASS 0x00009270
+#define NV50_DISP_MTHD 0x00000000
+#define NV50_DISP_MTHD_HEAD 0x00000003
+
+#define NV50_DISP_SCANOUTPOS 0x00000000
+
#define NV50_DISP_SOR_MTHD 0x00010000
#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index ac2881d1776a..7b8ea221b00d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -38,7 +38,8 @@ enum nv_subdev_type {
NVDEV_SUBDEV_THERM,
NVDEV_SUBDEV_CLOCK,
- NVDEV_ENGINE_DMAOBJ,
+ NVDEV_ENGINE_FIRST,
+ NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
NVDEV_ENGINE_FIFO,
NVDEV_ENGINE_SW,
NVDEV_ENGINE_GR,
@@ -70,6 +71,7 @@ struct nouveau_device {
const char *dbgopt;
const char *name;
const char *cname;
+ u64 disable_mask;
enum {
NV_04 = 0x04,
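
NVDEV_ENGINE_FIRST gives code that walks this enum a stable starting point for the engine entries, and the new disable_mask is a per-device bitmask indexed by these values; it is filled in by the devinit ->disable() hook introduced later in this patch. A hypothetical consumer sketch, assuming the enum still ends with its usual NVDEV_SUBDEV_NR terminator:

/* hypothetical helper: has devinit flagged this engine as absent/broken? */
static bool engine_enabled(struct nouveau_device *device, int type)
{
	return !(device->disable_mask & (1ULL << type));
}

/* e.g. skip disabled engines while instantiating them:
 *	for (i = NVDEV_ENGINE_FIRST; i < NVDEV_SUBDEV_NR; i++)
 *		if (engine_enabled(device, i))
 *			... create engine i ...
 */
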
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 8c32cf4d83c7..26b6b2bb1112 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass;
extern struct nouveau_oclass *nv84_fifo_oclass;
extern struct nouveau_oclass *nvc0_fifo_oclass;
extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *nv108_fifo_oclass;
void nv04_fifo_intr(struct nouveau_subdev *);
int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8e1b52312ddc..97705618de97 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -69,6 +69,7 @@ extern struct nouveau_oclass *nvd7_graph_oclass;
extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *nv108_graph_oclass;
extern const struct nouveau_bitfield nv04_graph_nsource[];
extern struct nouveau_ofuncs nv04_graph_ofuncs;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 4f4ff4502c3d..9faa98e67ad8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -4,8 +4,7 @@
#include <core/subdev.h>
#include <core/device.h>
-#include <subdev/fb.h>
-
+struct nouveau_mem;
struct nouveau_vma;
struct nouveau_bar {
@@ -29,27 +28,7 @@ nouveau_bar(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
}
-#define nouveau_bar_create(p,e,o,d) \
- nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_bar_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_bar_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_bar_destroy(struct nouveau_bar *);
-
-void _nouveau_bar_dtor(struct nouveau_object *);
-#define _nouveau_bar_init _nouveau_subdev_init
-#define _nouveau_bar_fini _nouveau_subdev_fini
-
extern struct nouveau_oclass nv50_bar_oclass;
extern struct nouveau_oclass nvc0_bar_oclass;
-int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
- struct nouveau_mem *, struct nouveau_object **);
-
-void nv84_bar_flush(struct nouveau_bar *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
new file mode 100644
index 000000000000..c5e6d1e6ac1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -0,0 +1,66 @@
+#ifndef __NVBIOS_RAMCFG_H__
+#define __NVBIOS_RAMCFG_H__
+
+struct nouveau_bios;
+
+struct nvbios_ramcfg {
+ unsigned rammap_11_08_01:1;
+ unsigned rammap_11_08_0c:2;
+ unsigned rammap_11_08_10:1;
+ unsigned rammap_11_11_0c:2;
+
+ unsigned ramcfg_11_01_01:1;
+ unsigned ramcfg_11_01_02:1;
+ unsigned ramcfg_11_01_04:1;
+ unsigned ramcfg_11_01_08:1;
+ unsigned ramcfg_11_01_10:1;
+ unsigned ramcfg_11_01_20:1;
+ unsigned ramcfg_11_01_40:1;
+ unsigned ramcfg_11_01_80:1;
+ unsigned ramcfg_11_02_03:2;
+ unsigned ramcfg_11_02_04:1;
+ unsigned ramcfg_11_02_08:1;
+ unsigned ramcfg_11_02_10:1;
+ unsigned ramcfg_11_02_40:1;
+ unsigned ramcfg_11_02_80:1;
+ unsigned ramcfg_11_03_0f:4;
+ unsigned ramcfg_11_03_30:2;
+ unsigned ramcfg_11_03_c0:2;
+ unsigned ramcfg_11_03_f0:4;
+ unsigned ramcfg_11_04:8;
+ unsigned ramcfg_11_06:8;
+ unsigned ramcfg_11_07_02:1;
+ unsigned ramcfg_11_07_04:1;
+ unsigned ramcfg_11_07_08:1;
+ unsigned ramcfg_11_07_10:1;
+ unsigned ramcfg_11_07_40:1;
+ unsigned ramcfg_11_07_80:1;
+ unsigned ramcfg_11_08_01:1;
+ unsigned ramcfg_11_08_02:1;
+ unsigned ramcfg_11_08_04:1;
+ unsigned ramcfg_11_08_08:1;
+ unsigned ramcfg_11_08_10:1;
+ unsigned ramcfg_11_08_20:1;
+ unsigned ramcfg_11_09:8;
+
+ unsigned timing[11];
+ unsigned timing_20_2e_03:2;
+ unsigned timing_20_2e_30:2;
+ unsigned timing_20_2e_c0:2;
+ unsigned timing_20_2f_03:2;
+ unsigned timing_20_2c_003f:6;
+ unsigned timing_20_2c_1fc0:7;
+ unsigned timing_20_30_f8:5;
+ unsigned timing_20_30_07:3;
+ unsigned timing_20_31_0007:3;
+ unsigned timing_20_31_0078:4;
+ unsigned timing_20_31_0780:4;
+ unsigned timing_20_31_0800:1;
+ unsigned timing_20_31_7000:3;
+ unsigned timing_20_31_8000:1;
+};
+
+u8 nvbios_ramcfg_count(struct nouveau_bios *);
+u8 nvbios_ramcfg_index(struct nouveau_bios *);
+
+#endif
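
A note on the naming convention used throughout this header: each field is <table>_<version>_<byte offset>_<mask>, so ramcfg_11_02_03 holds the bits selected by mask 0x03 from byte 0x02 of a version-1.1 ramcfg entry, and rammap_11_08_0c comes from byte 0x08, mask 0x0c, of the version-1.1 rammap entry. The parsers added in rammap.c and timing.c below follow that convention literally, for example:

	p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
	p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
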
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
index bc15e0320877..5bdf8e4db40a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -1,11 +1,25 @@
#ifndef __NVBIOS_RAMMAP_H__
#define __NVBIOS_RAMMAP_H__
-u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
- u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+struct nvbios_ramcfg;
+
+u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *);
+
+u32 nvbios_rammapSe(struct nouveau_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp(struct nouveau_bios *, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr,
+ struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
index 963694b54224..76d914b67ab5 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -1,8 +1,14 @@
#ifndef __NVBIOS_TIMING_H__
#define __NVBIOS_TIMING_H__
-u16 nvbios_timing_table(struct nouveau_bios *,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+struct nvbios_ramcfg;
+
+u16 nvbios_timingTe(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_timingEe(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timingEp(struct nouveau_bios *, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index 685c9b12ee4c..ed1ac68c38b3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -9,7 +9,6 @@ struct nouveau_devinit {
bool post;
void (*meminit)(struct nouveau_devinit *);
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
-
};
static inline struct nouveau_devinit *
@@ -18,32 +17,16 @@ nouveau_devinit(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
}
-#define nouveau_devinit_create(p,e,o,d) \
- nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_devinit_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_devinit_init(p) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_init(nv_object(d)); \
-})
-#define nouveau_devinit_fini(p,s) ({ \
- struct nouveau_devinit *d = (p); \
- _nouveau_devinit_fini(nv_object(d), (s)); \
-})
-
-int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-#define _nouveau_devinit_dtor _nouveau_subdev_dtor
-int _nouveau_devinit_init(struct nouveau_object *);
-int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
-
-extern struct nouveau_oclass nv04_devinit_oclass;
-extern struct nouveau_oclass nv05_devinit_oclass;
-extern struct nouveau_oclass nv10_devinit_oclass;
-extern struct nouveau_oclass nv1a_devinit_oclass;
-extern struct nouveau_oclass nv20_devinit_oclass;
-extern struct nouveau_oclass nv50_devinit_oclass;
-extern struct nouveau_oclass nva3_devinit_oclass;
-extern struct nouveau_oclass nvc0_devinit_oclass;
+extern struct nouveau_oclass *nv04_devinit_oclass;
+extern struct nouveau_oclass *nv05_devinit_oclass;
+extern struct nouveau_oclass *nv10_devinit_oclass;
+extern struct nouveau_oclass *nv1a_devinit_oclass;
+extern struct nouveau_oclass *nv20_devinit_oclass;
+extern struct nouveau_oclass *nv50_devinit_oclass;
+extern struct nouveau_oclass *nv84_devinit_oclass;
+extern struct nouveau_oclass *nv98_devinit_oclass;
+extern struct nouveau_oclass *nva3_devinit_oclass;
+extern struct nouveau_oclass *nvaf_devinit_oclass;
+extern struct nouveau_oclass *nvc0_devinit_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index d89dbdf39b0d..d7ecafbae1ca 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -106,6 +106,13 @@ extern struct nouveau_oclass *nvaf_fb_oclass;
extern struct nouveau_oclass *nvc0_fb_oclass;
extern struct nouveau_oclass *nve0_fb_oclass;
+#include <subdev/bios/ramcfg.h>
+
+struct nouveau_ram_data {
+ struct nvbios_ramcfg bios;
+ u32 freq;
+};
+
struct nouveau_ram {
struct nouveau_object base;
enum {
@@ -142,6 +149,12 @@ struct nouveau_ram {
} rammap, ramcfg, timing;
u32 freq;
u32 mr[16];
+ u32 mr1_nuts;
+
+ struct nouveau_ram_data *next;
+ struct nouveau_ram_data former;
+ struct nouveau_ram_data xition;
+ struct nouveau_ram_data target;
};
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index 4aca33887aaa..c1df26f3230c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -23,21 +23,6 @@ nv_memobj(void *obj)
return obj;
}
-#define nouveau_instobj_create(p,e,o,d) \
- nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_instobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void nouveau_instobj_destroy(struct nouveau_instobj *);
-
-void _nouveau_instobj_dtor(struct nouveau_object *);
-#define _nouveau_instobj_init nouveau_object_init
-#define _nouveau_instobj_fini nouveau_object_fini
-
struct nouveau_instmem {
struct nouveau_subdev base;
struct list_head list;
@@ -60,21 +45,8 @@ nouveau_instmem(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}
-#define nouveau_instmem_create(p,e,o,d) \
- nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instmem_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-int nouveau_instmem_init(struct nouveau_instmem *);
-int nouveau_instmem_fini(struct nouveau_instmem *, bool);
-
-#define _nouveau_instmem_dtor _nouveau_subdev_dtor
-int _nouveau_instmem_init(struct nouveau_object *);
-int _nouveau_instmem_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_instmem_oclass;
-extern struct nouveau_oclass nv40_instmem_oclass;
-extern struct nouveau_oclass nv50_instmem_oclass;
+extern struct nouveau_oclass *nv04_instmem_oclass;
+extern struct nouveau_oclass *nv40_instmem_oclass;
+extern struct nouveau_oclass *nv50_instmem_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b73d911..3c6738edd127 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv4c_mc_oclass;
extern struct nouveau_oclass *nv50_mc_oclass;
extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index fcf57fa309bf..c9509039f94b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index d70ba342aa2e..7098ddd54678 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -23,7 +23,11 @@
*/
#include <core/object.h>
-#include <subdev/bar.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include "priv.h"
struct nouveau_barobj {
struct nouveau_object base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
index 160d27f3c7b4..090d594a21b3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -25,10 +25,11 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
+#include "priv.h"
+
struct nv50_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index b2ec7411eb2e..bac5e754de35 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -25,10 +25,11 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
+#include "priv.h"
+
struct nvc0_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
new file mode 100644
index 000000000000..ffad8f337ead
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_BAR_PRIV_H__
+#define __NVKM_BAR_PRIV_H__
+
+#include <subdev/bar.h>
+
+#define nouveau_bar_create(p,e,o,d) \
+ nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p) \
+ nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s) \
+ nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+ struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbec7f08..ef0c9c4a8cc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
u16 pcir;
int i;
+ /* there is no PROM on nv4x IGPs */

+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ return;
+
/* enable access to rom */
if (device->card_type >= NV_50)
pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index df1b1b423093..de201baeb053 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -9,6 +9,7 @@
#include <subdev/bios/dp.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/devinit.h>
#include <subdev/i2c.h>
#include <subdev/vga.h>
@@ -391,43 +392,14 @@ init_unknown_script(struct nouveau_bios *bios)
return 0x0000;
}
-static u16
-init_ram_restrict_table(struct nvbios_init *init)
-{
- struct nouveau_bios *bios = init->bios;
- struct bit_entry bit_M;
- u16 data = 0x0000;
-
- if (!bit_entry(bios, 'M', &bit_M)) {
- if (bit_M.version == 1 && bit_M.length >= 5)
- data = nv_ro16(bios, bit_M.offset + 3);
- if (bit_M.version == 2 && bit_M.length >= 3)
- data = nv_ro16(bios, bit_M.offset + 1);
- }
-
- if (data == 0x0000)
- warn("ram restrict table not found\n");
- return data;
-}
-
static u8
init_ram_restrict_group_count(struct nvbios_init *init)
{
- struct nouveau_bios *bios = init->bios;
- struct bit_entry bit_M;
-
- if (!bit_entry(bios, 'M', &bit_M)) {
- if (bit_M.version == 1 && bit_M.length >= 5)
- return nv_ro08(bios, bit_M.offset + 2);
- if (bit_M.version == 2 && bit_M.length >= 3)
- return nv_ro08(bios, bit_M.offset + 0);
- }
-
- return 0x00;
+ return nvbios_ramcfg_count(init->bios);
}
static u8
-init_ram_restrict_strap(struct nvbios_init *init)
+init_ram_restrict(struct nvbios_init *init)
{
/* This appears to be the behaviour of the VBIOS parser, and *is*
* important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
@@ -438,18 +410,8 @@ init_ram_restrict_strap(struct nvbios_init *init)
* in case *not* re-reading the strap causes similar breakage.
*/
if (!init->ramcfg || init->bios->version.major < 0x70)
- init->ramcfg = init_rd32(init, 0x101000);
- return (init->ramcfg & 0x00000003c) >> 2;
-}
-
-static u8
-init_ram_restrict(struct nvbios_init *init)
-{
- u8 strap = init_ram_restrict_strap(init);
- u16 table = init_ram_restrict_table(init);
- if (table)
- return nv_ro08(init->bios, table + strap);
- return 0x00;
+ init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios);
+ return (init->ramcfg & 0x7fffffff);
}
static u8
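
The rewritten init_ram_restrict() above caches the strap index in init->ramcfg with bit 31 set, so a perfectly valid index of zero is still distinguishable from "not read yet" on later calls; only the low 31 bits are returned. A minimal sketch of that cache-once idiom, with an invented read_strap() standing in for nvbios_ramcfg_index():

/* minimal sketch of the cache-once idiom; read_strap() is invented */
static u8 cached_strap(u32 *cache)
{
	if (!*cache)				/* first call: read and mark */
		*cache = 0x80000000 | read_strap();
	return *cache & 0x7fffffff;		/* strip the "cached" flag */
}
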
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
new file mode 100644
index 000000000000..991aedda999b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
+
+static u8
+nvbios_ramcfg_strap(struct nouveau_bios *bios)
+{
+ return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+}
+
+u8
+nvbios_ramcfg_count(struct nouveau_bios *bios)
+{
+ struct bit_entry bit_M;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 1 && bit_M.length >= 5)
+ return nv_ro08(bios, bit_M.offset + 2);
+ if (bit_M.version == 2 && bit_M.length >= 3)
+ return nv_ro08(bios, bit_M.offset + 0);
+ }
+
+ return 0x00;
+}
+
+u8
+nvbios_ramcfg_index(struct nouveau_bios *bios)
+{
+ u8 strap = nvbios_ramcfg_strap(bios);
+ u32 xlat = 0x00000000;
+ struct bit_entry bit_M;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 1 && bit_M.length >= 5)
+ xlat = nv_ro16(bios, bit_M.offset + 3);
+ if (bit_M.version == 2 && bit_M.length >= 3)
+ xlat = nv_ro16(bios, bit_M.offset + 1);
+ }
+
+ if (xlat)
+ strap = nv_ro08(bios, xlat + strap);
+ return strap;
+}
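
nvbios_ramcfg_strap() reads the memory strap from bits 5:2 of register 0x101000, and nvbios_ramcfg_index() optionally remaps that raw strap through the translation table pointed to by the BIT 'M' entry, falling back to the raw value when no table exists; nvbios_ramcfg_count() reports how many ramcfg columns the VBIOS defines. A hedged usage sketch (not taken from this patch):

	/* hypothetical caller: sanity-check the strap before using it */
	u8 idx = nvbios_ramcfg_index(bios);
	if (idx >= nvbios_ramcfg_count(bios))
		nv_warn(bios, "ramcfg strap %d out of bounds\n", idx);
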
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 916fa9d302b7..1811b2cb0472 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -24,11 +24,12 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/bios/rammap.h>
-u16
-nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
- u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+u32
+nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+ u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 rammap = 0x0000;
@@ -57,12 +58,12 @@ nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
return 0x0000;
}
-u16
-nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEe(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
@@ -73,16 +74,100 @@ nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
return 0x0000;
}
-u16
-nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEm(struct nouveau_bios *bios, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
int idx = 0;
u32 data;
- while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+ while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) {
if (khz >= nv_ro16(bios, data + 0x00) &&
khz <= nv_ro16(bios, data + 0x02))
break;
}
return data;
}
+
+u32
+nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *p)
+{
+ u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len);
+ memset(p, 0x00, sizeof(*p));
+ switch (!!data * *ver) {
+ case 0x11:
+ p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+ p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
+ p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+ p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
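
The switch condition "!!data * *ver" folds the found/not-found check and the version dispatch into one value: when no rammap entry was located, data is zero, the product is zero, no case matches, and the default arm keeps data cleared; otherwise the switch sees the entry's version byte (0x11 here). The same idiom reappears in nvbios_rammapSp() and in the timing parser further down. A stripped-down illustration:

/* stripped-down illustration of the "!!data * ver" dispatch idiom */
static u32 parse(u32 data, u8 ver)
{
	switch (!!data * ver) {
	case 0x11:
		/* entry present and version 1.1: decode its fields here */
		break;
	default:
		data = 0;	/* missing entry or unsupported version */
		break;
	}
	return data;
}
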
+
+u32
+nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr)
+{
+ if (idx < ecnt) {
+ data = data + ehdr + (idx * elen);
+ *ver = ever;
+ *hdr = elen;
+ return data;
+ }
+ return 0;
+}
+
+u32
+nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
+ u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+ u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
+{
+ data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
+ switch (!!data * *ver) {
+ case 0x11:
+ p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
+ p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
+ p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
+ p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
+ p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
+ p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
+ p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
+ p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
+ p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
+ p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
+ p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
+ p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
+ p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
+ p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
+ p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
+ p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
+ p->ramcfg_11_04 = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
+ p->ramcfg_11_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
+ p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
+ p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
+ p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
+ p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
+ p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
+ p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
+ p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+ p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
+ p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
+ p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
+ p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+ p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
+ p->ramcfg_11_09 = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 151c2d6aaee8..350d44ab2ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -24,11 +24,12 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
#include <subdev/bios/timing.h>
u16
-nvbios_timing_table(struct nouveau_bios *bios,
- u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_timingTe(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
u16 timing = 0x0000;
@@ -47,11 +48,15 @@ nvbios_timing_table(struct nouveau_bios *bios,
*hdr = nv_ro08(bios, timing + 1);
*cnt = nv_ro08(bios, timing + 2);
*len = nv_ro08(bios, timing + 3);
+ *snr = 0;
+ *ssz = 0;
return timing;
case 0x20:
*hdr = nv_ro08(bios, timing + 1);
- *cnt = nv_ro08(bios, timing + 3);
+ *cnt = nv_ro08(bios, timing + 5);
*len = nv_ro08(bios, timing + 2);
+ *snr = nv_ro08(bios, timing + 4);
+ *ssz = nv_ro08(bios, timing + 3);
return timing;
default:
break;
@@ -63,11 +68,60 @@ nvbios_timing_table(struct nouveau_bios *bios,
}
u16
-nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_timingEe(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 hdr, cnt;
- u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
- if (timing && idx < cnt)
- return timing + hdr + (idx * *len);
+ u8 snr, ssz;
+ u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ if (timing && idx < *cnt) {
+ timing += *hdr + idx * (*len + (snr * ssz));
+ *hdr = *len;
+ *cnt = snr;
+ *len = ssz;
+ return timing;
+ }
return 0x0000;
}
+
+u16
+nvbios_timingEp(struct nouveau_bios *bios, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ramcfg *p)
+{
+ u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+ switch (!!data * *ver) {
+ case 0x20:
+ p->timing[0] = nv_ro32(bios, data + 0x00);
+ p->timing[1] = nv_ro32(bios, data + 0x04);
+ p->timing[2] = nv_ro32(bios, data + 0x08);
+ p->timing[3] = nv_ro32(bios, data + 0x0c);
+ p->timing[4] = nv_ro32(bios, data + 0x10);
+ p->timing[5] = nv_ro32(bios, data + 0x14);
+ p->timing[6] = nv_ro32(bios, data + 0x18);
+ p->timing[7] = nv_ro32(bios, data + 0x1c);
+ p->timing[8] = nv_ro32(bios, data + 0x20);
+ p->timing[9] = nv_ro32(bios, data + 0x24);
+ p->timing[10] = nv_ro32(bios, data + 0x28);
+ p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
+ p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
+ p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
+ p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
+ temp = nv_ro16(bios, data + 0x2c);
+ p->timing_20_2c_003f = (temp & 0x003f) >> 0;
+ p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
+ p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
+ p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
+ temp = nv_ro16(bios, data + 0x31);
+ p->timing_20_31_0007 = (temp & 0x0007) >> 0;
+ p->timing_20_31_0078 = (temp & 0x0078) >> 3;
+ p->timing_20_31_0780 = (temp & 0x0780) >> 7;
+ p->timing_20_31_0800 = (temp & 0x0800) >> 11;
+ p->timing_20_31_7000 = (temp & 0x7000) >> 12;
+ p->timing_20_31_8000 = (temp & 0x8000) >> 15;
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index e2938a21b06f..dd62baead39c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -182,9 +182,12 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
clk->pstate = pstatei;
if (pfb->ram->calc) {
- ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
- if (ret == 0)
- ret = pfb->ram->prog(pfb);
+ int khz = pstate->base.domain[nv_clk_src_mem];
+ do {
+ ret = pfb->ram->calc(pfb, khz);
+ if (ret == 0)
+ ret = pfb->ram->prog(pfb);
+ } while (ret > 0);
pfb->ram->tidy(pfb);
}
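
The reworked loop keeps re-running the calc()/prog() pair for as long as either returns a positive value, so a memory-clock implementation can request extra passes (an intermediate transition frequency, say) before tidy() cleans up; negative returns still abort as errors. A hypothetical ->prog() written against that convention, using the next/xition/target fields this patch adds to struct nouveau_ram (illustrative only, not how any particular backend is actually written):

/* hypothetical ->prog(): <0 error, >0 run another pass, 0 done */
static int example_ram_prog(struct nouveau_fb *pfb)
{
	struct nouveau_ram *ram = pfb->ram;

	/* ... program the state described by ram->next here ... */

	/* if we only reached the transition step, request another pass */
	return ram->next == &ram->xition;
}
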
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index 30c1f3a4158e..b74db6cfc4e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -25,7 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clock.h>
-#include <subdev/devinit/priv.h>
+#include <subdev/devinit/nv04.h>
#include "pll.h"
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 4c62e84b96f5..d3c37c96f0e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -457,7 +457,7 @@ nve0_domain[] = {
{ nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
- { nv_clk_src_mem , 0x03, 0, "memory", 1000 },
+ { nv_clk_src_mem , 0x03, 0, "memory", 500 },
{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_hubk01 , 0x05 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 79c81d3d9bac..8fa34e8152c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,9 +24,11 @@
#include <core/option.h>
-#include <subdev/devinit.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/vga.h>
+
+#include "priv.h"
int
_nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
@@ -37,18 +39,41 @@ _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
if (suspend)
devinit->post = true;
+ /* unlock the extended vga crtc regs */
+ nv_lockvgac(devinit, false);
+
return nouveau_subdev_fini(&devinit->base, suspend);
}
int
_nouveau_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_devinit_impl *impl = (void *)object->oclass;
struct nouveau_devinit *devinit = (void *)object;
- int ret = nouveau_subdev_init(&devinit->base);
+ int ret;
+
+ ret = nouveau_subdev_init(&devinit->base);
+ if (ret)
+ return ret;
+
+ ret = nvbios_init(&devinit->base, devinit->post);
if (ret)
return ret;
- return nvbios_init(&devinit->base, devinit->post);
+ if (impl->disable)
+ nv_device(devinit)->disable_mask |= impl->disable(devinit);
+ return 0;
+}
+
+void
+_nouveau_devinit_dtor(struct nouveau_object *object)
+{
+ struct nouveau_devinit *devinit = (void *)object;
+
+ /* lock crtc regs */
+ nv_lockvgac(devinit, true);
+
+ nouveau_subdev_destroy(&devinit->base);
}
int
@@ -57,6 +82,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
struct nouveau_oclass *oclass,
int size, void **pobject)
{
+ struct nouveau_devinit_impl *impl = (void *)oclass;
struct nouveau_device *device = nv_device(parent);
struct nouveau_devinit *devinit;
int ret;
@@ -68,5 +94,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
return ret;
devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+ devinit->meminit = impl->meminit;
+ devinit->pll_set = impl->pll_set;
return 0;
}
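
With this change the generic devinit init path runs the subdev init, then the VBIOS init scripts, and only then asks the per-chipset implementation which engines are absent or fused off, ORing that result into the device's disable mask. The following rough userspace model shows that ordering; all types and names are hypothetical stand-ins, not the real nouveau API.

/* Illustrative only -- the shape of the new init path with stand-in types. */
#include <stdint.h>
#include <stdio.h>

struct devinit;

struct devinit_impl {
        const char *name;
        uint64_t (*disable)(struct devinit *);
};

struct devinit {
        const struct devinit_impl *impl;
        uint64_t disable_mask;
        int post;
};

static int run_bios_init_scripts(struct devinit *init)
{
        printf("running init scripts (post=%d)\n", init->post);
        return 0;
}

static int devinit_init(struct devinit *init)
{
        int ret = run_bios_init_scripts(init);
        if (ret)
                return ret;

        if (init->impl->disable)
                init->disable_mask |= init->impl->disable(init);
        return 0;
}

static uint64_t fake_disable(struct devinit *init)
{
        (void)init;
        return 1ull << 3;       /* pretend one engine is fused off */
}

int main(void)
{
        struct devinit_impl impl = { .name = "fake", .disable = fake_disable };
        struct devinit init = { .impl = &impl, .post = 1 };

        devinit_init(&init);
        printf("disable mask: 0x%llx\n", (unsigned long long)init.disable_mask);
        return 0;
}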
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 27c8235f1a85..7037eae46e44 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -27,12 +27,7 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv04_devinit_priv {
- struct nouveau_devinit base;
- int owner;
-};
+#include "nv04.h"
static void
nv04_devinit_meminit(struct nouveau_devinit *devinit)
@@ -393,17 +388,21 @@ int
nv04_devinit_fini(struct nouveau_object *object, bool suspend)
{
struct nv04_devinit_priv *priv = (void *)object;
+ int ret;
/* make i2c busses accessible */
nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
- /* unlock extended vga crtc regs, and unslave crtcs */
- nv_lockvgac(priv, false);
+ ret = nouveau_devinit_fini(&priv->base, suspend);
+ if (ret)
+ return ret;
+
+ /* unslave crtcs */
if (priv->owner < 0)
priv->owner = nv_rdvgaowner(priv);
nv_wrvgaowner(priv, 0);
- return nouveau_devinit_fini(&priv->base, suspend);
+ return 0;
}
int
@@ -431,14 +430,13 @@ nv04_devinit_dtor(struct nouveau_object *object)
{
struct nv04_devinit_priv *priv = (void *)object;
- /* restore vga owner saved at first init, and lock crtc regs */
+ /* restore vga owner saved at first init */
nv_wrvgaowner(priv, priv->owner);
- nv_lockvgac(priv, true);
nouveau_devinit_destroy(&priv->base);
}
-static int
+int
nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -451,19 +449,19 @@ nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.meminit = nv04_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
priv->owner = -1;
return 0;
}
-struct nouveau_oclass
-nv04_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv04_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
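
The per-chipset classes are now exported as a pointer to the .base member of a file-scope compound literal of struct nouveau_devinit_impl, so generic code sees a plain nouveau_oclass while devinit code can cast back to reach the extra hooks. A self-contained example of that C99 idiom is below (GCC accepts the address of a file-scope compound literal's member as a constant initializer); the types are stand-ins, not the real nouveau ones.

/* Illustrative only -- the compound-literal oclass idiom in isolation. */
#include <stdio.h>

struct oclass {
        unsigned handle;
};

struct impl {
        struct oclass base;
        void (*hook)(void);
};

static void my_hook(void)
{
        printf("hook called\n");
}

/* exported as a plain oclass pointer, but really points into an impl */
struct oclass *my_oclass = &(struct impl) {
        .base.handle = 0x04,
        .hook = my_hook,
}.base;

int main(void)
{
        /* a consumer that knows the layout can recover the impl again */
        struct impl *impl = (struct impl *)my_oclass;

        printf("handle 0x%02x\n", my_oclass->handle);
        impl->hook();
        return 0;
}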
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
new file mode 100644
index 000000000000..23470a57510c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_DEVINIT_NV04_H__
+#define __NVKM_DEVINIT_NV04_H__
+
+#include "priv.h"
+
+struct nv04_devinit_priv {
+ struct nouveau_devinit base;
+ int owner;
+};
+
+int nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv04_devinit_dtor(struct nouveau_object *);
+int nv04_devinit_init(struct nouveau_object *);
+int nv04_devinit_fini(struct nouveau_object *, bool);
+int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index b1912a8a8942..98b7e6780dc7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -29,12 +29,7 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv05_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
+#include "nv04.h"
static void
nv05_devinit_meminit(struct nouveau_devinit *devinit)
@@ -49,7 +44,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
{ 0x06, 0x00 },
{ 0x00, 0x00 }
};
- struct nv05_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct io_mapping *fb;
u32 patt = 0xdeadbeef;
@@ -130,31 +125,15 @@ out:
fbmem_fini(fb);
}
-static int
-nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv05_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv05_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv05_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x05),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv05_devinit_ctor,
+struct nouveau_oclass *
+nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x05),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv05_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 8d274dba1ef1..32b3d2131a7f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -27,17 +27,12 @@
#include <subdev/vga.h>
#include "fbmem.h"
-#include "priv.h"
-
-struct nv10_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
+#include "nv04.h"
static void
nv10_devinit_meminit(struct nouveau_devinit *devinit)
{
- struct nv10_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
static const int mem_width[] = { 0x10, 0x00, 0x20 };
int mem_width_count;
uint32_t patt = 0xdeadbeef;
@@ -101,31 +96,15 @@ amount_found:
fbmem_fini(fb);
}
-static int
-nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv10_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv10_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv10_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x10),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv10_devinit_ctor,
+struct nouveau_oclass *
+nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x10),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv10_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index e9743cdabe75..526d0c6faacd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -22,37 +22,16 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv04.h"
-struct nv1a_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
-
-static int
-nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv1a_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv1a_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x1a),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv1a_devinit_ctor,
+struct nouveau_oclass *
+nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x1a),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 6cc6080d3bc0..4689ba303b0b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -24,18 +24,13 @@
*
*/
-#include "priv.h"
+#include "nv04.h"
#include "fbmem.h"
-struct nv20_devinit_priv {
- struct nouveau_devinit base;
- u8 owner;
-};
-
static void
nv20_devinit_meminit(struct nouveau_devinit *devinit)
{
- struct nv20_devinit_priv *priv = (void *)devinit;
+ struct nv04_devinit_priv *priv = (void *)devinit;
struct nouveau_device *device = nv_device(priv);
uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
uint32_t amount, off;
@@ -65,31 +60,15 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
fbmem_fini(fb);
}
-static int
-nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv20_devinit_priv *priv;
- int ret;
-
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.meminit = nv20_devinit_meminit;
- priv->base.pll_set = nv04_devinit_pll_set;
- return 0;
-}
-
-struct nouveau_oclass
-nv20_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x20),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv20_devinit_ctor,
+struct nouveau_oclass *
+nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x20),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_devinit_ctor,
.dtor = nv04_devinit_dtor,
.init = nv04_devinit_init,
.fini = nv04_devinit_fini,
},
-};
+ .meminit = nv20_devinit_meminit,
+ .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 6df72247c477..b46c62a1d5d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -28,9 +28,9 @@
#include <subdev/bios/init.h>
#include <subdev/vga.h>
-#include "priv.h"
+#include "nv50.h"
-static int
+int
nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
struct nv50_devinit_priv *priv = (void *)devinit;
@@ -74,6 +74,19 @@ nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return 0;
}
+static u64
+nv50_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000))
+ disable |= (1ULL << NVDEV_ENGINE_MPEG);
+
+ return disable;
+}
+
int
nv50_devinit_init(struct nouveau_object *object)
{
@@ -120,7 +133,7 @@ nv50_devinit_init(struct nouveau_object *object)
return 0;
}
-static int
+int
nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -133,17 +146,18 @@ nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.pll_set = nv50_devinit_pll_set;
return 0;
}
-struct nouveau_oclass
-nv50_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv50_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
new file mode 100644
index 000000000000..141c27e9f182
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DEVINIT_NV50_H__
+#define __NVKM_DEVINIT_NV50_H__
+
+#include "priv.h"
+
+struct nv50_devinit_priv {
+ struct nouveau_devinit base;
+};
+
+int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+int nv50_devinit_init(struct nouveau_object *);
+int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
new file mode 100644
index 000000000000..787422505d87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv84_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_MPEG);
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x84),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv84_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
new file mode 100644
index 000000000000..2b0e963fc6f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv98_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x98),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nv50_devinit_pll_set,
+ .disable = nv98_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 76a68b290141..6dedf1dad7f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -22,12 +22,12 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv50.h"
-static int
+int
nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
- struct nva3_devinit_priv *priv = (void *)devinit;
+ struct nv50_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
@@ -58,30 +58,38 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return ret;
}
-static int
-nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+static u64
+nva3_devinit_disable(struct nouveau_devinit *devinit)
{
- struct nv50_devinit_priv *priv;
- int ret;
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0ULL;
- ret = nouveau_devinit_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000200))
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
- priv->base.pll_set = nva3_devinit_pll_set;
- return 0;
+ return disable;
}
-struct nouveau_oclass
-nva3_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_devinit_ctor,
+struct nouveau_oclass *
+nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xa3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nva3_devinit_pll_set,
+ .disable = nva3_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
new file mode 100644
index 000000000000..4fc68d27eff3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nvaf_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r001540 = nv_rd32(priv, 0x001540);
+ u32 r00154c = nv_rd32(priv, 0x00154c);
+ u64 disable = 0;
+
+ if (!(r001540 & 0x40000000)) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (!(r00154c & 0x00000004))
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+ if (!(r00154c & 0x00000020))
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (!(r00154c & 0x00000040))
+ disable |= (1ULL << NVDEV_ENGINE_VIC);
+ if (!(r00154c & 0x00000200))
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
+
+ return disable;
+}
+
+struct nouveau_oclass *
+nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xaf),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nva3_devinit_pll_set,
+ .disable = nvaf_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 19e265bf4574..fa7e63766b1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -22,12 +22,12 @@
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "nv50.h"
static int
nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
- struct nvc0_devinit_priv *priv = (void *)devinit;
+ struct nv50_devinit_priv *priv = (void *)devinit;
struct nouveau_bios *bios = nouveau_bios(priv);
struct nvbios_pll info;
int N, fN, M, P;
@@ -59,6 +59,33 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
return ret;
}
+static u64
+nvc0_devinit_disable(struct nouveau_devinit *devinit)
+{
+ struct nv50_devinit_priv *priv = (void *)devinit;
+ u32 r022500 = nv_rd32(priv, 0x022500);
+ u64 disable = 0ULL;
+
+ if (r022500 & 0x00000001)
+ disable |= (1ULL << NVDEV_ENGINE_DISP);
+
+ if (r022500 & 0x00000002) {
+ disable |= (1ULL << NVDEV_ENGINE_VP);
+ disable |= (1ULL << NVDEV_ENGINE_PPP);
+ }
+
+ if (r022500 & 0x00000004)
+ disable |= (1ULL << NVDEV_ENGINE_BSP);
+ if (r022500 & 0x00000008)
+ disable |= (1ULL << NVDEV_ENGINE_VENC);
+ if (r022500 & 0x00000100)
+ disable |= (1ULL << NVDEV_ENGINE_COPY0);
+ if (r022500 & 0x00000200)
+ disable |= (1ULL << NVDEV_ENGINE_COPY1);
+
+ return disable;
+}
+
static int
nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -72,19 +99,20 @@ nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.pll_set = nvc0_devinit_pll_set;
if (nv_rd32(priv, 0x022500) & 0x00000001)
priv->base.post = true;
return 0;
}
-struct nouveau_oclass
-nvc0_devinit_oclass = {
- .handle = NV_SUBDEV(DEVINIT, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_devinit_ctor,
.dtor = _nouveau_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nouveau_devinit_fini,
},
-};
+ .pll_set = nvc0_devinit_pll_set,
+ .disable = nvc0_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index 7d622e2b0171..822a2fbf44a5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -6,20 +6,32 @@
#include <subdev/clock/pll.h>
#include <subdev/devinit.h>
-void nv04_devinit_dtor(struct nouveau_object *);
-int nv04_devinit_init(struct nouveau_object *);
-int nv04_devinit_fini(struct nouveau_object *, bool);
-int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-
-
-struct nv50_devinit_priv {
- struct nouveau_devinit base;
+struct nouveau_devinit_impl {
+ struct nouveau_oclass base;
+ void (*meminit)(struct nouveau_devinit *);
+ int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
+ u64 (*disable)(struct nouveau_devinit *);
};
-int nv50_devinit_init(struct nouveau_object *);
+#define nouveau_devinit_create(p,e,o,d) \
+ nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_dtor(nv_object(d)); \
+})
+#define nouveau_devinit_init(p) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_init(nv_object(d)); \
+})
+#define nouveau_devinit_fini(p,s) ({ \
+ struct nouveau_devinit *d = (p); \
+ _nouveau_devinit_fini(nv_object(d), (s)); \
+})
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_devinit_dtor(struct nouveau_object *);
+int _nouveau_devinit_init(struct nouveau_object *);
+int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
#endif
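
The nouveau_devinit_destroy()/init()/fini() wrappers above use GNU statement expressions so the macro argument is evaluated once into a typed local before being forwarded to the underscore-prefixed worker, and the value of the last statement becomes the value of the whole expression. A tiny compilable example of the same pattern follows (GCC/Clang extension; names are stand-ins, not the nouveau API).

/* Illustrative only -- the statement-expression wrapper pattern. */
#include <stdio.h>

struct widget { int id; };

static int _widget_fini(struct widget *w, int suspend)
{
        printf("fini widget %d (suspend=%d)\n", w->id, suspend);
        return 0;
}

#define widget_fini(p, s) ({                    \
        struct widget *_w = (p);                \
        _widget_fini(_w, (s));                  \
})

int main(void)
{
        struct widget w = { .id = 7 };

        return widget_fini(&w, 1);
}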
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
index 34f9605ffee6..66fe959b4f74 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -25,35 +25,44 @@
#include <subdev/bios.h>
#include "priv.h"
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card. for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
int
-nouveau_gddr5_calc(struct nouveau_ram *ram)
+nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
{
- struct nouveau_bios *bios = nouveau_bios(ram);
- int pd, lf, xd, vh, vr, vo;
- int WL, CL, WR, at, dt, ds;
+ int pd, lf, xd, vh, vr, vo, l3;
+ int WL, CL, WR, at[2], dt, ds;
int rq = ram->freq < 1000000; /* XXX */
- switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+ switch (ram->ramcfg.version) {
case 0x11:
- pd = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
- lf = (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
- xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
- vh = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
- vr = (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
- vo = nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+ pd = ram->next->bios.ramcfg_11_01_80;
+ lf = ram->next->bios.ramcfg_11_01_40;
+ xd = !ram->next->bios.ramcfg_11_01_20;
+ vh = ram->next->bios.ramcfg_11_02_10;
+ vr = ram->next->bios.ramcfg_11_02_04;
+ vo = ram->next->bios.ramcfg_11_06;
+ l3 = !ram->next->bios.ramcfg_11_07_02;
break;
default:
return -ENOSYS;
}
- switch (!!ram->timing.data * ram->timing.version) {
+ switch (ram->timing.version) {
case 0x20:
- WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
- CL = nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
- WR = nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
- at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
- dt = nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
- ds = nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+ WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+ CL = (ram->next->bios.timing[1] & 0x0000001f);
+ WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+ at[0] = ram->next->bios.timing_20_2e_c0;
+ at[1] = ram->next->bios.timing_20_2e_30;
+ dt = ram->next->bios.timing_20_2e_03;
+ ds = ram->next->bios.timing_20_2f_03;
break;
default:
return -ENOSYS;
@@ -71,13 +80,25 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
ram->mr[1] &= ~0x0bf;
ram->mr[1] |= (xd & 0x01) << 7;
- ram->mr[1] |= (at & 0x03) << 4;
+ ram->mr[1] |= (at[0] & 0x03) << 4;
ram->mr[1] |= (dt & 0x03) << 2;
ram->mr[1] |= (ds & 0x03) << 0;
+ /* this seems wrong, alternate field used for the broadcast
+ * on nuts vs non-nuts configs.. meh, it matches for now.
+ */
+ ram->mr1_nuts = ram->mr[1];
+ if (nuts) {
+ ram->mr[1] &= ~0x030;
+ ram->mr[1] |= (at[1] & 0x03) << 4;
+ }
+
ram->mr[3] &= ~0x020;
ram->mr[3] |= (rq & 0x01) << 5;
+ ram->mr[5] &= ~0x004;
+ ram->mr[5] |= (l3 << 2);
+
if (!vo)
vo = (ram->mr[6] & 0xff0) >> 4;
if (ram->mr[6] & 0x001)
@@ -86,11 +107,16 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
ram->mr[6] |= (vo & 0xff) << 4;
ram->mr[6] |= (pd & 0x01) << 0;
- if (!(ram->mr[7] & 0x100))
- vr = 0; /* binary driver does this.. bug? */
- ram->mr[7] &= ~0x188;
- ram->mr[7] |= (vr & 0x01) << 8;
+ if (NOTE00(vr)) {
+ ram->mr[7] &= ~0x300;
+ ram->mr[7] |= (vr & 0x03) << 8;
+ }
+ ram->mr[7] &= ~0x088;
ram->mr[7] |= (vh & 0x01) << 7;
ram->mr[7] |= (lf & 0x01) << 3;
+
+ ram->mr[8] &= ~0x003;
+ ram->mr[8] |= (WR & 0x10) >> 3;
+ ram->mr[8] |= (CL & 0x10) >> 4;
return 0;
}
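
nouveau_gddr5_calc() updates the cached mode-register values with a read-modify-write pattern: clear the field's mask, then OR in the new value at its shift. The sketch below shows that pattern on its own; the field layout used here is invented for illustration and is not the real GDDR5 MR1 layout, nor part of the patch.

/* Illustrative only -- generic mode-register field update. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mr_set_field(uint32_t mr, uint32_t mask,
                             unsigned shift, uint32_t val)
{
        mr &= ~mask;                    /* clear the field   */
        mr |= (val << shift) & mask;    /* insert new value  */
        return mr;
}

int main(void)
{
        uint32_t mr1 = 0xfff;

        mr1 = mr_set_field(mr1, 0x030, 4, 2);   /* e.g. address termination */
        mr1 = mr_set_field(mr1, 0x00c, 2, 1);   /* e.g. data termination    */
        mr1 = mr_set_field(mr1, 0x003, 0, 3);   /* e.g. drive strength      */

        printf("mr1 = 0x%03x\n", mr1);
        return 0;
}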
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5ccee93..265d1253624a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
.fini = _nouveau_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv10_ram_oclass,
+ .base.ram = &nv1a_ram_oclass,
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index e5fc37c4caac..45470e1f0385 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -33,6 +33,21 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
}
+static void
+nvc0_fb_intr(struct nouveau_subdev *subdev)
+{
+ struct nvc0_fb_priv *priv = (void *)subdev;
+ u32 intr = nv_rd32(priv, 0x000100);
+ if (intr & 0x08000000) {
+ nv_debug(priv, "PFFB intr\n");
+ intr &= ~0x08000000;
+ }
+ if (intr & 0x00002000) {
+ nv_debug(priv, "PBFB intr\n");
+ intr &= ~0x00002000;
+ }
+}
+
int
nvc0_fb_init(struct nouveau_object *object)
{
@@ -86,6 +101,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return -EFAULT;
}
+ nv_subdev(priv)->intr = nvc0_fb_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 493125214e88..edaf95dee612 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -34,7 +34,7 @@ extern struct nouveau_oclass nvc0_ram_oclass;
extern struct nouveau_oclass nve0_ram_oclass;
int nouveau_sddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr5_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
#define nouveau_fb_create(p,e,c,d) \
nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 76762a17d89c..c7fdb3a9e88b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -70,13 +70,11 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nv50_ramseq *hwsq = &ram->hwsq;
struct nvbios_perfE perfE;
struct nvbios_pll mpll;
- struct bit_entry M;
struct {
u32 data;
u8 size;
} ramcfg, timing;
- u8 ver, hdr, cnt, strap;
- u32 data;
+ u8 ver, hdr, cnt, len, strap;
int N1, M1, N2, M2, P;
int ret, i;
@@ -93,16 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
} while (perfE.memory < freq);
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 3);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -113,7 +102,8 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || hdr < 0x12) {
nv_error(pfb, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index f6292cd9207c..f4ae8aa46a25 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -79,8 +79,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
struct nva3_clock_info mclk;
- struct bit_entry M;
- u8 ver, cnt, strap;
+ u8 ver, cnt, len, strap;
u32 data;
struct {
u32 data;
@@ -91,24 +90,15 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
int ret;
/* lookup memory config data relevant to the target frequency */
- rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
- &cnt, &ramcfg.size);
+ rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -123,8 +113,8 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver,
- &timing.size);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index f464547c6bab..0391b824ee76 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,7 +23,6 @@
*/
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
@@ -134,9 +133,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nvc0_ram *ram = (void *)pfb->ram;
struct nvc0_ramfuc *fuc = &ram->fuc;
- struct bit_entry M;
- u8 ver, cnt, strap;
- u32 data;
+ u8 ver, cnt, len, strap;
struct {
u32 data;
u8 size;
@@ -147,24 +144,15 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
int ret;
/* lookup memory config data relevant to the target frequency */
- rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
- &cnt, &ramcfg.size);
+ rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+ &cnt, &ramcfg.size);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
+ strap = nvbios_ramcfg_index(bios);
if (strap >= cnt) {
nv_error(pfb, "invalid ramcfg strap\n");
return -EINVAL;
@@ -179,8 +167,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
/* lookup memory timings, if bios says they're present */
strap = nv_ro08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
- timing.data = nvbios_timing_entry(bios, strap, &ver,
- &timing.size);
+ timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+ &cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index bc86cfd084f6..3257c522a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -25,7 +25,6 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/init.h>
#include <subdev/bios/rammap.h>
@@ -42,6 +41,14 @@
#include "ramfuc.h"
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card. for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
struct nve0_ramfuc {
struct ramfuc base;
@@ -104,7 +111,9 @@ struct nve0_ramfuc {
struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
struct ramfuc_reg r_0x62c000;
+
struct ramfuc_reg r_0x10f200;
+
struct ramfuc_reg r_0x10f210;
struct ramfuc_reg r_0x10f310;
struct ramfuc_reg r_0x10f314;
@@ -118,12 +127,17 @@ struct nve0_ramfuc {
struct ramfuc_reg r_0x10f65c;
struct ramfuc_reg r_0x10f6bc;
struct ramfuc_reg r_0x100710;
- struct ramfuc_reg r_0x10f750;
+ struct ramfuc_reg r_0x100750;
};
struct nve0_ram {
struct nouveau_ram base;
struct nve0_ramfuc fuc;
+
+ u32 parts;
+ u32 pmask;
+ u32 pnuts;
+
int from;
int mode;
int N1, fN1, M1, P1;
@@ -134,17 +148,17 @@ struct nve0_ram {
* GDDR5
******************************************************************************/
static void
-train(struct nve0_ramfuc *fuc, u32 magic)
+nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
{
struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_fb *pfb = nouveau_fb(ram);
- const int mc = nv_rd32(pfb, 0x02243c);
- int i;
-
- ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
- ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
- for (i = 0; i < mc; i++) {
- const u32 addr = 0x110974 + (i * 0x1000);
+ u32 addr = 0x110974, i;
+
+ ram_mask(fuc, 0x10f910, mask, data);
+ ram_mask(fuc, 0x10f914, mask, data);
+
+ for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
+ if (ram->pmask & (1 << i))
+ continue;
ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
}
}
@@ -199,12 +213,12 @@ r1373f4_init(struct nve0_ramfuc *fuc)
}
static void
-r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+r1373f4_fini(struct nve0_ramfuc *fuc)
{
struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
- struct nouveau_bios *bios = nouveau_bios(ram);
- u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
- u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+ struct nouveau_ram_data *next = ram->base.next;
+ u8 v0 = next->bios.ramcfg_11_03_c0;
+ u8 v1 = next->bios.ramcfg_11_03_30;
u32 tmp;
tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
@@ -220,25 +234,46 @@ r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
}
+static void
+nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
+ u32 _mask, u32 _data, u32 _copy)
+{
+ struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
+ struct ramfuc *fuc = &ram->fuc.base;
+ u32 addr = 0x110000 + (reg->addr[0] & 0xfff);
+ u32 mask = _mask | _copy;
+ u32 data = (_data & _mask) | (reg->data & _copy);
+ u32 i;
+
+ for (i = 0; i < 16; i++, addr += 0x1000) {
+ if (ram->pnuts & (1 << i)) {
+ u32 prev = nv_rd32(priv, addr);
+ u32 next = (prev & ~mask) | data;
+ nouveau_memx_wr32(fuc->memx, addr, next);
+ }
+ }
+}
+#define ram_nuts(s,r,m,d,c) \
+ nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
+
static int
nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
- const u32 rammap = ram->base.rammap.data;
- const u32 ramcfg = ram->base.ramcfg.data;
- const u32 timing = ram->base.timing.data;
- int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
- int mv = 1; /*XXX*/
+ struct nouveau_ram_data *next = ram->base.next;
+ int vc = !(next->bios.ramcfg_11_02_08);
+ int mv = !(next->bios.ramcfg_11_02_04);
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
- if ((ram->base.mr[1] & 0x03c) != 0x030)
+ if ((ram->base.mr[1] & 0x03c) != 0x030) {
ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+ ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
+ }
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -250,8 +285,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
- ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+ nve0_ram_train(fuc, 0x01020000, 0x000c0000);
ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
ram_nsec(fuc, 1000);
@@ -280,28 +314,28 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
if (1) {
data |= 0x800807e0;
- switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
- case 0xc0: data &= ~0x00000040; break;
- case 0x80: data &= ~0x00000100; break;
- case 0x40: data &= ~0x80000000; break;
- case 0x00: data &= ~0x00000400; break;
+ switch (next->bios.ramcfg_11_03_c0) {
+ case 3: data &= ~0x00000040; break;
+ case 2: data &= ~0x00000100; break;
+ case 1: data &= ~0x80000000; break;
+ case 0: data &= ~0x00000400; break;
}
- switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
- case 0x30: data &= ~0x00000020; break;
- case 0x20: data &= ~0x00000080; break;
- case 0x10: data &= ~0x00080000; break;
- case 0x00: data &= ~0x00000200; break;
+ switch (next->bios.ramcfg_11_03_30) {
+ case 3: data &= ~0x00000020; break;
+ case 2: data &= ~0x00000080; break;
+ case 1: data &= ~0x00080000; break;
+ case 0: data &= ~0x00000200; break;
}
}
- if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
- if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else {
mask |= 0x34000000;
@@ -314,18 +348,18 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
if (ram->from == 2 && ram->mode != 2) {
ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
- ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+ ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
r1373f4_init(fuc);
ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
} else
if (ram->from != 2 && ram->mode != 2) {
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
}
if (ram_have(fuc, gpioMV)) {
@@ -336,49 +370,54 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
- (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ if ( (next->bios.ramcfg_11_02_40) ||
+ (next->bios.ramcfg_11_07_10)) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->from != 2 && ram->mode == 2) {
+ if (0 /*XXX: Titan */)
+ ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
} else
if (ram->from == 2 && ram->mode == 2) {
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
r1373f4_init(fuc);
- r1373f4_fini(fuc, ramcfg);
+ r1373f4_fini(fuc);
}
if (ram->mode != 2) /*XXX*/ {
- if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
- data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
- ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
- ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
- ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
- data = nv_ro08(bios, ramcfg + 0x04);
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
- ram_wr32(fuc, 0x10f698, 0x01010101 * data);
- ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+ if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
+ ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
+ ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
+ } else
+ if (!next->bios.ramcfg_11_07_08) {
+ ram_wr32(fuc, 0x10f698, 0x00000000);
+ ram_wr32(fuc, 0x10f69c, 0x00000000);
}
if (ram->mode != 2) {
- u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
- ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+ u32 data = 0x01000100 * next->bios.ramcfg_11_04;
+ ram_nuke(fuc, 0x10f694);
+ ram_mask(fuc, 0x10f694, 0xff00ff00, data);
}
- if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+ if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
data = 0x00000080;
else
data = 0x00000000;
@@ -386,19 +425,19 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x00070000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ if (!(next->bios.ramcfg_11_02_80))
data |= 0x03000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ if (!(next->bios.ramcfg_11_02_40))
data |= 0x00002000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ if (!(next->bios.ramcfg_11_07_10))
data |= 0x00004000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ if (!(next->bios.ramcfg_11_07_08))
data |= 0x00000003;
else
data |= 0x74000000;
ram_mask(fuc, 0x10f824, mask, data);
- if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+ if (next->bios.ramcfg_11_01_08)
data = 0x00000000;
else
data = 0x00001000;
@@ -409,61 +448,90 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
}
- if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
data = 0x00000000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+ if (next->bios.ramcfg_11_08_08)
data |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+ if (next->bios.ramcfg_11_08_04)
data |= 0x00001000;
- if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+ if (next->bios.ramcfg_11_08_02)
data |= 0x00004000;
ram_mask(fuc, 0x10f830, 0x00007000, data);
/* PFB timing */
- ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
- ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
- ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
- ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
- ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
- ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
- ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
- ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
- ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
- ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
- ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
-
- data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
- data |= 0x70000000;
- ram_mask(fuc, 0x10f604, 0x70000300, data);
-
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
- data |= 0x00000100;
- ram_mask(fuc, 0x10f614, 0x70000000, data);
-
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
- data |= 0x00000100;
- ram_mask(fuc, 0x10f610, 0x70000000, data);
+ ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+ ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+ ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+ ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+ ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
+
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_08_20)) {
+ if (next->bios.ramcfg_11_08_20)
+ data |= 0x01000000;
+ mask |= 0x01000000;
+ }
+ ram_mask(fuc, 0x10f200, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_02_03 != 0)) {
+ data |= (next->bios.ramcfg_11_02_03) << 8;
+ mask |= 0x00000300;
+ }
+ if (NOTE00(ramcfg_01_10)) {
+ if (next->bios.ramcfg_11_01_10)
+ data |= 0x70000000;
+ mask |= 0x70000000;
+ }
+ ram_mask(fuc, 0x10f604, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(timing_30_07 != 0)) {
+ data |= (next->bios.timing_20_30_07) << 28;
+ mask |= 0x70000000;
+ }
+ if (NOTE00(ramcfg_01_01)) {
+ if (next->bios.ramcfg_11_01_01)
+ data |= 0x00000100;
+ mask |= 0x00000100;
+ }
+ ram_mask(fuc, 0x10f614, mask, data);
+
+ data = mask = 0x00000000;
+ if (NOTE00(timing_30_07 != 0)) {
+ data |= (next->bios.timing_20_30_07) << 28;
+ mask |= 0x70000000;
+ }
+ if (NOTE00(ramcfg_01_02)) {
+ if (next->bios.ramcfg_11_01_02)
+ data |= 0x00000100;
+ mask |= 0x00000100;
+ }
+ ram_mask(fuc, 0x10f610, mask, data);
mask = 0x33f00000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ if (!(next->bios.ramcfg_11_01_04))
data |= 0x20200000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if (!(next->bios.ramcfg_11_07_80))
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
- if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.rammap_11_08_0c) {
+ if (!(next->bios.ramcfg_11_07_80))
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -476,49 +544,53 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f808, mask, data);
- data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
- ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+ ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
- data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
- data |= 0x00000004;
- if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
- ram_wr32(fuc, 0x10f750, 0x04000009);
+ data = mask = 0x00000000;
+ if (NOTE00(ramcfg_02_03 != 0)) {
+ data |= next->bios.ramcfg_11_02_03;
+ mask |= 0x00000003;
+ }
+ if (NOTE00(ramcfg_01_10)) {
+ if (next->bios.ramcfg_11_01_10)
+ data |= 0x00000004;
+ mask |= 0x00000004;
+ }
+
+ if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
+ ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
ram_wr32(fuc, 0x100710, 0x00000000);
ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
}
- ram_mask(fuc, 0x100770, 0x00000007, data);
- data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
- if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+ data = (next->bios.timing_20_30_07) << 8;
+ if (next->bios.ramcfg_11_01_01)
data |= 0x80000000;
ram_mask(fuc, 0x100778, 0x00000700, data);
- data = nv_ro16(bios, timing + 0x2c);
- ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
- ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
-
- data = nv_ro08(bios, timing + 0x30);
- ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+ ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
+ data = (next->bios.timing[10] & 0x7f000000) >> 24;
+ if (data < next->bios.timing_20_2c_1fc0)
+ data = next->bios.timing_20_2c_1fc0;
+ ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
- data = nv_ro16(bios, timing + 0x31);
- ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
- (data & 0x0780) << 10 |
- (data & 0x0078) << 5 |
- (data & 0x0007));
- ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
- (data & 0x7000) >> 12);
+ ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
+ next->bios.timing_20_31_0780 << 17 |
+ next->bios.timing_20_31_0078 << 8 |
+ next->bios.timing_20_31_0007);
+ ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
+ next->bios.timing_20_31_7000);
ram_wr32(fuc, 0x10f090, 0x4000007e);
- ram_nsec(fuc, 1000);
+ ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
- ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
- if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+ if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
- train(fuc, 0xa4010000); /*XXX*/
+ nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f294, temp);
}
@@ -528,7 +600,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
ram_nsec(fuc, 1000);
ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
- ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+ ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
@@ -544,12 +616,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_nsec(fuc, 1000);
+ ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);
data = ram_rd32(fuc, 0x10f978);
data &= ~0x00046144;
data |= 0x0000000b;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ if (!(next->bios.ramcfg_11_07_08)) {
+ if (!(next->bios.ramcfg_11_07_04))
data |= 0x0000200c;
else
data |= 0x00000000;
@@ -563,44 +636,43 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f830, data);
}
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+ if (!(next->bios.ramcfg_11_07_08)) {
data = 0x88020000;
- if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+ if ( (next->bios.ramcfg_11_07_04))
data |= 0x10000000;
- if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+ if (!(next->bios.rammap_11_08_10))
data |= 0x00080000;
} else {
data = 0xa40e0000;
}
- train(fuc, data);
- ram_nsec(fuc, 1000);
+ nve0_ram_train(fuc, 0xbc0f0000, data);
+ if (1) /* XXX: not always? */
+ ram_nsec(fuc, 1000);
if (ram->mode == 2) { /*XXX*/
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
}
- /* MR5: (re)enable LP3 if necessary
- * XXX: need to find the switch, keeping off for now
- */
- ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+ /* LP3 */
+ if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
+ ram_nsec(fuc, 1000);
if (ram->mode != 2) {
ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
}
- if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
- ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
- ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
- }
+ if (next->bios.ramcfg_11_07_02)
+ nve0_ram_train(fuc, 0x80020000, 0x01000000);
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
- if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
ram_mask(fuc, 0x10f200, 0x00000800, data);
+ ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
return 0;
}
@@ -611,17 +683,14 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
static int
nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
{
- struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
- const u32 rammap = ram->base.rammap.data;
- const u32 ramcfg = ram->base.ramcfg.data;
- const u32 timing = ram->base.timing.data;
- int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
- int mv = 1; /*XXX*/
+ struct nouveau_ram_data *next = ram->base.next;
+ int vc = !(next->bios.ramcfg_11_02_08);
+ int mv = !(next->bios.ramcfg_11_02_04);
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -636,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+ if ((next->bios.ramcfg_11_03_f0))
ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -661,28 +730,28 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
if (1) {
mask |= 0x800807e0;
data |= 0x800807e0;
- switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
- case 0xc0: data &= ~0x00000040; break;
- case 0x80: data &= ~0x00000100; break;
- case 0x40: data &= ~0x80000000; break;
- case 0x00: data &= ~0x00000400; break;
+ switch (next->bios.ramcfg_11_03_c0) {
+ case 3: data &= ~0x00000040; break;
+ case 2: data &= ~0x00000100; break;
+ case 1: data &= ~0x80000000; break;
+ case 0: data &= ~0x00000400; break;
}
- switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
- case 0x30: data &= ~0x00000020; break;
- case 0x20: data &= ~0x00000080; break;
- case 0x10: data &= ~0x00080000; break;
- case 0x00: data &= ~0x00000200; break;
+ switch (next->bios.ramcfg_11_03_30) {
+ case 3: data &= ~0x00000020; break;
+ case 2: data &= ~0x00000080; break;
+ case 1: data &= ~0x00080000; break;
+ case 0: data &= ~0x00000200; break;
}
}
- if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+ if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
- if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+ if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+ if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
- if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+ if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else
mask |= 0x14000000;
@@ -692,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
- data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+ data |= (next->bios.ramcfg_11_03_30) << 12;
ram_wr32(fuc, 0x1373ec, data);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -724,68 +793,67 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
- (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+ if ( (next->bios.ramcfg_11_02_40) ||
+ (next->bios.ramcfg_11_07_10)) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->mode != 2) /*XXX*/ {
- if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+ if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
- data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
- ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
- ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
- ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+ ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+ ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+ ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
mask = 0x00010000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+ if (!(next->bios.ramcfg_11_02_80))
data |= 0x03000000;
- if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+ if (!(next->bios.ramcfg_11_02_40))
data |= 0x00002000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+ if (!(next->bios.ramcfg_11_07_10))
data |= 0x00004000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+ if (!(next->bios.ramcfg_11_07_08))
data |= 0x00000003;
else
data |= 0x14000000;
ram_mask(fuc, 0x10f824, mask, data);
ram_nsec(fuc, 1000);
- if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+ if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
/* PFB timing */
- ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
- ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
- ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
- ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
- ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
- ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
- ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
- ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
- ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
- ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
- ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+ ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+ ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+ ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+ ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+ ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+ ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+ ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+ ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+ ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+ ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+ ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
mask = 0x33f00000;
data = 0x00000000;
- if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+ if (!(next->bios.ramcfg_11_01_04))
data |= 0x20200000;
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if (!(next->bios.ramcfg_11_07_80))
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
- if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
- if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+ if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.rammap_11_08_0c) {
+ if (!(next->bios.ramcfg_11_07_80))
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -799,21 +867,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f808, mask, data);
- data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
- ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+ ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
- data = nv_ro16(bios, timing + 0x2c);
- ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) << 4);
+ ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
- if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6) >
- ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
- data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >> 6;
- else
- data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+ data = (next->bios.timing[10] & 0x7f000000) >> 24;
+ if (data < next->bios.timing_20_2c_1fc0)
+ data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
- data = nv_ro08(bios, timing + 0x30);
- ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
ram_wr32(fuc, 0x10f090, 0x4000007f);
ram_nsec(fuc, 1000);
@@ -855,7 +918,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
- if (nv_ro08(bios, rammap + 0x08) & 0x01)
+ if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
@@ -868,21 +931,18 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
******************************************************************************/
static int
-nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
+ struct nouveau_ram_data *data)
{
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
- struct nve0_ramfuc *fuc = &ram->fuc;
- struct bit_entry M;
- int ret, refclk, strap, i;
- u32 data;
- u8 cnt;
+ u8 strap, cnt, len;
/* lookup memory config data relevant to the target frequency */
- ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
- &ram->base.rammap.version,
- &ram->base.rammap.size, &cnt,
- &ram->base.ramcfg.size);
+ ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000,
+ &ram->base.rammap.version,
+ &ram->base.rammap.size,
+ &cnt, &len, &data->bios);
if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
ram->base.rammap.size < 0x09) {
nv_error(pfb, "invalid/missing rammap entry\n");
@@ -890,24 +950,13 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
/* locate specific data set for the attached memory */
- if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
- nv_error(pfb, "invalid/missing memory table\n");
- return -EINVAL;
- }
-
- strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
- data = nv_ro16(bios, M.offset + 1);
- if (data)
- strap = nv_ro08(bios, data + strap);
-
- if (strap >= cnt) {
- nv_error(pfb, "invalid ramcfg strap\n");
- return -EINVAL;
- }
-
- ram->base.ramcfg.version = ram->base.rammap.version;
- ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
- (ram->base.ramcfg.size * strap);
+ ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
+ ram->base.rammap.version,
+ ram->base.rammap.size, cnt, len,
+ nvbios_ramcfg_index(bios),
+ &ram->base.ramcfg.version,
+ &ram->base.ramcfg.size,
+ &data->bios);
if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
ram->base.ramcfg.size < 0x08) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
@@ -918,9 +967,9 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
if (strap != 0xff) {
ram->base.timing.data =
- nvbios_timing_entry(bios, strap,
- &ram->base.timing.version,
- &ram->base.timing.size);
+ nvbios_timingEp(bios, strap, &ram->base.timing.version,
+ &ram->base.timing.size, &cnt, &len,
+ &data->bios);
if (!ram->base.timing.data ||
ram->base.timing.version != 0x20 ||
ram->base.timing.size < 0x33) {
@@ -931,11 +980,23 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram->base.timing.data = 0;
}
+ data->freq = freq;
+ return 0;
+}
+
+static int
+nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
+{
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nve0_ramfuc *fuc = &ram->fuc;
+ int refclk, i;
+ int ret;
+
ret = ram_init(fuc, pfb);
if (ret)
return ret;
- ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+ ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
/* XXX: this is *not* what nvidia do. on fermi nvidia generally
@@ -946,7 +1007,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
* so far, i've seen very weird values being chosen by nvidia on
* kepler boards, no idea how/why they're chosen.
*/
- refclk = freq;
+ refclk = next->freq;
if (ram->mode == 2)
refclk = fuc->mempll.refclk;
@@ -968,7 +1029,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
fuc->mempll.min_p = 1;
fuc->mempll.max_p = 2;
- ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+ ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
&ram->N2, NULL, &ram->M2, &ram->P2);
if (ret <= 0) {
nv_error(pfb, "unable to calc mempll\n");
@@ -980,17 +1041,18 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
if (ram_have(fuc, mr[i]))
ram->base.mr[i] = ram_rd32(fuc, mr[i]);
}
+ ram->base.freq = next->freq;
switch (ram->base.type) {
case NV_MEM_TYPE_DDR3:
ret = nouveau_sddr3_calc(&ram->base);
if (ret == 0)
- ret = nve0_ram_calc_sddr3(pfb, freq);
+ ret = nve0_ram_calc_sddr3(pfb, next->freq);
break;
case NV_MEM_TYPE_GDDR5:
- ret = nouveau_gddr5_calc(&ram->base);
+ ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
- ret = nve0_ram_calc_gddr5(pfb, freq);
+ ret = nve0_ram_calc_gddr5(pfb, next->freq);
break;
default:
ret = -ENOSYS;
@@ -1001,13 +1063,55 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
}
static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+ struct nouveau_clock *clk = nouveau_clock(pfb);
+ struct nve0_ram *ram = (void *)pfb->ram;
+ struct nouveau_ram_data *xits = &ram->base.xition;
+ struct nouveau_ram_data *copy;
+ int ret;
+
+ if (ram->base.next == NULL) {
+ ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+ &ram->base.former);
+ if (ret)
+ return ret;
+
+ ret = nve0_ram_calc_data(pfb, freq, &ram->base.target);
+ if (ret)
+ return ret;
+
+ if (ram->base.target.freq < ram->base.former.freq) {
+ *xits = ram->base.target;
+ copy = &ram->base.former;
+ } else {
+ *xits = ram->base.former;
+ copy = &ram->base.target;
+ }
+
+ xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
+ xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
+ xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;
+
+ ram->base.next = &ram->base.target;
+ if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
+ ram->base.next = &ram->base.xition;
+ } else {
+ BUG_ON(ram->base.next != &ram->base.xition);
+ ram->base.next = &ram->base.target;
+ }
+
+ return nve0_ram_calc_xits(pfb, ram->base.next);
+}
+
+static int
nve0_ram_prog(struct nouveau_fb *pfb)
{
struct nouveau_device *device = nv_device(pfb);
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
- return 0;
+ return (ram->base.next == &ram->base.xition);
}
static void
@@ -1015,6 +1119,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
{
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
+ ram->base.next = NULL;
ram_exec(fuc, false);
}
@@ -1055,7 +1160,7 @@ nve0_ram_init(struct nouveau_object *object)
* binary driver skips the one that's already been setup by
* the init tables.
*/
- data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!data || hdr < 0x15)
return -EINVAL;
@@ -1073,6 +1178,7 @@ nve0_ram_init(struct nouveau_object *object)
data += 4;
}
nv_wr32(pfb, 0x10f65c, save);
+ nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
switch (ram->base.type) {
case NV_MEM_TYPE_GDDR5:
@@ -1117,7 +1223,8 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_gpio *gpio = nouveau_gpio(pfb);
struct dcb_gpio_func func;
struct nve0_ram *ram;
- int ret;
+ int ret, i;
+ u32 tmp;
ret = nvc0_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
@@ -1136,6 +1243,25 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
break;
}
+ /* calculate a mask of differently configured memory partitions,
+ * because, of course, reclocking wasn't complicated enough already
+ * without having to treat some of them differently from the
+ * others...
+ */
+ ram->parts = nv_rd32(pfb, 0x022438);
+ ram->pmask = nv_rd32(pfb, 0x022554);
+ ram->pnuts = 0;
+ for (i = 0, tmp = 0; i < ram->parts; i++) {
+ if (!(ram->pmask & (1 << i))) {
+ u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+ if (tmp && tmp != cfg1) {
+ ram->pnuts |= (1 << i);
+ continue;
+ }
+ tmp = cfg1;
+ }
+ }
+
// parse bios data for both pll's
ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
if (ret) {
@@ -1248,7 +1374,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
- ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+ ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
return 0;
}
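The hunks above split the reclock into nve0_ram_calc_data()/nve0_ram_calc_xits() and make nve0_ram_prog() return nonzero when the script that just ran only reached the intermediate "xition" configuration rather than the final target. A minimal sketch of how a caller might drive this, assuming the usual calc/prog/tidy hooks on struct nouveau_ram; the loop itself is illustrative and not part of this patch:

    /* Repeat calc/prog while prog() reports that only the intermediate
     * transition step (ram->base.xition) was programmed; tidy() then
     * clears ram->base.next ready for the next request. */
    static int example_reclock(struct nouveau_fb *pfb, u32 freq)
    {
    	int ret;

    	do {
    		ret = pfb->ram->calc(pfb, freq);
    		if (ret == 0)
    			ret = pfb->ram->prog(pfb);
    	} while (ret > 0);

    	pfb->ram->tidy(pfb);
    	return ret;
    }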
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 6565f3dbbe04..14706d9842ca 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -22,7 +22,24 @@
* Authors: Ben Skeggs
*/
-#include <subdev/instmem.h>
+#include "priv.h"
+
+/******************************************************************************
+ * instmem object base implementation
+ *****************************************************************************/
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+ struct nouveau_instmem *imem = (void *)object->engine;
+ struct nouveau_instobj *iobj = (void *)object;
+
+ mutex_lock(&nv_subdev(imem)->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&nv_subdev(imem)->mutex);
+
+ return nouveau_object_destroy(&iobj->base);
+}
int
nouveau_instobj_create_(struct nouveau_object *parent,
@@ -46,73 +63,26 @@ nouveau_instobj_create_(struct nouveau_object *parent,
return 0;
}
-void
-nouveau_instobj_destroy(struct nouveau_instobj *iobj)
-{
- struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+/******************************************************************************
+ * instmem subdev base implementation
+ *****************************************************************************/
- mutex_lock(&subdev->mutex);
- list_del(&iobj->head);
- mutex_unlock(&subdev->mutex);
-
- return nouveau_object_destroy(&iobj->base);
-}
-
-void
-_nouveau_instobj_dtor(struct nouveau_object *object)
+static int
+nouveau_instmem_alloc(struct nouveau_instmem *imem,
+ struct nouveau_object *parent, u32 size, u32 align,
+ struct nouveau_object **pobject)
{
- struct nouveau_instobj *iobj = (void *)object;
- return nouveau_instobj_destroy(iobj);
+ struct nouveau_object *engine = nv_object(imem);
+ struct nouveau_instmem_impl *impl = (void *)engine->oclass;
+ struct nouveau_instobj_args args = { .size = size, .align = align };
+ return nouveau_object_ctor(parent, engine, impl->instobj, &args,
+ sizeof(args), pobject);
}
int
-nouveau_instmem_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- int length, void **pobject)
-{
- struct nouveau_instmem *imem;
- int ret;
-
- ret = nouveau_subdev_create_(parent, engine, oclass, 0,
- "INSTMEM", "instmem", length, pobject);
- imem = *pobject;
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&imem->list);
- return 0;
-}
-
-int
-nouveau_instmem_init(struct nouveau_instmem *imem)
-{
- struct nouveau_instobj *iobj;
- int ret, i;
-
- ret = nouveau_subdev_init(&imem->base);
- if (ret)
- return ret;
-
- mutex_lock(&imem->base.mutex);
-
- list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- nv_wo32(iobj, i, iobj->suspend[i / 4]);
- vfree(iobj->suspend);
- iobj->suspend = NULL;
- }
- }
-
- mutex_unlock(&imem->base.mutex);
-
- return 0;
-}
-
-int
-nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
{
+ struct nouveau_instmem *imem = (void *)object;
struct nouveau_instobj *iobj;
int i, ret = 0;
@@ -143,12 +113,45 @@ int
_nouveau_instmem_init(struct nouveau_object *object)
{
struct nouveau_instmem *imem = (void *)object;
- return nouveau_instmem_init(imem);
+ struct nouveau_instobj *iobj;
+ int ret, i;
+
+ ret = nouveau_subdev_init(&imem->base);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imem->base.mutex);
+
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend) {
+ for (i = 0; i < iobj->size; i += 4)
+ nv_wo32(iobj, i, iobj->suspend[i / 4]);
+ vfree(iobj->suspend);
+ iobj->suspend = NULL;
+ }
+ }
+
+ mutex_unlock(&imem->base.mutex);
+
+ return 0;
}
int
-_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+nouveau_instmem_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int length, void **pobject)
{
- struct nouveau_instmem *imem = (void *)object;
- return nouveau_instmem_fini(imem, suspend);
+ struct nouveau_instmem *imem;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+ "INSTMEM", "instmem", length, pobject);
+ imem = *pobject;
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&imem->list);
+ imem->alloc = nouveau_instmem_alloc;
+ return 0;
}
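With the constructor/alloc path made generic above, an implementation only advertises its instobj class via struct nouveau_instmem_impl, and nouveau_instmem_alloc() constructs it with size/align carried in struct nouveau_instobj_args instead of casting align through the data pointer. A small, illustrative caller (not from this patch), assuming the imem->alloc hook installed in nouveau_instmem_create_():

    /* Allocate a 4KiB, 256-byte-aligned instance object through the
     * subdev's alloc hook; the hook constructs whichever instobj class
     * the implementation named in nouveau_instmem_impl::instobj. */
    static int example_instobj_alloc(struct nouveau_instmem *imem,
    				     struct nouveau_object *parent,
    				     struct nouveau_object **pobject)
    {
    	return imem->alloc(imem, parent, 0x1000, 0x100, pobject);
    }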
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index 795393d7b2f5..7b64befee48f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -22,10 +22,35 @@
* Authors: Ben Skeggs
*/
-#include <subdev/fb.h>
-
#include "nv04.h"
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
+
+static u32
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv04_instobj_priv *node = (void *)object;
+ return nv_ro32(object->engine, node->mem->offset + addr);
+}
+
+static void
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv04_instobj_priv *node = (void *)object;
+ nv_wo32(object->engine, node->mem->offset + addr, data);
+}
+
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+ struct nv04_instmem_priv *priv = (void *)object->engine;
+ struct nv04_instobj_priv *node = (void *)object;
+ nouveau_mm_free(&priv->heap, &node->mem);
+ nouveau_instobj_destroy(&node->base);
+}
+
static int
nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -33,18 +58,19 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nv04_instmem_priv *priv = (void *)engine;
struct nv04_instobj_priv *node;
- int ret, align;
+ struct nouveau_instobj_args *args = data;
+ int ret;
- align = (unsigned long)data;
- if (!align)
- align = 1;
+ if (!args->align)
+ args->align = 1;
ret = nouveau_instobj_create(parent, engine, oclass, &node);
*pobject = nv_object(node);
if (ret)
return ret;
- ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
+ ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size,
+ args->align, &node->mem);
if (ret)
return ret;
@@ -53,32 +79,9 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static void
-nv04_instobj_dtor(struct nouveau_object *object)
-{
- struct nv04_instmem_priv *priv = (void *)object->engine;
- struct nv04_instobj_priv *node = (void *)object;
- nouveau_mm_free(&priv->heap, &node->mem);
- nouveau_instobj_destroy(&node->base);
-}
-
-static u32
-nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nv04_instobj_priv *node = (void *)object;
- return nv_ro32(object->engine, node->mem->offset + addr);
-}
-
-static void
-nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nv04_instobj_priv *node = (void *)object;
- nv_wo32(object->engine, node->mem->offset + addr, data);
-}
-
-static struct nouveau_oclass
+struct nouveau_instobj_impl
nv04_instobj_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instobj_ctor,
.dtor = nv04_instobj_dtor,
.init = _nouveau_instobj_init,
@@ -88,19 +91,34 @@ nv04_instobj_oclass = {
},
};
-int
-nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
- u32 size, u32 align, struct nouveau_object **pobject)
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
- struct nouveau_object *engine = nv_object(imem);
- int ret;
+ return nv_rd32(object, 0x700000 + addr);
+}
- ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
- (void *)(unsigned long)align, size, pobject);
- if (ret)
- return ret;
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ return nv_wr32(object, 0x700000 + addr, data);
+}
- return 0;
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ nouveau_gpuobj_ref(NULL, &priv->ramfc);
+ nouveau_gpuobj_ref(NULL, &priv->ramro);
+ nouveau_ramht_ref(NULL, &priv->ramht);
+ nouveau_gpuobj_ref(NULL, &priv->vbios);
+ nouveau_mm_fini(&priv->heap);
+ if (priv->iomem)
+ iounmap(priv->iomem);
+ nouveau_instmem_destroy(&priv->base);
}
static int
@@ -118,7 +136,6 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
/* PRAMIN aperture maps over the end of VRAM, reserve it */
priv->base.reserved = 512 * 1024;
- priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
@@ -150,36 +167,10 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-void
-nv04_instmem_dtor(struct nouveau_object *object)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- nouveau_gpuobj_ref(NULL, &priv->ramfc);
- nouveau_gpuobj_ref(NULL, &priv->ramro);
- nouveau_ramht_ref(NULL, &priv->ramht);
- nouveau_gpuobj_ref(NULL, &priv->vbios);
- nouveau_mm_fini(&priv->heap);
- if (priv->iomem)
- iounmap(priv->iomem);
- nouveau_instmem_destroy(&priv->base);
-}
-
-static u32
-nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
- return nv_rd32(object, 0x700000 + addr);
-}
-
-static void
-nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- return nv_wr32(object, 0x700000 + addr, data);
-}
-
-struct nouveau_oclass
-nv04_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
@@ -187,4 +178,5 @@ nv04_instmem_oclass = {
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
},
-};
+ .instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index b15b61310236..095fbc6fc099 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -5,7 +5,9 @@
#include <core/ramht.h>
#include <core/mm.h>
-#include <subdev/instmem.h>
+#include "priv.h"
+
+extern struct nouveau_instobj_impl nv04_instobj_oclass;
struct nv04_instmem_priv {
struct nouveau_instmem base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index b10a143787a7..ec0b9661d614 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -26,6 +26,24 @@
#include "nv04.h"
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv04_instmem_priv *priv = (void *)object;
+ iowrite32_native(data, priv->iomem + addr);
+}
+
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -69,7 +87,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.reserved += 512 * 1024; /* object storage */
priv->base.reserved = round_up(priv->base.reserved, 4096);
- priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
@@ -106,24 +123,10 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static u32
-nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- return ioread32_native(priv->iomem + addr);
-}
-
-static void
-nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nv04_instmem_priv *priv = (void *)object;
- iowrite32_native(data, priv->iomem + addr);
-}
-
-struct nouveau_oclass
-nv40_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x40),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x40),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
@@ -131,4 +134,5 @@ nv40_instmem_oclass = {
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
},
-};
+ .instobj = &nv04_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 97bc5dff93e7..7cb3b098a08d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -22,11 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <subdev/instmem.h>
#include <subdev/fb.h>
-
#include <core/mm.h>
+#include "priv.h"
+
struct nv50_instmem_priv {
struct nouveau_instmem base;
spinlock_t lock;
@@ -38,42 +38,9 @@ struct nv50_instobj_priv {
struct nouveau_mem *mem;
};
-static int
-nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nv50_instobj_priv *node;
- u32 align = (unsigned long)data;
- int ret;
-
- size = max((size + 4095) & ~4095, (u32)4096);
- align = max((align + 4095) & ~4095, (u32)4096);
-
- ret = nouveau_instobj_create(parent, engine, oclass, &node);
- *pobject = nv_object(node);
- if (ret)
- return ret;
-
- ret = pfb->ram->get(pfb, size, align, 0, 0x800, &node->mem);
- if (ret)
- return ret;
-
- node->base.addr = node->mem->offset;
- node->base.size = node->mem->size << 12;
- node->mem->page_shift = 12;
- return 0;
-}
-
-static void
-nv50_instobj_dtor(struct nouveau_object *object)
-{
- struct nv50_instobj_priv *node = (void *)object;
- struct nouveau_fb *pfb = nouveau_fb(object);
- pfb->ram->put(pfb, &node->mem);
- nouveau_instobj_destroy(&node->base);
-}
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
static u32
nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
@@ -113,9 +80,46 @@ nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static struct nouveau_oclass
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+ struct nv50_instobj_priv *node = (void *)object;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ pfb->ram->put(pfb, &node->mem);
+ nouveau_instobj_destroy(&node->base);
+}
+
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_instobj_args *args = data;
+ struct nv50_instobj_priv *node;
+ int ret;
+
+ args->size = max((args->size + 4095) & ~4095, (u32)4096);
+ args->align = max((args->align + 4095) & ~4095, (u32)4096);
+
+ ret = nouveau_instobj_create(parent, engine, oclass, &node);
+ *pobject = nv_object(node);
+ if (ret)
+ return ret;
+
+ ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+ if (ret)
+ return ret;
+
+ node->base.addr = node->mem->offset;
+ node->base.size = node->mem->size << 12;
+ node->mem->page_shift = 12;
+ return 0;
+}
+
+static struct nouveau_instobj_impl
nv50_instobj_oclass = {
- .ofuncs = &(struct nouveau_ofuncs) {
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_instobj_ctor,
.dtor = nv50_instobj_dtor,
.init = _nouveau_instobj_init,
@@ -125,13 +129,16 @@ nv50_instobj_oclass = {
},
};
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
static int
-nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
- u32 size, u32 align, struct nouveau_object **pobject)
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
{
- struct nouveau_object *engine = nv_object(imem);
- return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
- (void *)(unsigned long)align, size, pobject);
+ struct nv50_instmem_priv *priv = (void *)object;
+ priv->addr = ~0ULL;
+ return nouveau_instmem_fini(&priv->base, suspend);
}
static int
@@ -148,25 +155,17 @@ nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
spin_lock_init(&priv->lock);
- priv->base.alloc = nv50_instmem_alloc;
return 0;
}
-static int
-nv50_instmem_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv50_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nouveau_instmem_fini(&priv->base, suspend);
-}
-
-struct nouveau_oclass
-nv50_instmem_oclass = {
- .handle = NV_SUBDEV(INSTMEM, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_instmem_oclass = &(struct nouveau_instmem_impl) {
+ .base.handle = NV_SUBDEV(INSTMEM, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_instmem_ctor,
.dtor = _nouveau_instmem_dtor,
.init = _nouveau_instmem_init,
.fini = nv50_instmem_fini,
},
-};
+ .instobj = &nv50_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
new file mode 100644
index 000000000000..8d67dedc5bb2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
@@ -0,0 +1,56 @@
+#ifndef __NVKM_INSTMEM_PRIV_H__
+#define __NVKM_INSTMEM_PRIV_H__
+
+#include <subdev/instmem.h>
+
+struct nouveau_instobj_impl {
+ struct nouveau_oclass base;
+};
+
+struct nouveau_instobj_args {
+ u32 size;
+ u32 align;
+};
+
+#define nouveau_instobj_create(p,e,o,d) \
+ nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_destroy(p) ({ \
+ struct nouveau_instobj *iobj = (p); \
+ _nouveau_instobj_dtor(nv_object(iobj)); \
+})
+#define nouveau_instobj_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem_impl {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *instobj;
+};
+
+#define nouveau_instmem_create(p,e,o,d) \
+ nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_instmem_init(p) ({ \
+ struct nouveau_instmem *imem = (p); \
+ _nouveau_instmem_init(nv_object(imem)); \
+})
+#define nouveau_instmem_fini(p,s) ({ \
+ struct nouveau_instmem *imem = (p); \
+ _nouveau_instmem_fini(nv_object(imem), (s)); \
+})
+
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31606c1..81a408e7d034 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
void nv40_mc_msi_rearm(struct nouveau_mc *);
+int nv44_mc_init(struct nouveau_object *object);
int nv50_mc_init(struct nouveau_object *);
extern const struct nouveau_mc_intr nv50_mc_intr[];
extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c6c4f2..cc4d0d2d886e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
#include "nv04.h"
-static int
+int
nv44_mc_init(struct nouveau_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 000000000000..a75c35ccf25c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+
+#include "nv04.h"
+
+static void
+nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr08(priv, 0x088050, 0xff);
+}
+
+struct nouveau_oclass *
+nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x4c),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv44_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv4c_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c02b4763a2d5..34472d317097 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -32,6 +32,7 @@ nvc0_mc_intr[] = {
{ 0x00000080, NVDEV_ENGINE_COPY2 },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
+ { 0x00002000, NVDEV_SUBDEV_FB },
{ 0x00008000, NVDEV_ENGINE_BSP },
{ 0x00040000, NVDEV_SUBDEV_THERM },
{ 0x00020000, NVDEV_ENGINE_VP },
@@ -40,6 +41,7 @@ nvc0_mc_intr[] = {
{ 0x01000000, NVDEV_SUBDEV_PWR },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
{ 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
{ 0x80000000, NVDEV_ENGINE_SW },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 129120473f6c..13c5af88a601 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -87,55 +87,39 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
};
u32 mxms_args[] = { 0x00000000 };
- union acpi_object args[4] = {
- /* _DSM MUID */
- { .buffer.type = 3,
- .buffer.length = sizeof(muid),
- .buffer.pointer = muid,
- },
- /* spec says this can be zero to mean "highest revision", but
- * of course there's at least one bios out there which fails
- * unless you pass in exactly the version it supports..
- */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
- },
- /* MXMS function */
- { .integer.type = ACPI_TYPE_INTEGER,
- .integer.value = 0x00000010,
- },
- /* Pointer to MXMS arguments */
- { .buffer.type = ACPI_TYPE_BUFFER,
- .buffer.length = sizeof(mxms_args),
- .buffer.pointer = (char *)mxms_args,
- },
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(mxms_args),
+ .buffer.pointer = (char *)mxms_args,
};
- struct acpi_object_list list = { ARRAY_SIZE(args), args };
- struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_handle handle;
- int ret;
+ int rev;
handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
- ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
- if (ret) {
- nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
+ /*
+ * spec says this can be zero to mean "highest revision", but
+ * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports..
+ */
+ rev = (version & 0xf0) << 4 | (version & 0x0f);
+ obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+ if (!obj) {
+ nv_debug(mxm, "DSM MXMS failed\n");
return false;
}
- obj = retn.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
mxm->mxms = kmemdup(obj->buffer.pointer,
obj->buffer.length, GFP_KERNEL);
- } else
- if (obj->type == ACPI_TYPE_INTEGER) {
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
}
- kfree(obj);
+ ACPI_FREE(obj);
return mxm->mxms != NULL;
}
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
new file mode 100644
index 000000000000..757dda700024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define T_TIMEOUT 2200000
+#define T_RISEFALL 1000
+#define T_HOLD 5000
+
+#ifdef INCLUDE_PROC
+process(PROC_I2C_, #i2c_init, #i2c_recv)
+#endif
+
+/******************************************************************************
+ * I2C_ data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+i2c_scl_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SCL
+.b32 NV_PPWR_OUTPUT_I2C_1_SCL
+.b32 NV_PPWR_OUTPUT_I2C_2_SCL
+.b32 NV_PPWR_OUTPUT_I2C_3_SCL
+.b32 NV_PPWR_OUTPUT_I2C_4_SCL
+.b32 NV_PPWR_OUTPUT_I2C_5_SCL
+.b32 NV_PPWR_OUTPUT_I2C_6_SCL
+.b32 NV_PPWR_OUTPUT_I2C_7_SCL
+.b32 NV_PPWR_OUTPUT_I2C_8_SCL
+.b32 NV_PPWR_OUTPUT_I2C_9_SCL
+i2c_sda_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SDA
+.b32 NV_PPWR_OUTPUT_I2C_1_SDA
+.b32 NV_PPWR_OUTPUT_I2C_2_SDA
+.b32 NV_PPWR_OUTPUT_I2C_3_SDA
+.b32 NV_PPWR_OUTPUT_I2C_4_SDA
+.b32 NV_PPWR_OUTPUT_I2C_5_SDA
+.b32 NV_PPWR_OUTPUT_I2C_6_SDA
+.b32 NV_PPWR_OUTPUT_I2C_7_SDA
+.b32 NV_PPWR_OUTPUT_I2C_8_SDA
+.b32 NV_PPWR_OUTPUT_I2C_9_SDA
+#if NVKM_PPWR_CHIPSET < GF119
+i2c_ctrl:
+.b32 0x00e138
+.b32 0x00e150
+.b32 0x00e168
+.b32 0x00e180
+.b32 0x00e254
+.b32 0x00e274
+.b32 0x00e764
+.b32 0x00e780
+.b32 0x00e79c
+.b32 0x00e7b8
+#endif
+#endif
+
+/******************************************************************************
+ * I2C_ code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// $r3 - value
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_drive_scl:
+ cmp b32 $r3 0
+ bra e #i2c_drive_scl_lo
+ nv_iowr(NV_PPWR_OUTPUT_SET, $r1)
+ ret
+ i2c_drive_scl_lo:
+ nv_iowr(NV_PPWR_OUTPUT_CLR, $r1)
+ ret
+
+i2c_drive_sda:
+ cmp b32 $r3 0
+ bra e #i2c_drive_sda_lo
+ nv_iowr(NV_PPWR_OUTPUT_SET, $r2)
+ ret
+ i2c_drive_sda_lo:
+ nv_iowr(NV_PPWR_OUTPUT_CLR, $r2)
+ ret
+
+i2c_sense_scl:
+ bclr $flags $p1
+ nv_iord($r3, NV_PPWR_INPUT)
+ and $r3 $r1
+ bra z #i2c_sense_scl_done
+ bset $flags $p1
+ i2c_sense_scl_done:
+ ret
+
+i2c_sense_sda:
+ bclr $flags $p1
+ nv_iord($r3, NV_PPWR_INPUT)
+ and $r3 $r2
+ bra z #i2c_sense_sda_done
+ bset $flags $p1
+ i2c_sense_sda_done:
+ ret
+
+#define i2c_drive_scl(v) /*
+*/ mov $r3 (v) /*
+*/ call(i2c_drive_scl)
+#define i2c_drive_sda(v) /*
+*/ mov $r3 (v) /*
+*/ call(i2c_drive_sda)
+#define i2c_sense_scl() /*
+*/ call(i2c_sense_scl)
+#define i2c_sense_sda() /*
+*/ call(i2c_sense_sda)
+#define i2c_delay(v) /*
+*/ mov $r14 (v) /*
+*/ call(nsec)
+
+#define i2c_trace_init() /*
+*/ imm32($r6, 0x10000000) /*
+*/ sub b32 $r7 $r6 1 /*
+*/
+#define i2c_trace_down() /*
+*/ shr b32 $r6 4 /*
+*/ push $r5 /*
+*/ shl b32 $r5 $r6 4 /*
+*/ sub b32 $r5 $r6 /*
+*/ not b32 $r5 /*
+*/ and $r7 $r5 /*
+*/ pop $r5 /*
+*/
+#define i2c_trace_exit() /*
+*/ shl b32 $r6 4 /*
+*/
+#define i2c_trace_next() /*
+*/ add b32 $r7 $r6 /*
+*/
+#define i2c_trace_call(func) /*
+*/ i2c_trace_next() /*
+*/ i2c_trace_down() /*
+*/ call(func) /*
+*/ i2c_trace_exit() /*
+*/
+
+i2c_raise_scl:
+ push $r4
+ mov $r4 (T_TIMEOUT / T_RISEFALL)
+ i2c_drive_scl(1)
+ i2c_raise_scl_wait:
+ i2c_delay(T_RISEFALL)
+ i2c_sense_scl()
+ bra $p1 #i2c_raise_scl_done
+ sub b32 $r4 1
+ bra nz #i2c_raise_scl_wait
+ i2c_raise_scl_done:
+ pop $r4
+ ret
+
+i2c_start:
+ i2c_sense_scl()
+ bra not $p1 #i2c_start_rep
+ i2c_sense_sda()
+ bra not $p1 #i2c_start_rep
+ bra #i2c_start_send
+ i2c_start_rep:
+ i2c_drive_scl(0)
+ i2c_drive_sda(1)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_start_out
+ i2c_start_send:
+ i2c_drive_sda(0)
+ i2c_delay(T_HOLD)
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ i2c_start_out:
+ ret
+
+i2c_stop:
+ i2c_drive_scl(0)
+ i2c_drive_sda(0)
+ i2c_delay(T_RISEFALL)
+ i2c_drive_scl(1)
+ i2c_delay(T_HOLD)
+ i2c_drive_sda(1)
+ i2c_delay(T_HOLD)
+ ret
+
+// $r3 - value
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_bitw:
+ call(i2c_drive_sda)
+ i2c_delay(T_RISEFALL)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_bitw_out
+ i2c_delay(T_HOLD)
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ i2c_bitw_out:
+ ret
+
+// $r3 - value (out)
+// $r2 - sda line
+// $r1 - scl line
+// $r0 - zero
+i2c_bitr:
+ i2c_drive_sda(1)
+ i2c_delay(T_RISEFALL)
+ i2c_trace_call(i2c_raise_scl)
+ bra not $p1 #i2c_bitr_done
+ i2c_sense_sda()
+ i2c_drive_scl(0)
+ i2c_delay(T_HOLD)
+ xbit $r3 $flags $p1
+ bset $flags $p1
+ i2c_bitr_done:
+ ret
+
+i2c_get_byte:
+ mov $r5 0
+ mov $r4 8
+ i2c_get_byte_next:
+ shl b32 $r5 1
+ i2c_trace_call(i2c_bitr)
+ bra not $p1 #i2c_get_byte_done
+ or $r5 $r3
+ sub b32 $r4 1
+ bra nz #i2c_get_byte_next
+ mov $r3 1
+ i2c_trace_call(i2c_bitw)
+ i2c_get_byte_done:
+ ret
+
+i2c_put_byte:
+ mov $r4 8
+ i2c_put_byte_next:
+ sub b32 $r4 1
+ xbit $r3 $r5 $r4
+ i2c_trace_call(i2c_bitw)
+ bra not $p1 #i2c_put_byte_done
+ cmp b32 $r4 0
+ bra ne #i2c_put_byte_next
+ i2c_trace_call(i2c_bitr)
+ bra not $p1 #i2c_put_byte_done
+ i2c_trace_next()
+ cmp b32 $r3 1
+ bra ne #i2c_put_byte_done
+ bclr $flags $p1 // nack
+ i2c_put_byte_done:
+ ret
+
+i2c_addr:
+ i2c_trace_call(i2c_start)
+ bra not $p1 #i2c_addr_done
+ extr $r3 $r12 I2C__MSG_DATA0_ADDR
+ shl b32 $r3 1
+ or $r5 $r3
+ i2c_trace_call(i2c_put_byte)
+ i2c_addr_done:
+ ret
+
+i2c_acquire_addr:
+ extr $r14 $r12 I2C__MSG_DATA0_PORT
+#if NVKM_PPWR_CHIPSET < GF119
+ shl b32 $r14 2
+ add b32 $r14 #i2c_ctrl
+ ld b32 $r14 D[$r14]
+#else
+ shl b32 $r14 5
+ add b32 $r14 0x00d014
+#endif
+ ret
+
+i2c_acquire:
+ call(i2c_acquire_addr)
+ call(rd32)
+ bset $r13 3
+ call(wr32)
+ ret
+
+i2c_release:
+ call(i2c_acquire_addr)
+ call(rd32)
+ bclr $r13 3
+ call(wr32)
+ ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0 - zero
+i2c_recv:
+ bclr $flags $p1
+ extr $r1 $r12 I2C__MSG_DATA0_PORT
+ shl b32 $r1 2
+ cmp b32 $r1 (#i2c_sda_map - #i2c_scl_map)
+ bra ge #i2c_recv_done
+ add b32 $r3 $r1 #i2c_sda_map
+ ld b32 $r2 D[$r3]
+ add b32 $r3 $r1 #i2c_scl_map
+ ld b32 $r1 D[$r3]
+
+ bset $flags $p2
+ push $r13
+ push $r14
+
+ push $r13
+ i2c_trace_init()
+ i2c_trace_call(i2c_acquire)
+ pop $r13
+
+ cmp b32 $r13 I2C__MSG_RD08
+ bra ne #i2c_recv_not_rd08
+ mov $r5 0
+ i2c_trace_call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r12 I2C__MSG_DATA0_RD08_REG
+ i2c_trace_call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ mov $r5 1
+ i2c_trace_call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ i2c_trace_call(i2c_get_byte)
+ bra not $p1 #i2c_recv_done
+ ins $r11 $r5 I2C__MSG_DATA1_RD08_VAL
+ i2c_trace_call(i2c_stop)
+ mov b32 $r11 $r5
+ clear b32 $r7
+ bra #i2c_recv_done
+
+ i2c_recv_not_rd08:
+ cmp b32 $r13 I2C__MSG_WR08
+ bra ne #i2c_recv_not_wr08
+ mov $r5 0
+ call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r12 I2C__MSG_DATA0_WR08_REG
+ call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ mov $r5 0
+ call(i2c_addr)
+ bra not $p1 #i2c_recv_done
+ extr $r5 $r11 I2C__MSG_DATA1_WR08_VAL
+ call(i2c_put_byte)
+ bra not $p1 #i2c_recv_done
+ call(i2c_stop)
+ clear b32 $r7
+ extr $r5 $r12 I2C__MSG_DATA0_WR08_SYNC
+ bra nz #i2c_recv_done
+ bclr $flags $p2
+ bra #i2c_recv_done
+
+ i2c_recv_not_wr08:
+
+ i2c_recv_done:
+ extr $r14 $r12 I2C__MSG_DATA0_PORT
+ call(i2c_release)
+
+ pop $r14
+ pop $r13
+ bra not $p2 #i2c_recv_exit
+ mov b32 $r12 $r7
+ call(send)
+
+ i2c_recv_exit:
+ ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r0 - zero
+i2c_init:
+ ret
+#endif
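i2c_.fuc above bit-bangs an I2C master over the PPWR GPIO lines: SCL/SDA are driven through NV_PPWR_OUTPUT_SET/CLR, sensed through NV_PPWR_INPUT, and i2c_raise_scl waits out clock stretching up to T_TIMEOUT. For orientation only, a rough C equivalent of the firmware's byte-write-plus-ACK sequence (i2c_bitw/i2c_put_byte); set_scl(), set_sda(), get_sda() and delay_ns() are hypothetical stand-ins for those register accesses and are not part of the patch:

    /* Hypothetical GPIO helpers; on the real hardware these become
     * NV_PPWR_OUTPUT_SET/CLR writes and an NV_PPWR_INPUT read. */
    static void set_scl(int hi) { /* board-specific */ }
    static void set_sda(int hi) { /* board-specific */ }
    static int  get_sda(void)   { return 1; /* board-specific */ }
    static void delay_ns(unsigned int ns) { /* board-specific */ }

    #define T_RISEFALL 1000
    #define T_HOLD     5000

    /* Clock one byte out MSB-first, then sample the ACK bit, mirroring
     * i2c_bitw/i2c_put_byte in the firmware. */
    static int i2c_put_byte(unsigned char byte)
    {
    	int i, ack;

    	for (i = 7; i >= 0; i--) {
    		set_sda((byte >> i) & 1);
    		delay_ns(T_RISEFALL);
    		set_scl(1);	/* the firmware also waits here for SCL to
    				 * actually rise, allowing clock stretching */
    		delay_ns(T_HOLD);
    		set_scl(0);
    		delay_ns(T_HOLD);
    	}

    	set_sda(1);		/* release SDA so the slave can ACK */
    	delay_ns(T_RISEFALL);
    	set_scl(1);
    	delay_ns(T_HOLD);
    	ack = !get_sda();	/* ACK = SDA pulled low by the slave */
    	set_scl(0);
    	delay_ns(T_HOLD);
    	return ack ? 0 : -1;
    }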
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
index 0a7b05fa5c11..8f29badd785f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -51,12 +51,12 @@ time_next: .b32 0
// $r0 - zero
rd32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
- mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
- sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
- nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
+ sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
rd32_wait:
- nv_iord($r14, NV_PPWR_MMIO_CTRL)
- and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ nv_iord($r13, NV_PPWR_MMIO_CTRL)
+ and $r13 NV_PPWR_MMIO_CTRL_STATUS
bra nz #rd32_wait
nv_iord($r13, NV_PPWR_MMIO_DATA)
ret
@@ -70,23 +70,25 @@ rd32:
wr32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
nv_iowr(NV_PPWR_MMIO_DATA, $r13)
- mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
- or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
- sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+ mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
+ or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
+ sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
#ifdef NVKM_FALCON_MMIO_TRAP
- mov $r8 NV_PPWR_INTR_TRIGGER_USER1
- nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+ push $r13
+ mov $r13 NV_PPWR_INTR_TRIGGER_USER1
+ nv_iowr(NV_PPWR_INTR_TRIGGER, $r13)
wr32_host:
- nv_iord($r8, NV_PPWR_INTR)
- and $r8 NV_PPWR_INTR_USER1
+ nv_iord($r13, NV_PPWR_INTR)
+ and $r13 NV_PPWR_INTR_USER1
bra nz #wr32_host
+ pop $r13
#endif
- nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+ nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
wr32_wait:
- nv_iord($r14, NV_PPWR_MMIO_CTRL)
- and $r14 NV_PPWR_MMIO_CTRL_STATUS
+ nv_iord($r13, NV_PPWR_MMIO_CTRL)
+ and $r13 NV_PPWR_MMIO_CTRL_STATUS
bra nz #wr32_wait
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index 2a74ea907604..e2a63ac5422b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -83,6 +83,50 @@
#define NV_PPWR_MMIO_CTRL_OP_WR 0x00000002
#define NV_PPWR_OUTPUT 0x07c0
#define NV_PPWR_OUTPUT_FB_PAUSE 0x00000004
+#if NVKM_PPWR_CHIPSET < GF119
+#define NV_PPWR_OUTPUT_I2C_3_SCL 0x00000100
+#define NV_PPWR_OUTPUT_I2C_3_SDA 0x00000200
+#define NV_PPWR_OUTPUT_I2C_0_SCL 0x00001000
+#define NV_PPWR_OUTPUT_I2C_0_SDA 0x00002000
+#define NV_PPWR_OUTPUT_I2C_1_SCL 0x00004000
+#define NV_PPWR_OUTPUT_I2C_1_SDA 0x00008000
+#define NV_PPWR_OUTPUT_I2C_2_SCL 0x00010000
+#define NV_PPWR_OUTPUT_I2C_2_SDA 0x00020000
+#define NV_PPWR_OUTPUT_I2C_4_SCL 0x00040000
+#define NV_PPWR_OUTPUT_I2C_4_SDA 0x00080000
+#define NV_PPWR_OUTPUT_I2C_5_SCL 0x00100000
+#define NV_PPWR_OUTPUT_I2C_5_SDA 0x00200000
+#define NV_PPWR_OUTPUT_I2C_6_SCL 0x00400000
+#define NV_PPWR_OUTPUT_I2C_6_SDA 0x00800000
+#define NV_PPWR_OUTPUT_I2C_7_SCL 0x01000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA 0x02000000
+#define NV_PPWR_OUTPUT_I2C_8_SCL 0x04000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA 0x08000000
+#define NV_PPWR_OUTPUT_I2C_9_SCL 0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000
+#else
+#define NV_PPWR_OUTPUT_I2C_0_SCL 0x00000400
+#define NV_PPWR_OUTPUT_I2C_1_SCL 0x00000800
+#define NV_PPWR_OUTPUT_I2C_2_SCL 0x00001000
+#define NV_PPWR_OUTPUT_I2C_3_SCL 0x00002000
+#define NV_PPWR_OUTPUT_I2C_4_SCL 0x00004000
+#define NV_PPWR_OUTPUT_I2C_5_SCL 0x00008000
+#define NV_PPWR_OUTPUT_I2C_6_SCL 0x00010000
+#define NV_PPWR_OUTPUT_I2C_7_SCL 0x00020000
+#define NV_PPWR_OUTPUT_I2C_8_SCL 0x00040000
+#define NV_PPWR_OUTPUT_I2C_9_SCL 0x00080000
+#define NV_PPWR_OUTPUT_I2C_0_SDA 0x00100000
+#define NV_PPWR_OUTPUT_I2C_1_SDA 0x00200000
+#define NV_PPWR_OUTPUT_I2C_2_SDA 0x00400000
+#define NV_PPWR_OUTPUT_I2C_3_SDA 0x00800000
+#define NV_PPWR_OUTPUT_I2C_4_SDA 0x01000000
+#define NV_PPWR_OUTPUT_I2C_5_SDA 0x02000000
+#define NV_PPWR_OUTPUT_I2C_6_SDA 0x04000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA 0x08000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA 0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA 0x20000000
+#endif
+#define NV_PPWR_INPUT 0x07c4
#define NV_PPWR_OUTPUT_SET 0x07e0
#define NV_PPWR_OUTPUT_SET_FB_PAUSE 0x00000004
#define NV_PPWR_OUTPUT_CLR 0x07e4
@@ -125,6 +169,15 @@
*/ .b32 0 /*
*/ .skip 64
+#if NV_PPWR_CHIPSET < GK208
+#define imm32(reg,val) /*
+*/ movw reg ((val) & 0x0000ffff) /*
+*/ sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/ mov reg (val)
+#endif
+
#ifndef NVKM_FALCON_UNSHIFTED_IO
#define nv_iord(reg,ior) /*
*/ mov reg ior /*
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
index 947be536daef..17a8a383d91a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 9342e2d7d3b7..4bd43a99fdcc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -89,16 +89,9 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x54534554,
- 0x00000494,
- 0x00000475,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x5f433249,
+ 0x00000877,
+ 0x0000071e,
0x00000000,
0x00000000,
0x00000000,
@@ -111,16 +104,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x454c4449,
- 0x0000049f,
- 0x0000049d,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -128,17 +111,16 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x54534554,
+ 0x00000898,
+ 0x00000879,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
0x00000000,
-/* 0x0214: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -151,6 +133,9 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x454c4449,
+ 0x000008a3,
+ 0x000008a1,
0x00000000,
0x00000000,
0x00000000,
@@ -170,9 +155,12 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x026c: time_next */
0x00000000,
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,31 +192,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
- 0x00010000,
- 0x00000000,
- 0x000003a9,
-/* 0x0324: memx_func_next */
- 0x00000001,
- 0x00000000,
- 0x000003c7,
- 0x00000002,
- 0x00000002,
- 0x000003df,
- 0x00040003,
- 0x00000000,
- 0x00000407,
- 0x00010004,
- 0x00000000,
- 0x00000421,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
0x00000000,
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -261,10 +226,25 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0370: memx_func_head */
+ 0x00010000,
0x00000000,
+ 0x000003a9,
+/* 0x037c: memx_func_next */
+ 0x00000001,
0x00000000,
+ 0x000003c7,
+ 0x00000002,
+ 0x00000002,
+ 0x000003df,
+ 0x00040003,
0x00000000,
+ 0x00000407,
+ 0x00010004,
0x00000000,
+ 0x00000421,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +715,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -778,6 +757,29 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00000400,
+ 0x00000800,
+ 0x00001000,
+ 0x00002000,
+ 0x00004000,
+ 0x00008000,
+ 0x00010000,
+ 0x00020000,
+ 0x00040000,
+ 0x00080000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00100000,
+ 0x00200000,
+ 0x00400000,
+ 0x00800000,
+ 0x01000000,
+ 0x02000000,
+ 0x04000000,
+ 0x08000000,
+ 0x10000000,
+ 0x20000000,
0x00000000,
};
@@ -786,13 +788,13 @@ uint32_t nv108_pwr_code[] = {
/* 0x0004: rd32 */
0xf607a040,
0x04bd000e,
- 0xe3f0010e,
+ 0xd3f0010d,
0x07ac4001,
- 0xbd000ef6,
+ 0xbd000df6,
/* 0x0019: rd32_wait */
- 0x07ac4e04,
- 0xf100eecf,
- 0xf47000e4,
+ 0x07ac4d04,
+ 0xf100ddcf,
+ 0xf47000d4,
0xa44df61b,
0x00ddcf07,
/* 0x002e: wr32 */
@@ -800,14 +802,14 @@ uint32_t nv108_pwr_code[] = {
0x000ef607,
0xa44004bd,
0x000df607,
- 0x020e04bd,
- 0xf0f0e5f0,
- 0xac4001e3,
- 0x000ef607,
+ 0x020d04bd,
+ 0xf0f0d5f0,
+ 0xac4001d3,
+ 0x000df607,
/* 0x004e: wr32_wait */
- 0xac4e04bd,
- 0x00eecf07,
- 0x7000e4f1,
+ 0xac4d04bd,
+ 0x00ddcf07,
+ 0x7000d4f1,
0xf8f61bf4,
/* 0x005d: nsec */
0xcf2c0800,
@@ -832,20 +834,20 @@ uint32_t nv108_pwr_code[] = {
0x03e99800,
0xf40096b0,
0x0a98280b,
- 0x029abb84,
+ 0x029abb9a,
0x0d0e1cf4,
0x01de7e01,
0xf494bd00,
/* 0x00b2: intr_watchdog_next_time */
0x0a98140e,
- 0x00a6b085,
+ 0x00a6b09b,
0xa6080bf4,
0x061cf49a,
/* 0x00c0: intr_watchdog_next_time_set */
/* 0x00c3: intr_watchdog_next_proc */
- 0xb58509b5,
+ 0xb59b09b5,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc81bf402,
/* 0x00d2: intr */
0x00f900f8,
@@ -862,15 +864,15 @@ uint32_t nv108_pwr_code[] = {
0x080804bd,
0xc40088cf,
0x0bf40289,
- 0x8500b51f,
+ 0x9b00b51f,
0x957e580e,
0x09980000,
- 0x0096b085,
+ 0x0096b09b,
0x000d0bf4,
0x0009f634,
0x09b504bd,
/* 0x0125: intr_skip_watchdog */
- 0x0089e484,
+ 0x0089e49a,
0x360bf408,
0xcf068849,
0x9ac40099,
@@ -918,7 +920,7 @@ uint32_t nv108_pwr_code[] = {
/* 0x01c6: timer_reset */
0x3400161e,
0xbd000ef6,
- 0x840eb504,
+ 0x9a0eb504,
/* 0x01d0: timer_enable */
0x38000108,
0xbd0008f6,
@@ -949,7 +951,7 @@ uint32_t nv108_pwr_code[] = {
0xa6008a98,
0x100bf4ae,
0xb15880b6,
- 0xf4021086,
+ 0xf4026886,
0x32f4f11b,
/* 0x0239: find_done */
0xfc8eb201,
@@ -1009,7 +1011,7 @@ uint32_t nv108_pwr_code[] = {
0x0bf412a6,
0x071ec42e,
0xb704ee94,
- 0x980218e0,
+ 0x980270e0,
0xec9803eb,
0x01ed9802,
0x7e00ee98,
@@ -1031,7 +1033,7 @@ uint32_t nv108_pwr_code[] = {
0xf412a608,
0x23c4ef0b,
0x0434b607,
- 0x029830b7,
+ 0x02f030b7,
0xb5033bb5,
0x3db5023c,
0x003eb501,
@@ -1044,11 +1046,11 @@ uint32_t nv108_pwr_code[] = {
/* 0x0379: host_init */
0x00804100,
0xf11014b6,
- 0x40021815,
+ 0x40027015,
0x01f604d0,
0x4104bd00,
0x14b60080,
- 0x9815f110,
+ 0xf015f110,
0x04dc4002,
0xbd0001f6,
0x40010104,
@@ -1101,13 +1103,13 @@ uint32_t nv108_pwr_code[] = {
0x001398b2,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12a655f9,
0xfced1ef4,
0x7ee0fcd0,
0xf800023f,
/* 0x0455: memx_info */
- 0x03544c00,
+ 0x03ac4c00,
0x7e08004b,
0xf800023f,
/* 0x0461: memx_recv */
@@ -1119,7 +1121,301 @@ uint32_t nv108_pwr_code[] = {
/* 0x0471: perf_recv */
/* 0x0473: perf_init */
0xf800f800,
-/* 0x0475: test_recv */
+/* 0x0475: i2c_drive_scl */
+ 0x0036b000,
+ 0x400d0bf4,
+ 0x01f607e0,
+ 0xf804bd00,
+/* 0x0485: i2c_drive_scl_lo */
+ 0x07e44000,
+ 0xbd0001f6,
+/* 0x048f: i2c_drive_sda */
+ 0xb000f804,
+ 0x0bf40036,
+ 0x07e0400d,
+ 0xbd0002f6,
+/* 0x049f: i2c_drive_sda_lo */
+ 0x4000f804,
+ 0x02f607e4,
+ 0xf804bd00,
+/* 0x04a9: i2c_sense_scl */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x31fd0033,
+ 0x060bf404,
+/* 0x04bb: i2c_sense_scl_done */
+ 0xf80131f4,
+/* 0x04bd: i2c_sense_sda */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x32fd0033,
+ 0x060bf404,
+/* 0x04cf: i2c_sense_sda_done */
+ 0xf80131f4,
+/* 0x04d1: i2c_raise_scl */
+ 0x4440f900,
+ 0x01030898,
+ 0x0004757e,
+/* 0x04dc: i2c_raise_scl_wait */
+ 0x7e03e84e,
+ 0x7e00005d,
+ 0xf40004a9,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x04f0: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x04f4: i2c_start */
+ 0x0004a97e,
+ 0x7e0d11f4,
+ 0xf40004bd,
+ 0x0ef40611,
+/* 0x0505: i2c_start_rep */
+ 0x7e00032e,
+ 0x03000475,
+ 0x048f7e01,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xd17e50fc,
+ 0x64b60004,
+ 0x1d11f404,
+/* 0x0530: i2c_start_send */
+ 0x8f7e0003,
+ 0x884e0004,
+ 0x005d7e13,
+ 0x7e000300,
+ 0x4e000475,
+ 0x5d7e1388,
+/* 0x054a: i2c_start_out */
+ 0x00f80000,
+/* 0x054c: i2c_stop */
+ 0x757e0003,
+ 0x00030004,
+ 0x00048f7e,
+ 0x7e03e84e,
+ 0x0300005d,
+ 0x04757e01,
+ 0x13884e00,
+ 0x00005d7e,
+ 0x8f7e0103,
+ 0x884e0004,
+ 0x005d7e13,
+/* 0x057b: i2c_bitw */
+ 0x7e00f800,
+ 0x4e00048f,
+ 0x5d7e03e8,
+ 0x76bb0000,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60004d1,
+ 0x11f40464,
+ 0x13884e17,
+ 0x00005d7e,
+ 0x757e0003,
+ 0x884e0004,
+ 0x005d7e13,
+/* 0x05b9: i2c_bitw_out */
+/* 0x05bb: i2c_bitr */
+ 0x0300f800,
+ 0x048f7e01,
+ 0x03e84e00,
+ 0x00005d7e,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x04d17e50,
+ 0x0464b600,
+ 0x7e1a11f4,
+ 0x030004bd,
+ 0x04757e00,
+ 0x13884e00,
+ 0x00005d7e,
+ 0xf4013cf0,
+/* 0x05fe: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0600: i2c_get_byte */
+ 0x08040005,
+/* 0x0604: i2c_get_byte_next */
+ 0xbb0154b6,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0005bb7e,
+ 0xf40464b6,
+ 0x53fd2a11,
+ 0x0142b605,
+ 0x03d81bf4,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x7b7e50fc,
+ 0x64b60005,
+/* 0x064d: i2c_get_byte_done */
+/* 0x064f: i2c_put_byte */
+ 0x0400f804,
+/* 0x0651: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x00057b7e,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x05bb7e50,
+ 0x0464b600,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x06a7: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x06a9: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xf47e50fc,
+ 0x64b60004,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600064f,
+/* 0x06ee: i2c_addr_done */
+ 0x00f80464,
+/* 0x06f0: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b705e4,
+ 0x00f8d014,
+/* 0x06fc: i2c_acquire */
+ 0x0006f07e,
+ 0x0000047e,
+ 0x7e03d9f0,
+ 0xf800002e,
+/* 0x070d: i2c_release */
+ 0x06f07e00,
+ 0x00047e00,
+ 0x03daf000,
+ 0x00002e7e,
+/* 0x071e: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13b80137,
+ 0x98000bd4,
+ 0x13b80032,
+ 0x98000bac,
+ 0x31f40031,
+ 0xf9d0f902,
+ 0xf1d0f9e0,
+ 0xf1000067,
+ 0x92100063,
+ 0x76bb0167,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006fc,
+ 0xd0fc0464,
+ 0xf500d6b0,
+ 0x0500b01b,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xa97e50fc,
+ 0x64b60006,
+ 0xcc11f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x064f7e50,
+ 0x0464b600,
+ 0x00a911f5,
+ 0x76bb0105,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006a9,
+ 0x11f50464,
+ 0x76bb0087,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb6000600,
+ 0x11f40464,
+ 0xe05bcb67,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x054c7e50,
+ 0x0464b600,
+ 0x74bd5bb2,
+/* 0x0823: i2c_recv_not_rd08 */
+ 0xb0410ef4,
+ 0x1bf401d6,
+ 0x7e00053b,
+ 0xf40006a9,
+ 0xc5c73211,
+ 0x064f7ee0,
+ 0x2811f400,
+ 0xa97e0005,
+ 0x11f40006,
+ 0xe0b5c71f,
+ 0x00064f7e,
+ 0x7e1511f4,
+ 0xbd00054c,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x0861: i2c_recv_not_wr08 */
+/* 0x0861: i2c_recv_done */
+ 0xf8cec703,
+ 0x00070d7e,
+ 0xd0fce0fc,
+ 0xb20912f4,
+ 0x023f7e7c,
+/* 0x0875: i2c_recv_exit */
+/* 0x0877: i2c_init */
+ 0xf800f800,
+/* 0x0879: test_recv */
0x04584100,
0xb60011cf,
0x58400110,
@@ -1128,26 +1424,26 @@ uint32_t nv108_pwr_code[] = {
0xe3f1d900,
0x967e134f,
0x00f80001,
-/* 0x0494: test_init */
+/* 0x0898: test_init */
0x7e08004e,
0xf8000196,
-/* 0x049d: idle_recv */
-/* 0x049f: idle */
+/* 0x08a1: idle_recv */
+/* 0x08a3: idle */
0xf400f800,
0x54410031,
0x0011cf04,
0x400110b6,
0x01f60454,
-/* 0x04b3: idle_loop */
+/* 0x08b7: idle_loop */
0x0104bd00,
0x0232f458,
-/* 0x04b8: idle_proc */
-/* 0x04b8: idle_proc_exec */
+/* 0x08bc: idle_proc */
+/* 0x08bc: idle_proc_exec */
0x1eb210f9,
0x0002487e,
0x11f410fc,
0x0231f409,
-/* 0x04cb: idle_proc_next */
+/* 0x08cf: idle_proc_next */
0xb6f00ef4,
0x1fa65810,
0xf4e81bf4,
@@ -1161,5 +1457,4 @@ uint32_t nv108_pwr_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
index 6fde0b89e5aa..6744fcc06151 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 0fa4d7dcd407..5a73fa620978 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -89,9 +89,31 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x5f433249,
+ 0x00000982,
+ 0x00000825,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x54534554,
- 0x0000057b,
- 0x00000554,
+ 0x000009ab,
+ 0x00000984,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000587,
- 0x00000585,
+ 0x000009b7,
+ 0x000009b5,
0x00000000,
0x00000000,
0x00000000,
@@ -133,12 +155,12 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -171,7 +193,7 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,11 +226,11 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
0x00010000,
0x00000000,
0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
0x00000001,
0x00000000,
0x00000496,
@@ -221,8 +243,18 @@ uint32_t nva3_pwr_data[] = {
0x00010004,
0x00000000,
0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -725,6 +757,42 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00001000,
+ 0x00004000,
+ 0x00010000,
+ 0x00000100,
+ 0x00040000,
+ 0x00100000,
+ 0x00400000,
+ 0x01000000,
+ 0x04000000,
+ 0x10000000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00002000,
+ 0x00008000,
+ 0x00020000,
+ 0x00000200,
+ 0x00080000,
+ 0x00200000,
+ 0x00800000,
+ 0x02000000,
+ 0x08000000,
+ 0x20000000,
+/* 0x0bfc: i2c_ctrl */
+ 0x0000e138,
+ 0x0000e150,
+ 0x0000e168,
+ 0x0000e180,
+ 0x0000e254,
+ 0x0000e274,
+ 0x0000e764,
+ 0x0000e780,
+ 0x0000e79c,
+ 0x0000e7b8,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +803,6 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -787,15 +854,15 @@ uint32_t nva3_pwr_code[] = {
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001e7f0,
- 0x07f101e3,
+ 0xf001d7f0,
+ 0x07f101d3,
0x04b607ac,
- 0x000ed006,
+ 0x000dd006,
/* 0x0022: rd32_wait */
- 0xe7f104bd,
- 0xe4b607ac,
- 0x00eecf06,
- 0x7000e4f1,
+ 0xd7f104bd,
+ 0xd4b607ac,
+ 0x00ddcf06,
+ 0x7000d4f1,
0xf1f21bf4,
0xb607a4d7,
0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nva3_pwr_code[] = {
0xb607a407,
0x0dd00604,
0xf004bd00,
- 0xe5f002e7,
- 0x01e3f0f0,
+ 0xd5f002d7,
+ 0x01d3f0f0,
0x07ac07f1,
0xd00604b6,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x006c: wr32_wait */
- 0x07ace7f1,
- 0xcf06e4b6,
- 0xe4f100ee,
+ 0x07acd7f1,
+ 0xcf06d4b6,
+ 0xd4f100dd,
0x1bf47000,
/* 0x007f: nsec */
0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nva3_pwr_code[] = {
0x9800f8df,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x025421f5,
0x0ef494bd,
/* 0x00e9: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00f8: intr_watchdog_next_time_set */
/* 0x00fb: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x010a: intr */
0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nva3_pwr_code[] = {
0x0088cf06,
0xf40289c4,
0x0080230b,
- 0x58e7f085,
+ 0x58e7f09b,
0x98cb21f4,
- 0x96b08509,
+ 0x96b09b09,
0x110bf400,
0xb63407f0,
0x09d00604,
0x8004bd00,
/* 0x016e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f148,
0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nva3_pwr_code[] = {
0x000ed006,
0x0e8004bd,
/* 0x0241: timer_enable */
- 0x0187f084,
+ 0x0187f09a,
0xb63807f0,
0x08d00604,
/* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nva3_pwr_code[] = {
0xb8008a98,
0x0bf406ae,
0x5880b610,
- 0x021086b1,
+ 0x026886b1,
0xf4f01bf4,
/* 0x02b2: find_done */
0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nva3_pwr_code[] = {
0x320bf406,
0x94071ec4,
0xe0b704ee,
- 0xeb980218,
+ 0xeb980270,
0x02ec9803,
0x9801ed98,
0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nva3_pwr_code[] = {
0xe60bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nva3_pwr_code[] = {
/* 0x0430: host_init */
0x008017f1,
0xf11014b6,
- 0xf1021815,
+ 0xf1027015,
0xb604d007,
0x01d00604,
0xf104bd00,
0xb6008017,
0x15f11014,
- 0x07f10298,
+ 0x07f102f0,
0x04b604dc,
0x0001d006,
0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nva3_pwr_code[] = {
0x00139802,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12b855f9,
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
/* 0x0532: memx_info */
0xc7f100f8,
- 0xb7f10354,
+ 0xb7f103ac,
0x21f50800,
0x00f802b9,
/* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nva3_pwr_code[] = {
/* 0x0550: perf_recv */
/* 0x0552: perf_init */
0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+ 0xe8e7f105,
+ 0x7f21f403,
+ 0x059821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x05ed: i2c_start */
+ 0x9821f500,
+ 0x0d11f405,
+ 0x05b021f5,
+ 0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+ 0x37f0300e,
+ 0x5421f500,
+ 0x0137f005,
+ 0x057621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xc821f550,
+ 0x0464b605,
+/* 0x062b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10576,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x0647: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0649: i2c_stop */
+ 0x0037f000,
+ 0x055421f5,
+ 0xf50037f0,
+ 0xf1057621,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x5421f501,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1057621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x067c: i2c_bitw */
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+ 0x0137f000,
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f005b0,
+ 0x5421f500,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0704: i2c_get_byte */
+ 0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b606bd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7c21f550,
+ 0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x067c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbd21f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x07b1: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b605ed,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+/* 0x07f6: i2c_addr_done */
+ 0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980bfc,
+/* 0x0807: i2c_acquire */
+ 0xf500f800,
+ 0xf407f821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0816: i2c_release */
+ 0x21f500f8,
+ 0x21f407f8,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0825: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xd413a001,
+ 0x0032980b,
+ 0x0bac13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x080721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07b121f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607b1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60704,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x064921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x07b121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40756,
+ 0x0057f029,
+ 0x07b121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40756,
+ 0x4921f515,
+ 0xc774bd06,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0816,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+ 0x00f800f8,
+/* 0x0984: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nva3_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nva3_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
index eaa64da68e36..48f79434a449 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 82c8e8b88917..4dba00d2dd1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -89,9 +89,31 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x5f433249,
+ 0x00000982,
+ 0x00000825,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x54534554,
- 0x0000057b,
- 0x00000554,
+ 0x000009ab,
+ 0x00000984,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000587,
- 0x00000585,
+ 0x000009b7,
+ 0x000009b5,
0x00000000,
0x00000000,
0x00000000,
@@ -133,12 +155,12 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -171,7 +193,7 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,11 +226,11 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
0x00010000,
0x00000000,
0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
0x00000001,
0x00000000,
0x00000496,
@@ -221,8 +243,18 @@ uint32_t nvc0_pwr_data[] = {
0x00010004,
0x00000000,
0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -725,6 +757,42 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00001000,
+ 0x00004000,
+ 0x00010000,
+ 0x00000100,
+ 0x00040000,
+ 0x00100000,
+ 0x00400000,
+ 0x01000000,
+ 0x04000000,
+ 0x10000000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00002000,
+ 0x00008000,
+ 0x00020000,
+ 0x00000200,
+ 0x00080000,
+ 0x00200000,
+ 0x00800000,
+ 0x02000000,
+ 0x08000000,
+ 0x20000000,
+/* 0x0bfc: i2c_ctrl */
+ 0x0000e138,
+ 0x0000e150,
+ 0x0000e168,
+ 0x0000e180,
+ 0x0000e254,
+ 0x0000e274,
+ 0x0000e764,
+ 0x0000e780,
+ 0x0000e79c,
+ 0x0000e7b8,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +803,6 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -787,15 +854,15 @@ uint32_t nvc0_pwr_code[] = {
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001e7f0,
- 0x07f101e3,
+ 0xf001d7f0,
+ 0x07f101d3,
0x04b607ac,
- 0x000ed006,
+ 0x000dd006,
/* 0x0022: rd32_wait */
- 0xe7f104bd,
- 0xe4b607ac,
- 0x00eecf06,
- 0x7000e4f1,
+ 0xd7f104bd,
+ 0xd4b607ac,
+ 0x00ddcf06,
+ 0x7000d4f1,
0xf1f21bf4,
0xb607a4d7,
0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nvc0_pwr_code[] = {
0xb607a407,
0x0dd00604,
0xf004bd00,
- 0xe5f002e7,
- 0x01e3f0f0,
+ 0xd5f002d7,
+ 0x01d3f0f0,
0x07ac07f1,
0xd00604b6,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x006c: wr32_wait */
- 0x07ace7f1,
- 0xcf06e4b6,
- 0xe4f100ee,
+ 0x07acd7f1,
+ 0xcf06d4b6,
+ 0xd4f100dd,
0x1bf47000,
/* 0x007f: nsec */
0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nvc0_pwr_code[] = {
0x9800f8df,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x025421f5,
0x0ef494bd,
/* 0x00e9: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00f8: intr_watchdog_next_time_set */
/* 0x00fb: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x010a: intr */
0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nvc0_pwr_code[] = {
0x0088cf06,
0xf40289c4,
0x0080230b,
- 0x58e7f085,
+ 0x58e7f09b,
0x98cb21f4,
- 0x96b08509,
+ 0x96b09b09,
0x110bf400,
0xb63407f0,
0x09d00604,
0x8004bd00,
/* 0x016e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f148,
0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nvc0_pwr_code[] = {
0x000ed006,
0x0e8004bd,
/* 0x0241: timer_enable */
- 0x0187f084,
+ 0x0187f09a,
0xb63807f0,
0x08d00604,
/* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nvc0_pwr_code[] = {
0xb8008a98,
0x0bf406ae,
0x5880b610,
- 0x021086b1,
+ 0x026886b1,
0xf4f01bf4,
/* 0x02b2: find_done */
0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nvc0_pwr_code[] = {
0x320bf406,
0x94071ec4,
0xe0b704ee,
- 0xeb980218,
+ 0xeb980270,
0x02ec9803,
0x9801ed98,
0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nvc0_pwr_code[] = {
0xe60bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nvc0_pwr_code[] = {
/* 0x0430: host_init */
0x008017f1,
0xf11014b6,
- 0xf1021815,
+ 0xf1027015,
0xb604d007,
0x01d00604,
0xf104bd00,
0xb6008017,
0x15f11014,
- 0x07f10298,
+ 0x07f102f0,
0x04b604dc,
0x0001d006,
0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nvc0_pwr_code[] = {
0x00139802,
0x950410b6,
0x30f01034,
- 0xc835980c,
+ 0xde35980c,
0x12b855f9,
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
/* 0x0532: memx_info */
0xc7f100f8,
- 0xb7f10354,
+ 0xb7f103ac,
0x21f50800,
0x00f802b9,
/* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nvc0_pwr_code[] = {
/* 0x0550: perf_recv */
/* 0x0552: perf_init */
0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+ 0xe8e7f105,
+ 0x7f21f403,
+ 0x059821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x05ed: i2c_start */
+ 0x9821f500,
+ 0x0d11f405,
+ 0x05b021f5,
+ 0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+ 0x37f0300e,
+ 0x5421f500,
+ 0x0137f005,
+ 0x057621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xc821f550,
+ 0x0464b605,
+/* 0x062b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10576,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x0647: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0649: i2c_stop */
+ 0x0037f000,
+ 0x055421f5,
+ 0xf50037f0,
+ 0xf1057621,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x5421f501,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1057621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x067c: i2c_bitw */
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x055421f5,
+ 0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+ 0x0137f000,
+ 0x057621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05c821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f005b0,
+ 0x5421f500,
+ 0x88e7f105,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0704: i2c_get_byte */
+ 0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b606bd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7c21f550,
+ 0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x067c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbd21f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x07b1: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b605ed,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+/* 0x07f6: i2c_addr_done */
+ 0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980bfc,
+/* 0x0807: i2c_acquire */
+ 0xf500f800,
+ 0xf407f821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0816: i2c_release */
+ 0x21f500f8,
+ 0x21f407f8,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0825: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xd413a001,
+ 0x0032980b,
+ 0x0bac13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x080721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07b121f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6075621,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607b1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60704,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x064921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x07b121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40756,
+ 0x0057f029,
+ 0x07b121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40756,
+ 0x4921f515,
+ 0xc774bd06,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0816,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+ 0x00f800f8,
+/* 0x0984: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nvc0_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nvc0_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
index 32d65ea254dd..8a89dfe41ce1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -37,6 +37,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_PROC
@@ -46,6 +47,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_DATA
@@ -57,6 +59,7 @@
#include "host.fuc"
#include "memx.fuc"
#include "perf.fuc"
+#include "i2c_.fuc"
#include "test.fuc"
#include "idle.fuc"
#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index ce65e2a4b789..5e24c6bc041d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -89,33 +89,13 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x54534554,
- 0x000004eb,
- 0x000004ca,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x5f433249,
+ 0x000008e3,
+ 0x00000786,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
- 0x454c4449,
- 0x000004f7,
- 0x000004f5,
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
@@ -131,14 +111,13 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x54534554,
+ 0x00000906,
+ 0x000008e5,
0x00000000,
0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
0x00000000,
-/* 0x0214: time_next */
0x00000000,
-/* 0x0218: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -154,6 +133,9 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x454c4449,
+ 0x00000912,
+ 0x00000910,
0x00000000,
0x00000000,
0x00000000,
@@ -171,11 +153,14 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0298: rfifo_queue */
0x00000000,
0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
0x00000000,
+/* 0x026c: time_next */
0x00000000,
+/* 0x0270: fifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -204,31 +189,11 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0318: memx_func_head */
- 0x00010000,
- 0x00000000,
- 0x000003f4,
-/* 0x0324: memx_func_next */
- 0x00000001,
- 0x00000000,
- 0x00000415,
- 0x00000002,
- 0x00000002,
- 0x00000430,
- 0x00040003,
- 0x00000000,
- 0x00000458,
- 0x00010004,
- 0x00000000,
- 0x00000472,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
- 0x00000000,
- 0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+/* 0x02f0: rfifo_queue */
0x00000000,
0x00000000,
0x00000000,
@@ -261,10 +226,25 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0370: memx_func_head */
+ 0x00010000,
0x00000000,
+ 0x000003f4,
+/* 0x037c: memx_func_next */
+ 0x00000001,
0x00000000,
+ 0x00000415,
+ 0x00000002,
+ 0x00000002,
+ 0x00000430,
+ 0x00040003,
0x00000000,
+ 0x00000458,
+ 0x00010004,
0x00000000,
+ 0x00000472,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -735,7 +715,6 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0b54: memx_data_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -778,6 +757,29 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+ 0x00000400,
+ 0x00000800,
+ 0x00001000,
+ 0x00002000,
+ 0x00004000,
+ 0x00008000,
+ 0x00010000,
+ 0x00020000,
+ 0x00040000,
+ 0x00080000,
+/* 0x0bd4: i2c_sda_map */
+ 0x00100000,
+ 0x00200000,
+ 0x00400000,
+ 0x00800000,
+ 0x01000000,
+ 0x02000000,
+ 0x04000000,
+ 0x08000000,
+ 0x10000000,
+ 0x20000000,
0x00000000,
};
@@ -786,14 +788,14 @@ uint32_t nvd0_pwr_code[] = {
/* 0x0004: rd32 */
0x07a007f1,
0xbd000ed0,
- 0x01e7f004,
- 0xf101e3f0,
+ 0x01d7f004,
+ 0xf101d3f0,
0xd007ac07,
- 0x04bd000e,
+ 0x04bd000d,
/* 0x001c: rd32_wait */
- 0x07ace7f1,
- 0xf100eecf,
- 0xf47000e4,
+ 0x07acd7f1,
+ 0xf100ddcf,
+ 0xf47000d4,
0xd7f1f51b,
0xddcf07a4,
/* 0x0033: wr32 */
@@ -802,14 +804,14 @@ uint32_t nvd0_pwr_code[] = {
0x04bd000e,
0x07a407f1,
0xbd000dd0,
- 0x02e7f004,
- 0xf0f0e5f0,
- 0x07f101e3,
- 0x0ed007ac,
+ 0x02d7f004,
+ 0xf0f0d5f0,
+ 0x07f101d3,
+ 0x0dd007ac,
/* 0x0057: wr32_wait */
0xf104bd00,
- 0xcf07ace7,
- 0xe4f100ee,
+ 0xcf07acd7,
+ 0xd4f100dd,
0x1bf47000,
/* 0x0067: nsec */
0xf000f8f5,
@@ -836,21 +838,21 @@ uint32_t nvd0_pwr_code[] = {
0x9800f8e2,
0x96b003e9,
0x2a0bf400,
- 0xbb840a98,
+ 0xbb9a0a98,
0x1cf4029a,
0x01d7f00f,
0x020621f5,
0x0ef494bd,
/* 0x00c5: intr_watchdog_next_time */
- 0x850a9815,
+ 0x9b0a9815,
0xf400a6b0,
0x9ab8090b,
0x061cf406,
/* 0x00d4: intr_watchdog_next_time_set */
/* 0x00d7: intr_watchdog_next_proc */
- 0x80850980,
+ 0x809b0980,
0xe0b603e9,
- 0x10e6b158,
+ 0x68e6b158,
0xc61bf402,
/* 0x00e6: intr */
0x00f900f8,
@@ -868,15 +870,15 @@ uint32_t nvd0_pwr_code[] = {
0x0887f004,
0xc40088cf,
0x0bf40289,
- 0x85008020,
+ 0x9b008020,
0xf458e7f0,
0x0998a721,
- 0x0096b085,
+ 0x0096b09b,
0xf00e0bf4,
0x09d03407,
0x8004bd00,
/* 0x013e: intr_skip_watchdog */
- 0x89e48409,
+ 0x89e49a09,
0x0bf40800,
0x8897f13c,
0x0099cf06,
@@ -929,7 +931,7 @@ uint32_t nvd0_pwr_code[] = {
0x0ed03407,
0x8004bd00,
/* 0x01f6: timer_enable */
- 0x87f0840e,
+ 0x87f09a0e,
0x3807f001,
0xbd0008d0,
/* 0x0201: timer_done */
@@ -960,7 +962,7 @@ uint32_t nvd0_pwr_code[] = {
0x06aeb800,
0xb6100bf4,
0x86b15880,
- 0x1bf40210,
+ 0x1bf40268,
0x0132f4f0,
/* 0x0264: find_done */
0xfc028eb9,
@@ -1024,7 +1026,7 @@ uint32_t nvd0_pwr_code[] = {
0x0bf40612,
0x071ec42f,
0xb704ee94,
- 0x980218e0,
+ 0x980270e0,
0xec9803eb,
0x01ed9802,
0xf500ee98,
@@ -1048,7 +1050,7 @@ uint32_t nvd0_pwr_code[] = {
0xec0bf406,
0xb60723c4,
0x30b70434,
- 0x3b800298,
+ 0x3b8002f0,
0x023c8003,
0x80013d80,
0x20b6003e,
@@ -1061,12 +1063,12 @@ uint32_t nvd0_pwr_code[] = {
/* 0x03be: host_init */
0x17f100f8,
0x14b60080,
- 0x1815f110,
+ 0x7015f110,
0xd007f102,
0x0001d004,
0x17f104bd,
0x14b60080,
- 0x9815f110,
+ 0xf015f110,
0xdc07f102,
0x0001d004,
0x17f004bd,
@@ -1122,13 +1124,13 @@ uint32_t nvd0_pwr_code[] = {
0x10b60013,
0x10349504,
0x980c30f0,
- 0x55f9c835,
+ 0x55f9de35,
0xf40612b8,
0xd0fcec1e,
0x21f5e0fc,
0x00f8026b,
/* 0x04a8: memx_info */
- 0x0354c7f1,
+ 0x03acc7f1,
0x0800b7f1,
0x026b21f5,
/* 0x04b6: memx_recv */
@@ -1140,49 +1142,342 @@ uint32_t nvd0_pwr_code[] = {
/* 0x04c6: perf_recv */
0x00f800f8,
/* 0x04c8: perf_init */
-/* 0x04ca: test_recv */
- 0x17f100f8,
- 0x11cf05d8,
- 0x0110b600,
- 0x05d807f1,
+/* 0x04ca: i2c_drive_scl */
+ 0x36b000f8,
+ 0x0e0bf400,
+ 0x07e007f1,
0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0xb621f513,
-/* 0x04eb: test_init */
- 0xf100f801,
- 0xf50800e7,
- 0xf801b621,
-/* 0x04f5: idle_recv */
-/* 0x04f7: idle */
- 0xf400f800,
- 0x17f10031,
- 0x11cf05d4,
- 0x0110b600,
- 0x05d407f1,
- 0xbd0001d0,
-/* 0x050d: idle_loop */
- 0x5817f004,
-/* 0x0513: idle_proc */
-/* 0x0513: idle_proc_exec */
- 0xf90232f4,
- 0x021eb910,
- 0x027421f5,
- 0x11f410fc,
- 0x0231f409,
-/* 0x0527: idle_proc_next */
- 0xb6ef0ef4,
- 0x1fb85810,
- 0xe61bf406,
- 0xf4dd02f4,
- 0x0ef40028,
- 0x000000c1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+/* 0x04db: i2c_drive_scl_lo */
+ 0xf100f804,
+ 0xd007e407,
+ 0x04bd0001,
+/* 0x04e6: i2c_drive_sda */
+ 0x36b000f8,
+ 0x0e0bf400,
+ 0x07e007f1,
+ 0xbd0002d0,
+/* 0x04f7: i2c_drive_sda_lo */
+ 0xf100f804,
+ 0xd007e407,
+ 0x04bd0002,
+/* 0x0502: i2c_sense_scl */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0033cf07,
+ 0xf40431fd,
+ 0x31f4060b,
+/* 0x0515: i2c_sense_scl_done */
+/* 0x0517: i2c_sense_sda */
+ 0xf400f801,
+ 0x37f10132,
+ 0x33cf07c4,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x052a: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x052c: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0xca21f501,
+/* 0x0539: i2c_raise_scl_wait */
+ 0xe8e7f104,
+ 0x6721f403,
+ 0x050221f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x054d: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x0551: i2c_start */
+ 0x0221f500,
+ 0x0d11f405,
+ 0x051721f5,
+ 0xf40611f4,
+/* 0x0562: i2c_start_rep */
+ 0x37f0300e,
+ 0xca21f500,
+ 0x0137f004,
+ 0x04e621f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2c21f550,
+ 0x0464b605,
+/* 0x058f: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f104e6,
+ 0x21f41388,
+ 0x0037f067,
+ 0x04ca21f5,
+ 0x1388e7f1,
+/* 0x05ab: i2c_start_out */
+ 0xf86721f4,
+/* 0x05ad: i2c_stop */
+ 0x0037f000,
+ 0x04ca21f5,
+ 0xf50037f0,
+ 0xf104e621,
+ 0xf403e8e7,
+ 0x37f06721,
+ 0xca21f501,
+ 0x88e7f104,
+ 0x6721f413,
+ 0xf50137f0,
+ 0xf104e621,
+ 0xf41388e7,
+ 0x00f86721,
+/* 0x05e0: i2c_bitw */
+ 0x04e621f5,
+ 0x03e8e7f1,
+ 0xbb6721f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x052c21f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f067,
+ 0x04ca21f5,
+ 0x1388e7f1,
+/* 0x061f: i2c_bitw_out */
+ 0xf86721f4,
+/* 0x0621: i2c_bitr */
+ 0x0137f000,
+ 0x04e621f5,
+ 0x03e8e7f1,
+ 0xbb6721f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x052c21f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f00517,
+ 0xca21f500,
+ 0x88e7f104,
+ 0x6721f413,
+ 0xf4013cf0,
+/* 0x0666: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0668: i2c_get_byte */
+ 0xf00057f0,
+/* 0x066e: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60621,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xe021f550,
+ 0x0464b605,
+/* 0x06b8: i2c_get_byte_done */
+/* 0x06ba: i2c_put_byte */
+ 0x47f000f8,
+/* 0x06bd: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x05e021f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2121f550,
+ 0x0464b606,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x0713: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x0715: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60551,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606ba21,
+/* 0x075a: i2c_addr_done */
+ 0x00f80464,
+/* 0x075c: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b705e4,
+ 0x00f8d014,
+/* 0x0768: i2c_acquire */
+ 0x075c21f5,
+ 0xf00421f4,
+ 0x21f403d9,
+/* 0x0777: i2c_release */
+ 0xf500f833,
+ 0xf4075c21,
+ 0xdaf00421,
+ 0x3321f403,
+/* 0x0786: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13a0013a,
+ 0x32980bd4,
+ 0xac13a000,
+ 0x0031980b,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x6821f550,
+ 0x0464b607,
+ 0xd6b0d0fc,
+ 0xb31bf500,
+ 0x0057f000,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x1521f550,
+ 0x0464b607,
+ 0x00d011f5,
+ 0xbbe0c5c7,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x06ba21f5,
+ 0xf50464b6,
+ 0xf000ad11,
+ 0x76bb0157,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6071521,
+ 0x11f50464,
+ 0x76bb008a,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6066821,
+ 0x11f40464,
+ 0xe05bcb6a,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xad21f550,
+ 0x0464b605,
+ 0xbd025bb9,
+ 0x430ef474,
+/* 0x088c: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x57f03d1b,
+ 0x1521f500,
+ 0x3311f407,
+ 0xf5e0c5c7,
+ 0xf406ba21,
+ 0x57f02911,
+ 0x1521f500,
+ 0x1f11f407,
+ 0xf5e0b5c7,
+ 0xf406ba21,
+ 0x21f51511,
+ 0x74bd05ad,
+ 0xf408c5c7,
+ 0x32f4091b,
+ 0x030ef402,
+/* 0x08cc: i2c_recv_not_wr08 */
+/* 0x08cc: i2c_recv_done */
+ 0xf5f8cec7,
+ 0xfc077721,
+ 0xf4d0fce0,
+ 0x7cb90a12,
+ 0x6b21f502,
+/* 0x08e1: i2c_recv_exit */
+/* 0x08e3: i2c_init */
+ 0xf800f802,
+/* 0x08e5: test_recv */
+ 0xd817f100,
+ 0x0011cf05,
+ 0xf10110b6,
+ 0xd005d807,
+ 0x04bd0001,
+ 0xd900e7f1,
+ 0x134fe3f1,
+ 0x01b621f5,
+/* 0x0906: test_init */
+ 0xe7f100f8,
+ 0x21f50800,
+ 0x00f801b6,
+/* 0x0910: idle_recv */
+/* 0x0912: idle */
+ 0x31f400f8,
+ 0xd417f100,
+ 0x0011cf05,
+ 0xf10110b6,
+ 0xd005d407,
+ 0x04bd0001,
+/* 0x0928: idle_loop */
+ 0xf45817f0,
+/* 0x092e: idle_proc */
+/* 0x092e: idle_proc_exec */
+ 0x10f90232,
+ 0xf5021eb9,
+ 0xfc027421,
+ 0x0911f410,
+ 0xf40231f4,
+/* 0x0942: idle_proc_next */
+ 0x10b6ef0e,
+ 0x061fb858,
+ 0xf4e61bf4,
+ 0x28f4dd02,
+ 0xc10ef400,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 5fb0cccc6c64..574acfa44c8c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -7,6 +7,7 @@
#define PROC_HOST 0x54534f48
#define PROC_MEMX 0x584d454d
#define PROC_PERF 0x46524550
+#define PROC_I2C_ 0x5f433249
#define PROC_TEST 0x54534554
/* KERN: message identifiers */
@@ -24,4 +25,22 @@
#define MEMX_WAIT 3
#define MEMX_DELAY 4
+/* I2C_: message identifiers */
+#define I2C__MSG_RD08 0
+#define I2C__MSG_WR08 1
+
+#define I2C__MSG_DATA0_PORT 24:31
+#define I2C__MSG_DATA0_ADDR 14:23
+
+#define I2C__MSG_DATA0_RD08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_RD08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_RD08_REG 0:7
+#define I2C__MSG_DATA1_RD08_VAL 0:7
+
+#define I2C__MSG_DATA0_WR08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_WR08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_WR08_SYNC 8:8
+#define I2C__MSG_DATA0_WR08_REG 0:7
+#define I2C__MSG_DATA1_WR08_VAL 0:7
+
#endif
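Editorial note: the I2C__MSG_DATA0/DATA1 defines above use the fuc assembler's bitfield-range notation (low bit : high bit), consumed by extr in the microcode. As an illustration only, a host-side caller could pack a WR08 message's data words as sketched below; the helper name and the existence of such a C-side packer are assumptions, not something this patch adds — the firmware merely consumes this layout.

/*
 * Illustrative only: pack the DATA0/DATA1 words of an I2C_ WR08 message
 * according to the bit ranges defined above (PORT 24:31, ADDR 14:23,
 * SYNC 8:8, REG 0:7, VAL 0:7).
 */
#include <stdint.h>

static inline void
i2c__msg_wr08_pack(uint32_t *data0, uint32_t *data1,
		   uint8_t port, uint16_t addr, int sync,
		   uint8_t reg, uint8_t val)
{
	*data0 = ((uint32_t)port << 24) |           /* I2C__MSG_DATA0_PORT      24:31 */
		 (((uint32_t)addr & 0x3ff) << 14) | /* I2C__MSG_DATA0_ADDR      14:23 (10-bit field) */
		 ((sync ? 1u : 0u) << 8) |          /* I2C__MSG_DATA0_WR08_SYNC  8:8  */
		 reg;                               /* I2C__MSG_DATA0_WR08_REG   0:7  */
	*data1 = val;                               /* I2C__MSG_DATA1_WR08_VAL   0:7  */
}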
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index ef3133e7575c..7dd680ff2f6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
vmm->flush(vm);
}
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
- nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
@@ -136,7 +130,7 @@ finish:
vmm->flush(vm);
}
-void
+static void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
@@ -175,6 +169,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
}
void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+ else
+ if (node->pages)
+ nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+ else
+ nouveau_vm_map_at(vma, 0, node);
+}
+
+void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
struct nouveau_vm *vm = vma->vm;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index b13ff0fc42de..2f1ed61f7c8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -77,11 +77,6 @@ nv04_display_create(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 1);
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
- NV04_DISP_CLASS, NULL, 0, &disp->core);
- if (ret)
- return ret;
-
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 56a28db04000..4245fc3dab70 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -80,7 +80,6 @@ struct nv04_display {
struct nv04_mode_state saved_reg;
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
- struct nouveau_object *core;
struct nouveau_bo *image[2];
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 32e7064b819b..ab03f7719d2d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -55,9 +55,12 @@ struct nouveau_plane {
int hue;
int saturation;
int iturbt_709;
+
+ void (*set_params)(struct nouveau_plane *);
};
static uint32_t formats[] = {
+ DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
};
@@ -140,10 +143,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (fb->pixel_format != DRM_FORMAT_UYVY)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+ if (fb->pixel_format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_PLANAR;
- }
if (nv_plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
@@ -182,9 +185,9 @@ nv10_disable_plane(struct drm_plane *plane)
}
static void
-nv10_destroy_plane(struct drm_plane *plane)
+nv_destroy_plane(struct drm_plane *plane)
{
- nv10_disable_plane(plane);
+ plane->funcs->disable_plane(plane);
drm_plane_cleanup(plane);
kfree(plane);
}
@@ -217,9 +220,9 @@ nv10_set_params(struct nouveau_plane *plane)
}
static int
-nv10_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value)
+nv_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value)
{
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
@@ -238,15 +241,16 @@ nv10_set_property(struct drm_plane *plane,
else
return -EINVAL;
- nv10_set_params(nv_plane);
+ if (nv_plane->set_params)
+ nv_plane->set_params(nv_plane);
return 0;
}
static const struct drm_plane_funcs nv10_plane_funcs = {
.update_plane = nv10_update_plane,
.disable_plane = nv10_disable_plane,
- .set_property = nv10_set_property,
- .destroy = nv10_destroy_plane,
+ .set_property = nv_set_property,
+ .destroy = nv_destroy_plane,
};
static void
@@ -266,7 +270,7 @@ nv10_overlay_init(struct drm_device *device)
case 0x15:
case 0x1a:
case 0x20:
- num_formats = 1;
+ num_formats = 2;
break;
}
@@ -321,8 +325,159 @@ nv10_overlay_init(struct drm_device *device)
drm_object_attach_property(&plane->base.base,
plane->props.iturbt_709, plane->iturbt_709);
+ plane->set_params = nv10_set_params;
nv10_set_params(plane);
- nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ nv10_disable_plane(&plane->base);
+ return;
+cleanup:
+ drm_plane_cleanup(&plane->base);
+err:
+ kfree(plane);
+ nv_error(dev, "Failed to create plane\n");
+}
+
+static int
+nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nouveau_bo *cur = nv_plane->cur;
+ uint32_t overlay = 1;
+ int brightness = (nv_plane->brightness - 512) * 62 / 512;
+ int pitch, ret, i;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x >>= 16;
+ src_y >>= 16;
+ src_w >>= 16;
+ src_h >>= 16;
+
+ pitch = ALIGN(src_w * 4, 0x100);
+
+ if (pitch > 0xffff)
+ return -ERANGE;
+
+ /* TODO: Compute an offset? Not sure how to do this for YUYV. */
+ if (src_x != 0 || src_y != 0)
+ return -ERANGE;
+
+ if (crtc_w < src_w || crtc_h < src_h)
+ return -ERANGE;
+
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ nv_plane->cur = nv_fb->nvbo;
+
+ nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+
+ for (i = 0; i < 2; i++) {
+ nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+ nv_fb->nvbo->bo.offset);
+ nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+ }
+ nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+ nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+ nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+ (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
+
+ /* It should be possible to convert hue/contrast to this */
+ nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+ nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+ nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+ nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+
+ nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+ nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+
+ nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+ nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+
+ nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+
+ if (nv_plane->colorkey & (1 << 24))
+ overlay |= 0x10;
+ if (fb->pixel_format == DRM_FORMAT_YUYV)
+ overlay |= 0x100;
+
+ nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+
+ if (cur)
+ nouveau_bo_unpin(cur);
+
+ return 0;
+}
+
+static int
+nv04_disable_plane(struct drm_plane *plane)
+{
+ struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+ nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+ nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ if (nv_plane->cur) {
+ nouveau_bo_unpin(nv_plane->cur);
+ nv_plane->cur = NULL;
+ }
+
+ return 0;
+}
+
+static const struct drm_plane_funcs nv04_plane_funcs = {
+ .update_plane = nv04_update_plane,
+ .disable_plane = nv04_disable_plane,
+ .set_property = nv_set_property,
+ .destroy = nv_destroy_plane,
+};
+
+static void
+nv04_overlay_init(struct drm_device *device)
+{
+ struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int ret;
+
+ if (!plane)
+ return;
+
+ ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
+ &nv04_plane_funcs,
+ formats, 2, false);
+ if (ret)
+ goto err;
+
+ /* Set up the plane properties */
+ plane->props.colorkey = drm_property_create_range(
+ device, 0, "colorkey", 0, 0x01ffffff);
+ plane->props.brightness = drm_property_create_range(
+ device, 0, "brightness", 0, 1024);
+ if (!plane->props.colorkey ||
+ !plane->props.brightness)
+ goto cleanup;
+
+ plane->colorkey = 0;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.colorkey, plane->colorkey);
+
+ plane->brightness = 512;
+ drm_object_attach_property(&plane->base.base,
+ plane->props.brightness, plane->brightness);
+
+ nv04_disable_plane(&plane->base);
return;
cleanup:
drm_plane_cleanup(&plane->base);
@@ -335,6 +490,8 @@ void
nouveau_overlay_init(struct drm_device *device)
{
struct nouveau_device *dev = nouveau_dev(device);
- if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+ if (dev->chipset < 0x10)
+ nv04_overlay_init(device);
+ else if (dev->chipset <= 0x40)
nv10_overlay_init(device);
}
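With DRM_FORMAT_YUYV added to formats[] above, nv10_update_plane() now derives the PVIDEO format bits from the fourcc instead of special-casing NV12: anything other than UYVY gets the CR8YB8CB8YA8 byte-order bit, and NV12 additionally gets the planar bit. A runnable sketch of that mapping; the bit values are illustrative, not the real NV_PVIDEO_FORMAT_* constants:

#include <stdint.h>
#include <stdio.h>

enum fmt { FMT_UYVY, FMT_YUYV, FMT_NV12 };

#define FORMAT_COLOR_LE_CR8YB8CB8YA8 (1u << 16)  /* illustrative values only */
#define FORMAT_PLANAR                (1u << 20)

static uint32_t overlay_format_bits(enum fmt f)
{
	uint32_t bits = 0;

	if (f != FMT_UYVY)   /* YUYV and NV12 use the other byte order */
		bits |= FORMAT_COLOR_LE_CR8YB8CB8YA8;
	if (f == FMT_NV12)   /* NV12 is additionally planar */
		bits |= FORMAT_PLANAR;
	return bits;
}

int main(void)
{
	printf("UYVY=%08x YUYV=%08x NV12=%08x\n",
	       overlay_format_bits(FMT_UYVY),
	       overlay_format_bits(FMT_YUYV),
	       overlay_format_bits(FMT_NV12));
	return 0;
}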
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index ba0183fb84f3..83face3f608f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,15 +1,10 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/video.h>
-#include <acpi/acpi.h>
#include <linux/mxm-wmi.h>
-
#include <linux/vga_switcheroo.h>
-
#include <drm/drm_edid.h>
+#include <acpi/video.h>
#include "nouveau_drm.h"
#include "nouveau_acpi.h"
@@ -66,6 +61,7 @@ bool nouveau_is_v1_dsm(void) {
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
+#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
@@ -78,124 +74,89 @@ static const char nouveau_op_dsm_muid[] = {
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
+ int i;
union acpi_object *obj;
- int i, err;
char args_buff[4];
+ union acpi_object argv4 = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 4,
+ .buffer.pointer = args_buff
+ };
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000100;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 4;
/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
for (i = 0; i < 4; i++)
args_buff[i] = (arg >> i * 8) & 0xFF;
- params[3].buffer.pointer = args_buff;
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
-
- if (obj->type == ACPI_TYPE_INTEGER)
- if (obj->integer.value == 0x80000002) {
- return -ENODEV;
- }
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
+ *result = 0;
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+ func, &argv4, ACPI_TYPE_BUFFER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
+ if (obj->buffer.length == 4) {
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
+ ACPI_FREE(obj);
}
- kfree(output.pointer);
return 0;
}
-static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+/*
+ * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
+ * requirements on the fourth parameter, so a private implementation
+ * instead of using acpi_check_dsm().
+ */
+static int nouveau_check_optimus_dsm(acpi_handle handle)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *obj;
- int err;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(nouveau_dsm_muid);
- params[0].buffer.pointer = (char *)nouveau_dsm_muid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x00000102;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = arg;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, &output);
- if (err) {
- printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
- return err;
- }
-
- obj = (union acpi_object *)output.pointer;
+ int result;
- if (obj->type == ACPI_TYPE_INTEGER)
- if (obj->integer.value == 0x80000002)
- return -ENODEV;
-
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (obj->buffer.length == 4 && result) {
- *result = 0;
- *result |= obj->buffer.pointer[0];
- *result |= (obj->buffer.pointer[1] << 8);
- *result |= (obj->buffer.pointer[2] << 16);
- *result |= (obj->buffer.pointer[3] << 24);
- }
- }
+ /*
+ * Function 0 returns a Buffer containing available functions.
+ * The args parameter is ignored for function 0, so just put 0 in it
+ */
+ if (nouveau_optimus_dsm(handle, 0, 0, &result))
+ return 0;
- kfree(output.pointer);
- return 0;
+ /*
+ * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
+ * If the n-th bit is enabled, function n is supported
+ */
+ return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
}
-/* Returns 1 if a DSM function is usable and 0 otherwise */
-static int nouveau_test_dsm(acpi_handle test_handle,
- int (*dsm_func)(acpi_handle, int, int, uint32_t *),
- int sfnc)
+static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
- u32 result = 0;
-
- /* Function 0 returns a Buffer containing available functions. The args
- * parameter is ignored for function 0, so just put 0 in it */
- if (dsm_func(test_handle, 0, 0, &result))
- return 0;
+ int ret = 0;
+ union acpi_object *obj;
+ union acpi_object argv4 = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = arg,
+ };
+
+ obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+ func, &argv4, ACPI_TYPE_INTEGER);
+ if (!obj) {
+ acpi_handle_info(handle, "failed to evaluate _DSM\n");
+ return AE_ERROR;
+ } else {
+ if (obj->integer.value == 0x80000002)
+ ret = -ENODEV;
+ ACPI_FREE(obj);
+ }
- /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
- * the n-th bit is enabled, function n is supported */
- return result & 1 && result & (1 << sfnc);
+ return ret;
}
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
- return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id);
}
static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
@@ -205,7 +166,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
arg = NOUVEAU_DSM_POWER_SPEED;
else
arg = NOUVEAU_DSM_POWER_STAMINA;
- nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg);
return 0;
}
@@ -265,11 +226,11 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
nouveau_dsm_priv.other_handle = dhandle;
return false;
}
- if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+ if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ 1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (nouveau_check_optimus_dsm(dhandle))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
@@ -388,6 +349,11 @@ void nouveau_unregister_dsm_handler(void)
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+void nouveau_switcheroo_optimus_dsm(void) {}
+#endif
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
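The conversion above replaces the open-coded acpi_evaluate_object() plumbing with the generic _DSM helpers; only the Optimus MUID keeps a private capability check because its function 0 has special requirements on the fourth argument. For reference, a hedged kernel-style sketch of the helper usage, mirroring the calls in the hunks above (my_muid, rev and func are hypothetical; error codes are illustrative):

#include <linux/acpi.h>

/* Sketch only: typical acpi_check_dsm()/acpi_evaluate_dsm_typed() usage. */
static int my_dsm_query(acpi_handle handle, const u8 *my_muid, int rev, int func)
{
	union acpi_object *obj;
	u64 value;

	/* _DSM function 0 reports supported functions as a bitmask (bit 0
	 * plus bit n for function n); acpi_check_dsm() evaluates and tests it. */
	if (!acpi_check_dsm(handle, my_muid, rev, 1ULL << func))
		return -ENODEV;

	/* no fourth argument needed here, so pass a NULL argv4 */
	obj = acpi_evaluate_dsm_typed(handle, my_muid, rev, func,
				      NULL, ACPI_TYPE_INTEGER);
	if (!obj)
		return -EIO;

	value = obj->integer.value;
	ACPI_FREE(obj);
	return value ? 0 : -ENODEV;
}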
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c0fde6b9393c..4aed1714b9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -560,28 +560,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
}
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
-static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
- struct nouveau_fence *fence = NULL;
- int ret;
-
- ret = nouveau_fence_new(chan, false, &fence);
- if (ret)
- return ret;
-
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
- no_wait_gpu, new_mem);
- nouveau_fence_unref(&fence);
- return ret;
-}
-
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
@@ -798,25 +776,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct nouveau_mem *node = old_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
+ int src_tiled = !!node->memtype;
+ int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
int ret;
while (length) {
u32 amount, stride, height;
+ ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+ if (ret)
+ return ret;
+
amount = min(length, (u64)(4 * 1024 * 1024));
stride = 16 * 4;
height = amount / stride;
- if (old_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (src_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -826,19 +804,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (new_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (dst_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -848,18 +817,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
- ret = RING_SPACE(chan, 14);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
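The single RING_SPACE() reservation in the nv50_bo_move_m2mf() hunk above replaces three separate ones: the copy body always takes 14 words, and each of the source and destination surface setups takes 2 words untiled (method header plus one data word) or 8 words tiled (header plus seven), hence 18 + 6 * (src_tiled + dst_tiled). A runnable sketch of the sizing:

#include <stdio.h>

/* Sketch only: how the RING_SPACE reservation above is sized per iteration. */
static int ring_words(int src_tiled, int dst_tiled)
{
	return 18 + 6 * (src_tiled + dst_tiled);   /* 18 = 14 + 2 + 2 */
}

int main(void)
{
	printf("%d %d %d\n", ring_words(0, 0), ring_words(1, 0), ring_words(1, 1));
	/* prints: 18 24 30 */
	return 0;
}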
@@ -953,23 +914,28 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
- struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nouveau_mem *old_node = bo->mem.mm_node;
+ struct nouveau_mem *new_node = mem->mm_node;
+ u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
- PAGE_SHIFT, node->page_shift,
- NV_MEM_ACCESS_RW, vma);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- if (mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, node);
- else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[1]);
+ if (ret) {
+ nouveau_vm_put(&old_node->vma[0]);
+ return ret;
+ }
+ nouveau_vm_map(&old_node->vma[0], old_node);
+ nouveau_vm_map(&old_node->vma[1], new_node);
return 0;
}
@@ -979,35 +945,34 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_fence *fence;
int ret;
- mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
-
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
if (nv_device(drm->device)->card_type >= NV_50) {
- struct nouveau_mem *node = old_mem->mm_node;
-
- ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
- if (ret)
- goto out;
-
- ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+ ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
- goto out;
+ return ret;
}
- ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+ ret = nouveau_fence_sync(bo->sync_obj, chan);
if (ret == 0) {
- ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_gpu, new_mem);
+ ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_fence_new(chan, false, &fence);
+ if (ret == 0) {
+ ret = ttm_bo_move_accel_cleanup(bo, fence,
+ evict,
+ no_wait_gpu,
+ new_mem);
+ nouveau_fence_unref(&fence);
+ }
+ }
}
-
-out:
mutex_unlock(&chan->cli->mutex);
return ret;
}
@@ -1147,19 +1112,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
return;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
nouveau_vm_map(vma, new_mem->mm_node);
- } else
- if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (((struct nouveau_mem *)new_mem->mm_node)->sg)
- nouveau_vm_map_sg_table(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
- else
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1224,28 +1180,27 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* CPU copy if we have no accelerated method available */
- if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- goto out;
- }
-
/* Hardware assisted copy. */
- if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr,
- no_wait_gpu, new_mem);
- else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr,
- no_wait_gpu, new_mem);
- else
- ret = nouveau_bo_move_m2mf(bo, evict, intr,
- no_wait_gpu, new_mem);
-
- if (!ret)
- goto out;
+ if (drm->ttm.move) {
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ if (!ret)
+ goto out;
+ }
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ spin_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+ spin_unlock(&bo->bdev->fence_lock);
+ if (ret == 0)
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1271,6 +1226,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = drm->dev;
int ret;
@@ -1293,14 +1249,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- break;
+ if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+ /* untiled */
+ break;
+ /* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_bar *bar = nouveau_bar(drm->device);
- struct nouveau_mem *node = mem->mm_node;
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
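In the nouveau_ttm_io_mem_reserve() hunk above, untiled TT memory still needs no aperture mapping, but tiled TT memory on NV50+ now falls through to the VRAM case so it gets a BAR mapping as well. A runnable sketch of that conditional fall-through; placements and messages are stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum placement { PL_SYSTEM, PL_TT, PL_VRAM };

static void reserve(enum placement pl, bool tiled, bool nv50plus)
{
	switch (pl) {
	case PL_TT:
		if (!nv50plus || !tiled) {
			printf("TT: untiled, nothing to map\n");
			break;
		}
		/* fall through: tiled TT is mapped like VRAM */
	case PL_VRAM:
		printf("map through the BAR1 aperture\n");
		break;
	default:
		printf("system memory, no mapping\n");
		break;
	}
}

int main(void)
{
	reserve(PL_TT, false, true);    /* untiled TT */
	reserve(PL_TT, true, true);     /* tiled TT   */
	reserve(PL_VRAM, false, true);  /* VRAM       */
	return 0;
}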
@@ -1336,6 +1294,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_device *device = nv_device(drm->device);
u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+ int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
@@ -1344,10 +1303,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
if (nv_device(drm->device)->card_type < NV_50 ||
!nouveau_bo_tile_layout(nvbo))
return 0;
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/* make sure bo is in mappable vram */
- if (bo->mem.start + bo->mem.num_pages < mappable)
+ if (nv_device(drm->device)->card_type >= NV_50 ||
+ bo->mem.start + bo->mem.num_pages < mappable)
return 0;
@@ -1535,7 +1504,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
struct nouveau_vma *vma)
{
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- struct nouveau_mem *node = nvbo->bo.mem.mm_node;
int ret;
ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1511,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (ret)
return ret;
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift))
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (node->sg)
- nouveau_vm_map_sg_table(vma, 0, size, node);
- else
- nouveau_vm_map_sg(vma, 0, size, node);
- }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 25ea82f8def3..24011596af43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -68,20 +68,100 @@ nouveau_display_vblank_disable(struct drm_device *dev, int head)
nouveau_event_put(disp->vblank[head]);
}
+static inline int
+calc(int blanks, int blanke, int total, int line)
+{
+ if (blanke >= blanks) {
+ if (line >= blanks)
+ line -= total;
+ } else {
+ if (line >= blanks)
+ line -= total;
+ line -= blanke + 1;
+ }
+ return line;
+}
+
+int
+nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
+{
+ const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct nouveau_display *disp = nouveau_display(crtc->dev);
+ struct nv04_display_scanoutpos args;
+ int ret, retry = 1;
+
+ do {
+ ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ if (ret != 0)
+ return 0;
+
+ if (args.vline) {
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_VALID;
+ break;
+ }
+
+ if (retry) ndelay(crtc->linedur_ns);
+ } while (retry--);
+
+ *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline);
+ *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
+ if (stime) *stime = ns_to_ktime(args.time[0]);
+ if (etime) *etime = ns_to_ktime(args.time[1]);
+
+ if (*vpos < 0)
+ ret |= DRM_SCANOUTPOS_INVBL;
+ return ret;
+}
+
+int
+nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
+ struct timeval *time, unsigned flags)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return drm_calc_vbltimestamp_from_scanoutpos(dev,
+ head, max_error, time, flags, crtc,
+ &crtc->hwmode);
+ }
+ }
+
+ return -EINVAL;
+}
+
static void
nouveau_display_vblank_fini(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
int i;
+ drm_vblank_cleanup(dev);
+
if (disp->vblank) {
for (i = 0; i < dev->mode_config.num_crtc; i++)
nouveau_event_ref(NULL, &disp->vblank[i]);
kfree(disp->vblank);
disp->vblank = NULL;
}
-
- drm_vblank_cleanup(dev);
}
static int
@@ -407,10 +487,31 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_disable(dev);
if (drm->vbios.dcb.entries) {
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- ret = nv50_display_create(dev);
+ static const u16 oclass[] = {
+ NVF0_DISP_CLASS,
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ NV04_DISP_CLASS,
+ };
+ int i;
+
+ for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->core);
+ }
+
+ if (ret == 0) {
+ if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ }
} else {
ret = 0;
}
@@ -439,6 +540,7 @@ void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
@@ -449,6 +551,8 @@ nouveau_display_destroy(struct drm_device *dev)
if (disp->dtor)
disp->dtor(dev);
+ nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -603,6 +707,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (!s)
return -ENOMEM;
+ if (new_bo != old_bo) {
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto fail_free;
+ }
+
+ mutex_lock(&chan->cli->mutex);
+
/* synchronise rendering channel with the kernel's channel */
spin_lock(&new_bo->bo.bdev->fence_lock);
fence = nouveau_fence_ref(new_bo->bo.sync_obj);
@@ -610,15 +722,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_fence_sync(fence, chan);
nouveau_fence_unref(&fence);
if (ret)
- goto fail_free;
-
- if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
- if (ret)
- goto fail_free;
- }
+ goto fail_unpin;
- mutex_lock(&chan->cli->mutex);
ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
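The calc() helper added above converts the raw scanline counter returned by the scanoutpos method into a position relative to the first active line, so callers see negative values while the raster is inside the blanking region regardless of whether blanking sits at the end of the frame or wraps past line 0. A standalone copy with a worked example; the mode timings are hypothetical:

#include <stdio.h>

/* Same logic as calc() above. */
static int calc(int blanks, int blanke, int total, int line)
{
	if (blanke >= blanks) {          /* blanking at the end of the frame */
		if (line >= blanks)
			line -= total;
	} else {                         /* blanking wraps past line 0 */
		if (line >= blanks)
			line -= total;
		line -= blanke + 1;
	}
	return line;
}

int main(void)
{
	/* hypothetical 768-line mode: vblank from line 768 to 805, 806 total */
	printf("%d\n", calc(768, 805, 806, 100));   /* 100: in the active area */
	printf("%d\n", calc(768, 805, 806, 780));   /* -26: inside vblank */
	return 0;
}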
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 8bc8bab90e8d..a71cf77e55b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,7 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
+ struct nouveau_object *core;
struct nouveau_eventh **vblank;
struct drm_property *dithering_mode;
@@ -63,6 +64,10 @@ void nouveau_display_repin(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
int nouveau_display_vblank_enable(struct drm_device *, int);
void nouveau_display_vblank_disable(struct drm_device *, int);
+int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
+ int *, int *, ktime_t *, ktime_t *);
+int nouveau_display_vblstamp(struct drm_device *, int, int *,
+ struct timeval *, unsigned);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 40f91e1e5842..c177272152e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -100,7 +100,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
- DRM_MEMORYBARRIER();
+ mb();
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 984004d66a6d..dc0e0c5cadb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -155,7 +155,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
}
#define WRITE_PUT(val) do { \
- DRM_MEMORYBARRIER(); \
+ mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
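DRM_MEMORYBARRIER() came from the old DRM OS abstraction layer; the submit paths above now use mb() directly. The barrier orders the pushbuffer writes ahead of the PUT-pointer update that kicks the GPU, and the nouveau_bo_rd32() read-back flushes any posted writes. An illustrative fragment of that ordering; the ring/doorbell names are made up, not real nouveau interfaces:

#include <linux/io.h>

/* Illustrative fragment only. */
static void kick(u32 __iomem *ring, u32 __iomem *doorbell, u32 put, u32 cmd)
{
	writel(cmd, ring + put);     /* 1. queue the command                   */
	mb();                        /* 2. order it before the doorbell update */
	(void)readl(ring);           /* 3. read back to flush posted writes    */
	writel(put + 1, doorbell);   /* 4. tell the GPU there is new work      */
}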
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 98a22e6e27a1..4ee702ac8907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
+ dev->irq_enabled = true;
+
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *device;
+ dev->irq_enabled = false;
device = drm->client.base.device;
drm_put_dev(dev);
@@ -503,19 +506,21 @@ nouveau_do_suspend(struct drm_device *dev)
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
- return ret;
+ goto fail_display;
}
if (drm->channel) {
ret = nouveau_channel_idle(drm->channel);
if (ret)
- return ret;
+ goto fail_display;
}
NV_INFO(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
- if (!nouveau_fence(drm)->suspend(drm))
- return -ENOMEM;
+ if (!nouveau_fence(drm)->suspend(drm)) {
+ ret = -ENOMEM;
+ goto fail_display;
+ }
}
list_for_each_entry(cli, &drm->clients, head) {
@@ -537,6 +542,10 @@ fail_client:
nouveau_client_init(&cli->base);
}
+ if (drm->fence && nouveau_fence(drm)->resume)
+ nouveau_fence(drm)->resume(drm);
+
+fail_display:
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
nouveau_display_resume(dev);
@@ -798,6 +807,8 @@ driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_display_vblank_enable,
.disable_vblank = nouveau_display_vblank_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
+ .get_vblank_timestamp = nouveau_display_vblstamp,
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -855,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- return -EINVAL;
+ pm_runtime_forbid(dev);
+ return -EBUSY;
}
nv_debug_level(SILENT);
@@ -912,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0)
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
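Failing a runtime-suspend callback with an ordinary error such as -EINVAL leaves the device in a runtime-PM error state; the hunks above instead call pm_runtime_forbid() and return -EBUSY, so the GPU simply stays powered when runtime PM is disabled or the machine is not an Optimus/_DSM system. A sketch of the pattern; can_power_down() is a hypothetical stand-in for those checks:

#include <linux/pm_runtime.h>

static bool can_power_down(struct device *dev);   /* hypothetical check */

/* Sketch only. */
static int my_runtime_suspend(struct device *dev)
{
	if (!can_power_down(dev)) {
		pm_runtime_forbid(dev);   /* keep the device marked always-on */
		return -EBUSY;            /* soft failure, no rpm error state */
	}
	/* ... actually power the GPU down ... */
	return 0;
}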
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 4b0fb6c66be9..23ca7a517246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -54,6 +54,7 @@ enum nouveau_drm_handle {
NVDRM_CLIENT = 0xffffffff,
NVDRM_DEVICE = 0xdddddddd,
NVDRM_CONTROL = 0xdddddddc,
+ NVDRM_DISPLAY = 0xd1500000,
NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
NVDRM_CHAN = 0xcccc0000, /* |= client chid */
NVDRM_NVSW = 0x55550000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 40cf52e6d6d2..90074d620e31 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
int ret;
fence->channel = chan;
- fence->timeout = jiffies + (15 * DRM_HZ);
+ fence->timeout = jiffies + (15 * HZ);
fence->sequence = ++fctx->sequence;
ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 78a27f8ad7d9..27c3fd89e8ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -463,12 +463,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = validate_sync(chan, nvbo);
- if (unlikely(ret)) {
- NV_ERROR(cli, "fail pre-validate sync\n");
- return ret;
- }
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
@@ -506,7 +500,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->presumed.valid = 0;
relocs++;
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
&b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
@@ -593,7 +587,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
if (!mem)
return ERR_PTR(-ENOMEM);
- if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+ if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 0843ebc910d4..a4d22e5eb176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
- u64 size = mem->num_pages << 12;
if (ttm->sg) {
- node->sg = ttm->sg;
- nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+ node->sg = ttm->sg;
+ node->pages = NULL;
} else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
- nouveau_vm_map_sg(&node->vma[0], 0, size, node);
}
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+ nouveau_vm_map(&node->vma[0], node);
nvbe->node = node;
return 0;
}
@@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
/* noop: bound in move_notify() */
if (ttm->sg) {
- node->sg = ttm->sg;
- } else
+ node->sg = ttm->sg;
+ node->pages = NULL;
+ } else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
+ }
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 19e3757291fb..d45d50da978f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -171,6 +171,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
+
node->page_shift = 12;
switch (nv_device(drm->device)->card_type) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7f2eff..471347edc27e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_device *device = nouveau_dev(priv);
- if (device->chipset >= 0x40)
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ nv_wr32(device, 0x088060, state);
+ else if (device->chipset >= 0x40)
nv_wr32(device, 0x088054, state);
else
nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4e384a2f99c3..2dccafc6e9db 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1035,6 +1035,7 @@ static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
return true;
}
@@ -2199,16 +2200,6 @@ nv50_display_destroy(struct drm_device *dev)
int
nv50_display_create(struct drm_device *dev)
{
- static const u16 oclass[] = {
- NVF0_DISP_CLASS,
- NVE0_DISP_CLASS,
- NVD0_DISP_CLASS,
- NVA3_DISP_CLASS,
- NV94_DISP_CLASS,
- NVA0_DISP_CLASS,
- NV84_DISP_CLASS,
- NV50_DISP_CLASS,
- };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
@@ -2225,6 +2216,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
+ disp->core = nouveau_display(dev)->core;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2243,17 +2235,6 @@ nv50_display_create(struct drm_device *dev)
if (ret)
goto out;
- /* attempt to allocate a supported evo display class */
- ret = -ENODEV;
- for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
- 0xd1500000, oclass[i], NULL, 0,
- &disp->core);
- }
-
- if (ret)
- goto out;
-
/* allocate master evo channel */
ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
&(struct nv50_display_mast_class) {
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 0fd2eb139f6e..4313bb0a49a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -411,7 +411,7 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
struct drm_crtc *crtc = &omap_crtc->base;
DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
/* avoid getting in a flood, unregister the irq until next vblank */
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
}
static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
@@ -421,13 +421,13 @@ static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
struct drm_crtc *crtc = &omap_crtc->base;
if (!omap_crtc->error_irq.registered)
- omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+ __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
if (!dispc_mgr_go_busy(omap_crtc->channel)) {
struct omap_drm_private *priv =
crtc->dev->dev_private;
DBG("%s: apply done", omap_crtc->name);
- omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
queue_work(priv->wq, &omap_crtc->apply_work);
}
}
@@ -623,6 +623,11 @@ void omap_crtc_pre_init(void)
dss_install_mgr_ops(&mgr_ops);
}
+void omap_crtc_pre_uninit(void)
+{
+ dss_uninstall_mgr_ops();
+}
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id)
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index c27f59da7f29..d4c04d69fc4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -48,7 +48,7 @@ static int mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int fb_show(struct seq_file *m, void *arg)
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 701c4c10e08b..f926b4caf449 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -969,12 +969,21 @@ static const struct dev_pm_ops omap_dmm_pm_ops = {
};
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[] = {
+ { .compatible = "ti,omap4-dmm", },
+ { .compatible = "ti,omap5-dmm", },
+ {},
+};
+#endif
+
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
.remove = omap_dmm_remove,
.driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(dmm_of_match),
#ifdef CONFIG_PM
.pm = &omap_dmm_pm_ops,
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e7fa3cd96743..bf39fcc49e0f 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -86,6 +86,47 @@ static bool channel_used(struct drm_device *dev, enum omap_channel channel)
return false;
}
+static void omap_disconnect_dssdevs(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev)
+ dssdev->driver->disconnect(dssdev);
+}
+
+static int omap_connect_dssdevs(void)
+{
+ int r;
+ struct omap_dss_device *dssdev = NULL;
+ bool no_displays = true;
+
+ for_each_dss_dev(dssdev) {
+ r = dssdev->driver->connect(dssdev);
+ if (r == -EPROBE_DEFER) {
+ omap_dss_put_device(dssdev);
+ goto cleanup;
+ } else if (r) {
+ dev_warn(dssdev->dev, "could not connect display: %s\n",
+ dssdev->name);
+ } else {
+ no_displays = false;
+ }
+ }
+
+ if (no_displays)
+ return -EPROBE_DEFER;
+
+ return 0;
+
+cleanup:
+ /*
+ * if we are deferring probe, we disconnect the devices we previously
+ * connected
+ */
+ omap_disconnect_dssdevs();
+
+ return r;
+}
static int omap_modeset_init(struct drm_device *dev)
{
@@ -95,9 +136,6 @@ static int omap_modeset_init(struct drm_device *dev)
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
- int r;
-
- omap_crtc_pre_init();
drm_mode_config_init(dev);
@@ -119,26 +157,8 @@ static int omap_modeset_init(struct drm_device *dev)
enum omap_channel channel;
struct omap_overlay_manager *mgr;
- if (!dssdev->driver) {
- dev_warn(dev->dev, "%s has no driver.. skipping it\n",
- dssdev->name);
- continue;
- }
-
- if (!(dssdev->driver->get_timings ||
- dssdev->driver->read_edid)) {
- dev_warn(dev->dev, "%s driver does not support "
- "get_timings or read_edid.. skipping it!\n",
- dssdev->name);
- continue;
- }
-
- r = dssdev->driver->connect(dssdev);
- if (r) {
- dev_err(dev->dev, "could not connect display: %s\n",
- dssdev->name);
+ if (!omapdss_device_is_connected(dssdev))
continue;
- }
encoder = omap_encoder_init(dev, dssdev);
@@ -497,16 +517,16 @@ static int dev_unload(struct drm_device *dev)
DBG("unload: dev=%p", dev);
drm_kms_helper_poll_fini(dev);
- drm_vblank_cleanup(dev);
- omap_drm_irq_uninstall(dev);
omap_fbdev_free(dev);
omap_modeset_free(dev);
omap_gem_deinit(dev);
- flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
+ drm_vblank_cleanup(dev);
+ omap_drm_irq_uninstall(dev);
+
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -655,9 +675,19 @@ static void pdev_shutdown(struct platform_device *device)
static int pdev_probe(struct platform_device *device)
{
+ int r;
+
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
+ omap_crtc_pre_init();
+
+ r = omap_connect_dssdevs();
+ if (r) {
+ omap_crtc_pre_uninit();
+ return r;
+ }
+
DBG("%s", device->name);
return drm_platform_init(&omap_drm_driver, device);
}
@@ -665,9 +695,11 @@ static int pdev_probe(struct platform_device *device)
static int pdev_remove(struct platform_device *device)
{
DBG("");
- drm_platform_exit(&omap_drm_driver, device);
- platform_driver_unregister(&omap_dmm_driver);
+ omap_disconnect_dssdevs();
+ omap_crtc_pre_uninit();
+
+ drm_put_dev(platform_get_drvdata(device));
return 0;
}
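omap_connect_dssdevs() above connects every DSS display at probe time: an -EPROBE_DEFER from any of them unwinds what was already connected and defers the whole DRM device, other errors only warn and skip that display, and ending up with no connected display also defers. A runnable sketch of the same pattern with stub devices:

#include <stdio.h>

#define EPROBE_DEFER 517   /* kernel errno value for probe deferral */

static int connect(int i)        { return i == 2 ? -EPROBE_DEFER : 0; }
static void disconnect_all(void) { printf("disconnecting all displays\n"); }

static int connect_all(int count)
{
	int i, r, connected = 0;

	for (i = 0; i < count; i++) {
		r = connect(i);
		if (r == -EPROBE_DEFER) {
			disconnect_all();   /* a dependency is missing: undo, retry later */
			return r;
		}
		if (r == 0)
			connected++;        /* other errors just skip this display */
	}
	return connected ? 0 : -EPROBE_DEFER;
}

int main(void)
{
	printf("connect_all -> %d\n", connect_all(4));
	return 0;
}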
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 07847693cf49..428b2981fd68 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -141,10 +141,12 @@ int omap_gem_resume(struct device *dev);
int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t omap_irq_handler(int irq, void *arg);
void omap_irq_preinstall(struct drm_device *dev);
int omap_irq_postinstall(struct drm_device *dev);
void omap_irq_uninstall(struct drm_device *dev);
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
int omap_drm_irq_uninstall(struct drm_device *dev);
@@ -158,6 +160,7 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
int omap_crtc_apply(struct drm_crtc *crtc,
struct omap_drm_apply *apply);
void omap_crtc_pre_init(void);
+void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6a12e899235b..5290a88c681d 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -51,6 +51,9 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+ omap_encoder_set_enabled(encoder, false);
+
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f2b8f0668c0c..f466c4aaee94 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -123,12 +123,16 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
{
int i;
+ drm_modeset_lock_all(fb->dev);
+
for (i = 0; i < num_clips; i++) {
omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
}
+ drm_modeset_unlock_all(fb->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index cb858600185f..f035d2bceae7 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -45,12 +45,11 @@ static void omap_irq_update(struct drm_device *dev)
dispc_read_irqenable(); /* flush posted write */
}
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned long flags;
- dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
if (!WARN_ON(irq->registered)) {
@@ -60,14 +59,21 @@ void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
}
spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ dispc_runtime_get();
+
+ __omap_irq_register(dev, irq);
+
dispc_runtime_put();
}
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
{
unsigned long flags;
- dispc_runtime_get();
spin_lock_irqsave(&list_lock, flags);
if (!WARN_ON(!irq->registered)) {
@@ -77,6 +83,14 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
}
spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+ dispc_runtime_get();
+
+ __omap_irq_unregister(dev, irq);
+
dispc_runtime_put();
}
@@ -173,7 +187,7 @@ void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
dispc_runtime_put();
}
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t omap_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct omap_drm_private *priv = dev->dev_private;
@@ -308,7 +322,7 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vblank[i].queue);
+ wake_up(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
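omap_irq_register()/unregister() above are split into bare __-prefixed helpers and wrappers that bracket them with dispc_runtime_get()/put(). The bare versions only do the spinlock-protected list update, so the error and apply IRQ callbacks can use them from interrupt context, where the runtime-PM calls (which may sleep) are not allowed and the display is already powered anyway. A sketch of the split; my_* names are stand-ins for the omap_drm_irq helpers:

/* Sketch only. */
static void __my_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
	/* spinlock-protected list update only: safe in interrupt context */
}

static void my_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
	dispc_runtime_get();          /* may sleep: process context only */
	__my_irq_register(dev, irq);
	dispc_runtime_put();
}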
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
new file mode 100644
index 000000000000..3e0f13d1bc84
--- /dev/null
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -0,0 +1,19 @@
+config DRM_PANEL
+ bool
+ depends on DRM
+ help
+ Panel registration and lookup framework.
+
+menu "Display Panels"
+ depends on DRM_PANEL
+
+config DRM_PANEL_SIMPLE
+ tristate "support for simple panels"
+ depends on OF
+ help
+ DRM panel driver for dumb panels that need at most a regulator and
+ a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
new file mode 100644
index 000000000000..af9dfa235b94
--- /dev/null
+++ b/drivers/gpu/drm/panel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
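panel-simple below registers panels described in the device tree with the new DRM panel framework; a display controller driver is then expected to look its panel up and drive it through the drm_panel hooks. A hedged sketch of the consumer side; the "panel" phandle name is an assumption and the calls follow drm_panel.h as introduced alongside this driver:

#include <linux/of.h>
#include <drm/drm_panel.h>

/* Sketch only, not part of this patch. */
static int my_encoder_find_panel(struct device *dev,
				 struct drm_connector *connector,
				 struct drm_panel **out)
{
	struct device_node *np;
	struct drm_panel *panel;

	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (!np)
		return -ENODEV;

	panel = of_drm_find_panel(np);   /* NULL until panel-simple has probed */
	of_node_put(np);
	if (!panel)
		return -EPROBE_DEFER;

	drm_panel_attach(panel, connector);
	drm_panel_enable(panel);         /* ends up in panel_simple_enable() */
	*out = panel;
	return 0;
}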
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
new file mode 100644
index 000000000000..59d52ca2c67f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int num_modes;
+
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+};
+
+/* TODO: convert to gpiod_*() API once it's been merged */
+#define GPIO_ACTIVE_LOW (1 << 0)
+
+struct panel_simple {
+ struct drm_panel base;
+ bool enabled;
+
+ const struct panel_desc *desc;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct i2c_adapter *ddc;
+
+ unsigned long enable_gpio_flags;
+ int enable_gpio;
+};
+
+static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_simple, base);
+}
+
+static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+
+ return num;
+}
+
+static int panel_simple_disable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(p->backlight);
+ }
+
+ if (gpio_is_valid(p->enable_gpio)) {
+ if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+ gpio_set_value(p->enable_gpio, 1);
+ else
+ gpio_set_value(p->enable_gpio, 0);
+ }
+
+ regulator_disable(p->supply);
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ int err;
+
+ if (p->enabled)
+ return 0;
+
+ err = regulator_enable(p->supply);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable supply: %d\n", err);
+ return err;
+ }
+
+ if (gpio_is_valid(p->enable_gpio)) {
+ if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+ gpio_set_value(p->enable_gpio, 0);
+ else
+ gpio_set_value(p->enable_gpio, 1);
+ }
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int panel_simple_get_modes(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ int num = 0;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+ struct edid *edid = drm_get_edid(panel->connector, p->ddc);
+ drm_mode_connector_update_edid_property(panel->connector, edid);
+ if (edid) {
+ num += drm_add_edid_modes(panel->connector, edid);
+ kfree(edid);
+ }
+ }
+
+ /* add hard-coded panel modes */
+ num += panel_simple_get_fixed_modes(p);
+
+ return num;
+}
+
+static const struct drm_panel_funcs panel_simple_funcs = {
+ .disable = panel_simple_disable,
+ .enable = panel_simple_enable,
+ .get_modes = panel_simple_get_modes,
+};
+
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+{
+ struct device_node *backlight, *ddc;
+ struct panel_simple *panel;
+ enum of_gpio_flags flags;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->desc = desc;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return PTR_ERR(panel->supply);
+
+ panel->enable_gpio = of_get_named_gpio_flags(dev->of_node,
+ "enable-gpios", 0,
+ &flags);
+ if (gpio_is_valid(panel->enable_gpio)) {
+ unsigned int value;
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
+
+ err = gpio_request(panel->enable_gpio, "enable");
+ if (err < 0) {
+ dev_err(dev, "failed to request GPIO#%u: %d\n",
+ panel->enable_gpio, err);
+ return err;
+ }
+
+ value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
+
+ err = gpio_direction_output(panel->enable_gpio, value);
+ if (err < 0) {
+ dev_err(dev, "failed to setup GPIO%u: %d\n",
+ panel->enable_gpio, err);
+ goto free_gpio;
+ }
+ }
+
+ backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (backlight) {
+ panel->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!panel->backlight) {
+ err = -EPROBE_DEFER;
+ goto free_gpio;
+ }
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc) {
+ err = -EPROBE_DEFER;
+ goto free_backlight;
+ }
+ }
+
+ drm_panel_init(&panel->base);
+ panel->base.dev = dev;
+ panel->base.funcs = &panel_simple_funcs;
+
+ err = drm_panel_add(&panel->base);
+ if (err < 0)
+ goto free_ddc;
+
+ dev_set_drvdata(dev, panel);
+
+ return 0;
+
+free_ddc:
+ if (panel->ddc)
+ put_device(&panel->ddc->dev);
+free_backlight:
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+free_gpio:
+ if (gpio_is_valid(panel->enable_gpio))
+ gpio_free(panel->enable_gpio);
+
+ return err;
+}
+
+static int panel_simple_remove(struct device *dev)
+{
+ struct panel_simple *panel = dev_get_drvdata(dev);
+
+ drm_panel_detach(&panel->base);
+ drm_panel_remove(&panel->base);
+
+ panel_simple_disable(&panel->base);
+
+ if (panel->ddc)
+ put_device(&panel->ddc->dev);
+
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+
+ if (gpio_is_valid(panel->enable_gpio))
+ gpio_free(panel->enable_gpio);
+
+ return 0;
+}
+
+static const struct drm_display_mode auo_b101aw03_mode = {
+ .clock = 51450,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 156,
+ .hsync_end = 1024 + 156 + 8,
+ .htotal = 1024 + 156 + 8 + 156,
+ .vdisplay = 600,
+ .vsync_start = 600 + 16,
+ .vsync_end = 600 + 16 + 6,
+ .vtotal = 600 + 16 + 6 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101aw03 = {
+ .modes = &auo_b101aw03_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
+static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
+ .clock = 72070,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 58,
+ .hsync_end = 1366 + 58 + 58,
+ .htotal = 1366 + 58 + 58 + 58,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 4,
+ .vtotal = 768 + 4 + 4 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wa01a = {
+ .modes = &chunghwa_claa101wa01a_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 220,
+ .height = 120,
+ },
+};
+
+static const struct drm_display_mode chunghwa_claa101wb01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 16,
+ .vsync_end = 768 + 16 + 8,
+ .vtotal = 768 + 16 + 8 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wb01 = {
+ .modes = &chunghwa_claa101wb01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
+static const struct drm_display_mode samsung_ltn101nt05_mode = {
+ .clock = 54030,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 24,
+ .hsync_end = 1024 + 24 + 136,
+ .htotal = 1024 + 24 + 136 + 160,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 6,
+ .vtotal = 600 + 3 + 6 + 61,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn101nt05 = {
+ .modes = &samsung_ltn101nt05_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ .compatible = "auo,b101aw03",
+ .data = &auo_b101aw03,
+ }, {
+ .compatible = "chunghwa,claa101wa01a",
+ .data = &chunghwa_claa101wa01a
+ }, {
+ .compatible = "chunghwa,claa101wb01",
+ .data = &chunghwa_claa101wb01
+ }, {
+ .compatible = "samsung,ltn101nt05",
+ .data = &samsung_ltn101nt05,
+ }, {
+ .compatible = "simple-panel",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
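As a rough illustration of how these compatibles are meant to be used, a device-tree node for such a panel might look like the sketch below (phandle labels, the GPIO number and the chosen compatible are placeholders, not part of this patch); panel_simple_probe() above consumes the "power" supply, "enable-gpios", "backlight" and "ddc-i2c-bus" properties shown here:

/*
 *	panel: panel {
 *		compatible = "auo,b101aw03", "simple-panel";
 *		power-supply = <&vdd_panel>;
 *		enable-gpios = <&gpio 90 GPIO_ACTIVE_HIGH>;
 *		backlight = <&backlight>;
 *		ddc-i2c-bus = <&ddc>;
 *	};
 */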
+
+static int panel_simple_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_simple_probe(&pdev->dev, id->data);
+}
+
+static int panel_simple_platform_remove(struct platform_device *pdev)
+{
+ return panel_simple_remove(&pdev->dev);
+}
+
+static struct platform_driver panel_simple_platform_driver = {
+ .driver = {
+ .name = "panel-simple",
+ .owner = THIS_MODULE,
+ .of_match_table = platform_of_match,
+ },
+ .probe = panel_simple_platform_probe,
+ .remove = panel_simple_platform_remove,
+};
+
+struct panel_desc_dsi {
+ struct panel_desc desc;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+};
+
+static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
+ .clock = 157200,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 154,
+ .hsync_end = 1920 + 154 + 16,
+ .htotal = 1920 + 154 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 17,
+ .vsync_end = 1200 + 17 + 2,
+ .vtotal = 1200 + 17 + 2 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
+ .desc = {
+ .modes = &panasonic_vvx10f004b00_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ },
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+ {
+ .compatible = "panasonic,vvx10f004b00",
+ .data = &panasonic_vvx10f004b00
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ const struct panel_desc_dsi *desc;
+ const struct of_device_id *id;
+ int err;
+
+ id = of_match_node(dsi_of_match, dsi->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ desc = id->data;
+
+ err = panel_simple_probe(&dsi->dev, &desc->desc);
+ if (err < 0)
+ return err;
+
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ return panel_simple_remove(&dsi->dev);
+}
+
+static struct mipi_dsi_driver panel_simple_dsi_driver = {
+ .driver = {
+ .name = "panel-simple-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = dsi_of_match,
+ },
+ .probe = panel_simple_dsi_probe,
+ .remove = panel_simple_dsi_remove,
+};
+
+static int __init panel_simple_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&panel_simple_platform_driver);
+ if (err < 0)
+ return err;
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
+ if (err < 0) {
+ platform_driver_unregister(&panel_simple_platform_driver);
+ return err;
+ }
+ }
+
+ return 0;
+}
+module_init(panel_simple_init);
+
+static void __exit panel_simple_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
+
+ platform_driver_unregister(&panel_simple_platform_driver);
+}
+module_exit(panel_simple_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple Panels");
+MODULE_LICENSE("GPL and additional rights");
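For context, a minimal consumer-side sketch of how a display driver of this era could pick up a panel registered by the code above; the "panel" property name and the function name are assumptions, while the drm_panel helpers are the ones introduced alongside this driver:

#include <linux/device.h>
#include <linux/of.h>
#include <drm/drm_panel.h>

static int example_attach_panel(struct device *dev,
				struct drm_connector *connector)
{
	struct device_node *np;
	struct drm_panel *panel;

	/* hypothetical "panel" phandle pointing at a simple-panel node */
	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (!np)
		return -ENODEV;

	panel = of_drm_find_panel(np);	/* looks up the panel added by drm_panel_add() */
	of_node_put(np);
	if (!panel)
		return -EPROBE_DEFER;	/* panel driver not bound yet */

	drm_panel_attach(panel, connector);	/* sets panel->connector used by get_modes() */
	return drm_panel_enable(panel);		/* ends up in panel_simple_enable() */
}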
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 66ac0ff95f5a..38c2bb72e456 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -5,9 +5,11 @@ config DRM_QXL
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
- select DRM_TTM
+ select DRM_TTM
select CRC32
help
- QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
+ QXL virtual GPU for Spice virtualization desktop integration.
+ Do not enable this driver unless your distro ships a corresponding
+ X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index d70aafb83307..798bde2e5881 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -399,10 +399,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct qxl_bo *qobj;
int inc = 1;
+ drm_modeset_lock_all(fb->dev);
+
qobj = gem_to_qxl_bo(qxl_fb->obj);
/* if we aren't primary surface ignore this */
- if (!qobj->is_primary)
+ if (!qobj->is_primary) {
+ drm_modeset_unlock_all(fb->dev);
return 0;
+ }
if (!num_clips) {
num_clips = 1;
@@ -417,6 +421,9 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
clips, num_clips, inc);
+
+ drm_modeset_unlock_all(fb->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7bda32f68d3b..36ed40ba773f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -534,7 +534,7 @@ void qxl_debugfs_takedown(struct drm_minor *minor);
/* qxl_irq.c */
int qxl_irq_init(struct qxl_device *qdev);
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t qxl_irq_handler(int irq, void *arg);
/* qxl_fb.c */
int qxl_fb_init(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 7b95c75e9626..0bb86e6d41b4 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -200,7 +200,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
- if (DRM_COPY_FROM_USER(&reloc,
+ if (copy_from_user(&reloc,
&((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
sizeof(reloc))) {
ret = -EFAULT;
@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_qxl_command *commands =
(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ if (copy_from_user(&user_cmd, &commands[cmd_num],
sizeof(user_cmd)))
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 21393dc4700a..28f84b4fce32 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -25,7 +25,7 @@
#include "qxl_drv.h"
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e5ca498be920..fd88eb4a3f79 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -115,7 +115,7 @@ static void qxl_gc_work(struct work_struct *work)
qxl_garbage_collect(qdev);
}
-int qxl_device_init(struct qxl_device *qdev,
+static int qxl_device_init(struct qxl_device *qdev,
struct drm_device *ddev,
struct pci_dev *pdev,
unsigned long flags)
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index c451257f08fb..59459fe4e8c5 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -892,10 +892,10 @@ static int r128_cce_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ if (copy_to_user(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ if (copy_to_user(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 56eb5e3f5439..5bf3f5ff805d 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -154,7 +154,7 @@ extern int r128_do_cleanup_cce(struct drm_device *dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
-extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
extern void r128_driver_irq_preinstall(struct drm_device *dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
extern void r128_driver_irq_uninstall(struct drm_device *dev);
@@ -514,7 +514,7 @@ do { \
if (R128_VERBOSE) \
DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
dev_priv->ring.tail); \
- DRM_MEMORYBARRIER(); \
+ mb(); \
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
R128_READ(R128_PM4_BUFFER_DL_WPTR); \
} while (0)
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index a954c548201e..b0d0fd3e4376 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -33,6 +33,7 @@
#include <drm/drmP.h>
#include <drm/r128_drm.h>
+#include "r128_drv.h"
typedef struct drm_r128_init32 {
int func;
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 2ea4f09d2691..c2ae496babb7 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -44,7 +44,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
return atomic_read(&dev_priv->vbl_received);
}
-irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t r128_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 01dd9aef9f0e..e806dacd452f 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -895,31 +895,22 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+ if (copy_from_user(&x, depth->x, sizeof(x)))
return -EFAULT;
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+ if (copy_from_user(&y, depth->y, sizeof(y)))
return -EFAULT;
buffer_size = depth->n * sizeof(u32);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (buffer == NULL)
- return -ENOMEM;
- if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
- kfree(buffer);
- return -EFAULT;
- }
+ buffer = memdup_user(depth->buffer, buffer_size);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
mask_size = depth->n * sizeof(u8);
if (depth->mask) {
- mask = kmalloc(mask_size, GFP_KERNEL);
- if (mask == NULL) {
+ mask = memdup_user(depth->mask, mask_size);
+ if (IS_ERR(mask)) {
kfree(buffer);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
- kfree(buffer);
- kfree(mask);
- return -EFAULT;
+ return PTR_ERR(mask);
}
for (i = 0; i < count; i++, x++) {
@@ -999,46 +990,33 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
kfree(x);
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+ if (copy_from_user(x, depth->x, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
- if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
+ if (copy_from_user(y, depth->y, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (buffer == NULL) {
- kfree(x);
- kfree(y);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+ buffer = memdup_user(depth->buffer, buffer_size);
+ if (IS_ERR(buffer)) {
kfree(x);
kfree(y);
- kfree(buffer);
- return -EFAULT;
+ return PTR_ERR(buffer);
}
if (depth->mask) {
mask_size = depth->n * sizeof(u8);
- mask = kmalloc(mask_size, GFP_KERNEL);
- if (mask == NULL) {
- kfree(x);
- kfree(y);
- kfree(buffer);
- return -ENOMEM;
- }
- if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+ mask = memdup_user(depth->mask, mask_size);
+ if (IS_ERR(mask)) {
kfree(x);
kfree(y);
kfree(buffer);
- kfree(mask);
- return -EFAULT;
+ return PTR_ERR(mask);
}
for (i = 0; i < count; i++) {
@@ -1107,9 +1085,9 @@ static int r128_cce_dispatch_read_span(struct drm_device *dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+ if (copy_from_user(&x, depth->x, sizeof(x)))
return -EFAULT;
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+ if (copy_from_user(&y, depth->y, sizeof(y)))
return -EFAULT;
BEGIN_RING(7);
@@ -1162,12 +1140,12 @@ static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
kfree(x);
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+ if (copy_from_user(x, depth->x, xbuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
}
- if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+ if (copy_from_user(y, depth->y, ybuf_size)) {
kfree(x);
kfree(y);
return -EFAULT;
@@ -1524,7 +1502,7 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1622,7 +1600,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 0b9621c9aeea..daa4dd375ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static const u32 vga_control_regs[6] =
+{
+ AVIVO_D1VGA_CONTROL,
+ AVIVO_D2VGA_CONTROL,
+ EVERGREEN_D3VGA_CONTROL,
+ EVERGREEN_D4VGA_CONTROL,
+ EVERGREEN_D5VGA_CONTROL,
+ EVERGREEN_D6VGA_CONTROL,
+};
+
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
+ u32 vga_control = 0;
memset(&args, 0, sizeof(args));
+ if (ASIC_IS_DCE8(rdev)) {
+ vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
+ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
+ }
+
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (ASIC_IS_DCE8(rdev)) {
+ WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
+ }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
@@ -423,7 +443,17 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
- if (!enable) {
+ if (enable) {
+ /* Don't mess with SS if percentage is 0 or external ss.
+ * SS is already disabled previously, and disabling it
+ * again can cause display problems if the pll is already
+ * programmed.
+ */
+ if (ss->percentage == 0)
+ return;
+ if (ss->type & ATOM_EXTERNAL_SS_MASK)
+ return;
+ } else {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
@@ -459,8 +489,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
- args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -480,8 +508,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
- args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -503,8 +529,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
args.lvds_ss_2.ucEnable = enable;
} else {
- if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
- (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+ if (enable == ATOM_DISABLE) {
atombios_disable_ss(rdev, pll_id);
return;
}
@@ -534,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- int bpc = radeon_get_monitor_bpc(connector);
+ int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@@ -938,11 +963,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
- } else
+ } else {
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
+ }
+ /* disable spread spectrum on DCE3 DP */
+ radeon_crtc->ss_enabled = false;
}
break;
case ATOM_ENCODER_MODE_LVDS:
@@ -1039,15 +1067,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
- u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+ u32 amount = (((fb_div * 10) + frac_fb_div) *
+ (u32)radeon_crtc->ss.percentage) /
+ (100 * (u32)radeon_crtc->ss.percentage_divider);
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
- step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
- step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
radeon_crtc->ss.step = step_size;
}
@@ -1146,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
/* Set NUM_BANKS. */
- if (rdev->family >= CHIP_BONAIRE) {
+ if (rdev->family >= CHIP_TAHITI) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
@@ -1164,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ if (rdev->family >= CHIP_BONAIRE)
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ else
+ num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
- /* SI and older. */
- if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
+ /* NI and older. */
+ if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1743,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE41(rdev)) {
+ /* Don't share PLLs on DCE4.1 chips */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ }
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
@@ -1770,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ } else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fb3ae07a1469..4ad7643fce5f 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -157,21 +157,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
msg[0] = address;
msg[1] = address >> 8;
- msg[2] = AUX_NATIVE_WRITE << 4;
+ msg[2] = DP_AUX_NATIVE_WRITE << 4;
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
return send_bytes;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(400);
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else
return -EIO;
}
@@ -191,20 +192,21 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
msg[0] = address;
msg[1] = address >> 8;
- msg[2] = AUX_NATIVE_READ << 4;
+ msg[2] = DP_AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
- if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ ack >>= 4;
+ if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
return ret;
- else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
- udelay(400);
+ else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+ usleep_range(400, 500);
else if (ret == 0)
return -EPROTO;
else
@@ -246,12 +248,12 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/* Set up the command byte */
if (mode & MODE_I2C_READ)
- msg[2] = AUX_I2C_READ << 4;
+ msg[2] = DP_AUX_I2C_READ << 4;
else
- msg[2] = AUX_I2C_WRITE << 4;
+ msg[2] = DP_AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
- msg[2] |= AUX_I2C_MOT << 4;
+ msg[2] |= DP_AUX_I2C_MOT << 4;
msg[0] = address;
msg[1] = address >> 8;
@@ -272,7 +274,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (retry = 0; retry < 4; retry++) {
+ for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
@@ -282,35 +284,35 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return ret;
}
- switch (ack & AUX_NATIVE_REPLY_MASK) {
- case AUX_NATIVE_REPLY_ACK:
+ switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+ case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
- case AUX_NATIVE_REPLY_NACK:
+ case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
- case AUX_NATIVE_REPLY_DEFER:
+ case DP_AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
- udelay(400);
+ usleep_range(500, 600);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
- switch (ack & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
+ switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
+ case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return ret;
- case AUX_I2C_REPLY_NACK:
+ case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
+ case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
- udelay(400);
+ usleep_range(400, 500);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@@ -671,9 +673,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
u8 tmp;
/* power up the sink */
- if (dp_info->dpcd[0] >= 0x11)
+ if (dp_info->dpcd[0] >= 0x11) {
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(1000, 2000);
+ }
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d61571f49..607dc14d195e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
- if (connector)
- bpc = radeon_get_monitor_bpc(connector);
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
switch (bpc) {
case 0:
@@ -1313,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index f685035dbe39..b5162c3b6111 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,8 +27,6 @@
#include "radeon.h"
#include "atom.h"
-extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 9b6950d9b3c0..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -49,6 +50,7 @@ struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
//********* BARTS **************//
static const u32 barts_cgcg_cgls_default[] =
@@ -2510,21 +2512,6 @@ int btc_dpm_enable(struct radeon_device *rdev)
if (eg_pi->ls_clock_gating)
btc_ls_clock_gating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
btc_init_stutter_mode(rdev);
@@ -2576,7 +2563,11 @@ void btc_dpm_disable(struct radeon_device *rdev)
void btc_dpm_setup_asic(struct radeon_device *rdev)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ int r;
+ r = ni_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
rv770_get_memory_type(rdev);
rv740_read_clock_registers(rdev);
btc_read_arb_registers(rdev);
@@ -2766,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 1ed479976358..8d49104ca6c2 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -171,8 +171,7 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
-extern void cik_update_cg(struct radeon_device *rdev,
- u32 block, bool enable);
+extern int ci_mc_load_microcode(struct radeon_device *rdev);
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
struct atom_voltage_table_entry *voltage_table,
@@ -4503,8 +4502,8 @@ static void ci_get_memory_type(struct radeon_device *rdev)
}
-void ci_update_current_ps(struct radeon_device *rdev,
- struct radeon_ps *rps)
+static void ci_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct ci_ps *new_ps = ci_get_ps(rps);
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4514,8 +4513,8 @@ void ci_update_current_ps(struct radeon_device *rdev,
pi->current_rps.ps_priv = &pi->current_ps;
}
-void ci_update_requested_ps(struct radeon_device *rdev,
- struct radeon_ps *rps)
+static void ci_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct ci_ps *new_ps = ci_get_ps(rps);
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4549,6 +4548,11 @@ void ci_dpm_post_set_power_state(struct radeon_device *rdev)
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
+ int r;
+
+ r = ci_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
ci_read_clock_registers(rdev);
ci_get_memory_type(rdev);
ci_enable_acpi_power_management(rdev);
@@ -4561,13 +4565,6 @@ int ci_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (ci_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
@@ -4665,6 +4662,18 @@ int ci_dpm_enable(struct radeon_device *rdev)
DRM_ERROR("ci_enable_power_containment failed\n");
return ret;
}
+
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ci_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
@@ -4685,19 +4694,8 @@ int ci_dpm_enable(struct radeon_device *rdev)
#endif
}
- ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
ci_dpm_powergate_uvd(rdev, true);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
- ci_update_current_ps(rdev, boot_ps);
-
return 0;
}
@@ -4706,12 +4704,6 @@ void ci_dpm_disable(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ci_dpm_powergate_uvd(rdev, false);
if (!ci_is_smc_running(rdev))
@@ -4742,13 +4734,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &pi->current_rps;
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
if (pi->pcie_performance_request)
ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
@@ -4804,13 +4789,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
if (pi->pcie_performance_request)
ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
@@ -5023,8 +5001,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
return 0;
}
-int ci_get_vbios_boot_values(struct radeon_device *rdev,
- struct ci_vbios_boot_state *boot_state)
+static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+ struct ci_vbios_boot_state *boot_state)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 9c745dd22438..8debc9d47362 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -28,6 +28,7 @@
#include "cikd.h"
#include "ppsmc.h"
#include "radeon_ucode.h"
+#include "ci_dpm.h"
static int ci_set_smc_sram_address(struct radeon_device *rdev,
u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e950fabd7f5e..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev,
* Load the GDDR MC ucode into the hw (CIK).
* Returns 0 on success, error on failure.
*/
-static int ci_mc_load_microcode(struct radeon_device *rdev)
+int ci_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 running, blackout = 0;
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
}
/**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
*
* @rdev: radeon_device pointer
* @max_rb_num: max RBs (render backends) for the asic
@@ -3487,6 +3487,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 ref_and_mask;
+
+ switch (ring->idx) {
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ default:
+ switch (ring->me) {
+ case 0:
+ ref_and_mask = CP2 << ring->pipe;
+ break;
+ case 1:
+ ref_and_mask = CP6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ break;
+ case RADEON_RING_TYPE_GFX_INDEX:
+ ref_and_mask = CP0;
+ break;
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+ WAIT_REG_MEM_FUNCTION(3) | /* == */
+ WAIT_REG_MEM_ENGINE(1))); /* pfp */
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, ref_and_mask);
+ radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
* cik_fence_gfx_ring_emit - emit a fence on the gfx ring
*
* @rdev: radeon_device pointer
@@ -3512,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
@@ -3550,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
- /* We should be using the new WAIT_REG_MEM special op packet here
- * but it causes the CP to hang
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3566,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
-/* TODO: figure out why semaphore cause lockups */
-#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
@@ -3576,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
return true;
-#else
- return false;
-#endif
}
/**
@@ -3816,6 +3840,8 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
@@ -4014,18 +4040,50 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
return r;
}
+
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
u32 rptr;
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(CP_RB0_RPTR);
+
+ return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(CP_RB0_WPTR);
+
+ return wptr;
+}
+
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+}
+
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
if (rdev->wb.enabled) {
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
} else {
mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4037,13 +4095,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
return rptr;
}
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
u32 wptr;
if (rdev->wb.enabled) {
- wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+ /* XXX check if swapping is necessary on BE */
+ wptr = rdev->wb.wb[ring->wptr_offs/4];
} else {
mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4055,10 +4114,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
return wptr;
}
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
+void cik_compute_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
- rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+ /* XXX check if swapping is necessary on BE */
+ rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
@@ -4074,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
WREG32(CP_MEC_CNTL, 0);
- else
+ else {
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
udelay(50);
}
@@ -4852,6 +4915,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
cik_print_gpu_status_regs(rdev);
}
+struct kv_reset_save_regs {
+ u32 gmcon_reng_execute;
+ u32 gmcon_misc;
+ u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+ struct kv_reset_save_regs *save)
+{
+ save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+ save->gmcon_misc = RREG32(GMCON_MISC);
+ save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+ WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+ WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+ STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+ struct kv_reset_save_regs *save)
+{
+ int i;
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x210000);
+ WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x21003);
+ WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+ WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0);
+ WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x420000);
+ WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x120202);
+ WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+ WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+ WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+ for (i = 0; i < 5; i++)
+ WREG32(GMCON_PGFSM_WRITE, 0);
+
+ WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+ WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+ WREG32(GMCON_MISC3, save->gmcon_misc3);
+ WREG32(GMCON_MISC, save->gmcon_misc);
+ WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ struct kv_reset_save_regs kv_save = { 0 };
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* disable cg/pg */
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
+
+ /* Disable GFX parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ /* Disable MEC parsing/prefetching */
+ WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+ /* sdma0 */
+ tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ /* sdma1 */
+ tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc, disable cp internal ints */
+ cik_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+
+ if (rdev->flags & RADEON_IS_IGP)
+ kv_save_regs_for_reset(rdev, &kv_save);
+
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* reset */
+ radeon_pci_config_reset(rdev);
+
+ udelay(100);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ /* does asic init need to be run first??? */
+ if (rdev->flags & RADEON_IS_IGP)
+ kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
/**
* cik_asic_reset - soft reset GPU
*
@@ -4870,10 +5087,17 @@ int cik_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
cik_gpu_soft_reset(rdev, reset_mask);
reset_mask = cik_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ cik_gpu_pci_config_reset(rdev);
+
+ reset_mask = cik_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -5138,20 +5362,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
- /* TC cache setup ??? */
- WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
- WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
- WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
- WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
- WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
- WREG32(TC_CFG_L2_STORE_POLICY0, 0);
- WREG32(TC_CFG_L2_STORE_POLICY1, 0);
- WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
- WREG32(TC_CFG_L1_VOLATILE, 0);
- WREG32(TC_CFG_L2_VOLATILE, 0);
-
if (rdev->family == CHIP_KAVERI) {
u32 tmp = RREG32(CHUB_CONTROL);
tmp &= ~BYPASS_VM;
@@ -5367,16 +5577,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- /* We should be using the WAIT_REG_MEM packet here like in
- * cik_fence_ring_emit(), but it causes the CP to hang in this
- * context...
- */
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
+ cik_hdp_flush_cp_ring_emit(rdev, ridx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -7503,26 +7704,7 @@ static int cik_startup(struct radeon_device *rdev)
cik_mc_program(rdev);
- if (rdev->flags & RADEON_IS_IGP) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
- r = cik_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
- !rdev->mc_fw) {
- r = cik_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
+ if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
r = ci_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -7627,7 +7809,6 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7636,7 +7817,6 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7648,7 +7828,6 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
@@ -7660,16 +7839,12 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
- SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
- SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -7685,7 +7860,6 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -7731,6 +7905,9 @@ int cik_resume(struct radeon_device *rdev)
/* init golden registers */
cik_init_golden_registers(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
@@ -7754,6 +7931,7 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
@@ -7835,6 +8013,30 @@ int cik_init(struct radeon_device *rdev)
if (r)
return r;
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+ r = cik_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+ !rdev->mc_fw) {
+ r = cik_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7915,6 +8117,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_fini_pg(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d08b83c6267b..94626ea90fa5 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -52,6 +52,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
*/
/**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr, reg;
+
+ if (rdev->wb.enabled) {
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ } else {
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+ rptr = RREG32(reg);
+ }
+
+ return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+ return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+ else
+ reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+ WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
* cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
@@ -88,6 +157,35 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
}
/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+ int ridx)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* reference */
+ radeon_ring_write(ring, ref_and_mask); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
* cik_sdma_fence_ring_emit - emit a fence on the DMA ring
*
* @rdev: radeon_device pointer
@@ -111,12 +209,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
/* generate an interrupt */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}
/**
@@ -157,7 +250,9 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
u32 rb_cntl, reg_offset;
int i;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
for (i = 0; i < 2; i++) {
if (i == 0)
@@ -169,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
}
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
@@ -196,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
+ if (enable == false) {
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ }
+
for (i = 0; i < 2; i++) {
if (i == 0)
reg_offset = SDMA0_REGISTER_OFFSET;
@@ -288,7 +390,9 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
}
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -323,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
if (!rdev->sdma_fw)
return -EINVAL;
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
/* halt the MEs */
cik_sdma_enable(rdev, false);
@@ -395,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
*/
void cik_sdma_fini(struct radeon_device *rdev)
{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
/* halt the MEs */
cik_sdma_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
@@ -747,12 +844,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- /* We should be using the new POLL_REG_MEM special op packet here
- * but it causes sDMA to hang sometimes
- */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
- radeon_ring_write(ring, 0);
+ cik_sdma_hdp_flush_ring_emit(rdev, ridx);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
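
A note on the new SDMA ring accessors above, illustrative only: the RB_RPTR/RB_WPTR registers hold byte offsets while the ring code tracks dword indices, hence the & 0x3fffc mask (dword-aligned ring window) and the shift by two. The HDP flush likewise moves from an SRBM_WRITE to a POLL_REG_MEM packet that writes GPU_HDP_FLUSH_REQ and polls GPU_HDP_FLUSH_DONE for the engine's bit. The pointer conversion, spelled out:

/* Sketch of the conversion used by cik_sdma_get_rptr()/get_wptr()/set_wptr():
 * hardware byte offset <-> software dword index.
 */
static inline u32 example_sdma_ptr_to_index(u32 hw_ptr)
{
        return (hw_ptr & 0x3fffc) >> 2;         /* e.g. 0x104 bytes -> index 0x41 */
}

static inline u32 example_sdma_index_to_ptr(u32 index)
{
        return (index << 2) & 0x3fffc;          /* inverse, as in cik_sdma_set_wptr() */
}
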
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 5964af5e5b2d..98bae9d7b74d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -724,6 +724,17 @@
#define ATC_MISC_CG 0x3350
+#define GMCON_RENG_EXECUTE 0x3508
+#define RENG_EXECUTE_ON_PWR_UP (1 << 0)
+#define GMCON_MISC 0x350c
+#define RENG_EXECUTE_ON_REG_UPDATE (1 << 11)
+#define STCTRL_STUTTER_EN (1 << 16)
+
+#define GMCON_PGFSM_CONFIG 0x3538
+#define GMCON_PGFSM_WRITE 0x353c
+#define GMCON_PGFSM_READ 0x3540
+#define GMCON_MISC3 0x3544
+
#define MC_SEQ_CNTL_3 0x3600
# define CAC_EN (1 << 31)
#define MC_SEQ_G5PDX_CTRL 0x3604
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 920e1e4a52c5..cf783fc0ef21 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1905,21 +1905,6 @@ int cypress_dpm_enable(struct radeon_device *rdev)
if (pi->mg_clock_gating)
cypress_mg_clock_gating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
return 0;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d359901..94e858751994 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- AUDIO_ENABLED);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ enable ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio. it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 9702e55e924e..27b0ff16082e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev);
extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
extern u32 cik_get_csb_size(struct radeon_device *rdev);
extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
static const u32 evergreen_golden_registers[] =
{
@@ -1679,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
- break;
+ break;
default:
break;
}
@@ -3867,6 +3868,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_print_gpu_status_regs(rdev);
}
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ udelay(50);
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc */
+ r600_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* set mclk/sclk to bypass */
+ rv770_set_clk_bypass_mode(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int evergreen_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -3876,10 +3919,17 @@ int evergreen_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
evergreen_gpu_soft_reset(rdev, reset_mask);
reset_mask = evergreen_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ evergreen_gpu_pci_config_reset(rdev);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -4298,8 +4348,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- /* only one DAC on DCE6 */
- if (!ASIC_IS_DCE6(rdev))
+ /* only one DAC on DCE5 */
+ if (!ASIC_IS_DCE5(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -5109,27 +5159,12 @@ static int evergreen_startup(struct radeon_device *rdev)
evergreen_mc_program(rdev);
- if (ASIC_IS_DCE5(rdev)) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
}
if (rdev->flags & RADEON_IS_AGP) {
@@ -5199,14 +5234,12 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
@@ -5224,7 +5257,6 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -5267,6 +5299,9 @@ int evergreen_resume(struct radeon_device *rdev)
/* init golden registers */
evergreen_init_golden_registers(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
@@ -5281,6 +5316,7 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
@@ -5357,6 +5393,27 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -5409,6 +5466,7 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
@@ -5418,9 +5476,9 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
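
The evergreen_asic_reset() hunk above now escalates: soft reset first, then the new PCI config reset when the GPU is still hung and the radeon_hard_reset module parameter allows it. Condensed for illustration (the helpers are the ones defined in this file; this is a sketch, not a drop-in replacement):

/* Sketch of the reset escalation added to evergreen_asic_reset(). */
int example_evergreen_reset(struct radeon_device *rdev)
{
        u32 busy = evergreen_gpu_check_soft_reset(rdev);

        if (busy)
                r600_set_bios_scratch_engine_hung(rdev, true);

        evergreen_gpu_soft_reset(rdev, busy);           /* cheap attempt */
        busy = evergreen_gpu_check_soft_reset(rdev);

        if (busy && radeon_hard_reset)                  /* escalate */
                evergreen_gpu_pci_config_reset(rdev);

        busy = evergreen_gpu_check_soft_reset(rdev);
        if (!busy)
                r600_set_bios_scratch_engine_hung(rdev, false);
        return 0;
}
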
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index eb8ac315f92f..c7cac07f139b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_028C70_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5cef4cf1..05b0c95813fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8a4e641f0e3c..a0f63ff5a5e9 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -33,6 +33,7 @@
#define EVERGREEN_PIF_PHY0_DATA 0xc
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
+#define EVERGREEN_MM_INDEX_HI 0x18
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8cfe902..3a03ba37d043 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 17f990798992..f9c7963b3ee6 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -82,12 +82,16 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_STATUS 0x60c
+#define SPLL_CHG_STATUS (1 << 1)
#define MPLL_CNTL_MODE 0x61c
+# define MPLL_MCLK_SEL (1 << 11)
# define SS_SSEN (1 << 24)
# define SS_DSMODE_EN (1 << 25)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b41905573cd2..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1126,11 +1126,6 @@ int kv_dpm_enable(struct radeon_device *rdev)
struct kv_power_info *pi = kv_get_pi(rdev);
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
ret = kv_process_firmware_header(rdev);
if (ret) {
DRM_ERROR("kv_process_firmware_header failed\n");
@@ -1215,6 +1210,21 @@ int kv_dpm_enable(struct radeon_device *rdev)
kv_reset_acp_boot_level(rdev);
+ ret = kv_smc_bapm_enable(rdev, false);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return ret;
+}
+
+int kv_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret = 0;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1226,35 +1236,17 @@ int kv_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
- ret = kv_smc_bapm_enable(rdev, false);
- if (ret) {
- DRM_ERROR("kv_smc_bapm_enable failed\n");
- return ret;
- }
-
/* powerdown unused blocks for now */
kv_dpm_powergate_acp(rdev, true);
kv_dpm_powergate_samu(rdev, true);
kv_dpm_powergate_vce(rdev, true);
kv_dpm_powergate_uvd(rdev, true);
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), true);
-
- kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
return ret;
}
void kv_dpm_disable(struct radeon_device *rdev)
{
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
kv_smc_bapm_enable(rdev, false);
/* powerup blocks */
@@ -1779,11 +1771,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
/*struct radeon_ps *old_ps = &pi->current_rps;*/
int ret;
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-
if (pi->bapm_enable) {
ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
if (ret) {
@@ -1849,11 +1836,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
}
- cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
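
The kv_dpm hunks split bring-up in two: kv_dpm_enable() now stops after programming the SMC and the boot state, while the thermal-interrupt setup and block powergating move into the new kv_dpm_late_enable(), which can only run once the IRQ handler is installed; the cik_update_cg() toggles are dropped from these paths. A sketch of the assumed call order (the actual caller lives in the shared radeon_pm code, not shown in this diff):

/* Sketch of the enable / late-enable split; order assumed from the hunks. */
static int example_dpm_bring_up(struct radeon_device *rdev)
{
        int ret;

        ret = kv_dpm_enable(rdev);              /* tables, SMC, boot state */
        if (ret)
                return ret;

        /* ... interrupts are installed by the asic startup path here ... */

        return kv_dpm_late_enable(rdev);        /* thermal irq + powergating */
}
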
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index af85299f2126..4a85bb644e24 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -655,7 +655,7 @@ static int parser_auth(struct table *t, const char *filename)
/* first line will contain the last register
* and gpu name */
- sscanf(buf, "%s %s", gpu_name, last_reg_s);
+ sscanf(buf, "%9s %9s", gpu_name, last_reg_s);
t->gpu_prefix = gpu_name;
last_reg = strtol(last_reg_s, NULL, 16);
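
The mkregtable.c change is a plain overflow fix: an unbounded %s writes an arbitrarily long token plus a NUL, so a long first line could run past the destination arrays, while %9s stops after nine characters and leaves room for the terminator (the sizes below assume the 10-byte buffers the width of 9 implies). Standalone illustration:

/* Sketch of the bounded scan; buffer sizes are an assumption for illustration. */
#include <stdio.h>

static void example_safe_scan(const char *line)
{
        char gpu_name[10], last_reg_s[10];

        /* at most 9 characters each, plus the terminating NUL */
        sscanf(line, "%9s %9s", gpu_name, last_reg_s);
}
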
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index f59a9e9fccf8..bf6300cfd62d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,6 +174,7 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -1330,13 +1331,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -1352,6 +1352,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1376,14 +1378,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
(ib->vm ? (ib->vm->id << 24) : 0));
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -1391,13 +1390,63 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else {
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+ rptr = RREG32(CP_RB0_RPTR);
+ else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+ rptr = RREG32(CP_RB1_RPTR);
+ else
+ rptr = RREG32(CP_RB2_RPTR);
+ }
+
+ return rptr;
+}
+
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+ wptr = RREG32(CP_RB0_WPTR);
+ else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+ wptr = RREG32(CP_RB1_WPTR);
+ else
+ wptr = RREG32(CP_RB2_WPTR);
+
+ return wptr;
+}
+
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+ } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
+ WREG32(CP_RB1_WPTR, ring->wptr);
+ (void)RREG32(CP_RB1_WPTR);
+ } else {
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ (void)RREG32(CP_RB2_WPTR);
+ }
+}
+
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1526,6 +1575,16 @@ static int cayman_cp_resume(struct radeon_device *rdev)
CP_RB1_BASE,
CP_RB2_BASE
};
+ static const unsigned cp_rb_rptr[] = {
+ CP_RB0_RPTR,
+ CP_RB1_RPTR,
+ CP_RB2_RPTR
+ };
+ static const unsigned cp_rb_wptr[] = {
+ CP_RB0_WPTR,
+ CP_RB1_WPTR,
+ CP_RB2_WPTR
+ };
struct radeon_ring *ring;
int i, r;
@@ -1584,8 +1643,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
ring->rptr = ring->wptr = 0;
- WREG32(ring->rptr_reg, ring->rptr);
- WREG32(ring->wptr_reg, ring->wptr);
+ WREG32(cp_rb_rptr[i], ring->rptr);
+ WREG32(cp_rb_wptr[i], ring->wptr);
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1605,6 +1664,9 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return r;
}
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -1831,8 +1893,10 @@ int cayman_asic_reset(struct radeon_device *rdev)
reset_mask = cayman_gpu_check_soft_reset(rdev);
- if (!reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, false);
+ if (reset_mask)
+ evergreen_gpu_pci_config_reset(rdev);
+
+ r600_set_bios_scratch_engine_hung(rdev, false);
return 0;
}
@@ -1878,23 +1942,7 @@ static int cayman_startup(struct radeon_device *rdev)
evergreen_mc_program(rdev);
- if (rdev->flags & RADEON_IS_IGP) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
+ if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -1981,23 +2029,18 @@ static int cayman_startup(struct radeon_device *rdev)
evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2016,7 +2059,6 @@ static int cayman_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -2063,6 +2105,9 @@ int cayman_resume(struct radeon_device *rdev)
/* init golden registers */
ni_init_golden_registers(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
@@ -2075,6 +2120,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
if (ASIC_IS_DCE6(rdev))
dce6_audio_fini(rdev);
else
@@ -2145,6 +2191,27 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -2204,6 +2271,7 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
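
The cayman fence and IB hunks above replace the SET_CONFIG_REG write to CP_COHER_CNTL2 with a single SURFACE_SYNC packet: PACKET3_ENGINE_ME selects the engine, PACKET3_FULL_CACHE_ENA widens the flush, and for IBs the vmid now rides in the top byte of the last dword next to the poll interval. The emitted dwords, shown standalone for clarity (illustrative, not additional patch content):

/* Sketch of the consolidated cache-flush packet cayman_ring_ib_execute() emits. */
static void example_cayman_surface_sync(struct radeon_ring *ring, unsigned vm_id)
{
        u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
                            PACKET3_SH_ACTION_ENA;

        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
        radeon_ring_write(ring, 0xFFFFFFFF);            /* CP_COHER_SIZE */
        radeon_ring_write(ring, 0);                     /* CP_COHER_BASE */
        radeon_ring_write(ring, (vm_id << 24) | 10);    /* vmid + poll interval */
}
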
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index bdeb65ed3658..7cf96b15377f 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -43,6 +43,75 @@ u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
*/
/**
+ * cayman_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr, reg;
+
+ if (rdev->wb.enabled) {
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ } else {
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
+
+ rptr = RREG32(reg);
+ }
+
+ return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+ return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (cayman+).
+ */
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+ else
+ reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+ WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
@@ -88,7 +157,9 @@ void cayman_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
@@ -190,7 +261,9 @@ int cayman_dma_resume(struct radeon_device *rdev)
}
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+ (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
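
These cayman_dma_get_rptr()/get_wptr()/set_wptr() helpers (and the cik/r100/r600 counterparts elsewhere in this diff) exist so the ring code can stop carrying per-ring rptr_reg/wptr_reg values and call back into the asic instead, which is why the radeon_ring_init() calls throughout this series lose their register arguments. A sketch of the wiring this implies; the field names are assumptions for illustration, not quoted from radeon_asic.h:

/* Assumed shape of the per-ring callback table (illustrative only). */
static struct radeon_asic_ring example_cayman_dma_ring = {
        .ib_execute = &cayman_dma_ring_ib_execute,
        .get_rptr = &cayman_dma_get_rptr,
        .get_wptr = &cayman_dma_get_wptr,
        .set_wptr = &cayman_dma_set_wptr,
};
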
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 49c4d48f54d6..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -720,6 +720,8 @@ static const u32 cayman_sysls_enable[] =
struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
struct ni_power_info *pi = rdev->pm.dpm.priv;
@@ -2586,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3565,7 +3567,11 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
void ni_dpm_setup_asic(struct radeon_device *rdev)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ int r;
+ r = ni_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
ni_read_clock_registers(rdev);
btc_read_arb_registers(rdev);
rv770_get_memory_type(rdev);
@@ -3710,21 +3716,6 @@ int ni_dpm_enable(struct radeon_device *rdev)
if (eg_pi->ls_clock_gating)
ni_ls_clockgating_enable(rdev, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
-
- ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
-
rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
ni_update_current_ps(rdev, boot_ps);
@@ -3954,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3970,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4331,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 22421bc80c0d..d996033c243e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1154,6 +1154,7 @@
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
+# define PACKET3_ENGINE_ME (1 << 31)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index da43ab328833..2d532996c697 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -23,7 +23,7 @@
#ifndef _PPTABLE_H
#define _PPTABLE_H
-#pragma pack(push, 1)
+#pragma pack(1)
typedef struct _ATOM_PPLIB_THERMALCONTROLLER
@@ -677,6 +677,6 @@ typedef struct _ATOM_PPLIB_PPM_Table
ULONG ulTjmax;
} ATOM_PPLIB_PPM_Table;
-#pragma pack(pop)
+#pragma pack()
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10abc4d5a6cc..3cc78bb66042 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1050,6 +1050,36 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
return err;
}
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(RADEON_CP_RB_RPTR);
+
+ return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(RADEON_CP_RB_WPTR);
+
+ return wptr;
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+ (void)RREG32(RADEON_CP_RB_WPTR);
+}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1102,7 +1132,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r) {
return r;
@@ -3923,6 +3952,7 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3933,6 +3963,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -4039,6 +4070,9 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
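
The new r100_gfx_set_wptr() above writes the doorbell and then reads the same register back; MMIO writes can be posted, so the read forces the write pointer out to the chip before the caller continues. The idiom in isolation (illustrative):

/* Sketch of the posted-write flush used by the *_gfx_set_wptr() helpers. */
static void example_post_write_flush(struct radeon_device *rdev, u32 reg, u32 val)
{
        WREG32(reg, val);
        (void)RREG32(reg);      /* read back to flush the posted write */
}
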
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d8dd269b9159..0b658b34b33a 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1440,6 +1440,7 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1452,6 +1453,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1538,6 +1540,9 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 60170ea5e3a2..84b1d5367a11 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
for (i = 0; i < nr; ++i) {
- if (DRM_COPY_FROM_USER
+ if (copy_from_user
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return -EFAULT;
@@ -928,12 +928,12 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
*buf_idx *= 2; /* 8 bytes per buf */
- if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ if (copy_to_user(ref_age_base + *buf_idx,
&dev_priv->scratch_ages[header.scratch.reg],
sizeof(u32)))
return -EINVAL;
- if (DRM_COPY_FROM_USER(&h_pending,
+ if (copy_from_user(&h_pending,
ref_age_base + *buf_idx + 1,
sizeof(u32)))
return -EINVAL;
@@ -943,7 +943,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
h_pending--;
- if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ if (copy_to_user(ref_age_base + *buf_idx + 1,
&h_pending,
sizeof(u32)))
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6edf2b3a52b4..802b19220a21 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -335,6 +335,7 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -348,6 +349,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -444,6 +446,9 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e1aece73b370..98d6053c36c6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -312,6 +312,9 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9ad06732a78b..647ef4079217 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -105,6 +105,7 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
/**
* r600_get_xclk - get the xclk
@@ -1644,6 +1645,67 @@ static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
r600_print_gpu_status_regs(rdev);
}
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ mdelay(50);
+
+ /* set mclk/sclk to bypass */
+ if (rdev->family >= CHIP_RV770)
+ rv770_set_clk_bypass_mode(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* disable mem access */
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = RREG32(BUS_CNTL);
+ tmp |= VGA_COHE_SPEC_TIMER_DIS;
+ WREG32(BUS_CNTL, tmp);
+
+ tmp = RREG32(BIF_SCRATCH0);
+
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ mdelay(1);
+
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = SOFT_RESET_BIF;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ mdelay(1);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -1653,10 +1715,17 @@ int r600_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
r600_gpu_soft_reset(rdev, reset_mask);
reset_mask = r600_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ r600_gpu_pci_config_reset(rdev);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -2185,7 +2254,8 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -2382,6 +2452,36 @@ out:
return err;
}
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(R600_CP_RB_RPTR);
+
+ return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(R600_CP_RB_WPTR);
+
+ return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(R600_CP_RB_WPTR, ring->wptr);
+ (void)RREG32(R600_CP_RB_WPTR);
+}
+
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -2513,6 +2613,10 @@ int r600_cp_resume(struct radeon_device *rdev)
ring->ready = false;
return r;
}
+
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -2607,14 +2711,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2628,9 +2735,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2775,14 +2880,6 @@ static int r600_startup(struct radeon_device *rdev)
r600_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -2803,12 +2900,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
- return r;
- }
-
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -2826,18 +2917,10 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- if (r)
- return r;
-
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2845,10 +2928,6 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_dma_resume(rdev);
- if (r)
- return r;
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2889,6 +2968,9 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
@@ -2902,9 +2984,9 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2970,12 +3052,20 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2988,7 +3078,6 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3002,9 +3091,9 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3903,6 +3992,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b886979..bffac10c4296 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index d8eb48bff0ed..8c9b7e26533c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2515,7 +2515,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
buf = radeon_freelist_get(dev);
if (!buf) {
DRM_DEBUG("EAGAIN\n");
- if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ if (copy_to_user(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
}
@@ -2528,7 +2528,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
buffer =
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
- if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+ if (copy_from_user(buffer, data, pass_size)) {
DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 5dceea6f71ae..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_0280A0_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -1004,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_008C64_SQ_VSTMP_RING_SIZE:
case R_0288C8_SQ_GS_VERT_ITEMSIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
@@ -2386,7 +2403,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
- if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+ if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
r = -EFAULT;
r600_cs_parser_fini(&parser, r);
return r;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 7844d15c139f..b2d4c91e6272 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -51,7 +51,14 @@ u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(DMA_RB_RPTR);
+
+ return (rptr & 0x3fffc) >> 2;
}
/**
@@ -65,7 +72,7 @@ uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+ return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}
/**
@@ -79,7 +86,7 @@ uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
void r600_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+ WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}
/**
@@ -93,7 +100,8 @@ void r600_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl = RREG32(DMA_RB_CNTL);
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, rb_cntl);
@@ -180,7 +188,8 @@ int r600_dma_resume(struct radeon_device *rdev)
return r;
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 5513d8f06252..e4cc9b314ce9 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -729,8 +729,8 @@ bool r600_is_uvd_state(u32 class, u32 class2)
return false;
}
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -777,6 +777,22 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
}
}
+int r600_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+ ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+ }
+
+ return 0;
+}
+
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 1000bf9719f2..07eab2b04e81 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -213,8 +213,6 @@ void r600_wait_for_power_level(struct radeon_device *rdev,
void r600_start_dpm(struct radeon_device *rdev);
void r600_stop_dpm(struct radeon_device *rdev);
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp);
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
int r600_parse_extended_power_table(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index b7d3ecba43e3..85a2bb28aed2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -250,7 +250,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
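A minimal sketch of the audio enable/disable bracketing introduced above: the pin is acquired and audio muted before the AFMT/HDMI block is reprogrammed, and audio is only re-enabled once the new mode is fully set up. The functions here are stand-ins for illustration, not the driver's real API.

#include <stdio.h>

static void audio_enable(int on)
{
	printf("audio %s\n", on ? "enabled" : "disabled");
}

static void program_hdmi_block(void)
{
	printf("programming DTO, packet control and infoframes\n");
}

int main(void)
{
	audio_enable(0);	/* disable audio prior to setting up hw */
	program_hdmi_block();
	audio_enable(1);	/* enable audio after setting up hw */
	return 0;
}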
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index ebe38724a976..37455f65107f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -701,11 +701,18 @@
#define RLC_UCODE_DATA 0x3f30
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_BIF (1 << 1)
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
+#define BIF_SCRATCH0 0x5438
+
+#define BUS_CNTL 0x5420
+# define BIOS_ROM_DIS (1 << 1)
+# define VGA_COHE_SPEC_TIMER_DIS (1 << 9)
+
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
@@ -1575,6 +1582,7 @@
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 45e1f447bc79..e887d027b6d0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@ extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
extern int radeon_runtime_pm;
+extern int radeon_hard_reset;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -134,11 +135,17 @@ extern int radeon_runtime_pm;
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* hard reset data */
+#define RADEON_ASIC_RESET_DATA 0x39d5e86b
+
/* reset flags */
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
@@ -252,6 +259,7 @@ struct radeon_clock {
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+int radeon_pm_late_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
@@ -413,6 +421,11 @@ struct radeon_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *vram;
+ struct dentry *gtt;
+#endif
};
/* bo virtual address in a specific vm */
@@ -544,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
/*
* Semaphores.
*/
-/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
@@ -779,13 +791,11 @@ struct radeon_ring {
volatile uint32_t *ring;
unsigned rptr;
unsigned rptr_offs;
- unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
- unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -859,6 +869,8 @@ struct radeon_vm {
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
+ /* last use of vmid */
+ struct radeon_fence *last_id_use;
};
struct radeon_vm_manager {
@@ -949,7 +961,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
+ unsigned rptr_offs, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -1775,6 +1787,7 @@ struct radeon_asic {
int (*init)(struct radeon_device *rdev);
void (*setup_asic)(struct radeon_device *rdev);
int (*enable)(struct radeon_device *rdev);
+ int (*late_enable)(struct radeon_device *rdev);
void (*disable)(struct radeon_device *rdev);
int (*pre_set_power_state)(struct radeon_device *rdev);
int (*set_power_state)(struct radeon_device *rdev);
@@ -2650,6 +2663,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
+#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
@@ -2668,6 +2682,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_pci_config_reset(struct radeon_device *rdev);
extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
@@ -2732,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 98a9074b306b..77e9d07c55b6 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,18 +25,14 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
#include "radeon_acpi.h"
#include "atom.h"
-#include <linux/vga_switcheroo.h>
-
#define ACPI_AC_CLASS "ac_adapter"
extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index c0425bb6223a..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -182,9 +182,9 @@ static struct radeon_asic_ring r100_gfx_ring = {
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
};
static struct radeon_asic r100_asic = {
@@ -330,9 +330,9 @@ static struct radeon_asic_ring r300_gfx_ring = {
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
};
static struct radeon_asic r300_asic = {
@@ -883,9 +883,9 @@ static struct radeon_asic_ring r600_gfx_ring = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r600_gfx_get_rptr,
+ .get_wptr = &r600_gfx_get_wptr,
+ .set_wptr = &r600_gfx_set_wptr,
};
static struct radeon_asic_ring r600_dma_ring = {
@@ -1045,6 +1045,7 @@ static struct radeon_asic rv6xx_asic = {
.init = &rv6xx_dpm_init,
.setup_asic = &rv6xx_setup_asic,
.enable = &rv6xx_dpm_enable,
+ .late_enable = &r600_dpm_late_enable,
.disable = &rv6xx_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv6xx_dpm_set_power_state,
@@ -1135,6 +1136,7 @@ static struct radeon_asic rs780_asic = {
.init = &rs780_dpm_init,
.setup_asic = &rs780_dpm_setup_asic,
.enable = &rs780_dpm_enable,
+ .late_enable = &r600_dpm_late_enable,
.disable = &rs780_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rs780_dpm_set_power_state,
@@ -1239,6 +1241,7 @@ static struct radeon_asic rv770_asic = {
.init = &rv770_dpm_init,
.setup_asic = &rv770_dpm_setup_asic,
.enable = &rv770_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &rv770_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv770_dpm_set_power_state,
@@ -1267,9 +1270,9 @@ static struct radeon_asic_ring evergreen_gfx_ring = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &r600_gfx_get_rptr,
+ .get_wptr = &r600_gfx_get_wptr,
+ .set_wptr = &r600_gfx_set_wptr,
};
static struct radeon_asic_ring evergreen_dma_ring = {
@@ -1357,6 +1360,7 @@ static struct radeon_asic evergreen_asic = {
.init = &cypress_dpm_init,
.setup_asic = &cypress_dpm_setup_asic,
.enable = &cypress_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &cypress_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &cypress_dpm_set_power_state,
@@ -1449,6 +1453,7 @@ static struct radeon_asic sumo_asic = {
.init = &sumo_dpm_init,
.setup_asic = &sumo_dpm_setup_asic,
.enable = &sumo_dpm_enable,
+ .late_enable = &sumo_dpm_late_enable,
.disable = &sumo_dpm_disable,
.pre_set_power_state = &sumo_dpm_pre_set_power_state,
.set_power_state = &sumo_dpm_set_power_state,
@@ -1540,6 +1545,7 @@ static struct radeon_asic btc_asic = {
.init = &btc_dpm_init,
.setup_asic = &btc_dpm_setup_asic,
.enable = &btc_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &btc_dpm_disable,
.pre_set_power_state = &btc_dpm_pre_set_power_state,
.set_power_state = &btc_dpm_set_power_state,
@@ -1549,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
@@ -1570,9 +1576,9 @@ static struct radeon_asic_ring cayman_gfx_ring = {
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cayman_gfx_get_rptr,
+ .get_wptr = &cayman_gfx_get_wptr,
+ .set_wptr = &cayman_gfx_set_wptr,
};
static struct radeon_asic_ring cayman_dma_ring = {
@@ -1585,9 +1591,9 @@ static struct radeon_asic_ring cayman_dma_ring = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr
+ .get_rptr = &cayman_dma_get_rptr,
+ .get_wptr = &cayman_dma_get_wptr,
+ .set_wptr = &cayman_dma_set_wptr
};
static struct radeon_asic_ring cayman_uvd_ring = {
@@ -1683,6 +1689,7 @@ static struct radeon_asic cayman_asic = {
.init = &ni_dpm_init,
.setup_asic = &ni_dpm_setup_asic,
.enable = &ni_dpm_enable,
+ .late_enable = &rv770_dpm_late_enable,
.disable = &ni_dpm_disable,
.pre_set_power_state = &ni_dpm_pre_set_power_state,
.set_power_state = &ni_dpm_set_power_state,
@@ -1783,6 +1790,7 @@ static struct radeon_asic trinity_asic = {
.init = &trinity_dpm_init,
.setup_asic = &trinity_dpm_setup_asic,
.enable = &trinity_dpm_enable,
+ .late_enable = &trinity_dpm_late_enable,
.disable = &trinity_dpm_disable,
.pre_set_power_state = &trinity_dpm_pre_set_power_state,
.set_power_state = &trinity_dpm_set_power_state,
@@ -1813,9 +1821,9 @@ static struct radeon_asic_ring si_gfx_ring = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cayman_gfx_get_rptr,
+ .get_wptr = &cayman_gfx_get_wptr,
+ .set_wptr = &cayman_gfx_set_wptr,
};
static struct radeon_asic_ring si_dma_ring = {
@@ -1828,9 +1836,9 @@ static struct radeon_asic_ring si_dma_ring = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
+ .get_rptr = &cayman_dma_get_rptr,
+ .get_wptr = &cayman_dma_get_wptr,
+ .set_wptr = &cayman_dma_set_wptr,
};
static struct radeon_asic si_asic = {
@@ -1913,6 +1921,7 @@ static struct radeon_asic si_asic = {
.init = &si_dpm_init,
.setup_asic = &si_dpm_setup_asic,
.enable = &si_dpm_enable,
+ .late_enable = &si_dpm_late_enable,
.disable = &si_dpm_disable,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
@@ -1943,9 +1952,9 @@ static struct radeon_asic_ring ci_gfx_ring = {
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
+ .get_rptr = &cik_gfx_get_rptr,
+ .get_wptr = &cik_gfx_get_wptr,
+ .set_wptr = &cik_gfx_set_wptr,
};
static struct radeon_asic_ring ci_cp_ring = {
@@ -1958,9 +1967,9 @@ static struct radeon_asic_ring ci_cp_ring = {
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
+ .get_rptr = &cik_compute_get_rptr,
+ .get_wptr = &cik_compute_get_wptr,
+ .set_wptr = &cik_compute_set_wptr,
};
static struct radeon_asic_ring ci_dma_ring = {
@@ -1973,9 +1982,9 @@ static struct radeon_asic_ring ci_dma_ring = {
.ib_test = &cik_sdma_ib_test,
.is_lockup = &cik_sdma_is_lockup,
.vm_flush = &cik_dma_vm_flush,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
+ .get_rptr = &cik_sdma_get_rptr,
+ .get_wptr = &cik_sdma_get_wptr,
+ .set_wptr = &cik_sdma_set_wptr,
};
static struct radeon_asic ci_asic = {
@@ -2058,6 +2067,7 @@ static struct radeon_asic ci_asic = {
.init = &ci_dpm_init,
.setup_asic = &ci_dpm_setup_asic,
.enable = &ci_dpm_enable,
+ .late_enable = &ci_dpm_late_enable,
.disable = &ci_dpm_disable,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -2159,6 +2169,7 @@ static struct radeon_asic kv_asic = {
.init = &kv_dpm_init,
.setup_asic = &kv_dpm_setup_asic,
.enable = &kv_dpm_enable,
+ .late_enable = &kv_dpm_late_enable,
.disable = &kv_dpm_disable,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
@@ -2449,7 +2460,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2468,7 +2479,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2493,7 +2504,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2521,7 +2532,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGCG |
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c9fd97b58076..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -47,13 +47,6 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-
/*
* r100,rv100,rs100,rv200,rs200
*/
@@ -148,6 +141,13 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
/*
* r200,rv250,rs300,rv280
*/
@@ -368,6 +368,12 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
@@ -392,6 +398,7 @@ int rv6xx_get_temp(struct radeon_device *rdev);
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+int r600_dpm_late_enable(struct radeon_device *rdev);
/* r600 dma */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
@@ -454,6 +461,7 @@ int rv770_get_temp(struct radeon_device *rdev);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
+int rv770_dpm_late_enable(struct radeon_device *rdev);
void rv770_dpm_disable(struct radeon_device *rdev);
int rv770_dpm_set_power_state(struct radeon_device *rdev);
void rv770_dpm_setup_asic(struct radeon_device *rdev);
@@ -543,8 +551,11 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
+int sumo_dpm_late_enable(struct radeon_device *rdev);
void sumo_dpm_disable(struct radeon_device *rdev);
int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
int sumo_dpm_set_power_state(struct radeon_device *rdev);
@@ -591,6 +602,19 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
int ni_dpm_init(struct radeon_device *rdev);
void ni_dpm_setup_asic(struct radeon_device *rdev);
int ni_dpm_enable(struct radeon_device *rdev);
@@ -610,6 +634,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
+int trinity_dpm_late_enable(struct radeon_device *rdev);
void trinity_dpm_disable(struct radeon_device *rdev);
int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
int trinity_dpm_set_power_state(struct radeon_device *rdev);
@@ -669,6 +694,7 @@ int si_get_temp(struct radeon_device *rdev);
int si_dpm_init(struct radeon_device *rdev);
void si_dpm_setup_asic(struct radeon_device *rdev);
int si_dpm_enable(struct radeon_device *rdev);
+int si_dpm_late_enable(struct radeon_device *rdev);
void si_dpm_disable(struct radeon_device *rdev);
int si_dpm_pre_set_power_state(struct radeon_device *rdev);
int si_dpm_set_power_state(struct radeon_device *rdev);
@@ -739,17 +765,30 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_compute_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_sdma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+u32 cik_sdma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
int ci_get_temp(struct radeon_device *rdev);
int kv_get_temp(struct radeon_device *rdev);
int ci_dpm_init(struct radeon_device *rdev);
int ci_dpm_enable(struct radeon_device *rdev);
+int ci_dpm_late_enable(struct radeon_device *rdev);
void ci_dpm_disable(struct radeon_device *rdev);
int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
int ci_dpm_set_power_state(struct radeon_device *rdev);
@@ -770,6 +809,7 @@ void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
int kv_dpm_init(struct radeon_device *rdev);
int kv_dpm_enable(struct radeon_device *rdev);
+int kv_dpm_late_enable(struct radeon_device *rdev);
void kv_dpm_disable(struct radeon_device *rdev);
int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
int kv_dpm_set_power_state(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c39bf7c3d88..30844814c25a 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -30,27 +30,10 @@
#include "atom.h"
#include "atom-bits.h"
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device, u16 caps);
-/* from radeon_connector.c */
-extern void
-radeon_add_atom_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint32_t igp_lane_info,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd,
- struct radeon_router *router);
-
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
@@ -1528,6 +1511,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
ss->type = ss_assign->v1.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+ ss->percentage_divider = 100;
return true;
}
ss_assign = (union asic_ss_assignment *)
@@ -1545,6 +1529,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
ss->type = ss_assign->v2.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+ ss->percentage_divider = 100;
if ((crev == 2) &&
((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS)))
@@ -1566,6 +1551,11 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
ss->type = ss_assign->v3.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+ if (ss_assign->v3.ucSpreadSpectrumMode &
+ SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+ ss->percentage_divider = 1000;
+ else
+ ss->percentage_divider = 100;
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
@@ -1809,7 +1799,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+ mode->crtc_clock = mode->clock =
+ le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
@@ -1852,7 +1843,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+ mode->crtc_clock = mode->clock =
+ le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
@@ -3884,16 +3876,18 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
}
reg_table->last = i;
- while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
+ while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
(num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
- t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
+ t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+ >> MEM_ID_SHIFT);
if (module_index == t_mem_id) {
reg_table->mc_reg_table_entry[num_ranges].mclk_max =
- (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
+ (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+ >> CLOCK_RANGE_SHIFT);
for (i = 0, j = 1; i < reg_table->last; i++) {
if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
- (u32)*((u32 *)reg_data + j);
+ (u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
@@ -3905,7 +3899,7 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
}
- if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
+ if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
return -EINVAL;
reg_table->num_entries = num_ranges;
} else
@@ -3944,6 +3938,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+ /* clear the vbios dpms state */
+ if (ASIC_IS_DCE4(rdev))
+ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
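A userspace analogue of the le32_to_cpu() fixes above: the AtomBIOS register-data block is stored little-endian, so dereferencing it through a raw u32 pointer only works on little-endian CPUs, while assembling the value byte by byte is portable. The table bytes below are hypothetical; this is not the kernel code.

#include <stdio.h>
#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* hypothetical table entry, stored little-endian */
	uint8_t raw[4] = { 0x44, 0x33, 0x22, 0x11 };

	printf("decoded value: 0x%08x\n", read_le32(raw));
	return 0;
}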
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f889f5..fa9a9c02751e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u\n", output.version);
+ printk("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 68ce36056019..6651177110f0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -37,22 +37,6 @@
#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_PMAC */
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
-
-/* from radeon_connector.c */
-extern void
-radeon_add_legacy_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd);
-
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20a768ac89a8..82d4f865546e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -33,15 +33,6 @@
#include <linux/pm_runtime.h>
-extern void
-radeon_combios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
-extern void
-radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
-
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 3cae2bbc1854..bb0d5c3a8311 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2020,10 +2020,10 @@ static int radeon_cp_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ if (copy_to_user(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ if (copy_to_user(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
@@ -2228,7 +2228,7 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
dev_priv->ring.tail &= dev_priv->ring.tail_mask;
- DRM_MEMORYBARRIER();
+ mb();
GET_RING_HEAD( dev_priv );
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0b366169d64d..dfb5a1db87d4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,7 +138,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
p->ring = R600_RING_TYPE_DMA_INDEX;
else
p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
- } else if (p->rdev->family >= CHIP_R600) {
+ } else if (p->rdev->family >= CHIP_RV770) {
p->ring = R600_RING_TYPE_DMA_INDEX;
} else {
return -EINVAL;
@@ -192,7 +192,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -ENOMEM;
}
chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
- if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+ if (copy_from_user(p->chunks_array, chunk_array_ptr,
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
@@ -208,7 +208,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
uint32_t __user *cdata;
chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
- if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+ if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_radeon_cs_chunk))) {
return -EFAULT;
}
@@ -252,7 +252,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].kdata == NULL) {
return -ENOMEM;
}
- if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+ if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
@@ -472,7 +472,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
}
parser->const_ib.is_const_ib = true;
parser->const_ib.length_dw = ib_chunk->length_dw;
- if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+ if (copy_from_user(parser->const_ib.ptr,
ib_chunk->user_ptr,
ib_chunk->length_dw * 4))
return -EFAULT;
@@ -495,7 +495,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
parser->ib.length_dw = ib_chunk->length_dw;
if (ib_chunk->kdata)
memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
- else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+ else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
return -EFAULT;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 39b033b441d2..044bc98fb459 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -144,6 +144,11 @@ void radeon_program_register_sequence(struct radeon_device *rdev,
}
}
+void radeon_pci_config_reset(struct radeon_device *rdev)
+{
+ pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
+}
+
/**
* radeon_surface_init - Clear GPU surface registers.
*
@@ -249,7 +254,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-int radeon_doorbell_init(struct radeon_device *rdev)
+static int radeon_doorbell_init(struct radeon_device *rdev)
{
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
@@ -278,7 +283,7 @@ int radeon_doorbell_init(struct radeon_device *rdev)
*
* Tear down doorbell driver information (CIK)
*/
-void radeon_doorbell_fini(struct radeon_device *rdev)
+static void radeon_doorbell_fini(struct radeon_device *rdev)
{
iounmap(rdev->doorbell.ptr);
rdev->doorbell.ptr = NULL;
@@ -1330,6 +1335,7 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
@@ -1455,7 +1461,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
radeon_save_bios_scratch_regs(rdev);
- radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
@@ -1516,14 +1521,25 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- radeon_pm_resume(rdev);
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ r = radeon_pm_late_init(rdev);
+ if (r) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ }
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
+ }
+
radeon_restore_bios_scratch_regs(rdev);
if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}
-
+
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b253815a323..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -306,7 +306,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
* to complete in this vblank?
*/
if (update_pending &&
- (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
&vpos, &hpos, NULL, NULL)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
+ dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
@@ -1464,12 +1466,22 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
radeon_fbdev_init(rdev);
drm_kms_helper_poll_init(rdev->ddev);
+ if (rdev->pm.dpm_enabled) {
+ /* do dpm late init */
+ ret = radeon_pm_late_init(rdev);
+ if (ret) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ }
+ /* set the dpm state for PX since there won't be
+ * a modeset to call this.
+ */
+ radeon_pm_compute_clocks(rdev);
+ }
+
return 0;
}
@@ -1477,7 +1489,6 @@ void radeon_modeset_fini(struct radeon_device *rdev)
{
radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
- radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
radeon_afmt_fini(rdev);
@@ -1601,6 +1612,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \param dev Device to query.
* \param crtc Crtc to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -1622,8 +1634,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1765,5 +1777,27 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+
+ if (vbl_start - *vpos < vtotal / 100) {
+ *vpos -= vtotal;
+
+ /* Signal this correction as "applied". */
+ ret |= 0x8;
+ }
+ }
+
return ret;
}
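The early-vblank-IRQ compensation added above, reduced to its arithmetic: when the interrupt fires within 1/100 of a frame before the nominal start of vblank, the scanout position is pushed back by a full frame so the derived timestamp lands in the previous frame, and bit 0x8 flags the correction. The mode values are hypothetical; this is not the driver code.

#include <stdio.h>

int main(void)
{
	int vbl_start = 1080;	/* crtc_vdisplay */
	int vtotal = 1125;	/* crtc_vtotal */
	int vpos = 1075;	/* IRQ fired a few lines early */
	int ret = 0;

	if (vbl_start - vpos < vtotal / 100) {
		vpos -= vtotal;	/* shift into the previous frame */
		ret |= 0x8;	/* signal this correction as "applied" */
	}

	printf("corrected vpos = %d, flags = 0x%x\n", vpos, ret);
	return 0;
}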
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index db39ea36bf22..f633c2782170 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -102,13 +103,14 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ unsigned int flags,
int *vpos, int *hpos, ktime_t *stime,
ktime_t *etime);
extern const struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -168,6 +170,7 @@ int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
+int radeon_hard_reset = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -232,6 +235,9 @@ module_param_named(aspm, radeon_aspm, int, 0444);
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, radeon_runtime_pm, int, 0444);
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -397,8 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (radeon_runtime_pm == 0)
- return -EINVAL;
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -422,6 +435,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
if (radeon_runtime_pm == 0)
return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px())
+ return -EINVAL;
+
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
pci_set_power_state(pdev, PCI_D0);
@@ -444,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_crtc *crtc;
- if (radeon_runtime_pm == 0)
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we PX enabled? */
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 00e0d449021c..dafd812e4571 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -405,7 +405,7 @@ extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
extern int radeon_driver_irq_postinstall(struct drm_device *dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d3a86e43c012..c37cb79a9489 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -121,7 +121,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
- trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+ trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
return 0;
}
@@ -313,7 +313,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
continue;
last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
- trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+ trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
}
@@ -332,7 +332,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
continue;
radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+ trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
}
if (unlikely(r < 0))
@@ -841,6 +841,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
if (!rdev->fence_drv[i].initialized)
continue;
+ radeon_fence_process(rdev, i);
+
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 96e440061bdb..a8f9b463bf2a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -713,7 +713,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
unsigned i;
/* check if the id is still valid */
- if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+ if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
return NULL;
/* we definitely need to flush */
@@ -726,6 +726,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
if (fence == NULL) {
/* found a free one */
vm->id = i;
+ trace_radeon_vm_grab_id(vm->id, ring);
return NULL;
}
@@ -769,6 +770,9 @@ void radeon_vm_fence(struct radeon_device *rdev,
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->last_id_use);
+ vm->last_id_use = radeon_fence_ref(fence);
}
/**
@@ -1303,6 +1307,8 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
+ vm->last_flush = NULL;
+ vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
@@ -1341,5 +1347,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
+ radeon_fence_unref(&vm->last_id_use);
mutex_unlock(&vm->mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 805c5e566b9a..b96c819024b3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@ retry:
return 0;
}
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct radeon_bo *robj;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index fc60b74ee304..e24ca6ab96de 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
+ if (radeon_hw_i2c)
+ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 8d68e972789a..244b19bab2e7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -181,7 +181,7 @@ static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_dis
* tied to dma at all, this is just a hangover from dri prehistory.
*/
-irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
@@ -203,7 +203,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST)
- DRM_WAKEUP(&dev_priv->swi_queue);
+ wake_up(&dev_priv->swi_queue);
/* VBLANK interrupt */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
@@ -249,7 +249,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
- DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
return ret;
@@ -302,7 +302,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
result = radeon_emit_irq(dev);
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
@@ -354,7 +354,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
(drm_radeon_private_t *) dev->dev_private;
atomic_set(&dev_priv->swi_emitted, 0);
- DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+ init_waitqueue_head(&dev_priv->swi_queue);
dev->max_vblank_count = 0x001fffff;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ec6240b00469..089c9ffb0aa9 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,13 +39,13 @@
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
- * @DRM_IRQ_ARGS: args
+ * @int irq, void *arg: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 21d593c0ecaf..66ed3ea71440 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- if (radeon_runtime_pm != 0) {
+ if ((radeon_runtime_pm == 1) ||
+ ((radeon_runtime_pm == -1) && radeon_is_px())) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -191,7 +199,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info = data;
@@ -223,7 +231,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -269,7 +277,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*
* When returning, the value is 1 if filp owns hyper-z access,
* 0 otherwise. */
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -281,7 +289,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_WANT_CMASK:
/* The same logic as Hyper-Z. */
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -417,7 +425,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*value = rdev->fastfb_working;
break;
case RADEON_INFO_RING_WORKING:
- if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -470,11 +478,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
}
break;
+ case RADEON_INFO_MAX_SCLK:
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled)
+ *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+ else
+ *value = rdev->pm.default_sclk * 10;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
+ if (copy_to_user(value_ptr, (char*)value, value_size)) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -530,6 +545,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
radeon_vm_init(rdev, &fpriv->vm);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r)
+ return r;
+
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
@@ -537,6 +556,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
+
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@@ -712,11 +733,12 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
vblank_time, flags,
- drmcrtc);
+ drmcrtc, &drmcrtc->hwmode);
}
#define KMS_INVALID_IOCTL(name) \
-int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+static int name(struct drm_device *dev, void *data, struct drm_file \
+ *file_priv) \
{ \
DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
return -EINVAL; \
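
For reference, the only functional change to KMS_INVALID_IOCTL is that the generated stub becomes static. For a hypothetical stub name radeon_foo_kms the macro would expand to roughly:

static int radeon_foo_kms(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);
	return -EINVAL;
}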
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index d54d2d7c9031..146d253f1131 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -243,7 +243,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
if (!block)
return -ENOMEM;
- if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+ if (copy_to_user(alloc->region_offset, &block->start,
sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3f0dd664af90..402dbe32c234 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -291,6 +291,7 @@ struct radeon_tv_regs {
struct radeon_atom_ss {
uint16_t percentage;
+ uint16_t percentage_divider;
uint8_t type;
uint16_t step;
uint8_t delay;
@@ -624,6 +625,30 @@ struct atom_voltage_table
struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
};
+
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
@@ -631,6 +656,15 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci, u16 *mvdd);
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
extern struct drm_connector *
@@ -666,6 +700,7 @@ extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
extern void radeon_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -766,6 +801,7 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ unsigned int flags,
int *vpos, int *hpos, ktime_t *stime,
ktime_t *etime);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index c0fa4aa9ceea..08595cf90b01 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,7 +46,7 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-void radeon_bo_clear_va(struct radeon_bo *bo)
+static void radeon_bo_clear_va(struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va, *tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 984097b907ef..8e8153e471c2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -924,6 +924,10 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (rdev->asic->dpm.powergate_uvd) {
mutex_lock(&rdev->pm.mutex);
+ /* don't powergate anything if we
+ have active but paused streams */
+ enable |= rdev->pm.dpm.sd > 0;
+ enable |= rdev->pm.dpm.hd > 0;
/* enable/disable UVD */
radeon_dpm_powergate_uvd(rdev, !enable);
mutex_unlock(&rdev->pm.mutex);
@@ -1010,8 +1014,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ if (rdev->pm.power_state) {
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ }
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -1032,25 +1038,27 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
- if (ret) {
- DRM_ERROR("radeon: dpm resume failed\n");
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
- } else {
- rdev->pm.dpm_enabled = true;
- radeon_pm_compute_clocks(rdev);
+ if (ret)
+ goto dpm_resume_fail;
+ rdev->pm.dpm_enabled = true;
+ radeon_pm_compute_clocks(rdev);
+ return;
+
+dpm_resume_fail:
+ DRM_ERROR("radeon: dpm resume failed\n");
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
}
@@ -1170,51 +1178,50 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
- if (ret) {
- rdev->pm.dpm_enabled = false;
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
- DRM_ERROR("radeon: dpm initialization failed\n");
- return ret;
- }
+ if (ret)
+ goto dpm_failed;
rdev->pm.dpm_enabled = true;
- radeon_pm_compute_clocks(rdev);
- if (rdev->pm.num_power_states > 1) {
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- }
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ /* XXX: these are noops for dpm but are here for backwards compat */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
- DRM_INFO("radeon: dpm initialized\n");
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
}
+ DRM_INFO("radeon: dpm initialized\n");
+
return 0;
+
+dpm_failed:
+ rdev->pm.dpm_enabled = false;
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
+ DRM_ERROR("radeon: dpm initialization failed\n");
+ return ret;
}
int radeon_pm_init(struct radeon_device *rdev)
@@ -1228,11 +1235,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
case CHIP_CAYMAN:
- case CHIP_BONAIRE:
- case CHIP_KABINI:
- case CHIP_KAVERI:
- case CHIP_HAWAII:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1257,15 +1263,16 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1290,6 +1297,18 @@ int radeon_pm_init(struct radeon_device *rdev)
return radeon_pm_init_old(rdev);
}
+int radeon_pm_late_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+
+ if (rdev->pm.pm_method == PM_METHOD_DPM) {
+ mutex_lock(&rdev->pm.mutex);
+ ret = radeon_dpm_late_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+ return ret;
+}
+
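
radeon_pm_late_init() is a no-op for the old PM paths and, for DPM, just wraps the new radeon_dpm_late_enable() hook under pm.mutex. Read together with the rv770/si hunks further down, which move the thermal-interrupt programming out of *_dpm_enable() into *_dpm_late_enable(), the intent appears to be a two-stage bring-up: dpm_enable() runs early from radeon_pm_init(), and the interrupt-dependent pieces wait until the ASIC startup code calls radeon_pm_late_init() once rdev->irq.installed is true. A conceptual ordering sketch, an assumption rather than a literal call chain from this patch:

	radeon_pm_init(rdev);		/* parse power tables, dpm_setup_asic(), dpm_enable() */
	/* ... IRQ handler installed, rdev->irq.installed becomes true ... */
	radeon_pm_late_init(rdev);	/* dpm_late_enable(): thermal range + thermal interrupts */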
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
@@ -1420,6 +1439,9 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ if (!rdev->pm.dpm_enabled)
+ return;
+
mutex_lock(&rdev->pm.mutex);
/* update active crtc counts */
@@ -1464,7 +1486,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 9214403ae173..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
@@ -332,36 +332,6 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
}
}
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 rptr;
-
- if (rdev->wb.enabled)
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
- else
- rptr = RREG32(ring->rptr_reg);
-
- return rptr;
-}
-
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 wptr;
-
- wptr = RREG32(ring->wptr_reg);
-
- return wptr;
-}
-
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- WREG32(ring->wptr_reg, ring->wptr);
- (void)RREG32(ring->wptr_reg);
-}
-
/**
* radeon_ring_free_size - update the free size
*
@@ -463,7 +433,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
- DRM_MEMORYBARRIER();
+ mb();
radeon_ring_set_wptr(rdev, ring);
}
@@ -689,22 +659,18 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
- * @rptr_reg: MMIO offset of the rptr register
- * @wptr_reg: MMIO offset of the wptr register
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
+ unsigned rptr_offs, u32 nop)
{
int r;
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
- ring->rptr_reg = rptr_reg;
- ring->wptr_reg = wptr_reg;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
@@ -790,34 +756,54 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
+
+ uint32_t rptr, wptr, rptr_next;
unsigned count, i, j;
- u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- tmp = radeon_ring_get_wptr(rdev, ring);
- seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
- tmp = radeon_ring_get_rptr(rdev, ring);
- seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+
+ wptr = radeon_ring_get_wptr(rdev, ring);
+ seq_printf(m, "wptr: 0x%08x [%5d]\n",
+ wptr, wptr);
+
+ rptr = radeon_ring_get_rptr(rdev, ring);
+ seq_printf(m, "rptr: 0x%08x [%5d]\n",
+ rptr, rptr);
+
if (ring->rptr_save_reg) {
- seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
- RREG32(ring->rptr_save_reg));
- }
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
- seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
- seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
+ rptr_next = RREG32(ring->rptr_save_reg);
+ seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
+ ring->rptr_save_reg, rptr_next, rptr_next);
+ } else
+ rptr_next = ~0;
+
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+ ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
+ ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+ ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
+ ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+
+ if (!ring->ready)
+ return 0;
+
/* print 8 dw before current rptr as often it's the last executed
* packet that is the root issue
*/
- i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- if (ring->ready) {
- for (j = 0; j <= (count + 32); j++) {
- seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
- i = (i + 1) & ring->ptr_mask;
- }
+ i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+ if (rptr == i)
+ seq_puts(m, " *");
+ if (rptr_next == i)
+ seq_puts(m, " #");
+ seq_puts(m, "\n");
+ i = (i + 1) & ring->ptr_mask;
}
return 0;
}
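
With the reworked dump, the current hardware read pointer is tagged with " *" and, when rptr_save_reg exists, the saved next-rptr with " #". A hedged illustration of a few lines of the resulting debugfs output, all values invented:

wptr: 0x00000140 [  320]
rptr: 0x00000120 [  288]
driver's copy of the wptr: 0x00000140 [  320]
driver's copy of the rptr: 0x00000120 [  288]
...
r[  287]=0x00000000
r[  288]=0xc0012800 *
r[  289]=0x00000010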
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index f0bac68254b7..c0625805cdd7 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -402,13 +402,15 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
+ uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+ uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
- seq_printf(m, "[0x%08x 0x%08x] size %8d",
- i->soffset, i->eoffset, i->eoffset - i->soffset);
+ seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+ soffset, eoffset, eoffset - soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
+ uint32_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8, true);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+ 8 * RADEON_NUM_SYNCS, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ cpu_addr[i] = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
{
+ unsigned count = 0;
int i, r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
return -EINVAL;
}
+ if (++count > RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
radeon_ring_commit(rdev, &rdev->ring[i]);
radeon_fence_note_sync(fence, ring);
+
+ semaphore->gpu_addr += 8;
}
return 0;
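
The semaphore object now carries RADEON_NUM_SYNCS consecutive 8-byte slots instead of one: each ring that has to be waited on consumes a slot (the trailing gpu_addr += 8), and once count exceeds RADEON_NUM_SYNCS the code degrades to a CPU-side radeon_fence_wait_locked(). As a worked example with a hypothetical RADEON_NUM_SYNCS of 4, the allocation is 8 * 4 = 32 bytes and at most four foreign rings can be synced per submission through semaphores; a fifth is handled by the manual fence wait. Slot addressing, as a sketch:

	/* slot n lives 8 bytes past the previous one */
	uint64_t slot_gpu_addr = radeon_sa_bo_gpu_addr(semaphore->sa_bo) + 8 * n;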
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 4d20910899d4..956ab7f14e16 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1810,7 +1810,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
}
if (!buf) {
DRM_DEBUG("EAGAIN\n");
- if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ if (copy_to_user(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
}
@@ -1823,7 +1823,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
#define RADEON_COPY_MT(_buf, _data, _width) \
do { \
- if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+ if (copy_from_user(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
return -EFAULT; \
} \
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ if (copy_from_user(&depth_boxes, clear->depth_boxes,
sarea_priv->nbox * sizeof(depth_boxes[0])))
return -EFAULT;
@@ -2436,7 +2436,7 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file
return -EINVAL;
}
- if (DRM_COPY_FROM_USER(&image,
+ if (copy_from_user(&image,
(drm_radeon_tex_image_t __user *) tex->image,
sizeof(image)))
return -EFAULT;
@@ -2460,7 +2460,7 @@ static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2585,13 +2585,13 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
drm_radeon_prim_t prim;
drm_radeon_tcl_prim_t tclprim;
- if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+ if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim)))
return -EFAULT;
if (prim.stateidx != laststate) {
drm_radeon_state_t state;
- if (DRM_COPY_FROM_USER(&state,
+ if (copy_from_user(&state,
&vertex->state[prim.stateidx],
sizeof(state)))
return -EFAULT;
@@ -2799,7 +2799,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
do {
if (i < cmdbuf->nbox) {
- if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+ if (copy_from_user(&box, &boxes[i], sizeof(box)))
return -EFAULT;
/* FIXME The second and subsequent times round
* this loop, send a WAIT_UNTIL_3D_IDLE before
@@ -3116,7 +3116,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
return -EINVAL;
}
- if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ if (copy_to_user(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 0473257d4078..f749f2c3bbdb 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -106,42 +106,45 @@ TRACE_EVENT(radeon_vm_set_page,
DECLARE_EVENT_CLASS(radeon_fence_request,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(int, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
+ __entry->ring = ring;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%d, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
- TP_ARGS(dev, seqno)
+ TP_ARGS(dev, ring, seqno)
);
DECLARE_EVENT_CLASS(radeon_semaphore_request,
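
Since every radeon_fence_request event now records the ring index, the rendered trace lines (per the TP_printk above) look like the following, with invented values:

  radeon_fence_emit: dev=0, ring=0, seqno=1024
  radeon_fence_wait_begin: dev=0, ring=3, seqno=57

They are enabled per event through tracefs, e.g. echo 1 > /sys/kernel/debug/tracing/events/radeon/radeon_fence_emit/enable, assuming the usual TRACE_SYSTEM name of radeon for this header.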
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 71245d6f34a2..040a2a10ea17 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,12 +39,14 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
+#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
@@ -142,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
- if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+ if (!rdev->ddev->agp) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
@@ -712,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+ /* Change the size here instead of the init above so only lpfn is affected */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
@@ -753,6 +758,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
+ radeon_ttm_debugfs_fini(rdev);
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r == 0) {
@@ -832,16 +838,15 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-
-#define RADEON_DEBUGFS_MEM_TYPES 2
-
#if defined(CONFIG_DEBUG_FS)
+
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+ unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -850,46 +855,169 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
spin_unlock(&glob->lru_lock);
return ret;
}
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list radeon_ttm_debugfs_list[] = {
+ {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
+ {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
+ {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+ {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
+};
-static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
-#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
- unsigned i;
+ struct radeon_device *rdev = inode->i_private;
+ i_size_write(inode, rdev->mc.mc_vram_size);
+ filep->private_data = inode->i_private;
+ return 0;
+}
- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
- if (i == 0)
- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
- else
- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
- radeon_mem_types_list[i].driver_features = 0;
- if (i == 0)
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
- else
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct radeon_device *rdev = f->private_data;
+ ssize_t result = 0;
+ int r;
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= rdev->mc.mc_vram_size)
+ return result;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ if (rdev->family >= CHIP_CEDAR)
+ WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
+ value = RREG32(RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
}
- /* Add ttm page pool to debugfs */
- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
- radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
- radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i++].data = NULL;
+
+ return result;
+}
+
+static const struct file_operations radeon_ttm_vram_fops = {
+ .owner = THIS_MODULE,
+ .open = radeon_ttm_vram_open,
+ .read = radeon_ttm_vram_read,
+ .llseek = default_llseek
+};
+
+static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
+{
+ struct radeon_device *rdev = inode->i_private;
+ i_size_write(inode, rdev->mc.gtt_size);
+ filep->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct radeon_device *rdev = f->private_data;
+ ssize_t result = 0;
+ int r;
+
+ while (size) {
+ loff_t p = *pos / PAGE_SIZE;
+ unsigned off = *pos & ~PAGE_MASK;
+ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
+ struct page *page;
+ void *ptr;
+
+ if (p >= rdev->gart.num_cpu_pages)
+ return result;
+
+ page = rdev->gart.pages[p];
+ if (page) {
+ ptr = kmap(page);
+ ptr += off;
+
+ r = copy_to_user(buf, ptr, cur_size);
+ kunmap(rdev->gart.pages[p]);
+ } else
+ r = clear_user(buf, cur_size);
+
+ if (r)
+ return -EFAULT;
+
+ result += cur_size;
+ buf += cur_size;
+ *pos += cur_size;
+ size -= cur_size;
}
+
+ return result;
+}
+
+static const struct file_operations radeon_ttm_gtt_fops = {
+ .owner = THIS_MODULE,
+ .open = radeon_ttm_gtt_open,
+ .read = radeon_ttm_gtt_read,
+ .llseek = default_llseek
+};
+
#endif
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned count;
+
+ struct drm_minor *minor = rdev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+
+ ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
+ rdev, &radeon_ttm_vram_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ rdev->mman.vram = ent;
+
+ ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
+ rdev, &radeon_ttm_gtt_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ rdev->mman.gtt = ent;
+
+ count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+
+#ifdef CONFIG_SWIOTLB
+ if (!swiotlb_nr_tbl())
+ --count;
#endif
+
+ return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
+#else
+
return 0;
+#endif
+}
+
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ debugfs_remove(rdev->mman.vram);
+ rdev->mman.vram = NULL;
+
+ debugfs_remove(rdev->mman.gtt);
+ rdev->mman.gtt = NULL;
+#endif
}
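
The new radeon_vram and radeon_gtt entries appear under the device's DRM debugfs directory (typically /sys/kernel/debug/dri/<minor>/). radeon_vram returns -EINVAL for reads whose size or offset is not 4-byte aligned and fetches one dword at a time through the MM_INDEX/MM_DATA window (bit 31 selects the MC aperture, EVERGREEN_MM_INDEX_HI supplies the upper bits on CEDAR and newer), while radeon_gtt kmaps each backing page and zero-fills holes. A minimal read sketch, assuming the usual minor-0 path and root access; illustrative, not part of the patch:

/* dump the first 64 KiB of VRAM via the new debugfs file */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/radeon_vram", O_RDONLY);
	uint32_t buf[4096];			/* 16 KiB per chunk, multiple of 4 as required */
	off_t pos = 0;
	ssize_t n;

	if (fd < 0)
		return 1;
	while (pos < 64 * 1024 &&
	       (n = pread(fd, buf, sizeof(buf), pos)) > 0) {
		fwrite(buf, 1, n, stdout);	/* raw bytes; pipe through xxd to inspect */
		pos += n;
	}
	close(fd);
	return 0;
}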
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index b9c0529b4a2e..3e6804b2b2ef 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_PITCAIRN:
case CHIP_ARUBA:
+ case CHIP_OLAND:
fw_name = FIRMWARE_TAHITI;
break;
@@ -170,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
release_firmware(rdev->uvd_fw);
}
@@ -778,6 +781,8 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+ &rdev->pm.dpm.hd);
radeon_dpm_enable_uvd(rdev, false);
} else {
radeon_set_uvd_clocks(rdev, 0, 0);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9566b5940a5a..130d5cc50d43 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -484,6 +484,7 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -493,6 +494,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -560,6 +562,9 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 76cc8d3aafec..72d3616de08e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1058,6 +1058,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -1068,6 +1069,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
@@ -1136,6 +1138,9 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index e7dab069cccf..3462b64369bf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -766,6 +766,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -776,6 +777,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
@@ -845,6 +847,9 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 6af8505cf4d2..8512085b0aef 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -623,14 +623,6 @@ int rs780_dpm_enable(struct radeon_device *rdev)
if (pi->gfx_clock_gating)
r600_gfx_clockgating_enable(rdev, true);
- if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
- ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d1c316115ef..237dd29d9f1c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -596,6 +596,7 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -612,6 +613,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -685,6 +687,9 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 26633a025252..bebf31c4d841 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1546,7 +1546,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
{
struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- int ret;
if (r600_dynamicpm_enabled(rdev))
return -EINVAL;
@@ -1594,15 +1593,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- }
-
rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
r600_start_dpm(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9f5846743c9e..fef310773aad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1071,7 +1071,8 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1123,6 +1124,35 @@ void r700_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= SCLK_MUX_SEL_MASK;
+ tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp &= ~SCLK_MUX_UPDATE;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ tmp = RREG32(MPLL_CNTL_MODE);
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ tmp &= ~RV730_MPLL_MCLK_SEL;
+ else
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
/*
* Core functions
*/
@@ -1665,14 +1695,6 @@ static int rv770_startup(struct radeon_device *rdev)
rv770_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1728,14 +1750,12 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1754,7 +1774,6 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -1792,6 +1811,9 @@ int rv770_resume(struct radeon_device *rdev)
/* init golden registers */
rv770_init_golden_registers(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
@@ -1806,6 +1828,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
@@ -1876,6 +1899,17 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -1915,15 +1949,16 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 374499db20c7..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -1863,8 +1863,8 @@ void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
}
}
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -1966,6 +1966,15 @@ int rv770_dpm_enable(struct radeon_device *rdev)
if (pi->mg_clock_gating)
rv770_mg_clock_gating_enable(rdev, true);
+ rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ return 0;
+}
+
+int rv770_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
PPSMC_Result result;
@@ -1981,8 +1990,6 @@ int rv770_dpm_enable(struct radeon_device *rdev)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
}
- rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
return 0;
}
@@ -2167,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2207,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2244,14 +2250,12 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
pl->vddci = vddci;
}
- if (rdev->family >= CHIP_BARTS) {
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
- }
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
}
}
@@ -2522,14 +2526,13 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = 300;
-
- /* quirks */
- /* ASUS K70AF */
- if ((rdev->pdev->device == 0x9553) &&
- (rdev->pdev->subsystem_vendor == 0x1043) &&
- (rdev->pdev->subsystem_device == 0x1c42))
- switch_limit = 200;
+ u32 switch_limit = 200; /* 300 */
+
+ /* RV770 */
+ /* mclk switching doesn't seem to work reliably on desktop RV770s */
+ if ((rdev->family == CHIP_RV770) &&
+ !(rdev->flags & RADEON_IS_MOBILITY))
+ switch_limit = 0xffffffff; /* disable mclk switching */
if (vblank_time < switch_limit)
return true;
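
r600_dpm_get_vblank_time() reports the vblank duration in microseconds, so the comparison means: if the display's vblank is shorter than switch_limit there is not enough time to retrain the memory clock invisibly, and rv770_dpm_vblank_too_short() makes power-state selection keep mclk fixed. Worked example with invented numbers: a mode leaving roughly 150 us of vblank is below the new 200 us default, so mclk switching is suppressed, whereas about 250 us would have allowed it. The old per-board ASUS K70AF quirk (which lowered the limit to 200) becomes unnecessary now that 200 is the default, and on desktop, non-mobility RV770 the 0xffffffff limit makes the test always true, disabling mclk switching outright.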
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 9244effc6b59..f776634840c9 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -283,8 +283,4 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev,
int rv770_write_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 value);
-/* thermal */
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp);
-
#endif
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 1ae277152cc7..3cf1e2921545 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -100,14 +100,21 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_STATUS 0x60c
+#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x610
#define SPLL_DIV_SYNC (1 << 5)
+#define MPLL_CNTL_MODE 0x61c
+# define MPLL_MCLK_SEL (1 << 11)
+# define RV730_MPLL_MCLK_SEL (1 << 25)
+
#define MPLL_AD_FUNC_CNTL 0x624
#define CLKF(x) ((x) << 0)
#define CLKF_MASK (0x7f << 0)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 85e1edfaa3be..9a124d0608b3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -80,6 +80,8 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
+static void si_init_pg(struct radeon_device *rdev);
+static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
@@ -1460,7 +1462,7 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
};
/* ucode loading */
-static int si_mc_load_microcode(struct radeon_device *rdev)
+int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 running, blackout = 0;
@@ -3247,7 +3249,8 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -3508,6 +3511,9 @@ static int si_cp_resume(struct radeon_device *rdev)
si_enable_gui_idle_interrupt(rdev, true);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -3724,6 +3730,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_print_gpu_status_regs(rdev);
}
+static void si_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i;
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_BYPASS_EN;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp |= SPLL_CTLREQ_CHG;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ tmp = RREG32(MPLL_CNTL_MODE);
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp |= SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_RESET;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_SLEEP;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp &= ~SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static void si_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp, i;
+
+ dev_info(rdev->dev, "GPU pci config reset\n");
+
+ /* disable dpm? */
+
+ /* disable cg/pg */
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ /* XXX other engines? */
+
+ /* halt the rlc, disable cp internal ints */
+ si_rlc_stop(rdev);
+
+ udelay(50);
+
+ /* disable mem access */
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+ }
+
+ /* set mclk/sclk to bypass */
+ si_set_clk_bypass_mode(rdev);
+ /* powerdown spll */
+ si_spll_powerdown(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* reset */
+ radeon_pci_config_reset(rdev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
+}
+
int si_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
@@ -3733,10 +3839,17 @@ int si_asic_reset(struct radeon_device *rdev)
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
+ /* try soft reset */
si_gpu_soft_reset(rdev, reset_mask);
reset_mask = si_gpu_check_soft_reset(rdev);
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ si_gpu_pci_config_reset(rdev);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
@@ -5212,8 +5325,8 @@ static void si_enable_hdp_ls(struct radeon_device *rdev,
WREG32(HDP_MEM_POWER_LS, data);
}
-void si_update_cg(struct radeon_device *rdev,
- u32 block, bool enable)
+static void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
si_enable_gui_idle_interrupt(rdev, false);
@@ -5379,6 +5492,9 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_ao_cu_mask(rdev);
if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
si_enable_dma_pg(rdev, true);
si_enable_gfx_cgpg(rdev, true);
@@ -5566,7 +5682,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
}
if (!ASIC_IS_NODCE(rdev)) {
- WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
@@ -6222,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -6324,21 +6444,14 @@ static int si_startup(struct radeon_device *rdev)
si_mc_program(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->rlc_fw || !rdev->mc_fw) {
- r = si_init_microcode(rdev);
+ if (!rdev->pm.dpm_enabled) {
+ r = si_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
- r = si_mc_load_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load MC firmware!\n");
- return r;
- }
-
r = si_pcie_gart_enable(rdev);
if (r)
return r;
@@ -6421,37 +6534,30 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- CP_RB0_RPTR, CP_RB0_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- CP_RB1_RPTR, CP_RB1_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- CP_RB2_RPTR, CP_RB2_WPTR,
RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
- DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6471,7 +6577,6 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
@@ -6513,6 +6618,9 @@ int si_resume(struct radeon_device *rdev)
/* init golden registers */
si_init_golden_registers(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
@@ -6527,6 +6635,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
@@ -6600,6 +6709,18 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->rlc_fw || !rdev->mc_fw) {
+ r = si_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -6666,6 +6787,7 @@ int si_init(struct radeon_device *rdev)
void si_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_fini_pg(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0b00c790fb77..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1738,6 +1738,8 @@ struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
struct ni_ps *ni_get_ps(struct radeon_ps *rps);
+extern int si_mc_load_microcode(struct radeon_device *rdev);
+
static int si_populate_voltage_value(struct radeon_device *rdev,
const struct atom_voltage_table *table,
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
@@ -1753,9 +1755,6 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
-extern void si_update_cg(struct radeon_device *rdev,
- u32 block, bool enable);
-
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -2396,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3591,10 +3590,9 @@ static void si_program_display_gap(struct radeon_device *rdev)
/* Setting this to false forces the performance state to low if the crtcs are disabled.
* This can be a problem on PowerXpress systems or if you want to use the card
- * for offscreen rendering or compute if there are no crtcs enabled. Set it to
- * true for now so that performance scales even if the displays are off.
+ * for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
+ si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
}
static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -5414,7 +5412,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
break;
mc_reg_table->address[i].s0 =
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
@@ -5754,6 +5752,11 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
void si_dpm_setup_asic(struct radeon_device *rdev)
{
+ int r;
+
+ r = si_mc_load_microcode(rdev);
+ if (r)
+ DRM_ERROR("Failed to load MC firmware!\n");
rv770_get_memory_type(rdev);
si_read_clock_registers(rdev);
si_enable_acpi_power_management(rdev);
@@ -5791,13 +5794,6 @@ int si_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (si_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control)
@@ -5900,6 +5896,17 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_sclk_control(rdev, true);
si_start_dpm(rdev);
+ si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ni_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
PPSMC_Result result;
@@ -5915,17 +5922,6 @@ int si_dpm_enable(struct radeon_device *rdev)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
}
- si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
- ni_update_current_ps(rdev, boot_ps);
-
return 0;
}
@@ -5934,13 +5930,6 @@ void si_dpm_disable(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
if (!si_is_smc_running(rdev))
return;
si_disable_ulv(rdev);
@@ -6005,13 +5994,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &eg_pi->current_rps;
int ret;
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), false);
-
ret = si_disable_ulv(rdev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
@@ -6104,13 +6086,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_UVD |
- RADEON_CG_BLOCK_HDP), true);
-
return 0;
}
@@ -6497,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index d422a1cbf727..e80efcf0c230 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -28,6 +28,7 @@
#include "sid.h"
#include "ppsmc.h"
#include "radeon_ucode.h"
+#include "sislands_smc.h"
static int si_set_smc_sram_address(struct radeon_device *rdev,
u32 smc_address, u32 limit)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index b322acc48097..9239a6d29128 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -94,6 +94,8 @@
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SPLL_CTLREQ_CHG (1 << 23)
+#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
@@ -101,7 +103,10 @@
#define SPLL_DITHEN (1 << 28)
#define CG_SPLL_FUNC_CNTL_4 0x60c
+#define SPLL_STATUS 0x614
+#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
+#define SPLL_SW_DIR_CONTROL (1 << 0)
# define SPLL_REFCLK_SEL(x) ((x) << 8)
# define SPLL_REFCLK_SEL_MASK 0xFF00
@@ -559,6 +564,8 @@
# define MRDCK0_BYPASS (1 << 24)
# define MRDCK1_BYPASS (1 << 25)
+#define MPLL_CNTL_MODE 0x2bb0
+# define MPLL_MCLK_SEL (1 << 11)
#define MPLL_FUNC_CNTL 0x2bb4
#define BWCTRL(x) ((x) << 20)
#define BWCTRL_MASK (0xff << 20)
@@ -815,7 +822,7 @@
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
-#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DAC_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 5578e9837026..10e945a49479 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -374,8 +374,6 @@ typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
#pragma pack(pop)
-int si_set_smc_sram_address(struct radeon_device *rdev,
- u32 smc_address, u32 limit);
int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 96ea6db8bf57..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -71,7 +71,7 @@ static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
SUMO_DTC_DFLT_14,
};
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
+static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
{
struct sumo_ps *ps = rps->ps_priv;
@@ -1202,14 +1202,10 @@ static void sumo_update_requested_ps(struct radeon_device *rdev,
int sumo_dpm_enable(struct radeon_device *rdev)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- int ret;
if (sumo_dpm_enabled(rdev))
return -EINVAL;
- ret = sumo_enable_clock_power_gating(rdev);
- if (ret)
- return ret;
sumo_program_bootup_state(rdev);
sumo_init_bsp(rdev);
sumo_reset_am(rdev);
@@ -1233,6 +1229,19 @@ int sumo_dpm_enable(struct radeon_device *rdev)
if (pi->enable_boost)
sumo_enable_boost_timer(rdev);
+ sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return 0;
+}
+
+int sumo_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ ret = sumo_enable_clock_power_gating(rdev);
+ if (ret)
+ return ret;
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1242,8 +1251,6 @@ int sumo_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
- sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
return 0;
}
@@ -1800,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
index 18abba5b5810..fb081d2ae374 100644
--- a/drivers/gpu/drm/radeon/sumo_smc.c
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -31,7 +31,6 @@
#define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY 27
#define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20 20
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d700698a1f22..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -342,14 +342,14 @@ static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *new_rps,
struct radeon_ps *old_rps);
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
+static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
{
struct trinity_ps *ps = rps->ps_priv;
return ps;
}
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
+static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
{
struct trinity_power_info *pi = rdev->pm.dpm.priv;
@@ -1082,7 +1082,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
int trinity_dpm_enable(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
- int ret;
trinity_acquire_mutex(rdev);
@@ -1091,7 +1090,6 @@ int trinity_dpm_enable(struct radeon_device *rdev)
return -EINVAL;
}
- trinity_enable_clock_power_gating(rdev);
trinity_program_bootup_state(rdev);
sumo_program_vc(rdev, 0x00C00033);
trinity_start_am(rdev);
@@ -1105,6 +1103,18 @@ int trinity_dpm_enable(struct radeon_device *rdev)
trinity_dpm_bapm_enable(rdev, false);
trinity_release_mutex(rdev);
+ trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return 0;
+}
+
+int trinity_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ trinity_acquire_mutex(rdev);
+ trinity_enable_clock_power_gating(rdev);
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1115,8 +1125,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
rdev->irq.dpm_thermal = true;
radeon_irq_set(rdev);
}
-
- trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+ trinity_release_mutex(rdev);
return 0;
}
@@ -1917,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index 9672bcbc7312..99dd0455334d 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -27,9 +27,6 @@
#include "trinity_dpm.h"
#include "ppsmc.h"
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
-
static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
{
int i;
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index b19ef4951085..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
@@ -153,6 +152,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
chip_id = 0x01000015;
break;
case CHIP_PITCAIRN:
+ case CHIP_OLAND:
chip_id = 0x01000016;
break;
case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index a9d24e4bf792..fbf4be316d0b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
goto error;
rcrtc->plane->format = format;
- rcrtc->plane->pitch = crtc->fb->pitches[0];
rcrtc->plane->src_x = x;
rcrtc->plane->src_y = y;
@@ -413,7 +412,7 @@ static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
rcrtc->plane->src_x = x;
rcrtc->plane->src_y = y;
- rcar_du_crtc_update_base(to_rcar_crtc(crtc));
+ rcar_du_crtc_update_base(rcrtc);
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 0023f9719cf1..792fd1d20e86 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -224,7 +224,9 @@ static int rcar_du_probe(struct platform_device *pdev)
static int rcar_du_remove(struct platform_device *pdev)
{
- drm_platform_exit(&rcar_du_driver, pdev);
+ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+
+ drm_put_dev(rcdu->ddev);
return 0;
}
@@ -249,8 +251,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
- | RCAR_DU_FEATURE_DEFR8,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
/* R8A7790 has one RGB output, two LVDS outputs and one
@@ -272,9 +274,29 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.num_lvds = 2,
};
+static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7791 has one RGB output, one LVDS output and one
+ * (currently unsupported) TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct platform_device_id rcar_du_id_table[] = {
{ "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
{ "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+ { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
{ }
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 65d2d636b002..e31b735d3f25 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -28,8 +28,10 @@ struct rcar_du_device;
struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
-#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
+#define RCAR_DU_FEATURE_DEFR8 (1 << 1) /* Has DEFR8 register */
+
+#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
/*
* struct rcar_du_output_routing - Output routing specification
@@ -48,12 +50,14 @@ struct rcar_du_output_routing {
/*
* struct rcar_du_device_info - DU model-specific information
* @features: device features (RCAR_DU_FEATURE_*)
+ * @quirks: device quirks (RCAR_DU_QUIRK_*)
* @num_crtcs: total number of CRTCs
* @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
* @num_lvds: number of internal LVDS encoders
*/
struct rcar_du_device_info {
unsigned int features;
+ unsigned int quirks;
unsigned int num_crtcs;
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
@@ -84,6 +88,12 @@ static inline bool rcar_du_has(struct rcar_du_device *rcdu,
return rcdu->info->features & feature;
}
+static inline bool rcar_du_needs(struct rcar_du_device *rcdu,
+ unsigned int quirk)
+{
+ return rcdu->info->quirks & quirk;
+}
+
static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
{
return ioread32(rcdu->mmio + reg);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b31ac080c4a7..fbeabd9a281f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -119,7 +119,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
/* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
* but the R8A7790 DU seems to require a 128-byte pitch alignment.
*/
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * args->bpp / 8;
@@ -144,7 +144,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
align = 16 * format->bpp / 8;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index a0f6a1781925..df30a075d793 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -44,6 +44,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
const struct drm_display_mode *mode = &rcrtc->crtc.mode;
unsigned int freq = mode->clock;
u32 lvdcr0;
+ u32 lvdhcr;
u32 pllcr;
int ret;
@@ -72,15 +73,19 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
* VSYNC -> CTRL1
* DISP -> CTRL2
* 0 -> CTRL3
- *
- * Channels 1 and 3 are switched on ES1.
*/
rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
LVDCTRCR_CTR0SEL_HSYNC);
- rcar_lvds_write(lvds, LVDCHCR,
- LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
- LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+ if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES))
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
+ else
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
+
+ rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
/* Select the input, hardcode mode 0, enable LVDS operation and turn
* bias circuitry on.
@@ -144,18 +149,9 @@ static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
sprintf(name, "lvds.%u", lvds->index);
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory resource for %s\n",
- name);
- return -EINVAL;
- }
-
lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (lvds->mmio == NULL) {
- dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
- name);
- return -ENOMEM;
- }
+ if (IS_ERR(lvds->mmio))
+ return PTR_ERR(lvds->mmio);
lvds->clock = devm_clk_get(&pdev->dev, name);
if (IS_ERR(lvds->clock)) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 53000644733f..3fb69d9ae61b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
{
struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
+ u32 mwr;
+
+ /* Memory pitch (expressed in pixels) */
+ if (plane->format->planes == 2)
+ mwr = plane->pitch;
+ else
+ mwr = plane->pitch * 8 / plane->format->bpp;
+
+ rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
{
struct drm_gem_cma_object *gem;
+ plane->pitch = fb->pitches[0];
+
gem = drm_fb_cma_get_gem_obj(fb, 0);
plane->dma[0] = gem->paddr + fb->offsets[0];
@@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
- u32 mwr;
/* Data format
*
@@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
- /* Memory pitch (expressed in pixels) */
- if (plane->format->planes == 2)
- mwr = plane->pitch;
- else
- mwr = plane->pitch * 8 / plane->format->bpp;
-
- rcar_du_plane_write(rgrp, index, PnMWR, mwr);
-
/* Destination position and size */
rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
@@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
rplane->crtc = crtc;
rplane->format = format;
- rplane->pitch = fb->pitches[0];
rplane->src_x = src_x >> 16;
rplane->src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b17d0710871a..d2b2df9e26f3 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -49,7 +49,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
#endif
for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
- DRM_MEMORYBARRIER();
+ mb();
status = dev_priv->status_ptr[0];
if ((status & mask) < threshold)
return 0;
@@ -123,7 +123,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
int i;
for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
- DRM_MEMORYBARRIER();
+ mb();
status = dev_priv->status_ptr[1];
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
@@ -449,7 +449,7 @@ static void savage_dma_flush(drm_savage_private_t * dev_priv)
}
}
- DRM_MEMORYBARRIER();
+ mb();
/* do flush ... */
phys_addr = dev_priv->cmd_dma->offset +
@@ -990,10 +990,10 @@ static int savage_bci_get_buffers(struct drm_device *dev,
buf->file_priv = file_priv;
- if (DRM_COPY_TO_USER(&d->request_indices[i],
+ if (copy_to_user(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
- if (DRM_COPY_TO_USER(&d->request_sizes[i],
+ if (copy_to_user(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index b35e75ed890c..c01ad0aeaa58 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -992,7 +992,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
if (kcmd_addr == NULL)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+ if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
cmdbuf->size * 8))
{
kfree(kcmd_addr);
@@ -1007,7 +1007,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
goto done;
}
- if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+ if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
cmdbuf->vb_size)) {
ret = -EFAULT;
goto done;
@@ -1022,7 +1022,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
goto done;
}
- if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+ if (copy_from_user(kbox_addr, cmdbuf->box_addr,
cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
ret = -EFAULT;
goto done;
@@ -1032,7 +1032,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
/* Make sure writes to DMA buffers are finished before sending
* DMA commands to the graphics hardware. */
- DRM_MEMORYBARRIER();
+ mb();
/* Coming from user space. Don't know if the Xserver has
* emitted wait commands. Assuming the worst. */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 562f9a401cf6..0428076f1ce8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -37,14 +37,21 @@
* Clock management
*/
-static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
{
- if (sdev->clock)
- clk_prepare_enable(sdev->clock);
+ int ret;
+
+ if (sdev->clock) {
+ ret = clk_prepare_enable(sdev->clock);
+ if (ret < 0)
+ return ret;
+ }
#if 0
if (sdev->meram_dev && sdev->meram_dev->pdev)
pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
#endif
+
+ return 0;
}
static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
@@ -161,6 +168,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
struct drm_device *dev = sdev->ddev;
struct drm_plane *plane;
u32 value;
+ int ret;
if (scrtc->started)
return;
@@ -170,7 +178,9 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
return;
/* Enable clocks before accessing the hardware. */
- shmob_drm_clk_on(sdev);
+ ret = shmob_drm_clk_on(sdev);
+ if (ret < 0)
+ return;
/* Reset and enable the LCDC. */
lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 015551866b4a..c839c9c89efb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -336,7 +336,9 @@ static int shmob_drm_probe(struct platform_device *pdev)
static int shmob_drm_remove(struct platform_device *pdev)
{
- drm_platform_exit(&shmob_drm_driver, pdev);
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_put_dev(sdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4383b74a3aa4..756f787b7143 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -94,7 +94,7 @@ static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct sis_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 01857d836350..0573be0d2933 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -266,7 +266,7 @@ int sis_idle(struct drm_device *dev)
* because its polling frequency is too low.
*/
- end = jiffies + (DRM_HZ * 3);
+ end = jiffies + (HZ * 3);
for (i = 0; i < 4; ++i) {
do {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8961ba6a34b8..354ddb29231f 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,13 +1,12 @@
config DRM_TEGRA
- bool "NVIDIA Tegra DRM"
- depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+ tristate "NVIDIA Tegra DRM"
+ depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on DRM
- select TEGRA_HOST1X
+ depends on RESET_CONTROLLER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select TEGRA_HOST1X
help
Choose this option if you have an NVIDIA Tegra SoC.
@@ -16,6 +15,18 @@ config DRM_TEGRA
if DRM_TEGRA
+config DRM_TEGRA_FBDEV
+ bool "Enable legacy fbdev support"
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ default y
+ help
+ Choose this option if you need legacy fbdev support.
+ Note that this support also provides the Linux console on top of
+ the Tegra modesetting driver.
+
config DRM_TEGRA_DEBUG
bool "NVIDIA Tegra DRM debug support"
help
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index edc76abd58bb..8d220afbd85f 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -9,6 +9,8 @@ tegra-drm-y := \
output.o \
rgb.o \
hdmi.o \
+ mipi-phy.o \
+ dsi.o \
gr2d.o \
gr3d.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
index 565f8f7b9a47..e38e5967d77b 100644
--- a/drivers/gpu/drm/tegra/bus.c
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -46,7 +46,6 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
struct drm_device *drm;
int ret;
- INIT_LIST_HEAD(&driver->device_list);
driver->bus = &drm_host1x_bus;
drm = drm_dev_alloc(driver, &device->dev);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ae1cb31ead7e..9336006b475d 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,17 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
+#include <linux/reset.h>
#include "dc.h"
#include "drm.h"
#include "gem.h"
+struct tegra_dc_soc_info {
+ bool supports_interlacing;
+};
+
struct tegra_plane {
struct drm_plane base;
unsigned int index;
@@ -658,19 +662,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
/* program display mode */
tegra_dc_set_timings(dc, mode);
- value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
- tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
-
- value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
- value &= ~LVS_OUTPUT_POLARITY_LOW;
- value &= ~LHS_OUTPUT_POLARITY_LOW;
- tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
-
- value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
- DISP_ORDER_RED_BLUE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
-
- tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+ /* interlacing isn't supported yet, so disable it */
+ if (dc->soc->supports_interlacing) {
+ value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
+ value &= ~INTERLACE_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
+ }
value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
@@ -712,7 +709,7 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
unsigned long value;
/* hardware initialization */
- tegra_periph_reset_deassert(dc->clk);
+ reset_control_deassert(dc->rst);
usleep_range(10000, 20000);
if (dc->pipe)
@@ -735,10 +732,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
- value |= DISP_CTRL_MODE_C_DISPLAY;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -1107,8 +1100,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
- dc->pipe = tegra->drm->mode_config.num_crtc;
-
drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1167,8 +1158,71 @@ static const struct host1x_client_ops dc_client_ops = {
.exit = tegra_dc_exit,
};
+static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+ .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+ .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+ .supports_interlacing = true,
+};
+
+static const struct of_device_id tegra_dc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra124-dc",
+ .data = &tegra124_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra30-dc",
+ .data = &tegra30_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra20-dc",
+ .data = &tegra20_dc_soc_info,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int tegra_dc_parse_dt(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ u32 value = 0;
+ int err;
+
+ err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
+ if (err < 0) {
+ dev_err(dc->dev, "missing \"nvidia,head\" property\n");
+
+ /*
+ * If the nvidia,head property isn't present, try to find the
+ * correct head number by looking up the position of this
+ * display controller's node within the device tree. Assuming
+ * that the nodes are ordered properly in the DTS file and
+ * that the translation into a flattened device tree blob
+ * preserves that ordering, this will actually yield the right
+ * head number.
+ *
+ * If those assumptions don't hold, this will still work for
+ * cases where only a single display controller is used.
+ */
+ for_each_matching_node(np, tegra_dc_of_match) {
+ if (np == dc->dev->of_node)
+ break;
+
+ value++;
+ }
+ }
+
+ dc->pipe = value;
+
+ return 0;
+}
+
static int tegra_dc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1177,9 +1231,18 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
+ id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
+ dc->soc = id->data;
+
+ err = tegra_dc_parse_dt(dc);
+ if (err < 0)
+ return err;
dc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dc->clk)) {
@@ -1187,6 +1250,12 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->clk);
}
+ dc->rst = devm_reset_control_get(&pdev->dev, "dc");
+ if (IS_ERR(dc->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(dc->rst);
+ }
+
err = clk_prepare_enable(dc->clk);
if (err < 0)
return err;
@@ -1247,12 +1316,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_dc_of_match[] = {
- { .compatible = "nvidia,tegra30-dc", },
- { .compatible = "nvidia,tegra20-dc", },
- { },
-};
-
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 91bbda291470..3c2c0ea1cd87 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -28,6 +28,7 @@
#define DISP_CTRL_MODE_STOP (0 << 5)
#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_CTRL_MODE_MASK (3 << 5)
#define DC_CMD_SIGNAL_RAISE 0x033
#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
#define PW0_ENABLE (1 << 0)
@@ -116,6 +117,7 @@
#define DC_DISP_DISP_WIN_OPTIONS 0x402
#define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE (1 << 29)
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
@@ -238,6 +240,8 @@
#define DITHER_CONTROL_ERRDIFF (3 << 8)
#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define SC1_H_QUALIFIER_NONE (1 << 16)
+#define SC0_H_QUALIFIER_NONE (1 << 0)
#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
#define DE_SELECT_ACTIVE_BLANK (0 << 0)
@@ -292,6 +296,11 @@
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_DISP_INTERLACE_CONTROL 0x4e5
+#define INTERLACE_STATUS (1 << 2)
+#define INTERLACE_START (1 << 1)
+#define INTERLACE_ENABLE (1 << 0)
+
#define DC_WIN_CSC_YOF 0x611
#define DC_WIN_CSC_KYRGB 0x612
#define DC_WIN_CSC_KUR 0x613
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 07eba596d458..c71594754f46 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,9 +104,11 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
static void tegra_drm_lastclose(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
+#endif
}
static struct host1x_bo *
@@ -578,7 +580,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
#endif
static struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
@@ -596,6 +598,12 @@ static struct drm_driver tegra_drm_driver = {
.gem_free_object = tegra_bo_free_object,
.gem_vm_ops = &tegra_bo_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = tegra_gem_prime_export,
+ .gem_prime_import = tegra_gem_prime_import,
+
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
@@ -653,8 +661,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
{ .compatible = "nvidia,tegra114-gr3d", },
+ { .compatible = "nvidia,tegra124-dc", },
{ /* sentinel */ }
};
@@ -677,10 +687,14 @@ static int __init host1x_drm_init(void)
if (err < 0)
goto unregister_host1x;
- err = platform_driver_register(&tegra_hdmi_driver);
+ err = platform_driver_register(&tegra_dsi_driver);
if (err < 0)
goto unregister_dc;
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dsi;
+
err = platform_driver_register(&tegra_gr2d_driver);
if (err < 0)
goto unregister_hdmi;
@@ -695,6 +709,8 @@ unregister_gr2d:
platform_driver_unregister(&tegra_gr2d_driver);
unregister_hdmi:
platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dsi:
+ platform_driver_unregister(&tegra_dsi_driver);
unregister_dc:
platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
@@ -708,6 +724,7 @@ static void __exit host1x_drm_exit(void)
platform_driver_unregister(&tegra_gr3d_driver);
platform_driver_unregister(&tegra_gr2d_driver);
platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dsi_driver);
platform_driver_unregister(&tegra_dc_driver);
host1x_driver_unregister(&host1x_drm_driver);
}
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 7da0b923131f..bf1cac7658f8 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,16 +19,20 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
+struct reset_control;
+
struct tegra_fb {
struct drm_framebuffer base;
struct tegra_bo **planes;
unsigned int num_planes;
};
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev {
struct drm_fb_helper base;
struct tegra_fb *fb;
};
+#endif
struct tegra_drm {
struct drm_device *drm;
@@ -36,7 +40,9 @@ struct tegra_drm {
struct mutex clients_lock;
struct list_head clients;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev *fbdev;
+#endif
};
struct tegra_drm_client;
@@ -82,6 +88,7 @@ extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
extern int tegra_drm_exit(struct tegra_drm *tegra);
+struct tegra_dc_soc_info;
struct tegra_output;
struct tegra_dc {
@@ -93,6 +100,7 @@ struct tegra_dc {
int pipe;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *regs;
int irq;
@@ -106,6 +114,8 @@ struct tegra_dc {
/* page-flip handling */
struct drm_pending_vblank_event *event;
+
+ const struct tegra_dc_soc_info *soc;
};
static inline struct tegra_dc *
@@ -174,6 +184,7 @@ struct tegra_output_ops {
enum tegra_output_type {
TEGRA_OUTPUT_RGB,
TEGRA_OUTPUT_HDMI,
+ TEGRA_OUTPUT_DSI,
};
struct tegra_output {
@@ -183,6 +194,7 @@ struct tegra_output {
const struct tegra_output_ops *ops;
enum tegra_output_type type;
+ struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
unsigned int hpd_irq;
@@ -260,9 +272,12 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
+#ifdef CONFIG_DRM_TEGRA_FBDEV
extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+#endif
extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_dsi_driver;
extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644
index 000000000000..d452faab0235
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "dsi.h"
+#include "mipi-phy.h"
+
+#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
+#define DSI_HOST_FIFO_DEPTH 64
+
+struct tegra_dsi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk_lp;
+ struct clk *clk;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+
+ struct tegra_mipi_device *mipi;
+ struct mipi_dsi_host host;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct tegra_dsi, host);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_dsi, output);
+}
+
+static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
+ unsigned long reg)
+{
+ return readl(dsi->regs + (reg << 2));
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+ unsigned long reg)
+{
+ writel(value, dsi->regs + (reg << 2));
+}
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dsi *dsi = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \
+ tegra_dsi_readl(dsi, name))
+
+ DUMP_REG(DSI_INCR_SYNCPT);
+ DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
+ DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+ DUMP_REG(DSI_CTXSW);
+ DUMP_REG(DSI_RD_DATA);
+ DUMP_REG(DSI_WR_DATA);
+ DUMP_REG(DSI_POWER_CONTROL);
+ DUMP_REG(DSI_INT_ENABLE);
+ DUMP_REG(DSI_INT_STATUS);
+ DUMP_REG(DSI_INT_MASK);
+ DUMP_REG(DSI_HOST_CONTROL);
+ DUMP_REG(DSI_CONTROL);
+ DUMP_REG(DSI_SOL_DELAY);
+ DUMP_REG(DSI_MAX_THRESHOLD);
+ DUMP_REG(DSI_TRIGGER);
+ DUMP_REG(DSI_TX_CRC);
+ DUMP_REG(DSI_STATUS);
+
+ DUMP_REG(DSI_INIT_SEQ_CONTROL);
+ DUMP_REG(DSI_INIT_SEQ_DATA_0);
+ DUMP_REG(DSI_INIT_SEQ_DATA_1);
+ DUMP_REG(DSI_INIT_SEQ_DATA_2);
+ DUMP_REG(DSI_INIT_SEQ_DATA_3);
+ DUMP_REG(DSI_INIT_SEQ_DATA_4);
+ DUMP_REG(DSI_INIT_SEQ_DATA_5);
+ DUMP_REG(DSI_INIT_SEQ_DATA_6);
+ DUMP_REG(DSI_INIT_SEQ_DATA_7);
+
+ DUMP_REG(DSI_PKT_SEQ_0_LO);
+ DUMP_REG(DSI_PKT_SEQ_0_HI);
+ DUMP_REG(DSI_PKT_SEQ_1_LO);
+ DUMP_REG(DSI_PKT_SEQ_1_HI);
+ DUMP_REG(DSI_PKT_SEQ_2_LO);
+ DUMP_REG(DSI_PKT_SEQ_2_HI);
+ DUMP_REG(DSI_PKT_SEQ_3_LO);
+ DUMP_REG(DSI_PKT_SEQ_3_HI);
+ DUMP_REG(DSI_PKT_SEQ_4_LO);
+ DUMP_REG(DSI_PKT_SEQ_4_HI);
+ DUMP_REG(DSI_PKT_SEQ_5_LO);
+ DUMP_REG(DSI_PKT_SEQ_5_HI);
+
+ DUMP_REG(DSI_DCS_CMDS);
+
+ DUMP_REG(DSI_PKT_LEN_0_1);
+ DUMP_REG(DSI_PKT_LEN_2_3);
+ DUMP_REG(DSI_PKT_LEN_4_5);
+ DUMP_REG(DSI_PKT_LEN_6_7);
+
+ DUMP_REG(DSI_PHY_TIMING_0);
+ DUMP_REG(DSI_PHY_TIMING_1);
+ DUMP_REG(DSI_PHY_TIMING_2);
+ DUMP_REG(DSI_BTA_TIMING);
+
+ DUMP_REG(DSI_TIMEOUT_0);
+ DUMP_REG(DSI_TIMEOUT_1);
+ DUMP_REG(DSI_TO_TALLY);
+
+ DUMP_REG(DSI_PAD_CONTROL_0);
+ DUMP_REG(DSI_PAD_CONTROL_CD);
+ DUMP_REG(DSI_PAD_CD_STATUS);
+ DUMP_REG(DSI_VIDEO_MODE_CONTROL);
+ DUMP_REG(DSI_PAD_CONTROL_1);
+ DUMP_REG(DSI_PAD_CONTROL_2);
+ DUMP_REG(DSI_PAD_CONTROL_3);
+ DUMP_REG(DSI_PAD_CONTROL_4);
+
+ DUMP_REG(DSI_GANGED_MODE_CONTROL);
+ DUMP_REG(DSI_GANGED_MODE_START);
+ DUMP_REG(DSI_GANGED_MODE_SIZE);
+
+ DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
+ DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
+
+ DUMP_REG(DSI_INIT_SEQ_DATA_8);
+ DUMP_REG(DSI_INIT_SEQ_DATA_9);
+ DUMP_REG(DSI_INIT_SEQ_DATA_10);
+ DUMP_REG(DSI_INIT_SEQ_DATA_11);
+ DUMP_REG(DSI_INIT_SEQ_DATA_12);
+ DUMP_REG(DSI_INIT_SEQ_DATA_13);
+ DUMP_REG(DSI_INIT_SEQ_DATA_14);
+ DUMP_REG(DSI_INIT_SEQ_DATA_15);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
+ struct drm_minor *minor)
+{
+ const char *name = dev_name(dsi->dev);
+ unsigned int i;
+ int err;
+
+ dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+ if (!dsi->debugfs)
+ return -ENOMEM;
+
+ dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dsi->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ dsi->debugfs_files[i].data = dsi;
+
+ err = drm_debugfs_create_files(dsi->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ dsi->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ dsi->minor = minor;
+
+ return 0;
+
+free:
+ kfree(dsi->debugfs_files);
+ dsi->debugfs_files = NULL;
+remove:
+ debugfs_remove(dsi->debugfs);
+ dsi->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+{
+ drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
+ dsi->minor);
+ dsi->minor = NULL;
+
+ kfree(dsi->debugfs_files);
+ dsi->debugfs_files = NULL;
+
+ debugfs_remove(dsi->debugfs);
+ dsi->debugfs = NULL;
+
+ return 0;
+}
+
+#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
+#define PKT_LEN0(len) (((len) & 0x07) << 0)
+#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len) (((len) & 0x07) << 10)
+#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len) (((len) & 0x07) << 20)
+
+#define PKT_LP (1 << 30)
+#define NUM_PKT_SEQ 12
+
+/* non-burst mode with sync-end */
+static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+ [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 1] = 0,
+ [ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 3] = 0,
+ [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+ [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+};
+
+static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+{
+ struct mipi_dphy_timing timing;
+ unsigned long value, period;
+ long rate;
+ int err;
+
+ rate = clk_get_rate(dsi->clk);
+ if (rate < 0)
+ return rate;
+
+ period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+
+ err = mipi_dphy_timing_get_default(&timing, period);
+ if (err < 0)
+ return err;
+
+ err = mipi_dphy_timing_validate(&timing, period);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+ return err;
+ }
+
+ /*
+ * The D-PHY timing fields below are expressed in byte-clock cycles,
+ * so multiply the period by 8.
+ */
+ period *= 8;
+
+ value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
+ DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
+ DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+ value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing.lpx, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+ value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+ DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+ value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing.tago, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+ return 0;
+}
+
+static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
+ unsigned int *mulp, unsigned int *divp)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ case MIPI_DSI_FMT_RGB888:
+ *mulp = 3;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ *mulp = 2;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB666:
+ *mulp = 9;
+ *divp = 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ unsigned int hact, hsw, hbp, hfp, i, mul, div;
+ struct tegra_dsi *dsi = to_dsi(output);
+ /* FIXME: don't hardcode this */
+ const u32 *pkt_seq = pkt_seq_vnb_syne;
+ unsigned long value;
+ int err;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+ if (err < 0)
+ return err;
+
+ err = clk_enable(dsi->clk);
+ if (err < 0)
+ return err;
+
+ reset_control_deassert(dsi->rst);
+
+ value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+ DSI_CONTROL_LANES(dsi->lanes - 1) |
+ DSI_CONTROL_SOURCE(dc->pipe);
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+
+ value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
+ DSI_HOST_CONTROL_ECC;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+ value |= DSI_CONTROL_HS_CLK_CTRL;
+ value &= ~DSI_CONTROL_TX_TRIG(3);
+ value &= ~DSI_CONTROL_DCS_ENABLE;
+ value |= DSI_CONTROL_VIDEO_ENABLE;
+ value &= ~DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ err = tegra_dsi_set_phy_timing(dsi);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < NUM_PKT_SEQ; i++)
+ tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+ /* horizontal active pixels */
+ hact = mode->hdisplay * mul / div;
+
+ /* horizontal sync width */
+ hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+ hsw -= 10;
+
+ /* horizontal back porch */
+ hbp = (mode->htotal - mode->hsync_end) * mul / div;
+ hbp -= 14;
+
+ /* horizontal front porch */
+ hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+ hfp -= 8;
+
+ tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+ /* set SOL delay */
+ tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+ /* enable display controller */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* enable DSI controller */
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ return 0;
+}
+
+static int tegra_output_dsi_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ unsigned long value;
+
+ /* disable DSI controller */
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ clk_disable(dsi->clk);
+
+ return 0;
+}
+
+static int tegra_output_dsi_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ unsigned int timeout, mul, div, vrefresh;
+ struct tegra_dsi *dsi = to_dsi(output);
+ unsigned long bclk, plld, value;
+ struct clk *base;
+ int err;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+ if (err < 0)
+ return err;
+
+ vrefresh = drm_mode_vrefresh(mode);
+
+ pclk = mode->htotal * mode->vtotal * vrefresh;
+ bclk = (pclk * mul) / (div * dsi->lanes);
+ plld = DIV_ROUND_UP(bclk * 8, 1000000);
+ pclk = (plld * 1000000) / 2;
+
+ err = clk_set_parent(clk, dsi->clk_parent);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ base = clk_get_parent(dsi->clk_parent);
+
+ /*
+ * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+ * respectively, each of which divides the base pll_d by 2.
+ */
+ err = clk_set_rate(base, pclk * 2);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+ return err;
+ }
+
+ /*
+ * XXX: Move the below somewhere else so that we don't need to have
+ * access to the vrefresh in this function?
+ */
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ return 0;
+}
+
+static int tegra_output_dsi_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops dsi_ops = {
+ .enable = tegra_output_dsi_enable,
+ .disable = tegra_output_dsi_disable,
+ .setup_clock = tegra_output_dsi_setup_clock,
+ .check_mode = tegra_output_dsi_check_mode,
+};
+
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ unsigned long value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ unsigned long value;
+
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ return tegra_mipi_calibrate(dsi->mipi);
+}
+
+static int tegra_dsi_init(struct host1x_client *client)
+{
+ struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ unsigned long value, i;
+ int err;
+
+ dsi->output.type = TEGRA_OUTPUT_DSI;
+ dsi->output.dev = client->dev;
+ dsi->output.ops = &dsi_ops;
+
+ err = tegra_output_init(tegra->drm, &dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+ if (err < 0)
+ dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ /*
+ * enable high-speed mode, checksum generation, ECC generation and
+ * disable raw mode
+ */
+ value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+ value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
+ DSI_HOST_CONTROL_HS;
+ value &= ~DSI_HOST_CONTROL_RAW;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+ tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+ for (i = 0; i < 8; i++) {
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+ }
+
+ for (i = 0; i < 12; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+ tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ return err;
+ }
+
+ tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ int err;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dsi_debugfs_exit(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
+ err = tegra_output_disable(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops dsi_client_ops = {
+ .init = tegra_dsi_init,
+ .exit = tegra_dsi_exit,
+};
+
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ parent = clk_get_parent(dsi->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_dsi_initialize(struct tegra_dsi *dsi)
+{
+ unsigned int i;
+
+ tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
+ tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
+ tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
+
+ tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_CONTROL);
+
+ tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+ tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+ for (i = 0; i < 8; i++) {
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+ tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+ }
+
+ for (i = 0; i < 12; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+ tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+ for (i = 0; i < 4; i++)
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
+
+ tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
+ tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
+ tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
+ tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
+
+ tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
+ tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
+ tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
+
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
+ tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+}
+
+static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct tegra_output *output = &dsi->output;
+
+ dsi->format = device->format;
+ dsi->lanes = device->lanes;
+
+ output->panel = of_drm_find_panel(device->dev.of_node);
+ if (output->panel) {
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+ }
+
+ return 0;
+}
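
A hedged sketch of the peripheral side that would trigger the attach callback above; the probe function and all names are illustrative and only rely on the mipi_dsi_device fields and helpers of the DSI host framework this patch builds on:

#include <drm/drm_mipi_dsi.h>

/* illustrative DSI peripheral probe, not part of this patch */
static int example_dsi_panel_probe(struct mipi_dsi_device *dsi)
{
	/* these are the parameters tegra_dsi_host_attach() copies */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

	return mipi_dsi_attach(dsi);
}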
+
+static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct tegra_output *output = &dsi->output;
+
+ if (output->panel && &device->dev == output->panel->dev) {
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ output->panel = NULL;
+ }
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
+ .attach = tegra_dsi_host_attach,
+ .detach = tegra_dsi_host_detach,
+};
+
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi;
+ struct resource *regs;
+ int err;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->output.dev = dsi->dev = &pdev->dev;
+
+ err = tegra_output_probe(&dsi->output);
+ if (err < 0)
+ return err;
+
+ /*
+ * Assume these values by default. When a DSI peripheral driver
+ * attaches to the DSI host, the parameters will be taken from
+ * the attached device.
+ */
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+
+ dsi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dsi->clk)) {
+ dev_err(&pdev->dev, "cannot get DSI clock\n");
+ return PTR_ERR(dsi->clk);
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable DSI clock\n");
+ return err;
+ }
+
+ dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+ if (IS_ERR(dsi->clk_lp)) {
+ dev_err(&pdev->dev, "cannot get low-power clock\n");
+ return PTR_ERR(dsi->clk_lp);
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable low-power clock\n");
+ return err;
+ }
+
+ dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(dsi->clk_parent)) {
+ dev_err(&pdev->dev, "cannot get parent clock\n");
+ return PTR_ERR(dsi->clk_parent);
+ }
+
+ err = clk_prepare_enable(dsi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot enable parent clock\n");
+ return err;
+ }
+
+ err = tegra_dsi_setup_clocks(dsi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot setup clocks\n");
+ return err;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
+
+ tegra_dsi_initialize(dsi);
+
+ dsi->mipi = tegra_mipi_request(&pdev->dev);
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
+
+ dsi->host.ops = &tegra_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ err = mipi_dsi_host_register(&dsi->host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
+ return err;
+ }
+
+ INIT_LIST_HEAD(&dsi->client.list);
+ dsi->client.ops = &dsi_client_ops;
+ dsi->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, dsi);
+
+ return 0;
+}
+
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ mipi_dsi_host_unregister(&dsi->host);
+ tegra_mipi_free(dsi->mipi);
+
+ clk_disable_unprepare(dsi->clk_parent);
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ err = tegra_output_remove(&dsi->output);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+ { .compatible = "nvidia,tegra114-dsi", },
+ { },
+};
+
+struct platform_driver tegra_dsi_driver = {
+ .driver = {
+ .name = "tegra-dsi",
+ .of_match_table = tegra_dsi_of_match,
+ },
+ .probe = tegra_dsi_probe,
+ .remove = tegra_dsi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
new file mode 100644
index 000000000000..00e79c1f448c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_DSI_H
+#define DRM_TEGRA_DSI_H
+
+#define DSI_INCR_SYNCPT 0x00
+#define DSI_INCR_SYNCPT_CONTROL 0x01
+#define DSI_INCR_SYNCPT_ERROR 0x02
+#define DSI_CTXSW 0x08
+#define DSI_RD_DATA 0x09
+#define DSI_WR_DATA 0x0a
+#define DSI_POWER_CONTROL 0x0b
+#define DSI_POWER_CONTROL_ENABLE (1 << 0)
+#define DSI_INT_ENABLE 0x0c
+#define DSI_INT_STATUS 0x0d
+#define DSI_INT_MASK 0x0e
+#define DSI_HOST_CONTROL 0x0f
+#define DSI_HOST_CONTROL_RAW (1 << 6)
+#define DSI_HOST_CONTROL_HS (1 << 5)
+#define DSI_HOST_CONTROL_BTA (1 << 2)
+#define DSI_HOST_CONTROL_CS (1 << 1)
+#define DSI_HOST_CONTROL_ECC (1 << 0)
+#define DSI_CONTROL 0x10
+#define DSI_CONTROL_HS_CLK_CTRL (1 << 20)
+#define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f) (((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x) (((x) & 0x3) << 8)
+#define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4)
+#define DSI_CONTROL_DCS_ENABLE (1 << 3)
+#define DSI_CONTROL_SOURCE(s) (((s) & 0x1) << 2)
+#define DSI_CONTROL_VIDEO_ENABLE (1 << 1)
+#define DSI_CONTROL_HOST_ENABLE (1 << 0)
+#define DSI_SOL_DELAY 0x11
+#define DSI_MAX_THRESHOLD 0x12
+#define DSI_TRIGGER 0x13
+#define DSI_TX_CRC 0x14
+#define DSI_STATUS 0x15
+#define DSI_STATUS_IDLE (1 << 10)
+#define DSI_INIT_SEQ_CONTROL 0x1a
+#define DSI_INIT_SEQ_DATA_0 0x1b
+#define DSI_INIT_SEQ_DATA_1 0x1c
+#define DSI_INIT_SEQ_DATA_2 0x1d
+#define DSI_INIT_SEQ_DATA_3 0x1e
+#define DSI_INIT_SEQ_DATA_4 0x1f
+#define DSI_INIT_SEQ_DATA_5 0x20
+#define DSI_INIT_SEQ_DATA_6 0x21
+#define DSI_INIT_SEQ_DATA_7 0x22
+#define DSI_PKT_SEQ_0_LO 0x23
+#define DSI_PKT_SEQ_0_HI 0x24
+#define DSI_PKT_SEQ_1_LO 0x25
+#define DSI_PKT_SEQ_1_HI 0x26
+#define DSI_PKT_SEQ_2_LO 0x27
+#define DSI_PKT_SEQ_2_HI 0x28
+#define DSI_PKT_SEQ_3_LO 0x29
+#define DSI_PKT_SEQ_3_HI 0x2a
+#define DSI_PKT_SEQ_4_LO 0x2b
+#define DSI_PKT_SEQ_4_HI 0x2c
+#define DSI_PKT_SEQ_5_LO 0x2d
+#define DSI_PKT_SEQ_5_HI 0x2e
+#define DSI_DCS_CMDS 0x33
+#define DSI_PKT_LEN_0_1 0x34
+#define DSI_PKT_LEN_2_3 0x35
+#define DSI_PKT_LEN_4_5 0x36
+#define DSI_PKT_LEN_6_7 0x37
+#define DSI_PHY_TIMING_0 0x3c
+#define DSI_PHY_TIMING_1 0x3d
+#define DSI_PHY_TIMING_2 0x3e
+#define DSI_BTA_TIMING 0x3f
+
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+ ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0 0x44
+#define DSI_TIMEOUT_LRX(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x) (((x) & 0xffff) << 0)
+#define DSI_TIMEOUT_1 0x45
+#define DSI_TIMEOUT_PR(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x) (((x) & 0xffff) << 0)
+#define DSI_TO_TALLY 0x46
+#define DSI_TALLY_TA(x) (((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x) (((x) & 0xff) << 8)
+#define DSI_TALLY_HTX(x) (((x) & 0xff) << 0)
+#define DSI_PAD_CONTROL_0 0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x) (((x) & 0xf) << 0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK (1 << 8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x) (((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24)
+#define DSI_PAD_CONTROL_CD 0x4c
+#define DSI_PAD_CD_STATUS 0x4d
+#define DSI_VIDEO_MODE_CONTROL 0x4e
+#define DSI_PAD_CONTROL_1 0x4f
+#define DSI_PAD_CONTROL_2 0x50
+#define DSI_PAD_OUT_CLK(x) (((x) & 0x7) << 0)
+#define DSI_PAD_LP_DN(x) (((x) & 0x7) << 4)
+#define DSI_PAD_LP_UP(x) (((x) & 0x7) << 8)
+#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_CONTROL_4 0x52
+#define DSI_GANGED_MODE_CONTROL 0x53
+#define DSI_GANGED_MODE_START 0x54
+#define DSI_GANGED_MODE_SIZE 0x55
+#define DSI_RAW_DATA_BYTE_COUNT 0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL 0x57
+#define DSI_INIT_SEQ_DATA_8 0x58
+#define DSI_INIT_SEQ_DATA_9 0x59
+#define DSI_INIT_SEQ_DATA_10 0x5a
+#define DSI_INIT_SEQ_DATA_11 0x5b
+#define DSI_INIT_SEQ_DATA_12 0x5c
+#define DSI_INIT_SEQ_DATA_13 0x5d
+#define DSI_INIT_SEQ_DATA_14 0x5e
+#define DSI_INIT_SEQ_DATA_15 0x5f
+
+#endif
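
As a hedged illustration of the DSI_TIMING_FIELD() helper defined above (the 9 ns byte-clock period and the 65 ns interval are assumed figures, not values from this patch): DSI_TIMING_FIELD(65, 9, 1) expands to (DIV_ROUND_CLOSEST(65, 9) - 1) & 0xff = 7 - 1 = 6, i.e. the interval expressed in whole clock periods, less the hwinc periods presumably accounted for by the hardware, clamped to an 8-bit field.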
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a3835e7de184..f7fca09d4921 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -18,10 +18,12 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
return container_of(fb, struct tegra_fb, base);
}
+#ifdef CONFIG_DRM_TEGRA_FBDEV
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
{
return container_of(helper, struct tegra_fbdev, base);
}
+#endif
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index)
@@ -98,8 +100,10 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
- if (!fb->planes)
+ if (!fb->planes) {
+ kfree(fb);
return ERR_PTR(-ENOMEM);
+ }
fb->num_planes = num_planes;
@@ -172,6 +176,7 @@ unreference:
return ERR_PTR(err);
}
+#ifdef CONFIG_DRM_TEGRA_FBDEV
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
.fb_fillrect = sys_fillrect,
@@ -339,6 +344,15 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
kfree(fbdev);
}
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+ if (fbdev) {
+ drm_modeset_lock_all(fbdev->base.dev);
+ drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+ drm_modeset_unlock_all(fbdev->base.dev);
+ }
+}
+
static void tegra_fb_output_poll_changed(struct drm_device *drm)
{
struct tegra_drm *tegra = drm->dev_private;
@@ -346,16 +360,20 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm)
if (tegra->fbdev)
drm_fb_helper_hotplug_event(&tegra->fbdev->base);
}
+#endif
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
.fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_TEGRA_FBDEV
.output_poll_changed = tegra_fb_output_poll_changed,
+#endif
};
int tegra_drm_fb_init(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
- struct tegra_fbdev *fbdev;
+#endif
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
@@ -365,28 +383,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
drm->mode_config.funcs = &tegra_drm_mode_funcs;
- fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- tegra->fbdev = fbdev;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(tegra->fbdev))
+ return PTR_ERR(tegra->fbdev);
+#endif
return 0;
}
void tegra_drm_fb_exit(struct drm_device *drm)
{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_free(tegra->fbdev);
-}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
- if (fbdev) {
- drm_modeset_lock_all(fbdev->base.dev);
- drm_fb_helper_restore_fbdev_mode(&fbdev->base);
- drm_modeset_unlock_all(fbdev->base.dev);
- }
+#endif
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 28a9cbc07ab9..ef853e558036 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,6 +18,7 @@
* GNU General Public License for more details.
*/
+#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>
#include "gem.h"
@@ -83,7 +84,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
return bo;
}
-const struct host1x_bo_ops tegra_bo_ops = {
+static const struct host1x_bo_ops tegra_bo_ops = {
.get = tegra_bo_get,
.put = tegra_bo_put,
.pin = tegra_bo_pin,
@@ -145,7 +146,6 @@ err_dma:
kfree(bo);
return ERR_PTR(err);
-
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -174,13 +174,87 @@ err:
return ERR_PTR(ret);
}
+struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
+{
+ struct dma_buf_attachment *attach;
+ struct tegra_bo *bo;
+ ssize_t size;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ host1x_bo_init(&bo->base, &tegra_bo_ops);
+ size = round_up(buf->size, PAGE_SIZE);
+
+ err = drm_gem_object_init(drm, &bo->gem, size);
+ if (err < 0)
+ goto free;
+
+ err = drm_gem_create_mmap_offset(&bo->gem);
+ if (err < 0)
+ goto release;
+
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free_mmap;
+ }
+
+ get_dma_buf(buf);
+
+ bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+ if (!bo->sgt) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
+
+ if (bo->sgt->nents > 1) {
+ err = -EINVAL;
+ goto detach;
+ }
+
+ bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->gem.import_attach = attach;
+
+ return bo;
+
+detach:
+ if (!IS_ERR_OR_NULL(bo->sgt))
+ dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+
+ dma_buf_detach(buf, attach);
+ dma_buf_put(buf);
+free_mmap:
+ drm_gem_free_mmap_offset(&bo->gem);
+release:
+ drm_gem_object_release(&bo->gem);
+free:
+ kfree(bo);
+
+ return ERR_PTR(err);
+}
+
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_bo *bo = to_tegra_bo(gem);
+ if (gem->import_attach) {
+ dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(gem, NULL);
+ } else {
+ tegra_bo_destroy(gem->dev, bo);
+ }
+
drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
- tegra_bo_destroy(gem->dev, bo);
kfree(bo);
}
@@ -256,3 +330,106 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
+
+static struct sg_table *
+tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ sg_dma_address(sgt->sgl) = bo->paddr;
+ sg_dma_len(sgt->sgl) = gem->size;
+
+ return sgt;
+}
+
+static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void tegra_gem_prime_release(struct dma_buf *buf)
+{
+ drm_gem_dmabuf_release(buf);
+}
+
+static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
+ unsigned long page)
+{
+ return NULL;
+}
+
+static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
+ unsigned long page,
+ void *addr)
+{
+}
+
+static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
+{
+ return NULL;
+}
+
+static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
+ void *addr)
+{
+}
+
+static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
+ .map_dma_buf = tegra_gem_prime_map_dma_buf,
+ .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
+ .release = tegra_gem_prime_release,
+ .kmap_atomic = tegra_gem_prime_kmap_atomic,
+ .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
+ .kmap = tegra_gem_prime_kmap,
+ .kunmap = tegra_gem_prime_kunmap,
+ .mmap = tegra_gem_prime_mmap,
+};
+
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+ struct drm_gem_object *gem,
+ int flags)
+{
+ return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
+ flags);
+}
+
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf)
+{
+ struct tegra_bo *bo;
+
+ if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *gem = buf->priv;
+
+ if (gem->dev == drm) {
+ drm_gem_object_reference(gem);
+ return gem;
+ }
+ }
+
+ bo = tegra_bo_import(drm, buf);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
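
The drm_driver wiring for these PRIME helpers lives outside this hunk; a hedged sketch of how they are typically hooked up in a driver structure of this era (the structure name and the omitted callbacks are assumptions):

#include <drm/drmP.h>

static struct drm_driver example_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,
	/* fops, GEM free/mmap and mode-setting callbacks omitted */
};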
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 7674000bf47d..ffd4f792b410 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,6 +31,7 @@ struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
unsigned long flags;
+ struct sg_table *sgt;
dma_addr_t paddr;
void *vaddr;
};
@@ -40,8 +41,6 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
return container_of(gem, struct tegra_bo, gem);
}
-extern const struct host1x_bo_ops tegra_bo_ops;
-
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -59,4 +58,10 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct vm_operations_struct tegra_bo_vm_ops;
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+ struct drm_gem_object *gem,
+ int flags);
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf);
+
#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 4cec8f526af7..0cbb24b1ae04 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -11,6 +11,7 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/tegra-powergate.h>
#include "drm.h"
@@ -22,6 +23,8 @@ struct gr3d {
struct host1x_channel *channel;
struct clk *clk_secondary;
struct clk *clk;
+ struct reset_control *rst_secondary;
+ struct reset_control *rst;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -255,15 +258,29 @@ static int gr3d_probe(struct platform_device *pdev)
return PTR_ERR(gr3d->clk);
}
+ gr3d->rst = devm_reset_control_get(&pdev->dev, "3d");
+ if (IS_ERR(gr3d->rst)) {
+ dev_err(&pdev->dev, "cannot get reset\n");
+ return PTR_ERR(gr3d->rst);
+ }
+
if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
if (IS_ERR(gr3d->clk)) {
dev_err(&pdev->dev, "cannot get secondary clock\n");
return PTR_ERR(gr3d->clk);
}
+
+ gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
+ "3d2");
+ if (IS_ERR(gr3d->rst_secondary)) {
+ dev_err(&pdev->dev, "cannot get secondary reset\n");
+ return PTR_ERR(gr3d->rst_secondary);
+ }
}
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk,
+ gr3d->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to power up 3D unit\n");
return err;
@@ -271,7 +288,8 @@ static int gr3d_probe(struct platform_device *pdev)
if (gr3d->clk_secondary) {
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
- gr3d->clk_secondary);
+ gr3d->clk_secondary,
+ gr3d->rst_secondary);
if (err < 0) {
dev_err(&pdev->dev,
"failed to power up secondary 3D unit\n");
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 0cd9bc2056e8..6928015d11a4 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,10 +8,10 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "hdmi.h"
#include "drm.h"
@@ -40,6 +40,7 @@ struct tegra_hdmi {
struct host1x_client client;
struct tegra_output output;
struct device *dev;
+ bool enabled;
struct regulator *vdd;
struct regulator *pll;
@@ -49,6 +50,7 @@ struct tegra_hdmi {
struct clk *clk_parent;
struct clk *clk;
+ struct reset_control *rst;
const struct tegra_hdmi_config *config;
@@ -378,7 +380,7 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
if (f > 96000)
delta = 2;
- else if (f > 480000)
+ else if (f > 48000)
delta = 6;
else
delta = 9;
@@ -698,6 +700,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
int retries = 1000;
int err;
+ if (hdmi->enabled)
+ return 0;
+
hdmi->dvi = !tegra_output_is_hdmi(output);
pclk = mode->clock * 1000;
@@ -731,9 +736,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
return err;
}
- tegra_periph_reset_assert(hdmi->clk);
+ reset_control_assert(hdmi->rst);
usleep_range(1000, 2000);
- tegra_periph_reset_deassert(hdmi->clk);
+ reset_control_deassert(hdmi->rst);
tegra_dc_writel(dc, VSYNC_H_POSITION(1),
DC_DISP_DISP_TIMING_OPTIONS);
@@ -838,10 +843,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
value |= SOR_CSTM_ROTCLK(2);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
- tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
/* start SOR */
tegra_hdmi_writel(hdmi,
SOR_PWR_NORMAL_STATE_PU |
@@ -891,31 +892,67 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
HDMI_NV_PDISP_SOR_STATE1);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
- tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
-
- value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- value = DISP_CTRL_MODE_C_DISPLAY;
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
/* TODO: add HDCP support */
+ hdmi->enabled = true;
+
return 0;
}
static int tegra_output_hdmi_disable(struct tegra_output *output)
{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long value;
- tegra_periph_reset_assert(hdmi->clk);
+ if (!hdmi->enabled)
+ return 0;
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ }
+
+ reset_control_assert(hdmi->rst);
clk_disable(hdmi->clk);
regulator_disable(hdmi->pll);
+ hdmi->enabled = false;
+
return 0;
}
@@ -959,7 +996,7 @@ static int tegra_output_hdmi_check_mode(struct tegra_output *output,
parent = clk_get_parent(hdmi->clk_parent);
err = clk_round_rate(parent, pclk * 4);
- if (err < 0)
+ if (err <= 0)
*status = MODE_NOCLOCK;
else
*status = MODE_OK;
@@ -1338,6 +1375,12 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->clk);
}
+ hdmi->rst = devm_reset_control_get(&pdev->dev, "hdmi");
+ if (IS_ERR(hdmi->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(hdmi->rst);
+ }
+
err = clk_prepare(hdmi->clk);
if (err < 0)
return err;
@@ -1375,9 +1418,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
-
hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
new file mode 100644
index 000000000000..e2c4aedaee78
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "mipi-phy.h"
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from
+ * the valid ranges specified in Section 5.9 of the D-PHY specification
+ * with minor adjustments.
+ */
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ timing->clkmiss = 0;
+ timing->clkpost = 70 + 52 * period;
+ timing->clkpre = 8;
+ timing->clkprepare = 65;
+ timing->clksettle = 95;
+ timing->clktermen = 0;
+ timing->clktrail = 80;
+ timing->clkzero = 260;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120;
+ timing->hsprepare = 65 + 5 * period;
+ timing->hszero = 145 + 5 * period;
+ timing->hssettle = 85 + 6 * period;
+ timing->hsskip = 40;
+ timing->hstrail = max(8 * period, 60 + 4 * period);
+ timing->init = 100000;
+ timing->lpx = 60;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000;
+
+ return 0;
+}
+
+/*
+ * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
+ * Section 5.9 "Global Operation Timing Parameters".
+ */
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ if (timing->clkmiss > 60)
+ return -EINVAL;
+
+ if (timing->clkpost < (60 + 52 * period))
+ return -EINVAL;
+
+ if (timing->clkpre < 8)
+ return -EINVAL;
+
+ if (timing->clkprepare < 38 || timing->clkprepare > 95)
+ return -EINVAL;
+
+ if (timing->clksettle < 95 || timing->clksettle > 300)
+ return -EINVAL;
+
+ if (timing->clktermen > 38)
+ return -EINVAL;
+
+ if (timing->clktrail < 60)
+ return -EINVAL;
+
+ if (timing->clkprepare + timing->clkzero < 300)
+ return -EINVAL;
+
+ if (timing->dtermen > 35 + 4 * period)
+ return -EINVAL;
+
+ if (timing->eot > 105 + 12 * period)
+ return -EINVAL;
+
+ if (timing->hsexit < 100)
+ return -EINVAL;
+
+ if (timing->hsprepare < 40 + 4 * period ||
+ timing->hsprepare > 85 + 6 * period)
+ return -EINVAL;
+
+ if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+ return -EINVAL;
+
+ if ((timing->hssettle < 85 + 6 * period) ||
+ (timing->hssettle > 145 + 10 * period))
+ return -EINVAL;
+
+ if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+ return -EINVAL;
+
+ if (timing->hstrail < max(8 * period, 60 + 4 * period))
+ return -EINVAL;
+
+ if (timing->init < 100000)
+ return -EINVAL;
+
+ if (timing->lpx < 50)
+ return -EINVAL;
+
+ if (timing->taget != 5 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tago != 4 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->wakeup < 1000000)
+ return -EINVAL;
+
+ return 0;
+}
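
A hedged usage sketch for the two helpers above; the lane rate and the logging device are illustrative assumptions:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/time.h>

#include "mipi-phy.h"

static int example_dphy_setup(struct device *dev, unsigned long lane_rate)
{
	struct mipi_dphy_timing timing;
	unsigned long period;
	int err;

	/* UI (bit period) in nanoseconds, e.g. 2 ns at 500 Mbit/s per lane */
	period = DIV_ROUND_UP(NSEC_PER_SEC, lane_rate);

	err = mipi_dphy_timing_get_default(&timing, period);
	if (err < 0)
		return err;

	err = mipi_dphy_timing_validate(&timing, period);
	if (err < 0)
		dev_err(dev, "D-PHY timing validation failed: %d\n", err);

	return err;
}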
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
new file mode 100644
index 000000000000..d3591694432d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_MIPI_PHY_H
+#define DRM_TEGRA_MIPI_PHY_H
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period);
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 2cb0065e0578..57cecbd18ca8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -9,6 +9,7 @@
#include <linux/of_gpio.h>
+#include <drm/drm_panel.h>
#include "drm.h"
static int tegra_connector_get_modes(struct drm_connector *connector)
@@ -17,6 +18,16 @@ static int tegra_connector_get_modes(struct drm_connector *connector)
struct edid *edid = NULL;
int err = 0;
+ /*
+ * If the panel provides one or more modes, use them exclusively and
+ * ignore any other means of obtaining a mode.
+ */
+ if (output->panel) {
+ err = output->panel->funcs->get_modes(output->panel);
+ if (err > 0)
+ return err;
+ }
+
if (output->edid)
edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
else if (output->ddc)
@@ -72,6 +83,11 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
else
status = connector_status_connected;
} else {
+ if (!output->panel)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
status = connector_status_connected;
}
@@ -115,6 +131,16 @@ static const struct drm_encoder_funcs encoder_funcs = {
static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct drm_panel *panel = output->panel;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ drm_panel_disable(panel);
+ tegra_output_disable(output);
+ } else {
+ tegra_output_enable(output);
+ drm_panel_enable(panel);
+ }
}
static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -163,14 +189,22 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
+ struct device_node *ddc, *panel;
enum of_gpio_flags flags;
- struct device_node *ddc;
- size_t size;
- int err;
+ int err, size;
if (!output->of_node)
output->of_node = output->dev->of_node;
+ panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+ if (panel) {
+ output->panel = of_drm_find_panel(panel);
+ if (!output->panel)
+ return -EPROBE_DEFER;
+
+ of_node_put(panel);
+ }
+
output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
@@ -185,9 +219,6 @@ int tegra_output_probe(struct tegra_output *output)
of_node_put(ddc);
}
- if (!output->edid && !output->ddc)
- return -ENODEV;
-
output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
"nvidia,hpd-gpio", 0,
&flags);
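
For the nvidia,panel lookup above to resolve, some driver must already have registered a drm_panel; a hedged, minimal sketch of such a provider using the panel framework this series builds on (the device, the fixed mode and all names are illustrative):

#include <linux/platform_device.h>
#include <linux/slab.h>

#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>

static const struct drm_display_mode example_mode = {
	.clock = 148500,
	.hdisplay = 1920, .hsync_start = 2008, .hsync_end = 2052, .htotal = 2200,
	.vdisplay = 1080, .vsync_start = 1084, .vsync_end = 1089, .vtotal = 1125,
	.vrefresh = 60,
};

static int example_panel_get_modes(struct drm_panel *panel)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(panel->drm, &example_mode);
	if (!mode)
		return 0;

	drm_mode_set_name(mode);
	drm_mode_probed_add(panel->connector, mode);

	return 1;
}

static const struct drm_panel_funcs example_panel_funcs = {
	.get_modes = example_panel_get_modes,
};

static int example_panel_probe(struct platform_device *pdev)
{
	struct drm_panel *panel;

	panel = devm_kzalloc(&pdev->dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	drm_panel_init(panel);
	panel->dev = &pdev->dev;
	panel->funcs = &example_panel_funcs;

	return drm_panel_add(panel);
}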
@@ -256,6 +287,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
encoder = DRM_MODE_ENCODER_TMDS;
break;
+ case TEGRA_OUTPUT_DSI:
+ connector = DRM_MODE_CONNECTOR_DSI;
+ encoder = DRM_MODE_ENCODER_DSI;
+ break;
+
default:
connector = DRM_MODE_CONNECTOR_Unknown;
encoder = DRM_MODE_ENCODER_NONE;
@@ -267,6 +303,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_connector_helper_add(&output->connector, &connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
+ if (output->panel)
+ drm_panel_attach(output->panel, &output->connector);
+
drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 3b29018913a5..0266fb40479e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -87,18 +88,73 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output)
{
struct tegra_rgb *rgb = to_rgb(output);
+ unsigned long value;
+
+ if (rgb->enabled)
+ return 0;
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ /* XXX: parameterize? */
+ value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ /* XXX: parameterize? */
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ /* XXX: parameterize? */
+ value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ rgb->enabled = true;
+
return 0;
}
static int tegra_output_rgb_disable(struct tegra_output *output)
{
struct tegra_rgb *rgb = to_rgb(output);
+ unsigned long value;
+
+ if (!rgb->enabled)
+ return 0;
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ rgb->enabled = false;
+
return 0;
}
@@ -213,7 +269,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
* RGB outputs are an exception, so we make sure they can be attached
* to only their parent display controller.
*/
- rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+ rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base);
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 116da199b942..171a8203892c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -311,7 +311,7 @@ static void tilcdc_lastclose(struct drm_device *dev)
drm_fbdev_cma_restore_mode(priv->fbdev);
}
-static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+static irqreturn_t tilcdc_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct tilcdc_drm_private *priv = dev->dev_private;
@@ -444,7 +444,7 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, dev->mm_private);
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static struct drm_info_list tilcdc_debugfs_list[] = {
@@ -594,7 +594,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
static int tilcdc_pdev_remove(struct platform_device *pdev)
{
- drm_platform_exit(&tilcdc_driver, pdev);
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..764be36397fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 07e02c4bf5a8..214b7992a3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
@@ -957,7 +959,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 406152152315..1df856f78568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
return 0;
}
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
@@ -594,7 +594,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
- if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6440eeac22d2..0ce48e5a9cb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
+ /*
+ * Refuse to fault imported pages. This should be handled
+ * (if at all) by redirecting mmap to the exporter.
+ */
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
if (bdev->driver->fault_reserve_notify) {
ret = bdev->driver->fault_reserve_notify(bo);
switch (ret) {
@@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} else if (unlikely(!page)) {
break;
}
+ page->mapping = vma->vm_file->f_mapping;
+ page->index = drm_vma_node_start(&bo->vma_node) +
+ page_offset;
pfn = page_to_pfn(page);
}
- ret = vm_insert_mixed(&cvma, address, pfn);
+ if (vma->vm_flags & VM_MIXEDMAP)
+ ret = vm_insert_mixed(&cvma, address, pfn);
+ else
+ ret = vm_insert_pfn(&cvma, address, pfn);
+
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
@@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
struct ttm_buffer_object *bo =
(struct ttm_buffer_object *)vma->vm_private_data;
+ WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
(void)ttm_bo_reference(bo);
}
@@ -319,7 +337,16 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
*/
vma->vm_private_data = bo;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
ttm_bo_unref(&bo);
@@ -334,7 +361,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3daa9a3930b8..6a954544727f 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
}
EXPORT_SYMBOL(ttm_write_lock);
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 1;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@
struct ttm_object_file {
struct ttm_object_device *tdev;
- rwlock_t lock;
+ spinlock_t lock;
struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM];
struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
*/
struct ttm_ref_object {
+ struct rcu_head rcu_head;
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
* call_rcu() or ttm_base_object_kfree().
*/
- if (base->refcount_release) {
- ttm_object_file_unref(&base->tfile);
+ ttm_object_file_unref(&base->tfile);
+ if (base->refcount_release)
base->refcount_release(&base);
- }
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
- struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *uninitialized_var(base);
+ struct ttm_base_object *base = NULL;
struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+ base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+ if (!kref_get_unless_zero(&base->refcount))
+ base = NULL;
}
rcu_read_unlock();
- if (unlikely(ret != 0))
- return NULL;
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
- if (tfile != base->tfile && !base->shareable) {
- pr_err("Attempted access of non-shareable object\n");
- ttm_base_object_unref(&base);
- return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+ struct ttm_base_object *base = NULL;
+ struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tdev->object_hash;
+ int ret;
+
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ if (!kref_get_unless_zero(&base->refcount))
+ base = NULL;
}
+ rcu_read_unlock();
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
int ret = -EINVAL;
+ if (base->tfile != tfile && !base->shareable)
+ return -EPERM;
+
if (existed != NULL)
*existed = true;
while (ret == -EINVAL) {
- read_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, base->hash.key, &hash);
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- kref_get(&ref->kref);
- read_unlock(&tfile->lock);
- break;
+ if (kref_get_unless_zero(&ref->kref)) {
+ rcu_read_unlock();
+ break;
+ }
}
- read_unlock(&tfile->lock);
+ rcu_read_unlock();
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
ref->ref_type = ref_type;
kref_init(&ref->kref);
- write_lock(&tfile->lock);
- ret = drm_ht_insert_item(ht, &ref->hash);
+ spin_lock(&tfile->lock);
+ ret = drm_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
kref_get(&base->refcount);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item(ht, &ref->hash);
+ (void)drm_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
- kfree(ref);
- write_lock(&tfile->lock);
+ kfree_rcu(ref, rcu_head);
+ spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- write_lock(&tfile->lock);
+ spin_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
- write_lock(&tfile->lock);
+ spin_lock(&tfile->lock);
/*
* Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- write_unlock(&tfile->lock);
+ spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
if (unlikely(tfile == NULL))
return NULL;
- rwlock_init(&tfile->lock);
+ spin_lock_init(&tfile->lock);
tfile->tdev = tdev;
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 210d50365162..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (ttm->state == tt_unbound) {
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
- }
+ if (ttm->state == tt_unbound)
+ ttm_tt_unpopulate(ttm);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
@@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
@@ -375,3 +374,26 @@ out_err:
return ret;
}
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+ pgoff_t i;
+ struct page **page = ttm->pages;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ (*page)->mapping = NULL;
+ (*page++)->index = 0;
+ }
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ if (ttm->state == tt_unpopulated)
+ return;
+
+ ttm_tt_clear_mapping(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
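With this wrapper, the ttm_tt_destroy() and ttm_tt_swapout() paths above no longer call the driver's ->ttm_tt_unpopulate() hook directly: ttm_tt_unpopulate() returns early for a TTM that is already unpopulated, clears page->mapping and page->index on every page (SG-backed TTMs are skipped), and only then hands the TTM to the driver hook.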
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 97e9d614700f..dbadd49e4c4a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -403,15 +403,17 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
int i;
int ret = 0;
+ drm_modeset_lock_all(fb->dev);
+
if (!ufb->active_16)
- return 0;
+ goto unlock;
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
- return ret;
+ goto unlock;
}
for (i = 0; i < num_clips; i++) {
@@ -419,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
if (ret)
- break;
+ goto unlock;
}
if (ufb->obj->base.import_attach) {
@@ -427,6 +429,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
}
+
+ unlock:
+ drm_modeset_unlock_all(fb->dev);
+
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8d67b943ac05..0394811251bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->vmapping)
udl_gem_vunmap(obj);
- if (gem_obj->import_attach)
+ if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
+ put_device(gem_obj->dev->dev);
+ }
if (obj->pages)
udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
int ret;
/* need to attach */
+ get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
return ERR_CAST(attach);
+ }
get_dma_buf(dma_buf);
@@ -282,6 +287,6 @@ fail_unmap:
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
-
+ put_device(dev->dev);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 652f9b43ec9d..a18479c6b6da 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -60,7 +60,7 @@
dev_priv->dma_low += 8; \
}
-#define via_flush_write_combine() DRM_MEMORYBARRIER()
+#define via_flush_write_combine() mb()
#define VIA_OUT_RING_QW(w1, w2) do { \
*vb++ = (w1); \
@@ -234,13 +234,13 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
switch (init->func) {
case VIA_INIT_DMA:
- if (!DRM_SUSER(DRM_CURPROC))
+ if (!capable(CAP_SYS_ADMIN))
retcode = -EPERM;
else
retcode = via_initialize(dev, dev_priv, init);
break;
case VIA_CLEANUP_DMA:
- if (!DRM_SUSER(DRM_CURPROC))
+ if (!capable(CAP_SYS_ADMIN))
retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
@@ -273,7 +273,7 @@ static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *c
if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
/*
@@ -346,7 +346,7 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
if ((ret =
@@ -543,7 +543,7 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv)
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- DRM_WRITEMEMORYBARRIER();
+ wmb();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
VIA_READ(VIA_REG_TRANSPACE);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8b0f25904e6d..ba33cf679180 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -217,7 +217,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- DRM_WRITEMEMORYBARRIER();
+ wmb();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
@@ -338,7 +338,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ wake_up(blitq->blit_queue + cur);
cur++;
if (cur >= VIA_NUM_BLIT_SLOTS)
@@ -363,7 +363,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
- blitq->end = jiffies + DRM_HZ;
+ blitq->end = jiffies + HZ;
}
if (!blitq->is_active) {
@@ -372,7 +372,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 1;
blitq->cur = cur;
blitq->num_outstanding--;
- blitq->end = jiffies + DRM_HZ;
+ blitq->end = jiffies + HZ;
if (!timer_pending(&blitq->poll_timer))
mod_timer(&blitq->poll_timer, jiffies + 1);
} else {
@@ -436,7 +436,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -521,7 +521,7 @@ via_dmablit_workqueue(struct work_struct *work)
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP(&blitq->busy_queue);
+ wake_up(&blitq->busy_queue);
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
@@ -561,8 +561,8 @@ via_init_dmablit(struct drm_device *dev)
blitq->aborting = 0;
spin_lock_init(&blitq->blit_lock);
for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
- DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
- DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+ init_waitqueue_head(blitq->blit_queue + j);
+ init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
@@ -688,7 +688,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+ DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
@@ -713,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->num_free++;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP(&blitq->busy_queue);
+ wake_up(&blitq->busy_queue);
}
/*
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 92684a9b7e34..50abc2adfaee 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -46,7 +46,7 @@ static int via_driver_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct via_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index a811ef2b505f..ad0273256beb 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -138,7 +138,7 @@ extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
extern void via_driver_irq_preinstall(struct drm_device *dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
extern void via_driver_irq_uninstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index ac98964297cf..1319433816d3 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -104,7 +104,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
return atomic_read(&dev_priv->vbl_received);
}
-irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t via_driver_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
atomic_inc(&cur_irq->irq_received);
- DRM_WAKEUP(&cur_irq->irq_queue);
+ wake_up(&cur_irq->irq_queue);
handled = 1;
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
via_dmablit_handler(dev, 0, 1);
@@ -239,12 +239,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
(((cur_irq_sequence =
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
@@ -287,7 +287,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+ init_waitqueue_head(&cur_irq->irq_queue);
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6569efa2ff6e..a9ffbad1cfdd 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -36,7 +36,7 @@ void via_init_futex(drm_via_private_t *dev_priv)
DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+ init_waitqueue_head(&(dev_priv->decoder_queue[i]));
XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
}
}
@@ -58,7 +58,7 @@ void via_release_futex(drm_via_private_t *dev_priv, int context)
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
- DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+ wake_up(&(dev_priv->decoder_queue[i]));
}
*lock = 0;
}
@@ -83,10 +83,10 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
switch (fx->func) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
- (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+ (fx->ms / 10) * (HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
- DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+ wake_up(&(dev_priv->decoder_queue[fx->lock]));
return 0;
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690bcf52..458cdf6d81e8 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085ee8249..f58dc7dd15c5 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -34,6 +34,8 @@
#include "svga_reg.h"
+typedef uint32 PPN;
+typedef __le64 PPN64;
/*
* 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_MAX_CONTEXT_IDS 256
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+#define SVGA3D_NUM_TEXTURE_UNITS 32
+#define SVGA3D_NUM_LIGHTS 8
+
/*
* Surface formats.
*
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_MIN = 0,
SVGA3D_FORMAT_INVALID = 0,
SVGA3D_X8R8G8B8 = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_RG_S10E5 = 35,
SVGA3D_RG_S23E8 = 36,
- /*
- * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
- * the most efficient format to use when creating new surfaces
- * expressly for index or vertex data.
- */
-
SVGA3D_BUFFER = 37,
SVGA3D_Z_D24X8 = 38,
@@ -159,15 +159,109 @@ typedef enum SVGA3dSurfaceFormat {
/* Video format with alpha */
SVGA3D_AYUV = 45,
+ SVGA3D_R32G32B32A32_TYPELESS = 46,
+ SVGA3D_R32G32B32A32_FLOAT = 25,
+ SVGA3D_R32G32B32A32_UINT = 47,
+ SVGA3D_R32G32B32A32_SINT = 48,
+ SVGA3D_R32G32B32_TYPELESS = 49,
+ SVGA3D_R32G32B32_FLOAT = 50,
+ SVGA3D_R32G32B32_UINT = 51,
+ SVGA3D_R32G32B32_SINT = 52,
+ SVGA3D_R16G16B16A16_TYPELESS = 53,
+ SVGA3D_R16G16B16A16_FLOAT = 24,
+ SVGA3D_R16G16B16A16_UNORM = 41,
+ SVGA3D_R16G16B16A16_UINT = 54,
+ SVGA3D_R16G16B16A16_SNORM = 55,
+ SVGA3D_R16G16B16A16_SINT = 56,
+ SVGA3D_R32G32_TYPELESS = 57,
+ SVGA3D_R32G32_FLOAT = 36,
+ SVGA3D_R32G32_UINT = 58,
+ SVGA3D_R32G32_SINT = 59,
+ SVGA3D_R32G8X24_TYPELESS = 60,
+ SVGA3D_D32_FLOAT_S8X24_UINT = 61,
+ SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
+ SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R10G10B10A2_TYPELESS = 64,
+ SVGA3D_R10G10B10A2_UNORM = 26,
+ SVGA3D_R10G10B10A2_UINT = 65,
+ SVGA3D_R11G11B10_FLOAT = 66,
+ SVGA3D_R8G8B8A8_TYPELESS = 67,
+ SVGA3D_R8G8B8A8_UNORM = 68,
+ SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
+ SVGA3D_R8G8B8A8_UINT = 70,
+ SVGA3D_R8G8B8A8_SNORM = 28,
+ SVGA3D_R8G8B8A8_SINT = 71,
+ SVGA3D_R16G16_TYPELESS = 72,
+ SVGA3D_R16G16_FLOAT = 35,
+ SVGA3D_R16G16_UNORM = 40,
+ SVGA3D_R16G16_UINT = 73,
+ SVGA3D_R16G16_SNORM = 39,
+ SVGA3D_R16G16_SINT = 74,
+ SVGA3D_R32_TYPELESS = 75,
+ SVGA3D_D32_FLOAT = 76,
+ SVGA3D_R32_FLOAT = 34,
+ SVGA3D_R32_UINT = 77,
+ SVGA3D_R32_SINT = 78,
+ SVGA3D_R24G8_TYPELESS = 79,
+ SVGA3D_D24_UNORM_S8_UINT = 80,
+ SVGA3D_R24_UNORM_X8_TYPELESS = 81,
+ SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R8G8_TYPELESS = 83,
+ SVGA3D_R8G8_UNORM = 84,
+ SVGA3D_R8G8_UINT = 85,
+ SVGA3D_R8G8_SNORM = 27,
+ SVGA3D_R8G8_SINT = 86,
+ SVGA3D_R16_TYPELESS = 87,
+ SVGA3D_R16_FLOAT = 33,
+ SVGA3D_D16_UNORM = 8,
+ SVGA3D_R16_UNORM = 88,
+ SVGA3D_R16_UINT = 89,
+ SVGA3D_R16_SNORM = 90,
+ SVGA3D_R16_SINT = 91,
+ SVGA3D_R8_TYPELESS = 92,
+ SVGA3D_R8_UNORM = 93,
+ SVGA3D_R8_UINT = 94,
+ SVGA3D_R8_SNORM = 95,
+ SVGA3D_R8_SINT = 96,
+ SVGA3D_A8_UNORM = 32,
+ SVGA3D_R1_UNORM = 97,
+ SVGA3D_R9G9B9E5_SHAREDEXP = 98,
+ SVGA3D_R8G8_B8G8_UNORM = 99,
+ SVGA3D_G8R8_G8B8_UNORM = 100,
+ SVGA3D_BC1_TYPELESS = 101,
+ SVGA3D_BC1_UNORM = 15,
+ SVGA3D_BC1_UNORM_SRGB = 102,
+ SVGA3D_BC2_TYPELESS = 103,
+ SVGA3D_BC2_UNORM = 17,
+ SVGA3D_BC2_UNORM_SRGB = 104,
+ SVGA3D_BC3_TYPELESS = 105,
+ SVGA3D_BC3_UNORM = 19,
+ SVGA3D_BC3_UNORM_SRGB = 106,
+ SVGA3D_BC4_TYPELESS = 107,
SVGA3D_BC4_UNORM = 108,
+ SVGA3D_BC4_SNORM = 109,
+ SVGA3D_BC5_TYPELESS = 110,
SVGA3D_BC5_UNORM = 111,
+ SVGA3D_BC5_SNORM = 112,
+ SVGA3D_B5G6R5_UNORM = 3,
+ SVGA3D_B5G5R5A1_UNORM = 5,
+ SVGA3D_B8G8R8A8_UNORM = 2,
+ SVGA3D_B8G8R8X8_UNORM = 1,
+ SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
+ SVGA3D_B8G8R8A8_TYPELESS = 114,
+ SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
+ SVGA3D_B8G8R8X8_TYPELESS = 116,
+ SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
/* Advanced D3D9 depth formats. */
SVGA3D_Z_DF16 = 118,
SVGA3D_Z_DF24 = 119,
SVGA3D_Z_D24S8_INT = 120,
- SVGA3D_FORMAT_MAX
+ /* Planar video formats. */
+ SVGA3D_YV12 = 121,
+
+ SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -957,15 +1051,21 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
+ SVGA3D_SHADERTYPE_INVALID = 0,
+ SVGA3D_SHADERTYPE_MIN = 1,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
- SVGA3D_SHADERTYPE_MAX
+ SVGA3D_SHADERTYPE_MAX = 3,
+ SVGA3D_SHADERTYPE_GS = 3,
} SVGA3dShaderType;
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
typedef enum {
SVGA3D_CONST_TYPE_FLOAT = 0,
SVGA3D_CONST_TYPE_INT = 1,
SVGA3D_CONST_TYPE_BOOL = 2,
+ SVGA3D_CONST_TYPE_MAX
} SVGA3dShaderConstType;
#define SVGA3D_MAX_SURFACE_FACES 6
@@ -1056,9 +1156,84 @@ typedef enum {
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
-
-#define SVGA_3D_CMD_FUTURE_MAX 2000
+#define SVGA_3D_CMD_SCREEN_DMA 1082
+#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
+#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084
+
+#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085
+#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086
+#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087
+#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088
+#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089
+#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE 1091
+#define SVGA_3D_CMD_READBACK_OTABLE 1092
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB 1093
+#define SVGA_3D_CMD_DESTROY_GB_MOB 1094
+#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095
+#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096
+
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097
+#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098
+#define SVGA_3D_CMD_BIND_GB_SURFACE 1099
+#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100
+#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101
+#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102
+#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103
+#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105
+#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106
+
+#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107
+#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108
+#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109
+#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110
+#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111
+
+#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112
+#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113
+#define SVGA_3D_CMD_BIND_GB_SHADER 1114
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115
+
+#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116
+#define SVGA_3D_CMD_END_GB_QUERY 1117
+#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118
+
+#define SVGA_3D_CMD_NOP 1119
+
+#define SVGA_3D_CMD_ENABLE_GART 1120
+#define SVGA_3D_CMD_DISABLE_GART 1121
+#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122
+#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123
+
+#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124
+#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125
+#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126
+#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127
+
+#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
+
+#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
+#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
+#define SVGA_3D_CMD_GB_MOB_FENCE 1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
+#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
+#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+#define SVGA_3D_CMD_NOP_ERROR 1137
+
+#define SVGA_3D_CMD_RESERVED1 1138
+#define SVGA_3D_CMD_RESERVED2 1139
+#define SVGA_3D_CMD_RESERVED3 1140
+#define SVGA_3D_CMD_RESERVED4 1141
+#define SVGA_3D_CMD_RESERVED5 1142
+
+#define SVGA_3D_CMD_MAX 1142
+#define SVGA_3D_CMD_FUTURE_MAX 3000
/*
* Common substructures used in multiple FIFO commands:
@@ -1750,6 +1925,507 @@ struct {
/*
+ * Guest-backed surface definitions.
+ */
+
+typedef uint32 SVGAMobId;
+
+typedef enum SVGAMobFormat {
+ SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+ SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+ SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+ SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+ SVGA3D_MOBFMT_RANGE = 3,
+ SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+ SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+ SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+ SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+/*
+ * Sizes of opaque types.
+ */
+
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
+#define SVGA3D_CONTEXT_DATA_SIZE 16384
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef enum {
+ SVGA_OTABLE_MOB = 0,
+ SVGA_OTABLE_MIN = 0,
+ SVGA_OTABLE_SURFACE = 1,
+ SVGA_OTABLE_CONTEXT = 2,
+ SVGA_OTABLE_SHADER = 3,
+ SVGA_OTABLE_SCREEN_TARGET = 4,
+ SVGA_OTABLE_DX9_MAX = 5,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+} __packed
+SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+} __packed
+SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
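As an illustration of how a driver might emit this command, here is a minimal sketch that points the MOB object table at a physically contiguous page range, reusing the vmw_fifo_reserve()/vmw_fifo_commit() pattern that appears in the vmwgfx context code later in this patch. The function name and the struct vmw_private parameter are assumptions for the example, not part of the header.

/* Sketch only: program the MOB object table with a contiguous range. */
static int example_set_otable_base64(struct vmw_private *dev_priv,
				     PPN64 first_ppn, uint32 size_in_bytes)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = SVGA_OTABLE_MOB;
	cmd->body.baseAddress = first_ppn;		/* first page of the table */
	cmd->body.sizeInBytes = size_in_bytes;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_RANGE;	/* contiguous, no page table */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}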
+
+typedef
+struct {
+ SVGAOTableType type;
+} __packed
+SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBMob {
+ SVGAMobId mobid;
+} __packed
+SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+/*
+ * Redefine an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN base;
+ uint32 sizeInBytes;
+} __packed
+SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob64 {
+ SVGAMobId mobid;
+ SVGAMobFormat ptDepth;
+ PPN64 base;
+ uint32 sizeInBytes;
+} __packed
+SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBMobMapping {
+ SVGAMobId mobid;
+} __packed
+SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBSurface {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+} __packed
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBSurface {
+ uint32 sid;
+} __packed
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to an object.
+ */
+
+typedef
+struct SVGA3dCmdBindGBSurface {
+ uint32 sid;
+ SVGAMobId mobid;
+} __packed
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+/*
+ * Conditionally bind a mob to a guest backed surface if testMobid
+ * matches the currently bound mob. Optionally issue a readback on
+ * the surface while it is still bound to the old mobid if the mobid
+ * is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+
+typedef
+struct {
+ uint32 sid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+ uint32 flags;
+} __packed
+SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
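For example, a caller replacing a surface's backing mob could fill the body like this (illustrative fragment only, with the command reserved and committed the same way as in the object-table sketch above); the READBACK flag asks the device to flush the surface contents from the old mob before rebinding:

	cmd->body.sid       = sid;
	cmd->body.testMobid = old_mobid;	/* rebind only if this mob is still bound */
	cmd->body.mobid     = new_mobid;
	cmd->body.flags     = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK;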
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBImage {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+} __packed
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBSurface {
+ uint32 sid;
+} __packed
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImage {
+ SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBSurface {
+ uint32 sid;
+} __packed
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+} __packed
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImage {
+ SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBSurface {
+ uint32 sid;
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface. After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImagePartial {
+ SVGA3dSurfaceImageId image;
+ SVGA3dBox box;
+ uint32 invertBox;
+} __packed
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBContext {
+ uint32 cid;
+} __packed
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBContext {
+ uint32 cid;
+} __packed
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+struct SVGA3dCmdBindGBContext {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 validContents;
+} __packed
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
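The context code later in this patch follows this convention: vmw_gb_context_bind() sets validContents from res->backup_dirty and then clears the flag, so the device is only told the mob holds valid contents when the resource's backup buffer actually does.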
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBContext {
+ uint32 cid;
+} __packed
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+struct SVGA3dCmdInvalidateGBContext {
+ uint32 cid;
+} __packed
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBShader {
+ uint32 shid;
+ SVGA3dShaderType type;
+ uint32 sizeInBytes;
+} __packed
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdBindGBShader {
+ uint32 shid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+} __packed
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdDestroyGBShader {
+ uint32 shid;
+} __packed
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 regStart;
+ SVGA3dShaderType shaderType;
+ SVGA3dShaderConstType constType;
+
+ /*
+ * Followed by a variable number of shader constants.
+ *
+ * Note that FLOAT and INT constants are 4-dwords in length, while
+ * BOOL constants are 1-dword in length.
+ */
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
+/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
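So a command carrying n FLOAT constants would presumably be sized as header.size = sizeof(SVGA3dCmdSetGBShaderConstInline) + n * 4 * sizeof(uint32), following the header.size convention used by the fixed-size commands in this file.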
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} __packed
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} __packed
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ * The semantics of this command are identical to the
+ * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ * to a Mob instead of a GMR.
+ */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAMobId mobid;
+ uint32 offset;
+} __packed
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 fbOffset;
+ uint32 initalized;
+} __packed
+SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+struct {
+ SVGAMobId mobid;
+ uint32 gartOffset;
+} __packed
+SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+struct {
+ uint32 gartOffset;
+ uint32 numPages;
+} __packed
+SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+
+typedef
+struct {
+ uint32 stid;
+ uint32 width;
+ uint32 height;
+ int32 xRoot;
+ int32 yRoot;
+ uint32 flags;
+} __packed
+SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+} __packed
+SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId image;
+} __packed
+SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+struct {
+ uint32 stid;
+ SVGA3dBox box;
+} __packed
+SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+/*
* Capability query index.
*
* Notes:
@@ -1879,10 +2555,41 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
- * Don't add new caps into the previous section; the values in this
- * enumeration must not change. You can put new values right before
- * SVGA3D_DEVCAP_MAX.
+ * Deprecated.
+ */
+ SVGA3D_DEVCAP_VGPU10 = 84,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+ * ored together, one for every type of video decoding supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_DECODE = 85,
+
+ /*
+ * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+ * ored together, one for every type of video processing supported.
+ */
+ SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
+
+ SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
+ SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
+ SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
+ SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
+
+ SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
+
+ /*
+ * Does the host support the SVGA logic ops commands?
+ */
+ SVGA3D_DEVCAP_LOGICOPS = 92,
+
+ /*
+ * What support does the host have for screen targets?
+ *
+ * See the SVGA3D_SCREENTARGET_CAP bits below.
*/
+ SVGA3D_DEVCAP_SCREENTARGETS = 93,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
@@ -1893,4 +2600,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
#endif /* __KERNEL__ */
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
static inline u32 clamped_umul32(u32 a, u32 b)
{
- uint64_t tmp = (uint64_t) a*b;
- return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+ u64 tmp = (u64) a*b;
+ return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u32 total_size = 0;
+ u64 total_size = 0;
u32 mip;
for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
- return total_size;
+ return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
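Both changes in this file make the size arithmetic saturate at U32_MAX instead of silently wrapping. A quick illustration with hypothetical operands:

	/* 0x10000 * 0x10000 = 0x100000000 does not fit in 32 bits. */
	u32 bytes = clamped_umul32(0x10000, 0x10000);	/* bytes == U32_MAX, not 0 */

svga3dsurface_get_serialized_size() applies the same idea: it accumulates the per-mip sizes in a u64 running total and clamps the final result to U32_MAX before returning it as a u32.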
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb49678..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,7 +169,17 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
- SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
+ SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
+ SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
+ SVGA_REG_CMD_PREPEND_LOW = 53,
+ SVGA_REG_CMD_PREPEND_HIGH = 54,
+ SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+ SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+ SVGA_REG_MOB_MAX_SIZE = 57,
+ SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +441,10 @@ struct SVGASignedPoint {
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS 0x01000000
+#define SVGA_CAP_DEAD1 0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
+#define SVGA_CAP_GBOBJECTS 0x08000000
/*
* FIFO register indices.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 0489c6152482..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+ TTM_PL_FLAG_CACHED;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
+struct ttm_placement vmw_sys_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_ne_placement_flags
+};
+
static uint32_t evictable_placement_flags[] = {
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
- VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
struct ttm_placement vmw_evictable_placement = {
.fpfn = 0,
.lpfn = 0,
- .num_placement = 3,
+ .num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
+struct ttm_placement vmw_mob_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .num_busy_placement = 1,
+ .placement = &mob_placement_flags,
+ .busy_placement = &mob_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
struct sg_table sgt;
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
viter->dma_address = &__vmw_piter_dma_addr;
viter->page = &__vmw_piter_non_sg_page;
viter->addrs = vsgt->addrs;
+ viter->pages = vsgt->pages;
break;
case vmw_dma_map_populate:
case vmw_dma_map_bind:
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_tt->mapped = false;
}
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Make sure the device no longer accesses a buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+ return &vmw_tt->vsgt;
+}
+
+
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
vmw_be->gmr_id = bo_mem->start;
+ vmw_be->mem_type = bo_mem->mem_type;
+
+ switch (bo_mem->mem_type) {
+ case VMW_PL_GMR:
+ return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+ ttm->num_pages, vmw_be->gmr_id);
+ case VMW_PL_MOB:
+ if (unlikely(vmw_be->mob == NULL)) {
+ vmw_be->mob =
+ vmw_mob_create(ttm->num_pages);
+ if (unlikely(vmw_be->mob == NULL))
+ return -ENOMEM;
+ }
- return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
- ttm->num_pages, vmw_be->gmr_id);
+ return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+ &vmw_be->vsgt, ttm->num_pages,
+ vmw_be->gmr_id);
+ default:
+ BUG();
+ }
+ return 0;
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ switch (vmw_be->mem_type) {
+ case VMW_PL_GMR:
+ vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ break;
+ case VMW_PL_MOB:
+ vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+ break;
+ default:
+ BUG();
+ }
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
return 0;
}
+
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
ttm_dma_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
+
+ if (vmw_be->mob)
+ vmw_mob_destroy(vmw_be->mob);
+
kfree(vmw_be);
}
+
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+ if (vmw_tt->mob) {
+ vmw_mob_destroy(vmw_tt->mob);
+ vmw_tt->mob = NULL;
+ }
+
vmw_ttm_unmap_dma(vmw_tt);
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
@@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = {
.destroy = vmw_ttm_destroy,
};
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
@@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -546,12 +672,12 @@ out_no_init:
return NULL;
}
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
switch (type) {
@@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case VMW_PL_GMR:
+ case VMW_PL_MOB:
/*
* "Guest Memory Regions" is an aperture like feature with
* one slot per bo. There is an upper limit of the number of
@@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
+ case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
VMW_FENCE_WAIT_TIMEOUT);
}
+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+}
+
+
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
- .move_notify = NULL,
- .swap_notify = NULL,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae0925aca8..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,30 @@
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
+ struct vmw_ctx_binding_state cbs;
};
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +80,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
.unbind = NULL
};
+static const struct vmw_res_func vmw_gb_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_context_create,
+ .destroy = vmw_gb_context_destroy,
+ .bind = vmw_gb_context_bind,
+ .unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+ [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+ [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
/**
* Context management:
*/
@@ -76,6 +111,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
} *cmd;
+ if (res->func->destroy == vmw_gb_context_destroy) {
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_gb_context_destroy(res);
+ if (dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return;
+ }
+
vmw_execbuf_release_pinned_bo(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -92,6 +141,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
vmw_3d_resource_dec(dev_priv, false);
}
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_context_func);
+ res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+ INIT_LIST_HEAD(&uctx->cbs.list);
+
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
@@ -103,6 +179,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
SVGA3dCmdDefineContext body;
} *cmd;
+ if (dev_priv->has_mob)
+ return vmw_gb_context_init(dev_priv, res, res_free);
+
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
@@ -154,6 +233,180 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
return (ret == 0) ? res : NULL;
}
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_state_scrub(&uctx->cbs);
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
/**
* User-space context management:
*/
@@ -272,3 +525,380 @@ out_unlock:
return ret;
}
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.shader_type;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for render target "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = bi->ctx->id;
+ cmd->body.type = bi->i1.rt_type;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ cmd->body.target.face = 0;
+ cmd->body.target.mipmap = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them to a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
+{
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ struct {
+ SVGA3dCmdSetTextureState c;
+ SVGA3dTextureState s1;
+ } body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for texture "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.c.cid = bi->ctx->id;
+ cmd->body.s1.stage = bi->i1.texture_stage;
+ cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+ list_del(&cb->ctx_list);
+ if (!list_empty(&cb->res_list))
+ list_del(&cb->res_list);
+ cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+ DRM_ERROR("Illegal render target type %u.\n",
+ (unsigned) bi->i1.rt_type);
+ return -EINVAL;
+ }
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ if (unlikely((unsigned)bi->i1.texture_stage >=
+ SVGA3D_NUM_TEXTURE_UNITS)) {
+ DRM_ERROR("Illegal texture/sampler unit %u.\n",
+ (unsigned) bi->i1.texture_stage);
+ return -EINVAL;
+ }
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ if (unlikely((unsigned)bi->i1.shader_type >=
+ SVGA3D_SHADERTYPE_MAX)) {
+ DRM_ERROR("Illegal shader type %u.\n",
+ (unsigned) bi->i1.shader_type);
+ return -EINVAL;
+ }
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ loc->bi = *bi;
+ loc->bi.scrubbed = false;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ INIT_LIST_HEAD(&loc->res_list);
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *bi)
+{
+ struct vmw_ctx_binding *loc;
+
+ switch (bi->bt) {
+ case vmw_ctx_binding_rt:
+ loc = &cbs->render_targets[bi->i1.rt_type];
+ break;
+ case vmw_ctx_binding_tex:
+ loc = &cbs->texture_units[bi->i1.texture_stage];
+ break;
+ case vmw_ctx_binding_shader:
+ loc = &cbs->shaders[bi->i1.shader_type];
+ break;
+ default:
+ BUG();
+ }
+
+ if (loc->bi.ctx != NULL)
+ vmw_context_binding_drop(loc);
+
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
+ list_add_tail(&loc->res_list, &bi->res->binding_head);
+ }
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
+ vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, res_list)
+ vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrubs all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once the commands that set up
+ * the staged bindings have been submitted to the device.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+ struct vmw_ctx_binding_state *from)
+{
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+ vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
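The returned list is only stable while dev_priv::binding_mutex is held, so callers are expected to walk it under that mutex. A minimal sketch of such a walk (the function name and loop body are illustrative; compare vmw_resource_context_res_add added to vmwgfx_execbuf.c later in this patch):

	static void vmw_walk_context_bindings(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx)
	{
		struct list_head *binding_list;
		struct vmw_ctx_binding *entry;

		mutex_lock(&dev_priv->binding_mutex);
		binding_list = vmw_context_binding_list(ctx);
		list_for_each_entry(entry, binding_list, ctx_list) {
			/* entry->bi describes one binding; entry->bi.res is the
			 * bound resource (not ref-counted). */
		}
		mutex_unlock(&dev_priv->binding_mutex);
	}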
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d4e54fcc0acd..a75840211b3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
*
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
@@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
int ret;
lockdep_assert_held(&bo->resv->lock.base);
- BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_GMR);
- pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7a549694e59..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -112,6 +112,21 @@
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
+ struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
+ struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
+ union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
+ union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
+ struct drm_vmw_synccpu_arg)
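User space reaches these entry points through the DRM_VMW_* command indices exported in the uapi header rather than through the kernel-internal macros above. A minimal, hypothetical libdrm sketch (the drm_vmw_synccpu_arg members are left to drm/vmwgfx_drm.h, and the include path may vary with the libdrm installation):

	#include <string.h>
	#include <xf86drm.h>
	#include <drm/vmwgfx_drm.h>

	static int example_synccpu(int fd)
	{
		struct drm_vmw_synccpu_arg arg;

		memset(&arg, 0, sizeof(arg));
		/* Fill in arg as documented in drm/vmwgfx_drm.h before use. */
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}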
/**
* The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+ vmw_shader_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+ vmw_shader_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+ vmw_gb_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+ vmw_gb_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_SYNCCPU,
+ vmw_user_dmabuf_synccpu_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR2.\n");
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
DRM_INFO(" Screen Object 2.\n");
+ if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+ DRM_INFO(" Command Buffers.\n");
+ if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+ DRM_INFO(" Command Buffers 2.\n");
+ if (capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO(" Guest Backed Resources.\n");
}
-
/**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
*
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally, it will unmap the buffer.
+ * No interruptible waits are done within this function.
*
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * Returns an error if bo creation or initialization fails.
*/
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
+ int ret;
+ struct ttm_buffer_object *bo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
- int ret;
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
- ttm_bo_reserve(bo, false, false, false, 0);
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
+ /*
+ * Create the bo as pinned, so that a tryreserve will
+ * immediately succeed. This is because we're the only
+ * user of the bo currently.
+ */
+ ret = ttm_bo_create(&dev_priv->bdev,
+ PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &bo);
+
if (unlikely(ret != 0))
- (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
- 10*HZ);
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, 0);
+ BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
if (likely(ret == 0)) {
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
- } else
- DRM_ERROR("Dummy query buffer map failed.\n");
+ }
+ vmw_bo_pin(bo, false);
ttm_bo_unreserve(bo);
-}
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Dummy query buffer map failed.\n");
+ ttm_bo_unref(&bo);
+ } else
+ dev_priv->dummy_query_bo = bo;
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
- return ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_vram_sys_placement,
- 0, false, NULL,
- &dev_priv->dummy_query_bo);
+ return ret;
}
-
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ goto out_no_mob;
+ }
+ }
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
- vmw_dummy_query_bo_prepare(dev_priv);
return 0;
out_no_query_bo:
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
+out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
ttm_bo_unref(&dev_priv->dummy_query_bo);
+ if (dev_priv->has_mob)
+ vmw_otables_takedown(dev_priv);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
+
/**
* Increase the 3d resource refcount.
* If the count was prevously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@ out_fixup:
return 0;
}
+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ *
+ * With a 32-bit kernel we can only handle 32-bit PFNs. Optionally apply the
+ * same restriction on 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (intel_iommu_enabled &&
+ (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+ DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+ return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ }
+ return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+ return 0;
+}
+#endif
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
+ mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_get_initial_size(dev_priv);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
- dev_priv->max_gmr_descriptors =
- vmw_read(dev_priv,
- SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_pages =
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
dev_priv->memory_size =
@@ -598,23 +666,45 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
*/
dev_priv->memory_size = 512*1024*1024;
}
+ dev_priv->max_mob_pages = 0;
+ dev_priv->max_mob_size = 0;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint64_t mem_size =
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+ dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ dev_priv->prim_bb_mem =
+ vmw_read(dev_priv,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ dev_priv->max_mob_size =
+ vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ } else
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+ ret = vmw_dma_masks(dev_priv);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
+ if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
mutex_unlock(&dev_priv->hw_mutex);
vmw_print_capabilities(dev_priv->capabilities);
- if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
- DRM_INFO("Max GMR descriptors is %u\n",
- (unsigned)dev_priv->max_gmr_descriptors);
- }
- if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
+ DRM_INFO("Maximum display memory size is %u kiB\n",
+ dev_priv->prim_bb_mem / 1024);
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +739,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- dev_priv->max_gmr_ids) != 0) {
+ VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ dev_priv->has_mob = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+ VMW_PL_MOB) != 0) {
+ DRM_INFO("No MOB memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
+ }
+
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -757,6 +857,8 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +903,8 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -840,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -859,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 20890ad8408b..07831554dad7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -50,19 +50,39 @@
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+ VMWGFX_NUM_GB_SHADER +\
+ VMWGFX_NUM_GB_SURFACE +\
+ VMWGFX_NUM_GB_SCREEN_TARGET)
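(With the values above, VMWGFX_NUM_MOB evaluates to 256 + 20000 + 32768 + 16 = 53040.)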
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
+
+struct vmw_compat_shader_manager;
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -82,6 +102,7 @@ struct vmw_dma_buffer {
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ bool validate_as_mob;
};
struct vmw_res_func;
@@ -98,6 +119,7 @@ struct vmw_resource {
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
+ struct list_head binding_head; /* Protected by binding_mutex */
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -106,6 +128,7 @@ enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
vmw_res_stream,
+ vmw_res_shader,
vmw_res_max
};
@@ -154,6 +177,7 @@ struct vmw_fifo_state {
};
struct vmw_relocation {
+ SVGAMobId *mob_loc;
SVGAGuestPtr *location;
uint32_t index;
};
@@ -229,11 +253,77 @@ struct vmw_piter {
struct page *(*page)(struct vmw_piter *);
};
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+ vmw_ctx_binding_shader,
+ vmw_ctx_binding_rt,
+ vmw_ctx_binding_tex,
+ vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
+ enum vmw_ctx_binding_type bt;
+ bool scrubbed;
+ union {
+ SVGA3dShaderType shader_type;
+ SVGA3dRenderTargetType rt_type;
+ uint32 texture_stage;
+ } i1;
+};
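As an illustration of how a binding is described and registered (a minimal sketch; ctx_res, surf_res and uctx are placeholder names, and the render-target case mirrors vmw_cmd_set_render_target_check added to vmwgfx_execbuf.c later in this patch):

	struct vmw_ctx_bindinfo bi;
	int ret;

	bi.ctx = ctx_res;		/* the owning context resource */
	bi.res = surf_res;		/* the bound surface, or NULL */
	bi.bt = vmw_ctx_binding_rt;
	bi.i1.rt_type = SVGA3D_RT_COLOR0;
	ret = vmw_context_binding_add(&uctx->cbs, &bi);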
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ * - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+ struct list_head ctx_list;
+ struct list_head res_list;
+ struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+ struct list_head list;
+ struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+ struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+ struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -250,6 +340,8 @@ struct vmw_sw_context{
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
+ struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -281,6 +373,7 @@ struct vmw_private {
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
+ uint32_t prim_bb_mem;
uint32_t mmio_start;
uint32_t mmio_size;
uint32_t fb_max_width;
@@ -290,11 +383,13 @@ struct vmw_private {
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
- uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
+ uint32_t max_mob_pages;
+ uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
+ bool has_mob;
struct mutex hw_mutex;
/*
@@ -370,6 +465,7 @@ struct vmw_private {
struct vmw_sw_context ctx;
struct mutex cmdbuf_mutex;
+ struct mutex binding_mutex;
/**
* Operating mode.
@@ -415,6 +511,12 @@ struct vmw_private {
* DMA mapping stuff.
*/
enum vmw_dma_map_mode map_mode;
+
+ /*
+ * Guest Backed stuff
+ */
+ struct ttm_buffer_object *otable_bo;
+ struct vmw_otable *otables;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -471,23 +573,14 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
@@ -499,18 +592,6 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -519,10 +600,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
@@ -622,10 +714,16 @@ extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
unsigned long p_offs);
@@ -701,7 +799,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
@@ -832,6 +930,101 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd);
+/*
+ * MemoryOBject management - vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id,
+ struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+ const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+ struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f6469a1eb..efb575a7996c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
* @res: Ref-counted pointer to the resource.
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
@@ -65,12 +67,32 @@ struct vmw_resource_val_node {
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
+ struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
/**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @func: Call-back handling this command.
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+ int (*func) (struct vmw_private *, struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+ bool user_allow;
+ bool gb_disable;
+ bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
+ [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+ (_gb_disable), (_gb_enable)}
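A sketch of how an entry might be declared with this macro (the table name, size expression and flag values are illustrative; the actual table follows later in this file):

	static const struct vmw_cmd_entry
		vmw_cmd_entries[SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE] = {
		VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
			    &vmw_cmd_set_render_target_check, true, false, false),
		/* ... one VMW_CMD_DEF() entry per supported SVGA3D command ... */
	};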
+
+/**
* vmw_resource_unreserve - unreserve resources previously reserved for
* command submission.
*
@@ -87,6 +109,18 @@ static void vmw_resource_list_unreserve(struct list_head *list,
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
+ /*
+ * Transfer staged context bindings to the
+ * persistent context binding tracker.
+ */
+ if (unlikely(val->staged_bindings)) {
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
+ kfree(val->staged_bindings);
+ val->staged_bindings = NULL;
+ }
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
@@ -146,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -201,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -224,6 +300,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -232,6 +309,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
+ bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
@@ -244,6 +322,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
+ if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+ DRM_ERROR("Inconsistent buffer usage.\n");
+ return -EINVAL;
+ }
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
@@ -266,6 +348,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ vval_buf->validate_as_mob = validate_as_mob;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +385,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo = &res->backup->base;
ret = vmw_bo_to_validate_list
- (sw_context, bo, NULL);
+ (sw_context, bo,
+ vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
@@ -339,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type-specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -362,15 +451,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID)
+ if (id == SVGA3D_INVALID_ID) {
+ if (p_val)
+ *p_val = NULL;
+ if (res_type == vmw_res_context) {
+ DRM_ERROR("Illegal context invalid id.\n");
+ return -EINVAL;
+ }
return 0;
+ }
/*
* Fastpath in case of repeated commands referencing the same
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -379,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -411,6 +507,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
rcache->node = node;
if (p_val)
*p_val = node;
+
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ goto out_no_reloc;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
vmw_resource_unreference(&res);
return 0;
@@ -422,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type-specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -437,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
- __le32 cid;
+ uint32_t cid;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -453,17 +618,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.target.sid, NULL);
- return ret;
+ &cmd->body.target.sid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_rt;
+ bi.i1.rt_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
+ return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -519,11 +702,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
@@ -541,11 +719,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
- if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
- return -EPERM;
- }
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
@@ -586,7 +759,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -594,7 +767,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- NULL);
+ dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
@@ -672,6 +845,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAMobId *id,
+ struct vmw_dma_buffer **vmw_bo_p)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ uint32_t handle = *id;
+ struct vmw_relocation *reloc;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use MOB buffer.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+ DRM_ERROR("Max number relocations per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->mob_loc = id;
+ reloc->location = NULL;
+
+ ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ *vmw_bo_p = NULL;
+ return ret;
+}
+
+/**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
*
@@ -701,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -718,7 +951,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -732,6 +965,30 @@ out_no_reloc:
}
/**
+ * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_gb_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -750,12 +1007,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_begin_query_cmd,
header);
+ if (unlikely(dev_priv->has_mob)) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+ }
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
/**
+ * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+/**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -774,6 +1083,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -790,7 +1118,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
}
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ &cmd->q.mobid,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+/**
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
@@ -809,6 +1170,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_query_cmd, header);
+ if (dev_priv->has_mob) {
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery q;
+ } gb_cmd;
+
+ BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+ gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ gb_cmd.header.size = cmd->header.size;
+ gb_cmd.q.cid = cmd->q.cid;
+ gb_cmd.q.type = cmd->q.type;
+ gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+ gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+ memcpy(cmd, &gb_cmd, sizeof(*cmd));
+ return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+ }
+
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
@@ -853,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -921,15 +1302,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
- };
+ } *cmd;
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *res_node;
int ret;
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ cmd = container_of(header, struct vmw_tex_state_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->state.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
@@ -939,9 +1327,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, NULL);
+ &cur_state->value, &res_node);
if (unlikely(ret != 0))
return ret;
+
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_tex;
+ bi.i1.texture_stage = cur_state->stage;
+ vmw_context_binding_add(ctx_node->staged_bindings,
+ &bi);
+ }
}
return 0;
@@ -971,6 +1370,314 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv
+ *converter,
+ uint32_t *res_id,
+ uint32_t *buf_id,
+ unsigned long backup_offset)
+{
+ int ret;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_resource_val_node *val_node;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+ converter, res_id, &val_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (val_node->first_usage)
+ val_node->no_buffer_needed = true;
+
+ vmw_dmabuf_unreference(&val_node->new_backup);
+ val_node->new_backup = dma_buf;
+ val_node->new_backup_offset = backup_offset;
+
+ return 0;
+}
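vmw_cmd_switch_backup only records the new backup buffer and offset on the resource's validation node; the actual swap is committed later, when the resource is unreserved. A rough standalone sketch of that bookkeeping, with simplified stand-in types rather than the driver's real structs:

#include <stdbool.h>
#include <stdio.h>

struct buffer { int id; };

struct val_node {
	bool first_usage;
	bool no_buffer_needed;
	struct buffer *new_backup;
	unsigned long new_backup_offset;
};

static void switch_backup(struct val_node *node, struct buffer *buf,
			  unsigned long offset)
{
	if (node->first_usage)
		node->no_buffer_needed = true;  /* old contents never need reading back */

	node->new_backup = buf;                 /* applied when the resource is unreserved */
	node->new_backup_offset = offset;
}

int main(void)
{
	struct buffer mob = { 42 };
	struct val_node node = { .first_usage = true };

	switch_backup(&node, &mob, 0);
	printf("backup -> buffer %d at offset %lu (skip readback: %d)\n",
	       node.new_backup->id, node.new_backup_offset,
	       node.no_buffer_needed);
	return 0;
}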
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, &cmd->body.mobid,
+ 0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBImage body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_gb_surface_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.sid, NULL);
+}
+
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
@@ -986,18 +1693,105 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
+ struct vmw_resource_val_node *ctx_node;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
- ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ &ctx_node);
if (unlikely(ret != 0))
return ret;
+ if (dev_priv->has_mob) {
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource_val_node *res_node;
+ u32 shid = cmd->body.shid;
+
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+ }
+
return 0;
}
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_bind_gb_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+ header);
+
+ return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -1041,50 +1835,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-typedef int (*vmw_cmd_func) (struct vmw_private *,
- struct vmw_sw_context *,
- SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
- [cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
- &vmw_cmd_set_render_target_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
- VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
- VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
- VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
- VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ &vmw_cmd_set_render_target_check, true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
- VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+ &vmw_cmd_blt_surf_screen_check, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+ &vmw_cmd_update_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+ &vmw_cmd_readback_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+ &vmw_cmd_readback_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+ &vmw_cmd_invalidate_gb_image, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+ &vmw_cmd_invalidate_gb_surface, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+ false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+ false, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+ true, false, true)
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +2012,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
+ const struct vmw_cmd_entry *entry;
+ bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
/* Handle any non-3D commands */
@@ -1107,18 +2026,43 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
- goto out_err;
+ goto out_invalid;
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
- goto out_err;
+ goto out_invalid;
+
+ entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->func))
+ goto out_invalid;
+
+ if (unlikely(!entry->user_allow && !sw_context->kernel))
+ goto out_privileged;
+
+ if (unlikely(entry->gb_disable && gb))
+ goto out_old;
- ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ if (unlikely(entry->gb_enable && !gb))
+ goto out_new;
+
+ ret = entry->func(dev_priv, sw_context, header);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_invalid;
return 0;
-out_err:
- DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+ DRM_ERROR("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_privileged:
+ DRM_ERROR("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EPERM;
+out_old:
+ DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+out_new:
+ DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
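The command table above pairs each handler with three flags (allowed from user-space, disallowed when guest-backed objects are enabled, requires guest-backed objects), and vmw_cmd_check simply indexes the table and tests the flags before dispatching. A compressed standalone sketch of the same table-driven pattern, with made-up command ids and flags:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_BASE 1000
#define CMD_MAX  1003

struct cmd_entry {
	int (*func)(uint32_t cmd_id);
	bool user_allow;   /* may be submitted by an unprivileged client */
	bool gb_disable;   /* rejected when guest-backed objects are enabled */
	bool gb_enable;    /* requires guest-backed objects */
};

static int cmd_ok(uint32_t id)      { printf("dispatched %u\n", id); return 0; }
static int cmd_invalid(uint32_t id) { printf("invalid %u\n", id); return -1; }

#define CMD_DEF(cmd, f, user, gbd, gbe) \
	[(cmd) - CMD_BASE] = { (f), (user), (gbd), (gbe) }

static const struct cmd_entry entries[CMD_MAX - CMD_BASE] = {
	CMD_DEF(1000, cmd_ok,      true,  false, false),
	CMD_DEF(1001, cmd_invalid, false, false, false),
	CMD_DEF(1002, cmd_ok,      true,  false, true),
};

static int check(uint32_t cmd_id, bool kernel, bool gb)
{
	const struct cmd_entry *e;

	if (cmd_id < CMD_BASE || cmd_id >= CMD_MAX)
		return -1;                          /* out of range */
	e = &entries[cmd_id - CMD_BASE];
	if (!e->func)
		return -1;                          /* hole in the table */
	if (!e->user_allow && !kernel)
		return -1;                          /* privileged command */
	if ((e->gb_disable && gb) || (e->gb_enable && !gb))
		return -1;                          /* wrong hardware generation */
	return e->func(cmd_id);
}

int main(void)
{
	check(1000, false, false);  /* dispatched */
	check(1001, false, false);  /* rejected: kernel only */
	check(1002, false, false);  /* rejected: needs guest-backed objects */
	return 0;
}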
@@ -1174,6 +2118,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
break;
+ case VMW_PL_MOB:
+ *reloc->mob_loc = bo->mem.start;
+ break;
default:
BUG();
}
@@ -1198,6 +2145,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
+ if (unlikely(val->staged_bindings))
+ kfree(val->staged_bindings);
kfree(val);
}
}
@@ -1224,7 +2173,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo)
+ struct ttm_buffer_object *bo,
+ bool validate_as_mob)
{
int ret;
@@ -1238,6 +2188,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
dev_priv->dummy_query_bo_pinned))
return 0;
+ if (validate_as_mob)
+ return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +2212,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
return ret;
}
-
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
@@ -1267,7 +2219,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+ entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
}
@@ -1461,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -1478,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -1509,11 +2463,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_err;
+ }
+
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_unlock_binding;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_unlock_binding;
}
vmw_apply_relocations(sw_context);
@@ -1538,6 +2504,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
+ mutex_unlock(&dev_priv->binding_mutex);
+
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
@@ -1558,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -1568,11 +2538,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
return 0;
+out_unlock_binding:
+ mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -1581,6 +2554,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f1..436b013b4231 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
spin_unlock_irq(&fman->lock);
}
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
* Note that the action callbacks may be executed before this function
* returns.
*/
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
struct drm_vmw_event_fence event;
};
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
uint32_t flags,
uint64_t user_data,
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ ttm_base_object_lookup_for_ref(dev_priv->tdev,
+ arg->handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 3eb148667d63..6ccd993e26bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ return false;
+
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ uint32_t result;
+
+ if (!dev_priv->has_mob)
+ return false;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+ result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (result != 0);
+ }
+
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -511,24 +528,16 @@ out_err:
}
/**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
/*
* A query wait without a preceding query end will
@@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return 0;
}
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ /*
+ * A query wait without a preceding query end will
+ * actually finish all queries for this cid
+ * without writing to the query result structure.
+ */
+
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForGBQuery body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = cid;
+ cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offset = 0;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * the appropriate query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
+{
+ if (dev_priv->has_mob)
+ return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+ return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6ef0b035becb..61d8d803199f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
}
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
- struct list_head *desc_pages)
-{
- struct page *page, *next;
- struct svga_guest_mem_descriptor *page_virtual;
- unsigned int desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- if (list_empty(desc_pages))
- return;
-
- list_for_each_entry_safe(page, next, desc_pages, lru) {
- list_del_init(&page->lru);
-
- if (likely(desc_dma != DMA_ADDR_INVALID)) {
- dma_unmap_page(dev, desc_dma, PAGE_SIZE,
- DMA_TO_DEVICE);
- }
-
- page_virtual = kmap_atomic(page);
- desc_dma = (dma_addr_t)
- le32_to_cpu(page_virtual[desc_per_page].ppn) <<
- PAGE_SHIFT;
- kunmap_atomic(page_virtual);
-
- __free_page(page);
- }
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
- struct list_head *desc_pages,
- struct vmw_piter *iter,
- unsigned long num_pages,
- dma_addr_t *first_dma)
-{
- struct page *page;
- struct svga_guest_mem_descriptor *page_virtual = NULL;
- struct svga_guest_mem_descriptor *desc_virtual = NULL;
- unsigned int desc_per_page;
- unsigned long prev_pfn;
- unsigned long pfn;
- int ret;
- dma_addr_t desc_dma;
-
- desc_per_page = PAGE_SIZE /
- sizeof(struct svga_guest_mem_descriptor) - 1;
-
- while (likely(num_pages != 0)) {
- page = alloc_page(__GFP_HIGHMEM);
- if (unlikely(page == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
- list_add_tail(&page->lru, desc_pages);
- page_virtual = kmap_atomic(page);
- desc_virtual = page_virtual - 1;
- prev_pfn = ~(0UL);
-
- while (likely(num_pages != 0)) {
- pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
- if (pfn != prev_pfn + 1) {
-
- if (desc_virtual - page_virtual ==
- desc_per_page - 1)
- break;
-
- (++desc_virtual)->ppn = cpu_to_le32(pfn);
- desc_virtual->num_pages = cpu_to_le32(1);
- } else {
- uint32_t tmp =
- le32_to_cpu(desc_virtual->num_pages);
- desc_virtual->num_pages = cpu_to_le32(tmp + 1);
- }
- prev_pfn = pfn;
- --num_pages;
- vmw_piter_next(iter);
- }
-
- (++desc_virtual)->ppn = DMA_PAGE_INVALID;
- desc_virtual->num_pages = cpu_to_le32(0);
- kunmap_atomic(page_virtual);
- }
-
- desc_dma = 0;
- list_for_each_entry_reverse(page, desc_pages, lru) {
- page_virtual = kmap_atomic(page);
- page_virtual[desc_per_page].ppn = cpu_to_le32
- (desc_dma >> PAGE_SHIFT);
- kunmap_atomic(page_virtual);
- desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, desc_dma)))
- goto out_err;
- }
- *first_dma = desc_dma;
-
- return 0;
-out_err:
- vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
- return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
- int gmr_id, dma_addr_t desc_dma)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
- mb();
-
- mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
int vmw_gmr_bind(struct vmw_private *dev_priv,
const struct vmw_sg_table *vsgt,
unsigned long num_pages,
int gmr_id)
{
- struct list_head desc_pages;
- dma_addr_t desc_dma = 0;
- struct device *dev = dev_priv->dev->dev;
struct vmw_piter data_iter;
- int ret;
vmw_piter_start(&data_iter, vsgt, 0);
if (unlikely(!vmw_piter_next(&data_iter)))
return 0;
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
- return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
- if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
- return -EINVAL;
-
- if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
+ if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
return -EINVAL;
- INIT_LIST_HEAD(&desc_pages);
-
- ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
- num_pages, &desc_dma);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
- vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
- return 0;
+ return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
- if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
vmw_gmr2_unbind(dev_priv, gmr_id);
- return;
- }
-
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
- wmb();
- vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
- mb();
- mutex_unlock(&dev_priv->hw_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c5c054ae9056..b1273e8e9a69 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
- gman->max_gmr_pages = dev_priv->max_gmr_pages;
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
- gman->max_gmr_ids = p_size;
+
+ switch (p_size) {
+ case VMW_PL_GMR:
+ gman->max_gmr_ids = dev_priv->max_gmr_ids;
+ gman->max_gmr_pages = dev_priv->max_gmr_pages;
+ break;
+ case VMW_PL_MOB:
+ gman->max_gmr_ids = VMWGFX_NUM_MOB;
+ gman->max_gmr_pages = dev_priv->max_mob_pages;
+ break;
+ default:
+ BUG();
+ }
man->priv = (void *) gman;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 45d5b5ab6ca9..47b70949bf3a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -53,13 +59,18 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->fifo.capabilities;
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
- param->value = dev_priv->vram_size;
+ param->value = dev_priv->prim_bb_mem;
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -69,7 +80,30 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
- param->value = dev_priv->memory_size;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
+ break;
+ case DRM_VMW_PARAM_3D_CAPS_SIZE:
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
+ else
+ param->value = (SVGA_FIFO_3D_CAPS_LAST -
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
+ break;
+ case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+ vmw_fp->gb_aware = true;
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+ break;
+ case DRM_VMW_PARAM_MAX_MOB_SIZE:
+ param->value = dev_priv->max_mob_size;
break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
@@ -80,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
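vmw_fill_compat_cap packs the guest-backed device caps into a legacy-style caps record: a header followed by (index, value) pairs, with the record length expressed in 32-bit words. A standalone sketch of the size arithmetic, using a stand-in cap count instead of the real SVGA3D_DEVCAP_MAX:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEVCAP_MAX 260                 /* stand-in for SVGA3D_DEVCAP_MAX */
typedef uint32_t cap_pair[2];          /* [cap index, cap value] */

struct compat_cap {
	struct { uint32_t length, type; } header;
	cap_pair pairs[DEVCAP_MAX];
};

int main(void)
{
	size_t size = 2048;            /* caller-provided bounce buffer size */
	size_t pair_offset = offsetof(struct compat_cap, pairs);
	size_t max_pairs;
	uint32_t length;

	if (size < pair_offset)
		return 1;              /* not even room for the header */

	max_pairs = (size - pair_offset) / sizeof(cap_pair);
	if (max_pairs > DEVCAP_MAX)
		max_pairs = DEVCAP_MAX;

	/* The record length covers header plus pairs, in 32-bit words. */
	length = (pair_offset + max_pairs * sizeof(cap_pair)) / sizeof(uint32_t);

	printf("pairs that fit: %zu, record length: %u words\n",
	       max_pairs, length);
	return 0;
}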
+
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -92,29 +158,58 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
void *bounce;
int ret;
+ bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
+ else
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- fifo_mem = dev_priv->mmio_virt;
- memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
+ uint32_t *bounce32 = (uint32_t *) bounce;
+
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < num; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else {
+ fifo_mem = dev_priv->mmio_virt;
+ memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+ }
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4640adbcaf91..0c423766c441 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,7 +30,7 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03f1c2038631..8a650413dea5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -40,7 +40,7 @@ struct vmw_clip_rect {
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@ struct vmw_framebuffer_surface {
struct drm_master *master;
};
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@ out_free_tmp:
return ret;
}
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
if (!dev_priv->sou_priv)
return -EINVAL;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return 0;
}
@@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
- surface->sizes[0].width < mode_cmd->width ||
- surface->sizes[0].height < mode_cmd->height ||
- surface->sizes[0].depth != 1)) {
+ surface->base_size.width < mode_cmd->width ||
+ surface->base_size.height < mode_cmd->height ||
+ surface->base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
@@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf {
struct vmw_dma_buffer *buffer;
};
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
@@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
return ret;
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
+ drm_modeset_lock_all(dev_priv->dev);
+
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ drm_modeset_unlock_all(dev_priv->dev);
return ret;
+ }
if (!num_clips) {
num_clips = 1;
@@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
ttm_read_unlock(&vmaster->lock);
+
+ drm_modeset_unlock_all(dev_priv->dev);
+
return ret;
}
@@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
}
@@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc)
* Small shared kms functions.
*/
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644
index 000000000000..04a64b8cd3cd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -0,0 +1,656 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @num_pages: Number of pages that make up the page table.
+ * @pt_level: The indirection level of the page table, 0-2.
+ * @pt_root_page: DMA address of the level 0 page of the page table.
+ */
+struct vmw_mob {
+ struct ttm_buffer_object *pt_bo;
+ unsigned long num_pages;
+ unsigned pt_level;
+ dma_addr_t pt_root_page;
+ uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size: Size of the table (page-aligned).
+ * @page_table: Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+ unsigned long size;
+ struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @offset: Offset into dev_priv::otable_bo where the table starts.
+ * @otable: Pointer to otable metadata.
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ unsigned long offset,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase64 body;
+ } *cmd;
+ struct vmw_mob *mob;
+ const struct vmw_sg_table *vsgt;
+ struct vmw_piter iter;
+ int ret;
+
+ BUG_ON(otable->page_table != NULL);
+
+ vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+ vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+ WARN_ON(!vmw_piter_next(&iter));
+
+ mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+ if (unlikely(mob == NULL)) {
+ DRM_ERROR("Failed creating OTable page table.\n");
+ return -ENOMEM;
+ }
+
+ if (otable->size <= PAGE_SIZE) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&iter);
+ } else {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ goto out_no_populate;
+
+ vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = otable->size;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = mob->pt_level;
+
+ /*
+ * The device doesn't support this, but the otable size is
+ * determined at compile-time, so this BUG shouldn't trigger
+ * randomly.
+ */
+ BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ otable->page_table = mob;
+
+ return 0;
+
+out_no_fifo:
+out_no_populate:
+ vmw_mob_destroy(mob);
+ return ret;
+}
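vmw_setup_otable_base picks the cheapest MOB page-table format that can describe the table's backing pages: no table at all when the data fits in one page or is physically contiguous, otherwise a one- or two-level table built by vmw_mob_pt_populate (not shown here). A rough standalone sketch of that decision, assuming 4 KiB pages and 64-bit page-number entries; the level thresholds for the multi-level case are an assumption about the populate helper:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PPN_SIZE  8UL                          /* bytes per page-number entry */
#define PPNS_PER_PAGE (PAGE_SIZE / PPN_SIZE)   /* 512 entries per table page */

enum pt_format { PTDEPTH_0, PTDEPTH_1, PTDEPTH_2, MOBFMT_RANGE };

static enum pt_format pick_format(unsigned long size_bytes, unsigned int num_regions)
{
	unsigned long num_pages = (size_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

	if (size_bytes <= PAGE_SIZE)
		return PTDEPTH_0;       /* the single data page acts as the root */
	if (num_regions == 1)
		return MOBFMT_RANGE;    /* physically contiguous: no table needed */
	if (num_pages <= PPNS_PER_PAGE)
		return PTDEPTH_1;       /* one page of page numbers is enough */
	return PTDEPTH_2;               /* root page points at pages of page numbers */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       pick_format(4096, 5),        /* PTDEPTH_0 */
	       pick_format(1 << 20, 1),     /* MOBFMT_RANGE */
	       pick_format(1 << 20, 5),     /* PTDEPTH_1 */
	       pick_format(1UL << 24, 5));  /* PTDEPTH_2 */
	return 0;
}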
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @type: Type of object table base
+ * @otable: Pointer to otable metadata.
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+ SVGAOTableType type,
+ struct vmw_otable *otable)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetOTableBase body;
+ } *cmd;
+ struct ttm_buffer_object *bo;
+
+ if (otable->page_table == NULL)
+ return;
+
+ bo = otable->page_table->pt_bo;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable "
+ "takedown.\n");
+ } else {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+
+ if (bo) {
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+
+ vmw_mob_destroy(otable->page_table);
+ otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+ unsigned long offset;
+ unsigned long bo_size;
+ struct vmw_otable *otables;
+ SVGAOTableType i;
+ int ret;
+
+ otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
+ GFP_KERNEL);
+ if (unlikely(otables == NULL)) {
+ DRM_ERROR("Failed to allocate space for otable "
+ "metadata.\n");
+ return -ENOMEM;
+ }
+
+ otables[SVGA_OTABLE_MOB].size =
+ VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SURFACE].size =
+ VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+ otables[SVGA_OTABLE_CONTEXT].size =
+ VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SHADER].size =
+ VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+ otables[SVGA_OTABLE_SCREEN_TARGET].size =
+ VMWGFX_NUM_GB_SCREEN_TARGET *
+ SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
+
+ bo_size = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+ otables[i].size =
+ (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+ bo_size += otables[i].size;
+ }
+
+ ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL,
+ &dev_priv->otable_bo);
+
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(dev_priv->otable_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(dev_priv->otable_bo);
+
+ offset = 0;
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
+ ret = vmw_setup_otable_base(dev_priv, i, offset,
+ &otables[i]);
+ if (unlikely(ret != 0))
+ goto out_no_setup;
+ offset += otables[i].size;
+ }
+
+ dev_priv->otables = otables;
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(dev_priv->otable_bo);
+out_no_setup:
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+ kfree(otables);
+ return ret;
+}
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+ SVGAOTableType i;
+ struct ttm_buffer_object *bo = dev_priv->otable_bo;
+ int ret;
+
+ for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+ vmw_takedown_otable_base(dev_priv, i,
+ &dev_priv->otables[i]);
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+
+ ttm_bo_unref(&dev_priv->otable_bo);
+ kfree(dev_priv->otables);
+ dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages: Number of data pages in the memory object buffer.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+ unsigned long data_size = data_pages * PAGE_SIZE;
+ unsigned long tot_size = 0;
+
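+ /*
+ * Each iteration accounts for one page table level: compute how many
+ * PPN entries, and thus pages, are needed to reference the pages of
+ * the level below, stopping once a level fits in a single page.
+ */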
+ while (likely(data_size > PAGE_SIZE)) {
+ data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+ data_size *= VMW_PPN_SIZE;
+ tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages: Number of data pages of the underlying buffer object.
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+ struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+ if (unlikely(mob == NULL))
+ return NULL;
+
+ mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+ return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob: Pointer to the mob whose pagetable we want to
+ * populate.
+ *
+ * This function allocates memory to be used for the pagetable and
+ * adjusts TTM memory accounting accordingly. It returns -ENOMEM if
+ * memory resources aren't sufficient, and may cause TTM buffer objects
+ * to be swapped out through the TTM memory accounting function.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ int ret;
+ BUG_ON(mob->pt_bo != NULL);
+
+ ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, NULL, &mob->pt_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+
+ BUG_ON(ret != 0);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+ ret = vmw_bo_map_dma(mob->pt_bo);
+ if (unlikely(ret != 0))
+ goto out_unreserve;
+
+ ttm_bo_unreserve(mob->pt_bo);
+
+ return 0;
+
+out_unreserve:
+ ttm_bo_unreserve(mob->pt_bo);
+ ttm_bo_unref(&mob->pt_bo);
+
+ return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The DMA address of the page the entry should point to.
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+ *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+}
+#endif
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_iter: Page iterator over the underlying buffer object's
+ * data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_iter: Page iterator over the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+ unsigned long num_data_pages,
+ struct vmw_piter *pt_iter)
+{
+ unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+ unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+ unsigned long pt_page;
+ __le32 *addr, *save_addr;
+ unsigned long i;
+ struct page *page;
+
+ for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+ page = vmw_piter_page(pt_iter);
+
+ save_addr = addr = kmap_atomic(page);
+
+ for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+ vmw_mob_assign_ppn(&addr,
+ vmw_piter_dma_addr(data_iter));
+ if (unlikely(--num_data_pages == 0))
+ break;
+ WARN_ON(!vmw_piter_next(data_iter));
+ }
+ kunmap_atomic(save_addr);
+ vmw_piter_next(pt_iter);
+ }
+
+ return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob: Pointer to a mob whose page table needs setting up.
+ * @data_iter: Page iterator over the buffer object's data
+ * pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Uses tail recursion to set up a multilevel mob page table.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ struct vmw_piter data_iter,
+ unsigned long num_data_pages)
+{
+ unsigned long num_pt_pages = 0;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+ struct vmw_piter save_pt_iter;
+ struct vmw_piter pt_iter;
+ const struct vmw_sg_table *vsgt;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ BUG_ON(ret != 0);
+
+ vsgt = vmw_bo_sg_table(bo);
+ vmw_piter_start(&pt_iter, vsgt, 0);
+ BUG_ON(!vmw_piter_next(&pt_iter));
+ mob->pt_level = 0;
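+ /*
+ * Build the page table bottom-up: each pass writes the DMA addresses
+ * of the previous level's pages into the next level, until a single
+ * root page remains.
+ */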
+ while (likely(num_data_pages > 1)) {
+ ++mob->pt_level;
+ BUG_ON(mob->pt_level > 2);
+ save_pt_iter = pt_iter;
+ num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+ &pt_iter);
+ data_iter = save_pt_iter;
+ num_data_pages = num_pt_pages;
+ }
+
+ mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+ ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob: Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+ if (mob->pt_bo)
+ ttm_bo_unref(&mob->pt_bo);
+ kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the struct vmw_mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBMob body;
+ } *cmd;
+ int ret;
+ struct ttm_buffer_object *bo = mob->pt_bo;
+
+ if (bo) {
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ /*
+ * No one else should be using this buffer.
+ */
+ BUG_ON(ret != 0);
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for Memory "
+ "Object unbinding.\n");
+ } else {
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
+ if (bo) {
+ vmw_fence_single_bo(bo, NULL);
+ ttm_bo_unreserve(bo);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ * populating it if necessary.
+ *
+ * @dev_priv: Pointer to a device private.
+ * @mob: Pointer to the mob we're making visible.
+ * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
+ * the underlying buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ * object.
+ * @mob_id: Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+ struct vmw_mob *mob,
+ const struct vmw_sg_table *vsgt,
+ unsigned long num_data_pages,
+ int32_t mob_id)
+{
+ int ret;
+ bool pt_set_up = false;
+ struct vmw_piter data_iter;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBMob64 body;
+ } *cmd;
+
+ mob->id = mob_id;
+ vmw_piter_start(&data_iter, vsgt, 0);
+ if (unlikely(!vmw_piter_next(&data_iter)))
+ return 0;
+
+ if (likely(num_data_pages == 1)) {
+ mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (vsgt->num_regions == 1) {
+ mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+ } else if (unlikely(mob->pt_bo == NULL)) {
+ ret = vmw_mob_pt_populate(dev_priv, mob);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+ pt_set_up = true;
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+ }
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for Memory "
+ "Object binding.\n");
+ goto out_no_cmd_space;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob_id;
+ cmd->body.ptDepth = mob->pt_level;
+ cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+ cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+
+out_no_cmd_space:
+ vmw_3d_resource_dec(dev_priv, false);
+ if (pt_set_up)
+ ttm_bo_unref(&mob->pt_bo);
+
+ return -ENOMEM;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..9757b57f8388 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
@@ -215,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
+ INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
@@ -417,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- (user) ? ttm_bo_type_device :
- ttm_bo_type_kernel, placement,
+ ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
@@ -441,6 +450,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ break;
+ default:
+ BUG();
+ }
+}
+
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
@@ -471,6 +495,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
@@ -482,7 +508,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
&user_bo->prime,
shareable,
ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ &vmw_user_dmabuf_release,
+ &vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
@@ -515,6 +542,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->dma.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true,
+ !!(flags & drm_vmw_synccpu_dontblock));
+ spin_unlock(&bdev->fence_lock);
+ return ret;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+ dma);
+ ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_dmabuf_unreference(&dma_buf);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -591,7 +742,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf)
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle)
{
struct vmw_user_dma_buffer *user_bo;
@@ -599,6 +751,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+ *handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL);
}
@@ -1291,11 +1445,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
* @mem: The truct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved, when the
+ * buffer is no longer bound to the resource, so @bo::res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
*/
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
+ struct vmw_dma_buffer *dma_buf;
+
+ if (mem == NULL)
+ return;
+
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+ if (mem->mem_type != VMW_PL_MOB) {
+ struct vmw_resource *res, *n;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+
+ list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+ if (unlikely(res->func->unbind == NULL))
+ continue;
+
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
+ }
+
+ spin_lock(&bdev->fence_lock);
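+ /*
+ * Wait for the device to finish with the buffer, including any
+ * readback triggered by the unbinds above, before the move proceeds.
+ */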
+ (void) ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 000000000000..ee3856578a12
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,812 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
+struct vmw_shader {
+ struct vmw_resource res;
+ SVGA3dShaderType type;
+ uint32_t size;
+};
+
+struct vmw_user_shader {
+ struct ttm_base_object base;
+ struct vmw_shader shader;
+};
+
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_shader_size;
+
+static const struct vmw_user_resource_conv user_shader_conv = {
+ .object_type = VMW_RES_SHADER,
+ .base_obj_to_res = vmw_user_shader_base_to_res,
+ .res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+ &user_shader_conv;
+
+
+static const struct vmw_res_func vmw_gb_shader_func = {
+ .res_type = vmw_res_shader,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed shaders",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_shader_create,
+ .destroy = vmw_gb_shader_destroy,
+ .bind = vmw_gb_shader_bind,
+ .unbind = vmw_gb_shader_unbind
+};
+
+/**
+ * Shader management:
+ */
+
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_shader, res);
+}
+
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+ (void) vmw_gb_shader_destroy(res);
+}
+
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ uint32_t size,
+ uint64_t offset,
+ SVGA3dShaderType type,
+ struct vmw_dma_buffer *byte_code,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, true,
+ res_free, &vmw_gb_shader_func);
+
+
+ if (unlikely(ret != 0)) {
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
+ }
+
+ res->backup_size = size;
+ if (byte_code) {
+ res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup_offset = offset;
+ }
+ shader->size = size;
+ shader->type = type;
+
+ vmw_resource_activate(res, vmw_hw_shader_destroy);
+ return 0;
+}
+
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBShader body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a shader id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.type = shader->type;
+ cmd->body.sizeInBytes = shader->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.offsetInBytes = 0;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBShader body;
+ } *cmd;
+ struct vmw_fence_obj *fence;
+
+ BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBShader body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for shader "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.shid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_shader, base)->
+ shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+ struct vmw_user_shader *ushader =
+ container_of(res, struct vmw_user_shader, shader.res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ushader, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->handle,
+ TTM_REF_USAGE);
+}
+
+static int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_shader_create_arg *arg =
+ (struct drm_vmw_shader_create_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_dma_buffer *buffer = NULL;
+ SVGA3dShaderType shader_type;
+ int ret;
+
+ if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+ &buffer);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find buffer for shader "
+ "creation.\n");
+ return ret;
+ }
+
+ if ((u64)buffer->base.num_pages * PAGE_SIZE <
+ (u64)arg->size + (u64)arg->offset) {
+ DRM_ERROR("Illegal buffer- or shader size.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+ }
+
+ switch (arg->shader_type) {
+ case drm_vmw_shader_type_vs:
+ shader_type = SVGA3D_SHADERTYPE_VS;
+ break;
+ case drm_vmw_shader_type_ps:
+ shader_type = SVGA3D_SHADERTYPE_PS;
+ break;
+ case drm_vmw_shader_type_gs:
+ shader_type = SVGA3D_SHADERTYPE_GS;
+ break;
+ default:
+ DRM_ERROR("Illegal shader type.\n");
+ ret = -EINVAL;
+ goto out_bad_arg;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
+
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
+
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type that, combined with the user_key, identifies
+ * the shader.
+ * @user_key: On entry, points to the user key.
+ * On successful exit, the value pointed to will contain the guest-backed
+ * shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
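+ /* The hash key packs the shader type into the bits above the 24-bit user key. */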
+ unsigned long key = *user_key | (shader_type << 24);
+
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
+ if (unlikely(ret != 0))
+ return ret;
+
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
+ }
+
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
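+ /*
+ * Validate the buffer back into the ordinary system placement now
+ * that the bytecode has been copied in.
+ */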
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+ if (compat == NULL)
+ goto no_compat;
+
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful, returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,7 +41,6 @@ struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
- uint32_t backup_handle;
};
/**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
.unbind = &vmw_legacy_srf_unbind
};
+static const struct vmw_res_func vmw_gb_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "guest backed surfaces",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_gb_surface_create,
+ .destroy = vmw_gb_surface_destroy,
+ .bind = vmw_gb_surface_bind,
+ .unbind = vmw_gb_surface_unbind
+};
+
/**
* struct vmw_surface_dma - SVGA3D DMA command
*/
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
struct vmw_surface *srf;
void *cmd;
+ if (res->func->destroy == vmw_gb_surface_destroy) {
+ (void) vmw_gb_surface_destroy(res);
+ return;
+ }
+
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &srf->res;
BUG_ON(res_free == NULL);
- (void) vmw_3d_resource_inc(dev_priv, false);
+ if (!dev_priv->has_mob)
+ (void) vmw_3d_resource_inc(dev_priv, false);
ret = vmw_resource_init(dev_priv, res, true, res_free,
+ (dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
- vmw_3d_resource_dec(dev_priv, false);
+ if (!dev_priv->has_mob)
+ vmw_3d_resource_dec(dev_priv, false);
res_free(res);
return ret;
}
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = 1;
+ srf->multisample_count = 0;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -803,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_unlock;
+ /*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+ if (dev_priv->has_mob && req->shareable) {
+ uint32_t backup_handle;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
@@ -843,6 +888,7 @@ out_unlock:
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +900,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct ttm_base_object *base;
int ret = -EINVAL;
- base = ttm_base_object_lookup(tfile, req->sid);
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
@@ -880,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -893,3 +939,436 @@ out_no_reference:
return ret;
}
+
+/**
+ * vmw_gb_surface_create - Encode and submit a guest-backed surface define
+ * command to the device.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in the surface.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint32_t cmd_len, submit_len;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd_len = sizeof(cmd->body);
+ submit_len = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.numMipLevels = srf->mip_levels[0];
+ cmd->body.multisampleCount = srf->multisample_count;
+ cmd->body.autogenFilter = srf->autogen_filter;
+ cmd->body.size.width = srf->base_size.width;
+ cmd->body.size.height = srf->base_size.height;
+ cmd->body.size.depth = srf->base_size.depth;
+ vmw_fifo_commit(dev_priv, submit_len);
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ vmw_3d_resource_dec(dev_priv, false);
+ return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBSurface body;
+ } *cmd2;
+ uint32_t submit_size;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+ cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd1 == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.mobid = bo->mem.start;
+ if (res->backup_dirty) {
+ cmd2 = (void *) &cmd1[1];
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ res->backup_dirty = false;
+ }
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBSurface body;
+ } *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBindGBSurface body;
+ } *cmd3;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "unbinding.\n");
+ return -ENOMEM;
+ }
+
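+ /*
+ * Lay out either a readback or an invalidate command, followed by the
+ * bind command that detaches the surface from its MOB.
+ */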
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd3 = (void *) &cmd1[1];
+ } else {
+ cmd2 = (void *) cmd;
+ cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.sid = res->id;
+ cmd3 = (void *) &cmd2[1];
+ }
+
+ cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+ cmd3->header.size = sizeof(cmd3->body);
+ cmd3->body.sid = res->id;
+ cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyGBSurface body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ mutex_unlock(&dev_priv->binding_mutex);
+ vmw_resource_release_id(res);
+ vmw_3d_resource_dec(dev_priv, false);
+
+ return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_gb_surface_create_arg *arg =
+ (union drm_vmw_gb_surface_create_arg *)data;
+ struct drm_vmw_gb_surface_create_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+ uint32_t backup_handle;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->svga3d_flags;
+ srf->format = req->format;
+ srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+ srf->mip_levels[0] = req->mip_levels;
+ srf->num_sizes = 1;
+ srf->sizes = NULL;
+ srf->offsets = NULL;
+ user_srf->size = size;
+ srf->base_size = req->base_size;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = req->multisample_count;
+ res->backup_size = svga3dsurface_get_serialized_size
+ (srf->format, srf->base_size, srf->mip_levels[0],
+ srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ if (req->buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+ &res->backup);
+ } else if (req->drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_gb_surface_reference_arg *arg =
+ (union drm_vmw_gb_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (srf->res.backup == NULL) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+ &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ req->sid,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.svga3d_flags = srf->flags;
+ rep->creq.format = srf->format;
+ rep->creq.mip_levels = srf->mip_levels[0];
+ rep->creq.drm_surface_flags = 0;
+ rep->creq.multisample_count = srf->multisample_count;
+ rep->creq.autogen_filter = srf->autogen_filter;
+ rep->creq.buffer_handle = backup_handle;
+ rep->creq.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
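An illustrative userspace sketch of driving the surface-create ioctl added above; it is not part of the patch. It shows the two ways the kernel code accepts a backup buffer: pass an existing dmabuf handle in req.buffer_handle, or leave it at SVGA3D_INVALID_ID and set drm_vmw_surface_flag_create_buffer so the kernel allocates one. The struct and flag names come from the request/reply structures handled above; the libdrm call, header paths and the base_size field layout are assumptions.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Hypothetical helper: create a GB surface and let the kernel allocate
 * the backing buffer. Returns 0 and fills *handle_out on success. */
static int create_gb_surface(int drm_fd, uint32_t format, uint32_t width,
                             uint32_t height, uint32_t *handle_out)
{
	union drm_vmw_gb_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.format = format;
	arg.req.mip_levels = 1;
	arg.req.base_size.width = width;
	arg.req.base_size.height = height;
	arg.req.base_size.depth = 1;
	/* no existing buffer: ask the kernel to allocate the backup buffer */
	arg.req.buffer_handle = SVGA3D_INVALID_ID;
	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;

	ret = drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_CREATE,
	                          &arg, sizeof(arg));
	if (ret)
		return ret;

	*handle_out = arg.rep.handle;
	/* arg.rep.buffer_handle, .buffer_size and .buffer_map_handle describe
	 * the kernel-allocated backup buffer */
	return 0;
}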
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 7d6bed222542..b2fd029d67b3 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,6 +1,6 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
- depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+ depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index afa1e9e4e512..c1189f004441 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -7,7 +7,9 @@ host1x-y = \
channel.o \
job.o \
debug.o \
+ mipi.o \
hw/host1x01.o \
- hw/host1x02.o
+ hw/host1x02.o \
+ hw/host1x04.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 6a929591aa73..ccdd2e6da5e3 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -188,6 +188,7 @@ int host1x_device_init(struct host1x_device *device)
return 0;
}
+EXPORT_SYMBOL(host1x_device_init);
int host1x_device_exit(struct host1x_device *device)
{
@@ -213,6 +214,7 @@ int host1x_device_exit(struct host1x_device *device)
return 0;
}
+EXPORT_SYMBOL(host1x_device_exit);
static int host1x_register_client(struct host1x *host1x,
struct host1x_client *client)
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 83ea51b9f0fc..b4ae3affb987 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -43,6 +43,7 @@ int host1x_job_submit(struct host1x_job *job)
return host1x_hw_channel_submit(host, job);
}
+EXPORT_SYMBOL(host1x_job_submit);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
{
@@ -60,6 +61,7 @@ struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
return err ? NULL : channel;
}
+EXPORT_SYMBOL(host1x_channel_get);
void host1x_channel_put(struct host1x_channel *channel)
{
@@ -76,6 +78,7 @@ void host1x_channel_put(struct host1x_channel *channel)
mutex_unlock(&channel->reflock);
}
+EXPORT_SYMBOL(host1x_channel_put);
struct host1x_channel *host1x_channel_request(struct device *dev)
{
@@ -115,6 +118,7 @@ fail:
mutex_unlock(&host->chlist_mutex);
return NULL;
}
+EXPORT_SYMBOL(host1x_channel_request);
void host1x_channel_free(struct host1x_channel *channel)
{
@@ -124,3 +128,4 @@ void host1x_channel_free(struct host1x_channel *channel)
list_del(&channel->list);
kfree(channel);
}
+EXPORT_SYMBOL(host1x_channel_free);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 3ec7d77de24d..ee3d12b51c50 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -96,7 +96,6 @@ static void show_all(struct host1x *m, struct output *o)
show_channels(ch, o, true);
}
-#ifdef CONFIG_DEBUG_FS
static void show_all_no_fifo(struct host1x *host1x, struct output *o)
{
struct host1x_channel *ch;
@@ -153,7 +152,7 @@ static const struct file_operations host1x_debug_fops = {
.release = single_release,
};
-void host1x_debug_init(struct host1x *host1x)
+static void host1x_debugfs_init(struct host1x *host1x)
{
struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
@@ -180,18 +179,22 @@ void host1x_debug_init(struct host1x *host1x)
&host1x_debug_force_timeout_channel);
}
-void host1x_debug_deinit(struct host1x *host1x)
+static void host1x_debugfs_exit(struct host1x *host1x)
{
debugfs_remove_recursive(host1x->debugfs);
}
-#else
+
void host1x_debug_init(struct host1x *host1x)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ host1x_debugfs_init(host1x);
}
+
void host1x_debug_deinit(struct host1x *host1x)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ host1x_debugfs_exit(host1x);
}
-#endif
void host1x_debug_dump(struct host1x *host1x)
{
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 80da003d63de..2529908d304b 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -34,6 +34,7 @@
#include "debug.h"
#include "hw/host1x01.h"
#include "hw/host1x02.h"
+#include "hw/host1x04.h"
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -77,7 +78,17 @@ static const struct host1x_info host1x02_info = {
.sync_offset = 0x3000,
};
+static const struct host1x_info host1x04_info = {
+ .nb_channels = 12,
+ .nb_pts = 192,
+ .nb_mlocks = 16,
+ .nb_bases = 64,
+ .init = host1x04_init,
+ .sync_offset = 0x2100,
+};
+
static struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
@@ -210,17 +221,26 @@ static int __init tegra_host1x_init(void)
return err;
err = platform_driver_register(&tegra_host1x_driver);
- if (err < 0) {
- host1x_bus_exit();
- return err;
- }
+ if (err < 0)
+ goto unregister_bus;
+
+ err = platform_driver_register(&tegra_mipi_driver);
+ if (err < 0)
+ goto unregister_host1x;
return 0;
+
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+unregister_bus:
+ host1x_bus_exit();
+ return err;
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
+ platform_driver_unregister(&tegra_mipi_driver);
platform_driver_unregister(&tegra_host1x_driver);
host1x_bus_exit();
}
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index a61a976e7a42..0b6e8e9629c5 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -306,4 +306,6 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
+extern struct platform_driver tegra_mipi_driver;
+
#endif
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
index e98caca0ca42..928946c2144b 100644
--- a/drivers/gpu/host1x/hw/host1x02.c
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -17,8 +17,8 @@
*/
/* include hw specification */
-#include "host1x01.h"
-#include "host1x01_hardware.h"
+#include "host1x02.h"
+#include "host1x02_hardware.h"
/* include code */
#include "cdma_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02_hardware.h b/drivers/gpu/host1x/hw/host1x02_hardware.h
new file mode 100644
index 000000000000..154901860bc6
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra114
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X02_HARDWARE_H
+#define __HOST1X_HOST1X02_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x02_channel.h"
+#include "hw_host1x02_sync.h"
+#include "hw_host1x02_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
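The opcode helpers above pack host1x command-stream words. A minimal sketch of how they compose, not part of the patch: it fills a bare array with a class switch, one register write and a syncpoint increment. The class id 0x51 (gr2d), the register offset and the condition value 1 (operation done) are illustrative assumptions; a real driver would emit these words through a host1x_job rather than a plain array.

#include <linux/types.h>
/* assumes the helpers from host1x02_hardware.h above are in scope */

static unsigned int build_example_stream(u32 *cmds)
{
	unsigned int i = 0;

	/* switch the channel to a client class (no register write yet) */
	cmds[i++] = host1x_opcode_setclass(0x51, 0, 0);
	/* write one word to register 0x09 of that class */
	cmds[i++] = host1x_opcode_incr(0x09, 1);
	cmds[i++] = 0x12345678;
	/* increment syncpoint 4 once the operation completes (cond 1) */
	cmds[i++] = host1x_opcode_imm_incr_syncpt(1, 4);
	/* keep the stream length even with a no-op */
	cmds[i++] = HOST1X_OPCODE_NOP;

	return i;	/* number of words written */
}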
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
new file mode 100644
index 000000000000..8007c70fa9c4
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x04.h"
+#include "host1x04_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x04_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x04.h b/drivers/gpu/host1x/hw/host1x04.h
new file mode 100644
index 000000000000..a9ab7496c06e
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X04_H
+#define HOST1X_HOST1X04_H
+
+struct host1x;
+
+int host1x04_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x04_hardware.h b/drivers/gpu/host1x/hw/host1x04_hardware.h
new file mode 100644
index 000000000000..de1a38175328
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x04_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra124
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X04_HARDWARE_H
+#define __HOST1X_HOST1X04_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x04_channel.h"
+#include "hw_host1x04_sync.h"
+#include "hw_host1x04_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
index a3b3c9874413..028e49d9bac9 100644
--- a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
}
#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
{
return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
new file mode 100644
index 000000000000..95e6f96142b9
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X04_CHANNEL_H
+#define HOST1X_HW_HOST1X04_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+ host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+ return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+ host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+ return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+ host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+ return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+ host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+ return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+ host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+ return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+ host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+ return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+ host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+ return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+ host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+ host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+ return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+ host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+ return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+ host1x_channel_dmactrl_dmainitget()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
new file mode 100644
index 000000000000..ef2275b5407a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X04_SYNC_H
+#define HOST1X_HW_HOST1X04_SYNC_H
+
+#define REGISTER_STRIDE 4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+ return 0xf80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+ host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+ return 0xe80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+ host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+ return 0xf00 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+ host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+ return 0xf20 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+ host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+ return 0xc00 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+ host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+ host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+ host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+ return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+ host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+ return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+ host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+ return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+ host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+ return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+ host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+ return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+ host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+ return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+ host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+ return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+ host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+ return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+ host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+ return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+ host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+ return 0x1380 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+ host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+ return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+ host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+ return 0xf60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+ host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+ return 0xc80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+ host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+ return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+ host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+ host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+ return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+ host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+ return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+ host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+ return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+ host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+ return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+ host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+ return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+ host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+ return 0xcc0 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+ host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+ return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+ host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+ return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+ host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
new file mode 100644
index 000000000000..d1460e971493
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X04_UCLASS_H
+#define HOST1X_HW_HOST1X04_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+ host1x_uclass_indoff_rwn_read_v()
+
+#endif
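A minimal sketch of the <x>_<y>_f / <x>_<y>_v naming convention documented in the comment blocks of the new headers: _f helpers shift a field value into place so several can be OR'd into one register word, and _v helpers extract a field from a read-back value. Only helpers that appear verbatim in this patch are used; the numeric inputs are arbitrary.

#include <linux/types.h>
/* assumes hw_host1x04_uclass.h and hw_host1x04_sync.h above are in scope */

static u32 example_wait_syncpt_word(void)
{
	/* compose: wait for syncpoint index 4 to reach threshold 100 */
	return host1x_uclass_wait_syncpt_indx_f(4) |
	       host1x_uclass_wait_syncpt_thresh_f(100);
}

static u32 example_cbstat_class(u32 cbstat)
{
	/* extract: the class the channel's command FIFO is fetching for */
	return host1x_sync_cbstat_cbclass_v(cbstat);
}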
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b26dcc83bc1b..db9017adfe2b 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/mach/irq.h>
#include "../intr.h"
#include "../dev.h"
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index de5ec333ce1a..112f27e51bc7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -75,12 +75,14 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
return job;
}
+EXPORT_SYMBOL(host1x_job_alloc);
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
kref_get(&job->ref);
return job;
}
+EXPORT_SYMBOL(host1x_job_get);
static void job_free(struct kref *ref)
{
@@ -93,6 +95,7 @@ void host1x_job_put(struct host1x_job *job)
{
kref_put(&job->ref, job_free);
}
+EXPORT_SYMBOL(host1x_job_put);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
u32 words, u32 offset)
@@ -104,6 +107,7 @@ void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
cur_gather->offset = offset;
job->num_gathers++;
}
+EXPORT_SYMBOL(host1x_job_add_gather);
/*
* NULL an already satisfied WAIT_SYNCPT host method, by patching its
@@ -534,7 +538,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
g->base = job->gather_addr_phys[i];
- for (j = 0; j < job->num_gathers; j++)
+ for (j = i + 1; j < job->num_gathers; j++)
if (job->gathers[j].bo == g->bo)
job->gathers[j].handled = true;
@@ -560,6 +564,7 @@ out:
return err;
}
+EXPORT_SYMBOL(host1x_job_pin);
void host1x_job_unpin(struct host1x_job *job)
{
@@ -577,6 +582,7 @@ void host1x_job_unpin(struct host1x_job *job)
job->gather_copy_mapped,
job->gather_copy);
}
+EXPORT_SYMBOL(host1x_job_unpin);
/*
* Debug routine used to dump job entries
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
new file mode 100644
index 000000000000..9882ea122024
--- /dev/null
+++ b/drivers/gpu/host1x/mipi.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+#define MIPI_CAL_CTRL 0x00
+#define MIPI_CAL_CTRL_START (1 << 0)
+
+#define MIPI_CAL_AUTOCAL_CTRL 0x01
+
+#define MIPI_CAL_STATUS 0x02
+#define MIPI_CAL_STATUS_DONE (1 << 16)
+#define MIPI_CAL_STATUS_ACTIVE (1 << 0)
+
+#define MIPI_CAL_CONFIG_CSIA 0x05
+#define MIPI_CAL_CONFIG_CSIB 0x06
+#define MIPI_CAL_CONFIG_CSIC 0x07
+#define MIPI_CAL_CONFIG_CSID 0x08
+#define MIPI_CAL_CONFIG_CSIE 0x09
+#define MIPI_CAL_CONFIG_DSIA 0x0e
+#define MIPI_CAL_CONFIG_DSIB 0x0f
+#define MIPI_CAL_CONFIG_DSIC 0x10
+#define MIPI_CAL_CONFIG_DSID 0x11
+
+#define MIPI_CAL_CONFIG_SELECT (1 << 21)
+#define MIPI_CAL_CONFIG_HSPDOS(x) (((x) & 0x1f) << 16)
+#define MIPI_CAL_CONFIG_HSPUOS(x) (((x) & 0x1f) << 8)
+#define MIPI_CAL_CONFIG_TERMOS(x) (((x) & 0x1f) << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG0 0x16
+#define MIPI_CAL_BIAS_PAD_PDVCLAMP (1 << 1)
+#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG1 0x17
+
+#define MIPI_CAL_BIAS_PAD_CFG2 0x18
+#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
+
+static const struct module {
+ unsigned long reg;
+} modules[] = {
+ { .reg = MIPI_CAL_CONFIG_CSIA },
+ { .reg = MIPI_CAL_CONFIG_CSIB },
+ { .reg = MIPI_CAL_CONFIG_CSIC },
+ { .reg = MIPI_CAL_CONFIG_CSID },
+ { .reg = MIPI_CAL_CONFIG_CSIE },
+ { .reg = MIPI_CAL_CONFIG_DSIA },
+ { .reg = MIPI_CAL_CONFIG_DSIB },
+ { .reg = MIPI_CAL_CONFIG_DSIC },
+ { .reg = MIPI_CAL_CONFIG_DSID },
+};
+
+struct tegra_mipi {
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+struct tegra_mipi_device {
+ struct platform_device *pdev;
+ struct tegra_mipi *mipi;
+ struct device *device;
+ unsigned long pads;
+};
+
+static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
+ unsigned long reg)
+{
+ return readl(mipi->regs + (reg << 2));
+}
+
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
+ unsigned long value, unsigned long reg)
+{
+ writel(value, mipi->regs + (reg << 2));
+}
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+{
+ struct device_node *np = device->of_node;
+ struct tegra_mipi_device *dev;
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
+ "#nvidia,mipi-calibrate-cells", 0,
+ &args);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ of_node_put(args.np);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev->pdev = of_find_device_by_node(args.np);
+ if (!dev->pdev) {
+ of_node_put(args.np);
+ err = -ENODEV;
+ goto free;
+ }
+
+ of_node_put(args.np);
+
+ dev->mipi = platform_get_drvdata(dev->pdev);
+ if (!dev->mipi) {
+ err = -EPROBE_DEFER;
+ goto pdev_put;
+ }
+
+ dev->pads = args.args[0];
+ dev->device = device;
+
+ return dev;
+
+pdev_put:
+ platform_device_put(dev->pdev);
+free:
+ kfree(dev);
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(tegra_mipi_request);
+
+void tegra_mipi_free(struct tegra_mipi_device *device)
+{
+ platform_device_put(device->pdev);
+ kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
+
+static int tegra_mipi_wait(struct tegra_mipi *mipi)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(250);
+ unsigned long value;
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
+ if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
+ (value & MIPI_CAL_STATUS_DONE) != 0)
+ return 0;
+
+ usleep_range(10, 50);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int tegra_mipi_calibrate(struct tegra_mipi_device *device)
+{
+ unsigned long value;
+ unsigned int i;
+ int err;
+
+ err = clk_enable(device->mipi->clk);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&device->mipi->lock);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+ value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
+ value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+ for (i = 0; i < ARRAY_SIZE(modules); i++) {
+ if (device->pads & BIT(i))
+ value = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSPDOS(0) |
+ MIPI_CAL_CONFIG_HSPUOS(4) |
+ MIPI_CAL_CONFIG_TERMOS(5);
+ else
+ value = 0;
+
+ tegra_mipi_writel(device->mipi, value, modules[i].reg);
+ }
+
+ tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+
+ err = tegra_mipi_wait(device->mipi);
+
+ mutex_unlock(&device->mipi->lock);
+ clk_disable(device->mipi->clk);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_mipi_calibrate);
+
+static int tegra_mipi_probe(struct platform_device *pdev)
+{
+ struct tegra_mipi *mipi;
+ struct resource *res;
+ int err;
+
+ mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mipi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mipi->regs))
+ return PTR_ERR(mipi->regs);
+
+ mutex_init(&mipi->lock);
+
+ mipi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mipi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(mipi->clk);
+ }
+
+ err = clk_prepare(mipi->clk);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, mipi);
+
+ return 0;
+}
+
+static int tegra_mipi_remove(struct platform_device *pdev)
+{
+ struct tegra_mipi *mipi = platform_get_drvdata(pdev);
+
+ clk_unprepare(mipi->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_mipi_of_match[] = {
+ { .compatible = "nvidia,tegra114-mipi", },
+ { },
+};
+
+struct platform_driver tegra_mipi_driver = {
+ .driver = {
+ .name = "tegra-mipi",
+ .of_match_table = tegra_mipi_of_match,
+ },
+ .probe = tegra_mipi_probe,
+ .remove = tegra_mipi_remove,
+};
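A hedged consumer-side sketch of the calibration API added in mipi.c; not part of the patch. It assumes the tegra_mipi_request(), tegra_mipi_calibrate() and tegra_mipi_free() prototypes are made available to client drivers (e.g. via <linux/host1x.h>) and that the consumer's device-tree node carries a nvidia,mipi-calibrate phandle whose argument is the pad bitmask (bit i selecting modules[i] above).

#include <linux/device.h>
#include <linux/err.h>
#include <linux/host1x.h>

static int example_calibrate_mipi_pads(struct device *dev)
{
	struct tegra_mipi_device *mipi;
	int err;

	/* resolves the nvidia,mipi-calibrate phandle and pad mask */
	mipi = tegra_mipi_request(dev);
	if (IS_ERR(mipi))
		return PTR_ERR(mipi);

	/* programs the selected pads, starts MIPI_CAL and waits for done */
	err = tegra_mipi_calibrate(mipi);

	tegra_mipi_free(mipi);
	return err;
}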
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 159c479829c9..bfb09d802abd 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -93,6 +93,7 @@ u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
return sp->id;
}
+EXPORT_SYMBOL(host1x_syncpt_id);
/*
* Updates the value sent to hardware.
@@ -168,6 +169,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
+EXPORT_SYMBOL(host1x_syncpt_incr);
/*
 * Updates the sync point from hardware, and returns true if the syncpoint is expired,
@@ -377,6 +379,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
struct host1x *host = dev_get_drvdata(dev->parent);
return host1x_syncpt_alloc(host, dev, flags);
}
+EXPORT_SYMBOL(host1x_syncpt_request);
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
@@ -390,6 +393,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
sp->name = NULL;
sp->client_managed = false;
}
+EXPORT_SYMBOL(host1x_syncpt_free);
void host1x_syncpt_deinit(struct host1x *host)
{
@@ -408,6 +412,7 @@ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
smp_rmb();
return (u32)atomic_read(&sp->max_val);
}
+EXPORT_SYMBOL(host1x_syncpt_read_max);
/*
* Read min, which is a shadow of the current sync point value in hardware.
@@ -417,6 +422,7 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
smp_rmb();
return (u32)atomic_read(&sp->min_val);
}
+EXPORT_SYMBOL(host1x_syncpt_read_min);
int host1x_syncpt_nb_pts(struct host1x *host)
{
@@ -439,13 +445,16 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
return NULL;
return host->syncpt + id;
}
+EXPORT_SYMBOL(host1x_syncpt_get);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
return sp ? sp->base : NULL;
}
+EXPORT_SYMBOL(host1x_syncpt_get_base);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
return base->id;
}
+EXPORT_SYMBOL(host1x_syncpt_base_id);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index dbe548b22b2a..9e8064205bc7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2163,6 +2163,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
@@ -2319,6 +2320,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
{ }
};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 548c1a519593..bd221263c739 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -247,6 +247,8 @@
#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_CP2112 0xea90
+#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
+
#define USB_VENDOR_ID_CYPRESS 0x04b4
#define USB_DEVICE_ID_CYPRESS_MOUSE 0x0001
#define USB_DEVICE_ID_CYPRESS_HIDCOM 0x5500
@@ -964,4 +966,7 @@
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
+#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */
+
#endif
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 7ed828056414..91fab975063c 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -624,7 +624,8 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
/* Setup sound card */
- err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+ err = snd_card_new(&pm->pk->hdev->dev, index[dev], id[dev],
+ THIS_MODULE, 0, &card);
if (err < 0) {
pk_error("failed to create pc-midi sound card\n");
err = -ENOMEM;
@@ -660,8 +661,6 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
&pcmidi_in_ops);
- snd_card_set_dev(card, &pm->pk->hdev->dev);
-
/* create sysfs variables */
err = device_create_file(&pm->pk->hdev->dev,
sysfs_device_attr_channel);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 8c52a07d5652..b50860db92f1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -865,37 +865,23 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
- union acpi_object params[4];
- struct acpi_object_list input;
+ union acpi_object *obj;
struct acpi_device *adev;
- unsigned long long value;
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))
return -ENODEV;
- input.count = ARRAY_SIZE(params);
- input.pointer = params;
-
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(i2c_hid_guid);
- params[0].buffer.pointer = i2c_hid_guid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 1;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = 1; /* HID function */
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
- &value))) {
+ obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
dev_err(&client->dev, "device _DSM execution failed\n");
return -ENODEV;
}
- pdata->hid_descriptor_address = value;
+ pdata->hid_descriptor_address = obj->integer.value;
+ ACPI_FREE(obj);
return 0;
}
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 0a74b5661186..5e4dfa4cfe22 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
-hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o
+hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index cea623c36ae2..602ca86a6488 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
@@ -209,7 +210,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
{
int i;
int pagecount;
- unsigned long long pfn;
struct vmbus_channel_gpadl_header *gpadl_header;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +219,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
pagecount = size >> PAGE_SHIFT;
- pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +247,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -301,7 +301,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 * so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = pfn + pfnsum + i;
+ gpadl_body->pfn[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+ PAGE_SHIFT;
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -327,7 +329,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -344,7 +347,7 @@ nomem:
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
*
* @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
 * @gpadl_handle: on success, the handle identifying the established GPADL
*/
@@ -552,14 +555,14 @@ EXPORT_SYMBOL_GPL(vmbus_close);
*
* Mainly used by Hyper-V drivers.
*/
-int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid,
enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
int ret;
bool signal = false;
@@ -573,11 +576,12 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
desc.len8 = (u16)(packetlen_aligned >> 3);
desc.trans_id = requestid;
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
@@ -603,7 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
@@ -635,11 +639,12 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.range[i].pfn = pagebuffers[i].pfn;
}
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
@@ -663,7 +668,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
@@ -698,11 +703,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
pfncount * sizeof(u64));
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
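
All three send paths now describe a packet as a struct kvec array rather than a scatterlist: each element is simply a kernel virtual address plus a length, which is all the ring-buffer copy needs. A minimal sketch of the pattern (helper name is illustrative):

#include <linux/uio.h>

/* Describe a header, payload and alignment pad as a 3-element kvec. */
static void fill_kvec(struct kvec kv[3], void *hdr, size_t hdrlen,
		      void *buf, size_t buflen, u64 *pad, size_t padlen)
{
	kv[0].iov_base = hdr;
	kv[0].iov_len  = hdrlen;
	kv[1].iov_base = buf;
	kv[1].iov_len  = buflen;
	kv[2].iov_base = pad;
	kv[2].iov_len  = padlen;
}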
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 7e17a5495e02..7e6d78dc9437 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1171,7 +1171,8 @@ static int dm_thread_func(void *dm_dev)
int t;
while (!kthread_should_stop()) {
- t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
+ t = wait_for_completion_interruptible_timeout(
+ &dm_device.config_event, 1*HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
new file mode 100644
index 000000000000..eaaa3d843b80
--- /dev/null
+++ b/drivers/hv/hv_fcopy.c
@@ -0,0 +1,414 @@
+/*
+ * An implementation of file copy service.
+ *
+ * Copyright (C) 2014, Microsoft, Inc.
+ *
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/nls.h>
+#include <linux/workqueue.h>
+#include <linux/cdev.h>
+#include <linux/hyperv.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "hyperv_vmbus.h"
+
+#define WIN8_SRV_MAJOR 1
+#define WIN8_SRV_MINOR 1
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+
+/*
+ * Global state maintained for transaction that is being processed.
+ * For a class of integration services, including the "file copy service",
+ * the specified protocol is a "request/response" protocol which means that
+ * there can only be a single outstanding transaction from the host at any
+ * given point in time. We use this to simplify memory management in this
+ * driver - we cache and process only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBUS until the current packet is fully
+ * handled.
+ *
+ * The transaction "active" state is set when we receive a request from the
+ * host and cleared when the transaction completes - that is, when we send
+ * our response back to the host. While the transaction active state is set,
+ * we defer handling incoming packets.
+ */
+
+static struct {
+ bool active; /* transaction status - active or not */
+ int recv_len; /* number of bytes received. */
+ struct hv_fcopy_hdr *fcopy_msg; /* current message */
+ struct hv_start_fcopy message; /* sent to daemon */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ void *fcopy_context; /* for the channel callback */
+ struct semaphore read_sema;
+} fcopy_transaction;
+
+static bool opened; /* currently device opened */
+
+/*
+ * Before we can accept copy messages from the host, we need
+ * to handshake with the user level daemon. This state tracks
+ * if we are in the handshake phase.
+ */
+static bool in_hand_shake = true;
+static void fcopy_send_data(void);
+static void fcopy_respond_to_host(int error);
+static void fcopy_work_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func);
+static u8 *recv_buffer;
+
+static void fcopy_work_func(struct work_struct *dummy)
+{
+ /*
+	 * If the timer fires, the user-mode component has not responded;
+	 * fail the pending transaction and report the error to the host.
+ */
+ fcopy_respond_to_host(HV_E_FAIL);
+}
+
+static int fcopy_handle_handshake(u32 version)
+{
+ switch (version) {
+ case FCOPY_CURRENT_VERSION:
+ break;
+ default:
+ /*
+ * For now we will fail the registration.
+ * If and when we have multiple versions to
+ * deal with, we will be backward compatible.
+ * We will add this code when needed.
+ */
+ return -EINVAL;
+ }
+ pr_info("FCP: user-mode registering done. Daemon version: %d\n",
+ version);
+ fcopy_transaction.active = false;
+ if (fcopy_transaction.fcopy_context)
+ hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
+ in_hand_shake = false;
+ return 0;
+}
+
+static void fcopy_send_data(void)
+{
+ struct hv_start_fcopy *smsg_out = &fcopy_transaction.message;
+ int operation = fcopy_transaction.fcopy_msg->operation;
+ struct hv_start_fcopy *smsg_in;
+
+ /*
+	 * The strings sent from the host are encoded in utf16;
+	 * convert them to utf8 strings.
+ * The host assures us that the utf16 strings will not exceed
+ * the max lengths specified. We will however, reserve room
+ * for the string terminating character - in the utf16s_utf8s()
+ * function we limit the size of the buffer where the converted
+ * string is placed to W_MAX_PATH -1 to guarantee
+ * that the strings can be properly terminated!
+ */
+
+ switch (operation) {
+ case START_FILE_COPY:
+ memset(smsg_out, 0, sizeof(struct hv_start_fcopy));
+ smsg_out->hdr.operation = operation;
+ smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->file_name, W_MAX_PATH - 1);
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->path_name, W_MAX_PATH - 1);
+
+ smsg_out->copy_flags = smsg_in->copy_flags;
+ smsg_out->file_size = smsg_in->file_size;
+ break;
+
+ default:
+ break;
+ }
+ up(&fcopy_transaction.read_sema);
+ return;
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+fcopy_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdr;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+	 * Copy the global state needed to complete the transaction. Only one
+	 * transaction can be active at a time; this is guaranteed by the file
+	 * copy protocol implemented by the host and further enforced by the
+	 * "transaction active" state we maintain in this driver.
+ */
+
+ buf_len = fcopy_transaction.recv_len;
+ channel = fcopy_transaction.recv_channel;
+ req_id = fcopy_transaction.recv_req_id;
+
+ fcopy_transaction.active = false;
+
+ icmsghdr = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdr->status = error;
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+void hv_fcopy_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_fcopy_hdr *fcopy_msg;
+ struct icmsg_hdr *icmsghdr;
+ struct icmsg_negotiate *negop = NULL;
+ int util_fw_version;
+ int fcopy_srv_version;
+
+ if (fcopy_transaction.active) {
+ /*
+ * We will defer processing this callback once
+ * the current transaction is complete.
+ */
+ fcopy_transaction.fcopy_context = context;
+ return;
+ }
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+ if (recvlen <= 0)
+ return;
+
+ icmsghdr = (struct icmsg_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr)];
+ if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ util_fw_version = UTIL_FW_VERSION;
+ fcopy_srv_version = WIN8_SRV_VERSION;
+ vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
+ util_fw_version, fcopy_srv_version);
+ } else {
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ fcopy_transaction.active = true;
+ fcopy_transaction.recv_len = recvlen;
+ fcopy_transaction.recv_channel = channel;
+ fcopy_transaction.recv_req_id = requestid;
+ fcopy_transaction.fcopy_msg = fcopy_msg;
+
+ /*
+ * Send the information to the user-level daemon.
+ */
+ fcopy_send_data();
+ schedule_delayed_work(&fcopy_work, 5*HZ);
+ return;
+ }
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/*
+ * Create a char device that can support read/write for passing
+ * the payload.
+ */
+
+static ssize_t fcopy_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ void *src;
+ size_t copy_size;
+ int operation;
+
+ /*
+ * Wait until there is something to be read.
+ */
+ if (down_interruptible(&fcopy_transaction.read_sema))
+ return -EINTR;
+
+ /*
+	 * The channel may be rescinded; in that case, we will wake up the
+	 * thread blocked on the semaphore and use the opened state to
+	 * handle it correctly.
+ */
+ if (!opened)
+ return -ENODEV;
+
+ operation = fcopy_transaction.fcopy_msg->operation;
+
+ if (operation == START_FILE_COPY) {
+ src = &fcopy_transaction.message;
+ copy_size = sizeof(struct hv_start_fcopy);
+ if (count < copy_size)
+ return 0;
+ } else {
+ src = fcopy_transaction.fcopy_msg;
+ copy_size = sizeof(struct hv_do_fcopy);
+ if (count < copy_size)
+ return 0;
+ }
+ if (copy_to_user(buf, src, copy_size))
+ return -EFAULT;
+
+ return copy_size;
+}
+
+static ssize_t fcopy_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int response = 0;
+
+ if (count != sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_user(&response, buf, sizeof(int)))
+ return -EFAULT;
+
+ if (in_hand_shake) {
+ if (fcopy_handle_handshake(response))
+ return -EINVAL;
+ return sizeof(int);
+ }
+
+ /*
+ * Complete the transaction by forwarding the result
+ * to the host. But first, cancel the timeout.
+ */
+ if (cancel_delayed_work_sync(&fcopy_work))
+ fcopy_respond_to_host(response);
+
+ return sizeof(int);
+}
+
+static int fcopy_open(struct inode *inode, struct file *f)
+{
+ /*
+	 * The user-level daemon that will open this device is
+	 * really an extension of this driver. We can have only
+	 * one active open at a time.
+ */
+ if (opened)
+ return -EBUSY;
+
+ /*
+ * The daemon is alive; setup the state.
+ */
+ opened = true;
+ return 0;
+}
+
+static int fcopy_release(struct inode *inode, struct file *f)
+{
+ /*
+ * The daemon has exited; reset the state.
+ */
+ in_hand_shake = true;
+ opened = false;
+ return 0;
+}
+
+
+static const struct file_operations fcopy_fops = {
+ .read = fcopy_read,
+ .write = fcopy_write,
+ .release = fcopy_release,
+ .open = fcopy_open,
+};
+
+static struct miscdevice fcopy_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vmbus/hv_fcopy",
+ .fops = &fcopy_fops,
+};
+
+static int fcopy_dev_init(void)
+{
+ return misc_register(&fcopy_misc);
+}
+
+static void fcopy_dev_deinit(void)
+{
+
+ /*
+ * The device is going away - perhaps because the
+ * host has rescinded the channel. Setup state so that
+	 * host has rescinded the channel. Set up state so that the
+	 * user-level daemon can gracefully exit if it is blocked
+ */
+ opened = false;
+ /*
+ * Signal the semaphore as the device is
+ * going away.
+ */
+ up(&fcopy_transaction.read_sema);
+ misc_deregister(&fcopy_misc);
+}
+
+int hv_fcopy_init(struct hv_util_service *srv)
+{
+ recv_buffer = srv->recv_buffer;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ fcopy_transaction.active = true;
+ sema_init(&fcopy_transaction.read_sema, 0);
+
+ return fcopy_dev_init();
+}
+
+void hv_fcopy_deinit(void)
+{
+ cancel_delayed_work_sync(&fcopy_work);
+ fcopy_dev_deinit();
+}
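
From user space, the char device above implies a simple daemon loop: register by writing the protocol version, then read one request at a time and write back an int status. A minimal sketch, assuming FCOPY_CURRENT_VERSION and the fcopy message structures are available from the exported <linux/hyperv.h> header:

#include <fcntl.h>
#include <unistd.h>
#include <linux/hyperv.h>

int main(void)
{
	int fd, version = FCOPY_CURRENT_VERSION, status = 0;
	char msg[4096];

	fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
	if (fd < 0)
		return 1;
	/* Handshake: the first write registers the daemon's protocol version. */
	if (write(fd, &version, sizeof(version)) != sizeof(version))
		return 1;

	for (;;) {
		/* Blocks on the read semaphore until the host posts a request. */
		if (read(fd, msg, sizeof(msg)) <= 0)
			break;
		/* ... act on the message (e.g. START_FILE_COPY) ... */
		write(fd, &status, sizeof(status));	/* 0 reports success */
	}
	return 0;
}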
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 09988b289622..ea852537307e 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -113,7 +113,7 @@ kvp_register(int reg_value)
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
}
}
@@ -435,7 +435,7 @@ kvp_send_key(struct work_struct *dummy)
}
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 0c3546224376..34f14fddb666 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -98,7 +98,7 @@ static void vss_send_op(struct work_struct *dummy)
vss_msg->vss_hdr.operation = op;
msg->len = sizeof(struct hv_vss_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
return;
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 62dfd246b948..dd761806f0e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/hyperv.h>
+#include "hyperv_vmbus.h"
#define SD_MAJOR 3
#define SD_MINOR 0
@@ -82,6 +83,12 @@ static struct hv_util_service util_vss = {
.util_deinit = hv_vss_deinit,
};
+static struct hv_util_service util_fcopy = {
+ .util_cb = hv_fcopy_onchannelcallback,
+ .util_init = hv_fcopy_init,
+ .util_deinit = hv_fcopy_deinit,
+};
+
static void perform_shutdown(struct work_struct *dummy)
{
orderly_poweroff(true);
@@ -401,6 +408,10 @@ static const struct hv_vmbus_device_id id_table[] = {
{ HV_VSS_GUID,
.driver_data = (unsigned long)&util_vss
},
+ /* File copy GUID */
+ { HV_FCOPY_GUID,
+ .driver_data = (unsigned long)&util_fcopy
+ },
{ },
};
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index e05517616a06..860134da8039 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -559,8 +559,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
- struct scatterlist *sglist,
- u32 sgcount, bool *signal);
+ struct kvec *kv_list,
+ u32 kv_count, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -669,5 +669,9 @@ int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+int hv_fcopy_init(struct hv_util_service *);
+void hv_fcopy_deinit(void);
+void hv_fcopy_onchannelcallback(void *);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 26c93cf9f6be..15db66b74141 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
@@ -387,23 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount, bool *signal)
+ struct kvec *kv_list, u32 kv_count, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 totalbytes_towrite = 0;
- struct scatterlist *sg;
u32 next_write_location;
u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
- for_each_sg(sglist, sg, sgcount, i)
- {
- totalbytes_towrite += sg->length;
- }
+ for (i = 0; i < kv_count; i++)
+ totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
@@ -427,12 +425,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
old_write = next_write_location;
- for_each_sg(sglist, sg, sgcount, i)
- {
+ for (i = 0; i < kv_count; i++) {
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
- sg_virt(sg),
- sg->length);
+ kv_list[i].iov_base,
+ kv_list[i].iov_len);
}
/* Set previous packet start */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 48aad4faea06..8e53a3c2607e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -25,12 +25,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
@@ -39,13 +37,18 @@
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
-
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;
+struct resource hyperv_mmio = {
+ .name = "hyperv mmio",
+ .flags = IORESOURCE_MEM,
+};
+EXPORT_SYMBOL_GPL(hyperv_mmio);
+
static int vmbus_exists(void)
{
if (hv_acpi_dev == NULL)
@@ -560,9 +563,6 @@ static struct bus_type hv_bus = {
.dev_groups = vmbus_groups,
};
-static const char *driver_name = "hyperv";
-
-
struct onmessage_work_context {
struct work_struct work;
struct hv_message msg;
@@ -621,7 +621,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
}
}
-static irqreturn_t vmbus_isr(int irq, void *dev_id)
+static void vmbus_isr(void)
{
int cpu = smp_processor_id();
void *page_addr;
@@ -631,7 +631,7 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
page_addr = hv_context.synic_event_page[cpu];
if (page_addr == NULL)
- return IRQ_NONE;
+ return;
event = (union hv_synic_event_flags *)page_addr +
VMBUS_MESSAGE_SINT;
@@ -667,28 +667,8 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
- if (msg->header.message_type != HVMSG_NONE) {
- handled = true;
+ if (msg->header.message_type != HVMSG_NONE)
tasklet_schedule(&msg_dpc);
- }
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-/*
- * vmbus interrupt flow handler:
- * vmbus interrupts can concurrently occur on multiple CPUs and
- * can be handled concurrently.
- */
-
-static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
-{
- kstat_incr_irqs_this_cpu(irq, desc);
-
- desc->action->handler(irq, desc->action->dev_id);
}
/*
@@ -717,25 +697,7 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_cleanup;
- ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
-
- if (ret != 0) {
- pr_err("Unable to request IRQ %d\n",
- irq);
- goto err_unregister;
- }
-
- /*
- * Vmbus interrupts can be handled concurrently on
- * different CPUs. Establish an appropriate interrupt flow
- * handler that can support this model.
- */
- irq_set_handler(irq, vmbus_flow_handler);
-
- /*
- * Register our interrupt handler.
- */
- hv_register_vmbus_handler(irq, vmbus_isr);
+ hv_setup_vmbus_irq(vmbus_isr);
ret = hv_synic_alloc();
if (ret)
@@ -755,9 +717,8 @@ static int vmbus_bus_init(int irq)
err_alloc:
hv_synic_free();
- free_irq(irq, hv_acpi_dev);
+ hv_remove_vmbus_irq();
-err_unregister:
bus_unregister(&hv_bus);
err_cleanup:
@@ -888,18 +849,21 @@ void vmbus_device_unregister(struct hv_device *device_obj)
/*
- * VMBUS is an acpi enumerated device. Get the the IRQ information
- * from DSDT.
+ * VMBUS is an ACPI enumerated device. Get the information we
+ * need from DSDT.
*/
-static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = res->data.irq.interrupts[0];
+ break;
- if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
- struct acpi_resource_irq *irqp;
- irqp = &res->data.irq;
-
- *((unsigned int *)irq) = irqp->interrupts[0];
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ hyperv_mmio.start = res->data.address64.minimum;
+ hyperv_mmio.end = res->data.address64.maximum;
+ break;
}
return AE_OK;
@@ -908,18 +872,34 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
+ int ret_val = -ENODEV;
hv_acpi_dev = device;
result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- vmbus_walk_resources, &irq);
+ vmbus_walk_resources, NULL);
- if (ACPI_FAILURE(result)) {
- complete(&probe_event);
- return -ENODEV;
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ /*
+ * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
+ * has the mmio ranges. Get that.
+ */
+ if (device->parent) {
+ result = acpi_walk_resources(device->parent->handle,
+ METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
+
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ if (hyperv_mmio.start && hyperv_mmio.end)
+ request_resource(&iomem_resource, &hyperv_mmio);
}
+ ret_val = 0;
+
+acpi_walk_err:
complete(&probe_event);
- return 0;
+ return ret_val;
}
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -949,7 +929,6 @@ static int __init hv_acpi_init(void)
/*
* Get irq resources first.
*/
-
ret = acpi_bus_register_driver(&vmbus_acpi_driver);
if (ret)
@@ -980,8 +959,7 @@ cleanup:
static void __exit vmbus_exit(void)
{
-
- free_irq(irq, hv_acpi_dev);
+ hv_remove_vmbus_irq();
vmbus_free_channels();
bus_unregister(&hv_bus);
hv_cleanup();
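
Exporting hyperv_mmio gives Generation 2 (UEFI) guests, where the VMOD parent carries the MMIO ranges, a root resource describing the VMBus MMIO window; a child driver can then allocate a range out of it. One plausible consumer pattern (names and size are illustrative, not part of this patch):

#include <linux/ioport.h>
#include <linux/mm.h>

extern struct resource hyperv_mmio;	/* defined in vmbus_drv.c above */

/* Sketch: claim part of the VMBus MMIO window for a synthetic device. */
static struct resource child_mmio = {
	.name  = "example child mmio",
	.flags = IORESOURCE_MEM,
};

static int claim_child_mmio(resource_size_t size)
{
	return allocate_resource(&hyperv_mmio, &child_mmio, size,
				 hyperv_mmio.start, hyperv_mmio.end,
				 PAGE_SIZE, NULL, NULL);
}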
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 52d548f1dc1d..b53d87958710 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -111,22 +111,6 @@ config SENSORS_AD7418
This driver can also be built as a module. If so, the module
will be called ad7418.
-config SENSORS_ADCXX
- tristate "National Semiconductor ADCxxxSxxx"
- depends on SPI_MASTER
- help
- If you say yes here you get support for the National Semiconductor
- ADC<bb><c>S<sss> chip family, where
- * bb is the resolution in number of bits (8, 10, 12)
- * c is the number of channels (1, 2, 4, 8)
- * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
- kSPS and 101 for 1 MSPS)
-
- Examples : ADC081S101, ADC124S501, ...
-
- This driver can also be built as a module. If so, the module
- will be called adcxx.
-
config SENSORS_ADM1021
tristate "Analog Devices ADM1021 and compatibles"
depends on I2C
@@ -312,6 +296,31 @@ config SENSORS_FAM15H_POWER
This driver can also be built as a module. If so, the module
will be called fam15h_power.
+config SENSORS_APPLESMC
+ tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
+ depends on INPUT && X86
+ select NEW_LEDS
+ select LEDS_CLASS
+ select INPUT_POLLDEV
+ default n
+ help
+ This driver provides support for the Apple System Management
+ Controller, which provides an accelerometer (Apple Sudden Motion
+ Sensor), light sensors, temperature sensors, keyboard backlight
+ control and fan control.
+
+ Only Intel-based Apple's computers are supported (MacBook Pro,
+ MacBook, MacMini).
+
+ Data from the different sensors, keyboard backlight control and fan
+ control are accessible via sysfs.
+
+ This driver also provides an absolute input class device, allowing
+ the laptop to act as a pinball machine-esque joystick.
+
+ Say Y here if you have an applicable laptop and want to experience
+ the awesome power of applesmc.
+
config SENSORS_ASB100
tristate "Asus ASB100 Bach"
depends on X86 && I2C
@@ -435,6 +444,12 @@ config SENSORS_F75375S
This driver can also be built as a module. If so, the module
will be called f75375s.
+config SENSORS_MC13783_ADC
+ tristate "Freescale MC13783/MC13892 ADC"
+ depends on MFD_MC13XXX
+ help
+ Support for the A/D converter on MC13783 and MC13892 PMIC.
+
config SENSORS_FSCHMD
tristate "Fujitsu Siemens Computers sensor chips"
depends on X86 && I2C
@@ -451,26 +466,6 @@ config SENSORS_FSCHMD
This driver can also be built as a module. If so, the module
will be called fschmd.
-config SENSORS_G760A
- tristate "GMT G760A"
- depends on I2C
- help
- If you say yes here you get support for Global Mixed-mode
- Technology Inc G760A fan speed PWM controller chips.
-
- This driver can also be built as a module. If so, the module
- will be called g760a.
-
-config SENSORS_G762
- tristate "GMT G762 and G763"
- depends on I2C
- help
- If you say yes here you get support for Global Mixed-mode
- Technology Inc G762 and G763 fan speed PWM controller chips.
-
- This driver can also be built as a module. If so, the module
- will be called g762.
-
config SENSORS_GL518SM
tristate "Genesys Logic GL518SM"
depends on I2C
@@ -492,6 +487,26 @@ config SENSORS_GL520SM
This driver can also be built as a module. If so, the module
will be called gl520sm.
+config SENSORS_G760A
+ tristate "GMT G760A"
+ depends on I2C
+ help
+ If you say yes here you get support for Global Mixed-mode
+ Technology Inc G760A fan speed PWM controller chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called g760a.
+
+config SENSORS_G762
+ tristate "GMT G762 and G763"
+ depends on I2C
+ help
+ If you say yes here you get support for Global Mixed-mode
+ Technology Inc G762 and G763 fan speed PWM controller chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called g762.
+
config SENSORS_GPIO_FAN
tristate "GPIO fan"
depends on GPIOLIB
@@ -511,24 +526,6 @@ config SENSORS_HIH6130
This driver can also be built as a module. If so, the module
will be called hih6130.
-config SENSORS_HTU21
- tristate "Measurement Specialties HTU21D humidity/temperature sensors"
- depends on I2C
- help
- If you say yes here you get support for the Measurement Specialties
- HTU21D humidity and temperature sensors.
-
- This driver can also be built as a module. If so, the module
- will be called htu21.
-
-config SENSORS_CORETEMP
- tristate "Intel Core/Core2/Atom temperature sensor"
- depends on X86
- help
- If you say yes here you get support for the temperature
- sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check Documentation/hwmon/coretemp for details.
-
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
@@ -557,6 +554,14 @@ config SENSORS_IBMPEX
This driver can also be built as a module. If so, the module
will be called ibmpex.
+config SENSORS_IBMPOWERNV
+ tristate "IBM PowerNv Platform temperature/power/fan sensor"
+ depends on PPC_POWERNV
+ default y
+ help
+ If you say yes here you get support for the temperature/fan/power
+ sensors on your platform.
+
config SENSORS_IIO_HWMON
tristate "Hwmon driver that uses channels specified via iio maps"
depends on IIO
@@ -566,6 +571,14 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_CORETEMP
+ tristate "Intel Core/Core2/Atom temperature sensor"
+ depends on X86
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU. Most of the family 6 CPUs
+ are supported. Check Documentation/hwmon/coretemp for details.
+
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
depends on !PPC
@@ -573,8 +586,8 @@ config SENSORS_IT87
help
If you say yes here you get support for ITE IT8705F, IT8712F,
IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
- IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
- SiS950 clone.
+ IT8771E, IT8772E, IT8782F, IT8783E/F and IT8603E sensor chips,
+ and the SiS950 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -614,6 +627,219 @@ config SENSORS_LINEAGE
This driver can also be built as a module. If so, the module
will be called lineage-pem.
+config SENSORS_LTC2945
+ tristate "Linear Technology LTC2945"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC2945
+ I2C System Monitor.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2945.
+
+config SENSORS_LTC4151
+ tristate "Linear Technology LTC4151"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4151
+ High Voltage I2C Current and Voltage Monitor interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4151.
+
+config SENSORS_LTC4215
+ tristate "Linear Technology LTC4215"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4215
+ Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4215.
+
+config SENSORS_LTC4222
+ tristate "Linear Technology LTC4222"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4222
+ Dual Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4222.
+
+config SENSORS_LTC4245
+ tristate "Linear Technology LTC4245"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4245
+ Multiple Supply Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4245.
+
+config SENSORS_LTC4260
+ tristate "Linear Technology LTC4260"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4260
+ Positive Voltage Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4260.
+
+config SENSORS_LTC4261
+ tristate "Linear Technology LTC4261"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4261
+ Negative Voltage Hot Swap Controller I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4261.
+
+config SENSORS_MAX1111
+ tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
+ depends on SPI_MASTER
+ help
+ Say y here to support Maxim's MAX1110, MAX1111, MAX1112, and MAX1113
+ ADC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1111.
+
+config SENSORS_MAX16065
+ tristate "Maxim MAX16065 System Manager and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for hardware monitoring
+ capabilities of the following Maxim System Manager chips.
+ MAX16065
+ MAX16066
+ MAX16067
+ MAX16068
+ MAX16070
+ MAX16071
+
+ This driver can also be built as a module. If so, the module
+ will be called max16065.
+
+config SENSORS_MAX1619
+ tristate "Maxim MAX1619 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX1619 sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1619.
+
+config SENSORS_MAX1668
+ tristate "Maxim MAX1668 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX1668, MAX1989 and
+ MAX1805 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1668.
+
+config SENSORS_MAX197
+ tristate "Maxim MAX197 and compatibles"
+ help
+ Support for the Maxim MAX197 A/D converter.
+ Support will include, but not be limited to, MAX197, and MAX199.
+
+ This driver can also be built as a module. If so, the module
+ will be called max197.
+
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
+config SENSORS_MAX6642
+ tristate "Maxim MAX6642 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX6642 sensor chip.
+ MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor
+ with Overtemperature Alarm from Maxim.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6642.
+
+config SENSORS_MAX6650
+ tristate "Maxim MAX6650 sensor chip"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6650 / MAX6651
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6650.
+
+config SENSORS_MAX6697
+ tristate "Maxim MAX6697 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX6581, MAX6602, MAX6622,
+ MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
+ temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6697.
+
+config SENSORS_HTU21
+ tristate "Measurement Specialties HTU21D humidity/temperature sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Measurement Specialties
+ HTU21D humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called htu21.
+
+config SENSORS_MCP3021
+ tristate "Microchip MCP3021 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MCP3021 and MCP3221.
+ The MCP3021 is a A/D converter (ADC) with 10-bit and the MCP3221
+ with 12-bit resolution.
+
+ This driver can also be built as a module. If so, the module
+ will be called mcp3021.
+
+config SENSORS_ADCXX
+ tristate "National Semiconductor ADCxxxSxxx"
+ depends on SPI_MASTER
+ help
+ If you say yes here you get support for the National Semiconductor
+ ADC<bb><c>S<sss> chip family, where
+ * bb is the resolution in number of bits (8, 10, 12)
+ * c is the number of channels (1, 2, 4, 8)
+ * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
+ kSPS and 101 for 1 MSPS)
+
+ Examples : ADC081S101, ADC124S501, ...
+
+ This driver can also be built as a module. If so, the module
+ will be called adcxx.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and compatibles"
depends on I2C
@@ -650,6 +876,7 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
+ depends on THERMAL || !THERMAL_OF
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -775,50 +1002,6 @@ config SENSORS_LM93
This driver can also be built as a module. If so, the module
will be called lm93.
-config SENSORS_LTC4151
- tristate "Linear Technology LTC4151"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4151
- High Voltage I2C Current and Voltage Monitor interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4151.
-
-config SENSORS_LTC4215
- tristate "Linear Technology LTC4215"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4215
- Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4215.
-
-config SENSORS_LTC4245
- tristate "Linear Technology LTC4245"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4245
- Multiple Supply Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4245.
-
-config SENSORS_LTC4261
- tristate "Linear Technology LTC4261"
- depends on I2C
- default n
- help
- If you say yes here you get support for Linear Technology LTC4261
- Negative Voltage Hot Swap Controller I2C interface.
-
- This driver can also be built as a module. If so, the module will
- be called ltc4261.
-
config SENSORS_LM95234
tristate "National Semiconductor LM95234"
depends on I2C
@@ -848,125 +1031,33 @@ config SENSORS_LM95245
This driver can also be built as a module. If so, the module
will be called lm95245.
-config SENSORS_MAX1111
- tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
- depends on SPI_MASTER
- help
- Say y here to support Maxim's MAX1110, MAX1111, MAX1112, and MAX1113
- ADC chips.
-
- This driver can also be built as a module. If so, the module
- will be called max1111.
-
-config SENSORS_MAX16065
- tristate "Maxim MAX16065 System Manager and compatibles"
- depends on I2C
- help
- If you say yes here you get support for hardware monitoring
- capabilities of the following Maxim System Manager chips.
- MAX16065
- MAX16066
- MAX16067
- MAX16068
- MAX16070
- MAX16071
-
- This driver can also be built as a module. If so, the module
- will be called max16065.
-
-config SENSORS_MAX1619
- tristate "Maxim MAX1619 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for MAX1619 sensor chip.
-
- This driver can also be built as a module. If so, the module
- will be called max1619.
-
-config SENSORS_MAX1668
- tristate "Maxim MAX1668 and compatibles"
- depends on I2C
- help
- If you say yes here you get support for MAX1668, MAX1989 and
- MAX1805 chips.
-
- This driver can also be built as a module. If so, the module
- will be called max1668.
-
-config SENSORS_MAX197
- tristate "Maxim MAX197 and compatibles"
- help
- Support for the Maxim MAX197 A/D converter.
- Support will include, but not be limited to, MAX197, and MAX199.
-
- This driver can also be built as a module. If so, the module
- will be called max197.
-
-config SENSORS_MAX6639
- tristate "Maxim MAX6639 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for the MAX6639
- sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6639.
-
-config SENSORS_MAX6642
- tristate "Maxim MAX6642 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for MAX6642 sensor chip.
- MAX6642 is a SMBus-Compatible Remote/Local Temperature Sensor
- with Overtemperature Alarm from Maxim.
-
- This driver can also be built as a module. If so, the module
- will be called max6642.
-
-config SENSORS_MAX6650
- tristate "Maxim MAX6650 sensor chip"
- depends on I2C
- help
- If you say yes here you get support for the MAX6650 / MAX6651
- sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6650.
-
-config SENSORS_MAX6697
- tristate "Maxim MAX6697 and compatibles"
- depends on I2C
- help
- If you say yes here you get support for MAX6581, MAX6602, MAX6622,
- MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
- temperature sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called max6697.
-
-config SENSORS_MCP3021
- tristate "Microchip MCP3021 and compatibles"
- depends on I2C
+config SENSORS_PC87360
+ tristate "National Semiconductor PC87360 family"
+ depends on !PPC
+ select HWMON_VID
help
- If you say yes here you get support for MCP3021 and MCP3221.
- The MCP3021 is a A/D converter (ADC) with 10-bit and the MCP3221
- with 12-bit resolution.
+ If you say yes here you get access to the hardware monitoring
+ functions of the National Semiconductor PC8736x Super-I/O chips.
+ The PC87360, PC87363 and PC87364 only have fan monitoring and
+ control. The PC87365 and PC87366 additionally have voltage and
+ temperature monitoring.
This driver can also be built as a module. If so, the module
- will be called mcp3021.
+ will be called pc87360.
-config SENSORS_NCT6775
- tristate "Nuvoton NCT6775F and compatibles"
+config SENSORS_PC87427
+ tristate "National Semiconductor PC87427"
depends on !PPC
- select HWMON_VID
help
- If you say yes here you get support for the hardware monitoring
- functionality of the Nuvoton NCT6775F, NCT6776F, NCT6779D
- and compatible Super-I/O chips. This driver replaces the
- w83627ehf driver for NCT6775F and NCT6776F.
+ If you say yes here you get access to the hardware monitoring
+ functions of the National Semiconductor PC87427 Super-I/O chip.
+ The chip has two distinct logical devices, one for fan speed
+ monitoring and control, and one for voltage and temperature
+ monitoring. Fan speed monitoring and control are supported, as
+ well as temperature monitoring. Voltages aren't supported yet.
This driver can also be built as a module. If so, the module
- will be called nct6775.
+ will be called pc87427.
config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support"
@@ -982,33 +1073,18 @@ config SENSORS_NTC_THERMISTOR
This driver can also be built as a module. If so, the module
will be called ntc-thermistor.
-config SENSORS_PC87360
- tristate "National Semiconductor PC87360 family"
+config SENSORS_NCT6775
+ tristate "Nuvoton NCT6775F and compatibles"
depends on !PPC
select HWMON_VID
help
- If you say yes here you get access to the hardware monitoring
- functions of the National Semiconductor PC8736x Super-I/O chips.
- The PC87360, PC87363 and PC87364 only have fan monitoring and
- control. The PC87365 and PC87366 additionally have voltage and
- temperature monitoring.
-
- This driver can also be built as a module. If so, the module
- will be called pc87360.
-
-config SENSORS_PC87427
- tristate "National Semiconductor PC87427"
- depends on !PPC
- help
- If you say yes here you get access to the hardware monitoring
- functions of the National Semiconductor PC87427 Super-I/O chip.
- The chip has two distinct logical devices, one for fan speed
- monitoring and control, and one for voltage and temperature
- monitoring. Fan speed monitoring and control are supported, as
- well as temperature monitoring. Voltages aren't supported yet.
+ If you say yes here you get support for the hardware monitoring
+ functionality of the Nuvoton NCT6775F, NCT6776F, NCT6779D
+ and compatible Super-I/O chips. This driver replaces the
+ w83627ehf driver for NCT6775F and NCT6776F.
This driver can also be built as a module. If so, the module
- will be called pc87427.
+ will be called nct6775.
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
@@ -1073,21 +1149,6 @@ config SENSORS_SIS5595
This driver can also be built as a module. If so, the module
will be called sis5595.
-config SENSORS_SMM665
- tristate "Summit Microelectronics SMM665"
- depends on I2C
- default n
- help
- If you say yes here you get support for the hardware monitoring
- features of the Summit Microelectronics SMM665/SMM665B Six-Channel
- Active DC Output Controller / Monitor.
-
- Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
- Support for those chips is untested.
-
- This driver can also be built as a module. If so, the module will
- be called smm665.
-
config SENSORS_DME1737
tristate "SMSC DME1737, SCH311x and compatibles"
depends on I2C && !PPC
@@ -1209,6 +1270,31 @@ config SENSORS_SCH5636
This driver can also be built as a module. If so, the module
will be called sch5636.
+config SENSORS_SMM665
+ tristate "Summit Microelectronics SMM665"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for the hardware monitoring
+ features of the Summit Microelectronics SMM665/SMM665B Six-Channel
+ Active DC Output Controller / Monitor.
+
+ Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
+ Support for those chips is untested.
+
+ This driver can also be built as a module. If so, the module will
+ be called smm665.
+
+config SENSORS_ADC128D818
+ tristate "Texas Instruments ADC128D818"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments
+ ADC128D818 System Monitor with Temperature Sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called adc128d818.
+
config SENSORS_ADS1015
tristate "Texas Instruments ADS1015"
depends on I2C
@@ -1285,6 +1371,7 @@ config SENSORS_THMC50
config SENSORS_TMP102
tristate "Texas Instruments TMP102"
depends on I2C
+ depends on THERMAL || !THERMAL_OF
help
If you say yes here you get support for Texas Instruments TMP102
sensor chips.
@@ -1523,37 +1610,6 @@ config SENSORS_ULTRA45
This driver provides support for the Ultra45 workstation environmental
sensors.
-config SENSORS_APPLESMC
- tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
- depends on INPUT && X86
- select NEW_LEDS
- select LEDS_CLASS
- select INPUT_POLLDEV
- default n
- help
- This driver provides support for the Apple System Management
- Controller, which provides an accelerometer (Apple Sudden Motion
- Sensor), light sensors, temperature sensors, keyboard backlight
- control and fan control.
-
- Only Intel-based Apple's computers are supported (MacBook Pro,
- MacBook, MacMini).
-
- Data from the different sensors, keyboard backlight control and fan
- control are accessible via sysfs.
-
- This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick.
-
- Say Y here if you have an applicable laptop and want to experience
- the awesome power of applesmc.
-
-config SENSORS_MC13783_ADC
- tristate "Freescale MC13783/MC13892 ADC"
- depends on MFD_MC13XXX
- help
- Support for the A/D converter on MC13783 and MC13892 PMIC.
-
if ACPI
comment "ACPI drivers"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index ec7cde06eb52..199c401bf8d9 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
+obj-$(CONFIG_SENSORS_ADC128D818) += adc128d818.o
obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
@@ -70,6 +71,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
+obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
@@ -95,9 +97,12 @@ obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
+obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
+obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
+obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 6a34f7f48eb9..579bdf93be43 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -30,8 +30,7 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/err.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#define ACPI_POWER_METER_NAME "power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
new file mode 100644
index 000000000000..5ffd81f19d01
--- /dev/null
+++ b/drivers/hwmon/adc128d818.c
@@ -0,0 +1,491 @@
+/*
+ * Driver for TI ADC128D818 System Monitor with Temperature Sensor
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * Derived from lm80.c
+ * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
+ * and Philip Edelbrock <phil@netroedge.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan
+ * The chip also supports addresses 0x35..0x37. Don't scan those addresses
+ * since they are also used by some EEPROMs, which may result in false
+ * positives.
+ */
+static const unsigned short normal_i2c[] = {
+ 0x1d, 0x1e, 0x1f, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* registers */
+#define ADC128_REG_IN_MAX(nr) (0x2a + (nr) * 2)
+#define ADC128_REG_IN_MIN(nr) (0x2b + (nr) * 2)
+#define ADC128_REG_IN(nr) (0x20 + (nr))
+
+#define ADC128_REG_TEMP 0x27
+#define ADC128_REG_TEMP_MAX 0x38
+#define ADC128_REG_TEMP_HYST 0x39
+
+#define ADC128_REG_CONFIG 0x00
+#define ADC128_REG_ALARM 0x01
+#define ADC128_REG_MASK 0x03
+#define ADC128_REG_CONV_RATE 0x07
+#define ADC128_REG_ONESHOT 0x09
+#define ADC128_REG_SHUTDOWN 0x0a
+#define ADC128_REG_CONFIG_ADV 0x0b
+#define ADC128_REG_BUSY_STATUS 0x0c
+
+#define ADC128_REG_MAN_ID 0x3e
+#define ADC128_REG_DEV_ID 0x3f
+
+struct adc128_data {
+ struct i2c_client *client;
+ struct regulator *regulator;
+ int vref; /* Reference voltage in mV */
+ struct mutex update_lock;
+ bool valid; /* true if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ u16 in[3][7]; /* Register value, normalized to 12 bit
+ * 0: input voltage
+ * 1: min limit
+ * 2: max limit
+ */
+ s16 temp[3]; /* Register value, normalized to 9 bit
+ * 0: sensor 1: limit 2: hyst
+ */
+ u8 alarms; /* alarm register value */
+};
+
+static struct adc128_data *adc128_update_device(struct device *dev)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ struct adc128_data *ret = data;
+ int i, rv;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ for (i = 0; i < 7; i++) {
+ rv = i2c_smbus_read_word_swapped(client,
+ ADC128_REG_IN(i));
+ if (rv < 0)
+ goto abort;
+ data->in[0][i] = rv >> 4;
+
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_IN_MIN(i));
+ if (rv < 0)
+ goto abort;
+ data->in[1][i] = rv << 4;
+
+ rv = i2c_smbus_read_byte_data(client,
+ ADC128_REG_IN_MAX(i));
+ if (rv < 0)
+ goto abort;
+ data->in[2][i] = rv << 4;
+ }
+
+ rv = i2c_smbus_read_word_swapped(client, ADC128_REG_TEMP);
+ if (rv < 0)
+ goto abort;
+ data->temp[0] = rv >> 7;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_MAX);
+ if (rv < 0)
+ goto abort;
+ data->temp[1] = rv << 1;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_TEMP_HYST);
+ if (rv < 0)
+ goto abort;
+ data->temp[2] = rv << 1;
+
+ rv = i2c_smbus_read_byte_data(client, ADC128_REG_ALARM);
+ if (rv < 0)
+ goto abort;
+ data->alarms |= rv;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+ goto done;
+
+abort:
+ ret = ERR_PTR(rv);
+ data->valid = false;
+done:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t adc128_show_in(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = DIV_ROUND_CLOSEST(data->in[index][nr] * data->vref, 4095);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adc128_set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 reg, regval;
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ /* 10 mV LSB on limit registers */
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+ data->in[index][nr] = regval << 4;
+ reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ i2c_smbus_write_byte_data(data->client, reg, regval);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
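
/*
 * Worked example of the scaling above (illustrative numbers, assuming
 * vref = 2560 mV): a raw 12-bit input reading of 2048 reports
 * DIV_ROUND_CLOSEST(2048 * 2560, 4095) = 1280 mV. Writing "1280" to a
 * limit attribute stores DIV_ROUND_CLOSEST(1280, 10) = 128 in the 8-bit
 * limit register (10 mV per LSB) and caches it as 128 << 4 = 2048, so
 * cached limits and input readings share the same 12-bit scale.
 */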
+
+static ssize_t adc128_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+	temp = (s16)(data->temp[index] << 7) >> 7; /* sign extend from bit 8 */
+ return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
+}
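
/*
 * Worked example of the temperature encoding read back above: the value
 * is a 9-bit two's-complement quantity at 0.5 degrees C per LSB, kept
 * normalized in an s16. A raw value of 50 reads as 50 * 500 = 25000
 * (25.0 degrees C); a raw value of 0x1F0, i.e. -16 once sign-extended
 * from bit 8, reads as -16 * 500 = -8000 (-8.0 degrees C).
 */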
+
+static ssize_t adc128_set_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adc128_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ long val;
+ int err;
+ s8 regval;
+
+ err = kstrtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ data->temp[index] = regval << 1;
+ i2c_smbus_write_byte_data(data->client,
+ index == 1 ? ADC128_REG_TEMP_MAX
+ : ADC128_REG_TEMP_HYST,
+ regval);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t adc128_show_alarm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adc128_data *data = adc128_update_device(dev);
+ int mask = 1 << to_sensor_dev_attr(attr)->index;
+ u8 alarms;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /*
+ * Clear an alarm after reporting it to user space. If it is still
+ * active, the next update sequence will set the alarm bit again.
+ */
+ alarms = data->alarms;
+ data->alarms &= ~mask;
+
+ return sprintf(buf, "%u\n", !!(alarms & mask));
+}
+
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 1);
+static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 1);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 2, 2);
+
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 1);
+static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 3, 2);
+
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 1);
+static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 4, 2);
+
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 1);
+static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 5, 2);
+
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 1);
+static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
+ adc128_show_in, adc128_set_in, 6, 2);
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adc128_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+ adc128_show_temp, adc128_set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+ adc128_show_temp, adc128_set_temp, 2);
+
+static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, adc128_show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, adc128_show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, adc128_show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, adc128_show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, adc128_show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, adc128_show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, adc128_show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, adc128_show_alarm, NULL, 7);
+
+static struct attribute *adc128_attrs[] = {
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_in0_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+ &sensor_dev_attr_in5_alarm.dev_attr.attr,
+ &sensor_dev_attr_in6_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(adc128);
+
+static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ int man_id, dev_id;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ man_id = i2c_smbus_read_byte_data(client, ADC128_REG_MAN_ID);
+ dev_id = i2c_smbus_read_byte_data(client, ADC128_REG_DEV_ID);
+ if (man_id != 0x01 || dev_id != 0x09)
+ return -ENODEV;
+
+ /* Check unused bits for confirmation */
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG) & 0xf4)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONV_RATE) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_ONESHOT) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_SHUTDOWN) & 0xfe)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_CONFIG_ADV) & 0xf8)
+ return -ENODEV;
+ if (i2c_smbus_read_byte_data(client, ADC128_REG_BUSY_STATUS) & 0xfc)
+ return -ENODEV;
+
+ strlcpy(info->type, "adc128d818", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int adc128_init_client(struct adc128_data *data)
+{
+ struct i2c_client *client = data->client;
+ int err;
+
+ /*
+ * Reset chip to defaults.
+ * This makes most other initializations unnecessary.
+ */
+ err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x80);
+ if (err)
+ return err;
+
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, ADC128_REG_CONFIG, 0x01);
+ if (err)
+ return err;
+
+ /* If external vref is selected, configure the chip to use it */
+ if (data->regulator) {
+ err = i2c_smbus_write_byte_data(client,
+ ADC128_REG_CONFIG_ADV, 0x01);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int adc128_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct regulator *regulator;
+ struct device *hwmon_dev;
+ struct adc128_data *data;
+ int err, vref;
+
+ data = devm_kzalloc(dev, sizeof(struct adc128_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* vref is optional. If specified, it is used as the chip reference voltage */
+ regulator = devm_regulator_get_optional(dev, "vref");
+ if (!IS_ERR(regulator)) {
+ data->regulator = regulator;
+ err = regulator_enable(regulator);
+ if (err < 0)
+ return err;
+ vref = regulator_get_voltage(regulator);
+ if (vref < 0) {
+ err = vref;
+ goto error;
+ }
+ data->vref = DIV_ROUND_CLOSEST(vref, 1000);
+ } else {
+ data->vref = 2560; /* 2.56V, in mV */
+ }
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the chip */
+ err = adc128_init_client(data);
+ if (err < 0)
+ goto error;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, adc128_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (data->regulator)
+ regulator_disable(data->regulator);
+ return err;
+}
+
+static int adc128_remove(struct i2c_client *client)
+{
+ struct adc128_data *data = i2c_get_clientdata(client);
+
+ if (data->regulator)
+ regulator_disable(data->regulator);
+
+ return 0;
+}
+
+static const struct i2c_device_id adc128_id[] = {
+ { "adc128d818", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adc128_id);
+
+static struct i2c_driver adc128_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adc128d818",
+ },
+ .probe = adc128_probe,
+ .remove = adc128_remove,
+ .id_table = adc128_id,
+ .detect = adc128_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(adc128_driver);
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Driver for ADC128D818");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 7e16e5d07bc6..9ffc4c8ca8b5 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -2,7 +2,7 @@
* adm1025.c
*
* Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com>
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6
* voltages (including its own power source) and up to two temperatures
@@ -615,6 +615,6 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
module_i2c_driver(adm1025_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("ADM1025 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 9ee5e066423b..d19c790e410a 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Corentin LABBE <corentin.labbe@geomatys.fr>
*
- * Based on LM83 Driver by Jean Delvare <khali@linux-fr.org>
+ * Based on LM83 Driver by Jean Delvare <jdelvare@suse.de>
*
* Give only processor, motherboard temperatures and fan tachs
* Very rare chip please let me know if you use it
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 253ea396106d..a8a540ca8c34 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -4,7 +4,7 @@
* Based on lm75.c and lm85.c
* Supports adm1030 / adm1031
* Copyright (C) 2004 Alexandre d'Alton <alex@alexdalton.org>
- * Reworked by Jean Delvare <khali@linux-fr.org>
+ * Reworked by Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 22d008bbdc10..3cefd1aeb24f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -3,7 +3,7 @@
* Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
* Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
* Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
- * Copyright (C) 2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2009 Jean Delvare <jdelvare@suse.de>
*
* Derived from the lm83 driver by Jean Delvare
*
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index dafc63c6932d..ae208f612198 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -16,11 +16,7 @@
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/err.h>
-
-#include <acpi/acpi.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-
+#include <linux/acpi.h>
#define ATK_HID "ATK0110"
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index bbb0b0d463f7..f31bc4c48644 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -94,6 +94,8 @@ struct temp_data {
bool valid;
struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
+ struct attribute *attrs[TOTAL_ATTRS + 1];
+ struct attribute_group attr_group;
struct mutex update_lock;
};
@@ -114,12 +116,6 @@ struct pdev_entry {
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
-static ssize_t show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", DRVNAME);
-}
-
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -393,20 +389,10 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return adjust_tjmax(c, id, dev);
}
-static int create_name_attr(struct platform_data *pdata,
- struct device *dev)
-{
- sysfs_attr_init(&pdata->name_attr.attr);
- pdata->name_attr.attr.name = "name";
- pdata->name_attr.attr.mode = S_IRUGO;
- pdata->name_attr.show = show_name;
- return device_create_file(dev, &pdata->name_attr);
-}
-
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
- int err, i;
+ int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
@@ -424,16 +410,10 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
- err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
- if (err)
- goto exit_free;
+ tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
- return 0;
-
-exit_free:
- while (--i >= 0)
- device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
- return err;
+ tdata->attr_group.attrs = tdata->attrs;
+ return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}
@@ -548,7 +528,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
pdata->core_data[attr_no] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, &pdev->dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
if (err)
goto exit_free;
@@ -573,14 +553,12 @@ static void coretemp_add_core(unsigned int cpu, int pkg_flag)
}
static void coretemp_remove_core(struct platform_data *pdata,
- struct device *dev, int indx)
+ int indx)
{
- int i;
struct temp_data *tdata = pdata->core_data[indx];
/* Remove the sysfs attributes */
- for (i = 0; i < tdata->attr_size; i++)
- device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
@@ -588,34 +566,20 @@ static void coretemp_remove_core(struct platform_data *pdata,
static int coretemp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct platform_data *pdata;
- int err;
/* Initialize the per-package data structures */
- pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- err = create_name_attr(pdata, &pdev->dev);
- if (err)
- goto exit_free;
-
pdata->phys_proc_id = pdev->id;
platform_set_drvdata(pdev, pdata);
- pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
- goto exit_name;
- }
- return 0;
-
-exit_name:
- device_remove_file(&pdev->dev, &pdata->name_attr);
-exit_free:
- kfree(pdata);
- return err;
+ pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
+ pdata, NULL);
+ return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
}
static int coretemp_remove(struct platform_device *pdev)
@@ -625,11 +589,8 @@ static int coretemp_remove(struct platform_device *pdev)
for (i = MAX_CORE_DATA - 1; i >= 0; --i)
if (pdata->core_data[i])
- coretemp_remove_core(pdata, &pdev->dev, i);
+ coretemp_remove_core(pdata, i);
- device_remove_file(&pdev->dev, &pdata->name_attr);
- hwmon_device_unregister(pdata->hwmon_dev);
- kfree(pdata);
return 0;
}
@@ -777,7 +738,7 @@ static void put_core_offline(unsigned int cpu)
return;
if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
- coretemp_remove_core(pdata, &pdev->dev, indx);
+ coretemp_remove_core(pdata, indx);
/*
* If a HT sibling of a core is taken offline, but another HT sibling
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0)
return hwmon_irq;
- hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
- if (hwmon_irq < 0)
- return hwmon_irq;
-
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 872d76744e30..fc6f5d54e7f7 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -4,7 +4,7 @@
* Christian W. Zuckschwerdt <zany@triq.net> 2000-11-23
* based on lm75.c by Frodo Looijaard <frodol@dds.nl>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* The DS1621 device is a digital temperature/thermometer with 9-bit
* resolution, a thermal alarm output (Tout), and user-defined minimum
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 2c137b26acb4..fd892dd48e4c 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -349,7 +349,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
REG_FAN_CONF1, status);
mutex_unlock(&data->update_lock);
- return -EIO;
+ return status;
}
status &= 0x9F;
status |= (new_range_bits << 5);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 82e661e8241b..f76a74cb6dc4 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -1,6 +1,6 @@
/*
* emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
- * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -548,6 +548,6 @@ static struct i2c_driver emc6w201_driver = {
module_i2c_driver(emc6w201_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 15b7f5281def..1a8aa1265262 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1,7 +1,7 @@
/*
* f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O
* chips integrated hardware monitoring features
- * Copyright (C) 2005-2006 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2006 Jean Delvare <jdelvare@suse.de>
*
* The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates
* complete hardware monitoring features: voltage, fan and temperature
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 95257a5621d8..1e9830513045 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -4,7 +4,7 @@
* Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
* Kyosti Malkki <kmalkki@cc.hut.fi>
* Copyright (C) 2004 Hong-Gunn Chew <hglinux@gunnet.org> and
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e176a43af63d..a26c385a435b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/string.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -99,6 +100,10 @@ hwmon_device_register_with_groups(struct device *dev, const char *name,
struct hwmon_device *hwdev;
int err, id;
+ /* Do not accept invalid characters in hwmon name attribute */
+ if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
+ return ERR_PTR(-EINVAL);
+
id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
new file mode 100644
index 000000000000..b7b1297a9b02
--- /dev/null
+++ b/drivers/hwmon/ibmpowernv.c
@@ -0,0 +1,529 @@
+/*
+ * hwmon driver for temperature/power/fan on IBM PowerNV platform
+ * Copyright (C) 2013 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <asm/opal.h>
+#include <linux/err.h>
+
+MODULE_DESCRIPTION("IBM PowerNV Platform power/temp/fan sensor hwmon module");
+MODULE_LICENSE("GPL");
+
+#define MAX_ATTR_LENGTH 32
+
+/*
+ * Device tree sensor name prefixes. The device tree has the names in the
+ * format "cooling-fan#2-faulted", where "cooling-fan" is the sensor type,
+ * 2 is the sensor instance number, and "faulted" is the attribute suffix.
+ */
+#define DT_FAULT_ATTR_SUFFIX "faulted"
+#define DT_DATA_ATTR_SUFFIX "data"
+#define DT_THRESHOLD_ATTR_SUFFIX "thrs"
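+/*
+ * For example (illustration only): a fan-type node named "cooling-fan#2-data"
+ * is parsed as sensor type FAN, instance 2, attribute INPUT, and ends up as
+ * the hwmon attribute "fan2_input" (see get_sensor_attr_properties() and
+ * create_sensor_attr()).
+ */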
+
+enum sensors {
+ FAN,
+ TEMPERATURE,
+ POWERSUPPLY,
+ POWER,
+ MAX_SENSOR_TYPE,
+};
+
+enum attributes {
+ INPUT,
+ MINIMUM,
+ MAXIMUM,
+ FAULT,
+ MAX_ATTR_TYPES
+};
+
+static struct sensor_name {
+ char *name;
+	char *compatible;
+} sensor_names[] = {
+ {"fan-sensor", "ibm,opal-sensor-cooling-fan"},
+ {"amb-temp-sensor", "ibm,opal-sensor-amb-temp"},
+ {"power-sensor", "ibm,opal-sensor-power-supply"},
+ {"power", "ibm,opal-sensor-power"}
+};
+
+static const char * const attribute_type_table[] = {
+ "input",
+ "min",
+ "max",
+ "fault",
+ NULL
+};
+
+struct pdev_entry {
+ struct list_head list;
+ struct platform_device *pdev;
+ enum sensors type;
+};
+
+static LIST_HEAD(pdev_list);
+
+/*
+ * The sensors are grouped by type.
+ *
+ * Sensors of the same type are registered under a common platform device,
+ * so the pdev is shared by all sensors of that type. For example,
+ * temp1_input, temp1_max, temp2_input and temp2_max all share the same
+ * platform device.
+ *
+ * "sensor_data" is the platform device specific data. There is one
+ * hwmon_device instance for all sensors of the same type; it also holds
+ * the list of all sensors of that type with different attributes and
+ * indices.
+ */
+struct sensor_specific_data {
+ u32 sensor_id; /* The hex value as in the device tree */
+ u32 sensor_index; /* The sensor instance index */
+ struct sensor_device_attribute sd_attr;
+ enum attributes attr_type;
+ char attr_name[64];
+};
+
+struct sensor_data {
+ struct device *hwmon_dev;
+ struct list_head sensor_list;
+ struct device_attribute name_attr;
+};
+
+struct sensor_entry {
+ struct list_head list;
+ struct sensor_specific_data *sensor_data;
+};
+
+static struct platform_device *powernv_sensor_get_pdev(enum sensors type)
+{
+ struct pdev_entry *p;
+ list_for_each_entry(p, &pdev_list, list)
+ if (p->type == type)
+ return p->pdev;
+
+ return NULL;
+}
+
+static struct sensor_specific_data *powernv_sensor_get_sensor_data(
+ struct sensor_data *pdata,
+ int index, enum attributes attr_type)
+{
+ struct sensor_entry *p;
+ list_for_each_entry(p, &pdata->sensor_list, list)
+ if ((p->sensor_data->sensor_index == index) &&
+ (attr_type == p->sensor_data->attr_type))
+ return p->sensor_data;
+
+ return NULL;
+}
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ return sprintf(buf, "%s\n", pdev->name);
+}
+
+/*
+ * Note: data read from a sensor must be converted to the unit appropriate
+ * for its sensor type.
+ */
+static ssize_t show_sensor(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(devattr);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sensor_data *pdata = platform_get_drvdata(pdev);
+ struct sensor_specific_data *tdata = NULL;
+ enum sensors sensor_type = pdev->id;
+ u32 x = -1;
+ int ret;
+
+ if (sd_attr && sd_attr->dev_attr.attr.name) {
+ char *pos = strchr(sd_attr->dev_attr.attr.name, '_');
+ int i;
+
+ for (i = 0; i < MAX_ATTR_TYPES; i++) {
+ if (strcmp(pos+1, attribute_type_table[i]) == 0) {
+ tdata = powernv_sensor_get_sensor_data(pdata,
+ sd_attr->index, i);
+ break;
+ }
+ }
+ }
+
+ if (tdata) {
+ ret = opal_get_sensor_data(tdata->sensor_id, &x);
+ if (ret)
+ x = -1;
+ }
+
+ if (sensor_type == TEMPERATURE && x > 0) {
+		/*
+		 * Temperature is reported in degrees Celsius;
+		 * convert it to millidegrees.
+		 */
+		x = x * 1000;
+	} else if (sensor_type == POWER && x > 0) {
+		/* Power is reported in watts; convert it to microwatts */
+		x = x * 1000000;
+ }
+
+ return sprintf(buf, "%d\n", x);
+}
+
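+/*
+ * Extract the sensor instance number from a device tree name such as
+ * "cooling-fan#2-faulted"; returns 0 if no "#<number>-" part is found.
+ */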
+static u32 get_sensor_index_from_name(const char *name)
+{
+ char *hash_position = strchr(name, '#');
+ u32 index = 0, copy_length;
+ char newbuf[8];
+
+ if (hash_position) {
+ copy_length = strchr(hash_position, '-') - hash_position - 1;
+ if (copy_length < sizeof(newbuf)) {
+			strncpy(newbuf, hash_position + 1, copy_length);
+			newbuf[copy_length] = '\0'; /* strncpy() does not terminate here */
+			sscanf(newbuf, "%d", &index);
+ }
+ }
+
+ return index;
+}
+
+static inline void get_sensor_suffix_from_name(const char *name, char *suffix)
+{
+ char *dash_position = strrchr(name, '-');
+ if (dash_position)
+ strncpy(suffix, dash_position+1, MAX_ATTR_LENGTH);
+ else
+ strcpy(suffix,"");
+}
+
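+/*
+ * Map a device tree sensor name to an attribute type and instance index.
+ * "faulted" maps to FAULT, "data" to INPUT, and "thrs" to MAXIMUM for
+ * temperature sensors or MINIMUM for fans; combinations not listed in the
+ * final check are rejected with -ENOENT.
+ */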
+static int get_sensor_attr_properties(const char *sensor_name,
+ enum sensors sensor_type, enum attributes *attr_type,
+ u32 *sensor_index)
+{
+ char suffix[MAX_ATTR_LENGTH];
+
+ *attr_type = MAX_ATTR_TYPES;
+ *sensor_index = get_sensor_index_from_name(sensor_name);
+ if (*sensor_index == 0)
+ return -EINVAL;
+
+ get_sensor_suffix_from_name(sensor_name, suffix);
+ if (strcmp(suffix, "") == 0)
+ return -EINVAL;
+
+ if (strcmp(suffix, DT_FAULT_ATTR_SUFFIX) == 0)
+ *attr_type = FAULT;
+ else if (strcmp(suffix, DT_DATA_ATTR_SUFFIX) == 0)
+ *attr_type = INPUT;
+ else if ((sensor_type == TEMPERATURE) &&
+ (strcmp(suffix, DT_THRESHOLD_ATTR_SUFFIX) == 0))
+ *attr_type = MAXIMUM;
+ else if ((sensor_type == FAN) &&
+ (strcmp(suffix, DT_THRESHOLD_ATTR_SUFFIX) == 0))
+ *attr_type = MINIMUM;
+ else
+ return -ENOENT;
+
+ if (((sensor_type == FAN) && ((*attr_type == INPUT) ||
+ (*attr_type == MINIMUM)))
+ || ((sensor_type == TEMPERATURE) && ((*attr_type == INPUT) ||
+ (*attr_type == MAXIMUM)))
+ || ((sensor_type == POWER) && ((*attr_type == INPUT))))
+ return 0;
+
+ return -ENOENT;
+}
+
+static int create_sensor_attr(struct sensor_specific_data *tdata,
+ struct device *dev, enum sensors sensor_type,
+ enum attributes attr_type)
+{
+ int err = 0;
+ char temp_file_prefix[50];
+ static const char *const file_name_format = "%s%d_%s";
+
+ tdata->attr_type = attr_type;
+
+ if (sensor_type == FAN)
+ strcpy(temp_file_prefix, "fan");
+ else if (sensor_type == TEMPERATURE)
+ strcpy(temp_file_prefix, "temp");
+ else if (sensor_type == POWERSUPPLY)
+ strcpy(temp_file_prefix, "powersupply");
+ else if (sensor_type == POWER)
+ strcpy(temp_file_prefix, "power");
+
+ snprintf(tdata->attr_name, sizeof(tdata->attr_name), file_name_format,
+ temp_file_prefix, tdata->sensor_index,
+ attribute_type_table[tdata->attr_type]);
+
+ sysfs_attr_init(&tdata->sd_attr.dev_attr.attr);
+ tdata->sd_attr.dev_attr.attr.name = tdata->attr_name;
+ tdata->sd_attr.dev_attr.attr.mode = S_IRUGO;
+ tdata->sd_attr.dev_attr.show = show_sensor;
+
+ tdata->sd_attr.index = tdata->sensor_index;
+ err = device_create_file(dev, &tdata->sd_attr.dev_attr);
+
+ return err;
+}
+
+static int create_name_attr(struct sensor_data *pdata,
+ struct device *dev)
+{
+ sysfs_attr_init(&pdata->name_attr.attr);
+ pdata->name_attr.attr.name = "name";
+ pdata->name_attr.attr.mode = S_IRUGO;
+ pdata->name_attr.show = show_name;
+ return device_create_file(dev, &pdata->name_attr);
+}
+
+static int create_platform_device(enum sensors sensor_type,
+ struct platform_device **pdev)
+{
+ struct pdev_entry *pdev_entry = NULL;
+ int err;
+
+ *pdev = platform_device_alloc(sensor_names[sensor_type].name,
+ sensor_type);
+ if (!*pdev) {
+ pr_err("Device allocation failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
+ if (!pdev_entry) {
+ pr_err("Device allocation failed\n");
+ err = -ENOMEM;
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(*pdev);
+ if (err) {
+ pr_err("Device addition failed (%d)\n", err);
+ goto exit_device_free;
+ }
+
+ pdev_entry->pdev = *pdev;
+ pdev_entry->type = (*pdev)->id;
+
+ list_add_tail(&pdev_entry->list, &pdev_list);
+
+ return 0;
+exit_device_free:
+ kfree(pdev_entry);
+exit_device_put:
+ platform_device_put(*pdev);
+exit:
+ return err;
+}
+
+static int create_sensor_data(struct platform_device *pdev)
+{
+ struct sensor_data *pdata = NULL;
+ int err = 0;
+
+ pdata = kzalloc(sizeof(struct sensor_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = create_name_attr(pdata, &pdev->dev);
+ if (err)
+ goto exit_free;
+
+ pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n",
+ err);
+ goto exit_name;
+ }
+
+ INIT_LIST_HEAD(&pdata->sensor_list);
+ platform_set_drvdata(pdev, pdata);
+
+ return 0;
+
+exit_name:
+ device_remove_file(&pdev->dev, &pdata->name_attr);
+exit_free:
+ kfree(pdata);
+exit:
+ return err;
+}
+
+static void delete_sensor_attr(struct sensor_data *pdata)
+{
+ struct sensor_entry *s, *l;
+
+ list_for_each_entry_safe(s, l, &pdata->sensor_list, list) {
+ struct sensor_specific_data *tdata = s->sensor_data;
+ kfree(tdata);
+ list_del(&s->list);
+ kfree(s);
+ }
+}
+
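+/*
+ * Register a single sensor: create the per-type platform device and its
+ * hwmon device on first use, then allocate the per-sensor data, create the
+ * sysfs attribute and link it into the platform device's sensor list.
+ */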
+static int powernv_sensor_init(u32 sensor_id, const struct device_node *np,
+ enum sensors sensor_type, enum attributes attr_type,
+ u32 sensor_index)
+{
+ struct platform_device *pdev = powernv_sensor_get_pdev(sensor_type);
+ struct sensor_specific_data *tdata;
+ struct sensor_entry *sensor_entry;
+ struct sensor_data *pdata;
+ int err = 0;
+
+ if (!pdev) {
+ err = create_platform_device(sensor_type, &pdev);
+ if (err)
+ goto exit;
+
+ err = create_sensor_data(pdev);
+ if (err)
+ goto exit;
+ }
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ tdata = kzalloc(sizeof(struct sensor_specific_data), GFP_KERNEL);
+ if (!tdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ tdata->sensor_id = sensor_id;
+ tdata->sensor_index = sensor_index;
+
+ err = create_sensor_attr(tdata, &pdev->dev, sensor_type, attr_type);
+ if (err)
+ goto exit_free;
+
+ sensor_entry = kzalloc(sizeof(struct sensor_entry), GFP_KERNEL);
+ if (!sensor_entry) {
+ err = -ENOMEM;
+ goto exit_attr;
+ }
+
+ sensor_entry->sensor_data = tdata;
+
+ list_add_tail(&sensor_entry->list, &pdata->sensor_list);
+
+ return 0;
+exit_attr:
+ device_remove_file(&pdev->dev, &tdata->sd_attr.dev_attr);
+exit_free:
+ kfree(tdata);
+exit:
+ return err;
+}
+
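+/*
+ * Undo powernv_hwmon_init(): free the per-sensor data, unregister the
+ * hwmon devices and remove the per-type platform devices.
+ */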
+static void delete_unregister_sensors(void)
+{
+ struct pdev_entry *p, *n;
+
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ struct sensor_data *pdata = platform_get_drvdata(p->pdev);
+ if (pdata) {
+ delete_sensor_attr(pdata);
+
+ hwmon_device_unregister(pdata->hwmon_dev);
+ kfree(pdata);
+ }
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static int __init powernv_hwmon_init(void)
+{
+ struct device_node *opal, *np = NULL;
+ enum attributes attr_type;
+ enum sensors type;
+ const u32 *sensor_id;
+ u32 sensor_index;
+ int err;
+
+ opal = of_find_node_by_path("/ibm,opal/sensors");
+ if (!opal) {
+ pr_err("%s: Opal 'sensors' node not found\n", __func__);
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(opal, np) {
+ if (np->name == NULL)
+ continue;
+
+ for (type = 0; type < MAX_SENSOR_TYPE; type++)
+ if (of_device_is_compatible(np,
+				sensor_names[type].compatible))
+ break;
+
+ if (type == MAX_SENSOR_TYPE)
+ continue;
+
+ if (get_sensor_attr_properties(np->name, type, &attr_type,
+ &sensor_index))
+ continue;
+
+ sensor_id = of_get_property(np, "sensor-id", NULL);
+ if (!sensor_id) {
+ pr_info("%s: %s doesn't have sensor-id\n", __func__,
+ np->name);
+ continue;
+ }
+
+ err = powernv_sensor_init(*sensor_id, np, type, attr_type,
+ sensor_index);
+ if (err) {
+ of_node_put(opal);
+ goto exit;
+ }
+ }
+ of_node_put(opal);
+
+ return 0;
+exit:
+ delete_unregister_sensors();
+ return err;
+
+}
+
+static void powernv_hwmon_exit(void)
+{
+ delete_unregister_sensors();
+}
+
+module_init(powernv_hwmon_init);
+module_exit(powernv_hwmon_exit);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 708081b68c6f..9fbb1b1fdff3 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -31,6 +31,7 @@ struct iio_hwmon_state {
int num_channels;
struct device *hwmon_dev;
struct attribute_group attr_group;
+ const struct attribute_group *groups[2];
struct attribute **attrs;
};
@@ -56,19 +57,6 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
return sprintf(buf, "%d\n", result);
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- const char *name = "iio_hwmon";
-
- if (dev->of_node && dev->of_node->name)
- name = dev->of_node->name;
-
- return sprintf(buf, "%s\n", name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
static int iio_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -78,6 +66,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
int in_i = 1, temp_i = 1, curr_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
+ const char *name = "iio_hwmon";
+
+ if (dev->of_node && dev->of_node->name)
+ name = dev->of_node->name;
channels = iio_channel_get_all(dev);
if (IS_ERR(channels))
@@ -96,7 +88,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->num_channels++;
st->attrs = devm_kzalloc(dev,
- sizeof(*st->attrs) * (st->num_channels + 2),
+ sizeof(*st->attrs) * (st->num_channels + 1),
GFP_KERNEL);
if (st->attrs == NULL) {
ret = -ENOMEM;
@@ -144,22 +136,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
a->index = i;
st->attrs[i] = &a->dev_attr.attr;
}
- st->attrs[st->num_channels] = &dev_attr_name.attr;
- st->attr_group.attrs = st->attrs;
- platform_set_drvdata(pdev, st);
- ret = sysfs_create_group(&dev->kobj, &st->attr_group);
- if (ret < 0)
- goto error_release_channels;
- st->hwmon_dev = hwmon_device_register(dev);
+ st->attr_group.attrs = st->attrs;
+ st->groups[0] = &st->attr_group;
+ st->hwmon_dev = hwmon_device_register_with_groups(dev, name, st,
+ st->groups);
if (IS_ERR(st->hwmon_dev)) {
ret = PTR_ERR(st->hwmon_dev);
- goto error_remove_group;
+ goto error_release_channels;
}
+ platform_set_drvdata(pdev, st);
return 0;
-error_remove_group:
- sysfs_remove_group(&dev->kobj, &st->attr_group);
error_release_channels:
iio_channel_release_all(channels);
return ret;
@@ -170,7 +158,6 @@ static int iio_hwmon_remove(struct platform_device *pdev)
struct iio_hwmon_state *st = platform_get_drvdata(pdev);
hwmon_device_unregister(st->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &st->attr_group);
iio_channel_release_all(st->channels);
return 0;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 29ffa27c60b8..70749fc15a4f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -10,7 +10,8 @@
* This driver supports only the Environment Controller in the IT8705F and
* similar parts. The other devices are supported by different drivers.
*
- * Supports: IT8705F Super I/O chip w/LPC interface
+ * Supports: IT8603E Super I/O chip w/LPC interface
+ * IT8705F Super I/O chip w/LPC interface
* IT8712F Super I/O chip w/LPC interface
* IT8716F Super I/O chip w/LPC interface
* IT8718F Super I/O chip w/LPC interface
@@ -26,7 +27,7 @@
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
- * Copyright (C) 2005-2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -64,7 +65,7 @@
#define DRVNAME "it87"
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
- it8772, it8782, it8783 };
+ it8772, it8782, it8783, it8603 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -146,6 +147,7 @@ static inline void superio_exit(void)
#define IT8772E_DEVID 0x8772
#define IT8782F_DEVID 0x8782
#define IT8783E_DEVID 0x8783
+#define IT8603E_DEVID 0x8603
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -315,6 +317,12 @@ static const struct it87_devices it87_devices[] = {
| FEAT_TEMP_OLD_PECI,
.old_peci_mask = 0x4,
},
+ [it8603] = {
+ .name = "it8603",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ .peci_mask = 0x07,
+ },
};
#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
@@ -361,7 +369,7 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[9][3]; /* [nr][0]=in, [1]=min, [2]=max */
+ u8 in[10][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
u16 fan[5][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
@@ -578,6 +586,7 @@ static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
7, 2);
static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
+static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
/* 3 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
@@ -734,7 +743,7 @@ static int pwm_mode(const struct it87_data *data, int nr)
{
int ctrl = data->fan_main_ctrl & (1 << nr);
- if (ctrl == 0) /* Full speed */
+ if (ctrl == 0 && data->type != it8603) /* Full speed */
return 0;
if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
return 2;
@@ -929,6 +938,10 @@ static ssize_t set_pwm_enable(struct device *dev,
return -EINVAL;
}
+ /* IT8603E does not have on/off mode */
+ if (val == 0 && data->type == it8603)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
if (val == 0) {
@@ -948,10 +961,13 @@ static ssize_t set_pwm_enable(struct device *dev,
else /* Automatic mode */
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
- /* set SmartGuardian mode */
- data->fan_main_ctrl |= (1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
- data->fan_main_ctrl);
+
+ if (data->type != it8603) {
+ /* set SmartGuardian mode */
+ data->fan_main_ctrl |= (1 << nr);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
+ }
}
mutex_unlock(&data->update_lock);
@@ -1415,6 +1431,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
+/* in9 is the special AVCC3 input on the IT8603E */
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
static ssize_t show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
@@ -1424,7 +1442,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct attribute *it87_attributes_in[9][5] = {
+static struct attribute *it87_attributes_in[10][5] = {
{
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1476,9 +1494,12 @@ static struct attribute *it87_attributes_in[9][5] = {
}, {
&sensor_dev_attr_in8_input.dev_attr.attr,
NULL
+}, {
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ NULL
} };
-static const struct attribute_group it87_group_in[9] = {
+static const struct attribute_group it87_group_in[10] = {
{ .attrs = it87_attributes_in[0] },
{ .attrs = it87_attributes_in[1] },
{ .attrs = it87_attributes_in[2] },
@@ -1488,6 +1509,7 @@ static const struct attribute_group it87_group_in[9] = {
{ .attrs = it87_attributes_in[6] },
{ .attrs = it87_attributes_in[7] },
{ .attrs = it87_attributes_in[8] },
+ { .attrs = it87_attributes_in[9] },
};
static struct attribute *it87_attributes_temp[3][6] = {
@@ -1546,7 +1568,8 @@ static struct attribute *it87_attributes_in_beep[] = {
&sensor_dev_attr_in5_beep.dev_attr.attr,
&sensor_dev_attr_in6_beep.dev_attr.attr,
&sensor_dev_attr_in7_beep.dev_attr.attr,
- NULL
+ NULL,
+ NULL,
};
static struct attribute *it87_attributes_temp_beep[] = {
@@ -1685,6 +1708,7 @@ static struct attribute *it87_attributes_label[] = {
&sensor_dev_attr_in3_label.dev_attr.attr,
&sensor_dev_attr_in7_label.dev_attr.attr,
&sensor_dev_attr_in8_label.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
NULL
};
@@ -1742,6 +1766,9 @@ static int __init it87_find(unsigned short *address,
case IT8783E_DEVID:
sio_data->type = it8783;
break;
+	case IT8603E_DEVID:
+ sio_data->type = it8603;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1763,11 +1790,16 @@ static int __init it87_find(unsigned short *address,
err = 0;
sio_data->revision = superio_inb(DEVREV) & 0x0f;
- pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
- chip_type, *address, sio_data->revision);
+ pr_info("Found IT%04x%c chip at 0x%x, revision %d\n", chip_type,
+ chip_type == 0x8771 || chip_type == 0x8772 ||
+ chip_type == 0x8603 ? 'E' : 'F', *address,
+ sio_data->revision);
/* in8 (Vbat) is always internal */
sio_data->internal = (1 << 2);
+ /* Only the IT8603E has in9 */
+ if (sio_data->type != it8603)
+ sio_data->skip_in |= (1 << 9);
/* Read GPIO config and VID value from LDN 7 (GPIO) */
if (sio_data->type == it87) {
@@ -1844,7 +1876,38 @@ static int __init it87_find(unsigned short *address,
sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8603) {
+ int reg27, reg29;
+
+ sio_data->skip_vid = 1; /* No VID */
+ superio_select(GPIO);
+ reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+
+ /* Check if fan3 is there or not */
+ if (reg27 & (1 << 6))
+ sio_data->skip_pwm |= (1 << 2);
+ if (reg27 & (1 << 7))
+ sio_data->skip_fan |= (1 << 2);
+
+ /* Check if fan2 is there or not */
+ reg29 = superio_inb(IT87_SIO_GPIO5_REG);
+ if (reg29 & (1 << 1))
+ sio_data->skip_pwm |= (1 << 1);
+ if (reg29 & (1 << 2))
+ sio_data->skip_fan |= (1 << 1);
+
+ sio_data->skip_in |= (1 << 5); /* No VIN5 */
+ sio_data->skip_in |= (1 << 6); /* No VIN6 */
+
+ /* no fan4 */
+ sio_data->skip_pwm |= (1 << 3);
+ sio_data->skip_fan |= (1 << 3);
+
+ sio_data->internal |= (1 << 1); /* in7 is VSB */
+ sio_data->internal |= (1 << 3); /* in9 is AVCC */
+
+ sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
bool uart6;
@@ -1966,7 +2029,7 @@ static void it87_remove_files(struct device *dev)
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
- for (i = 0; i < 9; i++) {
+ for (i = 0; i < 10; i++) {
if (sio_data->skip_in & (1 << i))
continue;
sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
@@ -2080,6 +2143,8 @@ static int it87_probe(struct platform_device *pdev)
data->in_scaled |= (1 << 7); /* in7 is VSB */
if (sio_data->internal & (1 << 2))
data->in_scaled |= (1 << 8); /* in8 is Vbat */
+ if (sio_data->internal & (1 << 3))
+ data->in_scaled |= (1 << 9); /* in9 is AVCC */
} else if (sio_data->type == it8782 || sio_data->type == it8783) {
if (sio_data->internal & (1 << 0))
data->in_scaled |= (1 << 3); /* in3 is VCC5V */
@@ -2102,7 +2167,7 @@ static int it87_probe(struct platform_device *pdev)
if (err)
return err;
- for (i = 0; i < 9; i++) {
+ for (i = 0; i < 10; i++) {
if (sio_data->skip_in & (1 << i))
continue;
err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
@@ -2202,7 +2267,7 @@ static int it87_probe(struct platform_device *pdev)
}
/* Export labels for internal sensors */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < 4; i++) {
if (!(sio_data->internal & (1 << i)))
continue;
err = sysfs_create_file(&dev->kobj,
@@ -2383,8 +2448,9 @@ static void it87_init_device(struct platform_device *pdev)
}
data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
- /* Set tachometers to 16-bit mode if needed */
- if (has_16bit_fans(data)) {
+ /* Set tachometers to 16-bit mode if needed, IT8603E (and IT8728F?)
+ * has it by default */
+ if (has_16bit_fans(data) && data->type != it8603) {
tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
if (~tmp & 0x07 & data->has_fan) {
dev_dbg(&pdev->dev,
@@ -2464,6 +2530,8 @@ static struct it87_data *it87_update_device(struct device *dev)
}
/* in8 (battery) has no limit registers */
data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
+ if (data->type == it8603)
+ data->in[9][0] = it87_read_value(data, 0x2f);
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
@@ -2620,7 +2688,7 @@ static void __exit sm_it87_exit(void)
}
-MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index a183e488db78..7488e36809c8 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -28,7 +28,6 @@
#include <linux/hwmon.h>
struct jz4740_hwmon {
- struct resource *mem;
void __iomem *base;
int irq;
@@ -106,6 +105,7 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_hwmon *hwmon;
+ struct resource *mem;
hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -120,25 +120,10 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
return hwmon->irq;
}
- hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!hwmon->mem) {
- dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- return -ENOENT;
- }
-
- hwmon->mem = devm_request_mem_region(&pdev->dev, hwmon->mem->start,
- resource_size(hwmon->mem), pdev->name);
- if (!hwmon->mem) {
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- return -EBUSY;
- }
-
- hwmon->base = devm_ioremap_nocache(&pdev->dev, hwmon->mem->start,
- resource_size(hwmon->mem));
- if (!hwmon->base) {
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- return -EBUSY;
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(hwmon->base))
+ return PTR_ERR(hwmon->base);
init_completion(&hwmon->read_completion);
mutex_init(&hwmon->lock);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index d0def50ea860..b4ad598feb6c 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1,7 +1,7 @@
/*
* lm63.c - driver for the National Semiconductor LM63 temperature sensor
* with integrated fan control
- * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008 Jean Delvare <jdelvare@suse.de>
* Based on the lm90 driver.
*
* The LM63 is a sensor chip made by National Semiconductor. It measures
@@ -1202,6 +1202,6 @@ static struct i2c_driver lm63_driver = {
module_i2c_driver(lm63_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM63 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 7e3ef134f1d2..84a55eacd903 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -27,6 +27,8 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/thermal.h>
#include "lm75.h"
@@ -71,6 +73,7 @@ static const u8 LM75_REG_TEMP[3] = {
/* Each client has this additional data */
struct lm75_data {
struct device *hwmon_dev;
+ struct thermal_zone_device *tz;
struct mutex update_lock;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
@@ -91,22 +94,36 @@ static struct lm75_data *lm75_update_device(struct device *dev);
/*-----------------------------------------------------------------------*/
+static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
+{
+ return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
+}
+
/* sysfs attributes for hwmon */
+static int lm75_read_temp(void *dev, long *temp)
+{
+ struct lm75_data *data = lm75_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ *temp = lm75_reg_to_mc(data->temp[0], data->resolution);
+
+ return 0;
+}
+
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
- long temp;
if (IS_ERR(data))
return PTR_ERR(data);
- temp = ((data->temp[attr->index] >> (16 - data->resolution)) * 1000)
- >> (data->resolution - 8);
-
- return sprintf(buf, "%ld\n", temp);
+ return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index],
+ data->resolution));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -273,6 +290,13 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto exit_remove;
}
+ data->tz = thermal_zone_of_sensor_register(&client->dev,
+ 0,
+ &client->dev,
+ lm75_read_temp, NULL);
+ if (IS_ERR(data->tz))
+ data->tz = NULL;
+
dev_info(&client->dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
@@ -287,6 +311,7 @@ static int lm75_remove(struct i2c_client *client)
{
struct lm75_data *data = i2c_get_clientdata(client);
+ thermal_zone_of_sensor_unregister(&client->dev, data->tz);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index a2f3b4a365e4..9efadfc851bc 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -2,7 +2,7 @@
* lm78.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
- * Copyright (c) 2007, 2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007, 2011 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1108,7 +1108,7 @@ static void __exit sm_lm78_exit(void)
i2c_del_driver(&lm78_driver);
}
-MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM78/LM79 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index e998034f1f11..abd270243ba7 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -1,7 +1,7 @@
/*
* lm83.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
* a sensor chip made by National Semiconductor. It reports up to four
@@ -427,6 +427,6 @@ static struct lm83_data *lm83_update_device(struct device *dev)
module_i2c_driver(lm83_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM83 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 3894c408fda3..bed4af358308 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
* Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
* Copyright (c) 2004 Justin Thiessen <jthiessen@penguincomputing.com>
- * Copyright (C) 2007--2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007--2009 Jean Delvare <jdelvare@suse.de>
*
* Chip details at <http://www.national.com/ds/LM/LM85.pdf>
*
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 333092ce2465..4c5f20231c1a 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>
* Stephen Rousset <stephen.rousset@rocketlogix.com>
* Dan Eaton <dan.eaton@rocketlogix.com>
- * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008 Jean Delvare <jdelvare@suse.de>
*
* Original port to Linux 2.6 by Jeff Oliver.
*
@@ -1011,6 +1011,6 @@ static struct i2c_driver lm87_driver = {
module_i2c_driver(lm87_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org> and others");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de> and others");
MODULE_DESCRIPTION("LM87 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 8b8f3aa49726..701e952ae523 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1,7 +1,7 @@
/*
* lm90.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
*
* Based on the lm83 driver. The LM90 is a sensor chip made by National
* Semiconductor. It reports up to two temperatures (its own plus up to
@@ -1679,6 +1679,6 @@ static struct i2c_driver lm90_driver = {
module_i2c_driver(lm90_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM90/ADM1032 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 71626f3c8742..9d0e87a4f0cb 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -1,6 +1,6 @@
/*
* lm92 - Hardware monitoring driver
- * Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2008 Jean Delvare <jdelvare@suse.de>
*
* Based on the lm90 driver, with some ideas taken from the lm_sensors
* lm92 driver as well.
@@ -440,6 +440,6 @@ static struct i2c_driver lm92_driver = {
module_i2c_driver(lm92_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("LM92/MAX6635 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index a6f46058b1be..6f1c6c0dbaf5 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -12,7 +12,7 @@
* Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
*
* derived in part from w83l785ts.c:
- * Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2003-2004 Jean Delvare <jdelvare@suse.de>
*
* Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
* Copyright (c) 2005 Aspen Systems, Inc.
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 4b68fb2a31d7..cdf19adaec79 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -89,7 +89,7 @@ static const u8 lm95241_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95241_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated, interval; /* in jiffies */
char valid; /* zero until following fields are valid */
@@ -113,8 +113,8 @@ static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
static struct lm95241_data *lm95241_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
@@ -122,7 +122,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
!data->valid) {
int i;
- dev_dbg(&client->dev, "Updating lm95241 data.\n");
+ dev_dbg(dev, "Updating lm95241 data.\n");
for (i = 0; i < ARRAY_SIZE(lm95241_reg_address); i++)
data->temp[i]
= i2c_smbus_read_byte_data(client,
@@ -153,8 +153,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->model & to_sensor_dev_attr(attr)->index ? "1\n" : "2\n");
@@ -163,8 +162,8 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int shift;
u8 mask = to_sensor_dev_attr(attr)->index;
@@ -201,8 +200,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
static ssize_t show_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config & to_sensor_dev_attr(attr)->index ?
@@ -212,8 +210,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
long val;
if (kstrtol(buf, 10, &val) < 0)
@@ -229,7 +226,8 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
data->config &= ~to_sensor_dev_attr(attr)->index;
data->valid = 0;
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+ i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
+ data->config);
mutex_unlock(&data->update_lock);
@@ -239,8 +237,7 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config & to_sensor_dev_attr(attr)->index ?
@@ -250,8 +247,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
long val;
if (kstrtol(buf, 10, &val) < 0)
@@ -267,7 +263,8 @@ static ssize_t set_max(struct device *dev, struct device_attribute *attr,
data->config &= ~to_sensor_dev_attr(attr)->index;
data->valid = 0;
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+ i2c_smbus_write_byte_data(data->client, LM95241_REG_RW_CONFIG,
+ data->config);
mutex_unlock(&data->update_lock);
@@ -286,8 +283,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
+ struct lm95241_data *data = dev_get_drvdata(dev);
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -316,7 +312,7 @@ static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95241_attributes[] = {
+static struct attribute *lm95241_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -329,10 +325,7 @@ static struct attribute *lm95241_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95241_group = {
- .attrs = lm95241_attributes,
-};
+ATTRIBUTE_GROUPS(lm95241);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95241_detect(struct i2c_client *new_client,
@@ -366,14 +359,11 @@ static int lm95241_detect(struct i2c_client *new_client,
return 0;
}
-static void lm95241_init_client(struct i2c_client *client)
+static void lm95241_init_client(struct i2c_client *client,
+ struct lm95241_data *data)
{
- struct lm95241_data *data = i2c_get_clientdata(client);
-
data->interval = HZ; /* 1 sec default */
- data->valid = 0;
data->config = CFG_CR0076;
- data->model = 0;
data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
@@ -385,49 +375,27 @@ static void lm95241_init_client(struct i2c_client *client)
data->model);
}
-static int lm95241_probe(struct i2c_client *new_client,
+static int lm95241_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm95241_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm95241_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm95241_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95241 chip */
- lm95241_init_client(new_client);
+ lm95241_init_client(client, data);
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm95241_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &lm95241_group);
- return err;
-}
-
-static int lm95241_remove(struct i2c_client *client)
-{
- struct lm95241_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95241_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95241_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -444,7 +412,6 @@ static struct i2c_driver lm95241_driver = {
.name = DEVNAME,
},
.probe = lm95241_probe,
- .remove = lm95241_remove,
.id_table = lm95241_id,
.detect = lm95241_detect,
.address_list = normal_i2c,
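
The lm95241 conversion above is the pattern repeated throughout this series: keep the i2c_client in the driver data, fetch it with dev_get_drvdata() in the sysfs callbacks, declare the attributes with ATTRIBUTE_GROUPS(), and let devm_hwmon_device_register_with_groups() handle registration so the remove() callback and all error unwinding disappear. A minimal sketch of that shape follows; the foo_* names and the FOO_REG_TEMP register are hypothetical and not taken from any of the drivers touched here.

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#define FOO_REG_TEMP	0x00			/* hypothetical register */

struct foo_data {
	struct i2c_client *client;	/* replaces the old hwmon_dev pointer */
};

static ssize_t foo_show_temp(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo_data *data = dev_get_drvdata(dev);	/* drvdata of the hwmon device */
	int val = i2c_smbus_read_byte_data(data->client, FOO_REG_TEMP);

	if (val < 0)
		return val;
	return sprintf(buf, "%d\n", val * 1000);	/* degrees C -> millidegrees C */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL, 0);

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo);				/* generates foo_groups */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *hwmon_dev;
	struct foo_data *data;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;

	/* devm-managed: no remove() callback, no manual sysfs cleanup */
	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   client->name, data,
							   foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = { .name = "foo" },
	.probe = foo_probe,
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");
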
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index a6c85f0ff8f3..0ae0dfdafdff 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -115,7 +115,7 @@ static const u8 lm95245_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95245_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
unsigned long interval; /* in msecs */
@@ -140,8 +140,8 @@ static int temp_from_reg_signed(u8 val_h, u8 val_l)
static struct lm95245_data *lm95245_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
mutex_lock(&data->update_lock);
@@ -149,7 +149,6 @@ static struct lm95245_data *lm95245_update_device(struct device *dev)
+ msecs_to_jiffies(data->interval)) || !data->valid) {
int i;
- dev_dbg(&client->dev, "Updating lm95245 data.\n");
for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
data->regs[i]
= i2c_smbus_read_byte_data(client,
@@ -249,9 +248,9 @@ static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -272,27 +271,38 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t show_crit_hyst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ int hyst = data->regs[index] - data->regs[8];
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", hyst * 1000);
+}
+
static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct i2c_client *client = data->client;
unsigned long val;
+ int hyst, limit;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
- val /= 1000;
-
- val = clamp_val(val, 0, 31);
-
mutex_lock(&data->update_lock);
- data->valid = 0;
+ limit = i2c_smbus_read_byte_data(client, lm95245_reg_address[index]);
+ hyst = limit - val / 1000;
+ hyst = clamp_val(hyst, 0, 31);
+ data->regs[8] = hyst;
/* shared crit hysteresis */
i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
- val);
+ hyst);
mutex_unlock(&data->update_lock);
@@ -302,8 +312,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1,
data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
@@ -312,8 +321,8 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -359,8 +368,8 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95245_data *data = i2c_get_clientdata(client);
+ struct lm95245_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -378,16 +387,15 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
set_limit, 6);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
- set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_crit_hyst,
+ set_crit_hyst, 6);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
STATUS1_LOC);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
set_limit, 7);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
- set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_crit_hyst, NULL, 7);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
STATUS1_RTCRIT);
static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
@@ -398,7 +406,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
-static struct attribute *lm95245_attributes[] = {
+static struct attribute *lm95245_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
@@ -412,10 +420,7 @@ static struct attribute *lm95245_attributes[] = {
&dev_attr_update_interval.attr,
NULL
};
-
-static const struct attribute_group lm95245_group = {
- .attrs = lm95245_attributes,
-};
+ATTRIBUTE_GROUPS(lm95245);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm95245_detect(struct i2c_client *new_client,
@@ -436,11 +441,9 @@ static int lm95245_detect(struct i2c_client *new_client,
return 0;
}
-static void lm95245_init_client(struct i2c_client *client)
+static void lm95245_init_client(struct i2c_client *client,
+ struct lm95245_data *data)
{
- struct lm95245_data *data = i2c_get_clientdata(client);
-
- data->valid = 0;
data->interval = lm95245_read_conversion_rate(client);
data->config1 = i2c_smbus_read_byte_data(client,
@@ -456,49 +459,27 @@ static void lm95245_init_client(struct i2c_client *client)
}
}
-static int lm95245_probe(struct i2c_client *new_client,
+static int lm95245_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm95245_data *data;
- int err;
+ struct device *hwmon_dev;
- data = devm_kzalloc(&new_client->dev, sizeof(struct lm95245_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct lm95245_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LM95245 chip */
- lm95245_init_client(new_client);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
- return err;
-}
+ lm95245_init_client(client, data);
-static int lm95245_remove(struct i2c_client *client)
-{
- struct lm95245_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm95245_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ lm95245_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
/* Driver data (common to all clients) */
@@ -514,7 +495,6 @@ static struct i2c_driver lm95245_driver = {
.name = DEVNAME,
},
.probe = lm95245_probe,
- .remove = lm95245_remove,
.id_table = lm95245_id,
.detect = lm95245_detect,
.address_list = normal_i2c,
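
The lm95245 hunk above also reworks the shared critical hysteresis: the chip has a single common hysteresis register (cached in data->regs[8]), so temp1_crit_hyst is reported as the critical limit minus that offset, and a write stores the clamped difference back. A small sketch of just that arithmetic, with hypothetical helper names and millidegree values as used by the sysfs ABI:

#include <linux/kernel.h>	/* clamp_val() */

/* What show_crit_hyst() reports, in millidegrees C. */
static int crit_hyst_mdeg(int limit_reg, int common_hyst_reg)
{
	return (limit_reg - common_hyst_reg) * 1000;
}

/* What set_crit_hyst() stores in the common hysteresis register:
 * e.g. an 85 degree limit and a write of 75000 yield a register value of 10.
 */
static int crit_hyst_to_reg(int limit_reg, long val_mdeg)
{
	int hyst = limit_reg - val_mdeg / 1000;

	return clamp_val(hyst, 0, 31);	/* register holds 0..31 degrees */
}
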
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
new file mode 100644
index 000000000000..c104cc32989d
--- /dev/null
+++ b/drivers/hwmon/ltc2945.c
@@ -0,0 +1,519 @@
+/*
+ * Driver for Linear Technology LTC2945 I2C Power Monitor
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+#define LTC2945_CONTROL 0x00
+#define LTC2945_ALERT 0x01
+#define LTC2945_STATUS 0x02
+#define LTC2945_FAULT 0x03
+#define LTC2945_POWER_H 0x05
+#define LTC2945_MAX_POWER_H 0x08
+#define LTC2945_MIN_POWER_H 0x0b
+#define LTC2945_MAX_POWER_THRES_H 0x0e
+#define LTC2945_MIN_POWER_THRES_H 0x11
+#define LTC2945_SENSE_H 0x14
+#define LTC2945_MAX_SENSE_H 0x16
+#define LTC2945_MIN_SENSE_H 0x18
+#define LTC2945_MAX_SENSE_THRES_H 0x1a
+#define LTC2945_MIN_SENSE_THRES_H 0x1c
+#define LTC2945_VIN_H 0x1e
+#define LTC2945_MAX_VIN_H 0x20
+#define LTC2945_MIN_VIN_H 0x22
+#define LTC2945_MAX_VIN_THRES_H 0x24
+#define LTC2945_MIN_VIN_THRES_H 0x26
+#define LTC2945_ADIN_H 0x28
+#define LTC2945_MAX_ADIN_H 0x2a
+#define LTC2945_MIN_ADIN_H 0x2c
+#define LTC2945_MAX_ADIN_THRES_H 0x2e
+#define LTC2945_MIN_ADIN_THRES_H 0x30
+#define LTC2945_MIN_ADIN_THRES_L 0x31
+
+/* Fault register bits */
+
+#define FAULT_ADIN_UV (1 << 0)
+#define FAULT_ADIN_OV (1 << 1)
+#define FAULT_VIN_UV (1 << 2)
+#define FAULT_VIN_OV (1 << 3)
+#define FAULT_SENSE_UV (1 << 4)
+#define FAULT_SENSE_OV (1 << 5)
+#define FAULT_POWER_UV (1 << 6)
+#define FAULT_POWER_OV (1 << 7)
+
+/* Control register bits */
+
+#define CONTROL_MULT_SELECT (1 << 0)
+#define CONTROL_TEST_MODE (1 << 4)
+
+static inline bool is_power_reg(u8 reg)
+{
+ return reg < LTC2945_SENSE_H;
+}
+
+/* Return the value from the given register in uW, mV, or mA */
+static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int control;
+ u8 buf[3];
+ long long val;
+ int ret;
+
+ ret = regmap_bulk_read(regmap, reg, buf,
+ is_power_reg(reg) ? 3 : 2);
+ if (ret < 0)
+ return ret;
+
+ if (is_power_reg(reg)) {
+ /* power */
+ val = (buf[0] << 16) + (buf[1] << 8) + buf[2];
+ } else {
+ /* current, voltage */
+ val = (buf[0] << 4) + (buf[1] >> 4);
+ }
+
+ switch (reg) {
+ case LTC2945_POWER_H:
+ case LTC2945_MAX_POWER_H:
+ case LTC2945_MIN_POWER_H:
+ case LTC2945_MAX_POWER_THRES_H:
+ case LTC2945_MIN_POWER_THRES_H:
+ /*
+ * Convert to uW by assuming current is measured with
+ * a 1 mOhm sense resistor, similar to current
+ * measurements.
+ * Control register bit 0 selects if voltage at SENSE+/VDD
+ * or voltage at ADIN is used to measure power.
+ */
+ ret = regmap_read(regmap, LTC2945_CONTROL, &control);
+ if (ret < 0)
+ return ret;
+ if (control & CONTROL_MULT_SELECT) {
+ /* 25 mV * 25 uV = 0.625 uV^2, i.e. 625 uW/LSB with a 1 mOhm sense resistor. */
+ val *= 625LL;
+ } else {
+ /* 0.5 mV * 25 uV = 0.0125 uV^2, i.e. 12.5 uW/LSB with a 1 mOhm sense resistor. */
+ val = (val * 25LL) >> 1;
+ }
+ break;
+ case LTC2945_VIN_H:
+ case LTC2945_MAX_VIN_H:
+ case LTC2945_MIN_VIN_H:
+ case LTC2945_MAX_VIN_THRES_H:
+ case LTC2945_MIN_VIN_THRES_H:
+ /* 25 mV resolution. Convert to mV. */
+ val *= 25;
+ break;
+ case LTC2945_ADIN_H:
+ case LTC2945_MAX_ADIN_H:
+ case LTC2945_MIN_ADIN_THRES_H:
+ case LTC2945_MAX_ADIN_THRES_H:
+ case LTC2945_MIN_ADIN_H:
+ /* 0.5 mV resolution. Convert to mV. */
+ val = val >> 1;
+ break;
+ case LTC2945_SENSE_H:
+ case LTC2945_MAX_SENSE_H:
+ case LTC2945_MIN_SENSE_H:
+ case LTC2945_MAX_SENSE_THRES_H:
+ case LTC2945_MIN_SENSE_THRES_H:
+ /*
+ * 25 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val *= 25;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static int ltc2945_val_to_reg(struct device *dev, u8 reg,
+ unsigned long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int control;
+ int ret;
+
+ switch (reg) {
+ case LTC2945_POWER_H:
+ case LTC2945_MAX_POWER_H:
+ case LTC2945_MIN_POWER_H:
+ case LTC2945_MAX_POWER_THRES_H:
+ case LTC2945_MIN_POWER_THRES_H:
+ /*
+ * Convert to register value by assuming current is measured
+ * with a 1 mOhm sense resistor, similar to current
+ * measurements.
+ * Control register bit 0 selects if voltage at SENSE+/VDD
+ * or voltage at ADIN is used to measure power, which in turn
+ * determines register calculations.
+ */
+ ret = regmap_read(regmap, LTC2945_CONTROL, &control);
+ if (ret < 0)
+ return ret;
+ if (control & CONTROL_MULT_SELECT) {
+ /* 25 mV * 25 uV = 0.625 uV^2, i.e. 625 uW/LSB with a 1 mOhm sense resistor. */
+ val = DIV_ROUND_CLOSEST(val, 625);
+ } else {
+ /*
+ * 0.5 mV * 25 uV = 0.0125 uV^2, i.e. 12.5 uW/LSB.
+ * Divide first to avoid overflow;
+ * accept loss of accuracy.
+ */
+ val = DIV_ROUND_CLOSEST(val, 25) * 2;
+ }
+ break;
+ case LTC2945_VIN_H:
+ case LTC2945_MAX_VIN_H:
+ case LTC2945_MIN_VIN_H:
+ case LTC2945_MAX_VIN_THRES_H:
+ case LTC2945_MIN_VIN_THRES_H:
+ /* 25 mV resolution. */
+ val /= 25;
+ break;
+ case LTC2945_ADIN_H:
+ case LTC2945_MAX_ADIN_H:
+ case LTC2945_MIN_ADIN_THRES_H:
+ case LTC2945_MAX_ADIN_THRES_H:
+ case LTC2945_MIN_ADIN_H:
+ /* 0.5 mV resolution. */
+ val *= 2;
+ break;
+ case LTC2945_SENSE_H:
+ case LTC2945_MAX_SENSE_H:
+ case LTC2945_MIN_SENSE_H:
+ case LTC2945_MAX_SENSE_THRES_H:
+ case LTC2945_MIN_SENSE_THRES_H:
+ /*
+ * 25 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = DIV_ROUND_CLOSEST(val, 25);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static ssize_t ltc2945_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ long long value;
+
+ value = ltc2945_reg_to_val(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%lld\n", value);
+}
+
+static ssize_t ltc2945_set_value(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg = attr->index;
+ unsigned long val;
+ u8 regbuf[3];
+ int num_regs;
+ int regval;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ /* convert to register value, then clamp and write result */
+ regval = ltc2945_val_to_reg(dev, reg, val);
+ if (is_power_reg(reg)) {
+ regval = clamp_val(regval, 0, 0xffffff);
+ regbuf[0] = regval >> 16;
+ regbuf[1] = (regval >> 8) & 0xff;
+ regbuf[2] = regval;
+ num_regs = 3;
+ } else {
+ regval = clamp_val(regval, 0, 0xfff) << 4;
+ regbuf[0] = regval >> 8;
+ regbuf[1] = regval & 0xff;
+ num_regs = 2;
+ }
+ ret = regmap_bulk_write(regmap, reg, regbuf, num_regs);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t ltc2945_reset_history(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg = attr->index;
+ int num_regs = is_power_reg(reg) ? 3 : 2;
+ u8 buf_min[3] = { 0xff, 0xff, 0xff };
+ u8 buf_max[3] = { 0, 0, 0 };
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val != 1)
+ return -EINVAL;
+
+ ret = regmap_update_bits(regmap, LTC2945_CONTROL, CONTROL_TEST_MODE,
+ CONTROL_TEST_MODE);
+
+ /* Reset minimum */
+ ret = regmap_bulk_write(regmap, reg, buf_min, num_regs);
+ if (ret)
+ return ret;
+
+ switch (reg) {
+ case LTC2945_MIN_POWER_H:
+ reg = LTC2945_MAX_POWER_H;
+ break;
+ case LTC2945_MIN_SENSE_H:
+ reg = LTC2945_MAX_SENSE_H;
+ break;
+ case LTC2945_MIN_VIN_H:
+ reg = LTC2945_MAX_VIN_H;
+ break;
+ case LTC2945_MIN_ADIN_H:
+ reg = LTC2945_MAX_ADIN_H;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ /* Reset maximum */
+ ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
+
+ /* Try resetting test mode even if there was an error */
+ regmap_update_bits(regmap, LTC2945_CONTROL, CONTROL_TEST_MODE, 0);
+
+ return ret ? : count;
+}
+
+static ssize_t ltc2945_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, LTC2945_FAULT, &fault);
+ if (ret < 0)
+ return ret;
+
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, LTC2945_FAULT, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Input voltages */
+
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_VIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_VIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in1_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_VIN_H);
+static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_VIN_H);
+
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_ADIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_ADIN_THRES_H);
+static SENSOR_DEVICE_ATTR(in2_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_ADIN_H);
+static SENSOR_DEVICE_ATTR(in2_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_ADIN_H);
+
+/* Voltage alarms */
+
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_VIN_UV);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_VIN_OV);
+static SENSOR_DEVICE_ATTR(in2_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_ADIN_UV);
+static SENSOR_DEVICE_ATTR(in2_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_ADIN_OV);
+
+/* Currents (via sense resistor) */
+
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_SENSE_THRES_H);
+static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_SENSE_THRES_H);
+static SENSOR_DEVICE_ATTR(curr1_lowest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MIN_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_highest, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_MAX_SENSE_H);
+static SENSOR_DEVICE_ATTR(curr1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_SENSE_H);
+
+/* Current alarms */
+
+static SENSOR_DEVICE_ATTR(curr1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_SENSE_UV);
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_SENSE_OV);
+
+/* Power */
+
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ltc2945_show_value, NULL,
+ LTC2945_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_min, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MIN_POWER_THRES_H);
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ltc2945_show_value,
+ ltc2945_set_value, LTC2945_MAX_POWER_THRES_H);
+static SENSOR_DEVICE_ATTR(power1_input_lowest, S_IRUGO, ltc2945_show_value,
+ NULL, LTC2945_MIN_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ltc2945_show_value,
+ NULL, LTC2945_MAX_POWER_H);
+static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
+ ltc2945_reset_history, LTC2945_MIN_POWER_H);
+
+/* Power alarms */
+
+static SENSOR_DEVICE_ATTR(power1_min_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_POWER_UV);
+static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ltc2945_show_bool, NULL,
+ FAULT_POWER_OV);
+
+static struct attribute *ltc2945_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_lowest.dev_attr.attr,
+ &sensor_dev_attr_in1_highest.dev_attr.attr,
+ &sensor_dev_attr_in1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in2_lowest.dev_attr.attr,
+ &sensor_dev_attr_in2_highest.dev_attr.attr,
+ &sensor_dev_attr_in2_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_min.dev_attr.attr,
+ &sensor_dev_attr_curr1_max.dev_attr.attr,
+ &sensor_dev_attr_curr1_lowest.dev_attr.attr,
+ &sensor_dev_attr_curr1_highest.dev_attr.attr,
+ &sensor_dev_attr_curr1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_curr1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_min.dev_attr.attr,
+ &sensor_dev_attr_power1_max.dev_attr.attr,
+ &sensor_dev_attr_power1_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_power1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_power1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_power1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2945);
+
+static struct regmap_config ltc2945_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC2945_MIN_ADIN_THRES_L,
+};
+
+static int ltc2945_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc2945_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC2945_FAULT, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc2945_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2945_id[] = {
+ {"ltc2945", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc2945_id);
+
+static struct i2c_driver ltc2945_driver = {
+ .driver = {
+ .name = "ltc2945",
+ },
+ .probe = ltc2945_probe,
+ .id_table = ltc2945_id,
+};
+
+module_i2c_driver(ltc2945_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC2945 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 8a142960d69e..c8a9bd9b050f 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -33,7 +33,7 @@ enum ltc4215_cmd {
};
struct ltc4215_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
bool valid;
@@ -45,8 +45,8 @@ struct ltc4215_data {
static struct ltc4215_data *ltc4215_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ltc4215_data *data = i2c_get_clientdata(client);
+ struct ltc4215_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
s32 val;
int i;
@@ -214,7 +214,7 @@ static SENSOR_DEVICE_ATTR(in2_min_alarm, S_IRUGO, ltc4215_show_alarm, NULL,
* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
-static struct attribute *ltc4215_attributes[] = {
+static struct attribute *ltc4215_attrs[] = {
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
@@ -229,57 +229,33 @@ static struct attribute *ltc4215_attributes[] = {
NULL,
};
-
-static const struct attribute_group ltc4215_group = {
- .attrs = ltc4215_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4215);
static int ltc4215_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct ltc4215_data *data;
- int ret;
+ struct device *hwmon_dev;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the LTC4215 chip */
i2c_smbus_write_byte_data(client, LTC4215_FAULT, 0x00);
- /* Register sysfs hooks */
- ret = sysfs_create_group(&client->dev.kobj, &ltc4215_group);
- if (ret)
- return ret;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto out_hwmon_device_register;
- }
-
- return 0;
-
-out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &ltc4215_group);
- return ret;
-}
-
-static int ltc4215_remove(struct i2c_client *client)
-{
- struct ltc4215_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &ltc4215_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ ltc4215_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4215_id[] = {
@@ -294,7 +270,6 @@ static struct i2c_driver ltc4215_driver = {
.name = "ltc4215",
},
.probe = ltc4215_probe,
- .remove = ltc4215_remove,
.id_table = ltc4215_id,
};
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
new file mode 100644
index 000000000000..07c25653659f
--- /dev/null
+++ b/drivers/hwmon/ltc4222.c
@@ -0,0 +1,237 @@
+/*
+ * Driver for Linear Technology LTC4222 Dual Hot Swap controller
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+
+#define LTC4222_CONTROL1 0xd0
+#define LTC4222_ALERT1 0xd1
+#define LTC4222_STATUS1 0xd2
+#define LTC4222_FAULT1 0xd3
+#define LTC4222_CONTROL2 0xd4
+#define LTC4222_ALERT2 0xd5
+#define LTC4222_STATUS2 0xd6
+#define LTC4222_FAULT2 0xd7
+#define LTC4222_SOURCE1 0xd8
+#define LTC4222_SOURCE2 0xda
+#define LTC4222_ADIN1 0xdc
+#define LTC4222_ADIN2 0xde
+#define LTC4222_SENSE1 0xe0
+#define LTC4222_SENSE2 0xe2
+#define LTC4222_ADC_CONTROL 0xe4
+
+/*
+ * Fault register bits
+ */
+#define FAULT_OV BIT(0)
+#define FAULT_UV BIT(1)
+#define FAULT_OC BIT(2)
+#define FAULT_POWER_BAD BIT(3)
+#define FAULT_FET_BAD BIT(5)
+
+/* Return the voltage from the given register in mV or mA */
+static int ltc4222_get_value(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int val;
+ u8 buf[2];
+ int ret;
+
+ ret = regmap_bulk_read(regmap, reg, buf, 2);
+ if (ret < 0)
+ return ret;
+
+ val = ((buf[0] << 8) + buf[1]) >> 6;
+
+ switch (reg) {
+ case LTC4222_ADIN1:
+ case LTC4222_ADIN2:
+ /* 1.25 mV resolution. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val * 5, 4);
+ break;
+ case LTC4222_SOURCE1:
+ case LTC4222_SOURCE2:
+ /* 31.25 mV resolution. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val * 125, 4);
+ break;
+ case LTC4222_SENSE1:
+ case LTC4222_SENSE2:
+ /*
+ * 62.5 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = DIV_ROUND_CLOSEST(val * 125, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return val;
+}
+
+static ssize_t ltc4222_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+
+ value = ltc4222_get_value(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t ltc4222_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, attr->nr, &fault);
+ if (ret < 0)
+ return ret;
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, attr->nr, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SOURCE1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_ADIN1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SOURCE2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_ADIN2);
+
+/*
+ * Voltage alarms
+ * UV/OV faults are associated with the input voltage, and power bad and fet
+ * faults are associated with the output voltage.
+ */
+static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_UV);
+static SENSOR_DEVICE_ATTR_2(in1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_OV);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_POWER_BAD | FAULT_FET_BAD);
+
+static SENSOR_DEVICE_ATTR_2(in3_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_UV);
+static SENSOR_DEVICE_ATTR_2(in3_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_OV);
+static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_POWER_BAD | FAULT_FET_BAD);
+
+/* Current (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SENSE1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc4222_show_value, NULL,
+ LTC4222_SENSE2);
+
+/* Overcurrent alarm */
+static SENSOR_DEVICE_ATTR_2(curr1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT1, FAULT_OC);
+static SENSOR_DEVICE_ATTR_2(curr2_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
+ LTC4222_FAULT2, FAULT_OC);
+
+static struct attribute *ltc4222_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc4222);
+
+static struct regmap_config ltc4222_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC4222_ADC_CONTROL,
+};
+
+static int ltc4222_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc4222_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC4222_FAULT1, 0x00);
+ regmap_write(regmap, LTC4222_FAULT2, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc4222_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc4222_id[] = {
+ {"ltc4222", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc4222_id);
+
+static struct i2c_driver ltc4222_driver = {
+ .driver = {
+ .name = "ltc4222",
+ },
+ .probe = ltc4222_probe,
+ .id_table = ltc4222_id,
+};
+
+module_i2c_driver(ltc4222_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC4222 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index d4172933ce4f..681b5b7b3c3b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -95,7 +95,6 @@ static void ltc4245_update_gpios(struct device *dev)
* readings as stale by setting them to -EAGAIN
*/
if (time_after(jiffies, data->last_updated + 5 * HZ)) {
- dev_dbg(&client->dev, "Marking GPIOs invalid\n");
for (i = 0; i < ARRAY_SIZE(data->gpios); i++)
data->gpios[i] = -EAGAIN;
}
@@ -141,8 +140,6 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- dev_dbg(&client->dev, "Starting ltc4245 update\n");
-
/* Read control registers -- 0x00 to 0x07 */
for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
val = i2c_smbus_read_byte_data(client, i);
@@ -470,19 +467,15 @@ static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
static bool ltc4245_use_extra_gpios(struct i2c_client *client)
{
struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev);
-#ifdef CONFIG_OF
struct device_node *np = client->dev.of_node;
-#endif
/* prefer platform data */
if (pdata)
return pdata->use_extra_gpios;
-#ifdef CONFIG_OF
/* fallback on OF */
if (of_find_property(np, "ltc4245,use-extra-gpios", NULL))
return true;
-#endif
return false;
}
@@ -512,24 +505,10 @@ static int ltc4245_probe(struct i2c_client *client,
/* Add sysfs hooks */
ltc4245_sysfs_add_groups(data);
- hwmon_dev = hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- data->groups);
- if (IS_ERR(hwmon_dev))
- return PTR_ERR(hwmon_dev);
-
- i2c_set_clientdata(client, hwmon_dev);
-
- return 0;
-}
-
-static int ltc4245_remove(struct i2c_client *client)
-{
- struct device *hwmon_dev = i2c_get_clientdata(client);
-
- hwmon_device_unregister(hwmon_dev);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id ltc4245_id[] = {
@@ -544,7 +523,6 @@ static struct i2c_driver ltc4245_driver = {
.name = "ltc4245",
},
.probe = ltc4245_probe,
- .remove = ltc4245_remove,
.id_table = ltc4245_id,
};
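
The ltc4245 hunk drops the CONFIG_OF guards around the extra-GPIO check; that is safe because <linux/of.h> provides a static inline stub of of_find_property() returning NULL when CONFIG_OF is disabled, so the OF fallback compiles on every configuration and simply evaluates to false without OF. A condensed sketch of the same idea under a hypothetical foo_ name:

#include <linux/of.h>
#include <linux/i2c.h>

static bool foo_use_extra_gpios(struct i2c_client *client)
{
	/* NULL when the device was not instantiated from the device tree */
	struct device_node *np = client->dev.of_node;

	/* With CONFIG_OF=n of_find_property() is an inline stub returning
	 * NULL, so no #ifdef is needed around the lookup.
	 */
	return of_find_property(np, "ltc4245,use-extra-gpios", NULL) != NULL;
}
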
diff --git a/drivers/hwmon/ltc4260.c b/drivers/hwmon/ltc4260.c
new file mode 100644
index 000000000000..453a250d9df5
--- /dev/null
+++ b/drivers/hwmon/ltc4260.c
@@ -0,0 +1,200 @@
+/*
+ * Driver for Linear Technology LTC4260 I2C Positive Voltage Hot Swap Controller
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+
+/* chip registers */
+#define LTC4260_CONTROL 0x00
+#define LTC4260_ALERT 0x01
+#define LTC4260_STATUS 0x02
+#define LTC4260_FAULT 0x03
+#define LTC4260_SENSE 0x04
+#define LTC4260_SOURCE 0x05
+#define LTC4260_ADIN 0x06
+
+/*
+ * Fault register bits
+ */
+#define FAULT_OV (1 << 0)
+#define FAULT_UV (1 << 1)
+#define FAULT_OC (1 << 2)
+#define FAULT_POWER_BAD (1 << 3)
+#define FAULT_FET_SHORT (1 << 5)
+
+/* Return the voltage from the given register in mV or mA */
+static int ltc4260_get_value(struct device *dev, u8 reg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case LTC4260_ADIN:
+ /* 10 mV resolution. Convert to mV. */
+ val = val * 10;
+ break;
+ case LTC4260_SOURCE:
+ /* 400 mV resolution. Convert to mV. */
+ val = val * 400;
+ break;
+ case LTC4260_SENSE:
+ /*
+ * 300 uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA. If a different sense
+ * resistor is installed, calculate the actual current by
+ * dividing the reported current by the sense resistor value
+ * in mOhm.
+ */
+ val = val * 300;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static ssize_t ltc4260_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+
+ value = ltc4260_get_value(dev, attr->index);
+ if (value < 0)
+ return value;
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t ltc4260_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int fault;
+ int ret;
+
+ ret = regmap_read(regmap, LTC4260_FAULT, &fault);
+ if (ret < 0)
+ return ret;
+
+ fault &= attr->index;
+ if (fault) /* Clear reported faults in chip register */
+ regmap_update_bits(regmap, LTC4260_FAULT, attr->index, 0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!fault);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_SOURCE);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_ADIN);
+
+/*
+ * Voltage alarms
+ * UV/OV faults are associated with the input voltage, and the POWER BAD and
+ * FET SHORT faults are associated with the output voltage.
+ */
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_UV);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_OV);
+static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_POWER_BAD | FAULT_FET_SHORT);
+
+/* Current (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4260_show_value, NULL,
+ LTC4260_SENSE);
+
+/* Overcurrent alarm */
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4260_show_bool, NULL,
+ FAULT_OC);
+
+static struct attribute *ltc4260_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc4260);
+
+static struct regmap_config ltc4260_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LTC4260_ADIN,
+};
+
+static int ltc4260_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &ltc4260_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Clear faults */
+ regmap_write(regmap, LTC4260_FAULT, 0x00);
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ regmap,
+ ltc4260_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc4260_id[] = {
+ {"ltc4260", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ltc4260_id);
+
+static struct i2c_driver ltc4260_driver = {
+ .driver = {
+ .name = "ltc4260",
+ },
+ .probe = ltc4260_probe,
+ .id_table = ltc4260_id,
+};
+
+module_i2c_driver(ltc4260_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("LTC4260 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 445e5d40ac82..6638e997f83f 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -2,7 +2,7 @@
* max1619.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
* It reports up to two temperatures (its own plus up to
@@ -357,7 +357,6 @@ static struct max1619_data *max1619_update_device(struct device *dev)
module_i2c_driver(max1619_driver);
-MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net> and "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net>, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("MAX1619 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index a7626358c95d..e3ed0a5b6d94 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -66,7 +66,8 @@ MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
enum chips { max1668, max1805, max1989 };
struct max1668_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
enum chips type;
struct mutex update_lock;
@@ -82,8 +83,8 @@ struct max1668_data {
static struct max1668_data *max1668_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max1668_data *ret = data;
s32 val;
int i;
@@ -205,8 +206,8 @@ static ssize_t set_temp_max(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int ret;
@@ -216,10 +217,11 @@ static ssize_t set_temp_max(struct device *dev,
mutex_lock(&data->update_lock);
data->temp_max[index] = clamp_val(temp/1000, -128, 127);
- if (i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(client,
MAX1668_REG_LIMH_WR(index),
- data->temp_max[index]))
- count = -EIO;
+ data->temp_max[index]);
+ if (ret < 0)
+ count = ret;
mutex_unlock(&data->update_lock);
return count;
@@ -230,8 +232,8 @@ static ssize_t set_temp_min(struct device *dev,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long temp;
int ret;
@@ -241,10 +243,11 @@ static ssize_t set_temp_min(struct device *dev,
mutex_lock(&data->update_lock);
data->temp_min[index] = clamp_val(temp/1000, -128, 127);
- if (i2c_smbus_write_byte_data(client,
+ ret = i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_max[index]))
- count = -EIO;
+ data->temp_min[index]);
+ if (ret < 0)
+ count = ret;
mutex_unlock(&data->update_lock);
return count;
@@ -405,60 +408,29 @@ static int max1668_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct max1668_data *data;
- int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(struct max1668_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max1668_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
data->type = id->driver_data;
mutex_init(&data->update_lock);
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
- if (err)
- return err;
-
- if (data->type == max1668 || data->type == max1989) {
- err = sysfs_create_group(&client->dev.kobj,
- &max1668_group_unique);
- if (err)
- goto error_sysrem0;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_sysrem1;
- }
-
- return 0;
-
-error_sysrem1:
+ /* sysfs hooks */
+ data->groups[0] = &max1668_group_common;
if (data->type == max1668 || data->type == max1989)
- sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
-error_sysrem0:
- sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
- return err;
-}
-
-static int max1668_remove(struct i2c_client *client)
-{
- struct max1668_data *data = i2c_get_clientdata(client);
+ data->groups[1] = &max1668_group_unique;
- hwmon_device_unregister(data->hwmon_dev);
- if (data->type == max1668 || data->type == max1989)
- sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
-
- sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id max1668_id[] = {
@@ -476,7 +448,6 @@ static struct i2c_driver max1668_driver = {
.name = "max1668",
},
.probe = max1668_probe,
- .remove = max1668_remove,
.id_table = max1668_id,
.detect = max1668_detect,
.address_list = max1668_addr_list,
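
max1668 needs one more step than the simpler conversions because it has an optional second attribute group: the probe fills a NULL-terminated array of group pointers inside the (zeroed) driver data and passes that array to devm_hwmon_device_register_with_groups(). A minimal sketch of the same idea with hypothetical bar_ names:

#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

struct bar_data {
	struct i2c_client *client;
	/* common group, optional group, NULL terminator; devm_kzalloc()
	 * in probe leaves the unused slots NULL.
	 */
	const struct attribute_group *groups[3];
	bool has_extra;
};

static struct device *bar_register(struct device *dev, struct bar_data *data,
				   const struct attribute_group *common,
				   const struct attribute_group *extra)
{
	data->groups[0] = common;
	if (data->has_extra)
		data->groups[1] = extra;	/* otherwise groups[1] stays NULL */

	return devm_hwmon_device_register_with_groups(dev, "bar", data,
						      data->groups);
}
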
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 066e587a18a5..70650de2cbd1 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,7 +80,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
* Client data (each client gets its own)
*/
struct max6639_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -104,8 +104,8 @@ struct max6639_data {
static struct max6639_data *max6639_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct max6639_data *ret = data;
int i;
int status_reg;
@@ -191,9 +191,8 @@ static ssize_t show_temp_fault(struct device *dev,
static ssize_t show_temp_max(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
}
@@ -202,9 +201,9 @@ static ssize_t set_temp_max(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -224,9 +223,8 @@ static ssize_t set_temp_max(struct device *dev,
static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
}
@@ -235,9 +233,9 @@ static ssize_t set_temp_crit(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -258,9 +256,8 @@ static ssize_t show_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
}
@@ -269,9 +266,9 @@ static ssize_t set_temp_emergency(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -291,9 +288,8 @@ static ssize_t set_temp_emergency(struct device *dev,
static ssize_t show_pwm(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
}
@@ -302,9 +298,9 @@ static ssize_t set_pwm(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6639_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ struct max6639_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
int res;
@@ -378,7 +374,7 @@ static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
-static struct attribute *max6639_attributes[] = {
+static struct attribute *max6639_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
@@ -403,10 +399,7 @@ static struct attribute *max6639_attributes[] = {
&sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
NULL
};
-
-static const struct attribute_group max6639_group = {
- .attrs = max6639_attributes,
-};
+ATTRIBUTE_GROUPS(max6639);
/*
* returns respective index in rpm_ranges table
@@ -424,9 +417,9 @@ static int rpm_range_to_reg(int range)
return 1; /* default: 4000 RPM */
}
-static int max6639_init_client(struct i2c_client *client)
+static int max6639_init_client(struct i2c_client *client,
+ struct max6639_data *data)
{
- struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_platform_data *max6639_info =
dev_get_platdata(&client->dev);
int i;
@@ -545,50 +538,27 @@ static int max6639_detect(struct i2c_client *client,
static int max6639_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct max6639_data *data;
+ struct device *hwmon_dev;
int err;
- data = devm_kzalloc(&client->dev, sizeof(struct max6639_data),
- GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct max6639_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(client, data);
+ data->client = client;
mutex_init(&data->update_lock);
/* Initialize the max6639 chip */
- err = max6639_init_client(client);
+ err = max6639_init_client(client, data);
if (err < 0)
return err;
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &max6639_group);
- if (err)
- return err;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_remove;
- }
-
- dev_info(&client->dev, "temperature sensor and fan control found\n");
-
- return 0;
-
-error_remove:
- sysfs_remove_group(&client->dev.kobj, &max6639_group);
- return err;
-}
-
-static int max6639_remove(struct i2c_client *client)
-{
- struct max6639_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &max6639_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data,
+ max6639_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -622,9 +592,7 @@ static const struct i2c_device_id max6639_id[] = {
MODULE_DEVICE_TABLE(i2c, max6639_id);
-static const struct dev_pm_ops max6639_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(max6639_suspend, max6639_resume)
-};
+static SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
@@ -633,7 +601,6 @@ static struct i2c_driver max6639_driver = {
.pm = &max6639_pm_ops,
},
.probe = max6639_probe,
- .remove = max6639_remove,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 8326fbd60150..6520bc51d02a 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -8,7 +8,7 @@
*
* Based on the max1619 driver.
* Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
*
* The MAX6642 is a sensor chip made by Maxim.
* It reports up to two temperatures (its own plus up to
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 0cafc390db4d..162a520f4bd6 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -105,38 +105,13 @@ module_param(clock, int, S_IRUGO);
#define DIV_FROM_REG(reg) (1 << (reg & 7))
-static int max6650_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int max6650_init_client(struct i2c_client *client);
-static int max6650_remove(struct i2c_client *client);
-static struct max6650_data *max6650_update_device(struct device *dev);
-
-/*
- * Driver data (common to all clients)
- */
-
-static const struct i2c_device_id max6650_id[] = {
- { "max6650", 1 },
- { "max6651", 4 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, max6650_id);
-
-static struct i2c_driver max6650_driver = {
- .driver = {
- .name = "max6650",
- },
- .probe = max6650_probe,
- .remove = max6650_remove,
- .id_table = max6650_id,
-};
-
/*
* Client data (each client gets its own)
*/
struct max6650_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
+ const struct attribute_group *groups[3];
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
@@ -151,6 +126,51 @@ struct max6650_data {
u8 alarm;
};
+static const u8 tach_reg[] = {
+ MAX6650_REG_TACH0,
+ MAX6650_REG_TACH1,
+ MAX6650_REG_TACH2,
+ MAX6650_REG_TACH3,
+};
+
+static struct max6650_data *max6650_update_device(struct device *dev)
+{
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->speed = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_SPEED);
+ data->config = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_CONFIG);
+ for (i = 0; i < data->nr_fans; i++) {
+ data->tach[i] = i2c_smbus_read_byte_data(client,
+ tach_reg[i]);
+ }
+ data->count = i2c_smbus_read_byte_data(client,
+ MAX6650_REG_COUNT);
+ data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+
+ /*
+ * Alarms are cleared on read in case the condition that
+ * caused the alarm is removed. Keep the value latched here
+ * for providing the register through different alarm files.
+ */
+ data->alarm |= i2c_smbus_read_byte_data(client,
+ MAX6650_REG_ALARM);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -235,8 +255,8 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int kscale, ktach;
unsigned long rpm;
int err;
@@ -304,8 +324,8 @@ static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long pwm;
int err;
@@ -350,8 +370,8 @@ static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int max6650_modes[3] = {0, 3, 2};
unsigned long mode;
int err;
@@ -400,8 +420,8 @@ static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long div;
int err;
@@ -446,7 +466,7 @@ static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max6650_data *data = max6650_update_device(dev);
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = data->client;
int alarm = 0;
if (data->alarm & attr->index) {
@@ -484,7 +504,8 @@ static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
+ struct max6650_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
struct device_attribute *devattr;
@@ -519,7 +540,7 @@ static struct attribute *max6650_attrs[] = {
NULL
};
-static struct attribute_group max6650_attr_grp = {
+static const struct attribute_group max6650_group = {
.attrs = max6650_attrs,
.is_visible = max6650_attrs_visible,
};
@@ -531,7 +552,7 @@ static struct attribute *max6651_attrs[] = {
NULL
};
-static const struct attribute_group max6651_attr_grp = {
+static const struct attribute_group max6651_group = {
.attrs = max6651_attrs,
};
@@ -539,74 +560,17 @@ static const struct attribute_group max6651_attr_grp = {
* Real code
*/
-static int max6650_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct max6650_data *data;
- int err;
-
- data = devm_kzalloc(&client->dev, sizeof(struct max6650_data),
- GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "out of memory.\n");
- return -ENOMEM;
- }
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
- data->nr_fans = id->driver_data;
-
- /*
- * Initialize the max6650 chip
- */
- err = max6650_init_client(client);
- if (err)
- return err;
-
- err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
- if (err)
- return err;
- /* 3 additional fan inputs for the MAX6651 */
- if (data->nr_fans == 4) {
- err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
- if (err)
- goto err_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (!IS_ERR(data->hwmon_dev))
- return 0;
-
- err = PTR_ERR(data->hwmon_dev);
- dev_err(&client->dev, "error registering hwmon device.\n");
- if (data->nr_fans == 4)
- sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
-err_remove:
- sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- return err;
-}
-
-static int max6650_remove(struct i2c_client *client)
+static int max6650_init_client(struct max6650_data *data,
+ struct i2c_client *client)
{
- struct max6650_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- if (data->nr_fans == 4)
- sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
- sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- return 0;
-}
-
-static int max6650_init_client(struct i2c_client *client)
-{
- struct max6650_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
int config;
int err = -EIO;
config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
if (config < 0) {
- dev_err(&client->dev, "Error reading config, aborting.\n");
+ dev_err(dev, "Error reading config, aborting.\n");
return err;
}
@@ -620,11 +584,11 @@ static int max6650_init_client(struct i2c_client *client)
config |= MAX6650_CFG_V12;
break;
default:
- dev_err(&client->dev, "illegal value for fan_voltage (%d)\n",
+ dev_err(dev, "illegal value for fan_voltage (%d)\n",
fan_voltage);
}
- dev_info(&client->dev, "Fan voltage is set to %dV.\n",
+ dev_info(dev, "Fan voltage is set to %dV.\n",
(config & MAX6650_CFG_V12) ? 12 : 5);
switch (prescaler) {
@@ -650,11 +614,10 @@ static int max6650_init_client(struct i2c_client *client)
| MAX6650_CFG_PRESCALER_16;
break;
default:
- dev_err(&client->dev, "illegal value for prescaler (%d)\n",
- prescaler);
+ dev_err(dev, "illegal value for prescaler (%d)\n", prescaler);
}
- dev_info(&client->dev, "Prescaler is set to %d.\n",
+ dev_info(dev, "Prescaler is set to %d.\n",
1 << (config & MAX6650_CFG_PRESCALER_MASK));
/*
@@ -664,17 +627,17 @@ static int max6650_init_client(struct i2c_client *client)
*/
if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
- dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
+ dev_dbg(dev, "Change mode to open loop, full off.\n");
config = (config & ~MAX6650_CFG_MODE_MASK)
| MAX6650_CFG_MODE_OPEN_LOOP;
if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
- dev_err(&client->dev, "DAC write error, aborting.\n");
+ dev_err(dev, "DAC write error, aborting.\n");
return err;
}
}
if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
- dev_err(&client->dev, "Config write error, aborting.\n");
+ dev_err(dev, "Config write error, aborting.\n");
return err;
}
@@ -684,51 +647,55 @@ static int max6650_init_client(struct i2c_client *client)
return 0;
}
-static const u8 tach_reg[] = {
- MAX6650_REG_TACH0,
- MAX6650_REG_TACH1,
- MAX6650_REG_TACH2,
- MAX6650_REG_TACH3,
-};
-
-static struct max6650_data *max6650_update_device(struct device *dev)
+static int max6650_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- int i;
- struct i2c_client *client = to_i2c_client(dev);
- struct max6650_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
+ struct device *dev = &client->dev;
+ struct max6650_data *data;
+ struct device *hwmon_dev;
+ int err;
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->speed = i2c_smbus_read_byte_data(client,
- MAX6650_REG_SPEED);
- data->config = i2c_smbus_read_byte_data(client,
- MAX6650_REG_CONFIG);
- for (i = 0; i < data->nr_fans; i++) {
- data->tach[i] = i2c_smbus_read_byte_data(client,
- tach_reg[i]);
- }
- data->count = i2c_smbus_read_byte_data(client,
- MAX6650_REG_COUNT);
- data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+ data = devm_kzalloc(dev, sizeof(struct max6650_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- /*
- * Alarms are cleared on read in case the condition that
- * caused the alarm is removed. Keep the value latched here
- * for providing the register through different alarm files.
- */
- data->alarm |= i2c_smbus_read_byte_data(client,
- MAX6650_REG_ALARM);
+ data->client = client;
+ mutex_init(&data->update_lock);
+ data->nr_fans = id->driver_data;
- data->last_updated = jiffies;
- data->valid = 1;
- }
+ /*
+ * Initialize the max6650 chip
+ */
+ err = max6650_init_client(data, client);
+ if (err)
+ return err;
- mutex_unlock(&data->update_lock);
+ data->groups[0] = &max6650_group;
+ /* 3 additional fan inputs for the MAX6651 */
+ if (data->nr_fans == 4)
+ data->groups[1] = &max6651_group;
- return data;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name, data,
+ data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static const struct i2c_device_id max6650_id[] = {
+ { "max6650", 1 },
+ { "max6651", 4 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6650_id);
+
+static struct i2c_driver max6650_driver = {
+ .driver = {
+ .name = "max6650",
+ },
+ .probe = max6650_probe,
+ .id_table = max6650_id,
+};
+
module_i2c_driver(max6650_driver);
MODULE_AUTHOR("Hans J. Koch");
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 8686e966fa28..38d5a6334053 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -5,7 +5,7 @@
* Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
*
* Derived from w83627ehf driver
- * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
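The ntc_thermistor change above widens the intermediate product because pullup_uv (in microvolts) times a full-scale ADC sample no longer fits in 32 bits. A standalone illustration of the arithmetic, using made-up but realistic values (this is plain userspace C for demonstration only, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pullup_uv = 5000000;	/* 5 V pull-up, in microvolts */
	int val = 4095;			/* full-scale 12-bit ADC sample */

	/* 5000000 * 4095 = 20475000000, well above 2^32 - 1, so the
	 * 32-bit product wraps around before the >> 12 is applied. */
	uint32_t narrow = pullup_uv * val;
	int64_t wide = (int64_t)pullup_uv * val;

	printf("32-bit product >> 12: %u\n", narrow >> 12);
	printf("64-bit product >> 12: %lld\n", (long long)(wide >> 12));
	return 0;
}

Casting one operand to a 64-bit type before the multiplication, as the patch does with (s64), is enough to force the whole expression to be evaluated in 64 bits.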
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index aa615ba73d4b..330fe117e219 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1,7 +1,7 @@
/*
* pc87360.c - Part of lm_sensors, Linux kernel modules
* for hardware monitoring
- * Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2007 Jean Delvare <jdelvare@suse.de>
*
* Copied from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -1808,7 +1808,7 @@ static void __exit pc87360_exit(void)
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC8736x hardware monitor");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 6e6ea4437bb6..d847e0a084e0 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1,7 +1,7 @@
/*
* pc87427.c - hardware monitoring driver for the
* National Semiconductor PC87427 Super-I/O chip
- * Copyright (C) 2006, 2008, 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2006, 2008, 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1347,7 +1347,7 @@ static void __exit pc87427_exit(void)
platform_driver_unregister(&pc87427_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC87427 hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 825883d29002..5740888c6242 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001-2004 Aurelien Jarno <aurelien@aurel32.net>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index de3c152a1d9a..e24ed521051a 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,9 +1,9 @@
/*
* Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
- * and LTC3883
+ * LTC3883, and LTM4676
*
* Copyright (c) 2011 Ericsson AB.
- * Copyright (c) 2013 Guenter Roeck
+ * Copyright (c) 2013, 2014 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -28,7 +24,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -45,7 +41,7 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
#define LTC2974_MFR_IOUT_PEAK 0xd7
#define LTC2974_MFR_IOUT_MIN 0xd8
-/* LTC3880 and LTC3883 */
+/* LTC3880, LTC3883, and LTM4676 */
#define LTC3880_MFR_IOUT_PEAK 0xd7
#define LTC3880_MFR_CLEAR_PEAKS 0xe3
#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
@@ -53,7 +49,8 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
/* LTC3883 only */
#define LTC3883_MFR_IIN_PEAK 0xe1
-#define LTC2974_ID 0x0212
+#define LTC2974_ID_REV1 0x0212
+#define LTC2974_ID_REV2 0x0213
#define LTC2977_ID 0x0130
#define LTC2978_ID_REV1 0x0121
#define LTC2978_ID_REV2 0x0122
@@ -62,6 +59,8 @@ enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
#define LTC3880_ID_MASK 0xff00
#define LTC3883_ID 0x4300
#define LTC3883_ID_MASK 0xff00
+#define LTM4676_ID 0x4480 /* datasheet claims 0x440X */
+#define LTM4676_ID_MASK 0xfff0
#define LTC2974_NUM_PAGES 4
#define LTC2978_NUM_PAGES 8
@@ -370,6 +369,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltc2978", ltc2978},
{"ltc3880", ltc3880},
{"ltc3883", ltc3883},
+ {"ltm4676", ltm4676},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2978_id);
@@ -394,7 +394,7 @@ static int ltc2978_probe(struct i2c_client *client,
if (chip_id < 0)
return chip_id;
- if (chip_id == LTC2974_ID) {
+ if (chip_id == LTC2974_ID_REV1 || chip_id == LTC2974_ID_REV2) {
data->id = ltc2974;
} else if (chip_id == LTC2977_ID) {
data->id = ltc2977;
@@ -405,6 +405,8 @@ static int ltc2978_probe(struct i2c_client *client,
data->id = ltc3880;
} else if ((chip_id & LTC3883_ID_MASK) == LTC3883_ID) {
data->id = ltc3883;
+ } else if ((chip_id & LTM4676_ID_MASK) == LTM4676_ID) {
+ data->id = ltm4676;
} else {
dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
return -ENODEV;
@@ -458,6 +460,7 @@ static int ltc2978_probe(struct i2c_client *client,
}
break;
case ltc3880:
+ case ltm4676:
info->read_word_data = ltc3880_read_word_data;
info->pages = LTC3880_NUM_PAGES;
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -500,5 +503,5 @@ static struct i2c_driver ltc2978_driver = {
module_i2c_driver(ltc2978_driver);
MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, and LTC3883");
+MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, LTC3883, and LTM4676");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
- int exponent; /* linear mode: exponent for output voltages */
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info;
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
- exponent = data->exponent;
+ exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
s16 exponent = 0, mantissa;
bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0)
return 0;
- if (class == PSC_VOLTAGE_OUT) {
+ if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */
if (val < 0)
return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
* For a static exponents, we don't have a choice
* but to adjust the value to it.
*/
- if (data->exponent < 0)
- val <<= -data->exponent;
+ if (data->exponent[sensor->page] < 0)
+ val <<= -data->exponent[sensor->page];
else
- val >>= data->exponent;
+ val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff;
}
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
/* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
+ if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L);
/*
* For simplicity, convert fan data to milli-units
* before calculating the exponent.
*/
- if (class == PSC_FAN)
+ if (sensor->class == PSC_FAN)
val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
long m, b, R;
- m = data->info->m[class];
- b = data->info->b[class];
- R = data->info->R[class];
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */
- if (class == PSC_POWER) {
+ if (sensor->class == PSC_POWER) {
R -= 3;
b *= 1000;
}
/* Calculate Y = (m * X + b) * 10^R */
- if (class != PSC_FAN) {
+ if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
val = clamp_val(val, 500, 1600);
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
}
static u16 pmbus_data2reg(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
u16 regval;
- switch (data->info->format[class]) {
+ switch (data->info->format[sensor->class]) {
case direct:
- regval = pmbus_data2reg_direct(data, class, val);
+ regval = pmbus_data2reg_direct(data, sensor, val);
break;
case vid:
- regval = pmbus_data2reg_vid(data, class, val);
+ regval = pmbus_data2reg_vid(data, sensor, val);
break;
case linear:
default:
- regval = pmbus_data2reg_linear(data, class, val);
+ regval = pmbus_data2reg_linear(data, sensor, val);
break;
}
return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- regval = pmbus_data2reg(data, sensor->class, val);
+ regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips.
*/
static int pmbus_identify_common(struct i2c_client *client,
- struct pmbus_data *data)
+ struct pmbus_data *data, int page)
{
int vout_mode = -1;
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+ vout_mode = _pmbus_read_byte_data(client, page,
+ PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- data->exponent = ((s8)(vout_mode << 3)) >> 3;
+ data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, 0);
+ pmbus_clear_fault_page(client, page);
return 0;
}
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- int ret;
+ int page, ret;
/*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV;
}
- ret = pmbus_identify_common(client, data);
- if (ret < 0) {
- dev_err(dev, "Failed to identify chip capabilities\n");
- return ret;
+ for (page = 0; page < info->pages; page++) {
+ ret = pmbus_identify_common(client, data, page);
+ if (ret < 0) {
+ dev_err(dev, "Failed to identify chip capabilities\n");
+ return ret;
+ }
}
return 0;
}
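The pmbus_core changes above move the LINEAR16 exponent from a single field to one per page, since multi-page chips may report a different VOUT_MODE per rail. The exponent itself is a 5-bit two's-complement value in the low bits of VOUT_MODE; the sketch below shows how such a value is sign-extended and then applied to a raw VOUT mantissa. The helper names and the millivolt scaling are illustrative of the LINEAR16 format, not the driver's exact functions.

#include <stdint.h>

/* Sign-extend the 5-bit exponent held in VOUT_MODE bits 4..0. */
static int vout_mode_to_exponent(uint8_t vout_mode)
{
	/* e.g. 0x1c -> 0xe0 as int8_t -> -32, then >> 3 gives -4 */
	return ((int8_t)(vout_mode << 3)) >> 3;
}

/* Convert a raw LINEAR16 VOUT mantissa to millivolts for one page. */
static long linear16_to_millivolts(uint16_t mantissa, int exponent)
{
	long val = (long)mantissa * 1000L;	/* volts -> millivolts first */

	if (exponent >= 0)
		val <<= exponent;
	else
		val >>= -exponent;
	return val;
}

Storing one such exponent per page, indexed by sensor->page, is what lets pmbus_reg2data_linear() and pmbus_data2reg_linear() above convert each rail with its own scaling instead of assuming page 0's.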
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index e74bd7e620e8..3532026e25da 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -6,7 +6,7 @@
* Kyösti Mälkki <kmalkki@cc.hut.fi>, and
* Mark D. Studebaker <mdsxyz123@yahoo.com>
* Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index d9e1b7de78da..4ef5802df6d8 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -222,7 +222,7 @@ static int smm665_read_adc(struct smm665_data *data, int adc)
rv = i2c_smbus_read_word_swapped(client, 0);
if (rv < 0) {
dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
- return -1;
+ return rv;
}
/*
* Validate/verify readback adc channel (in bit 11..14).
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 81348fadf3b6..bd89e87bd6ae 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -9,7 +9,7 @@
*
* derived in part from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 05cb814539cb..23a22c4eee51 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -7,7 +7,7 @@
* Super-I/O chips.
*
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004-2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2007 Jean Delvare <jdelvare@suse.de>
* Ported to Linux 2.6 by Gabriele Gorla <gorlik@yahoo.com>
* and Jean Delvare
*
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index d7b47abf37fe..6748b4583e7b 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -27,6 +27,8 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -50,6 +52,7 @@
struct tmp102 {
struct device *hwmon_dev;
+ struct thermal_zone_device *tz;
struct mutex lock;
u16 config_orig;
unsigned long last_update;
@@ -93,6 +96,15 @@ static struct tmp102 *tmp102_update_device(struct i2c_client *client)
return tmp102;
}
+static int tmp102_read_temp(void *dev, long *temp)
+{
+ struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
+
+ *temp = tmp102->temp[0];
+
+ return 0;
+}
+
static ssize_t tmp102_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -204,6 +216,12 @@ static int tmp102_probe(struct i2c_client *client,
goto fail_remove_sysfs;
}
+ tmp102->tz = thermal_zone_of_sensor_register(&client->dev, 0,
+ &client->dev,
+ tmp102_read_temp, NULL);
+ if (IS_ERR(tmp102->tz))
+ tmp102->tz = NULL;
+
dev_info(&client->dev, "initialized\n");
return 0;
@@ -220,6 +238,7 @@ static int tmp102_remove(struct i2c_client *client)
{
struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ thermal_zone_of_sensor_unregister(&client->dev, tmp102->tz);
hwmon_device_unregister(tmp102->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 23ff210513d3..f0ab61db7a0d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
* w83627ehf - Driver for the hardware monitoring functionality of
* the Winbond W83627EHF Super-I/O chip
- * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -2889,7 +2889,7 @@ static void __exit sensors_w83627ehf_exit(void)
platform_driver_unregister(&w83627ehf_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83627EHF driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index cb9cd326ecb5..c1726be3654c 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
* Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 - 1012 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 1012 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index f9d513949a38..84911616d8c0 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -4,7 +4,7 @@
* Copyright (c) 1998 - 2001 Frodo Looijaard <frodol@dds.nl>,
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
- * Copyright (c) 2007 - 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 908209d24664..21894131190f 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2,7 +2,7 @@
* w83795.c - Linux kernel driver for hardware monitoring
* Copyright (C) 2008 Nuvoton Technology Corp.
* Wei Song
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2282,6 +2282,6 @@ static struct i2c_driver w83795_driver = {
module_i2c_driver(w83795_driver);
-MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Wei Song, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 39dbe990dc10..6384b268f590 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -1,7 +1,7 @@
/*
* w83l785ts.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009 Jean Delvare <jdelvare@suse.de>
*
* Inspired from the lm83 driver. The W83L785TS-S is a sensor chip made
* by Winbond. It reports a single external temperature with a 1 deg
@@ -10,7 +10,7 @@
* http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
*
* Ported to Linux 2.6 by Wolfgang Ziegler <nuppla@gmx.at> and Jean Delvare
- * <khali@linux-fr.org>.
+ * <jdelvare@suse.de>.
*
* Thanks to James Bolt <james@evilpenguin.com> for benchmarking the read
* error handling mechanism.
@@ -299,6 +299,6 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev)
module_i2c_driver(w83l785ts_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("W83L785TS-S driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index fad22b0bb5b0..65ef9664d5da 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -20,12 +20,11 @@
* ------------------------------------------------------------------------- */
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
- <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
+ <kmalkki@cc.hut.fi> and Jean Delvare <jdelvare@suse.de> */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index f892a424009b..8b10f88b13d9 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -24,7 +24,6 @@
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 5c2379522aa9..34370090b753 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6bcdea5856af..de17c5593d97 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -152,6 +152,7 @@ config I2C_PIIX4
ATI SB700/SP5100
ATI SB800
AMD Hudson-2
+ AMD ML
AMD CZ
Serverworks OSB4
Serverworks CSB5
@@ -386,7 +387,7 @@ config I2C_CBUS_GPIO
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
- depends on (CPM1 || CPM2) && OF_I2C
+ depends on CPM1 || CPM2
help
This supports the use of the I2C interface on Freescale
processors with CPM1 or CPM2.
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index ed9f48d566db..9d7be5af2bf2 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -12,7 +12,7 @@
* On Acorn machines, the following i2c devices are on the bus:
* - PCF8583 real time clock & static RAM
*/
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 3f491815e2c4..7d60d3a1f621 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -58,7 +58,6 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 84ccd9496a5e..4611e4754a67 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#define ALI1563_MAX_TIMEOUT 500
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 26bcc6127cee..4823206a4870 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -65,7 +65,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 07f01ac853ff..41fc6837fb8b 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -1,7 +1,7 @@
/*
* i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
*
- * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -250,7 +250,7 @@ static void __exit amd756_s4882_exit(void)
"Physical bus restoration failed\n");
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("S4882 SMBus multiplexing");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index e13e2aa2d05d..819d3c1062a7 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -41,7 +41,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a44e6e77c5a1..f3d4d79855b5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index b5b89239d622..8762458ca7da 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index ce7ffba2b020..bdf040fd8675 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 3e5ea2c87a6e..f3b89a4698b6 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -33,14 +33,15 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index ff15ae90aaf5..e08e458bab02 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/fs.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index c1ef228095b5..9fd711c03dd2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -571,7 +570,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
int i = 0, ret = 0, stop = 0;
if (i2c->suspended) {
- dev_err(i2c->dev, "HS-I2C is not initialzed.\n");
+ dev_err(i2c->dev, "HS-I2C is not initialized.\n");
return -EIO;
}
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 436b0f254916..512fcfabc18e 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -12,7 +12,6 @@
* of this archive for more details.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 79c3d9069a48..e248257fe517 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 737e29866887..349c2d35e792 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -2,7 +2,7 @@
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
<mdsxyz123@yahoo.com>
- Copyright (C) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2007 - 2012 Jean Delvare <jdelvare@suse.de>
Copyright (C) 2010 Intel Corporation,
David Woodhouse <dwmw2@infradead.org>
@@ -1312,8 +1312,7 @@ static void __exit i2c_i801_exit(void)
pci_unregister_driver(&i801_driver);
}
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I801 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index f7444100f397..274312c96b12 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -36,7 +36,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index dd24aa0424a9..3d16c2f60a5e 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index af213045ab7e..cf99dbf21fd1 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -33,7 +33,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index bb132ea7d2b4..8ce4f517fc56 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -62,7 +62,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index b6a741caf4f6..f5391633b53a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 8be7e42aa4de..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
unsigned long ctrl_reg;
struct i2c_msg *msg = drv_data->msgs;
+ if (!drv_data->offload_enabled)
+ return -EOPNOTSUPP;
+
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (!(drv_data->offload_enabled &&
- mv64xxx_i2c_offload_msg(drv_data))) {
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
writel(drv_data->cntl_bits,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
- if (!mv64xxx_i2c_offload_msg(drv_data))
- break;
- else
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
break;
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- } else {
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- }
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
@@ -692,6 +687,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i},
{ .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{ .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+ { .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{}
};
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
@@ -783,6 +779,10 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
drv_data->errata_delay = true;
}
+ if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
+ drv_data->offload_enabled = false;
+ drv_data->errata_delay = true;
+ }
out:
return rc;
#endif
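The mv64xxx rework above removes the dedicated OFFLOAD_SEND_START state: mv64xxx_i2c_offload_msg() now returns an error (e.g. -EOPNOTSUPP when offload is disabled, as for the new "marvell,mv78230-a0-i2c" compatible), and the single SEND_START action tries offload first and falls back to the programmed-I/O path. The capability-gated-helper shape, reduced to a self-contained sketch with stand-in names and an illustrative size limit rather than the driver's real hardware constraints:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware offload: refuse when a quirk disables it or
 * the message does not fit the engine (limit here is illustrative). */
static int try_offload(bool offload_enabled, int msg_len)
{
	if (!offload_enabled)
		return -EOPNOTSUPP;
	if (msg_len > 8)
		return -EOPNOTSUPP;
	printf("offload engine handles the %d byte message\n", msg_len);
	return 0;
}

static void send_start(bool offload_enabled, int msg_len)
{
	/* Try the fast path; any error selects the ordinary FSM path. */
	if (try_offload(offload_enabled, msg_len) < 0)
		printf("falling back to programmed I/O for %d bytes\n",
		       msg_len);
}

int main(void)
{
	send_start(true, 4);	/* offloaded */
	send_start(false, 4);	/* quirk disables offload, FSM path used */
	return 0;
}

Letting the helper signal "not possible" keeps the caller's state machine to a single start action and avoids duplicating the fallback logic in two states, which is exactly what the deleted MV64XXX_I2C_ACTION_OFFLOAD_SEND_START case used to do.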
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index 2ca268d6140b..b170bdffb5de 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -1,7 +1,7 @@
/*
* i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
*
- * Copyright (C) 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -245,7 +245,7 @@ static void __exit nforce2_s4985_exit(void)
"Physical bus restoration failed\n");
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("S4985 SMBus multiplexing");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ac88f4000cc2..0038c451095c 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -51,7 +51,6 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/dmi.h>
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index c61f37a10a07..80e06fa45720 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index b929ba271b47..81042b08a947 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index aa9577881925..62f55fe624cb 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport-light.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
Based on older i2c-velleman.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -273,7 +273,7 @@ static void __exit i2c_parport_exit(void)
release_region(base, 3);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port (light)");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 81d887869620..a27aae2d6757 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2011 Jean Delvare <jdelvare@suse.de>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -298,7 +298,7 @@ static void __exit i2c_parport_exit(void)
parport_unregister_driver(&i2c_parport_driver);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index 3fe652302ea7..e572f3aac0f7 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.h I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 39e2755e3f25..845f12598e79 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a028617b8f13..39dd8ec60dfd 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,7 +22,7 @@
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
- AMD Hudson-2, CZ
+ AMD Hudson-2, ML, CZ
SMSC Victory66
Note: we assume there can only be one device, with one or more
@@ -38,7 +38,6 @@
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -208,16 +207,16 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
"WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
- "Host SMBus controller not enabled!\n");
+ "SMBus Host Controller not enabled!\n");
release_region(piix4_smba, SMBIOSIZE);
return -ENODEV;
}
}
if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
- dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
else if ((temp & 0x0E) == 0)
- dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
else
dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
"(or code out of date)!\n");
@@ -235,7 +234,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
- u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
+ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+ u8 i2ccfg, i2ccfg_offset = 0x10;
/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {
@@ -245,7 +245,15 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
}
/* Determine the address of the SMBus areas */
- smb_en = (aux) ? 0x28 : 0x2c;
+ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+ PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ PIIX4_dev->revision >= 0x41) ||
+ (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+ PIIX4_dev->device == 0x790b &&
+ PIIX4_dev->revision >= 0x49))
+ smb_en = 0x00;
+ else
+ smb_en = (aux) ? 0x28 : 0x2c;
if (!request_region(smba_idx, 2, "smba_idx")) {
dev_err(&PIIX4_dev->dev, "SMBus base address index region "
@@ -258,13 +266,22 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
smba_en_hi = inb_p(smba_idx + 1);
release_region(smba_idx, 2);
- if ((smba_en_lo & 1) == 0) {
+ if (!smb_en) {
+ smb_en_status = smba_en_lo & 0x10;
+ piix4_smba = smba_en_hi << 8;
+ if (aux)
+ piix4_smba |= 0x20;
+ } else {
+ smb_en_status = smba_en_lo & 0x01;
+ piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+ }
+
+ if (!smb_en_status) {
dev_err(&PIIX4_dev->dev,
- "Host SMBus controller not enabled!\n");
+ "SMBus Host Controller not enabled!\n");
return -ENODEV;
}
- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
@@ -277,7 +294,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Aux SMBus does not support IRQ information */
if (aux) {
dev_info(&PIIX4_dev->dev,
- "SMBus Host Controller at 0x%x\n", piix4_smba);
+ "Auxiliary SMBus Host Controller at 0x%x\n",
+ piix4_smba);
return piix4_smba;
}
@@ -292,9 +310,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
release_region(piix4_smba + i2ccfg_offset, 1);
if (i2ccfg & 1)
- dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
else
- dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");
+ dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
dev_info(&PIIX4_dev->dev,
"SMBus Host Controller at 0x%x, revision %d\n",
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index f6389e2c9d02..8564768fee32 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 8c87f4a9793b..01e967763c2a 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index ac80199885be..c83fc3ccdd2b 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2c2fd7c2b116..0282d4d42805 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -111,6 +110,7 @@ struct rcar_i2c_priv {
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
+ struct clk *clk;
spinlock_t lock;
wait_queue_head_t wait;
@@ -227,18 +227,12 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
u32 bus_speed,
struct device *dev)
{
- struct clk *clkp = clk_get(dev, NULL);
u32 scgd, cdf;
u32 round, ick;
u32 scl;
u32 cdf_width;
unsigned long rate;
- if (IS_ERR(clkp)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(clkp);
- }
-
switch (priv->devtype) {
case I2C_RCAR_GEN1:
cdf_width = 2;
@@ -266,7 +260,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
* clkp : peripheral_clk
* F[] : integer up-valuation
*/
- rate = clk_get_rate(clkp);
+ rate = clk_get_rate(priv->clk);
cdf = rate / 20000000;
if (cdf >= 1 << cdf_width) {
dev_err(dev, "Input clock %lu too high\n", rate);
@@ -308,7 +302,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, bus_speed, clk_get_rate(clkp), round, cdf, scgd);
+ scl, bus_speed, clk_get_rate(priv->clk), round, cdf, scgd);
/*
* keep icccr value
@@ -604,7 +598,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
* error handling
*/
if (rcar_i2c_flags_has(priv, ID_NACK)) {
- ret = -EREMOTEIO;
+ ret = -ENXIO;
break;
}
@@ -623,7 +617,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_put(dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENXIO)
dev_err(dev, "error %d : %x\n", ret, priv->flags);
return ret;
@@ -664,6 +658,12 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
bus_speed = 100000; /* default 100 kHz */
ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
if (ret < 0 && pdata && pdata->bus_speed)
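
Two behavioural points in the i2c-rcar hunks above are easy to miss: the clock is now looked up once at probe time with devm_clk_get(), so it is managed and the clock-calculation path no longer needs its own error handling, and a NACKed transfer is reported as -ENXIO instead of -EREMOTEIO and no longer logged as an error. A hypothetical caller sketch, assuming the usual i2c_transfer() semantics, showing why the quieter -ENXIO matters when probing for an optional slave:

#include <linux/i2c.h>

/* Hypothetical helper, not from the patch: probe whether a slave answers.
 * On i2c-rcar a missing slave now yields -ENXIO without a kernel error
 * message, so this kind of probing stays quiet. */
static bool example_slave_present(struct i2c_client *client)
{
	u8 dummy = 0;
	struct i2c_msg msg = {
		.addr	= client->addr,
		.flags	= 0,		/* write */
		.len	= 1,
		.buf	= &dummy,
	};

	return i2c_transfer(client->adapter, &msg, 1) == 1;
}
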
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 599235514138..dfc98df7b1b6 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 5e8f136e233f..d76f3d9737ec 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 4fc87e7c94c9..294c80f21d65 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 36a9556d7cfa..19b8505d0cdd 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -45,7 +45,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index b9faf9b6002b..f8aa0c29f02b 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -36,7 +36,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 04a17b9b38bb..5b80ef310841 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -801,7 +801,7 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
/* Check that the bus is free, or wait until some timeout occurs */
ret = stu300_wait_while_busy(dev);
if (ret != 0) {
- dev_err(&dev->pdev->dev, "timout waiting for transfer "
+ dev_err(&dev->pdev->dev, "timeout waiting for transfer "
"to commence.\n");
goto exit_disable;
}
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 6ffa56e08517..057602683553 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -3,7 +3,7 @@
* These devices include an I2C master which can be controlled over the
* serial port.
*
- * Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -321,7 +321,7 @@ static void __exit taos_exit(void)
serio_unregister_driver(&taos_drv);
}
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("TAOS evaluation module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e661edee4d0c..9704537aee3c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/module.h>
-#include <linux/clk/tegra.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -160,6 +160,7 @@ struct tegra_i2c_dev {
struct i2c_adapter adapter;
struct clk *div_clk;
struct clk *fast_clk;
+ struct reset_control *rst;
void __iomem *base;
int cont_id;
int irq;
@@ -415,9 +416,9 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
return err;
}
- tegra_periph_reset_assert(i2c_dev->div_clk);
+ reset_control_assert(i2c_dev->rst);
udelay(2);
- tegra_periph_reset_deassert(i2c_dev->div_clk);
+ reset_control_deassert(i2c_dev->rst);
if (i2c_dev->is_dvc)
tegra_dvc_init(i2c_dev);
@@ -743,6 +744,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
+ i2c_dev->rst = devm_reset_control_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_dev->rst)) {
+ dev_err(&pdev->dev, "missing controller reset");
+ return PTR_ERR(i2c_dev->rst);
+ }
+
ret = of_property_read_u32(i2c_dev->dev->of_node, "clock-frequency",
&i2c_dev->bus_clk_rate);
if (ret)
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index be662511c58b..49d7f14b9d27 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index b2d90e105f41..40d36df678de 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -2,7 +2,7 @@
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>,
Mark D. Studebaker <mdsxyz123@yahoo.com>
- Copyright (C) 2005 - 2008 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2005 - 2008 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -503,7 +503,7 @@ static void __exit i2c_vt596_exit(void)
MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
"Mark D. Studebaker <mdsxyz123@yahoo.com> and "
- "Jean Delvare <khali@linux-fr.org>");
+ "Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("vt82c596 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 6f9918f37b91..28107502517f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -30,7 +30,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 7945b05d3ea0..17f7352eca6b 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index ae1258b95d60..8eadf0f47ad7 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d74c0b34248e..5fb80b8962a2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -21,7 +21,7 @@
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
- Jean Delvare <khali@linux-fr.org>
+ Jean Delvare <jdelvare@suse.de>
Mux support by Rodolfo Giometti <giometti@enneenne.com> and
Michael Lawnick <michael.lawnick.ext@nsn.com>
OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
@@ -104,6 +104,11 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct i2c_client *client = to_i2c_client(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
if (add_uevent_var(env, "MODALIAS=%s%s",
I2C_MODULE_PREFIX, client->name))
@@ -256,10 +261,9 @@ static int i2c_device_probe(struct device *dev)
acpi_dev_pm_attach(&client->dev, true);
status = driver->probe(client, i2c_match_id(driver->id_table, client));
- if (status) {
- i2c_set_clientdata(client, NULL);
+ if (status)
acpi_dev_pm_detach(&client->dev, true);
- }
+
return status;
}
@@ -267,7 +271,7 @@ static int i2c_device_remove(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- int status;
+ int status = 0;
if (!client || !dev->driver)
return 0;
@@ -276,12 +280,8 @@ static int i2c_device_remove(struct device *dev)
if (driver->remove) {
dev_dbg(dev, "remove\n");
status = driver->remove(client);
- } else {
- dev->driver = NULL;
- status = 0;
}
- if (status == 0)
- i2c_set_clientdata(client, NULL);
+
acpi_dev_pm_detach(&client->dev, true);
return status;
}
@@ -409,6 +409,12 @@ static ssize_t
show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
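
The i2c-core hunks above route uevents and the sysfs modalias through acpi_device_uevent_modalias() and acpi_device_modalias() first; both return -ENODEV for clients that were not enumerated from ACPI, in which case the classic i2c:<name> alias is used. The practical effect is module autoloading by ACPI _HID. A hypothetical driver fragment showing the match table such a driver would export (the _HID "XYZ1234" is made up):

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

/* Sketch only: with the change above, an ACPI-enumerated client emits an
 * acpi: MODALIAS, so autoloading works against this table rather than the
 * i2c_device_id table. */
static const struct acpi_device_id foo_acpi_match[] = {
	{ "XYZ1234", 0 },	/* hypothetical ACPI _HID */
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name			= "foo",
		.acpi_match_table	= ACPI_PTR(foo_acpi_match),
	},
	/* .probe, .remove and .id_table omitted from this sketch */
};
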
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index c99b22987366..fc99f0d6b4a5 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -2,7 +2,7 @@
* i2c-smbus.c - SMBus extensions to the I2C protocol
*
* Copyright (C) 2008 David Brownell
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -246,6 +246,6 @@ EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
module_i2c_driver(smbalert_driver);
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("SMBus protocol extensions support");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index d0a9c590c3cd..77e4849d2f2a 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -2,7 +2,7 @@
i2c-stub.c - I2C/SMBus chip emulator
Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
- Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index c58e093b6032..69afffa8f427 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 8a8c56f4b026..d8989c823f50 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -12,7 +12,6 @@
#include <linux/i2c-mux.h>
#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index c4f08ad31183..cb772775da43 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index e835304e7b5a..550bd36aa5d6 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -28,7 +28,7 @@
* Based on:
* i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
* and
- * pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ * pca9540.c from Jean Delvare <jdelvare@suse.de>.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -40,7 +40,6 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/i2c/pca954x.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index d7978dc4ad0b..4ff0ef3e07a6 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -18,7 +18,6 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/i2c-mux-pinctrl.h>
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index d9e1f7ccfe6f..b6940992a6ff 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
#include <linux/ide.h>
#include <linux/pci.h>
#include <linux/dmi.h>
@@ -98,6 +97,17 @@ bool ide_port_acpi(ide_hwif_t *hwif)
return ide_noacpi == 0 && hwif->acpidata;
}
+static acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+ struct acpi_device *adev;
+
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return NULL;
+
+ adev = acpi_find_child_device(adev, addr, false);
+ return adev ? adev->handle : NULL;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6490a2dea96b..f079ca2f260b 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -9,7 +9,9 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
+#include <linux/ide.h>
#include <scsi/scsi.h>
+#include "ide-cd.h"
#ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
void ide_cd_log_error(const char *name, struct request *failed_command,
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
index a8c2c8f8660a..40e683a84ff9 100644
--- a/drivers/ide/ide-pio-blacklist.c
+++ b/drivers/ide/ide-pio-blacklist.c
@@ -7,6 +7,7 @@
*/
#include <linux/string.h>
+#include <linux/ide.h>
static struct ide_pio_info {
const char *name;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 6c0e0452dd9b..8e1939f564f4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -635,39 +635,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
*/
static int intel_idle_cpu_init(int cpu)
{
- int cstate;
struct cpuidle_device *dev;
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
- dev->state_count = 1;
-
- for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate, mwait_substate;
-
- if (cpuidle_state_table[cstate].enter == NULL)
- break;
-
- if (cstate + 1 > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n", max_cstate);
- break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
- mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
-
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
-
- /* if sub-state in table is not enumerated by CPUID */
- if ((mwait_substate + 1) > num_substates)
- continue;
-
- dev->state_count += 1;
- }
-
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
@@ -679,6 +650,9 @@ static int intel_idle_cpu_init(int cpu)
if (icpu->auto_demotion_disable_flags)
smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+ if (icpu->disable_promotion_to_c1e)
+ smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
+
return 0;
}
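
Besides dropping the per-device state_count loop (the states enumerated by intel_idle_cpuidle_driver_init() are used as-is), the hunk above also disables C1E promotion on each CPU. The helper itself is not shown in this hunk; a minimal sketch of what such a per-CPU callback typically does, assuming the documented C1E enable bit (bit 1) in MSR_IA32_POWER_CTL:

#include <asm/msr.h>

/* Sketch only, not copied from the driver: clear the C1E promotion bit
 * on the CPU this runs on; invoked per CPU via smp_call_function_single(). */
static void example_c1e_promotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
	msr_bits &= ~0x2;		/* bit 1: C1E enable */
	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
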
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..a7e68c81f89d 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
+ .scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
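
The bma180 hunk above switches the accelerometer channels from indexed to per-axis ("modified") channels; assuming standard IIO naming this exposes them as in_accel_x_raw, in_accel_y_raw and in_accel_z_raw rather than index-based names, and the soft timestamp moves to scan index 3 because the axes now occupy 0-2. Expanded by hand, BMA180_CHANNEL(X) now yields roughly:

/* Hand expansion of the new macro, for illustration only */
static const struct iio_chan_spec bma180_x_channel_example = {
	.type = IIO_ACCEL,
	.modified = 1,
	.channel2 = IIO_MOD_X,
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
	.scan_index = AXIS_X,
	.scan_type = {
		.sign = 's',
		.realbits = 14,
		/* remaining scan_type fields as in the original macro */
	},
};
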
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 2209f28441e9..4bf4c16de976 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -155,6 +155,16 @@ config MCP3422
This driver can also be built as a module. If so, the module will be
called mcp3422.
+config MEN_Z188_ADC
+ tristate "MEN 16z188 ADC IP Core support"
+ depends on MCB
+ help
+ Say yes here to enable support for the MEN 16z188 ADC IP-Core on an MCB
+ carrier.
+
+ This driver can also be built as a module. If so, the module will be
+ called men_z188_adc.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
@@ -197,6 +207,16 @@ config TWL6030_GPADC
This driver can also be built as a module. If so, the module will be
called twl6030-gpadc.
+config VF610_ADC
+ tristate "Freescale vf610 ADC driver"
+ depends on OF
+ help
+ Say yes here to support the Vybrid board analog-to-digital converter.
+ Since the same IP is used on i.MX6SLX, the driver also supports i.MX6SLX.
+
+ This driver can also be built as a module. If so, the module will be
+ called vf610_adc.
+
config VIPERBOARD_ADC
tristate "Viperboard ADC support"
depends on MFD_VIPERBOARD && USB
@@ -204,4 +224,17 @@ config VIPERBOARD_ADC
Say yes here to access the ADC part of the Nano River
Technologies Viperboard.
+config XILINX_XADC
+ tristate "Xilinx XADC driver"
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to have support for the Xilinx XADC. The driver supports
+ both the ZYNQ interface to the XADC and the AXI-XADC interface.
+
+ The driver can also be built as a module. If so, the module will be called
+ xilinx-xadc.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ba9a10a24cd0..bb252540664a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -17,8 +17,12 @@ obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
+obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
+obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..9cf3229a7272 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -8,17 +8,11 @@
* based on linux/drivers/acorn/char/pcf8583.c
* Copyright (C) 2000 Russell King
*
+ * Driver for max1363 and similar chips.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * max1363.c
- *
- * Partial support for max1363 and similar chips.
- *
- * Not currently implemented.
- *
- * - Control of internal reference.
*/
#include <linux/interrupt.h>
@@ -1253,7 +1247,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11604] = {
.bits = 8,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
@@ -1313,7 +1307,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11610] = {
.bits = 10,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
@@ -1373,7 +1367,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11616] = {
.bits = 12,
- .int_vref_mv = 4098,
+ .int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
@@ -1560,7 +1554,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
new file mode 100644
index 000000000000..6989c16aec2b
--- /dev/null
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -0,0 +1,172 @@
+/*
+ * MEN 16z188 Analog to Digital Converter
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mcb.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#define Z188_ADC_MAX_CHAN 8
+#define Z188_ADC_GAIN 0x0700000
+#define Z188_MODE_VOLTAGE BIT(27)
+#define Z188_CFG_AUTO 0x1
+#define Z188_CTRL_REG 0x40
+
+#define ADC_DATA(x) (((x) >> 2) & 0x7ffffc)
+#define ADC_OVR(x) ((x) & 0x1)
+
+struct z188_adc {
+ struct resource *mem;
+ void __iomem *base;
+};
+
+#define Z188_ADC_CHANNEL(idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec z188_adc_iio_channels[] = {
+ Z188_ADC_CHANNEL(0),
+ Z188_ADC_CHANNEL(1),
+ Z188_ADC_CHANNEL(2),
+ Z188_ADC_CHANNEL(3),
+ Z188_ADC_CHANNEL(4),
+ Z188_ADC_CHANNEL(5),
+ Z188_ADC_CHANNEL(6),
+ Z188_ADC_CHANNEL(7),
+};
+
+static int z188_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ struct z188_adc *adc = iio_priv(iio_dev);
+ int ret;
+ u16 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ tmp = readw(adc->base + chan->channel * 4);
+
+ if (ADC_OVR(tmp)) {
+ dev_info(&iio_dev->dev,
+ "Oversampling error on ADC channel %d\n",
+ chan->channel);
+ return -EIO;
+ }
+ *val = ADC_DATA(tmp);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct iio_info z188_adc_info = {
+ .read_raw = &z188_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static void men_z188_config_channels(void __iomem *addr)
+{
+ int i;
+ u32 cfg;
+ u32 ctl;
+
+ ctl = readl(addr + Z188_CTRL_REG);
+ ctl |= Z188_CFG_AUTO;
+ writel(ctl, addr + Z188_CTRL_REG);
+
+ for (i = 0; i < Z188_ADC_MAX_CHAN; i++) {
+ cfg = readl(addr + i);
+ cfg &= ~Z188_ADC_GAIN;
+ cfg |= Z188_MODE_VOLTAGE;
+ writel(cfg, addr + i);
+ }
+}
+
+static int men_z188_probe(struct mcb_device *dev,
+ const struct mcb_device_id *id)
+{
+ struct z188_adc *adc;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+
+ indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ indio_dev->name = "z188-adc";
+ indio_dev->dev.parent = &dev->dev;
+ indio_dev->info = &z188_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = z188_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
+
+ mem = mcb_request_mem(dev, "z188-adc");
+ if (!mem)
+ return -ENOMEM;
+
+ adc->base = ioremap(mem->start, resource_size(mem));
+ if (adc->base == NULL)
+ goto err;
+
+ men_z188_config_channels(adc->base);
+
+ adc->mem = mem;
+ mcb_set_drvdata(dev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+err:
+ mcb_release_mem(mem);
+ return -ENXIO;
+}
+
+static void men_z188_remove(struct mcb_device *dev)
+{
+ struct iio_dev *indio_dev = mcb_get_drvdata(dev);
+ struct z188_adc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iounmap(adc->base);
+ mcb_release_mem(adc->mem);
+}
+
+static const struct mcb_device_id men_z188_ids[] = {
+ { .device = 0xbc },
+};
+MODULE_DEVICE_TABLE(mcb, men_z188_ids);
+
+static struct mcb_driver men_z188_driver = {
+ .driver = {
+ .name = "z188-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z188_probe,
+ .remove = men_z188_remove,
+ .id_table = men_z188_ids,
+};
+module_mcb_driver(men_z188_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
+MODULE_ALIAS("mcb:16z188");
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 31e786e3999b..a4db3026bec6 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -13,7 +13,6 @@
* GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 53e1c645cee7..15282f148b3b 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -28,7 +28,6 @@
* 02110-1301 USA
*
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -969,7 +968,7 @@ static int twl6030_gpadc_suspend(struct device *pdev)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCR,
TWL6030_REG_TOGGLE1);
if (ret)
- dev_err(pdev, "error reseting GPADC (%d)!\n", ret);
+ dev_err(pdev, "error resetting GPADC (%d)!\n", ret);
return 0;
};
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
new file mode 100644
index 000000000000..44799eb5930e
--- /dev/null
+++ b/drivers/iio/adc/vf610_adc.c
@@ -0,0 +1,711 @@
+/*
+ * Freescale Vybrid vf610 ADC driver
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/driver.h>
+
+/* This will be the driver name the kernel reports */
+#define DRIVER_NAME "vf610-adc"
+
+/* Vybrid/IMX ADC registers */
+#define VF610_REG_ADC_HC0 0x00
+#define VF610_REG_ADC_HC1 0x04
+#define VF610_REG_ADC_HS 0x08
+#define VF610_REG_ADC_R0 0x0c
+#define VF610_REG_ADC_R1 0x10
+#define VF610_REG_ADC_CFG 0x14
+#define VF610_REG_ADC_GC 0x18
+#define VF610_REG_ADC_GS 0x1c
+#define VF610_REG_ADC_CV 0x20
+#define VF610_REG_ADC_OFS 0x24
+#define VF610_REG_ADC_CAL 0x28
+#define VF610_REG_ADC_PCTL 0x30
+
+/* Configuration register field define */
+#define VF610_ADC_MODE_BIT8 0x00
+#define VF610_ADC_MODE_BIT10 0x04
+#define VF610_ADC_MODE_BIT12 0x08
+#define VF610_ADC_MODE_MASK 0x0c
+#define VF610_ADC_BUSCLK2_SEL 0x01
+#define VF610_ADC_ALTCLK_SEL 0x02
+#define VF610_ADC_ADACK_SEL 0x03
+#define VF610_ADC_ADCCLK_MASK 0x03
+#define VF610_ADC_CLK_DIV2 0x20
+#define VF610_ADC_CLK_DIV4 0x40
+#define VF610_ADC_CLK_DIV8 0x60
+#define VF610_ADC_CLK_MASK 0x60
+#define VF610_ADC_ADLSMP_LONG 0x10
+#define VF610_ADC_ADSTS_MASK 0x300
+#define VF610_ADC_ADLPC_EN 0x80
+#define VF610_ADC_ADHSC_EN 0x400
+#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VBG 0x1000
+#define VF610_ADC_ADTRG_HARD 0x2000
+#define VF610_ADC_AVGS_8 0x4000
+#define VF610_ADC_AVGS_16 0x8000
+#define VF610_ADC_AVGS_32 0xC000
+#define VF610_ADC_AVGS_MASK 0xC000
+#define VF610_ADC_OVWREN 0x10000
+
+/* General control register field define */
+#define VF610_ADC_ADACKEN 0x1
+#define VF610_ADC_DMAEN 0x2
+#define VF610_ADC_ACREN 0x4
+#define VF610_ADC_ACFGT 0x8
+#define VF610_ADC_ACFE 0x10
+#define VF610_ADC_AVGEN 0x20
+#define VF610_ADC_ADCON 0x40
+#define VF610_ADC_CAL 0x80
+
+/* Other field define */
+#define VF610_ADC_ADCHC(x) ((x) & 0xF)
+#define VF610_ADC_AIEN (0x1 << 7)
+#define VF610_ADC_CONV_DISABLE 0x1F
+#define VF610_ADC_HS_COCO0 0x1
+#define VF610_ADC_CALF 0x2
+#define VF610_ADC_TIMEOUT msecs_to_jiffies(100)
+
+enum clk_sel {
+ VF610_ADCIOC_BUSCLK_SET,
+ VF610_ADCIOC_ALTCLK_SET,
+ VF610_ADCIOC_ADACK_SET,
+};
+
+enum vol_ref {
+ VF610_ADCIOC_VR_VREF_SET,
+ VF610_ADCIOC_VR_VALT_SET,
+ VF610_ADCIOC_VR_VBG_SET,
+};
+
+enum average_sel {
+ VF610_ADC_SAMPLE_1,
+ VF610_ADC_SAMPLE_4,
+ VF610_ADC_SAMPLE_8,
+ VF610_ADC_SAMPLE_16,
+ VF610_ADC_SAMPLE_32,
+};
+
+struct vf610_adc_feature {
+ enum clk_sel clk_sel;
+ enum vol_ref vol_ref;
+
+ int clk_div;
+ int sample_rate;
+ int res_mode;
+
+ bool lpm;
+ bool calibration;
+ bool ovwren;
+};
+
+struct vf610_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+
+ u32 vref_uv;
+ u32 value;
+ struct regulator *vref;
+ struct vf610_adc_feature adc_feature;
+
+ struct completion completion;
+};
+
+#define VF610_ADC_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec vf610_adc_iio_channels[] = {
+ VF610_ADC_CHAN(0, IIO_VOLTAGE),
+ VF610_ADC_CHAN(1, IIO_VOLTAGE),
+ VF610_ADC_CHAN(2, IIO_VOLTAGE),
+ VF610_ADC_CHAN(3, IIO_VOLTAGE),
+ VF610_ADC_CHAN(4, IIO_VOLTAGE),
+ VF610_ADC_CHAN(5, IIO_VOLTAGE),
+ VF610_ADC_CHAN(6, IIO_VOLTAGE),
+ VF610_ADC_CHAN(7, IIO_VOLTAGE),
+ VF610_ADC_CHAN(8, IIO_VOLTAGE),
+ VF610_ADC_CHAN(9, IIO_VOLTAGE),
+ VF610_ADC_CHAN(10, IIO_VOLTAGE),
+ VF610_ADC_CHAN(11, IIO_VOLTAGE),
+ VF610_ADC_CHAN(12, IIO_VOLTAGE),
+ VF610_ADC_CHAN(13, IIO_VOLTAGE),
+ VF610_ADC_CHAN(14, IIO_VOLTAGE),
+ VF610_ADC_CHAN(15, IIO_VOLTAGE),
+ /* sentinel */
+};
+
+/*
+ * ADC sample frequency, unit is ADCK cycles.
+ * ADC clk source is ipg clock, which is the same as bus clock.
+ *
+ * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+ * SFCAdder: fixed to 6 ADCK cycles
+ * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+ * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+ * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+ *
+ * By default, enable 12 bit resolution mode, clock source
+ * set to ipg clock, So get below frequency group:
+ */
+static const u32 vf610_sample_freq_avail[5] =
+{1941176, 559332, 286957, 145374, 73171};
+
+static inline void vf610_adc_cfg_init(struct vf610_adc *info)
+{
+ /* set default Configuration for ADC controller */
+ info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
+ info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+ info->adc_feature.calibration = true;
+ info->adc_feature.ovwren = true;
+
+ info->adc_feature.clk_div = 1;
+ info->adc_feature.res_mode = 12;
+ info->adc_feature.sample_rate = 1;
+ info->adc_feature.lpm = true;
+}
+
+static void vf610_adc_cfg_post_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &info->adc_feature;
+ int cfg_data = 0;
+ int gc_data = 0;
+
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_ALTCLK_SET:
+ cfg_data |= VF610_ADC_ALTCLK_SEL;
+ break;
+ case VF610_ADCIOC_ADACK_SET:
+ cfg_data |= VF610_ADC_ADACK_SEL;
+ break;
+ default:
+ break;
+ }
+
+ /* low power set for calibration */
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* enable high speed for calibration */
+ cfg_data |= VF610_ADC_ADHSC_EN;
+
+ /* voltage reference */
+ switch (adc_feature->vol_ref) {
+ case VF610_ADCIOC_VR_VREF_SET:
+ break;
+ case VF610_ADCIOC_VR_VALT_SET:
+ cfg_data |= VF610_ADC_REFSEL_VALT;
+ break;
+ case VF610_ADCIOC_VR_VBG_SET:
+ cfg_data |= VF610_ADC_REFSEL_VBG;
+ break;
+ default:
+ dev_err(info->dev, "error voltage reference\n");
+ }
+
+ /* data overwrite enable */
+ if (adc_feature->ovwren)
+ cfg_data |= VF610_ADC_OVWREN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_calibration(struct vf610_adc *info)
+{
+ int adc_gc, hc_cfg;
+ int timeout;
+
+ if (!info->adc_feature.calibration)
+ return;
+
+ /* enable calibration interrupt */
+ hc_cfg = VF610_ADC_AIEN | VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GC);
+ writel(adc_gc | VF610_ADC_CAL, info->regs + VF610_REG_ADC_GC);
+
+ timeout = wait_for_completion_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (timeout == 0)
+ dev_err(info->dev, "Timeout for adc calibration\n");
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GS);
+ if (adc_gc & VF610_ADC_CALF)
+ dev_err(info->dev, "ADC calibration failed\n");
+
+ info->adc_feature.calibration = false;
+}
+
+static void vf610_adc_cfg_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+
+ /* low power configuration */
+ cfg_data &= ~VF610_ADC_ADLPC_EN;
+ if (adc_feature->lpm)
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* disable high speed */
+ cfg_data &= ~VF610_ADC_ADHSC_EN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+}
+
+static void vf610_adc_sample_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data, gc_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+ gc_data = readl(info->regs + VF610_REG_ADC_GC);
+
+ /* resolution mode */
+ cfg_data &= ~VF610_ADC_MODE_MASK;
+ switch (adc_feature->res_mode) {
+ case 8:
+ cfg_data |= VF610_ADC_MODE_BIT8;
+ break;
+ case 10:
+ cfg_data |= VF610_ADC_MODE_BIT10;
+ break;
+ case 12:
+ cfg_data |= VF610_ADC_MODE_BIT12;
+ break;
+ default:
+ dev_err(info->dev, "error resolution mode\n");
+ break;
+ }
+
+ /* clock select and clock divider */
+ cfg_data &= ~(VF610_ADC_CLK_MASK | VF610_ADC_ADCCLK_MASK);
+ switch (adc_feature->clk_div) {
+ case 1:
+ break;
+ case 2:
+ cfg_data |= VF610_ADC_CLK_DIV2;
+ break;
+ case 4:
+ cfg_data |= VF610_ADC_CLK_DIV4;
+ break;
+ case 8:
+ cfg_data |= VF610_ADC_CLK_DIV8;
+ break;
+ case 16:
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_BUSCLK_SET:
+ cfg_data |= VF610_ADC_BUSCLK2_SEL | VF610_ADC_CLK_DIV8;
+ break;
+ default:
+ dev_err(info->dev, "error clk divider\n");
+ break;
+ }
+ break;
+ }
+
+ /* Use the short sample mode */
+ cfg_data &= ~(VF610_ADC_ADLSMP_LONG | VF610_ADC_ADSTS_MASK);
+
+ /* update hardware average selection */
+ cfg_data &= ~VF610_ADC_AVGS_MASK;
+ gc_data &= ~VF610_ADC_AVGEN;
+ switch (adc_feature->sample_rate) {
+ case VF610_ADC_SAMPLE_1:
+ break;
+ case VF610_ADC_SAMPLE_4:
+ gc_data |= VF610_ADC_AVGEN;
+ break;
+ case VF610_ADC_SAMPLE_8:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_8;
+ break;
+ case VF610_ADC_SAMPLE_16:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_16;
+ break;
+ case VF610_ADC_SAMPLE_32:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_32;
+ break;
+ default:
+ dev_err(info->dev,
+ "error hardware sample average select\n");
+ }
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_hw_init(struct vf610_adc *info)
+{
+ /* CFG: Feature set */
+ vf610_adc_cfg_post_set(info);
+ vf610_adc_sample_set(info);
+
+ /* adc calibration */
+ vf610_adc_calibration(info);
+
+ /* CFG: power and speed set */
+ vf610_adc_cfg_set(info);
+}
+
+static int vf610_adc_read_data(struct vf610_adc *info)
+{
+ int result;
+
+ result = readl(info->regs + VF610_REG_ADC_R0);
+
+ switch (info->adc_feature.res_mode) {
+ case 8:
+ result &= 0xFF;
+ break;
+ case 10:
+ result &= 0x3FF;
+ break;
+ case 12:
+ result &= 0xFFF;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
+{
+ struct vf610_adc *info = (struct vf610_adc *)dev_id;
+ int coco;
+
+ coco = readl(info->regs + VF610_REG_ADC_HS);
+ if (coco & VF610_ADC_HS_COCO0) {
+ info->value = vf610_adc_read_data(info);
+ complete(&info->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+
+static struct attribute *vf610_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group vf610_attribute_group = {
+ .attrs = vf610_attributes,
+};
+
+static int vf610_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ unsigned int hc_cfg;
+ long ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ reinit_completion(&info->completion);
+
+ hc_cfg = VF610_ADC_ADCHC(chan->channel);
+ hc_cfg |= VF610_ADC_AIEN;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+ ret = wait_for_completion_interruptible_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (ret == 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return -ETIMEDOUT;
+ }
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ *val = info->value;
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = info->vref_uv / 1000;
+ *val2 = info->adc_feature.res_mode;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+ *val2 = 0;
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0;
+ i < ARRAY_SIZE(vf610_sample_freq_avail);
+ i++)
+ if (val == vf610_sample_freq_avail[i]) {
+ info->adc_feature.sample_rate = i;
+ vf610_adc_sample_set(info);
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_adc_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+ if ((readval == NULL) ||
+ (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ return -EINVAL;
+
+ *readval = readl(info->regs + reg);
+
+ return 0;
+}
+
+static const struct iio_info vf610_adc_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &vf610_read_raw,
+ .write_raw = &vf610_write_raw,
+ .debugfs_reg_access = &vf610_adc_reg_access,
+ .attrs = &vf610_attribute_group,
+};
+
+static const struct of_device_id vf610_adc_match[] = {
+ { .compatible = "fsl,vf610-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_adc_match);
+
+static int vf610_adc_probe(struct platform_device *pdev)
+{
+ struct vf610_adc *info;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ info = iio_priv(indio_dev);
+ info->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(info->dev, irq,
+ vf610_adc_isr, 0,
+ dev_name(&pdev->dev), info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
+ return ret;
+ }
+
+ info->clk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
+ PTR_ERR(info->clk));
+ ret = PTR_ERR(info->clk);
+ return ret;
+ }
+
+ info->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(info->vref))
+ return PTR_ERR(info->vref);
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ info->vref_uv = regulator_get_voltage(info->vref);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ init_completion(&info->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &vf610_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vf610_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not prepare or enable the clock.\n");
+ goto error_adc_clk_enable;
+ }
+
+ vf610_adc_cfg_init(info);
+ vf610_adc_hw_init(info);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device.\n");
+ goto error_iio_device_register;
+ }
+
+ return 0;
+
+
+error_iio_device_register:
+ clk_disable_unprepare(info->clk);
+error_adc_clk_enable:
+ regulator_disable(info->vref);
+
+ return ret;
+}
+
+static int vf610_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(info->vref);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_adc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int hc_cfg;
+
+ /* ADC controller enters to stop mode */
+ hc_cfg = readl(info->regs + VF610_REG_ADC_HC0);
+ hc_cfg |= VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ clk_disable_unprepare(info->clk);
+ regulator_disable(info->vref);
+
+ return 0;
+}
+
+static int vf610_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ return ret;
+
+ vf610_adc_hw_init(info);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops,
+ vf610_adc_suspend,
+ vf610_adc_resume);
+
+static struct platform_driver vf610_adc_driver = {
+ .probe = vf610_adc_probe,
+ .remove = vf610_adc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vf610_adc_match,
+ .pm = &vf610_adc_pm_ops,
+ },
+};
+
+module_platform_driver(vf610_adc_driver);
+
+MODULE_AUTHOR("Fugang Duan <B38611@freescale.com>");
+MODULE_DESCRIPTION("Freescale VF610 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index d0add8f9416b..9acf6b6d705b 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -139,8 +139,6 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, indio_dev);
-
return 0;
}
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
new file mode 100644
index 000000000000..ab52be29141b
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -0,0 +1,1333 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013-2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Documentation for the parts can be found at:
+ * - XADC hardmacro: Xilinx UG480
+ * - ZYNQ XADC interface: Xilinx UG585
+ * - AXI XADC interface: Xilinx PG019
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "xilinx-xadc.h"
+
+static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
+
+/* ZYNQ register definitions */
+#define XADC_ZYNQ_REG_CFG 0x00
+#define XADC_ZYNQ_REG_INTSTS 0x04
+#define XADC_ZYNQ_REG_INTMSK 0x08
+#define XADC_ZYNQ_REG_STATUS 0x0c
+#define XADC_ZYNQ_REG_CFIFO 0x10
+#define XADC_ZYNQ_REG_DFIFO 0x14
+#define XADC_ZYNQ_REG_CTL 0x18
+
+#define XADC_ZYNQ_CFG_ENABLE BIT(31)
+#define XADC_ZYNQ_CFG_CFIFOTH_MASK (0xf << 20)
+#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET 20
+#define XADC_ZYNQ_CFG_DFIFOTH_MASK (0xf << 16)
+#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET 16
+#define XADC_ZYNQ_CFG_WEDGE BIT(13)
+#define XADC_ZYNQ_CFG_REDGE BIT(12)
+#define XADC_ZYNQ_CFG_TCKRATE_MASK (0x3 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV2 (0x0 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV4 (0x1 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV8 (0x2 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV16 (0x3 << 8)
+#define XADC_ZYNQ_CFG_IGAP_MASK 0x1f
+#define XADC_ZYNQ_CFG_IGAP(x) (x)
+
+#define XADC_ZYNQ_INT_CFIFO_LTH BIT(9)
+#define XADC_ZYNQ_INT_DFIFO_GTH BIT(8)
+#define XADC_ZYNQ_INT_ALARM_MASK 0xff
+#define XADC_ZYNQ_INT_ALARM_OFFSET 0
+
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK (0xf << 16)
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET 16
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK (0xf << 12)
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET 12
+#define XADC_ZYNQ_STATUS_CFIFOF BIT(11)
+#define XADC_ZYNQ_STATUS_CFIFOE BIT(10)
+#define XADC_ZYNQ_STATUS_DFIFOF BIT(9)
+#define XADC_ZYNQ_STATUS_DFIFOE BIT(8)
+#define XADC_ZYNQ_STATUS_OT BIT(7)
+#define XADC_ZYNQ_STATUS_ALM(x) BIT(x)
+
+#define XADC_ZYNQ_CTL_RESET BIT(4)
+
+#define XADC_ZYNQ_CMD_NOP 0x00
+#define XADC_ZYNQ_CMD_READ 0x01
+#define XADC_ZYNQ_CMD_WRITE 0x02
+
+#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
+
+/* AXI register definitions */
+#define XADC_AXI_REG_RESET 0x00
+#define XADC_AXI_REG_STATUS 0x04
+#define XADC_AXI_REG_ALARM_STATUS 0x08
+#define XADC_AXI_REG_CONVST 0x0c
+#define XADC_AXI_REG_XADC_RESET 0x10
+#define XADC_AXI_REG_GIER 0x5c
+#define XADC_AXI_REG_IPISR 0x60
+#define XADC_AXI_REG_IPIER 0x68
+#define XADC_AXI_ADC_REG_OFFSET 0x200
+
+#define XADC_AXI_RESET_MAGIC 0xa
+#define XADC_AXI_GIER_ENABLE BIT(31)
+
+#define XADC_AXI_INT_EOS BIT(4)
+#define XADC_AXI_INT_ALARM_MASK 0x3c0f
+
+#define XADC_FLAGS_BUFFERED BIT(0)
+
+static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t val)
+{
+ writel(val, xadc->base + reg);
+}
+
+static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t *val)
+{
+ *val = readl(xadc->base + reg);
+}
+
+/*
+ * The ZYNQ interface uses two asynchronous FIFOs for communication with the
+ * XADC. Reads and writes to the XADC register are performed by submitting a
+ * request to the command FIFO (CFIFO), once the request has been completed the
+ * result can be read from the data FIFO (DFIFO). The method currently used in
+ * this driver is to submit the request for a read/write operation, then go to
+ * sleep and wait for an interrupt that signals that a response is available in
+ * the data FIFO.
+ */
+
+static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
+ unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
+}
+
+static void xadc_zynq_drain_fifo(struct xadc *xadc)
+{
+ uint32_t status, tmp;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+
+ while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+ }
+}
+
+static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
+ unsigned int val)
+{
+ xadc->zynq_intmask &= ~mask;
+ xadc->zynq_intmask |= val;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
+ xadc->zynq_intmask | xadc->zynq_masked_alarm);
+}
+
+static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ uint32_t cmd[1];
+ uint32_t tmp;
+ int ret;
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+
+ reinit_completion(&xadc->completion);
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ else
+ ret = 0;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+
+ return ret;
+}
+
+static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t cmd[2];
+ uint32_t resp, tmp;
+ int ret;
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
+ cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ xadc_zynq_drain_fifo(xadc);
+ reinit_completion(&xadc->completion);
+
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ return ret;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+
+ *val = resp & 0xffff;
+
+ return 0;
+}
+
+static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
+{
+ return ((alarm & 0x80) >> 4) |
+ ((alarm & 0x78) << 1) |
+ (alarm & 0x07);
+}
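
xadc_zynq_transform_alarm() converts the alarm bits from the Zynq interrupt-status layout used by this driver (OT at bit 7, the upper alarms at bits 3-6) into the XADC alarm-enable bit order that xadc_handle_events() expects (OT at bit 3, the upper alarms at bits 4-7). A quick standalone sanity check of the remapping, with arbitrary example values:

#include <assert.h>

static unsigned int transform_alarm(unsigned int alarm)
{
        return ((alarm & 0x80) >> 4) |  /* OT: bit 7 -> bit 3 */
               ((alarm & 0x78) << 1) |  /* upper alarms: bits 3-6 -> bits 4-7 */
               (alarm & 0x07);          /* temp/vccint/vccaux unchanged */
}

int main(void)
{
        assert(transform_alarm(0x80) == 0x08);  /* OT only */
        assert(transform_alarm(0x88) == 0x18);  /* OT plus first upper alarm */
        assert(transform_alarm(0xff) == 0xff);  /* every alarm set */
        return 0;
}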
+
+/*
+ * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, this means
+ * that as soon as a threshold condition is present we would enter the
+ * interrupt handler again and again. To work around this we mask all active
+ * threshold interrupts in the interrupt handler and schedule delayed work. The
+ * delayed work polls the interrupt status and only unmasks an interrupt once
+ * its condition is no longer active.
+ */
+static void xadc_zynq_unmask_worker(struct work_struct *work)
+{
+ struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work);
+ unsigned int misc_sts, unmask;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);
+
+ misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
+
+ spin_lock_irq(&xadc->lock);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
+ xadc->zynq_masked_alarm &= misc_sts;
+
+ /* Also clear those which are masked out anyway */
+ xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;
+
+ /* Clear the interrupts before we unmask them */
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);
+
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+
+ spin_unlock_irq(&xadc->lock);
+
+ /* If some alarm is still pending, reschedule the delayed work */
+ if (xadc->zynq_masked_alarm) {
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+ }
+}
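
The mask bookkeeping in the worker above can be illustrated with concrete numbers, chosen arbitrarily: suppose alarms 1 and 2 had been masked and the status register now shows only alarm 1 still active. A small standalone check, not part of the driver:

#include <assert.h>

int main(void)
{
        unsigned int masked = 0x06;     /* alarms 1 and 2 currently masked */
        unsigned int sts    = 0x02;     /* only alarm 1 still active */
        unsigned int unmask = (masked ^ sts) & masked;

        assert(unmask == 0x04);         /* alarm 2 cleared, unmask it again */
        masked &= sts;
        assert(masked == 0x02);         /* alarm 1 stays masked, work reruns */
        return 0;
}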
+
+static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ spin_lock_irq(&xadc->lock);
+ alarm = xadc->zynq_alarm;
+ xadc->zynq_alarm = 0;
+ spin_unlock_irq(&xadc->lock);
+
+ xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
+
+ /* Schedule delayed work to unmask the interrupts once the alarms clear. */
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ irqreturn_t ret = IRQ_HANDLED;
+ uint32_t status;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+
+ status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);
+
+ if (!status)
+ return IRQ_NONE;
+
+ spin_lock(&xadc->lock);
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
+
+ if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ complete(&xadc->completion);
+ }
+
+ status &= XADC_ZYNQ_INT_ALARM_MASK;
+ if (status) {
+ xadc->zynq_alarm |= status;
+ xadc->zynq_masked_alarm |= status;
+ /*
+ * mask the current event interrupt,
+ * unmask it when the interrupt is no more active.
+ */
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&xadc->lock);
+
+ return ret;
+}
+
+#define XADC_ZYNQ_TCK_RATE_MAX 50000000
+#define XADC_ZYNQ_IGAP_DEFAULT 20
+
+static int xadc_zynq_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long pcap_rate;
+ unsigned int tck_div;
+ unsigned int div;
+ unsigned int igap;
+ unsigned int tck_rate;
+
+ /* TODO: Figure out how to make igap and tck_rate configurable */
+ igap = XADC_ZYNQ_IGAP_DEFAULT;
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+
+ xadc->zynq_intmask = ~0;
+
+ pcap_rate = clk_get_rate(xadc->clk);
+
+ if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+ if (tck_rate > pcap_rate / 2) {
+ div = 2;
+ } else {
+ div = pcap_rate / tck_rate;
+ if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
+ div++;
+ }
+
+ if (div <= 3)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
+ else if (div <= 7)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
+ else if (div <= 15)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
+ else
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
+ XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
+ tck_div | XADC_ZYNQ_CFG_IGAP(igap));
+
+ return 0;
+}
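
The divider selection above caps the DRP TCK at 50 MHz and then rounds the PCAP/TCK ratio up to the next supported divider. A worked example as a standalone sketch, assuming a 200 MHz PCAP clock purely for illustration:

#include <assert.h>

#define TCK_RATE_MAX 50000000

int main(void)
{
        unsigned long pcap_rate = 200000000;    /* assumed example rate */
        unsigned int tck_rate = TCK_RATE_MAX;
        unsigned int div;

        if (tck_rate > pcap_rate / 2) {
                div = 2;
        } else {
                div = pcap_rate / tck_rate;
                if (pcap_rate / div > TCK_RATE_MAX)
                        div++;
        }
        assert(div == 4);       /* 200 MHz / 4 = 50 MHz -> CFG_TCKRATE_DIV4 */
        return 0;
}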
+
+static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
+{
+ unsigned int div;
+ uint32_t val;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);
+
+ switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
+ case XADC_ZYNQ_CFG_TCKRATE_DIV4:
+ div = 4;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV8:
+ div = 8;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV16:
+ div = 16;
+ break;
+ default:
+ div = 2;
+ break;
+ }
+
+ return clk_get_rate(xadc->clk) / div;
+}
+
+static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ unsigned long flags;
+ uint32_t status;
+
+ /* Move OT to bit 7 */
+ alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+
+ /* Clear previous interrupts if any. */
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
+ ~alarm & XADC_ZYNQ_INT_ALARM_MASK);
+
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
+
+static const struct xadc_ops xadc_zynq_ops = {
+ .read = xadc_zynq_read_adc_reg,
+ .write = xadc_zynq_write_adc_reg,
+ .setup = xadc_zynq_setup,
+ .get_dclk_rate = xadc_zynq_get_dclk_rate,
+ .interrupt_handler = xadc_zynq_interrupt_handler,
+ .threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
+ .update_alarm = xadc_zynq_update_alarm,
+};
+
+static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
+ xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
+static int xadc_axi_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
+ xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);
+
+ return 0;
+}
+
+static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint32_t status, mask;
+ unsigned int events;
+
+ xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
+ status &= mask;
+
+ if (!status)
+ return IRQ_NONE;
+
+ if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
+ iio_trigger_poll(xadc->trigger, 0);
+
+ if (status & XADC_AXI_INT_ALARM_MASK) {
+ /*
+ * The order of the bits in the AXI-XADC status register does
+ * not match the order of the bits in the XADC alarm enable
+ * register. xadc_handle_events() expects the events to be in
+ * the same order as the XADC alarm enable register.
+ */
+ events = (status & 0x000e) >> 1;
+ events |= (status & 0x0001) << 3;
+ events |= (status & 0x3c00) >> 6;
+ xadc_handle_events(indio_dev, events);
+ }
+
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);
+
+ return IRQ_HANDLED;
+}
+
+static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ uint32_t val;
+ unsigned long flags;
+
+ /*
+ * The order of the bits in the AXI-XADC status register does not match
+ * the order of the bits in the XADC alarm enable register. We get
+ * passed the alarm mask in the same order as in the XADC alarm enable
+ * register.
+ */
+ alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
+ ((alarm & 0xf0) << 6);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ val &= ~XADC_AXI_INT_ALARM_MASK;
+ val |= alarm;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
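
The shuffle above maps the alarm mask from the XADC alarm-enable order (temp, vccint, vccaux, OT in bits 0-3; bram/pint/paux/ddr in bits 4-7) into the AXI-XADC IPIER layout used by this driver (OT at bit 0, temp/vccint/vccaux at bits 1-3, the remaining alarms at bits 10-13, matching XADC_AXI_INT_ALARM_MASK = 0x3c0f). A quick standalone check with arbitrary masks:

#include <assert.h>

static unsigned int to_axi_order(unsigned int alarm)
{
        return ((alarm & 0x07) << 1) |  /* temp/vccint/vccaux -> bits 1-3 */
               ((alarm & 0x08) >> 3) |  /* OT -> bit 0 */
               ((alarm & 0xf0) << 6);   /* bram/pint/paux/ddr -> bits 10-13 */
}

int main(void)
{
        assert(to_axi_order(0x0b) == 0x07);     /* temp + vccint + OT */
        assert(to_axi_order(0xff) == 0x3c0f);   /* every alarm enabled */
        return 0;
}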
+
+static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
+{
+ return clk_get_rate(xadc->clk);
+}
+
+static const struct xadc_ops xadc_axi_ops = {
+ .read = xadc_axi_read_adc_reg,
+ .write = xadc_axi_write_adc_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
+static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ uint16_t tmp;
+ int ret;
+
+ ret = _xadc_read_adc_reg(xadc, reg, &tmp);
+ if (ret)
+ return ret;
+
+ return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
+}
+
+static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_update_adc_reg(xadc, reg, mask, val);
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
+{
+ return xadc->ops->get_dclk_rate(xadc);
+}
+
+static int xadc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *mask)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int n;
+
+ n = bitmap_weight(mask, indio_dev->masklength);
+
+ kfree(xadc->data);
+ xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
+ if (!xadc->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
+{
+ switch (scan_index) {
+ case 5:
+ return XADC_REG_VCCPINT;
+ case 6:
+ return XADC_REG_VCCPAUX;
+ case 7:
+ return XADC_REG_VCCO_DDR;
+ case 8:
+ return XADC_REG_TEMP;
+ case 9:
+ return XADC_REG_VCCINT;
+ case 10:
+ return XADC_REG_VCCAUX;
+ case 11:
+ return XADC_REG_VPVN;
+ case 12:
+ return XADC_REG_VREFP;
+ case 13:
+ return XADC_REG_VREFN;
+ case 14:
+ return XADC_REG_VCCBRAM;
+ default:
+ return XADC_REG_VAUX(scan_index - 16);
+ }
+}
+
+static irqreturn_t xadc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int chan;
+ int i, j;
+
+ if (!xadc->data)
+ goto out;
+
+ j = 0;
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ chan = xadc_scan_index_to_channel(i);
+ xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
+ j++;
+ }
+
+ iio_push_to_buffers(indio_dev, xadc->data);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
+{
+ struct xadc *xadc = iio_trigger_get_drvdata(trigger);
+ unsigned long flags;
+ unsigned int convst;
+ unsigned int val;
+ int ret = 0;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state) {
+ /* Only one of the two triggers can be active at a time. */
+ if (xadc->trigger != NULL) {
+ ret = -EBUSY;
+ goto err_out;
+ } else {
+ xadc->trigger = trigger;
+ if (trigger == xadc->convst_trigger)
+ convst = XADC_CONF0_EC;
+ else
+ convst = 0;
+ }
+ ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC,
+ convst);
+ if (ret)
+ goto err_out;
+ } else {
+ xadc->trigger = NULL;
+ }
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+ if (state)
+ val |= XADC_AXI_INT_EOS;
+ else
+ val &= ~XADC_AXI_INT_EOS;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops xadc_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &xadc_trigger_set_state,
+};
+
+static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
+ const char *name)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
+ indio_dev->id, name);
+ if (trig == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ trig->dev.parent = indio_dev->dev.parent;
+ trig->ops = &xadc_trigger_ops;
+ iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_free_trig;
+
+ return trig;
+
+error_free_trig:
+ iio_trigger_free(trig);
+ return ERR_PTR(ret);
+}
+
+static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
+{
+ uint16_t val;
+
+ switch (seq_mode) {
+ case XADC_CONF1_SEQ_SIMULTANEOUS:
+ case XADC_CONF1_SEQ_INDEPENDENT:
+ val = XADC_CONF2_PD_ADC_B;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
+ val);
+}
+
+static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
+{
+ unsigned int aux_scan_mode = scan_mode >> 16;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+
+ if ((aux_scan_mode & 0xff00) == 0 ||
+ (aux_scan_mode & 0x00ff) == 0)
+ return XADC_CONF1_SEQ_CONTINUOUS;
+
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+}
+
+static int xadc_postdisable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int ret;
+ int i;
+
+ scan_mask = 1; /* Run calibration as part of the sequence */
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ /* Enable all channels and calibration */
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ return ret;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ return ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
+}
+
+static int xadc_preenable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int seq_mode;
+ int ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_DEFAULT);
+ if (ret)
+ goto err;
+
+ scan_mask = *indio_dev->active_scan_mask;
+ seq_mode = xadc_get_seq_mode(xadc, scan_mask);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ goto err;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ goto err;
+
+ ret = xadc_power_adc_b(xadc, seq_mode);
+ if (ret)
+ goto err;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ seq_mode);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xadc_postdisable(indio_dev);
+ return ret;
+}
+
+static struct iio_buffer_setup_ops xadc_buffer_ops = {
+ .preenable = &xadc_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &xadc_postdisable,
+};
+
+static int xadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int div;
+ uint16_t val16;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ ret = xadc_read_adc_reg(xadc, chan->address, &val16);
+ if (ret < 0)
+ return ret;
+
+ val16 >>= 4;
+ if (chan->scan_type.sign == 'u')
+ *val = val16;
+ else
+ *val = sign_extend32(val16, 11);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /* V = (val * 3.0) / 4096 */
+ switch (chan->address) {
+ case XADC_REG_VCCINT:
+ case XADC_REG_VCCAUX:
+ case XADC_REG_VCCBRAM:
+ case XADC_REG_VCCPINT:
+ case XADC_REG_VCCPAUX:
+ case XADC_REG_VCCO_DDR:
+ *val = 3000;
+ break;
+ default:
+ *val = 1000;
+ break;
+ }
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* Temp in C = (val * 503.975) / 4096 - 273.15 */
+ *val = 503975;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = -((273150 << 12) / 503975);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+ if (ret)
+ return ret;
+
+ div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+ if (div < 2)
+ div = 2;
+
+ *val = xadc_get_dclk_rate(xadc) / div / 26;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
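
Putting the RAW/SCALE/OFFSET pieces above together for the temperature channel: userspace computes (raw + offset) * scale, with scale = 503975/4096 millidegrees and offset = -((273150 << 12) / 503975), which is -2219. A worked example as a standalone sketch, with an arbitrary 12-bit ADC code; the integer offset loses a fraction of a degree compared to the exact datasheet formula:

#include <stdio.h>

int main(void)
{
        int raw = 0x9a4;                                /* example 12-bit code */
        int offset = -((273150 << 12) / 503975);        /* -2219 */
        double temp_mdeg = (raw + offset) * (503975.0 / 4096.0);

        /* Prints roughly 30.6 degrees Celsius */
        printf("%.1f C\n", temp_mdeg / 1000.0);
        return 0;
}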
+
+static int xadc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long clk_rate = xadc_get_dclk_rate(xadc);
+ unsigned int div;
+
+ if (info != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ /* Max. 150 kSPS */
+ if (val > 150000)
+ val = 150000;
+
+ val *= 26;
+
+ /* Min 1MHz */
+ if (val < 1000000)
+ val = 1000000;
+
+ /*
+ * We want to round down, but only if we do not exceed the 150 kSPS
+ * limit.
+ */
+ div = clk_rate / val;
+ if (clk_rate / div / 26 > 150000)
+ div++;
+ if (div < 2)
+ div = 2;
+ else if (div > 0xff)
+ div = 0xff;
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
+ div << XADC_CONF2_DIV_OFFSET);
+}
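
The write path above converts the requested sample rate into the ADCCLK divider stored in CONF2 bits 15:8: one conversion takes 26 ADCCLK cycles, so the target ADCCLK is rate * 26, and the divider is bumped whenever rounding down would exceed the 150 kSPS limit. A standalone sketch, assuming a 104 MHz DCLK purely for illustration:

#include <assert.h>

int main(void)
{
        unsigned long clk_rate = 104000000;     /* assumed DCLK rate */
        int val = 100000;                       /* requested 100 kSPS */
        unsigned int div;

        val *= 26;                              /* 2.6 MHz ADCCLK needed */
        div = clk_rate / val;                   /* = 40 */
        if (clk_rate / div / 26 > 150000)
                div++;
        assert(div == 40);      /* 104 MHz / 40 / 26 = 100 kSPS exactly */
        return 0;
}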
+
+static const struct iio_event_spec xadc_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+/* Separate values for upper and lower thresholds, but only a shared enable */
+static const struct iio_event_spec xadc_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = xadc_temp_events, \
+ .num_event_specs = ARRAY_SIZE(xadc_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? xadc_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+static const struct iio_chan_spec xadc_channels[] = {
+ XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+ XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
+ XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+};
+
+static const struct iio_info xadc_info = {
+ .read_raw = &xadc_read_raw,
+ .write_raw = &xadc_write_raw,
+ .read_event_config = &xadc_read_event_config,
+ .write_event_config = &xadc_write_event_config,
+ .read_event_value = &xadc_read_event_value,
+ .write_event_value = &xadc_write_event_value,
+ .update_scan_mode = &xadc_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct of_device_id xadc_of_match_table[] = {
+ { .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
+ { .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, xadc_of_match_table);
+
+static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
+ unsigned int *conf)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ struct iio_chan_spec *channels, *chan;
+ struct device_node *chan_node, *child;
+ unsigned int num_channels;
+ const char *external_mux;
+ u32 ext_mux_chan;
+ u32 reg;
+ int ret;
+
+ *conf = 0;
+
+ ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
+ if (ret < 0 || strcasecmp(external_mux, "none") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
+ else if (strcasecmp(external_mux, "single") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
+ else if (strcasecmp(external_mux, "dual") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
+ else
+ return -EINVAL;
+
+ if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
+ ret = of_property_read_u32(np, "xlnx,external-mux-channel",
+ &ext_mux_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
+ if (ext_mux_chan == 0)
+ ext_mux_chan = XADC_REG_VPVN;
+ else if (ext_mux_chan <= 16)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ } else {
+ if (ext_mux_chan > 0 && ext_mux_chan <= 8)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ }
+
+ *conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
+ }
+
+ channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ num_channels = 9;
+ chan = &channels[9];
+
+ chan_node = of_get_child_by_name(np, "xlnx,channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ if (num_channels >= ARRAY_SIZE(xadc_channels)) {
+ of_node_put(child);
+ break;
+ }
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > 16)
+ continue;
+
+ if (of_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
+
+ if (reg == 0) {
+ chan->scan_index = 11;
+ chan->address = XADC_REG_VPVN;
+ } else {
+ chan->scan_index = 15 + reg;
+ chan->address = XADC_REG_VAUX(reg - 1);
+ }
+ num_channels++;
+ chan++;
+ }
+ }
+ of_node_put(chan_node);
+
+ indio_dev->num_channels = num_channels;
+ indio_dev->channels = krealloc(channels, sizeof(*channels) *
+ num_channels, GFP_KERNEL);
+ /* If we can't resize the channels array, just use the original */
+ if (!indio_dev->channels)
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int xadc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct iio_dev *indio_dev;
+ unsigned int bipolar_mask;
+ struct resource *mem;
+ unsigned int conf0;
+ struct xadc *xadc;
+ int ret;
+ int irq;
+ int i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ xadc = iio_priv(indio_dev);
+ xadc->ops = id->data;
+ init_completion(&xadc->completion);
+ mutex_init(&xadc->mutex);
+ spin_lock_init(&xadc->lock);
+ INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xadc->base))
+ return PTR_ERR(xadc->base);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "xadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &xadc_info;
+
+ ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
+ if (ret)
+ goto err_device_free;
+
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ ret = iio_triggered_buffer_setup(indio_dev,
+ &iio_pollfunc_store_time, &xadc_trigger_handler,
+ &xadc_buffer_ops);
+ if (ret)
+ goto err_device_free;
+
+ xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
+ if (IS_ERR(xadc->convst_trigger)) {
+ ret = PTR_ERR(xadc->convst_trigger);
+ goto err_triggered_buffer_cleanup;
+ }
+ xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
+ "samplerate");
+ if (IS_ERR(xadc->samplerate_trigger)) {
+ ret = PTR_ERR(xadc->samplerate_trigger);
+ goto err_free_convst_trigger;
+ }
+ }
+
+ xadc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xadc->clk)) {
+ ret = PTR_ERR(xadc->clk);
+ goto err_free_samplerate_trigger;
+ }
+ ret = clk_prepare_enable(xadc->clk);
+ if (ret)
+ goto err_free_samplerate_trigger;
+
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
+ if (ret)
+ goto err_free_samplerate_trigger;
+
+ ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+ xadc->ops->threaded_interrupt_handler,
+ 0, dev_name(&pdev->dev), indio_dev);
+ if (ret)
+ goto err_clk_disable_unprepare;
+
+ for (i = 0; i < 16; i++)
+ xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ &xadc->threshold[i]);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
+ if (ret)
+ goto err_free_irq;
+
+ bipolar_mask = 0;
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ if (indio_dev->channels[i].scan_type.sign == 's')
+ bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
+ }
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
+ if (ret)
+ goto err_free_irq;
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
+ bipolar_mask >> 16);
+ if (ret)
+ goto err_free_irq;
+
+ /* Disable all alarms */
+ xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+ XADC_CONF1_ALARM_MASK);
+
+ /* Set thresholds to min/max */
+ for (i = 0; i < 16; i++) {
+ /*
+ * Set max voltage threshold and both temperature thresholds to
+ * 0xffff, min voltage threshold to 0.
+ */
+ if (i % 8 < 4 || i == 7)
+ xadc->threshold[i] = 0xffff;
+ else
+ xadc->threshold[i] = 0;
+ xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ xadc->threshold[i]);
+ }
+
+ /* Go to non-buffered mode */
+ xadc_postdisable(indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, indio_dev);
+err_free_samplerate_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->samplerate_trigger);
+err_free_convst_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->convst_trigger);
+err_triggered_buffer_cleanup:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_triggered_buffer_cleanup(indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
+err_device_free:
+ kfree(indio_dev->channels);
+
+ return ret;
+}
+
+static int xadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int irq = platform_get_irq(pdev, 0);
+
+ iio_device_unregister(indio_dev);
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ iio_trigger_free(xadc->samplerate_trigger);
+ iio_trigger_free(xadc->convst_trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
+ }
+ free_irq(irq, indio_dev);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
+ clk_disable_unprepare(xadc->clk);
+ kfree(xadc->data);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_driver xadc_driver = {
+ .probe = xadc_probe,
+ .remove = xadc_remove,
+ .driver = {
+ .name = "xadc",
+ .owner = THIS_MODULE,
+ .of_match_table = xadc_of_match_table,
+ },
+};
+module_platform_driver(xadc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Xilinx XADC IIO driver");
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
new file mode 100644
index 000000000000..3e7f0d7a80c3
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -0,0 +1,254 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+
+#include "xilinx-xadc.h"
+
+static const struct iio_chan_spec *xadc_event_to_channel(
+ struct iio_dev *indio_dev, unsigned int event)
+{
+ switch (event) {
+ case XADC_THRESHOLD_OT_MAX:
+ case XADC_THRESHOLD_TEMP_MAX:
+ return &indio_dev->channels[0];
+ case XADC_THRESHOLD_VCCINT_MAX:
+ case XADC_THRESHOLD_VCCAUX_MAX:
+ return &indio_dev->channels[event];
+ default:
+ return &indio_dev->channels[event-1];
+ }
+}
+
+static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
+{
+ const struct iio_chan_spec *chan;
+
+ /* Temperature threshold error, we don't handle this yet */
+ if (event == 0)
+ return;
+
+ chan = xadc_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * The temperature channel only supports over-temperature
+ * events.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ } else {
+ /*
+ * For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+ }
+}
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int i;
+
+ for_each_set_bit(i, &events, 8)
+ xadc_handle_event(indio_dev, i);
+}
+
+static unsigned xadc_get_threshold_offset(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir)
+{
+ unsigned int offset;
+
+ if (chan->type == IIO_TEMP) {
+ offset = XADC_THRESHOLD_OT_MAX;
+ } else {
+ if (chan->channel < 2)
+ offset = chan->channel + 1;
+ else
+ offset = chan->channel + 6;
+ }
+
+ if (dir == IIO_EV_DIR_FALLING)
+ offset += 4;
+
+ return offset;
+}
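
xadc_get_threshold_offset() maps an IIO channel plus event direction to an index into the XADC_REG_THRESHOLD() block: the temperature channel uses the OT pair, voltage channels 0-1 (vccint, vccaux) use indices 1-2, channels 2-5 (vccbram through vccoddr) use indices 8-11, and a falling event selects the matching *_MIN register four slots later. A standalone re-statement of that mapping, not part of the driver, with one worked case:

#include <assert.h>

static unsigned int threshold_offset(int channel, int falling, int is_temp)
{
        unsigned int offset;

        if (is_temp)
                offset = 3;                     /* XADC_THRESHOLD_OT_MAX */
        else if (channel < 2)
                offset = channel + 1;           /* vccint, vccaux */
        else
                offset = channel + 6;           /* vccbram .. vccoddr */
        return falling ? offset + 4 : offset;
}

int main(void)
{
        /* Falling threshold on vccbram (channel 2) -> VCCBRAM_MIN (12) */
        assert(threshold_offset(2, 1, 0) == 12);
        return 0;
}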
+
+static unsigned int xadc_get_alarm_mask(const struct iio_chan_spec *chan)
+{
+ if (chan->type == IIO_TEMP) {
+ return XADC_ALARM_OT_MASK;
+ } else {
+ switch (chan->channel) {
+ case 0:
+ return XADC_ALARM_VCCINT_MASK;
+ case 1:
+ return XADC_ALARM_VCCAUX_MASK;
+ case 2:
+ return XADC_ALARM_VCCBRAM_MASK;
+ case 3:
+ return XADC_ALARM_VCCPINT_MASK;
+ case 4:
+ return XADC_ALARM_VCCPAUX_MASK;
+ case 5:
+ return XADC_ALARM_VCCODDR_MASK;
+ default:
+ /* We will never get here */
+ return 0;
+ }
+ }
+}
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ return (bool)(xadc->alarm_mask & xadc_get_alarm_mask(chan));
+}
+
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ unsigned int alarm = xadc_get_alarm_mask(chan);
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint16_t cfg, old_cfg;
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state)
+ xadc->alarm_mask |= alarm;
+ else
+ xadc->alarm_mask &= ~alarm;
+
+ xadc->ops->update_alarm(xadc, xadc->alarm_mask);
+
+ ret = _xadc_read_adc_reg(xadc, XADC_REG_CONF1, &cfg);
+ if (ret)
+ goto err_out;
+
+ old_cfg = cfg;
+ cfg |= XADC_CONF1_ALARM_MASK;
+ cfg &= ~((xadc->alarm_mask & 0xf0) << 4); /* bram, pint, paux, ddr */
+ cfg &= ~((xadc->alarm_mask & 0x08) >> 3); /* ot */
+ cfg &= ~((xadc->alarm_mask & 0x07) << 1); /* temp, vccint, vccaux */
+ if (old_cfg != cfg)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_CONF1, cfg);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+/* Register value is msb aligned, the lower 4 bits are ignored */
+#define XADC_THRESHOLD_VALUE_SHIFT 4
+
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = xadc->threshold[offset];
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ *val = xadc->temp_hysteresis;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val >>= XADC_THRESHOLD_VALUE_SHIFT;
+
+ return IIO_VAL_INT;
+}
+
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int ret = 0;
+
+ val <<= XADC_THRESHOLD_VALUE_SHIFT;
+
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&xadc->mutex);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ xadc->threshold[offset] = val;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ xadc->temp_hysteresis = val;
+ break;
+ default:
+ mutex_unlock(&xadc->mutex);
+ return -EINVAL;
+ }
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * According to the datasheet we need to set the lower 4 bits to
+ * 0x3, otherwise 125 degrees Celsius will be used as the
+ * threshold.
+ */
+ val |= 0x3;
+
+ /*
+ * Since we store the hysteresis as a value relative to the
+ * threshold, but the hardware expects an absolute value, we need
+ * to recalculate this value whenever the hysteresis or the
+ * threshold changes.
+ */
+ if (xadc->threshold[offset] < xadc->temp_hysteresis)
+ xadc->threshold[offset + 4] = 0;
+ else
+ xadc->threshold[offset + 4] = xadc->threshold[offset] -
+ xadc->temp_hysteresis;
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset + 4),
+ xadc->threshold[offset + 4]);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (info == IIO_EV_INFO_VALUE)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset), val);
+
+out_unlock:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
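
A concrete example of the threshold/hysteresis bookkeeping above, as a standalone sketch with arbitrary register values: if the rising temperature threshold is 0xa000 and the hysteresis is 0x0400, the matching reset register at offset + 4 is programmed with 0x9c00, and the low bits of the rising OT threshold are forced to 0x3 so the hardware does not fall back to its fixed 125 C trip point:

#include <assert.h>

int main(void)
{
        unsigned int threshold = 0xa000;        /* rising threshold (example) */
        unsigned int hysteresis = 0x0400;       /* stored relative value */
        unsigned int reset_reg;

        reset_reg = threshold < hysteresis ? 0 : threshold - hysteresis;
        assert(reset_reg == 0x9c00);            /* written to offset + 4 */
        assert((threshold | 0x3) == 0xa003);    /* low bits forced for OT */
        return 0;
}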
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
new file mode 100644
index 000000000000..c7487e8d7f80
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -0,0 +1,209 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_XILINX_XADC__
+#define __IIO_XILINX_XADC__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct iio_dev;
+struct clk;
+struct xadc_ops;
+struct platform_device;
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events);
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir);
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state);
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2);
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2);
+
+enum xadc_external_mux_mode {
+ XADC_EXTERNAL_MUX_NONE,
+ XADC_EXTERNAL_MUX_SINGLE,
+ XADC_EXTERNAL_MUX_DUAL,
+};
+
+struct xadc {
+ void __iomem *base;
+ struct clk *clk;
+
+ const struct xadc_ops *ops;
+
+ uint16_t threshold[16];
+ uint16_t temp_hysteresis;
+ unsigned int alarm_mask;
+
+ uint16_t *data;
+
+ struct iio_trigger *trigger;
+ struct iio_trigger *convst_trigger;
+ struct iio_trigger *samplerate_trigger;
+
+ enum xadc_external_mux_mode external_mux_mode;
+
+ unsigned int zynq_alarm;
+ unsigned int zynq_masked_alarm;
+ unsigned int zynq_intmask;
+ struct delayed_work zynq_unmask_work;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ struct completion completion;
+};
+
+struct xadc_ops {
+ int (*read)(struct xadc *, unsigned int, uint16_t *);
+ int (*write)(struct xadc *, unsigned int, uint16_t);
+ int (*setup)(struct platform_device *pdev, struct iio_dev *indio_dev,
+ int irq);
+ void (*update_alarm)(struct xadc *, unsigned int);
+ unsigned long (*get_dclk_rate)(struct xadc *);
+ irqreturn_t (*interrupt_handler)(int, void *);
+ irqreturn_t (*threaded_interrupt_handler)(int, void *);
+
+ unsigned int flags;
+};
+
+static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->read(xadc, reg, val);
+}
+
+static inline int _xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->write(xadc, reg, val);
+}
+
+static inline int xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_read_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
+
+static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_write_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
+
+/* XADC hardmacro register definitions */
+#define XADC_REG_TEMP 0x00
+#define XADC_REG_VCCINT 0x01
+#define XADC_REG_VCCAUX 0x02
+#define XADC_REG_VPVN 0x03
+#define XADC_REG_VREFP 0x04
+#define XADC_REG_VREFN 0x05
+#define XADC_REG_VCCBRAM 0x06
+
+#define XADC_REG_VCCPINT 0x0d
+#define XADC_REG_VCCPAUX 0x0e
+#define XADC_REG_VCCO_DDR 0x0f
+#define XADC_REG_VAUX(x) (0x10 + (x))
+
+#define XADC_REG_MAX_TEMP 0x20
+#define XADC_REG_MAX_VCCINT 0x21
+#define XADC_REG_MAX_VCCAUX 0x22
+#define XADC_REG_MAX_VCCBRAM 0x23
+#define XADC_REG_MIN_TEMP 0x24
+#define XADC_REG_MIN_VCCINT 0x25
+#define XADC_REG_MIN_VCCAUX 0x26
+#define XADC_REG_MIN_VCCBRAM 0x27
+#define XADC_REG_MAX_VCCPINT 0x28
+#define XADC_REG_MAX_VCCPAUX 0x29
+#define XADC_REG_MAX_VCCO_DDR 0x2a
+#define XADC_REG_MIN_VCCPINT 0x2b
+#define XADC_REG_MIN_VCCPAUX 0x2c
+#define XADC_REG_MIN_VCCO_DDR 0x2d
+
+#define XADC_REG_CONF0 0x40
+#define XADC_REG_CONF1 0x41
+#define XADC_REG_CONF2 0x42
+#define XADC_REG_SEQ(x) (0x48 + (x))
+#define XADC_REG_INPUT_MODE(x) (0x4c + (x))
+#define XADC_REG_THRESHOLD(x) (0x50 + (x))
+
+#define XADC_REG_FLAG 0x3f
+
+#define XADC_CONF0_EC BIT(9)
+#define XADC_CONF0_ACQ BIT(8)
+#define XADC_CONF0_MUX BIT(11)
+#define XADC_CONF0_CHAN(x) (x)
+
+#define XADC_CONF1_SEQ_MASK (0xf << 12)
+#define XADC_CONF1_SEQ_DEFAULT (0 << 12)
+#define XADC_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define XADC_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define XADC_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+#define XADC_CONF1_SEQ_SIMULTANEOUS (4 << 12)
+#define XADC_CONF1_SEQ_INDEPENDENT (8 << 12)
+#define XADC_CONF1_ALARM_MASK 0x0f0f
+
+#define XADC_CONF2_DIV_MASK 0xff00
+#define XADC_CONF2_DIV_OFFSET 8
+
+#define XADC_CONF2_PD_MASK (0x3 << 4)
+#define XADC_CONF2_PD_NONE (0x0 << 4)
+#define XADC_CONF2_PD_ADC_B (0x2 << 4)
+#define XADC_CONF2_PD_BOTH (0x3 << 4)
+
+#define XADC_ALARM_TEMP_MASK BIT(0)
+#define XADC_ALARM_VCCINT_MASK BIT(1)
+#define XADC_ALARM_VCCAUX_MASK BIT(2)
+#define XADC_ALARM_OT_MASK BIT(3)
+#define XADC_ALARM_VCCBRAM_MASK BIT(4)
+#define XADC_ALARM_VCCPINT_MASK BIT(5)
+#define XADC_ALARM_VCCPAUX_MASK BIT(6)
+#define XADC_ALARM_VCCODDR_MASK BIT(7)
+
+#define XADC_THRESHOLD_TEMP_MAX 0x0
+#define XADC_THRESHOLD_VCCINT_MAX 0x1
+#define XADC_THRESHOLD_VCCAUX_MAX 0x2
+#define XADC_THRESHOLD_OT_MAX 0x3
+#define XADC_THRESHOLD_TEMP_MIN 0x4
+#define XADC_THRESHOLD_VCCINT_MIN 0x5
+#define XADC_THRESHOLD_VCCAUX_MIN 0x6
+#define XADC_THRESHOLD_OT_MIN 0x7
+#define XADC_THRESHOLD_VCCBRAM_MAX 0x8
+#define XADC_THRESHOLD_VCCPINT_MAX 0x9
+#define XADC_THRESHOLD_VCCPAUX_MAX 0xa
+#define XADC_THRESHOLD_VCCODDR_MAX 0xb
+#define XADC_THRESHOLD_VCCBRAM_MIN 0xc
+#define XADC_THRESHOLD_VCCPINT_MIN 0xd
+#define XADC_THRESHOLD_VCCPAUX_MIN 0xe
+#define XADC_THRESHOLD_VCCODDR_MIN 0xf
+
+#endif
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 2d9c6f8c06db..eb46e728aa2e 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -46,10 +46,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
struct iio_channel *chan;
cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
- if (cb_buff == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (cb_buff == NULL)
+ return ERR_PTR(-ENOMEM);
iio_buffer_init(&cb_buff->buffer);
@@ -91,7 +89,6 @@ error_release_channels:
iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
kfree(cb_buff);
-error_ret:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index d0505fd22ef4..fa2810032968 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -92,7 +92,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return len;
}
static int ad7303_get_vref(struct ad7303_state *st,
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index de76e6a34c1e..9a82a7255ebb 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 7d9f5c31d2fc..43d14588448d 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -15,7 +15,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 41c64a43bcab..ac2d69e34c8c 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index f8f2bf84a5a2..c197360c450b 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,7 +19,6 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
-#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index d53d91adfb55..a8e174a47bc4 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
.wai = ST_GYRO_2_WAI_EXP,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
- [1] = L3GD20H_GYRO_DEV_NAME,
- [2] = LSM330D_GYRO_DEV_NAME,
- [3] = LSM330DLC_GYRO_DEV_NAME,
- [4] = L3G4IS_GYRO_DEV_NAME,
- [5] = LSM330_GYRO_DEV_NAME,
+ [1] = LSM330D_GYRO_DEV_NAME,
+ [2] = LSM330DLC_GYRO_DEV_NAME,
+ [3] = L3G4IS_GYRO_DEV_NAME,
+ [4] = LSM330_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 16b8b8d70bf1..23c12f361b05 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 94763e25caf9..b4ad3be26687 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 463c4d9da79e..e116bd8dd0e4 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -12,4 +12,14 @@ config DHT11
Other sensors should work as well as long as they speak the
same protocol.
+config SI7005
+ tristate "SI7005 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the Silabs Si7005 relative
+ humidity and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7005.
+
endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index d5d36c0c95f9..e3f3d942e646 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_DHT11) += dht11.o
+obj-$(CONFIG_SI7005) += si7005.o
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
new file mode 100644
index 000000000000..bdd586e6d955
--- /dev/null
+++ b/drivers/iio/humidity/si7005.c
@@ -0,0 +1,189 @@
+/*
+ * si7005.c - Support for Silabs Si7005 humidity and temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x40)
+ *
+ * TODO: heater, fast mode, processed mode (temp. / linearity compensation)
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define SI7005_STATUS 0x00
+#define SI7005_DATA 0x01 /* 16-bit, MSB */
+#define SI7005_CONFIG 0x03
+#define SI7005_ID 0x11
+
+#define SI7005_STATUS_NRDY BIT(0)
+#define SI7005_CONFIG_TEMP BIT(4)
+#define SI7005_CONFIG_START BIT(0)
+
+#define SI7005_ID_7005 0x50
+#define SI7005_ID_7015 0xf0
+
+struct si7005_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 config;
+};
+
+static int si7005_read_measurement(struct si7005_data *data, bool temp)
+{
+ int tries = 50;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, SI7005_CONFIG,
+ data->config | SI7005_CONFIG_START |
+ (temp ? SI7005_CONFIG_TEMP : 0));
+ if (ret < 0)
+ goto done;
+
+ while (tries-- > 0) {
+ msleep(20);
+ ret = i2c_smbus_read_byte_data(data->client, SI7005_STATUS);
+ if (ret < 0)
+ goto done;
+ if (!(ret & SI7005_STATUS_NRDY))
+ break;
+ }
+ if (tries < 0) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = i2c_smbus_read_word_swapped(data->client, SI7005_DATA);
+
+done:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int si7005_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct si7005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = si7005_read_measurement(data, chan->type == IIO_TEMP);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ *val = 7;
+ *val2 = 812500;
+ } else {
+ *val = 3;
+ *val2 = 906250;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP)
+ *val = -50 * 32 * 4;
+ else
+ *val = -24 * 16 * 16;
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
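
The SCALE/OFFSET values above reproduce the Si7005 conversion once userspace applies (raw + offset) * scale to the 16-bit word read from the data register, which works out to T[C] = raw/128 - 50 and RH[%] = raw/256 - 24 before linearization. Two worked readings in a standalone sketch, values chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        int t_raw = 0x2580;     /* example temperature reading */
        int h_raw = 0x4000;     /* example humidity reading */

        /* (raw + offset) * scale, in milli-units as reported by IIO */
        double t  = (t_raw + -50 * 32 * 4)  * 7.812500;  /* 25000 mC   */
        double rh = (h_raw + -24 * 16 * 16) * 3.906250;  /* 40000 m%RH */

        printf("%.1f C, %.1f %%RH\n", t / 1000.0, rh / 1000.0);
        return 0;
}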
+
+static const struct iio_chan_spec si7005_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7005_info = {
+ .read_raw = si7005_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int si7005_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct si7005_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7005_info;
+
+ indio_dev->channels = si7005_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7005_channels);
+
+ ret = i2c_smbus_read_byte_data(client, SI7005_ID);
+ if (ret < 0)
+ return ret;
+ if (ret != SI7005_ID_7005 && ret != SI7005_ID_7015)
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(client, SI7005_CONFIG);
+ if (ret < 0)
+ return ret;
+ data->config = ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7005_id[] = {
+ { "si7005", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7005_id);
+
+static struct i2c_driver si7005_driver = {
+ .driver = {
+ .name = "si7005",
+ .owner = THIS_MODULE,
+ },
+ .probe = si7005_probe,
+ .id_table = si7005_id,
+};
+module_i2c_driver(si7005_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Silabs Si7005 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 663e88a1a3c1..2b0e45133e9d 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,8 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+source "drivers/iio/imu/inv_mpu6050/Kconfig"
+
endmenu
config IIO_ADIS_LIB
@@ -38,5 +40,3 @@ config IIO_ADIS_LIB_BUFFER
help
A set of buffer helper functions for the Analog Devices ADIS* device
family.
-
-source "drivers/iio/imu/inv_mpu6050/Kconfig"
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..433583b6f800 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -281,7 +281,7 @@ static ssize_t adis16400_write_frequency(struct device *dev,
st->variant->set_freq(st, val);
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return len;
}
/* Power down the device */
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
},
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index df7f1e1157ae..cb9f96b446a5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
@@ -117,7 +116,7 @@ int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
return result;
if (en) {
- /* Wait for output stablize */
+ /* Wait for output stabilize */
msleep(INV_MPU6050_TEMP_UP_TIME);
if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
/* switch internal clock to PLL */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f38395529a44..0ab382be1e64 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -126,35 +126,35 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
#define INV_MPU6050_REG_CONFIG 0x1A
#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
-#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
#define INV_MPU6050_REG_FIFO_EN 0x23
-#define INV_MPU6050_BIT_ACCEL_OUT 0x08
-#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
#define INV_MPU6050_REG_INT_ENABLE 0x38
-#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
-#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
#define INV_MPU6050_REG_RAW_ACCEL 0x3B
#define INV_MPU6050_REG_TEMPERATURE 0x41
#define INV_MPU6050_REG_RAW_GYRO 0x43
#define INV_MPU6050_REG_USER_CTRL 0x6A
-#define INV_MPU6050_BIT_FIFO_RST 0x04
-#define INV_MPU6050_BIT_DMP_RST 0x08
-#define INV_MPU6050_BIT_I2C_MST_EN 0x20
-#define INV_MPU6050_BIT_FIFO_EN 0x40
-#define INV_MPU6050_BIT_DMP_EN 0x80
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
-#define INV_MPU6050_BIT_H_RESET 0x80
-#define INV_MPU6050_BIT_SLEEP 0x40
-#define INV_MPU6050_BIT_CLK_MASK 0x7
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CLK_MASK 0x7
#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
-#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
-#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
#define INV_MPU6050_REG_FIFO_R_W 0x74
@@ -180,10 +180,10 @@ struct inv_mpu6050_state {
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
-#define INV_MPU6050_TIME_STAMP_TOR 5
-#define INV_MPU6050_MAX_FIFO_RATE 1000
-#define INV_MPU6050_MIN_FIFO_RATE 4
-#define INV_MPU6050_ONE_K_HZ 1000
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
/* scan element definition */
enum inv_mpu6050_scan {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 429517117eff..0cd306a72a6e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c67d83bdc8f0..e108f2a9d827 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -264,7 +264,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
ret = __iio_add_chan_devattr("type",
chan,
@@ -275,7 +275,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
if (chan->type != IIO_TIMESTAMP)
ret = __iio_add_chan_devattr("en",
@@ -296,10 +296,9 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
- goto error_ret;
+ return ret;
attrcount++;
ret = attrcount;
-error_ret:
return ret;
}
@@ -553,13 +552,13 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
if (indio_dev->setup_ops->predisable) {
ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
- goto error_ret;
+ return ret;
}
}
/* Keep a copy of current setup to allow roll back */
@@ -613,7 +612,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
else {
kfree(compound_mask);
ret = -EINVAL;
- goto error_ret;
+ return ret;
}
}
} else {
@@ -696,13 +695,10 @@ error_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
-
if (insert_buffer)
iio_buffer_deactivate(insert_buffer);
indio_dev->active_scan_mask = old_mask;
kfree(compound_mask);
-error_ret:
-
return ret;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index acc911a836ca..ede16aec20fb 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -540,7 +540,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
enum iio_shared_by shared_by)
{
int ret = 0;
- char *name_format = NULL;
+ char *name = NULL;
char *full_postfix;
sysfs_attr_init(&dev_attr->attr);
@@ -558,7 +558,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
->channel2],
postfix);
} else {
- if (chan->extend_name == NULL)
+ if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
full_postfix = kstrdup(postfix, GFP_KERNEL);
else
full_postfix = kasprintf(GFP_KERNEL,
@@ -572,16 +572,15 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
if (chan->differential) { /* Differential can not have modifier */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
- name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
- name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
iio_chan_type_name_spec[chan->type],
@@ -593,8 +592,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
ret = -EINVAL;
goto error_free_full_postfix;
}
- name_format
- = kasprintf(GFP_KERNEL,
+ name = kasprintf(GFP_KERNEL,
"%s_%s%d-%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
@@ -607,16 +605,15 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
} else { /* Single ended */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
- name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
- name_format = kasprintf(GFP_KERNEL, "%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
@@ -624,33 +621,24 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
case IIO_SEPARATE:
if (chan->indexed)
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
chan->channel,
full_postfix);
else
- name_format
- = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
break;
}
}
- if (name_format == NULL) {
+ if (name == NULL) {
ret = -ENOMEM;
goto error_free_full_postfix;
}
- dev_attr->attr.name = kasprintf(GFP_KERNEL,
- name_format,
- chan->channel,
- chan->channel2);
- if (dev_attr->attr.name == NULL) {
- ret = -ENOMEM;
- goto error_free_name_format;
- }
+ dev_attr->attr.name = name;
if (readfunc) {
dev_attr->attr.mode |= S_IRUGO;
@@ -661,8 +649,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
dev_attr->attr.mode |= S_IWUSR;
dev_attr->store = writefunc;
}
-error_free_name_format:
- kfree(name_format);
+
error_free_full_postfix:
kfree(full_postfix);
@@ -692,10 +679,8 @@ int __iio_add_chan_devattr(const char *postfix,
struct iio_dev_attr *iio_attr, *t;
iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
- if (iio_attr == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (iio_attr == NULL)
+ return -ENOMEM;
ret = __iio_device_attr_init(&iio_attr->dev_attr,
postfix, chan,
readfunc, writefunc, shared_by);
@@ -720,7 +705,6 @@ error_device_attr_deinit:
__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
kfree(iio_attr);
-error_ret:
return ret;
}
@@ -1134,7 +1118,7 @@ int iio_device_register(struct iio_dev *indio_dev)
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register debugfs interfaces\n");
- goto error_ret;
+ return ret;
}
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
@@ -1175,7 +1159,6 @@ error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
-error_ret:
return ret;
}
EXPORT_SYMBOL(iio_device_register);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index c9c1419fe6e0..ea6e06b9c7d4 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -40,6 +40,7 @@ struct iio_event_interface {
struct list_head dev_attr_list;
unsigned long flags;
struct attribute_group group;
+ struct mutex read_lock;
};
/**
@@ -47,16 +48,17 @@ struct iio_event_interface {
* @indio_dev: IIO device structure
* @ev_code: What event
* @timestamp: When the event occurred
+ *
+ * Note: The caller must make sure that this function is not called
+ * concurrently for the same indio_dev.
**/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
struct iio_event_interface *ev_int = indio_dev->event_interface;
struct iio_event_data ev;
- unsigned long flags;
int copied;
/* Does anyone care? */
- spin_lock_irqsave(&ev_int->wait.lock, flags);
if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
ev.id = ev_code;
@@ -64,9 +66,8 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
copied = kfifo_put(&ev_int->det_events, ev);
if (copied != 0)
- wake_up_locked_poll(&ev_int->wait, POLLIN);
+ wake_up_poll(&ev_int->wait, POLLIN);
}
- spin_unlock_irqrestore(&ev_int->wait.lock, flags);
return 0;
}
@@ -87,10 +88,8 @@ static unsigned int iio_event_poll(struct file *filep,
poll_wait(filep, &ev_int->wait, wait);
- spin_lock_irq(&ev_int->wait.lock);
if (!kfifo_is_empty(&ev_int->det_events))
events = POLLIN | POLLRDNORM;
- spin_unlock_irq(&ev_int->wait.lock);
return events;
}
@@ -111,31 +110,40 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
if (count < sizeof(struct iio_event_data))
return -EINVAL;
- spin_lock_irq(&ev_int->wait.lock);
- if (kfifo_is_empty(&ev_int->det_events)) {
- if (filep->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error_unlock;
- }
- /* Blocking on device; waiting for something to be there */
- ret = wait_event_interruptible_locked_irq(ev_int->wait,
+ do {
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events) ||
indio_dev->info == NULL);
- if (ret)
- goto error_unlock;
- if (indio_dev->info == NULL) {
- ret = -ENODEV;
- goto error_unlock;
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
}
- /* Single access device so no one else can get the data */
- }
- ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+ if (mutex_lock_interruptible(&ev_int->read_lock))
+ return -ERESTARTSYS;
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+ mutex_unlock(&ev_int->read_lock);
-error_unlock:
- spin_unlock_irq(&ev_int->wait.lock);
+ if (ret)
+ return ret;
+
+ /*
+ * If we couldn't read anything from the fifo (a different
+ * thread might have been faster) we return -EAGAIN if
+ * the file descriptor is non-blocking, otherwise we go back to
+ * sleep and wait for more data to arrive.
+ */
+ if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- return ret ? ret : copied;
+ } while (copied == 0);
+
+ return copied;
}
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
@@ -143,15 +151,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- spin_lock_irq(&ev_int->wait.lock);
- __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- /*
- * In order to maintain a clean state for reopening,
- * clear out any awaiting events. The mask will prevent
- * any new __iio_push_event calls running.
- */
- kfifo_reset_out(&ev_int->det_events);
- spin_unlock_irq(&ev_int->wait.lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
@@ -174,22 +174,20 @@ int iio_event_getfd(struct iio_dev *indio_dev)
if (ev_int == NULL)
return -ENODEV;
- spin_lock_irq(&ev_int->wait.lock);
- if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- spin_unlock_irq(&ev_int->wait.lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
return -EBUSY;
- }
- spin_unlock_irq(&ev_int->wait.lock);
+
iio_device_get(indio_dev);
fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
indio_dev, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
- spin_lock_irq(&ev_int->wait.lock);
- __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- spin_unlock_irq(&ev_int->wait.lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
+ } else {
+ kfifo_reset_out(&ev_int->det_events);
}
+
return fd;
}
@@ -366,32 +364,31 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SEPARATE, &chan->event_spec[i].mask_separate);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_TYPE,
&chan->event_spec[i].mask_shared_by_type);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_DIR,
&chan->event_spec[i].mask_shared_by_dir);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_ALL,
&chan->event_spec[i].mask_shared_by_all);
if (ret < 0)
- goto error_ret;
+ return ret;
attrcount += ret;
}
ret = attrcount;
-error_ret:
return ret;
}
@@ -425,6 +422,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
INIT_KFIFO(ev_int->det_events);
init_waitqueue_head(&ev_int->wait);
+ mutex_init(&ev_int->read_lock);
}
static const char *iio_event_group_name = "events";
@@ -440,10 +438,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
indio_dev->event_interface =
kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (indio_dev->event_interface == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (indio_dev->event_interface == NULL)
+ return -ENOMEM;
INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
@@ -489,8 +485,6 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
-error_ret:
-
return ret;
}
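The industrialio-event.c rework above drops the waitqueue-spinlock protection around the event kfifo: iio_push_event() now relies on kfifo_put()/wake_up_poll() (with the documented single-producer requirement), and the chrdev read loops with wait_event_interruptible(), draining the fifo through kfifo_to_user() under the new read_lock mutex until something is actually copied. From userspace the semantics stay the same: a blocking read() sleeps until an event arrives, and O_NONBLOCK readers get EAGAIN while the fifo is empty. A minimal consumer sketch, assuming the standard <linux/iio/events.h> uAPI and an illustrative /dev/iio:device0 path:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/iio/events.h>   /* struct iio_event_data, IIO_GET_EVENT_FD_IOCTL */

    int main(void)
    {
        struct iio_event_data ev;
        int dev_fd, event_fd;

        dev_fd = open("/dev/iio:device0", O_RDONLY);    /* illustrative device path */
        if (dev_fd < 0)
            return 1;
        /* Only one reader may hold the event fd at a time (IIO_BUSY_BIT_POS). */
        if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
            return 1;

        for (;;) {
            /* Blocks until an event is queued; with O_NONBLOCK the read
             * would instead fail with EAGAIN while the fifo is empty. */
            if (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
                printf("event id %llx at %lld ns\n",
                       (unsigned long long)ev.id, (long long)ev.timestamp);
        }
    }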
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 766fab24b720..3383b025f62e 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -62,10 +62,9 @@ int iio_trigger_register(struct iio_trigger *trig_info)
int ret;
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
- if (trig_info->id < 0) {
- ret = trig_info->id;
- goto error_ret;
- }
+ if (trig_info->id < 0)
+ return trig_info->id;
+
/* Set the name used for the sysfs directory etc */
dev_set_name(&trig_info->dev, "trigger%ld",
(unsigned long) trig_info->id);
@@ -83,7 +82,6 @@ int iio_trigger_register(struct iio_trigger *trig_info)
error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
-error_ret:
return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
@@ -234,13 +232,12 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
- goto error_ret;
+ return ret;
}
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(pf->indio_dev->info->driver_module);
-error_ret:
return ret;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index d12b2a0dbfbc..c89740d4748f 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,20 @@ config HID_SENSOR_ALS
Say yes here to build support for the HID SENSOR
Ambient light sensor.
+config HID_SENSOR_PROX
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PROX"
+ help
+ Say yes here to build support for the HID SENSOR
+ Proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-prox.
+
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
depends on MFD_LM3533
@@ -90,6 +104,18 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config LTR501
+ tristate "LTR-501ALS-01 light sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Lite-On LTR-501ALS-01
+ ambient light and proximity sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ltr501.
+
config TCS3472
tristate "TAOS TCS3472 color light-to-digital converter"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 60e35ac07ff0..3eb36e5151fa 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM36651) += cm36651.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
+obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL4531) += tsl4531.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index f3068477b466..09ad5f1ce539 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -120,7 +119,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct adjd_s311_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns();
- int len = 0;
int i, j = 0;
int ret = adjd_s311_req_data(indio_dev);
@@ -135,7 +133,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
goto done;
data->buffer[j++] = ret & ADJD_S311_DATA_MASK;
- len += 2;
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, time_ns);
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index f17b4e6183c6..47a6dbac2d0c 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -103,13 +103,13 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
/**
* cm32181_read_als_it() - Get sensor integration time (ms)
* @cm32181: pointer of struct cm32181
- * @val: pointer of int to load the als_it value.
+ * @val2: pointer of int to load the als_it value.
*
* Report the current integration time in microseconds.
*
- * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
-static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
{
u16 als_it;
int i;
@@ -119,8 +119,8 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
if (als_it == als_it_bits[i]) {
- *val = als_it_value[i];
- return IIO_VAL_INT;
+ *val2 = als_it_value[i];
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -221,7 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_read_als_it(cm32181, val);
+ ret = cm32181_read_als_it(cm32181, val2);
return ret;
}
@@ -240,7 +240,7 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
cm32181->calibscale = val;
return val;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_write_als_it(cm32181, val);
+ ret = cm32181_write_als_it(cm32181, val2);
return ret;
}
@@ -264,7 +264,7 @@ static ssize_t cm32181_get_it_available(struct device *dev,
n = ARRAY_SIZE(als_it_value);
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "%d ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
return len + sprintf(buf + len, "\n");
}
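With the cm32181 change above, the integration time is no longer returned as a bare integer: read_raw() leaves *val at 0, loads the microsecond part into *val2 and returns IIO_VAL_INT_PLUS_MICRO, and the *_available attribute prints the same values as fractional seconds ("0.%06u"). A small standalone sketch of how such an {int, micro} pair maps onto the printed value (the 800000 µs figure is only an example):

    #include <stdio.h>

    /* Roughly what the IIO core does when it formats an IIO_VAL_INT_PLUS_MICRO
     * result for sysfs: integer part, dot, zero-padded microsecond part. */
    static void show_int_plus_micro(int val, int val2)
    {
        printf("%d.%06d\n", val, val2);
    }

    int main(void)
    {
        show_int_plus_micro(0, 800000); /* 800000 us -> "0.800000" seconds        */
        show_int_plus_micro(0, 25000);  /* 25000 us  -> "0.025000", zero-padded   */
        return 0;
    }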
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 0a142af83e25..a45e07492db3 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -50,10 +50,10 @@
#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
/* CS_CONF3 channel integration time */
-#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
-#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
-#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
-#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
+#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */
/* PS_CONF1 command code */
#define CM36651_PS_ENABLE 0x00
@@ -64,10 +64,10 @@
#define CM36651_PS_PERS4 0x0C
/* PS_CONF1 command code: integration time */
-#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
-#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
-#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
-#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
+#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
+#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
+#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
+#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */
/* PS_CONF1 command code: duty ratio */
#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
@@ -93,8 +93,8 @@
#define CM36651_CLOSE_PROXIMITY 0x32
#define CM36651_FAR_PROXIMITY 0x33
-#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
-#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
+#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
+#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"
enum cm36651_operation_mode {
CM36651_LIGHT_EN,
@@ -356,30 +356,30 @@ static int cm36651_read_channel(struct cm36651_data *cm36651,
}
static int cm36651_read_int_time(struct cm36651_data *cm36651,
- struct iio_chan_spec const *chan, int *val)
+ struct iio_chan_spec const *chan, int *val2)
{
switch (chan->type) {
case IIO_LIGHT:
if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
- *val = 80000;
+ *val2 = 80000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
- *val = 160000;
+ *val2 = 160000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
- *val = 320000;
+ *val2 = 320000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
- *val = 640000;
+ *val2 = 640000;
else
return -EINVAL;
break;
case IIO_PROXIMITY:
if (cm36651->ps_int_time == CM36651_PS_IT1)
- *val = 320;
+ *val2 = 320;
else if (cm36651->ps_int_time == CM36651_PS_IT2)
- *val = 420;
+ *val2 = 420;
else if (cm36651->ps_int_time == CM36651_PS_IT3)
- *val = 520;
+ *val2 = 520;
else if (cm36651->ps_int_time == CM36651_PS_IT4)
- *val = 640;
+ *val2 = 640;
else
return -EINVAL;
break;
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -459,7 +459,8 @@ static int cm36651_read_raw(struct iio_dev *indio_dev,
ret = cm36651_read_channel(cm36651, chan, val);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm36651_read_int_time(cm36651, chan, val);
+ *val = 0;
+ ret = cm36651_read_int_time(cm36651, chan, val2);
break;
default:
ret = -EINVAL;
@@ -479,7 +480,7 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
if (mask == IIO_CHAN_INFO_INT_TIME) {
- ret = cm36651_write_int_time(cm36651, chan, val);
+ ret = cm36651_write_int_time(cm36651, chan, val2);
if (ret < 0)
dev_err(&client->dev, "Integration time write failed\n");
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
new file mode 100644
index 000000000000..1894ab196f97
--- /dev/null
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -0,0 +1,375 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESENCE 0
+
+struct prox_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info prox_attr;
+ u32 human_presence;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec prox_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .modified = 1,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_PRESENCE,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void prox_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int prox_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->scan_index) {
+ case CHANNEL_SCAN_INDEX_PRESENCE:
+ report_id = prox_state->prox_attr.report_id;
+ address =
+ HID_USAGE_SENSOR_HUMAN_PRESENCE;
+ break;
+ default:
+ report_id = -1;
+ break;
+ }
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ prox_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_PROX, address,
+ report_id);
+ else {
+ *val = 0;
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = prox_state->prox_attr.units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ prox_state->prox_attr.unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_read_samp_freq_value(
+ &prox_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_read_raw_hyst_value(
+ &prox_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int prox_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &prox_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &prox_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info prox_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &prox_read_raw,
+ .write_raw = &prox_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int prox_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "prox_proc_event [%d]\n",
+ prox_state->common_attributes.data_ready);
+ if (prox_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ &prox_state->human_presence,
+ sizeof(prox_state->human_presence));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int prox_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_HUMAN_PRESENCE:
+ prox_state->human_presence = *(u32 *)raw_data;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int prox_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct prox_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_HUMAN_PRESENCE,
+ &st->prox_attr);
+ if (ret < 0)
+ return ret;
+ prox_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESENCE,
+ st->prox_attr.size);
+
+ dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index,
+ st->prox_attr.report_id);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_PRESENCE,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_prox_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "prox";
+ struct iio_dev *indio_dev;
+ struct prox_state *prox_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct prox_state));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ prox_state = iio_priv(indio_dev);
+ prox_state->common_attributes.hsdev = hsdev;
+ prox_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_PROX,
+ &prox_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = prox_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_PROX, prox_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels =
+ ARRAY_SIZE(prox_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &prox_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ prox_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &prox_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ prox_state->callbacks.send_event = prox_proc_event;
+ prox_state->callbacks.capture_sample = prox_capture_sample;
+ prox_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PROX,
+ &prox_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&prox_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_prox_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct prox_state *prox_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&prox_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_prox_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200011",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_prox_ids);
+
+static struct platform_driver hid_prox_platform_driver = {
+ .id_table = hid_prox_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_prox_probe,
+ .remove = hid_prox_remove,
+};
+module_platform_driver(hid_prox_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Proximity");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
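In the new hid-sensor-prox driver above, prox_adjust_channel_bit_mask() sizes the channel from the HID report descriptor: the real resolution follows the field size reported by the sensor hub, while the storage slot is always a 32-bit word. A tiny standalone sketch of that mapping (the field sizes are made-up examples):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors prox_adjust_channel_bit_mask(): realbits track the descriptor's
     * field size in bytes, storagebits always cover a u32 sample. */
    static void adjust(int size_bytes, int *realbits, int *storagebits)
    {
        *realbits = size_bytes * 8;
        *storagebits = sizeof(uint32_t) * 8;
    }

    int main(void)
    {
        int real, storage;

        adjust(1, &real, &storage);
        printf("1-byte presence field -> realbits %d, storagebits %d\n", real, storage);
        adjust(2, &real, &storage);
        printf("2-byte presence field -> realbits %d, storagebits %d\n", real, storage);
        return 0;
    }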
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
new file mode 100644
index 000000000000..62b7072af4de
--- /dev/null
+++ b/drivers/iio/light/ltr501.c
@@ -0,0 +1,445 @@
+/*
+ * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor
+ *
+ * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x23
+ *
+ * TODO: interrupt, threshold, measurement rate, IR LED characteristics
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LTR501_DRV_NAME "ltr501"
+
+#define LTR501_ALS_CONTR 0x80 /* ALS operation mode, SW reset */
+#define LTR501_PS_CONTR 0x81 /* PS operation mode */
+#define LTR501_PART_ID 0x86
+#define LTR501_MANUFAC_ID 0x87
+#define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
+#define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
+#define LTR501_ALS_PS_STATUS 0x8c
+#define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
+
+#define LTR501_ALS_CONTR_SW_RESET BIT(2)
+#define LTR501_CONTR_PS_GAIN_MASK (BIT(3) | BIT(2))
+#define LTR501_CONTR_PS_GAIN_SHIFT 2
+#define LTR501_CONTR_ALS_GAIN_MASK BIT(3)
+#define LTR501_CONTR_ACTIVE BIT(1)
+
+#define LTR501_STATUS_ALS_RDY BIT(2)
+#define LTR501_STATUS_PS_RDY BIT(0)
+
+#define LTR501_PS_DATA_MASK 0x7ff
+
+struct ltr501_data {
+ struct i2c_client *client;
+ struct mutex lock_als, lock_ps;
+ u8 als_contr, ps_contr;
+};
+
+static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+{
+ int tries = 100;
+ int ret;
+
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client,
+ LTR501_ALS_PS_STATUS);
+ if (ret < 0)
+ return ret;
+ if ((ret & drdy_mask) == drdy_mask)
+ return 0;
+ msleep(25);
+ }
+
+ dev_err(&data->client->dev, "ltr501_drdy() failed, data not ready\n");
+ return -EIO;
+}
+
+static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_ALS_RDY);
+ if (ret < 0)
+ return ret;
+ /* always read both ALS channels in given order */
+ return i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, 2 * sizeof(__le16), (u8 *) buf);
+}
+
+static int ltr501_read_ps(struct ltr501_data *data)
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
+ if (ret < 0)
+ return ret;
+ return i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+}
+
+#define LTR501_INTENSITY_CHANNEL(_idx, _addr, _mod, _shared) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .address = (_addr), \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = (_shared), \
+ .scan_index = (_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+static const struct iio_chan_spec ltr501_channels[] = {
+ LTR501_INTENSITY_CHANNEL(0, LTR501_ALS_DATA0, IIO_MOD_LIGHT_BOTH, 0),
+ LTR501_INTENSITY_CHANNEL(1, LTR501_ALS_DATA1, IIO_MOD_LIGHT_IR,
+ BIT(IIO_CHAN_INFO_SCALE)),
+ {
+ .type = IIO_PROXIMITY,
+ .address = LTR501_PS_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 11,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const int ltr501_ps_gain[4][2] = {
+ {1, 0}, {0, 250000}, {0, 125000}, {0, 62500}
+};
+
+static int ltr501_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ __le16 buf[2];
+ int ret, i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ mutex_lock(&data->lock_als);
+ ret = ltr501_read_als(data, buf);
+ mutex_unlock(&data->lock_als);
+ if (ret < 0)
+ return ret;
+ *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
+ buf[0] : buf[1]);
+ return IIO_VAL_INT;
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock_ps);
+ ret = ltr501_read_ps(data);
+ mutex_unlock(&data->lock_ps);
+ if (ret < 0)
+ return ret;
+ *val = ret & LTR501_PS_DATA_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ if (data->als_contr & LTR501_CONTR_ALS_GAIN_MASK) {
+ *val = 0;
+ *val2 = 5000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else {
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+ case IIO_PROXIMITY:
+ i = (data->ps_contr & LTR501_CONTR_PS_GAIN_MASK) >>
+ LTR501_CONTR_PS_GAIN_SHIFT;
+ *val = ltr501_ps_gain[i][0];
+ *val2 = ltr501_ps_gain[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static int ltr501_get_ps_gain_index(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ltr501_ps_gain); i++)
+ if (val == ltr501_ps_gain[i][0] && val2 == ltr501_ps_gain[i][1])
+ return i;
+
+ return -1;
+}
+
+static int ltr501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ int i;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ if (val == 0 && val2 == 5000)
+ data->als_contr |= LTR501_CONTR_ALS_GAIN_MASK;
+ else if (val == 1 && val2 == 0)
+ data->als_contr &= ~LTR501_CONTR_ALS_GAIN_MASK;
+ else
+ return -EINVAL;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_ALS_CONTR, data->als_contr);
+ case IIO_PROXIMITY:
+ i = ltr501_get_ps_gain_index(val, val2);
+ if (i < 0)
+ return -EINVAL;
+ data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
+ data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_PS_CONTR, data->ps_contr);
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+static IIO_CONST_ATTR(in_proximity_scale_available, "1 0.25 0.125 0.0625");
+static IIO_CONST_ATTR(in_intensity_scale_available, "1 0.005");
+
+static struct attribute *ltr501_attributes[] = {
+ &iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ltr501_attribute_group = {
+ .attrs = ltr501_attributes,
+};
+
+static const struct iio_info ltr501_info = {
+ .read_raw = ltr501_read_raw,
+ .write_raw = ltr501_write_raw,
+ .attrs = &ltr501_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltr501_write_contr(struct i2c_client *client, u8 als_val, u8 ps_val)
+{
+ int ret = i2c_smbus_write_byte_data(client, LTR501_ALS_CONTR, als_val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, LTR501_PS_CONTR, ps_val);
+}
+
+static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ltr501_data *data = iio_priv(indio_dev);
+ u16 buf[8];
+ __le16 als_buf[2];
+ u8 mask = 0;
+ int j = 0;
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* figure out which data needs to be ready */
+ if (test_bit(0, indio_dev->active_scan_mask) ||
+ test_bit(1, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_ALS_RDY;
+ if (test_bit(2, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_PS_RDY;
+
+ ret = ltr501_drdy(data, mask);
+ if (ret < 0)
+ goto done;
+
+ if (mask & LTR501_STATUS_ALS_RDY) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, sizeof(als_buf), (u8 *) als_buf);
+ if (ret < 0)
+ goto done;
+ if (test_bit(0, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[1]);
+ if (test_bit(1, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[0]);
+ }
+
+ if (mask & LTR501_STATUS_PS_RDY) {
+ ret = i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+ if (ret < 0)
+ goto done;
+ buf[j++] = ret & LTR501_PS_DATA_MASK;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ltr501_init(struct ltr501_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_ALS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->als_contr = ret | LTR501_CONTR_ACTIVE;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->ps_contr = ret | LTR501_CONTR_ACTIVE;
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+
+static int ltr501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ltr501_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock_als);
+ mutex_init(&data->lock_ps);
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PART_ID);
+ if (ret < 0)
+ return ret;
+ if ((ret >> 4) != 0x8)
+ return -ENODEV;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ltr501_info;
+ indio_dev->channels = ltr501_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltr501_channels);
+ indio_dev->name = LTR501_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ltr501_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ltr501_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int ltr501_powerdown(struct ltr501_data *data)
+{
+ return ltr501_write_contr(data->client,
+ data->als_contr & ~LTR501_CONTR_ACTIVE,
+ data->ps_contr & ~LTR501_CONTR_ACTIVE);
+}
+
+static int ltr501_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ ltr501_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ltr501_suspend(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return ltr501_powerdown(data);
+}
+
+static int ltr501_resume(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ltr501_pm_ops, ltr501_suspend, ltr501_resume);
+
+static const struct i2c_device_id ltr501_id[] = {
+ { "ltr501", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltr501_id);
+
+static struct i2c_driver ltr501_driver = {
+ .driver = {
+ .name = LTR501_DRV_NAME,
+ .pm = &ltr501_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = ltr501_probe,
+ .remove = ltr501_remove,
+ .id_table = ltr501_id,
+};
+
+module_i2c_driver(ltr501_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Lite-On LTR501 ambient light and proximity sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 887fecf1f9bb..fe063a0a21cd 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -179,7 +179,6 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3472_data *data = iio_priv(indio_dev);
- int len = 0;
int i, j = 0;
int ret = tcs3472_req_data(data);
@@ -194,7 +193,6 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
goto done;
data->buffer[j++] = ret;
- len += 2;
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..74866d1efd1b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
@@ -511,6 +513,7 @@ static int ak8975_probe(struct i2c_client *client,
indio_dev->channels = ak8975_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
+ indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
err = iio_device_register(indio_dev);
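The ak8975 hunks above correct the scale-factor derivation in the comment and replace the open-coded integer computation with the RAW_TO_GAUSS() macro, whose result is now reported as the micro part of an IIO_VAL_INT_PLUS_MICRO scale (gauss per LSB) instead of a plain integer. A short worked check of the macro; the ASA values are just sample points:

    #include <stdio.h>

    #define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

    int main(void)
    {
        /* ASA = 128 is the nominal sensitivity adjustment:
         * (256 * 3000) / 256 = 3000 -> val = 0, val2 = 3000 -> 0.003 G/LSB. */
        printf("ASA 128 -> %d\n", RAW_TO_GAUSS(128));
        printf("ASA   0 -> %d\n", RAW_TO_GAUSS(0));     /* 1500 -> 0.0015 G/LSB   */
        printf("ASA 255 -> %d\n", RAW_TO_GAUSS(255));   /* 4488 -> 0.004488 G/LSB */
        return 0;
    }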
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..8b77782474d7 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -180,9 +183,17 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ switch (chan->type) {
+ case IIO_MAGN:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT;
*val = mag3110_samp_freq[i][0];
@@ -199,6 +210,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
@@ -264,7 +278,8 @@ static const struct iio_chan_spec mag3110_channels[] = {
MAG3110_CHANNEL(Z, 2),
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 3,
.scan_type = {
.sign = 's',
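The mag3110_show_int_plus_micros() fix above matters whenever the micro part of an {int, micro} pair has leading zeros: printed with plain "%d.%d", such a value loses its magnitude. A minimal sketch of the difference; the sample pair is purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        int val = 1, val2 = 62500;      /* illustrative pair meaning 1.062500    */

        printf("%d.%d\n", val, val2);   /* old format prints "1.62500" - wrong   */
        printf("%d.%06d\n", val, val2); /* fixed format prints "1.062500"        */
        return 0;
    }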
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index a8b9cae5c173..d88ff17fedb2 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,20 @@
menu "Pressure sensors"
+config HID_SENSOR_PRESS
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PRESS"
+ help
+ Say yes here to build support for the HID SENSOR
+ Pressure driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-press.
+
config MPL3115
tristate "Freescale MPL3115A2 pressure sensor driver"
depends on I2C
@@ -26,7 +40,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS001WP, LPS331AP.
+ sensors: LPS001WP, LPS25H, LPS331AP.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 42bb9fcf5436..4a57bf65b04b 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
new file mode 100644
index 000000000000..e0e6409aa94e
--- /dev/null
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -0,0 +1,376 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESSURE 0
+
+struct press_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info press_attr;
+ u32 press_data;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec press_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .modified = 1,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_PRESSURE,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void press_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int press_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct press_state *press_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret;
+ int ret_type;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->scan_index) {
+ case CHANNEL_SCAN_INDEX_PRESSURE:
+ report_id = press_state->press_attr.report_id;
+ address =
+ HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+ break;
+ default:
+ report_id = -1;
+ break;
+ }
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ press_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_PRESSURE, address,
+ report_id);
+ else {
+ *val = 0;
+ return -EINVAL;
+ }
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = press_state->press_attr.units;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = hid_sensor_convert_exponent(
+ press_state->press_attr.unit_expo);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_read_samp_freq_value(
+ &press_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_read_raw_hyst_value(
+ &press_state->common_attributes, val, val2);
+ ret_type = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int press_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct press_state *press_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &press_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &press_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info press_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &press_read_raw,
+ .write_raw = &press_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int press_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct press_state *press_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "press_proc_event [%d]\n",
+ press_state->common_attributes.data_ready);
+ if (press_state->common_attributes.data_ready)
+ hid_sensor_push_data(indio_dev,
+ &press_state->press_data,
+ sizeof(press_state->press_data));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int press_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct press_state *press_state = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE:
+ press_state->press_data = *(u32 *)raw_data;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to a usage id */
+static int press_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct press_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE,
+ &st->press_attr);
+ if (ret < 0)
+ return ret;
+ press_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESSURE,
+ st->press_attr.size);
+
+ dev_dbg(&pdev->dev, "press %x:%x\n", st->press_attr.index,
+ st->press_attr.report_id);
+
+	/* Set Sensitivity field ids when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_press_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "press";
+ struct iio_dev *indio_dev;
+ struct press_state *press_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct press_state));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ press_state = iio_priv(indio_dev);
+ press_state->common_attributes.hsdev = hsdev;
+ press_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_PRESSURE,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(press_channels, sizeof(press_channels), GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = press_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_PRESSURE, press_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels =
+ ARRAY_SIZE(press_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &press_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ press_state->common_attributes.data_ready = false;
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ press_state->callbacks.send_event = press_proc_event;
+ press_state->callbacks.capture_sample = press_capture_sample;
+ press_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PRESSURE,
+ &press_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_press_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct press_state *press_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_press_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200031",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_press_ids);
+
+static struct platform_driver hid_press_platform_driver = {
+ .id_table = hid_press_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = hid_press_probe,
+ .remove = hid_press_remove,
+};
+module_platform_driver(hid_press_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Pressure");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
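The new driver above exposes its single pressure channel through the standard IIO sysfs interface. As a minimal user-space sketch (assumptions, not part of the patch: the sensor hub enumerates as iio:device0 and the channel surfaces as in_pressure_raw, in_pressure_scale and in_pressure_offset, per the info masks in press_channels[]):

/* Hypothetical user-space reader for the IIO pressure channel above. */
#include <stdio.h>

static double read_attr(const char *base, const char *name)
{
	char path[256];
	double v = 0.0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, name);
	f = fopen(path, "r");
	if (!f)
		return 0.0;		/* attribute may be absent */
	if (fscanf(f, "%lf", &v) != 1)
		v = 0.0;
	fclose(f);
	return v;
}

int main(void)
{
	/* Device index is an assumption; pick the right iio:deviceN on a real system. */
	const char *base = "/sys/bus/iio/devices/iio:device0";
	double raw = read_attr(base, "in_pressure_raw");
	double scale = read_attr(base, "in_pressure_scale");
	double offset = read_attr(base, "in_pressure_offset");

	/* Standard IIO convention: processed value = (raw + offset) * scale */
	printf("pressure: %f\n", (raw + offset) * scale);
	return 0;
}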
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index ac8c8ab723e5..ba6d0c520e63 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -77,7 +77,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mpl3115_data *data = iio_priv(indio_dev);
- s32 tmp = 0;
+ __be32 tmp = 0;
int ret;
switch (mask) {
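The s32 to __be32 change above is an annotation fix: the sample block read from the chip is big-endian, and typing the buffer __be32 lets sparse flag any use that skips the byte-order conversion. A generic sketch of the idiom (not the driver's exact code; the shift width is an assumption about where the sample sits in the 32-bit word):

/* Sketch only: convert a big-endian raw sample, then right-align the field. */
#include <linux/types.h>
#include <asm/byteorder.h>

static inline int raw_sample_to_cpu(__be32 raw, unsigned int shift)
{
	return be32_to_cpu(raw) >> shift;
}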
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 049c21acf1f0..242943c0c4e4 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -15,6 +15,7 @@
#include <linux/iio/common/st_sensors.h>
#define LPS001WP_PRESS_DEV_NAME "lps001wp"
+#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
/**
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 58083f9d51c5..7418768ed49c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -40,6 +40,9 @@
/* FULLSCALE */
#define ST_PRESS_FS_AVL_1260MB 1260
+#define ST_PRESS_1_OUT_XL_ADDR 0x28
+#define ST_TEMP_1_OUT_L_ADDR 0x2b
+
/* CUSTOM VALUES FOR LPS331AP SENSOR */
#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
@@ -62,8 +65,6 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
-#define ST_PRESS_LPS331AP_OUT_XL_ADDR 0x28
-#define ST_TEMP_LPS331AP_OUT_L_ADDR 0x2b
/* CUSTOM VALUES FOR LPS001WP SENSOR */
#define ST_PRESS_LPS001WP_WAI_EXP 0xba
@@ -80,11 +81,36 @@
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-static const struct iio_chan_spec st_press_lps331ap_channels[] = {
+/* CUSTOM VALUES FOR LPS25H SENSOR */
+#define ST_PRESS_LPS25H_WAI_EXP 0xbd
+#define ST_PRESS_LPS25H_ODR_ADDR 0x20
+#define ST_PRESS_LPS25H_ODR_MASK 0x70
+#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
+#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
+#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
+#define ST_PRESS_LPS25H_PW_ADDR 0x20
+#define ST_PRESS_LPS25H_PW_MASK 0x80
+#define ST_PRESS_LPS25H_FS_ADDR 0x00
+#define ST_PRESS_LPS25H_FS_MASK 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
+#define ST_PRESS_LPS25H_BDU_ADDR 0x20
+#define ST_PRESS_LPS25H_BDU_MASK 0x04
+#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
+#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
+#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
+#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
+
+static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
.channel2 = IIO_NO_MOD,
- .address = ST_PRESS_LPS331AP_OUT_XL_ADDR,
+ .address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = ST_SENSORS_SCAN_X,
.scan_type = {
.sign = 'u',
@@ -99,7 +125,7 @@ static const struct iio_chan_spec st_press_lps331ap_channels[] = {
{
.type = IIO_TEMP,
.channel2 = IIO_NO_MOD,
- .address = ST_TEMP_LPS331AP_OUT_L_ADDR,
+ .address = ST_TEMP_1_OUT_L_ADDR,
.scan_index = -1,
.scan_type = {
.sign = 'u',
@@ -156,8 +182,8 @@ static const struct st_sensors st_press_sensors[] = {
.sensors_supported = {
[0] = LPS331AP_PRESS_DEV_NAME,
},
- .ch = (struct iio_chan_spec *)st_press_lps331ap_channels,
- .num_ch = ARRAY_SIZE(st_press_lps331ap_channels),
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
.odr = {
.addr = ST_PRESS_LPS331AP_ODR_ADDR,
.mask = ST_PRESS_LPS331AP_ODR_MASK,
@@ -233,6 +259,53 @@ static const struct st_sensors st_press_sensors[] = {
.multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ .wai = ST_PRESS_LPS25H_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS25H_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS25H_ODR_ADDR,
+ .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
+ { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS25H_PW_ADDR,
+ .mask = ST_PRESS_LPS25H_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = ST_PRESS_LPS25H_FS_ADDR,
+ .mask = ST_PRESS_LPS25H_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
+ .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
+ .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS25H_BDU_ADDR,
+ .mask = ST_PRESS_LPS25H_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_press_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 51eab7fcb194..3cd73e39b840 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -50,6 +50,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
static const struct i2c_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
+ { LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 27322af6d665..f45d430ec529 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -49,6 +49,7 @@ static int st_press_spi_remove(struct spi_device *spi)
static const struct spi_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
+ { LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{},
};
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..77089399359b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -3,6 +3,8 @@ menuconfig INFINIBAND
depends on PCI || BROKEN
depends on HAS_IOMEM
depends on NET
+ depends on INET
+ depends on m || IPV6 != m
---help---
Core support for InfiniBand (IB). Make sure to also select
any protocols you wish to use as well as drivers for your
@@ -38,8 +40,7 @@ config INFINIBAND_USER_MEM
config INFINIBAND_ADDR_TRANS
bool
- depends on INET
- depends on !(INFINIBAND = y && IPV6 = m)
+ depends on INFINIBAND
default y
source "drivers/infiniband/hw/mthca/Kconfig"
@@ -53,6 +54,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 1fe69888515f..bf508b5550c4 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
+obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index c8bbaef1becb..3ab3865544bb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,9 @@
-infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o iw_cm.o $(infiniband-y)
+ ib_cm.o iw_cm.o ib_addr.o \
+ $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e90f2b2eabd7..8172d37f9add 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
+static struct rdma_addr_client self;
+
void rdma_addr_register_client(struct rdma_addr_client *client)
{
atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
}
EXPORT_SYMBOL(rdma_copy_addr);
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+ u16 *vlan_id)
{
struct net_device *dev;
int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
return ret;
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
&((struct sockaddr_in6 *) addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
src_in->sin_addr.s_addr = fl4.saddr;
if (rt->dst.dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
}
if (dst->dev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
}
EXPORT_SYMBOL(rdma_addr_cancel);
+struct resolve_cb_context {
+ struct rdma_dev_addr *addr;
+ struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+ struct rdma_dev_addr *addr, void *context)
+{
+ memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+ rdma_dev_addr));
+ complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+ u16 *vlan_id)
+{
+ int ret = 0;
+ struct rdma_dev_addr dev_addr;
+ struct resolve_cb_context ctx;
+ struct net_device *dev;
+
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+
+
+ ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ if (ret)
+ return ret;
+
+ ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ if (ret)
+ return ret;
+
+ memset(&dev_addr, 0, sizeof(dev_addr));
+
+ ctx.addr = &dev_addr;
+ init_completion(&ctx.comp);
+ ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+ &dev_addr, 1000, resolve_cb, &ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&ctx.comp);
+
+ memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+ dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(dev);
+ dev_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+ int ret = 0;
+ struct rdma_dev_addr dev_addr;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } gid_addr;
+
+ ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+
+ if (ret)
+ return ret;
+ memset(&dev_addr, 0, sizeof(dev_addr));
+ ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+ if (ret)
+ return ret;
+
+ memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
@@ -461,11 +550,13 @@ static int __init addr_init(void)
return -ENOMEM;
register_netevent_notifier(&nb);
+ rdma_addr_register_client(&self);
return 0;
}
static void __exit addr_cleanup(void)
{
+ rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
}
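The two helpers exported above resolve Ethernet L2 addressing (destination MAC plus VLAN via ARP/ND, source MAC from the local net_device) starting from GIDs that encode IP addresses. A hedged consumer sketch, not taken from the patch; it assumes the declarations land in <rdma/ib_addr.h> next to the other rdma_addr helpers, and the wrapper name is hypothetical:

#include <rdma/ib_addr.h>

/* Fill L2 attributes for a RoCE path: dmac/vlan_id remotely, smac locally. */
static int example_fill_eth_l2(union ib_gid *sgid, union ib_gid *dgid,
			       u8 *smac, u8 *dmac, u16 *vlan_id)
{
	int ret;

	ret = rdma_addr_find_dmac_by_grh(sgid, dgid, dmac, vlan_id);
	if (ret)
		return ret;

	/* Source MAC comes from the netdev that owns the source GID. */
	return rdma_addr_find_smac_by_sgid(sgid, smac, NULL);
}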
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f2ef7ef0f36f..0601b9daf840 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -47,6 +47,7 @@
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
struct ib_ah_attr ah_attr;
u16 pkey_index;
u8 timeout;
+ u8 valid;
+ u8 smac[ETH_ALEN];
};
struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(id, struct cm_id_private, id);
+
+ if (smac != NULL)
+ memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+ if (alt_smac != NULL)
+ memcpy(cm_id_priv->alt_av.smac, alt_smac,
+ sizeof(cm_id_priv->alt_av.smac));
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
+ memcpy(av->smac, path->smac, sizeof(av->smac));
+
+ av->valid = 1;
return 0;
}
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+ memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+ work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,32 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+ if (!cm_id_priv->av.valid) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return -EINVAL;
+ }
+ if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+ qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+ *qp_attr_mask |= IB_QP_VID;
+ }
+ if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+ memcpy(qp_attr->smac, cm_id_priv->av.smac,
+ sizeof(qp_attr->smac));
+ *qp_attr_mask |= IB_QP_SMAC;
+ }
+ if (cm_id_priv->alt_av.valid) {
+ if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+ qp_attr->alt_vlan_id =
+ cm_id_priv->alt_av.ah_attr.vlan_id;
+ *qp_attr_mask |= IB_QP_ALT_VID;
+ }
+ if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+ memcpy(qp_attr->alt_smac,
+ cm_id_priv->alt_av.smac,
+ sizeof(qp_attr->alt_smac));
+ *qp_attr_mask |= IB_QP_ALT_SMAC;
+ }
+ }
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8e49db690f33..199958d9ddc8 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr);
+ ret = rdma_translate_ip(addr, dev_addr, NULL);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -365,7 +365,9 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
return -EINVAL;
mutex_lock(&lock);
- iboe_addr_get_sgid(dev_addr, &iboe_gid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &iboe_gid);
+
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
if (listen_id_priv &&
@@ -603,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
+ union ib_gid sgid;
mutex_lock(&id_priv->qp_mutex);
if (!id_priv->id.qp) {
@@ -625,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
+ ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+ qp_attr.ah_attr.grh.sgid_index, &sgid);
+ if (ret)
+ goto out;
+
+ if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+ == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+ == IB_LINK_LAYER_ETHERNET) {
+ ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+ if (ret)
+ goto out;
+ }
if (conn_param)
qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +742,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
else
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
qp_attr_mask);
+
if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
break;
@@ -1266,6 +1284,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
int offset, ret;
+ u8 smac[ETH_ALEN];
+ u8 alt_smac[ETH_ALEN];
+ u8 *psmac = smac;
+ u8 *palt_smac = alt_smac;
+ int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+ RDMA_TRANSPORT_IB) &&
+ (rdma_port_get_link_layer(cm_id->device,
+ ib_event->param.req_rcvd.port) ==
+ IB_LINK_LAYER_ETHERNET));
listen_id = cm_id->context;
if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1337,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
if (ret)
goto err3;
+ if (is_iboe) {
+ if (ib_event->param.req_rcvd.primary_path != NULL)
+ rdma_addr_find_smac_by_sgid(
+ &ib_event->param.req_rcvd.primary_path->sgid,
+ psmac, NULL);
+ else
+ psmac = NULL;
+ if (ib_event->param.req_rcvd.alternate_path != NULL)
+ rdma_addr_find_smac_by_sgid(
+ &ib_event->param.req_rcvd.alternate_path->sgid,
+ palt_smac, NULL);
+ else
+ palt_smac = NULL;
+ }
/*
* Acquire mutex to prevent user executing rdma_destroy_id()
* while we're accessing the cm_id.
*/
mutex_lock(&lock);
- if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+ if (is_iboe)
+ ib_update_cm_av(cm_id, psmac, palt_smac);
+ if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+ (conn_id->id.qp_type != IB_QPT_UD))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1518,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1873,7 +1917,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
struct cma_work *work;
int ret;
struct net_device *ndev = NULL;
- u16 vid;
+
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
@@ -1897,10 +1941,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- vid = rdma_vlan_dev_vlan_id(ndev);
+ route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+ memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
- iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
- iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &route->path_rec->sgid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+ &route->path_rec->dgid);
route->path_rec->hop_limit = 1;
route->path_rec->reversible = 1;
@@ -2063,6 +2111,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
RDMA_CM_ADDR_RESOLVED))
goto out;
+ memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev)
status = cma_acquire_dev(id_priv, NULL);
@@ -2072,10 +2121,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
goto out;
event.event = RDMA_CM_EVENT_ADDR_ERROR;
event.status = status;
- } else {
- memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+ } else
event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- }
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
@@ -2310,7 +2357,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
inet_get_local_port_range(&init_net, &low, &high);
remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
+ rover = prandom_u32() % remaining + low;
retry:
if (last_used_port != rover &&
!idr_find(ps, (unsigned short) rover)) {
@@ -2480,8 +2527,11 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
return 0;
sin6 = (struct sockaddr_in6 *) addr;
- if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
- !sin6->sin6_scope_id)
+
+ if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
+ return 0;
+
+ if (!sin6->sin6_scope_id)
return -EINVAL;
dev_addr->bound_dev_if = sin6->sin6_scope_id;
@@ -2556,6 +2606,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
+ memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
if (ret)
@@ -2566,7 +2617,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
goto err1;
}
- memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
if (addr->sa_family == AF_INET)
id_priv->afonly = 1;
@@ -3295,7 +3345,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -EINVAL;
goto out2;
}
- iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &mc->multicast.ib->rec.port_gid);
work->id = id_priv;
work->mc = mc;
INIT_WORK(&work->work, iboe_mcast_work_handler);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a565af5c2d2e..87d1936f5c1c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -49,4 +49,6 @@ void ib_sysfs_cleanup(void);
int ib_cache_setup(void);
void ib_cache_cleanup(void);
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr, int *qp_attr_mask);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0717940ec3b5..3d2e489ab732 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -334,7 +334,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
/*
@@ -350,7 +349,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* destroy the listening endpoint */
- ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ cm_id->device->iwcm->destroy_listen(cm_id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_ESTABLISHED:
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9838ca484389..f820958e4047 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -42,7 +42,7 @@
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
-
+#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->grh.hop_limit = rec->hop_limit;
ah_attr->grh.traffic_class = rec->traffic_class;
}
+ if (force_grh) {
+ memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+ ah_attr->vlan_id = rec->vlan_id;
+ } else {
+ ah_attr->vlan_id = 0xffff;
+ }
+
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
mad->data, &rec);
+ rec.vlan_id = 0xffff;
+ memset(rec.dmac, 0, ETH_ALEN);
+ memset(rec.smac, 0, ETH_ALEN);
query->callback(status, &rec, query->context);
} else
query->callback(status, NULL, query->context);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index faad2caf22b1..7d3292c7b4b4 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -613,6 +613,7 @@ static ssize_t show_node_type(struct device *device,
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type);
+ case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ab8b1c30b36b..56a4b7ca7ee3 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -655,24 +655,14 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
struct rdma_route *route)
{
- struct rdma_dev_addr *dev_addr;
- struct net_device *dev;
- u16 vid = 0;
resp->num_paths = route->num_paths;
switch (route->num_paths) {
case 0:
- dev_addr = &route->addr.dev_addr;
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- vid = rdma_vlan_dev_vlan_id(dev);
- dev_put(dev);
- }
-
- iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
- dev_addr->dst_dev_addr, vid);
- iboe_addr_get_sgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].sgid);
+ rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
+ (union ib_gid *)&resp->ib_route[0].dgid);
+ rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
+ (union ib_gid *)&resp->ib_route[0].sgid);
resp->ib_route[0].pkey = cpu_to_be16(0xffff);
break;
case 2:
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f1cc83855af6..ea6203ee7bcc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -40,6 +40,7 @@
#include <asm/uaccess.h>
#include "uverbs.h"
+#include "core_priv.h"
struct uverbs_lock_class {
struct lock_class_key key;
@@ -1961,6 +1962,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
if (qp->real_qp == qp) {
+ ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+ if (ret)
+ goto out;
ret = qp->device->modify_qp(qp, attr,
modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
} else {
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index d4f6ddf72ffa..3ac795115438 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,9 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "core_priv.h"
int ib_rate_to_mult(enum ib_rate rate)
{
@@ -116,6 +119,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
return RDMA_TRANSPORT_IWARP;
case RDMA_NODE_USNIC:
return RDMA_TRANSPORT_USNIC;
+ case RDMA_NODE_USNIC_UDP:
+ return RDMA_TRANSPORT_USNIC_UDP;
default:
BUG();
return 0;
@@ -133,6 +138,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_
return IB_LINK_LAYER_INFINIBAND;
case RDMA_TRANSPORT_IWARP:
case RDMA_TRANSPORT_USNIC:
+ case RDMA_TRANSPORT_USNIC_UDP:
return IB_LINK_LAYER_ETHERNET;
default:
return IB_LINK_LAYER_UNSPECIFIED;
@@ -192,8 +198,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
u32 flow_class;
u16 gid_index;
int ret;
+ int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+ IB_LINK_LAYER_ETHERNET);
memset(ah_attr, 0, sizeof *ah_attr);
+ if (is_eth) {
+ if (!(wc->wc_flags & IB_WC_GRH))
+ return -EPROTOTYPE;
+
+ if (wc->wc_flags & IB_WC_WITH_SMAC &&
+ wc->wc_flags & IB_WC_WITH_VLAN) {
+ memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+ ah_attr->vlan_id = wc->vlan_id;
+ } else {
+ ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+ ah_attr->dmac, &ah_attr->vlan_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ah_attr->vlan_id = 0xffff;
+ }
+
ah_attr->dlid = wc->slid;
ah_attr->sl = wc->sl;
ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -476,7 +502,9 @@ EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
enum ib_qp_attr_mask req_param[IB_QPT_MAX];
+ enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX];
enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
+ enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
@@ -557,6 +585,12 @@ static const struct {
IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_MIN_RNR_TIMER),
},
+ .req_param_add_eth = {
+ [IB_QPT_RC] = (IB_QP_SMAC),
+ [IB_QPT_UC] = (IB_QP_SMAC),
+ [IB_QPT_XRC_INI] = (IB_QP_SMAC),
+ [IB_QPT_XRC_TGT] = (IB_QP_SMAC)
+ },
.opt_param = {
[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
@@ -576,7 +610,21 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
- }
+ },
+ .opt_param_add_eth = {
+ [IB_QPT_RC] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_UC] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID),
+ [IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
+ IB_QP_VID |
+ IB_QP_ALT_VID)
+ }
}
},
[IB_QPS_RTR] = {
@@ -779,7 +827,8 @@ static const struct {
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask)
+ enum ib_qp_type type, enum ib_qp_attr_mask mask,
+ enum rdma_link_layer ll)
{
enum ib_qp_attr_mask req_param, opt_param;
@@ -798,6 +847,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
req_param = qp_state_table[cur_state][next_state].req_param[type];
opt_param = qp_state_table[cur_state][next_state].opt_param[type];
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ req_param |= qp_state_table[cur_state][next_state].
+ req_param_add_eth[type];
+ opt_param |= qp_state_table[cur_state][next_state].
+ opt_param_add_eth[type];
+ }
+
if ((mask & req_param) != req_param)
return 0;
@@ -808,10 +864,51 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+{
+ int ret = 0;
+ union ib_gid sgid;
+
+ if ((*qp_attr_mask & IB_QP_AV) &&
+ (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+ ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
+ qp_attr->ah_attr.grh.sgid_index, &sgid);
+ if (ret)
+ goto out;
+ if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
+ rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
+ rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
+ qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+ } else {
+ ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
+ qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
+ if (ret)
+ goto out;
+ ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
+ if (ret)
+ goto out;
+ }
+ *qp_attr_mask |= IB_QP_SMAC;
+ if (qp_attr->vlan_id < 0xFFFF)
+ *qp_attr_mask |= IB_QP_VID;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
+
+
int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
+ int ret;
+
+ ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
+ if (ret)
+ return ret;
+
return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
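ib_modify_qp_is_ok() now takes the port's link layer so that the new Ethernet-only mask bits (IB_QP_SMAC, IB_QP_VID and their ALT variants) are enforced only on RoCE ports; IB-only drivers simply pass IB_LINK_LAYER_UNSPECIFIED, as the ehca and ipath hunks below show. A sketch of how a RoCE-capable driver's transition check might use the extended signature (the helper name and surrounding structure are assumptions):

#include <rdma/ib_verbs.h>

static int example_check_transition(struct ib_qp *ibqp, u8 port_num,
				    enum ib_qp_state cur_state,
				    enum ib_qp_state new_state,
				    int attr_mask)
{
	enum rdma_link_layer ll =
		rdma_port_get_link_layer(ibqp->device, port_num);

	/* On Ethernet ports the SMAC/VID bits join the required/optional sets. */
	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, ll))
		return -EINVAL;

	return 0;
}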
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 8951db4ae29d..3a17d9b36dba 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -169,7 +169,8 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
* We should never get here, as the adapter should
* never send us a reply that we're not expecting.
*/
- vq_repbuf_free(c2dev, host_msg);
+ if (reply_msg != NULL)
+ vq_repbuf_free(c2dev, host_msg);
pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
return;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
/* Initialize cached the adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 84e45006451c..41b11951a30a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -76,7 +76,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL(1) : 0));
- req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+ req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00d6861a6a18..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
if (!smi_reset2init &&
!ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
- attr_mask)) {
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
ret = -EINVAL;
ehca_err(ibqp->device,
"Invalid qp transition new_state=%x cur_state=%x "
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 0857a9c3cd3d..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask))
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED))
goto inval;
if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 24ab11a9ad1e..fc01deac1d3c 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,6 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
- depends on NETDEVICES && ETHERNET && PCI
+ depends on NETDEVICES && ETHERNET && PCI && INET
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
#include "mlx4_ib.h"
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
- u8 *mac, int *is_mcast, u8 port)
-{
- struct in6_addr in6;
-
- *is_mcast = 0;
-
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
- if (rdma_link_local_addr(&in6))
- rdma_get_ll_mac(&in6, mac);
- else if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, mac);
- *is_mcast = 1;
- } else
- return -EINVAL;
-
- return 0;
-}
-
static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
struct mlx4_ib_ah *ah)
{
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
{
struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
struct mlx4_dev *dev = ibdev->dev;
- union ib_gid sgid;
- u8 mac[6];
- int err;
int is_mcast;
+ struct in6_addr in6;
u16 vlan_tag;
- err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
- if (err)
- return ERR_PTR(err);
-
- memcpy(ah->av.eth.mac, mac, 6);
- err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
- if (err)
- return ERR_PTR(err);
- vlan_tag = rdma_get_vlan_id(&sgid);
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6)) {
+ is_mcast = 1;
+ rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+ } else {
+ memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+ }
+ vlan_tag = ah_attr->vlan_id;
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
else
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
+ if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+ wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+ MLX4_CQE_VID_MASK;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1958c5ca792a..f9c12e92fdd6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -39,6 +39,8 @@
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -51,10 +53,11 @@
#include "user.h"
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "April 4, 2008"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
+#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -92,21 +95,27 @@ static union ib_gid zgid;
static int check_flow_steering_support(struct mlx4_dev *dev)
{
+ int eth_num_ports = 0;
int ib_num_ports = 0;
- int i;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_num_ports++;
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- if (ib_num_ports || mlx4_is_mfunc(dev)) {
- pr_warn("Device managed flow steering is unavailable "
- "for IB ports or in multifunction env.\n");
- return 0;
+ int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
+
+ if (dmfs) {
+ int i;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ eth_num_ports++;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+ dmfs &= (!ib_num_ports ||
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
+ (!eth_num_ports ||
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
+ if (ib_num_ports && mlx4_is_mfunc(dev)) {
+ pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
+ dmfs = 0;
}
- return 1;
}
- return 0;
+ return dmfs;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
@@ -165,7 +174,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (check_flow_steering_support(dev->dev))
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
@@ -338,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -787,7 +796,6 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid)
{
- u8 mac[6];
struct net_device *ndev;
int ret = 0;
@@ -801,11 +809,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
spin_unlock(&mdev->iboe.lock);
if (ndev) {
- rdma_get_mcast_mac((struct in6_addr *)gid, mac);
- rtnl_lock();
- dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
ret = 1;
- rtnl_unlock();
dev_put(ndev);
}
@@ -819,6 +823,7 @@ struct mlx4_ib_steering {
};
static int parse_flow_attr(struct mlx4_dev *dev,
+ u32 qp_num,
union ib_flow_spec *ib_spec,
struct _rule_hw *mlx4_spec)
{
@@ -834,6 +839,14 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
+ case IB_FLOW_SPEC_IB:
+ type = MLX4_NET_TRANS_RULE_ID_IB;
+ mlx4_spec->ib.l3_qpn =
+ cpu_to_be32(qp_num);
+ mlx4_spec->ib.qpn_mask =
+ cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
+ break;
+
case IB_FLOW_SPEC_IPV4:
type = MLX4_NET_TRANS_RULE_ID_IPV4;
@@ -865,6 +878,115 @@ static int parse_flow_attr(struct mlx4_dev *dev,
return mlx4_hw_rule_sz(dev, type);
}
+struct default_rules {
+ __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ __u8 link_layer;
+};
+static const struct default_rules default_table[] = {
+ {
+ .mandatory_fields = {IB_FLOW_SPEC_IPV4},
+ .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
+ .rules_create_list = {IB_FLOW_SPEC_IB},
+ .link_layer = IB_LINK_LAYER_INFINIBAND
+ }
+};
+
+static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr)
+{
+ int i, j, k;
+ void *ib_flow;
+ const struct default_rules *pdefault_rules = default_table;
+ u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
+
+ for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
+ pdefault_rules++) {
+ __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
+ memset(&field_types, 0, sizeof(field_types));
+
+ if (link_layer != pdefault_rules->link_layer)
+ continue;
+
+ ib_flow = flow_attr + 1;
+ /* we assume the specs are sorted */
+ for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
+ j < flow_attr->num_of_specs; k++) {
+ union ib_flow_spec *current_flow =
+ (union ib_flow_spec *)ib_flow;
+
+ /* same layer but different type */
+ if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
+ (pdefault_rules->mandatory_fields[k] &
+ IB_FLOW_SPEC_LAYER_MASK)) &&
+ (current_flow->type !=
+ pdefault_rules->mandatory_fields[k]))
+ goto out;
+
+ /* same layer, try match next one */
+ if (current_flow->type ==
+ pdefault_rules->mandatory_fields[k]) {
+ j++;
+ ib_flow +=
+ ((union ib_flow_spec *)ib_flow)->size;
+ }
+ }
+
+ ib_flow = flow_attr + 1;
+ for (j = 0; j < flow_attr->num_of_specs;
+ j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
+ for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
+ /* same layer and same type */
+ if (((union ib_flow_spec *)ib_flow)->type ==
+ pdefault_rules->mandatory_not_fields[k])
+ goto out;
+
+ return i;
+ }
+out:
+ return -1;
+}
+
+static int __mlx4_ib_create_default_rules(
+ struct mlx4_ib_dev *mdev,
+ struct ib_qp *qp,
+ const struct default_rules *pdefault_rules,
+ struct _rule_hw *mlx4_spec) {
+ int size = 0;
+ int i;
+
+ for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
+ sizeof(pdefault_rules->rules_create_list[0]); i++) {
+ int ret;
+ union ib_flow_spec ib_spec;
+ switch (pdefault_rules->rules_create_list[i]) {
+ case 0:
+ /* no rule */
+ continue;
+ case IB_FLOW_SPEC_IB:
+ ib_spec.type = IB_FLOW_SPEC_IB;
+ ib_spec.size = sizeof(struct ib_flow_spec_ib);
+
+ break;
+ default:
+ /* invalid rule */
+ return -EINVAL;
+ }
+ /* We must put empty rule, qpn is being ignored */
+ ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
+ mlx4_spec);
+ if (ret < 0) {
+ pr_info("invalid parsing\n");
+ return -EINVAL;
+ }
+
+ mlx4_spec = (void *)mlx4_spec + ret;
+ size += ret;
+ }
+ return size;
+}
+
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
int domain,
enum mlx4_net_trans_promisc_mode flow_type,
@@ -876,6 +998,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+ int default_flow;
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -910,8 +1033,21 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
ib_flow = flow_attr + 1;
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ /* Add default flows */
+ default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
+ if (default_flow >= 0) {
+ ret = __mlx4_ib_create_default_rules(
+ mdev, qp, default_table + default_flow,
+ mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
for (i = 0; i < flow_attr->num_of_specs; i++) {
- ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
+ ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
+ mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return -EINVAL;
@@ -1025,6 +1161,8 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
+ enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+ MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1036,7 +1174,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROT_IB_IPV6, &reg_id);
+ prot, &reg_id);
if (err)
goto err_malloc;
@@ -1055,7 +1193,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err_add:
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- MLX4_PROT_IB_IPV6, reg_id);
+ prot, reg_id);
err_malloc:
kfree(ib_steering);
@@ -1083,10 +1221,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
- u8 mac[6];
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
u64 reg_id = 0;
+ enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+ MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1109,7 +1248,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
- MLX4_PROT_IB_IPV6, reg_id);
+ prot, reg_id);
if (err)
return err;
@@ -1121,13 +1260,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (ndev)
dev_hold(ndev);
spin_unlock(&mdev->iboe.lock);
- rdma_get_mcast_mac((struct in6_addr *)gid, mac);
- if (ndev) {
- rtnl_lock();
- dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
- rtnl_unlock();
+ if (ndev)
dev_put(ndev);
- }
list_del(&ge->list);
kfree(ge);
} else
@@ -1223,7 +1357,8 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
{
memcpy(eui, dev->dev_addr, 3);
memcpy(eui + 5, dev->dev_addr + 3, 3);
@@ -1259,161 +1394,377 @@ static void update_gids_task(struct work_struct *work)
MLX4_CMD_WRAPPED);
if (err)
pr_warn("set port command failed\n");
- else {
- memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+ else
mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ kfree(gw);
+}
+
+static void reset_gids_task(struct work_struct *work)
+{
+ struct update_gid_work *gw =
+ container_of(work, struct update_gid_work, work);
+ struct mlx4_cmd_mailbox *mailbox;
+ union ib_gid *gids;
+ int err;
+ struct mlx4_dev *dev = gw->dev->dev;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ pr_warn("reset gid table failed\n");
+ goto free;
+ }
+
+ gids = mailbox->buf;
+ memcpy(gids, gw->gids, sizeof(gw->gids));
+
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+			pr_warn("set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
+free:
kfree(gw);
}
-static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+static int update_gid_table(struct mlx4_ib_dev *dev, int port,
+ union ib_gid *gid, int clear,
+ int default_gid)
{
- struct net_device *ndev = dev->iboe.netdevs[port - 1];
struct update_gid_work *work;
- struct net_device *tmp;
int i;
- u8 *hits;
- int ret;
- union ib_gid gid;
- int free;
- int found;
int need_update = 0;
- u16 vid;
-
- work = kzalloc(sizeof *work, GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
-
- hits = kzalloc(128, GFP_ATOMIC);
- if (!hits) {
- ret = -ENOMEM;
- goto out;
- }
+ int free = -1;
+ int found = -1;
+ int max_gids;
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, tmp) {
- if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
- gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- vid = rdma_vlan_dev_vlan_id(tmp);
- mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
- found = 0;
- free = -1;
- for (i = 0; i < 128; ++i) {
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
- free = i;
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
- hits[i] = 1;
- found = 1;
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+ sizeof(*gid)))
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
break;
}
- }
+ } else {
+ if (found >= 0)
+ break;
- if (!found) {
- if (tmp == ndev &&
- (memcmp(&dev->iboe.gid_table[port - 1][0],
- &gid, sizeof gid) ||
- !memcmp(&dev->iboe.gid_table[port - 1][0],
- &zgid, sizeof gid))) {
- dev->iboe.gid_table[port - 1][0] = gid;
- ++need_update;
- hits[0] = 1;
- } else if (free >= 0) {
- dev->iboe.gid_table[port - 1][free] = gid;
- hits[free] = 1;
- ++need_update;
- }
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
}
}
}
- rcu_read_unlock();
- for (i = 0; i < 128; ++i)
- if (!hits[i]) {
- if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
- ++need_update;
- dev->iboe.gid_table[port - 1][i] = zgid;
- }
+ if (found == -1 && !clear && free >= 0) {
+ dev->iboe.gid_table[port - 1][free] = *gid;
+ need_update = 1;
+ }
- if (need_update) {
- memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
- INIT_WORK(&work->work, update_gids_task);
- work->port = port;
- work->dev = dev;
- queue_work(wq, &work->work);
- } else
- kfree(work);
+ if (!need_update)
+ return 0;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
+ INIT_WORK(&work->work, update_gids_task);
+ work->port = port;
+ work->dev = dev;
+ queue_work(wq, &work->work);
- kfree(hits);
return 0;
+}
-out:
- kfree(work);
- return ret;
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}
-static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
- switch (event) {
- case NETDEV_UP:
- case NETDEV_CHANGEADDR:
- update_ipv6_gids(dev, port, 0);
- break;
+ struct update_gid_work *work;
- case NETDEV_DOWN:
- update_ipv6_gids(dev, port, 1);
- dev->iboe.netdevs[port - 1] = NULL;
- }
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
+ memset(work->gids, 0, sizeof(work->gids));
+ INIT_WORK(&work->work, reset_gids_task);
+ work->dev = dev;
+ work->port = port;
+ queue_work(wq, &work->work);
+ return 0;
}
-static void netdev_added(struct mlx4_ib_dev *dev, int port)
+static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
+ struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
- update_ipv6_gids(dev, port, 0);
+ struct mlx4_ib_iboe *iboe;
+ int port = 0;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+ rdma_vlan_dev_real_dev(event_netdev) :
+ event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
+
+ if (event != NETDEV_DOWN && event != NETDEV_UP)
+ return 0;
+
+ if ((real_dev != event_netdev) &&
+ (event == NETDEV_DOWN) &&
+ rdma_link_local_addr((struct in6_addr *)gid))
+ return 0;
+
+ iboe = &ibdev->iboe;
+ spin_lock(&iboe->lock);
+
+ for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ if ((netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->masters[port - 1])) ||
+ (!netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->netdevs[port - 1])))
+ update_gid_table(ibdev, port, gid,
+ event == NETDEV_DOWN, 0);
+
+ spin_unlock(&iboe->lock);
+ return 0;
+
}
-static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+static u8 mlx4_ib_get_dev_port(struct net_device *dev,
+ struct mlx4_ib_dev *ibdev)
{
- update_ipv6_gids(dev, port, 1);
+ u8 port = 0;
+ struct mlx4_ib_iboe *iboe;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
+ rdma_vlan_dev_real_dev(dev) : dev;
+
+ iboe = &ibdev->iboe;
+
+ for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ if ((netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->masters[port - 1])) ||
+ (!netif_is_bond_master(real_dev) &&
+ (real_dev == iboe->netdevs[port - 1])))
+ break;
+
+ if ((port == 0) || (port > MLX4_MAX_PORTS))
+ return 0;
+ else
+ return port;
}
-static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct mlx4_ib_dev *ibdev;
+ struct in_ifaddr *ifa = ptr;
+ union ib_gid gid;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
+
+ mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
+ return NOTIFY_DONE;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mlx4_ib_dev *ibdev;
- struct net_device *oldnd;
+ struct inet6_ifaddr *ifa = ptr;
+ union ib_gid *gid = (union ib_gid *)&ifa->addr;
+ struct net_device *event_netdev = ifa->idev->dev;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
+
+ mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
+ return NOTIFY_DONE;
+}
+#endif
+
+static void mlx4_ib_get_dev_addr(struct net_device *dev,
+ struct mlx4_ib_dev *ibdev, u8 port)
+{
+ struct in_device *in_dev;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_dev *in6_dev;
+ union ib_gid *pgid;
+ struct inet6_ifaddr *ifp;
+#endif
+ union ib_gid gid;
+
+ if ((port == 0) || (port > MLX4_MAX_PORTS))
+ return;
+
+ /* IPv4 gids */
+ in_dev = in_dev_get(dev);
+ if (in_dev) {
+ for_ifa(in_dev) {
+			/* map each IPv4 address to an IPv4-mapped IPv6 GID */
+ ipv6_addr_set_v4mapped(ifa->ifa_address,
+ (struct in6_addr *)&gid);
+ update_gid_table(ibdev, port, &gid, 0, 0);
+ }
+ endfor_ifa(in_dev);
+ in_dev_put(in_dev);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 gids */
+ in6_dev = in6_dev_get(dev);
+ if (in6_dev) {
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ pgid = (union ib_gid *)&ifp->addr;
+ update_gid_table(ibdev, port, pgid, 0, 0);
+ }
+ read_unlock_bh(&in6_dev->lock);
+ in6_dev_put(in6_dev);
+ }
+#endif
+}
+
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
+static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
+{
+ struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
+
+ read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
+
+ for_each_netdev(&init_net, dev) {
+ u8 port = mlx4_ib_get_dev_port(dev, ibdev);
+ if (port)
+ mlx4_ib_get_dev_addr(dev, ibdev, port);
+ }
+
+ spin_unlock(&iboe->lock);
+ read_unlock(&dev_base_lock);
+
+ return 0;
+}
+
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+{
struct mlx4_ib_iboe *iboe;
int port;
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
iboe = &ibdev->iboe;
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
- oldnd = iboe->netdevs[port - 1];
+ enum ib_port_state port_state = IB_PORT_NOP;
+ struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
+ struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
- if (oldnd != iboe->netdevs[port - 1]) {
- if (iboe->netdevs[port - 1])
- netdev_added(ibdev, port);
- else
- netdev_removed(ibdev, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
+
+ if (iboe->netdevs[port - 1] &&
+ netif_is_bond_slave(iboe->netdevs[port - 1])) {
+ iboe->masters[port - 1] = netdev_master_upper_dev_get(
+ iboe->netdevs[port - 1]);
+ } else {
+ iboe->masters[port - 1] = NULL;
+ }
+ curr_master = iboe->masters[port - 1];
+
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
+		/* if using bonding/team and a slave port is down, we don't want
+		 * the bond IP based gids in the table since flows that select
+		 * port by gid may get the down port.
+		 */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
+		/* if bonding is used, the master may be added to our masters
+		 * list only after an IP address has already been assigned to
+		 * the bonding interface.
+		 */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_master, ibdev, port);
}
- }
- if (dev == iboe->netdevs[0] ||
- (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
- handle_en_event(ibdev, 1, event);
- else if (dev == iboe->netdevs[1]
- || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
- handle_en_event(ibdev, 2, event);
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
+ }
spin_unlock(&iboe->lock);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlx4_ib_dev *ibdev;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+ mlx4_ib_scan_netdevs(ibdev);
return NOTIFY_DONE;
}
@@ -1533,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1682,6 +2034,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
if (check_flow_steering_support(dev)) {
+ ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
@@ -1707,11 +2060,42 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
+ ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
+ err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
+ MLX4_IB_UC_STEER_QPN_ALIGN,
+ &ibdev->steer_qpn_base);
+ if (err)
+ goto err_counter;
+
+ ibdev->ib_uc_qpns_bitmap =
+ kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ibdev->ib_uc_qpns_bitmap) {
+ dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+ goto err_steer_qp_release;
+ }
+
+ bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
+
+ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+ dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_base +
+ ibdev->steer_qpn_count - 1);
+ if (err)
+ goto err_steer_free_bitmap;
+ }
+
if (ib_register_device(&ibdev->ib_dev, NULL))
- goto err_counter;
+ goto err_steer_free_bitmap;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -1719,11 +2103,39 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_init_sriov(ibdev))
goto err_mad;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
- iboe->nb.notifier_call = mlx4_ib_netdev_event;
- err = register_netdevice_notifier(&iboe->nb);
- if (err)
- goto err_sriov;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
+ if (!iboe->nb.notifier_call) {
+ iboe->nb.notifier_call = mlx4_ib_netdev_event;
+ err = register_netdevice_notifier(&iboe->nb);
+ if (err) {
+ iboe->nb.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+ if (!iboe->nb_inet.notifier_call) {
+ iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
+ err = register_inetaddr_notifier(&iboe->nb_inet);
+ if (err) {
+ iboe->nb_inet.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!iboe->nb_inet6.notifier_call) {
+ iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
+ err = register_inet6addr_notifier(&iboe->nb_inet6);
+ if (err) {
+ iboe->nb_inet6.notifier_call = NULL;
+ goto err_notif;
+ }
+ }
+#endif
+		for (i = 1; i <= ibdev->num_ports; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
+ mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
+ mlx4_ib_init_gid_table(ibdev);
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1749,11 +2161,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
return ibdev;
err_notif:
- if (unregister_netdevice_notifier(&ibdev->iboe.nb))
- pr_warn("failure unregistering notifier\n");
+ if (ibdev->iboe.nb.notifier_call) {
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb.notifier_call = NULL;
+ }
+ if (ibdev->iboe.nb_inet.notifier_call) {
+ if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet.notifier_call = NULL;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ibdev->iboe.nb_inet6.notifier_call) {
+ if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet6.notifier_call = NULL;
+ }
+#endif
flush_workqueue(wq);
-err_sriov:
mlx4_ib_close_sriov(ibdev);
err_mad:
@@ -1762,6 +2188,13 @@ err_mad:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_steer_free_bitmap:
+ kfree(ibdev->ib_uc_qpns_bitmap);
+
+err_steer_qp_release:
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
@@ -1782,6 +2215,69 @@ err_dealloc:
return NULL;
}
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
+{
+ int offset;
+
+ WARN_ON(!dev->ib_uc_qpns_bitmap);
+
+ offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
+ dev->steer_qpn_count,
+ get_count_order(count));
+ if (offset < 0)
+ return offset;
+
+ *qpn = dev->steer_qpn_base + offset;
+ return 0;
+}
+
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
+{
+ if (!qpn ||
+ dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return;
+
+ BUG_ON(qpn < dev->steer_qpn_base);
+
+ bitmap_release_region(dev->ib_uc_qpns_bitmap,
+ qpn - dev->steer_qpn_base,
+ get_count_order(count));
+}
+
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ int is_attach)
+{
+ int err;
+ size_t flow_size;
+ struct ib_flow_attr *flow = NULL;
+ struct ib_flow_spec_ib *ib_spec;
+
+ if (is_attach) {
+ flow_size = sizeof(struct ib_flow_attr) +
+ sizeof(struct ib_flow_spec_ib);
+ flow = kzalloc(flow_size, GFP_KERNEL);
+ if (!flow)
+ return -ENOMEM;
+ flow->port = mqp->port;
+ flow->num_of_specs = 1;
+ flow->size = flow_size;
+ ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
+ ib_spec->type = IB_FLOW_SPEC_IB;
+ ib_spec->size = sizeof(struct ib_flow_spec_ib);
+ /* Add an empty rule for IB L2 */
+ memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
+
+ err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
+ IB_FLOW_DOMAIN_NIC,
+ MLX4_FS_REGULAR,
+ &mqp->reg_id);
+ } else {
+ err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+ }
+ kfree(flow);
+ return err;
+}
+
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
@@ -1795,6 +2291,26 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
+
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
+ }
+
+ if (ibdev->iboe.nb_inet.notifier_call) {
+ if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet.notifier_call = NULL;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ibdev->iboe.nb_inet6.notifier_call) {
+ if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+ pr_warn("failure unregistering notifier\n");
+ ibdev->iboe.nb_inet6.notifier_call = NULL;
+ }
+#endif
+
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p] != -1)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 036b663dd26e..a230683af940 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -68,6 +68,8 @@ enum {
/*module param to indicate if SM assigns the alias_GUID*/
extern int mlx4_ib_sm_guid_assign;
+#define MLX4_IB_UC_STEER_QPN_ALIGN 1
+#define MLX4_IB_UC_MAX_NUM_QPS 256
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -153,6 +155,7 @@ struct mlx4_ib_wq {
enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
MLX4_IB_SRIOV_SQP = 1 << 31,
};
@@ -270,6 +273,7 @@ struct mlx4_ib_qp {
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
+ u64 reg_id;
};
@@ -428,7 +432,10 @@ struct mlx4_ib_sriov {
struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
+ struct net_device *masters[MLX4_MAX_PORTS];
struct notifier_block nb;
+ struct notifier_block nb_inet;
+ struct notifier_block nb_inet6;
union ib_gid gid_table[MLX4_MAX_PORTS][128];
};
@@ -494,6 +501,10 @@ struct mlx4_ib_dev {
struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
struct pkey_mgt pkeys;
+ unsigned long *ib_uc_qpns_bitmap;
+ int steer_qpn_count;
+ int steer_qpn_base;
+ int steering_support;
};
struct ib_event_work {
@@ -675,9 +686,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid, int netw_view);
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
- u8 *mac, int *is_mcast, u8 port);
-
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
@@ -752,5 +760,9 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);
__be64 mlx4_ib_gen_node_guid(void);
+int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
+void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
+int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ int is_attach);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b5..d8f4d1fe8494 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
MLX4_RAW_QP_MSGMAX = 31,
};
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
+
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
@@ -716,6 +731,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
qp->flags |= MLX4_IB_QP_LSO;
+ if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+ if (dev->steering_support ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ qp->flags |= MLX4_IB_QP_NETIF;
+ else
+ goto err;
+ }
+
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
if (err)
goto err;
@@ -765,7 +788,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->qp_type == IB_QPT_RAW_PACKET)
err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
else
- err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
+ else
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1,
+ &qpn);
if (err)
goto err_proxy;
}
@@ -790,8 +817,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return 0;
err_qpn:
- if (!sqpn)
- mlx4_qp_release_range(dev->dev, qpn, 1);
+ if (!sqpn) {
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_free(dev, qpn, 1);
+ else
+ mlx4_qp_release_range(dev->dev, qpn, 1);
+ }
err_proxy:
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
free_proxy_bufs(pd->device, qp);
@@ -932,8 +963,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_free(dev->dev, &qp->mqp);
- if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
- mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+ else
+ mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ }
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
@@ -987,9 +1022,16 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
*/
if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
- MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
+ MLX4_IB_SRIOV_TUNNEL_QP |
+ MLX4_IB_SRIOV_SQP |
+ MLX4_IB_QP_NETIF))
return ERR_PTR(-EINVAL);
+ if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+ if (init_attr->qp_type != IB_QPT_UD)
+ return ERR_PTR(-EINVAL);
+ }
+
if (init_attr->create_flags &&
(udata ||
((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
@@ -1144,16 +1186,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
- struct mlx4_qp_path *path, u8 port)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+ u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+ u8 port)
{
- int err;
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET;
- u8 mac[6];
- int is_mcast;
- u16 vlan_tag;
int vidx;
+ int smac_index;
+
path->grh_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
@@ -1188,22 +1229,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
- err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
- if (err)
- return err;
-
- memcpy(path->dmac, mac, 6);
+ memcpy(path->dmac, ah->dmac, ETH_ALEN);
path->ackto = MLX4_IB_LINK_TYPE_ETH;
- /* use index 0 into MAC table for IBoE */
- path->grh_mylmc &= 0x80;
+ /* find the index into MAC table for IBoE */
+ if (!is_zero_ether_addr((const u8 *)&smac)) {
+ if (mlx4_find_cached_mac(dev->dev, port, smac,
+ &smac_index))
+ return -ENOENT;
+ } else {
+ smac_index = 0;
+ }
+
+ path->grh_mylmc &= 0x80 | smac_index;
- vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+ path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
return -ENOENT;
path->vlan_index = vidx;
path->fl = 1 << 6;
+ path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
}
} else
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1212,6 +1258,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
return 0;
}
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+ enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_qp_path *path, u8 port)
+{
+ return _mlx4_set_path(dev, &qp->ah_attr,
+ mlx4_mac_to_u64((u8 *)qp->smac),
+ (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+ path, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+ const struct ib_qp_attr *qp,
+ enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_qp_path *path, u8 port)
+{
+ return _mlx4_set_path(dev, &qp->alt_ah_attr,
+ mlx4_mac_to_u64((u8 *)qp->alt_smac),
+ (qp_attr_mask & IB_QP_ALT_VID) ?
+ qp->alt_vlan_id : 0xffff,
+ path, port);
+}
+
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1235,6 +1303,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
struct mlx4_qp_context *context;
enum mlx4_qp_optpar optpar = 0;
int sqd_event;
+ int steer_qp = 0;
int err = -EINVAL;
context = kzalloc(sizeof *context, GFP_KERNEL);
@@ -1319,6 +1388,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
context->pri_path.counter_index = 0xff;
+
+ if (qp->flags & MLX4_IB_QP_NETIF) {
+ mlx4_ib_steer_qp_reg(dev, qp, 1);
+ steer_qp = 1;
+ }
}
if (attr_mask & IB_QP_PKEY_INDEX) {
@@ -1329,7 +1403,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+ if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
attr_mask & IB_QP_PORT ?
attr->port_num : qp->port))
goto out;
@@ -1352,8 +1426,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
dev->dev->caps.pkey_table_len[attr->alt_port_num])
goto out;
- if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
- attr->alt_port_num))
+ if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+ attr->alt_port_num))
goto out;
context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1464,6 +1538,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+ int is_eth = rdma_port_get_link_layer(
+ &dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET;
+ if (is_eth) {
+ context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
+ optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
+ }
+ }
+
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
@@ -1547,9 +1632,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->sq_next_wqe = 0;
if (qp->rq.wqe_cnt)
*qp->db.db = 0;
+
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
}
out:
+ if (err && steer_qp)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
return err;
}
@@ -1561,13 +1651,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
-
+ int ll;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ ll = IB_LINK_LAYER_UNSPECIFIED;
+ } else {
+ int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+ }
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask, ll)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
@@ -1784,8 +1882,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
return err;
}
- vlan = rdma_get_vlan_id(&sgid);
- is_vlan = vlan < 0x1000;
+ if (ah->av.eth.vlan != 0xffff) {
+ vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+ is_vlan = 1;
+ }
}
ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
@@ -2762,6 +2862,9 @@ done:
if (qp->flags & MLX4_IB_QP_LSO)
qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP;
+
qp_init_attr->sq_sig_type =
qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 97516eb363b7..db2ea31df832 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -582,8 +582,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->pkey_group.attrs =
alloc_group_attrs(show_port_pkey, store_port_pkey,
dev->dev->caps.pkey_table_len[port_num]);
- if (!p->pkey_group.attrs)
+ if (!p->pkey_group.attrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
ret = sysfs_create_group(&p->kobj, &p->pkey_group);
if (ret)
@@ -591,8 +593,10 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
p->gid_group.name = "gid_idx";
p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
- if (!p->gid_group.attrs)
+ if (!p->gid_group.attrs) {
+ ret = -ENOMEM;
goto err_free_pkey;
+ }
ret = sysfs_create_group(&p->kobj, &p->gid_group);
if (ret)
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b72627429745..b1705ce6eb88 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n)
return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}
+static u8 sw_ownership_bit(int n, int nent)
+{
+ return (n & nent) ? 1 : 0;
+}
+
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
struct mlx5_cqe64 *cqe64;
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
- !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
+
+ if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+ return cqe;
+ } else {
+ return NULL;
+ }
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
@@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
qp->sq.last_poll = tail;
}
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+ mlx5_buf_free(&dev->mdev, &buf->buf);
+}
+
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
void *cqe;
int idx;
+repoll:
cqe = next_cqe_sw(cq);
if (!cqe)
return -EAGAIN;
@@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
*/
rmb();
- /* TBD: resize CQ */
+ opcode = cqe64->op_own >> 4;
+ if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+ if (likely(cq->resize_buf)) {
+ free_cq_buf(dev, &cq->buf);
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ goto repoll;
+ } else {
+ mlx5_ib_warn(dev, "unexpected resize cqe\n");
+ }
+ }
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
@@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
}
wc->qp = &(*cur_qp)->ibqp;
- opcode = cqe64->op_own >> 4;
switch (opcode) {
case MLX5_CQE_REQ:
wq = &(*cur_qp)->sq;
@@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
return err;
buf->cqe_size = cqe_size;
+ buf->nent = nent;
return 0;
}
-static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
-{
- mlx5_buf_free(&dev->mdev, &buf->buf);
-}
-
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
@@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
+static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
- for (i = 0; i < nent; i++) {
- cqe = get_cqe(cq, i);
- cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
- cqe64->op_own = 0xf1;
+ for (i = 0; i < buf->nent; i++) {
+ cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
}
}
@@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
- init_cq_buf(cq, entries);
+ init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
@@ -818,12 +840,266 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
- return -ENOSYS;
+ struct mlx5_modify_cq_mbox_in *in;
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ int err;
+ u32 fsel;
+
+ if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+ return -ENOSYS;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->cqn = cpu_to_be32(mcq->mcq.cqn);
+ fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+ in->ctx.cq_period = cpu_to_be16(cq_period);
+ in->ctx.cq_max_count = cpu_to_be16(cq_count);
+ in->field_select = cpu_to_be32(fsel);
+ err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+ kfree(in);
+
+ if (err)
+ mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
+
+ return err;
+}
+
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, struct ib_udata *udata, int *npas,
+ int *page_shift, int *cqe_size)
+{
+ struct mlx5_ib_resize_cq ucmd;
+ struct ib_umem *umem;
+ int err;
+ int npages;
+ struct ib_ucontext *context = cq->buf.umem->context;
+
+ err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ if (err)
+ return err;
+
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+ npas, NULL);
+
+ cq->resize_umem = umem;
+ *cqe_size = ucmd.cqe_size;
+
+ return 0;
+}
+
+static void un_resize_user(struct mlx5_ib_cq *cq)
+{
+ ib_umem_release(cq->resize_umem);
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size)
+{
+ int err;
+
+ cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+ if (err)
+ goto ex;
+
+ init_cq_buf(cq, cq->resize_buf);
+
+ return 0;
+
+ex:
+ kfree(cq->resize_buf);
+ return err;
+}
+
+static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+}
+
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_cqe64 *scqe64;
+ struct mlx5_cqe64 *dcqe64;
+ void *start_cqe;
+ void *scqe;
+ void *dcqe;
+ int ssize;
+ int dsize;
+ int i;
+ u8 sw_own;
+
+ ssize = cq->buf.cqe_size;
+ dsize = cq->resize_buf->cqe_size;
+ if (ssize != dsize) {
+ mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+ return -EINVAL;
+ }
+
+ i = cq->mcq.cons_index;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ start_cqe = scqe;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+ dcqe = get_cqe_from_buf(cq->resize_buf,
+ (i + 1) & (cq->resize_buf->nent),
+ dsize);
+ dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+ sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+ memcpy(dcqe, scqe, dsize);
+ dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+ ++i;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ if (scqe == start_cqe) {
+ pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
+ cq->mcq.cqn);
+ return -ENOMEM;
+ }
+ }
+ ++cq->mcq.cons_index;
+ return 0;
}
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
- return -ENOSYS;
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ struct mlx5_modify_cq_mbox_in *in;
+ int err;
+ int npas;
+ int page_shift;
+ int inlen;
+ int uninitialized_var(cqe_size);
+ unsigned long flags;
+
+ if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+ pr_info("Firmware does not support resize CQ\n");
+ return -ENOSYS;
+ }
+
+ if (entries < 1)
+ return -EINVAL;
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries > dev->mdev.caps.max_cqes + 1)
+ return -EINVAL;
+
+ if (entries == ibcq->cqe + 1)
+ return 0;
+
+ mutex_lock(&cq->resize_mutex);
+ if (udata) {
+ err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
+ &cqe_size);
+ } else {
+ cqe_size = 64;
+ err = resize_kernel(dev, cq, entries, cqe_size);
+ if (!err) {
+ npas = cq->resize_buf->buf.npages;
+ page_shift = cq->resize_buf->buf.page_shift;
+ }
+ }
+
+ if (err)
+ goto ex;
+
+ inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto ex_resize;
+ }
+
+ if (udata)
+ mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
+ in->pas, 0);
+ else
+ mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+
+ in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+ in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+ in->ctx.page_offset = 0;
+ in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
+ in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
+ in->cqn = cpu_to_be32(cq->mcq.cqn);
+
+ err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+ if (err)
+ goto ex_alloc;
+
+ if (udata) {
+ cq->ibcq.cqe = entries - 1;
+ ib_umem_release(cq->buf.umem);
+ cq->buf.umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ } else {
+ struct mlx5_ib_cq_buf tbuf;
+ int resized = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (cq->resize_buf) {
+ err = copy_resize_cqes(cq);
+ if (!err) {
+ tbuf = cq->buf;
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ resized = 1;
+ }
+ }
+ cq->ibcq.cqe = entries - 1;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (resized)
+ free_cq_buf(dev, &tbuf);
+ }
+ mutex_unlock(&cq->resize_mutex);
+
+ mlx5_vfree(in);
+ return 0;
+
+ex_alloc:
+ mlx5_vfree(in);
+
+ex_resize:
+ if (udata)
+ un_resize_user(cq);
+ else
+ un_resize_kernel(dev, cq);
+ex:
+ mutex_unlock(&cq->resize_mutex);
+ return err;
}
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 306534109627..bf900579ac08 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,34 +535,51 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
+ int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
if (req.total_num_uuars == 0)
return ERR_PTR(-EINVAL);
- req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
+ req.total_num_uuars = ALIGN(req.total_num_uuars,
+ MLX5_NON_FP_BF_REGS_PER_PAGE);
if (req.num_low_latency_uuars > req.total_num_uuars - 1)
return ERR_PTR(-EINVAL);
- num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
+ num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
+ gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
resp.cache_line_size = L1_CACHE_BYTES;
@@ -585,7 +601,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_ctx;
}
- uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
+ uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
sizeof(*uuari->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
@@ -595,13 +611,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
/*
* clear all fast path uuars
*/
- for (i = 0; i < req.total_num_uuars; i++) {
+ for (i = 0; i < gross_uuars; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
set_bit(i, uuari->bitmap);
}
- uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
+ uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
err = -ENOMEM;
goto out_bitmap;
@@ -623,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 4c134d93d4fc..389e31965773 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -195,6 +195,7 @@ struct mlx5_ib_cq_buf {
struct mlx5_buf buf;
struct ib_umem *umem;
int cqe_size;
+ int nent;
};
enum mlx5_ib_qp_flags {
@@ -220,7 +221,7 @@ struct mlx5_ib_cq {
/* protect resize cq
*/
struct mutex resize_mutex;
- struct mlx5_ib_cq_resize *resize_buf;
+ struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
};
@@ -264,7 +265,6 @@ struct mlx5_ib_mr {
enum ib_wc_status status;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
- unsigned long start;
};
struct mlx5_ib_fast_reg_page_list {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 039c3e40fcb4..7c95ca1f0c25 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -146,7 +146,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- mr->start = jiffies;
err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7c6b4ba49bec..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -340,14 +342,57 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
+static int first_med_uuar(void)
+{
+ return 1;
+}
+
+static int next_uuar(int n)
+{
+ n++;
+
+ while (((n % 4) & 2))
+ n++;
+
+ return n;
+}
+
+static int num_med_uuar(struct mlx5_uuar_info *uuari)
+{
+ int n;
+
+ n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
+ uuari->num_low_latency_uuars - 1;
+
+ return n >= 0 ? n : 0;
+}
+
+static int max_uuari(struct mlx5_uuar_info *uuari)
+{
+ return uuari->num_uars * 4;
+}
+
+static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+{
+ int med;
+ int i;
+ int t;
+
+ med = num_med_uuar(uuari);
+ for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
+ t++;
+ if (t == med)
+ return next_uuar(i);
+ }
+
+ return 0;
+}
+
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int start_uuar;
int i;
- start_uuar = nuuars - uuari->num_low_latency_uuars;
- for (i = start_uuar; i < nuuars; i++) {
+ for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
if (!test_bit(i, uuari->bitmap)) {
set_bit(i, uuari->bitmap);
uuari->count[i]++;
@@ -360,19 +405,10 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
- int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
- int minidx = 1;
- int uuarn;
- int end;
+ int minidx = first_med_uuar();
int i;
- end = nuuars - uuari->num_low_latency_uuars;
-
- for (i = 1; i < end; i++) {
- uuarn = i & 3;
- if (uuarn == 2 || uuarn == 3)
- continue;
-
+ for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
if (uuari->count[i] < uuari->count[minidx])
minidx = i;
}
@@ -394,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -489,12 +531,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
{
struct mlx5_ib_ucontext *context;
struct mlx5_ib_create_qp ucmd;
- int page_shift;
+ int page_shift = 0;
int uar_index;
int npages;
- u32 offset;
+ u32 offset = 0;
int uuarn;
- int ncont;
+ int ncont = 0;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -510,11 +552,16 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
if (uuarn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
- mlx5_ib_dbg(dev, "reverting to high latency\n");
- uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ mlx5_ib_dbg(dev, "reverting to medium latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (uuarn < 0) {
- mlx5_ib_dbg(dev, "uuar allocation failed\n");
- return uuarn;
+ mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+ mlx5_ib_dbg(dev, "reverting to high latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ if (uuarn < 0) {
+ mlx5_ib_warn(dev, "uuar allocation failed\n");
+ return uuarn;
+ }
}
}
@@ -525,23 +572,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_uuar;
- qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
- if (IS_ERR(qp->umem)) {
- mlx5_ib_dbg(dev, "umem_get failed\n");
- err = PTR_ERR(qp->umem);
- goto err_uuar;
+ if (ucmd.buf_addr && qp->buf_size) {
+ qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+ qp->buf_size, 0, 0);
+ if (IS_ERR(qp->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(qp->umem);
+ goto err_uuar;
+ }
+ } else {
+ qp->umem = NULL;
}
- mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
- &ncont, NULL);
- err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
- if (err) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
+ if (qp->umem) {
+ mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
+ ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
}
- mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
- ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
@@ -549,7 +602,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = -ENOMEM;
goto err_umem;
}
- mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
+ if (qp->umem)
+ mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
(*in)->ctx.log_pg_sz_remote_qpn =
cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
(*in)->ctx.params2 = cpu_to_be32(offset << 6);
@@ -580,7 +634,8 @@ err_free:
mlx5_vfree(*in);
err_umem:
- ib_umem_release(qp->umem);
+ if (qp->umem)
+ ib_umem_release(qp->umem);
err_uuar:
free_uuar(&context->uuari, uuarn);
@@ -593,7 +648,8 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db);
- ib_umem_release(qp->umem);
+ if (qp->umem)
+ ib_umem_release(qp->umem);
free_uuar(&context->uuari, qp->uuarn);
}
@@ -609,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
@@ -1616,7 +1672,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+ !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_UNSPECIFIED))
goto out;
if ((attr_mask & IB_QP_PORT) &&
@@ -2212,6 +2269,10 @@ out:
qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+	/* Make sure the doorbell record is visible to the HCA before
+	 * we ring the doorbell */
+ wmb();
+
if (bf->need_lock)
spin_lock(&bf->lock);
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index a886de3e593c..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -93,6 +100,9 @@ struct mlx5_ib_create_cq_resp {
struct mlx5_ib_resize_cq {
__u64 buf_addr;
+ __u16 cqe_size;
+ __u16 reserved0;
+ __u32 reserved1;
};
struct mlx5_ib_create_srq {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 26a684536109..e354b2f04ad9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_UNSPECIFIED)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6b29249aa85a..9c9f2f57e960 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1354,8 +1354,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh->ha, ntohl(rt->rt_gateway));
if (arpindex >= 0) {
- if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
- neigh->ha, ETH_ALEN)) {
+ if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
/* Mac address same as in nes_arp_table */
goto out;
}
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
index b5b6056c8518..c0cddc0192d1 100644
--- a/drivers/infiniband/hw/ocrdma/Kconfig
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_OCRDMA
tristate "Emulex One Connect HCA support"
- depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n)
+ depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
select NET_VENDOR_EMULEX
select BE2NET
---help---
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 294dd27b601e..7c001b97b23f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -423,5 +423,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}
+static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ struct in6_addr in6;
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, mac_addr);
+ else
+ memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
+ return 0;
+}
#endif
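ocrdma_resolve_dmac() above takes one of two paths: a multicast destination GID is mapped to a multicast MAC, while a unicast destination uses the MAC the core stack already resolved into ah_attr->dmac. For reference, the multicast path follows the conventional RFC 2464 IPv6 mapping, 33:33 followed by the low 32 bits of the address, which is what rdma_get_mcast_mac is expected to produce; a hedged, standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of the RFC 2464 IPv6-multicast -> MAC mapping. */
static void mcast_gid_to_mac(const uint8_t gid[16], uint8_t mac[6])
{
	mac[0] = 0x33;
	mac[1] = 0x33;
	mac[2] = gid[12];
	mac[3] = gid[13];
	mac[4] = gid[14];
	mac[5] = gid[15];
}

int main(void)
{
	uint8_t gid[16] = { 0xff, 0x0e, [15] = 0x01 };	/* ff0e::1 */
	uint8_t mac[6];

	mcast_gid_to_mac(gid, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 33:33:00:00:00:01 */
}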
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index ee499d942257..34071143006e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -49,7 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index;
- vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+ vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
@@ -64,7 +64,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
eth_sz = sizeof(struct ocrdma_eth_basic);
}
memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
+ memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
+ status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
@@ -84,6 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
+ ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 56bf32fcb62c..1664d648cbfc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2076,23 +2076,6 @@ mbx_err:
return status;
}
-int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
- u8 *mac_addr)
-{
- struct in6_addr in6;
-
- memcpy(&in6, dgid, sizeof in6);
- if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, mac_addr);
- } else if (rdma_link_local_addr(&in6)) {
- rdma_get_ll_mac(&in6, mac_addr);
- } else {
- pr_err("%s() fail to resolve mac_addr.\n", __func__);
- return -EINVAL;
- }
- return 0;
-}
-
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
struct ib_qp_attr *attrs)
@@ -2126,14 +2109,14 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
- ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
+ ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- vlan_id = rdma_get_vlan_id(&sgid);
+ vlan_id = ah_attr->vlan_id;
if (vlan_id && (vlan_id < 0x1000)) {
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index f2a89d4cc7c4..82fe332ae6c6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -94,7 +94,6 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
-int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 91443bcb9e0e..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -67,46 +67,24 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
guid[7] = mac_addr[5];
}
-static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
-{
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- sgid->raw[8] = mac_addr[0] ^ 2;
- sgid->raw[9] = mac_addr[1];
- sgid->raw[10] = mac_addr[2];
- if (is_vlan) {
- sgid->raw[11] = vlan_id >> 8;
- sgid->raw[12] = vlan_id & 0xff;
- } else {
- sgid->raw[11] = 0xff;
- sgid->raw[12] = 0xfe;
- }
- sgid->raw[13] = mac_addr[3];
- sgid->raw[14] = mac_addr[4];
- sgid->raw[15] = mac_addr[5];
-}
-
-static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
{
int i;
- union ib_gid new_sgid;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
- ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
for (i = 0; i < OCRDMA_MAX_SGID; i++) {
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- memcpy(&dev->sgid_tbl[i], &new_sgid,
+ memcpy(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
return true;
- } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
+ } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
@@ -117,20 +95,17 @@ static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return false;
}
-static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
- bool is_vlan, u16 vlan_id)
+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
{
int found = false;
int i;
- union ib_gid sgid;
unsigned long flags;
- ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
spin_lock_irqsave(&dev->sgid_lock, flags);
/* first is default sgid, which cannot be deleted. */
for (i = 1; i < OCRDMA_MAX_SGID; i++) {
- if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
+ if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
/* found matching entry */
memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
found = true;
@@ -141,75 +116,18 @@ static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
return found;
}
-static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
-{
- /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
- union ib_gid *sgid = &dev->sgid_tbl[0];
-
- sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- ocrdma_get_guid(dev, &sgid->raw[8]);
-}
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
- struct net_device *netdev, *tmp;
- u16 vlan_id;
- bool is_vlan;
-
- netdev = dev->nic_info.netdev;
-
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, tmp) {
- if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
- if (!netif_running(tmp) || !netif_oper_up(tmp))
- continue;
- if (netdev != tmp) {
- vlan_id = vlan_dev_vlan_id(tmp);
- is_vlan = true;
- } else {
- is_vlan = false;
- vlan_id = 0;
- tmp = netdev;
- }
- ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
- }
- }
- rcu_read_unlock();
-}
-#else
-static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
-{
-
-}
-#endif /* VLAN */
-
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
+ union ib_gid *gid)
{
- ocrdma_add_default_sgid(dev);
- ocrdma_add_vlan_sgids(dev);
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-
-static int ocrdma_inet6addr_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *netdev = ifa->idev->dev;
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
bool updated = false;
bool is_vlan = false;
- u16 vid = 0;
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
- if (is_vlan) {
- vid = vlan_dev_vlan_id(netdev);
- netdev = vlan_dev_real_dev(netdev);
- }
+ if (is_vlan)
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -222,16 +140,14 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
if (!found)
return NOTIFY_DONE;
- if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
- return NOTIFY_DONE;
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, gid);
break;
case NETDEV_DOWN:
- updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_del_sgid(dev, gid);
break;
default:
break;
@@ -247,6 +163,32 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_OK;
}
+static int ocrdma_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ union ib_gid gid;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+ return ocrdma_addr_event(event, netdev, &gid);
+}
+
+static struct notifier_block ocrdma_inetaddr_notifier = {
+ .notifier_call = ocrdma_inetaddr_event
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static int ocrdma_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ union ib_gid *gid = (union ib_gid *)&ifa->addr;
+ struct net_device *netdev = ifa->idev->dev;
+ return ocrdma_addr_event(event, netdev, gid);
+}
+
static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
@@ -423,10 +365,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
- status = ocrdma_build_sgid_tbl(dev);
- if (status)
- goto alloc_err;
-
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
@@ -553,6 +491,10 @@ static int __init ocrdma_init_module(void)
{
int status;
+ status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
+ if (status)
+ return status;
+
#if IS_ENABLED(CONFIG_IPV6)
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
if (status)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 9f9570ec3c2e..60d5ac23ea80 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -31,7 +31,7 @@
#define Bit(_b) (1 << (_b))
#define OCRDMA_GEN1_FAMILY 0xB
-#define OCRDMA_GEN2_FAMILY 0x2
+#define OCRDMA_GEN2_FAMILY 0x0F
#define OCRDMA_SUBSYS_ROCE 10
enum {
@@ -1694,7 +1694,7 @@ struct ocrdma_grh {
u16 rsvd;
} __packed;
-#define OCRDMA_AV_VALID Bit(0)
+#define OCRDMA_AV_VALID Bit(7)
#define OCRDMA_AV_VLAN_VALID Bit(1)
struct ocrdma_av {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7686dceadd29..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
@@ -1415,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3cca55b51e54..0cad0c40d742 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask))
+ attr_mask, IB_LINK_LAYER_UNSPECIFIED))
goto inval;
if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index d6c7fe7f88d5..3ad651c3356c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
diff --git a/drivers/infiniband/hw/usnic/Kconfig b/drivers/infiniband/hw/usnic/Kconfig
new file mode 100644
index 000000000000..29ab11c34f3f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Kconfig
@@ -0,0 +1,10 @@
+config INFINIBAND_USNIC
+ tristate "Verbs support for Cisco VIC"
+ depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU
+ select ENIC
+ select NET_VENDOR_CISCO
+ select PCI_IOV
+ select INFINIBAND_USER_ACCESS
+ ---help---
+ This is a low-level driver for Cisco's Virtual Interface
+ Cards (VICs), including the VIC 1240 and 1280 cards.
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
new file mode 100644
index 000000000000..99fb2db47cd5
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/net/ethernet/cisco/enic
+
+obj-$(CONFIG_INFINIBAND_USNIC) += usnic_verbs.o
+
+usnic_verbs-y=\
+usnic_fwd.o \
+usnic_transport.o \
+usnic_uiom.o \
+usnic_uiom_interval_tree.o \
+usnic_vnic.o \
+usnic_ib_main.o \
+usnic_ib_qp_grp.o \
+usnic_ib_sysfs.o \
+usnic_ib_verbs.o \
+usnic_debugfs.o \
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h
new file mode 100644
index 000000000000..5be13d8991bc
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_H_
+#define USNIC_H_
+
+#define DRV_NAME "usnic_verbs"
+
+#define PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC 0x00cf /* User space NIC */
+
+#define DRV_VERSION "1.0.3"
+#define DRV_RELDATE "December 19, 2013"
+
+#endif /* USNIC_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
new file mode 100644
index 000000000000..04a66229584e
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#ifndef USNIC_ABI_H
+#define USNIC_ABI_H
+
+/* ABI between userspace and kernel */
+#define USNIC_UVERBS_ABI_VERSION 4
+
+#define USNIC_QP_GRP_MAX_WQS 8
+#define USNIC_QP_GRP_MAX_RQS 8
+#define USNIC_QP_GRP_MAX_CQS 16
+
+enum usnic_transport_type {
+ USNIC_TRANSPORT_UNKNOWN = 0,
+ USNIC_TRANSPORT_ROCE_CUSTOM = 1,
+ USNIC_TRANSPORT_IPV4_UDP = 2,
+ USNIC_TRANSPORT_MAX = 3,
+};
+
+struct usnic_transport_spec {
+ enum usnic_transport_type trans_type;
+ union {
+ struct {
+ uint16_t port_num;
+ } usnic_roce;
+ struct {
+ uint32_t sock_fd;
+ } udp;
+ };
+};
+
+struct usnic_ib_create_qp_cmd {
+ struct usnic_transport_spec spec;
+};
+
+/* TODO: Future - usnic_modify_qp needs to pass in generic filters */
+struct usnic_ib_create_qp_resp {
+ u32 vfid;
+ u32 qp_grp_id;
+ u64 bar_bus_addr;
+ u32 bar_len;
+/*
+ * WQ, RQ, CQ are explicitly specified because exposing a generic
+ * resources interface expands the scope of the ABI to many files.
+ */
+ u32 wq_cnt;
+ u32 rq_cnt;
+ u32 cq_cnt;
+ u32 wq_idx[USNIC_QP_GRP_MAX_WQS];
+ u32 rq_idx[USNIC_QP_GRP_MAX_RQS];
+ u32 cq_idx[USNIC_QP_GRP_MAX_CQS];
+ u32 transport;
+ u32 reserved[9];
+};
+
+#endif /* USNIC_ABI_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
new file mode 100644
index 000000000000..393567266142
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_CMN_PKT_HDR_H
+#define USNIC_CMN_PKT_HDR_H
+
+#define USNIC_ROCE_ETHERTYPE (0x8915)
+#define USNIC_ROCE_GRH_VER (8)
+#define USNIC_PROTO_VER (1)
+#define USNIC_ROCE_GRH_VER_SHIFT (4)
+
+#endif /* USNIC_CMN_PKT_HDR_H */
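For reference, usnic_fwd_init_usnic_filter() (declared in usnic_fwd.h later in this patch) packs two of these constants into a single protocol-version byte: (USNIC_ROCE_GRH_VER << USNIC_ROCE_GRH_VER_SHIFT) | USNIC_PROTO_VER = (8 << 4) | 1 = 0x81.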
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h
new file mode 100644
index 000000000000..9d737ed5e55d
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_common_util.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_CMN_UTIL_H
+#define USNIC_CMN_UTIL_H
+
+static inline void
+usnic_mac_to_gid(const char *const mac, char *raw_gid)
+{
+ raw_gid[0] = 0xfe;
+ raw_gid[1] = 0x80;
+ memset(&raw_gid[2], 0, 6);
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+static inline void
+usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid)
+{
+ raw_gid[0] = 0xfe;
+ raw_gid[1] = 0x80;
+ memset(&raw_gid[2], 0, 2);
+ memcpy(&raw_gid[4], &inaddr, 4);
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+static inline void
+usnic_write_gid_if_id_from_mac(char *mac, char *raw_gid)
+{
+ raw_gid[8] = mac[0]^2;
+ raw_gid[9] = mac[1];
+ raw_gid[10] = mac[2];
+ raw_gid[11] = 0xff;
+ raw_gid[12] = 0xfe;
+ raw_gid[13] = mac[3];
+ raw_gid[14] = mac[4];
+ raw_gid[15] = mac[5];
+}
+
+#endif /* USNIC_CMN_UTIL_H */
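A hedged, standalone illustration of the GID layout usnic_mac_ip_to_gid() produces: the fe80 link-local prefix in bytes 0-1, the IPv4 address in bytes 4-7, and a modified EUI-64 derived from the MAC (universal/local bit flipped, ff:fe inserted) in bytes 8-15. The driver helper takes the address as a __be32; the byte-array parameter, types and main() scaffolding below are assumptions made so the example stays self-contained.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as usnic_mac_ip_to_gid(), rewritten with plain user-space types. */
static void mac_ip_to_gid(const uint8_t mac[6], const uint8_t ip_be[4],
			  uint8_t gid[16])
{
	memset(gid, 0, 16);
	gid[0] = 0xfe;				/* link-local style prefix */
	gid[1] = 0x80;
	memcpy(&gid[4], ip_be, 4);		/* IPv4 address, network order */
	gid[8]  = mac[0] ^ 2;			/* flip the universal/local bit */
	gid[9]  = mac[1];
	gid[10] = mac[2];
	gid[11] = 0xff;				/* ff:fe marks a MAC-derived EUI-64 */
	gid[12] = 0xfe;
	gid[13] = mac[3];
	gid[14] = mac[4];
	gid[15] = mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x25, 0xb5, 0x01, 0x02, 0x03 };
	uint8_t ip_be[4] = { 10, 0, 0, 1 };	/* 10.0.0.1 */
	uint8_t gid[16];
	int i;

	mac_ip_to_gid(mac, ip_be, gid);
	for (i = 0; i < 16; i++)
		printf("%02x%s", gid[i], (i & 1) && i < 15 ? ":" : "");
	printf("\n");	/* fe80:0000:0a00:0001:0225:b5ff:fe01:0203 */
	return 0;
}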
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
new file mode 100644
index 000000000000..5d13860161a4
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "usnic.h"
+#include "usnic_log.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_transport.h"
+
+static struct dentry *debugfs_root;
+static struct dentry *flows_dentry;
+
+static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ char buf[500];
+ int res;
+
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "version: %s\n"
+ "build date: %s\n",
+ DRV_VERSION, DRV_RELDATE);
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations usnic_debugfs_buildinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = usnic_debugfs_buildinfo_read
+};
+
+static ssize_t flowinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ int n;
+ int left;
+ char *ptr;
+ char buf[512];
+
+ qp_flow = f->private_data;
+ ptr = buf;
+ left = count;
+
+ if (*ppos > 0)
+ return 0;
+
+ spin_lock(&qp_flow->qp_grp->lock);
+ n = scnprintf(ptr, left,
+ "QP Grp ID: %d Transport: %s ",
+ qp_flow->qp_grp->grp_id,
+ usnic_transport_to_str(qp_flow->trans_type));
+ UPDATE_PTR_LEFT(n, ptr, left);
+ if (qp_flow->trans_type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ n = scnprintf(ptr, left, "Port_Num:%hu\n",
+ qp_flow->usnic_roce.port_num);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ } else if (qp_flow->trans_type == USNIC_TRANSPORT_IPV4_UDP) {
+ n = usnic_transport_sock_to_str(ptr, left,
+ qp_flow->udp.sock);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ spin_unlock(&qp_flow->qp_grp->lock);
+
+ return simple_read_from_buffer(data, count, ppos, buf, ptr - buf);
+}
+
+static const struct file_operations flowinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = flowinfo_read,
+};
+
+void usnic_debugfs_init(void)
+{
+ debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR(debugfs_root)) {
+ usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n");
+ goto out_clear_root;
+ }
+
+ flows_dentry = debugfs_create_dir("flows", debugfs_root);
+ if (IS_ERR_OR_NULL(flows_dentry)) {
+ usnic_err("Failed to create debugfs flow dir with err %ld\n",
+ PTR_ERR(flows_dentry));
+ goto out_free_root;
+ }
+
+ debugfs_create_file("build-info", S_IRUGO, debugfs_root,
+ NULL, &usnic_debugfs_buildinfo_ops);
+ return;
+
+out_free_root:
+ debugfs_remove_recursive(debugfs_root);
+out_clear_root:
+ debugfs_root = NULL;
+}
+
+void usnic_debugfs_exit(void)
+{
+ if (!debugfs_root)
+ return;
+
+ debugfs_remove_recursive(debugfs_root);
+ debugfs_root = NULL;
+}
+
+void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ if (IS_ERR_OR_NULL(flows_dentry))
+ return;
+
+ scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name),
+ "%u", qp_flow->flow->flow_id);
+ qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name,
+ S_IRUGO,
+ flows_dentry,
+ qp_flow,
+ &flowinfo_ops);
+ if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) {
+ usnic_err("Failed to create dbg fs entry for flow %u\n",
+ qp_flow->flow->flow_id);
+ }
+}
+
+void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
+ debugfs_remove(qp_flow->dbgfs_dentry);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h
new file mode 100644
index 000000000000..4087d24a88f6
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#ifndef USNIC_DEBUGFS_H_
+#define USNIC_DEBUGFS_H_
+
+#include "usnic_ib_qp_grp.h"
+
+void usnic_debugfs_init(void);
+
+void usnic_debugfs_exit(void);
+void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow);
+void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow);
+
+#endif /* !USNIC_DEBUGFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
new file mode 100644
index 000000000000..e3c9bd9d3ba3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "enic_api.h"
+#include "usnic_common_pkt_hdr.h"
+#include "usnic_fwd.h"
+#include "usnic_log.h"
+
+static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx,
+ enum vnic_devcmd_cmd cmd, u64 *a0,
+ u64 *a1)
+{
+ int status;
+ struct net_device *netdev = ufdev->netdev;
+
+ lockdep_assert_held(&ufdev->lock);
+
+ status = enic_api_devcmd_proxy_by_index(netdev,
+ vnic_idx,
+ cmd,
+ a0, a1,
+ 1000);
+ if (status) {
+ if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) {
+ usnic_dbg("Dev %s vnic idx %u cmd %u already deleted",
+ ufdev->name, vnic_idx, cmd);
+ } else {
+ usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n",
+ ufdev->name, vnic_idx, cmd,
+ status);
+ }
+ } else {
+ usnic_dbg("Dev %s vnic idx %u cmd %u success",
+ ufdev->name, vnic_idx, cmd);
+ }
+
+ return status;
+}
+
+static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1)
+{
+ int status;
+
+ spin_lock(&ufdev->lock);
+ status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1);
+ spin_unlock(&ufdev->lock);
+
+ return status;
+}
+
+struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
+{
+ struct usnic_fwd_dev *ufdev;
+
+ ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
+ if (!ufdev)
+ return NULL;
+
+ ufdev->pdev = pdev;
+ ufdev->netdev = pci_get_drvdata(pdev);
+ spin_lock_init(&ufdev->lock);
+ strncpy(ufdev->name, netdev_name(ufdev->netdev),
+ sizeof(ufdev->name) - 1);
+
+ return ufdev;
+}
+
+void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
+{
+ kfree(ufdev);
+}
+
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
+{
+ spin_lock(&ufdev->lock);
+ memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
+ spin_unlock(&ufdev->lock);
+}
+
+int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+{
+ int status;
+
+ spin_lock(&ufdev->lock);
+ if (ufdev->inaddr == 0) {
+ ufdev->inaddr = inaddr;
+ status = 0;
+ } else {
+ status = -EFAULT;
+ }
+ spin_unlock(&ufdev->lock);
+
+ return status;
+}
+
+void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->inaddr = 0;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->link_up = 1;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->link_up = 0;
+ spin_unlock(&ufdev->lock);
+}
+
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
+{
+ spin_lock(&ufdev->lock);
+ ufdev->mtu = mtu;
+ spin_unlock(&ufdev->lock);
+}
+
+static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev)
+{
+ lockdep_assert_held(&ufdev->lock);
+
+ if (!ufdev->link_up)
+ return -EPERM;
+
+ return 0;
+}
+
+static int validate_filter_locked(struct usnic_fwd_dev *ufdev,
+ struct filter *filter)
+{
+
+ lockdep_assert_held(&ufdev->lock);
+
+ if (filter->type == FILTER_IPV4_5TUPLE) {
+ if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD))
+ return -EACCES;
+ if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT))
+ return -EBUSY;
+ else if (ufdev->inaddr == 0)
+ return -EINVAL;
+ else if (filter->u.ipv4.dst_port == 0)
+ return -ERANGE;
+ else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr)
+ return -EFAULT;
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+static void fill_tlv(struct filter_tlv *tlv, struct filter *filter,
+ struct filter_action *action)
+{
+ tlv->type = CLSF_TLV_FILTER;
+ tlv->length = sizeof(struct filter);
+ *((struct filter *)&tlv->val) = *filter;
+
+ tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) +
+ sizeof(struct filter));
+ tlv->type = CLSF_TLV_ACTION;
+ tlv->length = sizeof(struct filter_action);
+ *((struct filter_action *)&tlv->val) = *action;
+}
+
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+ struct usnic_filter_action *uaction)
+{
+ struct filter_tlv *tlv;
+ struct pci_dev *pdev;
+ struct usnic_fwd_flow *flow;
+ uint64_t a0, a1;
+ uint64_t tlv_size;
+ dma_addr_t tlv_pa;
+ int status;
+
+ pdev = ufdev->pdev;
+ tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) +
+ sizeof(struct filter_action));
+
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa);
+ if (!tlv) {
+ usnic_err("Failed to allocate memory\n");
+ status = -ENOMEM;
+ goto out_free_flow;
+ }
+
+ fill_tlv(tlv, filter, &uaction->action);
+
+ spin_lock(&ufdev->lock);
+ status = usnic_fwd_dev_ready_locked(ufdev);
+ if (status) {
+ usnic_err("Forwarding dev %s not ready with status %d\n",
+ ufdev->name, status);
+ goto out_free_tlv;
+ }
+
+ status = validate_filter_locked(ufdev, filter);
+ if (status) {
+ usnic_err("Failed to validate filter with status %d\n",
+ status);
+ goto out_free_tlv;
+ }
+
+ /* Issue Devcmd */
+ a0 = tlv_pa;
+ a1 = tlv_size;
+ status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx,
+ CMD_ADD_FILTER, &a0, &a1);
+ if (status) {
+ usnic_err("VF %s Filter add failed with status:%d",
+ ufdev->name, status);
+ status = -EFAULT;
+ goto out_free_tlv;
+ } else {
+ usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0);
+ }
+
+ flow->flow_id = (uint32_t) a0;
+ flow->vnic_idx = uaction->vnic_idx;
+ flow->ufdev = ufdev;
+
+out_free_tlv:
+ spin_unlock(&ufdev->lock);
+ pci_free_consistent(pdev, tlv_size, tlv, tlv_pa);
+ if (!status)
+ return flow;
+out_free_flow:
+ kfree(flow);
+ return ERR_PTR(status);
+}
+
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)
+{
+ int status;
+ u64 a0, a1;
+
+ a0 = flow->flow_id;
+
+ status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx,
+ CMD_DEL_FILTER, &a0, &a1);
+ if (status) {
+ if (status == ERR_EINVAL) {
+ usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d",
+ flow->flow_id, flow->vnic_idx,
+ flow->ufdev->name, status);
+ } else {
+ usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d",
+ flow->ufdev->name, flow->vnic_idx,
+ flow->flow_id, status);
+ }
+ status = 0;
+ /*
+ * Log the error and fake success to the caller because if
+ * a flow fails to be deleted in the firmware, it is an
+ * unrecoverable error.
+ */
+ } else {
+ usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED",
+ flow->ufdev->name, flow->vnic_idx,
+ flow->flow_id);
+ }
+
+ kfree(flow);
+ return status;
+}
+
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
+{
+ int status;
+ struct net_device *pf_netdev;
+ u64 a0, a1;
+
+ pf_netdev = ufdev->netdev;
+ a0 = qp_idx;
+ a1 = CMD_QP_RQWQ;
+
+ status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
+ &a0, &a1);
+ if (status) {
+ usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx,
+ status);
+ } else {
+ usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED",
+ netdev_name(pf_netdev),
+ vnic_idx, qp_idx);
+ }
+
+ return status;
+}
+
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
+{
+ int status;
+ u64 a0, a1;
+ struct net_device *pf_netdev;
+
+ pf_netdev = ufdev->netdev;
+ a0 = qp_idx;
+ a1 = CMD_QP_RQWQ;
+
+ status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
+ &a0, &a1);
+ if (status) {
+ usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx,
+ status);
+ } else {
+ usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
+ netdev_name(pf_netdev),
+ vnic_idx,
+ qp_idx);
+ }
+
+ return status;
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
new file mode 100644
index 000000000000..93713a2230b3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_FWD_H_
+#define USNIC_FWD_H_
+
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/in.h>
+
+#include "usnic_abi.h"
+#include "usnic_common_pkt_hdr.h"
+#include "vnic_devcmd.h"
+
+struct usnic_fwd_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ spinlock_t lock;
+ /*
+ * The following fields can be read directly off the device.
+ * However, they should be set by an accessor function, except name,
+ * which cannot be changed.
+ */
+ bool link_up;
+ char mac[ETH_ALEN];
+ unsigned int mtu;
+ __be32 inaddr;
+ char name[IFNAMSIZ+1];
+};
+
+struct usnic_fwd_flow {
+ uint32_t flow_id;
+ struct usnic_fwd_dev *ufdev;
+ unsigned int vnic_idx;
+};
+
+struct usnic_filter_action {
+ int vnic_idx;
+ struct filter_action action;
+};
+
+struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
+void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
+
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
+int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
+void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu);
+
+/*
+ * Allocate a flow on this forwarding device. Whoever calls this function
+ * must monitor netdev events on ufdev's netdevice. If NETDEV_REBOOT or
+ * NETDEV_DOWN is seen, the flow will no longer function and must be
+ * freed immediately by calling usnic_fwd_dealloc_flow.
+ */
+struct usnic_fwd_flow*
+usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter,
+ struct usnic_filter_action *action);
+int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow);
+int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx);
+
+static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
+ uint32_t usnic_id)
+{
+ filter->type = FILTER_USNIC_ID;
+ filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
+ filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
+ FILTER_FIELD_USNIC_ID |
+ FILTER_FIELD_USNIC_PROTO;
+ filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER <<
+ USNIC_ROCE_GRH_VER_SHIFT) |
+ USNIC_PROTO_VER;
+ filter->u.usnic.usnic_id = usnic_id;
+}
+
+static inline void usnic_fwd_init_udp_filter(struct filter *filter,
+ uint32_t daddr, uint16_t dport)
+{
+ filter->type = FILTER_IPV4_5TUPLE;
+ filter->u.ipv4.flags = FILTER_FIELD_5TUP_PROTO;
+ filter->u.ipv4.protocol = PROTO_UDP;
+
+ if (daddr) {
+ filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_AD;
+ filter->u.ipv4.dst_addr = daddr;
+ }
+
+ if (dport) {
+ filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_PT;
+ filter->u.ipv4.dst_port = dport;
+ }
+}
+
+#endif /* !USNIC_FWD_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
new file mode 100644
index 000000000000..e5a9297dd1bd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_H_
+#define USNIC_IB_H_
+
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+
+#include <rdma/ib_verbs.h>
+
+
+#include "usnic.h"
+#include "usnic_abi.h"
+#include "usnic_vnic.h"
+
+#define USNIC_IB_PORT_CNT 1
+#define USNIC_IB_NUM_COMP_VECTORS 1
+
+extern unsigned int usnic_ib_share_vf;
+
+struct usnic_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ /* Protected by usnic_ib_dev->usdev_lock */
+ struct list_head qp_grp_list;
+ struct list_head link;
+};
+
+struct usnic_ib_pd {
+ struct ib_pd ibpd;
+ struct usnic_uiom_pd *umem_pd;
+};
+
+struct usnic_ib_mr {
+ struct ib_mr ibmr;
+ struct usnic_uiom_reg *umem;
+};
+
+struct usnic_ib_dev {
+ struct ib_device ib_dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct usnic_fwd_dev *ufdev;
+ struct list_head ib_dev_link;
+ struct list_head vf_dev_list;
+ struct list_head ctx_list;
+ struct mutex usdev_lock;
+
+ /* provisioning information */
+ struct kref vf_cnt;
+ unsigned int vf_res_cnt[USNIC_VNIC_RES_TYPE_MAX];
+
+ /* sysfs vars for QPN reporting */
+ struct kobject *qpn_kobj;
+};
+
+struct usnic_ib_vf {
+ struct usnic_ib_dev *pf;
+ spinlock_t lock;
+ struct usnic_vnic *vnic;
+ unsigned int qp_grp_ref_cnt;
+ struct usnic_ib_pd *pd;
+ struct list_head link;
+};
+
+static inline
+struct usnic_ib_dev *to_usdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct usnic_ib_dev, ib_dev);
+}
+
+static inline
+struct usnic_ib_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
+}
+
+static inline
+struct usnic_ib_pd *to_upd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct usnic_ib_pd, ibpd);
+}
+
+static inline
+struct usnic_ib_ucontext *to_uucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext);
+}
+
+static inline
+struct usnic_ib_mr *to_umr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct usnic_ib_mr, ibmr);
+}
+void usnic_ib_log_vf(struct usnic_ib_vf *vf);
+
+#define UPDATE_PTR_LEFT(N, P, L) \
+do { \
+ L -= (N); \
+ P += (N); \
+} while (0)
+
+#endif /* USNIC_IB_H_ */
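UPDATE_PTR_LEFT() above is a small helper for accumulating several scnprintf()-style writes into one fixed buffer; flowinfo_read() in usnic_debugfs.c (earlier in this patch) uses it exactly that way. A hedged user-space sketch of the same pattern; snprintf_clamped() is an assumed stand-in for the kernel's scnprintf(), which returns the number of characters actually stored rather than the number that would have been written:

#include <stdarg.h>
#include <stdio.h>

#define UPDATE_PTR_LEFT(N, P, L) \
do { \
	L -= (N); \
	P += (N); \
} while (0)

/* Stand-in for scnprintf(): chars written, never more than size - 1, never negative. */
static int snprintf_clamped(char *buf, int size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size <= 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return n >= size ? size - 1 : n;
}

int main(void)
{
	char buf[64];
	char *ptr = buf;
	int left = sizeof(buf);
	int n;

	n = snprintf_clamped(ptr, left, "QP Grp ID: %d ", 42);
	UPDATE_PTR_LEFT(n, ptr, left);
	n = snprintf_clamped(ptr, left, "Transport: %s\n", "ROCE_CUSTOM");
	UPDATE_PTR_LEFT(n, ptr, left);

	fputs(buf, stdout);	/* ptr - buf is the total length written */
	return 0;
}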
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
new file mode 100644
index 000000000000..fb6d026f92cd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Author: Upinder Malhi <umalhi@cisco.com>
+ * Author: Anant Deepak <anadeepa@cisco.com>
+ * Author: Cesare Cantu' <cantuc@cisco.com>
+ * Author: Jeff Squyres <jsquyres@cisco.com>
+ * Author: Kiran Thirumalai <kithirum@cisco.com>
+ * Author: Xuyang Wang <xuywang@cisco.com>
+ * Author: Reese Faucette <rfaucett@cisco.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_abi.h"
+#include "usnic_common_util.h"
+#include "usnic_ib.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_log.h"
+#include "usnic_fwd.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_verbs.h"
+#include "usnic_transport.h"
+#include "usnic_uiom.h"
+#include "usnic_ib_sysfs.h"
+
+unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
+unsigned int usnic_ib_share_vf = 1;
+
+static const char usnic_version[] =
+ DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
+static LIST_HEAD(usnic_ib_ibdev_list);
+
+/* Callback dump funcs */
+static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
+{
+ struct usnic_ib_vf *vf = obj;
+ return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+}
+/* End callback dump funcs */
+
+static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
+{
+ usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
+ usnic_ib_dump_vf_hdr,
+ usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
+}
+
+void usnic_ib_log_vf(struct usnic_ib_vf *vf)
+{
+ char buf[1000];
+ usnic_ib_dump_vf(vf, buf, sizeof(buf));
+ usnic_dbg("%s\n", buf);
+}
+
+/* Start of netdev section */
+static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
+{
+ const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
+ "NETDEV_REBOOT", "NETDEV_CHANGE",
+ "NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
+ "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
+ "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
+ "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
+ "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
+ "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
+ };
+
+ if (event >= ARRAY_SIZE(event2str))
+ return "UNKNOWN_NETDEV_EVENT";
+ else
+ return event2str[event];
+}
+
+static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
+{
+ struct usnic_ib_ucontext *ctx;
+ struct usnic_ib_qp_grp *qp_grp;
+ enum ib_qp_state cur_state;
+ int status;
+
+ BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
+
+ list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
+ list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
+ cur_state = qp_grp->state;
+ if (cur_state == IB_QPS_INIT ||
+ cur_state == IB_QPS_RTR ||
+ cur_state == IB_QPS_RTS) {
+ status = usnic_ib_qp_grp_modify(qp_grp,
+ IB_QPS_ERR,
+ NULL);
+ if (status) {
+ usnic_err("Failed to transistion qp grp %u from %s to %s\n",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string
+ (cur_state),
+ usnic_ib_qp_grp_state_to_string
+ (IB_QPS_ERR));
+ }
+ }
+ }
+ }
+}
+
+static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
+ unsigned long event)
+{
+ struct net_device *netdev;
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ netdev = us_ibdev->netdev;
+ switch (event) {
+ case NETDEV_REBOOT:
+ usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ if (!us_ibdev->ufdev->link_up &&
+ netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+ usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
+ ib_event.event = IB_EVENT_PORT_ACTIVE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else if (us_ibdev->ufdev->link_up &&
+ !netif_carrier_ok(netdev)) {
+ usnic_fwd_carrier_down(us_ibdev->ufdev);
+ usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_PORT_ERR;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ } else {
+ usnic_dbg("Ignoring %s on %s\n",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ break;
+ case NETDEV_CHANGEADDR:
+ if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
+ sizeof(us_ibdev->ufdev->mac))) {
+ usnic_dbg("Ignoring addr change on %s\n",
+ us_ibdev->ib_dev.name);
+ } else {
+ usnic_info(" %s old mac: %pM new mac: %pM\n",
+ us_ibdev->ib_dev.name,
+ us_ibdev->ufdev->mac,
+ netdev->dev_addr);
+ usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ }
+
+ break;
+ case NETDEV_CHANGEMTU:
+ if (us_ibdev->ufdev->mtu != netdev->mtu) {
+ usnic_info("MTU Change on %s old: %u new: %u\n",
+ us_ibdev->ib_dev.name,
+ us_ibdev->ufdev->mtu, netdev->mtu);
+ usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ } else {
+ usnic_dbg("Ignoring MTU change on %s\n",
+ us_ibdev->ib_dev.name);
+ }
+ break;
+ default:
+ usnic_dbg("Ignoring event %s on %s",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+}
+
+static int usnic_ib_netdevice_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->netdev == netdev) {
+ usnic_ib_handle_usdev_event(us_ibdev, event);
+ break;
+ }
+ }
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block usnic_ib_netdevice_notifier = {
+ .notifier_call = usnic_ib_netdevice_event
+};
+/* End of netdev section */
+
+/* Start of inet section */
+static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct ib_event ib_event;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+
+ switch (event) {
+ case NETDEV_DOWN:
+ usnic_info("%s via ip notifiers",
+ usnic_ib_netdev_event_to_string(event));
+ usnic_fwd_del_ipaddr(us_ibdev->ufdev);
+ usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ case NETDEV_UP:
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+ usnic_info("%s via ip notifiers: ip %pI4",
+ usnic_ib_netdev_event_to_string(event),
+ &us_ibdev->ufdev->inaddr);
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = &us_ibdev->ib_dev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+ break;
+ default:
+ usnic_info("Ignoring event %s on %s",
+ usnic_ib_netdev_event_to_string(event),
+ us_ibdev->ib_dev.name);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return NOTIFY_DONE;
+}
+
+static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *netdev = ifa->ifa_dev->dev;
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->netdev == netdev) {
+ usnic_ib_handle_inet_event(us_ibdev, event, ptr);
+ break;
+ }
+ }
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+
+ return NOTIFY_DONE;
+}
+static struct notifier_block usnic_ib_inetaddr_notifier = {
+ .notifier_call = usnic_ib_inetaddr_event
+};
+/* End of inet section*/
+
+/* Start of PF discovery section */
+static void *usnic_ib_device_add(struct pci_dev *dev)
+{
+ struct usnic_ib_dev *us_ibdev;
+ union ib_gid gid;
+ struct in_ifaddr *in;
+ struct net_device *netdev;
+
+ usnic_dbg("\n");
+ netdev = pci_get_drvdata(dev);
+
+ us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
+ if (IS_ERR_OR_NULL(us_ibdev)) {
+ usnic_err("Device %s context alloc failed\n",
+ netdev_name(pci_get_drvdata(dev)));
+ return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT);
+ }
+
+ us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
+ if (IS_ERR_OR_NULL(us_ibdev->ufdev)) {
+ usnic_err("Failed to alloc ufdev for %s with err %ld\n",
+ pci_name(dev), PTR_ERR(us_ibdev->ufdev));
+ goto err_dealloc;
+ }
+
+ mutex_init(&us_ibdev->usdev_lock);
+ INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
+ INIT_LIST_HEAD(&us_ibdev->ctx_list);
+
+ us_ibdev->pdev = dev;
+ us_ibdev->netdev = pci_get_drvdata(dev);
+ us_ibdev->ib_dev.owner = THIS_MODULE;
+ us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
+ us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
+ us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
+ us_ibdev->ib_dev.dma_device = &dev->dev;
+ us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
+ strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
+
+ us_ibdev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+
+ us_ibdev->ib_dev.query_device = usnic_ib_query_device;
+ us_ibdev->ib_dev.query_port = usnic_ib_query_port;
+ us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
+ us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
+ us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
+ us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
+ us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
+ us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
+ us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
+ us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
+ us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
+ us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
+ us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
+ us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
+ us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
+ us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
+ us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
+ us_ibdev->ib_dev.mmap = usnic_ib_mmap;
+ us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
+ us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
+ us_ibdev->ib_dev.post_send = usnic_ib_post_send;
+ us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
+ us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
+ us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
+ us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
+
+ if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ goto err_fwd_dealloc;
+
+ usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
+ usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
+ if (netif_carrier_ok(us_ibdev->netdev))
+ usnic_fwd_carrier_up(us_ibdev->ufdev);
+
+ in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
+ if (in != NULL)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+
+ usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
+ us_ibdev->ufdev->inaddr, &gid.raw[0]);
+ memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
+ sizeof(gid.global.interface_id));
+ kref_init(&us_ibdev->vf_cnt);
+
+ usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
+ us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
+ us_ibdev->ufdev->mtu);
+ return us_ibdev;
+
+err_fwd_dealloc:
+ usnic_fwd_dev_free(us_ibdev->ufdev);
+err_dealloc:
+ usnic_err("failed -- deallocing device\n");
+ ib_dealloc_device(&us_ibdev->ib_dev);
+ return NULL;
+}
+
+static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
+{
+ usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_ib_sysfs_unregister_usdev(us_ibdev);
+ usnic_fwd_dev_free(us_ibdev->ufdev);
+ ib_unregister_device(&us_ibdev->ib_dev);
+ ib_dealloc_device(&us_ibdev->ib_dev);
+}
+
+static void usnic_ib_undiscover_pf(struct kref *kref)
+{
+ struct usnic_ib_dev *us_ibdev, *tmp;
+ struct pci_dev *dev;
+ bool found = false;
+
+ dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry_safe(us_ibdev, tmp,
+ &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->pdev == dev) {
+ list_del(&us_ibdev->ib_dev_link);
+ usnic_ib_device_remove(us_ibdev);
+ found = true;
+ break;
+ }
+ }
+
+ WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
+
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+}
+
+static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct pci_dev *parent_pci, *vf_pci;
+ int err;
+
+ vf_pci = usnic_vnic_get_pdev(vnic);
+ parent_pci = pci_physfn(vf_pci);
+
+ BUG_ON(!parent_pci);
+
+ mutex_lock(&usnic_ib_ibdev_list_lock);
+ list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
+ if (us_ibdev->pdev == parent_pci) {
+ kref_get(&us_ibdev->vf_cnt);
+ goto out;
+ }
+ }
+
+ us_ibdev = usnic_ib_device_add(parent_pci);
+ if (IS_ERR_OR_NULL(us_ibdev)) {
+ us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
+ goto out;
+ }
+
+ err = usnic_ib_sysfs_register_usdev(us_ibdev);
+ if (err) {
+ usnic_ib_device_remove(us_ibdev);
+ us_ibdev = ERR_PTR(err);
+ goto out;
+ }
+
+ list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
+out:
+ mutex_unlock(&usnic_ib_ibdev_list_lock);
+ return us_ibdev;
+}
+/* End of PF discovery section */
+
+/* Start of PCI section */
+
+static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
+ {0,}
+};
+
+static int usnic_ib_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct usnic_ib_dev *pf;
+ struct usnic_ib_vf *vf;
+ enum usnic_vnic_res_type res_type;
+
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ usnic_err("Failed to enable %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_clean_vf;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ usnic_err("Failed to request region for %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vf);
+
+ vf->vnic = usnic_vnic_alloc(pdev);
+ if (IS_ERR_OR_NULL(vf->vnic)) {
+ err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
+ usnic_err("Failed to alloc vnic for %s with err %d\n",
+ pci_name(pdev), err);
+ goto out_release_regions;
+ }
+
+ pf = usnic_ib_discover_pf(vf->vnic);
+ if (IS_ERR_OR_NULL(pf)) {
+ usnic_err("Failed to discover pf of vnic %s with err%ld\n",
+ pci_name(pdev), PTR_ERR(pf));
+ err = pf ? PTR_ERR(pf) : -EFAULT;
+ goto out_clean_vnic;
+ }
+
+ vf->pf = pf;
+ spin_lock_init(&vf->lock);
+ mutex_lock(&pf->usdev_lock);
+ list_add_tail(&vf->link, &pf->vf_dev_list);
+ /*
+ * Save max settings (will be same for each VF, easier to re-write than
+ * to say "if (!set) { set_values(); set=1; }
+ */
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX;
+ res_type++) {
+ pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
+ res_type);
+ }
+
+ mutex_unlock(&pf->usdev_lock);
+
+ usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
+ pf->ib_dev.name);
+ usnic_ib_log_vf(vf);
+ return 0;
+
+out_clean_vnic:
+ usnic_vnic_free(vf->vnic);
+out_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out_clean_vf:
+ kfree(vf);
+ return err;
+}
+
+static void usnic_ib_pci_remove(struct pci_dev *pdev)
+{
+ struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
+ struct usnic_ib_dev *pf = vf->pf;
+
+ mutex_lock(&pf->usdev_lock);
+ list_del(&vf->link);
+ mutex_unlock(&pf->usdev_lock);
+
+ kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
+ usnic_vnic_free(vf->vnic);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(vf);
+
+ usnic_info("Removed VF %s\n", pci_name(pdev));
+}
+
+/* PCI driver entry points */
+static struct pci_driver usnic_ib_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = usnic_ib_pci_ids,
+ .probe = usnic_ib_pci_probe,
+ .remove = usnic_ib_pci_remove,
+};
+/* End of PCI section */
+
+/* Start of module section */
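+/*
+ * Initialization order: uiom, PCI driver, netdev/inet notifiers, transport,
+ * debugfs. usnic_ib_destroy() unwinds in the reverse order.
+ */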
+static int __init usnic_ib_init(void)
+{
+ int err;
+
+ printk_once(KERN_INFO "%s", usnic_version);
+
+ err = usnic_uiom_init(DRV_NAME);
+ if (err) {
+ usnic_err("Unable to initalize umem with err %d\n", err);
+ return err;
+ }
+
+	err = pci_register_driver(&usnic_ib_pci_driver);
+	if (err) {
+		usnic_err("Unable to register with PCI\n");
+		goto out_umem_fini;
+	}
+
+ err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
+ if (err) {
+ usnic_err("Failed to register netdev notifier\n");
+ goto out_pci_unreg;
+ }
+
+ err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+ if (err) {
+ usnic_err("Failed to register inet addr notifier\n");
+ goto out_unreg_netdev_notifier;
+ }
+
+ err = usnic_transport_init();
+ if (err) {
+ usnic_err("Failed to initialize transport\n");
+ goto out_unreg_inetaddr_notifier;
+ }
+
+ usnic_debugfs_init();
+
+ return 0;
+
+out_unreg_inetaddr_notifier:
+ unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+out_unreg_netdev_notifier:
+ unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
+out_pci_unreg:
+ pci_unregister_driver(&usnic_ib_pci_driver);
+out_umem_fini:
+ usnic_uiom_fini();
+
+ return err;
+}
+
+static void __exit usnic_ib_destroy(void)
+{
+ usnic_dbg("\n");
+ usnic_debugfs_exit();
+ usnic_transport_fini();
+ unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
+ unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
+ pci_unregister_driver(&usnic_ib_pci_driver);
+ usnic_uiom_fini();
+}
+
+MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
+MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
+module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
+MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
+MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);
+
+module_init(usnic_ib_init);
+module_exit(usnic_ib_destroy);
+/* End of module section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
new file mode 100644
index 000000000000..f8dfd76be89f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "usnic_log.h"
+#include "usnic_vnic.h"
+#include "usnic_fwd.h"
+#include "usnic_uiom.h"
+#include "usnic_debugfs.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_ib_sysfs.h"
+#include "usnic_transport.h"
+
+#define DFLT_RQ_IDX 0
+
+const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return "Rst";
+ case IB_QPS_INIT:
+ return "Init";
+ case IB_QPS_RTR:
+ return "RTR";
+ case IB_QPS_RTS:
+ return "RTS";
+ case IB_QPS_SQD:
+ return "SQD";
+ case IB_QPS_SQE:
+ return "SQE";
+ case IB_QPS_ERR:
+ return "ERR";
+ default:
+ return "UNKOWN STATE";
+
+ }
+}
+
+int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
+{
+ return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
+}
+
+int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
+{
+ struct usnic_ib_qp_grp *qp_grp = obj;
+ struct usnic_ib_qp_grp_flow *default_flow;
+ if (obj) {
+ default_flow = list_first_entry(&qp_grp->flows_lst,
+ struct usnic_ib_qp_grp_flow, link);
+ return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(
+ qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic),
+ default_flow->flow->flow_id);
+ } else {
+ return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
+ }
+}
+
+static struct usnic_vnic_res_chunk *
+get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
+{
+ lockdep_assert_held(&qp_grp->lock);
+	/*
+	 * The QP res chunk, used to derive qp indices,
+	 * is just the chunk of RQ indices
+	 */
+ return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+}
+
+static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
+{
+ int status;
+ int i, vnic_idx;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *res;
+
+ lockdep_assert_held(&qp_grp->lock);
+
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+
+ res_chunk = get_qp_res_chunk(qp_grp);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get qp res with err %ld\n",
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ for (i = 0; i < res_chunk->cnt; i++) {
+ res = res_chunk->res[i];
+ status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ if (status) {
+ usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
+ res->vnic_idx, qp_grp->ufdev->name,
+ vnic_idx, status);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (i--; i >= 0; i--) {
+ res = res_chunk->res[i];
+ usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ }
+
+ return status;
+}
+
+static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
+{
+ int i, vnic_idx;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *res;
+ int status = 0;
+
+ lockdep_assert_held(&qp_grp->lock);
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+
+ res_chunk = get_qp_res_chunk(qp_grp);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get qp res with err %ld\n",
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ for (i = 0; i < res_chunk->cnt; i++) {
+ res = res_chunk->res[i];
+ status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
+ res->vnic_idx);
+ if (status) {
+ usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
+ res->vnic_idx,
+ qp_grp->ufdev->name,
+ vnic_idx, status);
+ }
+ }
+
+ return status;
+}
+
+static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_filter_action *uaction)
+{
+ struct usnic_vnic_res_chunk *res_chunk;
+
+ res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+ if (IS_ERR_OR_NULL(res_chunk)) {
+ usnic_err("Unable to get %s with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
+ PTR_ERR(res_chunk));
+ return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ }
+
+ uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+ uaction->action.type = FILTER_ACTION_RQ_STEERING;
+ uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
+
+ return 0;
+}
+
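+/*
+ * Reserve a usNIC/RoCE port number, then install a forwarding filter that
+ * steers traffic for that port to the QP group's default RQ.
+ */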
+static struct usnic_ib_qp_grp_flow*
+create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ uint16_t port_num;
+ int err;
+ struct filter filter;
+ struct usnic_filter_action uaction;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+
+ trans_type = trans_spec->trans_type;
+ port_num = trans_spec->usnic_roce.port_num;
+
+ /* Reserve Port */
+ port_num = usnic_transport_rsrv_port(trans_type, port_num);
+ if (port_num == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* Create Flow */
+ usnic_fwd_init_usnic_filter(&filter, port_num);
+ err = init_filter_action(qp_grp, &uaction);
+ if (err)
+ goto out_unreserve_port;
+
+ flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
+ if (IS_ERR_OR_NULL(flow)) {
+ usnic_err("Unable to alloc flow failed with err %ld\n",
+ PTR_ERR(flow));
+ err = flow ? PTR_ERR(flow) : -EFAULT;
+ goto out_unreserve_port;
+ }
+
+ /* Create Flow Handle */
+ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ goto out_dealloc_flow;
+ }
+ qp_flow->flow = flow;
+ qp_flow->trans_type = trans_type;
+ qp_flow->usnic_roce.port_num = port_num;
+ qp_flow->qp_grp = qp_grp;
+ return qp_flow;
+
+out_dealloc_flow:
+ usnic_fwd_dealloc_flow(flow);
+out_unreserve_port:
+ usnic_transport_unrsrv_port(trans_type, port_num);
+ return ERR_PTR(err);
+}
+
+static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_fwd_dealloc_flow(qp_flow->flow);
+ usnic_transport_unrsrv_port(qp_flow->trans_type,
+ qp_flow->usnic_roce.port_num);
+ kfree(qp_flow);
+}
+
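+/*
+ * Take a UDP socket fd from userspace, verify it really is UDP, and steer
+ * traffic matching its bound (address, port) to the QP group's default RQ.
+ */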
+static struct usnic_ib_qp_grp_flow*
+create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ struct socket *sock;
+ int sock_fd;
+ int err;
+ struct filter filter;
+ struct usnic_filter_action uaction;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+ uint32_t addr;
+ uint16_t port_num;
+ int proto;
+
+ trans_type = trans_spec->trans_type;
+ sock_fd = trans_spec->udp.sock_fd;
+
+ /* Get and check socket */
+ sock = usnic_transport_get_socket(sock_fd);
+ if (IS_ERR_OR_NULL(sock))
+ return ERR_CAST(sock);
+
+ err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
+ if (err)
+ goto out_put_sock;
+
+ if (proto != IPPROTO_UDP) {
+ usnic_err("Protocol for fd %d is not UDP", sock_fd);
+ err = -EPERM;
+ goto out_put_sock;
+ }
+
+ /* Create flow */
+ usnic_fwd_init_udp_filter(&filter, addr, port_num);
+ err = init_filter_action(qp_grp, &uaction);
+ if (err)
+ goto out_put_sock;
+
+ flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
+ if (IS_ERR_OR_NULL(flow)) {
+ usnic_err("Unable to alloc flow failed with err %ld\n",
+ PTR_ERR(flow));
+ err = flow ? PTR_ERR(flow) : -EFAULT;
+ goto out_put_sock;
+ }
+
+ /* Create qp_flow */
+ qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ err = qp_flow ? PTR_ERR(qp_flow) : -ENOMEM;
+ goto out_dealloc_flow;
+ }
+ qp_flow->flow = flow;
+ qp_flow->trans_type = trans_type;
+ qp_flow->udp.sock = sock;
+ qp_flow->qp_grp = qp_grp;
+ return qp_flow;
+
+out_dealloc_flow:
+ usnic_fwd_dealloc_flow(flow);
+out_put_sock:
+ usnic_transport_put_socket(sock);
+ return ERR_PTR(err);
+}
+
+static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_fwd_dealloc_flow(qp_flow->flow);
+ usnic_transport_put_socket(qp_flow->udp.sock);
+ kfree(qp_flow);
+}
+
+static struct usnic_ib_qp_grp_flow*
+create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
+ struct usnic_transport_spec *trans_spec)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow;
+ enum usnic_transport_type trans_type;
+
+ trans_type = trans_spec->trans_type;
+ switch (trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ qp_flow = create_udp_flow(qp_grp, trans_spec);
+ break;
+ default:
+ usnic_err("Unsupported transport %u\n",
+ trans_spec->trans_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!IS_ERR_OR_NULL(qp_flow)) {
+ list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
+ usnic_debugfs_flow_add(qp_flow);
+ }
+
+ return qp_flow;
+}
+
+static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
+{
+ usnic_debugfs_flow_remove(qp_flow);
+ list_del(&qp_flow->link);
+
+ switch (qp_flow->trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ release_roce_custom_flow(qp_flow);
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ release_udp_flow(qp_flow);
+ break;
+ default:
+ WARN(1, "Unsupported transport %u\n",
+ qp_flow->trans_type);
+ break;
+ }
+}
+
+static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
+ list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
+ release_and_remove_flow(qp_flow);
+}
+
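+/*
+ * QP state machine: RESET<->INIT adds/removes forwarding flows, INIT->RTR
+ * enables the RQs, RTR->RTS is a no-op, and moving into ERR tears down
+ * whatever the old state had set up and raises IB_EVENT_QP_FATAL.
+ */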
+int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
+ enum ib_qp_state new_state,
+ void *data)
+{
+ int status = 0;
+ int vnic_idx;
+ struct ib_event ib_event;
+ enum ib_qp_state old_state;
+ struct usnic_transport_spec *trans_spec;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+
+ old_state = qp_grp->state;
+ vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+ trans_spec = (struct usnic_transport_spec *) data;
+
+ spin_lock(&qp_grp->lock);
+ switch (new_state) {
+ case IB_QPS_RESET:
+ switch (old_state) {
+ case IB_QPS_RESET:
+ /* NO-OP */
+ break;
+ case IB_QPS_INIT:
+ release_and_remove_all_flows(qp_grp);
+ status = 0;
+ break;
+ case IB_QPS_RTR:
+ case IB_QPS_RTS:
+ case IB_QPS_ERR:
+ status = disable_qp_grp(qp_grp);
+ release_and_remove_all_flows(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_INIT:
+ switch (old_state) {
+ case IB_QPS_RESET:
+ if (trans_spec) {
+ qp_flow = create_and_add_flow(qp_grp,
+ trans_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ break;
+ }
+ } else {
+ /*
+ * Optional to specify filters.
+ */
+ status = 0;
+ }
+ break;
+ case IB_QPS_INIT:
+ if (trans_spec) {
+ qp_flow = create_and_add_flow(qp_grp,
+ trans_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ break;
+ }
+ } else {
+ /*
+ * Doesn't make sense to go into INIT state
+ * from INIT state w/o adding filters.
+ */
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTR:
+ status = disable_qp_grp(qp_grp);
+ break;
+ case IB_QPS_RTS:
+ status = disable_qp_grp(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTR:
+ switch (old_state) {
+ case IB_QPS_INIT:
+ status = enable_qp_grp(qp_grp);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_RTS:
+ switch (old_state) {
+ case IB_QPS_RTR:
+ /* NO-OP FOR NOW */
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ case IB_QPS_ERR:
+ ib_event.device = &qp_grp->vf->pf->ib_dev;
+ ib_event.element.qp = &qp_grp->ibqp;
+ ib_event.event = IB_EVENT_QP_FATAL;
+
+ switch (old_state) {
+ case IB_QPS_RESET:
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ case IB_QPS_INIT:
+ release_and_remove_all_flows(qp_grp);
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ case IB_QPS_RTR:
+ case IB_QPS_RTS:
+ status = disable_qp_grp(qp_grp);
+ release_and_remove_all_flows(qp_grp);
+ qp_grp->ibqp.event_handler(&ib_event,
+ qp_grp->ibqp.qp_context);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ }
+ spin_unlock(&qp_grp->lock);
+
+ if (!status) {
+ qp_grp->state = new_state;
+ usnic_info("Transistioned %u from %s to %s",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string(old_state),
+ usnic_ib_qp_grp_state_to_string(new_state));
+ } else {
+ usnic_err("Failed to transistion %u from %s to %s",
+ qp_grp->grp_id,
+ usnic_ib_qp_grp_state_to_string(old_state),
+ usnic_ib_qp_grp_state_to_string(new_state));
+ }
+
+ return status;
+}
+
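+/*
+ * Walk the EOL-terminated res_spec and grab each requested resource chunk
+ * from the vNIC. The returned array is NULL-terminated so free_qp_grp_res()
+ * and usnic_ib_qp_grp_get_chunk() can iterate it without a separate count.
+ */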
+static struct usnic_vnic_res_chunk**
+alloc_res_chunk_list(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec, void *owner_obj)
+{
+ enum usnic_vnic_res_type res_type;
+ struct usnic_vnic_res_chunk **res_chunk_list;
+ int err, i, res_cnt, res_lst_sz;
+
+ for (res_lst_sz = 0;
+ res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
+ res_lst_sz++) {
+ /* Do Nothing */
+ }
+
+ res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
+ GFP_ATOMIC);
+ if (!res_chunk_list)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
+ i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+
+ res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
+ res_cnt, owner_obj);
+ if (IS_ERR_OR_NULL(res_chunk_list[i])) {
+ err = res_chunk_list[i] ?
+ PTR_ERR(res_chunk_list[i]) : -ENOMEM;
+ usnic_err("Failed to get %s from %s with err %d\n",
+ usnic_vnic_res_type_to_str(res_type),
+ usnic_vnic_pci_name(vnic),
+ err);
+ goto out_free_res;
+ }
+ }
+
+ return res_chunk_list;
+
+out_free_res:
+ for (i--; i > 0; i--)
+ usnic_vnic_put_resources(res_chunk_list[i]);
+ kfree(res_chunk_list);
+ return ERR_PTR(err);
+}
+
+static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
+{
+ int i;
+ for (i = 0; res_chunk_list[i]; i++)
+ usnic_vnic_put_resources(res_chunk_list[i]);
+ kfree(res_chunk_list);
+}
+
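+/*
+ * The first QP group on a VF attaches the VF's PCI device to the PD's uiom
+ * domain; qp_grp_ref_cnt tracks how many QP groups share the VF so the
+ * device is detached again only when the last one unbinds.
+ */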
+static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_ib_qp_grp *qp_grp)
+{
+ int err;
+ struct pci_dev *pdev;
+
+ lockdep_assert_held(&vf->lock);
+
+ pdev = usnic_vnic_get_pdev(vf->vnic);
+ if (vf->qp_grp_ref_cnt == 0) {
+ err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
+ if (err) {
+ usnic_err("Failed to attach %s to domain\n",
+ pci_name(pdev));
+ return err;
+ }
+ vf->pd = pd;
+ }
+ vf->qp_grp_ref_cnt++;
+
+ WARN_ON(vf->pd != pd);
+ qp_grp->vf = vf;
+
+ return 0;
+}
+
+static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct pci_dev *pdev;
+ struct usnic_ib_pd *pd;
+
+ lockdep_assert_held(&qp_grp->vf->lock);
+
+ pd = qp_grp->vf->pd;
+ pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
+ if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
+ qp_grp->vf->pd = NULL;
+ usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
+ }
+ qp_grp->vf = NULL;
+}
+
+static void log_spec(struct usnic_vnic_res_spec *res_spec)
+{
+ char buf[512];
+ usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
+ usnic_dbg("%s\n", buf);
+}
+
+static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
+ uint32_t *id)
+{
+ enum usnic_transport_type trans_type = qp_flow->trans_type;
+ int err;
+ uint16_t port_num = 0;
+
+ switch (trans_type) {
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ *id = qp_flow->usnic_roce.port_num;
+ break;
+ case USNIC_TRANSPORT_IPV4_UDP:
+ err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
+ NULL, NULL,
+ &port_num);
+ if (err)
+ return err;
+ /*
+ * Copy port_num to stack first and then to *id,
+ * so that the short to int cast works for little
+ * and big endian systems.
+ */
+ *id = port_num;
+ break;
+ default:
+ usnic_err("Unsupported transport %u\n", trans_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct usnic_ib_qp_grp *
+usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *transport_spec)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ int err;
+ enum usnic_transport_type transport = transport_spec->trans_type;
+ struct usnic_ib_qp_grp_flow *qp_flow;
+
+ lockdep_assert_held(&vf->lock);
+
+ err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
+ res_spec);
+ if (err) {
+ usnic_err("Spec does not meet miniumum req for transport %d\n",
+ transport);
+ log_spec(res_spec);
+ return ERR_PTR(err);
+ }
+
+ qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
+ if (!qp_grp) {
+ usnic_err("Unable to alloc qp_grp - Out of memory\n");
+ return NULL;
+ }
+
+ qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
+ qp_grp);
+ if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
+ err = qp_grp->res_chunk_list ?
+ PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
+ usnic_err("Unable to alloc res for %d with err %d\n",
+ qp_grp->grp_id, err);
+ goto out_free_qp_grp;
+ }
+
+ err = qp_grp_and_vf_bind(vf, pd, qp_grp);
+ if (err)
+ goto out_free_res;
+
+ INIT_LIST_HEAD(&qp_grp->flows_lst);
+ spin_lock_init(&qp_grp->lock);
+ qp_grp->ufdev = ufdev;
+ qp_grp->state = IB_QPS_RESET;
+ qp_grp->owner_pid = current->pid;
+
+ qp_flow = create_and_add_flow(qp_grp, transport_spec);
+ if (IS_ERR_OR_NULL(qp_flow)) {
+ usnic_err("Unable to create and add flow with err %ld\n",
+ PTR_ERR(qp_flow));
+ err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
+ goto out_qp_grp_vf_unbind;
+ }
+
+ err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
+ if (err)
+ goto out_release_flow;
+ qp_grp->ibqp.qp_num = qp_grp->grp_id;
+
+ usnic_ib_sysfs_qpn_add(qp_grp);
+
+ return qp_grp;
+
+out_release_flow:
+ release_and_remove_flow(qp_flow);
+out_qp_grp_vf_unbind:
+ qp_grp_and_vf_unbind(qp_grp);
+out_free_res:
+ free_qp_grp_res(qp_grp->res_chunk_list);
+out_free_qp_grp:
+ kfree(qp_grp);
+
+ return ERR_PTR(err);
+}
+
+void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
+{
+ WARN_ON(qp_grp->state != IB_QPS_RESET);
+ lockdep_assert_held(&qp_grp->vf->lock);
+
+ release_and_remove_all_flows(qp_grp);
+ usnic_ib_sysfs_qpn_remove(qp_grp);
+ qp_grp_and_vf_unbind(qp_grp);
+ free_qp_grp_res(qp_grp->res_chunk_list);
+ kfree(qp_grp);
+}
+
+struct usnic_vnic_res_chunk*
+usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
+ enum usnic_vnic_res_type res_type)
+{
+ int i;
+
+ for (i = 0; qp_grp->res_chunk_list[i]; i++) {
+ if (qp_grp->res_chunk_list[i]->type == res_type)
+ return qp_grp->res_chunk_list[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
new file mode 100644
index 000000000000..b0aafe8db0c3
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_QP_GRP_H_
+#define USNIC_IB_QP_GRP_H_
+
+#include <linux/debugfs.h>
+#include <rdma/ib_verbs.h>
+
+#include "usnic_ib.h"
+#include "usnic_abi.h"
+#include "usnic_fwd.h"
+#include "usnic_vnic.h"
+
+/*
+ * The qp group struct represents all the hw resources needed to present an ib_qp
+ */
+struct usnic_ib_qp_grp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+ int grp_id;
+
+ struct usnic_fwd_dev *ufdev;
+ struct usnic_ib_ucontext *ctx;
+ struct list_head flows_lst;
+
+ struct usnic_vnic_res_chunk **res_chunk_list;
+
+ pid_t owner_pid;
+ struct usnic_ib_vf *vf;
+ struct list_head link;
+
+ spinlock_t lock;
+
+ struct kobject kobj;
+};
+
+struct usnic_ib_qp_grp_flow {
+ struct usnic_fwd_flow *flow;
+ enum usnic_transport_type trans_type;
+ union {
+ struct {
+ uint16_t port_num;
+ } usnic_roce;
+ struct {
+ struct socket *sock;
+ } udp;
+ };
+ struct usnic_ib_qp_grp *qp_grp;
+ struct list_head link;
+
+ /* Debug FS */
+ struct dentry *dbgfs_dentry;
+ char dentry_name[32];
+};
+
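+/*
+ * Minimum vNIC resources a QP group needs for each transport type; each
+ * list is terminated by a USNIC_VNIC_RES_TYPE_EOL entry.
+ */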
+static const struct
+usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
+ { /*USNIC_TRANSPORT_UNKNOWN*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_IPV4_UDP*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+};
+
+const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
+int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
+int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz);
+struct usnic_ib_qp_grp *
+usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
+ struct usnic_ib_pd *pd,
+ struct usnic_vnic_res_spec *res_spec,
+ struct usnic_transport_spec *trans_spec);
+void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp);
+int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
+ enum ib_qp_state new_state,
+ void *data);
+struct usnic_vnic_res_chunk
+*usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
+ enum usnic_vnic_res_type type);
+static inline
+struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct usnic_ib_qp_grp, ibqp);
+}
+#endif /* USNIC_IB_QP_GRP_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
new file mode 100644
index 000000000000..27dc67c1689f
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_common_util.h"
+#include "usnic_ib.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_vnic.h"
+#include "usnic_ib_verbs.h"
+#include "usnic_log.h"
+
+static ssize_t usnic_ib_show_fw_ver(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ struct ethtool_drvinfo info;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
+}
+
+static ssize_t usnic_ib_show_board(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ unsigned short subsystem_device_id;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ subsystem_device_id = us_ibdev->pdev->subsystem_device;
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
+}
+
+/*
+ * Report the configuration for this PF
+ */
+static ssize_t
+usnic_ib_show_config(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+ char *ptr;
+ unsigned left;
+ unsigned n;
+ enum usnic_vnic_res_type res_type;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ /* Buffer space limit is 1 page */
+ ptr = buf;
+ left = PAGE_SIZE;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
+ char *busname;
+
+ /*
+		 * The bus name seems to come with an annoying prefix.
+		 * Remove it if it is predictable.
+ */
+ busname = us_ibdev->pdev->bus->name;
+ if (strncmp(busname, "PCI Bus ", 8) == 0)
+ busname += 8;
+
+ n = scnprintf(ptr, left,
+ "%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
+ us_ibdev->ib_dev.name,
+ busname,
+ PCI_SLOT(us_ibdev->pdev->devfn),
+ PCI_FUNC(us_ibdev->pdev->devfn),
+ netdev_name(us_ibdev->netdev),
+ us_ibdev->ufdev->mac,
+ atomic_read(&us_ibdev->vf_cnt.refcount));
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL;
+ res_type < USNIC_VNIC_RES_TYPE_MAX;
+ res_type++) {
+ if (us_ibdev->vf_res_cnt[res_type] == 0)
+ continue;
+ n = scnprintf(ptr, left, " %d %s%s",
+ us_ibdev->vf_res_cnt[res_type],
+ usnic_vnic_res_type_to_str(res_type),
+ (res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
+ "," : "");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+ } else {
+ n = scnprintf(ptr, left, "%s: no VFs\n",
+ us_ibdev->ib_dev.name);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return ptr - buf;
+}
+
+static ssize_t
+usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ netdev_name(us_ibdev->netdev));
+}
+
+static ssize_t
+usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ atomic_read(&us_ibdev->vf_cnt.refcount));
+}
+
+static ssize_t
+usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+ int qp_per_vf;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+ qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
+
+ return scnprintf(buf, PAGE_SIZE,
+ "%d\n", qp_per_vf);
+}
+
+static ssize_t
+usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
+}
+
+static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
+static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
+static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
+static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
+static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
+static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
+
+static struct device_attribute *usnic_class_attributes[] = {
+ &dev_attr_fw_ver,
+ &dev_attr_board_id,
+ &dev_attr_config,
+ &dev_attr_iface,
+ &dev_attr_max_vf,
+ &dev_attr_qp_per_vf,
+ &dev_attr_cq_per_vf,
+};
+
+struct qpn_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf);
+};
+
+/*
+ * Definitions for supporting QPN entries in sysfs
+ */
+static ssize_t
+usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct qpn_attribute *qpn_attr;
+
+ qp_grp = container_of(kobj, struct usnic_ib_qp_grp, kobj);
+ qpn_attr = container_of(attr, struct qpn_attribute, attr);
+
+ return qpn_attr->show(qp_grp, buf);
+}
+
+static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = {
+ .show = usnic_ib_qpn_attr_show
+};
+
+#define QPN_ATTR_RO(NAME) \
+struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME)
+
+static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx);
+}
+
+static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
+{
+ int i, j, n;
+ int left;
+ char *ptr;
+ struct usnic_vnic_res_chunk *res_chunk;
+ struct usnic_vnic_res *vnic_res;
+
+ left = PAGE_SIZE;
+ ptr = buf;
+
+ n = scnprintf(ptr, left,
+ "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ qp_grp->ibqp.qp_num,
+ usnic_ib_qp_grp_state_to_string(qp_grp->state),
+ qp_grp->owner_pid,
+ usnic_vnic_get_index(qp_grp->vf->vnic));
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ for (i = 0; qp_grp->res_chunk_list[i]; i++) {
+ res_chunk = qp_grp->res_chunk_list[i];
+ for (j = 0; j < res_chunk->cnt; j++) {
+ vnic_res = res_chunk->res[j];
+ n = scnprintf(ptr, left, "%s[%d] ",
+ usnic_vnic_res_type_to_str(vnic_res->type),
+ vnic_res->vnic_idx);
+ UPDATE_PTR_LEFT(n, ptr, left);
+ }
+ }
+
+ n = scnprintf(ptr, left, "\n");
+ UPDATE_PTR_LEFT(n, ptr, left);
+
+ return ptr - buf;
+}
+
+static QPN_ATTR_RO(context);
+static QPN_ATTR_RO(summary);
+
+static struct attribute *usnic_ib_qpn_default_attrs[] = {
+ &qpn_attr_context.attr,
+ &qpn_attr_summary.attr,
+ NULL
+};
+
+static struct kobj_type usnic_ib_qpn_type = {
+ .sysfs_ops = &usnic_ib_qpn_sysfs_ops,
+ .default_attrs = usnic_ib_qpn_default_attrs
+};
+
+int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
+{
+ int i;
+ int err;
+ for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
+ err = device_create_file(&us_ibdev->ib_dev.dev,
+ usnic_class_attributes[i]);
+ if (err) {
+ usnic_err("Failed to create device file %d for %s eith err %d",
+ i, us_ibdev->ib_dev.name, err);
+ return -EINVAL;
+ }
+ }
+
+ /* create kernel object for looking at individual QPs */
+ kobject_get(&us_ibdev->ib_dev.dev.kobj);
+ us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
+ &us_ibdev->ib_dev.dev.kobj);
+ if (us_ibdev->qpn_kobj == NULL) {
+ kobject_put(&us_ibdev->ib_dev.dev.kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
+ device_remove_file(&us_ibdev->ib_dev.dev,
+ usnic_class_attributes[i]);
+ }
+
+ kobject_put(us_ibdev->qpn_kobj);
+}
+
+void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_dev *us_ibdev;
+ int err;
+
+ us_ibdev = qp_grp->vf->pf;
+
+ err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type,
+ kobject_get(us_ibdev->qpn_kobj),
+ "%d", qp_grp->grp_id);
+ if (err) {
+ kobject_put(us_ibdev->qpn_kobj);
+ return;
+ }
+}
+
+void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_dev *us_ibdev;
+
+ us_ibdev = qp_grp->vf->pf;
+
+ kobject_put(&qp_grp->kobj);
+ kobject_put(us_ibdev->qpn_kobj);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
new file mode 100644
index 000000000000..0d09b493cd02
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_SYSFS_H_
+#define USNIC_IB_SYSFS_H_
+
+#include "usnic_ib.h"
+
+int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev);
+void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
+void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
+void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+
+#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
new file mode 100644
index 000000000000..d48d2c0a2e3c
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "usnic_abi.h"
+#include "usnic_ib.h"
+#include "usnic_common_util.h"
+#include "usnic_ib_qp_grp.h"
+#include "usnic_fwd.h"
+#include "usnic_log.h"
+#include "usnic_uiom.h"
+#include "usnic_transport.h"
+
+#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
+
+static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
+{
+ *fw_ver = (u64) *fw_ver_str;
+}
+
+static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_create_qp_resp resp;
+ struct pci_dev *pdev;
+ struct vnic_dev_bar *bar;
+ struct usnic_vnic_res_chunk *chunk;
+ struct usnic_ib_qp_grp_flow *default_flow;
+ int i, err;
+
+ memset(&resp, 0, sizeof(resp));
+
+ us_ibdev = qp_grp->vf->pf;
+ pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
+ if (!pdev) {
+ usnic_err("Failed to get pdev of qp_grp %d\n",
+ qp_grp->grp_id);
+ return -EFAULT;
+ }
+
+ bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
+ if (!bar) {
+ usnic_err("Failed to get bar0 of qp_grp %d vf %s",
+ qp_grp->grp_id, pci_name(pdev));
+ return -EFAULT;
+ }
+
+ resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
+ resp.bar_bus_addr = bar->bus_addr;
+ resp.bar_len = bar->len;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
+ resp.rq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.rq_idx[i] = chunk->res[i]->vnic_idx;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
+ resp.wq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.wq_idx[i] = chunk->res[i]->vnic_idx;
+
+ chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
+ if (IS_ERR_OR_NULL(chunk)) {
+ usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
+ usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
+ qp_grp->grp_id,
+ PTR_ERR(chunk));
+ return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ }
+
+ WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
+ resp.cq_cnt = chunk->cnt;
+ for (i = 0; i < chunk->cnt; i++)
+ resp.cq_idx[i] = chunk->res[i]->vnic_idx;
+
+ default_flow = list_first_entry(&qp_grp->flows_lst,
+ struct usnic_ib_qp_grp_flow, link);
+ resp.transport = default_flow->trans_type;
+
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (err) {
+ usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ return err;
+ }
+
+ return 0;
+}
+
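+/*
+ * VF selection policy: with usnic_ib_share_vf set, prefer a VF already
+ * attached to this PD that still has room for res_spec; otherwise fall
+ * back to a completely unused VF. The chosen VF's lock is held across
+ * QP group creation.
+ */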
+static struct usnic_ib_qp_grp*
+find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
+ struct usnic_ib_pd *pd,
+ struct usnic_transport_spec *trans_spec,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ struct usnic_ib_vf *vf;
+ struct usnic_vnic *vnic;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct device *dev, **dev_list;
+ int i, found = 0;
+
+ BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
+
+ if (list_empty(&us_ibdev->vf_dev_list)) {
+ usnic_info("No vfs to allocate\n");
+ return NULL;
+ }
+
+ if (usnic_ib_share_vf) {
+		/* Try to find resources on a used vf which is in this pd */
+ dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
+ for (i = 0; dev_list[i]; i++) {
+ dev = dev_list[i];
+ vf = pci_get_drvdata(to_pci_dev(dev));
+ spin_lock(&vf->lock);
+ vnic = vf->vnic;
+ if (!usnic_vnic_check_room(vnic, res_spec)) {
+ usnic_dbg("Found used vnic %s from %s\n",
+ us_ibdev->ib_dev.name,
+ pci_name(usnic_vnic_get_pdev(
+ vnic)));
+ found = 1;
+ break;
+ }
+ spin_unlock(&vf->lock);
+
+ }
+ usnic_uiom_free_dev_list(dev_list);
+ }
+
+ if (!found) {
+ /* Try to find resources on an unused vf */
+ list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
+ spin_lock(&vf->lock);
+ vnic = vf->vnic;
+ if (vf->qp_grp_ref_cnt == 0 &&
+ usnic_vnic_check_room(vnic, res_spec) == 0) {
+ found = 1;
+ break;
+ }
+ spin_unlock(&vf->lock);
+ }
+ }
+
+ if (!found) {
+ usnic_info("No free qp grp found on %s\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
+ trans_spec);
+ spin_unlock(&vf->lock);
+ if (IS_ERR_OR_NULL(qp_grp)) {
+ usnic_err("Failed to allocate qp_grp\n");
+ return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
+ }
+
+ return qp_grp;
+}
+
+static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
+{
+ struct usnic_ib_vf *vf = qp_grp->vf;
+
+ WARN_ON(qp_grp->state != IB_QPS_RESET);
+
+ spin_lock(&vf->lock);
+ usnic_ib_qp_grp_destroy(qp_grp);
+ spin_unlock(&vf->lock);
+}
+
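+/*
+ * Map the ethtool link speed (in Mb/s) onto the closest IB width/speed
+ * pair reported through ib_query_port(); usNIC itself runs over Ethernet.
+ */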
+static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
+ u8 *active_width)
+{
+ if (speed <= 10000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR10;
+ } else if (speed <= 20000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_DDR;
+ } else if (speed <= 30000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ } else if (speed <= 40000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR10;
+ } else {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ }
+}
+
+static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
+{
+ if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
+ cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Start of ib callback functions */
+
+enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int usnic_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ union ib_gid gid;
+ struct ethtool_drvinfo info;
+ struct ethtool_cmd cmd;
+ int qp_per_vf;
+
+ usnic_dbg("\n");
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ memset(props, 0, sizeof(*props));
+ usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
+ &gid.raw[0]);
+ memcpy(&props->sys_image_guid, &gid.global.interface_id,
+ sizeof(gid.global.interface_id));
+ usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
+ props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
+ props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
+ props->vendor_id = PCI_VENDOR_ID_CISCO;
+ props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
+ props->hw_ver = us_ibdev->pdev->subsystem_device;
+ qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
+ us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
+ props->max_qp = qp_per_vf *
+ atomic_read(&us_ibdev->vf_cnt.refcount);
+ props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
+ atomic_read(&us_ibdev->vf_cnt.refcount);
+ props->max_pd = USNIC_UIOM_MAX_PD_CNT;
+ props->max_mr = USNIC_UIOM_MAX_MR_CNT;
+ props->local_ca_ack_delay = 0;
+ props->max_pkeys = 0;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ props->max_qp_rd_atom = 0;
+ props->max_qp_init_rd_atom = 0;
+ props->max_res_rd_atom = 0;
+ props->max_srq = 0;
+ props->max_srq_wr = 0;
+ props->max_srq_sge = 0;
+ props->max_fast_reg_page_list_len = 0;
+ props->max_mcast_grp = 0;
+ props->max_mcast_qp_attach = 0;
+ props->max_total_mcast_qp_attach = 0;
+ props->max_map_per_fmr = 0;
+ /* Owned by Userspace
+ * max_qp_wr, max_sge, max_sge_rd, max_cqe */
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ struct ethtool_cmd cmd;
+
+ usnic_dbg("\n");
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ memset(props, 0, sizeof(*props));
+
+ props->lid = 0;
+ props->lmc = 1;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+
+ if (!us_ibdev->ufdev->link_up) {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = 3;
+ } else if (!us_ibdev->ufdev->inaddr) {
+ props->state = IB_PORT_INIT;
+ props->phys_state = 4;
+ } else {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 5;
+ }
+
+ props->port_cap_flags = 0;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->bad_pkey_cntr = 0;
+ props->qkey_viol_cntr = 0;
+ eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
+ &props->active_width);
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
+ /* Userspace will adjust for hdrs */
+ props->max_msg_sz = us_ibdev->ufdev->mtu;
+ props->max_vl_num = 1;
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+ int err;
+
+ usnic_dbg("\n");
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_grp = to_uqp_grp(qp);
+ vf = qp_grp->vf;
+ mutex_lock(&vf->pf->usdev_lock);
+ usnic_dbg("\n");
+ qp_attr->qp_state = qp_grp->state;
+ qp_attr->cur_qp_state = qp_grp->state;
+
+ switch (qp_grp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ qp_attr->qkey = 0;
+ break;
+ default:
+ usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ mutex_unlock(&vf->pf->usdev_lock);
+ return 0;
+
+err_out:
+ mutex_unlock(&vf->pf->usdev_lock);
+ return err;
+}
+
+int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ usnic_dbg("\n");
+
+ if (index > 1)
+ return -EINVAL;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
+ &gid->raw[0]);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ if (index > 1)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+ return 0;
+}
+
+struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_pd *pd;
+ void *umem_pd;
+
+ usnic_dbg("\n");
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
+ if (IS_ERR_OR_NULL(umem_pd)) {
+ kfree(pd);
+ return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
+ }
+
+ usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
+ pd, context, ibdev->name);
+ return &pd->ibpd;
+}
+
+int usnic_ib_dealloc_pd(struct ib_pd *pd)
+{
+ usnic_info("freeing domain 0x%p\n", pd);
+
+ usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
+ kfree(pd);
+ return 0;
+}
+
+struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ int err;
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_ucontext *ucontext;
+ int cq_cnt;
+ struct usnic_vnic_res_spec res_spec;
+ struct usnic_ib_create_qp_cmd cmd;
+ struct usnic_transport_spec trans_spec;
+
+ usnic_dbg("\n");
+
+ ucontext = to_uucontext(pd->uobject->context);
+ us_ibdev = to_usdev(pd->device);
+
+ err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
+ if (err) {
+ usnic_err("%s: cannot copy udata for create_qp\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = create_qp_validate_user_data(cmd);
+ if (err) {
+ usnic_err("%s: Failed to validate user data\n",
+ us_ibdev->ib_dev.name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->qp_type != IB_QPT_UD) {
+ usnic_err("%s asked to make a non-UD QP: %d\n",
+ us_ibdev->ib_dev.name, init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ trans_spec = cmd.spec;
+ mutex_lock(&us_ibdev->usdev_lock);
+ cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
+ res_spec = min_transport_spec[trans_spec.trans_type];
+ usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
+ qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
+ &trans_spec,
+ &res_spec);
+ if (IS_ERR_OR_NULL(qp_grp)) {
+ err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
+ goto out_release_mutex;
+ }
+
+ err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
+ if (err) {
+ err = -EBUSY;
+ goto out_release_qp_grp;
+ }
+
+ qp_grp->ctx = ucontext;
+ list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
+ usnic_ib_log_vf(qp_grp->vf);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return &qp_grp->ibqp;
+
+out_release_qp_grp:
+ qp_grp_destroy(qp_grp);
+out_release_mutex:
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return ERR_PTR(err);
+}
+
+int usnic_ib_destroy_qp(struct ib_qp *qp)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+
+ usnic_dbg("\n");
+
+ qp_grp = to_uqp_grp(qp);
+ vf = qp_grp->vf;
+ mutex_lock(&vf->pf->usdev_lock);
+ if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
+ usnic_err("Failed to move qp grp %u to reset\n",
+ qp_grp->grp_id);
+ }
+
+ list_del(&qp_grp->link);
+ qp_grp_destroy(qp_grp);
+ mutex_unlock(&vf->pf->usdev_lock);
+
+ return 0;
+}
+
+int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct usnic_ib_qp_grp *qp_grp;
+ int status;
+ usnic_dbg("\n");
+
+ qp_grp = to_uqp_grp(ibqp);
+
+ /* TODO: Future Support All States */
+ mutex_lock(&qp_grp->vf->pf->usdev_lock);
+ if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
+ } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
+ } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
+ status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
+ } else {
+ usnic_err("Unexpected combination mask: %u state: %u\n",
+ attr_mask & IB_QP_STATE, attr->qp_state);
+ status = -EINVAL;
+ }
+
+ mutex_unlock(&qp_grp->vf->pf->usdev_lock);
+ return status;
+}
+
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct ib_cq *cq;
+
+ usnic_dbg("\n");
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+		return ERR_PTR(-ENOMEM);
+
+ return cq;
+}
+
+int usnic_ib_destroy_cq(struct ib_cq *cq)
+{
+ usnic_dbg("\n");
+ kfree(cq);
+ return 0;
+}
+
+struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_mr *mr;
+ int err;
+
+ usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
+ virt_addr, length);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+ mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
+ access_flags, 0);
+ if (IS_ERR_OR_NULL(mr->umem)) {
+ err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
+ goto err_free;
+ }
+
+ mr->ibmr.lkey = mr->ibmr.rkey = 0;
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+int usnic_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct usnic_ib_mr *mr = to_umr(ibmr);
+
+ usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
+
+ usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
+ kfree(mr);
+ return 0;
+}
+
+struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct usnic_ib_ucontext *context;
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
+ usnic_dbg("\n");
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&context->qp_grp_list);
+ mutex_lock(&us_ibdev->usdev_lock);
+ list_add_tail(&context->link, &us_ibdev->ctx_list);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return &context->ibucontext;
+}
+
+int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
+ struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
+ usnic_dbg("\n");
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ BUG_ON(!list_empty(&context->qp_grp_list));
+ list_del(&context->link);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ kfree(context);
+ return 0;
+}
+
+int usnic_ib_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma)
+{
+ struct usnic_ib_ucontext *uctx = to_ucontext(context);
+ struct usnic_ib_dev *us_ibdev;
+ struct usnic_ib_qp_grp *qp_grp;
+ struct usnic_ib_vf *vf;
+ struct vnic_dev_bar *bar;
+ dma_addr_t bus_addr;
+ unsigned int len;
+ unsigned int vfid;
+
+ usnic_dbg("\n");
+
+ us_ibdev = to_usdev(context->device);
+ vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vfid = vma->vm_pgoff;
+ usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
+ vma->vm_pgoff, PAGE_SHIFT, vfid);
+
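+	/*
+	 * The mmap page offset selects a VF; find a QP group owned by this
+	 * context on that VF and map the VF's BAR0 into the caller.
+	 */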
+ mutex_lock(&us_ibdev->usdev_lock);
+ list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
+ vf = qp_grp->vf;
+ if (usnic_vnic_get_index(vf->vnic) == vfid) {
+ bar = usnic_vnic_get_bar(vf->vnic, 0);
+ if ((vma->vm_end - vma->vm_start) != bar->len) {
+				usnic_err("BAR0 len %lu does not match requested map len %lu\n",
+ bar->len,
+ vma->vm_end - vma->vm_start);
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return -EINVAL;
+ }
+ bus_addr = bar->bus_addr;
+ len = bar->len;
+ usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
+ &bus_addr, bar->vaddr, bar->len);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ bus_addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ }
+ }
+
+ mutex_unlock(&us_ibdev->usdev_lock);
+ usnic_err("No VF %u found\n", vfid);
+ return -EINVAL;
+}
+
+/* In ib callbacks section - Start of stub funcs */
+struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ usnic_dbg("\n");
+ return ERR_PTR(-EPERM);
+}
+
+int usnic_ib_destroy_ah(struct ib_ah *ah)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+int usnic_ib_req_notify_cq(struct ib_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ usnic_dbg("\n");
+ return -EINVAL;
+}
+
+struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ usnic_dbg("\n");
+ return ERR_PTR(-ENOMEM);
+}
+
+
+/* In ib callbacks section - End of stub funcs */
+/* End of ib callbacks section */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
new file mode 100644
index 000000000000..bb864f5aed70
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_IB_VERBS_H_
+#define USNIC_IB_VERBS_H_
+
+#include "usnic_ib.h"
+
+enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
+ u8 port_num);
+int usnic_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props);
+int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int usnic_ib_dealloc_pd(struct ib_pd *pd);
+struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int usnic_ib_destroy_qp(struct ib_qp *qp);
+int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+int usnic_ib_destroy_cq(struct ib_cq *cq);
+struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int usnic_ib_dereg_mr(struct ib_mr *ibmr);
+struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata);
+int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma);
+struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr);
+int usnic_ib_destroy_ah(struct ib_ah *ah);
+int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc);
+int usnic_ib_req_notify_cq(struct ib_cq *cq,
+ enum ib_cq_notify_flags flags);
+struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc);
+#endif /* !USNIC_IB_VERBS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h
new file mode 100644
index 000000000000..75777a66c684
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_log.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_LOG_H_
+#define USNIC_LOG_H_
+
+#include "usnic.h"
+
+extern unsigned int usnic_log_lvl;
+
+#define USNIC_LOG_LVL_NONE (0)
+#define USNIC_LOG_LVL_ERR (1)
+#define USNIC_LOG_LVL_INFO (2)
+#define USNIC_LOG_LVL_DBG (3)
+
+#define usnic_printk(lvl, args...) \
+ do { \
+ printk(lvl "%s:%s:%d: ", DRV_NAME, __func__, \
+ __LINE__); \
+ printk(args); \
+ } while (0)
+
+#define usnic_dbg(args...) \
+ do { \
+ if (unlikely(usnic_log_lvl >= USNIC_LOG_LVL_DBG)) { \
+ usnic_printk(KERN_INFO, args); \
+ } \
+} while (0)
+
+#define usnic_info(args...) \
+do { \
+ if (usnic_log_lvl >= USNIC_LOG_LVL_INFO) { \
+ usnic_printk(KERN_INFO, args); \
+ } \
+} while (0)
+
+#define usnic_err(args...) \
+ do { \
+ if (usnic_log_lvl >= USNIC_LOG_LVL_ERR) { \
+ usnic_printk(KERN_ERR, args); \
+ } \
+ } while (0)
+#endif /* !USNIC_LOG_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
new file mode 100644
index 000000000000..ddef6f77a78c
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/bitmap.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <net/inet_sock.h>
+
+#include "usnic_transport.h"
+#include "usnic_log.h"
+
+/* ROCE */
+static unsigned long *roce_bitmap;
+static u16 roce_next_port = 1;
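+/* One bit per possible u16 port number: 1 << 16 bits == 8 KiB. */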
+#define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/)
+static DEFINE_SPINLOCK(roce_bitmap_lock);
+
+const char *usnic_transport_to_str(enum usnic_transport_type type)
+{
+ switch (type) {
+ case USNIC_TRANSPORT_UNKNOWN:
+ return "Unknown";
+ case USNIC_TRANSPORT_ROCE_CUSTOM:
+ return "roce custom";
+ case USNIC_TRANSPORT_IPV4_UDP:
+ return "IPv4 UDP";
+ case USNIC_TRANSPORT_MAX:
+ return "Max?";
+ default:
+ return "Not known";
+ }
+}
+
+int usnic_transport_sock_to_str(char *buf, int buf_sz,
+ struct socket *sock)
+{
+ int err;
+ uint32_t addr;
+ uint16_t port;
+ int proto;
+
+ memset(buf, 0, buf_sz);
+ err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port);
+ if (err)
+ return 0;
+
+ return scnprintf(buf, buf_sz, "Proto:%u Addr:%pI4h Port:%hu",
+ proto, &addr, port);
+}
+
+/*
+ * Reserve a port number. If "0" is specified, try to pick one starting
+ * at roce_next_port; roce_next_port takes on the values 1..4096.
+ */
+u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num)
+{
+ if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ spin_lock(&roce_bitmap_lock);
+ if (!port_num) {
+ port_num = bitmap_find_next_zero_area(roce_bitmap,
+ ROCE_BITMAP_SZ,
+ roce_next_port /* start */,
+ 1 /* nr */,
+ 0 /* align */);
+ roce_next_port = (port_num & 4095) + 1;
+ } else if (test_bit(port_num, roce_bitmap)) {
+ usnic_err("Failed to allocate port for %s\n",
+ usnic_transport_to_str(type));
+ spin_unlock(&roce_bitmap_lock);
+ goto out_fail;
+ }
+ bitmap_set(roce_bitmap, port_num, 1);
+ spin_unlock(&roce_bitmap_lock);
+ } else {
+ usnic_err("Failed to allocate port - transport %s unsupported\n",
+ usnic_transport_to_str(type));
+ goto out_fail;
+ }
+
+ usnic_dbg("Allocating port %hu for %s\n", port_num,
+ usnic_transport_to_str(type));
+ return port_num;
+
+out_fail:
+ return 0;
+}
+
+void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
+{
+ if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
+ spin_lock(&roce_bitmap_lock);
+ if (!port_num) {
+			usnic_err("Unreserving invalid port num 0 for %s\n",
+ usnic_transport_to_str(type));
+ goto out_roce_custom;
+ }
+
+ if (!test_bit(port_num, roce_bitmap)) {
+ usnic_err("Unreserving invalid %hu for %s\n",
+ port_num,
+ usnic_transport_to_str(type));
+ goto out_roce_custom;
+ }
+ bitmap_clear(roce_bitmap, port_num, 1);
+ usnic_dbg("Freeing port %hu for %s\n", port_num,
+ usnic_transport_to_str(type));
+out_roce_custom:
+ spin_unlock(&roce_bitmap_lock);
+ } else {
+ usnic_err("Freeing invalid port %hu for %d\n", port_num, type);
+ }
+}
+
+struct socket *usnic_transport_get_socket(int sock_fd)
+{
+ struct socket *sock;
+ int err;
+ char buf[25];
+
+ /* sockfd_lookup will internally do a fget */
+ sock = sockfd_lookup(sock_fd, &err);
+ if (!sock) {
+ usnic_err("Unable to lookup socket for fd %d with err %d\n",
+ sock_fd, err);
+ return ERR_PTR(-ENOENT);
+ }
+
+ usnic_transport_sock_to_str(buf, sizeof(buf), sock);
+ usnic_dbg("Get sock %s\n", buf);
+
+ return sock;
+}
+
+void usnic_transport_put_socket(struct socket *sock)
+{
+ char buf[100];
+
+ usnic_transport_sock_to_str(buf, sizeof(buf), sock);
+ usnic_dbg("Put sock %s\n", buf);
+ sockfd_put(sock);
+}
+
+int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
+ uint32_t *addr, uint16_t *port)
+{
+ int len;
+ int err;
+ struct sockaddr_in sock_addr;
+
+ err = sock->ops->getname(sock,
+ (struct sockaddr *)&sock_addr,
+ &len, 0);
+ if (err)
+ return err;
+
+ if (sock_addr.sin_family != AF_INET)
+ return -EINVAL;
+
+ if (proto)
+ *proto = sock->sk->sk_protocol;
+ if (port)
+ *port = ntohs(((struct sockaddr_in *)&sock_addr)->sin_port);
+ if (addr)
+ *addr = ntohl(((struct sockaddr_in *)
+ &sock_addr)->sin_addr.s_addr);
+
+ return 0;
+}
+
+int usnic_transport_init(void)
+{
+ roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL);
+ if (!roce_bitmap) {
+		usnic_err("Failed to allocate bitmap\n");
+ return -ENOMEM;
+ }
+
+ /* Do not ever allocate bit 0, hence set it here */
+ bitmap_set(roce_bitmap, 0, 1);
+ return 0;
+}
+
+void usnic_transport_fini(void)
+{
+ kfree(roce_bitmap);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h
new file mode 100644
index 000000000000..7e5dc6d9f462
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_transport.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_TRANSPORT_H_
+#define USNIC_TRANSPORT_H_
+
+#include "usnic_abi.h"
+
+const char *usnic_transport_to_str(enum usnic_transport_type trans_type);
+/*
+ * Returns number of bytes written, excluding null terminator. If
+ * nothing was written, the function returns 0.
+ */
+int usnic_transport_sock_to_str(char *buf, int buf_sz,
+ struct socket *sock);
+/*
+ * Reserve a port. If "port_num" is set, then the function will try
+ * to reserve that particular port.
+ */
+u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num);
+void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num);
+/*
+ * Does an fget on the socket referred to by sock_fd and returns the socket.
+ * Socket will not be destroyed before usnic_transport_put_socket has
+ * been called.
+ */
+struct socket *usnic_transport_get_socket(int sock_fd);
+void usnic_transport_put_socket(struct socket *sock);
+/*
+ * Call usnic_transport_get_socket before calling *_sock_get_addr
+ */
+int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
+ uint32_t *addr, uint16_t *port);
+int usnic_transport_init(void);
+void usnic_transport_fini(void);
+#endif /* !USNIC_TRANSPORT_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
new file mode 100644
index 000000000000..16755cdab2c0
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
+#include <linux/iommu.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "usnic_log.h"
+#include "usnic_uiom.h"
+#include "usnic_uiom_interval_tree.h"
+
+static struct workqueue_struct *usnic_uiom_wq;
+
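+/*
+ * Maximum number of scatterlist entries per chunk, chosen so that a
+ * struct usnic_uiom_chunk plus its page_list fits within one page.
+ */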
+#define USNIC_UIOM_PAGE_CHUNK \
+ ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
+ ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
+ (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
+
+static void usnic_uiom_reg_account(struct work_struct *work)
+{
+ struct usnic_uiom_reg *umem = container_of(work,
+ struct usnic_uiom_reg, work);
+
+ down_write(&umem->mm->mmap_sem);
+ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
+}
+
+static int usnic_uiom_dma_fault(struct iommu_domain *domain,
+ struct device *dev,
+ unsigned long iova, int flags,
+ void *token)
+{
+ usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ dev_name(dev),
+ domain, iova, flags);
+ return -ENOSYS;
+}
+
+static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
+{
+ struct usnic_uiom_chunk *chunk, *tmp;
+ struct page *page;
+ struct scatterlist *sg;
+ int i;
+ dma_addr_t pa;
+
+ list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
+ for_each_sg(chunk->page_list, sg, chunk->nents, i) {
+ page = sg_page(sg);
+ pa = sg_phys(sg);
+ if (dirty)
+ set_page_dirty_lock(page);
+ put_page(page);
+ usnic_dbg("pa: %pa\n", &pa);
+ }
+ kfree(chunk);
+ }
+}
+
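+/*
+ * Pin the user pages backing [addr, addr + size), charge them against the
+ * caller's RLIMIT_MEMLOCK, and collect them into scatterlist chunks on
+ * chunk_list.
+ */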
+static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
+ int dmasync, struct list_head *chunk_list)
+{
+ struct page **page_list;
+ struct scatterlist *sg;
+ struct usnic_uiom_chunk *chunk;
+ unsigned long locked;
+ unsigned long lock_limit;
+ unsigned long cur_base;
+ unsigned long npages;
+ int ret;
+ int off;
+ int i;
+ int flags;
+ dma_addr_t pa;
+ DEFINE_DMA_ATTRS(attrs);
+
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
+ if (!can_do_mlock())
+ return -EPERM;
+
+ INIT_LIST_HEAD(chunk_list);
+
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
+
+ down_write(&current->mm->mmap_sem);
+
+ locked = npages + current->mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ flags = IOMMU_READ | IOMMU_CACHE;
+ flags |= (writable) ? IOMMU_WRITE : 0;
+ cur_base = addr & PAGE_MASK;
+ ret = 0;
+
+ while (npages) {
+ ret = get_user_pages(current, current->mm, cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof(struct page *)),
+ 1, !writable, page_list, NULL);
+
+ if (ret < 0)
+ goto out;
+
+ npages -= ret;
+ off = 0;
+
+ while (ret) {
+ chunk = kmalloc(sizeof(*chunk) +
+ sizeof(struct scatterlist) *
+ min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
+ GFP_KERNEL);
+ if (!chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
+ sg_init_table(chunk->page_list, chunk->nents);
+ for_each_sg(chunk->page_list, sg, chunk->nents, i) {
+ sg_set_page(sg, page_list[i + off],
+ PAGE_SIZE, 0);
+ pa = sg_phys(sg);
+ usnic_dbg("va: 0x%lx pa: %pa\n",
+ cur_base + i*PAGE_SIZE, &pa);
+ }
+ cur_base += chunk->nents * PAGE_SIZE;
+ ret -= chunk->nents;
+ off += chunk->nents;
+ list_add_tail(&chunk->list, chunk_list);
+ }
+
+ ret = 0;
+ }
+
+out:
+ if (ret < 0)
+ usnic_uiom_put_pages(chunk_list, 0);
+ else
+ current->mm->locked_vm = locked;
+
+ up_write(&current->mm->mmap_sem);
+ free_page((unsigned long) page_list);
+ return ret;
+}
+
+static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
+ struct usnic_uiom_pd *pd)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ long unsigned va, size;
+
+ list_for_each_entry_safe(interval, tmp, intervals, link) {
+ va = interval->start << PAGE_SHIFT;
+ size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
+ while (size > 0) {
+ /* Workaround for RH 970401 */
+ usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
+ iommu_unmap(pd->domain, va, PAGE_SIZE);
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+}
+
+static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
+ struct usnic_uiom_reg *uiomr,
+ int dirty)
+{
+ int npages;
+ unsigned long vpn_start, vpn_last;
+ struct usnic_uiom_interval_node *interval, *tmp;
+ int writable = 0;
+ LIST_HEAD(rm_intervals);
+
+ npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
+ vpn_last = vpn_start + npages - 1;
+
+ spin_lock(&pd->lock);
+ usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
+ vpn_last, &rm_intervals);
+ usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
+
+ list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
+ if (interval->flags & IOMMU_WRITE)
+ writable = 1;
+ list_del(&interval->link);
+ kfree(interval);
+ }
+
+ usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
+ spin_unlock(&pd->lock);
+}
+
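+/*
+ * IOMMU-map every interval in the sorted list, walking the registration's
+ * page chunks and coalescing physically contiguous pages into a single
+ * iommu_map() call where possible.
+ */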
+static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
+ struct usnic_uiom_reg *uiomr)
+{
+ int i, err;
+ size_t size;
+ struct usnic_uiom_chunk *chunk;
+ struct usnic_uiom_interval_node *interval_node;
+ dma_addr_t pa;
+ dma_addr_t pa_start = 0;
+ dma_addr_t pa_end = 0;
+ long int va_start = -EINVAL;
+ struct usnic_uiom_pd *pd = uiomr->pd;
+ long int va = uiomr->va & PAGE_MASK;
+ int flags = IOMMU_READ | IOMMU_CACHE;
+
+ flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
+ chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
+ list);
+ list_for_each_entry(interval_node, intervals, link) {
+iter_chunk:
+ for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
+ pa = sg_phys(&chunk->page_list[i]);
+ if ((va >> PAGE_SHIFT) < interval_node->start)
+ continue;
+
+ if ((va >> PAGE_SHIFT) == interval_node->start) {
+ /* First page of the interval */
+ va_start = va;
+ pa_start = pa;
+ pa_end = pa;
+ }
+
+ WARN_ON(va_start == -EINVAL);
+
+ if ((pa_end + PAGE_SIZE != pa) &&
+ (pa != pa_start)) {
+ /* PAs are not contiguous */
+ size = pa_end - pa_start + PAGE_SIZE;
+ usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
+ va_start, &pa_start, size, flags);
+ err = iommu_map(pd->domain, va_start, pa_start,
+ size, flags);
+ if (err) {
+ usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n",
+ va_start, &pa_start, size, err);
+ goto err_out;
+ }
+ va_start = va;
+ pa_start = pa;
+ pa_end = pa;
+ }
+
+ if ((va >> PAGE_SHIFT) == interval_node->last) {
+ /* Last page of the interval */
+ size = pa - pa_start + PAGE_SIZE;
+ usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
+ va_start, &pa_start, size, flags);
+ err = iommu_map(pd->domain, va_start, pa_start,
+ size, flags);
+ if (err) {
+ usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
+ va_start, &pa_start, size, err);
+ goto err_out;
+ }
+ break;
+ }
+
+ if (pa != pa_start)
+ pa_end += PAGE_SIZE;
+ }
+
+ if (i == chunk->nents) {
+ /*
+ * Hit last entry of the chunk,
+ * hence advance to next chunk
+ */
+ chunk = list_first_entry(&chunk->list,
+ struct usnic_uiom_chunk,
+ list);
+ goto iter_chunk;
+ }
+ }
+
+ return 0;
+
+err_out:
+ usnic_uiom_unmap_sorted_intervals(intervals, pd);
+ return err;
+}
+
+struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
+ unsigned long addr, size_t size,
+ int writable, int dmasync)
+{
+ struct usnic_uiom_reg *uiomr;
+ unsigned long va_base, vpn_start, vpn_last;
+ unsigned long npages;
+ int offset, err;
+ LIST_HEAD(sorted_diff_intervals);
+
+ /*
+	 * The Intel IOMMU returns an error if a translation entry is
+	 * changed from read to write. This module cannot unmap and then
+	 * remap the entry with the new permission, because that opens a
+	 * small window in which hardware DMA may page fault.
+	 * Hence, make all entries writable.
+ */
+ writable = 1;
+
+ va_base = addr & PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
+ vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
+ vpn_last = vpn_start + npages - 1;
+
+ uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
+ if (!uiomr)
+ return ERR_PTR(-ENOMEM);
+
+ uiomr->va = va_base;
+ uiomr->offset = offset;
+ uiomr->length = size;
+ uiomr->writable = writable;
+ uiomr->pd = pd;
+
+ err = usnic_uiom_get_pages(addr, size, writable, dmasync,
+ &uiomr->chunk_list);
+ if (err) {
+ usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_free_uiomr;
+ }
+
+ spin_lock(&pd->lock);
+ err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
+ (writable) ? IOMMU_WRITE : 0,
+ IOMMU_WRITE,
+ &pd->rb_root,
+ &sorted_diff_intervals);
+ if (err) {
+ usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_put_pages;
+ }
+
+ err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
+ if (err) {
+ usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_put_intervals;
+
+ }
+
+ err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
+ (writable) ? IOMMU_WRITE : 0);
+ if (err) {
+ usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
+ vpn_start, vpn_last, err);
+ goto out_unmap_intervals;
+ }
+
+ usnic_uiom_put_interval_set(&sorted_diff_intervals);
+ spin_unlock(&pd->lock);
+
+ return uiomr;
+
+out_unmap_intervals:
+ usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
+out_put_intervals:
+ usnic_uiom_put_interval_set(&sorted_diff_intervals);
+out_put_pages:
+ usnic_uiom_put_pages(&uiomr->chunk_list, 0);
+ spin_unlock(&pd->lock);
+out_free_uiomr:
+ kfree(uiomr);
+ return ERR_PTR(err);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
+{
+ struct mm_struct *mm;
+ unsigned long diff;
+
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+
+ mm = get_task_mm(current);
+ if (!mm) {
+ kfree(uiomr);
+ return;
+ }
+
+ diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+
+ /*
+ * We may be called with the mm's mmap_sem already held. This
+ * can happen when a userspace munmap() is the call that drops
+ * the last reference to our file and calls our release
+ * method. If there are memory regions to destroy, we'll end
+ * up here and not be able to take the mmap_sem. In that case
+ * we defer the vm_locked accounting to the system workqueue.
+ */
+ if (closing) {
+ if (!down_write_trylock(&mm->mmap_sem)) {
+ INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
+ uiomr->mm = mm;
+ uiomr->diff = diff;
+
+ queue_work(usnic_uiom_wq, &uiomr->work);
+ return;
+ }
+ } else
+ down_write(&mm->mmap_sem);
+
+ current->mm->locked_vm -= diff;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ kfree(uiomr);
+}
+
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
+{
+ struct usnic_uiom_pd *pd;
+ void *domain;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
+ if (IS_ERR_OR_NULL(domain)) {
+ usnic_err("Failed to allocate IOMMU domain with err %ld\n",
+ PTR_ERR(pd->domain));
+ kfree(pd);
+ return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM);
+ }
+
+ iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
+
+ spin_lock_init(&pd->lock);
+ INIT_LIST_HEAD(&pd->devs);
+
+ return pd;
+}
+
+void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
+{
+ iommu_domain_free(pd->domain);
+ kfree(pd);
+}
+
+int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ int err;
+
+ uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
+ if (!uiom_dev)
+ return -ENOMEM;
+ uiom_dev->dev = dev;
+
+ err = iommu_attach_device(pd->domain, dev);
+ if (err)
+ goto out_free_dev;
+
+ if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
+ usnic_err("IOMMU of %s does not support cache coherency\n",
+ dev_name(dev));
+ err = -EINVAL;
+ goto out_detach_device;
+ }
+
+ spin_lock(&pd->lock);
+ list_add_tail(&uiom_dev->link, &pd->devs);
+ pd->dev_cnt++;
+ spin_unlock(&pd->lock);
+
+ return 0;
+
+out_detach_device:
+ iommu_detach_device(pd->domain, dev);
+out_free_dev:
+ kfree(uiom_dev);
+ return err;
+}
+
+void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ int found = 0;
+
+ spin_lock(&pd->lock);
+ list_for_each_entry(uiom_dev, &pd->devs, link) {
+ if (uiom_dev->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ usnic_err("Unable to free dev %s - not found\n",
+ dev_name(dev));
+ spin_unlock(&pd->lock);
+ return;
+ }
+
+ list_del(&uiom_dev->link);
+ pd->dev_cnt--;
+ spin_unlock(&pd->lock);
+
+ return iommu_detach_device(pd->domain, dev);
+}
+
+struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
+{
+ struct usnic_uiom_dev *uiom_dev;
+ struct device **devs;
+ int i = 0;
+
+ spin_lock(&pd->lock);
+ devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
+ if (!devs) {
+ devs = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ list_for_each_entry(uiom_dev, &pd->devs, link) {
+ devs[i++] = uiom_dev->dev;
+ }
+out:
+ spin_unlock(&pd->lock);
+ return devs;
+}
+
+void usnic_uiom_free_dev_list(struct device **devs)
+{
+ kfree(devs);
+}
+
+int usnic_uiom_init(char *drv_name)
+{
+ if (!iommu_present(&pci_bus_type)) {
+ usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
+ return -EPERM;
+ }
+
+ usnic_uiom_wq = create_workqueue(drv_name);
+ if (!usnic_uiom_wq) {
+ usnic_err("Unable to alloc wq for drv %s\n", drv_name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void usnic_uiom_fini(void)
+{
+ flush_workqueue(usnic_uiom_wq);
+ destroy_workqueue(usnic_uiom_wq);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
new file mode 100644
index 000000000000..70440996e8f2
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_UIOM_H_
+#define USNIC_UIOM_H_
+
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+
+#include "usnic_uiom_interval_tree.h"
+
+#define USNIC_UIOM_READ (1)
+#define USNIC_UIOM_WRITE (2)
+
+#define USNIC_UIOM_MAX_PD_CNT (1000)
+#define USNIC_UIOM_MAX_MR_CNT (1000000)
+#define USNIC_UIOM_MAX_MR_SIZE (~0UL)
+#define USNIC_UIOM_PAGE_SIZE (PAGE_SIZE)
+
+struct usnic_uiom_dev {
+ struct device *dev;
+ struct list_head link;
+};
+
+struct usnic_uiom_pd {
+ struct iommu_domain *domain;
+ spinlock_t lock;
+ struct rb_root rb_root;
+ struct list_head devs;
+ int dev_cnt;
+};
+
+struct usnic_uiom_reg {
+ struct usnic_uiom_pd *pd;
+ unsigned long va;
+ size_t length;
+ int offset;
+ int page_size;
+ int writable;
+ struct list_head chunk_list;
+ struct work_struct work;
+ struct mm_struct *mm;
+ unsigned long diff;
+};
+
+struct usnic_uiom_chunk {
+ struct list_head list;
+ int nents;
+ struct scatterlist page_list[0];
+};
+
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
+void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd);
+int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev);
+void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
+ struct device *dev);
+struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd);
+void usnic_uiom_free_dev_list(struct device **devs);
+struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
+ unsigned long addr, size_t size,
+ int access, int dmasync);
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing);
+int usnic_uiom_init(char *drv_name);
+void usnic_uiom_fini(void);
+#endif /* USNIC_UIOM_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
new file mode 100644
index 000000000000..d135ad90d914
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -0,0 +1,236 @@
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+
+#include <linux/interval_tree_generic.h>
+#include "usnic_uiom_interval_tree.h"
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
+ do { \
+ node = usnic_uiom_interval_node_alloc(start, \
+ end, ref_cnt, flags); \
+ if (!node) { \
+ err = -ENOMEM; \
+ goto err_out; \
+ } \
+ } while (0)
+
+#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))
+
+#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
+ err_out, list) \
+ do { \
+ MAKE_NODE(node, start, end, \
+ ref_cnt, flags, err, \
+ err_out); \
+ MARK_FOR_ADD(node, list); \
+ } while (0)
+
+#define FLAGS_EQUAL(flags1, flags2, mask) \
+ (((flags1) & (mask)) == ((flags2) & (mask)))
+
+static struct usnic_uiom_interval_node*
+usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
+ int flags)
+{
+ struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
+ GFP_ATOMIC);
+ if (!interval)
+ return NULL;
+
+ interval->start = start;
+ interval->last = last;
+ interval->flags = flags;
+ interval->ref_cnt = ref_cnt;
+
+ return interval;
+}
+
+static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct usnic_uiom_interval_node *node_a, *node_b;
+
+ node_a = list_entry(a, struct usnic_uiom_interval_node, link);
+ node_b = list_entry(b, struct usnic_uiom_interval_node, link);
+
+	/* compare instead of subtracting to avoid long-to-int truncation */
+ if (node_a->start < node_b->start)
+ return -1;
+ else if (node_a->start > node_b->start)
+ return 1;
+
+ return 0;
+}
+
+static void
+find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
+ unsigned long last,
+ struct list_head *list)
+{
+ struct usnic_uiom_interval_node *node;
+
+ INIT_LIST_HEAD(list);
+
+ for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
+ node;
+ node = usnic_uiom_interval_tree_iter_next(node, start, last))
+ list_add_tail(&node->link, list);
+
+ list_sort(NULL, list, interval_cmp);
+}
+
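+/*
+ * Compute the portions of [start, last] that are not already covered, with
+ * compatible flags, by nodes in the tree; the resulting nodes are appended
+ * to diff_set.
+ */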
+int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
+ int flags, int flag_mask,
+ struct rb_root *root,
+ struct list_head *diff_set)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ int err = 0;
+ long int pivot = start;
+ LIST_HEAD(intersection_set);
+
+ INIT_LIST_HEAD(diff_set);
+
+ find_intervals_intersection_sorted(root, start, last,
+ &intersection_set);
+
+ list_for_each_entry(interval, &intersection_set, link) {
+ if (pivot < interval->start) {
+ MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
+ 1, flags, err, err_out,
+ diff_set);
+ pivot = interval->start;
+ }
+
+ /*
+ * Invariant: Set [start, pivot] is either in diff_set or root,
+ * but not in both.
+ */
+
+ if (pivot > interval->last) {
+ continue;
+ } else if (pivot <= interval->last &&
+ FLAGS_EQUAL(interval->flags, flags,
+ flag_mask)) {
+ pivot = interval->last + 1;
+ }
+ }
+
+ if (pivot <= last)
+ MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
+ diff_set);
+
+ return 0;
+
+err_out:
+ list_for_each_entry_safe(interval, tmp, diff_set, link) {
+ list_del(&interval->link);
+ kfree(interval);
+ }
+
+ return err;
+}
+
+void usnic_uiom_put_interval_set(struct list_head *intervals)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ list_for_each_entry_safe(interval, tmp, intervals, link)
+ kfree(interval);
+}
+
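+/*
+ * Insert [start, last] into the tree: overlapping nodes are split at the
+ * boundaries, reference counts are bumped and flags OR-ed in on the
+ * overlapping portions, and the original overlapping nodes are replaced.
+ */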
+int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
+ unsigned long last, int flags)
+{
+ struct usnic_uiom_interval_node *interval, *tmp;
+ unsigned long istart, ilast;
+ int iref_cnt, iflags;
+ unsigned long lpivot = start;
+ int err = 0;
+ LIST_HEAD(to_add);
+ LIST_HEAD(intersection_set);
+
+ find_intervals_intersection_sorted(root, start, last,
+ &intersection_set);
+
+ list_for_each_entry(interval, &intersection_set, link) {
+ /*
+ * Invariant - lpivot is the left edge of next interval to be
+ * inserted
+ */
+ istart = interval->start;
+ ilast = interval->last;
+ iref_cnt = interval->ref_cnt;
+ iflags = interval->flags;
+
+ if (istart < lpivot) {
+ MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
+ iflags, err, err_out, &to_add);
+ } else if (istart > lpivot) {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
+ err, err_out, &to_add);
+ lpivot = istart;
+ } else {
+ lpivot = istart;
+ }
+
+ if (ilast > last) {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
+ iflags | flags, err, err_out,
+ &to_add);
+ MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
+ iflags, err, err_out, &to_add);
+ } else {
+ MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
+ iflags | flags, err, err_out,
+ &to_add);
+ }
+
+ lpivot = ilast + 1;
+ }
+
+ if (lpivot <= last)
+ MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
+ &to_add);
+
+ list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
+ usnic_uiom_interval_tree_remove(interval, root);
+ kfree(interval);
+ }
+
+ list_for_each_entry(interval, &to_add, link)
+ usnic_uiom_interval_tree_insert(interval, root);
+
+ return 0;
+
+err_out:
+ list_for_each_entry_safe(interval, tmp, &to_add, link)
+ kfree(interval);
+
+ return err;
+}
+
+void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
+ unsigned long last, struct list_head *removed)
+{
+ struct usnic_uiom_interval_node *interval;
+
+ for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
+ interval;
+ interval = usnic_uiom_interval_tree_iter_next(interval,
+ start,
+ last)) {
+ if (--interval->ref_cnt == 0)
+ list_add_tail(&interval->link, removed);
+ }
+
+ list_for_each_entry(interval, removed, link)
+ usnic_uiom_interval_tree_remove(interval, root);
+}
+
+INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST, , usnic_uiom_interval_tree)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
new file mode 100644
index 000000000000..d4f752e258fd
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_UIOM_INTERVAL_TREE_H_
+#define USNIC_UIOM_INTERVAL_TREE_H_
+
+#include <linux/rbtree.h>
+
+struct usnic_uiom_interval_node {
+ struct rb_node rb;
+ struct list_head link;
+ unsigned long start;
+ unsigned long last;
+ unsigned long __subtree_last;
+ unsigned int ref_cnt;
+ int flags;
+};
+
+extern void
+usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
+ struct rb_root *root);
+extern void
+usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
+ struct rb_root *root);
+extern struct usnic_uiom_interval_node *
+usnic_uiom_interval_tree_iter_first(struct rb_root *root,
+ unsigned long start,
+ unsigned long last);
+extern struct usnic_uiom_interval_node *
+usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
+ unsigned long start, unsigned long last);
+/*
+ * Inserts {start...last} into {root}. If there are overlaps,
+ * nodes will be broken up and merged.
+ */
+int usnic_uiom_insert_interval(struct rb_root *root,
+ unsigned long start, unsigned long last,
+ int flags);
+/*
+ * Removes {start...last} from {root}. The removed nodes are returned in
+ * 'removed'. The caller is responsible for freeing the memory of the
+ * nodes in 'removed'.
+ */
+void usnic_uiom_remove_interval(struct rb_root *root,
+ unsigned long start, unsigned long last,
+ struct list_head *removed);
+/*
+ * Returns {start...last} - {root} (relative complement of {start...last} in
+ * {root}) in diff_set, sorted in ascending order.
+ */
+int usnic_uiom_get_intervals_diff(unsigned long start,
+ unsigned long last, int flags,
+ int flag_mask,
+ struct rb_root *root,
+ struct list_head *diff_set);
+/* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */
+void usnic_uiom_put_interval_set(struct list_head *intervals);
+#endif /* USNIC_UIOM_INTERVAL_TREE_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
new file mode 100644
index 000000000000..656b88c39eda
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "usnic_ib.h"
+#include "vnic_resource.h"
+#include "usnic_log.h"
+#include "usnic_vnic.h"
+
+struct usnic_vnic {
+ struct vnic_dev *vdev;
+ struct vnic_dev_bar bar[PCI_NUM_RESOURCES];
+ struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];
+ spinlock_t res_lock;
+};
+
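+/*
+ * Translate a usnic resource type into the underlying vnic resource type
+ * via a lookup table generated from the USNIC_VNIC_RES_TYPES list.
+ */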
+static enum vnic_res_type _to_vnic_res_type(enum usnic_vnic_res_type res_type)
+{
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ vnic_res_type,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ vnic_res_type,
+ static enum vnic_res_type usnic_vnic_type_2_vnic_type[] = {
+ USNIC_VNIC_RES_TYPES};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+ if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
+ return RES_TYPE_MAX;
+
+ return usnic_vnic_type_2_vnic_type[res_type];
+}
+
+const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type)
+{
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ desc,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ desc,
+ static const char * const usnic_vnic_res_type_desc[] = {
+ USNIC_VNIC_RES_TYPES};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+ if (res_type >= USNIC_VNIC_RES_TYPE_MAX)
+ return "unknown";
+
+ return usnic_vnic_res_type_desc[res_type];
+
+}
+
+const char *usnic_vnic_pci_name(struct usnic_vnic *vnic)
+{
+ return pci_name(usnic_vnic_get_pdev(vnic));
+}
+
+int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf,
+ int buf_sz,
+ void *hdr_obj,
+ int (*printtitle)(void *, char*, int),
+ int (*printcols)(char *, int),
+ int (*printrow)(void *, char *, int))
+{
+ struct usnic_vnic_res_chunk *chunk;
+ struct usnic_vnic_res *res;
+ struct vnic_dev_bar *bar0;
+ int i, j, offset;
+
+ offset = 0;
+ bar0 = usnic_vnic_get_bar(vnic, 0);
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "VF:%hu BAR0 bus_addr=%pa vaddr=0x%p size=%ld ",
+ usnic_vnic_get_index(vnic),
+ &bar0->bus_addr,
+ bar0->vaddr, bar0->len);
+ if (printtitle)
+ offset += printtitle(hdr_obj, buf + offset, buf_sz - offset);
+ offset += scnprintf(buf + offset, buf_sz - offset, "\n");
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "|RES\t|CTRL_PIN\t\t|IN_USE\t");
+ if (printcols)
+ offset += printcols(buf + offset, buf_sz - offset);
+ offset += scnprintf(buf + offset, buf_sz - offset, "\n");
+
+ spin_lock(&vnic->res_lock);
+ for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {
+ chunk = &vnic->chunks[i];
+ for (j = 0; j < chunk->cnt; j++) {
+ res = chunk->res[j];
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "|%s[%u]\t|0x%p\t|%u\t",
+ usnic_vnic_res_type_to_str(res->type),
+ res->vnic_idx, res->ctrl, !!res->owner);
+ if (printrow) {
+ offset += printrow(res->owner, buf + offset,
+ buf_sz - offset);
+ }
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "\n");
+ }
+ }
+ spin_unlock(&vnic->res_lock);
+ return offset;
+}
+
+void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
+ enum usnic_vnic_res_type trgt_type,
+ u16 cnt)
+{
+ int i;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ if (spec->resources[i].type == trgt_type) {
+ spec->resources[i].cnt = cnt;
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ int found, i, j;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ found = 0;
+
+ for (j = 0; j < USNIC_VNIC_RES_TYPE_MAX; j++) {
+ if (res_spec->resources[i].type !=
+ min_spec->resources[i].type)
+ continue;
+ found = 1;
+ if (min_spec->resources[i].cnt >
+ res_spec->resources[i].cnt)
+ return -EINVAL;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int usnic_vnic_spec_dump(char *buf, int buf_sz,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ enum usnic_vnic_res_type res_type;
+ int res_cnt;
+ int i;
+ int offset = 0;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+ offset += scnprintf(buf + offset, buf_sz - offset,
+ "Res: %s Cnt: %d ",
+ usnic_vnic_res_type_to_str(res_type),
+ res_cnt);
+ }
+
+ return offset;
+}
+
+int usnic_vnic_check_room(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec)
+{
+ int i;
+ enum usnic_vnic_res_type res_type;
+ int res_cnt;
+
+ for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) {
+ res_type = res_spec->resources[i].type;
+ res_cnt = res_spec->resources[i].cnt;
+
+ if (res_type == USNIC_VNIC_RES_TYPE_EOL)
+ break;
+
+ if (res_cnt > usnic_vnic_res_free_cnt(vnic, res_type))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type)
+{
+ return vnic->chunks[type].cnt;
+}
+
+int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type)
+{
+ return vnic->chunks[type].free_cnt;
+}
+
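+/*
+ * Claim cnt free resources of the given type from the vnic, mark them as
+ * owned by @owner, and return them as a newly allocated chunk.
+ */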
+struct usnic_vnic_res_chunk *
+usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
+ int cnt, void *owner)
+{
+ struct usnic_vnic_res_chunk *src, *ret;
+ struct usnic_vnic_res *res;
+ int i;
+
+ if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 1 || !owner)
+ return ERR_PTR(-EINVAL);
+
+ ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
+ if (!ret) {
+ usnic_err("Failed to allocate chunk for %s - Out of memory\n",
+ usnic_vnic_pci_name(vnic));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret->res = kzalloc(sizeof(*(ret->res))*cnt, GFP_ATOMIC);
+ if (!ret->res) {
+ usnic_err("Failed to allocate resources for %s. Out of memory\n",
+ usnic_vnic_pci_name(vnic));
+ kfree(ret);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock(&vnic->res_lock);
+ src = &vnic->chunks[type];
+ for (i = 0; i < src->cnt && ret->cnt < cnt; i++) {
+ res = src->res[i];
+ if (!res->owner) {
+ src->free_cnt--;
+ res->owner = owner;
+ ret->res[ret->cnt++] = res;
+ }
+ }
+
+ spin_unlock(&vnic->res_lock);
+ ret->type = type;
+ ret->vnic = vnic;
+ WARN_ON(ret->cnt != cnt);
+
+ return ret;
+}
+
+void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk)
+{
+
+ struct usnic_vnic_res *res;
+ int i;
+ struct usnic_vnic *vnic = chunk->vnic;
+
+ spin_lock(&vnic->res_lock);
+ while ((i = --chunk->cnt) >= 0) {
+ res = chunk->res[i];
+ chunk->res[i] = NULL;
+ res->owner = NULL;
+ vnic->chunks[res->type].free_cnt++;
+ }
+ spin_unlock(&vnic->res_lock);
+
+ kfree(chunk->res);
+ kfree(chunk);
+}
+
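+/*
+ * The VF index is derived from the PCI devfn; this assumes devfn 0 is the
+ * PF so that VFs start at devfn 1.
+ */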
+u16 usnic_vnic_get_index(struct usnic_vnic *vnic)
+{
+ return usnic_vnic_get_pdev(vnic)->devfn - 1;
+}
+
+static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type,
+ struct usnic_vnic_res_chunk *chunk)
+{
+ int cnt, err, i;
+ struct usnic_vnic_res *res;
+
+ cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
+ if (cnt < 1)
+ return -EINVAL;
+
+ chunk->cnt = chunk->free_cnt = cnt;
+ chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
+ if (!chunk->res)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ res->type = type;
+ res->vnic_idx = i;
+ res->vnic = vnic;
+ res->ctrl = vnic_dev_get_res(vnic->vdev,
+ _to_vnic_res_type(type), i);
+ chunk->res[i] = res;
+ }
+
+ chunk->vnic = vnic;
+ return 0;
+fail:
+ for (i--; i >= 0; i--)
+ kfree(chunk->res[i]);
+ kfree(chunk->res);
+ return err;
+}
+
+static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk)
+{
+ int i;
+ for (i = 0; i < chunk->cnt; i++)
+ kfree(chunk->res[i]);
+ kfree(chunk->res);
+}
+
+static int usnic_vnic_discover_resources(struct pci_dev *pdev,
+ struct usnic_vnic *vnic)
+{
+ enum usnic_vnic_res_type res_type;
+ int i;
+ int err = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ vnic->bar[i].len = pci_resource_len(pdev, i);
+ vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len);
+ if (!vnic->bar[i].vaddr) {
+ usnic_err("Cannot memory-map BAR %d, aborting\n",
+ i);
+ err = -ENODEV;
+ goto out_clean_bar;
+ }
+ vnic->bar[i].bus_addr = pci_resource_start(pdev, i);
+ }
+
+ vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar,
+ ARRAY_SIZE(vnic->bar));
+ if (!vnic->vdev) {
+ usnic_err("Failed to register device %s\n",
+ pci_name(pdev));
+ err = -EINVAL;
+ goto out_clean_bar;
+ }
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
+ err = usnic_vnic_alloc_res_chunk(vnic, res_type,
+ &vnic->chunks[res_type]);
+ if (err) {
+ usnic_err("Failed to alloc res %s with err %d\n",
+ usnic_vnic_res_type_to_str(res_type),
+ err);
+ goto out_clean_chunks;
+ }
+ }
+
+ return 0;
+
+out_clean_chunks:
+ for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--)
+ usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
+ vnic_dev_unregister(vnic->vdev);
+out_clean_bar:
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ if (!vnic->bar[i].vaddr)
+ break;
+
+ iounmap(vnic->bar[i].vaddr);
+ }
+
+ return err;
+}
+
+struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic)
+{
+ return vnic_dev_get_pdev(vnic->vdev);
+}
+
+struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
+ int bar_num)
+{
+ return (bar_num < ARRAY_SIZE(vnic->bar)) ? &vnic->bar[bar_num] : NULL;
+}
+
+static void usnic_vnic_release_resources(struct usnic_vnic *vnic)
+{
+ int i;
+ struct pci_dev *pdev;
+ enum usnic_vnic_res_type res_type;
+
+ pdev = usnic_vnic_get_pdev(vnic);
+
+ for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
+ res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++)
+ usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
+
+ vnic_dev_unregister(vnic->vdev);
+
+ for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ iounmap(vnic->bar[i].vaddr);
+ }
+}
+
+struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
+{
+ struct usnic_vnic *vnic;
+ int err = 0;
+
+ if (!pci_is_enabled(pdev)) {
+ usnic_err("PCI dev %s is disabled\n", pci_name(pdev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
+ if (!vnic) {
+ usnic_err("Failed to alloc vnic for %s - out of memory\n",
+ pci_name(pdev));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&vnic->res_lock);
+
+ err = usnic_vnic_discover_resources(pdev, vnic);
+ if (err) {
+ usnic_err("Failed to discover %s resources with err %d\n",
+ pci_name(pdev), err);
+ goto out_free_vnic;
+ }
+
+ usnic_dbg("Allocated vnic for %s\n", usnic_vnic_pci_name(vnic));
+
+ return vnic;
+
+out_free_vnic:
+ kfree(vnic);
+
+ return ERR_PTR(err);
+}
+
+void usnic_vnic_free(struct usnic_vnic *vnic)
+{
+ usnic_vnic_release_resources(vnic);
+ kfree(vnic);
+}
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h
new file mode 100644
index 000000000000..14d931a8829d
--- /dev/null
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef USNIC_VNIC_H_
+#define USNIC_VNIC_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+
+/* =USNIC_VNIC_RES_TYPE= =VNIC_RES= =DESC= */
+#define USNIC_VNIC_RES_TYPES \
+ DEFINE_USNIC_VNIC_RES_AT(EOL, RES_TYPE_EOL, "EOL", 0) \
+ DEFINE_USNIC_VNIC_RES(WQ, RES_TYPE_WQ, "WQ") \
+ DEFINE_USNIC_VNIC_RES(RQ, RES_TYPE_RQ, "RQ") \
+ DEFINE_USNIC_VNIC_RES(CQ, RES_TYPE_CQ, "CQ") \
+ DEFINE_USNIC_VNIC_RES(INTR, RES_TYPE_INTR_CTRL, "INT") \
+ DEFINE_USNIC_VNIC_RES(MAX, RES_TYPE_MAX, "MAX")\
+
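+/* X-macro expansion: each entry in the USNIC_VNIC_RES_TYPES table above becomes one enum value below */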
+#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \
+ USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t = val,
+#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \
+ USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t,
+enum usnic_vnic_res_type {
+ USNIC_VNIC_RES_TYPES
+};
+#undef DEFINE_USNIC_VNIC_RES
+#undef DEFINE_USNIC_VNIC_RES_AT
+
+struct usnic_vnic_res {
+ enum usnic_vnic_res_type type;
+ unsigned int vnic_idx;
+ struct usnic_vnic *vnic;
+ void __iomem *ctrl;
+ void *owner;
+};
+
+struct usnic_vnic_res_chunk {
+ enum usnic_vnic_res_type type;
+ int cnt;
+ int free_cnt;
+ struct usnic_vnic_res **res;
+ struct usnic_vnic *vnic;
+};
+
+struct usnic_vnic_res_desc {
+ enum usnic_vnic_res_type type;
+ uint16_t cnt;
+};
+
+struct usnic_vnic_res_spec {
+ struct usnic_vnic_res_desc resources[USNIC_VNIC_RES_TYPE_MAX];
+};
+
+const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type);
+const char *usnic_vnic_pci_name(struct usnic_vnic *vnic);
+int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, int buf_sz,
+ void *hdr_obj,
+ int (*printtitle)(void *, char*, int),
+ int (*printcols)(char *, int),
+ int (*printrow)(void *, char *, int));
+void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec,
+ enum usnic_vnic_res_type trgt_type,
+ u16 cnt);
+int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_spec_dump(char *buf, int buf_sz,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_check_room(struct usnic_vnic *vnic,
+ struct usnic_vnic_res_spec *res_spec);
+int usnic_vnic_res_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type);
+int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type);
+struct usnic_vnic_res_chunk *
+usnic_vnic_get_resources(struct usnic_vnic *vnic,
+ enum usnic_vnic_res_type type,
+ int cnt,
+ void *owner);
+void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk);
+struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic);
+struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic,
+ int bar_num);
+struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev);
+void usnic_vnic_free(struct usnic_vnic *vnic);
+u16 usnic_vnic_get_index(struct usnic_vnic *vnic);
+
+#endif /*!USNIC_VNIC_H_*/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d64ed05fb082..5786a78ff8bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -104,6 +104,8 @@ int ipoib_open(struct net_device *dev)
ipoib_dbg(priv, "bringing up interface\n");
+ netif_carrier_off(dev);
+
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_pkey_dev_delay_open(dev))
@@ -1366,8 +1368,6 @@ void ipoib_setup(struct net_device *dev)
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
- netif_carrier_off(dev);
-
priv->dev = dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 049a997caff3..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -192,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+ if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
+ init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+
if (dev->features & NETIF_F_SG)
init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+				      ISER_CONN_TERMINATING)) {
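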
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9804fca6bf06..8ee228e9ab5a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
	/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- device->use_frwr = 1;
- device->reg_rdma_mem = isert_reg_rdma_frwr;
- device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+ device->use_fastreg = 1;
+ device->reg_rdma_mem = isert_reg_rdma;
+ device->unreg_rdma_mem = isert_unreg_rdma;
} else {
- device->use_frwr = 0;
+ device->use_fastreg = 0;
device->reg_rdma_mem = isert_map_rdma;
device->unreg_rdma_mem = isert_unmap_cmd;
}
@@ -239,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
device->cqs_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+ pr_debug("Using %d CQs, device %s supports %d vectors support "
+ "Fast registration %d\n",
device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_frwr);
+ device->ib_device->num_comp_vectors, device->use_fastreg);
device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
device->cqs_used, GFP_KERNEL);
if (!device->cq_desc) {
@@ -250,13 +251,6 @@ isert_create_device_ib_res(struct isert_device *device)
}
cq_desc = device->cq_desc;
- device->dev_pd = ib_alloc_pd(ib_dev);
- if (IS_ERR(device->dev_pd)) {
- ret = PTR_ERR(device->dev_pd);
- pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
- goto out_cq_desc;
- }
-
for (i = 0; i < device->cqs_used; i++) {
cq_desc[i].device = device;
cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@ isert_create_device_ib_res(struct isert_device *device)
goto out_cq;
}
- device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(device->dev_mr)) {
- ret = PTR_ERR(device->dev_mr);
- pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
- goto out_cq;
- }
-
return 0;
out_cq:
@@ -316,9 +303,6 @@ out_cq:
ib_destroy_cq(device->dev_tx_cq[j]);
}
}
- ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
kfree(device->cq_desc);
return ret;
@@ -341,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
device->dev_tx_cq[i] = NULL;
}
- ib_dereg_mr(device->dev_mr);
- ib_dealloc_pd(device->dev_pd);
kfree(device->cq_desc);
}
@@ -398,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
}
static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc, *tmp;
int i = 0;
- if (list_empty(&isert_conn->conn_frwr_pool))
+ if (list_empty(&isert_conn->conn_fr_pool))
return;
- pr_debug("Freeing conn %p frwr pool", isert_conn);
+ pr_debug("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->conn_frwr_pool, list) {
+ &isert_conn->conn_fr_pool, list) {
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
++i;
}
- if (i < isert_conn->conn_frwr_pool_size)
+ if (i < isert_conn->conn_fr_pool_size)
pr_warn("Pool still has %d regions registered\n",
- isert_conn->conn_frwr_pool_size - i);
+ isert_conn->conn_fr_pool_size - i);
}
static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+ struct fast_reg_descriptor *fr_desc)
+{
+ fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_frpl)) {
+ pr_err("Failed to allocate data frpl err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
+ return PTR_ERR(fr_desc->data_frpl);
+ }
+
+ fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(fr_desc->data_mr)) {
+ pr_err("Failed to allocate data frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
+ ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ return PTR_ERR(fr_desc->data_mr);
+ }
+ pr_debug("Create fr_desc %p page_list %p\n",
+ fr_desc, fr_desc->data_frpl->page_list);
+
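+	/* a freshly allocated fast-reg MR needs no LOCAL_INV before its first registration */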
+ fr_desc->valid = true;
+
+ return 0;
+}
+
+static int
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
int i, ret;
- INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
- isert_conn->conn_frwr_pool_size = 0;
+ INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ isert_conn->conn_fr_pool_size = 0;
for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
@@ -439,40 +448,26 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
goto err;
}
- fr_desc->data_frpl =
- ib_alloc_fast_reg_page_list(device->ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_frpl)) {
- pr_err("Failed to allocate fr_pg_list err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
- ret = PTR_ERR(fr_desc->data_frpl);
- goto err;
- }
-
- fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_mr)) {
- pr_err("Failed to allocate frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
- ret = PTR_ERR(fr_desc->data_mr);
- ib_free_fast_reg_page_list(fr_desc->data_frpl);
+ ret = isert_create_fr_desc(device->ib_device,
+ isert_conn->conn_pd, fr_desc);
+ if (ret) {
+ pr_err("Failed to create fastreg descriptor err=%d\n",
+ ret);
+ kfree(fr_desc);
goto err;
}
- pr_debug("Create fr_desc %p page_list %p\n",
- fr_desc, fr_desc->data_frpl->page_list);
- fr_desc->valid = true;
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
- isert_conn->conn_frwr_pool_size++;
+ list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+ isert_conn->conn_fr_pool_size++;
}
- pr_debug("Creating conn %p frwr pool size=%d",
- isert_conn, isert_conn->conn_frwr_pool_size);
+ pr_debug("Creating conn %p fastreg pool size=%d",
+ isert_conn, isert_conn->conn_fr_pool_size);
return 0;
err:
- isert_conn_free_frwr_pool(isert_conn);
+ isert_conn_free_fastreg_pool(isert_conn);
return ret;
}
@@ -497,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
- init_waitqueue_head(&isert_conn->conn_wait);
- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->conn_wait);
+ init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
- mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -558,14 +552,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
}
isert_conn->conn_device = device;
- isert_conn->conn_pd = device->dev_pd;
- isert_conn->conn_mr = device->dev_mr;
+ isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+ if (IS_ERR(isert_conn->conn_pd)) {
+ ret = PTR_ERR(isert_conn->conn_pd);
+ pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+ isert_conn, ret);
+ goto out_pd;
+ }
- if (device->use_frwr) {
- ret = isert_conn_create_frwr_pool(isert_conn);
+ isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(isert_conn->conn_mr)) {
+ ret = PTR_ERR(isert_conn->conn_mr);
+ pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+ isert_conn, ret);
+ goto out_mr;
+ }
+
+ if (device->use_fastreg) {
+ ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
- pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
- goto out_frwr;
+ pr_err("Conn: %p failed to create fastreg pool\n",
+ isert_conn);
+ goto out_fastreg;
}
}
@@ -582,9 +591,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
- if (device->use_frwr)
- isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+ if (device->use_fastreg)
+ isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+ ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+ ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
isert_device_try_release(device);
out_rsp_dma_map:
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +621,8 @@ isert_connect_release(struct isert_conn *isert_conn)
pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- if (device && device->use_frwr)
- isert_conn_free_frwr_pool(isert_conn);
+ if (device && device->use_fastreg)
+ isert_conn_free_fastreg_pool(isert_conn);
if (isert_conn->conn_qp) {
cq_index = ((struct isert_cq_desc *)
@@ -623,6 +636,9 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_free_rx_descriptors(isert_conn);
rdma_destroy_id(isert_conn->conn_cm_id);
+ ib_dereg_mr(isert_conn->conn_mr);
+ ib_dealloc_pd(isert_conn->conn_pd);
+
if (isert_conn->login_buf) {
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -671,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
@@ -695,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
- wake_up(&isert_conn->conn_wait);
+ complete(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@@ -871,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
*/
- mutex_lock(&isert_conn->conn_comp_mutex);
- if (coalesce &&
+ mutex_lock(&isert_conn->conn_mutex);
+ if (coalesce && isert_conn->state == ISER_CONN_UP &&
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ tx_desc->llnode_active = true;
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
return;
}
isert_conn->conn_comp_batch = 0;
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1024,13 +1041,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
}
static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_cmd *isert_cmd;
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, gfp);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
@@ -1219,7 +1236,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1233,7 +1250,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_NOOP_OUT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1246,7 +1263,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1254,7 +1271,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_LOGOUT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1265,7 +1282,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
HZ);
break;
case ISCSI_OP_TEXT:
- cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+ cmd = isert_allocate_cmd(conn);
if (!cmd)
break;
@@ -1404,25 +1421,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
LIST_HEAD(unmap_list);
- pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+ pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
if (wr->fr_desc) {
- pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+ pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
isert_cmd, wr->fr_desc);
spin_lock_bh(&isert_conn->conn_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
spin_unlock_bh(&isert_conn->conn_lock);
wr->fr_desc = NULL;
}
if (wr->sge) {
- pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+ pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -1447,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1459,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1469,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
@@ -1532,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
cmd->write_data_done = wr->cur_rdma_length;
+ wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1572,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_free_conn()
+ * from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1596,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1607,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1645,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
@@ -1674,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+ struct llist_node *llnode;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ llnode = llist_del_all(&isert_conn->conn_comp_llist);
+ isert_conn->conn_comp_batch = 0;
+ mutex_unlock(&isert_conn->conn_mutex);
+
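+	/* flush send descriptors still queued for coalesced completion and drop their counts */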
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
+
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
- if (tx_desc) {
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
}
+ tx_desc->comp_llnode_batch = NULL;
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- pr_debug("Calling wake_up from isert_cq_comp_err\n");
+ if (!isert_cmd)
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ else
+ isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state != ISER_CONN_DOWN)
- isert_conn->state = ISER_CONN_TERMINATING;
- mutex_unlock(&isert_conn->conn_mutex);
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct iscsi_conn *conn = isert_conn->conn;
- wake_up(&isert_conn->conn_wait_comp_err);
+ if (isert_conn->post_recv_buf_count)
+ return;
+
+ isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
}
+
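+	/* poll every 3 seconds until all posted sends have drained */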
+ while (atomic_read(&isert_conn->post_send_buf_count))
+ msleep(3000);
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ complete(&isert_conn->conn_wait_comp_err);
}
static void
@@ -1723,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_comp_err(tx_desc, isert_conn);
+
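+			/* fast-reg/local-inv WRs need no cleanup; descriptors queued on the comp llist are drained separately */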
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ if (tx_desc->llnode_active)
+ continue;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
+ isert_cq_tx_comp_err(tx_desc, isert_conn);
+ }
}
}
@@ -1767,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
- isert_cq_comp_err(NULL, isert_conn);
+ isert_cq_rx_comp_err(isert_conn);
}
}
@@ -2163,32 +2233,29 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
- struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
- struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+ struct isert_conn *isert_conn, struct scatterlist *sg_start,
+ struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+ unsigned int data_len)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct scatterlist *sg_start;
- u32 sg_off, page_off;
struct ib_send_wr fr_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
+ int ret, pagelist_len;
+ u32 page_off;
u8 key;
- int ret, sg_nents, pagelist_len;
- sg_off = offset / PAGE_SIZE;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
- ISCSI_ISER_SG_TABLESIZE);
+ sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
page_off = offset % PAGE_SIZE;
- pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
- isert_cmd, fr_desc, sg_nents, sg_off, offset);
+ pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+ fr_desc, sg_nents, offset);
pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
&fr_desc->data_frpl->page_list[0]);
if (!fr_desc->valid) {
memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
wr = &inv_wr;
@@ -2199,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
+ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start =
fr_desc->data_frpl->page_list[0] + page_off;
@@ -2232,8 +2300,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
}
static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2319,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
data_left = se_cmd->data_length;
} else {
- sg_off = cmd->write_data_done / PAGE_SIZE;
- data_left = se_cmd->data_length - cmd->write_data_done;
offset = cmd->write_data_done;
+ sg_off = offset / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
isert_cmd->tx_desc.isert_cmd = isert_cmd;
}
@@ -2311,16 +2379,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
wr->fr_desc = NULL;
} else {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+ fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
wr->fr_desc = fr_desc;
- ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
- ib_sge, offset, data_len);
+ ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+ ib_sge, sg_nents, offset, data_len);
if (ret) {
- list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+ list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
goto unmap_sg;
}
}
@@ -2364,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr, true);
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
isert_cmd);
@@ -2397,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
@@ -2689,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
kfree(isert_np);
}
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
- int ret;
-
- mutex_lock(&isert_conn->conn_mutex);
- ret = (isert_conn->state == state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_free_conn: Starting \n");
+ pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2714,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
- if (isert_conn->state == ISER_CONN_UP) {
- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
- isert_conn->state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- wait_event(isert_conn->conn_wait_comp_err,
- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
- isert_put_conn(isert_conn);
- return;
- }
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
return;
}
- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
- isert_conn->state);
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+ wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
isert_put_conn(isert_conn);
}
@@ -2758,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
+ .iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90ff2d83..f6ae7f5dd408 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@ struct iser_tx_desc {
struct isert_cmd *isert_cmd;
struct llist_node *comp_llnode_batch;
struct llist_node comp_llnode;
+ bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -116,17 +118,16 @@ struct isert_conn {
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
- wait_queue_head_t conn_wait;
- wait_queue_head_t conn_wait_comp_err;
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
struct kref conn_kref;
- struct list_head conn_frwr_pool;
- int conn_frwr_pool_size;
- /* lock to protect frwr_pool */
+ struct list_head conn_fr_pool;
+ int conn_fr_pool_size;
+ /* lock to protect fastreg pool */
spinlock_t conn_lock;
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
- struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
@@ -139,13 +140,11 @@ struct isert_cq_desc {
};
struct isert_device {
- int use_frwr;
+ int use_fastreg;
int cqs_used;
int refcount;
int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
- struct ib_pd *dev_pd;
- struct ib_mr *dev_mr;
struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
struct isert_cq_desc *cq_desc;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a88631918e85..529b6bcdca7a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -660,6 +660,7 @@ static void srp_remove_target(struct srp_target_port *target)
srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
+ srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a06e12552886..ce953d895f5b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
error = input_ff_upload(dev, &effect, file);
+ if (error)
+ return error;
if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
return -EFAULT;
- return error;
+ return 0;
}
/* Multi-number variable-length handlers */
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index fa7a95c1da0e..2909e9561cf3 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/slab.h>
#include <linux/pci.h>
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index ae912d3aee4e..7c03114158e0 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d2965e4b3224..1c4c0db05550 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1653,35 +1653,36 @@ static void input_dev_toggle(struct input_dev *dev, bool activate)
*/
void input_reset_device(struct input_dev *dev)
{
- mutex_lock(&dev->mutex);
+ unsigned long flags;
- if (dev->users) {
- input_dev_toggle(dev, true);
+ mutex_lock(&dev->mutex);
+ spin_lock_irqsave(&dev->event_lock, flags);
- /*
- * Keys that have been pressed at suspend time are unlikely
- * to be still pressed when we resume.
- */
- spin_lock_irq(&dev->event_lock);
- input_dev_release_keys(dev);
- spin_unlock_irq(&dev->event_lock);
- }
+ input_dev_toggle(dev, true);
+ input_dev_release_keys(dev);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int input_dev_suspend(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- mutex_lock(&input_dev->mutex);
+ spin_lock_irq(&input_dev->event_lock);
- if (input_dev->users)
- input_dev_toggle(input_dev, false);
+ /*
+ * Keys that are pressed now are unlikely to be
+ * still pressed when we resume.
+ */
+ input_dev_release_keys(input_dev);
- mutex_unlock(&input_dev->mutex);
+ /* Turn off LEDs and sounds, if any are active. */
+ input_dev_toggle(input_dev, false);
+
+ spin_unlock_irq(&input_dev->event_lock);
return 0;
}
@@ -1690,7 +1691,43 @@ static int input_dev_resume(struct device *dev)
{
struct input_dev *input_dev = to_input_dev(dev);
- input_reset_device(input_dev);
+ spin_lock_irq(&input_dev->event_lock);
+
+ /* Restore state of LEDs and sounds, if any were active. */
+ input_dev_toggle(input_dev, true);
+
+ spin_unlock_irq(&input_dev->event_lock);
+
+ return 0;
+}
+
+static int input_dev_freeze(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ spin_lock_irq(&input_dev->event_lock);
+
+ /*
+ * Keys that are pressed now are unlikely to be
+ * still pressed when we resume.
+ */
+ input_dev_release_keys(input_dev);
+
+ spin_unlock_irq(&input_dev->event_lock);
+
+ return 0;
+}
+
+static int input_dev_poweroff(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ spin_lock_irq(&input_dev->event_lock);
+
+ /* Turn off LEDs and sounds, if any are active. */
+ input_dev_toggle(input_dev, false);
+
+ spin_unlock_irq(&input_dev->event_lock);
return 0;
}
@@ -1698,7 +1735,8 @@ static int input_dev_resume(struct device *dev)
static const struct dev_pm_ops input_dev_pm_ops = {
.suspend = input_dev_suspend,
.resume = input_dev_resume,
- .poweroff = input_dev_suspend,
+ .freeze = input_dev_freeze,
+ .poweroff = input_dev_poweroff,
.restore = input_dev_resume,
};
#endif /* CONFIG_PM */
@@ -1707,7 +1745,7 @@ static struct device_type input_dev_type = {
.groups = input_dev_attr_groups,
.release = input_dev_release,
.uevent = input_dev_uevent,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &input_dev_pm_ops,
#endif
};
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 85bc8dc07cfc..55efdfc7eb62 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 0cbfd2dfabf4..b78425765d3e 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/gameport.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#define DRIVER_DESC "Logitech ADI joystick family driver"
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 65367e44d715..ae3ee24a2368 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index ab1cf2882004..0f519db64748 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index 9e1beff57c33..eac9c5b8d73e 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index c0f9c7b7eb4e..573191dd78e8 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 55196f730af6..a9ac2f9cfce0 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index b1d7d9b0eb86..96ae4f5bd0eb 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 88c22623a2e8..17c2c800743c 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index 7eb878bab968..d1c6e4846a4a 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -31,7 +31,6 @@
#include <linux/gameport.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/slab.h>
#define DRIVER_DESC "Gameport data dumper module"
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 9fb153eef2fc..c5358ba1f571 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Magellan and SpaceMouse 6dof controller driver"
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 04c69af37148..4a95b224169f 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 80a7b27a457a..f4445a4e8d6a 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index a41f291652e6..f2667820e8c5 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index 0f51a60e14a7..099c6d7b5e08 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Gravis Stinger gamepad driver"
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 5ef9bcdb0345..7e17cde464f0 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 2556a8193579..7f7e5ab3f9e3 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -52,7 +52,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Handykey Twiddler keyboard as a joystick driver"
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 23b3071abb6e..e13a9144a25d 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Logitech WingMan Warrior joystick driver"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 75e3b102ce45..603fe0dd3682 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -74,7 +74,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
@@ -125,6 +124,8 @@ static const struct xpad_device {
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
+ { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
@@ -166,8 +167,8 @@ static const struct xpad_device {
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index c4de4388fd7f..30af2e8c670c 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -49,7 +49,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "RC transmitter with 5-byte Zhen Hua protocol joystick driver"
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index bb174c1a9886..a673c9f3a0b9 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -525,7 +525,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARM || COMPILE_TEST
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index ef26b17fb159..4cc14c2fa7d5 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/mfd/adp5520.h>
@@ -71,7 +70,7 @@ static int adp5520_keys_notifier(struct notifier_block *nb,
static int adp5520_keys_probe(struct platform_device *pdev)
{
- struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_keys_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_dev *input;
struct adp5520_keys *dev;
int ret, i;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 3ed23513d881..5ef7fcf0e250 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/workqueue.h>
@@ -77,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ int val;
- return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+ mutex_lock(&kpad->gpio_lock);
+
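+	/* output pins report the cached value last written; input pins are read from hardware */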
+ if (kpad->dir[bank] & bit)
+ val = kpad->dat_out[bank];
+ else
+ val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -173,7 +182,7 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, error;
@@ -227,7 +236,7 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int error;
@@ -321,7 +330,8 @@ static irqreturn_t adp5588_irq(int irq, void *handle)
static int adp5588_setup(struct i2c_client *client)
{
- const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ const struct adp5588_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, ret;
unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
@@ -424,7 +434,8 @@ static int adp5588_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
- const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ const struct adp5588_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct input_dev *input;
unsigned int revid;
int ret, i;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 60dafd4fa692..6329549bf6ad 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -8,7 +8,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/workqueue.h>
@@ -499,7 +498,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
static int adp5589_gpio_add(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
int i, error;
@@ -553,7 +552,7 @@ static int adp5589_gpio_add(struct adp5589_kpad *kpad)
static void adp5589_gpio_remove(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
- const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
+ const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev);
const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
int error;
@@ -658,7 +657,7 @@ static int adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
u8 (*reg) (u8) = kpad->var->reg;
unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
unsigned char pull_mask = 0;
@@ -864,7 +863,7 @@ static int adp5589_probe(struct i2c_client *client,
{
struct adp5589_kpad *kpad;
const struct adp5589_kpad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct input_dev *input;
unsigned int revid;
int ret, i;
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 09b91d093087..e6d46c5994d7 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -180,7 +179,7 @@ static irqreturn_t bfin_kpad_isr(int irq, void *dev_id)
static int bfin_kpad_probe(struct platform_device *pdev)
{
struct bf54x_kpad *bf54x_kpad;
- struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_kpad_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_dev *input;
int i, error;
@@ -333,7 +332,7 @@ out:
static int bfin_kpad_remove(struct platform_device *pdev)
{
- struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_kpad_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
del_timer_sync(&bf54x_kpad->timer);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 7e8b0a52af25..408379669d3c 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -38,6 +38,7 @@
* @row_shift: log2 of number of rows, rounded up
* @keymap_data: Matrix keymap data used to convert to keyscan values
* @ghost_filter: true to enable the matrix key-ghosting filter
+ * @old_kb_state: bitmap of keys pressed last scan
* @dev: Device pointer
* @idev: Input device
* @ec: Top level ChromeOS device to use to talk to EC
@@ -49,6 +50,7 @@ struct cros_ec_keyb {
int row_shift;
const struct matrix_keymap_data *keymap_data;
bool ghost_filter;
+ uint8_t *old_kb_state;
struct device *dev;
struct input_dev *idev;
@@ -135,6 +137,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
struct input_dev *idev = ckdev->idev;
int col, row;
int new_state;
+ int old_state;
int num_cols;
num_cols = len;
@@ -153,18 +156,19 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
for (row = 0; row < ckdev->rows; row++) {
int pos = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
const unsigned short *keycodes = idev->keycode;
- int code;
- code = keycodes[pos];
new_state = kb_state[col] & (1 << row);
- if (!!new_state != test_bit(code, idev->key)) {
+ old_state = ckdev->old_kb_state[col] & (1 << row);
+ if (new_state != old_state) {
dev_dbg(ckdev->dev,
"changed: [r%d c%d]: byte %02x\n",
row, col, new_state);
- input_report_key(idev, code, new_state);
+ input_report_key(idev, keycodes[pos],
+ new_state);
}
}
+ ckdev->old_kb_state[col] = kb_state[col];
}
input_sync(ckdev->idev);
}
@@ -226,6 +230,9 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
&ckdev->cols);
if (err)
return err;
+ ckdev->old_kb_state = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ if (!ckdev->old_kb_state)
+ return -ENOMEM;
idev = devm_input_allocate_device(&pdev->dev);
if (!idev)
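
The hunk above tracks the previous scan in old_kb_state, one byte per column, and reports a key only when its bit differs from the last scan. A minimal sketch of an equivalent change-detection step, reusing the driver's field names purely for illustration (report_col_changes is a hypothetical helper, not part of the patch):

/* Sketch: XOR the new and previous column bytes to find the rows that
 * changed, then report only those bits (one byte per column, one bit
 * per row, as in cros_ec_keyb above). */
static void report_col_changes(struct cros_ec_keyb *ckdev, int col,
			       uint8_t new_byte)
{
	struct input_dev *idev = ckdev->idev;
	const unsigned short *keycodes = idev->keycode;
	uint8_t changed = new_byte ^ ckdev->old_kb_state[col];
	int row;

	for (row = 0; row < ckdev->rows; row++) {
		if (changed & (1 << row)) {
			int pos = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);

			input_report_key(idev, keycodes[pos],
					 new_byte & (1 << row));
		}
	}

	ckdev->old_kb_state[col] = new_byte;
}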
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index d15977a8361e..1559dc1cf951 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -172,7 +172,7 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
struct input_dev *key_dev;
struct resource *res, *mem;
struct device *dev = &pdev->dev;
- struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
+ struct davinci_ks_platform_data *pdata = dev_get_platdata(&pdev->dev);
int error, i;
if (pdata->device_enable) {
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 47206bdba411..e59876212b8c 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -244,7 +244,7 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
- keypad->pdata = pdev->dev.platform_data;
+ keypad->pdata = dev_get_platdata(&pdev->dev);
if (!keypad->pdata) {
err = -EINVAL;
goto failed_free;
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 9f60a2ec88db..69e854763370 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 4e428199e580..e571e194ff84 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 589e3c258f3f..610a8af795a1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -36,7 +36,6 @@
#include <linux/serio.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 328cfc1eed95..cbf4f8038cba 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/input/matrix_keypad.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -425,7 +424,8 @@ MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
static int imx_keypad_probe(struct platform_device *pdev)
{
- const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
+ const struct matrix_keymap_data *keymap_data =
+ dev_get_platdata(&pdev->dev);
struct imx_keypad *keypad;
struct input_dev *input_dev;
struct resource *res;
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index a2a034c25f0b..69b1f002ff52 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index b0ad457ca9d8..cd729d485e98 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index fc0a63c2f278..9fcd9f1d5dc8 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -65,7 +65,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 0de23f41b2d3..0b42118cbf8f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -627,7 +627,7 @@ static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
static int lm8323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct lm8323_platform_data *pdata = client->dev.platform_data;
+ struct lm8323_platform_data *pdata = dev_get_platdata(&client->dev);
struct input_dev *idev;
struct lm8323_chip *lm;
int pwm;
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 5a8ca35dc9af..9081cbef11ea 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -131,7 +131,8 @@ static irqreturn_t lm8333_irq_thread(int irq, void *data)
static int lm8333_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct lm8333_platform_data *pdata = client->dev.platform_data;
+ const struct lm8333_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct lm8333 *lm8333;
struct input_dev *input;
int err, active_time;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 90ff73ace424..8d2e19e81e1e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index bc2cdaf563fd..430b54539720 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -182,7 +182,8 @@ static void max7359_initialize(struct i2c_client *client)
static int max7359_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
+ const struct matrix_keymap_data *keymap_data =
+ dev_get_platdata(&client->dev);
struct max7359_keypad *keypad;
struct input_dev *input_dev;
int ret;
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 7c236f9c6a51..1da8e0b44b56 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
@@ -108,7 +107,7 @@ static int mcs_touchkey_probe(struct i2c_client *client,
int error;
int i;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata) {
dev_err(&client->dev, "no platform data defined\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index f7f3e9a9fd3f..009c82256e89 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -188,7 +187,8 @@ err_i2c_write:
static int mpr_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct mpr121_platform_data *pdata = client->dev.platform_data;
+ const struct mpr121_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct mpr121_touchkey *mpr121;
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index f971898ad591..20f044377990 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "Newton keyboard driver"
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index c7d505cce72f..63332e2f8628 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -222,7 +222,8 @@ static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
static int __init ske_keypad_probe(struct platform_device *pdev)
{
- const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
+ const struct ske_keypad_platform_data *plat =
+ dev_get_platdata(&pdev->dev);
struct ske_keypad *keypad;
struct input_dev *input;
struct resource *res;
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index d0d5226d9cd4..b1acc9852eb7 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -25,7 +25,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
@@ -248,7 +247,7 @@ static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
struct input_dev *input_dev;
- struct omap_kp_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_kp_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i, col_idx, row_idx, ret;
unsigned int row_shift, keycodemax;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 30acfd49fa6c..0400b3f2b4b9 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 186138c720c7..d8241ba0afa0 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 248cdcf95296..374ca0246c8f 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -84,7 +83,8 @@ static void pxa930_rotary_close(struct input_dev *dev)
static int pxa930_rotary_probe(struct platform_device *pdev)
{
- struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
+ struct pxa930_rotary_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct pxa930_rotary *r;
struct input_dev *input_dev;
struct resource *res;
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 6c561ec3cc09..52cd6e88acd7 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -25,7 +25,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 1c0ddad0a1cc..819b22897c13 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index ac43a486c775..5e80fbf7b5ed 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -244,8 +243,8 @@ static void samsung_keypad_close(struct input_dev *input_dev)
}
#ifdef CONFIG_OF
-static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
- struct device *dev)
+static struct samsung_keypad_platdata *
+samsung_keypad_parse_dt(struct device *dev)
{
struct samsung_keypad_platdata *pdata;
struct matrix_keymap_data *keymap_data;
@@ -253,17 +252,22 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
struct device_node *np = dev->of_node, *key_np;
unsigned int key_count;
+ if (!np) {
+ dev_err(dev, "missing device tree data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "could not allocate memory for platform data\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
if (!num_rows || !num_cols) {
dev_err(dev, "number of keypad rows/columns not specified\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
pdata->rows = num_rows;
pdata->cols = num_cols;
@@ -271,7 +275,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
if (!keymap_data) {
dev_err(dev, "could not allocate memory for keymap data\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pdata->keymap_data = keymap_data;
@@ -280,7 +284,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
if (!keymap) {
dev_err(dev, "could not allocate memory for keymap\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
keymap_data->keymap = keymap;
@@ -294,16 +298,19 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
if (of_get_property(np, "linux,input-no-autorepeat", NULL))
pdata->no_autorepeat = true;
+
if (of_get_property(np, "linux,input-wakeup", NULL))
pdata->wakeup = true;
return pdata;
}
#else
-static
-struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
+static struct samsung_keypad_platdata *
+samsung_keypad_parse_dt(struct device *dev)
{
- return NULL;
+ dev_err(dev, "no platform data defined\n");
+
+ return ERR_PTR(-EINVAL);
}
#endif
@@ -318,13 +325,11 @@ static int samsung_keypad_probe(struct platform_device *pdev)
unsigned int keymap_size;
int error;
- if (pdev->dev.of_node)
- pdata = samsung_keypad_parse_dt(&pdev->dev);
- else
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
+ pdata = samsung_keypad_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
}
keymap_data = pdata->keymap_data;
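
Returning ERR_PTR() codes from samsung_keypad_parse_dt() lets probe forward the precise errno instead of mapping every failure to -EINVAL. A minimal, self-contained sketch of the idiom with hypothetical foo_* names (assumption; not the driver's code):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_pdata { unsigned int rows; };

/* Hypothetical DT parser: encode the errno in the returned pointer. */
static struct foo_pdata *foo_parse_dt(struct device *dev)
{
	struct foo_pdata *p;

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);	/* nothing to parse */

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	return p;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		pdata = foo_parse_dt(&pdev->dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);	/* decode back to an int */
	}

	return 0;
}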
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index fe0e498d2479..7abf03b4cc9c 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
@@ -171,7 +170,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
int i;
int irq, error;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data defined\n");
error = -EINVAL;
goto err0;
@@ -198,7 +197,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, priv);
- memcpy(&priv->pdata, pdev->dev.platform_data, sizeof(priv->pdata));
+ memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
pdata = &priv->pdata;
priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 85ff530d9a91..258af10e5811 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -12,7 +12,6 @@
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/io.h>
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 5cbec56f7720..c6727dda68f2 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index cc612c5d5427..a6e0d565e306 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "Stowaway keyboard driver"
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index 5f836b1638c1..dc6bb9d5b4f0 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 208de7cbb7fa..74494a357522 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index bfc832c35a7c..dc983ab6c0ad 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -213,7 +213,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
return -ENODEV;
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata) {
dev_dbg(&client->dev, "no platform data\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 8508879f6faf..9757a58bc897 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -31,7 +31,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
-#include <linux/clk/tegra.h>
+#include <linux/reset.h>
#include <linux/err.h>
#define KBC_MAX_KPENT 8
@@ -116,6 +116,7 @@ struct tegra_kbc {
u32 wakeup_key;
struct timer_list timer;
struct clk *clk;
+ struct reset_control *rst;
const struct tegra_kbc_hw_support *hw_support;
int max_keys;
int num_rows_and_columns;
@@ -373,9 +374,9 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
clk_prepare_enable(kbc->clk);
/* Reset the KBC controller to clear all previous status.*/
- tegra_periph_reset_assert(kbc->clk);
+ reset_control_assert(kbc->rst);
udelay(100);
- tegra_periph_reset_deassert(kbc->clk);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
@@ -663,6 +664,12 @@ static int tegra_kbc_probe(struct platform_device *pdev)
return PTR_ERR(kbc->clk);
}
+ kbc->rst = devm_reset_control_get(&pdev->dev, "kbc");
+ if (IS_ERR(kbc->rst)) {
+ dev_err(&pdev->dev, "failed to get keyboard reset\n");
+ return PTR_ERR(kbc->rst);
+ }
+
/*
* The time delay between two consecutive reads of the FIFO is
* the sum of the repeat time and the time taken for scanning
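
The hunks above replace the Tegra-specific tegra_periph_reset_*() helpers with the generic reset controller API; the handle is obtained with devm_reset_control_get(&pdev->dev, "kbc") and the controller is pulsed around a short delay. A minimal sketch of that pulse, assuming only the "kbc" reset line requested in the probe hunk:

#include <linux/delay.h>
#include <linux/reset.h>

/* Sketch: put the block into reset, wait, and release it again. */
static void kbc_reset_pulse(struct reset_control *rst)
{
	reset_control_assert(rst);
	udelay(100);
	reset_control_deassert(rst);
}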
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 8bd24d52bf1b..086511c2121b 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -162,7 +162,7 @@ static int keypad_probe(struct platform_device *pdev)
int error = 0, sz, row_shift;
u32 rev = 0;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(dev, "cannot find device data\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index d2d178c84ea7..c5a11700a1bf 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -27,12 +27,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/i2c/twl.h>
#include <linux/slab.h>
+#include <linux/of.h>
/*
* The TWL4030 family chips include a keypad controller that supports
@@ -60,6 +60,7 @@
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
+ bool autorepeat;
unsigned n_rows;
unsigned n_cols;
unsigned irq;
@@ -330,70 +331,89 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
*/
static int twl4030_kp_probe(struct platform_device *pdev)
{
- struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data;
+ struct twl4030_keypad_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct matrix_keymap_data *keymap_data = NULL;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
int error;
- if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
- pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
- dev_err(&pdev->dev, "Invalid platform_data\n");
- return -EINVAL;
- }
+ kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
+ if (!kp)
+ return -ENOMEM;
- keymap_data = pdata->keymap_data;
-
- kp = kzalloc(sizeof(*kp), GFP_KERNEL);
- input = input_allocate_device();
- if (!kp || !input) {
- error = -ENOMEM;
- goto err1;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
- /* Get the debug Device */
- kp->dbg_dev = &pdev->dev;
- kp->input = input;
-
- kp->n_rows = pdata->rows;
- kp->n_cols = pdata->cols;
- kp->irq = platform_get_irq(pdev, 0);
+ /* get the debug device */
+ kp->dbg_dev = &pdev->dev;
+ kp->input = input;
/* setup input device */
input->name = "TWL4030 Keypad";
input->phys = "twl4030_keypad/input0";
- input->dev.parent = &pdev->dev;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
input->id.product = 0x0001;
input->id.version = 0x0003;
+ if (pdata) {
+ if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
+ dev_err(&pdev->dev, "Missing platform_data\n");
+ return -EINVAL;
+ }
+
+ kp->n_rows = pdata->rows;
+ kp->n_cols = pdata->cols;
+ kp->autorepeat = pdata->rep;
+ keymap_data = pdata->keymap_data;
+ } else {
+ error = matrix_keypad_parse_of_params(&pdev->dev, &kp->n_rows,
+ &kp->n_cols);
+ if (error)
+ return error;
+
+ kp->autorepeat = true;
+ }
+
+ if (kp->n_rows > TWL4030_MAX_ROWS || kp->n_cols > TWL4030_MAX_COLS) {
+ dev_err(&pdev->dev,
+ "Invalid rows/cols amount specified in platform/devicetree data\n");
+ return -EINVAL;
+ }
+
+ kp->irq = platform_get_irq(pdev, 0);
+ if (kp->irq <= 0) {
+ dev_err(&pdev->dev, "no keyboard irq assigned\n");
+ return -EINVAL;
+ }
+
error = matrix_keypad_build_keymap(keymap_data, NULL,
TWL4030_MAX_ROWS,
1 << TWL4030_ROW_SHIFT,
kp->keymap, input);
if (error) {
dev_err(kp->dbg_dev, "Failed to build keymap\n");
- goto err1;
+ return error;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
/* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
+ if (kp->autorepeat)
__set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
dev_err(kp->dbg_dev,
"Unable to register twl4030 keypad device\n");
- goto err1;
+ return error;
}
error = twl4030_kp_program(kp);
if (error)
- goto err2;
+ return error;
/*
* This ISR will always execute in kernel thread context because of
@@ -401,47 +421,33 @@ static int twl4030_kp_probe(struct platform_device *pdev)
*
* NOTE: we assume this host is wired to TWL4030 INT1, not INT2 ...
*/
- error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
- 0, pdev->name, kp);
+ error = devm_request_threaded_irq(&pdev->dev, kp->irq, NULL, do_kp_irq,
+ 0, pdev->name, kp);
if (error) {
- dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
- kp->irq);
- goto err2;
+ dev_info(kp->dbg_dev, "request_irq failed for irq no=%d: %d\n",
+ kp->irq, error);
+ return error;
}
/* Enable KP and TO interrupts now. */
reg = (u8) ~(KEYP_IMR1_KP | KEYP_IMR1_TO);
if (twl4030_kpwrite_u8(kp, reg, KEYP_IMR1)) {
- error = -EIO;
- goto err3;
+ /* mask all events - we don't care about the result */
+ (void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
+ return -EIO;
}
platform_set_drvdata(pdev, kp);
return 0;
-
-err3:
- /* mask all events - we don't care about the result */
- (void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
- free_irq(kp->irq, kp);
-err2:
- input_unregister_device(input);
- input = NULL;
-err1:
- input_free_device(input);
- kfree(kp);
- return error;
}
-static int twl4030_kp_remove(struct platform_device *pdev)
-{
- struct twl4030_keypad *kp = platform_get_drvdata(pdev);
-
- free_irq(kp->irq, kp);
- input_unregister_device(kp->input);
- kfree(kp);
-
- return 0;
-}
+#ifdef CONFIG_OF
+static const struct of_device_id twl4030_keypad_dt_match_table[] = {
+ { .compatible = "ti,twl4030-keypad" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, twl4030_keypad_dt_match_table);
+#endif
/*
* NOTE: twl4030 are multi-function devices connected via I2C.
@@ -451,10 +457,10 @@ static int twl4030_kp_remove(struct platform_device *pdev)
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
- .remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(twl4030_keypad_dt_match_table),
},
};
module_platform_driver(twl4030_kp_driver);
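
With every resource in the reworked probe managed by devm_* helpers (memory, input device, threaded IRQ), the goto-based unwind paths and the remove() callback become unnecessary; the driver core releases the resources in reverse order when the device is unbound. A minimal sketch of that probe shape, with hypothetical foo_* names (assumption; not the twl4030 code):

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo { struct input_dev *input; };

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;		/* placeholder handler */
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;
	int error, irq;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->input = devm_input_allocate_device(&pdev->dev);
	if (!foo->input)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	error = devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_irq,
					  IRQF_ONESHOT, pdev->name, foo);
	if (error)
		return error;

	/*
	 * No error labels and no remove(): everything above is freed
	 * automatically, newest first, when probe fails or the device
	 * goes away.
	 */
	return input_register_device(foo->input);
}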
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 7b039162a3f8..e8b9d94daae7 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
@@ -121,7 +120,7 @@ static void w90p910_keypad_close(struct input_dev *dev)
static int w90p910_keypad_probe(struct platform_device *pdev)
{
const struct w90p910_keypad_platform_data *pdata =
- pdev->dev.platform_data;
+ dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap_data;
struct w90p910_keypad *keypad;
struct input_dev *input_dev;
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index d050d9d0011b..7c2325bd7408 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/serio.h>
#define DRIVER_DESC "XT keyboard driver"
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index e2413acbbb16..7904ab05527a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -222,6 +222,15 @@ config INPUT_GP2A
To compile this driver as a module, choose M here: the
module will be called gp2ap002a00f.
+config INPUT_GPIO_BEEPER
+ tristate "Generic GPIO Beeper support"
+ depends on OF_GPIO
+ help
+ Say Y here if you have a beeper connected to a GPIO pin.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio-beeper.
+
config INPUT_GPIO_TILT_POLLED
tristate "Polled GPIO tilt switch"
depends on GPIOLIB
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 0ebfb6dbf0f7..cda71fc52fb3 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index 2e5d5e1de647..7a61e9ee682c 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -7,7 +7,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -969,7 +968,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
int error;
struct input_dev *input[MAX_DEVICE_NUM];
- struct ad714x_platform_data *plat_data = dev->platform_data;
+ struct ad714x_platform_data *plat_data = dev_get_platdata(dev);
struct ad714x_chip *ad714x;
void *drv_mem;
unsigned long irqflags;
@@ -986,7 +985,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
goto err_out;
}
- if (dev->platform_data == NULL) {
+ if (dev_get_platdata(dev) == NULL) {
dev_err(dev, "platform data for ad714x doesn't exist\n");
error = -EINVAL;
goto err_out;
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 1cb1da294419..2b2d02f408bb 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -8,7 +8,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -714,7 +713,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
ac->fifo_delay = fifo_delay_default;
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_dbg(dev,
"No platform data: Using default initialization\n");
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 7a04f54ef961..ef2e281b0a43 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,7 +37,6 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
- struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
int ret;
if (!haptics->arizona->dapm) {
@@ -67,13 +66,10 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -81,21 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
-
- mutex_unlock(dapm_mutex);
-
} else {
/* This disable sequence will be a noop if already enabled */
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -103,12 +92,9 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
- mutex_unlock(dapm_mutex);
-
ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
ARIZONA_HAP_CTRL_MASK,
@@ -155,16 +141,11 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
- struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
cancel_work_sync(&haptics->work);
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (haptics->arizona->dapm)
snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
-
- mutex_unlock(dapm_mutex);
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 5d4402365a52..638165c78e75 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -25,11 +25,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/types.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_drivers.h>
#define ACPI_ATLAS_NAME "Atlas ACPI"
#define ACPI_ATLAS_CLASS "Atlas"
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index cd139cb17e32..e69d9bcb37e1 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm.h>
@@ -92,7 +91,7 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
static int bfin_rotary_probe(struct platform_device *pdev)
{
- struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
+ struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct bfin_rot *rotary;
struct input_dev *input;
int error;
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 865c2f9d25b9..52d3a9b28f0b 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -526,7 +526,8 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
static int bma150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct bma150_platform_data *pdata = client->dev.platform_data;
+ const struct bma150_platform_data *pdata =
+ dev_get_platdata(&client->dev);
const struct bma150_cfg *cfg;
struct bma150_data *bma150;
int chip_id;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index df9b756594f8..c7d00748277b 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(cma3000_resume);
struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
const struct cma3000_bus_ops *bops)
{
- const struct cma3000_platform_data *pdata = dev->platform_data;
+ const struct cma3000_platform_data *pdata = dev_get_platdata(dev);
struct cma3000_accl_data *data;
struct input_dev *input_dev;
int rev;
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index b5d71d245854..3e11510ff82d 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/init.h>
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/module.h>
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 020569a499f2..184c8f21ab59 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -28,29 +27,32 @@ struct da9052_onkey {
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
- int key_stat;
+ int ret;
- key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
- if (key_stat < 0) {
+ ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+ if (ret < 0) {
dev_err(onkey->da9052->dev,
- "Failed to read onkey event %d\n", key_stat);
+ "Failed to read onkey event err=%d\n", ret);
} else {
/*
* Since interrupt for deassertion of ONKEY pin is not
* generated, onkey event state determines the onkey
* button state.
*/
- key_stat &= DA9052_EVENTB_ENONKEY;
- input_report_key(onkey->input, KEY_POWER, key_stat);
+ bool pressed = !(ret & DA9052_STATUSA_NONKEY);
+
+ input_report_key(onkey->input, KEY_POWER, pressed);
input_sync(onkey->input);
- }
- /*
- * Interrupt is generated only when the ONKEY pin is asserted.
- * Hence the deassertion of the pin is simulated through work queue.
- */
- if (key_stat)
- schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+ /*
+ * Interrupt is generated only when the ONKEY pin
+ * is asserted. Hence the deassertion of the pin
+ * is simulated through work queue.
+ */
+ if (pressed)
+ schedule_delayed_work(&onkey->work,
+ msecs_to_jiffies(50));
+ }
}
static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index a0af8b2506ce..4b11ede34950 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index a309a5c0899e..0eba94f581df 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index fe30bd0fe4bd..de21e317da32 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -125,7 +125,7 @@ static int gp2a_initialize(struct gp2a_data *dt)
static int gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct gp2a_platform_data *pdata = client->dev.platform_data;
+ const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev);
struct gp2a_data *dt;
int error;
diff --git a/drivers/input/misc/gpio-beeper.c b/drivers/input/misc/gpio-beeper.c
new file mode 100644
index 000000000000..b757435e2b3d
--- /dev/null
+++ b/drivers/input/misc/gpio-beeper.c
@@ -0,0 +1,127 @@
+/*
+ * Generic GPIO beeper driver
+ *
+ * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#define BEEPER_MODNAME "gpio-beeper"
+
+struct gpio_beeper {
+ struct work_struct work;
+ int gpio;
+ bool active_low;
+ bool beeping;
+};
+
+static void gpio_beeper_toggle(struct gpio_beeper *beep, bool on)
+{
+ gpio_set_value_cansleep(beep->gpio, on ^ beep->active_low);
+}
+
+static void gpio_beeper_work(struct work_struct *work)
+{
+ struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work);
+
+ gpio_beeper_toggle(beep, beep->beeping);
+}
+
+static int gpio_beeper_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct gpio_beeper *beep = input_get_drvdata(dev);
+
+ if (type != EV_SND || code != SND_BELL)
+ return -ENOTSUPP;
+
+ if (value < 0)
+ return -EINVAL;
+
+ beep->beeping = value;
+ /* Schedule work to actually turn the beeper on or off */
+ schedule_work(&beep->work);
+
+ return 0;
+}
+
+static void gpio_beeper_close(struct input_dev *input)
+{
+ struct gpio_beeper *beep = input_get_drvdata(input);
+
+ cancel_work_sync(&beep->work);
+ gpio_beeper_toggle(beep, false);
+}
+
+static int gpio_beeper_probe(struct platform_device *pdev)
+{
+ struct gpio_beeper *beep;
+ enum of_gpio_flags flags;
+ struct input_dev *input;
+ unsigned long gflags;
+ int err;
+
+ beep = devm_kzalloc(&pdev->dev, sizeof(*beep), GFP_KERNEL);
+ if (!beep)
+ return -ENOMEM;
+
+ beep->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+ if (!gpio_is_valid(beep->gpio))
+ return beep->gpio;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ INIT_WORK(&beep->work, gpio_beeper_work);
+
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+ input->close = gpio_beeper_close;
+ input->event = gpio_beeper_event;
+
+ input_set_capability(input, EV_SND, SND_BELL);
+
+ beep->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ gflags = beep->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+
+ err = devm_gpio_request_one(&pdev->dev, beep->gpio, gflags, pdev->name);
+ if (err)
+ return err;
+
+ input_set_drvdata(input, beep);
+
+ return input_register_device(input);
+}
+
+static struct of_device_id gpio_beeper_of_match[] = {
+ { .compatible = BEEPER_MODNAME, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_beeper_of_match);
+
+static struct platform_driver gpio_beeper_platform_driver = {
+ .driver = {
+ .name = BEEPER_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_beeper_of_match,
+ },
+ .probe = gpio_beeper_probe,
+};
+module_platform_driver(gpio_beeper_platform_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Generic GPIO beeper driver");
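
The new driver reacts only to EV_SND/SND_BELL, so anything that can write to the matching evdev node can drive the beeper. A minimal user-space sketch; /dev/input/event5 is a placeholder for whatever node the "gpio-beeper" device gets on the target system:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event5", O_WRONLY);

	if (fd < 0)
		return 1;

	memset(&ev, 0, sizeof(ev));
	ev.type = EV_SND;
	ev.code = SND_BELL;

	ev.value = 1;				/* beeper on */
	if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
		return 1;

	sleep(1);

	ev.value = 0;				/* beeper off */
	if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
		return 1;

	close(fd);
	return 0;
}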
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 714c68369134..1a81d9115226 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input-polldev.h>
@@ -98,7 +97,8 @@ static void gpio_tilt_polled_close(struct input_polled_dev *dev)
static int gpio_tilt_polled_probe(struct platform_device *pdev)
{
- const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+ const struct gpio_tilt_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct gpio_tilt_polled_dev *tdev;
struct input_polled_dev *poll_dev;
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 290fa5f97ded..01f3b5b300f3 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index a993b67a8a5b..d708478bc5b5 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -509,7 +509,8 @@ out:
static int kxtj9_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct kxtj9_platform_data *pdata = client->dev.platform_data;
+ const struct kxtj9_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct kxtj9_data *tj9;
int err;
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index e973133212a5..1fea5484941f 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -23,7 +23,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index d0277a7b1579..0df6e8d8bd03 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 6983ffbbfb94..5e5051351c3a 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -30,7 +30,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index 40ac9a5adf89..cd230365166e 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 73b13ebabe56..db92f4f3c99b 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 0deca5a3c87f..97f711a7bd20 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 7288b267613d..674a2cfc3c0e 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i8253.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/timex.h>
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ec086f6f3cc3..b88b7cbf93e2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -11,13 +11,12 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <linux/mfd/pm8xxx/core.h>
+#include <linux/regmap.h>
#define VIB_DRV 0x4A
@@ -35,7 +34,7 @@
* struct pm8xxx_vib - structure to hold vibrator data
* @vib_input_dev: input device supporting force feedback
* @work: work structure to set the vibration parameters
- * @dev: device supporting force feedback
+ * @regmap: regmap for register read/write
* @speed: speed of vibration set from userland
* @active: state of vibrator
* @level: level of vibration to set in the chip
@@ -44,7 +43,7 @@
struct pm8xxx_vib {
struct input_dev *vib_input_dev;
struct work_struct work;
- struct device *dev;
+ struct regmap *regmap;
int speed;
int level;
bool active;
@@ -52,42 +51,6 @@ struct pm8xxx_vib {
};
/**
- * pm8xxx_vib_read_u8 - helper to read a byte from pmic chip
- * @vib: pointer to vibrator structure
- * @data: placeholder for data to be read
- * @reg: register address
- */
-static int pm8xxx_vib_read_u8(struct pm8xxx_vib *vib,
- u8 *data, u16 reg)
-{
- int rc;
-
- rc = pm8xxx_readb(vib->dev->parent, reg, data);
- if (rc < 0)
- dev_warn(vib->dev, "Error reading pm8xxx reg 0x%x(0x%x)\n",
- reg, rc);
- return rc;
-}
-
-/**
- * pm8xxx_vib_write_u8 - helper to write a byte to pmic chip
- * @vib: pointer to vibrator structure
- * @data: data to write
- * @reg: register address
- */
-static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
- u8 data, u16 reg)
-{
- int rc;
-
- rc = pm8xxx_writeb(vib->dev->parent, reg, data);
- if (rc < 0)
- dev_warn(vib->dev, "Error writing pm8xxx reg 0x%x(0x%x)\n",
- reg, rc);
- return rc;
-}
-
-/**
* pm8xxx_vib_set - handler to start/stop vibration
* @vib: pointer to vibrator structure
* @on: state to set
@@ -95,14 +58,14 @@ static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
{
int rc;
- u8 val = vib->reg_vib_drv;
+ unsigned int val = vib->reg_vib_drv;
if (on)
val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
else
val &= ~VIB_DRV_SEL_MASK;
- rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ rc = regmap_write(vib->regmap, VIB_DRV, val);
if (rc < 0)
return rc;
@@ -118,9 +81,9 @@ static void pm8xxx_work_handler(struct work_struct *work)
{
struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
int rc;
- u8 val;
+ unsigned int val;
- rc = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ rc = regmap_read(vib->regmap, VIB_DRV, &val);
if (rc < 0)
return;
@@ -184,34 +147,37 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
struct pm8xxx_vib *vib;
struct input_dev *input_dev;
int error;
- u8 val;
-
- vib = kzalloc(sizeof(*vib), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!vib || !input_dev) {
- dev_err(&pdev->dev, "couldn't allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ unsigned int val;
+
+ vib = devm_kzalloc(&pdev->dev, sizeof(*vib), GFP_KERNEL);
+ if (!vib)
+ return -ENOMEM;
+
+ vib->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!vib->regmap)
+ return -ENODEV;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
INIT_WORK(&vib->work, pm8xxx_work_handler);
- vib->dev = &pdev->dev;
vib->vib_input_dev = input_dev;
/* operate in manual mode */
- error = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+ error = regmap_read(vib->regmap, VIB_DRV, &val);
if (error < 0)
- goto err_free_mem;
+ return error;
+
val &= ~VIB_DRV_EN_MANUAL_MASK;
- error = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+ error = regmap_write(vib->regmap, VIB_DRV, val);
if (error < 0)
- goto err_free_mem;
+ return error;
vib->reg_vib_drv = val;
input_dev->name = "pm8xxx_vib_ffmemless";
input_dev->id.version = 1;
- input_dev->dev.parent = &pdev->dev;
input_dev->close = pm8xxx_vib_close;
input_set_drvdata(input_dev, vib);
input_set_capability(vib->vib_input_dev, EV_FF, FF_RUMBLE);
@@ -221,35 +187,17 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
if (error) {
dev_err(&pdev->dev,
"couldn't register vibrator as FF device\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "couldn't register input device\n");
- goto err_destroy_memless;
+ return error;
}
platform_set_drvdata(pdev, vib);
return 0;
-
-err_destroy_memless:
- input_ff_destroy(input_dev);
-err_free_mem:
- input_free_device(input_dev);
- kfree(vib);
-
- return error;
-}
-
-static int pm8xxx_vib_remove(struct platform_device *pdev)
-{
- struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
-
- input_unregister_device(vib->vib_input_dev);
- kfree(vib);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -268,7 +216,6 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
- .remove = pm8xxx_vib_remove,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
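
After this conversion, register access goes through the regmap published by the parent PMIC (dev_get_regmap(pdev->dev.parent, NULL)) rather than the driver-private pm8xxx_vib_read_u8()/write_u8() helpers. The same API would also allow the read-modify-write in pm8xxx_vib_set() to collapse into a single call; a sketch of that possible further simplification (not part of the patch):

/* Sketch only: regmap_update_bits() performs the read-modify-write of
 * the VIB_DRV_SEL field in one call; names come from the driver above. */
static int pm8xxx_vib_set_sketch(struct pm8xxx_vib *vib, bool on)
{
	unsigned int sel = 0;

	if (on)
		sel = (vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK;

	return regmap_update_bits(vib->regmap, VIB_DRV,
				  VIB_DRV_SEL_MASK, sel);
}

Note that the driver also caches the register value in reg_vib_drv, so a real conversion along these lines would still need to keep that copy in sync.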
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b49b738aa9c6..0e1a05f95858 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -11,16 +11,15 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/log2.h>
-#include <linux/mfd/pm8xxx/core.h>
#include <linux/input/pmic8xxx-pwrkey.h>
#define PON_CNTL_1 0x1C
@@ -32,26 +31,25 @@
* @key_press_irq: key press irq number
*/
struct pmic8xxx_pwrkey {
- struct input_dev *pwr;
int key_press_irq;
};
-static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
+static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
{
- struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+ struct input_dev *pwr = _pwr;
- input_report_key(pwrkey->pwr, KEY_POWER, 1);
- input_sync(pwrkey->pwr);
+ input_report_key(pwr, KEY_POWER, 1);
+ input_sync(pwr);
return IRQ_HANDLED;
}
-static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey)
+static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
{
- struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+ struct input_dev *pwr = _pwr;
- input_report_key(pwrkey->pwr, KEY_POWER, 0);
- input_sync(pwrkey->pwr);
+ input_report_key(pwr, KEY_POWER, 0);
+ input_sync(pwr);
return IRQ_HANDLED;
}
@@ -88,7 +86,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
int key_press_irq = platform_get_irq(pdev, 1);
int err;
unsigned int delay;
- u8 pon_cntl;
+ unsigned int pon_cntl;
+ struct regmap *regmap;
struct pmic8xxx_pwrkey *pwrkey;
const struct pm8xxx_pwrkey_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -103,30 +102,36 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to locate regmap for the device\n");
+ return -ENODEV;
+ }
+
+ pwrkey = devm_kzalloc(&pdev->dev, sizeof(*pwrkey), GFP_KERNEL);
if (!pwrkey)
return -ENOMEM;
- pwr = input_allocate_device();
+ pwrkey->key_press_irq = key_press_irq;
+
+ pwr = devm_input_allocate_device(&pdev->dev);
if (!pwr) {
dev_dbg(&pdev->dev, "Can't allocate power button\n");
- err = -ENOMEM;
- goto free_pwrkey;
+ return -ENOMEM;
}
input_set_capability(pwr, EV_KEY, KEY_POWER);
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- pwr->dev.parent = &pdev->dev;
delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
delay = 1 + ilog2(delay);
- err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
+ err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
- goto free_input_dev;
+ return err;
}
pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
@@ -136,69 +141,46 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
else
pon_cntl &= ~PON_CNTL_PULL_UP;
- err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl);
+ err = regmap_write(regmap, PON_CNTL_1, pon_cntl);
if (err < 0) {
dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
- goto free_input_dev;
+ return err;
}
- err = input_register_device(pwr);
+ err = devm_request_irq(&pdev->dev, key_press_irq, pwrkey_press_irq,
+ IRQF_TRIGGER_RISING,
+ "pmic8xxx_pwrkey_press", pwr);
if (err) {
- dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
- goto free_input_dev;
+ dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+ key_press_irq, err);
+ return err;
}
- pwrkey->key_press_irq = key_press_irq;
- pwrkey->pwr = pwr;
-
- platform_set_drvdata(pdev, pwrkey);
-
- err = request_irq(key_press_irq, pwrkey_press_irq,
- IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
- if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
- key_press_irq, err);
- goto unreg_input_dev;
+ err = devm_request_irq(&pdev->dev, key_release_irq, pwrkey_release_irq,
+ IRQF_TRIGGER_RISING,
+ "pmic8xxx_pwrkey_release", pwr);
+ if (err) {
+ dev_err(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+ key_release_irq, err);
+ return err;
}
- err = request_irq(key_release_irq, pwrkey_release_irq,
- IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
- if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
- key_release_irq, err);
-
- goto free_press_irq;
+ err = input_register_device(pwr);
+ if (err) {
+ dev_err(&pdev->dev, "Can't register power key: %d\n", err);
+ return err;
}
+ platform_set_drvdata(pdev, pwrkey);
device_init_wakeup(&pdev->dev, pdata->wakeup);
return 0;
-
-free_press_irq:
- free_irq(key_press_irq, pwrkey);
-unreg_input_dev:
- input_unregister_device(pwr);
- pwr = NULL;
-free_input_dev:
- input_free_device(pwr);
-free_pwrkey:
- kfree(pwrkey);
- return err;
}
static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
{
- struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
- int key_release_irq = platform_get_irq(pdev, 0);
- int key_press_irq = platform_get_irq(pdev, 1);
-
device_init_wakeup(&pdev->dev, 0);
- free_irq(key_press_irq, pwrkey);
- free_irq(key_release_irq, pwrkey);
- input_unregister_device(pwrkey->pwr);
- kfree(pwrkey);
-
return 0;
}
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 49c0c3ebd321..63b539d3daba 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -31,7 +31,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 940566e7be13..8ef288e7c971 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -68,7 +68,7 @@ static int pwm_beeper_event(struct input_dev *input,
static int pwm_beeper_probe(struct platform_device *pdev)
{
- unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
+ unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
struct pwm_beeper *beeper;
int error;
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
index 7ca09baa0016..4bff1aa9b0db 100644
--- a/drivers/input/misc/retu-pwrbutton.c
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -17,7 +17,6 @@
*/
#include <linux/irq.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/input.h>
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index f920ba7ab51f..99b9e42aa748 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/device.h>
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 95cf299ef9a3..f10474937a64 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -17,7 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/init.h>
#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/module.h>
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 7b8b03e0d0be..e8897c36d21b 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b9a05fda03e4..fb3b63b2f85c 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -52,15 +52,15 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
return IRQ_HANDLED;
}
-static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
+static int twl4030_pwrbutton_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
int irq = platform_get_irq(pdev, 0);
int err;
- pwr = input_allocate_device();
+ pwr = devm_input_allocate_device(&pdev->dev);
if (!pwr) {
- dev_dbg(&pdev->dev, "Can't allocate power button\n");
+ dev_err(&pdev->dev, "Can't allocate power button\n");
return -ENOMEM;
}
@@ -70,52 +70,42 @@ static int __init twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = request_threaded_irq(irq, NULL, powerbutton_irq,
+ err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_pwrbutton", pwr);
if (err < 0) {
- dev_dbg(&pdev->dev, "Can't get IRQ for pwrbutton: %d\n", err);
- goto free_input_dev;
+ dev_err(&pdev->dev, "Can't get IRQ for pwrbutton: %d\n", err);
+ return err;
}
err = input_register_device(pwr);
if (err) {
- dev_dbg(&pdev->dev, "Can't register power button: %d\n", err);
- goto free_irq;
+ dev_err(&pdev->dev, "Can't register power button: %d\n", err);
+ return err;
}
platform_set_drvdata(pdev, pwr);
return 0;
-
-free_irq:
- free_irq(irq, pwr);
-free_input_dev:
- input_free_device(pwr);
- return err;
}
-static int __exit twl4030_pwrbutton_remove(struct platform_device *pdev)
-{
- struct input_dev *pwr = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- free_irq(irq, pwr);
- input_unregister_device(pwr);
-
- return 0;
-}
+#ifdef CONFIG_OF
+static const struct of_device_id twl4030_pwrbutton_dt_match_table[] = {
+ { .compatible = "ti,twl4030-pwrbutton" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, twl4030_pwrbutton_dt_match_table);
+#endif
static struct platform_driver twl4030_pwrbutton_driver = {
- .remove = __exit_p(twl4030_pwrbutton_remove),
+ .probe = twl4030_pwrbutton_probe,
.driver = {
.name = "twl4030_pwrbutton",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(twl4030_pwrbutton_dt_match_table),
},
};
-
-module_platform_driver_probe(twl4030_pwrbutton_driver,
- twl4030_pwrbutton_probe);
+module_platform_driver(twl4030_pwrbutton_driver);
MODULE_ALIAS("platform:twl4030_pwrbutton");
MODULE_DESCRIPTION("Triton2 Power Button");
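Dropping the __init/__exit annotations and the remove() callback lets the twl4030-pwrbutton driver use the plain module_platform_driver() helper instead of module_platform_driver_probe(), and the new of_device_id table makes it probeable from the device tree. A stripped-down sketch of that boilerplate follows; only the "ti,twl4030-pwrbutton" compatible string comes from the patch, everything else is a placeholder.

```c
/* Hedged sketch: DT-probeable platform driver boilerplate.
 * The compatible string matches the patch; the probe body and the
 * "example_pwrbutton" name are placeholders.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* allocate with devm_*, request IRQs, register the input device */
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id example_dt_match[] = {
	{ .compatible = "ti,twl4030-pwrbutton" },
	{ },
};
MODULE_DEVICE_TABLE(of, example_dt_match);
#endif

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example_pwrbutton",
		.of_match_table	= of_match_ptr(example_dt_match),
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
```

module_platform_driver() is appropriate here because the probe function is no longer marked __init, so the driver can be bound and unbound like any other platform driver.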
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 68a5f33152a8..960ef2a70910 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -185,15 +185,17 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
if (pdata && pdata->coexist)
return true;
- if (of_find_node_by_name(node, "codec"))
+ if (of_find_node_by_name(node, "codec")) {
+ of_node_put(node);
return true;
+ }
return false;
}
static int twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_vibra_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 7864b0c3ebb3..77dc23b94eb1 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -258,17 +258,14 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int twl6040_vibra_probe(struct platform_device *pdev)
{
struct device *twl6040_core_dev = pdev->dev.parent;
- struct device_node *twl6040_core_node = NULL;
+ struct device_node *twl6040_core_node;
struct vibra_info *info;
int vddvibl_uV = 0;
int vddvibr_uV = 0;
int ret;
-#ifdef CONFIG_OF
twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
"vibra");
-#endif
-
if (!twl6040_core_node) {
dev_err(&pdev->dev, "parent of node is missing?\n");
return -EINVAL;
@@ -276,6 +273,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
+ of_node_put(twl6040_core_node);
dev_err(&pdev->dev, "couldn't allocate memory\n");
return -ENOMEM;
}
@@ -295,6 +293,8 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV);
of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV);
+ of_node_put(twl6040_core_node);
+
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index caa2c4068f09..173b6dcca0da 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index e21c1816a8f9..fbfdc10573be 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -29,6 +29,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/kbdif.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenkbd_info {
struct input_dev *kbd;
@@ -380,6 +381,9 @@ static int __init xenkbd_init(void)
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenkbd_driver);
}
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 285a5bd6cbc9..79c964c075f1 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -47,7 +47,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/rwsem.h>
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 5cf62e315218..fb15c64ffb95 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -277,6 +277,57 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
}
/*
+ * Process bitmap data for V5 protocols. This function has no return value.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of at most two contacts.
+ * These two points are returned in x1, y1, x2, and y2.
+ */
+static void alps_process_bitmap_dolphin(struct alps_data *priv,
+ struct alps_fields *fields,
+ int *x1, int *y1, int *x2, int *y2)
+{
+ int box_middle_x, box_middle_y;
+ unsigned int x_map, y_map;
+ unsigned char start_bit, end_bit;
+ unsigned char x_msb, x_lsb, y_msb, y_lsb;
+
+ x_map = fields->x_map;
+ y_map = fields->y_map;
+
+ if (!x_map || !y_map)
+ return;
+
+ /* Get Most-significant and Least-significant bit */
+ x_msb = fls(x_map);
+ x_lsb = ffs(x_map);
+ y_msb = fls(y_map);
+ y_lsb = ffs(y_map);
+
+ /* Most-significant bit should never exceed max sensor line number */
+ if (x_msb > priv->x_bits || y_msb > priv->y_bits)
+ return;
+
+ *x1 = *y1 = *x2 = *y2 = 0;
+
+ if (fields->fingers > 1) {
+ start_bit = priv->x_bits - x_msb;
+ end_bit = priv->x_bits - x_lsb;
+ box_middle_x = (priv->x_max * (start_bit + end_bit)) /
+ (2 * (priv->x_bits - 1));
+
+ start_bit = y_lsb - 1;
+ end_bit = y_msb - 1;
+ box_middle_y = (priv->y_max * (start_bit + end_bit)) /
+ (2 * (priv->y_bits - 1));
+ *x1 = fields->x;
+ *y1 = fields->y;
+ *x2 = 2 * box_middle_x - *x1;
+ *y2 = 2 * box_middle_y - *y1;
+ }
+}
+
+/*
* Process bitmap data from v3 and v4 protocols. Returns the number of
* fingers detected. A return value of 0 means at least one of the
* bitmaps was empty.
@@ -481,7 +532,8 @@ static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
f->ts_middle = !!(p[3] & 0x40);
}
-static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
+static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
f->first_mp = !!(p[4] & 0x40);
f->is_mp = !!(p[0] & 0x40);
@@ -502,48 +554,61 @@ static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
alps_decode_buttons_v3(f, p);
}
-static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
+static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
- alps_decode_pinnacle(f, p);
+ alps_decode_pinnacle(f, p, psmouse);
f->x_map |= (p[5] & 0x10) << 11;
f->y_map |= (p[5] & 0x20) << 6;
}
-static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
+static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse)
{
+ u64 palm_data = 0;
+ struct alps_data *priv = psmouse->private;
+
f->first_mp = !!(p[0] & 0x02);
f->is_mp = !!(p[0] & 0x20);
- f->fingers = ((p[0] & 0x6) >> 1 |
+ if (!f->is_mp) {
+ f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
+ f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
+ f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
+ alps_decode_buttons_v3(f, p);
+ } else {
+ f->fingers = ((p[0] & 0x6) >> 1 |
(p[0] & 0x10) >> 2);
- f->x_map = ((p[2] & 0x60) >> 5) |
- ((p[4] & 0x7f) << 2) |
- ((p[5] & 0x7f) << 9) |
- ((p[3] & 0x07) << 16) |
- ((p[3] & 0x70) << 15) |
- ((p[0] & 0x01) << 22);
- f->y_map = (p[1] & 0x7f) |
- ((p[2] & 0x1f) << 7);
-
- f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
- f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
- f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
- alps_decode_buttons_v3(f, p);
+ palm_data = (p[1] & 0x7f) |
+ ((p[2] & 0x7f) << 7) |
+ ((p[4] & 0x7f) << 14) |
+ ((p[5] & 0x7f) << 21) |
+ ((p[3] & 0x07) << 28) |
+ (((u64)p[3] & 0x70) << 27) |
+ (((u64)p[0] & 0x01) << 34);
+
+ /* Y-profile is stored in p(0) to p(n-1), n = y_bits; */
+ f->y_map = palm_data & (BIT(priv->y_bits) - 1);
+
+ /* X-profile is stored in p(n) to p(n+m-1), m = x_bits; */
+ f->x_map = (palm_data >> priv->y_bits) &
+ (BIT(priv->x_bits) - 1);
+ }
}
-static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
- int fingers = 0, bmap_fingers;
- struct alps_fields f;
+ int fingers = 0, bmap_fn;
+ struct alps_fields f = {0};
- priv->decode_fields(&f, packet);
+ priv->decode_fields(&f, packet, psmouse);
/*
* There's no single feature of touchpad position and bitmap packets
@@ -560,19 +625,38 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
*/
if (f.is_mp) {
fingers = f.fingers;
- bmap_fingers = alps_process_bitmap(priv,
- f.x_map, f.y_map,
- &x1, &y1, &x2, &y2);
-
- /*
- * We shouldn't report more than one finger if
- * we don't have two coordinates.
- */
- if (fingers > 1 && bmap_fingers < 2)
- fingers = bmap_fingers;
-
- /* Now process position packet */
- priv->decode_fields(&f, priv->multi_data);
+ if (priv->proto_version == ALPS_PROTO_V3) {
+ bmap_fn = alps_process_bitmap(priv, f.x_map,
+ f.y_map, &x1, &y1,
+ &x2, &y2);
+
+ /*
+ * We shouldn't report more than one finger if
+ * we don't have two coordinates.
+ */
+ if (fingers > 1 && bmap_fn < 2)
+ fingers = bmap_fn;
+
+ /* Now process position packet */
+ priv->decode_fields(&f, priv->multi_data,
+ psmouse);
+ } else {
+ /*
+ * Dolphin uses the position packet's
+ * coordinate data as Pt1 and derives Pt2
+ * from it, so the position packet has to
+ * be decoded first.
+ */
+ priv->decode_fields(&f, priv->multi_data,
+ psmouse);
+
+ /*
+ * Since Dolphin's finger number is reliable,
+ * there is no need to compare with bmap_fn.
+ */
+ alps_process_bitmap_dolphin(priv, &f, &x1, &y1,
+ &x2, &y2);
+ }
} else {
priv->multi_packet = 0;
}
@@ -662,7 +746,7 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
return;
}
- alps_process_touchpad_packet_v3(psmouse);
+ alps_process_touchpad_packet_v3_v5(psmouse);
}
static void alps_process_packet_v6(struct psmouse *psmouse)
@@ -1709,6 +1793,52 @@ error:
return -1;
}
+static int alps_dolphin_get_device_area(struct psmouse *psmouse,
+ struct alps_data *priv)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4] = {0};
+ int num_x_electrode, num_y_electrode;
+
+ if (alps_enter_command_mode(psmouse))
+ return -1;
+
+ param[0] = 0x0a;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ /*
+ * Dolphin's sensor line number is not fixed. It can be calculated
+ * by adding DOLPHIN_PROFILE_X/YOFFSET to the device's register value.
+ * Furthermore, the device's x_max and y_max are obtained by multiplying
+ * the sensor line number with DOLPHIN_COUNT_PER_ELECTRODE.
+ *
+ * e.g. When we get register's sensor_x = 11 & sensor_y = 8,
+ * real sensor line number X = 11 + 8 = 19, and
+ * real sensor line number Y = 8 + 1 = 9.
+ * So, x_max = (19 - 1) * 64 = 1152, and
+ * y_max = (9 - 1) * 64 = 512.
+ */
+ num_x_electrode = DOLPHIN_PROFILE_XOFFSET + (param[2] & 0x0F);
+ num_y_electrode = DOLPHIN_PROFILE_YOFFSET + ((param[2] >> 4) & 0x0F);
+ priv->x_bits = num_x_electrode;
+ priv->y_bits = num_y_electrode;
+ priv->x_max = (num_x_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;
+ priv->y_max = (num_y_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;
+
+ if (alps_exit_command_mode(psmouse))
+ return -1;
+
+ return 0;
+}
+
static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
@@ -1763,13 +1893,13 @@ static void alps_set_defaults(struct alps_data *priv)
break;
case ALPS_PROTO_V5:
priv->hw_init = alps_hw_init_dolphin_v1;
- priv->process_packet = alps_process_packet_v3;
+ priv->process_packet = alps_process_touchpad_packet_v3_v5;
priv->decode_fields = alps_decode_dolphin;
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
priv->byte0 = 0xc8;
- priv->mask0 = 0xc8;
+ priv->mask0 = 0xd8;
priv->flags = 0;
priv->x_max = 1360;
priv->y_max = 660;
@@ -1845,11 +1975,13 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
if (alps_match_table(psmouse, priv, e7, ec) == 0) {
return 0;
} else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
- ec[0] == 0x73 && ec[1] == 0x01) {
+ ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
priv->proto_version = ALPS_PROTO_V5;
alps_set_defaults(priv);
-
- return 0;
+ if (alps_dolphin_get_device_area(psmouse, priv))
+ return -EIO;
+ else
+ return 0;
} else if (ec[0] == 0x88 && ec[1] == 0x08) {
priv->proto_version = ALPS_PROTO_V3;
alps_set_defaults(priv);
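The worked example in the comment of alps_dolphin_get_device_area() can be checked with ordinary arithmetic: with sensor_x = 11 in the low nibble and sensor_y = 8 in the high nibble of the register byte, the offsets 8 and 1 give 19 and 9 electrode lines, and multiplying (lines - 1) by 64 counts per electrode yields x_max = 1152 and y_max = 512. The small standalone program below reproduces that calculation; the register byte 0x8b is a hypothetical encoding of those two nibbles.

```c
/* Hedged sketch: reproduce the Dolphin x_max/y_max arithmetic from the
 * comment in alps_dolphin_get_device_area(). 0x8b is a hypothetical
 * register value encoding sensor_x = 11 (low nibble) and sensor_y = 8
 * (high nibble).
 */
#include <stdio.h>

#define DOLPHIN_COUNT_PER_ELECTRODE	64
#define DOLPHIN_PROFILE_XOFFSET		8
#define DOLPHIN_PROFILE_YOFFSET		1

int main(void)
{
	unsigned char reg = 0x8b;
	int num_x_electrode = DOLPHIN_PROFILE_XOFFSET + (reg & 0x0f);
	int num_y_electrode = DOLPHIN_PROFILE_YOFFSET + ((reg >> 4) & 0x0f);
	int x_max = (num_x_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;
	int y_max = (num_y_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE;

	/* Prints: X lines 19, Y lines 9, x_max 1152, y_max 512 */
	printf("X lines %d, Y lines %d, x_max %d, y_max %d\n",
	       num_x_electrode, num_y_electrode, x_max, y_max);
	return 0;
}
```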
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 704f0f924307..03f88b6940c7 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -19,6 +19,10 @@
#define ALPS_PROTO_V5 5
#define ALPS_PROTO_V6 6
+#define DOLPHIN_COUNT_PER_ELECTRODE 64
+#define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */
+#define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */
+
/**
* struct alps_model_info - touchpad ID table
* @signature: E7 response string to match.
@@ -146,7 +150,8 @@ struct alps_data {
int (*hw_init)(struct psmouse *psmouse);
void (*process_packet)(struct psmouse *psmouse);
- void (*decode_fields)(struct alps_fields *f, unsigned char *p);
+ void (*decode_fields)(struct alps_fields *f, unsigned char *p,
+ struct psmouse *psmouse);
void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
int prev_fin;
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index e42f1fa8cdc0..800ca7dfafc2 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index a73f9618b0ad..c329cdb0b91a 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index a5869a856ea5..8af34ffe208b 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -15,7 +15,6 @@
* the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -410,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 597e9b8fc18d..ef1cf52f8bb9 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
unsigned char *packet = psmouse->packet;
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
@@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
}
/*
+ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
+ * fw_version for this is based on the following fw_version & caps table:
+ *
+ * Laptop-model: fw_version: caps: buttons:
+ * Acer S3 0x461f00 10, 13, 0e clickpad
+ * Acer S7-392 0x581f01 50, 17, 0d clickpad
+ * Acer V5-131 0x461f02 01, 16, 0c clickpad
+ * Acer V5-551 0x461f00 ? clickpad
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
+ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
+ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
+ * Samsung NP900X3E-A02 0x575f03 ? clickpad
+ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
+ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
+ * Samsung RF710 0x450f00 ? 2 hw buttons
+ * System76 Pangolin 0x250f01 ? 2 hw buttons
+ * (*) + 3 trackpoint buttons
+ */
+static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+
+ if (etd->fw_version & 0x001000) {
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ }
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
/* fall through */
case 3:
+ if (etd->hw_version == 3)
+ elantech_set_buttonpad_prop(psmouse);
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
if (etd->reports_pressure) {
@@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
*/
psmouse_warn(psmouse, "couldn't query resolution data.\n");
}
- /* v4 is clickpad, with only one button. */
- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- __clear_bit(BTN_RIGHT, dev->keybit);
+ elantech_set_buttonpad_prop(psmouse);
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
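elantech_set_buttonpad_prop() keys off bit 12 of fw_version (mask 0x001000) to decide whether the pad is a clickpad, as documented by the table added in the comment. The mask test can be sanity-checked against two entries from that table with a tiny standalone program; the fw_version values below are copied from the comment.

```c
/* Hedged sketch: check the 0x001000 clickpad bit against two fw_version
 * values taken from the table in the patch (Acer S3, a clickpad, versus
 * Asus K53SV, which has two hardware buttons).
 */
#include <stdio.h>
#include <stdbool.h>

static bool is_clickpad(unsigned int fw_version)
{
	/* Bit 12 of fw_version marks clickpads on these models. */
	return fw_version & 0x001000;
}

int main(void)
{
	printf("Acer S3    (0x461f00): %s\n",
	       is_clickpad(0x461f00) ? "clickpad" : "hw buttons");
	printf("Asus K53SV (0x450f01): %s\n",
	       is_clickpad(0x450f01) ? "clickpad" : "hw buttons");
	return 0;
}
```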
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 6b44413f54e3..8c7d94200bdb 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
@@ -48,7 +47,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
static int gpio_mouse_probe(struct platform_device *pdev)
{
- struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_mouse_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct input_polled_dev *input_poll;
struct input_dev *input;
int pin, i;
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index 84de2fc6acc1..136e222e2a16 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -220,7 +220,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
{ 61, PS2PP_KIND_MX, /* MX700 */
PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
PS2PP_EXTRA_BTN | PS2PP_NAV_BTN },
- { 66, PS2PP_KIND_MX, /* MX3100 reciver */
+ { 66, PS2PP_KIND_MX, /* MX3100 receiver */
PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
{ 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 0b8d33591dee..1ccc88af1f0b 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 0ecb9e7945eb..9b4d9a59e229 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -166,7 +165,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
if (!trkball)
return -ENOMEM;
- trkball->pdata = pdev->dev.platform_data;
+ trkball->pdata = dev_get_platdata(&pdev->dev);
if (!trkball->pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
error = -EINVAL;
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index d5928fd0c914..8df526620ebf 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -32,7 +32,6 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Serial mouse driver"
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9d2569..d8d49d10f9bb 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
+
+static const int *quirk_min_max;
+
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+ {
+ /* Lenovo ThinkPad Helix */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ .driver_data = (int []){1024, 5052, 2258, 4832},
+ },
+ {
+ /* Lenovo ThinkPad X240 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
+ const struct dmi_system_id *min_max_dmi;
+
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
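The Synaptics change works around several Lenovo touchpads that report wrong coordinate ranges: a DMI table supplies the real limits as a four-element driver_data array, dmi_first_match() selects the entry at module init, and synaptics_resolution() short-circuits the firmware query when the quirk is present. Below is a hedged sketch of consuming such a {x_min, x_max, y_min, y_max} array; the struct is a simplified stand-in, not the driver's real private data, while the values are the ThinkPad X240 entry from the patch.

```c
/* Hedged sketch: apply a {x_min, x_max, y_min, y_max} quirk array of the
 * kind stored in the DMI table's driver_data. struct pad_limits is a
 * simplified stand-in for the driver's private data.
 */
#include <stdio.h>

struct pad_limits {
	int x_min, x_max, y_min, y_max;
};

static void apply_min_max_quirk(struct pad_limits *pad, const int *quirk)
{
	/* Quirk layout matches the patch: {x_min, x_max, y_min, y_max}. */
	pad->x_min = quirk[0];
	pad->x_max = quirk[1];
	pad->y_min = quirk[2];
	pad->y_max = quirk[3];
}

int main(void)
{
	static const int x240_quirk[] = { 1232, 5710, 1156, 4696 };
	struct pad_limits pad = { 0 };

	apply_min_max_quirk(&pad, x240_quirk);
	printf("x: %d..%d, y: %d..%d\n",
	       pad.x_min, pad.x_max, pad.y_min, pad.y_max);
	return 0;
}
```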
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index 64cf34ea7604..e122bda16aab 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -39,7 +39,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index e900d465aaf6..38298232124f 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -82,7 +82,6 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Driver for DEC VSXXX-AA and -GA mice and VSXXX-AB tablet"
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c320c2e..b604564dec5c 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@ struct mousedev {
struct device dev;
struct cdev cdev;
bool exist;
- bool is_mixdev;
struct list_head mixdev_node;
bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
+
+ int (*open_device)(struct mousedev *mousedev);
+ void (*close_device)(struct mousedev *mousedev);
};
enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
if (retval)
return retval;
- if (mousedev->is_mixdev)
- mixdev_open_devices();
- else if (!mousedev->exist)
+ if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
- if (mousedev->is_mixdev)
- mixdev_close_devices();
- else if (mousedev->exist && !--mousedev->open)
+ if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ int error;
+
+ error = mutex_lock_interruptible(&mixdev->mutex);
+ if (error)
+ return error;
- if (mousedev_mix->open++)
- return;
+ if (!mixdev->open++) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (!mousedev->opened_by_mixdev) {
- if (mousedev_open_device(mousedev))
- continue;
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (!mousedev->opened_by_mixdev) {
+ if (mousedev_open_device(mousedev))
+ continue;
- mousedev->opened_by_mixdev = true;
+ mousedev->opened_by_mixdev = true;
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
+ return 0;
}
/*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ mutex_lock(&mixdev->mutex);
- if (--mousedev_mix->open)
- return;
+ if (!--mixdev->open) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (mousedev->opened_by_mixdev) {
- mousedev->opened_by_mixdev = false;
- mousedev_close_device(mousedev);
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (mousedev->opened_by_mixdev) {
+ mousedev->opened_by_mixdev = false;
+ mousedev_close_device(mousedev);
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
}
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
mousedev_detach_client(mousedev, client);
kfree(client);
- mousedev_close_device(mousedev);
+ mousedev->close_device(mousedev);
return 0;
}
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
- error = mousedev_open_device(mousedev);
+ error = mousedev->open_device(mousedev);
if (error)
goto err_free_client;
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
if (mixdev) {
dev_set_name(&mousedev->dev, "mice");
+
+ mousedev->open_device = mixdev_open_devices;
+ mousedev->close_device = mixdev_close_devices;
} else {
int dev_no = minor;
/* Normalize device number if it falls into legacy range */
if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
dev_no -= MOUSEDEV_MINOR_BASE;
dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+ mousedev->open_device = mousedev_open_device;
+ mousedev->close_device = mousedev_close_device;
}
mousedev->exist = true;
- mousedev->is_mixdev = mixdev;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
- if (!mousedev->is_mixdev)
+ if (mousedev != mousedev_mix)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}
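The mousedev refactor replaces the is_mixdev flag with per-device open_device/close_device function pointers, so mousedev_open() and mousedev_release() no longer need to know whether they are handling a real device node or the multiplexing /dev/input/mice node. A reduced sketch of that dispatch pattern, with hypothetical names, follows.

```c
/* Hedged sketch: selecting open/close behaviour through function
 * pointers instead of an is_mixdev flag. All names are illustrative,
 * not the driver's.
 */
#include <stdio.h>

struct dev_node {
	const char *name;
	int (*open_device)(struct dev_node *node);
	void (*close_device)(struct dev_node *node);
};

static int single_open(struct dev_node *node)
{
	printf("open single device %s\n", node->name);
	return 0;
}

static void single_close(struct dev_node *node)
{
	printf("close single device %s\n", node->name);
}

static int mix_open(struct dev_node *node)
{
	printf("open every device behind %s\n", node->name);
	return 0;
}

static void mix_close(struct dev_node *node)
{
	printf("close every device behind %s\n", node->name);
}

int main(void)
{
	/* The constructor decides once which behaviour a node gets... */
	struct dev_node mouse0 = { "mouse0", single_open, single_close };
	struct dev_node mice   = { "mice",   mix_open,    mix_close   };

	/* ...and callers simply dispatch through the pointers. */
	mouse0.open_device(&mouse0);
	mice.open_device(&mice);
	mice.close_device(&mice);
	mouse0.close_device(&mouse0);
	return 0;
}
```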
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 8541f949778d..aec54e283580 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -16,14 +16,19 @@ config SERIO
To compile this driver as a module, choose M here: the
module will be called serio.
+config ARCH_MIGHT_HAVE_PC_SERIO
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might use a PC serio device (i8042) to
+ communicate with keyboard, mouse, etc.
+
if SERIO
config SERIO_I8042
tristate "i8042 PC Keyboard controller"
default y
- depends on !PARISC && (!ARM || FOOTBRIDGE_HOST) && \
- (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
- !ARC
+ depends on ARCH_MIGHT_HAVE_PC_SERIO
help
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 4777a73cd390..cce69d6b9587 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 4e2fd44865e1..762b08432de0 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -10,7 +10,6 @@
* (at your option) any later version.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -167,8 +166,6 @@ static int amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
serio_unregister_port(kmi->io);
clk_put(kmi->clk);
iounmap(kmi->base);
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 3a83c3c14b23..613261994621 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -160,7 +160,9 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
if (info & IS_E0)
serio_interrupt(kbd_dev->hv_serio,
XTKBD_EMUL0, 0);
-
+ if (info & IS_E1)
+ serio_interrupt(kbd_dev->hv_serio,
+ XTKBD_EMUL1, 0);
scan_code = __le16_to_cpu(ks_msg->make_code);
if (info & IS_BREAK)
scan_code |= XTKBD_RELEASE;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 07a8363f3c5c..75516996db20 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -18,7 +18,6 @@
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/i8042.h>
-#include <linux/init.h>
#include <linux/libps2.h>
#define DRIVER_DESC "PS/2 driver library"
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index 51b1d40cc286..5d2fe7ece7ca 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 76f83836fd5a..e862c6ea9d9e 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -16,7 +16,6 @@
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/delay.h>
#include <asm/io.h>
@@ -181,7 +180,6 @@ static void pcips2_remove(struct pci_dev *dev)
struct pcips2_data *ps2if = pci_get_drvdata(dev);
serio_unregister_port(ps2if->io);
- pci_set_drvdata(dev, NULL);
kfree(ps2if);
pci_release_regions(dev);
pci_disable_device(dev);
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 7a65a1bc5226..594256c38554 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -30,7 +30,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
#include <linux/err.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 567566ae0dae..e462e7791bb8 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 59df2e7317a3..c9a02fe57576 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -15,7 +15,6 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index dfbcd872f95e..e6cf52ebad87 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a70aa555bbff..e7409c45bdd0 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -236,7 +236,7 @@ EXPORT_SYMBOL(sparse_keymap_setup);
* in an input device that was set up by sparse_keymap_setup().
* NOTE: It is safe to call this function while input device is
* still registered (however the drivers should care not to try to
- * use freed keymap and thus have to shut off interrups/polling
+ * use freed keymap and thus have to shut off interrupts/polling
* before freeing the keymap).
*/
void sparse_keymap_free(struct input_dev *dev)
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index e062ec899ca1..889f6b77e8cb 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
/*
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index ee83c3904ee8..e7f966da6efa 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -74,7 +74,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 29e01ab6859f..caecffe8caff 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -53,7 +53,6 @@ Scott Hill shill@gtcocalcomp.com
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 5cc04124995c..cd852059b99e 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#define DRIVER_AUTHOR "Xing Wei <weixing@hanwang.com.cn>"
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 3fba74b9b602..d2ac7c2b5b82 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -1,7 +1,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index b79d45198d82..9ebf0ed3b3b3 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -86,7 +86,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/init.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 867e7c33ac55..b16ebef5b911 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
char limit = 0;
/* result has to be defined as int for some devices */
- int result = 0;
+ int result = 0, touch_max = 0;
int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
unsigned char *report;
@@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
-
+ /* a touch device supports at least one touch point */
+ touch_max = 1;
switch (features->type) {
case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
@@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
}
out:
+ if (!features->touch_max && touch_max)
+ features->touch_max = touch_max;
result = 0;
kfree(report);
return result;
@@ -1194,12 +1197,15 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_wac1->features.device_type = BTN_TOOL_PEN;
snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
+ wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
+ wacom_wac1->shared->type = wacom_wac1->features.type;
error = wacom_register_input(wacom1);
if (error)
goto fail;
/* Touch interface */
- if (wacom_wac1->features.touch_max) {
+ if (wacom_wac1->features.touch_max ||
+ wacom_wac1->features.type == INTUOSHT) {
wacom_wac2->features =
*((struct wacom_features *)id->driver_info);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1214,6 +1220,10 @@ static void wacom_wireless_work(struct work_struct *work)
error = wacom_register_input(wacom2);
if (error)
goto fail;
+
+ if (wacom_wac1->features.type == INTUOSHT &&
+ wacom_wac1->features.touch_max)
+ wacom_wac->shared->touch_input = wacom_wac2->input;
}
error = wacom_initialize_battery(wacom);
@@ -1322,7 +1332,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
* HID descriptor. If this is the touch interface (wMaxPacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
- if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ if (features->type >= INTUOS5S && features->type <= INTUOSHT) {
if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
@@ -1393,6 +1403,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
}
+ if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) {
+ if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
+ wacom_wac->shared->touch_input = wacom_wac->input;
+ }
+
return 0;
fail5: wacom_destroy_leds(wacom);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 782c2535f1d8..05f371df6c40 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -210,6 +210,62 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
return 1;
}
+static int wacom_dtus_irq(struct wacom_wac *wacom)
+{
+ char *data = wacom->data;
+ struct input_dev *input = wacom->input;
+ unsigned short prox, pressure = 0;
+
+ if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d", __func__, data[0]);
+ return 0;
+ } else if (data[0] == WACOM_REPORT_DTUSPAD) {
+ input_report_key(input, BTN_0, (data[1] & 0x01));
+ input_report_key(input, BTN_1, (data[1] & 0x02));
+ input_report_key(input, BTN_2, (data[1] & 0x04));
+ input_report_key(input, BTN_3, (data[1] & 0x08));
+ input_report_abs(input, ABS_MISC,
+ data[1] & 0x0f ? PAD_DEVICE_ID : 0);
+ /*
+ * Serial number is required when expresskeys are
+ * reported through pen interface.
+ */
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ return 1;
+ } else {
+ prox = data[1] & 0x80;
+ if (prox) {
+ switch ((data[1] >> 3) & 3) {
+ case 1: /* Rubber */
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ wacom->id[0] = ERASER_DEVICE_ID;
+ break;
+
+ case 2: /* Pen */
+ wacom->tool[0] = BTN_TOOL_PEN;
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ break;
+ }
+ }
+
+ input_report_key(input, BTN_STYLUS, data[1] & 0x20);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x40);
+ input_report_abs(input, ABS_X, get_unaligned_be16(&data[3]));
+ input_report_abs(input, ABS_Y, get_unaligned_be16(&data[5]));
+ pressure = ((data[1] & 0x03) << 8) | (data[2] & 0xff);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_key(input, BTN_TOUCH, pressure > 10);
+
+ if (!prox) /* out-prox */
+ wacom->id[0] = 0;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_event(input, EV_MSC, MSC_SERIAL, 1);
+ return 1;
+ }
+}
+
static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -331,7 +387,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
- if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
wacom->shared->stylus_in_proximity = true;
/* serial number of the tool */
@@ -436,7 +492,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
- if (features->quirks == WACOM_QUIRK_MULTI_INPUT)
+ if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
wacom->shared->stylus_in_proximity = false;
/*
@@ -1151,8 +1207,8 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
int width, height;
if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
- width = data[5];
- height = data[6];
+ width = data[5] * 100;
+ height = data[6] * 100;
} else {
/*
* "a" is a scaled-down area which we assume is
@@ -1176,10 +1232,16 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
{
struct input_dev *input = wacom->input;
+ struct wacom_features *features = &wacom->features;
- input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ if (features->type == INTUOSHT) {
+ input_report_key(input, BTN_LEFT, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x08) != 0);
+ } else {
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ }
input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
- input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
}
@@ -1213,13 +1275,23 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
+ struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
- if (data[0] != 0x02)
+ if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_USB)
return 0;
+ if (data[0] == WACOM_REPORT_USB) {
+ if (features->type == INTUOSHT && features->touch_max) {
+ input_report_switch(wacom->shared->touch_input,
+ SW_MUTE_DEVICE, data[8] & 0x40);
+ input_sync(wacom->shared->touch_input);
+ }
+ return 0;
+ }
+
prox = (data[1] & 0x20) == 0x20;
/*
@@ -1252,8 +1324,8 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
* touching and applying pressure; do not report negative
* distance.
*/
- if (data[8] <= wacom->features.distance_max)
- d = wacom->features.distance_max - data[8];
+ if (data[8] <= features->distance_max)
+ d = features->distance_max - data[8];
pen = data[1] & 0x01;
btn1 = data[1] & 0x02;
@@ -1297,13 +1369,20 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
unsigned char *data = wacom->data;
int connected;
- if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
+ if (len != WACOM_PKGLEN_WIRELESS || data[0] != WACOM_REPORT_WL)
return 0;
connected = data[1] & 0x01;
if (connected) {
int pid, battery;
+ if ((wacom->shared->type == INTUOSHT) &&
+ wacom->shared->touch_max) {
+ input_report_switch(wacom->shared->touch_input,
+ SW_MUTE_DEVICE, data[5] & 0x40);
+ input_sync(wacom->shared->touch_input);
+ }
+
pid = get_unaligned_be16(&data[6]);
battery = data[5] & 0x3f;
if (wacom->pid != pid) {
@@ -1348,6 +1427,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_dtu_irq(wacom_wac);
break;
+ case DTUS:
+ sync = wacom_dtus_irq(wacom_wac);
+ break;
+
case INTUOS:
case INTUOS3S:
case INTUOS3:
@@ -1391,6 +1474,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
case BAMBOO_PT:
+ case INTUOSHT:
sync = wacom_bpt_irq(wacom_wac, len);
break;
@@ -1459,7 +1543,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
+ (features->type >= INTUOS5S && features->type <= INTUOSHT) ||
(features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
@@ -1538,7 +1622,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
wacom_abs_set_axis(input_dev, wacom_wac);
- switch (wacom_wac->features.type) {
+ switch (features->type) {
case WACOM_MO:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
/* fall through */
@@ -1749,8 +1833,14 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
+ case DTUS:
case PL:
case DTU:
+ if (features->type == DTUS) {
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ for (i = 0; i < 3; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ }
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -1771,33 +1861,50 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case INTUOSHT:
+ if (features->touch_max &&
+ features->device_type == BTN_TOOL_FINGER) {
+ input_dev->evbit[0] |= BIT_MASK(EV_SW);
+ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+ }
+ /* fall through */
+
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
-
if (features->device_type == BTN_TOOL_FINGER) {
- unsigned int flags = INPUT_MT_POINTER;
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
__set_bit(BTN_BACK, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
- if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- input_set_abs_params(input_dev,
+ if (features->touch_max) {
+ /* touch interface */
+ unsigned int flags = INPUT_MT_POINTER;
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
- input_set_abs_params(input_dev,
+ input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
+ } else {
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ flags = 0;
+ }
+ input_mt_init_slots(input_dev, features->touch_max, flags);
} else {
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- flags = 0;
+ /* buttons/keys only interface */
+ __clear_bit(ABS_X, input_dev->absbit);
+ __clear_bit(ABS_Y, input_dev->absbit);
+ __clear_bit(BTN_TOUCH, input_dev->keybit);
}
- input_mt_init_slots(input_dev, features->touch_max, flags);
} else if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -2055,6 +2162,9 @@ static const struct wacom_features wacom_features_0xCE =
static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xFB =
+ { "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511,
+ 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x57 =
{ "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
@@ -2200,6 +2310,17 @@ static const struct wacom_features wacom_features_0x300 =
static const struct wacom_features wacom_features_0x301 =
{ "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x302 =
+ { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x303 =
+ { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x30E =
+ { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
+ 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2337,6 +2458,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x10F) },
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
+ { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x304) },
{ USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
@@ -2347,6 +2471,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xF8) },
{ USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xFA) },
+ { USB_DEVICE_WACOM(0xFB) },
{ USB_DEVICE_WACOM(0x0307) },
{ USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_LENOVO(0x6004) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index fd23a3790605..f69c0ebe7fa9 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 64
+#define WACOM_PKGLEN_MAX 68
#define WACOM_NAME_MAX 64
@@ -29,6 +29,7 @@
#define WACOM_PKGLEN_WIRELESS 32
#define WACOM_PKGLEN_MTOUCH 62
#define WACOM_PKGLEN_MTTPC 40
+#define WACOM_PKGLEN_DTUS 68
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -47,13 +48,17 @@
#define WACOM_REPORT_INTUOSWRITE 6
#define WACOM_REPORT_INTUOSPAD 12
#define WACOM_REPORT_INTUOS5PAD 3
+#define WACOM_REPORT_DTUSPAD 21
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
#define WACOM_REPORT_TPCMT 13
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
+#define WACOM_REPORT_DTUS 17
#define WACOM_REPORT_TPC1FGE 18
#define WACOM_REPORT_24HDT 1
+#define WACOM_REPORT_WL 128
+#define WACOM_REPORT_USB 192
/* device quirks */
#define WACOM_QUIRK_MULTI_INPUT 0x0001
@@ -68,6 +73,7 @@ enum {
PTU,
PL,
DTU,
+ DTUS,
INTUOS,
INTUOS3S,
INTUOS3,
@@ -81,6 +87,7 @@ enum {
INTUOSPS,
INTUOSPM,
INTUOSPL,
+ INTUOSHT,
WACOM_21UX2,
WACOM_22HD,
DTK,
@@ -129,6 +136,10 @@ struct wacom_features {
struct wacom_shared {
bool stylus_in_proximity;
bool touch_down;
+ /* for wireless device to access USB interfaces */
+ unsigned touch_max;
+ int type;
+ struct input_dev *touch_input;
};
struct wacom_wac {
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index f7de14a268bf..544e20c551f8 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -172,7 +172,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
static int pm860x_touch_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_touch_pdata *pdata = pdev->dev.platform_data;
+ struct pm860x_touch_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm860x_touch *touch;
struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
: chip->companion;
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 69834dd3c313..6793c85903ae 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -37,7 +37,6 @@
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -686,7 +685,7 @@ static int ad7877_probe(struct spi_device *spi)
{
struct ad7877 *ts;
struct input_dev *input_dev;
- struct ad7877_platform_data *pdata = spi->dev.platform_data;
+ struct ad7877_platform_data *pdata = dev_get_platdata(&spi->dev);
int err;
u16 verify;
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index facd3057b62d..fce590677b7b 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -470,7 +469,7 @@ static int ad7879_gpio_add(struct ad7879 *ts,
static void ad7879_gpio_remove(struct ad7879 *ts)
{
- const struct ad7879_platform_data *pdata = ts->dev->platform_data;
+ const struct ad7879_platform_data *pdata = dev_get_platdata(ts->dev);
int ret;
if (pdata->gpio_export) {
@@ -495,7 +494,7 @@ static inline void ad7879_gpio_remove(struct ad7879 *ts)
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
const struct ad7879_bus_ops *bops)
{
- struct ad7879_platform_data *pdata = dev->platform_data;
+ struct ad7879_platform_data *pdata = dev_get_platdata(dev);
struct ad7879 *ts;
struct input_dev *input_dev;
int err;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index ea195360747e..45a06e495ed2 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -19,7 +19,6 @@
*/
#include <linux/types.h>
#include <linux/hwmon.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -101,8 +100,7 @@ struct ads7846 {
struct spi_device *spi;
struct regulator *reg;
-#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
- struct attribute_group *attr_group;
+#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon;
#endif
@@ -421,7 +419,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
return status;
}
-#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
+#if IS_ENABLED(CONFIG_HWMON)
#define SHOW(name, var, adjust) static ssize_t \
name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
@@ -479,42 +477,36 @@ static inline unsigned vbatt_adjust(struct ads7846 *ts, ssize_t v)
SHOW(in0_input, vaux, vaux_adjust)
SHOW(in1_input, vbatt, vbatt_adjust)
-static struct attribute *ads7846_attributes[] = {
- &dev_attr_temp0.attr,
- &dev_attr_temp1.attr,
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- NULL,
-};
-
-static struct attribute_group ads7846_attr_group = {
- .attrs = ads7846_attributes,
-};
+static umode_t ads7846_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ads7846 *ts = dev_get_drvdata(dev);
-static struct attribute *ads7843_attributes[] = {
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- NULL,
-};
+ if (ts->model == 7843 && index < 2) /* in0, in1 */
+ return 0;
+ if (ts->model == 7845 && index != 2) /* in0 */
+ return 0;
-static struct attribute_group ads7843_attr_group = {
- .attrs = ads7843_attributes,
-};
+ return attr->mode;
+}
-static struct attribute *ads7845_attributes[] = {
- &dev_attr_in0_input.attr,
+static struct attribute *ads7846_attributes[] = {
+ &dev_attr_temp0.attr, /* 0 */
+ &dev_attr_temp1.attr, /* 1 */
+ &dev_attr_in0_input.attr, /* 2 */
+ &dev_attr_in1_input.attr, /* 3 */
NULL,
};
-static struct attribute_group ads7845_attr_group = {
- .attrs = ads7845_attributes,
+static struct attribute_group ads7846_attr_group = {
+ .attrs = ads7846_attributes,
+ .is_visible = ads7846_is_visible,
};
+__ATTRIBUTE_GROUPS(ads7846_attr);
static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
{
- struct device *hwmon;
- int err;
-
/* hwmon sensors need a reference voltage */
switch (ts->model) {
case 7846:
@@ -535,43 +527,19 @@ static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts)
break;
}
- /* different chips have different sensor groups */
- switch (ts->model) {
- case 7846:
- ts->attr_group = &ads7846_attr_group;
- break;
- case 7845:
- ts->attr_group = &ads7845_attr_group;
- break;
- case 7843:
- ts->attr_group = &ads7843_attr_group;
- break;
- default:
- dev_dbg(&spi->dev, "ADS%d not recognized\n", ts->model);
- return 0;
- }
-
- err = sysfs_create_group(&spi->dev.kobj, ts->attr_group);
- if (err)
- return err;
-
- hwmon = hwmon_device_register(&spi->dev);
- if (IS_ERR(hwmon)) {
- sysfs_remove_group(&spi->dev.kobj, ts->attr_group);
- return PTR_ERR(hwmon);
- }
+ ts->hwmon = hwmon_device_register_with_groups(&spi->dev, spi->modalias,
+ ts, ads7846_attr_groups);
+ if (IS_ERR(ts->hwmon))
+ return PTR_ERR(ts->hwmon);
- ts->hwmon = hwmon;
return 0;
}
static void ads784x_hwmon_unregister(struct spi_device *spi,
struct ads7846 *ts)
{
- if (ts->hwmon) {
- sysfs_remove_group(&spi->dev.kobj, ts->attr_group);
+ if (ts->hwmon)
hwmon_device_unregister(ts->hwmon);
- }
}
#else
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 59aa24002c7b..a70400754e92 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
@@ -1130,7 +1129,7 @@ static void mxt_input_close(struct input_dev *dev)
static int mxt_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct mxt_platform_data *pdata = client->dev.platform_data;
+ const struct mxt_platform_data *pdata = dev_get_platdata(&client->dev);
struct mxt_data *data;
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index bddabc595077..a7c9d6967d1e 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -12,7 +12,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -182,7 +181,7 @@ static int atmel_tsadcc_probe(struct platform_device *pdev)
struct atmel_tsadcc *ts_dev;
struct input_dev *input_dev;
struct resource *res;
- struct at91_tsadcc_data *pdata = pdev->dev.platform_data;
+ struct at91_tsadcc_data *pdata = dev_get_platdata(&pdev->dev);
int err;
unsigned int prsc;
unsigned int reg;
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 8c651985a5c4..5bf1aeeea825 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -178,7 +178,7 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct cy8ctmg110_pdata *pdata = client->dev.platform_data;
+ const struct cy8ctmg110_pdata *pdata = dev_get_platdata(&client->dev);
struct cy8ctmg110 *ts;
struct input_dev *input_dev;
int err;
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 4204841cdc49..eee656f77a2e 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -534,7 +534,7 @@ static void cyttsp_close(struct input_dev *dev)
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
- const struct cyttsp_platform_data *pdata = dev->platform_data;
+ const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
@@ -553,7 +553,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
ts->dev = dev;
ts->input = input_dev;
- ts->pdata = dev->platform_data;
+ ts->pdata = dev_get_platdata(dev);
ts->bus_ops = bus_ops;
ts->irq = irq;
diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c
index 1d7b6f154168..ccefa56ca212 100644
--- a/drivers/input/touchscreen/cyttsp_i2c_common.c
+++ b/drivers/input/touchscreen/cyttsp_i2c_common.c
@@ -31,6 +31,8 @@
#include <linux/module.h>
#include <linux/types.h>
+#include "cyttsp4_core.h"
+
int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
u16 addr, u8 length, void *values)
{
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 34ad84105e6e..8ccf7bb4028a 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
@@ -299,7 +298,7 @@ static void da9034_touch_close(struct input_dev *dev)
static int da9034_touch_probe(struct platform_device *pdev)
{
- struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
+ struct da9034_touch_pdata *pdata = dev_get_platdata(&pdev->dev);
struct da9034_touch *touch;
struct input_dev *input_dev;
int ret;
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c
index 1809677a6513..86237a910876 100644
--- a/drivers/input/touchscreen/dynapro.c
+++ b/drivers/input/touchscreen/dynapro.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Dynapro serial touchscreen driver"
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 83fa1b15a97f..412a85ec9ba5 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -623,8 +623,9 @@ static int edt_ft5x06_ts_reset(struct i2c_client *client,
if (gpio_is_valid(reset_pin)) {
/* this pulls reset down, enabling the low active reset */
- error = gpio_request_one(reset_pin, GPIOF_OUT_INIT_LOW,
- "edt-ft5x06 reset");
+ error = devm_gpio_request_one(&client->dev, reset_pin,
+ GPIOF_OUT_INIT_LOW,
+ "edt-ft5x06 reset");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d as reset pin, error %d\n",
@@ -705,7 +706,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct edt_ft5x06_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
struct edt_ft5x06_ts_data *tsdata;
struct input_dev *input;
int error;
@@ -723,8 +724,8 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
if (gpio_is_valid(pdata->irq_pin)) {
- error = gpio_request_one(pdata->irq_pin,
- GPIOF_IN, "edt-ft5x06 irq");
+ error = devm_gpio_request_one(&client->dev, pdata->irq_pin,
+ GPIOF_IN, "edt-ft5x06 irq");
if (error) {
dev_err(&client->dev,
"Failed to request GPIO %d, error %d\n",
@@ -733,12 +734,16 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
}
}
- tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsdata || !input) {
+ tsdata = devm_kzalloc(&client->dev, sizeof(*tsdata), GFP_KERNEL);
+ if (!tsdata) {
dev_err(&client->dev, "failed to allocate driver data.\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
+ }
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input) {
+ dev_err(&client->dev, "failed to allocate input device.\n");
+ return -ENOMEM;
}
mutex_init(&tsdata->mutex);
@@ -749,7 +754,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
error = edt_ft5x06_ts_identify(client, tsdata->name, fw_version);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
- goto err_free_mem;
+ return error;
}
edt_ft5x06_ts_get_defaults(tsdata, pdata);
@@ -776,27 +781,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0);
if (error) {
dev_err(&client->dev, "Unable to init MT slots.\n");
- goto err_free_mem;
+ return error;
}
input_set_drvdata(input, tsdata);
i2c_set_clientdata(client, tsdata);
- error = request_threaded_irq(client->irq, NULL, edt_ft5x06_ts_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, tsdata);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, edt_ft5x06_ts_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err_free_mem;
+ return error;
}
error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group);
if (error)
- goto err_free_irq;
+ return error;
error = input_register_device(input);
- if (error)
- goto err_remove_attrs;
+ if (error) {
+ sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ return error;
+ }
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
device_init_wakeup(&client->dev, 1);
@@ -806,40 +814,15 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
pdata->irq_pin, pdata->reset_pin);
return 0;
-
-err_remove_attrs:
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
-err_free_irq:
- free_irq(client->irq, tsdata);
-err_free_mem:
- input_free_device(input);
- kfree(tsdata);
-
- if (gpio_is_valid(pdata->irq_pin))
- gpio_free(pdata->irq_pin);
-
- return error;
}
static int edt_ft5x06_ts_remove(struct i2c_client *client)
{
- const struct edt_ft5x06_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
- free_irq(client->irq, tsdata);
- input_unregister_device(tsdata->input);
-
- if (gpio_is_valid(pdata->irq_pin))
- gpio_free(pdata->irq_pin);
- if (gpio_is_valid(pdata->reset_pin))
- gpio_free(pdata->reset_pin);
-
- kfree(tsdata);
-
return 0;
}
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 1ce3d29ffca5..b1884ddd7a84 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
static int eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
- struct eeti_ts_platform_data *pdata = client->dev.platform_data;
+ struct eeti_ts_platform_data *pdata = dev_get_platdata(&client->dev);
struct eeti_ts_priv *priv;
struct input_dev *input;
unsigned int irq_flags;
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 054d22583248..e6bcb13680b2 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 957423d1471d..8051a4b704ea 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/ctype.h>
#define DRIVER_DESC "Elo serial touchscreen driver"
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
index 10794ddbdf58..d0e46a7e183b 100644
--- a/drivers/input/touchscreen/fujitsu_ts.c
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Fujitsu serial touchscreen driver"
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 41c71766bf18..e2ee62615273 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Gunze AHL-51S touchscreen driver"
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
index 0cc47ea98acf..ecb1e0e01328 100644
--- a/drivers/input/touchscreen/hampshire.c
+++ b/drivers/input/touchscreen/hampshire.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Hampshire serial touchscreen driver"
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 1418bdda61bb..2a5089139818 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -184,7 +184,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const struct ili210x_platform_data *pdata = dev->platform_data;
+ const struct ili210x_platform_data *pdata = dev_get_platdata(dev);
struct ili210x *priv;
struct input_dev *input;
struct panel_info panel;
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c
index a29c99c32245..adb80b65a259 100644
--- a/drivers/input/touchscreen/inexio.c
+++ b/drivers/input/touchscreen/inexio.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "iNexio serial touchscreen driver"
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index e30d837dae2f..4f6b156144e9 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index e463a79ffecc..7324c5c0fb86 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -14,7 +14,6 @@
*/
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 9101ee529c92..2058253b55d9 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -15,7 +15,6 @@
*/
#include <linux/platform_device.h>
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 7d2b2136e5ad..0786010d7ed0 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 9f84fcd08732..a68ec142ee9a 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -33,7 +33,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index f9f4e0c56eda..647e36f5930e 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
@@ -194,7 +193,7 @@ static int mcs5000_ts_probe(struct i2c_client *client,
struct input_dev *input_dev;
int ret;
- if (!client->dev.platform_data)
+ if (!dev_get_platdata(&client->dev))
return -EINVAL;
data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL);
@@ -207,7 +206,7 @@ static int mcs5000_ts_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- data->platform_data = client->dev.platform_data;
+ data->platform_data = dev_get_platdata(&client->dev);
input_dev->name = "MELPAS MCS-5000 Touchscreen";
input_dev->id.bustype = BUS_I2C;
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 1443532fe6c4..8a598c065391 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -8,7 +8,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/i2c.h>
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index eb66b7c37c2f..9b5552a26169 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "MicroTouch serial touchscreen driver"
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index f22e04dd4e16..cff2376817e5 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index b49f0b836925..417d87379265 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -21,7 +21,6 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "PenMount serial touchscreen driver"
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 6cc6b36663ff..02392d2061d6 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -128,7 +128,8 @@ static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+ const struct pixcir_ts_platform_data *pdata =
+ dev_get_platdata(&client->dev);
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
int error;
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index b061af2c8376..19cb247dbb86 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/input.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -251,7 +250,7 @@ static int s3c2410ts_probe(struct platform_device *pdev)
ts.dev = dev;
- info = pdev->dev.platform_data;
+ info = dev_get_platdata(&pdev->dev);
if (!info) {
dev_err(dev, "no platform data, cannot attach\n");
return -EINVAL;
@@ -392,7 +391,7 @@ static int s3c2410ts_suspend(struct device *dev)
static int s3c2410ts_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct s3c2410_ts_mach_info *info = pdev->dev.platform_data;
+ struct s3c2410_ts_mach_info *info = dev_get_platdata(&pdev->dev);
clk_enable(ts.clock);
enable_irq(ts.irq_tc);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 2f03b2f289dd..3c0f57efe7b1 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -134,7 +134,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
} else if (!ts->low_latency_req.dev) {
/* First contact, request 100 us latency. */
dev_pm_qos_add_ancestor_request(&ts->client->dev,
- &ts->low_latency_req, 100);
+ &ts->low_latency_req,
+ DEV_PM_QOS_RESUME_LATENCY, 100);
}
/* SYN_REPORT */
@@ -154,7 +155,7 @@ static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
- struct st1232_pdata *pdata = client->dev.platform_data;
+ struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
struct input_dev *input_dev;
int error;
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 59e81b00f244..42ce31afa259 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 2ca5a7bee04e..4e793a17361f 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -14,7 +14,6 @@
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c
index 5f29e5b8e1c1..c27cf8f3d1ca 100644
--- a/drivers/input/touchscreen/touchit213.c
+++ b/drivers/input/touchscreen/touchit213.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Sahara TouchIT-213 serial touchscreen driver"
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c
index 8a2887daf194..4000e5205407 100644
--- a/drivers/input/touchscreen/touchright.c
+++ b/drivers/input/touchscreen/touchright.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Touchright serial touchscreen driver"
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c
index 588cdcb839dd..ba90f447df75 100644
--- a/drivers/input/touchscreen/touchwin.c
+++ b/drivers/input/touchscreen/touchwin.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define DRIVER_DESC "Touchwindow serial touchscreen driver"
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 811353353917..550adcbbfc23 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -571,7 +571,7 @@ static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
static int tsc2005_probe(struct spi_device *spi)
{
- const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
+ const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
struct tsc2005 *ts;
struct input_dev *input_dev;
unsigned int max_x, max_y, max_p;
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 0b67ba476b4c..1bf9906b5a3f 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -26,6 +26,9 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
#define TSC2007_MEASURE_AUX (0x2 << 4)
@@ -72,15 +75,18 @@ struct tsc2007 {
u16 model;
u16 x_plate_ohms;
u16 max_rt;
- unsigned long poll_delay;
unsigned long poll_period;
+ int fuzzx;
+ int fuzzy;
+ int fuzzz;
+ unsigned gpio;
int irq;
wait_queue_head_t wait;
bool stopped;
- int (*get_pendown_state)(void);
+ int (*get_pendown_state)(struct device *);
void (*clear_penirq)(void);
};
@@ -161,7 +167,7 @@ static bool tsc2007_is_pen_down(struct tsc2007 *ts)
if (!ts->get_pendown_state)
return true;
- return ts->get_pendown_state();
+ return ts->get_pendown_state(&ts->client->dev);
}
static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
@@ -178,7 +184,7 @@ static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
rt = tsc2007_calculate_pressure(ts, &tc);
- if (rt == 0 && !ts->get_pendown_state) {
+ if (!rt && !ts->get_pendown_state) {
/*
* If pressure reported is 0 and we don't have
* callback to check pendown state, we have to
@@ -228,7 +234,7 @@ static irqreturn_t tsc2007_hard_irq(int irq, void *handle)
{
struct tsc2007 *ts = handle;
- if (!ts->get_pendown_state || likely(ts->get_pendown_state()))
+ if (tsc2007_is_pen_down(ts))
return IRQ_WAKE_THREAD;
if (ts->clear_penirq)
@@ -273,49 +279,134 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-static int tsc2007_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+#ifdef CONFIG_OF
+static int tsc2007_get_pendown_state_gpio(struct device *dev)
{
- struct tsc2007 *ts;
- struct tsc2007_platform_data *pdata = client->dev.platform_data;
- struct input_dev *input_dev;
- int err;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsc2007 *ts = i2c_get_clientdata(client);
+
+ return !gpio_get_value(ts->gpio);
+}
+
+static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
+{
+ struct device_node *np = client->dev.of_node;
+ u32 val32;
+ u64 val64;
- if (!pdata) {
- dev_err(&client->dev, "platform data is required!\n");
+ if (!np) {
+ dev_err(&client->dev, "missing device tree data\n");
return -EINVAL;
}
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_WORD_DATA))
- return -EIO;
+ if (!of_property_read_u32(np, "ti,max-rt", &val32))
+ ts->max_rt = val32;
+ else
+ ts->max_rt = MAX_12BIT;
+
+ if (!of_property_read_u32(np, "ti,fuzzx", &val32))
+ ts->fuzzx = val32;
+
+ if (!of_property_read_u32(np, "ti,fuzzy", &val32))
+ ts->fuzzy = val32;
+
+ if (!of_property_read_u32(np, "ti,fuzzz", &val32))
+ ts->fuzzz = val32;
+
+ if (!of_property_read_u64(np, "ti,poll-period", &val64))
+ ts->poll_period = val64;
+ else
+ ts->poll_period = 1;
- ts = kzalloc(sizeof(struct tsc2007), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ if (!of_property_read_u32(np, "ti,x-plate-ohms", &val32)) {
+ ts->x_plate_ohms = val32;
+ } else {
+ dev_err(&client->dev, "missing ti,x-plate-ohms devicetree property.");
+ return -EINVAL;
}
- ts->client = client;
- ts->irq = client->irq;
- ts->input = input_dev;
- init_waitqueue_head(&ts->wait);
+ ts->gpio = of_get_gpio(np, 0);
+ if (gpio_is_valid(ts->gpio))
+ ts->get_pendown_state = tsc2007_get_pendown_state_gpio;
+ else
+ dev_warn(&client->dev,
+ "GPIO not specified in DT (of_get_gpio returned %d)\n",
+ ts->gpio);
+
+ return 0;
+}
+#else
+static int tsc2007_probe_dt(struct i2c_client *client, struct tsc2007 *ts)
+{
+ dev_err(&client->dev, "platform data is required!\n");
+ return -EINVAL;
+}
+#endif
+static int tsc2007_probe_pdev(struct i2c_client *client, struct tsc2007 *ts,
+ const struct tsc2007_platform_data *pdata,
+ const struct i2c_device_id *id)
+{
ts->model = pdata->model;
ts->x_plate_ohms = pdata->x_plate_ohms;
ts->max_rt = pdata->max_rt ? : MAX_12BIT;
- ts->poll_delay = pdata->poll_delay ? : 1;
ts->poll_period = pdata->poll_period ? : 1;
ts->get_pendown_state = pdata->get_pendown_state;
ts->clear_penirq = pdata->clear_penirq;
+ ts->fuzzx = pdata->fuzzx;
+ ts->fuzzy = pdata->fuzzy;
+ ts->fuzzz = pdata->fuzzz;
if (pdata->x_plate_ohms == 0) {
dev_err(&client->dev, "x_plate_ohms is not set up in platform data");
- err = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
+ return 0;
+}
+
+static void tsc2007_call_exit_platform_hw(void *data)
+{
+ struct device *dev = data;
+ const struct tsc2007_platform_data *pdata = dev_get_platdata(dev);
+
+ pdata->exit_platform_hw();
+}
+
+static int tsc2007_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct tsc2007_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct tsc2007 *ts;
+ struct input_dev *input_dev;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -EIO;
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct tsc2007), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ if (pdata)
+ err = tsc2007_probe_pdev(client, ts, pdata, id);
+ else
+ err = tsc2007_probe_dt(client, ts);
+ if (err)
+ return err;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ts);
+
+ ts->client = client;
+ ts->irq = client->irq;
+ ts->input = input_dev;
+ init_waitqueue_head(&ts->wait);
+
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
@@ -331,53 +422,46 @@ static int tsc2007_probe(struct i2c_client *client,
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, pdata->fuzzx, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, pdata->fuzzy, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, ts->fuzzx, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, ts->fuzzy, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT,
- pdata->fuzzz, 0);
+ ts->fuzzz, 0);
+
+ if (pdata) {
+ if (pdata->exit_platform_hw) {
+ err = devm_add_action(&client->dev,
+ tsc2007_call_exit_platform_hw,
+ &client->dev);
+ if (err) {
+ dev_err(&client->dev,
+ "Failed to register exit_platform_hw action, %d\n",
+ err);
+ return err;
+ }
+ }
- if (pdata->init_platform_hw)
- pdata->init_platform_hw();
+ if (pdata->init_platform_hw)
+ pdata->init_platform_hw();
+ }
- err = request_threaded_irq(ts->irq, tsc2007_hard_irq, tsc2007_soft_irq,
- IRQF_ONESHOT, client->dev.driver->name, ts);
- if (err < 0) {
- dev_err(&client->dev, "irq %d busy?\n", ts->irq);
- goto err_free_mem;
+ err = devm_request_threaded_irq(&client->dev, ts->irq,
+ tsc2007_hard_irq, tsc2007_soft_irq,
+ IRQF_ONESHOT,
+ client->dev.driver->name, ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq %d: %d\n",
+ ts->irq, err);
+ return err;
}
tsc2007_stop(ts);
err = input_register_device(input_dev);
- if (err)
- goto err_free_irq;
-
- i2c_set_clientdata(client, ts);
-
- return 0;
-
- err_free_irq:
- free_irq(ts->irq, ts);
- if (pdata->exit_platform_hw)
- pdata->exit_platform_hw();
- err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
- return err;
-}
-
-static int tsc2007_remove(struct i2c_client *client)
-{
- struct tsc2007 *ts = i2c_get_clientdata(client);
- struct tsc2007_platform_data *pdata = client->dev.platform_data;
-
- free_irq(ts->irq, ts);
-
- if (pdata->exit_platform_hw)
- pdata->exit_platform_hw();
-
- input_unregister_device(ts->input);
- kfree(ts);
+ if (err) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", err);
+ return err;
+ }
return 0;
}
@@ -389,14 +473,22 @@ static const struct i2c_device_id tsc2007_idtable[] = {
MODULE_DEVICE_TABLE(i2c, tsc2007_idtable);
+#ifdef CONFIG_OF
+static const struct of_device_id tsc2007_of_match[] = {
+ { .compatible = "ti,tsc2007" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tsc2007_of_match);
+#endif
+
static struct i2c_driver tsc2007_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "tsc2007"
+ .name = "tsc2007",
+ .of_match_table = of_match_ptr(tsc2007_of_match),
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
- .remove = tsc2007_remove,
};
module_i2c_driver(tsc2007_driver);
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index eb96f168fb9d..29687872cb94 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
-#include <linux/init.h>
#define PACKET_LENGTH 5
struct tsc_ser {
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 1271f97b4079..b46c55cd1bbb 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -320,7 +319,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
static int ucb1400_ts_probe(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = pdev->dev.platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
int error, x_res, y_res;
u16 fcsr;
@@ -399,7 +398,7 @@ err:
static int ucb1400_ts_remove(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = pdev->dev.platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
free_irq(ucb->irq, ucb);
input_unregister_device(ucb->ts_idev);
@@ -410,7 +409,7 @@ static int ucb1400_ts_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int ucb1400_ts_suspend(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
mutex_lock(&idev->mutex);
@@ -424,7 +423,7 @@ static int ucb1400_ts_suspend(struct device *dev)
static int ucb1400_ts_resume(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->platform_data;
+ struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
mutex_lock(&idev->mutex);
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 5f87bed05467..a0966331a89b 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -51,7 +51,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 9a83be6b6584..2792ca397dd0 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
-#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 6be2eb6a153a..1b953a066b2c 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 7e45c9f6e6b7..d0ef91fc87d1 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -584,7 +584,7 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
static int wm97xx_probe(struct device *dev)
{
struct wm97xx *wm;
- struct wm97xx_pdata *pdata = dev->platform_data;
+ struct wm97xx_pdata *pdata = dev_get_platdata(dev);
int ret = 0, id = 0;
wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index aa127ba392a4..2175f3419002 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -279,7 +279,8 @@ static int zforce_start(struct zforce_ts *ts)
goto error;
}
- if (zforce_setconfig(ts, SETCONFIG_DUALTOUCH)) {
+ ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+ if (ret) {
dev_err(&client->dev, "Unable to set config\n");
goto error;
}
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index bf0869a7a78e..e2ccd683de6e 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/irq.h>
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3e7fdbb4916b..79bbc21c1d01 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
+ depends on SH_MOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f008a5e..faf0da4bb3a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
if (!dev || !dev->dma_mask)
return false;
- /* No device or no PCI device */
- if (dev->bus != &pci_bus_type)
+ /* No PCI device */
+ if (!dev_is_pci(dev))
return false;
devid = get_device_id(dev);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 8f798be6e398..28b4bea7c109 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,7 +26,6 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
-#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e400fbe411de..cff039df056e 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/irqreturn.h>
/*
* Maximum number of IOMMUs supported
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e46a88700b68..1d9ab39af29f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - 4k and 64k pages, with contiguous pte hints.
- * - Up to 39-bit addressing
+ * - Up to 42-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -61,12 +61,13 @@
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)
/* Page table bits */
-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES 16
@@ -78,7 +79,6 @@
#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
/* Stage-1 PTE */
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
@@ -190,6 +190,9 @@
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
@@ -392,7 +395,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- struct mutex lock;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -631,6 +634,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+ size_t size)
+{
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb();
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
+}
+
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
u32 reg;
@@ -649,11 +674,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
if (smmu->version == 1)
reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
- /* Use the weakest memory type, so it is overridden by the pte */
- if (stage1)
- reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
- else
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ } else {
reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+ }
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
if (smmu->version > 1) {
@@ -714,6 +744,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
}
/* TTBR0 */
+ arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+ PTRS_PER_PGD * sizeof(pgd_t));
reg = __pa(root_cfg->pgd);
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -900,7 +932,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- mutex_init(&smmu_domain->lock);
+ spin_lock_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1127,6 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *device_smmu = dev->archdata.iommu;
struct arm_smmu_master *master;
+ unsigned long flags;
if (!device_smmu) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1137,7 +1170,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, flags);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1185,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev));
goto err_unlock;
}
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1195,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
@@ -1176,23 +1209,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, master);
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of recursion
- * here as the SMMU table walker will not be wired through another
- * SMMU.
- */
- if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
-}
-
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
unsigned long end)
{
@@ -1205,16 +1221,15 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int flags, int stage)
{
pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+ pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
- pgtable_t table = alloc_page(PGALLOC_GFP);
+ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
if (!table)
return -ENOMEM;
- arm_smmu_flush_pgtable(smmu, page_address(table),
- ARM_SMMU_PTE_HWTABLE_SIZE);
+ arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
if (!pgtable_page_ctor(table)) {
__free_page(table);
return -ENOMEM;
@@ -1244,7 +1259,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
}
/* If no access, create a faulting entry to avoid TLB fills */
- if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+ if (flags & IOMMU_EXEC)
+ pteval &= ~ARM_SMMU_PTE_XN;
+ else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1314,9 +1331,15 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_none(*pud)) {
- pmd = pmd_alloc_one(NULL, addr);
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+ pud_populate(NULL, pud, pmd);
+ arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+
+ pmd += pmd_index(addr);
} else
#endif
pmd = pmd_offset(pud, addr);
@@ -1325,8 +1348,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
next = pmd_addr_end(addr, end);
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
flags, stage);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1343,9 +1364,15 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
#ifndef __PAGETABLE_PUD_FOLDED
if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
+ pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+ pgd_populate(NULL, pgd, pud);
+ arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+
+ pud += pud_index(addr);
} else
#endif
pud = pud_offset(pgd, addr);
@@ -1354,8 +1381,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
next = pud_addr_end(addr, end);
ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
flags, stage);
- pgd_populate(NULL, pud, pgd);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
phys += next - addr;
} while (pud++, addr = next, addr < end);
@@ -1372,6 +1397,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu;
+ unsigned long irqflags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
@@ -1394,7 +1420,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, irqflags);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1410,11 +1436,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- mutex_unlock(&smmu_domain->lock);
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- dsb();
+ spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
return ret;
}
@@ -1494,6 +1516,13 @@ static int arm_smmu_add_device(struct device *dev)
{
struct arm_smmu_device *child, *parent, *smmu;
struct arm_smmu_master *master = NULL;
+ struct iommu_group *group;
+ int ret;
+
+ if (dev->archdata.iommu) {
+ dev_warn(dev, "IOMMU driver already assigned to device\n");
+ return -EINVAL;
+ }
spin_lock(&arm_smmu_devices_lock);
list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1555,23 @@ static int arm_smmu_add_device(struct device *dev)
if (!master)
return -ENODEV;
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
dev->archdata.iommu = smmu;
- return 0;
+
+ return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
dev->archdata.iommu = NULL;
+ iommu_group_remove_device(dev);
}
static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1769,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* allocation (PTRS_PER_PGD).
*/
#ifdef CONFIG_64BIT
- /* Current maximum output size of 39 bits */
smmu->s1_output_size = min(39UL, size);
#else
smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1783,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
} else {
#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
- size = min(39, arm_smmu_id_size_to_bits(size));
+ size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
size = 32;
#endif
@@ -1968,8 +2006,10 @@ static int __init arm_smmu_init(void)
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype))
bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
return 0;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8b452c9676d9..158156543410 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
segment, scope->bus, path->device, path->function);
- *dev = NULL;
return 0;
}
if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
ret = dmar_parse_one_dev_scope(scope,
&(*devices)[index], segment);
if (ret) {
- kfree(*devices);
+ dmar_free_dev_scope(devices, cnt);
return ret;
}
index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
return 0;
}
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+ if (*devices && *cnt) {
+ while (--*cnt >= 0)
+ pci_dev_put((*devices)[*cnt]);
+ kfree(*devices);
+ *devices = NULL;
+ *cnt = 0;
+ }
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+ if (dmaru->devices && dmaru->devices_cnt)
+ dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+ if (dmaru->iommu)
+ free_iommu(dmaru->iommu);
+ kfree(dmaru);
+}
+
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
- int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
if (dmaru->include_all)
return 0;
- ret = dmar_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + drhd->header.length,
- &dmaru->devices_cnt, &dmaru->devices,
- drhd->segment);
- if (ret) {
- list_del(&dmaru->list);
- kfree(dmaru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(drhd + 1),
+ ((void *)drhd) + drhd->header.length,
+ &dmaru->devices_cnt, &dmaru->devices,
+ drhd->segment);
}
#ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
static int dmar_dev_scope_initialized;
- struct dmar_drhd_unit *drhd, *drhd_n;
+ struct dmar_drhd_unit *drhd;
int ret = -ENODEV;
if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
if (list_empty(&dmar_drhd_units))
goto fail;
- list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
static int dmar_table_initialized;
int ret;
- if (dmar_table_initialized)
- return 0;
-
- dmar_table_initialized = 1;
-
- ret = parse_dmar_table();
- if (ret) {
- if (ret != -ENODEV)
- pr_info("parse DMAR table failure.\n");
- return ret;
- }
+ if (dmar_table_initialized == 0) {
+ ret = parse_dmar_table();
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ pr_info("parse DMAR table failure.\n");
+ } else if (list_empty(&dmar_drhd_units)) {
+ pr_info("No DMAR devices found\n");
+ ret = -ENODEV;
+ }
- if (list_empty(&dmar_drhd_units)) {
- pr_info("No DMAR devices found\n");
- return -ENODEV;
+ if (ret < 0)
+ dmar_table_initialized = ret;
+ else
+ dmar_table_initialized = 1;
}
- return 0;
+ return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
- struct acpi_table_dmar *dmar;
-
- dmar = (struct acpi_table_dmar *) dmar_tbl;
-
- if (ret && irq_remapping_enabled && cpu_has_x2apic &&
- dmar->flags & 0x1)
- pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
- early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+ early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
return err;
}
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return err;
}
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
{
- if (!iommu)
- return;
+ if (iommu->irq) {
+ free_irq(iommu->irq, iommu);
+ irq_set_handler_data(iommu->irq, NULL);
+ destroy_irq(iommu->irq);
+ }
- free_dmar_iommu(iommu);
+ if (iommu->qi) {
+ free_page((unsigned long)iommu->qi->desc);
+ kfree(iommu->qi->desc_status);
+ kfree(iommu->qi);
+ }
if (iommu->reg)
unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
if (!desc_page) {
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
- iommu->qi = 0;
+ iommu->qi = NULL;
return -ENOMEM;
}
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
if (fault_reason >= 0x20 && (fault_reason - 0x20 <
ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/*
* Enable fault control interrupt.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
+ for_each_iommu(iommu, drhd) {
u32 fault_status;
- ret = dmar_set_interrupt(iommu);
+ int ret = dmar_set_interrupt(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
return 0;
return dmar->flags & 0x1;
}
+
+static int __init dmar_free_unused_resources(void)
+{
+ struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+ /* DMAR units are in use */
+ if (irq_remapping_enabled || intel_iommu_enabled)
+ return 0;
+
+ list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+ list_del(&dmaru->list);
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30da979..93072ba44b1d 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while attaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
- if (dev->bus == &pci_bus_type) {
+ if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
/* Don't create device groups for virtual PCI bridges */
if (pdev->subordinate)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 59779e19315e..a22c86c867fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -63,6 +63,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
static inline int agaw_to_width(int agaw)
{
- return 30 + agaw * LEVEL_STRIDE;
+ return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
- return (width - 30) / LEVEL_STRIDE;
+ return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << ((lvl - 1) * LEVEL_STRIDE);
+ return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
pte->val = 0;
}
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
- pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
- pte->val = (pte->val & ~3) | (prot & 3);
-}
-
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
#endif
}
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
- pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
static void flush_unmaps_timeout(unsigned long data);
-DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
struct dmar_drhd_unit *drhd = NULL;
int i;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
+ for_each_active_drhd_unit(drhd) {
if (segment != drhd->segment)
continue;
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
- int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
} while (start_pfn && start_pfn <= last_pfn);
- order = (large_page - 1) * 9;
- return order;
+ return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
- ndomains);
+ pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+ iommu->seq_id, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
- printk(KERN_ERR "Allocating domain id array failed\n");
+ pr_err("IOMMU%d: allocating domain id array failed\n",
+ iommu->seq_id);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
- printk(KERN_ERR "Allocating domain array failed\n");
+ pr_err("IOMMU%d: allocating domain array failed\n",
+ iommu->seq_id);
+ kfree(iommu->domain_ids);
+ iommu->domain_ids = NULL;
return -ENOMEM;
}
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
- int i;
+ int i, count;
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
clear_bit(i, iommu->domain_ids);
spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
+ count = --domain->iommu_count;
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
+ if (count == 0) {
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_exit(domain);
else
domain_exit(domain);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
}
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- if (iommu->irq) {
- irq_set_handler_data(iommu->irq, NULL);
- /* This will mask the irq */
- free_irq(iommu->irq, iommu);
- destroy_irq(iommu->irq);
- }
-
kfree(iommu->domains);
kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
g_iommus[iommu->seq_id] = NULL;
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
- pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
}
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ pr_debug("IOMMU: identity mapping domain is domain %d\n",
+ si_domain->id);
if (hw)
return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
goto error;
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
/*
* Start from the sane iommu hardware state.
*/
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
dmar_disable_qi(iommu);
}
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
* global invalidate iotlb
* enable translation
*/
- for_each_drhd_unit(drhd) {
+ for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
- iommu_disable_protect_mem_regions(drhd->iommu);
+ iommu_disable_protect_mem_regions(iommu);
continue;
}
- iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
return 0;
error:
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
- free_iommu(iommu);
- }
+ for_each_active_iommu(iommu, drhd)
+ free_dmar_iommu(iommu);
+ kfree(deferred_flush);
kfree(g_iommus);
return ret;
}
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
struct pci_dev *pdev;
int found;
- if (unlikely(dev->bus != &pci_bus_type))
+ if (unlikely(!dev_is_pci(dev)))
return 1;
pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
}
}
- for_each_drhd_unit(drhd) {
+ for_each_active_drhd_unit(drhd) {
int i;
- if (drhd->ignored || drhd->include_all)
+ if (drhd->include_all)
continue;
for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
struct acpi_dmar_reserved_memory *rmrr;
- int ret;
rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
- ret = dmar_parse_dev_scope((void *)(rmrr + 1),
- ((void *)rmrr) + rmrr->header.length,
- &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
- if (ret || (rmrru->devices_cnt == 0)) {
- list_del(&rmrru->list);
- kfree(rmrru);
- }
- return ret;
+ return dmar_parse_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt, &rmrru->devices,
+ rmrr->segment);
}
static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
- int rc;
struct acpi_dmar_atsr *atsr;
if (atsru->include_all)
return 0;
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
- rc = dmar_parse_dev_scope((void *)(atsr + 1),
- (void *)atsr + atsr->header.length,
- &atsru->devices_cnt, &atsru->devices,
- atsr->segment);
- if (rc || !atsru->devices_cnt) {
- list_del(&atsru->list);
- kfree(atsru);
+ return dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+ dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+ kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+ struct dmar_rmrr_unit *rmrru, *rmrr_n;
+ struct dmar_atsr_unit *atsru, *atsr_n;
+
+ list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+ list_del(&rmrru->list);
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru);
}
- return rc;
+ list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+ list_del(&atsru->list);
+ intel_iommu_free_atsr(atsru);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
int __init dmar_parse_rmrr_atsr_dev(void)
{
- struct dmar_rmrr_unit *rmrr, *rmrr_n;
- struct dmar_atsr_unit *atsr, *atsr_n;
+ struct dmar_rmrr_unit *rmrr;
+ struct dmar_atsr_unit *atsr;
int ret = 0;
- list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+ list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
- list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ list_for_each_entry(atsr, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
- int ret = 0;
+ int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
- return -ENODEV;
+ goto out_free_dmar;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu;
-
- if (drhd->ignored)
- continue;
-
- iommu = drhd->iommu;
+ for_each_active_iommu(iommu, drhd)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- }
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (no_iommu || dmar_disabled)
- return -ENODEV;
+ goto out_free_dmar;
if (iommu_init_mempool()) {
if (force_on)
panic("tboot: Failed to initialize iommu memory\n");
- return -ENODEV;
+ goto out_free_dmar;
}
if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
- return -ENODEV;
+ goto out_free_mempool;
}
init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
- put_iova_domain(&reserved_iova_list);
- iommu_exit_mempool();
- return ret;
+ goto out_free_reserved_range;
}
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
intel_iommu_enabled = 1;
return 0;
+
+out_free_reserved_range:
+ put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+ iommu_exit_mempool();
+out_free_dmar:
+ intel_iommu_free_dmars();
+ return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
}
/* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
if (!domain)
return NULL;
- domain->id = vm_domid++;
+ domain->id = atomic_inc_return(&vm_domid);
domain->nid = -1;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
unsigned long i;
unsigned long ndomains;
- for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
- iommu = drhd->iommu;
-
+ for_each_active_iommu(iommu, drhd) {
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index bab10b1002fb..ef5f65dbafe9 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -6,11 +6,11 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/intel-iommu.h>
+#include <linux/acpi.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
-#include <linux/intel-iommu.h>
-#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+static int __init parse_ioapics_under_ir(void);
+
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
- u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
- int i;
+ int index;
if (!count || !irq_iommu)
return -1;
- /*
- * start the IRTE search from index 0.
- */
- index = start_index = 0;
-
if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
}
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
- break;
- /* empty index found */
- if (i == index + count)
- break;
-
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
- if (index == start_index) {
- raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
- } while (1);
-
- for (i = index; i < index + count; i++)
- table->base[i].present = 1;
-
- cfg->remapped = 1;
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
-
+ index = bitmap_find_free_region(table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES, mask);
+ if (index < 0) {
+ pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+ } else {
+ cfg->remapped = 1;
+ irq_iommu->iommu = iommu;
+ irq_iommu->irte_index = index;
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = mask;
+ }
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
+ bitmap_release_region(iommu->ir_table->bitmap, index,
+ irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
return -1;
}
- set_irte_sid(irte, 1, 0, sid);
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
return 0;
}
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
+ unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER);
if (!pages) {
- printk(KERN_ERR "failed to allocate pages of order %d\n",
- INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate pages of order %d\n",
+ iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
+ bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+ sizeof(long), GFP_ATOMIC);
+ if (bitmap == NULL) {
+ pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+ kfree(ir_table);
+ return -ENOMEM;
+ }
+
ir_table->base = page_address(pages);
+ ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
if (disable_irq_remap)
return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
if (!dmar_ir_support())
return 0;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
- }
return 1;
}
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
}
if (x2apic_present) {
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
/*
* Enable queued invalidation for all the DRHD's.
*/
- for_each_drhd_unit(drhd) {
- int ret;
- struct intel_iommu *iommu = drhd->iommu;
- ret = dmar_enable_qi(iommu);
+ for_each_iommu(iommu, drhd) {
+ int ret = dmar_enable_qi(iommu);
if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
* Finds the association between IOAPICs and their Interrupt-remapping
* hardware unit.
*/
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;
- for_each_drhd_unit(drhd) {
- struct intel_iommu *iommu = drhd->iommu;
-
+ for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
}
- }
if (!ir_supported)
return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
return 1;
}
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
{
if (!irq_remapping_enabled)
return 0;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 39f81aeefcd6..228632c99adb 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
return do_setup_msix_irqs(dev, nvec);
}
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
/*
* Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
if (!config_enabled(CONFIG_SMP) || !remap_ops ||
!remap_ops->set_affinity)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index ee249bc959f8..e550ccb7634e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
+#include <linux/of_iommu.h>
/**
* of_get_dma_window - Parse *dma-window property and return 0 if found.
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index d97fbe4fb9b1..80fffba7f12d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -354,8 +354,8 @@ DEBUG_FOPS(mem);
return -ENOMEM; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data)
{
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index d572863dfccd..464acda0bbc4 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -343,7 +343,7 @@ static int shmobile_iommu_add_device(struct device *dev)
mapping = archdata->iommu_mapping;
if (!mapping) {
mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
- L1_LEN << 20, 0);
+ L1_LEN << 20);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
archdata->iommu_mapping = mapping;
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
kmem_cache_destroy(l1cache);
return -ENOMEM;
}
- archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+ archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
if (!archdata) {
kmem_cache_destroy(l1cache);
kmem_cache_destroy(l2cache);
return -ENOMEM;
}
spin_lock_init(&archdata->attach_lock);
- archdata->attached = NULL;
archdata->ipmmu = ipmmu;
ipmmu_archdata = archdata;
bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index 8321f89596c4..e3bc2e19b6dd 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
if (ipmmu->tlb_enabled)
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
else
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
if (!ipmmu)
return;
- mutex_lock(&ipmmu->flush_lock);
+ spin_lock(&ipmmu->flush_lock);
switch (size) {
default:
ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
}
ipmmu_reg_write(ipmmu, IMTTBR, phys);
ipmmu_reg_write(ipmmu, IMASID, asid);
- mutex_unlock(&ipmmu->flush_lock);
+ spin_unlock(&ipmmu->flush_lock);
}
static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate device data\n");
return -ENOMEM;
}
- mutex_init(&ipmmu->flush_lock);
+ spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
index 4d53684673e1..9524743ca1fb 100644
--- a/drivers/iommu/shmobile-ipmmu.h
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
struct device *dev;
void __iomem *ipmmu_base;
int tlb_enabled;
- struct mutex flush_lock;
+ spinlock_t flush_lock;
const char * const *dev_names;
unsigned int num_dev_names;
};
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3792a1aa52b8..61ffdca96e25 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -30,6 +30,10 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config DW_APB_ICTL
+ bool
+ select IRQ_DOMAIN
+
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -61,3 +65,7 @@ config VERSATILE_FPGA_IRQ_NR
int
default 4
depends on VERSATILE_FPGA_IRQ
+
+config XTENSA_MX
+ bool
+ select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c60b9010b152..1c0c151d108c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,11 +6,13 @@ obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
+obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
@@ -20,5 +22,8 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
+obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
+obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 868ed40cb6bf..40e6440348ff 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -171,8 +171,7 @@ static struct irq_domain_ops combiner_irq_domain_ops = {
static void __init combiner_init(void __iomem *combiner_base,
struct device_node *np,
- unsigned int max_nr,
- int irq_base)
+ unsigned int max_nr)
{
int i, irq;
unsigned int nr_irq;
@@ -186,7 +185,7 @@ static void __init combiner_init(void __iomem *combiner_base,
return;
}
- combiner_irq_domain = irq_domain_add_simple(np, nr_irq, irq_base,
+ combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warning("%s: irq domain init failed\n", __func__);
@@ -207,7 +206,6 @@ static int __init combiner_of_init(struct device_node *np,
{
void __iomem *combiner_base;
unsigned int max_nr = 20;
- int irq_base = -1;
combiner_base = of_iomap(np, 0);
if (!combiner_base) {
@@ -221,14 +219,7 @@ static int __init combiner_of_init(struct device_node *np,
__func__, max_nr);
}
- /*
- * FIXME: This is a hardwired COMBINER_IRQ(0,0). Once all devices
- * get their IRQ from DT, remove this in order to get dynamic
- * allocation.
- */
- irq_base = 160;
-
- combiner_init(combiner_base, np, max_nr, irq_base);
+ combiner_init(combiner_base, np, max_nr);
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 433cc8568dec..41be897df8d5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -42,6 +43,7 @@
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+#define ARMADA_375_PPI_CAUSE (0x10)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
@@ -59,8 +61,6 @@
#define PCI_MSI_DOORBELL_END (32)
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
@@ -239,6 +239,8 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
#endif
#ifdef CONFIG_SMP
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
@@ -352,7 +354,63 @@ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static asmlinkage void __exception_irq_entry
+#ifdef CONFIG_PCI_MSI
+static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+{
+ u32 msimask, msinr;
+
+ msimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & PCI_MSI_DOORBELL_MASK;
+
+ writel(~msimask, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ for (msinr = PCI_MSI_DOORBELL_START;
+ msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ int irq;
+
+ if (!(msimask & BIT(msinr)))
+ continue;
+
+ irq = irq_find_mapping(armada_370_xp_msi_domain,
+ msinr - 16);
+
+ if (is_chained)
+ generic_handle_irq(irq);
+ else
+ handle_IRQ(irq, regs);
+ }
+}
+#else
+static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned long irqmap, irqn;
+ unsigned int cascade_irq;
+
+ chained_irq_enter(chip, desc);
+
+ irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+
+ if (irqmap & BIT(0)) {
+ armada_370_xp_handle_msi_irq(NULL, true);
+ irqmap &= ~BIT(0);
+ }
+
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+ cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+ generic_handle_irq(cascade_irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
@@ -372,31 +430,9 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
continue;
}
-#ifdef CONFIG_PCI_MSI
/* MSI handling */
- if (irqnr == 1) {
- u32 msimask, msinr;
-
- msimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & PCI_MSI_DOORBELL_MASK;
-
- writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
- for (msinr = PCI_MSI_DOORBELL_START;
- msinr < PCI_MSI_DOORBELL_END; msinr++) {
- int irq;
-
- if (!(msimask & BIT(msinr)))
- continue;
-
- irq = irq_find_mapping(armada_370_xp_msi_domain,
- msinr - 16);
- handle_IRQ(irq, regs);
- }
- }
-#endif
+ if (irqnr == 1)
+ armada_370_xp_handle_msi_irq(regs, false);
#ifdef CONFIG_SMP
/* IPI Handling */
@@ -407,7 +443,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */
@@ -427,6 +463,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
+ int parent_irq;
u32 control;
BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -455,8 +492,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
- irq_set_default_host(armada_370_xp_mpic_domain);
-
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
@@ -472,7 +507,14 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
armada_370_xp_msi_init(node, main_int_res.start);
- set_handle_irq(armada_370_xp_handle_irq);
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (parent_irq <= 0) {
+ irq_set_default_host(armada_370_xp_mpic_domain);
+ set_handle_irq(armada_370_xp_handle_irq);
+ } else {
+ irq_set_chained_handler(parent_irq,
+ armada_370_xp_mpic_handle_cascade_irq);
+ }
return 0;
}
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 1693b8e7f26a..5916d6cdafa1 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -95,7 +95,7 @@ struct armctrl_ic {
};
static struct armctrl_ic intc __read_mostly;
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
static void armctrl_mask_irq(struct irq_data *d)
@@ -196,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
}
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
u32 stat, irq;
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
new file mode 100644
index 000000000000..31e231e1f566
--- /dev/null
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -0,0 +1,150 @@
+/*
+ * Synopsys DW APB ICTL irqchip driver.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * based on GPL'ed 2.6 kernel sources
+ * (c) Marvell International Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "irqchip.h"
+
+#define APB_INT_ENABLE_L 0x00
+#define APB_INT_ENABLE_H 0x04
+#define APB_INT_MASK_L 0x08
+#define APB_INT_MASK_H 0x0c
+#define APB_INT_FINALSTATUS_L 0x30
+#define APB_INT_FINALSTATUS_H 0x34
+
+static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip_generic *gc = irq_get_handler_data(irq);
+ struct irq_domain *d = gc->private;
+ u32 stat;
+ int n;
+
+ chained_irq_enter(chip, desc);
+
+ for (n = 0; n < gc->num_ct; n++) {
+ stat = readl_relaxed(gc->reg_base +
+ APB_INT_FINALSTATUS_L + 4 * n);
+ while (stat) {
+ u32 hwirq = ffs(stat) - 1;
+ generic_handle_irq(irq_find_mapping(d,
+ gc->irq_base + hwirq + 32 * n));
+ stat &= ~(1 << hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int __init dw_apb_ictl_init(struct device_node *np,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct resource r;
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ void __iomem *iobase;
+ int ret, nrirqs, irq;
+ u32 reg;
+
+ /* Map the parent interrupt for the chained handler */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse irq\n", np->full_name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret) {
+ pr_err("%s: unable to get resource\n", np->full_name);
+ return ret;
+ }
+
+ if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
+ pr_err("%s: unable to request mem region\n", np->full_name);
+ return -ENOMEM;
+ }
+
+ iobase = ioremap(r.start, resource_size(&r));
+ if (!iobase) {
+ pr_err("%s: unable to map resource\n", np->full_name);
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ /*
+ * The DW IP can be configured to allow 2-64 irqs. We can determine
+ * the number of irqs supported by writing all ones into the enable
+ * register and looking for bits that stay clear, as the corresponding
+ * flip-flops will have been removed by the synthesis tool.
+ */
+
+ /* mask and enable all interrupts */
+ writel(~0, iobase + APB_INT_MASK_L);
+ writel(~0, iobase + APB_INT_MASK_H);
+ writel(~0, iobase + APB_INT_ENABLE_L);
+ writel(~0, iobase + APB_INT_ENABLE_H);
+
+ reg = readl(iobase + APB_INT_ENABLE_H);
+ if (reg)
+ nrirqs = 32 + fls(reg);
+ else
+ nrirqs = fls(readl(iobase + APB_INT_ENABLE_L));
+
+ domain = irq_domain_add_linear(np, nrirqs,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", np->full_name);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
+ np->name, handle_level_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+ goto err_unmap;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->private = domain;
+ gc->reg_base = iobase;
+
+ gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+
+ if (nrirqs > 32) {
+ gc->chip_types[1].regs.mask = APB_INT_MASK_H;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
+ }
+
+ irq_set_handler_data(irq, gc);
+ irq_set_chained_handler(irq, dw_apb_ictl_handler);
+
+ return 0;
+
+err_unmap:
+ iounmap(iobase);
+err_release:
+ release_mem_region(r.start, resource_size(&r));
+ return ret;
+}
+IRQCHIP_DECLARE(dw_apb_ictl,
+ "snps,dw-apb-ictl", dw_apb_ictl_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 341c6016812d..531769b2433a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -50,7 +50,7 @@
union gic_base {
void __iomem *common_base;
- void __percpu __iomem **percpu_base;
+ void __percpu * __iomem *percpu_base;
};
struct gic_chip_data {
@@ -279,7 +279,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
#define gic_set_wake NULL
#endif
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
struct gic_chip_data *gic = &gic_data[0];
@@ -648,7 +648,7 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
unsigned long flags, map = 0;
@@ -869,7 +869,7 @@ static struct notifier_block gic_cpu_notifier = {
};
#endif
-const struct irq_domain_ops gic_irq_domain_ops = {
+static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.xlate = gic_irq_domain_xlate,
};
@@ -974,7 +974,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_OF
static int gic_cnt __initdata;
-int __init gic_of_init(struct device_node *node, struct device_node *parent)
+static int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 92c41ab4dbfd..2cb474ad8809 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 8e94d7a3b20d..c16c186d97d3 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 2cb7cd0bc2f5..3c8827fe83f3 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -194,8 +194,7 @@ static struct mmp_intc_conf mmp2_conf = {
.conf_mask = 0x7f,
};
-static asmlinkage void __exception_irq_entry
-mmp_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
{
int irq, hwirq;
@@ -207,8 +206,7 @@ mmp_handle_irq(struct pt_regs *regs)
handle_IRQ(irq, regs);
}
-static asmlinkage void __exception_irq_entry
-mmp2_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
{
int irq, hwirq;
diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c
index 5552fc2bf28a..00b3cc908f76 100644
--- a/drivers/irqchip/irq-moxart.c
+++ b/drivers/irqchip/irq-moxart.c
@@ -44,7 +44,7 @@ struct moxart_irq_data {
static struct moxart_irq_data intc;
-static asmlinkage void __exception_irq_entry handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry handle_irq(struct pt_regs *regs)
{
u32 irqstat;
int hwirq;
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d40031884..e25f246cd2fb 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -30,7 +30,7 @@
static struct irq_domain *orion_irq_domain;
-static asmlinkage void
+static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *d = irq_get_handler_data(irq);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
+
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
gc->mask_cache;
@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of the IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmasking.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_ack(d);
+ ct->chip.irq_unmask(d);
+ return 0;
+}
+
static int __init orion_bridge_irq_init(struct device_node *np,
struct device_node *parent)
{
@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
}
ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->name);
return ret;
@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
- /* mask all interrupts */
+ /* mask and clear all interrupts */
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
irq_set_handler_data(irq, domain);
irq_set_chained_handler(irq, orion_bridge_irq_handler);
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 2f404ba61c6c..8777065012a5 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -81,15 +81,12 @@ static void irqc_irq_disable(struct irq_data *d)
iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
}
-#define INTC_IRQ_SENSE_VALID 0x10
-#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
-
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
- [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x01),
- [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x02),
- [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x04), /* Synchronous */
- [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x08), /* Synchronous */
- [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x0c), /* Synchronous */
+ [IRQ_TYPE_LEVEL_LOW] = 0x01,
+ [IRQ_TYPE_LEVEL_HIGH] = 0x02,
+ [IRQ_TYPE_EDGE_FALLING] = 0x04, /* Synchronous */
+ [IRQ_TYPE_EDGE_RISING] = 0x08, /* Synchronous */
+ [IRQ_TYPE_EDGE_BOTH] = 0x0c, /* Synchronous */
};
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
@@ -101,12 +98,12 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
irqc_dbg(&p->irq[hw_irq], "sense");
- if (!(value & INTC_IRQ_SENSE_VALID))
+ if (!value)
return -EINVAL;
tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
tmp &= ~0x3f;
- tmp |= value ^ INTC_IRQ_SENSE_VALID;
+ tmp |= value;
iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
return 0;
}
@@ -212,10 +209,8 @@ static int irqc_probe(struct platform_device *pdev)
irq_chip->name = name;
irq_chip->irq_mask = irqc_irq_disable;
irq_chip->irq_unmask = irqc_irq_enable;
- irq_chip->irq_enable = irqc_irq_enable;
- irq_chip->irq_disable = irqc_irq_disable;
irq_chip->irq_set_type = irqc_irq_set_type;
- irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
+ irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
p->number_of_irqs,
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 4851afae38dc..581eefe331ae 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -34,9 +34,10 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
struct irq_chip_type *ct;
int ret;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int set = IRQ_LEVEL;
ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
gc->reg_base = base;
@@ -46,7 +47,7 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
}
-static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
void __iomem *base = sirfsoc_irqdomain->host_data;
u32 irqstat, irqnr;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index a5438d889245..6fcef4a95a18 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -36,18 +36,16 @@
static void __iomem *sun4i_irq_base;
static struct irq_domain *sun4i_irq_domain;
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
static void sun4i_irq_ack(struct irq_data *irqd)
{
unsigned int irq = irqd_to_hwirq(irqd);
- unsigned int irq_off = irq % 32;
- int reg = irq / 32;
- u32 val;
- val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
- writel(val | (1 << irq_off),
- sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
+ if (irq != 0)
+ return; /* Only IRQ 0 / the ENMI needs to be acked */
+
+ writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
}
static void sun4i_irq_mask(struct irq_data *irqd)
@@ -76,16 +74,16 @@ static void sun4i_irq_unmask(struct irq_data *irqd)
static struct irq_chip sun4i_irq_chip = {
.name = "sun4i_irq",
- .irq_ack = sun4i_irq_ack,
+ .irq_eoi = sun4i_irq_ack,
.irq_mask = sun4i_irq_mask,
.irq_unmask = sun4i_irq_unmask,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
};
static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
- irq_set_chip_and_handler(virq, &sun4i_irq_chip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
return 0;
@@ -109,7 +107,7 @@ static int __init sun4i_of_init(struct device_node *node,
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
- /* Mask all the interrupts */
+ /* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
@@ -134,16 +132,30 @@ static int __init sun4i_of_init(struct device_node *node,
return 0;
}
-IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init);
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
{
u32 irq, hwirq;
+ /*
+ * hwirq == 0 can mean one of 3 things:
+ * 1) no more irqs pending
+ * 2) irq 0 pending
+ * 3) spurious irq
+ * So if we immediately get a reading of 0, check the irq-pending reg
+ * to differentiate between 2 and 3. We only do this once to avoid
+ * the extra check in the common case of 1 happening after having
+ * read the vector-reg once.
+ */
hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
- while (hwirq != 0) {
+ if (hwirq == 0 &&
+ !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
+ return;
+
+ do {
irq = irq_find_mapping(sun4i_irq_domain, hwirq);
handle_IRQ(irq, regs);
hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
- }
+ } while (hwirq != 0);
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
new file mode 100644
index 000000000000..12f547a44ae4
--- /dev/null
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -0,0 +1,208 @@
+/*
+ * Allwinner A20/A31 SoCs NMI IRQ chip driver.
+ *
+ * Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include "irqchip.h"
+
+#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
+
+enum {
+ SUNXI_SRC_TYPE_LEVEL_LOW = 0,
+ SUNXI_SRC_TYPE_EDGE_FALLING,
+ SUNXI_SRC_TYPE_LEVEL_HIGH,
+ SUNXI_SRC_TYPE_EDGE_RISING,
+};
+
+struct sunxi_sc_nmi_reg_offs {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+};
+
+static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
+ .ctrl = 0x00,
+ .pend = 0x04,
+ .enable = 0x08,
+};
+
+static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
+ .ctrl = 0x00,
+ .pend = 0x04,
+ .enable = 0x34,
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+ u32 val)
+{
+ irq_reg_writel(val, gc->reg_base + off);
+}
+
+static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+{
+ return irq_reg_readl(gc->reg_base + off);
+}
+
+static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int virq = irq_find_mapping(domain, 0);
+
+ chained_irq_enter(chip, desc);
+ generic_handle_irq(virq);
+ chained_irq_exit(chip, desc);
+}
+
+static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = gc->chip_types;
+ u32 src_type_reg;
+ u32 ctrl_off = ct->regs.type;
+ unsigned int src_type;
+ unsigned int i;
+
+ irq_gc_lock(gc);
+
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ src_type = SUNXI_SRC_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_LEVEL_LOW:
+ src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
+ break;
+ default:
+ irq_gc_unlock(gc);
+ pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
+ __func__, data->irq);
+ return -EBADR;
+ }
+
+ irqd_set_trigger_type(data, flow_type);
+ irq_setup_alt_chip(data, flow_type);
+
+ for (i = 0; i <= gc->num_ct; i++, ct++)
+ if (ct->type & flow_type)
+ ctrl_off = ct->regs.type;
+
+ src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
+ src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
+ src_type_reg |= src_type;
+ sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
+
+ irq_gc_unlock(gc);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ struct sunxi_sc_nmi_reg_offs *reg_offs)
+{
+ struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ unsigned int irq;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ int ret;
+
+
+ domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: Could not register interrupt domain.\n", node->name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
+ handle_fasteoi_irq, clr, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: Could not allocate generic interrupt chip.\n",
+ node->name);
+ goto fail_irqd_remove;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse irq\n", node->name);
+ ret = -EINVAL;
+ goto fail_irqd_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = of_iomap(node, 0);
+ if (!gc->reg_base) {
+ pr_err("%s: unable to map resource\n", node->name);
+ ret = -ENOMEM;
+ goto fail_irqd_remove;
+ }
+
+ gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
+ gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].regs.ack = reg_offs->pend;
+ gc->chip_types[0].regs.mask = reg_offs->enable;
+ gc->chip_types[0].regs.type = reg_offs->ctrl;
+
+ gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
+ gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
+ gc->chip_types[1].regs.ack = reg_offs->pend;
+ gc->chip_types[1].regs.mask = reg_offs->enable;
+ gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].handler = handle_edge_irq;
+
+ sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
+
+ irq_set_handler_data(irq, domain);
+ irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+
+ return 0;
+
+fail_irqd_remove:
+ irq_domain_remove(domain);
+
+ return ret;
+}
+
+static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
+
+static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+}
+IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
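The two chip types registered above share sunxi_sc_nmi_set_type() and mark the EOI as threaded. A minimal sketch of how a downstream driver might consume this single NMI line (an illustration only, not part of the patch; the device, handler name and level-low trigger are assumptions):

#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical PMIC alert handler; running it as a thread lets the
 * IRQCHIP_EOI_THREADED flag above defer the EOI until it completes. */
static irqreturn_t example_pmic_nmi(int irq, void *data)
{
	/* talk to the device over a sleeping bus (I2C/RSB) here */
	return IRQ_HANDLED;
}

static int example_request_nmi(struct device *dev, int irq, void *priv)
{
	/* The trigger flag ends up in sunxi_sc_nmi_set_type(); level-low is
	 * typical for PMIC alert lines but is only an assumption here. */
	return devm_request_threaded_irq(dev, irq, NULL, example_pmic_nmi,
					 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					 "example-pmic-nmi", priv);
}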
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 47a52ab580d8..3ae2bb8d9cf2 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -167,8 +168,12 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
f->used_irqs++;
}
- pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs\n",
+ pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
fpga_irq_id, name, base, f->used_irqs);
+ if (parent_irq != -1)
+ pr_cont(", parent IRQ: %d\n", parent_irq);
+ else
+ pr_cont("\n");
fpga_irq_id++;
}
@@ -180,6 +185,7 @@ int __init fpga_irq_of_init(struct device_node *node,
void __iomem *base;
u32 clear_mask;
u32 valid_mask;
+ int parent_irq;
if (WARN_ON(!node))
return -ENODEV;
@@ -193,7 +199,12 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
- fpga_irq_init(base, node->name, 0, -1, valid_mask, node);
+ /* Some chips are cascaded from a parent IRQ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq)
+ parent_irq = -1;
+
+ fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 8e21ae0bab46..473f09a74d4d 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -228,7 +228,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
* Keep iterating over all registered VIC's until there are no pending
* interrupts.
*/
-static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
{
int i, handled;
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 1846e7d66681..eb6e91efdec8 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -178,8 +178,7 @@ static struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static asmlinkage
-void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
u32 stat, i;
int irqnr, virq;
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
new file mode 100644
index 000000000000..e1c2f9632893
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -0,0 +1,164 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+
+#include "irqchip.h"
+
+#define HW_IRQ_IPI_COUNT 2
+#define HW_IRQ_MX_BASE 2
+#define HW_IRQ_EXTERN_BASE 3
+
+static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
+
+static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < HW_IRQ_IPI_COUNT) {
+ struct irq_chip *irq_chip = d->host_data;
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "ipi");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ return 0;
+ }
+ return xtensa_irq_map(d, irq, hw);
+}
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
+ .xlate = xtensa_mx_irq_domain_xlate,
+ .map = xtensa_mx_irq_map,
+};
+
+void secondary_init_irq(void)
+{
+ __this_cpu_write(cached_irq_mask,
+ XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
+ set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
+}
+
+static void xtensa_mx_irq_mask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+ HW_IRQ_MX_BASE), MIENG);
+ } else {
+ mask = __this_cpu_read(cached_irq_mask) & ~mask;
+ __this_cpu_write(cached_irq_mask, mask);
+ set_sr(mask, intenable);
+ }
+}
+
+static void xtensa_mx_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = 1u << d->hwirq;
+
+ if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
+ HW_IRQ_MX_BASE), MIENGSET);
+ } else {
+ mask |= __this_cpu_read(cached_irq_mask);
+ __this_cpu_write(cached_irq_mask, mask);
+ set_sr(mask, intenable);
+ }
+}
+
+static void xtensa_mx_irq_enable(struct irq_data *d)
+{
+ variant_irq_enable(d->hwirq);
+ xtensa_mx_irq_unmask(d);
+}
+
+static void xtensa_mx_irq_disable(struct irq_data *d)
+{
+ xtensa_mx_irq_mask(d);
+ variant_irq_disable(d->hwirq);
+}
+
+static void xtensa_mx_irq_ack(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_mx_irq_retrigger(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intset);
+ return 1;
+}
+
+static int xtensa_mx_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *dest, bool force)
+{
+ unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
+
+ set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
+ return 0;
+
+}
+
+static struct irq_chip xtensa_mx_irq_chip = {
+ .name = "xtensa-mx",
+ .irq_enable = xtensa_mx_irq_enable,
+ .irq_disable = xtensa_mx_irq_disable,
+ .irq_mask = xtensa_mx_irq_mask,
+ .irq_unmask = xtensa_mx_irq_unmask,
+ .irq_ack = xtensa_mx_irq_ack,
+ .irq_retrigger = xtensa_mx_irq_retrigger,
+ .irq_set_affinity = xtensa_mx_irq_set_affinity,
+};
+
+int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+ return 0;
+}
+
+static int __init xtensa_mx_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
new file mode 100644
index 000000000000..7d71126d1ce5
--- /dev/null
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -0,0 +1,108 @@
+/*
+ * Xtensa built-in interrupt controller
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+
+#include "irqchip.h"
+
+unsigned int cached_irq_mask;
+
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ return xtensa_irq_domain_xlate(intspec, intsize,
+ intspec[0], intspec[0],
+ out_hwirq, out_type);
+}
+
+static const struct irq_domain_ops xtensa_irq_domain_ops = {
+ .xlate = xtensa_pic_irq_domain_xlate,
+ .map = xtensa_irq_map,
+};
+
+static void xtensa_irq_mask(struct irq_data *d)
+{
+ cached_irq_mask &= ~(1 << d->hwirq);
+ set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_unmask(struct irq_data *d)
+{
+ cached_irq_mask |= 1 << d->hwirq;
+ set_sr(cached_irq_mask, intenable);
+}
+
+static void xtensa_irq_enable(struct irq_data *d)
+{
+ variant_irq_enable(d->hwirq);
+ xtensa_irq_unmask(d);
+}
+
+static void xtensa_irq_disable(struct irq_data *d)
+{
+ xtensa_irq_mask(d);
+ variant_irq_disable(d->hwirq);
+}
+
+static void xtensa_irq_ack(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intclear);
+}
+
+static int xtensa_irq_retrigger(struct irq_data *d)
+{
+ set_sr(1 << d->hwirq, intset);
+ return 1;
+}
+
+static struct irq_chip xtensa_irq_chip = {
+ .name = "xtensa",
+ .irq_enable = xtensa_irq_enable,
+ .irq_disable = xtensa_irq_disable,
+ .irq_mask = xtensa_irq_mask,
+ .irq_unmask = xtensa_irq_unmask,
+ .irq_ack = xtensa_irq_ack,
+ .irq_retrigger = xtensa_irq_retrigger,
+};
+
+int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ &xtensa_irq_domain_ops, &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+
+static int __init xtensa_pic_init(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+ &xtensa_irq_chip);
+ irq_set_default_host(root_domain);
+ return 0;
+}
+IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..ceb3a4318f73
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIOTY 0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs =
+ &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+ readl(gc->reg_base + regs->ack);
+}
+
+static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ };
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+ writel(0xF, base + IO_MAX_PRIOTY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index f496afce29de..cad3e2495552 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -10,8 +10,7 @@
#include <linux/init.h>
#include <linux/of_irq.h>
-
-#include "irqchip.h"
+#include <linux/irqchip.h>
/*
* This special of_device_id is the sentinel at the end of the
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f04686580040..9816c51eb5c2 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,9 +16,17 @@ config CAPI_TRACE
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
+config ISDN_CAPI_CAPI20
+ tristate "CAPI2.0 /dev/capi support"
+ help
+ This option will provide the CAPI 2.0 interface to userspace
+ applications via /dev/capi20. Applications should use the
+ standardized libcapi20 to access this functionality. You should say
+ Y/M here.
+
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
- depends on TTY
+ depends on ISDN_CAPI_CAPI20 && TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
-config ISDN_CAPI_CAPI20
- tristate "CAPI2.0 /dev/capi support"
- help
- This option will provide the CAPI 2.0 interface to userspace
- applications via /dev/capi20. Applications should use the
- standardized libcapi20 to access this functionality. You should say
- Y/M here.
-
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
depends on ISDN_I4L
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index c49c294fc81e..414dbf6da89a 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1620,7 +1620,7 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
if (!request_region(hw->iobase, 8, hw->card_name)) {
printk(KERN_INFO
- "HFC-4S/8S: failed to rquest address space at 0x%04x\n",
+ "HFC-4S/8S: failed to request address space at 0x%04x\n",
hw->iobase);
goto out;
}
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
dp += sprintf(dp, " octet 3 ");
dp += prbits(dp, *p, 8, 8);
*dp++ = '\n';
- if (!(*p++ & 80)) {
+ if (!(*p++ & 0x80)) {
dp += sprintf(dp, " octet 4 ");
dp += prbits(dp, *p++, 8, 8);
*dp++ = '\n';
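The one-character change above matters because 80 decimal is 0x50: the old test "!(*p++ & 80)" looked at bits 4 and 6 of the octet instead of the high extension bit that Q.931 uses to flag the last octet. A standalone illustration (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned char octet = 0x80;	/* extension bit set, all others clear */

	printf("octet & 80   = 0x%02x\n", octet & 80);	/* 0x00 - bit missed */
	printf("octet & 0x80 = 0x%02x\n", octet & 0x80);	/* 0x80 - bit seen */
	return 0;
}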
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 8b98d53d9976..d9aebbc510cc 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1371,7 +1371,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
@@ -1382,7 +1382,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
*/
else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
- if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(eth->h_dest, dev->dev_addr))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 1747a02a019a..c0730d5c734d 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -17,7 +17,7 @@ config MISDN_DSP
This module may be used for special applications that require
cross connecting of bchannels, conferencing, dtmf decoding,
- echo cancelation, tone generation, and Blowfish encryption and
+ echo cancellation, tone generation, and Blowfish encryption and
decryption. It may use hardware features if available.
E.g. it is required for PBX4Linux. Go to http://isdn.eversberg.eu
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 5cefb479c707..1be82284cf9d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -135,7 +135,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
if (msg->msg_name) {
- struct sockaddr_mISDN *maddr = msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
@@ -179,7 +179,6 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = -ENOMEM;
- struct sockaddr_mISDN *maddr;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
@@ -214,7 +213,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
/* if we have a address, we use it */
- maddr = (struct sockaddr_mISDN *)msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
mISDN_HEAD_ID(skb) = maddr->channel;
} else { /* use default for L2 messages */
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
diff --git a/drivers/isdn/sc/event.c b/drivers/isdn/sc/event.c
index 717003a3bdf4..833d96c2cf92 100644
--- a/drivers/isdn/sc/event.c
+++ b/drivers/isdn/sc/event.c
@@ -57,7 +57,7 @@ int indicate_status(int card, int event, ulong Channel, char *Data)
memcpy(&cmd.parm.setup, Data, sizeof(cmd.parm.setup));
break;
default:
- strcpy(cmd.parm.num, Data);
+ strlcpy(cmd.parm.num, Data, sizeof(cmd.parm.num));
}
}
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 3c972b2f9893..e387f41a9cb7 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -242,18 +242,14 @@ EXPORT_SYMBOL_GPL(led_trigger_unregister);
void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
- struct list_head *entry;
+ struct led_classdev *led_cdev;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
- list_for_each(entry, &trig->led_cdevs) {
- struct led_classdev *led_cdev;
-
- led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- }
read_unlock(&trig->leddev_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -264,16 +260,13 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int oneshot,
int invert)
{
- struct list_head *entry;
+ struct led_classdev *led_cdev;
if (!trig)
return;
read_lock(&trig->leddev_list_lock);
- list_for_each(entry, &trig->led_cdevs) {
- struct led_classdev *led_cdev;
-
- led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
invert);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index a97263e902ff..2ec34cfcedce 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -152,12 +152,26 @@ static void lp5521_load_engine(struct lp55xx_chip *chip)
lp5521_wait_opmode_done();
}
-static void lp5521_stop_engine(struct lp55xx_chip *chip)
+static void lp5521_stop_all_engines(struct lp55xx_chip *chip)
{
lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
lp5521_wait_opmode_done();
}
+static void lp5521_stop_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
+ [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
+ [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
+ };
+
+ lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], 0);
+
+ lp5521_wait_opmode_done();
+}
+
static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
{
int ret;
@@ -564,7 +578,7 @@ static int lp5521_remove(struct i2c_client *client)
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
- lp5521_stop_engine(chip);
+ lp5521_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_unregister_leds(led, chip);
lp55xx_deinit_device(chip);
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fd9ab5f61441..4ade66a2d9d4 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -35,7 +35,15 @@
#include "leds-lp55xx-common.h"
-#define LP5523_PROGRAM_LENGTH 32
+#define LP5523_PROGRAM_LENGTH 32 /* bytes */
+/* Memory is used like this:
+ 0x00 engine 1 program
+ 0x10 engine 2 program
+ 0x20 engine 3 program
+ 0x30 engine 1 muxing info
+ 0x40 engine 2 muxing info
+ 0x50 engine 3 muxing info
+*/
#define LP5523_MAX_LEDS 9
/* Registers */
@@ -187,12 +195,26 @@ static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
}
-static void lp5523_stop_engine(struct lp55xx_chip *chip)
+static void lp5523_stop_all_engines(struct lp55xx_chip *chip)
{
lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
lp5523_wait_opmode_done();
}
+static void lp5523_stop_engine(struct lp55xx_chip *chip)
+{
+ enum lp55xx_engine_index idx = chip->engine_idx;
+ u8 mask[] = {
+ [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
+ [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
+ [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
+ };
+
+ lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], 0);
+
+ lp5523_wait_opmode_done();
+}
+
static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
{
int i;
@@ -303,7 +325,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
}
out:
- lp5523_stop_engine(chip);
+ lp5523_stop_all_engines(chip);
return ret;
}
@@ -774,7 +796,7 @@ static int lp5523_remove(struct i2c_client *client)
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
- lp5523_stop_engine(chip);
+ lp5523_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_unregister_leds(led, chip);
lp55xx_deinit_device(chip);
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 9acc6bb7deef..88317b4f7bf3 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -210,6 +210,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
{
struct lp55xx_chip *chip = context;
struct device *dev = &chip->cl->dev;
+ enum lp55xx_engine_index idx = chip->engine_idx;
if (!fw) {
dev_err(dev, "firmware request failed\n");
@@ -219,6 +220,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
/* handling firmware data is chip dependent */
mutex_lock(&chip->lock);
+ chip->engines[idx - 1].mode = LP55XX_ENGINE_LOAD;
chip->fw = fw;
if (chip->cfg->firmware_cb)
chip->cfg->firmware_cb(chip);
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index f1c704f2243a..00f068b0fa6f 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -263,7 +263,7 @@ static void lp8501_firmware_loaded(struct lp55xx_chip *chip)
}
/*
- * Program momery sequence
+ * Program memory sequence
* 1) set engine mode to "LOAD"
* 2) write firmware data into program memory
*/
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index fa9b439323bd..ca87a1b4a0db 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -117,9 +117,7 @@ static void mc13xxx_led_work(struct work_struct *work)
BUG();
}
- mc13xxx_lock(led->master);
mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
- mc13xxx_unlock(led->master);
}
static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -132,75 +130,6 @@ static void mc13xxx_led_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static int __init mc13xxx_led_setup(struct mc13xxx_led *led, int max_current)
-{
- int shift, mask, reg, ret, bank;
-
- switch (led->id) {
- case MC13783_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 0;
- mask = 0x07;
- break;
- case MC13783_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 3;
- mask = 0x07;
- break;
- case MC13783_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(2);
- shift = 6;
- mask = 0x07;
- break;
- case MC13783_LED_R1:
- case MC13783_LED_G1:
- case MC13783_LED_B1:
- case MC13783_LED_R2:
- case MC13783_LED_G2:
- case MC13783_LED_B2:
- case MC13783_LED_R3:
- case MC13783_LED_G3:
- case MC13783_LED_B3:
- bank = (led->id - MC13783_LED_R1) / 3;
- reg = MC13XXX_REG_LED_CONTROL(3) + bank;
- shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
- mask = 0x03;
- break;
- case MC13892_LED_MD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 9;
- mask = 0x07;
- break;
- case MC13892_LED_AD:
- reg = MC13XXX_REG_LED_CONTROL(0);
- shift = 21;
- mask = 0x07;
- break;
- case MC13892_LED_KP:
- reg = MC13XXX_REG_LED_CONTROL(1);
- shift = 9;
- mask = 0x07;
- break;
- case MC13892_LED_R:
- case MC13892_LED_G:
- case MC13892_LED_B:
- bank = (led->id - MC13892_LED_R) / 2;
- reg = MC13XXX_REG_LED_CONTROL(2) + bank;
- shift = ((led->id - MC13892_LED_R) - bank * 2) * 12 + 9;
- mask = 0x07;
- break;
- default:
- BUG();
- }
-
- mc13xxx_lock(led->master);
- ret = mc13xxx_reg_rmw(led->master, reg, mask << shift,
- max_current << shift);
- mc13xxx_unlock(led->master);
-
- return ret;
-}
-
static int __init mc13xxx_led_probe(struct platform_device *pdev)
{
struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -233,31 +162,22 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
leds->num_leds = num_leds;
platform_set_drvdata(pdev, leds);
- mc13xxx_lock(mcdev);
for (i = 0; i < devtype->num_regs; i++) {
reg = pdata->led_control[i];
WARN_ON(reg >= (1 << 24));
ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
if (ret)
- break;
- }
- mc13xxx_unlock(mcdev);
-
- if (ret) {
- dev_err(&pdev->dev, "Unable to init LED driver\n");
- return ret;
+ return ret;
}
for (i = 0; i < num_leds; i++) {
const char *name, *trig;
- char max_current;
ret = -EINVAL;
id = pdata->led[i].id;
name = pdata->led[i].name;
trig = pdata->led[i].default_trigger;
- max_current = pdata->led[i].max_current;
if ((id > devtype->led_max) || (id < devtype->led_min)) {
dev_err(&pdev->dev, "Invalid ID %i\n", id);
@@ -280,11 +200,6 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
- ret = mc13xxx_led_setup(&leds->led[i], max_current);
- if (ret) {
- dev_err(&pdev->dev, "Unable to setup LED %i\n", id);
- break;
- }
ret = led_classdev_register(pdev->dev.parent,
&leds->led[i].cdev);
if (ret) {
@@ -313,10 +228,8 @@ static int mc13xxx_led_remove(struct platform_device *pdev)
cancel_work_sync(&leds->led[i].work);
}
- mc13xxx_lock(mcdev);
for (i = 0; i < leds->devtype->num_regs; i++)
mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
- mc13xxx_unlock(mcdev);
return 0;
}
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index b31d8e99c419..605047428b5a 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -66,9 +66,11 @@ static void led_pwm_set(struct led_classdev *led_cdev,
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
- unsigned int period = led_dat->period;
+ unsigned long long duty = led_dat->period;
- led_dat->duty = brightness * period / max;
+ duty *= brightness;
+ do_div(duty, max);
+ led_dat->duty = duty;
if (led_dat->can_sleep)
schedule_work(&led_dat->work);
@@ -85,11 +87,10 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
static int led_pwm_create_of(struct platform_device *pdev,
struct led_pwm_priv *priv)
{
- struct device_node *node = pdev->dev.of_node;
struct device_node *child;
int ret;
- for_each_child_of_node(node, child) {
+ for_each_child_of_node(pdev->dev.of_node, child) {
struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
led_dat->cdev.name = of_get_property(child, "label",
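The duty-cycle rewrite above exists because "brightness * period" is evaluated in 32-bit unsigned arithmetic, so a long PWM period overflows before the division; widening to 64 bits and using do_div() keeps the product exact. A standalone sketch of the arithmetic (the 40 ms period is just an example value, not taken from the patch):

#include <linux/kernel.h>
#include <asm/div64.h>

#define EXAMPLE_PERIOD_NS	40000000U	/* 40 ms period */
#define EXAMPLE_MAX		255U		/* max_brightness */

static unsigned int duty_32bit(unsigned int brightness)
{
	/* 255 * 40000000 = 10200000000, which does not fit in 32 bits,
	 * so the product silently wraps before the division. */
	return brightness * EXAMPLE_PERIOD_NS / EXAMPLE_MAX;
}

static unsigned int duty_64bit(unsigned int brightness)
{
	unsigned long long duty = EXAMPLE_PERIOD_NS;

	duty *= brightness;		/* full 64-bit product */
	do_div(duty, EXAMPLE_MAX);	/* kernel helper for 64-by-32 division */

	return duty;
}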
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 87cf215af798..98174e7240ee 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -18,11 +18,10 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_data/leds-s3c24xx.h>
-#include <mach/hardware.h>
#include <mach/regs-gpio.h>
#include <plat/gpio-cfg.h>
-#include <linux/platform_data/leds-s3c24xx.h>
/* our context */
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 8cc304f36728..3d9e267a56c4 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -4,77 +4,87 @@
* The TCA6507 is a programmable LED controller that can drive 7
* separate lines either by holding them low, or by pulsing them
* with modulated width.
- * The modulation can be varied in a simple pattern to produce a blink or
- * double-blink.
+ * The modulation can be varied in a simple pattern to produce a
+ * blink or double-blink.
*
- * This driver can configure each line either as a 'GPIO' which is out-only
- * (no pull-up) or as an LED with variable brightness and hardware-assisted
- * blinking.
+ * This driver can configure each line either as a 'GPIO' which is
+ * out-only (pull-up resistor required) or as an LED with variable
+ * brightness and hardware-assisted blinking.
*
- * Apart from OFF and ON there are three programmable brightness levels which
- * can be programmed from 0 to 15 and indicate how many 500usec intervals in
- * each 8msec that the led is 'on'. The levels are named MASTER, BANK0 and
- * BANK1.
+ * Apart from OFF and ON there are three programmable brightness
+ * levels which can be programmed from 0 to 15 and indicate how many
+ * 500usec intervals in each 8msec that the led is 'on'. The levels
+ * are named MASTER, BANK0 and BANK1.
*
- * There are two different blink rates that can be programmed, each with
- * separate time for rise, on, fall, off and second-off. Thus if 3 or more
- * different non-trivial rates are required, software must be used for the extra
- * rates. The two different blink rates must align with the two levels BANK0 and
- * BANK1.
- * This driver does not support double-blink so 'second-off' always matches
- * 'off'.
+ * There are two different blink rates that can be programmed, each
+ * with separate time for rise, on, fall, off and second-off. Thus if
+ * 3 or more different non-trivial rates are required, software must
+ * be used for the extra rates. The two different blink rates must
+ * align with the two levels BANK0 and BANK1. This driver does not
+ * support double-blink so 'second-off' always matches 'off'.
*
- * Only 16 different times can be programmed in a roughly logarithmic scale from
- * 64ms to 16320ms. To be precise the possible times are:
+ * Only 16 different times can be programmed in a roughly logarithmic
+ * scale from 64ms to 16320ms. To be precise the possible times are:
* 0, 64, 128, 192, 256, 384, 512, 768,
* 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
*
- * Times that cannot be closely matched with these must be
- * handled in software. This driver allows 12.5% error in matching.
+ * Times that cannot be closely matched with these must be handled in
+ * software. This driver allows 12.5% error in matching.
*
- * This driver does not allow rise/fall rates to be set explicitly. When trying
- * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
- * 'hold' times are chosen to get a close match. If the target delay is even,
- * the 'change' number will be the smaller; if odd, the 'hold' number will be
- * the smaller.
-
- * Choosing pairs of delays with 12.5% errors allows us to match delays in the
- * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
- * 26% of the achievable sums can be matched by multiple pairings. For example
- * 1536 == 1536+0, 1024+512, or 768+768. This driver will always choose the
- * pairing with the least maximum - 768+768 in this case. Other pairings are
- * not available.
+ * This driver does not allow rise/fall rates to be set explicitly.
+ * When trying to match a given 'on' or 'off' period, an appropriate
+ * pair of 'change' and 'hold' times are chosen to get a close match.
+ * If the target delay is even, the 'change' number will be the
+ * smaller; if odd, the 'hold' number will be the smaller.
+
+ * Choosing pairs of delays with 12.5% errors allows us to match
+ * delays in the ranges: 56-72, 112-144, 168-216, 224-27504,
+ * 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings.
+ * For example 1536 == 1536+0, 1024+512, or 768+768.
+ * This driver will always choose the pairing with the least
+ * maximum - 768+768 in this case. Other pairings are not available.
*
- * Access to the 3 levels and 2 blinks are on a first-come, first-served basis.
- * Access can be shared by multiple leds if they have the same level and
- * either same blink rates, or some don't blink.
- * When a led changes, it relinquishes access and tries again, so it might
- * lose access to hardware blink.
- * If a blink engine cannot be allocated, software blink is used.
- * If the desired brightness cannot be allocated, the closest available non-zero
- * brightness is used. As 'full' is always available, the worst case would be
- * to have two different blink rates at '1', with Max at '2', then other leds
- * will have to choose between '2' and '16'. Hopefully this is not likely.
+ * Access to the 3 levels and 2 blinks is on a first-come,
+ * first-served basis. Access can be shared by multiple leds if they
+ * have the same level and either same blink rates, or some don't
+ * blink. When a led changes, it relinquishes access and tries again,
+ * so it might lose access to hardware blink.
*
- * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
- * and LEDs using the blink. It can only be reprogrammed when the appropriate
- * counter is zero. The MASTER level has a single usage count.
+ * If a blink engine cannot be allocated, software blink is used. If
+ * the desired brightness cannot be allocated, the closest available
+ * non-zero brightness is used. As 'full' is always available, the
+ * worst case would be to have two different blink rates at '1', with
+ * Max at '2', then other leds will have to choose between '2' and
+ * '16'. Hopefully this is not likely.
*
- * Each Led has programmable 'on' and 'off' time as milliseconds. With each
- * there is a flag saying if it was explicitly requested or defaulted.
- * Similarly the banks know if each time was explicit or a default. Defaults
- * are permitted to be changed freely - they are not recognised when matching.
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the
+ * brightness and LEDs using the blink. It can only be reprogrammed
+ * when the appropriate counter is zero. The MASTER level has a
+ * single usage count.
*
+ * Each LED has programmable 'on' and 'off' time as milliseconds.
+ * With each there is a flag saying if it was explicitly requested or
+ * defaulted. Similarly the banks know if each time was explicit or a
+ * default. Defaults are permitted to be changed freely - they are
+ * not recognised when matching.
*
- * An led-tca6507 device must be provided with platform data. This data
- * lists for each output: the name, default trigger, and whether the signal
- * is being used as a GPiO rather than an led. 'struct led_plaform_data'
- * is used for this. If 'name' is NULL, the output isn't used. If 'flags'
- * is TCA6507_MAKE_CPIO, the output is a GPO.
- * The "struct led_platform_data" can be embedded in a
- * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPiOs,
- * and a 'setup' callback which is called once the GPiOs are available.
*
+ * An led-tca6507 device must be provided with platform data or
+ * configured via devicetree.
+ *
+ * The platform-data lists for each output: the name, default trigger,
+ * and whether the signal is being used as a GPIO rather than an LED.
+ * 'struct led_platform_data' is used for this. If 'name' is NULL, the
+ * output isn't used. If 'flags' is TCA6507_MAKE_GPIO, the output is
+ * a GPO. The "struct led_platform_data" can be embedded in a "struct
+ * tca6507_platform_data" which adds a 'gpio_base' for the GPIOs, and
+ * a 'setup' callback which is called once the GPIOs are available.
+ *
+ * When configured via devicetree there is one child for each output.
+ * The "reg" determines the output number and "compatible" determines
+ * whether it is an LED or a GPIO. "linux,default-trigger" can set a
+ * default trigger.
*/
#include <linux/module.h>
@@ -192,17 +202,18 @@ MODULE_DEVICE_TABLE(i2c, tca6507_id);
static int choose_times(int msec, int *c1p, int *c2p)
{
/*
- * Choose two timecodes which add to 'msec' as near as possible.
- * The first returned is the 'on' or 'off' time. The second is to be
- * used as a 'fade-on' or 'fade-off' time. If 'msec' is even,
- * the first will not be smaller than the second. If 'msec' is odd,
- * the first will not be larger than the second.
- * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
- * otherwise return the sum that was achieved, plus 1 if the first is
- * smaller.
- * If two possibilities are equally good (e.g. 512+0, 256+256), choose
- * the first pair so there is more change-time visible (i.e. it is
- * softer).
+ * Choose two timecodes which add to 'msec' as near as
+ * possible. The first returned is the 'on' or 'off' time.
+ * The second is to be used as a 'fade-on' or 'fade-off' time.
+ * If 'msec' is even, the first will not be smaller than the
+ * second. If 'msec' is odd, the first will not be larger
+ * than the second.
+ * If we cannot get a sum within 1/8 of 'msec' fail with
+ * -EINVAL, otherwise return the sum that was achieved, plus 1
+ * if the first is smaller.
+ * If two possibilities are equally good (e.g. 512+0,
+ * 256+256), choose the first pair so there is more
+ * change-time visible (i.e. it is softer).
*/
int c1, c2;
int tmax = msec * 9 / 8;
@@ -255,8 +266,8 @@ static int choose_times(int msec, int *c1p, int *c2p)
}
/*
- * Update the register file with the appropriate 3-bit state for
- * the given led.
+ * Update the register file with the appropriate 3-bit state for the
+ * given led.
*/
static void set_select(struct tca6507_chip *tca, int led, int val)
{
@@ -274,9 +285,9 @@ static void set_select(struct tca6507_chip *tca, int led, int val)
}
}
-/* Update the register file with the appropriate 4-bit code for
- * one bank or other. This can be used for timers, for levels, or
- * for initialisation.
+/* Update the register file with the appropriate 4-bit code for one
+ * bank or other. This can be used for timers, for levels, or for
+ * initialization.
*/
static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
{
@@ -309,7 +320,7 @@ static void set_level(struct tca6507_chip *tca, int bank, int level)
tca->bank[bank].level = level;
}
-/* Record all relevant time code for a given bank */
+/* Record all relevant time codes for a given bank */
static void set_times(struct tca6507_chip *tca, int bank)
{
int c1, c2;
@@ -317,7 +328,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
result = choose_times(tca->bank[bank].ontime, &c1, &c2);
dev_dbg(&tca->client->dev,
- "Chose on times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ "Chose on times %d(%d) %d(%d) for %dms\n",
+ c1, time_codes[c1],
c2, time_codes[c2], tca->bank[bank].ontime);
set_code(tca, TCA6507_FADE_ON, bank, c2);
set_code(tca, TCA6507_FULL_ON, bank, c1);
@@ -325,7 +337,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
result = choose_times(tca->bank[bank].offtime, &c1, &c2);
dev_dbg(&tca->client->dev,
- "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ "Chose off times %d(%d) %d(%d) for %dms\n",
+ c1, time_codes[c1],
c2, time_codes[c2], tca->bank[bank].offtime);
set_code(tca, TCA6507_FADE_OFF, bank, c2);
set_code(tca, TCA6507_FIRST_OFF, bank, c1);
@@ -373,7 +386,8 @@ static void led_release(struct tca6507_led *led)
static int led_prepare(struct tca6507_led *led)
{
- /* Assign this led to a bank, configuring that bank if necessary. */
+ /* Assign this led to a bank, configuring that bank if
+ * necessary. */
int level = TO_LEVEL(led->led_cdev.brightness);
struct tca6507_chip *tca = led->chip;
int c1, c2;
@@ -389,10 +403,10 @@ static int led_prepare(struct tca6507_led *led)
if (led->ontime == 0 || led->offtime == 0) {
/*
- * Just set the brightness, choosing first usable bank.
- * If none perfect, choose best.
- * Count backwards so we check MASTER bank first
- * to avoid wasting a timer.
+ * Just set the brightness, choosing first usable
+ * bank. If none perfect, choose best. Count
+ * backwards so we check MASTER bank first to avoid
+ * wasting a timer.
*/
int best = -1;/* full-on */
int diff = 15-level;
@@ -433,9 +447,9 @@ static int led_prepare(struct tca6507_led *led)
}
/*
- * We have on/off time so we need to try to allocate a timing bank.
- * First check if times are compatible with hardware and give up if
- * not.
+ * We have on/off time so we need to try to allocate a timing
+ * bank. First check if times are compatible with hardware
+ * and give up if not.
*/
if (choose_times(led->ontime, &c1, &c2) < 0)
return -EINVAL;
@@ -523,8 +537,8 @@ static int led_assign(struct tca6507_led *led)
err = led_prepare(led);
if (err) {
/*
- * Can only fail on timer setup. In that case we need to
- * re-establish as steady level.
+ * Can only fail on timer setup. In that case we need
+ * to re-establish as steady level.
*/
led->ontime = 0;
led->offtime = 0;
@@ -594,8 +608,8 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
spin_lock_irqsave(&tca->lock, flags);
/*
- * 'OFF' is floating high, and 'ON' is pulled down, so it has the
- * inverse sense of 'val'.
+ * 'OFF' is floating high, and 'ON' is pulled down, so it has
+ * the inverse sense of 'val'.
*/
set_select(tca, tca->gpio_map[offset],
val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
@@ -638,6 +652,9 @@ static int tca6507_probe_gpios(struct i2c_client *client,
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.dev = &client->dev;
+#ifdef CONFIG_OF_GPIO
+ tca->gpio.of_node = of_node_get(client->dev.of_node);
+#endif
err = gpiochip_add(&tca->gpio);
if (err) {
tca->gpio.ngpio = 0;
@@ -682,7 +699,7 @@ tca6507_led_dt_init(struct i2c_client *client)
return ERR_PTR(-ENODEV);
tca_leds = devm_kzalloc(&client->dev,
- sizeof(struct led_info) * count, GFP_KERNEL);
+ sizeof(struct led_info) * NUM_LEDS, GFP_KERNEL);
if (!tca_leds)
return ERR_PTR(-ENOMEM);
@@ -695,9 +712,11 @@ tca6507_led_dt_init(struct i2c_client *client)
of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
-
+ led.flags = 0;
+ if (of_property_match_string(child, "compatible", "gpio") >= 0)
+ led.flags |= TCA6507_MAKE_GPIO;
ret = of_property_read_u32(child, "reg", &reg);
- if (ret != 0)
+ if (ret != 0 || reg < 0 || reg >= NUM_LEDS)
continue;
tca_leds[reg] = led;
@@ -708,8 +727,10 @@ tca6507_led_dt_init(struct i2c_client *client)
return ERR_PTR(-ENOMEM);
pdata->leds.leds = tca_leds;
- pdata->leds.num_leds = count;
-
+ pdata->leds.num_leds = NUM_LEDS;
+#ifdef CONFIG_GPIOLIB
+ pdata->gpio_base = -1;
+#endif
return pdata;
}
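The reworked header comment above says that a 1536 ms delay can be reached as 1536+0, 1024+512 or 768+768, and that the driver always picks the pairing with the smallest maximum. A standalone userspace sketch of that selection rule (an illustration only, not the kernel's choose_times(), whose rounding and tie-breaking are more involved):

#include <stdio.h>

static const int time_codes[16] = {
	0, 64, 128, 192, 256, 384, 512, 768,
	1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
};

int main(void)
{
	int msec = 1536;	/* target 'on' or 'off' time in ms */
	int best_i = -1, best_j = -1, best_max = 1 << 30;
	int i, j;

	/* Accept any pair within 12.5% of the target and prefer the pair
	 * whose larger member is smallest, e.g. 768+768 over 1536+0. */
	for (i = 0; i < 16; i++) {
		for (j = 0; j <= i; j++) {
			int sum = time_codes[i] + time_codes[j];

			if (sum < msec - msec / 8 || sum > msec + msec / 8)
				continue;
			if (time_codes[i] < best_max) {
				best_max = time_codes[i];
				best_i = i;
				best_j = j;
			}
		}
	}

	if (best_i >= 0)
		printf("%dms -> codes %d+%d (%d ms + %d ms)\n", msec,
		       best_i, best_j, time_codes[best_i], time_codes[best_j]);
	return 0;
}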
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 04a50498f257..9e9c56758a08 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -38,7 +38,7 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -193,8 +193,7 @@ static int adb_scan_bus(void)
break;
noMovement = 0;
- }
- else {
+ } else {
/*
* No devices left at address i; move the
* one(s) we moved to `highFree' back to i.
@@ -263,7 +262,7 @@ adb_reset_bus(void)
/*
* notify clients before sleep
*/
-static int adb_suspend(struct platform_device *dev, pm_message_t state)
+static int __adb_suspend(struct platform_device *dev, pm_message_t state)
{
adb_got_sleep = 1;
/* We need to get a lock on the probe thread */
@@ -276,10 +275,25 @@ static int adb_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
+static int adb_suspend(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_SUSPEND);
+}
+
+static int adb_freeze(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_FREEZE);
+}
+
+static int adb_poweroff(struct device *dev)
+{
+ return __adb_suspend(to_platform_device(dev), PMSG_HIBERNATE);
+}
+
/*
* reset bus after sleep
*/
-static int adb_resume(struct platform_device *dev)
+static int __adb_resume(struct platform_device *dev)
{
adb_got_sleep = 0;
up(&adb_probe_mutex);
@@ -287,6 +301,11 @@ static int adb_resume(struct platform_device *dev)
return 0;
}
+
+static int adb_resume(struct device *dev)
+{
+ return __adb_resume(to_platform_device(dev));
+}
#endif /* CONFIG_PM */
static int __init adb_init(void)
@@ -502,7 +521,7 @@ void
adb_input(unsigned char *buf, int nb, int autopoll)
{
int i, id;
- static int dump_adb_input = 0;
+ static int dump_adb_input;
unsigned long flags;
void (*handler)(unsigned char *, int, int);
@@ -624,8 +643,7 @@ do_adb_query(struct adb_request *req)
{
int ret = -EINVAL;
- switch(req->data[1])
- {
+ switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
if (req->nbytes < 3)
break;
@@ -697,7 +715,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
int ret = 0;
struct adbdev_state *state = file->private_data;
struct adb_request *req;
- DECLARE_WAITQUEUE(wait,current);
+ DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
if (count < 2)
@@ -794,8 +812,8 @@ static ssize_t adb_write(struct file *file, const char __user *buf,
}
/* Special case for ADB_BUSRESET request, all others are sent to
the controller */
- else if ((req->data[0] == ADB_PACKET)&&(count > 1)
- &&(req->data[1] == ADB_BUSRESET)) {
+ else if ((req->data[0] == ADB_PACKET) && (count > 1)
+ && (req->data[1] == ADB_BUSRESET)) {
ret = do_adb_reset_bus();
up(&adb_probe_mutex);
atomic_dec(&state->n_pending);
@@ -831,14 +849,25 @@ static const struct file_operations adb_fops = {
.release = adb_release,
};
+#ifdef CONFIG_PM
+static const struct dev_pm_ops adb_dev_pm_ops = {
+ .suspend = adb_suspend,
+ .resume = adb_resume,
+ /* Hibernate hooks */
+ .freeze = adb_freeze,
+ .thaw = adb_resume,
+ .poweroff = adb_poweroff,
+ .restore = adb_resume,
+};
+#endif
+
static struct platform_driver adb_pfdrv = {
.driver = {
.name = "adb",
- },
#ifdef CONFIG_PM
- .suspend = adb_suspend,
- .resume = adb_resume,
+ .pm = &adb_dev_pm_ops,
#endif
+ },
};
static struct platform_device adb_pfdev = {
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 9ef32b3df91f..590214ba736c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -133,7 +133,7 @@ static int wf_lm75_probe(struct i2c_client *client,
lm->inited = 0;
lm->ds1775 = ds1775;
lm->i2c = client;
- lm->sens.name = (char *)name; /* XXX fix constness in structure */
+ lm->sens.name = name;
lm->sens.ops = &wf_lm75_ops;
i2c_set_clientdata(client, lm);
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 945a25b2f31e..87e439b10318 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -95,7 +95,7 @@ static int wf_max6690_probe(struct i2c_client *client,
}
max->i2c = client;
- max->sens.name = (char *)name; /* XXX fix constness in structure */
+ max->sens.name = name;
max->sens.ops = &wf_max6690_ops;
i2c_set_clientdata(client, max);
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
index 6cd38fc68599..86d7518cd13b 100644
--- a/drivers/mailbox/omap-mbox.h
+++ b/drivers/mailbox/omap-mbox.h
@@ -52,7 +52,7 @@ struct omap_mbox_queue {
struct omap_mbox {
const char *name;
- unsigned int irq;
+ int irq;
struct omap_mbox_queue *txq, *rxq;
struct omap_mbox_ops *ops;
struct device *dev;
diff --git a/drivers/mcb/Kconfig b/drivers/mcb/Kconfig
new file mode 100644
index 000000000000..e9a6976e1010
--- /dev/null
+++ b/drivers/mcb/Kconfig
@@ -0,0 +1,31 @@
+#
+# MEN Chameleon Bus (MCB) support
+#
+
+menuconfig MCB
+ tristate "MCB support"
+ default n
+ depends on HAS_IOMEM
+ help
+
+ The MCB (MEN Chameleon Bus) is a bus specific to MEN Mikroelektronik
+ FPGA-based devices. It is used to identify MCB-based IP cores within
+ an FPGA and provide the necessary framework for instantiating drivers
+ for these devices.
+
+ If built as a module, the module is called mcb.ko.
+
+if MCB
+config MCB_PCI
+ tristate "PCI based MCB carrier"
+ default n
+ depends on PCI
+ help
+
+ This is an MCB carrier on a PCI device. Both PCI-attached on-board
+ FPGAs as well as CompactPCI-attached MCB FPGAs are supported with
+ this driver.
+
+ If built as a module, the module is called mcb-pci.ko.
+
+endif # MCB
diff --git a/drivers/mcb/Makefile b/drivers/mcb/Makefile
new file mode 100644
index 000000000000..1ae141311def
--- /dev/null
+++ b/drivers/mcb/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_MCB) += mcb.o
+
+mcb-y += mcb-core.o
+mcb-y += mcb-parse.o
+
+obj-$(CONFIG_MCB_PCI) += mcb-pci.o
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
new file mode 100644
index 000000000000..bbe12932d404
--- /dev/null
+++ b/drivers/mcb/mcb-core.c
@@ -0,0 +1,414 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2013 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/mcb.h>
+
+static DEFINE_IDA(mcb_ida);
+
+static const struct mcb_device_id *mcb_match_id(const struct mcb_device_id *ids,
+ struct mcb_device *dev)
+{
+ if (ids) {
+ while (ids->device) {
+ if (ids->device == dev->id)
+ return ids;
+ ids++;
+ }
+ }
+
+ return NULL;
+}
+
+static int mcb_match(struct device *dev, struct device_driver *drv)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(drv);
+ struct mcb_device *mdev = to_mcb_device(dev);
+ const struct mcb_device_id *found_id;
+
+ found_id = mcb_match_id(mdrv->id_table, mdev);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+static int mcb_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
+ if (ret)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int mcb_probe(struct device *dev)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
+ struct mcb_device *mdev = to_mcb_device(dev);
+ const struct mcb_device_id *found_id;
+
+ found_id = mcb_match_id(mdrv->id_table, mdev);
+ if (!found_id)
+ return -ENODEV;
+
+ return mdrv->probe(mdev, found_id);
+}
+
+static int mcb_remove(struct device *dev)
+{
+ struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
+ struct mcb_device *mdev = to_mcb_device(dev);
+
+ mdrv->remove(mdev);
+
+ put_device(&mdev->dev);
+
+ return 0;
+}
+
+static void mcb_shutdown(struct device *dev)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ struct mcb_driver *mdrv = mdev->driver;
+
+ if (mdrv && mdrv->shutdown)
+ mdrv->shutdown(mdev);
+}
+
+static struct bus_type mcb_bus_type = {
+ .name = "mcb",
+ .match = mcb_match,
+ .uevent = mcb_uevent,
+ .probe = mcb_probe,
+ .remove = mcb_remove,
+ .shutdown = mcb_shutdown,
+};
+
+/**
+ * __mcb_register_driver() - Register a @mcb_driver with the system
+ * @drv: The @mcb_driver
+ * @owner: The @mcb_driver's module
+ * @mod_name: The name of the @mcb_driver's module
+ *
+ * Register a @mcb_driver with the system. Performs a sanity check to ensure
+ * that the .probe and .remove methods are provided by the driver.
+ */
+int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
+ const char *mod_name)
+{
+ if (!drv->probe || !drv->remove)
+ return -EINVAL;
+
+ drv->driver.owner = owner;
+ drv->driver.bus = &mcb_bus_type;
+ drv->driver.mod_name = mod_name;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__mcb_register_driver);
+
+/**
+ * mcb_unregister_driver() - Unregister a @mcb_driver from the system
+ * @drv: The @mcb_driver
+ *
+ * Unregister a @mcb_driver from the system.
+ */
+void mcb_unregister_driver(struct mcb_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+
+static void mcb_release_dev(struct device *dev)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+
+ mcb_bus_put(mdev->bus);
+ kfree(mdev);
+}
+
+/**
+ * mcb_device_register() - Register a mcb_device
+ * @bus: The @mcb_bus of the device
+ * @dev: The @mcb_device
+ *
+ * Register a specific @mcb_device on a @mcb_bus and with the system itself.
+ */
+int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+{
+ int ret;
+ int device_id;
+
+ device_initialize(&dev->dev);
+ dev->dev.bus = &mcb_bus_type;
+ dev->dev.parent = bus->dev.parent;
+ dev->dev.release = mcb_release_dev;
+
+ device_id = dev->id;
+ dev_set_name(&dev->dev, "mcb%d-16z%03d-%d:%d:%d",
+ bus->bus_nr, device_id, dev->inst, dev->group, dev->var);
+
+ ret = device_add(&dev->dev);
+ if (ret < 0) {
+ pr_err("Failed registering device 16z%03d on bus mcb%d (%d)\n",
+ device_id, bus->bus_nr, ret);
+ goto out;
+ }
+
+ return 0;
+
+out:
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mcb_device_register);
+
+/**
+ * mcb_alloc_bus() - Allocate a new @mcb_bus
+ *
+ * Allocate a new @mcb_bus.
+ */
+struct mcb_bus *mcb_alloc_bus(void)
+{
+ struct mcb_bus *bus;
+ int bus_nr;
+
+ bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
+ if (bus_nr < 0) {
+ kfree(bus);
+ return ERR_PTR(bus_nr);
+ }
+
+ INIT_LIST_HEAD(&bus->children);
+ bus->bus_nr = bus_nr;
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+
+static int __mcb_devices_unregister(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static void mcb_devices_unregister(struct mcb_bus *bus)
+{
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister);
+}
+/**
+ * mcb_release_bus() - Free a @mcb_bus
+ * @bus: The @mcb_bus to release
+ *
+ * Release an allocated @mcb_bus from the system.
+ */
+void mcb_release_bus(struct mcb_bus *bus)
+{
+ mcb_devices_unregister(bus);
+
+ ida_simple_remove(&mcb_ida, bus->bus_nr);
+
+ kfree(bus);
+}
+EXPORT_SYMBOL_GPL(mcb_release_bus);
+
+/**
+ * mcb_bus_get() - Increment refcnt
+ * @bus: The @mcb_bus
+ *
+ * Get a @mcb_bus' ref
+ */
+struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
+{
+ if (bus)
+ get_device(&bus->dev);
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(mcb_bus_get);
+
+/**
+ * mcb_bus_put() - Decrement refcnt
+ * @bus: The @mcb_bus
+ *
+ * Release a @mcb_bus' ref
+ */
+void mcb_bus_put(struct mcb_bus *bus)
+{
+ if (bus)
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL_GPL(mcb_bus_put);
+
+/**
+ * mcb_alloc_dev() - Allocate a device
+ * @bus: The @mcb_bus the device is part of
+ *
+ * Allocate a @mcb_device and set its parent bus to @bus.
+ */
+struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
+{
+ struct mcb_device *dev;
+
+ dev = kzalloc(sizeof(struct mcb_device), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ INIT_LIST_HEAD(&dev->bus_list);
+ dev->bus = bus;
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+
+/**
+ * mcb_free_dev() - Free @mcb_device
+ * @dev: The device to free
+ *
+ * Free a @mcb_device
+ */
+void mcb_free_dev(struct mcb_device *dev)
+{
+ kfree(dev);
+}
+EXPORT_SYMBOL_GPL(mcb_free_dev);
+
+static int __mcb_bus_add_devices(struct device *dev, void *data)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ int retval;
+
+ if (mdev->is_added)
+ return 0;
+
+ retval = device_attach(dev);
+ if (retval < 0)
+ dev_err(dev, "Error adding device (%d)\n", retval);
+
+ mdev->is_added = true;
+
+ return 0;
+}
+
+static int __mcb_bus_add_child(struct device *dev, void *data)
+{
+ struct mcb_device *mdev = to_mcb_device(dev);
+ struct mcb_bus *child;
+
+ BUG_ON(!mdev->is_added);
+ child = mdev->subordinate;
+
+ if (child)
+ mcb_bus_add_devices(child);
+
+ return 0;
+}
+
+/**
+ * mcb_bus_add_devices() - Add devices from the bus' internal device list
+ * @bus: The @mcb_bus whose devices should be added
+ *
+ * Add devices in the bus' internal device list to the system.
+ */
+void mcb_bus_add_devices(const struct mcb_bus *bus)
+{
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
+ bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_child);
+
+}
+EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+
+/**
+ * mcb_request_mem() - Request memory
+ * @dev: The @mcb_device the memory is for
+ * @name: The name for the memory reference.
+ *
+ * Request memory for a @mcb_device. If @name is NULL the driver name will
+ * be used.
+ */
+struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
+{
+ struct resource *mem;
+ u32 size;
+
+ if (!name)
+ name = dev->dev.driver->name;
+
+ size = resource_size(&dev->mem);
+
+ mem = request_mem_region(dev->mem.start, size, name);
+ if (!mem)
+ return ERR_PTR(-EBUSY);
+
+ return mem;
+}
+EXPORT_SYMBOL_GPL(mcb_request_mem);
+
+/**
+ * mcb_release_mem() - Release memory requested by device
+ * @mem: The memory resource to be released
+ *
+ * Release memory that was previously requested via mcb_request_mem().
+ */
+void mcb_release_mem(struct resource *mem)
+{
+ u32 size;
+
+ size = resource_size(mem);
+ release_mem_region(mem->start, size);
+}
+EXPORT_SYMBOL_GPL(mcb_release_mem);
+
+/**
+ * mcb_get_irq() - Get device's IRQ number
+ * @dev: The @mcb_device the IRQ is for
+ *
+ * Get the IRQ number of a given @mcb_device.
+ */
+int mcb_get_irq(struct mcb_device *dev)
+{
+ struct resource *irq = &dev->irq;
+
+ return irq->start;
+}
+EXPORT_SYMBOL_GPL(mcb_get_irq);
+
+static int mcb_init(void)
+{
+ return bus_register(&mcb_bus_type);
+}
+
+static void mcb_exit(void)
+{
+ bus_unregister(&mcb_bus_type);
+}
+
+/* mcb must be initialized after PCI but before the chameleon drivers.
+ * That means we must use some initcall between subsys_initcall and
+ * device_initcall.
+ */
+fs_initcall(mcb_init);
+module_exit(mcb_exit);
+
+MODULE_DESCRIPTION("MEN Chameleon Bus Driver");
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
new file mode 100644
index 000000000000..f956ef26c0ce
--- /dev/null
+++ b/drivers/mcb/mcb-internal.h
@@ -0,0 +1,118 @@
+#ifndef __MCB_INTERNAL
+#define __MCB_INTERNAL
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_MEN 0x1a88
+#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
+#define CHAMELEON_FILENAME_LEN 12
+#define CHAMELEONV2_MAGIC 0xabce
+
+enum chameleon_descriptor_type {
+ CHAMELEON_DTYPE_GENERAL = 0x0,
+ CHAMELEON_DTYPE_BRIDGE = 0x1,
+ CHAMELEON_DTYPE_CPU = 0x2,
+ CHAMELEON_DTYPE_BAR = 0x3,
+ CHAMELEON_DTYPE_END = 0xf,
+};
+
+enum chameleon_bus_type {
+ CHAMELEON_BUS_WISHBONE,
+ CHAMELEON_BUS_AVALON,
+ CHAMELEON_BUS_LPC,
+ CHAMELEON_BUS_ISA,
+};
+
+/**
+ * struct chameleon_fpga_header
+ *
+ * @revision: Revision of Chameleon table in FPGA
+ * @model: Chameleon table model ASCII char
+ * @minor: Revision minor
+ * @bus_type: Bus type (usually %CHAMELEON_BUS_WISHBONE)
+ * @magic: Chameleon header magic number (0xabce for version 2)
+ * @reserved: Reserved
+ * @filename: Filename of FPGA bitstream
+ */
+struct chameleon_fpga_header {
+ u8 revision;
+ char model;
+ u8 minor;
+ u8 bus_type;
+ u16 magic;
+ u16 reserved;
+ /* This one has no '\0' at the end!!! */
+ char filename[CHAMELEON_FILENAME_LEN];
+} __packed;
+#define HEADER_MAGIC_OFFSET 0x4
+
+/**
+ * struct chameleon_gdd - Chameleon General Device Descriptor
+ *
+ * @irq: the position in the FPGA's IRQ controller vector
+ * @rev: the revision of the variant's implementation
+ * @var: the variant of the IP core
+ * @dev: the device the IP core is
+ * @dtype: device descriptor type
+ * @bar: BAR offset that must be added to module offset
+ * @inst: the instance number of the device, 0 is first instance
+ * @group: the group the device belongs to (0 = no group)
+ * @reserved: reserved
+ * @offset: beginning of the address window of desired module
+ * @size: size of the module's address window
+ */
+struct chameleon_gdd {
+ __le32 reg1;
+ __le32 reg2;
+ __le32 offset;
+ __le32 size;
+
+} __packed;
+
+/* GDD Register 1 fields */
+#define GDD_IRQ(x) ((x) & 0x1f)
+#define GDD_REV(x) (((x) >> 5) & 0x3f)
+#define GDD_VAR(x) (((x) >> 11) & 0x3f)
+#define GDD_DEV(x) (((x) >> 18) & 0x3ff)
+#define GDD_DTY(x) (((x) >> 28) & 0xf)
+
+/* GDD Register 2 fields */
+#define GDD_BAR(x) ((x) & 0x7)
+#define GDD_INS(x) (((x) >> 3) & 0x3f)
+#define GDD_GRP(x) (((x) >> 9) & 0x3f)
+
+/**
+ * struct chameleon_bdd - Chameleon Bridge Device Descriptor
+ *
+ * @irq: the position in the FPGA's IRQ controller vector
+ * @rev: the revision of the variant's implementation
+ * @var: the variant of the IP core
+ * @dev: the device the IP core is
+ * @dtype: device descriptor type
+ * @bar: BAR offset that must be added to module offset
+ * @inst: the instance number of the device, 0 is first instance
+ * @dbar: destination bar from the bus _behind_ the bridge
+ * @chamoff: offset within the BAR of the source bus
+ * @offset: beginning of the address window of the desired module
+ * @size: size of the module's address window
+ */
+struct chameleon_bdd {
+ unsigned int irq:6;
+ unsigned int rev:6;
+ unsigned int var:6;
+ unsigned int dev:10;
+ unsigned int dtype:4;
+ unsigned int bar:3;
+ unsigned int inst:6;
+ unsigned int dbar:3;
+ unsigned int group:6;
+ unsigned int reserved:14;
+ u32 chamoff;
+ u32 offset;
+ u32 size;
+} __packed;
+
+int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ void __iomem *base);
+
+#endif
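As a quick worked example of the GDD register-1 bit layout documented above,
the following standalone sketch decodes a descriptor word with the same shifts
and masks as the GDD_* macros; the sample value 0x00364045 is purely
illustrative.

#include <stdint.h>
#include <stdio.h>

/* Same field layout as the GDD register 1 macros above. */
#define GDD_IRQ(x) ((x) & 0x1f)
#define GDD_REV(x) (((x) >> 5) & 0x3f)
#define GDD_VAR(x) (((x) >> 11) & 0x3f)
#define GDD_DEV(x) (((x) >> 18) & 0x3ff)
#define GDD_DTY(x) (((x) >> 28) & 0xf)

int main(void)
{
	uint32_t reg1 = 0x00364045;	/* made-up descriptor word */

	/* Prints: dtype=0 dev=16z013 var=8 rev=2 irq=5 */
	printf("dtype=%u dev=16z%03u var=%u rev=%u irq=%u\n",
	       GDD_DTY(reg1), GDD_DEV(reg1), GDD_VAR(reg1),
	       GDD_REV(reg1), GDD_IRQ(reg1));
	return 0;
}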
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
new file mode 100644
index 000000000000..d1278b5f3028
--- /dev/null
+++ b/drivers/mcb/mcb-parse.c
@@ -0,0 +1,159 @@
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/mcb.h>
+
+#include "mcb-internal.h"
+
+struct mcb_parse_priv {
+ phys_addr_t mapbase;
+ void __iomem *base;
+};
+
+#define for_each_chameleon_cell(dtype, p) \
+ for ((dtype) = get_next_dtype((p)); \
+ (dtype) != CHAMELEON_DTYPE_END; \
+ (dtype) = get_next_dtype((p)))
+
+static inline uint32_t get_next_dtype(void __iomem *p)
+{
+ uint32_t dtype;
+
+ dtype = readl(p);
+ return dtype >> 28;
+}
+
+static int chameleon_parse_bdd(struct mcb_bus *bus,
+ phys_addr_t mapbase,
+ void __iomem *base)
+{
+ return 0;
+}
+
+static int chameleon_parse_gdd(struct mcb_bus *bus,
+ phys_addr_t mapbase,
+ void __iomem *base)
+{
+ struct chameleon_gdd __iomem *gdd =
+ (struct chameleon_gdd __iomem *) base;
+ struct mcb_device *mdev;
+ u32 offset;
+ u32 size;
+ int ret;
+ __le32 reg1;
+ __le32 reg2;
+
+ mdev = mcb_alloc_dev(bus);
+ if (!mdev)
+ return -ENOMEM;
+
+ reg1 = readl(&gdd->reg1);
+ reg2 = readl(&gdd->reg2);
+ offset = readl(&gdd->offset);
+ size = readl(&gdd->size);
+
+ mdev->id = GDD_DEV(reg1);
+ mdev->rev = GDD_REV(reg1);
+ mdev->var = GDD_VAR(reg1);
+ mdev->bar = GDD_BAR(reg1);
+ mdev->group = GDD_GRP(reg2);
+ mdev->inst = GDD_INS(reg2);
+
+ pr_debug("Found a 16z%03d\n", mdev->id);
+
+ mdev->irq.start = GDD_IRQ(reg1);
+ mdev->irq.end = GDD_IRQ(reg1);
+ mdev->irq.flags = IORESOURCE_IRQ;
+
+ mdev->mem.start = mapbase + offset;
+ mdev->mem.end = mdev->mem.start + size - 1;
+ mdev->mem.flags = IORESOURCE_MEM;
+
+ mdev->is_added = false;
+
+ ret = mcb_device_register(bus, mdev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ mcb_free_dev(mdev);
+
+ return ret;
+}
+
+int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ void __iomem *base)
+{
+ char __iomem *p = base;
+ struct chameleon_fpga_header *header;
+ uint32_t dtype;
+ int num_cells = 0;
+ int ret = 0;
+ u32 hsize;
+
+ hsize = sizeof(struct chameleon_fpga_header);
+
+ header = kzalloc(hsize, GFP_KERNEL);
+ if (!header)
+ return -ENOMEM;
+
+ /* Extract header information */
+ memcpy_fromio(header, p, hsize);
+ /* We only support chameleon v2 at the moment */
+ header->magic = le16_to_cpu(header->magic);
+ if (header->magic != CHAMELEONV2_MAGIC) {
+ pr_err("Unsupported chameleon version 0x%x\n",
+ header->magic);
+ kfree(header);
+ return -ENODEV;
+ }
+ p += hsize;
+
+ pr_debug("header->revision = %d\n", header->revision);
+ pr_debug("header->model = 0x%x ('%c')\n", header->model,
+ header->model);
+ pr_debug("header->minor = %d\n", header->minor);
+ pr_debug("header->bus_type = 0x%x\n", header->bus_type);
+
+ pr_debug("header->magic = 0x%x\n", header->magic);
+ pr_debug("header->filename = \"%.*s\"\n", CHAMELEON_FILENAME_LEN,
+ header->filename);
+
+ for_each_chameleon_cell(dtype, p) {
+ switch (dtype) {
+ case CHAMELEON_DTYPE_GENERAL:
+ ret = chameleon_parse_gdd(bus, mapbase, p);
+ if (ret < 0)
+ goto out;
+ p += sizeof(struct chameleon_gdd);
+ break;
+ case CHAMELEON_DTYPE_BRIDGE:
+ chameleon_parse_bdd(bus, mapbase, p);
+ p += sizeof(struct chameleon_bdd);
+ break;
+ case CHAMELEON_DTYPE_END:
+ break;
+ default:
+ pr_err("Invalid chameleon descriptor type 0x%x\n",
+ dtype);
+ ret = -EINVAL;
+ goto out;
+ }
+ num_cells++;
+ }
+
+ if (num_cells == 0)
+ num_cells = -EINVAL;
+
+ kfree(header);
+ return num_cells;
+
+out:
+ kfree(header);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chameleon_parse_cells);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
new file mode 100644
index 000000000000..99c742cbfb5b
--- /dev/null
+++ b/drivers/mcb/mcb-pci.c
@@ -0,0 +1,114 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mcb.h>
+
+#include "mcb-internal.h"
+
+struct priv {
+ struct mcb_bus *bus;
+ void __iomem *base;
+};
+
+static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct priv *priv;
+ phys_addr_t mapbase;
+ int ret;
+ int num_cells;
+ unsigned long flags;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ mapbase = pci_resource_start(pdev, 0);
+ if (!mapbase) {
+ dev_err(&pdev->dev, "No PCI resource\n");
+ goto err_start;
+ }
+
+ ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+ goto err_start;
+ }
+
+ priv->base = pci_iomap(pdev, 0, 0);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ flags = pci_resource_flags(pdev, 0);
+ if (flags & IORESOURCE_IO) {
+ ret = -ENOTSUPP;
+ dev_err(&pdev->dev,
+ "IO mapped PCI devices are not supported\n");
+ goto err_ioremap;
+ }
+
+ pci_set_drvdata(pdev, priv);
+
+ priv->bus = mcb_alloc_bus();
+
+ ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+ if (ret < 0)
+ goto err_drvdata;
+ num_cells = ret;
+
+ dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
+
+ mcb_bus_add_devices(priv->bus);
+
+ return 0;
+
+err_drvdata:
+ pci_iounmap(pdev, priv->base);
+err_ioremap:
+ pci_release_region(pdev, 0);
+err_start:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void mcb_pci_remove(struct pci_dev *pdev)
+{
+ struct priv *priv = pci_get_drvdata(pdev);
+
+ mcb_release_bus(priv->bus);
+}
+
+static const struct pci_device_id mcb_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEN, PCI_DEVICE_ID_MEN_CHAMELEON) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, mcb_pci_tbl);
+
+static struct pci_driver mcb_pci_driver = {
+ .name = "mcb-pci",
+ .id_table = mcb_pci_tbl,
+ .probe = mcb_pci_probe,
+ .remove = mcb_pci_remove,
+};
+
+module_pci_driver(mcb_pci_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MCB over PCI support");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9a06fe883766..95ad936e6048 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING
---help---
Provides thin provisioning and snapshots that share a data store.
-config DM_DEBUG_BLOCK_STACK_TRACING
- boolean "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
- select STACKTRACE
- ---help---
- Enable this for messages that may help debug problems with the
- block manager locking used by thin provisioning and caching.
-
- If unsure, say N.
-
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
depends on BLK_DEV_DM
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 2638417b19aa..4d200883c505 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -24,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
-
-# cgroup code needs to be updated:
-#
-#config CGROUP_BCACHE
-# bool "Cgroup controls for bcache"
-# depends on BCACHE && BLK_CGROUP
-# ---help---
-# TODO
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index 0e9c82523be6..c488b846f831 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_BCACHE) += bcache.o
-bcache-y := alloc.o btree.o bset.o io.o journal.o writeback.o\
- movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
+ io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+ util.o writeback.o
CFLAGS_request.o += -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4c9852d92b0a..443d03fbac47 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -78,12 +78,6 @@ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
- if (CACHE_SYNC(&ca->set->sb)) {
- ca->need_save_prio = max(ca->need_save_prio,
- bucket_disk_gen(b));
- WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
- }
-
return ret;
}
@@ -120,50 +114,63 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
mutex_unlock(&c->bucket_lock);
}
-/* Allocation */
+/*
+ * Background allocation thread: scans for buckets to be invalidated,
+ * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
+ * then optionally issues discard commands to the newly free buckets, then puts
+ * them on the various freelists.
+ */
static inline bool can_inc_bucket_gen(struct bucket *b)
{
- return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
- bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+ return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}
-bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
- BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
-
- if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
- CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
- return false;
-
- b->prio = 0;
-
- if (can_inc_bucket_gen(b) &&
- fifo_push(&ca->unused, b - ca->buckets)) {
- atomic_inc(&b->pin);
- return true;
- }
+ BUG_ON(!ca->set->gc_mark_valid);
- return false;
-}
-
-static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
-{
- return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+ return (!GC_MARK(b) ||
+ GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
!atomic_read(&b->pin) &&
can_inc_bucket_gen(b);
}
-static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
+ lockdep_assert_held(&ca->set->bucket_lock);
+ BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);
+
+ if (GC_SECTORS_USED(b))
+ trace_bcache_invalidate(ca, b - ca->buckets);
+
bch_inc_gen(ca, b);
b->prio = INITIAL_PRIO;
atomic_inc(&b->pin);
+}
+
+static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+ __bch_invalidate_one_bucket(ca, b);
+
fifo_push(&ca->free_inc, b - ca->buckets);
}
-#define bucket_prio(b) \
- (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+/*
+ * Determines what order we're going to reuse buckets, smallest bucket_prio()
+ * first: we also take into account the number of sectors of live data in that
+ * bucket, and in order for that multiplication to make sense we have to scale
+ * the bucket priorities.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b) \
+({ \
+ unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ \
+ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
+})
#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
@@ -176,20 +183,7 @@ static void invalidate_buckets_lru(struct cache *ca)
ca->heap.used = 0;
for_each_bucket(b, ca) {
- /*
- * If we fill up the unused list, if we then return before
- * adding anything to the free_inc list we'll skip writing
- * prios/gens and just go back to allocating from the unused
- * list:
- */
- if (fifo_full(&ca->unused))
- return;
-
- if (!can_invalidate_bucket(ca, b))
- continue;
-
- if (!GC_SECTORS_USED(b) &&
- bch_bucket_add_unused(ca, b))
+ if (!bch_can_invalidate_bucket(ca, b))
continue;
if (!heap_full(&ca->heap))
@@ -214,7 +208,7 @@ static void invalidate_buckets_lru(struct cache *ca)
return;
}
- invalidate_one_bucket(ca, b);
+ bch_invalidate_one_bucket(ca, b);
}
}
@@ -230,8 +224,8 @@ static void invalidate_buckets_fifo(struct cache *ca)
b = ca->buckets + ca->fifo_last_bucket++;
- if (can_invalidate_bucket(ca, b))
- invalidate_one_bucket(ca, b);
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
if (++checked >= ca->sb.nbuckets) {
ca->invalidate_needs_gc = 1;
@@ -255,8 +249,8 @@ static void invalidate_buckets_random(struct cache *ca)
b = ca->buckets + n;
- if (can_invalidate_bucket(ca, b))
- invalidate_one_bucket(ca, b);
+ if (bch_can_invalidate_bucket(ca, b))
+ bch_invalidate_one_bucket(ca, b);
if (++checked >= ca->sb.nbuckets / 2) {
ca->invalidate_needs_gc = 1;
@@ -268,8 +262,7 @@ static void invalidate_buckets_random(struct cache *ca)
static void invalidate_buckets(struct cache *ca)
{
- if (ca->invalidate_needs_gc)
- return;
+ BUG_ON(ca->invalidate_needs_gc);
switch (CACHE_REPLACEMENT(&ca->sb)) {
case CACHE_REPLACEMENT_LRU:
@@ -282,8 +275,6 @@ static void invalidate_buckets(struct cache *ca)
invalidate_buckets_random(ca);
break;
}
-
- trace_bcache_alloc_invalidate(ca);
}
#define allocator_wait(ca, cond) \
@@ -304,6 +295,21 @@ do { \
__set_current_state(TASK_RUNNING); \
} while (0)
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+ unsigned i;
+
+ /* Prios/gens are actually the most important reserve */
+ if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+ return true;
+
+ for (i = 0; i < RESERVE_NR; i++)
+ if (fifo_push(&ca->free[i], bucket))
+ return true;
+
+ return false;
+}
+
static int bch_allocator_thread(void *arg)
{
struct cache *ca = arg;
@@ -316,17 +322,10 @@ static int bch_allocator_thread(void *arg)
* possibly issue discards to them, then we add the bucket to
* the free list:
*/
- while (1) {
+ while (!fifo_empty(&ca->free_inc)) {
long bucket;
- if ((!atomic_read(&ca->set->prio_blocked) ||
- !CACHE_SYNC(&ca->set->sb)) &&
- !fifo_empty(&ca->unused))
- fifo_pop(&ca->unused, bucket);
- else if (!fifo_empty(&ca->free_inc))
- fifo_pop(&ca->free_inc, bucket);
- else
- break;
+ fifo_pop(&ca->free_inc, bucket);
if (ca->discard) {
mutex_unlock(&ca->set->bucket_lock);
@@ -336,9 +335,8 @@ static int bch_allocator_thread(void *arg)
mutex_lock(&ca->set->bucket_lock);
}
- allocator_wait(ca, !fifo_full(&ca->free));
-
- fifo_push(&ca->free, bucket);
+ allocator_wait(ca, bch_allocator_push(ca, bucket));
+ wake_up(&ca->set->btree_cache_wait);
wake_up(&ca->set->bucket_wait);
}
@@ -348,9 +346,9 @@ static int bch_allocator_thread(void *arg)
* them to the free_inc list:
*/
+retry_invalidate:
allocator_wait(ca, ca->set->gc_mark_valid &&
- (ca->need_save_prio > 64 ||
- !ca->invalidate_needs_gc));
+ !ca->invalidate_needs_gc);
invalidate_buckets(ca);
/*
@@ -358,59 +356,73 @@ static int bch_allocator_thread(void *arg)
* new stuff to them:
*/
allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
- if (CACHE_SYNC(&ca->set->sb) &&
- (!fifo_empty(&ca->free_inc) ||
- ca->need_save_prio > 64))
+ if (CACHE_SYNC(&ca->set->sb)) {
+ /*
+ * This could deadlock if an allocation with a btree
+ * node locked ever blocked - having the btree node
+ * locked would block garbage collection, but here we're
+ * waiting on garbage collection before we invalidate
+ * and free anything.
+ *
+ * But this should be safe since the btree code always
+ * uses btree_check_reserve() before allocating now, and
+ * if it fails it blocks without btree nodes locked.
+ */
+ if (!fifo_full(&ca->free_inc))
+ goto retry_invalidate;
+
bch_prio_write(ca);
+ }
}
}
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+/* Allocation */
+
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
DEFINE_WAIT(w);
struct bucket *b;
long r;
/* fastpath */
- if (fifo_used(&ca->free) > ca->watermark[watermark]) {
- fifo_pop(&ca->free, r);
+ if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+ fifo_pop(&ca->free[reserve], r))
goto out;
- }
- if (!wait)
+ if (!wait) {
+ trace_bcache_alloc_fail(ca, reserve);
return -1;
+ }
- while (1) {
- if (fifo_used(&ca->free) > ca->watermark[watermark]) {
- fifo_pop(&ca->free, r);
- break;
- }
-
+ do {
prepare_to_wait(&ca->set->bucket_wait, &w,
TASK_UNINTERRUPTIBLE);
mutex_unlock(&ca->set->bucket_lock);
schedule();
mutex_lock(&ca->set->bucket_lock);
- }
+ } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+ !fifo_pop(&ca->free[reserve], r));
finish_wait(&ca->set->bucket_wait, &w);
out:
wake_up_process(ca->alloc_thread);
+ trace_bcache_alloc(ca, reserve);
+
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
+ unsigned j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
- fifo_for_each(i, &ca->free, iter)
- BUG_ON(i == r);
+ for (j = 0; j < RESERVE_NR; j++)
+ fifo_for_each(i, &ca->free[j], iter)
+ BUG_ON(i == r);
fifo_for_each(i, &ca->free_inc, iter)
BUG_ON(i == r);
- fifo_for_each(i, &ca->unused, iter)
- BUG_ON(i == r);
}
b = ca->buckets + r;
@@ -419,7 +431,7 @@ out:
SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
- if (watermark <= WATERMARK_METADATA) {
+ if (reserve <= RESERVE_PRIO) {
SET_GC_MARK(b, GC_MARK_METADATA);
SET_GC_MOVE(b, 0);
b->prio = BTREE_PRIO;
@@ -432,20 +444,22 @@ out:
return r;
}
+void __bch_bucket_free(struct cache *ca, struct bucket *b)
+{
+ SET_GC_MARK(b, 0);
+ SET_GC_SECTORS_USED(b, 0);
+}
+
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
unsigned i;
- for (i = 0; i < KEY_PTRS(k); i++) {
- struct bucket *b = PTR_BUCKET(c, k, i);
-
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
- SET_GC_SECTORS_USED(b, 0);
- bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
- }
+ for (i = 0; i < KEY_PTRS(k); i++)
+ __bch_bucket_free(PTR_CACHE(c, k, i),
+ PTR_BUCKET(c, k, i));
}
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct bkey *k, int n, bool wait)
{
int i;
@@ -459,7 +473,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
for (i = 0; i < n; i++) {
struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, watermark, wait);
+ long b = bch_bucket_alloc(ca, reserve, wait);
if (b == -1)
goto err;
@@ -478,12 +492,12 @@ err:
return -1;
}
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+ ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
@@ -573,8 +587,8 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
unsigned watermark = write_prio
- ? WATERMARK_MOVINGGC
- : WATERMARK_NONE;
+ ? RESERVE_MOVINGGC
+ : RESERVE_NONE;
spin_unlock(&c->data_bucket_lock);
@@ -680,25 +694,3 @@ int bch_cache_allocator_start(struct cache *ca)
ca->alloc_thread = k;
return 0;
}
-
-int bch_cache_allocator_init(struct cache *ca)
-{
- /*
- * Reserve:
- * Prio/gen writes first
- * Then 8 for btree allocations
- * Then half for the moving garbage collector
- */
-
- ca->watermark[WATERMARK_PRIO] = 0;
-
- ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
-
- ca->watermark[WATERMARK_MOVINGGC] = 8 +
- ca->watermark[WATERMARK_METADATA];
-
- ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
- ca->watermark[WATERMARK_MOVINGGC];
-
- return 0;
-}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..82c9c5d35251 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -187,6 +187,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -194,9 +195,7 @@ struct bucket {
atomic_t pin;
uint16_t prio;
uint8_t gen;
- uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
- uint8_t gc_gen;
uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
@@ -206,10 +205,12 @@ struct bucket {
*/
BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
-#define GC_MARK_RECLAIMABLE 0
-#define GC_MARK_DIRTY 1
-#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_MARK_RECLAIMABLE 1
+#define GC_MARK_DIRTY 2
+#define GC_MARK_METADATA 3
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
@@ -280,7 +281,6 @@ struct bcache_device {
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
- mempool_t *unaligned_bvec;
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -310,7 +310,8 @@ struct cached_dev {
struct cache_sb sb;
struct bio sb_bio;
struct bio_vec sb_bv[1];
- struct closure_with_waitlist sb_write;
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
/* Refcount on the cache set. Always nonzero when we're caching. */
atomic_t count;
@@ -383,12 +384,12 @@ struct cached_dev {
unsigned writeback_rate_p_term_inverse;
};
-enum alloc_watermarks {
- WATERMARK_PRIO,
- WATERMARK_METADATA,
- WATERMARK_MOVINGGC,
- WATERMARK_NONE,
- WATERMARK_MAX
+enum alloc_reserve {
+ RESERVE_BTREE,
+ RESERVE_PRIO,
+ RESERVE_MOVINGGC,
+ RESERVE_NONE,
+ RESERVE_NR,
};
struct cache {
@@ -400,8 +401,6 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
- unsigned watermark[WATERMARK_MAX];
-
struct task_struct *alloc_thread;
struct closure prio;
@@ -425,14 +424,9 @@ struct cache {
* their new gen to disk. After prio_write() finishes writing the new
* gens/prios, they'll be moved to the free list (and possibly discarded
* in the process)
- *
- * unused: GC found nothing pointing into these buckets (possibly
- * because all the data they contained was overwritten), so we only
- * need to discard them before they can be moved to the free list.
*/
- DECLARE_FIFO(long, free);
+ DECLARE_FIFO(long, free)[RESERVE_NR];
DECLARE_FIFO(long, free_inc);
- DECLARE_FIFO(long, unused);
size_t fifo_last_bucket;
@@ -442,12 +436,6 @@ struct cache {
DECLARE_HEAP(struct bucket *, heap);
/*
- * max(gen - disk_gen) for all buckets. When it gets too big we have to
- * call prio_write() to keep gens from wrapping.
- */
- uint8_t need_save_prio;
-
- /*
* If nonzero, we know we aren't going to find any buckets to invalidate
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
@@ -515,7 +503,8 @@ struct cache_set {
uint64_t cached_dev_sectors;
struct closure caching;
- struct closure_with_waitlist sb_write;
+ struct closure sb_write;
+ struct semaphore sb_write_mutex;
mempool_t *search;
mempool_t *bio_meta;
@@ -560,19 +549,16 @@ struct cache_set {
struct list_head btree_cache_freed;
/* Number of elements in btree_cache + btree_cache_freeable lists */
- unsigned bucket_cache_used;
+ unsigned btree_cache_used;
/*
* If we need to allocate memory for a new btree node and that
* allocation fails, we can cannibalize another node in the btree cache
- * to satisfy the allocation. However, only one thread can be doing this
- * at a time, for obvious reasons - try_harder and try_wait are
- * basically a lock for this that we can wait on asynchronously. The
- * btree_root() macro releases the lock when it returns.
+ * to satisfy the allocation - lock to guarantee only one thread does
+ * this at a time:
*/
- struct task_struct *try_harder;
- wait_queue_head_t try_wait;
- uint64_t try_harder_start;
+ wait_queue_head_t btree_cache_wait;
+ struct task_struct *btree_cache_alloc_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
@@ -601,7 +587,7 @@ struct cache_set {
uint16_t min_prio;
/*
- * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
+ * max(gen - last_gc) for all buckets. When it gets too big we have to gc
* to keep gens from wrapping around.
*/
uint8_t need_gc;
@@ -626,17 +612,21 @@ struct cache_set {
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
+ struct workqueue_struct *moving_gc_wq;
+
struct btree *root;
#ifdef CONFIG_BCACHE_DEBUG
struct btree *verify_data;
+ struct bset *verify_ondisk;
struct mutex verify_lock;
#endif
unsigned nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
- struct closure_with_waitlist uuid_write;
+ struct closure uuid_write;
+ struct semaphore uuid_write_mutex;
/*
* A btree node on disk could have too many bsets for an iterator to fit
@@ -644,13 +634,7 @@ struct cache_set {
*/
mempool_t *fill_iter;
- /*
- * btree_sort() is a merge sort and requires temporary space - single
- * element mempool
- */
- struct mutex sort_lock;
- struct bset *sort;
- unsigned sort_crit_factor;
+ struct bset_sort_state sort;
/* List of buckets we're currently writing data to */
struct list_head data_buckets;
@@ -666,11 +650,9 @@ struct cache_set {
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
- struct time_stats sort_time;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
struct time_stats btree_read_time;
- struct time_stats try_harder_time;
atomic_long_t cache_read_races;
atomic_long_t writeback_keys_done;
@@ -684,9 +666,9 @@ struct cache_set {
unsigned error_decay;
unsigned short journal_delay_ms;
+ bool expensive_debug_checks;
unsigned verify:1;
unsigned key_merging_disabled:1;
- unsigned expensive_debug_checks:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
@@ -708,13 +690,8 @@ struct bbio {
struct bio bio;
};
-static inline unsigned local_clock_us(void)
-{
- return local_clock() >> 10;
-}
-
#define BTREE_PRIO USHRT_MAX
-#define INITIAL_PRIO 32768
+#define INITIAL_PRIO 32768U
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
@@ -727,21 +704,6 @@ static inline unsigned local_clock_us(void)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
#define block_bytes(c) ((c)->sb.block_size << 9)
-#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
-#define set_bytes(i) __set_bytes(i, i->keys)
-
-#define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
-#define set_blocks(i, c) __set_blocks(i, (i)->keys, c)
-
-#define node(i, j) ((struct bkey *) ((i)->d + (j)))
-#define end(i) node(i, (i)->keys)
-
-#define index(i, b) \
- ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
- block_bytes(b->c)))
-
-#define btree_data_space(b) (PAGE_SIZE << (b)->page_order)
-
#define prios_per_bucket(c) \
((bucket_bytes(c) - sizeof(struct prio_set)) / \
sizeof(struct bucket_disk))
@@ -784,20 +746,34 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
-/* Btree key macros */
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+ uint8_t r = a - b;
+ return r > 128U ? 0 : r;
+}
+
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+ unsigned i)
+{
+ return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+}
-static inline void bkey_init(struct bkey *k)
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+ unsigned i)
{
- *k = ZERO_KEY;
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
+/* Btree key macros */
+
/*
* This is used for various on disk data structures - cache_sb, prio_set, bset,
* jset: The checksum is _always_ the first 8 bytes of these structs
*/
#define csum_set(i) \
bch_crc64(((void *) (i)) + sizeof(uint64_t), \
- ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+ ((void *) bset_bkey_last(i)) - \
+ (((void *) (i)) + sizeof(uint64_t)))
/* Error handling macros */
@@ -859,9 +835,6 @@ static inline bool cached_dev_get(struct cached_dev *dc)
/*
* bucket_gc_gen() returns the difference between the bucket's current gen and
* the oldest gen of any pointer into that bucket in the btree (last_gc).
- *
- * bucket_disk_gen() returns the difference between the current gen and the gen
- * on disk; they're both used to make sure gens don't wrap around.
*/
static inline uint8_t bucket_gc_gen(struct bucket *b)
@@ -869,13 +842,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
return b->gen - b->last_gc;
}
-static inline uint8_t bucket_disk_gen(struct bucket *b)
-{
- return b->gen - b->disk_gen;
-}
-
#define BUCKET_GC_GEN_MAX 96U
-#define BUCKET_DISK_GEN_MAX 64U
#define kobj_attribute_write(n, fn) \
static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
@@ -902,18 +869,20 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
-bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, bool);
+bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
+void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
+
+void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
@@ -964,13 +933,10 @@ int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
-int bch_cache_allocator_init(struct cache *ca);
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);
-void bch_btree_exit(void);
-int bch_btree_init(void);
#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 7d388b8bb50e..545416415305 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -5,30 +5,134 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+ struct bkey *k, *next;
+
+ for (k = i->start; k < bset_bkey_last(i); k = next) {
+ next = bkey_next(k);
+
+ printk(KERN_ERR "block %u key %u/%u: ", set,
+ (unsigned) ((u64 *) k - i->d), i->keys);
+
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+ else
+ printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+ if (next < bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ?
+ &START_KEY(next) : next) > 0)
+ printk(KERN_ERR "Key skipped backwards\n");
+ }
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+ unsigned i;
+
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ bch_dump_bset(b, b->set[i].data,
+ bset_sector_offset(b, b->set[i].data));
+ console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+ unsigned ret = 0;
+ struct btree_iter iter;
+ struct bkey *k;
+
+ if (b->ops->is_extents)
+ for_each_key(b, k, &iter)
+ ret += KEY_SIZE(k);
+ return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+ va_list args;
+ struct bkey *k, *p = NULL;
+ struct btree_iter iter;
+ const char *err;
+
+ for_each_key(b, k, &iter) {
+ if (b->ops->is_extents) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
+ }
+ p = k;
+ }
+#if 0
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+#endif
+ return;
+bug:
+ bch_dump_bucket(b);
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ panic("bch_check_keys error: %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->ops->is_extents ?
+ &START_KEY(next) : next) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
/* Keylists */
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
size_t oldsize = bch_keylist_nkeys(l);
- size_t newsize = oldsize + 2 + nptrs;
+ size_t newsize = oldsize + u64s;
uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
uint64_t *new_keys;
- /* The journalling code doesn't handle the case where the keys to insert
- * is bigger than an empty write: If we just return -ENOMEM here,
- * bio_insert() and bio_invalidate() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
- return -ENOMEM;
-
newsize = roundup_pow_of_two(newsize);
if (newsize <= KEYLIST_INLINE ||
@@ -71,136 +175,6 @@ void bch_keylist_pop_front(struct keylist *l)
bch_keylist_bytes(l));
}
-/* Pointer validation */
-
-static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- return true;
- }
-
- return false;
-}
-
-bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- char buf[80];
-
- if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
- goto bad;
-
- if (__ptr_invalid(c, k))
- goto bad;
-
- return false;
-bad:
- bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
- return true;
-}
-
-bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
- char buf[80];
-
- if (!KEY_SIZE(k))
- return true;
-
- if (KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
-
- if (__ptr_invalid(c, k))
- goto bad;
-
- return false;
-bad:
- bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
- return true;
-}
-
-static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
- unsigned ptr)
-{
- struct bucket *g = PTR_BUCKET(b->c, k, ptr);
- char buf[80];
-
- if (mutex_trylock(&b->c->bucket_lock)) {
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto err;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto err;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto err;
- }
- mutex_unlock(&b->c->bucket_lock);
- }
-
- return false;
-err:
- mutex_unlock(&b->c->bucket_lock);
- bch_bkey_to_text(buf, sizeof(buf), k);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- return true;
-}
-
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
- struct bucket *g;
- unsigned i, stale;
-
- if (!bkey_cmp(k, &ZERO_KEY) ||
- !KEY_PTRS(k) ||
- bch_ptr_invalid(b, k))
- return true;
-
- for (i = 0; i < KEY_PTRS(k); i++) {
- if (!ptr_available(b->c, k, i))
- return true;
-
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
-
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
-
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
-
- if (stale)
- return true;
-
- if (expensive_debug_checks(b->c) &&
- ptr_bad_expensive_checks(b, k, i))
- return true;
- }
-
- return false;
-}
-
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
@@ -255,56 +229,138 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
return true;
}
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS 3
+#define BKEY_EXPONENT_BITS 7
+#define BKEY_MANTISSA_BITS (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+ unsigned exponent:BKEY_EXPONENT_BITS;
+ unsigned m:BKEY_MID_BITS;
+ unsigned mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 128
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
{
- return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
- ~((uint64_t)1 << 63);
+ return PAGE_SIZE << b->page_order;
}
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
- unsigned i;
+ return btree_keys_bytes(b) / BSET_CACHELINE;
+}
- if (key_merging_disabled(b->c))
- return false;
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
- if (KEY_PTRS(l) != KEY_PTRS(r) ||
- KEY_DIRTY(l) != KEY_DIRTY(r) ||
- bkey_cmp(l, &START_KEY(r)))
- return false;
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
- for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
- PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
- return false;
+/* Memory allocation */
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
- SET_KEY_SIZE(l, USHRT_MAX);
+void bch_btree_keys_free(struct btree_keys *b)
+{
+ struct bset_tree *t = b->set;
- bch_cut_front(l, r);
- return false;
- }
+ if (bset_prev_bytes(b) < PAGE_SIZE)
+ kfree(t->prev);
+ else
+ free_pages((unsigned long) t->prev,
+ get_order(bset_prev_bytes(b)));
- if (KEY_CSUM(l)) {
- if (KEY_CSUM(r))
- l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
- else
- SET_KEY_CSUM(l, 0);
- }
+ if (bset_tree_bytes(b) < PAGE_SIZE)
+ kfree(t->tree);
+ else
+ free_pages((unsigned long) t->tree,
+ get_order(bset_tree_bytes(b)));
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
- SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+ free_pages((unsigned long) t->data, b->page_order);
- return true;
+ t->prev = NULL;
+ t->tree = NULL;
+ t->data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_free);
+
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+{
+ struct bset_tree *t = b->set;
+
+ BUG_ON(t->data);
+
+ b->page_order = page_order;
+
+ t->data = (void *) __get_free_pages(gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+ t->tree = bset_tree_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_tree_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+ if (!t->tree)
+ goto err;
+
+ t->prev = bset_prev_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_prev_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+ if (!t->prev)
+ goto err;
+
+ return 0;
+err:
+ bch_btree_keys_free(b);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks)
+{
+ unsigned i;
+
+ b->ops = ops;
+ b->expensive_debug_checks = expensive_debug_checks;
+ b->nsets = 0;
+ b->last_set_unwritten = 0;
+
+ /* XXX: shouldn't be needed */
+ for (i = 0; i < MAX_BSETS; i++)
+ b->set[i].size = 0;
+ /*
+ * Second loop starts at 1 because b->set[0].data is the memory we
+ * allocated
+ */
+ for (i = 1; i < MAX_BSETS; i++)
+ b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
@@ -455,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+ unsigned cacheline,
+ struct bkey *k)
{
- return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+ return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
@@ -504,7 +562,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
: tree_to_prev_bkey(t, j >> ffs(j));
struct bkey *r = is_power_of_2(j + 1)
- ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+ ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
: tree_to_bkey(t, j >> (ffz(j) + 1));
BUG_ON(m < l || m > r);
@@ -528,9 +586,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
f->exponent = 127;
}
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
- if (t != b->sets) {
+ if (t != b->set) {
unsigned j = roundup(t[-1].size,
64 / sizeof(struct bkey_float));
@@ -538,33 +596,54 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
t->prev = t[-1].prev + j;
}
- while (t < b->sets + MAX_BSETS)
+ while (t < b->set + MAX_BSETS)
t++->size = 0;
}
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(b->last_set_unwritten);
+ b->last_set_unwritten = 1;
bset_alloc_tree(b, t);
- if (t->tree != b->sets->tree + bset_tree_space(b)) {
- t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+ if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+ t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
t->size = 1;
}
}
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+ if (i != b->set->data) {
+ b->set[++b->nsets].data = i;
+ i->seq = b->set->data->seq;
+ } else
+ get_random_bytes(&i->seq, sizeof(uint64_t));
+
+ i->magic = magic;
+ i->version = 0;
+ i->keys = 0;
+
+ bch_bset_build_unwritten_tree(b);
+}
+EXPORT_SYMBOL(bch_bset_init_next);
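+
+/*
+ * Every bset within a node shares set[0]'s seq; a brand new node gets a
+ * random one. The read path (bch_btree_node_read_done()) relies on this
+ * to tell where the valid on disk bsets end.
+ */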
+
+void bch_bset_build_written_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
- struct bkey *k = t->data->start;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey *prev = NULL, *k = t->data->start;
unsigned j, cacheline = 1;
+ b->last_set_unwritten = 0;
+
bset_alloc_tree(b, t);
t->size = min_t(unsigned,
- bkey_to_cacheline(t, end(t->data)),
- b->sets->tree + bset_tree_space(b) - t->tree);
+ bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ b->set->tree + btree_keys_cachelines(b) - t->tree);
if (t->size < 2) {
t->size = 0;
@@ -577,16 +656,14 @@ static void bset_build_written_tree(struct btree *b)
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size)) {
- while (bkey_to_cacheline(t, k) != cacheline)
- k = bkey_next(k);
+ while (bkey_to_cacheline(t, k) < cacheline)
+ prev = k, k = bkey_next(k);
- t->prev[j] = bkey_u64s(k);
- k = bkey_next(k);
- cacheline++;
- t->tree[j].m = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_u64s(prev);
+ t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
}
- while (bkey_next(k) != end(t->data))
+ while (bkey_next(k) != bset_bkey_last(t->data))
k = bkey_next(k);
t->end = *k;
@@ -597,14 +674,17 @@ static void bset_build_written_tree(struct btree *b)
j = inorder_next(j, t->size))
make_bfloat(t, j);
}
+EXPORT_SYMBOL(bch_bset_build_written_tree);
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
struct bset_tree *t;
unsigned inorder, j = 1;
- for (t = b->sets; t <= &b->sets[b->nsets]; t++)
- if (k < end(t->data))
+ for (t = b->set; t <= bset_tree_last(b); t++)
+ if (k < bset_bkey_last(t->data))
goto found_set;
BUG();
@@ -617,7 +697,7 @@ found_set:
if (k == t->data->start)
goto fix_left;
- if (bkey_next(k) == end(t->data)) {
+ if (bkey_next(k) == bset_bkey_last(t->data)) {
t->end = *k;
goto fix_right;
}
@@ -642,10 +722,12 @@ fix_right: do {
j = j * 2 + 1;
} while (j < t->size);
}
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ struct bset_tree *t,
+ struct bkey *k)
{
- struct bset_tree *t = &b->sets[b->nsets];
unsigned shift = bkey_u64s(k);
unsigned j = bkey_to_cacheline(t, k);
@@ -657,8 +739,8 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
* lookup table for the first key that is strictly greater than k:
* it's either k's cacheline or the next one
*/
- if (j < t->size &&
- table_to_bkey(t, j) <= k)
+ while (j < t->size &&
+ table_to_bkey(t, j) <= k)
j++;
/* Adjust all the lookup table entries, and find a new key for any that
@@ -673,54 +755,124 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
while (k < cacheline_to_bkey(t, j, 0))
k = bkey_next(k);
- t->prev[j] = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_to_cacheline_offset(t, j, k);
}
}
- if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+ if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
return;
/* Possibly add a new entry to the end of the lookup table */
for (k = table_to_bkey(t, t->size - 1);
- k != end(t->data);
+ k != bset_bkey_last(t->data);
k = bkey_next(k))
if (t->size == bkey_to_cacheline(t, k)) {
- t->prev[t->size] = bkey_to_cacheline_offset(k);
+ t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
t->size++;
}
}
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
- struct bset *i = write_block(b);
+ if (!b->ops->key_merge)
+ return false;
- if (i != b->sets[0].data) {
- b->sets[++b->nsets].data = i;
- i->seq = b->sets[0].data->seq;
- } else
- get_random_bytes(&i->seq, sizeof(uint64_t));
+ /*
+ * Generic header checks
+ * Assumes left and right are in order
+ * Left and right must be exactly aligned
+ */
+ if (!bch_bkey_equal_header(l, r) ||
+ bkey_cmp(l, &START_KEY(r)))
+ return false;
- i->magic = bset_magic(&b->c->sb);
- i->version = 0;
- i->keys = 0;
+ return b->ops->key_merge(b, l, r);
+}
+EXPORT_SYMBOL(bch_bkey_try_merge);
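+
+/*
+ * Example: for extent keys l covering sectors [0, 8) and r covering
+ * [8, 16) of the same inode, the headers match and l ends exactly where
+ * r starts, so a successful merge leaves l covering [0, 16) with r
+ * untouched. Keys that differ in dirty/ptrs/csum, or that leave a gap,
+ * are never merged here.
+ */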
+
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(!b->last_set_unwritten);
+ BUG_ON(bset_byte_offset(b, t->data) +
+ __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+ PAGE_SIZE << b->page_order);
+
+ memmove((uint64_t *) where + bkey_u64s(insert),
+ where,
+ (void *) bset_bkey_last(t->data) - (void *) where);
+
+ t->data->keys += bkey_u64s(insert);
+ bkey_copy(where, insert);
+ bch_bset_fix_lookup_table(b, t, where);
+}
+EXPORT_SYMBOL(bch_bset_insert);
+
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key)
+{
+ unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+ struct btree_iter iter;
+
+ BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+ m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+ ? PRECEDING_KEY(&START_KEY(k))
+ : PRECEDING_KEY(k));
+
+ if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ return status;
- bset_build_unwritten_tree(b);
+ status = BTREE_INSERT_STATUS_INSERT;
+
+ while (m != bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+ prev = m, m = bkey_next(m);
+
+ /* prev is in the tree, if we merge we're done */
+ status = BTREE_INSERT_STATUS_BACK_MERGE;
+ if (prev &&
+ bch_bkey_try_merge(b, prev, k))
+ goto merged;
+#if 0
+ status = BTREE_INSERT_STATUS_OVERWROTE;
+ if (m != bset_bkey_last(i) &&
+ KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+ goto copy;
+#endif
+ status = BTREE_INSERT_STATUS_FRONT_MERGE;
+ if (m != bset_bkey_last(i) &&
+ bch_bkey_try_merge(b, k, m))
+ goto copy;
+
+ bch_bset_insert(b, m, k);
+copy: bkey_copy(m, k);
+merged:
+ return status;
}
+EXPORT_SYMBOL(bch_btree_insert_key);
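+
+/*
+ * The return value says what happened: NO_INSERT if the ops' insert_fixup()
+ * handled (or rejected) the key, BACK_MERGE if k was folded into the
+ * preceding key, FRONT_MERGE if it was folded into the following key, and
+ * plain INSERT if it was shifted into place as a new key.
+ */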
+
+/* Lookup */
struct bset_search_iter {
struct bkey *l, *r;
};
-static struct bset_search_iter bset_search_write_set(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{
unsigned li = 0, ri = t->size;
- BUG_ON(!b->nsets &&
- t->size < bkey_to_cacheline(t, end(t->data)));
-
while (li + 1 != ri) {
unsigned m = (li + ri) >> 1;
@@ -732,12 +884,11 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
return (struct bset_search_iter) {
table_to_bkey(t, li),
- ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+ ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
};
}
-static struct bset_search_iter bset_search_tree(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
const struct bkey *search)
{
struct bkey *l, *r;
@@ -784,7 +935,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
f = &t->tree[inorder_next(j, t->size)];
r = cacheline_to_bkey(t, inorder, f->m);
} else
- r = end(t->data);
+ r = bset_bkey_last(t->data);
} else {
r = cacheline_to_bkey(t, inorder, f->m);
@@ -798,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
return (struct bset_search_iter) {l, r};
}
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search)
{
struct bset_search_iter i;
@@ -820,7 +971,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
if (unlikely(!t->size)) {
i.l = t->data->start;
- i.r = end(t->data);
+ i.r = bset_bkey_last(t->data);
} else if (bset_written(b, t)) {
/*
* Each node in the auxiliary search tree covers a certain range
@@ -830,23 +981,27 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
*/
if (unlikely(bkey_cmp(search, &t->end) >= 0))
- return end(t->data);
+ return bset_bkey_last(t->data);
if (unlikely(bkey_cmp(search, t->data->start) < 0))
return t->data->start;
- i = bset_search_tree(b, t, search);
- } else
- i = bset_search_write_set(b, t, search);
+ i = bset_search_tree(t, search);
+ } else {
+ BUG_ON(!b->nsets &&
+ t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
- if (expensive_debug_checks(b->c)) {
+ i = bset_search_write_set(t, search);
+ }
+
+ if (btree_keys_expensive_checks(b)) {
BUG_ON(bset_written(b, t) &&
i.l != t->data->start &&
bkey_cmp(tree_to_prev_bkey(t,
inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
search) > 0);
- BUG_ON(i.r != end(t->data) &&
+ BUG_ON(i.r != bset_bkey_last(t->data) &&
bkey_cmp(i.r, search) <= 0);
}
@@ -856,22 +1011,17 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
return i.l;
}
+EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+ struct btree_iter_set);
+
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
- int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
- return c ? c > 0 : l.k < r.k;
+ return bkey_cmp(l.k, r.k) > 0;
}
static inline bool btree_iter_end(struct btree_iter *iter)
@@ -888,8 +1038,10 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
@@ -899,15 +1051,24 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
iter->b = b;
#endif
- for (; start <= &b->sets[b->nsets]; start++) {
+ for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, end(start->data));
+ bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search)
+{
+ return __bch_btree_iter_init(b, iter, search, b->set);
+}
+EXPORT_SYMBOL(bch_btree_iter_init);
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+ btree_iter_cmp_fn *cmp)
{
struct btree_iter_set unused;
struct bkey *ret = NULL;
@@ -924,16 +1085,23 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
}
if (iter->data->k == iter->data->end)
- heap_pop(iter, unused, btree_iter_cmp);
+ heap_pop(iter, unused, cmp);
else
- heap_sift(iter, 0, btree_iter_cmp);
+ heap_sift(iter, 0, cmp);
}
return ret;
}
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+ return __bch_btree_iter_next(iter, btree_iter_cmp);
+}
+EXPORT_SYMBOL(bch_btree_iter_next);
+
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
- struct btree *b, ptr_filter_fn fn)
+ struct btree_keys *b, ptr_filter_fn fn)
{
struct bkey *ret;
@@ -946,70 +1114,58 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
/* Mergesort */
-static void sort_key_next(struct btree_iter *iter,
- struct btree_iter_set *i)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
{
- i->k = bkey_next(i->k);
-
- if (i->k == i->end)
- *i = iter->data[--iter->used];
+ if (state->pool)
+ mempool_destroy(state->pool);
}
-static void btree_sort_fixup(struct btree_iter *iter)
+int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
- while (iter->used > 1) {
- struct btree_iter_set *top = iter->data, *i = top + 1;
-
- if (iter->used > 2 &&
- btree_iter_cmp(i[0], i[1]))
- i++;
-
- if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
- break;
+ spin_lock_init(&state->time.lock);
- if (!KEY_SIZE(i->k)) {
- sort_key_next(iter, i);
- heap_sift(iter, i - top, btree_iter_cmp);
- continue;
- }
+ state->page_order = page_order;
+ state->crit_factor = int_sqrt(1 << page_order);
- if (top->k > i->k) {
- if (bkey_cmp(top->k, i->k) >= 0)
- sort_key_next(iter, i);
- else
- bch_cut_front(top->k, i->k);
+ state->pool = mempool_create_page_pool(1, page_order);
+ if (!state->pool)
+ return -ENOMEM;
- heap_sift(iter, i - top, btree_iter_cmp);
- } else {
- /* can't happen because of comparison func */
- BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
- bch_cut_back(&START_KEY(i->k), top->k);
- }
- }
+ return 0;
}
+EXPORT_SYMBOL(bch_bset_sort_state_init);
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
+ int i;
struct bkey *k, *last = NULL;
- bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+ BKEY_PADDED(k) tmp;
+ bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
+ /* Heapify the iterator, using our comparison function */
+ for (i = iter->used / 2 - 1; i >= 0; --i)
+ heap_sift(iter, i, b->ops->sort_cmp);
+
while (!btree_iter_end(iter)) {
- if (fixup && !b->level)
- btree_sort_fixup(iter);
+ if (b->ops->sort_fixup && fixup)
+ k = b->ops->sort_fixup(iter, &tmp.k);
+ else
+ k = NULL;
+
+ if (!k)
+ k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
- k = bch_btree_iter_next(iter);
if (bad(b, k))
continue;
if (!last) {
last = out->start;
bkey_copy(last, k);
- } else if (b->level ||
- !bch_bkey_try_merge(b, last, k)) {
+ } else if (!bch_bkey_try_merge(b, last, k)) {
last = bkey_next(last);
bkey_copy(last, k);
}
@@ -1020,27 +1176,30 @@ static void btree_mergesort(struct btree *b, struct bset *out,
pr_debug("sorted %i keys", out->keys);
}
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
- unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+ unsigned start, unsigned order, bool fixup,
+ struct bset_sort_state *state)
{
uint64_t start_time;
- bool remove_stale = !b->written;
+ bool used_mempool = false;
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
- mutex_lock(&b->c->sort_lock);
- out = b->c->sort;
- order = ilog2(bucket_pages(b->c));
+ struct page *outp;
+
+ BUG_ON(order > state->page_order);
+
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
+ used_mempool = true;
+ order = state->page_order;
}
start_time = local_clock();
- btree_mergesort(b, out, iter, fixup, remove_stale);
+ btree_mergesort(b, out, iter, fixup, false);
b->nsets = start;
- if (!fixup && !start && b->written)
- bch_btree_verify(b, out);
-
if (!start && order == b->page_order) {
/*
* Our temporary buffer is the same size as the btree node's
@@ -1048,84 +1207,76 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
* memcpy()
*/
- out->magic = bset_magic(&b->c->sb);
- out->seq = b->sets[0].data->seq;
- out->version = b->sets[0].data->version;
- swap(out, b->sets[0].data);
-
- if (b->c->sort == b->sets[0].data)
- b->c->sort = out;
+ out->magic = b->set->data->magic;
+ out->seq = b->set->data->seq;
+ out->version = b->set->data->version;
+ swap(out, b->set->data);
} else {
- b->sets[start].data->keys = out->keys;
- memcpy(b->sets[start].data->start, out->start,
- (void *) end(out) - (void *) out->start);
+ b->set[start].data->keys = out->keys;
+ memcpy(b->set[start].data->start, out->start,
+ (void *) bset_bkey_last(out) - (void *) out->start);
}
- if (out == b->c->sort)
- mutex_unlock(&b->c->sort_lock);
+ if (used_mempool)
+ mempool_free(virt_to_page(out), state->pool);
else
free_pages((unsigned long) out, order);
- if (b->written)
- bset_build_written_tree(b);
+ bch_bset_build_written_tree(b);
if (!start)
- bch_time_stats_update(&b->c->sort_time, start_time);
+ bch_time_stats_update(&state->time, start_time);
}
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+ struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
struct btree_iter iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
- BUG_ON(b->sets[b->nsets].data == write_block(b) &&
- (b->sets[b->nsets].size || b->nsets));
-
+ __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned i;
for (i = start; i <= b->nsets; i++)
- keys += b->sets[i].data->keys;
+ keys += b->set[i].data->keys;
- order = roundup_pow_of_two(__set_bytes(b->sets->data,
- keys)) / PAGE_SIZE;
- if (order)
- order = ilog2(order);
+ order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false);
+ __btree_sort(b, &iter, start, order, false, state);
- EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+ EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
+EXPORT_SYMBOL(bch_btree_sort_partial);
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state)
{
- BUG_ON(!b->written);
- __btree_sort(b, iter, 0, b->page_order, true);
+ __btree_sort(b, iter, 0, b->page_order, true, state);
}
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
struct btree_iter iter;
bch_btree_iter_init(b, &iter, NULL);
- btree_mergesort(b, new->sets->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter, false, true);
- bch_time_stats_update(&b->c->sort_time, start_time);
+ bch_time_stats_update(&state->time, start_time);
- bkey_copy_key(&new->key, &b->key);
- new->sets->size = 0;
+ new->set->size = 0; /* XXX: why? */
}
#define SORT_CRIT (4096 / sizeof(uint64_t))
-void bch_btree_sort_lazy(struct btree *b)
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
unsigned crit = SORT_CRIT;
int i;
@@ -1134,50 +1285,32 @@ void bch_btree_sort_lazy(struct btree *b)
if (!b->nsets)
goto out;
- /* If not a leaf node, always sort */
- if (b->level) {
- bch_btree_sort(b);
- return;
- }
-
for (i = b->nsets - 1; i >= 0; --i) {
- crit *= b->c->sort_crit_factor;
+ crit *= state->crit_factor;
- if (b->sets[i].data->keys < crit) {
- bch_btree_sort_partial(b, i);
+ if (b->set[i].data->keys < crit) {
+ bch_btree_sort_partial(b, i, state);
return;
}
}
/* Sort if we'd overflow */
if (b->nsets + 1 == MAX_BSETS) {
- bch_btree_sort(b);
+ bch_btree_sort(b, state);
return;
}
out:
- bset_build_written_tree(b);
+ bch_bset_build_written_tree(b);
}
+EXPORT_SYMBOL(bch_btree_sort_lazy);
-/* Sysfs stuff */
-
-struct bset_stats {
- struct btree_op op;
- size_t nodes;
- size_t sets_written, sets_unwritten;
- size_t bytes_written, bytes_unwritten;
- size_t floats, failed;
-};
-
-static int btree_bset_stats(struct btree_op *op, struct btree *b)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
- struct bset_stats *stats = container_of(op, struct bset_stats, op);
unsigned i;
- stats->nodes++;
-
for (i = 0; i <= b->nsets; i++) {
- struct bset_tree *t = &b->sets[i];
+ struct bset_tree *t = &b->set[i];
size_t bytes = t->data->keys * sizeof(uint64_t);
size_t j;
@@ -1195,32 +1328,4 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
stats->bytes_unwritten += bytes;
}
}
-
- return MAP_CONTINUE;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
- struct bset_stats t;
- int ret;
-
- memset(&t, 0, sizeof(struct bset_stats));
- bch_btree_op_init(&t.op, -1);
-
- ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
- if (ret < 0)
- return ret;
-
- return snprintf(buf, PAGE_SIZE,
- "btree nodes: %zu\n"
- "written sets: %zu\n"
- "unwritten sets: %zu\n"
- "written key bytes: %zu\n"
- "unwritten key bytes: %zu\n"
- "floats: %zu\n"
- "failed: %zu\n",
- t.nodes,
- t.sets_written, t.sets_unwritten,
- t.bytes_written, t.bytes_unwritten,
- t.floats, t.failed);
}
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 1d3c24f9fa0e..5f6728d5d4dd 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,7 +1,11 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
-#include <linux/slab.h>
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "util.h" /* for time_stats */
/*
* BKEYS:
@@ -142,20 +146,13 @@
* first key in that range of bytes again.
*/
-/* Btree key comparison/iteration */
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
#define MAX_BSETS 4U
-struct btree_iter {
- size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
- struct btree *b;
-#endif
- struct btree_iter_set {
- struct bkey *k, *end;
- } data[MAX_BSETS];
-};
-
struct bset_tree {
/*
* We construct a binary tree in an array as if the array
@@ -165,14 +162,14 @@ struct bset_tree {
*/
/* size of the binary tree and prev array */
- unsigned size;
+ unsigned size;
/* function of size - precalculated for to_inorder() */
- unsigned extra;
+ unsigned extra;
/* copy of the last key in the set */
- struct bkey end;
- struct bkey_float *tree;
+ struct bkey end;
+ struct bkey_float *tree;
/*
* The nodes in the bset tree point to specific keys - this
@@ -182,12 +179,219 @@ struct bset_tree {
* to keep bkey_float to 4 bytes and prev isn't used in the fast
* path.
*/
- uint8_t *prev;
+ uint8_t *prev;
/* The actual btree node, with pointers to each sorted set */
- struct bset *data;
+ struct bset *data;
};
+struct btree_keys_ops {
+ bool (*sort_cmp)(struct btree_iter_set,
+ struct btree_iter_set);
+ struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
+ bool (*insert_fixup)(struct btree_keys *, struct bkey *,
+ struct btree_iter *, struct bkey *);
+ bool (*key_invalid)(struct btree_keys *,
+ const struct bkey *);
+ bool (*key_bad)(struct btree_keys *, const struct bkey *);
+ bool (*key_merge)(struct btree_keys *,
+ struct bkey *, struct bkey *);
+ void (*key_to_text)(char *, size_t, const struct bkey *);
+ void (*key_dump)(struct btree_keys *, const struct bkey *);
+
+ /*
+ * Only used for deciding whether to use START_KEY(k) or just the key
+ * itself in a couple places
+ */
+ bool is_extents;
+};
+
+struct btree_keys {
+ const struct btree_keys_ops *ops;
+ uint8_t page_order;
+ uint8_t nsets;
+ unsigned last_set_unwritten:1;
+ bool *expensive_debug_checks;
+
+ /*
+ * Sets of sorted keys - the real btree node - plus a binary search tree
+ *
+ * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+ * to the memory we have allocated for this btree node. Additionally,
+ * set[0]->data points to the entire btree node as it exists on disk.
+ */
+ struct bset_tree set[MAX_BSETS];
+};
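+
+/*
+ * nsets is the index of the last valid set, so a freshly sorted node has
+ * nsets == 0 and loops run "for (i = 0; i <= b->nsets; i++)". Only the
+ * last set may be unwritten, i.e. still being appended to in memory.
+ */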
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+ return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+ return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+ return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+{
+ return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+{
+ return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i) __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes) \
+ DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes) \
+ __set_blocks(i, (i)->keys, block_bytes)
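+
+/*
+ * e.g. assuming a 32 byte struct bset header: a bset holding 1000 u64s of
+ * keys is __set_bytes() == 32 + 8000 = 8032 bytes, which with 4096 byte
+ * btree blocks rounds up to set_blocks() == 2.
+ */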
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON((PAGE_SIZE << b->page_order) <
+ (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+ if (!b->last_set_unwritten)
+ return 0;
+
+ return ((PAGE_SIZE << b->page_order) -
+ (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+ sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+ unsigned block_bytes)
+{
+ struct bset *i = bset_tree_last(b)->data;
+
+ return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+ bool *);
+
+void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+void bch_bset_build_written_tree(struct btree_keys *);
+void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+ struct bkey *);
+
+enum {
+ BTREE_INSERT_STATUS_NO_INSERT = 0,
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
+/* Btree key iteration */
+
+struct btree_iter {
+ size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree_keys *b;
+#endif
+ struct btree_iter_set {
+ struct bkey *k, *end;
+ } data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+ struct btree_keys *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+ struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+ const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ struct bset_tree *t,
+ const struct bkey *search)
+{
+ return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next(iter));)
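+
+/*
+ * Rough usage sketch (the loop body is illustrative):
+ *
+ *	struct btree_iter iter;
+ *	struct bkey *k;
+ *
+ *	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ *		handle_key(k);
+ *
+ * visits, in sort order across all sets, every key for which the filter
+ * (here bch_ptr_bad()) returns false.
+ */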
+
+/* Sorting */
+
+struct bset_sort_state {
+ mempool_t *pool;
+
+ unsigned page_order;
+ unsigned crit_factor;
+
+ struct time_stats time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+ struct bset_sort_state *);
+void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+ struct bset_sort_state *);
+void bch_btree_sort_partial(struct btree_keys *, unsigned,
+ struct bset_sort_state *);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+ struct bset_sort_state *state)
+{
+ bch_btree_sort_partial(b, 0, state);
+}
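+
+/*
+ * Callers are expected to have set up the bset_sort_state with
+ * bch_bset_sort_state_init() first; bch_btree_sort() then resorts the
+ * whole node down into a single set (set[0]).
+ */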
+
+struct bset_stats {
+ size_t sets_written, sets_unwritten;
+ size_t bytes_written, bytes_unwritten;
+ size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+ return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+ *k = ZERO_KEY;
+}
+
static __always_inline int64_t bkey_cmp(const struct bkey *l,
const struct bkey *r)
{
@@ -196,6 +400,62 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+ unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, k) > 0);
+ return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+ BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+ return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k) \
+({ \
+ struct bkey *_ret = NULL; \
+ \
+ if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
+ _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
+ \
+ if (!_ret->low) \
+ _ret->high--; \
+ _ret->low--; \
+ } \
+ \
+ _ret; \
+})
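+
+/*
+ * PRECEDING_KEY(k) evaluates to a key that sorts immediately before k in
+ * the (inode, offset) ordering, or NULL for the zero key.
+ * bch_btree_insert_key() searches for it so that iteration starts just
+ * before k's insertion point.
+ */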
+
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+ return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+ size_t size, const struct bkey *k)
+{
+ return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+ const struct bkey *r)
+{
+ return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+ KEY_PTRS(l) == KEY_PTRS(r) &&
+ KEY_CSUM(l) == KEY_CSUM(r));
+}
+
/* Keylists */
struct keylist {
@@ -218,6 +478,12 @@ static inline void bch_keylist_init(struct keylist *l)
l->top_p = l->keys_p = l->inline_keys;
}
+static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
+{
+ l->keys = k;
+ l->top = bkey_next(k);
+}
+
static inline void bch_keylist_push(struct keylist *l)
{
l->top = bkey_next(l->top);
@@ -257,136 +523,44 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
-int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
-
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
- unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
-
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
-{
- BUG_ON(bkey_cmp(where, k) > 0);
- return __bch_cut_front(where, k);
-}
-
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
-{
- BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
- return __bch_cut_back(where, k);
-}
-
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
-
-bool bch_ptr_bad(struct btree *, const struct bkey *);
-
-static inline uint8_t gen_after(uint8_t a, uint8_t b)
-{
- uint8_t r = a - b;
- return r > 128U ? 0 : r;
-}
-
-static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
- unsigned i)
-{
- return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-}
-
-static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
- unsigned i)
-{
- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-}
-
+int __bch_keylist_realloc(struct keylist *, unsigned);
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
- struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
- struct bkey *, struct bset_tree *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS 3
-#define BKEY_EXPONENT_BITS 7
-#define BKEY_MANTISSA_BITS 22
-#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
- unsigned exponent:BKEY_EXPONENT_BITS;
- unsigned m:BKEY_MID_BITS;
- unsigned mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
+/* Debug stuff */
-#define BSET_CACHELINE 128
-#define bset_tree_space(b) (btree_data_space(b) / BSET_CACHELINE)
+#ifdef CONFIG_BCACHE_DEBUG
-#define bset_tree_bytes(b) (bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b) (bset_tree_space(b) * sizeof(uint8_t))
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
-void bch_bset_init_next(struct btree *);
+#else
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
- const struct bkey *);
+#endif
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
- const struct bkey *search)
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
- return search ? __bch_bset_search(b, t, search) : t->data->start;
+#ifdef CONFIG_BCACHE_DEBUG
+ return *b->expensive_debug_checks;
+#else
+ return false;
+#endif
}
-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
-
-bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
-void bch_btree_sort_lazy(struct btree *);
-void bch_btree_sort_into(struct btree *, struct btree *);
-void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
-void bch_btree_sort_partial(struct btree *, unsigned);
-
-static inline void bch_btree_sort(struct btree *b)
+static inline int bch_count_data(struct btree_keys *b)
{
- bch_btree_sort_partial(b, 0);
+ return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}
-int bch_bset_print_stats(struct cache_set *, char *);
+#define bch_check_keys(b, ...) \
+do { \
+ if (btree_keys_expensive_checks(b)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..7347b6100961 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,7 +23,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "writeback.h"
+#include "extents.h"
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -68,15 +68,11 @@
* alloc_bucket() cannot fail. This should be true but is not completely
* obvious.
*
- * Make sure all allocations get charged to the root cgroup
- *
* Plugging?
*
* If data write is less than hard sector size of ssd, round up offset in open
* bucket to the next whole sector
*
- * Also lookup by cgroup in get_open_bucket()
- *
* Superblock needs to be fleshed out for multiple cache devices
*
* Add a sysfs tunable for the number of writeback IOs in flight
@@ -89,13 +85,6 @@
* Test module load/unload
*/
-enum {
- BTREE_INSERT_STATUS_INSERT,
- BTREE_INSERT_STATUS_BACK_MERGE,
- BTREE_INSERT_STATUS_OVERWROTE,
- BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
@@ -104,16 +93,6 @@ enum {
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-static struct workqueue_struct *btree_io_wq;
-
-static inline bool should_split(struct btree *b)
-{
- struct bset *i = write_block(b);
- return b->written >= btree_blocks(b) ||
- (b->written + __set_blocks(i, i->keys + 15, b->c)
- > btree_blocks(b));
-}
-
#define insert_lock(s, b) ((b)->level <= (s)->lock)
/*
@@ -138,7 +117,7 @@ static inline bool should_split(struct btree *b)
({ \
int _r, l = (b)->level - 1; \
bool _w = l <= (op)->lock; \
- struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+ struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\
if (!IS_ERR(_child)) { \
_child->parent = (b); \
_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
@@ -168,16 +147,33 @@ static inline bool should_split(struct btree *b)
} \
rw_unlock(_w, _b); \
bch_cannibalize_unlock(c); \
- if (_r == -ENOSPC) { \
- wait_event((c)->try_wait, \
- !(c)->try_harder); \
- _r = -EINTR; \
- } \
+ if (_r == -EINTR) \
+ schedule(); \
} while (_r == -EINTR); \
\
+ finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
_r; \
})
+static inline struct bset *write_block(struct btree *b)
+{
+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+}
+
+static void bch_btree_init_next(struct btree *b)
+{
+ /* If not a leaf node, always sort */
+ if (b->level && b->keys.nsets)
+ bch_btree_sort(&b->keys, &b->c->sort);
+ else
+ bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+ if (b->written < btree_blocks(b))
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->sb));
+}
+
/* Btree key manipulation */
void bkey_put(struct cache_set *c, struct bkey *k)
@@ -194,16 +190,16 @@ void bkey_put(struct cache_set *c, struct bkey *k)
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
uint64_t crc = b->key.ptr[0];
- void *data = (void *) i + 8, *end = end(i);
+ void *data = (void *) i + 8, *end = bset_bkey_last(i);
crc = bch_crc64_update(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}
-static void bch_btree_node_read_done(struct btree *b)
+void bch_btree_node_read_done(struct btree *b)
{
const char *err = "bad btree header";
- struct bset *i = b->sets[0].data;
+ struct bset *i = btree_bset_first(b);
struct btree_iter *iter;
iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
@@ -211,21 +207,22 @@ static void bch_btree_node_read_done(struct btree *b)
iter->used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->b = &b->keys;
#endif
if (!i->seq)
goto err;
for (;
- b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+ b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
i = write_block(b)) {
err = "unsupported bset version";
if (i->version > BCACHE_BSET_VERSION)
goto err;
err = "bad btree header";
- if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+ if (b->written + set_blocks(i, block_bytes(b->c)) >
+ btree_blocks(b))
goto err;
err = "bad magic";
@@ -245,39 +242,40 @@ static void bch_btree_node_read_done(struct btree *b)
}
err = "empty set";
- if (i != b->sets[0].data && !i->keys)
+ if (i != b->keys.set[0].data && !i->keys)
goto err;
- bch_btree_iter_push(iter, i->start, end(i));
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
- b->written += set_blocks(i, b->c);
+ b->written += set_blocks(i, block_bytes(b->c));
}
err = "corrupted btree";
for (i = write_block(b);
- index(i, b) < btree_blocks(b);
+ bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
i = ((void *) i) + block_bytes(b->c))
- if (i->seq == b->sets[0].data->seq)
+ if (i->seq == b->keys.set[0].data->seq)
goto err;
- bch_btree_sort_and_fix_extents(b, iter);
+ bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
- i = b->sets[0].data;
+ i = b->keys.set[0].data;
err = "short btree key";
- if (b->sets[0].size &&
- bkey_cmp(&b->key, &b->sets[0].end) < 0)
+ if (b->keys.set[0].size &&
+ bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
goto err;
if (b->written < btree_blocks(b))
- bch_bset_init_next(b);
+ bch_bset_init_next(&b->keys, write_block(b),
+ bset_magic(&b->c->sb));
out:
mempool_free(iter, b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);
- bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+ bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
err, PTR_BUCKET_NR(b->c, &b->key, 0),
- index(i, b), i->keys);
+ bset_block_offset(b, i), i->keys);
goto out;
}
@@ -287,7 +285,7 @@ static void btree_node_read_endio(struct bio *bio, int error)
closure_put(cl);
}
-void bch_btree_node_read(struct btree *b)
+static void bch_btree_node_read(struct btree *b)
{
uint64_t start_time = local_clock();
struct closure cl;
@@ -299,11 +297,11 @@ void bch_btree_node_read(struct btree *b)
bio = bch_bbio_alloc(b->c);
bio->bi_rw = REQ_META|READ_SYNC;
- bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
- bch_bio_map(bio, b->sets[0].data);
+ bch_bio_map(bio, b->keys.set[0].data);
bch_submit_bbio(bio, b->c, &b->key, 0);
closure_sync(&cl);
@@ -340,9 +338,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
+static void btree_node_write_unlock(struct closure *cl)
+{
+ struct btree *b = container_of(cl, struct btree, io);
+
+ up(&b->io_mutex);
+}
+
static void __btree_node_write_done(struct closure *cl)
{
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);
@@ -350,19 +355,18 @@ static void __btree_node_write_done(struct closure *cl)
btree_complete_write(b, w);
if (btree_node_dirty(b))
- queue_delayed_work(btree_io_wq, &b->work,
- msecs_to_jiffies(30000));
+ schedule_delayed_work(&b->work, 30 * HZ);
- closure_return(cl);
+ closure_return_with_destructor(cl, btree_node_write_unlock);
}
static void btree_node_write_done(struct closure *cl)
{
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
struct bio_vec *bv;
int n;
- __bio_for_each_segment(bv, b->bio, n, 0)
+ bio_for_each_segment_all(bv, b->bio, n)
__free_page(bv->bv_page);
__btree_node_write_done(cl);
@@ -371,7 +375,7 @@ static void btree_node_write_done(struct closure *cl)
static void btree_node_write_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree *b = container_of(cl, struct btree, io.cl);
+ struct btree *b = container_of(cl, struct btree, io);
if (error)
set_btree_node_io_error(b);
@@ -382,8 +386,8 @@ static void btree_node_write_endio(struct bio *bio, int error)
static void do_btree_node_write(struct btree *b)
{
- struct closure *cl = &b->io.cl;
- struct bset *i = b->sets[b->nsets].data;
+ struct closure *cl = &b->io;
+ struct bset *i = btree_bset_last(b);
BKEY_PADDED(key) k;
i->version = BCACHE_BSET_VERSION;
@@ -395,7 +399,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
bch_bio_map(b->bio, i);
/*
@@ -414,14 +418,15 @@ static void do_btree_node_write(struct btree *b)
*/
bkey_copy(&k.key, &b->key);
- SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+ SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+ bset_sector_offset(&b->keys, i));
if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- bio_for_each_segment(bv, b->bio, j)
+ bio_for_each_segment_all(bv, b->bio, j)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
@@ -435,40 +440,57 @@ static void do_btree_node_write(struct btree *b)
bch_submit_bbio(b->bio, b->c, &k.key, 0);
closure_sync(cl);
- __btree_node_write_done(cl);
+ continue_at_nobarrier(cl, __btree_node_write_done, NULL);
}
}
-void bch_btree_node_write(struct btree *b, struct closure *parent)
+void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
- struct bset *i = b->sets[b->nsets].data;
+ struct bset *i = btree_bset_last(b);
+
+ lockdep_assert_held(&b->write_lock);
trace_bcache_btree_write(b);
BUG_ON(current->bio_list);
BUG_ON(b->written >= btree_blocks(b));
BUG_ON(b->written && !i->keys);
- BUG_ON(b->sets->data->seq != i->seq);
- bch_check_keys(b, "writing");
+ BUG_ON(btree_bset_first(b)->seq != i->seq);
+ bch_check_keys(&b->keys, "writing");
cancel_delayed_work(&b->work);
/* If caller isn't waiting for write, parent refcount is cache set */
- closure_lock(&b->io, parent ?: &b->c->cl);
+ down(&b->io_mutex);
+ closure_init(&b->io, parent ?: &b->c->cl);
clear_bit(BTREE_NODE_dirty, &b->flags);
change_bit(BTREE_NODE_write_idx, &b->flags);
do_btree_node_write(b);
- b->written += set_blocks(i, b->c);
- atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+ atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
- bch_btree_sort_lazy(b);
+ b->written += set_blocks(i, block_bytes(b->c));
+}
- if (b->written < btree_blocks(b))
- bch_bset_init_next(b);
+void bch_btree_node_write(struct btree *b, struct closure *parent)
+{
+ unsigned nsets = b->keys.nsets;
+
+ lockdep_assert_held(&b->lock);
+
+ __bch_btree_node_write(b, parent);
+
+ /*
+ * do verify if there was more than one set initially (i.e. we did a
+ * sort) and we sorted down to a single set:
+ */
+ if (nsets && !b->keys.nsets)
+ bch_btree_verify(b);
+
+ bch_btree_init_next(b);
}
static void bch_btree_node_write_sync(struct btree *b)
@@ -476,7 +498,11 @@ static void bch_btree_node_write_sync(struct btree *b)
struct closure cl;
closure_init_stack(&cl);
+
+ mutex_lock(&b->write_lock);
bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
closure_sync(&cl);
}
@@ -484,23 +510,24 @@ static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
- rw_lock(true, b, b->level);
-
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
- rw_unlock(true, b);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
}
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
- struct bset *i = b->sets[b->nsets].data;
+ struct bset *i = btree_bset_last(b);
struct btree_write *w = btree_current_write(b);
+ lockdep_assert_held(&b->write_lock);
+
BUG_ON(!b->written);
BUG_ON(!i->keys);
if (!btree_node_dirty(b))
- queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+ schedule_delayed_work(&b->work, 30 * HZ);
set_btree_node_dirty(b);
@@ -528,53 +555,19 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
* mca -> memory cache
*/
-static void mca_reinit(struct btree *b)
-{
- unsigned i;
-
- b->flags = 0;
- b->written = 0;
- b->nsets = 0;
-
- for (i = 0; i < MAX_BSETS; i++)
- b->sets[i].size = 0;
- /*
- * Second loop starts at 1 because b->sets[0]->data is the memory we
- * allocated
- */
- for (i = 1; i < MAX_BSETS; i++)
- b->sets[i].data = NULL;
-}
-
#define mca_reserve(c) (((c->root && c->root->level) \
? c->root->level : 1) * 8 + 16)
#define mca_can_free(c) \
- max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
+ max_t(int, 0, c->btree_cache_used - mca_reserve(c))
static void mca_data_free(struct btree *b)
{
- struct bset_tree *t = b->sets;
- BUG_ON(!closure_is_unlocked(&b->io.cl));
+ BUG_ON(b->io_mutex.count != 1);
- if (bset_prev_bytes(b) < PAGE_SIZE)
- kfree(t->prev);
- else
- free_pages((unsigned long) t->prev,
- get_order(bset_prev_bytes(b)));
+ bch_btree_keys_free(&b->keys);
- if (bset_tree_bytes(b) < PAGE_SIZE)
- kfree(t->tree);
- else
- free_pages((unsigned long) t->tree,
- get_order(bset_tree_bytes(b)));
-
- free_pages((unsigned long) t->data, b->page_order);
-
- t->prev = NULL;
- t->tree = NULL;
- t->data = NULL;
+ b->c->btree_cache_used--;
list_move(&b->list, &b->c->btree_cache_freed);
- b->c->bucket_cache_used--;
}
static void mca_bucket_free(struct btree *b)
@@ -593,34 +586,16 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
- struct bset_tree *t = b->sets;
- BUG_ON(t->data);
-
- b->page_order = max_t(unsigned,
- ilog2(b->c->btree_pages),
- btree_order(k));
-
- t->data = (void *) __get_free_pages(gfp, b->page_order);
- if (!t->data)
- goto err;
-
- t->tree = bset_tree_bytes(b) < PAGE_SIZE
- ? kmalloc(bset_tree_bytes(b), gfp)
- : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
- if (!t->tree)
- goto err;
-
- t->prev = bset_prev_bytes(b) < PAGE_SIZE
- ? kmalloc(bset_prev_bytes(b), gfp)
- : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
- if (!t->prev)
- goto err;
-
- list_move(&b->list, &b->c->btree_cache);
- b->c->bucket_cache_used++;
- return;
-err:
- mca_data_free(b);
+ if (!bch_btree_keys_alloc(&b->keys,
+ max_t(unsigned,
+ ilog2(b->c->btree_pages),
+ btree_order(k)),
+ gfp)) {
+ b->c->btree_cache_used++;
+ list_move(&b->list, &b->c->btree_cache);
+ } else {
+ list_move(&b->list, &b->c->btree_cache_freed);
+ }
}
static struct btree *mca_bucket_alloc(struct cache_set *c,
@@ -632,10 +607,12 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
init_rwsem(&b->lock);
lockdep_set_novalidate_class(&b->lock);
+ mutex_init(&b->write_lock);
+ lockdep_set_novalidate_class(&b->write_lock);
INIT_LIST_HEAD(&b->list);
INIT_DELAYED_WORK(&b->work, btree_node_write_work);
b->c = c;
- closure_init_unlocked(&b->io);
+ sema_init(&b->io_mutex, 1);
mca_data_alloc(b, k, gfp);
return b;
@@ -651,24 +628,35 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
if (!down_write_trylock(&b->lock))
return -ENOMEM;
- BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+ BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
- if (b->page_order < min_order ||
- (!flush &&
- (btree_node_dirty(b) ||
- atomic_read(&b->io.cl.remaining) != -1))) {
- rw_unlock(true, b);
- return -ENOMEM;
+ if (b->keys.page_order < min_order)
+ goto out_unlock;
+
+ if (!flush) {
+ if (btree_node_dirty(b))
+ goto out_unlock;
+
+ if (down_trylock(&b->io_mutex))
+ goto out_unlock;
+ up(&b->io_mutex);
}
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write_sync(b);
+ __bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+
+ closure_sync(&cl);
/* wait for any in flight btree write */
- closure_wait_event(&b->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ down(&b->io_mutex);
+ up(&b->io_mutex);
return 0;
+out_unlock:
+ rw_unlock(true, b);
+ return -ENOMEM;
}
static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -682,7 +670,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
if (c->shrinker_disabled)
return SHRINK_STOP;
- if (c->try_harder)
+ if (c->btree_cache_alloc_lock)
return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
@@ -714,14 +702,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
}
}
- /*
- * Can happen right when we first start up, before we've read in any
- * btree nodes
- */
- if (list_empty(&c->btree_cache))
- goto out;
+ for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
+ if (list_empty(&c->btree_cache))
+ goto out;
- for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -747,7 +731,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink,
if (c->shrinker_disabled)
return 0;
- if (c->try_harder)
+ if (c->btree_cache_alloc_lock)
return 0;
return mca_can_free(c) * c->btree_pages;
@@ -767,6 +751,8 @@ void bch_btree_cache_free(struct cache_set *c)
#ifdef CONFIG_BCACHE_DEBUG
if (c->verify_data)
list_move(&c->verify_data->list, &c->btree_cache);
+
+ free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif
list_splice(&c->btree_cache_freeable,
@@ -807,10 +793,13 @@ int bch_btree_cache_alloc(struct cache_set *c)
#ifdef CONFIG_BCACHE_DEBUG
mutex_init(&c->verify_lock);
+ c->verify_ondisk = (void *)
+ __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+
c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
if (c->verify_data &&
- c->verify_data->sets[0].data)
+ c->verify_data->keys.set->data)
list_del_init(&c->verify_data->list);
else
c->verify_data = NULL;
@@ -846,17 +835,30 @@ out:
return b;
}
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
+static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
+{
+ struct task_struct *old;
+
+ old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+ if (old && old != current) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+ struct bkey *k)
{
struct btree *b;
trace_bcache_btree_cache_cannibalize(c);
- if (!c->try_harder) {
- c->try_harder = current;
- c->try_harder_start = local_clock();
- } else if (c->try_harder != current)
- return ERR_PTR(-ENOSPC);
+ if (mca_cannibalize_lock(c, op))
+ return ERR_PTR(-EINTR);
list_for_each_entry_reverse(b, &c->btree_cache, list)
if (!mca_reap(b, btree_order(k), false))
@@ -866,6 +868,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
if (!mca_reap(b, btree_order(k), true))
return b;
+ WARN(1, "btree cache cannibalize failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -877,14 +880,14 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
- if (c->try_harder == current) {
- bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
- c->try_harder = NULL;
- wake_up(&c->try_wait);
+ if (c->btree_cache_alloc_lock == current) {
+ c->btree_cache_alloc_lock = NULL;
+ wake_up(&c->btree_cache_wait);
}
}
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
+static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level)
{
struct btree *b;
@@ -908,7 +911,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
list_for_each_entry(b, &c->btree_cache_freed, list)
if (!mca_reap(b, 0, false)) {
mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
- if (!b->sets[0].data)
+ if (!b->keys.set[0].data)
goto err;
else
goto out;
@@ -919,10 +922,10 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
goto err;
BUG_ON(!down_write_trylock(&b->lock));
- if (!b->sets->data)
+ if (!b->keys.set->data)
goto err;
out:
- BUG_ON(!closure_is_unlocked(&b->io.cl));
+ BUG_ON(b->io_mutex.count != 1);
bkey_copy(&b->key, k);
list_move(&b->list, &c->btree_cache);
@@ -930,17 +933,24 @@ out:
hlist_add_head_rcu(&b->hash, mca_hash(c, k));
lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
- b->level = level;
b->parent = (void *) ~0UL;
+ b->flags = 0;
+ b->written = 0;
+ b->level = level;
- mca_reinit(b);
+ if (!b->level)
+ bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+ &b->c->expensive_debug_checks);
+ else
+ bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+ &b->c->expensive_debug_checks);
return b;
err:
if (b)
rw_unlock(true, b);
- b = mca_cannibalize(c, k);
+ b = mca_cannibalize(c, op, k);
if (!IS_ERR(b))
goto out;
@@ -956,8 +966,8 @@ err:
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
*/
-struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
- int level, bool write)
+struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write)
{
int i = 0;
struct btree *b;
@@ -971,7 +981,7 @@ retry:
return ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level);
+ b = mca_alloc(c, op, k, level);
mutex_unlock(&c->bucket_lock);
if (!b)
@@ -994,13 +1004,13 @@ retry:
b->accessed = 1;
- for (; i <= b->nsets && b->sets[i].size; i++) {
- prefetch(b->sets[i].tree);
- prefetch(b->sets[i].data);
+ for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+ prefetch(b->keys.set[i].tree);
+ prefetch(b->keys.set[i].data);
}
- for (; i <= b->nsets; i++)
- prefetch(b->sets[i].data);
+ for (; i <= b->keys.nsets; i++)
+ prefetch(b->keys.set[i].data);
if (btree_node_io_error(b)) {
rw_unlock(write, b);
@@ -1017,7 +1027,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
struct btree *b;
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level);
+ b = mca_alloc(c, NULL, k, level);
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
@@ -1030,46 +1040,41 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
static void btree_node_free(struct btree *b)
{
- unsigned i;
-
trace_bcache_btree_node_free(b);
BUG_ON(b == b->c->root);
+ mutex_lock(&b->write_lock);
+
if (btree_node_dirty(b))
btree_complete_write(b, btree_current_write(b));
clear_bit(BTREE_NODE_dirty, &b->flags);
+ mutex_unlock(&b->write_lock);
+
cancel_delayed_work(&b->work);
mutex_lock(&b->c->bucket_lock);
-
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
-
- bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
- PTR_BUCKET(b->c, &b->key, i));
- }
-
bch_bucket_free(b->c, &b->key);
mca_bucket_free(b);
mutex_unlock(&b->c->bucket_lock);
}
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
+struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level)
{
BKEY_PADDED(key) k;
struct btree *b = ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL))
goto err;
bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
- b = mca_alloc(c, &k.key, level);
+ b = mca_alloc(c, op, &k.key, level);
if (IS_ERR(b))
goto err_free;
@@ -1080,7 +1085,7 @@ retry:
}
b->accessed = 1;
- bch_bset_init_next(b);
+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
mutex_unlock(&c->bucket_lock);
@@ -1095,11 +1100,16 @@ err:
return b;
}
-static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
+static struct btree *btree_node_alloc_replacement(struct btree *b,
+ struct btree_op *op)
{
- struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
- if (!IS_ERR_OR_NULL(n))
- bch_btree_sort_into(b, n);
+ struct btree *n = bch_btree_node_alloc(b->c, op, b->level);
+ if (!IS_ERR_OR_NULL(n)) {
+ mutex_lock(&n->write_lock);
+ bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+ bkey_copy_key(&n->key, &b->key);
+ mutex_unlock(&n->write_lock);
+ }
return n;
}
@@ -1108,21 +1118,47 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
unsigned i;
+ mutex_lock(&b->c->bucket_lock);
+
+ atomic_inc(&b->c->prio_blocked);
+
bkey_copy(k, &b->key);
bkey_copy_key(k, &ZERO_KEY);
- for (i = 0; i < KEY_PTRS(k); i++) {
- uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
+ for (i = 0; i < KEY_PTRS(k); i++)
+ SET_PTR_GEN(k, i,
+ bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+ PTR_BUCKET(b->c, &b->key, i)));
- SET_PTR_GEN(k, i, g);
- }
+ mutex_unlock(&b->c->bucket_lock);
+}
- atomic_inc(&b->c->prio_blocked);
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+ struct cache_set *c = b->c;
+ struct cache *ca;
+ unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i)
+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&c->bucket_lock);
+ return -EINTR;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ return mca_cannibalize_lock(b->c, op);
}
/* Garbage collection */
-uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
+static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
+ struct bkey *k)
{
uint8_t stale = 0;
unsigned i;
@@ -1142,8 +1178,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
g = PTR_BUCKET(c, k, i);
- if (gen_after(g->gc_gen, PTR_GEN(k, i)))
- g->gc_gen = PTR_GEN(k, i);
+ if (gen_after(g->last_gc, PTR_GEN(k, i)))
+ g->last_gc = PTR_GEN(k, i);
if (ptr_stale(c, k, i)) {
stale = max(stale, ptr_stale(c, k, i));
@@ -1159,11 +1195,13 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
SET_GC_MARK(g, GC_MARK_METADATA);
else if (KEY_DIRTY(k))
SET_GC_MARK(g, GC_MARK_DIRTY);
+ else if (!GC_MARK(g))
+ SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1173,6 +1211,26 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
+void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ !ptr_stale(c, k, i)) {
+ struct bucket *b = PTR_BUCKET(c, k, i);
+
+ b->gen = PTR_GEN(k, i);
+
+ if (level && bkey_cmp(k, &ZERO_KEY))
+ b->prio = BTREE_PRIO;
+ else if (!level && b->prio == BTREE_PRIO)
+ b->prio = INITIAL_PRIO;
+ }
+
+ __bch_btree_mark_key(c, level, k);
+}
+
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
@@ -1183,11 +1241,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
gc->nodes++;
- for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
stale = max(stale, btree_mark_key(b, k));
keys++;
- if (bch_ptr_bad(b, k))
+ if (bch_ptr_bad(&b->keys, k))
continue;
gc->key_bytes += bkey_u64s(k);
@@ -1197,9 +1255,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
gc->data += KEY_SIZE(k);
}
- for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+ for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
btree_bug_on(t->size &&
- bset_written(b, t) &&
+ bset_written(&b->keys, t) &&
bkey_cmp(&b->key, &t->end) < 0,
b, "found short btree key in gc");
@@ -1226,14 +1284,19 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
struct keylist *, atomic_t *, struct bkey *);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- struct keylist *keylist, struct gc_stat *gc,
- struct gc_merge_info *r)
+ struct gc_stat *gc, struct gc_merge_info *r)
{
unsigned i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES];
+ struct keylist keylist;
struct closure cl;
struct bkey *k;
+ bch_keylist_init(&keylist);
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
@@ -1243,28 +1306,42 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
blocks = btree_default_blocks(b->c) * 2 / 3;
if (nodes < 2 ||
- __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+ __set_blocks(b->keys.set[0].data, keys,
+ block_bytes(b->c)) > blocks * (nodes - 1))
return 0;
for (i = 0; i < nodes; i++) {
- new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
if (IS_ERR_OR_NULL(new_nodes[i]))
goto out_nocoalesce;
}
+ /*
+ * We have to check the reserve here, after we've allocated our new
+ * nodes, to make sure the insert below will succeed - we also check
+ * before as an optimization to potentially avoid a bunch of expensive
+ * allocs/sorts
+ */
+ if (btree_check_reserve(b, NULL))
+ goto out_nocoalesce;
+
+ for (i = 0; i < nodes; i++)
+ mutex_lock(&new_nodes[i]->write_lock);
+
for (i = nodes - 1; i > 0; --i) {
- struct bset *n1 = new_nodes[i]->sets->data;
- struct bset *n2 = new_nodes[i - 1]->sets->data;
+ struct bset *n1 = btree_bset_first(new_nodes[i]);
+ struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
struct bkey *k, *last = NULL;
keys = 0;
if (i > 1) {
for (k = n2->start;
- k < end(n2);
+ k < bset_bkey_last(n2);
k = bkey_next(k)) {
if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k), b->c) > blocks)
+ bkey_u64s(k),
+ block_bytes(b->c)) > blocks)
break;
last = k;
@@ -1280,7 +1357,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
* though)
*/
if (__set_blocks(n1, n1->keys + n2->keys,
- b->c) > btree_blocks(new_nodes[i]))
+ block_bytes(b->c)) >
+ btree_blocks(new_nodes[i]))
goto out_nocoalesce;
keys = n2->keys;
@@ -1288,47 +1366,54 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
last = &r->b->key;
}
- BUG_ON(__set_blocks(n1, n1->keys + keys,
- b->c) > btree_blocks(new_nodes[i]));
+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+ btree_blocks(new_nodes[i]));
if (last)
bkey_copy_key(&new_nodes[i]->key, last);
- memcpy(end(n1),
+ memcpy(bset_bkey_last(n1),
n2->start,
- (void *) node(n2, keys) - (void *) n2->start);
+ (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
n1->keys += keys;
r[i].keys = n1->keys;
memmove(n2->start,
- node(n2, keys),
- (void *) end(n2) - (void *) node(n2, keys));
+ bset_bkey_idx(n2, keys),
+ (void *) bset_bkey_last(n2) -
+ (void *) bset_bkey_idx(n2, keys));
n2->keys -= keys;
- if (bch_keylist_realloc(keylist,
- KEY_PTRS(&new_nodes[i]->key), b->c))
+ if (__bch_keylist_realloc(&keylist,
+ bkey_u64s(&new_nodes[i]->key)))
goto out_nocoalesce;
bch_btree_node_write(new_nodes[i], &cl);
- bch_keylist_add(keylist, &new_nodes[i]->key);
+ bch_keylist_add(&keylist, &new_nodes[i]->key);
}
- for (i = 0; i < nodes; i++) {
- if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
- goto out_nocoalesce;
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
- make_btree_freeing_key(r[i].b, keylist->top);
- bch_keylist_push(keylist);
- }
+ closure_sync(&cl);
/* We emptied out this node */
- BUG_ON(new_nodes[0]->sets->data->keys);
+ BUG_ON(btree_bset_first(new_nodes[0])->keys);
btree_node_free(new_nodes[0]);
rw_unlock(true, new_nodes[0]);
- closure_sync(&cl);
+ for (i = 0; i < nodes; i++) {
+ if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
+ goto out_nocoalesce;
+
+ make_btree_freeing_key(r[i].b, keylist.top);
+ bch_keylist_push(&keylist);
+ }
+
+ bch_btree_insert_node(b, op, &keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keylist));
for (i = 0; i < nodes; i++) {
btree_node_free(r[i].b);
@@ -1337,22 +1422,22 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
r[i].b = new_nodes[i];
}
- bch_btree_insert_node(b, op, keylist, NULL, NULL);
- BUG_ON(!bch_keylist_empty(keylist));
-
memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
r[nodes - 1].b = ERR_PTR(-EINTR);
trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
+ bch_keylist_free(&keylist);
+
/* Invalidated our iterator */
return -EINTR;
out_nocoalesce:
closure_sync(&cl);
+ bch_keylist_free(&keylist);
- while ((k = bch_keylist_pop(keylist)))
+ while ((k = bch_keylist_pop(&keylist)))
if (!bkey_cmp(k, &ZERO_KEY))
atomic_dec(&b->c->prio_blocked);
@@ -1364,13 +1449,49 @@ out_nocoalesce:
return 0;
}
+static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ struct btree *replace)
+{
+ struct keylist keys;
+ struct btree *n;
+
+ if (btree_check_reserve(b, NULL))
+ return 0;
+
+ n = btree_node_alloc_replacement(replace, NULL);
+
+ /* recheck reserve after allocating replacement node */
+ if (btree_check_reserve(b, NULL)) {
+ btree_node_free(n);
+ rw_unlock(true, n);
+ return 0;
+ }
+
+ bch_btree_node_write_sync(n);
+
+ bch_keylist_init(&keys);
+ bch_keylist_add(&keys, &n->key);
+
+ make_btree_freeing_key(replace, keys.top);
+ bch_keylist_push(&keys);
+
+ bch_btree_insert_node(b, op, &keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
+
+ btree_node_free(replace);
+ rw_unlock(true, n);
+
+ /* Invalidated our iterator */
+ return -EINTR;
+}
+
static unsigned btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
struct btree_iter iter;
unsigned ret = 0;
- for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
return ret;
@@ -1379,26 +1500,23 @@ static unsigned btree_gc_count_keys(struct btree *b)
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
- unsigned i;
int ret = 0;
bool should_rewrite;
- struct btree *n;
struct bkey *k;
- struct keylist keys;
struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
- struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
+ struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_keylist_init(&keys);
- bch_btree_iter_init(b, &iter, &b->c->gc_done);
+ bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
- for (i = 0; i < GC_MERGE_NODES; i++)
- r[i].b = ERR_PTR(-EINTR);
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
if (k) {
- r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+ r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ true);
if (IS_ERR(r->b)) {
ret = PTR_ERR(r->b);
break;
@@ -1406,7 +1524,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
r->keys = btree_gc_count_keys(r->b);
- ret = btree_gc_coalesce(b, op, &keys, gc, r);
+ ret = btree_gc_coalesce(b, op, gc, r);
if (ret)
break;
}
@@ -1417,30 +1535,9 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
if (!IS_ERR(last->b)) {
should_rewrite = btree_gc_mark_node(last->b, gc);
if (should_rewrite) {
- n = btree_node_alloc_replacement(last->b,
- false);
-
- if (!IS_ERR_OR_NULL(n)) {
- bch_btree_node_write_sync(n);
- bch_keylist_add(&keys, &n->key);
-
- make_btree_freeing_key(last->b,
- keys.top);
- bch_keylist_push(&keys);
-
- btree_node_free(last->b);
-
- bch_btree_insert_node(b, op, &keys,
- NULL, NULL);
- BUG_ON(!bch_keylist_empty(&keys));
-
- rw_unlock(true, last->b);
- last->b = n;
-
- /* Invalidated our iterator */
- ret = -EINTR;
+ ret = btree_gc_rewrite_node(b, op, last->b);
+ if (ret)
break;
- }
}
if (last->b->level) {
@@ -1455,8 +1552,10 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
* Must flush leaf nodes before gc ends, since replace
* operations aren't journalled
*/
+ mutex_lock(&last->b->write_lock);
if (btree_node_dirty(last->b))
bch_btree_node_write(last->b, writes);
+ mutex_unlock(&last->b->write_lock);
rw_unlock(true, last->b);
}
@@ -1469,15 +1568,15 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
}
}
- for (i = 0; i < GC_MERGE_NODES; i++)
- if (!IS_ERR_OR_NULL(r[i].b)) {
- if (btree_node_dirty(r[i].b))
- bch_btree_node_write(r[i].b, writes);
- rw_unlock(true, r[i].b);
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ if (!IS_ERR_OR_NULL(i->b)) {
+ mutex_lock(&i->b->write_lock);
+ if (btree_node_dirty(i->b))
+ bch_btree_node_write(i->b, writes);
+ mutex_unlock(&i->b->write_lock);
+ rw_unlock(true, i->b);
}
- bch_keylist_free(&keys);
-
return ret;
}
@@ -1490,10 +1589,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
should_rewrite = btree_gc_mark_node(b, gc);
if (should_rewrite) {
- n = btree_node_alloc_replacement(b, false);
+ n = btree_node_alloc_replacement(b, NULL);
if (!IS_ERR_OR_NULL(n)) {
bch_btree_node_write_sync(n);
+
bch_btree_set_root(n);
btree_node_free(b);
rw_unlock(true, n);
@@ -1502,6 +1602,8 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
}
}
+ __bch_btree_mark_key(b->c, b->level + 1, &b->key);
+
if (b->level) {
ret = btree_gc_recurse(b, op, writes, gc);
if (ret)
@@ -1529,9 +1631,9 @@ static void btree_gc_start(struct cache_set *c)
for_each_cache(ca, c, i)
for_each_bucket(b, ca) {
- b->gc_gen = b->gen;
+ b->last_gc = b->gen;
if (!atomic_read(&b->pin)) {
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
}
}
@@ -1539,7 +1641,7 @@ static void btree_gc_start(struct cache_set *c)
mutex_unlock(&c->bucket_lock);
}
-size_t bch_btree_gc_finish(struct cache_set *c)
+static size_t bch_btree_gc_finish(struct cache_set *c)
{
size_t available = 0;
struct bucket *b;
@@ -1552,11 +1654,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
c->gc_mark_valid = 1;
c->need_gc = 0;
- if (c->root)
- for (i = 0; i < KEY_PTRS(&c->root->key); i++)
- SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
- GC_MARK_METADATA);
-
for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
@@ -1596,15 +1693,15 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
for_each_bucket(b, ca) {
- b->last_gc = b->gc_gen;
c->need_gc = max(c->need_gc, bucket_gc_gen(b));
- if (!atomic_read(&b->pin) &&
- GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+ if (atomic_read(&b->pin))
+ continue;
+
+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
available++;
- if (!GC_SECTORS_USED(b))
- bch_bucket_add_unused(ca, b);
- }
}
}
@@ -1696,313 +1793,113 @@ int bch_gc_thread_start(struct cache_set *c)
/* Initial partial gc */
-static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
- unsigned long **seen)
+static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
- unsigned i;
struct bkey *k, *p = NULL;
- struct bucket *g;
struct btree_iter iter;
- for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
- for (i = 0; i < KEY_PTRS(k); i++) {
- if (!ptr_available(b->c, k, i))
- continue;
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(b->c, b->level, k);
- g = PTR_BUCKET(b->c, k, i);
-
- if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
- seen[PTR_DEV(k, i)]) ||
- !ptr_stale(b->c, k, i)) {
- g->gen = PTR_GEN(k, i);
-
- if (b->level)
- g->prio = BTREE_PRIO;
- else if (g->prio == BTREE_PRIO)
- g->prio = INITIAL_PRIO;
- }
- }
-
- btree_mark_key(b, k);
- }
+ bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter, &b->keys,
+ bch_ptr_bad);
if (k)
btree_node_prefetch(b->c, k, b->level - 1);
if (p)
- ret = btree(check_recurse, p, b, op, seen);
+ ret = btree(check_recurse, p, b, op);
p = k;
} while (p && !ret);
}
- return 0;
+ return ret;
}
int bch_btree_check(struct cache_set *c)
{
- int ret = -ENOMEM;
- unsigned i;
- unsigned long *seen[MAX_CACHES_PER_SET];
struct btree_op op;
- memset(seen, 0, sizeof(seen));
bch_btree_op_init(&op, SHRT_MAX);
- for (i = 0; c->cache[i]; i++) {
- size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
- seen[i] = kmalloc(n, GFP_KERNEL);
- if (!seen[i])
- goto err;
-
- /* Disables the seen array until prio_read() uses it too */
- memset(seen[i], 0xFF, n);
- }
-
- ret = btree_root(check_recurse, c, &op, seen);
-err:
- for (i = 0; i < MAX_CACHES_PER_SET; i++)
- kfree(seen[i]);
- return ret;
+ return btree_root(check_recurse, c, &op);
}
-/* Btree insertion */
-
-static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
+void bch_initial_gc_finish(struct cache_set *c)
{
- struct bset *i = b->sets[b->nsets].data;
-
- memmove((uint64_t *) where + bkey_u64s(insert),
- where,
- (void *) end(i) - (void *) where);
-
- i->keys += bkey_u64s(insert);
- bkey_copy(where, insert);
- bch_bset_fix_lookup_table(b, where);
-}
-
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
- struct btree_iter *iter,
- struct bkey *replace_key)
-{
- void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
- {
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- offset, -sectors);
- }
-
- uint64_t old_offset;
- unsigned old_size, sectors_found = 0;
-
- while (1) {
- struct bkey *k = bch_btree_iter_next(iter);
- if (!k ||
- bkey_cmp(&START_KEY(k), insert) >= 0)
- break;
-
- if (bkey_cmp(k, &START_KEY(insert)) <= 0)
- continue;
-
- old_offset = KEY_START(k);
- old_size = KEY_SIZE(k);
-
- /*
- * We might overlap with 0 size extents; we can't skip these
- * because if they're in the set we're inserting to we have to
- * adjust them so they don't overlap with the key we're
- * inserting. But we don't want to check them for replace
- * operations.
- */
-
- if (replace_key && KEY_SIZE(k)) {
- /*
- * k might have been split since we inserted/found the
- * key we're replacing
- */
- unsigned i;
- uint64_t offset = KEY_START(k) -
- KEY_START(replace_key);
-
- /* But it must be a subset of the replace key */
- if (KEY_START(k) < KEY_START(replace_key) ||
- KEY_OFFSET(k) > KEY_OFFSET(replace_key))
- goto check_failed;
-
- /* We didn't find a key that we were supposed to */
- if (KEY_START(k) > KEY_START(insert) + sectors_found)
- goto check_failed;
-
- if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
- KEY_DIRTY(k) != KEY_DIRTY(replace_key))
- goto check_failed;
-
- /* skip past gen */
- offset <<= 8;
-
- BUG_ON(!KEY_PTRS(replace_key));
-
- for (i = 0; i < KEY_PTRS(replace_key); i++)
- if (k->ptr[i] != replace_key->ptr[i] + offset)
- goto check_failed;
-
- sectors_found = KEY_OFFSET(k) - KEY_START(insert);
- }
+ struct cache *ca;
+ struct bucket *b;
+ unsigned i;
- if (bkey_cmp(insert, k) < 0 &&
- bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
- /*
- * We overlapped in the middle of an existing key: that
- * means we have to split the old key. But we have to do
- * slightly different things depending on whether the
- * old key has been written out yet.
- */
+ bch_btree_gc_finish(c);
- struct bkey *top;
-
- subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
- if (bkey_written(b, k)) {
- /*
- * We insert a new key to cover the top of the
- * old key, and the old key is modified in place
- * to represent the bottom split.
- *
- * It's completely arbitrary whether the new key
- * is the top or the bottom, but it has to match
- * up with what btree_sort_fixup() does - it
- * doesn't check for this kind of overlap, it
- * depends on us inserting a new key for the top
- * here.
- */
- top = bch_bset_search(b, &b->sets[b->nsets],
- insert);
- shift_keys(b, top, k);
- } else {
- BKEY_PADDED(key) temp;
- bkey_copy(&temp.key, k);
- shift_keys(b, k, &temp.key);
- top = bkey_next(k);
- }
+ mutex_lock(&c->bucket_lock);
- bch_cut_front(insert, top);
- bch_cut_back(&START_KEY(insert), k);
- bch_bset_fix_invalidated_key(b, k);
- return false;
- }
+ /*
+ * We need to put some unused buckets directly on the prio freelist in
+ * order to get the allocator thread started - it needs freed buckets in
+ * order to rewrite the prios and gens, and it needs to rewrite prios
+ * and gens in order to free buckets.
+ *
+ * This is only safe for buckets that have no live data in them, which
+ * there should always be some of.
+ */
+ for_each_cache(ca, c, i) {
+ for_each_bucket(b, ca) {
+ if (fifo_full(&ca->free[RESERVE_PRIO]))
+ break;
- if (bkey_cmp(insert, k) < 0) {
- bch_cut_front(insert, k);
- } else {
- if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
- old_offset = KEY_START(insert);
-
- if (bkey_written(b, k) &&
- bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
- /*
- * Completely overwrote, so we don't have to
- * invalidate the binary search tree
- */
- bch_cut_front(k, k);
- } else {
- __bch_cut_back(&START_KEY(insert), k);
- bch_bset_fix_invalidated_key(b, k);
+ if (bch_can_invalidate_bucket(ca, b) &&
+ !GC_MARK(b)) {
+ __bch_invalidate_one_bucket(ca, b);
+ fifo_push(&ca->free[RESERVE_PRIO],
+ b - ca->buckets);
}
}
-
- subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
}
-check_failed:
- if (replace_key) {
- if (!sectors_found) {
- return true;
- } else if (sectors_found < KEY_SIZE(insert)) {
- SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
- (KEY_SIZE(insert) - sectors_found));
- SET_KEY_SIZE(insert, sectors_found);
- }
- }
-
- return false;
+ mutex_unlock(&c->bucket_lock);
}
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
- struct bkey *k, struct bkey *replace_key)
+/* Btree insertion */
+
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+ struct bkey *replace_key)
{
- struct bset *i = b->sets[b->nsets].data;
- struct bkey *m, *prev;
- unsigned status = BTREE_INSERT_STATUS_INSERT;
+ unsigned status;
BUG_ON(bkey_cmp(k, &b->key) > 0);
- BUG_ON(b->level && !KEY_PTRS(k));
- BUG_ON(!b->level && !KEY_OFFSET(k));
- if (!b->level) {
- struct btree_iter iter;
+ status = bch_btree_insert_key(&b->keys, k, replace_key);
+ if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+ bch_check_keys(&b->keys, "%u for %s", status,
+ replace_key ? "replace" : "insert");
- /*
- * bset_search() returns the first key that is strictly greater
- * than the search key - but for back merging, we want to find
- * the previous key.
- */
- prev = NULL;
- m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
-
- if (fix_overlapping_extents(b, k, &iter, replace_key)) {
- op->insert_collision = true;
- return false;
- }
-
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- KEY_START(k), KEY_SIZE(k));
-
- while (m != end(i) &&
- bkey_cmp(k, &START_KEY(m)) > 0)
- prev = m, m = bkey_next(m);
-
- if (key_merging_disabled(b->c))
- goto insert;
-
- /* prev is in the tree, if we merge we're done */
- status = BTREE_INSERT_STATUS_BACK_MERGE;
- if (prev &&
- bch_bkey_try_merge(b, prev, k))
- goto merged;
-
- status = BTREE_INSERT_STATUS_OVERWROTE;
- if (m != end(i) &&
- KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
- goto copy;
-
- status = BTREE_INSERT_STATUS_FRONT_MERGE;
- if (m != end(i) &&
- bch_bkey_try_merge(b, k, m))
- goto copy;
- } else {
- BUG_ON(replace_key);
- m = bch_bset_search(b, &b->sets[b->nsets], k);
- }
-
-insert: shift_keys(b, m, k);
-copy: bkey_copy(m, k);
-merged:
- bch_check_keys(b, "%u for %s", status,
- replace_key ? "replace" : "insert");
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+ status);
+ return true;
+ } else
+ return false;
+}
- if (b->level && !KEY_OFFSET(k))
- btree_current_write(b)->prio_blocked++;
+static size_t insert_u64s_remaining(struct btree *b)
+{
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
- trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
+ /*
+ * Might land in the middle of an existing extent and have to split it
+ */
+ if (b->keys.ops->is_extents)
+ ret -= KEY_MAX_U64S;
- return true;
+ return max(ret, 0L);
}
static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
@@ -2010,21 +1907,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
struct bkey *replace_key)
{
bool ret = false;
- int oldsize = bch_count_data(b);
+ int oldsize = bch_count_data(&b->keys);
while (!bch_keylist_empty(insert_keys)) {
- struct bset *i = write_block(b);
struct bkey *k = insert_keys->keys;
- if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
- > btree_blocks(b))
+ if (bkey_u64s(k) > insert_u64s_remaining(b))
break;
if (bkey_cmp(k, &b->key) <= 0) {
if (!b->level)
bkey_put(b->c, k);
- ret |= btree_insert_key(b, op, k, replace_key);
+ ret |= btree_insert_key(b, k, replace_key);
bch_keylist_pop_front(insert_keys);
} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
BKEY_PADDED(key) temp;
@@ -2033,16 +1928,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
bch_cut_back(&b->key, &temp.key);
bch_cut_front(&b->key, insert_keys->keys);
- ret |= btree_insert_key(b, op, &temp.key, replace_key);
+ ret |= btree_insert_key(b, &temp.key, replace_key);
break;
} else {
break;
}
}
+ if (!ret)
+ op->insert_collision = true;
+
BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
- BUG_ON(bch_count_data(b) < oldsize);
+ BUG_ON(bch_count_data(&b->keys) < oldsize);
return ret;
}
@@ -2059,27 +1957,38 @@ static int btree_split(struct btree *b, struct btree_op *op,
closure_init_stack(&cl);
bch_keylist_init(&parent_keys);
- n1 = btree_node_alloc_replacement(b, true);
+ if (btree_check_reserve(b, op)) {
+ if (!b->level)
+ return -EINTR;
+ else
+ WARN(1, "insufficient reserve for split\n");
+ }
+
+ n1 = btree_node_alloc_replacement(b, op);
if (IS_ERR(n1))
goto err;
- split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+ split = set_blocks(btree_bset_first(n1),
+ block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) {
unsigned keys = 0;
- trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+ trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
- n2 = bch_btree_node_alloc(b->c, b->level, true);
+ n2 = bch_btree_node_alloc(b->c, op, b->level);
if (IS_ERR(n2))
goto err_free1;
if (!b->parent) {
- n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
+ n3 = bch_btree_node_alloc(b->c, op, b->level + 1);
if (IS_ERR(n3))
goto err_free2;
}
+ mutex_lock(&n1->write_lock);
+ mutex_lock(&n2->write_lock);
+
bch_btree_insert_keys(n1, op, insert_keys, replace_key);
/*
@@ -2087,80 +1996,85 @@ static int btree_split(struct btree *b, struct btree_op *op,
* search tree yet
*/
- while (keys < (n1->sets[0].data->keys * 3) / 5)
- keys += bkey_u64s(node(n1->sets[0].data, keys));
+ while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+ keys));
- bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
- keys += bkey_u64s(node(n1->sets[0].data, keys));
+ bkey_copy_key(&n1->key,
+ bset_bkey_idx(btree_bset_first(n1), keys));
+ keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
- n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
- n1->sets[0].data->keys = keys;
+ btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+ btree_bset_first(n1)->keys = keys;
- memcpy(n2->sets[0].data->start,
- end(n1->sets[0].data),
- n2->sets[0].data->keys * sizeof(uint64_t));
+ memcpy(btree_bset_first(n2)->start,
+ bset_bkey_last(btree_bset_first(n1)),
+ btree_bset_first(n2)->keys * sizeof(uint64_t));
bkey_copy_key(&n2->key, &b->key);
bch_keylist_add(&parent_keys, &n2->key);
bch_btree_node_write(n2, &cl);
+ mutex_unlock(&n2->write_lock);
rw_unlock(true, n2);
} else {
- trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+ trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
+ mutex_lock(&n1->write_lock);
bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
bch_keylist_add(&parent_keys, &n1->key);
bch_btree_node_write(n1, &cl);
+ mutex_unlock(&n1->write_lock);
if (n3) {
/* Depth increases, make a new root */
+ mutex_lock(&n3->write_lock);
bkey_copy_key(&n3->key, &MAX_KEY);
bch_btree_insert_keys(n3, op, &parent_keys, NULL);
bch_btree_node_write(n3, &cl);
+ mutex_unlock(&n3->write_lock);
closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
-
- btree_node_free(b);
} else if (!b->parent) {
/* Root filled up but didn't need to be split */
closure_sync(&cl);
bch_btree_set_root(n1);
-
- btree_node_free(b);
} else {
/* Split a non root node */
closure_sync(&cl);
make_btree_freeing_key(b, parent_keys.top);
bch_keylist_push(&parent_keys);
- btree_node_free(b);
-
bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
BUG_ON(!bch_keylist_empty(&parent_keys));
}
+ btree_node_free(b);
rw_unlock(true, n1);
bch_time_stats_update(&b->c->btree_split_time, start_time);
return 0;
err_free2:
+ bkey_put(b->c, &n2->key);
btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
+ bkey_put(b->c, &n1->key);
btree_node_free(n1);
rw_unlock(true, n1);
err:
+ WARN(1, "bcache: btree split failed (level %u)", b->level);
+
if (n3 == ERR_PTR(-EAGAIN) ||
n2 == ERR_PTR(-EAGAIN) ||
n1 == ERR_PTR(-EAGAIN))
return -EAGAIN;
- pr_warn("couldn't split");
return -ENOMEM;
}
@@ -2169,31 +2083,54 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
atomic_t *journal_ref,
struct bkey *replace_key)
{
+ struct closure cl;
+
BUG_ON(b->level && replace_key);
- if (should_split(b)) {
- if (current->bio_list) {
- op->lock = b->c->root->level + 1;
- return -EAGAIN;
- } else if (op->lock <= b->c->root->level) {
- op->lock = b->c->root->level + 1;
- return -EINTR;
- } else {
- /* Invalidated all iterators */
- return btree_split(b, op, insert_keys, replace_key) ?:
- -EINTR;
- }
- } else {
- BUG_ON(write_block(b) != b->sets[b->nsets].data);
+ closure_init_stack(&cl);
- if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
- if (!b->level)
- bch_btree_leaf_dirty(b, journal_ref);
- else
- bch_btree_node_write_sync(b);
- }
+ mutex_lock(&b->write_lock);
- return 0;
+ if (write_block(b) != btree_bset_last(b) &&
+ b->keys.last_set_unwritten)
+ bch_btree_init_next(b); /* just wrote a set */
+
+ if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
+ mutex_unlock(&b->write_lock);
+ goto split;
+ }
+
+ BUG_ON(write_block(b) != btree_bset_last(b));
+
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write(b, &cl);
+ }
+
+ mutex_unlock(&b->write_lock);
+
+ /* wait for btree node write if necessary, after unlock */
+ closure_sync(&cl);
+
+ return 0;
+split:
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ int ret = btree_split(b, op, insert_keys, replace_key);
+
+ if (bch_keylist_empty(insert_keys))
+ return 0;
+ else if (!ret)
+ return -EINTR;
+ return ret;
}
}
@@ -2323,9 +2260,9 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, from);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, b,
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad))) {
ret = btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2356,9 +2293,9 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, from);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: btree(map_keys_recurse, k, b, op, from, fn, flags);
@@ -2579,18 +2516,3 @@ void bch_keybuf_init(struct keybuf *buf)
spin_lock_init(&buf->lock);
array_allocator_init(&buf->freelist);
}
-
-void bch_btree_exit(void)
-{
- if (btree_io_wq)
- destroy_workqueue(btree_io_wq);
-}
-
-int __init bch_btree_init(void)
-{
- btree_io_wq = create_singlethread_workqueue("bch_btree_io");
- if (!btree_io_wq)
- return -ENOMEM;
-
- return 0;
-}
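The cannibalize lock introduced in this patch is a single-owner lock taken with cmpxchg() and released with a plain store plus a wake_up(). A minimal sketch of the same pattern, using hypothetical names rather than the bcache fields, might look like this:

	static struct task_struct *owner;		/* NULL while unlocked */
	static DECLARE_WAIT_QUEUE_HEAD(owner_wait);

	static int try_take_lock(wait_queue_t *wait)
	{
		struct task_struct *old = cmpxchg(&owner, NULL, current);

		if (old && old != current) {
			/* lost the race: queue ourselves, tell the caller to retry */
			prepare_to_wait(&owner_wait, wait, TASK_UNINTERRUPTIBLE);
			return -EINTR;
		}

		return 0;	/* we now own the lock (or already did) */
	}

	static void release_lock(void)
	{
		if (owner == current) {
			owner = NULL;
			wake_up(&owner_wait);
		}
	}

As in mca_cannibalize_lock() above, -EINTR here is not about signals; it simply means "drop your locks, sleep, and retry".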
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 767e75570896..91dfa5e69685 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -127,23 +127,17 @@ struct btree {
struct cache_set *c;
struct btree *parent;
+ struct mutex write_lock;
+
unsigned long flags;
uint16_t written; /* would be nice to kill */
uint8_t level;
- uint8_t nsets;
- uint8_t page_order;
-
- /*
- * Set of sorted keys - the real btree node - plus a binary search tree
- *
- * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
- * to the memory we have allocated for this btree node. Additionally,
- * set[0]->data points to the entire btree node as it exists on disk.
- */
- struct bset_tree sets[MAX_BSETS];
+
+ struct btree_keys keys;
/* For outstanding btree writes, used as a lock - protects write_idx */
- struct closure_with_waitlist io;
+ struct closure io;
+ struct semaphore io_mutex;
struct list_head list;
struct delayed_work work;
@@ -179,24 +173,19 @@ static inline struct btree_write *btree_prev_write(struct btree *b)
return b->writes + (btree_node_write_idx(b) ^ 1);
}
-static inline unsigned bset_offset(struct btree *b, struct bset *i)
-{
- return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
-}
-
-static inline struct bset *write_block(struct btree *b)
+static inline struct bset *btree_bset_first(struct btree *b)
{
- return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+ return b->keys.set->data;
}
-static inline bool bset_written(struct btree *b, struct bset_tree *t)
+static inline struct bset *btree_bset_last(struct btree *b)
{
- return t->data < write_block(b);
+ return bset_tree_last(&b->keys)->data;
}
-static inline bool bkey_written(struct btree *b, struct bkey *k)
+static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
{
- return k < write_block(b)->start;
+ return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
static inline void set_gc_sectors(struct cache_set *c)
@@ -204,21 +193,6 @@ static inline void set_gc_sectors(struct cache_set *c)
atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
- struct btree_iter *iter,
- struct bkey *search)
-{
- return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
- if (b->level)
- return bch_btree_ptr_invalid(b->c, k);
- else
- return bch_extent_ptr_invalid(b->c, k);
-}
-
void bkey_put(struct cache_set *c, struct bkey *k);
/* Looping macros */
@@ -229,17 +203,12 @@ void bkey_put(struct cache_set *c, struct bkey *k);
iter++) \
hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), b, filter));)
-
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
-
/* Recursing down the btree */
struct btree_op {
+ /* for waiting on btree reserve in btree_split() */
+ wait_queue_t wait;
+
/* Btree level at which we start taking write locks */
short lock;
@@ -249,6 +218,7 @@ struct btree_op {
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
memset(op, 0, sizeof(struct btree_op));
+ init_wait(&op->wait);
op->lock = write_lock_level;
}
@@ -267,12 +237,14 @@ static inline void rw_unlock(bool w, struct btree *b)
(w ? up_write : up_read)(&b->lock);
}
-void bch_btree_node_read(struct btree *);
+void bch_btree_node_read_done(struct btree *);
+void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
+struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int);
+struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
+ struct bkey *, int, bool);
int bch_btree_insert_check_key(struct btree *, struct btree_op *,
struct bkey *);
@@ -280,10 +252,10 @@ int bch_btree_insert(struct cache_set *, struct keylist *,
atomic_t *, struct bkey *);
int bch_gc_thread_start(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
+void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
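With the wait_queue_t now embedded in struct btree_op, a caller that gets -EINTR back from the reserve or cannibalize paths has already been queued on c->btree_cache_wait and only needs to sleep and retry. A rough caller-side sketch (some_btree_operation() is a hypothetical stand-in for whichever op-taking function is being retried):

	struct btree_op op;
	int ret;

	bch_btree_op_init(&op, SHRT_MAX);	/* zeroes op and does init_wait(&op.wait) */

	do {
		ret = some_btree_operation(c, &op);	/* hypothetical */
		if (ret == -EINTR)
			schedule();	/* prepare_to_wait() was already done for us */
	} while (ret == -EINTR);

	finish_wait(&c->btree_cache_wait, &op.wait);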
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index dfff2410322e..7a228de95fd7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,19 +11,6 @@
#include "closure.h"
-#define CL_FIELD(type, field) \
- case TYPE_ ## type: \
- return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_waitlist, wait);
- default:
- return NULL;
- }
-}
-
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
closure_queue(cl);
} else {
struct closure *parent = cl->parent;
- struct closure_waitlist *wait = closure_waitlist(cl);
closure_fn *destructor = cl->fn;
closure_debug_destroy(cl);
- smp_mb();
- atomic_set(&cl->remaining, -1);
-
- if (wait)
- closure_wake_up(wait);
-
if (destructor)
destructor(cl);
@@ -69,19 +49,18 @@ void closure_sub(struct closure *cl, int v)
}
EXPORT_SYMBOL(closure_sub);
+/**
+ * closure_put - decrement a closure's refcount
+ */
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
- cl->waiting_on = f;
-#endif
-}
-
+/**
+ * __closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
@@ -106,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
cl = container_of(reverse, struct closure, list);
reverse = llist_next(reverse);
- set_waiting(cl, 0);
+ closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
EXPORT_SYMBOL(__closure_wake_up);
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
return false;
- set_waiting(cl, _RET_IP_);
+ closure_set_waiting(cl, _RET_IP_);
atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
- llist_add(&cl->list, &list->list);
+ llist_add(&cl->list, &waitlist->list);
return true;
}
EXPORT_SYMBOL(closure_wait);
/**
- * closure_sync() - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure a closure has nothing left to wait on
*
* Sleeps until the refcount hits 1 - the thread that's running the closure owns
* the last refcount.
@@ -148,46 +134,6 @@ void closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(closure_sync);
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl: closure to lock
- *
- * Returns true if the closure was succesfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
- if (atomic_cmpxchg(&cl->remaining, -1,
- CLOSURE_REMAINING_INITIALIZER) != -1)
- return false;
-
- smp_mb();
-
- cl->parent = parent;
- if (parent)
- closure_get(parent);
-
- closure_set_ret_ip(cl);
- closure_debug_create(cl);
- return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
- struct closure_waitlist *wait_list)
-{
- struct closure wait;
- closure_init_stack(&wait);
-
- while (1) {
- if (closure_trylock(cl, parent))
- return;
-
- closure_wait_event(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
- }
-}
-EXPORT_SYMBOL(__closure_lock);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
static LIST_HEAD(closure_list);
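closure_wait() as documented above pairs naturally with closure_sync() when a thread just wants to block until someone runs closure_wake_up() on the list. A minimal sketch, assuming a struct closure_waitlist called some_waitlist exists elsewhere:

	struct closure cl;

	closure_init_stack(&cl);
	closure_wait(&some_waitlist, &cl);	/* the waitlist now owns a ref on cl */
	closure_sync(&cl);			/* sleep until closure_wake_up() drops it */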
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 9762f1be3304..7ef7461912be 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -72,30 +72,6 @@
* closure - _always_ use continue_at(). Doing so consistently will help
* eliminate an entire class of particularly pernicious races.
*
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- * struct closure_waitlist list;
- * closure_wait_event(list, cl, condition);
- * closure_wake_up(wait_list);
- *
- * These work analagously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
* Lastly, you might have a wait list dedicated to a specific event, and have no
* need for specifying the condition - you just want to wait until someone runs
* closure_wake_up() on the appropriate wait list. In that case, just use
@@ -121,40 +97,6 @@
* All this implies that a closure should typically be embedded in a particular
* struct (which its refcount will normally control the lifetime of), and that
* struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_to() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- * closure_init_unlocked();
- * closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you. But for closure_lock(), we also need a wait list:
- *
- * struct closure_with_waitlist frobnicator_cl;
- *
- * closure_init_unlocked(&frobnicator_cl);
- * closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
*/
struct closure;
@@ -164,12 +106,6 @@ struct closure_waitlist {
struct llist_head list;
};
-enum closure_type {
- TYPE_closure = 0,
- TYPE_closure_with_waitlist = 1,
- MAX_CLOSURE_TYPE = 1,
-};
-
enum closure_state {
/*
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@ struct closure {
atomic_t remaining;
- enum closure_type type;
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
@@ -237,34 +171,12 @@ struct closure {
#endif
};
-struct closure_with_waitlist {
- struct closure cl;
- struct closure_waitlist wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t) \
- __builtin_types_compatible_p(typeof(cl), struct _t) \
- ? TYPE_ ## _t : \
-
-#define __closure_type(cl) \
-( \
- __CLOSURE_TYPE(cl, closure) \
- __CLOSURE_TYPE(cl, closure_with_waitlist) \
- invalid_closure_type() \
-)
-
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
- struct closure_waitlist *wait_list);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -293,134 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)
#endif
}
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
- BUG_ON((atomic_inc_return(&cl->remaining) &
- CLOSURE_REMAINING_MASK) <= 1);
-#else
- atomic_inc(&cl->remaining);
+ cl->waiting_on = f;
#endif
}
-static inline void closure_set_stopped(struct closure *cl)
+static inline void __closure_end_sleep(struct closure *cl)
{
- atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+ __set_current_state(TASK_RUNNING);
+
+ if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+ atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
}
-static inline bool closure_is_unlocked(struct closure *cl)
+static inline void __closure_start_sleep(struct closure *cl)
{
- return atomic_read(&cl->remaining) == -1;
+ closure_set_ip(cl);
+ cl->task = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+ atomic_add(CLOSURE_SLEEPING, &cl->remaining);
}
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
- bool running)
+static inline void closure_set_stopped(struct closure *cl)
{
- cl->parent = parent;
- if (parent)
- closure_get(parent);
-
- if (running) {
- closure_debug_create(cl);
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
- } else
- atomic_set(&cl->remaining, -1);
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+ /* between atomic_dec() in closure_put() */
+ smp_mb__before_atomic_dec();
}
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
- */
-#define __to_internal_closure(cl) \
-({ \
- BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE); \
- (struct closure *) cl; \
-})
-
-#define closure_init_type(cl, parent, running) \
-do { \
- struct closure *_cl = __to_internal_closure(cl); \
- _cl->type = __closure_type(*(cl)); \
- do_closure_init(_cl, parent, running); \
-} while (0)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
/**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
+ * closure_get - increment a closure's refcount
*/
-#define __closure_init(cl, parent) \
- closure_init_type(cl, parent, true)
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
/**
- * closure_init() - Initialize a closure, setting the refcount to 1
+ * closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
* lifetime; may be NULL.
*/
-#define closure_init(cl, parent) \
-do { \
- memset((cl), 0, sizeof(*(cl))); \
- __closure_init(cl, parent); \
-} while (0)
-
-static inline void closure_init_stack(struct closure *cl)
+static inline void closure_init(struct closure *cl, struct closure *parent)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl: closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl) \
-do { \
- memset((cl), 0, sizeof(*(cl))); \
- closure_init_type(cl, NULL, false); \
-} while (0)
-
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl: the closure to lock
- * @parent: the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent) \
- __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+ closure_debug_create(cl);
+ closure_set_ip(cl);
}
-static inline void __closure_start_sleep(struct closure *cl)
+static inline void closure_init_stack(struct closure *cl)
{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
- * closure_wake_up() - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
{
@@ -428,69 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)
__closure_wake_up(list);
}
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourself to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourself to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
*/
-#define closure_wait_event(list, cl, condition) \
-({ \
- typeof(condition) ret; \
- \
- while (1) { \
- ret = (condition); \
- if (ret) { \
- __closure_wake_up(list); \
- closure_sync(cl); \
- break; \
- } \
- \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) \
- schedule(); \
- } \
- \
- ret; \
-})
-
-static inline void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-
-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
- struct workqueue_struct *wq)
-{
- BUG_ON(object_is_on_stack(cl));
- closure_set_ip(cl);
- cl->fn = fn;
- cl->wq = wq;
- /* between atomic_dec() in closure_put() */
- smp_mb__before_atomic_dec();
-}
-
#define continue_at(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
@@ -498,8 +323,28 @@ do { \
return; \
} while (0)
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped, @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
@@ -507,6 +352,15 @@ do { \
return; \
} while (0)
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
#define closure_return_with_destructor(_cl, _destructor) \
do { \
set_closure_fn(_cl, _destructor, NULL); \
@@ -514,6 +368,13 @@ do { \
return; \
} while (0)
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
static inline void closure_call(struct closure *cl, closure_fn fn,
struct workqueue_struct *wq,
struct closure *parent)
@@ -522,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
continue_at_nobarrier(cl, fn, wq);
}
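
(A hypothetical caller of closure_call(), as documented above - child_fn() and the stack-allocated parent are invented for illustration and assume the usual closure_init_stack()/closure_sync() from this header:)

    static void child_fn(struct closure *cl);	/* invented; ends with closure_return(cl) */

    static void example_run_and_wait(struct closure *child, struct workqueue_struct *wq)
    {
    	struct closure cl;

    	closure_init_stack(&cl);

    	/* @child takes a ref on &cl; child_fn()'s closure_return() drops it */
    	closure_call(child, child_fn, wq, &cl);

    	closure_sync(&cl);	/* wait here until child_fn() has finished */
    }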
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
- struct workqueue_struct *wq,
- struct closure *parent)
-{
- if (closure_trylock(cl, parent))
- continue_at_nobarrier(cl, fn, wq);
-}
-
#endif /* _LINUX_CLOSURE_H */
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..8b1f1d5c1819 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,6 +8,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
+#include "extents.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -17,163 +18,96 @@
static struct dentry *debug;
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
- return "bad, length too big";
- if (bucket < ca->sb.first_bucket)
- return "bad, short offset";
- if (bucket >= ca->sb.nbuckets)
- return "bad, offset past end of device";
- if (ptr_stale(c, k, i))
- return "stale";
- }
-
- if (!bkey_cmp(k, &ZERO_KEY))
- return "bad, null key";
- if (!KEY_PTRS(k))
- return "bad, no pointers";
- if (!KEY_SIZE(k))
- return "zeroed key";
- return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
- unsigned i = 0;
- char *out = buf, *end = buf + size;
-
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
-
- p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
-
- if (KEY_PTRS(k))
- while (1) {
- p("%llu:%llu gen %llu",
- PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
-
- if (++i == KEY_PTRS(k))
- break;
-
- p(", ");
- }
-
- p("]");
-
- if (KEY_DIRTY(k))
- p(" dirty");
- if (KEY_CSUM(k))
- p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
- return out - buf;
-}
-
#ifdef CONFIG_BCACHE_DEBUG
-static void dump_bset(struct btree *b, struct bset *i)
-{
- struct bkey *k, *next;
- unsigned j;
- char buf[80];
-
- for (k = i->start; k < end(i); k = next) {
- next = bkey_next(k);
-
- bch_bkey_to_text(buf, sizeof(buf), k);
- printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
- (uint64_t *) k - i->d, i->keys, buf);
-
- for (j = 0; j < KEY_PTRS(k); j++) {
- size_t n = PTR_BUCKET_NR(b->c, k, j);
- printk(" bucket %zu", n);
-
- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- printk(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
- }
+#define for_each_written_bset(b, start, i) \
+ for (i = (start); \
+ (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+ i->seq == (start)->seq; \
+ i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
+ block_bytes(b->c))
- printk(" %s\n", bch_ptr_status(b->c, k));
-
- if (next < end(i) &&
- bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
- printk(KERN_ERR "Key skipped backwards\n");
- }
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
- unsigned i;
-
- console_lock();
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
- console_unlock();
-}
-
-void bch_btree_verify(struct btree *b, struct bset *new)
+void bch_btree_verify(struct btree *b)
{
struct btree *v = b->c->verify_data;
- struct closure cl;
- closure_init_stack(&cl);
+ struct bset *ondisk, *sorted, *inmemory;
+ struct bio *bio;
- if (!b->c->verify)
+ if (!b->c->verify || !b->c->verify_ondisk)
return;
- closure_wait_event(&b->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
-
+ down(&b->io_mutex);
mutex_lock(&b->c->verify_lock);
+ ondisk = b->c->verify_ondisk;
+ sorted = b->c->verify_data->keys.set->data;
+ inmemory = b->keys.set->data;
+
bkey_copy(&v->key, &b->key);
v->written = 0;
v->level = b->level;
+ v->keys.ops = b->keys.ops;
+
+ bio = bch_bbio_alloc(b->c);
+ bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bch_bio_map(bio, sorted);
- bch_btree_node_read(v);
- closure_wait_event(&v->io.wait, &cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ submit_bio_wait(REQ_META|READ_SYNC, bio);
+ bch_bbio_free(bio, b->c);
- if (new->keys != v->sets[0].data->keys ||
- memcmp(new->start,
- v->sets[0].data->start,
- (void *) end(new) - (void *) new->start)) {
- unsigned i, j;
+ memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+ bch_btree_node_read_done(v);
+ sorted = v->keys.set->data;
+
+ if (inmemory->keys != sorted->keys ||
+ memcmp(inmemory->start,
+ sorted->start,
+ (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+ struct bset *i;
+ unsigned j;
console_lock();
- printk(KERN_ERR "*** original memory node:\n");
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
+ printk(KERN_ERR "*** in memory:\n");
+ bch_dump_bset(&b->keys, inmemory, 0);
- printk(KERN_ERR "*** sorted memory node:\n");
- dump_bset(b, new);
+ printk(KERN_ERR "*** read back in:\n");
+ bch_dump_bset(&v->keys, sorted, 0);
- printk(KERN_ERR "*** on disk node:\n");
- dump_bset(v, v->sets[0].data);
+ for_each_written_bset(b, ondisk, i) {
+ unsigned block = ((void *) i - (void *) ondisk) /
+ block_bytes(b->c);
+
+ printk(KERN_ERR "*** on disk block %u:\n", block);
+ bch_dump_bset(&b->keys, i, block);
+ }
- for (j = 0; j < new->keys; j++)
- if (new->d[j] != v->sets[0].data->d[j])
+ printk(KERN_ERR "*** block %zu not written\n",
+ ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+ for (j = 0; j < inmemory->keys; j++)
+ if (inmemory->d[j] != sorted->d[j])
break;
+ printk(KERN_ERR "b->written %u\n", b->written);
+
console_unlock();
panic("verify failed at %u\n", j);
}
mutex_unlock(&b->c->verify_lock);
+ up(&b->io_mutex);
}
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec *bv;
+ struct bio_vec bv, *bv2;
+ struct bvec_iter iter;
int i;
check = bio_clone(bio, GFP_NOIO);
@@ -185,95 +119,27 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
submit_bio_wait(READ_SYNC, check);
- bio_for_each_segment(bv, bio, i) {
- void *p1 = kmap_atomic(bv->bv_page);
- void *p2 = page_address(check->bi_io_vec[i].bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *p1 = kmap_atomic(bv.bv_page);
+ void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
- cache_set_err_on(memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len),
+ cache_set_err_on(memcmp(p1 + bv.bv_offset,
+ p2 + bv.bv_offset,
+ bv.bv_len),
dc->disk.c,
"verify failed at dev %s sector %llu",
bdevname(dc->bdev, name),
- (uint64_t) bio->bi_sector);
+ (uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
}
- bio_for_each_segment_all(bv, check, i)
- __free_page(bv->bv_page);
+ bio_for_each_segment_all(bv2, check, i)
+ __free_page(bv2->bv_page);
out_put:
bio_put(check);
}
-int __bch_count_data(struct btree *b)
-{
- unsigned ret = 0;
- struct btree_iter iter;
- struct bkey *k;
-
- if (!b->level)
- for_each_key(b, k, &iter)
- ret += KEY_SIZE(k);
- return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
- va_list args;
- struct bkey *k, *p = NULL;
- struct btree_iter iter;
- const char *err;
-
- for_each_key(b, k, &iter) {
- if (!b->level) {
- err = "Keys out of order";
- if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
- goto bug;
-
- if (bch_ptr_invalid(b, k))
- continue;
-
- err = "Overlapping keys";
- if (p && bkey_cmp(p, &START_KEY(k)) > 0)
- goto bug;
- } else {
- if (bch_ptr_bad(b, k))
- continue;
-
- err = "Duplicate keys";
- if (p && !bkey_cmp(p, k))
- goto bug;
- }
- p = k;
- }
-
- err = "Key larger than btree node key";
- if (p && bkey_cmp(p, &b->key) > 0)
- goto bug;
-
- return;
-bug:
- bch_dump_bucket(b);
-
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
-
- panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
- struct bkey *k = iter->data->k, *next = bkey_next(k);
-
- if (next < iter->data->end &&
- bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
- bch_dump_bucket(iter->b);
- panic("Key skipped backwards\n");
- }
-}
-
#endif
#ifdef CONFIG_DEBUG_FS
@@ -320,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
if (!w)
break;
- bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+ bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
bch_keybuf_del(&i->keys, w);
}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 2ede60e31874..1f63c195d247 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,47 +1,30 @@
#ifndef _BCACHE_DEBUG_H
#define _BCACHE_DEBUG_H
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
#ifdef CONFIG_BCACHE_DEBUG
-void bch_btree_verify(struct btree *, struct bset *);
+void bch_btree_verify(struct btree *);
void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
-#define EBUG_ON(cond) BUG_ON(cond)
#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled)
#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */
-static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_btree_verify(struct btree *b) {}
static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
-#define EBUG_ON(cond) do { if (cond); } while (0)
#define expensive_debug_checks(c) 0
#define key_merging_disabled(c) 0
#define bypass_torture_test(d) 0
#endif
-#define bch_count_data(b) \
- (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...) \
-do { \
- if (expensive_debug_checks((b)->c)) \
- __bch_check_keys(b, __VA_ARGS__); \
-} while (0)
-
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644
index 000000000000..3a0de4cf9771
--- /dev/null
+++ b/drivers/md/bcache/extents.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+ struct btree_iter_set *i)
+{
+ i->k = bkey_next(i->k);
+
+ if (i->k == i->end)
+ *i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(l.k, r.k);
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+ bucket < ca->sb.first_bucket ||
+ bucket >= ca->sb.nbuckets)
+ return true;
+ }
+
+ return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i)) {
+ struct cache *ca = PTR_CACHE(c, k, i);
+ size_t bucket = PTR_BUCKET_NR(c, k, i);
+ size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+ if (KEY_SIZE(k) + r > c->sb.bucket_size)
+ return "bad, length too big";
+ if (bucket < ca->sb.first_bucket)
+ return "bad, short offset";
+ if (bucket >= ca->sb.nbuckets)
+ return "bad, offset past end of device";
+ if (ptr_stale(c, k, i))
+ return "stale";
+ }
+
+ if (!bkey_cmp(k, &ZERO_KEY))
+ return "bad, null key";
+ if (!KEY_PTRS(k))
+ return "bad, no pointers";
+ if (!KEY_SIZE(k))
+ return "zeroed key";
+ return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+ unsigned i = 0;
+ char *out = buf, *end = buf + size;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+ p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (i)
+ p(", ");
+
+ if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+ p("check dev");
+ else
+ p("%llu:%llu gen %llu", PTR_DEV(k, i),
+ PTR_OFFSET(k, i), PTR_GEN(k, i));
+ }
+
+ p("]");
+
+ if (KEY_DIRTY(k))
+ p(" dirty");
+ if (KEY_CSUM(k))
+ p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
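
(For illustration, with invented values: a dirty single-pointer extent rendered by bch_extent_to_text() would read roughly "5:1024 len 16 -> [0:8320 gen 3] dirty" - inode 5, start sector 1024, 16 sectors, one pointer to device 0 at offset 8320 with generation 3.)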
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+ struct btree *b = container_of(keys, struct btree, keys);
+ unsigned j;
+ char buf[80];
+
+ bch_extent_to_text(buf, sizeof(buf), k);
+ printk(" %s", buf);
+
+ for (j = 0; j < KEY_PTRS(k); j++) {
+ size_t n = PTR_BUCKET_NR(b->c, k, j);
+ printk(" bucket %zu", n);
+
+ if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ printk(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
+ }
+
+ printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+ unsigned i;
+ char buf[80];
+ struct bucket *g;
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(b->c, k, i)) {
+ g = PTR_BUCKET(b->c, k, i);
+
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+ }
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
+ buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
+ return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned i;
+
+ if (!bkey_cmp(k, &ZERO_KEY) ||
+ !KEY_PTRS(k) ||
+ bch_ptr_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i) ||
+ ptr_stale(b->c, k, i))
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ btree_ptr_bad_expensive(b, k))
+ return true;
+
+ return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+
+ if (!KEY_OFFSET(insert))
+ btree_current_write(b)->prio_blocked++;
+
+ return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+ .sort_cmp = bch_key_sort_cmp,
+ .insert_fixup = bch_btree_ptr_insert_fixup,
+ .key_invalid = bch_btree_ptr_invalid,
+ .key_bad = bch_btree_ptr_bad,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
+{
+ int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+ return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+ struct bkey *tmp)
+{
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+
+ if (iter->used > 2 &&
+ bch_extent_sort_cmp(i[0], i[1]))
+ i++;
+
+ if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+ break;
+
+ if (!KEY_SIZE(i->k)) {
+ sort_key_next(iter, i);
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ continue;
+ }
+
+ if (top->k > i->k) {
+ if (bkey_cmp(top->k, i->k) >= 0)
+ sort_key_next(iter, i);
+ else
+ bch_cut_front(top->k, i->k);
+
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
+ } else {
+ /* can't happen because of comparison func */
+ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+ if (bkey_cmp(i->k, top->k) < 0) {
+ bkey_copy(tmp, top->k);
+
+ bch_cut_back(&START_KEY(i->k), tmp);
+ bch_cut_front(i->k, top->k);
+ heap_sift(iter, 0, bch_extent_sort_cmp);
+
+ return tmp;
+ } else {
+ bch_cut_back(&START_KEY(i->k), top->k);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void bch_subtract_dirty(struct bkey *k,
+ struct cache_set *c,
+ uint64_t offset,
+ int sectors)
+{
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+ offset, -sectors);
+}
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+ struct bkey *insert,
+ struct btree_iter *iter,
+ struct bkey *replace_key)
+{
+ struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+ uint64_t old_offset;
+ unsigned old_size, sectors_found = 0;
+
+ BUG_ON(!KEY_OFFSET(insert));
+ BUG_ON(!KEY_SIZE(insert));
+
+ while (1) {
+ struct bkey *k = bch_btree_iter_next(iter);
+ if (!k)
+ break;
+
+ if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+ if (KEY_SIZE(k))
+ break;
+ else
+ continue;
+ }
+
+ if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+ continue;
+
+ old_offset = KEY_START(k);
+ old_size = KEY_SIZE(k);
+
+ /*
+ * We might overlap with 0 size extents; we can't skip these
+ * because if they're in the set we're inserting to we have to
+ * adjust them so they don't overlap with the key we're
+ * inserting. But we don't want to check them for replace
+ * operations.
+ */
+
+ if (replace_key && KEY_SIZE(k)) {
+ /*
+ * k might have been split since we inserted/found the
+ * key we're replacing
+ */
+ unsigned i;
+ uint64_t offset = KEY_START(k) -
+ KEY_START(replace_key);
+
+ /* But it must be a subset of the replace key */
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+ goto check_failed;
+
+ /* We didn't find a key that we were supposed to */
+ if (KEY_START(k) > KEY_START(insert) + sectors_found)
+ goto check_failed;
+
+ if (!bch_bkey_equal_header(k, replace_key))
+ goto check_failed;
+
+ /* skip past gen */
+ offset <<= 8;
+
+ BUG_ON(!KEY_PTRS(replace_key));
+
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
+ goto check_failed;
+
+ sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+ }
+
+ if (bkey_cmp(insert, k) < 0 &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+ /*
+ * We overlapped in the middle of an existing key: that
+ * means we have to split the old key. But we have to do
+ * slightly different things depending on whether the
+ * old key has been written out yet.
+ */
+
+ struct bkey *top;
+
+ bch_subtract_dirty(k, c, KEY_START(insert),
+ KEY_SIZE(insert));
+
+ if (bkey_written(b, k)) {
+ /*
+ * We insert a new key to cover the top of the
+ * old key, and the old key is modified in place
+ * to represent the bottom split.
+ *
+ * It's completely arbitrary whether the new key
+ * is the top or the bottom, but it has to match
+ * up with what btree_sort_fixup() does - it
+ * doesn't check for this kind of overlap, it
+ * depends on us inserting a new key for the top
+ * here.
+ */
+ top = bch_bset_search(b, bset_tree_last(b),
+ insert);
+ bch_bset_insert(b, top, k);
+ } else {
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, k);
+ bch_bset_insert(b, k, &temp.key);
+ top = bkey_next(k);
+ }
+
+ bch_cut_front(insert, top);
+ bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ goto out;
+ }
+
+ if (bkey_cmp(insert, k) < 0) {
+ bch_cut_front(insert, k);
+ } else {
+ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+ old_offset = KEY_START(insert);
+
+ if (bkey_written(b, k) &&
+ bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+ /*
+ * Completely overwrote, so we don't have to
+ * invalidate the binary search tree
+ */
+ bch_cut_front(k, k);
+ } else {
+ __bch_cut_back(&START_KEY(insert), k);
+ bch_bset_fix_invalidated_key(b, k);
+ }
+ }
+
+ bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
+ }
+
+check_failed:
+ if (replace_key) {
+ if (!sectors_found) {
+ return true;
+ } else if (sectors_found < KEY_SIZE(insert)) {
+ SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+ (KEY_SIZE(insert) - sectors_found));
+ SET_KEY_SIZE(insert, sectors_found);
+ }
+ }
+out:
+ if (KEY_DIRTY(insert))
+ bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+ KEY_START(insert),
+ KEY_SIZE(insert));
+
+ return false;
+}
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(b->c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_extent_to_text(buf, sizeof(buf), k);
+ cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
+ return true;
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+ unsigned ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->c->gc_mark_valid &&
+ (!GC_MARK(g) ||
+ GC_MARK(g) == GC_MARK_METADATA ||
+ (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
+ goto err;
+
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_extent_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g));
+ return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ struct bucket *g;
+ unsigned i, stale;
+
+ if (!KEY_PTRS(k) ||
+ bch_extent_invalid(bk, k))
+ return true;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (!ptr_available(b->c, k, i))
+ return true;
+
+ if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
+
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
+
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
+
+ if (stale)
+ return true;
+
+ if (expensive_debug_checks(b->c) &&
+ bch_extent_bad_expensive(b, k, i))
+ return true;
+ }
+
+ return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+ return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+ ~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+{
+ struct btree *b = container_of(bk, struct btree, keys);
+ unsigned i;
+
+ if (key_merging_disabled(b->c))
+ return false;
+
+ for (i = 0; i < KEY_PTRS(l); i++)
+ if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+ return false;
+
+ /* Keys with no pointers aren't restricted to one bucket and could
+ * overflow KEY_SIZE
+ */
+ if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+ SET_KEY_SIZE(l, USHRT_MAX);
+
+ bch_cut_front(l, r);
+ return false;
+ }
+
+ if (KEY_CSUM(l)) {
+ if (KEY_CSUM(r))
+ l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+ else
+ SET_KEY_CSUM(l, 0);
+ }
+
+ SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+ SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+ return true;
+}
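
(Worked example with invented numbers: if l covers sectors [1000,1008) and r covers [1008,1016) of the same inode, and r's pointer offset equals l's pointer offset plus 8 sectors within the same bucket, bch_extent_merge() folds them into a single 16-sector key ending at offset 1016. If the combined size would exceed USHRT_MAX, l is instead grown to USHRT_MAX sectors, r has its front cut to start where l now ends, and the merge reports failure.)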
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+ .sort_cmp = bch_extent_sort_cmp,
+ .sort_fixup = bch_extent_sort_fixup,
+ .insert_fixup = bch_extent_insert_fixup,
+ .key_invalid = bch_extent_invalid,
+ .key_bad = bch_extent_bad,
+ .key_merge = bch_extent_merge,
+ .key_to_text = bch_extent_to_text,
+ .key_dump = bch_bkey_dump,
+ .is_extents = true,
+};
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644
index 000000000000..e4e23409782d
--- /dev/null
+++ b/drivers/md/bcache/extents.h
@@ -0,0 +1,13 @@
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *, size_t, const struct bkey *);
+bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+
+#endif /* _BCACHE_EXTENTS_H */
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
#include <linux/blkdev.h>
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
- struct bio *p = bio->bi_private;
-
- bio_endio(p, error);
- bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
- if (bio->bi_idx) {
- struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
- memcpy(clone->bi_io_vec,
- bio_iovec(bio),
- bio_segments(bio) * sizeof(struct bio_vec));
-
- clone->bi_sector = bio->bi_sector;
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = bio_segments(bio);
- clone->bi_size = bio->bi_size;
-
- clone->bi_private = bio;
- clone->bi_end_io = bch_bi_idx_hack_endio;
-
- bio = clone;
- }
-
- /*
- * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
- * bios might have had more than that (before we split them per device
- * limitations).
- *
- * To be taken out once immutable bvec stuff is in.
- */
- bio->bi_max_vecs = bio->bi_vcnt;
-
- generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
- gfp_t gfp, struct bio_set *bs)
-{
- unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
- struct bio_vec *bv;
- struct bio *ret = NULL;
-
- BUG_ON(sectors <= 0);
-
- if (sectors >= bio_sectors(bio))
- return bio;
-
- if (bio->bi_rw & REQ_DISCARD) {
- ret = bio_alloc_bioset(gfp, 1, bs);
- if (!ret)
- return NULL;
- idx = 0;
- goto out;
- }
-
- bio_for_each_segment(bv, bio, idx) {
- vcnt = idx - bio->bi_idx;
-
- if (!nbytes) {
- ret = bio_alloc_bioset(gfp, vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- break;
- } else if (nbytes < bv->bv_len) {
- ret = bio_alloc_bioset(gfp, ++vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
- bv->bv_offset += nbytes;
- bv->bv_len -= nbytes;
- break;
- }
-
- nbytes -= bv->bv_len;
- }
-out:
- ret->bi_bdev = bio->bi_bdev;
- ret->bi_sector = bio->bi_sector;
- ret->bi_size = sectors << 9;
- ret->bi_rw = bio->bi_rw;
- ret->bi_vcnt = vcnt;
- ret->bi_max_vecs = vcnt;
-
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
- bio->bi_idx = idx;
-
- if (bio_integrity(bio)) {
- if (bio_integrity_clone(ret, bio, gfp)) {
- bio_put(ret);
- return NULL;
- }
-
- bio_integrity_trim(ret, 0, bio_sectors(ret));
- bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
- }
-
- return ret;
-}
-
static unsigned bch_bio_max_sectors(struct bio *bio)
{
- unsigned ret = bio_sectors(bio);
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q));
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned ret = 0, seg = 0;
if (bio->bi_rw & REQ_DISCARD)
- return min(ret, q->limits.max_discard_sectors);
-
- if (bio_segments(bio) > max_segments ||
- q->merge_bvec_fn) {
- struct bio_vec *bv;
- int i, seg = 0;
-
- ret = 0;
-
- bio_for_each_segment(bv, bio, i) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == max_segments)
- break;
+ return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+ bio_for_each_segment(bv, bio, iter) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = ret << 9,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (seg == min_t(unsigned, BIO_MAX_PAGES,
+ queue_max_segments(q)))
+ break;
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
- break;
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+ break;
- seg++;
- ret += bv->bv_len >> 9;
- }
+ seg++;
+ ret += bv.bv_len >> 9;
}
ret = min(ret, queue_max_sectors(q));
WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+ ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
return ret;
}
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
s->bio->bi_end_io = s->bi_end_io;
s->bio->bi_private = s->bi_private;
- bio_endio(s->bio, 0);
+ bio_endio_nodec(s->bio, 0);
closure_debug_destroy(&s->cl);
mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
bio_get(bio);
do {
- n = bch_bio_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
+ n = bio_next_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = &s->cl;
closure_get(&s->cl);
- bch_generic_make_request_hack(n);
+ generic_make_request(n);
} while (n != bio);
continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
- bch_generic_make_request_hack(bio);
+ generic_make_request(bio);
}
/* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- bio->bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..59e82021b5bb 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -44,17 +44,17 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
closure_init_stack(&cl);
- pr_debug("reading %llu", (uint64_t) bucket);
+ pr_debug("reading %u", bucket_index);
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
- len = min_t(unsigned, left, PAGE_SECTORS * 8);
+ len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
- bio->bi_sector = bucket + offset;
+ bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_rw = READ;
- bio->bi_size = len << 9;
+ bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
@@ -74,19 +74,28 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(&ca->sb))
+ if (j->magic != jset_magic(&ca->sb)) {
+ pr_debug("%u: bad magic", bucket_index);
return ret;
+ }
- if (bytes > left << 9)
+ if (bytes > left << 9 ||
+ bytes > PAGE_SIZE << JSET_BITS) {
+ pr_info("%u: too big, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
if (bytes > len << 9)
goto reread;
- if (j->csum != csum_set(j))
+ if (j->csum != csum_set(j)) {
+ pr_info("%u: bad csum, %zu bytes, offset %u",
+ bucket_index, bytes, offset);
return ret;
+ }
- blocks = set_blocks(j, ca->set);
+ blocks = set_blocks(j, block_bytes(ca->set));
while (!list_empty(list)) {
i = list_first_entry(list,
@@ -228,8 +237,14 @@ bsearch:
for (i = 0; i < ca->sb.njournal_buckets; i++)
if (ja->seq[i] > seq) {
seq = ja->seq[i];
- ja->cur_idx = ja->discard_idx =
- ja->last_idx = i;
+ /*
+ * When journal_reclaim() goes to allocate for
+ * the first time, it'll use the bucket after
+ * ja->cur_idx
+ */
+ ja->cur_idx = i;
+ ja->last_idx = ja->discard_idx = (i + 1) %
+ ca->sb.njournal_buckets;
}
}
@@ -275,20 +290,15 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
unsigned j;
- for (j = 0; j < KEY_PTRS(k); j++) {
- struct bucket *g = PTR_BUCKET(c, k, j);
- atomic_inc(&g->pin);
+ for (j = 0; j < KEY_PTRS(k); j++)
+ if (ptr_available(c, k, j))
+ atomic_inc(&PTR_BUCKET(c, k, j)->pin);
- if (g->prio == BTREE_PRIO &&
- !ptr_stale(c, k, j))
- g->prio = INITIAL_PRIO;
- }
-
- __bch_btree_mark_key(c, 0, k);
+ bch_initial_mark_key(c, 0, k);
}
}
}
@@ -303,8 +313,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
struct keylist keylist;
- bch_keylist_init(&keylist);
-
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -313,12 +321,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
n, i->j.seq - 1, start, end);
for (k = i->j.start;
- k < end(&i->j);
+ k < bset_bkey_last(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(keylist.top, k);
- bch_keylist_push(&keylist);
+ bch_keylist_init_single(&keylist, k);
ret = bch_btree_insert(s, &keylist, i->pin, NULL);
if (ret)
@@ -374,16 +381,15 @@ retry:
b = best;
if (b) {
- rw_lock(true, b, b->level);
-
+ mutex_lock(&b->write_lock);
if (!btree_current_write(b)->journal) {
- rw_unlock(true, b);
+ mutex_unlock(&b->write_lock);
/* We raced */
goto retry;
}
- bch_btree_node_write(b, NULL);
- rw_unlock(true, b);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
}
}
@@ -437,13 +443,13 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
- bio->bi_sector = bucket_to_sector(ca->set,
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
closure_get(&ca->set->cl);
@@ -527,6 +533,7 @@ void bch_journal_next(struct journal *j)
atomic_set(&fifo_back(&j->pin), 1);
j->cur->data->seq = ++j->seq;
+ j->cur->dirty = false;
j->cur->need_write = false;
j->cur->data->keys = 0;
@@ -555,6 +562,14 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, system_wq);
}
+static void journal_write_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+ c->journal.io_in_flight = 0;
+ spin_unlock(&c->journal.lock);
+}
+
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
@@ -562,22 +577,15 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
- unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+ unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+ c->sb.block_size;
struct bio *bio;
struct bio_list list;
bio_list_init(&list);
if (!w->need_write) {
- /*
- * XXX: have to unlock closure before we unlock journal lock,
- * else we race with bch_journal(). But this way we race
- * against cache set unregister. Doh.
- */
- set_closure_fn(cl, NULL, NULL);
- closure_sub(cl, CLOSURE_RUNNING + 1);
- spin_unlock(&c->journal.lock);
- return;
+ closure_return_with_destructor(cl, journal_write_unlock);
} else if (journal_full(&c->journal)) {
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -586,7 +594,7 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write, system_wq);
}
- c->journal.blocks_free -= set_blocks(w->data, c);
+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
w->data->btree_level = c->root->level;
@@ -608,10 +616,10 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio);
- bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
- bio->bi_size = sectors << 9;
+ bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
@@ -653,10 +661,12 @@ static void journal_try_write(struct cache_set *c)
w->need_write = true;
- if (closure_trylock(cl, &c->cl))
- journal_write_unlocked(cl);
- else
+ if (!c->journal.io_in_flight) {
+ c->journal.io_in_flight = 1;
+ closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+ } else {
spin_unlock(&c->journal.lock);
+ }
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -664,6 +674,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
{
size_t sectors;
struct closure cl;
+ bool wait = false;
closure_init_stack(&cl);
@@ -673,16 +684,19 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- c) * c->sb.block_size;
+ block_bytes(c)) * c->sb.block_size;
if (sectors <= min_t(size_t,
c->journal.blocks_free * c->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
- /* XXX: tracepoint */
+ if (wait)
+ closure_wait(&c->journal.wait, &cl);
+
if (!journal_full(&c->journal)) {
- trace_bcache_journal_entry_full(c);
+ if (wait)
+ trace_bcache_journal_entry_full(c);
/*
* XXX: If we were inserting so many keys that they
@@ -692,12 +706,11 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
*/
BUG_ON(!w->data->keys);
- closure_wait(&w->wait, &cl);
journal_try_write(c); /* unlocks */
} else {
- trace_bcache_journal_full(c);
+ if (wait)
+ trace_bcache_journal_full(c);
- closure_wait(&c->journal.wait, &cl);
journal_reclaim(c);
spin_unlock(&c->journal.lock);
@@ -706,6 +719,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
closure_sync(&cl);
spin_lock(&c->journal.lock);
+ wait = true;
}
}
@@ -715,7 +729,10 @@ static void journal_write_work(struct work_struct *work)
struct cache_set,
journal.work);
spin_lock(&c->journal.lock);
- journal_try_write(c);
+ if (c->journal.cur->dirty)
+ journal_try_write(c);
+ else
+ spin_unlock(&c->journal.lock);
}
/*
@@ -736,7 +753,7 @@ atomic_t *bch_journal(struct cache_set *c,
w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
w->data->keys += bch_keylist_nkeys(keys);
ret = &fifo_back(&c->journal.pin);
@@ -745,7 +762,8 @@ atomic_t *bch_journal(struct cache_set *c,
if (parent) {
closure_wait(&w->wait, parent);
journal_try_write(c);
- } else if (!w->need_write) {
+ } else if (!w->dirty) {
+ w->dirty = true;
schedule_delayed_work(&c->journal.work,
msecs_to_jiffies(c->journal_delay_ms));
spin_unlock(&c->journal.lock);
@@ -780,7 +798,6 @@ int bch_journal_alloc(struct cache_set *c)
{
struct journal *j = &c->journal;
- closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index a6472fda94b2..e3c39457afbb 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -95,6 +95,7 @@ struct journal_write {
struct cache_set *c;
struct closure_waitlist wait;
+ bool dirty;
bool need_write;
};
@@ -104,6 +105,7 @@ struct journal {
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
+ int io_in_flight;
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..cd7490311e51 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -24,12 +24,10 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
moving_gc_keys);
unsigned i;
- for (i = 0; i < KEY_PTRS(k); i++) {
- struct bucket *g = PTR_BUCKET(c, k, i);
-
- if (GC_MOVE(g))
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i) &&
+ GC_MOVE(PTR_BUCKET(c, k, i)))
return true;
- }
return false;
}
@@ -86,7 +84,7 @@ static void moving_init(struct moving_io *io)
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
bio->bi_private = &io->cl;
@@ -102,7 +100,7 @@ static void write_moving(struct closure *cl)
if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
op->write_prio = 1;
op->bio = &io->bio.bio;
@@ -115,7 +113,7 @@ static void write_moving(struct closure *cl)
closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, system_wq);
+ continue_at(cl, write_moving_finish, op->wq);
}
static void read_moving_submit(struct closure *cl)
@@ -125,7 +123,7 @@ static void read_moving_submit(struct closure *cl)
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
- continue_at(cl, write_moving, system_wq);
+ continue_at(cl, write_moving, io->op.wq);
}
static void read_moving(struct cache_set *c)
@@ -160,6 +158,7 @@ static void read_moving(struct cache_set *c)
io->w = w;
io->op.inode = KEY_INODE(&w->key);
io->op.c = c;
+ io->op.wq = c->moving_gc_wq;
moving_init(io);
bio = &io->bio.bio;
@@ -211,12 +210,15 @@ void bch_moving_gc(struct cache_set *c)
for_each_cache(ca, c, i) {
unsigned sectors_to_move = 0;
unsigned reserve_sectors = ca->sb.bucket_size *
- min(fifo_used(&ca->free), ca->free.size / 2);
+ fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;
for_each_bucket(b, ca) {
- if (!GC_SECTORS_USED(b))
+ if (GC_MARK(b) == GC_MARK_METADATA ||
+ !GC_SECTORS_USED(b) ||
+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+ atomic_read(&b->pin))
continue;
if (!heap_full(&ca->heap)) {
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..15fff4f68a7c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -12,11 +12,9 @@
#include "request.h"
#include "writeback.h"
-#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
-#include "blk-cgroup.h"
#include <trace/events/bcache.h>
@@ -27,184 +25,26 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-/* Cgroup interface */
-
-#ifdef CONFIG_CGROUP_BCACHE
-static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
-
-static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
-{
- struct cgroup_subsys_state *css;
- return cgroup &&
- (css = cgroup_subsys_state(cgroup, bcache_subsys_id))
- ? container_of(css, struct bch_cgroup, css)
- : &bcache_default_cgroup;
-}
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
-{
- struct cgroup_subsys_state *css = bio->bi_css
- ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
- : task_subsys_state(current, bcache_subsys_id);
-
- return css
- ? container_of(css, struct bch_cgroup, css)
- : &bcache_default_cgroup;
-}
-
-static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes, loff_t *ppos)
-{
- char tmp[1024];
- int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
- cgroup_to_bcache(cgrp)->cache_mode + 1);
-
- if (len < 0)
- return len;
-
- return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
-}
-
-static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
-{
- int v = bch_read_string_list(buf, bch_cache_modes);
- if (v < 0)
- return v;
-
- cgroup_to_bcache(cgrp)->cache_mode = v - 1;
- return 0;
-}
-
-static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
-{
- return cgroup_to_bcache(cgrp)->verify;
-}
-
-static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
- cgroup_to_bcache(cgrp)->verify = val;
- return 0;
-}
-
-static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_hits);
-}
-
-static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_misses);
-}
-
-static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
- struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_bypass_hits);
-}
-
-static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
- struct cftype *cft)
-{
- struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
- return atomic_read(&bcachecg->stats.cache_bypass_misses);
-}
-
-static struct cftype bch_files[] = {
- {
- .name = "cache_mode",
- .read = cache_mode_read,
- .write_string = cache_mode_write,
- },
- {
- .name = "verify",
- .read_u64 = bch_verify_read,
- .write_u64 = bch_verify_write,
- },
- {
- .name = "cache_hits",
- .read_u64 = bch_cache_hits_read,
- },
- {
- .name = "cache_misses",
- .read_u64 = bch_cache_misses_read,
- },
- {
- .name = "cache_bypass_hits",
- .read_u64 = bch_cache_bypass_hits_read,
- },
- {
- .name = "cache_bypass_misses",
- .read_u64 = bch_cache_bypass_misses_read,
- },
- { } /* terminate */
-};
-
-static void init_bch_cgroup(struct bch_cgroup *cg)
-{
- cg->cache_mode = -1;
-}
-
-static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
-{
- struct bch_cgroup *cg;
-
- cg = kzalloc(sizeof(*cg), GFP_KERNEL);
- if (!cg)
- return ERR_PTR(-ENOMEM);
- init_bch_cgroup(cg);
- return &cg->css;
-}
-
-static void bcachecg_destroy(struct cgroup *cgroup)
-{
- struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
- kfree(cg);
-}
-
-struct cgroup_subsys bcache_subsys = {
- .create = bcachecg_create,
- .destroy = bcachecg_destroy,
- .subsys_id = bcache_subsys_id,
- .name = "bcache",
- .module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(bcache_subsys);
-#endif
-
static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
-#ifdef CONFIG_CGROUP_BCACHE
- int r = bch_bio_to_cgroup(bio)->cache_mode;
- if (r >= 0)
- return r;
-#endif
return BDEV_CACHE_MODE(&dc->sb);
}
static bool verify(struct cached_dev *dc, struct bio *bio)
{
-#ifdef CONFIG_CGROUP_BCACHE
- if (bch_bio_to_cgroup(bio)->verify)
- return true;
-#endif
return dc->verify;
}
static void bio_csum(struct bio *bio, struct bkey *k)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
uint64_t csum = 0;
- int i;
- bio_for_each_segment(bv, bio, i) {
- void *d = kmap(bv->bv_page) + bv->bv_offset;
- csum = bch_crc64_update(csum, d, bv->bv_len);
- kunmap(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = kmap(bv.bv_page) + bv.bv_offset;
+ csum = bch_crc64_update(csum, d, bv.bv_len);
+ kunmap(bv.bv_page);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -248,38 +88,56 @@ static void bch_data_insert_keys(struct closure *cl)
atomic_dec_bug(journal_ref);
if (!op->insert_data_done)
- continue_at(cl, bch_data_insert_start, bcache_wq);
+ continue_at(cl, bch_data_insert_start, op->wq);
bch_keylist_free(&op->insert_keys);
closure_return(cl);
}
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+ struct cache_set *c)
+{
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+
+ /*
+ * The journalling code doesn't handle the case where the keys to insert
+ * are bigger than an empty write: if we just return -ENOMEM here,
+ * bio_insert() and bio_invalidate() will insert the keys created so far
+ * and finish the rest when the keylist is empty.
+ */
+ if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+ return -ENOMEM;
+
+ return __bch_keylist_realloc(l, u64s);
+}
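
(In effect, with hypothetical round numbers: on a cache set with 4 KiB blocks, a keylist needing more u64s than fit in one block after the struct jset header is refused with -ENOMEM, so the insert path flushes the keys accumulated so far and finishes the rest once the keylist has been emptied.)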
+
static void bch_data_invalidate(struct closure *cl)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio;
pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1));
- if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+ if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
goto out;
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
- &KEY(op->inode, bio->bi_sector, sectors));
+ &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
}
op->insert_data_done = true;
bio_put(bio);
out:
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
@@ -322,7 +180,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
if (op->writeback)
op->error = error;
else if (!op->replace)
- set_closure_fn(cl, bch_data_insert_error, bcache_wq);
+ set_closure_fn(cl, bch_data_insert_error, op->wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -335,14 +193,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
@@ -356,21 +214,21 @@ static void bch_data_insert_start(struct closure *cl)
/* 1 for the device pointer and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
- 1 + (op->csum ? 1 : 0),
+ 3 + (op->csum ? 1 : 0),
op->c))
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
- SET_KEY_OFFSET(k, bio->bi_sector);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
op->write_point, op->write_prio,
op->writeback))
goto err;
- n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+ n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
@@ -395,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl)
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
BUG_ON(op->writeback);
@@ -424,7 +282,7 @@ err:
bio_put(bio);
if (!bch_keylist_empty(&op->insert_keys))
- continue_at(cl, bch_data_insert_keys, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, op->wq);
else
closure_return(cl);
}
@@ -521,7 +379,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
(bio->bi_rw & REQ_WRITE)))
goto skip;
- if (bio->bi_sector & (c->sb.block_size - 1) ||
+ if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
goto skip;
@@ -545,8 +403,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
spin_lock(&dc->io_lock);
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
@@ -555,8 +413,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
add_sequential(task);
i->sequential = 0;
found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -596,16 +454,13 @@ struct search {
/* Stack frame for bio_complete */
struct closure cl;
- struct bcache_device *d;
-
struct bbio bio;
struct bio *orig_bio;
struct bio *cache_miss;
+ struct bcache_device *d;
unsigned insert_bio_sectors;
-
unsigned recoverable:1;
- unsigned unaligned_bvec:1;
unsigned write:1;
unsigned read_dirty_data:1;
@@ -630,7 +485,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
if (error)
s->iop.error = error;
- else if (ptr_stale(s->iop.c, &b->key, 0)) {
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(s->iop.c, &b->key, 0)) {
atomic_long_inc(&s->iop.c->cache_read_races);
s->iop.error = -EINTR;
}
@@ -649,15 +505,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct bkey *bio_key;
unsigned ptr;
- if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
- KEY_START(k) > bio->bi_sector) {
+ KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
- KEY_START(k) - bio->bi_sector)
+ KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +535,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
if (KEY_DIRTY(k))
s->read_dirty_data = true;
- n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector),
- GFP_NOIO, s->d->bio_split);
+ n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+ GFP_NOIO, s->d->bio_split);
bio_key = &container_of(n, struct bbio, bio)->key;
bch_bkey_copy_single_ptr(bio_key, k, ptr);
- bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
n->bi_end_io = bch_cache_read_endio;
@@ -711,10 +567,13 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ int ret;
- int ret = bch_btree_map_keys(&s->op, s->iop.c,
- &KEY(s->iop.inode, bio->bi_sector, 0),
- cache_lookup_fn, MAP_END_KEY);
+ bch_btree_op_init(&s->op, -1);
+
+ ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
if (ret == -EAGAIN)
continue_at(cl, cache_lookup, bcache_wq);
@@ -755,13 +614,15 @@ static void bio_complete(struct search *s)
}
}
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
struct bio *bio = &s->bio.bio;
- memcpy(bio, s->orig_bio, sizeof(struct bio));
+ bio_init(bio);
+ __bio_clone_fast(bio, orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
+
atomic_set(&bio->bi_cnt, 3);
}
@@ -773,43 +634,37 @@ static void search_free(struct closure *cl)
if (s->iop.bio)
bio_put(s->iop.bio);
- if (s->unaligned_bvec)
- mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
closure_debug_destroy(cl);
mempool_free(s, s->d->c->search);
}
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+ struct bcache_device *d)
{
struct search *s;
- struct bio_vec *bv;
s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, iop.insert_keys));
- __closure_init(&s->cl, NULL);
+ closure_init(&s->cl, NULL);
+ do_bio_hook(s, bio);
- s->iop.inode = d->id;
- s->iop.c = d->c;
- s->d = d;
- s->op.lock = -1;
- s->iop.write_point = hash_long((unsigned long) current, 16);
s->orig_bio = bio;
- s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->cache_miss = NULL;
+ s->d = d;
s->recoverable = 1;
+ s->write = (bio->bi_rw & REQ_WRITE) != 0;
+ s->read_dirty_data = 0;
s->start_time = jiffies;
- do_bio_hook(s);
-
- if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
- bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
- memcpy(bv, bio_iovec(bio),
- sizeof(struct bio_vec) * bio_segments(bio));
- s->bio.bio.bi_io_vec = bv;
- s->unaligned_bvec = 1;
- }
+ s->iop.c = d->c;
+ s->iop.bio = NULL;
+ s->iop.inode = d->id;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
+ s->iop.write_prio = 0;
+ s->iop.error = 0;
+ s->iop.flags = 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->iop.wq = bcache_wq;
return s;
}
@@ -849,26 +704,13 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- struct bio_vec *bv;
- int i;
if (s->recoverable) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
s->iop.error = 0;
- bv = s->bio.bio.bi_io_vec;
- do_bio_hook(s);
- s->bio.bio.bi_io_vec = bv;
-
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
- else
- memcpy(s->bio.bio.bi_io_vec,
- bio_iovec(s->orig_bio),
- sizeof(struct bio_vec) *
- bio_segments(s->orig_bio));
+ do_bio_hook(s, s->orig_bio);
/* XXX: invalidate cache */
@@ -893,9 +735,9 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
- s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
- s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +746,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable &&
- !s->unaligned_bvec && !s->read_dirty_data)
+ if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -945,7 +786,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *miss, *cache_bio;
if (s->cache_miss || s->iop.bypass) {
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
}
@@ -959,7 +800,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
s->iop.replace_key = KEY(s->iop.inode,
- bio->bi_sector + s->insert_bio_sectors,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
s->insert_bio_sectors);
ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +809,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +820,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (!cache_bio)
goto out_submit;
- cache_bio->bi_sector = miss->bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
- cache_bio->bi_size = s->insert_bio_sectors << 9;
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
cache_bio->bi_private = &s->cl;
@@ -1031,7 +872,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +928,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
} else {
- s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
+ s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
closure_bio_submit(bio, cl, s->d);
}
@@ -1126,13 +966,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
part_stat_unlock();
bio->bi_bdev = dc->bdev;
- bio->bi_sector += dc->sb.data_offset;
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1204,24 +1044,15 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- struct bio_vec *bv;
- int i;
-
- /* Zero fill bio */
+ unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
- bio_for_each_segment(bv, bio, i) {
- unsigned j = min(bv->bv_len >> 9, sectors);
+ swap(bio->bi_iter.bi_size, bytes);
+ zero_fill_bio(bio);
+ swap(bio->bi_iter.bi_size, bytes);
- void *p = kmap(bv->bv_page);
- memset(p + bv->bv_offset, 0, j << 9);
- kunmap(bv->bv_page);
+ bio_advance(bio, bytes);
- sectors -= j;
- }
-
- bio_advance(bio, min(sectors << 9, bio->bi_size));
-
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
return MAP_DONE;
return MAP_CONTINUE;
@@ -1255,7 +1086,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1265,7 +1096,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
bcache_wq);
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
@@ -1314,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
void bch_request_exit(void)
{
-#ifdef CONFIG_CGROUP_BCACHE
- cgroup_unload_subsys(&bcache_subsys);
-#endif
if (bch_search_cache)
kmem_cache_destroy(bch_search_cache);
}
@@ -1327,11 +1155,5 @@ int __init bch_request_init(void)
if (!bch_search_cache)
return -ENOMEM;
-#ifdef CONFIG_CGROUP_BCACHE
- cgroup_load_subsys(&bcache_subsys);
- init_bch_cgroup(&bcache_default_cgroup);
-
- cgroup_add_cftypes(&bcache_subsys, bch_files);
-#endif
return 0;
}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 2cd65bf073c2..1ff36875c2b3 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -1,29 +1,33 @@
#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_
-#include <linux/cgroup.h>
-
struct data_insert_op {
struct closure cl;
struct cache_set *c;
struct bio *bio;
+ struct workqueue_struct *wq;
unsigned inode;
uint16_t write_point;
uint16_t write_prio;
short error;
- unsigned bypass:1;
- unsigned writeback:1;
- unsigned flush_journal:1;
- unsigned csum:1;
+ union {
+ uint16_t flags;
+
+ struct {
+ unsigned bypass:1;
+ unsigned writeback:1;
+ unsigned flush_journal:1;
+ unsigned csum:1;
- unsigned replace:1;
- unsigned replace_collision:1;
+ unsigned replace:1;
+ unsigned replace_collision:1;
- unsigned insert_data_done:1;
+ unsigned insert_data_done:1;
+ };
+ };
- /* Anything past this point won't get zeroed in search_alloc() */
struct keylist insert_keys;
BKEY_PADDED(replace_key);
};
@@ -36,20 +40,4 @@ void bch_flash_dev_request_init(struct bcache_device *d);
extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
-struct bch_cgroup {
-#ifdef CONFIG_CGROUP_BCACHE
- struct cgroup_subsys_state css;
-#endif
- /*
- * We subtract one from the index into bch_cache_modes[], so that
- * default == -1; this makes it so the rest match up with d->cache_mode,
- * and we use d->cache_mode if cgrp->cache_mode < 0
- */
- short cache_mode;
- bool verify;
- struct cache_stat_collector stats;
-};
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
-
#endif /* _BCACHE_REQUEST_H_ */
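A note on the request.h change above: overlaying the data_insert_op flag bits with a 16-bit word in an anonymous union is what lets search_alloc() reset every flag with the single "s->iop.flags = 0" assignment seen earlier in this diff. The stand-alone sketch below is a hypothetical userspace illustration of the same trick; none of these names are kernel symbols.

/*
 * Mock of the flag-union pattern from struct data_insert_op above:
 * writing the 16-bit "flags" word clears all of the bitfields at once.
 */
#include <stdint.h>
#include <stdio.h>

struct insert_flags {
	union {
		uint16_t flags;
		struct {
			unsigned bypass:1;
			unsigned writeback:1;
			unsigned flush_journal:1;
			unsigned csum:1;
			unsigned replace:1;
			unsigned replace_collision:1;
			unsigned insert_data_done:1;
		};
	};
};

int main(void)
{
	struct insert_flags op = { .flags = 0 };

	op.bypass = 1;
	op.flush_journal = 1;
	printf("flags = 0x%x\n", op.flags);	/* two bits set; exact layout is ABI-dependent */

	op.flags = 0;				/* clears every flag in one store */
	printf("bypass = %u\n", op.bypass);	/* 0 */
	return 0;
}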
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 84d0782f702e..0ca072c20d0d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -201,9 +201,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass);
-#ifdef CONFIG_CGROUP_BCACHE
- mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
-#endif
}
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..926ded8ccbf5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -9,6 +9,7 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
+#include "extents.h"
#include "request.h"
#include "writeback.h"
@@ -225,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
struct cached_dev *dc = bio->bi_private;
/* XXX: error checking */
- closure_put(&dc->sb_write.cl);
+ closure_put(&dc->sb_write);
}
static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -233,9 +234,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
unsigned i;
- bio->bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
- bio->bi_size = SB_SIZE;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -263,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
submit_bio(REQ_WRITE, bio);
}
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+ struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+ up(&dc->sb_write_mutex);
+}
+
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
- struct closure *cl = &dc->sb_write.cl;
+ struct closure *cl = &dc->sb_write;
struct bio *bio = &dc->sb_bio;
- closure_lock(&dc->sb_write, parent);
+ down(&dc->sb_write_mutex);
+ closure_init(cl, parent);
bio_reset(bio);
bio->bi_bdev = dc->bdev;
@@ -278,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_get(cl);
__write_super(&dc->sb, bio);
- closure_return(cl);
+ closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)
struct cache *ca = bio->bi_private;
bch_count_io_errors(ca, error, "writing superblock");
- closure_put(&ca->set->sb_write.cl);
+ closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+ up(&c->sb_write_mutex);
}
void bcache_write_super(struct cache_set *c)
{
- struct closure *cl = &c->sb_write.cl;
+ struct closure *cl = &c->sb_write;
struct cache *ca;
unsigned i;
- closure_lock(&c->sb_write, &c->cl);
+ down(&c->sb_write_mutex);
+ closure_init(cl, &c->cl);
c->sb.seq++;
@@ -317,7 +334,7 @@ void bcache_write_super(struct cache_set *c)
__write_super(&ca->sb, bio);
}
- closure_return(cl);
+ closure_return_with_destructor(cl, bcache_write_super_unlock);
}
/* UUID io */
@@ -325,29 +342,37 @@ void bcache_write_super(struct cache_set *c)
static void uuid_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
cache_set_err_on(error, c, "accessing uuids");
bch_bbio_free(bio, c);
closure_put(cl);
}
+static void uuid_io_unlock(struct closure *cl)
+{
+ struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+ up(&c->uuid_write_mutex);
+}
+
static void uuid_io(struct cache_set *c, unsigned long rw,
struct bkey *k, struct closure *parent)
{
- struct closure *cl = &c->uuid_write.cl;
+ struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
unsigned i;
char buf[80];
BUG_ON(!parent);
- closure_lock(&c->uuid_write, parent);
+ down(&c->uuid_write_mutex);
+ closure_init(cl, parent);
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = KEY_SIZE(k) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
@@ -359,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
break;
}
- bch_bkey_to_text(buf, sizeof(buf), k);
+ bch_extent_to_text(buf, sizeof(buf), k);
pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
@@ -368,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
u - c->uuids, u->uuid, u->label,
u->first_reg, u->last_reg, u->invalidated);
- closure_return(cl);
+ closure_return_with_destructor(cl, uuid_io_unlock);
}
static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
struct bkey *k = &j->uuid_bucket;
- if (bch_btree_ptr_invalid(c, k))
+ if (__bch_btree_ptr_invalid(c, k))
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -503,10 +528,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_init_stack(cl);
- bio->bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
@@ -516,9 +541,6 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_sync(cl);
}
-#define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \
- fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
-
void bch_prio_write(struct cache *ca)
{
int i;
@@ -529,17 +551,13 @@ void bch_prio_write(struct cache *ca)
lockdep_assert_held(&ca->set->bucket_lock);
- for (b = ca->buckets;
- b < ca->buckets + ca->sb.nbuckets; b++)
- b->disk_gen = b->gen;
-
ca->disk_buckets->seq++;
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
- pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
- fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+ //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+ // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
@@ -558,7 +576,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -576,14 +594,17 @@ void bch_prio_write(struct cache *ca)
mutex_lock(&ca->set->bucket_lock);
- ca->need_save_prio = 0;
-
/*
* Don't want the old priorities to get garbage collected until after we
* finish writing the new ones, and they're journalled
*/
- for (i = 0; i < prio_buckets(ca); i++)
+ for (i = 0; i < prio_buckets(ca); i++) {
+ if (ca->prio_last_buckets[i])
+ __bch_bucket_free(ca,
+ &ca->buckets[ca->prio_last_buckets[i]]);
+
ca->prio_last_buckets[i] = ca->prio_buckets[i];
+ }
}
static void prio_read(struct cache *ca, uint64_t bucket)
@@ -614,7 +635,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
}
b->prio = le16_to_cpu(d->prio);
- b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
+ b->gen = b->last_gc = d->gen;
}
}
@@ -739,8 +760,6 @@ static void bcache_device_free(struct bcache_device *d)
}
bio_split_pool_free(&d->bio_split_hook);
- if (d->unaligned_bvec)
- mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +812,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
- sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1))) {
ida_simple_remove(&bcache_minor, minor);
@@ -822,6 +839,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
q->limits.max_segment_size = UINT_MAX;
q->limits.max_segments = BIO_MAX_PAGES;
q->limits.max_discard_sectors = UINT_MAX;
+ q->limits.discard_granularity = 512;
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
q->limits.physical_block_size = block_size;
@@ -1102,7 +1120,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
- closure_init_unlocked(&dc->sb_write);
+ sema_init(&dc->sb_write_mutex, 1);
INIT_LIST_HEAD(&dc->io_lru);
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1114,6 +1132,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
+ dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+ if (dc->disk.stripe_size)
+ dc->partial_stripes_expensive =
+ q->limits.raid_partial_stripes_expensive;
+
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
if (ret)
@@ -1325,9 +1349,11 @@ static void cache_set_free(struct closure *cl)
if (ca)
kobject_put(&ca->kobj);
+ bch_bset_sort_state_free(&c->sort);
free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
- free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
+ if (c->moving_gc_wq)
+ destroy_workqueue(c->moving_gc_wq);
if (c->bio_split)
bioset_free(c->bio_split);
if (c->fill_iter)
@@ -1368,14 +1394,21 @@ static void cache_set_flush(struct closure *cl)
list_add(&c->root->list, &c->btree_cache);
/* Should skip this if we're unregistering because of an error */
- list_for_each_entry(b, &c->btree_cache, list)
+ list_for_each_entry(b, &c->btree_cache, list) {
+ mutex_lock(&b->write_lock);
if (btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+ }
for_each_cache(ca, c, i)
if (ca->alloc_thread)
kthread_stop(ca->alloc_thread);
+ cancel_delayed_work_sync(&c->journal.work);
+ /* flush last journal entry if needed */
+ c->journal.work.work.func(&c->journal.work.work);
+
closure_return(cl);
}
@@ -1451,25 +1484,20 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
- c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
+ c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
- c->sort_crit_factor = int_sqrt(c->btree_pages);
-
- closure_init_unlocked(&c->sb_write);
+ sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
- init_waitqueue_head(&c->try_wait);
+ init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
- closure_init_unlocked(&c->uuid_write);
- mutex_init(&c->sort_lock);
+ sema_init(&c->uuid_write_mutex, 1);
- spin_lock_init(&c->sort_time.lock);
spin_lock_init(&c->btree_gc_time.lock);
spin_lock_init(&c->btree_split_time.lock);
spin_lock_init(&c->btree_read_time.lock);
- spin_lock_init(&c->try_harder_time.lock);
bch_moving_init_cache_set(c);
@@ -1493,11 +1521,12 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bucket_pages(c))) ||
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+ !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
- bch_open_buckets_alloc(c))
+ bch_open_buckets_alloc(c) ||
+ bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
goto err;
c->congested_read_threshold_us = 2000;
@@ -1553,11 +1582,11 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root;
err = "bad btree root";
- if (bch_btree_ptr_invalid(c, k))
+ if (__bch_btree_ptr_invalid(c, k))
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, k, j->btree_level, true);
+ c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root))
goto err;
@@ -1573,7 +1602,7 @@ static void run_cache_set(struct cache_set *c)
goto err;
bch_journal_mark(c, &journal);
- bch_btree_gc_finish(c);
+ bch_initial_gc_finish(c);
pr_debug("btree_check() done");
/*
@@ -1615,7 +1644,7 @@ static void run_cache_set(struct cache_set *c)
ca->sb.d[j] = ca->sb.first_bucket + j;
}
- bch_btree_gc_finish(c);
+ bch_initial_gc_finish(c);
err = "error starting allocator thread";
for_each_cache(ca, c, i)
@@ -1632,12 +1661,14 @@ static void run_cache_set(struct cache_set *c)
goto err;
err = "cannot allocate new btree root";
- c->root = bch_btree_node_alloc(c, 0, true);
+ c->root = bch_btree_node_alloc(c, NULL, 0);
if (IS_ERR_OR_NULL(c->root))
goto err;
+ mutex_lock(&c->root->write_lock);
bkey_copy_key(&c->root->key, &MAX_KEY);
bch_btree_node_write(c->root, &cl);
+ mutex_unlock(&c->root->write_lock);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1747,6 +1778,7 @@ err:
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
+ unsigned i;
if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1758,9 +1790,10 @@ void bch_cache_release(struct kobject *kobj)
vfree(ca->buckets);
free_heap(&ca->heap);
- free_fifo(&ca->unused);
free_fifo(&ca->free_inc);
- free_fifo(&ca->free);
+
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1786,12 +1819,13 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
- free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
- free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+ free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
- if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
+ if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+ !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
- !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
!(ca->buckets = vzalloc(sizeof(struct bucket) *
ca->sb.nbuckets)) ||
@@ -1806,13 +1840,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
- if (bch_cache_allocator_init(ca))
- goto err;
-
return 0;
-err:
- kobject_put(&ca->kobj);
- return -ENOMEM;
}
static void register_cache(struct cache_sb *sb, struct page *sb_page,
@@ -1841,7 +1869,10 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
+ mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
+ mutex_unlock(&bch_register_lock);
+
if (err)
goto err;
@@ -1903,8 +1934,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
- mutex_lock(&bch_register_lock);
-
if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
!(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
goto err;
@@ -1937,7 +1966,9 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!dc)
goto err_close;
+ mutex_lock(&bch_register_lock);
register_bdev(sb, sb_page, bdev, dc);
+ mutex_unlock(&bch_register_lock);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
@@ -1950,7 +1981,6 @@ out:
put_page(sb_page);
kfree(sb);
kfree(path);
- mutex_unlock(&bch_register_lock);
module_put(THIS_MODULE);
return ret;
@@ -2029,12 +2059,12 @@ static void bcache_exit(void)
{
bch_debug_exit();
bch_request_exit();
- bch_btree_exit();
if (bcache_kobj)
kobject_put(bcache_kobj);
if (bcache_wq)
destroy_workqueue(bcache_wq);
- unregister_blkdev(bcache_major, "bcache");
+ if (bcache_major)
+ unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
}
@@ -2058,7 +2088,6 @@ static int __init bcache_init(void)
if (!(bcache_wq = create_workqueue("bcache")) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
- bch_btree_init() ||
bch_request_init() ||
bch_debug_init(bcache_kobj))
goto err;
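The super.c hunks above repeatedly swap the old closure_lock()/closure_with_waitlist pattern for a plain semaphore plus a bare closure whose destructor releases the semaphore (sb_write, uuid_write). Below is a rough userspace mock of that ownership pattern, with sem_t standing in for the kernel semaphore and a hand-rolled refcount standing in for the closure; all names are hypothetical (compile with -pthread).

#include <semaphore.h>
#include <stdio.h>

struct mock_closure {
	int	remaining;			/* stands in for the closure's ref count */
	void	(*destructor)(struct mock_closure *);
};

static sem_t write_mutex;

static void put(struct mock_closure *cl)
{
	if (--cl->remaining == 0 && cl->destructor)
		cl->destructor(cl);		/* runs once the last I/O completes */
}

static void write_unlock(struct mock_closure *cl)
{
	sem_post(&write_mutex);			/* was: up(&sb_write_mutex) */
	printf("write finished, lock released\n");
}

static void do_write(struct mock_closure *cl)
{
	sem_wait(&write_mutex);			/* was: down(&sb_write_mutex) */
	cl->remaining = 1;
	cl->destructor = write_unlock;

	/* ...submit I/O; each completion would call put(cl)... */
	put(cl);				/* simulate the completion */
}

int main(void)
{
	struct mock_closure cl;

	sem_init(&write_mutex, 0, 1);
	do_write(&cl);
	return 0;
}

The point of the pattern is that the lock is dropped from the closure's destructor, i.e. only after the last outstanding I/O has completed, not when the submitting function returns.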
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a1f85612f0b3..b3ff57d61dde 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -54,7 +54,6 @@ sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
-sysfs_time_stats_attribute(try_harder, ms, us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
@@ -102,7 +101,6 @@ rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
@@ -401,81 +399,123 @@ static struct attribute *bch_flash_dev_files[] = {
};
KTYPE(bch_flash_dev);
-SHOW(__bch_cache_set)
+struct bset_stats_op {
+ struct btree_op op;
+ size_t nodes;
+ struct bset_stats stats;
+};
+
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
- unsigned root_usage(struct cache_set *c)
- {
- unsigned bytes = 0;
- struct bkey *k;
- struct btree *b;
- struct btree_iter iter;
+ struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
- goto lock_root;
+ op->nodes++;
+ bch_btree_keys_stats(&b->keys, &op->stats);
- do {
- rw_unlock(false, b);
-lock_root:
- b = c->root;
- rw_lock(false, b, b->level);
- } while (b != c->root);
+ return MAP_CONTINUE;
+}
+
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+ struct bset_stats_op op;
+ int ret;
+
+ memset(&op, 0, sizeof(op));
+ bch_btree_op_init(&op.op, -1);
+
+ ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
+ if (ret < 0)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE,
+ "btree nodes: %zu\n"
+ "written sets: %zu\n"
+ "unwritten sets: %zu\n"
+ "written key bytes: %zu\n"
+ "unwritten key bytes: %zu\n"
+ "floats: %zu\n"
+ "failed: %zu\n",
+ op.nodes,
+ op.stats.sets_written, op.stats.sets_unwritten,
+ op.stats.bytes_written, op.stats.bytes_unwritten,
+ op.stats.floats, op.stats.failed);
+}
+
+static unsigned bch_root_usage(struct cache_set *c)
+{
+ unsigned bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+ struct btree_iter iter;
- for_each_key_filter(b, k, &iter, bch_ptr_bad)
- bytes += bkey_bytes(k);
+ goto lock_root;
+ do {
rw_unlock(false, b);
+lock_root:
+ b = c->root;
+ rw_lock(false, b, b->level);
+ } while (b != c->root);
- return (bytes * 100) / btree_bytes(c);
- }
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+ bytes += bkey_bytes(k);
- size_t cache_size(struct cache_set *c)
- {
- size_t ret = 0;
- struct btree *b;
+ rw_unlock(false, b);
- mutex_lock(&c->bucket_lock);
- list_for_each_entry(b, &c->btree_cache, list)
- ret += 1 << (b->page_order + PAGE_SHIFT);
+ return (bytes * 100) / btree_bytes(c);
+}
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
+static size_t bch_cache_size(struct cache_set *c)
+{
+ size_t ret = 0;
+ struct btree *b;
- unsigned cache_max_chain(struct cache_set *c)
- {
- unsigned ret = 0;
- struct hlist_head *h;
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry(b, &c->btree_cache, list)
+ ret += 1 << (b->keys.page_order + PAGE_SHIFT);
- mutex_lock(&c->bucket_lock);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
- for (h = c->bucket_hash;
- h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
- h++) {
- unsigned i = 0;
- struct hlist_node *p;
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+ unsigned ret = 0;
+ struct hlist_head *h;
- hlist_for_each(p, h)
- i++;
+ mutex_lock(&c->bucket_lock);
- ret = max(ret, i);
- }
+ for (h = c->bucket_hash;
+ h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+ h++) {
+ unsigned i = 0;
+ struct hlist_node *p;
- mutex_unlock(&c->bucket_lock);
- return ret;
- }
+ hlist_for_each(p, h)
+ i++;
- unsigned btree_used(struct cache_set *c)
- {
- return div64_u64(c->gc_stats.key_bytes * 100,
- (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+ ret = max(ret, i);
}
- unsigned average_key_size(struct cache_set *c)
- {
- return c->gc_stats.nkeys
- ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
- : 0;
- }
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+}
+
+static unsigned bch_btree_used(struct cache_set *c)
+{
+ return div64_u64(c->gc_stats.key_bytes * 100,
+ (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+ return c->gc_stats.nkeys
+ ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+ : 0;
+}
+SHOW(__bch_cache_set)
+{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
sysfs_print(synchronous, CACHE_SYNC(&c->sb));
@@ -483,21 +523,20 @@ lock_root:
sysfs_hprint(bucket_size, bucket_bytes(c));
sysfs_hprint(block_size, block_bytes(c));
sysfs_print(tree_depth, c->root->level);
- sysfs_print(root_usage_percent, root_usage(c));
+ sysfs_print(root_usage_percent, bch_root_usage(c));
- sysfs_hprint(btree_cache_size, cache_size(c));
- sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+ sysfs_hprint(btree_cache_size, bch_cache_size(c));
+ sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
- sysfs_print_time_stats(&c->sort_time, btree_sort, ms, us);
+ sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
- sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);
- sysfs_print(btree_used_percent, btree_used(c));
+ sysfs_print(btree_used_percent, bch_btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(average_key_size, average_key_size(c));
+ sysfs_hprint(average_key_size, bch_average_key_size(c));
sysfs_print(cache_read_races,
atomic_long_read(&c->cache_read_races));
@@ -668,7 +707,6 @@ static struct attribute *bch_cache_set_internal_files[] = {
sysfs_time_stats_attribute_list(btree_split, sec, us)
sysfs_time_stats_attribute_list(btree_sort, ms, us)
sysfs_time_stats_attribute_list(btree_read, ms, us)
- sysfs_time_stats_attribute_list(try_harder, ms, us)
&sysfs_btree_nodes,
&sysfs_btree_used_percent,
@@ -711,9 +749,6 @@ SHOW(__bch_cache)
sysfs_print(io_errors,
atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
- sysfs_print(freelist_percent, ca->free.size * 100 /
- ((size_t) ca->sb.nbuckets));
-
if (attr == &sysfs_cache_replacement_policy)
return bch_snprint_string_list(buf, PAGE_SIZE,
cache_replacement_policies,
@@ -723,7 +758,9 @@ SHOW(__bch_cache)
int cmp(const void *l, const void *r)
{ return *((uint16_t *) r) - *((uint16_t *) l); }
- size_t n = ca->sb.nbuckets, i, unused, btree;
+ struct bucket *b;
+ size_t n = ca->sb.nbuckets, i;
+ size_t unused = 0, available = 0, dirty = 0, meta = 0;
uint64_t sum = 0;
/* Compute 31 quantiles */
uint16_t q[31], *p, *cached;
@@ -734,6 +771,17 @@ SHOW(__bch_cache)
return -ENOMEM;
mutex_lock(&ca->set->bucket_lock);
+ for_each_bucket(b, ca) {
+ if (!GC_SECTORS_USED(b))
+ unused++;
+ if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ available++;
+ if (GC_MARK(b) == GC_MARK_DIRTY)
+ dirty++;
+ if (GC_MARK(b) == GC_MARK_METADATA)
+ meta++;
+ }
+
for (i = ca->sb.first_bucket; i < n; i++)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
@@ -748,10 +796,7 @@ SHOW(__bch_cache)
while (cached < p + n &&
*cached == BTREE_PRIO)
- cached++;
-
- btree = cached - p;
- n -= btree;
+ cached++, n--;
for (i = 0; i < n; i++)
sum += INITIAL_PRIO - cached[i];
@@ -767,12 +812,16 @@ SHOW(__bch_cache)
ret = scnprintf(buf, PAGE_SIZE,
"Unused: %zu%%\n"
+ "Clean: %zu%%\n"
+ "Dirty: %zu%%\n"
"Metadata: %zu%%\n"
"Average: %llu\n"
"Sectors per Q: %zu\n"
"Quantiles: [",
unused * 100 / (size_t) ca->sb.nbuckets,
- btree * 100 / (size_t) ca->sb.nbuckets, sum,
+ available * 100 / (size_t) ca->sb.nbuckets,
+ dirty * 100 / (size_t) ca->sb.nbuckets,
+ meta * 100 / (size_t) ca->sb.nbuckets, sum,
n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
for (i = 0; i < ARRAY_SIZE(q); i++)
@@ -820,32 +869,6 @@ STORE(__bch_cache)
}
}
- if (attr == &sysfs_freelist_percent) {
- DECLARE_FIFO(long, free);
- long i;
- size_t p = strtoul_or_return(buf);
-
- p = clamp_t(size_t,
- ((size_t) ca->sb.nbuckets * p) / 100,
- roundup_pow_of_two(ca->sb.nbuckets) >> 9,
- ca->sb.nbuckets / 2);
-
- if (!init_fifo_exact(&free, p, GFP_KERNEL))
- return -ENOMEM;
-
- mutex_lock(&ca->set->bucket_lock);
-
- fifo_move(&free, &ca->free);
- fifo_swap(&free, &ca->free);
-
- mutex_unlock(&ca->set->bucket_lock);
-
- while (fifo_pop(&free, i))
- atomic_dec(&ca->buckets[i].pin);
-
- free_fifo(&free);
- }
-
if (attr == &sysfs_clear_stats) {
atomic_long_set(&ca->sectors_written, 0);
atomic_long_set(&ca->btree_sectors_written, 0);
@@ -869,7 +892,6 @@ static struct attribute *bch_cache_files[] = {
&sysfs_metadata_written,
&sysfs_io_errors,
&sysfs_clear_stats,
- &sysfs_freelist_percent,
&sysfs_cache_replacement_policy,
NULL
};
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index adbc3df17a80..b7820b0d2621 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -45,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
void bch_bio_map(struct bio *bio, void *base)
{
- size_t size = bio->bi_size;
+ size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
- BUG_ON(!bio->bi_size);
+ BUG_ON(!bio->bi_iter.bi_size);
BUG_ON(bio->bi_vcnt);
bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1030c6020e98..ac7d0d1f70d7 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -2,6 +2,7 @@
#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H
+#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/llist.h>
@@ -17,11 +18,13 @@ struct closure;
#ifdef CONFIG_BCACHE_DEBUG
+#define EBUG_ON(cond) BUG_ON(cond)
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
#else /* DEBUG */
+#define EBUG_ON(cond) do { if (cond); } while (0)
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)
@@ -391,6 +394,11 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time);
+static inline unsigned local_clock_us(void)
+{
+ return local_clock() >> 10;
+}
+
#define NSEC_PER_ns 1L
#define NSEC_PER_us NSEC_PER_USEC
#define NSEC_PER_ms NSEC_PER_MSEC
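For reference, the EBUG_ON() pair added in util.h above is a debug-only assertion: with CONFIG_BCACHE_DEBUG it is BUG_ON(); without it the condition is still compiled and evaluated, but nothing happens. A hypothetical userspace mock, with MOCK_DEBUG standing in for the config option:

#include <assert.h>
#include <stdio.h>

#ifdef MOCK_DEBUG
#define EBUG_ON(cond)	assert(!(cond))			/* traps, like BUG_ON() */
#else
#define EBUG_ON(cond)	do { if (cond); } while (0)	/* condition still evaluated, no effect */
#endif

int main(void)
{
	int nr_pinned = -1;

	EBUG_ON(nr_pinned < 0);		/* aborts only when built with -DMOCK_DEBUG */
	printf("nr_pinned=%d\n", nr_pinned);
	return 0;
}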
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
io->bio.bi_rw = WRITE;
- io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
- io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(dc, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))
return true;
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
* original bio state.
*/
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
- __u16 bv_len;
- __u16 bv_offset;
-#else
- unsigned bv_len;
- unsigned bv_offset;
-#endif
-};
-
struct dm_bio_details {
- sector_t bi_sector;
struct block_device *bi_bdev;
- unsigned int bi_size;
- unsigned short bi_idx;
unsigned long bi_flags;
- struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+ struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bd->bi_sector = bio->bi_sector;
bd->bi_bdev = bio->bi_bdev;
- bd->bi_size = bio->bi_size;
- bd->bi_idx = bio->bi_idx;
bd->bi_flags = bio->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
- bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
- }
+ bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bio->bi_sector = bd->bi_sector;
bio->bi_bdev = bd->bi_bdev;
- bio->bi_size = bd->bi_size;
- bio->bi_idx = bd->bi_idx;
bio->bi_flags = bd->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
- bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
- }
+ bio->bi_iter = bd->bi_iter;
}
#endif
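The dm-bio-record.h rewrite above works because, with immutable biovecs, everything that used to be spread across bi_sector, bi_size, bi_idx and the per-vector offsets now lives in one struct bvec_iter, so recording and restoring a bio's position becomes a single structure copy. A hypothetical userspace mock of that idea follows; mock_iter and mock_bio are illustrative stand-ins, not kernel types.

#include <stdio.h>

struct mock_iter {			/* stands in for struct bvec_iter */
	unsigned long long	sector;
	unsigned int		size;
	unsigned int		idx;
	unsigned int		done;	/* bytes consumed in the current vector */
};

struct mock_bio {			/* stands in for struct bio */
	struct mock_iter	iter;
};

static struct mock_iter record(const struct mock_bio *bio)
{
	return bio->iter;		/* one copy captures all iteration state */
}

static void restore(struct mock_bio *bio, struct mock_iter saved)
{
	bio->iter = saved;
}

int main(void)
{
	struct mock_bio bio = { .iter = { .sector = 2048, .size = 4096 } };
	struct mock_iter saved = record(&bio);

	bio.iter.sector += 8;		/* simulate partial processing */
	bio.iter.size -= 4096;

	restore(&bio, saved);
	printf("sector=%llu size=%u\n", bio.iter.sector, bio.iter.size);
	return 0;
}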
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9ed42125514b..66c5d130c8c2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_init(&b->bio);
b->bio.bi_io_vec = b->bio_vec;
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
- b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = end_io;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 930e8c3d73e9..0e385e40909e 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
- if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
t->nr_seq_samples++;
else {
/*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
t->nr_rand_samples++;
}
- t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+ t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
@@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p)
{
struct mq_policy *mq = to_mq_policy(p);
- kfree(mq->table);
+ vfree(mq->table);
epool_exit(&mq->cache_pool);
epool_exit(&mq->pre_cache_pool);
kfree(mq);
@@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
- mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 09334c275c79..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
bio->bi_end_io = h->bi_end_io;
bio->bi_private = h->bi_private;
+
+ /*
+ * Must bump bi_remaining to allow bio to complete with
+ * restored bi_end_io.
+ */
+ atomic_inc(&bio->bi_remaining);
}
/*----------------------------------------------------------------*/
@@ -283,6 +289,7 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+ struct dm_hook_info hook_info;
/*
* writethrough fields. These MUST remain at the end of this
@@ -291,7 +298,6 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -664,15 +670,18 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
+ sector_t block = from_cblock(cblock);
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
- bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
+ bio->bi_iter.bi_sector =
+ (block * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
else
- bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (block << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
}
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +721,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (!block_size_is_power_of_two(cache))
(void) sector_div(block_nr, cache->sectors_per_block);
@@ -970,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
+ sector_t cblock = from_cblock(mg->cblock);
o_region.bdev = cache->origin_dev->bdev;
o_region.count = cache->sectors_per_block;
c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.sector = cblock * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
if (mg->writeback || mg->demote) {
@@ -1002,13 +1012,15 @@ static void overwrite_endio(struct bio *bio, int err)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
unsigned long flags;
+ dm_unhook_bio(&pb->hook_info, bio);
+
if (err)
mg->err = true;
+ mg->requeue_holder = false;
+
spin_lock_irqsave(&cache->lock, flags);
list_add_tail(&mg->list, &cache->completed_migrations);
- dm_unhook_bio(&pb->hook_info, bio);
- mg->requeue_holder = false;
spin_unlock_irqrestore(&cache->lock, flags);
wake_worker(cache);
@@ -1027,7 +1039,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1264,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
- BUG_ON(bio->bi_size);
+ BUG_ON(bio->bi_iter.bi_size);
if (!pb->req_nr)
remap_to_origin(cache, bio);
else
@@ -1275,9 +1287,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
*/
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
- dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
cache->discard_block_size);
- dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t end_block = bio_end_sector(bio);
dm_block_t b;
end_block = block_div(end_block, cache->discard_block_size);
@@ -2453,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
bool discarded_block;
struct dm_bio_prison_cell *cell;
struct policy_result lookup_result;
- struct per_bio_data *pb;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
* the end of the origin device. We don't cache these.
* Just remap to the origin and carry on.
*/
- remap_to_origin_clear_discard(cache, bio, block);
+ remap_to_origin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
-
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
struct completion restart;
struct bio *bio_in;
struct bio *bio_out;
- unsigned int offset_in;
- unsigned int offset_out;
- unsigned int idx_in;
- unsigned int idx_out;
+ struct bvec_iter iter_in;
+ struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
};
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
- ctx->offset_in = 0;
- ctx->offset_out = 0;
- ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
- ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+ if (bio_in)
+ ctx->iter_in = bio_in->bi_iter;
+ if (bio_out)
+ ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
{
- struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
- struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+ struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+ struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv;
int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
- sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
- bv_in->bv_offset + ctx->offset_in);
+ sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+ bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
- sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
- bv_out->bv_offset + ctx->offset_out);
+ sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+ bv_out.bv_offset);
- ctx->offset_in += 1 << SECTOR_SHIFT;
- if (ctx->offset_in >= bv_in->bv_len) {
- ctx->offset_in = 0;
- ctx->idx_in++;
- }
-
- ctx->offset_out += 1 << SECTOR_SHIFT;
- if (ctx->offset_out >= bv_out->bv_len) {
- ctx->offset_out = 0;
- ctx->idx_out++;
- }
+ bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+ bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_set(&ctx->cc_pending, 1);
- while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
- ctx->idx_out < ctx->bio_out->bi_vcnt) {
+ while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
size -= len;
}
- if (!clone->bi_size) {
+ if (!clone->bi_iter.bi_size) {
bio_put(clone);
return NULL;
}
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_inc_pending(io);
clone_init(io, clone);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
generic_make_request(clone);
return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
/* crypt_convert should have filled the clone bio */
- BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+ BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
if (async)
kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
- unsigned remaining = io->base_bio->bi_size;
+ unsigned remaining = io->base_bio->bi_iter.bi_size;
sector_t sector = io->sector;
int r;
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
io->ctx.bio_out = clone;
- io->ctx.idx_out = 0;
+ io->ctx.iter_out = clone->bi_iter;
- remaining -= clone->bi_size;
+ remaining -= clone->bi_iter.bi_size;
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector);
- new_io->ctx.idx_in = io->ctx.idx_in;
- new_io->ctx.offset_in = io->ctx.offset_in;
+ new_io->ctx.iter_in = io->ctx.iter_in;
/*
* Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = cc->start +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+ io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index a8a511c053a5..42c3a27a14cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
- bio->bi_sector = dc->start_write +
- dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_write +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_read +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = fc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
- bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..3842ac738f98 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bvec_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+ unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+ struct bio_vec *bvec = dp->context_ptr;
*p = bvec->bv_page;
- *len = bvec->bv_len;
- *offset = bvec->bv_offset;
+ *len = bvec->bv_len - dp->context_u;
+ *offset = bvec->bv_offset + dp->context_u;
}
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+ struct bio_vec *bvec = dp->context_ptr;
dp->context_ptr = bvec + 1;
+ dp->context_u = 0;
}
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
- dp->get_page = bvec_get_page;
- dp->next_page = bvec_next_page;
- dp->context_ptr = bvec;
+ dp->get_page = bio_get_page;
+ dp->next_page = bio_next_page;
+ dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ dp->context_u = bio->bi_iter.bi_bvec_done;
}
/*
@@ -304,14 +306,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
- bio->bi_sector = where->sector + (where->count - remaining);
+ bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) {
/*
@@ -320,7 +322,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset);
num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0;
remaining -= num_sectors;
@@ -457,8 +459,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
break;
- case DM_IO_BVEC:
- bvec_dp_init(dp, io_req->mem.ptr.bvec);
+ case DM_IO_BIO:
+ bio_dp_init(dp, io_req->mem.ptr.bio);
break;
case DM_IO_VMA:
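
The dm-io rework above has one subtle point: a bvec_iter may start part-way
through a bio_vec, and bi_iter.bi_bvec_done records how many bytes of that
first vector are already consumed, which is why bio_get_page() now offsets
by dp->context_u.  A sketch of reading the first remaining chunk of a bio
under that assumption (first_chunk_after_iter is a made-up name):

#include <linux/bio.h>

static void first_chunk_after_iter(struct bio *bio, struct page **p,
				   unsigned *offset, unsigned long *len)
{
	struct bio_vec *bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	unsigned done = bio->bi_iter.bi_bvec_done;

	*p = bvec->bv_page;
	*offset = bvec->bv_offset + done;	/* skip already-consumed bytes */
	*len = bvec->bv_len - done;
}
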
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = lc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ linear_map_sector(ti, bio->bi_iter.bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 08d9a207259a..b428c0ae63d5 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
msg->seq = tfr->seq;
msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
- r = cn_netlink_send(msg, 0, gfp_any());
+ r = cn_netlink_send(msg, 0, 0, gfp_any());
return r;
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6eb9dc9ef8f3..422a9fdeb53e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1626,8 +1626,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+ int err = scsi_verify_blk_ioctl(NULL, cmd);
+ if (err)
+ r = err;
+ }
if (r == -ENOTCONN && !fatal_signal_pending(current))
queue_work(kmultipathd, &m->process_queued_ios);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..7dfdb5c746d6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
region_t region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->in_sync(log, region, 0))
- return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
+ return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
return 0;
}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
- if (unlikely(!bio->bi_size))
+ if (unlikely(!bio->bi_iter.bi_size))
return 0;
- return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
bio->bi_bdev = m->dev->bdev;
- bio->bi_sector = map_sector(m, bio);
+ bio->bi_iter.bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* We can only read balance if the region is in sync.
*/
if (likely(region_in_sync(ms, region, 1)))
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = write_callback,
.notify.context = bio,
.client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* The region is in-sync and we can perform reads directly.
* Store enough information so we can retry if it fails.
*/
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
if (unlikely(!m))
return -EIO;
@@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
+
+ atomic_inc(&bio->bi_remaining);
+
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
}
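
The atomic_inc(&bio->bi_remaining) added here (and in dm-snap and dm-thin
below) compensates for bio chaining: bio_endio() now drops a per-bio
completion count each time it is called and only finishes the bio once that
count reaches zero, so a target that decides in its completion path to retry
a bio rather than complete it has to take the count back before resubmitting.
Roughly (resubmit_for_retry is a made-up name; the actual requeueing differs
per target):

#include <linux/bio.h>

static void resubmit_for_retry(struct bio *bio)
{
	/* restore the reference that bio_endio() already dropped */
	atomic_inc(&bio->bi_remaining);

	/* ...then hand the bio back to the target's retry machinery... */
}
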
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
- return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+ return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+ rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index afc3d017de4c..d6e88178d22c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps,
r = insert_exceptions(ps, area, callback, callback_context,
&full);
+ if (!full)
+ memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
dm_bufio_release(bp);
dm_bufio_forget(client, chunk);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 717718558bd9..ebddef5237e4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1438,6 +1438,7 @@ out:
if (full_bio) {
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
+ atomic_inc(&full_bio->bi_remaining);
}
free_pending_exception(pe);
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
- bio->bi_sector = chunk_to_sector(s->store,
- dm_chunk_number(e->new_chunk) +
- (chunk - e->old_chunk)) +
- (bio->bi_sector &
- s->store->chunk_mask);
+ bio->bi_iter.bi_sector =
+ chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_iter.bi_sector & s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
r = DM_MAPIO_SUBMITTED;
if (!pe->started &&
- bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+ r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
up_read(&_origins_lock);
return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
{
sector_t begin, end;
- stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+ target_stripe, &begin);
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
- bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
- bio->bi_size = to_bytes(end - begin);
+ bio->bi_iter.bi_sector = begin +
+ sc->stripe[target_stripe].physical_start;
+ bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return stripe_map_range(sc, bio, target_bio_nr);
}
- stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ stripe_map_sector(sc, bio->bi_iter.bi_sector,
+ &stripe, &bio->bi_iter.bi_sector);
- bio->bi_sector += sc->stripe[stripe].physical_start;
+ bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
- sector_t offset = dm_target_offset(ti, bio->bi_sector);
+ sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
- bio->bi_sector = sctx->path_list[path_nr].start + offset;
+ bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 7da347665552..fb9efc829182 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -76,7 +76,7 @@
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
@@ -483,7 +483,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
- disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
@@ -651,7 +651,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
{
int r;
- pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+ pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_METADATA_CACHE_SIZE,
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
return r;
}
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
+{
+ bool r = false;
+ struct dm_thin_device *td, *tmp;
+
+ down_read(&pmd->root_lock);
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (td->changed) {
+ r = td->changed;
+ break;
+ }
+ }
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
bool r;
@@ -1738,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
return r;
}
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ down_write(&pmd->root_lock);
+ pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+ r = superblock_lock(pmd, &sblock);
+ if (r) {
+ DMERR("couldn't read superblock");
+ goto out;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+ dm_bm_unlock(sblock);
+out:
+ up_write(&pmd->root_lock);
+ return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+ bool needs_check;
+
+ down_read(&pmd->root_lock);
+ needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+ up_read(&pmd->root_lock);
+
+ return needs_check;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 9a368567632f..e3c857db195a 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -9,16 +9,14 @@
#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-metadata.h"
-#define THIN_METADATA_BLOCK_SIZE 4096
+#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
/*
* The metadata device is currently limited in size.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
*/
-#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
/*
* A metadata device larger than 16GB triggers a warning.
@@ -27,6 +25,11 @@
/*----------------------------------------------------------------*/
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
struct dm_pool_metadata;
struct dm_thin_device;
@@ -161,6 +164,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
*/
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
+
bool dm_thin_aborted_changes(struct dm_thin_device *td);
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
@@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_sm_threshold_fn fn,
void *context);
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
/*----------------------------------------------------------------*/
#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 726228b33a01..be70d38745f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
struct dm_thin_new_mapping;
/*
- * The pool runs in 3 modes. Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes. Ordered in degraded order for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
+ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
PM_READ_ONLY, /* metadata may not be changed */
PM_FAIL, /* all I/O fails */
};
@@ -198,7 +199,6 @@ struct pool {
};
static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
/*
@@ -226,6 +226,7 @@ struct thin_c {
struct pool *pool;
struct dm_thin_device *td;
+ bool requeue_mode:1;
};
/*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@ struct dm_thin_endio_hook {
struct dm_thin_new_mapping *overwrite_mapping;
};
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
struct bio *bio;
struct bio_list bios;
+ unsigned long flags;
bio_list_init(&bios);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
bio_list_merge(&bios, master);
bio_list_init(master);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
static void requeue_io(struct thin_c *tc)
{
struct pool *pool = tc->pool;
+
+ requeue_bio_list(tc, &pool->deferred_bios);
+ requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+ struct bio *bio;
unsigned long flags;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
spin_lock_irqsave(&pool->lock, flags);
- __requeue_bio_list(tc, &pool->deferred_bios);
- __requeue_bio_list(tc, &pool->retry_on_resume_list);
+ bio_list_merge(&bios, &pool->retry_on_resume_list);
+ bio_list_init(&pool->retry_on_resume_list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
}
/*
@@ -414,7 +433,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (block_size_is_power_of_two(pool))
block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +446,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
if (block_size_is_power_of_two(pool))
- bio->bi_sector = (block << pool->sectors_per_block_shift) |
- (bi_sector & (pool->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (block << pool->sectors_per_block_shift) |
+ (bi_sector & (pool->sectors_per_block - 1));
else
- bio->bi_sector = (block * pool->sectors_per_block) +
+ bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
}
@@ -612,8 +632,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
- if (m->bio)
+ if (m->bio) {
m->bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&m->bio->bi_remaining);
+ }
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +649,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
int r;
bio = m->bio;
- if (bio)
+ if (bio) {
bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&bio->bi_remaining);
+ }
if (m->err) {
cell_error(pool, m->cell);
@@ -731,7 +755,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
*/
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+ return bio->bi_iter.bi_size ==
+ (pool->sectors_per_block << SECTOR_SHIFT);
}
static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -919,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
dm_block_t free_blocks;
struct pool *pool = tc->pool;
- if (get_pool_mode(pool) != PM_WRITE)
+ if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
return -EINVAL;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -952,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
if (!free_blocks) {
- out_of_data_space(pool);
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
return -ENOSPC;
}
}
@@ -982,15 +1009,32 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+static bool should_error_unserviceable_bio(struct pool *pool)
{
- /*
- * When pool is read-only, no cell locking is needed because
- * nothing is changing.
- */
- WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+ enum pool_mode m = get_pool_mode(pool);
+
+ switch (m) {
+ case PM_WRITE:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+ return true;
+
+ case PM_OUT_OF_DATA_SPACE:
+ return pool->pf.error_if_no_space;
+
+ case PM_READ_ONLY:
+ case PM_FAIL:
+ return true;
+ default:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+ return true;
+ }
+}
- if (pool->pf.error_if_no_space)
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+ if (should_error_unserviceable_bio(pool))
bio_io_error(bio);
else
retry_on_resume(bio);
@@ -1001,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
struct bio *bio;
struct bio_list bios;
+ if (should_error_unserviceable_bio(pool)) {
+ cell_error(pool, cell);
+ return;
+ }
+
bio_list_init(&bios);
cell_release(pool, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- handle_unserviceable_bio(pool, bio);
+ if (should_error_unserviceable_bio(pool))
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ else
+ while ((bio = bio_list_pop(&bios)))
+ retry_on_resume(bio);
}
static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1136,7 +1189,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_detain(pool, &key, bio, &cell))
return;
- if (bio_data_dir(bio) == WRITE && bio->bi_size)
+ if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1212,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
@@ -1258,7 +1311,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+ if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
handle_unserviceable_bio(tc->pool, bio);
else {
inc_all_io_entry(tc->pool, bio);
@@ -1290,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
}
}
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+ bio_endio(bio, 0);
+}
+
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
bio_io_error(bio);
@@ -1322,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool)
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ continue;
+ }
+
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
@@ -1351,7 +1414,8 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&pool->deferred_flush_bios);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+ if (bio_list_empty(&bios) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
@@ -1387,51 +1451,134 @@ static void do_waker(struct work_struct *ws)
/*----------------------------------------------------------------*/
+struct noflush_work {
+ struct work_struct worker;
+ struct thin_c *tc;
+
+ atomic_t complete;
+ wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+ atomic_set(&w->complete, 1);
+ wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = true;
+ requeue_io(w->tc);
+ complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = false;
+ complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+ struct noflush_work w;
+
+ INIT_WORK(&w.worker, fn);
+ w.tc = tc;
+ atomic_set(&w.complete, 0);
+ init_waitqueue_head(&w.wait);
+
+ queue_work(tc->pool->wq, &w.worker);
+
+ wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s mode",
+ dm_device_name(pool->pool_md), new_mode);
+}
+
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
- int r;
- enum pool_mode old_mode = pool->pf.mode;
+ struct pool_c *pt = pool->ti->private;
+ bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+ enum pool_mode old_mode = get_pool_mode(pool);
+
+ /*
+ * Never allow the pool to transition to PM_WRITE mode if user
+ * intervention is required to verify metadata and data consistency.
+ */
+ if (new_mode == PM_WRITE && needs_check) {
+ DMERR("%s: unable to switch pool to write mode until repaired.",
+ dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ new_mode = old_mode;
+ else
+ new_mode = PM_READ_ONLY;
+ }
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode.
+ */
+ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
switch (new_mode) {
case PM_FAIL:
if (old_mode != new_mode)
- DMERR("%s: switching pool to failure mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
pool->process_prepared_discard = process_prepared_discard_fail;
+
+ error_retry_list(pool);
break;
case PM_READ_ONLY:
if (old_mode != new_mode)
- DMERR("%s: switching pool to read-only mode",
- dm_device_name(pool->pool_md));
- r = dm_pool_abort_metadata(pool->pmd);
- if (r) {
- DMERR("%s: aborting transaction failed",
- dm_device_name(pool->pool_md));
- new_mode = PM_FAIL;
- set_pool_mode(pool, new_mode);
- } else {
- dm_pool_metadata_read_only(pool->pmd);
- pool->process_bio = process_bio_read_only;
- pool->process_discard = process_discard;
- pool->process_prepared_mapping = process_prepared_mapping_fail;
- pool->process_prepared_discard = process_prepared_discard_passdown;
- }
+ notify_of_pool_mode_change(pool, "read-only");
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_bio_success;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ error_retry_list(pool);
+ break;
+
+ case PM_OUT_OF_DATA_SPACE:
+ /*
+ * Ideally we'd never hit this state; the low water mark
+ * would trigger userland to extend the pool before we
+ * completely run out of data space. However, many small
+ * IOs to unprovisioned space can consume data space at an
+ * alarming rate. Adjust your low water mark if you're
+ * frequently seeing this mode.
+ */
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "out-of-data-space");
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
break;
case PM_WRITE:
if (old_mode != new_mode)
- DMINFO("%s: switching pool to write mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "write");
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1441,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
}
pool->pf.mode = new_mode;
+ /*
+ * The pool mode may have changed, sync it so bind_control_target()
+ * doesn't cause an unexpected mode transition on resume.
+ */
+ pt->adjusted_pf.mode = new_mode;
}
-/*
- * Rather than calling set_pool_mode directly, use these which describe the
- * reason for mode degradation.
- */
-static void out_of_data_space(struct pool *pool)
+static void abort_transaction(struct pool *pool)
{
- DMERR_LIMIT("%s: no free data space available.",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
+ const char *dev_name = dm_device_name(pool->pool_md);
+
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_pool_abort_metadata(pool->pmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
+
+ if (dm_pool_metadata_set_needs_check(pool->pmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
}
static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
- dm_block_t free_blocks;
-
DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
dm_device_name(pool->pool_md), op, r);
- if (r == -ENOSPC &&
- !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
- !free_blocks)
- DMERR_LIMIT("%s: no free metadata space available.",
- dm_device_name(pool->pool_md));
-
+ abort_transaction(pool);
set_pool_mode(pool, PM_READ_ONLY);
}
@@ -1517,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
thin_hook_bio(tc, bio);
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ return DM_MAPIO_SUBMITTED;
+ }
+
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
@@ -1680,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
/*
* We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
- enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode old_mode = get_pool_mode(pool);
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
@@ -1694,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool->pf = pt->adjusted_pf;
pool->low_water_blocks = pt->low_water_blocks;
- /*
- * If we were in PM_FAIL mode, rollback of metadata failed. We're
- * not going to recover without a thin_repair. So we never let the
- * pool move out of the old mode. On the other hand a PM_READ_ONLY
- * may have been due to a lack of metadata or data space, and may
- * now work (ie. if the underlying devices have been resized).
- */
- if (old_mode == PM_FAIL)
- new_mode = old_mode;
-
set_pool_mode(pool, new_mode);
return 0;
@@ -1993,16 +2138,27 @@ static void metadata_low_callback(void *context)
dm_table_event(pool->ti->table);
}
-static sector_t get_metadata_dev_size(struct block_device *bdev)
+static sector_t get_dev_size(struct block_device *bdev)
+{
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
- sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t metadata_dev_size = get_dev_size(bdev);
char buffer[BDEVNAME_SIZE];
- if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
- metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
- }
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_dev_size(bdev);
+
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
+ metadata_dev_size = THIN_METADATA_MAX_SECTORS;
return metadata_dev_size;
}
@@ -2011,7 +2167,7 @@ static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{
sector_t metadata_dev_size = get_metadata_dev_size(bdev);
- sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
return metadata_dev_size;
}
@@ -2089,12 +2245,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Error opening metadata block device";
goto out_unlock;
}
-
- /*
- * Run for the side-effect of possibly issuing a warning if the
- * device is too big.
- */
- (void) get_metadata_dev_size(metadata_dev->bdev);
+ warn_if_metadata_device_too_big(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -2240,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (data_size > sb_data_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the data device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
if (sb_data_size)
DMINFO("%s: growing the data device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
@@ -2281,6 +2438,13 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the metadata device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
+ warn_if_metadata_device_too_big(pool->md_dev);
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
@@ -2667,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
- if (pool->pf.mode == PM_READ_ONLY)
+ if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ DMEMIT("out_of_data_space ");
+ else if (pool->pf.mode == PM_READ_ONLY)
DMEMIT("ro ");
else
DMEMIT("rw ");
@@ -2781,7 +2947,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2888,6 +3054,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (get_pool_mode(tc->pool) == PM_FAIL) {
ti->error = "Couldn't open thin device, Pool is in fail mode";
+ r = -EINVAL;
goto bad_thin_open;
}
@@ -2899,7 +3066,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
if (r)
- goto bad_thin_open;
+ goto bad_target_max_io_len;
ti->num_flush_bios = 1;
ti->flush_supported = true;
@@ -2920,6 +3087,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
return 0;
+bad_target_max_io_len:
+ dm_pool_close_thin_device(tc->td);
bad_thin_open:
__pool_dec(tc->pool);
bad_pool_lookup:
@@ -2939,7 +3108,7 @@ out_unlock:
static int thin_map(struct dm_target *ti, struct bio *bio)
{
- bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
return thin_bio_map(ti, bio);
}
@@ -2980,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
return 0;
}
-static void thin_postsuspend(struct dm_target *ti)
+static void thin_presuspend(struct dm_target *ti)
{
+ struct thin_c *tc = ti->private;
+
if (dm_noflush_suspending(ti))
- requeue_io((struct thin_c *)ti->private);
+ noflush_work(tc, do_noflush_start);
+}
+
+static void thin_postsuspend(struct dm_target *ti)
+{
+ struct thin_c *tc = ti->private;
+
+ /*
+ * The dm_noflush_suspending flag has been cleared by now, so
+ * unfortunately we must always run this.
+ */
+ noflush_work(tc, do_noflush_stop);
}
/*
@@ -3068,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
.map = thin_map,
.end_io = thin_endio,
+ .presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
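
noflush_work() in the hunk above runs a function on the pool workqueue and
waits for it using an entirely on-stack work item; that is safe because the
caller cannot return (and release the stack frame) until the worker signals
completion.  The same shape in isolation, under the assumption that the
queued function signals completion itself, as complete_noflush_work() does
(sync_work and run_on_wq_and_wait are illustrative names):

#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct sync_work {
	struct work_struct worker;
	atomic_t done;
	wait_queue_head_t wait;
};

static void run_on_wq_and_wait(struct workqueue_struct *wq,
			       struct sync_work *w, work_func_t fn)
{
	INIT_WORK(&w->worker, fn);
	atomic_set(&w->done, 0);
	init_waitqueue_head(&w->wait);

	/* fn() must set w->done and wake w->wait when it is finished */
	queue_work(wq, &w->worker);

	wait_event(w->wait, atomic_read(&w->done));
}
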
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..796007a5e0e1 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
- /* saved bio vector */
- struct bio_vec *io_vec;
- unsigned io_vec_size;
+ struct bvec_iter iter;
struct work_struct work;
- /* A space for short vectors; longer vectors are allocated separately. */
- struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
/*
* Three variably-size fields follow this struct:
*
@@ -284,9 +279,10 @@ release_ret_r:
static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io,
+ v->ti->per_bio_data_size);
unsigned b;
int i;
- unsigned vector = 0, offset = 0;
for (b = 0; b < io->n_blocks; b++) {
struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
}
todo = 1 << v->data_dev_block_bits;
- do {
- struct bio_vec *bv;
+ while (io->iter.bi_size) {
u8 *page;
- unsigned len;
-
- BUG_ON(vector >= io->io_vec_size);
- bv = &io->io_vec[vector];
- page = kmap_atomic(bv->bv_page);
- len = bv->bv_len - offset;
- if (likely(len >= todo))
- len = todo;
- r = crypto_shash_update(desc,
- page + bv->bv_offset + offset, len);
+ struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+ page = kmap_atomic(bv.bv_page);
+ r = crypto_shash_update(desc, page + bv.bv_offset,
+ bv.bv_len);
kunmap_atomic(page);
+
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
- offset += len;
- if (likely(offset == bv->bv_len)) {
- offset = 0;
- vector++;
- }
- todo -= len;
- } while (todo);
+
+ bio_advance_iter(bio, &io->iter, bv.bv_len);
+ }
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
return -EIO;
}
}
- BUG_ON(vector != io->io_vec_size);
- BUG_ON(offset);
return 0;
}
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
- if (io->io_vec != io->io_vec_inline)
- mempool_free(io->io_vec, v->vec_mempool);
-
- bio_endio(bio, error);
+ bio_endio_nodec(bio, error);
}
static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev;
- bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+ bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+ if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
- io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
- io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+ io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+ io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
- io->io_vec_size = bio_segments(bio);
- if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
- io->io_vec = io->io_vec_inline;
- else
- io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
- memcpy(io->io_vec, bio_iovec(bio),
- io->io_vec_size * sizeof(struct bio_vec));
+ io->iter = bio->bi_iter;
verity_submit_prefetch(v, io);
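
dm-verity no longer snapshots the bio's vector array; it saves bi_iter and
walks the data with bio_iter_iovec()/bio_advance_iter().  The generic
walking pattern looks roughly like this (walk_bio_data and consume are
made-up names; error handling omitted):

#include <linux/bio.h>
#include <linux/highmem.h>

static void walk_bio_data(struct bio *bio, struct bvec_iter iter,
			  void (*consume)(void *data, unsigned len))
{
	while (iter.bi_size) {
		struct bio_vec bv = bio_iter_iovec(bio, iter);
		void *data = kmap_atomic(bv.bv_page);

		consume(data + bv.bv_offset, bv.bv_len);
		kunmap_atomic(data);

		bio_advance_iter(bio, &iter, bv.bv_len);
	}
}
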
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b49c76284241..8c53b09b9a2c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), false, 0, &io->stats_aux);
}
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
part_stat_unlock();
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), true, duration, &io->stats_aux);
/*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+ if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_size;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
bio_put(clone);
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
* this io.
*/
atomic_inc(&tio->io->io_count);
- sector = clone->bi_sector;
+ sector = clone->bi_iter.bi_sector;
r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
sector_t sector_count;
- unsigned short idx;
};
static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
- bio->bi_sector = sector;
- bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
- bio->bi_idx = idx;
- bio->bi_vcnt = idx + bv_count;
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
- unsigned short idx, unsigned len, unsigned offset,
- unsigned trim)
-{
- if (!bio_integrity(bio))
- return;
-
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (trim)
- bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned offset, unsigned len)
-{
- struct bio *clone = &tio->clone;
- struct bio_vec *bv = bio->bi_io_vec + idx;
-
- *clone->bi_io_vec = *bv;
-
- bio_setup_sector(clone, sector, len);
-
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = 1;
- clone->bi_io_vec->bv_offset = offset;
- clone->bi_io_vec->bv_len = clone->bi_size;
- clone->bi_flags |= 1 << BIO_CLONED;
-
- clone_bio_integrity(bio, clone, idx, len, offset, 1);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = to_bytes(len);
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned len)
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
- unsigned trim = 0;
- __bio_clone(clone, bio);
- bio_setup_sector(clone, sector, len);
- bio_setup_bv(clone, idx, bv_count);
+ __bio_clone_fast(clone, bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ clone->bi_iter.bi_size = to_bytes(len);
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- trim = 1;
- clone_bio_integrity(bio, clone, idx, len, 0, trim);
+ if (bio_integrity(bio))
+ bio_integrity_trim(clone, 0, len);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
- __bio_clone(clone, ci->bio);
+ __bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, len);
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, int nr_iovecs,
- unsigned short idx, unsigned short bv_count,
- unsigned offset, unsigned len,
- unsigned split_bvec)
+ sector_t sector, unsigned len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
num_target_bios = ti->num_write_bios(ti, bio);
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
- if (split_bvec)
- clone_split_bio(tio, bio, sector, idx, offset, len);
- else
- clone_bio(tio, bio, sector, idx, bv_count, len);
+ tio = alloc_tio(ci, ti, 0, target_bio_nr);
+ clone_bio(tio, bio, sector, len);
__map_bio(tio);
}
}
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
}
/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
- struct bio *bio = ci->bio;
- sector_t bv_len, total_len = 0;
-
- for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
- bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
- if (bv_len > max)
- break;
-
- max -= bv_len;
- total_len += bv_len;
- }
-
- return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
- struct dm_target *ti, sector_t max)
-{
- struct bio *bio = ci->bio;
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned offset = 0;
- sector_t len;
-
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
- }
-
- len = min(remaining, max);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
- bv->bv_offset + offset, len, 1);
-
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
-
- ci->idx++;
-
- return 0;
-}
-
-/*
* Select the correct strategy for processing a non-flush bio.
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
- sector_t len, max;
- int idx;
+ unsigned len;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->sector, ti);
-
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- if (ci->sector_count <= max) {
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, bio->bi_vcnt - ci->idx, 0,
- ci->sector_count, 0);
- ci->sector_count = 0;
- return 0;
- }
-
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- len = __len_within_target(ci, max, &idx);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, idx - ci->idx, 0, len, 0);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- ci->sector += len;
- ci->sector_count -= len;
- ci->idx = idx;
+ __clone_and_map_data_bio(ci, ti, ci->sector, len);
- return 0;
- }
+ ci->sector += len;
+ ci->sector_count -= len;
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- return __split_bvec_across_targets(ci, ti, max);
+ return 0;
}
/*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_sector;
- ci.idx = bio->bi_idx;
+ ci.sector = bio->bi_iter.bi_sector;
start_io_acct(ci.io);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
- b->bi_size = bio->bi_size;
- b->bi_sector = bio->bi_sector;
+ b->bi_iter.bi_size = bio->bi_iter.bi_size;
+ b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio_put(bio);
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), WRITE))
failit = 1;
if (check_mode(conf, WritePersistent)) {
- add_sector(conf, bio->bi_sector, WritePersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ WritePersistent);
failit = 1;
}
if (check_mode(conf, WriteTransient))
failit = 1;
} else {
/* read request */
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), READ))
failit = 1;
if (check_mode(conf, ReadTransient))
failit = 1;
if (check_mode(conf, ReadPersistent)) {
- add_sector(conf, bio->bi_sector, ReadPersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadPersistent);
failit = 1;
}
if (check_mode(conf, ReadFixable)) {
- add_sector(conf, bio->bi_sector, ReadFixable);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadFixable);
failit = 1;
}
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
+ char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
- sector_t start_sector;
+ struct bio *split;
+ sector_t start_sector, end_sector, data_offset;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- rcu_read_lock();
- tmp_dev = which_dev(mddev, bio->bi_sector);
- start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
- if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
- || (bio->bi_sector < start_sector))) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- mdname(mddev),
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
- rcu_read_unlock();
- bio_io_error(bio);
- return;
- }
- if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
- /* This bio crosses a device boundary, so we have to
- * split it.
- */
- struct bio_pair *bp;
- sector_t end_sector = tmp_dev->end_sector;
+ do {
+ rcu_read_lock();
- rcu_read_unlock();
-
- bp = bio_split(bio, end_sector - bio->bi_sector);
+ tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+ bio->bi_bdev = tmp_dev->rdev->bdev;
- linear_make_request(mddev, &bp->bio1);
- linear_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
-
- bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - start_sector
- + tmp_dev->rdev->data_offset;
- rcu_read_unlock();
+ rcu_read_unlock();
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+ bio->bi_iter.bi_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to
+ * split it.
+ */
+ split = bio_split(bio, end_sector -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- generic_make_request(bio);
+ split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
+ return;
+
+out_of_bounds:
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
}
static void linear_status (struct seq_file *seq, struct mddev *mddev)
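
linear_make_request() now deals with a bio that crosses a device boundary by
carving off the leading piece with bio_split(), chaining it to the remainder
with bio_chain() so the parent completes only after every piece does, and
looping until the rest fits.  The skeleton of that loop, with the per-device
lookup abstracted into a made-up end_of_dev() callback:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void split_across_boundaries(struct bio *bio,
				    sector_t (*end_of_dev)(sector_t sector))
{
	struct bio *split;

	do {
		sector_t end = end_of_dev(bio->bi_iter.bi_sector);

		if (bio_end_sector(bio) > end) {
			/* first part of the bio, up to the boundary */
			split = bio_split(bio, end - bio->bi_iter.bi_sector,
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		generic_make_request(split);
	} while (split != bio);
}
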
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 369d919bdafe..4ad5cc4e63e8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
- if (bio->bi_size == 0)
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
- rw |= REQ_SYNC;
-
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
- bio->bi_sector = sector + rdev->sb_start;
+ bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_sector = sector + rdev->new_data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_sector = sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
@@ -1173,6 +1171,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
+ rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
@@ -1671,10 +1670,14 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Faulty, &rdev->flags);
break;
default:
+ rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
- MD_FEATURE_RECOVERY_OFFSET))
+ MD_FEATURE_RECOVERY_OFFSET)) {
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
- else
+ if (!(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_BITMAP))
+ rdev->saved_raid_disk = -1;
+ } else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
@@ -1736,6 +1739,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
+ if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
@@ -2477,8 +2483,7 @@ repeat:
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
- if (!test_bit(Faulty, &rdev->flags) &&
- rdev->saved_raid_disk == -1) {
+ if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2494,11 +2499,9 @@ repeat:
rdev->badblocks.size = 0;
}
- } else if (test_bit(Faulty, &rdev->flags))
+ } else
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
- else
- pr_debug("(skipping incremental s/r ");
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
@@ -2614,6 +2617,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* blocked - sets the Blocked flags
* -blocked - clears the Blocked and possibly simulates an error
* insync - sets Insync providing device isn't active
+ * -insync - clear Insync for a device with a slot assigned,
+ * so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
@@ -2662,6 +2667,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
+ clear_bit(In_sync, &rdev->flags);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->raid_disk = -1;
+ err = 0;
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
@@ -3589,6 +3599,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
+ if (!mddev->thread)
+ md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
@@ -5770,6 +5782,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
+ rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
@@ -5782,11 +5795,6 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
return -EINVAL;
}
- if (test_bit(In_sync, &rdev->flags))
- rdev->saved_raid_disk = rdev->raid_disk;
- else
- rdev->saved_raid_disk = -1;
-
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
@@ -6336,6 +6344,32 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static inline bool md_ioctl_valid(unsigned int cmd)
+{
+ switch (cmd) {
+ case ADD_NEW_DISK:
+ case BLKROSET:
+ case GET_ARRAY_INFO:
+ case GET_BITMAP_FILE:
+ case GET_DISK_INFO:
+ case HOT_ADD_DISK:
+ case HOT_REMOVE_DISK:
+ case PRINT_RAID_DEBUG:
+ case RAID_AUTORUN:
+ case RAID_VERSION:
+ case RESTART_ARRAY_RW:
+ case RUN_ARRAY:
+ case SET_ARRAY_INFO:
+ case SET_BITMAP_FILE:
+ case SET_DISK_FAULTY:
+ case STOP_ARRAY:
+ case STOP_ARRAY_RO:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -6344,6 +6378,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
struct mddev *mddev = NULL;
int ro;
+ if (!md_ioctl_valid(cmd))
+ return -ENOTTY;
+
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
@@ -7718,7 +7755,8 @@ static int remove_and_add_spares(struct mddev *mddev,
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
- rdev->recovery_offset = 0;
+ if (rdev->saved_raid_disk < 0)
+ rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
@@ -7938,14 +7976,10 @@ void md_reap_sync_thread(struct mddev *mddev)
mddev->pers->finish_reshape(mddev);
/* If array is no-longer degraded, then any saved_raid_disk
- * information must be scrapped. Also if any device is now
- * In_sync we must scrape the saved_raid_disk for that device
- * do the superblock for an incrementally recovered device
- * written out.
+ * information must be scrapped.
*/
- rdev_for_each(rdev, mddev)
- if (!mddev->degraded ||
- test_bit(In_sync, &rdev->flags))
+ if (!mddev->degraded)
+ rdev_for_each(rdev, mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
multipath = conf->multipaths + mp_bh->path;
mp_bh->bio = *bio;
- mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+ mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
spin_unlock_irqrestore(&conf->device_lock, flags);
bio = &mp_bh->bio;
- bio->bi_sector = mp_bh->master_bio->bi_sector;
+ bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
printk(KERN_ERR "multipath: %s: redirecting sector %llu"
" to another IO path\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
- bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+ bio->bi_iter.bi_sector +=
+ conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 19b268795415..0c2dec7aec20 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA
---help---
Library providing immutable on-disk data structure support for
device-mapper targets such as the thin provisioning target.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+ boolean "Keep stack trace of persistent data block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
+ select STACKTRACE
+ ---help---
+ Enable this for messages that may help debug problems with the
+ block manager locking used by thin provisioning and caching.
+
+ If unsure, say N.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 536782e3bcb7..786b689bdfc7 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -91,6 +91,69 @@ struct block_op {
dm_block_t block;
};
+struct bop_ring_buffer {
+ unsigned begin;
+ unsigned end;
+ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+ brb->begin = 0;
+ brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+ return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+ unsigned r = old + 1;
+ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+ enum block_op_type type, dm_block_t b)
+{
+ struct block_op *bop;
+ unsigned next = brb_next(brb, brb->end);
+
+ /*
+ * We don't allow the last bop to be filled, this way we can
+ * differentiate between full and empty.
+ */
+ if (next == brb->begin)
+ return -ENOMEM;
+
+ bop = brb->bops + brb->end;
+ bop->type = type;
+ bop->block = b;
+
+ brb->end = next;
+
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+ struct block_op *bop;
+
+ if (brb_empty(brb))
+ return -ENODATA;
+
+ bop = brb->bops + brb->begin;
+ result->type = bop->type;
+ result->block = bop->block;
+
+ brb->begin = brb_next(brb, brb->begin);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
struct sm_metadata {
struct dm_space_map sm;
@@ -101,25 +164,20 @@ struct sm_metadata {
unsigned recursion_count;
unsigned allocated_this_transaction;
- unsigned nr_uncommitted;
- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+ struct bop_ring_buffer uncommitted;
struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
- struct block_op *op;
+ int r = brb_push(&smm->uncommitted, type, b);
- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+ if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
}
- op = smm->uncommitted + smm->nr_uncommitted++;
- op->type = type;
- op->block = b;
-
return 0;
}
@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
return -ENOMEM;
}
- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
- while (smm->nr_uncommitted && !r) {
- smm->nr_uncommitted--;
- r = commit_bop(smm, smm->uncommitted +
- smm->nr_uncommitted);
+ if (smm->recursion_count == 1) {
+ while (!brb_empty(&smm->uncommitted)) {
+ struct block_op bop;
+
+ r = brb_pop(&smm->uncommitted, &bop);
+ if (r) {
+ DMERR("bug in bop ring buffer");
+ break;
+ }
+
+ r = commit_bop(smm, &bop);
if (r)
break;
}
@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
- int r, i;
+ int r;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
unsigned adjustment = 0;
@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
- int r, i, adjustment = 0;
+ int r, adjustment = 0;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
smm->begin = superblock + 1;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -680,6 +751,8 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
if (r)
return r;
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
r = sm_ll_extend(&smm->ll, nr_blocks);
if (r)
return r;
@@ -713,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
smm->begin = 0;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
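The bop_ring_buffer introduced above deliberately leaves one slot unused: begin == end means empty, and a push fails when advancing end would run into begin, so with MAX_RECURSIVE_ALLOCATIONS + 1 slots it still holds MAX_RECURSIVE_ALLOCATIONS pending block ops, the same capacity as the fixed array it replaces. A small stand-alone sketch of that invariant (not part of the patch):

	#include <assert.h>

	#define SLOTS 4	/* usable capacity is SLOTS - 1 == 3 */

	static unsigned nxt(unsigned i) { return (i + 1 == SLOTS) ? 0 : i + 1; }

	int main(void)
	{
		unsigned begin = 0, end = 0, pushed = 0;

		while (nxt(end) != begin) {	/* push until "full" */
			end = nxt(end);
			pushed++;
		}
		assert(pushed == SLOTS - 1);	/* one slot always stays empty */
		return 0;
	}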
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 39bba0801cf2..64df923974d8 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -9,6 +9,17 @@
#include "dm-transaction-manager.h"
+#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about ~16k metadata blocks.
+ */
+#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))
+#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)
+
/*
* Unfortunately we have to use two-phase construction due to the cycle
* between the tm and sm.
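For scale, the limit declared above works out to 255 * ((1 << 14) - 64) = 255 * 16320 = 4,161,600 metadata blocks; at 4 KiB (eight 512-byte sectors) per block that is 33,292,800 sectors, roughly 15.9 GiB of metadata device. A quick stand-alone check of the arithmetic (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long max_blocks  = 255ULL * ((1 << 14) - 64);	/* 4161600 */
		unsigned long long max_sectors = max_blocks * (4096 >> 9);	/* 8 sectors per block */

		printf("blocks=%llu sectors=%llu bytes=%llu (~%.1f GiB)\n",
		       max_blocks, max_sectors, max_sectors * 512,
		       max_sectors * 512 / (1024.0 * 1024 * 1024));
		return 0;
	}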
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
unsigned int chunk_sects, struct bio *bio)
{
if (likely(is_power_of_2(chunk_sects))) {
- return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ return chunk_sects >=
+ ((bio->bi_iter.bi_sector & (chunk_sects-1))
+ bio_sectors(bio));
} else{
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
+ bio_sectors(bio));
}
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
- unsigned int chunk_sects;
- sector_t sector_offset;
struct strip_zone *zone;
struct md_rdev *tmp_dev;
+ struct bio *split;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- chunk_sects = mddev->chunk_sectors;
- if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
- sector_t sector = bio->bi_sector;
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- if (likely(is_power_of_2(chunk_sects)))
- bp = bio_split(bio, chunk_sects - (sector &
- (chunk_sects-1)));
- else
- bp = bio_split(bio, chunk_sects -
- sector_div(sector, chunk_sects));
- raid0_make_request(mddev, &bp->bio1);
- raid0_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
+ do {
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
- sector_offset = bio->bi_sector;
- zone = find_zone(mddev->private, &sector_offset);
- tmp_dev = map_sector(mddev, zone, bio->bi_sector,
- &sector_offset);
- bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = sector_offset + zone->dev_start +
- tmp_dev->data_offset;
-
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ unsigned sectors = chunk_sects -
+ (likely(is_power_of_2(chunk_sects))
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
- generic_make_request(bio);
- return;
-
-bad_map:
- printk("md/raid0:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n",
- mdname(mddev), chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- bio_io_error(bio);
- return;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+ split->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
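In the rewritten raid0_make_request() above, the number of sectors left in the current chunk is chunk_sects minus the bio's offset within that chunk, and the bio is split whenever that is smaller than bio_sectors(bio). For example, with chunk_sects = 128 and bi_sector = 300 the offset is 300 & 127 = 44, so 84 sectors fit before the next chunk boundary. A stand-alone check (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sector = 300;
		unsigned chunk_sects = 128;	/* power of two, as in the likely() branch */
		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

		printf("%u sectors until the next chunk boundary\n", sectors);	/* prints 84 */
		return 0;
	}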
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc7a343..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
int done;
struct r1conf *conf = r1_bio->mddev->private;
sector_t start_next_window = r1_bio->start_next_window;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
(bio_data_dir(bio) == WRITE) ? "write" : "read",
- (unsigned long long) bio->bi_sector,
- (unsigned long long) bio->bi_sector +
- bio_sectors(bio) - 1);
+ (unsigned long long) bio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(bio) - 1);
call_bio_endio(r1_bio);
}
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
struct bio *mbio = r1_bio->master_bio;
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- bio_sectors(mbio) - 1);
+ (unsigned long long) mbio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
}
}
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
>= bio_end_sector(bio)) ||
(conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector))
+ <= bio->bi_iter.bi_sector))
wait = false;
else
wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
if (bio && bio_data_dir(bio) == WRITE) {
if (conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector) {
+ <= bio->bi_iter.bi_sector) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
NEXT_NORMALIO_DISTANCE;
if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
- <= bio->bi_sector)
+ <= bio->bi_iter.bi_sector)
conf->next_window_requests++;
else
conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
if (bvecs[i].bv_page)
put_page(bvecs[i].bv_page);
kfree(bvecs);
- pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
}
struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_data_dir(bio) == WRITE &&
bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_sector < mddev->suspend_hi) {
+ bio->bi_iter.bi_sector < mddev->suspend_hi) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi)
break;
schedule();
}
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r1_bio->sectors = bio_sectors(bio);
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector;
+ r1_bio->sector = bio->bi_iter.bi_sector;
/* We might need to issue multiple reads to different
* devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r1_bio->bios[rdisk] = read_bio;
- read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
*/
sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf, start_next_window, bio->bi_sector);
+ allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1348,7 +1349,7 @@ read_again:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
r1_bio->bios[i] = mbio;
- mbio->bi_sector = (r1_bio->sector +
+ mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
goto retry_write;
}
@@ -1952,20 +1953,24 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
- b->bi_size = r1_bio->sectors << 9;
- b->bi_sector = r1_bio->sector +
+ b->bi_iter.bi_size = r1_bio->sectors << 9;
+ b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
b->bi_end_io = end_sync_read;
b->bi_private = r1_bio;
- size = b->bi_size;
+ size = b->bi_iter.bi_size;
for (j = 0; j < vcnt ; j++) {
struct bio_vec *bi;
bi = &b->bi_io_vec[j];
@@ -1989,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2008,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2220,11 +2228,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
}
wbio->bi_rw = WRITE;
- wbio->bi_sector = r1_bio->sector;
- wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_iter.bi_sector = r1_bio->sector;
+ wbio->bi_iter.bi_size = r1_bio->sectors << 9;
bio_trim(wbio, sector - r1_bio->sector, sectors);
- wbio->bi_sector += rdev->data_offset;
+ wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
/* failure! */
@@ -2338,7 +2346,8 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2347,7 +2356,7 @@ read_more:
mdname(mddev),
(unsigned long long)r1_bio->sector,
bdevname(rdev->bdev, b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
@@ -2356,7 +2365,7 @@ read_more:
/* Drat - have to split this up more */
struct bio *mbio = r1_bio->master_bio;
int sectors_handled = (r1_bio->sector + max_sectors
- - mbio->bi_sector);
+ - mbio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2383,8 @@ read_more:
r1_bio->state = 0;
set_bit(R1BIO_ReadError, &r1_bio->state);
r1_bio->mddev = mddev;
- r1_bio->sector = mbio->bi_sector + sectors_handled;
+ r1_bio->sector = mbio->bi_iter.bi_sector +
+ sectors_handled;
goto read_more;
} else
@@ -2598,7 +2608,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
}
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
}
@@ -2698,7 +2708,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
continue;
/* remove last page from this bio */
bio->bi_vcnt--;
- bio->bi_size -= len;
+ bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
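The process_checks() hunks above exist because bio_reset() reinitialises bi_flags with BIO_UPTODATE set, so a read failure recorded on the bio would silently be lost across the reset; the fix saves the flag before the reset, clears it again when the read had actually failed, and later uses the saved value rather than the reinitialised flag when deciding whether the copies match. The idiom, reduced to its core (same helpers as in the hunk, not a complete function):

	int uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);

	bio_reset(b);			/* leaves BIO_UPTODATE set unconditionally */
	if (!uptodate)
		clear_bit(BIO_UPTODATE, &b->bi_flags);
	/* ... reinitialise bi_iter, bi_bdev and bi_end_io as in the hunk ... */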
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 06eeb99ea6fc..33fc408e5eac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
- int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int max_sectors;
int sectors;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
- md_flush_request(mddev, bio);
- return;
- }
-
- /* If this request crosses a chunk boundary, we need to
- * split it. This will only happen for 1 PAGE (or less) requests.
- */
- if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
- > chunk_sects
- && (conf->geo.near_copies < conf->geo.raid_disks
- || conf->prev.near_copies < conf->prev.raid_disks))) {
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- bp = bio_split(bio,
- chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
- /* Each of these 'make_request' calls will call 'wait_barrier'.
- * If the first succeeds but the second blocks due to the resync
- * thread raising the barrier, we will deadlock because the
- * IO to the underlying device will be queued in generic_make_request
- * and will never complete, so will never reduce nr_pending.
- * So increment nr_waiting here so no new raise_barriers will
- * succeed, and so the second wait_barrier cannot block.
- */
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting++;
- spin_unlock_irq(&conf->resync_lock);
-
- make_request(mddev, &bp->bio1);
- make_request(mddev, &bp->bio2);
-
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting--;
- wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
-
- bio_pair_release(bp);
- return;
- bad_map:
- printk("md/raid10:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
- bio_io_error(bio);
- return;
- }
-
- md_write_start(mddev, bio);
-
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_sector < conf->reshape_progress &&
- bio->bi_sector + sectors > conf->reshape_progress) {
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
allow_barrier(conf);
wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_sector ||
- conf->reshape_progress >= bio->bi_sector + sectors);
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
wait_barrier(conf);
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
- ? (bio->bi_sector < conf->reshape_safe &&
- bio->bi_sector + sectors > conf->reshape_progress)
- : (bio->bi_sector + sectors > conf->reshape_safe &&
- bio->bi_sector < conf->reshape_progress))) {
+ ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector;
+ r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
/* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
- read_bio->bi_sector = r10_bio->devs[slot].addr +
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
* need another r10_bio.
*/
sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r10_bio->sector + max_sectors -
+ bio->bi_iter.bi_sector;
atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr+
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio,
rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].repl_bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr +
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
choose_data_offset(
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
r10_bio->state = 0;
goto retry_write;
}
one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
+
+ struct bio *split;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ md_flush_request(mddev, bio);
+ return;
+ }
+
+ md_write_start(mddev, bio);
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ do {
+
+ /*
+ * If this request crosses a chunk boundary, we need to split
+ * it.
+ */
+ if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+ bio_sectors(bio) > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies <
+ conf->prev.raid_disks))) {
+ split = bio_split(bio, chunk_sects -
+ (bio->bi_iter.bi_sector &
+ (chunk_sects - 1)),
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ __make_request(mddev, split);
+ } while (split != bio);
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = r10_bio->sectors << 9;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
- tbio->bi_sector = r10_bio->devs[i].addr;
+ tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
for (j=0; j < vcnt ; j++) {
tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
- tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+ tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
}
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(wbio, sector - bio->bi_sector, sectors);
- wbio->bi_sector = (r10_bio->devs[i].addr+
+ bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+ wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
- bio->bi_sector = r10_bio->devs[slot].addr
+ bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
struct bio *mbio = r10_bio->master_bio;
int sectors_handled =
r10_bio->sector + max_sectors
- - mbio->bi_sector;
+ - mbio->bi_iter.bi_sector;
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
set_bit(R10BIO_ReadError,
&r10_bio->state);
r10_bio->mddev = mddev;
- r10_bio->sector = mbio->bi_sector
+ r10_bio->sector = mbio->bi_iter.bi_sector
+ sectors_handled;
goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = from_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr
+ bio->bi_iter.bi_sector = to_addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = to_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].replacement->data_offset;
bio->bi_bdev = conf->mirrors[d].replacement->bdev;
count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio2 = bio2->bi_next) {
/* remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
@@ -3747,7 +3734,8 @@ static int run(struct mddev *mddev)
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
mddev->degraded++;
- if (disk->rdev)
+ if (disk->rdev &&
+ disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
disk->recovery_disabled = mddev->recovery_disabled - 1;
@@ -4417,7 +4405,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4413,7 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_size = 0;
+ read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4451,7 +4439,8 @@ read_more:
bio_reset(b);
b->bi_bdev = rdev2->bdev;
- b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+ b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+ rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
@@ -4478,7 +4467,7 @@ read_more:
bio2 = bio2->bi_next) {
/* Remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
}
goto bio_full;
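The new make_request() wrapper for raid10 above splits a bio only when it actually crosses a chunk boundary, i.e. (bi_sector & chunk_mask) + bio_sectors(bio) > chunk_sects, and only when near_copies is smaller than raid_disks in the current or previous geometry; when near_copies equals raid_disks every member holds a full copy, so crossing a chunk boundary never changes the target device and no split is needed. For example, with chunk_sects = 1024 a 16-sector bio starting at sector 1016 gives 1016 + 16 = 1032 > 1024, so the first 8 sectors are split off. A stand-alone check (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned chunk_sects = 1024, chunk_mask = chunk_sects - 1;
		unsigned long long bi_sector = 1016;
		unsigned nr_sectors = 16;

		if ((bi_sector & chunk_mask) + nr_sectors > chunk_sects)
			printf("split after %llu sectors\n",
			       chunk_sects - (bi_sector & chunk_mask));	/* prints 8 */
		return 0;
	}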
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cbb15716a5db..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
int sectors = bio_sectors(bio);
- if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
- bi->bi_size = 0;
+ bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
bio_endio(bi, 0);
@@ -675,8 +675,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|| !conf->inactive_blocked),
*(conf->hash_locks + hash));
conf->inactive_blocked = 0;
- } else
+ } else {
init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ }
} else {
spin_lock(&conf->device_lock);
if (atomic_read(&sh->count)) {
@@ -695,13 +697,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
sh->group = NULL;
}
}
+ atomic_inc(&sh->count);
spin_unlock(&conf->device_lock);
}
} while (sh == NULL);
- if (sh)
- atomic_inc(&sh->count);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
+ bi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
else
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->data_offset);
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
- rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
sector_t sector, struct dma_async_tx_descriptor *tx)
{
- struct bio_vec *bvl;
+ struct bio_vec bvl;
+ struct bvec_iter iter;
struct page *bio_page;
- int i;
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
+ if (bio->bi_iter.bi_sector >= sector)
+ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
+ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
- bio_for_each_segment(bvl, bio, i) {
- int len = bvl->bv_len;
+ bio_for_each_segment(bvl, bio, iter) {
+ int len = bvl.bv_len;
int clen;
int b_offset = 0;
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bvl->bv_offset;
- bio_page = bvl->bv_page;
+ b_offset += bvl.bv_offset;
+ bio_page = bvl.bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
BUG_ON(!dev->read);
rbi = dev->read;
dev->read = NULL;
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
dev->read = rbi = dev->toread;
dev->toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, dev->page,
dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
@@ -2111,6 +2111,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (!uptodate) {
+ set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -2614,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
int firstwrite=0;
pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_sector,
+ (unsigned long long)bi->bi_iter.bi_sector,
(unsigned long long)sh->sector);
/*
@@ -2632,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
firstwrite = 1;
} else
bip = &sh->dev[dd_idx].toread;
- while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if (bio_end_sector(*bip) > bi->bi_sector)
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
goto overlap;
bip = & (*bip)->bi_next;
}
- if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2651,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector_t sector = sh->dev[dd_idx].sector;
for (bi=sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
- bi && bi->bi_sector <= sector;
+ bi && bi->bi_iter.bi_sector <= sector;
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
if (bio_end_sector(bi) >= sector)
sector = bio_end_sector(bi);
@@ -2661,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)(*bip)->bi_iter.bi_sector,
(unsigned long long)sh->sector, dd_idx);
spin_unlock_irq(&sh->stripe_lock);
@@ -2736,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2755,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2779,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
@@ -3003,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4095,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
- sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
@@ -4232,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
/*
* compute position
*/
- align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
- 0,
- &dd_idx, NULL);
+ align_bi->bi_iter.bi_sector =
+ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+ 0, &dd_idx, NULL);
end_sector = bio_end_sector(align_bi);
rcu_read_lock();
@@ -4259,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+ is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
/* too big in some way, or has a known bad block */
bio_put(align_bi);
@@ -4268,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
/* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
+ align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
@@ -4280,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_sector);
+ raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4463,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
/* Skip discard while reshape is happening */
return;
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4568,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
return;
}
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5052,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
int remaining;
int handled = 0;
- logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = raid_bio->bi_iter.bi_sector &
+ ~((sector_t)STRIPE_SECTORS-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
last_sector = bio_end_sector(raid_bio);
@@ -5511,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5554,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5570,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5585,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
@@ -6100,6 +6101,7 @@ static int run(struct mddev *mddev)
blk_queue_io_min(mddev->queue, chunk_size);
blk_queue_io_opt(mddev->queue, chunk_size *
(conf->raid_disks - conf->max_degraded));
+ mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
* discard data disk but write parity disk
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8270388e2a0d..1d0758aeb8e4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -172,6 +172,9 @@ comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
config MEDIA_SUBDRV_AUTOSELECT
bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT
+ depends on HAS_IOMEM
+ select I2C
+ select I2C_MUX
default y
help
By default, a media driver auto-selects all possible ancillary
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 419a2d6b4349..f19a2ccd1e4b 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -239,6 +239,7 @@
#define USB_PID_AVERMEDIA_A835B_4835 0x4835
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_H335 0x0335
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
@@ -317,6 +318,7 @@
#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
#define USB_PID_GENPIX_8PSK_REV_2 0x0202
@@ -365,6 +367,7 @@
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
#define USB_PID_CPYTO_REDI_PC50A 0xa803
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index f91c80c0e9ec..8a86b3025637 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -179,7 +179,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
eth = eth_hdr(skb);
if (*eth->h_dest & 1) {
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal(eth->h_dest,dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
@@ -674,11 +674,13 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
if (priv->rx_mode != RX_MODE_PROMISC) {
if (priv->ule_skb->data[0] & 0x01) {
/* multicast or broadcast */
- if (memcmp(priv->ule_skb->data, bc_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
/* multicast */
if (priv->rx_mode == RX_MODE_MULTI) {
int i;
- for(i = 0; i < priv->multi_num && memcmp(priv->ule_skb->data, priv->multi_macs[i], ETH_ALEN); i++)
+ for(i = 0; i < priv->multi_num &&
+ !ether_addr_equal(priv->ule_skb->data,
+ priv->multi_macs[i]); i++)
;
if (i == priv->multi_num)
drop = 1;
@@ -688,7 +690,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
}
/* else: broadcast */
}
- else if (memcmp(priv->ule_skb->data, dev->dev_addr, ETH_ALEN))
+ else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
drop = 1;
/* else: destination address matches the MAC address of our receiver device */
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index bddbab43a2df..dd12a1ebda82 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -35,6 +35,13 @@ config DVB_STV6110x
help
A Silicon tuner that supports DVB-S and DVB-S2 modes
+config DVB_M88DS3103
+ tristate "Montage M88DS3103"
+ depends on DVB_CORE && I2C && I2C_MUX
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "Multistandard (cable + terrestrial) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index f9cb43d9aed9..0c75a6aafb9d 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_DVB_STV6110) += stv6110.o
obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
+obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 74fbb5d58bed..780da58132f1 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -96,6 +96,8 @@ static int a8293_set_voltage(struct dvb_frontend *fe,
if (ret)
goto err;
+ usleep_range(1500, 50000);
+
return ret;
err:
dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 476b422ccf19..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -135,15 +135,33 @@
enum cmds {
- CMD_SET_VCO = 0x10,
- CMD_TUNEREQUEST = 0x11,
- CMD_MPEGCONFIG = 0x13,
- CMD_TUNERINIT = 0x14,
- CMD_LNBSEND = 0x21, /* Formerly CMD_SEND_DISEQC */
- CMD_LNBDCLEVEL = 0x22,
- CMD_SET_TONE = 0x23,
- CMD_UPDFWVERS = 0x35,
- CMD_TUNERSLEEP = 0x36,
+ CMD_SET_VCOFREQ = 0x10,
+ CMD_TUNEREQUEST = 0x11,
+ CMD_GLOBAL_MPEGCFG = 0x13,
+ CMD_MPEGCFG = 0x14,
+ CMD_TUNERINIT = 0x15,
+ CMD_GET_SRATE = 0x18,
+ CMD_SET_GOLDCODE = 0x19,
+ CMD_GET_AGCACC = 0x1a,
+ CMD_DEMODINIT = 0x1b,
+ CMD_GETCTLACC = 0x1c,
+
+ CMD_LNBCONFIG = 0x20,
+ CMD_LNBSEND = 0x21,
+ CMD_LNBDCLEVEL = 0x22,
+ CMD_LNBPCBCONFIG = 0x23,
+ CMD_LNBSENDTONEBST = 0x24,
+ CMD_LNBUPDREPLY = 0x25,
+
+ CMD_SET_GPIOMODE = 0x30,
+ CMD_SET_GPIOEN = 0x31,
+ CMD_SET_GPIODIR = 0x32,
+ CMD_SET_GPIOOUT = 0x33,
+ CMD_ENABLERSCORR = 0x34,
+ CMD_FWVERSION = 0x35,
+ CMD_SET_SLEEPMODE = 0x36,
+ CMD_BERCTRL = 0x3c,
+ CMD_EVENTCTRL = 0x3d,
};
static LIST_HEAD(hybrid_tuner_instance_list);
@@ -619,8 +637,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
cx24117_writereg(state, 0xf7, 0x0c);
cx24117_writereg(state, 0xe0, 0x00);
- /* CMD 1B */
- cmd.args[0] = 0x1b;
+ /* Init demodulator */
+ cmd.args[0] = CMD_DEMODINIT;
cmd.args[1] = 0x00;
cmd.args[2] = 0x01;
cmd.args[3] = 0x00;
@@ -629,8 +647,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 10 */
- cmd.args[0] = CMD_SET_VCO;
+ /* Set VCO frequency */
+ cmd.args[0] = CMD_SET_VCOFREQ;
cmd.args[1] = 0x06;
cmd.args[2] = 0x2b;
cmd.args[3] = 0xd8;
@@ -648,8 +666,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 15 */
- cmd.args[0] = 0x15;
+ /* Tuner init */
+ cmd.args[0] = CMD_TUNERINIT;
cmd.args[1] = 0x00;
cmd.args[2] = 0x01;
cmd.args[3] = 0x00;
@@ -667,8 +685,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 13 */
- cmd.args[0] = CMD_MPEGCONFIG;
+ /* Global MPEG config */
+ cmd.args[0] = CMD_GLOBAL_MPEGCFG;
cmd.args[1] = 0x00;
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
@@ -679,9 +697,9 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
if (ret != 0)
goto error;
- /* CMD 14 */
+ /* MPEG config for each demod */
for (i = 0; i < 2; i++) {
- cmd.args[0] = CMD_TUNERINIT;
+ cmd.args[0] = CMD_MPEGCFG;
cmd.args[1] = (u8) i;
cmd.args[2] = 0x00;
cmd.args[3] = 0x05;
@@ -699,8 +717,8 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
cx24117_writereg(state, 0xcf, 0x00);
cx24117_writereg(state, 0xe5, 0x04);
- /* Firmware CMD 35: Get firmware version */
- cmd.args[0] = CMD_UPDFWVERS;
+ /* Get firmware version */
+ cmd.args[0] = CMD_FWVERSION;
cmd.len = 2;
for (i = 0; i < 4; i++) {
cmd.args[1] = i;
@@ -779,8 +797,8 @@ static int cx24117_read_signal_strength(struct dvb_frontend *fe,
u8 reg = (state->demod == 0) ?
CX24117_REG_SSTATUS0 : CX24117_REG_SSTATUS1;
- /* Firmware CMD 1A */
- cmd.args[0] = 0x1a;
+ /* Read AGC accumulator register */
+ cmd.args[0] = CMD_GET_AGCACC;
cmd.args[1] = (u8) state->demod;
cmd.len = 2;
ret = cx24117_cmd_execute(fe, &cmd);
@@ -899,22 +917,15 @@ static int cx24117_set_voltage(struct dvb_frontend *fe,
voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" :
"SEC_VOLTAGE_OFF");
- /* CMD 32 */
- cmd.args[0] = 0x32;
- cmd.args[1] = reg;
- cmd.args[2] = reg;
+ /* Prepare a set GPIO logic level CMD */
+ cmd.args[0] = CMD_SET_GPIOOUT;
+ cmd.args[2] = reg; /* mask */
cmd.len = 3;
- ret = cx24117_cmd_execute(fe, &cmd);
- if (ret)
- return ret;
if ((voltage == SEC_VOLTAGE_13) ||
(voltage == SEC_VOLTAGE_18)) {
- /* CMD 33 */
- cmd.args[0] = 0x33;
+ /* power on LNB */
cmd.args[1] = reg;
- cmd.args[2] = reg;
- cmd.len = 3;
ret = cx24117_cmd_execute(fe, &cmd);
if (ret != 0)
return ret;
@@ -926,22 +937,22 @@ static int cx24117_set_voltage(struct dvb_frontend *fe,
/* Wait for voltage/min repeat delay */
msleep(100);
- /* CMD 22 - CMD_LNBDCLEVEL */
+ /* Set 13V/18V select pin */
cmd.args[0] = CMD_LNBDCLEVEL;
cmd.args[1] = state->demod ? 0 : 1;
cmd.args[2] = (voltage == SEC_VOLTAGE_18 ? 0x01 : 0x00);
cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
/* Min delay time before DiSEqC send */
msleep(20);
} else {
- cmd.args[0] = 0x33;
+ /* power off LNB */
cmd.args[1] = 0x00;
- cmd.args[2] = reg;
- cmd.len = 3;
+ ret = cx24117_cmd_execute(fe, &cmd);
}
- return cx24117_cmd_execute(fe, &cmd);
+ return ret;
}
static int cx24117_set_tone(struct dvb_frontend *fe,
@@ -968,8 +979,7 @@ static int cx24117_set_tone(struct dvb_frontend *fe,
msleep(20);
/* Set the tone */
- /* CMD 23 - CMD_SET_TONE */
- cmd.args[0] = CMD_SET_TONE;
+ cmd.args[0] = CMD_LNBPCBCONFIG;
cmd.args[1] = (state->demod ? 0 : 1);
cmd.args[2] = 0x00;
cmd.args[3] = 0x00;
@@ -1166,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) {
case 0:
- dev_err(&state->priv->i2c->dev,
+ dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod);
goto error1;
@@ -1190,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1;
state->priv = priv;
- /* test i2c bus for ack */
- if (demod == 0) {
- if (cx24117_readreg(state, 0x00) < 0)
- goto error3;
- }
-
dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod);
@@ -1206,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
-error3:
- kfree(state);
error2:
cx24117_release_priv(priv);
error1:
@@ -1231,8 +1233,8 @@ static int cx24117_initfe(struct dvb_frontend *fe)
mutex_lock(&state->priv->fe_lock);
- /* Firmware CMD 36: Power config */
- cmd.args[0] = CMD_TUNERSLEEP;
+ /* Set sleep mode off */
+ cmd.args[0] = CMD_SET_SLEEPMODE;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 0;
cmd.len = 3;
@@ -1244,8 +1246,8 @@ static int cx24117_initfe(struct dvb_frontend *fe)
if (ret != 0)
goto exit;
- /* CMD 3C */
- cmd.args[0] = 0x3c;
+ /* Set BER control */
+ cmd.args[0] = CMD_BERCTRL;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 0x10;
cmd.args[3] = 0x10;
@@ -1254,12 +1256,22 @@ static int cx24117_initfe(struct dvb_frontend *fe)
if (ret != 0)
goto exit;
- /* CMD 34 */
- cmd.args[0] = 0x34;
+ /* Set RS correction (enable/disable) */
+ cmd.args[0] = CMD_ENABLERSCORR;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = CX24117_OCC;
cmd.len = 3;
ret = cx24117_cmd_execute_nolock(fe, &cmd);
+ if (ret != 0)
+ goto exit;
+
+ /* Set GPIO direction */
+ /* Set as output - controls LNB power on/off */
+ cmd.args[0] = CMD_SET_GPIODIR;
+ cmd.args[1] = 0x30;
+ cmd.args[2] = 0x30;
+ cmd.len = 3;
+ ret = cx24117_cmd_execute_nolock(fe, &cmd);
exit:
mutex_unlock(&state->priv->fe_lock);
@@ -1278,8 +1290,8 @@ static int cx24117_sleep(struct dvb_frontend *fe)
dev_dbg(&state->priv->i2c->dev, "%s() demod%d\n",
__func__, state->demod);
- /* Firmware CMD 36: Power config */
- cmd.args[0] = CMD_TUNERSLEEP;
+ /* Set sleep mode on */
+ cmd.args[0] = CMD_SET_SLEEPMODE;
cmd.args[1] = (state->demod ? 1 : 0);
cmd.args[2] = 1;
cmd.len = 3;
@@ -1558,7 +1570,8 @@ static int cx24117_get_frontend(struct dvb_frontend *fe)
u8 buf[0x1f-4];
- cmd.args[0] = 0x1c;
+ /* Read current tune parameters */
+ cmd.args[0] = CMD_GETCTLACC;
cmd.args[1] = (u8) state->demod;
cmd.len = 2;
ret = cx24117_cmd_execute(fe, &cmd);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 6dbbee453ee1..1632d78a5479 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <asm/div64.h>
#include "dvb_math.h"
@@ -118,6 +119,12 @@ struct dib8000_state {
u8 longest_intlv_layer;
u16 output_mode;
+ /* for DVBv5 stats */
+ s64 init_ucb;
+ unsigned long per_jiffies_stats;
+ unsigned long ber_jiffies_stats;
+ unsigned long ber_jiffies_stats_layer[3];
+
#ifdef DIB8000_AGC_FREEZE
u16 agc1_max;
u16 agc1_min;
@@ -157,15 +164,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
return ret;
}
-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
{
u16 ret;
- if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
- return 0;
- }
-
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -183,6 +185,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
dprintk("i2c read error on %d", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+
+ return ret;
+}
+
+static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ ret = __dib8000_read_word(state, reg);
+
mutex_unlock(&state->i2c_buffer_lock);
return ret;
@@ -192,8 +209,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
{
u16 rw[2];
- rw[0] = dib8000_read_word(state, reg + 0);
- rw[1] = dib8000_read_word(state, reg + 1);
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ rw[0] = __dib8000_read_word(state, reg + 0);
+ rw[1] = __dib8000_read_word(state, reg + 1);
+
+ mutex_unlock(&state->i2c_buffer_lock);
return ((rw[0] << 16) | (rw[1]));
}
@@ -787,7 +811,7 @@ int dib8000_update_pll(struct dvb_frontend *fe,
dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
}
-}
+ }
return 0;
}
@@ -966,6 +990,45 @@ static u16 dib8000_identify(struct i2c_device *client)
return value;
}
+static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 *unc);
+
+static void dib8000_reset_stats(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ u32 ucb;
+
+ memset(&c->strength, 0, sizeof(c->strength));
+ memset(&c->cnr, 0, sizeof(c->cnr));
+ memset(&c->post_bit_error, 0, sizeof(c->post_bit_error));
+ memset(&c->post_bit_count, 0, sizeof(c->post_bit_count));
+ memset(&c->block_error, 0, sizeof(c->block_error));
+
+ c->strength.len = 1;
+ c->cnr.len = 1;
+ c->block_error.len = 1;
+ c->block_count.len = 1;
+ c->post_bit_error.len = 1;
+ c->post_bit_count.len = 1;
+
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].uvalue = 0;
+
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ dib8000_read_unc_blocks(fe, &ucb);
+
+ state->init_ucb = -ucb;
+ state->ber_jiffies_stats = 0;
+ state->per_jiffies_stats = 0;
+ memset(&state->ber_jiffies_stats_layer, 0,
+ sizeof(state->ber_jiffies_stats_layer));
+}
+
static int dib8000_reset(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -1071,6 +1134,8 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_set_power_mode(state, DIB8000_POWER_INTERFACE_ONLY);
+ dib8000_reset_stats(fe);
+
return 0;
}
@@ -2445,7 +2510,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
if (state->revision == 0x8090)
internal = dib8000_read32(state, 23) / 1000;
- if (state->autosearch_state == AS_SEARCHING_FFT) {
+ if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_FFT)) {
dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */
dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */
@@ -2481,7 +2547,8 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */
dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */
dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */
- } else if (state->autosearch_state == AS_SEARCHING_GUARD) {
+ } else if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_GUARD)) {
c->transmission_mode = TRANSMISSION_MODE_8K;
c->guard_interval = GUARD_INTERVAL_1_8;
c->inversion = 0;
@@ -2583,7 +2650,8 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
struct dib8000_state *state = fe->demodulator_priv;
u16 irq_pending = dib8000_read_word(state, 1284);
- if (state->autosearch_state == AS_SEARCHING_FFT) {
+ if ((state->revision >= 0x8002) &&
+ (state->autosearch_state == AS_SEARCHING_FFT)) {
if (irq_pending & 0x1) {
dprintk("dib8000_autosearch_irq: max correlation result available");
return 3;
@@ -2853,6 +2921,91 @@ static int dib8090p_init_sdram(struct dib8000_state *state)
return 0;
}
+/**
+ * is_manual_mode - Check if TMCC should be used for parameter settings
+ * @c: pointer to struct dtv_frontend_properties
+ *
+ * By default, the TMCC table should be used for parameter settings in
+ * most use cases. However, it is sometimes desirable to lock the demod
+ * to the manually specified parameters.
+ *
+ * In manual mode, the current dib8000_tune state machine is very
+ * restrictive: it requires both per-layer and per-transponder parameters
+ * to be properly specified, otherwise the device won't lock.
+ *
+ * Check if all those conditions are properly satisfied before allowing
+ * the device to use the manual frequency lock mode.
+ */
+static int is_manual_mode(struct dtv_frontend_properties *c)
+{
+ int i, n_segs = 0;
+
+ /* Use auto mode on DVB-T compat mode */
+ if (c->delivery_system != SYS_ISDBT)
+ return 0;
+
+ /*
+ * Transmission mode is only detected on auto mode, currently
+ */
+ if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
+ dprintk("transmission mode auto");
+ return 0;
+ }
+
+ /*
+ * Guard interval is only detected on auto mode, currently
+ */
+ if (c->guard_interval == GUARD_INTERVAL_AUTO) {
+ dprintk("guard interval auto");
+ return 0;
+ }
+
+ /*
+ * If no layer is enabled, assume auto mode, as at least one
+ * layer should be enabled
+ */
+ if (!c->isdbt_layer_enabled) {
+ dprintk("no layer modulation specified");
+ return 0;
+ }
+
+ /*
+ * Check if the per-layer parameters aren't auto and
+ * disable a layer if segment count is 0 or invalid.
+ */
+ for (i = 0; i < 3; i++) {
+ if (!(c->isdbt_layer_enabled & 1 << i))
+ continue;
+
+ if ((c->layer[i].segment_count > 13) ||
+ (c->layer[i].segment_count == 0)) {
+ c->isdbt_layer_enabled &= ~(1 << i);
+ continue;
+ }
+
+ n_segs += c->layer[i].segment_count;
+
+ if ((c->layer[i].modulation == QAM_AUTO) ||
+ (c->layer[i].fec == FEC_AUTO)) {
+ dprintk("layer %c has either modulation or FEC auto",
+ 'A' + i);
+ return 0;
+ }
+ }
+
+ /*
+ * Userspace specified an invalid number of segments;
+ * fall back to auto mode.
+ */
+ if (n_segs == 0 || n_segs > 13) {
+ dprintk("number of segments is invalid");
+ return 0;
+ }
+
+ /* Everything looks ok for manual mode */
+ return 1;
+}
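
For reference, below is a minimal sketch of the DVBv5 property set userspace would have to send for is_manual_mode() to return 1: every per-transponder and per-layer parameter is explicit, no AUTO values, and one layer is enabled with a valid segment count. This uses the standard DVBv5 property API; the frequency and parameter values are arbitrary illustrations, not taken from this patch.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

static int tune_isdbt_manual(const char *fe_path)
{
        /* all per-transponder and per-layer parameters are explicit */
        struct dtv_property p[] = {
                { .cmd = DTV_DELIVERY_SYSTEM,            .u.data = SYS_ISDBT },
                { .cmd = DTV_FREQUENCY,                  .u.data = 473142857 },
                { .cmd = DTV_TRANSMISSION_MODE,          .u.data = TRANSMISSION_MODE_8K },
                { .cmd = DTV_GUARD_INTERVAL,             .u.data = GUARD_INTERVAL_1_8 },
                { .cmd = DTV_ISDBT_LAYER_ENABLED,        .u.data = 0x01 }, /* layer A only */
                { .cmd = DTV_ISDBT_LAYERA_SEGMENT_COUNT, .u.data = 13 },
                { .cmd = DTV_ISDBT_LAYERA_MODULATION,    .u.data = QAM_64 },
                { .cmd = DTV_ISDBT_LAYERA_FEC,           .u.data = FEC_3_4 },
                { .cmd = DTV_TUNE },
        };
        struct dtv_properties props = { .num = 9, .props = p };
        int fd = open(fe_path, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FE_SET_PROPERTY, &props) < 0)
                return -1;
        return fd;      /* keep the frontend open while tuned */
}

Leaving any of the transmission mode, guard interval, layer modulation or FEC at an AUTO value makes is_manual_mode() fall back to the auto-search path instead.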
+
static int dib8000_tune(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -2878,40 +3031,19 @@ static int dib8000_tune(struct dvb_frontend *fe)
switch (*tune_state) {
case CT_DEMOD_START: /* 30 */
+ dib8000_reset_stats(fe);
+
if (state->revision == 0x8090)
dib8090p_init_sdram(state);
state->status = FE_STATUS_TUNE_PENDING;
- if ((c->delivery_system != SYS_ISDBT) ||
- (c->inversion == INVERSION_AUTO) ||
- (c->transmission_mode == TRANSMISSION_MODE_AUTO) ||
- (c->guard_interval == GUARD_INTERVAL_AUTO) ||
- (((c->isdbt_layer_enabled & (1 << 0)) != 0) &&
- (c->layer[0].segment_count != 0xff) &&
- (c->layer[0].segment_count != 0) &&
- ((c->layer[0].modulation == QAM_AUTO) ||
- (c->layer[0].fec == FEC_AUTO))) ||
- (((c->isdbt_layer_enabled & (1 << 1)) != 0) &&
- (c->layer[1].segment_count != 0xff) &&
- (c->layer[1].segment_count != 0) &&
- ((c->layer[1].modulation == QAM_AUTO) ||
- (c->layer[1].fec == FEC_AUTO))) ||
- (((c->isdbt_layer_enabled & (1 << 2)) != 0) &&
- (c->layer[2].segment_count != 0xff) &&
- (c->layer[2].segment_count != 0) &&
- ((c->layer[2].modulation == QAM_AUTO) ||
- (c->layer[2].fec == FEC_AUTO))) ||
- (((c->layer[0].segment_count == 0) ||
- ((c->isdbt_layer_enabled & (1 << 0)) == 0)) &&
- ((c->layer[1].segment_count == 0) ||
- ((c->isdbt_layer_enabled & (2 << 0)) == 0)) &&
- ((c->layer[2].segment_count == 0) || ((c->isdbt_layer_enabled & (3 << 0)) == 0))))
- state->channel_parameters_set = 0; /* auto search */
- else
- state->channel_parameters_set = 1; /* channel parameters are known */
+ state->channel_parameters_set = is_manual_mode(c);
+
+ dprintk("Tuning channel on %s search mode",
+ state->channel_parameters_set ? "manual" : "auto");
dib8000_viterbi_state(state, 0); /* force chan dec in restart */
- /* Layer monit */
+ /* Layer monitor */
dib8000_write_word(state, 285, dib8000_read_word(state, 285) & 0x60);
dib8000_set_frequency_offset(state);
@@ -3256,15 +3388,27 @@ static int dib8000_sleep(struct dvb_frontend *fe)
return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF);
}
+static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat);
+
static int dib8000_get_frontend(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
u16 i, val = 0;
- fe_status_t stat;
+ fe_status_t stat = 0;
u8 index_frontend, sub_index_frontend;
fe->dtv_property_cache.bandwidth_hz = 6000000;
+ /*
+ * If called too early, get_frontend makes dib8000_tune either fail
+ * to lock or lose sync. This causes dvbv5-scan/dvbv5-zap to fail.
+ * So, let's just return if frontend 0 has not locked.
+ */
+ dib8000_read_status(fe, &stat);
+ if (!(stat & FE_HAS_SYNC))
+ return 0;
+
+ dprintk("TMCC lock");
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat&FE_HAS_SYNC) {
@@ -3335,9 +3479,13 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
fe->dtv_property_cache.layer[i].segment_count = val & 0x0F;
dprintk("dib8000_get_frontend : Layer %d segments = %d ", i, fe->dtv_property_cache.layer[i].segment_count);
- val = dib8000_read_word(state, 499 + i);
- fe->dtv_property_cache.layer[i].interleaving = val & 0x3;
- dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ", i, fe->dtv_property_cache.layer[i].interleaving);
+ val = dib8000_read_word(state, 499 + i) & 0x3;
+ /* Interleaving can be 0, 1, 2 or 4 */
+ if (val == 3)
+ val = 4;
+ fe->dtv_property_cache.layer[i].interleaving = val;
+ dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ",
+ i, fe->dtv_property_cache.layer[i].interleaving);
val = dib8000_read_word(state, 481 + i);
switch (val & 0x7) {
@@ -3556,6 +3704,8 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
return 0;
}
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat);
+
static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -3593,6 +3743,7 @@ static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
if (lock & 0x01)
*stat |= FE_HAS_VITERBI;
}
+ dib8000_get_stats(fe, *stat);
return 0;
}
@@ -3699,6 +3850,357 @@ static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr)
return 0;
}
+struct per_layer_regs {
+ u16 lock, ber, per;
+};
+
+static const struct per_layer_regs per_layer_regs[] = {
+ { 554, 560, 562 },
+ { 555, 576, 578 },
+ { 556, 581, 583 },
+};
+
+struct linear_segments {
+ unsigned x;
+ signed y;
+};
+
+/*
+ * Table to estimate signal strength in dBm.
+ * This table was empirically determined by measuring the signal
+ * strength generated by a DTA-2111 RF generator directly connected to
+ * a dib8076 device (a PixelView PV-D231U stick), using a good quality
+ * 3-meter RC6 cable and good RC6 connectors.
+ * The real value may differ on other devices, depending on several
+ * factors, such as whether the LNA or diversity is enabled, the type
+ * of connectors, etc.
+ * Still, it is better to use this measure in dB than a random
+ * non-linear percentage value, especially for antenna adjustments.
+ * In my tests, the precision of the measure using this table is about
+ * 0.5 dB, which sounds reasonable enough.
+ */
+static struct linear_segments strength_to_db_table[] = {
+ { 55953, 108500 }, /* -22.5 dBm */
+ { 55394, 108000 },
+ { 53834, 107000 },
+ { 52863, 106000 },
+ { 52239, 105000 },
+ { 52012, 104000 },
+ { 51803, 103000 },
+ { 51566, 102000 },
+ { 51356, 101000 },
+ { 51112, 100000 },
+ { 50869, 99000 },
+ { 50600, 98000 },
+ { 50363, 97000 },
+ { 50117, 96000 }, /* -35 dBm */
+ { 49889, 95000 },
+ { 49680, 94000 },
+ { 49493, 93000 },
+ { 49302, 92000 },
+ { 48929, 91000 },
+ { 48416, 90000 },
+ { 48035, 89000 },
+ { 47593, 88000 },
+ { 47282, 87000 },
+ { 46953, 86000 },
+ { 46698, 85000 },
+ { 45617, 84000 },
+ { 44773, 83000 },
+ { 43845, 82000 },
+ { 43020, 81000 },
+ { 42010, 80000 }, /* -51 dBm */
+ { 0, 0 },
+};
+
+static u32 interpolate_value(u32 value, struct linear_segments *segments,
+ unsigned len)
+{
+ u64 tmp64;
+ u32 dx;
+ s32 dy;
+ int i, ret;
+
+ if (value >= segments[0].x)
+ return segments[0].y;
+ if (value < segments[len-1].x)
+ return segments[len-1].y;
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == segments[i].x)
+ return segments[i].y;
+ if (value > segments[i].x)
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = segments[i - 1].y - segments[i].y;
+ dx = segments[i - 1].x - segments[i].x;
+
+ tmp64 = value - segments[i].x;
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = segments[i].y + tmp64;
+
+ return ret;
+}
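
As an illustrative walk-through of the interpolation above (the numbers are chosen for the example, not part of the patch): a raw strength reading of 48000 falls between the table entries (48035, 89000) and (47593, 88000), so interpolate_value() computes

        dy  = 89000 - 88000 = 1000;
        dx  = 48035 - 47593 =  442;
        ret = 88000 + (48000 - 47593) * 1000 / 442 = 88920;

dib8000_get_stats() then subtracts the 131000 offset, reporting -42080 on the DVBv5 FE_SCALE_DECIBEL scale (0.001 dB units), i.e. roughly -42.1 dBm.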
+
+static u32 dib8000_get_time_us(struct dvb_frontend *fe, int layer)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ int ini_layer, end_layer, i;
+ u64 time_us, tmp64;
+ u32 tmp, denom;
+ int guard, rate_num, rate_denum = 1, bits_per_symbol, nsegs;
+ int interleaving = 0, fft_div;
+
+ if (layer >= 0) {
+ ini_layer = layer;
+ end_layer = layer + 1;
+ } else {
+ ini_layer = 0;
+ end_layer = 3;
+ }
+
+ switch (c->guard_interval) {
+ case GUARD_INTERVAL_1_4:
+ guard = 4;
+ break;
+ case GUARD_INTERVAL_1_8:
+ guard = 8;
+ break;
+ case GUARD_INTERVAL_1_16:
+ guard = 16;
+ break;
+ default:
+ case GUARD_INTERVAL_1_32:
+ guard = 32;
+ break;
+ }
+
+ switch (c->transmission_mode) {
+ case TRANSMISSION_MODE_2K:
+ fft_div = 4;
+ break;
+ case TRANSMISSION_MODE_4K:
+ fft_div = 2;
+ break;
+ default:
+ case TRANSMISSION_MODE_8K:
+ fft_div = 1;
+ break;
+ }
+
+ denom = 0;
+ for (i = ini_layer; i < end_layer; i++) {
+ nsegs = c->layer[i].segment_count;
+ if (nsegs == 0 || nsegs > 13)
+ continue;
+
+ switch (c->layer[i].modulation) {
+ case DQPSK:
+ case QPSK:
+ bits_per_symbol = 2;
+ break;
+ case QAM_16:
+ bits_per_symbol = 4;
+ break;
+ default:
+ case QAM_64:
+ bits_per_symbol = 6;
+ break;
+ }
+
+ switch (c->layer[i].fec) {
+ case FEC_1_2:
+ rate_num = 1;
+ rate_denum = 2;
+ break;
+ case FEC_2_3:
+ rate_num = 2;
+ rate_denum = 3;
+ break;
+ case FEC_3_4:
+ rate_num = 3;
+ rate_denum = 4;
+ break;
+ case FEC_5_6:
+ rate_num = 5;
+ rate_denum = 6;
+ break;
+ default:
+ case FEC_7_8:
+ rate_num = 7;
+ rate_denum = 8;
+ break;
+ }
+
+ interleaving = c->layer[i].interleaving;
+
+ denom += bits_per_symbol * rate_num * fft_div * nsegs * 384;
+ }
+
+ /* If all goes wrong, wait for 1s for the next stats */
+ if (!denom)
+ return 0;
+
+ /* Estimate the period for the total bit rate */
+ time_us = rate_denum * (1008 * 1562500L);
+ tmp64 = time_us;
+ do_div(tmp64, guard);
+ time_us = time_us + tmp64;
+ time_us += denom / 2;
+ do_div(time_us, denom);
+
+ tmp = 1008 * 96 * interleaving;
+ time_us += tmp + tmp / guard;
+
+ return time_us;
+}
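
For illustration only (the configuration below is an arbitrary example, not taken from this patch): calling dib8000_get_time_us() for a single 13-segment layer with QAM-64, FEC 3/4, 8K FFT, guard interval 1/8 and time interleaving 2 works out to

        denom    = 6 * 3 * 1 * 13 * 384 = 89856;
        time_us  = (4 * 1008 * 1562500 * 9 / 8 + 89856 / 2) / 89856;   /* ~= 78876 */
        time_us += 1008 * 96 * 2 + (1008 * 96 * 2) / 8;                /* += 217728 */
        /* total: ~296604 us */

so dib8000_get_stats() schedules the next per-layer BER read roughly 297 ms later via msecs_to_jiffies((time_us + 500) / 1000).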
+
+static int dib8000_get_stats(struct dvb_frontend *fe, fe_status_t stat)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache;
+ int i;
+ int show_per_stats = 0;
+ u32 time_us = 0, snr, val;
+ u64 blocks;
+ s32 db;
+ u16 strength;
+
+ /* Get Signal strength */
+ dib8000_read_signal_strength(fe, &strength);
+ val = strength;
+ db = interpolate_value(val,
+ strength_to_db_table,
+ ARRAY_SIZE(strength_to_db_table)) - 131000;
+ c->strength.stat[0].svalue = db;
+
+ /* UCB/BER/CNR measures require lock */
+ if (!(stat & FE_HAS_LOCK)) {
+ c->cnr.len = 1;
+ c->block_count.len = 1;
+ c->block_error.len = 1;
+ c->post_bit_error.len = 1;
+ c->post_bit_count.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return 0;
+ }
+
+ /* Check if time for stats was elapsed */
+ if (time_after(jiffies, state->per_jiffies_stats)) {
+ state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+ /* Get SNR */
+ snr = dib8000_get_snr(fe);
+ for (i = 1; i < MAX_NUMBER_OF_FRONTENDS; i++) {
+ if (state->fe[i])
+ snr += dib8000_get_snr(state->fe[i]);
+ }
+ snr = snr >> 16;
+
+ if (snr) {
+ snr = 10 * intlog10(snr);
+ snr = (1000L * snr) >> 24;
+ } else {
+ snr = 0;
+ }
+ c->cnr.stat[0].svalue = snr;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+
+ /* Get UCB measures */
+ dib8000_read_unc_blocks(fe, &val);
+ if (val < state->init_ucb)
+ state->init_ucb += 0x100000000LL;
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = val + state->init_ucb;
+
+ /* Estimate the number of packets based on bitrate */
+ if (!time_us)
+ time_us = dib8000_get_time_us(fe, -1);
+
+ if (time_us) {
+ blocks = 1250000ULL * 1000000ULL;
+ do_div(blocks, time_us * 8 * 204);
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += blocks;
+ }
+
+ show_per_stats = 1;
+ }
+
+ /* Get post-BER measures */
+ if (time_after(jiffies, state->ber_jiffies_stats)) {
+ time_us = dib8000_get_time_us(fe, -1);
+ state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+
+ dprintk("Next all layers stats available in %u us.", time_us);
+
+ dib8000_read_ber(fe, &val);
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += val;
+
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += 100000000;
+ }
+
+ if (state->revision < 0x8002)
+ return 0;
+
+ c->block_error.len = 4;
+ c->post_bit_error.len = 4;
+ c->post_bit_count.len = 4;
+
+ for (i = 0; i < 3; i++) {
+ unsigned nsegs = c->layer[i].segment_count;
+
+ if (nsegs == 0 || nsegs > 13)
+ continue;
+
+ time_us = 0;
+
+ if (time_after(jiffies, state->ber_jiffies_stats_layer[i])) {
+ time_us = dib8000_get_time_us(fe, i);
+
+ state->ber_jiffies_stats_layer[i] = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
+ dprintk("Next layer %c stats will be available in %u us\n",
+ 'A' + i, time_us);
+
+ val = dib8000_read_word(state, per_layer_regs[i].ber);
+ c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[1 + i].uvalue += val;
+
+ c->post_bit_count.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[1 + i].uvalue += 100000000;
+ }
+
+ if (show_per_stats) {
+ val = dib8000_read_word(state, per_layer_regs[i].per);
+
+ c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[1 + i].uvalue += val;
+
+ if (!time_us)
+ time_us = dib8000_get_time_us(fe, i);
+ if (time_us) {
+ blocks = 1250000ULL * 1000000ULL;
+ do_div(blocks, time_us * 8 * 204);
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += blocks;
+ }
+ }
+ }
+ return 0;
+}
+
int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
{
struct dib8000_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index f22eb9f13ad5..f6cb34660327 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -29,7 +29,6 @@
* A value of 0 (default) or lower indicates that
* the correct number of parameters will be
* automatically detected.
- * @load_firmware_sync: Force the firmware load to be synchronous.
*
* On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
* UIO-3.
@@ -41,7 +40,6 @@ struct drxk_config {
bool parallel_ts;
bool dynamic_clk;
bool enable_merr_cfg;
- bool load_firmware_sync;
bool antenna_dvbt;
u16 antenna_gpio;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index bf29a3f0e6f0..cce94a75b2e1 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6830,25 +6830,13 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
/* Load firmware and initialize DRX-K */
if (state->microcode_name) {
- if (config->load_firmware_sync) {
- const struct firmware *fw = NULL;
+ const struct firmware *fw = NULL;
- status = request_firmware(&fw, state->microcode_name,
- state->i2c->dev.parent);
- if (status < 0)
- fw = NULL;
- load_firmware_cb(fw, state);
- } else {
- status = request_firmware_nowait(THIS_MODULE, 1,
- state->microcode_name,
- state->i2c->dev.parent,
- GFP_KERNEL,
- state, load_firmware_cb);
- if (status < 0) {
- pr_err("failed to request a firmware\n");
- return NULL;
- }
- }
+ status = request_firmware(&fw, state->microcode_name,
+ state->i2c->dev.parent);
+ if (status < 0)
+ fw = NULL;
+ load_firmware_cb(fw, state);
} else if (init_drxk(state) < 0)
goto error;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
new file mode 100644
index 000000000000..b8a7897e7bd8
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -0,0 +1,1311 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "m88ds3103_priv.h"
+
+static struct dvb_frontend_ops m88ds3103_ops;
+
+/* write multiple registers */
+static int m88ds3103_wr_regs(struct m88ds3103_priv *priv,
+ u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 32
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+ int ret;
+ u8 buf[MAX_WR_XFER_LEN];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_WR_LEN))
+ return -EINVAL;
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ mutex_lock(&priv->i2c_mutex);
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ mutex_unlock(&priv->i2c_mutex);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int m88ds3103_rd_regs(struct m88ds3103_priv *priv,
+ u8 reg, u8 *val, int len)
+{
+#define MAX_RD_LEN 3
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+ int ret;
+ u8 buf[MAX_RD_XFER_LEN];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->cfg->i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_RD_LEN))
+ return -EINVAL;
+
+ mutex_lock(&priv->i2c_mutex);
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ mutex_unlock(&priv->i2c_mutex);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+ ret = 0;
+ } else {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int m88ds3103_wr_reg(struct m88ds3103_priv *priv, u8 reg, u8 val)
+{
+ return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ds3103_rd_reg(struct m88ds3103_priv *priv, u8 reg, u8 *val)
+{
+ return m88ds3103_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ds3103_wr_reg_mask(struct m88ds3103_priv *priv,
+ u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 u8tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ u8tmp &= ~mask;
+ val |= u8tmp;
+ }
+
+ return m88ds3103_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register with mask */
+static int m88ds3103_rd_reg_mask(struct m88ds3103_priv *priv,
+ u8 reg, u8 *val, u8 mask)
+{
+ int ret, i;
+ u8 u8tmp;
+
+ ret = m88ds3103_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ u8tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = u8tmp >> i;
+
+ return 0;
+}
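
A quick illustration of the mask semantics in the two helpers above (register values invented for the example): writing val = 0x03 with mask = 0x0f to a register that currently holds 0x5a keeps the unmasked bits and merges the new field, while a masked read right-aligns the extracted field:

        new = (0x5a & ~0x0f) | (0x03 & 0x0f);   /* = 0x53 */
        val = (0x5a &  0x30) >> 4;              /* = 0x01, mask 0x30 starts at bit 4 */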
+
+/* write reg val table using reg addr auto increment */
+static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
+ const struct m88ds3103_reg_val *tab, int tab_len)
+{
+ int ret, i, j;
+ u8 buf[83];
+ dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+
+ if (tab_len > 83) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0, j = 0; i < tab_len; i++, j++) {
+ buf[j] = tab[i].val;
+
+ if (i == tab_len - 1 || tab[i].reg != tab[i + 1].reg - 1 ||
+ !((j + 1) % (priv->cfg->i2c_wr_max - 1))) {
+ ret = m88ds3103_wr_regs(priv, tab[i].reg - j, buf, j + 1);
+ if (ret)
+ goto err;
+
+ j = -1;
+ }
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 u8tmp;
+
+ *status = 0;
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ ret = m88ds3103_rd_reg_mask(priv, 0xd1, &u8tmp, 0x07);
+ if (ret)
+ goto err;
+
+ if (u8tmp == 0x07)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ break;
+ case SYS_DVBS2:
+ ret = m88ds3103_rd_reg_mask(priv, 0x0d, &u8tmp, 0x8f);
+ if (ret)
+ goto err;
+
+ if (u8tmp == 0x8f)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->fe_status = *status;
+
+ dev_dbg(&priv->i2c->dev, "%s: lock=%02x status=%02x\n",
+ __func__, u8tmp, *status);
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_set_frontend(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, len;
+ const struct m88ds3103_reg_val *init;
+ u8 u8tmp, u8tmp1, u8tmp2;
+ u8 buf[2];
+ u16 u16tmp, divide_ratio;
+ u32 tuner_frequency, target_mclk, ts_clk;
+ s32 s32tmp;
+ dev_dbg(&priv->i2c->dev,
+ "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
+ __func__, c->delivery_system,
+ c->modulation, c->frequency, c->symbol_rate,
+ c->inversion, c->pilot, c->rolloff);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
+
+ if (fe->ops.tuner_ops.get_frequency) {
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
+ if (ret)
+ goto err;
+ }
+
+ /* reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
+ init = m88ds3103_dvbs_init_reg_vals;
+ target_mclk = 96000;
+ break;
+ case SYS_DVBS2:
+ len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
+ init = m88ds3103_dvbs2_init_reg_vals;
+
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ case M88DS3103_TS_SERIAL_D7:
+ if (c->symbol_rate < 18000000)
+ target_mclk = 96000;
+ else
+ target_mclk = 144000;
+ break;
+ case M88DS3103_TS_PARALLEL:
+ case M88DS3103_TS_PARALLEL_12:
+ case M88DS3103_TS_PARALLEL_16:
+ case M88DS3103_TS_PARALLEL_19_2:
+ case M88DS3103_TS_CI:
+ if (c->symbol_rate < 18000000)
+ target_mclk = 96000;
+ else if (c->symbol_rate < 28000000)
+ target_mclk = 144000;
+ else
+ target_mclk = 192000;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program init table */
+ if (c->delivery_system != priv->delivery_system) {
+ ret = m88ds3103_wr_reg_val_tab(priv, init, len);
+ if (ret)
+ goto err;
+ }
+
+ u8tmp1 = 0; /* silence compiler warning */
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ u8tmp1 = 0x00;
+ ts_clk = 0;
+ u8tmp = 0x46;
+ break;
+ case M88DS3103_TS_SERIAL_D7:
+ u8tmp1 = 0x20;
+ ts_clk = 0;
+ u8tmp = 0x46;
+ break;
+ case M88DS3103_TS_PARALLEL:
+ ts_clk = 24000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_12:
+ ts_clk = 12000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_16:
+ ts_clk = 16000;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_PARALLEL_19_2:
+ ts_clk = 19200;
+ u8tmp = 0x42;
+ break;
+ case M88DS3103_TS_CI:
+ ts_clk = 6000;
+ u8tmp = 0x43;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* TS mode */
+ ret = m88ds3103_wr_reg(priv, 0xfd, u8tmp);
+ if (ret)
+ goto err;
+
+ switch (priv->cfg->ts_mode) {
+ case M88DS3103_TS_SERIAL:
+ case M88DS3103_TS_SERIAL_D7:
+ ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
+ if (ret)
+ goto err;
+ }
+
+ if (ts_clk) {
+ divide_ratio = DIV_ROUND_UP(target_mclk, ts_clk);
+ u8tmp1 = divide_ratio / 2;
+ u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
+ } else {
+ divide_ratio = 0;
+ u8tmp1 = 0;
+ u8tmp2 = 0;
+ }
+
+ dev_dbg(&priv->i2c->dev,
+ "%s: target_mclk=%d ts_clk=%d divide_ratio=%d\n",
+ __func__, target_mclk, ts_clk, divide_ratio);
+
+ u8tmp1--;
+ u8tmp2--;
+ /* u8tmp1[5:2] => fe[3:0], u8tmp1[1:0] => ea[7:6] */
+ u8tmp1 &= 0x3f;
+ /* u8tmp2[5:0] => ea[5:0] */
+ u8tmp2 &= 0x3f;
+
+ ret = m88ds3103_rd_reg(priv, 0xfe, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp = ((u8tmp & 0xf0) << 0) | u8tmp1 >> 2;
+ ret = m88ds3103_wr_reg(priv, 0xfe, u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp = ((u8tmp1 & 0x03) << 6) | u8tmp2 >> 0;
+ ret = m88ds3103_wr_reg(priv, 0xea, u8tmp);
+ if (ret)
+ goto err;
+
+ switch (target_mclk) {
+ case 72000:
+ u8tmp1 = 0x00; /* 0b00 */
+ u8tmp2 = 0x03; /* 0b11 */
+ break;
+ case 96000:
+ u8tmp1 = 0x02; /* 0b10 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 115200:
+ u8tmp1 = 0x01; /* 0b01 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 144000:
+ u8tmp1 = 0x00; /* 0b00 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 192000:
+ u8tmp1 = 0x03; /* 0b11 */
+ u8tmp2 = 0x00; /* 0b00 */
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid target_mclk\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
+ if (ret)
+ goto err;
+
+ if (c->symbol_rate <= 3000000)
+ u8tmp = 0x20;
+ else if (c->symbol_rate <= 10000000)
+ u8tmp = 0x10;
+ else
+ u8tmp = 0x06;
+
+ ret = m88ds3103_wr_reg(priv, 0xc3, 0x08);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc8, u8tmp);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc4, 0x08);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xc7, 0x00);
+ if (ret)
+ goto err;
+
+ u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2);
+ buf[0] = (u16tmp >> 0) & 0xff;
+ buf[1] = (u16tmp >> 8) & 0xff;
+ ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x4d, priv->cfg->spec_inv << 1, 0x02);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x30, priv->cfg->agc_inv << 4, 0x10);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x33, priv->cfg->agc);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: carrier offset=%d\n", __func__,
+ (tuner_frequency - c->frequency));
+
+ s32tmp = 0x10000 * (tuner_frequency - c->frequency);
+ s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ);
+ if (s32tmp < 0)
+ s32tmp += 0x10000;
+
+ buf[0] = (s32tmp >> 0) & 0xff;
+ buf[1] = (s32tmp >> 8) & 0xff;
+ ret = m88ds3103_wr_regs(priv, 0x5e, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+ if (ret)
+ goto err;
+
+ priv->delivery_system = c->delivery_system;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_init(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, len, remaining;
+ const struct firmware *fw = NULL;
+ u8 *fw_file = M88DS3103_FIRMWARE;
+ u8 u8tmp;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ /* set cold state by default */
+ priv->warm = false;
+
+ /* wake up device from sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x00, 0x10);
+ if (ret)
+ goto err;
+
+ /* reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
+ /* firmware status */
+ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: firmware=%02x\n", __func__, u8tmp);
+
+ if (u8tmp)
+ goto skip_fw_download;
+
+ /* cold state - try to download firmware */
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
+ KBUILD_MODNAME, m88ds3103_ops.info.name);
+
+ /* request the firmware; this will block and may time out */
+ ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
+ if (ret) {
+ dev_err(&priv->i2c->dev, "%s: firmware file '%s' not found\n",
+ KBUILD_MODNAME, fw_file);
+ goto err;
+ }
+
+ dev_info(&priv->i2c->dev, "%s: downloading firmware from file '%s'\n",
+ KBUILD_MODNAME, fw_file);
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+ goto err;
+
+ for (remaining = fw->size; remaining > 0;
+ remaining -= (priv->cfg->i2c_wr_max - 1)) {
+ len = remaining;
+ if (len > (priv->cfg->i2c_wr_max - 1))
+ len = (priv->cfg->i2c_wr_max - 1);
+
+ ret = m88ds3103_wr_regs(priv, 0xb0,
+ &fw->data[fw->size - remaining], len);
+ if (ret) {
+ dev_err(&priv->i2c->dev,
+ "%s: firmware download failed=%d\n",
+ KBUILD_MODNAME, ret);
+ goto err;
+ }
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
+ if (ret)
+ goto err;
+
+ release_firmware(fw);
+ fw = NULL;
+
+ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
+ if (ret)
+ goto err;
+
+ if (!u8tmp) {
+ dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
+ KBUILD_MODNAME);
+ ret = -EFAULT;
+ goto err;
+ }
+
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
+ KBUILD_MODNAME, m88ds3103_ops.info.name);
+ dev_info(&priv->i2c->dev, "%s: firmware version %X.%X\n",
+ KBUILD_MODNAME, (u8tmp >> 4) & 0xf, (u8tmp >> 0 & 0xf));
+
+skip_fw_download:
+ /* warm state */
+ priv->warm = true;
+
+ return 0;
+err:
+ if (fw)
+ release_firmware(fw);
+
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_sleep(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ priv->delivery_system = SYS_UNDEFINED;
+
+ /* TS Hi-Z */
+ ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_get_frontend(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[3];
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+
+ if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ ret = m88ds3103_rd_reg(priv, 0xe0, &buf[0]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0xe6, &buf[1]);
+ if (ret)
+ goto err;
+
+ switch ((buf[0] >> 2) & 0x01) {
+ case 0:
+ c->inversion = INVERSION_OFF;
+ break;
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+ __func__);
+ }
+
+ switch ((buf[1] >> 5) & 0x07) {
+ case 0:
+ c->fec_inner = FEC_7_8;
+ break;
+ case 1:
+ c->fec_inner = FEC_5_6;
+ break;
+ case 2:
+ c->fec_inner = FEC_3_4;
+ break;
+ case 3:
+ c->fec_inner = FEC_2_3;
+ break;
+ case 4:
+ c->fec_inner = FEC_1_2;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+ __func__);
+ }
+
+ c->modulation = QPSK;
+
+ break;
+ case SYS_DVBS2:
+ ret = m88ds3103_rd_reg(priv, 0x7e, &buf[0]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0x89, &buf[1]);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_rd_reg(priv, 0xf2, &buf[2]);
+ if (ret)
+ goto err;
+
+ switch ((buf[0] >> 0) & 0x0f) {
+ case 2:
+ c->fec_inner = FEC_2_5;
+ break;
+ case 3:
+ c->fec_inner = FEC_1_2;
+ break;
+ case 4:
+ c->fec_inner = FEC_3_5;
+ break;
+ case 5:
+ c->fec_inner = FEC_2_3;
+ break;
+ case 6:
+ c->fec_inner = FEC_3_4;
+ break;
+ case 7:
+ c->fec_inner = FEC_4_5;
+ break;
+ case 8:
+ c->fec_inner = FEC_5_6;
+ break;
+ case 9:
+ c->fec_inner = FEC_8_9;
+ break;
+ case 10:
+ c->fec_inner = FEC_9_10;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fec_inner\n",
+ __func__);
+ }
+
+ switch ((buf[0] >> 5) & 0x01) {
+ case 0:
+ c->pilot = PILOT_OFF;
+ break;
+ case 1:
+ c->pilot = PILOT_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
+ __func__);
+ }
+
+ switch ((buf[0] >> 6) & 0x07) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = PSK_8;
+ break;
+ case 2:
+ c->modulation = APSK_16;
+ break;
+ case 3:
+ c->modulation = APSK_32;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid modulation\n",
+ __func__);
+ }
+
+ switch ((buf[1] >> 7) & 0x01) {
+ case 0:
+ c->inversion = INVERSION_OFF;
+ break;
+ case 1:
+ c->inversion = INVERSION_ON;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n",
+ __func__);
+ }
+
+ switch ((buf[2] >> 0) & 0x03) {
+ case 0:
+ c->rolloff = ROLLOFF_35;
+ break;
+ case 1:
+ c->rolloff = ROLLOFF_25;
+ break;
+ case 2:
+ c->rolloff = ROLLOFF_20;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
+ __func__);
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_rd_regs(priv, 0x6d, buf, 2);
+ if (ret)
+ goto err;
+
+ c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
+ M88DS3103_MCLK_KHZ * 1000 / 0x10000;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
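
Assuming M88DS3103_MCLK_KHZ is 96000 (it is defined in m88ds3103_priv.h; treated as an assumption here), the symbol-rate readback above is simply the inverse of the register value programmed in m88ds3103_set_frontend(). For a 27.5 MS/s transponder:

        u16tmp      = DIV_ROUND_CLOSEST((27500 << 15), 96000 / 2);   /* = 18773 */
        symbol_rate = 18773ull * 96000 * 1000 / 0x10000;             /* ~= 27499511 Sym/s */

so the value reported back to userspace lands within a few hundred symbols per second of the requested rate, limited by the 16-bit register resolution.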
+
+static int m88ds3103_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, tmp;
+ u8 buf[3];
+ u16 noise, signal;
+ u32 noise_tot, signal_tot;
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
+ /* reports SNR in resolution of 0.1 dB */
+
+ /* more iterations for more accurate estimation */
+ #define M88DS3103_SNR_ITERATIONS 3
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ tmp = 0;
+
+ for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+ ret = m88ds3103_rd_reg(priv, 0xff, &buf[0]);
+ if (ret)
+ goto err;
+
+ tmp += buf[0];
+ }
+
+ /* use of one register limits max value to 15 dB */
+ /* SNR(X) dB = 10 * ln(X) / ln(10) dB */
+ tmp = DIV_ROUND_CLOSEST(tmp, 8 * M88DS3103_SNR_ITERATIONS);
+ if (tmp)
+ *snr = 100ul * intlog2(tmp) / intlog2(10);
+ else
+ *snr = 0;
+ break;
+ case SYS_DVBS2:
+ noise_tot = 0;
+ signal_tot = 0;
+
+ for (i = 0; i < M88DS3103_SNR_ITERATIONS; i++) {
+ ret = m88ds3103_rd_regs(priv, 0x8c, buf, 3);
+ if (ret)
+ goto err;
+
+ noise = buf[1] << 6; /* [13:6] */
+ noise |= buf[0] & 0x3f; /* [5:0] */
+ noise >>= 2;
+ signal = buf[2] * buf[2];
+ signal >>= 1;
+
+ noise_tot += noise;
+ signal_tot += signal;
+ }
+
+ noise = noise_tot / M88DS3103_SNR_ITERATIONS;
+ signal = signal_tot / M88DS3103_SNR_ITERATIONS;
+
+ /* SNR(X) dB = 10 * log10(X) dB */
+ if (signal > noise) {
+ tmp = signal / noise;
+ *snr = 100ul * intlog10(tmp) / (1 << 24);
+ } else {
+ *snr = 0;
+ }
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+
+static int m88ds3103_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t fe_sec_tone_mode)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 u8tmp, tone, reg_a1_mask;
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_tone_mode=%d\n", __func__,
+ fe_sec_tone_mode);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (fe_sec_tone_mode) {
+ case SEC_TONE_ON:
+ tone = 0;
+ reg_a1_mask = 0x87;
+ break;
+ case SEC_TONE_OFF:
+ tone = 1;
+ reg_a1_mask = 0x00;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ u8tmp = tone << 7 | priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ u8tmp = 1 << 2;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa1, u8tmp, reg_a1_mask);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *diseqc_cmd)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp;
+ dev_dbg(&priv->i2c->dev, "%s: msg=%*ph\n", __func__,
+ diseqc_cmd->msg_len, diseqc_cmd->msg);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ u8tmp = priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_regs(priv, 0xa3, diseqc_cmd->msg,
+ diseqc_cmd->msg_len);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0xa1,
+ (diseqc_cmd->msg_len - 1) << 3 | 0x07);
+ if (ret)
+ goto err;
+
+ /* DiSEqC message typical period is 54 ms */
+ usleep_range(40000, 60000);
+
+ /* wait DiSEqC TX ready */
+ for (i = 20, u8tmp = 1; i && u8tmp; i--) {
+ usleep_range(5000, 10000);
+
+ ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+ if (ret)
+ goto err;
+ }
+
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+ if (i == 0) {
+ dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa1, 0x40, 0xc0);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+ if (ret)
+ goto err;
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
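For reference, the wait logic above has roughly this timing budget (a back-of-the-envelope check, not part of the patch):

	/*
	 * initial sleep:    40-60 ms  (a typical DiSEqC message lasts ~54 ms)
	 * ready-poll loop:  up to 20 * (5-10 ms) = 100-200 ms
	 * worst case before the TX-timeout path is taken: ~260 ms
	 */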
+
+static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
+ fe_sec_mini_cmd_t fe_sec_mini_cmd)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret, i;
+ u8 u8tmp, burst;
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
+ fe_sec_mini_cmd);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ u8tmp = priv->cfg->envelope_mode << 5;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0xe0);
+ if (ret)
+ goto err;
+
+ switch (fe_sec_mini_cmd) {
+ case SEC_MINI_A:
+ burst = 0x02;
+ break;
+ case SEC_MINI_B:
+ burst = 0x01;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xa1, burst);
+ if (ret)
+ goto err;
+
+ /* DiSEqC ToneBurst period is 12.5 ms */
+ usleep_range(11000, 20000);
+
+ /* wait DiSEqC TX ready */
+ for (i = 5, u8tmp = 1; i && u8tmp; i--) {
+ usleep_range(800, 2000);
+
+ ret = m88ds3103_rd_reg_mask(priv, 0xa1, &u8tmp, 0x40);
+ if (ret)
+ goto err;
+ }
+
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, 0x80, 0xc0);
+ if (ret)
+ goto err;
+
+ if (i == 0) {
+ dev_dbg(&priv->i2c->dev, "%s: diseqc tx timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ds3103_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 3000;
+
+ return 0;
+}
+
+static void m88ds3103_release(struct dvb_frontend *fe)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ i2c_del_mux_adapter(priv->i2c_adapter);
+ kfree(priv);
+}
+
+static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+{
+ struct m88ds3103_priv *priv = mux_priv;
+ int ret;
+ struct i2c_msg gate_open_msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = "\x03\x11",
+ }
+ };
+
+ mutex_lock(&priv->i2c_mutex);
+
+ /* open tuner I2C repeater for 1 xfer, closes automatically */
+ ret = __i2c_transfer(priv->i2c, gate_open_msg, 1);
+ if (ret != 1) {
+ dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d\n",
+ KBUILD_MODNAME, ret);
+ if (ret >= 0)
+ ret = -EREMOTEIO;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m88ds3103_deselect(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan)
+{
+ struct m88ds3103_priv *priv = mux_priv;
+
+ mutex_unlock(&priv->i2c_mutex);
+
+ return 0;
+}
+
+struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+ struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+{
+ int ret;
+ struct m88ds3103_priv *priv;
+ u8 chip_id, u8tmp;
+
+ /* allocate memory for the internal priv */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+ mutex_init(&priv->i2c_mutex);
+
+ ret = m88ds3103_rd_reg(priv, 0x01, &chip_id);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xd0:
+ break;
+ default:
+ goto err;
+ }
+
+ switch (priv->cfg->clock_out) {
+ case M88DS3103_CLOCK_OUT_DISABLED:
+ u8tmp = 0x80;
+ break;
+ case M88DS3103_CLOCK_OUT_ENABLED:
+ u8tmp = 0x00;
+ break;
+ case M88DS3103_CLOCK_OUT_ENABLED_DIV2:
+ u8tmp = 0x10;
+ break;
+ default:
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ds3103_wr_reg_mask(priv, 0x08, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x04, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg_mask(priv, 0x23, 0x10, 0x10);
+ if (ret)
+ goto err;
+
+ /* create mux i2c adapter for tuner */
+ priv->i2c_adapter = i2c_add_mux_adapter(i2c, &i2c->dev, priv, 0, 0, 0,
+ m88ds3103_select, m88ds3103_deselect);
+ if (priv->i2c_adapter == NULL)
+ goto err;
+
+ *tuner_i2c_adapter = priv->i2c_adapter;
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+
+ return &priv->fe;
+err:
+ dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(m88ds3103_attach);
+
+static struct dvb_frontend_ops m88ds3103_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Montage M88DS3103",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_tolerance = 5000,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_8_9 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_RECOVER |
+ FE_CAN_2G_MODULATION
+ },
+
+ .release = m88ds3103_release,
+
+ .get_tune_settings = m88ds3103_get_tune_settings,
+
+ .init = m88ds3103_init,
+ .sleep = m88ds3103_sleep,
+
+ .set_frontend = m88ds3103_set_frontend,
+ .get_frontend = m88ds3103_get_frontend,
+
+ .read_status = m88ds3103_read_status,
+ .read_snr = m88ds3103_read_snr,
+
+ .diseqc_send_master_cmd = m88ds3103_diseqc_send_master_cmd,
+ .diseqc_send_burst = m88ds3103_diseqc_send_burst,
+
+ .set_tone = m88ds3103_set_tone,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(M88DS3103_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
new file mode 100644
index 000000000000..bbb7e3aa5675
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -0,0 +1,114 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_H
+#define M88DS3103_H
+
+#include <linux/dvb/frontend.h>
+
+struct m88ds3103_config {
+ /*
+ * I2C address
+ * Default: none, must set
+ * 0x68, ...
+ */
+ u8 i2c_addr;
+
+ /*
+ * clock
+ * Default: none, must set
+ * 27000000
+ */
+ u32 clock;
+
+ /*
+ * maximum number of bytes the I2C adapter (bridge) can be asked to write at once
+ * Default: none, must set
+ * 33, 65, ...
+ */
+ u16 i2c_wr_max;
+
+ /*
+ * TS output mode
+ * Default: M88DS3103_TS_SERIAL
+ */
+#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
+#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
+#define M88DS3103_TS_PARALLEL 2 /* 24 MHz, normal */
+#define M88DS3103_TS_PARALLEL_12 3 /* 12 MHz */
+#define M88DS3103_TS_PARALLEL_16 4 /* 16 MHz */
+#define M88DS3103_TS_PARALLEL_19_2 5 /* 19.2 MHz */
+#define M88DS3103_TS_CI 6 /* 6 MHz */
+ u8 ts_mode;
+
+ /*
+ * spectrum inversion
+ * Default: 0
+ */
+ u8 spec_inv:1;
+
+ /*
+ * AGC polarity
+ * Default: 0
+ */
+ u8 agc_inv:1;
+
+ /*
+ * clock output
+ * Default: M88DS3103_CLOCK_OUT_DISABLED
+ */
+#define M88DS3103_CLOCK_OUT_DISABLED 0
+#define M88DS3103_CLOCK_OUT_ENABLED 1
+#define M88DS3103_CLOCK_OUT_ENABLED_DIV2 2
+ u8 clock_out;
+
+ /*
+ * DiSEqC envelope mode
+ * Default: 0
+ */
+ u8 envelope_mode:1;
+
+ /*
+ * AGC configuration
+ * Default: none, must set
+ */
+ u8 agc;
+};
+
+/*
+ * The driver implements its own I2C adapter for tuner I2C access. This is
+ * needed because the chip's I2C-gate control closes the gate automatically
+ * after every I2C transfer; routing tuner access through our own adapter
+ * lets us work around that.
+ */
+
+#if defined(CONFIG_DVB_M88DS3103) || \
+ (defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
+extern struct dvb_frontend *m88ds3103_attach(
+ const struct m88ds3103_config *config,
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c);
+#else
+static inline struct dvb_frontend *m88ds3103_attach(
+ const struct m88ds3103_config *config,
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
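To show how a bridge or card driver would typically consume this header, here is a minimal attach sketch. It is illustrative only: the config values (0x68 address, 27 MHz clock, serial TS mode, the agc byte) and the surrounding variables such as i2c_adap are assumptions for the example, not something mandated by this patch.

	/* hypothetical board configuration, values are assumptions */
	static const struct m88ds3103_config example_m88ds3103_cfg = {
		.i2c_addr	= 0x68,
		.clock		= 27000000,
		.i2c_wr_max	= 33,
		.ts_mode	= M88DS3103_TS_SERIAL,
		.clock_out	= M88DS3103_CLOCK_OUT_DISABLED,
		.agc		= 0x99,	/* board specific, assumed value */
	};

	/* in the bridge driver's frontend-attach callback;
	 * i2c_adap is the bridge's I2C adapter */
	struct i2c_adapter *tuner_i2c;
	struct dvb_frontend *fe;

	fe = dvb_attach(m88ds3103_attach, &example_m88ds3103_cfg,
			i2c_adap, &tuner_i2c);
	if (!fe)
		return -ENODEV;

	/* a tuner driver (such as the companion m88ts2022) would now be
	 * attached on tuner_i2c, the mux adapter returned by the demod */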
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
new file mode 100644
index 000000000000..84c3c06df622
--- /dev/null
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -0,0 +1,215 @@
+/*
+ * Montage M88DS3103 demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88DS3103_PRIV_H
+#define M88DS3103_PRIV_H
+
+#include "dvb_frontend.h"
+#include "m88ds3103.h"
+#include "dvb_math.h"
+#include <linux/firmware.h>
+#include <linux/i2c-mux.h>
+
+#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
+#define M88DS3103_MCLK_KHZ 96000
+
+struct m88ds3103_priv {
+ struct i2c_adapter *i2c;
+ /* mutex needed because we expose our own tuner I2C adapter */
+ struct mutex i2c_mutex;
+ const struct m88ds3103_config *cfg;
+ struct dvb_frontend fe;
+ fe_delivery_system_t delivery_system;
+ fe_status_t fe_status;
+ bool warm; /* FW running */
+ struct i2c_adapter *i2c_adapter;
+};
+
+struct m88ds3103_reg_val {
+ u8 reg;
+ u8 val;
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x03},
+ {0x0c, 0x02},
+ {0x21, 0x54},
+ {0x25, 0x8a},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x31, 0x40},
+ {0x32, 0x32},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x56, 0x01},
+ {0x63, 0x0f},
+ {0x64, 0x30},
+ {0x65, 0x40},
+ {0x68, 0x26},
+ {0x69, 0x4c},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x7c, 0x00},
+ {0xae, 0x82},
+ {0x80, 0x64},
+ {0x81, 0x66},
+ {0x82, 0x44},
+ {0x85, 0x04},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0xa0, 0x44},
+ {0xc0, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xe0, 0xf8},
+ {0xe6, 0x8b},
+ {0xd0, 0x40},
+ {0xf8, 0x20},
+ {0xfa, 0x0f},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+};
+
+static const struct m88ds3103_reg_val m88ds3103_dvbs2_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x07},
+ {0x0c, 0x02},
+ {0x21, 0x54},
+ {0x25, 0x8a},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x32, 0x32},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x56, 0x01},
+ {0x63, 0x0f},
+ {0x64, 0x10},
+ {0x65, 0x20},
+ {0x68, 0x46},
+ {0x69, 0xcd},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x85, 0x08},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0x86, 0x00},
+ {0x87, 0x0f},
+ {0x89, 0x00},
+ {0x8b, 0x44},
+ {0x8c, 0x66},
+ {0x9d, 0xc1},
+ {0x8a, 0x10},
+ {0xad, 0x40},
+ {0xa0, 0x44},
+ {0xc0, 0x08},
+ {0xc1, 0x10},
+ {0xc2, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xca, 0x23},
+ {0xcb, 0x24},
+ {0xcc, 0xf4},
+ {0xce, 0x74},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 4da5272075cb..b2351466b0da 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -110,28 +110,94 @@ static u8 m88rs2000_readreg(struct m88rs2000_state *state, u8 reg)
return b1[0];
}
+static u32 m88rs2000_get_mclk(struct dvb_frontend *fe)
+{
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u32 mclk;
+ u8 reg;
+ /* Must not be 0x00 or 0xff */
+ reg = m88rs2000_readreg(state, 0x86);
+ if (!reg || reg == 0xff)
+ return 0;
+
+ reg /= 2;
+ reg += 1;
+
+ mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28;
+
+ return mclk;
+}
+
+static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset)
+{
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u32 mclk;
+ s32 tmp;
+ u8 reg;
+ int ret;
+
+ mclk = m88rs2000_get_mclk(fe);
+ if (!mclk)
+ return -EINVAL;
+
+ tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk;
+ if (tmp < 0)
+ tmp += 4096;
+
+ /* Carrier Offset */
+ ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4));
+
+ reg = m88rs2000_readreg(state, 0x9d);
+ reg &= 0xf;
+ reg |= (u8)(tmp & 0xf) << 4;
+
+ ret |= m88rs2000_writereg(state, 0x9d, reg);
+
+ return ret;
+}
+
static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate)
{
struct m88rs2000_state *state = fe->demodulator_priv;
int ret;
- u32 temp;
+ u64 temp;
+ u32 mclk;
u8 b[3];
if ((srate < 1000000) || (srate > 45000000))
return -EINVAL;
+ mclk = m88rs2000_get_mclk(fe);
+ if (!mclk)
+ return -EINVAL;
+
temp = srate / 1000;
- temp *= 11831;
- temp /= 68;
- temp -= 3;
+ temp *= 1 << 24;
+
+ do_div(temp, mclk);
b[0] = (u8) (temp >> 16) & 0xff;
b[1] = (u8) (temp >> 8) & 0xff;
b[2] = (u8) temp & 0xff;
+
ret = m88rs2000_writereg(state, 0x93, b[2]);
ret |= m88rs2000_writereg(state, 0x94, b[1]);
ret |= m88rs2000_writereg(state, 0x95, b[0]);
+ if (srate > 10000000)
+ ret |= m88rs2000_writereg(state, 0xa0, 0x20);
+ else
+ ret |= m88rs2000_writereg(state, 0xa0, 0x60);
+
+ ret |= m88rs2000_writereg(state, 0xa1, 0xe0);
+
+ if (srate > 12000000)
+ ret |= m88rs2000_writereg(state, 0xa3, 0x20);
+ else if (srate > 2800000)
+ ret |= m88rs2000_writereg(state, 0xa3, 0x98);
+ else
+ ret |= m88rs2000_writereg(state, 0xa3, 0x90);
+
deb_info("m88rs2000: m88rs2000_set_symbolrate\n");
return ret;
}
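The register programming above reduces to a fixed-point ratio: registers 0x95/0x94/0x93 receive the high/middle/low bytes of (symbol rate in kSym/s) * 2^24 / (mclk in kHz), truncated. A worked example, assuming for simplicity an mclk of exactly 96000 kHz (the value actually derived from register 0x86 is close to 96429 kHz):

	/* illustration only: srate = 24 MS/s, mclk assumed to be 96000 kHz */
	u64 temp = 24000;	/* srate / 1000		*/
	temp *= 1 << 24;	/* 24000 * 16777216	*/
	do_div(temp, 96000);	/* temp = 0x400000	*/
	/* b[0] = 0x40 -> reg 0x95, b[1] = 0x00 -> reg 0x94, b[2] = 0x00 -> reg 0x93 */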
@@ -261,8 +327,6 @@ struct inittab m88rs2000_shutdown[] = {
struct inittab fe_reset[] = {
{DEMOD_WRITE, 0x00, 0x01},
- {DEMOD_WRITE, 0xf1, 0xbf},
- {DEMOD_WRITE, 0x00, 0x01},
{DEMOD_WRITE, 0x20, 0x81},
{DEMOD_WRITE, 0x21, 0x80},
{DEMOD_WRITE, 0x10, 0x33},
@@ -305,9 +369,6 @@ struct inittab fe_trigger[] = {
{DEMOD_WRITE, 0x9b, 0x64},
{DEMOD_WRITE, 0x9e, 0x00},
{DEMOD_WRITE, 0x9f, 0xf8},
- {DEMOD_WRITE, 0xa0, 0x20},
- {DEMOD_WRITE, 0xa1, 0xe0},
- {DEMOD_WRITE, 0xa3, 0x38},
{DEMOD_WRITE, 0x98, 0xff},
{DEMOD_WRITE, 0xc0, 0x0f},
{DEMOD_WRITE, 0x89, 0x01},
@@ -408,7 +469,7 @@ static int m88rs2000_read_status(struct dvb_frontend *fe, fe_status_t *status)
*status = 0;
- if ((reg & 0x7) == 0x7) {
+ if ((reg & 0xee) == 0xee) {
*status = FE_HAS_CARRIER | FE_HAS_SIGNAL | FE_HAS_VITERBI
| FE_HAS_SYNC | FE_HAS_LOCK;
if (state->config->set_ts_params)
@@ -480,33 +541,38 @@ static int m88rs2000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int m88rs2000_set_fec(struct m88rs2000_state *state,
fe_code_rate_t fec)
{
- u16 fec_set;
+ u8 fec_set, reg;
+ int ret;
+
switch (fec) {
- /* This is not confirmed kept for reference */
-/* case FEC_1_2:
- fec_set = 0x88;
+ case FEC_1_2:
+ fec_set = 0x8;
break;
case FEC_2_3:
- fec_set = 0x68;
+ fec_set = 0x10;
break;
case FEC_3_4:
- fec_set = 0x48;
+ fec_set = 0x20;
break;
case FEC_5_6:
- fec_set = 0x28;
+ fec_set = 0x40;
break;
case FEC_7_8:
- fec_set = 0x18;
- break; */
+ fec_set = 0x80;
+ break;
case FEC_AUTO:
default:
- fec_set = 0x08;
+ fec_set = 0x0;
}
- m88rs2000_writereg(state, 0x76, fec_set);
- return 0;
-}
+ reg = m88rs2000_readreg(state, 0x70);
+ reg &= 0x7;
+ ret = m88rs2000_writereg(state, 0x70, reg | fec_set);
+ ret |= m88rs2000_writereg(state, 0x76, 0x8);
+
+ return ret;
+}
static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
{
@@ -515,18 +581,20 @@ static fe_code_rate_t m88rs2000_get_fec(struct m88rs2000_state *state)
reg = m88rs2000_readreg(state, 0x76);
m88rs2000_writereg(state, 0x9a, 0xb0);
+ reg &= 0xf0;
+ reg >>= 5;
+
switch (reg) {
- case 0x88:
+ case 0x4:
return FEC_1_2;
- case 0x68:
+ case 0x3:
return FEC_2_3;
- case 0x48:
+ case 0x2:
return FEC_3_4;
- case 0x28:
+ case 0x1:
return FEC_5_6;
- case 0x18:
+ case 0x0:
return FEC_7_8;
- case 0x08:
default:
break;
}
@@ -540,9 +608,8 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
fe_status_t status;
int i, ret = 0;
- s32 tmp;
u32 tuner_freq;
- u16 offset = 0;
+ s16 offset = 0;
u8 reg;
state->no_lock_count = 0;
@@ -567,38 +634,31 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
if (ret < 0)
return -ENODEV;
- offset = tuner_freq - c->frequency;
-
- /* calculate offset assuming 96000kHz*/
- tmp = offset;
- tmp *= 65536;
-
- tmp = (2 * tmp + 96000) / (2 * 96000);
- if (tmp < 0)
- tmp += 65536;
+ offset = (s16)((s32)tuner_freq - c->frequency);
- offset = tmp & 0xffff;
+ /* default mclk is 96.4285 MHz; 96.4285 * 2 * 1000 = 192857 kHz */
+ if (((c->frequency % 192857) >= (192857 - 3000)) ||
+ (c->frequency % 192857) <= 3000)
+ ret = m88rs2000_writereg(state, 0x86, 0xc2);
+ else
+ ret = m88rs2000_writereg(state, 0x86, 0xc6);
- ret = m88rs2000_writereg(state, 0x9a, 0x30);
- /* Unknown usually 0xc6 sometimes 0xc1 */
- reg = m88rs2000_readreg(state, 0x86);
- ret |= m88rs2000_writereg(state, 0x86, reg);
- /* Offset lower nibble always 0 */
- ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8));
- ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0);
+ ret |= m88rs2000_set_carrieroffset(fe, offset);
+ if (ret < 0)
+ return -ENODEV;
+ /* Reset demod by symbol rate */
+ if (c->symbol_rate > 27500000)
+ ret = m88rs2000_writereg(state, 0xf1, 0xa4);
+ else
+ ret = m88rs2000_writereg(state, 0xf1, 0xbf);
- /* Reset Demod */
- ret = m88rs2000_tab_set(state, fe_reset);
+ ret |= m88rs2000_tab_set(state, fe_reset);
if (ret < 0)
return -ENODEV;
- /* Unknown */
- reg = m88rs2000_readreg(state, 0x70);
- ret = m88rs2000_writereg(state, 0x70, reg);
-
/* Set FEC */
- ret |= m88rs2000_set_fec(state, c->fec_inner);
+ ret = m88rs2000_set_fec(state, c->fec_inner);
ret |= m88rs2000_writereg(state, 0x85, 0x1);
ret |= m88rs2000_writereg(state, 0x8a, 0xbf);
ret |= m88rs2000_writereg(state, 0x8d, 0x1e);
@@ -620,7 +680,7 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
for (i = 0; i < 25; i++) {
reg = m88rs2000_readreg(state, 0x8c);
- if ((reg & 0x7) == 0x7) {
+ if ((reg & 0xee) == 0xee) {
status = FE_HAS_LOCK;
break;
}
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index 14ce31e76ae6..0a50ea90736b 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -53,6 +53,8 @@ static inline struct dvb_frontend *m88rs2000_attach(
}
#endif /* CONFIG_DVB_M88RS2000 */
+#define RS2000_FE_CRYSTAL_KHZ 27000
+
enum {
DEMOD_WRITE = 0x1,
WRITE_DELAY = 0x10,
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index fbca9856313a..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
@@ -40,7 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
+#define MAX_XFER_SIZE 256
#define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
#define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw"
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 842654d33317..4aa9c5311cc5 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -555,14 +555,6 @@ config VIDEO_MT9V032
This is a Video4Linux2 sensor-level driver for the Micron
MT9V032 752x480 CMOS sensor.
-config VIDEO_TCM825X
- tristate "TCM825x camera sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
- depends on MEDIA_CAMERA_SUPPORT
- ---help---
- This is a driver for the Toshiba TCM825x VGA camera sensor.
- It is used for example in Nokia N800.
-
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
@@ -594,6 +586,13 @@ config VIDEO_S5K4ECGX
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
+config VIDEO_S5K5BAF
+ tristate "Samsung S5K5BAF sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
+ camera sensor with an embedded SoC image signal processor.
+
source "drivers/media/i2c/smiapp/Kconfig"
config VIDEO_S5C73M3
@@ -655,6 +654,18 @@ config VIDEO_UPD64083
To compile this driver as a module, choose M here: the
module will be called upd64083.
+comment "Audio/Video compression chips"
+
+config VIDEO_SAA6752HS
+ tristate "Philips SAA6752HS MPEG-2 Audio/Video Encoder"
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ Support for the Philips SAA6752HS MPEG-2 video and MPEG-audio/AC-3
+ audio encoder with multiplexer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa6752hs.
+
comment "Miscellaneous helper chips"
config VIDEO_THS7303
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index e03f1776f4f4..48888ae876fb 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
+obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -57,7 +58,6 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
-obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -67,6 +67,7 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
+obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o
obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index b06a7e54ee0d..83225d6a0dd9 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -66,11 +66,6 @@ MODULE_LICENSE("GPL");
**********************************************************************
*/
-struct i2c_reg_value {
- u8 reg;
- u8 value;
-};
-
struct ad9389b_state_edid {
/* total number of blocks */
u32 blocks;
@@ -143,14 +138,14 @@ static int ad9389b_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
if (ret == 0)
return 0;
}
- v4l2_err(sd, "I2C Write Problem\n");
+ v4l2_err(sd, "%s: failed reg 0x%x, val 0x%x\n", __func__, reg, val);
return ret;
}
/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
and then the value-mask (to be OR-ed). */
static inline void ad9389b_wr_and_or(struct v4l2_subdev *sd, u8 reg,
- u8 clr_mask, u8 val_mask)
+ u8 clr_mask, u8 val_mask)
{
ad9389b_wr(sd, reg, (ad9389b_rd(sd, reg) & clr_mask) | val_mask);
}
@@ -321,12 +316,12 @@ static int ad9389b_s_ctrl(struct v4l2_ctrl *ctrl)
struct ad9389b_state *state = get_ad9389b_state(sd);
v4l2_dbg(1, debug, sd,
- "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+ "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
if (state->hdmi_mode_ctrl == ctrl) {
/* Set HDMI or DVI-D */
ad9389b_wr_and_or(sd, 0xaf, 0xfd,
- ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+ ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
return 0;
}
if (state->rgb_quantization_range_ctrl == ctrl)
@@ -387,61 +382,57 @@ static int ad9389b_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "chip revision %d\n", state->chip_revision);
v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
- (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
- "detected" : "no",
- (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
- "detected" : "no",
- edid->segments ? "found" : "no", edid->blocks);
- if (state->have_monitor) {
- v4l2_info(sd, "%s output %s\n",
- (ad9389b_rd(sd, 0xaf) & 0x02) ?
- "HDMI" : "DVI-D",
- (ad9389b_rd(sd, 0xa1) & 0x3c) ?
- "disabled" : "enabled");
- }
+ (ad9389b_rd(sd, 0x42) & MASK_AD9389B_HPD_DETECT) ?
+ "detected" : "no",
+ (ad9389b_rd(sd, 0x42) & MASK_AD9389B_MSEN_DETECT) ?
+ "detected" : "no",
+ edid->segments ? "found" : "no", edid->blocks);
+ v4l2_info(sd, "%s output %s\n",
+ (ad9389b_rd(sd, 0xaf) & 0x02) ?
+ "HDMI" : "DVI-D",
+ (ad9389b_rd(sd, 0xa1) & 0x3c) ?
+ "disabled" : "enabled");
v4l2_info(sd, "ad9389b: %s\n", (ad9389b_rd(sd, 0xb8) & 0x40) ?
- "encrypted" : "no encryption");
+ "encrypted" : "no encryption");
v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
- states[ad9389b_rd(sd, 0xc8) & 0xf],
- errors[ad9389b_rd(sd, 0xc8) >> 4],
- state->edid_detect_counter,
- ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
+ states[ad9389b_rd(sd, 0xc8) & 0xf],
+ errors[ad9389b_rd(sd, 0xc8) >> 4],
+ state->edid_detect_counter,
+ ad9389b_rd(sd, 0x94), ad9389b_rd(sd, 0x96));
manual_gear = ad9389b_rd(sd, 0x98) & 0x80;
v4l2_info(sd, "ad9389b: RGB quantization: %s range\n",
- ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
+ ad9389b_rd(sd, 0x3b) & 0x01 ? "limited" : "full");
v4l2_info(sd, "ad9389b: %s gear %d\n",
manual_gear ? "manual" : "automatic",
manual_gear ? ((ad9389b_rd(sd, 0x98) & 0x70) >> 4) :
- ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
- if (state->have_monitor) {
- if (ad9389b_rd(sd, 0xaf) & 0x02) {
- /* HDMI only */
- u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
- u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
- ad9389b_rd(sd, 0x02) << 8 |
- ad9389b_rd(sd, 0x03);
- u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
- u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
- u32 CTS;
-
- if (manual_cts)
- CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
- ad9389b_rd(sd, 0x08) << 8 |
- ad9389b_rd(sd, 0x09);
- else
- CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
- ad9389b_rd(sd, 0x05) << 8 |
- ad9389b_rd(sd, 0x06);
- N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
- ad9389b_rd(sd, 0x02) << 8 |
- ad9389b_rd(sd, 0x03);
-
- v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
- manual_cts ? "manual" : "automatic", N, CTS);
-
- v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
- vic_detect, vic_sent);
- }
+ ((ad9389b_rd(sd, 0x9e) & 0x0e) >> 1));
+ if (ad9389b_rd(sd, 0xaf) & 0x02) {
+ /* HDMI only */
+ u8 manual_cts = ad9389b_rd(sd, 0x0a) & 0x80;
+ u32 N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x02) << 8 |
+ ad9389b_rd(sd, 0x03);
+ u8 vic_detect = ad9389b_rd(sd, 0x3e) >> 2;
+ u8 vic_sent = ad9389b_rd(sd, 0x3d) & 0x3f;
+ u32 CTS;
+
+ if (manual_cts)
+ CTS = (ad9389b_rd(sd, 0x07) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x08) << 8 |
+ ad9389b_rd(sd, 0x09);
+ else
+ CTS = (ad9389b_rd(sd, 0x04) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x05) << 8 |
+ ad9389b_rd(sd, 0x06);
+ N = (ad9389b_rd(sd, 0x01) & 0xf) << 16 |
+ ad9389b_rd(sd, 0x02) << 8 |
+ ad9389b_rd(sd, 0x03);
+
+ v4l2_info(sd, "ad9389b: CTS %s mode: N %d, CTS %d\n",
+ manual_cts ? "manual" : "automatic", N, CTS);
+
+ v4l2_info(sd, "ad9389b: VIC: detected %d, sent %d\n",
+ vic_detect, vic_sent);
}
if (state->dv_timings.type == V4L2_DV_BT_656_1120)
v4l2_print_dv_timings(sd->name, "timings: ",
@@ -486,7 +477,7 @@ static int ad9389b_s_power(struct v4l2_subdev *sd, int on)
}
if (i > 1)
v4l2_dbg(1, debug, sd,
- "needed %d retries to powerup the ad9389b\n", i);
+ "needed %d retries to powerup the ad9389b\n", i);
/* Select chip: AD9389B */
ad9389b_wr_and_or(sd, 0xba, 0xef, 0x10);
@@ -556,14 +547,16 @@ static int ad9389b_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
irq_status = ad9389b_rd(sd, 0x96);
/* clear detected interrupts */
ad9389b_wr(sd, 0x96, irq_status);
+ /* enable interrupts */
+ ad9389b_set_isr(sd, true);
+
+ v4l2_dbg(1, debug, sd, "%s: irq_status 0x%x\n", __func__, irq_status);
- if (irq_status & (MASK_AD9389B_HPD_INT | MASK_AD9389B_MSEN_INT))
+ if (irq_status & (MASK_AD9389B_HPD_INT))
ad9389b_check_monitor_present_status(sd);
if (irq_status & MASK_AD9389B_EDID_RDY_INT)
ad9389b_check_edid_status(sd);
- /* enable interrupts */
- ad9389b_set_isr(sd, true);
*handled = true;
return 0;
}
@@ -599,7 +592,7 @@ static int ad9389b_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edi
if (edid->blocks + edid->start_block >= state->edid.segments * 2)
edid->blocks = state->edid.segments * 2 - edid->start_block;
memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
- 128 * edid->blocks);
+ 128 * edid->blocks);
return 0;
}
@@ -612,8 +605,6 @@ static const struct v4l2_subdev_pad_ops ad9389b_pad_ops = {
/* Enable/disable ad9389b output */
static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct ad9389b_state *state = get_ad9389b_state(sd);
-
v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
ad9389b_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
@@ -621,7 +612,6 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
ad9389b_check_monitor_present_status(sd);
} else {
ad9389b_s_power(sd, 0);
- state->have_monitor = false;
}
return 0;
}
@@ -686,14 +676,14 @@ static int ad9389b_g_dv_timings(struct v4l2_subdev *sd,
}
static int ad9389b_enum_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_enum_dv_timings *timings)
+ struct v4l2_enum_dv_timings *timings)
{
return v4l2_enum_dv_timings_cap(timings, &ad9389b_timings_cap,
NULL, NULL);
}
static int ad9389b_dv_timings_cap(struct v4l2_subdev *sd,
- struct v4l2_dv_timings_cap *cap)
+ struct v4l2_dv_timings_cap *cap)
{
*cap = ad9389b_timings_cap;
return 0;
@@ -724,15 +714,15 @@ static int ad9389b_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
u32 N;
switch (freq) {
- case 32000: N = 4096; break;
- case 44100: N = 6272; break;
- case 48000: N = 6144; break;
- case 88200: N = 12544; break;
- case 96000: N = 12288; break;
+ case 32000: N = 4096; break;
+ case 44100: N = 6272; break;
+ case 48000: N = 6144; break;
+ case 88200: N = 12544; break;
+ case 96000: N = 12288; break;
case 176400: N = 25088; break;
case 192000: N = 24576; break;
default:
- return -EINVAL;
+ return -EINVAL;
}
/* Set N (used with CTS to regenerate the audio clock) */
@@ -748,15 +738,15 @@ static int ad9389b_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
u32 i2s_sf;
switch (freq) {
- case 32000: i2s_sf = 0x30; break;
- case 44100: i2s_sf = 0x00; break;
- case 48000: i2s_sf = 0x20; break;
- case 88200: i2s_sf = 0x80; break;
- case 96000: i2s_sf = 0xa0; break;
+ case 32000: i2s_sf = 0x30; break;
+ case 44100: i2s_sf = 0x00; break;
+ case 48000: i2s_sf = 0x20; break;
+ case 88200: i2s_sf = 0x80; break;
+ case 96000: i2s_sf = 0xa0; break;
case 176400: i2s_sf = 0xc0; break;
case 192000: i2s_sf = 0xe0; break;
default:
- return -EINVAL;
+ return -EINVAL;
}
/* Set sampling frequency for I2S audio to 48 kHz */
@@ -800,7 +790,7 @@ static const struct v4l2_subdev_ops ad9389b_ops = {
/* ----------------------------------------------------------------------- */
static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd,
- int segment, u8 *buf)
+ int segment, u8 *buf)
{
int i, j;
@@ -826,8 +816,8 @@ static void ad9389b_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd,
static void ad9389b_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ad9389b_state *state = container_of(dwork,
- struct ad9389b_state, edid_handler);
+ struct ad9389b_state *state =
+ container_of(dwork, struct ad9389b_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
struct ad9389b_edid_detect ed;
@@ -845,11 +835,10 @@ static void ad9389b_edid_handler(struct work_struct *work)
if (state->edid.read_retries) {
state->edid.read_retries--;
v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
- state->have_monitor = false;
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
return;
}
}
@@ -915,49 +904,35 @@ static void ad9389b_notify_monitor_detect(struct v4l2_subdev *sd)
v4l2_subdev_notify(sd, AD9389B_MONITOR_DETECT, (void *)&mdt);
}
-static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+static void ad9389b_update_monitor_present_status(struct v4l2_subdev *sd)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
/* read hotplug and rx-sense state */
u8 status = ad9389b_rd(sd, 0x42);
v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
- __func__,
- status,
- status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
- status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
+ __func__,
+ status,
+ status & MASK_AD9389B_HPD_DETECT ? ", hotplug" : "",
+ status & MASK_AD9389B_MSEN_DETECT ? ", rx-sense" : "");
- if ((status & MASK_AD9389B_HPD_DETECT) &&
- ((status & MASK_AD9389B_MSEN_DETECT) || state->edid.segments)) {
- v4l2_dbg(1, debug, sd,
- "%s: hotplug and (rx-sense or edid)\n", __func__);
- if (!state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
- state->have_monitor = true;
- ad9389b_set_isr(sd, true);
- if (!ad9389b_s_power(sd, true)) {
- v4l2_dbg(1, debug, sd,
- "%s: monitor detected, powerup failed\n", __func__);
- return;
- }
- ad9389b_setup(sd);
- ad9389b_notify_monitor_detect(sd);
- state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
- }
- } else if (status & MASK_AD9389B_HPD_DETECT) {
+ if (status & MASK_AD9389B_HPD_DETECT) {
v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+ state->have_monitor = true;
+ if (!ad9389b_s_power(sd, true)) {
+ v4l2_dbg(1, debug, sd,
+ "%s: monitor detected, powerup failed\n", __func__);
+ return;
+ }
+ ad9389b_setup(sd);
+ ad9389b_notify_monitor_detect(sd);
state->edid.read_retries = EDID_MAX_RETRIES;
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
} else if (!(status & MASK_AD9389B_HPD_DETECT)) {
v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
- if (state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
- state->have_monitor = false;
- ad9389b_notify_monitor_detect(sd);
- }
+ state->have_monitor = false;
+ ad9389b_notify_monitor_detect(sd);
ad9389b_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
}
@@ -966,6 +941,35 @@ static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, ad9389b_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, ad9389b_have_rx_sense(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+
+ /* update with setting from ctrls */
+ ad9389b_s_ctrl(state->rgb_quantization_range_ctrl);
+ ad9389b_s_ctrl(state->hdmi_mode_ctrl);
+}
+
+static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
+{
+ struct ad9389b_state *state = get_ad9389b_state(sd);
+ int retry = 0;
+
+ ad9389b_update_monitor_present_status(sd);
+
+ /*
+ * Rapid toggling of the hotplug may leave the chip powered off,
+ * even if we think it is on. In that case reset and power up again.
+ */
+ while (state->power_on && (ad9389b_rd(sd, 0x41) & 0x40)) {
+ if (++retry > 5) {
+ v4l2_err(sd, "retried %d times, give up\n", retry);
+ return;
+ }
+ v4l2_dbg(1, debug, sd, "%s: reset and re-check status (%d)\n", __func__, retry);
+ ad9389b_notify_monitor_detect(sd);
+ cancel_delayed_work_sync(&state->edid_handler);
+ memset(&state->edid, 0, sizeof(struct ad9389b_state_edid));
+ ad9389b_s_power(sd, false);
+ ad9389b_update_monitor_present_status(sd);
+ }
}
static bool edid_block_verify_crc(u8 *edid_block)
@@ -978,7 +982,7 @@ static bool edid_block_verify_crc(u8 *edid_block)
return sum == 0;
}
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
u32 blocks = state->edid.blocks;
@@ -992,6 +996,25 @@ static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
return false;
}
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+ static const u8 hdmi_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+ struct ad9389b_state *state = get_ad9389b_state(sd);
+ u8 *data = state->edid.data;
+ int i;
+
+ if (segment)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_header); i++)
+ if (data[i] != hdmi_header[i])
+ return false;
+
+ return true;
+}
+
static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
@@ -1000,7 +1023,7 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
u8 edidRdy = ad9389b_rd(sd, 0xc5);
v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
- __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+ __func__, EDID_MAX_RETRIES - state->edid.read_retries);
if (!(edidRdy & MASK_AD9389B_EDID_RDY))
return false;
@@ -1013,16 +1036,16 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
ad9389b_edid_rd(sd, 256, &state->edid.data[segment * 256]);
ad9389b_dbg_dump_edid(2, debug, sd, segment,
- &state->edid.data[segment * 256]);
+ &state->edid.data[segment * 256]);
if (segment == 0) {
state->edid.blocks = state->edid.data[0x7e] + 1;
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
- __func__, state->edid.blocks);
+ __func__, state->edid.blocks);
}
- if (!edid_segment_verify_crc(sd, segment)) {
+ if (!edid_verify_crc(sd, segment) ||
+ !edid_verify_header(sd, segment)) {
/* edid crc error, force reread of edid segment */
- v4l2_err(sd, "%s: edid crc error\n", __func__);
- state->have_monitor = false;
+ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
return false;
@@ -1032,12 +1055,12 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n",
- __func__, state->edid.segments);
+ __func__, state->edid.segments);
ad9389b_wr(sd, 0xc9, 0xf);
ad9389b_wr(sd, 0xc4, state->edid.segments);
state->edid.read_retries = EDID_MAX_RETRIES;
queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ &state->edid_handler, EDID_DELAY);
return false;
}
@@ -1081,7 +1104,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
return -EIO;
v4l_dbg(1, debug, client, "detecting ad9389b client on address 0x%x\n",
- client->addr << 1);
+ client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -1140,7 +1163,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_entity;
}
v4l2_dbg(1, debug, sd, "reg 0x41 0x%x, chip version (reg 0x00) 0x%x\n",
- ad9389b_rd(sd, 0x41), state->chip_revision);
+ ad9389b_rd(sd, 0x41), state->chip_revision);
state->edid_i2c_client = i2c_new_dummy(client->adapter, (0x7e>>1));
if (state->edid_i2c_client == NULL) {
@@ -1163,7 +1186,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
ad9389b_set_isr(sd, true);
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
- client->addr << 1, client->adapter->name);
+ client->addr << 1, client->adapter->name);
return 0;
err_unreg:
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index d4e15a617c3b..9d38f7b36cd1 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -26,12 +26,12 @@
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <media/adv7343.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
#include "adv7343_regs.h"
@@ -410,7 +410,7 @@ adv7343_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7c8d971f1f61..ee618942cb8e 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -452,6 +452,29 @@ static int adv7511_log_status(struct v4l2_subdev *sd)
errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+ if (adv7511_rd(sd, 0xaf) & 0x02) {
+ /* HDMI only */
+ u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
+ u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
+ adv7511_rd(sd, 0x02) << 8 |
+ adv7511_rd(sd, 0x03);
+ u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
+ u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
+ u32 CTS;
+
+ if (manual_cts)
+ CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
+ adv7511_rd(sd, 0x08) << 8 |
+ adv7511_rd(sd, 0x09);
+ else
+ CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
+ adv7511_rd(sd, 0x05) << 8 |
+ adv7511_rd(sd, 0x06);
+ v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
+ manual_cts ? "manual" : "automatic", N, CTS);
+ v4l2_info(sd, "VIC: detected %d, sent %d\n",
+ vic_detect, vic_sent);
+ }
if (state->dv_timings.type == V4L2_DV_BT_656_1120)
v4l2_print_dv_timings(sd->name, "timings: ",
&state->dv_timings, false);
@@ -942,26 +965,38 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
static bool edid_block_verify_crc(uint8_t *edid_block)
{
- int i;
uint8_t sum = 0;
+ int i;
for (i = 0; i < 128; i++)
- sum += *(edid_block + i);
- return (sum == 0);
+ sum += edid_block[i];
+ return sum == 0;
}
-static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
{
struct adv7511_state *state = get_adv7511_state(sd);
u32 blocks = state->edid.blocks;
uint8_t *data = state->edid.data;
- if (edid_block_verify_crc(&data[segment * 256])) {
- if ((segment + 1) * 2 <= blocks)
- return edid_block_verify_crc(&data[segment * 256 + 128]);
+ if (!edid_block_verify_crc(&data[segment * 256]))
+ return false;
+ if ((segment + 1) * 2 <= blocks)
+ return edid_block_verify_crc(&data[segment * 256 + 128]);
+ return true;
+}
+
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+ static const u8 hdmi_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u8 *data = state->edid.data;
+
+ if (segment != 0)
return true;
- }
- return false;
+ return !memcmp(data, hdmi_header, sizeof(hdmi_header));
}
static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
@@ -990,9 +1025,10 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
state->edid.blocks = state->edid.data[0x7e] + 1;
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
}
- if (!edid_segment_verify_crc(sd, segment)) {
+ if (!edid_verify_crc(sd, segment) ||
+ !edid_verify_header(sd, segment)) {
/* edid crc error, force reread of edid segment */
- v4l2_dbg(1, debug, sd, "%s: edid crc error\n", __func__);
+ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
state->have_monitor = false;
adv7511_s_power(sd, false);
adv7511_s_power(sd, true);
@@ -1038,6 +1074,12 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
/* clear all interrupts */
adv7511_wr(sd, 0x96, 0xff);
+ /*
+ * Stop HPD from resetting a lot of registers. Otherwise hotplug
+ * bounces can leave the chip in a partly uninitialized state.
+ */
+ adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
memset(edid, 0, sizeof(struct adv7511_state_edid));
state->have_monitor = false;
adv7511_set_isr(sd, false);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index a324106b9f11..71c8570bd9ea 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -53,8 +53,6 @@ MODULE_LICENSE("GPL");
/* ADV7604 system clock frequency */
#define ADV7604_fsc (28636360)
-#define DIGITAL_INPUT (state->mode == ADV7604_MODE_HDMI)
-
/*
**********************************************************************
*
@@ -67,17 +65,19 @@ struct adv7604_state {
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
- enum adv7604_mode mode;
+ enum adv7604_input_port selected_input;
struct v4l2_dv_timings timings;
- u8 edid[256];
- unsigned edid_blocks;
+ struct {
+ u8 edid[256];
+ u32 present;
+ unsigned blocks;
+ } edid;
+ u16 spa_port_a[2];
struct v4l2_fract aspect_ratio;
u32 rgb_quantization_range;
struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
- bool connector_hdmi;
bool restart_stdi_once;
- u32 prev_input_status;
/* i2c clients */
struct i2c_client *i2c_avlink;
@@ -160,6 +160,7 @@ static const struct v4l2_dv_timings adv7604_timings[] = {
V4L2_DV_BT_DMT_1792X1344P60,
V4L2_DV_BT_DMT_1856X1392P60,
V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
V4L2_DV_BT_DMT_1366X768P60,
V4L2_DV_BT_DMT_1920X1080P60,
{ },
@@ -507,57 +508,31 @@ static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
return 0;
}
-static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct adv7604_state *state = container_of(dwork, struct adv7604_state,
- delayed_work_enable_hotplug);
- struct v4l2_subdev *sd = &state->sd;
-
- v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
-
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)1);
-}
-
static inline int edid_write_block(struct v4l2_subdev *sd,
unsigned len, const u8 *val)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7604_state *state = to_state(sd);
int err = 0;
int i;
v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
-
- /* Disables I2C access to internal EDID ram from DDC port */
- rep_write_and_or(sd, 0x77, 0xf0, 0x0);
-
for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
I2C_SMBUS_BLOCK_MAX, val + i);
- if (err)
- return err;
+ return err;
+}
- /* adv7604 calculates the checksums and enables I2C access to internal
- EDID ram from DDC port. */
- rep_write_and_or(sd, 0x77, 0xf0, 0x1);
+static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7604_state *state = container_of(dwork, struct adv7604_state,
+ delayed_work_enable_hotplug);
+ struct v4l2_subdev *sd = &state->sd;
- for (i = 0; i < 1000; i++) {
- if (rep_read(sd, 0x7d) & 1)
- break;
- mdelay(1);
- }
- if (i == 1000) {
- v4l_err(client, "error enabling edid\n");
- return -EIO;
- }
+ v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
- /* enable hotplug after 100 ms */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 10);
- return 0;
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
}
static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
@@ -574,6 +549,11 @@ static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
}
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
static inline int test_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7604_state *state = to_state(sd);
@@ -623,6 +603,26 @@ static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
/* ----------------------------------------------------------------------- */
+static inline bool is_analog_input(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ return state->selected_input == ADV7604_INPUT_VGA_RGB ||
+ state->selected_input == ADV7604_INPUT_VGA_COMP;
+}
+
+static inline bool is_digital_input(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ return state->selected_input == ADV7604_INPUT_HDMI_PORT_A ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_B ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_C ||
+ state->selected_input == ADV7604_INPUT_HDMI_PORT_D;
+}
+
+/* ----------------------------------------------------------------------- */
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static void adv7604_inv_register(struct v4l2_subdev *sd)
{
@@ -696,45 +696,47 @@ static int adv7604_g_register(struct v4l2_subdev *sd,
static int adv7604_s_register(struct v4l2_subdev *sd,
const struct v4l2_dbg_register *reg)
{
+ u8 val = reg->val & 0xff;
+
switch (reg->reg >> 8) {
case 0:
- io_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ io_write(sd, reg->reg & 0xff, val);
break;
case 1:
- avlink_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ avlink_write(sd, reg->reg & 0xff, val);
break;
case 2:
- cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ cec_write(sd, reg->reg & 0xff, val);
break;
case 3:
- infoframe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ infoframe_write(sd, reg->reg & 0xff, val);
break;
case 4:
- esdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ esdp_write(sd, reg->reg & 0xff, val);
break;
case 5:
- dpp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ dpp_write(sd, reg->reg & 0xff, val);
break;
case 6:
- afe_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ afe_write(sd, reg->reg & 0xff, val);
break;
case 7:
- rep_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ rep_write(sd, reg->reg & 0xff, val);
break;
case 8:
- edid_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ edid_write(sd, reg->reg & 0xff, val);
break;
case 9:
- hdmi_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ hdmi_write(sd, reg->reg & 0xff, val);
break;
case 0xa:
- test_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ test_write(sd, reg->reg & 0xff, val);
break;
case 0xb:
- cp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ cp_write(sd, reg->reg & 0xff, val);
break;
case 0xc:
- vdp_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ vdp_write(sd, reg->reg & 0xff, val);
break;
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
@@ -748,10 +750,13 @@ static int adv7604_s_register(struct v4l2_subdev *sd,
static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
+ u8 reg_io_6f = io_read(sd, 0x6f);
- /* port A only */
return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
- ((io_read(sd, 0x6f) & 0x10) >> 4));
+ ((reg_io_6f & 0x10) >> 4) |
+ ((reg_io_6f & 0x08) >> 2) |
+ (reg_io_6f & 0x04) |
+ ((reg_io_6f & 0x02) << 2));
}
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
@@ -759,12 +764,11 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
const struct adv7604_video_standards *predef_vid_timings,
const struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
int i;
for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings,
- DIGITAL_INPUT ? 250000 : 1000000))
+ is_digital_input(sd) ? 250000 : 1000000))
continue;
io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) +
@@ -799,27 +803,22 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xab, 0x00);
cp_write(sd, 0xac, 0x00);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
err = find_and_set_predefined_video_timings(sd,
0x01, adv7604_prim_mode_comp, timings);
if (err)
err = find_and_set_predefined_video_timings(sd,
0x02, adv7604_prim_mode_gr, timings);
- break;
- case ADV7604_MODE_HDMI:
+ } else if (is_digital_input(sd)) {
err = find_and_set_predefined_video_timings(sd,
0x05, adv7604_prim_mode_hdmi_comp, timings);
if (err)
err = find_and_set_predefined_video_timings(sd,
0x06, adv7604_prim_mode_hdmi_gr, timings);
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
err = -1;
- break;
}
@@ -846,9 +845,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
v4l2_dbg(2, debug, sd, "%s\n", __func__);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
/* auto graphics */
io_write(sd, 0x00, 0x07); /* video std */
io_write(sd, 0x01, 0x02); /* prim mode */
@@ -858,33 +855,28 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
/* IO-map reg. 0x16 and 0x17 should be written in sequence */
- if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
+ if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll))
v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
- break;
- }
/* active video - horizontal timing */
cp_write(sd, 0xa2, (cp_start_sav >> 4) & 0xff);
cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) |
- ((cp_start_eav >> 8) & 0x0f));
+ ((cp_start_eav >> 8) & 0x0f));
cp_write(sd, 0xa4, cp_start_eav & 0xff);
/* active video - vertical timing */
cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
- ((cp_end_vbi >> 8) & 0xf));
+ ((cp_end_vbi >> 8) & 0xf));
cp_write(sd, 0xa7, cp_end_vbi & 0xff);
- break;
- case ADV7604_MODE_HDMI:
+ } else if (is_digital_input(sd)) {
/* set default prim_mode/vid_std for HDMI
according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
@@ -893,43 +885,149 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0xac, (height & 0x0f) << 4);
}
+static void adv7604_set_offset(struct v4l2_subdev *sd, bool auto_offset, u16 offset_a, u16 offset_b, u16 offset_c)
+{
+ struct adv7604_state *state = to_state(sd);
+ u8 offset_buf[4];
+
+ if (auto_offset) {
+ offset_a = 0x3ff;
+ offset_b = 0x3ff;
+ offset_c = 0x3ff;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s offset: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_offset ? "Auto" : "Manual",
+ offset_a, offset_b, offset_c);
+
+ offset_buf[0] = (cp_read(sd, 0x77) & 0xc0) | ((offset_a & 0x3f0) >> 4);
+ offset_buf[1] = ((offset_a & 0x00f) << 4) | ((offset_b & 0x3c0) >> 6);
+ offset_buf[2] = ((offset_b & 0x03f) << 2) | ((offset_c & 0x300) >> 8);
+ offset_buf[3] = offset_c & 0x0ff;
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x77, 4, offset_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x77, 0x78, 0x79, 0x7a\n", __func__);
+}
+
+static void adv7604_set_gain(struct v4l2_subdev *sd, bool auto_gain, u16 gain_a, u16 gain_b, u16 gain_c)
+{
+ struct adv7604_state *state = to_state(sd);
+ u8 gain_buf[4];
+ u8 gain_man = 1;
+ u8 agc_mode_man = 1;
+
+ if (auto_gain) {
+ gain_man = 0;
+ agc_mode_man = 0;
+ gain_a = 0x100;
+ gain_b = 0x100;
+ gain_c = 0x100;
+ }
+
+ v4l2_dbg(2, debug, sd, "%s: %s gain: a = 0x%x, b = 0x%x, c = 0x%x\n",
+ __func__, auto_gain ? "Auto" : "Manual",
+ gain_a, gain_b, gain_c);
+
+ gain_buf[0] = ((gain_man << 7) | (agc_mode_man << 6) | ((gain_a & 0x3f0) >> 4));
+ gain_buf[1] = (((gain_a & 0x00f) << 4) | ((gain_b & 0x3c0) >> 6));
+ gain_buf[2] = (((gain_b & 0x03f) << 2) | ((gain_c & 0x300) >> 8));
+ gain_buf[3] = ((gain_c & 0x0ff));
+
+ /* Registers must be written in this order with no i2c access in between */
+ if (adv_smbus_write_i2c_block_data(state->i2c_cp, 0x73, 4, gain_buf))
+ v4l2_err(sd, "%s: i2c error writing to CP reg 0x73, 0x74, 0x75, 0x76\n", __func__);
+}
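/*
 * Editorial sketch (not part of the patch): both helpers above pack three
 * 10-bit values back to back into four consecutive CP registers, offsets
 * into 0x77-0x7a and gains into 0x73-0x76 (the gain buffer carries the
 * gain_man and agc_mode_man bits in the top of its first byte instead of
 * the preserved top bits of 0x77).  For example, with a = 0x3ff, b = 0x000,
 * c = 0x155 the buffer becomes:
 *
 *   buf[0] = xx11 1111   kept/mode bits, then a[9:4]
 *   buf[1] = 1111 0000   a[3:0], b[9:6]
 *   buf[2] = 0000 0001   b[5:0], c[9:8]
 *   buf[3] = 0101 0101   c[7:0]
 */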
+
static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
+ bool rgb_output = io_read(sd, 0x02) & 0x02;
+ bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+
+ v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
+ __func__, state->rgb_quantization_range,
+ rgb_output, hdmi_signal);
+
+ adv7604_set_gain(sd, true, 0x0, 0x0, 0x0);
+ adv7604_set_offset(sd, true, 0x0, 0x0, 0x0);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
- /* automatic */
- if (DIGITAL_INPUT && !(hdmi_read(sd, 0x05) & 0x80)) {
- /* receiving DVI-D signal */
+ if (state->selected_input == ADV7604_INPUT_VGA_RGB) {
+ /* Receiving analog RGB signal
+ * Set RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* Receiving analog YPbPr signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ if (hdmi_signal) {
+ /* Receiving HDMI signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
- /* ADV7604 selects RGB limited range regardless of
- input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ /* Receiving DVI-D signal
+ * ADV7604 selects RGB limited range regardless of
+ * input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ if (is_digital_input(sd) && rgb_output) {
+ adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
} else {
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
}
- } else {
- /* receiving HDMI or analog signal, set automode */
- io_write_and_or(sd, 0x02, 0x0f, 0xf0);
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* YCrCb limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x20);
+ break;
+ }
+
/* RGB limited range (16-235) */
io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
break;
case V4L2_DV_RGB_RANGE_FULL:
+ if (state->selected_input == ADV7604_INPUT_VGA_COMP) {
+ /* YCrCb full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ break;
+ }
+
/* RGB full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x10);
+
+ if (is_analog_input(sd) || hdmi_signal)
+ break;
+
+ /* Adjust gain/offset for DVI-D signals only */
+ if (rgb_output) {
+ adv7604_set_offset(sd, false, 0x40, 0x40, 0x40);
+ } else {
+ adv7604_set_gain(sd, false, 0xe0, 0xe0, 0xe0);
+ adv7604_set_offset(sd, false, 0x70, 0x70, 0x70);
+ }
break;
}
}
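/*
 * Editorial summary of the rewritten set_rgb_quantization_range() above
 * (annotation only, not part of the patch): gain and offset are first reset
 * to automatic; in AUTO mode an analog RGB input forces RGB full range,
 * analog YPbPr and HDMI inputs use the chip's automode, and DVI-D falls back
 * to limited range for CEA-861 timings or full range otherwise, applying
 * manual gain/offset in the full-range DVI-D case depending on whether the
 * output is RGB or YCbCr.  The LIMITED and FULL cases now also handle the
 * analog YPbPr input by selecting the YCrCb ranges (0x20/0x60) instead of
 * the RGB ones.
 */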
-
static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
@@ -983,8 +1081,9 @@ static inline bool no_power(struct v4l2_subdev *sd)
static inline bool no_signal_tmds(struct v4l2_subdev *sd)
{
- /* TODO port B, C and D */
- return !(io_read(sd, 0x6a) & 0x10);
+ struct adv7604_state *state = to_state(sd);
+
+ return !(io_read(sd, 0x6a) & (0x10 >> state->selected_input));
}
static inline bool no_lock_tmds(struct v4l2_subdev *sd)
@@ -1011,7 +1110,6 @@ static inline bool no_lock_stdi(struct v4l2_subdev *sd)
static inline bool no_signal(struct v4l2_subdev *sd)
{
- struct adv7604_state *state = to_state(sd);
bool ret;
ret = no_power(sd);
@@ -1019,7 +1117,7 @@ static inline bool no_signal(struct v4l2_subdev *sd)
ret |= no_lock_stdi(sd);
ret |= no_lock_sspd(sd);
- if (DIGITAL_INPUT) {
+ if (is_digital_input(sd)) {
ret |= no_lock_tmds(sd);
ret |= no_signal_tmds(sd);
}
@@ -1036,13 +1134,11 @@ static inline bool no_lock_cp(struct v4l2_subdev *sd)
static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
- struct adv7604_state *state = to_state(sd);
-
*status = 0;
*status |= no_power(sd) ? V4L2_IN_ST_NO_POWER : 0;
*status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
if (no_lock_cp(sd))
- *status |= DIGITAL_INPUT ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
+ *status |= is_digital_input(sd) ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
@@ -1157,13 +1253,11 @@ static int adv7604_enum_dv_timings(struct v4l2_subdev *sd,
static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
- struct adv7604_state *state = to_state(sd);
-
cap->type = V4L2_DV_BT_656_1120;
cap->bt.max_width = 1920;
cap->bt.max_height = 1200;
cap->bt.min_pixelclock = 25000000;
- if (DIGITAL_INPUT)
+ if (is_digital_input(sd))
cap->bt.max_pixelclock = 225000000;
else
cap->bt.max_pixelclock = 170000000;
@@ -1179,12 +1273,11 @@ static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- struct adv7604_state *state = to_state(sd);
int i;
for (i = 0; adv7604_timings[i].bt.width; i++) {
if (v4l2_match_dv_timings(timings, &adv7604_timings[i],
- DIGITAL_INPUT ? 250000 : 1000000)) {
+ is_digital_input(sd) ? 250000 : 1000000)) {
*timings = adv7604_timings[i];
break;
}
@@ -1204,6 +1297,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
memset(timings, 0, sizeof(struct v4l2_dv_timings));
if (no_signal(sd)) {
+ state->restart_stdi_once = true;
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
}
@@ -1216,7 +1310,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
bt->interlaced = stdi.interlaced ?
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- if (DIGITAL_INPUT) {
+ if (is_digital_input(sd)) {
uint32_t freq;
timings->type = V4L2_DV_BT_656_1120;
@@ -1305,8 +1399,8 @@ found:
return -ENOLINK;
}
- if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
- (DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+ if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+ (is_digital_input(sd) && bt->pixelclock > 225000000)) {
v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
__func__, (u32)bt->pixelclock);
return -ERANGE;
@@ -1329,10 +1423,15 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
if (!timings)
return -EINVAL;
+ if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+ v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+ return 0;
+ }
+
bt = &timings->bt;
- if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
- (DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+ if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
+ (is_digital_input(sd) && bt->pixelclock > 225000000)) {
v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
__func__, (u32)bt->pixelclock);
return -ERANGE;
@@ -1354,7 +1453,6 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
set_rgb_quantization_range(sd);
-
if (debug > 1)
v4l2_print_dv_timings(sd->name, "adv7604_s_dv_timings: ",
timings, true);
@@ -1374,30 +1472,24 @@ static void enable_input(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
- /* enable */
+ if (is_analog_input(sd)) {
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
- break;
- case ADV7604_MODE_HDMI:
- /* enable */
- hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
+ } else if (is_digital_input(sd)) {
+ hdmi_write_and_or(sd, 0x00, 0xfc, state->selected_input);
hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
}
static void disable_input(struct v4l2_subdev *sd)
{
- /* disable */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio */
+ msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 7.16.10] */
io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
- hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
}
@@ -1405,9 +1497,7 @@ static void select_input(struct v4l2_subdev *sd)
{
struct adv7604_state *state = to_state(sd);
- switch (state->mode) {
- case ADV7604_MODE_COMP:
- case ADV7604_MODE_GR:
+ if (is_analog_input(sd)) {
/* reset ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x04); /* HDMI filter optimization */
@@ -1433,9 +1523,9 @@ static void select_input(struct v4l2_subdev *sd)
cp_write(sd, 0x3e, 0x04); /* CP core pre-gain control */
cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
cp_write(sd, 0x40, 0x5c); /* CP core pre-gain control. Graphics mode */
- break;
+ } else if (is_digital_input(sd)) {
+ hdmi_write(sd, 0x00, state->selected_input & 0x03);
- case ADV7604_MODE_HDMI:
/* set ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x84); /* HDMI filter optimization */
@@ -1461,12 +1551,9 @@ static void select_input(struct v4l2_subdev *sd)
cp_write(sd, 0x3e, 0x00); /* CP core pre-gain control */
cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
cp_write(sd, 0x40, 0x80); /* CP core pre-gain control. Graphics mode */
-
- break;
- default:
- v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
- __func__, state->mode);
- break;
+ } else {
+ v4l2_dbg(2, debug, sd, "%s: Unknown port %d selected\n",
+ __func__, state->selected_input);
}
}
@@ -1475,9 +1562,13 @@ static int adv7604_s_routing(struct v4l2_subdev *sd,
{
struct adv7604_state *state = to_state(sd);
- v4l2_dbg(2, debug, sd, "%s: input %d", __func__, input);
+ v4l2_dbg(2, debug, sd, "%s: input %d, selected input %d",
+ __func__, input, state->selected_input);
+
+ if (input == state->selected_input)
+ return 0;
- state->mode = input;
+ state->selected_input = input;
disable_input(sd);
@@ -1516,36 +1607,47 @@ static int adv7604_g_mbus_fmt(struct v4l2_subdev *sd,
static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
- struct adv7604_state *state = to_state(sd);
- u8 fmt_change, fmt_change_digital, tx_5v;
- u32 input_status;
+ const u8 irq_reg_0x43 = io_read(sd, 0x43);
+ const u8 irq_reg_0x6b = io_read(sd, 0x6b);
+ const u8 irq_reg_0x70 = io_read(sd, 0x70);
+ u8 fmt_change_digital;
+ u8 fmt_change;
+ u8 tx_5v;
+
+ if (irq_reg_0x43)
+ io_write(sd, 0x44, irq_reg_0x43);
+ if (irq_reg_0x70)
+ io_write(sd, 0x71, irq_reg_0x70);
+ if (irq_reg_0x6b)
+ io_write(sd, 0x6c, irq_reg_0x6b);
+
+ v4l2_dbg(2, debug, sd, "%s: ", __func__);
/* format change */
- fmt_change = io_read(sd, 0x43) & 0x98;
- if (fmt_change)
- io_write(sd, 0x44, fmt_change);
- fmt_change_digital = DIGITAL_INPUT ? (io_read(sd, 0x6b) & 0xc0) : 0;
- if (fmt_change_digital)
- io_write(sd, 0x6c, fmt_change_digital);
+ fmt_change = irq_reg_0x43 & 0x98;
+ fmt_change_digital = is_digital_input(sd) ? (irq_reg_0x6b & 0xc0) : 0;
+
if (fmt_change || fmt_change_digital) {
v4l2_dbg(1, debug, sd,
"%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
__func__, fmt_change, fmt_change_digital);
- adv7604_g_input_status(sd, &input_status);
- if (input_status != state->prev_input_status) {
- v4l2_dbg(1, debug, sd,
- "%s: input_status = 0x%x, prev_input_status = 0x%x\n",
- __func__, input_status, state->prev_input_status);
- state->prev_input_status = input_status;
- v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
- }
+ v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
if (handled)
*handled = true;
}
+ /* HDMI/DVI mode */
+ if (irq_reg_0x6b & 0x01) {
+ v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+ (io_read(sd, 0x6a) & 0x01) ? "HDMI" : "DVI");
+ set_rgb_quantization_range(sd);
+ if (handled)
+ *handled = true;
+ }
+
/* tx 5v detect */
- tx_5v = io_read(sd, 0x70) & 0x10;
+ tx_5v = io_read(sd, 0x70) & 0x1e;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
io_write(sd, 0x71, tx_5v);
@@ -1559,55 +1661,178 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
struct adv7604_state *state = to_state(sd);
+ u8 *data = NULL;
- if (edid->pad != 0)
+ if (edid->pad > ADV7604_EDID_PORT_D)
return -EINVAL;
if (edid->blocks == 0)
return -EINVAL;
- if (edid->start_block >= state->edid_blocks)
+ if (edid->blocks > 2)
+ return -EINVAL;
+ if (edid->start_block > 1)
return -EINVAL;
- if (edid->start_block + edid->blocks > state->edid_blocks)
- edid->blocks = state->edid_blocks - edid->start_block;
+ if (edid->start_block == 1)
+ edid->blocks = 1;
if (!edid->edid)
return -EINVAL;
- memcpy(edid->edid + edid->start_block * 128,
- state->edid + edid->start_block * 128,
+
+ if (edid->blocks > state->edid.blocks)
+ edid->blocks = state->edid.blocks;
+
+ switch (edid->pad) {
+ case ADV7604_EDID_PORT_A:
+ case ADV7604_EDID_PORT_B:
+ case ADV7604_EDID_PORT_C:
+ case ADV7604_EDID_PORT_D:
+ if (state->edid.present & (1 << edid->pad))
+ data = state->edid.edid;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!data)
+ return -ENODATA;
+
+ memcpy(edid->edid,
+ data + edid->start_block * 128,
edid->blocks * 128);
return 0;
}
+static int get_edid_spa_location(const u8 *edid)
+{
+ u8 d;
+
+ if ((edid[0x7e] != 1) ||
+ (edid[0x80] != 0x02) ||
+ (edid[0x81] != 0x03)) {
+ return -1;
+ }
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[0x82] & 0x7f;
+ if (d > 4) {
+ int i = 0x84;
+ int end = 0x80 + d;
+
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if ((tag == 3) && (len >= 5))
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ return -1;
+}
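/*
 * Worked example for get_edid_spa_location() (annotation, not part of the
 * patch): a two-block EDID has its CEA-861 extension at offset 0x80 (bytes
 * 0x02 0x03), edid[0x82] & 0x7f gives the offset d of the detailed timing
 * descriptors, so the data block collection runs from 0x84 up to 0x80 + d.
 * A Vendor Specific Data Block header byte of 0x65 decodes as
 * tag = 0x65 >> 5 = 3 and len = 0x65 & 0x1f = 5, and the source physical
 * address then starts four bytes after the header (right behind the 24-bit
 * IEEE OUI), which is the i + 4 returned above.
 */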
+
static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
struct adv7604_state *state = to_state(sd);
+ int spa_loc;
+ int tmp = 0;
int err;
+ int i;
- if (edid->pad != 0)
+ if (edid->pad > ADV7604_EDID_PORT_D)
return -EINVAL;
if (edid->start_block != 0)
return -EINVAL;
if (edid->blocks == 0) {
- /* Pull down the hotplug pin */
- v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
- /* Disables I2C access to internal EDID ram from DDC port */
- rep_write_and_or(sd, 0x77, 0xf0, 0x0);
- state->edid_blocks = 0;
+ /* Disable hotplug and I2C access to EDID RAM from DDC port */
+ state->edid.present &= ~(1 << edid->pad);
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&state->edid.present);
+ rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
/* Fall back to a 16:9 aspect ratio */
state->aspect_ratio.numerator = 16;
state->aspect_ratio.denominator = 9;
+
+ if (!state->edid.present)
+ state->edid.blocks = 0;
+
+ v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n",
+ __func__, edid->pad, state->edid.present);
return 0;
}
- if (edid->blocks > 2)
+ if (edid->blocks > 2) {
+ edid->blocks = 2;
return -E2BIG;
+ }
if (!edid->edid)
return -EINVAL;
- memcpy(state->edid, edid->edid, 128 * edid->blocks);
- state->edid_blocks = edid->blocks;
+
+ v4l2_dbg(2, debug, sd, "%s: write EDID pad %d, edid.present = 0x%x\n",
+ __func__, edid->pad, state->edid.present);
+
+ /* Disable hotplug and I2C access to EDID RAM from DDC port */
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)&tmp);
+ rep_write_and_or(sd, 0x77, 0xf0, 0x00);
+
+ spa_loc = get_edid_spa_location(edid->edid);
+ if (spa_loc < 0)
+ spa_loc = 0xc0; /* Default value [REF_02, p. 116] */
+
+ switch (edid->pad) {
+ case ADV7604_EDID_PORT_A:
+ state->spa_port_a[0] = edid->edid[spa_loc];
+ state->spa_port_a[1] = edid->edid[spa_loc + 1];
+ break;
+ case ADV7604_EDID_PORT_B:
+ rep_write(sd, 0x70, edid->edid[spa_loc]);
+ rep_write(sd, 0x71, edid->edid[spa_loc + 1]);
+ break;
+ case ADV7604_EDID_PORT_C:
+ rep_write(sd, 0x72, edid->edid[spa_loc]);
+ rep_write(sd, 0x73, edid->edid[spa_loc + 1]);
+ break;
+ case ADV7604_EDID_PORT_D:
+ rep_write(sd, 0x74, edid->edid[spa_loc]);
+ rep_write(sd, 0x75, edid->edid[spa_loc + 1]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rep_write(sd, 0x76, spa_loc & 0xff);
+ rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
+
+ edid->edid[spa_loc] = state->spa_port_a[0];
+ edid->edid[spa_loc + 1] = state->spa_port_a[1];
+
+ memcpy(state->edid.edid, edid->edid, 128 * edid->blocks);
+ state->edid.blocks = edid->blocks;
state->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15],
edid->edid[0x16]);
- err = edid_write_block(sd, 128 * edid->blocks, state->edid);
- if (err < 0)
- v4l2_err(sd, "error %d writing edid\n", err);
- return err;
+ state->edid.present |= 1 << edid->pad;
+
+ err = edid_write_block(sd, 128 * edid->blocks, state->edid.edid);
+ if (err < 0) {
+ v4l2_err(sd, "error %d writing edid pad %d\n", err, edid->pad);
+ return err;
+ }
+
+ /* adv7604 calculates the checksums and enables I2C access to internal
+ EDID RAM from DDC port. */
+ rep_write_and_or(sd, 0x77, 0xf0, state->edid.present);
+
+ for (i = 0; i < 1000; i++) {
+ if (rep_read(sd, 0x7d) & state->edid.present)
+ break;
+ mdelay(1);
+ }
+ if (i == 1000) {
+ v4l2_err(sd, "error enabling edid (0x%x)\n", state->edid.present);
+ return -EIO;
+ }
+
+ /* enable hotplug after 100 ms */
+ queue_delayed_work(state->work_queues,
+ &state->delayed_work_enable_hotplug, HZ / 10);
+ return 0;
}
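/*
 * Illustrative use of the per-port EDID support added above (hypothetical
 * caller code, not part of the patch; 'sd' and 'edid_data' are assumed to
 * exist in the bridge driver):
 *
 *	struct v4l2_subdev_edid edid = {
 *		.pad = ADV7604_EDID_PORT_A,
 *		.start_block = 0,
 *		.blocks = 2,
 *		.edid = edid_data,	// 256-byte buffer
 *	};
 *	int err = v4l2_subdev_call(sd, pad, set_edid, &edid);
 */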
/*********** avi info frame CEA-861-E **************/
@@ -1670,7 +1895,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
char *input_color_space_txt[16] = {
"RGB limited range (16-235)", "RGB full range (0-255)",
"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
- "XvYCC Bt.601", "XvYCC Bt.709",
+ "xvYCC Bt.601", "xvYCC Bt.709",
"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
"invalid", "invalid", "invalid", "invalid", "invalid",
"invalid", "invalid", "automatic"
@@ -1689,16 +1914,20 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "-----Chip status-----\n");
v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
- v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
- "HDMI" : (DIGITAL_INPUT ? "DVI-D" : "DVI-A"));
- v4l2_info(sd, "EDID: %s\n", ((rep_read(sd, 0x7d) & 0x01) &&
- (rep_read(sd, 0x77) & 0x01)) ? "enabled" : "disabled ");
+ v4l2_info(sd, "EDID enabled port A: %s, B: %s, C: %s, D: %s\n",
+ ((rep_read(sd, 0x7d) & 0x01) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x02) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x04) ? "Yes" : "No"),
+ ((rep_read(sd, 0x7d) & 0x08) ? "Yes" : "No"));
v4l2_info(sd, "CEC: %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
"enabled" : "disabled");
v4l2_info(sd, "-----Signal status-----\n");
- v4l2_info(sd, "Cable detected (+5V power): %s\n",
- (io_read(sd, 0x6f) & 0x10) ? "true" : "false");
+ v4l2_info(sd, "Cable detected (+5V power) port A: %s, B: %s, C: %s, D: %s\n",
+ ((io_read(sd, 0x6f) & 0x10) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x08) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x04) ? "Yes" : "No"),
+ ((io_read(sd, 0x6f) & 0x02) ? "Yes" : "No"));
v4l2_info(sd, "TMDS signal detected: %s\n",
no_signal_tmds(sd) ? "false" : "true");
v4l2_info(sd, "TMDS signal locked: %s\n",
@@ -1744,11 +1973,14 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
- if (!DIGITAL_INPUT)
+ if (!is_digital_input(sd))
return 0;
v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
- v4l2_info(sd, "HDCP encrypted content: %s\n", (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
+ v4l2_info(sd, "Digital video port selected: %c\n",
+ (hdmi_read(sd, 0x00) & 0x03) + 'A');
+ v4l2_info(sd, "HDCP encrypted content: %s\n",
+ (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
v4l2_info(sd, "HDCP keys read: %s%s\n",
(hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
(hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
@@ -1894,10 +2126,16 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
pdata->replicate_av_codes << 1 |
pdata->invert_cbcr << 0);
- /* TODO from platform data */
cp_write(sd, 0x69, 0x30); /* Enable CP CSC */
- io_write(sd, 0x06, 0xa6); /* positive VS and HS */
- io_write(sd, 0x14, 0x7f); /* Drive strength adjusted to max */
+
+ /* VS, HS polarities */
+ io_write(sd, 0x06, 0xa0 | pdata->inv_vs_pol << 2 | pdata->inv_hs_pol << 1);
+
+ /* Adjust drive strength */
+ io_write(sd, 0x14, 0x40 | pdata->dr_str_data << 4 |
+ pdata->dr_str_clk << 2 |
+ pdata->dr_str_sync);
+
cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01); /* HDMI free run */
cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
cp_write(sd, 0xf9, 0x23); /* STDI ch. 1 - LCVS change threshold -
@@ -1907,6 +2145,11 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
cp_write(sd, 0xc9, 0x2d); /* use prim_mode and vid_std as free run resolution
for digital formats */
+ /* HDMI audio */
+ hdmi_write_and_or(sd, 0x15, 0xfc, 0x03); /* Mute on FIFO over-/underflow [REF_01, c. 1.2.18] */
+ hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+ hdmi_write_and_or(sd, 0x68, 0xf9, 0x06); /* FIFO reset on over-/underflow [REF_01, c. 1.2.19] */
+
/* TODO from platform data */
afe_write(sd, 0xb5, 0x01); /* Setting MCLK to 256Fs */
@@ -1917,8 +2160,8 @@ static int adv7604_core_init(struct v4l2_subdev *sd)
io_write(sd, 0x40, 0xc2); /* Configure INT1 */
io_write(sd, 0x41, 0xd7); /* STDI irq for any change, disable INT2 */
io_write(sd, 0x46, 0x98); /* Enable SSPD, STDI and CP unlocked interrupts */
- io_write(sd, 0x6e, 0xc0); /* Enable V_LOCKED and DE_REGEN_LCK interrupts */
- io_write(sd, 0x73, 0x10); /* Enable CABLE_DET_A_ST (+5v) interrupt */
+ io_write(sd, 0x6e, 0xc1); /* Enable V_LOCKED, DE_REGEN_LCK, HDMI_MODE interrupts */
+ io_write(sd, 0x73, 0x1e); /* Enable CABLE_DET_A_ST (+5v) interrupts */
return v4l2_ctrl_handler_setup(sd->ctrl_handler);
}
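/*
 * Annotation (not part of the patch): compared to the old values, IO 0x6e
 * gains bit 0 so the new HDMI/DVI mode change interrupt is delivered, and
 * IO 0x73 widens the cable detect mask from 0x10 (port A only) to 0x1e, so
 * +5V detection interrupts are enabled for what appear to be all four HDMI
 * ports A-D, matching the 0x1e mask used for tx_5v in adv7604_isr().
 */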
@@ -1964,6 +2207,8 @@ static struct i2c_client *adv7604_dummy_client(struct v4l2_subdev *sd,
static int adv7604_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ static const struct v4l2_dv_timings cea640x480 =
+ V4L2_DV_BT_CEA_640X480P59_94;
struct adv7604_state *state;
struct adv7604_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
@@ -1984,19 +2229,19 @@ static int adv7604_probe(struct i2c_client *client,
/* initialize variables */
state->restart_stdi_once = true;
- state->prev_input_status = ~0;
+ state->selected_input = ~0;
/* platform data */
if (!pdata) {
v4l_err(client, "No platform data!\n");
return -ENODEV;
}
- memcpy(&state->pdata, pdata, sizeof(state->pdata));
+ state->pdata = *pdata;
+ state->timings = cea640x480;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- state->connector_hdmi = pdata->connector_hdmi;
/* i2c access to adv7604? */
if (adv_smbus_read_byte_data_check(client, 0xfb, false) != 0x68) {
@@ -2020,7 +2265,7 @@ static int adv7604_probe(struct i2c_client *client,
/* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
- V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+ V4L2_CID_DV_RX_POWER_PRESENT, 0, 0x0f, 0, 0);
state->rgb_quantization_range_ctrl =
v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index b154f36740b4..9bbd6656fb8f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -20,10 +20,13 @@
/*
* References (c = chapter, p = page):
- * REF_01 - Analog devices, ADV7842, Register Settings Recommendations,
- * Revision 2.5, June 2010
- * REF_02 - Analog devices, Register map documentation, Documentation of
- * the register maps, Software manual, Rev. F, June 2010
+ * REF_01 - Analog Devices, ADV7842,
+ * Register Settings Recommendations, Rev. 1.9, April 2011
+ * REF_02 - Analog Devices, Software User Guide, UG-206,
+ * ADV7842 I2C Register Maps, Rev. 0, November 2010
+ * REF_03 - Analog Devices, Hardware User Guide, UG-214,
+ * ADV7842 Fast Switching 2:1 HDMI 1.4 Receiver with 3D-Comb
+ * Decoder and Digitizer, Rev. 0, January 2011
*/
@@ -61,6 +64,7 @@ MODULE_LICENSE("GPL");
*/
struct adv7842_state {
+ struct adv7842_platform_data pdata;
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
@@ -81,7 +85,7 @@ struct adv7842_state {
bool is_cea_format;
struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
- bool connector_hdmi;
+ bool restart_stdi_once;
bool hdmi_port_a;
/* i2c clients */
@@ -491,6 +495,11 @@ static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
}
+static inline int hdmi_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return hdmi_write(sd, reg, (hdmi_read(sd, reg) & mask) | val);
+}
+
static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv7842_state *state = to_state(sd);
@@ -532,7 +541,7 @@ static void main_reset(struct v4l2_subdev *sd)
adv_smbus_write_byte_no_check(client, 0xff, 0x80);
- mdelay(2);
+ mdelay(5);
}
/* ----------------------------------------------------------------------- */
@@ -587,10 +596,10 @@ static void adv7842_delayed_work_enable_hotplug(struct work_struct *work)
v4l2_dbg(2, debug, sd, "%s: enable hotplug on ports: 0x%x\n",
__func__, present);
- if (present & 0x1)
- mask |= 0x20; /* port A */
- if (present & 0x2)
- mask |= 0x10; /* port B */
+ if (present & (0x04 << ADV7842_EDID_PORT_A))
+ mask |= 0x20;
+ if (present & (0x04 << ADV7842_EDID_PORT_B))
+ mask |= 0x10;
io_write_and_or(sd, 0x20, 0xcf, mask);
}
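/*
 * Annotation (not part of the patch): hdmi_edid.present now stores the same
 * bit pattern that is written to repeater register 0x77, i.e. 0x04 << port
 * (port A = 0x04, port B = 0x08), while the hotplug mask in IO 0x20 keeps
 * using 0x20 for port A and 0x10 for port B as before.
 */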
@@ -679,14 +688,12 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7842_state *state = to_state(sd);
const u8 *val = state->hdmi_edid.edid;
- u8 cur_mask = rep_read(sd, 0x77) & 0x0c;
- u8 mask = port == 0 ? 0x4 : 0x8;
int spa_loc = edid_spa_location(val);
int err = 0;
int i;
- v4l2_dbg(2, debug, sd, "%s: write EDID on port %d (spa at 0x%x)\n",
- __func__, port, spa_loc);
+ v4l2_dbg(2, debug, sd, "%s: write EDID on port %c (spa at 0x%x)\n",
+ __func__, (port == ADV7842_EDID_PORT_A) ? 'A' : 'B', spa_loc);
/* HPA disable on port A and B */
io_write_and_or(sd, 0x20, 0xcf, 0x00);
@@ -694,6 +701,9 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
/* Disable I2C access to internal EDID ram from HDMI DDC ports */
rep_write_and_or(sd, 0x77, 0xf3, 0x00);
+ if (!state->hdmi_edid.present)
+ return 0;
+
/* edid segment pointer '0' for HDMI ports */
rep_write_and_or(sd, 0x77, 0xef, 0x00);
@@ -703,44 +713,32 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
if (err)
return err;
- if (spa_loc > 0) {
- if (port == 0) {
- /* port A SPA */
- rep_write(sd, 0x72, val[spa_loc]);
- rep_write(sd, 0x73, val[spa_loc + 1]);
- } else {
- /* port B SPA */
- rep_write(sd, 0x74, val[spa_loc]);
- rep_write(sd, 0x75, val[spa_loc + 1]);
- }
- rep_write(sd, 0x76, spa_loc);
+ if (spa_loc < 0)
+ spa_loc = 0xc0; /* Default value [REF_02, p. 199] */
+
+ if (port == ADV7842_EDID_PORT_A) {
+ rep_write(sd, 0x72, val[spa_loc]);
+ rep_write(sd, 0x73, val[spa_loc + 1]);
} else {
- /* default register values for SPA */
- if (port == 0) {
- /* port A SPA */
- rep_write(sd, 0x72, 0);
- rep_write(sd, 0x73, 0);
- } else {
- /* port B SPA */
- rep_write(sd, 0x74, 0);
- rep_write(sd, 0x75, 0);
- }
- rep_write(sd, 0x76, 0xc0);
+ rep_write(sd, 0x74, val[spa_loc]);
+ rep_write(sd, 0x75, val[spa_loc + 1]);
}
- rep_write_and_or(sd, 0x77, 0xbf, 0x00);
+ rep_write(sd, 0x76, spa_loc & 0xff);
+ rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
/* Calculates the checksums and enables I2C access to internal
* EDID ram from HDMI DDC ports
*/
- rep_write_and_or(sd, 0x77, 0xf3, mask | cur_mask);
+ rep_write_and_or(sd, 0x77, 0xf3, state->hdmi_edid.present);
for (i = 0; i < 1000; i++) {
- if (rep_read(sd, 0x7d) & mask)
+ if (rep_read(sd, 0x7d) & state->hdmi_edid.present)
break;
mdelay(1);
}
if (i == 1000) {
- v4l_err(client, "error enabling edid on port %d\n", port);
+ v4l_err(client, "error enabling edid on port %c\n",
+ (port == ADV7842_EDID_PORT_A) ? 'A' : 'B');
return -EIO;
}
@@ -927,7 +925,7 @@ static int configure_predefined_video_timings(struct v4l2_subdev *sd,
cp_write(sd, 0x27, 0x00);
cp_write(sd, 0x28, 0x00);
cp_write(sd, 0x29, 0x00);
- cp_write(sd, 0x8f, 0x00);
+ cp_write(sd, 0x8f, 0x40);
cp_write(sd, 0x90, 0x00);
cp_write(sd, 0xa5, 0x00);
cp_write(sd, 0xa6, 0x00);
@@ -1033,34 +1031,60 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+ v4l2_dbg(2, debug, sd, "%s: rgb_quantization_range = %d\n",
+ __func__, state->rgb_quantization_range);
+
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
- /* automatic */
- if (is_digital_input(sd) && !(hdmi_read(sd, 0x05) & 0x80)) {
- /* receiving DVI-D signal */
-
- /* ADV7842 selects RGB limited range regardless of
- input format (CE/IT) in automatic mode */
- if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
-
- } else {
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
- }
- } else {
- /* receiving HDMI or analog signal, set automode */
+ if (state->mode == ADV7842_MODE_RGB) {
+ /* Receiving analog RGB signal
+ * Set RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* Receiving analog YPbPr signal
+ * Set automode */
io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ if (hdmi_read(sd, 0x05) & 0x80) {
+ /* Receiving HDMI signal
+ * Set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+ }
+
+ /* Receiving DVI-D signal
+ * ADV7842 selects RGB limited range regardless of
+ * input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
- /* RGB limited range (16-235) */
- io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* YCrCb limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x20);
+ } else {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ }
break;
case V4L2_DV_RGB_RANGE_FULL:
- /* RGB full range (0-255) */
- io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* YCrCb full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
break;
}
}
@@ -1298,7 +1322,7 @@ static int adv7842_dv_timings_cap(struct v4l2_subdev *sd,
}
/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
- if the format is listed in adv7604_timings[] */
+ if the format is listed in adv7842_timings[] */
static void adv7842_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
@@ -1314,119 +1338,106 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
struct v4l2_bt_timings *bt = &timings->bt;
struct stdi_readback stdi = { 0 };
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
/* SDP block */
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
/* read STDI */
if (read_stdi(sd, &stdi)) {
+ state->restart_stdi_once = true;
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
}
bt->interlaced = stdi.interlaced ?
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
- ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
- bt->vsync = stdi.lcvs;
if (is_digital_input(sd)) {
- bool lock = hdmi_read(sd, 0x04) & 0x02;
- bool interlaced = hdmi_read(sd, 0x0b) & 0x20;
- unsigned w = (hdmi_read(sd, 0x07) & 0x1f) * 256 + hdmi_read(sd, 0x08);
- unsigned h = (hdmi_read(sd, 0x09) & 0x1f) * 256 + hdmi_read(sd, 0x0a);
- unsigned w_total = (hdmi_read(sd, 0x1e) & 0x3f) * 256 +
- hdmi_read(sd, 0x1f);
- unsigned h_total = ((hdmi_read(sd, 0x26) & 0x3f) * 256 +
- hdmi_read(sd, 0x27)) / 2;
- unsigned freq = (((hdmi_read(sd, 0x51) << 1) +
- (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
- ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
- int i;
+ uint32_t freq;
- if (is_hdmi(sd)) {
- /* adjust for deep color mode */
- freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0)>>6) * 2 + 8);
- }
-
- /* No lock? */
- if (!lock) {
- v4l2_dbg(1, debug, sd, "%s: no lock on TMDS signal\n", __func__);
- return -ENOLCK;
- }
- /* Interlaced? */
- if (interlaced) {
- v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
- return -ERANGE;
- }
-
- for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
- const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
-
- if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
- adv7842_get_dv_timings_cap(sd),
- adv7842_check_dv_timings, NULL))
- continue;
- if (w_total != htotal(bt) || h_total != vtotal(bt))
- continue;
+ timings->type = V4L2_DV_BT_656_1120;
- if (w != bt->width || h != bt->height)
- continue;
+ bt->width = (hdmi_read(sd, 0x07) & 0x0f) * 256 + hdmi_read(sd, 0x08);
+ bt->height = (hdmi_read(sd, 0x09) & 0x0f) * 256 + hdmi_read(sd, 0x0a);
+ freq = (hdmi_read(sd, 0x06) * 1000000) +
+ ((hdmi_read(sd, 0x3b) & 0x30) >> 4) * 250000;
- if (abs(freq - bt->pixelclock) > 1000000)
- continue;
- *timings = v4l2_dv_timings_presets[i];
- return 0;
+ if (is_hdmi(sd)) {
+ /* adjust for deep color mode */
+ freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0) >> 5) + 8);
}
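/*
 * Worked example for the pixel clock readout above (annotation, not part of
 * the patch): for a 720p60 source, HDMI map register 0x06 would read 74 and
 * bits 5:4 of 0x3b would read 1, giving 74 * 1000000 + 1 * 250000 =
 * 74.25 MHz; with 30-bit deep color (0x0b[7:6] = 1) the TMDS clock runs at
 * 10/8 of the pixel clock, and freq * 8 / (2 + 8) scales it back down.
 */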
-
- timings->type = V4L2_DV_BT_656_1120;
-
- bt->width = w;
- bt->height = h;
- bt->interlaced = (hdmi_read(sd, 0x0b) & 0x20) ?
- V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
- bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ?
- V4L2_DV_VSYNC_POS_POL : 0) | ((hdmi_read(sd, 0x05) & 0x20) ?
- V4L2_DV_HSYNC_POS_POL : 0);
- bt->pixelclock = (((hdmi_read(sd, 0x51) << 1) +
- (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
- ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
- bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x1f) * 256 +
+ bt->pixelclock = freq;
+ bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x03) * 256 +
hdmi_read(sd, 0x21);
- bt->hsync = (hdmi_read(sd, 0x22) & 0x1f) * 256 +
+ bt->hsync = (hdmi_read(sd, 0x22) & 0x03) * 256 +
hdmi_read(sd, 0x23);
- bt->hbackporch = (hdmi_read(sd, 0x24) & 0x1f) * 256 +
+ bt->hbackporch = (hdmi_read(sd, 0x24) & 0x03) * 256 +
hdmi_read(sd, 0x25);
- bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x3f) * 256 +
- hdmi_read(sd, 0x2b)) / 2;
- bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x3f) * 256 +
- hdmi_read(sd, 0x2d)) / 2;
- bt->vsync = ((hdmi_read(sd, 0x2e) & 0x3f) * 256 +
- hdmi_read(sd, 0x2f)) / 2;
- bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x3f) * 256 +
- hdmi_read(sd, 0x31)) / 2;
- bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x3f) * 256 +
- hdmi_read(sd, 0x33)) / 2;
- bt->il_vbackporch = ((hdmi_read(sd, 0x34) & 0x3f) * 256 +
- hdmi_read(sd, 0x35)) / 2;
-
- bt->standards = 0;
- bt->flags = 0;
- } else {
- /* Interlaced? */
- if (stdi.interlaced) {
- v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
- return -ERANGE;
+ bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2b)) / 2;
+ bt->vsync = ((hdmi_read(sd, 0x2e) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2f)) / 2;
+ bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x1f) * 256 +
+ hdmi_read(sd, 0x33)) / 2;
+ bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
+ ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height += (hdmi_read(sd, 0x0b) & 0x0f) * 256 +
+ hdmi_read(sd, 0x0c);
+ bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x1f) * 256 +
+ hdmi_read(sd, 0x2d)) / 2;
+ bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x1f) * 256 +
+ hdmi_read(sd, 0x31)) / 2;
+ bt->vbackporch = ((hdmi_read(sd, 0x34) & 0x1f) * 256 +
+ hdmi_read(sd, 0x35)) / 2;
}
-
+ adv7842_fill_optional_dv_timings_fields(sd, timings);
+ } else {
+ /* find format
+ * Since LCVS values are inaccurate [REF_03, p. 339-340],
+ * stdi2dv_timings() is called with lcvs +-1 if the first attempt fails.
+ */
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs += 1;
+ v4l2_dbg(1, debug, sd, "%s: lcvs + 1 = %d\n", __func__, stdi.lcvs);
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs -= 2;
+ v4l2_dbg(1, debug, sd, "%s: lcvs - 1 = %d\n", __func__, stdi.lcvs);
if (stdi2dv_timings(sd, &stdi, timings)) {
+ /*
+ * The STDI block may measure wrong values, especially
+ * for lcvs and lcf. If the driver cannot find any
+ * valid timing, the STDI block is restarted to measure
+ * the video timings again. The function will return an
+ * error, but the restart of STDI will generate a new
+ * STDI interrupt and the format detection process will
+ * restart.
+ */
+ if (state->restart_stdi_once) {
+ v4l2_dbg(1, debug, sd, "%s: restart STDI\n", __func__);
+ /* TODO restart STDI for Sync Channel 2 */
+ /* enter one-shot mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x00);
+ /* trigger STDI restart */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x04);
+ /* reset to continuous mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x02);
+ state->restart_stdi_once = false;
+ return -ENOLINK;
+ }
v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
return -ERANGE;
}
+ state->restart_stdi_once = true;
}
+found:
if (debug > 1)
- v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings: ",
- timings, true);
+ v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings:",
+ timings, true);
return 0;
}
@@ -1437,9 +1448,16 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_bt_timings *bt;
int err;
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
+ if (v4l2_match_dv_timings(&state->timings, timings, 0)) {
+ v4l2_dbg(1, debug, sd, "%s: no change\n", __func__);
+ return 0;
+ }
+
bt = &timings->bt;
if (!v4l2_valid_dv_timings(timings, adv7842_get_dv_timings_cap(sd),
@@ -1450,7 +1468,7 @@ static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
state->timings = *timings;
- cp_write(sd, 0x91, bt->interlaced ? 0x50 : 0x10);
+ cp_write(sd, 0x91, bt->interlaced ? 0x40 : 0x00);
/* Use prim_mode and vid_std when available */
err = configure_predefined_video_timings(sd, timings);
@@ -1483,18 +1501,18 @@ static int adv7842_g_dv_timings(struct v4l2_subdev *sd,
static void enable_input(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+
+ set_rgb_quantization_range(sd);
switch (state->mode) {
case ADV7842_MODE_SDP:
case ADV7842_MODE_COMP:
case ADV7842_MODE_RGB:
- /* enable */
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
break;
case ADV7842_MODE_HDMI:
- /* enable */
- hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x00); /* Unmute audio */
break;
default:
v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
@@ -1505,9 +1523,9 @@ static void enable_input(struct v4l2_subdev *sd)
static void disable_input(struct v4l2_subdev *sd)
{
- /* disable */
+ hdmi_write_and_or(sd, 0x1a, 0xef, 0x10); /* Mute audio [REF_01, c. 2.2.2] */
+ msleep(16); /* 512 samples with >= 32 kHz sample rate [REF_03, c. 8.29] */
io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
- hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
}
@@ -1575,9 +1593,6 @@ static void select_input(struct v4l2_subdev *sd,
afe_write(sd, 0x00, 0x00); /* power up ADC */
afe_write(sd, 0xc8, 0x00); /* phase control */
- io_write(sd, 0x19, 0x83); /* LLC DLL phase */
- io_write(sd, 0x33, 0x40); /* LLC DLL enable */
-
io_write(sd, 0xdd, 0x90); /* Manual 2x output clock */
/* script says register 0xde, which doesn't exist in the manual */
@@ -1611,8 +1626,6 @@ static void select_input(struct v4l2_subdev *sd,
/* deinterlacer enabled and 3D comb */
sdp_write_and_or(sd, 0x12, 0xf6, 0x09);
- sdp_write(sd, 0xdd, 0x08); /* free run auto */
-
break;
case ADV7842_MODE_COMP:
@@ -1627,6 +1640,13 @@ static void select_input(struct v4l2_subdev *sd,
afe_write(sd, 0x00, 0x00); /* power up ADC */
afe_write(sd, 0xc8, 0x00); /* phase control */
+ if (state->mode == ADV7842_MODE_COMP) {
+ /* force to YCrCb */
+ io_write_and_or(sd, 0x02, 0x0f, 0x60);
+ } else {
+ /* force to RGB */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
/* set ADI recommended settings for digitizer */
/* "ADV7842 Register Settings Recommendations
@@ -1722,19 +1742,19 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
switch (input) {
case ADV7842_SELECT_HDMI_PORT_A:
- /* TODO select HDMI_COMP or HDMI_GR */
state->mode = ADV7842_MODE_HDMI;
state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
state->hdmi_port_a = true;
break;
case ADV7842_SELECT_HDMI_PORT_B:
- /* TODO select HDMI_COMP or HDMI_GR */
state->mode = ADV7842_MODE_HDMI;
state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
state->hdmi_port_a = false;
break;
case ADV7842_SELECT_VGA_COMP:
- v4l2_info(sd, "%s: VGA component: todo\n", __func__);
+ state->mode = ADV7842_MODE_COMP;
+ state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
+ break;
case ADV7842_SELECT_VGA_RGB:
state->mode = ADV7842_MODE_RGB;
state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
@@ -1814,12 +1834,15 @@ static void adv7842_irq_enable(struct v4l2_subdev *sd, bool enable)
io_write(sd, 0x78, 0x03);
/* Enable SDP Standard Detection Change and SDP Video Detected */
io_write(sd, 0xa0, 0x09);
+ /* Enable HDMI_MODE interrupt */
+ io_write(sd, 0x69, 0x08);
} else {
io_write(sd, 0x46, 0x0);
io_write(sd, 0x5a, 0x0);
io_write(sd, 0x73, 0x0);
io_write(sd, 0x78, 0x0);
io_write(sd, 0xa0, 0x0);
+ io_write(sd, 0x69, 0x0);
}
}
@@ -1827,11 +1850,9 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct adv7842_state *state = to_state(sd);
u8 fmt_change_cp, fmt_change_digital, fmt_change_sdp;
- u8 irq_status[5];
- u8 irq_cfg = io_read(sd, 0x40);
+ u8 irq_status[6];
- /* disable irq-pin output */
- io_write(sd, 0x40, irq_cfg | 0x3);
+ adv7842_irq_enable(sd, false);
/* read status */
irq_status[0] = io_read(sd, 0x43);
@@ -1839,6 +1860,7 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
irq_status[2] = io_read(sd, 0x70);
irq_status[3] = io_read(sd, 0x75);
irq_status[4] = io_read(sd, 0x9d);
+ irq_status[5] = io_read(sd, 0x66);
/* and clear */
if (irq_status[0])
@@ -1851,10 +1873,14 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
io_write(sd, 0x76, irq_status[3]);
if (irq_status[4])
io_write(sd, 0x9e, irq_status[4]);
+ if (irq_status[5])
+ io_write(sd, 0x67, irq_status[5]);
- v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x\n", __func__,
+ adv7842_irq_enable(sd, true);
+
+ v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x, %x\n", __func__,
irq_status[0], irq_status[1], irq_status[2],
- irq_status[3], irq_status[4]);
+ irq_status[3], irq_status[4], irq_status[5]);
/* format change CP */
fmt_change_cp = irq_status[0] & 0x9c;
@@ -1871,25 +1897,72 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
else
fmt_change_digital = 0;
- /* notify */
+ /* format change */
if (fmt_change_cp || fmt_change_digital || fmt_change_sdp) {
v4l2_dbg(1, debug, sd,
"%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
__func__, fmt_change_cp, fmt_change_digital,
fmt_change_sdp);
v4l2_subdev_notify(sd, ADV7842_FMT_CHANGE, NULL);
+ if (handled)
+ *handled = true;
}
- /* 5v cable detect */
- if (irq_status[2])
+ /* HDMI/DVI mode */
+ if (irq_status[5] & 0x08) {
+ v4l2_dbg(1, debug, sd, "%s: irq %s mode\n", __func__,
+ (io_read(sd, 0x65) & 0x08) ? "HDMI" : "DVI");
+ if (handled)
+ *handled = true;
+ }
+
+ /* tx 5v detect */
+ if (irq_status[2] & 0x3) {
+ v4l2_dbg(1, debug, sd, "%s: irq tx_5v\n", __func__);
adv7842_s_detect_tx_5v_ctrl(sd);
+ if (handled)
+ *handled = true;
+ }
+ return 0;
+}
+
+static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ struct adv7842_state *state = to_state(sd);
+ u8 *data = NULL;
- if (handled)
- *handled = true;
+ if (edid->pad > ADV7842_EDID_PORT_VGA)
+ return -EINVAL;
+ if (edid->blocks == 0)
+ return -EINVAL;
+ if (edid->blocks > 2)
+ return -EINVAL;
+ if (edid->start_block > 1)
+ return -EINVAL;
+ if (edid->start_block == 1)
+ edid->blocks = 1;
+ if (!edid->edid)
+ return -EINVAL;
- /* re-enable irq-pin output */
- io_write(sd, 0x40, irq_cfg);
+ switch (edid->pad) {
+ case ADV7842_EDID_PORT_A:
+ case ADV7842_EDID_PORT_B:
+ if (state->hdmi_edid.present & (0x04 << edid->pad))
+ data = state->hdmi_edid.edid;
+ break;
+ case ADV7842_EDID_PORT_VGA:
+ if (state->vga_edid.present)
+ data = state->vga_edid.edid;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!data)
+ return -ENODATA;
+ memcpy(edid->edid,
+ data + edid->start_block * 128,
+ edid->blocks * 128);
return 0;
}
@@ -1898,7 +1971,7 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
struct adv7842_state *state = to_state(sd);
int err = 0;
- if (e->pad > 2)
+ if (e->pad > ADV7842_EDID_PORT_VGA)
return -EINVAL;
if (e->start_block != 0)
return -EINVAL;
@@ -1911,20 +1984,25 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
e->edid[0x16]);
- if (e->pad == 2) {
+ switch (e->pad) {
+ case ADV7842_EDID_PORT_VGA:
memset(&state->vga_edid.edid, 0, 256);
state->vga_edid.present = e->blocks ? 0x1 : 0x0;
memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
err = edid_write_vga_segment(sd);
- } else {
- u32 mask = 0x1<<e->pad;
+ break;
+ case ADV7842_EDID_PORT_A:
+ case ADV7842_EDID_PORT_B:
memset(&state->hdmi_edid.edid, 0, 256);
if (e->blocks)
- state->hdmi_edid.present |= mask;
+ state->hdmi_edid.present |= 0x04 << e->pad;
else
- state->hdmi_edid.present &= ~mask;
- memcpy(&state->hdmi_edid.edid, e->edid, 128*e->blocks);
+ state->hdmi_edid.present &= ~(0x04 << e->pad);
+ memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
err = edid_write_hdmi_segment(sd, e->pad);
+ break;
+ default:
+ return -EINVAL;
}
if (err < 0)
v4l2_err(sd, "error %d writing edid on port %d\n", err, e->pad);
@@ -2156,7 +2234,7 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
static const char * const input_color_space_txt[16] = {
"RGB limited range (16-235)", "RGB full range (0-255)",
"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
- "XvYCC Bt.601", "XvYCC Bt.709",
+ "xvYCC Bt.601", "xvYCC Bt.709",
"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
"invalid", "invalid", "invalid", "invalid", "invalid",
"invalid", "invalid", "automatic"
@@ -2175,8 +2253,6 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "-----Chip status-----\n");
v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
- v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
- "HDMI" : (is_digital_input(sd) ? "DVI-D" : "DVI-A"));
v4l2_info(sd, "HDMI/DVI-D port selected: %s\n",
state->hdmi_port_a ? "A" : "B");
v4l2_info(sd, "EDID A %s, B %s\n",
@@ -2354,15 +2430,63 @@ static int adv7842_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
return 0;
}
+static void adv7842_s_sdp_io(struct v4l2_subdev *sd, struct adv7842_sdp_io_sync_adjustment *s)
+{
+ if (s && s->adjust) {
+ sdp_io_write(sd, 0x94, (s->hs_beg >> 8) & 0xf);
+ sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
+ sdp_io_write(sd, 0x96, (s->hs_width >> 8) & 0xf);
+ sdp_io_write(sd, 0x97, s->hs_width & 0xff);
+ sdp_io_write(sd, 0x98, (s->de_beg >> 8) & 0xf);
+ sdp_io_write(sd, 0x99, s->de_beg & 0xff);
+ sdp_io_write(sd, 0x9a, (s->de_end >> 8) & 0xf);
+ sdp_io_write(sd, 0x9b, s->de_end & 0xff);
+ sdp_io_write(sd, 0xa8, s->vs_beg_o);
+ sdp_io_write(sd, 0xa9, s->vs_beg_e);
+ sdp_io_write(sd, 0xaa, s->vs_end_o);
+ sdp_io_write(sd, 0xab, s->vs_end_e);
+ sdp_io_write(sd, 0xac, s->de_v_beg_o);
+ sdp_io_write(sd, 0xad, s->de_v_beg_e);
+ sdp_io_write(sd, 0xae, s->de_v_end_o);
+ sdp_io_write(sd, 0xaf, s->de_v_end_e);
+ } else {
+ /* set to default */
+ sdp_io_write(sd, 0x94, 0x00);
+ sdp_io_write(sd, 0x95, 0x00);
+ sdp_io_write(sd, 0x96, 0x00);
+ sdp_io_write(sd, 0x97, 0x20);
+ sdp_io_write(sd, 0x98, 0x00);
+ sdp_io_write(sd, 0x99, 0x00);
+ sdp_io_write(sd, 0x9a, 0x00);
+ sdp_io_write(sd, 0x9b, 0x00);
+ sdp_io_write(sd, 0xa8, 0x04);
+ sdp_io_write(sd, 0xa9, 0x04);
+ sdp_io_write(sd, 0xaa, 0x04);
+ sdp_io_write(sd, 0xab, 0x04);
+ sdp_io_write(sd, 0xac, 0x04);
+ sdp_io_write(sd, 0xad, 0x04);
+ sdp_io_write(sd, 0xae, 0x04);
+ sdp_io_write(sd, 0xaf, 0x04);
+ }
+}
+
static int adv7842_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
if (state->mode != ADV7842_MODE_SDP)
return -ENODATA;
+ if (norm & V4L2_STD_625_50)
+ adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_625);
+ else if (norm & V4L2_STD_525_60)
+ adv7842_s_sdp_io(sd, &pdata->sdp_io_sync_525);
+ else
+ adv7842_s_sdp_io(sd, NULL);
+
if (norm & V4L2_STD_ALL) {
state->norm = norm;
return 0;
@@ -2385,9 +2509,10 @@ static int adv7842_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
/* ----------------------------------------------------------------------- */
-static int adv7842_core_init(struct v4l2_subdev *sd,
- const struct adv7842_platform_data *pdata)
+static int adv7842_core_init(struct v4l2_subdev *sd)
{
+ struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
hdmi_write(sd, 0x48,
(pdata->disable_pwrdnb ? 0x80 : 0) |
(pdata->disable_cable_det_rst ? 0x40 : 0));
@@ -2400,7 +2525,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
/* video format */
io_write(sd, 0x02,
- pdata->inp_color_space << 4 |
+ 0xf0 |
pdata->alt_gamma << 3 |
pdata->op_656_range << 2 |
pdata->rgb_out << 1 |
@@ -2412,13 +2537,24 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
pdata->replicate_av_codes << 1 |
pdata->invert_cbcr << 0);
+ /* HDMI audio */
+ hdmi_write_and_or(sd, 0x1a, 0xf1, 0x08); /* Wait 1 s before unmute */
+
/* Drive strength */
- io_write_and_or(sd, 0x14, 0xc0, pdata->drive_strength.data<<4 |
- pdata->drive_strength.clock<<2 |
- pdata->drive_strength.sync);
+ io_write_and_or(sd, 0x14, 0xc0,
+ pdata->dr_str_data << 4 |
+ pdata->dr_str_clk << 2 |
+ pdata->dr_str_sync);
/* HDMI free run */
- cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01);
+ cp_write_and_or(sd, 0xba, 0xfc, pdata->hdmi_free_run_enable |
+ (pdata->hdmi_free_run_mode << 1));
+
+	/* SDP free run */
+ sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
+ (pdata->sdp_free_run_cbar_en << 1) |
+ (pdata->sdp_free_run_man_col_en << 2) |
+ (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
@@ -2431,18 +2567,6 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
sdp_csc_coeff(sd, &pdata->sdp_csc_coeff);
- if (pdata->sdp_io_sync.adjust) {
- const struct adv7842_sdp_io_sync_adjustment *s = &pdata->sdp_io_sync;
- sdp_io_write(sd, 0x94, (s->hs_beg>>8) & 0xf);
- sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
- sdp_io_write(sd, 0x96, (s->hs_width>>8) & 0xf);
- sdp_io_write(sd, 0x97, s->hs_width & 0xff);
- sdp_io_write(sd, 0x98, (s->de_beg>>8) & 0xf);
- sdp_io_write(sd, 0x99, s->de_beg & 0xff);
- sdp_io_write(sd, 0x9a, (s->de_end>>8) & 0xf);
- sdp_io_write(sd, 0x9b, s->de_end & 0xff);
- }
-
/* todo, improve settings for sdram */
if (pdata->sd_ram_size >= 128) {
sdp_write(sd, 0x12, 0x0d); /* Frame TBC,3D comb enabled */
@@ -2483,12 +2607,11 @@ static int adv7842_core_init(struct v4l2_subdev *sd,
io_write_and_or(sd, 0x20, 0xcf, 0x00);
/* LLC */
- /* Set phase to 16. TODO: get this from platform_data */
- io_write(sd, 0x19, 0x90);
+ io_write(sd, 0x19, 0x80 | pdata->llc_dll_phase);
io_write(sd, 0x33, 0x40);
/* interrupts */
- io_write(sd, 0x40, 0xe2); /* Configure INT1 */
+ io_write(sd, 0x40, 0xf2); /* Configure INT1 */
adv7842_irq_enable(sd, true);
@@ -2588,6 +2711,7 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7842_state *state = to_state(sd);
struct adv7842_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_dv_timings timings;
int ret = 0;
if (!pdata)
@@ -2610,7 +2734,7 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
adv7842_rewrite_i2c_addresses(sd, pdata);
/* and re-init chip and state */
- adv7842_core_init(sd, pdata);
+ adv7842_core_init(sd);
disable_input(sd);
@@ -2618,11 +2742,15 @@ static int adv7842_command_ram_test(struct v4l2_subdev *sd)
enable_input(sd);
- adv7842_s_dv_timings(sd, &state->timings);
-
edid_write_vga_segment(sd);
- edid_write_hdmi_segment(sd, 0);
- edid_write_hdmi_segment(sd, 1);
+ edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_A);
+ edid_write_hdmi_segment(sd, ADV7842_EDID_PORT_B);
+
+ timings = state->timings;
+
+ memset(&state->timings, 0, sizeof(struct v4l2_dv_timings));
+
+ adv7842_s_dv_timings(sd, &timings);
return ret;
}
@@ -2670,6 +2798,7 @@ static const struct v4l2_subdev_video_ops adv7842_video_ops = {
};
static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
+ .get_edid = adv7842_get_edid,
.set_edid = adv7842_set_edid,
};
@@ -2712,8 +2841,9 @@ static const struct v4l2_ctrl_config adv7842_ctrl_free_run_color = {
};
-static void adv7842_unregister_clients(struct adv7842_state *state)
+static void adv7842_unregister_clients(struct v4l2_subdev *sd)
{
+ struct adv7842_state *state = to_state(sd);
if (state->i2c_avlink)
i2c_unregister_device(state->i2c_avlink);
if (state->i2c_cec)
@@ -2736,21 +2866,79 @@ static void adv7842_unregister_clients(struct adv7842_state *state)
i2c_unregister_device(state->i2c_cp);
if (state->i2c_vdp)
i2c_unregister_device(state->i2c_vdp);
+
+ state->i2c_avlink = NULL;
+ state->i2c_cec = NULL;
+ state->i2c_infoframe = NULL;
+ state->i2c_sdp_io = NULL;
+ state->i2c_sdp = NULL;
+ state->i2c_afe = NULL;
+ state->i2c_repeater = NULL;
+ state->i2c_edid = NULL;
+ state->i2c_hdmi = NULL;
+ state->i2c_cp = NULL;
+ state->i2c_vdp = NULL;
}
-static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd,
+static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd, const char *desc,
u8 addr, u8 io_reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_client *cp;
io_write(sd, io_reg, addr << 1);
- return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+
+ if (addr == 0) {
+ v4l2_err(sd, "no %s i2c addr configured\n", desc);
+ return NULL;
+ }
+
+ cp = i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+ if (!cp)
+ v4l2_err(sd, "register %s on i2c addr 0x%x failed\n", desc, addr);
+
+ return cp;
+}
+
+static int adv7842_register_clients(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = &state->pdata;
+
+ state->i2c_avlink = adv7842_dummy_client(sd, "avlink", pdata->i2c_avlink, 0xf3);
+ state->i2c_cec = adv7842_dummy_client(sd, "cec", pdata->i2c_cec, 0xf4);
+ state->i2c_infoframe = adv7842_dummy_client(sd, "infoframe", pdata->i2c_infoframe, 0xf5);
+ state->i2c_sdp_io = adv7842_dummy_client(sd, "sdp_io", pdata->i2c_sdp_io, 0xf2);
+ state->i2c_sdp = adv7842_dummy_client(sd, "sdp", pdata->i2c_sdp, 0xf1);
+ state->i2c_afe = adv7842_dummy_client(sd, "afe", pdata->i2c_afe, 0xf8);
+ state->i2c_repeater = adv7842_dummy_client(sd, "repeater", pdata->i2c_repeater, 0xf9);
+ state->i2c_edid = adv7842_dummy_client(sd, "edid", pdata->i2c_edid, 0xfa);
+ state->i2c_hdmi = adv7842_dummy_client(sd, "hdmi", pdata->i2c_hdmi, 0xfb);
+ state->i2c_cp = adv7842_dummy_client(sd, "cp", pdata->i2c_cp, 0xfd);
+ state->i2c_vdp = adv7842_dummy_client(sd, "vdp", pdata->i2c_vdp, 0xfe);
+
+ if (!state->i2c_avlink ||
+ !state->i2c_cec ||
+ !state->i2c_infoframe ||
+ !state->i2c_sdp_io ||
+ !state->i2c_sdp ||
+ !state->i2c_afe ||
+ !state->i2c_repeater ||
+ !state->i2c_edid ||
+ !state->i2c_hdmi ||
+ !state->i2c_cp ||
+ !state->i2c_vdp)
+ return -1;
+
+ return 0;
}
static int adv7842_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7842_state *state;
+ static const struct v4l2_dv_timings cea640x480 =
+ V4L2_DV_BT_CEA_640X480P59_94;
struct adv7842_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
struct v4l2_subdev *sd;
@@ -2775,13 +2963,17 @@ static int adv7842_probe(struct i2c_client *client,
return -ENOMEM;
}
+ /* platform data */
+ state->pdata = *pdata;
+ state->timings = cea640x480;
+
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- state->connector_hdmi = pdata->connector_hdmi;
state->mode = pdata->mode;
- state->hdmi_port_a = true;
+ state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
+ state->restart_stdi_once = true;
/* i2c access to adv7842? */
rev = adv_smbus_read_byte_data_check(client, 0xea, false) << 8 |
@@ -2843,21 +3035,7 @@ static int adv7842_probe(struct i2c_client *client,
goto err_hdl;
}
- state->i2c_avlink = adv7842_dummy_client(sd, pdata->i2c_avlink, 0xf3);
- state->i2c_cec = adv7842_dummy_client(sd, pdata->i2c_cec, 0xf4);
- state->i2c_infoframe = adv7842_dummy_client(sd, pdata->i2c_infoframe, 0xf5);
- state->i2c_sdp_io = adv7842_dummy_client(sd, pdata->i2c_sdp_io, 0xf2);
- state->i2c_sdp = adv7842_dummy_client(sd, pdata->i2c_sdp, 0xf1);
- state->i2c_afe = adv7842_dummy_client(sd, pdata->i2c_afe, 0xf8);
- state->i2c_repeater = adv7842_dummy_client(sd, pdata->i2c_repeater, 0xf9);
- state->i2c_edid = adv7842_dummy_client(sd, pdata->i2c_edid, 0xfa);
- state->i2c_hdmi = adv7842_dummy_client(sd, pdata->i2c_hdmi, 0xfb);
- state->i2c_cp = adv7842_dummy_client(sd, pdata->i2c_cp, 0xfd);
- state->i2c_vdp = adv7842_dummy_client(sd, pdata->i2c_vdp, 0xfe);
- if (!state->i2c_avlink || !state->i2c_cec || !state->i2c_infoframe ||
- !state->i2c_sdp_io || !state->i2c_sdp || !state->i2c_afe ||
- !state->i2c_repeater || !state->i2c_edid || !state->i2c_hdmi ||
- !state->i2c_cp || !state->i2c_vdp) {
+ if (adv7842_register_clients(sd) < 0) {
err = -ENOMEM;
v4l2_err(sd, "failed to create all i2c clients\n");
goto err_i2c;
@@ -2879,7 +3057,7 @@ static int adv7842_probe(struct i2c_client *client,
if (err)
goto err_work_queues;
- err = adv7842_core_init(sd, pdata);
+ err = adv7842_core_init(sd);
if (err)
goto err_entity;
@@ -2893,7 +3071,7 @@ err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
destroy_workqueue(state->work_queues);
err_i2c:
- adv7842_unregister_clients(state);
+ adv7842_unregister_clients(sd);
err_hdl:
v4l2_ctrl_handler_free(hdl);
return err;
@@ -2912,7 +3090,7 @@ static int adv7842_remove(struct i2c_client *client)
destroy_workqueue(state->work_queues);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
- adv7842_unregister_clients(to_state(sd));
+ adv7842_unregister_clients(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
return 0;
}
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 3317a9ae3961..d98ca3aebe23 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -172,28 +172,28 @@ static int lm3560_flash_brt_ctrl(struct lm3560_flash *flash,
static int lm3560_get_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
{
struct lm3560_flash *flash = to_lm3560_flash(ctrl, led_no);
+ int rval = -EINVAL;
mutex_lock(&flash->lock);
if (ctrl->id == V4L2_CID_FLASH_FAULT) {
- int rval;
s32 fault = 0;
unsigned int reg_val;
rval = regmap_read(flash->regmap, REG_FLAG, &reg_val);
if (rval < 0)
- return rval;
- if (rval & FAULT_SHORT_CIRCUIT)
+ goto out;
+ if (reg_val & FAULT_SHORT_CIRCUIT)
fault |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
- if (rval & FAULT_OVERTEMP)
+ if (reg_val & FAULT_OVERTEMP)
fault |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
- if (rval & FAULT_TIMEOUT)
+ if (reg_val & FAULT_TIMEOUT)
fault |= V4L2_FLASH_FAULT_TIMEOUT;
ctrl->cur.val = fault;
- return 0;
}
+out:
mutex_unlock(&flash->lock);
- return -EINVAL;
+ return rval;
}
static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
@@ -219,15 +219,19 @@ static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
break;
case V4L2_CID_FLASH_STROBE:
- if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
- return -EBUSY;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
rval = lm3560_mode_ctrl(flash);
break;
case V4L2_CID_FLASH_STROBE_STOP:
- if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
- return -EBUSY;
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ rval = -EBUSY;
+ goto err_out;
+ }
flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
rval = lm3560_mode_ctrl(flash);
break;
@@ -247,8 +251,8 @@ static int lm3560_set_ctrl(struct v4l2_ctrl *ctrl, enum lm3560_led_id led_no)
break;
}
- mutex_unlock(&flash->lock);
err_out:
+ mutex_unlock(&flash->lock);
return rval;
}
@@ -444,14 +448,14 @@ static int lm3560_probe(struct i2c_client *client,
if (rval < 0)
return rval;
+ i2c_set_clientdata(client, flash);
+
return 0;
}
static int lm3560_remove(struct i2c_client *client)
{
- struct v4l2_subdev *subdev = i2c_get_clientdata(client);
- struct lm3560_flash *flash = container_of(subdev, struct lm3560_flash,
- subdev_led[LM3560_LED_MAX]);
+ struct lm3560_flash *flash = i2c_get_clientdata(client);
unsigned int i;
for (i = LM3560_LED0; i < LM3560_LED_MAX; i++) {
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 846b15f0bf64..85ec3bacdf1c 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -459,13 +459,15 @@ static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev,
MT9M032_COLUMN_START_MAX);
rect.top = clamp(ALIGN(crop->rect.top, 2), MT9M032_ROW_START_MIN,
MT9M032_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2), MT9M032_COLUMN_SIZE_MIN,
- MT9M032_COLUMN_SIZE_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2), MT9M032_ROW_SIZE_MIN,
- MT9M032_ROW_SIZE_MAX);
-
- rect.width = min(rect.width, MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9m032_get_pad_crop(sensor, fh, crop->which);
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 1c2303d18bf4..192c4aad05d6 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -29,7 +30,6 @@
#include <media/mt9p031.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-of.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
@@ -519,11 +519,13 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
/* Clamp the width and height to avoid dividing by zero. */
width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 7, MT9P031_WINDOW_WIDTH_MIN),
+ max_t(unsigned int, __crop->width / 7,
+ MT9P031_WINDOW_WIDTH_MIN),
__crop->width);
height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9P031_WINDOW_HEIGHT_MIN),
- __crop->height);
+ max_t(unsigned int, __crop->height / 8,
+ MT9P031_WINDOW_HEIGHT_MIN),
+ __crop->height);
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
@@ -565,15 +567,17 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
MT9P031_COLUMN_START_MAX);
rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
MT9P031_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9P031_WINDOW_WIDTH_MIN,
- MT9P031_WINDOW_WIDTH_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9P031_WINDOW_HEIGHT_MIN,
- MT9P031_WINDOW_HEIGHT_MAX);
-
- rect.width = min(rect.width, MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9P031_WINDOW_WIDTH_MIN,
+ MT9P031_WINDOW_WIDTH_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9P031_WINDOW_HEIGHT_MIN,
+ MT9P031_WINDOW_HEIGHT_MAX);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
@@ -939,7 +943,7 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ np = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!np)
return NULL;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 796463466ef0..d41c70eaf838 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -291,10 +291,12 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
/* Clamp the width and height to avoid dividing by zero. */
width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+ max_t(unsigned int, __crop->width / 8,
+ MT9T001_WINDOW_HEIGHT_MIN + 1),
__crop->width);
height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9T001_WINDOW_HEIGHT_MIN + 1),
+ max_t(unsigned int, __crop->height / 8,
+ MT9T001_WINDOW_HEIGHT_MIN + 1),
__crop->height);
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
@@ -339,15 +341,17 @@ static int mt9t001_set_crop(struct v4l2_subdev *subdev,
rect.top = clamp(ALIGN(crop->rect.top, 2),
MT9T001_ROW_START_MIN,
MT9T001_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9T001_WINDOW_WIDTH_MIN + 1,
- MT9T001_WINDOW_WIDTH_MAX + 1);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9T001_WINDOW_HEIGHT_MIN + 1,
- MT9T001_WINDOW_HEIGHT_MAX + 1);
-
- rect.width = min(rect.width, MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9T001_WINDOW_WIDTH_MIN + 1,
+ MT9T001_WINDOW_WIDTH_MAX + 1);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9T001_WINDOW_HEIGHT_MIN + 1,
+ MT9T001_WINDOW_HEIGHT_MAX + 1);
+
+ rect.width = min_t(unsigned int, rect.width,
+ MT9T001_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9t001_get_pad_crop(mt9t001, fh, crop->pad, crop->which);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 2c50effaa334..36c504b78f2c 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -27,14 +27,16 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define MT9V032_PIXEL_ARRAY_HEIGHT 492
-#define MT9V032_PIXEL_ARRAY_WIDTH 782
+/* The first four rows are black rows. The active area spans 753x481 pixels. */
+#define MT9V032_PIXEL_ARRAY_HEIGHT 485
+#define MT9V032_PIXEL_ARRAY_WIDTH 753
#define MT9V032_SYSCLK_FREQ_DEF 26600000
#define MT9V032_CHIP_VERSION 0x00
#define MT9V032_CHIP_ID_REV1 0x1311
#define MT9V032_CHIP_ID_REV3 0x1313
+#define MT9V034_CHIP_ID_REV1 0X1324
#define MT9V032_COLUMN_START 0x01
#define MT9V032_COLUMN_START_MIN 1
#define MT9V032_COLUMN_START_DEF 1
@@ -53,12 +55,15 @@
#define MT9V032_WINDOW_WIDTH_MAX 752
#define MT9V032_HORIZONTAL_BLANKING 0x05
#define MT9V032_HORIZONTAL_BLANKING_MIN 43
+#define MT9V034_HORIZONTAL_BLANKING_MIN 61
#define MT9V032_HORIZONTAL_BLANKING_DEF 94
#define MT9V032_HORIZONTAL_BLANKING_MAX 1023
#define MT9V032_VERTICAL_BLANKING 0x06
#define MT9V032_VERTICAL_BLANKING_MIN 4
+#define MT9V034_VERTICAL_BLANKING_MIN 2
#define MT9V032_VERTICAL_BLANKING_DEF 45
#define MT9V032_VERTICAL_BLANKING_MAX 3000
+#define MT9V034_VERTICAL_BLANKING_MAX 32288
#define MT9V032_CHIP_CONTROL 0x07
#define MT9V032_CHIP_CONTROL_MASTER_MODE (1 << 3)
#define MT9V032_CHIP_CONTROL_DOUT_ENABLE (1 << 7)
@@ -68,8 +73,10 @@
#define MT9V032_SHUTTER_WIDTH_CONTROL 0x0a
#define MT9V032_TOTAL_SHUTTER_WIDTH 0x0b
#define MT9V032_TOTAL_SHUTTER_WIDTH_MIN 1
+#define MT9V034_TOTAL_SHUTTER_WIDTH_MIN 0
#define MT9V032_TOTAL_SHUTTER_WIDTH_DEF 480
#define MT9V032_TOTAL_SHUTTER_WIDTH_MAX 32767
+#define MT9V034_TOTAL_SHUTTER_WIDTH_MAX 32765
#define MT9V032_RESET 0x0c
#define MT9V032_READ_MODE 0x0d
#define MT9V032_READ_MODE_ROW_BIN_MASK (3 << 0)
@@ -81,6 +88,8 @@
#define MT9V032_READ_MODE_DARK_COLUMNS (1 << 6)
#define MT9V032_READ_MODE_DARK_ROWS (1 << 7)
#define MT9V032_PIXEL_OPERATION_MODE 0x0f
+#define MT9V034_PIXEL_OPERATION_MODE_HDR (1 << 0)
+#define MT9V034_PIXEL_OPERATION_MODE_COLOR (1 << 1)
#define MT9V032_PIXEL_OPERATION_MODE_COLOR (1 << 2)
#define MT9V032_PIXEL_OPERATION_MODE_HDR (1 << 6)
#define MT9V032_ANALOG_GAIN 0x35
@@ -96,9 +105,12 @@
#define MT9V032_DARK_AVG_HIGH_THRESH_MASK (255 << 8)
#define MT9V032_DARK_AVG_HIGH_THRESH_SHIFT 8
#define MT9V032_ROW_NOISE_CORR_CONTROL 0x70
+#define MT9V034_ROW_NOISE_CORR_ENABLE (1 << 0)
+#define MT9V034_ROW_NOISE_CORR_USE_BLK_AVG (1 << 1)
#define MT9V032_ROW_NOISE_CORR_ENABLE (1 << 5)
#define MT9V032_ROW_NOISE_CORR_USE_BLK_AVG (1 << 7)
#define MT9V032_PIXEL_CLOCK 0x74
+#define MT9V034_PIXEL_CLOCK 0x72
#define MT9V032_PIXEL_CLOCK_INV_LINE (1 << 0)
#define MT9V032_PIXEL_CLOCK_INV_FRAME (1 << 1)
#define MT9V032_PIXEL_CLOCK_XOR_LINE (1 << 2)
@@ -120,12 +132,88 @@
#define MT9V032_AGC_ENABLE (1 << 1)
#define MT9V032_THERMAL_INFO 0xc1
+enum mt9v032_model {
+ MT9V032_MODEL_V032_COLOR,
+ MT9V032_MODEL_V032_MONO,
+ MT9V032_MODEL_V034_COLOR,
+ MT9V032_MODEL_V034_MONO,
+};
+
+struct mt9v032_model_version {
+ unsigned int version;
+ const char *name;
+};
+
+struct mt9v032_model_data {
+ unsigned int min_row_time;
+ unsigned int min_hblank;
+ unsigned int min_vblank;
+ unsigned int max_vblank;
+ unsigned int min_shutter;
+ unsigned int max_shutter;
+ unsigned int pclk_reg;
+};
+
+struct mt9v032_model_info {
+ const struct mt9v032_model_data *data;
+ bool color;
+};
+
+static const struct mt9v032_model_version mt9v032_versions[] = {
+ { MT9V032_CHIP_ID_REV1, "MT9V032 rev1/2" },
+ { MT9V032_CHIP_ID_REV3, "MT9V032 rev3" },
+ { MT9V034_CHIP_ID_REV1, "MT9V034 rev1" },
+};
+
+static const struct mt9v032_model_data mt9v032_model_data[] = {
+ {
+ /* MT9V032 revisions 1/2/3 */
+ .min_row_time = 660,
+ .min_hblank = MT9V032_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V032_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V032_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V032_PIXEL_CLOCK,
+ }, {
+ /* MT9V034 */
+ .min_row_time = 690,
+ .min_hblank = MT9V034_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V034_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V034_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V034_PIXEL_CLOCK,
+ },
+};
+
+static const struct mt9v032_model_info mt9v032_models[] = {
+ [MT9V032_MODEL_V032_COLOR] = {
+ .data = &mt9v032_model_data[0],
+ .color = true,
+ },
+ [MT9V032_MODEL_V032_MONO] = {
+ .data = &mt9v032_model_data[0],
+ .color = false,
+ },
+ [MT9V032_MODEL_V034_COLOR] = {
+ .data = &mt9v032_model_data[1],
+ .color = true,
+ },
+ [MT9V032_MODEL_V034_MONO] = {
+ .data = &mt9v032_model_data[1],
+ .color = false,
+ },
+};
+
struct mt9v032 {
struct v4l2_subdev subdev;
struct media_pad pad;
struct v4l2_mbus_framefmt format;
struct v4l2_rect crop;
+ unsigned int hratio;
+ unsigned int vratio;
struct v4l2_ctrl_handler ctrls;
struct {
@@ -139,6 +227,8 @@ struct mt9v032 {
struct clk *clk;
struct mt9v032_platform_data *pdata;
+ const struct mt9v032_model_info *model;
+ const struct mt9v032_model_version *version;
u32 sysclk;
u16 chip_control;
@@ -210,12 +300,17 @@ mt9v032_update_hblank(struct mt9v032 *mt9v032)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
struct v4l2_rect *crop = &mt9v032->crop;
+ unsigned int min_hblank = mt9v032->model->data->min_hblank;
+ unsigned int hblank;
- return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING,
- max_t(s32, mt9v032->hblank, 660 - crop->width));
-}
+ if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
+ min_hblank += (mt9v032->hratio - 1) * 10;
+ min_hblank = max_t(unsigned int, (int)mt9v032->model->data->min_row_time - crop->width,
+ (int)min_hblank);
+ hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
-#define EXT_CLK 25000000
+ return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
+}
static int mt9v032_power_on(struct mt9v032 *mt9v032)
{
@@ -259,7 +354,7 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
/* Configure the pixel clock polarity */
if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
- ret = mt9v032_write(client, MT9V032_PIXEL_CLOCK,
+ ret = mt9v032_write(client, mt9v032->model->data->pclk_reg,
MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
if (ret < 0)
return ret;
@@ -312,22 +407,20 @@ static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
| MT9V032_CHIP_CONTROL_SEQUENTIAL;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- struct v4l2_mbus_framefmt *format = &mt9v032->format;
struct v4l2_rect *crop = &mt9v032->crop;
- unsigned int hratio;
- unsigned int vratio;
+ unsigned int hbin;
+ unsigned int vbin;
int ret;
if (!enable)
return mt9v032_set_chip_control(mt9v032, mode, 0);
/* Configure the window size and row/column bin */
- hratio = DIV_ROUND_CLOSEST(crop->width, format->width);
- vratio = DIV_ROUND_CLOSEST(crop->height, format->height);
-
+ hbin = fls(mt9v032->hratio) - 1;
+ vbin = fls(mt9v032->vratio) - 1;
ret = mt9v032_write(client, MT9V032_READ_MODE,
- (hratio - 1) << MT9V032_READ_MODE_ROW_BIN_SHIFT |
- (vratio - 1) << MT9V032_READ_MODE_COLUMN_BIN_SHIFT);
+ hbin << MT9V032_READ_MODE_COLUMN_BIN_SHIFT |
+ vbin << MT9V032_READ_MODE_ROW_BIN_SHIFT);
if (ret < 0)
return ret;
@@ -370,12 +463,12 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+ if (fse->index >= 3 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
return -EINVAL;
- fse->min_width = MT9V032_WINDOW_WIDTH_DEF / fse->index;
+ fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
fse->max_width = fse->min_width;
- fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / fse->index;
+ fse->min_height = MT9V032_WINDOW_HEIGHT_DEF / (1 << fse->index);
fse->max_height = fse->min_height;
return 0;
@@ -392,18 +485,30 @@ static int mt9v032_get_format(struct v4l2_subdev *subdev,
return 0;
}
-static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032,
- unsigned int hratio)
+static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
- mt9v032->sysclk / hratio);
+ mt9v032->sysclk / mt9v032->hratio);
if (ret < 0)
dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
}
+static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
+{
+ /* Compute the power-of-two binning factor closest to the input size to
+ * output size ratio. Given that the output size is bounded by input/4
+ * and input, a generic implementation would be an ineffective luxury.
+ */
+ if (output * 3 > input * 2)
+ return 1;
+ if (output * 3 > input)
+ return 2;
+ return 4;
+}
+
static int mt9v032_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *format)
@@ -420,22 +525,28 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
- width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- max(__crop->width / 8, MT9V032_WINDOW_WIDTH_MIN),
- __crop->width);
- height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- max(__crop->height / 8, MT9V032_WINDOW_HEIGHT_MIN),
- __crop->height);
-
- hratio = DIV_ROUND_CLOSEST(__crop->width, width);
- vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+ width = clamp(ALIGN(format->format.width, 2),
+ max_t(unsigned int, __crop->width / 4,
+ MT9V032_WINDOW_WIDTH_MIN),
+ __crop->width);
+ height = clamp(ALIGN(format->format.height, 2),
+ max_t(unsigned int, __crop->height / 4,
+ MT9V032_WINDOW_HEIGHT_MIN),
+ __crop->height);
+
+ hratio = mt9v032_calc_ratio(__crop->width, width);
+ vratio = mt9v032_calc_ratio(__crop->height, height);
__format = __mt9v032_get_pad_format(mt9v032, fh, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9v032_configure_pixel_rate(mt9v032, hratio);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ mt9v032->hratio = hratio;
+ mt9v032->vratio = vratio;
+ mt9v032_configure_pixel_rate(mt9v032);
+ }
format->format = *__format;
@@ -471,15 +582,17 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
MT9V032_ROW_START_MIN,
MT9V032_ROW_START_MAX);
- rect.width = clamp(ALIGN(crop->rect.width, 2),
- MT9V032_WINDOW_WIDTH_MIN,
- MT9V032_WINDOW_WIDTH_MAX);
- rect.height = clamp(ALIGN(crop->rect.height, 2),
- MT9V032_WINDOW_HEIGHT_MIN,
- MT9V032_WINDOW_HEIGHT_MAX);
-
- rect.width = min(rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
- rect.height = min(rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
+ rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
+ MT9V032_WINDOW_WIDTH_MIN,
+ MT9V032_WINDOW_WIDTH_MAX);
+ rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
+ MT9V032_WINDOW_HEIGHT_MIN,
+ MT9V032_WINDOW_HEIGHT_MAX);
+
+ rect.width = min_t(unsigned int,
+ rect.width, MT9V032_PIXEL_ARRAY_WIDTH - rect.left);
+ rect.height = min_t(unsigned int,
+ rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
__crop = __mt9v032_get_pad_crop(mt9v032, fh, crop->pad, crop->which);
@@ -491,8 +604,11 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
crop->which);
__format->width = rect.width;
__format->height = rect.height;
- if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9v032_configure_pixel_rate(mt9v032, 1);
+ if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ mt9v032->hratio = 1;
+ mt9v032->vratio = 1;
+ mt9v032_configure_pixel_rate(mt9v032);
+ }
}
*__crop = rect;
@@ -641,7 +757,8 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- s32 data;
+ unsigned int i;
+ s32 version;
int ret;
dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
@@ -654,25 +771,38 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
}
/* Read and check the sensor version */
- data = mt9v032_read(client, MT9V032_CHIP_VERSION);
- if (data != MT9V032_CHIP_ID_REV1 && data != MT9V032_CHIP_ID_REV3) {
- dev_err(&client->dev, "MT9V032 not detected, wrong version "
- "0x%04x\n", data);
+ version = mt9v032_read(client, MT9V032_CHIP_VERSION);
+ if (version < 0) {
+ dev_err(&client->dev, "Failed reading chip version\n");
+ return version;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt9v032_versions); ++i) {
+ if (mt9v032_versions[i].version == version) {
+ mt9v032->version = &mt9v032_versions[i];
+ break;
+ }
+ }
+
+ if (mt9v032->version == NULL) {
+ dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+ version);
return -ENODEV;
}
mt9v032_power_off(mt9v032);
- dev_info(&client->dev, "MT9V032 detected at address 0x%02x\n",
- client->addr);
+ dev_info(&client->dev, "%s detected at address 0x%02x\n",
+ mt9v032->version->name, client->addr);
- mt9v032_configure_pixel_rate(mt9v032, 1);
+ mt9v032_configure_pixel_rate(mt9v032);
return ret;
}
static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
@@ -683,7 +813,12 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
format = v4l2_subdev_get_try_format(fh, 0);
- format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ if (mt9v032->model->color)
+ format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ else
+ format->code = V4L2_MBUS_FMT_Y10_1X10;
+
format->width = MT9V032_WINDOW_WIDTH_DEF;
format->height = MT9V032_WINDOW_HEIGHT_DEF;
format->field = V4L2_FIELD_NONE;
@@ -755,6 +890,7 @@ static int mt9v032_probe(struct i2c_client *client,
mutex_init(&mt9v032->power_lock);
mt9v032->pdata = pdata;
+ mt9v032->model = (const void *)did->driver_data;
v4l2_ctrl_handler_init(&mt9v032->ctrls, 10);
@@ -767,16 +903,16 @@ static int mt9v032_probe(struct i2c_client *client,
V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0,
V4L2_EXPOSURE_AUTO);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_EXPOSURE, MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
- MT9V032_TOTAL_SHUTTER_WIDTH_MAX, 1,
+ V4L2_CID_EXPOSURE, mt9v032->model->data->min_shutter,
+ mt9v032->model->data->max_shutter, 1,
MT9V032_TOTAL_SHUTTER_WIDTH_DEF);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_HBLANK, MT9V032_HORIZONTAL_BLANKING_MIN,
+ V4L2_CID_HBLANK, mt9v032->model->data->min_hblank,
MT9V032_HORIZONTAL_BLANKING_MAX, 1,
MT9V032_HORIZONTAL_BLANKING_DEF);
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
- V4L2_CID_VBLANK, MT9V032_VERTICAL_BLANKING_MIN,
- MT9V032_VERTICAL_BLANKING_MAX, 1,
+ V4L2_CID_VBLANK, mt9v032->model->data->min_vblank,
+ mt9v032->model->data->max_vblank, 1,
MT9V032_VERTICAL_BLANKING_DEF);
mt9v032->test_pattern = v4l2_ctrl_new_std_menu_items(&mt9v032->ctrls,
&mt9v032_ctrl_ops, V4L2_CID_TEST_PATTERN,
@@ -819,12 +955,19 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->crop.width = MT9V032_WINDOW_WIDTH_DEF;
mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF;
- mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ if (mt9v032->model->color)
+ mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ else
+ mt9v032->format.code = V4L2_MBUS_FMT_Y10_1X10;
+
mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF;
mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF;
mt9v032->format.field = V4L2_FIELD_NONE;
mt9v032->format.colorspace = V4L2_COLORSPACE_SRGB;
+ mt9v032->hratio = 1;
+ mt9v032->vratio = 1;
+
mt9v032->aec_agc = MT9V032_AEC_ENABLE | MT9V032_AGC_ENABLE;
mt9v032->hblank = MT9V032_HORIZONTAL_BLANKING_DEF;
mt9v032->sysclk = MT9V032_SYSCLK_FREQ_DEF;
@@ -855,7 +998,10 @@ static int mt9v032_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9v032_id[] = {
- { "mt9v032", 0 },
+ { "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
+ { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
+ { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
+ { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v032_id);
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
new file mode 100644
index 000000000000..2d768ef67cc5
--- /dev/null
+++ b/drivers/media/i2c/s5k5baf.c
@@ -0,0 +1,2054 @@
+/*
+ * Driver for Samsung S5K5BAF UXGA 1/5" 2M CMOS Image Sensor
+ * with embedded SoC ISP.
+ *
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on S5K6AA driver authored by Sylwester Nawrocki
+ * Copyright (C) 2013, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-of.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define S5K5BAF_DRIVER_NAME "s5k5baf"
+#define S5K5BAF_DEFAULT_MCLK_FREQ 24000000U
+#define S5K5BAF_CLK_NAME "mclk"
+
+#define S5K5BAF_FW_FILENAME "s5k5baf-cfg.bin"
+#define S5K5BAF_FW_TAG "SF00"
+#define S5K5BAG_FW_TAG_LEN 2
+#define S5K5BAG_FW_MAX_COUNT 16
+
+#define S5K5BAF_CIS_WIDTH 1600
+#define S5K5BAF_CIS_HEIGHT 1200
+#define S5K5BAF_WIN_WIDTH_MIN 8
+#define S5K5BAF_WIN_HEIGHT_MIN 8
+#define S5K5BAF_GAIN_RED_DEF 127
+#define S5K5BAF_GAIN_GREEN_DEF 95
+#define S5K5BAF_GAIN_BLUE_DEF 180
+/* Default number of MIPI CSI-2 data lanes used */
+#define S5K5BAF_DEF_NUM_LANES 1
+
+#define AHB_MSB_ADDR_PTR 0xfcfc
+
+/*
+ * Register interface pages (the most significant word of the address)
+ */
+#define PAGE_IF_HW 0xd000
+#define PAGE_IF_SW 0x7000
+
+/*
+ * H/W register Interface (PAGE_IF_HW)
+ */
+#define REG_SW_LOAD_COMPLETE 0x0014
+#define REG_CMDWR_PAGE 0x0028
+#define REG_CMDWR_ADDR 0x002a
+#define REG_CMDRD_PAGE 0x002c
+#define REG_CMDRD_ADDR 0x002e
+#define REG_CMD_BUF 0x0f12
+#define REG_SET_HOST_INT 0x1000
+#define REG_CLEAR_HOST_INT 0x1030
+#define REG_PATTERN_SET 0x3100
+#define REG_PATTERN_WIDTH 0x3118
+#define REG_PATTERN_HEIGHT 0x311a
+#define REG_PATTERN_PARAM 0x311c
+
+/*
+ * S/W register interface (PAGE_IF_SW)
+ */
+
+/* Firmware revision information */
+#define REG_FW_APIVER 0x012e
+#define S5K5BAF_FW_APIVER 0x0001
+#define REG_FW_REVISION 0x0130
+#define REG_FW_SENSOR_ID 0x0152
+
+/* Initialization parameters */
+/* Master clock frequency in KHz */
+#define REG_I_INCLK_FREQ_L 0x01b8
+#define REG_I_INCLK_FREQ_H 0x01ba
+#define MIN_MCLK_FREQ_KHZ 6000U
+#define MAX_MCLK_FREQ_KHZ 48000U
+#define REG_I_USE_NPVI_CLOCKS 0x01c6
+#define NPVI_CLOCKS 1
+#define REG_I_USE_NMIPI_CLOCKS 0x01c8
+#define NMIPI_CLOCKS 1
+#define REG_I_BLOCK_INTERNAL_PLL_CALC 0x01ca
+
+/* Clock configurations, n = 0..2. REG_I_* frequency unit is 4 kHz. */
+#define REG_I_OPCLK_4KHZ(n) ((n) * 6 + 0x01cc)
+#define REG_I_MIN_OUTRATE_4KHZ(n) ((n) * 6 + 0x01ce)
+#define REG_I_MAX_OUTRATE_4KHZ(n) ((n) * 6 + 0x01d0)
+#define SCLK_PVI_FREQ 24000
+#define SCLK_MIPI_FREQ 48000
+#define PCLK_MIN_FREQ 6000
+#define PCLK_MAX_FREQ 48000
+#define REG_I_USE_REGS_API 0x01de
+#define REG_I_INIT_PARAMS_UPDATED 0x01e0
+#define REG_I_ERROR_INFO 0x01e2
+
+/* General purpose parameters */
+#define REG_USER_BRIGHTNESS 0x01e4
+#define REG_USER_CONTRAST 0x01e6
+#define REG_USER_SATURATION 0x01e8
+#define REG_USER_SHARPBLUR 0x01ea
+
+#define REG_G_SPEC_EFFECTS 0x01ee
+#define REG_G_ENABLE_PREV 0x01f0
+#define REG_G_ENABLE_PREV_CHG 0x01f2
+#define REG_G_NEW_CFG_SYNC 0x01f8
+#define REG_G_PREVREQ_IN_WIDTH 0x01fa
+#define REG_G_PREVREQ_IN_HEIGHT 0x01fc
+#define REG_G_PREVREQ_IN_XOFFS 0x01fe
+#define REG_G_PREVREQ_IN_YOFFS 0x0200
+#define REG_G_PREVZOOM_IN_WIDTH 0x020a
+#define REG_G_PREVZOOM_IN_HEIGHT 0x020c
+#define REG_G_PREVZOOM_IN_XOFFS 0x020e
+#define REG_G_PREVZOOM_IN_YOFFS 0x0210
+#define REG_G_INPUTS_CHANGE_REQ 0x021a
+#define REG_G_ACTIVE_PREV_CFG 0x021c
+#define REG_G_PREV_CFG_CHG 0x021e
+#define REG_G_PREV_OPEN_AFTER_CH 0x0220
+#define REG_G_PREV_CFG_ERROR 0x0222
+#define CFG_ERROR_RANGE 0x0b
+#define REG_G_PREV_CFG_BYPASS_CHANGED 0x022a
+#define REG_G_ACTUAL_P_FR_TIME 0x023a
+#define REG_G_ACTUAL_P_OUT_RATE 0x023c
+#define REG_G_ACTUAL_C_FR_TIME 0x023e
+#define REG_G_ACTUAL_C_OUT_RATE 0x0240
+
+/* Preview control section. n = 0...4. */
+#define PREG(n, x) ((n) * 0x26 + x)
+#define REG_P_OUT_WIDTH(n) PREG(n, 0x0242)
+#define REG_P_OUT_HEIGHT(n) PREG(n, 0x0244)
+#define REG_P_FMT(n) PREG(n, 0x0246)
+#define REG_P_MAX_OUT_RATE(n) PREG(n, 0x0248)
+#define REG_P_MIN_OUT_RATE(n) PREG(n, 0x024a)
+#define REG_P_PVI_MASK(n) PREG(n, 0x024c)
+#define PVI_MASK_MIPI 0x52
+#define REG_P_CLK_INDEX(n) PREG(n, 0x024e)
+#define CLK_PVI_INDEX 0
+#define CLK_MIPI_INDEX NPVI_CLOCKS
+#define REG_P_FR_RATE_TYPE(n) PREG(n, 0x0250)
+#define FR_RATE_DYNAMIC 0
+#define FR_RATE_FIXED 1
+#define FR_RATE_FIXED_ACCURATE 2
+#define REG_P_FR_RATE_Q_TYPE(n) PREG(n, 0x0252)
+#define FR_RATE_Q_DYNAMIC 0
+#define FR_RATE_Q_BEST_FRRATE 1 /* Binning enabled */
+#define FR_RATE_Q_BEST_QUALITY 2 /* Binning disabled */
+/* Frame period in 0.1 ms units */
+#define REG_P_MAX_FR_TIME(n) PREG(n, 0x0254)
+#define REG_P_MIN_FR_TIME(n) PREG(n, 0x0256)
+#define S5K5BAF_MIN_FR_TIME 333 /* x100 us */
+#define S5K5BAF_MAX_FR_TIME 6500 /* x100 us */
+/* The below 5 registers are for "device correction" values */
+#define REG_P_SATURATION(n) PREG(n, 0x0258)
+#define REG_P_SHARP_BLUR(n) PREG(n, 0x025a)
+#define REG_P_GLAMOUR(n) PREG(n, 0x025c)
+#define REG_P_COLORTEMP(n) PREG(n, 0x025e)
+#define REG_P_GAMMA_INDEX(n) PREG(n, 0x0260)
+#define REG_P_PREV_MIRROR(n) PREG(n, 0x0262)
+#define REG_P_CAP_MIRROR(n) PREG(n, 0x0264)
+#define REG_P_CAP_ROTATION(n) PREG(n, 0x0266)
+
+/* Extended image property controls */
+/* Exposure time in 10 us units */
+#define REG_SF_USR_EXPOSURE_L 0x03bc
+#define REG_SF_USR_EXPOSURE_H 0x03be
+#define REG_SF_USR_EXPOSURE_CHG 0x03c0
+#define REG_SF_USR_TOT_GAIN 0x03c2
+#define REG_SF_USR_TOT_GAIN_CHG 0x03c4
+#define REG_SF_RGAIN 0x03c6
+#define REG_SF_RGAIN_CHG 0x03c8
+#define REG_SF_GGAIN 0x03ca
+#define REG_SF_GGAIN_CHG 0x03cc
+#define REG_SF_BGAIN 0x03ce
+#define REG_SF_BGAIN_CHG 0x03d0
+#define REG_SF_WBGAIN_CHG 0x03d2
+#define REG_SF_FLICKER_QUANT 0x03d4
+#define REG_SF_FLICKER_QUANT_CHG 0x03d6
+
+/* Output interface (parallel/MIPI) setup */
+#define REG_OIF_EN_MIPI_LANES 0x03f2
+#define REG_OIF_EN_PACKETS 0x03f4
+#define EN_PACKETS_CSI2 0xc3
+#define REG_OIF_CFG_CHG 0x03f6
+
+/* Auto-algorithms enable mask */
+#define REG_DBG_AUTOALG_EN 0x03f8
+#define AALG_ALL_EN BIT(0)
+#define AALG_AE_EN BIT(1)
+#define AALG_DIVLEI_EN BIT(2)
+#define AALG_WB_EN BIT(3)
+#define AALG_USE_WB_FOR_ISP BIT(4)
+#define AALG_FLICKER_EN BIT(5)
+#define AALG_FIT_EN BIT(6)
+#define AALG_WRHW_EN BIT(7)
+
+/* Pointers to color correction matrices */
+#define REG_PTR_CCM_HORIZON 0x06d0
+#define REG_PTR_CCM_INCANDESCENT 0x06d4
+#define REG_PTR_CCM_WARM_WHITE 0x06d8
+#define REG_PTR_CCM_COOL_WHITE 0x06dc
+#define REG_PTR_CCM_DL50 0x06e0
+#define REG_PTR_CCM_DL65 0x06e4
+#define REG_PTR_CCM_OUTDOOR 0x06ec
+
+#define REG_ARR_CCM(n) (0x2800 + 36 * (n))
+
+static const char * const s5k5baf_supply_names[] = {
+	"vdda",   /* Analog power supply 2.8V (2.6V to 3.0V) */
+	"vddreg", /* Regulator input power supply 1.8V (1.7V to 1.9V)
+		     or 2.8V (2.6V to 3.0V) */
+	"vddio",  /* I/O power supply 1.8V (1.65V to 1.95V)
+		     or 2.8V (2.5V to 3.1V) */
+};
+#define S5K5BAF_NUM_SUPPLIES ARRAY_SIZE(s5k5baf_supply_names)
+
+struct s5k5baf_gpio {
+ int gpio;
+ int level;
+};
+
+enum s5k5baf_gpio_id {
+ STBY,
+ RST,
+ NUM_GPIOS,
+};
+
+#define PAD_CIS 0
+#define PAD_OUT 1
+#define NUM_CIS_PADS 1
+#define NUM_ISP_PADS 2
+
+struct s5k5baf_pixfmt {
+ enum v4l2_mbus_pixelcode code;
+ u32 colorspace;
+ /* REG_P_FMT(x) register value */
+ u16 reg_p_fmt;
+};
+
+struct s5k5baf_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct { /* Auto / manual white balance cluster */
+ struct v4l2_ctrl *awb;
+ struct v4l2_ctrl *gain_red;
+ struct v4l2_ctrl *gain_blue;
+ };
+ struct { /* Mirror cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct { /* Auto exposure / manual exposure and gain cluster */
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ };
+};
+
+enum {
+ S5K5BAF_FW_ID_PATCH,
+ S5K5BAF_FW_ID_CCM,
+ S5K5BAF_FW_ID_CIS,
+};
+
+struct s5k5baf_fw {
+ u16 count;
+ struct {
+ u16 id;
+ u16 offset;
+ } seq[0];
+ u16 data[0];
+};
+
+struct s5k5baf {
+ struct s5k5baf_gpio gpios[NUM_GPIOS];
+ enum v4l2_mbus_type bus_type;
+ u8 nlanes;
+ struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
+
+ struct clk *clock;
+ u32 mclk_frequency;
+
+ struct s5k5baf_fw *fw;
+
+ struct v4l2_subdev cis_sd;
+ struct media_pad cis_pad;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[NUM_ISP_PADS];
+
+ /* protects the struct members below */
+ struct mutex lock;
+
+ int error;
+
+ struct v4l2_rect crop_sink;
+ struct v4l2_rect compose;
+ struct v4l2_rect crop_source;
+ /* index to s5k5baf_formats array */
+ int pixfmt;
+ /* actual frame interval in 100us */
+ u16 fiv;
+ /* requested frame interval in 100us */
+ u16 req_fiv;
+ /* cache for REG_DBG_AUTOALG_EN register */
+ u16 auto_alg;
+
+ struct s5k5baf_ctrls ctrls;
+
+ unsigned int streaming:1;
+ unsigned int apply_cfg:1;
+ unsigned int apply_crop:1;
+ unsigned int valid_auto_alg:1;
+ unsigned int power;
+};
+
+static const struct s5k5baf_pixfmt s5k5baf_formats[] = {
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ /* range 16-240 */
+ { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 },
+ { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
+};
+
+static struct v4l2_rect s5k5baf_cis_rect = {
+ 0, 0, S5K5BAF_CIS_WIDTH, S5K5BAF_CIS_HEIGHT
+};
+
+/* Setfile contains set of I2C command sequences. Each sequence has its ID.
+ * setfile format:
+ * u8 magic[4];
+ * u16 count; number of sequences
+ * struct {
+ * u16 id; sequence id
+ * u16 offset; sequence offset in data array
+ * } seq[count];
+ * u16 data[*]; array containing sequences
+ *
+ */
+static int s5k5baf_fw_parse(struct device *dev, struct s5k5baf_fw **fw,
+ size_t count, const u16 *data)
+{
+ struct s5k5baf_fw *f;
+ u16 *d, i, *end;
+ int ret;
+
+ if (count < S5K5BAG_FW_TAG_LEN + 1) {
+ dev_err(dev, "firmware file too short (%zu)\n", count);
+ return -EINVAL;
+ }
+
+ ret = memcmp(data, S5K5BAF_FW_TAG, S5K5BAG_FW_TAG_LEN * sizeof(u16));
+ if (ret != 0) {
+ dev_err(dev, "invalid firmware magic number\n");
+ return -EINVAL;
+ }
+
+ data += S5K5BAG_FW_TAG_LEN;
+ count -= S5K5BAG_FW_TAG_LEN;
+
+ d = devm_kzalloc(dev, count * sizeof(u16), GFP_KERNEL);
+
+ for (i = 0; i < count; ++i)
+ d[i] = le16_to_cpu(data[i]);
+
+ f = (struct s5k5baf_fw *)d;
+ if (count < 1 + 2 * f->count) {
+ dev_err(dev, "invalid firmware header (count=%d size=%zu)\n",
+ f->count, 2 * (count + S5K5BAG_FW_TAG_LEN));
+ return -EINVAL;
+ }
+ end = d + count;
+ d += 1 + 2 * f->count;
+
+ for (i = 0; i < f->count; ++i) {
+ if (f->seq[i].offset + d <= end)
+ continue;
+ dev_err(dev, "invalid firmware header (seq=%d)\n", i);
+ return -EINVAL;
+ }
+
+ *fw = f;
+
+ return 0;
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct s5k5baf, ctrls.handler)->sd;
+}
+
+static inline bool s5k5baf_is_cis_subdev(struct v4l2_subdev *sd)
+{
+ return sd->entity.type == MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+}
+
+static inline struct s5k5baf *to_s5k5baf(struct v4l2_subdev *sd)
+{
+ if (s5k5baf_is_cis_subdev(sd))
+ return container_of(sd, struct s5k5baf, cis_sd);
+ else
+ return container_of(sd, struct s5k5baf, sd);
+}
+
+static u16 s5k5baf_i2c_read(struct s5k5baf *state, u16 addr)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ __be16 w, r;
+ struct i2c_msg msg[] = {
+ { .addr = c->addr, .flags = 0,
+ .len = 2, .buf = (u8 *)&w },
+ { .addr = c->addr, .flags = I2C_M_RD,
+ .len = 2, .buf = (u8 *)&r },
+ };
+ int ret;
+
+ if (state->error)
+ return 0;
+
+ w = cpu_to_be16(addr);
+ ret = i2c_transfer(c->adapter, msg, 2);
+ r = be16_to_cpu(r);
+
+ v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, r);
+
+ if (ret != 2) {
+ v4l2_err(c, "i2c_read: error during transfer (%d)\n", ret);
+ state->error = ret;
+ }
+ return r;
+}
+
+static void s5k5baf_i2c_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+ u8 buf[4] = { addr >> 8, addr & 0xFF, val >> 8, val & 0xFF };
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ int ret;
+
+ if (state->error)
+ return;
+
+ ret = i2c_master_send(c, buf, 4);
+ v4l2_dbg(3, debug, c, "i2c_write: 0x%04x : 0x%04x\n", addr, val);
+
+ if (ret != 4) {
+ v4l2_err(c, "i2c_write: error during transfer (%d)\n", ret);
+ state->error = ret;
+ }
+}
+
+static u16 s5k5baf_read(struct s5k5baf *state, u16 addr)
+{
+ s5k5baf_i2c_write(state, REG_CMDRD_ADDR, addr);
+ return s5k5baf_i2c_read(state, REG_CMD_BUF);
+}
+
+static void s5k5baf_write(struct s5k5baf *state, u16 addr, u16 val)
+{
+ s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+ s5k5baf_i2c_write(state, REG_CMD_BUF, val);
+}
+
+static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
+ u16 count, const u16 *seq)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ __be16 buf[65];
+
+ s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
+ if (state->error)
+ return;
+
+ v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+ min(2 * count, 64), seq);
+
+ buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
+
+ while (count > 0) {
+ int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+ int ret, i;
+
+ for (i = 1; i <= n; ++i)
+ buf[i] = cpu_to_be16(*seq++);
+
+ i *= 2;
+ ret = i2c_master_send(c, (char *)buf, i);
+ if (ret != i) {
+ v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+ state->error = ret;
+ break;
+ }
+
+ count -= n;
+ }
+}
+
+#define s5k5baf_write_seq(state, addr, seq...) \
+ s5k5baf_write_arr_seq(state, addr, sizeof((char[]){ seq }), \
+ (const u16 []){ seq });
+
+/* add items count at the beginning of the list */
+#define NSEQ(seq...) sizeof((char[]){ seq }), seq
+
+/*
+ * s5k5baf_write_nseq() - Writes sequences of values to sensor memory via i2c
+ * @nseq: sequence of u16 words in format:
+ * (N, address, value[1]...value[N-1])*,0
+ * Ex.:
+ * u16 seq[] = { NSEQ(0x4000, 1, 1), NSEQ(0x4010, 640, 480), 0 };
+ * ret = s5k5baf_write_nseq(c, seq);
+ */
+static void s5k5baf_write_nseq(struct s5k5baf *state, const u16 *nseq)
+{
+ int count;
+
+ while ((count = *nseq++)) {
+ u16 addr = *nseq++;
+ --count;
+
+ s5k5baf_write_arr_seq(state, addr, count, nseq);
+ nseq += count;
+ }
+}
+
+static void s5k5baf_synchronize(struct s5k5baf *state, int timeout, u16 addr)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout);
+ u16 reg;
+
+ s5k5baf_write(state, addr, 1);
+ do {
+ reg = s5k5baf_read(state, addr);
+ if (state->error || !reg)
+ return;
+ usleep_range(5000, 10000);
+ } while (time_is_after_jiffies(end));
+
+ v4l2_err(&state->sd, "timeout on register synchronize (%#x)\n", addr);
+ state->error = -ETIMEDOUT;
+}
+
+static u16 *s5k5baf_fw_get_seq(struct s5k5baf *state, u16 seq_id)
+{
+ struct s5k5baf_fw *fw = state->fw;
+ u16 *data;
+ int i;
+
+ if (fw == NULL)
+ return NULL;
+
+ data = fw->data + 2 * fw->count;
+
+ for (i = 0; i < fw->count; ++i) {
+ if (fw->seq[i].id == seq_id)
+ return data + fw->seq[i].offset;
+ }
+
+ return NULL;
+}
+
+static void s5k5baf_hw_patch(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_PATCH);
+
+ if (seq)
+ s5k5baf_write_nseq(state, seq);
+}
+
+static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
+{
+ unsigned long mclk = state->mclk_frequency / 1000;
+ u16 status;
+ static const u16 nseq_clk_cfg[] = {
+ NSEQ(REG_I_USE_NPVI_CLOCKS,
+ NPVI_CLOCKS, NMIPI_CLOCKS, 0,
+ SCLK_PVI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4,
+ SCLK_MIPI_FREQ / 4, PCLK_MIN_FREQ / 4, PCLK_MAX_FREQ / 4),
+ NSEQ(REG_I_USE_REGS_API, 1),
+ 0
+ };
+
+ s5k5baf_write_seq(state, REG_I_INCLK_FREQ_L, mclk & 0xffff, mclk >> 16);
+ s5k5baf_write_nseq(state, nseq_clk_cfg);
+
+ s5k5baf_synchronize(state, 250, REG_I_INIT_PARAMS_UPDATED);
+ status = s5k5baf_read(state, REG_I_ERROR_INFO);
+ if (!state->error && status) {
+ v4l2_err(&state->sd, "error configuring PLL (%d)\n", status);
+ state->error = -EINVAL;
+ }
+}
+
+/* set custom color correction matrices for various illuminations */
+static void s5k5baf_hw_set_ccm(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CCM);
+
+ if (seq)
+ s5k5baf_write_nseq(state, seq);
+}
+
+/* CIS sensor tuning, based on undocumented android driver code */
+static void s5k5baf_hw_set_cis(struct s5k5baf *state)
+{
+ u16 *seq = s5k5baf_fw_get_seq(state, S5K5BAF_FW_ID_CIS);
+
+ if (!seq)
+ return;
+
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_HW);
+ s5k5baf_write_nseq(state, seq);
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+static void s5k5baf_hw_sync_cfg(struct s5k5baf *state)
+{
+ s5k5baf_write(state, REG_G_PREV_CFG_CHG, 1);
+ if (state->apply_crop) {
+ s5k5baf_write(state, REG_G_INPUTS_CHANGE_REQ, 1);
+ s5k5baf_write(state, REG_G_PREV_CFG_BYPASS_CHANGED, 1);
+ }
+ s5k5baf_synchronize(state, 500, REG_G_NEW_CFG_SYNC);
+}
+/* Set horizontal and vertical image flipping */
+static void s5k5baf_hw_set_mirror(struct s5k5baf *state)
+{
+ u16 flip = state->ctrls.vflip->val | (state->ctrls.vflip->val << 1);
+
+ s5k5baf_write(state, REG_P_PREV_MIRROR(0), flip);
+ if (state->streaming)
+ s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_set_alg(struct s5k5baf *state, u16 alg, bool enable)
+{
+ u16 cur_alg, new_alg;
+
+ if (!state->valid_auto_alg)
+ cur_alg = s5k5baf_read(state, REG_DBG_AUTOALG_EN);
+ else
+ cur_alg = state->auto_alg;
+
+ new_alg = enable ? (cur_alg | alg) : (cur_alg & ~alg);
+
+ if (new_alg != cur_alg)
+ s5k5baf_write(state, REG_DBG_AUTOALG_EN, new_alg);
+
+ if (state->error)
+ return;
+
+ state->valid_auto_alg = 1;
+ state->auto_alg = new_alg;
+}
+
+/* Configure auto/manual white balance and R/G/B gains */
+static void s5k5baf_hw_set_awb(struct s5k5baf *state, int awb)
+{
+ struct s5k5baf_ctrls *ctrls = &state->ctrls;
+
+ if (!awb)
+ s5k5baf_write_seq(state, REG_SF_RGAIN,
+ ctrls->gain_red->val, 1,
+ S5K5BAF_GAIN_GREEN_DEF, 1,
+ ctrls->gain_blue->val, 1,
+ 1);
+
+ s5k5baf_hw_set_alg(state, AALG_WB_EN, awb);
+}
+
+/* Program FW with exposure time, 'exposure' in us units */
+static void s5k5baf_hw_set_user_exposure(struct s5k5baf *state, int exposure)
+{
+ unsigned int time = exposure / 10;
+
+ s5k5baf_write_seq(state, REG_SF_USR_EXPOSURE_L,
+ time & 0xffff, time >> 16, 1);
+}
+
+static void s5k5baf_hw_set_user_gain(struct s5k5baf *state, int gain)
+{
+ s5k5baf_write_seq(state, REG_SF_USR_TOT_GAIN, gain, 1);
+}
+
+/* Set auto/manual exposure and total gain */
+static void s5k5baf_hw_set_auto_exposure(struct s5k5baf *state, int value)
+{
+ if (value == V4L2_EXPOSURE_AUTO) {
+ s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, true);
+ } else {
+ unsigned int exp_time = state->ctrls.exposure->val;
+
+ s5k5baf_hw_set_user_exposure(state, exp_time);
+ s5k5baf_hw_set_user_gain(state, state->ctrls.gain->val);
+ s5k5baf_hw_set_alg(state, AALG_AE_EN | AALG_DIVLEI_EN, false);
+ }
+}
+
+static void s5k5baf_hw_set_anti_flicker(struct s5k5baf *state, int v)
+{
+	if (v == V4L2_CID_POWER_LINE_FREQUENCY_AUTO) {
+		s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, true);
+	} else {
+		/* The V4L2_CID_POWER_LINE_FREQUENCY control values match
+		 * the register values */
+		s5k5baf_write_seq(state, REG_SF_FLICKER_QUANT, v, 1);
+		s5k5baf_hw_set_alg(state, AALG_FLICKER_EN, false);
+	}
+}
+
+static void s5k5baf_hw_set_colorfx(struct s5k5baf *state, int val)
+{
+ static const u16 colorfx[] = {
+ [V4L2_COLORFX_NONE] = 0,
+ [V4L2_COLORFX_BW] = 1,
+ [V4L2_COLORFX_NEGATIVE] = 2,
+ [V4L2_COLORFX_SEPIA] = 3,
+ [V4L2_COLORFX_SKY_BLUE] = 4,
+ [V4L2_COLORFX_SKETCH] = 5,
+ };
+
+ s5k5baf_write(state, REG_G_SPEC_EFFECTS, colorfx[val]);
+}
+
+static int s5k5baf_find_pixfmt(struct v4l2_mbus_framefmt *mf)
+{
+ int i, c = -1;
+
+ for (i = 0; i < ARRAY_SIZE(s5k5baf_formats); i++) {
+ if (mf->colorspace != s5k5baf_formats[i].colorspace)
+ continue;
+ if (mf->code == s5k5baf_formats[i].code)
+ return i;
+ if (c < 0)
+ c = i;
+ }
+ return (c < 0) ? 0 : c;
+}
+
+static int s5k5baf_clear_error(struct s5k5baf *state)
+{
+ int ret = state->error;
+
+ state->error = 0;
+ return ret;
+}
+
+static int s5k5baf_hw_set_video_bus(struct s5k5baf *state)
+{
+ u16 en_pkts;
+
+ if (state->bus_type == V4L2_MBUS_CSI2)
+ en_pkts = EN_PACKETS_CSI2;
+ else
+ en_pkts = 0;
+
+ s5k5baf_write_seq(state, REG_OIF_EN_MIPI_LANES,
+ state->nlanes, en_pkts, 1);
+
+ return s5k5baf_clear_error(state);
+}
+
+static u16 s5k5baf_get_cfg_error(struct s5k5baf *state)
+{
+ u16 err = s5k5baf_read(state, REG_G_PREV_CFG_ERROR);
+ if (err)
+ s5k5baf_write(state, REG_G_PREV_CFG_ERROR, 0);
+ return err;
+}
+
+static void s5k5baf_hw_set_fiv(struct s5k5baf *state, u16 fiv)
+{
+ s5k5baf_write(state, REG_P_MAX_FR_TIME(0), fiv);
+ s5k5baf_hw_sync_cfg(state);
+}
+
+static void s5k5baf_hw_find_min_fiv(struct s5k5baf *state)
+{
+ u16 err, fiv;
+ int n;
+
+ fiv = s5k5baf_read(state, REG_G_ACTUAL_P_FR_TIME);
+ if (state->error)
+ return;
+
+ for (n = 5; n > 0; --n) {
+ s5k5baf_hw_set_fiv(state, fiv);
+ err = s5k5baf_get_cfg_error(state);
+ if (state->error)
+ return;
+ switch (err) {
+ case CFG_ERROR_RANGE:
+ ++fiv;
+ break;
+ case 0:
+ state->fiv = fiv;
+ v4l2_info(&state->sd,
+ "found valid frame interval: %d00us\n", fiv);
+ return;
+ default:
+ v4l2_err(&state->sd,
+ "error setting frame interval: %d\n", err);
+ state->error = -EINVAL;
+ }
+ };
+ v4l2_err(&state->sd, "cannot find correct frame interval\n");
+ state->error = -ERANGE;
+}
+
+static void s5k5baf_hw_validate_cfg(struct s5k5baf *state)
+{
+ u16 err;
+
+ err = s5k5baf_get_cfg_error(state);
+ if (state->error)
+ return;
+
+ switch (err) {
+ case 0:
+ state->apply_cfg = 1;
+ return;
+ case CFG_ERROR_RANGE:
+ s5k5baf_hw_find_min_fiv(state);
+ if (!state->error)
+ state->apply_cfg = 1;
+ return;
+ default:
+ v4l2_err(&state->sd,
+ "error setting format: %d\n", err);
+ state->error = -EINVAL;
+ }
+}
+
+static void s5k5baf_rescale(struct v4l2_rect *r, const struct v4l2_rect *v,
+ const struct v4l2_rect *n,
+ const struct v4l2_rect *d)
+{
+ r->left = v->left * n->width / d->width;
+ r->top = v->top * n->height / d->height;
+ r->width = v->width * n->width / d->width;
+ r->height = v->height * n->height / d->height;
+}
+
+static int s5k5baf_hw_set_crop_rects(struct s5k5baf *state)
+{
+ struct v4l2_rect *p, r;
+ u16 err;
+ int ret;
+
+ p = &state->crop_sink;
+ s5k5baf_write_seq(state, REG_G_PREVREQ_IN_WIDTH, p->width, p->height,
+ p->left, p->top);
+
+ s5k5baf_rescale(&r, &state->crop_source, &state->crop_sink,
+ &state->compose);
+ s5k5baf_write_seq(state, REG_G_PREVZOOM_IN_WIDTH, r.width, r.height,
+ r.left, r.top);
+
+ s5k5baf_synchronize(state, 500, REG_G_INPUTS_CHANGE_REQ);
+ s5k5baf_synchronize(state, 500, REG_G_PREV_CFG_BYPASS_CHANGED);
+ err = s5k5baf_get_cfg_error(state);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+
+ switch (err) {
+ case 0:
+ break;
+ case CFG_ERROR_RANGE:
+ /* retry crop with frame interval set to max */
+ s5k5baf_hw_set_fiv(state, S5K5BAF_MAX_FR_TIME);
+ err = s5k5baf_get_cfg_error(state);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+ if (err) {
+ v4l2_err(&state->sd,
+ "crop error on max frame interval: %d\n", err);
+ state->error = -EINVAL;
+ }
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+ break;
+ default:
+ v4l2_err(&state->sd, "crop error: %d\n", err);
+ return -EINVAL;
+ }
+
+ if (!state->apply_cfg)
+ return 0;
+
+ p = &state->crop_source;
+ s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0), p->width, p->height);
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+
+ return s5k5baf_clear_error(state);
+}
+
+static void s5k5baf_hw_set_config(struct s5k5baf *state)
+{
+ u16 reg_fmt = s5k5baf_formats[state->pixfmt].reg_p_fmt;
+ struct v4l2_rect *r = &state->crop_source;
+
+ s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0),
+ r->width, r->height, reg_fmt,
+ PCLK_MAX_FREQ >> 2, PCLK_MIN_FREQ >> 2,
+ PVI_MASK_MIPI, CLK_MIPI_INDEX,
+ FR_RATE_FIXED, FR_RATE_Q_DYNAMIC,
+ state->req_fiv, S5K5BAF_MIN_FR_TIME);
+ s5k5baf_hw_sync_cfg(state);
+ s5k5baf_hw_validate_cfg(state);
+}
+
+
+static void s5k5baf_hw_set_test_pattern(struct s5k5baf *state, int id)
+{
+ s5k5baf_i2c_write(state, REG_PATTERN_WIDTH, 800);
+ s5k5baf_i2c_write(state, REG_PATTERN_HEIGHT, 511);
+ s5k5baf_i2c_write(state, REG_PATTERN_PARAM, 0);
+ s5k5baf_i2c_write(state, REG_PATTERN_SET, id);
+}
+
+static void s5k5baf_gpio_assert(struct s5k5baf *state, int id)
+{
+ struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+ gpio_set_value(gpio->gpio, gpio->level);
+}
+
+static void s5k5baf_gpio_deassert(struct s5k5baf *state, int id)
+{
+ struct s5k5baf_gpio *gpio = &state->gpios[id];
+
+ gpio_set_value(gpio->gpio, !gpio->level);
+}
+
+static int s5k5baf_power_on(struct s5k5baf *state)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+ if (ret < 0)
+ goto err;
+
+ ret = clk_set_rate(state->clock, state->mclk_frequency);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ ret = clk_prepare_enable(state->clock);
+ if (ret < 0)
+ goto err_reg_dis;
+
+ v4l2_dbg(1, debug, &state->sd, "clock frequency: %ld\n",
+ clk_get_rate(state->clock));
+
+ s5k5baf_gpio_deassert(state, STBY);
+ usleep_range(50, 100);
+ s5k5baf_gpio_deassert(state, RST);
+ return 0;
+
+err_reg_dis:
+ regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES, state->supplies);
+err:
+ v4l2_err(&state->sd, "%s() failed (%d)\n", __func__, ret);
+ return ret;
+}
+
+static int s5k5baf_power_off(struct s5k5baf *state)
+{
+ int ret;
+
+ state->streaming = 0;
+ state->apply_cfg = 0;
+ state->apply_crop = 0;
+
+ s5k5baf_gpio_assert(state, RST);
+ s5k5baf_gpio_assert(state, STBY);
+
+ if (!IS_ERR(state->clock))
+ clk_disable_unprepare(state->clock);
+
+ ret = regulator_bulk_disable(S5K5BAF_NUM_SUPPLIES,
+ state->supplies);
+ if (ret < 0)
+ v4l2_err(&state->sd, "failed to disable regulators\n");
+
+ return 0;
+}
+
+static void s5k5baf_hw_init(struct s5k5baf *state)
+{
+ s5k5baf_i2c_write(state, AHB_MSB_ADDR_PTR, PAGE_IF_HW);
+ s5k5baf_i2c_write(state, REG_CLEAR_HOST_INT, 0);
+ s5k5baf_i2c_write(state, REG_SW_LOAD_COMPLETE, 1);
+ s5k5baf_i2c_write(state, REG_CMDRD_PAGE, PAGE_IF_SW);
+ s5k5baf_i2c_write(state, REG_CMDWR_PAGE, PAGE_IF_SW);
+}
+
+/*
+ * V4L2 subdev core and video operations
+ */
+
+static void s5k5baf_initialize_data(struct s5k5baf *state)
+{
+ state->pixfmt = 0;
+ state->req_fiv = 10000 / 15;
+ state->fiv = state->req_fiv;
+ state->valid_auto_alg = 0;
+}
+
+static int s5k5baf_load_setfile(struct s5k5baf *state)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, S5K5BAF_FW_FILENAME, &c->dev);
+ if (ret < 0) {
+ dev_warn(&c->dev, "firmware file (%s) not loaded\n",
+ S5K5BAF_FW_FILENAME);
+ return ret;
+ }
+
+ ret = s5k5baf_fw_parse(&c->dev, &state->fw, fw->size / 2,
+ (u16 *)fw->data);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int s5k5baf_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ if (!on != state->power)
+ goto out;
+
+ if (on) {
+ if (state->fw == NULL)
+ s5k5baf_load_setfile(state);
+
+ s5k5baf_initialize_data(state);
+ ret = s5k5baf_power_on(state);
+ if (ret < 0)
+ goto out;
+
+ s5k5baf_hw_init(state);
+ s5k5baf_hw_patch(state);
+ s5k5baf_i2c_write(state, REG_SET_HOST_INT, 1);
+ s5k5baf_hw_set_clocks(state);
+
+ ret = s5k5baf_hw_set_video_bus(state);
+ if (ret < 0)
+ goto out;
+
+ s5k5baf_hw_set_cis(state);
+ s5k5baf_hw_set_ccm(state);
+
+ ret = s5k5baf_clear_error(state);
+ if (!ret)
+ state->power++;
+ } else {
+ s5k5baf_power_off(state);
+ state->power--;
+ }
+
+out:
+ mutex_unlock(&state->lock);
+
+ if (!ret && on)
+ ret = v4l2_ctrl_handler_setup(&state->ctrls.handler);
+
+ return ret;
+}
+
+static void s5k5baf_hw_set_stream(struct s5k5baf *state, int enable)
+{
+ s5k5baf_write_seq(state, REG_G_ENABLE_PREV, enable, 1);
+}
+
+static int s5k5baf_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ mutex_lock(&state->lock);
+
+ if (state->streaming == !!on) {
+ ret = 0;
+ goto out;
+ }
+
+ if (on) {
+ s5k5baf_hw_set_config(state);
+ ret = s5k5baf_hw_set_crop_rects(state);
+ if (ret < 0)
+ goto out;
+ s5k5baf_hw_set_stream(state, 1);
+ s5k5baf_i2c_write(state, 0xb0cc, 0x000b);
+ } else {
+ s5k5baf_hw_set_stream(state, 0);
+ }
+ ret = s5k5baf_clear_error(state);
+ if (!ret)
+ state->streaming = !state->streaming;
+
+out:
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static int s5k5baf_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+
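+ /* fiv is kept in 100 us units, hence the fixed 10000 denominator */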
+ mutex_lock(&state->lock);
+ fi->interval.numerator = state->fiv;
+ fi->interval.denominator = 10000;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static void s5k5baf_set_frame_interval(struct s5k5baf *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_fract *i = &fi->interval;
+
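+ /* convert the requested interval to 100 us units and clamp it to the supported range */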
+ if (fi->interval.denominator == 0)
+ state->req_fiv = S5K5BAF_MAX_FR_TIME;
+ else
+ state->req_fiv = clamp_t(u32,
+ i->numerator * 10000 / i->denominator,
+ S5K5BAF_MIN_FR_TIME,
+ S5K5BAF_MAX_FR_TIME);
+
+ state->fiv = state->req_fiv;
+ if (state->apply_cfg) {
+ s5k5baf_hw_set_fiv(state, state->req_fiv);
+ s5k5baf_hw_validate_cfg(state);
+ }
+ *i = (struct v4l2_fract){ state->fiv, 10000 };
+ if (state->fiv == state->req_fiv)
+ v4l2_info(&state->sd, "frame interval changed to %d00us\n",
+ state->fiv);
+}
+
+static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ mutex_lock(&state->lock);
+ s5k5baf_set_frame_interval(state, fi);
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+/*
+ * V4L2 subdev pad level and video operations
+ */
+static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
+ fie->pad != PAD_CIS)
+ return -EINVAL;
+
+ v4l_bound_align_image(&fie->width, S5K5BAF_WIN_WIDTH_MIN,
+ S5K5BAF_CIS_WIDTH, 1,
+ &fie->height, S5K5BAF_WIN_HEIGHT_MIN,
+ S5K5BAF_CIS_HEIGHT, 1, 0);
+
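+ /* each index adds one 100 us step on top of the minimum frame time */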
+ fie->interval.numerator = S5K5BAF_MIN_FR_TIME + fie->index;
+ fie->interval.denominator = 10000;
+
+ return 0;
+}
+
+static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad == PAD_CIS) {
+ if (code->index > 0)
+ return -EINVAL;
+ code->code = V4L2_MBUS_FMT_FIXED;
+ return 0;
+ }
+
+ if (code->index >= ARRAY_SIZE(s5k5baf_formats))
+ return -EINVAL;
+
+ code->code = s5k5baf_formats[code->index].code;
+ return 0;
+}
+
+static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ if (fse->pad == PAD_CIS) {
+ fse->code = V4L2_MBUS_FMT_FIXED;
+ fse->min_width = S5K5BAF_CIS_WIDTH;
+ fse->max_width = S5K5BAF_CIS_WIDTH;
+ fse->min_height = S5K5BAF_CIS_HEIGHT;
+ fse->max_height = S5K5BAF_CIS_HEIGHT;
+ return 0;
+ }
+
+ i = ARRAY_SIZE(s5k5baf_formats);
+ while (--i)
+ if (fse->code == s5k5baf_formats[i].code)
+ break;
+ fse->code = s5k5baf_formats[i].code;
+ fse->min_width = S5K5BAF_WIN_WIDTH_MIN;
+ fse->max_width = S5K5BAF_CIS_WIDTH;
+ fse->min_height = S5K5BAF_WIN_HEIGHT_MIN;
+ fse->max_height = S5K5BAF_CIS_HEIGHT;
+
+ return 0;
+}
+
+static void s5k5baf_try_cis_format(struct v4l2_mbus_framefmt *mf)
+{
+ mf->width = S5K5BAF_CIS_WIDTH;
+ mf->height = S5K5BAF_CIS_HEIGHT;
+ mf->code = V4L2_MBUS_FMT_FIXED;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
+{
+ int pixfmt;
+
+ v4l_bound_align_image(&mf->width, S5K5BAF_WIN_WIDTH_MIN,
+ S5K5BAF_CIS_WIDTH, 1,
+ &mf->height, S5K5BAF_WIN_HEIGHT_MIN,
+ S5K5BAF_CIS_HEIGHT, 1, 0);
+
+ pixfmt = s5k5baf_find_pixfmt(mf);
+
+ mf->colorspace = s5k5baf_formats[pixfmt].colorspace;
+ mf->code = s5k5baf_formats[pixfmt].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ return pixfmt;
+}
+
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ const struct s5k5baf_pixfmt *pixfmt;
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mf = &fmt->format;
+ if (fmt->pad == PAD_CIS) {
+ s5k5baf_try_cis_format(mf);
+ return 0;
+ }
+ mf->field = V4L2_FIELD_NONE;
+ mutex_lock(&state->lock);
+ pixfmt = &s5k5baf_formats[state->pixfmt];
+ mf->width = state->crop_source.width;
+ mf->height = state->crop_source.height;
+ mf->code = pixfmt->code;
+ mf->colorspace = pixfmt->colorspace;
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct s5k5baf *state = to_s5k5baf(sd);
+ const struct s5k5baf_pixfmt *pixfmt;
+ int ret = 0;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(fh, fmt->pad) = *mf;
+ return 0;
+ }
+
+ if (fmt->pad == PAD_CIS) {
+ s5k5baf_try_cis_format(mf);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+
+ if (state->streaming) {
+ mutex_unlock(&state->lock);
+ return -EBUSY;
+ }
+
+ state->pixfmt = s5k5baf_try_isp_format(mf);
+ pixfmt = &s5k5baf_formats[state->pixfmt];
+ mf->code = pixfmt->code;
+ mf->colorspace = pixfmt->colorspace;
+ mf->width = state->crop_source.width;
+ mf->height = state->crop_source.height;
+
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+enum selection_rect { R_CIS, R_CROP_SINK, R_COMPOSE, R_CROP_SOURCE, R_INVALID };
+
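+/*
+ * Map a (pad, selection target) pair to one of the driver's rectangles:
+ * the fixed CIS window, the sink crop, the compose (scaler) rectangle or
+ * the source crop.
+ */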
+static enum selection_rect s5k5baf_get_sel_rect(u32 pad, u32 target)
+{
+ switch (target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ return pad ? R_COMPOSE : R_CIS;
+ case V4L2_SEL_TGT_CROP:
+ return pad ? R_CROP_SOURCE : R_CROP_SINK;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ return pad ? R_INVALID : R_CROP_SINK;
+ case V4L2_SEL_TGT_COMPOSE:
+ return pad ? R_INVALID : R_COMPOSE;
+ default:
+ return R_INVALID;
+ }
+}
+
+static int s5k5baf_is_bound_target(u32 target)
+{
+ return target == V4L2_SEL_TGT_CROP_BOUNDS ||
+ target == V4L2_SEL_TGT_COMPOSE_BOUNDS;
+}
+
+static int s5k5baf_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ enum selection_rect rtype;
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+
+ switch (rtype) {
+ case R_INVALID:
+ return -EINVAL;
+ case R_CIS:
+ sel->r = s5k5baf_cis_rect;
+ return 0;
+ default:
+ break;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ if (rtype == R_COMPOSE)
+ sel->r = *v4l2_subdev_get_try_compose(fh, sel->pad);
+ else
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&state->lock);
+ switch (rtype) {
+ case R_CROP_SINK:
+ sel->r = state->crop_sink;
+ break;
+ case R_COMPOSE:
+ sel->r = state->compose;
+ break;
+ case R_CROP_SOURCE:
+ sel->r = state->crop_source;
+ break;
+ default:
+ break;
+ }
+ if (s5k5baf_is_bound_target(sel->target)) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+/* bounds range [start, start+len) to [0, max), aligns to 2 and enforces a minimum length */
+static void s5k5baf_bound_range(u32 *start, u32 *len, u32 max)
+{
+ if (*len > max)
+ *len = max;
+ if (*start + *len > max)
+ *start = max - *len;
+ *start &= ~1;
+ *len &= ~1;
+ if (*len < S5K5BAF_WIN_WIDTH_MIN)
+ *len = S5K5BAF_WIN_WIDTH_MIN;
+}
+
+static void s5k5baf_bound_rect(struct v4l2_rect *r, u32 width, u32 height)
+{
+ s5k5baf_bound_range(&r->left, &r->width, width);
+ s5k5baf_bound_range(&r->top, &r->height, height);
+}
+
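+/*
+ * The rectangles form a chain: CIS window -> sink crop -> compose ->
+ * source crop. The requested rectangle and every one following it are
+ * re-bounded so that each fits inside its predecessor.
+ */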
+static void s5k5baf_set_rect_and_adjust(struct v4l2_rect **rects,
+ enum selection_rect first,
+ struct v4l2_rect *v)
+{
+ struct v4l2_rect *r, *br;
+ enum selection_rect i = first;
+
+ *rects[first] = *v;
+ do {
+ r = rects[i];
+ br = rects[i - 1];
+ s5k5baf_bound_rect(r, br->width, br->height);
+ } while (++i != R_INVALID);
+ *v = *rects[first];
+}
+
+static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return !memcmp(r1, r2, sizeof(*r1));
+}
+
+static int s5k5baf_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ enum selection_rect rtype;
+ struct s5k5baf *state = to_s5k5baf(sd);
+ struct v4l2_rect **rects;
+ int ret = 0;
+
+ rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
+ if (rtype == R_INVALID || s5k5baf_is_bound_target(sel->target))
+ return -EINVAL;
+
+ /* allow only scaling on compose */
+ if (rtype == R_COMPOSE) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ rects = (struct v4l2_rect * []) {
+ &s5k5baf_cis_rect,
+ v4l2_subdev_get_try_crop(fh, PAD_CIS),
+ v4l2_subdev_get_try_compose(fh, PAD_CIS),
+ v4l2_subdev_get_try_crop(fh, PAD_OUT)
+ };
+ s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+ return 0;
+ }
+
+ rects = (struct v4l2_rect * []) {
+ &s5k5baf_cis_rect,
+ &state->crop_sink,
+ &state->compose,
+ &state->crop_source
+ };
+ mutex_lock(&state->lock);
+ if (state->streaming) {
+ /* adjust sel->r to avoid output resolution change */
+ if (rtype < R_CROP_SOURCE) {
+ if (sel->r.width < state->crop_source.width)
+ sel->r.width = state->crop_source.width;
+ if (sel->r.height < state->crop_source.height)
+ sel->r.height = state->crop_source.height;
+ } else {
+ sel->r.width = state->crop_source.width;
+ sel->r.height = state->crop_source.height;
+ }
+ }
+ s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
+ if (!s5k5baf_cmp_rect(&state->crop_sink, &s5k5baf_cis_rect) ||
+ !s5k5baf_cmp_rect(&state->compose, &s5k5baf_cis_rect))
+ state->apply_crop = 1;
+ if (state->streaming)
+ ret = s5k5baf_hw_set_crop_rects(state);
+ mutex_unlock(&state->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops s5k5baf_cis_pad_ops = {
+ .enum_mbus_code = s5k5baf_enum_mbus_code,
+ .enum_frame_size = s5k5baf_enum_frame_size,
+ .get_fmt = s5k5baf_get_fmt,
+ .set_fmt = s5k5baf_set_fmt,
+};
+
+static const struct v4l2_subdev_pad_ops s5k5baf_pad_ops = {
+ .enum_mbus_code = s5k5baf_enum_mbus_code,
+ .enum_frame_size = s5k5baf_enum_frame_size,
+ .enum_frame_interval = s5k5baf_enum_frame_interval,
+ .get_fmt = s5k5baf_get_fmt,
+ .set_fmt = s5k5baf_set_fmt,
+ .get_selection = s5k5baf_get_selection,
+ .set_selection = s5k5baf_set_selection,
+};
+
+static const struct v4l2_subdev_video_ops s5k5baf_video_ops = {
+ .g_frame_interval = s5k5baf_g_frame_interval,
+ .s_frame_interval = s5k5baf_s_frame_interval,
+ .s_stream = s5k5baf_s_stream,
+};
+
+/*
+ * V4L2 subdev controls
+ */
+
+static int s5k5baf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ v4l2_dbg(1, debug, sd, "ctrl: %s, value: %d\n", ctrl->name, ctrl->val);
+
+ mutex_lock(&state->lock);
+
+ if (state->power == 0)
+ goto unlock;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ s5k5baf_hw_set_awb(state, ctrl->val);
+ break;
+
+ case V4L2_CID_BRIGHTNESS:
+ s5k5baf_write(state, REG_USER_BRIGHTNESS, ctrl->val);
+ break;
+
+ case V4L2_CID_COLORFX:
+ s5k5baf_hw_set_colorfx(state, ctrl->val);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ s5k5baf_write(state, REG_USER_CONTRAST, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ s5k5baf_hw_set_auto_exposure(state, ctrl->val);
+ break;
+
+ case V4L2_CID_HFLIP:
+ s5k5baf_hw_set_mirror(state);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ s5k5baf_hw_set_anti_flicker(state, ctrl->val);
+ break;
+
+ case V4L2_CID_SATURATION:
+ s5k5baf_write(state, REG_USER_SATURATION, ctrl->val);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ s5k5baf_write(state, REG_USER_SHARPBLUR, ctrl->val);
+ break;
+
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
+ s5k5baf_write(state, REG_P_COLORTEMP(0), ctrl->val);
+ if (state->apply_cfg)
+ s5k5baf_hw_sync_cfg(state);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ s5k5baf_hw_set_test_pattern(state, ctrl->val);
+ break;
+ }
+unlock:
+ ret = s5k5baf_clear_error(state);
+ mutex_unlock(&state->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5k5baf_ctrl_ops = {
+ .s_ctrl = s5k5baf_s_ctrl,
+};
+
+static const char * const s5k5baf_test_pattern_menu[] = {
+ "Disabled",
+ "Blank",
+ "Bars",
+ "Gradients",
+ "Textile",
+ "Textile2",
+ "Squares"
+};
+
+static int s5k5baf_initialize_ctrls(struct s5k5baf *state)
+{
+ const struct v4l2_ctrl_ops *ops = &s5k5baf_ctrl_ops;
+ struct s5k5baf_ctrls *ctrls = &state->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdl, 16);
+ if (ret < 0) {
+ v4l2_err(&state->sd, "cannot init ctrl handler (%d)\n", ret);
+ return ret;
+ }
+
+ /* Auto white balance cluster */
+ ctrls->awb = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->gain_red = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ 0, 255, 1, S5K5BAF_GAIN_RED_DEF);
+ ctrls->gain_blue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ 0, 255, 1, S5K5BAF_GAIN_BLUE_DEF);
+ v4l2_ctrl_auto_cluster(3, &ctrls->awb, 0, false);
+
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &ctrls->hflip);
+
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ /* Exposure time: x 1 us */
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 6000000U, 1, 100000U);
+ /* Total gain: 256 <=> 1x */
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 0, 256, 1, 256);
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 0, false);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+
+ v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SKY_BLUE, ~0x6f, V4L2_COLORFX_NONE);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ 0, 256, 1, 0);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -127, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS, -127, 127, 1, 0);
+
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s5k5baf_test_pattern_menu) - 1,
+ 0, 0, s5k5baf_test_pattern_menu);
+
+ if (hdl->error) {
+ v4l2_err(&state->sd, "error creating controls (%d)\n",
+ hdl->error);
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ state->sd.ctrl_handler = hdl;
+ return 0;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, PAD_CIS);
+ s5k5baf_try_cis_format(mf);
+
+ if (s5k5baf_is_cis_subdev(sd))
+ return 0;
+
+ mf = v4l2_subdev_get_try_format(fh, PAD_OUT);
+ mf->colorspace = s5k5baf_formats[0].colorspace;
+ mf->code = s5k5baf_formats[0].code;
+ mf->width = s5k5baf_cis_rect.width;
+ mf->height = s5k5baf_cis_rect.height;
+ mf->field = V4L2_FIELD_NONE;
+
+ *v4l2_subdev_get_try_crop(fh, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_compose(fh, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(fh, PAD_OUT) = s5k5baf_cis_rect;
+
+ return 0;
+}
+
+static int s5k5baf_check_fw_revision(struct s5k5baf *state)
+{
+ u16 api_ver = 0, fw_rev = 0, s_id = 0;
+ int ret;
+
+ api_ver = s5k5baf_read(state, REG_FW_APIVER);
+ fw_rev = s5k5baf_read(state, REG_FW_REVISION) & 0xff;
+ s_id = s5k5baf_read(state, REG_FW_SENSOR_ID);
+ ret = s5k5baf_clear_error(state);
+ if (ret < 0)
+ return ret;
+
+ v4l2_info(&state->sd, "FW API=%#x, revision=%#x sensor_id=%#x\n",
+ api_ver, fw_rev, s_id);
+
+ if (api_ver != S5K5BAF_FW_APIVER) {
+ v4l2_err(&state->sd, "FW API version not supported\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int s5k5baf_registered(struct v4l2_subdev *sd)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ int ret;
+
+ ret = v4l2_device_register_subdev(sd->v4l2_dev, &state->cis_sd);
+ if (ret < 0)
+ v4l2_err(sd, "failed to register subdev %s\n",
+ state->cis_sd.name);
+ else
+ ret = media_entity_create_link(&state->cis_sd.entity, PAD_CIS,
+ &state->sd.entity, PAD_CIS,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ return ret;
+}
+
+static void s5k5baf_unregistered(struct v4l2_subdev *sd)
+{
+ struct s5k5baf *state = to_s5k5baf(sd);
+ v4l2_device_unregister_subdev(&state->cis_sd);
+}
+
+static const struct v4l2_subdev_ops s5k5baf_cis_subdev_ops = {
+ .pad = &s5k5baf_cis_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_cis_subdev_internal_ops = {
+ .open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_internal_ops s5k5baf_subdev_internal_ops = {
+ .registered = s5k5baf_registered,
+ .unregistered = s5k5baf_unregistered,
+ .open = s5k5baf_open,
+};
+
+static const struct v4l2_subdev_core_ops s5k5baf_core_ops = {
+ .s_power = s5k5baf_set_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+};
+
+static const struct v4l2_subdev_ops s5k5baf_subdev_ops = {
+ .core = &s5k5baf_core_ops,
+ .pad = &s5k5baf_pad_ops,
+ .video = &s5k5baf_video_ops,
+};
+
+static int s5k5baf_configure_gpios(struct s5k5baf *state)
+{
+ static const char * const name[] = { "S5K5BAF_STBY", "S5K5BAF_RST" };
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ struct s5k5baf_gpio *g = state->gpios;
+ int ret, i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ int flags = GPIOF_DIR_OUT;
+ if (g[i].level)
+ flags |= GPIOF_INIT_HIGH;
+ ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags, name[i]);
+ if (ret < 0) {
+ v4l2_err(c, "failed to request gpio %s\n", name[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int s5k5baf_parse_gpios(struct s5k5baf_gpio *gpios, struct device *dev)
+{
+ static const char * const names[] = {
+ "stbyn-gpios",
+ "rstn-gpios",
+ };
+ struct device_node *node = dev->of_node;
+ enum of_gpio_flags flags;
+ int ret, i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ ret = of_get_named_gpio_flags(node, names[i], 0, &flags);
+ if (ret < 0) {
+ dev_err(dev, "no %s GPIO pin provided\n", names[i]);
+ return ret;
+ }
+ gpios[i].gpio = ret;
+ gpios[i].level = !(flags & OF_GPIO_ACTIVE_LOW);
+ }
+
+ return 0;
+}
+
+static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *node_ep;
+ struct v4l2_of_endpoint ep;
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "no device-tree node provided\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "clock-frequency",
+ &state->mclk_frequency);
+ if (ret < 0) {
+ state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
+ dev_info(dev, "using default %u Hz clock frequency\n",
+ state->mclk_frequency);
+ }
+
+ ret = s5k5baf_parse_gpios(state->gpios, dev);
+ if (ret < 0)
+ return ret;
+
+ node_ep = of_graph_get_next_endpoint(node, NULL);
+ if (!node_ep) {
+ dev_err(dev, "no endpoint defined at node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ v4l2_of_parse_endpoint(node_ep, &ep);
+ of_node_put(node_ep);
+ state->bus_type = ep.bus_type;
+
+ switch (state->bus_type) {
+ case V4L2_MBUS_CSI2:
+ state->nlanes = ep.bus.mipi_csi2.num_data_lanes;
+ break;
+ case V4L2_MBUS_PARALLEL:
+ break;
+ default:
+ dev_err(dev, "unsupported bus in endpoint defined at node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
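+/*
+ * The device is modelled as two subdevs: a sensor (CIS) entity with one
+ * source pad and an ISP entity with a sink pad (linked to the CIS) and an
+ * output source pad.
+ */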
+static int s5k5baf_configure_subdevs(struct s5k5baf *state,
+ struct i2c_client *c)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = &state->cis_sd;
+ v4l2_subdev_init(sd, &s5k5baf_cis_subdev_ops);
+ sd->owner = THIS_MODULE;
+ v4l2_set_subdevdata(sd, state);
+ snprintf(sd->name, sizeof(sd->name), "S5K5BAF-CIS %d-%04x",
+ i2c_adapter_id(c->adapter), c->addr);
+
+ sd->internal_ops = &s5k5baf_cis_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->cis_pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, NUM_CIS_PADS, &state->cis_pad, 0);
+ if (ret < 0)
+ goto err;
+
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, c, &s5k5baf_subdev_ops);
+ snprintf(sd->name, sizeof(sd->name), "S5K5BAF-ISP %d-%04x",
+ i2c_adapter_id(c->adapter), c->addr);
+
+ sd->internal_ops = &s5k5baf_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ state->pads[PAD_CIS].flags = MEDIA_PAD_FL_SINK;
+ state->pads[PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ret = media_entity_init(&sd->entity, NUM_ISP_PADS, state->pads, 0);
+
+ if (!ret)
+ return 0;
+
+ media_entity_cleanup(&state->cis_sd.entity);
+err:
+ dev_err(&c->dev, "cannot init media entity %s\n", sd->name);
+ return ret;
+}
+
+static int s5k5baf_configure_regulators(struct s5k5baf *state)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < S5K5BAF_NUM_SUPPLIES; i++)
+ state->supplies[i].supply = s5k5baf_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&c->dev, S5K5BAF_NUM_SUPPLIES,
+ state->supplies);
+ if (ret < 0)
+ v4l2_err(c, "failed to get regulators\n");
+ return ret;
+}
+
+static int s5k5baf_probe(struct i2c_client *c,
+ const struct i2c_device_id *id)
+{
+ struct s5k5baf *state;
+ int ret;
+
+ state = devm_kzalloc(&c->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ state->crop_sink = s5k5baf_cis_rect;
+ state->compose = s5k5baf_cis_rect;
+ state->crop_source = s5k5baf_cis_rect;
+
+ ret = s5k5baf_parse_device_node(state, &c->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = s5k5baf_configure_subdevs(state, c);
+ if (ret < 0)
+ return ret;
+
+ ret = s5k5baf_configure_gpios(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = s5k5baf_configure_regulators(state);
+ if (ret < 0)
+ goto err_me;
+
+ state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+ if (IS_ERR(state->clock)) {
+ ret = -EPROBE_DEFER;
+ goto err_me;
+ }
+
+ ret = s5k5baf_power_on(state);
+ if (ret < 0) {
+ ret = -EPROBE_DEFER;
+ goto err_me;
+ }
+ s5k5baf_hw_init(state);
+ ret = s5k5baf_check_fw_revision(state);
+
+ s5k5baf_power_off(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = s5k5baf_initialize_ctrls(state);
+ if (ret < 0)
+ goto err_me;
+
+ ret = v4l2_async_register_subdev(&state->sd);
+ if (ret < 0)
+ goto err_ctrl;
+
+ return 0;
+
+err_ctrl:
+ v4l2_ctrl_handler_free(state->sd.ctrl_handler);
+err_me:
+ media_entity_cleanup(&state->sd.entity);
+ media_entity_cleanup(&state->cis_sd.entity);
+ return ret;
+}
+
+static int s5k5baf_remove(struct i2c_client *c)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct s5k5baf *state = to_s5k5baf(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+
+ sd = &state->cis_sd;
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct i2c_device_id s5k5baf_id[] = {
+ { S5K5BAF_DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, s5k5baf_id);
+
+static const struct of_device_id s5k5baf_of_match[] = {
+ { .compatible = "samsung,s5k5baf" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s5k5baf_of_match);
+
+static struct i2c_driver s5k5baf_i2c_driver = {
+ .driver = {
+ .of_match_table = s5k5baf_of_match,
+ .name = S5K5BAF_DRIVER_NAME
+ },
+ .probe = s5k5baf_probe,
+ .remove = s5k5baf_remove,
+ .id_table = s5k5baf_id,
+};
+
+module_i2c_driver(s5k5baf_i2c_driver);
+
+MODULE_DESCRIPTION("Samsung S5K5BAF(X) UXGA camera driver");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 70bc72e795d0..2960b5a8362a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -150,14 +150,14 @@ static inline struct saa6588 *to_saa6588(struct v4l2_subdev *sd)
/* ---------------------------------------------------------------------- */
-static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
+static bool block_from_buf(struct saa6588 *s, unsigned char *buf)
{
int i;
if (s->rd_index == s->wr_index) {
if (debug > 2)
dprintk(PREFIX "Read: buffer empty.\n");
- return 0;
+ return false;
}
if (debug > 2) {
@@ -166,8 +166,7 @@ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
dprintk("0x%02x ", s->buffer[i]);
}
- if (copy_to_user(user_buf, &s->buffer[s->rd_index], 3))
- return -EFAULT;
+ memcpy(buf, &s->buffer[s->rd_index], 3);
s->rd_index += 3;
if (s->rd_index >= s->buf_size)
@@ -177,22 +176,22 @@ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
if (debug > 2)
dprintk("%d blocks total.\n", s->block_count);
- return 1;
+ return true;
}
static void read_from_buf(struct saa6588 *s, struct saa6588_command *a)
{
- unsigned long flags;
-
unsigned char __user *buf_ptr = a->buffer;
- unsigned int i;
+ unsigned char buf[3];
+ unsigned long flags;
unsigned int rd_blocks;
+ unsigned int i;
a->result = 0;
if (!a->buffer)
return;
- while (!s->data_available_for_read) {
+ while (!a->nonblocking && !s->data_available_for_read) {
int ret = wait_event_interruptible(s->read_queue,
s->data_available_for_read);
if (ret == -ERESTARTSYS) {
@@ -201,24 +200,31 @@ static void read_from_buf(struct saa6588 *s, struct saa6588_command *a)
}
}
- spin_lock_irqsave(&s->lock, flags);
rd_blocks = a->block_count;
+ spin_lock_irqsave(&s->lock, flags);
if (rd_blocks > s->block_count)
rd_blocks = s->block_count;
+ spin_unlock_irqrestore(&s->lock, flags);
- if (!rd_blocks) {
- spin_unlock_irqrestore(&s->lock, flags);
+ if (!rd_blocks)
return;
- }
for (i = 0; i < rd_blocks; i++) {
- if (block_to_user_buf(s, buf_ptr)) {
- buf_ptr += 3;
- a->result++;
- } else
+ bool got_block;
+
+ spin_lock_irqsave(&s->lock, flags);
+ got_block = block_from_buf(s, buf);
+ spin_unlock_irqrestore(&s->lock, flags);
+ if (!got_block)
break;
+ if (copy_to_user(buf_ptr, buf, 3)) {
+ a->result = -EFAULT;
+ return;
+ }
+ buf_ptr += 3;
+ a->result += 3;
}
- a->result *= 3;
+ spin_lock_irqsave(&s->lock, flags);
s->data_available_for_read = (s->block_count > 0);
spin_unlock_irqrestore(&s->lock, flags);
}
@@ -394,14 +400,11 @@ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
struct saa6588_command *a = arg;
switch (cmd) {
- /* --- open() for /dev/radio --- */
- case SAA6588_CMD_OPEN:
- a->result = 0; /* return error if chip doesn't work ??? */
- break;
/* --- close() for /dev/radio --- */
case SAA6588_CMD_CLOSE:
s->data_available_for_read = 1;
wake_up_interruptible(&s->read_queue);
+ s->data_available_for_read = 0;
a->result = 0;
break;
/* --- read() for /dev/radio --- */
@@ -411,9 +414,8 @@ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
/* --- poll() for /dev/radio --- */
case SAA6588_CMD_POLL:
a->result = 0;
- if (s->data_available_for_read) {
+ if (s->data_available_for_read)
a->result |= POLLIN | POLLRDNORM;
- }
poll_wait(a->instance, &s->read_queue, a->event_list);
break;
diff --git a/drivers/media/pci/saa7134/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 8ac4b1f2322d..8272c0b9c5bf 100644
--- a/drivers/media/pci/saa7134/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -33,11 +33,11 @@
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/videodev2.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-common.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
#define MPEG_VIDEO_TARGET_BITRATE_MAX 27000
#define MPEG_VIDEO_MAX_BITRATE_MAX 27000
@@ -124,7 +124,7 @@ static inline struct saa6752hs_state *to_state(struct v4l2_subdev *sd)
/* ---------------------------------------------------------------------- */
-static u8 PAT[] = {
+static const u8 PAT[] = {
0xc2, /* i2c register */
0x00, /* table number for encoder */
@@ -150,7 +150,7 @@ static u8 PAT[] = {
0x00, 0x00, 0x00, 0x00 /* CRC32 */
};
-static u8 PMT[] = {
+static const u8 PMT[] = {
0xc2, /* i2c register */
0x01, /* table number for encoder */
@@ -179,7 +179,7 @@ static u8 PMT[] = {
0x00, 0x00, 0x00, 0x00 /* CRC32 */
};
-static u8 PMT_AC3[] = {
+static const u8 PMT_AC3[] = {
0xc2, /* i2c register */
0x01, /* table number for encoder(1) */
0x47, /* sync */
@@ -212,7 +212,7 @@ static u8 PMT_AC3[] = {
0xED, 0xDE, 0x2D, 0xF3 /* CRC32 BE */
};
-static struct saa6752hs_mpeg_params param_defaults =
+static const struct saa6752hs_mpeg_params param_defaults =
{
.ts_pid_pmt = 16,
.ts_pid_video = 260,
@@ -643,13 +643,6 @@ static const struct v4l2_ctrl_ops saa6752hs_ctrl_ops = {
static const struct v4l2_subdev_core_ops saa6752hs_core_ops = {
.init = saa6752hs_init,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.s_std = saa6752hs_s_std,
};
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index ae66d91bf713..8741cae9c9f2 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -399,7 +399,6 @@ static void smiapp_update_mbus_formats(struct smiapp_sensor *sensor)
BUG_ON(max(internal_csi_format_idx, csi_format_idx) + pixel_order
>= ARRAY_SIZE(smiapp_csi_data_formats));
- BUG_ON(min(internal_csi_format_idx, csi_format_idx) < 0);
dev_dbg(&client->dev, "new pixel order %s\n",
pixel_order_str[pixel_order]);
@@ -2028,8 +2027,8 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
sel->r.width = min(sel->r.width, src_size->width);
sel->r.height = min(sel->r.height, src_size->height);
- sel->r.left = min(sel->r.left, src_size->width - sel->r.width);
- sel->r.top = min(sel->r.top, src_size->height - sel->r.height);
+ sel->r.left = min_t(int, sel->r.left, src_size->width - sel->r.width);
+ sel->r.top = min_t(int, sel->r.top, src_size->height - sel->r.height);
*crops[sel->pad] = sel->r;
@@ -2121,8 +2120,8 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
sel->r.left = max(0, sel->r.left & ~1);
sel->r.top = max(0, sel->r.top & ~1);
- sel->r.width = max(0, SMIAPP_ALIGN_DIM(sel->r.width, sel->flags));
- sel->r.height = max(0, SMIAPP_ALIGN_DIM(sel->r.height, sel->flags));
+ sel->r.width = SMIAPP_ALIGN_DIM(sel->r.width, sel->flags);
+ sel->r.height = SMIAPP_ALIGN_DIM(sel->r.height, sel->flags);
sel->r.width = max_t(unsigned int,
sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index 6f4056668bbc..ccf59406a172 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -208,8 +208,8 @@ struct mt9m111 {
struct mt9m111_context *ctx;
struct v4l2_rect rect; /* cropping rectangle */
struct v4l2_clk *clk;
- int width; /* output */
- int height; /* sizes */
+ unsigned int width; /* output */
+ unsigned int height; /* sizes */
struct mutex power_lock; /* lock to protect power_count */
int power_count;
const struct mt9m111_datafmt *fmt;
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 83d85df4853a..ca001178c5bf 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -36,6 +36,7 @@
#include <linux/module.h>
#include <linux/v4l2-mediabus.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
@@ -1068,7 +1069,7 @@ tvp514x_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!endpoint)
return NULL;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 89c0b13463b7..542d2528b3f9 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -58,21 +58,17 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
struct i2c_client *c = v4l2_get_subdevdata(sd);
unsigned char buffer[1];
int rc;
-
- buffer[0] = addr;
-
- rc = i2c_master_send(c, buffer, 1);
- if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
- return rc;
- }
-
- msleep(10);
-
- rc = i2c_master_recv(c, buffer, 1);
- if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
- return rc;
+ struct i2c_msg msg[] = {
+ { .addr = c->addr, .flags = 0,
+ .buf = &addr, .len = 1 },
+ { .addr = c->addr, .flags = I2C_M_RD,
+ .buf = buffer, .len = 1 }
+ };
+
+ rc = i2c_transfer(c->adapter, msg, 2);
+ if (rc < 0 || rc != 2) {
+ v4l2_err(sd, "i2c i/o error: rc == %d (should be 2)\n", rc);
+ return rc < 0 ? rc : -EIO;
}
v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
@@ -867,7 +863,7 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
struct v4l2_rect rect = a->c;
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std;
- int hmax;
+ unsigned int hmax;
v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
@@ -877,9 +873,9 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
/* tvp5150 has some special limits */
rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
- rect.width = clamp(rect.width,
- TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
- TVP5150_H_MAX - rect.left);
+ rect.width = clamp_t(unsigned int, rect.width,
+ TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+ TVP5150_H_MAX - rect.left);
rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
/* Calculate height based on current standard */
@@ -893,9 +889,9 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
else
hmax = TVP5150_V_MAX_OTHERS;
- rect.height = clamp(rect.height,
- hmax - TVP5150_MAX_CROP_TOP - rect.top,
- hmax - rect.top);
+ rect.height = clamp_t(unsigned int, rect.height,
+ hmax - TVP5150_MAX_CROP_TOP - rect.top,
+ hmax - rect.top);
tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 912e1cccdd1c..c4e1e2cb3094 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -30,6 +30,7 @@
#include <linux/videodev2.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-async.h>
@@ -957,7 +958,7 @@ tvp7002_get_pdata(struct i2c_client *client)
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
- endpoint = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
if (!endpoint)
return NULL;
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 25bdd9312fea..23f4f65fccd7 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -503,6 +503,7 @@ static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
return &container_of(ctrl->handler, struct vs6624, hdl)->sd;
}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vs6624_read(struct v4l2_subdev *sd, u16 index)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -515,6 +516,7 @@ static int vs6624_read(struct v4l2_subdev *sd, u16 index)
return buf[0];
}
+#endif
static int vs6624_write(struct v4l2_subdev *sd, u16 index,
u8 value)
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2c286c307145..37c334edc7e8 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -235,6 +235,8 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ DECLARE_BITMAP(active, entity->num_pads);
+ DECLARE_BITMAP(has_no_links, entity->num_pads);
unsigned int i;
entity->stream_count++;
@@ -248,21 +250,46 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
if (!entity->ops || !entity->ops->link_validate)
continue;
+ bitmap_zero(active, entity->num_pads);
+ bitmap_fill(has_no_links, entity->num_pads);
+
for (i = 0; i < entity->num_links; i++) {
struct media_link *link = &entity->links[i];
-
- /* Is this pad part of an enabled link? */
- if (!(link->flags & MEDIA_LNK_FL_ENABLED))
- continue;
-
- /* Are we the sink or not? */
- if (link->sink->entity != entity)
+ struct media_pad *pad = link->sink->entity == entity
+ ? link->sink : link->source;
+
+ /* Mark that a pad is connected by a link. */
+ bitmap_clear(has_no_links, pad->index, 1);
+
+ /*
+ * Pads that either do not need to connect or
+ * are connected through an enabled link are
+ * fine.
+ */
+ if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
+ link->flags & MEDIA_LNK_FL_ENABLED)
+ bitmap_set(active, pad->index, 1);
+
+ /*
+ * Link validation will only take place for
+ * sink ends of the link that are enabled.
+ */
+ if (link->sink != pad ||
+ !(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
ret = entity->ops->link_validate(link);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto error;
}
+
+ /* Either no links or validated links are fine. */
+ bitmap_or(active, active, has_no_links, entity->num_pads);
+
+ if (!bitmap_full(active, entity->num_pads)) {
+ ret = -EPIPE;
+ goto error;
+ }
}
mutex_unlock(&mdev->graph_mutex);
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
- /* Michael Krufky <mkrufky@m1k.net> */
+ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 92a06fd85865..afcd53bfcf8e 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1126,9 +1126,9 @@ bttv_crop_calc_limits(struct bttv_crop *c)
c->min_scaled_height = 32;
} else {
c->min_scaled_width =
- (max(48, c->rect.width >> 4) + 3) & ~3;
+ (max_t(unsigned int, 48, c->rect.width >> 4) + 3) & ~3;
c->min_scaled_height =
- max(32, c->rect.height >> 4);
+ max_t(unsigned int, 32, c->rect.height >> 4);
}
c->max_scaled_width = c->rect.width & ~3;
@@ -2024,7 +2024,7 @@ limit_scaled_size_lock (struct bttv_fh * fh,
/* We cannot scale up. When the scaled image is larger
than crop.rect we adjust the crop.rect as required
by the V4L2 spec, hence cropcap.bounds are our limit. */
- max_width = min(b->width, (__s32) MAX_HACTIVE);
+ max_width = min_t(unsigned int, b->width, MAX_HACTIVE);
max_height = b->height;
/* We cannot capture the same line as video and VBI data.
@@ -3266,7 +3266,9 @@ static ssize_t radio_read(struct file *file, char __user *data,
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
struct saa6588_command cmd;
- cmd.block_count = count/3;
+
+ cmd.block_count = count / 3;
+ cmd.nonblocking = file->f_flags & O_NONBLOCK;
cmd.buffer = data;
cmd.instance = file;
cmd.result = -ENODEV;
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev);
if (0 != err) {
- kfree(sub);
+ put_device(&sub->dev);
return err;
}
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index b2c8c3439fea..ea272bcb38df 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -145,11 +145,12 @@ static int snd_cx18_init(struct v4l2_device *v4l2_dev)
/* This is a no-op for us. We'll use the cx->instance */
/* (2) Create a card instance */
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, /* use first available id */
- SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
- THIS_MODULE, 0, &sc);
+ ret = snd_card_new(&cx->pci_dev->dev,
+ SNDRV_DEFAULT_IDX1, /* use first available id */
+ SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
+ THIS_MODULE, 0, &sc);
if (ret) {
- CX18_ALSA_ERR("%s: snd_card_create() failed with err %d\n",
+ CX18_ALSA_ERR("%s: snd_card_new() failed with err %d\n",
__func__, ret);
goto err_exit;
}
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index c1f8cc6f14b2..716bdc57fac6 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
struct i2c_client *c;
u8 eedata[256];
+ memset(tv, 0, sizeof(*tv));
+
c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return;
strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
c->adapter = &cx->i2c_adap[0];
c->addr = 0xa0 >> 1;
- memset(tv, 0, sizeof(*tv));
if (tveeprom_read(c, eedata, sizeof(eedata)))
goto ret;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index c6c9bd58f8be..554798dcedd0 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -489,7 +489,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
return NULL;
}
- err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ err = snd_card_new(&dev->pci->dev,
+ SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
if (err < 0)
goto error;
@@ -500,8 +501,6 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
chip->card = card;
spin_lock_init(&chip->lock);
- snd_card_set_dev(card, &dev->pci->dev);
-
err = snd_cx23885_pcm(chip, 0, "CX23885 Digital");
if (err < 0)
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 6e91e84d6bf9..2dd5bcaa7e53 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -618,7 +618,7 @@ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = {
+static const struct pci_device_id cx25821_audio_pci_tbl[] = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
@@ -645,8 +645,9 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
return -ENOENT;
}
- err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(struct cx25821_audio_dev), &card);
+ err = snd_card_new(&dev->pci->dev, index[devno], id[devno],
+ THIS_MODULE,
+ sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
pr_info("DEBUG ERROR: cannot create snd_card_new in %s\n",
__func__);
@@ -682,8 +683,6 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
goto error;
}
- snd_card_set_dev(card, &chip->pci->dev);
-
strcpy(card->shortname, "cx25821");
sprintf(card->longname, "%s at 0x%lx irq %d", chip->dev->name,
chip->iobase, chip->irq);
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index b762c5b2ca10..e81173c41e5a 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1361,7 +1361,7 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-static DEFINE_PCI_DEVICE_TABLE(cx25821_pci_tbl) = {
+static const struct pci_device_id cx25821_pci_tbl[] = {
{
/* CX25821 Athena */
.vendor = 0x14f1,
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 400eb1c42d3f..a72579a9f67f 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -852,8 +852,6 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
chip->irq = pci->irq;
synchronize_irq(chip->irq);
- snd_card_set_dev(card, &pci->dev);
-
*rchip = chip;
*core_ptr = core;
@@ -876,8 +874,8 @@ static int cx88_audio_initdev(struct pci_dev *pci,
return (-ENOENT);
}
- err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx88_card_t), &card);
+ err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
+ sizeof(snd_cx88_card_t), &card);
if (err < 0)
return err;
@@ -931,9 +929,9 @@ error:
*/
static void cx88_audio_finidev(struct pci_dev *pci)
{
- struct cx88_audio_dev *card = pci_get_drvdata(pci);
+ struct snd_card *card = pci_get_drvdata(pci);
- snd_card_free((void *)card);
+ snd_card_free(card);
devno--;
}
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index e970cface70e..39b52929755a 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -145,11 +145,12 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
/* This is a no-op for us. We'll use the itv->instance */
/* (2) Create a card instance */
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, /* use first available id */
- SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
- THIS_MODULE, 0, &sc);
+ ret = snd_card_new(&itv->pdev->dev,
+ SNDRV_DEFAULT_IDX1, /* use first available id */
+ SNDRV_DEFAULT_STR1, /* xid from end of shortname*/
+ THIS_MODULE, 0, &sc);
if (ret) {
- IVTV_ALSA_ERR("%s: snd_card_create() failed with err %d\n",
+ IVTV_ALSA_ERR("%s: snd_card_new() failed with err %d\n",
__func__, ret);
goto err_exit;
}
diff --git a/drivers/media/pci/saa7134/Kconfig b/drivers/media/pci/saa7134/Kconfig
index 15b90d6e9130..7883393571e5 100644
--- a/drivers/media/pci/saa7134/Kconfig
+++ b/drivers/media/pci/saa7134/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_SAA7134
select VIDEO_TVEEPROM
select CRC32
select VIDEO_SAA6588 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_SAA6752HS if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for Philips SAA713x based
TV cards.
diff --git a/drivers/media/pci/saa7134/Makefile b/drivers/media/pci/saa7134/Makefile
index 35375480ed4d..58de9b085689 100644
--- a/drivers/media/pci/saa7134/Makefile
+++ b/drivers/media/pci/saa7134/Makefile
@@ -4,7 +4,7 @@ saa7134-y += saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o
saa7134-y += saa7134-video.o
saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
-obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
+obj-$(CONFIG_VIDEO_SAA7134) += saa7134.o saa7134-empress.o
obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dd67c8a400cc..e04a4d5d6672 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1072,8 +1072,8 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
if (!enable[devnum])
return -ENODEV;
- err = snd_card_create(index[devnum], id[devnum], THIS_MODULE,
- sizeof(snd_card_saa7134_t), &card);
+ err = snd_card_new(&dev->pci->dev, index[devnum], id[devnum],
+ THIS_MODULE, sizeof(snd_card_saa7134_t), &card);
if (err < 0)
return err;
@@ -1115,8 +1115,6 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
if ((err = snd_card_saa7134_pcm(chip, 0)) < 0)
goto __nodev;
- snd_card_set_dev(card, &chip->pci->dev);
-
/* End of "creation" */
strcpy(card->shortname, "SAA7134");
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}},
},
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
- /* Michael Krufky <mkrufky@m1k.net>
+ /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus,
* no support for analog television.
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 27d7ee709c58..1362b4aab473 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -751,6 +751,7 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
saa7134_input_fini(dev);
saa7134_vbi_fini(dev);
saa7134_tvaudio_fini(dev);
+ saa7134_video_fini(dev);
return 0;
}
@@ -802,7 +803,6 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
- vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, saa7134_boards[dev->board].name);
set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
@@ -1008,13 +1008,13 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* load i2c helpers */
if (card_is_empress(dev)) {
- struct v4l2_subdev *sd =
+ dev->empress_sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
- if (sd)
- sd->grp_id = GRP_EMPRESS;
+ if (dev->empress_sd)
+ dev->empress_sd->grp_id = GRP_EMPRESS;
}
if (saa7134_boards[dev->board].rds_addr) {
@@ -1046,6 +1046,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
printk(KERN_INFO "%s: Overlay support disabled.\n", dev->name);
dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
+ dev->video_dev->ctrl_handler = &dev->ctrl_handler;
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
@@ -1057,6 +1058,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
+ dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
@@ -1067,6 +1069,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
+ dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 3022eb2a7925..0a9047e754b9 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -23,12 +23,12 @@
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+
#include "saa7134-reg.h"
#include "saa7134.h"
-#include <media/saa6752hs.h>
-#include <media/v4l2-common.h>
-
/* ------------------------------------------------------------------ */
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -85,52 +85,54 @@ static int ts_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct saa7134_dev *dev = video_drvdata(file);
- int err;
+ struct saa7134_fh *fh;
- dprintk("open dev=%s\n", video_device_node_name(vdev));
- err = -EBUSY;
- if (!mutex_trylock(&dev->empress_tsq.vb_lock))
- return err;
- if (atomic_read(&dev->empress_users))
- goto done;
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ v4l2_fh_init(&fh->fh, vdev);
+ file->private_data = fh;
+ fh->is_empress = true;
+ v4l2_fh_add(&fh->fh);
/* Unmute audio */
saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));
- atomic_inc(&dev->empress_users);
- file->private_data = dev;
- err = 0;
-
-done:
- mutex_unlock(&dev->empress_tsq.vb_lock);
- return err;
+ return 0;
}
static int ts_release(struct file *file)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
- videobuf_stop(&dev->empress_tsq);
- videobuf_mmap_free(&dev->empress_tsq);
+ if (res_check(fh, RESOURCE_EMPRESS)) {
+ videobuf_stop(&dev->empress_tsq);
+ videobuf_mmap_free(&dev->empress_tsq);
- /* stop the encoder */
- ts_reset_encoder(dev);
+ /* stop the encoder */
+ ts_reset_encoder(dev);
- /* Mute audio */
- saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
- saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
-
- atomic_dec(&dev->empress_users);
+ /* Mute audio */
+ saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+ saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+ }
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
return 0;
}
static ssize_t
ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
+ if (res_locked(dev, RESOURCE_EMPRESS))
+ return -EBUSY;
if (!dev->empress_started)
ts_init_encoder(dev);
@@ -142,68 +144,27 @@ ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
ts_poll(struct file *file, struct poll_table_struct *wait)
{
- struct saa7134_dev *dev = file->private_data;
+ unsigned long req_events = poll_requested_events(wait);
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
+ unsigned int rc = 0;
- return videobuf_poll_stream(file, &dev->empress_tsq, wait);
+ if (v4l2_event_pending(&fh->fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
+ return rc | videobuf_poll_stream(file, &dev->empress_tsq, wait);
}
static int
ts_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
return videobuf_mmap_mapper(&dev->empress_tsq, vma);
}
-/*
- * This function is _not_ called directly, but from
- * video_generic_ioctl (and maybe others). userspace
- * copying is done already, arg is a kernel pointer.
- */
-
-static int empress_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct saa7134_dev *dev = file->private_data;
-
- strcpy(cap->driver, "saa7134");
- strlcpy(cap->card, saa7134_boards[dev->board].name,
- sizeof(cap->card));
- sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- return 0;
-}
-
-static int empress_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
-
- i->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(i->name, "CCIR656");
-
- return 0;
-}
-
-static int empress_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int empress_s_input(struct file *file, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
-
- return 0;
-}
-
static int empress_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -219,7 +180,7 @@ static int empress_enum_fmt_vid_cap(struct file *file, void *priv,
static int empress_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
saa_call_all(dev, video, g_mbus_fmt, &mbus_fmt);
@@ -236,7 +197,7 @@ static int empress_g_fmt_vid_cap(struct file *file, void *priv,
static int empress_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -254,7 +215,7 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
static int empress_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_dev *dev = file->private_data;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
@@ -269,175 +230,6 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int empress_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_reqbufs(&dev->empress_tsq, p);
-}
-
-static int empress_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_querybuf(&dev->empress_tsq, b);
-}
-
-static int empress_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_qbuf(&dev->empress_tsq, b);
-}
-
-static int empress_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_dqbuf(&dev->empress_tsq, b,
- file->f_flags & O_NONBLOCK);
-}
-
-static int empress_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_streamon(&dev->empress_tsq);
-}
-
-static int empress_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return videobuf_streamoff(&dev->empress_tsq);
-}
-
-static int empress_s_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7134_dev *dev = file->private_data;
- int err;
-
- /* count == 0 is abused in saa6752hs.c, so that special
- case is handled here explicitly. */
- if (ctrls->count == 0)
- return 0;
-
- if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
-
- err = saa_call_empress(dev, core, s_ext_ctrls, ctrls);
- ts_init_encoder(dev);
-
- return err;
-}
-
-static int empress_g_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *ctrls)
-{
- struct saa7134_dev *dev = file->private_data;
-
- if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return saa_call_empress(dev, core, g_ext_ctrls, ctrls);
-}
-
-static int empress_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_g_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_s_ctrl_internal(dev, NULL, c);
-}
-
-static int empress_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- /* Must be sorted from low to high control ID! */
- static const u32 user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_MUTE,
- V4L2_CID_HFLIP,
- 0
- };
-
- /* Must be sorted from low to high control ID! */
- static const u32 mpeg_ctrls[] = {
- V4L2_CID_MPEG_CLASS,
- V4L2_CID_MPEG_STREAM_TYPE,
- V4L2_CID_MPEG_STREAM_PID_PMT,
- V4L2_CID_MPEG_STREAM_PID_AUDIO,
- V4L2_CID_MPEG_STREAM_PID_VIDEO,
- V4L2_CID_MPEG_STREAM_PID_PCR,
- V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
- V4L2_CID_MPEG_AUDIO_ENCODING,
- V4L2_CID_MPEG_AUDIO_L2_BITRATE,
- V4L2_CID_MPEG_VIDEO_ENCODING,
- V4L2_CID_MPEG_VIDEO_ASPECT,
- V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
- V4L2_CID_MPEG_VIDEO_BITRATE,
- V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
- 0
- };
- static const u32 *ctrl_classes[] = {
- user_ctrls,
- mpeg_ctrls,
- NULL
- };
- struct saa7134_dev *dev = file->private_data;
-
- c->id = v4l2_ctrl_next(ctrl_classes, c->id);
- if (c->id == 0)
- return -EINVAL;
- if (c->id == V4L2_CID_USER_CLASS || c->id == V4L2_CID_MPEG_CLASS)
- return v4l2_ctrl_query_fill(c, 0, 0, 0, 0);
- if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
- return saa7134_queryctrl(file, priv, c);
- return saa_call_empress(dev, core, queryctrl, c);
-}
-
-static int empress_querymenu(struct file *file, void *priv,
- struct v4l2_querymenu *c)
-{
- struct saa7134_dev *dev = file->private_data;
-
- if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return saa_call_empress(dev, core, querymenu, c);
-}
-
-static int empress_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
- struct saa7134_dev *dev = file->private_data;
-
- return saa7134_s_std_internal(dev, NULL, id);
-}
-
-static int empress_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct saa7134_dev *dev = file->private_data;
-
- *id = dev->tvnorm->id;
- return 0;
-}
-
static const struct v4l2_file_operations ts_fops =
{
.owner = THIS_MODULE,
@@ -450,28 +242,29 @@ static const struct v4l2_file_operations ts_fops =
};
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
- .vidioc_querycap = empress_querycap,
+ .vidioc_querycap = saa7134_querycap,
.vidioc_enum_fmt_vid_cap = empress_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = empress_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = empress_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = empress_g_fmt_vid_cap,
- .vidioc_reqbufs = empress_reqbufs,
- .vidioc_querybuf = empress_querybuf,
- .vidioc_qbuf = empress_qbuf,
- .vidioc_dqbuf = empress_dqbuf,
- .vidioc_streamon = empress_streamon,
- .vidioc_streamoff = empress_streamoff,
- .vidioc_s_ext_ctrls = empress_s_ext_ctrls,
- .vidioc_g_ext_ctrls = empress_g_ext_ctrls,
- .vidioc_enum_input = empress_enum_input,
- .vidioc_g_input = empress_g_input,
- .vidioc_s_input = empress_s_input,
- .vidioc_queryctrl = empress_queryctrl,
- .vidioc_querymenu = empress_querymenu,
- .vidioc_g_ctrl = empress_g_ctrl,
- .vidioc_s_ctrl = empress_s_ctrl,
- .vidioc_s_std = empress_s_std,
- .vidioc_g_std = empress_g_std,
+ .vidioc_reqbufs = saa7134_reqbufs,
+ .vidioc_querybuf = saa7134_querybuf,
+ .vidioc_qbuf = saa7134_qbuf,
+ .vidioc_dqbuf = saa7134_dqbuf,
+ .vidioc_streamon = saa7134_streamon,
+ .vidioc_streamoff = saa7134_streamoff,
+ .vidioc_g_frequency = saa7134_g_frequency,
+ .vidioc_s_frequency = saa7134_s_frequency,
+ .vidioc_g_tuner = saa7134_g_tuner,
+ .vidioc_s_tuner = saa7134_s_tuner,
+ .vidioc_enum_input = saa7134_enum_input,
+ .vidioc_g_input = saa7134_g_input,
+ .vidioc_s_input = saa7134_s_input,
+ .vidioc_s_std = saa7134_s_std,
+ .vidioc_g_std = saa7134_g_std,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* ----------------------------------------------------------- */
@@ -501,9 +294,26 @@ static void empress_signal_change(struct saa7134_dev *dev)
schedule_work(&dev->empress_workqueue);
}
+static bool empress_ctrl_filter(const struct v4l2_ctrl *ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_HUE:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_PRIVATE_INVERT:
+ case V4L2_CID_PRIVATE_AUTOMUTE:
+ return true;
+ default:
+ return false;
+ }
+}
static int empress_init(struct saa7134_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->empress_ctrl_handler;
int err;
dprintk("%s: %s\n",dev->name,__func__);
@@ -516,6 +326,16 @@ static int empress_init(struct saa7134_dev *dev)
snprintf(dev->empress_dev->name, sizeof(dev->empress_dev->name),
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->empress_dev->flags);
+ v4l2_ctrl_handler_init(hdl, 21);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter);
+ if (dev->empress_sd)
+ v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL);
+ if (hdl->error) {
+ video_device_release(dev->empress_dev);
+ return hdl->error;
+ }
+ dev->empress_dev->ctrl_handler = hdl;
INIT_WORK(&dev->empress_workqueue, empress_signal_update);
@@ -551,6 +371,7 @@ static int empress_fini(struct saa7134_dev *dev)
return 0;
flush_work(&dev->empress_workqueue);
video_unregister_device(dev->empress_dev);
+ v4l2_ctrl_handler_free(&dev->empress_ctrl_handler);
dev->empress_dev = NULL;
return 0;
}
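
Rather than registering its own controls, the empress node now builds a control handler that inherits a filtered subset of the bridge driver's controls and, when present, the encoder subdevice's controls, then hangs that handler off the video_device. A minimal sketch of the inheritance pattern follows, not part of the patch; the example_* names are hypothetical, while v4l2_ctrl_handler_init(), v4l2_ctrl_add_handler() and the error handling mirror the control-framework API used above.

#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>

static bool example_filter(const struct v4l2_ctrl *ctrl)
{
        /* expose only the controls the second node should share */
        return ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST;
}

static int example_inherit_ctrls(struct v4l2_ctrl_handler *child,
                                 struct v4l2_ctrl_handler *parent,
                                 struct video_device *vdev)
{
        v4l2_ctrl_handler_init(child, 8);
        /* pull a filtered subset of the parent's controls into this handler */
        v4l2_ctrl_add_handler(child, parent, example_filter);
        if (child->error) {
                int err = child->error;

                v4l2_ctrl_handler_free(child);
                return err;
        }
        vdev->ctrl_handler = child;
        return 0;
}
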
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index e9aa94b807f1..d4da18d049f3 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -117,8 +117,7 @@ static int buffer_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
struct saa7134_tvnorm *norm = dev->tvnorm;
unsigned int lines, llength, size;
@@ -141,7 +140,7 @@ static int buffer_prepare(struct videobuf_queue *q,
buf->vb.width = llength;
buf->vb.height = lines;
buf->vb.size = size;
- buf->pt = &fh->pt_vbi;
+ buf->pt = &dev->pt_vbi;
err = videobuf_iolock(q,&buf->vb,NULL);
if (err)
@@ -166,8 +165,7 @@ static int buffer_prepare(struct videobuf_queue *q,
static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
int llength,lines;
lines = dev->tvnorm->vbi_v_stop_0 - dev->tvnorm->vbi_v_start_0 +1;
@@ -181,8 +179,7 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
saa7134_buffer_queue(dev,&dev->vbi_q,buf);
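
As with the video queue, the vbi buffer ops above now read their state through q->priv_data, which carries the saa7134_dev itself instead of a per-open file handle, so the fh->dev indirection disappears. A reduced sketch of that wiring, not part of the patch; the my_* names are hypothetical stand-ins, while videobuf_queue_sg_init() and the buf_queue signature are the legacy videobuf API this driver still uses.

#include <linux/pci.h>
#include <linux/spinlock.h>
#include <media/videobuf-dma-sg.h>

struct my_dev {
        struct videobuf_queue cap;
        spinlock_t slock;
        struct pci_dev *pci;
};

static void my_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
        struct my_dev *dev = q->priv_data;      /* the device, not a file handle */

        /* ... append vb to the DMA queue owned by dev ... */
}

static struct videobuf_queue_ops my_qops = {
        .buf_queue = my_buffer_queue,
        /* .buf_setup / .buf_prepare / .buf_release omitted from this sketch */
};

static void my_queue_init(struct my_dev *dev)
{
        videobuf_queue_sg_init(&dev->cap, &my_qops,
                               &dev->pci->dev, &dev->slock,
                               V4L2_BUF_TYPE_VIDEO_CAPTURE,
                               V4L2_FIELD_INTERLACED,
                               sizeof(struct videobuf_buffer),
                               dev, NULL);      /* priv_data = dev */
}
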
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index fb60da85bc2c..eb472b5b26a0 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -27,11 +27,13 @@
#include <linux/slab.h>
#include <linux/sort.h>
-#include "saa7134-reg.h"
-#include "saa7134.h"
#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
#include <media/saa6588.h>
+#include "saa7134-reg.h"
+#include "saa7134.h"
+
/* ------------------------------------------------------------------ */
unsigned int video_debug;
@@ -369,117 +371,6 @@ static struct saa7134_tvnorm tvnorms[] = {
};
#define TVNORMS ARRAY_SIZE(tvnorms)
-#define V4L2_CID_PRIVATE_INVERT (V4L2_CID_PRIVATE_BASE + 0)
-#define V4L2_CID_PRIVATE_Y_ODD (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PRIVATE_Y_EVEN (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_PRIVATE_AUTOMUTE (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 4)
-
-static const struct v4l2_queryctrl no_ctrl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
-};
-static const struct v4l2_queryctrl video_ctrls[] = {
- /* --- video --- */
- {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 68,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_HUE,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_HFLIP,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- /* --- audio --- */
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = -15,
- .maximum = 15,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- /* --- private --- */
- {
- .id = V4L2_CID_PRIVATE_INVERT,
- .name = "Invert",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_PRIVATE_Y_ODD,
- .name = "y offset odd field",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_PRIVATE_Y_EVEN,
- .name = "y offset even field",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },{
- .id = V4L2_CID_PRIVATE_AUTOMUTE,
- .name = "automute",
- .minimum = 0,
- .maximum = 1,
- .default_value = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- }
-};
-static const unsigned int CTRLS = ARRAY_SIZE(video_ctrls);
-
-static const struct v4l2_queryctrl* ctrl_by_id(unsigned int id)
-{
- unsigned int i;
-
- for (i = 0; i < CTRLS; i++)
- if (video_ctrls[i].id == id)
- return video_ctrls+i;
- return NULL;
-}
-
static struct saa7134_format* format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
@@ -514,16 +405,6 @@ static int res_get(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int
return 1;
}
-static int res_check(struct saa7134_fh *fh, unsigned int bit)
-{
- return (fh->resources & bit);
-}
-
-static int res_locked(struct saa7134_dev *dev, unsigned int bit)
-{
- return (dev->resources & bit);
-}
-
static
void res_free(struct saa7134_dev *dev, struct saa7134_fh *fh, unsigned int bits)
{
@@ -868,7 +749,7 @@ static int verify_preview(struct saa7134_dev *dev, struct v4l2_window *win, bool
return 0;
}
-static int start_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int start_preview(struct saa7134_dev *dev)
{
unsigned long base,control,bpl;
int err;
@@ -923,7 +804,7 @@ static int start_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
return 0;
}
-static int stop_preview(struct saa7134_dev *dev, struct saa7134_fh *fh)
+static int stop_preview(struct saa7134_dev *dev)
{
dev->ovenable = 0;
saa7134_set_dmabits(dev);
@@ -1018,8 +899,7 @@ static int buffer_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
unsigned int size;
int err;
@@ -1057,7 +937,7 @@ static int buffer_prepare(struct videobuf_queue *q,
buf->vb.size = size;
buf->vb.field = field;
buf->fmt = dev->fmt;
- buf->pt = &fh->pt_cap;
+ buf->pt = &dev->pt_cap;
dev->video_q.curr = NULL;
err = videobuf_iolock(q,&buf->vb,&dev->ovbuf);
@@ -1082,8 +962,7 @@ static int buffer_prepare(struct videobuf_queue *q,
static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
- struct saa7134_fh *fh = q->priv_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = q->priv_data;
*size = dev->fmt->depth * dev->width * dev->height >> 3;
if (0 == *count)
@@ -1094,10 +973,10 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
- struct saa7134_fh *fh = q->priv_data;
+ struct saa7134_dev *dev = q->priv_data;
struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
- saa7134_buffer_queue(fh->dev,&fh->dev->video_q,buf);
+ saa7134_buffer_queue(dev, &dev->video_q, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -1116,133 +995,56 @@ static struct videobuf_queue_ops video_qops = {
/* ------------------------------------------------------------------ */
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c)
+static int saa7134_s_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct v4l2_queryctrl* ctrl;
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- return -EINVAL;
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = dev->ctl_bright;
- break;
- case V4L2_CID_HUE:
- c->value = dev->ctl_hue;
- break;
- case V4L2_CID_CONTRAST:
- c->value = dev->ctl_contrast;
- break;
- case V4L2_CID_SATURATION:
- c->value = dev->ctl_saturation;
- break;
- case V4L2_CID_AUDIO_MUTE:
- c->value = dev->ctl_mute;
- break;
- case V4L2_CID_AUDIO_VOLUME:
- c->value = dev->ctl_volume;
- break;
- case V4L2_CID_PRIVATE_INVERT:
- c->value = dev->ctl_invert;
- break;
- case V4L2_CID_HFLIP:
- c->value = dev->ctl_mirror;
- break;
- case V4L2_CID_PRIVATE_Y_EVEN:
- c->value = dev->ctl_y_even;
- break;
- case V4L2_CID_PRIVATE_Y_ODD:
- c->value = dev->ctl_y_odd;
- break;
- case V4L2_CID_PRIVATE_AUTOMUTE:
- c->value = dev->ctl_automute;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_g_ctrl_internal);
-
-static int saa7134_g_ctrl(struct file *file, void *priv, struct v4l2_control *c)
-{
- struct saa7134_fh *fh = priv;
-
- return saa7134_g_ctrl_internal(fh->dev, fh, c);
-}
-
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c)
-{
- const struct v4l2_queryctrl* ctrl;
+ struct saa7134_dev *dev = container_of(ctrl->handler, struct saa7134_dev, ctrl_handler);
unsigned long flags;
int restart_overlay = 0;
- int err;
- err = -EINVAL;
-
- mutex_lock(&dev->lock);
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- goto error;
-
- dprintk("set_control name=%s val=%d\n",ctrl->name,c->value);
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER:
- if (c->value < ctrl->minimum)
- c->value = ctrl->minimum;
- if (c->value > ctrl->maximum)
- c->value = ctrl->maximum;
- break;
- default:
- /* nothing */;
- }
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- dev->ctl_bright = c->value;
- saa_writeb(SAA7134_DEC_LUMA_BRIGHT, dev->ctl_bright);
+ dev->ctl_bright = ctrl->val;
+ saa_writeb(SAA7134_DEC_LUMA_BRIGHT, ctrl->val);
break;
case V4L2_CID_HUE:
- dev->ctl_hue = c->value;
- saa_writeb(SAA7134_DEC_CHROMA_HUE, dev->ctl_hue);
+ dev->ctl_hue = ctrl->val;
+ saa_writeb(SAA7134_DEC_CHROMA_HUE, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- dev->ctl_contrast = c->value;
+ dev->ctl_contrast = ctrl->val;
saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
break;
case V4L2_CID_SATURATION:
- dev->ctl_saturation = c->value;
+ dev->ctl_saturation = ctrl->val;
saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
break;
case V4L2_CID_AUDIO_MUTE:
- dev->ctl_mute = c->value;
+ dev->ctl_mute = ctrl->val;
saa7134_tvaudio_setmute(dev);
break;
case V4L2_CID_AUDIO_VOLUME:
- dev->ctl_volume = c->value;
+ dev->ctl_volume = ctrl->val;
saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
break;
case V4L2_CID_PRIVATE_INVERT:
- dev->ctl_invert = c->value;
+ dev->ctl_invert = ctrl->val;
saa_writeb(SAA7134_DEC_LUMA_CONTRAST,
dev->ctl_invert ? -dev->ctl_contrast : dev->ctl_contrast);
saa_writeb(SAA7134_DEC_CHROMA_SATURATION,
dev->ctl_invert ? -dev->ctl_saturation : dev->ctl_saturation);
break;
case V4L2_CID_HFLIP:
- dev->ctl_mirror = c->value;
+ dev->ctl_mirror = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_Y_EVEN:
- dev->ctl_y_even = c->value;
+ dev->ctl_y_even = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_Y_ODD:
- dev->ctl_y_odd = c->value;
+ dev->ctl_y_odd = ctrl->val;
restart_overlay = 1;
break;
case V4L2_CID_PRIVATE_AUTOMUTE:
@@ -1252,7 +1054,7 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
tda9887_cfg.tuner = TUNER_TDA9887;
tda9887_cfg.priv = &dev->tda9887_conf;
- dev->ctl_automute = c->value;
+ dev->ctl_automute = ctrl->val;
if (dev->tda9887_conf) {
if (dev->ctl_automute)
dev->tda9887_conf |= TDA9887_AUTOMUTE;
@@ -1264,27 +1066,15 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
break;
}
default:
- goto error;
+ return -EINVAL;
}
- if (restart_overlay && fh && res_check(fh, RESOURCE_OVERLAY)) {
- spin_lock_irqsave(&dev->slock,flags);
- stop_preview(dev,fh);
- start_preview(dev,fh);
- spin_unlock_irqrestore(&dev->slock,flags);
+ if (restart_overlay && res_locked(dev, RESOURCE_OVERLAY)) {
+ spin_lock_irqsave(&dev->slock, flags);
+ stop_preview(dev);
+ start_preview(dev);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
- err = 0;
-
-error:
- mutex_unlock(&dev->lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(saa7134_s_ctrl_internal);
-
-static int saa7134_s_ctrl(struct file *file, void *f, struct v4l2_control *c)
-{
- struct saa7134_fh *fh = f;
-
- return saa7134_s_ctrl_internal(fh->dev, fh, c);
+ return 0;
}
/* ------------------------------------------------------------------ */
@@ -1292,15 +1082,16 @@ static int saa7134_s_ctrl(struct file *file, void *f, struct v4l2_control *c)
static struct videobuf_queue *saa7134_queue(struct file *file)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
struct videobuf_queue *q = NULL;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
- q = &fh->cap;
+ q = fh->is_empress ? &dev->empress_tsq : &dev->cap;
break;
case VFL_TYPE_VBI:
- q = &fh->vbi;
+ q = &dev->vbi;
break;
default:
BUG();
@@ -1311,9 +1102,10 @@ static struct videobuf_queue *saa7134_queue(struct file *file)
static int saa7134_resource(struct file *file)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_fh *fh = file->private_data;
if (vdev->vfl_type == VFL_TYPE_GRABBER)
- return RESOURCE_VIDEO;
+ return fh->is_empress ? RESOURCE_EMPRESS : RESOURCE_VIDEO;
if (vdev->vfl_type == VFL_TYPE_VBI)
return RESOURCE_VBI;
@@ -1335,22 +1127,6 @@ static int video_open(struct file *file)
v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
- fh->dev = dev;
-
- videobuf_queue_sg_init(&fh->cap, &video_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct saa7134_buf),
- fh, NULL);
- videobuf_queue_sg_init(&fh->vbi, &saa7134_vbi_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct saa7134_buf),
- fh, NULL);
- saa7134_pgtable_alloc(dev->pci,&fh->pt_cap);
- saa7134_pgtable_alloc(dev->pci,&fh->pt_vbi);
if (vdev->vfl_type == VFL_TYPE_RADIO) {
/* switch to radio mode */
@@ -1369,17 +1145,18 @@ static ssize_t
video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
- if (res_locked(fh->dev,RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
return -EBUSY;
return videobuf_read_one(saa7134_queue(file),
data, count, ppos,
file->f_flags & O_NONBLOCK);
case VFL_TYPE_VBI:
- if (!res_get(fh->dev,fh,RESOURCE_VBI))
+ if (!res_get(dev, fh, RESOURCE_VBI))
return -EBUSY;
return videobuf_read_stream(saa7134_queue(file),
data, count, ppos, 1,
@@ -1394,52 +1171,59 @@ video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
video_poll(struct file *file, struct poll_table_struct *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
+ if (v4l2_event_pending(&fh->fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->fh.wait, wait);
+
if (vdev->vfl_type == VFL_TYPE_VBI)
- return videobuf_poll_stream(file, &fh->vbi, wait);
+ return rc | videobuf_poll_stream(file, &dev->vbi, wait);
- if (res_check(fh,RESOURCE_VIDEO)) {
- mutex_lock(&fh->cap.vb_lock);
- if (!list_empty(&fh->cap.stream))
- buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
+ if (res_check(fh, RESOURCE_VIDEO)) {
+ mutex_lock(&dev->cap.vb_lock);
+ if (!list_empty(&dev->cap.stream))
+ buf = list_entry(dev->cap.stream.next, struct videobuf_buffer, stream);
} else {
- mutex_lock(&fh->cap.vb_lock);
- if (UNSET == fh->cap.read_off) {
+ mutex_lock(&dev->cap.vb_lock);
+ if (UNSET == dev->cap.read_off) {
/* need to capture a new frame */
- if (res_locked(fh->dev,RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
goto err;
- if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field))
+ if (0 != dev->cap.ops->buf_prepare(&dev->cap,
+ dev->cap.read_buf, dev->cap.field))
goto err;
- fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
- fh->cap.read_off = 0;
+ dev->cap.ops->buf_queue(&dev->cap, dev->cap.read_buf);
+ dev->cap.read_off = 0;
}
- buf = fh->cap.read_buf;
+ buf = dev->cap.read_buf;
}
if (!buf)
goto err;
poll_wait(file, &buf->done, wait);
- if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
- mutex_unlock(&fh->cap.vb_lock);
+ if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR)
+ rc |= POLLIN | POLLRDNORM;
+ mutex_unlock(&dev->cap.vb_lock);
return rc;
err:
- mutex_unlock(&fh->cap.vb_lock);
- return POLLERR;
+ mutex_unlock(&dev->cap.vb_lock);
+ return rc | POLLERR;
}
static int video_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = file->private_data;
struct saa6588_command cmd;
unsigned long flags;
@@ -1448,26 +1232,28 @@ static int video_release(struct file *file)
/* turn off overlay */
if (res_check(fh, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock,flags);
- stop_preview(dev,fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock,flags);
- res_free(dev,fh,RESOURCE_OVERLAY);
+ res_free(dev, fh, RESOURCE_OVERLAY);
}
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO)) {
pm_qos_remove_request(&dev->qos_request);
- videobuf_streamoff(&fh->cap);
- res_free(dev,fh,RESOURCE_VIDEO);
+ videobuf_streamoff(&dev->cap);
+ res_free(dev, fh, RESOURCE_VIDEO);
+ videobuf_mmap_free(&dev->cap);
}
- if (fh->cap.read_buf) {
- buffer_release(&fh->cap,fh->cap.read_buf);
- kfree(fh->cap.read_buf);
+ if (dev->cap.read_buf) {
+ buffer_release(&dev->cap, dev->cap.read_buf);
+ kfree(dev->cap.read_buf);
}
/* stop vbi capture */
if (res_check(fh, RESOURCE_VBI)) {
- videobuf_stop(&fh->vbi);
- res_free(dev,fh,RESOURCE_VBI);
+ videobuf_stop(&dev->vbi);
+ res_free(dev, fh, RESOURCE_VBI);
+ videobuf_mmap_free(&dev->vbi);
}
/* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
@@ -1480,12 +1266,6 @@ static int video_release(struct file *file)
if (vdev->vfl_type == VFL_TYPE_RADIO)
saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
- /* free stuff */
- videobuf_mmap_free(&fh->cap);
- videobuf_mmap_free(&fh->vbi);
- saa7134_pgtable_free(dev->pci,&fh->pt_cap);
- saa7134_pgtable_free(dev->pci,&fh->pt_vbi);
-
v4l2_fh_del(&fh->fh);
v4l2_fh_exit(&fh->fh);
file->private_data = NULL;
@@ -1501,11 +1281,11 @@ static int video_mmap(struct file *file, struct vm_area_struct * vma)
static ssize_t radio_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
cmd.block_count = count/3;
+ cmd.nonblocking = file->f_flags & O_NONBLOCK;
cmd.buffer = data;
cmd.instance = file;
cmd.result = -ENODEV;
@@ -1517,16 +1297,16 @@ static ssize_t radio_read(struct file *file, char __user *data,
static unsigned int radio_poll(struct file *file, poll_table *wait)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
+ unsigned int rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
- cmd.result = -ENODEV;
+ cmd.result = 0;
saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
- return cmd.result;
+ return rc | cmd.result;
}
/* ------------------------------------------------------------------ */
@@ -1534,8 +1314,7 @@ static unsigned int radio_poll(struct file *file, poll_table *wait)
static int saa7134_try_get_set_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_tvnorm *norm = dev->tvnorm;
memset(&f->fmt.vbi.reserved, 0, sizeof(f->fmt.vbi.reserved));
@@ -1555,12 +1334,11 @@ static int saa7134_try_get_set_fmt_vbi_cap(struct file *file, void *priv,
static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
- f->fmt.pix.field = fh->cap.field;
+ f->fmt.pix.field = dev->cap.field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -1574,8 +1352,7 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
static int saa7134_g_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_clip __user *clips = f->fmt.win.clips;
u32 clipcount = f->fmt.win.clipcount;
int err = 0;
@@ -1607,8 +1384,7 @@ static int saa7134_g_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_format *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1659,8 +1435,7 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
static int saa7134_try_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (saa7134_no_overlay > 0) {
printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
@@ -1675,8 +1450,7 @@ static int saa7134_try_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
err = saa7134_try_fmt_vid_cap(file, priv, f);
@@ -1686,15 +1460,14 @@ static int saa7134_s_fmt_vid_cap(struct file *file, void *priv,
dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- fh->cap.field = f->fmt.pix.field;
+ dev->cap.field = f->fmt.pix.field;
return 0;
}
static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
unsigned long flags;
@@ -1719,10 +1492,10 @@ static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
return -EFAULT;
}
- if (res_check(fh, RESOURCE_OVERLAY)) {
+ if (res_check(priv, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
- start_preview(dev, fh);
+ stop_preview(dev);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1730,26 +1503,9 @@ static int saa7134_s_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if ((c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1) &&
- (c->id < V4L2_CID_PRIVATE_BASE ||
- c->id >= V4L2_CID_PRIVATE_LASTP1))
- return -EINVAL;
- ctrl = ctrl_by_id(c->id);
- *c = (NULL != ctrl) ? *ctrl : no_ctrl;
- return 0;
-}
-EXPORT_SYMBOL_GPL(saa7134_queryctrl);
-
-static int saa7134_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
unsigned int n;
n = i->index;
@@ -1769,27 +1525,27 @@ static int saa7134_enum_input(struct file *file, void *priv,
if (0 != (v1 & 0x40))
i->status |= V4L2_IN_ST_NO_H_LOCK;
if (0 != (v2 & 0x40))
- i->status |= V4L2_IN_ST_NO_SYNC;
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
if (0 != (v2 & 0x0e))
i->status |= V4L2_IN_ST_MACROVISION;
}
i->std = SAA7134_NORMS;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_enum_input);
-static int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*i = dev->ctl_input;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_input);
-static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
+int saa7134_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (i >= SAA7134_INPUT_MAX)
return -EINVAL;
@@ -1800,13 +1556,14 @@ static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
mutex_unlock(&dev->lock);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_input);
-static int saa7134_querycap(struct file *file, void *priv,
+int saa7134_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
+ struct saa7134_fh *fh = priv;
u32 radio_caps, video_caps, vbi_caps;
unsigned int tuner_type = dev->tuner_type;
@@ -1825,7 +1582,7 @@ static int saa7134_querycap(struct file *file, void *priv,
radio_caps |= V4L2_CAP_RDS_CAPTURE;
video_caps = V4L2_CAP_VIDEO_CAPTURE;
- if (saa7134_no_overlay <= 0)
+ if (saa7134_no_overlay <= 0 && !fh->is_empress)
video_caps |= V4L2_CAP_VIDEO_OVERLAY;
vbi_caps = V4L2_CAP_VBI_CAPTURE;
@@ -1851,14 +1608,17 @@ static int saa7134_querycap(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_querycap);
-int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id id)
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
{
+ struct saa7134_dev *dev = video_drvdata(file);
+ struct saa7134_fh *fh = priv;
unsigned long flags;
unsigned int i;
v4l2_std_id fixup;
- if (!fh && res_locked(dev, RESOURCE_OVERLAY)) {
+ if (fh->is_empress && res_locked(dev, RESOURCE_OVERLAY)) {
/* Don't change the std from the mpeg device
if overlay is active. */
return -EBUSY;
@@ -1898,15 +1658,15 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
id = tvnorms[i].id;
mutex_lock(&dev->lock);
- if (fh && res_check(fh, RESOURCE_OVERLAY)) {
+ if (!fh->is_empress && res_check(fh, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
set_tvnorm(dev, &tvnorms[i]);
spin_lock_irqsave(&dev->slock, flags);
- start_preview(dev, fh);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
} else
set_tvnorm(dev, &tvnorms[i]);
@@ -1915,29 +1675,21 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
mutex_unlock(&dev->lock);
return 0;
}
-EXPORT_SYMBOL_GPL(saa7134_s_std_internal);
+EXPORT_SYMBOL_GPL(saa7134_s_std);
-static int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id)
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7134_fh *fh = priv;
-
- return saa7134_s_std_internal(fh->dev, fh, id);
-}
-
-static int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*id = dev->tvnorm->id;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_std);
static int saa7134_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cap)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1959,8 +1711,7 @@ static int saa7134_cropcap(struct file *file, void *priv,
static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
@@ -1971,22 +1722,17 @@ static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
struct v4l2_rect *c = &dev->crop_current;
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- if (crop->c.height < 0)
- return -EINVAL;
- if (crop->c.width < 0)
- return -EINVAL;
- if (res_locked(fh->dev, RESOURCE_OVERLAY))
+ if (res_locked(dev, RESOURCE_OVERLAY))
return -EBUSY;
- if (res_locked(fh->dev, RESOURCE_VIDEO))
+ if (res_locked(dev, RESOURCE_VIDEO))
return -EBUSY;
*c = crop->c;
@@ -2006,11 +1752,10 @@ static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *cr
return 0;
}
-static int saa7134_g_tuner(struct file *file, void *priv,
+int saa7134_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int n;
if (0 != t->index)
@@ -2037,12 +1782,12 @@ static int saa7134_g_tuner(struct file *file, void *priv,
t->signal = 0xffff;
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_tuner);
-static int saa7134_s_tuner(struct file *file, void *priv,
+int saa7134_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int rx, mode;
if (0 != t->index)
@@ -2058,12 +1803,12 @@ static int saa7134_s_tuner(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_tuner);
-static int saa7134_g_frequency(struct file *file, void *priv,
+int saa7134_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != f->tuner)
return -EINVAL;
@@ -2072,12 +1817,12 @@ static int saa7134_g_frequency(struct file *file, void *priv,
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_g_frequency);
-static int saa7134_s_frequency(struct file *file, void *priv,
+int saa7134_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != f->tuner)
return -EINVAL;
@@ -2089,6 +1834,7 @@ static int saa7134_s_frequency(struct file *file, void *priv,
mutex_unlock(&dev->lock);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_s_frequency);
static int saa7134_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
@@ -2126,8 +1872,7 @@ static int saa7134_enum_fmt_vid_overlay(struct file *file, void *priv,
static int saa7134_g_fbuf(struct file *file, void *f,
struct v4l2_framebuffer *fb)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
*fb = dev->ovbuf;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
@@ -2138,8 +1883,7 @@ static int saa7134_g_fbuf(struct file *file, void *f,
static int saa7134_s_fbuf(struct file *file, void *f,
const struct v4l2_framebuffer *fb)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_format *fmt;
if (!capable(CAP_SYS_ADMIN) &&
@@ -2160,10 +1904,9 @@ static int saa7134_s_fbuf(struct file *file, void *f,
return 0;
}
-static int saa7134_overlay(struct file *file, void *f, unsigned int on)
+static int saa7134_overlay(struct file *file, void *priv, unsigned int on)
{
- struct saa7134_fh *fh = f;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
unsigned long flags;
if (on) {
@@ -2172,54 +1915,57 @@ static int saa7134_overlay(struct file *file, void *f, unsigned int on)
return -EINVAL;
}
- if (!res_get(dev, fh, RESOURCE_OVERLAY))
+ if (!res_get(dev, priv, RESOURCE_OVERLAY))
return -EBUSY;
spin_lock_irqsave(&dev->slock, flags);
- start_preview(dev, fh);
+ start_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
}
if (!on) {
- if (!res_check(fh, RESOURCE_OVERLAY))
+ if (!res_check(priv, RESOURCE_OVERLAY))
return -EINVAL;
spin_lock_irqsave(&dev->slock, flags);
- stop_preview(dev, fh);
+ stop_preview(dev);
spin_unlock_irqrestore(&dev->slock, flags);
- res_free(dev, fh, RESOURCE_OVERLAY);
+ res_free(dev, priv, RESOURCE_OVERLAY);
}
return 0;
}
-static int saa7134_reqbufs(struct file *file, void *priv,
+int saa7134_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
return videobuf_reqbufs(saa7134_queue(file), p);
}
+EXPORT_SYMBOL_GPL(saa7134_reqbufs);
-static int saa7134_querybuf(struct file *file, void *priv,
+int saa7134_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
return videobuf_querybuf(saa7134_queue(file), b);
}
+EXPORT_SYMBOL_GPL(saa7134_querybuf);
-static int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
return videobuf_qbuf(saa7134_queue(file), b);
}
+EXPORT_SYMBOL_GPL(saa7134_qbuf);
-static int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
return videobuf_dqbuf(saa7134_queue(file), b,
file->f_flags & O_NONBLOCK);
}
+EXPORT_SYMBOL_GPL(saa7134_dqbuf);
-static int saa7134_streamon(struct file *file, void *priv,
+int saa7134_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
int res = saa7134_resource(file);
- if (!res_get(dev, fh, res))
+ if (!res_get(dev, priv, res))
return -EBUSY;
/* The SAA7134 has a 1K FIFO; the datasheet suggests that when
@@ -2229,36 +1975,37 @@ static int saa7134_streamon(struct file *file, void *priv,
* Unfortunately, I lack register-level documentation to check the
* Linux FIFO setup and confirm the perfect value.
*/
- pm_qos_add_request(&dev->qos_request,
- PM_QOS_CPU_DMA_LATENCY,
- 20);
+ if (res != RESOURCE_EMPRESS)
+ pm_qos_add_request(&dev->qos_request,
+ PM_QOS_CPU_DMA_LATENCY, 20);
return videobuf_streamon(saa7134_queue(file));
}
+EXPORT_SYMBOL_GPL(saa7134_streamon);
-static int saa7134_streamoff(struct file *file, void *priv,
+int saa7134_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
int res = saa7134_resource(file);
- pm_qos_remove_request(&dev->qos_request);
+ if (res != RESOURCE_EMPRESS)
+ pm_qos_remove_request(&dev->qos_request);
err = videobuf_streamoff(saa7134_queue(file));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ res_free(dev, priv, res);
return 0;
}
+EXPORT_SYMBOL_GPL(saa7134_streamoff);
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vidioc_g_register (struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
reg->val = saa_readb(reg->reg & 0xffffff);
reg->size = 1;
@@ -2268,8 +2015,7 @@ static int vidioc_g_register (struct file *file, void *priv,
static int vidioc_s_register (struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct saa7134_fh *fh = priv;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
saa_writeb(reg->reg & 0xffffff, reg->val);
return 0;
@@ -2279,8 +2025,7 @@ static int vidioc_s_register (struct file *file, void *priv,
static int radio_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2299,8 +2044,7 @@ static int radio_g_tuner(struct file *file, void *priv,
static int radio_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct saa7134_fh *fh = file->private_data;
- struct saa7134_dev *dev = fh->dev;
+ struct saa7134_dev *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2309,50 +2053,6 @@ static int radio_s_tuner(struct file *file, void *priv,
return 0;
}
-static int radio_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
-
- strcpy(i->name, "Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int radio_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return 0;
-}
-
-static int radio_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
- return 0;
-}
-
-static int radio_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- const struct v4l2_queryctrl *ctrl;
-
- if (c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
- ctrl = ctrl_by_id(c->id);
- *c = *ctrl;
- } else
- *c = no_ctrl;
- return 0;
-}
-
static const struct v4l2_file_operations video_fops =
{
.owner = THIS_MODULE,
@@ -2387,9 +2087,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enum_input = saa7134_enum_input,
.vidioc_g_input = saa7134_g_input,
.vidioc_s_input = saa7134_s_input,
- .vidioc_queryctrl = saa7134_queryctrl,
- .vidioc_g_ctrl = saa7134_g_ctrl,
- .vidioc_s_ctrl = saa7134_s_ctrl,
.vidioc_streamon = saa7134_streamon,
.vidioc_streamoff = saa7134_streamoff,
.vidioc_g_tuner = saa7134_g_tuner,
@@ -2405,6 +2102,9 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct v4l2_file_operations radio_fops = {
@@ -2419,16 +2119,11 @@ static const struct v4l2_file_operations radio_fops = {
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_querycap = saa7134_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_input = radio_s_input,
- .vidioc_s_std = radio_s_std,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_input = radio_g_input,
- .vidioc_g_ctrl = saa7134_g_ctrl,
- .vidioc_s_ctrl = saa7134_s_ctrl,
.vidioc_g_frequency = saa7134_g_frequency,
.vidioc_s_frequency = saa7134_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* ----------------------------------------------------------- */
@@ -2447,8 +2142,55 @@ struct video_device saa7134_radio_template = {
.ioctl_ops = &radio_ioctl_ops,
};
+static const struct v4l2_ctrl_ops saa7134_ctrl_ops = {
+ .s_ctrl = saa7134_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_invert = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_INVERT,
+ .name = "Invert",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_odd = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_Y_ODD,
+ .name = "Y Offset Odd Field",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 128,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_y_even = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_Y_EVEN,
+ .name = "Y Offset Even Field",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 128,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config saa7134_ctrl_automute = {
+ .ops = &saa7134_ctrl_ops,
+ .id = V4L2_CID_PRIVATE_AUTOMUTE,
+ .name = "Automute",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
int saa7134_video_init1(struct saa7134_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+
/* sanitycheck insmod options */
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
@@ -2456,17 +2198,38 @@ int saa7134_video_init1(struct saa7134_dev *dev)
gbufsize = gbufsize_max;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
- /* put some sensible defaults into the data structures ... */
- dev->ctl_bright = ctrl_by_id(V4L2_CID_BRIGHTNESS)->default_value;
- dev->ctl_contrast = ctrl_by_id(V4L2_CID_CONTRAST)->default_value;
- dev->ctl_hue = ctrl_by_id(V4L2_CID_HUE)->default_value;
- dev->ctl_saturation = ctrl_by_id(V4L2_CID_SATURATION)->default_value;
- dev->ctl_volume = ctrl_by_id(V4L2_CID_AUDIO_VOLUME)->default_value;
- dev->ctl_mute = 1; // ctrl_by_id(V4L2_CID_AUDIO_MUTE)->default_value;
- dev->ctl_invert = ctrl_by_id(V4L2_CID_PRIVATE_INVERT)->default_value;
- dev->ctl_automute = ctrl_by_id(V4L2_CID_PRIVATE_AUTOMUTE)->default_value;
-
- if (dev->tda9887_conf && dev->ctl_automute)
+ v4l2_ctrl_handler_init(hdl, 11);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 68);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7134_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, -15, 15, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_invert, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_odd, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_y_even, NULL);
+ v4l2_ctrl_new_custom(hdl, &saa7134_ctrl_automute, NULL);
+ if (hdl->error)
+ return hdl->error;
+ if (card_has_radio(dev)) {
+ hdl = &dev->radio_ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler,
+ v4l2_ctrl_radio_filter);
+ if (hdl->error)
+ return hdl->error;
+ }
+ dev->ctl_mute = 1;
+
+ if (dev->tda9887_conf && saa7134_ctrl_automute.def)
dev->tda9887_conf |= TDA9887_AUTOMUTE;
dev->automute = 0;
@@ -2489,9 +2252,34 @@ int saa7134_video_init1(struct saa7134_dev *dev)
if (saa7134_boards[dev->board].video_out)
saa7134_videoport_init(dev);
+ videobuf_queue_sg_init(&dev->cap, &video_qops,
+ &dev->pci->dev, &dev->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct saa7134_buf),
+ dev, NULL);
+ videobuf_queue_sg_init(&dev->vbi, &saa7134_vbi_qops,
+ &dev->pci->dev, &dev->slock,
+ V4L2_BUF_TYPE_VBI_CAPTURE,
+ V4L2_FIELD_SEQ_TB,
+ sizeof(struct saa7134_buf),
+ dev, NULL);
+ saa7134_pgtable_alloc(dev->pci, &dev->pt_cap);
+ saa7134_pgtable_alloc(dev->pci, &dev->pt_vbi);
+
return 0;
}
+void saa7134_video_fini(struct saa7134_dev *dev)
+{
+ /* free stuff */
+ saa7134_pgtable_free(dev->pci, &dev->pt_cap);
+ saa7134_pgtable_free(dev->pci, &dev->pt_vbi);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ if (card_has_radio(dev))
+ v4l2_ctrl_handler_free(&dev->radio_ctrl_handler);
+}
+
int saa7134_videoport_init(struct saa7134_dev *dev)
{
/* enable video output */
@@ -2533,6 +2321,7 @@ int saa7134_video_init2(struct saa7134_dev *dev)
/* init video hw */
set_tvnorm(dev,&tvnorms[0]);
video_mux(dev,0);
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev,dev->ctl_volume);
return 0;
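
saa7134_video_init1() above replaces the old ctrl_by_id() default lookups with control registration: standard controls go through v4l2_ctrl_new_std(), the driver-private ones through v4l2_ctrl_new_custom(), and saa7134_video_init2() later calls v4l2_ctrl_handler_setup() so every default is applied through saa7134_s_ctrl() exactly once. A condensed sketch of that register-then-apply pattern, not part of the patch; example_* and V4L2_CID_EXAMPLE_PRIV are hypothetical, the v4l2_ctrl_* calls are the framework API.

#include <linux/errno.h>
#include <media/v4l2-ctrls.h>

#define V4L2_CID_EXAMPLE_PRIV   (V4L2_CID_USER_BASE + 0x1000)

static int example_s_ctrl(struct v4l2_ctrl *ctrl)
{
        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:
                /* program ctrl->val into the hardware here */
                return 0;
        case V4L2_CID_EXAMPLE_PRIV:
                /* private boolean: nothing to do in this sketch */
                return 0;
        default:
                return -EINVAL;
        }
}

static const struct v4l2_ctrl_ops example_ctrl_ops = {
        .s_ctrl = example_s_ctrl,
};

static const struct v4l2_ctrl_config example_priv_ctrl = {
        .ops = &example_ctrl_ops,
        .id = V4L2_CID_EXAMPLE_PRIV,
        .name = "Example Boolean",
        .type = V4L2_CTRL_TYPE_BOOLEAN,
        .min = 0,
        .max = 1,
        .step = 1,
        .def = 1,
};

static int example_register_ctrls(struct v4l2_ctrl_handler *hdl)
{
        v4l2_ctrl_handler_init(hdl, 2);
        v4l2_ctrl_new_std(hdl, &example_ctrl_ops,
                          V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
        v4l2_ctrl_new_custom(hdl, &example_priv_ctrl, NULL);
        if (hdl->error)
                return hdl->error;
        /* push every default through example_s_ctrl() once */
        return v4l2_ctrl_handler_setup(hdl);
}
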
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 8d1453a48014..2474e848f2c0 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -37,6 +37,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
#include <media/tuner.h>
#include <media/rc-core.h>
#include <media/ir-kbd-i2c.h>
@@ -410,12 +411,18 @@ struct saa7134_board {
#define card(dev) (saa7134_boards[dev->board])
#define card_in(dev,n) (saa7134_boards[dev->board].inputs[n])
+#define V4L2_CID_PRIVATE_INVERT (V4L2_CID_USER_SAA7134_BASE + 0)
+#define V4L2_CID_PRIVATE_Y_ODD (V4L2_CID_USER_SAA7134_BASE + 1)
+#define V4L2_CID_PRIVATE_Y_EVEN (V4L2_CID_USER_SAA7134_BASE + 2)
+#define V4L2_CID_PRIVATE_AUTOMUTE (V4L2_CID_USER_SAA7134_BASE + 3)
+
/* ----------------------------------------------------------- */
/* device / file handle status */
#define RESOURCE_OVERLAY 1
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
+#define RESOURCE_EMPRESS 8
#define INTERLACE_AUTO 0
#define INTERLACE_ON 1
@@ -470,16 +477,8 @@ struct saa7134_dmaqueue {
/* video filehandle status */
struct saa7134_fh {
struct v4l2_fh fh;
- struct saa7134_dev *dev;
+ bool is_empress;
unsigned int resources;
-
- /* video capture */
- struct videobuf_queue cap;
- struct saa7134_pgtable pt_cap;
-
- /* vbi capture */
- struct videobuf_queue vbi;
- struct saa7134_pgtable pt_vbi;
};
/* dmasound dsp status */
@@ -589,7 +588,11 @@ struct saa7134_dev {
/* video+ts+vbi capture */
struct saa7134_dmaqueue video_q;
+ struct videobuf_queue cap;
+ struct saa7134_pgtable pt_cap;
struct saa7134_dmaqueue vbi_q;
+ struct videobuf_queue vbi;
+ struct saa7134_pgtable pt_vbi;
unsigned int video_fieldcount;
unsigned int vbi_fieldcount;
struct saa7134_format *fmt;
@@ -599,6 +602,7 @@ struct saa7134_dev {
/* various v4l controls */
struct saa7134_tvnorm *tvnorm; /* video */
struct saa7134_tvaudio *tvaudio;
+ struct v4l2_ctrl_handler ctrl_handler;
unsigned int ctl_input;
int ctl_bright;
int ctl_contrast;
@@ -626,6 +630,7 @@ struct saa7134_dev {
int last_carrier;
int nosignal;
unsigned int insuspend;
+ struct v4l2_ctrl_handler radio_ctrl_handler;
/* I2C keyboard data */
struct IR_i2c_init_data init_data;
@@ -638,10 +643,11 @@ struct saa7134_dev {
/* SAA7134_MPEG_EMPRESS only */
struct video_device *empress_dev;
+ struct v4l2_subdev *empress_sd;
struct videobuf_queue empress_tsq;
- atomic_t empress_users;
struct work_struct empress_workqueue;
int empress_started;
+ struct v4l2_ctrl_handler empress_ctrl_handler;
#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
/* SAA7134_MPEG_DVB only */
@@ -699,6 +705,16 @@ struct saa7134_dev {
_rc; \
})
+static inline int res_check(struct saa7134_fh *fh, unsigned int bit)
+{
+ return fh->resources & bit;
+}
+
+static inline int res_locked(struct saa7134_dev *dev, unsigned int bit)
+{
+ return dev->resources & bit;
+}
+
/* ----------------------------------------------------------- */
/* saa7134-core.c */
@@ -761,10 +777,31 @@ extern unsigned int video_debug;
extern struct video_device saa7134_video_template;
extern struct video_device saa7134_radio_template;
-int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_g_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, struct v4l2_control *c);
-int saa7134_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c);
-int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id id);
+int saa7134_s_std(struct file *file, void *priv, v4l2_std_id id);
+int saa7134_g_std(struct file *file, void *priv, v4l2_std_id *id);
+int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i);
+int saa7134_g_input(struct file *file, void *priv, unsigned int *i);
+int saa7134_s_input(struct file *file, void *priv, unsigned int i);
+int saa7134_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap);
+int saa7134_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t);
+int saa7134_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t);
+int saa7134_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f);
+int saa7134_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f);
+int saa7134_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p);
+int saa7134_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *b);
+int saa7134_qbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b);
+int saa7134_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int saa7134_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type);
int saa7134_videoport_init(struct saa7134_dev *dev);
void saa7134_set_tvnorm_hw(struct saa7134_dev *dev);
@@ -773,6 +810,7 @@ int saa7134_video_init1(struct saa7134_dev *dev);
int saa7134_video_init2(struct saa7134_dev *dev);
void saa7134_irq_video_signalchange(struct saa7134_dev *dev);
void saa7134_irq_video_done(struct saa7134_dev *dev, unsigned long status);
+void saa7134_video_fini(struct saa7134_dev *dev);
/* ----------------------------------------------------------- */
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 77edc113e485..e5cfb6cfa18d 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1303,7 +1303,7 @@ static int sta2x11_vip_resume(struct pci_dev *pdev)
#endif
-static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
+static const struct pci_device_id sta2x11_vip_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
{0,}
};
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index d7f0249e4050..b2a4403940c5 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -36,7 +36,8 @@ source "drivers/media/platform/blackfin/Kconfig"
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on MEDIA_CAMERA_SUPPORT
- depends on VIDEO_DEV && ARCH_SHMOBILE && I2C
+ depends on VIDEO_DEV && I2C
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF_DMA_CONTIG
help
Support for the Video Output Unit (VOU) on SuperH SoCs.
@@ -90,13 +91,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_OMAP2
- tristate "OMAP2 Camera Capture Interface driver"
- depends on VIDEO_DEV && ARCH_OMAP2 && VIDEO_V4L2_INT_DEVICE
- select VIDEOBUF_DMA_SG
- ---help---
- This is a v4l2 driver for the TI OMAP2 camera capture interface
-
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 1348ba1faf92..e5269da91906 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,8 +2,6 @@
# Makefile for the video capture/playback device drivers.
#
-omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-
obj-$(CONFIG_VIDEO_VINO) += indycam.o
obj-$(CONFIG_VIDEO_VINO) += vino.o
@@ -14,7 +12,6 @@ obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
-obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index eac472b5ae83..b02aba488826 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -347,7 +347,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
/* If buffer queue is empty, return error */
if (list_empty(&layer->dma_queue)) {
v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EINVAL;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 52ac5e6c8625..735ec47601a9 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -277,7 +277,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
if (list_empty(&common->dma_queue)) {
spin_unlock_irqrestore(&common->irqlock, flags);
vpif_dbg(1, debug, "buffer queue is empty\n");
- return -EIO;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index c31bcf129a5d..9d115cdc6bdb 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -239,7 +239,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
if (list_empty(&common->dma_queue)) {
spin_unlock_irqrestore(&common->irqlock, flags);
vpif_err("buffer queue is empty\n");
- return -EIO;
+ return -ENOBUFS;
}
/* Get the next frame from the buffer queue */
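The three davinci hunks above change the empty-queue failure from -EINVAL/-EIO to -ENOBUFS, so a caller can tell a recoverable "nothing queued yet" condition apart from a real fault. A minimal user-space sketch of acting on that distinction; start_streaming() here is a hypothetical stand-in, not the driver entry point:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for a start-streaming entry point: fails with ENOBUFS
	 * while nothing has been queued, succeeds once buffers exist. */
	static int start_streaming(int queued_buffers)
	{
		if (queued_buffers == 0)
			return -ENOBUFS;
		return 0;
	}

	int main(void)
	{
		int queued = 0;
		int ret = start_streaming(queued);

		if (ret == -ENOBUFS) {
			/* Recoverable: queue buffers and try again. */
			queued = 4;
			ret = start_streaming(queued);
		}
		if (ret < 0) {
			fprintf(stderr, "fatal: %s\n", strerror(-ret));
			return 1;
		}
		printf("streaming with %d buffers\n", queued);
		return 0;
	}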
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index d2d3b4b61435..01ed1ecdff7e 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PM_RUNTIME
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on (PLAT_S5P || ARCH_EXYNOS)
help
Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index fb27ff7e1e07..8a712ca91d11 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -549,7 +549,7 @@ static int fimc_capture_release(struct file *file)
vc->streaming = false;
}
- ret = vb2_fop_release(file);
+ ret = _vb2_fop_release(file, NULL);
if (close) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index f7915695c907..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -998,41 +998,46 @@ static int fimc_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, res->start, fimc_irq_handler,
0, dev_name(dev), fimc);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
- goto err_clk;
+ goto err_sclk;
}
ret = fimc_initialize_capture_subdev(fimc);
- if (ret)
- goto err_clk;
+ if (ret < 0)
+ goto err_sclk;
platform_set_drvdata(pdev, fimc);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err_sd;
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock[CLK_GATE]);
+ if (ret < 0)
+ goto err_sd;
+ }
+
/* Initialize contiguous memory allocator */
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(fimc->alloc_ctx)) {
ret = PTR_ERR(fimc->alloc_ctx);
- goto err_pm;
+ goto err_gclk;
}
dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
-
- pm_runtime_put(dev);
return 0;
-err_pm:
- pm_runtime_put(dev);
+
+err_gclk:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
-err_clk:
+err_sclk:
clk_disable(fimc->clock[CLK_BUS]);
fimc_clk_put(fimc);
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1065,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 3d376faec777..1790fb4e32ea 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -481,7 +481,6 @@ struct fimc_ctrls {
* @flags: additional flags for image conversion
* @state: flags to keep track of user configuration
* @fimc_dev: the FIMC device this context applies to
- * @m2m_ctx: memory-to-memory device context
* @fh: v4l2 file handle
* @ctrls: v4l2 controls structure
*/
@@ -502,7 +501,6 @@ struct fimc_ctx {
u32 flags;
u32 state;
struct fimc_dev *fimc_dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_fh fh;
struct fimc_ctrls ctrls;
};
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index f758e2694fa3..2628733c4e10 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -33,47 +33,23 @@ void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is)
mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
}
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is)
-{
- unsigned int timeout = 2000;
- u32 cfg, status;
-
- cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
- status = INTSR0_GET_INTSD(0, cfg);
-
- while (status) {
- cfg = mcuctl_read(is, MCUCTL_REG_INTSR0);
- status = INTSR0_GET_INTSD(0, cfg);
- if (timeout == 0) {
- dev_warn(&is->pdev->dev, "%s timeout\n",
- __func__);
- return -ETIME;
- }
- timeout--;
- udelay(1);
- }
- return 0;
-}
-
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
{
unsigned int timeout = 2000;
u32 cfg, status;
- cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
- status = INTMSR0_GET_INTMSD(0, cfg);
-
- while (status) {
+ do {
cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
status = INTMSR0_GET_INTMSD(0, cfg);
- if (timeout == 0) {
+
+ if (--timeout == 0) {
dev_warn(&is->pdev->dev, "%s timeout\n",
__func__);
- return -ETIME;
+ return -ETIMEDOUT;
}
- timeout--;
udelay(1);
- }
+ } while (status != 0);
+
return 0;
}
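The rewritten wait loop above folds the register read into a do/while and reports -ETIMEDOUT, the conventional code for a timed-out wait, instead of -ETIME. A self-contained sketch of the same bounded-polling shape; read_status() stands in for the MCUCTL register read and the fake counter only simulates the bit clearing after a few polls:

	#include <errno.h>
	#include <stdio.h>

	static unsigned int fake_status = 3;	/* simulated hardware bit(s) */

	/* Stand-in register read: the "hardware" clears the bit after a few polls. */
	static unsigned int read_status(void)
	{
		if (fake_status)
			fake_status--;
		return fake_status;
	}

	/* Bounded polling in the do/while form used by the hunk above:
	 * read, check the budget, and give up with -ETIMEDOUT when it is spent. */
	static int wait_status_clear(void)
	{
		unsigned int timeout = 2000;
		unsigned int status;

		do {
			status = read_status();
			if (--timeout == 0)
				return -ETIMEDOUT;
		} while (status != 0);

		return 0;
	}

	int main(void)
	{
		printf("wait_status_clear() = %d\n", wait_status_clear());
		return 0;
	}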
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.h b/drivers/media/platform/exynos4-is/fimc-is-regs.h
index 5fa2fda46742..1d9d4ffc6ad5 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.h
@@ -145,7 +145,6 @@ void fimc_is_fw_clear_irq2(struct fimc_is *is);
int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num);
void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is);
-int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is *is);
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is);
void fimc_is_hw_set_sensor_num(struct fimc_is *is);
void fimc_is_hw_stream_on(struct fimc_is *is);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 9770fa98d6a1..9bdfa4599bc3 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -24,13 +24,13 @@
#include <linux/i2c.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
-#include <media/v4l2-of.h>
#include <media/videobuf2-dma-contig.h>
#include "media-dev.h"
@@ -167,10 +167,10 @@ static int fimc_is_parse_sensor_config(struct fimc_is_sensor *sensor,
u32 tmp = 0;
int ret;
- np = v4l2_of_get_next_endpoint(np, NULL);
+ np = of_graph_get_next_endpoint(np, NULL);
if (!np)
return -ENXIO;
- np = v4l2_of_get_remote_port(np);
+ np = of_graph_get_remote_port(np);
if (!np)
return -ENXIO;
@@ -781,6 +781,9 @@ static int fimc_is_debugfs_create(struct fimc_is *is)
return is->debugfs_entry == NULL ? -EIO : 0;
}
+static int fimc_is_runtime_resume(struct device *dev);
+static int fimc_is_runtime_suspend(struct device *dev);
+
static int fimc_is_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -835,14 +838,20 @@ static int fimc_is_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = fimc_is_runtime_resume(dev);
+ if (ret < 0)
+ goto err_irq;
+ }
+
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto err_irq;
+ goto err_pm;
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(is->alloc_ctx)) {
ret = PTR_ERR(is->alloc_ctx);
- goto err_irq;
+ goto err_pm;
}
/*
* Register FIMC-IS V4L2 subdevs to this driver. The video nodes
@@ -867,10 +876,13 @@ static int fimc_is_probe(struct platform_device *pdev)
err_dfs:
fimc_is_debugfs_remove(is);
-err_vb:
- vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
err_sd:
fimc_is_unregister_subdevs(is);
+err_vb:
+ vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+err_pm:
+ if (!pm_runtime_enabled(dev))
+ fimc_is_runtime_suspend(dev);
err_irq:
free_irq(is->irq, is);
err_clk:
@@ -919,10 +931,13 @@ static int fimc_is_suspend(struct device *dev)
static int fimc_is_remove(struct platform_device *pdev)
{
- struct fimc_is *is = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct fimc_is *is = dev_get_drvdata(dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ if (!pm_runtime_status_suspended(dev))
+ fimc_is_runtime_suspend(dev);
free_irq(is->irq, is);
fimc_is_unregister_subdevs(is);
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
index 72a343e3b5e8..d0dc7ee04452 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -133,7 +133,7 @@ void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
int i = ARRAY_SIZE(src_pixfmt_map);
u32 cfg;
- while (--i >= 0) {
+ while (--i) {
if (src_pixfmt_map[i][0] == pixelcode)
break;
}
@@ -240,7 +240,7 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
int i = ARRAY_SIZE(pixcode);
- while (--i >= 0)
+ while (--i)
if (pixcode[i][0] == f->fmt->mbus_code)
break;
cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
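The two loop changes above alter which table entries the reverse scan visits: the --i >= 0 form tests indices N-1 down to 0, while the --i form stops once i reaches 0, so entry 0 is never tested and acts as the implicit fallback. A stand-alone comparison, with the table size N purely illustrative:

	#include <stdio.h>

	#define N 4

	int main(void)
	{
		int i;

		/* Form used before the hunks above: visits indices N-1 down to 0. */
		i = N;
		while (--i >= 0)
			printf("ge0 form visits %d\n", i);

		/* Form used after the hunks above: stops when i reaches 0, so
		 * index 0 is never tested and serves as the fallback entry. */
		i = N;
		while (--i)
			printf("nonzero form visits %d\n", i);

		return 0;
	}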
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e5798f70d149..779ec3cd259d 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -546,7 +546,7 @@ static int fimc_lite_release(struct file *file)
mutex_unlock(&entity->parent->graph_mutex);
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
pm_runtime_put(&fimc->pdev->dev);
clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
@@ -1549,42 +1549,46 @@ static int fimc_lite_probe(struct platform_device *pdev)
0, dev_name(dev), fimc);
if (ret) {
dev_err(dev, "Failed to install irq (%d)\n", ret);
- goto err_clk;
+ goto err_clk_put;
}
/* The video node will be created within the subdev's registered() op */
ret = fimc_lite_create_capture_subdev(fimc);
if (ret)
- goto err_clk;
+ goto err_clk_put;
platform_set_drvdata(pdev, fimc);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err_sd;
+
+ if (!pm_runtime_enabled(dev)) {
+ ret = clk_enable(fimc->clock);
+ if (ret < 0)
+ goto err_sd;
+ }
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(fimc->alloc_ctx)) {
ret = PTR_ERR(fimc->alloc_ctx);
- goto err_pm;
+ goto err_clk_dis;
}
- pm_runtime_put(dev);
-
fimc_lite_set_default_config(fimc);
dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
fimc->index);
return 0;
-err_pm:
- pm_runtime_put(dev);
+
+err_clk_dis:
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
-err_clk:
+err_clk_put:
fimc_lite_clk_put(fimc);
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1600,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock);
return 0;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 8d33b68c76ba..9da95bd14820 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -44,17 +44,17 @@ void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
struct vb2_buffer *src_vb, *dst_vb;
- if (!ctx || !ctx->m2m_ctx)
+ if (!ctx || !ctx->fh.m2m_ctx)
return;
- src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (src_vb && dst_vb) {
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
- ctx->m2m_ctx);
+ ctx->fh.m2m_ctx);
}
}
@@ -123,12 +123,12 @@ static void fimc_device_run(void *priv)
fimc_prepare_dma_offset(ctx, df);
}
- src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
if (ret)
goto dma_unlock;
- dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
if (ret)
goto dma_unlock;
@@ -219,31 +219,15 @@ static int fimc_buf_prepare(struct vb2_buffer *vb)
static void fimc_buf_queue(struct vb2_buffer *vb)
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-
- dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
-
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void fimc_lock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->fimc_dev->lock);
-}
-
-static void fimc_unlock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->fimc_dev->lock);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static struct vb2_ops fimc_qops = {
.queue_setup = fimc_queue_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
- .wait_prepare = fimc_unlock,
- .wait_finish = fimc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = stop_streaming,
.start_streaming = start_streaming,
};
@@ -385,7 +369,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
if (ret)
return ret;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
@@ -410,56 +394,6 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
return 0;
}
-static int fimc_m2m_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int fimc_m2m_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_expbuf(struct file *file, void *fh,
- struct v4l2_exportbuffer *eb)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
-}
-
-
-static int fimc_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int fimc_m2m_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
@@ -598,13 +532,13 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
- .vidioc_reqbufs = fimc_m2m_reqbufs,
- .vidioc_querybuf = fimc_m2m_querybuf,
- .vidioc_qbuf = fimc_m2m_qbuf,
- .vidioc_dqbuf = fimc_m2m_dqbuf,
- .vidioc_expbuf = fimc_m2m_expbuf,
- .vidioc_streamon = fimc_m2m_streamon,
- .vidioc_streamoff = fimc_m2m_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
.vidioc_cropcap = fimc_m2m_cropcap
@@ -624,6 +558,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fimc_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -636,6 +571,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fimc_dev->lock;
return vb2_queue_init(dst_vq);
}
@@ -708,9 +644,9 @@ static int fimc_m2m_open(struct file *file)
ctx->out_path = FIMC_IO_DMA;
ctx->scaler.enabled = 1;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto error_c;
}
@@ -725,7 +661,7 @@ static int fimc_m2m_open(struct file *file)
return 0;
error_m2m_ctx:
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
error_fh:
@@ -747,7 +683,7 @@ static int fimc_m2m_release(struct file *file)
mutex_lock(&fimc->lock);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
fimc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -760,45 +696,13 @@ static int fimc_m2m_release(struct file *file)
return 0;
}
-static unsigned int fimc_m2m_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&fimc->lock);
-
- return ret;
-}
-
-
-static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (mutex_lock_interruptible(&fimc->lock))
- return -ERESTARTSYS;
-
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&fimc->lock);
-
- return ret;
-}
-
static const struct v4l2_file_operations fimc_m2m_fops = {
.owner = THIS_MODULE,
.open = fimc_m2m_open,
.release = fimc_m2m_release,
- .poll = fimc_m2m_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = fimc_m2m_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct v4l2_m2m_ops m2m_ops = {
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index c1bce170df6f..04d6ecdd314c 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -468,12 +469,12 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return 0;
v4l2_of_parse_endpoint(ep, &endpoint);
- if (WARN_ON(endpoint.port == 0) || index >= FIMC_MAX_SENSORS)
+ if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
return -EINVAL;
- pd->mux_id = (endpoint.port - 1) & 0x1;
+ pd->mux_id = (endpoint.base.port - 1) & 0x1;
- rem = v4l2_of_get_remote_port_parent(ep);
+ rem = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (rem == NULL) {
v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n",
@@ -493,13 +494,13 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return -EINVAL;
}
- if (fimc_input_is_parallel(endpoint.port)) {
+ if (fimc_input_is_parallel(endpoint.base.port)) {
if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
else
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
pd->flags = endpoint.bus.parallel.flags;
- } else if (fimc_input_is_mipi_csi(endpoint.port)) {
+ } else if (fimc_input_is_mipi_csi(endpoint.base.port)) {
/*
* MIPI CSI-2: only input mux selection and
* the sensor's clock frequency is needed.
@@ -507,7 +508,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
} else {
v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n",
- endpoint.port, rem->full_name);
+ endpoint.base.port, rem->full_name);
}
/*
* For FIMC-IS handled sensors, that are placed under i2c-isp device
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 9fc2af6a0446..3678ba59725c 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -20,6 +20,7 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/mipi-csis.h>
#include <linux/platform_device.h>
@@ -91,7 +92,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define S5PCSIS_INTSRC_ODD_BEFORE (1 << 29)
#define S5PCSIS_INTSRC_ODD_AFTER (1 << 28)
#define S5PCSIS_INTSRC_ODD (0x3 << 28)
-#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xff << 28)
+#define S5PCSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
#define S5PCSIS_INTSRC_FRAME_START (1 << 27)
#define S5PCSIS_INTSRC_FRAME_END (1 << 26)
#define S5PCSIS_INTSRC_ERR_SOT_HS (0xf << 12)
@@ -762,7 +763,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
&state->max_num_lanes))
return -EINVAL;
- node = v4l2_of_get_next_endpoint(node, NULL);
+ node = of_graph_get_next_endpoint(node, NULL);
if (!node) {
dev_err(&pdev->dev, "No port node at %s\n",
pdev->dev.of_node->full_name);
@@ -771,7 +772,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
/* Get port node and validate MIPI-CSI channel id. */
v4l2_of_parse_endpoint(node, &endpoint);
- state->index = endpoint.port - FIMC_INPUT_MIPI_CSI2_0;
+ state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
if (state->index < 0 || state->index >= CSIS_MAX_ENTITIES)
return -ENXIO;
@@ -790,6 +791,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
#define s5pcsis_parse_dt(pdev, state) (-ENOSYS)
#endif
+static int s5pcsis_pm_resume(struct device *dev, bool runtime);
static const struct of_device_id s5pcsis_of_match[];
static int s5pcsis_probe(struct platform_device *pdev)
@@ -902,13 +904,21 @@ static int s5pcsis_probe(struct platform_device *pdev)
/* .. and a pointer to the subdev. */
platform_set_drvdata(pdev, &state->sd);
memcpy(state->events, s5pcsis_events, sizeof(state->events));
+
pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = s5pcsis_pm_resume(dev, true);
+ if (ret < 0)
+ goto e_m_ent;
+ }
dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, wclk: %d, freq: %u\n",
state->num_lanes, state->hs_settle, state->wclk_ext,
state->clk_frequency);
return 0;
+e_m_ent:
+ media_entity_cleanup(&state->sd.entity);
e_clkdis:
clk_disable(state->clock[CSIS_CLK_MUX]);
e_clkput:
@@ -1014,7 +1024,7 @@ static int s5pcsis_remove(struct platform_device *pdev)
struct csis_state *state = sd_to_csis_state(sd);
pm_runtime_disable(&pdev->dev);
- s5pcsis_pm_suspend(&pdev->dev, false);
+ s5pcsis_pm_suspend(&pdev->dev, true);
clk_disable(state->clock[CSIS_CLK_MUX]);
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 6a232239ee8c..dbf0ce38a8e7 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1580,7 +1580,7 @@ static int viu_of_probe(struct platform_device *op)
}
/* enable VIU clock */
- clk = devm_clk_get(&op->dev, "viu_clk");
+ clk = devm_clk_get(&op->dev, "ipg");
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 65cab70fefcb..6bb86b581a34 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -918,7 +918,7 @@ static int deinterlace_open(struct file *file)
return ret;
}
- ctx->xt = kzalloc(sizeof(struct dma_async_tx_descriptor) +
+ ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) +
sizeof(struct data_chunk), GFP_KERNEL);
if (!ctx->xt) {
kfree(ctx);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 8df5975b700a..08e24379b794 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -177,8 +177,6 @@ struct m2mtest_ctx {
enum v4l2_colorspace colorspace;
- struct v4l2_m2m_ctx *m2m_ctx;
-
/* Source and destination queue data */
struct m2mtest_q_data q_data[2];
};
@@ -342,8 +340,8 @@ static int job_ready(void *priv)
{
struct m2mtest_ctx *ctx = priv;
- if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
- || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
dprintk(ctx->dev, "Not enough buffers available\n");
return 0;
}
@@ -359,21 +357,6 @@ static void job_abort(void *priv)
ctx->aborting = 1;
}
-static void m2mtest_lock(void *priv)
-{
- struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
- mutex_lock(&dev->dev_mutex);
-}
-
-static void m2mtest_unlock(void *priv)
-{
- struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
- mutex_unlock(&dev->dev_mutex);
-}
-
-
/* device_run() - prepares and starts the device
*
* This simulates all the immediate preparations required before starting
@@ -386,8 +369,8 @@ static void device_run(void *priv)
struct m2mtest_dev *dev = ctx->dev;
struct vb2_buffer *src_buf, *dst_buf;
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
device_process(ctx, src_buf, dst_buf);
@@ -409,8 +392,8 @@ static void device_isr(unsigned long priv)
return;
}
- src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
curr_ctx->num_processed++;
@@ -423,7 +406,7 @@ static void device_isr(unsigned long priv)
|| curr_ctx->aborting) {
dprintk(curr_ctx->dev, "Finishing transaction\n");
curr_ctx->num_processed = 0;
- v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
} else {
device_run(curr_ctx);
}
@@ -491,7 +474,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
struct m2mtest_q_data *q_data;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -594,7 +577,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
struct m2mtest_q_data *q_data;
struct vb2_queue *vq;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -648,52 +631,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct m2mtest_ctx *ctx =
@@ -748,14 +685,14 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -818,27 +755,15 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
static void m2mtest_buf_queue(struct vb2_buffer *vb)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void m2mtest_wait_prepare(struct vb2_queue *q)
-{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
- m2mtest_unlock(ctx);
-}
-
-static void m2mtest_wait_finish(struct vb2_queue *q)
-{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
- m2mtest_lock(ctx);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static struct vb2_ops m2mtest_qops = {
.queue_setup = m2mtest_queue_setup,
.buf_prepare = m2mtest_buf_prepare,
.buf_queue = m2mtest_buf_queue,
- .wait_prepare = m2mtest_wait_prepare,
- .wait_finish = m2mtest_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
@@ -853,6 +778,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->ops = &m2mtest_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -865,6 +791,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
dst_vq->ops = &m2mtest_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -936,10 +863,10 @@ static int m2mtest_open(struct file *file)
ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
ctx->colorspace = V4L2_COLORSPACE_REC709;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- rc = PTR_ERR(ctx->m2m_ctx);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(hdl);
kfree(ctx);
@@ -949,7 +876,8 @@ static int m2mtest_open(struct file *file)
v4l2_fh_add(&ctx->fh);
atomic_inc(&dev->num_inst);
- dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+ dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
open_unlock:
mutex_unlock(&dev->dev_mutex);
@@ -967,7 +895,7 @@ static int m2mtest_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->dev_mutex);
kfree(ctx);
@@ -976,34 +904,13 @@ static int m2mtest_release(struct file *file)
return 0;
}
-static unsigned int m2mtest_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct m2mtest_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-}
-
-static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct m2mtest_dev *dev = video_drvdata(file);
- struct m2mtest_ctx *ctx = file2ctx(file);
- int res;
-
- if (mutex_lock_interruptible(&dev->dev_mutex))
- return -ERESTARTSYS;
- res = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->dev_mutex);
- return res;
-}
-
static const struct v4l2_file_operations m2mtest_fops = {
.owner = THIS_MODULE,
.open = m2mtest_open,
.release = m2mtest_release,
- .poll = m2mtest_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = m2mtest_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct video_device m2mtest_videodev = {
@@ -1019,8 +926,6 @@ static struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
- .lock = m2mtest_lock,
- .unlock = m2mtest_unlock,
};
static int m2mtest_probe(struct platform_device *pdev)
@@ -1133,4 +1038,3 @@ static int __init m2mtest_init(void)
module_init(m2mtest_init);
module_exit(m2mtest_exit);
-
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 561bce8ffb1b..5807185262fe 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -290,9 +290,11 @@ static int isp_xclk_init(struct isp_device *isp)
struct clk_init_data init;
unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
+ isp->xclks[i].clk = ERR_PTR(-EINVAL);
+
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
struct isp_xclk *xclk = &isp->xclks[i];
- struct clk *clk;
xclk->isp = isp;
xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
@@ -305,10 +307,15 @@ static int isp_xclk_init(struct isp_device *isp)
init.num_parents = 1;
xclk->hw.init = &init;
-
- clk = devm_clk_register(isp->dev, &xclk->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ /*
+ * The first argument is NULL in order to avoid a circular
+ * reference, as this driver takes a reference on the
+ * sensor subdevice modules and the sensors would take a
+ * reference on this module through clk_get().
+ */
+ xclk->clk = clk_register(NULL, &xclk->hw);
+ if (IS_ERR(xclk->clk))
+ return PTR_ERR(xclk->clk);
if (pdata->xclks[i].con_id == NULL &&
pdata->xclks[i].dev_id == NULL)
@@ -320,7 +327,7 @@ static int isp_xclk_init(struct isp_device *isp)
xclk->lookup->con_id = pdata->xclks[i].con_id;
xclk->lookup->dev_id = pdata->xclks[i].dev_id;
- xclk->lookup->clk = clk;
+ xclk->lookup->clk = xclk->clk;
clkdev_add(xclk->lookup);
}
@@ -335,6 +342,9 @@ static void isp_xclk_cleanup(struct isp_device *isp)
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
struct isp_xclk *xclk = &isp->xclks[i];
+ if (!IS_ERR(xclk->clk))
+ clk_unregister(xclk->clk);
+
if (xclk->lookup)
clkdev_drop(xclk->lookup);
}
@@ -863,15 +873,12 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
unsigned long flags;
int ret;
- /* If the preview engine crashed it might not respond to read/write
- * operations on the L4 bus. This would result in a bus fault and a
- * kernel oops. Refuse to start streaming in that case. This check must
- * be performed before the loop below to avoid starting entities if the
- * pipeline won't start anyway (those entities would then likely fail to
- * stop, making the problem worse).
+ /* Refuse to start streaming if an entity included in the pipeline has
+ * crashed. This check must be performed before the loop below to avoid
+ * starting entities if the pipeline won't start anyway (those entities
+ * would then likely fail to stop, making the problem worse).
*/
- if ((pipe->entities & isp->crashed) &
- (1U << isp->isp_prev.subdev.entity.id))
+ if (pipe->entities & isp->crashed)
return -EIO;
spin_lock_irqsave(&pipe->lock, flags);
@@ -1004,13 +1011,23 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
else
ret = 0;
+ /* Handle stop failures. An entity that fails to stop can
+ * usually just be restarted. Flag the stop failure nonetheless
+ * to trigger an ISP reset the next time the device is released,
+ * just in case.
+ *
+ * The preview engine is a special case. A failure to stop can
+ * mean a hardware crash. When that happens the preview engine
+ * won't respond to read/write operations on the L4 bus anymore,
+ * resulting in a bus fault and a kernel oops next time it gets
+ * accessed. Mark it as crashed to prevent pipelines including
+ * it from being started.
+ */
if (ret) {
dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
- /* If the entity failed to stopped, assume it has
- * crashed. Mark it as such, the ISP will be reset when
- * applications will release it.
- */
- isp->crashed |= 1U << subdev->entity.id;
+ isp->stop_failure = true;
+ if (subdev == &isp->isp_prev.subdev)
+ isp->crashed |= 1U << subdev->entity.id;
failure = -ETIMEDOUT;
}
}
@@ -1047,6 +1064,23 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
}
/*
+ * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISP pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
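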
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
+{
+ if (pipe->input)
+ omap3isp_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap3isp_video_cancel_stream(pipe->output);
+}
+
+/*
* isp_pipeline_resume - Resume streaming on a pipeline
* @pipe: ISP pipeline
*
@@ -1198,6 +1232,7 @@ static int isp_reset(struct isp_device *isp)
udelay(1);
}
+ isp->stop_failure = false;
isp->crashed = 0;
return 0;
}
@@ -1609,7 +1644,7 @@ void omap3isp_put(struct isp_device *isp)
/* Reset the ISP if an entity has failed to stop. This is the
* only way to recover from such conditions.
*/
- if (isp->crashed)
+ if (isp->crashed || isp->stop_failure)
isp_reset(isp);
isp_disable_clocks(isp);
}
@@ -2120,28 +2155,13 @@ static int isp_map_mem_resource(struct platform_device *pdev,
/* request the mem region for the camera registers */
mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
- if (!mem) {
- dev_err(isp->dev, "no mem resource?\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(isp->dev, mem->start, resource_size(mem),
- pdev->name)) {
- dev_err(isp->dev,
- "cannot reserve camera register I/O region\n");
- return -ENODEV;
- }
- isp->mmio_base_phys[res] = mem->start;
- isp->mmio_size[res] = resource_size(mem);
/* map the region */
- isp->mmio_base[res] = devm_ioremap_nocache(isp->dev,
- isp->mmio_base_phys[res],
- isp->mmio_size[res]);
- if (!isp->mmio_base[res]) {
- dev_err(isp->dev, "cannot map camera register I/O region\n");
- return -ENODEV;
- }
+ isp->mmio_base[res] = devm_ioremap_resource(isp->dev, mem);
+ if (IS_ERR(isp->mmio_base[res]))
+ return PTR_ERR(isp->mmio_base[res]);
+
+ isp->mmio_base_phys[res] = mem->start;
return 0;
}
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index ce65d3ae1aa7..081f5ec5a663 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -135,6 +135,7 @@ struct isp_xclk {
struct isp_device *isp;
struct clk_hw hw;
struct clk_lookup *lookup;
+ struct clk *clk;
enum isp_xclk_id id;
spinlock_t lock; /* Protects enabled and divider */
@@ -151,9 +152,9 @@ struct isp_xclk {
* regions.
* @mmio_base_phys: Array with physical L4 bus addresses for ISP register
* regions.
- * @mmio_size: Array with ISP register regions size in bytes.
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
+ * @stop_failure: Indicates that an entity failed to stop.
* @crashed: Bitmask of crashed entities (indexed by entity ID)
* @has_context: Context has been saved at least once and can be restored.
* @ref_count: Reference count for handling multiple ISP requests.
@@ -187,11 +188,11 @@ struct isp_device {
void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
- resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
+ bool stop_failure;
u32 crashed;
int has_context;
int ref_count;
@@ -237,6 +238,7 @@ int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
enum isp_pipeline_stream_state state);
+void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe);
void omap3isp_configure_bridge(struct isp_device *isp,
enum ccdc_input_entity input,
const struct isp_parallel_platform_data *pdata,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 907a205da5a5..5db2c88b9ad8 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1516,6 +1516,8 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
if (ccdc_sbl_wait_idle(ccdc, 1000)) {
dev_info(isp->dev, "CCDC won't become idle!\n");
+ isp->crashed |= 1U << ccdc->subdev.entity.id;
+ omap3isp_pipeline_cancel_stream(pipe);
goto done;
}
@@ -2484,7 +2486,8 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
v4l2_set_subdevdata(sd, ccdc);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index e71651429dda..e84fe0543e47 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1076,7 +1076,8 @@ static int ccp2_init_entities(struct isp_ccp2_device *ccp2)
v4l2_set_subdevdata(sd, ccp2);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &ccp2_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6db245d84bbb..620560828a48 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1245,7 +1245,8 @@ static int csi2_init_entities(struct isp_csi2_device *csi2)
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
me->ops = &csi2_media_ops;
ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index cd8831aebdeb..1c776c1186f1 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2283,7 +2283,8 @@ static int preview_init_entities(struct isp_prev_device *prev)
v4l2_ctrl_handler_setup(&prev->ctrls);
sd->ctrl_handler = &prev->ctrls;
- pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &preview_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
index e15f01342058..5f0f8fab1d17 100644
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ b/drivers/media/platform/omap3isp/ispqueue.c
@@ -553,8 +553,10 @@ static void isp_video_buffer_query(struct isp_video_buffer *buf,
switch (buf->state) {
case ISP_BUF_STATE_ERROR:
vbuf->flags |= V4L2_BUF_FLAG_ERROR;
+ /* Fallthrough */
case ISP_BUF_STATE_DONE:
vbuf->flags |= V4L2_BUF_FLAG_DONE;
+ break;
case ISP_BUF_STATE_QUEUED:
case ISP_BUF_STATE_ACTIVE:
vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
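The ispqueue.c hunk above documents the intentional fall-through from the ERROR case into DONE and adds the break that was missing before QUEUED, so a completed buffer no longer also reports the queued flag. A stand-alone sketch of the same flag accumulation; the FLAG_* names and values are illustrative, not the V4L2 constants:

	#include <stdio.h>

	enum buf_state { BUF_ERROR, BUF_DONE, BUF_QUEUED };

	#define FLAG_ERROR	0x1
	#define FLAG_DONE	0x2
	#define FLAG_QUEUED	0x4

	/* ERROR falls through to DONE on purpose; the break keeps DONE
	 * from also picking up the QUEUED flag. */
	static unsigned int flags_for(enum buf_state state)
	{
		unsigned int flags = 0;

		switch (state) {
		case BUF_ERROR:
			flags |= FLAG_ERROR;
			/* Fallthrough */
		case BUF_DONE:
			flags |= FLAG_DONE;
			break;
		case BUF_QUEUED:
			flags |= FLAG_QUEUED;
			break;
		}
		return flags;
	}

	int main(void)
	{
		printf("ERROR  -> 0x%x\n", flags_for(BUF_ERROR));	/* 0x3 */
		printf("DONE   -> 0x%x\n", flags_for(BUF_DONE));	/* 0x2 */
		printf("QUEUED -> 0x%x\n", flags_for(BUF_QUEUED));	/* 0x4 */
		return 0;
	}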
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index d11fb261d530..0d36b8bc9f98 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1532,6 +1532,20 @@ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
+static int resizer_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct isp_pipeline *pipe = to_isp_pipeline(&sd->entity);
+
+ omap3isp_resizer_max_rate(res, &pipe->max_rate);
+
+ return v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+}
+
/*
* resizer_init_formats - Initialize formats on all pads
* @sd: ISP resizer V4L2 subdevice
@@ -1570,6 +1584,7 @@ static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
.set_fmt = resizer_set_format,
.get_selection = resizer_get_selection,
.set_selection = resizer_set_selection,
+ .link_validate = resizer_link_validate,
};
/* subdev operations */
@@ -1701,7 +1716,8 @@ static int resizer_init_entities(struct isp_res_device *res)
v4l2_set_subdevdata(sd, res);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &resizer_media_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 61e17f9bd8b9..a75407c3a726 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1067,7 +1067,7 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_set_subdevdata(subdev, stat);
- stat->pad.flags = MEDIA_PAD_FL_SINK;
+ stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
me->ops = NULL;
return media_entity_init(me, 1, &stat->pad, 0);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index f6304bb074f5..856fdf554035 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -278,55 +278,6 @@ static int isp_video_get_graph_data(struct isp_video *video,
return 0;
}
-/*
- * Validate a pipeline by checking both ends of all links for format
- * discrepancies.
- *
- * Compute the minimum time per frame value as the maximum of time per frame
- * limits reported by every block in the pipeline.
- *
- * Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends or if the pipeline doesn't start with a
- * video source (either a subdev with no input pad, or a non-subdev entity).
- */
-static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
-{
- struct isp_device *isp = pipe->output->isp;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
-
- subdev = isp_video_remote_subdev(pipe->output, NULL);
- if (subdev == NULL)
- return -EPIPE;
-
- while (1) {
- /* Retrieve the sink format */
- pad = &subdev->entity.pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- /* Update the maximum frame rate */
- if (subdev == &isp->isp_res.subdev)
- omap3isp_resizer_max_rate(&isp->isp_res,
- &pipe->max_rate);
-
- /* Retrieve the source format. Return an error if no source
- * entity can be found, and stop checking the pipeline if the
- * source entity isn't a subdev.
- */
- pad = media_entity_remote_pad(pad);
- if (pad == NULL)
- return -EPIPE;
-
- if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
- break;
-
- subdev = media_entity_to_v4l2_subdev(pad->entity);
- }
-
- return 0;
-}
-
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
@@ -460,6 +411,15 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
struct isp_video *video = vfh->video;
unsigned long addr;
+ /* Refuse to prepare the buffer if the video node has registered an
+ * error. We don't need to take any lock here as the operation is
+ * inherently racy. The authoritative check will be performed in the
+ * queue handler, which can't return an error; this check is just a best
+ * effort to notify userspace as early as possible.
+ */
+ if (unlikely(video->error))
+ return -EIO;
+
addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
if (IS_ERR_VALUE(addr))
return -EIO;
@@ -496,6 +456,12 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
unsigned int empty;
unsigned int start;
+ if (unlikely(video->error)) {
+ buf->state = ISP_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ return;
+ }
+
empty = list_empty(&video->dmaqueue);
list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
@@ -618,6 +584,36 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
}
/*
+ * omap3isp_video_cancel_stream - Cancel stream on a video node
+ * @video: ISP video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and makes
+ * sure no new buffer can be queued.
+ */
+void omap3isp_video_cancel_stream(struct isp_video *video)
+{
+ struct isp_video_queue *queue = video->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+
+ while (!list_empty(&video->dmaqueue)) {
+ struct isp_video_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue,
+ struct isp_video_buffer, irqlist);
+ list_del(&buf->irqlist);
+
+ buf->state = ISP_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ }
+
+ video->error = true;
+
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+/*
* omap3isp_video_resume - Perform resume operation on the buffers
* @video: ISP video object
* @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
@@ -1051,11 +1047,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_check_format;
- /* Validate the pipeline and update its state. */
- ret = isp_video_validate_pipeline(pipe);
- if (ret < 0)
- goto err_check_format;
-
pipe->error = false;
spin_lock_irqsave(&pipe->lock, flags);
@@ -1159,6 +1150,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
omap3isp_video_queue_streamoff(&vfh->queue);
video->queue = NULL;
video->streaming = 0;
+ video->error = false;
if (video->isp->pdata->set_constraints)
video->isp->pdata->set_constraints(video->isp, false);
@@ -1332,11 +1324,13 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
switch (video->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->pad.flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
direction = "input";
- video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->pad.flags = MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_MUST_CONNECT;
video->video.vfl_dir = VFL_DIR_TX;
break;
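The error handling added to ispvideo.c is a two-stage check: buffer preparation peeks at video->error without any lock as a best-effort early out, while the queue handler performs the authoritative check under the irqlock that the new cancel path also takes when setting the flag. A user-space sketch of that pattern under assumed names (stream_error, buffer_prepare and buffer_queue are stand-ins, not the driver symbols):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Hypothetical stand-in for the video node: a queue depth counter
	 * guarded by a lock, plus an error flag that may be read without it. */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool stream_error;
	static int queued;

	static void cancel_stream(void)
	{
		pthread_mutex_lock(&lock);
		atomic_store(&stream_error, true);	/* mark the node as failed */
		pthread_mutex_unlock(&lock);
	}

	/* Best-effort early check, done without the lock. */
	static int buffer_prepare(void)
	{
		return atomic_load(&stream_error) ? -1 : 0;
	}

	/* Authoritative check, done under the lock that also guards the queue. */
	static int buffer_queue(void)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		if (atomic_load(&stream_error))
			ret = -1;
		else
			queued++;
		pthread_mutex_unlock(&lock);
		return ret;
	}

	static void *canceller(void *arg)
	{
		(void)arg;
		usleep(1000);
		cancel_stream();
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, canceller, NULL);
		while (buffer_prepare() == 0 && buffer_queue() == 0)
			usleep(100);
		pthread_join(t, NULL);
		printf("stream cancelled after %d queued buffers\n", queued);
		return 0;
	}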
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 1ad470ec2b9d..4e194076cc60 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -178,6 +178,7 @@ struct isp_video {
/* Pipeline state */
struct isp_pipeline pipe;
struct mutex stream_lock; /* pipeline and stream states */
+ bool error;
/* Video buffers queue */
struct isp_video_queue *queue;
@@ -207,6 +208,7 @@ int omap3isp_video_register(struct isp_video *video,
struct v4l2_device *vdev);
void omap3isp_video_unregister(struct isp_video *video);
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
+void omap3isp_video_cancel_stream(struct isp_video *video);
void omap3isp_video_resume(struct isp_video *video, int continuous);
struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 0b2948376aee..0fcf7d75e841 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -136,10 +136,9 @@ static int g2d_buf_prepare(struct vb2_buffer *vb)
static void g2d_buf_queue(struct vb2_buffer *vb)
{
struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
-
static struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
@@ -159,6 +158,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -171,6 +171,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->mutex;
return vb2_queue_init(dst_vq);
}
@@ -253,9 +254,9 @@ static int g2d_open(struct file *file)
kfree(ctx);
return -ERESTARTSYS;
}
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
kfree(ctx);
return ret;
@@ -324,7 +325,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
struct vb2_queue *vq;
struct g2d_frame *frm;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
frm = get_frame(ctx, f->type);
@@ -384,7 +385,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
ret = vidioc_try_fmt(file, prv, f);
if (ret)
return ret;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type);
return -EBUSY;
@@ -410,72 +411,6 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
- struct g2d_dev *dev = ctx->dev;
- unsigned int res;
-
- mutex_lock(&dev->mutex);
- res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&dev->mutex);
- return res;
-}
-
-static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
- struct g2d_dev *dev = ctx->dev;
- int ret;
-
- if (mutex_lock_interruptible(&dev->mutex))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->mutex);
- return ret;
-}
-
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct g2d_ctx *ctx = priv;
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int vidioc_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cr)
{
@@ -551,20 +486,6 @@ static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *c
return 0;
}
-static void g2d_lock(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
- mutex_lock(&dev->mutex);
-}
-
-static void g2d_unlock(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
- mutex_unlock(&dev->mutex);
-}
-
static void job_abort(void *prv)
{
struct g2d_ctx *ctx = prv;
@@ -589,8 +510,8 @@ static void device_run(void *prv)
dev->curr = ctx;
- src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
clk_enable(dev->gate);
g2d_reset(dev);
@@ -631,8 +552,8 @@ static irqreturn_t g2d_isr(int irq, void *prv)
BUG_ON(ctx == NULL);
- src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
BUG_ON(src == NULL);
BUG_ON(dst == NULL);
@@ -642,7 +563,7 @@ static irqreturn_t g2d_isr(int irq, void *prv)
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
- v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
dev->curr = NULL;
wake_up(&dev->irq_queue);
@@ -653,9 +574,9 @@ static const struct v4l2_file_operations g2d_fops = {
.owner = THIS_MODULE,
.open = g2d_open,
.release = g2d_release,
- .poll = g2d_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = g2d_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
@@ -671,14 +592,13 @@ static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt,
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
-
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_crop = vidioc_g_crop,
.vidioc_s_crop = vidioc_s_crop,
@@ -697,8 +617,6 @@ static struct video_device g2d_videodev = {
static struct v4l2_m2m_ops g2d_m2m_ops = {
.device_run = device_run,
.job_abort = job_abort,
- .lock = g2d_lock,
- .unlock = g2d_unlock,
};
static const struct of_device_id exynos_g2d_match[];
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index 300ca05ba404..b0e52ab7ecdb 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -57,7 +57,6 @@ struct g2d_frame {
struct g2d_ctx {
struct v4l2_fh fh;
struct g2d_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct g2d_frame in;
struct g2d_frame out;
struct v4l2_ctrl *ctrl_hflip;
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
index d18cb5edd2d5..a1a9169254c3 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
-s5p-jpeg-objs := jpeg-core.o
+s5p-jpeg-objs := jpeg-core.o jpeg-hw-exynos4.o jpeg-hw-s5p.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 9b88a4601007..7d68d0b9966a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1,9 +1,10 @@
/* linux/drivers/media/platform/s5p-jpeg/jpeg-core.c
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,6 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -28,70 +30,234 @@
#include <media/videobuf2-dma-contig.h>
#include "jpeg-core.h"
-#include "jpeg-hw.h"
+#include "jpeg-hw-s5p.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
-static struct s5p_jpeg_fmt formats_enc[] = {
+static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "JPEG JFIF",
.fourcc = V4L2_PIX_FMT_JPEG,
+ .flags = SJPEG_FMT_FLAG_ENC_CAPTURE |
+ SJPEG_FMT_FLAG_DEC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_FLAG_EXYNOS4,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
.colplanes = 1,
- .types = MEM2MEM_CAPTURE,
+ .h_align = 4,
+ .v_align = 3,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
-};
-#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)
-
-static struct s5p_jpeg_fmt formats_dec[] = {
{
- .name = "YUV 4:2:0 planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV420,
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "ARGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .colplanes = 1,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV24,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:4:4 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV42,
+ .depth = 24,
+ .colplanes = 2,
+ .h_align = 0,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 0,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
- .colplanes = 3,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .colplanes = 2,
.h_align = 4,
.v_align = 4,
- .types = MEM2MEM_CAPTURE,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_S5P |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:2 packed, YCbYCr",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .colplanes = 1,
- .h_align = 4,
- .v_align = 3,
- .types = MEM2MEM_CAPTURE,
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .colplanes = 2,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "JPEG JFIF",
- .fourcc = V4L2_PIX_FMT_JPEG,
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .h_align = 1,
+ .v_align = 1,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+ },
+ {
+ .name = "Gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
.colplanes = 1,
- .types = MEM2MEM_OUTPUT,
+ .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
+ SJPEG_FMT_FLAG_DEC_CAPTURE |
+ SJPEG_FMT_FLAG_EXYNOS4 |
+ SJPEG_FMT_NON_RGB,
+ .subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
},
};
-#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)
+#define SJPEG_NUM_FORMATS ARRAY_SIZE(sjpeg_formats)
static const unsigned char qtbl_luminance[4][64] = {
- {/* level 1 - high quality */
- 8, 6, 6, 8, 12, 14, 16, 17,
- 6, 6, 6, 8, 10, 13, 12, 15,
- 6, 6, 7, 8, 13, 14, 18, 24,
- 8, 8, 8, 14, 13, 19, 24, 35,
- 12, 10, 13, 13, 20, 26, 34, 39,
- 14, 13, 14, 19, 26, 34, 39, 39,
- 16, 12, 18, 24, 34, 39, 39, 39,
- 17, 15, 24, 35, 39, 39, 39, 39
+ {/* level 0 - highest compression, lowest image quality */
+ 20, 16, 25, 39, 50, 46, 62, 68,
+ 16, 18, 23, 38, 38, 53, 65, 68,
+ 25, 23, 31, 38, 53, 65, 68, 68,
+ 39, 38, 38, 53, 65, 68, 68, 68,
+ 50, 38, 53, 65, 68, 68, 68, 68,
+ 46, 53, 65, 68, 68, 68, 68, 68,
+ 62, 65, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 16, 11, 11, 16, 23, 27, 31, 30,
+ 11, 12, 12, 15, 20, 23, 23, 30,
+ 11, 12, 13, 16, 23, 26, 35, 47,
+ 16, 15, 16, 23, 26, 37, 47, 64,
+ 23, 20, 23, 26, 39, 51, 64, 64,
+ 27, 23, 26, 37, 51, 64, 64, 64,
+ 31, 23, 35, 47, 64, 64, 64, 64,
+ 30, 30, 47, 64, 64, 64, 64, 64
},
{/* level 2 */
12, 8, 8, 12, 17, 21, 24, 23,
@@ -103,38 +269,38 @@ static const unsigned char qtbl_luminance[4][64] = {
24, 18, 27, 36, 51, 59, 59, 59,
23, 23, 36, 53, 59, 59, 59, 59
},
- {/* level 3 */
- 16, 11, 11, 16, 23, 27, 31, 30,
- 11, 12, 12, 15, 20, 23, 23, 30,
- 11, 12, 13, 16, 23, 26, 35, 47,
- 16, 15, 16, 23, 26, 37, 47, 64,
- 23, 20, 23, 26, 39, 51, 64, 64,
- 27, 23, 26, 37, 51, 64, 64, 64,
- 31, 23, 35, 47, 64, 64, 64, 64,
- 30, 30, 47, 64, 64, 64, 64, 64
- },
- {/*level 4 - low quality */
- 20, 16, 25, 39, 50, 46, 62, 68,
- 16, 18, 23, 38, 38, 53, 65, 68,
- 25, 23, 31, 38, 53, 65, 68, 68,
- 39, 38, 38, 53, 65, 68, 68, 68,
- 50, 38, 53, 65, 68, 68, 68, 68,
- 46, 53, 65, 68, 68, 68, 68, 68,
- 62, 65, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68
+ {/* level 3 - lowest compression, highest image quality */
+ 8, 6, 6, 8, 12, 14, 16, 17,
+ 6, 6, 6, 8, 10, 13, 12, 15,
+ 6, 6, 7, 8, 13, 14, 18, 24,
+ 8, 8, 8, 14, 13, 19, 24, 35,
+ 12, 10, 13, 13, 20, 26, 34, 39,
+ 14, 13, 14, 19, 26, 34, 39, 39,
+ 16, 12, 18, 24, 34, 39, 39, 39,
+ 17, 15, 24, 35, 39, 39, 39, 39
}
};
static const unsigned char qtbl_chrominance[4][64] = {
- {/* level 1 - high quality */
- 9, 8, 9, 11, 14, 17, 19, 24,
- 8, 10, 9, 11, 14, 13, 17, 22,
- 9, 9, 13, 14, 13, 15, 23, 26,
- 11, 11, 14, 14, 15, 20, 26, 33,
- 14, 14, 13, 15, 20, 24, 33, 39,
- 17, 13, 15, 20, 24, 32, 39, 39,
- 19, 17, 23, 26, 33, 39, 39, 39,
- 24, 22, 26, 33, 39, 39, 39, 39
+ {/* level 0 - highest compression, lowest image quality */
+ 21, 25, 32, 38, 54, 68, 68, 68,
+ 25, 28, 24, 38, 54, 68, 68, 68,
+ 32, 24, 32, 43, 66, 68, 68, 68,
+ 38, 38, 43, 53, 68, 68, 68, 68,
+ 54, 54, 66, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ },
+ {/* level 1 */
+ 17, 15, 17, 21, 20, 26, 38, 48,
+ 15, 19, 18, 17, 20, 26, 35, 43,
+ 17, 18, 20, 22, 26, 30, 46, 53,
+ 21, 17, 22, 28, 30, 39, 53, 64,
+ 20, 20, 26, 30, 39, 48, 64, 64,
+ 26, 26, 30, 39, 48, 63, 64, 64,
+ 38, 35, 46, 53, 64, 64, 64, 64,
+ 48, 43, 53, 64, 64, 64, 64, 64
},
{/* level 2 */
13, 11, 13, 16, 20, 20, 29, 37,
@@ -146,25 +312,15 @@ static const unsigned char qtbl_chrominance[4][64] = {
29, 26, 35, 40, 50, 59, 59, 59,
37, 32, 40, 50, 59, 59, 59, 59
},
- {/* level 3 */
- 17, 15, 17, 21, 20, 26, 38, 48,
- 15, 19, 18, 17, 20, 26, 35, 43,
- 17, 18, 20, 22, 26, 30, 46, 53,
- 21, 17, 22, 28, 30, 39, 53, 64,
- 20, 20, 26, 30, 39, 48, 64, 64,
- 26, 26, 30, 39, 48, 63, 64, 64,
- 38, 35, 46, 53, 64, 64, 64, 64,
- 48, 43, 53, 64, 64, 64, 64, 64
- },
- {/*level 4 - low quality */
- 21, 25, 32, 38, 54, 68, 68, 68,
- 25, 28, 24, 38, 54, 68, 68, 68,
- 32, 24, 32, 43, 66, 68, 68, 68,
- 38, 38, 43, 53, 68, 68, 68, 68,
- 54, 54, 66, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68,
- 68, 68, 68, 68, 68, 68, 68, 68
+ {/* level 3 - lowest compression, highest image quality */
+ 9, 8, 9, 11, 14, 17, 19, 24,
+ 8, 10, 9, 11, 14, 13, 17, 22,
+ 9, 9, 13, 14, 13, 15, 23, 26,
+ 11, 11, 14, 14, 15, 20, 26, 33,
+ 14, 14, 13, 15, 20, 24, 33, 39,
+ 17, 13, 15, 20, 24, 32, 39, 39,
+ 19, 17, 23, 26, 33, 39, 39, 39,
+ 24, 22, 26, 33, 39, 39, 39, 39
}
};
@@ -202,6 +358,106 @@ static const unsigned char hactblg0[162] = {
0xf9, 0xfa
};
+/*
+ * Fourcc downgrade schema lookup tables for 422 and 420
+ * chroma subsampling - the fourcc at each position corresponds to the
+ * fourcc at the same position in the fourcc_to_dwngrd_schema_id table,
+ * which makes it possible to pick the most suitable fourcc counterpart
+ * for the given downgraded subsampling property.
+ */
+static const u32 subs422_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+};
+
+static const u32 subs420_fourcc_dwngrd_schema[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+ V4L2_PIX_FMT_GREY,
+};
+
+/*
+ * Lookup table for translation of a fourcc to the position
+ * of its downgraded counterpart in the *fourcc_dwngrd_schema
+ * tables.
+ */
+static const u32 fourcc_to_dwngrd_schema_id[] = {
+ V4L2_PIX_FMT_NV24,
+ V4L2_PIX_FMT_NV42,
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_YVYU,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_GREY,
+};
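+
+/*
+ * Example: V4L2_PIX_FMT_NV24 has index 0 in fourcc_to_dwngrd_schema_id,
+ * so it downgrades to subs422_fourcc_dwngrd_schema[0] (NV16) for 4:2:2
+ * and to subs420_fourcc_dwngrd_schema[0] (NV12) for 4:2:0 subsampling.
+ */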
+
+static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
+ if (fourcc_to_dwngrd_schema_id[i] == fourcc)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int s5p_jpeg_adjust_fourcc_to_subsampling(
+ enum v4l2_jpeg_chroma_subsampling subs,
+ u32 in_fourcc,
+ u32 *out_fourcc,
+ struct s5p_jpeg_ctx *ctx)
+{
+ int dwngrd_sch_id;
+
+ if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+ dwngrd_sch_id =
+ s5p_jpeg_get_dwngrd_sch_id_by_fourcc(in_fourcc);
+ if (dwngrd_sch_id < 0)
+ return -EINVAL;
+ }
+
+ switch (ctx->subsampling) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs420_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs420_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ if (dwngrd_sch_id >
+ ARRAY_SIZE(subs422_fourcc_dwngrd_schema) - 1)
+ return -EINVAL;
+ *out_fourcc = subs422_fourcc_dwngrd_schema[dwngrd_sch_id];
+ break;
+ default:
+ *out_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ }
+
+ return 0;
+}
+
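+/*
+ * Translation of the decoded-image subsampling value reported by the
+ * exynos4x12 hardware into the corresponding V4L2 chroma subsampling
+ * enumeration (see s5p_jpeg_to_user_subsampling()).
+ */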
+static int exynos4x12_decoded_subsampling[] = {
+ V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420,
+};
+
static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
@@ -212,8 +468,24 @@ static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
return container_of(fh, struct s5p_jpeg_ctx, fh);
}
-static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
- unsigned long tab, int len)
+static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
+{
+ WARN_ON(ctx->subsampling > 3);
+
+ if (ctx->jpeg->variant->version == SJPEG_S5P) {
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ return ctx->subsampling;
+ } else {
+ if (ctx->subsampling > 2)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ return exynos4x12_decoded_subsampling[ctx->subsampling];
+ }
+}
+
+static inline void s5p_jpeg_set_qtbl(void __iomem *regs,
+ const unsigned char *qtbl,
+ unsigned long tab, int len)
{
int i;
@@ -221,22 +493,25 @@ static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
}
-static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 0 with data for luma */
- jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
- ARRAY_SIZE(qtbl_luminance[quality]));
+ s5p_jpeg_set_qtbl(regs, qtbl_luminance[quality],
+ S5P_JPG_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
}
-static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+static inline void s5p_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 1 with data for chroma */
- jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
- ARRAY_SIZE(qtbl_chrominance[quality]));
+ s5p_jpeg_set_qtbl(regs, qtbl_chrominance[quality],
+ S5P_JPG_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
}
-static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
- unsigned long tab, int len)
+static inline void s5p_jpeg_set_htbl(void __iomem *regs,
+ const unsigned char *htbl,
+ unsigned long tab, int len)
{
int i;
@@ -244,28 +519,84 @@ static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
}
-static inline void jpeg_set_hdctbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
+ s5p_jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0),
+ ARRAY_SIZE(hdctbl0));
}
-static inline void jpeg_set_hdctblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hdctblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
+ s5p_jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0),
+ ARRAY_SIZE(hdctblg0));
}
-static inline void jpeg_set_hactbl(void __iomem *regs)
+static inline void s5p_jpeg_set_hactbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
+ s5p_jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0),
+ ARRAY_SIZE(hactbl0));
}
-static inline void jpeg_set_hactblg(void __iomem *regs)
+static inline void s5p_jpeg_set_hactblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
- jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
+ s5p_jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0),
+ ARRAY_SIZE(hactblg0));
+}
+
+static inline void exynos4_jpeg_set_tbl(void __iomem *regs,
+ const unsigned char *tbl,
+ unsigned long tab, int len)
+{
+ int i;
+ unsigned int dword;
+
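+ /* pack four consecutive table bytes into one 32-bit word per write */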
+ for (i = 0; i < len; i += 4) {
+ dword = tbl[i] |
+ (tbl[i + 1] << 8) |
+ (tbl[i + 2] << 16) |
+ (tbl[i + 3] << 24);
+ writel(dword, regs + tab + i);
+ }
+}
+
+static inline void exynos4_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 0 with data for luma */
+ exynos4_jpeg_set_tbl(regs, qtbl_luminance[quality],
+ EXYNOS4_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 1 with data for chroma */
+ exynos4_jpeg_set_tbl(regs, qtbl_chrominance[quality],
+ EXYNOS4_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
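+/*
+ * Program the standard Huffman tables: the code-length (hdctbl0/hactbl0)
+ * and code-value (hdctblg0/hactblg0) parts of the DC and AC tables, for
+ * both the luma and chroma components.
+ */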
+void exynos4_jpeg_set_huff_tbl(void __iomem *base)
+{
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCCL,
+ ARRAY_SIZE(hdctbl0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCLV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCCV,
+ ARRAY_SIZE(hdctblg0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACLL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACCL,
+ ARRAY_SIZE(hactbl0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACLV,
+ ARRAY_SIZE(hactblg0));
+ exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACCV,
+ ARRAY_SIZE(hactblg0));
}
/*
@@ -276,8 +607,8 @@ static inline void jpeg_set_hactblg(void __iomem *regs)
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
- __u32 pixelformat);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ __u32 pixelformat, unsigned int fmt_type);
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
static int s5p_jpeg_open(struct file *file)
@@ -285,7 +616,7 @@ static int s5p_jpeg_open(struct file *file)
struct s5p_jpeg *jpeg = video_drvdata(file);
struct video_device *vfd = video_devdata(file);
struct s5p_jpeg_ctx *ctx;
- struct s5p_jpeg_fmt *out_fmt;
+ struct s5p_jpeg_fmt *out_fmt, *cap_fmt;
int ret = 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -306,24 +637,31 @@ static int s5p_jpeg_open(struct file *file)
ctx->jpeg = jpeg;
if (vfd == jpeg->vfd_encoder) {
ctx->mode = S5P_JPEG_ENCODE;
- out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_CAPTURE);
} else {
ctx->mode = S5P_JPEG_DECODE;
- out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
+ out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ FMT_TYPE_OUTPUT);
+ cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV,
+ FMT_TYPE_CAPTURE);
}
- ret = s5p_jpeg_controls_create(ctx);
- if (ret < 0)
- goto error;
-
- ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto error;
}
ctx->out_q.fmt = out_fmt;
- ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);
+ ctx->cap_q.fmt = cap_fmt;
+
+ ret = s5p_jpeg_controls_create(ctx);
+ if (ret < 0)
+ goto error;
+
mutex_unlock(&jpeg->lock);
return 0;
@@ -342,49 +680,23 @@ static int s5p_jpeg_release(struct file *file)
struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
mutex_lock(&jpeg->lock);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
- mutex_unlock(&jpeg->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
-
- return 0;
-}
-
-static unsigned int s5p_jpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
- unsigned int res;
-
- mutex_lock(&jpeg->lock);
- res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&jpeg->lock);
- return res;
-}
-
-static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
- int ret;
- if (mutex_lock_interruptible(&jpeg->lock))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&jpeg->lock);
- return ret;
+ return 0;
}
static const struct v4l2_file_operations s5p_jpeg_fops = {
.owner = THIS_MODULE,
.open = s5p_jpeg_open,
.release = s5p_jpeg_release,
- .poll = s5p_jpeg_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = s5p_jpeg_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
/*
@@ -427,10 +739,11 @@ static void skip(struct s5p_jpeg_buffer *buf, long len)
}
static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
- unsigned long buffer, unsigned long size)
+ unsigned long buffer, unsigned long size,
+ struct s5p_jpeg_ctx *ctx)
{
int c, components, notfound;
- unsigned int height, width, word;
+ unsigned int height, width, word, subsampling = 0;
long length;
struct s5p_jpeg_buffer jpeg_buffer;
@@ -469,7 +782,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
break;
notfound = 0;
- skip(&jpeg_buffer, components * 3);
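+ /*
+ * Each SOF0 component entry is 3 bytes: component id, sampling
+ * factors (horizontal in the high nibble, vertical in the low
+ * nibble) and quantization table index. Read the sampling factors
+ * of the first (luma) component: 0x11 -> 4:4:4, 0x21 -> 4:2:2,
+ * 0x22 -> 4:2:0; a single component means grayscale (0x33 here).
+ */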
+ if (components == 1) {
+ subsampling = 0x33;
+ } else {
+ skip(&jpeg_buffer, 1);
+ subsampling = get_byte(&jpeg_buffer);
+ skip(&jpeg_buffer, 1);
+ }
+
+ skip(&jpeg_buffer, components * 2);
break;
/* skip payload-less markers */
@@ -491,6 +812,24 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
result->w = width;
result->h = height;
result->size = components;
+
+ switch (subsampling) {
+ case 0x11:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ break;
+ case 0x21:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ break;
+ case 0x22:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ break;
+ case 0x33:
+ ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+ break;
+ default:
+ return false;
+ }
+
return !notfound;
}
@@ -521,13 +860,13 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
+static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
for (i = 0; i < n; ++i) {
- if (formats[i].types & type) {
+ if (sjpeg_formats[i].flags & type) {
/* index-th format of type type found ? */
if (num == f->index)
break;
@@ -541,8 +880,8 @@ static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
if (i >= n)
return -EINVAL;
- strlcpy(f->description, formats[i].name, sizeof(f->description));
- f->pixelformat = formats[i].fourcc;
+ strlcpy(f->description, sjpeg_formats[i].name, sizeof(f->description));
+ f->pixelformat = sjpeg_formats[i].fourcc;
return 0;
}
@@ -553,10 +892,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
- MEM2MEM_CAPTURE);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_CAPTURE);
- return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -565,10 +905,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
- MEM2MEM_OUTPUT);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_ENC_OUTPUT);
- return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
+ return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -589,7 +930,7 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
- vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -615,29 +956,35 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
- u32 pixelformat)
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
+ u32 pixelformat, unsigned int fmt_type)
{
- unsigned int k;
- struct s5p_jpeg_fmt *formats;
- int n;
+ unsigned int k, fmt_flag, ver_flag;
- if (mode == S5P_JPEG_ENCODE) {
- formats = formats_enc;
- n = NUM_FORMATS_ENC;
- } else {
- formats = formats_dec;
- n = NUM_FORMATS_DEC;
- }
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_ENC_OUTPUT :
+ SJPEG_FMT_FLAG_ENC_CAPTURE;
+ else
+ fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
+ SJPEG_FMT_FLAG_DEC_OUTPUT :
+ SJPEG_FMT_FLAG_DEC_CAPTURE;
+
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ ver_flag = SJPEG_FMT_FLAG_S5P;
+ else
+ ver_flag = SJPEG_FMT_FLAG_EXYNOS4;
- for (k = 0; k < n; k++) {
- struct s5p_jpeg_fmt *fmt = &formats[k];
- if (fmt->fourcc == pixelformat)
+ for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
+ struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+ if (fmt->fourcc == pixelformat &&
+ fmt->flags & fmt_flag &&
+ fmt->flags & ver_flag) {
return fmt;
+ }
}
return NULL;
-
}
static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
@@ -673,7 +1020,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported */
- if (q_type == MEM2MEM_OUTPUT)
+ if (q_type == FMT_TYPE_OUTPUT)
jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, 0,
&pix->height, S5P_JPEG_MIN_HEIGHT,
@@ -695,7 +1042,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
bpl = pix->width; /* planar */
if (fmt->colplanes == 1 && /* packed */
- (bpl << 3) * fmt->depth < pix->width)
+ (bpl << 3) / fmt->depth < pix->width)
bpl = (pix->width * fmt->depth) >> 3;
pix->bytesperline = bpl;
@@ -709,17 +1056,41 @@ static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_fmt *fmt;
+ int ret;
- fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
- if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_CAPTURE);
+ if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
- return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE);
+ /*
+ * The exynos4x12 device requires resulting YUV image
+ * subsampling not to be lower than the input jpeg subsampling.
+ * If this requirement is not met then downgrade the requested
+ * capture format to the one with subsampling equal to the input jpeg.
+ */
+ if ((ctx->jpeg->variant->version != SJPEG_S5P) &&
+ (ctx->mode == S5P_JPEG_DECODE) &&
+ (fmt->flags & SJPEG_FMT_NON_RGB) &&
+ (fmt->subsampling < ctx->subsampling)) {
+ ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling,
+ fmt->fourcc,
+ &pix->pixelformat,
+ ctx);
+ if (ret < 0)
+ pix->pixelformat = V4L2_PIX_FMT_GREY;
+
+ fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
+ FMT_TYPE_CAPTURE);
+ }
+
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE);
}
static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
@@ -728,15 +1099,16 @@ static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
struct s5p_jpeg_fmt *fmt;
- fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
- if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
+ FMT_TYPE_OUTPUT);
+ if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
- return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT);
+ return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT);
}
static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
@@ -744,8 +1116,10 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_ctrl *ctrl_subs;
+ unsigned int f_type;
- vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -757,7 +1131,10 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
return -EBUSY;
}
- q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat);
+ f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE;
+
+ q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type);
q_data->w = pix->width;
q_data->h = pix->height;
if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)
@@ -765,6 +1142,13 @@ static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
else
q_data->size = pix->sizeimage;
+ if (f_type == FMT_TYPE_OUTPUT) {
+ ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler,
+ V4L2_CID_JPEG_CHROMA_SUBSAMPLING);
+ if (ctrl_subs)
+ v4l2_ctrl_s_ctrl(ctrl_subs, q_data->fmt->subsampling);
+ }
+
return 0;
}
@@ -792,60 +1176,14 @@ static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
}
-static int s5p_jpeg_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int s5p_jpeg_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int s5p_jpeg_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int s5p_jpeg_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ ctx->jpeg->variant->version != SJPEG_S5P)
return -EINVAL;
/* For JPEG blob active == default == bounds */
@@ -884,12 +1222,7 @@ static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
spin_lock_irqsave(&jpeg->slock, flags);
-
- WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY);
- if (ctx->subsampling > 2)
- ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
- else
- ctrl->val = ctx->subsampling;
+ ctrl->val = s5p_jpeg_to_user_subsampling(ctx);
spin_unlock_irqrestore(&jpeg->slock, flags);
break;
}
@@ -897,6 +1230,40 @@ static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
+static int s5p_jpeg_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ if (ctrl->id == V4L2_CID_JPEG_CHROMA_SUBSAMPLING) {
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ goto error_free;
+ /*
+ * The exynos4x12 device requires input raw image fourcc
+ * to be V4L2_PIX_FMT_GREY if gray jpeg format
+ * is to be set.
+ */
+ if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY &&
+ ctrl->val == V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
+ ret = -EINVAL;
+ goto error_free;
+ }
+ /*
+ * The exynos4x12 device requires resulting jpeg subsampling
+ * not to be lower than the input raw image subsampling.
+ */
+ if (ctx->out_q.fmt->subsampling > ctrl->val)
+ ctrl->val = ctx->out_q.fmt->subsampling;
+ }
+
+error_free:
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+ return ret;
+}
+
static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
@@ -906,7 +1273,7 @@ static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val;
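+ /*
+ * The quantization tables are now ordered from worst (0) to
+ * best (3) image quality, so the control value is used as a
+ * direct index.
+ */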
+ ctx->compr_quality = ctrl->val;
break;
case V4L2_CID_JPEG_RESTART_INTERVAL:
ctx->restart_interval = ctrl->val;
@@ -922,6 +1289,7 @@ static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
.g_volatile_ctrl = s5p_jpeg_g_volatile_ctrl,
+ .try_ctrl = s5p_jpeg_try_ctrl,
.s_ctrl = s5p_jpeg_s_ctrl,
};
@@ -929,18 +1297,20 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
{
unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
struct v4l2_ctrl *ctrl;
+ int ret;
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
if (ctx->mode == S5P_JPEG_ENCODE) {
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY,
- 0, 3, 1, 3);
+ 0, 3, 1, S5P_JPEG_COMPR_QUAL_WORST);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
0, 3, 0xffff, 0);
- mask = ~0x06; /* 422, 420 */
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ mask = ~0x06; /* 422, 420 */
}
ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
@@ -948,13 +1318,24 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
V4L2_JPEG_CHROMA_SUBSAMPLING_422);
- if (ctx->ctrl_handler.error)
- return ctx->ctrl_handler.error;
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
if (ctx->mode == S5P_JPEG_DECODE)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_READ_ONLY;
- return 0;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return ret;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
}
static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
@@ -972,14 +1353,13 @@ static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
.vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap,
.vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out,
- .vidioc_reqbufs = s5p_jpeg_reqbufs,
- .vidioc_querybuf = s5p_jpeg_querybuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_qbuf = s5p_jpeg_qbuf,
- .vidioc_dqbuf = s5p_jpeg_dqbuf,
-
- .vidioc_streamon = s5p_jpeg_streamon,
- .vidioc_streamoff = s5p_jpeg_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = s5p_jpeg_g_selection,
};
@@ -995,74 +1375,181 @@ static void s5p_jpeg_device_run(void *priv)
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *src_buf, *dst_buf;
- unsigned long src_addr, dst_addr;
+ unsigned long src_addr, dst_addr, flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- jpeg_reset(jpeg->regs);
- jpeg_poweron(jpeg->regs);
- jpeg_proc_mode(jpeg->regs, ctx->mode);
+ s5p_jpeg_reset(jpeg->regs);
+ s5p_jpeg_poweron(jpeg->regs);
+ s5p_jpeg_proc_mode(jpeg->regs, ctx->mode);
if (ctx->mode == S5P_JPEG_ENCODE) {
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
- jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_565);
else
- jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
- jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
- jpeg_dri(jpeg->regs, ctx->restart_interval);
- jpeg_x(jpeg->regs, ctx->out_q.w);
- jpeg_y(jpeg->regs, ctx->out_q.h);
- jpeg_imgadr(jpeg->regs, src_addr);
- jpeg_jpgadr(jpeg->regs, dst_addr);
+ s5p_jpeg_input_raw_mode(jpeg->regs,
+ S5P_JPEG_RAW_IN_422);
+ s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
+ s5p_jpeg_dri(jpeg->regs, ctx->restart_interval);
+ s5p_jpeg_x(jpeg->regs, ctx->out_q.w);
+ s5p_jpeg_y(jpeg->regs, ctx->out_q.h);
+ s5p_jpeg_imgadr(jpeg->regs, src_addr);
+ s5p_jpeg_jpgadr(jpeg->regs, dst_addr);
/* ultimately comes from sizeimage from userspace */
- jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+ s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
/* JPEG RGB to YCbCr conversion matrix */
- jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
- jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
- jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
- jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
- jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
- jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
- jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
- jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
- jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+ s5p_jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+ s5p_jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+ s5p_jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+ s5p_jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+ s5p_jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+ s5p_jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+ s5p_jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+ s5p_jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+ s5p_jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
/*
* JPEG IP allows storing 4 quantization tables
* We fill table 0 for luma and table 1 for chroma
*/
- jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
- jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
/* use table 0 for Y */
- jpeg_qtbl(jpeg->regs, 1, 0);
+ s5p_jpeg_qtbl(jpeg->regs, 1, 0);
/* use table 1 for Cb and Cr*/
- jpeg_qtbl(jpeg->regs, 2, 1);
- jpeg_qtbl(jpeg->regs, 3, 1);
+ s5p_jpeg_qtbl(jpeg->regs, 2, 1);
+ s5p_jpeg_qtbl(jpeg->regs, 3, 1);
/* Y, Cb, Cr use Huffman table 0 */
- jpeg_htbl_ac(jpeg->regs, 1);
- jpeg_htbl_dc(jpeg->regs, 1);
- jpeg_htbl_ac(jpeg->regs, 2);
- jpeg_htbl_dc(jpeg->regs, 2);
- jpeg_htbl_ac(jpeg->regs, 3);
- jpeg_htbl_dc(jpeg->regs, 3);
+ s5p_jpeg_htbl_ac(jpeg->regs, 1);
+ s5p_jpeg_htbl_dc(jpeg->regs, 1);
+ s5p_jpeg_htbl_ac(jpeg->regs, 2);
+ s5p_jpeg_htbl_dc(jpeg->regs, 2);
+ s5p_jpeg_htbl_ac(jpeg->regs, 3);
+ s5p_jpeg_htbl_dc(jpeg->regs, 3);
} else { /* S5P_JPEG_DECODE */
- jpeg_rst_int_enable(jpeg->regs, true);
- jpeg_data_num_int_enable(jpeg->regs, true);
- jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+ s5p_jpeg_rst_int_enable(jpeg->regs, true);
+ s5p_jpeg_data_num_int_enable(jpeg->regs, true);
+ s5p_jpeg_final_mcu_num_int_enable(jpeg->regs, true);
if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
- jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
else
- jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
- jpeg_jpgadr(jpeg->regs, src_addr);
- jpeg_imgadr(jpeg->regs, dst_addr);
+ s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
+ s5p_jpeg_jpgadr(jpeg->regs, src_addr);
+ s5p_jpeg_imgadr(jpeg->regs, dst_addr);
}
- jpeg_start(jpeg->regs);
+ s5p_jpeg_start(jpeg->regs);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
+}
+
+static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct s5p_jpeg_fmt *fmt;
+ struct vb2_buffer *vb;
+ struct s5p_jpeg_addr jpeg_addr;
+ u32 pix_size, padding_bytes = 0;
+
+ pix_size = ctx->cap_q.w * ctx->cap_q.h;
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ fmt = ctx->out_q.fmt;
+ if (ctx->out_q.w % 2 && fmt->h_align > 0)
+ padding_bytes = ctx->out_q.h;
+ } else {
+ fmt = ctx->cap_q.fmt;
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ }
+
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (fmt->colplanes == 2) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
+ } else if (fmt->colplanes == 3) {
+ jpeg_addr.cb = jpeg_addr.y + pix_size;
+ if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
+ else
+ jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
+ }
+
+ exynos4_jpeg_set_frame_buf_address(jpeg->regs, &jpeg_addr);
+}
+
+static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
+{
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *vb;
+ unsigned int jpeg_addr = 0;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
+}
+
+static void exynos4_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ unsigned int bitstream_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpeg->slock, flags);
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
+
+ exynos4_jpeg_set_huff_tbl(jpeg->regs);
+
+ /*
+ * JPEG IP allows storing 4 quantization tables
+ * We fill table 0 for luma and table 1 for chroma
+ */
+ exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+
+ exynos4_jpeg_set_encode_tbl_select(jpeg->regs,
+ ctx->compr_quality);
+ exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
+ ctx->cap_q.h);
+
+ exynos4_jpeg_set_enc_out_fmt(jpeg->regs, ctx->subsampling);
+ exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->out_q.fmt->fourcc);
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+ exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
+ ctx->out_q.fmt->fourcc);
+ } else {
+ exynos4_jpeg_sw_reset(jpeg->regs);
+ exynos4_jpeg_set_interrupt(jpeg->regs);
+ exynos4_jpeg_set_img_addr(ctx);
+ exynos4_jpeg_set_jpeg_addr(ctx);
+ exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->cap_q.fmt->fourcc);
+
+ bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
+
+ exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
+ }
+
+ exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
+
+ spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
}
static int s5p_jpeg_job_ready(void *priv)
@@ -1082,6 +1569,12 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
.job_abort = s5p_jpeg_job_abort,
+};
+static struct v4l2_m2m_ops exynos_jpeg_m2m_ops = {
+ .device_run = exynos4_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+ .job_abort = s5p_jpeg_job_abort,
};
/*
@@ -1149,7 +1642,7 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
(unsigned long)vb2_plane_vaddr(vb, 0),
min((unsigned long)ctx->out_q.size,
- vb2_get_plane_payload(vb, 0)));
+ vb2_get_plane_payload(vb, 0)), ctx);
if (!ctx->hdr_parsed) {
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
return;
@@ -1162,30 +1655,9 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
q_data = &ctx->cap_q;
q_data->w = tmp.w;
q_data->h = tmp.h;
-
- jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
- S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
- &q_data->h, S5P_JPEG_MIN_HEIGHT,
- S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align
- );
- q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
}
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
-{
- struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->jpeg->lock);
-}
-
-static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
-{
- struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_lock(&ctx->jpeg->lock);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -1211,8 +1683,8 @@ static struct vb2_ops s5p_jpeg_qops = {
.queue_setup = s5p_jpeg_queue_setup,
.buf_prepare = s5p_jpeg_buf_prepare,
.buf_queue = s5p_jpeg_buf_queue,
- .wait_prepare = s5p_jpeg_wait_prepare,
- .wait_finish = s5p_jpeg_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = s5p_jpeg_start_streaming,
.stop_streaming = s5p_jpeg_stop_streaming,
};
@@ -1230,6 +1702,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &s5p_jpeg_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpeg->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1242,6 +1715,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &s5p_jpeg_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpeg->lock;
return vb2_queue_init(dst_vq);
}
@@ -1267,26 +1741,27 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
- src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
- enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs);
- timer_elapsed = jpeg_timer_stat(jpeg->regs);
- op_completed = jpeg_result_stat_ok(jpeg->regs);
+ enc_jpeg_too_large = s5p_jpeg_enc_stream_stat(jpeg->regs);
+ timer_elapsed = s5p_jpeg_timer_stat(jpeg->regs);
+ op_completed = s5p_jpeg_result_stat_ok(jpeg->regs);
if (curr_ctx->mode == S5P_JPEG_DECODE)
- op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs);
+ op_completed = op_completed &&
+ s5p_jpeg_stream_stat_ok(jpeg->regs);
if (enc_jpeg_too_large) {
state = VB2_BUF_STATE_ERROR;
- jpeg_clear_enc_stream_stat(jpeg->regs);
+ s5p_jpeg_clear_enc_stream_stat(jpeg->regs);
} else if (timer_elapsed) {
state = VB2_BUF_STATE_ERROR;
- jpeg_clear_timer_stat(jpeg->regs);
+ s5p_jpeg_clear_timer_stat(jpeg->regs);
} else if (!op_completed) {
state = VB2_BUF_STATE_ERROR;
} else {
- payload_size = jpeg_compressed_size(jpeg->regs);
+ payload_size = s5p_jpeg_compressed_size(jpeg->regs);
}
dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
@@ -1296,16 +1771,79 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(dst_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
- curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs);
+ curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
spin_unlock(&jpeg->slock);
- jpeg_clear_int(jpeg->regs);
+ s5p_jpeg_clear_int(jpeg->regs);
return IRQ_HANDLED;
}
+static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
+{
+ unsigned int int_status;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct s5p_jpeg *jpeg = priv;
+ struct s5p_jpeg_ctx *curr_ctx;
+ unsigned long payload_size = 0;
+
+ spin_lock(&jpeg->slock);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ int_status = exynos4_jpeg_get_int_status(jpeg->regs);
+
+ if (int_status) {
+ switch (int_status & 0x1f) {
+ case 0x1:
+ jpeg->irq_ret = ERR_PROT;
+ break;
+ case 0x2:
+ jpeg->irq_ret = OK_ENC_OR_DEC;
+ break;
+ case 0x4:
+ jpeg->irq_ret = ERR_DEC_INVALID_FORMAT;
+ break;
+ case 0x8:
+ jpeg->irq_ret = ERR_MULTI_SCAN;
+ break;
+ case 0x10:
+ jpeg->irq_ret = ERR_FRAME;
+ break;
+ default:
+ jpeg->irq_ret = ERR_UNKNOWN;
+ break;
+ }
+ } else {
+ jpeg->irq_ret = ERR_UNKNOWN;
+ }
+
+ if (jpeg->irq_ret == OK_ENC_OR_DEC) {
+ if (curr_ctx->mode == S5P_JPEG_ENCODE) {
+ payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
+ vb2_set_plane_payload(dst_vb, 0, payload_size);
+ }
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ } else {
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
+
+ spin_unlock(&jpeg->slock);
+ return IRQ_HANDLED;
+}
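
The five low-order status bits decoded in the switch above line up with the EXYNOS4_*_INT_EN bit definitions added to jpeg-regs.h later in this patch. A minimal sketch of the same mapping written against the named bits (not part of the driver, and it assumes only one status bit is raised at a time, which is also what the exact-value switch relies on):

static enum exynos4_jpeg_result exynos4_jpeg_decode_status(unsigned int status)
{
	/* Bit positions taken from the EXYNOS4_*_INT_EN definitions in jpeg-regs.h */
	if (status & EXYNOS4_IMG_COMPLETION_INT_EN)	/* 0x2 */
		return OK_ENC_OR_DEC;
	if (status & EXYNOS4_PROT_ERR_INT_EN)		/* 0x1 */
		return ERR_PROT;
	if (status & EXYNOS4_DEC_INVALID_FORMAT_EN)	/* 0x4 */
		return ERR_DEC_INVALID_FORMAT;
	if (status & EXYNOS4_MULTI_SCAN_ERROR_EN)	/* 0x8 */
		return ERR_MULTI_SCAN;
	if (status & EXYNOS4_FRAME_ERR_EN)		/* 0x10 */
		return ERR_FRAME;
	return ERR_UNKNOWN;
}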
+
+static void *jpeg_get_drv_data(struct platform_device *pdev);
+
/*
* ============================================================================
* Driver basic infrastructure
@@ -1316,13 +1854,19 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
struct resource *res;
+ struct v4l2_m2m_ops *samsung_jpeg_m2m_ops;
int ret;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
/* JPEG IP abstraction struct */
jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
if (!jpeg)
return -ENOMEM;
+ jpeg->variant = jpeg_get_drv_data(pdev);
+
mutex_init(&jpeg->lock);
spin_lock_init(&jpeg->slock);
jpeg->dev = &pdev->dev;
@@ -1341,8 +1885,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
- dev_name(&pdev->dev), jpeg);
+ ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
+ 0, dev_name(&pdev->dev), jpeg);
if (ret) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
return ret;
@@ -1356,7 +1900,6 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
return ret;
}
dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
- clk_prepare_enable(jpeg->clk);
/* v4l2 device */
ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
@@ -1365,8 +1908,13 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
goto clk_get_rollback;
}
+ if (jpeg->variant->version == SJPEG_S5P)
+ samsung_jpeg_m2m_ops = &s5p_jpeg_m2m_ops;
+ else
+ samsung_jpeg_m2m_ops = &exynos_jpeg_m2m_ops;
+
/* mem2mem device */
- jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops);
+ jpeg->m2m_dev = v4l2_m2m_init(samsung_jpeg_m2m_ops);
if (IS_ERR(jpeg->m2m_dev)) {
v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(jpeg->m2m_dev);
@@ -1387,8 +1935,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto vb2_allocator_rollback;
}
- strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME,
- sizeof(jpeg->vfd_encoder->name));
+ snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
+ "%s-enc", S5P_JPEG_M2M_NAME);
jpeg->vfd_encoder->fops = &s5p_jpeg_fops;
jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_encoder->minor = -1;
@@ -1415,8 +1963,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto enc_vdev_register_rollback;
}
- strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME,
- sizeof(jpeg->vfd_decoder->name));
+ snprintf(jpeg->vfd_decoder->name, sizeof(jpeg->vfd_decoder->name),
+ "%s-dec", S5P_JPEG_M2M_NAME);
jpeg->vfd_decoder->fops = &s5p_jpeg_fops;
jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_decoder->minor = -1;
@@ -1464,7 +2012,6 @@ device_register_rollback:
v4l2_device_unregister(&jpeg->v4l2_dev);
clk_get_rollback:
- clk_disable_unprepare(jpeg->clk);
clk_put(jpeg->clk);
return ret;
@@ -1484,7 +2031,9 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
- clk_disable_unprepare(jpeg->clk);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ clk_disable_unprepare(jpeg->clk);
+
clk_put(jpeg->clk);
return 0;
@@ -1492,41 +2041,119 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
static int s5p_jpeg_runtime_suspend(struct device *dev)
{
+ struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(jpeg->clk);
+
return 0;
}
static int s5p_jpeg_runtime_resume(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ ret = clk_prepare_enable(jpeg->clk);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&jpeg->slock, flags);
+
/*
* JPEG IP allows storing two Huffman tables for each component
- * We fill table 0 for each component
+ * We fill table 0 for each component and do this here only
+ * for the S5PC210 device, as Exynos4x12 requires programming its
+ * Huffman tables each time the encoding process is initialized.
*/
- jpeg_set_hdctbl(jpeg->regs);
- jpeg_set_hdctblg(jpeg->regs);
- jpeg_set_hactbl(jpeg->regs);
- jpeg_set_hactblg(jpeg->regs);
+ if (jpeg->variant->version == SJPEG_S5P) {
+ s5p_jpeg_set_hdctbl(jpeg->regs);
+ s5p_jpeg_set_hdctblg(jpeg->regs);
+ s5p_jpeg_set_hactbl(jpeg->regs);
+ s5p_jpeg_set_hactblg(jpeg->regs);
+ }
+
+ spin_unlock_irqrestore(&jpeg->slock, flags);
+
return 0;
}
+static int s5p_jpeg_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return s5p_jpeg_runtime_suspend(dev);
+}
+
+static int s5p_jpeg_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return s5p_jpeg_runtime_resume(dev);
+}
+
static const struct dev_pm_ops s5p_jpeg_pm_ops = {
- .runtime_suspend = s5p_jpeg_runtime_suspend,
- .runtime_resume = s5p_jpeg_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
+ SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume, NULL)
+};
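
With both system sleep and runtime PM enabled in the kernel configuration, the two macros above expand to roughly the following initializer (field names per <linux/pm.h>; shown only to make the wiring explicit, this is not additional code):

static const struct dev_pm_ops s5p_jpeg_pm_ops = {
	/* from SET_SYSTEM_SLEEP_PM_OPS() */
	.suspend	 = s5p_jpeg_suspend,
	.resume		 = s5p_jpeg_resume,
	.freeze		 = s5p_jpeg_suspend,
	.thaw		 = s5p_jpeg_resume,
	.poweroff	 = s5p_jpeg_suspend,
	.restore	 = s5p_jpeg_resume,
	/* from SET_RUNTIME_PM_OPS() */
	.runtime_suspend = s5p_jpeg_runtime_suspend,
	.runtime_resume	 = s5p_jpeg_runtime_resume,
};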
+
+#ifdef CONFIG_OF
+static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
+ .version = SJPEG_S5P,
+ .jpeg_irq = s5p_jpeg_irq,
+};
+
+static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
+ .version = SJPEG_EXYNOS4,
+ .jpeg_irq = exynos4_jpeg_irq,
+};
+
+static const struct of_device_id samsung_jpeg_match[] = {
+ {
+ .compatible = "samsung,s5pv210-jpeg",
+ .data = &s5p_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4210-jpeg",
+ .data = &s5p_jpeg_drvdata,
+ }, {
+ .compatible = "samsung,exynos4212-jpeg",
+ .data = &exynos4_jpeg_drvdata,
+ },
+ {},
};
+MODULE_DEVICE_TABLE(of, samsung_jpeg_match);
+
+static void *jpeg_get_drv_data(struct platform_device *pdev)
+{
+ struct s5p_jpeg_variant *driver_data = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_node(of_match_ptr(samsung_jpeg_match),
+ pdev->dev.of_node);
+ if (match)
+ driver_data = (struct s5p_jpeg_variant *)match->data;
+
+ return driver_data;
+}
+#endif
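
For reference, later kernels provide of_device_get_match_data(), which would make the lookup above a one-liner; this is only a hypothetical alternative, the helper did not exist when this driver was merged:

#include <linux/of_device.h>

static void *jpeg_get_drv_data(struct platform_device *pdev)
{
	/* Equivalent to the of_match_node() lookup above */
	return (void *)of_device_get_match_data(&pdev->dev);
}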
+
static struct platform_driver s5p_jpeg_driver = {
.probe = s5p_jpeg_probe,
.remove = s5p_jpeg_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = S5P_JPEG_M2M_NAME,
- .pm = &s5p_jpeg_pm_ops,
+ .of_match_table = of_match_ptr(samsung_jpeg_match),
+ .owner = THIS_MODULE,
+ .name = S5P_JPEG_M2M_NAME,
+ .pm = &s5p_jpeg_pm_ops,
},
};
module_platform_driver(s5p_jpeg_driver);
MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 8a4013e3aee7..f482dbf55d5f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -13,6 +13,7 @@
#ifndef JPEG_CORE_H_
#define JPEG_CORE_H_
+#include <linux/interrupt.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
@@ -43,8 +44,45 @@
#define DHP 0xde
/* Flags that indicate a format can be used for capture/output */
-#define MEM2MEM_CAPTURE (1 << 0)
-#define MEM2MEM_OUTPUT (1 << 1)
+#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
+#define SJPEG_FMT_FLAG_ENC_OUTPUT (1 << 1)
+#define SJPEG_FMT_FLAG_DEC_CAPTURE (1 << 2)
+#define SJPEG_FMT_FLAG_DEC_OUTPUT (1 << 3)
+#define SJPEG_FMT_FLAG_S5P (1 << 4)
+#define SJPEG_FMT_FLAG_EXYNOS4 (1 << 5)
+#define SJPEG_FMT_RGB (1 << 6)
+#define SJPEG_FMT_NON_RGB (1 << 7)
+
+#define S5P_JPEG_ENCODE 0
+#define S5P_JPEG_DECODE 1
+
+#define FMT_TYPE_OUTPUT 0
+#define FMT_TYPE_CAPTURE 1
+
+#define SJPEG_SUBSAMPLING_444 0x11
+#define SJPEG_SUBSAMPLING_422 0x21
+#define SJPEG_SUBSAMPLING_420 0x22
+
+/* Version numbers */
+
+#define SJPEG_S5P 1
+#define SJPEG_EXYNOS4 2
+
+enum exynos4_jpeg_result {
+ OK_ENC_OR_DEC,
+ ERR_PROT,
+ ERR_DEC_INVALID_FORMAT,
+ ERR_MULTI_SCAN,
+ ERR_FRAME,
+ ERR_UNKNOWN,
+};
+
+enum exynos4_jpeg_img_quality_level {
+ QUALITY_LEVEL_1 = 0, /* high */
+ QUALITY_LEVEL_2,
+ QUALITY_LEVEL_3,
+ QUALITY_LEVEL_4, /* low */
+};
/**
* struct s5p_jpeg - JPEG IP abstraction
@@ -71,9 +109,16 @@ struct s5p_jpeg {
void __iomem *regs;
unsigned int irq;
+ enum exynos4_jpeg_result irq_ret;
struct clk *clk;
struct device *dev;
void *alloc_ctx;
+ struct s5p_jpeg_variant *variant;
+};
+
+struct s5p_jpeg_variant {
+ unsigned int version;
+ irqreturn_t (*jpeg_irq)(int irq, void *priv);
};
/**
@@ -84,16 +129,18 @@ struct s5p_jpeg {
* @colplanes: number of color planes (1 for packed formats)
* @h_align: horizontal alignment order (align to 2^h_align)
* @v_align: vertical alignment order (align to 2^v_align)
- * @types: types of queue this format is applicable to
+ * @flags: flags describing format applicability
*/
struct s5p_jpeg_fmt {
char *name;
u32 fourcc;
int depth;
int colplanes;
+ int memplanes;
int h_align;
int v_align;
- u32 types;
+ int subsampling;
+ u32 flags;
};
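
As a rough illustration of how the new flags and fields are meant to be combined (the driver's format table itself is not part of this hunk, so every value below is an assumption, not taken from the driver), an entry for a packed YUYV format usable on both IP variants might look like:

static struct s5p_jpeg_fmt sjpeg_fmt_yuyv_example = {
	.name		= "YUV 4:2:2 packed, YCbYCr",
	.fourcc		= V4L2_PIX_FMT_YUYV,
	.depth		= 16,
	.colplanes	= 1,
	.memplanes	= 1,
	.h_align	= 4,
	.v_align	= 3,
	.subsampling	= V4L2_JPEG_CHROMA_SUBSAMPLING_422,
	.flags		= SJPEG_FMT_FLAG_ENC_OUTPUT |
			  SJPEG_FMT_FLAG_DEC_CAPTURE |
			  SJPEG_FMT_FLAG_S5P |
			  SJPEG_FMT_FLAG_EXYNOS4 |
			  SJPEG_FMT_NON_RGB,
};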
/**
@@ -115,7 +162,6 @@ struct s5p_jpeg_q_data {
* @jpeg: JPEG IP device for this context
* @mode: compression (encode) operation or decompression (decode)
* @compr_quality: destination image quality in compression (encode) mode
- * @m2m_ctx: mem2mem device context
* @out_q: source (output) queue information
* @cap_fmt: destination (capture) queue queue information
* @hdr_parsed: set if header has been parsed during decompression
@@ -127,7 +173,6 @@ struct s5p_jpeg_ctx {
unsigned short compr_quality;
unsigned short restart_interval;
unsigned short subsampling;
- struct v4l2_m2m_ctx *m2m_ctx;
struct s5p_jpeg_q_data out_q;
struct s5p_jpeg_q_data cap_q;
struct v4l2_fh fh;
@@ -147,4 +192,16 @@ struct s5p_jpeg_buffer {
unsigned long data;
};
+/**
+ * struct s5p_jpeg_addr - JPEG converter physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct s5p_jpeg_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
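
A minimal sketch of how this address set would typically be filled from a videobuf2 DMA-contig buffer before being handed to exynos4_jpeg_set_frame_buf_address(); the helper name and the 2-plane layout are assumptions, the corresponding device_run code is not in this hunk:

#include <media/videobuf2-dma-contig.h>

static void fill_jpeg_addr_example(struct vb2_buffer *vb,
				   struct s5p_jpeg_addr *addr,
				   unsigned int luma_size)
{
	/* Fields are u32, matching the 32-bit DMA addresses on these SoCs */
	addr->y  = vb2_dma_contig_plane_dma_addr(vb, 0);
	addr->cb = addr->y + luma_size;	/* chroma plane follows luma */
	addr->cr = 0;			/* unused for 2-plane formats */
}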
+
#endif /* JPEG_CORE_H */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
new file mode 100644
index 000000000000..da8d6a1a984f
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -0,0 +1,279 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Register interface file for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw-exynos4.h"
+#include "jpeg-regs.h"
+
+void exynos4_jpeg_sw_reset(void __iomem *base)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+
+ ndelay(100000);
+
+ writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
+ /* select encode or decode mode in the JPEG control register */
+ if (mode == S5P_JPEG_DECODE) {
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_DEC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ } else {/* encode */
+ writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
+ EXYNOS4_ENC_MODE,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ }
+}
+
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
+
+ switch (img_fmt) {
+ case V4L2_PIX_FMT_GREY:
+ reg = reg | EXYNOS4_ENC_GRAY_IMG | EXYNOS4_GRAY_IMG_IP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_32BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ reg = reg | EXYNOS4_ENC_RGB_IMG |
+ EXYNOS4_RGB_IP_RGB_16BIT_IMG;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV42:
+ reg = reg | EXYNOS4_ENC_YUV_444_IMG |
+ EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+
+ case V4L2_PIX_FMT_YVYU:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV61:
+ reg = reg | EXYNOS4_DEC_YUV_422_IMG |
+ EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
+ EXYNOS4_SWAP_CHROMA_CRCB;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ reg = reg | EXYNOS4_DEC_YUV_420_IMG |
+ EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
+ EXYNOS4_SWAP_CHROMA_CBCR;
+ break;
+ default:
+ break;
+
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_IMG_FMT_REG) &
+ ~EXYNOS4_ENC_FMT_MASK; /* clear enc format */
+
+ switch (out_fmt) {
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
+ reg = reg | EXYNOS4_ENC_FMT_GRAY;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_444;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_422;
+ break;
+
+ case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
+ reg = reg | EXYNOS4_ENC_FMT_YUV_420;
+ break;
+
+ default:
+ break;
+ }
+
+ writel(reg, base + EXYNOS4_IMG_FMT_REG);
+}
+
+void exynos4_jpeg_set_interrupt(void __iomem *base)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
+ writel(reg | EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
+}
+
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
+{
+ unsigned int int_status;
+
+ int_status = readl(base + EXYNOS4_INT_STATUS_REG);
+
+ return int_status;
+}
+
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
+{
+ unsigned int fifo_status;
+
+ fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
+
+ return fifo_status;
+}
+
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~EXYNOS4_HUF_TBL_EN;
+
+ if (value == 1)
+ writel(reg | EXYNOS4_HUF_TBL_EN,
+ base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(reg, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
+{
+ unsigned int reg;
+
+ reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
+
+ if (value == 1)
+ writel(reg | EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+ else
+ writel(reg, base + EXYNOS4_JPEG_CNTL_REG);
+}
+
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address)
+{
+ writel(address, base + EXYNOS4_OUT_MEM_BASE_REG);
+}
+
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value)
+{
+ writel(0x0, base + EXYNOS4_JPEG_IMG_SIZE_REG); /* clear */
+ writel(EXYNOS4_X_SIZE(x_value) | EXYNOS4_Y_SIZE(y_value),
+ base + EXYNOS4_JPEG_IMG_SIZE_REG);
+}
+
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *exynos4_jpeg_addr)
+{
+ writel(exynos4_jpeg_addr->y, base + EXYNOS4_IMG_BA_PLANE_1_REG);
+ writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
+ writel(exynos4_jpeg_addr->cr, base + EXYNOS4_IMG_BA_PLANE_3_REG);
+}
+
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level)
+{
+ unsigned int reg;
+
+ reg = EXYNOS4_Q_TBL_COMP1_0 | EXYNOS4_Q_TBL_COMP2_1 |
+ EXYNOS4_Q_TBL_COMP3_1 |
+ EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 |
+ EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 |
+ EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1;
+
+ writel(reg, base + EXYNOS4_TBL_SEL_REG);
+}
+
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
+{
+ if (fmt == V4L2_PIX_FMT_GREY)
+ writel(0xd2, base + EXYNOS4_HUFF_CNT_REG);
+ else
+ writel(0x1a2, base + EXYNOS4_HUFF_CNT_REG);
+}
+
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
+{
+ unsigned int size;
+
+ size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
+ return size;
+}
+
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_BITSTREAM_SIZE_REG);
+}
+
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height)
+{
+ *width = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) &
+ EXYNOS4_DECODED_SIZE_MASK);
+ *height = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) >> 16) &
+ EXYNOS4_DECODED_SIZE_MASK;
+}
+
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base)
+{
+ return readl(base + EXYNOS4_DECODE_IMG_FMT_REG) &
+ EXYNOS4_JPEG_DECODED_IMG_FMT_MASK;
+}
+
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size)
+{
+ writel(size, base + EXYNOS4_INT_TIMER_COUNT_REG);
+}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
new file mode 100644
index 000000000000..c228d28a4bc7
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * Header file of the register interface for JPEG driver on Exynos4x12.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef JPEG_HW_EXYNOS4_H_
+#define JPEG_HW_EXYNOS4_H_
+
+void exynos4_jpeg_sw_reset(void __iomem *base);
+void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode);
+void exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt);
+void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt);
+void exynos4_jpeg_set_enc_tbl(void __iomem *base);
+void exynos4_jpeg_set_interrupt(void __iomem *base);
+unsigned int exynos4_jpeg_get_int_status(void __iomem *base);
+void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value);
+void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
+ unsigned int address);
+void exynos4_jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value);
+void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
+ struct s5p_jpeg_addr *jpeg_addr);
+void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
+ enum exynos4_jpeg_img_quality_level level);
+void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt);
+void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
+unsigned int exynos4_jpeg_get_stream_size(void __iomem *base);
+void exynos4_jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height);
+unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base);
+unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base);
+void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size);
+
+#endif /* JPEG_HW_EXYNOS4_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index b47e887b6138..52407d790726 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -9,27 +9,15 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef JPEG_HW_H_
-#define JPEG_HW_H_
#include <linux/io.h>
#include <linux/videodev2.h>
-#include "jpeg-hw.h"
+#include "jpeg-core.h"
#include "jpeg-regs.h"
+#include "jpeg-hw-s5p.h"
-#define S5P_JPEG_MIN_WIDTH 32
-#define S5P_JPEG_MIN_HEIGHT 32
-#define S5P_JPEG_MAX_WIDTH 8192
-#define S5P_JPEG_MAX_HEIGHT 8192
-#define S5P_JPEG_ENCODE 0
-#define S5P_JPEG_DECODE 1
-#define S5P_JPEG_RAW_IN_565 0
-#define S5P_JPEG_RAW_IN_422 1
-#define S5P_JPEG_RAW_OUT_422 0
-#define S5P_JPEG_RAW_OUT_420 1
-
-static inline void jpeg_reset(void __iomem *regs)
+void s5p_jpeg_reset(void __iomem *regs)
{
unsigned long reg;
@@ -42,12 +30,12 @@ static inline void jpeg_reset(void __iomem *regs)
}
}
-static inline void jpeg_poweron(void __iomem *regs)
+void s5p_jpeg_poweron(void __iomem *regs)
{
writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
}
-static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
@@ -63,7 +51,7 @@ static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
writel(reg, regs + S5P_JPGCMOD);
}
-static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16)
{
unsigned long reg;
@@ -75,7 +63,7 @@ static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
writel(reg, regs + S5P_JPGCMOD);
}
-static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
@@ -90,7 +78,7 @@ static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
writel(reg, regs + S5P_JPGMOD);
}
-static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
{
unsigned long reg, m;
@@ -105,12 +93,12 @@ static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
writel(reg, regs + S5P_JPGMOD);
}
-static inline unsigned int jpeg_get_subsampling_mode(void __iomem *regs)
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs)
{
return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
}
-static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri)
{
unsigned long reg;
@@ -125,7 +113,7 @@ static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
writel(reg, regs + S5P_JPGDRI_L);
}
-static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
{
unsigned long reg;
@@ -135,7 +123,7 @@ static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
writel(reg, regs + S5P_JPG_QTBL);
}
-static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
{
unsigned long reg;
@@ -146,7 +134,7 @@ static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
writel(reg, regs + S5P_JPG_HTBL);
}
-static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
{
unsigned long reg;
@@ -157,7 +145,7 @@ static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
writel(reg, regs + S5P_JPG_HTBL);
}
-static inline void jpeg_y(void __iomem *regs, unsigned int y)
+void s5p_jpeg_y(void __iomem *regs, unsigned int y)
{
unsigned long reg;
@@ -172,7 +160,7 @@ static inline void jpeg_y(void __iomem *regs, unsigned int y)
writel(reg, regs + S5P_JPGY_L);
}
-static inline void jpeg_x(void __iomem *regs, unsigned int x)
+void s5p_jpeg_x(void __iomem *regs, unsigned int x)
{
unsigned long reg;
@@ -187,7 +175,7 @@ static inline void jpeg_x(void __iomem *regs, unsigned int x)
writel(reg, regs + S5P_JPGX_L);
}
-static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
@@ -198,7 +186,7 @@ static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
@@ -209,7 +197,7 @@ static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
{
unsigned long reg;
@@ -220,7 +208,7 @@ static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
writel(reg, regs + S5P_JPGINTSE);
}
-static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val)
{
unsigned long reg;
@@ -231,7 +219,7 @@ static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline void jpeg_timer_disable(void __iomem *regs)
+void s5p_jpeg_timer_disable(void __iomem *regs)
{
unsigned long reg;
@@ -240,13 +228,13 @@ static inline void jpeg_timer_disable(void __iomem *regs)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline int jpeg_timer_stat(void __iomem *regs)
+int s5p_jpeg_timer_stat(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
>> S5P_TIMER_INT_STAT_SHIFT);
}
-static inline void jpeg_clear_timer_stat(void __iomem *regs)
+void s5p_jpeg_clear_timer_stat(void __iomem *regs)
{
unsigned long reg;
@@ -255,7 +243,7 @@ static inline void jpeg_clear_timer_stat(void __iomem *regs)
writel(reg, regs + S5P_JPG_TIMER_SE);
}
-static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
{
unsigned long reg;
@@ -266,13 +254,13 @@ static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
-static inline int jpeg_enc_stream_stat(void __iomem *regs)
+int s5p_jpeg_enc_stream_stat(void __iomem *regs)
{
return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
S5P_ENC_STREAM_INT_STAT_MASK);
}
-static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs)
{
unsigned long reg;
@@ -281,7 +269,7 @@ static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
-static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format)
{
unsigned long reg, f;
@@ -296,17 +284,17 @@ static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
writel(reg, regs + S5P_JPG_OUTFORM);
}
-static inline void jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_JPGADR);
}
-static inline void jpeg_imgadr(void __iomem *regs, unsigned long addr)
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_IMGADR);
}
-static inline void jpeg_coef(void __iomem *regs, unsigned int i,
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
unsigned int j, unsigned int coef)
{
unsigned long reg;
@@ -317,24 +305,24 @@ static inline void jpeg_coef(void __iomem *regs, unsigned int i,
writel(reg, regs + S5P_JPG_COEF(i));
}
-static inline void jpeg_start(void __iomem *regs)
+void s5p_jpeg_start(void __iomem *regs)
{
writel(1, regs + S5P_JSTART);
}
-static inline int jpeg_result_stat_ok(void __iomem *regs)
+int s5p_jpeg_result_stat_ok(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
>> S5P_RESULT_STAT_SHIFT);
}
-static inline int jpeg_stream_stat_ok(void __iomem *regs)
+int s5p_jpeg_stream_stat_ok(void __iomem *regs)
{
return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
>> S5P_STREAM_STAT_SHIFT);
}
-static inline void jpeg_clear_int(void __iomem *regs)
+void s5p_jpeg_clear_int(void __iomem *regs)
{
unsigned long reg;
@@ -343,7 +331,7 @@ static inline void jpeg_clear_int(void __iomem *regs)
reg = readl(regs + S5P_JPGOPR);
}
-static inline unsigned int jpeg_compressed_size(void __iomem *regs)
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
{
unsigned long jpeg_size = 0;
@@ -353,5 +341,3 @@ static inline unsigned int jpeg_compressed_size(void __iomem *regs)
return (unsigned int)jpeg_size;
}
-
-#endif /* JPEG_HW_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
new file mode 100644
index 000000000000..c11ebe86b9c9
--- /dev/null
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h
@@ -0,0 +1,63 @@
+/* linux/drivers/media/platform/s5p-jpeg/jpeg-hw.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_S5P_H_
+#define JPEG_HW_S5P_H_
+
+#include <linux/io.h>
+#include <linux/videodev2.h>
+
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH 32
+#define S5P_JPEG_MIN_HEIGHT 32
+#define S5P_JPEG_MAX_WIDTH 8192
+#define S5P_JPEG_MAX_HEIGHT 8192
+#define S5P_JPEG_RAW_IN_565 0
+#define S5P_JPEG_RAW_IN_422 1
+#define S5P_JPEG_RAW_OUT_422 0
+#define S5P_JPEG_RAW_OUT_420 1
+
+void s5p_jpeg_reset(void __iomem *regs);
+void s5p_jpeg_poweron(void __iomem *regs);
+void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_input_raw_y16(void __iomem *regs, bool y16);
+void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode);
+void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode);
+unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs);
+void s5p_jpeg_dri(void __iomem *regs, unsigned int dri);
+void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n);
+void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t);
+void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t);
+void s5p_jpeg_y(void __iomem *regs, unsigned int y);
+void s5p_jpeg_x(void __iomem *regs, unsigned int x);
+void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable);
+void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl);
+void s5p_jpeg_timer_enable(void __iomem *regs, unsigned long val);
+void s5p_jpeg_timer_disable(void __iomem *regs);
+int s5p_jpeg_timer_stat(void __iomem *regs);
+void s5p_jpeg_clear_timer_stat(void __iomem *regs);
+void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size);
+int s5p_jpeg_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs);
+void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format);
+void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr);
+void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
+ unsigned int j, unsigned int coef);
+void s5p_jpeg_start(void __iomem *regs);
+int s5p_jpeg_result_stat_ok(void __iomem *regs);
+int s5p_jpeg_stream_stat_ok(void __iomem *regs);
+void s5p_jpeg_clear_int(void __iomem *regs);
+unsigned int s5p_jpeg_compressed_size(void __iomem *regs);
+
+#endif /* JPEG_HW_S5P_H_ */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index 38e50815668c..33f2c7374cfd 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -2,10 +2,11 @@
*
* Register definition file for Samsung JPEG codec driver
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2011-2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,6 +16,8 @@
#ifndef JPEG_REGS_H_
#define JPEG_REGS_H_
+/* Register and bit definitions for S5PC210 */
+
/* JPEG mode register */
#define S5P_JPGMOD 0x00
#define S5P_PROC_MODE_MASK (0x1 << 3)
@@ -166,5 +169,209 @@
/* JPEG AC Huffman table register */
#define S5P_JPG_HACTBLG(n) (0x8c0 + (n) * 0x400)
+
+/* Register and bit definitions for Exynos 4x12 */
+
+/* JPEG Codec Control Registers */
+#define EXYNOS4_JPEG_CNTL_REG 0x00
+#define EXYNOS4_INT_EN_REG 0x04
+#define EXYNOS4_INT_TIMER_COUNT_REG 0x08
+#define EXYNOS4_INT_STATUS_REG 0x0c
+#define EXYNOS4_OUT_MEM_BASE_REG 0x10
+#define EXYNOS4_JPEG_IMG_SIZE_REG 0x14
+#define EXYNOS4_IMG_BA_PLANE_1_REG 0x18
+#define EXYNOS4_IMG_SO_PLANE_1_REG 0x1c
+#define EXYNOS4_IMG_PO_PLANE_1_REG 0x20
+#define EXYNOS4_IMG_BA_PLANE_2_REG 0x24
+#define EXYNOS4_IMG_SO_PLANE_2_REG 0x28
+#define EXYNOS4_IMG_PO_PLANE_2_REG 0x2c
+#define EXYNOS4_IMG_BA_PLANE_3_REG 0x30
+#define EXYNOS4_IMG_SO_PLANE_3_REG 0x34
+#define EXYNOS4_IMG_PO_PLANE_3_REG 0x38
+
+#define EXYNOS4_TBL_SEL_REG 0x3c
+
+#define EXYNOS4_IMG_FMT_REG 0x40
+
+#define EXYNOS4_BITSTREAM_SIZE_REG 0x44
+#define EXYNOS4_PADDING_REG 0x48
+#define EXYNOS4_HUFF_CNT_REG 0x4c
+#define EXYNOS4_FIFO_STATUS_REG 0x50
+#define EXYNOS4_DECODE_XY_SIZE_REG 0x54
+#define EXYNOS4_DECODE_IMG_FMT_REG 0x58
+
+#define EXYNOS4_QUAN_TBL_ENTRY_REG 0x100
+#define EXYNOS4_HUFF_TBL_ENTRY_REG 0x200
+
+
+/****************************************************************/
+/* Bit definition part */
+/****************************************************************/
+
+/* JPEG CNTL Register bit */
+#define EXYNOS4_ENC_DEC_MODE_MASK (0xfffffffc << 0)
+#define EXYNOS4_DEC_MODE (1 << 0)
+#define EXYNOS4_ENC_MODE (1 << 1)
+#define EXYNOS4_AUTO_RST_MARKER (1 << 2)
+#define EXYNOS4_RST_INTERVAL_SHIFT 3
+#define EXYNOS4_RST_INTERVAL(x) (((x) & 0xffff) \
+ << EXYNOS4_RST_INTERVAL_SHIFT)
+#define EXYNOS4_HUF_TBL_EN (1 << 19)
+#define EXYNOS4_HOR_SCALING_SHIFT 20
+#define EXYNOS4_HOR_SCALING_MASK (3 << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_HOR_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_HOR_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING_SHIFT 22
+#define EXYNOS4_VER_SCALING_MASK (3 << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_VER_SCALING(x) (((x) & 0x3) \
+ << EXYNOS4_VER_SCALING_SHIFT)
+#define EXYNOS4_PADDING (1 << 27)
+#define EXYNOS4_SYS_INT_EN (1 << 28)
+#define EXYNOS4_SOFT_RESET_HI (1 << 29)
+
+/* JPEG INT Register bit */
+#define EXYNOS4_INT_EN_MASK (0x1f << 0)
+#define EXYNOS4_PROT_ERR_INT_EN (1 << 0)
+#define EXYNOS4_IMG_COMPLETION_INT_EN (1 << 1)
+#define EXYNOS4_DEC_INVALID_FORMAT_EN (1 << 2)
+#define EXYNOS4_MULTI_SCAN_ERROR_EN (1 << 3)
+#define EXYNOS4_FRAME_ERR_EN (1 << 4)
+#define EXYNOS4_INT_EN_ALL (0x1f << 0)
+
+#define EXYNOS4_MOD_REG_PROC_ENC (0 << 3)
+#define EXYNOS4_MOD_REG_PROC_DEC (1 << 3)
+
+#define EXYNOS4_MOD_REG_SUBSAMPLE_444 (0 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_422 (1 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_420 (2 << 0)
+#define EXYNOS4_MOD_REG_SUBSAMPLE_GRAY (3 << 0)
+
+
+/* JPEG IMAGE SIZE Register bit */
+#define EXYNOS4_X_SIZE_SHIFT 0
+#define EXYNOS4_X_SIZE_MASK (0xffff << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_X_SIZE(x) (((x) & 0xffff) << EXYNOS4_X_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE_SHIFT 16
+#define EXYNOS4_Y_SIZE_MASK (0xffff << EXYNOS4_Y_SIZE_SHIFT)
+#define EXYNOS4_Y_SIZE(x) (((x) & 0xffff) << EXYNOS4_Y_SIZE_SHIFT)
+
+/* JPEG IMAGE FORMAT Register bit */
+#define EXYNOS4_ENC_IN_FMT_MASK 0xffff0000
+#define EXYNOS4_ENC_GRAY_IMG (0 << 0)
+#define EXYNOS4_ENC_RGB_IMG (1 << 0)
+#define EXYNOS4_ENC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_ENC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_ENC_YUV_440_IMG (4 << 0)
+
+#define EXYNOS4_DEC_GRAY_IMG (0 << 0)
+#define EXYNOS4_DEC_RGB_IMG (1 << 0)
+#define EXYNOS4_DEC_YUV_444_IMG (2 << 0)
+#define EXYNOS4_DEC_YUV_422_IMG (3 << 0)
+#define EXYNOS4_DEC_YUV_420_IMG (4 << 0)
+
+#define EXYNOS4_GRAY_IMG_IP_SHIFT 3
+#define EXYNOS4_GRAY_IMG_IP_MASK (7 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+#define EXYNOS4_GRAY_IMG_IP (4 << EXYNOS4_GRAY_IMG_IP_SHIFT)
+
+#define EXYNOS4_RGB_IP_SHIFT 6
+#define EXYNOS4_RGB_IP_MASK (7 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_16BIT_IMG (4 << EXYNOS4_RGB_IP_SHIFT)
+#define EXYNOS4_RGB_IP_RGB_32BIT_IMG (5 << EXYNOS4_RGB_IP_SHIFT)
+
+#define EXYNOS4_YUV_444_IP_SHIFT 9
+#define EXYNOS4_YUV_444_IP_MASK (7 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_2P_IMG (4 << EXYNOS4_YUV_444_IP_SHIFT)
+#define EXYNOS4_YUV_444_IP_YUV_444_3P_IMG (5 << EXYNOS4_YUV_444_IP_SHIFT)
+
+#define EXYNOS4_YUV_422_IP_SHIFT 12
+#define EXYNOS4_YUV_422_IP_MASK (7 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_1P_IMG (4 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_2P_IMG (5 << EXYNOS4_YUV_422_IP_SHIFT)
+#define EXYNOS4_YUV_422_IP_YUV_422_3P_IMG (6 << EXYNOS4_YUV_422_IP_SHIFT)
+
+#define EXYNOS4_YUV_420_IP_SHIFT 15
+#define EXYNOS4_YUV_420_IP_MASK (7 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_2P_IMG (4 << EXYNOS4_YUV_420_IP_SHIFT)
+#define EXYNOS4_YUV_420_IP_YUV_420_3P_IMG (5 << EXYNOS4_YUV_420_IP_SHIFT)
+
+#define EXYNOS4_ENC_FMT_SHIFT 24
+#define EXYNOS4_ENC_FMT_MASK (3 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_GRAY (0 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_444 (1 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_422 (2 << EXYNOS4_ENC_FMT_SHIFT)
+#define EXYNOS4_ENC_FMT_YUV_420 (3 << EXYNOS4_ENC_FMT_SHIFT)
+
+#define EXYNOS4_JPEG_DECODED_IMG_FMT_MASK 0x03
+
+#define EXYNOS4_SWAP_CHROMA_CRCB (1 << 26)
+#define EXYNOS4_SWAP_CHROMA_CBCR (0 << 26)
+
+/* JPEG HUFF count Register bit */
+#define EXYNOS4_HUFF_COUNT_MASK 0xffff
+
+/* JPEG Decoded_img_x_y_size Register bit */
+#define EXYNOS4_DECODED_SIZE_MASK 0x0000ffff
+
+/* JPEG Decoded image format Register bit */
+#define EXYNOS4_DECODED_IMG_FMT_MASK 0x3
+
+/* JPEG TBL SEL Register bit */
+#define EXYNOS4_Q_TBL_COMP1_0 (0 << 0)
+#define EXYNOS4_Q_TBL_COMP1_1 (1 << 0)
+#define EXYNOS4_Q_TBL_COMP1_2 (2 << 0)
+#define EXYNOS4_Q_TBL_COMP1_3 (3 << 0)
+
+#define EXYNOS4_Q_TBL_COMP2_0 (0 << 2)
+#define EXYNOS4_Q_TBL_COMP2_1 (1 << 2)
+#define EXYNOS4_Q_TBL_COMP2_2 (2 << 2)
+#define EXYNOS4_Q_TBL_COMP2_3 (3 << 2)
+
+#define EXYNOS4_Q_TBL_COMP3_0 (0 << 4)
+#define EXYNOS4_Q_TBL_COMP3_1 (1 << 4)
+#define EXYNOS4_Q_TBL_COMP3_2 (2 << 4)
+#define EXYNOS4_Q_TBL_COMP3_3 (3 << 4)
+
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_0 (0 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 (1 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_0 (2 << 6)
+#define EXYNOS4_HUFF_TBL_COMP1_AC_1_DC_1 (3 << 6)
+
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 (0 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_1 (1 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_0 (2 << 8)
+#define EXYNOS4_HUFF_TBL_COMP2_AC_1_DC_1 (3 << 8)
+
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_0 (0 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_0_DC_1 (1 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_0 (2 << 10)
+#define EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1 (3 << 10)
+
+/* JPEG quantizer table register */
+#define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40)
+
+/* JPEG DC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLL 0x200
+
+/* JPEG DC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCLV 0x210
+
+/* JPEG DC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCL 0x220
+
+/* JPEG DC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HDCCV 0x230
+
+/* JPEG AC luminance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLL 0x240
+
+/* JPEG AC luminance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACLV 0x250
+
+/* JPEG AC chrominance (code length) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCL 0x300
+
+/* JPEG AC chrominance (values) Huffman table register */
+#define EXYNOS4_HUFF_TBL_HACCV 0x310
+
#endif /* JPEG_REGS_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e46067a57853..e2aac592d29f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -177,21 +177,6 @@ unlock:
mutex_unlock(&dev->mfc_mutex);
}
-static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (!vdev) {
- mfc_err("failed to get video_device");
- return MFCNODE_INVALID;
- }
- if (vdev->index == 0)
- return MFCNODE_DECODER;
- else if (vdev->index == 1)
- return MFCNODE_ENCODER;
- return MFCNODE_INVALID;
-}
-
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
@@ -705,6 +690,7 @@ irq_cleanup_hw:
/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
+ struct video_device *vdev = video_devdata(file);
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = NULL;
struct vb2_queue *q;
@@ -742,7 +728,7 @@ static int s5p_mfc_open(struct file *file)
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
dev->ctx[ctx->num] = ctx;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
ctx->type = MFCINST_DECODER;
ctx->c_ops = get_dec_codec_ops();
s5p_mfc_dec_init(ctx);
@@ -752,7 +738,7 @@ static int s5p_mfc_open(struct file *file)
mfc_err("Failed to setup mfc controls\n");
goto err_ctrls_setup;
}
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
ctx->type = MFCINST_ENCODER;
ctx->c_ops = get_enc_codec_ops();
/* only for encoder */
@@ -797,10 +783,10 @@ static int s5p_mfc_open(struct file *file)
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
@@ -819,10 +805,10 @@ static int s5p_mfc_open(struct file *file)
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
q->io_modes = VB2_MMAP;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 6920b546181a..f723f1f2f578 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -115,15 +115,6 @@ enum s5p_mfc_fmt_type {
};
/**
- * enum s5p_mfc_node_type - The type of an MFC device node.
- */
-enum s5p_mfc_node_type {
- MFCNODE_INVALID = -1,
- MFCNODE_DECODER = 0,
- MFCNODE_ENCODER = 1,
-};
-
-/**
* enum s5p_mfc_inst_type - The type of an MFC instance.
*/
enum s5p_mfc_inst_type {
@@ -422,6 +413,11 @@ struct s5p_mfc_vp8_enc_params {
enum v4l2_vp8_golden_frame_sel golden_frame_sel;
u8 hier_layer;
u8 hier_layer_qp[3];
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_frame_qp;
+ u8 rc_p_frame_qp;
+ u8 profile;
};
/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 4ff3b6cd6842..91b6e020ddf3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -618,6 +618,46 @@ static struct mfc_control controls[] = {
.default_value = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
.menu_skip_mask = 0,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 127,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 11,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 10,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0,
+ },
};
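
From userspace these new codec parameters are programmed through the standard control interface before streaming starts. A minimal sketch (device handle and values are illustrative, error handling omitted):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void set_vp8_qp_range(int fd)
{
	struct v4l2_control ctrl;

	ctrl.id = V4L2_CID_MPEG_VIDEO_VPX_MIN_QP;
	ctrl.value = 4;
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);

	ctrl.id = V4L2_CID_MPEG_VIDEO_VPX_MAX_QP;
	ctrl.value = 63;
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}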
#define NUM_CTRLS ARRAY_SIZE(controls)
@@ -1557,6 +1597,21 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
p->codec.vp8.golden_frame_sel = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ p->codec.vp8.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ p->codec.vp8.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
+ p->codec.vp8.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
+ p->codec.vp8.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ p->codec.vp8.profile = ctrl->val;
+ break;
default:
v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
ctrl->id, ctrl->val);
@@ -1863,7 +1918,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
if (ctx->src_bufs_cnt < ctx->pb_count) {
mfc_err("Need minimum %d OUTPUT buffers\n",
ctx->pb_count);
- return -EINVAL;
+ return -ENOBUFS;
}
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 461358c4a790..f6ff2dbf3a1d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1197,10 +1197,8 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
reg |= ((p->num_b_frame & 0x3) << 16);
WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
- /* profile & level */
- reg = 0;
- /** profile */
- reg |= (0x1 << 4);
+ /* profile - 0 ~ 3 */
+ reg = p_vp8->profile & 0x3;
WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);
/* rate control config. */
@@ -1218,6 +1216,26 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
}
+ /* frame QP */
+ reg &= ~(0x7F);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
+
+ /* other QPs */
+ WRITEL(0x0, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+ if (!p->rc_frame && !p->rc_mb) {
+ reg = 0;
+ reg |= ((p_vp8->rc_p_frame_qp & 0x7F) << 8);
+ reg |= p_vp8->rc_frame_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_FIXED_PICTURE_QP_V6);
+ }
+
+ /* max QP */
+ reg = ((p_vp8->rc_max_qp & 0x7F) << 8);
+ /* min QP */
+ reg |= p_vp8->rc_min_qp & 0x7F;
+ WRITEL(reg, S5P_FIMV_E_RC_QP_BOUND_V6);
+
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 51805a5e2beb..bc08b5f28e44 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -347,19 +347,41 @@ static int mxr_runtime_resume(struct device *dev)
{
struct mxr_device *mdev = to_mdev(dev);
struct mxr_resources *res = &mdev->res;
+ int ret;
mxr_dbg(mdev, "resume - start\n");
mutex_lock(&mdev->mutex);
/* turn clocks on */
- clk_enable(res->mixer);
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
+ ret = clk_prepare_enable(res->mixer);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n");
+ goto fail;
+ }
+ ret = clk_prepare_enable(res->vp);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n");
+ goto fail_mixer;
+ }
+ ret = clk_prepare_enable(res->sclk_mixer);
+ if (ret < 0) {
+ dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n");
+ goto fail_vp;
+ }
/* apply default configuration */
mxr_reg_reset(mdev);
mxr_dbg(mdev, "resume - finished\n");
mutex_unlock(&mdev->mutex);
return 0;
+
+fail_vp:
+ clk_disable_unprepare(res->vp);
+fail_mixer:
+ clk_disable_unprepare(res->mixer);
+fail:
+ mutex_unlock(&mdev->mutex);
+ dev_err(mdev->dev, "resume failed\n");
+ return ret;
}
static int mxr_runtime_suspend(struct device *dev)
@@ -369,9 +391,9 @@ static int mxr_runtime_suspend(struct device *dev)
mxr_dbg(mdev, "suspend - start\n");
mutex_lock(&mdev->mutex);
/* turn clocks off */
- clk_disable(res->sclk_mixer);
- clk_disable(res->vp);
- clk_disable(res->mixer);
+ clk_disable_unprepare(res->sclk_mixer);
+ clk_disable_unprepare(res->vp);
+ clk_disable_unprepare(res->mixer);
mutex_unlock(&mdev->mutex);
mxr_dbg(mdev, "suspend - finished\n");
return 0;
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 81b97db111d8..c5059ba0d733 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -948,7 +948,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
if (count == 0) {
mxr_dbg(mdev, "no output buffers queued\n");
- return -EINVAL;
+ return -ENOBUFS;
}
/* block any changes in output configuration */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 0afa90f0f6ab..5a7c3796f22e 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -55,6 +55,8 @@ struct sdo_device {
struct clk *dacphy;
/** clock for control of VPLL */
struct clk *fout_vpll;
+ /** vpll rate before sdo stream was on */
+ unsigned long vpll_rate;
/** regulator for SDO IP power */
struct regulator *vdac;
/** regulator for SDO plug detection */
@@ -193,17 +195,33 @@ static int sdo_s_power(struct v4l2_subdev *sd, int on)
static int sdo_streamon(struct sdo_device *sdev)
{
+ int ret;
+
/* set proper clock for Timing Generator */
- clk_set_rate(sdev->fout_vpll, 54000000);
+ sdev->vpll_rate = clk_get_rate(sdev->fout_vpll);
+ ret = clk_set_rate(sdev->fout_vpll, 54000000);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to set vpll rate\n");
+ return ret;
+ }
dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
clk_get_rate(sdev->fout_vpll));
/* enable clock in SDO */
sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
- clk_enable(sdev->dacphy);
+ ret = clk_prepare_enable(sdev->dacphy);
+ if (ret < 0) {
+ dev_err(sdev->dev, "clk_prepare_enable(dacphy) failed\n");
+ goto fail;
+ }
/* enable DAC */
sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
sdo_reg_debug(sdev);
return 0;
+
+fail:
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
+ clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
+ return ret;
}
static int sdo_streamoff(struct sdo_device *sdev)
@@ -211,7 +229,7 @@ static int sdo_streamoff(struct sdo_device *sdev)
int tries;
sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
- clk_disable(sdev->dacphy);
+ clk_disable_unprepare(sdev->dacphy);
sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
for (tries = 100; tries; --tries) {
if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
@@ -220,6 +238,7 @@ static int sdo_streamoff(struct sdo_device *sdev)
}
if (tries == 0)
dev_err(sdev->dev, "failed to stop streaming\n");
+ clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
return tries ? 0 : -EIO;
}
@@ -254,7 +273,7 @@ static int sdo_runtime_suspend(struct device *dev)
dev_info(dev, "suspend\n");
regulator_disable(sdev->vdet);
regulator_disable(sdev->vdac);
- clk_disable(sdev->sclk_dac);
+ clk_disable_unprepare(sdev->sclk_dac);
return 0;
}
@@ -266,7 +285,7 @@ static int sdo_runtime_resume(struct device *dev)
dev_info(dev, "resume\n");
- ret = clk_enable(sdev->sclk_dac);
+ ret = clk_prepare_enable(sdev->sclk_dac);
if (ret < 0)
return ret;
@@ -299,7 +318,7 @@ static int sdo_runtime_resume(struct device *dev)
vdac_r_dis:
regulator_disable(sdev->vdac);
dac_clk_dis:
- clk_disable(sdev->sclk_dac);
+ clk_disable_unprepare(sdev->sclk_dac);
return ret;
}
@@ -405,7 +424,11 @@ static int sdo_probe(struct platform_device *pdev)
}
/* enable gate for dac clock, because mixer uses it */
- clk_enable(sdev->dac);
+ ret = clk_prepare_enable(sdev->dac);
+ if (ret < 0) {
+ dev_err(dev, "clk_prepare_enable(dac) failed\n");
+ goto fail_fout_vpll;
+ }
/* configure power management */
pm_runtime_enable(dev);
@@ -444,7 +467,7 @@ static int sdo_remove(struct platform_device *pdev)
struct sdo_device *sdev = sd_to_sdev(sd);
pm_runtime_disable(&pdev->dev);
- clk_disable(sdev->dac);
+ clk_disable_unprepare(sdev->dac);
clk_put(sdev->fout_vpll);
clk_put(sdev->dacphy);
clk_put(sdev->dac);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4f30341dc2ab..e5f1d4c14f2c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -286,7 +286,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
vb->size = vb->height * bytes_per_line;
if (vb->baddr && vb->bsize < vb->size) {
/* User buffer too small */
- dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+ dev_warn(vq->dev, "User buffer too small: [%zu] @ %lx\n",
vb->bsize, vb->baddr);
return -EINVAL;
}
@@ -302,9 +302,10 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq,
}
dev_dbg(vou_dev->v4l2_dev.dev,
- "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+ "%s(): fmt #%d, %u bytes per line, phys %pad, type %d, state %d\n",
__func__, vou_dev->pix_idx, bytes_per_line,
- videobuf_to_dma_contig(vb), vb->memory, vb->state);
+ ({ dma_addr_t addr = videobuf_to_dma_contig(vb); &addr; }),
+ vb->memory, vb->state);
return 0;
}
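
The %pad specifier prints a dma_addr_t through a pointer (see Documentation/printk-formats.txt), which is why the change wraps videobuf_to_dma_contig() in a statement expression to obtain an addressable temporary. The same idiom written out explicitly, as a sketch assuming the surrounding sh_vou_buf_prepare() context:

dma_addr_t dma_addr = videobuf_to_dma_contig(vb);

dev_dbg(vou_dev->v4l2_dev.dev, "buffer at %pad\n", &dma_addr);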
@@ -442,7 +443,7 @@ static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
int pix_idx, int w_idx, int h_idx)
{
struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
- unsigned int black_left, black_top, width_max, height_max,
+ unsigned int black_left, black_top, width_max,
frame_in_height, frame_out_height, frame_out_top;
struct v4l2_rect *rect = &vou_dev->rect;
struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -450,10 +451,10 @@ static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
if (vou_dev->std & V4L2_STD_525_60) {
width_max = 858;
- height_max = 262;
+ /* height_max = 262; */
} else {
width_max = 864;
- height_max = 312;
+ /* height_max = 312; */
}
frame_in_height = pix->height / 2;
@@ -1052,7 +1053,6 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
static unsigned long j;
struct videobuf_buffer *vb;
static int cnt;
- static int side;
u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
@@ -1080,7 +1080,7 @@ static irqreturn_t sh_vou_isr(int irq, void *dev_id)
irq_status, masked, vou_status, cnt);
cnt++;
- side = vou_status & 0x10000;
+ /* side = vou_status & 0x10000; */
/* Clear only set interrupts */
sh_vou_reg_a_write(vou_dev, VOUIR, masked);
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 104485632501..4835173d7f80 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -34,13 +34,6 @@
#define MIN_FRAME_RATE 15
#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
-/* ISI states */
-enum {
- ISI_STATE_IDLE = 0,
- ISI_STATE_READY,
- ISI_STATE_WAIT_SOF,
-};
-
/* Frame buffer descriptor */
struct fbd {
/* Physical address of the frame buffer */
@@ -75,11 +68,6 @@ struct atmel_isi {
void __iomem *regs;
int sequence;
- /* State of the ISI module in capturing mode */
- int state;
-
- /* Wait queue for waiting for SOF */
- wait_queue_head_t vsync_wq;
struct vb2_alloc_ctx *alloc_ctx;
@@ -124,16 +112,16 @@ static int configure_geometry(struct atmel_isi *isi, u32 width,
case V4L2_MBUS_FMT_Y8_1X8:
cr = ISI_CFG2_GRAYSCALE;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_3;
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_2;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_1;
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
cr = ISI_CFG2_YCC_SWAP_DEFAULT;
break;
/* RGB, TODO */
@@ -144,6 +132,8 @@ static int configure_geometry(struct atmel_isi *isi, u32 width,
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
cfg2 = isi_readl(isi, ISI_CFG2);
+ /* Set YCC swap mode */
+ cfg2 &= ~ISI_CFG2_YCC_SWAP_MODE_MASK;
cfg2 |= cr;
/* Set width */
cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
@@ -207,12 +197,6 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id)
isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
ret = IRQ_HANDLED;
} else {
- if ((pending & ISI_SR_VSYNC) &&
- (isi->state == ISI_STATE_IDLE)) {
- isi->state = ISI_STATE_READY;
- wake_up_interruptible(&isi->vsync_wq);
- ret = IRQ_HANDLED;
- }
if (likely(pending & ISI_SR_CXFR_DONE))
ret = atmel_isi_handle_streaming(isi);
}
@@ -259,16 +243,6 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
unsigned long size;
- int ret;
-
- /* Reset ISI */
- ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
- if (ret < 0) {
- dev_err(icd->parent, "Reset ISI timed out\n");
- return ret;
- }
- /* Disable all interrupts */
- isi_writel(isi, ISI_INTDIS, ~0UL);
size = icd->sizeimage;
@@ -374,6 +348,7 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
/* Enable linked list */
cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
@@ -407,43 +382,27 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
-
u32 sr = 0;
int ret;
- spin_lock_irq(&isi->lock);
- isi->state = ISI_STATE_IDLE;
- /* Clear any pending SOF interrupt */
- sr = isi_readl(isi, ISI_STATUS);
- /* Enable VSYNC interrupt for SOF */
- isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
- isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
- spin_unlock_irq(&isi->lock);
-
- dev_dbg(icd->parent, "Waiting for SOF\n");
- ret = wait_event_interruptible(isi->vsync_wq,
- isi->state != ISI_STATE_IDLE);
- if (ret)
- goto err;
-
- if (isi->state != ISI_STATE_READY) {
- ret = -EIO;
- goto err;
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(icd->parent, "Reset ISI timed out\n");
+ return ret;
}
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, ~0UL);
spin_lock_irq(&isi->lock);
- isi->state = ISI_STATE_WAIT_SOF;
- isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ /* Clear any pending interrupt */
+ sr = isi_readl(isi, ISI_STATUS);
+
if (count)
start_dma(isi, isi->active);
spin_unlock_irq(&isi->lock);
return 0;
-err:
- isi->active = NULL;
- isi->sequence = 0;
- INIT_LIST_HEAD(&isi->video_buffer_list);
- return ret;
}
/* abort streaming and wait for last buffer */
@@ -765,14 +724,16 @@ static int isi_camera_clock_start(struct soc_camera_host *ici)
struct atmel_isi *isi = ici->priv;
int ret;
- ret = clk_enable(isi->pclk);
+ ret = clk_prepare_enable(isi->pclk);
if (ret)
return ret;
- ret = clk_enable(isi->mck);
- if (ret) {
- clk_disable(isi->pclk);
- return ret;
+ if (!IS_ERR(isi->mck)) {
+ ret = clk_prepare_enable(isi->mck);
+ if (ret) {
+ clk_disable_unprepare(isi->pclk);
+ return ret;
+ }
}
return 0;
@@ -783,8 +744,9 @@ static void isi_camera_clock_stop(struct soc_camera_host *ici)
{
struct atmel_isi *isi = ici->priv;
- clk_disable(isi->mck);
- clk_disable(isi->pclk);
+ if (!IS_ERR(isi->mck))
+ clk_disable_unprepare(isi->mck);
+ clk_disable_unprepare(isi->pclk);
}
static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
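Note: the two clock hunks above move to clk_prepare_enable()/clk_disable_unprepare() and treat ISI_MCK as optional by testing IS_ERR() before touching it. A condensed, self-contained sketch of that pattern (hypothetical function name, clocks passed as parameters):

#include <linux/clk.h>
#include <linux/err.h>

/* Sketch: enable a required clock plus an optional one; "mck" may be an
 * ERR_PTR from devm_clk_get() when the platform does not provide it.
 */
static int camera_clocks_on(struct clk *pclk, struct clk *mck)
{
        int ret = clk_prepare_enable(pclk);

        if (ret)
                return ret;

        if (!IS_ERR(mck)) {
                ret = clk_prepare_enable(mck);
                if (ret)
                        clk_disable_unprepare(pclk);
        }
        return ret;
}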
@@ -906,7 +868,6 @@ static int atmel_isi_remove(struct platform_device *pdev)
struct atmel_isi *isi = container_of(soc_host,
struct atmel_isi, soc_host);
- free_irq(isi->irq, isi);
soc_camera_host_unregister(soc_host);
vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
dma_free_coherent(&pdev->dev,
@@ -914,13 +875,6 @@ static int atmel_isi_remove(struct platform_device *pdev)
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
- iounmap(isi->regs);
- clk_unprepare(isi->mck);
- clk_put(isi->mck);
- clk_unprepare(isi->pclk);
- clk_put(isi->pclk);
- kfree(isi);
-
return 0;
}
@@ -928,7 +882,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
{
unsigned int irq;
struct atmel_isi *isi;
- struct clk *pclk;
struct resource *regs;
int ret, i;
struct device *dev = &pdev->dev;
@@ -936,64 +889,50 @@ static int atmel_isi_probe(struct platform_device *pdev)
struct isi_platform_data *pdata;
pdata = dev->platform_data;
- if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
+ if (!pdata || !pdata->data_width_flags) {
dev_err(&pdev->dev,
"No config available for Atmel ISI\n");
return -EINVAL;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
-
- pclk = clk_get(&pdev->dev, "isi_clk");
- if (IS_ERR(pclk))
- return PTR_ERR(pclk);
-
- ret = clk_prepare(pclk);
- if (ret)
- goto err_clk_prepare_pclk;
-
- isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+ isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
if (!isi) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "Can't allocate interface!\n");
- goto err_alloc_isi;
+ return -ENOMEM;
}
- isi->pclk = pclk;
+ isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(isi->pclk))
+ return PTR_ERR(isi->pclk);
+
isi->pdata = pdata;
isi->active = NULL;
spin_lock_init(&isi->lock);
- init_waitqueue_head(&isi->vsync_wq);
INIT_LIST_HEAD(&isi->video_buffer_list);
INIT_LIST_HEAD(&isi->dma_desc_head);
- /* Get ISI_MCK, provided by programmable clock or external clock */
- isi->mck = clk_get(dev, "isi_mck");
- if (IS_ERR(isi->mck)) {
- dev_err(dev, "Failed to get isi_mck\n");
- ret = PTR_ERR(isi->mck);
- goto err_clk_get;
+ /* ISI_MCK is the sensor master clock. It should be handled by the
+ * sensor driver directly, as the ISI has no use for that clock. Make
+ * the clock optional here while platforms transition to the correct
+ * model.
+ */
+ isi->mck = devm_clk_get(dev, "isi_mck");
+ if (!IS_ERR(isi->mck)) {
+ /* Set ISI_MCK's frequency, it should be faster than pixel
+ * clock.
+ */
+ ret = clk_set_rate(isi->mck, pdata->mck_hz);
+ if (ret < 0)
+ return ret;
}
- ret = clk_prepare(isi->mck);
- if (ret)
- goto err_clk_prepare_mck;
-
- /* Set ISI_MCK's frequency, it should be faster than pixel clock */
- ret = clk_set_rate(isi->mck, pdata->mck_hz);
- if (ret < 0)
- goto err_set_mck_rate;
-
isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
&isi->fb_descriptors_phys,
GFP_KERNEL);
if (!isi->p_fb_descriptors) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "Can't allocate descriptors!\n");
- goto err_alloc_descriptors;
+ return -ENOMEM;
}
for (i = 0; i < MAX_BUFFER_NUM; i++) {
@@ -1009,9 +948,10 @@ static int atmel_isi_probe(struct platform_device *pdev)
goto err_alloc_ctx;
}
- isi->regs = ioremap(regs->start, resource_size(regs));
- if (!isi->regs) {
- ret = -ENOMEM;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(isi->regs)) {
+ ret = PTR_ERR(isi->regs);
goto err_ioremap;
}
@@ -1028,7 +968,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
goto err_req_irq;
}
- ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+ ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
if (ret) {
dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
goto err_req_irq;
@@ -1050,9 +990,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return 0;
err_register_soc_camera_host:
- free_irq(isi->irq, isi);
err_req_irq:
- iounmap(isi->regs);
err_ioremap:
vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
err_alloc_ctx:
@@ -1060,17 +998,6 @@ err_alloc_ctx:
sizeof(struct fbd) * MAX_BUFFER_NUM,
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
-err_alloc_descriptors:
-err_set_mck_rate:
- clk_unprepare(isi->mck);
-err_clk_prepare_mck:
- clk_put(isi->mck);
-err_clk_get:
- kfree(isi);
-err_alloc_isi:
- clk_unprepare(pclk);
-err_clk_prepare_pclk:
- clk_put(pclk);
return ret;
}
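Note: the probe() rework above replaces manual clk_get()/ioremap()/request_irq()/kfree() pairs with their devm_* managed counterparts, which is why most of the error-unwind labels disappear: device-managed resources are released automatically when probe fails or the device is removed. A minimal probe skeleton showing the shape of that pattern, with hypothetical "foo" names rather than the ISI ones:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
};

static irqreturn_t foo_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

/* Sketch: everything acquired below is torn down by the driver core. */
static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;
        struct resource *res;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->clk = devm_clk_get(&pdev->dev, "foo_clk");
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;
}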
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 45a0276be4e5..d73abca9c6ee 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -659,7 +659,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
unsigned long flags;
if (count < 2)
- return -EINVAL;
+ return -ENOBUFS;
spin_lock_irqsave(&pcdev->lock, flags);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 6866bb4fbebc..3b1c05a72d00 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -106,7 +106,7 @@
#define VIN_MAX_HEIGHT 2048
enum chip_id {
- RCAR_H2,
+ RCAR_GEN2,
RCAR_H1,
RCAR_M1,
RCAR_E1,
@@ -302,7 +302,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
+ if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
priv->chip == RCAR_E1) {
dmr = VNDMR_EXRGB;
break;
@@ -1384,7 +1384,8 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
};
static struct platform_device_id rcar_vin_id_table[] = {
- { "r8a7790-vin", RCAR_H2 },
+ { "r8a7791-vin", RCAR_GEN2 },
+ { "r8a7790-vin", RCAR_GEN2 },
{ "r8a7779-vin", RCAR_H1 },
{ "r8a7778-vin", RCAR_M1 },
{ "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index cbd3a34f4f3f..8e74fb7f2a07 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -141,8 +141,8 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
* Popular special case - some cameras can only handle fixed sizes like
* QVGA, VGA,... Take care to avoid infinite loop.
*/
- width = max(cam_rect->width, 2);
- height = max(cam_rect->height, 2);
+ width = max_t(unsigned int, cam_rect->width, 2);
+ height = max_t(unsigned int, cam_rect->height, 2);
/*
* Loop as long as sensor is not covering the requested rectangle and
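Note: max() in the kernel requires both arguments to have the same type, so comparing the unsigned rectangle fields with the plain integer 2 triggers a build warning; max_t() casts both operands to the named type first, which is what the hunk above switches to. A one-function sketch, with "width" standing in for cam_rect->width:

#include <linux/kernel.h>

/* Sketch: max() rejects mixed types, max_t() forces one. */
static unsigned int clamp_min_dim(unsigned int width)
{
        return max_t(unsigned int, width, 2);
}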
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index cbf0a806ba1d..be680f839e77 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
-ti-vpe-y := vpe.o vpdma.o
+ti-vpe-y := vpe.o sc.o csc.o vpdma.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
new file mode 100644
index 000000000000..acfea500710e
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -0,0 +1,196 @@
+/*
+ * Color space converter library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include "csc.h"
+
+/*
+ * 16 coefficients in the order:
+ * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
+ * (we may need to pass non-default values from user space later on, we might
+ * need to make the coefficient struct more easy to populate)
+ */
+struct colorspace_coeffs {
+ u16 sd[12];
+ u16 hd[12];
+};
+
+/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
+#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
+#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
+#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
+#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+
+/* default colorspace coefficients */
+static struct colorspace_coeffs colorspace_coeffs[4] = {
+ [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ },
+ {
+ /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
+ {
+ /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+ [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ },
+ },
+ [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
+ {
+ /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ },
+ {
+ /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ },
+ },
+};
+
+void csc_dump_regs(struct csc_data *csc)
+{
+ struct device *dev = &csc->pdev->dev;
+
+ u32 read_reg(struct csc_data *csc, int offset)
+ {
+ return ioread32(csc->base + offset);
+ }
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(csc, CSC_##r))
+
+ DUMPREG(CSC00);
+ DUMPREG(CSC01);
+ DUMPREG(CSC02);
+ DUMPREG(CSC03);
+ DUMPREG(CSC04);
+ DUMPREG(CSC05);
+
+#undef DUMPREG
+}
+
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
+{
+ *csc_reg5 |= CSC_BYPASS;
+}
+
+/*
+ * set the color space converter coefficient shadow register values
+ */
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace)
+{
+ u32 *csc_reg5 = csc_reg0 + 5;
+ u32 *shadow_csc = csc_reg0;
+ struct colorspace_coeffs *sd_hd_coeffs;
+ u16 *coeff, *end_coeff;
+ enum v4l2_colorspace yuv_colorspace;
+ int sel = 0;
+
+ /*
+ * support only graphics data range (full range) for now, a control ioctl
+ * would be nice here
+ */
+ /* Y2R */
+ if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
+ (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ src_colorspace == V4L2_COLORSPACE_REC709)) {
+ /* Y2R */
+ sel = 1;
+ yuv_colorspace = src_colorspace;
+ } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
+ dst_colorspace == V4L2_COLORSPACE_REC709) &&
+ src_colorspace == V4L2_COLORSPACE_SRGB) {
+ /* R2Y */
+ sel = 3;
+ yuv_colorspace = dst_colorspace;
+ } else {
+ *csc_reg5 |= CSC_BYPASS;
+ return;
+ }
+
+ sd_hd_coeffs = &colorspace_coeffs[sel];
+
+ /* select between SD or HD coefficients */
+ if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
+ coeff = sd_hd_coeffs->sd;
+ else
+ coeff = sd_hd_coeffs->hd;
+
+ end_coeff = coeff + 12;
+
+ for (; coeff < end_coeff; coeff += 2)
+ *shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
+}
+
+struct csc_data *csc_create(struct platform_device *pdev)
+{
+ struct csc_data *csc;
+
+ dev_dbg(&pdev->dev, "csc_create\n");
+
+ csc = devm_kzalloc(&pdev->dev, sizeof(*csc), GFP_KERNEL);
+ if (!csc) {
+ dev_err(&pdev->dev, "couldn't alloc csc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ csc->pdev = pdev;
+
+ csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vpe_csc");
+ if (csc->res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
+ if (IS_ERR(csc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(csc->base);
+ }
+
+ return csc;
+}
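Note: csc_set_coeff() above writes the twelve 16-bit coefficients into six 32-bit shadow registers (CSC00..CSC05), two per register with the lower-numbered coefficient in bits 0..15. A standalone sketch of just that packing step:

#include <linux/types.h>

/* Sketch: mirror of the packing loop at the end of csc_set_coeff();
 * two 16-bit coefficients per 32-bit shadow word.
 */
static void pack_csc_coeffs(u32 shadow[6], const u16 coeff[12])
{
        int i;

        for (i = 0; i < 6; i++)
                shadow[i] = ((u32)coeff[2 * i + 1] << 16) | coeff[2 * i];
}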
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
new file mode 100644
index 000000000000..1ad2b6dad561
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_CSC_H
+#define TI_CSC_H
+
+/* VPE color space converter regs */
+#define CSC_CSC00 0x00
+#define CSC_A0_MASK 0x1fff
+#define CSC_A0_SHIFT 0
+#define CSC_B0_MASK 0x1fff
+#define CSC_B0_SHIFT 16
+
+#define CSC_CSC01 0x04
+#define CSC_C0_MASK 0x1fff
+#define CSC_C0_SHIFT 0
+#define CSC_A1_MASK 0x1fff
+#define CSC_A1_SHIFT 16
+
+#define CSC_CSC02 0x08
+#define CSC_B1_MASK 0x1fff
+#define CSC_B1_SHIFT 0
+#define CSC_C1_MASK 0x1fff
+#define CSC_C1_SHIFT 16
+
+#define CSC_CSC03 0x0c
+#define CSC_A2_MASK 0x1fff
+#define CSC_A2_SHIFT 0
+#define CSC_B2_MASK 0x1fff
+#define CSC_B2_SHIFT 16
+
+#define CSC_CSC04 0x10
+#define CSC_C2_MASK 0x1fff
+#define CSC_C2_SHIFT 0
+#define CSC_D0_MASK 0x0fff
+#define CSC_D0_SHIFT 16
+
+#define CSC_CSC05 0x14
+#define CSC_D1_MASK 0x0fff
+#define CSC_D1_SHIFT 0
+#define CSC_D2_MASK 0x0fff
+#define CSC_D2_SHIFT 16
+
+#define CSC_BYPASS (1 << 28)
+
+struct csc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+void csc_dump_regs(struct csc_data *csc);
+void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
+void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
+ enum v4l2_colorspace src_colorspace,
+ enum v4l2_colorspace dst_colorspace);
+struct csc_data *csc_create(struct platform_device *pdev);
+
+#endif
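Note: the CSC_CSC00..CSC05 defines above describe two fields per 32-bit register (13-bit matrix terms, 12-bit offsets). A small sketch that composes one register value from those macros, assuming csc.h is on the include path; the coefficient values are caller-supplied:

#include <linux/types.h>
#include "csc.h"

/* Sketch: build CSC00 from the mask/shift macros; a0 sits in bits 0..12
 * and b0 in bits 16..28.
 */
static u32 csc00_value(u16 a0, u16 b0)
{
        return ((u32)(a0 & CSC_A0_MASK) << CSC_A0_SHIFT) |
               ((u32)(b0 & CSC_B0_MASK) << CSC_B0_SHIFT);
}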
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
new file mode 100644
index 000000000000..93f0af546b76
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -0,0 +1,311 @@
+/*
+ * Scaler library
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sc.h"
+#include "sc_coeff.h"
+
+void sc_dump_regs(struct sc_data *sc)
+{
+ struct device *dev = &sc->pdev->dev;
+
+ u32 read_reg(struct sc_data *sc, int offset)
+ {
+ return ioread32(sc->base + offset);
+ }
+
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(sc, CFG_##r))
+
+ DUMPREG(SC0);
+ DUMPREG(SC1);
+ DUMPREG(SC2);
+ DUMPREG(SC3);
+ DUMPREG(SC4);
+ DUMPREG(SC5);
+ DUMPREG(SC6);
+ DUMPREG(SC8);
+ DUMPREG(SC9);
+ DUMPREG(SC10);
+ DUMPREG(SC11);
+ DUMPREG(SC12);
+ DUMPREG(SC13);
+ DUMPREG(SC17);
+ DUMPREG(SC18);
+ DUMPREG(SC19);
+ DUMPREG(SC20);
+ DUMPREG(SC21);
+ DUMPREG(SC22);
+ DUMPREG(SC23);
+ DUMPREG(SC24);
+ DUMPREG(SC25);
+
+#undef DUMPREG
+}
+
+/*
+ * set the horizontal scaler coefficients according to the ratio of output to
+ * input widths, after accounting for up to two levels of decimation
+ */
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_h = addr;
+ const u16 *cp;
+
+ if (dst_w > src_w) {
+ idx = HS_UP_SCALE;
+ } else {
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* first level decimation */
+ if ((dst_w << 1) < src_w)
+ dst_w <<= 1; /* second level decimation */
+
+ if (dst_w == src_w) {
+ idx = HS_LE_16_16_SCALE;
+ } else {
+ sixteenths = (dst_w << 4) / src_w;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = HS_LT_9_16_SCALE + sixteenths - 8;
+ }
+ }
+
+ if (idx == sc->hs_index)
+ return;
+
+ cp = scaler_hs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_H_NUM_TAPS; j++)
+ *coeff_h++ = *cp++;
+ /*
+ * for each phase, the scaler expects space for 8 coefficients
+ * in its memory. For the horizontal scaler, we copy the first
+ * 7 coefficients and skip the last slot to move to the next
+ * row to hold coefficients for the next phase
+ */
+ coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
+ }
+
+ sc->hs_index = idx;
+
+ sc->load_coeff_h = true;
+}
+
+/*
+ * set the vertical scaler coefficients according to the ratio of output to
+ * input heights
+ */
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h)
+{
+ int sixteenths;
+ int idx;
+ int i, j;
+ u16 *coeff_v = addr;
+ const u16 *cp;
+
+ if (dst_h > src_h) {
+ idx = VS_UP_SCALE;
+ } else if (dst_h == src_h) {
+ idx = VS_1_TO_1_SCALE;
+ } else {
+ sixteenths = (dst_h << 4) / src_h;
+ if (sixteenths < 8)
+ sixteenths = 8;
+ idx = VS_LT_9_16_SCALE + sixteenths - 8;
+ }
+
+ if (idx == sc->vs_index)
+ return;
+
+ cp = scaler_vs_coeffs[idx];
+
+ for (i = 0; i < SC_NUM_PHASES * 2; i++) {
+ for (j = 0; j < SC_V_NUM_TAPS; j++)
+ *coeff_v++ = *cp++;
+ /*
+ * for the vertical scaler, we copy the first 5 coefficients and
+ * skip the last 3 slots to move to the next row to hold
+ * coefficients for the next phase
+ */
+ coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
+ }
+
+ sc->vs_index = idx;
+ sc->load_coeff_v = true;
+}
+
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h)
+{
+ struct device *dev = &sc->pdev->dev;
+ u32 val;
+ int dcm_x, dcm_shift;
+ bool use_rav;
+ unsigned long lltmp;
+ u32 lin_acc_inc, lin_acc_inc_u;
+ u32 col_acc_offset;
+ u16 factor = 0;
+ int row_acc_init_rav = 0, row_acc_init_rav_b = 0;
+ u32 row_acc_inc = 0, row_acc_offset = 0, row_acc_offset_b = 0;
+ /*
+ * location of SC register in payload memory with respect to the first
+ * register in the mmr address data block
+ */
+ u32 *sc_reg9 = sc_reg8 + 1;
+ u32 *sc_reg12 = sc_reg8 + 4;
+ u32 *sc_reg13 = sc_reg8 + 5;
+ u32 *sc_reg24 = sc_reg17 + 7;
+
+ val = sc_reg0[0];
+
+ /* clear all the features (they may get enabled elsewhere later) */
+ val &= ~(CFG_SELFGEN_FID | CFG_TRIM | CFG_ENABLE_SIN2_VER_INTP |
+ CFG_INTERLACE_I | CFG_DCM_4X | CFG_DCM_2X | CFG_AUTO_HS |
+ CFG_ENABLE_EV | CFG_USE_RAV | CFG_INVT_FID | CFG_SC_BYPASS |
+ CFG_INTERLACE_O | CFG_Y_PK_EN | CFG_HP_BYPASS | CFG_LINEAR);
+
+ if (src_w == dst_w && src_h == dst_h) {
+ val |= CFG_SC_BYPASS;
+ sc_reg0[0] = val;
+ return;
+ }
+
+ /* we only support linear scaling for now */
+ val |= CFG_LINEAR;
+
+ /* configure horizontal scaler */
+
+ /* enable 2X or 4X decimation */
+ dcm_x = src_w / dst_w;
+ if (dcm_x > 4) {
+ val |= CFG_DCM_4X;
+ dcm_shift = 2;
+ } else if (dcm_x > 2) {
+ val |= CFG_DCM_2X;
+ dcm_shift = 1;
+ } else {
+ dcm_shift = 0;
+ }
+
+ lltmp = dst_w - 1;
+ lin_acc_inc = div64_u64(((u64)(src_w >> dcm_shift) - 1) << 24, lltmp);
+ lin_acc_inc_u = 0;
+ col_acc_offset = 0;
+
+ dev_dbg(dev, "hs config: src_w = %d, dst_w = %d, decimation = %s, lin_acc_inc = %08x\n",
+ src_w, dst_w, dcm_shift == 2 ? "4x" :
+ (dcm_shift == 1 ? "2x" : "none"), lin_acc_inc);
+
+ /* configure vertical scaler */
+
+ /* use RAV for vertical scaler if vertical downscaling is > 4x */
+ if (dst_h < (src_h >> 2)) {
+ use_rav = true;
+ val |= CFG_USE_RAV;
+ } else {
+ use_rav = false;
+ }
+
+ if (use_rav) {
+ /* use RAV */
+ factor = (u16) ((dst_h << 10) / src_h);
+
+ row_acc_init_rav = factor + ((1 + factor) >> 1);
+ if (row_acc_init_rav >= 1024)
+ row_acc_init_rav -= 1024;
+
+ row_acc_init_rav_b = row_acc_init_rav +
+ (1 + (row_acc_init_rav >> 1)) -
+ (1024 >> 1);
+
+ if (row_acc_init_rav_b < 0) {
+ row_acc_init_rav_b += row_acc_init_rav;
+ row_acc_init_rav *= 2;
+ }
+
+ dev_dbg(dev, "vs config(RAV): src_h = %d, dst_h = %d, factor = %d, acc_init = %08x, acc_init_b = %08x\n",
+ src_h, dst_h, factor, row_acc_init_rav,
+ row_acc_init_rav_b);
+ } else {
+ /* use polyphase */
+ row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);
+ row_acc_offset = 0;
+ row_acc_offset_b = 0;
+
+ dev_dbg(dev, "vs config(POLY): src_h = %d, dst_h = %d,row_acc_inc = %08x\n",
+ src_h, dst_h, row_acc_inc);
+ }
+
+
+ sc_reg0[0] = val;
+ sc_reg0[1] = row_acc_inc;
+ sc_reg0[2] = row_acc_offset;
+ sc_reg0[3] = row_acc_offset_b;
+
+ sc_reg0[4] = ((lin_acc_inc_u & CFG_LIN_ACC_INC_U_MASK) <<
+ CFG_LIN_ACC_INC_U_SHIFT) | (dst_w << CFG_TAR_W_SHIFT) |
+ (dst_h << CFG_TAR_H_SHIFT);
+
+ sc_reg0[5] = (src_w << CFG_SRC_W_SHIFT) | (src_h << CFG_SRC_H_SHIFT);
+
+ sc_reg0[6] = (row_acc_init_rav_b << CFG_ROW_ACC_INIT_RAV_B_SHIFT) |
+ (row_acc_init_rav << CFG_ROW_ACC_INIT_RAV_SHIFT);
+
+ *sc_reg9 = lin_acc_inc;
+
+ *sc_reg12 = col_acc_offset << CFG_COL_ACC_OFFSET_SHIFT;
+
+ *sc_reg13 = factor;
+
+ *sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
+}
+
+struct sc_data *sc_create(struct platform_device *pdev)
+{
+ struct sc_data *sc;
+
+ dev_dbg(&pdev->dev, "sc_create\n");
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc) {
+ dev_err(&pdev->dev, "couldn't alloc sc_data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sc->pdev = pdev;
+
+ sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+ if (!sc->res) {
+ dev_err(&pdev->dev, "missing platform resources data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
+ if (IS_ERR(sc->base)) {
+ dev_err(&pdev->dev, "failed to ioremap\n");
+ return ERR_CAST(sc->base);
+ }
+
+ return sc;
+}
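Note: sc_set_hs_coeffs() above first folds up to two 2x decimation steps into dst_w and then maps the remaining ratio to a coefficient set: upscaling, 1:1, or one of the fractional tables indexed in sixteenths. A standalone sketch of just that index selection, relying on the HS_* enum values from sc_coeff.h:

#include "sc.h"
#include "sc_coeff.h"

/* Sketch: output/input width ratio -> horizontal coefficient table index,
 * following the selection logic of sc_set_hs_coeffs().
 */
static int hs_coeff_index(unsigned int src_w, unsigned int dst_w)
{
        int sixteenths;

        if (dst_w > src_w)
                return HS_UP_SCALE;

        if ((dst_w << 1) < src_w)
                dst_w <<= 1;            /* first 2x decimation */
        if ((dst_w << 1) < src_w)
                dst_w <<= 1;            /* second 2x decimation */

        if (dst_w == src_w)
                return HS_LE_16_16_SCALE;

        /* remaining ratio in sixteenths, clamped at 8/16 */
        sixteenths = (dst_w << 4) / src_w;
        if (sixteenths < 8)
                sixteenths = 8;
        return HS_LT_9_16_SCALE + sixteenths - 8;
}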
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
new file mode 100644
index 000000000000..60e411e05c30
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef TI_SC_H
+#define TI_SC_H
+
+/* Scaler regs */
+#define CFG_SC0 0x0
+#define CFG_INTERLACE_O (1 << 0)
+#define CFG_LINEAR (1 << 1)
+#define CFG_SC_BYPASS (1 << 2)
+#define CFG_INVT_FID (1 << 3)
+#define CFG_USE_RAV (1 << 4)
+#define CFG_ENABLE_EV (1 << 5)
+#define CFG_AUTO_HS (1 << 6)
+#define CFG_DCM_2X (1 << 7)
+#define CFG_DCM_4X (1 << 8)
+#define CFG_HP_BYPASS (1 << 9)
+#define CFG_INTERLACE_I (1 << 10)
+#define CFG_ENABLE_SIN2_VER_INTP (1 << 11)
+#define CFG_Y_PK_EN (1 << 14)
+#define CFG_TRIM (1 << 15)
+#define CFG_SELFGEN_FID (1 << 16)
+
+#define CFG_SC1 0x4
+#define CFG_ROW_ACC_INC_MASK 0x07ffffff
+#define CFG_ROW_ACC_INC_SHIFT 0
+
+#define CFG_SC2 0x08
+#define CFG_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC3 0x0c
+#define CFG_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define CFG_ROW_ACC_OFFSET_B_SHIFT 0
+
+#define CFG_SC4 0x10
+#define CFG_TAR_H_MASK 0x07ff
+#define CFG_TAR_H_SHIFT 0
+#define CFG_TAR_W_MASK 0x07ff
+#define CFG_TAR_W_SHIFT 12
+#define CFG_LIN_ACC_INC_U_MASK 0x07
+#define CFG_LIN_ACC_INC_U_SHIFT 24
+#define CFG_NLIN_ACC_INIT_U_MASK 0x07
+#define CFG_NLIN_ACC_INIT_U_SHIFT 28
+
+#define CFG_SC5 0x14
+#define CFG_SRC_H_MASK 0x07ff
+#define CFG_SRC_H_SHIFT 0
+#define CFG_SRC_W_MASK 0x07ff
+#define CFG_SRC_W_SHIFT 12
+#define CFG_NLIN_ACC_INC_U_MASK 0x07
+#define CFG_NLIN_ACC_INC_U_SHIFT 24
+
+#define CFG_SC6 0x18
+#define CFG_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_SHIFT 0
+#define CFG_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define CFG_ROW_ACC_INIT_RAV_B_SHIFT 10
+
+#define CFG_SC8 0x20
+#define CFG_NLIN_LEFT_MASK 0x07ff
+#define CFG_NLIN_LEFT_SHIFT 0
+#define CFG_NLIN_RIGHT_MASK 0x07ff
+#define CFG_NLIN_RIGHT_SHIFT 12
+
+#define CFG_SC9 0x24
+#define CFG_LIN_ACC_INC CFG_SC9
+
+#define CFG_SC10 0x28
+#define CFG_NLIN_ACC_INIT CFG_SC10
+
+#define CFG_SC11 0x2c
+#define CFG_NLIN_ACC_INC CFG_SC11
+
+#define CFG_SC12 0x30
+#define CFG_COL_ACC_OFFSET_MASK 0x01ffffff
+#define CFG_COL_ACC_OFFSET_SHIFT 0
+
+#define CFG_SC13 0x34
+#define CFG_SC_FACTOR_RAV_MASK 0xff
+#define CFG_SC_FACTOR_RAV_SHIFT 0
+#define CFG_CHROMA_INTP_THR_MASK 0x03ff
+#define CFG_CHROMA_INTP_THR_SHIFT 12
+#define CFG_DELTA_CHROMA_THR_MASK 0x0f
+#define CFG_DELTA_CHROMA_THR_SHIFT 24
+
+#define CFG_SC17 0x44
+#define CFG_EV_THR_MASK 0x03ff
+#define CFG_EV_THR_SHIFT 12
+#define CFG_DELTA_LUMA_THR_MASK 0x0f
+#define CFG_DELTA_LUMA_THR_SHIFT 24
+#define CFG_DELTA_EV_THR_MASK 0x0f
+#define CFG_DELTA_EV_THR_SHIFT 28
+
+#define CFG_SC18 0x48
+#define CFG_HS_FACTOR_MASK 0x03ff
+#define CFG_HS_FACTOR_SHIFT 0
+#define CFG_CONF_DEFAULT_MASK 0x01ff
+#define CFG_CONF_DEFAULT_SHIFT 16
+
+#define CFG_SC19 0x4c
+#define CFG_HPF_COEFF0_MASK 0xff
+#define CFG_HPF_COEFF0_SHIFT 0
+#define CFG_HPF_COEFF1_MASK 0xff
+#define CFG_HPF_COEFF1_SHIFT 8
+#define CFG_HPF_COEFF2_MASK 0xff
+#define CFG_HPF_COEFF2_SHIFT 16
+#define CFG_HPF_COEFF3_MASK 0xff
+#define CFG_HPF_COEFF3_SHIFT 23
+
+#define CFG_SC20 0x50
+#define CFG_HPF_COEFF4_MASK 0xff
+#define CFG_HPF_COEFF4_SHIFT 0
+#define CFG_HPF_COEFF5_MASK 0xff
+#define CFG_HPF_COEFF5_SHIFT 8
+#define CFG_HPF_NORM_SHIFT_MASK 0x07
+#define CFG_HPF_NORM_SHIFT_SHIFT 16
+#define CFG_NL_LIMIT_MASK 0x1ff
+#define CFG_NL_LIMIT_SHIFT 20
+
+#define CFG_SC21 0x54
+#define CFG_NL_LO_THR_MASK 0x01ff
+#define CFG_NL_LO_THR_SHIFT 0
+#define CFG_NL_LO_SLOPE_MASK 0xff
+#define CFG_NL_LO_SLOPE_SHIFT 16
+
+#define CFG_SC22 0x58
+#define CFG_NL_HI_THR_MASK 0x01ff
+#define CFG_NL_HI_THR_SHIFT 0
+#define CFG_NL_HI_SLOPE_SH_MASK 0x07
+#define CFG_NL_HI_SLOPE_SH_SHIFT 16
+
+#define CFG_SC23 0x5c
+#define CFG_GRADIENT_THR_MASK 0x07ff
+#define CFG_GRADIENT_THR_SHIFT 0
+#define CFG_GRADIENT_THR_RANGE_MASK 0x0f
+#define CFG_GRADIENT_THR_RANGE_SHIFT 12
+#define CFG_MIN_GY_THR_MASK 0xff
+#define CFG_MIN_GY_THR_SHIFT 16
+#define CFG_MIN_GY_THR_RANGE_MASK 0x0f
+#define CFG_MIN_GY_THR_RANGE_SHIFT 28
+
+#define CFG_SC24 0x60
+#define CFG_ORG_H_MASK 0x07ff
+#define CFG_ORG_H_SHIFT 0
+#define CFG_ORG_W_MASK 0x07ff
+#define CFG_ORG_W_SHIFT 16
+
+#define CFG_SC25 0x64
+#define CFG_OFF_H_MASK 0x07ff
+#define CFG_OFF_H_SHIFT 0
+#define CFG_OFF_W_MASK 0x07ff
+#define CFG_OFF_W_SHIFT 16
+
+/* number of phases supported by the polyphase scalers */
+#define SC_NUM_PHASES 32
+
+/* number of taps used by horizontal polyphase scaler */
+#define SC_H_NUM_TAPS 7
+
+/* number of taps used by vertical polyphase scaler */
+#define SC_V_NUM_TAPS 5
+
+/* number of taps expected by the scaler in its coefficient memory */
+#define SC_NUM_TAPS_MEM_ALIGN 8
+
+/*
+ * coefficient memory size in bytes:
+ * num phases x num sets (luma and chroma) x num taps (aligned) x coeff size
+ */
+#define SC_COEF_SRAM_SIZE (SC_NUM_PHASES * 2 * SC_NUM_TAPS_MEM_ALIGN * 2)
+
+struct sc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ dma_addr_t loaded_coeff_h; /* loaded h coeffs in SC */
+ dma_addr_t loaded_coeff_v; /* loaded v coeffs in SC */
+
+ bool load_coeff_h; /* have new h SC coeffs */
+ bool load_coeff_v; /* have new v SC coeffs */
+
+ unsigned int hs_index; /* h SC coeffs selector */
+ unsigned int vs_index; /* v SC coeffs selector */
+
+ struct platform_device *pdev;
+};
+
+void sc_dump_regs(struct sc_data *sc);
+void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
+ unsigned int dst_w);
+void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
+ unsigned int dst_h);
+void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
+ u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
+ unsigned int dst_w, unsigned int dst_h);
+struct sc_data *sc_create(struct platform_device *pdev);
+
+#endif
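Note: SC_COEF_SRAM_SIZE above works out to 32 phases x 2 sets (luma + chroma) x 8 aligned tap slots x 2 bytes = 1024 bytes per coefficient memory; the horizontal tables use 7 of the 8 slots per phase and the vertical tables 5. A compile-time check of that arithmetic, assuming sc.h is on the include path:

#include <linux/bug.h>
#include "sc.h"

/* Sketch: sanity-check the coefficient SRAM size derived in sc.h. */
static inline void sc_coeff_size_check(void)
{
        BUILD_BUG_ON(SC_COEF_SRAM_SIZE != 32 * 2 * 8 * 2);
        BUILD_BUG_ON(SC_COEF_SRAM_SIZE != 1024);
}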
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti-vpe/sc_coeff.h
new file mode 100644
index 000000000000..5bfa5c03aec6
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/sc_coeff.h
@@ -0,0 +1,1342 @@
+/*
+ * VPE SC coefs
+ *
+ * Copyright (c) 2013 Texas Instruments Inc.
+ *
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Archit Taneja, <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_SC_COEFF_H
+#define __TI_SC_COEFF_H
+
+/* horizontal scaler coefficients */
+enum {
+ HS_UP_SCALE = 0,
+ HS_LT_9_16_SCALE,
+ HS_LT_10_16_SCALE,
+ HS_LT_11_16_SCALE,
+ HS_LT_12_16_SCALE,
+ HS_LT_13_16_SCALE,
+ HS_LT_14_16_SCALE,
+ HS_LT_15_16_SCALE,
+ HS_LE_16_16_SCALE,
+};
+
+static const u16 scaler_hs_coeffs[13][SC_NUM_PHASES * 2 * SC_H_NUM_TAPS] = {
+ [HS_UP_SCALE] = {
+ /* Luma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ /* Chroma */
+ 0x001F, 0x1F90, 0x00D2, 0x06FE, 0x00D2, 0x1F90, 0x001F,
+ 0x001C, 0x1F9E, 0x009F, 0x06FB, 0x0108, 0x1F82, 0x0022,
+ 0x0019, 0x1FAC, 0x006F, 0x06F3, 0x0140, 0x1F74, 0x0025,
+ 0x0016, 0x1FB9, 0x0041, 0x06E7, 0x017B, 0x1F66, 0x0028,
+ 0x0013, 0x1FC6, 0x0017, 0x06D6, 0x01B7, 0x1F58, 0x002B,
+ 0x0010, 0x1FD3, 0x1FEF, 0x06C0, 0x01F6, 0x1F4B, 0x002D,
+ 0x000E, 0x1FDF, 0x1FCB, 0x06A5, 0x0235, 0x1F3F, 0x002F,
+ 0x000B, 0x1FEA, 0x1FAA, 0x0686, 0x0277, 0x1F33, 0x0031,
+ 0x0009, 0x1FF5, 0x1F8C, 0x0663, 0x02B8, 0x1F28, 0x0033,
+ 0x0007, 0x1FFF, 0x1F72, 0x063A, 0x02FB, 0x1F1F, 0x0034,
+ 0x0005, 0x0008, 0x1F5A, 0x060F, 0x033E, 0x1F17, 0x0035,
+ 0x0003, 0x0010, 0x1F46, 0x05E0, 0x0382, 0x1F10, 0x0035,
+ 0x0002, 0x0017, 0x1F34, 0x05AF, 0x03C5, 0x1F0B, 0x0034,
+ 0x0001, 0x001E, 0x1F26, 0x0579, 0x0407, 0x1F08, 0x0033,
+ 0x0000, 0x0023, 0x1F1A, 0x0541, 0x0449, 0x1F07, 0x0032,
+ 0x1FFF, 0x0028, 0x1F12, 0x0506, 0x048A, 0x1F08, 0x002F,
+ 0x002C, 0x1F0C, 0x04C8, 0x04C8, 0x1F0C, 0x002C, 0x0000,
+ 0x002F, 0x1F08, 0x048A, 0x0506, 0x1F12, 0x0028, 0x1FFF,
+ 0x0032, 0x1F07, 0x0449, 0x0541, 0x1F1A, 0x0023, 0x0000,
+ 0x0033, 0x1F08, 0x0407, 0x0579, 0x1F26, 0x001E, 0x0001,
+ 0x0034, 0x1F0B, 0x03C5, 0x05AF, 0x1F34, 0x0017, 0x0002,
+ 0x0035, 0x1F10, 0x0382, 0x05E0, 0x1F46, 0x0010, 0x0003,
+ 0x0035, 0x1F17, 0x033E, 0x060F, 0x1F5A, 0x0008, 0x0005,
+ 0x0034, 0x1F1F, 0x02FB, 0x063A, 0x1F72, 0x1FFF, 0x0007,
+ 0x0033, 0x1F28, 0x02B8, 0x0663, 0x1F8C, 0x1FF5, 0x0009,
+ 0x0031, 0x1F33, 0x0277, 0x0686, 0x1FAA, 0x1FEA, 0x000B,
+ 0x002F, 0x1F3F, 0x0235, 0x06A5, 0x1FCB, 0x1FDF, 0x000E,
+ 0x002D, 0x1F4B, 0x01F6, 0x06C0, 0x1FEF, 0x1FD3, 0x0010,
+ 0x002B, 0x1F58, 0x01B7, 0x06D6, 0x0017, 0x1FC6, 0x0013,
+ 0x0028, 0x1F66, 0x017B, 0x06E7, 0x0041, 0x1FB9, 0x0016,
+ 0x0025, 0x1F74, 0x0140, 0x06F3, 0x006F, 0x1FAC, 0x0019,
+ 0x0022, 0x1F82, 0x0108, 0x06FB, 0x009F, 0x1F9E, 0x001C,
+ },
+ [HS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ /* Chroma */
+ 0x1FA3, 0x005E, 0x024A, 0x036A, 0x024A, 0x005E, 0x1FA3,
+ 0x1FA3, 0x0052, 0x023A, 0x036A, 0x0259, 0x006A, 0x1FA4,
+ 0x1FA3, 0x0046, 0x022A, 0x036A, 0x0269, 0x0076, 0x1FA4,
+ 0x1FA3, 0x003B, 0x021A, 0x0368, 0x0278, 0x0083, 0x1FA5,
+ 0x1FA4, 0x0031, 0x020A, 0x0365, 0x0286, 0x0090, 0x1FA6,
+ 0x1FA5, 0x0026, 0x01F9, 0x0362, 0x0294, 0x009E, 0x1FA8,
+ 0x1FA6, 0x001C, 0x01E8, 0x035E, 0x02A3, 0x00AB, 0x1FAA,
+ 0x1FA7, 0x0013, 0x01D7, 0x035A, 0x02B0, 0x00B9, 0x1FAC,
+ 0x1FA9, 0x000A, 0x01C6, 0x0354, 0x02BD, 0x00C7, 0x1FAF,
+ 0x1FAA, 0x0001, 0x01B6, 0x034E, 0x02C9, 0x00D6, 0x1FB2,
+ 0x1FAC, 0x1FF9, 0x01A5, 0x0347, 0x02D5, 0x00E5, 0x1FB5,
+ 0x1FAE, 0x1FF1, 0x0194, 0x0340, 0x02E1, 0x00F3, 0x1FB9,
+ 0x1FB0, 0x1FEA, 0x0183, 0x0338, 0x02EC, 0x0102, 0x1FBD,
+ 0x1FB2, 0x1FE3, 0x0172, 0x0330, 0x02F6, 0x0112, 0x1FC1,
+ 0x1FB4, 0x1FDC, 0x0161, 0x0327, 0x0301, 0x0121, 0x1FC6,
+ 0x1FB7, 0x1FD6, 0x0151, 0x031D, 0x030A, 0x0130, 0x1FCB,
+ 0x1FD2, 0x0136, 0x02F8, 0x02F8, 0x0136, 0x1FD2, 0x0000,
+ 0x1FCB, 0x0130, 0x030A, 0x031D, 0x0151, 0x1FD6, 0x1FB7,
+ 0x1FC6, 0x0121, 0x0301, 0x0327, 0x0161, 0x1FDC, 0x1FB4,
+ 0x1FC1, 0x0112, 0x02F6, 0x0330, 0x0172, 0x1FE3, 0x1FB2,
+ 0x1FBD, 0x0102, 0x02EC, 0x0338, 0x0183, 0x1FEA, 0x1FB0,
+ 0x1FB9, 0x00F3, 0x02E1, 0x0340, 0x0194, 0x1FF1, 0x1FAE,
+ 0x1FB5, 0x00E5, 0x02D5, 0x0347, 0x01A5, 0x1FF9, 0x1FAC,
+ 0x1FB2, 0x00D6, 0x02C9, 0x034E, 0x01B6, 0x0001, 0x1FAA,
+ 0x1FAF, 0x00C7, 0x02BD, 0x0354, 0x01C6, 0x000A, 0x1FA9,
+ 0x1FAC, 0x00B9, 0x02B0, 0x035A, 0x01D7, 0x0013, 0x1FA7,
+ 0x1FAA, 0x00AB, 0x02A3, 0x035E, 0x01E8, 0x001C, 0x1FA6,
+ 0x1FA8, 0x009E, 0x0294, 0x0362, 0x01F9, 0x0026, 0x1FA5,
+ 0x1FA6, 0x0090, 0x0286, 0x0365, 0x020A, 0x0031, 0x1FA4,
+ 0x1FA5, 0x0083, 0x0278, 0x0368, 0x021A, 0x003B, 0x1FA3,
+ 0x1FA4, 0x0076, 0x0269, 0x036A, 0x022A, 0x0046, 0x1FA3,
+ 0x1FA4, 0x006A, 0x0259, 0x036A, 0x023A, 0x0052, 0x1FA3,
+ },
+ [HS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ /* Chroma */
+ 0x1F8D, 0x000C, 0x026A, 0x03FA, 0x026A, 0x000C, 0x1F8D,
+ 0x1F8F, 0x0000, 0x0255, 0x03FA, 0x027F, 0x0019, 0x1F8A,
+ 0x1F92, 0x1FF5, 0x023F, 0x03F8, 0x0293, 0x0027, 0x1F88,
+ 0x1F95, 0x1FEA, 0x022A, 0x03F6, 0x02A7, 0x0034, 0x1F86,
+ 0x1F99, 0x1FDF, 0x0213, 0x03F2, 0x02BB, 0x0043, 0x1F85,
+ 0x1F9C, 0x1FD5, 0x01FE, 0x03ED, 0x02CF, 0x0052, 0x1F83,
+ 0x1FA0, 0x1FCC, 0x01E8, 0x03E7, 0x02E1, 0x0061, 0x1F83,
+ 0x1FA4, 0x1FC3, 0x01D2, 0x03E0, 0x02F4, 0x0071, 0x1F82,
+ 0x1FA7, 0x1FBB, 0x01BC, 0x03D9, 0x0306, 0x0081, 0x1F82,
+ 0x1FAB, 0x1FB4, 0x01A6, 0x03D0, 0x0317, 0x0092, 0x1F82,
+ 0x1FAF, 0x1FAD, 0x0190, 0x03C7, 0x0327, 0x00A3, 0x1F83,
+ 0x1FB3, 0x1FA7, 0x017A, 0x03BC, 0x0337, 0x00B5, 0x1F84,
+ 0x1FB8, 0x1FA1, 0x0165, 0x03B0, 0x0346, 0x00C7, 0x1F85,
+ 0x1FBC, 0x1F9C, 0x0150, 0x03A4, 0x0354, 0x00D9, 0x1F87,
+ 0x1FC0, 0x1F98, 0x013A, 0x0397, 0x0361, 0x00EC, 0x1F8A,
+ 0x1FC4, 0x1F93, 0x0126, 0x0389, 0x036F, 0x00FE, 0x1F8D,
+ 0x1F93, 0x010A, 0x0363, 0x0363, 0x010A, 0x1F93, 0x0000,
+ 0x1F8D, 0x00FE, 0x036F, 0x0389, 0x0126, 0x1F93, 0x1FC4,
+ 0x1F8A, 0x00EC, 0x0361, 0x0397, 0x013A, 0x1F98, 0x1FC0,
+ 0x1F87, 0x00D9, 0x0354, 0x03A4, 0x0150, 0x1F9C, 0x1FBC,
+ 0x1F85, 0x00C7, 0x0346, 0x03B0, 0x0165, 0x1FA1, 0x1FB8,
+ 0x1F84, 0x00B5, 0x0337, 0x03BC, 0x017A, 0x1FA7, 0x1FB3,
+ 0x1F83, 0x00A3, 0x0327, 0x03C7, 0x0190, 0x1FAD, 0x1FAF,
+ 0x1F82, 0x0092, 0x0317, 0x03D0, 0x01A6, 0x1FB4, 0x1FAB,
+ 0x1F82, 0x0081, 0x0306, 0x03D9, 0x01BC, 0x1FBB, 0x1FA7,
+ 0x1F82, 0x0071, 0x02F4, 0x03E0, 0x01D2, 0x1FC3, 0x1FA4,
+ 0x1F83, 0x0061, 0x02E1, 0x03E7, 0x01E8, 0x1FCC, 0x1FA0,
+ 0x1F83, 0x0052, 0x02CF, 0x03ED, 0x01FE, 0x1FD5, 0x1F9C,
+ 0x1F85, 0x0043, 0x02BB, 0x03F2, 0x0213, 0x1FDF, 0x1F99,
+ 0x1F86, 0x0034, 0x02A7, 0x03F6, 0x022A, 0x1FEA, 0x1F95,
+ 0x1F88, 0x0027, 0x0293, 0x03F8, 0x023F, 0x1FF5, 0x1F92,
+ 0x1F8A, 0x0019, 0x027F, 0x03FA, 0x0255, 0x0000, 0x1F8F,
+ },
+ [HS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ /* Chroma */
+ 0x1F95, 0x1FB5, 0x0272, 0x0488, 0x0272, 0x1FB5, 0x1F95,
+ 0x1F9B, 0x1FAA, 0x0257, 0x0486, 0x028D, 0x1FC1, 0x1F90,
+ 0x1FA0, 0x1FA0, 0x023C, 0x0485, 0x02A8, 0x1FCD, 0x1F8A,
+ 0x1FA6, 0x1F96, 0x0221, 0x0481, 0x02C2, 0x1FDB, 0x1F85,
+ 0x1FAC, 0x1F8E, 0x0205, 0x047C, 0x02DC, 0x1FE9, 0x1F80,
+ 0x1FB1, 0x1F86, 0x01E9, 0x0476, 0x02F6, 0x1FF8, 0x1F7C,
+ 0x1FB7, 0x1F7F, 0x01CE, 0x046E, 0x030F, 0x0008, 0x1F77,
+ 0x1FBD, 0x1F79, 0x01B3, 0x0465, 0x0326, 0x0019, 0x1F73,
+ 0x1FC3, 0x1F73, 0x0197, 0x045B, 0x033E, 0x002A, 0x1F70,
+ 0x1FC8, 0x1F6F, 0x017D, 0x044E, 0x0355, 0x003C, 0x1F6D,
+ 0x1FCE, 0x1F6B, 0x0162, 0x0441, 0x036B, 0x004F, 0x1F6A,
+ 0x1FD3, 0x1F68, 0x0148, 0x0433, 0x0380, 0x0063, 0x1F67,
+ 0x1FD8, 0x1F65, 0x012E, 0x0424, 0x0395, 0x0077, 0x1F65,
+ 0x1FDE, 0x1F63, 0x0115, 0x0413, 0x03A8, 0x008B, 0x1F64,
+ 0x1FE3, 0x1F62, 0x00FC, 0x0403, 0x03BA, 0x00A0, 0x1F62,
+ 0x1FE7, 0x1F62, 0x00E4, 0x03EF, 0x03CC, 0x00B6, 0x1F62,
+ 0x1F63, 0x00CA, 0x03D3, 0x03D3, 0x00CA, 0x1F63, 0x0000,
+ 0x1F62, 0x00B6, 0x03CC, 0x03EF, 0x00E4, 0x1F62, 0x1FE7,
+ 0x1F62, 0x00A0, 0x03BA, 0x0403, 0x00FC, 0x1F62, 0x1FE3,
+ 0x1F64, 0x008B, 0x03A8, 0x0413, 0x0115, 0x1F63, 0x1FDE,
+ 0x1F65, 0x0077, 0x0395, 0x0424, 0x012E, 0x1F65, 0x1FD8,
+ 0x1F67, 0x0063, 0x0380, 0x0433, 0x0148, 0x1F68, 0x1FD3,
+ 0x1F6A, 0x004F, 0x036B, 0x0441, 0x0162, 0x1F6B, 0x1FCE,
+ 0x1F6D, 0x003C, 0x0355, 0x044E, 0x017D, 0x1F6F, 0x1FC8,
+ 0x1F70, 0x002A, 0x033E, 0x045B, 0x0197, 0x1F73, 0x1FC3,
+ 0x1F73, 0x0019, 0x0326, 0x0465, 0x01B3, 0x1F79, 0x1FBD,
+ 0x1F77, 0x0008, 0x030F, 0x046E, 0x01CE, 0x1F7F, 0x1FB7,
+ 0x1F7C, 0x1FF8, 0x02F6, 0x0476, 0x01E9, 0x1F86, 0x1FB1,
+ 0x1F80, 0x1FE9, 0x02DC, 0x047C, 0x0205, 0x1F8E, 0x1FAC,
+ 0x1F85, 0x1FDB, 0x02C2, 0x0481, 0x0221, 0x1F96, 0x1FA6,
+ 0x1F8A, 0x1FCD, 0x02A8, 0x0485, 0x023C, 0x1FA0, 0x1FA0,
+ 0x1F90, 0x1FC1, 0x028D, 0x0486, 0x0257, 0x1FAA, 0x1F9B,
+ },
+ [HS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ /* Chroma */
+ 0x1FBB, 0x1F65, 0x025E, 0x0504, 0x025E, 0x1F65, 0x1FBB,
+ 0x1FC3, 0x1F5D, 0x023C, 0x0503, 0x027F, 0x1F6E, 0x1FB4,
+ 0x1FCA, 0x1F56, 0x021B, 0x0501, 0x02A0, 0x1F78, 0x1FAC,
+ 0x1FD1, 0x1F50, 0x01FA, 0x04FD, 0x02C0, 0x1F83, 0x1FA5,
+ 0x1FD8, 0x1F4B, 0x01D9, 0x04F6, 0x02E1, 0x1F90, 0x1F9D,
+ 0x1FDF, 0x1F47, 0x01B8, 0x04EF, 0x0301, 0x1F9D, 0x1F95,
+ 0x1FE6, 0x1F43, 0x0198, 0x04E5, 0x0321, 0x1FAB, 0x1F8E,
+ 0x1FEC, 0x1F41, 0x0178, 0x04DA, 0x0340, 0x1FBB, 0x1F86,
+ 0x1FF2, 0x1F40, 0x0159, 0x04CC, 0x035E, 0x1FCC, 0x1F7F,
+ 0x1FF8, 0x1F40, 0x013A, 0x04BE, 0x037B, 0x1FDD, 0x1F78,
+ 0x1FFE, 0x1F40, 0x011B, 0x04AD, 0x0398, 0x1FF0, 0x1F72,
+ 0x0003, 0x1F41, 0x00FD, 0x049C, 0x03B4, 0x0004, 0x1F6B,
+ 0x0008, 0x1F43, 0x00E0, 0x0489, 0x03CE, 0x0019, 0x1F65,
+ 0x000D, 0x1F46, 0x00C4, 0x0474, 0x03E8, 0x002E, 0x1F5F,
+ 0x0011, 0x1F49, 0x00A9, 0x045E, 0x0400, 0x0045, 0x1F5A,
+ 0x0015, 0x1F4D, 0x008E, 0x0447, 0x0418, 0x005C, 0x1F55,
+ 0x1F4F, 0x0076, 0x043B, 0x043B, 0x0076, 0x1F4F, 0x0000,
+ 0x1F55, 0x005C, 0x0418, 0x0447, 0x008E, 0x1F4D, 0x0015,
+ 0x1F5A, 0x0045, 0x0400, 0x045E, 0x00A9, 0x1F49, 0x0011,
+ 0x1F5F, 0x002E, 0x03E8, 0x0474, 0x00C4, 0x1F46, 0x000D,
+ 0x1F65, 0x0019, 0x03CE, 0x0489, 0x00E0, 0x1F43, 0x0008,
+ 0x1F6B, 0x0004, 0x03B4, 0x049C, 0x00FD, 0x1F41, 0x0003,
+ 0x1F72, 0x1FF0, 0x0398, 0x04AD, 0x011B, 0x1F40, 0x1FFE,
+ 0x1F78, 0x1FDD, 0x037B, 0x04BE, 0x013A, 0x1F40, 0x1FF8,
+ 0x1F7F, 0x1FCC, 0x035E, 0x04CC, 0x0159, 0x1F40, 0x1FF2,
+ 0x1F86, 0x1FBB, 0x0340, 0x04DA, 0x0178, 0x1F41, 0x1FEC,
+ 0x1F8E, 0x1FAB, 0x0321, 0x04E5, 0x0198, 0x1F43, 0x1FE6,
+ 0x1F95, 0x1F9D, 0x0301, 0x04EF, 0x01B8, 0x1F47, 0x1FDF,
+ 0x1F9D, 0x1F90, 0x02E1, 0x04F6, 0x01D9, 0x1F4B, 0x1FD8,
+ 0x1FA5, 0x1F83, 0x02C0, 0x04FD, 0x01FA, 0x1F50, 0x1FD1,
+ 0x1FAC, 0x1F78, 0x02A0, 0x0501, 0x021B, 0x1F56, 0x1FCA,
+ 0x1FB4, 0x1F6E, 0x027F, 0x0503, 0x023C, 0x1F5D, 0x1FC3,
+ },
+ [HS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ /* Chroma */
+ 0x1FF4, 0x1F29, 0x022D, 0x056C, 0x022D, 0x1F29, 0x1FF4,
+ 0x1FFC, 0x1F26, 0x0206, 0x056A, 0x0254, 0x1F2E, 0x1FEC,
+ 0x0003, 0x1F24, 0x01E0, 0x0567, 0x027A, 0x1F34, 0x1FE4,
+ 0x000A, 0x1F23, 0x01BA, 0x0561, 0x02A2, 0x1F3B, 0x1FDB,
+ 0x0011, 0x1F22, 0x0194, 0x055B, 0x02C9, 0x1F43, 0x1FD2,
+ 0x0017, 0x1F23, 0x016F, 0x0551, 0x02F0, 0x1F4D, 0x1FC9,
+ 0x001D, 0x1F25, 0x014B, 0x0545, 0x0316, 0x1F58, 0x1FC0,
+ 0x0022, 0x1F28, 0x0127, 0x0538, 0x033C, 0x1F65, 0x1FB6,
+ 0x0027, 0x1F2C, 0x0104, 0x0528, 0x0361, 0x1F73, 0x1FAD,
+ 0x002B, 0x1F30, 0x00E2, 0x0518, 0x0386, 0x1F82, 0x1FA3,
+ 0x002F, 0x1F36, 0x00C2, 0x0504, 0x03AA, 0x1F92, 0x1F99,
+ 0x0032, 0x1F3C, 0x00A2, 0x04EF, 0x03CD, 0x1FA4, 0x1F90,
+ 0x0035, 0x1F42, 0x0083, 0x04D9, 0x03EF, 0x1FB8, 0x1F86,
+ 0x0038, 0x1F49, 0x0065, 0x04C0, 0x0410, 0x1FCD, 0x1F7D,
+ 0x003A, 0x1F51, 0x0048, 0x04A6, 0x0431, 0x1FE3, 0x1F73,
+ 0x003C, 0x1F59, 0x002D, 0x048A, 0x0450, 0x1FFA, 0x1F6A,
+ 0x1F5D, 0x0014, 0x048F, 0x048F, 0x0014, 0x1F5D, 0x0000,
+ 0x1F6A, 0x1FFA, 0x0450, 0x048A, 0x002D, 0x1F59, 0x003C,
+ 0x1F73, 0x1FE3, 0x0431, 0x04A6, 0x0048, 0x1F51, 0x003A,
+ 0x1F7D, 0x1FCD, 0x0410, 0x04C0, 0x0065, 0x1F49, 0x0038,
+ 0x1F86, 0x1FB8, 0x03EF, 0x04D9, 0x0083, 0x1F42, 0x0035,
+ 0x1F90, 0x1FA4, 0x03CD, 0x04EF, 0x00A2, 0x1F3C, 0x0032,
+ 0x1F99, 0x1F92, 0x03AA, 0x0504, 0x00C2, 0x1F36, 0x002F,
+ 0x1FA3, 0x1F82, 0x0386, 0x0518, 0x00E2, 0x1F30, 0x002B,
+ 0x1FAD, 0x1F73, 0x0361, 0x0528, 0x0104, 0x1F2C, 0x0027,
+ 0x1FB6, 0x1F65, 0x033C, 0x0538, 0x0127, 0x1F28, 0x0022,
+ 0x1FC0, 0x1F58, 0x0316, 0x0545, 0x014B, 0x1F25, 0x001D,
+ 0x1FC9, 0x1F4D, 0x02F0, 0x0551, 0x016F, 0x1F23, 0x0017,
+ 0x1FD2, 0x1F43, 0x02C9, 0x055B, 0x0194, 0x1F22, 0x0011,
+ 0x1FDB, 0x1F3B, 0x02A2, 0x0561, 0x01BA, 0x1F23, 0x000A,
+ 0x1FE4, 0x1F34, 0x027A, 0x0567, 0x01E0, 0x1F24, 0x0003,
+ 0x1FEC, 0x1F2E, 0x0254, 0x056A, 0x0206, 0x1F26, 0x1FFC,
+ },
+ [HS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ /* Chroma */
+ 0x002F, 0x1F0B, 0x01E7, 0x05BE, 0x01E7, 0x1F0B, 0x002F,
+ 0x0035, 0x1F0D, 0x01BC, 0x05BD, 0x0213, 0x1F0A, 0x0028,
+ 0x003A, 0x1F11, 0x0191, 0x05BA, 0x023F, 0x1F0A, 0x0021,
+ 0x003F, 0x1F15, 0x0167, 0x05B3, 0x026C, 0x1F0C, 0x001A,
+ 0x0043, 0x1F1B, 0x013E, 0x05AA, 0x0299, 0x1F0F, 0x0012,
+ 0x0046, 0x1F21, 0x0116, 0x05A1, 0x02C6, 0x1F13, 0x0009,
+ 0x0049, 0x1F28, 0x00EF, 0x0593, 0x02F4, 0x1F19, 0x0000,
+ 0x004C, 0x1F30, 0x00C9, 0x0584, 0x0321, 0x1F20, 0x1FF6,
+ 0x004E, 0x1F39, 0x00A4, 0x0572, 0x034D, 0x1F2A, 0x1FEC,
+ 0x004F, 0x1F43, 0x0080, 0x055E, 0x037A, 0x1F34, 0x1FE2,
+ 0x0050, 0x1F4D, 0x005E, 0x0548, 0x03A5, 0x1F41, 0x1FD7,
+ 0x0050, 0x1F57, 0x003D, 0x0531, 0x03D1, 0x1F4F, 0x1FCB,
+ 0x0050, 0x1F62, 0x001E, 0x0516, 0x03FB, 0x1F5F, 0x1FC0,
+ 0x004F, 0x1F6D, 0x0000, 0x04FA, 0x0425, 0x1F71, 0x1FB4,
+ 0x004E, 0x1F79, 0x1FE4, 0x04DC, 0x044D, 0x1F84, 0x1FA8,
+ 0x004D, 0x1F84, 0x1FCA, 0x04BC, 0x0474, 0x1F99, 0x1F9C,
+ 0x1F8C, 0x1FAE, 0x04C6, 0x04C6, 0x1FAE, 0x1F8C, 0x0000,
+ 0x1F9C, 0x1F99, 0x0474, 0x04BC, 0x1FCA, 0x1F84, 0x004D,
+ 0x1FA8, 0x1F84, 0x044D, 0x04DC, 0x1FE4, 0x1F79, 0x004E,
+ 0x1FB4, 0x1F71, 0x0425, 0x04FA, 0x0000, 0x1F6D, 0x004F,
+ 0x1FC0, 0x1F5F, 0x03FB, 0x0516, 0x001E, 0x1F62, 0x0050,
+ 0x1FCB, 0x1F4F, 0x03D1, 0x0531, 0x003D, 0x1F57, 0x0050,
+ 0x1FD7, 0x1F41, 0x03A5, 0x0548, 0x005E, 0x1F4D, 0x0050,
+ 0x1FE2, 0x1F34, 0x037A, 0x055E, 0x0080, 0x1F43, 0x004F,
+ 0x1FEC, 0x1F2A, 0x034D, 0x0572, 0x00A4, 0x1F39, 0x004E,
+ 0x1FF6, 0x1F20, 0x0321, 0x0584, 0x00C9, 0x1F30, 0x004C,
+ 0x0000, 0x1F19, 0x02F4, 0x0593, 0x00EF, 0x1F28, 0x0049,
+ 0x0009, 0x1F13, 0x02C6, 0x05A1, 0x0116, 0x1F21, 0x0046,
+ 0x0012, 0x1F0F, 0x0299, 0x05AA, 0x013E, 0x1F1B, 0x0043,
+ 0x001A, 0x1F0C, 0x026C, 0x05B3, 0x0167, 0x1F15, 0x003F,
+ 0x0021, 0x1F0A, 0x023F, 0x05BA, 0x0191, 0x1F11, 0x003A,
+ 0x0028, 0x1F0A, 0x0213, 0x05BD, 0x01BC, 0x1F0D, 0x0035,
+ },
+ [HS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ /* Chroma */
+ 0x005B, 0x1F0A, 0x0195, 0x060C, 0x0195, 0x1F0A, 0x005B,
+ 0x005D, 0x1F13, 0x0166, 0x0609, 0x01C6, 0x1F03, 0x0058,
+ 0x005F, 0x1F1C, 0x0138, 0x0605, 0x01F7, 0x1EFD, 0x0054,
+ 0x0060, 0x1F26, 0x010B, 0x05FF, 0x0229, 0x1EF8, 0x004F,
+ 0x0060, 0x1F31, 0x00DF, 0x05F5, 0x025C, 0x1EF5, 0x004A,
+ 0x0060, 0x1F3D, 0x00B5, 0x05E8, 0x028F, 0x1EF3, 0x0044,
+ 0x005F, 0x1F49, 0x008C, 0x05DA, 0x02C3, 0x1EF2, 0x003D,
+ 0x005E, 0x1F56, 0x0065, 0x05C7, 0x02F6, 0x1EF4, 0x0036,
+ 0x005C, 0x1F63, 0x003F, 0x05B3, 0x032B, 0x1EF7, 0x002D,
+ 0x0059, 0x1F71, 0x001B, 0x059D, 0x035F, 0x1EFB, 0x0024,
+ 0x0057, 0x1F7F, 0x1FF9, 0x0583, 0x0392, 0x1F02, 0x001A,
+ 0x0053, 0x1F8D, 0x1FD9, 0x0567, 0x03C5, 0x1F0B, 0x0010,
+ 0x0050, 0x1F9B, 0x1FBB, 0x0548, 0x03F8, 0x1F15, 0x0005,
+ 0x004C, 0x1FA9, 0x1F9E, 0x0528, 0x042A, 0x1F22, 0x1FF9,
+ 0x0048, 0x1FB7, 0x1F84, 0x0505, 0x045A, 0x1F31, 0x1FED,
+ 0x0043, 0x1FC5, 0x1F6C, 0x04E0, 0x048A, 0x1F42, 0x1FE0,
+ 0x1FD1, 0x1F50, 0x04DF, 0x04DF, 0x1F50, 0x1FD1, 0x0000,
+ 0x1FE0, 0x1F42, 0x048A, 0x04E0, 0x1F6C, 0x1FC5, 0x0043,
+ 0x1FED, 0x1F31, 0x045A, 0x0505, 0x1F84, 0x1FB7, 0x0048,
+ 0x1FF9, 0x1F22, 0x042A, 0x0528, 0x1F9E, 0x1FA9, 0x004C,
+ 0x0005, 0x1F15, 0x03F8, 0x0548, 0x1FBB, 0x1F9B, 0x0050,
+ 0x0010, 0x1F0B, 0x03C5, 0x0567, 0x1FD9, 0x1F8D, 0x0053,
+ 0x001A, 0x1F02, 0x0392, 0x0583, 0x1FF9, 0x1F7F, 0x0057,
+ 0x0024, 0x1EFB, 0x035F, 0x059D, 0x001B, 0x1F71, 0x0059,
+ 0x002D, 0x1EF7, 0x032B, 0x05B3, 0x003F, 0x1F63, 0x005C,
+ 0x0036, 0x1EF4, 0x02F6, 0x05C7, 0x0065, 0x1F56, 0x005E,
+ 0x003D, 0x1EF2, 0x02C3, 0x05DA, 0x008C, 0x1F49, 0x005F,
+ 0x0044, 0x1EF3, 0x028F, 0x05E8, 0x00B5, 0x1F3D, 0x0060,
+ 0x004A, 0x1EF5, 0x025C, 0x05F5, 0x00DF, 0x1F31, 0x0060,
+ 0x004F, 0x1EF8, 0x0229, 0x05FF, 0x010B, 0x1F26, 0x0060,
+ 0x0054, 0x1EFD, 0x01F7, 0x0605, 0x0138, 0x1F1C, 0x005F,
+ 0x0058, 0x1F03, 0x01C6, 0x0609, 0x0166, 0x1F13, 0x005D,
+ },
+ [HS_LE_16_16_SCALE] = {
+ /* Luma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ /* Chroma */
+ 0x006E, 0x1F24, 0x013E, 0x0660, 0x013E, 0x1F24, 0x006E,
+ 0x006C, 0x1F33, 0x010B, 0x065D, 0x0172, 0x1F17, 0x0070,
+ 0x0069, 0x1F41, 0x00DA, 0x0659, 0x01A8, 0x1F0B, 0x0070,
+ 0x0066, 0x1F51, 0x00AA, 0x0650, 0x01DF, 0x1F00, 0x0070,
+ 0x0062, 0x1F61, 0x007D, 0x0644, 0x0217, 0x1EF6, 0x006F,
+ 0x005E, 0x1F71, 0x0051, 0x0636, 0x0250, 0x1EED, 0x006D,
+ 0x0059, 0x1F81, 0x0028, 0x0624, 0x028A, 0x1EE5, 0x006B,
+ 0x0054, 0x1F91, 0x0000, 0x060F, 0x02C5, 0x1EE0, 0x0067,
+ 0x004E, 0x1FA2, 0x1FDB, 0x05F6, 0x0300, 0x1EDC, 0x0063,
+ 0x0049, 0x1FB2, 0x1FB8, 0x05DB, 0x033B, 0x1EDA, 0x005D,
+ 0x0043, 0x1FC3, 0x1F98, 0x05BC, 0x0376, 0x1ED9, 0x0057,
+ 0x003D, 0x1FD3, 0x1F7A, 0x059B, 0x03B1, 0x1EDB, 0x004F,
+ 0x0036, 0x1FE2, 0x1F5E, 0x0578, 0x03EC, 0x1EDF, 0x0047,
+ 0x0030, 0x1FF1, 0x1F45, 0x0551, 0x0426, 0x1EE6, 0x003D,
+ 0x002A, 0x0000, 0x1F2E, 0x0528, 0x045F, 0x1EEE, 0x0033,
+ 0x0023, 0x000E, 0x1F19, 0x04FD, 0x0498, 0x1EFA, 0x0027,
+ 0x001B, 0x1F04, 0x04E1, 0x04E1, 0x1F04, 0x001B, 0x0000,
+ 0x0027, 0x1EFA, 0x0498, 0x04FD, 0x1F19, 0x000E, 0x0023,
+ 0x0033, 0x1EEE, 0x045F, 0x0528, 0x1F2E, 0x0000, 0x002A,
+ 0x003D, 0x1EE6, 0x0426, 0x0551, 0x1F45, 0x1FF1, 0x0030,
+ 0x0047, 0x1EDF, 0x03EC, 0x0578, 0x1F5E, 0x1FE2, 0x0036,
+ 0x004F, 0x1EDB, 0x03B1, 0x059B, 0x1F7A, 0x1FD3, 0x003D,
+ 0x0057, 0x1ED9, 0x0376, 0x05BC, 0x1F98, 0x1FC3, 0x0043,
+ 0x005D, 0x1EDA, 0x033B, 0x05DB, 0x1FB8, 0x1FB2, 0x0049,
+ 0x0063, 0x1EDC, 0x0300, 0x05F6, 0x1FDB, 0x1FA2, 0x004E,
+ 0x0067, 0x1EE0, 0x02C5, 0x060F, 0x0000, 0x1F91, 0x0054,
+ 0x006B, 0x1EE5, 0x028A, 0x0624, 0x0028, 0x1F81, 0x0059,
+ 0x006D, 0x1EED, 0x0250, 0x0636, 0x0051, 0x1F71, 0x005E,
+ 0x006F, 0x1EF6, 0x0217, 0x0644, 0x007D, 0x1F61, 0x0062,
+ 0x0070, 0x1F00, 0x01DF, 0x0650, 0x00AA, 0x1F51, 0x0066,
+ 0x0070, 0x1F0B, 0x01A8, 0x0659, 0x00DA, 0x1F41, 0x0069,
+ 0x0070, 0x1F17, 0x0172, 0x065D, 0x010B, 0x1F33, 0x006C,
+ },
+};
+
+/* vertical scaler coefficients */
+enum {
+ VS_UP_SCALE = 0,
+ VS_LT_9_16_SCALE,
+ VS_LT_10_16_SCALE,
+ VS_LT_11_16_SCALE,
+ VS_LT_12_16_SCALE,
+ VS_LT_13_16_SCALE,
+ VS_LT_14_16_SCALE,
+ VS_LT_15_16_SCALE,
+ VS_LT_16_16_SCALE,
+ VS_1_TO_1_SCALE,
+};
+
+static const u16 scaler_vs_coeffs[15][SC_NUM_PHASES * 2 * SC_V_NUM_TAPS] = {
+ [VS_UP_SCALE] = {
+ /* Luma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x1FD1, 0x00B1, 0x06FC, 0x00B1, 0x1FD1,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+ [VS_LT_9_16_SCALE] = {
+ /* Luma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ /* Chroma */
+ 0x001C, 0x01F6, 0x03DC, 0x01F6, 0x001C,
+ 0x0018, 0x01DF, 0x03DB, 0x020C, 0x0022,
+ 0x0013, 0x01C9, 0x03D9, 0x0223, 0x0028,
+ 0x000F, 0x01B3, 0x03D6, 0x023A, 0x002E,
+ 0x000C, 0x019D, 0x03D2, 0x0250, 0x0035,
+ 0x0009, 0x0188, 0x03CC, 0x0266, 0x003D,
+ 0x0006, 0x0173, 0x03C5, 0x027D, 0x0045,
+ 0x0004, 0x015E, 0x03BD, 0x0293, 0x004E,
+ 0x0002, 0x014A, 0x03B4, 0x02A8, 0x0058,
+ 0x0000, 0x0136, 0x03AA, 0x02BE, 0x0062,
+ 0x1FFF, 0x0123, 0x039E, 0x02D3, 0x006D,
+ 0x1FFE, 0x0110, 0x0392, 0x02E8, 0x0078,
+ 0x1FFD, 0x00FE, 0x0384, 0x02FC, 0x0085,
+ 0x1FFD, 0x00ED, 0x0376, 0x030F, 0x0091,
+ 0x1FFC, 0x00DC, 0x0367, 0x0322, 0x009F,
+ 0x1FFC, 0x00CC, 0x0357, 0x0334, 0x00AD,
+ 0x00BC, 0x0344, 0x0344, 0x00BC, 0x0000,
+ 0x00AD, 0x0334, 0x0357, 0x00CC, 0x1FFC,
+ 0x009F, 0x0322, 0x0367, 0x00DC, 0x1FFC,
+ 0x0091, 0x030F, 0x0376, 0x00ED, 0x1FFD,
+ 0x0085, 0x02FC, 0x0384, 0x00FE, 0x1FFD,
+ 0x0078, 0x02E8, 0x0392, 0x0110, 0x1FFE,
+ 0x006D, 0x02D3, 0x039E, 0x0123, 0x1FFF,
+ 0x0062, 0x02BE, 0x03AA, 0x0136, 0x0000,
+ 0x0058, 0x02A8, 0x03B4, 0x014A, 0x0002,
+ 0x004E, 0x0293, 0x03BD, 0x015E, 0x0004,
+ 0x0045, 0x027D, 0x03C5, 0x0173, 0x0006,
+ 0x003D, 0x0266, 0x03CC, 0x0188, 0x0009,
+ 0x0035, 0x0250, 0x03D2, 0x019D, 0x000C,
+ 0x002E, 0x023A, 0x03D6, 0x01B3, 0x000F,
+ 0x0028, 0x0223, 0x03D9, 0x01C9, 0x0013,
+ 0x0022, 0x020C, 0x03DB, 0x01DF, 0x0018,
+ },
+ [VS_LT_10_16_SCALE] = {
+ /* Luma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ /* Chroma */
+ 0x0003, 0x01E9, 0x0428, 0x01E9, 0x0003,
+ 0x0000, 0x01D0, 0x0426, 0x0203, 0x0007,
+ 0x1FFD, 0x01B7, 0x0424, 0x021C, 0x000C,
+ 0x1FFB, 0x019E, 0x0420, 0x0236, 0x0011,
+ 0x1FF9, 0x0186, 0x041A, 0x0250, 0x0017,
+ 0x1FF7, 0x016E, 0x0414, 0x026A, 0x001D,
+ 0x1FF6, 0x0157, 0x040B, 0x0284, 0x0024,
+ 0x1FF5, 0x0140, 0x0401, 0x029E, 0x002C,
+ 0x1FF4, 0x012A, 0x03F6, 0x02B7, 0x0035,
+ 0x1FF4, 0x0115, 0x03E9, 0x02D0, 0x003E,
+ 0x1FF4, 0x0100, 0x03DB, 0x02E9, 0x0048,
+ 0x1FF4, 0x00EC, 0x03CC, 0x0301, 0x0053,
+ 0x1FF4, 0x00D9, 0x03BC, 0x0318, 0x005F,
+ 0x1FF5, 0x00C7, 0x03AA, 0x032F, 0x006B,
+ 0x1FF6, 0x00B5, 0x0398, 0x0345, 0x0078,
+ 0x1FF6, 0x00A5, 0x0384, 0x035B, 0x0086,
+ 0x0094, 0x036C, 0x036C, 0x0094, 0x0000,
+ 0x0086, 0x035B, 0x0384, 0x00A5, 0x1FF6,
+ 0x0078, 0x0345, 0x0398, 0x00B5, 0x1FF6,
+ 0x006B, 0x032F, 0x03AA, 0x00C7, 0x1FF5,
+ 0x005F, 0x0318, 0x03BC, 0x00D9, 0x1FF4,
+ 0x0053, 0x0301, 0x03CC, 0x00EC, 0x1FF4,
+ 0x0048, 0x02E9, 0x03DB, 0x0100, 0x1FF4,
+ 0x003E, 0x02D0, 0x03E9, 0x0115, 0x1FF4,
+ 0x0035, 0x02B7, 0x03F6, 0x012A, 0x1FF4,
+ 0x002C, 0x029E, 0x0401, 0x0140, 0x1FF5,
+ 0x0024, 0x0284, 0x040B, 0x0157, 0x1FF6,
+ 0x001D, 0x026A, 0x0414, 0x016E, 0x1FF7,
+ 0x0017, 0x0250, 0x041A, 0x0186, 0x1FF9,
+ 0x0011, 0x0236, 0x0420, 0x019E, 0x1FFB,
+ 0x000C, 0x021C, 0x0424, 0x01B7, 0x1FFD,
+ 0x0007, 0x0203, 0x0426, 0x01D0, 0x0000,
+ },
+ [VS_LT_11_16_SCALE] = {
+ /* Luma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ /* Chroma */
+ 0x1FEC, 0x01D6, 0x047C, 0x01D6, 0x1FEC,
+ 0x1FEA, 0x01BA, 0x047B, 0x01F3, 0x1FEE,
+ 0x1FE9, 0x019D, 0x0478, 0x0211, 0x1FF1,
+ 0x1FE8, 0x0182, 0x0473, 0x022E, 0x1FF5,
+ 0x1FE8, 0x0167, 0x046C, 0x024C, 0x1FF9,
+ 0x1FE8, 0x014D, 0x0464, 0x026A, 0x1FFD,
+ 0x1FE8, 0x0134, 0x0459, 0x0288, 0x0003,
+ 0x1FE9, 0x011B, 0x044D, 0x02A6, 0x0009,
+ 0x1FE9, 0x0104, 0x0440, 0x02C3, 0x0010,
+ 0x1FEA, 0x00ED, 0x0430, 0x02E1, 0x0018,
+ 0x1FEB, 0x00D7, 0x0420, 0x02FD, 0x0021,
+ 0x1FED, 0x00C2, 0x040D, 0x0319, 0x002B,
+ 0x1FEE, 0x00AE, 0x03F9, 0x0336, 0x0035,
+ 0x1FF0, 0x009C, 0x03E3, 0x0350, 0x0041,
+ 0x1FF1, 0x008A, 0x03CD, 0x036B, 0x004D,
+ 0x1FF3, 0x0079, 0x03B5, 0x0384, 0x005B,
+ 0x0069, 0x0397, 0x0397, 0x0069, 0x0000,
+ 0x005B, 0x0384, 0x03B5, 0x0079, 0x1FF3,
+ 0x004D, 0x036B, 0x03CD, 0x008A, 0x1FF1,
+ 0x0041, 0x0350, 0x03E3, 0x009C, 0x1FF0,
+ 0x0035, 0x0336, 0x03F9, 0x00AE, 0x1FEE,
+ 0x002B, 0x0319, 0x040D, 0x00C2, 0x1FED,
+ 0x0021, 0x02FD, 0x0420, 0x00D7, 0x1FEB,
+ 0x0018, 0x02E1, 0x0430, 0x00ED, 0x1FEA,
+ 0x0010, 0x02C3, 0x0440, 0x0104, 0x1FE9,
+ 0x0009, 0x02A6, 0x044D, 0x011B, 0x1FE9,
+ 0x0003, 0x0288, 0x0459, 0x0134, 0x1FE8,
+ 0x1FFD, 0x026A, 0x0464, 0x014D, 0x1FE8,
+ 0x1FF9, 0x024C, 0x046C, 0x0167, 0x1FE8,
+ 0x1FF5, 0x022E, 0x0473, 0x0182, 0x1FE8,
+ 0x1FF1, 0x0211, 0x0478, 0x019D, 0x1FE9,
+ 0x1FEE, 0x01F3, 0x047B, 0x01BA, 0x1FEA,
+ },
+ [VS_LT_12_16_SCALE] = {
+ /* Luma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ /* Chroma */
+ 0x1FD8, 0x01BC, 0x04D8, 0x01BC, 0x1FD8,
+ 0x1FD8, 0x019C, 0x04D8, 0x01DC, 0x1FD8,
+ 0x1FD8, 0x017D, 0x04D4, 0x01FE, 0x1FD9,
+ 0x1FD9, 0x015E, 0x04CF, 0x0220, 0x1FDA,
+ 0x1FDB, 0x0141, 0x04C7, 0x0241, 0x1FDC,
+ 0x1FDC, 0x0125, 0x04BC, 0x0264, 0x1FDF,
+ 0x1FDE, 0x0109, 0x04B0, 0x0286, 0x1FE3,
+ 0x1FE0, 0x00EF, 0x04A1, 0x02A9, 0x1FE7,
+ 0x1FE2, 0x00D6, 0x0491, 0x02CB, 0x1FEC,
+ 0x1FE4, 0x00BE, 0x047E, 0x02EE, 0x1FF2,
+ 0x1FE6, 0x00A7, 0x046A, 0x030F, 0x1FFA,
+ 0x1FE9, 0x0092, 0x0453, 0x0330, 0x0002,
+ 0x1FEB, 0x007E, 0x043B, 0x0351, 0x000B,
+ 0x1FED, 0x006B, 0x0421, 0x0372, 0x0015,
+ 0x1FEF, 0x005A, 0x0406, 0x0391, 0x0020,
+ 0x1FF1, 0x0049, 0x03EA, 0x03AF, 0x002D,
+ 0x003A, 0x03C6, 0x03C6, 0x003A, 0x0000,
+ 0x002D, 0x03AF, 0x03EA, 0x0049, 0x1FF1,
+ 0x0020, 0x0391, 0x0406, 0x005A, 0x1FEF,
+ 0x0015, 0x0372, 0x0421, 0x006B, 0x1FED,
+ 0x000B, 0x0351, 0x043B, 0x007E, 0x1FEB,
+ 0x0002, 0x0330, 0x0453, 0x0092, 0x1FE9,
+ 0x1FFA, 0x030F, 0x046A, 0x00A7, 0x1FE6,
+ 0x1FF2, 0x02EE, 0x047E, 0x00BE, 0x1FE4,
+ 0x1FEC, 0x02CB, 0x0491, 0x00D6, 0x1FE2,
+ 0x1FE7, 0x02A9, 0x04A1, 0x00EF, 0x1FE0,
+ 0x1FE3, 0x0286, 0x04B0, 0x0109, 0x1FDE,
+ 0x1FDF, 0x0264, 0x04BC, 0x0125, 0x1FDC,
+ 0x1FDC, 0x0241, 0x04C7, 0x0141, 0x1FDB,
+ 0x1FDA, 0x0220, 0x04CF, 0x015E, 0x1FD9,
+ 0x1FD9, 0x01FE, 0x04D4, 0x017D, 0x1FD8,
+ 0x1FD8, 0x01DC, 0x04D8, 0x019C, 0x1FD8,
+ },
+ [VS_LT_13_16_SCALE] = {
+ /* Luma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ /* Chroma */
+ 0x1FC8, 0x0199, 0x053E, 0x0199, 0x1FC8,
+ 0x1FCA, 0x0175, 0x053E, 0x01BD, 0x1FC6,
+ 0x1FCD, 0x0153, 0x0539, 0x01E2, 0x1FC5,
+ 0x1FCF, 0x0132, 0x0532, 0x0209, 0x1FC4,
+ 0x1FD2, 0x0112, 0x0529, 0x022F, 0x1FC4,
+ 0x1FD5, 0x00F4, 0x051C, 0x0256, 0x1FC5,
+ 0x1FD8, 0x00D7, 0x050D, 0x027E, 0x1FC6,
+ 0x1FDC, 0x00BB, 0x04FB, 0x02A6, 0x1FC8,
+ 0x1FDF, 0x00A1, 0x04E7, 0x02CE, 0x1FCB,
+ 0x1FE2, 0x0089, 0x04D1, 0x02F5, 0x1FCF,
+ 0x1FE5, 0x0072, 0x04B8, 0x031D, 0x1FD4,
+ 0x1FE8, 0x005D, 0x049E, 0x0344, 0x1FD9,
+ 0x1FEB, 0x0049, 0x0480, 0x036B, 0x1FE1,
+ 0x1FEE, 0x0037, 0x0462, 0x0390, 0x1FE9,
+ 0x1FF0, 0x0026, 0x0442, 0x03B6, 0x1FF2,
+ 0x1FF2, 0x0017, 0x0420, 0x03DA, 0x1FFD,
+ 0x0009, 0x03F7, 0x03F7, 0x0009, 0x0000,
+ 0x1FFD, 0x03DA, 0x0420, 0x0017, 0x1FF2,
+ 0x1FF2, 0x03B6, 0x0442, 0x0026, 0x1FF0,
+ 0x1FE9, 0x0390, 0x0462, 0x0037, 0x1FEE,
+ 0x1FE1, 0x036B, 0x0480, 0x0049, 0x1FEB,
+ 0x1FD9, 0x0344, 0x049E, 0x005D, 0x1FE8,
+ 0x1FD4, 0x031D, 0x04B8, 0x0072, 0x1FE5,
+ 0x1FCF, 0x02F5, 0x04D1, 0x0089, 0x1FE2,
+ 0x1FCB, 0x02CE, 0x04E7, 0x00A1, 0x1FDF,
+ 0x1FC8, 0x02A6, 0x04FB, 0x00BB, 0x1FDC,
+ 0x1FC6, 0x027E, 0x050D, 0x00D7, 0x1FD8,
+ 0x1FC5, 0x0256, 0x051C, 0x00F4, 0x1FD5,
+ 0x1FC4, 0x022F, 0x0529, 0x0112, 0x1FD2,
+ 0x1FC4, 0x0209, 0x0532, 0x0132, 0x1FCF,
+ 0x1FC5, 0x01E2, 0x0539, 0x0153, 0x1FCD,
+ 0x1FC6, 0x01BD, 0x053E, 0x0175, 0x1FCA,
+ },
+ [VS_LT_14_16_SCALE] = {
+ /* Luma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ /* Chroma */
+ 0x1FBF, 0x016C, 0x05AA, 0x016C, 0x1FBF,
+ 0x1FC3, 0x0146, 0x05A8, 0x0194, 0x1FBB,
+ 0x1FC7, 0x0121, 0x05A3, 0x01BD, 0x1FB8,
+ 0x1FCB, 0x00FD, 0x059B, 0x01E8, 0x1FB5,
+ 0x1FD0, 0x00DC, 0x058F, 0x0213, 0x1FB2,
+ 0x1FD4, 0x00BC, 0x0580, 0x0240, 0x1FB0,
+ 0x1FD8, 0x009E, 0x056E, 0x026D, 0x1FAF,
+ 0x1FDC, 0x0082, 0x055A, 0x029A, 0x1FAE,
+ 0x1FE0, 0x0067, 0x0542, 0x02C9, 0x1FAE,
+ 0x1FE4, 0x004F, 0x0528, 0x02F6, 0x1FAF,
+ 0x1FE8, 0x0038, 0x050A, 0x0325, 0x1FB1,
+ 0x1FEB, 0x0024, 0x04EB, 0x0352, 0x1FB4,
+ 0x1FEE, 0x0011, 0x04C8, 0x0380, 0x1FB9,
+ 0x1FF1, 0x0000, 0x04A4, 0x03AC, 0x1FBF,
+ 0x1FF4, 0x1FF1, 0x047D, 0x03D8, 0x1FC6,
+ 0x1FF6, 0x1FE4, 0x0455, 0x0403, 0x1FCE,
+ 0x1FD8, 0x0428, 0x0428, 0x1FD8, 0x0000,
+ 0x1FCE, 0x0403, 0x0455, 0x1FE4, 0x1FF6,
+ 0x1FC6, 0x03D8, 0x047D, 0x1FF1, 0x1FF4,
+ 0x1FBF, 0x03AC, 0x04A4, 0x0000, 0x1FF1,
+ 0x1FB9, 0x0380, 0x04C8, 0x0011, 0x1FEE,
+ 0x1FB4, 0x0352, 0x04EB, 0x0024, 0x1FEB,
+ 0x1FB1, 0x0325, 0x050A, 0x0038, 0x1FE8,
+ 0x1FAF, 0x02F6, 0x0528, 0x004F, 0x1FE4,
+ 0x1FAE, 0x02C9, 0x0542, 0x0067, 0x1FE0,
+ 0x1FAE, 0x029A, 0x055A, 0x0082, 0x1FDC,
+ 0x1FAF, 0x026D, 0x056E, 0x009E, 0x1FD8,
+ 0x1FB0, 0x0240, 0x0580, 0x00BC, 0x1FD4,
+ 0x1FB2, 0x0213, 0x058F, 0x00DC, 0x1FD0,
+ 0x1FB5, 0x01E8, 0x059B, 0x00FD, 0x1FCB,
+ 0x1FB8, 0x01BD, 0x05A3, 0x0121, 0x1FC7,
+ 0x1FBB, 0x0194, 0x05A8, 0x0146, 0x1FC3,
+ },
+ [VS_LT_15_16_SCALE] = {
+ /* Luma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ /* Chroma */
+ 0x1FBD, 0x0136, 0x061A, 0x0136, 0x1FBD,
+ 0x1FC3, 0x010D, 0x0617, 0x0161, 0x1FB8,
+ 0x1FC9, 0x00E6, 0x0611, 0x018E, 0x1FB2,
+ 0x1FCE, 0x00C1, 0x0607, 0x01BD, 0x1FAD,
+ 0x1FD4, 0x009E, 0x05F9, 0x01ED, 0x1FA8,
+ 0x1FD9, 0x007D, 0x05E8, 0x021F, 0x1FA3,
+ 0x1FDE, 0x005E, 0x05D3, 0x0252, 0x1F9F,
+ 0x1FE2, 0x0042, 0x05BC, 0x0285, 0x1F9B,
+ 0x1FE7, 0x0029, 0x059F, 0x02B9, 0x1F98,
+ 0x1FEA, 0x0011, 0x0580, 0x02EF, 0x1F96,
+ 0x1FEE, 0x1FFC, 0x055D, 0x0324, 0x1F95,
+ 0x1FF1, 0x1FE9, 0x0538, 0x0359, 0x1F95,
+ 0x1FF4, 0x1FD8, 0x0510, 0x038E, 0x1F96,
+ 0x1FF7, 0x1FC9, 0x04E5, 0x03C2, 0x1F99,
+ 0x1FF9, 0x1FBD, 0x04B8, 0x03F5, 0x1F9D,
+ 0x1FFB, 0x1FB2, 0x0489, 0x0428, 0x1FA2,
+ 0x1FAA, 0x0456, 0x0456, 0x1FAA, 0x0000,
+ 0x1FA2, 0x0428, 0x0489, 0x1FB2, 0x1FFB,
+ 0x1F9D, 0x03F5, 0x04B8, 0x1FBD, 0x1FF9,
+ 0x1F99, 0x03C2, 0x04E5, 0x1FC9, 0x1FF7,
+ 0x1F96, 0x038E, 0x0510, 0x1FD8, 0x1FF4,
+ 0x1F95, 0x0359, 0x0538, 0x1FE9, 0x1FF1,
+ 0x1F95, 0x0324, 0x055D, 0x1FFC, 0x1FEE,
+ 0x1F96, 0x02EF, 0x0580, 0x0011, 0x1FEA,
+ 0x1F98, 0x02B9, 0x059F, 0x0029, 0x1FE7,
+ 0x1F9B, 0x0285, 0x05BC, 0x0042, 0x1FE2,
+ 0x1F9F, 0x0252, 0x05D3, 0x005E, 0x1FDE,
+ 0x1FA3, 0x021F, 0x05E8, 0x007D, 0x1FD9,
+ 0x1FA8, 0x01ED, 0x05F9, 0x009E, 0x1FD4,
+ 0x1FAD, 0x01BD, 0x0607, 0x00C1, 0x1FCE,
+ 0x1FB2, 0x018E, 0x0611, 0x00E6, 0x1FC9,
+ 0x1FB8, 0x0161, 0x0617, 0x010D, 0x1FC3,
+ },
+ [VS_LT_16_16_SCALE] = {
+ /* Luma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ /* Chroma */
+ 0x1FC3, 0x00F8, 0x068A, 0x00F8, 0x1FC3,
+ 0x1FCA, 0x00CC, 0x0689, 0x0125, 0x1FBC,
+ 0x1FD1, 0x00A3, 0x0681, 0x0156, 0x1FB5,
+ 0x1FD7, 0x007D, 0x0676, 0x0188, 0x1FAE,
+ 0x1FDD, 0x005A, 0x0666, 0x01BD, 0x1FA6,
+ 0x1FE3, 0x0039, 0x0652, 0x01F3, 0x1F9F,
+ 0x1FE8, 0x001B, 0x0639, 0x022C, 0x1F98,
+ 0x1FEC, 0x0000, 0x061D, 0x0265, 0x1F92,
+ 0x1FF0, 0x1FE8, 0x05FC, 0x02A0, 0x1F8C,
+ 0x1FF4, 0x1FD2, 0x05D7, 0x02DC, 0x1F87,
+ 0x1FF7, 0x1FBF, 0x05AF, 0x0319, 0x1F82,
+ 0x1FFA, 0x1FAF, 0x0583, 0x0356, 0x1F7E,
+ 0x1FFC, 0x1FA1, 0x0554, 0x0393, 0x1F7C,
+ 0x1FFE, 0x1F95, 0x0523, 0x03CF, 0x1F7B,
+ 0x0000, 0x1F8C, 0x04EE, 0x040B, 0x1F7B,
+ 0x0001, 0x1F85, 0x04B8, 0x0446, 0x1F7C,
+ 0x1F80, 0x0480, 0x0480, 0x1F80, 0x0000,
+ 0x1F7C, 0x0446, 0x04B8, 0x1F85, 0x0001,
+ 0x1F7B, 0x040B, 0x04EE, 0x1F8C, 0x0000,
+ 0x1F7B, 0x03CF, 0x0523, 0x1F95, 0x1FFE,
+ 0x1F7C, 0x0393, 0x0554, 0x1FA1, 0x1FFC,
+ 0x1F7E, 0x0356, 0x0583, 0x1FAF, 0x1FFA,
+ 0x1F82, 0x0319, 0x05AF, 0x1FBF, 0x1FF7,
+ 0x1F87, 0x02DC, 0x05D7, 0x1FD2, 0x1FF4,
+ 0x1F8C, 0x02A0, 0x05FC, 0x1FE8, 0x1FF0,
+ 0x1F92, 0x0265, 0x061D, 0x0000, 0x1FEC,
+ 0x1F98, 0x022C, 0x0639, 0x001B, 0x1FE8,
+ 0x1F9F, 0x01F3, 0x0652, 0x0039, 0x1FE3,
+ 0x1FA6, 0x01BD, 0x0666, 0x005A, 0x1FDD,
+ 0x1FAE, 0x0188, 0x0676, 0x007D, 0x1FD7,
+ 0x1FB5, 0x0156, 0x0681, 0x00A3, 0x1FD1,
+ 0x1FBC, 0x0125, 0x0689, 0x00CC, 0x1FCA,
+ },
+ [VS_1_TO_1_SCALE] = {
+ /* Luma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ /* Chroma */
+ 0x0000, 0x0000, 0x0800, 0x0000, 0x0000,
+ 0x1FD8, 0x0085, 0x06F9, 0x00E1, 0x1FC9,
+ 0x1FDF, 0x005B, 0x06F2, 0x0114, 0x1FC0,
+ 0x1FE5, 0x0035, 0x06E5, 0x014A, 0x1FB7,
+ 0x1FEB, 0x0012, 0x06D3, 0x0182, 0x1FAE,
+ 0x1FF1, 0x1FF3, 0x06BA, 0x01BD, 0x1FA5,
+ 0x1FF5, 0x1FD7, 0x069D, 0x01FB, 0x1F9C,
+ 0x1FF9, 0x1FBE, 0x067C, 0x023A, 0x1F93,
+ 0x1FFD, 0x1FA8, 0x0656, 0x027B, 0x1F8A,
+ 0x0000, 0x1F95, 0x062B, 0x02BF, 0x1F81,
+ 0x0002, 0x1F86, 0x05FC, 0x0303, 0x1F79,
+ 0x0004, 0x1F79, 0x05CA, 0x0347, 0x1F72,
+ 0x0005, 0x1F6F, 0x0594, 0x038D, 0x1F6B,
+ 0x0006, 0x1F67, 0x055B, 0x03D2, 0x1F66,
+ 0x0007, 0x1F62, 0x051E, 0x0417, 0x1F62,
+ 0x0007, 0x1F5F, 0x04DF, 0x045C, 0x1F5F,
+ 0x1F5E, 0x04A2, 0x04A2, 0x1F5E, 0x0000,
+ 0x1F5F, 0x045C, 0x04DF, 0x1F5F, 0x0007,
+ 0x1F62, 0x0417, 0x051E, 0x1F62, 0x0007,
+ 0x1F66, 0x03D2, 0x055B, 0x1F67, 0x0006,
+ 0x1F6B, 0x038D, 0x0594, 0x1F6F, 0x0005,
+ 0x1F72, 0x0347, 0x05CA, 0x1F79, 0x0004,
+ 0x1F79, 0x0303, 0x05FC, 0x1F86, 0x0002,
+ 0x1F81, 0x02BF, 0x062B, 0x1F95, 0x0000,
+ 0x1F8A, 0x027B, 0x0656, 0x1FA8, 0x1FFD,
+ 0x1F93, 0x023A, 0x067C, 0x1FBE, 0x1FF9,
+ 0x1F9C, 0x01FB, 0x069D, 0x1FD7, 0x1FF5,
+ 0x1FA5, 0x01BD, 0x06BA, 0x1FF3, 0x1FF1,
+ 0x1FAE, 0x0182, 0x06D3, 0x0012, 0x1FEB,
+ 0x1FB7, 0x014A, 0x06E5, 0x0035, 0x1FE5,
+ 0x1FC0, 0x0114, 0x06F2, 0x005B, 0x1FDF,
+ 0x1FC9, 0x00E1, 0x06F9, 0x0085, 0x1FD8,
+ },
+};
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index af0a5ffcaa98..e8175e7938ed 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -30,38 +30,47 @@
const struct vpdma_data_format vpdma_yuv_fmts[] = {
[VPDMA_DATA_FMT_Y444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y444,
.depth = 8,
},
[VPDMA_DATA_FMT_Y422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y422,
.depth = 8,
},
[VPDMA_DATA_FMT_Y420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_Y420,
.depth = 8,
},
[VPDMA_DATA_FMT_C444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C444,
.depth = 8,
},
[VPDMA_DATA_FMT_C422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C422,
.depth = 8,
},
[VPDMA_DATA_FMT_C420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_C420,
.depth = 4,
},
[VPDMA_DATA_FMT_YC422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YC422,
.depth = 16,
},
[VPDMA_DATA_FMT_YC444] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YC444,
.depth = 24,
},
[VPDMA_DATA_FMT_CY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_CY422,
.depth = 16,
},
@@ -69,82 +78,102 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
const struct vpdma_data_format vpdma_rgb_fmts[] = {
[VPDMA_DATA_FMT_RGB565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGB16_565,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB_1555,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_RGBA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA_5551,
.depth = 16,
},
[VPDMA_DATA_FMT_RGBA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_ARGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_RGB24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGB24_888,
.depth = 24,
},
[VPDMA_DATA_FMT_ARGB32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ARGB32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_RGBA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_RGBA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_RGBA32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_BGR565] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGR16_565,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR16_1555] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR_1555,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_BGRA16_5551] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA_5551,
.depth = 16,
},
[VPDMA_DATA_FMT_BGRA16] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA_4444,
.depth = 16,
},
[VPDMA_DATA_FMT_ABGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_BGR24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGR24_888,
.depth = 24,
},
[VPDMA_DATA_FMT_ABGR32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_ABGR32_8888,
.depth = 32,
},
[VPDMA_DATA_FMT_BGRA24] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA24_6666,
.depth = 24,
},
[VPDMA_DATA_FMT_BGRA32] = {
+ .type = VPDMA_DATA_FMT_TYPE_RGB,
.data_type = DATA_TYPE_BGRA32_8888,
.depth = 32,
},
@@ -152,6 +181,7 @@ const struct vpdma_data_format vpdma_rgb_fmts[] = {
const struct vpdma_data_format vpdma_misc_fmts[] = {
[VPDMA_DATA_FMT_MV] = {
+ .type = VPDMA_DATA_FMT_TYPE_MISC,
.data_type = DATA_TYPE_MV,
.depth = 4,
},
@@ -577,8 +607,8 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word5: max_width %d, max_height %d\n",
dtd_get_max_width(dtd), dtd_get_max_height(dtd));
- pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0);
- pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1);
+ pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
+ pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
/*
@@ -599,10 +629,11 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
channel = next_chan = chan_info[chan].num;
- if (fmt->data_type == DATA_TYPE_C420)
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420)
depth = 8;
- stride = (depth * c_rect->width) >> 3;
+ stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
dma_addr += (c_rect->left * depth) >> 3;
dtd = list->next;
@@ -649,13 +680,14 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
channel = next_chan = chan_info[chan].num;
- if (fmt->data_type == DATA_TYPE_C420) {
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
+ fmt->data_type == DATA_TYPE_C420) {
height >>= 1;
frame_height >>= 1;
depth = 8;
}
- stride = (depth * c_rect->width) >> 3;
+ stride = ALIGN((depth * c_rect->width) >> 3, VPDMA_STRIDE_ALIGN);
dma_addr += (c_rect->left * depth) >> 3;
dtd = list->next;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index eaa2a71a5db9..cf40f11b3c8f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -39,13 +39,23 @@ struct vpdma_data {
bool ready;
};
+enum vpdma_data_format_type {
+ VPDMA_DATA_FMT_TYPE_YUV,
+ VPDMA_DATA_FMT_TYPE_RGB,
+ VPDMA_DATA_FMT_TYPE_MISC,
+};
+
struct vpdma_data_format {
+ enum vpdma_data_format_type type;
int data_type;
u8 depth;
};
#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
-
+#define VPDMA_STRIDE_ALIGN 16 /*
+ * line stride of source and dest
+ * buffers should be 16 byte aligned
+ */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index f0e9a8038c1b..c1a6ce1884f3 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -78,7 +78,7 @@
#define DATA_TYPE_C420 0x6
#define DATA_TYPE_YC422 0x7
#define DATA_TYPE_YC444 0x8
-#define DATA_TYPE_CY422 0x23
+#define DATA_TYPE_CY422 0x27
#define DATA_TYPE_RGB16_565 0x0
#define DATA_TYPE_ARGB_1555 0x1
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 4e58069e24ff..1296c5386231 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/log2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
@@ -42,6 +43,8 @@
#include "vpdma.h"
#include "vpe_regs.h"
+#include "sc.h"
+#include "csc.h"
#define VPE_MODULE_NAME "vpe"
@@ -54,10 +57,6 @@
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
#define H_ALIGN 1 /* multiple of 2 */
-#define W_ALIGN 1 /* multiple of 2 */
-
-/* multiple of 128 bits, line stride, 16 bytes */
-#define L_ALIGN 4
/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE (1 << 0)
@@ -268,6 +267,38 @@ static struct vpe_fmt vpe_formats[] = {
.vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
},
},
+ {
+ .name = "RGB888 packed",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+ },
+ },
+ {
+ .name = "ARGB32",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+ },
+ },
+ {
+ .name = "BGR888 packed",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
+ },
+ },
+ {
+ .name = "ABGR32",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
+ },
+ },
};
/*
@@ -327,9 +358,12 @@ struct vpe_dev {
int irq;
void __iomem *base;
+ struct resource *res;
struct vb2_alloc_ctx *alloc_ctx;
struct vpdma_data *vpdma; /* vpdma data handle */
+ struct sc_data *sc; /* scaler data handle */
+ struct csc_data *csc; /* csc data handle */
};
/*
@@ -356,6 +390,8 @@ struct vpe_ctx {
void *mv_buf[2]; /* virtual addrs of motion vector bufs */
size_t mv_buf_size; /* current motion vector buffer size */
struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
+ struct vpdma_buf sc_coeff_h; /* h coeff buffer */
+ struct vpdma_buf sc_coeff_v; /* v coeff buffer */
struct vpdma_desc_list desc_list; /* DMA descriptor list */
bool deinterlacing; /* using de-interlacer */
@@ -438,14 +474,23 @@ struct vpe_mmr_adb {
u32 us3_regs[8];
struct vpdma_adb_hdr dei_hdr;
u32 dei_regs[8];
- struct vpdma_adb_hdr sc_hdr;
- u32 sc_regs[1];
- u32 sc_pad[3];
+ struct vpdma_adb_hdr sc_hdr0;
+ u32 sc_regs0[7];
+ u32 sc_pad0[1];
+ struct vpdma_adb_hdr sc_hdr8;
+ u32 sc_regs8[6];
+ u32 sc_pad8[2];
+ struct vpdma_adb_hdr sc_hdr17;
+ u32 sc_regs17[9];
+ u32 sc_pad17[3];
struct vpdma_adb_hdr csc_hdr;
u32 csc_regs[6];
u32 csc_pad[2];
};
+#define GET_OFFSET_TOP(ctx, obj, reg) \
+ ((obj)->res->start - ctx->dev->res->start + reg)
+
#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
/*
@@ -458,8 +503,14 @@ static void init_adb_hdrs(struct vpe_ctx *ctx)
VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
- VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
- VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
+ VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
+ GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
+ VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
+ GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
};
/*
@@ -670,17 +721,20 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
u32 val = 0;
- /* select RGB path when color space conversion is supported in future */
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
- val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
+ if (clrspc == V4L2_COLORSPACE_SRGB)
+ val |= VPE_RGB_OUT_SELECT;
else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
val |= VPE_COLOR_SEPARATE_422;
- /* The source of CHR_DS is always the scaler, whether it's used or not */
- val |= VPE_DS_SRC_DEI_SCALER;
+ /*
+ * the source of CHR_DS and CSC is always the scaler, irrespective of
+ * whether it's used or not
+ */
+ val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
if (fmt->fourcc != V4L2_PIX_FMT_NV12)
val |= VPE_DS_BYPASS;
@@ -742,28 +796,6 @@ static void set_dei_shadow_registers(struct vpe_ctx *ctx)
ctx->load_mmrs = true;
}
-static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
-{
- struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
-
- *shadow_csc_reg5 |= VPE_CSC_BYPASS;
-
- ctx->load_mmrs = true;
-}
-
-static void set_sc_regs_bypass(struct vpe_ctx *ctx)
-{
- struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- u32 *sc_reg0 = &mmr_adb->sc_regs[0];
- u32 val = 0;
-
- val |= VPE_SC_BYPASS;
- *sc_reg0 = val;
-
- ctx->load_mmrs = true;
-}
-
/*
* Set the shadow registers whose values are modified when either the
* source or destination format is changed.
@@ -772,6 +804,11 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
{
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
size_t mv_buf_size;
int ret;
@@ -780,12 +817,23 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
if ((s_q_data->flags & Q_DATA_INTERLACED) &&
!(d_q_data->flags & Q_DATA_INTERLACED)) {
+ int bytes_per_line;
const struct vpdma_data_format *mv =
&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ /*
+ * we make sure that the source image has a 16 byte aligned
+ * stride; we need to do the same for the motion vector buffer
+ * by aligning its stride to the next 16 byte boundary. This
+ * extra space will not be used by the de-interlacer, but will
+ * ensure that vpdma operates correctly
+ */
+ bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * s_q_data->height;
+
ctx->deinterlacing = 1;
- mv_buf_size =
- (s_q_data->width * s_q_data->height * mv->depth) >> 3;
+ src_h <<= 1;
} else {
ctx->deinterlacing = 0;
mv_buf_size = 0;
@@ -799,8 +847,16 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_cfg_and_line_modes(ctx);
set_dei_regs(ctx);
- set_csc_coeff_bypass(ctx);
- set_sc_regs_bypass(ctx);
+
+ csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
+ s_q_data->colorspace, d_q_data->colorspace);
+
+ sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
+ sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
+
+ sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
+ &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
+ src_w, src_h, dst_w, dst_h);
return 0;
}
@@ -916,35 +972,10 @@ static void vpe_dump_regs(struct vpe_dev *dev)
DUMPREG(DEI_FMD_STATUS_R0);
DUMPREG(DEI_FMD_STATUS_R1);
DUMPREG(DEI_FMD_STATUS_R2);
- DUMPREG(SC_MP_SC0);
- DUMPREG(SC_MP_SC1);
- DUMPREG(SC_MP_SC2);
- DUMPREG(SC_MP_SC3);
- DUMPREG(SC_MP_SC4);
- DUMPREG(SC_MP_SC5);
- DUMPREG(SC_MP_SC6);
- DUMPREG(SC_MP_SC8);
- DUMPREG(SC_MP_SC9);
- DUMPREG(SC_MP_SC10);
- DUMPREG(SC_MP_SC11);
- DUMPREG(SC_MP_SC12);
- DUMPREG(SC_MP_SC13);
- DUMPREG(SC_MP_SC17);
- DUMPREG(SC_MP_SC18);
- DUMPREG(SC_MP_SC19);
- DUMPREG(SC_MP_SC20);
- DUMPREG(SC_MP_SC21);
- DUMPREG(SC_MP_SC22);
- DUMPREG(SC_MP_SC23);
- DUMPREG(SC_MP_SC24);
- DUMPREG(SC_MP_SC25);
- DUMPREG(CSC_CSC00);
- DUMPREG(CSC_CSC01);
- DUMPREG(CSC_CSC02);
- DUMPREG(CSC_CSC03);
- DUMPREG(CSC_CSC04);
- DUMPREG(CSC_CSC05);
#undef DUMPREG
+
+ sc_dump_regs(dev->sc);
+ csc_dump_regs(dev->csc);
}
static void add_out_dtd(struct vpe_ctx *ctx, int port)
@@ -1053,6 +1084,7 @@ static void disable_irqs(struct vpe_ctx *ctx)
static void device_run(void *priv)
{
struct vpe_ctx *ctx = priv;
+ struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
@@ -1075,13 +1107,37 @@ static void device_run(void *priv)
ctx->load_mmrs = false;
}
+ if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
+ sc->load_coeff_h) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_h, 0);
+
+ sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
+ sc->load_coeff_h = false;
+ }
+
+ if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
+ sc->load_coeff_v) {
+ vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
+ vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
+ &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
+
+ sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
+ sc->load_coeff_v = false;
+ }
+
/* output data descriptors */
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
- if (d_q_data->fmt->coplanar)
- add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ add_out_dtd(ctx, VPE_PORT_RGB_OUT);
+ } else {
+ add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
+ }
/* input data descriptors */
if (ctx->deinterlacing) {
@@ -1117,9 +1173,16 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
- if (d_q_data->fmt->coplanar)
- vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
+ if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_RGB_OUT);
+ } else {
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_LUMA_OUT);
+ if (d_q_data->fmt->coplanar)
+ vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
+ VPE_CHAN_CHROMA_OUT);
+ }
if (ctx->deinterlacing)
vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
@@ -1198,6 +1261,8 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
vpdma_reset_desc_list(&ctx->desc_list);
@@ -1352,7 +1417,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *plane_fmt;
- int i;
+ unsigned int w_align;
+ int i, depth, depth_bytes;
if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1363,35 +1429,57 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
pix->field = V4L2_FIELD_NONE;
- v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
+ depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
+
+ /*
+ * the line stride should be 16 byte aligned for VPDMA to work. Based
+ * on the bytes per pixel, figure out how much the width should be
+ * aligned to make sure the line stride is 16 byte aligned
+ */
+ depth_bytes = depth >> 3;
+
+ if (depth_bytes == 3)
+ /*
+ * if bpp is 3 (as in some RGB formats), the pixel width doesn't
+ * really help in ensuring line stride is 16 byte aligned
+ */
+ w_align = 4;
+ else
+ /*
+ * for the remaining bpp values (4, 2 and 1), the pixel width
+ * alignment can ensure a line stride alignment of 16 bytes. For
+ * example, if bpp is 2, the line stride is 16 byte aligned when
+ * the width is aligned to 8 pixels
+ */
+ w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
pix->num_planes = fmt->coplanar ? 2 : 1;
pix->pixelformat = fmt->fourcc;
- if (type == VPE_FMT_TYPE_CAPTURE) {
- struct vpe_q_data *s_q_data;
-
- /* get colorspace from the source queue */
- s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
- pix->colorspace = s_q_data->colorspace;
- } else {
- if (!pix->colorspace)
- pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ if (!pix->colorspace) {
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
+ fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ if (pix->height > 1280) /* HD */
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ else /* SD */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ }
}
for (i = 0; i < pix->num_planes; i++) {
- int depth;
-
plane_fmt = &pix->plane_fmt[i];
depth = fmt->vpdma_fmt[i]->depth;
if (i == VPE_LUMA)
- plane_fmt->bytesperline =
- round_up((pix->width * depth) >> 3,
- 1 << L_ALIGN);
+ plane_fmt->bytesperline = (pix->width * depth) >> 3;
else
plane_fmt->bytesperline = pix->width;
@@ -1749,6 +1837,14 @@ static int vpe_open(struct file *file)
if (ret != 0)
goto free_desc_list;
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_mmr_adb;
+
+ ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_sc_h;
+
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
@@ -1770,7 +1866,7 @@ static int vpe_open(struct file *file)
s_q_data->height = 1080;
s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
+ s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
s_q_data->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
@@ -1817,6 +1913,10 @@ static int vpe_open(struct file *file)
exit_fh:
v4l2_ctrl_handler_free(hdl);
v4l2_fh_exit(&ctx->fh);
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+free_sc_h:
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+free_mmr_adb:
vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
vpdma_free_desc_list(&ctx->desc_list);
@@ -1938,12 +2038,11 @@ static int vpe_probe(struct platform_device *pdev)
{
struct vpe_dev *dev;
struct video_device *vfd;
- struct resource *res;
int ret, irq, func;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (!dev)
+ return -ENOMEM;
spin_lock_init(&dev->lock);
@@ -1954,16 +2053,17 @@ static int vpe_probe(struct platform_device *pdev)
atomic_set(&dev->num_instances, 0);
mutex_init(&dev->dev_mutex);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
+ dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vpe_top");
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
* for register access, the driver should be changed later to access
* registers based on the sub block base addresses
*/
- dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
- if (IS_ERR(dev->base)) {
- ret = PTR_ERR(dev->base);
+ dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
+ if (!dev->base) {
+ ret = -ENOMEM;
goto v4l2_dev_unreg;
}
@@ -2006,9 +2106,23 @@ static int vpe_probe(struct platform_device *pdev)
vpe_top_vpdma_reset(dev);
+ dev->sc = sc_create(pdev);
+ if (IS_ERR(dev->sc)) {
+ ret = PTR_ERR(dev->sc);
+ goto runtime_put;
+ }
+
+ dev->csc = csc_create(pdev);
+ if (IS_ERR(dev->csc)) {
+ ret = PTR_ERR(dev->csc);
+ goto runtime_put;
+ }
+
dev->vpdma = vpdma_create(pdev);
- if (IS_ERR(dev->vpdma))
+ if (IS_ERR(dev->vpdma)) {
+ ret = PTR_ERR(dev->vpdma);
goto runtime_put;
+ }
vfd = &dev->vfd;
*vfd = vpe_videodev;
@@ -2081,18 +2195,7 @@ static struct platform_driver vpe_pdrv = {
},
};
-static void __exit vpe_exit(void)
-{
- platform_driver_unregister(&vpe_pdrv);
-}
-
-static int __init vpe_init(void)
-{
- return platform_driver_register(&vpe_pdrv);
-}
-
-module_init(vpe_init);
-module_exit(vpe_exit);
+module_platform_driver(vpe_pdrv);
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
index ed214e828398..74283d79eae1 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -306,191 +306,4 @@
#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
#define VPE_FMD_FRAME_DIFF_SHIFT 0
-/* VPE scaler regs */
-#define VPE_SC_MP_SC0 0x0700
-#define VPE_INTERLACE_O (1 << 0)
-#define VPE_LINEAR (1 << 1)
-#define VPE_SC_BYPASS (1 << 2)
-#define VPE_INVT_FID (1 << 3)
-#define VPE_USE_RAV (1 << 4)
-#define VPE_ENABLE_EV (1 << 5)
-#define VPE_AUTO_HS (1 << 6)
-#define VPE_DCM_2X (1 << 7)
-#define VPE_DCM_4X (1 << 8)
-#define VPE_HP_BYPASS (1 << 9)
-#define VPE_INTERLACE_I (1 << 10)
-#define VPE_ENABLE_SIN2_VER_INTP (1 << 11)
-#define VPE_Y_PK_EN (1 << 14)
-#define VPE_TRIM (1 << 15)
-#define VPE_SELFGEN_FID (1 << 16)
-
-#define VPE_SC_MP_SC1 0x0704
-#define VPE_ROW_ACC_INC_MASK 0x07ffffff
-#define VPE_ROW_ACC_INC_SHIFT 0
-
-#define VPE_SC_MP_SC2 0x0708
-#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff
-#define VPE_ROW_ACC_OFFSET_SHIFT 0
-
-#define VPE_SC_MP_SC3 0x070c
-#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff
-#define VPE_ROW_ACC_OFFSET_B_SHIFT 0
-
-#define VPE_SC_MP_SC4 0x0710
-#define VPE_TAR_H_MASK 0x07ff
-#define VPE_TAR_H_SHIFT 0
-#define VPE_TAR_W_MASK 0x07ff
-#define VPE_TAR_W_SHIFT 12
-#define VPE_LIN_ACC_INC_U_MASK 0x07
-#define VPE_LIN_ACC_INC_U_SHIFT 24
-#define VPE_NLIN_ACC_INIT_U_MASK 0x07
-#define VPE_NLIN_ACC_INIT_U_SHIFT 28
-
-#define VPE_SC_MP_SC5 0x0714
-#define VPE_SRC_H_MASK 0x07ff
-#define VPE_SRC_H_SHIFT 0
-#define VPE_SRC_W_MASK 0x07ff
-#define VPE_SRC_W_SHIFT 12
-#define VPE_NLIN_ACC_INC_U_MASK 0x07
-#define VPE_NLIN_ACC_INC_U_SHIFT 24
-
-#define VPE_SC_MP_SC6 0x0718
-#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff
-#define VPE_ROW_ACC_INIT_RAV_SHIFT 0
-#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff
-#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10
-
-#define VPE_SC_MP_SC8 0x0720
-#define VPE_NLIN_LEFT_MASK 0x07ff
-#define VPE_NLIN_LEFT_SHIFT 0
-#define VPE_NLIN_RIGHT_MASK 0x07ff
-#define VPE_NLIN_RIGHT_SHIFT 12
-
-#define VPE_SC_MP_SC9 0x0724
-#define VPE_LIN_ACC_INC VPE_SC_MP_SC9
-
-#define VPE_SC_MP_SC10 0x0728
-#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10
-
-#define VPE_SC_MP_SC11 0x072c
-#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11
-
-#define VPE_SC_MP_SC12 0x0730
-#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff
-#define VPE_COL_ACC_OFFSET_SHIFT 0
-
-#define VPE_SC_MP_SC13 0x0734
-#define VPE_SC_FACTOR_RAV_MASK 0x03ff
-#define VPE_SC_FACTOR_RAV_SHIFT 0
-#define VPE_CHROMA_INTP_THR_MASK 0x03ff
-#define VPE_CHROMA_INTP_THR_SHIFT 12
-#define VPE_DELTA_CHROMA_THR_MASK 0x0f
-#define VPE_DELTA_CHROMA_THR_SHIFT 24
-
-#define VPE_SC_MP_SC17 0x0744
-#define VPE_EV_THR_MASK 0x03ff
-#define VPE_EV_THR_SHIFT 12
-#define VPE_DELTA_LUMA_THR_MASK 0x0f
-#define VPE_DELTA_LUMA_THR_SHIFT 24
-#define VPE_DELTA_EV_THR_MASK 0x0f
-#define VPE_DELTA_EV_THR_SHIFT 28
-
-#define VPE_SC_MP_SC18 0x0748
-#define VPE_HS_FACTOR_MASK 0x03ff
-#define VPE_HS_FACTOR_SHIFT 0
-#define VPE_CONF_DEFAULT_MASK 0x01ff
-#define VPE_CONF_DEFAULT_SHIFT 16
-
-#define VPE_SC_MP_SC19 0x074c
-#define VPE_HPF_COEFF0_MASK 0xff
-#define VPE_HPF_COEFF0_SHIFT 0
-#define VPE_HPF_COEFF1_MASK 0xff
-#define VPE_HPF_COEFF1_SHIFT 8
-#define VPE_HPF_COEFF2_MASK 0xff
-#define VPE_HPF_COEFF2_SHIFT 16
-#define VPE_HPF_COEFF3_MASK 0xff
-#define VPE_HPF_COEFF3_SHIFT 23
-
-#define VPE_SC_MP_SC20 0x0750
-#define VPE_HPF_COEFF4_MASK 0xff
-#define VPE_HPF_COEFF4_SHIFT 0
-#define VPE_HPF_COEFF5_MASK 0xff
-#define VPE_HPF_COEFF5_SHIFT 8
-#define VPE_HPF_NORM_SHIFT_MASK 0x07
-#define VPE_HPF_NORM_SHIFT_SHIFT 16
-#define VPE_NL_LIMIT_MASK 0x1ff
-#define VPE_NL_LIMIT_SHIFT 20
-
-#define VPE_SC_MP_SC21 0x0754
-#define VPE_NL_LO_THR_MASK 0x01ff
-#define VPE_NL_LO_THR_SHIFT 0
-#define VPE_NL_LO_SLOPE_MASK 0xff
-#define VPE_NL_LO_SLOPE_SHIFT 16
-
-#define VPE_SC_MP_SC22 0x0758
-#define VPE_NL_HI_THR_MASK 0x01ff
-#define VPE_NL_HI_THR_SHIFT 0
-#define VPE_NL_HI_SLOPE_SH_MASK 0x07
-#define VPE_NL_HI_SLOPE_SH_SHIFT 16
-
-#define VPE_SC_MP_SC23 0x075c
-#define VPE_GRADIENT_THR_MASK 0x07ff
-#define VPE_GRADIENT_THR_SHIFT 0
-#define VPE_GRADIENT_THR_RANGE_MASK 0x0f
-#define VPE_GRADIENT_THR_RANGE_SHIFT 12
-#define VPE_MIN_GY_THR_MASK 0xff
-#define VPE_MIN_GY_THR_SHIFT 16
-#define VPE_MIN_GY_THR_RANGE_MASK 0x0f
-#define VPE_MIN_GY_THR_RANGE_SHIFT 28
-
-#define VPE_SC_MP_SC24 0x0760
-#define VPE_ORG_H_MASK 0x07ff
-#define VPE_ORG_H_SHIFT 0
-#define VPE_ORG_W_MASK 0x07ff
-#define VPE_ORG_W_SHIFT 16
-
-#define VPE_SC_MP_SC25 0x0764
-#define VPE_OFF_H_MASK 0x07ff
-#define VPE_OFF_H_SHIFT 0
-#define VPE_OFF_W_MASK 0x07ff
-#define VPE_OFF_W_SHIFT 16
-
-/* VPE color space converter regs */
-#define VPE_CSC_CSC00 0x5700
-#define VPE_CSC_A0_MASK 0x1fff
-#define VPE_CSC_A0_SHIFT 0
-#define VPE_CSC_B0_MASK 0x1fff
-#define VPE_CSC_B0_SHIFT 16
-
-#define VPE_CSC_CSC01 0x5704
-#define VPE_CSC_C0_MASK 0x1fff
-#define VPE_CSC_C0_SHIFT 0
-#define VPE_CSC_A1_MASK 0x1fff
-#define VPE_CSC_A1_SHIFT 16
-
-#define VPE_CSC_CSC02 0x5708
-#define VPE_CSC_B1_MASK 0x1fff
-#define VPE_CSC_B1_SHIFT 0
-#define VPE_CSC_C1_MASK 0x1fff
-#define VPE_CSC_C1_SHIFT 16
-
-#define VPE_CSC_CSC03 0x570c
-#define VPE_CSC_A2_MASK 0x1fff
-#define VPE_CSC_A2_SHIFT 0
-#define VPE_CSC_B2_MASK 0x1fff
-#define VPE_CSC_B2_SHIFT 16
-
-#define VPE_CSC_CSC04 0x5710
-#define VPE_CSC_C2_MASK 0x1fff
-#define VPE_CSC_C2_SHIFT 0
-#define VPE_CSC_D0_MASK 0x0fff
-#define VPE_CSC_D0_SHIFT 16
-
-#define VPE_CSC_CSC05 0x5714
-#define VPE_CSC_D1_MASK 0x0fff
-#define VPE_CSC_D1_SHIFT 0
-#define VPE_CSC_D2_MASK 0x0fff
-#define VPE_CSC_D2_SHIFT 16
-#define VPE_CSC_BYPASS (1 << 28)
-
#endif
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 4da226169e15..151cecd0ea25 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,5 +1,6 @@
vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
-vsp1-y += vsp1_lif.o vsp1_uds.o
+vsp1-y += vsp1_hsit.o vsp1_lif.o vsp1_lut.o
+vsp1-y += vsp1_sru.o vsp1_uds.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index d6c6ecd039ff..94d1b02680c5 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -28,8 +28,11 @@ struct clk;
struct device;
struct vsp1_platform_data;
+struct vsp1_hsit;
struct vsp1_lif;
+struct vsp1_lut;
struct vsp1_rwpf;
+struct vsp1_sru;
struct vsp1_uds;
#define VPS1_MAX_RPF 5
@@ -47,8 +50,12 @@ struct vsp1_device {
struct mutex lock;
int ref_count;
+ struct vsp1_hsit *hsi;
+ struct vsp1_hsit *hst;
struct vsp1_lif *lif;
+ struct vsp1_lut *lut;
struct vsp1_rwpf *rpf[VPS1_MAX_RPF];
+ struct vsp1_sru *sru;
struct vsp1_uds *uds[VPS1_MAX_UDS];
struct vsp1_rwpf *wpf[VPS1_MAX_WPF];
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index d16bf0f41e24..0df0a994e575 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -20,8 +20,11 @@
#include <linux/videodev2.h>
#include "vsp1.h"
+#include "vsp1_hsit.h"
#include "vsp1_lif.h"
+#include "vsp1_lut.h"
#include "vsp1_rwpf.h"
+#include "vsp1_sru.h"
#include "vsp1_uds.h"
/* -----------------------------------------------------------------------------
@@ -152,6 +155,22 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+
if (vsp1->pdata->features & VSP1_HAS_LIF) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
@@ -162,6 +181,16 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
}
+ if (vsp1->pdata->features & VSP1_HAS_LUT) {
+ vsp1->lut = vsp1_lut_create(vsp1);
+ if (IS_ERR(vsp1->lut)) {
+ ret = PTR_ERR(vsp1->lut);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
+ }
+
for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
struct vsp1_rwpf *rpf;
@@ -175,6 +204,16 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
}
+ if (vsp1->pdata->features & VSP1_HAS_SRU) {
+ vsp1->sru = vsp1_sru_create(vsp1);
+ if (IS_ERR(vsp1->sru)) {
+ ret = PTR_ERR(vsp1->sru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
+ }
+
for (i = 0; i < vsp1->pdata->uds_count; ++i) {
struct vsp1_uds *uds;
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 9028f9d524f4..0226e47df6d9 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include "vsp1.h"
@@ -122,12 +123,16 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
unsigned int id;
unsigned int reg;
} routes[] = {
+ { VI6_DPR_NODE_HSI, VI6_DPR_HSI_ROUTE },
+ { VI6_DPR_NODE_HST, VI6_DPR_HST_ROUTE },
{ VI6_DPR_NODE_LIF, 0 },
+ { VI6_DPR_NODE_LUT, VI6_DPR_LUT_ROUTE },
{ VI6_DPR_NODE_RPF(0), VI6_DPR_RPF_ROUTE(0) },
{ VI6_DPR_NODE_RPF(1), VI6_DPR_RPF_ROUTE(1) },
{ VI6_DPR_NODE_RPF(2), VI6_DPR_RPF_ROUTE(2) },
{ VI6_DPR_NODE_RPF(3), VI6_DPR_RPF_ROUTE(3) },
{ VI6_DPR_NODE_RPF(4), VI6_DPR_RPF_ROUTE(4) },
+ { VI6_DPR_NODE_SRU, VI6_DPR_SRU_ROUTE },
{ VI6_DPR_NODE_UDS(0), VI6_DPR_UDS_ROUTE(0) },
{ VI6_DPR_NODE_UDS(1), VI6_DPR_UDS_ROUTE(1) },
{ VI6_DPR_NODE_UDS(2), VI6_DPR_UDS_ROUTE(2) },
@@ -177,5 +182,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
void vsp1_entity_destroy(struct vsp1_entity *entity)
{
+ if (entity->subdev.ctrl_handler)
+ v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
media_entity_cleanup(&entity->subdev.entity);
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index c4feab2cbb81..e152798d7f38 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -20,8 +20,12 @@
struct vsp1_device;
enum vsp1_entity_type {
+ VSP1_ENTITY_HSI,
+ VSP1_ENTITY_HST,
VSP1_ENTITY_LIF,
+ VSP1_ENTITY_LUT,
VSP1_ENTITY_RPF,
+ VSP1_ENTITY_SRU,
VSP1_ENTITY_UDS,
VSP1_ENTITY_WPF,
};
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
new file mode 100644
index 000000000000..285485350d82
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -0,0 +1,222 @@
+/*
+ * vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_hsit.h"
+
+#define HSIT_MIN_SIZE 4U
+#define HSIT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hsit_read(struct vsp1_hsit *hsit, u32 reg)
+{
+ return vsp1_read(hsit->entity.vsp1, reg);
+}
+
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
+{
+ vsp1_write(hsit->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (!enable)
+ return 0;
+
+ if (hsit->inverse)
+ vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+ else
+ vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
+ (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
+ code->code = V4L2_MBUS_FMT_ARGB8888_1X32;
+ else
+ code->code = V4L2_MBUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == HSIT_PAD_SINK) {
+ fse->min_width = HSIT_MIN_SIZE;
+ fse->max_width = HSIT_MAX_SIZE;
+ fse->min_height = HSIT_MIN_SIZE;
+ fse->max_height = HSIT_MAX_SIZE;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int hsit_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int hsit_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = vsp1_entity_get_pad_format(&hsit->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == HSIT_PAD_SOURCE) {
+ /* The HST and HSI output format code and resolution can't be
+ * modified.
+ */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->code = hsit->inverse ? V4L2_MBUS_FMT_AHSV8888_1X32
+ : V4L2_MBUS_FMT_ARGB8888_1X32;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ format->code = hsit->inverse ? V4L2_MBUS_FMT_ARGB8888_1X32
+ : V4L2_MBUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops hsit_video_ops = {
+ .s_stream = hsit_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+ .enum_mbus_code = hsit_enum_mbus_code,
+ .enum_frame_size = hsit_enum_frame_size,
+ .get_fmt = hsit_get_format,
+ .set_fmt = hsit_set_format,
+};
+
+static struct v4l2_subdev_ops hsit_ops = {
+ .video = &hsit_video_ops,
+ .pad = &hsit_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_hsit *hsit;
+ int ret;
+
+ hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
+ if (hsit == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ hsit->inverse = inverse;
+
+ if (inverse) {
+ hsit->entity.type = VSP1_ENTITY_HSI;
+ hsit->entity.id = VI6_DPR_NODE_HSI;
+ } else {
+ hsit->entity.type = VSP1_ENTITY_HST;
+ hsit->entity.id = VI6_DPR_NODE_HST;
+ }
+
+ ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &hsit->entity.subdev;
+ v4l2_subdev_init(subdev, &hsit_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+ dev_name(vsp1->dev), inverse ? "hsi" : "hst");
+ v4l2_set_subdevdata(subdev, hsit);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return hsit;
+}
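For reference, a small standalone sketch (not driver code) of the pad/direction rule used by hsit_enum_mbus_code() above: the RGB media-bus code sits on the HST sink and the HSI source, while AHSV sits on the two remaining pads. The enum values are stand-ins for the real V4L2 media-bus codes.

#include <stdio.h>

enum mbus_code { MBUS_ARGB8888, MBUS_AHSV8888 };

/* sink of the forward transform (HST) and source of the inverse transform
 * (HSI) carry RGB; the two remaining pads carry HSV
 */
static enum mbus_code hsit_pad_code(int pad_is_sink, int inverse)
{
	if ((pad_is_sink && !inverse) || (!pad_is_sink && inverse))
		return MBUS_ARGB8888;
	return MBUS_AHSV8888;
}

int main(void)
{
	printf("HST sink:   %s\n", hsit_pad_code(1, 0) == MBUS_ARGB8888 ? "RGB" : "HSV");
	printf("HST source: %s\n", hsit_pad_code(0, 0) == MBUS_ARGB8888 ? "RGB" : "HSV");
	printf("HSI sink:   %s\n", hsit_pad_code(1, 1) == MBUS_ARGB8888 ? "RGB" : "HSV");
	printf("HSI source: %s\n", hsit_pad_code(0, 1) == MBUS_ARGB8888 ? "RGB" : "HSV");
	return 0;
}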
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.h b/drivers/media/platform/vsp1/vsp1_hsit.h
new file mode 100644
index 000000000000..82f1c8426900
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hsit.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_hsit.h -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HSIT_H__
+#define __VSP1_HSIT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HSIT_PAD_SINK 0
+#define HSIT_PAD_SOURCE 1
+
+struct vsp1_hsit {
+ struct vsp1_entity entity;
+ bool inverse;
+};
+
+static inline struct vsp1_hsit *to_hsit(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hsit, entity.subdev);
+}
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse);
+
+#endif /* __VSP1_HSIT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
new file mode 100644
index 000000000000..4e9dc7c86ef8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -0,0 +1,252 @@
+/*
+ * vsp1_lut.c -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/vsp1.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_lut.h"
+
+#define LUT_MIN_SIZE 4U
+#define LUT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_lut_read(struct vsp1_lut *lut, u32 reg)
+{
+ return vsp1_read(lut->entity.vsp1, reg);
+}
+
+static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
+{
+ vsp1_write(lut->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static void lut_configure(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+{
+ memcpy_toio(lut->entity.vsp1->mmio + VI6_LUT_TABLE, config->lut,
+ sizeof(config->lut));
+}
+
+static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ switch (cmd) {
+ case VIDIOC_VSP1_LUT_CONFIG:
+ lut_configure(lut, arg);
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ if (!enable)
+ return 0;
+
+ vsp1_lut_write(lut, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AHSV8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == LUT_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ /* The LUT can't perform format conversion, the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, LUT_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int lut_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == LUT_PAD_SINK) {
+ fse->min_width = LUT_MIN_SIZE;
+ fse->max_width = LUT_MAX_SIZE;
+ fse->min_height = LUT_MIN_SIZE;
+ fse->max_height = LUT_MAX_SIZE;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lut *lut = to_lut(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == LUT_PAD_SOURCE) {
+ /* The LUT output format can't be modified. */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ LUT_MIN_SIZE, LUT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ LUT_MIN_SIZE, LUT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&lut->entity, fh, LUT_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_core_ops lut_core_ops = {
+ .ioctl = lut_ioctl,
+};
+
+static struct v4l2_subdev_video_ops lut_video_ops = {
+ .s_stream = lut_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops lut_pad_ops = {
+ .enum_mbus_code = lut_enum_mbus_code,
+ .enum_frame_size = lut_enum_frame_size,
+ .get_fmt = lut_get_format,
+ .set_fmt = lut_set_format,
+};
+
+static struct v4l2_subdev_ops lut_ops = {
+ .core = &lut_core_ops,
+ .video = &lut_video_ops,
+ .pad = &lut_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_lut *lut;
+ int ret;
+
+ lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL);
+ if (lut == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lut->entity.type = VSP1_ENTITY_LUT;
+ lut->entity.id = VI6_DPR_NODE_LUT;
+
+ ret = vsp1_entity_init(vsp1, &lut->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &lut->entity.subdev;
+ v4l2_subdev_init(subdev, &lut_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s lut",
+ dev_name(vsp1->dev));
+ v4l2_set_subdevdata(subdev, lut);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return lut;
+}
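A hedged userspace sketch of driving the custom LUT ioctl added above: it assumes struct vsp1_lut_config (from the new <linux/vsp1.h> header) exposes a 256-entry lut[] array, which is only inferred from the sizeof(config->lut) copy in lut_configure() and not confirmed here; the device path and entry packing are illustrative.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vsp1.h>

/* Load an identity-style table into the VSP1 LUT subdev (layout assumed). */
int load_identity_lut(const char *subdev_path)
{
	struct vsp1_lut_config config;
	unsigned int i;
	int fd, ret;

	memset(&config, 0, sizeof(config));
	for (i = 0; i < 256; i++)
		config.lut[i] = i | (i << 8) | (i << 16);	/* same value in all three components */

	fd = open(subdev_path, O_RDWR);	/* e.g. the /dev/v4l-subdev* node of the LUT */
	if (fd < 0)
		return -1;

	ret = ioctl(fd, VIDIOC_VSP1_LUT_CONFIG, &config);
	close(fd);
	return ret;
}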
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
new file mode 100644
index 000000000000..f92ffb867350
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -0,0 +1,38 @@
+/*
+ * vsp1_lut.h -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_LUT_H__
+#define __VSP1_LUT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LUT_PAD_SINK 0
+#define LUT_PAD_SOURCE 1
+
+struct vsp1_lut {
+ struct vsp1_entity entity;
+ u32 lut[256];
+};
+
+static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lut, entity.subdev);
+}
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LUT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 1d3304f1365b..28650806c20f 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -336,8 +336,21 @@
*/
#define VI6_SRU_CTRL0 0x2200
+#define VI6_SRU_CTRL0_PARAM0_SHIFT 16
+#define VI6_SRU_CTRL0_PARAM1_SHIFT 8
+#define VI6_SRU_CTRL0_MODE_UPSCALE (4 << 4)
+#define VI6_SRU_CTRL0_PARAM2 (1 << 3)
+#define VI6_SRU_CTRL0_PARAM3 (1 << 2)
+#define VI6_SRU_CTRL0_PARAM4 (1 << 1)
+#define VI6_SRU_CTRL0_EN (1 << 0)
+
#define VI6_SRU_CTRL1 0x2204
+#define VI6_SRU_CTRL1_PARAM5 0x7ff
+
#define VI6_SRU_CTRL2 0x2208
+#define VI6_SRU_CTRL2_PARAM6_SHIFT 16
+#define VI6_SRU_CTRL2_PARAM7_SHIFT 8
+#define VI6_SRU_CTRL2_PARAM8_SHIFT 0
/* -----------------------------------------------------------------------------
* UDS Control Registers
@@ -412,6 +425,7 @@
*/
#define VI6_LUT_CTRL 0x2800
+#define VI6_LUT_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* CLU Control Registers
@@ -424,12 +438,14 @@
*/
#define VI6_HST_CTRL 0x2a00
+#define VI6_HST_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* HSI Control Registers
*/
#define VI6_HSI_CTRL 0x2b00
+#define VI6_HSI_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* BRU Control Registers
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 254871d3423e..bce2be5466b9 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -47,25 +47,36 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
struct vsp1_rwpf *rpf = to_rwpf(subdev);
const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo;
const struct v4l2_pix_format_mplane *format = &rpf->video.format;
+ const struct v4l2_rect *crop = &rpf->crop;
u32 pstride;
u32 infmt;
if (!enable)
return 0;
- /* Source size and stride. Cropping isn't supported yet. */
+ /* Source size, stride and crop offsets.
+ *
+ * The crop offsets correspond to the location of the crop rectangle top
+ * left corner in the plane buffer. Only two offsets are needed, as
+ * planes 2 and 3 always have identical strides.
+ */
vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
- (format->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
- (format->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
- (format->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
- (format->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+ (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+ rpf->offsets[0] = crop->top * format->plane_fmt[0].bytesperline
+ + crop->left * fmtinfo->bpp[0] / 8;
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
- if (format->num_planes > 1)
+ if (format->num_planes > 1) {
+ rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+ + crop->left * fmtinfo->bpp[1] / 8;
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+ }
vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
@@ -113,6 +124,8 @@ static struct v4l2_subdev_pad_ops rpf_pad_ops = {
.enum_frame_size = vsp1_rwpf_enum_frame_size,
.get_fmt = vsp1_rwpf_get_format,
.set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
};
static struct v4l2_subdev_ops rpf_ops = {
@@ -129,11 +142,14 @@ static void rpf_vdev_queue(struct vsp1_video *video,
{
struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, buf->addr[0]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
+ buf->addr[0] + rpf->offsets[0]);
if (buf->buf.num_planes > 1)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, buf->addr[1]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
+ buf->addr[1] + rpf->offsets[1]);
if (buf->buf.num_planes > 2)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, buf->addr[2]);
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
+ buf->addr[2] + rpf->offsets[1]);
}
static const struct vsp1_video_operations rpf_vdev_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 9752d5516ceb..782f770daee5 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -71,6 +71,19 @@ int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
+static struct v4l2_rect *
+vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_fh *fh, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(fh, RWPF_PAD_SINK);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &rwpf->crop;
+ default:
+ return NULL;
+ }
+}
+
int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
@@ -87,6 +100,7 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
@@ -115,6 +129,13 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
fmt->format = *format;
+ /* Update the sink crop rectangle. */
+ crop = vsp1_rwpf_get_crop(rwpf, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
/* Propagate the format to the source pad. */
format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
fmt->which);
@@ -122,3 +143,78 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
return 0;
}
+
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Cropping is implemented on the sink pad. */
+ if (sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh,
+ RWPF_PAD_SINK, sel->which);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ /* Cropping is implemented on the sink pad. */
+ if (sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* Make sure the crop rectangle is entirely contained in the image. The
+ * WPF top and left offsets are limited to 255.
+ */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SINK,
+ sel->which);
+ sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
+ sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
+ if (rwpf->entity.type == VSP1_ENTITY_WPF) {
+ sel->r.left = min_t(unsigned int, sel->r.left, 255);
+ sel->r.top = min_t(unsigned int, sel->r.top, 255);
+ }
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ format->width - sel->r.left);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ format->height - sel->r.top);
+
+ crop = vsp1_rwpf_get_crop(rwpf, fh, sel->which);
+ *crop = sel->r;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+ sel->which);
+ format->width = crop->width;
+ format->height = crop->height;
+
+ return 0;
+}
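The crop support added to the RPF above advances the DMA addresses by a per-plane byte offset. A standalone sketch of that arithmetic follows, using an illustrative single-plane ARGB32 case (values not taken from the driver): the offset is the crop top multiplied by the line stride plus the crop left scaled by bytes per pixel, exactly as in rpf_s_stream().

#include <stdio.h>

int main(void)
{
	unsigned int bytesperline = 1920 * 4;	/* ARGB32, 1920-pixel wide buffer */
	unsigned int bpp = 32;			/* bits per pixel, plane 0 */
	unsigned int crop_left = 64, crop_top = 32;
	unsigned long offset;

	offset = (unsigned long)crop_top * bytesperline + crop_left * bpp / 8;
	printf("plane 0 starts %lu bytes into the buffer\n", offset);	/* 246016 */
	return 0;
}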
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index c182d85f36b3..6cbdb547470b 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -29,6 +29,10 @@ struct vsp1_rwpf {
unsigned int max_width;
unsigned int max_height;
+
+ struct v4l2_rect crop;
+
+ unsigned int offsets[2];
};
static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
@@ -49,5 +53,11 @@ int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt);
int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt);
+int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
+int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
new file mode 100644
index 000000000000..7ab1a0b2d656
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -0,0 +1,356 @@
+/*
+ * vsp1_sru.c -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_sru.h"
+
+#define SRU_MIN_SIZE 4U
+#define SRU_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_sru_read(struct vsp1_sru *sru, u32 reg)
+{
+ return vsp1_read(sru->entity.vsp1, reg);
+}
+
+static inline void vsp1_sru_write(struct vsp1_sru *sru, u32 reg, u32 data)
+{
+ vsp1_write(sru->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE + 1)
+
+static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_sru *sru =
+ container_of(ctrl->handler, struct vsp1_sru, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_SRU_INTENSITY:
+ sru->intensity = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops sru_ctrl_ops = {
+ .s_ctrl = sru_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sru_intensity_control = {
+ .ops = &sru_ctrl_ops,
+ .id = V4L2_CID_VSP1_SRU_INTENSITY,
+ .name = "Intensity",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 6,
+ .step = 1,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+struct vsp1_sru_param {
+ u32 ctrl0;
+ u32 ctrl2;
+};
+
+#define VI6_SRU_CTRL0_PARAMS(p0, p1) \
+ (((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) | \
+ ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))
+
+#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8) \
+ (((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) | \
+ ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) | \
+ ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))
+
+static const struct vsp1_sru_param vsp1_sru_params[] = {
+ {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
+ },
+};
+
+static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ const struct vsp1_sru_param *param;
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+ bool upscale;
+ u32 ctrl0;
+
+ if (!enable)
+ return 0;
+
+ input = &sru->entity.formats[SRU_PAD_SINK];
+ output = &sru->entity.formats[SRU_PAD_SOURCE];
+ upscale = input->width != output->width;
+ param = &vsp1_sru_params[sru->intensity];
+
+ if (input->code == V4L2_MBUS_FMT_ARGB8888_1X32)
+ ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+ | VI6_SRU_CTRL0_PARAM4;
+ else
+ ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+ vsp1_sru_write(sru, VI6_SRU_CTRL0, param->ctrl0 | ctrl0 |
+ (upscale ? VI6_SRU_CTRL0_MODE_UPSCALE : 0));
+ vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+ vsp1_sru_write(sru, VI6_SRU_CTRL2, param->ctrl2);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == SRU_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ /* The SRU can't perform format conversion, the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int sru_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, SRU_PAD_SINK);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == SRU_PAD_SINK) {
+ fse->min_width = SRU_MIN_SIZE;
+ fse->max_width = SRU_MAX_SIZE;
+ fse->min_height = SRU_MIN_SIZE;
+ fse->max_height = SRU_MAX_SIZE;
+ } else {
+ fse->min_width = format->width;
+ fse->min_height = format->height;
+ if (format->width <= SRU_MAX_SIZE / 2 &&
+ format->height <= SRU_MAX_SIZE / 2) {
+ fse->max_width = format->width * 2;
+ fse->max_height = format->height * 2;
+ } else {
+ fse->max_width = format->width;
+ fse->max_height = format->height;
+ }
+ }
+
+ return 0;
+}
+
+static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int input_area;
+ unsigned int output_area;
+
+ switch (pad) {
+ case SRU_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ break;
+
+ case SRU_PAD_SOURCE:
+ /* The SRU can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ SRU_PAD_SINK, which);
+ fmt->code = format->code;
+
+ /* We can upscale by 2 in both directions, but not independently.
+ * Compare the input and output rectangle areas (avoiding integer
+ * overflows on the output): if the requested output area is larger
+ * than 1.5^2 times the input area, upscale by two, otherwise don't
+ * scale.
+ */
+ input_area = format->width * format->height;
+ output_area = min(fmt->width, SRU_MAX_SIZE)
+ * min(fmt->height, SRU_MAX_SIZE);
+
+ if (fmt->width <= SRU_MAX_SIZE / 2 &&
+ fmt->height <= SRU_MAX_SIZE / 2 &&
+ output_area > input_area * 9 / 4) {
+ fmt->width = format->width * 2;
+ fmt->height = format->height * 2;
+ } else {
+ fmt->width = format->width;
+ fmt->height = format->height;
+ }
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ sru_try_format(sru, fh, fmt->pad, &fmt->format, fmt->which);
+
+ format = vsp1_entity_get_pad_format(&sru->entity, fh, fmt->pad,
+ fmt->which);
+ *format = fmt->format;
+
+ if (fmt->pad == SRU_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&sru->entity, fh,
+ SRU_PAD_SOURCE, fmt->which);
+ *format = fmt->format;
+
+ sru_try_format(sru, fh, SRU_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops sru_video_ops = {
+ .s_stream = sru_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops sru_pad_ops = {
+ .enum_mbus_code = sru_enum_mbus_code,
+ .enum_frame_size = sru_enum_frame_size,
+ .get_fmt = sru_get_format,
+ .set_fmt = sru_set_format,
+};
+
+static struct v4l2_subdev_ops sru_ops = {
+ .video = &sru_video_ops,
+ .pad = &sru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_sru *sru;
+ int ret;
+
+ sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
+ if (sru == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ sru->entity.type = VSP1_ENTITY_SRU;
+ sru->entity.id = VI6_DPR_NODE_SRU;
+
+ ret = vsp1_entity_init(vsp1, &sru->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &sru->entity.subdev;
+ v4l2_subdev_init(subdev, &sru_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s sru",
+ dev_name(vsp1->dev));
+ v4l2_set_subdevdata(subdev, sru);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&sru->ctrls, 1);
+ v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+ v4l2_ctrl_handler_setup(&sru->ctrls);
+ sru->entity.subdev.ctrl_handler = &sru->ctrls;
+
+ return sru;
+}
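A worked example (not driver code) of the 1.5^2 area test in sru_try_format() above: an output request larger than 2.25x the input area selects the x2 upscale, otherwise the SRU runs at the input resolution. The resolutions below are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int in_w = 640, in_h = 480;		/* sink format */
	unsigned int req_w = 1280, req_h = 720;		/* requested source format */
	unsigned int input_area = in_w * in_h;		/* 307200 */
	unsigned int output_area = req_w * req_h;	/* 921600 */

	if (output_area > input_area * 9 / 4)		/* 921600 > 691200 */
		printf("upscale: %ux%u\n", in_w * 2, in_h * 2);	/* 1280x960 */
	else
		printf("no scaling: %ux%u\n", in_w, in_h);
	return 0;
}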
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
new file mode 100644
index 000000000000..381870b74780
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -0,0 +1,41 @@
+/*
+ * vsp1_sru.h -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_SRU_H__
+#define __VSP1_SRU_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define SRU_PAD_SINK 0
+#define SRU_PAD_SOURCE 1
+
+struct vsp1_sru {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+ unsigned int intensity;
+};
+
+static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_sru, entity.subdev);
+}
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_SRU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 4b0ac07af662..b4687a834f85 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -488,11 +488,17 @@ static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
* This function completes the current buffer by filling its sequence number,
* time stamp and payload size, and hands it back to the videobuf core.
*
+ * When operating in DU output mode (deep pipeline to the DU through the LIF),
+ * the VSP1 needs to constantly supply frames to the display. In that case, if
+ * no other buffer is queued, reuse the one that has just been processed instead
+ * of handing it back to the videobuf core.
+ *
* Return the next queued buffer or NULL if the queue is empty.
*/
static struct vsp1_video_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
struct vsp1_video_buffer *next = NULL;
struct vsp1_video_buffer *done;
unsigned long flags;
@@ -507,6 +513,13 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
done = list_first_entry(&video->irqqueue,
struct vsp1_video_buffer, queue);
+
+ /* In DU output mode reuse the buffer if the list is singular. */
+ if (pipe->lif && list_is_singular(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return done;
+ }
+
list_del(&done->queue);
if (!list_empty(&video->irqqueue))
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index db4b85ee05fc..7baed81ff005 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -48,8 +48,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
struct vsp1_pipeline *pipe =
to_vsp1_pipeline(&wpf->entity.subdev.entity);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
- const struct v4l2_mbus_framefmt *format =
- &wpf->entity.formats[RWPF_PAD_SOURCE];
+ const struct v4l2_rect *crop = &wpf->crop;
unsigned int i;
u32 srcrpf = 0;
u32 outfmt = 0;
@@ -68,7 +67,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
- /* Destination stride. Cropping isn't supported yet. */
+ /* Destination stride. */
if (!pipe->lif) {
struct v4l2_pix_format_mplane *format = &wpf->video.format;
@@ -79,10 +78,12 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
format->plane_fmt[1].bytesperline);
}
- vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP,
- format->width << VI6_WPF_SZCLIP_SIZE_SHIFT);
- vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP,
- format->height << VI6_WPF_SZCLIP_SIZE_SHIFT);
+ vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+ vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
/* Format */
if (!pipe->lif) {
@@ -130,6 +131,8 @@ static struct v4l2_subdev_pad_ops wpf_pad_ops = {
.enum_frame_size = vsp1_rwpf_enum_frame_size,
.get_fmt = vsp1_rwpf_get_format,
.set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
};
static struct v4l2_subdev_ops wpf_ops = {
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 6ecdc39bb366..192f36f2f4aa 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -21,6 +21,12 @@ config RADIO_SI470X
source "drivers/media/radio/si470x/Kconfig"
+config RADIO_SI4713
+ tristate "Silicon Labs Si4713 FM Radio with RDS Transmitter support"
+ depends on VIDEO_V4L2
+
+source "drivers/media/radio/si4713/Kconfig"
+
config RADIO_SI476X
tristate "Silicon Laboratories Si476x I2C FM Radio"
depends on I2C && VIDEO_V4L2
@@ -113,29 +119,6 @@ config RADIO_SHARK2
To compile this driver as a module, choose M here: the
module will be called radio-shark2.
-config I2C_SI4713
- tristate "I2C driver for Silicon Labs Si4713 device"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want support to Si4713 I2C device.
- This device driver supports only i2c bus.
-
- To compile this driver as a module, choose M here: the
- module will be called si4713.
-
-config RADIO_SI4713
- tristate "Silicon Labs Si4713 FM Radio Transmitter support"
- depends on I2C && VIDEO_V4L2
- select I2C_SI4713
- ---help---
- Say Y here if you want support to Si4713 FM Radio Transmitter.
- This device can transmit audio through FM. It can transmit
- RDS and RBDS signals as well. This module is the v4l2 radio
- interface for the i2c driver of this device.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-si4713.
-
config USB_KEENE
tristate "Keene FM Transmitter USB support"
depends on USB && VIDEO_V4L2
@@ -146,6 +129,20 @@ config USB_KEENE
To compile this driver as a module, choose M here: the
module will be called radio-keene.
+config USB_RAREMONO
+ tristate "Thanko's Raremono AM/FM/SW radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ The 'Thanko's Raremono' device contains the Si4734 chip from Silicon Labs Inc.
+ It is one of the very few consumer USB radio devices, perhaps the
+ only one, that can receive the AM/FM/SW bands.
+
+ Say Y here if you want to connect this type of AM/FM/SW receiver
+ to your computer's USB port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-raremono.
+
config USB_MA901
tristate "Masterkit MA901 USB FM radio support"
depends on USB && VIDEO_V4L2
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 3b645601800d..120e791199b2 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -17,12 +17,11 @@ obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
-obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
-obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
obj-$(CONFIG_RADIO_SI476X) += radio-si476x.o
obj-$(CONFIG_RADIO_MIROPCM20) += radio-miropcm20.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
obj-$(CONFIG_RADIO_SI470X) += si470x/
+obj-$(CONFIG_RADIO_SI4713) += si4713/
obj-$(CONFIG_USB_MR800) += radio-mr800.o
obj-$(CONFIG_USB_KEENE) += radio-keene.o
obj-$(CONFIG_USB_MA901) += radio-ma901.o
@@ -33,6 +32,7 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
+obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
shark2-objs := radio-shark2.o radio-tea5777.o
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
new file mode 100644
index 000000000000..7b3bdbb1be73
--- /dev/null
+++ b/drivers/media/radio/radio-raremono.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <asm/unaligned.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+
+/*
+ * 'Thanko's Raremono' is a Japanese si4734-based AM/FM/SW USB receiver:
+ *
+ * http://www.raremono.jp/product/484.html/
+ *
+ * The USB protocol has been reverse engineered using Wireshark, initially
+ * by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil
+ * <hverkuil@xs4all.nl>.
+ *
+ * Sadly the firmware used in this product hides lots of goodies, since the
+ * si4734 has more features than the firmware exposes. Oh well...
+ */
+
+/* driver and module definitions */
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * The Device announces itself as Cygnal Integrated Products, Inc.
+ *
+ * The vendor and product IDs (and in fact all other lsusb information as
+ * well) are identical to the si470x Silicon Labs USB FM Radio Reference
+ * Design board, even though this card has a si4734 device. Clearly the
+ * designer of this product never bothered to change the USB IDs.
+ */
+
+/* USB Device ID List */
+static struct usb_device_id usb_raremono_device_table[] = {
+ {USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_raremono_device_table);
+
+#define BUFFER_LENGTH 64
+
+/* Timeout is set to a high value; it could probably be reduced, but that needs more testing */
+#define USB_TIMEOUT 10000
+
+/* Frequency limits in kHz */
+#define FM_FREQ_RANGE_LOW 64000
+#define FM_FREQ_RANGE_HIGH 108000
+
+#define AM_FREQ_RANGE_LOW 520
+#define AM_FREQ_RANGE_HIGH 1710
+
+#define SW_FREQ_RANGE_LOW 2300
+#define SW_FREQ_RANGE_HIGH 26100
+
+enum { BAND_FM, BAND_AM, BAND_SW };
+
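+/*
+ * Note: V4L2_TUNER_CAP_LOW is set, so all V4L2 frequencies below are in
+ * units of 62.5 Hz; the kHz limits above are therefore multiplied by 16.
+ */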
+static const struct v4l2_frequency_band bands[] = {
+ /* Band FM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = FM_FREQ_RANGE_LOW * 16,
+ .rangehigh = FM_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ /* Band AM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = AM_FREQ_RANGE_LOW * 16,
+ .rangehigh = AM_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+ /* Band SW */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = SW_FREQ_RANGE_LOW * 16,
+ .rangehigh = SW_FREQ_RANGE_HIGH * 16,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+};
+
+struct raremono_device {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct mutex lock;
+
+ u8 *buffer;
+ u32 band;
+ unsigned curfreq;
+};
+
+static inline struct raremono_device *to_raremono_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct raremono_device, v4l2_dev);
+}
+
+/* Set frequency. */
+static int raremono_cmd_main(struct raremono_device *radio, unsigned band, unsigned freq)
+{
+ unsigned band_offset;
+ int ret;
+
+ switch (band) {
+ case BAND_FM:
+ band_offset = 1;
+ freq /= 10;
+ break;
+ case BAND_AM:
+ band_offset = 0;
+ break;
+ default:
+ band_offset = 2;
+ break;
+ }
+ radio->buffer[0] = 0x04 + band_offset;
+ radio->buffer[1] = freq >> 8;
+ radio->buffer[2] = freq & 0xff;
+
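+	/*
+	 * Send a HID SET_REPORT of feature report 0x04 (AM), 0x05 (FM) or
+	 * 0x06 (SW) carrying the 16-bit frequency. The report layout was
+	 * derived from USB traces, so this is a best-effort description.
+	 */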
+ ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0x0300 + radio->buffer[0], 2,
+ radio->buffer, 3, USB_TIMEOUT);
+
+ if (ret < 0) {
+ dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+ return ret;
+ }
+ radio->curfreq = (band == BAND_FM) ? freq * 10 : freq;
+ return 0;
+}
+
+/*
+ * Handle unplugging the device.
+ * video_unregister_device() is called unconditionally, and the driver's
+ * reference to the v4l2_device is dropped with v4l2_device_put().
+ */
+static void usb_raremono_disconnect(struct usb_interface *intf)
+{
+ struct raremono_device *radio = to_raremono_dev(usb_get_intfdata(intf));
+
+ dev_info(&intf->dev, "Thanko's Raremono disconnected\n");
+
+ mutex_lock(&radio->lock);
+ usb_set_intfdata(intf, NULL);
+ video_unregister_device(&radio->vdev);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/*
+ * Linux Video interface
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct raremono_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-raremono", sizeof(v->driver));
+ strlcpy(v->card, "Thanko's Raremono", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ if (band->tuner != 0)
+ return -EINVAL;
+
+ if (band->index >= ARRAY_SIZE(bands))
+ return -EINVAL;
+
+ *band = bands[band->index];
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct raremono_device *radio = video_drvdata(file);
+ int ret;
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ strlcpy(v->name, "AM/FM/SW", sizeof(v->name));
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = AM_FREQ_RANGE_LOW * 16;
+ v->rangehigh = FM_FREQ_RANGE_HIGH * 16;
+ v->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
+ v->audmode = (radio->curfreq < FM_FREQ_RANGE_LOW) ?
+ V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO;
+ memset(radio->buffer, 1, BUFFER_LENGTH);
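+	/*
+	 * HID GET_REPORT (bRequest 1, bmRequestType 0xa1) of feature report
+	 * 0x0d; bytes 1 and 2 of the reply appear to carry the signal level
+	 * (again known only from USB traces).
+	 */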
+ ret = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+ 1, 0xa1, 0x030d, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT);
+
+ if (ret < 0) {
+ dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret);
+ return ret;
+ }
+ v->signal = ((radio->buffer[1] & 0xf) << 8 | radio->buffer[2]) << 4;
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ return v->index ? -EINVAL : 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct raremono_device *radio = video_drvdata(file);
+ u32 freq = f->frequency;
+ unsigned band;
+
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+
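+	/*
+	 * Pick the band from the requested frequency: (a + b) * 8 is the
+	 * midpoint of the band limits a and b expressed in 62.5 Hz units.
+	 */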
+ if (f->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8)
+ band = BAND_FM;
+ else if (f->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8)
+ band = BAND_AM;
+ else
+ band = BAND_SW;
+
+ freq = clamp_t(u32, f->frequency, bands[band].rangelow, bands[band].rangehigh);
+ return raremono_cmd_main(radio, band, freq / 16);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct raremono_device *radio = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+ f->type = V4L2_TUNER_RADIO;
+ f->frequency = radio->curfreq * 16;
+ return 0;
+}
+
+/* File system interface */
+static const struct v4l2_file_operations usb_raremono_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops usb_raremono_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+};
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_raremono_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct raremono_device *radio;
+ int retval = 0;
+
+ radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
+ if (radio)
+ radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
+
+ if (!radio || !radio->buffer)
+ return -ENOMEM;
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+
+ /*
+ * This device uses the same USB IDs as the si470x SiLabs reference
+	 * design. So do an additional check: attempt to read the device ID
+	 * register; the lower 12 bits are 0x0242 on a si470x, whereas the
+	 * Raremono always returns 0x0800 (the meaning of which is unknown,
+	 * but at least it tells the two devices apart).
+ *
+ * We use this check to determine which device we are dealing with.
+ */
+ msleep(20);
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 1, 2,
+ radio->buffer, 3, 500);
+ if (retval != 3 ||
+ (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
+ dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
+ return -ENODEV;
+ }
+
+ dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
+ id->idVendor, id->idProduct);
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ return retval;
+ }
+
+ mutex_init(&radio->lock);
+
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_raremono_fops;
+ radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
+ radio->vdev.lock = &radio->lock;
+ radio->vdev.release = video_device_release_empty;
+
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+
+ video_set_drvdata(&radio->vdev, radio);
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
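+	/* Tune to an initial FM frequency (95.16 MHz, the value is in kHz);
+	 * presumably so the tuner starts in a known state. */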
+ raremono_cmd_main(radio, BAND_FM, 95160);
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+ if (retval == 0) {
+ dev_info(&intf->dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&radio->vdev));
+ return 0;
+ }
+ dev_err(&intf->dev, "could not register video device\n");
+ v4l2_device_unregister(&radio->v4l2_dev);
+ return retval;
+}
+
+/* USB subsystem interface */
+static struct usb_driver usb_raremono_driver = {
+ .name = "radio-raremono",
+ .probe = usb_raremono_probe,
+ .disconnect = usb_raremono_disconnect,
+ .id_table = usb_raremono_device_table,
+};
+
+module_usb_driver(usb_raremono_driver);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index d6d4d60261d5..07ef40595efd 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -137,6 +137,8 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
/* interrupt out endpoint 2 every 1 millisecond */
#define UNUSED_REPORT 23
+#define MAX_REPORT_SIZE 64
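+/*
+ * Reports are transferred through a kmalloc'ed bounce buffer since USB
+ * control transfers must not use on-stack memory.
+ */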
+
/**************************************************************************
@@ -208,7 +210,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
*/
static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
{
- unsigned char *report = (unsigned char *) buf;
+ unsigned char *report = buf;
int retval;
retval = usb_control_msg(radio->usbdev,
@@ -231,7 +233,7 @@ static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
*/
static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
{
- unsigned char *report = (unsigned char *) buf;
+ unsigned char *report = buf;
int retval;
retval = usb_control_msg(radio->usbdev,
@@ -254,15 +256,14 @@ static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
*/
int si470x_get_register(struct si470x_device *radio, int regnr)
{
- unsigned char buf[REGISTER_REPORT_SIZE];
int retval;
- buf[0] = REGISTER_REPORT(regnr);
+ radio->usb_buf[0] = REGISTER_REPORT(regnr);
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
if (retval >= 0)
- radio->registers[regnr] = get_unaligned_be16(&buf[1]);
+ radio->registers[regnr] = get_unaligned_be16(&radio->usb_buf[1]);
return (retval < 0) ? -EINVAL : 0;
}
@@ -273,13 +274,12 @@ int si470x_get_register(struct si470x_device *radio, int regnr)
*/
int si470x_set_register(struct si470x_device *radio, int regnr)
{
- unsigned char buf[REGISTER_REPORT_SIZE];
int retval;
- buf[0] = REGISTER_REPORT(regnr);
- put_unaligned_be16(radio->registers[regnr], &buf[1]);
+ radio->usb_buf[0] = REGISTER_REPORT(regnr);
+ put_unaligned_be16(radio->registers[regnr], &radio->usb_buf[1]);
- retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_set_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE);
return (retval < 0) ? -EINVAL : 0;
}
@@ -295,18 +295,17 @@ int si470x_set_register(struct si470x_device *radio, int regnr)
*/
static int si470x_get_all_registers(struct si470x_device *radio)
{
- unsigned char buf[ENTIRE_REPORT_SIZE];
int retval;
unsigned char regnr;
- buf[0] = ENTIRE_REPORT;
+ radio->usb_buf[0] = ENTIRE_REPORT;
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE);
if (retval >= 0)
for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++)
radio->registers[regnr] = get_unaligned_be16(
- &buf[regnr * RADIO_REGISTER_SIZE + 1]);
+ &radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]);
return (retval < 0) ? -EINVAL : 0;
}
@@ -323,14 +322,13 @@ static int si470x_get_all_registers(struct si470x_device *radio)
static int si470x_set_led_state(struct si470x_device *radio,
unsigned char led_state)
{
- unsigned char buf[LED_REPORT_SIZE];
int retval;
- buf[0] = LED_REPORT;
- buf[1] = LED_COMMAND;
- buf[2] = led_state;
+ radio->usb_buf[0] = LED_REPORT;
+ radio->usb_buf[1] = LED_COMMAND;
+ radio->usb_buf[2] = led_state;
- retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_set_report(radio, radio->usb_buf, LED_REPORT_SIZE);
return (retval < 0) ? -EINVAL : 0;
}
@@ -346,19 +344,18 @@ static int si470x_set_led_state(struct si470x_device *radio,
*/
static int si470x_get_scratch_page_versions(struct si470x_device *radio)
{
- unsigned char buf[SCRATCH_REPORT_SIZE];
int retval;
- buf[0] = SCRATCH_REPORT;
+ radio->usb_buf[0] = SCRATCH_REPORT;
- retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
+ retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
if (retval < 0)
dev_warn(&radio->intf->dev, "si470x_get_scratch: "
"si470x_get_report returned %d\n", retval);
else {
- radio->software_version = buf[1];
- radio->hardware_version = buf[2];
+ radio->software_version = radio->usb_buf[1];
+ radio->hardware_version = radio->usb_buf[2];
}
return (retval < 0) ? -EINVAL : 0;
@@ -509,6 +506,7 @@ static void si470x_usb_release(struct v4l2_device *v4l2_dev)
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->int_in_buffer);
kfree(radio->buffer);
+ kfree(radio->usb_buf);
kfree(radio);
}
@@ -593,6 +591,11 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
retval = -ENOMEM;
goto err_initial;
}
+ radio->usb_buf = kmalloc(MAX_REPORT_SIZE, GFP_KERNEL);
+ if (radio->usb_buf == NULL) {
+ retval = -ENOMEM;
+ goto err_radio;
+ }
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
radio->band = 1; /* Default to 76 - 108 MHz */
@@ -612,7 +615,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
if (!radio->int_in_endpoint) {
dev_info(&intf->dev, "could not find interrupt in endpoint\n");
retval = -EIO;
- goto err_radio;
+ goto err_usbbuf;
}
int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize);
@@ -621,7 +624,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
if (!radio->int_in_buffer) {
dev_info(&intf->dev, "could not allocate int_in_buffer");
retval = -ENOMEM;
- goto err_radio;
+ goto err_usbbuf;
}
radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -632,6 +635,30 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
}
radio->v4l2_dev.release = si470x_usb_release;
+
+ /*
+ * The si470x SiLabs reference design uses the same USB IDs as
+	 * the si4734-based 'Thanko's Raremono' receiver. So check here which
+	 * one we have: attempt to read the device ID register; the lower 12
+	 * bits should be 0x0242 for the si470x.
+ *
+ * We use this check to determine which device we are dealing with.
+ */
+ if (id->idVendor == 0x10c4 && id->idProduct == 0x818a) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 1, 2,
+ radio->usb_buf, 3, 500);
+ if (retval != 3 ||
+ (get_unaligned_be16(&radio->usb_buf[1]) & 0xfff) != 0x0242) {
+ dev_info(&intf->dev, "this is not a si470x device.\n");
+ retval = -ENODEV;
+ goto err_urb;
+ }
+ }
+
retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&intf->dev, "couldn't register v4l2_device\n");
@@ -743,6 +770,8 @@ err_urb:
usb_free_urb(radio->int_in_urb);
err_intbuffer:
kfree(radio->int_in_buffer);
+err_usbbuf:
+ kfree(radio->usb_buf);
err_radio:
kfree(radio);
err_initial:
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 467e95575488..4b7660470e2f 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -167,6 +167,7 @@ struct si470x_device {
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
+ char *usb_buf;
/* Interrupt endpoint handling */
char *int_in_buffer;
diff --git a/drivers/media/radio/si4713/Kconfig b/drivers/media/radio/si4713/Kconfig
new file mode 100644
index 000000000000..a7c3ba85d12b
--- /dev/null
+++ b/drivers/media/radio/si4713/Kconfig
@@ -0,0 +1,40 @@
+config USB_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support with USB"
+ depends on USB && RADIO_SI4713
+ select SI4713
+ ---help---
+ This is a driver for USB devices with the Silicon Labs SI4713
+	  chip. Currently these devices are known to work:
+ - 10c4:8244: Silicon Labs FM Transmitter USB device.
+
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-usb-si4713.
+
+config PLATFORM_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support with I2C"
+ depends on I2C && RADIO_SI4713
+ select SI4713
+ ---help---
+ This is a driver for I2C devices with the Silicon Labs SI4713
+ chip.
+
+ Say Y here if you want to connect this type of radio to your
+ computer's I2C port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-platform-si4713.
+
+config I2C_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support"
+ depends on I2C && RADIO_SI4713
+ ---help---
+	  Say Y here if you want support for the Si4713 FM Radio Transmitter.
+	  This device can transmit audio through FM and can also transmit
+	  RDS and RBDS signals. This module is the V4L2 radio interface
+	  for the I2C driver of this device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si4713.
diff --git a/drivers/media/radio/si4713/Makefile b/drivers/media/radio/si4713/Makefile
new file mode 100644
index 000000000000..ddaaf925e883
--- /dev/null
+++ b/drivers/media/radio/si4713/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for radios with Silicon Labs Si4713 FM Radio Transmitters
+#
+
+obj-$(CONFIG_I2C_SI4713) += si4713.o
+obj-$(CONFIG_USB_SI4713) += radio-usb-si4713.o
+obj-$(CONFIG_PLATFORM_SI4713) += radio-platform-si4713.o
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index ba4cfc946868..ba4cfc946868 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
new file mode 100644
index 000000000000..779855b74bcd
--- /dev/null
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
+ * All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* kernel includes */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+/* V4l includes */
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/si4713.h>
+
+#include "si4713.h"
+
+/* driver and module definitions */
+MODULE_AUTHOR("Dinesh Ram <dinesh.ram@cern.ch>");
+MODULE_DESCRIPTION("Si4713 FM Transmitter USB driver");
+MODULE_LICENSE("GPL v2");
+
+/* The Device announces itself as Cygnal Integrated Products, Inc. */
+#define USB_SI4713_VENDOR 0x10c4
+#define USB_SI4713_PRODUCT 0x8244
+
+#define BUFFER_LENGTH 64
+#define USB_TIMEOUT 1000
+#define USB_RESP_TIMEOUT 50000
+
+/* USB Device ID List */
+static struct usb_device_id usb_si4713_usb_device_table[] = {
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_SI4713_VENDOR, USB_SI4713_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_si4713_usb_device_table);
+
+struct si4713_usb_device {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *v4l2_subdev;
+ struct mutex lock;
+ struct i2c_adapter i2c_adapter;
+
+ u8 *buffer;
+};
+
+static inline struct si4713_usb_device *to_si4713_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct si4713_usb_device, v4l2_dev);
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ strlcpy(v->driver, "radio-usb-si4713", sizeof(v->driver));
+ strlcpy(v->card, "Si4713 FM Transmitter", sizeof(v->card));
+ usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int vidioc_g_modulator(struct file *file, void *priv,
+ struct v4l2_modulator *vm)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_modulator, vm);
+}
+
+static int vidioc_s_modulator(struct file *file, void *priv,
+ const struct v4l2_modulator *vm)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_modulator, vm);
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *vf)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, s_frequency, vf);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *vf)
+{
+ struct si4713_usb_device *radio = video_drvdata(file);
+
+ return v4l2_subdev_call(radio->v4l2_subdev, tuner, g_frequency, vf);
+}
+
+static const struct v4l2_ioctl_ops usb_si4713_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_modulator = vidioc_g_modulator,
+ .vidioc_s_modulator = vidioc_s_modulator,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* File system interface */
+static const struct v4l2_file_operations usb_si4713_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static void usb_si4713_video_device_release(struct v4l2_device *v4l2_dev)
+{
+ struct si4713_usb_device *radio = to_si4713_dev(v4l2_dev);
+ struct i2c_adapter *adapter = &radio->i2c_adapter;
+
+ i2c_del_adapter(adapter);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
+/*
+ * This command sequence emulates the behaviour of the Windows driver.
+ * The structure of these commands was determined by sniffing the
+ * USB traffic of the device during startup.
+ * Most likely, these commands query the device for parameters such as
+ * the bus mode, component revision, boot mode, the device serial number etc.
+ *
+ * The commands must be sent in this order during startup; the device
+ * fails to power up if they are not sent.
+ *
+ * The complete list of startup commands is given in the start_seq table below.
+ */
+static int si4713_send_startup_command(struct si4713_usb_device *radio)
+{
+ unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+ u8 *buffer = radio->buffer;
+ int retval;
+
+ /* send the command */
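+	/* bRequest 0x09 with bmRequestType 0x21 is a HID SET_REPORT of
+	 * feature report 0x3f (wValue 0x033f), the report the device uses
+	 * for command transfers according to the sniffed USB traces. */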
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 0x09, 0x21, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ for (;;) {
+ /* receive the response */
+ retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
+ 0x01, 0xa1, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+ if (!radio->buffer[1]) {
+ /* USB traffic sniffing showed that some commands require
+ * additional checks. */
+ switch (buffer[1]) {
+ case 0x32:
+ if (radio->buffer[2] == 0)
+ return 0;
+ break;
+ case 0x14:
+ case 0x12:
+ if (radio->buffer[2] & SI4713_CTS)
+ return 0;
+ break;
+ case 0x06:
+ if ((radio->buffer[2] & SI4713_CTS) && radio->buffer[9] == 0x08)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ }
+ if (time_is_before_jiffies(until_jiffies))
+ return -EIO;
+ msleep(3);
+ }
+
+ return retval;
+}
+
+struct si4713_start_seq_table {
+ int len;
+ u8 payload[8];
+};
+
+/*
+ * Some of the startup commands that could be recognized are:
+ * (0x03): Get serial number of the board (Response: CB000-00-00)
+ * (0x06, 0x03, 0x03, 0x08, 0x01, 0x0f): Get component revision
+ */
+static struct si4713_start_seq_table start_seq[] = {
+
+ { 1, { 0x03 } },
+ { 2, { 0x32, 0x7f } },
+ { 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+ { 2, { 0x14, 0x02 } },
+ { 2, { 0x09, 0x90 } },
+ { 3, { 0x08, 0x90, 0xfa } },
+ { 2, { 0x36, 0x01 } },
+ { 2, { 0x05, 0x03 } },
+ { 7, { 0x06, 0x00, 0x06, 0x0e, 0x01, 0x0f, 0x05 } },
+ { 1, { 0x12 } },
+ /* Commands that are sent after pressing the 'Initialize'
+	   button in the Windows application */
+ { 1, { 0x03 } },
+ { 1, { 0x01 } },
+ { 2, { 0x09, 0x90 } },
+ { 3, { 0x08, 0x90, 0xfa } },
+ { 1, { 0x34 } },
+ { 2, { 0x35, 0x01 } },
+ { 2, { 0x36, 0x01 } },
+ { 2, { 0x30, 0x09 } },
+ { 4, { 0x30, 0x06, 0x00, 0xe2 } },
+ { 3, { 0x31, 0x01, 0x30 } },
+ { 3, { 0x31, 0x04, 0x09 } },
+ { 2, { 0x05, 0x02 } },
+ { 6, { 0x06, 0x03, 0x03, 0x08, 0x01, 0x0f } },
+};
+
+static int si4713_start_seq(struct si4713_usb_device *radio)
+{
+ int retval = 0;
+ int i;
+
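+	/* Byte 0 of every report is the report ID (0x3f); the command payload
+	 * starts at byte 1 and the rest of the report is zero-padded. */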
+ radio->buffer[0] = 0x3f;
+
+ for (i = 0; i < ARRAY_SIZE(start_seq); i++) {
+ int len = start_seq[i].len;
+ u8 *payload = start_seq[i].payload;
+
+ memcpy(radio->buffer + 1, payload, len);
+ memset(radio->buffer + len + 1, 0, BUFFER_LENGTH - 1 - len);
+ retval = si4713_send_startup_command(radio);
+ }
+
+ return retval;
+}
+
+static struct i2c_board_info si4713_board_info = {
+ I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH),
+};
+
+struct si4713_command_table {
+ int command_id;
+ u8 payload[8];
+};
+
+/*
+ * Structure of a command :
+ * Byte 1 : 0x3f (always)
+ * Byte 2 : 0x06 (send a command)
+ * Byte 3 : Unknown
+ * Byte 4 : Number of arguments + 1 (for the command byte)
+ * Byte 5 : Number of response bytes
+ */
+static struct si4713_command_table command_table[] = {
+
+ { SI4713_CMD_POWER_UP, { 0x00, SI4713_PWUP_NARGS + 1, SI4713_PWUP_NRESP} },
+ { SI4713_CMD_GET_REV, { 0x03, 0x01, SI4713_GETREV_NRESP } },
+ { SI4713_CMD_POWER_DOWN, { 0x00, 0x01, SI4713_PWDN_NRESP} },
+ { SI4713_CMD_SET_PROPERTY, { 0x00, SI4713_SET_PROP_NARGS + 1, SI4713_SET_PROP_NRESP } },
+ { SI4713_CMD_GET_PROPERTY, { 0x00, SI4713_GET_PROP_NARGS + 1, SI4713_GET_PROP_NRESP } },
+ { SI4713_CMD_TX_TUNE_FREQ, { 0x03, SI4713_TXFREQ_NARGS + 1, SI4713_TXFREQ_NRESP } },
+ { SI4713_CMD_TX_TUNE_POWER, { 0x03, SI4713_TXPWR_NARGS + 1, SI4713_TXPWR_NRESP } },
+ { SI4713_CMD_TX_TUNE_MEASURE, { 0x03, SI4713_TXMEA_NARGS + 1, SI4713_TXMEA_NRESP } },
+ { SI4713_CMD_TX_TUNE_STATUS, { 0x00, SI4713_TXSTATUS_NARGS + 1, SI4713_TXSTATUS_NRESP } },
+ { SI4713_CMD_TX_ASQ_STATUS, { 0x03, SI4713_ASQSTATUS_NARGS + 1, SI4713_ASQSTATUS_NRESP } },
+ { SI4713_CMD_GET_INT_STATUS, { 0x03, 0x01, SI4713_GET_STATUS_NRESP } },
+ { SI4713_CMD_TX_RDS_BUFF, { 0x03, SI4713_RDSBUFF_NARGS + 1, SI4713_RDSBUFF_NRESP } },
+ { SI4713_CMD_TX_RDS_PS, { 0x00, SI4713_RDSPS_NARGS + 1, SI4713_RDSPS_NRESP } },
+};
+
+static int send_command(struct si4713_usb_device *radio, u8 *payload, char *data, int len)
+{
+ int retval;
+
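+	/*
+	 * Build the 64-byte feature report: byte 0 is the report ID (0x3f),
+	 * byte 1 the 'send command' opcode, bytes 2-4 the per-command header
+	 * from command_table, and the si4713 command bytes follow from byte 5.
+	 */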
+ radio->buffer[0] = 0x3f;
+ radio->buffer[1] = 0x06;
+
+ memcpy(radio->buffer + 2, payload, 3);
+ memcpy(radio->buffer + 5, data, len);
+ memset(radio->buffer + 5 + len, 0, BUFFER_LENGTH - 5 - len);
+
+ /* send the command */
+ retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0),
+ 0x09, 0x21, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+
+ return retval < 0 ? retval : 0;
+}
+
+static int si4713_i2c_read(struct si4713_usb_device *radio, char *data, int len)
+{
+ unsigned long until_jiffies = jiffies + usecs_to_jiffies(USB_RESP_TIMEOUT) + 1;
+ int retval;
+
+ /* receive the response */
+ for (;;) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ 0x01, 0xa1, 0x033f, 0, radio->buffer,
+ BUFFER_LENGTH, USB_TIMEOUT);
+ if (retval < 0)
+ return retval;
+
+ /*
+ * Check that we get a valid reply back (buffer[1] == 0) and
+ * that CTS is set before returning, otherwise we wait and try
+ * again. The i2c driver also does the CTS check, but the timeouts
+ * used there are much too small for this USB driver, so we wait
+ * for it here.
+ */
+ if (radio->buffer[1] == 0 && (radio->buffer[2] & SI4713_CTS)) {
+ memcpy(data, radio->buffer + 2, len);
+ return 0;
+ }
+ if (time_is_before_jiffies(until_jiffies)) {
+ /* Zero the status value, ensuring CTS isn't set */
+ data[0] = 0;
+ return 0;
+ }
+ msleep(3);
+ }
+}
+
+static int si4713_i2c_write(struct si4713_usb_device *radio, char *data, int len)
+{
+ int retval = -EINVAL;
+ int i;
+
+ if (len > BUFFER_LENGTH - 5)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(command_table); i++) {
+ if (data[0] == command_table[i].command_id)
+ retval = send_command(radio, command_table[i].payload,
+ data, len);
+ }
+
+ return retval < 0 ? retval : 0;
+}
+
+static int si4713_transfer(struct i2c_adapter *i2c_adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct si4713_usb_device *radio = i2c_get_adapdata(i2c_adapter);
+ int retval = -EINVAL;
+ int i;
+
+ if (num <= 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD)
+ retval = si4713_i2c_read(radio, msgs[i].buf, msgs[i].len);
+ else
+ retval = si4713_i2c_write(radio, msgs[i].buf, msgs[i].len);
+ if (retval)
+ break;
+ }
+
+ return retval ? retval : num;
+}
+
+static u32 si4713_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm si4713_algo = {
+ .master_xfer = si4713_transfer,
+ .functionality = si4713_functionality,
+};
+
+/* This name value shows up in the sysfs filename associated
+ with this I2C adapter */
+static struct i2c_adapter si4713_i2c_adapter_template = {
+ .name = "si4713-i2c",
+ .owner = THIS_MODULE,
+ .algo = &si4713_algo,
+};
+
+static int si4713_register_i2c_adapter(struct si4713_usb_device *radio)
+{
+ radio->i2c_adapter = si4713_i2c_adapter_template;
+ /* set up sysfs linkage to our parent device */
+ radio->i2c_adapter.dev.parent = &radio->usbdev->dev;
+ i2c_set_adapdata(&radio->i2c_adapter, radio);
+
+ return i2c_add_adapter(&radio->i2c_adapter);
+}
+
+/* check if the device is present and register with v4l and usb if it is */
+static int usb_si4713_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct si4713_usb_device *radio;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev *sd;
+ int retval = -ENOMEM;
+
+ dev_info(&intf->dev, "Si4713 development board discovered: (%04X:%04X)\n",
+ id->idVendor, id->idProduct);
+
+ /* Initialize local device structure */
+ radio = kzalloc(sizeof(struct si4713_usb_device), GFP_KERNEL);
+ if (radio)
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+
+ if (!radio || !radio->buffer) {
+ dev_err(&intf->dev, "kmalloc for si4713_usb_device failed\n");
+ kfree(radio);
+ return -ENOMEM;
+ }
+
+ mutex_init(&radio->lock);
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+
+ retval = si4713_start_seq(radio);
+ if (retval < 0)
+ goto err_v4l2;
+
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+ goto err_v4l2;
+ }
+
+ retval = si4713_register_i2c_adapter(radio);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register i2c device\n");
+ goto err_i2cdev;
+ }
+
+ adapter = &radio->i2c_adapter;
+ sd = v4l2_i2c_new_subdev_board(&radio->v4l2_dev, adapter,
+ &si4713_board_info, NULL);
+ radio->v4l2_subdev = sd;
+ if (!sd) {
+ dev_err(&intf->dev, "cannot get v4l2 subdevice\n");
+ retval = -ENODEV;
+ goto del_adapter;
+ }
+
+ radio->vdev.ctrl_handler = sd->ctrl_handler;
+ radio->v4l2_dev.release = usb_si4713_video_device_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_si4713_fops;
+ radio->vdev.ioctl_ops = &usb_si4713_ioctl_ops;
+ radio->vdev.lock = &radio->lock;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.vfl_dir = VFL_DIR_TX;
+
+ video_set_drvdata(&radio->vdev, radio);
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
+
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1);
+ if (retval < 0) {
+ dev_err(&intf->dev, "could not register video device\n");
+ goto del_adapter;
+ }
+
+ dev_info(&intf->dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&radio->vdev));
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(adapter);
+err_i2cdev:
+ v4l2_device_unregister(&radio->v4l2_dev);
+err_v4l2:
+ kfree(radio->buffer);
+ kfree(radio);
+ return retval;
+}
+
+static void usb_si4713_disconnect(struct usb_interface *intf)
+{
+ struct si4713_usb_device *radio = to_si4713_dev(usb_get_intfdata(intf));
+
+ dev_info(&intf->dev, "Si4713 development board now disconnected\n");
+
+ mutex_lock(&radio->lock);
+ usb_set_intfdata(intf, NULL);
+ video_unregister_device(&radio->vdev);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+}
+
+/* USB subsystem interface */
+static struct usb_driver usb_si4713_driver = {
+ .name = "radio-usb-si4713",
+ .probe = usb_si4713_probe,
+ .disconnect = usb_si4713_disconnect,
+ .id_table = usb_si4713_usb_device_table,
+};
+
+module_usb_driver(usb_si4713_driver);
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713/si4713.c
index 9ec48ccbcf0b..07d5153811e8 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -27,13 +27,12 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
-#include "si4713-i2c.h"
+#include "si4713.h"
/* module parameters */
static int debug;
@@ -45,23 +44,18 @@ MODULE_AUTHOR("Eduardo Valentin <eduardo.valentin@nokia.com>");
MODULE_DESCRIPTION("I2C driver for Si4713 FM Radio Transmitter");
MODULE_VERSION("0.0.1");
-static const char *si4713_supply_names[SI4713_NUM_SUPPLIES] = {
- "vio",
- "vdd",
-};
-
#define DEFAULT_RDS_PI 0x00
#define DEFAULT_RDS_PTY 0x00
#define DEFAULT_RDS_DEVIATION 0x00C8
#define DEFAULT_RDS_PS_REPEAT_COUNT 0x0003
#define DEFAULT_LIMITER_RTIME 0x1392
#define DEFAULT_LIMITER_DEV 0x102CA
-#define DEFAULT_PILOT_FREQUENCY 0x4A38
+#define DEFAULT_PILOT_FREQUENCY 0x4A38
#define DEFAULT_PILOT_DEVIATION 0x1A5E
#define DEFAULT_ACOMP_ATIME 0x0000
#define DEFAULT_ACOMP_RTIME 0xF4240L
#define DEFAULT_ACOMP_GAIN 0x0F
-#define DEFAULT_ACOMP_THRESHOLD (-0x28)
+#define DEFAULT_ACOMP_THRESHOLD (-0x28)
#define DEFAULT_MUTE 0x01
#define DEFAULT_POWER_LEVEL 88
#define DEFAULT_FREQUENCY 8800
@@ -213,6 +207,7 @@ static int si4713_send_command(struct si4713_device *sdev, const u8 command,
u8 response[], const int respn, const int usecs)
{
struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
+ unsigned long until_jiffies;
u8 data1[MAX_ARGS + 1];
int err;
@@ -228,30 +223,42 @@ static int si4713_send_command(struct si4713_device *sdev, const u8 command,
if (err != argn + 1) {
v4l2_err(&sdev->sd, "Error while sending command 0x%02x\n",
command);
- return (err > 0) ? -EIO : err;
+ return err < 0 ? err : -EIO;
}
+ until_jiffies = jiffies + usecs_to_jiffies(usecs) + 1;
+
/* Wait response from interrupt */
- if (!wait_for_completion_timeout(&sdev->work,
+ if (client->irq) {
+ if (!wait_for_completion_timeout(&sdev->work,
usecs_to_jiffies(usecs) + 1))
- v4l2_warn(&sdev->sd,
+ v4l2_warn(&sdev->sd,
"(%s) Device took too much time to answer.\n",
__func__);
-
- /* Then get the response */
- err = i2c_master_recv(client, response, respn);
- if (err != respn) {
- v4l2_err(&sdev->sd,
- "Error while reading response for command 0x%02x\n",
- command);
- return (err > 0) ? -EIO : err;
}
- DBG_BUFFER(&sdev->sd, "Response", response, respn);
- if (check_command_failed(response[0]))
- return -EBUSY;
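+	/*
+	 * Without an interrupt line (e.g. when the chip sits behind the USB
+	 * bridge) poll the device: re-read the response until the command no
+	 * longer reports busy or the timeout expires.
+	 */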
+ do {
+ err = i2c_master_recv(client, response, respn);
+ if (err != respn) {
+ v4l2_err(&sdev->sd,
+ "Error %d while reading response for command 0x%02x\n",
+ err, command);
+ return err < 0 ? err : -EIO;
+ }
- return 0;
+ DBG_BUFFER(&sdev->sd, "Response", response, respn);
+ if (!check_command_failed(response[0]))
+ return 0;
+
+ if (client->irq)
+ return -EBUSY;
+ if (usecs <= 1000)
+ usleep_range(usecs, 1000);
+ else
+ usleep_range(1000, 2000);
+ } while (time_is_after_jiffies(until_jiffies));
+
+ return -EBUSY;
}
/*
@@ -265,9 +272,9 @@ static int si4713_read_property(struct si4713_device *sdev, u16 prop, u32 *pv)
int err;
u8 val[SI4713_GET_PROP_NRESP];
/*
- * .First byte = 0
- * .Second byte = property's MSB
- * .Third byte = property's LSB
+ * .First byte = 0
+ * .Second byte = property's MSB
+ * .Third byte = property's LSB
*/
const u8 args[SI4713_GET_PROP_NARGS] = {
0x00,
@@ -302,11 +309,11 @@ static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val)
int rval;
u8 resp[SI4713_SET_PROP_NRESP];
/*
- * .First byte = 0
- * .Second byte = property's MSB
- * .Third byte = property's LSB
- * .Fourth byte = value's MSB
- * .Fifth byte = value's LSB
+ * .First byte = 0
+ * .Second byte = property's MSB
+ * .Third byte = property's LSB
+ * .Fourth byte = value's MSB
+ * .Fifth byte = value's LSB
*/
const u8 args[SI4713_SET_PROP_NARGS] = {
0x00,
@@ -344,31 +351,36 @@ static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val)
*/
static int si4713_powerup(struct si4713_device *sdev)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
int err;
u8 resp[SI4713_PWUP_NRESP];
/*
- * .First byte = Enabled interrupts and boot function
- * .Second byte = Input operation mode
+ * .First byte = Enabled interrupts and boot function
+ * .Second byte = Input operation mode
*/
- const u8 args[SI4713_PWUP_NARGS] = {
- SI4713_PWUP_CTSIEN | SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
+ u8 args[SI4713_PWUP_NARGS] = {
+ SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
SI4713_PWUP_OPMOD_ANALOG,
};
if (sdev->power_state)
return 0;
- err = regulator_bulk_enable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
- if (err) {
- v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
- return err;
+ if (sdev->supplies) {
+ err = regulator_bulk_enable(sdev->supplies, sdev->supply_data);
+ if (err) {
+ v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
+ return err;
+ }
}
if (gpio_is_valid(sdev->gpio_reset)) {
udelay(50);
gpio_set_value(sdev->gpio_reset, 1);
}
+ if (client->irq)
+ args[0] |= SI4713_PWUP_CTSIEN;
+
err = si4713_send_command(sdev, SI4713_CMD_POWER_UP,
args, ARRAY_SIZE(args),
resp, ARRAY_SIZE(resp),
@@ -380,13 +392,15 @@ static int si4713_powerup(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Device in power up mode\n");
sdev->power_state = POWER_ON;
- err = si4713_write_property(sdev, SI4713_GPO_IEN,
+ if (client->irq)
+ err = si4713_write_property(sdev, SI4713_GPO_IEN,
SI4713_STC_INT | SI4713_CTS);
- } else {
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_set_value(sdev->gpio_reset, 0);
- err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
+ return err;
+ }
+ if (gpio_is_valid(sdev->gpio_reset))
+ gpio_set_value(sdev->gpio_reset, 0);
+ if (sdev->supplies) {
+ err = regulator_bulk_disable(sdev->supplies, sdev->supply_data);
if (err)
v4l2_err(&sdev->sd,
"Failed to disable supplies: %d\n", err);
@@ -418,11 +432,13 @@ static int si4713_powerdown(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
if (gpio_is_valid(sdev->gpio_reset))
gpio_set_value(sdev->gpio_reset, 0);
- err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
- if (err)
- v4l2_err(&sdev->sd,
- "Failed to disable supplies: %d\n", err);
+ if (sdev->supplies) {
+ err = regulator_bulk_disable(sdev->supplies,
+ sdev->supply_data);
+ if (err)
+ v4l2_err(&sdev->sd,
+ "Failed to disable supplies: %d\n", err);
+ }
sdev->power_state = POWER_OFF;
}
@@ -451,7 +467,7 @@ static int si4713_checkrev(struct si4713_device *sdev)
v4l2_info(&sdev->sd, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
} else {
- v4l2_err(&sdev->sd, "Invalid product number\n");
+ v4l2_err(&sdev->sd, "Invalid product number 0x%X\n", resp[1]);
rval = -EINVAL;
}
return rval;
@@ -465,39 +481,45 @@ static int si4713_checkrev(struct si4713_device *sdev)
*/
static int si4713_wait_stc(struct si4713_device *sdev, const int usecs)
{
- int err;
+ struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
u8 resp[SI4713_GET_STATUS_NRESP];
+ unsigned long start_jiffies = jiffies;
+ int err;
- /* Wait response from STC interrupt */
- if (!wait_for_completion_timeout(&sdev->work,
- usecs_to_jiffies(usecs) + 1))
+ if (client->irq &&
+ !wait_for_completion_timeout(&sdev->work, usecs_to_jiffies(usecs) + 1))
v4l2_warn(&sdev->sd,
- "%s: device took too much time to answer (%d usec).\n",
- __func__, usecs);
-
- /* Clear status bits */
- err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
- NULL, 0,
- resp, ARRAY_SIZE(resp),
- DEFAULT_TIMEOUT);
-
- if (err < 0)
- goto exit;
-
- v4l2_dbg(1, debug, &sdev->sd,
- "%s: status bits: 0x%02x\n", __func__, resp[0]);
-
- if (!(resp[0] & SI4713_STC_INT))
- err = -EIO;
-
-exit:
- return err;
+ "(%s) Device took too much time to answer.\n", __func__);
+
+ for (;;) {
+ /* Clear status bits */
+ err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
+ NULL, 0,
+ resp, ARRAY_SIZE(resp),
+ DEFAULT_TIMEOUT);
+		/* The USB device returns errors while it is waiting for the
+		 * STC bit to be set, hence we poll. */
+ if (err >= 0) {
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: status bits: 0x%02x\n", __func__, resp[0]);
+
+ if (resp[0] & SI4713_STC_INT)
+ return 0;
+ }
+ if (jiffies_to_usecs(jiffies - start_jiffies) > usecs)
+ return err < 0 ? err : -EIO;
+ /* We sleep here for 3-4 ms in order to avoid flooding the device
+ * with USB requests. The si4713 USB driver was developed
+		 * by reverse engineering the Windows USB driver. The Windows
+ * driver also has a ~2.5 ms delay between responses. */
+ usleep_range(3000, 4000);
+ }
}
/*
* si4713_tx_tune_freq - Sets the state of the RF carrier and sets the tuning
- * frequency between 76 and 108 MHz in 10 kHz units and
- * steps of 50 kHz.
+ * frequency between 76 and 108 MHz in 10 kHz units and
+ * steps of 50 kHz.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
*/
@@ -506,9 +528,9 @@ static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency)
int err;
u8 val[SI4713_TXFREQ_NRESP];
/*
- * .First byte = 0
- * .Second byte = frequency's MSB
- * .Third byte = frequency's LSB
+ * .First byte = 0
+ * .Second byte = frequency's MSB
+ * .Third byte = frequency's LSB
*/
const u8 args[SI4713_TXFREQ_NARGS] = {
0x00,
@@ -535,14 +557,14 @@ static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency)
}
/*
- * si4713_tx_tune_power - Sets the RF voltage level between 88 and 115 dBuV in
- * 1 dB units. A value of 0x00 indicates off. The command
- * also sets the antenna tuning capacitance. A value of 0
- * indicates autotuning, and a value of 1 - 191 indicates
- * a manual override, which results in a tuning
- * capacitance of 0.25 pF x @antcap.
+ * si4713_tx_tune_power - Sets the RF voltage level between 88 and 120 dBuV in
+ * 1 dB units. A value of 0x00 indicates off. The command
+ * also sets the antenna tuning capacitance. A value of 0
+ * indicates autotuning, and a value of 1 - 191 indicates
+ * a manual override, which results in a tuning
+ * capacitance of 0.25 pF x @antcap.
* @sdev: si4713_device structure for the device we are communicating
- * @power: tuning power (88 - 115 dBuV, unit/step 1 dB)
+ * @power: tuning power (88 - 120 dBuV, unit/step 1 dB)
* @antcap: value of antenna tuning capacitor (0 - 191)
*/
static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
@@ -551,21 +573,21 @@ static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
int err;
u8 val[SI4713_TXPWR_NRESP];
/*
- * .First byte = 0
- * .Second byte = 0
- * .Third byte = power
- * .Fourth byte = antcap
+ * .First byte = 0
+ * .Second byte = 0
+ * .Third byte = power
+ * .Fourth byte = antcap
*/
- const u8 args[SI4713_TXPWR_NARGS] = {
+ u8 args[SI4713_TXPWR_NARGS] = {
0x00,
0x00,
power,
antcap,
};
- if (((power > 0) && (power < SI4713_MIN_POWER)) ||
- power > SI4713_MAX_POWER || antcap > SI4713_MAX_ANTCAP)
- return -EDOM;
+ /* Map power values 1-87 to MIN_POWER (88) */
+ if (power > 0 && power < SI4713_MIN_POWER)
+ args[2] = power = SI4713_MIN_POWER;
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_POWER,
args, ARRAY_SIZE(args), val,
@@ -583,12 +605,12 @@ static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
/*
* si4713_tx_tune_measure - Enters receive mode and measures the received noise
- * level in units of dBuV on the selected frequency.
- * The Frequency must be between 76 and 108 MHz in 10 kHz
- * units and steps of 50 kHz. The command also sets the
- * antenna tuning capacitance. A value of 0 means
- * autotuning, and a value of 1 to 191 indicates manual
- * override.
+ * level in units of dBuV on the selected frequency.
+ * The Frequency must be between 76 and 108 MHz in 10 kHz
+ * units and steps of 50 kHz. The command also sets the
+ * antenna tuning capacitance. A value of 0 means
+ * autotuning, and a value of 1 to 191 indicates manual
+ * override.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
* @antcap: value of antenna tuning capacitor (0 - 191)
@@ -599,10 +621,10 @@ static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency,
int err;
u8 val[SI4713_TXMEA_NRESP];
/*
- * .First byte = 0
- * .Second byte = frequency's MSB
- * .Third byte = frequency's LSB
- * .Fourth byte = antcap
+ * .First byte = 0
+ * .Second byte = frequency's MSB
+ * .Third byte = frequency's LSB
+ * .Fourth byte = antcap
*/
const u8 args[SI4713_TXMEA_NARGS] = {
0x00,
@@ -632,11 +654,11 @@ static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency,
/*
* si4713_tx_tune_status- Returns the status of the tx_tune_freq, tx_tune_mea or
- * tx_tune_power commands. This command return the current
- * frequency, output voltage in dBuV, the antenna tunning
- * capacitance value and the received noise level. The
- * command also clears the stcint interrupt bit when the
- * first bit of its arguments is high.
+ * tx_tune_power commands. This command return the current
+ * frequency, output voltage in dBuV, the antenna tunning
+ * capacitance value and the received noise level. The
+ * command also clears the stcint interrupt bit when the
+ * first bit of its arguments is high.
* @sdev: si4713_device structure for the device we are communicating
* @intack: 0x01 to clear the seek/tune complete interrupt status indicator.
* @frequency: returned frequency
@@ -651,7 +673,7 @@ static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
int err;
u8 val[SI4713_TXSTATUS_NRESP];
/*
- * .First byte = intack bit
+ * .First byte = intack bit
*/
const u8 args[SI4713_TXSTATUS_NARGS] = {
intack & SI4713_INTACK_MASK,
@@ -812,8 +834,9 @@ static int si4713_set_rds_ps_name(struct si4713_device *sdev, char *ps_name)
return rval;
}
-static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
+static int si4713_set_rds_radio_text(struct si4713_device *sdev, const char *rt)
{
+ static const char cr[RDS_RADIOTEXT_BLK_SIZE] = { RDS_CARRIAGE_RETURN, 0 };
int rval = 0, i;
u16 t_index = 0;
u8 b_index = 0, cr_inserted = 0;
@@ -837,7 +860,7 @@ static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
for (i = 0; i < RDS_RADIOTEXT_BLK_SIZE; i++) {
if (!rt[t_index + i] ||
rt[t_index + i] == RDS_CARRIAGE_RETURN) {
- rt[t_index + i] = RDS_CARRIAGE_RETURN;
+ rt = cr;
cr_inserted = 1;
break;
}
@@ -1024,7 +1047,6 @@ static int si4713_initialize(struct si4713_device *sdev)
if (rval < 0)
return rval;
-
sdev->frequency = DEFAULT_FREQUENCY;
sdev->stereo = 1;
sdev->tune_rnl = DEFAULT_TUNE_RNL;
@@ -1345,7 +1367,7 @@ static int si4713_probe(struct i2c_client *client,
struct v4l2_ctrl_handler *hdl;
int rval, i;
- sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
dev_err(&client->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
@@ -1362,13 +1384,14 @@ static int si4713_probe(struct i2c_client *client,
}
sdev->gpio_reset = pdata->gpio_reset;
gpio_direction_output(sdev->gpio_reset, 0);
+ sdev->supplies = pdata->supplies;
}
- for (i = 0; i < ARRAY_SIZE(sdev->supplies); i++)
- sdev->supplies[i].supply = si4713_supply_names[i];
+ for (i = 0; i < sdev->supplies; i++)
+ sdev->supply_data[i].supply = pdata->supply_names[i];
- rval = regulator_bulk_get(&client->dev, ARRAY_SIZE(sdev->supplies),
- sdev->supplies);
+ rval = regulator_bulk_get(&client->dev, sdev->supplies,
+ sdev->supply_data);
if (rval) {
dev_err(&client->dev, "Cannot get regulators: %d\n", rval);
goto free_gpio;
@@ -1420,8 +1443,8 @@ static int si4713_probe(struct i2c_client *client,
V4L2_CID_AUDIO_COMPRESSION_GAIN, 0, MAX_ACOMP_GAIN, 1,
DEFAULT_ACOMP_GAIN);
sdev->compression_threshold = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_AUDIO_COMPRESSION_THRESHOLD, MIN_ACOMP_THRESHOLD,
- MAX_ACOMP_THRESHOLD, 1,
+ V4L2_CID_AUDIO_COMPRESSION_THRESHOLD,
+ MIN_ACOMP_THRESHOLD, MAX_ACOMP_THRESHOLD, 1,
DEFAULT_ACOMP_THRESHOLD);
sdev->compression_attack_time = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME, 0,
@@ -1443,9 +1466,11 @@ static int si4713_probe(struct i2c_client *client,
V4L2_CID_TUNE_PREEMPHASIS,
V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_50_uS);
sdev->tune_pwr_level = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, 0, 120, 1, DEFAULT_POWER_LEVEL);
+ V4L2_CID_TUNE_POWER_LEVEL, 0, SI4713_MAX_POWER,
+ 1, DEFAULT_POWER_LEVEL);
sdev->tune_ant_cap = v4l2_ctrl_new_std(hdl, &si4713_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, 191, 1, 0);
+ V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, SI4713_MAX_ANTCAP,
+ 1, 0);
if (hdl->error) {
rval = hdl->error;
@@ -1481,7 +1506,7 @@ free_irq:
free_ctrls:
v4l2_ctrl_handler_free(hdl);
put_reg:
- regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+ regulator_bulk_free(sdev->supplies, sdev->supply_data);
free_gpio:
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
@@ -1505,7 +1530,7 @@ static int si4713_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
+ regulator_bulk_free(sdev->supplies, sdev->supply_data);
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
kfree(sdev);
diff --git a/drivers/media/radio/si4713-i2c.h b/drivers/media/radio/si4713/si4713.h
index 25cdea26343b..4837cf6e0e1b 100644
--- a/drivers/media/radio/si4713-i2c.h
+++ b/drivers/media/radio/si4713/si4713.h
@@ -15,6 +15,7 @@
#ifndef SI4713_I2C_H
#define SI4713_I2C_H
+#include <linux/regulator/consumer.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/si4713.h>
@@ -226,7 +227,8 @@ struct si4713_device {
struct v4l2_ctrl *tune_ant_cap;
};
struct completion work;
- struct regulator_bulk_data supplies[SI4713_NUM_SUPPLIES];
+ unsigned supplies;
+ struct regulator_bulk_data supply_data[SI4713_NUM_SUPPLIES];
int gpio_reset;
u32 power_state;
u32 rds_enabled;
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index cef06981b7c9..7c14060a40b8 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -20,12 +20,12 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <asm/io.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f329485c6629..822b9f47ca72 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1909,10 +1909,8 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
int ret, i;
idev = input_allocate_device();
- if (!idev) {
- dev_err(ictx->dev, "input dev allocation failed\n");
+ if (!idev)
goto out;
- }
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
"iMON Panel, Knob and Mouse(%04x:%04x)",
@@ -1960,10 +1958,8 @@ static struct input_dev *imon_init_touch(struct imon_context *ictx)
int ret;
touch = input_allocate_device();
- if (!touch) {
- dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+ if (!touch)
goto touch_alloc_failed;
- }
snprintf(ictx->name_touch, sizeof(ictx->name_touch),
"iMON USB Touchscreen (%04x:%04x)",
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index b1cde8c0422b..0b8c54919010 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -98,4 +98,5 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
rc-winfast.o \
- rc-winfast-usbii-deluxe.o
+ rc-winfast-usbii-deluxe.o \
+ rc-su3000.o
diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c
new file mode 100644
index 000000000000..8dbd3e9bc951
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-su3000.c
@@ -0,0 +1,75 @@
+/* rc-su3000.c - Keytable for Geniatech HDStar Remote Controller
+ *
+ * Copyright (c) 2013 by Evgeny Plehov <Evgeny Plehov@ukr.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table su3000[] = {
+ { 0x25, KEY_POWER }, /* right-bottom Red */
+ { 0x0a, KEY_MUTE }, /* -/-- */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x00, KEY_0 },
+ { 0x20, KEY_UP }, /* CH+ */
+	{ 0x21, KEY_DOWN },		/* CH- */
+ { 0x12, KEY_VOLUMEUP }, /* Brightness Up */
+ { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */
+ { 0x1f, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x16, KEY_PAUSE },
+ { 0x0b, KEY_STOP },
+ { 0x27, KEY_FASTFORWARD },/* >> */
+ { 0x26, KEY_REWIND }, /* << */
+ { 0x0d, KEY_OK }, /* Mute */
+ { 0x11, KEY_LEFT }, /* VOL- */
+ { 0x10, KEY_RIGHT }, /* VOL+ */
+ { 0x29, KEY_BACK }, /* button under 9 */
+ { 0x2c, KEY_MENU }, /* TTX */
+ { 0x2b, KEY_EPG }, /* EPG */
+ { 0x1e, KEY_RED }, /* OSD */
+ { 0x0e, KEY_GREEN }, /* Window */
+ { 0x2d, KEY_YELLOW }, /* button under << */
+ { 0x0f, KEY_BLUE }, /* bottom yellow button */
+ { 0x14, KEY_AUDIO }, /* Snapshot */
+ { 0x38, KEY_TV }, /* TV/Radio */
+ { 0x0c, KEY_ESC } /* upper Red button */
+};
+
+static struct rc_map_list su3000_map = {
+ .map = {
+ .scan = su3000,
+ .size = ARRAY_SIZE(su3000),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_SU3000,
+ }
+};
+
+static int __init init_rc_map_su3000(void)
+{
+ return rc_map_register(&su3000_map);
+}
+
+static void __exit exit_rc_map_su3000(void)
+{
+ rc_map_unregister(&su3000_map);
+}
+
+module_init(init_rc_map_su3000)
+module_exit(exit_rc_map_su3000)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeny Plehov <Evgeny Plehov@ukr.net>");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3c761014d3ce..a25bb1581e46 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -199,6 +199,7 @@ static bool debug;
#define VENDOR_TIVO 0x105a
#define VENDOR_CONEXANT 0x0572
#define VENDOR_TWISTEDMELON 0x2596
+#define VENDOR_HAUPPAUGE 0x2040
enum mceusb_model_type {
MCE_GEN2 = 0, /* Most boards */
@@ -210,6 +211,7 @@ enum mceusb_model_type {
MULTIFUNCTION,
TIVO_KIT,
MCE_GEN2_NO_TX,
+ HAUPPAUGE_CX_HYBRID_TV,
};
struct mceusb_model {
@@ -258,6 +260,11 @@ static const struct mceusb_model mceusb_model[] = {
.no_tx = 1, /* tx isn't wired up at all */
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
+ [HAUPPAUGE_CX_HYBRID_TV] = {
+ .rc_map = RC_MAP_HAUPPAUGE,
+ .no_tx = 1, /* eeprom says it has no tx */
+ .name = "Conexant Hybrid TV (cx231xx) MCE IR no TX",
+ },
[MULTIFUNCTION] = {
.mce_gen2 = 1,
.ir_intfnum = 2,
@@ -399,6 +406,9 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
/* Twisted Melon Inc. - Manta Transceiver */
{ USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
+ /* Hauppauge WinTV-HVR-930C-HD - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Terminating entry */
{ }
};
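In the mceusb table above, the new Hauppauge entry selects its model through .driver_info, which probe() later uses as an index into mceusb_model[]. A hedged sketch of that generic lookup pattern follows; the struct, table and function names are illustrative.

/* Hedged sketch: generic .driver_info model lookup, illustrative names. */
#include <linux/device.h>
#include <linux/usb.h>

struct demo_model {
	const char *name;
	bool no_tx;
};

static const struct demo_model demo_models[] = {
	[0] = { .name = "generic transceiver" },
	[1] = { .name = "receive-only variant", .no_tx = true },
};

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	/* .driver_info carries the index set in the device table. */
	const struct demo_model *model = &demo_models[id->driver_info];

	dev_info(&intf->dev, "model: %s, tx %ssupported\n",
		 model->name, model->no_tx ? "not " : "");
	return 0;
}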
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 46da365c9c84..02e2f38c9c85 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -22,6 +22,10 @@
#include <linux/module.h>
#include "rc-core-priv.h"
+/* Bitmap to store allocated device numbers from 0 to IRRCV_NUM_DEVICES - 1 */
+#define IRRCV_NUM_DEVICES 256
+DECLARE_BITMAP(ir_core_dev_number, IRRCV_NUM_DEVICES);
+
/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
#define IR_TAB_MIN_SIZE 256
#define IR_TAB_MAX_SIZE 8192
@@ -1065,10 +1069,9 @@ EXPORT_SYMBOL_GPL(rc_free_device);
int rc_register_device(struct rc_dev *dev)
{
static bool raw_init = false; /* raw decoders loaded? */
- static atomic_t devno = ATOMIC_INIT(0);
struct rc_map *rc_map;
const char *path;
- int rc;
+ int rc, devno;
if (!dev || !dev->map_name)
return -EINVAL;
@@ -1096,7 +1099,15 @@ int rc_register_device(struct rc_dev *dev)
*/
mutex_lock(&dev->lock);
- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
+ do {
+ devno = find_first_zero_bit(ir_core_dev_number,
+ IRRCV_NUM_DEVICES);
+ /* No free device slots */
+ if (devno >= IRRCV_NUM_DEVICES)
+ return -ENOMEM;
+ } while (test_and_set_bit(devno, ir_core_dev_number));
+
+ dev->devno = devno;
dev_set_name(&dev->dev, "rc%ld", dev->devno);
dev_set_drvdata(&dev->dev, dev);
rc = device_add(&dev->dev);
@@ -1186,6 +1197,7 @@ out_dev:
device_del(&dev->dev);
out_unlock:
mutex_unlock(&dev->lock);
+ clear_bit(dev->devno, ir_core_dev_number);
return rc;
}
EXPORT_SYMBOL_GPL(rc_register_device);
@@ -1197,6 +1209,8 @@ void rc_unregister_device(struct rc_dev *dev)
del_timer_sync(&dev->timer_keyup);
+ clear_bit(dev->devno, ir_core_dev_number);
+
if (dev->driver_type == RC_DRIVER_IR_RAW)
ir_raw_event_unregister(dev);
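The rc-main.c change replaces the monotonically increasing atomic counter with a bitmap, so rc%d numbers freed by rc_unregister_device() can be handed out again. The userspace sketch below mirrors the allocate/reuse behaviour with a plain array; the kernel version additionally relies on test_and_set_bit() for atomicity, which this illustration does not attempt to model.

/* Hedged sketch: userspace analogue of the bitmap ID allocator. */
#include <stdio.h>
#include <stdbool.h>

#define NUM_DEVICES 256

static bool dev_used[NUM_DEVICES];

/* Return the lowest free number and mark it used, or -1 if all are taken. */
static int alloc_devno(void)
{
	int i;

	for (i = 0; i < NUM_DEVICES; i++) {
		if (!dev_used[i]) {
			dev_used[i] = true;
			return i;
		}
	}
	return -1;
}

static void free_devno(int devno)
{
	dev_used[devno] = false;
}

int main(void)
{
	int a = alloc_devno();	/* 0 */
	int b = alloc_devno();	/* 1 */

	free_devno(a);		/* slot 0 becomes reusable ... */
	printf("reused: rc%d\n", alloc_devno());	/* ... prints rc0 */
	printf("still held: rc%d\n", b);
	return 0;
}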
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 65120c2d47ad..8f0cddb9e8f2 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <media/rc-core.h>
#include <linux/pinctrl/consumer.h>
@@ -28,6 +29,7 @@ struct st_rc_device {
int sample_mult;
int sample_div;
bool rxuhfmode;
+ struct reset_control *rstc;
};
/* Registers */
@@ -161,6 +163,10 @@ static void st_rc_hardware_init(struct st_rc_device *dev)
unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
unsigned int rx_sampling_freq_div;
+ /* Enable the IP */
+ if (dev->rstc)
+ reset_control_deassert(dev->rstc);
+
clk_prepare_enable(dev->sys_clock);
baseclock = clk_get_rate(dev->sys_clock);
@@ -271,6 +277,11 @@ static int st_rc_probe(struct platform_device *pdev)
else
rc_dev->rx_base = rc_dev->base;
+
+ rc_dev->rstc = reset_control_get(dev, NULL);
+ if (IS_ERR(rc_dev->rstc))
+ rc_dev->rstc = NULL;
+
rc_dev->dev = dev;
platform_set_drvdata(pdev, rc_dev);
st_rc_hardware_init(rc_dev);
@@ -338,6 +349,8 @@ static int st_rc_suspend(struct device *dev)
writel(0x00, rc_dev->rx_base + IRB_RX_EN);
writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
clk_disable_unprepare(rc_dev->sys_clock);
+ if (rc_dev->rstc)
+ reset_control_assert(rc_dev->rstc);
}
return 0;
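st_rc treats the reset controller as optional: a failed reset_control_get() simply leaves rstc NULL and the assert/deassert calls are skipped. A minimal sketch of the same optional-reset pattern in a generic platform driver is shown below; the demo_ names are illustrative.

/* Hedged sketch: optional reset line handling in an illustrative driver. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

struct demo_priv {
	struct reset_control *rstc;
};

static void demo_hw_init(struct demo_priv *priv)
{
	/* Only touch the reset line when the platform described one. */
	if (priv->rstc)
		reset_control_deassert(priv->rstc);

	/* ... clock and register setup would follow here ... */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->rstc = reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(priv->rstc))
		priv->rstc = NULL;	/* reset support is optional */

	demo_hw_init(priv);
	platform_set_drvdata(pdev, priv);
	return 0;
}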
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 15665debc572..ba2e365296cf 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -215,6 +215,13 @@ config MEDIA_TUNER_FC2580
help
FCI FC2580 silicon tuner driver.
+config MEDIA_TUNER_M88TS2022
+ tristate "Montage M88TS2022 silicon tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Montage M88TS2022 silicon tuner driver.
+
config MEDIA_TUNER_TUA9001
tristate "Infineon TUA 9001 silicon tuner"
depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 308f108eadba..efe82a904b12 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_M88TS2022) += m88ts2022.o
obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 72971a8d3c37..40c1da707d15 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -243,8 +243,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_pll_lut))
+ if (i == ARRAY_SIZE(e4000_pll_lut)) {
+ ret = -EINVAL;
goto err;
+ }
/*
* Note: Currently f_vco overflows when c->frequency is 1 073 741 824 Hz
@@ -271,8 +273,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e400_lna_filter_lut))
+ if (i == ARRAY_SIZE(e400_lna_filter_lut)) {
+ ret = -EINVAL;
goto err;
+ }
ret = e4000_wr_reg(priv, 0x10, e400_lna_filter_lut[i].val);
if (ret < 0)
@@ -284,8 +288,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_if_filter_lut))
+ if (i == ARRAY_SIZE(e4000_if_filter_lut)) {
+ ret = -EINVAL;
goto err;
+ }
buf[0] = e4000_if_filter_lut[i].reg11_val;
buf[1] = e4000_if_filter_lut[i].reg12_val;
@@ -300,8 +306,10 @@ static int e4000_set_params(struct dvb_frontend *fe)
break;
}
- if (i == ARRAY_SIZE(e4000_band_lut))
+ if (i == ARRAY_SIZE(e4000_band_lut)) {
+ ret = -EINVAL;
goto err;
+ }
ret = e4000_wr_reg(priv, 0x07, e4000_band_lut[i].reg07_val);
if (ret < 0)
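Each e4000 hunk above fixes the case where the lookup-table scan runs past the last entry: previously the code jumped to err with ret still holding the last successful value, so an unsupported frequency could be reported as success. The small standalone example below illustrates the corrected pattern with an assumed two-entry table.

/* Hedged sketch: report a LUT miss as -EINVAL instead of stale 'ret'. */
#include <errno.h>
#include <stdio.h>

struct lut_entry { unsigned int freq; unsigned char val; };

static const struct lut_entry lut[] = {
	{ 1000000, 0x01 },
	{ 2000000, 0x02 },
};

static int pick_val(unsigned int freq, unsigned char *val)
{
	size_t i;
	int ret = 0;

	for (i = 0; i < sizeof(lut) / sizeof(lut[0]); i++) {
		if (freq <= lut[i].freq)
			break;
	}

	if (i == sizeof(lut) / sizeof(lut[0])) {
		ret = -EINVAL;	/* no table entry covers this frequency */
		goto err;
	}

	*val = lut[i].val;
	return 0;
err:
	return ret;
}

int main(void)
{
	unsigned char v;

	printf("in range: %d\n", pick_val(1500000, &v));	/* 0 */
	printf("out of range: %d\n", pick_val(9000000, &v));	/* -EINVAL */
	return 0;
}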
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
new file mode 100644
index 000000000000..40c42dec721b
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.c
@@ -0,0 +1,674 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Some calculations are taken from existing TS2020 driver.
+ */
+
+#include "m88ts2022_priv.h"
+
+/* write multiple registers */
+static int m88ts2022_wr_regs(struct m88ts2022_priv *priv,
+ u8 reg, const u8 *val, int len)
+{
+#define MAX_WR_LEN 3
+#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
+ int ret;
+ u8 buf[MAX_WR_XFER_LEN];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .len = 1 + len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_WR_LEN))
+ return -EINVAL;
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ ret = i2c_transfer(priv->client->adapter, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ dev_warn(&priv->client->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int m88ts2022_rd_regs(struct m88ts2022_priv *priv, u8 reg,
+ u8 *val, int len)
+{
+#define MAX_RD_LEN 1
+#define MAX_RD_XFER_LEN (MAX_RD_LEN)
+ int ret;
+ u8 buf[MAX_RD_XFER_LEN];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+
+ if (WARN_ON(len > MAX_RD_LEN))
+ return -EINVAL;
+
+ ret = i2c_transfer(priv->client->adapter, msg, 2);
+ if (ret == 2) {
+ memcpy(val, buf, len);
+ ret = 0;
+ } else {
+ dev_warn(&priv->client->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int m88ts2022_wr_reg(struct m88ts2022_priv *priv, u8 reg, u8 val)
+{
+ return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int m88ts2022_rd_reg(struct m88ts2022_priv *priv, u8 reg, u8 *val)
+{
+ return m88ts2022_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int m88ts2022_wr_reg_mask(struct m88ts2022_priv *priv,
+ u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 u8tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = m88ts2022_rd_regs(priv, reg, &u8tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ u8tmp &= ~mask;
+ val |= u8tmp;
+ }
+
+ return m88ts2022_wr_regs(priv, reg, &val, 1);
+}
+
+static int m88ts2022_cmd(struct dvb_frontend *fe,
+ int op, int sleep, u8 reg, u8 mask, u8 val, u8 *reg_val)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret, i;
+ u8 u8tmp;
+ struct m88ts2022_reg_val reg_vals[] = {
+ {0x51, 0x1f - op},
+ {0x51, 0x1f},
+ {0x50, 0x00 + op},
+ {0x50, 0x00},
+ };
+
+ for (i = 0; i < 2; i++) {
+ dev_dbg(&priv->client->dev,
+ "%s: i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
+ __func__, i, op, reg, mask, val);
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ ret = m88ts2022_wr_reg(priv, reg_vals[i].reg,
+ reg_vals[i].val);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(sleep * 1000, sleep * 10000);
+
+ ret = m88ts2022_rd_reg(priv, reg, &u8tmp);
+ if (ret)
+ goto err;
+
+ if ((u8tmp & mask) != val)
+ break;
+ }
+
+ if (reg_val)
+ *reg_val = u8tmp;
+err:
+ return ret;
+}
+
+static int m88ts2022_set_params(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ unsigned int frequency_khz, frequency_offset_khz, f_3db_hz;
+ unsigned int f_ref_khz, f_vco_khz, div_ref, div_out, pll_n, gdiv28;
+ u8 buf[3], u8tmp, cap_code, lpf_gm, lpf_mxdiv, div_max, div_min;
+ u16 u16tmp;
+ dev_dbg(&priv->client->dev,
+ "%s: frequency=%d symbol_rate=%d rolloff=%d\n",
+ __func__, c->frequency, c->symbol_rate, c->rolloff);
+ /*
+ * Integer-N PLL synthesizer
+ * kHz is used for all calculations to keep calculations within 32-bit
+ */
+ f_ref_khz = DIV_ROUND_CLOSEST(priv->cfg.clock, 1000);
+ div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);
+
+ if (c->symbol_rate < 5000000)
+ frequency_offset_khz = 3000; /* 3 MHz */
+ else
+ frequency_offset_khz = 0;
+
+ frequency_khz = c->frequency + frequency_offset_khz;
+
+ if (frequency_khz < 1103000) {
+ div_out = 4;
+ u8tmp = 0x1b;
+ } else {
+ div_out = 2;
+ u8tmp = 0x0b;
+ }
+
+ buf[0] = u8tmp;
+ buf[1] = 0x40;
+ ret = m88ts2022_wr_regs(priv, 0x10, buf, 2);
+ if (ret)
+ goto err;
+
+ f_vco_khz = frequency_khz * div_out;
+ pll_n = f_vco_khz * div_ref / f_ref_khz;
+ pll_n += pll_n % 2;
+ priv->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
+
+ if (pll_n < 4095)
+ u16tmp = pll_n - 1024;
+ else if (pll_n < 6143)
+ u16tmp = pll_n + 1024;
+ else
+ u16tmp = pll_n + 3072;
+
+ buf[0] = (u16tmp >> 8) & 0x3f;
+ buf[1] = (u16tmp >> 0) & 0xff;
+ buf[2] = div_ref - 8;
+ ret = m88ts2022_wr_regs(priv, 0x01, buf, 3);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->client->dev,
+ "%s: frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
+ __func__, priv->frequency_khz,
+ priv->frequency_khz - c->frequency, f_vco_khz, pll_n,
+ div_ref, div_out);
+
+ ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x7f;
+ if (u8tmp < 64) {
+ ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x80, 0x80);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x11, 0x6f);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x1f;
+ if (u8tmp > 19) {
+ ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x00, 0x02);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ts2022_cmd(fe, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x25, 0x00);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x27, 0x70);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x08, 0x0b);
+ if (ret)
+ goto err;
+
+ /* filters */
+ gdiv28 = DIV_ROUND_CLOSEST(f_ref_khz * 1694U, 1000000U);
+
+ ret = m88ts2022_wr_reg(priv, 0x04, gdiv28);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ cap_code = u8tmp & 0x3f;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x0d);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x3f;
+ cap_code = (cap_code + u8tmp) / 2;
+ gdiv28 = gdiv28 * 207 / (cap_code * 2 + 151);
+ div_max = gdiv28 * 135 / 100;
+ div_min = gdiv28 * 78 / 100;
+ div_max = clamp_val(div_max, 0U, 63U);
+
+ f_3db_hz = c->symbol_rate * 135UL / 200UL;
+ f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
+ f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
+
+#define LPF_COEFF 3200U
+ lpf_gm = DIV_ROUND_CLOSEST(f_3db_hz * gdiv28, LPF_COEFF * f_ref_khz);
+ lpf_gm = clamp_val(lpf_gm, 1U, 23U);
+
+ lpf_mxdiv = DIV_ROUND_CLOSEST(lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+ if (lpf_mxdiv < div_min)
+ lpf_mxdiv = DIV_ROUND_CLOSEST(++lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
+ lpf_mxdiv = clamp_val(lpf_mxdiv, 0U, div_max);
+
+ ret = m88ts2022_wr_reg(priv, 0x04, lpf_mxdiv);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x06, lpf_gm);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ cap_code = u8tmp & 0x3f;
+
+ ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ u8tmp &= 0x3f;
+ cap_code = (cap_code + u8tmp) / 2;
+
+ u8tmp = cap_code | 0x80;
+ ret = m88ts2022_wr_reg(priv, 0x25, u8tmp);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x27, 0x30);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x08, 0x09);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_cmd(fe, 0x01, 20, 0x21, 0xff, 0x00, NULL);
+ if (ret)
+ goto err;
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int m88ts2022_init(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret, i;
+ u8 u8tmp;
+ static const struct m88ts2022_reg_val reg_vals[] = {
+ {0x7d, 0x9d},
+ {0x7c, 0x9a},
+ {0x7a, 0x76},
+ {0x3b, 0x01},
+ {0x63, 0x88},
+ {0x61, 0x85},
+ {0x22, 0x30},
+ {0x30, 0x40},
+ {0x20, 0x23},
+ {0x24, 0x02},
+ {0x12, 0xa0},
+ };
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ if (ret)
+ goto err;
+
+ switch (priv->cfg.clock_out) {
+ case M88TS2022_CLOCK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ if (ret)
+ goto err;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ goto err;
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ if (ret)
+ goto err;
+
+ if (priv->cfg.loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ ret = m88ts2022_wr_reg(priv, reg_vals[i].reg, reg_vals[i].val);
+ if (ret)
+ goto err;
+ }
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ts2022_sleep(struct dvb_frontend *fe)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int m88ts2022_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ *frequency = priv->frequency_khz;
+ return 0;
+}
+
+static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ dev_dbg(&priv->client->dev, "%s:\n", __func__);
+
+ *frequency = 0; /* Zero-IF */
+ return 0;
+}
+
+static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct m88ts2022_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 u8tmp;
+ u16 gain, u16tmp;
+ unsigned int gain1, gain2, gain3;
+
+ ret = m88ts2022_rd_reg(priv, 0x3d, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain1 = (u8tmp >> 0) & 0x1f;
+ gain1 = clamp(gain1, 0U, 15U);
+
+ ret = m88ts2022_rd_reg(priv, 0x21, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain2 = (u8tmp >> 0) & 0x1f;
+ gain2 = clamp(gain2, 2U, 16U);
+
+ ret = m88ts2022_rd_reg(priv, 0x66, &u8tmp);
+ if (ret)
+ goto err;
+
+ gain3 = (u8tmp >> 3) & 0x07;
+ gain3 = clamp(gain3, 0U, 6U);
+
+ gain = gain1 * 265 + gain2 * 338 + gain3 * 285;
+
+ /* scale value to 0x0000-0xffff */
+ u16tmp = (0xffff - gain);
+ u16tmp = clamp_val(u16tmp, 59000U, 61500U);
+
+ *strength = (u16tmp - 59000) * 0xffff / (61500 - 59000);
+err:
+ if (ret)
+ dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops m88ts2022_tuner_ops = {
+ .info = {
+ .name = "Montage M88TS2022",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ },
+
+ .init = m88ts2022_init,
+ .sleep = m88ts2022_sleep,
+ .set_params = m88ts2022_set_params,
+
+ .get_frequency = m88ts2022_get_frequency,
+ .get_if_frequency = m88ts2022_get_if_frequency,
+ .get_rf_strength = m88ts2022_get_rf_strength,
+};
+
+static int m88ts2022_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct m88ts2022_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct m88ts2022_priv *priv;
+ int ret;
+ u8 chip_id, u8tmp;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ goto err;
+ }
+
+ memcpy(&priv->cfg, cfg, sizeof(struct m88ts2022_config));
+ priv->client = client;
+
+ /* check if the tuner is there */
+ ret = m88ts2022_rd_reg(priv, 0x00, &u8tmp);
+ if (ret)
+ goto err;
+
+ if ((u8tmp & 0x03) == 0x00) {
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(2000, 50000);
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ if (ret)
+ goto err;
+
+ usleep_range(2000, 50000);
+
+ ret = m88ts2022_rd_reg(priv, 0x00, &chip_id);
+ if (ret)
+ goto err;
+
+ dev_dbg(&priv->client->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+
+ switch (chip_id) {
+ case 0xc3:
+ case 0x83:
+ break;
+ default:
+ goto err;
+ }
+
+ switch (priv->cfg.clock_out) {
+ case M88TS2022_CLOCK_OUT_DISABLED:
+ u8tmp = 0x60;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED:
+ u8tmp = 0x70;
+ ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ if (ret)
+ goto err;
+ break;
+ case M88TS2022_CLOCK_OUT_ENABLED_XTALOUT:
+ u8tmp = 0x6c;
+ break;
+ default:
+ goto err;
+ }
+
+ ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ if (ret)
+ goto err;
+
+ if (priv->cfg.loop_through)
+ u8tmp = 0xec;
+ else
+ u8tmp = 0x6c;
+
+ ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ if (ret)
+ goto err;
+
+ /* sleep */
+ ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ if (ret)
+ goto err;
+
+ dev_info(&priv->client->dev,
+ "%s: Montage M88TS2022 successfully identified\n",
+ KBUILD_MODNAME);
+
+ fe->tuner_priv = priv;
+ memcpy(&fe->ops.tuner_ops, &m88ts2022_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ i2c_set_clientdata(client, priv);
+ return 0;
+err:
+ dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ kfree(priv);
+ return ret;
+}
+
+static int m88ts2022_remove(struct i2c_client *client)
+{
+ struct m88ts2022_priv *priv = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = priv->cfg.fe;
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id m88ts2022_id[] = {
+ {"m88ts2022", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, m88ts2022_id);
+
+static struct i2c_driver m88ts2022_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "m88ts2022",
+ },
+ .probe = m88ts2022_probe,
+ .remove = m88ts2022_remove,
+ .id_table = m88ts2022_id,
+};
+
+module_i2c_driver(m88ts2022_driver);
+
+MODULE_DESCRIPTION("Montage M88TS2022 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
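To make the integer-N synthesizer arithmetic in m88ts2022_set_params() concrete, the standalone program below reruns the same kHz-domain formulas for one illustrative operating point, a 27 MHz reference clock and a 1280 MHz target; both values are assumptions for the example, not requirements of the driver.

/* Hedged sketch: rerun the kHz-domain PLL math from m88ts2022_set_params
 * for one illustrative operating point (27 MHz clock, 1280 MHz target).
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int clock_hz = 27000000;	/* assumed crystal */
	unsigned int frequency_khz = 1280000;	/* assumed DVB-S target */
	unsigned int f_ref_khz, div_ref, div_out, f_vco_khz, pll_n, tuned_khz;

	f_ref_khz = DIV_ROUND_CLOSEST(clock_hz, 1000);
	div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);

	/* Above 1103 MHz the output divider is 2, below it is 4. */
	div_out = (frequency_khz < 1103000) ? 4 : 2;

	f_vco_khz = frequency_khz * div_out;
	pll_n = f_vco_khz * div_ref / f_ref_khz;
	pll_n += pll_n % 2;	/* keep the feedback divider even */

	tuned_khz = pll_n * f_ref_khz / div_ref / div_out;

	printf("div_ref=%u div_out=%u pll_n=%u tuned=%u kHz (offset %d kHz)\n",
	       div_ref, div_out, pll_n, tuned_khz,
	       (int)tuned_khz - (int)frequency_khz);
	return 0;
}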
diff --git a/drivers/media/tuners/m88ts2022.h b/drivers/media/tuners/m88ts2022.h
new file mode 100644
index 000000000000..659fa1b1633a
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022.h
@@ -0,0 +1,54 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_H
+#define M88TS2022_H
+
+#include "dvb_frontend.h"
+
+struct m88ts2022_config {
+ /*
+ * clock
+ * 16000000 - 32000000
+ */
+ u32 clock;
+
+ /*
+ * RF loop-through
+ */
+ u8 loop_through:1;
+
+ /*
+ * clock output
+ */
+#define M88TS2022_CLOCK_OUT_DISABLED 0
+#define M88TS2022_CLOCK_OUT_ENABLED 1
+#define M88TS2022_CLOCK_OUT_ENABLED_XTALOUT 2
+ u8 clock_out:2;
+
+ /*
+ * clock output divider
+ * 1 - 31
+ */
+ u8 clock_out_div:5;
+
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
+};
+
+#endif
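A bridge or demod driver hands the board-specific settings to this tuner through platform_data when it instantiates the I2C client. The sketch below shows one plausible way to do that; the 0x60 address, the 27 MHz clock and the demo_ helper are illustrative assumptions, and i2c_new_device() is the client-instantiation call of this kernel generation.

/* Hedged sketch: instantiating the m88ts2022 client from a bridge driver.
 * The I2C address, clock value and helper name are illustrative assumptions.
 */
#include <linux/i2c.h>
#include "m88ts2022.h"

static struct i2c_client *demo_attach_tuner(struct i2c_adapter *adapter,
					    struct dvb_frontend *fe)
{
	struct m88ts2022_config cfg = {
		.clock = 27000000,	/* assumed crystal frequency */
		.clock_out = M88TS2022_CLOCK_OUT_DISABLED,
		.fe = fe,		/* demod frontend the tuner binds to */
	};
	struct i2c_board_info info = {
		I2C_BOARD_INFO("m88ts2022", 0x60),	/* assumed address */
	};

	info.platform_data = &cfg;
	/* probe() runs during registration and copies cfg, so the stack
	 * copy does not need to outlive this call. Returns NULL on failure.
	 */
	return i2c_new_device(adapter, &info);
}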
diff --git a/drivers/media/tuners/m88ts2022_priv.h b/drivers/media/tuners/m88ts2022_priv.h
new file mode 100644
index 000000000000..0363dd866a2d
--- /dev/null
+++ b/drivers/media/tuners/m88ts2022_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Montage M88TS2022 silicon tuner driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef M88TS2022_PRIV_H
+#define M88TS2022_PRIV_H
+
+#include "m88ts2022.h"
+
+struct m88ts2022_priv {
+ struct m88ts2022_config cfg;
+ struct i2c_client *client;
+ struct dvb_frontend *fe;
+ u32 frequency_khz;
+};
+
+struct m88ts2022_reg_val {
+ u8 reg;
+ u8 val;
+};
+
+#endif
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 4be5cf808a40..cca508d4aafb 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -134,15 +134,6 @@ struct xc2028_data {
_rc; \
})
-#define i2c_rcv(priv, buf, size) ({ \
- int _rc; \
- _rc = tuner_i2c_xfer_recv(&priv->i2c_props, buf, size); \
- if (size != _rc) \
- tuner_err("i2c input error: rc = %d (should be %d)\n", \
- _rc, (int)size); \
- _rc; \
-})
-
#define i2c_send_recv(priv, obuf, osize, ibuf, isize) ({ \
int _rc; \
_rc = tuner_i2c_xfer_send_recv(&priv->i2c_props, obuf, osize, \
@@ -276,6 +267,7 @@ static int check_device_status(struct xc2028_data *priv)
case XC2028_WAITING_FIRMWARE:
return -EAGAIN;
case XC2028_ACTIVE:
+ return 1;
case XC2028_SLEEP:
return 0;
case XC2028_NODEV:
@@ -718,6 +710,8 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
return 0;
}
+static int xc2028_sleep(struct dvb_frontend *fe);
+
static int check_firmware(struct dvb_frontend *fe, unsigned int type,
v4l2_std_id std, __u16 int_freq)
{
@@ -890,7 +884,7 @@ read_not_reliable:
return 0;
fail:
- priv->state = XC2028_SLEEP;
+ priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
@@ -900,6 +894,9 @@ fail:
goto retry;
}
+ /* Firmware didn't load. Put the device to sleep */
+ xc2028_sleep(fe);
+
if (rc == -ENOENT)
rc = -EINVAL;
return rc;
@@ -917,6 +914,12 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
if (rc < 0)
return rc;
+ /* If the device is sleeping, no channel is tuned */
+ if (!rc) {
+ *strength = 0;
+ return 0;
+ }
+
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
@@ -964,6 +967,12 @@ static int xc2028_get_afc(struct dvb_frontend *fe, s32 *afc)
if (rc < 0)
return rc;
+ /* If the device is sleeping, no channel is tuned */
+ if (!rc) {
+ *afc = 0;
+ return 0;
+ }
+
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
@@ -1281,6 +1290,10 @@ static int xc2028_sleep(struct dvb_frontend *fe)
if (rc < 0)
return rc;
+ /* Device is already in sleep mode */
+ if (!rc)
+ return 0;
+
/* Avoid firmware reload on slow devices or if PM disabled */
if (no_poweroff || priv->ctrl.disable_power_mgmt)
return 0;
@@ -1298,7 +1311,8 @@ static int xc2028_sleep(struct dvb_frontend *fe)
else
rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
- priv->state = XC2028_SLEEP;
+ if (rc >= 0)
+ priv->state = XC2028_SLEEP;
mutex_unlock(&priv->lock);
@@ -1366,7 +1380,7 @@ static void load_firmware_cb(const struct firmware *fw,
if (rc < 0)
return;
- priv->state = XC2028_SLEEP;
+ priv->state = XC2028_ACTIVE;
}
static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index cfe8056b91aa..39d824e2bb69 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -17,7 +17,6 @@ source "drivers/media/usb/cpia2/Kconfig"
source "drivers/media/usb/zr364xx/Kconfig"
source "drivers/media/usb/stkwebcam/Kconfig"
source "drivers/media/usb/s2255/Kconfig"
-source "drivers/media/usb/sn9c102/Kconfig"
source "drivers/media/usb/usbtv/Kconfig"
endif
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 0935f47497a6..7ac4b143dce8 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
obj-$(CONFIG_USB_GSPCA) += gspca/
obj-$(CONFIG_USB_PWC) += pwc/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
-obj-$(CONFIG_USB_SN9C102) += sn9c102/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bd9d19a73efd..ab45a6f9dcc9 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -173,9 +173,8 @@ static int au0828_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int ifnum;
-#ifdef CONFIG_VIDEO_AU0828_V4L2
- int retval;
-#endif
+ int retval = 0;
+
struct au0828_dev *dev;
struct usb_device *usbdev = interface_to_usbdev(interface);
@@ -257,7 +256,11 @@ static int au0828_usb_probe(struct usb_interface *interface,
#endif
/* Digital TV */
- au0828_dvb_register(dev);
+ retval = au0828_dvb_register(dev);
+ if (retval)
+ pr_err("%s() au0828_dvb_register failed\n",
+ __func__);
+
/* Store the pointer to the au0828_dev so it can be accessed in
au0828_usb_disconnect */
@@ -268,7 +271,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
mutex_unlock(&dev->lock);
- return 0;
+ return retval;
}
static struct usb_driver au0828_usb_driver = {
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 9a6f15613a38..4ae8b1074649 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -33,6 +33,10 @@
#include "mxl5007t.h"
#include "tda18271.h"
+static int preallocate_big_buffers;
+module_param_named(preallocate_big_buffers, preallocate_big_buffers, int, 0644);
+MODULE_PARM_DESC(preallocate_big_buffers, "Preallocate the larger transfer buffers at module load time");
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define _AU0828_BULKPIPE 0x83
@@ -153,9 +157,13 @@ static int stop_urb_transfer(struct au0828_dev *dev)
dev->urb_streaming = 0;
for (i = 0; i < URB_COUNT; i++) {
- usb_kill_urb(dev->urbs[i]);
- kfree(dev->urbs[i]->transfer_buffer);
- usb_free_urb(dev->urbs[i]);
+ if (dev->urbs[i]) {
+ usb_kill_urb(dev->urbs[i]);
+ if (!preallocate_big_buffers)
+ kfree(dev->urbs[i]->transfer_buffer);
+
+ usb_free_urb(dev->urbs[i]);
+ }
}
return 0;
@@ -181,10 +189,18 @@ static int start_urb_transfer(struct au0828_dev *dev)
purb = dev->urbs[i];
- purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
+ if (preallocate_big_buffers)
+ purb->transfer_buffer = dev->dig_transfer_buffer[i];
+ else
+ purb->transfer_buffer = kzalloc(URB_BUFSIZE,
+ GFP_KERNEL);
+
if (!purb->transfer_buffer) {
usb_free_urb(purb);
dev->urbs[i] = NULL;
+ printk(KERN_ERR
+ "%s: failed big buffer allocation, err = %d\n",
+ __func__, ret);
goto err;
}
@@ -217,6 +233,27 @@ err:
return ret;
}
+static void au0828_start_transport(struct au0828_dev *dev)
+{
+ au0828_write(dev, 0x608, 0x90);
+ au0828_write(dev, 0x609, 0x72);
+ au0828_write(dev, 0x60a, 0x71);
+ au0828_write(dev, 0x60b, 0x01);
+
+}
+
+static void au0828_stop_transport(struct au0828_dev *dev, int full_stop)
+{
+ if (full_stop) {
+ au0828_write(dev, 0x608, 0x00);
+ au0828_write(dev, 0x609, 0x00);
+ au0828_write(dev, 0x60a, 0x00);
+ }
+ au0828_write(dev, 0x60b, 0x00);
+}
+
+
+
static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
@@ -231,13 +268,17 @@ static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
if (dvb) {
mutex_lock(&dvb->lock);
+ dvb->start_count++;
+ dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+ dvb->start_count, dvb->stop_count);
if (dvb->feeding++ == 0) {
/* Start transport */
- au0828_write(dev, 0x608, 0x90);
- au0828_write(dev, 0x609, 0x72);
- au0828_write(dev, 0x60a, 0x71);
- au0828_write(dev, 0x60b, 0x01);
+ au0828_start_transport(dev);
ret = start_urb_transfer(dev);
+ if (ret < 0) {
+ au0828_stop_transport(dev, 0);
+ dvb->feeding--; /* We ran out of memory... */
+ }
}
mutex_unlock(&dvb->lock);
}
@@ -256,10 +297,16 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
if (dvb) {
mutex_lock(&dvb->lock);
- if (--dvb->feeding == 0) {
- /* Stop transport */
- ret = stop_urb_transfer(dev);
- au0828_write(dev, 0x60b, 0x00);
+ dvb->stop_count++;
+ dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
+ dvb->start_count, dvb->stop_count);
+ if (dvb->feeding > 0) {
+ dvb->feeding--;
+ if (dvb->feeding == 0) {
+ /* Stop transport */
+ ret = stop_urb_transfer(dev);
+ au0828_stop_transport(dev, 0);
+ }
}
mutex_unlock(&dvb->lock);
}
@@ -282,16 +329,10 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
/* Stop transport */
stop_urb_transfer(dev);
- au0828_write(dev, 0x608, 0x00);
- au0828_write(dev, 0x609, 0x00);
- au0828_write(dev, 0x60a, 0x00);
- au0828_write(dev, 0x60b, 0x00);
+ au0828_stop_transport(dev, 1);
/* Start transport */
- au0828_write(dev, 0x608, 0x90);
- au0828_write(dev, 0x609, 0x72);
- au0828_write(dev, 0x60a, 0x71);
- au0828_write(dev, 0x60b, 0x01);
+ au0828_start_transport(dev);
start_urb_transfer(dev);
mutex_unlock(&dvb->lock);
@@ -304,6 +345,23 @@ static int dvb_register(struct au0828_dev *dev)
dprintk(1, "%s()\n", __func__);
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++) {
+ dev->dig_transfer_buffer[i] = kzalloc(URB_BUFSIZE,
+ GFP_KERNEL);
+
+ if (!dev->dig_transfer_buffer[i]) {
+ result = -ENOMEM;
+
+ printk(KERN_ERR
+ "%s: failed buffer allocation (errno = %d)\n",
+ DRIVER_NAME, result);
+ goto fail_adapter;
+ }
+ }
+ }
+
INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming);
/* register adapter */
@@ -375,6 +433,9 @@ static int dvb_register(struct au0828_dev *dev)
/* register network adapter */
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+ dvb->start_count = 0;
+ dvb->stop_count = 0;
return 0;
fail_fe_conn:
@@ -391,6 +452,13 @@ fail_frontend:
dvb_frontend_detach(dvb->frontend);
dvb_unregister_adapter(&dvb->adapter);
fail_adapter:
+
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++)
+ kfree(dev->dig_transfer_buffer[i]);
+ }
+
return result;
}
@@ -411,6 +479,14 @@ void au0828_dvb_unregister(struct au0828_dev *dev)
dvb_unregister_frontend(dvb->frontend);
dvb_frontend_detach(dvb->frontend);
dvb_unregister_adapter(&dvb->adapter);
+
+ if (preallocate_big_buffers) {
+ int i;
+ for (i = 0; i < URB_COUNT; i++)
+ kfree(dev->dig_transfer_buffer[i]);
+ }
+
+
}
/* All the DVB attach calls go here, this function gets modified
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ef1f57f22be7..5439772c1551 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -102,6 +102,8 @@ struct au0828_dvb {
struct dmx_frontend fe_mem;
struct dvb_net net;
int feeding;
+ int start_count;
+ int stop_count;
};
enum au0828_stream_state {
@@ -260,6 +262,10 @@ struct au0828_dev {
/* USB / URB Related */
int urb_streaming;
struct urb *urbs[URB_COUNT];
+
+ /* Preallocated digital transfer buffers */
+
+ char *dig_transfer_buffer[URB_COUNT];
};
/* ----------------------------------------------------------- */
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 86feeeaf61c2..f14c5e89a567 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -45,6 +45,8 @@ config VIDEO_CX231XX_DVB
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 81a1d971d797..9b925874d392 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -665,8 +665,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
cx231xx_info("cx231xx-audio.c: probing for cx231xx "
"non standard usbaudio\n");
- err = snd_card_create(index[devnr], "Cx231xx Audio", THIS_MODULE,
- 0, &card);
+ err = snd_card_new(&dev->udev->dev, index[devnr], "Cx231xx Audio",
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -682,7 +682,6 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
- snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 528cce958a82..2ee03e4ddd86 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -709,6 +709,8 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
/* table of devices that work with this driver */
struct usb_device_id cx231xx_id_table[] = {
+ {USB_DEVICE(0x1D19, 0x6109),
+ .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
{USB_DEVICE(0x0572, 0x5A3C),
.driver_info = CX231XX_BOARD_UNKNOWN},
{USB_DEVICE(0x0572, 0x58A2),
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 96a5a0965399..7c0f797f1057 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -371,9 +371,9 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
- addr = msgs[i].addr >> 1;
+ addr = msgs[i].addr;
- dprintk2(2, "%s %s addr=%x len=%d:",
+ dprintk2(2, "%s %s addr=0x%x len=%d:",
(msgs[i].flags & I2C_M_RD) ? "read" : "write",
i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
if (!msgs[i].len) {
@@ -390,32 +390,41 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = cx231xx_i2c_recv_bytes(i2c_adap, &msgs[i]);
if (i2c_debug >= 2) {
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
}
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr
&& (msgs[i].len <= 2) && (bus->nr < 3)) {
+ /* write bytes */
+ if (i2c_debug >= 2) {
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT "\n");
+ }
/* read bytes */
+ dprintk2(2, "plus %s %s addr=0x%x len=%d:",
+ (msgs[i+1].flags & I2C_M_RD) ? "read" : "write",
+ i+1 == num - 1 ? "stop" : "nonstop", addr, msgs[i+1].len);
rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap,
&msgs[i],
&msgs[i + 1]);
if (i2c_debug >= 2) {
- for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ for (byte = 0; byte < msgs[i+1].len; byte++)
+ printk(KERN_CONT " %02x", msgs[i+1].buf[byte]);
}
i++;
} else {
/* write bytes */
if (i2c_debug >= 2) {
for (byte = 0; byte < msgs[i].len; byte++)
- printk(" %02x", msgs[i].buf[byte]);
+ printk(KERN_CONT " %02x", msgs[i].buf[byte]);
}
rc = cx231xx_i2c_send_bytes(i2c_adap, &msgs[i]);
}
if (rc < 0)
goto err;
if (i2c_debug >= 2)
- printk("\n");
+ printk(KERN_CONT "\n");
}
mutex_unlock(&dev->i2c_lock);
return num;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea88f0..8ede8ea762e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+ { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+ &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index 90cfa35ef6e6..eeab79bdd2aa 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -442,6 +442,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
* IOD[0] ZL10353 1=enabled
* IOE[0] tuner 0=enabled
* tuner is behind ZL10353 I2C-gate
+ * tuner is behind TDA10023 I2C-gate
*
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
@@ -956,7 +957,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
if (fe && adap->fe[1]) {
/* attach tuner for 2nd FE */
- fe = dvb_attach(dvb_pll_attach, adap->fe[0],
+ fe = dvb_attach(dvb_pll_attach, adap->fe[1],
(0xc0 >> 1), &d->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
}
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 44c64ef361bf..c1051c347744 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -68,6 +68,19 @@ static struct drxk_config terratec_h7_drxk = {
.microcode_name = "dvb-usb-terratec-h7-drxk.fw",
};
+static struct drxk_config cablestar_hdci_drxk = {
+ .adr = 0x29,
+ .parallel_ts = true,
+ .dynamic_clk = true,
+ .single_master = true,
+ .enable_merr_cfg = true,
+ .no_i2c_bridge = false,
+ .chunk_size = 64,
+ .mpeg_out_clk_strength = 0x02,
+ .qam_demod_parameter_count = 2,
+ .microcode_name = "dvb-usb-technisat-cablestar-hdci-drxk.fw",
+};
+
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct az6007_device_state *st = fe_to_priv(fe);
@@ -630,6 +643,27 @@ static int az6007_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int az6007_cablestar_hdci_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct az6007_device_state *st = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+
+ pr_debug("attaching demod drxk\n");
+
+ adap->fe[0] = dvb_attach(drxk_attach, &cablestar_hdci_drxk,
+ &d->i2c_adap);
+ if (!adap->fe[0])
+ return -EINVAL;
+
+ adap->fe[0]->sec_priv = adap;
+ st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl;
+ adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+
+ az6007_ci_init(adap);
+
+ return 0;
+}
+
static int az6007_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
@@ -868,6 +902,29 @@ static struct dvb_usb_device_properties az6007_props = {
}
};
+static struct dvb_usb_device_properties az6007_cablestar_hdci_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .firmware = AZ6007_FIRMWARE,
+
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct az6007_device_state),
+ .i2c_algo = &az6007_i2c_algo,
+ .tuner_attach = az6007_tuner_attach,
+ .frontend_attach = az6007_cablestar_hdci_frontend_attach,
+ .streaming_ctrl = az6007_streaming_ctrl,
+/* ditch get_rc_config as it can't work (TS35 remote, I believe it's rc5) */
+ .get_rc_config = NULL,
+ .read_mac_address = az6007_read_mac_addr,
+ .download_firmware = az6007_download_firmware,
+ .identify_state = az6007_identify_state,
+ .power_ctrl = az6007_power_ctrl,
+ .num_adapters = 1,
+ .adapter = {
+ { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), }
+ }
+};
+
static struct usb_device_id az6007_usb_table[] = {
{DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007,
&az6007_props, "Azurewave 6007", RC_MAP_EMPTY)},
@@ -875,6 +932,8 @@ static struct usb_device_id az6007_usb_table[] = {
&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
{DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_2,
&az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)},
+ {DVB_USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI,
+ &az6007_cablestar_hdci_props, "Technisat CableStar Combo HD CI", RC_MAP_EMPTY)},
{0},
};
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 5c68f3918bc8..0c2b377704ff 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -170,7 +170,7 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
error:
mutex_unlock(&d->i2c_mutex);
- return i;
+ return ret;
}
static u32 ec168_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 1cb6899cf797..fe95a586dd5d 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -799,6 +799,9 @@ static const struct usb_device_id it913x_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
&it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2",
RC_MAP_IT913X_V1) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
+ &it913x_properties, "Avermedia H335",
+ RC_MAP_IT913X_V2) },
{} /* Terminating entry */
};
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-reg.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state
+ struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
ret = -EINVAL;
}
- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
fail:
return ret;
}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
module_usb_driver(mxl111sf_usb_driver);
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index ecca03667f98..fda5c64ba0e8 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1407,6 +1407,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "Dexatek DK DVB-T Dongle", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6680,
&rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_MINID,
+ &rtl2832u_props, "Leadtek Winfast DTV Dongle Mini D", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
&rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 20e345d9fe8f..a1c641e18362 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
@@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (1 + msg[i].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = 0;
obuf[1] = msg[i].len;
@@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (3 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
if (1 + msg[i + 1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i + 1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].len;
obuf[1] = msg[i+1].len;
@@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].addr;
obuf[1] = msg[i].len;
@@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
+ if (i == num)
+ ret = num;
+ else
+ ret = -EREMOTEIO;
+
+unlock:
mutex_unlock(&d->i2c_mutex);
- return i == num ? num : -EREMOTEIO;
+ return ret;
}
static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
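The cxusb_i2c_xfer() hunks above (and the dw2102.c hunks that follow) all fix the same class of bug: the "len=%d is too big" error paths returned -EOPNOTSUPP while d->i2c_mutex was still held, leaving the adapter locked for good. The fix funnels every exit through a single unlock label. A minimal sketch of that pattern, with hypothetical names (my_xfer, my_dev, MY_MAX_XFER):

static int my_xfer(struct my_dev *d, struct i2c_msg msg[], int num)
{
	int ret;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	if (msg[0].len > MY_MAX_XFER) {
		/* error: set the return code, but keep one exit path */
		ret = -EOPNOTSUPP;
		goto unlock;
	}

	/* ... perform the transfer ... */
	ret = num;

unlock:
	mutex_unlock(&d->i2c_mutex);	/* released on every path */
	return ret;
}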
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c1a63b2a6baa..ae0f56a32e4d 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2,7 +2,7 @@
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
* TeVii S600, S630, S650, S660, S480, S421, S632
* Prof 1100, 7500,
- * Geniatech SU3000 Cards
+ * Geniatech SU3000, T220 Cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "stb6100.h"
#include "stb6100_proc.h"
#include "m88rs2000.h"
+#include "tda18271.h"
+#include "cxd2820r.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
@@ -110,11 +112,6 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct rc_map_dvb_usb_table_table {
- struct rc_map_table *rc_keys;
- int rc_keys_size;
-};
-
struct su3000_state {
u8 initialized;
};
@@ -129,12 +126,6 @@ module_param_named(debug, dvb_usb_dw2102_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
DVB_USB_DEBUG_STATUS);
-/* keymaps */
-static int ir_keymap;
-module_param_named(keymap, ir_keymap, int, 0644);
-MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."
- " 256=none");
-
/* demod probe */
static int demod_probe = 1;
module_param_named(demod, demod_probe, int, 0644);
@@ -301,6 +292,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
if (!d)
return -ENODEV;
@@ -316,7 +308,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -340,7 +333,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -357,7 +351,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -386,15 +381,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
break;
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -430,7 +427,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0xc3,
@@ -466,7 +464,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].addr << 1;
@@ -481,15 +480,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (!d)
@@ -506,7 +508,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -530,7 +533,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -556,9 +560,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
msg[i].flags == 0 ? ">>>" : "<<<");
debug_dump(msg[i].buf, msg[i].len, deb_xfer);
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -566,7 +572,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -618,7 +624,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0x91, 0, 0,
@@ -652,7 +659,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j + 1].len;
@@ -671,7 +679,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
@@ -685,9 +694,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -1095,6 +1106,16 @@ static struct ds3000_config su3000_ds3000_config = {
.set_lock_led = dw210x_led_ctrl,
};
+static struct cxd2820r_config cxd2820r_config = {
+ .i2c_address = 0x6c, /* (0xd8 >> 1) */
+ .ts_mode = 0x38,
+};
+
+static struct tda18271_config tda18271_config = {
+ .output_opt = TDA18271_OUTPUT_LT_OFF,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static u8 m88rs2000_inittab[] = {
DEMOD_WRITE, 0x9a, 0x30,
DEMOD_WRITE, 0x00, 0x01,
@@ -1364,6 +1385,49 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
+static int t220_frontend_attach(struct dvb_usb_adapter *d)
+{
+ u8 obuf[3] = { 0xe, 0x80, 0 };
+ u8 ibuf[] = { 0 };
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x83;
+ obuf[2] = 0;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ msleep(100);
+
+ obuf[0] = 0xe;
+ obuf[1] = 0x80;
+ obuf[2] = 1;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ err("command 0x0e transfer failed.");
+
+ obuf[0] = 0x51;
+
+ if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ err("command 0x51 transfer failed.");
+
+ d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+ &d->dev->i2c_adap, NULL);
+ if (d->fe_adap[0].fe != NULL) {
+ if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
+ &d->dev->i2c_adap, &tda18271_config)) {
+ info("Attached TDA18271HD/CXD2820R!\n");
+ return 0;
+ }
+ }
+
+ info("Failed to attach TDA18271HD/CXD2820R!\n");
+ return -EIO;
+}
+
static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[] = { 0x51 };
@@ -1404,174 +1468,29 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct rc_map_table rc_map_dw210x_table[] = {
- { 0xf80a, KEY_POWER2 }, /*power*/
- { 0xf80c, KEY_MUTE }, /*mute*/
- { 0xf811, KEY_1 },
- { 0xf812, KEY_2 },
- { 0xf813, KEY_3 },
- { 0xf814, KEY_4 },
- { 0xf815, KEY_5 },
- { 0xf816, KEY_6 },
- { 0xf817, KEY_7 },
- { 0xf818, KEY_8 },
- { 0xf819, KEY_9 },
- { 0xf810, KEY_0 },
- { 0xf81c, KEY_CHANNELUP }, /*ch+*/
- { 0xf80f, KEY_CHANNELDOWN }, /*ch-*/
- { 0xf81a, KEY_VOLUMEUP }, /*vol+*/
- { 0xf80e, KEY_VOLUMEDOWN }, /*vol-*/
- { 0xf804, KEY_RECORD }, /*rec*/
- { 0xf809, KEY_FAVORITES }, /*fav*/
- { 0xf808, KEY_REWIND }, /*rewind*/
- { 0xf807, KEY_FASTFORWARD }, /*fast*/
- { 0xf80b, KEY_PAUSE }, /*pause*/
- { 0xf802, KEY_ESC }, /*cancel*/
- { 0xf803, KEY_TAB }, /*tab*/
- { 0xf800, KEY_UP }, /*up*/
- { 0xf81f, KEY_OK }, /*ok*/
- { 0xf801, KEY_DOWN }, /*down*/
- { 0xf805, KEY_CAMERA }, /*cap*/
- { 0xf806, KEY_STOP }, /*stop*/
- { 0xf840, KEY_ZOOM }, /*full*/
- { 0xf81e, KEY_TV }, /*tvmode*/
- { 0xf81b, KEY_LAST }, /*recall*/
-};
-
-static struct rc_map_table rc_map_tevii_table[] = {
- { 0xf80a, KEY_POWER },
- { 0xf80c, KEY_MUTE },
- { 0xf811, KEY_1 },
- { 0xf812, KEY_2 },
- { 0xf813, KEY_3 },
- { 0xf814, KEY_4 },
- { 0xf815, KEY_5 },
- { 0xf816, KEY_6 },
- { 0xf817, KEY_7 },
- { 0xf818, KEY_8 },
- { 0xf819, KEY_9 },
- { 0xf810, KEY_0 },
- { 0xf81c, KEY_MENU },
- { 0xf80f, KEY_VOLUMEDOWN },
- { 0xf81a, KEY_LAST },
- { 0xf80e, KEY_OPEN },
- { 0xf804, KEY_RECORD },
- { 0xf809, KEY_VOLUMEUP },
- { 0xf808, KEY_CHANNELUP },
- { 0xf807, KEY_PVR },
- { 0xf80b, KEY_TIME },
- { 0xf802, KEY_RIGHT },
- { 0xf803, KEY_LEFT },
- { 0xf800, KEY_UP },
- { 0xf81f, KEY_OK },
- { 0xf801, KEY_DOWN },
- { 0xf805, KEY_TUNER },
- { 0xf806, KEY_CHANNELDOWN },
- { 0xf840, KEY_PLAYPAUSE },
- { 0xf81e, KEY_REWIND },
- { 0xf81b, KEY_FAVORITES },
- { 0xf81d, KEY_BACK },
- { 0xf84d, KEY_FASTFORWARD },
- { 0xf844, KEY_EPG },
- { 0xf84c, KEY_INFO },
- { 0xf841, KEY_AB },
- { 0xf843, KEY_AUDIO },
- { 0xf845, KEY_SUBTITLE },
- { 0xf84a, KEY_LIST },
- { 0xf846, KEY_F1 },
- { 0xf847, KEY_F2 },
- { 0xf85e, KEY_F3 },
- { 0xf85c, KEY_F4 },
- { 0xf852, KEY_F5 },
- { 0xf85a, KEY_F6 },
- { 0xf856, KEY_MODE },
- { 0xf858, KEY_SWITCHVIDEOMODE },
-};
-
-static struct rc_map_table rc_map_tbs_table[] = {
- { 0xf884, KEY_POWER },
- { 0xf894, KEY_MUTE },
- { 0xf887, KEY_1 },
- { 0xf886, KEY_2 },
- { 0xf885, KEY_3 },
- { 0xf88b, KEY_4 },
- { 0xf88a, KEY_5 },
- { 0xf889, KEY_6 },
- { 0xf88f, KEY_7 },
- { 0xf88e, KEY_8 },
- { 0xf88d, KEY_9 },
- { 0xf892, KEY_0 },
- { 0xf896, KEY_CHANNELUP },
- { 0xf891, KEY_CHANNELDOWN },
- { 0xf893, KEY_VOLUMEUP },
- { 0xf88c, KEY_VOLUMEDOWN },
- { 0xf883, KEY_RECORD },
- { 0xf898, KEY_PAUSE },
- { 0xf899, KEY_OK },
- { 0xf89a, KEY_SHUFFLE },
- { 0xf881, KEY_UP },
- { 0xf890, KEY_LEFT },
- { 0xf882, KEY_RIGHT },
- { 0xf888, KEY_DOWN },
- { 0xf895, KEY_FAVORITES },
- { 0xf897, KEY_SUBTITLE },
- { 0xf89d, KEY_ZOOM },
- { 0xf89f, KEY_EXIT },
- { 0xf89e, KEY_MENU },
- { 0xf89c, KEY_EPG },
- { 0xf880, KEY_PREVIOUS },
- { 0xf89b, KEY_MODE }
-};
+static int dw2102_rc_query(struct dvb_usb_device *d)
+{
+ u8 key[2];
+ struct i2c_msg msg = {
+ .addr = DW2102_RC_QUERY,
+ .flags = I2C_M_RD,
+ .buf = key,
+ .len = 2
+ };
-static struct rc_map_table rc_map_su3000_table[] = {
- { 0x25, KEY_POWER }, /* right-bottom Red */
- { 0x0a, KEY_MUTE }, /* -/-- */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
- { 0x20, KEY_UP }, /* CH+ */
- { 0x21, KEY_DOWN }, /* CH+ */
- { 0x12, KEY_VOLUMEUP }, /* Brightness Up */
- { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */
- { 0x1f, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x16, KEY_PAUSE },
- { 0x0b, KEY_STOP },
- { 0x27, KEY_FASTFORWARD },/* >> */
- { 0x26, KEY_REWIND }, /* << */
- { 0x0d, KEY_OK }, /* Mute */
- { 0x11, KEY_LEFT }, /* VOL- */
- { 0x10, KEY_RIGHT }, /* VOL+ */
- { 0x29, KEY_BACK }, /* button under 9 */
- { 0x2c, KEY_MENU }, /* TTX */
- { 0x2b, KEY_EPG }, /* EPG */
- { 0x1e, KEY_RED }, /* OSD */
- { 0x0e, KEY_GREEN }, /* Window */
- { 0x2d, KEY_YELLOW }, /* button under << */
- { 0x0f, KEY_BLUE }, /* bottom yellow button */
- { 0x14, KEY_AUDIO }, /* Snapshot */
- { 0x38, KEY_TV }, /* TV/Radio */
- { 0x0c, KEY_ESC } /* upper Red button */
-};
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[0], 1);
+ }
+ }
-static struct rc_map_dvb_usb_table_table keys_tables[] = {
- { rc_map_dw210x_table, ARRAY_SIZE(rc_map_dw210x_table) },
- { rc_map_tevii_table, ARRAY_SIZE(rc_map_tevii_table) },
- { rc_map_tbs_table, ARRAY_SIZE(rc_map_tbs_table) },
- { rc_map_su3000_table, ARRAY_SIZE(rc_map_su3000_table) },
-};
+ return 0;
+}
-static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int prof_rc_query(struct dvb_usb_device *d)
{
- struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table;
- int keymap_size = d->props.rc.legacy.rc_map_size;
u8 key[2];
struct i2c_msg msg = {
.addr = DW2102_RC_QUERY,
@@ -1579,32 +1498,34 @@ static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
.buf = key,
.len = 2
};
- int i;
- /* override keymap */
- if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) {
- keymap = keys_tables[ir_keymap - 1].rc_keys ;
- keymap_size = keys_tables[ir_keymap - 1].rc_keys_size;
- } else if (ir_keymap > ARRAY_SIZE(keys_tables))
- return 0; /* none */
-
- *state = REMOTE_NO_KEY_PRESSED;
- if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
- for (i = 0; i < keymap_size ; i++) {
- if (rc5_data(&keymap[i]) == msg.buf[0]) {
- *state = REMOTE_KEY_PRESSED;
- *event = keymap[i].keycode;
- break;
- }
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[0]^0xff, 1);
}
+ }
- if ((*state) == REMOTE_KEY_PRESSED)
- deb_rc("%s: found rc key: %x, %x, event: %x\n",
- __func__, key[0], key[1], (*event));
- else if (key[0] != 0xff)
- deb_rc("%s: unknown rc key: %x, %x\n",
- __func__, key[0], key[1]);
+ return 0;
+}
+static int su3000_rc_query(struct dvb_usb_device *d)
+{
+ u8 key[2];
+ struct i2c_msg msg = {
+ .addr = DW2102_RC_QUERY,
+ .flags = I2C_M_RD,
+ .buf = key,
+ .len = 2
+ };
+
+ if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
+ if (msg.buf[0] != 0xff) {
+ deb_rc("%s: rc code: %x, %x\n",
+ __func__, key[0], key[1]);
+ rc_keydown(d->rc_dev, key[1] << 8 | key[0], 1);
+ }
}
return 0;
@@ -1630,6 +1551,7 @@ enum dw2102_table_entry {
TEVII_S632,
TERRATEC_CINERGY_S2_R2,
GOTVIEW_SAT_HD,
+ GENIATECH_T220,
};
static struct usb_device_id dw2102_table[] = {
@@ -1652,6 +1574,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
+ [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
{ }
};
@@ -1711,9 +1634,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
/* init registers */
switch (dev->descriptor.idProduct) {
case USB_PID_TEVII_S650:
- dw2104_properties.rc.legacy.rc_map_table = rc_map_tevii_table;
- dw2104_properties.rc.legacy.rc_map_size =
- ARRAY_SIZE(rc_map_tevii_table);
+ dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1777,10 +1698,11 @@ static struct dvb_usb_device_properties dw2102_properties = {
.i2c_algo = &dw2102_serit_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1831,10 +1753,11 @@ static struct dvb_usb_device_properties dw2104_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1881,10 +1804,11 @@ static struct dvb_usb_device_properties dw3101_properties = {
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_dw210x_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_DM1105_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -1929,10 +1853,11 @@ static struct dvb_usb_device_properties s6x0_properties = {
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_tevii_table,
- .rc_map_size = ARRAY_SIZE(rc_map_tevii_table),
+ .rc.core = {
.rc_interval = 150,
+ .rc_codes = RC_MAP_TEVII_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_NEC,
.rc_query = dw2102_rc_query,
},
@@ -2022,11 +1947,12 @@ static struct dvb_usb_device_properties su3000_properties = {
.identify_state = su3000_identify_state,
.i2c_algo = &su3000_i2c_algo,
- .rc.legacy = {
- .rc_map_table = rc_map_su3000_table,
- .rc_map_size = ARRAY_SIZE(rc_map_su3000_table),
+ .rc.core = {
.rc_interval = 150,
- .rc_query = dw2102_rc_query,
+ .rc_codes = RC_MAP_SU3000,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_RC5,
+ .rc_query = su3000_rc_query,
},
.read_mac_address = su3000_read_mac_address,
@@ -2077,6 +2003,55 @@ static struct dvb_usb_device_properties su3000_properties = {
}
};
+static struct dvb_usb_device_properties t220_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct su3000_state),
+ .power_ctrl = su3000_power_ctrl,
+ .num_adapters = 1,
+ .identify_state = su3000_identify_state,
+ .i2c_algo = &su3000_i2c_algo,
+
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_SU3000,
+ .module_name = "dw2102",
+ .allowed_protos = RC_BIT_RC5,
+ .rc_query = su3000_rc_query,
+ },
+
+ .read_mac_address = su3000_read_mac_address,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = { {
+ .streaming_ctrl = su3000_streaming_ctrl,
+ .frontend_attach = t220_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ }
+ } },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Geniatech T220 DVB-T/T2 USB2.0",
+ { &dw2102_table[GENIATECH_T220], NULL },
+ { NULL },
+ },
+ }
+};
+
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2088,8 +2063,8 @@ static int dw2102_probe(struct usb_interface *intf,
/* fill only different fields */
p1100->firmware = P1100_FIRMWARE;
p1100->devices[0] = d1100;
- p1100->rc.legacy.rc_map_table = rc_map_tbs_table;
- p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+ p1100->rc.core.rc_query = prof_rc_query;
+ p1100->rc.core.rc_codes = RC_MAP_TBS_NEC;
p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
s660 = kmemdup(&s6x0_properties,
@@ -2114,8 +2089,8 @@ static int dw2102_probe(struct usb_interface *intf,
}
p7500->firmware = P7500_FIRMWARE;
p7500->devices[0] = d7500;
- p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
- p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
+ p7500->rc.core.rc_query = prof_rc_query;
+ p7500->rc.core.rc_codes = RC_MAP_TBS_NEC;
p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
@@ -2149,7 +2124,9 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, s421,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &su3000_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &t220_properties,
+ THIS_MODULE, NULL, adapter_nr))
return 0;
return -ENODEV;
@@ -2169,7 +2146,7 @@ MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
" TeVii S600, S630, S650, S660, S480, S421, S632"
" Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000 devices");
+ " Geniatech SU3000, T220 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
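The dw2102.c changes above drop the in-driver scancode tables and the "keymap" module parameter and move every board to the rc-core interface: the driver's rc_query callback now only reports the raw code via rc_keydown(), and rc-core performs the keymap lookup through the rc_codes map named in the device properties. A minimal sketch of such a polling callback, assuming the rc.core callback shape shown above; my_read_rc() stands in for the device-specific I2C read:

static int my_rc_query(struct dvb_usb_device *d)
{
	u8 key[2];

	if (my_read_rc(d, key) < 0)		/* hypothetical helper */
		return 0;			/* poll again next rc_interval */

	if (key[0] != 0xff)			/* 0xff means "no key pressed" here */
		rc_keydown(d->rc_dev, key[0], 1);

	return 0;
}

/* wired into the properties as in the hunks above:
 *	.rc.core = {
 *		.rc_interval    = 150,
 *		.rc_codes       = RC_MAP_DM1105_NEC,
 *		.module_name    = "dw2102",
 *		.allowed_protos = RC_BIT_NEC,
 *		.rc_query       = my_rc_query,
 *	},
 */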
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index ca5ee6aceb62..a1fccf3096de 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -1,8 +1,12 @@
config VIDEO_EM28XX
- tristate "Empia EM28xx USB video capture support"
+ tristate "Empia EM28xx USB devices support"
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
+
+config VIDEO_EM28XX_V4L2
+ tristate "Empia EM28xx analog TV, video capture and/or webcam support"
+ depends on VIDEO_EM28XX
select VIDEOBUF2_VMALLOC
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
@@ -49,6 +53,8 @@ config VIDEO_EM28XX_DVB
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/Makefile b/drivers/media/usb/em28xx/Makefile
index ad6d48557940..3f850d5063d0 100644
--- a/drivers/media/usb/em28xx/Makefile
+++ b/drivers/media/usb/em28xx/Makefile
@@ -1,10 +1,11 @@
-em28xx-y += em28xx-video.o em28xx-i2c.o em28xx-cards.o
-em28xx-y += em28xx-core.o em28xx-vbi.o em28xx-camera.o
+em28xx-y += em28xx-core.o em28xx-i2c.o em28xx-cards.o em28xx-camera.o
+em28xx-v4l-objs := em28xx-video.o em28xx-vbi.o
em28xx-alsa-objs := em28xx-audio.o
em28xx-rc-objs := em28xx-input.o
obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
+obj-$(CONFIG_VIDEO_EM28XX_V4L2) += em28xx-v4l.o
obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
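The Kconfig/Makefile hunks split the analog V4L2 side of em28xx out of the core module into a separate em28xx-v4l module, loaded on demand like em28xx-alsa, em28xx-dvb and em28xx-rc (the request_module("em28xx-v4l") call appears in the em28xx-cards.c hunks further down). A rough skeleton of such an extension module, assuming the em28xx_register_extension()/em28xx_ops interface already used by the other extensions; the field names and the EM28XX_V4L2 id are assumptions, not the actual em28xx-v4l code:

/* assumed extension interface, as used by em28xx-alsa/-dvb/-rc */
static int my_v4l2_init(struct em28xx *dev)
{
	/* register video/vbi nodes, allocate per-extension state */
	return 0;
}

static int my_v4l2_fini(struct em28xx *dev)
{
	/* unregister nodes, free state */
	return 0;
}

static struct em28xx_ops my_v4l2_ops = {
	.id   = EM28XX_V4L2,		/* assumed extension id */
	.name = "Em28xx v4l2 Extension",
	.init = my_v4l2_init,
	.fini = my_v4l2_fini,
};

static int __init my_v4l2_register(void)
{
	return em28xx_register_extension(&my_v4l2_ops);
}

static void __exit my_v4l2_unregister(void)
{
	em28xx_unregister_extension(&my_v4l2_ops);
}

module_init(my_v4l2_register);
module_exit(my_v4l2_unregister);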
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 2fdb66ee44ab..1a28897af183 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2007-2014 Mauro Carvalho Chehab
* - Port to work with the in-kernel driver
* - Cleanups, fixes, alsa-controls, etc.
*
@@ -50,6 +50,9 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
+#define EM28XX_MAX_AUDIO_BUFS 5
+#define EM28XX_MIN_AUDIO_PACKETS 64
+
#define dprintk(fmt, arg...) do { \
if (debug) \
printk(KERN_INFO "em28xx-audio %s: " fmt, \
@@ -63,17 +66,13 @@ static int em28xx_deinit_isoc_audio(struct em28xx *dev)
int i;
dprintk("Stopping isoc\n");
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ struct urb *urb = dev->adev.urb[i];
+
if (!irqs_disabled())
- usb_kill_urb(dev->adev.urb[i]);
+ usb_kill_urb(urb);
else
- usb_unlink_urb(dev->adev.urb[i]);
-
- usb_free_urb(dev->adev.urb[i]);
- dev->adev.urb[i] = NULL;
-
- kfree(dev->adev.transfer_buffer[i]);
- dev->adev.transfer_buffer[i] = NULL;
+ usb_unlink_urb(urb);
}
return 0;
@@ -91,6 +90,12 @@ static void em28xx_audio_isocirq(struct urb *urb)
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
+ if (dev->disconnected) {
+ dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+ atomic_set(&dev->stream_started, 0);
+ return;
+ }
+
switch (urb->status) {
case 0: /* success */
case -ETIMEDOUT: /* NAK */
@@ -158,63 +163,27 @@ static void em28xx_audio_isocirq(struct urb *urb)
urb->status = 0;
status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
+ if (status < 0)
em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
status);
- }
return;
}
static int em28xx_init_audio_isoc(struct em28xx *dev)
{
int i, errCode;
- const int sb_size = EM28XX_NUM_AUDIO_PACKETS *
- EM28XX_AUDIO_MAX_PACKET_SIZE;
dprintk("Starting isoc transfers\n");
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
- struct urb *urb;
- int j, k;
-
- dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
- if (!dev->adev.transfer_buffer[i])
- return -ENOMEM;
-
- memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
- urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
- if (!urb) {
- em28xx_errdev("usb_alloc_urb failed!\n");
- for (j = 0; j < i; j++) {
- usb_free_urb(dev->adev.urb[j]);
- kfree(dev->adev.transfer_buffer[j]);
- }
- return -ENOMEM;
- }
-
- urb->dev = dev->udev;
- urb->context = dev;
- urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = dev->adev.transfer_buffer[i];
- urb->interval = 1;
- urb->complete = em28xx_audio_isocirq;
- urb->number_of_packets = EM28XX_NUM_AUDIO_PACKETS;
- urb->transfer_buffer_length = sb_size;
-
- for (j = k = 0; j < EM28XX_NUM_AUDIO_PACKETS;
- j++, k += EM28XX_AUDIO_MAX_PACKET_SIZE) {
- urb->iso_frame_desc[j].offset = k;
- urb->iso_frame_desc[j].length =
- EM28XX_AUDIO_MAX_PACKET_SIZE;
- }
- dev->adev.urb[i] = urb;
- }
+ /* Start streaming */
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ memset(dev->adev.transfer_buffer[i], 0x80,
+ dev->adev.urb[i]->transfer_buffer_length);
- for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
- em28xx_errdev("submit of audio urb failed\n");
+ em28xx_errdev("submit of audio urb failed (error=%i)\n",
+ errCode);
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->stream_started, 0);
return errCode;
@@ -255,15 +224,26 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */
- .period_bytes_min = 64, /* 12544/2, */
- .period_bytes_max = 12544,
+
+
+ /*
+ * The period is 12288 bytes. Allow a 10% variation around this
+ * value, in order to avoid overruns/underruns due to some clock
+ * drift.
+ *
+ * FIXME: This period assumes 64 packets, and a 48000 PCM rate.
+ * Calculate it dynamically.
+ */
+ .period_bytes_min = 11059,
+ .period_bytes_max = 13516,
+
.periods_min = 2,
.periods_max = 98, /* 12544, */
};
@@ -274,28 +254,48 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int ret = 0;
- dprintk("opening device and trying to acquire exclusive lock\n");
-
if (!dev) {
em28xx_err("BUG: em28xx can't find device struct."
" Can't proceed with open\n");
return -ENODEV;
}
+ if (dev->disconnected)
+ return -ENODEV;
+
+ dprintk("opening device and trying to acquire exclusive lock\n");
+
runtime->hw = snd_em28xx_hw_capture;
- if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
- if (dev->audio_ifnum)
+ if ((dev->alt == 0 || dev->is_audio_only) && dev->adev.users == 0) {
+ int nonblock = !!(substream->f_flags & O_NONBLOCK);
+
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
+ if (dev->is_audio_only)
+ /* vendor audio is on a separate interface */
dev->alt = 1;
else
+ /* vendor audio is on the same interface as video */
dev->alt = 7;
+ /*
+ * FIXME: The intention seems to be to select the alt
+ * setting with the largest wMaxPacketSize for the video
+ * endpoint.
+ * At least dev->alt should be used instead, but we
+ * should probably not touch it at all if it is
+ * already >0, because wMaxPacketSize of the audio
+ * endpoints seems to be the same for all.
+ */
dprintk("changing alternate number on interface %d to %d\n",
- dev->audio_ifnum, dev->alt);
- usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
+ dev->ifnum, dev->alt);
+ usb_set_interface(dev->udev, dev->ifnum, dev->alt);
/* Sets volume, mute, etc */
dev->mute = 0;
- mutex_lock(&dev->lock);
ret = em28xx_audio_analog_set(dev);
if (ret < 0)
goto err;
@@ -304,7 +304,12 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
mutex_unlock(&dev->lock);
}
+ /* Dynamically adjust the period size */
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ dev->adev.period * 95 / 100,
+ dev->adev.period * 105 / 100);
+
dev->adev.capture_pcm_substream = substream;
return 0;
@@ -344,6 +349,10 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret;
+ struct em28xx *dev = snd_pcm_substream_chip(substream);
+
+ if (dev->disconnected)
+ return -ENODEV;
dprintk("Setting capture parameters\n");
@@ -383,6 +392,9 @@ static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
+ if (dev->disconnected)
+ return -ENODEV;
+
dev->adev.hwptr_done_capture = 0;
dev->adev.capture_transfer_done = 0;
@@ -408,6 +420,9 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
struct em28xx *dev = snd_pcm_substream_chip(substream);
int retval = 0;
+ if (dev->disconnected)
+ return -ENODEV;
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
@@ -434,6 +449,9 @@ static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
snd_pcm_uframes_t hwptr_done;
dev = snd_pcm_substream_chip(substream);
+ if (dev->disconnected)
+ return SNDRV_PCM_POS_XRUN;
+
spin_lock_irqsave(&dev->adev.slock, flags);
hwptr_done = dev->adev.hwptr_done_capture;
spin_unlock_irqrestore(&dev->adev.slock, flags);
@@ -455,6 +473,11 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+
+ if (dev->disconnected)
+ return -ENODEV;
+
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 2;
info->value.integer.min = 0;
@@ -467,11 +490,22 @@ static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
(0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
+ int nonblock = 0;
int rc;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
rc = em28xx_read_ac97(dev, kcontrol->private_value);
if (rc < 0)
goto err;
@@ -496,9 +530,20 @@ static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int val;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
val = em28xx_read_ac97(dev, kcontrol->private_value);
mutex_unlock(&dev->lock);
if (val < 0)
@@ -520,9 +565,20 @@ static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol,
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
u16 val = value->value.integer.value[0];
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int rc;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
rc = em28xx_read_ac97(dev, kcontrol->private_value);
if (rc < 0)
goto err;
@@ -550,9 +606,20 @@ static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
+ int nonblock = 0;
int val;
- mutex_lock(&dev->lock);
+ if (dev->disconnected)
+ return -ENODEV;
+
+ if (substream)
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+ if (nonblock) {
+ if (!mutex_trylock(&dev->lock))
+ return -EAGAIN;
+ } else
+ mutex_lock(&dev->lock);
val = em28xx_read_ac97(dev, kcontrol->private_value);
mutex_unlock(&dev->lock);
if (val < 0)
@@ -634,44 +701,223 @@ static struct snd_pcm_ops snd_em28xx_pcm_capture = {
.page = snd_pcm_get_vmalloc_page,
};
+static void em28xx_audio_free_urb(struct em28xx *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->adev.num_urb; i++) {
+ struct urb *urb = dev->adev.urb[i];
+
+ if (!urb)
+ continue;
+
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ dev->adev.transfer_buffer[i],
+ urb->transfer_dma);
+
+ usb_free_urb(urb);
+ }
+ kfree(dev->adev.urb);
+ kfree(dev->adev.transfer_buffer);
+ dev->adev.num_urb = 0;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+static int em28xx_audio_ep_packet_size(struct usb_device *udev,
+ struct usb_endpoint_descriptor *e)
+{
+ int size = le16_to_cpu(e->wMaxPacketSize);
+
+ if (udev->speed == USB_SPEED_HIGH)
+ return (size & 0x7ff) * (1 + (((size) >> 11) & 0x03));
+
+ return size & 0x7ff;
+}
+
+static int em28xx_audio_urb_init(struct em28xx *dev)
+{
+ struct usb_interface *intf;
+ struct usb_endpoint_descriptor *e, *ep = NULL;
+ int i, ep_size, interval, num_urb, npackets;
+ int urb_size, bytes_per_transfer;
+ u8 alt;
+
+ if (dev->ifnum)
+ alt = 1;
+ else
+ alt = 7;
+
+ intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+
+ if (intf->num_altsetting <= alt) {
+ em28xx_errdev("alt %d doesn't exist on interface %d\n",
+ alt, dev->ifnum);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) {
+ e = &intf->altsetting[alt].endpoint[i].desc;
+ if (!usb_endpoint_dir_in(e))
+ continue;
+ if (e->bEndpointAddress == EM28XX_EP_AUDIO) {
+ ep = e;
+ break;
+ }
+ }
+
+ if (!ep) {
+ em28xx_errdev("Couldn't find an audio endpoint");
+ return -ENODEV;
+ }
+
+ ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+ interval = 1 << (ep->bInterval - 1);
+
+ em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+ EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
+ dev->ifnum, alt,
+ interval,
+ ep_size);
+
+ /* Calculate the number and size of URBs to better fit the audio samples */
+
+ /*
+ * Estimate the number of bytes per DMA transfer.
+ *
+ * This is given by the sample rate (for now, only 48000 Hz) multiplied
+ * by 2 channels and 2 bytes/sample divided by the number of microframe
+ * intervals and by the microframe rate (125 us)
+ */
+ bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval);
+
+ /*
+ * Estimate the number of transfer URBs. Don't let it go past the
+ * maximum number of URBs that is known to be supported by the device.
+ */
+ num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);
+ if (num_urb > EM28XX_MAX_AUDIO_BUFS)
+ num_urb = EM28XX_MAX_AUDIO_BUFS;
+
+ /*
+ * Now that we know the number of bytes per transfer and the number of
+ * URBs, estimate the typical size of an URB, in order to adjust the
+ * minimal number of packets.
+ */
+ urb_size = bytes_per_transfer / num_urb;
+
+ /*
+ * Now, calculate the amount of audio packets to be filled on each
+ * URB. In order to preserve the old behaviour, use a minimal
+ * threshold for this value.
+ */
+ npackets = EM28XX_MIN_AUDIO_PACKETS;
+ if (urb_size > ep_size * npackets)
+ npackets = DIV_ROUND_UP(urb_size, ep_size);
+
+ em28xx_info("Number of URBs: %d, with %d packets and %d size",
+ num_urb, npackets, urb_size);
+
+ /* Estimate the bytes per period */
+ dev->adev.period = urb_size * npackets;
+
+ /* Allocate space to store the number of URBs to be used */
+
+ dev->adev.transfer_buffer = kcalloc(num_urb,
+ sizeof(*dev->adev.transfer_buffer),
+ GFP_ATOMIC);
+ if (!dev->adev.transfer_buffer) {
+ return -ENOMEM;
+ }
+
+ dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_ATOMIC);
+ if (!dev->adev.urb) {
+ kfree(dev->adev.transfer_buffer);
+ return -ENOMEM;
+ }
+
+ /* Alloc memory for each URB and for each transfer buffer */
+ dev->adev.num_urb = num_urb;
+ for (i = 0; i < num_urb; i++) {
+ struct urb *urb;
+ int j, k;
+ void *buf;
+
+ urb = usb_alloc_urb(npackets, GFP_ATOMIC);
+ if (!urb) {
+ em28xx_errdev("usb_alloc_urb failed!\n");
+ em28xx_audio_free_urb(dev);
+ return -ENOMEM;
+ }
+ dev->adev.urb[i] = urb;
+
+ buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ em28xx_errdev("usb_alloc_coherent failed!\n");
+ em28xx_audio_free_urb(dev);
+ return -ENOMEM;
+ }
+ dev->adev.transfer_buffer[i] = buf;
+
+ urb->dev = dev->udev;
+ urb->context = dev;
+ urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = buf;
+ urb->interval = interval;
+ urb->complete = em28xx_audio_isocirq;
+ urb->number_of_packets = npackets;
+ urb->transfer_buffer_length = ep_size * npackets;
+
+ for (j = k = 0; j < npackets; j++, k += ep_size) {
+ urb->iso_frame_desc[j].offset = k;
+ urb->iso_frame_desc[j].length = ep_size;
+ }
+ }
+
+ return 0;
+}
+
static int em28xx_audio_init(struct em28xx *dev)
{
struct em28xx_audio *adev = &dev->adev;
struct snd_pcm *pcm;
struct snd_card *card;
static int devnr;
- int err;
+ int err;
- if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
+ if (!dev->has_alsa_audio) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
return 0;
}
- printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
+ em28xx_info("Binding audio extension\n");
+
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
- printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
+ printk(KERN_INFO
+ "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
- err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
- &card);
+ err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio",
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
spin_lock_init(&adev->slock);
+ adev->sndcard = card;
+ adev->udev = dev->udev;
+
err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto card_free;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Empia 28xx Capture");
- snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Em28xx-Audio");
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
@@ -694,15 +940,25 @@ static int em28xx_audio_init(struct em28xx *dev)
em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER);
}
+ err = em28xx_audio_urb_init(dev);
+ if (err)
+ goto card_free;
+
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
- adev->sndcard = card;
- adev->udev = dev->udev;
+ if (err < 0)
+ goto urb_free;
+ em28xx_info("Audio extension successfully initialized\n");
return 0;
+
+urb_free:
+ em28xx_audio_free_urb(dev);
+
+card_free:
+ snd_card_free(card);
+ adev->sndcard = NULL;
+
+ return err;
}
static int em28xx_audio_fini(struct em28xx *dev)
@@ -717,7 +973,14 @@ static int em28xx_audio_fini(struct em28xx *dev)
return 0;
}
+ em28xx_info("Closing audio extension");
+
if (dev->adev.sndcard) {
+ snd_card_disconnect(dev->adev.sndcard);
+ flush_work(&dev->wq_trigger);
+
+ em28xx_audio_free_urb(dev);
+
snd_card_free(dev->adev.sndcard);
dev->adev.sndcard = NULL;
}
@@ -745,7 +1008,8 @@ static void __exit em28xx_alsa_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Audio driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - audio interface");
+MODULE_VERSION(EM28XX_VERSION);
module_init(em28xx_alsa_register);
module_exit(em28xx_alsa_unregister);
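The rewritten em28xx-audio URB setup above no longer uses a fixed EM28XX_AUDIO_BUFS/EM28XX_NUM_AUDIO_PACKETS layout: em28xx_audio_urb_init() sizes the transfers from the endpoint descriptor (packet size and bInterval), caps the URB count at EM28XX_MAX_AUDIO_BUFS, enforces at least EM28XX_MIN_AUDIO_PACKETS packets per URB, and derives the ALSA period (urb_size * npackets) that snd_em28xx_capture_open() later constrains to ±5%. A stand-alone sketch of that arithmetic so the numbers can be reproduced outside the kernel; ep_size and interval are hypothetical inputs, on real hardware they come from the endpoint descriptor:

#include <stdio.h>

#define MAX_BUFS	5	/* mirrors EM28XX_MAX_AUDIO_BUFS */
#define MIN_PACKETS	64	/* mirrors EM28XX_MIN_AUDIO_PACKETS */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int ep_size = 196;	/* hypothetical wMaxPacketSize-derived value */
	int interval = 1;	/* 1 << (bInterval - 1) for bInterval == 1 */

	/* 48000 Hz * 2 channels * 2 bytes, spread over 125 us microframes */
	int bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval);

	/* number of URBs, capped at the device-supported maximum */
	int num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size);
	if (num_urb > MAX_BUFS)
		num_urb = MAX_BUFS;

	/* typical URB size, then the packets filled per URB */
	int urb_size = bytes_per_transfer / num_urb;
	int npackets = MIN_PACKETS;
	if (urb_size > ep_size * npackets)
		npackets = DIV_ROUND_UP(urb_size, ep_size);

	printf("urbs=%d packets/urb=%d period=%d bytes\n",
	       num_urb, npackets, urb_size * npackets);
	return 0;
}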
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index d666741797d4..c29f5c4e7b40 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -454,3 +454,4 @@ int em28xx_init_camera(struct em28xx *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(em28xx_init_camera);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a5196697627f..4d97a76cc3b0 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -36,7 +36,6 @@
#include <media/tvaudio.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -67,7 +66,7 @@ MODULE_PARM_DESC(usb_xfer_mode,
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
-static unsigned long em28xx_devused;
+DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
struct em28xx_hash_table {
unsigned long hash;
@@ -356,6 +355,28 @@ static struct em28xx_reg_seq c3tech_digital_duo_digital[] = {
{ -1, -1, -1, -1},
};
+/*
+ * 2013:0258 PCTV DVB-S2 Stick (461e)
+ * GPIO 0 = POWER_ON
+ * GPIO 1 = BOOST
+ * GPIO 2 = VUV_LNB (red LED)
+ * GPIO 3 = #EXT_12V
+ * GPIO 4 = INT_DEM
+ * GPIO 5 = INT_LNB
+ * GPIO 6 = #RESET_DEM
+ * GPIO 7 = P07_LED (green LED)
+ */
+static struct em28xx_reg_seq pctv_461e[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 0},
+ {0x0d, 0xff, 0xff, 0},
+ {EM2874_R80_GPIO_P0_CTRL, 0x3f, 0xff, 100}, /* reset demod */
+ {EM2874_R80_GPIO_P0_CTRL, 0x7f, 0xff, 200}, /* reset demod */
+ {0x0d, 0x42, 0xff, 0},
+ {EM2874_R80_GPIO_P0_CTRL, 0xeb, 0xff, 0},
+ {EM2874_R5F_TS_ENABLE, 0x84, 0x84, 0}, /* parallel? | null discard */
+ { -1, -1, -1, -1},
+};
+
#if 0
static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x6f, 0xff, 10},
@@ -412,6 +433,70 @@ static struct em28xx_reg_seq pctv_520e[] = {
{ -1, -1, -1, -1},
};
+/* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+ * reg 0x80/0x84:
+ * GPIO_0: capturing LED, 0=on, 1=off
+ * GPIO_2: AV mute button, 0=pressed, 1=unpressed
+ * GPIO 3: illumination button, 0=pressed, 1=unpressed
+ * GPIO_6: illumination/flash LED, 0=on, 1=off
+ * reg 0x81/0x85:
+ * GPIO_7: snapshot button, 0=pressed, 1=unpressed
+ */
+static struct em28xx_reg_seq speedlink_vad_laplace_reg_seq[] = {
+ {EM2820_R08_GPIO_CTRL, 0xf7, 0xff, 10},
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xb2, 10},
+ { -1, -1, -1, -1},
+};
+
+/*
+ * Button definitions
+ */
+static struct em28xx_button std_snapshot_button[] = {
+ {
+ .role = EM28XX_BUTTON_SNAPSHOT,
+ .reg_r = EM28XX_R0C_USBSUSP,
+ .reg_clearing = EM28XX_R0C_USBSUSP,
+ .mask = EM28XX_R0C_USBSUSP_SNAPSHOT,
+ .inverted = 0,
+ },
+ {-1, 0, 0, 0, 0},
+};
+
+static struct em28xx_button speedlink_vad_laplace_buttons[] = {
+ {
+ .role = EM28XX_BUTTON_SNAPSHOT,
+ .reg_r = EM2874_R85_GPIO_P1_STATE,
+ .mask = 0x80,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_BUTTON_ILLUMINATION,
+ .reg_r = EM2874_R84_GPIO_P0_STATE,
+ .mask = 0x08,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0, 0},
+};
+
+/*
+ * LED definitions
+ */
+static struct em28xx_led speedlink_vad_laplace_leds[] = {
+ {
+ .role = EM28XX_LED_ANALOG_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x01,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_LED_ILLUMINATION,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = 0x40,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -1391,7 +1476,7 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2820_BOARD_PROLINK_PLAYTV_USB2] = {
.name = "SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0",
- .has_snapshot_button = 1,
+ .buttons = std_snapshot_button,
.tda9887_conf = TDA9887_PRESENT,
.tuner_type = TUNER_YMEC_TVF_5533MF,
.decoder = EM28XX_SAA711X,
@@ -1413,7 +1498,7 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = {
.name = "EM2860/SAA711X Reference Design",
- .has_snapshot_button = 1,
+ .buttons = std_snapshot_button,
.tuner_type = TUNER_ABSENT,
.decoder = EM28XX_SAA711X,
.input = { {
@@ -2020,7 +2105,7 @@ struct em28xx_board em28xx_boards[] = {
},
/* 1b80:e1cc Delock 61959
* Empia EM2874B + Micronas DRX 3913KA2 + NXP TDA18271HDC2
- * mostly the same as MaxMedia UB-425-TC but different remote */
+ * mostly the same as MaxMedia UB-425-TC but different remote */
[EM2874_BOARD_DELOCK_61959] = {
.name = "Delock 61959",
.tuner_type = TUNER_ABSENT,
@@ -2043,7 +2128,38 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.def_i2c_bus = 1,
},
+ /* 1ae7:9003/9004 SpeedLink Vicious And Devine Laplace webcam
+ * Empia EM2765 + OmniVision OV2640 */
+ [EM2765_BOARD_SPEEDLINK_VAD_LAPLACE] = {
+ .name = "SpeedLink Vicious And Devine Laplace webcam",
+ .xclk = EM28XX_XCLK_FREQUENCY_24MHZ,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_100_KHZ,
+ .def_i2c_bus = 1,
+ .tuner_type = TUNER_ABSENT,
+ .is_webcam = 1,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = speedlink_vad_laplace_reg_seq,
+ } },
+ .buttons = speedlink_vad_laplace_buttons,
+ .leds = speedlink_vad_laplace_leds,
+ },
+ /* 2013:0258 PCTV DVB-S2 Stick (461e)
+ * Empia EM28178, Montage M88DS3103, Montage M88TS2022, Allegro A8293 */
+ [EM28178_BOARD_PCTV_461E] = {
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
+ .name = "PCTV DVB-S2 Stick (461e)",
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = pctv_461e,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
+ },
};
+EXPORT_SYMBOL_GPL(em28xx_boards);
+
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
/* table of devices that work with this driver */
@@ -2208,6 +2324,12 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2884_BOARD_PCTV_520E },
{ USB_DEVICE(0x1b80, 0xe1cc),
.driver_info = EM2874_BOARD_DELOCK_61959 },
+ { USB_DEVICE(0x1ae7, 0x9003),
+ .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+ { USB_DEVICE(0x1ae7, 0x9004),
+ .driver_info = EM2765_BOARD_SPEEDLINK_VAD_LAPLACE },
+ { USB_DEVICE(0x2013, 0x0258),
+ .driver_info = EM28178_BOARD_PCTV_461E },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2239,24 +2361,6 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
};
/* NOTE: introduce a separate hash table for devices with 16 bit eeproms */
-/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
-static unsigned short saa711x_addrs[] = {
- 0x4a >> 1, 0x48 >> 1, /* SAA7111, SAA7111A and SAA7113 */
- 0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
- I2C_CLIENT_END };
-
-static unsigned short tvp5150_addrs[] = {
- 0xb8 >> 1,
- 0xba >> 1,
- I2C_CLIENT_END
-};
-
-static unsigned short msp3400_addrs[] = {
- 0x80 >> 1,
- 0x88 >> 1,
- I2C_CLIENT_END
-};
-
int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
{
struct em28xx_i2c_bus *i2c_bus = ptr;
@@ -2408,113 +2512,6 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_set_mode(dev, EM28XX_SUSPEND);
}
-static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
-{
- memset(ctl, 0, sizeof(*ctl));
-
- ctl->fname = XC2028_DEFAULT_FIRMWARE;
- ctl->max_len = 64;
- ctl->mts = em28xx_boards[dev->model].mts_firmware;
-
- switch (dev->model) {
- case EM2880_BOARD_EMPIRE_DUAL_TV:
- case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
- case EM2882_BOARD_TERRATEC_HYBRID_XS:
- ctl->demod = XC3028_FE_ZARLINK456;
- break;
- case EM2880_BOARD_TERRATEC_HYBRID_XS:
- case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
- case EM2881_BOARD_PINNACLE_HYBRID_PRO:
- ctl->demod = XC3028_FE_ZARLINK456;
- break;
- case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
- case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
- ctl->demod = XC3028_FE_DEFAULT;
- break;
- case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
- ctl->demod = XC3028_FE_DEFAULT;
- ctl->fname = XC3028L_DEFAULT_FIRMWARE;
- break;
- case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
- case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
- case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
- /* FIXME: Better to specify the needed IF */
- ctl->demod = XC3028_FE_DEFAULT;
- break;
- case EM2883_BOARD_KWORLD_HYBRID_330U:
- case EM2882_BOARD_DIKOM_DK300:
- case EM2882_BOARD_KWORLD_VS_DVBT:
- ctl->demod = XC3028_FE_CHINA;
- ctl->fname = XC2028_DEFAULT_FIRMWARE;
- break;
- case EM2882_BOARD_EVGA_INDTUBE:
- ctl->demod = XC3028_FE_CHINA;
- ctl->fname = XC3028L_DEFAULT_FIRMWARE;
- break;
- default:
- ctl->demod = XC3028_FE_OREN538;
- }
-}
-
-static void em28xx_tuner_setup(struct em28xx *dev)
-{
- struct tuner_setup tun_setup;
- struct v4l2_frequency f;
-
- if (dev->tuner_type == TUNER_ABSENT)
- return;
-
- memset(&tun_setup, 0, sizeof(tun_setup));
-
- tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
- tun_setup.tuner_callback = em28xx_tuner_callback;
-
- if (dev->board.radio.type) {
- tun_setup.type = dev->board.radio.type;
- tun_setup.addr = dev->board.radio_addr;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
- }
-
- if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
- tun_setup.type = dev->tuner_type;
- tun_setup.addr = dev->tuner_addr;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
- }
-
- if (dev->tda9887_conf) {
- struct v4l2_priv_tun_config tda9887_cfg;
-
- tda9887_cfg.tuner = TUNER_TDA9887;
- tda9887_cfg.priv = &dev->tda9887_conf;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
- }
-
- if (dev->tuner_type == TUNER_XC2028) {
- struct v4l2_priv_tun_config xc2028_cfg;
- struct xc2028_ctrl ctl;
-
- memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
- memset(&ctl, 0, sizeof(ctl));
-
- em28xx_setup_xc3028(dev, &ctl);
-
- xc2028_cfg.tuner = TUNER_XC2028;
- xc2028_cfg.priv = &ctl;
-
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
- }
-
- /* configure tuner */
- f.tuner = 0;
- f.type = V4L2_TUNER_ANALOG_TV;
- f.frequency = 9076; /* just a magic number */
- dev->ctl_freq = f.frequency;
- v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
-}
-
static int em28xx_hint_board(struct em28xx *dev)
{
int i;
@@ -2768,57 +2765,56 @@ static void em28xx_card_setup(struct em28xx *dev)
/* Allow override tuner type by a module parameter */
if (tuner >= 0)
dev->tuner_type = tuner;
+}
- /* request some modules */
- if (dev->board.has_msp34xx)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "msp3400", 0, msp3400_addrs);
-
- if (dev->board.decoder == EM28XX_SAA711X)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "saa7115_auto", 0, saa711x_addrs);
-
- if (dev->board.decoder == EM28XX_TVP5150)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tvp5150", 0, tvp5150_addrs);
-
- if (dev->board.adecoder == EM28XX_TVAUDIO)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tvaudio", dev->board.tvaudio_addr, NULL);
-
- if (dev->board.tuner_type != TUNER_ABSENT) {
- int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
-
- if (dev->board.radio.type)
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tuner", dev->board.radio_addr, NULL);
-
- if (has_demod)
- v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
- if (dev->tuner_addr == 0) {
- enum v4l2_i2c_tuner_type type =
- has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
- struct v4l2_subdev *sd;
-
- sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(type));
-
- if (sd)
- dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
- } else {
- v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
- "tuner", dev->tuner_addr, NULL);
- }
- }
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
+{
+ memset(ctl, 0, sizeof(*ctl));
- em28xx_tuner_setup(dev);
+ ctl->fname = XC2028_DEFAULT_FIRMWARE;
+ ctl->max_len = 64;
+ ctl->mts = em28xx_boards[dev->model].mts_firmware;
- em28xx_init_camera(dev);
+ switch (dev->model) {
+ case EM2880_BOARD_EMPIRE_DUAL_TV:
+ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
+ case EM2882_BOARD_TERRATEC_HYBRID_XS:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ break;
+ case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
+ case EM2881_BOARD_PINNACLE_HYBRID_PRO:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ break;
+ case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
+ case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
+ ctl->demod = XC3028_FE_DEFAULT;
+ break;
+ case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
+ ctl->demod = XC3028_FE_DEFAULT;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
+ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
+ case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
+ case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
+ /* FIXME: Better to specify the needed IF */
+ ctl->demod = XC3028_FE_DEFAULT;
+ break;
+ case EM2883_BOARD_KWORLD_HYBRID_330U:
+ case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC2028_DEFAULT_FIRMWARE;
+ break;
+ case EM2882_BOARD_EVGA_INDTUBE:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
+ default:
+ ctl->demod = XC3028_FE_OREN538;
+ }
}
-
+EXPORT_SYMBOL_GPL(em28xx_setup_xc3028);
static void request_module_async(struct work_struct *work)
{
@@ -2831,17 +2827,30 @@ static void request_module_async(struct work_struct *work)
* can be initialised right now. Otherwise, the module init
* code will do it.
*/
+
+ /*
+ * Devices with an audio-only interface also have a V4L/DVB/RC
+ * interface. Don't register extensions twice on those devices.
+ */
+ if (dev->is_audio_only) {
+#if defined(CONFIG_MODULES) && defined(MODULE)
+ request_module("em28xx-alsa");
+#endif
+ return;
+ }
+
em28xx_init_extension(dev);
#if defined(CONFIG_MODULES) && defined(MODULE)
+ if (dev->has_video)
+ request_module("em28xx-v4l");
if (dev->has_audio_class)
request_module("snd-usb-audio");
else if (dev->has_alsa_audio)
request_module("em28xx-alsa");
-
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.has_snapshot_button ||
+ if (dev->board.buttons ||
((dev->board.ir_codes || dev->board.has_ir_i2c) && !disable_ir))
request_module("em28xx-rc");
#endif /* CONFIG_MODULES */
@@ -2867,23 +2876,20 @@ void em28xx_release_resources(struct em28xx *dev)
{
/*FIXME: I2C IR should be disconnected */
- em28xx_release_analog_resources(dev);
+ mutex_lock(&dev->lock);
if (dev->def_i2c_bus)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
- if (dev->clk)
- v4l2_clk_unregister_fixed(dev->clk);
-
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
- v4l2_device_unregister(&dev->v4l2_dev);
usb_put_dev(dev->udev);
/* Mark device as unused */
- clear_bit(dev->devno, &em28xx_devused);
+ clear_bit(dev->devno, em28xx_devused);
+
+ mutex_unlock(&dev->lock);
};
+EXPORT_SYMBOL_GPL(em28xx_release_resources);
/*
* em28xx_init_dev()
@@ -2893,7 +2899,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
struct usb_interface *interface,
int minor)
{
- struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
int retval;
static const char *default_chip_name = "em28xx";
const char *chip_name = default_chip_name;
@@ -2968,6 +2973,11 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->wait_after_write = 0;
dev->eeprom_addrwidth_16bit = 1;
break;
+ case CHIP_ID_EM28178:
+ chip_name = "em28178";
+ dev->wait_after_write = 0;
+ dev->eeprom_addrwidth_16bit = 1;
+ break;
case CHIP_ID_EM2883:
chip_name = "em2882/3";
dev->wait_after_write = 0;
@@ -2983,6 +2993,16 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
}
}
+ if (dev->chip_id == CHIP_ID_EM2870 ||
+ dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM28178) {
+ /* Digital only device - don't load any alsa module */
+ dev->audio_mode.has_audio = false;
+ dev->has_audio_class = false;
+ dev->has_alsa_audio = false;
+ }
+
if (chip_name != default_chip_name)
printk(KERN_INFO DRIVER_NAME
": chip ID is %s\n", chip_name);
@@ -3015,15 +3035,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
}
}
- retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
- if (retval < 0) {
- em28xx_errdev("Call to v4l2_device_register() failed!\n");
- return retval;
- }
-
- v4l2_ctrl_handler_init(hdl, 8);
- dev->v4l2_dev.ctrl_handler = hdl;
-
rt_mutex_init(&dev->i2c_bus_lock);
/* register i2c bus 0 */
@@ -3034,7 +3045,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
__func__, retval);
- goto unregister_dev;
+ return retval;
}
/* register i2c bus 1 */
@@ -3048,88 +3059,17 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
__func__, retval);
- goto unregister_dev;
- }
- }
-
- /*
- * Default format, used for tvp5150 or saa711x output formats
- */
- dev->vinmode = 0x10;
- dev->vinctl = EM28XX_VINCTRL_INTERLACED |
- EM28XX_VINCTRL_CCIR656_ENABLE;
- /* Do board specific init and eeprom reading */
- em28xx_card_setup(dev);
+ em28xx_i2c_unregister(dev, 0);
- /* Configure audio */
- retval = em28xx_audio_setup(dev);
- if (retval < 0) {
- em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
- __func__, retval);
- goto fail;
- }
- if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
- v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
- v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
- } else {
- /* install the em28xx notify callback */
- v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
- em28xx_ctrl_notify, dev);
- v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
- em28xx_ctrl_notify, dev);
- }
-
- /* wake i2c devices */
- em28xx_wake_i2c(dev);
-
- /* init video dma queues */
- INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vbiq.active);
-
- if (dev->board.has_msp34xx) {
- /* Send a reset to other chips via gpio */
- retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
- if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg - "
- "msp34xx(1) failed! error [%d]\n",
- __func__, retval);
- goto fail;
- }
- msleep(3);
-
- retval = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
- if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg - "
- "msp34xx(2) failed! error [%d]\n",
- __func__, retval);
- goto fail;
+ return retval;
}
- msleep(3);
- }
-
- retval = em28xx_register_analog_devices(dev);
- if (retval < 0) {
- goto fail;
}
- /* Save some power by putting tuner to sleep */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+ /* Do board specific init and eeprom reading */
+ em28xx_card_setup(dev);
return 0;
-
-fail:
- if (dev->def_i2c_bus)
- em28xx_i2c_unregister(dev, 1);
- em28xx_i2c_unregister(dev, 0);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-unregister_dev:
- v4l2_device_unregister(&dev->v4l2_dev);
-
- return retval;
}
/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
@@ -3154,7 +3094,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Check to see next free device and mark as used */
do {
- nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
+ nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
if (nr >= EM28XX_MAXBOARDS) {
/* No free device slots */
printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
@@ -3162,7 +3102,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
retval = -ENOMEM;
goto err_no_slot;
}
- } while (test_and_set_bit(nr, &em28xx_devused));
+ } while (test_and_set_bit(nr, em28xx_devused));
/* Don't register audio interfaces */
if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
@@ -3332,7 +3272,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->alt = -1;
dev->is_audio_only = has_audio && !(has_video || has_dvb);
dev->has_alsa_audio = has_audio;
- dev->audio_ifnum = ifnum;
+ dev->audio_mode.has_audio = has_audio;
+ dev->has_video = has_video;
+ dev->ifnum = ifnum;
/* Checks if audio is provided by some interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
@@ -3369,15 +3311,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- /* initialize videobuf2 stuff */
- em28xx_vb2_setup(dev);
-
/* allocate device struct */
mutex_init(&dev->lock);
- mutex_lock(&dev->lock);
retval = em28xx_init_dev(dev, udev, interface, nr);
if (retval) {
- goto unlock_and_free;
+ goto err_free;
}
if (usb_xfer_mode < 0) {
@@ -3402,26 +3340,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
em28xx_info("dvb set to %s mode.\n",
dev->dvb_xfer_bulk ? "bulk" : "isoc");
-
- /* pre-allocate DVB usb transfer buffers */
- if (dev->dvb_xfer_bulk) {
- retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
- dev->dvb_xfer_bulk,
- EM28XX_DVB_NUM_BUFS,
- 512,
- EM28XX_DVB_BULK_PACKET_MULTIPLIER);
- } else {
- retval = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
- dev->dvb_xfer_bulk,
- EM28XX_DVB_NUM_BUFS,
- dev->dvb_max_pkt_size_isoc,
- EM28XX_DVB_NUM_ISOC_PACKETS);
- }
- if (retval) {
- printk(DRIVER_NAME
- ": Failed to pre-allocate USB transfer buffers for DVB.\n");
- goto unlock_and_free;
- }
}
request_modules(dev);
@@ -3429,19 +3347,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Should be the last thing to do, to avoid newer udev's to
open the device before fully initializing it
*/
- mutex_unlock(&dev->lock);
return 0;
-unlock_and_free:
- mutex_unlock(&dev->lock);
-
err_free:
kfree(dev->alt_max_pkt_size_isoc);
kfree(dev);
err:
- clear_bit(nr, &em28xx_devused);
+ clear_bit(nr, em28xx_devused);
err_no_slot:
usb_put_dev(udev);
@@ -3465,36 +3379,13 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->disconnected = 1;
- if (dev->is_audio_only) {
- mutex_lock(&dev->lock);
- em28xx_close_extension(dev);
- mutex_unlock(&dev->lock);
- return;
- }
-
- em28xx_info("disconnecting %s\n", dev->vdev->name);
+ em28xx_info("Disconnecting %s\n", dev->name);
flush_request_modules(dev);
- mutex_lock(&dev->lock);
-
- v4l2_device_disconnect(&dev->v4l2_dev);
-
- if (dev->users) {
- em28xx_warn("device %s is open! Deregistration and memory deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
-
- em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
- em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
- }
-
em28xx_close_extension(dev);
- /* NOTE: must be called BEFORE the resources are released */
-
- if (!dev->users)
- em28xx_release_resources(dev);
- mutex_unlock(&dev->lock);
+ em28xx_release_resources(dev);
if (!dev->users) {
kfree(dev->alt_max_pkt_size_isoc);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index fc157af5234a..898fb9bd88a2 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -23,6 +23,7 @@
*/
#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -33,6 +34,16 @@
#include "em28xx.h"
+#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
+ "Markus Rechberger <mrechberger@gmail.com>, " \
+ "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+ "Sascha Sommer <saschasommer@freenet.de>"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EM28XX_VERSION);
+
/* #define ENABLE_DEBUG_ISOC_FRAMES */
static unsigned int core_debug;
@@ -53,14 +64,6 @@ MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); } while (0)
-static int alt;
-module_param(alt, int, 0644);
-MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-
-static unsigned int disable_vbi;
-module_param(disable_vbi, int, 0644);
-MODULE_PARM_DESC(disable_vbi, "disable vbi support");
-
/* FIXME */
#define em28xx_isocdbg(fmt, arg...) do {\
if (core_debug) \
@@ -226,21 +229,42 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
EXPORT_SYMBOL_GPL(em28xx_write_reg_bits);
/*
+ * em28xx_toggle_reg_bits()
+ * toggles/inverts the bits (specified by bitmask) of a register
+ */
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask)
+{
+ int oldval;
+ u8 newval;
+
+ oldval = em28xx_read_reg(dev, reg);
+ if (oldval < 0)
+ return oldval;
+
+ newval = (~oldval & bitmask) | (oldval & ~bitmask);
+
+ return em28xx_write_reg(dev, reg, newval);
+}
+EXPORT_SYMBOL_GPL(em28xx_toggle_reg_bits);
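/*
 * em28xx_toggle_reg_bits() inverts only the bits selected by the mask and
 * leaves the rest untouched. For illustration, with hypothetical values:
 *
 *   oldval  = 0b10100110
 *   bitmask = 0b00001111
 *   newval  = (~oldval & bitmask) | (oldval & ~bitmask) = 0b10101001
 *
 * The input extension uses this to flip an illumination LED GPIO without
 * having to track its current state.
 */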
+
+/*
* em28xx_is_ac97_ready()
* Checks if ac97 is ready
*/
static int em28xx_is_ac97_ready(struct em28xx *dev)
{
- int ret, i;
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_AC97_XFER_TIMEOUT);
+ int ret;
/* Wait up to 50 ms for AC97 command to complete */
- for (i = 0; i < 10; i++, msleep(5)) {
+ while (time_is_after_jiffies(timeout)) {
ret = em28xx_read_reg(dev, EM28XX_R43_AC97BUSY);
if (ret < 0)
return ret;
if (!(ret & 0x01))
return 0;
+ msleep(5);
}
em28xx_warn("AC97 command still being executed: not handled properly!\n");
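/*
 * The busy-wait above now uses a jiffies-based deadline instead of a fixed
 * iteration count. A minimal sketch of the pattern (condition_met() is a
 * placeholder for the actual register check):
 *
 *   unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_AC97_XFER_TIMEOUT);
 *
 *   while (time_is_after_jiffies(timeout)) {
 *           if (condition_met())
 *                   return 0;
 *           msleep(5);
 *   }
 *   return -ETIMEDOUT;
 *
 * This keeps the wall-clock bound correct even if msleep() oversleeps.
 */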
@@ -482,16 +506,8 @@ int em28xx_audio_setup(struct em28xx *dev)
int vid1, vid2, feat, cfg;
u32 vid;
- if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
- || dev->chip_id == CHIP_ID_EM28174) {
- /* Digital only device - don't load any alsa module */
- dev->audio_mode.has_audio = false;
- dev->has_audio_class = false;
- dev->has_alsa_audio = false;
+ if (!dev->audio_mode.has_audio)
return 0;
- }
-
- dev->audio_mode.has_audio = true;
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -504,17 +520,19 @@ int em28xx_audio_setup(struct em28xx *dev)
dev->has_alsa_audio = false;
dev->audio_mode.has_audio = false;
return 0;
- } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
- em28xx_info("I2S Audio (3 sample rates)\n");
- dev->audio_mode.i2s_3rates = 1;
- } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
- EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
- em28xx_info("I2S Audio (5 sample rates)\n");
- dev->audio_mode.i2s_5rates = 1;
- }
-
- if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+ } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+ if (dev->chip_id < CHIP_ID_EM2860 &&
+ (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM2820_CHIPCFG_I2S_1_SAMPRATE)
+ dev->audio_mode.i2s_samplerates = 1;
+ else if (dev->chip_id >= CHIP_ID_EM2860 &&
+ (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ EM2860_CHIPCFG_I2S_5_SAMPRATES)
+ dev->audio_mode.i2s_samplerates = 5;
+ else
+ dev->audio_mode.i2s_samplerates = 3;
+ em28xx_info("I2S Audio (%d sample rate(s))\n",
+ dev->audio_mode.i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
@@ -582,23 +600,21 @@ init_audio:
}
EXPORT_SYMBOL_GPL(em28xx_audio_setup);
-int em28xx_colorlevels_set_default(struct em28xx *dev)
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+ enum em28xx_led_role role)
{
- em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
- em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
-
- em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
- em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
- em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
- em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
- return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+ if (dev->board.leds) {
+ u8 k = 0;
+ while (dev->board.leds[k].role >= 0 &&
+ dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
+ if (dev->board.leds[k].role == role)
+ return &dev->board.leds[k];
+ k++;
+ }
+ }
+ return NULL;
}
+EXPORT_SYMBOL_GPL(em28xx_find_led);
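/*
 * Board LED tables are expected to end with an entry whose role lies outside
 * the valid range (e.g. -1), which is what the loop condition above checks.
 * A hypothetical board entry illustrating the convention (register and mask
 * values are made up for illustration):
 *
 *   static struct em28xx_led example_leds[] = {
 *           { .role = EM28XX_LED_ANALOG_CAPTURING,
 *             .gpio_reg = EM2820_R08_GPIO_CTRL,
 *             .gpio_mask = 0x01,
 *             .inverted = 1 },
 *           { .role = -1 },       /* terminator */
 *   };
 */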
int em28xx_capture_start(struct em28xx *dev, int start)
{
@@ -606,271 +622,57 @@ int em28xx_capture_start(struct em28xx *dev, int start)
if (dev->chip_id == CHIP_ID_EM2874 ||
dev->chip_id == CHIP_ID_EM2884 ||
- dev->chip_id == CHIP_ID_EM28174) {
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM28178) {
/* The Transport Stream Enable Register moved in em2874 */
- if (!start) {
- rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
- 0x00,
- EM2874_TS1_CAPTURE_ENABLE);
- return rc;
- }
-
- /* Enable Transport Stream */
rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
- EM2874_TS1_CAPTURE_ENABLE,
+ start ?
+ EM2874_TS1_CAPTURE_ENABLE : 0x00,
EM2874_TS1_CAPTURE_ENABLE);
- return rc;
- }
-
-
- /* FIXME: which is the best order? */
- /* video registers are sampled by VREF */
- rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
- start ? 0x10 : 0x00, 0x10);
- if (rc < 0)
- return rc;
-
- if (!start) {
- /* disable video capture */
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
- return rc;
- }
-
- if (dev->board.is_webcam)
- rc = em28xx_write_reg(dev, 0x13, 0x0c);
-
- /* enable video capture */
- rc = em28xx_write_reg(dev, 0x48, 0x00);
-
- if (dev->mode == EM28XX_ANALOG_MODE)
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x67);
- else
- rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x37);
-
- msleep(6);
-
- return rc;
-}
-
-int em28xx_vbi_supported(struct em28xx *dev)
-{
- /* Modprobe option to manually disable */
- if (disable_vbi == 1)
- return 0;
-
- if (dev->board.is_webcam)
- return 0;
-
- /* FIXME: check subdevices for VBI support */
-
- if (dev->chip_id == CHIP_ID_EM2860 ||
- dev->chip_id == CHIP_ID_EM2883)
- return 1;
-
- /* Version of em28xx that does not support VBI */
- return 0;
-}
-
-int em28xx_set_outfmt(struct em28xx *dev)
-{
- int ret;
- u8 fmt, vinctrl;
-
- fmt = dev->format->reg;
- if (!dev->is_em25xx)
- fmt |= 0x20;
- /*
- * NOTE: it's not clear if this is really needed !
- * The datasheets say bit 5 is a reserved bit and devices seem to work
- * fine without it. But the Windows driver sets it for em2710/50+em28xx
- * devices and we've always been setting it, too.
- *
- * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
- * it's likely used for an additional (compressed ?) format there.
- */
- ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
- if (ret < 0)
- return ret;
-
- ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
- if (ret < 0)
- return ret;
-
- vinctrl = dev->vinctl;
- if (em28xx_vbi_supported(dev) == 1) {
- vinctrl |= EM28XX_VINCTRL_VBI_RAW;
- em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
- em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
- em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
- if (dev->norm & V4L2_STD_525_60) {
- /* NTSC */
- em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
- } else if (dev->norm & V4L2_STD_625_50) {
- /* PAL */
- em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
- }
- }
-
- return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
-}
-
-static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
- u8 ymin, u8 ymax)
-{
- em28xx_coredbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
- xmin, ymin, xmax, ymax);
-
- em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
- em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
- em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
- return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
-}
-
-static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
- u16 width, u16 height)
-{
- u8 cwidth = width >> 2;
- u8 cheight = height >> 2;
- u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
- /* NOTE: size limit: 2047x1023 = 2MPix */
-
- em28xx_coredbg("capture area set to (%d,%d): %dx%d\n",
- hstart, vstart,
- ((overflow & 2) << 9 | cwidth << 2),
- ((overflow & 1) << 10 | cheight << 2));
-
- em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
- em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
- em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
- em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
- em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
-
- /* FIXME: function/meaning of these registers ? */
- /* FIXME: align width+height to multiples of 4 ?! */
- if (dev->is_em25xx) {
- em28xx_write_reg(dev, 0x34, width >> 4);
- em28xx_write_reg(dev, 0x35, height >> 4);
- }
-}
-
-static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
-{
- u8 mode;
- /* the em2800 scaler only supports scaling down to 50% */
-
- if (dev->board.is_em2800) {
- mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
} else {
- u8 buf[2];
-
- buf[0] = h;
- buf[1] = h >> 8;
- em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
-
- buf[0] = v;
- buf[1] = v >> 8;
- em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
- /* it seems that both H and V scalers must be active
- to work correctly */
- mode = (h || v) ? 0x30 : 0x00;
- }
- return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
-}
-
-/* FIXME: this only function read values from dev */
-int em28xx_resolution_set(struct em28xx *dev)
-{
- int width, height;
- width = norm_maxw(dev);
- height = norm_maxh(dev);
-
- /* Properly setup VBI */
- dev->vbi_width = 720;
- if (dev->norm & V4L2_STD_525_60)
- dev->vbi_height = 12;
- else
- dev->vbi_height = 18;
-
- em28xx_set_outfmt(dev);
+ /* FIXME: which is the best order? */
+ /* video registers are sampled by VREF */
+ rc = em28xx_write_reg_bits(dev, EM28XX_R0C_USBSUSP,
+ start ? 0x10 : 0x00, 0x10);
+ if (rc < 0)
+ return rc;
- em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
+ if (start) {
+ if (dev->board.is_webcam)
+ rc = em28xx_write_reg(dev, 0x13, 0x0c);
- /* If we don't set the start position to 2 in VBI mode, we end up
- with line 20/21 being YUYV encoded instead of being in 8-bit
- greyscale. The core of the issue is that line 21 (and line 23 for
- PAL WSS) are inside of active video region, and as a result they
- get the pixelformatting associated with that area. So by cropping
- it out, we end up with the same format as the rest of the VBI
- region */
- if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 2, width, height);
- else
- em28xx_capture_area_set(dev, 0, 0, width, height);
+ /* Enable video capture */
+ rc = em28xx_write_reg(dev, 0x48, 0x00);
- return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
-}
+ if (dev->mode == EM28XX_ANALOG_MODE)
+ rc = em28xx_write_reg(dev,
+ EM28XX_R12_VINENABLE, 0x67);
+ else
+ rc = em28xx_write_reg(dev,
+ EM28XX_R12_VINENABLE, 0x37);
-/* Set USB alternate setting for analog video */
-int em28xx_set_alternate(struct em28xx *dev)
-{
- int errCode;
- int i;
- unsigned int min_pkt_size = dev->width * 2 + 4;
-
- /* NOTE: for isoc transfers, only alt settings > 0 are allowed
- bulk transfers seem to work only with alt=0 ! */
- dev->alt = 0;
- if ((alt > 0) && (alt < dev->num_alt)) {
- em28xx_coredbg("alternate forced to %d\n", dev->alt);
- dev->alt = alt;
- goto set_alt;
+ msleep(6);
+ } else {
+ /* disable video capture */
+ rc = em28xx_write_reg(dev, EM28XX_R12_VINENABLE, 0x27);
+ }
}
- if (dev->analog_xfer_bulk)
- goto set_alt;
- /* When image size is bigger than a certain value,
- the frame size should be increased, otherwise, only
- green screen will be received.
- */
- if (dev->width * 2 * dev->height > 720 * 240 * 2)
- min_pkt_size *= 2;
+ if (rc < 0)
+ return rc;
- for (i = 0; i < dev->num_alt; i++) {
- /* stop when the selected alt setting offers enough bandwidth */
- if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
- dev->alt = i;
- break;
- /* otherwise make sure that we end up with the maximum bandwidth
- because the min_pkt_size equation might be wrong...
- */
- } else if (dev->alt_max_pkt_size_isoc[i] >
- dev->alt_max_pkt_size_isoc[dev->alt])
- dev->alt = i;
+ /* Switch (explicitly controlled) analog capturing LED on/off */
+ if (dev->mode == EM28XX_ANALOG_MODE) {
+ const struct em28xx_led *led;
+ led = em28xx_find_led(dev, EM28XX_LED_ANALOG_CAPTURING);
+ if (led)
+ em28xx_write_reg_bits(dev, led->gpio_reg,
+ (!start ^ led->inverted) ?
+ ~led->gpio_mask : led->gpio_mask,
+ led->gpio_mask);
}
-set_alt:
- /* NOTE: for bulk transfers, we need to call usb_set_interface()
- * even if the previous settings were the same. Otherwise streaming
- * fails with all urbs having status = -EOVERFLOW ! */
- if (dev->analog_xfer_bulk) {
- dev->max_pkt_size = 512; /* USB 2.0 spec */
- dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
- } else { /* isoc */
- em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
- min_pkt_size, dev->alt);
- dev->max_pkt_size =
- dev->alt_max_pkt_size_isoc[dev->alt];
- dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
- }
- em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
- dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, 0, dev->alt);
- if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
- return errCode;
- }
- return 0;
+ return rc;
}
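/*
 * The analog capturing LED above is driven by writing either the mask or its
 * complement to the GPIO register, depending on whether capture starts and
 * whether the LED is active-low:
 *
 *   (!start ^ led->inverted) ? ~led->gpio_mask : led->gpio_mask
 *
 *   start = 1, inverted = 0  ->  gpio_mask bits set (LED on)
 *   start = 0, inverted = 0  ->  gpio_mask bits cleared (LED off)
 *
 * The two cases swap when inverted = 1.
 */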
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
@@ -1238,18 +1040,6 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
EXPORT_SYMBOL_GPL(em28xx_init_usb_xfer);
/*
- * em28xx_wake_i2c()
- * configure i2c attached devices
- */
-void em28xx_wake_i2c(struct em28xx *dev)
-{
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
- INPUT(dev->ctl_input)->vmux, 0, 0);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
-}
-
-/*
* Device control list
*/
@@ -1272,7 +1062,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
ops->init(dev);
}
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
+ printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
return 0;
}
EXPORT_SYMBOL(em28xx_register_extension);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 344042bb845c..a0a669e81362 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -51,10 +51,14 @@
#include "a8293.h"
#include "qt1010.h"
#include "mb86a20s.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
-MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
+MODULE_VERSION(EM28XX_VERSION);
+
static unsigned int debug;
module_param(debug, int, 0644);
@@ -87,6 +91,7 @@ struct em28xx_dvb {
struct semaphore pll_mutex;
bool dont_attach_fe1;
int lna_gpio;
+ struct i2c_client *i2c_client_tuner;
};
@@ -198,7 +203,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(dev->udev, 0, dvb_alt);
+ usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
@@ -271,7 +276,7 @@ static int em28xx_stop_feed(struct dvb_demux_feed *feed)
static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
- struct em28xx *dev = i2c_bus->dev;
+ struct em28xx *dev = i2c_bus->dev;
if (acquire)
return em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
@@ -370,7 +375,6 @@ static struct drxk_config terratec_h5_drxk = {
.no_i2c_bridge = 1,
.microcode_name = "dvb-usb-terratec-h5-drxk.fw",
.qam_demod_parameter_count = 2,
- .load_firmware_sync = true,
};
static struct drxk_config hauppauge_930c_drxk = {
@@ -380,7 +384,6 @@ static struct drxk_config hauppauge_930c_drxk = {
.microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
.chunk_size = 56,
.qam_demod_parameter_count = 2,
- .load_firmware_sync = true,
};
static struct drxk_config terratec_htc_stick_drxk = {
@@ -394,7 +397,6 @@ static struct drxk_config terratec_htc_stick_drxk = {
.antenna_dvbt = true,
/* The windows driver uses the same. This will disable LNA. */
.antenna_gpio = 0x6,
- .load_firmware_sync = true,
};
static struct drxk_config maxmedia_ub425_tc_drxk = {
@@ -403,7 +405,6 @@ static struct drxk_config maxmedia_ub425_tc_drxk = {
.no_i2c_bridge = 1,
.microcode_name = "dvb-demod-drxk-01.fw",
.chunk_size = 62,
- .load_firmware_sync = true,
.qam_demod_parameter_count = 2,
};
@@ -415,7 +416,6 @@ static struct drxk_config pctv_520e_drxk = {
.chunk_size = 58,
.antenna_dvbt = true, /* disable LNA */
.antenna_gpio = (1 << 2), /* disable LNA */
- .load_firmware_sync = true,
};
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
@@ -808,6 +808,14 @@ static struct tda18271_config c3tech_duo_tda18271_config = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
+static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_PARALLEL_16,
+ .agc = 0x99,
+};
/* ------------------------------------------------------------------ */
@@ -815,11 +823,16 @@ static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
{
struct dvb_frontend *fe;
struct xc2028_config cfg;
+ struct xc2028_ctrl ctl;
memset(&cfg, 0, sizeof(cfg));
cfg.i2c_adap = &dev->i2c_adap[dev->def_i2c_bus];
cfg.i2c_addr = addr;
+ memset(&ctl, 0, sizeof(ctl));
+ em28xx_setup_xc3028(dev, &ctl);
+ cfg.ctrl = &ctl;
+
if (!dev->dvb->fe[0]) {
em28xx_errdev("/2: dvb frontend not attached. "
"Can't attach xc3028\n");
@@ -979,12 +992,18 @@ static int em28xx_dvb_init(struct em28xx *dev)
int result = 0, mfe_shared = 0;
struct em28xx_dvb *dvb;
+ if (dev->is_audio_only) {
+		/* Shouldn't initialize DVB for this interface */
+ return 0;
+ }
+
if (!dev->board.has_dvb) {
/* This device does not support the extension */
- printk(KERN_INFO "em28xx_dvb: This device does not support the extension\n");
return 0;
}
+ em28xx_info("Binding DVB extension\n");
+
dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
if (dvb == NULL) {
@@ -994,6 +1013,27 @@ static int em28xx_dvb_init(struct em28xx *dev)
dev->dvb = dvb;
dvb->fe[0] = dvb->fe[1] = NULL;
+ /* pre-allocate DVB usb transfer buffers */
+ if (dev->dvb_xfer_bulk) {
+ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ 512,
+ EM28XX_DVB_BULK_PACKET_MULTIPLIER);
+ } else {
+ result = em28xx_alloc_urbs(dev, EM28XX_DIGITAL_MODE,
+ dev->dvb_xfer_bulk,
+ EM28XX_DVB_NUM_BUFS,
+ dev->dvb_max_pkt_size_isoc,
+ EM28XX_DVB_NUM_ISOC_PACKETS);
+ }
+ if (result) {
+ em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+ kfree(dvb);
+ dev->dvb = NULL;
+ return result;
+ }
+
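/*
 * The DVB URB pre-allocation moved here from the USB probe path so failures
 * can be handled locally: the freshly allocated em28xx_dvb is freed and the
 * rest of the device stays usable. Bulk transfers use a fixed 512-byte packet
 * size with a packet multiplier, while isochronous transfers use the
 * endpoint's maximum packet size determined at probe time
 * (dev->dvb_max_pkt_size_isoc).
 */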
mutex_lock(&dev->lock);
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
@@ -1330,6 +1370,48 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM28178_BOARD_PCTV_461E:
+ {
+ /* demod I2C adapter */
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_board_info info;
+ struct m88ts2022_config m88ts2022_config = {
+ .clock = 27000000,
+ };
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ /* attach demod */
+ dvb->fe[0] = dvb_attach(m88ds3103_attach,
+ &pctv_461e_m88ds3103_config,
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &i2c_adapter);
+ if (dvb->fe[0] == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ /* attach tuner */
+ m88ts2022_config.fe = dvb->fe[0];
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module("m88ts2022");
+ dvb->i2c_client_tuner = i2c_new_device(i2c_adapter, &info);
+
+ /* delegate signal strength measurement to tuner */
+ dvb->fe[0]->ops.read_signal_strength =
+ dvb->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* attach SEC */
+ if (!dvb_attach(a8293_attach, dvb->fe[0],
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &em28xx_a8293_config)) {
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
+ goto out_free;
+ }
+ }
+ break;
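/*
 * The PCTV 461e case follows the "demod first, tuner as I2C client" pattern:
 * dvb_attach() binds m88ds3103 and returns the demod's private I2C adapter,
 * then the m88ts2022 tuner is instantiated on that adapter with
 * i2c_new_device() once request_module() has made its driver available.
 * The a8293 SEC is attached last; if that fails the frontend is detached and
 * the whole init bails out.
 */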
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
@@ -1354,7 +1436,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
/* MFE lock */
dvb->adapter.mfe_shared = mfe_shared;
- em28xx_info("Successfully loaded em28xx-dvb\n");
+ em28xx_info("DVB extension successfully initialized\n");
ret:
em28xx_set_mode(dev, EM28XX_SUSPEND);
mutex_unlock(&dev->lock);
@@ -1375,14 +1457,23 @@ static inline void prevent_sleep(struct dvb_frontend_ops *ops)
static int em28xx_dvb_fini(struct em28xx *dev)
{
+ if (dev->is_audio_only) {
+		/* Shouldn't close DVB for this interface */
+ return 0;
+ }
+
if (!dev->board.has_dvb) {
/* This device does not support the extension */
return 0;
}
+ em28xx_info("Closing DVB extension");
+
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+
if (dev->disconnected) {
/* We cannot tell the device to sleep
* once it has been unplugged. */
@@ -1392,6 +1483,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
prevent_sleep(&dvb->fe[1]->ops);
}
+ i2c_release_client(dvb->i2c_client_tuner);
em28xx_unregister_dvb(dvb);
kfree(dvb);
dev->dvb = NULL;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index c4ff9739a7ae..7e1724076ac4 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
#include "em28xx.h"
#include "tuner-xc2028.h"
@@ -40,7 +41,7 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
/*
* em2800_i2c_send_bytes()
@@ -48,8 +49,8 @@ MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
*/
static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
int ret;
- int write_timeout;
u8 b2[6];
if (len < 1 || len > 4)
@@ -74,22 +75,26 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
return (ret < 0) ? ret : -EIO;
}
/* wait for completion */
- for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x80 + len - 1) {
+ if (ret == 0x80 + len - 1)
return len;
- } else if (ret == 0x94 + len - 1) {
- return -ENODEV;
- } else if (ret < 0) {
+ if (ret == 0x94 + len - 1) {
+ if (i2c_debug == 1)
+ em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
+ if (ret < 0) {
em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
msleep(5);
}
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
- return -EIO;
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ return -ETIMEDOUT;
}
/*
@@ -98,9 +103,9 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
*/
static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
u8 buf2[4];
int ret;
- int read_timeout;
int i;
if (len < 1 || len > 4)
@@ -117,22 +122,28 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
}
/* wait for completion */
- for (read_timeout = EM2800_I2C_XFER_TIMEOUT; read_timeout > 0;
- read_timeout -= 5) {
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0x84 + len - 1) {
+ if (ret == 0x84 + len - 1)
break;
- } else if (ret == 0x94 + len - 1) {
- return -ENODEV;
- } else if (ret < 0) {
+ if (ret == 0x94 + len - 1) {
+ if (i2c_debug == 1)
+ em28xx_warn("R05 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
+ if (ret < 0) {
em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
msleep(5);
}
- if (ret != 0x84 + len - 1)
- em28xx_warn("read from i2c device at 0x%x timed out\n", addr);
+ if (ret != 0x84 + len - 1) {
+ if (i2c_debug)
+ em28xx_warn("read from i2c device at 0x%x timed out\n",
+ addr);
+ }
/* get the received message */
ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
@@ -168,7 +179,8 @@ static int em2800_i2c_check_for_device(struct em28xx *dev, u8 addr)
static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
u16 len, int stop)
{
- int write_timeout, ret;
+ unsigned long timeout = jiffies + msecs_to_jiffies(EM28XX_I2C_XFER_TIMEOUT);
+ int ret;
if (len < 1 || len > 64)
return -EOPNOTSUPP;
@@ -191,16 +203,19 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
}
}
- /* Check success of the i2c operation */
- for (write_timeout = EM2800_I2C_XFER_TIMEOUT; write_timeout > 0;
- write_timeout -= 5) {
+ /* wait for completion */
+ while (time_is_after_jiffies(timeout)) {
ret = dev->em28xx_read_reg(dev, 0x05);
- if (ret == 0) { /* success */
+ if (ret == 0) /* success */
return len;
- } else if (ret == 0x10) {
- return -ENODEV;
- } else if (ret < 0) {
- em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+ if (ret == 0x10) {
+ if (i2c_debug == 1)
+ em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ addr);
+ return -ENXIO;
+ }
+ if (ret < 0) {
+ em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
@@ -211,8 +226,10 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
* (even with high payload) ...
*/
}
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
- return -EIO;
+ if (i2c_debug)
+ em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
+ return -ETIMEDOUT;
}
/*
@@ -242,26 +259,28 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
- * Anyway, the next check will fail with -ENODEV in this case, so avoid
+ * Anyway, the next check will fail with -ENXIO in this case, so avoid
* spamming the system log on device probing and do nothing here.
*/
/* Check success of the i2c operation */
ret = dev->em28xx_read_reg(dev, 0x05);
+ if (ret == 0) /* success */
+ return len;
if (ret < 0) {
- em28xx_warn("failed to read i2c transfer status from bridge (error=%i)\n",
+ em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
ret);
return ret;
}
- if (ret > 0) {
- if (ret == 0x10) {
- return -ENODEV;
- } else {
- em28xx_warn("unknown i2c error (status=%i)\n", ret);
- return -EIO;
- }
+ if (ret == 0x10) {
+ if (i2c_debug == 1)
+ em28xx_warn("I2C transfer timeout on writing to addr 0x%02x",
+ addr);
+ return -ENXIO;
}
- return len;
+
+ em28xx_warn("unknown i2c error (status=%i)\n", ret);
+ return -ETIMEDOUT;
}
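/*
 * Error codes follow the I2C core conventions used throughout this driver:
 * -ENXIO when the transfer completed but the slave did not acknowledge,
 * -ETIMEDOUT when the bridge never reported completion, and -ENODEV only for
 * the zero-length device-presence probe in em28xx_i2c_xfer(). Callers that
 * previously checked for -ENODEV therefore now look for -ENXIO.
 */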
/*
@@ -316,8 +335,12 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
*/
if (!ret)
return len;
- else if (ret > 0)
- return -ENODEV;
+ else if (ret > 0) {
+ if (i2c_debug == 1)
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
return ret;
/*
@@ -355,7 +378,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
* bytes if we are on bus B AND there was no write attempt to the
* specified slave address before AND no device is present at the
* requested slave address.
- * Anyway, the next check will fail with -ENODEV in this case, so avoid
+ * Anyway, the next check will fail with -ENXIO in this case, so avoid
* spamming the system log on device probing and do nothing here.
*/
@@ -367,8 +390,12 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
*/
if (!ret)
return len;
- else if (ret > 0)
- return -ENODEV;
+ else if (ret > 0) {
+ if (i2c_debug == 1)
+ em28xx_warn("Bus B R08 returned 0x%02x: I2C timeout",
+ ret);
+ return -ENXIO;
+ }
return ret;
/*
@@ -409,10 +436,6 @@ static inline int i2c_check_for_device(struct em28xx_i2c_bus *i2c_bus, u16 addr)
rc = em2800_i2c_check_for_device(dev, addr);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
rc = em25xx_bus_B_check_for_device(dev, addr);
- if (rc == -ENODEV) {
- if (i2c_debug)
- printk(" no device\n");
- }
return rc;
}
@@ -421,7 +444,7 @@ static inline int i2c_recv_bytes(struct em28xx_i2c_bus *i2c_bus,
{
struct em28xx *dev = i2c_bus->dev;
u16 addr = msg.addr << 1;
- int byte, rc = -EOPNOTSUPP;
+ int rc = -EOPNOTSUPP;
if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
rc = em28xx_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
@@ -429,10 +452,6 @@ static inline int i2c_recv_bytes(struct em28xx_i2c_bus *i2c_bus,
rc = em2800_i2c_recv_bytes(dev, addr, msg.buf, msg.len);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM25XX_BUS_B)
rc = em25xx_bus_B_recv_bytes(dev, addr, msg.buf, msg.len);
- if (i2c_debug) {
- for (byte = 0; byte < msg.len; byte++)
- printk(" %02x", msg.buf[byte]);
- }
return rc;
}
@@ -441,12 +460,8 @@ static inline int i2c_send_bytes(struct em28xx_i2c_bus *i2c_bus,
{
struct em28xx *dev = i2c_bus->dev;
u16 addr = msg.addr << 1;
- int byte, rc = -EOPNOTSUPP;
+ int rc = -EOPNOTSUPP;
- if (i2c_debug) {
- for (byte = 0; byte < msg.len; byte++)
- printk(" %02x", msg.buf[byte]);
- }
if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM28XX)
rc = em28xx_i2c_send_bytes(dev, addr, msg.buf, msg.len, stop);
else if (i2c_bus->algo_type == EM28XX_I2C_ALGO_EM2800)
@@ -491,33 +506,53 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
- if (i2c_debug)
+ if (i2c_debug > 1)
printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
dev->name, __func__ ,
(msgs[i].flags & I2C_M_RD) ? "read" : "write",
i == num - 1 ? "stop" : "nonstop",
addr, msgs[i].len);
- if (!msgs[i].len) { /* no len: check only for device presence */
+ if (!msgs[i].len) {
+ /*
+ * no len: check only for device presence
+ * This code is only called during device probe.
+ */
rc = i2c_check_for_device(i2c_bus, addr);
- if (rc == -ENODEV) {
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " no device\n");
+ rc = -ENODEV;
+ } else {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " ERROR: %i\n", rc);
+ }
rt_mutex_unlock(&dev->i2c_bus_lock);
return rc;
}
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
rc = i2c_recv_bytes(i2c_bus, msgs[i]);
+
+ if (i2c_debug > 1 && rc >= 0)
+ printk(KERN_CONT " %*ph",
+ msgs[i].len, msgs[i].buf);
} else {
+ if (i2c_debug > 1)
+ printk(KERN_CONT " %*ph",
+ msgs[i].len, msgs[i].buf);
+
/* write bytes */
rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
}
if (rc < 0) {
- if (i2c_debug)
- printk(" ERROR: %i\n", rc);
+ if (i2c_debug > 1)
+ printk(KERN_CONT " ERROR: %i\n", rc);
rt_mutex_unlock(&dev->i2c_bus_lock);
return rc;
}
- if (i2c_debug)
- printk("\n");
+ if (i2c_debug > 1)
+ printk(KERN_CONT "\n");
}
rt_mutex_unlock(&dev->i2c_bus_lock);
@@ -600,7 +635,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
* calculation and returned device dataset. Simplifies the code a lot,
* but we might have to deal with multiple sizes in the future !
*/
- int i, err;
+ int err;
struct em28xx_eeprom *dev_config;
u8 buf, *data;
@@ -631,20 +666,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
goto error;
}
- /* Display eeprom content */
- for (i = 0; i < len; i++) {
- if (0 == (i % 16)) {
- if (dev->eeprom_addrwidth_16bit)
- em28xx_info("i2c eeprom %04x:", i);
- else
- em28xx_info("i2c eeprom %02x:", i);
- }
- printk(" %02x", data[i]);
- if (15 == (i % 16))
- printk("\n");
+ if (i2c_debug) {
+ /* Display eeprom content */
+ print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+ 16, 1, data, len, true);
+
+ if (dev->eeprom_addrwidth_16bit)
+ em28xx_info("eeprom %06x: ... (skipped)\n", 256);
}
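/*
 * print_hex_dump() replaces the hand-rolled dump loop. With
 * DUMP_PREFIX_OFFSET, rowsize 16, groupsize 1 and ascii enabled it prints one
 * line per 16 bytes, e.g. (illustrative data):
 *
 *   eeprom 00000010: 26 00 01 00 f0 10 11 02 0b 40 12 5b 06 57 02 00  &........@.[.W..
 *
 * and the dump is now only emitted when the i2c_debug module parameter is set.
 */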
- if (dev->eeprom_addrwidth_16bit)
- em28xx_info("i2c eeprom %04x: ... (skipped)\n", i);
if (dev->eeprom_addrwidth_16bit &&
data[0] == 0x26 && data[3] == 0x00) {
@@ -736,10 +765,16 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
em28xx_info("\tAC97 audio (5 sample rates)\n");
break;
case 2:
- em28xx_info("\tI2S audio, sample rate=32k\n");
+ if (dev->chip_id < CHIP_ID_EM2860)
+ em28xx_info("\tI2S audio, sample rate=32k\n");
+ else
+ em28xx_info("\tI2S audio, 3 sample rates\n");
break;
case 3:
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ if (dev->chip_id < CHIP_ID_EM2860)
+ em28xx_info("\tI2S audio, 3 sample rates\n");
+ else
+ em28xx_info("\tI2S audio, 5 sample rates\n");
break;
}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index ea181e4b68c5..18f65d89d4bc 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -30,8 +30,9 @@
#include "em28xx.h"
-#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
-#define EM28XX_SBUTTON_QUERY_INTERVAL 500
+#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
+#define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL 500 /* [ms] */
+#define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL 100 /* [ms] */
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
@@ -442,6 +443,7 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
+ case CHIP_ID_EM28178:
return em2874_ir_change_protocol(rc_dev, rc_type);
default:
printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
@@ -470,54 +472,98 @@ static int em28xx_probe_i2c_ir(struct em28xx *dev)
}
/**********************************************************
- Handle Webcam snapshot button
+ Handle buttons
**********************************************************/
-static void em28xx_query_sbutton(struct work_struct *work)
+static void em28xx_query_buttons(struct work_struct *work)
{
- /* Poll the register and see if the button is depressed */
struct em28xx *dev =
- container_of(work, struct em28xx, sbutton_query_work.work);
- int ret;
-
- ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
-
- if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
- u8 cleared;
- /* Button is depressed, clear the register */
- cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
- em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
-
- /* Not emulate the keypress */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 1);
- /* Now unpress the key */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 0);
+ container_of(work, struct em28xx, buttons_query_work.work);
+ u8 i, j;
+ int regval;
+ bool is_pressed, was_pressed;
+ const struct em28xx_led *led;
+
+ /* Poll and evaluate all addresses */
+ for (i = 0; i < dev->num_button_polling_addresses; i++) {
+ /* Read value from register */
+ regval = em28xx_read_reg(dev, dev->button_polling_addresses[i]);
+ if (regval < 0)
+ continue;
+ /* Check states of the buttons and act */
+ j = 0;
+ while (dev->board.buttons[j].role >= 0 &&
+ dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
+ struct em28xx_button *button = &dev->board.buttons[j];
+ /* Check if button uses the current address */
+ if (button->reg_r != dev->button_polling_addresses[i]) {
+ j++;
+ continue;
+ }
+			/* Determine whether the button is pressed now and was pressed last time */
+ is_pressed = regval & button->mask;
+ was_pressed = dev->button_polling_last_values[i]
+ & button->mask;
+ if (button->inverted) {
+ is_pressed = !is_pressed;
+ was_pressed = !was_pressed;
+ }
+ /* Clear button state (if needed) */
+ if (is_pressed && button->reg_clearing)
+ em28xx_write_reg(dev, button->reg_clearing,
+ (~regval & button->mask)
+ | (regval & ~button->mask));
+ /* Handle button state */
+ if (!is_pressed || was_pressed) {
+ j++;
+ continue;
+ }
+ switch (button->role) {
+ case EM28XX_BUTTON_SNAPSHOT:
+ /* Emulate the keypress */
+ input_report_key(dev->sbutton_input_dev,
+ EM28XX_SNAPSHOT_KEY, 1);
+ /* Unpress the key */
+ input_report_key(dev->sbutton_input_dev,
+ EM28XX_SNAPSHOT_KEY, 0);
+ break;
+ case EM28XX_BUTTON_ILLUMINATION:
+ led = em28xx_find_led(dev,
+ EM28XX_LED_ILLUMINATION);
+ /* Switch illumination LED on/off */
+ if (led)
+ em28xx_toggle_reg_bits(dev,
+ led->gpio_reg,
+ led->gpio_mask);
+ break;
+ default:
+ WARN_ONCE(1, "BUG: unhandled button role.");
+ }
+ /* Next button */
+ j++;
+ }
+ /* Save current value for comparison during the next polling */
+ dev->button_polling_last_values[i] = regval;
}
-
/* Schedule next poll */
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ schedule_delayed_work(&dev->buttons_query_work,
+ msecs_to_jiffies(dev->button_polling_interval));
}
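/*
 * The generalized button handler polls each distinct register address once
 * per cycle and derives per-button press edges from the previous readings, so
 * several buttons can share one register. Buttons with a reg_clearing
 * register are latched by the hardware and tolerate the slow debounced
 * interval; buttons without one are volatile and force the faster polling
 * interval chosen in em28xx_init_buttons().
 */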
-static void em28xx_register_snapshot_button(struct em28xx *dev)
+static int em28xx_register_snapshot_button(struct em28xx *dev)
{
struct input_dev *input_dev;
int err;
em28xx_info("Registering snapshot button...\n");
input_dev = input_allocate_device();
- if (!input_dev) {
- em28xx_errdev("input_allocate_device failed\n");
- return;
- }
+ if (!input_dev)
+ return -ENOMEM;
usb_make_path(dev->udev, dev->snapshot_button_path,
sizeof(dev->snapshot_button_path));
strlcat(dev->snapshot_button_path, "/sbutton",
sizeof(dev->snapshot_button_path));
- INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
input_dev->name = "em28xx snapshot button";
input_dev->phys = dev->snapshot_button_path;
@@ -535,25 +581,86 @@ static void em28xx_register_snapshot_button(struct em28xx *dev)
if (err) {
em28xx_errdev("input_register_device failed\n");
input_free_device(input_dev);
- return;
+ return err;
}
dev->sbutton_input_dev = input_dev;
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
- return;
+ return 0;
+}
+static void em28xx_init_buttons(struct em28xx *dev)
+{
+ u8 i = 0, j = 0;
+ bool addr_new = 0;
+
+ dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
+ while (dev->board.buttons[i].role >= 0 &&
+ dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
+ struct em28xx_button *button = &dev->board.buttons[i];
+ /* Check if polling address is already on the list */
+ addr_new = 1;
+ for (j = 0; j < dev->num_button_polling_addresses; j++) {
+ if (button->reg_r == dev->button_polling_addresses[j]) {
+ addr_new = 0;
+ break;
+ }
+ }
+ /* Check if max. number of polling addresses is exceeded */
+ if (addr_new && dev->num_button_polling_addresses
+ >= EM28XX_NUM_BUTTON_ADDRESSES_MAX) {
+ WARN_ONCE(1, "BUG: maximum number of button polling addresses exceeded.");
+ goto next_button;
+ }
+ /* Button role specific checks and actions */
+ if (button->role == EM28XX_BUTTON_SNAPSHOT) {
+ /* Register input device */
+ if (em28xx_register_snapshot_button(dev) < 0)
+ goto next_button;
+ } else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
+ /* Check sanity */
+ if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
+ em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+ goto next_button;
+ }
+ }
+ /* Add read address to list of polling addresses */
+ if (addr_new) {
+ unsigned int index = dev->num_button_polling_addresses;
+ dev->button_polling_addresses[index] = button->reg_r;
+ dev->num_button_polling_addresses++;
+ }
+ /* Reduce polling interval if necessary */
+ if (!button->reg_clearing)
+ dev->button_polling_interval =
+ EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL;
+next_button:
+ /* Next button */
+ i++;
+ }
+
+ /* Start polling */
+ if (dev->num_button_polling_addresses) {
+ memset(dev->button_polling_last_values, 0,
+ EM28XX_NUM_BUTTON_ADDRESSES_MAX);
+ INIT_DELAYED_WORK(&dev->buttons_query_work,
+ em28xx_query_buttons);
+ schedule_delayed_work(&dev->buttons_query_work,
+ msecs_to_jiffies(dev->button_polling_interval));
+ }
}
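/*
 * em28xx_init_buttons() walks the board's sentinel-terminated button list,
 * de-duplicates the registers that need polling (bounded by
 * EM28XX_NUM_BUTTON_ADDRESSES_MAX), registers the snapshot input device on
 * demand and only then schedules the delayed work, so no polling runs on
 * boards without buttons.
 */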
-static void em28xx_deregister_snapshot_button(struct em28xx *dev)
+static void em28xx_shutdown_buttons(struct em28xx *dev)
{
+ /* Cancel polling */
+ cancel_delayed_work_sync(&dev->buttons_query_work);
+ /* Clear polling addresses list */
+ dev->num_button_polling_addresses = 0;
+ /* Deregister input devices */
if (dev->sbutton_input_dev != NULL) {
em28xx_info("Deregistering snapshot button\n");
- cancel_delayed_work_sync(&dev->sbutton_query_work);
input_unregister_device(dev->sbutton_input_dev);
dev->sbutton_input_dev = NULL;
}
- return;
}
static int em28xx_ir_init(struct em28xx *dev)
@@ -564,8 +671,13 @@ static int em28xx_ir_init(struct em28xx *dev)
u64 rc_type;
u16 i2c_rc_dev_addr = 0;
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize IR for this interface */
+ return 0;
+ }
+
+ if (dev->board.buttons)
+ em28xx_init_buttons(dev);
if (dev->board.has_ir_i2c) {
i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
@@ -583,6 +695,8 @@ static int em28xx_ir_init(struct em28xx *dev)
return 0;
}
+ em28xx_info("Registering input extension\n");
+
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc)
@@ -633,6 +747,7 @@ static int em28xx_ir_init(struct em28xx *dev)
case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
+ case CHIP_ID_EM28178:
ir->get_key = em2874_polling_getkey;
rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC |
RC_BIT_RC6_0;
@@ -675,6 +790,8 @@ static int em28xx_ir_init(struct em28xx *dev)
if (err)
goto error;
+	em28xx_info("Input extension successfully initialized\n");
+
return 0;
error:
@@ -688,7 +805,14 @@ static int em28xx_ir_fini(struct em28xx *dev)
{
struct em28xx_IR *ir = dev->ir;
- em28xx_deregister_snapshot_button(dev);
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize IR for this interface */
+ return 0;
+ }
+
+ em28xx_info("Closing input extension");
+
+ em28xx_shutdown_buttons(dev);
/* skip detach on non attached boards */
if (!ir)
@@ -722,7 +846,8 @@ static void __exit em28xx_rc_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
-MODULE_DESCRIPTION("Em28xx Input driver");
+MODULE_DESCRIPTION(DRIVER_DESC " - input interface");
+MODULE_VERSION(EM28XX_VERSION);
module_init(em28xx_rc_register);
module_exit(em28xx_rc_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 0e0477847965..311fb349dafa 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -25,10 +25,12 @@
#define EM28XX_R00_CHIPCFG 0x00
/* em28xx Chip Configuration 0x00 */
-#define EM28XX_CHIPCFG_VENDOR_AUDIO 0x80
-#define EM28XX_CHIPCFG_I2S_VOLUME_CAPABLE 0x40
-#define EM28XX_CHIPCFG_I2S_5_SAMPRATES 0x30
-#define EM28XX_CHIPCFG_I2S_3_SAMPRATES 0x20
+#define EM2860_CHIPCFG_VENDOR_AUDIO 0x80
+#define EM2860_CHIPCFG_I2S_VOLUME_CAPABLE 0x40
+#define EM2820_CHIPCFG_I2S_3_SAMPRATES 0x30
+#define EM2860_CHIPCFG_I2S_5_SAMPRATES 0x30
+#define EM2820_CHIPCFG_I2S_1_SAMPRATE 0x20
+#define EM2860_CHIPCFG_I2S_3_SAMPRATES 0x20
#define EM28XX_CHIPCFG_AC97 0x10
#define EM28XX_CHIPCFG_AUDIOMASK 0x30
@@ -245,6 +247,7 @@ enum em28xx_chip_id {
CHIP_ID_EM2874 = 65,
CHIP_ID_EM2884 = 68,
CHIP_ID_EM28174 = 113,
+ CHIP_ID_EM28178 = 114,
};
/*
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
new file mode 100644
index 000000000000..bce438691e0e
--- /dev/null
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -0,0 +1,20 @@
+/*
+ em28xx-video.c - driver for Empia EM2800/EM2820/2840 USB
+ video capture devices
+
+ Copyright (C) 2013-2014 Mauro Carvalho Chehab <m.chehab@samsung.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ */
+
+
+int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
+int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
+extern struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 39f39c527c13..db3d655600df 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include "em28xx.h"
+#include "em28xx-v4l.h"
static unsigned int vbibufs = 5;
module_param(vbibufs, int, 0644);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index dd19c9ff76e0..c3c928937dcd 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -38,9 +38,11 @@
#include <linux/slab.h>
#include "em28xx.h"
+#include "em28xx-v4l.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-clk.h>
#include <media/msp3400.h>
#include <media/tuner.h>
@@ -49,19 +51,23 @@
"Mauro Carvalho Chehab <mchehab@infradead.org>, " \
"Sascha Sommer <saschasommer@freenet.de>"
-#define DRIVER_DESC "Empia em28xx based USB video device driver"
+static unsigned int isoc_debug;
+module_param(isoc_debug, int, 0644);
+MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
+
+static unsigned int disable_vbi;
+module_param(disable_vbi, int, 0644);
+MODULE_PARM_DESC(disable_vbi, "disable vbi support");
-#define EM28XX_VERSION "0.2.0"
+static int alt;
+module_param(alt, int, 0644);
+MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
#define em28xx_videodbg(fmt, arg...) do {\
if (video_debug) \
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); } while (0)
-static unsigned int isoc_debug;
-module_param(isoc_debug, int, 0644);
-MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
-
#define em28xx_isocdbg(fmt, arg...) \
do {\
if (isoc_debug) { \
@@ -71,7 +77,7 @@ do {\
} while (0)
MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(EM28XX_VERSION);
@@ -135,6 +141,257 @@ static struct em28xx_fmt format[] = {
},
};
+static int em28xx_vbi_supported(struct em28xx *dev)
+{
+ /* Modprobe option to manually disable */
+ if (disable_vbi == 1)
+ return 0;
+
+ if (dev->board.is_webcam)
+ return 0;
+
+ /* FIXME: check subdevices for VBI support */
+
+ if (dev->chip_id == CHIP_ID_EM2860 ||
+ dev->chip_id == CHIP_ID_EM2883)
+ return 1;
+
+ /* Version of em28xx that does not support VBI */
+ return 0;
+}
+
+/*
+ * em28xx_wake_i2c()
+ * configure i2c attached devices
+ */
+static void em28xx_wake_i2c(struct em28xx *dev)
+{
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
+ INPUT(dev->ctl_input)->vmux, 0, 0);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+}
+
+static int em28xx_colorlevels_set_default(struct em28xx *dev)
+{
+ em28xx_write_reg(dev, EM28XX_R20_YGAIN, CONTRAST_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R21_YOFFSET, BRIGHTNESS_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R22_UVGAIN, SATURATION_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R23_UOFFSET, BLUE_BALANCE_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R24_VOFFSET, RED_BALANCE_DEFAULT);
+ em28xx_write_reg(dev, EM28XX_R25_SHARPNESS, SHARPNESS_DEFAULT);
+
+ em28xx_write_reg(dev, EM28XX_R14_GAMMA, 0x20);
+ em28xx_write_reg(dev, EM28XX_R15_RGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R16_GGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R17_BGAIN, 0x20);
+ em28xx_write_reg(dev, EM28XX_R18_ROFFSET, 0x00);
+ em28xx_write_reg(dev, EM28XX_R19_GOFFSET, 0x00);
+ return em28xx_write_reg(dev, EM28XX_R1A_BOFFSET, 0x00);
+}
+
+static int em28xx_set_outfmt(struct em28xx *dev)
+{
+ int ret;
+ u8 fmt, vinctrl;
+
+ fmt = dev->format->reg;
+ if (!dev->is_em25xx)
+ fmt |= 0x20;
+ /*
+ * NOTE: it's not clear if this is really needed !
+ * The datasheets say bit 5 is a reserved bit and devices seem to work
+ * fine without it. But the Windows driver sets it for em2710/50+em28xx
+ * devices and we've always been setting it, too.
+ *
+ * em2765 (em25xx, em276x/7x/8x) devices do NOT work with this bit set,
+ * it's likely used for an additional (compressed ?) format there.
+ */
+ ret = em28xx_write_reg(dev, EM28XX_R27_OUTFMT, fmt);
+ if (ret < 0)
+ return ret;
+
+ ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode);
+ if (ret < 0)
+ return ret;
+
+ vinctrl = dev->vinctl;
+ if (em28xx_vbi_supported(dev) == 1) {
+ vinctrl |= EM28XX_VINCTRL_VBI_RAW;
+ em28xx_write_reg(dev, EM28XX_R34_VBI_START_H, 0x00);
+ em28xx_write_reg(dev, EM28XX_R36_VBI_WIDTH, dev->vbi_width/4);
+ em28xx_write_reg(dev, EM28XX_R37_VBI_HEIGHT, dev->vbi_height);
+ if (dev->norm & V4L2_STD_525_60) {
+ /* NTSC */
+ em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x09);
+ } else if (dev->norm & V4L2_STD_625_50) {
+ /* PAL */
+ em28xx_write_reg(dev, EM28XX_R35_VBI_START_V, 0x07);
+ }
+ }
+
+ return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctrl);
+}
+
+static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
+ u8 ymin, u8 ymax)
+{
+ em28xx_videodbg("em28xx Scale: (%d,%d)-(%d,%d)\n",
+ xmin, ymin, xmax, ymax);
+
+ em28xx_write_regs(dev, EM28XX_R28_XMIN, &xmin, 1);
+ em28xx_write_regs(dev, EM28XX_R29_XMAX, &xmax, 1);
+ em28xx_write_regs(dev, EM28XX_R2A_YMIN, &ymin, 1);
+ return em28xx_write_regs(dev, EM28XX_R2B_YMAX, &ymax, 1);
+}
+
+static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
+ u16 width, u16 height)
+{
+ u8 cwidth = width >> 2;
+ u8 cheight = height >> 2;
+ u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
+ /* NOTE: size limit: 2047x1023 = 2MPix */
+
+ em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
+ hstart, vstart,
+ ((overflow & 2) << 9 | cwidth << 2),
+ ((overflow & 1) << 10 | cheight << 2));
+
+ em28xx_write_regs(dev, EM28XX_R1C_HSTART, &hstart, 1);
+ em28xx_write_regs(dev, EM28XX_R1D_VSTART, &vstart, 1);
+ em28xx_write_regs(dev, EM28XX_R1E_CWIDTH, &cwidth, 1);
+ em28xx_write_regs(dev, EM28XX_R1F_CHEIGHT, &cheight, 1);
+ em28xx_write_regs(dev, EM28XX_R1B_OFLOW, &overflow, 1);
+
+ /* FIXME: function/meaning of these registers ? */
+ /* FIXME: align width+height to multiples of 4 ?! */
+ if (dev->is_em25xx) {
+ em28xx_write_reg(dev, 0x34, width >> 4);
+ em28xx_write_reg(dev, 0x35, height >> 4);
+ }
+}
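
For reference, the width/height packing done by em28xx_capture_area_set() above can be exercised with a small stand-alone sketch. Only the shift/mask arithmetic is taken from the function; pack_capture_area() and the sample resolutions are made up for this illustration and are not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the register packing in em28xx_capture_area_set() (sketch only). */
static void pack_capture_area(unsigned int width, unsigned int height)
{
	uint8_t cwidth = width >> 2;		/* width bits 2..9  */
	uint8_t cheight = height >> 2;		/* height bits 2..9 */
	uint8_t overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);

	/* Reconstruct the size the hardware would see, as in the debug print */
	unsigned int w = (overflow & 1) << 10 | cwidth << 2;
	unsigned int h = (overflow & 2) << 9 | cheight << 2;

	printf("%ux%u -> cwidth=%u cheight=%u overflow=0x%02x -> %ux%u\n",
	       width, height, (unsigned int)cwidth, (unsigned int)cheight,
	       (unsigned int)overflow, w, h);
}

int main(void)
{
	pack_capture_area(720, 576);	/* PAL: fits the 8-bit registers, overflow = 0 */
	pack_capture_area(1280, 720);	/* width bit 10 spills into the overflow register */
	return 0;
}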
+
+static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
+{
+ u8 mode;
+ /* the em2800 scaler only supports scaling down to 50% */
+
+ if (dev->board.is_em2800) {
+ mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
+ } else {
+ u8 buf[2];
+
+ buf[0] = h;
+ buf[1] = h >> 8;
+ em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
+
+ buf[0] = v;
+ buf[1] = v >> 8;
+ em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
+ /* it seems that both H and V scalers must be active
+ to work correctly */
+ mode = (h || v) ? 0x30 : 0x00;
+ }
+ return em28xx_write_reg_bits(dev, EM28XX_R26_COMPR, mode, 0x30);
+}
+
+/* FIXME: this function only reads values from dev */
+static int em28xx_resolution_set(struct em28xx *dev)
+{
+ int width, height;
+ width = norm_maxw(dev);
+ height = norm_maxh(dev);
+
+ /* Properly setup VBI */
+ dev->vbi_width = 720;
+ if (dev->norm & V4L2_STD_525_60)
+ dev->vbi_height = 12;
+ else
+ dev->vbi_height = 18;
+
+ em28xx_set_outfmt(dev);
+
+ em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
+
+ /* If we don't set the start position to 2 in VBI mode, we end up
+ with line 20/21 being YUYV encoded instead of being in 8-bit
+ greyscale. The core of the issue is that lines 21 (and 23 for
+ PAL WSS) are inside the active video region, and as a result they
+ get the pixel formatting associated with that area. So by cropping
+ them out, we end up with the same format as the rest of the VBI
+ region */
+ if (em28xx_vbi_supported(dev) == 1)
+ em28xx_capture_area_set(dev, 0, 2, width, height);
+ else
+ em28xx_capture_area_set(dev, 0, 0, width, height);
+
+ return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
+}
+
+/* Set USB alternate setting for analog video */
+static int em28xx_set_alternate(struct em28xx *dev)
+{
+ int errCode;
+ int i;
+ unsigned int min_pkt_size = dev->width * 2 + 4;
+
+ /* NOTE: for isoc transfers, only alt settings > 0 are allowed;
+ bulk transfers seem to work only with alt=0 ! */
+ dev->alt = 0;
+ if ((alt > 0) && (alt < dev->num_alt)) {
+ em28xx_videodbg("alternate forced to %d\n", dev->alt);
+ dev->alt = alt;
+ goto set_alt;
+ }
+ if (dev->analog_xfer_bulk)
+ goto set_alt;
+
+ /* When the image size is bigger than a certain value,
+ the frame size should be increased; otherwise, only
+ a green screen will be received.
+ */
+ if (dev->width * 2 * dev->height > 720 * 240 * 2)
+ min_pkt_size *= 2;
+
+ for (i = 0; i < dev->num_alt; i++) {
+ /* stop when the selected alt setting offers enough bandwidth */
+ if (dev->alt_max_pkt_size_isoc[i] >= min_pkt_size) {
+ dev->alt = i;
+ break;
+ /* otherwise make sure that we end up with the maximum bandwidth
+ because the min_pkt_size equation might be wrong...
+ */
+ } else if (dev->alt_max_pkt_size_isoc[i] >
+ dev->alt_max_pkt_size_isoc[dev->alt])
+ dev->alt = i;
+ }
+
+set_alt:
+ /* NOTE: for bulk transfers, we need to call usb_set_interface()
+ * even if the previous settings were the same. Otherwise streaming
+ * fails with all urbs having status = -EOVERFLOW ! */
+ if (dev->analog_xfer_bulk) {
+ dev->max_pkt_size = 512; /* USB 2.0 spec */
+ dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
+ } else { /* isoc */
+ em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n",
+ min_pkt_size, dev->alt);
+ dev->max_pkt_size =
+ dev->alt_max_pkt_size_isoc[dev->alt];
+ dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
+ }
+ em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
+ dev->alt, dev->max_pkt_size);
+ errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ if (errCode < 0) {
+ em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
+ return errCode;
+ }
+ return 0;
+}
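
The isoc bandwidth estimate above is easy to check offline. The sketch below reuses only the min_pkt_size arithmetic and the selection loop from em28xx_set_alternate(); pick_alt() and the sample wMaxPacketSize table are invented for the example and are not part of the patch:

#include <stdio.h>

/* Mirrors the packet-size selection in em28xx_set_alternate() (sketch only). */
static int pick_alt(unsigned int width, unsigned int height,
		    const unsigned int *max_pkt, int num_alt)
{
	unsigned int min_pkt_size = width * 2 + 4; /* 2 bytes/pixel per line, +4 as in the driver */
	int i, alt = 0;

	/* Bigger frames need twice the per-packet payload (see the code above) */
	if (width * 2 * height > 720 * 240 * 2)
		min_pkt_size *= 2;

	for (i = 0; i < num_alt; i++) {
		if (max_pkt[i] >= min_pkt_size) {
			alt = i;
			break;
		} else if (max_pkt[i] > max_pkt[alt]) {
			alt = i;	/* otherwise fall back to the largest packet size */
		}
	}

	printf("%ux%u: min_pkt_size=%u -> alt=%d (wMaxPacketSize=%u)\n",
	       width, height, min_pkt_size, alt, max_pkt[alt]);
	return alt;
}

int main(void)
{
	/* hypothetical per-altsetting isoc packet sizes */
	static const unsigned int max_pkt[] = { 0, 940, 1448, 2048, 3072 };

	pick_alt(640, 240, max_pkt, 5);	/* small frame: alt 2 already offers enough */
	pick_alt(720, 576, max_pkt, 5);	/* full PAL frame: needs the largest alt */
	return 0;
}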
+
/* ------------------------------------------------------------------
DMA and thread functions
------------------------------------------------------------------*/
@@ -763,7 +1020,7 @@ static struct vb2_ops em28xx_video_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-int em28xx_vb2_setup(struct em28xx *dev)
+static int em28xx_vb2_setup(struct em28xx *dev)
{
int rc;
struct vb2_queue *q;
@@ -831,7 +1088,7 @@ static void video_mux(struct em28xx *dev, int index)
em28xx_audio_analog_set(dev);
}
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
{
struct em28xx *dev = priv;
@@ -890,7 +1147,7 @@ static int em28xx_s_ctrl(struct v4l2_ctrl *ctrl)
return (ret < 0) ? ret : 0;
}
-const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
+static const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
.s_ctrl = em28xx_s_ctrl,
};
@@ -1368,7 +1625,7 @@ static int vidioc_g_register(struct file *file, void *priv,
reg->val = ret;
} else {
__le16 val = 0;
- ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
+ ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
reg->reg, (char *)&val, 2);
if (ret < 0)
return ret;
@@ -1570,6 +1827,10 @@ static int em28xx_v4l2_open(struct file *filp)
case VFL_TYPE_VBI:
fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
break;
+ case VFL_TYPE_RADIO:
+ break;
+ default:
+ return -EINVAL;
}
em28xx_videodbg("open dev=%s type=%s users=%d\n",
@@ -1590,15 +1851,17 @@ static int em28xx_v4l2_open(struct file *filp)
fh->type = fh_type;
filp->private_data = fh;
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users == 0) {
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
- em28xx_resolution_set(dev);
- /* Needed, since GPIO might have disabled power of
- some i2c device
+ if (vdev->vfl_type != VFL_TYPE_RADIO)
+ em28xx_resolution_set(dev);
+
+ /*
+ * Needed, since GPIO might have disabled power
+ * of some i2c devices
*/
em28xx_wake_i2c(dev);
-
}
if (vdev->vfl_type == VFL_TYPE_RADIO) {
@@ -1615,40 +1878,59 @@ static int em28xx_v4l2_open(struct file *filp)
}
/*
- * em28xx_realease_resources()
+ * em28xx_v4l2_fini()
* unregisters the v4l2, i2c and usb devices
* called when the device gets disconnected or at module unload
*/
-void em28xx_release_analog_resources(struct em28xx *dev)
+static int em28xx_v4l2_fini(struct em28xx *dev)
{
+ if (dev->is_audio_only) {
+ /* Nothing to do for the audio-only interface */
+ return 0;
+ }
+
+ if (!dev->has_video) {
+ /* This device does not support the v4l2 extension */
+ return 0;
+ }
- /*FIXME: I2C IR should be disconnected */
+ em28xx_info("Closing video extension");
+
+ mutex_lock(&dev->lock);
+
+ v4l2_device_disconnect(&dev->v4l2_dev);
+
+ em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
if (dev->radio_dev) {
- if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
- else
- video_device_release(dev->radio_dev);
- dev->radio_dev = NULL;
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->radio_dev));
+ video_unregister_device(dev->radio_dev);
}
if (dev->vbi_dev) {
em28xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vbi_dev));
- if (video_is_registered(dev->vbi_dev))
- video_unregister_device(dev->vbi_dev);
- else
- video_device_release(dev->vbi_dev);
- dev->vbi_dev = NULL;
+ video_unregister_device(dev->vbi_dev);
}
if (dev->vdev) {
em28xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vdev));
- if (video_is_registered(dev->vdev))
- video_unregister_device(dev->vdev);
- else
- video_device_release(dev->vdev);
- dev->vdev = NULL;
+ video_unregister_device(dev->vdev);
}
+
+ if (dev->clk) {
+ v4l2_clk_unregister_fixed(dev->clk);
+ dev->clk = NULL;
+ }
+
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ if (dev->users)
+ em28xx_warn("Device is open ! Memory deallocation is deferred on last close.\n");
+ mutex_unlock(&dev->lock);
+
+ return 0;
}
/*
@@ -1668,14 +1950,10 @@ static int em28xx_v4l2_close(struct file *filp)
mutex_lock(&dev->lock);
if (dev->users == 1) {
- /* the device is already disconnect,
- free the remaining resources */
+ /* free the remaining resources if device is disconnected */
if (dev->disconnected) {
- em28xx_release_resources(dev);
kfree(dev->alt_max_pkt_size_isoc);
- mutex_unlock(&dev->lock);
- kfree(dev);
- return 0;
+ goto exit;
}
/* Save some power by putting tuner to sleep */
@@ -1694,11 +1972,29 @@ static int em28xx_v4l2_close(struct file *filp)
}
}
+exit:
dev->users--;
mutex_unlock(&dev->lock);
return 0;
}
+/*
+ * em28xx_videodevice_release()
+ * called when the last user of the video device exits and frees the memory
+ */
+static void em28xx_videodevice_release(struct video_device *vdev)
+{
+ struct em28xx *dev = video_get_drvdata(vdev);
+
+ video_device_release(vdev);
+ if (vdev == dev->vdev)
+ dev->vdev = NULL;
+ else if (vdev == dev->vbi_dev)
+ dev->vbi_dev = NULL;
+ else if (vdev == dev->radio_dev)
+ dev->radio_dev = NULL;
+}
+
static const struct v4l2_file_operations em28xx_v4l_fops = {
.owner = THIS_MODULE,
.open = em28xx_v4l2_open,
@@ -1753,11 +2049,10 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
};
static const struct video_device em28xx_video_template = {
- .fops = &em28xx_v4l_fops,
- .release = video_device_release_empty,
- .ioctl_ops = &video_ioctl_ops,
-
- .tvnorms = V4L2_STD_ALL,
+ .fops = &em28xx_v4l_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = em28xx_videodevice_release,
+ .tvnorms = V4L2_STD_ALL,
};
static const struct v4l2_file_operations radio_fops = {
@@ -1783,14 +2078,30 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
};
static struct video_device em28xx_radio_template = {
- .name = "em28xx-radio",
- .fops = &radio_fops,
- .ioctl_ops = &radio_ioctl_ops,
+ .fops = &radio_fops,
+ .ioctl_ops = &radio_ioctl_ops,
+ .release = em28xx_videodevice_release,
};
-/******************************** usb interface ******************************/
+/* Possible I2C addresses for saa7115, tvp5150, msp3400, tvaudio */
+static unsigned short saa711x_addrs[] = {
+ 0x4a >> 1, 0x48 >> 1, /* SAA7111, SAA7111A and SAA7113 */
+ 0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
+ I2C_CLIENT_END };
+static unsigned short tvp5150_addrs[] = {
+ 0xb8 >> 1,
+ 0xba >> 1,
+ I2C_CLIENT_END
+};
+static unsigned short msp3400_addrs[] = {
+ 0x80 >> 1,
+ 0x88 >> 1,
+ I2C_CLIENT_END
+};
+
+/******************************** usb interface ******************************/
static struct video_device *em28xx_vdev_init(struct em28xx *dev,
const struct video_device *template,
@@ -1817,14 +2128,198 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
return vfd;
}
-int em28xx_register_analog_devices(struct em28xx *dev)
+static void em28xx_tuner_setup(struct em28xx *dev)
+{
+ struct tuner_setup tun_setup;
+ struct v4l2_frequency f;
+
+ if (dev->tuner_type == TUNER_ABSENT)
+ return;
+
+ memset(&tun_setup, 0, sizeof(tun_setup));
+
+ tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
+ tun_setup.tuner_callback = em28xx_tuner_callback;
+
+ if (dev->board.radio.type) {
+ tun_setup.type = dev->board.radio.type;
+ tun_setup.addr = dev->board.radio_addr;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+ }
+
+ if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type)) {
+ tun_setup.type = dev->tuner_type;
+ tun_setup.addr = dev->tuner_addr;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+ }
+
+ if (dev->tda9887_conf) {
+ struct v4l2_priv_tun_config tda9887_cfg;
+
+ tda9887_cfg.tuner = TUNER_TDA9887;
+ tda9887_cfg.priv = &dev->tda9887_conf;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &tda9887_cfg);
+ }
+
+ if (dev->tuner_type == TUNER_XC2028) {
+ struct v4l2_priv_tun_config xc2028_cfg;
+ struct xc2028_ctrl ctl;
+
+ memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+ memset(&ctl, 0, sizeof(ctl));
+
+ em28xx_setup_xc3028(dev, &ctl);
+
+ xc2028_cfg.tuner = TUNER_XC2028;
+ xc2028_cfg.priv = &ctl;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc2028_cfg);
+ }
+
+ /* configure tuner */
+ f.tuner = 0;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ f.frequency = 9076; /* just a magic number */
+ dev->ctl_freq = f.frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+}
+
+static int em28xx_v4l2_init(struct em28xx *dev)
{
u8 val;
int ret;
unsigned int maxw;
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+
+ if (dev->is_audio_only) {
+ /* Shouldn't initialize the V4L2 extension for this interface */
+ return 0;
+ }
+
+ if (!dev->has_video) {
+ /* This device does not support the v4l2 extension */
+ return 0;
+ }
+
+ em28xx_info("Registering V4L2 extension\n");
+
+ mutex_lock(&dev->lock);
+
+ ret = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+ if (ret < 0) {
+ em28xx_errdev("Call to v4l2_device_register() failed!\n");
+ goto err;
+ }
+
+ v4l2_ctrl_handler_init(hdl, 8);
+ dev->v4l2_dev.ctrl_handler = hdl;
+
+ /*
+ * Default format, used for tvp5150 or saa711x output formats
+ */
+ dev->vinmode = 0x10;
+ dev->vinctl = EM28XX_VINCTRL_INTERLACED |
+ EM28XX_VINCTRL_CCIR656_ENABLE;
+
+ /* request some modules */
+
+ if (dev->board.has_msp34xx)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "msp3400", 0, msp3400_addrs);
+
+ if (dev->board.decoder == EM28XX_SAA711X)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "saa7115_auto", 0, saa711x_addrs);
+
+ if (dev->board.decoder == EM28XX_TVP5150)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tvp5150", 0, tvp5150_addrs);
+
+ if (dev->board.adecoder == EM28XX_TVAUDIO)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tvaudio", dev->board.tvaudio_addr, NULL);
+
+ /* Initialize tuner and camera */
+
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
+
+ if (dev->board.radio.type)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", dev->board.radio_addr, NULL);
+
+ if (has_demod)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_adap[dev->def_i2c_bus], "tuner",
+ 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
+ if (dev->tuner_addr == 0) {
+ enum v4l2_i2c_tuner_type type =
+ has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
+ struct v4l2_subdev *sd;
+
+ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_adap[dev->def_i2c_bus], "tuner",
+ 0, v4l2_i2c_tuner_addrs(type));
+
+ if (sd)
+ dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
+ } else {
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", dev->tuner_addr, NULL);
+ }
+ }
+
+ em28xx_tuner_setup(dev);
+ em28xx_init_camera(dev);
+
+ /* Configure audio */
+ ret = em28xx_audio_setup(dev);
+ if (ret < 0) {
+ em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+ } else {
+ /* install the em28xx notify callback */
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
+ em28xx_ctrl_notify, dev);
+ v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
+ em28xx_ctrl_notify, dev);
+ }
+
+ /* wake i2c devices */
+ em28xx_wake_i2c(dev);
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&dev->vidq.active);
+ INIT_LIST_HEAD(&dev->vbiq.active);
- printk(KERN_INFO "%s: v4l2 driver version %s\n",
- dev->name, EM28XX_VERSION);
+ if (dev->board.has_msp34xx) {
+ /* Send a reset to other chips via gpio */
+ ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
+ if (ret < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ msleep(3);
+
+ ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
+ if (ret < 0) {
+ em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+ __func__, ret);
+ goto unregister_dev;
+ }
+ msleep(3);
+ }
/* set default norm */
dev->norm = V4L2_STD_PAL;
@@ -1888,14 +2383,16 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* Reset image controls */
em28xx_colorlevels_set_default(dev);
v4l2_ctrl_handler_setup(&dev->ctrl_handler);
- if (dev->ctrl_handler.error)
- return dev->ctrl_handler.error;
+ ret = dev->ctrl_handler.error;
+ if (ret)
+ goto unregister_dev;
/* allocate and fill video video_device struct */
dev->vdev = em28xx_vdev_init(dev, &em28xx_video_template, "video");
if (!dev->vdev) {
em28xx_errdev("cannot allocate video_device.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unregister_dev;
}
dev->vdev->queue = &dev->vb_vidq;
dev->vdev->queue->lock = &dev->vb_queue_lock;
@@ -1925,7 +2422,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
if (ret) {
em28xx_errdev("unable to register video device (error=%i).\n",
ret);
- return ret;
+ goto unregister_dev;
}
/* Allocate and fill vbi video_device struct */
@@ -1954,7 +2451,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
vbi_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("unable to register vbi device\n");
- return ret;
+ goto unregister_dev;
}
}
@@ -1963,13 +2460,14 @@ int em28xx_register_analog_devices(struct em28xx *dev)
"radio");
if (!dev->radio_dev) {
em28xx_errdev("cannot allocate video_device.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unregister_dev;
}
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
em28xx_errdev("can't register radio device\n");
- return ret;
+ goto unregister_dev;
}
em28xx_info("Registered radio device as %s\n",
video_device_node_name(dev->radio_dev));
@@ -1982,5 +2480,41 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_info("V4L2 VBI device registered as %s\n",
video_device_node_name(dev->vbi_dev));
+ /* Save some power by putting tuner to sleep */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+
+ /* initialize videobuf2 stuff */
+ em28xx_vb2_setup(dev);
+
+ em28xx_info("V4L2 extension successfully initialized\n");
+
+ mutex_unlock(&dev->lock);
return 0;
+
+unregister_dev:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+err:
+ mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static struct em28xx_ops v4l2_ops = {
+ .id = EM28XX_V4L2,
+ .name = "Em28xx v4l2 Extension",
+ .init = em28xx_v4l2_init,
+ .fini = em28xx_v4l2_fini,
+};
+
+static int __init em28xx_video_register(void)
+{
+ return em28xx_register_extension(&v4l2_ops);
+}
+
+static void __exit em28xx_video_unregister(void)
+{
+ em28xx_unregister_extension(&v4l2_ops);
}
+
+module_init(em28xx_video_register);
+module_exit(em28xx_video_unregister);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index f8726ad5d0a8..32d8a4bb7961 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -26,6 +26,9 @@
#ifndef _EM28XX_H
#define _EM28XX_H
+#define EM28XX_VERSION "0.2.1"
+#define DRIVER_DESC "Empia em28xx device driver"
+
#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
@@ -132,6 +135,8 @@
#define EM2884_BOARD_C3TECH_DIGITAL_DUO 88
#define EM2874_BOARD_DELOCK_61959 89
#define EM2874_BOARD_KWORLD_UB435Q_V2 90
+#define EM2765_BOARD_SPEEDLINK_VAD_LAPLACE 91
+#define EM28178_BOARD_PCTV_461E 92
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -178,8 +183,27 @@
#define EM28XX_INTERLACED_DEFAULT 1
-/* time in msecs to wait for i2c writes to finish */
-#define EM2800_I2C_XFER_TIMEOUT 20
+/*
+ * Time in msecs to wait for i2c xfers to finish.
+ * 35ms is the maximum time a SMBUS device could wait when
+ * clock stretching is used. As the transfer itself will take
+ * some time to happen, set it slightly higher, to 36 ms.
+ *
+ * Ok, I2C doesn't specify any limit. So, eventually, we may need
+ * to increase this timeout.
+ *
+ * FIXME: this assumes that an I2C message is not longer than 1ms.
+ * This is actually dependent on the I2C bus speed, although most
+ * devices use a 100kHz clock. So, this assumption is true most of
+ * the time.
+ */
+#define EM28XX_I2C_XFER_TIMEOUT 36
+
+/* time in msecs to wait for AC97 xfers to finish */
+#define EM28XX_AC97_XFER_TIMEOUT 100
+
+/* max. number of button state polling addresses */
+#define EM28XX_NUM_BUTTON_ADDRESSES_MAX 5
enum em28xx_mode {
EM28XX_SUSPEND,
@@ -287,8 +311,7 @@ struct em28xx_audio_mode {
unsigned int has_audio:1;
- unsigned int i2s_3rates:1;
- unsigned int i2s_5rates:1;
+ u8 i2s_samplerates;
};
/* em28xx has two audio inputs: tuner and line in.
@@ -374,6 +397,33 @@ enum em28xx_adecoder {
EM28XX_TVAUDIO,
};
+enum em28xx_led_role {
+ EM28XX_LED_ANALOG_CAPTURING = 0,
+ EM28XX_LED_ILLUMINATION,
+ EM28XX_NUM_LED_ROLES, /* must be the last */
+};
+
+struct em28xx_led {
+ enum em28xx_led_role role;
+ u8 gpio_reg;
+ u8 gpio_mask;
+ bool inverted;
+};
+
+enum em28xx_button_role {
+ EM28XX_BUTTON_SNAPSHOT = 0,
+ EM28XX_BUTTON_ILLUMINATION,
+ EM28XX_NUM_BUTTON_ROLES, /* must be the last */
+};
+
+struct em28xx_button {
+ enum em28xx_button_role role;
+ u8 reg_r;
+ u8 reg_clearing;
+ u8 mask;
+ bool inverted;
+};
+
struct em28xx_board {
char *name;
int vchannels;
@@ -395,7 +445,6 @@ struct em28xx_board {
unsigned int mts_firmware:1;
unsigned int max_range_640_480:1;
unsigned int has_dvb:1;
- unsigned int has_snapshot_button:1;
unsigned int is_webcam:1;
unsigned int valid:1;
unsigned int has_ir_i2c:1;
@@ -410,6 +459,12 @@ struct em28xx_board {
struct em28xx_input input[MAX_EM28XX_INPUT];
struct em28xx_input radio;
char *ir_codes;
+
+ /* LEDs that need to be controlled explicitly */
+ struct em28xx_led *leds;
+
+ /* Buttons */
+ struct em28xx_button *buttons;
};
struct em28xx_eeprom {
@@ -426,15 +481,13 @@ struct em28xx_eeprom {
u8 string_idx_table;
};
-#define EM28XX_AUDIO_BUFS 5
-#define EM28XX_NUM_AUDIO_PACKETS 64
-#define EM28XX_AUDIO_MAX_PACKET_SIZE 196 /* static value */
#define EM28XX_CAPTURE_STREAM_EN 1
/* em28xx extensions */
#define EM28XX_AUDIO 0x10
#define EM28XX_DVB 0x20
#define EM28XX_RC 0x30
+#define EM28XX_V4L2 0x40
/* em28xx resource types (used for res_get/res_lock etc */
#define EM28XX_RESOURCE_VIDEO 0x01
@@ -442,8 +495,9 @@ struct em28xx_eeprom {
struct em28xx_audio {
char name[50];
- char *transfer_buffer[EM28XX_AUDIO_BUFS];
- struct urb *urb[EM28XX_AUDIO_BUFS];
+ unsigned num_urb;
+ char **transfer_buffer;
+ struct urb **urb;
struct usb_device *udev;
unsigned int capture_transfer_done;
struct snd_pcm_substream *capture_pcm_substream;
@@ -451,6 +505,8 @@ struct em28xx_audio {
unsigned int hwptr_done_capture;
struct snd_card *sndcard;
+ size_t period;
+
int users;
spinlock_t slock;
};
@@ -485,11 +541,13 @@ struct em28xx {
int model; /* index in the device_data struct */
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
- unsigned int is_em25xx:1; /* em25xx/em276x/7x/8x family bridge */
+ unsigned int is_em25xx:1; /* em25xx/em276x/7x/8x family bridge */
unsigned char disconnected:1; /* device has been disconnected */
-
- int audio_ifnum;
+ unsigned int has_video:1;
+ unsigned int has_audio_class:1;
+ unsigned int has_alsa_audio:1;
+ unsigned int is_audio_only:1;
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
@@ -507,10 +565,6 @@ struct em28xx {
/* Vinmode/Vinctl used at the driver */
int vinmode, vinctl;
- unsigned int has_audio_class:1;
- unsigned int has_alsa_audio:1;
- unsigned int is_audio_only:1;
-
/* Controls audio streaming */
struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
atomic_t stream_started; /* stream should be running if true */
@@ -608,6 +662,7 @@ struct em28xx {
/* usb transfer */
struct usb_device *udev; /* the usb device */
+ u8 ifnum; /* number of the assigned usb interface */
u8 analog_ep_isoc; /* address of isoc endpoint for analog */
u8 analog_ep_bulk; /* address of bulk endpoint for analog */
u8 dvb_ep_isoc; /* address of isoc endpoint for DVB */
@@ -639,10 +694,15 @@ struct em28xx {
enum em28xx_mode mode;
- /* Snapshot button */
+ /* Button state polling */
+ struct delayed_work buttons_query_work;
+ u8 button_polling_addresses[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+ u8 button_polling_last_values[EM28XX_NUM_BUTTON_ADDRESSES_MAX];
+ u8 num_button_polling_addresses;
+ u16 button_polling_interval; /* [ms] */
+ /* Snapshot button input device */
char snapshot_button_path[30]; /* path of the input dev */
struct input_dev *sbutton_input_dev;
- struct delayed_work sbutton_query_work;
struct em28xx_dvb *dvb;
};
@@ -672,6 +732,7 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
u8 bitmask);
+int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask);
int em28xx_read_ac97(struct em28xx *dev, u8 reg);
int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
@@ -679,12 +740,9 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
int em28xx_audio_analog_set(struct em28xx *dev);
int em28xx_audio_setup(struct em28xx *dev);
-int em28xx_colorlevels_set_default(struct em28xx *dev);
+const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
+ enum em28xx_led_role role);
int em28xx_capture_start(struct em28xx *dev, int start);
-int em28xx_vbi_supported(struct em28xx *dev);
-int em28xx_set_outfmt(struct em28xx *dev);
-int em28xx_resolution_set(struct em28xx *dev);
-int em28xx_set_alternate(struct em28xx *dev);
int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
int num_bufs, int max_pkt_size, int packet_multiplier);
int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
@@ -696,30 +754,18 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode);
void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
-void em28xx_wake_i2c(struct em28xx *dev);
int em28xx_register_extension(struct em28xx_ops *dev);
void em28xx_unregister_extension(struct em28xx_ops *dev);
void em28xx_init_extension(struct em28xx *dev);
void em28xx_close_extension(struct em28xx *dev);
-/* Provided by em28xx-video.c */
-int em28xx_vb2_setup(struct em28xx *dev);
-int em28xx_register_analog_devices(struct em28xx *dev);
-void em28xx_release_analog_resources(struct em28xx *dev);
-void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv);
-int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
-int em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern const struct v4l2_ctrl_ops em28xx_ctrl_ops;
-
/* Provided by em28xx-cards.c */
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
+void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl);
void em28xx_release_resources(struct em28xx *dev);
-/* Provided by em28xx-vbi.c */
-extern struct vb2_ops em28xx_vbi_qops;
-
/* Provided by em28xx-camera.c */
int em28xx_detect_sensor(struct em28xx *dev);
int em28xx_init_camera(struct em28xx *dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
- kfree(print_buf);
#endif
msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+ kfree(print_buf);
+#endif
return retval;
}
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 78c9bc8e7f56..abf365ab025d 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1078,7 +1078,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* register webcam snapshot button input device */
pdev->button_dev = input_allocate_device();
if (!pdev->button_dev) {
- PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
goto err_video_unreg;
}
diff --git a/drivers/media/usb/stk1160/stk1160-ac97.c b/drivers/media/usb/stk1160/stk1160-ac97.c
index c8583c262c3d..c46c8be89602 100644
--- a/drivers/media/usb/stk1160/stk1160-ac97.c
+++ b/drivers/media/usb/stk1160/stk1160-ac97.c
@@ -98,13 +98,11 @@ int stk1160_ac97_register(struct stk1160 *dev)
* Just want a card to access ac97 controls,
* the actual capture interface will be handled by snd-usb-audio
*/
- rc = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
+ rc = snd_card_new(dev->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
if (rc < 0)
return rc;
- snd_card_set_dev(card, dev->dev);
-
/* TODO: I'm not sure where I should get these names :-( */
snprintf(card->shortname, sizeof(card->shortname),
"stk1160-mixer");
diff --git a/drivers/media/usb/tlg2300/pd-alsa.c b/drivers/media/usb/tlg2300/pd-alsa.c
index 3f3e141f70fb..dd8fe100590f 100644
--- a/drivers/media/usb/tlg2300/pd-alsa.c
+++ b/drivers/media/usb/tlg2300/pd-alsa.c
@@ -300,7 +300,8 @@ int poseidon_audio_init(struct poseidon *p)
struct snd_pcm *pcm;
int ret;
- ret = snd_card_create(-1, "Telegent", THIS_MODULE, 0, &card);
+ ret = snd_card_new(&p->interface->dev, -1, "Telegent",
+ THIS_MODULE, 0, &card);
if (ret != 0)
return ret;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index 813c1ec53608..3239cd62e452 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -431,7 +431,8 @@ static int tm6000_audio_init(struct tm6000_core *dev)
if (!enable[devnr])
return -ENOENT;
- rc = snd_card_create(index[devnr], "tm6000", THIS_MODULE, 0, &card);
+ rc = snd_card_new(&dev->udev->dev, index[devnr], "tm6000",
+ THIS_MODULE, 0, &card);
if (rc < 0) {
snd_printk(KERN_ERR "cannot create card instance %d\n", devnr);
return rc;
@@ -445,7 +446,6 @@ static int tm6000_audio_init(struct tm6000_core *dev)
le16_to_cpu(dev->udev->descriptor.idVendor),
le16_to_cpu(dev->udev->descriptor.idProduct));
snd_component_add(card, component);
- snd_card_set_dev(card, &dev->udev->dev);
chip = kzalloc(sizeof(struct snd_tm6000_card), GFP_KERNEL);
if (!chip) {
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 8c05565a240e..2189bfb2e828 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -83,14 +83,3 @@ config VIDEOBUF2_DMA_SG
#depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
-
-config VIDEO_V4L2_INT_DEVICE
- tristate "V4L2 int device (DEPRECATED)"
- depends on VIDEO_V4L2
- ---help---
- An early framework for a hardware-independent interface for
- image sensors and bridges etc. Currently used by omap24xxcam and
- tcm825x drivers that should be converted to V4L2 subdev.
-
- Do not use for new developments.
-
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 1a85eee581f8..c6ae7bad951e 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -15,7 +15,6 @@ ifeq ($(CONFIG_OF),y)
endif
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8f7a6a454a4c..6191968db8fa 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -733,7 +733,7 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- put_compat_timespec(&kp->timestamp, &up->timestamp) ||
+ compat_put_timespec(&kp->timestamp, &up->timestamp) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index fb46790d0eca..6ff002bd5909 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -745,6 +745,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index b5aaaac427ad..0a30dbf3d05c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -872,8 +872,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Should not happen since we thought this minor was free */
WARN_ON(video_device[vdev->minor] != NULL);
- video_device[vdev->minor] = vdev;
vdev->index = get_index(vdev);
+ video_device[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
if (vdev->ioctl_ops)
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4a944..f7902fe8a526 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf)
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 68e6b5e912ff..707aef705a47 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -28,6 +28,9 @@
#include <media/v4l2-device.h>
#include <media/videobuf2-core.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
/* Zero out the end of the struct pointed to by p. Everything after, but
* not including, the specified field is cleared. */
#define CLEAR_AFTER_FIELD(p, field) \
@@ -2338,6 +2341,12 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+ }
if (has_array_args) {
*kernel_ptr = user_ptr;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 73035ee0f4de..178ce96556c6 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -558,6 +558,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->unlock)
m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
if (list_empty(&src_q->done_list))
poll_wait(file, &src_q->done_wq, wait);
@@ -566,6 +568,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->lock)
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
@@ -693,6 +697,13 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
if (ret)
goto err;
+ /*
+ * If both queues use the same mutex, assign it as the common buffer
+ * queue lock to the m2m context. This lock is used in the
+ * v4l2_m2m_ioctl_* helpers.
+ */
+ if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
@@ -740,3 +751,118 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock
+ * is used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ int ret;
+
+ if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ unsigned int ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
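
To illustrate how the m2m helpers introduced above are meant to be used, here is a hedged sketch of the ops tables a mem2mem driver could point at them, assuming it stores its context in fh->m2m_ctx and uses a single mutex as vb2 queue lock for both the OUTPUT and CAPTURE queues. The "mydrv" names are hypothetical, and the open/release handlers (which would have to set up fh->m2m_ctx) are omitted:

#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical driver; only the ioctl and file-operation tables are shown. */
static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	/* ... querycap and format ioctls ... */
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};

static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= mydrv_open,		/* hypothetical: must set fh->m2m_ctx */
	.release	= mydrv_release,	/* hypothetical */
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,	/* takes/releases the shared q_lock */
	.poll		= v4l2_m2m_fop_poll,
};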
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index a6478dca0cde..b4ed9a955fbe 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -121,21 +121,15 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
* the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
* V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
* The caller should hold a reference to @node.
+ *
+ * Return: 0.
*/
-void v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
+int v4l2_of_parse_endpoint(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
{
- struct device_node *port_node = of_get_parent(node);
-
- memset(endpoint, 0, offsetof(struct v4l2_of_endpoint, head));
-
- endpoint->local_node = node;
- /*
- * It doesn't matter whether the two calls below succeed.
- * If they don't then the default value 0 is used.
- */
- of_property_read_u32(port_node, "reg", &endpoint->port);
- of_property_read_u32(node, "reg", &endpoint->id);
+ of_graph_parse_endpoint(node, &endpoint->base);
+ endpoint->bus_type = 0;
+ memset(&endpoint->bus, 0, sizeof(endpoint->bus));
v4l2_of_parse_csi_bus(node, endpoint);
/*
@@ -145,123 +139,6 @@ void v4l2_of_parse_endpoint(const struct device_node *node,
if (endpoint->bus.mipi_csi2.flags == 0)
v4l2_of_parse_parallel_bus(node, endpoint);
- of_node_put(port_node);
+ return 0;
}
EXPORT_SYMBOL(v4l2_of_parse_endpoint);
-
-/**
- * v4l2_of_get_next_endpoint() - get next endpoint node
- * @parent: pointer to the parent device node
- * @prev: previous endpoint node, or NULL to get first
- *
- * Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is not decremented, the caller have to use
- * of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *endpoint;
- struct device_node *port = NULL;
-
- if (!parent)
- return NULL;
-
- if (!prev) {
- struct device_node *node;
- /*
- * It's the first call, we have to find a port subnode
- * within this node or within an optional 'ports' node.
- */
- node = of_get_child_by_name(parent, "ports");
- if (node)
- parent = node;
-
- port = of_get_child_by_name(parent, "port");
-
- if (port) {
- /* Found a port, get an endpoint. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- } else {
- endpoint = NULL;
- }
-
- if (!endpoint)
- pr_err("%s(): no endpoint nodes specified for %s\n",
- __func__, parent->full_name);
- of_node_put(node);
- } else {
- port = of_get_parent(prev);
- if (!port)
- /* Hm, has someone given us the root node ?... */
- return NULL;
-
- /* Avoid dropping prev node refcount to 0. */
- of_node_get(prev);
- endpoint = of_get_next_child(port, prev);
- if (endpoint) {
- of_node_put(port);
- return endpoint;
- }
-
- /* No more endpoints under this port, try the next one. */
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
-
- /* Pick up the first endpoint in this port. */
- endpoint = of_get_next_child(port, NULL);
- of_node_put(port);
- }
-
- return endpoint;
-}
-EXPORT_SYMBOL(v4l2_of_get_next_endpoint);
-
-/**
- * v4l2_of_get_remote_port_parent() - get remote port's parent node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port_parent(
- const struct device_node *node)
-{
- struct device_node *np;
- unsigned int depth;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- /* Walk 3 levels up only if there is 'ports' node. */
- for (depth = 3; depth && np; depth--) {
- np = of_get_next_parent(np);
- if (depth == 2 && of_node_cmp(np->name, "ports"))
- break;
- }
- return np;
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port_parent);
-
-/**
- * v4l2_of_get_remote_port() - get remote port node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote port node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port(const struct device_node *node)
-{
- struct device_node *np;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np)
- return NULL;
- return of_get_parent(np);
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
}
static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
+ videobuf_queue_unlock(q);
kfree(map);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 0edc165f418d..a127925c9d61 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -298,10 +298,28 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
* related information, if no buffers are left return the queue to an
* uninitialized state. Might be called even if the queue has already been freed.
*/
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+ * a race with a reqbufs call, which can reach this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(1, "reqbufs: preparing buffers, cannot free\n");
+ return -EAGAIN;
+ }
+ }
+
/* Call driver-provided cleanup function for each buffer, if provided */
if (q->ops->buf_cleanup) {
for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
@@ -326,6 +344,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
if (!q->num_buffers)
q->memory = 0;
INIT_LIST_HEAD(&q->queued_list);
+ return 0;
}
/**
@@ -481,6 +500,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
case VB2_BUF_STATE_PREPARED:
b->flags |= V4L2_BUF_FLAG_PREPARED;
break;
+ case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
/* nothing */
break;
@@ -657,7 +677,9 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EBUSY;
}
- __vb2_queue_free(q, q->num_buffers);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ if (ret)
+ return ret;
/*
* In case of REQBUFS(0) return immediately without calling
@@ -1116,7 +1138,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
- /* Verify and copy relevant information provided by the userspace */
+ /* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -1135,6 +1157,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
if (planes[plane].length < planes[plane].data_offset +
q->plane_sizes[plane]) {
+ dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
+ plane);
ret = -EINVAL;
goto err;
}
@@ -1226,6 +1250,7 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
struct vb2_queue *q = vb->vb2_queue;
+ struct rw_semaphore *mmap_sem;
int ret;
ret = __verify_length(vb, b);
@@ -1235,12 +1260,32 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return ret;
}
+ vb->state = VB2_BUF_STATE_PREPARING;
switch (q->memory) {
case V4L2_MEMORY_MMAP:
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
+ /*
+ * In case of user pointer buffers vb2 allocators need to get
+ * direct access to userspace pages. This requires getting
+ * the mmap semaphore for read access in the current process
+ * structure. The same semaphore is taken before calling mmap
+ * operation, while both qbuf/prepare_buf and mmap are called
+ * by the driver or v4l2 core with the driver's lock held.
+ * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+ * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
+ * the videobuf2 core releases the driver's lock, takes
+ * mmap_sem and then takes the driver's lock again.
+ */
+ mmap_sem = &current->mm->mmap_sem;
+ call_qop(q, wait_prepare, q);
+ down_read(mmap_sem);
+ call_qop(q, wait_finish, q);
+
ret = __qbuf_userptr(vb, b);
+
+ up_read(mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
@@ -1254,105 +1299,36 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = call_qop(q, buf_prepare, vb);
if (ret)
dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
- else
- vb->state = VB2_BUF_STATE_PREPARED;
+ vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
return ret;
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- const char *opname,
- int (*handler)(struct vb2_queue *,
- struct v4l2_buffer *,
- struct vb2_buffer *))
+ const char *opname)
{
- struct rw_semaphore *mmap_sem = NULL;
- struct vb2_buffer *vb;
- int ret;
-
- /*
- * In case of user pointer buffers vb2 allocators need to get direct
- * access to userspace pages. This requires getting the mmap semaphore
- * for read access in the current process structure. The same semaphore
- * is taken before calling mmap operation, while both qbuf/prepare_buf
- * and mmap are called by the driver or v4l2 core with the driver's lock
- * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap
- * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2
- * core releases the driver's lock, takes mmap_sem and then takes the
- * driver's lock again.
- *
- * To avoid racing with other vb2 calls, which might be called after
- * releasing the driver's lock, this operation is performed at the
- * beginning of qbuf/prepare_buf processing. This way the queue status
- * is consistent after getting the driver's lock back.
- */
- if (q->memory == V4L2_MEMORY_USERPTR) {
- mmap_sem = &current->mm->mmap_sem;
- call_qop(q, wait_prepare, q);
- down_read(mmap_sem);
- call_qop(q, wait_finish, q);
- }
-
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", opname);
- ret = -EBUSY;
- goto unlock;
- }
-
if (b->type != q->type) {
dprintk(1, "%s(): invalid buffer type\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
if (b->index >= q->num_buffers) {
dprintk(1, "%s(): buffer index out of range\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
- vb = q->bufs[b->index];
- if (NULL == vb) {
+ if (q->bufs[b->index] == NULL) {
/* Should never happen */
dprintk(1, "%s(): buffer is NULL\n", opname);
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
if (b->memory != q->memory) {
dprintk(1, "%s(): invalid memory type\n", opname);
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = __verify_planes_array(vb, b);
- if (ret)
- goto unlock;
-
- ret = handler(q, b, vb);
- if (ret)
- goto unlock;
-
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
-
- dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index);
-unlock:
- if (mmap_sem)
- up_read(mmap_sem);
- return ret;
-}
-
-static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- struct vb2_buffer *vb)
-{
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__,
- vb->state);
return -EINVAL;
}
- return __buf_prepare(vb, b);
+ return __verify_planes_array(q->bufs[b->index], b);
}
/**
@@ -1372,22 +1348,95 @@ static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
*/
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
- return vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf);
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+ vb->state);
+ return -EINVAL;
+ }
+
+ ret = __buf_prepare(vb, b);
+ if (!ret) {
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
-static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
- struct vb2_buffer *vb)
+/**
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q: videobuf2 queue
+ *
+ * If there are not enough buffers, then retry_start_streaming is set to
+ * 1 and 0 is returned. The next time a buffer is queued and
+ * retry_start_streaming is 1, this function will be called again to
+ * retry starting the DMA engine.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
{
int ret;
+ /* Tell the driver to start streaming */
+ ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+
+ /*
+ * If there are not enough buffers queued to start streaming, then
+ * the start_streaming operation will return -ENOBUFS and you have to
+ * retry when the next buffer is queued.
+ */
+ if (ret == -ENOBUFS) {
+ dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
+ q->retry_start_streaming = 1;
+ return 0;
+ }
+ if (ret)
+ dprintk(1, "qbuf: driver refused to start streaming\n");
+ else
+ q->retry_start_streaming = 0;
+ return ret;
+}
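
As an illustration of the retry contract described above, a driver's start_streaming callback only has to report -ENOBUFS until enough buffers have been queued; vb2 then re-invokes it from the next qbuf. This is a minimal sketch, not taken from any in-tree driver — foo_dev, min_buffers_needed and foo_hw_start_dma are hypothetical names:

/* Hypothetical driver callback relying on the -ENOBUFS retry mechanism. */
static int foo_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct foo_dev *dev = vb2_get_drv_priv(q);

	/* Not enough buffers queued yet: ask vb2 to retry on the next qbuf. */
	if (count < dev->min_buffers_needed)
		return -ENOBUFS;

	/* Enough buffers are queued, start the DMA engine. */
	return foo_hw_start_dma(dev);
}
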
+
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+ struct vb2_buffer *vb;
+
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
+
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
ret = __buf_prepare(vb, b);
if (ret)
return ret;
+ break;
case VB2_BUF_STATE_PREPARED:
break;
+ case VB2_BUF_STATE_PREPARING:
+ dprintk(1, "qbuf: buffer still being prepared\n");
+ return -EINVAL;
default:
dprintk(1, "qbuf: buffer already in use\n");
return -EINVAL;
@@ -1407,6 +1456,16 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
if (q->streaming)
__enqueue_in_driver(vb);
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ if (q->retry_start_streaming) {
+ ret = vb2_start_streaming(q);
+ if (ret)
+ return ret;
+ }
+
+ dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
return 0;
}
@@ -1429,7 +1488,12 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
*/
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
- return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf);
+ if (q->fileio) {
+ dprintk(1, "%s(): file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ return vb2_internal_qbuf(q, b);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
@@ -1550,7 +1614,8 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
return -EINVAL;
}
- wait_event(q->done_wq, !atomic_read(&q->queued_count));
+ if (!q->retry_start_streaming)
+ wait_event(q->done_wq, !atomic_read(&q->queued_count));
return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
@@ -1579,37 +1644,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
}
}
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
struct vb2_buffer *vb = NULL;
int ret;
- if (q->fileio) {
- dprintk(1, "dqbuf: file io in progress\n");
- return -EBUSY;
- }
-
if (b->type != q->type) {
dprintk(1, "dqbuf: invalid buffer type\n");
return -EINVAL;
@@ -1648,6 +1687,36 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
return 0;
}
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ if (q->fileio) {
+ dprintk(1, "dqbuf: file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_dqbuf(q, b, nonblocking);
+}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
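
For context, a (hypothetical) vidioc_dqbuf handler is normally a thin wrapper around vb2_dqbuf(), forwarding O_NONBLOCK as described in the comment above; foo_dev and its queue member are assumptions of this sketch:

static int foo_vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct foo_dev *dev = video_drvdata(file);

	/* The return value is handed straight back to userspace. */
	return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}
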
/**
@@ -1660,6 +1729,11 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
{
unsigned int i;
+ if (q->retry_start_streaming) {
+ q->retry_start_streaming = 0;
+ q->streaming = 0;
+ }
+
/*
* Tell driver to stop all transactions and release all queued
* buffers.
@@ -1687,37 +1761,24 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
__vb2_dqbuf(q->bufs[i]);
}
-/**
- * vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
-int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
struct vb2_buffer *vb;
int ret;
- if (q->fileio) {
- dprintk(1, "streamon: file io in progress\n");
- return -EBUSY;
- }
-
if (type != q->type) {
dprintk(1, "streamon: invalid stream type\n");
return -EINVAL;
}
if (q->streaming) {
- dprintk(1, "streamon: already streaming\n");
- return -EBUSY;
+ dprintk(3, "streamon successful: already streaming\n");
+ return 0;
+ }
+
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
}
/*
@@ -1727,12 +1788,9 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
list_for_each_entry(vb, &q->queued_list, queued_entry)
__enqueue_in_driver(vb);
- /*
- * Let driver notice that streaming state has been enabled.
- */
- ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
+ /* Tell driver to start streaming. */
+ ret = vb2_start_streaming(q);
if (ret) {
- dprintk(1, "streamon: driver refused to start streaming\n");
__vb2_queue_cancel(q);
return ret;
}
@@ -1742,39 +1800,40 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
dprintk(3, "Streamon successful\n");
return 0;
}
-EXPORT_SYMBOL_GPL(vb2_streamon);
-
/**
- * vb2_streamoff - stop streaming
+ * vb2_streamon - start streaming
* @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamoff handler
+ * @type: type argument passed from userspace to vidioc_streamon handler
*
- * Should be called from vidioc_streamoff handler of a driver.
+ * Should be called from vidioc_streamon handler of a driver.
* This function:
- * 1) verifies current state,
- * 2) stop streaming and dequeues any queued buffers, including those previously
- * passed to the driver (after waiting for the driver to finish).
+ * 1) verifies current state
+ * 2) passes any previously queued buffers to the driver and starts streaming
*
- * This call can be used for pausing playback.
* The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
+ * from vidioc_streamon handler in the driver.
*/
-int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
if (q->fileio) {
- dprintk(1, "streamoff: file io in progress\n");
+ dprintk(1, "streamon: file io in progress\n");
return -EBUSY;
}
+ return vb2_internal_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
if (type != q->type) {
dprintk(1, "streamoff: invalid stream type\n");
return -EINVAL;
}
if (!q->streaming) {
- dprintk(1, "streamoff: not streaming\n");
- return -EINVAL;
+ dprintk(3, "streamoff successful: not streaming\n");
+ return 0;
}
/*
@@ -1786,6 +1845,30 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
dprintk(3, "Streamoff successful\n");
return 0;
}
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ * This function:
+ * 1) verifies current state,
+ * 2) stop streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver
+ */
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (q->fileio) {
+ dprintk(1, "streamoff: file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_streamoff(q, type);
+}
EXPORT_SYMBOL_GPL(vb2_streamoff);
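
The streamon/streamoff entry points follow the same pattern; a minimal sketch of the matching ioctl handlers, again using the hypothetical foo_dev from the examples above:

static int foo_vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type t)
{
	struct foo_dev *dev = video_drvdata(file);

	return vb2_streamon(&dev->queue, t);
}

static int foo_vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type t)
{
	struct foo_dev *dev = video_drvdata(file);

	return vb2_streamoff(&dev->queue, t);
}
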
/**
@@ -2277,15 +2360,16 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
}
-
- /*
- * Start streaming.
- */
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto err_reqbufs;
+ fileio->index = q->num_buffers;
}
+ /*
+ * Start streaming.
+ */
+ ret = vb2_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+
q->fileio = fileio;
return ret;
@@ -2308,13 +2392,8 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
struct vb2_fileio_data *fileio = q->fileio;
if (fileio) {
- /*
- * Hack fileio context to enable direct calls to vb2 ioctl
- * interface.
- */
+ vb2_internal_streamoff(q, q->type);
q->fileio = NULL;
-
- vb2_streamoff(q, q->type);
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
@@ -2358,39 +2437,34 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio = q->fileio;
/*
- * Hack fileio context to enable direct calls to vb2 ioctl interface.
- * The pointer will be restored before returning from this function.
- */
- q->fileio = NULL;
-
- index = fileio->index;
- buf = &fileio->bufs[index];
-
- /*
* Check if we need to dequeue the buffer.
*/
- if (buf->queued) {
- struct vb2_buffer *vb;
-
+ index = fileio->index;
+ if (index >= q->num_buffers) {
/*
* Call vb2_dqbuf to get buffer back.
*/
memset(&fileio->b, 0, sizeof(fileio->b));
fileio->b.type = q->type;
fileio->b.memory = q->memory;
- fileio->b.index = index;
- ret = vb2_dqbuf(q, &fileio->b, nonblock);
+ ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
fileio->dq_count += 1;
+ index = fileio->b.index;
+ buf = &fileio->bufs[index];
+
/*
* Get number of bytes filled by the driver
*/
- vb = q->bufs[index];
- buf->size = vb2_get_plane_payload(vb, 0);
+ buf->pos = 0;
buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ } else {
+ buf = &fileio->bufs[index];
}
/*
@@ -2412,8 +2486,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
ret = copy_from_user(buf->vaddr + buf->pos, data, count);
if (ret) {
dprintk(3, "file io: error copying data\n");
- ret = -EFAULT;
- goto end;
+ return -EFAULT;
}
/*
@@ -2433,10 +2506,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
fileio->dq_count == 1) {
dprintk(3, "file io: read limit reached\n");
- /*
- * Restore fileio pointer and release the context.
- */
- q->fileio = fileio;
return __vb2_cleanup_fileio(q);
}
@@ -2448,32 +2517,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio->b.memory = q->memory;
fileio->b.index = index;
fileio->b.bytesused = buf->pos;
- ret = vb2_qbuf(q, &fileio->b);
+ ret = vb2_internal_qbuf(q, &fileio->b);
dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
/*
* Buffer has been queued, update the status
*/
buf->pos = 0;
buf->queued = 1;
- buf->size = q->bufs[0]->v4l2_planes[0].length;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
fileio->q_count += 1;
-
- /*
- * Switch to the next buffer
- */
- fileio->index = (index + 1) % q->num_buffers;
-
- /*
- * Start streaming if required.
- */
- if (!read && !q->streaming) {
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto end;
- }
+ if (fileio->index < q->num_buffers)
+ fileio->index++;
}
/*
@@ -2481,11 +2538,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
if (ret == 0)
ret = count;
-end:
- /*
- * Restore the fileio context and block vb2 ioctl interface.
- */
- q->fileio = fileio;
return ret;
}
@@ -2649,16 +2701,29 @@ int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
-int vb2_fop_release(struct file *file)
+int _vb2_fop_release(struct file *file, struct mutex *lock)
{
struct video_device *vdev = video_devdata(file);
if (file->private_data == vdev->queue->owner) {
+ if (lock)
+ mutex_lock(lock);
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
+ if (lock)
+ mutex_unlock(lock);
}
return v4l2_fh_release(file);
}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
EXPORT_SYMBOL_GPL(vb2_fop_release);
ssize_t vb2_fop_write(struct file *file, const char __user *buf,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 33d3871d1e13..880be0782dd9 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -719,7 +719,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 0d3a8ffe47a3..c779f210d2c6 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -40,6 +40,7 @@ struct vb2_dma_sg_buf {
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct vm_area_struct *vma;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -155,12 +156,18 @@ static void vb2_dma_sg_put(void *buf_priv)
}
}
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
int num_pages_from_user;
+ struct vm_area_struct *vma;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -180,7 +187,38 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->pages)
goto userptr_fail_alloc_pages;
- num_pages_from_user = get_user_pages(current, current->mm,
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ dprintk(1, "no vma for address %lu\n", vaddr);
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ dprintk(1, "vma at %lu is too small for %lu bytes\n",
+ vaddr, size);
+ goto userptr_fail_find_vma;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ dprintk(1, "failed to copy vma\n");
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma_is_io(buf->vma)) {
+ for (num_pages_from_user = 0;
+ num_pages_from_user < buf->num_pages;
+ ++num_pages_from_user, vaddr += PAGE_SIZE) {
+ unsigned long pfn;
+
+ if (follow_pfn(buf->vma, vaddr, &pfn)) {
+ dprintk(1, "no page for address %lu\n", vaddr);
+ break;
+ }
+ buf->pages[num_pages_from_user] = pfn_to_page(pfn);
+ }
+ } else
+ num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
buf->num_pages,
write,
@@ -200,9 +238,12 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- num_pages_from_user, buf->num_pages);
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
+ buf->num_pages, num_pages_from_user);
+ if (!vma_is_io(buf->vma))
+ while (--num_pages_from_user >= 0)
+ put_page(buf->pages[num_pages_from_user]);
+ vb2_put_vma(buf->vma);
+userptr_fail_find_vma:
kfree(buf->pages);
userptr_fail_alloc_pages:
kfree(buf);
@@ -226,9 +267,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
+ if (!vma_is_io(buf->vma))
+ put_page(buf->pages[i]);
}
kfree(buf->pages);
+ vb2_put_vma(buf->vma);
kfree(buf);
}
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 29a11db365bc..c59e9c96e86d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,17 @@ menuconfig MEMORY
if MEMORY
+config TI_AEMIF
+ tristate "Texas Instruments AEMIF driver"
+ depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
+ help
+ This driver is for the AEMIF module available in Texas Instruments
+ SoCs. AEMIF stands for Asynchronous External Memory Interface and
+ is intended to provide a glue-less interface to a variety of
+ asynchronous memory devices like ASRAM, NOR and NAND memory. A total
+ of 256MB of any of these memories can be accessed at a given
+ time via four chip selects, with 64MB per chip select.
+
config TI_EMIF
tristate "Texas Instruments EMIF driver"
depends on ARCH_OMAP2PLUS
@@ -50,4 +61,8 @@ config TEGRA30_MC
analysis, especially for IOMMU/SMMU(System Memory Management
Unit) module.
+config FSL_IFC
+ bool
+ depends on FSL_SOC
+
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 969d923dad93..71160a2b7313 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,7 +5,9 @@
ifeq ($(CONFIG_DDR),y)
obj-$(CONFIG_OF) += of_memory.o
endif
+obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
+obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_TEGRA30_MC) += tegra30-mc.o
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
new file mode 100644
index 000000000000..3d5d792d5cb2
--- /dev/null
+++ b/drivers/memory/fsl_ifc.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Freescale Integrated Flash Controller
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_ifc.h>
+#include <asm/prom.h>
+
+struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+
+/*
+ * convert_ifc_address - convert the base address
+ * @addr_base: base address of the memory bank
+ */
+unsigned int convert_ifc_address(phys_addr_t addr_base)
+{
+ return addr_base & CSPR_BA;
+}
+EXPORT_SYMBOL(convert_ifc_address);
+
+/*
+ * fsl_ifc_find - find IFC bank
+ * @addr_base: base address of the memory bank
+ *
+ * This function walks the IFC banks, comparing the "Base address" field of the
+ * CSPR registers with the supplied addr_base argument. When the bases match,
+ * this function returns the bank number (starting with 0); otherwise it
+ * returns an appropriate negative errno value.
+ */
+int fsl_ifc_find(phys_addr_t addr_base)
+{
+ int i = 0;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+ u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+ if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+ convert_ifc_address(addr_base))
+ return i;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fsl_ifc_find);
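
A usage sketch (hypothetical caller): a driver bound to a flash device behind one IFC chip select can map the bank's base address, typically taken from its "reg" property, to the chip-select index with fsl_ifc_find():

static int foo_attach_to_ifc_bank(struct platform_device *pdev,
				  struct resource *res)
{
	int bank = fsl_ifc_find(res->start);

	/* -ENODEV: controller not probed yet, -ENOENT: no matching CSPR. */
	if (bank < 0) {
		dev_err(&pdev->dev, "no IFC bank found for %pR\n", res);
		return bank;
	}

	dev_info(&pdev->dev, "using IFC chip select %d\n", bank);
	return 0;
}
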
+
+static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /*
+ * Clear all the common status and event registers
+ */
+ if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* enable all errors and events */
+ out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+
+ /* enable all error and event interrupts */
+ out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
+ out_be32(&ifc->cm_erattr0, 0x0);
+ out_be32(&ifc->cm_erattr1, 0x0);
+
+ return 0;
+}
+
+static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+
+ free_irq(ctrl->nand_irq, ctrl);
+ free_irq(ctrl->irq, ctrl);
+
+ irq_dispose_mapping(ctrl->nand_irq);
+ irq_dispose_mapping(ctrl->irq);
+
+ iounmap(ctrl->regs);
+
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ctrl);
+
+ return 0;
+}
+
+/*
+ * NAND events are split between an operational interrupt which only
+ * receives OPC, and an error interrupt that receives everything else,
+ * including non-NAND errors. Whichever interrupt gets to it first
+ * records the status and wakes the wait queue.
+ */
+static DEFINE_SPINLOCK(nand_irq_lock);
+
+static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&nand_irq_lock, flags);
+
+ stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+ if (stat) {
+ out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+ ctrl->nand_stat = stat;
+ wake_up(&ctrl->nand_wait);
+ }
+
+ spin_unlock_irqrestore(&nand_irq_lock, flags);
+
+ return stat;
+}
+
+static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+
+ if (check_nand_stat(ctrl))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/*
+ * NOTE: This interrupt is used to report ifc events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 err_axiid, err_srcid, status, cs_err, err_addr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* read for chip select error */
+ cs_err = in_be32(&ifc->cm_evter_stat);
+ if (cs_err) {
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
+ "any memory bank 0x%08X\n", cs_err);
+ /* clear the chip select error */
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* read the error attribute registers and print the error information */
+ status = in_be32(&ifc->cm_erattr0);
+ err_addr = in_be32(&ifc->cm_erattr1);
+
+ if (status & IFC_CM_ERATTR0_ERTYP_READ)
+ dev_err(ctrl->dev, "Read transaction error"
+ "CM_ERATTR0 0x%08X\n", status);
+ else
+ dev_err(ctrl->dev, "Write transaction error"
+ "CM_ERATTR0 0x%08X\n", status);
+
+ err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
+ IFC_CM_ERATTR0_ERAID_SHIFT;
+ dev_err(ctrl->dev, "AXI ID of the error"
+ "transaction 0x%08X\n", err_axiid);
+
+ err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
+ IFC_CM_ERATTR0_ESRCID_SHIFT;
+ dev_err(ctrl->dev, "SRC ID of the error"
+ "transaction 0x%08X\n", err_srcid);
+
+ dev_err(ctrl->dev, "Transaction Address corresponding to error"
+ "ERADDR 0x%08X\n", err_addr);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (check_nand_stat(ctrl))
+ ret = IRQ_HANDLED;
+
+ return ret;
+}
+
+/*
+ * fsl_ifc_ctrl_probe
+ *
+ * called by device layer when it finds a device matching
+ * one our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+*/
+static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+{
+ int ret = 0;
+
+
+ dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+ fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
+ if (!fsl_ifc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+ /* IOMAP the entire IFC region */
+ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the Controller level irq */
+ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource "
+ "for IFC\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the nand machine irq */
+ fsl_ifc_ctrl_dev->nand_irq =
+ irq_of_parse_and_map(dev->dev.of_node, 1);
+
+ fsl_ifc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
+ if (ret < 0)
+ goto err;
+
+ init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
+
+ ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
+ "fsl-ifc", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->irq);
+ goto err_irq;
+ }
+
+ if (fsl_ifc_ctrl_dev->nand_irq) {
+ ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq,
+ 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->nand_irq);
+ goto err_nandirq;
+ }
+ }
+
+ return 0;
+
+err_nandirq:
+ free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
+err_irq:
+ free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+err:
+ return ret;
+}
+
+static const struct of_device_id fsl_ifc_match[] = {
+ {
+ .compatible = "fsl,ifc",
+ },
+ {},
+};
+
+static struct platform_driver fsl_ifc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-ifc",
+ .of_match_table = fsl_ifc_match,
+ },
+ .probe = fsl_ifc_ctrl_probe,
+ .remove = fsl_ifc_ctrl_remove,
+};
+
+static int __init fsl_ifc_init(void)
+{
+ return platform_driver_register(&fsl_ifc_ctrl_driver);
+}
+subsys_initcall(fsl_ifc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
new file mode 100644
index 000000000000..d3df7602f406
--- /dev/null
+++ b/drivers/memory/ti-aemif.c
@@ -0,0 +1,427 @@
+/*
+ * TI AEMIF driver
+ *
+ * Copyright (C) 2010 - 2013 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Authors:
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define TA_SHIFT 2
+#define RHOLD_SHIFT 4
+#define RSTROBE_SHIFT 7
+#define RSETUP_SHIFT 13
+#define WHOLD_SHIFT 17
+#define WSTROBE_SHIFT 20
+#define WSETUP_SHIFT 26
+#define EW_SHIFT 30
+#define SS_SHIFT 31
+
+#define TA(x) ((x) << TA_SHIFT)
+#define RHOLD(x) ((x) << RHOLD_SHIFT)
+#define RSTROBE(x) ((x) << RSTROBE_SHIFT)
+#define RSETUP(x) ((x) << RSETUP_SHIFT)
+#define WHOLD(x) ((x) << WHOLD_SHIFT)
+#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
+#define WSETUP(x) ((x) << WSETUP_SHIFT)
+#define EW(x) ((x) << EW_SHIFT)
+#define SS(x) ((x) << SS_SHIFT)
+
+#define ASIZE_MAX 0x1
+#define TA_MAX 0x3
+#define RHOLD_MAX 0x7
+#define RSTROBE_MAX 0x3f
+#define RSETUP_MAX 0xf
+#define WHOLD_MAX 0x7
+#define WSTROBE_MAX 0x3f
+#define WSETUP_MAX 0xf
+#define EW_MAX 0x1
+#define SS_MAX 0x1
+#define NUM_CS 4
+
+#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
+#define RHOLD_VAL(x) (((x) & RHOLD(RHOLD_MAX)) >> RHOLD_SHIFT)
+#define RSTROBE_VAL(x) (((x) & RSTROBE(RSTROBE_MAX)) >> RSTROBE_SHIFT)
+#define RSETUP_VAL(x) (((x) & RSETUP(RSETUP_MAX)) >> RSETUP_SHIFT)
+#define WHOLD_VAL(x) (((x) & WHOLD(WHOLD_MAX)) >> WHOLD_SHIFT)
+#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
+#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
+#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
+#define SS_VAL(x) (((x) & SS(SS_MAX)) >> SS_SHIFT)
+
+#define NRCSR_OFFSET 0x00
+#define AWCCR_OFFSET 0x04
+#define A1CR_OFFSET 0x10
+
+#define ACR_ASIZE_MASK 0x3
+#define ACR_EW_MASK BIT(30)
+#define ACR_SS_MASK BIT(31)
+#define ASIZE_16BIT 1
+
+#define CONFIG_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX) | \
+ EW(EW_MAX) | SS(SS_MAX) | \
+ ASIZE_MAX)
+
+/**
+ * struct aemif_cs_data: structure to hold cs parameters
+ * @cs: chip-select number
+ * @wstrobe: write strobe width, ns
+ * @rstrobe: read strobe width, ns
+ * @wsetup: write setup width, ns
+ * @whold: write hold width, ns
+ * @rsetup: read setup width, ns
+ * @rhold: read hold width, ns
+ * @ta: minimum turn around time, ns
+ * @enable_ss: enable/disable select strobe mode
+ * @enable_ew: enable/disable extended wait mode
+ * @asize: width of the asynchronous device's data bus
+ */
+struct aemif_cs_data {
+ u8 cs;
+ u16 wstrobe;
+ u16 rstrobe;
+ u8 wsetup;
+ u8 whold;
+ u8 rsetup;
+ u8 rhold;
+ u8 ta;
+ u8 enable_ss;
+ u8 enable_ew;
+ u8 asize;
+};
+
+/**
+ * struct aemif_device: structure to hold device data
+ * @base: base address of AEMIF registers
+ * @clk: source clock
+ * @clk_rate: clock's rate in kHz
+ * @num_cs: number of assigned chip-selects
+ * @cs_offset: start number of cs nodes
+ * @cs_data: array of chip-select settings
+ */
+struct aemif_device {
+ void __iomem *base;
+ struct clk *clk;
+ unsigned long clk_rate;
+ u8 num_cs;
+ int cs_offset;
+ struct aemif_cs_data cs_data[NUM_CS];
+};
+
+/**
+ * aemif_calc_rate - calculate timing data.
+ * @pdev: platform device to calculate for
+ * @wanted: The cycle time needed in nanoseconds.
+ * @clk: The input clock rate in kHz.
+ * @max: The maximum divider value that can be programmed.
+ *
+ * On success, returns the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers, else negative errno.
+ */
+static int aemif_calc_rate(struct platform_device *pdev, int wanted,
+ unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
+
+ dev_dbg(&pdev->dev, "%s: result %d from %ld, %d\n", __func__, result,
+ clk, wanted);
+
+ /* It is generally OK to have a more relaxed timing than requested... */
+ if (result < 0)
+ result = 0;
+
+ /* ... But configuring tighter timings is not an option. */
+ else if (result > max)
+ result = -EINVAL;
+
+ return result;
+}
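
To make the arithmetic concrete (the numbers are purely illustrative): with a 100 MHz AEMIF clock the driver stores clk = 100000 kHz, so a requested 30 ns strobe resolves to three 10 ns cycles, programmed as the value 2:

/*
 * Worked example, assuming clk_rate = 100000 kHz (100 MHz) and wanted = 30 ns:
 *   aemif_calc_rate(pdev, 30, 100000, RSTROBE_MAX)
 *     = DIV_ROUND_UP(30 * 100000, NSEC_PER_MSEC) - 1 = 3 - 1 = 2
 * and the inverse conversion below recovers the original timing:
 *   aemif_cycles_to_nsec(2, 100000) = (2 + 1) * NSEC_PER_MSEC / 100000 = 30 ns
 */
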
+
+/**
+ * aemif_config_abus - configure async bus parameters
+ * @pdev: platform device to configure for
+ * @csnum: aemif chip select number
+ *
+ * This function programs the given timing values (in real clock) into the
+ * AEMIF registers taking the AEMIF clock into account.
+ *
+ * This function does not use any locking while programming the AEMIF
+ * because it is expected that there is only one user of a given
+ * chip-select.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int aemif_config_abus(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
+ unsigned long clk_rate = aemif->clk_rate;
+ unsigned offset;
+ u32 set, val;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+
+ ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
+ rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
+ rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
+ rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
+ whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
+ wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
+ wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
+
+ if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
+ whold < 0 || wstrobe < 0 || wsetup < 0) {
+ dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
+ WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
+
+ set |= (data->asize & ACR_ASIZE_MASK);
+ if (data->enable_ew)
+ set |= ACR_EW_MASK;
+ if (data->enable_ss)
+ set |= ACR_SS_MASK;
+
+ val = readl(aemif->base + offset);
+ val &= ~CONFIG_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+
+ return 0;
+}
+
+static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
+{
+ return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+}
+
+/**
+ * aemif_get_hw_params - function to read hw register values
+ * @pdev: platform device to read for
+ * @csnum: aemif chip select number
+ *
+ * This function reads the defaults from the registers and updates
+ * the timing values. Required for get/set commands and also for
+ * the case when the driver needs to use the hardware defaults.
+ */
+static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ unsigned long clk_rate = aemif->clk_rate;
+ u32 val, offset;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+ val = readl(aemif->base + offset);
+
+ data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
+ data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
+ data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
+ data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
+ data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
+ data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
+ data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->enable_ew = EW_VAL(val);
+ data->enable_ss = SS_VAL(val);
+ data->asize = val & ASIZE_MAX;
+}
+
+/**
+ * of_aemif_parse_abus_config - parse CS configuration from DT
+ * @pdev: platform device to parse for
+ * @np: device node ptr
+ *
+ * This function updates the AEMIF async bus configuration based on the values
+ * configured in a cs device binding node.
+ */
+static int of_aemif_parse_abus_config(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data;
+ u32 cs;
+ u32 val;
+
+ if (of_property_read_u32(np, "ti,cs-chipselect", &cs)) {
+ dev_dbg(&pdev->dev, "cs property is required");
+ return -EINVAL;
+ }
+
+ if (cs - aemif->cs_offset >= NUM_CS || cs < aemif->cs_offset) {
+ dev_dbg(&pdev->dev, "cs number is incorrect %d", cs);
+ return -EINVAL;
+ }
+
+ if (aemif->num_cs >= NUM_CS) {
+ dev_dbg(&pdev->dev, "cs count is more than %d", NUM_CS);
+ return -EINVAL;
+ }
+
+ data = &aemif->cs_data[aemif->num_cs];
+ data->cs = cs;
+
+ /* read the current value in the hw register */
+ aemif_get_hw_params(pdev, aemif->num_cs++);
+
+ /* override the values from device node */
+ if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
+ data->ta = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
+ data->rhold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
+ data->rstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
+ data->rsetup = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
+ data->whold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
+ data->wstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
+ data->wsetup = val;
+
+ if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
+ if (val == 16)
+ data->asize = 1;
+ data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
+ data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
+ return 0;
+}
+
+static const struct of_device_id aemif_of_match[] = {
+ { .compatible = "ti,davinci-aemif", },
+ { .compatible = "ti,da850-aemif", },
+ {},
+};
+
+static int aemif_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret = -ENODEV;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct aemif_device *aemif;
+
+ if (np == NULL)
+ return 0;
+
+ aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL);
+ if (!aemif)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, aemif);
+
+ aemif->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(aemif->clk)) {
+ dev_err(dev, "cannot get clock 'aemif'\n");
+ return PTR_ERR(aemif->clk);
+ }
+
+ clk_prepare_enable(aemif->clk);
+ aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
+
+ if (of_device_is_compatible(np, "ti,da850-aemif"))
+ aemif->cs_offset = 2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aemif->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aemif->base)) {
+ ret = PTR_ERR(aemif->base);
+ goto error;
+ }
+
+ /*
+ * For every controller device node there are cs child nodes that
+ * describe the bus configuration parameters. This loop iterates
+ * over these nodes and updates the cs data array.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_aemif_parse_abus_config(pdev, child_np);
+ if (ret < 0)
+ goto error;
+ }
+
+ for (i = 0; i < aemif->num_cs; i++) {
+ ret = aemif_config_abus(pdev, i);
+ if (ret < 0) {
+ dev_err(dev, "Error configuring chip select %d\n",
+ aemif->cs_data[i].cs);
+ goto error;
+ }
+ }
+
+ /*
+ * Create the child devices explicitly from here to
+ * guarantee that the children are probed after the AEMIF timing
+ * parameters are set.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_platform_populate(child_np, NULL, NULL, dev);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+error:
+ clk_disable_unprepare(aemif->clk);
+ return ret;
+}
+
+static int aemif_remove(struct platform_device *pdev)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(aemif->clk);
+ return 0;
+}
+
+static struct platform_driver aemif_driver = {
+ .probe = aemif_probe,
+ .remove = aemif_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(aemif_of_match),
+ },
+};
+
+module_platform_driver(aemif_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments AEMIF driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 25f8f93decb6..2a635b6fdaf7 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -145,6 +145,8 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
unsigned int length = sg->length;
u16 sec_cnt = (u16)(length / 512);
u8 val, trans_mode, dma_dir;
+ struct memstick_dev *card = host->msh->card;
+ bool pro_card = card->id.type == MEMSTICK_TYPE_PRO;
dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
__func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
@@ -152,19 +154,21 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
if (data_dir == READ) {
dma_dir = DMA_DIR_FROM_CARD;
- trans_mode = MS_TM_AUTO_READ;
+ trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ;
} else {
dma_dir = DMA_DIR_TO_CARD;
- trans_mode = MS_TM_AUTO_WRITE;
+ trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE;
}
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
- 0xFF, (u8)(sec_cnt >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
- 0xFF, (u8)sec_cnt);
+ if (pro_card) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
+ 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
+ 0xFF, (u8)sec_cnt);
+ }
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
@@ -192,8 +196,14 @@ static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
}
rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
- if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
- return -EIO;
+ if (pro_card) {
+ if (val & (MS_INT_CMDNK | MS_INT_ERR |
+ MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ } else {
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ }
return 0;
}
@@ -462,8 +472,8 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh,
clock = 19000000;
ssc_depth = RTSX_SSC_DEPTH_500K;
- err = rtsx_pci_write_register(pcr, MS_CFG,
- 0x18, MS_BUS_WIDTH_1);
+ err = rtsx_pci_write_register(pcr, MS_CFG, 0x58,
+ MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT);
if (err < 0)
return err;
} else if (value == MEMSTICK_PAR4) {
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+ ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
- mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
+ mutex_lock(&i2o_cfg_mutex);
ret = i2o_cfg_passthru32(file, cmd, arg);
+ mutex_unlock(&i2o_cfg_mutex);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&i2o_cfg_mutex);
return ret;
}
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index a8c08f332da0..92752fb5b2d3 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -652,6 +652,44 @@ static int i2o_iop_activate(struct i2o_controller *c)
return i2o_hrt_get(c);
};
+static void i2o_res_alloc(struct i2o_controller *c, unsigned long flags)
+{
+ i2o_status_block *sb = c->status_block.virt;
+ struct resource *res = &c->mem_resource;
+ resource_size_t size, align;
+ int err;
+
+ res->name = c->pdev->bus->name;
+ res->flags = flags;
+ res->start = 0;
+ res->end = 0;
+ osm_info("%s: requires private memory resources.\n", c->name);
+
+ if (flags & IORESOURCE_MEM) {
+ size = sb->desired_mem_size;
+ align = 1 << 20; /* unspecified, use 1Mb and play safe */
+ } else {
+ size = sb->desired_io_size;
+ align = 1 << 12; /* unspecified, use 4Kb and play safe */
+ }
+
+ err = pci_bus_alloc_resource(c->pdev->bus, res, size, align, 0, 0,
+ NULL, NULL);
+ if (err < 0)
+ return;
+
+ if (flags & IORESOURCE_MEM) {
+ c->mem_alloc = 1;
+ sb->current_mem_size = resource_size(res);
+ sb->current_mem_base = res->start;
+ } else if (flags & IORESOURCE_IO) {
+ c->io_alloc = 1;
+ sb->current_io_size = resource_size(res);
+ sb->current_io_base = res->start;
+ }
+ osm_info("%s: allocated PCI space %pR\n", c->name, res);
+}
+
/**
* i2o_iop_systab_set - Set the I2O System Table of the specified IOP
* @c: I2O controller to which the system table should be send
@@ -665,52 +703,13 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
struct device *dev = &c->pdev->dev;
- struct resource *root;
int rc;
- if (sb->current_mem_size < sb->desired_mem_size) {
- struct resource *res = &c->mem_resource;
- res->name = c->pdev->bus->name;
- res->flags = IORESOURCE_MEM;
- res->start = 0;
- res->end = 0;
- osm_info("%s: requires private memory resources.\n", c->name);
- root = pci_find_parent_resource(c->pdev, res);
- if (root == NULL)
- osm_warn("%s: Can't find parent resource!\n", c->name);
- if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
- NULL, NULL) >= 0) {
- c->mem_alloc = 1;
- sb->current_mem_size = resource_size(res);
- sb->current_mem_base = res->start;
- osm_info("%s: allocated %llu bytes of PCI memory at "
- "0x%016llX.\n", c->name,
- (unsigned long long)resource_size(res),
- (unsigned long long)res->start);
- }
- }
+ if (sb->current_mem_size < sb->desired_mem_size)
+ i2o_res_alloc(c, IORESOURCE_MEM);
- if (sb->current_io_size < sb->desired_io_size) {
- struct resource *res = &c->io_resource;
- res->name = c->pdev->bus->name;
- res->flags = IORESOURCE_IO;
- res->start = 0;
- res->end = 0;
- osm_info("%s: requires private memory resources.\n", c->name);
- root = pci_find_parent_resource(c->pdev, res);
- if (root == NULL)
- osm_warn("%s: Can't find parent resource!\n", c->name);
- if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
- NULL, NULL) >= 0) {
- c->io_alloc = 1;
- sb->current_io_size = resource_size(res);
- sb->current_mem_base = res->start;
- osm_info("%s: allocated %llu bytes of PCI I/O at "
- "0x%016llX.\n", c->name,
- (unsigned long long)resource_size(res),
- (unsigned long long)res->start);
- }
- }
+ if (sb->current_io_size < sb->desired_io_size)
+ i2o_res_alloc(c, IORESOURCE_IO);
msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
if (IS_ERR(msg))
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a45aab9f6bb1..1c3ae57082ed 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -251,8 +251,6 @@ static int arizona_apply_hardware_patch(struct arizona* arizona)
unsigned int fll, sysclk;
int ret, err;
- regcache_cache_bypass(arizona->regmap, true);
-
/* Cache existing FLL and SYSCLK settings */
ret = regmap_read(arizona->regmap, ARIZONA_FLL1_CONTROL_1, &fll);
if (ret != 0) {
@@ -322,8 +320,6 @@ err_fll:
err);
}
- regcache_cache_bypass(arizona->regmap, false);
-
if (ret != 0)
return ret;
else
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 13af7e50021e..8103e4362132 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
return 0;
}
+/*
+ * DO NOT change the device IDs. The naming is intentionally specific as both
+ * the PMIC and CODEC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 IDs for the PMIC
+ * and the CODEC, which must be different for them to operate together.
+ */
static struct i2c_device_id da9055_i2c_id[] = {
- {"da9055", 0},
+ {"da9055-pmic", 0},
{ }
};
+MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
- .name = "da9055",
+ .name = "da9055-pmic",
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb2b877..71aa14a6bfbb 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@ static const struct i2c_device_id max14577_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
+#ifdef CONFIG_PM_SLEEP
static int max14577_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@ static int max14577_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static struct of_device_id max14577_dt_match[] = {
{ .compatible = "maxim,max14577", },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3bf7b85..5adede0fb04c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index f47eaa70eae0..5d5e186b5d8b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@ static struct max8998_platform_data *max8998_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8998_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6841d6805fd6..41ab5e34d2ac 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -245,7 +245,7 @@ static int pcf50633_probe(struct i2c_client *client,
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
struct platform_device *pdev;
- pdev = platform_device_alloc("pcf50633-regltr", i);
+ pdev = platform_device_alloc("pcf50633-regulator", i);
if (!pdev) {
dev_err(pcf->dev, "Cannot create regulator %d\n", i);
continue;
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index e4671088f075..281a82747275 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -26,7 +26,9 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpa01.h>
#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regmap.h>
@@ -69,47 +71,52 @@ static const struct mfd_cell s2mps11_devs[] = {
}
};
+static const struct mfd_cell s2mps14_devs[] = {
+ {
+ .name = "s2mps14-pmic",
+ }, {
+ .name = "s2mps14-rtc",
+ }, {
+ .name = "s2mps14-clk",
+ }
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+ {
+ .name = "s2mpa01-pmic",
+ },
+};
+
#ifdef CONFIG_OF
static struct of_device_id sec_dt_match[] = {
{ .compatible = "samsung,s5m8767-pmic",
.data = (void *)S5M8767X,
- },
- { .compatible = "samsung,s2mps11-pmic",
+ }, {
+ .compatible = "samsung,s2mps11-pmic",
.data = (void *)S2MPS11X,
+ }, {
+ .compatible = "samsung,s2mps14-pmic",
+ .data = (void *)S2MPS14X,
+ }, {
+ .compatible = "samsung,s2mpa01-pmic",
+ .data = (void *)S2MPA01,
+ }, {
+ /* Sentinel */
},
- {},
};
#endif
-int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
{
- return regmap_read(sec_pmic->regmap_pmic, reg, dest);
-}
-EXPORT_SYMBOL_GPL(sec_reg_read);
-
-int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
-{
- return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(sec_bulk_read);
-
-int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
-{
- return regmap_write(sec_pmic->regmap_pmic, reg, value);
-}
-EXPORT_SYMBOL_GPL(sec_reg_write);
-
-int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
-{
- return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(sec_bulk_write);
-
-int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
-{
- return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
+ switch (reg) {
+ case S2MPA01_REG_INT1M:
+ case S2MPA01_REG_INT2M:
+ case S2MPA01_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
}
-EXPORT_SYMBOL_GPL(sec_reg_update);
static bool s2mps11_volatile(struct device *dev, unsigned int reg)
{
@@ -141,6 +148,15 @@ static const struct regmap_config sec_regmap_config = {
.val_bits = 8,
};
+static const struct regmap_config s2mpa01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPA01_REG_LDO_OVCB4,
+ .volatile_reg = s2mpa01_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static const struct regmap_config s2mps11_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -150,6 +166,15 @@ static const struct regmap_config s2mps11_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct regmap_config s2mps14_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS14_REG_LDODSCH3,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static const struct regmap_config s5m8763_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -168,9 +193,18 @@ static const struct regmap_config s5m8767_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
-static const struct regmap_config sec_rtc_regmap_config = {
+static const struct regmap_config s5m_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+
+ .max_register = SEC_RTC_REG_MAX,
+};
+
+static const struct regmap_config s2mps14_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS_RTC_REG_MAX,
};
#ifdef CONFIG_OF
@@ -210,24 +244,24 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
}
#endif
-static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
#ifdef CONFIG_OF
if (i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(sec_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
#endif
- return (int)id->driver_data;
+ return id->driver_data;
}
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
- const struct regmap_config *regmap;
+ const struct regmap_config *regmap, *regmap_rtc;
struct sec_pmic_dev *sec_pmic;
int ret;
@@ -259,17 +293,34 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}
switch (sec_pmic->device_type) {
+ case S2MPA01:
+ regmap = &s2mpa01_regmap_config;
+ break;
case S2MPS11X:
regmap = &s2mps11_regmap_config;
+ /*
+ * The rtc-s5m driver does not support S2MPS11 and there
+ * is no mfd_cell for the S2MPS11 RTC device.
+ * However, we must pass something to devm_regmap_init_i2c(),
+ * so use the S5M-like regmap config even though it won't work.
+ */
+ regmap_rtc = &s5m_rtc_regmap_config;
+ break;
+ case S2MPS14X:
+ regmap = &s2mps14_regmap_config;
+ regmap_rtc = &s2mps14_rtc_regmap_config;
break;
case S5M8763X:
regmap = &s5m8763_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
case S5M8767X:
regmap = &s5m8767_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
default:
regmap = &sec_regmap_config;
+ regmap_rtc = &s5m_rtc_regmap_config;
break;
}
@@ -282,10 +333,13 @@ static int sec_pmic_probe(struct i2c_client *i2c,
}
sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ if (!sec_pmic->rtc) {
+ dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
+ return -ENODEV;
+ }
i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
- sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
- &sec_rtc_regmap_config);
+ sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, regmap_rtc);
if (IS_ERR(sec_pmic->regmap_rtc)) {
ret = PTR_ERR(sec_pmic->regmap_rtc);
dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
@@ -313,10 +367,18 @@ static int sec_pmic_probe(struct i2c_client *i2c,
ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL);
break;
+ case S2MPA01:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s2mpa01_devs,
+ ARRAY_SIZE(s2mpa01_devs), NULL, 0, NULL);
+ break;
case S2MPS11X:
ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL);
break;
+ case S2MPS14X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s2mps14_devs,
+ ARRAY_SIZE(s2mps14_devs), NULL, 0, NULL);
+ break;
default:
/* If this happens the probe function is problem */
BUG();
@@ -345,6 +407,7 @@ static int sec_pmic_remove(struct i2c_client *i2c)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sec_pmic_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -379,6 +442,7 @@ static int sec_pmic_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 4de494f51d40..64e7913aadc6 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -1,7 +1,7 @@
/*
* sec-irq.c
*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
@@ -59,13 +60,13 @@ static const struct regmap_irq s2mps11_irqs[] = {
.reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
- [S2MPS11_IRQ_RTCA1] = {
+ [S2MPS11_IRQ_RTCA0] = {
.reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
},
- [S2MPS11_IRQ_RTCA2] = {
+ [S2MPS11_IRQ_RTCA1] = {
.reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA2_MASK,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
},
[S2MPS11_IRQ_SMPL] = {
.reg_offset = 1,
@@ -89,6 +90,76 @@ static const struct regmap_irq s2mps11_irqs[] = {
},
};
+static const struct regmap_irq s2mps14_irqs[] = {
+ [S2MPS14_IRQ_PWRONF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONF_MASK,
+ },
+ [S2MPS14_IRQ_PWRONR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONR_MASK,
+ },
+ [S2MPS14_IRQ_JIGONBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBF_MASK,
+ },
+ [S2MPS14_IRQ_JIGONBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBR_MASK,
+ },
+ [S2MPS14_IRQ_ACOKBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBF_MASK,
+ },
+ [S2MPS14_IRQ_ACOKBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBR_MASK,
+ },
+ [S2MPS14_IRQ_PWRON1S] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRON1S_MASK,
+ },
+ [S2MPS14_IRQ_MRB] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_MRB_MASK,
+ },
+ [S2MPS14_IRQ_RTC60S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC60S_MASK,
+ },
+ [S2MPS14_IRQ_RTCA1] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
+ },
+ [S2MPS14_IRQ_RTCA0] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
+ },
+ [S2MPS14_IRQ_SMPL] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_SMPL_MASK,
+ },
+ [S2MPS14_IRQ_RTC1S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC1S_MASK,
+ },
+ [S2MPS14_IRQ_WTSR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_WTSR_MASK,
+ },
+ [S2MPS14_IRQ_INT120C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT120C_MASK,
+ },
+ [S2MPS14_IRQ_INT140C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT140C_MASK,
+ },
+ [S2MPS14_IRQ_TSD] = {
+ .reg_offset = 2,
+ .mask = S2MPS14_IRQ_TSD_MASK,
+ },
+};
static const struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
@@ -246,6 +317,16 @@ static const struct regmap_irq_chip s2mps11_irq_chip = {
.ack_base = S2MPS11_REG_INT1,
};
+static const struct regmap_irq_chip s2mps14_irq_chip = {
+ .name = "s2mps14",
+ .irqs = s2mps14_irqs,
+ .num_irqs = ARRAY_SIZE(s2mps14_irqs),
+ .num_regs = 3,
+ .status_base = S2MPS14_REG_INT1,
+ .mask_base = S2MPS14_REG_INT1M,
+ .ack_base = S2MPS14_REG_INT1,
+};
+
static const struct regmap_irq_chip s5m8767_irq_chip = {
.name = "s5m8767",
.irqs = s5m8767_irqs,
@@ -297,6 +378,12 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
sec_pmic->irq_base, &s2mps11_irq_chip,
&sec_pmic->irq_data);
break;
+ case S2MPS14X:
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, &s2mps14_irq_chip,
+ &sec_pmic->irq_data);
+ break;
default:
dev_err(sec_pmic->dev, "Unknown device type %d\n",
sec_pmic->device_type);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 6939ae56c2e1..3cc4c7084b92 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@ static int tps65217_probe(struct i2c_client *client,
{
struct tps65217 *tps;
unsigned int version;
- unsigned int chip_id = ids->driver_data;
+ unsigned long chip_id = ids->driver_data;
const struct of_device_id *match;
bool status_off = false;
int ret;
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client,
"Failed to find matching dt id\n");
return -EINVAL;
}
- chip_id = (unsigned int)match->data;
+ chip_id = (unsigned long)match->data;
status_off = of_property_read_bool(client->dev.of_node,
"ti,pmic-shutdown-controller");
}
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 1e9a4b2102f9..bffc584e4a43 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -80,8 +80,7 @@ static const struct reg_default wm5102_revb_patch[] = {
int wm5102_patch(struct arizona *arizona)
{
const struct reg_default *wm5102_patch;
- int ret = 0;
- int i, patch_size;
+ int patch_size;
switch (arizona->rev) {
case 0:
@@ -92,21 +91,9 @@ int wm5102_patch(struct arizona *arizona)
patch_size = ARRAY_SIZE(wm5102_revb_patch);
}
- regcache_cache_bypass(arizona->regmap, true);
-
- for (i = 0; i < patch_size; i++) {
- ret = regmap_write(arizona->regmap, wm5102_patch[i].reg,
- wm5102_patch[i].def);
- if (ret != 0) {
- dev_err(arizona->dev, "Failed to write %x = %x: %d\n",
- wm5102_patch[i].reg, wm5102_patch[i].def, ret);
- goto out;
- }
- }
-
-out:
- regcache_cache_bypass(arizona->regmap, false);
- return ret;
+ return regmap_multi_reg_write_bypassed(arizona->regmap,
+ wm5102_patch,
+ patch_size);
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1bc70eb..e6fab94e2c8a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (int)of_id->data;
+ wm8994->type = (enum wm8994_type)of_id->data;
} else {
wm8994->type = id->driver_data;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6cb388e8fb7d..1cb74085e410 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -235,7 +235,7 @@ config SGI_XP
config CS5535_MFGPT
tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
- depends on PCI && X86 && MFD_CS5535
+ depends on MFD_CS5535
default n
help
This driver provides access to MFGPT functionality for other
@@ -526,4 +526,5 @@ source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
+source "drivers/misc/echo/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 99b9424ce31d..7eb4b69580c0 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
+obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index d3eee113baeb..a43053daad0e 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -72,7 +72,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 0c6e037153d2..c6cc3dc8ae1f 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 5be808406edc..22de13727641 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -150,6 +150,12 @@ static int ssc_probe(struct platform_device *pdev)
return -ENODEV;
ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
+ if (pdev->dev.of_node) {
+ struct device_node *np = pdev->dev.of_node;
+ ssc->clk_from_rk_pin =
+ of_property_read_bool(np, "atmel,clk-from-rk-pin");
+ }
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(ssc->regs))
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 820e53d0048f..9b313f7810f5 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "bmp085.h"
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 9e2b985293fc..14d90eae605b 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -101,7 +101,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/io.h>
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 154b02e5094f..6a672f9ef522 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -32,7 +32,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/list.h>
diff --git a/drivers/staging/echo/Kconfig b/drivers/misc/echo/Kconfig
index f1d41ea9cd48..f1d41ea9cd48 100644
--- a/drivers/staging/echo/Kconfig
+++ b/drivers/misc/echo/Kconfig
diff --git a/drivers/staging/echo/Makefile b/drivers/misc/echo/Makefile
index 7d4caac12a8d..7d4caac12a8d 100644
--- a/drivers/staging/echo/Makefile
+++ b/drivers/misc/echo/Makefile
diff --git a/drivers/staging/echo/echo.c b/drivers/misc/echo/echo.c
index 9597e9523cac..9597e9523cac 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/misc/echo/echo.c
diff --git a/drivers/staging/echo/echo.h b/drivers/misc/echo/echo.h
index 9b08c63e6369..9b08c63e6369 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/misc/echo/echo.h
diff --git a/drivers/staging/echo/fir.h b/drivers/misc/echo/fir.h
index 7b9fabf1fea5..7b9fabf1fea5 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/misc/echo/fir.h
diff --git a/drivers/staging/echo/oslec.h b/drivers/misc/echo/oslec.h
index f4175360ce27..f4175360ce27 100644
--- a/drivers/staging/echo/oslec.h
+++ b/drivers/misc/echo/oslec.h
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4f3bca1003a1..634f72929e12 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index c169e07654cb..33f8673d23a6 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -3,7 +3,7 @@
* Philip Edelbrock <phil@netroedge.com>
* Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,7 +17,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 78e55b501c94..9ebeacdb8ec4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index e36157d5d3ab..580ff9df5529 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -27,7 +27,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
index 9c34e5704304..3f2b625b2032 100644
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
@@ -96,7 +95,7 @@ static int sunxi_sid_remove(struct platform_device *pdev)
}
static const struct of_device_id sunxi_sid_of_match[] = {
- { .compatible = "allwinner,sun4i-sid", .data = (void *)16},
+ { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
{/* sentinel */},
};
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index a725c79c35f5..71d2793b372c 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -396,7 +396,7 @@ static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"fsa9480 micro USB", usbsw);
if (ret) {
- dev_err(&client->dev, "failed to reqeust IRQ\n");
+ dev_err(&client->dev, "failed to request IRQ\n");
return ret;
}
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 3bfdc07a7248..50d2096ea1c7 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
return rc;
}
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 170bd3daf336..90520d76633f 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index e3183f26216b..12c30b486b27 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -26,7 +26,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b7f84dacf822..4a9c50a43afb 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -23,7 +23,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 61fbe6acabef..0a1565e63c71 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 036effe9a795..3ef4627f9cb1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 7c97550240f1..d324f8a97b88 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 9aa2bd2a71ae..bd06d0cfac45 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/interrupt.h>
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 49c7a23f02fc..d66a2f24f6b3 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -30,6 +30,7 @@
*
* See Documentation/fault-injection/provoke-crashes.txt for instructions
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -45,6 +46,7 @@
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
+#include <asm/cacheflush.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
@@ -101,6 +103,7 @@ enum ctype {
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
+ CT_WRITE_KERN,
};
static char* cp_name[] = {
@@ -137,6 +140,7 @@ static char* cp_type[] = {
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
+ "WRITE_KERN",
};
static struct jprobe lkdtm;
@@ -316,6 +320,13 @@ static void do_nothing(void)
return;
}
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
static noinline void corrupt_stack(void)
{
/* Use default char array length that triggers stack protection. */
@@ -328,7 +339,12 @@ static void execute_location(void *dst)
{
void (*func)(void) = dst;
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
func();
}
@@ -337,8 +353,13 @@ static void execute_user_location(void *dst)
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
func();
}
@@ -463,8 +484,12 @@ static void lkdtm_do_action(enum ctype which)
}
ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %p\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %p\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
@@ -475,10 +500,28 @@ static void lkdtm_do_action(enum ctype which)
unsigned long *ptr;
ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
}
+ case CT_WRITE_KERN: {
+ size_t size;
+ unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten -
+ (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr,
+ (unsigned long)(ptr + size));
+
+ do_overwritten();
+ break;
+ }
case CT_NONE:
default:
break;
@@ -493,8 +536,8 @@ static void lkdtm_handler(void)
spin_lock_irqsave(&count_lock, flags);
count--;
- printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
- cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
if (count == 0) {
do_it = true;
@@ -551,18 +594,18 @@ static int lkdtm_register_cpoint(enum cname which)
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
- printk(KERN_INFO "lkdtm: Crash point not available\n");
+ pr_info("Crash point not available\n");
return -EINVAL;
#endif
break;
default:
- printk(KERN_INFO "lkdtm: Invalid Crash Point\n");
+ pr_info("Invalid Crash Point\n");
return -EINVAL;
}
cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
- printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
+ pr_info("Couldn't register jprobe\n");
cpoint = CN_INVALID;
}
@@ -709,8 +752,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
if (type == CT_NONE)
return -EINVAL;
- printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
- cp_type_to_str(type));
+ pr_info("Performing direct entry %s\n", cp_type_to_str(type));
lkdtm_do_action(type);
*off += count;
@@ -772,7 +814,7 @@ static int __init lkdtm_module_init(void)
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
- printk(KERN_ERR "lkdtm: creating root dir failed\n");
+ pr_err("creating root dir failed\n");
return -ENODEV;
}
@@ -787,28 +829,26 @@ static int __init lkdtm_module_init(void)
de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
NULL, &cur->fops);
if (de == NULL) {
- printk(KERN_ERR "lkdtm: could not create %s\n",
- cur->name);
+ pr_err("could not create %s\n", cur->name);
goto out_err;
}
}
if (lkdtm_parse_commandline() == -EINVAL) {
- printk(KERN_INFO "lkdtm: Invalid command\n");
+ pr_info("Invalid command\n");
goto out_err;
}
if (cpoint != CN_INVALID && cptype != CT_NONE) {
ret = lkdtm_register_cpoint(cpoint);
if (ret < 0) {
- printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
- cpoint);
+ pr_info("Invalid crash point %d\n", cpoint);
goto out_err;
}
- printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n",
- cpoint_name, cpoint_type);
+ pr_info("Crash point %s of type %s registered\n",
+ cpoint_name, cpoint_type);
} else {
- printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n");
+ pr_info("No crash points registered, enable through debugfs\n");
}
return 0;
@@ -823,7 +863,7 @@ static void __exit lkdtm_module_exit(void)
debugfs_remove_recursive(lkdtm_debugfs_root);
unregister_jprobe(&lkdtm);
- printk(KERN_INFO "lkdtm: Crash point unregistered\n");
+ pr_info("Crash point unregistered\n");
}
module_init(lkdtm_module_init);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c76fa31e9bf6..d23384dde73b 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -34,3 +34,12 @@ config INTEL_MEI_ME
82Q33 Express
82X38/X48 Express
+config INTEL_MEI_TXE
+ tristate "Intel Trusted Execution Environment with ME Interface"
+ select INTEL_MEI
+ depends on X86 && PCI && WATCHDOG_CORE
+ help
+ MEI Support for Trusted Execution Environment device on Intel SoCs
+
+ Supported SoCs:
+ Intel Bay Trail
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 08698a466268..8ebc6cda1373 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,6 +1,6 @@
#
# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
-# Copyright (c) 2010-2011, Intel Corporation.
+# Copyright (c) 2010-2014, Intel Corporation.
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
@@ -17,3 +17,7 @@ mei-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
mei-me-objs := pci-me.o
mei-me-objs += hw-me.o
+
+obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
+mei-txe-objs := pci-txe.o
+mei-txe-objs += hw-txe.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 2fad84432829..b8deb3455480 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -21,7 +21,6 @@
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/list.h>
@@ -35,7 +34,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
@@ -79,10 +77,9 @@ int mei_amthif_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- ret = i;
dev_info(&dev->pdev->dev,
- "amthif: failed to find the client %d\n", ret);
- return ret;
+ "amthif: failed to find the client %d\n", i);
+ return -ENOTTY;
}
cl->me_client_id = dev->me_clients[i].client_id;
@@ -116,14 +113,11 @@ int mei_amthif_host_init(struct mei_device *dev)
cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
- cl->state = MEI_FILE_DISCONNECTED;
- cl->host_client_id = 0;
- } else {
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- }
- return 0;
+ ret = mei_cl_connect(cl, NULL);
+
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+ return ret;
}
/**
@@ -137,14 +131,12 @@ int mei_amthif_host_init(struct mei_device *dev)
struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
struct file *file)
{
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(pos, next,
- &dev->amthif_rd_complete_list.list, list) {
- if (pos->cl && pos->cl == &dev->iamthif_cl &&
- pos->file_object == file)
- return pos;
+ list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) {
+ if (cb->cl && cb->cl == &dev->iamthif_cl &&
+ cb->file_object == file)
+ return cb;
}
return NULL;
}
@@ -180,14 +172,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
/* Only possible if we are in timeout */
if (!cl || cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "bad file ext.\n");
- return -ETIMEDOUT;
+ return -ETIME;
}
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
-
if (i < 0) {
dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
- return -ENODEV;
+ return -ENOTTY;
}
dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
@@ -228,7 +219,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* 15 sec for the message has expired */
list_del(&cb->list);
- rets = -ETIMEDOUT;
+ rets = -ETIME;
goto free;
}
}
@@ -253,9 +244,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* the buf_idx may point beyond */
length = min_t(size_t, length, (cb->buf_idx - *offset));
- if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
- else {
+ } else {
rets = length;
if ((*offset + length) < cb->buf_idx) {
*offset += length;
@@ -302,9 +294,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (ret < 0)
return ret;
- if (ret && dev->hbuf_is_ready) {
+ if (ret && mei_hbuf_acquire(dev)) {
ret = 0;
- dev->hbuf_is_ready = false;
if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
@@ -336,10 +327,6 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
- if (!dev->hbuf_is_ready)
- dev_dbg(&dev->pdev->dev, "host buffer is not empty");
-
- dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
return 0;
@@ -365,7 +352,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
if (ret)
return ret;
- cb->fop_type = MEI_FOP_IOCTL;
+ cb->fop_type = MEI_FOP_WRITE;
if (!list_empty(&dev->amthif_cmd_list.list) ||
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
@@ -447,23 +434,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
/**
- * mei_amthif_irq_write_completed - processes completed iamthif operation.
+ * mei_amthif_irq_write - write iamthif command in irq thread context.
*
* @dev: the device structure.
- * @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
-int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_msg_hdr mei_hdr;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
u32 msg_slots = mei_data2slots(len);
+ int slots;
int rets;
rets = mei_cl_flow_ctrl_creds(cl);
@@ -480,13 +467,15 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.reserved = 0;
mei_hdr.internal = 0;
- if (*slots >= msg_slots) {
+ slots = mei_hbuf_empty_slots(dev);
+
+ if (slots >= msg_slots) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
- } else if (*slots == dev->hbuf_depth) {
- msg_slots = *slots;
- len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ } else if (slots == dev->hbuf_depth) {
+ msg_slots = slots;
+ len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
@@ -496,7 +485,6 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
- *slots -= msg_slots;
rets = mei_write_message(dev, &mei_hdr,
dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
if (rets) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4bc7d620d695..ddc5ac92a200 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -26,7 +26,6 @@
#include <linux/mei_cl_bus.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
@@ -145,9 +144,9 @@ static struct device_type mei_cl_device_type = {
static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev,
uuid_le uuid)
{
- struct mei_cl *cl, *next;
+ struct mei_cl *cl;
- list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
+ list_for_each_entry(cl, &dev->device_list, device_link) {
if (!uuid_le_cmp(uuid, cl->device_uuid))
return cl;
}
@@ -524,6 +523,22 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
schedule_work(&device->event_work);
}
+void mei_cl_bus_remove_devices(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+
+ mutex_lock(&dev->device_lock);
+ list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
+ if (cl->device)
+ mei_cl_remove_device(cl->device);
+
+ list_del(&cl->device_link);
+ mei_cl_unlink(cl);
+ kfree(cl);
+ }
+ mutex_unlock(&dev->device_lock);
+}
+
int __init mei_cl_bus_init(void)
{
return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..8c078b808cd3 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -29,20 +29,21 @@
* mei_me_cl_by_uuid - locate index of me client
*
* @dev: mei device
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
* returns me client index or -ENOENT if not found
*/
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
- int i, res = -ENOENT;
+ int i;
for (i = 0; i < dev->me_clients_num; ++i)
if (uuid_le_cmp(*uuid,
- dev->me_clients[i].props.protocol_name) == 0) {
- res = i;
- break;
- }
+ dev->me_clients[i].props.protocol_name) == 0)
+ return i;
- return res;
+ return -ENOENT;
}
@@ -60,37 +61,79 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
int i;
+
for (i = 0; i < dev->me_clients_num; i++)
if (dev->me_clients[i].client_id == client_id)
- break;
- if (WARN_ON(dev->me_clients[i].client_id != client_id))
- return -ENOENT;
+ return i;
- if (i == dev->me_clients_num)
- return -ENOENT;
-
- return i;
+ return -ENOENT;
}
/**
- * mei_io_list_flush - removes list entry belonging to cl.
+ * mei_cl_cmp_id - tells if the clients are the same
*
- * @list: An instance of our list structure
- * @cl: host client
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true - if the clients has same host and me ids
+ * false - otherwise
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+/**
+ * mei_io_list_flush - removes cbs belonging to cl.
+ *
+ * @list: an instance of our list structure
+ * @cl: host client, can be NULL for flushing the whole list
+ * @free: whether to free the cbs
*/
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+static void __mei_io_list_flush(struct mei_cl_cb *list,
+ struct mei_cl *cl, bool free)
{
struct mei_cl_cb *cb;
struct mei_cl_cb *next;
+ /* enable removing everything if no cl is specified */
list_for_each_entry_safe(cb, next, &list->list, list) {
- if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+ if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
list_del(&cb->list);
+ if (free)
+ mei_io_cb_free(cb);
+ }
}
}
/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ __mei_io_list_flush(list, cl, false);
+}
+
+
+/**
+ * mei_io_list_free - removes cb belonging to cl and free them
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ __mei_io_list_flush(list, cl, true);
+}
+
+/**
* mei_io_cb_free - free mei_cb_private related memory
*
* @cb: mei callback struct
@@ -196,8 +239,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_list_flush(&cl->dev->read_list, cl);
- mei_io_list_flush(&cl->dev->write_list, cl);
- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_free(&cl->dev->write_list, cl);
+ mei_io_list_free(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
@@ -254,10 +297,9 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
- struct mei_cl_cb *cb = NULL;
- struct mei_cl_cb *next = NULL;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+ list_for_each_entry(cb, &dev->read_list.list, list)
if (mei_cl_cmp_id(cl, cb->cl))
return cb;
return NULL;
@@ -375,6 +417,23 @@ void mei_host_client_init(struct work_struct *work)
mutex_unlock(&dev->device_lock);
}
+/**
+ * mei_hbuf_acquire: try to acquire host buffer
+ *
+ * @dev: the device structure
+ * returns true if host buffer was acquired
+ */
+bool mei_hbuf_acquire(struct mei_device *dev)
+{
+ if (!dev->hbuf_is_ready) {
+ dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
+ return false;
+ }
+
+ dev->hbuf_is_ready = false;
+
+ return true;
+}
/**
* mei_cl_disconnect - disconnect host client from the me one
@@ -406,8 +465,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
return -ENOMEM;
cb->fop_type = MEI_FOP_CLOSE;
- if (dev->hbuf_is_ready) {
- dev->hbuf_is_ready = false;
+ if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_disconnect_req(dev, cl)) {
rets = -ENODEV;
cl_err(dev, cl, "failed to disconnect.\n");
@@ -461,17 +519,17 @@ free:
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
struct mei_device *dev;
- struct mei_cl *pos;
- struct mei_cl *next;
+ struct mei_cl *ocl; /* the other client */
if (WARN_ON(!cl || !cl->dev))
return false;
dev = cl->dev;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if ((pos->state == MEI_FILE_CONNECTING) &&
- (pos != cl) && cl->me_client_id == pos->me_client_id)
+ list_for_each_entry(ocl, &dev->file_list, link) {
+ if (ocl->state == MEI_FILE_CONNECTING &&
+ ocl != cl &&
+ cl->me_client_id == ocl->me_client_id)
return true;
}
@@ -505,11 +563,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
goto out;
}
- cb->fop_type = MEI_FOP_IOCTL;
-
- if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
- dev->hbuf_is_ready = false;
+ cb->fop_type = MEI_FOP_CONNECT;
+ /* run hbuf acquire last so we don't have to undo */
+ if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_connect_req(dev, cl)) {
rets = -ENODEV;
goto out;
@@ -521,18 +578,19 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
}
mutex_unlock(&dev->device_lock);
- rets = wait_event_timeout(dev->wait_recvd_msg,
- (cl->state == MEI_FILE_CONNECTED ||
- cl->state == MEI_FILE_DISCONNECTED),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ wait_event_timeout(dev->wait_recvd_msg,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (cl->state != MEI_FILE_CONNECTED) {
- rets = -EFAULT;
+ /* something went really wrong */
+ if (!cl->status)
+ cl->status = -EFAULT;
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
- goto out;
}
rets = cl->status;
@@ -554,7 +612,8 @@ out:
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
struct mei_device *dev;
- int i;
+ struct mei_me_client *me_cl;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -567,19 +626,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->mei_flow_ctrl_creds) {
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
- } else {
- return 0;
- }
- }
+ id = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (id < 0) {
+ cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ return id;
}
- return -ENOENT;
+
+ me_cl = &dev->me_clients[id];
+ if (me_cl->mei_flow_ctrl_creds) {
+ if (WARN_ON(me_cl->props.single_recv_buf == 0))
+ return -EINVAL;
+ return 1;
+ }
+ return 0;
}
/**
@@ -595,32 +654,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
struct mei_device *dev;
- int i;
+ struct mei_me_client *me_cl;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
- if (!dev->me_clients_num)
- return -ENOENT;
+ id = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (id < 0) {
+ cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+ return id;
+ }
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->props.single_recv_buf != 0) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- dev->me_clients[i].mei_flow_ctrl_creds--;
- } else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- cl->mei_flow_ctrl_creds--;
- }
- return 0;
- }
+ me_cl = &dev->me_clients[id];
+ if (me_cl->props.single_recv_buf != 0) {
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ me_cl->mei_flow_ctrl_creds--;
+ } else {
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->mei_flow_ctrl_creds--;
}
- return -ENOENT;
+ return 0;
}
/**
@@ -652,7 +710,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
i = mei_me_cl_by_id(dev, cl->me_client_id);
if (i < 0) {
cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
- return -ENODEV;
+ return -ENOTTY;
}
cb = mei_io_cb_init(cl, NULL);
@@ -666,9 +724,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
goto err;
cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
- if (dev->hbuf_is_ready) {
- dev->hbuf_is_ready = false;
+ if (mei_hbuf_acquire(dev)) {
if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl_err(dev, cl, "flow control send failed\n");
rets = -ENODEV;
@@ -678,6 +734,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
+
+ cl->read_cb = cb;
+
return rets;
err:
mei_io_cb_free(cb);
@@ -685,27 +744,26 @@ err:
}
/**
- * mei_cl_irq_write_complete - write a message to device
+ * mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise error.
*/
-int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
size_t len;
u32 msg_slots;
+ int slots;
int rets;
-
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -722,6 +780,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
+ slots = mei_hbuf_empty_slots(dev);
len = buf->size - cb->buf_idx;
msg_slots = mei_data2slots(len);
@@ -730,13 +789,13 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
mei_hdr.reserved = 0;
mei_hdr.internal = cb->internal;
- if (*slots >= msg_slots) {
+ if (slots >= msg_slots) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
- } else if (*slots == dev->hbuf_depth) {
- msg_slots = *slots;
- len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ } else if (slots == dev->hbuf_depth) {
+ msg_slots = slots;
+ len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr.length = len;
mei_hdr.msg_complete = 0;
} else {
@@ -747,7 +806,6 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- *slots -= msg_slots;
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
if (rets) {
cl->status = rets;
@@ -800,21 +858,29 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cb->fop_type = MEI_FOP_WRITE;
+ cb->buf_idx = 0;
+ cl->writing_state = MEI_IDLE;
+
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
+ mei_hdr.msg_complete = 0;
+ mei_hdr.internal = cb->internal;
rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
- /* Host buffer is not ready, we queue the request */
- if (rets == 0 || !dev->hbuf_is_ready) {
- cb->buf_idx = 0;
- /* unseting complete will enqueue the cb for write */
- mei_hdr.msg_complete = 0;
+ if (rets == 0) {
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ rets = buf->size;
+ goto out;
+ }
+ if (!mei_hbuf_acquire(dev)) {
+ cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
rets = buf->size;
goto out;
}
-
- dev->hbuf_is_ready = false;
/* Check for a maximum length */
if (buf->size > mei_hbuf_max_len(dev)) {
@@ -825,12 +891,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.msg_complete = 1;
}
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
- mei_hdr.reserved = 0;
- mei_hdr.internal = cb->internal;
-
-
rets = mei_write_message(dev, &mei_hdr, buf->data);
if (rets)
goto err;
@@ -838,13 +898,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
- rets = buf->size;
out:
if (mei_hdr.msg_complete) {
- if (mei_cl_flow_ctrl_reduce(cl)) {
- rets = -ENODEV;
+ rets = mei_cl_flow_ctrl_reduce(cl);
+ if (rets < 0)
goto err;
- }
+
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
list_add_tail(&cb->list, &dev->write_list.list);
@@ -854,15 +913,18 @@ out:
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock);
- if (wait_event_interruptible(cl->tx_wait,
- cl->writing_state == MEI_WRITE_COMPLETE)) {
- if (signal_pending(current))
- rets = -EINTR;
- else
- rets = -ERESTARTSYS;
- }
+ rets = wait_event_interruptible(cl->tx_wait,
+ cl->writing_state == MEI_WRITE_COMPLETE);
mutex_lock(&dev->device_lock);
+ /* wait_event_interruptible returns -ERESTARTSYS */
+ if (rets) {
+ if (signal_pending(current))
+ rets = -EINTR;
+ goto err;
+ }
}
+
+ rets = buf->size;
err:
return rets;
}
@@ -903,12 +965,11 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
void mei_cl_all_disconnect(struct mei_device *dev)
{
- struct mei_cl *cl, *next;
+ struct mei_cl *cl;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -921,8 +982,8 @@ void mei_cl_all_disconnect(struct mei_device *dev)
*/
void mei_cl_all_wakeup(struct mei_device *dev)
{
- struct mei_cl *cl, *next;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ struct mei_cl *cl;
+ list_for_each_entry(cl, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
@@ -941,12 +1002,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
*/
void mei_cl_all_write_clear(struct mei_device *dev)
{
- struct mei_cl_cb *cb, *next;
-
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
- list_del(&cb->list);
- mei_io_cb_free(cb);
- }
+ mei_io_list_free(&dev->write_list, NULL);
+ mei_io_list_free(&dev->write_waiting_list, NULL);
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c8396e582f1c..96d5de0389f9 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)
{
INIT_LIST_HEAD(&list->list);
}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
/*
* MEI Host Client Functions
*/
@@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);
int mei_cl_flush_queues(struct mei_cl *cl);
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
- const struct mei_cl *cl2)
-{
- return cl1 && cl2 &&
- (cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
-}
-
int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
@@ -86,15 +68,15 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
*/
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
- return (cl->dev &&
+ return cl->dev &&
cl->dev->dev_state == MEI_DEV_ENABLED &&
- cl->state == MEI_FILE_CONNECTED);
+ cl->state == MEI_FILE_CONNECTED;
}
static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
{
- return (MEI_FILE_INITIALIZING == cl->state ||
+ return MEI_FILE_INITIALIZING == cl->state ||
MEI_FILE_DISCONNECTED == cl->state ||
- MEI_FILE_DISCONNECTING == cl->state);
+ MEI_FILE_DISCONNECTING == cl->state;
}
bool mei_cl_is_other_connecting(struct mei_cl *cl);
@@ -102,8 +84,8 @@ int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
int mei_cl_read_start(struct mei_cl *cl, size_t length);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
-int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list);
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a3ae154444b2..ced5b777c70f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -75,6 +75,54 @@ static const struct file_operations mei_dbgfs_fops_meclients = {
.llseek = generic_file_llseek,
};
+static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mei_device *dev = fp->private_data;
+ struct mei_cl *cl;
+ const size_t bufsz = 1024;
+ char *buf;
+ int i = 0;
+ int pos = 0;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " |me|host|state|rd|wr|\n");
+
+ mutex_lock(&dev->device_lock);
+
+ /* if the driver is not enabled the list won't be consistent */
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ goto out;
+
+ list_for_each_entry(cl, &dev->file_list, link) {
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%2d|%2d|%4d|%5d|%2d|%2d|\n",
+ i, cl->me_client_id, cl->host_client_id, cl->state,
+ cl->reading_state, cl->writing_state);
+ i++;
+ }
+out:
+ mutex_unlock(&dev->device_lock);
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_active = {
+ .open = simple_open,
+ .read = mei_dbgfs_read_active,
+ .llseek = generic_file_llseek,
+};
+
static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -128,6 +176,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(&dev->pdev->dev, "meclients: registration failed\n");
goto err;
}
+ f = debugfs_create_file("active", S_IRUSR, dir,
+ dev, &mei_dbgfs_fops_active);
+ if (!f) {
+ dev_err(&dev->pdev->dev, "active: registration failed\n");
+ goto err;
+ }
f = debugfs_create_file("devstate", S_IRUSR, dir,
dev, &mei_dbgfs_fops_devstate);
if (!f) {
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 28cd74c073b9..4960288e543a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -21,7 +21,41 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
+#include "client.h"
+
+static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
+{
+#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status
+ switch (status) {
+ MEI_CL_CS(SUCCESS);
+ MEI_CL_CS(NOT_FOUND);
+ MEI_CL_CS(ALREADY_STARTED);
+ MEI_CL_CS(OUT_OF_RESOURCES);
+ MEI_CL_CS(MESSAGE_SMALL);
+ default: return "unknown";
+ }
+#undef MEI_CL_CS
+}
+
+/**
+ * mei_cl_conn_status_to_errno - convert client connect response
+ * status to error code
+ *
+ * @status: client connect response status
+ *
+ * returns corresponding error code
+ */
+static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
+{
+ switch (status) {
+ case MEI_CL_CONN_SUCCESS: return 0;
+ case MEI_CL_CONN_NOT_FOUND: return -ENOTTY;
+ case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
+ case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
+ case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
+ default: return -EINVAL;
+ }
+}
/**
* mei_hbm_me_cl_allocate - allocates storage for me clients
@@ -100,33 +134,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
- struct hbm_client_connect_response *rs)
-{
- if (mei_hbm_cl_addr_equal(cl, rs)) {
- if (!rs->status) {
- cl->state = MEI_FILE_CONNECTED;
- cl->status = 0;
-
- } else {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = -ENODEV;
- }
- cl->timer_count = 0;
-
- return true;
- }
- return false;
-}
-
-/**
* mei_hbm_idle - set hbm to idle state
*
* @dev: the device structure
@@ -147,13 +154,13 @@ int mei_hbm_start_wait(struct mei_device *dev)
ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->hbm_state == MEI_HBM_IDLE ||
dev->hbm_state >= MEI_HBM_STARTED,
- mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
+ mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
mutex_lock(&dev->device_lock);
if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) {
dev->hbm_state = MEI_HBM_IDLE;
dev_err(&dev->pdev->dev, "waiting for mei start failed\n");
- return -ETIMEDOUT;
+ return -ETIME;
}
return 0;
}
@@ -283,17 +290,18 @@ static int mei_hbm_prop_req(struct mei_device *dev)
}
/**
- * mei_hbm_stop_req_prepare - prepare stop request message
+ * mei_hbm_stop_req - send stop request message
*
* @dev - mei device
- * @mei_hdr - mei message header
- * @data - hbm message body buffer
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
*/
-static void mei_hbm_stop_req_prepare(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr, unsigned char *data)
+static int mei_hbm_stop_req(struct mei_device *dev)
{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_stop_request *req =
- (struct hbm_host_stop_request *)data;
+ (struct hbm_host_stop_request *)dev->wr_msg.data;
const size_t len = sizeof(struct hbm_host_stop_request);
mei_hbm_hdr(mei_hdr, len);
@@ -301,6 +309,8 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
memset(req, 0, len);
req->hbm_cmd = HOST_STOP_REQ_CMD;
req->reason = DRIVER_STOP_REQUEST;
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
/**
@@ -319,8 +329,7 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
- dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
+ cl_dbg(dev, cl, "sending flow control\n");
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
}
@@ -330,27 +339,34 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
*
* @dev: the device structure
* @flow: flow control.
+ *
+ * return 0 on success, < 0 otherwise
*/
-static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
- struct mei_me_client *client;
- int i;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client = &dev->me_clients[i];
- if (client && flow->me_addr == client->client_id) {
- if (client->props.single_recv_buf) {
- client->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
- flow->me_addr);
- dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
- client->mei_flow_ctrl_creds);
- } else {
- BUG(); /* error in flow control */
- }
- }
+ struct mei_me_client *me_cl;
+ int id;
+
+ id = mei_me_cl_by_id(dev, flow->me_addr);
+ if (id < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ flow->me_addr);
+ return id;
}
+
+ me_cl = &dev->me_clients[id];
+ if (me_cl->props.single_recv_buf) {
+ me_cl->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+ flow->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+ me_cl->mei_flow_ctrl_creds);
+ } else {
+ BUG(); /* error in flow control */
+ }
+
+ return 0;
}
/**
@@ -362,8 +378,7 @@ static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
struct hbm_flow_control *flow_control)
{
- struct mei_cl *cl = NULL;
- struct mei_cl *next = NULL;
+ struct mei_cl *cl;
if (!flow_control->host_addr) {
/* single receive buffer */
@@ -372,7 +387,7 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
}
/* normal connection */
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, flow_control)) {
cl->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
@@ -405,6 +420,25 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
}
/**
+ * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
* mei_hbm_cl_disconnect_res - disconnect response from ME
*
* @dev: the device structure
@@ -414,29 +448,23 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "disconnect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = pos->cl;
-
- if (!cl) {
- list_del(&pos->list);
+ struct mei_cl_cb *cb, *next;
+
+ dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n",
+ rs->me_addr, rs->host_addr, rs->status);
+
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
+ cl = cb->cl;
+
+ /* this should not happen */
+ if (WARN_ON(!cl)) {
+ list_del(&cb->list);
return;
}
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
if (mei_hbm_cl_addr_equal(cl, rs)) {
- list_del(&pos->list);
- if (!rs->status)
+ list_del(&cb->list);
+ if (rs->status == MEI_CL_DISCONN_SUCCESS)
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
@@ -476,46 +504,41 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
{
struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
+ struct mei_cl_cb *cb, *next;
- dev_dbg(&dev->pdev->dev,
- "connect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
+ dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n",
+ rs->me_addr, rs->host_addr,
+ mei_cl_conn_status_str(rs->status));
- /* if WD or iamthif client treat specially */
+ cl = NULL;
- if (is_treat_specially_client(&dev->wd_cl, rs)) {
- dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- mei_watchdog_register(dev);
+ list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
- return;
- }
+ cl = cb->cl;
+ /* this should not happen */
+ if (WARN_ON(!cl)) {
+ list_del_init(&cb->list);
+ continue;
+ }
- if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- return;
- }
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ if (cb->fop_type != MEI_FOP_CONNECT)
+ continue;
- cl = pos->cl;
- if (!cl) {
- list_del(&pos->list);
- return;
- }
- if (pos->fop_type == MEI_FOP_IOCTL) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&pos->list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ list_del(&cb->list);
+ break;
}
}
+
+ if (!cl)
+ return;
+
+ cl->timer_count = 0;
+ if (rs->status == MEI_CL_CONN_SUCCESS)
+ cl->state = MEI_FILE_CONNECTED;
+ else
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -525,32 +548,34 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message from the me
+ *
+ * returns -ENOMEM on allocation failure
*/
-static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
struct hbm_client_connect_request *disconnect_req)
{
- struct mei_cl *cl, *next;
- const size_t len = sizeof(struct hbm_client_connect_response);
+ struct mei_cl *cl;
+ struct mei_cl_cb *cb;
- list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ list_for_each_entry(cl, &dev->file_list, link) {
if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
cl->state = MEI_FILE_DISCONNECTED;
cl->timer_count = 0;
- if (cl == &dev->wd_cl)
- dev->wd_pending = false;
- else if (cl == &dev->iamthif_cl)
- dev->iamthif_timer = 0;
-
- /* prepare disconnect response */
- mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
- mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
- dev->wr_ext_msg.data, len);
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+ cb->fop_type = MEI_FOP_DISCONNECT_RSP;
+ cl_dbg(dev, cl, "add disconnect response as first\n");
+ list_add(&cb->list, &dev->ctrl_wr_list.list);
+
break;
}
}
+ return 0;
}
@@ -629,10 +654,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
dev->hbm_state = MEI_HBM_STOPPED;
- mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data);
- if (mei_write_message(dev, &dev->wr_msg.hdr,
- dev->wr_msg.data)) {
+ if (mei_hbm_stop_req(dev)) {
dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
return -EIO;
}
@@ -778,10 +800,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
case ME_STOP_REQ_CMD:
dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
-
dev->hbm_state = MEI_HBM_STOPPED;
- mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data);
+ if (mei_hbm_stop_req(dev)) {
+ dev_err(&dev->pdev->dev, "hbm: stop: failed to send stop request\n");
+ return -EIO;
+ }
break;
default:
BUG();
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 5f92188a5cd7..20e8782711c0 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -54,6 +54,7 @@ int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
bool mei_hbm_version_is_supported(struct mei_device *dev);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6f656c053b14..8dbdaaef1af5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -20,10 +20,10 @@
#include <linux/interrupt.h>
#include "mei_dev.h"
-#include "hw-me.h"
-
#include "hbm.h"
+#include "hw-me.h"
+#include "hw-me-regs.h"
/**
* mei_me_reg_read - Reads 32bit data from the mei device
@@ -240,11 +240,11 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
+ mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!err && !dev->recvd_hw_ready) {
if (!err)
- err = -ETIMEDOUT;
+ err = -ETIME;
dev_err(&dev->pdev->dev,
"wait hw ready failed. status = %d\n", err);
return err;
@@ -303,7 +303,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)
*
* @dev: the device structure
*
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
@@ -326,7 +326,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
/**
- * mei_write_message - writes a message to mei device.
+ * mei_me_write_message - writes a message to mei device.
*
* @dev: the device structure
* @header: mei HECI header of message
@@ -354,7 +354,7 @@ static int mei_me_write_message(struct mei_device *dev,
dw_cnt = mei_data2slots(length);
if (empty_slots < 0 || dw_cnt > empty_slots)
- return -EIO;
+ return -EMSGSIZE;
mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
@@ -381,7 +381,7 @@ static int mei_me_write_message(struct mei_device *dev,
*
* @dev: the device structure
*
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
*/
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
@@ -505,17 +505,25 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
- /* we have urgent data to send so break the read */
- if (dev->wr_ext_msg.hdr.length)
- break;
dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ /* There is a race between ME write and interrupt delivery:
+ * Not all data is always available immediately after the
+ * interrupt, so try to read again on the next interrupt.
+ */
+ if (rets == -ENODATA)
+ break;
+
if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
+ rets);
schedule_work(&dev->reset_work);
goto end;
}
}
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
rets = mei_irq_write_handler(dev, &complete_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 80bd829fbd9a..893d5119fa9b 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -20,6 +20,7 @@
#define _MEI_INTERFACE_H_
#include <linux/mei.h>
+#include <linux/irqreturn.h>
#include "mei_dev.h"
#include "client.h"
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
new file mode 100644
index 000000000000..7283c24c1af1
--- /dev/null
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -0,0 +1,294 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_TXE_REGS_H_
+#define _MEI_HW_TXE_REGS_H_
+
+#include "hw.h"
+
+#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC)
+#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
+#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
+#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
+
+enum {
+ SEC_BAR,
+ BRIDGE_BAR,
+
+ NUM_OF_MEM_BARS
+};
+
+/* SeC FW Status Register
+ *
+ * FW uses this register in order to report its status to host.
+ * This register resides in PCI-E config space.
+ */
+#define PCI_CFG_TXE_FW_STS0 0x40
+# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F
+# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0
+# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200
+# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000
+# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000
+# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000
+
+
+#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */
+
+/* IPC Input Doorbell Register */
+#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR)
+
+/* IPC Input Status Register
+ * This register indicates whether or not processing of
+ * the most recent command has been completed by the SEC
+ * New commands and payloads should not be written by the Host
+ * until this indicates that the previous command has been processed.
+ */
+#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR)
+# define SEC_IPC_INPUT_STATUS_RDY BIT(0)
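The ready-bit handshake described above amounts to a single register test. A minimal sketch follows; the txe_input_idle() helper is hypothetical and not part of the patch, and the driver's real check is mei_txe_is_input_ready() in hw-txe.c further down.

/* Sketch only: the host may queue the next command only after
 * SEC_IPC_INPUT_STATUS_RDY reports the previous one as processed.
 */
static inline bool txe_input_idle(void __iomem *sec_bar)
{
	u32 status = ioread32(sec_bar + SEC_IPC_INPUT_STATUS_REG);

	return !!(status & SEC_IPC_INPUT_STATUS_RDY);
}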
+
+/* IPC Host Interrupt Status Register */
+#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR)
+#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0)
+#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1)
+#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5)
+#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17)
+#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18)
+#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19)
+#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21)
+
+/* Convenient mask for pending interrupts */
+#define SEC_IPC_HOST_INT_STATUS_PENDING \
+ (SEC_IPC_HOST_INT_STATUS_OUT_DB| \
+ SEC_IPC_HOST_INT_STATUS_IN_RDY)
+
+/* IPC Host Interrupt Mask Register */
+#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR)
+
+# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */
+# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */
+
+/* IPC Input Payload RAM */
+#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR)
+/* IPC Shared Payload RAM */
+#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR)
+
+/* SeC Address Translation Table Entry 2 - Ctrl
+ *
+ * This register resides also in SeC's PCI-E Memory space.
+ */
+#define SATT2_CTRL_REG 0x1040
+# define SATT2_CTRL_VALID_MSK BIT(0)
+# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8
+# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12)
+
+/* SATT Table Entry 2 SAP Base Address Register */
+#define SATT2_SAP_BA_REG 0x1044
+/* SATT Table Entry 2 SAP Size Register. */
+#define SATT2_SAP_SIZE_REG 0x1048
+ /* SATT Table Entry 2 SAP Bridge Address - LSB Register */
+#define SATT2_BRG_BA_LSB_REG 0x104C
+
+/* Host High-level Interrupt Status Register */
+#define HHISR_REG 0x2020
+/* Host High-level Interrupt Enable Register
+ *
+ * Resides in PCI memory space. This is the top hierarchy for
+ * interrupts from SeC to host, aggregating both interrupts that
+ * arrive through HICR registers as well as interrupts
+ * that arrive via IPC.
+ */
+#define HHIER_REG 0x2024
+#define IPC_HHIER_SEC BIT(0)
+#define IPC_HHIER_BRIDGE BIT(1)
+#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE)
+
+/* Host High-level Interrupt Mask Register.
+ *
+ * Resides in PCI memory space.
+ * This is the top hierarchy for masking interrupts from SeC to host.
+ */
+#define HHIMR_REG 0x2028
+#define IPC_HHIMR_SEC BIT(0)
+#define IPC_HHIMR_BRIDGE BIT(1)
+
+/* Host High-level IRQ Status Register */
+#define HHIRQSR_REG 0x202C
+
+/* Host Interrupt Cause Register 0 - SeC IPC Readiness
+ *
+ * This register is both an ICR to Host from PCI Memory Space
+ * and it is also exposed in the SeC memory space.
+ * This register is used by SeC's IPC driver in order
+ * to synchronize with host about IPC interface state.
+ */
+#define HICR_SEC_IPC_READINESS_REG 0x2040
+#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0)
+#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1)
+#define HICR_SEC_IPC_READINESS_SYS_RDY \
+ (HICR_SEC_IPC_READINESS_HOST_RDY | \
+ HICR_SEC_IPC_READINESS_SEC_RDY)
+#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2)
+
+/* Host Interrupt Cause Register 1 - Aliveness Response */
+/* This register is both an ICR to Host from PCI Memory Space
+ * and it is also exposed in the SeC memory space.
+ * The register may be used by SeC to ACK a host request for aliveness.
+ */
+#define HICR_HOST_ALIVENESS_RESP_REG 0x2044
+#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0)
+
+/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */
+#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048
+
+/* Host Interrupt Status Register.
+ *
+ * Resides in PCI memory space.
+ * This is the main register involved in generating interrupts
+ * from SeC to host via HICRs.
+ * The interrupt generation rules are as follows:
+ * An interrupt will be generated whenever for any i,
+ * there is a transition from a state where at least one of
+ * the following conditions did not hold, to a state where
+ * ALL the following conditions hold:
+ * A) HISR.INT[i]_STS == 1.
+ * B) HIER.INT[i]_EN == 1.
+ */
+#define HISR_REG 0x2060
+#define HISR_INT_0_STS BIT(0)
+#define HISR_INT_1_STS BIT(1)
+#define HISR_INT_2_STS BIT(2)
+#define HISR_INT_3_STS BIT(3)
+#define HISR_INT_4_STS BIT(4)
+#define HISR_INT_5_STS BIT(5)
+#define HISR_INT_6_STS BIT(6)
+#define HISR_INT_7_STS BIT(7)
+#define HISR_INT_STS_MSK \
+ (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS)
+
+/* Host Interrupt Enable Register. Resides in PCI memory space. */
+#define HIER_REG 0x2064
+#define HIER_INT_0_EN BIT(0)
+#define HIER_INT_1_EN BIT(1)
+#define HIER_INT_2_EN BIT(2)
+#define HIER_INT_3_EN BIT(3)
+#define HIER_INT_4_EN BIT(4)
+#define HIER_INT_5_EN BIT(5)
+#define HIER_INT_6_EN BIT(6)
+#define HIER_INT_7_EN BIT(7)
+
+#define HIER_INT_EN_MSK \
+ (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN)
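The HISR/HIER interrupt-generation rule quoted above (an interrupt fires when some cause i transitions into having both its status and enable bits set) can be restated as a small predicate. This is a hedged sketch, assuming the HISR_INT_*_STS and HIER_INT_*_EN bits occupy the same positions as defined above; the helper is hypothetical and not part of the patch.

/* Sketch only: restate the interrupt-generation rule for HISR/HIER.
 * An interrupt is raised when some cause i newly satisfies
 * HISR.INT[i]_STS == 1 && HIER.INT[i]_EN == 1.
 */
static inline bool txe_hisr_interrupt_edge(u32 prev_hisr, u32 prev_hier,
					   u32 hisr, u32 hier)
{
	u32 prev = prev_hisr & prev_hier & HISR_INT_STS_MSK;
	u32 now = hisr & hier & HISR_INT_STS_MSK;

	return (now & ~prev) != 0;
}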
+
+
+/* SEC Memory Space IPC output payload.
+ *
+ * This register is part of the output payload which SEC provides to host.
+ */
+#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG 0x20C0
+
+/* SeC Interrupt Cause Register - Host Aliveness Request
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * The register is used by host to request SeC aliveness.
+ */
+#define SICR_HOST_ALIVENESS_REQ_REG 0x214C
+#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0)
+
+
+/* SeC Interrupt Cause Register - Host IPC Readiness
+ *
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * This register is used by the host's SeC driver in order
+ * to synchronize with SeC about IPC interface state.
+ */
+#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150
+
+
+#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0)
+#define SICR_HOST_IPC_READINESS_SEC_RDY BIT(1)
+#define SICR_HOST_IPC_READINESS_SYS_RDY \
+ (SICR_HOST_IPC_READINESS_HOST_RDY | \
+ SICR_HOST_IPC_READINESS_SEC_RDY)
+#define SICR_HOST_IPC_READINESS_RDY_CLR BIT(2)
+
+/* SeC Interrupt Cause Register - SeC IPC Output Status
+ *
+ * This register indicates whether or not processing of the most recent
+ * command has been completed by the Host.
+ * New commands and payloads should not be written by SeC until this
+ * register indicates that the previous command has been processed.
+ */
+#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154
+# define SEC_IPC_OUTPUT_STATUS_RDY BIT(0)
+
+
+
+/* MEI IPC Message payload size 64 bytes */
+#define PAYLOAD_SIZE 64
+
+/* MAX size for SATT range 32MB */
+#define SATT_RANGE_MAX (32 << 20)
+
+
+#endif /* _MEI_HW_TXE_REGS_H_ */
+
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
new file mode 100644
index 000000000000..f60182a52f96
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.c
@@ -0,0 +1,1107 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-txe.h"
+#include "client.h"
+#include "hbm.h"
+
+/**
+ * mei_txe_reg_read - Reads 32bit data from the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ *
+ */
+static inline u32 mei_txe_reg_read(void __iomem *base_addr,
+ unsigned long offset)
+{
+ return ioread32(base_addr + offset);
+}
+
+/**
+ * mei_txe_reg_write - Writes 32bit data to the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ * @value: the value to write
+ */
+static inline void mei_txe_reg_write(void __iomem *base_addr,
+ unsigned long offset, u32 value)
+{
+ iowrite32(value, base_addr + offset);
+}
+
+/**
+ * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
+ *
+ * @dev: the device structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR without checking for aliveness
+ */
+static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
+}
+
+/**
+ * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
+ *
+ * @dev: the device structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR and warns if aliveness is not set
+ */
+static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
+ return mei_txe_sec_reg_read_silent(hw, offset);
+}
+/**
+ * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
+ * doesn't check for aliveness
+ *
+ * @dev: the device structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR without checking for aliveness
+ */
+static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
+}
+
+/**
+ * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
+ *
+ * @dev: the device structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR and warns if aliveness is not set
+ */
+static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
+ mei_txe_sec_reg_write_silent(hw, offset, value);
+}
+/**
+ * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
+ *
+ * @hw: the device structure
+ * @offset: offset from which to read the data
+ *
+ */
+static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
+ unsigned long offset)
+{
+ return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
+}
+
+/**
+ * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
+ *
+ * @hw: the device structure
+ * @offset: offset from which to write the data
+ * @value: the byte to write
+ */
+static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
+ unsigned long offset, u32 value)
+{
+ mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
+}
+
+/**
+ * mei_txe_aliveness_set - request for aliveness change
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * Requests an aliveness change and returns true if the change is
+ * really needed, or false if aliveness is already
+ * in the requested state.
+ * Requires device lock to be held
+ */
+static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ bool do_req = hw->aliveness != req;
+
+ dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
+ hw->aliveness, req);
+ if (do_req) {
+ hw->recvd_aliveness = false;
+ mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
+ }
+ return do_req;
+}
+
+
+/**
+ * mei_txe_aliveness_req_get - get aliveness requested register value
+ *
+ * @dev: the device structure
+ *
+ * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit from
+ * the SICR_HOST_ALIVENESS_REQ register value
+ */
+static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg;
+ reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
+ return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
+}
+
+/**
+ * mei_txe_aliveness_get - get aliveness response register value
+ * @dev: the device structure
+ *
+ * Extract HICR_HOST_ALIVENESS_RESP_ACK bit
+ * from HICR_HOST_ALIVENESS_RESP register value
+ */
+static u32 mei_txe_aliveness_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg;
+ reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
+ return reg & HICR_HOST_ALIVENESS_RESP_ACK;
+}
+
+/**
+ * mei_txe_aliveness_poll - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns the elapsed time in msecs (>= 0) if the expected value was
+ * received, -ETIME otherwise
+ */
+static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ int t = 0;
+
+ do {
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ if (hw->aliveness == expected) {
+ dev_dbg(&dev->pdev->dev,
+ "aliveness settled after %d msecs\n", t);
+ return t;
+ }
+ mutex_unlock(&dev->device_lock);
+ msleep(MSEC_PER_SEC / 5);
+ mutex_lock(&dev->device_lock);
+ t += MSEC_PER_SEC / 5;
+ } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+
+ dev_err(&dev->pdev->dev, "aliveness timed out\n");
+ return -ETIME;
+}
+
+/**
+ * mei_txe_aliveness_wait - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns 0 on success and < 0 otherwise
+ */
+static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ const unsigned long timeout =
+ msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
+ long err;
+ int ret;
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ if (hw->aliveness == expected)
+ return 0;
+
+ mutex_unlock(&dev->device_lock);
+ err = wait_event_timeout(hw->wait_aliveness,
+ hw->recvd_aliveness, timeout);
+ mutex_lock(&dev->device_lock);
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ ret = hw->aliveness == expected ? 0 : -ETIME;
+
+ if (ret)
+ dev_err(&dev->pdev->dev, "aliveness timed out");
+ else
+ dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n",
+ jiffies_to_msecs(timeout - err));
+ hw->recvd_aliveness = false;
+ return ret;
+}
+
+/**
+ * mei_txe_aliveness_set_sync - sets aliveness and waits for it to complete
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and < 0 otherwise
+ */
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
+{
+ if (mei_txe_aliveness_set(dev, req))
+ return mei_txe_aliveness_wait(dev, req);
+ return 0;
+}
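mei_txe_aliveness_set_sync() is the exported entry point for the aliveness (power-gating) handshake. A hedged usage sketch follows; example_touch_sec() is a hypothetical caller, not part of the patch.

/* Sketch only: assert aliveness before touching SeC BAR registers,
 * then release it when done.
 */
static int example_touch_sec(struct mei_device *dev)
{
	int ret;

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret)
		return ret;

	/* ... SeC register accesses go here ... */

	return mei_txe_aliveness_set_sync(dev, 0);
}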
+
+/**
+ * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 hintmsk;
+ /* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
+ hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
+ hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
+ mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
+}
+
+/**
+ * mei_txe_input_doorbell_set
+ * - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
+ * @dev: the device structure
+ */
+static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
+{
+ /* Clear the interrupt cause */
+ clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
+ mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
+}
+
+/**
+ * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
+{
+ mei_txe_br_reg_write(hw,
+ SICR_SEC_IPC_OUTPUT_STATUS_REG,
+ SEC_IPC_OUTPUT_STATUS_RDY);
+}
+
+/**
+ * mei_txe_is_input_ready - check if TXE is ready for receiving data
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_is_input_ready(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 status;
+ status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
+ return !!(SEC_IPC_INPUT_STATUS_RDY & status);
+}
+
+/**
+ * mei_txe_intr_clear - clear all interrupts
+ *
+ * @dev: the device structure
+ */
+static inline void mei_txe_intr_clear(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
+ SEC_IPC_HOST_INT_STATUS_PENDING);
+ mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
+ mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
+}
+
+/**
+ * mei_txe_intr_disable - disable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_disable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, HHIER_REG, 0);
+ mei_txe_br_reg_write(hw, HIER_REG, 0);
+}
+/**
+ * mei_txe_intr_enable - enable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_enable(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
+ mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
+}
+
+/**
+ * mei_txe_pending_interrupts - check if there are pending interrupts
+ * only Aliveness, Input ready, and output doorbell are of relevance
+ *
+ * @dev: the device structure
+ *
+ * Checks if there are pending interrupts
+ * only Aliveness, Readiness, Input ready, and Output doorbell are relevant
+ */
+static bool mei_txe_pending_interrupts(struct mei_device *dev)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
+ TXE_INTR_ALIVENESS |
+ TXE_INTR_IN_READY |
+ TXE_INTR_OUT_DB));
+
+ if (ret) {
+ dev_dbg(&dev->pdev->dev,
+ "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
+ !!(hw->intr_cause & TXE_INTR_IN_READY),
+ !!(hw->intr_cause & TXE_INTR_READINESS),
+ !!(hw->intr_cause & TXE_INTR_ALIVENESS),
+ !!(hw->intr_cause & TXE_INTR_OUT_DB));
+ }
+ return ret;
+}
+
+/**
+ * mei_txe_input_payload_write - write a dword to the host buffer
+ * at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the host buffer
+ * @value: value
+ */
+static void mei_txe_input_payload_write(struct mei_device *dev,
+ unsigned long idx, u32 value)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
+ (idx * sizeof(u32)), value);
+}
+
+/**
+ * mei_txe_out_data_read - read dword from the device buffer
+ * at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the device buffer
+ *
+ * returns register value at index
+ */
+static u32 mei_txe_out_data_read(const struct mei_device *dev,
+ unsigned long idx)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return mei_txe_br_reg_read(hw,
+ BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
+}
+
+/* Readiness */
+
+/**
+ * mei_txe_readiness_set_host_rdy
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw,
+ SICR_HOST_IPC_READINESS_REQ_REG,
+ SICR_HOST_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_clear
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_clear(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
+ SICR_HOST_IPC_READINESS_RDY_CLR);
+}
+/**
+ * mei_txe_readiness_get - Reads and returns
+ * the HICR_SEC_IPC_READINESS register value
+ *
+ * @dev: the device structure
+ */
+static u32 mei_txe_readiness_get(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+}
+
+
+/**
+ * mei_txe_readiness_is_sec_rdy - check readiness
+ * for HICR_SEC_IPC_READINESS_SEC_RDY
+ *
+ * @readiness - cached readiness state
+ */
+static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
+{
+ return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
+}
+
+/**
+ * mei_txe_hw_is_ready - check if the hw is ready
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_hw_is_ready(struct mei_device *dev)
+{
+ u32 readiness = mei_txe_readiness_get(dev);
+ return mei_txe_readiness_is_sec_rdy(readiness);
+}
+
+/**
+ * mei_txe_host_is_ready - check if the host is ready
+ *
+ * @dev: the device structure
+ */
+static inline bool mei_txe_host_is_ready(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+ return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_wait - wait till readiness settles
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and -ETIME on timeout
+ */
+static int mei_txe_readiness_wait(struct mei_device *dev)
+{
+ if (mei_txe_hw_is_ready(dev))
+ return 0;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
+ msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+ if (!dev->recvd_hw_ready) {
+ dev_err(&dev->pdev->dev, "wait for readiness failed\n");
+ return -ETIME;
+ }
+
+ dev->recvd_hw_ready = false;
+ return 0;
+}
+
+/**
+ * mei_txe_hw_config - configure hardware at the start of the device
+ *
+ * @dev: the device structure
+ *
+ * Configuration of the hardware at device start; it should be done
+ * only once, at device probe time
+ */
+static void mei_txe_hw_config(struct mei_device *dev)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ /* Doesn't change in runtime */
+ dev->hbuf_depth = PAYLOAD_SIZE / 4;
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ hw->readiness = mei_txe_readiness_get(dev);
+
+ dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+ hw->aliveness, hw->readiness);
+}
+
+
+/**
+ * mei_txe_write - writes a message to device.
+ *
+ * @dev: the device structure
+ * @header: header of message
+ * @buf: message buffer will be written
+ * returns 0 on success and < 0 otherwise.
+ */
+
+static int mei_txe_write(struct mei_device *dev,
+ struct mei_msg_hdr *header, unsigned char *buf)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ unsigned long rem;
+ unsigned long length;
+ int slots = dev->hbuf_depth;
+ u32 *reg_buf = (u32 *)buf;
+ u32 dw_cnt;
+ int i;
+
+ if (WARN_ON(!header || !buf))
+ return -EINVAL;
+
+ length = header->length;
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+ dw_cnt = mei_data2slots(length);
+ if (dw_cnt > slots)
+ return -EMSGSIZE;
+
+ if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
+ return -EAGAIN;
+
+ /* Enable Input Ready Interrupt. */
+ mei_txe_input_ready_interrupt_enable(dev);
+
+ if (!mei_txe_is_input_ready(dev)) {
+ dev_err(&dev->pdev->dev, "Input is not ready");
+ return -EAGAIN;
+ }
+
+ mei_txe_input_payload_write(dev, 0, *((u32 *)header));
+
+ for (i = 0; i < length / 4; i++)
+ mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ u32 reg = 0;
+ memcpy(&reg, &buf[length - rem], rem);
+ mei_txe_input_payload_write(dev, i + 1, reg);
+ }
+
+ /* after each write the whole buffer is consumed */
+ hw->slots = 0;
+
+ /* Set Input-Doorbell */
+ mei_txe_input_doorbell_set(hw);
+
+ return 0;
+}
+
+/**
+ * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns the PAYLOAD_SIZE - 4
+ */
+static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
+{
+ return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
+}
+
+/**
+ * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns the number of empty slots tracked in hw->slots
+ */
+static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return hw->slots;
+}
+
+/**
+ * mei_txe_count_full_read_slots - mimics the me device circular buffer
+ *
+ * @dev: the device structure
+ *
+ * always returns the read buffer size in dwords
+ */
+static int mei_txe_count_full_read_slots(struct mei_device *dev)
+{
+ /* the read buffer has a static size */
+ return PAYLOAD_SIZE / 4;
+}
+
+/**
+ * mei_txe_read_hdr - read message header which is always in 4 first bytes
+ *
+ * @dev: the device structure
+ *
+ * returns mei message header
+ */
+
+static u32 mei_txe_read_hdr(const struct mei_device *dev)
+{
+ return mei_txe_out_data_read(dev, 0);
+}
+/**
+ * mei_txe_read - reads a message from the txe device.
+ *
+ * @dev: the device structure
+ * @buf: message buffer will be written
+ * @len: message size will be read
+ *
+ * returns -EINVAL on wrong arguments and 0 on success
+ */
+static int mei_txe_read(struct mei_device *dev,
+ unsigned char *buf, unsigned long len)
+{
+
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 i;
+ u32 *reg_buf = (u32 *)buf;
+ u32 rem = len & 0x3;
+
+ if (WARN_ON(!buf || !len))
+ return -EINVAL;
+
+ dev_dbg(&dev->pdev->dev,
+ "buffer-length = %lu buf[0]0x%08X\n",
+ len, mei_txe_out_data_read(dev, 0));
+
+ for (i = 0; i < len / 4; i++) {
+ /* skip header: index starts from 1 */
+ u32 reg = mei_txe_out_data_read(dev, i + 1);
+ dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg);
+ *reg_buf++ = reg;
+ }
+
+ if (rem) {
+ u32 reg = mei_txe_out_data_read(dev, i + 1);
+ memcpy(reg_buf, &reg, rem);
+ }
+
+ mei_txe_output_ready_set(hw);
+ return 0;
+}
+
+/**
+ * mei_txe_hw_reset - resets host and fw.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+
+ u32 aliveness_req;
+ /*
+ * read input doorbell to ensure consistency between Bridge and SeC
+ * the return value might be garbage and is discarded
+ */
+ (void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
+
+ aliveness_req = mei_txe_aliveness_req_get(dev);
+ hw->aliveness = mei_txe_aliveness_get(dev);
+
+ /* Disable interrupts in this stage we will poll */
+ mei_txe_intr_disable(dev);
+
+ /*
+ * If Aliveness Request and Aliveness Response are not equal then
+ * wait for them to be equal
+ * Since we might have interrupts disabled - poll for it
+ */
+ if (aliveness_req != hw->aliveness)
+ if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait for aliveness settle failed ... bailing out\n");
+ return -EIO;
+ }
+
+ /*
+ * If Aliveness Request and Aliveness Response are set then clear them
+ */
+ if (aliveness_req) {
+ mei_txe_aliveness_set(dev, 0);
+ if (mei_txe_aliveness_poll(dev, 0) < 0) {
+ dev_err(&dev->pdev->dev,
+ "wait for aliveness failed ... bailing out\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * Set readiness RDY_CLR bit
+ */
+ mei_txe_readiness_clear(dev);
+
+ return 0;
+}
+
+/**
+ * mei_txe_hw_start - start the hardware after reset
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_start(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ int ret;
+
+ u32 hisr;
+
+ /* bring back interrupts */
+ mei_txe_intr_enable(dev);
+
+ ret = mei_txe_readiness_wait(dev);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "waiting for readiness failed\n");
+ return ret;
+ }
+
+ /*
+ * If HISR.INT2_STS interrupt status bit is set then clear it.
+ */
+ hisr = mei_txe_br_reg_read(hw, HISR_REG);
+ if (hisr & HISR_INT_2_STS)
+ mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
+
+ /* Clear the interrupt cause of OutputDoorbell */
+ clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
+
+ ret = mei_txe_aliveness_set_sync(dev, 1);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n");
+ return ret;
+ }
+
+ /* enable input ready interrupts:
+ * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
+ */
+ mei_txe_input_ready_interrupt_enable(dev);
+
+
+ /* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
+ mei_txe_output_ready_set(hw);
+
+ /* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
+ */
+ mei_txe_readiness_set_host_rdy(dev);
+
+ return 0;
+}
+
+/**
+ * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
+ * single bit mask and acknowledge the interrupts
+ *
+ * @dev: the device structure
+ * @do_ack: acknowledge interrupts
+ */
+static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ u32 hisr;
+ u32 hhisr;
+ u32 ipc_isr;
+ u32 aliveness;
+ bool generated;
+
+ /* read interrupt registers */
+ hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
+ generated = (hhisr & IPC_HHIER_MSK);
+ if (!generated)
+ goto out;
+
+ hisr = mei_txe_br_reg_read(hw, HISR_REG);
+
+ aliveness = mei_txe_aliveness_get(dev);
+ if (hhisr & IPC_HHIER_SEC && aliveness)
+ ipc_isr = mei_txe_sec_reg_read_silent(hw,
+ SEC_IPC_HOST_INT_STATUS_REG);
+ else
+ ipc_isr = 0;
+
+ generated = generated ||
+ (hisr & HISR_INT_STS_MSK) ||
+ (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
+
+ if (generated && do_ack) {
+ /* Save the interrupt causes */
+ hw->intr_cause |= hisr & HISR_INT_STS_MSK;
+ if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
+ hw->intr_cause |= TXE_INTR_IN_READY;
+
+
+ mei_txe_intr_disable(dev);
+ /* Clear the interrupts in hierarchy:
+ * IPC and Bridge, then the High Level */
+ mei_txe_sec_reg_write_silent(hw,
+ SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
+ mei_txe_br_reg_write(hw, HISR_REG, hisr);
+ mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
+ }
+
+out:
+ return generated;
+}
+
+/**
+ * mei_txe_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = dev_id;
+
+ if (mei_txe_check_and_ack_intrs(dev, true))
+ return IRQ_WAKE_THREAD;
+ return IRQ_NONE;
+}
+
+
+/**
+ * mei_txe_irq_thread_handler - txe interrupt thread
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ struct mei_cl_cb complete_list;
+ s32 slots;
+ int rets = 0;
+
+ dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
+ mei_txe_br_reg_read(hw, HHISR_REG),
+ mei_txe_br_reg_read(hw, HISR_REG),
+ mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
+
+
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ mei_io_list_init(&complete_list);
+
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_txe_check_and_ack_intrs(dev, true);
+
+ /* show irq events */
+ mei_txe_pending_interrupts(dev);
+
+ hw->aliveness = mei_txe_aliveness_get(dev);
+ hw->readiness = mei_txe_readiness_get(dev);
+
+ /* Readiness:
+ * Detection of TXE driver going through reset
+ * or TXE driver resetting the HECI interface.
+ */
+ if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
+ dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n");
+
+ /* Check if SeC is going through reset */
+ if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
+ dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+ dev->recvd_hw_ready = true;
+ } else {
+ dev->recvd_hw_ready = false;
+ if (dev->dev_state != MEI_DEV_RESETTING) {
+
+ dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
+ schedule_work(&dev->reset_work);
+ goto end;
+
+ }
+ }
+ wake_up(&dev->wait_hw_ready);
+ }
+
+ /************************************************************/
+ /* Check interrupt cause:
+ * Aliveness: Detection of SeC acknowledge of host request that
+ * it remain alive or host cancellation of that request.
+ */
+
+ if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
+ /* Clear the interrupt cause */
+ dev_dbg(&dev->pdev->dev,
+ "Aliveness Interrupt: Status: %d\n", hw->aliveness);
+ hw->recvd_aliveness = true;
+ if (waitqueue_active(&hw->wait_aliveness))
+ wake_up(&hw->wait_aliveness);
+ }
+
+
+ /* Output Doorbell:
+ * Detection of SeC having sent output to host
+ */
+ slots = mei_count_full_read_slots(dev);
+ if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
+ /* Read from TXE */
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_err(&dev->pdev->dev,
+ "mei_irq_read_handler ret = %d.\n", rets);
+
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+ }
+ /* Input Ready: Detection if host can write to SeC */
+ if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
+ dev->hbuf_is_ready = true;
+ hw->slots = dev->hbuf_depth;
+ }
+
+ if (hw->aliveness && dev->hbuf_is_ready) {
+ /* get the real register value */
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ rets = mei_irq_write_handler(dev, &complete_list);
+ if (rets && rets != -EMSGSIZE)
+ dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n",
+ rets);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ }
+
+ mei_irq_compl_handler(dev, &complete_list);
+
+end:
+ dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
+
+ mutex_unlock(&dev->device_lock);
+
+ mei_enable_interrupts(dev);
+ return IRQ_HANDLED;
+}
+
+static const struct mei_hw_ops mei_txe_hw_ops = {
+
+ .host_is_ready = mei_txe_host_is_ready,
+
+ .hw_is_ready = mei_txe_hw_is_ready,
+ .hw_reset = mei_txe_hw_reset,
+ .hw_config = mei_txe_hw_config,
+ .hw_start = mei_txe_hw_start,
+
+ .intr_clear = mei_txe_intr_clear,
+ .intr_enable = mei_txe_intr_enable,
+ .intr_disable = mei_txe_intr_disable,
+
+ .hbuf_free_slots = mei_txe_hbuf_empty_slots,
+ .hbuf_is_ready = mei_txe_is_input_ready,
+ .hbuf_max_len = mei_txe_hbuf_max_len,
+
+ .write = mei_txe_write,
+
+ .rdbuf_full_slots = mei_txe_count_full_read_slots,
+ .read_hdr = mei_txe_read_hdr,
+
+ .read = mei_txe_read,
+
+};
+
+/**
+ * mei_txe_dev_init - allocates and initializes txe hardware specific structure
+ *
+ * @pdev - pci device
+ * returns struct mei_device * on success or NULL;
+ *
+ */
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+
+ dev = kzalloc(sizeof(struct mei_device) +
+ sizeof(struct mei_txe_hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev);
+
+ hw = to_txe_hw(dev);
+
+ init_waitqueue_head(&hw->wait_aliveness);
+
+ dev->ops = &mei_txe_hw_ops;
+
+ dev->pdev = pdev;
+ return dev;
+}
+
+/**
+ * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
+ *
+ * @dev: the device structure
+ * @addr: physical address start of the range
+ * @range: physical range size
+ */
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+
+ u32 lo32 = lower_32_bits(addr);
+ u32 hi32 = upper_32_bits(addr);
+ u32 ctrl;
+
+ /* SATT is limited to 36 Bits */
+ if (hi32 & ~0xF)
+ return -EINVAL;
+
+ /* SATT has to be 16Byte aligned */
+ if (lo32 & 0xF)
+ return -EINVAL;
+
+ /* SATT range has to be 4Bytes aligned */
+ if (range & 0x4)
+ return -EINVAL;
+
+ /* SATT is limited to 32 MB range*/
+ if (range > SATT_RANGE_MAX)
+ return -EINVAL;
+
+ ctrl = SATT2_CTRL_VALID_MSK;
+ ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
+
+ mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
+ mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
+ mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
+ dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
+ range, lo32, ctrl);
+
+ return 0;
+}
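mei_txe_setup_satt2() only programs the bridge window; the caller must hand it a physical range that satisfies the checks above (36-bit address, 16-byte alignment, at most 32 MB). A hedged usage sketch follows; example_map_dma_window() and its dma handle are hypothetical, not part of the patch.

/* Sketch only: expose a coherent DMA buffer to SeC via SATT2. */
static int example_map_dma_window(struct mei_device *dev,
				  dma_addr_t dma, u32 size)
{
	return mei_txe_setup_satt2(dev, (phys_addr_t)dma, size);
}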
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
new file mode 100644
index 000000000000..0812d98633a4
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HW_TXE_H_
+#define _MEI_HW_TXE_H_
+
+#include <linux/irqreturn.h>
+
+#include "hw.h"
+#include "hw-txe-regs.h"
+
+/* Flatten Hierarchy interrupt cause */
+#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */
+#define TXE_INTR_READINESS HISR_INT_0_STS
+#define TXE_INTR_ALIVENESS_BIT 1 /* HISR_INT_1_STS */
+#define TXE_INTR_ALIVENESS HISR_INT_1_STS
+#define TXE_INTR_OUT_DB_BIT 2 /* HISR_INT_2_STS */
+#define TXE_INTR_OUT_DB HISR_INT_2_STS
+#define TXE_INTR_IN_READY_BIT 8 /* beyond HISR */
+#define TXE_INTR_IN_READY BIT(8)
+
+/**
+ * struct mei_txe_hw - txe hardware specifics
+ *
+ * @mem_addr: SeC and BRIDGE bars
+ * @aliveness: aliveness (power gating) state of the hardware
+ * @readiness: readiness state of the hardware
+ * @wait_aliveness: aliveness wait queue
+ * @recvd_aliveness: aliveness interrupt was received
+ * @intr_cause: translated interrupt cause
+ */
+struct mei_txe_hw {
+ void __iomem *mem_addr[NUM_OF_MEM_BARS];
+ u32 aliveness;
+ u32 readiness;
+ u32 slots;
+
+ wait_queue_head_t wait_aliveness;
+ bool recvd_aliveness;
+
+ unsigned long intr_cause;
+};
+
+#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw)
+
+static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
+{
+ return container_of((void *)hw, struct mei_device, hw);
+}
+
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev);
+
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
+
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
+
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
+
+
+#endif /* _MEI_HW_TXE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index dd44e33ad2b6..6b476ab49b2e 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -22,7 +22,7 @@
/*
* Timeouts in Seconds
*/
-#define MEI_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */
#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
@@ -31,13 +31,13 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
+#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* MEI Version
*/
#define HBM_MINOR_VERSION 0
#define HBM_MAJOR_VERSION 1
-#define HBM_TIMEOUT 1 /* 1 second */
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
@@ -89,19 +89,19 @@ enum mei_stop_reason_types {
* Client Connect Status
* used by hbm_client_connect_response.status
*/
-enum client_connect_status_types {
- CCS_SUCCESS = 0x00,
- CCS_NOT_FOUND = 0x01,
- CCS_ALREADY_STARTED = 0x02,
- CCS_OUT_OF_RESOURCES = 0x03,
- CCS_MESSAGE_SMALL = 0x04
+enum mei_cl_connect_status {
+ MEI_CL_CONN_SUCCESS = 0x00,
+ MEI_CL_CONN_NOT_FOUND = 0x01,
+ MEI_CL_CONN_ALREADY_STARTED = 0x02,
+ MEI_CL_CONN_OUT_OF_RESOURCES = 0x03,
+ MEI_CL_CONN_MESSAGE_SMALL = 0x04
};
/*
* Client Disconnect Status
*/
-enum client_disconnect_status_types {
- CDS_SUCCESS = 0x00
+enum mei_cl_disconnect_status {
+ MEI_CL_DISCONN_SUCCESS = 0x00
};
/*
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cdd31c2a2a2b..4460975c0eef 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -116,7 +116,6 @@ int mei_reset(struct mei_device *dev)
mei_cl_unlink(&dev->wd_cl);
mei_cl_unlink(&dev->iamthif_cl);
mei_amthif_reset_params(dev);
- memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
}
@@ -126,7 +125,6 @@ int mei_reset(struct mei_device *dev)
if (ret) {
dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
return ret;
}
@@ -139,7 +137,6 @@ int mei_reset(struct mei_device *dev)
ret = mei_hw_start(dev);
if (ret) {
dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
return ret;
}
@@ -149,7 +146,7 @@ int mei_reset(struct mei_device *dev)
ret = mei_hbm_start_req(dev);
if (ret) {
dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
- dev->dev_state = MEI_DEV_DISABLED;
+ dev->dev_state = MEI_DEV_RESETTING;
return ret;
}
@@ -166,6 +163,7 @@ EXPORT_SYMBOL_GPL(mei_reset);
*/
int mei_start(struct mei_device *dev)
{
+ int ret;
mutex_lock(&dev->device_lock);
/* acknowledge interrupt and stop interrupts */
@@ -175,10 +173,18 @@ int mei_start(struct mei_device *dev)
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
- dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
- mei_reset(dev);
+ do {
+ dev->dev_state = MEI_DEV_INITIALIZING;
+ ret = mei_reset(dev);
+
+ if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "reset failed ret = %d", ret);
+ goto err;
+ }
+ } while (ret);
+ /* we cannot start the device w/o hbm start message completed */
if (dev->dev_state == MEI_DEV_DISABLED) {
dev_err(&dev->pdev->dev, "reset failed");
goto err;
@@ -238,27 +244,40 @@ int mei_restart(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- if (err || dev->dev_state == MEI_DEV_DISABLED)
+ if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "device disabled = %d\n", err);
return -ENODEV;
+ }
+
+ /* try to start again */
+ if (err)
+ schedule_work(&dev->reset_work);
+
return 0;
}
EXPORT_SYMBOL_GPL(mei_restart);
-
static void mei_reset_work(struct work_struct *work)
{
struct mei_device *dev =
container_of(work, struct mei_device, reset_work);
+ int ret;
mutex_lock(&dev->device_lock);
- mei_reset(dev);
+ ret = mei_reset(dev);
mutex_unlock(&dev->device_lock);
- if (dev->dev_state == MEI_DEV_DISABLED)
- dev_err(&dev->pdev->dev, "reset failed");
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_err(&dev->pdev->dev, "device disabled = %d\n", ret);
+ return;
+ }
+
+ /* retry reset in case of failure */
+ if (ret)
+ schedule_work(&dev->reset_work);
}
void mei_stop(struct mei_device *dev)
@@ -269,6 +288,8 @@ void mei_stop(struct mei_device *dev)
mei_nfc_host_exit(dev);
+ mei_cl_bus_remove_devices(dev);
+
mutex_lock(&dev->device_lock);
mei_wd_stop(dev);
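
The reworked mei_start()/mei_restart() paths above retry transient reset failures and only give up on a fatal condition (-ENODEV, or a device left in MEI_DEV_DISABLED). A minimal userspace model of that control flow follows; the failure sequence in fake_reset() is invented for the demonstration and is not driver code.

#include <stdio.h>
#include <errno.h>

/* Toy model of the retry loop mei_start() now uses: transient reset
 * failures are retried, a fatal -ENODEV aborts at once.  The failure
 * sequence below is made up for the demo. */
static int fake_reset(void)
{
	static int attempt;

	return (++attempt < 3) ? -EAGAIN : 0;	/* fail twice, then succeed */
}

int main(void)
{
	int ret;

	do {
		ret = fake_reset();
		if (ret == -ENODEV) {
			printf("reset failed fatally, giving up\n");
			return 1;
		}
		if (ret)
			printf("transient reset failure %d, retrying\n", ret);
	} while (ret);

	printf("device started\n");
	return 0;
}
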
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index f0fbb5179f80..29b5af8efb71 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -26,7 +26,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
@@ -161,29 +160,63 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
}
/**
+ * mei_cl_irq_disconnect_rsp - send disconnection response message
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * returns 0, OK; otherwise, error.
+ */
+static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
+{
+ struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
+ int ret;
+
+ slots = mei_hbuf_empty_slots(dev);
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
+
+ if (slots < msg_slots)
+ return -EMSGSIZE;
+
+ ret = mei_hbm_cl_disconnect_rsp(dev, cl);
+
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = 0;
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+
+ return ret;
+}
+
+
+
+/**
* mei_cl_irq_close - processes close related operation from
* interrupt thread context - send disconnect request
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
- u32 msg_slots =
- mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots)
+ if (slots < msg_slots)
return -EMSGSIZE;
- *slots -= msg_slots;
-
if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb->buf_idx = 0;
@@ -207,27 +240,23 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
- u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
-
+ u32 msg_slots;
+ int slots;
int ret;
+ msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots) {
- /* return the cancel routine */
- list_del(&cb->list);
+ if (slots < msg_slots)
return -EMSGSIZE;
- }
-
- *slots -= msg_slots;
ret = mei_hbm_cl_flow_control_req(dev, cl);
if (ret) {
@@ -244,32 +273,30 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
/**
- * mei_cl_irq_ioctl - processes client ioctl related operation from the
- * interrupt thread context - send connection request
+ * mei_cl_irq_connect - send connect request in irq_thread context
*
* @cl: client
* @cb: callback block.
- * @slots: free slots.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
-static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list)
+static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_device *dev = cl->dev;
+ u32 msg_slots;
+ int slots;
int ret;
- u32 msg_slots =
- mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
- if (*slots < msg_slots) {
- /* return the cancel routine */
- list_del(&cb->list);
- return -EMSGSIZE;
- }
+ if (mei_cl_is_other_connecting(cl))
+ return 0;
- *slots -= msg_slots;
+ if (slots < msg_slots)
+ return -EMSGSIZE;
cl->state = MEI_FILE_CONNECTING;
@@ -323,7 +350,7 @@ int mei_irq_read_handler(struct mei_device *dev,
dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
- ret = -ERANGE;
+ ret = -ENODATA;
goto end;
}
@@ -409,10 +436,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
s32 slots;
int ret;
- if (!mei_hbuf_is_ready(dev)) {
- dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
+
+ if (!mei_hbuf_acquire(dev))
return 0;
- }
+
slots = mei_hbuf_empty_slots(dev);
if (slots <= 0)
return -EMSGSIZE;
@@ -447,29 +474,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (dev->wd_state == MEI_WD_STOPPING) {
dev->wd_state = MEI_WD_IDLE;
- wake_up_interruptible(&dev->wait_stop_wd);
+ wake_up(&dev->wait_stop_wd);
}
- if (dev->wr_ext_msg.hdr.length) {
- mei_write_message(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data);
- slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
- dev->wr_ext_msg.hdr.length = 0;
- }
- if (dev->dev_state == MEI_DEV_ENABLED) {
+ if (mei_cl_is_connected(&dev->wd_cl)) {
if (dev->wd_pending &&
mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- if (mei_wd_send(dev))
- dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
- return -ENODEV;
-
+ ret = mei_wd_send(dev);
+ if (ret)
+ return ret;
dev->wd_pending = false;
-
- if (dev->wd_state == MEI_WD_RUNNING)
- slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
- else
- slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
}
}
@@ -484,28 +498,31 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
switch (cb->fop_type) {
case MEI_FOP_CLOSE:
/* send disconnect message */
- ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_close(cl, cb, cmpl_list);
if (ret)
return ret;
break;
case MEI_FOP_READ:
/* send flow control message */
- ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_read(cl, cb, cmpl_list);
if (ret)
return ret;
break;
- case MEI_FOP_IOCTL:
+ case MEI_FOP_CONNECT:
/* connect message */
- if (mei_cl_is_other_connecting(cl))
- continue;
- ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list);
+ ret = mei_cl_irq_connect(cl, cb, cmpl_list);
if (ret)
return ret;
break;
-
+ case MEI_FOP_DISCONNECT_RSP:
+ /* send disconnect resp */
+ ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
+ if (ret)
+ return ret;
+ break;
default:
BUG();
}
@@ -518,11 +535,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
if (cl == NULL)
continue;
if (cl == &dev->iamthif_cl)
- ret = mei_amthif_irq_write_complete(cl, cb,
- &slots, cmpl_list);
+ ret = mei_amthif_irq_write(cl, cb, cmpl_list);
else
- ret = mei_cl_irq_write_complete(cl, cb,
- &slots, cmpl_list);
+ ret = mei_cl_irq_write(cl, cb, cmpl_list);
if (ret)
return ret;
}
@@ -541,8 +556,7 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
void mei_timer(struct work_struct *work)
{
unsigned long timeout;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
+ struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
@@ -570,9 +584,9 @@ void mei_timer(struct work_struct *work)
goto out;
/*** connect/disconnect timeouts ***/
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (cl_pos->timer_count) {
- if (--cl_pos->timer_count == 0) {
+ list_for_each_entry(cl, &dev->file_list, link) {
+ if (cl->timer_count) {
+ if (--cl->timer_count == 0) {
dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
mei_reset(dev);
goto out;
@@ -580,6 +594,9 @@ void mei_timer(struct work_struct *work)
}
}
+ if (!mei_cl_is_connected(&dev->iamthif_cl))
+ goto out;
+
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
@@ -619,10 +636,10 @@ void mei_timer(struct work_struct *work)
list_for_each_entry_safe(cb_pos, cb_next,
&dev->amthif_rd_complete_list.list, list) {
- cl_pos = cb_pos->file_object->private_data;
+ cl = cb_pos->file_object->private_data;
/* Finding the AMTHI entry. */
- if (cl_pos == &dev->iamthif_cl)
+ if (cl == &dev->iamthif_cl)
list_del(&cb_pos->list);
}
mei_io_cb_free(dev->iamthif_current_cb);
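
All of the mei_cl_irq_* helpers above follow the same pattern once the shared s32 *slots counter is gone: recompute the free space with mei_hbuf_empty_slots() and compare it against mei_data2slots() of the message about to be sent. The standalone sketch below models only that arithmetic, assuming the MEI convention of 4-byte slots and a 4-byte message header; the constants and payload size are invented for the illustration.

#include <stdio.h>

/* Minimal model of MEI host-buffer slot accounting (illustration only):
 * one slot is 4 bytes, and a message occupies header + payload rounded
 * up to whole slots, as mei_data2slots() does in the driver. */
#define MEI_SLOT_SIZE 4u
#define MEI_HDR_SIZE  4u

static unsigned int data2slots(unsigned int payload_len)
{
	return (MEI_HDR_SIZE + payload_len + MEI_SLOT_SIZE - 1) / MEI_SLOT_SIZE;
}

int main(void)
{
	unsigned int empty_slots = 2;		/* assumed free space in the host buffer */
	unsigned int msg_slots = data2slots(8);	/* e.g. an 8-byte request */

	if (empty_slots < msg_slots)
		printf("not enough room (%u < %u): callback returns -EMSGSIZE\n",
		       empty_slots, msg_slots);
	else
		printf("message fits: needs %u of %u free slots\n",
		       msg_slots, empty_slots);
	return 0;
}

With an 8-byte payload the message needs 3 slots, so with only 2 free the helper bails out with -EMSGSIZE and the request is tried again on a later write cycle.
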
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5424f8ff3f7f..b35594dbf52f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -13,9 +13,6 @@
* more details.
*
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -40,7 +37,6 @@
#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
/**
@@ -129,17 +125,11 @@ static int mei_release(struct inode *inode, struct file *file)
}
if (cl->state == MEI_FILE_CONNECTED) {
cl->state = MEI_FILE_DISCONNECTING;
- dev_dbg(&dev->pdev->dev,
- "disconnecting client host client = %d, "
- "ME client = %d\n",
- cl->host_client_id,
- cl->me_client_id);
+ cl_dbg(dev, cl, "disconnecting\n");
rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
- dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
- cl->host_client_id,
- cl->me_client_id);
+ cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
@@ -284,6 +274,7 @@ copy_buffer:
length = min_t(size_t, length, cb->buf_idx - *offset);
if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
+ dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto free;
}
@@ -340,7 +331,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
id = mei_me_cl_by_id(dev, cl->me_client_id);
if (id < 0) {
- rets = -ENODEV;
+ rets = -ENOTTY;
goto out;
}
@@ -404,7 +395,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
if (rets) {
- dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
+ dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
@@ -471,7 +462,7 @@ static int mei_ioctl_connect_client(struct file *file,
if (i < 0 || dev->me_clients[i].props.fixed_address) {
dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
&data->in_client_uuid);
- rets = -ENODEV;
+ rets = -ENOTTY;
goto end;
}
@@ -569,7 +560,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
if (copy_from_user(connect_data, (char __user *)data,
sizeof(struct mei_connect_client_data))) {
- dev_err(&dev->pdev->dev, "failed to copy data from userland\n");
+ dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f7de95b4cdd9..94a516716d22 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -24,7 +24,6 @@
#include <linux/mei_cl_bus.h>
#include "hw.h"
-#include "hw-me-regs.h"
#include "hbm.h"
/*
@@ -130,16 +129,18 @@ enum mei_wd_states {
/**
* enum mei_cb_file_ops - file operation associated with the callback
- * @MEI_FOP_READ - read
- * @MEI_FOP_WRITE - write
- * @MEI_FOP_IOCTL - ioctl
- * @MEI_FOP_OPEN - open
- * @MEI_FOP_CLOSE - close
+ * @MEI_FOP_READ - read
+ * @MEI_FOP_WRITE - write
+ * @MEI_FOP_CONNECT - connect
+ * @MEI_FOP_DISCONNECT_RSP - disconnect response
+ * @MEI_FOP_OPEN - open
+ * @MEI_FOP_CLOSE - close
*/
enum mei_cb_file_ops {
MEI_FOP_READ = 0,
MEI_FOP_WRITE,
- MEI_FOP_IOCTL,
+ MEI_FOP_CONNECT,
+ MEI_FOP_DISCONNECT_RSP,
MEI_FOP_OPEN,
MEI_FOP_CLOSE
};
@@ -236,20 +237,20 @@ struct mei_cl {
*/
struct mei_hw_ops {
- bool (*host_is_ready) (struct mei_device *dev);
+ bool (*host_is_ready)(struct mei_device *dev);
- bool (*hw_is_ready) (struct mei_device *dev);
- int (*hw_reset) (struct mei_device *dev, bool enable);
- int (*hw_start) (struct mei_device *dev);
- void (*hw_config) (struct mei_device *dev);
+ bool (*hw_is_ready)(struct mei_device *dev);
+ int (*hw_reset)(struct mei_device *dev, bool enable);
+ int (*hw_start)(struct mei_device *dev);
+ void (*hw_config)(struct mei_device *dev);
- void (*intr_clear) (struct mei_device *dev);
- void (*intr_enable) (struct mei_device *dev);
- void (*intr_disable) (struct mei_device *dev);
+ void (*intr_clear)(struct mei_device *dev);
+ void (*intr_enable)(struct mei_device *dev);
+ void (*intr_disable)(struct mei_device *dev);
- int (*hbuf_free_slots) (struct mei_device *dev);
- bool (*hbuf_is_ready) (struct mei_device *dev);
- size_t (*hbuf_max_len) (const struct mei_device *dev);
+ int (*hbuf_free_slots)(struct mei_device *dev);
+ bool (*hbuf_is_ready)(struct mei_device *dev);
+ size_t (*hbuf_max_len)(const struct mei_device *dev);
int (*write)(struct mei_device *dev,
struct mei_msg_hdr *hdr,
@@ -258,7 +259,7 @@ struct mei_hw_ops {
int (*rdbuf_full_slots)(struct mei_device *dev);
u32 (*read_hdr)(const struct mei_device *dev);
- int (*read) (struct mei_device *dev,
+ int (*read)(struct mei_device *dev,
unsigned char *buf, unsigned long len);
};
@@ -294,6 +295,7 @@ int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
void mei_cl_bus_rx_event(struct mei_cl *cl);
+void mei_cl_bus_remove_devices(struct mei_device *dev);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
@@ -339,7 +341,6 @@ struct mei_cl_device {
* @hbuf_depth - depth of hardware host/write buffer is slots
* @hbuf_is_ready - query if the host host/write buffer is ready
* @wr_msg - the buffer for hbm control messages
- * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -394,11 +395,6 @@ struct mei_device {
unsigned char data[128];
} wr_msg;
- struct {
- struct mei_msg_hdr hdr;
- unsigned char data[4]; /* All HBM messages are 4 bytes */
- } wr_ext_msg; /* for control responses */
-
struct hbm_version version;
struct mei_me_client *me_clients; /* Note: memory has to be allocated */
@@ -518,8 +514,8 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
- s32 *slots, struct mei_cl_cb *cmpl_list);
+int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+ struct mei_cl_cb *cmpl_list);
void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_device *dev,
@@ -546,7 +542,7 @@ int mei_wd_host_init(struct mei_device *dev);
* once we got connection to the WD Client
* @dev - mei device
*/
-void mei_watchdog_register(struct mei_device *dev);
+int mei_watchdog_register(struct mei_device *dev);
/*
* mei_watchdog_unregister - Unregistering watchdog interface
* @dev - mei device
@@ -633,6 +629,8 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+bool mei_hbuf_acquire(struct mei_device *dev);
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
int mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index a58320c0c049..3095fc514a65 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -364,7 +364,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
if (!wait_event_interruptible_timeout(ndev->send_wq,
ndev->recv_req_id == ndev->req_id, HZ)) {
dev_err(&dev->pdev->dev, "NFC MEI command timeout\n");
- err = -ETIMEDOUT;
+ err = -ETIME;
} else {
ndev->req_id++;
}
@@ -502,7 +502,7 @@ int mei_nfc_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
- ret = -ENOENT;
+ ret = -ENOTTY;
goto err;
}
@@ -520,7 +520,7 @@ int mei_nfc_host_init(struct mei_device *dev)
i = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
if (i < 0) {
dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
- ret = -ENOENT;
+ ret = -ENOTTY;
goto err;
}
@@ -552,13 +552,7 @@ err:
void mei_nfc_host_exit(struct mei_device *dev)
{
struct mei_nfc_dev *ndev = &nfc_dev;
-
cancel_work_sync(&ndev->init_work);
+}
- mutex_lock(&dev->device_lock);
- if (ndev->cl && ndev->cl->device)
- mei_cl_remove_device(ndev->cl->device);
- mei_nfc_free(ndev);
- mutex_unlock(&dev->device_lock);
-}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ddadd08956f4..1c8fd3a3e135 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -13,9 +13,6 @@
* more details.
*
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -27,7 +24,6 @@
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
@@ -40,11 +36,12 @@
#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw-me.h"
#include "client.h"
+#include "hw-me-regs.h"
+#include "hw-me.h"
/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {
+static const struct pci_device_id mei_me_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
@@ -270,7 +267,7 @@ static void mei_me_remove(struct pci_dev *pdev)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -330,11 +327,12 @@ static int mei_me_pci_resume(struct device *device)
return 0;
}
+
static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
new file mode 100644
index 000000000000..ad3adb009a1e
--- /dev/null
+++ b/drivers/misc/mei/pci-txe.c
@@ -0,0 +1,293 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/mei.h>
+
+
+#include "mei_dev.h"
+#include "hw-txe.h"
+
+static const struct pci_device_id mei_txe_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
+
+
+static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
+{
+ int i;
+ for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
+ if (hw->mem_addr[i]) {
+ pci_iounmap(pdev, hw->mem_addr[i]);
+ hw->mem_addr[i] = NULL;
+ }
+ }
+}
+/**
+ * mei_txe_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in mei_txe_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+ int err;
+ int i;
+
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto release_regions;
+ }
+ }
+
+ /* allocates and initializes the mei dev structure */
+ dev = mei_txe_dev_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_txe_hw(dev);
+
+ /* mapping IO device memory */
+ for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
+ hw->mem_addr[i] = pci_iomap(pdev, i, 0);
+ if (!hw->mem_addr[i]) {
+ dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ }
+
+
+ pci_enable_msi(pdev);
+
+ /* clear spurious interrupts */
+ mei_clear_interrupts(dev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_txe_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_txe_irq_quick_handler,
+ mei_txe_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto free_device;
+ }
+
+ if (mei_start(dev)) {
+ dev_err(&pdev->dev, "init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = mei_register(dev);
+ if (err)
+ goto release_irq;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+release_irq:
+
+ mei_cancel_work(dev);
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+free_device:
+ mei_txe_pci_iounmap(pdev, hw);
+
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ dev_err(&pdev->dev, "initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_txe_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_txe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_txe_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_txe_hw *hw;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "mei: dev =NULL\n");
+ return;
+ }
+
+ hw = to_txe_hw(dev);
+
+ mei_stop(dev);
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ mei_txe_pci_iounmap(pdev, hw);
+
+ mei_deregister(dev);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int mei_txe_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "suspend\n");
+
+ mei_stop(dev);
+
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ return 0;
+}
+
+static int mei_txe_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int err;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ mei_clear_interrupts(dev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_txe_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_txe_irq_quick_handler,
+ mei_txe_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+ pdev->irq);
+ return err;
+ }
+
+ err = mei_restart(dev);
+
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops,
+ mei_txe_pci_suspend,
+ mei_txe_pci_resume);
+
+#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
+#else
+#define MEI_TXE_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+/*
+ * PCI driver structure
+ */
+static struct pci_driver mei_txe_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mei_txe_pci_tbl,
+ .probe = mei_txe_probe,
+ .remove = mei_txe_remove,
+ .shutdown = mei_txe_remove,
+ .driver.pm = MEI_TXE_PM_OPS,
+};
+
+module_pci_driver(mei_txe_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index f70945ed96f6..ebf1cbc198fd 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -25,7 +25,6 @@
#include "mei_dev.h"
#include "hbm.h"
-#include "hw-me.h"
#include "client.h"
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
@@ -53,7 +52,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*
* @dev: the device structure
*
- * returns -ENENT if wd client cannot be found
+ * returns -ENOTTY if wd client cannot be found
* -EIO if write has failed
* 0 on success
*/
@@ -73,7 +72,7 @@ int mei_wd_host_init(struct mei_device *dev)
id = mei_me_cl_by_uuid(dev, &mei_wd_guid);
if (id < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
- return id;
+ return -ENOTTY;
}
cl->me_client_id = dev->me_clients[id].client_id;
@@ -87,15 +86,20 @@ int mei_wd_host_init(struct mei_device *dev)
cl->state = MEI_FILE_CONNECTING;
- if (mei_hbm_cl_connect_req(dev, cl)) {
- dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
- cl->state = MEI_FILE_DISCONNECTED;
- cl->host_client_id = 0;
- return -EIO;
+ ret = mei_cl_connect(cl, NULL);
+
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret);
+ mei_cl_unlink(cl);
+ return ret;
}
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- return 0;
+ ret = mei_watchdog_register(dev);
+ if (ret) {
+ mei_cl_disconnect(cl);
+ mei_cl_unlink(cl);
+ }
+ return ret;
}
/**
@@ -106,13 +110,16 @@ int mei_wd_host_init(struct mei_device *dev)
* returns 0 if success,
* -EIO when message send fails
* -EINVAL when invalid message is to be sent
+ * -ENODEV on flow control failure
*/
int mei_wd_send(struct mei_device *dev)
{
+ struct mei_cl *cl = &dev->wd_cl;
struct mei_msg_hdr hdr;
+ int ret;
- hdr.host_addr = dev->wd_cl.host_client_id;
- hdr.me_addr = dev->wd_cl.me_client_id;
+ hdr.host_addr = cl->host_client_id;
+ hdr.me_addr = cl->me_client_id;
hdr.msg_complete = 1;
hdr.reserved = 0;
hdr.internal = 0;
@@ -121,10 +128,24 @@ int mei_wd_send(struct mei_device *dev)
hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
hdr.length = MEI_WD_STOP_MSG_SIZE;
- else
+ else {
+ dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n");
return -EINVAL;
+ }
- return mei_write_message(dev, &hdr, dev->wd_data);
+ ret = mei_write_message(dev, &hdr, dev->wd_data);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: write message failed\n");
+ return ret;
+ }
+
+ ret = mei_cl_flow_ctrl_reduce(cl);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n");
+ return ret;
+ }
+
+ return 0;
}
/**
@@ -133,9 +154,11 @@ int mei_wd_send(struct mei_device *dev)
* @dev: the device structure
* @preserve: indicate if to keep the timeout value
*
- * returns 0 if success,
- * -EIO when message send fails
+ * returns 0 if success
+ * on error:
+ * -EIO when message send fails
* -EINVAL when invalid message is to be sent
+ * -ETIME on message timeout
*/
int mei_wd_stop(struct mei_device *dev)
{
@@ -151,20 +174,12 @@ int mei_wd_stop(struct mei_device *dev)
ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
- goto out;
-
- if (ret && dev->hbuf_is_ready) {
- ret = 0;
- dev->hbuf_is_ready = false;
-
- if (!mei_wd_send(dev)) {
- ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
- if (ret)
- goto out;
- } else {
- dev_err(&dev->pdev->dev, "wd: send stop failed\n");
- }
+ goto err;
+ if (ret && mei_hbuf_acquire(dev)) {
+ ret = mei_wd_send(dev);
+ if (ret)
+ goto err;
dev->wd_pending = false;
} else {
dev->wd_pending = true;
@@ -172,21 +187,21 @@ int mei_wd_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
- dev->wd_state == MEI_WD_IDLE,
- msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
+ ret = wait_event_timeout(dev->wait_stop_wd,
+ dev->wd_state == MEI_WD_IDLE,
+ msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (dev->wd_state == MEI_WD_IDLE) {
- dev_dbg(&dev->pdev->dev, "wd: stop completed ret=%d.\n", ret);
- ret = 0;
- } else {
- if (!ret)
- ret = -ETIMEDOUT;
+ if (dev->wd_state != MEI_WD_IDLE) {
+ /* timeout */
+ ret = -ETIME;
dev_warn(&dev->pdev->dev,
"wd: stop failed to complete ret=%d.\n", ret);
+ goto err;
}
-
-out:
+ dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n",
+ MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
+ return 0;
+err:
return ret;
}
@@ -260,8 +275,8 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
*/
static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
{
- int ret = 0;
struct mei_device *dev;
+ int ret;
dev = watchdog_get_drvdata(wd_dev);
if (!dev)
@@ -277,25 +292,18 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
+ ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
+ if (ret < 0)
+ goto end;
/* Check if we can send the ping to HW*/
- if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
+ if (ret && mei_hbuf_acquire(dev)) {
- dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
- if (mei_wd_send(dev)) {
- dev_err(&dev->pdev->dev, "wd: send failed.\n");
- ret = -EIO;
+ ret = mei_wd_send(dev);
+ if (ret)
goto end;
- }
-
- if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
- dev_err(&dev->pdev->dev,
- "wd: mei_cl_flow_ctrl_reduce() failed.\n");
- ret = -EIO;
- goto end;
- }
-
+ dev->wd_pending = false;
} else {
dev->wd_pending = true;
}
@@ -363,17 +371,25 @@ static struct watchdog_device amt_wd_dev = {
};
-void mei_watchdog_register(struct mei_device *dev)
+int mei_watchdog_register(struct mei_device *dev)
{
- if (watchdog_register_device(&amt_wd_dev)) {
- dev_err(&dev->pdev->dev,
- "wd: unable to register watchdog device.\n");
- return;
+
+ int ret;
+
+ /* unlock to preserve correct locking order */
+ mutex_unlock(&dev->device_lock);
+ ret = watchdog_register_device(&amt_wd_dev);
+ mutex_lock(&dev->device_lock);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n",
+ ret);
+ return ret;
}
dev_dbg(&dev->pdev->dev,
"wd: successfully register watchdog interface.\n");
watchdog_set_drvdata(&amt_wd_dev, dev);
+ return 0;
}
void mei_watchdog_unregister(struct mei_device *dev)
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index e42b331edbc6..462a5b1d8651 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,7 +4,6 @@ config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
select VHOST_RING
- default N
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
@@ -25,7 +24,6 @@ config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
select VIRTIO
- default N
help
This enables card driver support for the Intel Many Integrated
Core (MIC) device family. The card driver communicates shutdown/
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 347b9b3b7916..306f502be95e 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/io.h>
+#include <linux/irqreturn.h>
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 1a6edce2ecde..0398c696d257 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -24,6 +24,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/notifier.h>
+#include <linux/irqreturn.h>
#include "mic_intr.h"
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index f9c29bc918bc..dbc5afde1392 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -194,7 +194,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
for (i = 0; i < MIC_MIN_MSIX; i++)
mdev->irq_info.msix_entries[i].entry = i;
- rc = pci_enable_msix(pdev, mdev->irq_info.msix_entries,
+ rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
MIC_MIN_MSIX);
if (rc) {
dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index a5925f7f17f6..956597321d2a 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -636,6 +636,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
u8 mac[ETH_ALEN];
ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(dev);
+ int ret;
if (!mac_pton(buf, mac))
return -EINVAL;
@@ -644,8 +645,10 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
if (!chip->pch_phub_extrom_base_address)
return -ENOMEM;
- pch_phub_write_gbe_mac_addr(chip, mac);
+ ret = pch_phub_write_gbe_mac_addr(chip, mac);
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ubuf += sizeof(hdr);
ubufcch = ubuf;
- if (gru_user_copy_handle(&ubuf, cch))
- goto fail;
+ if (gru_user_copy_handle(&ubuf, cch)) {
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ return -EFAULT;
+ }
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFAULT;
return ret ? ret : bytes;
-
-fail:
- unlock_cch_handle(cch);
- return -EFAULT;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000969f0..95c894482fdd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
nid = cpu_to_node(cpu);
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index afe66571ce0b..21181fa243df 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -24,6 +24,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -36,14 +39,35 @@ struct sram_dev {
struct clk *clk;
};
+struct sram_reserve {
+ struct list_head list;
+ u32 start;
+ u32 size;
+};
+
+static int sram_reserve_cmp(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
+ struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
+
+ return ra->start - rb->start;
+}
+
static int sram_probe(struct platform_device *pdev)
{
void __iomem *virt_base;
struct sram_dev *sram;
struct resource *res;
- unsigned long size;
+ struct device_node *np = pdev->dev.of_node, *child;
+ unsigned long size, cur_start, cur_size;
+ struct sram_reserve *rblocks, *block;
+ struct list_head reserve_list;
+ unsigned int nblocks;
int ret;
+ INIT_LIST_HEAD(&reserve_list);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
virt_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(virt_base))
@@ -65,19 +89,106 @@ static int sram_probe(struct platform_device *pdev)
if (!sram->pool)
return -ENOMEM;
- ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
- res->start, size, -1);
- if (ret < 0) {
- if (sram->clk)
- clk_disable_unprepare(sram->clk);
- return ret;
+ /*
+ * We need an additional block to mark the end of the memory region
+ * after the reserved blocks from the dt are processed.
+ */
+ nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
+ rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
+ if (!rblocks) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ block = &rblocks[0];
+ for_each_available_child_of_node(np, child) {
+ struct resource child_res;
+
+ ret = of_address_to_resource(child, 0, &child_res);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "could not get address for node %s\n",
+ child->full_name);
+ goto err_chunks;
+ }
+
+ if (child_res.start < res->start || child_res.end > res->end) {
+ dev_err(&pdev->dev,
+ "reserved block %s outside the sram area\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto err_chunks;
+ }
+
+ block->start = child_res.start - res->start;
+ block->size = resource_size(&child_res);
+ list_add_tail(&block->list, &reserve_list);
+
+ dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
+ block->start,
+ block->start + block->size);
+
+ block++;
+ }
+
+ /* the last chunk marks the end of the region */
+ rblocks[nblocks - 1].start = size;
+ rblocks[nblocks - 1].size = 0;
+ list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);
+
+ list_sort(NULL, &reserve_list, sram_reserve_cmp);
+
+ cur_start = 0;
+
+ list_for_each_entry(block, &reserve_list, list) {
+ /* can only happen if sections overlap */
+ if (block->start < cur_start) {
+ dev_err(&pdev->dev,
+ "block at 0x%x starts after current offset 0x%lx\n",
+ block->start, cur_start);
+ ret = -EINVAL;
+ goto err_chunks;
+ }
+
+ /* current start is in a reserved block, so continue after it */
+ if (block->start == cur_start) {
+ cur_start = block->start + block->size;
+ continue;
+ }
+
+ /*
+ * allocate the space between the current starting
+ * address and the following reserved block, or the
+ * end of the region.
+ */
+ cur_size = block->start - cur_start;
+
+ dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
+ cur_start, cur_start + cur_size);
+ ret = gen_pool_add_virt(sram->pool,
+ (unsigned long)virt_base + cur_start,
+ res->start + cur_start, cur_size, -1);
+ if (ret < 0)
+ goto err_chunks;
+
+ /* next allocation after this reserved block */
+ cur_start = block->start + block->size;
}
+ kfree(rblocks);
+
platform_set_drvdata(pdev, sram);
dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
return 0;
+
+err_chunks:
+ kfree(rblocks);
+err_alloc:
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
+ return ret;
}
static int sram_remove(struct platform_device *pdev)
@@ -87,8 +198,6 @@ static int sram_remove(struct platform_device *pdev)
if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
- gen_pool_destroy(sram->pool);
-
if (sram->clk)
clk_disable_unprepare(sram->clk);
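
The sram_probe() rework above sorts the reserved blocks from the device tree, appends a zero-sized sentinel at the end of the region, and then feeds only the gaps between reserved ranges into the gen_pool. The userspace model below reproduces that gap walk; the SRAM size and reserved ranges are invented for the example, and the overlap check from the driver is omitted for brevity.

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the reserved-block carve-out in sram_probe():
 * sort the reserved ranges, add a sentinel marking the end of the
 * region, then emit every gap between them as a pool chunk. */
struct reserve { unsigned int start, size; };

static int cmp(const void *a, const void *b)
{
	const struct reserve *ra = a, *rb = b;

	return (int)ra->start - (int)rb->start;
}

int main(void)
{
	unsigned int size = 0x1000;		/* invented SRAM size */
	struct reserve blocks[] = {
		{ 0x800, 0x100 },		/* invented reserved ranges */
		{ 0x100, 0x100 },
		{ size, 0 },			/* sentinel at the end of the region */
	};
	unsigned int i, cur_start = 0;
	unsigned int n = sizeof(blocks) / sizeof(blocks[0]);

	qsort(blocks, n, sizeof(blocks[0]), cmp);

	for (i = 0; i < n; i++) {
		if (blocks[i].start > cur_start)	/* gap -> pool chunk */
			printf("pool chunk 0x%x-0x%x\n",
			       cur_start, blocks[i].start);
		cur_start = blocks[i].start + blocks[i].size;
	}
	return 0;
}

For this input the pool receives the chunks 0x0-0x100, 0x200-0x800 and 0x900-0x1000, i.e. everything except the two reserved ranges.
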
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 3aed525e55b4..1972d57aadb3 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -22,7 +22,6 @@
#define pr_fmt(fmt) "(stc): " fmt
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index 83da711ce9f1..cb0289b44a17 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 5bc10fa193de..b00335652e52 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index d35cda06b5e8..e0d5017785e5 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -383,11 +383,12 @@ static int vmci_enable_msix(struct pci_dev *pdev,
vmci_dev->msix_entries[i].vector = i;
}
- result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+ result = pci_enable_msix_exact(pdev,
+ vmci_dev->msix_entries, VMCI_MAX_INTRS);
if (result == 0)
vmci_dev->exclusive_vectors = true;
- else if (result > 0)
- result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+ else if (result == -ENOSPC)
+ result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
return result;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 29d5d988a51c..7b5424f398ac 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1959,6 +1959,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -1974,7 +1975,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
- if (req && req->cmd_flags & REQ_DISCARD) {
+ if (cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1983,7 +1984,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req && req->cmd_flags & REQ_FLUSH) {
+ } else if (cmd_flags & REQ_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1999,7 +2000,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+ (cmd_flags & MMC_REQ_SPECIAL_MASK))
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
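
The one-line queue.c fix above matters on 32-bit hosts: dma_max_pfn() returns an unsigned long, and shifting it left by PAGE_SHIFT can overflow the native word before the result ever reaches the 64-bit limit variable. A minimal demonstration of the truncation, with an invented pfn value:

#include <stdio.h>
#include <stdint.h>

/* Why the (u64) cast is needed: on a 32-bit host the pfn-to-byte shift
 * wraps around before assignment to the 64-bit limit.  The pfn value is
 * invented for the demonstration. */
int main(void)
{
	uint32_t max_pfn = 0x100000;		/* 4 GiB worth of 4 KiB pages */
	unsigned int page_shift = 12;

	uint32_t wrapped = max_pfn << page_shift;		/* wraps to 0 */
	uint64_t correct = (uint64_t)max_pfn << page_shift;	/* 0x100000000 */

	printf("without cast: 0x%x\n", wrapped);
	printf("with cast:    0x%llx\n", (unsigned long long)correct);
	return 0;
}
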
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 57a2b403bf8e..098374b1ab2b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2460,7 +2460,8 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
- if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+ if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+ host->ops->get_cd(host) == 0) {
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 49bc403e31f0..114f6bdfbef3 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -337,7 +337,7 @@ int mmc_of_parse(struct mmc_host *host)
break;
default:
dev_err(host->parent,
- "Invalid \"bus-width\" value %ud!\n", bus_width);
+ "Invalid \"bus-width\" value %u!\n", bus_width);
return -EINVAL;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f631f5a9bf79..98e9eb0f6643 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1119,14 +1119,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (mmc_card_highspeed(card)) {
if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
- && ((host->caps & (MMC_CAP_1_8V_DDR |
- MMC_CAP_UHS_DDR50))
- == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
+ && (host->caps & MMC_CAP_1_8V_DDR))
ddr = MMC_1_8V_DDR_MODE;
else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
- && ((host->caps & (MMC_CAP_1_2V_DDR |
- MMC_CAP_UHS_DDR50))
- == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
+ && (host->caps & MMC_CAP_1_2V_DDR))
ddr = MMC_1_2V_DDR_MODE;
}
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 06ee1aeaacec..6c36fccaa1ec 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_ids.h>
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x0097
@@ -30,6 +31,10 @@
#define SDIO_DEVICE_ID_STE_CW1200 0x2280
#endif
+#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
+#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
+#endif
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -58,6 +63,9 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6f42050b7ccc..692fdb177294 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -11,6 +11,7 @@
*/
#include <linux/err.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
@@ -45,6 +46,13 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
+static const unsigned int sd_au_size[] = {
+ 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
+ SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
+ SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
+ SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
+};
+
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -216,7 +224,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i, max_au;
+ int err, i;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -240,26 +248,25 @@ static int mmc_read_ssr(struct mmc_card *card)
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
- /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
- max_au = card->scr.sda_spec3 ? 0xF : 0x9;
-
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 && au <= max_au) {
- card->ssr.au = 1 << (au + 4);
- es = UNSTUFF_BITS(ssr, 408 - 384, 16);
- et = UNSTUFF_BITS(ssr, 402 - 384, 6);
- eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
- if (es && et) {
- card->ssr.erase_timeout = (et * 1000) / es;
- card->ssr.erase_offset = eo * 1000;
+ if (au) {
+ if (au <= 9 || card->scr.sda_spec3) {
+ card->ssr.au = sd_au_size[au];
+ es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+ if (es && et) {
+ eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+ card->ssr.erase_timeout = (et * 1000) / es;
+ card->ssr.erase_offset = eo * 1000;
+ }
+ } else {
+ pr_warning("%s: SD Status: Invalid Allocation Unit size.\n",
+ mmc_hostname(card->host));
}
- } else {
- pr_warning("%s: SD Status: Invalid Allocation Unit "
- "size.\n", mmc_hostname(card->host));
}
out:
kfree(ssr);
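
The new sd_au_size[] table converts the 4-bit AU_SIZE field of the SD status register into a sector count, and the rework only accepts index values above 9 on SD 3.0 cards (card->scr.sda_spec3). A quick illustration of the lookup, with an invented field value; the sizes mirror the table in the diff.

#include <stdio.h>

/* Illustration of the AU_SIZE lookup added to mmc_read_ssr(): the 4-bit
 * field indexes a table of allocation-unit sizes in 512-byte sectors. */
static const unsigned int sd_au_size_sectors[] = {
	0,              16384 / 512,    32768 / 512,    65536 / 512,
	131072 / 512,   262144 / 512,   524288 / 512,   1048576 / 512,
	2097152 / 512,  4194304 / 512,  8388608 / 512,  12582912 / 512,
	16777216 / 512, 25165824 / 512, 33554432 / 512, 67108864 / 512,
};

int main(void)
{
	unsigned int au = 0xA;			/* sample AU_SIZE field value */
	unsigned int sectors = sd_au_size_sectors[au];

	printf("AU_SIZE %#x -> %u sectors (%u MiB)\n",
	       au, sectors, sectors * 512 / (1024 * 1024));
	return 0;
}

Index 0xA maps to 16384 sectors (8 MiB); on a pre-3.0 card the same index would instead trigger the "Invalid Allocation Unit size" warning shown above.
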
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 157b570ba343..92d1ba8e8153 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -308,7 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func)
struct mmc_host *host = func->card->host;
u64 addr = (host->slotno << 16) | func->num;
- acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr);
+ acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
}
#else
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 3d8ceb4084de..aaa90460ed23 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -53,6 +53,17 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
return ret;
}
+ if (pending && mmc_card_broken_irq_polling(card) &&
+ !(host->caps & MMC_CAP_SDIO_IRQ)) {
+ unsigned char dummy;
+
+ /* A fake interrupt could be created when we poll SDIO_CCCR_INTx
+ * register with a Marvell SD8797 card. A dummy CMD52 read to
+ * function 0 register 0xff can avoid this.
+ */
+ mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
+ }
+
count = 0;
for (i = 1; i <= 7; i++) {
if (pending & (1 << i)) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7fc5099e44b2..1384f67abe21 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -104,6 +104,18 @@ config MMC_SDHCI_PLTFM
If unsure, say N.
+config MMC_SDHCI_OF_ARASAN
+ tristate "SDHCI OF support for the Arasan SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Arasan Secure Digital Host Controller Interface
+ (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_ESDHC
tristate "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_PLTFM
@@ -324,7 +336,7 @@ config MMC_ATMELMCI
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
- depends on MMC && ARCH_MSM
+ depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
help
This provides support for the SD/MMC cell found in the
MSM and QSD SOCs from Qualcomm. The controller also has
@@ -479,7 +491,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "SH-Mobile SDHI SD/SDIO controller support"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARM
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
select MMC_TMIO_CORE
help
This provides support for the SDHI SD/SDIO controller found in
@@ -575,6 +588,16 @@ config MMC_DW_SOCFPGA
This selects support for Altera SoCFPGA specific extensions to the
Synopsys DesignWare Memory Card Interface driver.
+config MMC_DW_K3
+ tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ select MMC_DW_IDMAC
+ help
+ This selects support for Hisilicon K3 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on Hisilicon K3 SoC's.
+
config MMC_DW_PCI
tristate "Synopsys Designware MCI support on PCI bus"
depends on MMC_DW && PCI
@@ -588,7 +611,8 @@ config MMC_DW_PCI
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
- depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+ depends on MMC_BLOCK
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
This selects the MMC Host Interface controller (MMCIF).
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index c41d0c364509..3483b6b6b880 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_SOCFPGA) += dw_mmc-socfpga.o
+obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
+obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 2cbb4516d353..42706ea0ba85 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1192,11 +1192,22 @@ static void atmci_start_request(struct atmel_mci *host,
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
- atmci_send_command(host, cmd, cmdflags);
+
+ /*
+ * DMA transfer should be started before sending the command to avoid
+ * unexpected errors especially for read operations in SDIO mode.
+ * Unfortunately, in PDC mode, command has to be sent before starting
+ * the transfer.
+ */
+ if (host->submit_data != &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
+ if (host->submit_data == &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
@@ -1391,8 +1402,14 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_unprepare(host->mck);
switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ break;
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break;
default:
/*
@@ -2204,6 +2221,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
}
host->slot[id] = slot;
+ mmc_regulator_get_supply(mmc);
mmc_add_host(mmc);
if (gpio_is_valid(slot->detect_pin)) {
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
new file mode 100644
index 000000000000..f567c219cff4
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of_address.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+
+ ret = clk_set_rate(host->ciu_clk, ios->clock);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+}
+
+static const struct dw_mci_drv_data k3_drv_data = {
+ .set_ios = dw_mci_k3_set_ios,
+};
+
+static const struct of_device_id dw_mci_k3_match[] = {
+ { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_k3_match);
+
+static int dw_mci_k3_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_k3_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static int dw_mci_k3_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_mci_suspend(host);
+ if (!ret)
+ clk_disable_unprepare(host->ciu_clk);
+
+ return ret;
+}
+
+static int dw_mci_k3_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable ciu clock\n");
+ return ret;
+ }
+
+ return dw_mci_resume(host);
+}
+
+static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+
+static struct platform_driver dw_mci_k3_pltfm_driver = {
+ .probe = dw_mci_k3_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_k3",
+ .of_match_table = dw_mci_k3_match,
+ .pm = &dw_mci_k3_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_k3_pltfm_driver);
+
+MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dwmmc-k3");
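The new dw_mmc-k3 glue shows the dw_mmc extension pattern: per-SoC callbacks live in a dw_mci_drv_data, the OF match table carries a pointer to it, and probe simply hands it to dw_mci_pltfm_register(). A sketch of adding a second SoC entry under the same scheme ("vendor,other-dw-mshc" and other_drv_data are hypothetical names, not from the patch):

	static const struct dw_mci_drv_data other_drv_data = {
		.set_ios	= dw_mci_k3_set_ios,	/* or an SoC-specific hook */
	};

	static const struct of_device_id example_match[] = {
		{ .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data },
		{ .compatible = "vendor,other-dw-mshc", .data = &other_drv_data },
		{ /* sentinel */ },
	};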
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4bce0deec362..c204b7d1532c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -36,6 +36,7 @@
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
@@ -1032,20 +1033,29 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
+ struct dw_mci *host = slot->host;
+ int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
present = 1;
else if (brd->get_cd)
present = !brd->get_cd(slot->id);
+ else if (!IS_ERR_VALUE(gpio_cd))
+ present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
- if (present)
+ spin_lock_bh(&host->lock);
+ if (present) {
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is present\n");
- else
+ } else {
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is not present\n");
+ }
+ spin_unlock_bh(&host->lock);
return present;
}
@@ -1926,10 +1936,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
/* Card change detected */
slot->last_detect_state = present;
- /* Mark card as present if applicable */
- if (present != 0)
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
@@ -1977,8 +1983,6 @@ static void dw_mci_work_routine_card(struct work_struct *work)
/* Power down slot */
if (present == 0) {
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
/* Clear down the FIFO */
dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
@@ -2079,6 +2083,26 @@ static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
return gpio;
}
+
+/* find the cd gpio for a given slot */
+static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
+ struct mmc_host *mmc)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int gpio;
+
+ if (!np)
+ return;
+
+ gpio = of_get_named_gpio(np, "cd-gpios", 0);
+
+ /* Having a missing entry is valid; return silently */
+ if (!gpio_is_valid(gpio))
+ return;
+
+ if (mmc_gpio_request_cd(mmc, gpio, 0))
+ dev_warn(dev, "gpio [%d] request failed\n", gpio);
+}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
@@ -2096,6 +2120,11 @@ static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
return -EINVAL;
}
+static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
+ struct mmc_host *mmc)
+{
+ return;
+}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -2197,12 +2226,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
#endif /* CONFIG_MMC_DW_IDMAC */
}
- if (dw_mci_get_cd(mmc))
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- else
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-
slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
+ dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
ret = mmc_add_host(mmc);
if (ret)
@@ -2389,6 +2414,9 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ if (of_get_property(np, "cd-inverted", NULL))
+ pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
return pdata;
}
@@ -2579,7 +2607,7 @@ int dw_mci_probe(struct dw_mci *host)
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
host->card_workqueue = alloc_workqueue("dw-mci-card",
- WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+ WQ_MEM_RECLAIM, 1);
if (!host->card_workqueue) {
ret = -ENOMEM;
goto err_dmaunmap;
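The dw_mmc card-detect rework above relies on the slot-gpio helpers: dw_mci_of_get_cd_gpio() hands any "cd-gpios" entry to the core with mmc_gpio_request_cd(), and .get_cd then asks mmc_gpio_get_cd() before falling back to the controller's CDETECT bit. A rough sketch of that fallback order (example_read_cdetect() stands in for the register read and is not a dw_mmc function):

	static int example_get_cd(struct mmc_host *mmc)
	{
		int gpio_cd = mmc_gpio_get_cd(mmc);

		/* A negative value means no card-detect GPIO was registered. */
		if (gpio_cd >= 0)
			return !!gpio_cd;

		return example_read_cdetect(mmc);	/* controller status bit */
	}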
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f32057972dd7..b93122636531 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1683,8 +1683,6 @@ static int mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 50fc9df791b2..073e871a0fc8 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,6 +38,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
@@ -69,37 +70,25 @@ struct mxs_mmc_host {
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
- int wp_gpio;
- bool wp_inverted;
- bool cd_inverted;
- bool broken_cd;
- bool non_removable;
};
-static int mxs_mmc_get_ro(struct mmc_host *mmc)
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- int ret;
-
- if (!gpio_is_valid(host->wp_gpio))
- return -EINVAL;
-
- ret = gpio_get_value(host->wp_gpio);
+ struct mxs_ssp *ssp = &host->ssp;
+ int present, ret;
- if (host->wp_inverted)
- ret = !ret;
+ ret = mmc_gpio_get_cd(mmc);
+ if (ret >= 0)
+ return ret;
- return ret;
-}
+ present = !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
+ BM_SSP_STATUS_CARD_DETECT);
-static int mxs_mmc_get_cd(struct mmc_host *mmc)
-{
- struct mxs_mmc_host *host = mmc_priv(mmc);
- struct mxs_ssp *ssp = &host->ssp;
+ if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ present = !present;
- return host->non_removable || host->broken_cd ||
- !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
- BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
+ return present;
}
static int mxs_mmc_reset(struct mxs_mmc_host *host)
@@ -549,7 +538,7 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static const struct mmc_host_ops mxs_mmc_ops = {
.request = mxs_mmc_request,
- .get_ro = mxs_mmc_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.get_cd = mxs_mmc_get_cd,
.set_ios = mxs_mmc_set_ios,
.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
@@ -579,15 +568,12 @@ static int mxs_mmc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_mmc_dt_ids, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
struct resource *iores;
int ret = 0, irq_err;
struct regulator *reg_vmmc;
- enum of_gpio_flags flags;
struct mxs_ssp *ssp;
- u32 bus_width = 0;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
@@ -648,23 +634,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
- of_property_read_u32(np, "bus-width", &bus_width);
- if (bus_width == 4)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- else if (bus_width == 8)
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- host->broken_cd = of_property_read_bool(np, "broken-cd");
- host->non_removable = of_property_read_bool(np, "non-removable");
- if (host->non_removable)
- mmc->caps |= MMC_CAP_NONREMOVABLE;
- host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
- if (flags & OF_GPIO_ACTIVE_LOW)
- host->wp_inverted = 1;
-
- host->cd_inverted = of_property_read_bool(np, "cd-inverted");
-
mmc->f_min = 400000;
mmc->f_max = 288000000;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out_clk_disable;
+
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->max_segs = 52;
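Dropping the hand-rolled DT parsing works because mmc_of_parse() covers the same bindings from the host's of_node: bus-width, broken-cd, non-removable, cd-inverted, plus cd-gpios/wp-gpios, which it routes through the slot-gpio layer (hence .get_ro = mmc_gpio_get_ro above). Roughly what it does for the properties this driver used to parse by hand, as a simplified sketch rather than the exact core code:

	if (bus_width == 8)
		mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
	else if (bus_width == 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	if (of_property_read_bool(np, "non-removable"))
		mmc->caps |= MMC_CAP_NONREMOVABLE;
	if (of_property_read_bool(np, "cd-inverted"))
		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	/* cd-gpios / wp-gpios end up in mmc_gpio_request_cd()/_ro() */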
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 4ede2307b293..9ce17f6e4014 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -143,6 +143,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -151,6 +152,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "80860F14" },
{ "INT33BB" },
{ "INT33C6" },
+ { "INT3436" },
{ "PNP0D40" },
{ },
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 461a4c3f4ef7..b841bb7cd371 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -27,6 +27,7 @@
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/mmc-esdhc-imx.h>
+#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -45,6 +46,8 @@
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+/* Tuning bits */
+#define ESDHC_MIX_CTRL_TUNING_MASK 0x03c00000
/* dll control register */
#define ESDHC_DLL_CTRL 0x60
@@ -385,6 +388,22 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
return ret;
}
+ if (unlikely(reg == SDHCI_TRANSFER_MODE)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ ret = m & ESDHC_MIX_CTRL_SDHCI_MASK;
+ /* Swap AC23 bit */
+ if (m & ESDHC_MIX_CTRL_AC23EN) {
+ ret &= ~ESDHC_MIX_CTRL_AC23EN;
+ ret |= SDHCI_TRNS_AUTO_CMD23;
+ }
+ } else {
+ ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE);
+ }
+
+ return ret;
+ }
+
return readw(host->ioaddr + reg);
}
@@ -421,24 +440,20 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
- new_val = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK) {
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ } else {
+ v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ }
+
if (val & SDHCI_CTRL_EXEC_TUNING) {
- new_val |= ESDHC_STD_TUNING_EN |
- ESDHC_TUNING_START_TAP;
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
} else {
- new_val &= ~ESDHC_STD_TUNING_EN;
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
- m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
}
- if (val & SDHCI_CTRL_TUNED_CLK)
- v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- else
- v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
-
- writel(new_val, host->ioaddr + ESDHC_TUNING_CTRL);
writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}
@@ -546,7 +561,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
* Do it manually here.
*/
if (esdhc_is_usdhc(imx_data)) {
- writel(0, host->ioaddr + ESDHC_MIX_CTRL);
+ /* the tuning bits should be kept during reset */
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
+ host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 0;
}
}
@@ -558,19 +576,17 @@ static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- u32 f_host = clk_get_rate(pltfm_host->clk);
-
- if (boarddata->f_max && (boarddata->f_max < f_host))
+ if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
return boarddata->f_max;
else
- return f_host;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return clk_get_rate(pltfm_host->clk) / 256 / 16;
+ return pltfm_host->clock / 256 / 16;
}
static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
@@ -578,7 +594,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
- unsigned int host_clock = clk_get_rate(pltfm_host->clk);
+ unsigned int host_clock = pltfm_host->clock;
int pre_div = 2;
int div = 1;
u32 temp, val;
@@ -681,6 +697,7 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
/* FIXME: delay a bit for card to be ready for next tuning due to errors */
mdelay(1);
+ pm_runtime_get_sync(host->mmc->parent);
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
ESDHC_MIX_CTRL_FBCLK_SEL;
@@ -699,7 +716,7 @@ static void esdhc_request_done(struct mmc_request *mrq)
static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
{
struct mmc_command cmd = {0};
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_data data = {0};
struct scatterlist sg;
char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
@@ -809,6 +826,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
pinctrl = imx_data->pins_200mhz;
break;
default:
@@ -836,6 +854,7 @@ static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
break;
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
break;
case MMC_TIMING_UHS_DDR50:
@@ -976,7 +995,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
}
pltfm_host->clk = imx_data->clk_per;
-
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
clk_prepare_enable(imx_data->clk_per);
clk_prepare_enable(imx_data->clk_ipg);
clk_prepare_enable(imx_data->clk_ahb);
@@ -1009,11 +1028,18 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (esdhc_is_usdhc(imx_data)) {
writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
+ ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+ host->ioaddr + ESDHC_TUNING_CTRL);
+
boarddata = &imx_data->boarddata;
if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
if (!host->mmc->parent->platform_data) {
@@ -1053,7 +1079,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
break;
case ESDHC_CD_PERMANENT:
- host->mmc->caps = MMC_CAP_NONREMOVABLE;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
break;
case ESDHC_CD_NONE:
@@ -1094,6 +1120,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
return 0;
disable_clk:
@@ -1114,21 +1146,63 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ENABLED(CONFIG_PM_RUNTIME)) {
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+ }
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_esdhc_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
clk_disable_unprepare(imx_data->clk_ahb);
- sdhci_pltfm_free(pdev);
+ return ret;
+}
- return 0;
+static int sdhci_esdhc_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ return sdhci_runtime_resume_host(host);
}
+#endif
+
+static const struct dev_pm_ops sdhci_esdhc_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
+ SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
+ sdhci_esdhc_runtime_resume, NULL)
+};
static struct platform_driver sdhci_esdhc_imx_driver = {
.driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_esdhc_pmops,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
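The runtime PM additions above follow the usual autosuspend recipe: probe marks the device active, enables runtime PM with a 50 ms autosuspend window, and the runtime callbacks gate the per/ipg/ahb clocks around sdhci_runtime_suspend_host()/sdhci_runtime_resume_host(); paths that touch the hardware outside a normal request (such as the tuning preparation above) take a runtime PM reference first. A minimal sketch of that consumer side, using only the standard pm_runtime API (example_touch_hw() is illustrative):

	pm_runtime_get_sync(dev);		/* clocks back on if autosuspended */
	example_touch_hw(host);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* re-arms the 50 ms autosuspend */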
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
new file mode 100644
index 000000000000..f7c7cf62437d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -0,0 +1,224 @@
+/*
+ * Arasan Secure Digital Host Controller Interface.
+ * Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2012 Wind River Systems, Inc.
+ * Copyright (C) 2013 Pengutronix e.K.
+ * Copyright (C) 2013 Xilinx Inc.
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/module.h>
+#include "sdhci-pltfm.h"
+
+#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+
+#define CLK_CTRL_TIMEOUT_SHIFT 16
+#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+
+/**
+ * struct sdhci_arasan_data
+ * @clk_ahb: Pointer to the AHB clock
+ */
+struct sdhci_arasan_data {
+ struct clk *clk_ahb;
+};
+
+static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
+{
+ u32 div;
+ unsigned long freq;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
+ div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+
+ freq = clk_get_rate(pltfm_host->clk);
+ freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+
+ return freq;
+}
+
+static struct sdhci_ops sdhci_arasan_ops = {
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_arasan_get_timeout_clock,
+};
+
+static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+ .ops = &sdhci_arasan_ops,
+};
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * sdhci_arasan_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Put the device in a low power state.
+ */
+static int sdhci_arasan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return 0;
+}
+
+/**
+ * sdhci_arasan_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Resume operation after suspend
+ */
+static int sdhci_arasan_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = clk_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(dev, "Cannot enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable SD clock.\n");
+ clk_disable(sdhci_arasan->clk_ahb);
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+#endif /* ! CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+ sdhci_arasan_resume);
+
+static int sdhci_arasan_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct clk *clk_xin;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_arasan_data *sdhci_arasan;
+
+ sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
+ GFP_KERNEL);
+ if (!sdhci_arasan)
+ return -ENOMEM;
+
+ sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
+ if (IS_ERR(sdhci_arasan->clk_ahb)) {
+ dev_err(&pdev->dev, "clk_ahb clock not found.\n");
+ return PTR_ERR(sdhci_arasan->clk_ahb);
+ }
+
+ clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
+ if (IS_ERR(clk_xin)) {
+ dev_err(&pdev->dev, "clk_xin clock not found.\n");
+ return PTR_ERR(clk_xin);
+ }
+
+ ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk_xin);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable SD clock.\n");
+ goto clk_dis_ahb;
+ }
+
+ host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_err(&pdev->dev, "platform init failed (%d)\n", ret);
+ goto clk_disable_all;
+ }
+
+ sdhci_get_of_property(pdev);
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = sdhci_arasan;
+ pltfm_host->clk = clk_xin;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "platform register failed (%d)\n", ret);
+ goto err_pltfm_free;
+ }
+
+ return 0;
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+clk_disable_all:
+ clk_disable_unprepare(clk_xin);
+clk_dis_ahb:
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return ret;
+}
+
+static int sdhci_arasan_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ { .compatible = "arasan,sdhci-8.9a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
+static struct platform_driver sdhci_arasan_driver = {
+ .driver = {
+ .name = "sdhci-arasan",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_arasan_of_match,
+ .pm = &sdhci_arasan_dev_pm_ops,
+ },
+ .probe = sdhci_arasan_probe,
+ .remove = sdhci_arasan_remove,
+};
+
+module_platform_driver(sdhci_arasan_driver);
+
+MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_LICENSE("GPL");
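The timeout-clock callback in this new driver derives its rate by shifting the controller clock right by CLK_CTRL_TIMEOUT_MIN_EXP plus the divider field read from the clock-control register. As a worked example with an assumed clk_xin of 50 MHz (the rate is illustrative, not from the patch): a divider field of 0 gives 50,000,000 / 2^13 ≈ 6.1 kHz, and each increment of the field halves that again, down to about 50,000,000 / 2^28 ≈ 0.19 Hz at the maximum field value of 15.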
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
new file mode 100644
index 000000000000..f49666bcc52a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
+
+void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+{
+ u32 scratch_32;
+ int ret;
+ /* Improve write performance for SD3.0 */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14));
+ pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32);
+
+ /* Enable Link abnormal reset generating Reset */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 19) | (1 << 11));
+ scratch_32 |= (1 << 10);
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32);
+
+ /* set card power over current protection */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 |= (1 << 4);
+ pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32);
+
+ /* adjust the output delay for SD mode */
+ pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492);
+
+ /* Set the output voltage setting of Aux 1.2v LDO */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(3 << 12);
+ pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32);
+
+ /* Set Max power supply capability of SD host */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x01FE);
+ scratch_32 |= 0x00CC;
+ pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32);
+ /* Set DLL Tuning Window */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_TUNING_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FF);
+ scratch_32 |= 0x00000066;
+ pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32);
+
+ /* Set UHS2 T_EIDLE */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_UHS2_L1_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FC);
+ scratch_32 |= 0x00000084;
+ pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32);
+
+ /* Set UHS2 Termination */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 21) | (1 << 30));
+
+ /* Set RTD3 function disabled */
+ scratch_32 |= ((1 << 29) | (1 << 28));
+ pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
+
+ /* Set L1 Entrance Timer */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0xf0000000);
+ scratch_32 |= 0x30000000;
+ pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_MISC_CTRL4, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000f0000);
+ scratch_32 |= 0x00080000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
+
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ u32 reg;
+
+ chip = slot->chip;
+ host = slot->host;
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING);
+ if (reg & 0x1)
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+ if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
+ break;
+ /* set dll watch dog timer */
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
+ reg |= (1 << 12);
+ sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
+
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u8 scratch;
+ u32 scratch_32;
+
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_8220:
+ case PCI_DEVICE_ID_O2_8221:
+ case PCI_DEVICE_ID_O2_8320:
+ case PCI_DEVICE_ID_O2_8321:
+ /* This extra setup is required due to broken ADMA. */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set Multi 3 to VCC3V# */
+ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+ /* Disable CLK_REQ# support after media DET */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_CLKREQ, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+ /* Choose capabilities, enable SDMA. We have to write 0x01
+ * to the capabilities register first to unlock it.
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+ /* Disable ADMA1/2 */
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+ /* Disable the infinite transfer mode */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_INF_MOD, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set timeout CLK */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0xFF00);
+ scratch_32 |= 0x07E0C800;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLKREQ, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 |= 0x3;
+ pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0x1F3F070E);
+ scratch_32 |= 0x18270106;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+ /* Disable UHS1 function */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 &= ~(0xE0);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, scratch_32);
+
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
+ sdhci_pci_o2_fujin2_pci_init(chip);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, &scratch_32);
+
+ if ((scratch_32 & 0xff000000) == 0x01000000) {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x1F340000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+ } else {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x2c280000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+ scratch_32 |= (1 << 22);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4, scratch_32);
+ }
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
+
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
+{
+ sdhci_pci_o2_probe(chip);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
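Nearly every quirk in this file is the same read-modify-write dance on PCI config space, bracketed by clearing and setting bit 7 of O2_SD_LOCK_WP to unlock and re-lock the registers. A compact sketch of that pattern as a helper (o2_rmw_dword() is an illustrative wrapper, not a function from the patch):

	static int o2_rmw_dword(struct pci_dev *pdev, int reg, u32 clr, u32 set)
	{
		u32 val;
		int ret;

		ret = pci_read_config_dword(pdev, reg, &val);
		if (ret)
			return ret;

		val &= ~clr;
		val |= set;
		return pci_write_config_dword(pdev, reg, val);
	}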
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
new file mode 100644
index 000000000000..dbec4c933488
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_PCI_O2MICRO_H
+#define __SDHCI_PCI_O2MICRO_H
+
+#include "sdhci-pci.h"
+
+/*
+ * O2Micro device IDs
+ */
+
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
+
+extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+
+extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+
+#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 8f753811fc7a..0955777b6c7e 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -27,79 +27,8 @@
#include <linux/mmc/sdhci-pci-data.h>
#include "sdhci.h"
-
-/*
- * PCI device IDs
- */
-#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
-#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
-#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
-#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
-#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
-#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
-#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
-
-/*
- * PCI registers
- */
-
-#define PCI_SDHCI_IFPIO 0x00
-#define PCI_SDHCI_IFDMA 0x01
-#define PCI_SDHCI_IFVENDOR 0x02
-
-#define PCI_SLOT_INFO 0x40 /* 8 bits */
-#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
-#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
-
-#define MAX_SLOTS 8
-
-struct sdhci_pci_chip;
-struct sdhci_pci_slot;
-
-struct sdhci_pci_fixes {
- unsigned int quirks;
- unsigned int quirks2;
- bool allow_runtime_pm;
-
- int (*probe) (struct sdhci_pci_chip *);
-
- int (*probe_slot) (struct sdhci_pci_slot *);
- void (*remove_slot) (struct sdhci_pci_slot *, int);
-
- int (*suspend) (struct sdhci_pci_chip *);
- int (*resume) (struct sdhci_pci_chip *);
-};
-
-struct sdhci_pci_slot {
- struct sdhci_pci_chip *chip;
- struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int pci_bar;
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
-
- void (*hw_reset)(struct sdhci_host *host);
-};
-
-struct sdhci_pci_chip {
- struct pci_dev *pdev;
-
- unsigned int quirks;
- unsigned int quirks2;
- bool allow_runtime_pm;
- const struct sdhci_pci_fixes *fixes;
-
- int num_slots; /* Slots on controller */
- struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
-};
-
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
/*****************************************************************************\
* *
@@ -296,6 +225,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
@@ -360,6 +290,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
.allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -381,6 +312,7 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200,
.probe_slot = intel_mrfl_mmc_probe_slot,
};
@@ -393,65 +325,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
#define O2_SD_ADMA2 0xE7
#define O2_SD_INF_MOD 0xF1
-static int o2_probe(struct sdhci_pci_chip *chip)
-{
- int ret;
- u8 scratch;
-
- switch (chip->pdev->device) {
- case PCI_DEVICE_ID_O2_8220:
- case PCI_DEVICE_ID_O2_8221:
- case PCI_DEVICE_ID_O2_8320:
- case PCI_DEVICE_ID_O2_8321:
- /* This extra setup is required due to broken ADMA. */
- ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
- if (ret)
- return ret;
- scratch &= 0x7f;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
-
- /* Set Multi 3 to VCC3V# */
- pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
-
- /* Disable CLK_REQ# support after media DET */
- ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
- if (ret)
- return ret;
- scratch |= 0x20;
- pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
-
- /* Choose capabilities, enable SDMA. We have to write 0x01
- * to the capabilities register first to unlock it.
- */
- ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
- if (ret)
- return ret;
- scratch |= 0x01;
- pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
- pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
-
- /* Disable ADMA1/2 */
- pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
- pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
-
- /* Disable the infinite transfer mode */
- ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
- if (ret)
- return ret;
- scratch |= 0x08;
- pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
-
- /* Lock WP */
- ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
- if (ret)
- return ret;
- scratch |= 0x80;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
- }
-
- return 0;
-}
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -642,7 +515,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
}
static const struct sdhci_pci_fixes sdhci_o2 = {
- .probe = o2_probe,
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+ .resume = sdhci_pci_o2_resume,
};
static const struct sdhci_pci_fixes sdhci_jmicron = {
@@ -1055,6 +931,46 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_FUJIN2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
@@ -1457,6 +1373,15 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
sdhci_pci_add_own_cd(slot);
+ /*
+ * Check if the chip needs a separate GPIO for card detect to wake up
+ * from runtime suspend. If it is not there, don't allow runtime PM.
+ * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
+ */
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
+ !gpio_is_valid(slot->cd_gpio))
+ chip->allow_runtime_pm = false;
+
return slot;
remove:
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
new file mode 100644
index 000000000000..6d718719659e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -0,0 +1,78 @@
+#ifndef __SDHCI_PCI_H
+#define __SDHCI_PCI_H
+
+/*
+ * PCI device IDs
+ */
+
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
+
+/*
+ * PCI registers
+ */
+
+#define PCI_SDHCI_IFPIO 0x00
+#define PCI_SDHCI_IFDMA 0x01
+#define PCI_SDHCI_IFVENDOR 0x02
+
+#define PCI_SLOT_INFO 0x40 /* 8 bits */
+#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
+#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
+
+#define MAX_SLOTS 8
+
+struct sdhci_pci_chip;
+struct sdhci_pci_slot;
+
+struct sdhci_pci_fixes {
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ bool own_cd_for_runtime_pm;
+
+ int (*probe) (struct sdhci_pci_chip *);
+
+ int (*probe_slot) (struct sdhci_pci_slot *);
+ void (*remove_slot) (struct sdhci_pci_slot *, int);
+
+ int (*suspend) (struct sdhci_pci_chip *);
+ int (*resume) (struct sdhci_pci_chip *);
+};
+
+struct sdhci_pci_slot {
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ struct sdhci_pci_data *data;
+
+ int pci_bar;
+ int rst_n_gpio;
+ int cd_gpio;
+ int cd_irq;
+
+ void (*hw_reset)(struct sdhci_host *host);
+};
+
+struct sdhci_pci_chip {
+ struct pci_dev *pdev;
+
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ const struct sdhci_pci_fixes *fixes;
+
+ int num_slots; /* Slots on controller */
+ struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
+};
+
+#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index e2065a44dffc..bef250e95418 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -237,19 +237,21 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-static int sdhci_pltfm_suspend(struct device *dev)
+int sdhci_pltfm_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_suspend_host(host);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-static int sdhci_pltfm_resume(struct device *dev)
+int sdhci_pltfm_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
const struct dev_pm_ops sdhci_pltfm_pmops = {
.suspend = sdhci_pltfm_suspend,
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index e15ced79f7ed..04bc2481e5c3 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -111,6 +111,8 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
}
#ifdef CONFIG_PM
+extern int sdhci_pltfm_suspend(struct device *dev);
+extern int sdhci_pltfm_resume(struct device *dev);
extern const struct dev_pm_ops sdhci_pltfm_pmops;
#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
#else
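Exporting sdhci_pltfm_suspend()/sdhci_pltfm_resume() lets a platform glue driver keep the generic system-sleep behaviour while layering its own runtime PM callbacks on top, instead of taking SDHCI_PLTFM_PMOPS wholesale; the sdhci-esdhc-imx changes above do exactly that. A sketch of such a combined dev_pm_ops (my_runtime_suspend/my_runtime_resume are illustrative names):

	static const struct dev_pm_ops my_sdhci_pmops = {
		SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	};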
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 5b7b2eba8a54..a835898a68dd 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -198,6 +198,7 @@ static struct sdhci_tegra_soc_data soc_data_tegra114 = {
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bd8a0982aec3..9ddef4763541 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -898,8 +898,13 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
u16 mode;
struct mmc_data *data = cmd->data;
- if (data == NULL)
+ if (data == NULL) {
+ /* clear Auto CMD settings for no data CMDs */
+ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
+ SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
return;
+ }
WARN_ON(!host->data);
@@ -1013,7 +1018,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
mdelay(1);
}
- mod_timer(&host->timer, jiffies + 10 * HZ);
+ timeout = jiffies;
+ if (!cmd->data && cmd->cmd_timeout_ms > 9000)
+ timeout += DIV_ROUND_UP(cmd->cmd_timeout_ms, 1000) * HZ + HZ;
+ else
+ timeout += 10 * HZ;
+ mod_timer(&host->timer, timeout);
host->cmd = cmd;
@@ -1391,6 +1401,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmc->card->type == MMC_TYPE_MMC ?
MMC_SEND_TUNING_BLOCK_HS200 :
MMC_SEND_TUNING_BLOCK;
+
+ /* Set host->mrq to NULL here so that a pending
+ * finish_tasklet cannot complete it
+ * incorrectly.
+ */
+ host->mrq = NULL;
+
spin_unlock_irqrestore(&host->lock, flags);
sdhci_execute_tuning(mmc, tuning_opcode);
spin_lock_irqsave(&host->lock, flags);
@@ -1845,12 +1862,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
unsigned long timeout;
int err = 0;
bool requires_tuning_nonuhs = false;
+ unsigned long flags;
host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1870,15 +1887,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return 0;
}
if (host->ops->platform_execute_tuning) {
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
err = host->ops->platform_execute_tuning(host, opcode);
sdhci_runtime_pm_put(host);
return err;
@@ -1951,15 +1966,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->cmd = NULL;
host->mrq = NULL;
- spin_unlock(&host->lock);
- enable_irq(host->irq);
-
+ spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
wait_event_interruptible_timeout(host->buf_ready_int,
(host->tuning_done == 1),
msecs_to_jiffies(50));
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for "
@@ -2034,8 +2046,7 @@ out:
err = 0;
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return err;
@@ -3004,7 +3015,8 @@ int sdhci_add_host(struct sdhci_host *host)
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
- mmc->caps2 |= MMC_CAP2_HS200;
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
+ mmc->caps2 |= MMC_CAP2_HS200;
} else if (caps[1] & SDHCI_SUPPORT_SDR50)
mmc->caps |= MMC_CAP_UHS_SDR50;
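One of the sdhci.c changes above scales the software watchdog for long, data-less commands: instead of the fixed 10 s timer, a command declaring cmd_timeout_ms above 9000 gets DIV_ROUND_UP(cmd_timeout_ms, 1000) * HZ plus one extra second. As a worked example, an erase-style command with cmd_timeout_ms = 12500 arms the timer at jiffies + 14 * HZ (13 s rounded up, plus the 1 s margin), while ordinary commands keep the 10 s default.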
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d032b080ac4d..54730f4aac87 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -381,73 +381,75 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
desc, cookie);
}
-static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata)
+static struct dma_chan *
+sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata,
+ enum dma_transfer_direction direction)
{
- struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ unsigned int slave_id;
+ struct resource *res;
dma_cap_mask_t mask;
int ret;
- host->dma_active = false;
-
- if (pdata) {
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
- return;
- } else if (!host->pd->dev.of_node) {
- return;
- }
-
- /* We can only either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- pdata ? (void *)pdata->slave_id_tx : NULL,
- &host->pd->dev, "tx");
- dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
- host->chan_tx);
+ if (pdata)
+ slave_id = direction == DMA_MEM_TO_DEV
+ ? pdata->slave_id_tx : pdata->slave_id_rx;
+ else
+ slave_id = 0;
- if (!host->chan_tx)
- return;
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)slave_id, &host->pd->dev,
+ direction == DMA_MEM_TO_DEV ? "tx" : "rx");
+
+ dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
+ direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
+
+ if (!chan)
+ return NULL;
+
+ res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
/* In the OF case the driver will get the slave ID from the DT */
- if (pdata)
- cfg.slave_id = pdata->slave_id_tx;
- cfg.direction = DMA_MEM_TO_DEV;
+ cfg.slave_id = slave_id;
+ cfg.direction = direction;
cfg.dst_addr = res->start + MMCIF_CE_DATA;
cfg.src_addr = 0;
- ret = dmaengine_slave_config(host->chan_tx, &cfg);
- if (ret < 0)
- goto ecfgtx;
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret < 0) {
+ dma_release_channel(chan);
+ return NULL;
+ }
- host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- pdata ? (void *)pdata->slave_id_rx : NULL,
- &host->pd->dev, "rx");
- dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
- host->chan_rx);
+ return chan;
+}
- if (!host->chan_rx)
- goto erqrx;
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata)
+{
+ host->dma_active = false;
- if (pdata)
- cfg.slave_id = pdata->slave_id_rx;
- cfg.direction = DMA_DEV_TO_MEM;
- cfg.dst_addr = 0;
- cfg.src_addr = res->start + MMCIF_CE_DATA;
- ret = dmaengine_slave_config(host->chan_rx, &cfg);
- if (ret < 0)
- goto ecfgrx;
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
+ return;
+ }
- return;
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
+ if (!host->chan_tx)
+ return;
-ecfgrx:
- dma_release_channel(host->chan_rx);
- host->chan_rx = NULL;
-erqrx:
-ecfgtx:
- dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
+ host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
+ if (!host->chan_rx) {
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ }
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index f344659dceac..2d6ce257a273 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -33,6 +33,8 @@
#include "tmio_mmc.h"
+#define EXT_ACC 0xe4
+
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
};
@@ -54,7 +56,7 @@ static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
- int ret = clk_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
@@ -67,7 +69,7 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
@@ -133,9 +135,15 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
struct tmio_mmc_data *mmc_data;
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct tmio_mmc_host *host;
+ struct resource *res;
int irq, ret, i = 0;
bool multiplexed_isr = true;
struct tmio_mmc_dma *dma_priv;
+ u16 ver;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) {
@@ -206,11 +214,22 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->flags |= of_data->tmio_flags;
}
+ /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
+ mmc_data->bus_shift = resource_size(res) >> 9;
+
ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
if (ret < 0)
goto eprobe;
/*
+ * FIXME:
+ * this workaround could be handled in a cleaner way
+ */
+ ver = sd_ctrl_read16(host, CTL_VERSION);
+ if (ver == 0xCB0D)
+ sd_ctrl_write16(host, EXT_ACC, 1);
+
+ /*
* Allow one or more specific (named) ISRs or
* one or more multiplexed (un-named) ISRs.
*/
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8860d4d2bc22..1900abb04236 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -62,6 +62,7 @@ static int tmio_mmc_probe(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct tmio_mmc_data *pdata;
struct tmio_mmc_host *host;
+ struct resource *res;
int ret = -EINVAL, irq;
if (pdev->num_resources != 2)
@@ -84,6 +85,14 @@ static int tmio_mmc_probe(struct platform_device *pdev)
goto out;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ pdata->bus_shift = resource_size(res) >> 10;
+ pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
+
ret = tmio_mmc_host_probe(&host, pdev, pdata);
if (ret)
goto cell_disable;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 86fd21e00099..aaa9c7e9e730 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -58,7 +58,6 @@ enum tmio_mmc_power {
struct tmio_mmc_host {
void __iomem *ctl;
- unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
@@ -176,19 +175,19 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return readw(host->ctl + (addr << host->pdata->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ readsw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return readw(host->ctl + (addr << host->pdata->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->pdata->bus_shift)) << 16;
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
@@ -198,19 +197,19 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val
*/
if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val, host->ctl + (addr << host->pdata->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ writesw(host->ctl + (addr << host->pdata->bus_shift), buf, count);
}
static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ writew(val, host->ctl + (addr << host->pdata->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->pdata->bus_shift));
}
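
A minimal sketch (illustrative values only, not from the patch) of how the 32-bit accessors above split a value across two adjacent 16-bit registers, low half first:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical 16-bit halves at offsets addr and addr + 2 */
	uint16_t lo = 0xCB0D, hi = 0x0001;
	uint32_t val = lo | ((uint32_t)hi << 16);

	printf("combined 32-bit read: 0x%08X\n", (unsigned int)val); /* 0x0001CB0D */
	printf("split 32-bit write: lo=0x%04X hi=0x%04X\n",
	       (unsigned int)(val & 0xffff), (unsigned int)(val >> 16));
	return 0;
}
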
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 65edb4a62452..03e7b280cb4c 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -293,7 +293,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (pdata->dma->chan_priv_tx)
cfg.slave_id = pdata->dma->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
- cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
+ cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift);
cfg.src_addr = 0;
ret = dmaengine_slave_config(host->chan_tx, &cfg);
if (ret < 0)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index f3b2d8ca1eca..8d8abf23a611 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -161,10 +161,8 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100) {
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
}
@@ -176,14 +174,12 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100) {
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
}
@@ -191,16 +187,14 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
- struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
-
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
/* implicit BUG_ON(!res) */
- if (resource_size(res) > 0x100)
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- if (resource_size(res) > 0x100)
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
msleep(10);
}
@@ -944,17 +938,25 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
-static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
+static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
mmc_regulator_get_supply(mmc);
+ /* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
- mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34;
- else if (pdata->ocr_mask)
- dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+ mmc->ocr_avail = pdata->ocr_mask;
+
+ /*
+	 * Try again later:
+	 * there is a possibility that the regulator has not been probed yet.
+ */
+ if (!mmc->ocr_avail)
+ return -EPROBE_DEFER;
+
+ return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
@@ -1005,8 +1007,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- _host->bus_shift = resource_size(res_ctl) >> 10;
+ ret = tmio_mmc_init_ocr(_host);
+ if (ret < 0)
+ goto host_free;
_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
if (!_host->ctl) {
@@ -1016,14 +1019,13 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc->ops = &tmio_mmc_ops;
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
- mmc->caps2 = pdata->capabilities2;
+ mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- tmio_mmc_init_ocr(_host);
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5fab4e6e8301..5ebcda39f554 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -157,10 +157,11 @@ config MTD_BCM47XX_PARTS
comment "User Modules And Translation Layers"
+#
+# MTD block device support is select'ed if needed
+#
config MTD_BLKDEVS
- tristate "Common interface to block layer for MTD 'translation layers'"
- depends on BLOCK
- default n
+ tristate
config MTD_BLOCK
tristate "Caching block device access to MTD devices"
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 5a3942bf109c..96a33e3f7b00 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -264,7 +264,8 @@ static struct mtd_part_parser afs_parser = {
static int __init afs_parser_init(void)
{
- return register_mtd_parser(&afs_parser);
+ register_mtd_parser(&afs_parser);
+ return 0;
}
static void __exit afs_parser_exit(void)
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index ddc0a4287a4b..7c9172ad2621 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -139,7 +139,8 @@ static struct mtd_part_parser ar7_parser = {
static int __init ar7_parser_init(void)
{
- return register_mtd_parser(&ar7_parser);
+ register_mtd_parser(&ar7_parser);
+ return 0;
}
static void __exit ar7_parser_exit(void)
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 7a6384b0962a..de1eb92e42f5 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -23,10 +23,12 @@
* Amount of bytes we read when analyzing each block of flash memory.
* Set it big enough to allow detecting partition and reading important data.
*/
-#define BCM47XXPART_BYTES_TO_READ 0x404
+#define BCM47XXPART_BYTES_TO_READ 0x4e8
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define CFE_MAGIC 0x43464531 /* 1EFC */
#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
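
As a side illustration (assuming a little-endian CPU, as on the BCM47xx SoCs this parser targets; nothing here is taken from the patch beyond the constants), the hex magics above are ASCII tags read from flash as raw u32 words, which is why each #define carries a four-character comment:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Constants from the #defines above */
	const uint32_t magics[] = { 0x5246504D /* MPFR */, 0x43464531 /* 1EFC */ };
	char tag[5] = { 0 };

	for (unsigned int i = 0; i < 2; i++) {
		/* On a little-endian CPU the in-memory byte order of the u32
		 * is the ASCII tag the parser looks for in the flash dump. */
		memcpy(tag, &magics[i], 4);
		printf("0x%08X -> \"%s\"\n", (unsigned int)magics[i], tag);
	}
	return 0;
}
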
@@ -102,8 +104,9 @@ static int bcm47xxpart_parse(struct mtd_info *master,
continue;
}
- /* CFE has small NVRAM at 0x400 */
- if (buf[0x400 / 4] == NVRAM_HEADER) {
+ /* Magic or small NVRAM at 0x400 */
+ if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+ (buf[0x400 / 4] == NVRAM_HEADER)) {
bcm47xxpart_add_part(&parts[curr_part++], "boot",
offset, MTD_WRITEABLE);
continue;
@@ -190,6 +193,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset, 0);
continue;
}
+
+ /* Read middle of the block */
+ if (mtd_read(master, offset + 0x8000, 0x4,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+	/* Some devices (e.g. WNDR3700v3) don't have a standard 'MPFR' */
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
}
/* Look for NVRAM at the end of the last block. */
@@ -243,7 +261,8 @@ static struct mtd_part_parser bcm47xxpart_mtd_parser = {
static int __init bcm47xxpart_init(void)
{
- return register_mtd_parser(&bcm47xxpart_mtd_parser);
+ register_mtd_parser(&bcm47xxpart_mtd_parser);
+ return 0;
}
static void __exit bcm47xxpart_exit(void)
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 5c813907661c..b2443f7031c9 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -221,7 +221,8 @@ static struct mtd_part_parser bcm63xx_cfe_parser = {
static int __init bcm63xx_cfe_parser_init(void)
{
- return register_mtd_parser(&bcm63xx_cfe_parser);
+ register_mtd_parser(&bcm63xx_cfe_parser);
+ return 0;
}
static void __exit bcm63xx_cfe_parser_exit(void)
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 721caebbc5cc..3e829b37af8d 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -395,7 +395,8 @@ static int __init cmdline_parser_init(void)
{
if (mtdparts)
mtdpart_setup(mtdparts);
- return register_mtd_parser(&cmdline_parser);
+ register_mtd_parser(&cmdline_parser);
+ return 0;
}
static void __exit cmdline_parser_exit(void)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 4f091c1a9981..dd5e1018d37b 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2047,21 +2047,21 @@ static int __init docg3_probe(struct platform_device *pdev)
ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ress) {
dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
+ return ret;
}
- base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
- cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
- GFP_KERNEL);
+ cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
if (!cascade)
- goto nomem1;
+ return ret;
cascade->base = base;
mutex_init(&cascade->lock);
cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
DOC_ECC_BCH_PRIMPOLY);
if (!cascade->bch)
- goto nomem2;
+ return ret;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = doc_probe_device(cascade, floor, dev);
@@ -2101,11 +2101,6 @@ err_probe:
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
-nomem2:
- kfree(cascade);
-nomem1:
- iounmap(base);
-noress:
return ret;
}
@@ -2119,7 +2114,6 @@ static int __exit docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
- void __iomem *base = cascade->base;
int floor;
doc_unregister_sysfs(pdev, cascade);
@@ -2129,8 +2123,6 @@ static int __exit docg3_release(struct platform_device *pdev)
doc_release_device(cascade->floors[floor]);
free_bch(docg3->cascade->bch);
- kfree(cascade);
- iounmap(base);
return 0;
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7eda71dbc183..ad1913909702 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -41,6 +41,7 @@
#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
+#define OPCODE_QUAD_READ 0x6b /* Read data bytes */
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
@@ -48,10 +49,12 @@
#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+#define OPCODE_RDCR 0x35 /* Read configuration register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */
#define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */
+#define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes */
#define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */
#define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */
@@ -76,6 +79,11 @@
#define SR_BP2 0x10 /* Block protect 2 */
#define SR_SRWD 0x80 /* SR write protect */
+#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
+
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 6
@@ -84,6 +92,12 @@
/****************************************************************************/
+enum read_type {
+ M25P80_NORMAL = 0,
+ M25P80_FAST,
+ M25P80_QUAD,
+};
+
struct m25p {
struct spi_device *spi;
struct mutex lock;
@@ -94,7 +108,7 @@ struct m25p {
u8 read_opcode;
u8 program_opcode;
u8 *command;
- bool fast_read;
+ enum read_type flash_read;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -131,6 +145,26 @@ static int read_sr(struct m25p *flash)
}
/*
+ * Read the configuration register and return its value.
+ * Returns negative if an error occurred.
+ */
+static int read_cr(struct m25p *flash)
+{
+ u8 code = OPCODE_RDCR;
+ int ret;
+ u8 val;
+
+ ret = spi_write_then_read(flash->spi, &code, 1, &val, 1);
+ if (ret < 0) {
+ dev_err(&flash->spi->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
* Write status register 1 byte
* Returns negative if error occurred.
*/
@@ -220,6 +254,93 @@ static int wait_till_ready(struct m25p *flash)
}
/*
+ * Write the status register and configuration register with 2 bytes.
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Returns negative if an error occurred.
+ */
+static int write_sr_cr(struct m25p *flash, u16 val)
+{
+ flash->command[0] = OPCODE_WRSR;
+ flash->command[1] = val & 0xff;
+ flash->command[2] = (val >> 8);
+
+ return spi_write(flash->spi, flash->command, 3);
+}
+
+static int macronix_quad_enable(struct m25p *flash)
+{
+ int ret, val;
+ u8 cmd[2];
+ cmd[0] = OPCODE_WRSR;
+
+ val = read_sr(flash);
+ cmd[1] = val | SR_QUAD_EN_MX;
+ write_enable(flash);
+
+ spi_write(flash->spi, &cmd, 2);
+
+ if (wait_till_ready(flash))
+ return 1;
+
+ ret = read_sr(flash);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(&flash->spi->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int spansion_quad_enable(struct m25p *flash)
+{
+ int ret;
+ int quad_en = CR_QUAD_EN_SPAN << 8;
+
+ write_enable(flash);
+
+ ret = write_sr_cr(flash, quad_en);
+ if (ret < 0) {
+ dev_err(&flash->spi->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ /* read back and check it */
+ ret = read_cr(flash);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(&flash->spi->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_quad_mode(struct m25p *flash, u32 jedec_id)
+{
+ int status;
+
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ status = macronix_quad_enable(flash);
+ if (status) {
+ dev_err(&flash->spi->dev,
+ "Macronix quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ default:
+ status = spansion_quad_enable(flash);
+ if (status) {
+ dev_err(&flash->spi->dev,
+ "Spansion quad-read not enabled\n");
+ return -EINVAL;
+ }
+ return status;
+ }
+}
+
+/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
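
A rough standalone sketch (the example status-register value 0x00 is hypothetical) of the 16-bit payload that write_sr_cr() sends: the status register goes in the low byte and the configuration register in the high byte, which is how spansion_quad_enable() sets CR_QUAD_EN_SPAN without disturbing the status bits, while Macronix parts keep their quad-enable bit in the status register itself:

#include <stdio.h>
#include <stdint.h>

#define SR_QUAD_EN_MX   0x40 /* Macronix: quad enable lives in the status register */
#define CR_QUAD_EN_SPAN 0x02 /* Spansion: quad enable lives in the configuration register */

int main(void)
{
	/* Spansion path: write_sr_cr() payload, SR in byte 0, CR in byte 1 */
	uint16_t sr = 0x00; /* hypothetical current status bits */
	uint16_t payload = sr | (CR_QUAD_EN_SPAN << 8);
	printf("WRSR payload: SR=0x%02x CR=0x%02x\n", payload & 0xff, payload >> 8);

	/* Macronix path: OR the quad-enable bit into the status register */
	uint8_t new_sr = 0x00 | SR_QUAD_EN_MX;
	printf("Macronix SR after enable: 0x%02x\n", new_sr);
	return 0;
}
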
@@ -350,6 +471,35 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
}
/*
+ * Dummy cycle calculation for different types of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int m25p80_dummy_cycles_read(struct m25p *flash)
+{
+ switch (flash->flash_read) {
+ case M25P80_FAST:
+ case M25P80_QUAD:
+ return 1;
+ case M25P80_NORMAL:
+ return 0;
+ default:
+ dev_err(&flash->spi->dev, "No valid read type supported\n");
+ return -1;
+ }
+}
+
+static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
+{
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/*
* Read an address range from the flash chip. The address range
* may be any size provided it is within the physical boundaries.
*/
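
A small worked example (hypothetical helper names and a 3-byte address width) of how the dummy-byte count above feeds the command-phase length in m25p80_read(): one opcode byte plus the address bytes plus the dummy bytes, with the data phase clocked on rx_nbits lines for quad reads:

#include <stdio.h>

enum read_type { M25P80_NORMAL = 0, M25P80_FAST, M25P80_QUAD };

static int dummy_bytes(enum read_type t)
{
	return (t == M25P80_NORMAL) ? 0 : 1; /* fast and quad reads need one dummy byte */
}

static unsigned int rx_nbits(enum read_type t)
{
	return (t == M25P80_QUAD) ? 4 : 0; /* 0 means the SPI core default, one line */
}

int main(void)
{
	int addr_width = 3;             /* hypothetical 3-byte-address flash */
	enum read_type t = M25P80_QUAD;

	/* mirrors t[0].len = m25p_cmdsz(flash) + dummy in the read path */
	int cmd_len = 1 + addr_width + dummy_bytes(t);

	printf("command phase: %d bytes, data phase on %u line(s)\n",
	       cmd_len, rx_nbits(t) ? rx_nbits(t) : 1);
	return 0;
}
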
@@ -360,6 +510,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer t[2];
struct spi_message m;
uint8_t opcode;
+ int dummy;
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
@@ -367,11 +518,18 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_message_init(&m);
memset(t, 0, (sizeof t));
+ dummy = m25p80_dummy_cycles_read(flash);
+ if (dummy < 0) {
+ dev_err(&flash->spi->dev, "No valid read command supported\n");
+ return -EINVAL;
+ }
+
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
+ t[0].len = m25p_cmdsz(flash) + dummy;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
+ t[1].rx_nbits = m25p80_rx_nbits(flash);
t[1].len = len;
spi_message_add_tail(&t[1], &m);
@@ -391,8 +549,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - m25p_cmdsz(flash) -
- (flash->fast_read ? 1 : 0);
+ *retlen = m.actual_length - m25p_cmdsz(flash) - dummy;
mutex_unlock(&flash->lock);
@@ -698,6 +855,7 @@ struct flash_info {
#define SST_WRITE 0x04 /* use SST byte programming */
#define M25P_NO_FR 0x08 /* Can't do fastread */
#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
+#define M25P80_QUAD_READ 0x20 /* Flash supports Quad Read */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -775,7 +933,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
/* Micron */
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
@@ -795,8 +953,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_QUAD_READ) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
@@ -851,6 +1009,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
@@ -937,6 +1096,7 @@ static int m25p_probe(struct spi_device *spi)
unsigned i;
struct mtd_part_parser_data ppdata;
struct device_node *np = spi->dev.of_node;
+ int ret;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -1051,22 +1211,46 @@ static int m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
- if (np)
+ if (np) {
/* If we were instantiated by DT, use it */
- flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
- else
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ flash->flash_read = M25P80_FAST;
+ else
+ flash->flash_read = M25P80_NORMAL;
+ } else {
/* If we weren't instantiated by DT, default to fast-read */
- flash->fast_read = true;
+ flash->flash_read = M25P80_FAST;
+ }
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & M25P_NO_FR)
- flash->fast_read = false;
+ flash->flash_read = M25P80_NORMAL;
+
+ /* Quad-read mode takes precedence over fast/normal */
+ if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
+ ret = set_quad_mode(flash, info->jedec_id);
+ if (ret) {
+ dev_err(&flash->spi->dev, "quad mode not supported\n");
+ return ret;
+ }
+ flash->flash_read = M25P80_QUAD;
+ }
/* Default commands */
- if (flash->fast_read)
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ flash->read_opcode = OPCODE_QUAD_READ;
+ break;
+ case M25P80_FAST:
flash->read_opcode = OPCODE_FAST_READ;
- else
+ break;
+ case M25P80_NORMAL:
flash->read_opcode = OPCODE_NORM_READ;
+ break;
+ default:
+ dev_err(&flash->spi->dev, "No Read opcode defined\n");
+ return -EINVAL;
+ }
flash->program_opcode = OPCODE_PP;
@@ -1077,9 +1261,17 @@ static int m25p_probe(struct spi_device *spi)
flash->addr_width = 4;
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
/* Dedicated 4-byte command set */
- flash->read_opcode = flash->fast_read ?
- OPCODE_FAST_READ_4B :
- OPCODE_NORM_READ_4B;
+ switch (flash->flash_read) {
+ case M25P80_QUAD:
+ flash->read_opcode = OPCODE_QUAD_READ_4B;
+ break;
+ case M25P80_FAST:
+ flash->read_opcode = OPCODE_FAST_READ_4B;
+ break;
+ case M25P80_NORMAL:
+ flash->read_opcode = OPCODE_NORM_READ_4B;
+ break;
+ }
flash->program_opcode = OPCODE_PP_4B;
/* No small sector erase for 4-byte command set */
flash->erase_opcode = OPCODE_SE_4B;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 182849d39c61..5c8b322ba904 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -205,7 +205,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->type = MTD_RAM;
mtd->flags = MTD_CAP_RAM;
mtd->size = fixsize;
- mtd->name = (char *)ms02nv_name;
+ mtd->name = ms02nv_name;
mtd->owner = THIS_MODULE;
mtd->_read = ms02nv_read;
mtd->_write = ms02nv_write;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 4a47b0266d4e..624069de4f28 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -669,7 +669,6 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
if (!err)
return 0;
- spi_set_drvdata(spi, NULL);
kfree(priv);
return err;
}
@@ -899,10 +898,8 @@ static int dataflash_remove(struct spi_device *spi)
pr_debug("%s: remove\n", dev_name(&spi->dev));
status = mtd_device_unregister(&flash->mtd);
- if (status == 0) {
- spi_set_drvdata(spi, NULL);
+ if (status == 0)
kfree(flash);
- }
return status;
}
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index ec59d65897fb..8e285089229c 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -92,7 +92,7 @@ static void __exit cleanup_mtdram(void)
}
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
- unsigned long size, char *name)
+ unsigned long size, const char *name)
{
memset(mtd, 0, sizeof(*mtd));
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 2ef19aa0086b..d38b6460d505 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -388,7 +388,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
wake_up(&chip->wq);
}
-int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
@@ -469,7 +469,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
return ret;
}
-int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
@@ -748,34 +748,6 @@ static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
-int word_program(struct map_info *map, loff_t adr, uint32_t curval)
-{
- int ret;
- struct lpddr_private *lpddr = map->fldrv_priv;
- int chipnum = adr >> lpddr->chipshift;
- struct flchip *chip = &lpddr->chips[chipnum];
-
- mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, FL_WRITING);
- if (ret) {
- mutex_unlock(&chip->mutex);
- return ret;
- }
-
- send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
-
- ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
- if (ret) {
- printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
- map->name, adr, curval);
- goto out;
- }
-
-out: put_chip(map, chip);
- mutex_unlock(&chip->mutex);
- return ret;
-}
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 10debfea81e7..d6b2451eab1d 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -162,13 +163,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
if (plat->exit)
plat->exit();
@@ -194,7 +188,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info),
+ GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
@@ -220,20 +215,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->map.write = ixp4xx_probe_write16;
info->map.copy_from = ixp4xx_copy_from;
- info->res = request_mem_region(dev->resource->start,
- resource_size(dev->resource),
- "IXP4XXFlash");
- if (!info->res) {
- printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.virt = ioremap(dev->resource->start,
- resource_size(dev->resource));
- if (!info->map.virt) {
- printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
- err = -EIO;
+ info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
goto Error;
}
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index d7ac65d1d569..93c507a6f862 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -123,24 +123,28 @@ ltq_mtd_probe(struct platform_device *pdev)
return -ENODEV;
}
- ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
+ ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+ if (!ltq_mtd)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
dev_err(&pdev->dev, "failed to get memory resource\n");
- err = -ENOENT;
- goto err_out;
+ return -ENOENT;
}
- ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+ ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+ GFP_KERNEL);
+ if (!ltq_mtd->map)
+ return -ENOMEM;
+
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
- if (IS_ERR(ltq_mtd->map->virt)) {
- err = PTR_ERR(ltq_mtd->map->virt);
- goto err_out;
- }
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
@@ -155,8 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
- err = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -177,10 +180,6 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_free:
- kfree(ltq_mtd->map);
-err_out:
- kfree(ltq_mtd);
return err;
}
@@ -189,13 +188,9 @@ ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
- if (ltq_mtd) {
- if (ltq_mtd->mtd) {
- mtd_device_unregister(ltq_mtd->mtd);
- map_destroy(ltq_mtd->mtd);
- }
- kfree(ltq_mtd->map);
- kfree(ltq_mtd);
+ if (ltq_mtd && ltq_mtd->mtd) {
+ mtd_device_unregister(ltq_mtd->mtd);
+ map_destroy(ltq_mtd->mtd);
}
return 0;
}
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 0f55589a56b8..9aad854fe912 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -61,7 +61,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->map.name = (char *) flash->name;
+ info->map.name = flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
info->map.size = resource_size(res);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index d467f3b11c96..39cc4181f025 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -75,7 +75,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->name = of_get_property(dp, "model", NULL);
if (up->name && 0 < strlen(up->name))
- up->map.name = (char *)up->name;
+ up->map.name = up->name;
up->map.phys = op->resource[0].start;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 92311a56939f..34c0b16aed5c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -313,15 +313,7 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_bitflip_threshold.attr,
NULL,
};
-
-static struct attribute_group mtd_group = {
- .attrs = mtd_attrs,
-};
-
-static const struct attribute_group *mtd_groups[] = {
- &mtd_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(mtd);
static struct device_type mtd_devtype = {
.name = "mtd",
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6e732c3820c1..3c7d6d7623c1 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -534,7 +534,7 @@ out_register:
return slave;
}
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
@@ -672,22 +672,19 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
-int register_mtd_parser(struct mtd_part_parser *p)
+void register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_add(&p->list, &part_parsers);
spin_unlock(&part_parser_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
-int deregister_mtd_parser(struct mtd_part_parser *p)
+void deregister_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_del(&p->list);
spin_unlock(&part_parser_lock);
- return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 93ae6a6d94f7..a4bee41ad5cb 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -95,7 +95,7 @@ config MTD_NAND_OMAP2
platforms.
config MTD_NAND_OMAP_BCH
- depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+ depends on MTD_NAND_OMAP2
tristate "Support hardware based BCH error correction"
default n
select BCH
@@ -326,11 +326,11 @@ config MTD_NAND_ATMEL
on Atmel AT91 and AVR32 processors.
config MTD_NAND_PXA3xx
- tristate "Support for NAND flash devices on PXA3xx"
+ tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION
help
This enables the driver for the NAND flash device found on
- PXA3xx processors
+ PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
@@ -428,6 +428,7 @@ config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
depends on MTD_NAND && FSL_SOC
select FSL_IFC
+ select MEMORY
help
	  Various Freescale chips, e.g. P1010, include a NAND Flash machine
with built-in hardware ECC capabilities.
@@ -458,17 +459,17 @@ config MTD_NAND_MXC
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
- depends on SUPERH || ARCH_SHMOBILE
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
	  Several Renesas SuperH CPUs have FLCTL. This option enables support
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci SoC"
- depends on ARCH_DAVINCI
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
help
Enable the driver for NAND flash chips on Texas Instruments
- DaVinci processors.
+ DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
tristate "NAND Flash support for TXx9 SoC"
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 59f08c44abdb..c36e9b84487c 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1961,10 +1961,8 @@ static int atmel_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+ if (!host)
return -ENOMEM;
- }
res = platform_driver_register(&atmel_nand_nfc_driver);
if (res)
@@ -2062,14 +2060,14 @@ static int atmel_nand_probe(struct platform_device *pdev)
}
if (gpio_get_value(host->board.det_pin)) {
- printk(KERN_INFO "No SmartMedia card inserted.\n");
+ dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
res = -ENXIO;
goto err_no_card;
}
}
if (host->board.on_flash_bbt || on_flash_bbt) {
- printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ dev_info(&pdev->dev, "Use On Flash BBT\n");
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index ae8dd7c41039..2880d888cfc5 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -418,10 +418,8 @@ static int au1550nd_probe(struct platform_device *pdev)
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- dev_err(&pdev->dev, "no memory for NAND context\n");
+ if (!ctx)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -480,6 +478,8 @@ static int au1550nd_probe(struct platform_device *pdev)
mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
+ platform_set_drvdata(pdev, ctx);
+
return 0;
out3:
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 2c42e125720f..94f55dbde995 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -745,7 +745,6 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto out_err_kzalloc;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index c34985a55101..f2f64addb5e8 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -640,10 +640,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
- if (!mtd) {
- dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
+ if (!mtd)
return -ENOMEM;
- }
cafe = (void *)(&mtd[1]);
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 39b2ef848811..66ec95e6ca6c 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -164,7 +164,6 @@ static int __init cmx270_init(void)
sizeof(struct nand_chip),
GFP_KERNEL);
if (!cmx270_nand_mtd) {
- pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
ret = -ENOMEM;
goto err_kzalloc;
}
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index d469a9a1dea0..88109d375ae7 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -199,7 +199,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Allocate memory for MTD device structure and private data */
new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!new_mtd) {
- printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
err = -ENOMEM;
goto out;
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index b77a01efb483..a4989ec6292e 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -487,7 +488,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
* ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_small __initconst = {
+static struct nand_ecclayout hwecc4_small = {
.eccbytes = 10,
.eccpos = { 0, 1, 2, 3, 4,
/* offset 5 holds the badblock marker */
@@ -503,7 +504,7 @@ static struct nand_ecclayout hwecc4_small __initconst = {
* storing ten ECC bytes plus the manufacturer's bad block marker byte,
* and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_2048 __initconst = {
+static struct nand_ecclayout hwecc4_2048 = {
.eccbytes = 40,
.eccpos = {
/* at the end of spare sector */
@@ -534,17 +535,19 @@ static struct davinci_nand_pdata
struct davinci_nand_pdata *pdata;
const char *mode;
u32 prop;
- int len;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct davinci_nand_pdata),
GFP_KERNEL);
pdev->dev.platform_data = pdata;
if (!pdata)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-chipselect", &prop))
pdev->id = prop;
+ else
+ return ERR_PTR(-EINVAL);
+
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-mask-ale", &prop))
pdata->mask_ale = prop;
@@ -555,6 +558,8 @@ static struct davinci_nand_pdata
"ti,davinci-mask-chipsel", &prop))
pdata->mask_chipsel = prop;
if (!of_property_read_string(pdev->dev.of_node,
+ "nand-ecc-mode", &mode) ||
+ !of_property_read_string(pdev->dev.of_node,
"ti,davinci-ecc-mode", &mode)) {
if (!strncmp("none", mode, 4))
pdata->ecc_mode = NAND_ECC_NONE;
@@ -566,12 +571,16 @@ static struct davinci_nand_pdata
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-ecc-bits", &prop))
pdata->ecc_bits = prop;
- if (!of_property_read_u32(pdev->dev.of_node,
+
+ prop = of_get_nand_bus_width(pdev->dev.of_node);
+ if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
"ti,davinci-nand-buswidth", &prop))
if (prop == 16)
pdata->options |= NAND_BUSWIDTH_16;
- if (of_find_property(pdev->dev.of_node,
- "ti,davinci-nand-use-bbt", &len))
+ if (of_property_read_bool(pdev->dev.of_node,
+ "nand-on-flash-bbt") ||
+ of_property_read_bool(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
}
@@ -585,7 +594,7 @@ static struct davinci_nand_pdata
}
#endif
-static int __init nand_davinci_probe(struct platform_device *pdev)
+static int nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
@@ -598,6 +607,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
nand_ecc_modes_t ecc_mode;
pdata = nand_davinci_get_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
/* insist on board-specific configuration */
if (!pdata)
return -ENODEV;
@@ -607,11 +619,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "unable to allocate memory\n");
- ret = -ENOMEM;
- goto err_nomem;
- }
+ if (!info)
+ return -ENOMEM;
platform_set_drvdata(pdev, info);
@@ -619,19 +628,23 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res1 || !res2) {
dev_err(&pdev->dev, "resource missing\n");
- ret = -EINVAL;
- goto err_nomem;
+ return -EINVAL;
}
vaddr = devm_ioremap_resource(&pdev->dev, res1);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_ioremap;
- }
- base = devm_ioremap_resource(&pdev->dev, res2);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto err_ioremap;
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ /*
+	 * This register range is used to set up NAND settings. With the
+	 * TI AEMIF driver, the same memory address range is already requested
+	 * by AEMIF, so we cannot request it twice; just ioremap it.
+	 * The AEMIF and NAND drivers do not use the same registers in this range.
+ */
+ base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+ return -EADDRNOTAVAIL;
}
info->dev = &pdev->dev;
@@ -699,7 +712,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
if (ret == -EBUSY)
- goto err_ecc;
+ return ret;
info->chip.ecc.calculate = nand_davinci_calculate_4bit;
info->chip.ecc.correct = nand_davinci_correct_4bit;
@@ -715,8 +728,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.strength = pdata->ecc_bits;
break;
default:
- ret = -EINVAL;
- goto err_ecc;
+ return -EINVAL;
}
info->chip.ecc.mode = ecc_mode;
@@ -724,7 +736,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- goto err_clk;
+ return ret;
}
ret = clk_prepare_enable(info->clk);
@@ -753,7 +765,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
- goto err_timing;
+ goto err;
}
spin_lock_irq(&davinci_nand_lock);
@@ -769,7 +781,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
- goto err_scan;
+ goto err;
}
/* Update ECC layout if needed ... for 1-bit HW ECC, the default
@@ -783,7 +795,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (!chunks || info->mtd.oobsize < 16) {
dev_dbg(&pdev->dev, "too small\n");
ret = -EINVAL;
- goto err_scan;
+ goto err;
}
/* For small page chips, preserve the manufacturer's
@@ -814,7 +826,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
"for 4KiB-page NAND\n");
ret = -EIO;
- goto err_scan;
+ goto err;
syndrome_done:
info->chip.ecc.layout = &info->ecclayout;
@@ -822,7 +834,7 @@ syndrome_done:
ret = nand_scan_tail(&info->mtd);
if (ret < 0)
- goto err_scan;
+ goto err;
if (pdata->parts)
ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
@@ -835,7 +847,7 @@ syndrome_done:
NULL, 0);
}
if (ret < 0)
- goto err_scan;
+ goto err;
val = davinci_nand_readl(info, NRCSR_OFFSET);
dev_info(&pdev->dev, "controller rev. %d.%d\n",
@@ -843,8 +855,7 @@ syndrome_done:
return 0;
-err_scan:
-err_timing:
+err:
clk_disable_unprepare(info->clk);
err_clk_enable:
@@ -852,15 +863,10 @@ err_clk_enable:
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
-
-err_ecc:
-err_clk:
-err_ioremap:
-err_nomem:
return ret;
}
-static int __exit nand_davinci_remove(struct platform_device *pdev)
+static int nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
@@ -877,7 +883,8 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
}
static struct platform_driver nand_davinci_driver = {
- .remove = __exit_p(nand_davinci_remove),
+ .probe = nand_davinci_probe,
+ .remove = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
.owner = THIS_MODULE,
@@ -886,7 +893,7 @@ static struct platform_driver nand_davinci_driver = {
};
MODULE_ALIAS("platform:davinci_nand");
-module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
+module_platform_driver(nand_davinci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 370b9dd7a278..c07cd573ad3a 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -125,7 +125,6 @@ static void reset_buf(struct denali_nand_info *denali)
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
- BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
denali->buf.buf[denali->buf.tail++] = byte;
}
@@ -897,7 +896,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
/* this function examines buffers to see if they contain data that
* indicate that the buffer is part of an erased region of flash.
*/
-bool is_erased(uint8_t *buf, int len)
+static bool is_erased(uint8_t *buf, int len)
{
int i = 0;
for (i = 0; i < len; i++)
@@ -1429,20 +1428,12 @@ int denali_init(struct denali_nand_info *denali)
}
}
- /* Is 32-bit DMA supported? */
- ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
- if (ret) {
- pr_err("Spectra: no usable DMA configuration\n");
- return ret;
- }
- denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ /* allocate a temporary buffer for nand_scan_ident() */
+ denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!denali->buf.buf)
+ return -ENOMEM;
- if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
- dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
- return -EIO;
- }
denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
@@ -1475,12 +1466,29 @@ int denali_init(struct denali_nand_info *denali)
goto failed_req_irq;
}
- /* MTD supported page sizes vary by kernel. We validate our
- * kernel supports the device here.
- */
- if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
- ret = -ENODEV;
- pr_err("Spectra: device size not supported by this version of MTD.");
+ /* allocate the right size buffer now */
+ devm_kfree(denali->dev, denali->buf.buf);
+ denali->buf.buf = devm_kzalloc(denali->dev,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ GFP_KERNEL);
+ if (!denali->buf.buf) {
+ ret = -ENOMEM;
+ goto failed_req_irq;
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("Spectra: no usable DMA configuration\n");
+ goto failed_req_irq;
+ }
+
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ ret = -EIO;
goto failed_req_irq;
}
@@ -1602,7 +1610,8 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
denali_irq_cleanup(denali->irq, denali);
- dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ dma_unmap_single(denali->dev, denali->buf.dma_buf,
+ denali->mtd.writesize + denali->mtd.oobsize,
DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index cec5712862c9..966817462421 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -455,12 +455,10 @@
#define ECC_SECTOR_SIZE 512
-#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-
struct nand_buf {
int head;
int tail;
- uint8_t buf[DENALI_BUF_SIZE];
+ uint8_t *buf;
dma_addr_t dma_buf;
};
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 92530244e2cb..babb02c4b220 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -108,7 +108,7 @@ static int denali_dt_probe(struct platform_device *ofdev)
denali->dev->dma_mask = NULL;
}
- dt->clk = clk_get(&ofdev->dev, NULL);
+ dt->clk = devm_clk_get(&ofdev->dev, NULL);
if (IS_ERR(dt->clk)) {
dev_err(&ofdev->dev, "no clk available\n");
return PTR_ERR(dt->clk);
@@ -124,7 +124,6 @@ static int denali_dt_probe(struct platform_device *ofdev)
out_disable_clk:
clk_disable_unprepare(dt->clk);
- clk_put(dt->clk);
return ret;
}
@@ -135,7 +134,6 @@ static int denali_dt_remove(struct platform_device *ofdev)
denali_remove(&dt->denali);
clk_disable(dt->clk);
- clk_put(dt->clk);
return 0;
}
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 033f177a6369..6e2f387b823f 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -21,7 +21,7 @@
#define DENALI_NAND_NAME "denali-nand-pci"
/* List of platforms this NAND controller has be integrated into */
-static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+static const struct pci_device_id denali_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
{ /* end: all zeroes */ }
@@ -131,7 +131,6 @@ static struct pci_driver denali_pci_driver = {
static int denali_init_pci(void)
{
- pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
return pci_register_driver(&denali_pci_driver);
}
module_init(denali_init_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index b68a4959f700..fec31d71b84e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1058,7 +1058,6 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
@@ -1166,7 +1165,6 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
return 0;
}
@@ -1440,10 +1438,13 @@ static int __init doc_probe(unsigned long physadr)
int reg, len, numchips;
int ret = 0;
+ if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL))
+ return -EBUSY;
virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
if (!virtadr) {
printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
- return -EIO;
+ ret = -EIO;
+ goto error_ioremap;
}
/* It's not possible to cleanly detect the DiskOnChip - the
@@ -1561,7 +1562,6 @@ static int __init doc_probe(unsigned long physadr)
sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
mtd = kzalloc(len, GFP_KERNEL);
if (!mtd) {
- printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
ret = -ENOMEM;
goto fail;
}
@@ -1629,6 +1629,10 @@ static int __init doc_probe(unsigned long physadr)
WriteDOC(save_control, virtadr, DOCControl);
fail:
iounmap(virtadr);
+
+error_ioremap:
+ release_mem_region(physadr, DOC_IOREMAP_LEN);
+
return ret;
}
@@ -1645,6 +1649,7 @@ static void release_nanddoc(void)
nextmtd = doc->nextdoc;
nand_release(mtd);
iounmap(doc->virtadr);
+ release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
kfree(mtd);
}
}
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c966fc7474ce..bcf60800c3ce 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -847,7 +847,6 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
if (!fsl_lbc_ctrl_dev->nand) {
elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
if (!elbc_fcm_ctrl) {
- dev_err(dev, "failed to allocate memory\n");
mutex_unlock(&fsl_elbc_nand_mutex);
ret = -ENOMEM;
goto err;
@@ -875,7 +874,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
}
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 43355779cff5..50d9161c4faf 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -30,7 +30,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand_ecc.h>
-#include <asm/fsl_ifc.h>
+#include <linux/fsl_ifc.h>
#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
@@ -1060,7 +1060,6 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
if (!fsl_ifc_ctrl_dev->nand) {
ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
if (!ifc_nand_ctrl) {
- dev_err(&dev->dev, "failed to allocate memory\n");
mutex_unlock(&fsl_ifc_nand_mutex);
return -ENOMEM;
}
@@ -1101,7 +1100,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
IFC_NAND_EVTER_INTR_FTOERIR_EN |
IFC_NAND_EVTER_INTR_WPERIR_EN,
&ifc->ifc_nand.nand_evter_intr_en);
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 8b2752263db9..1550692973dc 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -889,10 +889,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
pdata->nand_timings = devm_kzalloc(&pdev->dev,
sizeof(*pdata->nand_timings), GFP_KERNEL);
- if (!pdata->nand_timings) {
- dev_err(&pdev->dev, "no memory for nand_timing\n");
+ if (!pdata->nand_timings)
return -ENOMEM;
- }
of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
sizeof(*pdata->nand_timings));
@@ -950,10 +948,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure\n");
+ if (!host)
return -ENOMEM;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
host->data_va = devm_ioremap_resource(&pdev->dev, res);
@@ -1108,8 +1104,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->ecc_place = &fsmc_ecc4_lp_place;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
} else {
@@ -1124,8 +1120,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.layout = &fsmc_ecc1_128_layout;
break;
default:
- printk(KERN_WARNING "No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index e826f898241f..8e6148aa4539 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -132,13 +132,17 @@ static int gpio_nand_get_config_of(const struct device *dev,
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
- struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ struct resource *r;
u64 addr;
- if (!r || of_property_read_u64(pdev->dev.of_node,
+ if (of_property_read_u64(pdev->dev.of_node,
"gpio-control-nand,io-sync-reg", &addr))
return NULL;
+ r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
r->start = addr;
r->end = r->start + 0x3;
r->flags = IORESOURCE_MEM;
@@ -211,10 +215,8 @@ static int gpio_nand_probe(struct platform_device *pdev)
return -EINVAL;
gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
- if (!gpiomtd) {
- dev_err(&pdev->dev, "failed to create NAND MTD\n");
+ if (!gpiomtd)
return -ENOMEM;
- }
chip = &gpiomtd->nand_chip;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index aaced29727fb..dd1df605a1d6 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -20,6 +20,7 @@
*/
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/slab.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
@@ -207,30 +208,41 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
u32 reg;
int i;
- pr_err("Show GPMI registers :\n");
+ dev_err(this->dev, "Show GPMI registers :\n");
for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
reg = readl(r->gpmi_regs + i * 0x10);
- pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
/* start to print out the BCH info */
- pr_err("Show BCH registers :\n");
+ dev_err(this->dev, "Show BCH registers :\n");
for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
reg = readl(r->bch_regs + i * 0x10);
- pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
- pr_err("BCH Geometry :\n");
- pr_err("GF length : %u\n", geo->gf_len);
- pr_err("ECC Strength : %u\n", geo->ecc_strength);
- pr_err("Page Size in Bytes : %u\n", geo->page_size);
- pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
- pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
- pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
- pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
- pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
- pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
- pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
- pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
+ dev_err(this->dev, "BCH Geometry :\n"
+ "GF length : %u\n"
+ "ECC Strength : %u\n"
+ "Page Size in Bytes : %u\n"
+ "Metadata Size in Bytes : %u\n"
+ "ECC Chunk Size in Bytes: %u\n"
+ "ECC Chunk Count : %u\n"
+ "Payload Size in Bytes : %u\n"
+ "Auxiliary Size in Bytes: %u\n"
+ "Auxiliary Status Offset: %u\n"
+ "Block Mark Byte Offset : %u\n"
+ "Block Mark Bit Offset : %u\n",
+ geo->gf_len,
+ geo->ecc_strength,
+ geo->page_size,
+ geo->metadata_size,
+ geo->ecc_chunk_size,
+ geo->ecc_chunk_count,
+ geo->payload_size,
+ geo->auxiliary_size,
+ geo->auxiliary_status_offset,
+ geo->block_mark_byte_offset,
+ geo->block_mark_bit_offset);
}
/* Configures the geometry for BCH. */
@@ -265,8 +277,8 @@ int bch_set_geometry(struct gpmi_nand_data *this)
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
* On the other hand, the MX28 needs the reset, because one case has been
* seen where the BCH produced ECC errors constantly after 10000
- * consecutive reboots. The latter case has not been seen on the MX23 yet,
- * still we don't know if it could happen there as well.
+ * consecutive reboots. The latter case has not been seen on the MX23
+ * yet, still we don't know if it could happen there as well.
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
if (ret)
@@ -353,7 +365,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
improved_timing_is_available =
(target.tREA_in_ns >= 0) &&
(target.tRLOH_in_ns >= 0) &&
- (target.tRHOH_in_ns >= 0) ;
+ (target.tRHOH_in_ns >= 0);
/* Inspect the clock. */
nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
@@ -911,10 +923,14 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
struct resources *r = &this->resources;
struct nand_chip *nand = &this->nand;
struct mtd_info *mtd = &this->mtd;
- uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+ uint8_t *feature;
unsigned long rate;
int ret;
+ feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+ if (!feature)
+ return -ENOMEM;
+
nand->select_chip(mtd, 0);
/* [1] send SET FEATURE commond to NAND */
@@ -942,11 +958,13 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
this->flags |= GPMI_ASYNC_EDO_ENABLED;
this->timing_mode = mode;
+ kfree(feature);
dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
return 0;
err_out:
nand->select_chip(mtd, -1);
+ kfree(feature);
dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
return -EINVAL;
}
@@ -986,7 +1004,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
/* Enable the clock. */
ret = gpmi_enable_clk(this);
if (ret) {
- pr_err("We failed in enable the clk\n");
+ dev_err(this->dev, "We failed in enable the clk\n");
goto err_out;
}
@@ -1003,7 +1021,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
/* [1] Set HW_GPMI_TIMING0 */
reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
- BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
+ BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
writel(reg, gpmi_regs + HW_GPMI_TIMING0);
@@ -1090,7 +1108,7 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
- pr_err("unknow arch.\n");
+ dev_err(this->dev, "unknow arch.\n");
return reg & mask;
}
@@ -1121,10 +1139,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
sgl = &this->cmd_sgl;
@@ -1134,11 +1150,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
sgl, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] submit the DMA */
set_dma_type(this, DMA_FOR_COMMAND);
@@ -1167,20 +1180,17 @@ int gpmi_send_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
+
/* [3] submit the DMA */
set_dma_type(this, DMA_FOR_WRITE_DATA);
return start_dma_without_bch_irq(this, desc);
@@ -1204,20 +1214,16 @@ int gpmi_read_data(struct gpmi_nand_data *this)
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] : submit the DMA */
set_dma_type(this, DMA_FOR_READ_DATA);
@@ -1262,10 +1268,9 @@ int gpmi_send_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
+
set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
@@ -1297,10 +1302,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
desc = dmaengine_prep_slave_sg(channel,
(struct scatterlist *)pio, 2,
DMA_TRANS_NONE, 0);
- if (!desc) {
- pr_err("step 1 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [2] Enable the BCH block and read. */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
@@ -1327,10 +1330,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio,
ARRAY_SIZE(pio), DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 2 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [3] Disable the BCH block */
command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
@@ -1348,10 +1349,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
(struct scatterlist *)pio, 3,
DMA_TRANS_NONE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- pr_err("step 3 error\n");
- return -1;
- }
+ if (!desc)
+ return -EINVAL;
/* [4] submit the DMA */
set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index dabbc14db563..ca6369fe91ff 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -18,9 +18,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -352,6 +349,9 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
+ if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
+ && set_geometry_by_ecc_info(this))
+ return 0;
return legacy_set_geometry(this);
}
@@ -367,25 +367,28 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
struct scatterlist *sgl = &this->data_sgl;
int ret;
- this->direct_dma_map_ok = true;
-
/* first try to map the upper buffer directly */
- sg_init_one(sgl, this->upper_buf, this->upper_len);
- ret = dma_map_sg(this->dev, sgl, 1, dr);
- if (ret == 0) {
- /* We have to use our own DMA buffer. */
- sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
-
- if (dr == DMA_TO_DEVICE)
- memcpy(this->data_buffer_dma, this->upper_buf,
- this->upper_len);
-
+ if (virt_addr_valid(this->upper_buf) &&
+ !object_is_on_stack(this->upper_buf)) {
+ sg_init_one(sgl, this->upper_buf, this->upper_len);
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
- pr_err("DMA mapping failed.\n");
+ goto map_fail;
- this->direct_dma_map_ok = false;
+ this->direct_dma_map_ok = true;
+ return;
}
+
+map_fail:
+ /* We have to use our own DMA buffer. */
+ sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+ if (dr == DMA_TO_DEVICE)
+ memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+ dma_map_sg(this->dev, sgl, 1, dr);
+
+ this->direct_dma_map_ok = false;
}
/* This will be called after the DMA operation is finished. */
@@ -416,7 +419,7 @@ static void dma_irq_callback(void *param)
break;
default:
- pr_err("in wrong DMA operation.\n");
+ dev_err(this->dev, "in wrong DMA operation.\n");
}
complete(dma_c);
@@ -438,7 +441,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the DMA block. */
err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
if (!err) {
- pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
+ dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+ this->last_dma_type);
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -467,7 +471,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
/* Wait for the interrupt from the BCH block. */
err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
if (!err) {
- pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
+ dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+ this->last_dma_type);
gpmi_dump_info(this);
return -ETIMEDOUT;
}
@@ -483,70 +488,38 @@ static int acquire_register_block(struct gpmi_nand_data *this,
void __iomem *p;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
- if (!r) {
- pr_err("Can't get resource for %s\n", res_name);
- return -ENODEV;
- }
-
- p = ioremap(r->start, resource_size(r));
- if (!p) {
- pr_err("Can't remap %s\n", res_name);
- return -ENOMEM;
- }
+ p = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
res->gpmi_regs = p;
else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
res->bch_regs = p;
else
- pr_err("unknown resource name : %s\n", res_name);
+ dev_err(this->dev, "unknown resource name : %s\n", res_name);
return 0;
}
-static void release_register_block(struct gpmi_nand_data *this)
-{
- struct resources *res = &this->resources;
- if (res->gpmi_regs)
- iounmap(res->gpmi_regs);
- if (res->bch_regs)
- iounmap(res->bch_regs);
- res->gpmi_regs = NULL;
- res->bch_regs = NULL;
-}
-
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
- struct resources *res = &this->resources;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
struct resource *r;
int err;
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
if (!r) {
- pr_err("Can't get resource for %s\n", res_name);
+ dev_err(this->dev, "Can't get resource for %s\n", res_name);
return -ENODEV;
}
- err = request_irq(r->start, irq_h, 0, res_name, this);
- if (err) {
- pr_err("Can't own %s\n", res_name);
- return err;
- }
-
- res->bch_low_interrupt = r->start;
- res->bch_high_interrupt = r->end;
- return 0;
-}
-
-static void release_bch_irq(struct gpmi_nand_data *this)
-{
- struct resources *res = &this->resources;
- int i = res->bch_low_interrupt;
+ err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ if (err)
+ dev_err(this->dev, "error requesting BCH IRQ\n");
- for (; i <= res->bch_high_interrupt; i++)
- free_irq(i, this);
+ return err;
}
static void release_dma_channels(struct gpmi_nand_data *this)
@@ -567,7 +540,7 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
/* request dma channel */
dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!dma_chan) {
- pr_err("Failed to request DMA channel.\n");
+ dev_err(this->dev, "Failed to request DMA channel.\n");
goto acquire_err;
}
@@ -579,21 +552,6 @@ acquire_err:
return -EINVAL;
}
-static void gpmi_put_clks(struct gpmi_nand_data *this)
-{
- struct resources *r = &this->resources;
- struct clk *clk;
- int i;
-
- for (i = 0; i < GPMI_CLK_MAX; i++) {
- clk = r->clock[i];
- if (clk) {
- clk_put(clk);
- r->clock[i] = NULL;
- }
- }
-}
-
static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
@@ -606,7 +564,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
int err, i;
/* The main clock is stored in the first. */
- r->clock[0] = clk_get(this->dev, "gpmi_io");
+ r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
if (IS_ERR(r->clock[0])) {
err = PTR_ERR(r->clock[0]);
goto err_clock;
@@ -622,7 +580,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
if (extra_clks[i - 1] == NULL)
break;
- clk = clk_get(this->dev, extra_clks[i - 1]);
+ clk = devm_clk_get(this->dev, extra_clks[i - 1]);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
goto err_clock;
@@ -644,7 +602,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
err_clock:
dev_dbg(this->dev, "failed in finding the clocks.\n");
- gpmi_put_clks(this);
return err;
}
@@ -666,7 +623,7 @@ static int acquire_resources(struct gpmi_nand_data *this)
ret = acquire_dma_channels(this);
if (ret)
- goto exit_dma_channels;
+ goto exit_regs;
ret = gpmi_get_clks(this);
if (ret)
@@ -675,18 +632,12 @@ static int acquire_resources(struct gpmi_nand_data *this)
exit_clock:
release_dma_channels(this);
-exit_dma_channels:
- release_bch_irq(this);
exit_regs:
- release_register_block(this);
return ret;
}
static void release_resources(struct gpmi_nand_data *this)
{
- gpmi_put_clks(this);
- release_register_block(this);
- release_bch_irq(this);
release_dma_channels(this);
}
@@ -732,8 +683,7 @@ static int read_page_prepare(struct gpmi_nand_data *this,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_phys)) {
if (alt_size < length) {
- pr_err("%s, Alternate buffer is too small\n",
- __func__);
+ dev_err(dev, "Alternate buffer is too small\n");
return -ENOMEM;
}
goto map_failed;
@@ -783,8 +733,7 @@ static int send_page_prepare(struct gpmi_nand_data *this,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, source_phys)) {
if (alt_size < length) {
- pr_err("%s, Alternate buffer is too small\n",
- __func__);
+ dev_err(dev, "Alternate buffer is too small\n");
return -ENOMEM;
}
goto map_failed;
@@ -837,14 +786,23 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct device *dev = this->dev;
+ struct mtd_info *mtd = &this->mtd;
/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
if (this->cmd_buffer == NULL)
goto error_alloc;
- /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
- this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+ /*
+ * [2] Allocate a read/write data buffer.
+ * The gpmi_alloc_dma_buffer can be called twice.
+ * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
+ * is called before the nand_scan_ident; and we allocate a buffer
+ * of the real NAND page size when the gpmi_alloc_dma_buffer is
+ * called after the nand_scan_ident.
+ */
+ this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+ GFP_DMA | GFP_KERNEL);
if (this->data_buffer_dma == NULL)
goto error_alloc;
@@ -872,7 +830,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
error_alloc:
gpmi_free_dma_buffer(this);
- pr_err("Error allocating DMA buffers!\n");
return -ENOMEM;
}
@@ -904,7 +861,8 @@ static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
ret = gpmi_send_command(this);
if (ret)
- pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+ dev_err(this->dev, "Chip: %u, Error %d\n",
+ this->current_chip, ret);
this->command_length = 0;
}
@@ -935,7 +893,7 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct nand_chip *chip = mtd->priv;
struct gpmi_nand_data *this = chip->priv;
- pr_debug("len is %d\n", len);
+ dev_dbg(this->dev, "len is %d\n", len);
this->upper_buf = buf;
this->upper_len = len;
@@ -947,7 +905,7 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct nand_chip *chip = mtd->priv;
struct gpmi_nand_data *this = chip->priv;
- pr_debug("len is %d\n", len);
+ dev_dbg(this->dev, "len is %d\n", len);
this->upper_buf = (uint8_t *)buf;
this->upper_len = len;
@@ -1026,13 +984,13 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
int ret;
- pr_debug("page number is : %d\n", page);
+ dev_dbg(this->dev, "page number is : %d\n", page);
ret = read_page_prepare(this, buf, mtd->writesize,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
&payload_virt, &payload_phys);
if (ret) {
- pr_err("Inadequate DMA buffer\n");
+ dev_err(this->dev, "Inadequate DMA buffer\n");
ret = -ENOMEM;
return ret;
}
@@ -1046,7 +1004,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->payload_size,
payload_virt, payload_phys);
if (ret) {
- pr_err("Error in ECC-based read: %d\n", ret);
+ dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
return ret;
}
@@ -1102,7 +1060,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
dma_addr_t auxiliary_phys;
int ret;
- pr_debug("ecc write page.\n");
+ dev_dbg(this->dev, "ecc write page.\n");
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1132,7 +1090,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->payload_size,
&payload_virt, &payload_phys);
if (ret) {
- pr_err("Inadequate payload DMA buffer\n");
+ dev_err(this->dev, "Inadequate payload DMA buffer\n");
return 0;
}
@@ -1142,7 +1100,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc_geo->auxiliary_size,
&auxiliary_virt, &auxiliary_phys);
if (ret) {
- pr_err("Inadequate auxiliary DMA buffer\n");
+ dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
goto exit_auxiliary;
}
}
@@ -1150,7 +1108,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Ask the NFC. */
ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
if (ret)
- pr_err("Error in ECC-based write: %d\n", ret);
+ dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
if (!this->swap_block_mark) {
send_page_end(this, chip->oob_poi, mtd->oobsize,
@@ -1240,7 +1198,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct gpmi_nand_data *this = chip->priv;
- pr_debug("page number is %d\n", page);
+ dev_dbg(this->dev, "page number is %d\n", page);
/* clear the OOB buffer */
memset(chip->oob_poi, ~0, mtd->oobsize);
@@ -1453,7 +1411,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the NCB fingerprint into the page buffer. */
memset(buffer, ~0, mtd->writesize);
- memset(chip->oob_poi, ~0, mtd->oobsize);
memcpy(buffer + 12, fingerprint, strlen(fingerprint));
/* Loop through the first search area, writing NCB fingerprints. */
@@ -1568,7 +1525,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
- pr_err("Error setting BCH geometry : %d\n", ret);
+ dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
return ret;
}
@@ -1576,20 +1533,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
return gpmi_alloc_dma_buffer(this);
}
-static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
-{
- /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
- if (GPMI_IS_MX23(this))
- this->swap_block_mark = false;
- else
- this->swap_block_mark = true;
-
- /* Set up the medium geometry */
- return gpmi_set_geometry(this);
-
-}
-
-static void gpmi_nfc_exit(struct gpmi_nand_data *this)
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
{
nand_release(&this->mtd);
gpmi_free_dma_buffer(this);
@@ -1603,8 +1547,11 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
- /* Prepare for the BBT scan. */
- ret = gpmi_pre_bbt_scan(this);
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ /* Set up the medium geometry */
+ ret = gpmi_set_geometry(this);
if (ret)
return ret;
@@ -1629,7 +1576,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
return 0;
}
-static int gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nand_init(struct gpmi_nand_data *this)
{
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
@@ -1693,7 +1640,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
return 0;
err_out:
- gpmi_nfc_exit(this);
+ gpmi_nand_exit(this);
return ret;
}
@@ -1728,15 +1675,13 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (of_id) {
pdev->id_entry = of_id->data;
} else {
- pr_err("Failed to find the right device id.\n");
+ dev_err(&pdev->dev, "Failed to find the right device id.\n");
return -ENODEV;
}
this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
- if (!this) {
- pr_err("Failed to allocate per-device memory\n");
+ if (!this)
return -ENOMEM;
- }
platform_set_drvdata(pdev, this);
this->pdev = pdev;
@@ -1750,7 +1695,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
if (ret)
goto exit_nfc_init;
- ret = gpmi_nfc_init(this);
+ ret = gpmi_nand_init(this);
if (ret)
goto exit_nfc_init;
@@ -1770,7 +1715,7 @@ static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
- gpmi_nfc_exit(this);
+ gpmi_nand_exit(this);
release_resources(this);
return 0;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a7685e3a8748..4c801fa18725 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -26,8 +26,6 @@
struct resources {
void __iomem *gpmi_regs;
void __iomem *bch_regs;
- unsigned int bch_low_interrupt;
- unsigned int bch_high_interrupt;
unsigned int dma_low_channel;
unsigned int dma_high_channel;
struct clk *clock[GPMI_CLK_MAX];
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index a264b888c66c..a2c804de156b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -416,10 +416,8 @@ static int jz_nand_probe(struct platform_device *pdev)
uint8_t nand_maf_id = 0, nand_dev_id = 0;
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
- if (!nand) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ if (!nand)
return -ENOMEM;
- }
ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
if (ret)
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 327d96c03505..687478c9f09c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -539,20 +539,6 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
-{
- int res;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- lpc32xx_waitfunc(mtd, chip);
-
- return res;
-}
-
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
@@ -627,10 +613,8 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
- if (!ncfg) {
- dev_err(dev, "could not allocate memory for platform data\n");
+ if (!ncfg)
return NULL;
- }
of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
@@ -666,10 +650,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ if (!host)
return -ENOMEM;
- }
rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -732,9 +714,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->ecc.write_oob = lpc32xx_write_oob;
nand_chip->ecc.read_oob = lpc32xx_read_oob;
nand_chip->ecc.strength = 4;
- nand_chip->write_page = lpc32xx_write_page;
nand_chip->waitfunc = lpc32xx_waitfunc;
+ nand_chip->options = NAND_NO_SUBPAGE_WRITE;
nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
nand_chip->bbt_td = &lpc32xx_nand_bbt;
nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
@@ -764,14 +746,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf) {
- dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
res = -ENOMEM;
goto err_exit3;
}
host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dummy_buf) {
- dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
res = -ENOMEM;
goto err_exit3;
}
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 23e6974ccd20..53a6742e3da3 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -725,10 +725,8 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
- if (!ncfg) {
- dev_err(dev, "could not allocate memory for NAND config\n");
+ if (!ncfg)
return NULL;
- }
of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
@@ -772,10 +770,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "failed to allocate device structure\n");
+ if (!host)
return -ENOMEM;
- }
host->io_base_dma = rc->start;
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -791,8 +787,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
if (host->ncfg->wp_gpio == -EPROBE_DEFER)
return -EPROBE_DEFER;
- if (gpio_is_valid(host->ncfg->wp_gpio) &&
- gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+ host->ncfg->wp_gpio, "NAND WP")) {
dev_err(&pdev->dev, "GPIO not available\n");
return -EBUSY;
}
@@ -808,7 +804,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
- host->clk = clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
@@ -858,7 +854,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
GFP_KERNEL);
if (host->data_buf == NULL) {
- dev_err(&pdev->dev, "Error allocating memory\n");
res = -ENOMEM;
goto err_exit2;
}
@@ -927,10 +922,8 @@ err_exit3:
dma_release_channel(host->dma_chan);
err_exit2:
clk_disable(host->clk);
- clk_put(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
- gpio_free(host->ncfg->wp_gpio);
return res;
}
@@ -953,9 +946,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
writel(tmp, SLC_CTRL(host->io_base));
clk_disable(host->clk);
- clk_put(host->clk);
lpc32xx_wp_enable(host);
- gpio_free(host->ncfg->wp_gpio);
return 0;
}
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 439bc3896418..31ee7cfbc12b 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -653,10 +653,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
- if (!prv) {
- dev_err(dev, "Memory exhausted!\n");
+ if (!prv)
return -ENOMEM;
- }
mtd = &prv->mtd;
chip = &prv->chip;
@@ -731,7 +729,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
of_node_put(rootnode);
/* Enable NFC clock */
- clk = devm_clk_get(dev, "nfc_clk");
+ clk = devm_clk_get(dev, "ipg");
if (IS_ERR(clk)) {
dev_err(dev, "Unable to acquire NFC clock!\n");
retval = PTR_ERR(clk);
@@ -786,7 +784,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
/* Detect NAND chips */
if (nand_scan(mtd, be32_to_cpup(chips_no))) {
dev_err(dev, "NAND Flash not found !\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -811,7 +808,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
default:
dev_err(dev, "Unsupported NAND flash!\n");
- devm_free_irq(dev, prv->irq, mtd);
retval = -ENXIO;
goto error;
}
@@ -822,7 +818,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
- devm_free_irq(dev, prv->irq, mtd);
goto error;
}
@@ -836,11 +831,8 @@ static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct nand_chip *chip = mtd->priv;
- struct mpc5121_nfc_prv *prv = chip->priv;
nand_release(mtd);
- devm_free_irq(dev, prv->irq, mtd);
mpc5121_nfc_free(dev, mtd);
return 0;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 9dfdb06c508b..e9a4835c4dd9 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -677,7 +677,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- mtd->ecc_stats.corrected += ret;
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
@@ -1400,12 +1399,15 @@ static int mxcnd_probe(struct platform_device *pdev)
int err = 0;
/* Allocate memory for MTD device structure and private data */
- host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
- NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+ GFP_KERNEL);
if (!host)
return -ENOMEM;
- host->data_buf = (uint8_t *)(host + 1);
+ /* allocate a temporary buffer for the nand_scan_ident() */
+ host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
host->dev = &pdev->dev;
/* structures must be linked */
@@ -1512,7 +1514,9 @@ static int mxcnd_probe(struct platform_device *pdev)
if (err)
return err;
- clk_prepare_enable(host->clk);
+ err = clk_prepare_enable(host->clk);
+ if (err)
+ return err;
host->clk_act = 1;
/*
@@ -1531,6 +1535,15 @@ static int mxcnd_probe(struct platform_device *pdev)
goto escan;
}
+ /* allocate the right size buffer now */
+ devm_kfree(&pdev->dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf) {
+ err = -ENOMEM;
+ goto escan;
+ }
+
/* Call preset again, with correct writesize this time */
host->devtype_data->preset(mtd);
@@ -1576,6 +1589,8 @@ static int mxcnd_remove(struct platform_device *pdev)
struct mxc_nand_host *host = platform_get_drvdata(pdev);
nand_release(&host->mtd);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return 0;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bd39f7b67906..9715a7ba164a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -202,6 +204,51 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
}
/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_onfi_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+/**
* nand_write_buf - [DEFAULT] write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
@@ -1408,6 +1455,30 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
}
/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->setup_read_retry(mtd, retry_mode);
+}
+
+/**
* nand_do_read_ops - [INTERN] Read data with ECC
* @mtd: MTD device structure
* @from: offset to read from
@@ -1420,7 +1491,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
{
int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
- struct mtd_ecc_stats stats;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -1429,8 +1499,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
uint8_t *bufpoi, *oob, *buf;
unsigned int max_bitflips = 0;
-
- stats = mtd->ecc_stats;
+ int retry_mode = 0;
+ bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1445,6 +1515,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
oob_required = oob ? 1 : 0;
while (1) {
+ unsigned int ecc_failures = mtd->ecc_stats.failed;
+
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
@@ -1452,6 +1524,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
+read_retry:
chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
/*
@@ -1481,7 +1554,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Transfer not aligned data */
if (!aligned) {
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - stats.failed) &&
+ !(mtd->ecc_stats.failed - ecc_failures) &&
(ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
chip->pagebuf_bitflips = ret;
@@ -1492,8 +1565,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
memcpy(buf, chip->buffers->databuf + col, bytes);
}
- buf += bytes;
-
if (unlikely(oob)) {
int toread = min(oobreadlen, max_oobsize);
@@ -1511,6 +1582,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
else
nand_wait_ready(mtd);
}
+
+ if (mtd->ecc_stats.failed - ecc_failures) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(mtd,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset failures; retry */
+ mtd->ecc_stats.failed = ecc_failures;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
@@ -1520,6 +1610,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
readlen -= bytes;
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(mtd, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
if (!readlen)
break;
@@ -1545,7 +1643,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (ret < 0)
return ret;
- if (mtd->ecc_stats.failed - stats.failed)
+ if (ecc_fail)
return -EBADMSG;
return max_bitflips;
@@ -2716,6 +2814,7 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
int status;
+ int i;
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
@@ -2723,7 +2822,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
return -EINVAL;
chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, subfeature_param[i]);
+
status = chip->waitfunc(mtd, chip);
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -2740,6 +2841,8 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
+ int i;
+
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
@@ -2749,7 +2852,8 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->read_byte(mtd);
return 0;
}
@@ -2812,6 +2916,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
chip->block_markbad = nand_default_block_markbad;
if (!chip->write_buf || chip->write_buf == nand_write_buf)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->write_byte || chip->write_byte == nand_write_byte)
+ chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
if (!chip->scan_bbt)
@@ -2926,6 +3032,30 @@ ext_out:
return ret;
}
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+ feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+ if (le16_to_cpu(p->vendor_revision) < 1)
+ return;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
@@ -2979,7 +3109,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 10;
if (!chip->onfi_version) {
- pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
+ pr_info("unsupported ONFI version: %d\n", val);
return 0;
}
@@ -3032,6 +3162,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
pr_warn("Could not retrieve ONFI ECC requirements\n");
}
+ if (p->jedec_id == NAND_MFR_MICRON)
+ nand_onfi_detect_micron(chip, p);
+
return 1;
}
@@ -3152,9 +3285,12 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize = 512;
break;
case 6:
- default: /* Other cases are "reserved" (unknown) */
mtd->oobsize = 640;
break;
+ case 7:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 1024;
+ break;
}
extid >>= 2;
/* Calc blocksize */
@@ -3325,6 +3461,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
*busw = type->options & NAND_BUSWIDTH_16;
+ if (!mtd->name)
+ mtd->name = type->name;
+
return true;
}
return false;
@@ -3372,8 +3511,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
id_data[i] = chip->read_byte(mtd);
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
- pr_info("%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
*maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
@@ -3440,10 +3578,10 @@ ident_done:
* Check, if buswidth is correct. Hardware drivers should set
* chip correct!
*/
- pr_info("NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- pr_warn("NAND bus width %d instead %d bit\n",
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("bus width %d instead %d bit\n",
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
busw ? 16 : 8);
return ERR_PTR(-EINVAL);
@@ -3472,14 +3610,13 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
- *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name);
-
- pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+ pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->writesize, mtd->oobsize);
-
return type;
}
@@ -3535,7 +3672,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->select_chip(mtd, -1);
}
if (i > 1)
- pr_info("%d NAND chips detected\n", i);
+ pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
chip->numchips = i;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a87b0a3afa35..daa2faacd7d0 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -169,6 +169,8 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_AMD, "AMD/Spansion"},
{NAND_MFR_MACRONIX, "Macronix"},
{NAND_MFR_EON, "Eon"},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_INTEL, "Intel"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 52115151e4a7..9ee09a8177c6 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -241,12 +241,10 @@ static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
- int retval;
struct resource *res;
- retval = 0;
-
- nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+ GFP_KERNEL);
if (!nuc900_nand)
return -ENOMEM;
chip = &(nuc900_nand->chip);
@@ -255,11 +253,9 @@ static int nuc900_nand_probe(struct platform_device *pdev)
nuc900_nand->mtd.owner = THIS_MODULE;
spin_lock_init(&nuc900_nand->lock);
- nuc900_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(nuc900_nand->clk)) {
- retval = -ENOENT;
- goto fail1;
- }
+ nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk))
+ return -ENOENT;
clk_enable(nuc900_nand->clk);
chip->cmdfunc = nuc900_nand_command_lp;
@@ -272,57 +268,29 @@ static int nuc900_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_SOFT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENXIO;
- goto fail1;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- retval = -EBUSY;
- goto fail1;
- }
-
- nuc900_nand->reg = ioremap(res->start, resource_size(res));
- if (!nuc900_nand->reg) {
- retval = -ENOMEM;
- goto fail2;
- }
+ nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nuc900_nand->reg))
+ return PTR_ERR(nuc900_nand->reg);
nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(nuc900_nand->mtd), 1)) {
- retval = -ENXIO;
- goto fail3;
- }
+ if (nand_scan(&(nuc900_nand->mtd), 1))
+ return -ENXIO;
mtd_device_register(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
- return retval;
-
-fail3: iounmap(nuc900_nand->reg);
-fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(nuc900_nand);
- return retval;
+ return 0;
}
static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
- struct resource *res;
nand_release(&nuc900_nand->mtd);
- iounmap(nuc900_nand->reg);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
clk_disable(nuc900_nand->clk);
- clk_put(nuc900_nand->clk);
-
- kfree(nuc900_nand);
return 0;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index f77725009907..bf642ceef681 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1633,6 +1633,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int i;
dma_cap_mask_t mask;
unsigned sig;
+ unsigned oob_index;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1730,13 +1731,7 @@ static int omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_POLLED:
- if (nand_chip->options & NAND_BUSWIDTH_16) {
- nand_chip->read_buf = omap_read_buf16;
- nand_chip->write_buf = omap_write_buf16;
- } else {
- nand_chip->read_buf = omap_read_buf8;
- nand_chip->write_buf = omap_write_buf8;
- }
+ /* Use nand_base defaults for {read,write}_buf */
break;
case NAND_OMAP_PREFETCH_DMA:
@@ -1832,11 +1827,14 @@ static int omap_nand_probe(struct platform_device *pdev)
(mtd->writesize /
nand_chip->ecc.size);
if (nand_chip->options & NAND_BUSWIDTH_16)
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ oob_index = BADBLOCK_MARKER_LENGTH;
else
- ecclayout->eccpos[0] = 1;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1853,9 +1851,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1889,9 +1893,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* This ECC scheme requires ELM H/W block */
if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
pr_err("nand: error: could not initialize ELM\n");
@@ -1919,9 +1926,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1962,9 +1975,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
#else
pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1978,11 +1994,8 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* populate remaining ECC layout data */
- ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
- ecclayout->eccbytes);
- for (i = 1; i < ecclayout->eccbytes; i++)
- ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* all OOB bytes from oobfree->offset till end of OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
pr_err("not enough OOB bytes required = %d, available=%d\n",
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index a393a5b6ce1e..dd7fe817eafb 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -87,7 +87,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
- printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
ret = -ENOMEM;
goto no_res;
}
@@ -101,7 +100,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
- printk(KERN_ERR "orion_nand: ioremap failed\n");
+ dev_err(&pdev->dev, "ioremap failed\n");
ret = -EIO;
goto no_res;
}
@@ -110,7 +109,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
GFP_KERNEL);
if (!board) {
- printk(KERN_ERR "orion_nand: failed to allocate board structure.\n");
ret = -ENOMEM;
goto no_res;
}
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 4d174366a0f0..90f871acb0ef 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct platform_driver pasemi_nand_driver =
{
.driver = {
- .name = (char*)driver_name,
+ .name = driver_name,
.owner = THIS_MODULE,
.of_match_table = pasemi_nand_match,
},
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index cad4cdc9df39..0b068a5c0bff 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -47,30 +48,16 @@ static int plat_nand_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
/* Allocate memory for the device structure (and zero it) */
- data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+ GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
-
- if (!request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
- goto out_free;
- }
- data->io_base = ioremap(res->start, resource_size(res));
- if (data->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_release_io;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->io_base))
+ return PTR_ERR(data->io_base);
data->chip.priv = &data;
data->mtd.priv = &data->chip;
@@ -122,11 +109,6 @@ static int plat_nand_probe(struct platform_device *pdev)
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- iounmap(data->io_base);
-out_release_io:
- release_mem_region(res->start, resource_size(res));
-out_free:
- kfree(data);
return err;
}
@@ -137,16 +119,10 @@ static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
- iounmap(data->io_base);
- release_mem_region(res->start, resource_size(res));
- kfree(data);
return 0;
}
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4b3aaa898a8b..2a7a0b27ac38 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -7,6 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
*/
#include <linux/kernel.h>
@@ -24,6 +26,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#define ARCH_HAS_DMA
@@ -35,6 +38,7 @@
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#define NAND_DEV_READY_TIMEOUT 50
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
#define PAGE_CHUNK_SIZE (2048)
@@ -54,6 +58,7 @@
#define NDPCR (0x18) /* Page Count Register */
#define NDBDR0 (0x1C) /* Bad Block Register 0 */
#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL (0x28) /* ECC control */
#define NDDB (0x40) /* Data Buffer */
#define NDCB0 (0x48) /* Command Buffer0 */
#define NDCB1 (0x4C) /* Command Buffer1 */
@@ -80,6 +85,9 @@
#define NDCR_INT_MASK (0xFFF)
#define NDSR_MASK (0xfff)
+#define NDSR_ERR_CNT_OFF (16)
+#define NDSR_ERR_CNT_MASK (0x1f)
+#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY (0x1 << 12)
#define NDSR_FLASH_RDY (0x1 << 11)
#define NDSR_CS0_PAGED (0x1 << 10)
@@ -88,8 +96,8 @@
#define NDSR_CS1_CMDD (0x1 << 7)
#define NDSR_CS0_BBD (0x1 << 6)
#define NDSR_CS1_BBD (0x1 << 5)
-#define NDSR_DBERR (0x1 << 4)
-#define NDSR_SBERR (0x1 << 3)
+#define NDSR_UNCORERR (0x1 << 4)
+#define NDSR_CORERR (0x1 << 3)
#define NDSR_WRDREQ (0x1 << 2)
#define NDSR_RDDREQ (0x1 << 1)
#define NDSR_WRCMDREQ (0x1)
@@ -98,6 +106,8 @@
#define NDCB0_ST_ROW_EN (0x1 << 26)
#define NDCB0_AUTO_RS (0x1 << 25)
#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC (0x1 << 20)
@@ -108,6 +118,14 @@
#define NDCB0_CMD1_MASK (0xff)
#define NDCB0_ADDR_CYC_SHIFT (16)
+#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ 4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL 3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+
/* macros for registers read/write */
#define nand_writel(info, off, val) \
__raw_writel((val), (info)->mmio_base + (off))
@@ -120,9 +138,9 @@ enum {
ERR_NONE = 0,
ERR_DMABUSERR = -1,
ERR_SENDCMD = -2,
- ERR_DBERR = -3,
+ ERR_UNCORERR = -3,
ERR_BBERR = -4,
- ERR_SBERR = -5,
+ ERR_CORERR = -5,
};
enum {
@@ -149,7 +167,6 @@ struct pxa3xx_nand_host {
void *info_data;
/* page size of attached chip */
- unsigned int page_size;
int use_ecc;
int cs;
@@ -167,11 +184,13 @@ struct pxa3xx_nand_info {
struct clk *clk;
void __iomem *mmio_base;
unsigned long mmio_phys;
- struct completion cmd_complete;
+ struct completion cmd_complete, dev_ready;
unsigned int buf_start;
unsigned int buf_count;
unsigned int buf_size;
+ unsigned int data_buff_pos;
+ unsigned int oob_buff_pos;
/* DMA information */
int drcmr_dat;
@@ -195,13 +214,18 @@ struct pxa3xx_nand_info {
int cs;
int use_ecc; /* use HW ECC ? */
+ int ecc_bch; /* using BCH ECC? */
int use_dma; /* use DMA ? */
int use_spare; /* use spare ? */
- int is_ready;
+ int need_wait;
- unsigned int page_size; /* page size of attached chip */
- unsigned int data_size; /* data size in FIFO */
+ unsigned int data_size; /* data to be read from FIFO */
+ unsigned int chunk_size; /* split commands chunk size */
unsigned int oob_size;
+ unsigned int spare_size;
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
int retcode;
/* cached register value */
@@ -239,6 +263,64 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
};
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+ .eccbytes = 32,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { }
+};
+
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
@@ -256,6 +338,29 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_PXA,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+ if (!of_id)
+ return PXA3XX_NAND_VARIANT_PXA;
+ return (enum pxa3xx_nand_variant)of_id->data;
+}
+
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
const struct pxa3xx_nand_timing *t)
{
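
The of_device_id table and pxa3xx_nand_get_variant() are moved up here (and removed from their old location further down) so the variant can be resolved early, in alloc_nand_resource(). A short, hedged sketch of how such a lookup is typically consumed; the enum values come from the table above, the branch bodies are only illustrative:

	/* in probe/alloc, once per controller */
	info->variant = pxa3xx_nand_get_variant(pdev);

	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
		/* Armada 370/XP flavour: BCH ECC (NDECCCTRL) and the
		 * chunked/extended command path become available */
	} else {
		/* plain PXA3xx flavour: Hamming ECC, monolithic commands */
	}

Non-DT probes fall back to PXA3XX_NAND_VARIANT_PXA because of_match_device() returns NULL when there is no matching OF node.
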
@@ -280,25 +385,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
nand_writel(info, NDTR1CS0, ndtr1);
}
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+ struct mtd_info *mtd)
{
- struct pxa3xx_nand_host *host = info->host[info->cs];
int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
- info->data_size = host->page_size;
- if (!oob_enable) {
- info->oob_size = 0;
+ info->data_size = mtd->writesize;
+ if (!oob_enable)
return;
- }
- switch (host->page_size) {
- case 2048:
- info->oob_size = (info->use_ecc) ? 40 : 64;
- break;
- case 512:
- info->oob_size = (info->use_ecc) ? 8 : 16;
- break;
- }
+ info->oob_size = info->spare_size;
+ if (!info->use_ecc)
+ info->oob_size += info->ecc_size;
}
/**
@@ -313,10 +416,15 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
ndcr = info->reg_ndcr;
- if (info->use_ecc)
+ if (info->use_ecc) {
ndcr |= NDCR_ECC_EN;
- else
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x1);
+ } else {
ndcr &= ~NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x0);
+ }
if (info->use_dma)
ndcr |= NDCR_DMA_EN;
@@ -375,26 +483,39 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
+ unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
switch (info->state) {
case STATE_PIO_WRITING:
- __raw_writesl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
case STATE_PIO_READING:
- __raw_readsl(info->mmio_base + NDDB, info->data_buff,
- DIV_ROUND_UP(info->data_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
+
if (info->oob_size > 0)
- __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
- DIV_ROUND_UP(info->oob_size, 4));
+ __raw_readsl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
+
+ /* Update buffer pointers for multi-page read/write */
+ info->data_buff_pos += do_bytes;
+ info->oob_buff_pos += info->oob_size;
+ info->data_size -= do_bytes;
}
#ifdef ARCH_HAS_DMA
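
handle_data_pio() now moves at most one controller chunk per call and keeps running offsets, so the same routine can be invoked repeatedly while a page larger than the FIFO is transferred. A rough sketch of the bookkeeping, assuming a caller that keeps issuing chunk transfers until data_size reaches zero (copy_chunk() is a stand-in for the __raw_readsl/__raw_writesl FIFO accessors):

	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	copy_chunk(fifo, info->data_buff + info->data_buff_pos, do_bytes);
	if (info->oob_size > 0)
		copy_chunk(fifo, info->oob_buff + info->oob_buff_pos,
			   info->oob_size);

	info->data_buff_pos += do_bytes;	/* next chunk lands here */
	info->oob_buff_pos  += info->oob_size;
	info->data_size     -= do_bytes;	/* sequence ends at zero */
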
@@ -452,7 +573,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
- unsigned int status, is_completed = 0;
+ unsigned int status, is_completed = 0, is_ready = 0;
unsigned int ready, cmd_done;
if (info->cs == 0) {
@@ -465,10 +586,25 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
status = nand_readl(info, NDSR);
- if (status & NDSR_DBERR)
- info->retcode = ERR_DBERR;
- if (status & NDSR_SBERR)
- info->retcode = ERR_SBERR;
+ if (status & NDSR_UNCORERR)
+ info->retcode = ERR_UNCORERR;
+ if (status & NDSR_CORERR) {
+ info->retcode = ERR_CORERR;
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ info->ecc_bch)
+ info->ecc_err_cnt = NDSR_ERR_CNT(status);
+ else
+ info->ecc_err_cnt = 1;
+
+ /*
+ * Each chunk composing a page is corrected independently,
+ * and we need to store the maximum number of corrected bitflips
+ * to return it to the MTD layer in ecc.read_page().
+ */
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips,
+ info->ecc_err_cnt);
+ }
if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
/* whether use dma to transfer data */
if (info->use_dma) {
@@ -488,8 +624,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
is_completed = 1;
}
if (status & ready) {
- info->is_ready = 1;
info->state = STATE_READY;
+ is_ready = 1;
}
if (status & NDSR_WRCMDREQ) {
@@ -518,6 +654,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDSR, status);
if (is_completed)
complete(&info->cmd_complete);
+ if (is_ready)
+ complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
return IRQ_HANDLED;
}
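
The interrupt handler now signals two independent completions: cmd_complete when the issued command finishes, and dev_ready when the chip reports ready, which lets the waitfunc further down block on readiness separately from command completion. A generic sketch of that completion pattern, using demo names rather than the driver's:

#include <linux/completion.h>

static DECLARE_COMPLETION(demo_done);

/* producer side, e.g. the interrupt handler */
static void demo_signal(void)
{
	complete(&demo_done);
}

/* consumer side: returns 0 on timeout, remaining jiffies otherwise */
static unsigned long demo_wait(unsigned long timeout)
{
	reinit_completion(&demo_done);	/* arm before starting the op */
	/* ... kick off the hardware operation here ... */
	return wait_for_completion_timeout(&demo_done, timeout);
}
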
@@ -530,51 +668,94 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
return 1;
}
-static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
- uint16_t column, int page_addr)
+static void set_command_address(struct pxa3xx_nand_info *info,
+ unsigned int page_size, uint16_t column, int page_addr)
{
- int addr_cycle, exec_cmd;
- struct pxa3xx_nand_host *host;
- struct mtd_info *mtd;
+ /* small page addr setting */
+ if (page_size < PAGE_CHUNK_SIZE) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
- host = info->host[info->cs];
- mtd = host->mtd;
- addr_cycle = 0;
- exec_cmd = 1;
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = host->mtd;
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
info->oob_size = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
info->use_ecc = 0;
info->use_spare = 1;
- info->is_ready = 0;
info->retcode = ERR_NONE;
- if (info->cs != 0)
- info->ndcb0 = NDCB0_CSEL;
- else
- info->ndcb0 = 0;
+ info->ecc_err_cnt = 0;
+ info->ndcb3 = 0;
+ info->need_wait = 0;
switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
case NAND_CMD_READOOB:
- pxa3xx_set_datasize(info);
+ pxa3xx_set_datasize(info, mtd);
break;
case NAND_CMD_PARAM:
info->use_spare = 0;
break;
- case NAND_CMD_SEQIN:
- exec_cmd = 0;
- break;
default:
info->ndcb1 = 0;
info->ndcb2 = 0;
- info->ndcb3 = 0;
break;
}
+ /*
+ * If we are about to issue a read command, or about to set
+ * the write address, then clean the data buffer.
+ */
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READOOB ||
+ command == NAND_CMD_SEQIN) {
+
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+ }
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+ int ext_cmd_type, uint16_t column, int page_addr)
+{
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = host->mtd;
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ if (command == NAND_CMD_SEQIN)
+ exec_cmd = 0;
+
addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ host->col_addr_cycles);
@@ -589,30 +770,42 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
if (command == NAND_CMD_READOOB)
info->buf_start += mtd->writesize;
- /* Second command setting for large pages */
- if (host->page_size >= PAGE_CHUNK_SIZE)
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+ * state.
+ */
+ if (mtd->writesize == PAGE_CHUNK_SIZE) {
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+ } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+ break;
case NAND_CMD_SEQIN:
- /* small page addr setting */
- if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
- info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
- | (column & 0xFF);
- info->ndcb2 = 0;
- } else {
- info->ndcb1 = ((page_addr & 0xFFFF) << 16)
- | (column & 0xFFFF);
+ info->buf_start = column;
+ set_command_address(info, mtd->writesize, 0, page_addr);
- if (page_addr & 0xFF0000)
- info->ndcb2 = (page_addr & 0xFF0000) >> 16;
- else
- info->ndcb2 = 0;
+ /*
+ * Multiple page programming needs to execute the initial
+ * SEQIN command that sets the page address.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+ /* No data transfer in this case */
+ info->data_size = 0;
+ exec_cmd = 1;
}
-
- info->buf_count = mtd->writesize + mtd->oobsize;
- memset(info->data_buff, 0xFF, info->buf_count);
-
break;
case NAND_CMD_PAGEPROG:
@@ -622,13 +815,40 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
break;
}
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_AUTO_RS
- | NDCB0_ST_ROW_EN
- | NDCB0_DBC
- | (NAND_CMD_PAGEPROG << 8)
- | NAND_CMD_SEQIN
- | addr_cycle;
+ /* Second command setting for large pages */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ /*
+ * Multiple page write uses the 'extended command'
+ * field. This can be used to issue a command dispatch
+ * or a naked-write depending on the current stage.
+ */
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->chunk_size +
+ info->oob_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+ if (info->data_size == 0) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ info->ndcb3 = 0;
+ }
+ } else {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
+ | addr_cycle;
+ }
break;
case NAND_CMD_PARAM:
@@ -691,8 +911,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
return exec_cmd;
}
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
@@ -717,10 +937,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
nand_writel(info, NDTR1CS0, info->ndtr1cs0);
}
+ prepare_start_command(info, command);
+
info->state = STATE_PREPARED;
- exec_cmd = prepare_command_pool(info, command, column, page_addr);
+ exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
if (exec_cmd) {
init_completion(&info->cmd_complete);
+ init_completion(&info->dev_ready);
+ info->need_wait = 1;
pxa3xx_nand_start(info);
ret = wait_for_completion_timeout(&info->cmd_complete,
@@ -734,6 +959,117 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
info->state = STATE_IDLE;
}
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+ const unsigned command,
+ int column, int page_addr)
+{
+ struct pxa3xx_nand_host *host = mtd->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int ret, exec_cmd, ext_cmd_type;
+
+ /*
+ * if this is a x16 device then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+ * There may be different NAND chips hooked to
+ * different chip selects, so check whether the
+ * chip select has changed; if so, reset the timing
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ /* Select the extended command for the first command */
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ ext_cmd_type = EXT_CMD_TYPE_MONO;
+ break;
+ case NAND_CMD_SEQIN:
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+ break;
+ default:
+ ext_cmd_type = 0;
+ break;
+ }
+
+ prepare_start_command(info, command);
+
+ /*
+ * Prepare the "is ready" completion before starting a command
+ * transaction sequence. If the command is not executed, the
+ * completion will be completed; see below.
+ *
+ * We can do that inside the loop because the command variable
+ * is invariant and thus so is the exec_cmd.
+ */
+ info->need_wait = 1;
+ init_completion(&info->dev_ready);
+ do {
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+ info->need_wait = 0;
+ complete(&info->dev_ready);
+ break;
+ }
+
+ init_completion(&info->cmd_complete);
+ pxa3xx_nand_start(info);
+
+ ret = wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT);
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Wait time out!!!\n");
+ /* Stop State Machine for next command cycle */
+ pxa3xx_nand_stop(info);
+ break;
+ }
+
+ /* Check if the sequence is complete */
+ if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+ * After a split program command sequence has issued
+ * the command dispatch, the command sequence is complete.
+ */
+ if (info->data_size == 0 &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+ if (info->data_size == info->chunk_size)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+ /*
+ * If a split program command has no more data to transfer,
+ * the command dispatch must be issued to complete the sequence.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+ info->data_size == 0) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+
+ info->state = STATE_IDLE;
+}
+
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
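
nand_cmdfunc_extended() issues one controller transaction per chunk and re-selects the extended command type between iterations. A condensed sketch of the read path only, with timeouts and error handling omitted; it assumes the same helpers and fields used above:

	/* READ0/READOOB start as a monolithic read */
	ext_cmd_type = EXT_CMD_TYPE_MONO;

	do {
		exec_cmd = prepare_set_command(info, NAND_CMD_READ0,
					       ext_cmd_type, column, page_addr);
		if (!exec_cmd)
			break;

		pxa3xx_nand_start(info);	/* then wait for cmd_complete */

		if (info->data_size == 0)
			break;			/* whole page transferred */

		/* one chunk left: close the sequence with a last naked read */
		ext_cmd_type = (info->data_size == info->chunk_size) ?
				EXT_CMD_TYPE_LAST_RW : EXT_CMD_TYPE_NAKED_RW;
	} while (1);
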
@@ -753,20 +1089,14 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
chip->read_buf(mtd, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- if (info->retcode == ERR_SBERR) {
- switch (info->use_ecc) {
- case 1:
- mtd->ecc_stats.corrected++;
- break;
- case 0:
- default:
- break;
- }
- } else if (info->retcode == ERR_DBERR) {
+ if (info->retcode == ERR_CORERR && info->use_ecc) {
+ mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+ } else if (info->retcode == ERR_UNCORERR) {
/*
* for blank page (all 0xff), HW will calculate its ECC as
* 0, which is different from the ECC information within
- * OOB, ignore such double bit errors
+ * OOB, ignore such uncorrectable errors
*/
if (is_buf_blank(buf, mtd->writesize))
info->retcode = ERR_NONE;
@@ -774,7 +1104,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
mtd->ecc_stats.failed++;
}
- return 0;
+ return info->max_bitflips;
}
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
@@ -833,21 +1163,27 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
+ int ret;
+
+ if (info->need_wait) {
+ ret = wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT);
+ info->need_wait = 0;
+ if (!ret) {
+ dev_err(&info->pdev->dev, "Ready time out!!!\n");
+ return NAND_STATUS_FAIL;
+ }
+ }
/* pxa3xx_nand_send_command has waited for command complete */
if (this->state == FL_WRITING || this->state == FL_ERASING) {
if (info->retcode == ERR_NONE)
return 0;
- else {
- /*
- * any error make it return 0x01 which will tell
- * the caller the erase and write fail
- */
- return 0x01;
- }
+ else
+ return NAND_STATUS_FAIL;
}
- return 0;
+ return NAND_STATUS_READY;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
@@ -869,7 +1205,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
}
/* calculate flash information */
- host->page_size = f->page_size;
host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
/* calculate addressing information */
@@ -906,13 +1241,15 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
uint32_t ndcr = nand_readl(info, NDCR);
if (ndcr & NDCR_PAGE_SZ) {
- host->page_size = 2048;
+ /* Controller's FIFO size */
+ info->chunk_size = 2048;
host->read_id_bytes = 4;
} else {
- host->page_size = 512;
+ info->chunk_size = 512;
host->read_id_bytes = 2;
}
+ /* Set an initial chunk size */
info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
@@ -988,18 +1325,89 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
struct mtd_info *mtd;
+ struct nand_chip *chip;
int ret;
+
mtd = info->host[info->cs]->mtd;
+ chip = mtd->priv;
+
/* use the common timing to make a try */
ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
if (ret)
return ret;
- pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
- if (info->is_ready)
- return 0;
+ chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret & NAND_STATUS_FAIL)
+ return -ENODEV;
+
+ return 0;
+}
- return -ENODEV;
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+ struct nand_ecc_ctrl *ecc,
+ int strength, int ecc_stepsize, int page_size)
+{
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+ return 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+ return 1;
+
+ /*
+ * Required ECC: 4-bit correction per 512 bytes
+ * Select: 16-bit correction per 2048 bytes
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch4bit;
+ ecc->strength = 16;
+ return 1;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch4bit;
+ ecc->strength = 16;
+ return 1;
+
+ /*
+ * Required ECC: 8-bit correction per 512 bytes
+ * Select: 16-bit correction per 1024 bytes
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch8bit;
+ ecc->strength = 16;
+ return 1;
+ }
+ return 0;
}
static int pxa3xx_nand_scan(struct mtd_info *mtd)
@@ -1014,6 +1422,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
uint32_t id = -1;
uint64_t chipsize;
int i, ret, num;
+ uint16_t ecc_strength, ecc_step;
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
@@ -1072,15 +1481,60 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
pxa3xx_flash_ids[1].name = NULL;
def = pxa3xx_flash_ids;
KEEP_CONFIG:
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = host->page_size;
- chip->ecc.strength = 1;
-
if (info->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
+ /* Device detection must be done with ECC disabled */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ nand_writel(info, NDECCCTRL, 0x0);
+
if (nand_scan_ident(mtd, 1, def))
return -ENODEV;
+
+ if (pdata->flash_bbt) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_USE_FLASH |
+ NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /*
+ * If the page size is bigger than the FIFO size, check that we
+ * are given the right variant and then switch to the extended
+ * (aka split) command handling.
+ */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ chip->cmdfunc = nand_cmdfunc_extended;
+ } else {
+ dev_err(&info->pdev->dev,
+ "unsupported page size on this variant\n");
+ return -ENODEV;
+ }
+ }
+
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+
+ /* Set default ECC strength requirements on non-ONFI devices */
+ if (ecc_strength < 1 && ecc_step < 1) {
+ ecc_strength = 1;
+ ecc_step = 512;
+ }
+
+ ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ecc_step, mtd->writesize);
+ if (!ret) {
+ dev_err(&info->pdev->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ chip->ecc_strength_ds, mtd->writesize);
+ return -ENODEV;
+ }
+
/* calculate addressing information */
if (mtd->writesize >= 2048)
host->col_addr_cycles = 2;
@@ -1121,6 +1575,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
return -ENOMEM;
info->pdev = pdev;
+ info->variant = pxa3xx_nand_get_variant(pdev);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = (struct mtd_info *)((unsigned int)&info[1] +
(sizeof(*mtd) + sizeof(*host)) * cs);
@@ -1138,11 +1593,12 @@ static int alloc_nand_resource(struct platform_device *pdev)
chip->controller = &info->controller;
chip->waitfunc = pxa3xx_nand_waitfunc;
chip->select_chip = pxa3xx_nand_select_chip;
- chip->cmdfunc = pxa3xx_nand_cmdfunc;
chip->read_word = pxa3xx_nand_read_word;
chip->read_byte = pxa3xx_nand_read_byte;
chip->read_buf = pxa3xx_nand_read_buf;
chip->write_buf = pxa3xx_nand_write_buf;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->cmdfunc = nand_cmdfunc;
}
spin_lock_init(&chip->controller->lock);
@@ -1254,25 +1710,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id pxa3xx_nand_dt_ids[] = {
- {
- .compatible = "marvell,pxa3xx-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_PXA,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
-
-static enum pxa3xx_nand_variant
-pxa3xx_nand_get_variant(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
- if (!of_id)
- return PXA3XX_NAND_VARIANT_PXA;
- return (enum pxa3xx_nand_variant)of_id->data;
-}
-
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
@@ -1292,6 +1729,7 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (of_get_property(np, "marvell,nand-keep-config", NULL))
pdata->keep_config = 1;
of_property_read_u32(np, "num-cs", &pdata->num_cs);
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
pdev->dev.platform_data = pdata;
@@ -1329,7 +1767,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
info = platform_get_drvdata(pdev);
- info->variant = pxa3xx_nand_get_variant(pdev);
probe_success = 0;
for (cs = 0; cs < pdata->num_cs; cs++) {
struct mtd_info *mtd = info->host[cs]->mtd;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d65cbe903d40..f0918e7411d9 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -46,9 +46,43 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
-#include <plat/regs-nand.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
+#define S3C2410_NFECC S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN (1<<15)
+#define S3C2410_NFCONF_INITECC (1<<12)
+#define S3C2410_NFCONF_nFCE (1<<11)
+#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
+#define S3C2410_NFSTAT_BUSY (1<<0)
+#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
+#define S3C2440_NFCONT_INITECC (1<<4)
+#define S3C2440_NFCONT_nFCE (1<<1)
+#define S3C2440_NFCONT_ENABLE (1<<0)
+#define S3C2440_NFSTAT_READY (1<<0)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
/* new oob placement block for use with hardware ecc generation
*/
@@ -919,7 +953,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
- dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
@@ -974,7 +1007,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
size = nr_sets * sizeof(*info->mtds);
info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
- dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index a3c84ebbe392..c0670237e7a2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -151,7 +151,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
dma_cap_set(DMA_SLAVE, mask);
flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_fifo0_tx);
+ (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
flctl->chan_fifo0_tx);
@@ -168,7 +168,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
goto err;
flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_fifo0_rx);
+ (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
flctl->chan_fifo0_rx);
@@ -897,7 +897,7 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
&flctl->pm_qos,
- DEV_PM_QOS_LATENCY,
+ DEV_PM_QOS_RESUME_LATENCY,
100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
@@ -1021,7 +1021,6 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
struct flctl_soc_config {
unsigned long flcmncr_val;
unsigned has_hwecc:1;
@@ -1059,10 +1058,8 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "%s: failed to allocate config data\n", __func__);
+ if (!pdata)
return NULL;
- }
/* set SoC specific options */
pdata->flcmncr_val = config->flcmncr_val;
@@ -1080,12 +1077,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
return pdata;
}
-#else /* CONFIG_OF */
-static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
static int flctl_probe(struct platform_device *pdev)
{
@@ -1094,38 +1085,30 @@ static int flctl_probe(struct platform_device *pdev)
struct mtd_info *flctl_mtd;
struct nand_chip *nand;
struct sh_flctl_platform_data *pdata;
- int ret = -ENXIO;
+ int ret;
int irq;
struct mtd_part_parser_data ppdata = {};
- flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
- if (!flctl) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+ if (!flctl)
return -ENOMEM;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- goto err_iomap;
- }
-
- flctl->reg = ioremap(res->start, resource_size(res));
- if (flctl->reg == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- goto err_iomap;
- }
+ flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flctl->reg))
+ return PTR_ERR(flctl->reg);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get flste irq data\n");
- goto err_flste;
+ return -ENXIO;
}
- ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
+ ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+ "flste", flctl);
if (ret) {
dev_err(&pdev->dev, "request interrupt failed.\n");
- goto err_flste;
+ return ret;
}
if (pdev->dev.of_node)
@@ -1135,8 +1118,7 @@ static int flctl_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "no setup data defined\n");
- ret = -EINVAL;
- goto err_pdata;
+ return -EINVAL;
}
platform_set_drvdata(pdev, flctl);
@@ -1190,12 +1172,6 @@ static int flctl_probe(struct platform_device *pdev)
err_chip:
flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
-err_pdata:
- free_irq(irq, flctl);
-err_flste:
- iounmap(flctl->reg);
-err_iomap:
- kfree(flctl);
return ret;
}
@@ -1206,9 +1182,6 @@ static int flctl_remove(struct platform_device *pdev)
flctl_release_dma(flctl);
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), flctl);
- iounmap(flctl->reg);
- kfree(flctl);
return 0;
}
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 87908d760feb..e81059b58382 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -121,10 +121,8 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* Allocate memory for MTD device structure and private data */
sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
- if (!sharpsl) {
- printk("Unable to allocate SharpSL NAND MTD device structure.\n");
+ if (!sharpsl)
return -ENOMEM;
- }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -136,7 +134,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* map physical address */
sharpsl->io = ioremap(r->start, resource_size(r));
if (!sharpsl->io) {
- printk("ioremap to access Sharp SL NAND chip failed\n");
+ dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
err = -EIO;
goto err_ioremap;
}
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index a3747c914d57..fb8fd35fa668 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -371,11 +371,9 @@ static int tmio_probe(struct platform_device *dev)
if (data == NULL)
dev_warn(&dev->dev, "NULL platform data!\n");
- tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
- if (!tmio) {
- retval = -ENOMEM;
- goto err_kzalloc;
- }
+ tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+ if (!tmio)
+ return -ENOMEM;
tmio->dev = dev;
@@ -385,22 +383,18 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, resource_size(ccr));
- if (!tmio->ccr) {
- retval = -EIO;
- goto err_iomap_ccr;
- }
+ tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+ if (!tmio->ccr)
+ return -EIO;
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, resource_size(fcr));
- if (!tmio->fcr) {
- retval = -EIO;
- goto err_iomap_fcr;
- }
+ tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+ if (!tmio->fcr)
+ return -EIO;
retval = tmio_hw_init(dev, tmio);
if (retval)
- goto err_hwinit;
+ return retval;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = tmio->fcr;
@@ -428,7 +422,8 @@ static int tmio_probe(struct platform_device *dev)
/* 15 us command delay time */
nand_chip->chip_delay = 15;
- retval = request_irq(irq, &tmio_irq, 0, dev_name(&dev->dev), tmio);
+ retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+ dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
@@ -440,7 +435,7 @@ static int tmio_probe(struct platform_device *dev)
/* Scan to find existence of the device */
if (nand_scan(mtd, 1)) {
retval = -ENODEV;
- goto err_scan;
+ goto err_irq;
}
/* Register the partitions */
retval = mtd_device_parse_register(mtd, NULL, NULL,
@@ -451,18 +446,8 @@ static int tmio_probe(struct platform_device *dev)
nand_release(mtd);
-err_scan:
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
err_irq:
tmio_hw_stop(dev, tmio);
-err_hwinit:
- iounmap(tmio->fcr);
-err_iomap_fcr:
- iounmap(tmio->ccr);
-err_iomap_ccr:
- kfree(tmio);
-err_kzalloc:
return retval;
}
@@ -471,12 +456,7 @@ static int tmio_remove(struct platform_device *dev)
struct tmio_nand *tmio = platform_get_drvdata(dev);
nand_release(&tmio->mtd);
- if (tmio->irq)
- free_irq(tmio->irq, tmio);
tmio_hw_stop(dev, tmio);
- iounmap(tmio->fcr);
- iounmap(tmio->ccr);
- kfree(tmio);
return 0;
}
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 235714a421dd..c1622a5ba814 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -319,11 +319,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
continue;
txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
GFP_KERNEL);
- if (!txx9_priv) {
- dev_err(&dev->dev, "Unable to allocate "
- "TXx9 NDFMC MTD device structure.\n");
+ if (!txx9_priv)
continue;
- }
chip = &txx9_priv->chip;
mtd = &txx9_priv->mtd;
mtd->owner = THIS_MODULE;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d64f8c30945f..aa26c32e1bc2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -81,7 +81,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
- (*pparts)[i].name = (char *)partname;
+ (*pparts)[i].name = partname;
if (of_get_property(pp, "read-only", &len))
(*pparts)[i].mask_flags |= MTD_WRITEABLE;
@@ -152,7 +152,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
if (names && (plen > 0)) {
int len = strlen(names) + 1;
- (*pparts)[i].name = (char *)names;
+ (*pparts)[i].name = names;
plen -= len;
names += len;
} else {
@@ -173,18 +173,9 @@ static struct mtd_part_parser ofoldpart_parser = {
static int __init ofpart_parser_init(void)
{
- int rc;
- rc = register_mtd_parser(&ofpart_parser);
- if (rc)
- goto out;
-
- rc = register_mtd_parser(&ofoldpart_parser);
- if (!rc)
- return 0;
-
- deregister_mtd_parser(&ofoldpart_parser);
-out:
- return rc;
+ register_mtd_parser(&ofpart_parser);
+ register_mtd_parser(&ofoldpart_parser);
+ return 0;
}
static void __exit ofpart_parser_exit(void)
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 63699fffc96d..8e1919b6f074 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -58,7 +58,7 @@ static int generic_onenand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0;
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
info->onenand.irq = platform_get_irq(pdev, 0);
info->mtd.name = dev_name(&pdev->dev);
diff --git a/drivers/mtd/onenand/samsung.h b/drivers/mtd/onenand/samsung.h
index c4a80e67e438..9016dc0136a8 100644
--- a/drivers/mtd/onenand/samsung.h
+++ b/drivers/mtd/onenand/samsung.h
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/plat-s3c/include/plat/regs-onenand.h
- *
* Copyright (C) 2008-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 580035c803d6..5da911ebdf49 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -300,7 +300,8 @@ MODULE_ALIAS("RedBoot");
static int __init redboot_parser_init(void)
{
- return register_mtd_parser(&redboot_parser);
+ register_mtd_parser(&redboot_parser);
+ return 0;
}
static void __exit redboot_parser_exit(void)
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 70106607c247..e579f9027c47 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -19,7 +19,7 @@
* or detected.
*/
-#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND)
struct nand_ecc_test {
const char *name;
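
IS_ENABLED(CONFIG_FOO) is true when the option is built in or modular, so it replaces the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pair with a single test. A tiny sketch (CONFIG_MTD_NAND is the real symbol; the helper is only illustrative):

#include <linux/kconfig.h>

static bool nand_ecc_test_possible(void)
{
	/* true for =y and =m, false when the option is not set */
	return IS_ENABLED(CONFIG_MTD_NAND);
}
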
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 33bb1f2b63e4..6f27d9a1be3b 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1453,8 +1453,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
- if (!scan_ai)
+ if (!scan_ai) {
+ err = -ENOMEM;
goto out_wl;
+ }
err = scan_all(ubi, scan_ai, 0);
if (err) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e05dc6298c1d..57deae961429 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1245,8 +1245,10 @@ static int __init ubi_init(void)
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
- if (!ubi_wl_entry_slab)
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
goto out_dev_unreg;
+ }
err = ubi_debugfs_init();
if (err)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index ead861307b3c..c5dad652614d 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -463,8 +463,8 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
}
if (found_orphan) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
@@ -846,16 +846,16 @@ fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
return ret;
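
The three fastmap hunks all apply the same fix: unlink the element from its list before returning it to the slab, since calling list_del() on already-freed memory is a use-after-free. A minimal sketch of the safe ordering; the struct name follows the UBI attach code, but treat the surrounding context as illustrative:

	struct ubi_ainf_peb *aeb, *tmp;

	list_for_each_entry_safe(aeb, tmp, &used, u.list) {
		list_del(&aeb->u.list);			  /* unlink first ... */
		kmem_cache_free(ai->aeb_slab_cache, aeb); /* ... then free    */
	}
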
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index bf79def40126..d36134925d31 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -495,10 +495,12 @@ out:
*/
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
- int err, err1;
+ int err;
size_t written;
loff_t addr;
uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+
/*
* Note, we cannot generally define VID header buffers on stack,
* because of the way we deal with these buffers (see the header
@@ -509,50 +511,38 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr vid_hdr;
/*
+ * If the VID or EC header is valid, we have to corrupt it before erasing.
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
* corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
- if (!err) {
- addr += ubi->vid_hdr_aloffset;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
- if (!err)
- return 0;
+ if (err)
+ goto error;
}
- /*
- * We failed to write to the media. This was observed with Spansion
- * S29GL512N NOR flash. Most probably the previously eraseblock erasure
- * was interrupted at a very inappropriate moment, so it became
- * unwritable. In this case we probably anyway have garbage in this
- * PEB.
- */
- err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF) {
- struct ubi_ec_hdr ec_hdr;
-
- err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
- if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
- err1 == UBI_IO_FF)
- /*
- * Both VID and EC headers are corrupted, so we can
- * safely erase this PEB and not afraid that it will be
- * treated as a valid PEB in case of an unclean reboot.
- */
- return 0;
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
}
+ return 0;
+error:
/*
- * The PEB contains a valid VID header, but we cannot invalidate it.
- * Supposedly the flash media or the driver is screwed up, so return an
- * error.
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
*/
- ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
- pnum, err, err1);
+ ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b45b240889f5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device
can be added in the same way as a macvlan device, using 'type
- macvlan', and then be accessed through the tap user space interface.
+ macvtap', and then be accessed through the tap user space interface.
To compile this driver as a module, choose M here: the module
will be called macvtap.
@@ -236,6 +236,7 @@ config VETH
config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
+ select AVERAGE
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index a7271e093845..67977f15af25 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -32,39 +32,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netlink.h>
+#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
ethernet adaptor have the name "eth[0123...]".
*/
-extern struct net_device *hp100_probe(int unit);
-extern struct net_device *ultra_probe(int unit);
-extern struct net_device *wd_probe(int unit);
-extern struct net_device *ne_probe(int unit);
-extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ni65_probe(int unit);
-extern struct net_device *sonic_probe(int unit);
-extern struct net_device *smc_init(int unit);
-extern struct net_device *atarilance_probe(int unit);
-extern struct net_device *sun3lance_probe(int unit);
-extern struct net_device *sun3_82586_probe(int unit);
-extern struct net_device *apne_probe(int unit);
-extern struct net_device *cs89x0_probe(int unit);
-extern struct net_device *mvme147lance_probe(int unit);
-extern struct net_device *tc515_probe(int unit);
-extern struct net_device *lance_probe(int unit);
-extern struct net_device *mac8390_probe(int unit);
-extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *cops_probe(int unit);
-extern struct net_device *ltpc_probe(void);
-
-/* Fibre Channel adapters */
-extern int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-extern int sbni_probe(int unit);
-
struct devprobe2 {
struct net_device *(*probe)(int unit);
int status; /* non-zero if autoprobe has failed */
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 74dc1875f9cd..326a612a2730 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -32,7 +32,6 @@
* **********************
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 5a5d720da929..6f4e80853ed4 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_sysfs_slave.o bond_debugfs.o bond_netlink.o bond_options.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4ced59436558..dcde56057fe1 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -34,14 +34,14 @@
#include "bonding.h"
#include "bond_3ad.h"
-// General definitions
+/* General definitions */
#define AD_SHORT_TIMEOUT 1
#define AD_LONG_TIMEOUT 0
#define AD_STANDBY 0x2
#define AD_MAX_TX_IN_SECOND 3
#define AD_COLLECTOR_MAX_DELAY 0
-// Timer definitions(43.4.4 in the 802.3ad standard)
+/* Timer definitions (43.4.4 in the 802.3ad standard) */
#define AD_FAST_PERIODIC_TIME 1
#define AD_SLOW_PERIODIC_TIME 30
#define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME)
@@ -49,7 +49,7 @@
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
-// Port state definitions(43.4.2.2 in the 802.3ad standard)
+/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
#define AD_STATE_LACP_ACTIVITY 0x1
#define AD_STATE_LACP_TIMEOUT 0x2
#define AD_STATE_AGGREGATION 0x4
@@ -59,7 +59,9 @@
#define AD_STATE_DEFAULTED 0x40
#define AD_STATE_EXPIRED 0x80
-// Port Variables definitions used by the State Machines(43.4.7 in the 802.3ad standard)
+/* Port Variables definitions used by the State Machines (43.4.7 in the
+ * 802.3ad standard)
+ */
#define AD_PORT_BEGIN 0x1
#define AD_PORT_LACP_ENABLED 0x2
#define AD_PORT_ACTOR_CHURN 0x4
@@ -71,27 +73,27 @@
#define AD_PORT_SELECTED 0x100
#define AD_PORT_MOVED 0x200
-// Port Key definitions
-// key is determined according to the link speed, duplex and
-// user key(which is yet not supported)
-// ------------------------------------------------------------
-// Port key : | User key | Speed |Duplex|
-// ------------------------------------------------------------
-// 16 6 1 0
+/* Port Key definitions
+ * key is determined according to the link speed, duplex and
+ * user key (which is yet not supported)
+ * --------------------------------------------------------------
+ * Port key : | User key | Speed | Duplex |
+ * --------------------------------------------------------------
+ * 16 6 1 0
+ */
#define AD_DUPLEX_KEY_BITS 0x1
#define AD_SPEED_KEY_BITS 0x3E
#define AD_USER_KEY_BITS 0xFFC0
-//dalloun
#define AD_LINK_SPEED_BITMASK_1MBPS 0x1
#define AD_LINK_SPEED_BITMASK_10MBPS 0x2
#define AD_LINK_SPEED_BITMASK_100MBPS 0x4
#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8
#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10
-//endalloun
-// compare MAC addresses
-#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
+/* compare MAC addresses */
+#define MAC_ADDRESS_EQUAL(A, B) \
+ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
static u16 ad_ticks_per_sec;
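
Note the polarity change: the old MAC_ADDRESS_COMPARE() was a memcmp() wrapper returning 0 on a match, while MAC_ADDRESS_EQUAL(), built on ether_addr_equal_64bits(), returns true on a match, so call sites flip their tests. A hedged before/after sketch of a call site (addr_a/addr_b and handle_match() are placeholders); etherdevice.h documents that the 64-bit helper relies on two bytes of accessible padding after each address, which callers need to guarantee:

#include <linux/etherdevice.h>

	/* old: memcmp() convention, 0 means "equal" */
	if (!MAC_ADDRESS_COMPARE(&addr_a, &addr_b))
		handle_match();

	/* new: boolean helper, true means "equal" */
	if (MAC_ADDRESS_EQUAL(&addr_a, &addr_b))
		handle_match();
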
@@ -99,7 +101,7 @@ static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-// ================= main 802.3ad protocol functions ==================
+/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port);
@@ -113,13 +115,13 @@ static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
static void ad_enable_collecting_distributing(struct port *port);
static void ad_disable_collecting_distributing(struct port *port);
-static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port);
-static void ad_marker_response_received(struct bond_marker *marker, struct port *port);
+static void ad_marker_info_received(struct bond_marker *marker_info,
+ struct port *port);
+static void ad_marker_response_received(struct bond_marker *marker,
+ struct port *port);
-/////////////////////////////////////////////////////////////////////////////////
-// ================= api to bonding and kernel code ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= api to bonding and kernel code ================== */
/**
* __get_bond_by_port - get the port's bonding struct
@@ -141,25 +143,32 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
*
* Return the aggregator of the first slave in @bond, or %NULL if it can't be
* found.
+ * The caller must hold RCU or RTNL lock.
*/
static inline struct aggregator *__get_first_agg(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
struct slave *first_slave;
+ struct aggregator *agg;
- // If there's no bond for this port, or bond has no slaves
+ /* If there's no bond for this port, or bond has no slaves */
if (bond == NULL)
return NULL;
- first_slave = bond_first_slave(bond);
- return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ rcu_read_unlock();
+
+ return agg;
}
-/*
- * __agg_has_partner
+/**
+ * __agg_has_partner - see if we have a partner
+ * @agg: the aggregator we're looking at
*
* Return nonzero if aggregator has a partner (denoted by a non-zero ether
- * address for the partner). Return 0 if not.
+ * address for the partner). Return 0 if not.
*/
static inline int __agg_has_partner(struct aggregator *agg)
{
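
__get_first_agg() now wraps bond_first_slave_rcu() in an RCU read-side critical section: the slave pointer is only guaranteed to stay valid between rcu_read_lock() and rcu_read_unlock(), so the value that is actually needed is extracted before unlocking. A generic sketch of the pattern with demo types, not the bonding structures:

#include <linux/rcupdate.h>

struct demo_item {
	int value;
};

static struct demo_item __rcu *demo_head;

static int demo_read_value(void)
{
	struct demo_item *item;
	int val = -1;

	rcu_read_lock();
	item = rcu_dereference(demo_head);	/* valid only inside the lock */
	if (item)
		val = item->value;		/* copy out what we need */
	rcu_read_unlock();

	return val;
}
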
@@ -169,30 +178,27 @@ static inline int __agg_has_partner(struct aggregator *agg)
/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
- *
*/
static inline void __disable_port(struct port *port)
{
- bond_set_slave_inactive_flags(port->slave);
+ bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
- *
*/
static inline void __enable_port(struct port *port)
{
struct slave *slave = port->slave;
if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
* __port_is_enabled - check if the port's slave is in active state
* @port: the port we're looking at
- *
*/
static inline int __port_is_enabled(struct port *port)
{
@@ -218,7 +224,6 @@ static inline u32 __get_agg_selection_mode(struct port *port)
/**
* __check_agg_selection_timer - check if the selection timer has expired
* @port: the port we're looking at
- *
*/
static inline int __check_agg_selection_timer(struct port *port)
{
@@ -233,7 +238,6 @@ static inline int __check_agg_selection_timer(struct port *port)
/**
* __get_state_machine_lock - lock the port's state machines
* @port: the port we're looking at
- *
*/
static inline void __get_state_machine_lock(struct port *port)
{
@@ -243,7 +247,6 @@ static inline void __get_state_machine_lock(struct port *port)
/**
* __release_state_machine_lock - unlock the port's state machines
* @port: the port we're looking at
- *
*/
static inline void __release_state_machine_lock(struct port *port)
{
@@ -266,10 +269,11 @@ static u16 __get_link_speed(struct port *port)
struct slave *slave = port->slave;
u16 speed;
- /* this if covers only a special case: when the configuration starts with
- * link down, it sets the speed to 0.
- * This is done in spite of the fact that the e100 driver reports 0 to be
- * compatible with MVT in the future.*/
+ /* this if covers only a special case: when the configuration starts
+ * with link down, it sets the speed to 0.
+ * This is done in spite of the fact that the e100 driver reports 0
+ * to be compatible with MVT in the future.
+ */
if (slave->link != BOND_LINK_UP)
speed = 0;
else {
@@ -291,7 +295,8 @@ static u16 __get_link_speed(struct port *port)
break;
default:
- speed = 0; // unknown speed value from ethtool. shouldn't happen
+ /* unknown speed value from ethtool. shouldn't happen */
+ speed = 0;
break;
}
}
@@ -315,8 +320,9 @@ static u8 __get_duplex(struct port *port)
u8 retval;
- // handling a special case: when the configuration starts with
- // link down, it sets the duplex to 0.
+ /* handling a special case: when the configuration starts with
+ * link down, it sets the duplex to 0.
+ */
if (slave->link != BOND_LINK_UP)
retval = 0x0;
else {
@@ -340,15 +346,14 @@ static u8 __get_duplex(struct port *port)
/**
* __initialize_port_locks - initialize a port's STATE machine spinlock
* @port: the slave of the port we're looking at
- *
*/
static inline void __initialize_port_locks(struct slave *slave)
{
- // make sure it isn't called twice
+ /* make sure it isn't called twice */
spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
}
-//conversions
+/* Conversions */
/**
* __ad_timer_to_ticks - convert a given timer type to AD module ticks
@@ -357,39 +362,38 @@ static inline void __initialize_port_locks(struct slave *slave)
*
* If @timer_type is %current_while_timer, @par indicates long/short timer.
* If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME,
- * %SLOW_PERIODIC_TIME.
+ * %SLOW_PERIODIC_TIME.
*/
static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
{
u16 retval = 0; /* to silence the compiler */
switch (timer_type) {
- case AD_CURRENT_WHILE_TIMER: // for rx machine usage
+ case AD_CURRENT_WHILE_TIMER: /* for rx machine usage */
if (par)
- retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout
+ retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec);
else
- retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout
+ retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec);
break;
- case AD_ACTOR_CHURN_TIMER: // for local churn machine
+ case AD_ACTOR_CHURN_TIMER: /* for local churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
- case AD_PERIODIC_TIMER: // for periodic machine
- retval = (par*ad_ticks_per_sec); // long timeout
+ case AD_PERIODIC_TIMER: /* for periodic machine */
+ retval = (par*ad_ticks_per_sec); /* long timeout */
break;
- case AD_PARTNER_CHURN_TIMER: // for remote churn machine
+ case AD_PARTNER_CHURN_TIMER: /* for remote churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
- case AD_WAIT_WHILE_TIMER: // for selection machine
+ case AD_WAIT_WHILE_TIMER: /* for selection machine */
retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec);
break;
}
+
return retval;
}
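As a worked example of the arithmetic above: the state machine runs roughly every 100 ms (so ad_ticks_per_sec is about 10), and with the nominal 802.3ad timer lengths of 3 s (short timeout), 90 s (long timeout), 60 s (churn detection) and 2 s (aggregate wait) the conversions come out as below. The constants here are illustrative assumptions; the driver's real values are the AD_*_TIME defines in bond_3ad.h, which this hunk does not show.

        #include <stdio.h>

        /* Illustrative values only: nominal 802.3ad timer lengths in seconds
         * and a 100 ms state-machine tick (10 ticks per second). */
        #define SHORT_TIMEOUT_TIME      3
        #define LONG_TIMEOUT_TIME       90
        #define CHURN_DETECTION_TIME    60
        #define AGGREGATE_WAIT_TIME     2
        #define TICKS_PER_SEC           10

        int main(void)
        {
                printf("current_while (short): %u ticks\n",
                       (unsigned)(SHORT_TIMEOUT_TIME * TICKS_PER_SEC));
                printf("current_while (long) : %u ticks\n",
                       (unsigned)(LONG_TIMEOUT_TIME * TICKS_PER_SEC));
                printf("churn detection      : %u ticks\n",
                       (unsigned)(CHURN_DETECTION_TIME * TICKS_PER_SEC));
                printf("wait_while           : %u ticks\n",
                       (unsigned)(AGGREGATE_WAIT_TIME * TICKS_PER_SEC));
                return 0;
        }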
-/////////////////////////////////////////////////////////////////////////////////
-// ================= ad_rx_machine helper functions ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= ad_rx_machine helper functions ================== */
/**
* __choose_matched - update a port's matched variable from a received lacpdu
@@ -416,17 +420,18 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
*/
static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
{
- // check if all parameters are alike
+ /* check if all parameters are alike, or this is an individual
+ * link (aggregation == FALSE); if so, update the state machine
+ * Matched variable.
+ */
if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
(ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
- !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+ MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- // or this is individual link(aggregation == FALSE)
((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
) {
- // update the state machine Matched variable
port->sm_vars |= AD_PORT_MATCHED;
} else {
port->sm_vars &= ~AD_PORT_MATCHED;
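This hunk is one of many in the patch that flip MAC comparisons from the old MAC_ADDRESS_COMPARE (memcmp-style, zero means equal) to MAC_ADDRESS_EQUAL (true means equal), so every negation in the surrounding conditions is inverted as well. The real macro lives in bond_3ad.h and is not shown in this diff; the standalone sketch below is only a plausible stand-in to make the sense change concrete.

        #include <stdio.h>
        #include <string.h>

        struct mac_addr { unsigned char addr[6]; };

        /* old style: memcmp semantics, 0 means "equal" */
        #define MAC_ADDRESS_COMPARE(a, b) memcmp((a), (b), sizeof(struct mac_addr))

        /* new style: boolean, non-zero means "equal" (illustrative definition) */
        #define MAC_ADDRESS_EQUAL(a, b) (MAC_ADDRESS_COMPARE((a), (b)) == 0)

        int main(void)
        {
                struct mac_addr x = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
                struct mac_addr y = x;

                /* old test for equality ... */
                if (!MAC_ADDRESS_COMPARE(&x, &y))
                        puts("equal (old macro, negated)");

                /* ... becomes the direct boolean test */
                if (MAC_ADDRESS_EQUAL(&x, &y))
                        puts("equal (new macro)");

                return 0;
        }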
@@ -448,7 +453,9 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
struct port_params *partner = &port->partner_oper;
__choose_matched(lacpdu, port);
- // record the new parameter values for the partner operational
+ /* record the new parameter values for the partner
+ * operational
+ */
partner->port_number = ntohs(lacpdu->actor_port);
partner->port_priority = ntohs(lacpdu->actor_port_priority);
partner->system = lacpdu->actor_system;
@@ -456,10 +463,12 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
partner->key = ntohs(lacpdu->actor_key);
partner->port_state = lacpdu->actor_state;
- // set actor_oper_port_state.defaulted to FALSE
+ /* set actor_oper_port_state.defaulted to FALSE */
port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
- // set the partner sync. to on if the partner is sync. and the port is matched
+ /* set the partner sync. to on if the partner is sync,
+ * and the port is matched
+ */
if ((port->sm_vars & AD_PORT_MATCHED)
&& (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
partner->port_state |= AD_STATE_SYNCHRONIZATION;
@@ -479,11 +488,11 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
static void __record_default(struct port *port)
{
if (port) {
- // record the partner admin parameters
+ /* record the partner admin parameters */
memcpy(&port->partner_oper, &port->partner_admin,
sizeof(struct port_params));
- // set actor_oper_port_state.defaulted to true
+ /* set actor_oper_port_state.defaulted to true */
port->actor_oper_port_state |= AD_STATE_DEFAULTED;
}
}
@@ -506,14 +515,15 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
if (lacpdu && port) {
const struct port_params *partner = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different; if so, update the
+ * state machine Selected variable.
+ */
if (ntohs(lacpdu->actor_port) != partner->port_number ||
ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
- MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+ !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
(lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -537,15 +547,16 @@ static void __update_default_selected(struct port *port)
const struct port_params *admin = &port->partner_admin;
const struct port_params *oper = &port->partner_oper;
- // check if any parameter is different
+ /* check if any parameter is different; if so, update the
+ * state machine Selected variable.
+ */
if (admin->port_number != oper->port_number ||
admin->port_priority != oper->port_priority ||
- MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+ !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
(admin->port_state & AD_STATE_AGGREGATION)
!= (oper->port_state & AD_STATE_AGGREGATION)) {
- // update the state machine Selected variable
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -565,12 +576,14 @@ static void __update_default_selected(struct port *port)
*/
static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
{
- // validate lacpdu and port
+ /* validate lacpdu and port */
if (lacpdu && port) {
- // check if any parameter is different
+ /* check if any parameter is different; if so, update
+ * port->ntt.
+ */
if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
(ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
- MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+ !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@ -578,43 +591,12 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
) {
-
port->ntt = true;
}
}
}
/**
- * __attach_bond_to_agg
- * @port: the port we're looking at
- *
- * Handle the attaching of the port's control parser/multiplexer and the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __attach_bond_to_agg(struct port *port)
-{
- port = NULL; /* just to satisfy the compiler */
- // This function does nothing since the parser/multiplexer of the receive
- // and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
- * __detach_bond_from_agg
- * @port: the port we're looking at
- *
- * Handle the detaching of the port's control parser/multiplexer from the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __detach_bond_from_agg(struct port *port)
-{
- port = NULL; /* just to satisfy the compiler */
- // This function does nothing since the parser/multiplexer of the receive
- // and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
* __agg_ports_are_ready - check if all ports in an aggregator are ready
* @aggregator: the aggregator we're looking at
*
@@ -625,7 +607,9 @@ static int __agg_ports_are_ready(struct aggregator *aggregator)
int retval = 1;
if (aggregator) {
- // scan all ports in this aggregator to verfy if they are all ready
+ /* scan all ports in this aggregator to verify if they are
+ * all ready.
+ */
for (port = aggregator->lag_ports;
port;
port = port->next_port_in_aggregator) {
@@ -685,7 +669,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
bandwidth = aggregator->num_of_ports * 10000;
break;
default:
- bandwidth = 0; /*to silence the compiler ....*/
+ bandwidth = 0; /* to silence the compiler */
}
}
return bandwidth;
@@ -695,6 +679,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
* __get_active_agg - get the current active aggregator
* @aggregator: the aggregator we're looking at
*
+ * Caller must hold RCU lock.
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
@@ -702,7 +687,7 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
if (SLAVE_AD_INFO(slave).aggregator.is_active)
return &(SLAVE_AD_INFO(slave).aggregator);
@@ -712,15 +697,14 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
/**
* __update_lacpdu_from_port - update a port's lacpdu fields
* @port: the port we're looking at
- *
*/
static inline void __update_lacpdu_from_port(struct port *port)
{
struct lacpdu *lacpdu = &port->lacpdu;
const struct port_params *partner = &port->partner_oper;
- /* update current actual Actor parameters */
- /* lacpdu->subtype initialized
+ /* update current actual Actor parameters
+ * lacpdu->subtype initialized
* lacpdu->version_number initialized
* lacpdu->tlv_type_actor_info initialized
* lacpdu->actor_information_length initialized
@@ -756,9 +740,7 @@ static inline void __update_lacpdu_from_port(struct port *port)
*/
}
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= main 802.3ad protocol code ======================================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ================= main 802.3ad protocol code ========================= */
/**
* ad_lacpdu_send - send out a lacpdu packet on a given port
@@ -788,11 +770,12 @@ static int ad_lacpdu_send(struct port *port)
memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
/* Note: source address is set to be the member's PERMANENT address,
- because we use it to identify loopback lacpdus in receive. */
+ * because we use it to identify loopback lacpdus in receive.
+ */
memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
- lacpdu_header->lacpdu = port->lacpdu; // struct copy
+ lacpdu_header->lacpdu = port->lacpdu;
dev_queue_xmit(skb);
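ad_lacpdu_send() addresses the frame to the Slow Protocols multicast group (MULTICAST_LACPDU_ADDR) and uses the member's permanent MAC as the source so that loopbacked LACPDUs can be recognised on receive. The standalone sketch below merely assembles such an Ethernet header in a buffer to show the on-wire layout; the 01:80:c2:00:00:02 destination and the 0x8809 Slow Protocols EtherType are the standard values, everything else is illustrative.

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define ETH_ALEN 6
        #define ETH_P_SLOW 0x8809   /* Slow Protocols (LACP/Marker) EtherType */

        /* Slow Protocols multicast address used for LACPDUs */
        static const uint8_t lacpdu_mcast[ETH_ALEN] = {
                0x01, 0x80, 0xc2, 0x00, 0x00, 0x02
        };

        int main(void)
        {
                /* illustrative permanent MAC of the slave interface */
                const uint8_t perm_hwaddr[ETH_ALEN] = {
                        0x52, 0x54, 0x00, 0x12, 0x34, 0x56
                };
                uint8_t hdr[14];
                size_t i;

                memcpy(hdr, lacpdu_mcast, ETH_ALEN);            /* h_dest   */
                memcpy(hdr + ETH_ALEN, perm_hwaddr, ETH_ALEN);  /* h_source */
                hdr[12] = ETH_P_SLOW >> 8;                      /* h_proto, big endian */
                hdr[13] = ETH_P_SLOW & 0xff;

                for (i = 0; i < sizeof(hdr); i++)
                        printf("%02x%s", (unsigned)hdr[i],
                               i + 1 < sizeof(hdr) ? ":" : "\n");
                return 0;
        }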
@@ -829,11 +812,12 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
/* Note: source address is set to be the member's PERMANENT address,
- because we use it to identify loopback MARKERs in receive. */
+ * because we use it to identify loopback MARKERs in receive.
+ */
memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
- marker_header->marker = *marker; // struct copy
+ marker_header->marker = *marker;
dev_queue_xmit(skb);
@@ -843,72 +827,90 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
- *
*/
static void ad_mux_machine(struct port *port)
{
mux_states_t last_state;
- // keep current State Machine state to compare later if it was changed
+ /* keep current State Machine state to compare later if it was
+ * changed
+ */
last_state = port->sm_mux_state;
if (port->sm_vars & AD_PORT_BEGIN) {
- port->sm_mux_state = AD_MUX_DETACHED; // next state
+ port->sm_mux_state = AD_MUX_DETACHED;
} else {
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
if ((port->sm_vars & AD_PORT_SELECTED)
|| (port->sm_vars & AD_PORT_STANDBY))
/* if SELECTED or STANDBY */
- port->sm_mux_state = AD_MUX_WAITING; // next state
+ port->sm_mux_state = AD_MUX_WAITING;
break;
case AD_MUX_WAITING:
- // if SELECTED == FALSE return to DETACH state
- if (!(port->sm_vars & AD_PORT_SELECTED)) { // if UNSELECTED
+ /* if SELECTED == FALSE return to DETACH state */
+ if (!(port->sm_vars & AD_PORT_SELECTED)) {
port->sm_vars &= ~AD_PORT_READY_N;
- // in order to withhold the Selection Logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* to spare the Selection Logic from checking
+ * all ports' READY_N values every callback
+ * cycle just to update the ready variable,
+ * we check READY_N and update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- port->sm_mux_state = AD_MUX_DETACHED; // next state
+ port->sm_mux_state = AD_MUX_DETACHED;
break;
}
- // check if the wait_while_timer expired
+ /* check if the wait_while_timer expired */
if (port->sm_mux_timer_counter
&& !(--port->sm_mux_timer_counter))
port->sm_vars |= AD_PORT_READY_N;
- // in order to withhold the selection logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* to spare the selection logic from checking all
+ * ports' READY_N values every callback cycle just to
+ * update the ready variable, we check READY_N and
+ * update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
+ /* if the wait_while_timer expired, and the port is
+ * in READY state, move to ATTACHED state
+ */
if ((port->sm_vars & AD_PORT_READY)
&& !port->sm_mux_timer_counter)
- port->sm_mux_state = AD_MUX_ATTACHED; // next state
+ port->sm_mux_state = AD_MUX_ATTACHED;
break;
case AD_MUX_ATTACHED:
- // check also if agg_select_timer expired(so the edable port will take place only after this timer)
- if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
- port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state
- } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if UNSELECTED or STANDBY
+ /* check also if agg_select_timer expired (so the
+ * port will only be enabled after this timer)
+ */
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+ !__check_agg_selection_timer(port)) {
+ port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
+ } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY)) {
+ /* if UNSELECTED or STANDBY */
port->sm_vars &= ~AD_PORT_READY_N;
- // in order to withhold the selection logic to check all ports READY_N value
- // every callback cycle to update ready variable, we check READY_N and update READY here
+ /* to spare the selection logic from checking
+ * all ports' READY_N values every callback
+ * cycle just to update the ready variable,
+ * we check READY_N and update READY here
+ */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
- port->sm_mux_state = AD_MUX_DETACHED;// next state
+ port->sm_mux_state = AD_MUX_DETACHED;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
- if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)
- ) {
- port->sm_mux_state = AD_MUX_ATTACHED;// next state
-
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
+ port->sm_mux_state = AD_MUX_ATTACHED;
} else {
- // if port state hasn't changed make
- // sure that a collecting distributing
- // port in an active aggregator is enabled
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
@@ -917,19 +919,18 @@ static void ad_mux_machine(struct port *port)
}
}
break;
- default: //to silence the compiler
+ default:
break;
}
}
- // check if the state machine was changed
+ /* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
- __detach_bond_from_agg(port);
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port);
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
@@ -940,7 +941,6 @@ static void ad_mux_machine(struct port *port)
port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
break;
case AD_MUX_ATTACHED:
- __attach_bond_to_agg(port);
port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
@@ -953,7 +953,7 @@ static void ad_mux_machine(struct port *port)
ad_enable_collecting_distributing(port);
port->ntt = true;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -972,59 +972,63 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
rx_states_t last_state;
- // keep current State Machine state to compare later if it was changed
+ /* keep current State Machine state to compare later if it was
+ * changed
+ */
last_state = port->sm_rx_state;
- // check if state machine should change state
- // first, check if port was reinitialized
+ /* check if state machine should change state */
+
+ /* first, check if port was reinitialized */
if (port->sm_vars & AD_PORT_BEGIN)
- /* next state */
port->sm_rx_state = AD_RX_INITIALIZE;
- // check if port is not enabled
+ /* check if port is not enabled */
else if (!(port->sm_vars & AD_PORT_BEGIN)
&& !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
- /* next state */
port->sm_rx_state = AD_RX_PORT_DISABLED;
- // check if new lacpdu arrived
- else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
- port->sm_rx_timer_counter = 0; // zero timer
+ /* check if new lacpdu arrived */
+ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
+ (port->sm_rx_state == AD_RX_DEFAULTED) ||
+ (port->sm_rx_state == AD_RX_CURRENT))) {
+ port->sm_rx_timer_counter = 0;
port->sm_rx_state = AD_RX_CURRENT;
} else {
- // if timer is on, and if it is expired
- if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) {
+ /* if timer is on, and if it is expired */
+ if (port->sm_rx_timer_counter &&
+ !(--port->sm_rx_timer_counter)) {
switch (port->sm_rx_state) {
case AD_RX_EXPIRED:
- port->sm_rx_state = AD_RX_DEFAULTED; // next state
+ port->sm_rx_state = AD_RX_DEFAULTED;
break;
case AD_RX_CURRENT:
- port->sm_rx_state = AD_RX_EXPIRED; // next state
+ port->sm_rx_state = AD_RX_EXPIRED;
break;
- default: //to silence the compiler
+ default:
break;
}
} else {
- // if no lacpdu arrived and no timer is on
+ /* if no lacpdu arrived and no timer is on */
switch (port->sm_rx_state) {
case AD_RX_PORT_DISABLED:
if (port->sm_vars & AD_PORT_MOVED)
- port->sm_rx_state = AD_RX_INITIALIZE; // next state
+ port->sm_rx_state = AD_RX_INITIALIZE;
else if (port->is_enabled
&& (port->sm_vars
& AD_PORT_LACP_ENABLED))
- port->sm_rx_state = AD_RX_EXPIRED; // next state
+ port->sm_rx_state = AD_RX_EXPIRED;
else if (port->is_enabled
&& ((port->sm_vars
& AD_PORT_LACP_ENABLED) == 0))
- port->sm_rx_state = AD_RX_LACP_DISABLED; // next state
+ port->sm_rx_state = AD_RX_LACP_DISABLED;
break;
- default: //to silence the compiler
+ default:
break;
}
}
}
- // check if the State machine was changed or new lacpdu arrived
+ /* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
@@ -1039,10 +1043,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__record_default(port);
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
port->sm_vars &= ~AD_PORT_MOVED;
- port->sm_rx_state = AD_RX_PORT_DISABLED; // next state
-
- /*- Fall Through -*/
+ port->sm_rx_state = AD_RX_PORT_DISABLED;
+ /* Fall Through */
case AD_RX_PORT_DISABLED:
port->sm_vars &= ~AD_PORT_MATCHED;
break;
@@ -1054,13 +1057,15 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
- //Reset of the Synchronization flag. (Standard 43.4.12)
- //This reset cause to disable this port in the COLLECTING_DISTRIBUTING state of the
- //mux machine in case of EXPIRED even if LINK_DOWN didn't arrive for the port.
+ /* Reset of the Synchronization flag (Standard 43.4.12)
+ * This reset causes the port to be disabled in the
+ * COLLECTING_DISTRIBUTING state of the mux machine in
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
- port->partner_oper.port_state |=
- AD_STATE_LACP_ACTIVITY;
+ port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= AD_STATE_EXPIRED;
break;
@@ -1071,12 +1076,12 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
- // detect loopback situation
- if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
- // INFO_RECEIVED_LOOPBACK_FRAMES
- pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
- "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
- port->slave->bond->dev->name, port->slave->dev->name);
+ /* detect loopback situation */
+ if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
+ &(port->actor_system))) {
+ pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ port->slave->bond->dev->name,
+ port->slave->dev->name);
return;
}
__update_selected(lacpdu, port);
@@ -1085,7 +1090,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -1094,13 +1099,14 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/**
* ad_tx_machine - handle a port's tx state machine
* @port: the port we're looking at
- *
*/
static void ad_tx_machine(struct port *port)
{
- // check if tx timer expired, to verify that we do not send more than 3 packets per second
+ /* check if tx timer expired, to verify that we do not send more than
+ * 3 packets per second
+ */
if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
- // check if there is something to send
+ /* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1108,14 +1114,16 @@ static void ad_tx_machine(struct port *port)
pr_debug("Sent LACPDU on port %d\n",
port->actor_port_number);
- /* mark ntt as false, so it will not be sent again until
- demanded */
+ /* mark ntt as false, so it will not be sent
+ * again until demanded
+ */
port->ntt = false;
}
}
- // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND
- port->sm_tx_timer_counter =
- ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
+ /* restart tx timer (to verify that we will not exceed
+ * AD_MAX_TX_IN_SECOND)
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
@@ -1129,76 +1137,79 @@ static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
- // keep current state machine state to compare later if it was changed
+ /* keep current state machine state to compare later if it was changed */
last_state = port->sm_periodic_state;
- // check if port was reinitialized
+ /* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
(!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
) {
- port->sm_periodic_state = AD_NO_PERIODIC; // next state
+ port->sm_periodic_state = AD_NO_PERIODIC;
}
- // check if state machine should change state
+ /* check if state machine should change state */
else if (port->sm_periodic_timer_counter) {
- // check if periodic state machine expired
+ /* check if periodic state machine expired */
if (!(--port->sm_periodic_timer_counter)) {
- // if expired then do tx
- port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ /* if expired then do tx */
+ port->sm_periodic_state = AD_PERIODIC_TX;
} else {
- // If not expired, check if there is some new timeout parameter from the partner state
+ /* If not expired, check if there is some new timeout
+ * parameter from the partner state
+ */
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
& AD_STATE_LACP_TIMEOUT))
- port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
- // stop current timer
port->sm_periodic_timer_counter = 0;
- port->sm_periodic_state = AD_PERIODIC_TX; // next state
+ port->sm_periodic_state = AD_PERIODIC_TX;
}
break;
- default: //to silence the compiler
+ default:
break;
}
}
} else {
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
- port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ port->sm_periodic_state = AD_FAST_PERIODIC;
break;
case AD_PERIODIC_TX:
- if (!(port->partner_oper.port_state
- & AD_STATE_LACP_TIMEOUT))
- port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
+ if (!(port->partner_oper.port_state &
+ AD_STATE_LACP_TIMEOUT))
+ port->sm_periodic_state = AD_SLOW_PERIODIC;
else
- port->sm_periodic_state = AD_FAST_PERIODIC; // next state
+ port->sm_periodic_state = AD_FAST_PERIODIC;
break;
- default: //to silence the compiler
+ default:
break;
}
}
- // check if the state machine was changed
+ /* check if the state machine was changed */
if (port->sm_periodic_state != last_state) {
pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
- port->sm_periodic_timer_counter = 0; // zero timer
+ port->sm_periodic_timer_counter = 0;
break;
case AD_FAST_PERIODIC:
- port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ /* decrement 1 tick we lost in the PERIODIC_TX cycle */
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;
break;
case AD_SLOW_PERIODIC:
- port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+ /* decrement 1 tick we lost in the PERIODIC_TX cycle */
+ port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;
break;
case AD_PERIODIC_TX:
port->ntt = true;
break;
- default: //to silence the compiler
+ default:
break;
}
}
@@ -1221,30 +1232,38 @@ static void ad_port_selection_logic(struct port *port)
struct slave *slave;
int found = 0;
- // if the port is already Selected, do nothing
+ /* if the port is already Selected, do nothing */
if (port->sm_vars & AD_PORT_SELECTED)
return;
bond = __get_bond_by_port(port);
- // if the port is connected to other aggregator, detach it
+ /* if the port is connected to other aggregator, detach it */
if (port->aggregator) {
- // detach the port from its former aggregator
+ /* detach the port from its former aggregator */
temp_aggregator = port->aggregator;
for (curr_port = temp_aggregator->lag_ports; curr_port;
last_port = curr_port,
- curr_port = curr_port->next_port_in_aggregator) {
+ curr_port = curr_port->next_port_in_aggregator) {
if (curr_port == port) {
temp_aggregator->num_of_ports--;
- if (!last_port) {// if it is the first port attached to the aggregator
+ /* if it is the first port attached to the
+ * aggregator
+ */
+ if (!last_port) {
temp_aggregator->lag_ports =
port->next_port_in_aggregator;
- } else {// not the first port attached to the aggregator
+ } else {
+ /* not the first port attached to the
+ * aggregator
+ */
last_port->next_port_in_aggregator =
port->next_port_in_aggregator;
}
- // clear the port's relations to this aggregator
+ /* clear the port's relations to this
+ * aggregator
+ */
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
port->actor_port_aggregator_identifier = 0;
@@ -1252,41 +1271,46 @@ static void ad_port_selection_logic(struct port *port)
pr_debug("Port %d left LAG %d\n",
port->actor_port_number,
temp_aggregator->aggregator_identifier);
- // if the aggregator is empty, clear its parameters, and set it ready to be attached
+ /* if the aggregator is empty, clear its
+ * parameters, and set it ready to be attached
+ */
if (!temp_aggregator->lag_ports)
ad_clear_agg(temp_aggregator);
break;
}
}
- if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
- pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->bond->dev->name,
- port->actor_port_number,
- port->slave->dev->name,
- port->aggregator->aggregator_identifier);
+ if (!curr_port) {
+ /* meaning: the port was related to an aggregator
+ * but was not on the aggregator port list
+ */
+ pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+ port->slave->bond->dev->name,
+ port->actor_port_number,
+ port->slave->dev->name,
+ port->aggregator->aggregator_identifier);
}
}
- // search on all aggregators for a suitable aggregator for this port
+ /* search on all aggregators for a suitable aggregator for this port */
bond_for_each_slave(bond, slave, iter) {
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
- // keep a free aggregator for later use(if needed)
+ /* keep a free aggregator for later use(if needed) */
if (!aggregator->lag_ports) {
if (!free_aggregator)
free_aggregator = aggregator;
continue;
}
- // check if current aggregator suits us
- if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
- !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
+ /* check if current aggregator suits us */
+ if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
+ MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
- !aggregator->is_individual) // but is not individual OR
+ ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ !aggregator->is_individual) /* but is not individual OR */
)
) {
- // attach to the founded aggregator
+ /* attach to the found aggregator */
port->aggregator = aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
@@ -1297,23 +1321,26 @@ static void ad_port_selection_logic(struct port *port)
port->actor_port_number,
port->aggregator->aggregator_identifier);
- // mark this port as selected
+ /* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
found = 1;
break;
}
}
- // the port couldn't find an aggregator - attach it to a new aggregator
+ /* the port couldn't find an aggregator - attach it to a new
+ * aggregator
+ */
if (!found) {
if (free_aggregator) {
- // assign port a new aggregator
+ /* assign port a new aggregator */
port->aggregator = free_aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
- // update the new aggregator's parameters
- // if port was responsed from the end-user
+ /* update the new aggregator's parameters
+ * if port was responded from the end-user
+ */
if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
/* if port is full duplex */
port->aggregator->is_individual = false;
@@ -1332,7 +1359,7 @@ static void ad_port_selection_logic(struct port *port)
port->aggregator->lag_ports = port;
port->aggregator->num_of_ports++;
- // mark this port as selected
+ /* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
pr_debug("Port %d joined LAG %d(new LAG)\n",
@@ -1344,23 +1371,24 @@ static void ad_port_selection_logic(struct port *port)
port->actor_port_number, port->slave->dev->name);
}
}
- // if all aggregator's ports are READY_N == TRUE, set ready=TRUE in all aggregator's ports
- // else set ready=FALSE in all aggregator's ports
- __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+ /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
+ * in all aggregator's ports, else set ready=FALSE in all
+ * aggregator's ports
+ */
+ __set_agg_ports_ready(port->aggregator,
+ __agg_ports_are_ready(port->aggregator));
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator);
}
-/*
- * Decide if "agg" is a better choice for the new active aggregator that
+/* Decide if "agg" is a better choice for the new active aggregator than
* the current best, according to the ad_select policy.
*/
static struct aggregator *ad_agg_selection_test(struct aggregator *best,
struct aggregator *curr)
{
- /*
- * 0. If no best, select current.
+ /* 0. If no best, select current.
*
* 1. If the current agg is not individual, and the best is
* individual, select current.
@@ -1416,9 +1444,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warning("%s: Impossible agg select mode %d\n",
- curr->slave->bond->dev->name,
- __get_agg_selection_mode(curr->lag_ports));
+ pr_warn("%s: Impossible agg select mode %d\n",
+ curr->slave->bond->dev->name,
+ __get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1428,10 +1456,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
static int agg_device_up(const struct aggregator *agg)
{
struct port *port = agg->lag_ports;
+
if (!port)
return 0;
- return (netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev));
+
+ return netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev);
}
/**
@@ -1467,11 +1497,12 @@ static void ad_agg_selection_logic(struct aggregator *agg)
struct slave *slave;
struct port *port;
+ rcu_read_lock();
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
agg->is_active = 0;
@@ -1482,8 +1513,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
- /*
- * For the STABLE policy, don't replace the old active
+ /* For the STABLE policy, don't replace the old active
* aggregator if it's still active (it has an answering
* partner) or if both the best and active don't have an
* answering partner.
@@ -1491,7 +1521,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
if (active && active->lag_ports &&
active->lag_ports->is_enabled &&
(__agg_has_partner(active) ||
- (!__agg_has_partner(active) && !__agg_has_partner(best)))) {
+ (!__agg_has_partner(active) &&
+ !__agg_has_partner(best)))) {
if (!(!active->actor_oper_aggregator_key &&
best->actor_oper_aggregator_key)) {
best = NULL;
@@ -1505,7 +1536,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
active->is_active = 1;
}
- // if there is new best aggregator, activate it
+ /* if there is new best aggregator, activate it */
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->aggregator_identifier, best->num_of_ports,
@@ -1516,7 +1547,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@ -1526,10 +1557,11 @@ static void ad_agg_selection_logic(struct aggregator *agg)
agg->is_individual, agg->is_active);
}
- // check if any partner replys
+ /* check if any partner replies */
if (best->is_individual) {
- pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->bond->dev->name : "NULL");
+ pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1541,7 +1573,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
- // disable the ports that were related to the former active_aggregator
+ /* disable the ports that were related to the former
+ * active_aggregator
+ */
if (active) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
@@ -1550,8 +1584,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
- /*
- * if the selected aggregator is of join individuals
+ /* if the selected aggregator is of join individuals
* (partner_system is NULL), enable their ports
*/
active = __get_active_agg(origin);
@@ -1565,13 +1598,14 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
+ rcu_read_unlock();
+
bond_3ad_set_carrier(bond);
}
/**
* ad_clear_agg - clear a given aggregator's parameters
* @aggregator: the aggregator we're looking at
- *
*/
static void ad_clear_agg(struct aggregator *aggregator)
{
@@ -1595,7 +1629,6 @@ static void ad_clear_agg(struct aggregator *aggregator)
/**
* ad_initialize_agg - initialize a given aggregator's parameters
* @aggregator: the aggregator we're looking at
- *
*/
static void ad_initialize_agg(struct aggregator *aggregator)
{
@@ -1612,7 +1645,6 @@ static void ad_initialize_agg(struct aggregator *aggregator)
* ad_initialize_port - initialize a given port's parameters
* @aggregator: the aggregator we're looking at
* @lacp_fast: boolean. whether fast periodic should be used
- *
*/
static void ad_initialize_port(struct port *port, int lacp_fast)
{
@@ -1644,8 +1676,10 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->ntt = false;
port->actor_admin_port_key = 1;
port->actor_oper_port_key = 1;
- port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = AD_STATE_AGGREGATION |
+ AD_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = AD_STATE_AGGREGATION |
+ AD_STATE_LACP_ACTIVITY;
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -1654,7 +1688,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
port->is_enabled = true;
- // ****** private parameters ******
+ /* private parameters */
port->sm_vars = 0x3;
port->sm_rx_state = 0;
port->sm_rx_timer_counter = 0;
@@ -1692,11 +1726,12 @@ static void ad_enable_collecting_distributing(struct port *port)
/**
* ad_disable_collecting_distributing - disable a port's transmit/receive
* @port: the port we're looking at
- *
*/
static void ad_disable_collecting_distributing(struct port *port)
{
- if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+ if (port->aggregator &&
+ !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
+ &(null_mac_addr))) {
pr_debug("Disabling port %d(LAG %d)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
@@ -1704,66 +1739,22 @@ static void ad_disable_collecting_distributing(struct port *port)
}
}
-#if 0
-/**
- * ad_marker_info_send - send a marker information frame
- * @port: the port we're looking at
- *
- * This function does nothing since we decided not to implement send and handle
- * response for marker PDU's, in this stage, but only to respond to marker
- * information.
- */
-static void ad_marker_info_send(struct port *port)
-{
- struct bond_marker marker;
- u16 index;
-
- // fill the marker PDU with the appropriate values
- marker.subtype = 0x02;
- marker.version_number = 0x01;
- marker.tlv_type = AD_MARKER_INFORMATION_SUBTYPE;
- marker.marker_length = 0x16;
- // convert requester_port to Big Endian
- marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8));
- marker.requester_system = port->actor_system;
- // convert requester_port(u32) to Big Endian
- marker.requester_transaction_id =
- (((++port->transaction_id & 0xFF) << 24)
- | ((port->transaction_id & 0xFF00) << 8)
- | ((port->transaction_id & 0xFF0000) >> 8)
- | ((port->transaction_id & 0xFF000000) >> 24));
- marker.pad = 0;
- marker.tlv_type_terminator = 0x00;
- marker.terminator_length = 0x00;
- for (index = 0; index < 90; index++)
- marker.reserved_90[index] = 0;
-
- // send the marker information
- if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Information on port %d\n",
- port->actor_port_number);
- }
-}
-#endif
-
/**
* ad_marker_info_received - handle receive of a Marker information frame
* @marker_info: Marker info received
* @port: the port we're looking at
- *
*/
static void ad_marker_info_received(struct bond_marker *marker_info,
struct port *port)
{
struct bond_marker marker;
- // copy the received marker data to the response marker
- //marker = *marker_info;
+ /* copy the received marker data to the response marker */
memcpy(&marker, marker_info, sizeof(struct bond_marker));
- // change the marker subtype to marker response
+ /* change the marker subtype to marker response */
marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
- // send the marker response
+ /* send the marker response */
if (ad_marker_send(port, &marker) >= 0) {
pr_debug("Sent Marker Response on port %d\n",
port->actor_port_number);
@@ -1780,22 +1771,21 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
* information.
*/
static void ad_marker_response_received(struct bond_marker *marker,
- struct port *port)
+ struct port *port)
{
- marker = NULL; /* just to satisfy the compiler */
- port = NULL; /* just to satisfy the compiler */
- // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
+ marker = NULL;
+ port = NULL;
+ /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= AD exported functions to the main bonding code ==================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ========= AD exported functions to the main bonding code ========= */
-// Check aggregators status in team every T seconds
+/* Check aggregators status in team every T seconds */
#define AD_AGGREGATOR_SELECTION_TIMER 8
-/*
- * bond_3ad_initiate_agg_selection(struct bonding *bond)
+/**
+ * bond_3ad_initiate_agg_selection - initiate aggregator selection
+ * @bond: bonding struct
*
* Set the aggregation selection timer, to initiate an agg selection in
* the very near future. Called during first initialization, and during
@@ -1806,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
BOND_AD_INFO(bond).agg_select_timer = timeout;
}
-static u16 aggregator_identifier;
-
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
@@ -1817,16 +1805,18 @@ static u16 aggregator_identifier;
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
- // check that the bond is not initialized yet
- if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ /* check that the bond is not initialized yet */
+ if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
- aggregator_identifier = 0;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
- // initialize how many times this module is called in one second(should be about every 100ms)
+ /* initialize how many times this module is called in one
+ * second (should be about every 100ms)
+ */
ad_ticks_per_sec = tick_resolution;
bond_3ad_initiate_agg_selection(bond,
@@ -1842,22 +1832,16 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
* Returns: 0 on success
* < 0 on error
*/
-int bond_3ad_bind_slave(struct slave *slave)
+void bond_3ad_bind_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct port *port;
struct aggregator *aggregator;
- if (bond == NULL) {
- pr_err("%s: The slave %s is not attached to its bond\n",
- slave->bond->dev->name, slave->dev->name);
- return -1;
- }
-
- //check that the slave has not been initialized yet.
+ /* check that the slave has not been initialized yet. */
if (SLAVE_AD_INFO(slave).port.slave != slave) {
- // port initialization
+ /* port initialization */
port = &(SLAVE_AD_INFO(slave).port);
ad_initialize_port(port, bond->params.lacp_fast);
@@ -1865,40 +1849,40 @@ int bond_3ad_bind_slave(struct slave *slave)
__initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
- // key is determined according to the link speed, duplex and user key(which is yet not supported)
- // ------------------------------------------------------------
- // Port key : | User key | Speed |Duplex|
- // ------------------------------------------------------------
- // 16 6 1 0
- port->actor_admin_port_key = 0; // initialize this parameter
+ /* key is determined according to the link speed, duplex and
+ * user key (which is not yet supported)
+ */
+ port->actor_admin_port_key = 0;
port->actor_admin_port_key |= __get_duplex(port);
port->actor_admin_port_key |= (__get_link_speed(port) << 1);
port->actor_oper_port_key = port->actor_admin_port_key;
- // if the port is not full duplex, then the port should be not lacp Enabled
+ /* if the port is not full duplex, then the port should be not
+ * lacp Enabled
+ */
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
- // actor system is the bond's system
+ /* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
- // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+ /* tx timer(to verify that no more than MAX_TX_IN_SECOND
+ * lacpdu's are sent in one second)
+ */
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
__disable_port(port);
- // aggregator initialization
+ /* aggregator initialization */
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
ad_initialize_agg(aggregator);
aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
- aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
}
-
- return 0;
}
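Two behavioural changes land in bond_3ad_bind_slave() here: it can no longer fail, so it now returns void, and the aggregator identifier counter moves from a file-scope static into BOND_AD_INFO(bond), meaning each bond numbers its aggregators independently instead of sharing one ever-growing counter. A small self-contained sketch of the difference follows; the struct names are hypothetical and not the driver's.

        #include <stdio.h>

        struct ad_bond_info { unsigned short aggregator_identifier; };
        struct bond { struct ad_bond_info ad_info; };

        /* old scheme: a single file-scope counter shared by every bond */
        static unsigned short global_aggregator_identifier;

        static unsigned short assign_global(void)
        {
                return ++global_aggregator_identifier;
        }

        /* new scheme: each bond carries and increments its own counter */
        static unsigned short assign_per_bond(struct bond *bond)
        {
                return ++bond->ad_info.aggregator_identifier;
        }

        int main(void)
        {
                struct bond a = { { 0 } }, b = { { 0 } };
                unsigned short g1, g2, a1, a2, b1;

                g1 = assign_global();           /* 1 */
                g2 = assign_global();           /* 2, even for a different bond */

                a1 = assign_per_bond(&a);       /* 1 */
                a2 = assign_per_bond(&a);       /* 2 */
                b1 = assign_per_bond(&b);       /* 1, bond "b" starts over */

                printf("global: %u %u  per-bond: a=%u,%u b=%u\n",
                       g1, g2, a1, a2, b1);
                return 0;
        }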
/**
@@ -1918,16 +1902,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct slave *slave_iter;
struct list_head *iter;
- // find the aggregator related to this slave
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-
- // find the port related to this slave
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: Trying to unbind an uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -1939,34 +1920,42 @@ void bond_3ad_unbind_slave(struct slave *slave)
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
- // check if this aggregator is occupied
+ /* check if this aggregator is occupied */
if (aggregator->lag_ports) {
- // check if there are other ports related to this aggregator except
- // the port related to this slave(thats ensure us that there is a
- // reason to search for new aggregator, and that we will find one
- if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
- // find new aggregator for the related port(s)
+ /* check if there are other ports related to this aggregator
+ * except the port related to this slave (that ensures us that
+ * there is a reason to search for a new aggregator, and that
+ * we will find one)
+ */
+ if ((aggregator->lag_ports != port) ||
+ (aggregator->lag_ports->next_port_in_aggregator)) {
+ /* find new aggregator for the related port(s) */
bond_for_each_slave(bond, slave_iter, iter) {
new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
- // if the new aggregator is empty, or it is connected to our port only
- if (!new_aggregator->lag_ports
- || ((new_aggregator->lag_ports == port)
- && !new_aggregator->lag_ports->next_port_in_aggregator))
+ /* if the new aggregator is empty, or it is
+ * connected to our port only
+ */
+ if (!new_aggregator->lag_ports ||
+ ((new_aggregator->lag_ports == port) &&
+ !new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
if (!slave_iter)
new_aggregator = NULL;
- // if new aggregator found, copy the aggregator's parameters
- // and connect the related lag_ports to the new aggregator
+
+ /* if new aggregator found, copy the aggregator's
+ * parameters and connect the related lag_ports to the
+ * new aggregator
+ */
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
- if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
+ if ((new_aggregator->lag_ports == port) &&
+ new_aggregator->is_active) {
pr_info("%s: Removing an active aggregator\n",
aggregator->slave->bond->dev->name);
- // select new active aggregator
select_new_active_agg = 1;
}
@@ -1982,30 +1971,33 @@ void bond_3ad_unbind_slave(struct slave *slave)
new_aggregator->is_active = aggregator->is_active;
new_aggregator->num_of_ports = aggregator->num_of_ports;
- // update the information that is written on the ports about the aggregator
+ /* update the information that is written on
+ * the ports about the aggregator
+ */
for (temp_port = aggregator->lag_ports; temp_port;
temp_port = temp_port->next_port_in_aggregator) {
temp_port->aggregator = new_aggregator;
temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
}
- // clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg)
ad_agg_selection_logic(__get_first_agg(port));
} else {
- pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
- slave->bond->dev->name);
+ pr_warn("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
+ slave->bond->dev->name);
}
- } else { // in case that the only port related to this aggregator is the one we want to remove
+ } else {
+ /* in case that the only port related to this
+ * aggregator is the one we want to remove
+ */
select_new_active_agg = aggregator->is_active;
- // clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
- // select new active aggregator
+ /* select new active aggregator */
temp_aggregator = __get_first_agg(port);
if (temp_aggregator)
ad_agg_selection_logic(temp_aggregator);
@@ -2014,15 +2006,19 @@ void bond_3ad_unbind_slave(struct slave *slave)
}
pr_debug("Unbinding port %d\n", port->actor_port_number);
- // find the aggregator that this port is connected to
+
+ /* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
prev_port = NULL;
- // search the port in the aggregator's related ports
+ /* search the port in the aggregator's related ports */
for (temp_port = temp_aggregator->lag_ports; temp_port;
prev_port = temp_port,
- temp_port = temp_port->next_port_in_aggregator) {
- if (temp_port == port) { // the aggregator found - detach the port from this aggregator
+ temp_port = temp_port->next_port_in_aggregator) {
+ if (temp_port == port) {
+ /* the aggregator found - detach the port from
+ * this aggregator
+ */
if (prev_port)
prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
else
@@ -2030,12 +2026,11 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (temp_aggregator->num_of_ports == 0) {
select_new_active_agg = temp_aggregator->is_active;
- // clear the aggregator
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
- // select new active aggregator
+ /* select new active aggregator */
ad_agg_selection_logic(__get_first_agg(port));
}
}
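
The hunk above keeps the long-standing "walk with a trailing prev pointer" idiom for detaching a port from the aggregator's singly linked lag_ports list: fix up the previous node if there is one, otherwise move the list head itself. A minimal userspace sketch of that idiom follows; the types and names are illustrative, not the bonding structures.

#include <stdio.h>
#include <stdlib.h>

struct port {
	int id;
	struct port *next;	/* stands in for next_port_in_aggregator */
};

/* Detach victim from the list headed by *head, exactly like the
 * prev_port/temp_port walk above. */
static void detach_port(struct port **head, struct port *victim)
{
	struct port *prev = NULL, *cur;

	for (cur = *head; cur; prev = cur, cur = cur->next) {
		if (cur != victim)
			continue;
		if (prev)
			prev->next = cur->next;
		else
			*head = cur->next;
		cur->next = NULL;
		return;
	}
}

int main(void)
{
	struct port a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct port *head = &a;

	a.next = &b;
	b.next = &c;
	detach_port(&head, &b);
	for (struct port *p = head; p; p = p->next)
		printf("port %d\n", p->id);	/* prints 1 then 3 */
	return 0;
}
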
@@ -2067,23 +2062,26 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
struct port *port;
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
read_lock(&bond->lock);
+ rcu_read_lock();
- //check if there are any slaves
+ /* check if there are any slaves */
if (!bond_has_slaves(bond))
goto re_arm;
- // check if agg_select_timer timer after initialize is timed out
- if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
- slave = bond_first_slave(bond);
+ /* check if agg_select_timer timer after initialize is timed out */
+ if (BOND_AD_INFO(bond).agg_select_timer &&
+ !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
- // select the active aggregator for the bond
+ /* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
- pr_warning("%s: Warning: bond's first port is uninitialized\n",
- bond->dev->name);
+ pr_warn("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2093,12 +2091,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_3ad_set_carrier(bond);
}
- // for each port run the state machines
- bond_for_each_slave(bond, slave, iter) {
+ /* for each port run the state machines */
+ bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning("%s: Warning: Found an uninitialized port\n",
- bond->dev->name);
+ pr_warn("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2114,7 +2112,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
ad_mux_machine(port);
ad_tx_machine(port);
- // turn off the BEGIN bit, since we already handled it
+ /* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
@@ -2122,9 +2120,20 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
+ rcu_read_unlock();
read_unlock(&bond->lock);
+
+ if (should_notify_rtnl && rtnl_trylock()) {
+ bond_slave_state_notify(bond);
+ rtnl_unlock();
+ }
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
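
The rework above lets the state machines run under RCU and only mark slaves that need an RTNL-protected notification; the notification itself is attempted afterwards with rtnl_trylock(), and simply waits for the next periodic run if the lock is busy. Below is a rough userspace sketch of that deferral pattern, with pthread_mutex_trylock() standing in for rtnl_trylock() and all names invented; it is not the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NOTIFY_LATER false
#define NOTIFY_NOW   true

struct slave { bool should_notify; };

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void commit_state_notifications(struct slave *slaves, int n)
{
	for (int i = 0; i < n; i++)
		if (slaves[i].should_notify) {
			printf("notifying slave %d\n", i);
			slaves[i].should_notify = false;
		}
}

static void periodic_work(struct slave *slaves, int n)
{
	bool should_notify = NOTIFY_LATER;

	/* the "state machines" run without the heavy lock and only mark
	 * which slaves need a notification */
	for (int i = 0; i < n; i++)
		if (slaves[i].should_notify)
			should_notify = NOTIFY_NOW;

	/* try the heavy lock; if it is busy, the next periodic run will
	 * pick the pending notifications up */
	if (should_notify == NOTIFY_NOW && pthread_mutex_trylock(&rtnl) == 0) {
		commit_state_notifications(slaves, n);
		pthread_mutex_unlock(&rtnl);
	}
}

int main(void)
{
	struct slave slaves[2] = { { .should_notify = true }, { false } };

	periodic_work(slaves, 2);
	return 0;
}
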
/**
@@ -2137,7 +2146,8 @@ re_arm:
* received frames (loopback). Since only the payload is given to this
* function, it checks for loopback.
*/
-static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
+ u16 length)
{
struct port *port;
int ret = RX_HANDLER_ANOTHER;
@@ -2147,8 +2157,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
+ pr_warn("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2165,7 +2175,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
case AD_TYPE_MARKER:
ret = RX_HANDLER_CONSUMED;
- // No need to convert fields to Little Endian since we don't use the marker's fields.
+ /* No need to convert fields to Little Endian since we
+ * don't use the marker's fields.
+ */
switch (((struct bond_marker *)lacpdu)->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
@@ -2203,8 +2215,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: speed changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2236,8 +2248,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("%s: Warning: duplex changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2270,8 +2282,8 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
- slave->bond->dev->name, slave->dev->name);
+ pr_warn("Warning: %s: link status changed for uninitialized port on %s\n",
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2309,10 +2321,13 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
__release_state_machine_lock(port);
}
-/*
- * set link state for bonding master: if we have an active
- * aggregator, we're up, if not, we're down. Presumes that we cannot
- * have an active aggregator if there are no slaves with link up.
+/**
+ * bond_3ad_set_carrier - set link state for bonding master
+ * @bond: bonding structure
+ *
+ * if we have an active aggregator, we're up, if not, we're down.
+ * Presumes that we cannot have an active aggregator if there are
+ * no slaves with link up.
*
* This behavior complies with IEEE 802.3 section 43.3.9.
*
@@ -2323,30 +2338,32 @@ int bond_3ad_set_carrier(struct bonding *bond)
{
struct aggregator *active;
struct slave *first_slave;
+ int ret = 1;
- first_slave = bond_first_slave(bond);
- if (!first_slave)
- return 0;
+ rcu_read_lock();
+ first_slave = bond_first_slave_rcu(bond);
+ if (!first_slave) {
+ ret = 0;
+ goto out;
+ }
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
- return 1;
+ goto out;
}
} else if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
- return 1;
+ goto out;
}
- return 0;
- }
-
- if (netif_carrier_ok(bond->dev)) {
+ } else if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
- return 1;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
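
bond_3ad_set_carrier() now takes rcu_read_lock() up front and funnels every return through a single "out" label so the unlock cannot be missed; ret defaults to 1 (carrier state changed) and is only overridden when there is nothing to do. A small userspace sketch of that single-exit shape follows; the lock and the carrier checks are stand-ins, not the kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_first_entry = true;
static bool carrier_ok = false;

/* Returns 1 when the (pretend) carrier state was toggled, 0 otherwise;
 * every path leaves through "out" so the lock is always released. */
static int set_carrier(void)
{
	int ret = 1;

	pthread_mutex_lock(&list_lock);	/* stands in for rcu_read_lock() */
	if (!have_first_entry) {
		ret = 0;
		goto out;
	}
	if (!carrier_ok) {
		carrier_ok = true;	/* "netif_carrier_on()" */
		goto out;
	}
	ret = 0;
out:
	pthread_mutex_unlock(&list_lock);
	return ret;
}

int main(void)
{
	printf("first call: %d\n", set_carrier());	/* 1: carrier came up */
	printf("second call: %d\n", set_carrier());	/* 0: nothing to do */
	return 0;
}
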
/**
@@ -2378,7 +2395,8 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
ad_info->ports = aggregator->num_of_ports;
ad_info->actor_key = aggregator->actor_oper_aggregator_key;
ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system, aggregator->partner_system.mac_addr_value, ETH_ALEN);
+ memcpy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value, ETH_ALEN);
return 0;
}
@@ -2406,13 +2424,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct list_head *iter;
int slaves_in_agg;
int slave_agg_no;
- int res = 1;
int agg_id;
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
- goto out;
+ goto err_free;
}
slaves_in_agg = ad_info.ports;
@@ -2420,7 +2437,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slaves_in_agg == 0) {
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
- goto out;
+ goto err_free;
}
slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@ -2439,7 +2456,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (SLAVE_IS_OK(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ bond_dev_queue_xmit(bond, skb, slave->dev);
goto out;
}
}
@@ -2447,21 +2464,23 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slave_agg_no >= 0) {
pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
dev->name, agg_id);
- goto out;
+ goto err_free;
}
/* we couldn't find any suitable slave after the agg_no, so use the
- * first suitable found, if found. */
+ * first suitable found, if found.
+ */
if (first_ok_slave)
- res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+ else
+ goto err_free;
out:
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
return NETDEV_TX_OK;
+err_free:
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+ goto out;
}
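
The transmit path above drops the old "res" flag: any failure now jumps to err_free, which frees the skb and still returns NETDEV_TX_OK, while success paths fall through to out. A userspace sketch of that error-label shape, using a heap buffer in place of the skb and invented names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TX_OK 0

/* Pretend transmit: consume the buffer either by "sending" it or by
 * freeing it on the error path, but always report TX_OK to the caller. */
static int xmit(char *buf, int have_slave)
{
	if (!buf || !have_slave)
		goto err_free;
	printf("sent: %s\n", buf);
	free(buf);		/* "queued" and consumed */
out:
	return TX_OK;
err_free:
	free(buf);		/* no suitable interface, frame not sent */
	goto out;
}

int main(void)
{
	xmit(strdup("hello"), 1);
	xmit(strdup("dropped"), 0);
	return 0;
}
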
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
@@ -2483,7 +2502,10 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
return ret;
}
-/*
+/**
+ * bond_3ad_update_lacp_rate - change the lacp rate
+ * @bond: bonding struct
+ *
* When modifying the lacp_rate parameter via sysfs,
* update actor_oper_port_state of each port.
*
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5d91ad0cc041..f4dd9592ac62 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ u16 aggregator_identifier;
};
struct ad_slave_info {
@@ -265,7 +266,7 @@ struct ad_slave_info {
// ================= AD Exported functions to the main bonding code ==================
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-int bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 02872405d35d..e8f133e926aa 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -470,7 +469,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
/* slave being removed should not be active at this point
*
- * Caller must hold bond lock for read
+ * Caller must hold rtnl.
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
@@ -731,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (!vlan_get_tag(skb, &client_info->vlan_id))
+ if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
@@ -816,7 +815,7 @@ static void rlb_rebalance(struct bonding *bond)
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
client_info->ntt = 1;
@@ -1372,7 +1371,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
int do_tx_balance = 1;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- int res = 1;
struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
@@ -1470,7 +1468,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
ETH_ALEN);
}
- res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
} else {
if (tx_slave) {
_lock_tx_hashtbl(bond);
@@ -1479,11 +1478,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
-
+ /* no suitable interface, frame not sent */
+ kfree_skb(skb);
+out:
return NETDEV_TX_OK;
}
@@ -1495,14 +1492,14 @@ void bond_alb_monitor(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
- read_lock(&bond->lock);
-
if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
}
+ rcu_read_lock();
+
bond_info->tx_rebalance_counter++;
bond_info->lp_counter++;
@@ -1515,7 +1512,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave_rcu(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1528,7 +1525,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1552,11 +1549,9 @@ void bond_alb_monitor(struct work_struct *work)
* dev_set_promiscuity requires rtnl and
* nothing else. Avoid race with bond_close.
*/
- read_unlock(&bond->lock);
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
+ rcu_read_unlock();
+ if (!rtnl_trylock())
goto re_arm;
- }
bond_info->rlb_promisc_timeout_counter = 0;
@@ -1568,7 +1563,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->primary_is_promisc = 0;
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
if (bond_info->rlb_rebalance) {
@@ -1590,11 +1585,9 @@ void bond_alb_monitor(struct work_struct *work)
}
}
}
-
+ rcu_read_unlock();
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-
- read_unlock(&bond->lock);
}
/* assumption: called before the slave is attached to the bond
@@ -1680,14 +1673,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* If new_slave is NULL, caller must hold curr_slave_lock or
* bond->lock for write.
*
- * If new_slave is not NULL, caller must hold RTNL, bond->lock for
- * read and curr_slave_lock for write. Processing here may sleep, so
- * no other locks may be held.
+ * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
+ * for write. Processing here may sleep, so no other locks may be held.
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
@@ -1723,7 +1713,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
ASSERT_RTNL();
@@ -1749,11 +1738,9 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* swap mac address */
alb_swap_mac_addr(swap_slave, new_slave);
alb_fasten_mac_swap(bond, swap_slave, new_slave);
- read_lock(&bond->lock);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- read_lock(&bond->lock);
alb_send_learning_packets(new_slave, bond->dev->dev_addr);
}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 4226044efd08..e09dd4bfafff 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6191b551a0e8..e5628fc725c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -79,7 +79,6 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
-#include <linux/reciprocal_div.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -87,13 +86,11 @@
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
-#define BOND_LINK_MON_INTERV 0
-#define BOND_LINK_ARP_INTERV 0
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
-static int miimon = BOND_LINK_MON_INTERV;
+static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
@@ -104,7 +101,7 @@ static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
-static int arp_interval = BOND_LINK_ARP_INTERV;
+static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
@@ -113,6 +110,7 @@ static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
+static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -189,6 +187,10 @@ module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
"mode; 0 for a random slave, 1 packet per "
"slave (default), >1 packets per slave.");
+module_param(lp_interval, uint, 0);
+MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
+ "the bonding driver sends learning packets to "
+ "each slaves peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/
@@ -204,67 +206,6 @@ static int bond_mode = BOND_MODE_ROUNDROBIN;
static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
static int lacp_fast;
-const struct bond_parm_tbl bond_lacp_tbl[] = {
-{ "slow", AD_LACP_SLOW},
-{ "fast", AD_LACP_FAST},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl bond_mode_tbl[] = {
-{ "balance-rr", BOND_MODE_ROUNDROBIN},
-{ "active-backup", BOND_MODE_ACTIVEBACKUP},
-{ "balance-xor", BOND_MODE_XOR},
-{ "broadcast", BOND_MODE_BROADCAST},
-{ "802.3ad", BOND_MODE_8023AD},
-{ "balance-tlb", BOND_MODE_TLB},
-{ "balance-alb", BOND_MODE_ALB},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl xmit_hashtype_tbl[] = {
-{ "layer2", BOND_XMIT_POLICY_LAYER2},
-{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
-{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
-{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
-{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl arp_all_targets_tbl[] = {
-{ "any", BOND_ARP_TARGETS_ANY},
-{ "all", BOND_ARP_TARGETS_ALL},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl arp_validate_tbl[] = {
-{ "none", BOND_ARP_VALIDATE_NONE},
-{ "active", BOND_ARP_VALIDATE_ACTIVE},
-{ "backup", BOND_ARP_VALIDATE_BACKUP},
-{ "all", BOND_ARP_VALIDATE_ALL},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl fail_over_mac_tbl[] = {
-{ "none", BOND_FOM_NONE},
-{ "active", BOND_FOM_ACTIVE},
-{ "follow", BOND_FOM_FOLLOW},
-{ NULL, -1},
-};
-
-const struct bond_parm_tbl pri_reselect_tbl[] = {
-{ "always", BOND_PRI_RESELECT_ALWAYS},
-{ "better", BOND_PRI_RESELECT_BETTER},
-{ "failure", BOND_PRI_RESELECT_FAILURE},
-{ NULL, -1},
-};
-
-struct bond_parm_tbl ad_select_tbl[] = {
-{ "stable", BOND_AD_STABLE},
-{ "bandwidth", BOND_AD_BANDWIDTH},
-{ "count", BOND_AD_COUNT},
-{ NULL, -1},
-};
-
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -299,7 +240,7 @@ const char *bond_mode_name(int mode)
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
@@ -312,8 +253,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
dev_queue_xmit(skb);
-
- return 0;
}
/*
@@ -463,6 +402,22 @@ static void bond_update_speed_duplex(struct slave *slave)
return;
}
+const char *bond_slave_link_status(s8 link)
+{
+ switch (link) {
+ case BOND_LINK_UP:
+ return "up";
+ case BOND_LINK_FAIL:
+ return "going down";
+ case BOND_LINK_DOWN:
+ return "down";
+ case BOND_LINK_BACK:
+ return "going back";
+ default:
+ return "unknown";
+ }
+}
+
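
The new bond_slave_link_status() helper centralizes the link-state-to-string mapping that used to be open-coded at the call sites. A standalone illustration of the same mapping; the enum values here are illustrative placeholders for the kernel's BOND_LINK_* constants.

#include <stdio.h>

enum link_state { LINK_UP, LINK_FAIL, LINK_DOWN, LINK_BACK };

static const char *link_status(int link)
{
	switch (link) {
	case LINK_UP:
		return "up";
	case LINK_FAIL:
		return "going down";
	case LINK_DOWN:
		return "down";
	case LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

int main(void)
{
	for (int s = LINK_UP; s <= LINK_BACK + 1; s++)
		printf("state %d -> %s\n", s, link_status(s));
	return 0;
}
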
/*
* if <dev> supports MII link status reporting, check its link status.
*
@@ -591,33 +546,22 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
+ struct bonding *bond = container_of(work, struct bonding,
+ mcast_work.work);
+
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
- rtnl_unlock();
- /* We use curr_slave_lock to protect against concurrent access to
- * igmp_retrans from multiple running instances of this function and
- * bond_change_active_slave
- */
- write_lock_bh(&bond->curr_slave_lock);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
- write_unlock_bh(&bond->curr_slave_lock);
-}
-
-static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
-{
- struct bonding *bond = container_of(work, struct bonding,
- mcast_work.work);
-
- bond_resend_igmp_join_requests(bond);
+ rtnl_unlock();
}
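
Folding bond_resend_igmp_join_requests() into the delayed-work handler relies on container_of() to get back from the embedded work member to the owning struct bonding. A userspace sketch of that recovery, with a local container_of macro mirroring the kernel's offsetof-based definition and invented structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct bond {
	const char *name;
	struct work mcast_work;	/* embedded, like bond->mcast_work */
};

/* The work queue only hands us the embedded member; container_of()
 * recovers the enclosing object. */
static void mcast_handler(struct work *w)
{
	struct bond *b = container_of(w, struct bond, mcast_work);

	printf("resending IGMP joins for %s\n", b->name);
}

int main(void)
{
	struct bond b = { .name = "bond0" };

	mcast_handler(&b.mcast_work);
	return 0;
}
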
/* Flush bond's hardware addresses from slave
@@ -697,14 +641,12 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
*
* Perform special MAC address swapping for fail_over_mac settings
*
- * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ * Called with RTNL, curr_slave_lock for write_bh.
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
__releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
- __acquires(&bond->lock)
__acquires(&bond->curr_slave_lock)
{
u8 tmp_mac[ETH_ALEN];
@@ -715,9 +657,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
case BOND_FOM_ACTIVE:
if (new_active) {
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
bond_set_dev_addr(bond->dev, new_active->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
break;
@@ -731,7 +671,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
return;
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
@@ -761,7 +700,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
@@ -821,7 +759,11 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = bond->curr_active_slave;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ rcu_read_unlock();
pr_debug("bond_should_notify_peers: bond %s slave %s\n",
bond->dev->name, slave ? slave->dev->name : "NULL");
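
bond_should_notify_peers() now samples curr_active_slave under a short rcu_read_lock()/rcu_dereference() pair instead of relying on the old bond->lock. A rough userspace analogue using a C11 acquire load in place of rcu_dereference(); this only illustrates the "take one consistent snapshot of the pointer" discipline, not RCU's deferred reclamation, and all names are invented.

#include <stdatomic.h>
#include <stdio.h>

struct slave { const char *name; };

static _Atomic(struct slave *) curr_active_slave;

/* Readers take one snapshot of the pointer and use only that snapshot. */
static const char *active_name(void)
{
	struct slave *s = atomic_load_explicit(&curr_active_slave,
						memory_order_acquire);

	return s ? s->name : "NULL";
}

int main(void)
{
	static struct slave eth0 = { "eth0" };

	printf("active: %s\n", active_name());
	atomic_store_explicit(&curr_active_slave, &eth0,
			      memory_order_release);
	printf("active: %s\n", active_name());
	return 0;
}
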
@@ -846,8 +788,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
- * If new_active is not NULL, caller must hold bond->lock for read and
- * curr_slave_lock for write_bh.
+ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
@@ -888,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active)
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
bool should_notify_peers = false;
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (bond->params.fail_over_mac)
bond_do_fail_over_mac(bond, new_active,
@@ -916,14 +861,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
}
}
@@ -949,7 +892,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
- * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
+ * Caller must hold curr_slave_lock for write_bh.
*/
void bond_select_active_slave(struct bonding *bond)
{
@@ -1254,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return -EBUSY;
}
+ if (bond_dev == slave_dev) {
+ pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+ return -EPERM;
+ }
+
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
@@ -1331,9 +1279,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
- bond_dev->name);
- bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ bond_dev->name);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ bond_dev->name);
+ }
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
bond_dev->name);
@@ -1376,7 +1328,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1519,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave,
+ BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
@@ -1544,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1566,7 +1520,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
@@ -1589,16 +1542,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_unregister;
}
+ res = bond_sysfs_slave_add(new_slave);
+ if (res) {
+ pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
+ goto err_upper_unlink;
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->lock);
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+ unblock_netpoll_tx();
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1610,6 +1569,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_upper_unlink:
+ bond_upper_dev_unlink(bond_dev, slave_dev);
+
err_unregister:
netdev_rx_handler_unregister(slave_dev);
@@ -1618,19 +1580,15 @@ err_detach:
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
- write_lock_bh(&bond->lock);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
- bond_change_active_slave(bond, NULL);
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- } else {
- write_unlock_bh(&bond->lock);
+ unblock_netpoll_tx();
}
slave_disable_netpoll(new_slave);
@@ -1639,7 +1597,8 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1658,7 +1617,7 @@ err_free:
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
if (!bond_has_slaves(bond) &&
- ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+ ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
return res;
@@ -1695,22 +1654,17 @@ static int __bond_release_one(struct net_device *bond_dev,
}
block_netpoll_tx();
- write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
pr_info("%s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
return -EINVAL;
}
- write_unlock_bh(&bond->lock);
-
- /* release the slave from its bond */
- bond->slave_cnt--;
+ bond_sysfs_slave_del(slave);
bond_upper_dev_unlink(bond_dev, slave_dev);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
@@ -1720,12 +1674,10 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* must be called before the slave is
- * detached from the list
- */
+ if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- }
+
+ write_unlock_bh(&bond->lock);
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
@@ -1736,8 +1688,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- if (!all && !bond->params.fail_over_mac) {
- if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ if (!all && (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
@@ -1748,8 +1701,11 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
- if (oldcurrent == slave)
+ if (oldcurrent == slave) {
+ write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
+ write_unlock_bh(&bond->curr_slave_lock);
+ }
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1757,9 +1713,7 @@ static int __bond_release_one(struct net_device *bond_dev,
* has been cleared (if our_slave == old_current),
* but before a new active slave is selected.
*/
- write_unlock_bh(&bond->lock);
bond_alb_deinit_slave(bond, slave);
- write_lock_bh(&bond->lock);
}
if (all) {
@@ -1770,15 +1724,11 @@ static int __bond_release_one(struct net_device *bond_dev,
* is no concern that another slave add/remove event
* will interfere.
*/
- write_unlock_bh(&bond->lock);
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- write_lock_bh(&bond->lock);
}
if (!bond_has_slaves(bond)) {
@@ -1793,9 +1743,9 @@ static int __bond_release_one(struct net_device *bond_dev,
}
}
- write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
synchronize_rcu();
+ bond->slave_cnt--;
if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
@@ -1837,7 +1787,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
addr.sa_family = slave_dev->type;
@@ -1928,7 +1879,7 @@ static int bond_miimon_inspect(struct bonding *bond)
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2072,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond)
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
bond->params.mode == BOND_MODE_8023AD)
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2119,48 +2071,42 @@ do_failover:
* an acquisition of appropriate locks followed by a commit phase to
* implement whatever link state changes are indicated.
*/
-void bond_mii_monitor(struct work_struct *work)
+static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
unsigned long delay;
- read_lock(&bond->lock);
-
delay = msecs_to_jiffies(bond->params.miimon);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_miimon_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delay = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_miimon_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock(); /* might sleep, hold no other locks */
- read_lock(&bond->lock);
- }
+ } else
+ rcu_read_unlock();
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
- read_unlock(&bond->lock);
-
if (should_notify_peers) {
if (!rtnl_trylock())
return;
@@ -2414,20 +2360,20 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
* arp is transmitted to generate traffic. see activebackup_arp_monitor for
* arp monitoring in active backup mode.
*/
-void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
struct slave *slave, *oldcurrent;
struct list_head *iter;
- int do_failover = 0;
-
- read_lock(&bond->lock);
+ int do_failover = 0, slave_state_changed = 0;
if (!bond_has_slaves(bond))
goto re_arm;
- oldcurrent = bond->curr_active_slave;
+ rcu_read_lock();
+
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2436,7 +2382,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2444,7 +2390,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
slave->link = BOND_LINK_UP;
- bond_set_active_slave(slave);
+ slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
* mode. the window of a slave being up and
@@ -2473,7 +2419,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
!bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
- bond_set_backup_slave(slave);
+ slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2498,22 +2444,33 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
bond_arp_send_all(bond, slave);
}
- if (do_failover) {
- block_netpoll_tx();
- write_lock_bh(&bond->curr_slave_lock);
+ rcu_read_unlock();
- bond_select_active_slave(bond);
+ if (do_failover || slave_state_changed) {
+ if (!rtnl_trylock())
+ goto re_arm;
- write_unlock_bh(&bond->curr_slave_lock);
- unblock_netpoll_tx();
+ if (slave_state_changed) {
+ bond_slave_state_change(bond);
+ } else if (do_failover) {
+ /* the bond_select_active_slave must hold RTNL
+ * and curr_slave_lock for write.
+ */
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+
+ bond_select_active_slave(bond);
+
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+ }
+ rtnl_unlock();
}
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work,
msecs_to_jiffies(bond->params.arp_interval));
-
- read_unlock(&bond->lock);
}
/*
@@ -2522,7 +2479,7 @@ re_arm:
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
@@ -2531,7 +2488,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2593,7 +2550,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* Called to commit link state changes noted by inspection step of
* active-backup mode ARP monitor.
*
- * Called with RTNL and bond->lock for read.
+ * Called with RTNL held.
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
@@ -2614,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
- bond->current_arp_slave);
+ bond->current_arp_slave,
+ BOND_SLAVE_NOTIFY_NOW);
bond->current_arp_slave = NULL;
}
@@ -2634,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2668,43 +2627,41 @@ do_failover:
/*
* Send ARP probes for active-backup mode ARP monitor.
*
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock held.
*/
-static void bond_ab_arp_probe(struct bonding *bond)
+static bool bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct slave *slave, *before = NULL, *new_slave = NULL,
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+ *curr_active_slave = rcu_dereference(bond->curr_active_slave);
struct list_head *iter;
bool found = false;
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
- read_lock(&bond->curr_slave_lock);
-
- if (bond->current_arp_slave && bond->curr_active_slave)
+ if (curr_arp_slave && curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
- bond->curr_active_slave->dev->name);
+ curr_arp_slave->dev->name,
+ curr_active_slave->dev->name);
- if (bond->curr_active_slave) {
- bond_arp_send_all(bond, bond->curr_active_slave);
- read_unlock(&bond->curr_slave_lock);
- return;
+ if (curr_active_slave) {
+ bond_arp_send_all(bond, curr_active_slave);
+ return should_notify_rtnl;
}
- read_unlock(&bond->curr_slave_lock);
-
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
- if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond_first_slave(bond);
- if (!bond->current_arp_slave)
- return;
+ if (!curr_arp_slave) {
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
+ return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(bond->current_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2722,12 +2679,13 @@ static void bond_ab_arp_probe(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_LATER);
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
- if (slave == bond->current_arp_slave)
+ if (slave == curr_arp_slave)
found = true;
}
@@ -2735,64 +2693,74 @@ static void bond_ab_arp_probe(struct bonding *bond)
new_slave = before;
if (!new_slave)
- return;
+ goto check_state;
new_slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(new_slave);
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
- bond->current_arp_slave = new_slave;
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
+check_state:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
+ return should_notify_rtnl;
}
-void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
bool should_notify_peers = false;
+ bool should_notify_rtnl = false;
int delta_in_ticks;
- read_lock(&bond->lock);
-
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
if (!bond_has_slaves(bond))
goto re_arm;
+ rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
if (bond_ab_arp_inspect(bond)) {
- read_unlock(&bond->lock);
+ rcu_read_unlock();
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
- read_lock(&bond->lock);
delta_in_ticks = 1;
should_notify_peers = false;
goto re_arm;
}
- read_lock(&bond->lock);
-
bond_ab_arp_commit(bond);
- read_unlock(&bond->lock);
rtnl_unlock();
- read_lock(&bond->lock);
+ rcu_read_lock();
}
- bond_ab_arp_probe(bond);
+ should_notify_rtnl = bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- read_unlock(&bond->lock);
-
- if (should_notify_peers) {
+ if (should_notify_peers || should_notify_rtnl) {
if (!rtnl_trylock())
return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
+ if (should_notify_rtnl)
+ bond_slave_state_notify(bond);
+
rtnl_unlock();
}
}
@@ -2896,9 +2864,30 @@ static int bond_slave_netdev_event(unsigned long event,
*/
break;
case NETDEV_CHANGENAME:
- /*
- * TODO: handle changing the primary's name
- */
+ /* we don't care if we don't have primary set */
+ if (!USES_PRIMARY(bond->params.mode) ||
+ !bond->params.primary[0])
+ break;
+
+ if (slave == bond->primary_slave) {
+ /* slave's name changed - it's no longer primary */
+ bond->primary_slave = NULL;
+ } else if (!strcmp(slave_dev->name, bond->params.primary)) {
+ /* we have a new primary slave */
+ bond->primary_slave = slave;
+ } else { /* we didn't change primary - exit */
+ break;
+ }
+
+ pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
+ bond->dev->name, bond->primary_slave ? slave_dev->name :
+ "none");
+
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
@@ -3071,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
}
}
read_unlock(&bond->curr_slave_lock);
@@ -3178,6 +3169,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
+ struct bond_opt_value newval;
struct net *net;
int res = 0;
@@ -3249,37 +3241,35 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- slave_dev = dev_get_by_name(net, ifr->ifr_slave);
+ slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
pr_debug("slave_dev=%p:\n", slave_dev);
if (!slave_dev)
- res = -ENODEV;
- else {
- pr_debug("slave_dev->name=%s:\n", slave_dev->name);
- switch (cmd) {
- case BOND_ENSLAVE_OLD:
- case SIOCBONDENSLAVE:
- res = bond_enslave(bond_dev, slave_dev);
- break;
- case BOND_RELEASE_OLD:
- case SIOCBONDRELEASE:
- res = bond_release(bond_dev, slave_dev);
- break;
- case BOND_SETHWADDR_OLD:
- case SIOCBONDSETHWADDR:
- bond_set_dev_addr(bond_dev, slave_dev);
- res = 0;
- break;
- case BOND_CHANGE_ACTIVE_OLD:
- case SIOCBONDCHANGEACTIVE:
- res = bond_option_active_slave_set(bond, slave_dev);
- break;
- default:
- res = -EOPNOTSUPP;
- }
+ return -ENODEV;
- dev_put(slave_dev);
+ pr_debug("slave_dev->name=%s:\n", slave_dev->name);
+ switch (cmd) {
+ case BOND_ENSLAVE_OLD:
+ case SIOCBONDENSLAVE:
+ res = bond_enslave(bond_dev, slave_dev);
+ break;
+ case BOND_RELEASE_OLD:
+ case SIOCBONDRELEASE:
+ res = bond_release(bond_dev, slave_dev);
+ break;
+ case BOND_SETHWADDR_OLD:
+ case SIOCBONDSETHWADDR:
+ bond_set_dev_addr(bond_dev, slave_dev);
+ res = 0;
+ break;
+ case BOND_CHANGE_ACTIVE_OLD:
+ case SIOCBONDCHANGEACTIVE:
+ bond_opt_initstr(&newval, slave_dev->name);
+ res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ break;
+ default:
+ res = -EOPNOTSUPP;
}
return res;
@@ -3471,7 +3461,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
- if (bond->params.fail_over_mac)
+ if (bond->params.fail_over_mac &&
+ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3550,7 +3541,7 @@ unwind:
* it fails, it tries to find the first available slave for transmission.
* The skb is consumed in all cases, thus the function is void.
*/
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3590,8 +3581,9 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
*/
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
- int packets_per_slave = bond->params.packets_per_slave;
u32 slave_id;
+ struct reciprocal_value reciprocal_packets_per_slave;
+ int packets_per_slave = bond->params.packets_per_slave;
switch (packets_per_slave) {
case 0:
@@ -3601,8 +3593,10 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
slave_id = bond->rr_tx_counter;
break;
default:
+ reciprocal_packets_per_slave =
+ bond->params.reciprocal_packets_per_slave;
slave_id = reciprocal_divide(bond->rr_tx_counter,
- packets_per_slave);
+ reciprocal_packets_per_slave);
break;
}
bond->rr_tx_counter++;
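
With packets_per_slave > 1, the round-robin id is now derived with reciprocal_divide() on a precomputed struct reciprocal_value rather than a per-packet integer division. The sketch below shows the classic multiply-and-shift form of the trick in userspace; note that the kernel's reciprocal_value() adds correction terms so the result is exact for all 32-bit inputs, which this simplified version does not guarantee.

#include <stdint.h>
#include <stdio.h>

/* Simplified reciprocal: good enough to show the idea, not bit-exact. */
static uint32_t reciprocal(uint32_t d)
{
	return (uint32_t)(0xffffffffu / d) + 1;
}

static uint32_t reciprocal_div(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t packets_per_slave = 3;
	uint32_t r = reciprocal(packets_per_slave);

	for (uint32_t counter = 0; counter < 10; counter++)
		printf("counter %2u -> slave id %u (plain division: %u)\n",
		       counter, reciprocal_div(counter, r),
		       counter / packets_per_slave);
	return 0;
}
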
@@ -3707,33 +3701,29 @@ static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *slave = NULL;
- struct slave *check_slave;
struct list_head *iter;
- int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave, iter) {
- if (check_slave->queue_id == skb->queue_mapping) {
- slave = check_slave;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->queue_id == skb->queue_mapping) {
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return 0;
+ }
+ /* If the slave isn't UP, use default transmit policy. */
break;
}
}
- /* If the slave isn't UP, use default transmit policy. */
- if (slave && slave->queue_id && IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- }
-
- return res;
+ return 1;
}
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -3941,6 +3931,9 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->features |= NETIF_F_NETNS_LOCAL;
+
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -3974,6 +3967,29 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (mode == tbl[i].mode)
+ return tbl[i].mode;
+
+ return -1;
+}
+
+static int bond_parm_tbl_lookup_name(const char *modename,
+ const struct bond_parm_tbl *tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].modename; i++)
+ if (strcmp(modename, tbl[i].modename) == 0)
+ return tbl[i].mode;
+
+ return -1;
+}
+
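
The two lookup helpers above replace the open-coded scan in bond_parse_parm(): one checks that a numeric mode is a valid table entry, the other maps a mode name to its value. A self-contained userspace version of the same pair, with a toy table standing in for the real bonding parameter tables:

#include <stdio.h>
#include <string.h>

struct parm_tbl {
	const char *modename;
	int mode;
};

static const struct parm_tbl mode_tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ "802.3ad",       4 },
	{ NULL,           -1 },
};

static int tbl_lookup(int mode, const struct parm_tbl *tbl)
{
	for (int i = 0; tbl[i].modename; i++)
		if (mode == tbl[i].mode)
			return tbl[i].mode;
	return -1;
}

static int tbl_lookup_name(const char *name, const struct parm_tbl *tbl)
{
	for (int i = 0; tbl[i].modename; i++)
		if (strcmp(name, tbl[i].modename) == 0)
			return tbl[i].mode;
	return -1;
}

int main(void)
{
	printf("802.3ad -> %d\n", tbl_lookup_name("802.3ad", mode_tbl));
	printf("4 -> %d\n", tbl_lookup(4, mode_tbl));
	printf("7 -> %d\n", tbl_lookup(7, mode_tbl));	/* not in table: -1 */
	return 0;
}
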
/*
* Convert string input module parms. Accept either the
* number of the mode or its string name. A bit complicated because
@@ -3982,27 +3998,17 @@ static void bond_uninit(struct net_device *bond_dev)
*/
int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
{
- int modeint = -1, i, rv;
- char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ int modeint;
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
for (p = (char *)buf; *p; p++)
if (!(isdigit(*p) || isspace(*p)))
break;
- if (*p)
- rv = sscanf(buf, "%20s", modestr);
- else
- rv = sscanf(buf, "%d", &modeint);
-
- if (!rv)
- return -1;
-
- for (i = 0; tbl[i].modename; i++) {
- if (modeint == tbl[i].mode)
- return tbl[i].mode;
- if (strcmp(modestr, tbl[i].modename) == 0)
- return tbl[i].mode;
- }
+ if (*p && sscanf(buf, "%20s", modestr) != 0)
+ return bond_parm_tbl_lookup_name(modestr, tbl);
+ else if (sscanf(buf, "%d", &modeint) != 0)
+ return bond_parm_tbl_lookup(modeint, tbl);
return -1;
}
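
The rewritten bond_parse_parm() first scans the buffer: if it hits anything that is neither a digit nor whitespace it treats the input as a mode name, otherwise as a number, then defers to the lookup helpers. A userspace sketch of just that dispatch step (error handling kept minimal; the helper and names are mine, not the kernel's):

#include <ctype.h>
#include <stdio.h>

#define MAX_NAME 20

/* Decide whether buf holds a mode number or a mode name, the same way
 * the rewritten parser does, and report what was parsed. */
static void parse(const char *buf)
{
	char name[MAX_NAME + 1];
	const char *p;
	int num;

	for (p = buf; *p; p++)
		if (!(isdigit((unsigned char)*p) || isspace((unsigned char)*p)))
			break;

	if (*p && sscanf(buf, "%20s", name) == 1)
		printf("\"%s\" parsed as name: %s\n", buf, name);
	else if (sscanf(buf, "%d", &num) == 1)
		printf("\"%s\" parsed as number: %d\n", buf, num);
	else
		printf("\"%s\" not understood\n", buf);
}

int main(void)
{
	parse("802.3ad");	/* contains '.', treated as a name */
	parse("  4 ");		/* digits and spaces only, a number */
	parse("balance-rr");
	return 0;
}
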
@@ -4010,18 +4016,20 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
static int bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
+ struct bond_opt_value newval, *valptr;
int arp_all_targets_value;
/*
* Convert string parameters.
*/
if (mode) {
- bond_mode = bond_parse_parm(mode, bond_mode_tbl);
- if (bond_mode == -1) {
- pr_err("Error: Invalid bonding mode \"%s\"\n",
- mode == NULL ? "NULL" : mode);
+ bond_opt_initstr(&newval, mode);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
+ if (!valptr) {
+ pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
return -EINVAL;
}
+ bond_mode = valptr->value;
}
if (xmit_hash_policy) {
@@ -4030,14 +4038,15 @@ static int bond_check_params(struct bond_params *params)
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
- xmit_hashtype = bond_parse_parm(xmit_hash_policy,
- xmit_hashtype_tbl);
- if (xmit_hashtype == -1) {
+ bond_opt_initstr(&newval, xmit_hash_policy);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
- xmit_hash_policy == NULL ? "NULL" :
xmit_hash_policy);
return -EINVAL;
}
+ xmit_hashtype = valptr->value;
}
}
@@ -4046,26 +4055,29 @@ static int bond_check_params(struct bond_params *params)
pr_info("lacp_rate param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
- lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
- if (lacp_fast == -1) {
+ bond_opt_initstr(&newval, lacp_rate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid lacp rate \"%s\"\n",
- lacp_rate == NULL ? "NULL" : lacp_rate);
+ lacp_rate);
return -EINVAL;
}
+ lacp_fast = valptr->value;
}
}
if (ad_select) {
- params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
- if (params->ad_select == -1) {
- pr_err("Error: Invalid ad_select \"%s\"\n",
- ad_select == NULL ? "NULL" : ad_select);
+ bond_opt_initstr(&newval, lacp_rate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
return -EINVAL;
}
-
- if (bond_mode != BOND_MODE_8023AD) {
+ params->ad_select = valptr->value;
+ if (bond_mode != BOND_MODE_8023AD)
pr_warning("ad_select param only affects 802.3ad mode\n");
- }
} else {
params->ad_select = BOND_AD_STABLE;
}
@@ -4077,9 +4089,9 @@ static int bond_check_params(struct bond_params *params)
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
- miimon, INT_MAX, BOND_LINK_MON_INTERV);
- miimon = BOND_LINK_MON_INTERV;
+ pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
+ miimon = 0;
}
if (updelay < 0) {
@@ -4106,8 +4118,8 @@ static int bond_check_params(struct bond_params *params)
num_peer_notif = 1;
}
- /* reset values for 802.3ad */
- if (bond_mode == BOND_MODE_8023AD) {
+ /* reset values for 802.3ad/TLB/ALB */
+ if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warning("Forcing miimon to 100msec\n");
@@ -4136,22 +4148,13 @@ static int bond_check_params(struct bond_params *params)
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
- if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+ bond_opt_initval(&newval, packets_per_slave);
+ if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
packets_per_slave, USHRT_MAX);
packets_per_slave = 1;
}
- /* reset values for TLB/ALB */
- if ((bond_mode == BOND_MODE_TLB) ||
- (bond_mode == BOND_MODE_ALB)) {
- if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
- pr_warning("Forcing miimon to 100msec\n");
- miimon = BOND_DEFAULT_MIIMON;
- }
- }
-
if (bond_mode == BOND_MODE_ALB) {
pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
updelay);
@@ -4191,9 +4194,9 @@ static int bond_check_params(struct bond_params *params)
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
- arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
- arp_interval = BOND_LINK_ARP_INTERV;
+ pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
+ arp_interval = 0;
}
for (arp_ip_count = 0, i = 0;
@@ -4232,35 +4235,40 @@ static int bond_check_params(struct bond_params *params)
return -EINVAL;
}
- arp_validate_value = bond_parse_parm(arp_validate,
- arp_validate_tbl);
- if (arp_validate_value == -1) {
+ bond_opt_initstr(&newval, arp_validate);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid arp_validate \"%s\"\n",
- arp_validate == NULL ? "NULL" : arp_validate);
+ arp_validate);
return -EINVAL;
}
- } else
+ arp_validate_value = valptr->value;
+ } else {
arp_validate_value = 0;
+ }
arp_all_targets_value = 0;
if (arp_all_targets) {
- arp_all_targets_value = bond_parse_parm(arp_all_targets,
- arp_all_targets_tbl);
-
- if (arp_all_targets_value == -1) {
+ bond_opt_initstr(&newval, arp_all_targets);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
arp_all_targets);
arp_all_targets_value = 0;
+ } else {
+ arp_all_targets_value = valptr->value;
}
}
if (miimon) {
pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
+ valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+ arp_validate_value);
pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
- arp_interval,
- arp_validate_tbl[arp_validate_value].modename,
- arp_ip_count);
+ arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
pr_info(" %s", arp_ip_target[i]);
@@ -4284,33 +4292,41 @@ static int bond_check_params(struct bond_params *params)
}
if (primary && primary_reselect) {
- primary_reselect_value = bond_parse_parm(primary_reselect,
- pri_reselect_tbl);
- if (primary_reselect_value == -1) {
+ bond_opt_initstr(&newval, primary_reselect);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
+ &newval);
+ if (!valptr) {
pr_err("Error: Invalid primary_reselect \"%s\"\n",
- primary_reselect ==
- NULL ? "NULL" : primary_reselect);
+ primary_reselect);
return -EINVAL;
}
+ primary_reselect_value = valptr->value;
} else {
primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
}
if (fail_over_mac) {
- fail_over_mac_value = bond_parse_parm(fail_over_mac,
- fail_over_mac_tbl);
- if (fail_over_mac_value == -1) {
+ bond_opt_initstr(&newval, fail_over_mac);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
+ &newval);
+ if (!valptr) {
pr_err("Error: invalid fail_over_mac \"%s\"\n",
- arp_validate == NULL ? "NULL" : arp_validate);
+ fail_over_mac);
return -EINVAL;
}
-
+ fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
+ if (lp_interval == 0) {
+ pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
+ }
+
/* fill params struct with the proper values */
params->mode = bond_mode;
params->xmit_policy = xmit_hashtype;
@@ -4330,11 +4346,19 @@ static int bond_check_params(struct bond_params *params)
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
- params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
- if (packets_per_slave > 1)
- params->packets_per_slave = reciprocal_value(packets_per_slave);
- else
- params->packets_per_slave = packets_per_slave;
+ params->lp_interval = lp_interval;
+ params->packets_per_slave = packets_per_slave;
+ if (packets_per_slave > 0) {
+ params->reciprocal_packets_per_slave =
+ reciprocal_value(packets_per_slave);
+ } else {
+ /* reciprocal_packets_per_slave is unused if
+ * packets_per_slave is 0 or 1, just initialize it
+ */
+ params->reciprocal_packets_per_slave =
+ (struct reciprocal_value) { 0 };
+ }
+
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
params->primary[IFNAMSIZ - 1] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 40e7b1cb4aea..70651f8e8e3b 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_netlink.c - Netlink interface for bonding
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,9 +21,81 @@
#include <net/rtnetlink.h>
#include "bonding.h"
+static size_t bond_get_slave_size(const struct net_device *bond_dev,
+ const struct net_device *slave_dev)
+{
+ return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
+ nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
+ 0;
+}
+
+static int bond_fill_slave_info(struct sk_buff *skb,
+ const struct net_device *bond_dev,
+ const struct net_device *slave_dev)
+{
+ struct slave *slave = bond_slave_get_rtnl(slave_dev);
+
+ if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
+ slave->link_failure_count))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
+ slave_dev->addr_len, slave->perm_hwaddr))
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ goto nla_put_failure;
+
+ if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ const struct aggregator *agg;
+
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (agg)
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+ agg->aggregator_identifier))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
+ [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
+ [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
+ [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
+ [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
+ [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
+ [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
+ [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
+ [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
+ [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
+ [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
+ [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
+ [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -40,28 +113,238 @@ static int bond_changelink(struct net_device *bond_dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct bond_opt_value newval;
+ int miimon = 0;
int err;
- if (data && data[IFLA_BOND_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BOND_MODE]) {
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
- err = bond_option_mode_set(bond, mode);
+ bond_opt_initval(&newval, mode);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
if (err)
return err;
}
- if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ if (data[IFLA_BOND_ACTIVE_SLAVE]) {
int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
struct net_device *slave_dev;
+ char *active_slave = "";
- if (ifindex == 0) {
- slave_dev = NULL;
- } else {
+ if (ifindex != 0) {
slave_dev = __dev_get_by_index(dev_net(bond_dev),
ifindex);
if (!slave_dev)
return -ENODEV;
+ active_slave = slave_dev->name;
+ }
+ bond_opt_initstr(&newval, active_slave);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_MIIMON]) {
+ miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
+
+ bond_opt_initval(&newval, miimon);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_UPDELAY]) {
+ int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
+
+ bond_opt_initval(&newval, updelay);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_DOWNDELAY]) {
+ int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
+
+ bond_opt_initval(&newval, downdelay);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_USE_CARRIER]) {
+ int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
+
+ bond_opt_initval(&newval, use_carrier);
+ err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_INTERVAL]) {
+ int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
+
+ if (arp_interval && miimon) {
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
}
- err = bond_option_active_slave_set(bond, slave_dev);
+
+ bond_opt_initval(&newval, arp_interval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_IP_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_arp_ip_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
+ __be32 target = nla_get_be32(attr);
+
+ bond_opt_initval(&newval, target);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
+ &newval);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_VALIDATE]) {
+ int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
+
+ if (arp_validate && miimon) {
+ pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ bond_opt_initval(&newval, arp_validate);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
+ int arp_all_targets =
+ nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
+
+ bond_opt_initval(&newval, arp_all_targets);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
+ struct net_device *dev;
+ char *primary = "";
+
+ dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
+ if (dev)
+ primary = dev->name;
+
+ bond_opt_initstr(&newval, primary);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PRIMARY_RESELECT]) {
+ int primary_reselect =
+ nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
+
+ bond_opt_initval(&newval, primary_reselect);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_FAIL_OVER_MAC]) {
+ int fail_over_mac =
+ nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
+
+ bond_opt_initval(&newval, fail_over_mac);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
+ int xmit_hash_policy =
+ nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
+
+ bond_opt_initval(&newval, xmit_hash_policy);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_RESEND_IGMP]) {
+ int resend_igmp =
+ nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
+
+ bond_opt_initval(&newval, resend_igmp);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
+ int num_peer_notif =
+ nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
+
+ bond_opt_initval(&newval, num_peer_notif);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
+ int all_slaves_active =
+ nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
+
+ bond_opt_initval(&newval, all_slaves_active);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_MIN_LINKS]) {
+ int min_links =
+ nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
+
+ bond_opt_initval(&newval, min_links);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_LP_INTERVAL]) {
+ int lp_interval =
+ nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
+
+ bond_opt_initval(&newval, lp_interval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
+ int packets_per_slave =
+ nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
+
+ bond_opt_initval(&newval, packets_per_slave);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_LACP_RATE]) {
+ int lacp_rate =
+ nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
+
+ bond_opt_initval(&newval, lacp_rate);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_BOND_AD_SELECT]) {
+ int ad_select =
+ nla_get_u8(data[IFLA_BOND_AD_SELECT]);
+
+ bond_opt_initval(&newval, ad_select);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
if (err)
return err;
}
@@ -83,7 +366,36 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
static size_t bond_get_size(const struct net_device *bond_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
- nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
+ /* IFLA_BOND_ARP_IP_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
+ nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
+ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+ 0;
}
static int bond_fill_info(struct sk_buff *skb,
@@ -91,11 +403,139 @@ static int bond_fill_info(struct sk_buff *skb,
{
struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = bond_option_active_slave_get(bond);
+ struct nlattr *targets;
+ unsigned int packets_per_slave;
+ int i, targets_added;
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ goto nla_put_failure;
+
+ if (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
+ bond->params.updelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
+ bond->params.downdelay * bond->params.miimon))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
- (slave_dev &&
- nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
goto nla_put_failure;
+
+ targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
+ if (bond->params.arp_targets[i]) {
+ nla_put_be32(skb, i, bond->params.arp_targets[i]);
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets))
+ goto nla_put_failure;
+
+ if (bond->primary_slave &&
+ nla_put_u32(skb, IFLA_BOND_PRIMARY,
+ bond->primary_slave->dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
+ bond->params.primary_reselect))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
+ bond->params.fail_over_mac))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
+ bond->params.xmit_policy))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
+ bond->params.resend_igmp))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
+ bond->params.num_peer_notif))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
+ bond->params.all_slaves_active))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
+ bond->params.min_links))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
+ bond->params.lp_interval))
+ goto nla_put_failure;
+
+ packets_per_slave = bond->params.packets_per_slave;
+ if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
+ packets_per_slave))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
+ bond->params.lacp_fast))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
+ bond->params.ad_select))
+ goto nla_put_failure;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct ad_info info;
+
+ if (!bond_3ad_get_active_agg_info(bond, &info)) {
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
+ info.aggregator_id))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
+ info.ports))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
+ info.actor_key))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
+ info.partner_key))
+ goto nla_put_failure;
+ if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
+ sizeof(info.partner_system),
+ &info.partner_system))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ }
+ }
+
return 0;
nla_put_failure:
@@ -116,6 +556,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
.get_num_tx_queues = bond_get_num_tx_queues,
.get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
as for TX queues */
+ .get_slave_size = bond_get_slave_size,
+ .fill_slave_info = bond_fill_slave_info,
};
int __init bond_netlink_init(void)
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ea6f640782b7..298c26509095 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1,6 +1,7 @@
/*
* drivers/net/bond/bond_options.c - bonding options
* Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,41 +14,578 @@
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
#include "bonding.h"
-static bool bond_mode_is_valid(int mode)
+static struct bond_opt_value bond_mode_tbl[] = {
+ { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
+ { "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
+ { "balance-xor", BOND_MODE_XOR, 0},
+ { "broadcast", BOND_MODE_BROADCAST, 0},
+ { "802.3ad", BOND_MODE_8023AD, 0},
+ { "balance-tlb", BOND_MODE_TLB, 0},
+ { "balance-alb", BOND_MODE_ALB, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_pps_tbl[] = {
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+ { "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+ { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+ { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+ { "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+ { "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_arp_validate_tbl[] = {
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+ { "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
+ { "all", BOND_ARP_TARGETS_ALL, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+ { "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_FOM_ACTIVE, 0},
+ { "follow", BOND_FOM_FOLLOW, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_intmax_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_lacp_rate_tbl[] = {
+ { "slow", AD_LACP_SLOW, 0},
+ { "fast", AD_LACP_FAST, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_ad_select_tbl[] = {
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_primary_reselect_tbl[] = {
+ { "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
+ { "better", BOND_PRI_RESELECT_BETTER, 0},
+ { "failure", BOND_PRI_RESELECT_FAILURE, 0},
+ { NULL, -1, 0},
+};
+
+static struct bond_opt_value bond_use_carrier_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_resend_igmp_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
+static struct bond_opt_value bond_lp_interval_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+ { "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
+static struct bond_option bond_opts[] = {
+ [BOND_OPT_MODE] = {
+ .id = BOND_OPT_MODE,
+ .name = "mode",
+ .desc = "bond device mode",
+ .flags = BOND_OPTFLAG_NOSLAVES | BOND_OPTFLAG_IFDOWN,
+ .values = bond_mode_tbl,
+ .set = bond_option_mode_set
+ },
+ [BOND_OPT_PACKETS_PER_SLAVE] = {
+ .id = BOND_OPT_PACKETS_PER_SLAVE,
+ .name = "packets_per_slave",
+ .desc = "Packets to send per slave in RR mode",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ROUNDROBIN)),
+ .values = bond_pps_tbl,
+ .set = bond_option_pps_set
+ },
+ [BOND_OPT_XMIT_HASH] = {
+ .id = BOND_OPT_XMIT_HASH,
+ .name = "xmit_hash_policy",
+ .desc = "balance-xor and 802.3ad hashing method",
+ .values = bond_xmit_hashtype_tbl,
+ .set = bond_option_xmit_hash_policy_set
+ },
+ [BOND_OPT_ARP_VALIDATE] = {
+ .id = BOND_OPT_ARP_VALIDATE,
+ .name = "arp_validate",
+ .desc = "validate src/dst of ARP probes",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .values = bond_arp_validate_tbl,
+ .set = bond_option_arp_validate_set
+ },
+ [BOND_OPT_ARP_ALL_TARGETS] = {
+ .id = BOND_OPT_ARP_ALL_TARGETS,
+ .name = "arp_all_targets",
+ .desc = "fail on any/all arp targets timeout",
+ .values = bond_arp_all_targets_tbl,
+ .set = bond_option_arp_all_targets_set
+ },
+ [BOND_OPT_FAIL_OVER_MAC] = {
+ .id = BOND_OPT_FAIL_OVER_MAC,
+ .name = "fail_over_mac",
+ .desc = "For active-backup, do not set all slaves to the same MAC",
+ .flags = BOND_OPTFLAG_NOSLAVES,
+ .values = bond_fail_over_mac_tbl,
+ .set = bond_option_fail_over_mac_set
+ },
+ [BOND_OPT_ARP_INTERVAL] = {
+ .id = BOND_OPT_ARP_INTERVAL,
+ .name = "arp_interval",
+ .desc = "arp interval in milliseconds",
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
+ .values = bond_intmax_tbl,
+ .set = bond_option_arp_interval_set
+ },
+ [BOND_OPT_ARP_TARGETS] = {
+ .id = BOND_OPT_ARP_TARGETS,
+ .name = "arp_ip_target",
+ .desc = "arp targets in n.n.n.n form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_arp_ip_targets_set
+ },
+ [BOND_OPT_DOWNDELAY] = {
+ .id = BOND_OPT_DOWNDELAY,
+ .name = "downdelay",
+ .desc = "Delay before considering link down, in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_downdelay_set
+ },
+ [BOND_OPT_UPDELAY] = {
+ .id = BOND_OPT_UPDELAY,
+ .name = "updelay",
+ .desc = "Delay before considering link up, in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_updelay_set
+ },
+ [BOND_OPT_LACP_RATE] = {
+ .id = BOND_OPT_LACP_RATE,
+ .name = "lacp_rate",
+ .desc = "LACPDU tx rate to request from 802.3ad partner",
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_lacp_rate_tbl,
+ .set = bond_option_lacp_rate_set
+ },
+ [BOND_OPT_MINLINKS] = {
+ .id = BOND_OPT_MINLINKS,
+ .name = "min_links",
+ .desc = "Minimum number of available links before turning on carrier",
+ .values = bond_intmax_tbl,
+ .set = bond_option_min_links_set
+ },
+ [BOND_OPT_AD_SELECT] = {
+ .id = BOND_OPT_AD_SELECT,
+ .name = "ad_select",
+ .desc = "803.ad aggregation selection logic",
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_select_tbl,
+ .set = bond_option_ad_select_set
+ },
+ [BOND_OPT_NUM_PEER_NOTIF] = {
+ .id = BOND_OPT_NUM_PEER_NOTIF,
+ .name = "num_unsol_na",
+ .desc = "Number of peer notifications to send on failover event",
+ .values = bond_num_peer_notif_tbl,
+ .set = bond_option_num_peer_notif_set
+ },
+ [BOND_OPT_MIIMON] = {
+ .id = BOND_OPT_MIIMON,
+ .name = "miimon",
+ .desc = "Link check interval in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_miimon_set
+ },
+ [BOND_OPT_PRIMARY] = {
+ .id = BOND_OPT_PRIMARY,
+ .name = "primary",
+ .desc = "Primary network device to use",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_primary_set
+ },
+ [BOND_OPT_PRIMARY_RESELECT] = {
+ .id = BOND_OPT_PRIMARY_RESELECT,
+ .name = "primary_reselect",
+ .desc = "Reselect primary slave once it comes up",
+ .values = bond_primary_reselect_tbl,
+ .set = bond_option_primary_reselect_set
+ },
+ [BOND_OPT_USE_CARRIER] = {
+ .id = BOND_OPT_USE_CARRIER,
+ .name = "use_carrier",
+ .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .values = bond_use_carrier_tbl,
+ .set = bond_option_use_carrier_set
+ },
+ [BOND_OPT_ACTIVE_SLAVE] = {
+ .id = BOND_OPT_ACTIVE_SLAVE,
+ .name = "active_slave",
+ .desc = "Currently active slave",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_active_slave_set
+ },
+ [BOND_OPT_QUEUE_ID] = {
+ .id = BOND_OPT_QUEUE_ID,
+ .name = "queue_id",
+ .desc = "Set queue id of a slave",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_queue_id_set
+ },
+ [BOND_OPT_ALL_SLAVES_ACTIVE] = {
+ .id = BOND_OPT_ALL_SLAVES_ACTIVE,
+ .name = "all_slaves_active",
+ .desc = "Keep all frames received on an interface by setting active flag for all slaves",
+ .values = bond_all_slaves_active_tbl,
+ .set = bond_option_all_slaves_active_set
+ },
+ [BOND_OPT_RESEND_IGMP] = {
+ .id = BOND_OPT_RESEND_IGMP,
+ .name = "resend_igmp",
+ .desc = "Number of IGMP membership reports to send on link failure",
+ .values = bond_resend_igmp_tbl,
+ .set = bond_option_resend_igmp_set
+ },
+ [BOND_OPT_LP_INTERVAL] = {
+ .id = BOND_OPT_LP_INTERVAL,
+ .name = "lp_interval",
+ .desc = "The number of seconds between instances where the bonding driver sends learning packets to each slave's peer switch",
+ .values = bond_lp_interval_tbl,
+ .set = bond_option_lp_interval_set
+ },
+ [BOND_OPT_SLAVES] = {
+ .id = BOND_OPT_SLAVES,
+ .name = "slaves",
+ .desc = "Slave membership management",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_slaves_set
+ },
+ { }
+};
+
+/* Searches for a value in opt's values[] table */
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
+ struct bond_option *opt;
int i;
- for (i = 0; bond_mode_tbl[i].modename; i++);
+ opt = bond_opt_get(option);
+ if (WARN_ON(!opt))
+ return NULL;
+ for (i = 0; opt->values && opt->values[i].string; i++)
+ if (opt->values[i].value == val)
+ return &opt->values[i];
- return mode >= 0 && mode < i;
+ return NULL;
}
-int bond_option_mode_set(struct bonding *bond, int mode)
+/* Searches for a value in opt's values[] table which matches the flagmask */
+static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+ u32 flagmask)
{
- if (!bond_mode_is_valid(mode)) {
- pr_err("invalid mode value %d.\n", mode);
- return -EINVAL;
+ int i;
+
+ for (i = 0; opt->values && opt->values[i].string; i++)
+ if (opt->values[i].flags & flagmask)
+ return &opt->values[i];
+
+ return NULL;
+}
+
+/* If maxval is missing then there's no range to check. In case minval is
+ * missing then it's considered to be 0.
+ */
+static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
+{
+ struct bond_opt_value *minval, *maxval;
+
+ minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+ maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+ if (!maxval || (minval && val < minval->value) || val > maxval->value)
+ return false;
+
+ return true;
+}
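+
+/* For example, with bond_intmax_tbl above ("off" = 0 marked as default,
+ * "maxval" = INT_MAX marked as max, and no BOND_VALFLAG_MIN entry), this
+ * range check accepts any value in [0, INT_MAX].
+ */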
+
+/**
+ * bond_opt_parse - parse option value
+ * @opt: the option to parse against
+ * @val: value to parse
+ *
+ * This function tries to extract the value from @val and checks if it's
+ * a possible match for the option; it returns NULL if no match is found,
+ * or the struct bond_opt_value that matched. It also strips the newline
+ * from @val->string if one is present.
+ */
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val)
+{
+ char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
+ struct bond_opt_value *tbl, *ret = NULL;
+ bool checkval;
+ int i, rv;
+
+ /* No parsing if the option wants a raw val */
+ if (opt->flags & BOND_OPTFLAG_RAWVAL)
+ return val;
+
+ tbl = opt->values;
+ if (!tbl)
+ goto out;
+
+ /* ULLONG_MAX is used to bypass string processing */
+ checkval = val->value != ULLONG_MAX;
+ if (!checkval) {
+ if (!val->string)
+ goto out;
+ p = strchr(val->string, '\n');
+ if (p)
+ *p = '\0';
+ for (p = val->string; *p; p++)
+ if (!(isdigit(*p) || isspace(*p)))
+ break;
+ /* The following code extracts the string to match or the value
+ * and sets checkval appropriately
+ */
+ if (*p) {
+ rv = sscanf(val->string, "%32s", valstr);
+ } else {
+ rv = sscanf(val->string, "%llu", &val->value);
+ checkval = true;
+ }
+ if (!rv)
+ goto out;
}
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: unable to update mode because interface is up.\n",
- bond->dev->name);
- return -EPERM;
+ for (i = 0; tbl[i].string; i++) {
+ /* Check for exact match */
+ if (checkval) {
+ if (val->value == tbl[i].value)
+ ret = &tbl[i];
+ } else {
+ if (!strcmp(valstr, "default") &&
+ (tbl[i].flags & BOND_VALFLAG_DEFAULT))
+ ret = &tbl[i];
+
+ if (!strcmp(valstr, tbl[i].string))
+ ret = &tbl[i];
+ }
+ /* Found an exact match */
+ if (ret)
+ goto out;
}
+ /* Possible range match */
+ if (checkval && bond_opt_check_range(opt, val->value))
+ ret = val;
+out:
+ return ret;
+}
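+
+/* A minimal usage sketch (mirroring how bond_check_params() calls it):
+ *
+ *	struct bond_opt_value newval, *res;
+ *
+ *	bond_opt_initstr(&newval, "802.3ad");
+ *	res = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
+ *	if (res)
+ *		pr_info("mode %s -> %llu\n", res->string, res->value);
+ *
+ * Here res would point at the "802.3ad" entry of bond_mode_tbl, so
+ * res->value == BOND_MODE_8023AD; a NULL result means nothing matched.
+ */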
- if (bond_has_slaves(bond)) {
- pr_err("%s: unable to update mode because bond has slaves.\n",
- bond->dev->name);
- return -EPERM;
+/* Check opt's dependencies against bond mode and currently set options */
+static int bond_opt_check_deps(struct bonding *bond,
+ const struct bond_option *opt)
+{
+ struct bond_params *params = &bond->params;
+
+ if (test_bit(params->mode, &opt->unsuppmodes))
+ return -EACCES;
+ if ((opt->flags & BOND_OPTFLAG_NOSLAVES) && bond_has_slaves(bond))
+ return -ENOTEMPTY;
+ if ((opt->flags & BOND_OPTFLAG_IFDOWN) && (bond->dev->flags & IFF_UP))
+ return -EBUSY;
+
+ return 0;
+}
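+
+/* For instance, lacp_rate is declared with BOND_OPTFLAG_IFDOWN and is
+ * unsupported outside 802.3ad mode, so setting it while the bond is up
+ * fails with -EBUSY, and setting it in e.g. balance-rr mode fails with
+ * -EACCES, in both cases before the option's set() handler is called.
+ */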
+
+static void bond_opt_dep_print(struct bonding *bond,
+ const struct bond_option *opt)
+{
+ struct bond_opt_value *modeval;
+ struct bond_params *params;
+
+ params = &bond->params;
+ modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
+ if (test_bit(params->mode, &opt->unsuppmodes))
+ pr_err("%s: option %s: mode dependency failed, not supported in mode %s(%llu)\n",
+ bond->dev->name, opt->name,
+ modeval->string, modeval->value);
+}
+
+static void bond_opt_error_interpret(struct bonding *bond,
+ const struct bond_option *opt,
+ int error, struct bond_opt_value *val)
+{
+ struct bond_opt_value *minval, *maxval;
+ char *p;
+
+ switch (error) {
+ case -EINVAL:
+ if (val) {
+ if (val->string) {
+ /* sometimes RAWVAL opts may have new lines */
+ p = strchr(val->string, '\n');
+ if (p)
+ *p = '\0';
+ pr_err("%s: option %s: invalid value (%s).\n",
+ bond->dev->name, opt->name, val->string);
+ } else {
+ pr_err("%s: option %s: invalid value (%llu).\n",
+ bond->dev->name, opt->name, val->value);
+ }
+ }
+ minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+ maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+ if (!maxval)
+ break;
+ pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ bond->dev->name, opt->name, minval ? minval->value : 0,
+ maxval->value);
+ break;
+ case -EACCES:
+ bond_opt_dep_print(bond, opt);
+ break;
+ case -ENOTEMPTY:
+ pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ bond->dev->name, opt->name);
+ break;
+ case -EBUSY:
+ pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ bond->dev->name, opt->name);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * __bond_opt_set - set a bonding option
+ * @bond: target bond device
+ * @option: option to set
+ * @val: value to set it to
+ *
+ * This function is used to change the bond's option value; it can be
+ * used both for enabling/changing an option and for disabling it. The
+ * RTNL lock must be held before calling this function.
+ */
+int __bond_opt_set(struct bonding *bond,
+ unsigned int option, struct bond_opt_value *val)
+{
+ struct bond_opt_value *retval = NULL;
+ const struct bond_option *opt;
+ int ret = -ENOENT;
+
+ ASSERT_RTNL();
+
+ opt = bond_opt_get(option);
+ if (WARN_ON(!val) || WARN_ON(!opt))
+ goto out;
+ ret = bond_opt_check_deps(bond, opt);
+ if (ret)
+ goto out;
+ retval = bond_opt_parse(opt, val);
+ if (!retval) {
+ ret = -EINVAL;
+ goto out;
}
+ ret = opt->set(bond, retval);
+out:
+ if (ret)
+ bond_opt_error_interpret(bond, opt, ret, val);
- if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+ return ret;
+}
+
+/**
+ * bond_opt_tryset_rtnl - try to acquire rtnl and call __bond_opt_set
+ * @bond: target bond device
+ * @option: option to set
+ * @buf: value to set it to
+ *
+ * This function tries to acquire RTNL without blocking and if successful
+ * calls __bond_opt_set. It is mainly used for sysfs option manipulation.
+ */
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
+{
+ struct bond_opt_value optval;
+ int ret;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+ bond_opt_initstr(&optval, buf);
+ ret = __bond_opt_set(bond, option, &optval);
+ rtnl_unlock();
+
+ return ret;
+}
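+
+/* This lets a sysfs store handler be reduced to something like the
+ * following sketch (assuming the usual to_bond() accessor from
+ * bond_sysfs.c):
+ *
+ *	int ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
+ *	return ret ? ret : count;
+ */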
+
+/**
+ * bond_opt_get - get a pointer to an option
+ * @option: option for which to return a pointer
+ *
+ * This function checks if option is valid and if so returns a pointer
+ * to its entry in the bond_opts[] option array.
+ */
+struct bond_option *bond_opt_get(unsigned int option)
+{
+ if (!BOND_OPT_VALID(option))
+ return NULL;
+
+ return &bond_opts[option];
+}
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
- bond->dev->name, bond_mode_tbl[mode].modename);
+ bond->dev->name, newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
/* set miimon to default value */
@@ -58,7 +596,8 @@ int bond_option_mode_set(struct bonding *bond, int mode)
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- bond->params.mode = mode;
+ bond->params.mode = newval->value;
+
return 0;
}
@@ -81,10 +620,21 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
}
int bond_option_active_slave_set(struct bonding *bond,
- struct net_device *slave_dev)
+ struct bond_opt_value *newval)
{
+ char ifname[IFNAMSIZ] = { 0, };
+ struct net_device *slave_dev;
int ret = 0;
+ sscanf(newval->string, "%15s", ifname); /* IFNAMSIZ */
+ if (!strlen(ifname) || newval->string[0] == '\n') {
+ slave_dev = NULL;
+ } else {
+ slave_dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!slave_dev)
+ return -ENODEV;
+ }
+
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
pr_err("Device %s is not bonding slave.\n",
@@ -99,14 +649,7 @@ int bond_option_active_slave_set(struct bonding *bond,
}
}
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_err("%s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- return -EINVAL;
- }
-
block_netpoll_tx();
- read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
/* check to see if we are clearing active */
@@ -141,7 +684,604 @@ int bond_option_active_slave_set(struct bonding *bond,
}
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+
+ return ret;
+}
+
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.miimon = newval->value;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (newval->value && bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate)
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!newval->value) {
+ cancel_delayed_work_sync(&bond->mii_work);
+ } else {
+ cancel_delayed_work_sync(&bond->arp_work);
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ int value = newval->value;
+
+ if (!bond->params.miimon) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+ if ((value % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ bond->dev->name, value,
+ bond->params.miimon,
+ (value / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.updelay = value / bond->params.miimon;
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+
+ return 0;
+}
+
+int bond_option_downdelay_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ int value = newval->value;
+
+ if (!bond->params.miimon) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+ if ((value % bond->params.miimon) != 0) {
+ pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+ bond->dev->name, value,
+ bond->params.miimon,
+ (value / bond->params.miimon) *
+ bond->params.miimon);
+ }
+ bond->params.downdelay = value / bond->params.miimon;
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+
+ return 0;
+}
+
+int bond_option_use_carrier_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting use_carrier to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.use_carrier = newval->value;
+
+ return 0;
+}
+
+int bond_option_arp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.arp_interval = newval->value;
+ if (newval->value) {
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0])
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the ARP timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!newval->value) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
+ cancel_delayed_work_sync(&bond->arp_work);
+ } else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
+ __be32 target,
+ unsigned long last_rx)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = target;
+ }
+}
+
+static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ int ind;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip(targets, target) != -1) { /* dup */
+ pr_err("%s: ARP target %pI4 is already present\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, 0); /* first free slot */
+ if (ind == -1) {
+ pr_err("%s: ARP target table is full!\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+
+ _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
+
+ return 0;
+}
+
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+ int ret;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ ret = _bond_option_arp_ip_target_add(bond, target);
+ write_unlock_bh(&bond->lock);
+
+ return ret;
+}
+
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+{
+ __be32 *targets = bond->params.arp_targets;
+ struct list_head *iter;
+ struct slave *slave;
+ unsigned long *targets_rx;
+ int ind, i;
+
+ if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ ind = bond_get_targets_ip(targets, target);
+ if (ind == -1) {
+ pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &target);
+ return -EINVAL;
+ }
+
+ if (ind == 0 && !targets[1] && bond->params.arp_interval)
+ pr_warn("%s: removing last arp target with arp_interval on\n",
+ bond->dev->name);
+
+ pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+ &target);
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+
+ bond_for_each_slave(bond, slave, iter) {
+ targets_rx = slave->target_last_arp_rx;
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets_rx[i] = targets_rx[i+1];
+ targets_rx[i] = 0;
+ }
+ for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+ targets[i] = targets[i+1];
+ targets[i] = 0;
+
+ write_unlock_bh(&bond->lock);
+
+ return 0;
+}
+
+void bond_option_arp_ip_targets_clear(struct bonding *bond)
+{
+ int i;
+
+ /* not to race with bond_arp_rcv */
+ write_lock_bh(&bond->lock);
+ for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+ _bond_options_arp_ip_target_set(bond, i, 0, 0);
+ write_unlock_bh(&bond->lock);
+}
+
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ int ret = -EPERM;
+ __be32 target;
+
+ if (newval->string) {
+ if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+ pr_err("%s: invalid ARP target %pI4 specified\n",
+ bond->dev->name, &target);
+ return ret;
+ }
+ if (newval->string[0] == '+')
+ ret = bond_option_arp_ip_target_add(bond, target);
+ else if (newval->string[0] == '-')
+ ret = bond_option_arp_ip_target_rem(bond, target);
+ else
+ pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ bond->dev->name);
+ } else {
+ target = newval->value;
+ ret = bond_option_arp_ip_target_add(bond, target);
+ }
+
+ return ret;
+}
+
+int bond_option_arp_validate_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting arp_validate to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+
+ if (bond->dev->flags & IFF_UP) {
+ if (!newval->value)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
+ bond->params.arp_validate = newval->value;
+
+ return 0;
+}
+
+int bond_option_arp_all_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.arp_all_targets = newval->value;
+
+ return 0;
+}
+
+int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ char *p, *primary = newval->string;
+ struct list_head *iter;
+ struct slave *slave;
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ p = strchr(primary, '\n');
+ if (p)
+ *p = '\0';
+ /* check to see if we are clearing primary */
+ if (!strlen(primary)) {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
+ }
+ }
+
+ if (bond->primary_slave) {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ bond_select_active_slave(bond);
+ }
+ strncpy(bond->params.primary, primary, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ bond->dev->name, primary, bond->dev->name);
+
+out:
+ write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
unblock_netpoll_tx();
+
+ return 0;
+}
+
+int bond_option_primary_reselect_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.primary_reselect = newval->value;
+
+ block_netpoll_tx();
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
+
+ return 0;
+}
+
+int bond_option_fail_over_mac_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.fail_over_mac = newval->value;
+
+ return 0;
+}
+
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+
+ return 0;
+}
+
+int bond_option_resend_igmp_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting resend_igmp to %llu.\n",
+ bond->dev->name, newval->value);
+ bond->params.resend_igmp = newval->value;
+
+ return 0;
+}
+
+int bond_option_num_peer_notif_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ bond->params.num_peer_notif = newval->value;
+
+ return 0;
+}
+
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (newval->value == bond->params.all_slaves_active)
+ return 0;
+ bond->params.all_slaves_active = newval->value;
+ bond_for_each_slave(bond, slave, iter) {
+ if (!bond_is_active_slave(slave)) {
+ if (newval->value)
+ slave->inactive = 0;
+ else
+ slave->inactive = 1;
+ }
+ }
+
+ return 0;
+}
+
+int bond_option_min_links_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting min links value to %llu\n",
+ bond->dev->name, newval->value);
+ bond->params.min_links = newval->value;
+
+ return 0;
+}
+
+int bond_option_lp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ bond->params.lp_interval = newval->value;
+
+ return 0;
+}
+
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ bond->params.packets_per_slave = newval->value;
+ if (newval->value > 0) {
+ bond->params.reciprocal_packets_per_slave =
+ reciprocal_value(newval->value);
+ } else {
+ /* reciprocal_packets_per_slave is unused if
+ * packets_per_slave is 0 or 1, just initialize it
+ */
+ bond->params.reciprocal_packets_per_slave =
+ (struct reciprocal_value) { 0 };
+ }
+
+ return 0;
+}
+
+int bond_option_lacp_rate_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.lacp_fast = newval->value;
+ bond_3ad_update_lacp_rate(bond);
+
+ return 0;
+}
+
+int bond_option_ad_select_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting ad_select to %s (%llu).\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.ad_select = newval->value;
+
+ return 0;
+}
+
+int bond_option_queue_id_set(struct bonding *bond,
+ struct bond_opt_value *newval)
+{
+ struct slave *slave, *update_slave;
+ struct net_device *sdev;
+ struct list_head *iter;
+ char *delim;
+ int ret = 0;
+ u16 qid;
+
+ /* delim will point to queue id if successful */
+ delim = strchr(newval->string, ':');
+ if (!delim)
+ goto err_no_cmd;
+
+ /* Terminate string that points to device name and bump it
+ * up one, so we can read the queue id there.
+ */
+ *delim = '\0';
+ if (sscanf(++delim, "%hu\n", &qid) != 1)
+ goto err_no_cmd;
+
+ /* Check buffer length, valid ifname and queue id */
+ if (strlen(newval->string) > IFNAMSIZ ||
+ !dev_valid_name(newval->string) ||
+ qid > bond->dev->real_num_tx_queues)
+ goto err_no_cmd;
+
+ /* Get the pointer to that interface if it exists */
+ sdev = __dev_get_by_name(dev_net(bond->dev), newval->string);
+ if (!sdev)
+ goto err_no_cmd;
+
+ /* Search for the slave and check for duplicate qids */
+ update_slave = NULL;
+ bond_for_each_slave(bond, slave, iter) {
+ if (sdev == slave->dev)
+ /* We don't need to check the matching
+ * slave for dups, since we're overwriting it
+ */
+ update_slave = slave;
+ else if (qid && qid == slave->queue_id) {
+ goto err_no_cmd;
+ }
+ }
+
+ if (!update_slave)
+ goto err_no_cmd;
+
+ /* Actually set the qids for the slave */
+ update_slave->queue_id = qid;
+
+out:
return ret;
+
+err_no_cmd:
+ pr_info("invalid input for queue_id set for %s.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
+
+}
+
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+ char command[IFNAMSIZ + 1] = { 0, };
+ struct net_device *dev;
+ char *ifname;
+ int ret;
+
+ sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/
+ ifname = command + 1;
+ if ((strlen(command) <= 1) ||
+ !dev_valid_name(ifname))
+ goto err_no_cmd;
+
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ pr_info("%s: Interface %s does not exist!\n",
+ bond->dev->name, ifname);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (command[0]) {
+ case '+':
+ pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ ret = bond_enslave(bond->dev, dev);
+ break;
+
+ case '-':
+ pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ ret = bond_release(bond->dev, dev);
+ break;
+
+ default:
+ goto err_no_cmd;
+ }
+
+out:
+ return ret;
+
+err_no_cmd:
+ pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
}
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
new file mode 100644
index 000000000000..433d37f6940b
--- /dev/null
+++ b/drivers/net/bonding/bond_options.h
@@ -0,0 +1,170 @@
+/*
+ * drivers/net/bond/bond_options.h - bonding options
+ * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _BOND_OPTIONS_H
+#define _BOND_OPTIONS_H
+
+#define BOND_OPT_MAX_NAMELEN 32
+#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
+#define BOND_MODE_ALL_EX(x) (~(x))
+
+/* Option flags:
+ * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
+ * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
+ * BOND_OPTFLAG_RAWVAL - the option parses the value itself
+ */
+enum {
+ BOND_OPTFLAG_NOSLAVES = BIT(0),
+ BOND_OPTFLAG_IFDOWN = BIT(1),
+ BOND_OPTFLAG_RAWVAL = BIT(2)
+};
+
+/* Value type flags:
+ * BOND_VALFLAG_DEFAULT - mark the value as default
+ * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
+ */
+enum {
+ BOND_VALFLAG_DEFAULT = BIT(0),
+ BOND_VALFLAG_MIN = BIT(1),
+ BOND_VALFLAG_MAX = BIT(2)
+};
+
+/* Option IDs, their bit positions correspond to their IDs */
+enum {
+ BOND_OPT_MODE,
+ BOND_OPT_PACKETS_PER_SLAVE,
+ BOND_OPT_XMIT_HASH,
+ BOND_OPT_ARP_VALIDATE,
+ BOND_OPT_ARP_ALL_TARGETS,
+ BOND_OPT_FAIL_OVER_MAC,
+ BOND_OPT_ARP_INTERVAL,
+ BOND_OPT_ARP_TARGETS,
+ BOND_OPT_DOWNDELAY,
+ BOND_OPT_UPDELAY,
+ BOND_OPT_LACP_RATE,
+ BOND_OPT_MINLINKS,
+ BOND_OPT_AD_SELECT,
+ BOND_OPT_NUM_PEER_NOTIF,
+ BOND_OPT_MIIMON,
+ BOND_OPT_PRIMARY,
+ BOND_OPT_PRIMARY_RESELECT,
+ BOND_OPT_USE_CARRIER,
+ BOND_OPT_ACTIVE_SLAVE,
+ BOND_OPT_QUEUE_ID,
+ BOND_OPT_ALL_SLAVES_ACTIVE,
+ BOND_OPT_RESEND_IGMP,
+ BOND_OPT_LP_INTERVAL,
+ BOND_OPT_SLAVES,
+ BOND_OPT_LAST
+};
+
+/* This structure is used for storing option values and for passing option
+ * values when changing an option. The logic when used as an arg is as follows:
+ * - if string != NULL -> parse it, if the opt is RAW type then return it, else
+ * return the parse result
+ * - if string == NULL -> parse value
+ */
+struct bond_opt_value {
+ char *string;
+ u64 value;
+ u32 flags;
+};
+
+struct bonding;
+
+struct bond_option {
+ int id;
+ char *name;
+ char *desc;
+ u32 flags;
+
+ /* unsuppmodes is used to denote modes in which the option isn't
+ * supported.
+ */
+ unsigned long unsuppmodes;
+ /* supported values which this option can have, can be a subset of
+ * BOND_OPTVAL_RANGE's value range
+ */
+ struct bond_opt_value *values;
+
+ int (*set)(struct bonding *bond, struct bond_opt_value *val);
+};
+
+int __bond_opt_set(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val);
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+struct bond_option *bond_opt_get(unsigned int option);
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+/* This helper is used to initialize a bond_opt_value structure for parameter
+ * passing. There should be either a valid string or value, but not both.
+ * When value is ULLONG_MAX then string will be used.
+ */
+static inline void __bond_opt_init(struct bond_opt_value *optval,
+ char *string, u64 value)
+{
+ memset(optval, 0, sizeof(*optval));
+ optval->value = ULLONG_MAX;
+ if (value == ULLONG_MAX)
+ optval->string = string;
+ else
+ optval->value = value;
+}
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_validate_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_all_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_fail_over_mac_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+void bond_option_arp_ip_targets_clear(struct bonding *bond);
+int bond_option_downdelay_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_updelay_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_lacp_rate_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_min_links_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_ad_select_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_num_peer_notif_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_primary_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_primary_reselect_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_use_carrier_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_active_slave_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_queue_id_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_all_slaves_active_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_resend_igmp_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_lp_interval_set(struct bonding *bond,
+ struct bond_opt_value *newval);
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+#endif /* _BOND_OPTIONS_H */
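
The values field of struct bond_option, together with the BOND_VALFLAG_* flags above, suggests that each option carries a table of named values terminated by a NULL string. A hypothetical table for lacp_rate, sketched from the AD_LACP_SLOW/AD_LACP_FAST constants in bond_3ad.h rather than copied from the table actually added in bond_options.c:

static struct bond_opt_value bond_lacp_rate_tbl[] = {
        { "slow", AD_LACP_SLOW, BOND_VALFLAG_DEFAULT },
        { "fast", AD_LACP_FAST, 0 },
        { NULL,   -1,           0 },
};

A setter would then typically be reached by building a bond_opt_value from a user string and calling the generic entry point under RTNL, for example:

        struct bond_opt_value newval;
        int err;

        bond_opt_initstr(&newval, "fast");
        err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);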
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index fb868d6c22da..3ac20e78eafc 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,6 +65,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
+ struct bond_opt_value *optval;
struct slave *curr;
int i;
@@ -76,26 +77,32 @@ static void bond_info_show_master(struct seq_file *seq)
bond_mode_name(bond->params.mode));
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
- bond->params.fail_over_mac)
- seq_printf(seq, " (fail_over_mac %s)",
- fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+ bond->params.fail_over_mac) {
+ optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+ bond->params.fail_over_mac);
+ seq_printf(seq, " (fail_over_mac %s)", optval->string);
+ }
seq_printf(seq, "\n");
if (bond->params.mode == BOND_MODE_XOR ||
bond->params.mode == BOND_MODE_8023AD) {
+ optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
+ bond->params.xmit_policy);
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
- xmit_hashtype_tbl[bond->params.xmit_policy].modename,
- bond->params.xmit_policy);
+ optval->string, bond->params.xmit_policy);
}
if (USES_PRIMARY(bond->params.mode)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
- if (bond->primary_slave)
+ if (bond->primary_slave) {
+ optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+ bond->params.primary_reselect);
seq_printf(seq, " (primary_reselect %s)",
- pri_reselect_tbl[bond->params.primary_reselect].modename);
+ optval->string);
+ }
seq_printf(seq, "\nCurrently Active Slave: %s\n",
(curr) ? curr->dev->name : "None");
@@ -136,8 +143,10 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Min links: %d\n", bond->params.min_links);
+ optval = bond_opt_get_val(BOND_OPT_AD_SELECT,
+ bond->params.ad_select);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
- ad_select_tbl[bond->params.ad_select].modename);
+ optval->string);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
@@ -159,18 +168,6 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
-static const char *bond_slave_link_status(s8 link)
-{
- static const char * const status[] = {
- [BOND_LINK_UP] = "up",
- [BOND_LINK_FAIL] = "going down",
- [BOND_LINK_DOWN] = "down",
- [BOND_LINK_BACK] = "going back",
- };
-
- return status[link];
-}
-
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ae580bbc5db..643fcc110299 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -12,8 +12,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -40,7 +39,6 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
-#include <linux/reciprocal_div.h>
#include "bonding.h"
@@ -202,58 +200,15 @@ static ssize_t bonding_store_slaves(struct device *d,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- char command[IFNAMSIZ + 1] = { 0, };
- char *ifname;
- int res, ret = count;
- struct net_device *dev;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
- ifname = command + 1;
- if ((strlen(command) <= 1) ||
- !dev_valid_name(ifname))
- goto err_no_cmd;
-
- dev = __dev_get_by_name(dev_net(bond->dev), ifname);
- if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
- bond->dev->name, ifname);
- ret = -ENODEV;
- goto out;
- }
-
- switch (command[0]) {
- case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
- res = bond_enslave(bond->dev, dev);
- break;
-
- case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
- res = bond_release(bond->dev, dev);
- break;
-
- default:
- goto err_no_cmd;
- }
-
- if (res)
- ret = res;
- goto out;
-
-err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
- bond->dev->name);
- ret = -EPERM;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
-
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
bonding_store_slaves);
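
Every store handler below is reduced to the same call into bond_opt_tryset_rtnl(). Its body is not part of this section, but judging from the code it replaces it presumably folds the old rtnl_trylock()/restart_syscall() boilerplate into one place, roughly:

/* Sketch of the assumed implementation; the real one lives in
 * bond_options.c and may differ in detail.
 */
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
{
        struct bond_opt_value optval;
        int ret;

        if (!rtnl_trylock())
                return restart_syscall();

        bond_opt_initstr(&optval, buf);
        ret = __bond_opt_set(bond, option, &optval);
        rtnl_unlock();

        return ret;
}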
@@ -265,37 +220,24 @@ static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- bond_mode_tbl[bond->params.mode].modename,
- bond->params.mode);
+ val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
}
static ssize_t bonding_store_mode(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret;
struct bonding *bond = to_bond(d);
+ int ret;
- new_value = bond_parse_parm(buf, bond_mode_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid mode value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- return -EINVAL;
- }
- if (!rtnl_trylock())
- return restart_syscall();
-
- ret = bond_option_mode_set(bond, new_value);
- if (!ret) {
- pr_info("%s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
- new_value);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
+ if (!ret)
ret = count;
- }
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
@@ -309,31 +251,23 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- xmit_hashtype_tbl[bond->params.xmit_policy].modename,
- bond->params.xmit_policy);
+ val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static ssize_t bonding_store_xmit_hash(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
- bond->dev->name,
- (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- } else {
- bond->params.xmit_policy = new_value;
- pr_info("%s: setting xmit hash policy to %s (%d).\n",
- bond->dev->name,
- xmit_hashtype_tbl[new_value].modename, new_value);
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -348,10 +282,12 @@ static ssize_t bonding_show_arp_validate(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- arp_validate_tbl[bond->params.arp_validate].modename,
- bond->params.arp_validate);
+ val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+ bond->params.arp_validate);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static ssize_t bonding_store_arp_validate(struct device *d,
@@ -359,36 +295,11 @@ static ssize_t bonding_store_arp_validate(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- new_value = bond_parse_parm(buf, arp_validate_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid arp_validate value %s\n",
- bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("%s: arp_validate only supported in active-backup mode.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!new_value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
- bond->params.arp_validate = new_value;
-out:
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -403,10 +314,12 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- int value = bond->params.arp_all_targets;
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n", arp_all_targets_tbl[value].modename,
- value);
+ val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
+ bond->params.arp_all_targets);
+ return sprintf(buf, "%s %d\n",
+ val->string, bond->params.arp_all_targets);
}
static ssize_t bonding_store_arp_all_targets(struct device *d,
@@ -414,21 +327,13 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value;
-
- new_value = bond_parse_parm(buf, arp_all_targets_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid arp_all_targets value %s\n",
- bond->dev->name, buf);
- return -EINVAL;
- }
- pr_info("%s: setting arp_all_targets to %s (%d).\n",
- bond->dev->name, arp_all_targets_tbl[new_value].modename,
- new_value);
+ int ret;
- bond->params.arp_all_targets = new_value;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
+ if (!ret)
+ ret = count;
- return count;
+ return ret;
}
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
@@ -443,44 +348,25 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- fail_over_mac_tbl[bond->params.fail_over_mac].modename,
- bond->params.fail_over_mac);
+ val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+ bond->params.fail_over_mac);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static ssize_t bonding_store_fail_over_mac(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond_has_slaves(bond)) {
- pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- new_value = bond_parse_parm(buf, fail_over_mac_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
- bond->dev->name, buf);
- ret = -EINVAL;
- goto out;
- }
-
- bond->params.fail_over_mac = new_value;
- pr_info("%s: Setting fail_over_mac to %s (%d).\n",
- bond->dev->name, fail_over_mac_tbl[new_value].modename,
- new_value);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
@@ -507,61 +393,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
+ if (!ret)
+ ret = count;
- if (!rtnl_trylock())
- return restart_syscall();
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no arp_interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
- bond->dev->name, new_value, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (BOND_NO_USES_ARP(bond->params.mode)) {
- pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
- bond->dev->name, bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.arp_interval = new_value;
- if (new_value) {
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the ARP timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- if (bond->params.arp_validate)
- bond->recv_probe = NULL;
- cancel_delayed_work_sync(&bond->arp_work);
- } else {
- /* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -574,8 +411,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
{
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ int i, res = 0;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
@@ -584,6 +421,7 @@ static ssize_t bonding_show_arp_targets(struct device *d,
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
return res;
}
@@ -592,82 +430,12 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- struct slave *slave;
- __be32 newtarget, *targets;
- unsigned long *targets_rx;
- int ind, i, j, ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- targets = bond->params.arp_targets;
- if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
- pr_err("%s: invalid ARP target %pI4 specified for addition\n",
- bond->dev->name, &newtarget);
- goto out;
- }
- /* look for adds */
- if (buf[0] == '+') {
- if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
- pr_err("%s: ARP target %pI4 is already present\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- ind = bond_get_targets_ip(targets, 0); /* first free slot */
- if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
- goto out;
- }
-
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
- &newtarget);
- /* not to race with bond_arp_rcv */
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter)
- slave->target_last_arp_rx[ind] = jiffies;
- targets[ind] = newtarget;
- write_unlock_bh(&bond->lock);
- } else if (buf[0] == '-') {
- ind = bond_get_targets_ip(targets, newtarget);
- if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
- goto out;
- }
-
- if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
- bond->dev->name);
-
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &newtarget);
+ int ret;
- write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, iter) {
- targets_rx = slave->target_last_arp_rx;
- j = ind;
- for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
- targets_rx[j] = targets_rx[j+1];
- targets_rx[j] = 0;
- }
- for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
- targets[i] = targets[i+1];
- targets[i] = 0;
- write_unlock_bh(&bond->lock);
- } else {
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
+ if (!ret)
+ ret = count;
- ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -690,45 +458,13 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no down delay value specified.\n", bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.downdelay = new_value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
-
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -748,45 +484,13 @@ static ssize_t bonding_store_updelay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
- if (!(bond->params.miimon)) {
- pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no up delay value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- } else {
- if ((new_value % bond->params.miimon) != 0) {
- pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- bond->dev->name, new_value,
- bond->params.miimon,
- (new_value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.updelay = new_value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
@@ -801,10 +505,11 @@ static ssize_t bonding_show_lacp(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- bond_lacp_tbl[bond->params.lacp_fast].modename,
- bond->params.lacp_fast);
+ val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static ssize_t bonding_store_lacp(struct device *d,
@@ -812,40 +517,11 @@ static ssize_t bonding_store_lacp(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update LACP rate because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (bond->params.mode != BOND_MODE_8023AD) {
- pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ int ret;
- new_value = bond_parse_parm(buf, bond_lacp_tbl);
-
- if ((new_value == 1) || (new_value == 0)) {
- bond->params.lacp_fast = new_value;
- bond_3ad_update_lacp_rate(bond);
- pr_info("%s: Setting LACP rate to %s (%d).\n",
- bond->dev->name, bond_lacp_tbl[new_value].modename,
- new_value);
- } else {
- pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- }
-out:
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
+ if (!ret)
+ ret = count;
return ret;
}
@@ -867,19 +543,12 @@ static ssize_t bonding_store_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
int ret;
- unsigned int new_value;
- ret = kstrtouint(buf, 0, &new_value);
- if (ret < 0) {
- pr_err("%s: Ignoring invalid min links value %s.\n",
- bond->dev->name, buf);
- return ret;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
+ if (!ret)
+ ret = count;
- pr_info("%s: Setting min links value to %u\n",
- bond->dev->name, new_value);
- bond->params.min_links = new_value;
- return count;
+ return ret;
}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
bonding_show_min_links, bonding_store_min_links);
@@ -889,10 +558,11 @@ static ssize_t bonding_show_ad_select(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
- return sprintf(buf, "%s %d\n",
- ad_select_tbl[bond->params.ad_select].modename,
- bond->params.ad_select);
+ val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
@@ -900,29 +570,13 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (bond->dev->flags & IFF_UP) {
- pr_err("%s: Unable to update ad_select because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
+ if (!ret)
+ ret = count;
- new_value = bond_parse_parm(buf, ad_select_tbl);
-
- if (new_value != -1) {
- bond->params.ad_select = new_value;
- pr_info("%s: Setting ad_select to %s (%d).\n",
- bond->dev->name, ad_select_tbl[new_value].modename,
- new_value);
- } else {
- pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- }
-out:
return ret;
}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
@@ -944,8 +598,13 @@ static ssize_t bonding_store_num_peer_notif(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
- return err ? err : count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
+ if (!ret)
+ ret = count;
+
+ return ret;
}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
@@ -971,56 +630,13 @@ static ssize_t bonding_store_miimon(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
+ if (!ret)
+ ret = count;
- if (!rtnl_trylock())
- return restart_syscall();
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no miimon value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0) {
- pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (new_value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate)
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- cancel_delayed_work_sync(&bond->mii_work);
- } else {
- cancel_delayed_work_sync(&bond->arp_work);
- queue_delayed_work(bond->wq, &bond->mii_work, 0);
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1051,58 +667,13 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- char ifname[IFNAMSIZ];
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
-
- sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing primary */
- if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- memset(bond->params.primary, 0, sizeof(bond->params.primary));
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave, iter) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
-
- strncpy(bond->params.primary, ifname, IFNAMSIZ);
- bond->params.primary[IFNAMSIZ - 1] = 0;
+ int ret;
- pr_info("%s: Recording %s as primary, "
- "but it has not been enslaved to %s yet.\n",
- bond->dev->name, ifname, bond->dev->name);
-out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
- rtnl_unlock();
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
+ if (!ret)
+ ret = count;
- return count;
+ return ret;
}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
bonding_show_primary, bonding_store_primary);
@@ -1115,45 +686,27 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct bond_opt_value *val;
+
+ val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+ bond->params.primary_reselect);
return sprintf(buf, "%s %d\n",
- pri_reselect_tbl[bond->params.primary_reselect].modename,
- bond->params.primary_reselect);
+ val->string, bond->params.primary_reselect);
}
static ssize_t bonding_store_primary_reselect(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (!rtnl_trylock())
- return restart_syscall();
-
- new_value = bond_parse_parm(buf, pri_reselect_tbl);
- if (new_value < 0) {
- pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
- bond->dev->name,
- (int) strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- bond->params.primary_reselect = new_value;
- pr_info("%s: setting primary_reselect to %s (%d).\n",
- bond->dev->name, pri_reselect_tbl[new_value].modename,
- new_value);
-
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
- bond_select_active_slave(bond);
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
@@ -1176,25 +729,13 @@ static ssize_t bonding_store_carrier(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
+ if (!ret)
+ ret = count;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no use_carrier value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.use_carrier = new_value;
- pr_info("%s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
- } else {
- pr_info("%s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
- }
-out:
return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
@@ -1225,34 +766,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret;
struct bonding *bond = to_bond(d);
- char ifname[IFNAMSIZ];
- struct net_device *dev;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
- if (!strlen(ifname) || buf[0] == '\n') {
- dev = NULL;
- } else {
- dev = __dev_get_by_name(dev_net(bond->dev), ifname);
- if (!dev) {
- ret = -ENODEV;
- goto out;
- }
- }
+ int ret;
- ret = bond_option_active_slave_set(bond, dev);
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
if (!ret)
ret = count;
- out:
- rtnl_unlock();
-
return ret;
-
}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
bonding_show_active_slave, bonding_store_active_slave);
@@ -1421,72 +942,15 @@ static ssize_t bonding_store_queue_id(struct device *d,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
- struct list_head *iter;
- u16 qid;
- int ret = count;
- char *delim;
- struct net_device *sdev = NULL;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- /* delim will point to queue id if successful */
- delim = strchr(buffer, ':');
- if (!delim)
- goto err_no_cmd;
-
- /*
- * Terminate string that points to device name and bump it
- * up one, so we can read the queue id there.
- */
- *delim = '\0';
- if (sscanf(++delim, "%hd\n", &qid) != 1)
- goto err_no_cmd;
-
- /* Check buffer length, valid ifname and queue id */
- if (strlen(buffer) > IFNAMSIZ ||
- !dev_valid_name(buffer) ||
- qid > bond->dev->real_num_tx_queues)
- goto err_no_cmd;
-
- /* Get the pointer to that interface if it exists */
- sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
- if (!sdev)
- goto err_no_cmd;
-
- /* Search for thes slave and check for duplicate qids */
- update_slave = NULL;
- bond_for_each_slave(bond, slave, iter) {
- if (sdev == slave->dev)
- /*
- * We don't need to check the matching
- * slave for dups, since we're overwriting it
- */
- update_slave = slave;
- else if (qid && qid == slave->queue_id) {
- goto err_no_cmd;
- }
- }
-
- if (!update_slave)
- goto err_no_cmd;
+ int ret;
- /* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
+ if (!ret)
+ ret = count;
-out:
- rtnl_unlock();
return ret;
-
-err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
}
-
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
bonding_store_queue_id);
@@ -1508,42 +972,13 @@ static ssize_t bonding_store_slaves_active(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
- struct list_head *iter;
- struct slave *slave;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no all_slaves_active value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (new_value == bond->params.all_slaves_active)
- goto out;
+ int ret;
- if ((new_value == 0) || (new_value == 1)) {
- bond->params.all_slaves_active = new_value;
- } else {
- pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- bond_for_each_slave(bond, slave, iter) {
- if (!bond_is_active_slave(slave)) {
- if (new_value)
- slave->inactive = 0;
- else
- slave->inactive = 1;
- }
- }
-out:
- rtnl_unlock();
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1565,27 +1000,13 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int ret;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no resend_igmp value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (new_value < 0 || new_value > 255) {
- pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
+ if (!ret)
+ ret = count;
- pr_info("%s: Setting resend_igmp to %d.\n",
- bond->dev->name, new_value);
- bond->params.resend_igmp = new_value;
-out:
return ret;
}
@@ -1606,24 +1027,12 @@ static ssize_t bonding_store_lp_interval(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no lp interval value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
+ int ret;
- if (new_value <= 0) {
- pr_err ("%s: lp_interval must be between 1 and %d\n",
- bond->dev->name, INT_MAX);
- ret = -EINVAL;
- goto out;
- }
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
+ if (!ret)
+ ret = count;
- bond->params.lp_interval = new_value;
-out:
return ret;
}
@@ -1636,10 +1045,6 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
-
- if (packets_per_slave > 1)
- packets_per_slave = reciprocal_value(packets_per_slave);
-
return sprintf(buf, "%u\n", packets_per_slave);
}
@@ -1648,28 +1053,13 @@ static ssize_t bonding_store_packets_per_slave(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
- int new_value, ret = count;
+ int ret;
+
+ ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
+ (char *)buf);
+ if (!ret)
+ ret = count;
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no packets_per_slave value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0 || new_value > USHRT_MAX) {
- pr_err("%s: packets_per_slave must be between 0 and %u\n",
- bond->dev->name, USHRT_MAX);
- ret = -EINVAL;
- goto out;
- }
- if (bond->params.mode != BOND_MODE_ROUNDROBIN)
- pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
- bond->dev->name);
- if (new_value > 1)
- bond->params.packets_per_slave = reciprocal_value(new_value);
- else
- bond->params.packets_per_slave = new_value;
-out:
return ret;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
new file mode 100644
index 000000000000..2e4eec5450c8
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -0,0 +1,144 @@
+/* Sysfs attributes of bond slaves
+ *
+ * Copyright (c) 2014 Scott Feldman <sfeldma@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+
+struct slave_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct slave *, char *);
+};
+
+#define SLAVE_ATTR(_name, _mode, _show) \
+const struct slave_attribute slave_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+};
+#define SLAVE_ATTR_RO(_name) \
+ SLAVE_ATTR(_name, S_IRUGO, _name##_show)
+
+static ssize_t state_show(struct slave *slave, char *buf)
+{
+ switch (bond_slave_state(slave)) {
+ case BOND_STATE_ACTIVE:
+ return sprintf(buf, "active\n");
+ case BOND_STATE_BACKUP:
+ return sprintf(buf, "backup\n");
+ default:
+ return sprintf(buf, "UNKONWN\n");
+ }
+}
+static SLAVE_ATTR_RO(state);
+
+static ssize_t mii_status_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+}
+static SLAVE_ATTR_RO(mii_status);
+
+static ssize_t link_failure_count_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%d\n", slave->link_failure_count);
+}
+static SLAVE_ATTR_RO(link_failure_count);
+
+static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+}
+static SLAVE_ATTR_RO(perm_hwaddr);
+
+static ssize_t queue_id_show(struct slave *slave, char *buf)
+{
+ return sprintf(buf, "%d\n", slave->queue_id);
+}
+static SLAVE_ATTR_RO(queue_id);
+
+static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
+{
+ const struct aggregator *agg;
+
+ if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (agg)
+ return sprintf(buf, "%d\n",
+ agg->aggregator_identifier);
+ }
+
+ return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_aggregator_id);
+
+static const struct slave_attribute *slave_attrs[] = {
+ &slave_attr_state,
+ &slave_attr_mii_status,
+ &slave_attr_link_failure_count,
+ &slave_attr_perm_hwaddr,
+ &slave_attr_queue_id,
+ &slave_attr_ad_aggregator_id,
+ NULL
+};
+
+#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
+#define to_slave(obj) container_of(obj, struct slave, kobj)
+
+static ssize_t slave_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct slave_attribute *slave_attr = to_slave_attr(attr);
+ struct slave *slave = to_slave(kobj);
+
+ return slave_attr->show(slave, buf);
+}
+
+static const struct sysfs_ops slave_sysfs_ops = {
+ .show = slave_show,
+};
+
+static struct kobj_type slave_ktype = {
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+int bond_sysfs_slave_add(struct slave *slave)
+{
+ const struct slave_attribute **a;
+ int err;
+
+ err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+ &(slave->dev->dev.kobj), "bonding_slave");
+ if (err)
+ return err;
+
+ for (a = slave_attrs; *a; ++a) {
+ err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+ if (err) {
+ kobject_del(&slave->kobj);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void bond_sysfs_slave_del(struct slave *slave)
+{
+ const struct slave_attribute **a;
+
+ for (a = slave_attrs; *a; ++a)
+ sysfs_remove_file(&slave->kobj, &((*a)->attr));
+
+ kobject_del(&slave->kobj);
+}
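
The SLAVE_ATTR_RO() pattern keeps new per-slave attributes cheap to add. Purely as an illustration (the attribute name and the slave->speed field are assumptions, not part of this patch), another read-only file would be wired up like this:

/* hypothetical example attribute */
static ssize_t speed_show(struct slave *slave, char *buf)
{
        return sprintf(buf, "%u\n", slave->speed);
}
static SLAVE_ATTR_RO(speed);

/* ...and &slave_attr_speed would then be appended to slave_attrs[]. */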
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a9f4f9f4d8ce..2b0fdec695f7 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,11 @@
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
+#include <linux/reciprocal_div.h>
+
#include "bond_3ad.h"
#include "bond_alb.h"
+#include "bond_options.h"
#define DRV_VERSION "3.7.1"
#define DRV_RELDATE "April 27, 2011"
@@ -101,6 +104,10 @@
netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
NULL)
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+ netdev_lower_get_first_private_rcu(bond->dev)
+
#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
@@ -167,6 +174,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ struct reciprocal_value reciprocal_packets_per_slave;
};
struct bond_parm_tbl {
@@ -187,7 +195,8 @@ struct slave {
s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
- inactive:1; /* indicates inactive slave */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1; /* indicates whether the state changed */
u8 duplex;
u32 original_mtu;
u32 link_failure_count;
@@ -199,6 +208,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct kobject kobj;
};
/*
@@ -280,12 +290,62 @@ static inline bool bond_is_lb(const struct bonding *bond)
static inline void bond_set_active_slave(struct slave *slave)
{
- slave->backup = 0;
+ if (slave->backup) {
+ slave->backup = 0;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ }
}
static inline void bond_set_backup_slave(struct slave *slave)
{
- slave->backup = 1;
+ if (!slave->backup) {
+ slave->backup = 1;
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ }
+}
+
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ slave->should_notify = 0;
+ } else {
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->link == BOND_LINK_UP)
+ bond_set_active_slave(tmp);
+ else if (tmp->link == BOND_LINK_DOWN)
+ bond_set_backup_slave(tmp);
+ }
+}
+
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ tmp->should_notify = 0;
+ }
+ }
}
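
The notify flag exists so that state changes made under spinlocks can defer the RTM_NEWLINK message. A sketch of the intended calling pattern, illustrative rather than copied from a specific hunk:

        /* change state while holding the lock, defer the netlink message */
        write_lock_bh(&bond->curr_slave_lock);
        bond_set_slave_state(slave, BOND_STATE_BACKUP, BOND_SLAVE_NOTIFY_LATER);
        write_unlock_bh(&bond->curr_slave_lock);

        /* with the lock dropped, flush any deferred notifications */
        bond_slave_state_notify(bond);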
static inline int bond_slave_state(struct slave *slave)
@@ -315,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
@@ -366,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
}
#endif
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
{
if (!bond_is_lb(slave->bond))
- bond_set_backup_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
-static inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
{
- bond_set_active_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
}
@@ -394,8 +459,8 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
- addr = inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST);
-
+ addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+ RT_SCOPE_HOST);
rcu_read_unlock();
return addr;
}
@@ -412,19 +477,18 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
+int bond_sysfs_slave_add(struct slave *slave);
+void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_mii_monitor(struct work_struct *);
-void bond_loadbalance_arp_mon(struct work_struct *);
-void bond_activebackup_arp_mon(struct work_struct *);
int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -437,10 +501,11 @@ void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
void bond_netlink_fini(void);
-int bond_option_mode_set(struct bonding *bond, int mode);
-int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
+const char *bond_slave_link_status(s8 link);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -520,7 +585,6 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
/* exported from bond_main.c */
extern int bond_net_id;
extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl bond_mode_tbl[];
extern const struct bond_parm_tbl xmit_hashtype_tbl[];
extern const struct bond_parm_tbl arp_validate_tbl[];
extern const struct bond_parm_tbl arp_all_targets_tbl[];
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index ee92ad5a6cf8..39ba2f892ad6 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -3,7 +3,6 @@
* Author: Daniel Martensson
* License terms: GNU General Public License (GPL) version 2.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3c069472eb8b..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -71,7 +71,7 @@ config CAN_AT91
and AT91SAM9X5 processors.
config CAN_TI_HECC
- depends on ARCH_OMAP3
+ depends on ARM
tristate "TI High End CAN Controller"
---help---
Driver for TI HECC (High End CAN Controller) module found on many
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cf0f63e14e53..6efe27458116 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8a0b515b33ea..8d2b89a12e09 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 77061eebb034..951bfede8f3d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -808,17 +808,19 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
+ u16 val;
+
+ /*
+ * It is faster to read only one 16-bit register. This is only possible
+ * when there are at most 16 message objects.
+ */
+ BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
+ "Implementation does not support more message objects than 16");
+
+ while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
+ while ((msg_obj = ffs(val)) && quota > 0) {
+ val &= ~BIT(msg_obj - 1);
- for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
- msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
- msg_obj++) {
- /*
- * as interrupt pending register's bit n-1 corresponds to
- * message object n, we need to handle the same properly.
- */
- if (val & (1 << (msg_obj - 1))) {
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
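
The rewritten receive loop walks only the bits that are actually set instead of testing every message object. The ffs()-based idiom in isolation, as a generic sketch and not the driver's exact code:

        u16 pending = 0x0015;   /* example: objects 1, 3 and 5 pending */
        unsigned int obj;

        while ((obj = ffs(pending))) {
                /* ffs() is 1-based, so clear bit obj-1 from the local copy */
                pending &= ~BIT(obj - 1);
                /* handle message object 'obj' here */
        }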
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index bda1888cae9a..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -324,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
if (!priv->echo_skb[idx]) {
- struct sock *srcsk = skb->sk;
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else
- skb_orphan(skb);
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* make settings for echo to reduce code in irq context */
skb->protocol = htons(ETH_P_CAN);
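
The open-coded clone/orphan logic removed above is replaced by a single call to can_create_echo_skb(). Inferred from the deleted lines, so treat this as a sketch rather than the helper's guaranteed implementation, it presumably clones a shared skb (preserving skb->sk) or orphans an exclusively owned one:

struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
        struct sock *srcsk = skb->sk;

        if (atomic_read(&skb->users) != 1) {
                struct sk_buff *old_skb = skb;

                /* shared: work on a private copy, drop the original */
                skb = skb_clone(old_skb, GFP_ATOMIC);
                kfree_skb(old_skb);
                if (!skb)
                        return NULL;
        } else {
                /* exclusively owned: just detach it from the socket */
                skb_orphan(skb);
        }

        skb->sk = srcsk;
        return skb;
}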
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..61376abdab39 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,8 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+#define FLEXCAN_TIMEOUT_US (50)
+
/*
* FLEXCAN hardware feature flags
*
@@ -235,9 +237,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
};
/*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
*/
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
return in_be32(addr);
@@ -259,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
}
#endif
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
u32 reg_esr)
{
@@ -266,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
(reg_esr & FLEXCAN_ESR_ERR_BUS);
}
-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
- udelay(10);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+static int flexcan_chip_disable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_freeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(100, 200);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ return -ETIMEDOUT;
+
+ return 0;
}
static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -706,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev)
u32 reg_mcr, reg_ctrl;
/* enable module */
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
/* soft reset */
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- udelay(10);
-
- reg_mcr = flexcan_read(&regs->mcr);
- if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
- netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
- reg_mcr);
- err = -ENODEV;
- goto out;
- }
+ err = flexcan_chip_softreset(priv);
+ if (err)
+ goto out_chip_disable;
flexcan_set_bittiming(dev);
@@ -785,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev)
if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
flexcan_write(0x0, &regs->rxfgmask);
- if (priv->reg_xceiver) {
- err = regulator_enable(priv->reg_xceiver);
- if (err)
- goto out;
- }
+ err = flexcan_transceiver_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* synchronize with the can bus */
- reg_mcr = flexcan_read(&regs->mcr);
- reg_mcr &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg_mcr, &regs->mcr);
+ err = flexcan_chip_unfreeze(priv);
+ if (err)
+ goto out_transceiver_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -807,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev)
return 0;
- out:
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
+ out_chip_disable:
flexcan_chip_disable(priv);
return err;
}
@@ -822,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg;
+
+ /* freeze + disable module */
+ flexcan_chip_freeze(priv);
+ flexcan_chip_disable(priv);
/* Disable all interrupts */
flexcan_write(0, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
- /* Disable + halt module */
- reg = flexcan_read(&regs->mcr);
- reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
-
- if (priv->reg_xceiver)
- regulator_disable(priv->reg_xceiver);
+ flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return;
@@ -863,7 +947,7 @@ static int flexcan_open(struct net_device *dev)
/* start chip and queuing */
err = flexcan_chip_start(dev);
if (err)
- goto out_close;
+ goto out_free_irq;
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -872,6 +956,8 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_close:
close_candev(dev);
out_disable_per:
@@ -942,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev)
goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ goto out_disable_per;
reg = flexcan_read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
flexcan_write(reg, &regs->ctrl);
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
reg = flexcan_read(&regs->mcr);
@@ -964,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev)
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out_disable_per;
+ goto out_chip_disable;
}
err = register_candev(dev);
- out_disable_per:
/* disable core and turn off clocks */
+ out_chip_disable:
flexcan_chip_disable(priv);
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
@@ -1101,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev)
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
-
+ netif_napi_del(&priv->napi);
free_candev(dev);
return 0;
@@ -1114,8 +1206,11 @@ static int flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
if (netif_running(dev)) {
netif_stop_queue(dev);
@@ -1136,9 +1231,7 @@ static int flexcan_resume(struct device *device)
netif_device_attach(dev);
netif_start_queue(dev);
}
- flexcan_chip_enable(priv);
-
- return 0;
+ return flexcan_chip_enable(priv);
}
#endif /* CONFIG_PM_SLEEP */
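/*
 * Illustrative sketch, not the driver code: the shape shared by the new
 * flexcan_chip_{enable,disable,freeze,unfreeze,softreset} helpers above.
 * A mode bit is written and the matching acknowledge bit is then polled a
 * bounded number of times instead of a fixed udelay(); on expiry the helper
 * returns -ETIMEDOUT so callers can unwind. (flexcan_chip_freeze()
 * additionally scales its poll budget with the configured bitrate.) The
 * reg_read()/ACK_BIT names below are demo placeholders only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define ACK_BIT		(1u << 3)
#define TIMEOUT_US	50

static uint32_t fake_reg = ACK_BIT;	/* ack still asserted at the start */
static int polls_until_ack = 3;		/* pretend hardware reacts later */

static uint32_t reg_read(void)
{
	if (polls_until_ack > 0 && --polls_until_ack == 0)
		fake_reg &= ~ACK_BIT;	/* hardware finally acknowledges */
	return fake_reg;
}

/* wait for ACK_BIT to clear, as chip_enable()/unfreeze()/softreset() do */
static int wait_for_ack_cleared(void)
{
	unsigned int timeout = TIMEOUT_US / 10;

	while (timeout-- && (reg_read() & ACK_BIT))
		usleep(10);

	return (reg_read() & ACK_BIT) ? -ETIMEDOUT : 0;
}

int main(void)
{
	int err = wait_for_ack_cleared();

	printf("wait_for_ack_cleared() = %d\n", err);
	return err ? 1 : 0;
}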
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index ab5909a7bae9..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -19,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
@@ -1134,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
*/
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
- struct sock *srcsk = skb->sk;
-
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else {
- skb_orphan(skb);
- }
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
@@ -1323,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg msg;
+ struct ican3_msg uninitialized_var(msg);
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 08ac401e0214..cdb9808d12db 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -28,8 +28,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
*
@@ -59,6 +58,7 @@
#include <linux/can/dev.h>
#include <linux/can/led.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -69,6 +69,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -264,6 +266,7 @@ struct mcp251x_priv {
int restart_tx;
struct regulator *power;
struct regulator *transceiver;
+ struct clk *clk;
};
#define MCP251X_IS(_model) \
@@ -995,22 +998,65 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_start_xmit = mcp251x_hard_start_xmit,
};
+static const struct of_device_id mcp251x_of_match[] = {
+ {
+ .compatible = "microchip,mcp2510",
+ .data = (void *)CAN_MCP251X_MCP2510,
+ },
+ {
+ .compatible = "microchip,mcp2515",
+ .data = (void *)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+static const struct spi_device_id mcp251x_id_table[] = {
+ {
+ .name = "mcp2510",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2510,
+ },
+ {
+ .name = "mcp2515",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static int mcp251x_can_probe(struct spi_device *spi)
{
+ const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+ &spi->dev);
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
- int ret = -ENODEV;
+ int freq, ret = -ENODEV;
+ struct clk *clk;
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (pdata)
+ freq = pdata->oscillator_frequency;
+ else
+ return PTR_ERR(clk);
+ } else {
+ freq = clk_get_rate(clk);
+ }
- if (!pdata)
- /* Platform data is required for osc freq */
- goto error_out;
+ /* Sanity check */
+ if (freq < 1000000 || freq > 25000000)
+ return -ERANGE;
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
- if (!net) {
- ret = -ENOMEM;
- goto error_alloc;
+ if (!net)
+ return -ENOMEM;
+
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
}
net->netdev_ops = &mcp251x_netdev_ops;
@@ -1019,23 +1065,27 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv = netdev_priv(net);
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
- priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- priv->model = spi_get_device_id(spi)->driver_data;
+ if (of_id)
+ priv->model = (enum mcp251x_model)of_id->data;
+ else
+ priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+ priv->clk = clk;
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
- goto error_power;
+ goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
if (ret)
- goto error_power;
+ goto out_clk;
spi_set_drvdata(spi, priv);
@@ -1067,15 +1117,17 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Allocate non-DMA buffers */
if (!mcp251x_enable_dma) {
- priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_tx_buf) {
ret = -ENOMEM;
- goto error_tx_buf;
+ goto error_probe;
}
- priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
if (!priv->spi_rx_buf) {
ret = -ENOMEM;
- goto error_rx_buf;
+ goto error_probe;
}
}
@@ -1108,21 +1160,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
return ret;
error_probe:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_rx_buf);
-error_rx_buf:
- if (!mcp251x_enable_dma)
- kfree(priv->spi_tx_buf);
-error_tx_buf:
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
mcp251x_power_enable(priv->power, 0);
-error_power:
+
+out_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
+out_free:
free_candev(net);
-error_alloc:
- dev_err(&spi->dev, "probe failed\n");
-error_out:
+
return ret;
}
@@ -1136,13 +1185,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
- } else {
- kfree(priv->spi_tx_buf);
- kfree(priv->spi_rx_buf);
}
mcp251x_power_enable(priv->power, 0);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
free_candev(net);
return 0;
@@ -1205,21 +1254,13 @@ static int mcp251x_can_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
mcp251x_can_resume);
-static const struct spi_device_id mcp251x_id_table[] = {
- { "mcp2510", CAN_MCP251X_MCP2510 },
- { "mcp2515", CAN_MCP251X_MCP2515 },
- { },
-};
-
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mcp251x_of_match,
.pm = &mcp251x_can_pm_ops,
},
-
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = mcp251x_can_remove,
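/*
 * Illustrative sketch, not the driver: the oscillator-frequency selection
 * added to mcp251x_can_probe() above. A clock provider is preferred when
 * one exists, platform data is the fallback, and rates outside 1..25 MHz
 * are rejected with -ERANGE. The struct types here are demo stand-ins, not
 * kernel structures.
 */
#include <errno.h>
#include <stdio.h>

struct demo_clk { long rate; };			/* stand-in for struct clk */
struct demo_pdata { long oscillator_frequency; };

static long pick_osc_freq(const struct demo_clk *clk,
			  const struct demo_pdata *pdata)
{
	long freq;

	if (clk)
		freq = clk->rate;		/* clk_get_rate() equivalent */
	else if (pdata)
		freq = pdata->oscillator_frequency;
	else
		return -ENODEV;			/* no frequency source at all */

	if (freq < 1000000 || freq > 25000000)
		return -ERANGE;			/* outside the supported window */

	return freq;
}

int main(void)
{
	struct demo_clk clk = { .rate = 16000000 };
	struct demo_pdata pdata = { .oscillator_frequency = 8000000 };

	printf("from clk:   %ld\n", pick_osc_freq(&clk, NULL));
	printf("from pdata: %ld\n", pick_osc_freq(NULL, &pdata));
	printf("no source:  %ld\n", pick_osc_freq(NULL, NULL));
	return 0;
}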
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e59b3a392af6..44725296f72a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -109,135 +108,170 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
#endif /* CONFIG_PPC_MPC52xx */
#ifdef CONFIG_PPC_MPC512x
-struct mpc512x_clockctl {
- u32 spmr; /* System PLL Mode Reg */
- u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
- u32 scfr1; /* System Clk Freq Reg 1 */
- u32 scfr2; /* System Clk Freq Reg 2 */
- u32 reserved;
- u32 bcr; /* Bread Crumb Reg */
- u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
- u32 spccr; /* SPDIF Clk Ctrl Reg */
- u32 cccr; /* CFM Clk Ctrl Reg */
- u32 dccr; /* DIU Clk Cnfg Reg */
- u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
-};
-
-static struct of_device_id mpc512x_clock_ids[] = {
- { .compatible = "fsl,mpc5121-clock", },
- {}
-};
-
static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
- const char *clock_name, int *mscan_clksrc)
+ const char *clock_source, int *mscan_clksrc)
{
- struct mpc512x_clockctl __iomem *clockctl;
- struct device_node *np_clock;
- struct clk *sys_clk, *ref_clk;
- int plen, clockidx, clocksrc = -1;
- u32 sys_freq, val, clockdiv = 1, freq = 0;
- const u32 *pval;
-
- np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
- if (!np_clock) {
- dev_err(&ofdev->dev, "couldn't find clock node\n");
- return 0;
- }
- clockctl = of_iomap(np_clock, 0);
- if (!clockctl) {
- dev_err(&ofdev->dev, "couldn't map clock registers\n");
- goto exit_put;
- }
+ struct device_node *np;
+ u32 clockdiv;
+ enum {
+ CLK_FROM_AUTO,
+ CLK_FROM_IPS,
+ CLK_FROM_SYS,
+ CLK_FROM_REF,
+ } clk_from;
+ struct clk *clk_in, *clk_can;
+ unsigned long freq_calc;
+ struct mscan_priv *priv;
+ struct clk *clk_ipg;
- /* Determine the MSCAN device index from the peripheral's
- * physical address. Register address offsets against the
- * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
+ /* the caller passed in the clock source spec that was read from
+ * the device tree, get the optional clock divider as well
*/
- pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
- BUG_ON(!pval || plen < sizeof(*pval));
- clockidx = (*pval & 0x80) ? 1 : 0;
- if (*pval & 0x2000)
- clockidx += 2;
+ np = ofdev->dev.of_node;
+ clockdiv = 1;
+ of_property_read_u32(np, "fsl,mscan-clock-divider", &clockdiv);
+ dev_dbg(&ofdev->dev, "device tree specs: clk src[%s] div[%d]\n",
+ clock_source ? clock_source : "<NULL>", clockdiv);
+
+ /* when clock-source is 'ip', the CANCTL1[CLKSRC] bit needs to
+ * get set, and the 'ips' clock is the input to the MSCAN
+ * component
+ *
+ * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC]
+ * bit needs to get cleared, an optional clock-divider may have
+ * been specified (the default value is 1), the appropriate
+ * MSCAN related MCLK is the input to the MSCAN component
+ *
+ * in the absence of a clock-source spec, first an optimal clock
+ * gets determined based on the 'sys' clock, if that fails the
+ * 'ref' clock is used
+ */
+ clk_from = CLK_FROM_AUTO;
+ if (clock_source) {
+ /* interpret the device tree's spec for the clock source */
+ if (!strcmp(clock_source, "ip"))
+ clk_from = CLK_FROM_IPS;
+ else if (!strcmp(clock_source, "sys"))
+ clk_from = CLK_FROM_SYS;
+ else if (!strcmp(clock_source, "ref"))
+ clk_from = CLK_FROM_REF;
+ else
+ goto err_invalid;
+ dev_dbg(&ofdev->dev, "got a clk source spec[%d]\n", clk_from);
+ }
+ if (clk_from == CLK_FROM_AUTO) {
+ /* no spec so far, try the 'sys' clock; round to the
+ * next MHz and see if we can get a multiple of 16MHz
+ */
+ dev_dbg(&ofdev->dev, "no clk source spec, trying SYS\n");
+ clk_in = devm_clk_get(&ofdev->dev, "sys");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ freq_calc = clk_get_rate(clk_in);
+ freq_calc += 499999;
+ freq_calc /= 1000000;
+ freq_calc *= 1000000;
+ if ((freq_calc % 16000000) == 0) {
+ clk_from = CLK_FROM_SYS;
+ clockdiv = freq_calc / 16000000;
+ dev_dbg(&ofdev->dev,
+ "clk fit, sys[%lu] div[%d] freq[%lu]\n",
+ freq_calc, clockdiv, freq_calc / clockdiv);
+ }
+ }
+ if (clk_from == CLK_FROM_AUTO) {
+ /* no spec so far, use the 'ref' clock */
+ dev_dbg(&ofdev->dev, "no clk source spec, trying REF\n");
+ clk_in = devm_clk_get(&ofdev->dev, "ref");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ clk_from = CLK_FROM_REF;
+ freq_calc = clk_get_rate(clk_in);
+ dev_dbg(&ofdev->dev,
+ "clk fit, ref[%lu] (no div) freq[%lu]\n",
+ freq_calc, freq_calc);
+ }
- /*
- * Clock source and divider selection: 3 different clock sources
- * can be selected: "ip", "ref" or "sys". For the latter two, a
- * clock divider can be defined as well. If the clock source is
- * not specified by the device tree, we first try to find an
- * optimal CAN source clock based on the system clock. If that
- * is not posslible, the reference clock will be used.
+ /* select IPS or MCLK as the MSCAN input (returned to the caller),
+ * setup the MCLK mux source and rate if applicable, apply the
+ * optionally specified or derived above divider, and determine
+ * the actual resulting clock rate to return to the caller
*/
- if (clock_name && !strcmp(clock_name, "ip")) {
+ switch (clk_from) {
+ case CLK_FROM_IPS:
+ clk_can = devm_clk_get(&ofdev->dev, "ips");
+ if (IS_ERR(clk_can))
+ goto err_notavail;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_can = clk_can;
+ freq_calc = clk_get_rate(clk_can);
*mscan_clksrc = MSCAN_CLKSRC_IPS;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
- } else {
+ dev_dbg(&ofdev->dev, "clk from IPS, clksrc[%d] freq[%lu]\n",
+ *mscan_clksrc, freq_calc);
+ break;
+ case CLK_FROM_SYS:
+ case CLK_FROM_REF:
+ clk_can = devm_clk_get(&ofdev->dev, "mclk");
+ if (IS_ERR(clk_can))
+ goto err_notavail;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_can = clk_can;
+ if (clk_from == CLK_FROM_SYS)
+ clk_in = devm_clk_get(&ofdev->dev, "sys");
+ if (clk_from == CLK_FROM_REF)
+ clk_in = devm_clk_get(&ofdev->dev, "ref");
+ if (IS_ERR(clk_in))
+ goto err_notavail;
+ clk_set_parent(clk_can, clk_in);
+ freq_calc = clk_get_rate(clk_in);
+ freq_calc /= clockdiv;
+ clk_set_rate(clk_can, freq_calc);
+ freq_calc = clk_get_rate(clk_can);
*mscan_clksrc = MSCAN_CLKSRC_BUS;
-
- pval = of_get_property(ofdev->dev.of_node,
- "fsl,mscan-clock-divider", &plen);
- if (pval && plen == sizeof(*pval))
- clockdiv = *pval;
- if (!clockdiv)
- clockdiv = 1;
-
- if (!clock_name || !strcmp(clock_name, "sys")) {
- sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
- if (IS_ERR(sys_clk)) {
- dev_err(&ofdev->dev, "couldn't get sys_clk\n");
- goto exit_unmap;
- }
- /* Get and round up/down sys clock rate */
- sys_freq = 1000000 *
- ((clk_get_rate(sys_clk) + 499999) / 1000000);
-
- if (!clock_name) {
- /* A multiple of 16 MHz would be optimal */
- if ((sys_freq % 16000000) == 0) {
- clocksrc = 0;
- clockdiv = sys_freq / 16000000;
- freq = sys_freq / clockdiv;
- }
- } else {
- clocksrc = 0;
- freq = sys_freq / clockdiv;
- }
- }
-
- if (clocksrc < 0) {
- ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
- if (IS_ERR(ref_clk)) {
- dev_err(&ofdev->dev, "couldn't get ref_clk\n");
- goto exit_unmap;
- }
- clocksrc = 1;
- freq = clk_get_rate(ref_clk) / clockdiv;
- }
+ dev_dbg(&ofdev->dev, "clk from MCLK, clksrc[%d] freq[%lu]\n",
+ *mscan_clksrc, freq_calc);
+ break;
+ default:
+ goto err_invalid;
}
- /* Disable clock */
- out_be32(&clockctl->mccr[clockidx], 0x0);
- if (clocksrc >= 0) {
- /* Set source and divider */
- val = (clocksrc << 14) | ((clockdiv - 1) << 17);
- out_be32(&clockctl->mccr[clockidx], val);
- /* Enable clock */
- out_be32(&clockctl->mccr[clockidx], val | 0x10000);
- }
+ /* the above clk_can item is used for the bitrate, access to
+ * the peripheral's register set needs the clk_ipg item
+ */
+ clk_ipg = devm_clk_get(&ofdev->dev, "ipg");
+ if (IS_ERR(clk_ipg))
+ goto err_notavail_ipg;
+ if (clk_prepare_enable(clk_ipg))
+ goto err_notavail_ipg;
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ priv->clk_ipg = clk_ipg;
+
+ /* return the determined clock source rate */
+ return freq_calc;
+
+err_invalid:
+ dev_err(&ofdev->dev, "invalid clock source specification\n");
+ /* clock source rate could not get determined */
+ return 0;
- /* Enable MSCAN clock domain */
- val = in_be32(&clockctl->sccr[1]);
- if (!(val & (1 << 25)))
- out_be32(&clockctl->sccr[1], val | (1 << 25));
+err_notavail:
+ dev_err(&ofdev->dev, "cannot acquire or setup bitrate clock source\n");
+ /* clock source rate could not get determined */
+ return 0;
- dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
- *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
- clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+err_notavail_ipg:
+ dev_err(&ofdev->dev, "cannot acquire or setup register clock\n");
+ /* clock source rate could not get determined */
+ return 0;
+}
-exit_unmap:
- iounmap(clockctl);
-exit_put:
- of_node_put(np_clock);
- return freq;
+static void mpc512x_can_put_clock(struct platform_device *ofdev)
+{
+ struct mscan_priv *priv;
+
+ priv = netdev_priv(dev_get_drvdata(&ofdev->dev));
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
}
#else /* !CONFIG_PPC_MPC512x */
static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
@@ -245,6 +279,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
{
return 0;
}
+#define mpc512x_can_put_clock NULL
#endif /* CONFIG_PPC_MPC512x */
static const struct of_device_id mpc5xxx_can_table[];
@@ -386,11 +421,13 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev)
static const struct mpc5xxx_can_data mpc5200_can_data = {
.type = MSCAN_TYPE_MPC5200,
.get_clock = mpc52xx_can_get_clock,
+ /* .put_clock not applicable */
};
static const struct mpc5xxx_can_data mpc5121_can_data = {
.type = MSCAN_TYPE_MPC5121,
.get_clock = mpc512x_can_get_clock,
+ .put_clock = mpc512x_can_put_clock,
};
static const struct of_device_id mpc5xxx_can_table[] = {
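/*
 * Illustrative sketch, not the driver: the CLK_FROM_AUTO arithmetic used by
 * the rewritten mpc512x_can_get_clock() above. With no clock-source given,
 * the 'sys' rate is rounded to a whole MHz and only used when that value is
 * a multiple of 16 MHz, which also yields the MCLK divider; otherwise the
 * caller falls back to the 'ref' clock. Plain C demo, no kernel clock
 * framework involved.
 */
#include <stdio.h>

static int try_sys_clock(unsigned long sys_rate,
			 unsigned long *freq, unsigned int *div)
{
	unsigned long rounded = sys_rate;

	rounded += 499999;		/* round to the nearest MHz ... */
	rounded /= 1000000;
	rounded *= 1000000;

	if (rounded % 16000000)		/* ... and require a multiple of 16 MHz */
		return 0;

	*div = rounded / 16000000;
	*freq = rounded / *div;		/* resulting CAN input clock */
	return 1;
}

int main(void)
{
	unsigned long freq;
	unsigned int div;

	if (try_sys_clock(63999800, &freq, &div))	/* rounds to 64 MHz */
		printf("sys usable: div=%u freq=%lu\n", div, freq);

	if (!try_sys_clock(66000000, &freq, &div))	/* 66 MHz is not 16*n */
		printf("sys not usable, fall back to the ref clock\n");
	return 0;
}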
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a955ec8c4b97..b9f3faabb0f3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index e98abb97a050..ad8e08f9c496 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSCAN_H__
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5f0e9b3bfa7b..6c077eb87b5e 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interrupt.h>
@@ -22,7 +21,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 835921388e7b..d790b874ca79 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 087b13bd300e..c96eb14699d5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -26,8 +26,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index f9b4f81cd86a..fbb61a0d901f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -45,7 +44,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"esd CAN-PCI/PMC/266, "
"esd CAN-PCIe/2000, "
"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
- "IXXAT PC-I 04/PCI")
+ "IXXAT PC-I 04/PCI, "
+ "ELCUS CAN-200-PCI")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -123,6 +123,11 @@ struct plx_pci_card {
#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+#define CAN200PCI_DEVICE_ID 0x9030
+#define CAN200PCI_VENDOR_ID 0x10b5
+#define CAN200PCI_SUB_DEVICE_ID 0x0301
+#define CAN200PCI_SUB_VENDOR_ID 0xe1c5
+
#define IXXAT_PCI_VENDOR_ID 0x10b5
#define IXXAT_PCI_DEVICE_ID 0x9050
#define IXXAT_PCI_SUB_SYS_ID 0x2540
@@ -234,6 +239,14 @@ static struct plx_pci_card_info plx_pci_card_info_cti = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_elcus = {
+	"Elcus CAN-200-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {3, 0x00, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -319,6 +332,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
+ {
+ /* Elcus CAN-200-PCI */
+ CAN200PCI_VENDOR_ID, CAN200PCI_DEVICE_ID,
+ CAN200PCI_SUB_VENDOR_ID, CAN200PCI_SUB_DEVICE_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_elcus
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 06a282397fff..df136a2516c4 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 047accd4ede5..2f6e24534231 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 29f9b6321187..943df645b459 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 25377e547f9b..3fcdae266377 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -18,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
- * at http://www.gnu.org/licenses/gpl.html
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 498605f833dd..cdc0c7433a4b 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index b595d3422b9f..52fe50725d74 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/firmware.h>
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 6cd5c01b624d..9ea0dcde94ce 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -13,12 +13,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 60d95b44d0f7..2c62fe6c8fa9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -37,7 +37,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -518,10 +517,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
data = (cf->can_id & CAN_SFF_MASK) << 18;
hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
hecc_write_mbx(priv, mbxno, HECC_CANMDL,
- be32_to_cpu(*(u32 *)(cf->data)));
+ be32_to_cpu(*(__be32 *)(cf->data)));
if (cf->can_dlc > 4)
hecc_write_mbx(priv, mbxno, HECC_CANMDH,
- be32_to_cpu(*(u32 *)(cf->data + 4)));
+ be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
can_put_echo_skb(skb, ndev, mbxno);
@@ -569,12 +568,10 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc(data & 0xF);
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
- *(u32 *)(cf->data) = cpu_to_be32(data);
+ *(__be32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
- *(u32 *)(cf->data + 4) = cpu_to_be32(data);
- } else {
- *(u32 *)(cf->data + 4) = 0;
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(data);
}
spin_lock_irqsave(&priv->mbx_lock, flags);
hecc_clear_bit(priv, HECC_CANME, mbx_mask);
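/*
 * Illustrative sketch, not the driver: the ti_hecc changes above only fix
 * the sparse annotations; the payload words read from the mailbox are kept
 * in the CAN frame buffer in big-endian byte order, hence the __be32 casts.
 * Standalone demo of the same store using htonl(); reg_value and data[]
 * stand in for the HECC_CANMDL read and cf->data.
 */
#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t reg_value = 0x11223344;	/* pretend HECC_CANMDL value */
	uint8_t data[8] = { 0 };
	uint32_t be;

	be = htonl(reg_value);			/* cpu_to_be32() equivalent */
	memcpy(data, &be, sizeof(be));		/* first four payload bytes */

	printf("data[0..3] = %02x %02x %02x %02x\n",
	       data[0], data[1], data[2], data[3]);
	return 0;
}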
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 8aeec0b4601a..52c42fd49510 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -16,7 +16,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index ac6177d3befc..7fbe85935f1d 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -16,7 +16,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4b2d5ed62b11..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -12,7 +12,6 @@
* Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
*/
-#include <linux/init.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -474,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
return err;
dev->nchannels = msg.u.cardinfo.nchannels;
+ if (dev->nchannels > MAX_NET_DEVICES)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8becd3d838b5..a0fa1fd5092b 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -23,7 +23,6 @@
* who were very cooperative and answered my questions.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cfd->len;
}
- kfree_skb(skb);
+ consume_skb(skb);
return NETDEV_TX_OK;
}
/* perform standard echo handling for CAN network interfaces */
if (loop) {
- struct sock *srcsk = skb->sk;
- skb = skb_share_check(skb, GFP_ATOMIC);
+ skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
/* receive with packet counting */
- skb->sk = srcsk;
vcan_rx(skb, dev);
} else {
/* no looped packets => no counting */
- kfree_skb(skb);
+ consume_skb(skb);
}
return NETDEV_TX_OK;
}
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index f219d38acf58..7a79b6046879 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -395,6 +395,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
+ dev_hold(slave->dev);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
@@ -413,39 +414,35 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, srq.slave_name);
- if (slave_dev) {
- if ((master_dev->flags & IFF_UP) == IFF_UP) {
- /* slave is not a master & not already a slave: */
- if (!eql_is_master(slave_dev) &&
- !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
- equalizer_t *eql = netdev_priv(master_dev);
- int ret;
-
- if (!s) {
- dev_put(slave_dev);
- return -ENOMEM;
- }
-
- memset(s, 0, sizeof(*s));
- s->dev = slave_dev;
- s->priority = srq.priority;
- s->priority_bps = srq.priority;
- s->priority_Bps = srq.priority / 8;
-
- spin_lock_bh(&eql->queue.lock);
- ret = __eql_insert_slave(&eql->queue, s);
- if (ret) {
- dev_put(slave_dev);
- kfree(s);
- }
- spin_unlock_bh(&eql->queue.lock);
-
- return ret;
- }
+ slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
+
+ if ((master_dev->flags & IFF_UP) == IFF_UP) {
+ /* slave is not a master & not already a slave: */
+ if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
+ slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ equalizer_t *eql = netdev_priv(master_dev);
+ int ret;
+
+ if (!s)
+ return -ENOMEM;
+
+ memset(s, 0, sizeof(*s));
+ s->dev = slave_dev;
+ s->priority = srq.priority;
+ s->priority_bps = srq.priority;
+ s->priority_Bps = srq.priority / 8;
+
+ spin_lock_bh(&eql->queue.lock);
+ ret = __eql_insert_slave(&eql->queue, s);
+ if (ret)
+ kfree(s);
+
+ spin_unlock_bh(&eql->queue.lock);
+
+ return ret;
}
- dev_put(slave_dev);
}
return -EINVAL;
@@ -461,24 +458,20 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use
if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, srq.slave_name);
- ret = -EINVAL;
- if (slave_dev) {
- spin_lock_bh(&eql->queue.lock);
-
- if (eql_is_slave(slave_dev)) {
- slave_t *slave = __eql_find_slave_dev(&eql->queue,
- slave_dev);
+ slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+ if (!slave_dev)
+ return -ENODEV;
- if (slave) {
- eql_kill_one_slave(&eql->queue, slave);
- ret = 0;
- }
+ ret = -EINVAL;
+ spin_lock_bh(&eql->queue.lock);
+ if (eql_is_slave(slave_dev)) {
+ slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
+ if (slave) {
+ eql_kill_one_slave(&eql->queue, slave);
+ ret = 0;
}
- dev_put(slave_dev);
-
- spin_unlock_bh(&eql->queue.lock);
}
+ spin_unlock_bh(&eql->queue.lock);
return ret;
}
@@ -494,7 +487,7 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+ slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
if (!slave_dev)
return -ENODEV;
@@ -510,8 +503,6 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
}
spin_unlock_bh(&eql->queue.lock);
- dev_put(slave_dev);
-
if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
ret = -EFAULT;
@@ -529,7 +520,7 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+ slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
if (!slave_dev)
return -ENODEV;
@@ -548,8 +539,6 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
}
spin_unlock_bh(&eql->queue.lock);
- dev_put(slave_dev);
-
return ret;
}
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index ede8daa68275..c53384d41c96 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -252,8 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
for (i = 0; i < el3_cards; i++) {
struct el3_private *lp = netdev_priv(el3_devs[i]);
if (lp->type == EL3_PNP &&
- !memcmp(phys_addr, el3_devs[i]->dev_addr,
- ETH_ALEN)) {
+ ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) {
if (el3_debug > 3)
pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
phys_addr[0] & 0xff, phys_addr[0] >> 8,
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 6fc994fa4abe..b9948f00c5e9 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -73,7 +73,6 @@ earlier 3Com products.
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 078480aaa168..5992860a39c9 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -25,7 +25,6 @@
#define DRV_VERSION "1.162-ac"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b348f0..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -693,7 +693,7 @@ DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
#ifdef CONFIG_PCI
-#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif
@@ -2079,10 +2079,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
iowrite16(len, ioaddr + Wn7_MasterLen);
spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
+ skb_tx_timestamp(skb);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
+ skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
@@ -2212,6 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
+ skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
return NETDEV_TX_OK;
@@ -2986,6 +2989,7 @@ static const struct ethtool_ops vortex_ethtool_ops = {
.nway_reset = vortex_nway_reset,
.get_wol = vortex_get_wol,
.set_wol = vortex_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
};
#ifdef CONFIG_PCI
@@ -3290,7 +3294,6 @@ static int __init vortex_init(void)
static void __exit vortex_eisa_cleanup(void)
{
- struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
@@ -3299,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
#endif
if (compaq_net_device) {
- vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 2923c51bb351..3e2f2c2e7b58 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -21,12 +21,6 @@ struct e8390_pkt_hdr {
unsigned short count; /* header + packet length in bytes */
};
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
void ei_poll(struct net_device *dev);
void eip_poll(struct net_device *dev);
@@ -99,6 +93,7 @@ struct ei_device {
u32 *reg_offset; /* Register mapping table */
spinlock_t page_lock; /* Page register locks */
unsigned long priv; /* Private field to store bus IDs etc. */
+ u32 msg_enable; /* debug message level */
#ifdef AX88796_PLATFORM
unsigned char rxcr_base; /* default value for RXCR */
#endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 912ed7a5f33a..30104b60da85 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -116,9 +116,15 @@ static const char version[] =
static int apne_owned; /* signal if card already owned */
+static u32 apne_msg_enable;
+module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
struct net_device * __init apne_probe(int unit)
{
struct net_device *dev;
+ struct ei_device *ei_local;
+
#ifndef MANUAL_CONFIG
char tuple[8];
#endif
@@ -133,11 +139,11 @@ struct net_device * __init apne_probe(int unit)
if ( !(AMIGAHW_PRESENT(PCMCIA)) )
return ERR_PTR(-ENODEV);
- printk("Looking for PCMCIA ethernet card : ");
+ pr_info("Looking for PCMCIA ethernet card : ");
/* check if a card is inserted */
if (!(PCMCIA_INSERTED)) {
- printk("NO PCMCIA card inserted\n");
+ pr_cont("NO PCMCIA card inserted\n");
return ERR_PTR(-ENODEV);
}
@@ -148,6 +154,8 @@ struct net_device * __init apne_probe(int unit)
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = apne_msg_enable;
/* disable pcmcia irq for readtuple */
pcmcia_disable_irq();
@@ -155,14 +163,14 @@ struct net_device * __init apne_probe(int unit)
#ifndef MANUAL_CONFIG
if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
(tuple[2] != CISTPL_FUNCID_NETWORK)) {
- printk("not an ethernet card\n");
+ pr_cont("not an ethernet card\n");
/* XXX: shouldn't we re-enable irq here? */
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
#endif
- printk("ethernet PCMCIA card inserted\n");
+ pr_cont("ethernet PCMCIA card inserted\n");
if (!init_pcmcia()) {
/* XXX: shouldn't we re-enable irq here? */
@@ -205,10 +213,10 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
#endif
static unsigned version_printed;
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
- printk("PCMCIA NE*000 ethercard probe");
+ netdev_info(dev, "PCMCIA NE*000 ethercard probe");
/* Reset card. Who knows what dain-bramaged state it was left in. */
{ unsigned long reset_start_time = jiffies;
@@ -217,7 +225,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
return -ENODEV;
}
@@ -288,7 +296,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
} else {
- printk(" not found.\n");
+ pr_cont(" not found.\n");
return -ENXIO;
}
@@ -320,9 +328,9 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
- printk(" %pM\n", dev->dev_addr);
+ pr_cont(" %pM\n", dev->dev_addr);
- printk("%s: %s found.\n", dev->name, name);
+ netdev_info(dev, "%s found.\n", name);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -352,10 +360,11 @@ static void
apne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
init_pcmcia();
- if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -365,8 +374,8 @@ apne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk("%s: ne_reset_8390() did not complete.\n", dev->name);
- break;
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
+ break;
}
outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
}
@@ -386,9 +395,9 @@ apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pa
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
@@ -433,9 +442,9 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d][intr:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -481,9 +490,9 @@ apne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d][intr:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
@@ -513,7 +522,7 @@ apne_block_output(struct net_device *dev, int count,
while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
apne_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -536,8 +545,8 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
pcmcia_ack_int(pcmcia_intreq);
return IRQ_NONE;
}
- if (ei_debug > 3)
- printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ if (apne_msg_enable & NETIF_MSG_INTR)
+ pr_debug("pcmcia intreq = %x\n", pcmcia_intreq);
pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
ei_interrupt(irq, dev_id);
pcmcia_ack_int(pcmcia_get_intreq());
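/*
 * Illustrative sketch, not the driver: the 8390 conversion above replaces
 * the global ei_debug level with a per-device msg_enable bitmask that is
 * checked through netif_msg_*() / netif_dbg(). Each message class is one
 * bit, so logging can be tuned per interface via ethtool. The MSG_* names
 * and dev_dbg_class() below are demo stand-ins for the kernel's
 * NETIF_MSG_* flags and netif_* helpers.
 */
#include <stdarg.h>
#include <stdio.h>

enum {
	MSG_DRV		= 1u << 0,
	MSG_INTR	= 1u << 1,
	MSG_RX_STATUS	= 1u << 2,
	MSG_TX_QUEUED	= 1u << 3,
};

struct demo_dev {
	const char *name;
	unsigned int msg_enable;	/* like ei_device::msg_enable */
};

static void dev_dbg_class(const struct demo_dev *dev, unsigned int cls,
			  const char *fmt, ...)
{
	va_list ap;

	if (!(dev->msg_enable & cls))	/* netif_msg_*() style gate */
		return;

	printf("%s: ", dev->name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct demo_dev dev = {
		.name = "eth0",
		.msg_enable = MSG_DRV | MSG_INTR,
	};

	dev_dbg_class(&dev, MSG_INTR, "interrupt(isr=%#2.2x)\n", 0x41);
	dev_dbg_class(&dev, MSG_RX_STATUS, "this one is filtered out\n");
	return 0;
}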
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 36fa577970bb..455d4c399b52 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/isapnp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -78,6 +77,8 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define AX_GPOC_PPDSET BIT(6)
+static u32 ax_msg_enable;
+
/* device private data */
struct ax_device {
@@ -147,8 +148,7 @@ static void ax_reset_8390(struct net_device *dev)
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -496,12 +496,28 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phy_dev, cmd);
}
+static u32 ax_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ax_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = ax_get_msglevel,
+ .set_msglevel = ax_set_msglevel,
};
#ifdef CONFIG_AX88796_93CX6
@@ -763,6 +779,7 @@ static int ax_init_dev(struct net_device *dev)
ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
+ ei_local->msg_enable = ax_msg_enable;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d801c1410fb0..73c57a4a7b9e 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -105,6 +104,7 @@ static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id);
+static u32 axnet_msg_enable;
/*====================================================================*/
@@ -152,6 +152,7 @@ static int axnet_probe(struct pcmcia_device *link)
return -ENOMEM;
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = axnet_msg_enable;
spin_lock_init(&ei_local->page_lock);
info = PRIV(dev);
@@ -650,11 +651,12 @@ static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
unsigned int nic_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
int xfer_count = count;
char *buf = skb->data;
- if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -810,11 +812,6 @@ module_pcmcia_driver(axnet_cs_driver);
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -925,11 +922,10 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?",
- txsr, isr, tickssofar);
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -998,29 +994,30 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
- ei_local->tx2, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_printk(KERN_DEBUG, dev,
- "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
- ei_local->tx1, ei_local->lasttx,
- ei_local->txing);
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2,
- ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1124,10 +1121,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
-
- if (ei_debug > 3)
- netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
- inb_p(e8390_base + EN0_ISR));
+
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
@@ -1137,9 +1133,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
++nr_serviced < MAX_SERVICE)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
- if (ei_debug > 1)
- netdev_warn(dev,
- "interrupt from stopped card\n");
+ netif_warn(ei_local, intr, dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1175,14 +1170,15 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
}
}
- if (interrupts && ei_debug > 3)
+ if (interrupts && (netif_msg_intr(ei_local)))
{
handled = 1;
if (nr_serviced >= MAX_SERVICE)
{
/* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ if (interrupts != 0xFF)
+ netdev_warn(dev,
+ "Too much work at interrupt, status %#2.2x\n",
interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
@@ -1221,8 +1217,7 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- netdev_printk(KERN_DEBUG, dev,
- "transmitter error (%#2x):", txsr);
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
@@ -1287,9 +1282,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
- ei_local->name, ei_local->lasttx,
- ei_local->tx2);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1366,9 +1361,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
- this_frame, ei_local->current_page);
+ if ((netif_msg_rx_err(ei_local)) &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1383,11 +1380,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1398,10 +1394,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
{
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev,
- "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1420,11 +1415,10 @@ static void ei_receive(struct net_device *dev)
}
else
{
- if (ei_debug)
- netdev_printk(KERN_DEBUG, dev,
- "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1461,6 +1455,7 @@ static void ei_rx_overrun(struct net_device *dev)
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Record whether a Tx was in progress and then issue the
@@ -1468,9 +1463,8 @@ static void ei_rx_overrun(struct net_device *dev)
*/
was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
- if (ei_debug > 1)
- netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 78c6fb4b1143..b36ee9e0d220 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -56,18 +56,15 @@
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
-#define NET_DEBUG 0
-#define DEBUG_INIT 2
-
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
-static char version[] __initdata =
+static char version[] =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
#include "lib8390.c"
-static unsigned int net_debug = NET_DEBUG;
+static u32 etherh_msg_enable;
struct etherh_priv {
void __iomem *ioc_fast;
@@ -317,9 +314,9 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -361,8 +358,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
- dev->name);
+ netdev_warn(dev, "timeout waiting for TX RDC\n");
etherh_reset (dev);
__NS8390_init (dev, 1);
break;
@@ -383,9 +379,9 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_block_input: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -423,9 +419,9 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
- " DMAstat %d irqlock %d\n", dev->name,
- ei_local->dmaing, ei_local->irqlock);
+ netdev_err(dev, "DMAing conflict in etherh_get_header: "
+ " DMAstat %d irqlock %d\n",
+ ei_local->dmaing, ei_local->irqlock);
return;
}
@@ -513,8 +509,8 @@ static void __init etherh_banner(void)
{
static int version_printed;
- if (net_debug && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
}
/*
@@ -625,11 +621,27 @@ static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
+static u32 etherh_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void etherh_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops etherh_ethtool_ops = {
.get_settings = etherh_get_settings,
.set_settings = etherh_set_settings,
.get_drvinfo = etherh_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = etherh_get_msglevel,
+ .set_msglevel = etherh_set_msglevel,
};
static const struct net_device_ops etherh_netdev_ops = {
@@ -746,6 +758,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
+ ei_local->msg_enable = etherh_msg_enable;
etherh_reset(dev);
__NS8390_init(dev, 0);
@@ -754,8 +767,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
if (ret)
goto free;
- printk(KERN_INFO "%s: %s in slot %d, %pM\n",
- dev->name, data->name, ec->slot_no, dev->dev_addr);
+ netdev_info(dev, "%s in slot %d, %pM\n",
+ data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
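The conversions in etherh.c and the surrounding drivers lean on two families of helpers: the unconditional netdev_err()/netdev_warn()/netdev_info() wrappers, which simply prefix the message with the device name, and the conditional netif_dbg()/netif_warn()/netif_err() variants, which fold the msg_enable test and the printk into one statement. Roughly, and only as a sketch (the kernel's real macros in <linux/netdevice.h> add a do/while wrapper and route the debug variant through dynamic debug when CONFIG_DYNAMIC_DEBUG is enabled):

/* Sketch of the shape of netif_dbg(); not the kernel's exact definition. */
#define example_netif_dbg(priv, type, netdev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_dbg(netdev, fmt, ##args);		\
} while (0)

So a call such as netif_dbg(ei_local, hw, dev, "resetting the 8390 ...") in ax88796.c above prints only when the NETIF_MSG_HW bit is set in ei_local->msg_enable.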
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index f615fdec0f1b..0fe19d609c2e 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,6 +66,7 @@ static void hydra_block_input(struct net_device *dev, int count,
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
+static u32 hydra_msg_enable;
static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -119,6 +120,7 @@ static int hydra_init(struct zorro_dev *z)
int start_page, stop_page;
int j;
int err;
+ struct ei_device *ei_local;
static u32 hydra_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -137,6 +139,8 @@ static int hydra_init(struct zorro_dev *z)
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = hydra_msg_enable;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
@@ -187,15 +191,16 @@ static int hydra_open(struct net_device *dev)
static int hydra_close(struct net_device *dev)
{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
__ei_close(dev);
return 0;
}
static void hydra_reset_8390(struct net_device *dev)
{
- printk(KERN_INFO "Hydra hw reset not there\n");
+ netdev_info(dev, "Hydra hw reset not there\n");
}
static void hydra_get_8390_hdr(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b329f5c0d62b..d2cd80444ade 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -99,11 +99,6 @@
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
@@ -116,6 +111,11 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
+static unsigned version_printed;
+static u32 msg_enable;
+module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/*
* SMP and the 8390 setup.
*
@@ -345,19 +345,23 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
if (ei_local->tx1 == 0) {
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
- if (ei_debug && ei_local->tx2 > 0)
- netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx2 > 0)
+ netdev_dbg(dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
ei_local->tx2, ei_local->lasttx, ei_local->txing);
} else if (ei_local->tx2 == 0) {
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
- if (ei_debug && ei_local->tx1 > 0)
- netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ if ((netif_msg_tx_queued(ei_local)) &&
+ ei_local->tx1 > 0)
+ netdev_dbg(dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
ei_local->tx1, ei_local->lasttx, ei_local->txing);
} else { /* We should never get here. */
- if (ei_debug)
- netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netif_dbg(ei_local, tx_err, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -388,7 +392,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
} else
ei_local->txqueue++;
- if (ei_local->tx1 && ei_local->tx2)
+ if (ei_local->tx1 && ei_local->tx2)
netif_stop_queue(dev);
else
netif_start_queue(dev);
@@ -445,9 +449,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
/* Change to page 0 and read the intr status reg. */
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
- if (ei_debug > 3)
- netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
- ei_inb_p(e8390_base + EN0_ISR));
+ netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+ ei_inb_p(e8390_base + EN0_ISR));
/* !!Assumption!! -- we stay in page 0. Don't break this. */
while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
@@ -485,7 +488,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
}
- if (interrupts && ei_debug) {
+ if (interrupts && (netif_msg_intr(ei_local))) {
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
if (nr_serviced >= MAX_SERVICE) {
/* 0xFF is valid for a card removal */
@@ -676,10 +679,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 &&
+ if ((netif_msg_rx_status(ei_local)) &&
this_frame != ei_local->current_page &&
(this_frame != 0x0 || rxing_page != 0xFF))
- netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ netdev_err(dev,
+ "mismatched read page pointers %2x vs %2x\n",
this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
@@ -707,10 +711,10 @@ static void ei_receive(struct net_device *dev)
}
if (pkt_len < 60 || pkt_len > 1518) {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
- rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netif_dbg(ei_local, rx_status, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
@@ -718,9 +722,9 @@ static void ei_receive(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- if (ei_debug > 1)
- netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
- pkt_len);
+ netif_err(ei_local, rx_err, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
} else {
@@ -736,10 +740,10 @@ static void ei_receive(struct net_device *dev)
dev->stats.multicast++;
}
} else {
- if (ei_debug)
- netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netif_err(ei_local, rx_err, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -789,8 +793,7 @@ static void ei_rx_overrun(struct net_device *dev)
was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- if (ei_debug > 1)
- netdev_dbg(dev, "Receiver overrun\n");
+ netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -965,8 +968,9 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(version);
+
+ if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ pr_info("%s", version);
ether_setup(dev);
@@ -1035,9 +1039,10 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
for (i = 0; i < 6; i++) {
ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 &&
+ if ((netif_msg_probe(ei_local)) &&
ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
- netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+ netdev_err(dev,
+ "Hw. address read/write mismap %d\n", i);
}
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
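lib8390.c now takes a msg_enable module parameter; in the hunks visible here it gates the one-time version banner, and the (S_IRUSR|S_IRGRP|S_IROTH) permission mask is 0444, so the parameter shows up read-only under /sys/module/.../parameters/ and runtime changes still go through ethtool. A small sketch of one common way such a parameter feeds each device's bitmap, with hypothetical names (the direct assignment mirrors what the other drivers in this series do at probe time):

/* Sketch: a read-only module parameter seeding a per-device default. */
#include <linux/module.h>
#include <linux/netdevice.h>

static u32 example_msg_enable;	/* 0 unless set at modprobe time */
module_param_named(msg_enable, example_msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Bitmap of NETIF_MSG_* classes to enable");

struct example_priv {
	u32 msg_enable;
};

static void example_setup(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->msg_enable = example_msg_enable;	/* copied once, not live-linked */
}

Loading such a module with msg_enable=0x2003, say, would enable the DRV, PROBE and HW classes for every device it registers afterwards.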
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 88ccc8b14f0a..90e825e8abfe 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -167,6 +167,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+static u32 mac8390_msg_enable;
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
@@ -402,6 +403,7 @@ struct net_device * __init mac8390_probe(int unit)
struct net_device *dev;
struct nubus_dev *ndev = NULL;
int err = -ENODEV;
+ struct ei_device *ei_local;
static unsigned int slots;
@@ -440,6 +442,10 @@ struct net_device * __init mac8390_probe(int unit)
if (!ndev)
goto out;
+
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mac8390_msg_enable;
+
err = register_netdev(dev);
if (err)
goto out;
@@ -660,19 +666,22 @@ static int mac8390_close(struct net_device *dev)
static void mac8390_no_reset(struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
ei_status.txing = 0;
- if (ei_debug > 1)
- pr_info("reset not supported\n");
+ netif_info(ei_local, hw, dev, "reset not supported\n");
}
static void interlan_reset(struct net_device *dev)
{
unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
- if (ei_debug > 1)
- pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...",
+ jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
- if (ei_debug > 1)
+ if (netif_msg_hw(ei_local))
pr_cont("reset complete\n");
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 230efd6fa5d5..38fcdcf7c4c7 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -39,6 +38,7 @@ static const char version[] =
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+static u32 mcf8390_msg_enable;
#ifdef NE2000_ODDOFFSET
/*
@@ -153,9 +153,9 @@ static void mcf8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
u32 addr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@@ -288,7 +288,7 @@ static void mcf8390_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
mcf8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -437,6 +437,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local = netdev_priv(dev);
+ ei_local->msg_enable = mcf8390_msg_enable;
dev->irq = irq->start;
dev->base_addr = mem->start;
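Many hunks in this series add a local "struct ei_device *ei_local = netdev_priv(dev);" in functions that previously never touched the private data. netdev_priv() returns the driver-private region that alloc_etherdev()/alloc_netdev() places directly after struct net_device, so the lookup is a constant offset with no allocation or locking involved. A minimal sketch, with hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;
};

static struct net_device *example_alloc(void)
{
	/* reserves sizeof(struct example_priv) right behind struct net_device */
	struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));

	if (dev) {
		struct example_priv *priv = netdev_priv(dev);

		priv->msg_enable = 0;	/* quiet until a parameter or ethtool changes it */
	}
	return dev;
}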
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index b2e840513735..58eaa8f34942 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -71,14 +71,17 @@ static struct platform_device *pdev_ne[MAX_NE_CARDS];
static int io[MAX_NE_CARDS];
static int irq[MAX_NE_CARDS];
static int bad[MAX_NE_CARDS];
+static u32 ne_msg_enable;
#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(bad, int, NULL, 0);
+module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es),required");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
#endif /* MODULE */
@@ -214,8 +217,8 @@ static int __init do_ne_probe(struct net_device *dev)
if (base_addr > 0x1ff) { /* Check a single specified location. */
int ret = ne_probe1(dev, base_addr);
if (ret)
- printk(KERN_WARNING "ne.c: No NE*000 card found at "
- "i/o = %#lx\n", base_addr);
+ netdev_warn(dev, "ne.c: No NE*000 card found at "
+ "i/o = %#lx\n", base_addr);
return ret;
}
else if (base_addr != 0) /* Don't probe at all. */
@@ -264,11 +267,14 @@ static int __init ne_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) isapnp_clone_list[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ netdev_err(dev,
+ "ne.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
pnp_device_detach(idev);
return -ENXIO;
}
@@ -293,6 +299,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
int neX000, ctron, copam, bad_card;
int reg0, ret;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -319,10 +326,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
}
- if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s%s", version1, version2);
+ if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, "%s%s", version1, version2);
- printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+ netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr);
/* A user with a poor card that fails to ack the reset, or that
does not have a valid 0x57,0x57 signature can still use this
@@ -343,10 +350,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
if (bad_card) {
- printk(" (warning: no reset ack)");
+ pr_cont(" (warning: no reset ack)");
break;
} else {
- printk(" not found (no reset ack).\n");
+ pr_cont(" not found (no reset ack).\n");
ret = -ENODEV;
goto err_out;
}
@@ -454,13 +461,13 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
if (bad_clone_list[i].name8 == NULL)
{
- printk(" not found (invalid signature %2.2x %2.2x).\n",
+ pr_cont(" not found (invalid signature %2.2x %2.2x).\n",
SA_prom[14], SA_prom[15]);
ret = -ENXIO;
goto err_out;
}
#else
- printk(" not found.\n");
+ pr_cont(" not found.\n");
ret = -ENXIO;
goto err_out;
#endif
@@ -476,15 +483,15 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = probe_irq_off(cookie);
- if (ei_debug > 2)
- printk(" autoirq is %d\n", dev->irq);
+ if (netif_msg_probe(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
or don't know which one to set. */
dev->irq = 9;
if (! dev->irq) {
- printk(" failed to detect IRQ line.\n");
+ pr_cont(" failed to detect IRQ line.\n");
ret = -EAGAIN;
goto err_out;
}
@@ -493,7 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
share and the board will usually be enabled. */
ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
if (ret) {
- printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
goto err_out;
}
@@ -512,7 +519,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
#endif
- printk("%pM\n", dev->dev_addr);
+ pr_cont("%pM\n", dev->dev_addr);
ei_status.name = name;
ei_status.tx_start_page = start_page;
@@ -536,11 +543,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->netdev_ops = &eip_netdev_ops;
NS8390p_init(dev, 0);
+ ei_local->msg_enable = ne_msg_enable;
ret = register_netdev(dev);
if (ret)
goto out_irq;
- printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
- dev->name, name, ioaddr, dev->irq);
+ netdev_info(dev, "%s found at %#lx, using IRQ %d.\n",
+ name, ioaddr, dev->irq);
return 0;
out_irq:
@@ -556,9 +564,9 @@ err_out:
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -569,7 +577,7 @@ static void ne_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne_reset_8390() did not complete.\n");
break;
}
outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -587,9 +595,9 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -621,6 +629,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
{
#ifdef NE_SANITY_CHECK
int xfer_count = count;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
int nic_base = dev->base_addr;
char *buf = skb->data;
@@ -628,9 +637,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -660,7 +669,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
this message you either 1) have a slightly incompatible clone
or 2) have noise/speed problems with your bus. */
- if (ei_debug > 1)
+ if (netif_msg_rx_status(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -674,9 +683,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_WARNING "%s: RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ netdev_warn(dev, "RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -690,6 +699,7 @@ static void ne_block_output(struct net_device *dev, int count,
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -702,9 +712,9 @@ static void ne_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
- printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -751,7 +761,7 @@ retry:
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. */
- if (ei_debug > 1)
+ if (netif_msg_tx_queued(ei_local))
{
/* DMA termination address check... */
int addr, tries = 20;
@@ -765,9 +775,9 @@ retry:
if (tries <= 0)
{
- printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ netdev_warn(dev, "Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -776,7 +786,7 @@ retry:
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne_reset_8390(dev);
NS8390p_init(dev, 1);
break;
@@ -936,8 +946,8 @@ int __init init_module(void)
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
if (retval) {
if (io[0] == 0)
- printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
- " value(s) for ISA cards.\n");
+ pr_notice("ne.c: You must supply \"io=0xNNN\""
+ " value(s) for ISA cards.\n");
ne_loop_rm_unreg(1);
return retval;
}
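Several probe paths above (ne.c here, and smc-ultra.c and wd.c below) build one console line incrementally: an initial netdev_info() that deliberately omits the trailing newline, followed by pr_cont() continuations as the probe discovers the IRQ and MAC address. A minimal sketch of that pattern, using a hypothetical function rather than any of the real probes:

#include <linux/netdevice.h>
#include <linux/printk.h>

static void example_announce(struct net_device *dev, unsigned long ioaddr)
{
	netdev_info(dev, "example card at %#3lx:", ioaddr);	/* no newline yet */
	pr_cont(" autoirq is %d", dev->irq);
	pr_cont(" %pM\n", dev->dev_addr);			/* newline closes the line */
}

The switch from bare printk() to pr_cont() for the continuations keeps the fragments attached to the netdev_info() line instead of being treated as new, unprefixed messages.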
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index fc14a85e4d5f..f395c967262e 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -33,8 +33,6 @@
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
@@ -60,6 +58,8 @@ static int options[MAX_UNITS];
#include "8390.h"
+static u32 ne2k_msg_enable;
+
/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
@@ -76,10 +76,10 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
+module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
@@ -226,6 +226,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
static unsigned int fnd_cnt;
long ioaddr;
int flags = pci_clone_list[chip_idx].flags;
+ struct ei_device *ei_local;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -280,6 +281,8 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
dev->netdev_ops = &ne2k_netdev_ops;
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = ne2k_msg_enable;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -379,9 +382,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (i)
goto err_out_free_netdev;
- printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
- dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n",
+ pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
return 0;
@@ -450,9 +453,10 @@ static int ne2k_pci_close(struct net_device *dev)
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
- dev->name, jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n",
+ jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -462,7 +466,7 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
break;
}
outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
@@ -479,9 +483,9 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -517,9 +521,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_input "
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -572,9 +576,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk("%s: DMAing conflict in ne2k_pci_block_output."
+ netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -619,7 +623,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
NS8390_init(dev,1);
break;
@@ -640,8 +644,24 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
+static u32 ne2k_pci_get_msglevel(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ return ei_local->msg_enable;
+}
+
+static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ ei_local->msg_enable = v;
+}
+
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
+ .get_msglevel = ne2k_pci_get_msglevel,
+ .set_msglevel = ne2k_pci_set_msglevel,
};
static void ne2k_pci_remove_one(struct pci_dev *pdev)
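ne2k-pci.c drops its old integer "debug" parameter entirely in favour of the msg_enable bitmap. An aside, not something this patch does: drivers that want to keep accepting a small legacy verbosity number often run it through netif_msg_init(), which maps a level n to the lowest n bits of the bitmap and falls back to a default for out-of-range values. A hedged sketch of that approach:

/* Aside only: converting a legacy "debug" level into a NETIF_MSG_* bitmap. */
#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;			/* -1 means "use the driver default" */
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Legacy verbosity level, converted to NETIF_MSG_* bits");

static u32 example_initial_bits(void)
{
	return netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE |
				     NETIF_MSG_LINK);
}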
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 46c5aadaca8e..ca3c2b921cf6 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -67,7 +66,7 @@
#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
+static u32 pcnet_msg_enable;
/*====================================================================*/
@@ -558,6 +557,7 @@ static int pcnet_config(struct pcmcia_device *link)
int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
+ struct ei_device *ei_local;
dev_dbg(&link->dev, "pcnet_config\n");
@@ -607,6 +607,8 @@ static int pcnet_config(struct pcmcia_device *link)
mii_phy_probe(dev);
SET_NETDEV_DEV(dev, &link->dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = pcnet_msg_enable;
if (register_netdev(dev) != 0) {
pr_notice("register_netdev() failed\n");
@@ -616,7 +618,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
- (info->flags & IS_DL10022) ? 22 : 19, id);
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
pr_cont("PNA, ");
} else {
@@ -1063,9 +1065,9 @@ static void ei_watchdog(u_long arg)
if (info->phy_id == info->eth_phy) {
if (p)
netdev_info(dev, "autonegotiation complete: "
- "%sbaseT-%cD selected\n",
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ "%sbaseT-%cD selected\n",
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
else
netdev_info(dev, "link partner did not autonegotiate\n");
}
@@ -1081,7 +1083,7 @@ static void ei_watchdog(u_long arg)
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
netdev_info(dev, "switched to %s transceiver\n",
- (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
info->link_status = 0;
@@ -1128,9 +1130,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1159,13 +1161,14 @@ static void dma_block_input(struct net_device *dev, int count,
unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
+ struct ei_device *ei_local = netdev_priv(dev);
- if ((ei_debug > 4) && (count != 4))
+ if ((netif_msg_rx_status(ei_local)) && (count != 4))
netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_input."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1183,7 +1186,8 @@ static void dma_block_input(struct net_device *dev, int count,
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_rx_status(ei_local)) {
int addr, tries = 20;
do {
/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
@@ -1196,8 +1200,8 @@ static void dma_block_input(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0)
netdev_notice(dev, "RX transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- ring_offset + xfer_count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1213,12 +1217,12 @@ static void dma_block_output(struct net_device *dev, int count,
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
#endif
u_long dma_start;
#ifdef PCMCIA_DEBUG
- if (ei_debug > 4)
- netdev_dbg(dev, "[bo=%d]\n", count);
+ netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1227,9 +1231,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- netdev_notice(dev, "DMAing conflict in dma_block_output."
- "[DMAstat:%1x][irqlock:%1x]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1256,7 +1260,8 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
- if (ei_debug > 4) { /* DMA termination address check... */
+ /* DMA termination address check... */
+ if (netif_msg_tx_queued(ei_local)) {
int addr, tries = 20;
do {
int high = inb_p(nic_base + EN0_RSARHI);
@@ -1267,8 +1272,8 @@ static void dma_block_output(struct net_device *dev, int count,
} while (--tries > 0);
if (tries <= 0) {
netdev_notice(dev, "Tx packet transfer address mismatch,"
- "%#4.4x (expected) vs. %#4.4x (actual).\n",
- (start_page << 8) + count, addr);
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1277,10 +1282,10 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- netdev_notice(dev, "timeout waiting for Tx RDC.\n");
- pcnet_reset_8390(dev);
- NS8390_init(dev, 1);
- break;
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
}
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index b0fbce39661a..139385dcdaa7 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -111,6 +111,7 @@ static struct isapnp_device_id ultra_device_ids[] __initdata = {
MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
#endif
+static u32 ultra_msg_enable;
#define START_PG 0x00 /* First page of TX buffer */
@@ -211,6 +212,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
unsigned char num_pages, irqreg, addr, piomode;
unsigned char idreg = inb(ioaddr + 7);
unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+ struct ei_device *ei_local = netdev_priv(dev);
if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
return -EBUSY;
@@ -232,16 +234,16 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: %s at %#3x, %pM", dev->name, model_name,
- ioaddr, dev->dev_addr);
+ netdev_info(dev, "%s at %#3x, %pM", model_name,
+ ioaddr, dev->dev_addr);
/* Switch from the station address to the alternate register set and
read the useful registers there. */
@@ -265,7 +267,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line.\n");
retval = -EAGAIN;
goto out;
}
@@ -296,7 +298,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
+ pr_cont(", failed to ioremap.\n");
retval = -ENOMEM;
goto out;
}
@@ -304,14 +306,15 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
if (piomode) {
- printk(",%s IRQ %d programmed-I/O mode.\n",
- eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ pr_cont(", %s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
ei_status.block_input = &ultra_pio_input;
ei_status.block_output = &ultra_pio_output;
ei_status.get_8390_hdr = &ultra_pio_get_hdr;
} else {
- printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
- dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(", %s IRQ %d memory %#lx-%#lx.\n",
+ eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start,
+ dev->mem_end-1);
ei_status.block_input = &ultra_block_input;
ei_status.block_output = &ultra_block_output;
ei_status.get_8390_hdr = &ultra_get_8390_hdr;
@@ -320,6 +323,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &ultra_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = ultra_msg_enable;
retval = register_netdev(dev);
if (retval)
@@ -356,12 +360,15 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
/* found it */
dev->base_addr = pnp_port_start(idev, 0);
dev->irq = pnp_irq(idev, 0);
- printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- (char *) ultra_device_ids[i].driver_data,
- dev->base_addr, dev->irq);
+ netdev_info(dev,
+ "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
- printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
- pnp_device_detach(idev);
+ netdev_err(dev,
+ "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n",
+ dev->base_addr);
+ pnp_device_detach(idev);
return -ENXIO;
}
ei_status.priv = (unsigned long)idev;
@@ -412,9 +419,10 @@ static void
ultra_reset_8390(struct net_device *dev)
{
int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(ULTRA_RESET, cmd_port);
- if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies);
ei_status.txing = 0;
outb(0x00, cmd_port); /* Disable shared memory for safety. */
@@ -424,7 +432,7 @@ ultra_reset_8390(struct net_device *dev)
else
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -530,11 +538,11 @@ static int
ultra_close_card(struct net_device *dev)
{
int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
netif_stop_queue(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
outb(0x00, ioaddr + 6); /* Disable interrupts. */
free_irq(dev->irq, dev);
@@ -556,8 +564,10 @@ static int irq[MAX_ULTRA_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
+module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 8df4c4157230..aca957d4e121 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -69,6 +69,11 @@ static void stnic_block_output (struct net_device *dev, int count,
static void stnic_init (struct net_device *dev);
+static u32 stnic_msg_enable;
+
+module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
/* SH7750 specific read/write io. */
static inline void
STNIC_DELAY (void)
@@ -100,6 +105,7 @@ static int __init stnic_probe(void)
{
struct net_device *dev;
int i, err;
+ struct ei_device *ei_local;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -125,10 +131,10 @@ static int __init stnic_probe(void)
share and the board will usually be enabled. */
err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (err) {
- printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
- free_netdev(dev);
- return err;
- }
+ netdev_emerg(dev, " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
ei_status.name = dev->name;
ei_status.word16 = 1;
@@ -147,6 +153,8 @@ static int __init stnic_probe(void)
ei_status.block_output = &stnic_block_output;
stnic_init (dev);
+ ei_local = netdev_priv(dev);
+ ei_local->msg_enable = stnic_msg_enable;
err = register_netdev(dev);
if (err) {
@@ -156,7 +164,7 @@ static int __init stnic_probe(void)
}
stnic_dev = dev;
- printk (KERN_INFO "NS ST-NIC 83902A\n");
+ netdev_info(dev, "NS ST-NIC 83902A\n");
return 0;
}
@@ -164,10 +172,11 @@ static int __init stnic_probe(void)
static void
stnic_reset (struct net_device *dev)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
*(vhalf *) PA_83902_RST = 0;
udelay (5);
- if (ei_debug > 1)
- printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies);
*(vhalf *) PA_83902_RST = ~0;
udelay (5);
}
@@ -176,6 +185,8 @@ static void
stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
+ struct ei_device *ei_local = netdev_priv(dev);
+
half buf[2];
STNIC_WRITE (PG0_RSAR0, 0);
@@ -196,8 +207,7 @@ stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
#endif
- if (ei_debug > 1)
- printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n",
ring_page, hdr->status, hdr->next, hdr->count);
STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 03eb3eed49fa..dd7d816bde52 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -60,6 +60,7 @@ static void wd_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static int wd_close(struct net_device *dev);
+static u32 wd_msg_enable;
#define WD_START_PG 0x00 /* First page of TX buffer */
#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
@@ -170,6 +171,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
const char *model_name;
static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
for (i = 0; i < 8; i++)
checksum += inb(ioaddr + 8 + i);
@@ -180,19 +182,19 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
/* Check for semi-valid mem_start/end values if supplied. */
if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
- printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ netdev_warn(dev,
+ "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
dev->mem_start = 0;
dev->mem_end = 0;
}
- if (ei_debug && version_printed++ == 0)
- printk(version);
+ if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+ netdev_info(dev, version);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + 8 + i);
- printk("%s: WD80x3 at %#3x, %pM",
- dev->name, ioaddr, dev->dev_addr);
+ netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
/* The following PureData probe code was contributed by
Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -244,8 +246,9 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
}
#ifndef final_version
if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
- printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
- word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+ pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8,
+ (inb(ioaddr+1) & 0x01) ? 16 : 8);
#endif
}
@@ -259,7 +262,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
if (reg0 == 0xff || reg0 == 0) {
/* Future plan: this could check a few likely locations first. */
dev->mem_start = 0xd0000;
- printk(" assigning address %#lx", dev->mem_start);
+ pr_cont(" assigning address %#lx", dev->mem_start);
} else {
int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
/* Some boards don't have the register 5 -- it returns 0xff. */
@@ -297,8 +300,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
- if (ei_debug > 2)
- printk(" autoirq is %d", dev->irq);
+ if (netif_msg_drv(ei_local))
+ pr_cont(" autoirq is %d", dev->irq);
if (dev->irq < 2)
dev->irq = word16 ? 10 : 5;
} else
@@ -310,7 +313,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
share and the board will usually be enabled. */
i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
if (i) {
- printk (" unable to get IRQ %d.\n", dev->irq);
+ pr_cont(" unable to get IRQ %d.\n", dev->irq);
return i;
}
@@ -338,8 +341,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
return -ENOMEM;
}
- printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
- model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+ pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
ei_status.reset_8390 = wd_reset_8390;
ei_status.block_input = wd_block_input;
@@ -348,6 +351,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
+ ei_local->msg_enable = wd_msg_enable;
#if 1
/* Enable interrupt generation on softconfig cards -- M.U */
@@ -385,9 +389,11 @@ static void
wd_reset_8390(struct net_device *dev)
{
int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
outb(WD_RESET, wd_cmd_port);
- if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n",
+ jiffies);
ei_status.txing = 0;
/* Set up the ASIC registers, just in case something changed them. */
@@ -395,7 +401,7 @@ wd_reset_8390(struct net_device *dev)
if (ei_status.word16)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
- if (ei_debug > 1) printk("reset done\n");
+ netif_dbg(ei_local, hw, dev, "reset done\n");
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -474,9 +480,9 @@ static int
wd_close(struct net_device *dev)
{
int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
ei_close(dev);
/* Change from 16-bit to 8-bit shared memory so reboot works. */
@@ -502,10 +508,12 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(mem_end, int, NULL, 0);
+module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
MODULE_LICENSE("GPL");
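The wd.c conversions above follow the standard netif_msg pattern: each device carries a msg_enable bitmap (seeded here from the wd_msg_enable module parameter), and the netif_dbg()/netif_msg_*() helpers from <linux/netdevice.h> test one NETIF_MSG_* bit before printing anything. A minimal sketch of that pattern, using a hypothetical foo driver whose names (foo_priv, foo_probe_one, debug) are illustrative rather than taken from this patch:

#include <linux/module.h>
#include <linux/netdevice.h>

/* Hypothetical per-device private data; netif_dbg() only needs msg_enable. */
struct foo_priv {
	u32 msg_enable;
};

static int debug = -1;			/* -1 means "use the default bits" */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");

static int foo_probe_one(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* netif_msg_init() converts a numeric level into a NETIF_MSG_* bitmap,
	 * falling back to the given default bits when debug is -1.
	 */
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE);

	/* Emitted only when NETIF_MSG_PROBE is set in msg_enable. */
	netif_dbg(priv, probe, dev, "probing, msg_enable=%08x\n", priv->msg_enable);

	if (netif_msg_drv(priv))	/* explicit test of NETIF_MSG_DRV */
		netdev_info(dev, "driver-level messages enabled\n");

	return 0;
}

Drivers that also wire msg_enable into their ethtool get_msglevel/set_msglevel operations expose the same bitmap at runtime through ethtool -s <dev> msglvl <value>.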
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index ae2a12b7db62..8308728fad05 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,6 +44,8 @@
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+static u32 zorro8390_msg_enable;
+
#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -86,9 +88,9 @@ static struct card_info {
static void zorro8390_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
- if (ei_debug > 1)
- netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+ netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -119,8 +121,9 @@ static void zorro8390_get_8390_hdr(struct net_device *dev,
* If it does, it's the last thing you'll see
*/
if (ei_status.dmaing) {
- netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
- __func__, ei_status.dmaing, ei_status.irqlock);
+ netdev_warn(dev,
+ "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -230,7 +233,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
/* 20ms */
- netdev_err(dev, "timeout waiting for Tx RDC\n");
+ netdev_warn(dev, "timeout waiting for Tx RDC\n");
zorro8390_reset_8390(dev);
__NS8390_init(dev, 1);
break;
@@ -248,8 +251,9 @@ static int zorro8390_open(struct net_device *dev)
static int zorro8390_close(struct net_device *dev)
{
- if (ei_debug > 1)
- netdev_dbg(dev, "Shutting down ethercard\n");
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
__ei_close(dev);
return 0;
}
@@ -293,6 +297,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
+ struct ei_device *ei_local = netdev_priv(dev);
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -383,6 +388,9 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
+
+ ei_local->msg_enable = zorro8390_msg_enable;
+
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 75fb1d20d6fd..c0f68dcd1dc1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -667,8 +667,8 @@ static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
return 1000000000UL / ppn;
}
-static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int bfin_mac_hwtstamp_set(struct net_device *netdev,
+ struct ifreq *ifr)
{
struct hwtstamp_config config;
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -824,6 +824,16 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
+static int bfin_mac_hwtstamp_get(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
+ sizeof(lp->stamp_cfg)) ?
+ -EFAULT : 0;
+}
+
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -1062,7 +1072,8 @@ static void bfin_phc_release(struct bfin_mac_local *lp)
#else
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
@@ -1496,7 +1507,9 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ return bfin_mac_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return bfin_mac_hwtstamp_get(netdev, ifr);
default:
if (lp->phydev)
return phy_mii_ioctl(lp->phydev, ifr, cmd);
@@ -1544,7 +1557,6 @@ static int bfin_mac_open(struct net_device *dev)
return ret;
phy_start(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
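Splitting bfin_mac_hwtstamp_ioctl() into separate set and get handlers lets the driver answer SIOCGHWTSTAMP, which reports the currently applied hardware timestamping configuration without modifying it; both requests exchange the same struct hwtstamp_config through ifr->ifr_data. A rough user-space sketch of the read-only query (the interface name "eth0" is illustrative):

/* Query an interface's current hardware timestamping configuration. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {	/* read-only; SIOCSHWTSTAMP would apply cfg */
		perror("SIOCGHWTSTAMP");
		close(fd);
		return 1;
	}

	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}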
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e06694555144..c5d75e7aeeb6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1361,7 +1360,7 @@ static int greth_mdio_init(struct greth_private *greth)
timeout = jiffies + 6*HZ;
while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
}
- genphy_read_status(greth->phy);
+ phy_read_status(greth->phy);
greth_link_change(greth->netdev);
}
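The greth change above swaps genphy_read_status() for phy_read_status(); the difference is that phy_read_status() dispatches to the attached PHY driver's own read_status callback when one is provided, and only falls back to the generic MII implementation otherwise, so PHYs with non-standard status registers are read correctly. An illustrative sketch of that dispatch (not a verbatim copy of the helper in <linux/phy.h>):

/* Illustrative sketch of the phy_read_status() dispatch; the real helper
 * lives in <linux/phy.h> and is the authoritative version.
 */
static inline int phy_read_status_sketch(struct phy_device *phydev)
{
	if (phydev->drv && phydev->drv->read_status)
		return phydev->drv->read_status(phydev);	/* PHY-specific path */

	return genphy_read_status(phydev);			/* generic MII fallback */
}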
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 46dfb1378c17..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -16,7 +16,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mii.h>
@@ -930,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
}
static const struct of_device_id emac_of_match[] = {
+ {.compatible = "allwinner,sun4i-a10-emac",},
+
+ /* Deprecated */
{.compatible = "allwinner,sun4i-emac",},
{},
};
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 219be1bf3cfc..1517e9df5ba1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -61,7 +61,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 65926a956575..18e542f7853d 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -17,7 +17,6 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -42,9 +41,9 @@
#include "7990.h"
-#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
-#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
-#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"
@@ -56,9 +55,9 @@
#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
/* Lossage Factor Nine, Mr Sulu. */
-#define WRITERAP(lp,x) (lp->writerap(lp,x))
-#define WRITERDP(lp,x) (lp->writerdp(lp,x))
-#define READRDP(lp) (lp->readrdp(lp))
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
#else
@@ -94,428 +93,436 @@ static inline __u16 READRDP(struct lance_private *lp)
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
- int t; \
- for (t=0; t < RX_RING_SIZE; t++) { \
- printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
- t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
- ib->brx_ring[t].length,\
- ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
- }\
- for (t=0; t < TX_RING_SIZE; t++) { \
- printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
- t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
- ib->btx_ring[t].length,\
- ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
- }\
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
{
- volatile struct lance_init_block *aib = lp->lance_init_block;
- int leptr;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
- leptr = LANCE_ADDR (aib);
+ leptr = LANCE_ADDR(aib);
- WRITERAP(lp, LE_CSR1); /* load address of init block */
- WRITERDP(lp, leptr & 0xFFFF);
- WRITERAP(lp, LE_CSR2);
- WRITERDP(lp, leptr >> 16);
- WRITERAP(lp, LE_CSR3);
- WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
- /* Point back to csr0 */
- WRITERAP(lp, LE_CSR0);
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
- int leptr;
- int i;
-
- aib = lp->lance_init_block;
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
-
- /* Copy the ethernet address to the lance init block
- * Notice that we do a byteswap if we're big endian.
- * [I think this is the right criterion; at least, sunlance,
- * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
- * However, the datasheet says that the BSWAP bit doesn't affect
- * the init block, so surely it should be low byte first for
- * everybody? Um.]
- * We could define the ib->physaddr as three 16bit values and
- * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
- */
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
#ifdef __BIG_ENDIAN
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
#else
- for (i=0; i<6; i++)
- ib->phys_addr[i] = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
#endif
- if (DEBUG_IRING)
- printk ("TX rings:\n");
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
lp->tx_full = 0;
- /* Setup the Tx ring entries */
- for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the Rx ring entries */
- if (DEBUG_IRING)
- printk ("RX rings:\n");
- for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
- leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- /* 0xf000 == bits that must be one (reserved, presumably) */
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (DEBUG_IRING)
- printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the initialization block */
-
- /* Setup rx descriptor pointer */
- leptr = LANCE_ADDR(&aib->brx_ring);
- ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
- ib->rx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("RX ptr: %8.8x\n", leptr);
-
- /* Setup tx descriptor pointer */
- leptr = LANCE_ADDR(&aib->btx_ring);
- ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
- ib->tx_ptr = leptr;
- if (DEBUG_IRING)
- printk ("TX ptr: %8.8x\n", leptr);
-
- /* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
- PRINT_RINGS();
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
{
- int i;
+ int i;
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_INIT);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
- /* Need a hook here for sunlance ledma stuff */
+ /* Need a hook here for sunlance ledma stuff */
- /* Wait for the lance to complete initialization */
- for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
- barrier();
- if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
- printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
- return -1;
- }
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
- /* Clear IDON by writing a "1", enable interrupts and start lance */
- WRITERDP(lp, LE_C0_IDON);
- WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
- return 0;
+ return 0;
}
-static int lance_reset (struct net_device *dev)
+static int lance_reset(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- load_csrs (lp);
- lance_init_ring (dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- status = init_restart_lance (lp);
+ load_csrs(lp);
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
- printk ("Lance restart=%d\n", status);
+ printk("Lance restart=%d\n", status);
#endif
- return status;
+ return status;
}
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_rx_desc *rd;
- unsigned char bits;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
#ifdef TEST_HITS
- int i;
+ int i;
#endif
#ifdef TEST_HITS
- printk ("[");
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (i == lp->rx_new)
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
- else
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
- }
- printk ("]");
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
#endif
#ifdef CONFIG_HP300
blinken_leds(0x40, 0);
#endif
- WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
- for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
- !((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]) {
-
- /* We got an incomplete frame? */
- if ((bits & LE_R1_POK) != LE_R1_POK) {
- dev->stats.rx_over_errors++;
- dev->stats.rx_errors++;
- continue;
- } else if (bits & LE_R1_ERR) {
- /* Count only the end frame as a rx error,
- * not the beginning
- */
- if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) dev->stats.rx_errors++;
- } else {
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
int len = (rd->mblength & 0xfff) - 4;
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
- if (!skb) {
- dev->stats.rx_dropped++;
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- return 0;
- }
-
- skb_reserve (skb, 2); /* 16 byte align */
- skb_put (skb, len); /* make room */
- skb_copy_to_linear_data(skb,
- (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- }
-
- /* Return the packet to the pool */
- rd->mblength = 0;
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
- }
- return 0;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
}
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_tx_desc *td;
- int i, j;
- int status;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
#ifdef CONFIG_HP300
blinken_leds(0x80, 0);
#endif
- /* csr0 is 2f3 */
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- /* csr0 is 73 */
-
- j = lp->tx_old;
- for (i = j; i != lp->tx_new; i = j) {
- td = &ib->btx_ring [i];
-
- /* If we hit a packet not owned by us, stop */
- if (td->tmd1_bits & LE_T1_OWN)
- break;
-
- if (td->tmd1_bits & LE_T1_ERR) {
- status = td->misc;
-
- dev->stats.tx_errors++;
- if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
- if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
-
- if (status & LE_T3_CLOS) {
- dev->stats.tx_carrier_errors++;
- if (lp->auto_select) {
- lp->tpe = 1 - lp->tpe;
- printk("%s: Carrier Lost, trying %s\n",
- dev->name, lp->tpe?"TPE":"AUI");
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- }
-
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)) {
- dev->stats.tx_fifo_errors++;
-
- printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
- dev->name);
- /* Stop the lance */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
- /*
- * So we don't count the packet more than once.
- */
- td->tmd1_bits &= ~(LE_T1_POK);
-
- /* One collision before packet was sent. */
- if (td->tmd1_bits & LE_T1_EONE)
- dev->stats.collisions++;
-
- /* More than one collision, be optimistic. */
- if (td->tmd1_bits & LE_T1_EMORE)
- dev->stats.collisions += 2;
-
- dev->stats.tx_packets++;
- }
-
- j = (j + 1) & lp->tx_ring_mod_mask;
- }
- lp->tx_old = j;
- WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
- return 0;
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
}
static irqreturn_t
-lance_interrupt (int irq, void *dev_id)
+lance_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct lance_private *lp = netdev_priv(dev);
- int csr0;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
- WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
- csr0 = READRDP(lp);
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
- PRINT_RINGS();
+ PRINT_RINGS();
- if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
- spin_unlock (&lp->devlock);
- return IRQ_NONE; /* been generated by the Lance. */
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
}
- /* Acknowledge all the interrupt sources ASAP */
- WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
-
- if ((csr0 & LE_C0_ERR)) {
- /* Clear the error condition */
- WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
- }
-
- if (csr0 & LE_C0_RINT)
- lance_rx (dev);
-
- if (csr0 & LE_C0_TINT)
- lance_tx (dev);
-
- /* Log misc errors. */
- if (csr0 & LE_C0_BABL)
- dev->stats.tx_errors++; /* Tx babble. */
- if (csr0 & LE_C0_MISS)
- dev->stats.rx_errors++; /* Missed a Rx frame. */
- if (csr0 & LE_C0_MERR) {
- printk("%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* Restart the chip. */
- WRITERDP(lp, LE_C0_STRT);
- }
-
- if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
lp->tx_full = 0;
- netif_wake_queue (dev);
- }
+ netif_wake_queue(dev);
+ }
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
return IRQ_HANDLED;
}
-int lance_open (struct net_device *dev)
+int lance_open(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
int res;
- /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
- if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
- return -EAGAIN;
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
- res = lance_reset(dev);
+ res = lance_reset(dev);
spin_lock_init(&lp->devlock);
- netif_start_queue (dev);
+ netif_start_queue(dev);
return res;
}
EXPORT_SYMBOL_GPL(lance_open);
-int lance_close (struct net_device *dev)
+int lance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- /* Stop the LANCE */
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
- free_irq(lp->irq, dev);
+ free_irq(lp->irq, dev);
- return 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
@@ -524,122 +531,122 @@ void lance_tx_timeout(struct net_device *dev)
printk("lance_tx_timeout\n");
lance_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- int entry, skblen, len;
- static int outs;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- skblen = skb->len;
+ skblen = skb->len;
#ifdef DEBUG_DRIVER
- /* dump the packet */
- {
- int i;
-
- for (i = 0; i < 64; i++) {
- if ((i % 16) == 0)
- printk ("\n");
- printk ("%2.2x ", skb->data [i]);
- }
- }
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
#endif
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
- entry = lp->tx_new & lp->tx_ring_mod_mask;
- ib->btx_ring [entry].length = (-len) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
if (skb->len < ETH_ZLEN)
memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
- skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
- /* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
- lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
- outs++;
- /* Kick the lance: transmit now */
- WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb (skb);
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_kfree_skb(skb);
- spin_lock_irqsave (&lp->devlock, flags);
- if (TX_BUFFS_AVAIL)
- netif_start_queue (dev);
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
else
lp->tx_full = 1;
- spin_unlock_irqrestore (&lp->devlock, flags);
+ spin_unlock_irqrestore(&lp->devlock, flags);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
/* taken from the depca driver via a2065.c */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI){
- ib->filter [0] = 0xffffffff;
- ib->filter [1] = 0xffffffff;
- return;
- }
- /* clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
-
- /* Add addresses */
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
- crc = crc >> 26;
- mcast_table [crc >> 4] |= 1 << (crc & 0xf);
- }
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
}
-void lance_set_multicast (struct net_device *dev)
+void lance_set_multicast(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
- volatile struct lance_init_block *ib = lp->init_block;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
int stopped;
stopped = netif_queue_stopped(dev);
if (!stopped)
- netif_stop_queue (dev);
-
- while (lp->tx_old != lp->tx_new)
- schedule();
+ netif_stop_queue(dev);
- WRITERAP(lp, LE_CSR0);
- WRITERDP(lp, LE_C0_STOP);
- lance_init_ring (dev);
+ while (lp->tx_old != lp->tx_new)
+ schedule();
- if (dev->flags & IFF_PROMISC) {
- ib->mode |= LE_MO_PROM;
- } else {
- ib->mode &= ~LE_MO_PROM;
- lance_load_multicast (dev);
- }
- load_csrs (lp);
- init_restart_lance (lp);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
if (!stopped)
- netif_start_queue (dev);
+ netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
@@ -648,10 +655,10 @@ void lance_poll(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- spin_lock (&lp->devlock);
+ spin_lock(&lp->devlock);
WRITERAP(lp, LE_CSR0);
WRITERDP(lp, LE_C0_STRT);
- spin_unlock (&lp->devlock);
+ spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index ae33a99bf476..e9e0be313804 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -35,33 +35,32 @@
#define LANCE_LOG_RX_BUFFERS 3
#endif
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
-#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
-#define PKT_BUFF_SIZE (1544)
-#define RX_BUFF_SIZE PKT_BUFF_SIZE
-#define TX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
/* Each receive buffer is described by a receive message descriptor (RMD) */
struct lance_rx_desc {
- volatile unsigned short rmd0; /* low address of packet */
- volatile unsigned char rmd1_bits; /* descriptor bits */
- volatile unsigned char rmd1_hadr; /* high address of packet */
- volatile short length; /* This length is 2s complement (negative)!
- * Buffer length
- */
- volatile unsigned short mblength; /* Actual number of bytes received */
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
};
/* Ditto for TMD: */
struct lance_tx_desc {
- volatile unsigned short tmd0; /* low address of packet */
- volatile unsigned char tmd1_bits; /* descriptor bits */
- volatile unsigned char tmd1_hadr; /* high address of packet */
- volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
volatile unsigned short misc;
};
@@ -71,181 +70,178 @@ struct lance_tx_desc {
* init block,the Tx and Rx rings and the buffers together in memory:
*/
struct lance_init_block {
- volatile unsigned short mode; /* Pre-set mode (reg. 15) */
- volatile unsigned char phys_addr[6]; /* Physical ethernet address */
- volatile unsigned filter[2]; /* Multicast filter (64 bits) */
-
- /* Receive and transmit ring base, along with extra bits. */
- volatile unsigned short rx_ptr; /* receive descriptor addr */
- volatile unsigned short rx_len; /* receive len and high addr */
- volatile unsigned short tx_ptr; /* transmit descriptor addr */
- volatile unsigned short tx_len; /* transmit len and high addr */
-
- /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
- * This will be true if this whole struct is 8-byte aligned.
- */
- volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
- volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
-
- volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
- volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- /* we use this just to make the struct big enough that we can move its startaddr
- * in order to force alignment to an eight byte boundary.
- */
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its startaddr
+ * in order to force alignment to an eight byte boundary.
+ */
};
/* This is where we keep all the stuff the driver needs to know about.
* I'm definitely unhappy about the mechanism for allowing specific
* drivers to add things...
*/
-struct lance_private
-{
- char *name;
+struct lance_private {
+ const char *name;
unsigned long base;
- volatile struct lance_init_block *init_block; /* CPU address of RAM */
- volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
- int rx_new, tx_new;
- int rx_old, tx_old;
+ int rx_new, tx_new;
+ int rx_old, tx_old;
- int lance_log_rx_bufs, lance_log_tx_bufs;
- int rx_ring_mod_mask, tx_ring_mod_mask;
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
- int tpe; /* TPE is selected */
- int auto_select; /* cable-selection is by carrier */
- unsigned short busmaster_regval;
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
- unsigned int irq; /* IRQ to register */
+ unsigned int irq; /* IRQ to register */
- /* This is because the HP LANCE is disgusting and you have to check
- * a DIO-specific register every time you read/write the LANCE regs :-<
- * [could we get away with making these some sort of macro?]
- */
- void (*writerap)(void *, unsigned short);
- void (*writerdp)(void *, unsigned short);
- unsigned short (*readrdp)(void *);
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
spinlock_t devlock;
char tx_full;
};
/*
- * Am7990 Control and Status Registers
+ * Am7990 Control and Status Registers
*/
-#define LE_CSR0 0x0000 /* LANCE Controller Status */
-#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
-#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
-#define LE_CSR3 0x0003 /* Misc */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
/*
* Bit definitions for CSR0 (LANCE Controller Status)
*/
-#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
-#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
-#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
-#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
-#define LE_C0_MERR 0x0800 /* Memory Error */
-#define LE_C0_RINT 0x0400 /* Receive Interrupt */
-#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
-#define LE_C0_IDON 0x0100 /* Initialization Done */
-#define LE_C0_INTR 0x0080 /* Interrupt Flag
- = BABL | MISS | MERR | RINT | TINT | IDON */
-#define LE_C0_INEA 0x0040 /* Interrupt Enable */
-#define LE_C0_RXON 0x0020 /* Receive On */
-#define LE_C0_TXON 0x0010 /* Transmit On */
-#define LE_C0_TDMD 0x0008 /* Transmit Demand */
-#define LE_C0_STOP 0x0004 /* Stop */
-#define LE_C0_STRT 0x0002 /* Start */
-#define LE_C0_INIT 0x0001 /* Initialize */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
/*
* Bit definitions for CSR3
*/
-#define LE_C3_BSWP 0x0004 /* Byte Swap
- (on for big endian byte order) */
-#define LE_C3_ACON 0x0002 /* ALE Control
- (on for active low ALE) */
-#define LE_C3_BCON 0x0001 /* Byte Control */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
/*
* Mode Flags
*/
-#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
* but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
*/
-#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
-#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
-#define LE_MO_DLNKTST 0x1000 /* disable link status */
-#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
-#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
-#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
-#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
-#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
/* and this one is from the C-LANCE data sheet... */
-#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
- (C-LANCE, not original LANCE) */
-#define LE_MO_INTL 0x0040 /* Internal Loopback */
-#define LE_MO_DRTY 0x0020 /* Disable Retry */
-#define LE_MO_FCOLL 0x0010 /* Force Collision */
-#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
-#define LE_MO_LOOP 0x0004 /* Loopback Enable */
-#define LE_MO_DTX 0x0002 /* Disable Transmitter */
-#define LE_MO_DRX 0x0001 /* Disable Receiver */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
/*
* Receive Flags
*/
-#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_R1_ERR 0x40 /* Error */
-#define LE_R1_FRA 0x20 /* Framing Error */
-#define LE_R1_OFL 0x10 /* Overflow Error */
-#define LE_R1_CRC 0x08 /* CRC Error */
-#define LE_R1_BUF 0x04 /* Buffer Error */
-#define LE_R1_SOP 0x02 /* Start of Packet */
-#define LE_R1_EOP 0x01 /* End of Packet */
-#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Transmit Flags
*/
-#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
-#define LE_T1_ERR 0x40 /* Error */
-#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
-#define LE_T1_EMORE 0x10 /* More than one retry needed */
-#define LE_T1_EONE 0x08 /* One retry needed */
-#define LE_T1_EDEF 0x04 /* Deferred */
-#define LE_T1_SOP 0x02 /* Start of Packet */
-#define LE_T1_EOP 0x01 /* End of Packet */
-#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
/*
* Error Flags
*/
-#define LE_T3_BUF 0x8000 /* Buffer Error */
-#define LE_T3_UFL 0x4000 /* Underflow Error */
-#define LE_T3_LCOL 0x1000 /* Late Collision */
-#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
-#define LE_T3_RTY 0x0400 /* Retry Error */
-#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
/* Miscellaneous useful macros */
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
- lp->tx_old - lp->tx_new-1)
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
-int lance_close (struct net_device *dev);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-void lance_set_multicast (struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
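The reindented TX_BUFFS_AVAIL macro still computes how many transmit descriptors remain free between the consumer index (tx_old) and the producer index (tx_new) of the power-of-two ring. The in-tree macro reads a function-local lp directly; the stand-alone check below takes it as a parameter instead and uses illustrative values for an 8-entry ring (tx_ring_mod_mask == 7):

/* Stand-alone check of the TX_BUFFS_AVAIL ring arithmetic; not driver code. */
#include <stdio.h>

struct lp_sketch {
	int tx_old, tx_new, tx_ring_mod_mask;
};

#define TX_BUFFS_AVAIL_SKETCH(lp) (((lp)->tx_old <= (lp)->tx_new) ?	\
		(lp)->tx_old + (lp)->tx_ring_mod_mask - (lp)->tx_new :	\
		(lp)->tx_old - (lp)->tx_new - 1)

int main(void)
{
	struct lp_sketch a = { .tx_old = 2, .tx_new = 5, .tx_ring_mod_mask = 7 };
	struct lp_sketch b = { .tx_old = 6, .tx_new = 1, .tx_ring_mod_mask = 7 };

	printf("a: %d\n", TX_BUFFS_AVAIL_SKETCH(&a));	/* 2 + 7 - 5 = 4 free slots */
	printf("b: %d\n", TX_BUFFS_AVAIL_SKETCH(&b));	/* 6 - 1 - 1 = 4 (producer wrapped) */
	return 0;
}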
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index d042511bdc13..2061b471fd16 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -24,9 +24,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -74,7 +72,6 @@ Revision History:
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 8baa3527ba74..a75092d584cc 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
Module Name:
@@ -753,7 +751,7 @@ struct amd8111e_priv{
const char *name;
struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
- /* Transmit and recive skbs */
+ /* Transmit and receive skbs */
struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
/* Transmit and receive dma mapped addr */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 427c148bb643..a2bd91e3d302 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -27,8 +27,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
@@ -48,7 +47,6 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 4b7f7ad62bb8..ca53024f017f 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -18,8 +18,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* ########################################################################
*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 0c61fd50d882..47ce57c2c893 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -127,41 +127,41 @@ static void hplance_remove_one(struct dio_dev *d)
/* Initialise a single lance board at the given DIO device */
static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
- unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
- struct hplance_private *lp;
- int i;
-
- /* reset the board */
- out_8(va+DIO_IDOFF, 0xff);
- udelay(100); /* ariba! ariba! udelay! udelay! */
-
- /* Fill the dev fields */
- dev->base_addr = va;
- dev->netdev_ops = &hplance_netdev_ops;
- dev->dma = 0;
-
- for (i=0; i<6; i++) {
- /* The NVRAM holds our ethernet address, one nibble per byte,
- * at bytes NVRAMOFF+1,3,5,7,9...
- */
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
- | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
- }
-
- lp = netdev_priv(dev);
- lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
- lp->lance.base = va;
- lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
- lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
- lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
- lp->lance.irq = d->ipl;
- lp->lance.writerap = hplance_writerap;
- lp->lance.writerdp = hplance_writerdp;
- lp->lance.readrdp = hplance_readrdp;
- lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
- lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
- lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
- lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
}
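The hplance_init() rewrite above is a whitespace cleanup, but the address loop it preserves is worth spelling out: each byte of the MAC address is stored in NVRAM as two 4-bit nibbles, the high one at offset i*4 + 1 and the low one at offset i*4 + 3. A small user-space sketch of the same shift-and-mask arithmetic, using made-up NVRAM contents instead of in_8() reads from the board:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend NVRAM dump; only offsets i*4 + 1 and i*4 + 3 matter. */
	uint8_t nvram[6 * 4] = { 0 };
	uint8_t mac[6];
	int i;

	for (i = 0; i < 6; i++) {
		nvram[i * 4 + 1] = 0x08 + i;	/* high nibble (low 4 bits used) */
		nvram[i * 4 + 3] = 0x0a;	/* low nibble */
	}

	for (i = 0; i < 6; i++)
		mac[i] = ((nvram[i * 4 + 1] & 0xF) << 4) |
			  (nvram[i * 4 + 3] & 0xF);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

For the values above this prints 8a:9a:aa:ba:ca:da.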
/* This is disgusting. We have to check the DIO status register for ack every
@@ -195,25 +195,25 @@ static unsigned short hplance_readrdp(void *priv)
static int hplance_open(struct net_device *dev)
{
- int status;
- struct lance_private *lp = netdev_priv(dev);
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
- status = lance_open(dev); /* call generic lance open code */
- if (status)
- return status;
- /* enable interrupts at board level. */
- out_8(lp->base + HPLANCE_STATUS, LE_IE);
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
- return 0;
+ return 0;
}
static int hplance_close(struct net_device *dev)
{
- struct lance_private *lp = netdev_priv(dev);
+ struct lance_private *lp = netdev_priv(dev);
- out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
- lance_close(dev);
- return 0;
+ out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
+ lance_close(dev);
+ return 0;
}
static int __init hplance_init_module(void)
@@ -223,7 +223,7 @@ static int __init hplance_init_module(void)
static void __exit hplance_cleanup_module(void)
{
- dio_unregister_driver(&hplance_driver);
+ dio_unregister_driver(&hplance_driver);
}
module_init(hplance_init_module);
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index e108e911da05..0e8399dec054 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -94,33 +94,31 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->netdev_ops = &lance_netdev_ops;
dev->dma = 0;
- addr=(u_long *)ETHERNET_ADDRESS;
+ addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0]=0x08;
- dev->dev_addr[1]=0x00;
- dev->dev_addr[2]=0x3e;
- address=address>>8;
- dev->dev_addr[5]=address&0xff;
- address=address>>8;
- dev->dev_addr[4]=address&0xff;
- address=address>>8;
- dev->dev_addr[3]=address&0xff;
-
- printk("%s: MVME147 at 0x%08lx, irq %d, "
- "Hardware Address %pM\n",
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address&0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
dev->dev_addr);
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
- if (!lp->ram)
- {
+ if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
- lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.name = name;
lp->lance.base = dev->base_addr;
lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
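The mvme147 probe hunk above keeps the original address derivation: the first three MAC bytes are the fixed 08:00:3e prefix, and the last three come from the upper three bytes of the board's 32-bit serial word (the lowest byte is discarded by the first shift). A user-space sketch with a hypothetical PROM word in place of the ETHERNET_ADDRESS read:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t address = 0x12345678;			/* hypothetical PROM word */
	uint8_t dev_addr[6] = { 0x08, 0x00, 0x3e };	/* fixed prefix */

	address = address >> 8;		/* drop the low byte */
	dev_addr[5] = address & 0xff;
	address = address >> 8;
	dev_addr[4] = address & 0xff;
	address = address >> 8;
	dev_addr[3] = address & 0xff;

	/* prints 08:00:3e:12:34:56 for the value above */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}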
@@ -167,8 +165,8 @@ static int m147lance_open(struct net_device *dev)
if (status)
return status;
/* enable interrupts at board level. */
- m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
- m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
return 0;
}
@@ -176,7 +174,7 @@ static int m147lance_open(struct net_device *dev)
static int m147lance_close(struct net_device *dev)
{
/* disable interrupts at boardlevel */
- m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
lance_close(dev);
return 0;
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index d4ed89130c52..08569fe2b182 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -132,7 +132,6 @@ Include Files
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 38492e0b704e..9339cccfe05a 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1668,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
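The pcnet32 hunk swaps the open-coded memcmp() for ether_addr_equal(), which returns true when the two addresses match, so the added negation keeps the original "addresses differ" condition. (ether_addr_equal() expects 2-byte-aligned buffers, which is presumably why the atl1 change further down uses ether_addr_equal_unaligned() instead.) A user-space sketch of the equivalence, with a stand-in helper rather than the real kernel function:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* stand-in with the same semantics as ether_addr_equal() */
static bool ether_addr_equal_sketch(const unsigned char *a,
				    const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char promaddr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char dev_addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	/* old form: true when the addresses differ */
	printf("memcmp differs:      %d\n", memcmp(promaddr, dev_addr, ETH_ALEN) != 0);
	/* new form: also true when the addresses differ */
	printf("!ether_addr_equal(): %d\n", !ether_addr_equal_sketch(promaddr, dev_addr));
	return 0;
}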
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ece56831a647..5e4273b7aa27 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -80,7 +80,6 @@ static char lancestr[] = "LANCE";
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/socket.h> /* Used for the temporal inet entries and routing */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dc08678bf9a4..928fac6dd10a 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -122,7 +122,6 @@ struct buffer_state {
* @link: PHY's last seen link state.
* @duplex: PHY's last set duplex mode.
* @speed: PHY's last set speed.
- * @max_speed: Maximum supported by current system network data-rate.
*/
struct arc_emac_priv {
/* Devices */
@@ -152,7 +151,6 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
- unsigned int max_speed;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 248baf6273fb..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -381,17 +381,7 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising = phy_dev->supported;
-
- if (priv->max_speed > 100) {
- phy_dev->advertising &= PHY_GBIT_FEATURES;
- } else if (priv->max_speed <= 100) {
- phy_dev->advertising &= PHY_BASIC_FEATURES;
- if (priv->max_speed <= 10) {
- phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
- phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
- }
- }
+ phy_dev->advertising &= phy_dev->supported;
priv->last_rx_bd = 0;
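The arc_emac hunk above drops the device-tree max-speed handling and instead intersects the advertised link modes with the PHY's supported set, so autonegotiation never offers a mode the PHY cannot do. The bitmask intersection in isolation, with illustrative flag values rather than the real SUPPORTED_* constants:

#include <stdio.h>
#include <stdint.h>

#define DEMO_10_FULL	0x01
#define DEMO_100_FULL	0x02
#define DEMO_1000_FULL	0x04

int main(void)
{
	uint32_t supported = DEMO_10_FULL | DEMO_100_FULL;	/* what the PHY can do */
	uint32_t advertising = DEMO_10_FULL | DEMO_100_FULL | DEMO_1000_FULL;

	advertising &= supported;	/* never advertise an unsupported mode */
	printf("advertising = 0x%02x\n", advertising);	/* 0x03 */
	return 0;
}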
@@ -704,14 +694,6 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
- /* Get max speed of operation from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "max-speed",
- &priv->max_speed)) {
- dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
- err = -EINVAL;
- goto out;
- }
-
ndev->irq = irq;
dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d71103dbf2cd..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -106,6 +106,9 @@ struct alx_priv {
u16 msg_enable;
bool msi;
+
+ /* protects hw.stats */
+ spinlock_t stats_lock;
};
extern const struct ethtool_ops alx_ethtool_ops;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 45b36507abc1..08e22df2a300 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -46,6 +46,66 @@
#include "reg.h"
#include "hw.h"
+/* The order of these strings must match the order of the fields in
+ * struct alx_hw_stats
+ * See hw.h
+ */
+static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bcast_packets",
+ "rx_mcast_packets",
+ "rx_pause_packets",
+ "rx_ctrl_packets",
+ "rx_fcs_errors",
+ "rx_length_errors",
+ "rx_bytes",
+ "rx_runt_packets",
+ "rx_fragments",
+ "rx_64B_or_less_packets",
+ "rx_65B_to_127B_packets",
+ "rx_128B_to_255B_packets",
+ "rx_256B_to_511B_packets",
+ "rx_512B_to_1023B_packets",
+ "rx_1024B_to_1518B_packets",
+ "rx_1519B_to_mtu_packets",
+ "rx_oversize_packets",
+ "rx_rxf_ov_drop_packets",
+ "rx_rrd_ov_drop_packets",
+ "rx_align_errors",
+ "rx_bcast_bytes",
+ "rx_mcast_bytes",
+ "rx_address_errors",
+ "tx_packets",
+ "tx_bcast_packets",
+ "tx_mcast_packets",
+ "tx_pause_packets",
+ "tx_exc_defer_packets",
+ "tx_ctrl_packets",
+ "tx_defer_packets",
+ "tx_bytes",
+ "tx_64B_or_less_packets",
+ "tx_65B_to_127B_packets",
+ "tx_128B_to_255B_packets",
+ "tx_256B_to_511B_packets",
+ "tx_512B_to_1023B_packets",
+ "tx_1024B_to_1518B_packets",
+ "tx_1519B_to_mtu_packets",
+ "tx_single_collision",
+ "tx_multiple_collisions",
+ "tx_late_collision",
+ "tx_abort_collision",
+ "tx_underrun",
+ "tx_trd_eop",
+ "tx_length_errors",
+ "tx_trunc_packets",
+ "tx_bcast_bytes",
+ "tx_mcast_bytes",
+ "tx_update",
+};
+
+#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats)
+
+
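The comment above the string table states the invariant the rest of this patch relies on: the labels must line up one-for-one with the u64 fields of struct alx_hw_stats, and the get_ethtool_stats hunk below backs up the size side of that with a BUILD_BUG_ON. A user-space sketch of the same idea using a C11 static assertion; all names here are illustrative, not the driver's:

#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_hw_stats {
	uint64_t rx_ok;
	uint64_t rx_bcast;
	uint64_t tx_ok;
};

static const char demo_strings[][32] = {
	"rx_packets",
	"rx_bcast_packets",
	"tx_packets",
};

_Static_assert(ARRAY_SIZE(demo_strings) ==
	       sizeof(struct demo_hw_stats) / sizeof(uint64_t),
	       "label count must match u64 field count");

int main(void) { return 0; }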
static u32 alx_get_supported_speeds(struct alx_hw *hw)
{
u32 supported = SUPPORTED_10baseT_Half |
@@ -201,6 +261,44 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data)
alx->msg_enable = data;
}
+static void alx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(hw);
+ BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+ ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+
+ spin_unlock(&alx->stats_lock);
+}
+
+static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int alx_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ALX_NUM_STATS;
+ default:
+ return -EINVAL;
+ }
+}
+
const struct ethtool_ops alx_ethtool_ops = {
.get_settings = alx_get_settings,
.set_settings = alx_set_settings,
@@ -209,4 +307,7 @@ const struct ethtool_ops alx_ethtool_ops = {
.get_msglevel = alx_get_msglevel,
.set_msglevel = alx_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_strings = alx_get_strings,
+ .get_sset_count = alx_get_sset_count,
+ .get_ethtool_stats = alx_get_ethtool_stats,
};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1e8c24a3cb4e..7712f068f6d4 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1050,3 +1050,61 @@ bool alx_get_phy_info(struct alx_hw *hw)
return true;
}
+
+void alx_update_hw_stats(struct alx_hw *hw)
+{
+ /* RX stats */
+ hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK);
+ hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
+ hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
+ hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
+ hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
+ hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
+ hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
+ hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
+ hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
+ hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
+ hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
+ hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
+ hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
+ hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
+ hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
+ hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
+ hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
+ hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
+ hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
+ hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
+ hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
+ hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
+ hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
+ hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);
+
+ /* TX stats */
+ hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK);
+ hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
+ hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
+ hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
+ hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
+ hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
+ hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
+ hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
+ hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
+ hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
+ hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
+ hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
+ hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
+ hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
+ hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
+ hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
+ hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
+ hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
+ hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
+ hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
+ hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
+ hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
+ hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
+ hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
+ hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);
+
+ hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE);
+}
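alx_update_hw_stats() above only ever adds the freshly read register values onto the running totals. Per the comment added to struct alx_hw later in this patch, the MIB registers clear on read, so each read returns a delta, and assigning instead of accumulating would silently discard history; it is also why both stats entry points wrap the update in stats_lock. A self-contained sketch of the pattern with a fake clear-on-read counter:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_mib[1] = { 5 };	/* pretend hardware counter */

static uint32_t read_mib32(int reg)
{
	uint32_t val = fake_mib[reg];

	fake_mib[reg] = 0;		/* clear-on-read behaviour */
	return val;
}

int main(void)
{
	uint64_t rx_ok = 0;
	int i;

	for (i = 0; i < 3; i++) {
		fake_mib[0] += 7;	/* traffic arrives between polls */
		rx_ok += read_mib32(0);	/* accumulate, never assign */
	}
	printf("rx_ok = %llu\n", (unsigned long long)rx_ok);	/* 5 + 3*7 = 26 */
	return 0;
}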
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 96f3b4381e17..15548802d6f8 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -381,6 +381,73 @@ struct alx_rrd {
ALX_ISR_RX_Q6 | \
ALX_ISR_RX_Q7)
+/* Statistics counters collected by the MAC
+ *
+ * The order of the fields must match the strings in alx_gstrings_stats
+ * All stats fields should be u64
+ * See ethtool.c
+ */
+struct alx_hw_stats {
+ /* rx */
+ u64 rx_ok; /* good RX packets */
+ u64 rx_bcast; /* good RX broadcast packets */
+ u64 rx_mcast; /* good RX multicast packets */
+ u64 rx_pause; /* RX pause frames */
+ u64 rx_ctrl; /* RX control packets other than pause frames */
+ u64 rx_fcs_err; /* RX packets with bad FCS */
+ u64 rx_len_err; /* RX packets with length != actual size */
+ u64 rx_byte_cnt; /* good bytes received. FCS is NOT included */
+ u64 rx_runt; /* RX packets < 64 bytes with good FCS */
+ u64 rx_frag; /* RX packets < 64 bytes with bad FCS */
+ u64 rx_sz_64B; /* 64 byte RX packets */
+ u64 rx_sz_127B; /* 65-127 byte RX packets */
+ u64 rx_sz_255B; /* 128-255 byte RX packets */
+ u64 rx_sz_511B; /* 256-511 byte RX packets */
+ u64 rx_sz_1023B; /* 512-1023 byte RX packets */
+ u64 rx_sz_1518B; /* 1024-1518 byte RX packets */
+ u64 rx_sz_max; /* 1519 byte to MTU RX packets */
+ u64 rx_ov_sz; /* truncated RX packets, size > MTU */
+ u64 rx_ov_rxf; /* frames dropped due to RX FIFO overflow */
+ u64 rx_ov_rrd; /* frames dropped due to RRD overflow */
+ u64 rx_align_err; /* alignment errors */
+ u64 rx_bc_byte_cnt; /* RX broadcast bytes, excluding FCS */
+ u64 rx_mc_byte_cnt; /* RX multicast bytes, excluding FCS */
+ u64 rx_err_addr; /* packets dropped due to address filtering */
+
+ /* tx */
+ u64 tx_ok; /* good TX packets */
+ u64 tx_bcast; /* good TX broadcast packets */
+ u64 tx_mcast; /* good TX multicast packets */
+ u64 tx_pause; /* TX pause frames */
+ u64 tx_exc_defer; /* TX packets deferred excessively */
+ u64 tx_ctrl; /* TX control frames, excluding pause frames */
+ u64 tx_defer; /* TX packets deferred */
+ u64 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
+ u64 tx_sz_64B; /* 64 byte TX packets */
+ u64 tx_sz_127B; /* 65-127 byte TX packets */
+ u64 tx_sz_255B; /* 128-255 byte TX packets */
+ u64 tx_sz_511B; /* 256-511 byte TX packets */
+ u64 tx_sz_1023B; /* 512-1023 byte TX packets */
+ u64 tx_sz_1518B; /* 1024-1518 byte TX packets */
+ u64 tx_sz_max; /* 1519 byte to MTU TX packets */
+ u64 tx_single_col; /* packets TX after a single collision */
+ u64 tx_multi_col; /* packets TX after multiple collisions */
+ u64 tx_late_col; /* TX packets with late collisions */
+ u64 tx_abort_col; /* TX packets aborted w/excessive collisions */
+ u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
+ * or TRD FIFO underrun
+ */
+ u64 tx_trd_eop; /* reads beyond the EOP into the next frame
+ * when TRD was not written timely
+ */
+ u64 tx_len_err; /* TX packets where length != actual size */
+ u64 tx_trunc; /* TX packets truncated due to size > MTU */
+ u64 tx_bc_byte_cnt; /* broadcast bytes transmitted, excluding FCS */
+ u64 tx_mc_byte_cnt; /* multicast bytes transmitted, excluding FCS */
+ u64 update;
+};
+
+
/* maximum interrupt vectors for msix */
#define ALX_MAX_MSIX_INTRS 16
@@ -424,6 +491,9 @@ struct alx_hw {
/* PHY link patch flag */
bool lnk_patch;
+
+ /* cumulated stats from the hardware (registers are cleared on read) */
+ struct alx_hw_stats stats;
};
static inline int alx_hw_revision(struct alx_hw *hw)
@@ -491,6 +561,7 @@ bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
+void alx_update_hw_stats(struct alx_hw *hw);
static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
{
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c3c4c266b846..380d24922049 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1166,10 +1166,60 @@ static void alx_poll_controller(struct net_device *netdev)
}
#endif
+static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+ struct alx_hw_stats *hw_stats = &alx->hw.stats;
+
+ spin_lock(&alx->stats_lock);
+
+ alx_update_hw_stats(&alx->hw);
+
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_single_col +
+ hw_stats->tx_multi_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_ov_sz +
+ hw_stats->rx_ov_rrd +
+ hw_stats->rx_align_err +
+ hw_stats->rx_ov_rxf;
+
+ net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_dropped = hw_stats->rx_ov_rrd;
+
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
+
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+
+ spin_unlock(&alx->stats_lock);
+
+ return net_stats;
+}
+
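alx_get_stats64() above and alx_get_ethtool_stats() earlier share the same shape: take stats_lock, fold the latest hardware deltas into the accumulated totals, then copy a snapshot out. Because every MIB read consumes the hardware's delta and the totals are plain u64s, the update and the copy have to happen under one lock. A user-space sketch of that shape, with a pthread mutex standing in for the spinlock and illustrative counters:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t totals[2];		/* accumulated rx/tx counters */

static void update_hw_stats(void)
{
	/* In the driver this reads the clear-on-read MIB registers; here
	 * we just pretend one frame arrived since the last poll. */
	totals[0] += 1;
	totals[1] += 1;
}

static void get_snapshot(uint64_t out[2])
{
	pthread_mutex_lock(&stats_lock);
	update_hw_stats();		/* consume the hardware deltas once */
	memcpy(out, totals, sizeof(totals));
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	uint64_t snap[2];

	get_snapshot(snap);
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)snap[0], (unsigned long long)snap[1]);
	return 0;
}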
static const struct net_device_ops alx_netdev_ops = {
.ndo_open = alx_open,
.ndo_stop = alx_stop,
.ndo_start_xmit = alx_start_xmit,
+ .ndo_get_stats64 = alx_get_stats64,
.ndo_set_rx_mode = alx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = alx_set_mac_address,
@@ -1198,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used for descriptors.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA config, aborting\n");
- goto out_pci_disable;
- }
+ dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+ goto out_pci_disable;
}
}
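The probe hunk above folds the separate dma_set_mask()/dma_set_coherent_mask() calls into dma_set_mask_and_coherent(), which sets the streaming and the coherent mask in one call while keeping the constraint described in the context comment (one shared high-32-bit register, so a single aligned 4 GB descriptor window). A minimal sketch of the resulting probe-time pattern; kernel context assumed, not standalone-buildable:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer 64-bit descriptor addressing. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Otherwise fall back to a 32-bit mask. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "No usable DMA configuration\n");
	return err;
}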
@@ -1242,6 +1286,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
+ spin_lock_init(&alx->stats_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index e4358c98bc4e..af006b44b2a6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -404,15 +404,59 @@
/* MIB */
#define ALX_MIB_BASE 0x1700
+
#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_BCAST (ALX_MIB_BASE + 4)
+#define ALX_MIB_RX_MCAST (ALX_MIB_BASE + 8)
+#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12)
+#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16)
+#define ALX_MIB_RX_FCS_ERR (ALX_MIB_BASE + 20)
+#define ALX_MIB_RX_LEN_ERR (ALX_MIB_BASE + 24)
+#define ALX_MIB_RX_BYTE_CNT (ALX_MIB_BASE + 28)
+#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32)
+#define ALX_MIB_RX_FRAG (ALX_MIB_BASE + 36)
+#define ALX_MIB_RX_SZ_64B (ALX_MIB_BASE + 40)
+#define ALX_MIB_RX_SZ_127B (ALX_MIB_BASE + 44)
+#define ALX_MIB_RX_SZ_255B (ALX_MIB_BASE + 48)
+#define ALX_MIB_RX_SZ_511B (ALX_MIB_BASE + 52)
+#define ALX_MIB_RX_SZ_1023B (ALX_MIB_BASE + 56)
+#define ALX_MIB_RX_SZ_1518B (ALX_MIB_BASE + 60)
+#define ALX_MIB_RX_SZ_MAX (ALX_MIB_BASE + 64)
+#define ALX_MIB_RX_OV_SZ (ALX_MIB_BASE + 68)
+#define ALX_MIB_RX_OV_RXF (ALX_MIB_BASE + 72)
+#define ALX_MIB_RX_OV_RRD (ALX_MIB_BASE + 76)
+#define ALX_MIB_RX_ALIGN_ERR (ALX_MIB_BASE + 80)
+#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84)
+#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88)
#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+
#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_BCAST (ALX_MIB_BASE + 100)
+#define ALX_MIB_TX_MCAST (ALX_MIB_BASE + 104)
+#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108)
+#define ALX_MIB_TX_EXC_DEFER (ALX_MIB_BASE + 112)
+#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116)
+#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120)
+#define ALX_MIB_TX_BYTE_CNT (ALX_MIB_BASE + 124)
+#define ALX_MIB_TX_SZ_64B (ALX_MIB_BASE + 128)
+#define ALX_MIB_TX_SZ_127B (ALX_MIB_BASE + 132)
+#define ALX_MIB_TX_SZ_255B (ALX_MIB_BASE + 136)
+#define ALX_MIB_TX_SZ_511B (ALX_MIB_BASE + 140)
+#define ALX_MIB_TX_SZ_1023B (ALX_MIB_BASE + 144)
+#define ALX_MIB_TX_SZ_1518B (ALX_MIB_BASE + 148)
+#define ALX_MIB_TX_SZ_MAX (ALX_MIB_BASE + 152)
+#define ALX_MIB_TX_SINGLE_COL (ALX_MIB_BASE + 156)
+#define ALX_MIB_TX_MULTI_COL (ALX_MIB_BASE + 160)
+#define ALX_MIB_TX_LATE_COL (ALX_MIB_BASE + 164)
+#define ALX_MIB_TX_ABORT_COL (ALX_MIB_BASE + 168)
+#define ALX_MIB_TX_UNDERRUN (ALX_MIB_BASE + 172)
+#define ALX_MIB_TX_TRD_EOP (ALX_MIB_BASE + 176)
+#define ALX_MIB_TX_LEN_ERR (ALX_MIB_BASE + 180)
+#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184)
+#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188)
#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196)
-#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
-#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
-#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
-#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
#define ALX_ISR 0x1600
#define ALX_ISR_DIS BIT(31)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 7f9369a3b378..b9203d928938 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -22,7 +22,6 @@
#ifndef _ATL1C_H_
#define _ATL1C_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 29801750f239..4d3258dd0a88 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1500,31 +1500,40 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
+
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
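The atl1c_get_stats() hunk above reshuffles the error accounting: rx_rxf_ov now contributes to rx_errors, the tx_2_col term loses its factor of two in collisions, rx_over_errors/rx_missed_errors are replaced by rx_dropped = rx_rrd_ov, and the reported packet counts become good frames plus errored ones (the atl1e and atl1 hunks below make the same change). A worked example of that last composition, with made-up counter values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rx_ok = 1000, rx_fcs_err = 3, rx_len_err = 1, rx_rxf_ov = 2;
	uint64_t rx_errors = rx_fcs_err + rx_len_err + rx_rxf_ov;

	/* rx_ok counts only good frames; the errored ones are added back
	 * in when reporting rx_packets. */
	printf("rx_errors  = %llu\n", (unsigned long long)rx_errors);		/* 6 */
	printf("rx_packets = %llu\n", (unsigned long long)(rx_ok + rx_errors));	/* 1006 */
	return 0;
}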
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 1b0fe2d04a0e..0212dac7e23a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -23,7 +23,6 @@
#ifndef _ATL1E_H_
#define _ATL1E_H_
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7a73f3a9fcb5..422aab27ea1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1177,32 +1177,40 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
struct net_device_stats *net_stats = &netdev->stats;
- net_stats->rx_packets = hw_stats->rx_ok;
- net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
- hw_stats->tx_2_col * 2 +
- hw_stats->tx_late_col + hw_stats->tx_abort_col;
+ hw_stats->tx_2_col +
+ hw_stats->tx_late_col +
+ hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag +
+ hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err +
+ hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov +
+ hw_stats->rx_align_err +
+ hw_stats->rx_rxf_ov;
- net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
- hw_stats->rx_len_err + hw_stats->rx_sz_ov +
- hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
- net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->rx_dropped = hw_stats->rx_rrd_ov;
- net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+ net_stats->tx_errors = hw_stats->tx_late_col +
+ hw_stats->tx_abort_col +
+ hw_stats->tx_underrun +
+ hw_stats->tx_trunc;
- net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
- hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
+ net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+ net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
return net_stats;
}
@@ -2428,7 +2436,7 @@ err_reset:
err_register:
err_sw_init:
err_eeprom:
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
@@ -2466,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
atl1e_free_ring_resources(adapter);
atl1e_force_ps(&adapter->hw);
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 538211d6f7d9..287272dd69da 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1678,33 +1678,42 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct stats_msg_block *smb = adapter->smb.smb;
+ u64 new_rx_errors = smb->rx_frag +
+ smb->rx_fcs_err +
+ smb->rx_len_err +
+ smb->rx_sz_ov +
+ smb->rx_rxf_ov +
+ smb->rx_rrd_ov +
+ smb->rx_align_err;
+ u64 new_tx_errors = smb->tx_late_col +
+ smb->tx_abort_col +
+ smb->tx_underrun +
+ smb->tx_trunc;
+
/* Fill out the OS statistics structure */
- adapter->soft_stats.rx_packets += smb->rx_ok;
- adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
+ adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
adapter->soft_stats.multicast += smb->rx_mcast;
- adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
- smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+ adapter->soft_stats.collisions += smb->tx_1_col +
+ smb->tx_2_col +
+ smb->tx_late_col +
+ smb->tx_abort_col;
/* Rx Errors */
- adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
- smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
- smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_errors += new_rx_errors;
adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
adapter->soft_stats.rx_length_errors += smb->rx_len_err;
adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
- adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
- smb->rx_rxf_ov);
adapter->soft_stats.rx_pause += smb->rx_pause;
adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
/* Tx Errors */
- adapter->soft_stats.tx_errors += (smb->tx_late_col +
- smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_errors += new_tx_errors;
adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
adapter->soft_stats.tx_window_errors += smb->tx_late_col;
@@ -1718,23 +1727,18 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;
- netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
- netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
netdev->stats.multicast = adapter->soft_stats.multicast;
netdev->stats.collisions = adapter->soft_stats.collisions;
netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
- netdev->stats.rx_over_errors =
- adapter->soft_stats.rx_missed_errors;
netdev->stats.rx_length_errors =
adapter->soft_stats.rx_length_errors;
netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
netdev->stats.rx_frame_errors =
adapter->soft_stats.rx_frame_errors;
netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
- netdev->stats.rx_missed_errors =
- adapter->soft_stats.rx_missed_errors;
+ netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
netdev->stats.tx_aborted_errors =
@@ -1743,6 +1747,9 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.tx_window_errors;
netdev->stats.tx_carrier_errors =
adapter->soft_stats.tx_carrier_errors;
+
+ netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
+ netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
}
static void atl1_update_mailbox(struct atl1_adapter *adapter)
@@ -1872,7 +1879,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
adapter->rx_buffer_len);
if (unlikely(!skb)) {
/* Better luck next round */
- adapter->netdev->stats.rx_dropped++;
+ adapter->soft_stats.rx_dropped++;
break;
}
@@ -3122,7 +3129,8 @@ static void atl1_remove(struct pci_dev *pdev)
* from the BIOS during POST. If we've been messing with the MAC
* address, we need to save the permanent one.
*/
- if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
+ adapter->hw.perm_mac_addr)) {
memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
ETH_ALEN);
atl1_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 3bf79a56220d..34a58cd846a0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -666,6 +666,7 @@ struct atl1_sft_stats {
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
+ u64 rx_dropped;
u64 rx_frame_errors;
u64 rx_fifo_errors;
u64 rx_missed_errors;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 2fa5b86f139d..3f97d9fd0a71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -23,6 +23,7 @@ config B44
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
+ select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 90e54d5488dc..8a7bf7dad898 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
* Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* Distribute under GPL.
*/
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
+#include <linux/phy.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_readphy(bp, bp->phy_addr, reg, val);
@@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
return __b44_writephy(bp, bp->phy_addr, reg, val);
}
/* miilib interface */
-static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
u32 val;
struct b44 *bp = netdev_priv(dev);
@@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)
return val;
}
-static void b44_mii_write(struct net_device *dev, int phy_id, int location,
- int val)
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+ int val)
{
struct b44 *bp = netdev_priv(dev);
__b44_writephy(bp, phy_id, location, val);
}
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = bus->priv;
+ int rc = __b44_readphy(bp, phy_id, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ struct b44 *bp = bus->priv;
+ return __b44_writephy(bp, phy_id, location, val);
+}
+
static int b44_phy_reset(struct b44 *bp)
{
u32 val;
int err;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
if (err)
@@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)
b44_wap54g10_workaround(bp);
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
return 0;
if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
goto out;
@@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)
{
u32 bmsr, aux;
- if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
bp->flags |= B44_FLAG_100_BASE_T;
- bp->flags |= B44_FLAG_FULL_DUPLEX;
if (!netif_carrier_ok(bp->dev)) {
u32 val = br32(bp, B44_TX_CTRL);
- val |= TX_CTRL_DUPLEX;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
bw32(bp, B44_TX_CTRL, val);
netif_carrier_on(bp->dev);
b44_link_report(bp);
@@ -1315,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
br32(bp, B44_ENET_CTRL);
- bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ bp->flags |= B44_FLAG_EXTERNAL_PHY;
} else {
u32 val = br32(bp, B44_DEVCTRL);
@@ -1324,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
br32(bp, B44_DEVCTRL);
udelay(100);
}
- bp->flags |= B44_FLAG_INTERNAL_PHY;
+ bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
}
}
@@ -1339,7 +1360,10 @@ static void b44_halt(struct b44 *bp)
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
- b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+ else
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
/* bp->lock is held. */
@@ -1460,6 +1484,10 @@ static int b44_open(struct net_device *dev)
add_timer(&bp->timer);
b44_enable_ints(bp);
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_start(bp->phydev);
+
netif_start_queue(dev);
out:
return err;
@@ -1622,6 +1650,9 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_stop(bp->phydev);
+
napi_disable(&bp->napi);
del_timer_sync(&bp->timer);
@@ -1805,6 +1836,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ return phy_ethtool_gset(bp->phydev, cmd);
+ }
+
cmd->supported = (SUPPORTED_Autoneg);
cmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
@@ -1828,8 +1864,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (cmd->autoneg == AUTONEG_ENABLE)
@@ -1846,7 +1882,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed;
+ int ret;
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ spin_lock_irq(&bp->lock);
+ if (netif_running(dev))
+ b44_setup_phy(bp);
+
+ ret = phy_ethtool_sset(bp->phydev, cmd);
+
+ spin_unlock_irq(&bp->lock);
+
+ return ret;
+ }
+
+ speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -2076,7 +2128,6 @@ static const struct ethtool_ops b44_ethtool_ops = {
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mii_ioctl_data *data = if_mii(ifr);
struct b44 *bp = netdev_priv(dev);
int err = -EINVAL;
@@ -2084,7 +2135,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
goto out;
spin_lock_irq(&bp->lock);
- err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ BUG_ON(!bp->phydev);
+ err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ } else {
+ err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+ }
spin_unlock_irq(&bp->lock);
out:
return err;
@@ -2146,6 +2202,146 @@ static const struct net_device_ops b44_netdev_ops = {
#endif
};
+static void b44_adjust_link(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phydev;
+ bool status_changed = 0;
+
+ BUG_ON(!phydev);
+
+ if (bp->old_link != phydev->link) {
+ status_changed = 1;
+ bp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link) {
+ if ((phydev->duplex == DUPLEX_HALF) &&
+ (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = 1;
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+ } else if ((phydev->duplex == DUPLEX_FULL) &&
+ !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+ status_changed = 1;
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+ }
+
+ if (status_changed) {
+ u32 val = br32(bp, B44_TX_CTRL);
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
+ phy_print_status(phydev);
+ }
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus;
+ struct ssb_device *sdev = bp->sdev;
+ struct phy_device *phydev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ struct ssb_sprom *sprom = &sdev->bus->sprom;
+ int err;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ mii_bus->priv = bp;
+ mii_bus->read = b44_mdio_read_phylib;
+ mii_bus->write = b44_mdio_write_phylib;
+ mii_bus->name = "b44_eth_mii";
+ mii_bus->parent = sdev->dev;
+ mii_bus->phy_mask = ~(1 << bp->phy_addr);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+ mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mii_bus->irq) {
+ dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
+ goto err_out_mdiobus;
+ }
+
+ memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+ bp->mii_bus = mii_bus;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(sdev->dev, "failed to register MII bus\n");
+ goto err_out_mdiobus_irq;
+ }
+
+ if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+ (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+ dev_info(sdev->dev,
+ "could not find PHY at %i, use fixed one\n",
+ bp->phy_addr);
+
+ bp->phy_addr = 0;
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+ bp->phy_addr);
+ } else {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bp->phy_addr);
+ }
+
+ phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ dev_err(sdev->dev, "could not attach PHY at %i\n",
+ bp->phy_addr);
+ err = PTR_ERR(phydev);
+ goto err_out_mdiobus_unregister;
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII);
+ phydev->advertising = phydev->supported;
+
+ bp->phydev = phydev;
+ bp->old_link = 0;
+ bp->phy_addr = phydev->addr;
+
+ dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+
+err_out_mdiobus_unregister:
+ mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+ kfree(mii_bus->irq);
+
+err_out_mdiobus:
+ mdiobus_free(mii_bus);
+
+err_out:
+ return err;
+}
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+ struct mii_bus *mii_bus = bp->mii_bus;
+
+ phy_disconnect(bp->phydev);
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
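b44_register_phy_one() and b44_unregister_phy_one() above follow the usual phylib bring-up order: allocate the mii_bus, fill in the read/write callbacks and the polling irq table, register the bus, then phy_connect(); teardown runs the same steps in reverse. A skeleton of that ordering reduced to the calls, with the bus naming, callbacks and fixed-PHY fallback left out; kernel context assumed:

#include <linux/err.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct mii_bus *demo_bus;
static struct phy_device *demo_phy;

static int demo_phy_attach(struct net_device *ndev, const char *bus_id,
			   void (*adjust_link)(struct net_device *))
{
	int err;

	demo_bus = mdiobus_alloc();
	if (!demo_bus)
		return -ENOMEM;

	/* ...fill in demo_bus->read, ->write, ->name, ->id and ->priv... */

	demo_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!demo_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	memset(demo_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);

	err = mdiobus_register(demo_bus);
	if (err)
		goto err_free_irq;

	demo_phy = phy_connect(ndev, bus_id, adjust_link,
			       PHY_INTERFACE_MODE_MII);
	if (IS_ERR(demo_phy)) {
		err = PTR_ERR(demo_phy);
		goto err_unregister;
	}
	return 0;

err_unregister:
	mdiobus_unregister(demo_bus);
err_free_irq:
	kfree(demo_bus->irq);
err_free_bus:
	mdiobus_free(demo_bus);
	return err;
}

static void demo_phy_detach(void)
{
	phy_disconnect(demo_phy);
	mdiobus_unregister(demo_bus);
	kfree(demo_bus->irq);
	mdiobus_free(demo_bus);
}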
+
static int b44_init_one(struct ssb_device *sdev,
const struct ssb_device_id *ent)
{
@@ -2206,9 +2402,15 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_powerdown;
}
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+ err = -ENODEV;
+ goto err_out_powerdown;
+ }
+
bp->mii_if.dev = dev;
- bp->mii_if.mdio_read = b44_mii_read;
- bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.mdio_read = b44_mdio_read_mii;
+ bp->mii_if.mdio_write = b44_mdio_write_mii;
bp->mii_if.phy_id = bp->phy_addr;
bp->mii_if.phy_id_mask = 0x1f;
bp->mii_if.reg_num_mask = 0x1f;
@@ -2236,13 +2438,26 @@ static int b44_init_one(struct ssb_device *sdev,
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
/* do a phy reset to test if there is an active phy */
- if (b44_phy_reset(bp) < 0)
- bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+ err = b44_phy_reset(bp);
+ if (err < 0) {
+ dev_err(sdev->dev, "phy reset failed\n");
+ goto err_out_unregister_netdev;
+ }
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+ err = b44_register_phy_one(bp);
+ if (err) {
+ dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+ goto err_out_unregister_netdev;
+ }
+ }
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
@@ -2256,8 +2471,11 @@ out:
static void b44_remove_one(struct ssb_device *sdev)
{
struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
unregister_netdev(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus);
free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 8993d72f0420..3e9c3fc7591b 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -280,9 +280,10 @@ struct ring_info {
dma_addr_t mapping;
};
-#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_PHY 30
-#define B44_MDC_RATIO 5000000
+#define B44_MCAST_TABLE_SIZE 32
+#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
+#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
@@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE
struct u64_stats_sync syncp;
};
+#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */
+#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */
+
struct ssb_device;
struct b44 {
@@ -376,7 +380,7 @@ struct b44 {
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
-#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_EXTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000
@@ -396,6 +400,9 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int old_link;
struct mii_if_info mii_if;
};
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e2aa09ce6af7..0297a79a38e1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_TX_BL_MASK;
+ ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_MR_MASK;
+ ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PC_MASK;
+ ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_TX_PT_MASK;
+ ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+ }
ctl |= BGMAC_DMA_TX_ENABLE;
ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
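The rev >= 4 branch above programs the BL, MR, PC and PT fields of the DMA TX control word with the usual clear-the-field-then-or-in-the-value sequence. That read-modify-write pattern in isolation, with illustrative field definitions rather than the real BGMAC_DMA_TX_* ones:

#include <stdio.h>
#include <stdint.h>

#define DEMO_BL_MASK	0x003f0000	/* burst-length field */
#define DEMO_BL_SHIFT	16
#define DEMO_BL_128	0x7

int main(void)
{
	uint32_t ctl = 0x12345678;	/* pretend register readback */

	ctl &= ~DEMO_BL_MASK;			/* clear the field */
	ctl |= DEMO_BL_128 << DEMO_BL_SHIFT;	/* set the new value */
	printf("ctl = 0x%08x\n", ctl);		/* 0x12075678 */
	return 0;
}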
@@ -240,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ if (bgmac->core->id.rev >= 4) {
+ ctl &= ~BGMAC_DMA_RX_BL_MASK;
+ ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PC_MASK;
+ ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+ ctl &= ~BGMAC_DMA_RX_PT_MASK;
+ ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+ }
ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
@@ -682,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
return 0;
}
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
-static void bgmac_phy_force(struct bgmac *bgmac)
-{
- u16 ctl;
- u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
- BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (bgmac->autoneg)
- return;
-
- ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
- ctl &= mask;
- if (bgmac->full_duplex)
- ctl |= BGMAC_PHY_CTL_DUPLEX;
- if (bgmac->speed == BGMAC_SPEED_100)
- ctl |= BGMAC_PHY_CTL_SPEED_100;
- else if (bgmac->speed == BGMAC_SPEED_1000)
- ctl |= BGMAC_PHY_CTL_SPEED_1000;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
-static void bgmac_phy_advertise(struct bgmac *bgmac)
-{
- u16 adv;
-
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- if (!bgmac->autoneg)
- return;
-
- /* Adv selected 10/100 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
- adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
- BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10HALF;
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
- adv |= BGMAC_PHY_ADV_10FULL;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
- adv |= BGMAC_PHY_ADV_100FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
-
- /* Adv selected 1000 speeds */
- adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
- adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
- if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000HALF;
- if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
- adv |= BGMAC_PHY_ADV2_1000FULL;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
-
- /* Restart */
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
- BGMAC_PHY_CTL_RESTART);
-}
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
@@ -789,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)
if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
return;
- bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
- BGMAC_PHY_CTL_RESET);
+ bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
- BGMAC_PHY_CTL_RESET)
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
bgmac_err(bgmac, "PHY reset failed\n");
bgmac_phy_init(bgmac);
}
@@ -811,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
udelay(2);
}
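This hunk (and the matching BGMAC_CMDCFG_SR users below) keys the software-reset bit off the core revision, since the bit apparently sits at a different position in CMDCFG on newer core revisions. A sketch of such a revision-keyed macro, with made-up bit positions:

#include <stdio.h>

#define DEMO_CMDCFG_SR_OLD	0x00000800	/* bit used on older cores */
#define DEMO_CMDCFG_SR_NEW	0x00002000	/* bit used on newer cores */
#define DEMO_CMDCFG_SR(rev)	((rev) >= 4 ? DEMO_CMDCFG_SR_NEW : DEMO_CMDCFG_SR_OLD)

int main(void)
{
	printf("rev 2 SR bit: 0x%08x\n", DEMO_CMDCFG_SR(2));
	printf("rev 4 SR bit: 0x%08x\n", DEMO_CMDCFG_SR(4));
	return 0;
}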
@@ -876,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
-static void bgmac_speed(struct bgmac *bgmac, int speed)
+static void bgmac_mac_speed(struct bgmac *bgmac)
{
u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
u32 set = 0;
- if (speed & BGMAC_SPEED_10)
+ switch (bgmac->mac_speed) {
+ case SPEED_10:
set |= BGMAC_CMDCFG_ES_10;
- if (speed & BGMAC_SPEED_100)
+ break;
+ case SPEED_100:
set |= BGMAC_CMDCFG_ES_100;
- if (speed & BGMAC_SPEED_1000)
+ break;
+ case SPEED_1000:
set |= BGMAC_CMDCFG_ES_1000;
- if (!bgmac->full_duplex)
+ break;
+ case SPEED_2500:
+ set |= BGMAC_CMDCFG_ES_2500;
+ break;
+ default:
+ bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ }
+
+ if (bgmac->mac_duplex == DUPLEX_HALF)
set |= BGMAC_CMDCFG_HD;
+
bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
- BGMAC_DS_MM_SHIFT;
- if (imode == 0 || imode == 1) {
- if (bgmac->autoneg)
- bgmac_speed(bgmac, BGMAC_SPEED_100);
- else
- bgmac_speed(bgmac, bgmac->speed);
+ struct bcma_device *core = bgmac->core;
+ struct bcma_chipinfo *ci = &core->bus->chipinfo;
+ u8 imode;
+
+ if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+ ci->id == BCMA_CHIP_ID_BCM53018) {
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
+ bgmac->mac_speed = SPEED_2500;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ } else {
+ imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+ BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ bgmac->mac_speed = SPEED_100;
+ bgmac->mac_duplex = DUPLEX_FULL;
+ bgmac_mac_speed(bgmac);
+ }
}
}
@@ -910,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_device *core = bgmac->core;
struct bcma_bus *bus = core->bus;
struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags = 0;
+ u32 flags;
u32 iost;
int i;
@@ -933,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+ if (ci->id != BCMA_CHIP_ID_BCM4707) {
+ flags = 0;
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bcma_core_enable(core, flags);
}
- bcma_core_enable(core, flags);
-
- if (core->id.rev > 2) {
- bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ /* Request Misc PLL for corerev > 2 */
+ if (core->id.rev > 2 &&
+ ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ bgmac_set(bgmac, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+ BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
ci->id == BCMA_CHIP_ID_BCM53572) {
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
@@ -967,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
@@ -1007,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR,
+ BGMAC_CMDCFG_SR(core->id.rev),
false);
+ bgmac->mac_speed = SPEED_UNKNOWN;
+ bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
@@ -1048,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR, true);
+ BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
@@ -1077,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)
break;
}
- rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
- rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
- mdp = (bp_clk * 128 / 1000) - 3;
- rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
- bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM53018) {
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+ 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+ }
}
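
A worked example of the rxq_ctl computation above, assuming (for illustration only) a 320 MHz backplane clock reported by bcma_pmu_get_bus_clock():

    bp_clk = 320;                        /* MHz, after the / 1000000          */
    mdp    = (320 * 128 / 1000) - 3;     /* 40960 / 1000 = 40, so mdp == 37   */
    /* 37 is then shifted into place at BGMAC_RXQ_CTL_MDP_SHIFT (bit 24)      */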
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
@@ -1108,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
- if (!bgmac->autoneg) {
- bgmac_speed(bgmac, bgmac->speed);
- bgmac_phy_force(bgmac);
- } else if (bgmac->speed) { /* if there is anything to adv */
- bgmac_phy_advertise(bgmac);
- }
-
if (full_init) {
bgmac_dma_init(bgmac);
if (1) /* FIXME: is there any case we don't want IRQs? */
@@ -1204,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)
}
napi_enable(&bgmac->napi);
+ phy_start(bgmac->phy_dev);
+
netif_carrier_on(net_dev);
err_out:
@@ -1216,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
+ phy_stop(bgmac->phy_dev);
+
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
free_irq(bgmac->core->irq, net_dev);
@@ -1252,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = bgmac->phyaddr;
- /* fallthru */
- case SIOCGMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- data->val_out = bgmac_phy_read(bgmac, data->phy_id,
- data->reg_num & 0x1f);
- return 0;
- case SIOCSMIIREG:
- if (!netif_running(net_dev))
- return -EAGAIN;
- bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
- data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+
+ if (!netif_running(net_dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1294,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,
{
struct bgmac *bgmac = netdev_priv(net_dev);
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg;
-
- if (bgmac->autoneg) {
- WARN_ON(cmd->advertising);
- if (bgmac->full_duplex) {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- if (bgmac->speed & BGMAC_SPEED_10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (bgmac->speed & BGMAC_SPEED_1000)
- cmd->advertising |= ADVERTISED_1000baseT_Half;
- }
- } else {
- switch (bgmac->speed) {
- case BGMAC_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
- break;
- case BGMAC_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
- break;
- case BGMAC_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- break;
- }
- }
-
- cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-
- cmd->autoneg = bgmac->autoneg;
-
- return 0;
+ return phy_ethtool_gset(bgmac->phy_dev, cmd);
}
-#if 0
static int bgmac_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- return -1;
+ return phy_ethtool_sset(bgmac->phy_dev, cmd);
}
-#endif
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
@@ -1359,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
static const struct ethtool_ops bgmac_ethtool_ops = {
.get_settings = bgmac_get_settings,
+ .set_settings = bgmac_set_settings,
.get_drvinfo = bgmac_get_drvinfo,
};
@@ -1377,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct phy_device *phy_dev = bgmac->phy_dev;
+ bool update = false;
+
+ if (phy_dev->link) {
+ if (phy_dev->speed != bgmac->mac_speed) {
+ bgmac->mac_speed = phy_dev->speed;
+ update = true;
+ }
+
+ if (phy_dev->duplex != bgmac->mac_duplex) {
+ bgmac->mac_duplex = phy_dev->duplex;
+ update = true;
+ }
+ }
+
+ if (update) {
+ bgmac_mac_speed(bgmac);
+ phy_print_status(phy_dev);
+ }
+}
+
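
phylib invokes bgmac_adjust_link() from the PHY state machine started by the phy_start() call added to bgmac_open() earlier in this patch; because the handler reprograms CMDCFG only when the reported speed or duplex differs from the cached mac_speed/mac_duplex, a link transition at an unchanged speed leaves the MAC configuration alone.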
static int bgmac_mii_register(struct bgmac *bgmac)
{
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ char bus_id[MII_BUS_ID_SIZE + 3];
int i, err = 0;
mii_bus = mdiobus_alloc();
@@ -1411,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)
bgmac->mii_bus = mii_bus;
+ /* Connect to the PHY */
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ bgmac_err(bgmac, "PHY connecton failed\n");
+ err = PTR_ERR(phy_dev);
+ goto err_unregister_bus;
+ }
+ bgmac->phy_dev = phy_dev;
+
return err;
+err_unregister_bus:
+ mdiobus_unregister(mii_bus);
err_free_irq:
kfree(mii_bus->irq);
err_free_bus:
@@ -1467,9 +1443,6 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
/* Defaults */
- bgmac->autoneg = true;
- bgmac->full_duplex = true;
- bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
/* On BCM4706 we need common core to access PHY */
@@ -1500,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)
bgmac_chip_reset(bgmac);
+ /* For Northstar, we have to take all GMAC cores out of reset */
+ if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
+ core->id.id == BCMA_CHIP_ID_BCM53018) {
+ struct bcma_device *ns_core;
+ int ns_gmac;
+
+ /* Northstar has 4 GMAC cores */
+ for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+ /* Northstar requires all GMACs to be reset before
+ * any of them is accessed. bgmac_chip_reset() calls
+ * bcma_core_enable() for this core only, so reset
+ * the remaining three GMACs here.
+ */
+ ns_core = bcma_find_core_unit(core->bus,
+ BCMA_CORE_MAC_GBIT,
+ ns_gmac);
+ if (ns_core && !bcma_core_is_enabled(ns_core))
+ bcma_core_enable(ns_core, 0);
+ }
+ }
+
err = bgmac_dma_alloc(bgmac);
if (err) {
bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
@@ -1524,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core)
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
- err = -ENOTSUPP;
goto err_dma_free;
}
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
- err = -ENOTSUPP;
goto err_mii_unregister;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 66c8afbdc8c7..89fa5bc69c51 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -95,7 +95,11 @@
#define BGMAC_RXQ_CTL_MDP_SHIFT 24
#define BGMAC_GPIO_SELECT 0x194
#define BGMAC_GPIO_OUTPUT_EN 0x198
-/* For 0x1e0 see BCMA_CLKCTLST */
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
+#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
+
#define BGMAC_HW_WAR 0x1e4
#define BGMAC_PWR_CTL 0x1e8
#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
@@ -185,6 +189,7 @@
#define BGMAC_CMDCFG_ES_10 0x00000000
#define BGMAC_CMDCFG_ES_100 0x00000004
#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_ES_2500 0x0000000C
#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
#define BGMAC_CMDCFG_PAD_EN 0x00000020
#define BGMAC_CMDCFG_CF 0x00000040
@@ -193,7 +198,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
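
The BGMAC_CMDCFG_SR() helper above selects the software-reset bit by GMAC core revision, for example:

    BGMAC_CMDCFG_SR(4)   /* == BGMAC_CMDCFG_SR_REV4 == 0x00002000 (core rev 4)     */
    BGMAC_CMDCFG_SR(2)   /* == BGMAC_CMDCFG_SR_REV0 == 0x00000800 (all other revs) */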
@@ -216,27 +223,6 @@
#define BGMAC_RX_STATUS 0xb38
#define BGMAC_TX_STATUS 0xb3c
-#define BGMAC_PHY_CTL 0x00
-#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
-#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
-#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
-#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
-#define BGMAC_PHY_CTL_SPEED 0x2000
-#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
-#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
-/* Helpers */
-#define BGMAC_PHY_CTL_SPEED_10 0
-#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
-#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
-#define BGMAC_PHY_ADV 0x04
-#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
-#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
-#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
-#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
-#define BGMAC_PHY_ADV2 0x09
-#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
-#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
-
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
@@ -254,9 +240,34 @@
#define BGMAC_DMA_TX_SUSPEND 0x00000002
#define BGMAC_DMA_TX_LOOPBACK 0x00000004
#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_TX_MR_SHIFT 6
+#define BGMAC_DMA_TX_MR_1 0
+#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_TX_BL_SHIFT 18
+#define BGMAC_DMA_TX_BL_16 0
+#define BGMAC_DMA_TX_BL_32 1
+#define BGMAC_DMA_TX_BL_64 2
+#define BGMAC_DMA_TX_BL_128 3
+#define BGMAC_DMA_TX_BL_256 4
+#define BGMAC_DMA_TX_BL_512 5
+#define BGMAC_DMA_TX_BL_1024 6
+#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_TX_PC_SHIFT 21
+#define BGMAC_DMA_TX_PC_0 0
+#define BGMAC_DMA_TX_PC_4 1
+#define BGMAC_DMA_TX_PC_8 2
+#define BGMAC_DMA_TX_PC_16 3
+#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_TX_PT_SHIFT 24
+#define BGMAC_DMA_TX_PT_1 0
+#define BGMAC_DMA_TX_PT_2 1
+#define BGMAC_DMA_TX_PT_4 2
+#define BGMAC_DMA_TX_PT_8 3
#define BGMAC_DMA_TX_INDEX 0x04
#define BGMAC_DMA_TX_RINGLO 0x08
#define BGMAC_DMA_TX_RINGHI 0x0C
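
The new MR/BL/PC/PT fields are multi-bit fields, so a value must be shifted under its mask rather than ORed in directly. A minimal sketch of programming the TX control register with them; the register offset name (BGMAC_DMA_TX_CTL) and ring->mmio_base are assumptions for illustration, not part of this hunk:

    u32 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
    ctl &= ~BGMAC_DMA_TX_BL_MASK;
    ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;   /* 128-byte bursts    */
    ctl &= ~BGMAC_DMA_TX_PC_MASK;
    ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;    /* prefetch 16 descs  */
    bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);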
@@ -284,8 +295,33 @@
#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
+#define BGMAC_DMA_RX_MR_SHIFT 6
+#define BGMAC_DMA_RX_MR_1 0
+#define BGMAC_DMA_RX_MR_2 1
#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
+#define BGMAC_DMA_RX_BL_SHIFT 18
+#define BGMAC_DMA_RX_BL_16 0
+#define BGMAC_DMA_RX_BL_32 1
+#define BGMAC_DMA_RX_BL_64 2
+#define BGMAC_DMA_RX_BL_128 3
+#define BGMAC_DMA_RX_BL_256 4
+#define BGMAC_DMA_RX_BL_512 5
+#define BGMAC_DMA_RX_BL_1024 6
+#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
+#define BGMAC_DMA_RX_PC_SHIFT 21
+#define BGMAC_DMA_RX_PC_0 0
+#define BGMAC_DMA_RX_PC_4 1
+#define BGMAC_DMA_RX_PC_8 2
+#define BGMAC_DMA_RX_PC_16 3
+#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
+#define BGMAC_DMA_RX_PT_SHIFT 24
+#define BGMAC_DMA_RX_PT_1 0
+#define BGMAC_DMA_RX_PT_2 1
+#define BGMAC_DMA_RX_PT_4 2
+#define BGMAC_DMA_RX_PT_8 3
#define BGMAC_DMA_RX_INDEX 0x24
#define BGMAC_DMA_RX_RINGLO 0x28
#define BGMAC_DMA_RX_RINGHI 0x2C
@@ -342,10 +378,6 @@
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
-#define BGMAC_SPEED_10 0x0001
-#define BGMAC_SPEED_100 0x0002
-#define BGMAC_SPEED_1000 0x0004
-
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
@@ -402,6 +434,7 @@ struct bgmac {
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -416,10 +449,9 @@ struct bgmac {
u32 int_mask;
u32 int_status;
- /* Speed-related */
- int speed;
- bool autoneg;
- bool full_duplex;
+ /* Current MAC state */
+ int mac_speed;
+ int mac_duplex;
u8 phyaddr;
bool has_robosw;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d9980ad00b4b..6c9e1c9bdeb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -58,8 +57,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.4"
-#define DRV_MODULE_RELDATE "Aug 05, 2013"
+#define DRV_MODULE_VERSION "2.2.5"
+#define DRV_MODULE_RELDATE "December 20, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -86,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
@@ -1197,6 +1196,8 @@ bnx2_copper_linkup(struct bnx2 *bp)
{
u32 bmcr;
+ bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (bmcr & BMCR_ANENABLE) {
u32 local_adv, remote_adv, common;
@@ -1255,6 +1256,14 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
}
+ if (bp->link_up) {
+ u32 ext_status;
+
+ bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+ if (ext_status & EXT_STATUS_MDIX)
+ bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+ }
+
return 0;
}
@@ -2048,29 +2057,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
- u32 bmcr;
+ u32 bmcr, adv_reg, new_adv = 0;
u32 new_bmcr;
bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+ adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+ new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
if (bp->autoneg & AUTONEG_SPEED) {
- u32 adv_reg, adv1000_reg;
- u32 new_adv = 0;
+ u32 adv1000_reg;
u32 new_adv1000 = 0;
- bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
- adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
+ new_adv |= bnx2_phy_get_pause_adv(bp);
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
- new_adv |= ADVERTISE_CSMA;
- new_adv |= bnx2_phy_get_pause_adv(bp);
-
new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
-
if ((adv1000_reg != new_adv1000) ||
(adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
@@ -2090,6 +2097,10 @@ __acquires(&bp->phy_lock)
return 0;
}
+ /* advertise nothing when forcing speed */
+ if (adv_reg != new_adv)
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
new_bmcr = 0;
if (bp->req_line_speed == SPEED_100) {
new_bmcr |= BMCR_SPEED100;
@@ -2341,9 +2352,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
}
/* ethernet@wirespeed */
- bnx2_write_phy(bp, 0x18, 0x7007);
- bnx2_read_phy(bp, 0x18, &val);
- bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+ bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+ val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+ /* auto-mdix */
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+ val |= AUX_CTL_MISC_CTL_AUTOMDIX;
+
+ bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
return 0;
}
@@ -2490,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
bp->fw_wr_seq++;
msg_data |= bp->fw_wr_seq;
+ bp->fw_last_msg = msg_data;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
@@ -3234,7 +3252,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if ((bp->dev->features & NETIF_F_RXHASH) &&
((status & L2_FHDR_STATUS_USE_RXHASH) ==
L2_FHDR_STATUS_USE_RXHASH))
- skb->rxhash = rx_hdr->l2_fhdr_hash;
+ skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
napi_gro_receive(&bnapi->napi, skb);
@@ -3982,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp)
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+ u32 val;
+
+ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ return;
+ }
+ /* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+ val | BNX2_PORT_FEATURE_ASF_ENABLED);
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+ }
}
@@ -4015,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
if (bp->wol)
pci_set_power_state(bp->pdev, PCI_D3hot);
- } else {
- pci_set_power_state(bp->pdev, PCI_D3hot);
+ break;
+
}
+ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ u32 val;
+
+ /* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ val &= ~BNX2_CONDITION_PM_STATE_MASK;
+ val |= BNX2_CONDITION_PM_STATE_UNPREP;
+ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+ }
+ pci_set_power_state(bp->pdev, PCI_D3hot);
/* No more memory access after this point until
* device is brought back to D0.
@@ -6865,6 +6912,12 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, bp->line_speed);
cmd->duplex = bp->duplex;
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
}
else {
ethtool_cmd_speed_set(cmd, -1);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d23e56b..e341bc366fa5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6471,6 +6471,15 @@ struct l2_fhdr {
#define BCM5708S_TX_ACTL3 0x17
+#define MII_BNX2_EXT_STATUS 0x11
+#define EXT_STATUS_MDIX (1 << 13)
+
+#define MII_BNX2_AUX_CTL 0x18
+#define AUX_CTL_MISC_CTL 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX (1 << 9)
+#define AUX_CTL_MISC_CTL_WR (1 << 15)
+
#define MII_BNX2_DSP_RW_PORT 0x15
#define MII_BNX2_DSP_ADDRESS 0x17
#define MII_BNX2_DSP_EXPAND_REG 0x0f00
@@ -6844,6 +6853,7 @@ struct bnx2 {
#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
+#define BNX2_PHY_FLAG_MDIX 0x00004000
u32 mii_bmcr;
u32 mii_bmsr;
@@ -6890,6 +6900,7 @@ struct bnx2 {
u16 fw_wr_seq;
u16 fw_drv_pulse_wr_seq;
+ u32 fw_last_msg;
int rx_max_ring;
int rx_ring_size;
@@ -7396,6 +7407,10 @@ struct bnx2_rv2p_fw_file {
#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ec6119089b82..391f29ef6d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -472,7 +472,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -1566,6 +1566,7 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
@@ -1573,6 +1574,7 @@ struct bnx2x {
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -2080,7 +2082,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -2463,7 +2464,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index bf811565ee24..dbcff509dc3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -30,6 +30,43 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, NAPI_POLL_WEIGHT);
+ napi_hash_add(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ return bnx2x_num_queues ?
+ min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
+}
+
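+
A worked example of bnx2x_calc_num_queues() above, under assumed values: with the num_queues module parameter left at 0, netif_get_num_default_rss_queues() returning 8 and BNX2X_MAX_QUEUES(bp) being 15, the result is min(8, 15) = 8; with num_queues=4 it would be min(4, 15) = 4.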
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -145,7 +182,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
}
}
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -359,7 +396,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
- bool *l4_rxhash)
+ enum pkt_hash_types *rxhash_type)
{
/* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -367,11 +404,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
enum eth_rss_hash_type htype;
htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
- *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
- (htype == TCP_IPV6_HASH_TYPE);
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
return le32_to_cpu(cqe->rss_hash_result);
}
- *l4_rxhash = false;
+ *rxhash_type = PKT_HASH_TYPE_NONE;
return 0;
}
@@ -425,7 +464,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -733,8 +772,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
- skb->rxhash = tpa_info->rxhash;
- skb->l4_rxhash = tpa_info->l4_rxhash;
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -817,7 +855,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
struct bnx2x *bp = fp->bp;
u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
@@ -851,7 +889,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
- bool l4_rxhash;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -992,8 +1031,8 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
- skb->l4_rxhash = l4_rxhash;
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
skb_checksum_none_assert(skb);
@@ -1486,7 +1525,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
bnx2x_free_tx_skbs_cnic(bp);
bnx2x_free_rx_skbs_cnic(bp);
@@ -1834,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1856,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -2265,7 +2304,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
* virtualized environments a pf from another VM may have already
* initialized the device including loading FW
*/
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2284,8 +2323,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ if (print_err)
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+ loaded_fw, my_fw);
+ else
+ BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+ loaded_fw, my_fw);
return -EBUSY;
}
}
@@ -2298,16 +2341,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
return FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
+ else if (bnx2x_load_count[path][1 + port] == 1)
return FW_MSG_CODE_DRV_LOAD_PORT;
else
return FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -2600,7 +2643,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
/* what did mcp say? */
- rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
@@ -3065,7 +3108,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/*
* net_device service functions
*/
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
u8 cos;
@@ -3832,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_type);
}
- /* Add the macs to the parsing BD this is a vf */
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
if (IS_VF(bp)) {
/* override GRE parameters in BD */
bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3844,6 +3889,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
}
SET_FLAG(pbd_e2_parsing_data,
@@ -4192,7 +4242,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
int i;
for_each_cnic_queue(bp, i)
@@ -4406,7 +4456,7 @@ alloc_mem_err:
return 0;
}
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
/* FCoE */
@@ -4419,7 +4469,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
return 0;
}
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 41f3ca5ad972..a89a40f88c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -26,10 +26,8 @@
#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -417,35 +415,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
-void bnx2x_set_rx_mode(struct net_device *dev);
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp: driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp: driver handle
- * @cl_id: client id
- * @rx_mode_flags: rx mode configuration
- * @rx_accept_flags: rx accept configuration
- * @tx_accept_flags: tx accept configuration (tx switch)
- * @ramrod_flags: ramrod configuration
- */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
-
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
@@ -525,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -565,9 +536,6 @@ int bnx2x_reload_if_running(struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -578,13 +546,9 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
@@ -608,15 +572,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * bnx2x_poll - NAPI callback
- *
- * @napi: napi structure
- * @budget:
- *
- */
-int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/**
* bnx2x_low_latency_recv - LL callback
*
* @napi: napi structure
@@ -862,30 +817,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
- int i;
-
- /* Add NAPI objects */
- for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
- napi_hash_add(&bnx2x_fp(bp, i, napi));
- }
-}
-
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
int i;
@@ -919,14 +850,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
}
}
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
- return num_queues ?
- min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
-}
-
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
@@ -1013,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
@@ -1173,8 +1096,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
-
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
int txq_index, __le16 *tx_cons_sb,
@@ -1207,47 +1128,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
return bp->igu_base_sb;
}
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- unsigned long q_type = 0;
-
- bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
- bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
- BNX2X_FCOE_ETH_CL_ID_IDX);
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
- bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
- bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
- fp);
-
- DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
- /* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
- /* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
- bnx2x_rx_ustorm_prods_offset(fp);
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- /* No multi-CoS for FCoE L2 client */
- BUG_ON(fp->max_cos != 1);
-
- bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
- &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
- bnx2x_sp_mapping(bp, q_rdata), q_type);
-
- DP(NETIF_MSG_IFUP,
- "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
-}
-
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 32d0f1435fb4..38fc794c1655 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- switch (cmd->port) {
- case PORT_TP:
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_TP ||
- bp->port.supported[1] & SUPPORTED_TP)) {
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
- }
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- break;
- case PORT_FIBRE:
- case PORT_DA:
- if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- break; /* no port change */
-
- if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
- bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ if (cmd->port != bnx2x_get_port_type(bp)) {
+ switch (cmd->port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- bp->link_params.multi_phy_config &=
- ~PORT_HW_CFG_PHY_SELECTION_MASK;
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED)
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
- else
- bp->link_params.multi_phy_config |=
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
- break;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
- return -EINVAL;
}
/* Save new config in case command complete successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
@@ -1639,6 +1637,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
memcpy(&val, data_buf, 4);
+ /* Note that unlike bnx2x_nvram_read_dword(), this does not
+ * convert val with be32_to_cpu(), so data flips if the eeprom
+ * is read and then written back. This behaviour is kept because
+ * existing tools rely on it and would break if it were changed.
+ */
rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
/* advance to the next dword */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 11fc79585491..9b6b3d7304b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
(_bank + (_addr & 0xf)), \
_val)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
udelay(30);
}
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
- u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 val_xon = 0;
- u32 val_xoff = 0;
-
- DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
- /* PFC received frames */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
- pfc_frames_received[0] = val_xon + val_xoff;
-
- /* PFC received sent */
- val_xoff = REG_RD(bp, emac_base +
- EMAC_REG_RX_PFC_STATS_XOFF_SENT);
- val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
- val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
- val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
- pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
-{
- /* Read pfc statistic */
- struct bnx2x *bp = params->bp;
-
- DP(NETIF_MSG_LINK, "pfc statistic\n");
-
- if (!vars->link_up)
- return;
-
- if (vars->mac_type == MAC_TYPE_EMAC) {
- DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
- bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
- }
-}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
@@ -8648,8 +8602,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
}
}
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
@@ -13413,9 +13367,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
* a fault, for example, due to break in the TX side of fiber.
*
******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars,
- u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
{
struct bnx2x *bp = params->bp;
u32 lss_status = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 4df45234fdc0..389f5f8cb0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2]);
+
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params);
-
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
-int bnx2x_check_half_open_conn(struct link_params *params,
- struct link_vars *vars, u8 notify);
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0067b975873f..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -93,30 +94,30 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-int int_mode;
-module_param(int_mode, int, 0);
+static int int_mode;
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
@@ -278,6 +279,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declaration */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
/****************************************************************************
* General service functions
****************************************************************************/
@@ -3000,6 +3007,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
@@ -3297,6 +3307,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
ether_stat->txq_size = bp->tx_ring_size;
ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
}
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -5852,11 +5866,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5964,7 +5978,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* called with netif_addr_lock_bh() */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
@@ -6160,6 +6174,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
if (!NO_FCOE(bp))
@@ -8732,16 +8787,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
int path = BP_PATH(bp);
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]--;
- load_count[path][1 + port]--;
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 0)
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[path][1 + port] == 0)
+ else if (bnx2x_load_count[path][1 + port] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
else
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -9767,7 +9822,7 @@ period_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9854,6 +9909,64 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+ u8 major, minor, version;
+ u32 fw;
+
+ /* Must check that FW is loaded */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+ BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+ return false;
+ }
+
+ /* Read Currently loaded FW version */
+ fw = REG_RD(bp, XSEM_REG_PRAM);
+ major = fw & 0xff;
+ minor = (fw >> 0x8) & 0xff;
+ version = (fw >> 0x10) & 0xff;
+ BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+ fw, major, minor, version);
+
+ if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor > BCM_5710_UNDI_FW_MF_MINOR))
+ return true;
+
+ if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+ (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+ (version >= BCM_5710_UNDI_FW_MF_VERS))
+ return true;
+
+ return false;
+}
+
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+ int i;
+
+ /* Due to legacy (FW) code, the first function on each engine has a
+ * different offset macro from the rest of the functions.
+ * Setting this for all 8 functions is harmless regardless of whether
+ * this is actually a multi-function device.
+ */
+ for (i = 0; i < 2; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+ for (i = 2; i < 8; i++)
+ REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+ BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
@@ -10054,7 +10167,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
@@ -10142,10 +10255,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
else
timer_count--;
- /* If UNDI resides in memory, manually increment it */
- if (prev_undi)
+ /* New UNDI FW supports MF and contains better
+ * cleaning methods - might be redundant but harmless.
+ */
+ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+ bnx2x_prev_unload_undi_mf(bp);
+ } else if (prev_undi) {
+ /* If UNDI resides in memory,
+ * manually increment it
+ */
bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+ }
udelay(10);
}
@@ -10265,8 +10385,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
} while (--time_counter);
if (!time_counter || rc) {
- BNX2X_ERR("Failed unloading previous driver, aborting\n");
- rc = -EBUSY;
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
}
/* Mark function if its port was used to boot from SAN */
@@ -11636,7 +11756,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK;
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- bnx2x_prev_unload(bp);
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
}
if (CHIP_REV_IS_FPGA(bp))
@@ -11931,7 +12055,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -12156,6 +12280,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+ if (bp->flags & AER_ENABLED) {
+ pci_disable_pcie_error_reporting(bp->pdev);
+ bp->flags &= ~AER_ENABLED;
+ }
+}
+
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -12262,6 +12394,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+
+ /* AER (Advanced Error Reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
+ bp->flags |= AER_ENABLED;
+ else
+ BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
+
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -12693,8 +12833,6 @@ static int set_is_vf(int chip_id)
}
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -12869,6 +13007,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_exit:
+ bnx2x_disable_pcie_error_reporting(bp);
+
if (bp->regview)
iounmap(bp->regview);
@@ -12942,6 +13082,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
+ bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
@@ -12961,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
- }
- pci_disable_device(pdev);
+ pci_disable_device(pdev);
+ }
}
static void bnx2x_remove_one(struct pci_dev *pdev)
@@ -13120,6 +13261,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
+ /* If AER, perform cleanup of the PCIe registers */
+ if (bp->flags & AER_ENABLED) {
+ if (pci_cleanup_aer_uncorrect_error_status(pdev))
+ BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+ else
+ DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -13758,7 +13907,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13808,7 +13957,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
u32 offset = BAR_USTRORM_INTMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 14ffb6e56e59..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5932,6 +5932,7 @@
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 18438a504d57..0fb6ff2ac8e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->get(mp, 1))
- return false;
-
- if (!vp->get(vp, 1)) {
- mp->put(mp, 1);
- return false;
- }
-
- return true;
-}
-
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
- struct bnx2x_credit_pool_obj *mp = o->macs_pool;
- struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
- if (!mp->put(mp, 1))
- return false;
-
- if (!vp->put(vp, 1)) {
- mp->get(mp, 1);
- return false;
- }
-
- return true;
-}
-
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
}
}
-/**
- * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @bp: device handle
- * @o: vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- * possibly releasing and reclaiming the execution queue lock.
- */
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o)
-{
- spin_lock_bh(&o->exe_queue.lock);
- __bnx2x_vlan_mac_h_write_unlock(bp, o);
- spin_unlock_bh(&o->exe_queue.lock);
-}
/**
* __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
@@ -663,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
@@ -685,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return -EEXIST;
-
- return 0;
-}
-
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -716,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
(data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
@@ -739,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
-static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- union bnx2x_classification_ramrod_data *data)
-{
- struct bnx2x_vlan_mac_registry_elem *pos;
-
- DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
- data->vlan_mac.mac, data->vlan_mac.vlan);
-
- list_for_each_entry(pos, &o->head, link)
- if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
- (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)) &&
- (data->vlan_mac.is_inner_mac ==
- pos->u.vlan_mac.is_inner_mac))
- return pos;
-
- return NULL;
-}
-
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -811,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
{
u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -1126,97 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct eth_classify_rules_ramrod_data *data =
- (struct eth_classify_rules_ramrod_data *)(raw->rdata);
- int rule_cnt = rule_idx + 1;
- union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
- u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
- u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
- /* Reset the ramrod data buffer for the first rule */
- if (rule_idx == 0)
- memset(data, 0, sizeof(*data));
-
- /* Set a rule header */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set VLAN and MAC themselves */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
- /* MOVE: Add a rule that will add this MAC to the target Queue */
- if (cmd == BNX2X_VLAN_MAC_MOVE) {
- rule_entry++;
- rule_cnt++;
-
- /* Setup ramrod data */
- bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
- elem->cmd_data.vlan_mac.target_obj,
- true, CLASSIFY_RULE_OPCODE_PAIR,
- &rule_entry->pair.header);
-
- /* Set a VLAN itself */
- rule_entry->pair.vlan = cpu_to_le16(vlan);
- bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
- &rule_entry->pair.mac_mid,
- &rule_entry->pair.mac_lsb, mac);
- rule_entry->pair.inner_mac =
- cpu_to_le16(elem->cmd_data.vlan_mac.u.
- vlan_mac.is_inner_mac);
- }
-
- /* Set the ramrod data header */
- /* TODO: take this to the higher level in order to prevent multiple
- writing */
- bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
- rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp: device handle
- * @o: bnx2x_vlan_mac_obj
- * @elem: bnx2x_exeq_elem
- * @rule_idx: rule_idx
- * @cam_offset: cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o,
- struct bnx2x_exeq_elem *elem,
- int rule_idx, int cam_offset)
-{
- struct bnx2x_raw_obj *raw = &o->raw;
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)(raw->rdata);
- /* 57710 and 57711 do not support MOVE command,
- * so it's either ADD or DEL
- */
- bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
- true : false;
-
- /* Reset the ramrod data buffer */
- memset(config, 0, sizeof(*config));
-
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
- cam_offset, add,
- elem->cmd_data.vlan_mac.u.vlan_mac.mac,
- elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
- ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1316,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
- struct bnx2x_exe_queue_obj *o,
- struct bnx2x_exeq_elem *elem)
-{
- struct bnx2x_exeq_elem *pos;
- struct bnx2x_vlan_mac_ramrod_data *data =
- &elem->cmd_data.vlan_mac.u.vlan_mac;
-
- /* Check pending for execution commands */
- list_for_each_entry(pos, &o->exe_queue, link)
- if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
- sizeof(*data)) &&
- (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
- return pos;
-
- return NULL;
-}
-
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2241,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool)
-{
- union bnx2x_qable_obj *qable_obj =
- (union bnx2x_qable_obj *)vlan_mac_obj;
-
- bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
- rdata_mapping, state, pstate, type,
- macs_pool, vlans_pool);
-
- /* CAM pool handling */
- vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
- vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
- /* CAM offset is relevant for 57710 and 57711 chips only which have a
- * single CAM for both MACs and VLAN-MAC pairs. So the offset
- * will be taken from MACs' pool object only.
- */
- vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
- vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
- if (CHIP_IS_E1(bp)) {
- BNX2X_ERR("Do not support chips others than E2\n");
- BUG();
- } else if (CHIP_IS_E1H(bp)) {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move_always_err;
- vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue, 1, qable_obj,
- bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- } else {
- vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
- vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
- vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
- vlan_mac_obj->check_move = bnx2x_check_move;
- vlan_mac_obj->ramrod_cmd =
- RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
- /* Exe Queue */
- bnx2x_exe_queue_init(bp,
- &vlan_mac_obj->exe_queue,
- CLASSIFY_RULES_COUNT,
- qable_obj, bnx2x_validate_vlan_mac,
- bnx2x_remove_vlan_mac,
- bnx2x_optimize_vlan_mac,
- bnx2x_execute_vlan_mac,
- bnx2x_exeq_get_vlan_mac);
- }
-}
-
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -4990,6 +4728,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
}
static inline int bnx2x_q_send_update(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 6a53c15c85a3..00d7f214a40a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -448,9 +448,6 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- bool add, unsigned char *dev_addr, int index);
-
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -770,7 +767,9 @@ enum {
BNX2X_Q_UPDATE_DEF_VLAN_EN,
BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- BNX2X_Q_UPDATE_SILENT_VLAN_REM
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING
};
/* Allowed Queue states */
@@ -1307,22 +1306,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac_obj,
- u8 cl_id, u32 cid, u8 func_id, void *rdata,
- dma_addr_t rdata_mapping, int state,
- unsigned long *pstate, bnx2x_obj_type type,
- struct bnx2x_credit_pool_obj *macs_pool,
- struct bnx2x_credit_pool_obj *vlans_pool);
-
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e7845e5be1c7..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -166,6 +166,7 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_RXMODE,
BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
BNX2X_VFOP_QTEARDOWN_QDTOR,
BNX2X_VFOP_QTEARDOWN_DONE
};
@@ -617,7 +618,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
&vlan_mac->user_req.vlan_mac_flags,
&vlan_mac->ramrod_flags);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
/* next state */
@@ -628,7 +629,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vfop->rc == -EEXIST)
vfop->rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
vfop->rc = !!obj->raw.check_pending(&obj->raw);
@@ -645,7 +646,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */
@@ -657,7 +658,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
}
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
default:
bnx2x_vfop_default(state);
@@ -798,10 +799,10 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
return -ENOMEM;
}
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
+static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
int rc;
@@ -1023,25 +1024,35 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
- true);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
- true);
- DP(BNX2X_MSG_IOV,
- "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
- vf->abs_vfid, vfop->rc);
- if (vfop->rc)
- goto op_err;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
+ qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ } else {
+ /* need to reschedule ourselves */
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1112,7 +1123,10 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_MCAST_DEL:
/* clear existing mcasts */
- vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
+ : BNX2X_VFOP_MCAST_CHK_DONE;
+ mcast->mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = args->mc_num;
vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
@@ -1120,17 +1134,17 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (raw->check_pending(raw))
goto op_pending;
- if (args->mc_num) {
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- }
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ mcast->mcast_list_len = args->mc_num;
+
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MCAST_CHK_DONE:
@@ -1312,12 +1326,19 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
/* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
+ case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
case BNX2X_VFOP_QTEARDOWN_QDTOR:
/* run the queue destruction flow */
DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@ -1425,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -2197,6 +2218,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
* It needs to be initialized here so that it can be safely
* handled by a subsequent FLR flow.
*/
+ vf->mcast_list_len = 0;
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
0xFF, 0xFF, 0xFF,
bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2372,8 +2394,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2425,15 +2448,9 @@ get_vf:
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
case EVENT_RING_OPCODE_VF_FLR:
- DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
- vf->abs_vfid);
- /* Do nothing for now */
- break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
- abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
- break;
+ return 0;
}
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid, false);
@@ -2857,13 +2874,9 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
return;
}
-
- /* remove multicasts */
vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_CLOSE_HW:
@@ -2897,6 +2910,9 @@ op_done:
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ /* Not supported at the moment; exists for macros only */
+ return;
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@ -3119,6 +3135,60 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->abs_vfid, vf->op_current);
}
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
+}
+
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
@@ -3146,12 +3216,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
pci_disable_sriov(dev);
return 0;
} else {
return bnx2x_enable_sriov(bp);
}
}
+
#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
@@ -3229,6 +3301,11 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
+
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3639,7 +3716,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
/* the mac address in bulletin board is valid and is new */
if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c213fa52174..d9fcca1b5a9d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -269,6 +269,7 @@ struct bnx2x_virtf {
int leading_rss;
/* MCAST object */
+ int mcast_list_len;
struct bnx2x_mcast_obj mcast_obj;
/* RSS configuration object */
@@ -664,11 +665,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only);
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add);
-
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
@@ -726,13 +722,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification form the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length);
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length);
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
-
bool bnx2x_tlv_supported(u16 tlvtype);
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
@@ -749,7 +738,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
@@ -813,7 +801,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0756d7dabdd5..3fa6c2a2a5a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -21,9 +21,11 @@
#include "bnx2x_cmn.h"
#include <linux/crc32.h>
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
/* place a given tlv on the tlv buffer at a given offset */
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
- u16 length)
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
{
struct channel_tlv *tl =
(struct channel_tlv *)(tlvs_list + offset);
@@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
}
/* Clear the mailbox and init the header of the first tlv */
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
- u16 type, u16 length)
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
{
mutex_lock(&bp->vf2pf_mutex);
@@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
}
/* releases the mailbox */
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
{
DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
first_tlv->tl.type);
@@ -85,7 +88,7 @@ static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
}
/* list the types and lengths of the tlvs on the buffer */
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
int i = 1;
struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
@@ -633,7 +636,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -800,14 +803,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
- resp->hdr.status);
- rc = -EINVAL;
+ /* Since older drivers don't support this feature (and VF has
+ * no way of knowing other than failing this), don't propagate
+ * an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -1416,6 +1423,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
@@ -1706,7 +1721,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 &&
- memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f58a8b80302d..09f3fefcbf9c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
while (retry < 3) {
rc = 0;
rcu_read_lock();
- ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
- dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+ dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
if (!use_pg_tbl)
return 0;
- dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
- ~(BNX2_PAGE_SIZE - 1);
+ dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+ ~(CNIC_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
- cp->ctx_blk_size = BNX2_PAGE_SIZE;
- cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+ cp->ctx_blk_size = CNIC_PAGE_SIZE;
+ cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
if (udev->l2_ring)
return 0;
- udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+ udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
- pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
- PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+ CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
return -ENOMEM;
- n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+ n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
goto error;
}
- pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
- hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+ hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
}
ctx->cid = cid;
- pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
- req2->rq_page_table_addr_lo & PAGE_MASK;
+ req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
/* CSTORM and USTORM initialization is different, CSTORM requires
* CQ DB base & not PTE addr */
ictx->cstorm_st_context.cq_db_base.lo =
- req1->cq_page_table_addr_lo & PAGE_MASK;
+ req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
int rc;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+ lockdep_is_held(&cnic_lock));
if (ulp_ops && ulp_ops->cnic_get_stats)
rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
else
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
- memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+ memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+ rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
- if (BNX2_PAGE_BITS > 12)
+ if (CNIC_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
- val |= (BNX2_PAGE_BITS - 8) << 4;
+ val |= (CNIC_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
- memset(txbd, 0, BNX2_PAGE_SIZE);
+ memset(txbd, 0, CNIC_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
- BNX2_PAGE_SIZE);
+ CNIC_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@@ -5220,6 +5221,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_ring_ctl(dev, cid, cli, 1);
*cid_ptr = cid >> 4;
*(cid_ptr + 1) = cid * bp->db_size;
+ *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
}
}
@@ -5264,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
- memset(rx_ring, 0, BNX2_PAGE_SIZE);
+ rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+ memset(rx_ring, 0, CNIC_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0121a5d55192..d535ae4228b4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -186,6 +186,8 @@ struct kcq_info {
u16 (*hw_idx)(u16);
};
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b11c9f..dcbca6997e8f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ebbfe25acaa6..5f4d5573a73d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.18"
-#define CNIC_MODULE_RELDATE "Sept 01, 2013"
+#define CNIC_MODULE_VERSION "2.5.20"
+#define CNIC_MODULE_RELDATE "March 14, 2014"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -24,6 +24,16 @@
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS 14
+#else
+#define CNIC_PAGE_BITS PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
+
struct kwqe {
u32 kwqe_op_flag;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index c2777712da99..b61c14ed9b8d 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* This driver is designed for the Broadcom SiByte SOC built-in
@@ -36,7 +35,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 15a66e4b1f57..70a225c8df5c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
@@ -37,6 +36,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
+#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 134
+#define TG3_MIN_NUM 136
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Sep 16, 2013"
+#define DRV_MODULE_RELDATE "Jan 03, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -208,6 +208,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_RAW_IP_ALIGN 2
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
@@ -2606,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
+ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+ if (err)
+ return err;
- return err;
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
@@ -3948,32 +3952,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
return 0;
}
+/* tp->lock is held. */
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+ u32 addr_high, addr_low;
+
+ addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ if (index < 4) {
+ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+ } else {
+ index -= 4;
+ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+ }
+}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
- u32 addr_high, addr_low;
+ u32 addr_high;
int i;
- addr_high = ((tp->dev->dev_addr[0] << 8) |
- tp->dev->dev_addr[1]);
- addr_low = ((tp->dev->dev_addr[2] << 24) |
- (tp->dev->dev_addr[3] << 16) |
- (tp->dev->dev_addr[4] << 8) |
- (tp->dev->dev_addr[5] << 0));
for (i = 0; i < 4; i++) {
if (i == 1 && skip_mac_1)
continue;
- tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
tg3_asic_rev(tp) == ASIC_REV_5704) {
- for (i = 0; i < 12; i++) {
- tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
- tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
- }
+ for (i = 4; i < 16; i++)
+ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
}
addr_high = (tp->dev->dev_addr[0] +
@@ -4403,9 +4416,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+ if (!(tp->phy_flags &
+ TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -6827,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
work_mask |= opaque_key;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ if (desc->err_vlan & RXD_ERR_MASK) {
drop_it:
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -8925,6 +8940,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
}
+static void tg3_override_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+ u32 val;
+
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5717:
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+ val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+ break;
+
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ break;
+
+ default:
+ return;
+ }
+}
+
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
@@ -9013,6 +9071,13 @@ static int tg3_chip_reset(struct tg3 *tp)
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
}
+ /* Set the clock to the highest frequency to avoid timeouts. With link
+ * aware mode, the clock speed could be slow and bootcode does not
+ * complete within the expected time. Override the clock to allow the
+ * bootcode to finish sooner and then restore it.
+ */
+ tg3_override_clk(tp);
+
/* Manage gphy power for all CPMU absent PCIe devices. */
if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@ -9151,10 +9216,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(0x7c00, val | (1 << 25));
}
- if (tg3_asic_rev(tp) == ASIC_REV_5720) {
- val = tr32(TG3_CPMU_CLCK_ORIDE);
- tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
- }
+ tg3_restore_clk(tp);
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
@@ -9186,6 +9248,7 @@ static int tg3_chip_reset(struct tg3 *tp)
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@ -9246,6 +9309,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
+ __tg3_set_rx_mode(dev);
spin_unlock_bh(&tp->lock);
return err;
@@ -9634,6 +9698,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
tw32(MAC_HASH_REG_3, mc_filter[3]);
}
+ if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /* Add all entries into the mac addr filter list */
+ int i = 0;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ __tg3_set_one_mac_addr(tp, ha->addr,
+ i + TG3_UCAST_ADDR_IDX(tp));
+ i++;
+ }
+ }
+
if (rx_mode != tp->rx_mode) {
tp->rx_mode = rx_mode;
tw32_f(MAC_RX_MODE, rx_mode);
@@ -9966,6 +10044,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@ -10751,6 +10830,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@ -10879,6 +10959,13 @@ static void tg3_timer(unsigned long __opaque)
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
+ } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+ u32 cpmu = tr32(TG3_CPMU_STATUS);
+ bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+ TG3_CPMU_STATUS_LINK_MASK);
+
+ if (link_up != tp->link_up)
+ tg3_setup_phy(tp, false);
}
tp->timer_counter = tp->timer_multiplier;
@@ -11746,8 +11833,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
get_stat64(&hw_stats->rx_frame_too_long_errors) +
get_stat64(&hw_stats->rx_undersize_packets);
- stats->rx_over_errors = old_stats->rx_over_errors +
- get_stat64(&hw_stats->rxbds_empty);
stats->rx_frame_errors = old_stats->rx_frame_errors +
get_stat64(&hw_stats->rx_align_errors);
stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@ -13594,14 +13679,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct tg3 *tp = netdev_priv(dev);
struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
@@ -13682,6 +13766,67 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
-EFAULT : 0;
}
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return -EOPNOTSUPP;
+
+ stmpconf.flags = 0;
+ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+ switch (tp->rxptpctl) {
+ case 0:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ break;
+ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
+
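As an illustrative aside (not part of the patch), tg3_hwtstamp_get() services the SIOCGHWTSTAMP ioctl; a minimal user-space probe might look like the sketch below, where the interface name is hypothetical and fd is any open AF_INET socket.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int read_hwtstamp_filter(int fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
		return -1;		/* EOPNOTSUPP if the NIC is not PTP capable */

	return cfg.rx_filter;		/* e.g. HWTSTAMP_FILTER_NONE */
}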
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -13735,7 +13880,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
case SIOCSHWTSTAMP:
- return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+ return tg3_hwtstamp_set(dev, ifr);
+
+ case SIOCGHWTSTAMP:
+ return tg3_hwtstamp_get(dev, ifr);
default:
/* do nothing */
@@ -13965,12 +14113,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp);
+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
@@ -14856,7 +15004,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg, led_cfg;
- u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+ u32 nic_phy_id, ver, eeprom_phy_id;
int eeprom_phy_serdes = 0;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -14873,6 +15022,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
eeprom_phy_serdes = 1;
@@ -15025,6 +15179,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
}
done:
if (tg3_flag(tp, WOL_CAP))
@@ -15120,9 +15277,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)
{
u32 adv = ADVERTISED_Autoneg;
- if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+ adv |= ADVERTISED_1000baseT_Half;
+ adv |= ADVERTISED_1000baseT_Full;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
adv |= ADVERTISED_100baseT_Half |
@@ -16470,6 +16629,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Set these bits to enable statistics workaround. */
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@ -16612,6 +16772,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
else
tg3_flag_clear(tp, POLL_SERDES);
+ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+ tg3_flag_set(tp, POLL_CPMU_LINK);
+
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@ -17486,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17519,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_TSO_ECN;
}
- dev->features |= features;
+ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*
@@ -17533,6 +17695,7 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5c3835aa1e1b..04321e5a356e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1146,10 +1146,14 @@
#define TG3_CPMU_CLCK_ORIDE 0x00003624
#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628
+#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13)
+
#define TG3_CPMU_STATUS 0x0000362c
#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
#define TG3_CPMU_STATUS_FSHFT_5719 30
+#define TG3_CPMU_STATUS_LINK_MASK 0x180000
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
@@ -2204,7 +2208,7 @@
#define NIC_SRAM_DATA_CFG_2 0x00000d38
-#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000
#define SHASTA_EXT_LED_MODE_MASK 0x00018000
#define SHASTA_EXT_LED_LEGACY 0x00000000
#define SHASTA_EXT_LED_SHARED 0x00008000
@@ -2226,6 +2230,9 @@
#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
+#define NIC_SRAM_DATA_CFG_5 0x00000e0c
+#define NIC_SRAM_DISABLE_1G_HALF_ADV 0x00000002
+
#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2601,7 +2608,11 @@ struct tg3_rx_buffer_desc {
#define RXD_ERR_TOO_SMALL 0x00400000
#define RXD_ERR_NO_RESOURCES 0x00800000
#define RXD_ERR_HUGE_FRAME 0x01000000
-#define RXD_ERR_MASK 0xffff0000
+
+#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
+ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
+ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
+ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
u32 reserved;
u32 opaque;
@@ -3014,6 +3025,7 @@ enum TG3_FLAGS {
TG3_FLAG_ENABLE_ASF,
TG3_FLAG_ASPM_WORKAROUND,
TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_POLL_CPMU_LINK,
TG3_FLAG_MBOX_WRITE_REORDER,
TG3_FLAG_PCIX_TARGET_HWBUG,
TG3_FLAG_WOL_SPEED_100MB,
@@ -3325,6 +3337,7 @@ struct tg3 {
#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000
u32 led_ctrl;
u32 phy_otp;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..354ae9792bad 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
/* IOC local definitions */
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_FAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -42,6 +50,14 @@
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
+ ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
- u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+ enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
*/
case IOCPF_E_TIMEOUT:
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1162,7 +1178,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
}
- fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_UNINIT) {
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return;
@@ -1176,8 +1192,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
}
bfa_ioc_fwver_clear(ioc);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
/*
* Try to lock and then unlock the semaphore.
@@ -1309,22 +1325,504 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+ struct bfi_ioc_image_hdr *fwhdr_2)
+{
+ int i;
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns TRUE if the major, minor and maintenance versions are the same.
+ * If the patch versions are also the same, require the MD5 checksums to match.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+ return false;
+ if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+ return false;
+ if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+ return false;
+ if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+ return false;
+ if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+ drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+ drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+ return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+ return true;
+}
+
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+ if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+ if (fwhdr->fwver.phase == 0 &&
+ fwhdr->fwver.build == 0)
+ return false;
+
+ return true;
+}
+
+/* Compares the patch level of fwhdr_to_cmp against base_fwhdr; INCOMP if not compatible. */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+ struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+ if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* GA takes priority over internal builds of the same patch stream.
+ * At this point the major, minor, maint and patch numbers are the same.
+ */
+ if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_SAME;
+ else
+ return BFI_IOC_IMG_VER_OLD;
+ else
+ if (fwhdr_is_ga(fwhdr_to_cmp))
+ return BFI_IOC_IMG_VER_BETTER;
+
+ if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+ return BFI_IOC_IMG_VER_OLD;
+
+ if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_BETTER;
+ else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+ return BFI_IOC_IMG_VER_OLD;
+
+ /* All version numbers are equal.
+ * The MD5 check is done as part of the compatibility check.
+ */
+ return BFI_IOC_IMG_VER_SAME;
+}
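As an illustrative aside (not part of the patch), a worked example of the comparison above, assuming the snippet sits alongside these static helpers in bfa_ioc.c and using made-up version numbers from the same 3.2.3.x stream:

struct bfi_ioc_image_hdr base = {
	.fwver = { .major = 3, .minor = 2, .maint = 3, .patch = 0 },
};
struct bfi_ioc_image_hdr cand = {
	.fwver = { .major = 3, .minor = 2, .maint = 3, .patch = 1 },
};

/* bfa_ioc_fw_ver_compatible(&base, &cand) is true (signature, major,
 * minor and maint all match), and because cand's patch level is higher,
 * bfa_ioc_fw_ver_patch_cmp(&base, &cand) == BFI_IOC_IMG_VER_BETTER.
 * Only when every version field matches does the MD5 sum decide. */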
+
+/* register definitions */
+#define FLI_CMD_REG 0x0001d000
+#define FLI_WRDATA_REG 0x0001d00c
+#define FLI_RDDATA_REG 0x0001d010
+#define FLI_ADDR_REG 0x0001d004
+#define FLI_DEV_STATUS_REG 0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
+#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
+
+#define NFC_STATE_RUNNING 0x20000001
+#define NFC_STATE_PAUSED 0x00004560
+#define NFC_VER_VALID 0x147
+
+enum bfa_flash_cmd {
+ BFA_FLASH_FAST_READ = 0x0b, /* fast read */
+ BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
+ BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
+ BFA_FLASH_WRITE = 0x02, /* write */
+ BFA_FLASH_READ_STATUS = 0x05, /* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+ BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
+ BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
+ BFA_FLASH_BAD = -3, /*!< flash bad */
+ BFA_FLASH_BUSY = -4, /*!< flash busy */
+ BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
+ BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
+ BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
+ BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
+ BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 act:1;
+ u32 rsv:1;
+ u32 write_cnt:9;
+ u32 read_cnt:9;
+ u32 addr_cnt:4;
+ u32 cmd:8;
+#else
+ u32 cmd:8;
+ u32 addr_cnt:4;
+ u32 read_cnt:9;
+ u32 write_cnt:9;
+ u32 rsv:1;
+ u32 act:1;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 rsv:21;
+ u32 fifo_cnt:6;
+ u32 busy:1;
+ u32 init_status:1;
+ u32 present:1;
+ u32 bad:1;
+ u32 good:1;
+#else
+ u32 good:1;
+ u32 bad:1;
+ u32 present:1;
+ u32 init_status:1;
+ u32 busy:1;
+ u32 fifo_cnt:6;
+ u32 rsv:21;
+#endif
+ } r;
+ u32 i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 addr:24;
+ u32 dummy:8;
+#else
+ u32 dummy:8;
+ u32 addr:24;
+#endif
+ } r;
+ u32 i;
+};
+
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+ u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = 0;
+ cmd.r.act = 1;
+ cmd.r.write_cnt = wr_cnt;
+ cmd.r.read_cnt = rd_cnt;
+ cmd.r.addr_cnt = ad_cnt;
+ cmd.r.cmd = op;
+ writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+ union bfa_flash_addr_reg addr;
+
+ addr.r.addr = address & 0x00ffffff;
+ addr.r.dummy = 0;
+ writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+ union bfa_flash_cmd_reg cmd;
+
+ cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+ if (cmd.r.act)
+ return BFA_FLASH_ERR_CMD_ACT;
+
+ return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+ u32 i;
+ u32 t;
+ union bfa_flash_dev_status_reg dev_status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+ if (!dev_status.r.fifo_cnt)
+ return 0;
+
+ /* fifo counter in terms of words */
+ for (i = 0; i < dev_status.r.fifo_cnt; i++)
+ t = readl(pci_bar + FLI_RDDATA_REG);
+
+ /* Check the device status. It may take some time. */
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ break;
+ }
+
+ if (dev_status.r.fifo_cnt)
+ return BFA_FLASH_ERR_FIFO_CNT;
+
+ return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+ union bfa_flash_dev_status_reg dev_status;
+ u32 status;
+ u32 ret_status;
+ int i;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+ status = bfa_flash_cmd_act_check(pci_bar);
+ if (!status)
+ break;
+ }
+
+ if (status)
+ return status;
+
+ dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+ if (!dev_status.r.fifo_cnt)
+ return BFA_FLASH_BUSY;
+
+ ret_status = readl(pci_bar + FLI_RDDATA_REG);
+ ret_status >>= 24;
+
+ status = bfa_flash_fifo_flush(pci_bar);
+ if (status < 0)
+ return status;
+
+ return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+ char *buf)
+{
+ u32 status;
+
+ /* len must be a multiple of 4 and must not exceed the fifo size */
+ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+ return BFA_FLASH_ERR_LEN;
+
+ /* check status */
+ status = bfa_flash_status_read(pci_bar);
+ if (status == BFA_FLASH_BUSY)
+ status = bfa_flash_status_read(pci_bar);
+
+ if (status < 0)
+ return status;
+
+ /* check if write-in-progress bit is cleared */
+ if (status & BFA_FLASH_WIP_MASK)
+ return BFA_FLASH_ERR_WIP;
+
+ bfa_flash_set_addr(pci_bar, offset);
+
+ bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+ return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+ if (bfa_flash_cmd_act_check(pci_bar))
+ return 1;
+
+ return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+ u32 i;
+
+ /* read data fifo up to 32 words */
+ for (i = 0; i < len; i += 4) {
+ u32 w = readl(pci_bar + FLI_RDDATA_REG);
+ *((u32 *)(buf + i)) = swab32(w);
+ }
+
+ bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX 500
+#define FLASH_SEM_LOCK_REG 0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+ int locked;
+
+ locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+ return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+ u32 n = FLASH_BLOCKING_OP_MAX;
+
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+ mdelay(10);
+ }
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+ writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+ u32 len)
+{
+ u32 n, status;
+ u32 off, l, s, residue, fifo_sz;
+
+ residue = len;
+ off = 0;
+ fifo_sz = BFA_FLASH_FIFO_SIZE;
+ status = bfa_flash_sem_get(pci_bar);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ while (residue) {
+ s = offset + off;
+ n = s / fifo_sz;
+ l = (n + 1) * fifo_sz - s;
+ if (l > residue)
+ l = residue;
+
+ status = bfa_flash_read_start(pci_bar, offset + off, l,
+ &buf[off]);
+ if (status < 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+
+ n = BFA_FLASH_BLOCKING_OP_MAX;
+ while (bfa_flash_read_check(pci_bar)) {
+ if (--n <= 0) {
+ bfa_flash_sem_put(pci_bar);
+ return BFA_STATUS_FAILED;
+ }
+ }
+
+ bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+ residue -= l;
+ off += l;
+ }
+ bfa_flash_sem_put(pci_bar);
+
+ return BFA_STATUS_OK;
+}
+
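As an illustrative aside (not part of the patch), bfa_flash_raw_read() clips each read so it never crosses a BFA_FLASH_FIFO_SIZE (128-byte) boundary; the s / fifo_sz arithmetic measures the distance to the next boundary. A stand-alone sketch of just that segmentation, printing chunk sizes instead of touching hardware:

#include <stdio.h>

#define FIFO_SZ 128u

static void show_chunks(unsigned int offset, unsigned int len)
{
	unsigned int off = 0, residue = len;

	while (residue) {
		unsigned int s = offset + off;
		unsigned int l = (s / FIFO_SZ + 1) * FIFO_SZ - s;

		if (l > residue)
			l = residue;
		printf("read %u bytes at flash offset %u\n", l, s);
		residue -= l;
		off += l;
	}
}

int main(void)
{
	show_chunks(100, 200);	/* yields chunks of 28, 128 and 44 bytes */
	return 0;
}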
+#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
+
+static enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+ u32 *fwimg)
+{
+ return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+ BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+ (char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *base_fwhdr)
+{
+ struct bfi_ioc_image_hdr *flash_fwhdr;
+ enum bfa_status status;
+ u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+ if (status != BFA_STATUS_OK)
+ return BFI_IOC_IMG_VER_INCOMP;
+
+ flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+ if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+ return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+ else
+ return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if driver is willing to work with current smem f/w version.
+ */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
struct bfi_ioc_image_hdr *drv_fwhdr;
- int i;
+ enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
- for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
- if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
- return false;
+ /* If smem is incompatible or old, driver should not work with it. */
+ drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+ if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+ drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+ return false;
}
- return true;
+ /* If flash has a better f/w than smem, do not work with smem.
+ * If smem f/w == flash f/w, work with smem (at this point it is already
+ * known to be neither old nor incompatible).
+ * If flash is old or incompatible, work with smem only if
+ * smem f/w == drv f/w.
+ */
+ smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+ if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+ return false;
+ else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+ return true;
+ else
+ return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+ true : false;
}
/* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1831,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
- struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ struct bfi_ioc_image_hdr fwhdr;
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
- drv_fwhdr = (struct bfi_ioc_image_hdr *)
- bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
- if (fwhdr.signature != drv_fwhdr->signature)
- return false;
-
if (swab32(fwhdr.bootenv) != boot_env)
return false;
@@ -1366,7 +1858,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
bool fwvalid;
u32 boot_env;
- ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1872,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
+
return;
}
@@ -1411,8 +1905,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
- bfa_ioc_poll_fwinit(ioc);
+ if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+ BFA_STATUS_OK)
+ bfa_ioc_poll_fwinit(ioc);
}
void
@@ -1517,7 +2012,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
}
/* Initiate a full firmware download. */
-static void
+static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
{
@@ -1527,18 +2022,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 chunkno = 0;
u32 i;
u32 asicmode;
+ u32 fwimg_size;
+ u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+ enum bfa_status status;
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ for (i = 0; i < fwimg_size; i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+ fwimg_buf);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ fwimg = fwimg_buf;
+ } else {
+ fwimg = bfa_cb_image_get_chunk(
+ bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
}
/**
@@ -1566,6 +2090,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type, env and device mode at the end.
*/
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_FLASH) {
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ }
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2102,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ (BFI_FWBOOT_TYPE_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_FWBOOT_ENV_OFF)));
+ return BFA_STATUS_OK;
}
static void
@@ -1846,29 +2375,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
-static void
+static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_env)
{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ enum bfa_status status;
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
- return;
+ return BFA_STATUS_FAILED;
+ if (boot_env == BFI_FWBOOT_ENV_OS &&
+ boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+ /* Work with Flash iff flash f/w is better than driver f/w.
+ * Otherwise push the driver's firmware.
+ */
+ if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+ BFI_IOC_IMG_VER_BETTER)
+ boot_type = BFI_FWBOOT_TYPE_FLASH;
+ }
/**
* Initialize IOC state of all functions on a chip reset.
*/
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
} else {
- writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
- writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+ bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_env);
- bfa_ioc_lpu_start(ioc);
+ status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ if (status == BFA_STATUS_OK)
+ bfa_ioc_lpu_start(ioc);
+ else
+ bfa_nw_iocpf_timeout(ioc);
+
+ return status;
}
/* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3020,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
- u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+ void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+ void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate);
+ enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+ struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_leave = bfa_ioc_ct_sync_leave,
.ioc_sync_ack = bfa_ioc_ct_sync_ack,
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
+ .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
+ .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
+ .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
+ .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
return false;
}
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+ enum bfi_ioc_state fwstate)
+{
+ writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+ return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN (0xdeaddead)
#define BFI_IOC_MD5SUM_SZ 4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+ u8 patch;
+ u8 maint;
+ u8 minor;
+ u8 major;
+ u8 rsvd[2];
+ u8 build;
+ u8 phase;
+#else
+ u8 major;
+ u8 minor;
+ u8 maint;
+ u8 patch;
+ u8 phase;
+ u8 build;
+ u8 rsvd[2];
+#endif
+};
+
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
u8 asic_gen; /*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
u8 port1_mode; /*!< device mode for port 1 */
u32 exec; /*!< exec vector */
u32 bootenv; /*!< firmware boot env */
- u32 rsvd_b[4];
+ u32 rsvd_b[2];
+ struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_ioc_img_ver_cmp {
+ BFI_IOC_IMG_VER_INCOMP,
+ BFI_IOC_IMG_VER_OLD,
+ BFI_IOC_IMG_VER_SAME,
+ BFI_IOC_IMG_VER_BETTER
+};
+
#define BFI_FWBOOT_DEVMODE_OFF 4
#define BFI_FWBOOT_TYPE_OFF 8
#define BFI_FWBOOT_ENV_OFF 12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
struct bfi_enet_rx_cfg {
u8 rxq_type;
- u8 rsvd[3];
+ u8 rsvd[1];
+ u16 frame_size;
struct {
u8 max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
} \
} while (0)
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
- struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
- struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&ucam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+ }
+
ucam_mod->bna = bna;
}
@@ -1818,11 +1825,16 @@ static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
struct list_head *qe;
- int i = 0;
+ int i;
+ i = 0;
list_for_each(qe, &ucam_mod->free_q)
i++;
+ i = 0;
+ list_for_each(qe, &ucam_mod->del_q)
+ i++;
+
ucam_mod->bna = NULL;
}
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
&mcam_mod->free_handle_q);
}
+ /* A separate queue to allow synchronous setting of a list of MACs */
+ INIT_LIST_HEAD(&mcam_mod->del_q);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+ }
+
mcam_mod->bna = bna;
}
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
list_for_each(qe, &mcam_mod->free_q) i++;
i = 0;
+ list_for_each(qe, &mcam_mod->del_q) i++;
+
+ i = 0;
list_for_each(qe, &mcam_mod->free_handle_q) i++;
mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
- attr->num_ucmac * sizeof(struct bna_mac);
+ (attr->num_ucmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast MAC address - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
BNA_MEM_T_KVA;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
- attr->num_mcmac * sizeof(struct bna_mac);
+ (attr->num_mcmac * 2) * sizeof(struct bna_mac);
/* Virtual memory for Multicast handle - stored by mcam module */
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
}
struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
- struct list_head *qe;
-
- if (list_empty(&ucam_mod->free_q))
- return NULL;
-
- bfa_q_deq(&ucam_mod->free_q, &qe);
-
- return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
{
struct list_head *qe;
- if (list_empty(&mcam_mod->free_q))
+ if (list_empty(head))
return NULL;
- bfa_q_deq(&mcam_mod->free_q, &qe);
-
+ bfa_q_deq(head, &qe);
return (struct bna_mac *)qe;
}
void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
- list_add_tail(&mac->qe, &mcam_mod->free_q);
+ list_add_tail(&mac->qe, tail);
}
struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
+/* CAT2 ASIC does not use bit 21 as per the SPEC.
+ * Bit 31 is set in every end of frame completion
+ */
+#define BNA_CQ_EF_EOP (1 << 31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
struct list_head *qe;
int ret;
- /* Delete multicast entries previousely added */
+ /* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
- rxf->ucast_pending_mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+ rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
- bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
return BNA_CB_SUCCESS;
}
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
+ struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
- struct bna_mac *mac;
+ struct bna_mac *mac, *del_mac;
int i;
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
- for (i = 0, mcaddr = mclist; i < count; i++) {
- mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ for (i = 0, mcaddr = uclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
-
mcaddr += ETH_ALEN;
}
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ }
+
+ return BNA_CB_UCAST_CAM_FULL;
+}
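As an illustrative aside (not part of the patch), a hypothetical caller of bna_rx_ucast_listset(); the real bnad-side plumbing, locking and completion callback are omitted. Because the function copies each address into its own bna_mac entry before returning, the flat list can be freed immediately:

static void example_ucast_listset(struct bna_rx *rx, struct net_device *dev)
{
	int count = netdev_uc_count(dev);
	struct netdev_hw_addr *ha;
	u8 *list, *pos;

	list = kcalloc(count, ETH_ALEN, GFP_ATOMIC);
	if (!list)
		return;

	pos = list;
	netdev_for_each_uc_addr(ha, dev) {
		memcpy(pos, ha->addr, ETH_ALEN);
		pos += ETH_ALEN;
	}

	bna_rx_ucast_listset(rx, count, list, NULL);
	kfree(list);
}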
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac, *del_mac;
+ int i;
+
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+ del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ }
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
}
/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
- bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac, *del_mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+ memcpy(del_mac, mac, sizeof(*del_mac));
+ list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+ mac->handle = NULL;
+ bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx);
+}
+
+void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+ mac);
return 1;
}
}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+ cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
- q0->buffer_size =
- bna_enet_mtu_get(&rx->bna->enet);
+ if (q0->multi_buffer)
+ /* multi-buffer is enabled by allocating
+ * a new rx with a new set of resources.
+ * q0->buffer_size should be initialized to
+ * the fragment size.
+ */
+ cfg_req->rx_cfg.multi_buffer =
+ BNA_STATUS_T_ENABLED;
+ else
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;
- dq_depth = q_cfg->q_depth;
- hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ dq_depth = q_cfg->q0_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
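The completion-queue depth above is the sum of the data-queue and header-queue depths, rounded up so the CQ ring stays a power of two. A minimal stand-alone sketch of that rounding, assuming BNA_TO_POWER_OF_2_HIGH rounds up to the next power of two (this is an illustration, not the driver macro):

	#include <stdio.h>

	/* Round v up to the next power of two (illustrative equivalent). */
	static unsigned int roundup_pow_of_two_u32(unsigned int v)
	{
		unsigned int p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		/* e.g. dq_depth = 2048 (512 entries * 4 vectors), hq_depth = 512 */
		unsigned int cq_depth = 2048 + 512;

		printf("cq_depth rounded: %u\n", roundup_pow_of_two_u32(cq_depth)); /* 4096 */
		return 0;
	}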
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
- u32 page_count;
+ struct bna_mem_descr *hqunmap_mem;
+ struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
- struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i;
- int dpage_count, hpage_count, rcb_idx;
+ u32 dpage_count, hpage_count;
+ u32 hq_idx, dq_idx, rcb_idx;
+ u32 cq_depth, i;
+ u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
- unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+ hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}
rx->num_paths = rx_cfg->num_paths;
- for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+ i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+ rcb_idx++; dq_idx++;
+ q0->rcb->q_depth = rx_cfg->q0_depth;
+ q0->q_depth = rx_cfg->q0_depth;
+ q0->multi_buffer = rx_cfg->q0_multi_buf;
+ q0->buffer_size = rx_cfg->q0_buf_size;
+ q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
- q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
- rcb_idx++;
- q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+ rcb_idx++; hq_idx++;
+ q1->rcb->q_depth = rx_cfg->q1_depth;
+ q1->q_depth = rx_cfg->q1_depth;
+ q1->multi_buffer = BNA_STATUS_T_DISABLED;
+ q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
- : rx_cfg->small_buff_size;
+ : rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
- rxp->cq.ccb->q_depth = rx_cfg->q_depth +
- ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_cfg->q_depth);
+ cq_depth = rx_cfg->q0_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q1_depth);
+ /* if multi-buffer is enabled, the sum of q0_depth
+ * and q1_depth need not be a power of 2
+ */
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+ rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+ rxf->vlan_strip_pending = true;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
- BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
- BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
- BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
- BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
- BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
- BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
- BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
- BNA_RX_RES_MEM_T_IBIDX = 12,
- BNA_RX_RES_MEM_T_RIT = 13,
- BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
- BNA_RX_RES_T_MAX = 15
+ BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+ BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+ BNA_RX_RES_MEM_T_CQPT = 4,
+ BNA_RX_RES_MEM_T_CSWQPT = 5,
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+ BNA_RX_RES_MEM_T_HQPT = 7,
+ BNA_RX_RES_MEM_T_DQPT = 8,
+ BNA_RX_RES_MEM_T_HSWQPT = 9,
+ BNA_RX_RES_MEM_T_DSWQPT = 10,
+ BNA_RX_RES_MEM_T_DPAGE = 11,
+ BNA_RX_RES_MEM_T_HPAGE = 12,
+ BNA_RX_RES_MEM_T_IBIDX = 13,
+ BNA_RX_RES_MEM_T_RIT = 14,
+ BNA_RX_RES_T_INTR = 15,
+ BNA_RX_RES_T_MAX = 16
};
enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
int buffer_size;
int q_depth;
+ u32 num_vecs;
+ enum bna_status multi_buffer;
struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
+ u32 pkts_una;
+ u32 bytes_per_intr;
/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
- int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
- * for SLR and HDS queue type. Large buffer size comes from
- * enet->mtu.
+ * for SLR and HDS queue type.
*/
- int small_buff_size;
+ u32 frame_size;
+
+ /* header or small queue */
+ u32 q1_depth;
+ u32 q1_buf_size;
+
+ /* data or large queue */
+ u32 q0_depth;
+ u32 q0_buf_size;
+ u32 q0_num_vecs;
+ enum bna_status q0_multi_buf;
enum bna_status rss_status;
struct bna_rss_config rss_config;
@@ -866,8 +879,9 @@ struct bna_rx_mod {
/* CAM */
struct bna_ucam_mod {
- struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct bna_mac *ucmac; /* num_ucmac * 2 entries */
struct list_head free_q;
+ struct list_head del_q;
struct bna *bna;
};
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
};
struct bna_mcam_mod {
- struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
- struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct bna_mac *mcmac; /* num_mcmac * 2 entries */
+ struct bna_mcam_handle *mchandle; /* num_mcmac entries */
struct list_head free_q;
+ struct list_head del_q;
struct list_head free_handle_q;
struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..4ad1187e82fb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
/*
* Global variables
*/
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vectors[vector], dma_addr),
- skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_len(&unmap->vectors[vector], dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
nvecs--;
}
@@ -282,27 +283,32 @@ static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
- int mtu, order;
+ int order;
bnad_rxq_alloc_uninit(bnad, rcb);
- mtu = bna_enet_mtu_get(&bnad->bna.enet);
- order = get_order(mtu);
+ order = get_order(rcb->rxq->buffer_size);
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
if (bna_is_small_rxq(rcb->id)) {
unmap_q->alloc_order = 0;
unmap_q->map_size = rcb->rxq->buffer_size;
} else {
- unmap_q->alloc_order = order;
- unmap_q->map_size =
- (rcb->rxq->buffer_size > 2048) ?
- PAGE_SIZE << order : 2048;
+ if (rcb->rxq->multi_buffer) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
}
BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
- unmap_q->type = BNAD_RXBUF_PAGE;
-
return 0;
}
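For the page-backed (non multi-buffer) large queue, map_size ends up as either 2048 or the full PAGE_SIZE << order allocation, so the BUG_ON above holds because map_size always divides the allocation evenly. A small stand-alone check of that invariant, with PAGE_SIZE assumed to be 4096 and a hypothetical jumbo buffer size:

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u		/* assumed for illustration */

	int main(void)
	{
		unsigned int buffer_size = 9022;	/* hypothetical jumbo frame */
		unsigned int order = 2;			/* get_order(9022) on 4K pages */
		unsigned int map_size = (buffer_size > 2048) ?
					(PAGE_SIZE << order) : 2048;

		assert(((PAGE_SIZE << order) % map_size) == 0);
		printf("alloc %u bytes, map_size %u\n", PAGE_SIZE << order, map_size);
		return 0;
	}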
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
for (i = 0; i < rcb->q_depth; i++) {
struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
}
bnad_rxq_alloc_uninit(bnad, rcb);
}
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
return;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_refill_page(bnad, rcb, to_alloc);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
- struct bnad_rx_unmap_q *unmap_q,
- struct bnad_rx_unmap *unmap,
- u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+ u32 sop_ci, u32 nvecs)
{
- struct bnad *bnad = rx_ctrl->bnad;
- struct sk_buff *skb;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+ u32 ci, vec;
+
+ unmap_q = rcb->unmap_q;
+ for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_rxq_cleanup_skb(bnad, unmap);
+ else
+ bnad_rxq_cleanup_page(bnad, unmap);
+ }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+ u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+ struct bnad *bnad;
+ u32 ci, vec, len, totlen = 0;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
+
+ unmap_q = rcb->unmap_q;
+ bnad = rcb->bnad;
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
- skb = napi_get_frags(&rx_ctrl->napi);
- if (unlikely(!skb))
- return NULL;
+ /* prefetch header */
+ prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+ unmap_q->unmap[sop_ci].page_offset);
+
+ for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+ unmap = &unmap_q->unmap[ci];
+ BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
+
+ len = (vec == nvecs) ?
+ last_fraglen : unmap->vector.len;
+ totlen += len;
+
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- unmap->page, unmap->page_offset, length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
+ unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
-
- return skb;
}
- skb = unmap->skb;
- BUG_ON(!skb);
+ skb->len += totlen;
+ skb->data_len += totlen;
+ skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+ struct bnad_rx_unmap *unmap, u32 len)
+{
+ prefetch(skb->data);
dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
- skb_put(skb, length);
-
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, bnad->netdev);
unmap->skb = NULL;
unmap->vector.len = 0;
- return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cq, *cmpl;
+ struct bna_cq_entry *cq, *cmpl, *next_cmpl;
struct bna_rcb *rcb = NULL;
struct bnad_rx_unmap_q *unmap_q;
- struct bnad_rx_unmap *unmap;
- struct sk_buff *skb;
+ struct bnad_rx_unmap *unmap = NULL;
+ struct sk_buff *skb = NULL;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
- u32 packets = 0, length = 0, flags, masked_flags;
+ u32 packets = 0, len = 0, totlen = 0;
+ u32 pi, vec, sop_ci = 0, nvecs = 0;
+ u32 flags, masked_flags;
prefetch(bnad->netdev);
cq = ccb->sw_q;
cmpl = &cq[ccb->producer_index];
- while (cmpl->valid && (packets < budget)) {
- packets++;
- flags = ntohl(cmpl->flags);
- length = ntohs(cmpl->length);
+ while (packets < budget) {
+ if (!cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only after writing
+ * the other fields of completion entry. Hence, do not load
+ * other fields of completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the compiler and/or
+ * CPU from reordering the reads which would potentially result
+ * in reading stale values in completion entry.
+ */
+ rmb();
+
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap = &unmap_q->unmap[rcb->consumer_index];
- if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
- BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- bnad_rxq_cleanup_page(bnad, unmap);
- else
- bnad_rxq_cleanup_skb(bnad, unmap);
+ /* start of packet ci */
+ sop_ci = rcb->consumer_index;
+
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+ unmap = &unmap_q->unmap[sop_ci];
+ skb = unmap->skb;
+ } else {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ break;
+ }
+ prefetch(skb);
+ flags = ntohl(cmpl->flags);
+ len = ntohs(cmpl->length);
+ totlen = len;
+ nvecs = 1;
+
+ /* Walk all the completions of this frame.
+ * If one is not valid yet, busy-waiting doesn't
+ * help much; break out and retry later.
+ */
+ if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+ (flags & BNA_CQ_EF_EOP) == 0) {
+ pi = ccb->producer_index;
+ do {
+ BNA_QE_INDX_INC(pi, ccb->q_depth);
+ next_cmpl = &cq[pi];
+
+ if (!next_cmpl->valid)
+ break;
+ /* The 'valid' field is set by the adapter, only
+ * after writing the other fields of completion
+ * entry. Hence, do not load other fields of
+ * completion entry *before* the 'valid' is
+ * loaded. Adding the rmb() here prevents the
+ * compiler and/or CPU from reordering the reads
+ * which would potentially result in reading
+ * stale values in completion entry.
+ */
+ rmb();
+
+ len = ntohs(next_cmpl->length);
+ flags = ntohl(next_cmpl->flags);
+
+ nvecs++;
+ totlen += len;
+ } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+ if (!next_cmpl->valid)
+ break;
+ }
+
+ /* TODO: BNA_CQ_EF_LOCAL ? */
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
rcb->rxq->rx_packets_with_error++;
+
goto next;
}
- skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
- length, flags);
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+ bnad_cq_setup_skb(bnad, skb, unmap, len);
+ else
+ bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- if (unlikely(!skb))
- break;
+ packets++;
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += totlen;
+ ccb->bytes_per_intr += totlen;
masked_flags = flags & flags_cksum_prot_mask;
@@ -606,21 +707,22 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += length;
-
- if (flags & BNA_CQ_EF_VLAN)
+ if ((flags & BNA_CQ_EF_VLAN) &&
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
- if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
- napi_gro_frags(&rx_ctrl->napi);
- else
+ if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
netif_receive_skb(skb);
+ else
+ napi_gro_frags(&rx_ctrl->napi);
next:
- cmpl->valid = 0;
- BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
- BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+ for (vec = 0; vec < nvecs; vec++) {
+ cmpl = &cq[ccb->producer_index];
+ cmpl->valid = 0;
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ }
cmpl = &cq[ccb->producer_index];
}
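The next: label now retires all nvecs completions of a frame at once: the RCB consumer index advances by nvecs and each consumed CQ entry has its valid bit cleared as the CCB producer index walks past it. The index macros assume a power-of-two ring depth; a minimal illustrative sketch of that arithmetic (these are not the driver's definitions):

	#include <stdio.h>

	/* Illustrative ring-index helpers; depth must be a power of two. */
	#define QE_INDX_INC(_i, _depth)		((_i) = ((_i) + 1) & ((_depth) - 1))
	#define QE_INDX_ADD(_i, _n, _depth)	((_i) = ((_i) + (_n)) & ((_depth) - 1))

	int main(void)
	{
		unsigned int ci = 1022, depth = 1024, nvecs = 4;

		QE_INDX_ADD(ci, nvecs, depth);	/* 1022 + 4 wraps to 2 */
		printf("consumer index after +%u: %u\n", nvecs, ci);
		return 0;
	}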
@@ -1899,8 +2001,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
tx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!tx)
+ if (!tx) {
+ err = -ENOMEM;
goto err_return;
+ }
tx_info->tx = tx;
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2015,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
err = bnad_tx_msix_register(bnad, tx_info,
tx_id, bnad->num_txq_per_tx);
if (err)
- goto err_return;
+ goto cleanup_tx;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2024,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
return 0;
+cleanup_tx:
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
err_return:
bnad_tx_res_free(bnad, res_info);
return err;
@@ -1930,6 +2040,7 @@ err_return:
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
+ memset(rx_config, 0, sizeof(*rx_config));
rx_config->rx_type = BNA_RX_T_REGULAR;
rx_config->num_paths = bnad->num_rxp_per_rx;
rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,12 +2061,43 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
memset(&rx_config->rss_config, 0,
sizeof(rx_config->rss_config));
}
+
+ rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+ rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+ /* BNA_RXP_SINGLE - one data-buffer queue
+ * BNA_RXP_SLR - one small-buffer and one large-buffer queue
+ * BNA_RXP_HDS - one header-buffer and one data-buffer queue
+ */
+ /* TODO: configurable param for queue type */
rx_config->rxp_type = BNA_RXP_SLR;
- rx_config->q_depth = bnad->rxq_depth;
- rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ rx_config->frame_size > 4096) {
+ /* though size_routing_enable is set in SLR,
+ * small packets may get routed to same rxq.
+ * set buf_size to 2048 instead of PAGE_SIZE.
+ */
+ rx_config->q0_buf_size = 2048;
+ /* this should be a multiple of 2 */
+ rx_config->q0_num_vecs = 4;
+ rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+ rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+ } else {
+ rx_config->q0_buf_size = rx_config->frame_size;
+ rx_config->q0_num_vecs = 1;
+ rx_config->q0_depth = bnad->rxq_depth;
+ }
+
+ /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+ if (rx_config->rxp_type == BNA_RXP_SLR) {
+ rx_config->q1_depth = bnad->rxq_depth;
+ rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+ }
- rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rx_config->vlan_strip_status =
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
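The q0 sizing logic above boils down to one decision: on CT2 hardware with an on-wire frame larger than 4K, the data queue switches to multi-buffer mode with fixed 2048-byte fragments and scales its depth by the number of fragments per frame; otherwise one buffer carries the whole frame. A stand-alone sketch of that decision with assumed values (BNAD_PCI_DEV_IS_CAT2 and the 4096 threshold come from the hunk above; everything else is illustrative):

	#include <stdio.h>

	struct q0_cfg {
		unsigned int buf_size;
		unsigned int num_vecs;
		unsigned int depth;
		int multi_buf;
	};

	static struct q0_cfg pick_q0_cfg(unsigned int frame_size,
					 unsigned int rxq_depth, int is_cat2)
	{
		struct q0_cfg c;

		if (is_cat2 && frame_size > 4096) {
			c.buf_size = 2048;			/* fragment size */
			c.num_vecs = 4;				/* fragments per frame */
			c.depth = rxq_depth * c.num_vecs;	/* keep frame capacity */
			c.multi_buf = 1;
		} else {
			c.buf_size = frame_size;
			c.num_vecs = 1;
			c.depth = rxq_depth;
			c.multi_buf = 0;
		}
		return c;
	}

	int main(void)
	{
		struct q0_cfg c = pick_q0_cfg(9022, 512, 1);	/* 9000 MTU on CT2 */

		printf("buf %u, vecs %u, depth %u, multi %d\n",
		       c.buf_size, c.num_vecs, c.depth, c.multi_buf);
		return 0;
	}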
@@ -1969,6 +2111,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
+static u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 err = 0, current_err = 0;
+ u32 rx_id = 0, count = 0;
+ unsigned long flags;
+
+ /* destroy and create new rx objects */
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ if (!bnad->rx_info[rx_id].rx)
+ continue;
+ bnad_destroy_rx(bnad, rx_id);
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+ count++;
+ current_err = bnad_setup_rx(bnad, rx_id);
+ if (current_err && !err) {
+ err = current_err;
+ pr_err("RXQ:%u setup failed\n", rx_id);
+ }
+ }
+
+ /* restore rx configuration */
+ if (bnad->rx_info[0].rx && !err) {
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
+ }
+
+ return count;
+}
+
+/* Called with bnad_conf_lock() held */
void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
@@ -2047,13 +2232,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
- rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
- 0 : rx_config->num_paths),
- ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
- sizeof(struct bnad_rx_unmap_q)));
-
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+ rx_config->num_paths,
+ (rx_config->q0_depth *
+ sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q));
+
+ if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+ rx_config->num_paths,
+ (rx_config->q1_depth *
+ sizeof(struct bnad_rx_unmap) +
+ sizeof(struct bnad_rx_unmap_q)));
+ }
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
if (err)
@@ -2548,7 +2739,6 @@ bnad_open(struct net_device *netdev)
int err;
struct bnad *bnad = netdev_priv(netdev);
struct bna_pause_config pause_config;
- int mtu;
unsigned long flags;
mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2757,9 @@ bnad_open(struct net_device *netdev)
pause_config.tx_pause = 0;
pause_config.rx_pause = 0;
- mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_mtu_set(&bnad->bna.enet,
+ BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2813,6 @@ bnad_stop(struct net_device *netdev)
bnad_destroy_tx(bnad, 0);
bnad_destroy_rx(bnad, 0);
- /* These config flags are cleared in the hardware */
- bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
/* Synchronize mailbox IRQ */
bnad_mbox_irq_sync(bnad);
@@ -2784,21 +2970,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
tcb = bnad->tx_info[0].tcb[txq_id];
- q_depth = tcb->q_depth;
- prod = tcb->producer_index;
-
- unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_tx_stop_all_queues() call.
*/
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+ unmap_q = tcb->unmap_q;
+
vectors = 1 + skb_shinfo(skb)->nr_frags;
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2863,7 +3049,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = skb_frag_size(frag);
+ u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3074,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
- dma_addr);
+ dma_addr);
head_unmap->nvecs++;
}
@@ -2911,6 +3098,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
+ skb_tx_timestamp(skb);
+
bna_txq_prod_indx_doorbell(tcb);
smp_mb();
@@ -2937,73 +3126,128 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int uc_count = netdev_uc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+ struct netdev_hw_addr *ha;
+ int entry;
+
+ if (netdev_uc_empty(bnad->netdev)) {
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ return;
+ }
+
+ if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+ goto mode_default;
+
+ mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+ if (mac_list == NULL)
+ goto mode_default;
+
+ entry = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ memcpy(&mac_list[entry * ETH_ALEN],
+ &ha->addr[0], ETH_ALEN);
+ entry++;
+ }
+
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_default;
+
+ return;
+
+ /* ucast packets not in UCAM are routed to default function */
+mode_default:
+ bnad->cfg_flags |= BNAD_CF_DEFAULT;
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+ int mc_count = netdev_mc_count(netdev);
+ enum bna_cb_status ret;
+ u8 *mac_list;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ goto mode_allmulti;
+
+ if (netdev_mc_empty(netdev))
+ return;
+
+ if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+ goto mode_allmulti;
+
+ mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+ if (mac_list == NULL)
+ goto mode_allmulti;
+
+ memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* copy rest of the MCAST addresses */
+ bnad_netdev_mc_list_get(netdev, mac_list);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mac_list, NULL);
+ kfree(mac_list);
+
+ if (ret != BNA_CB_SUCCESS)
+ goto mode_allmulti;
+
+ return;
+
+mode_allmulti:
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u32 new_mask, valid_mask;
+ enum bna_rxmode new_mode, mode_mask;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- new_mask = valid_mask = 0;
-
- if (netdev->flags & IFF_PROMISC) {
- if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
- new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags |= BNAD_CF_PROMISC;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_PROMISC) {
- new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
- valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
- bnad->cfg_flags &= ~BNAD_CF_PROMISC;
- }
- }
-
- if (netdev->flags & IFF_ALLMULTI) {
- if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
- new_mask |= BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- }
- } else {
- if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
- new_mask &= ~BNA_RXMODE_ALLMULTI;
- valid_mask |= BNA_RXMODE_ALLMULTI;
- bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
- }
+ if (bnad->rx_info[0].rx == NULL) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
}
- if (bnad->rx_info[0].rx == NULL)
- goto unlock;
-
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+ /* clear bnad flags to update it with new settings */
+ bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+ BNAD_CF_ALLMULTI);
- if (!netdev_mc_empty(netdev)) {
- u8 *mcaddr_list;
- int mc_count = netdev_mc_count(netdev);
+ new_mode = 0;
+ if (netdev->flags & IFF_PROMISC) {
+ new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ } else {
+ bnad_set_rx_mcast_fltr(bnad);
- /* Index 0 holds the broadcast address */
- mcaddr_list =
- kzalloc((mc_count + 1) * ETH_ALEN,
- GFP_ATOMIC);
- if (!mcaddr_list)
- goto unlock;
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+ new_mode |= BNA_RXMODE_ALLMULTI;
- memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ bnad_set_rx_ucast_fltr(bnad);
- /* Copy rest of the MC addresses */
- bnad_netdev_mc_list_get(netdev, mcaddr_list);
+ if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+ new_mode |= BNA_RXMODE_DEFAULT;
+ }
- bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mcaddr_list, NULL);
+ mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+ BNA_RXMODE_ALLMULTI;
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
- kfree(mcaddr_list);
- }
-unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3033,14 +3277,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
}
static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
{
unsigned long flags;
init_completion(&bnad->bnad_completions.mtu_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3295,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
- int err, mtu = netdev->mtu;
+ int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
+ u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
+ mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
- err = bnad_mtu_set(bnad, mtu);
+ frame = BNAD_FRAME_SIZE(mtu);
+ new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+ /* check if multi-buffer needs to be enabled */
+ if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+ netif_running(bnad->netdev)) {
+ /* reinit only when the frame size crosses the 4K boundary */
+ if ((frame <= 4096 && new_frame > 4096) ||
+ (frame > 4096 && new_frame <= 4096))
+ rx_count = bnad_reinit_rx(bnad);
+ }
+
+ /* rx_count > 0 - new rx objects were created;
+ * Linux sets err = 0 and returns.
+ */
+ err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
@@ -3112,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return 0;
}
+static int bnad_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnad *bnad = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
@@ -3159,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = {
.ndo_change_mtu = bnad_change_mtu,
.ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+ .ndo_set_features = bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnad_netpoll
#endif
@@ -3171,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -3262,7 +3544,6 @@ bnad_uninit(struct bnad *bnad)
if (bnad->bar0)
iounmap(bnad->bar0);
- pci_set_drvdata(bnad->pcidev, NULL);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.21.1"
+#define BNAD_VERSION "3.2.23.0"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
#define BNAD_MIN_Q_DEPTH 512
-#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_RXQ_DEPTH 16384
#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+#define BNAD_FRAME_SIZE(_mtu) \
+ (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
/*
* DATA STRUCTURES
*/
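BNAD_FRAME_SIZE converts an MTU to the on-wire frame length by adding the Ethernet header, one VLAN tag and the FCS. A quick stand-alone check of the arithmetic, assuming the usual header lengths (ETH_HLEN 14, VLAN_HLEN 4, ETH_FCS_LEN 4):

	#include <stdio.h>

	#define FRAME_SIZE(_mtu)	(14 + 4 + (_mtu) + 4)

	int main(void)
	{
		printf("mtu 1500 -> frame %d\n", FRAME_SIZE(1500));	/* 1522 */
		printf("mtu 9000 -> frame %d\n", FRAME_SIZE(9000));	/* 9022 */
		return 0;
	}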
@@ -219,6 +222,7 @@ struct bnad_rx_info {
struct bnad_tx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
struct bnad_rx_unmap {
struct page *page;
- u32 page_offset;
struct sk_buff *skb;
struct bnad_rx_vector vector;
+ u32 page_offset;
};
enum bnad_rxbuf_type {
BNAD_RXBUF_NONE = 0,
- BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_SK_BUFF = 1,
BNAD_RXBUF_PAGE = 2,
- BNAD_RXBUF_MULTI = 3
+ BNAD_RXBUF_MULTI_BUFF = 3
};
-#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
struct bnad_rx_unmap_q {
int reuse_pi;
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0];
+ struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
};
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+ ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
#define BNAD_CF_PROMISC 0x02
#define BNAD_CF_ALLMULTI 0x04
-#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+#define BNAD_CF_DEFAULT 0x08
+#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */
/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
* EXTERN VARIABLES
*/
extern const struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
.get_eeprom = bnad_get_eeprom,
.set_eeprom = bnad_set_eeprom,
.flash_device = bnad_flash_device,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 92578690f6de..d0c38e01e99f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -17,6 +17,7 @@
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -203,6 +204,47 @@ static int macb_mdio_reset(struct mii_bus *bus)
return 0;
}
+/**
+ * macb_set_tx_clk() - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
+ * @dev: Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
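The warning threshold above expresses the clock error in units of rate / 100000, i.e. steps of 10 ppm, so ferr > 5 flags an error of more than roughly 50 ppm. A small stand-alone sketch of that arithmetic with made-up numbers (DIV_ROUND_UP redefined locally for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		long rate = 125000000;		/* SPEED_1000 target */
		long rate_rounded = 124998000;	/* hypothetical clk_round_rate() result */
		long ferr = labs(rate_rounded - rate);

		ferr = DIV_ROUND_UP(ferr, rate / 100000);	/* error in 10 ppm units */
		printf("error ~%ld x 10 ppm -> %s\n", ferr, ferr > 5 ? "warn" : "ok");
		return 0;
	}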
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
@@ -250,6 +292,9 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
if (status_change) {
if (phydev->link) {
netif_carrier_on(dev);
@@ -587,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
"Unable to allocate sk_buff\n");
break;
}
- bp->rx_skbuff[entry] = skb;
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -680,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget)
skb_put(skb, len);
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
- len, DMA_FROM_DEVICE);
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
skb_checksum_none_assert(skb);
@@ -991,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
entry = macb_tx_ring_wrap(bp->tx_head);
- bp->tx_head++;
netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto unlock;
+ }
+ bp->tx_head++;
tx_skb = &bp->tx_skb[entry];
tx_skb->skb = skb;
tx_skb->mapping = mapping;
@@ -1021,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
+unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_OK;
@@ -1790,21 +1845,44 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
- bp->pclk = clk_get(&pdev->dev, "pclk");
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get macb_clk\n");
+ err = PTR_ERR(bp->pclk);
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
goto err_out_free_dev;
}
- clk_prepare_enable(bp->pclk);
- bp->hclk = clk_get(&pdev->dev, "hclk");
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
- goto err_out_put_pclk;
+ err = PTR_ERR(bp->hclk);
+ dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
+ goto err_out_free_dev;
}
- clk_prepare_enable(bp->hclk);
- bp->regs = ioremap(regs->start, resource_size(regs));
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n",
+ err);
+ goto err_out_disable_hclk;
+ }
+ }
+
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
@@ -1812,11 +1890,12 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
if (err) {
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
dev->irq, err);
- goto err_out_iounmap;
+ goto err_out_disable_clocks;
}
dev->netdev_ops = &macb_netdev_ops;
@@ -1879,7 +1958,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_disable_clocks;
}
err = macb_mii_init(bp);
@@ -1902,16 +1981,13 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_iounmap:
- iounmap(bp->regs);
err_out_disable_clocks:
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
+err_out_disable_pclk:
clk_disable_unprepare(bp->pclk);
-err_out_put_pclk:
- clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
err_out:
@@ -1933,12 +2009,10 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(bp->regs);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
- clk_put(bp->hclk);
clk_disable_unprepare(bp->pclk);
- clk_put(bp->pclk);
free_netdev(dev);
}
@@ -1946,45 +2020,49 @@ static int __exit macb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
netif_device_detach(netdev);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
return 0;
}
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
return 0;
}
-#else
-#define macb_suspend NULL
-#define macb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
- .suspend = macb_suspend,
- .resume = macb_resume,
.driver = {
.name = "macb",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
},
};
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index f4076155bed7..51c02442160a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -572,6 +572,7 @@ struct macb {
struct platform_device *pdev;
struct clk *pclk;
struct clk *hclk;
+ struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
struct work_struct tx_error_task;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4fc5c8ef5121..d2a183c3a6ce 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 8abb46b39032..53b1f9478383 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -50,7 +49,6 @@
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/crc32.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc739..a4d2a4c08d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc7..5249686afe71 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d021059f097..0fe7ff750d77 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -38,7 +37,6 @@
#include "common.h"
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d9..81526ad36339 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff1955739..3e182eee799e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc0..162de5259df9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf7..dfa77491a910 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e5..d0cf611551a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb33a31b08a0..ec5e05052d99 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a6..964ce59ee169 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 8061fb0ef7ed..4c5879389003 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
@@ -47,7 +46,6 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index b9bf16b385f7..a1ba591b3431 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -11,8 +11,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index e0a03a31e7c4..816719314cc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566a..7f79cc7ceb75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -12,8 +12,7 @@
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
- * with this program; if not, write to the Free Software Foundation, Inc., *
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * with this program; if not, see <http://www.gnu.org/licenses/>. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 8c82248ce416..442480982d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76ae09999b5b..c0a9dd55f4e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -182,7 +182,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
for_each_port(adapter, i) {
struct net_device *dev = adapter->port[i];
- if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 8d53438638b2..5f226eda8cd6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -429,7 +429,7 @@ found:
} else {
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
- if (memcmp(e->dmac, neigh->ha, 6))
+ if (!ether_addr_equal(e->dmac, neigh->ha))
setup_l2e_send_pending(dev, NULL, e);
}
}
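
The two conversions above replace open-coded memcmp() on ETH_ALEN bytes with ether_addr_equal() from <linux/etherdevice.h>. A minimal sketch of the idiom (illustrative only, not part of the patch; demo_mac_matches() is a hypothetical helper). Note the inverted sense: memcmp() returns 0 on a match, ether_addr_equal() returns true.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical helper: true when the device's MAC equals @mac. */
static bool demo_mac_matches(const struct net_device *dev, const u8 *mac)
{
	/* previously:  !memcmp(dev->dev_addr, mac, ETH_ALEN)  */
	return ether_addr_equal(dev->dev_addr, mac);
}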
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 56e0415f8cdf..1f4b9b30b9ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
#include "cxgb4_uld.h"
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x06
-#define T4FW_VERSION_MICRO 0x18
+#define T4FW_VERSION_MINOR 0x09
+#define T4FW_VERSION_MICRO 0x17
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x08
-#define T5FW_VERSION_MICRO 0x1C
+#define T5FW_VERSION_MINOR 0x09
+#define T5FW_VERSION_MICRO 0x17
#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -387,8 +387,9 @@ struct work_struct;
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
- USING_MSI = (1 << 1),
- USING_MSIX = (1 << 2),
+ DEV_ENABLED = (1 << 1),
+ USING_MSI = (1 << 2),
+ USING_MSIX = (1 << 3),
FW_OK = (1 << 4),
RSS_TNLALLLOOKUP = (1 << 5),
USING_SOFT_PARAMS = (1 << 6),
@@ -938,7 +939,6 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
-int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -979,13 +979,6 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force);
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index fff02ed1295e..34e2488767d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4288,7 +4288,15 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
+ /* Block retrieving statistics during EEH error
+ * recovery. Otherwise, the recovery might fail
+ * and the PCI device will be removed permanently
+ */
spin_lock(&adapter->stats_lock);
+ if (!netif_device_present(dev)) {
+ spin_unlock(&adapter->stats_lock);
+ return ns;
+ }
t4_get_port_stats(adapter, p->tx_chan, &stats);
spin_unlock(&adapter->stats_lock);
@@ -5496,16 +5504,21 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
rtnl_lock();
adap->flags &= ~FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
netif_device_detach(dev);
netif_carrier_off(dev);
}
+ spin_unlock(&adap->stats_lock);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- pci_disable_device(pdev);
+ if ((adap->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adap->flags &= ~DEV_ENABLED;
+ }
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -5522,9 +5535,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
+ if (!(adap->flags & DEV_ENABLED)) {
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot reenable PCI "
+ "device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ adap->flags |= DEV_ENABLED;
}
pci_set_master(pdev);
@@ -5910,6 +5927,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ /* PCI device has been enabled */
+ adapter->flags |= DEV_ENABLED;
+
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
@@ -6143,10 +6163,13 @@ static void remove_one(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if ((adapter->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adapter->flags &= ~DEV_ENABLED;
+ }
pci_release_regions(pdev);
+ kfree(adapter);
} else
pci_release_regions(pdev);
}
@@ -6156,6 +6179,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
+ .shutdown = remove_one,
.err_handler = &cxgb4_eeh,
};
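
The cxgb4_main.c hunks above make PCI enable/disable idempotent: a new DEV_ENABLED adapter flag records whether pci_enable_device() has succeeded, so the EEH error handler, remove_one() and the newly registered shutdown hook cannot disable the device twice, and cxgb_get_stats() now returns early under stats_lock while the device is detached. A condensed sketch of the flag idiom, assuming the same struct adapter and DEV_ENABLED bit (helper names are hypothetical):

static int demo_enable_pci(struct adapter *adap, struct pci_dev *pdev)
{
	int err = 0;

	if (!(adap->flags & DEV_ENABLED)) {
		err = pci_enable_device(pdev);
		if (!err)
			adap->flags |= DEV_ENABLED;
	}
	return err;
}

static void demo_disable_pci(struct adapter *adap, struct pci_dev *pdev)
{
	if (adap->flags & DEV_ENABLED) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
}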
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc3511a5cd0c..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1630,7 +1630,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
if (unlikely(pkt->vlan_ex)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@ -1686,7 +1687,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
- skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+ skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+ PKT_HASH_TYPE_L3);
rxq->stats.pkts++;
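
Both RX completion paths above stop writing skb->rxhash directly and call skb_set_hash() instead, which stores the hash together with its type so the stack knows whether it covers L3 or L4 headers. A one-line sketch (illustrative; demo_record_rss_hash() is hypothetical):

#include <linux/skbuff.h>

static void demo_record_rss_hash(struct sk_buff *skb, u32 hash, bool l4_hash)
{
	skb_set_hash(skb, hash, l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}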
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e1413eacdbd2..2c109343d570 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -32,12 +32,13 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force);
/**
* t4_wait_op_done_val - wait until an operation is completed
* @adapter: the adapter performing the operation
@@ -1070,62 +1071,6 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
}
/**
- * t4_load_cfg - download config file
- * @adap: the adapter
- * @cfg_data: the cfg text file to write
- * @size: text file size
- *
- * Write the supplied config text file to the card's serial flash.
- */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
-{
- int ret, i, n;
- unsigned int addr;
- unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
- addr = t4_flash_cfg_addr(adap);
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
- return -EFBIG;
- }
-
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
- ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
- flash_cfg_start_sec + i - 1);
- /*
- * If size == 0 then we're simply erasing the FLASH sectors associated
- * with the on-adapter Firmware Configuration File.
- */
- if (ret || size == 0)
- goto out;
-
- /* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i < size; i += SF_PAGE_SIZE) {
- if ((size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
- if (ret)
- goto out;
-
- addr += SF_PAGE_SIZE;
- cfg_data += SF_PAGE_SIZE;
- }
-
-out:
- if (ret)
- dev_err(adap->pdev_dev, "config file %s failed %d\n",
- (size == 0 ? "clear" : "download"), ret);
- return ret;
-}
-
-/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
@@ -2810,7 +2755,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* be doing. The only way out of this state is to RESTART the firmware
* ...
*/
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
int ret = 0;
@@ -2875,7 +2820,7 @@ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* the chip since older firmware won't recognize the PCIE_FW.HALT
* flag and automatically RESET itself on startup.
*/
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
if (reset) {
/*
@@ -2938,8 +2883,8 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* positive errno indicates that the adapter is ~probably~ intact, a
* negative errno indicates that things are looking bad ...
*/
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
- const u8 *fw_data, unsigned int size, int force)
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force)
{
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
@@ -2964,78 +2909,6 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
return t4_fw_restart(adap, mbox, reset);
}
-
-/**
- * t4_fw_config_file - setup an adapter via a Configuration File
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @mtype: the memory type where the Configuration File is located
- * @maddr: the memory address where the Configuration File is located
- * @finiver: return value for CF [fini] version
- * @finicsum: return value for CF [fini] checksum
- * @cfcsum: return value for CF computed checksum
- *
- * Issue a command to get the firmware to process the Configuration
- * File located at the specified mtype/maddress. If the Configuration
- * File is processed successfully and return value pointers are
- * provided, the Configuration File "[fini] section version and
- * checksum values will be returned along with the computed checksum.
- * It's up to the caller to decide how it wants to respond to the
- * checksums not matching but it recommended that a prominant warning
- * be emitted in order to help people rapidly identify changed or
- * corrupted Configuration Files.
- *
- * Also note that it's possible to modify things like "niccaps",
- * "toecaps",etc. between processing the Configuration File and telling
- * the firmware to use the new configuration. Callers which want to
- * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- * Configuration Files if they want to do this.
- */
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
- unsigned int mtype, unsigned int maddr,
- u32 *finiver, u32 *finicsum, u32 *cfcsum)
-{
- struct fw_caps_config_cmd caps_cmd;
- int ret;
-
- /*
- * Tell the firmware to process the indicated Configuration File.
- * If there are no errors and the caller has provided return value
- * pointers for the [fini] section version, checksum and computed
- * checksum, pass those back to the caller.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
- FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
- if (ret < 0)
- return ret;
-
- if (finiver)
- *finiver = ntohl(caps_cmd.finiver);
- if (finicsum)
- *finicsum = ntohl(caps_cmd.finicsum);
- if (cfcsum)
- *cfcsum = ntohl(caps_cmd.cfcsum);
-
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
-}
-
/**
* t4_fixup_host_params - fix up host-dependent parameters
* @adap: the adapter
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 61362450d05b..f412d0fa0850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -268,7 +268,6 @@ int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
-int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d958c44341b5..25dfeb8f28ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -363,8 +363,8 @@ int t4vf_fw_reset(struct adapter *adapter)
* Reads the values of firmware or device parameters. Up to 7 parameters
* can be queried at once.
*/
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, u32 *vals)
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
{
int i, ret;
struct fw_params_cmd cmd, rpl;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index ec88de4ac162..2be2a99c5ea3 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -18,7 +18,6 @@
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ff78dfaec508..b740bfce72ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1036,11 +1036,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
if (netdev->features & NETIF_F_RXHASH) {
- skb->rxhash = rss_hash;
- if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
- skb->l4_rxhash = true;
+ skb_set_hash(skb, rss_hash,
+ (rss_type &
+ (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 43464f0a4f99..e6a83198c3dd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -162,7 +162,7 @@ static int enic_are_pp_different(struct enic_port_profile *pp1,
return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
pp2->instance_uuid, PORT_UUID_MAX) |
!!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
- !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+ !ether_addr_equal(pp1->mac_addr, pp2->mac_addr);
}
static int enic_pp_preassociate(struct enic *enic, int vf,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 7080ad6c4014..a1a2b4028a5c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -110,8 +109,8 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
- unsigned int in_suspend :1;
- unsigned int wake_supported :1;
+ unsigned int in_suspend:1;
+ unsigned int wake_supported:1;
enum dm9000_type type;
@@ -162,7 +161,7 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
* Read a byte from I/O port
*/
static u8
-ior(board_info_t * db, int reg)
+ior(board_info_t *db, int reg)
{
writeb(reg, db->io_addr);
return readb(db->io_data);
@@ -173,7 +172,7 @@ ior(board_info_t * db, int reg)
*/
static void
-iow(board_info_t * db, int reg, int value)
+iow(board_info_t *db, int reg, int value)
{
writeb(reg, db->io_addr);
writeb(value, db->io_data);
@@ -745,9 +744,9 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_link = dm9000_get_link,
.get_wol = dm9000_get_wol,
.set_wol = dm9000_set_wol,
- .get_eeprom_len = dm9000_get_eeprom_len,
- .get_eeprom = dm9000_get_eeprom,
- .set_eeprom = dm9000_set_eeprom,
+ .get_eeprom_len = dm9000_get_eeprom_len,
+ .get_eeprom = dm9000_get_eeprom,
+ .set_eeprom = dm9000_set_eeprom,
};
static void dm9000_show_carrier(board_info_t *db,
@@ -795,7 +794,7 @@ dm9000_poll_work(struct work_struct *w)
}
} else
mii_check_media(&db->mii, netif_msg_link(db), 0);
-
+
if (netif_running(ndev))
dm9000_schedule_poll(db);
}
@@ -1252,12 +1251,11 @@ static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
dev_info(db->dev, "wake by link status change\n");
if (wcr & WCR_SAMPLEST)
dev_info(db->dev, "wake by sample packet\n");
- if (wcr & WCR_MAGICST )
+ if (wcr & WCR_MAGICST)
dev_info(db->dev, "wake by magic packet\n");
if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
dev_err(db->dev, "wake signalled with no reason? "
"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
-
}
spin_unlock_irqrestore(&db->lock, flags);
@@ -1314,7 +1312,7 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
-
+
dm9000_schedule_poll(db);
return 0;
@@ -1628,7 +1626,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
-
+
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
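
The dm9000.c hunks above are coding-style cleanups: no space before a bitfield colon, the '*' bound to the parameter name rather than the type, aligned ethtool_ops initializers, and trailing whitespace removed. A tiny sketch of the preferred style (hypothetical struct and function):

struct demo_state {
	unsigned int in_suspend:1;	/* no space before the ':' */
	unsigned int wake_supported:1;
};

static void demo_clear_suspend(struct demo_state *st)	/* '*' binds to the name */
{
	st->in_suspend = 0;
}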
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index df5a892fb49c..1812f4916917 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <linux/init.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 93a4afaa09f1..dcf21a36a9cf 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/mii.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include "tulip.h"
@@ -458,7 +457,7 @@ void tulip_find_mii(struct net_device *dev, int board_idx)
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later,
but takes much time. */
- for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) {
int phy = phyn & 0x1f;
int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
if ((mii_status & 0x8301) == 0x8001 ||
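
The media.c change above bounds the PHY scan with ARRAY_SIZE() rather than sizeof(): sizeof() yields the array size in bytes, which only equals the element count for one-byte elements, while ARRAY_SIZE() from <linux/kernel.h> is correct for any element type. A minimal illustration (hypothetical array):

#include <linux/kernel.h>
#include <linux/types.h>

static u16 demo_slots[8];

static unsigned int demo_slot_count(void)
{
	/* sizeof(demo_slots) is 16 bytes; ARRAY_SIZE(demo_slots) is 8 entries */
	return ARRAY_SIZE(demo_slots);
}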
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
+ pci_disable_device(pdev);
/* pci_power_off (pdev, -1); */
}
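
tulip_remove_one() now calls pci_disable_device(), so the remove path balances the pci_enable_device() performed at probe time. A minimal sketch of the expected pairing (hypothetical probe/remove pair, trimmed to the relevant calls):

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "demo");
	if (err)
		pci_disable_device(pdev);
	return err;
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);	/* balances probe's pci_enable_device() */
}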
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index a5397b130724..aa4ee385091f 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1192,9 +1192,6 @@ static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
ULI526X_DBUG(0, "uli526x_suspend", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_save_state(pdev);
if (!netif_running(dev))
@@ -1228,9 +1225,6 @@ static int uli526x_resume(struct pci_dev *pdev)
ULI526X_DBUG(0, "uli526x_resume", 0);
- if (!netdev_priv(dev))
- return 0;
-
pci_restore_state(pdev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index ab7ebac6fbea..6204cdfe43a6 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -28,7 +28,6 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 3699565704c7..7d07a0f5320d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -25,7 +25,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index f3d60eb13c3a..8a79a32a5674 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4ccaf9af6fc9..05529e273050 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.224.0u"
+#define DRV_VER "10.0.600.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -42,7 +42,7 @@
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -283,7 +283,6 @@ struct be_rx_compl_info {
u32 rss_hash;
u16 vlan_tag;
u16 pkt_size;
- u16 rxq_idx;
u16 port;
u8 vlanf;
u8 num_rcvd;
@@ -351,11 +350,13 @@ struct be_drv_stats {
u32 roce_drops_crc;
};
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID 0xFFFF
+
struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
- u16 def_vid;
u16 vlan_tag;
u32 tx_rate;
};
@@ -493,7 +494,7 @@ struct be_adapter {
u16 pvid;
struct phy_info phy;
u8 wol_cap;
- bool wol;
+ bool wol_en;
u32 uc_macs; /* Count of secondary UC MAC programmed */
u16 asic_rev;
u16 qnq_vid;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 94c35c8d799d..48076a6370c3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1101,23 +1101,22 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 1;
- req->cq_id = cpu_to_le16(cq->id);
-
- AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
- ctxt, cq->id);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
- ctxt, 1);
-
- } else {
+ if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ } else {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
+ ctxt, 1);
}
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
@@ -1187,7 +1186,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int status;
status = be_cmd_mccq_ext_create(adapter, mccq, cq);
- if (status && !lancer_chip(adapter)) {
+ if (status && BEx_chip(adapter)) {
dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
"or newer to avoid conflicting priorities between NIC "
"and FCoE traffic");
@@ -2692,6 +2691,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_resp_get_fn_privileges *resp =
embedded_payload(wrb);
*privilege = le32_to_cpu(resp->privilege_mask);
+
+ /* In UMC mode FW does not return the right privileges.
+ * Override with correct privilege equivalent to PF.
+ */
+ if (BEx_chip(adapter) && be_is_mc(adapter) &&
+ be_physfn(adapter))
+ *privilege = MAX_PRIVILEGES;
}
err:
@@ -2736,7 +2742,8 @@ err:
* If pmac_id is returned, pmac_id_valid is returned as true
*/
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_valid, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
+ u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2774,7 +2781,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
if (*pmac_id_valid) {
req->mac_id = cpu_to_le32(*pmac_id);
- req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->iface_id = cpu_to_le16(if_handle);
req->perm_override = 0;
} else {
req->perm_override = 1;
@@ -2827,17 +2834,21 @@ out:
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain)
{
- bool active = true;
+ if (!active)
+ be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
+ if_handle, domain);
if (BEx_chip(adapter))
return be_cmd_mac_addr_query(adapter, mac, false,
- adapter->if_handle, curr_pmac_id);
+ if_handle, curr_pmac_id);
else
/* Fetch the MAC address using pmac_id */
return be_cmd_get_mac_from_list(adapter, mac, &active,
- &curr_pmac_id, 0);
+ &curr_pmac_id,
+ if_handle, domain);
}
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
@@ -2856,7 +2867,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
- NULL, 0);
+ NULL, adapter->if_handle, 0);
}
return status;
@@ -2917,7 +2928,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
int status;
status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, dom);
+ &pmac_id, if_id, dom);
+
if (!status && active_mac)
be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
@@ -2997,7 +3009,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
- if (!BEx_chip(adapter)) {
+ if (!BEx_chip(adapter) && mode) {
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
@@ -3028,14 +3040,16 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config_v1 *req;
- int status;
- int payload_len = sizeof(*req);
+ int status = 0;
struct be_dma_mem cmd;
if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
CMD_SUBSYSTEM_ETH))
return -EPERM;
+ if (be_is_wol_excluded(adapter))
+ return status;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -3060,7 +3074,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
- payload_len, wrb, &cmd);
+ sizeof(*req), wrb, &cmd);
req->hdr.version = 1;
req->query_options = BE_GET_WOL_CAP;
@@ -3070,13 +3084,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
- /* the command could succeed misleadingly on old f/w
- * which is not aware of the V1 version. fake an error. */
- if (resp->hdr.response_length < payload_len) {
- status = -1;
- goto err;
- }
adapter->wol_cap = resp->wol_settings;
+ if (adapter->wol_cap & BE_WOL_CAP)
+ adapter->wol_en = true;
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3085,6 +3095,76 @@ err:
return status;
}
+
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status;
+ int i, j;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+ if (!extfat_cmd.va)
+ return -ENOMEM;
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (status)
+ goto err;
+
+ cfgs = (struct be_fat_conf_params *)
+ (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
+ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+ for (j = 0; j < num_modes; j++) {
+ if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
+ cfgs->module[i].trace_lvl[j].dbg_lvl =
+ cpu_to_le32(level);
+ }
+ }
+
+ status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+err:
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+ return status;
+}
+
+int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status, j;
+ int level = 0;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+ &extfat_cmd.dma);
+
+ if (!extfat_cmd.va) {
+ dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+ __func__);
+ goto err;
+ }
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+ if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
+ }
+ pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+err:
+ return level;
+}
+
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd)
{
@@ -3609,6 +3689,40 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
return status;
}
+/* Uses MBOX */
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
+{
+ struct be_cmd_req_get_active_profile *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
+ wrb, NULL);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_active_profile *resp =
+ embedded_payload(wrb);
+ *profile_id = le16_to_cpu(resp->active_profile_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0075686276aa..fc4e076dc202 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -216,6 +216,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FUNC_CONFIG 160
#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
+#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167
#define OPCODE_COMMON_SET_HSW_CONFIG 153
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
@@ -452,7 +453,7 @@ struct amap_mcc_context_be {
u8 rsvd2[32];
} __packed;
-struct amap_mcc_context_lancer {
+struct amap_mcc_context_v1 {
u8 async_cq_id[16];
u8 ring_size[4];
u8 rsvd0[12];
@@ -476,7 +477,7 @@ struct be_cmd_req_mcc_ext_create {
u16 num_pages;
u16 cq_id;
u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ u8 context[sizeof(struct amap_mcc_context_v1) / 8];
struct phys_addr pages[8];
} __packed;
@@ -1097,6 +1098,14 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->function_mode & FLEX10_MODE ||
+ adapter->function_mode & VNIC_MODE ||
+ adapter->function_mode & UMC_ENABLED;
+}
+
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1917,6 +1926,17 @@ struct be_cmd_resp_set_profile_config {
struct be_cmd_resp_hdr hdr;
};
+struct be_cmd_req_get_active_profile {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_active_profile {
+ struct be_cmd_resp_hdr hdr;
+ u16 active_profile_id;
+ u16 next_profile_id;
+} __packed;
+
struct be_cmd_enable_disable_vf {
struct be_cmd_req_hdr hdr;
u8 enable;
@@ -2037,8 +2057,10 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
u32 vf_num);
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain);
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+ bool *pmac_id_active, u32 *pmac_id,
+ u32 if_handle, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain);
int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
@@ -2048,6 +2070,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
u16 intf_id, u8 *mode);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
+int be_cmd_get_fw_log_level(struct be_adapter *adapter);
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
@@ -2063,6 +2087,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 08330034d9ef..05be0070f55f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -713,12 +713,13 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (be_is_wol_supported(adapter)) {
+ if (adapter->wol_cap & BE_WOL_CAP) {
wol->supported |= WAKE_MAGIC;
- if (adapter->wol)
+ if (adapter->wol_en)
wol->wolopts |= WAKE_MAGIC;
- } else
+ } else {
wol->wolopts = 0;
+ }
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -730,15 +731,15 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
- if (!be_is_wol_supported(adapter)) {
+ if (!(adapter->wol_cap & BE_WOL_CAP)) {
dev_warn(&adapter->pdev->dev, "WOL not supported\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & WAKE_MAGIC)
- adapter->wol = true;
+ adapter->wol_en = true;
else
- adapter->wol = false;
+ adapter->wol_en = false;
return 0;
}
@@ -904,73 +905,21 @@ static u32 be_get_msg_level(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return -EOPNOTSUPP;
- }
-
return adapter->msg_enable;
}
-static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- int i, j;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
- u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
- for (j = 0; j < num_modes; j++) {
- if (cfgs->module[i].trace_lvl[j].mode ==
- MODE_UART)
- cfgs->module[i].trace_lvl[j].dbg_lvl =
- cpu_to_le32(level);
- }
- }
- status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
- cfgs);
- if (status)
- dev_err(&adapter->pdev->dev,
- "Message level set failed\n");
- } else {
- dev_err(&adapter->pdev->dev, "Message level get failed\n");
- }
-
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return;
-}
-
static void be_set_msg_level(struct net_device *netdev, u32 level)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter)) {
- dev_err(&adapter->pdev->dev, "Operation not supported\n");
- return;
- }
-
if (adapter->msg_enable == level)
return;
if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
- be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
- FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
+ if (BEx_chip(adapter))
+ be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+ FW_LOG_LEVEL_DEFAULT :
+ FW_LOG_LEVEL_FATAL);
adapter->msg_enable = level;
return;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a37039d353c5..36c80612e21a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -121,12 +121,6 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter) {
- return (adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED);
-}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
@@ -258,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* Proceed further only if the user-provided MAC is different
+ * from the active MAC
+ */
+ if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ return 0;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -280,14 +280,15 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ adapter->if_handle, true, 0);
if (status)
goto err;
/* The MAC change did not happen, either due to lack of privilege
* or PF didn't pre-provision.
*/
- if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ if (!ether_addr_equal(addr->sa_data, mac)) {
status = -EPERM;
goto err;
}
@@ -912,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
-static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
- struct sk_buff *skb,
- bool *skip_hw_vlan)
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
- * may cause a transmit stall on that port. So the work-around is to
- * pad short packets (<= 32 bytes) to a 36-byte length.
- */
- if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
- goto tx_drop;
- skb->len = 36;
- }
-
/* For padded packets, BE HW modifies tot_len field in IP header
* incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
@@ -958,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
vlan_tx_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
/* HW may lockup when VLAN HW tagging is requested on
@@ -980,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
return skb;
tx_drop:
dev_kfree_skb_any(skb);
+err:
return NULL;
}
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
+{
+ /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
+ * less may cause a transmit stall on that port. So the work-around is
+ * to pad short packets (<= 32 bytes) to a 36-byte length.
+ */
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+ if (skb_padto(skb, 36))
+ return NULL;
+ skb->len = 36;
+ }
+
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+ if (!skb)
+ return NULL;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1096,8 +1111,6 @@ static int be_vid_config(struct be_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Disabling VLAN Promiscuous mode.\n");
adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
- dev_info(&adapter->pdev->dev,
- "Re-Enabling HW VLAN filtering\n");
}
}
}
@@ -1105,12 +1118,12 @@ static int be_vid_config(struct be_adapter *adapter)
return status;
set_vlan_promisc:
- dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+ if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
+ return 0;
status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
if (!status) {
dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
adapter->flags |= BE_FLAGS_VLAN_PROMISC;
} else
dev_err(&adapter->pdev->dev,
@@ -1123,19 +1136,18 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
-
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
- status = be_vid_config(adapter);
+ adapter->vlans_added++;
- if (!status)
- adapter->vlans_added++;
- else
+ status = be_vid_config(adapter);
+ if (status) {
+ adapter->vlans_added--;
adapter->vlan_tag[vid] = 0;
+ }
ret:
return status;
}
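
be_vlan_add_vid() above now records the new VID first and rolls the bookkeeping back if be_vid_config() fails, instead of gating the firmware call on a filter-count check; be_vlan_rem_vid() is simplified the same way. The idiom, sketched with a hypothetical table (not the driver's actual structures):

#include <linux/types.h>

/* Optimistically record the new VID, then undo on failure so the table
 * always mirrors what the hardware actually accepted. */
static int demo_add_vid(u8 *vid_table, unsigned int *nvids, u16 vid,
			int (*commit)(void))
{
	int status;

	vid_table[vid] = 1;
	(*nvids)++;

	status = commit();
	if (status) {
		(*nvids)--;
		vid_table[vid] = 0;
	}
	return status;
}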
@@ -1150,9 +1162,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= be_max_vlans(adapter))
- status = be_vid_config(adapter);
-
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
@@ -1161,6 +1171,14 @@ ret:
return status;
}
+static void be_clear_promisc(struct be_adapter *adapter)
+{
+ adapter->promiscuous = false;
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+}
+
static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1174,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
- adapter->promiscuous = false;
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
+ be_clear_promisc(adapter);
if (adapter->vlans_added)
be_vid_config(adapter);
}
@@ -1291,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev,
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan) {
- /* If this is new value, program it. Else skip. */
- vf_cfg->vlan_tag = vlan;
+ if (vf_cfg->vlan_tag != vlan)
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
vf_cfg->if_handle, 0);
- }
} else {
/* Reset Transparent Vlan Tagging. */
- vf_cfg->vlan_tag = 0;
- vlan = vf_cfg->def_vid;
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
+ vf + 1, vf_cfg->if_handle, 0);
}
-
- if (status)
+ if (!status)
+ vf_cfg->vlan_tag = vlan;
+ else
dev_info(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed\n", vlan, vf);
+ "VLAN %d config on VF %d failed\n", vlan, vf);
return status;
}
@@ -1442,12 +1454,12 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6);
}
-static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
- u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
+ u16 frag_idx = rxq->tail;
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
@@ -1459,6 +1471,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
rx_page_info->last_page_user = false;
}
+ queue_tail_inc(rxq);
atomic_dec(&rxq->used);
return rx_page_info;
}
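
get_rx_page_info() above no longer takes a fragment index: struct be_rx_compl_info loses rxq_idx, and RX page_info entries are consumed strictly from the queue tail, which the function now advances itself with queue_tail_inc(). Sketch of the consumer-side pattern with a hypothetical ring (not the driver's types):

struct demo_ring {
	void *entries[256];
	unsigned int tail;	/* next slot to consume */
	unsigned int len;	/* number of slots */
};

/* Pop the entry at the tail and advance the tail, wrapping at ring->len. */
static void *demo_ring_consume(struct demo_ring *ring)
{
	void *entry = ring->entries[ring->tail];

	ring->tail = (ring->tail + 1) % ring->len;
	return entry;
}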
@@ -1467,15 +1480,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
static void be_rx_compl_discard(struct be_rx_obj *rxo,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxcp->rxq_idx, rxq->len);
}
}
@@ -1486,13 +1497,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo,
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
struct be_rx_compl_info *rxcp)
{
- struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, j;
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -1526,10 +1536,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* More frags present for this completion */
- index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
@@ -1550,7 +1559,6 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1581,7 +1589,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1598,7 +1606,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
- struct be_queue_info *rxq = &rxo->q;
u16 remaining, curr_frag_len;
u16 i, j;
@@ -1610,7 +1617,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1628,7 +1635,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -1639,7 +1645,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1663,8 +1669,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
rxcp->pkt_type =
@@ -1695,8 +1699,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
rxcp->ipv6 =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
rxcp->num_rcvd =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
rxcp->pkt_type =
@@ -1921,7 +1923,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
int flush_wait = 0;
- u16 tail;
/* Consume pending rx completions.
* Wait for the flush completion (identified by zero num_rcvd)
@@ -1954,9 +1955,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
be_cq_notify(adapter, rx_cq->id, false, 0);
/* Then free posted rx buffers that were not used */
- tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
- for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(rxo, tail);
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
@@ -2891,14 +2891,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
- be_cmd_get_mac_from_list(adapter, mac, &active,
- &vf_cfg->pmac_id, 0);
-
- status = be_cmd_mac_addr_query(adapter, mac, false,
- vf_cfg->if_handle, 0);
+ status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
+ mac, vf_cfg->if_handle,
+ false, vf+1);
if (status)
return status;
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3032,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter)
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
- u16 def_vlan, lnk_speed;
int status, old_vfs, vf;
- struct device *dev = &adapter->pdev->dev;
u32 privileges;
+ u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3103,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter)
if (!status)
vf_cfg->tx_rate = lnk_speed;
- status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle, NULL);
- if (status)
- goto err;
- vf_cfg->def_vid = def_vlan;
-
if (!old_vfs)
be_cmd_enable_vf(adapter, vf + 1);
}
@@ -3240,6 +3231,7 @@ static int be_get_resources(struct be_adapter *adapter)
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
+ u16 profile_id;
int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
@@ -3249,6 +3241,13 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_active_profile(adapter, &profile_id);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Using profile 0x%x\n", profile_id);
+ }
+
status = be_get_resources(adapter);
if (status)
return status;
@@ -3403,11 +3402,6 @@ static int be_setup(struct be_adapter *adapter)
goto err;
be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
- */
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
status = be_mac_setup(adapter);
if (status)
@@ -3426,6 +3420,8 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
+ be_cmd_get_acpi_wol_cap(adapter);
+
be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
@@ -4295,74 +4291,22 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
-bool be_is_wol_supported(struct be_adapter *adapter)
-{
- return ((adapter->wol_cap & BE_WOL_CAP) &&
- !be_is_wol_excluded(adapter)) ? true : false;
-}
-
-u32 be_get_fw_log_level(struct be_adapter *adapter)
-{
- struct be_dma_mem extfat_cmd;
- struct be_fat_conf_params *cfgs;
- int status;
- u32 level = 0;
- int j;
-
- if (lancer_chip(adapter))
- return 0;
-
- memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
- extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
- &extfat_cmd.dma);
-
- if (!extfat_cmd.va) {
- dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
- __func__);
- goto err;
- }
-
- status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
- if (!status) {
- cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
- for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
- if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
- level = cfgs->module[0].trace_lvl[j].dbg_lvl;
- }
- }
- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
- extfat_cmd.dma);
-err:
- return level;
-}
-
static int be_get_initial_config(struct be_adapter *adapter)
{
- int status;
- u32 level;
+ int status, level;
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
- status = be_cmd_get_acpi_wol_cap(adapter);
- if (status) {
- /* in case of a failure to get wol capabillities
- * check the exclusion list to determine WOL capability */
- if (!be_is_wol_excluded(adapter))
- adapter->wol_cap |= BE_WOL_CAP;
- }
-
- if (be_is_wol_supported(adapter))
- adapter->wol = true;
-
/* Must be a power of 2 or else MODULO will BUG_ON */
adapter->be_get_temp_freq = 64;
- level = be_get_fw_log_level(adapter);
- adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ if (BEx_chip(adapter)) {
+ level = be_cmd_get_fw_log_level(adapter);
+ adapter->msg_enable =
+ level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ }
adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
@@ -4625,7 +4569,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, true);
be_intr_set(adapter, false);
@@ -4681,7 +4625,7 @@ static int be_resume(struct pci_dev *pdev)
msecs_to_jiffies(1000));
netif_device_attach(netdev);
- if (adapter->wol)
+ if (adapter->wol_en)
be_setup_wol(adapter, false);
return 0;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
+#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
* @io_region_size: I/O memory region size
+ * @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
int dma_alloc;
resource_size_t io_region_size;
+ unsigned int num_bd;
unsigned int num_tx;
unsigned int cur_tx;
unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
struct phy_device *phy;
struct mii_bus *mdio;
+ struct clk *clk;
s8 phy_id;
};
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
}
priv->phy = phy;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+ phy->supported &= ~(SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+
return 0;
}
@@ -890,6 +900,102 @@ out:
return NETDEV_TX_OK;
}
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+ return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 *regs_buff = p;
+ unsigned i;
+
+ regs->version = 0;
+ for (i = 0; i < ETH_END / sizeof(u32); ++i)
+ regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = priv->num_bd - 1;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = priv->num_bd - 1;
+
+ ring->rx_pending = priv->num_rx;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+ ring->tx_pending + ring->rx_pending > priv->num_bd)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ ethoc_disable_rx_and_tx(priv);
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ synchronize_irq(dev->irq);
+ }
+
+ priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+ priv->num_rx = ring->rx_pending;
+ ethoc_init_ring(priv, dev->mem_start);
+
+ if (netif_running(dev)) {
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ ethoc_enable_rx_and_tx(priv);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+ .get_settings = ethoc_get_settings,
+ .set_settings = ethoc_set_settings,
+ .get_regs_len = ethoc_get_regs_len,
+ .get_regs = ethoc_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ethoc_get_ringparam,
+ .set_ringparam = ethoc_set_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
int num_bd;
int ret = 0;
bool random_mac = false;
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = -ENODEV;
goto error;
}
+ priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (dev_get_platdata(&pdev->dev)) {
- struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
+ /* Allow the platform setup code to adjust MII management bus clock. */
+ if (!eth_clkfreq) {
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(clk)) {
+ priv->clk = clk;
+ clk_prepare_enable(clk);
+ eth_clkfreq = clk_get_rate(clk);
+ }
+ }
+ if (eth_clkfreq) {
+ u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+ if (!clkdiv)
+ clkdiv = 2;
+ dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+ ethoc_write(priv, MIIMODER,
+ (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+ clkdiv);
+ }
+
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->netdev_ops = &ethoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
+ netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
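
The MIIMODER hunk above derives the MDIO management clock divider from the bus clock, which appears to target the usual 2.5 MHz MDIO limit. The following is an illustrative standalone sketch of that arithmetic only, not part of the patch; the mii_clkdiv() helper name and the example clock rates are assumptions for demonstration.

/* sketch: reproduce the divider rounding used in the ethoc probe hunk */
#include <stdio.h>

static unsigned int mii_clkdiv(unsigned long bus_hz)
{
	unsigned int div = bus_hz / 2500000 + 1;	/* same rounding as the hunk */

	return div ? div : 2;				/* fall back like the driver */
}

int main(void)
{
	/* e.g. a 50 MHz bus clock yields a divider of 21 */
	printf("clkdiv for 50 MHz:  %u\n", mii_clkdiv(50000000));
	printf("clkdiv for 100 MHz: %u\n", mii_clkdiv(100000000));
	return 0;
}
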
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 212f44b3a773..c11ecbc98149 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,7 +24,6 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -767,7 +766,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
continue;
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
@@ -1149,7 +1148,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_dbg(netdev, "tx packet too big\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1160,7 +1159,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
netdev_err(netdev, "map socket buffer failed\n");
netdev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0120217a16dd..3b8d6d19ff05 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -339,7 +339,8 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50bb71c663e2..03a351300013 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -390,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
- /* Send it on its way. Tell FEC it's ready, interrupt when done,
- * it's the last BD of the frame, and to put the CRC on the end.
- */
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
if (fep->bufdesc_ex) {
@@ -417,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex)
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Setup multicast filter. */
- set_multicast_list(ndev);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex)
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
@@ -1679,8 +1679,12 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
- return fec_ptp_ioctl(ndev, rq, cmd);
+ if (fep->bufdesc_ex) {
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return fec_ptp_get(ndev, rq);
+ }
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1775,8 +1779,6 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
- napi_enable(&fep->napi);
-
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1791,6 +1793,8 @@ fec_enet_open(struct net_device *ndev)
fec_enet_free_buffers(ndev);
return ret;
}
+
+ napi_enable(&fep->napi);
phy_start(fep->phy_dev);
netif_start_queue(ndev);
fep->opened = 1;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 5007e4f9fff9..89ccb5b08708 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -274,7 +273,7 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
* @ifreq: ioctl data
* @cmd: particular ioctl requested
*/
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -321,6 +320,20 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-EFAULT : 0;
}
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET just support 32bit counter, will timeout in 4s
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56f2f608a9f4..62f042d4aaa9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index f8b92864fc52..f5383abbf399 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index a9a00f39521a..fc5413488496 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index d37cd4ebac65..b4bf02f57d43 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 67caaacd19ec..3d3fde66c2cc 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index ac5d447ff8c4..7e69c983d12a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c4f65067cf7c..583e71ab7f51 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -20,7 +20,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b14d7904a075..ad5a5aadc7e1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -70,7 +70,6 @@
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -795,8 +794,7 @@ err_grp_init:
return err;
}
-static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -845,7 +843,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-/* Ioctl MII Interface */
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -854,7 +865,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_ioctl(dev, rq, cmd);
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
if (!priv->phydev)
return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 114c58f9d8d2..52bb2b0195cc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index d3d7ede27ef1..63d234419cc1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -889,11 +888,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv,
static int gfar_check_filer_hardware(struct gfar_private *priv)
{
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 i;
- regs = priv->gfargrp[0].regs;
-
/* Check if we are in FIFO mode */
i = gfar_read(&regs->ecntrl);
i &= ECNTRL_FIFM;
@@ -927,7 +924,7 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
/* Sets the properties for arbitrary filer rule
* to the first 4 Layer 4 Bytes
*/
- regs->rbifx = 0xC0C1C2C3;
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
return 0;
}
@@ -1055,10 +1052,18 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
struct ethtool_tcpip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
- gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
- gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be16_to_cpu(value->pdst),
+ be16_to_cpu(mask->pdst),
+ RQFCR_PID_DPT, tab);
+ gfar_set_attribute(be16_to_cpu(value->psrc),
+ be16_to_cpu(mask->psrc),
+ RQFCR_PID_SPT, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}
@@ -1067,12 +1072,17 @@ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
struct ethtool_usrip4_spec *mask,
struct filer_table *tab)
{
- gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
- gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
- gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+ be32_to_cpu(mask->l4_4_bytes),
+ RQFCR_PID_ARB, tab);
}
@@ -1139,7 +1149,41 @@ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
}
}
- gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+ gfar_set_attribute(be16_to_cpu(value->h_proto),
+ be16_to_cpu(mask->h_proto),
+ RQFCR_PID_ETY, tab);
+}
+
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
}
/* Convert a rule to binary filter format of gianfar */
@@ -1153,22 +1197,21 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
u32 old_index = tab->index;
/* Check if vlan is wanted */
- if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
if (!rule->m_ext.vlan_tci)
- rule->m_ext.vlan_tci = 0xFFFF;
+ rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
vlan = RQFPR_VLN;
vlan_mask = RQFPR_VLN;
/* Separate the fields */
- id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
- id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
- cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
- cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ id = vlan_tci_vid(rule);
+ id_mask = vlan_tci_vidm(rule);
+ cfi = vlan_tci_cfi(rule);
+ cfi_mask = vlan_tci_cfim(rule);
+ prio = vlan_tci_prio(rule);
+ prio_mask = vlan_tci_priom(rule);
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
@@ -1666,10 +1709,10 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
for (i = 0; i < sizeof(flow->m_u); i++)
flow->m_u.hdata[i] ^= 0xFF;
- flow->m_ext.vlan_etype ^= 0xFFFF;
- flow->m_ext.vlan_tci ^= 0xFFFF;
- flow->m_ext.data[0] ^= ~0;
- flow->m_ext.data[1] ^= ~0;
+ flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int gfar_add_cls(struct gfar_private *priv,
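
The vlan_tci_* helpers added above isolate the VID, CFI and priority fields after converting the big-endian TCI to host order. Below is a small self-contained sketch of the same decomposition, not part of the patch; the mask values mirror <linux/if_vlan.h>, and the sample TCI is an assumption chosen for illustration.

/* sketch: split a host-order VLAN TCI into its three fields */
#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_CFI_MASK	0x1000
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0x6064;	/* prio 3, CFI 0, VID 100 */

	printf("vid=%u cfi=%u prio=%u\n",
	       tci & VLAN_VID_MASK,
	       (tci & VLAN_CFI_MASK) ? 1 : 0,
	       (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	return 0;
}
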
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index e006a09ba899..abc28da27042 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/hrtimer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -134,7 +133,7 @@ struct gianfar_ptp_registers {
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
struct etsects {
- struct gianfar_ptp_registers *regs;
+ struct gianfar_ptp_registers __iomem *regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index acb55af7e3f3..e02dd1378751 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5548b6d00c31..72291a8904a9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -435,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
QE_CR_PROTOCOL_ETHERNET, 0);
}
-static inline int compare_addr(u8 **addr1, u8 **addr2)
-{
- return memcmp(addr1, addr2, ETH_ALEN);
-}
-
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
struct ucc_geth_tx_firmware_statistics *
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index e79aaf9ae52a..413329eff2ff 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -16,7 +16,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ef46b58cb4e9..7becab1aa3e4 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index f42f1b707733..d787fdd5db7b 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -79,7 +79,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 861fa15e1e81..17fca323c143 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -78,7 +78,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 4ceae9a30274..372fa8d1fda1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -13,7 +13,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 70074792bdef..67f342a9f65e 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -26,7 +26,6 @@
#define __IBM_NEWEMAC_CORE_H
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..1fc8334fc181 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
@@ -523,10 +522,21 @@ retry:
return rc;
}
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+ int i;
+ u64 encoded = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ encoded = (encoded << 8) | mac[i];
+
+ return encoded;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- u64 mac_address = 0;
+ u64 mac_address;
int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
@@ -580,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
- mac_address = mac_address >> 16;
+ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -1184,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
- unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
+ u64 mcast_addr;
+ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1276,18 +1285,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
struct ibmveth_adapter *adapter;
+ struct iommu_table *tbl;
unsigned long ret;
int i;
int rxqentries = 1;
+ tbl = get_iommu_table_base(&vdev->dev);
+
/* netdev inits at probe time along with the structures we need below*/
if (netdev == NULL)
- return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
adapter = netdev_priv(netdev);
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
- ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1295,11 +1307,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret +=
adapter->rx_buff_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
- buff_size);
+ buff_size, tbl);
rxqentries += adapter->rx_buff_pool[i].size;
}
/* add the size of the receive queue entries */
- ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+ ret += IOMMU_PAGE_ALIGN(
+ rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
return ret;
}
@@ -1369,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
-
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1380,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
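
The new ibmveth_encode_mac_addr() above replaces the old memcpy-and-shift so the MAC is packed into a u64 identically regardless of host endianness. A worked standalone sketch of that packing follows, not part of the patch; the sample address 00:11:22:33:44:55 is an assumption for demonstration.

/* sketch: pack six MAC octets big-endian into a 64-bit value */
#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static uint64_t encode_mac(const uint8_t *mac)
{
	uint64_t encoded = 0;
	int i;

	/* shift each octet in from the right: first byte ends up most significant */
	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* prints 0x001122334455, independent of host byte order */
	printf("0x%012llx\n", (unsigned long long)encode_mac(mac));
	return 0;
}
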
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066bafe057..1f37499d4398 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) IBM Corporation, 2003, 2010
*
@@ -139,7 +138,6 @@ struct ibmveth_adapter {
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
- unsigned long mac_addr;
void * buffer_list_addr;
void * filter_list_addr;
dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index abb300a31912..a21e4f5702b5 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <asm/bitops.h>
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9e..bb9f0ba9d164 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ This driver supports Intel(R) PCI Express virtual functions for the
+ Intel(R) ixgbe driver. For more information on how to identify your
+ adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
@@ -243,6 +243,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
+ select PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -259,4 +260,44 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
+config I40E_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
+config I40E_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on I40E && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config I40EVF
+ tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) XL710 and X710 virtual functions.
+ For more information on how to identify your adapter, go to the
+ Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40evf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7c..cdbbca8a3755 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index f9313b36c887..10a0f221b183 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d14eea17918..6d91933c4cdd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5790,7 +5790,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -5825,6 +5825,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5833,7 +5841,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_ioctl(netdev, ifr);
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
default:
return -EOPNOTSUPP;
}
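
The SIOCGHWTSTAMP handlers added above for gianfar, fec and e1000e all report the driver's current hardware timestamping configuration, so userspace no longer has to cache what it last set. The following is an illustrative userspace caller of the new ioctl, not part of the patch; it assumes a kernel that defines SIOCGHWTSTAMP and uses the interface name "eth0" purely as an example.

/* sketch: query an interface's hardware timestamping configuration */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* read back the current tx_type and rx_filter settings */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}
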
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552d..d9eb80acac4f 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
@@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \
i40e_debugfs.o \
i40e_diag.o \
i40e_txrx.o \
+ i40e_ptp.o \
i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ca9834cdfda..72dae4d97b43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -29,7 +28,7 @@
#define _I40E_H_
#include <net/tcp.h>
-#include <linux/init.h>
+#include <net/udp.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -50,11 +49,15 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
+#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_BASE_PF_SEID 16
@@ -63,7 +66,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -72,6 +75,7 @@
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
@@ -81,11 +85,13 @@
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 8
+#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+
+/* The values in here are decimal coded as hex, as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x30
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
@@ -127,7 +133,9 @@ enum i40e_state_t {
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -157,6 +165,8 @@ struct i40e_fdir_data {
u8 *raw_packet;
};
+#define I40E_ETH_P_LLDP 0x88cc
+
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
@@ -191,14 +201,20 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
u16 num_vf_qps; /* num queue pairs per vf */
- u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u8 atr_sample_rate;
+ bool wol_en;
+#ifdef CONFIG_I40E_VXLAN
+ __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ u16 pending_vxlan_bitmap;
+
+#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -216,24 +232,24 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
-
- u16 num_tx_queues;
- u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_PTP (u64)(1 << 25)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#endif
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -247,6 +263,7 @@ struct i40e_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -270,6 +287,8 @@ struct i40e_pf {
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -287,6 +306,20 @@ struct i40e_pf {
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct sk_buff *ptp_tx_skb;
+ struct work_struct ptp_tx_work;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ u64 ptp_base_adj;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+ bool ptp_tx;
+ bool ptp_rx;
};
struct i40e_mac_filter {
@@ -441,13 +474,11 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
>> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
- >> I40E_NVM_VERSION_MID_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
>> I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
@@ -495,6 +526,7 @@ int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,13 +534,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_pf *pf, bool add);
@@ -524,10 +549,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
@@ -544,6 +572,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
@@ -555,5 +584,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cdd..a50e6b3479ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,6 +30,8 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
+static void i40e_resume_aq(struct i40e_hw *hw);
+
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -43,13 +44,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
@@ -60,9 +65,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
@@ -70,21 +74,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
if (ret_code)
return ret_code;
- hw->aq.asq.desc = hw->aq.asq_mem.va;
- hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
- ret_code = i40e_allocate_virt_mem(hw, &mem,
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
- hw->aq.asq.details = mem.va;
-
return ret_code;
}
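
The reshaped allocator above keeps the usual two-step-with-rollback pattern: if the command-details array cannot be allocated, the descriptor DMA memory that was just obtained is released before returning. Stripped of the DMA specifics, the shape is roughly as follows (calloc stands in for i40e_allocate_dma_mem/i40e_allocate_virt_mem):

#include <stdlib.h>

struct ring {
	void *desc_buf;         /* stands in for the DMA descriptor memory */
	void *cmd_buf;          /* stands in for the command-details array */
};

/* Returns 0 on success; on failure nothing is left allocated. */
static int alloc_ring(struct ring *r, size_t ndesc, size_t desc_sz, size_t det_sz)
{
	r->desc_buf = calloc(ndesc, desc_sz);
	if (!r->desc_buf)
		return -1;

	r->cmd_buf = calloc(ndesc, det_sz);
	if (!r->cmd_buf) {
		free(r->desc_buf);      /* roll back the first allocation */
		r->desc_buf = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ring r;

	return alloc_ring(&r, 32, 32, 16) ? 1 : 0;
}
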
@@ -96,16 +93,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- hw->aq.arq.desc = hw->aq.arq_mem.va;
- hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
return ret_code;
}
@@ -119,14 +111,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
-
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
- mem.va = hw->aq.asq.details;
- i40e_free_virt_mem(hw, &mem);
- hw->aq.asq.details = NULL;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -138,20 +123,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq_mem);
- hw->aq.arq_mem.va = NULL;
- hw->aq.arq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
@@ -160,11 +142,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,29 +188,27 @@ unwind_alloc_arq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,35 +228,36 @@ unwind_alloc_asq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
+ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
/* only unmap if the address is non-NULL */
@@ -284,14 +265,19 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- /* now free the buffer info list */
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
@@ -299,14 +285,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
- wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
- wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
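
ATQBAH/ATQBAL are a 32-bit register pair, so the 64-bit physical address of the descriptor ring is written as its upper and lower halves; upper_32_bits()/lower_32_bits() are just the shift and mask. A standalone round-trip check of that split (the example address is arbitrary):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }

int main(void)
{
	uint64_t pa = 0x0000001234abc000ULL;    /* example DMA address */
	uint32_t bah = upper_32(pa);
	uint32_t bal = lower_32(pa);

	/* Reassemble to confirm no bits were lost. */
	uint64_t back = ((uint64_t)bah << 32) | bal;

	printf("BAH=0x%08" PRIx32 " BAL=0x%08" PRIx32 " ok=%d\n",
	       bah, bal, back == pa);
	return 0;
}
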
@@ -314,7 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
/**
* i40e_config_arq_regs - ARQ register configuration
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
@@ -322,14 +312,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
- wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
- wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
@@ -340,7 +334,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
/**
* i40e_init_asq - main initialization routine for ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
@@ -397,7 +391,7 @@ init_adminq_exit:
/**
* i40e_init_arq - initialize ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
@@ -454,7 +448,7 @@ init_adminq_exit:
/**
* i40e_shutdown_asq - shutdown the ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
@@ -466,10 +460,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ATQLEN1, 0);
- else
- wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +471,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_asq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_asq(hw);
mutex_unlock(&hw->aq.asq_mutex);
@@ -488,7 +479,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/**
* i40e_shutdown_arq - shutdown ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
@@ -500,10 +491,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ARQLEN1, 0);
- else
- wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +502,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_arq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_arq(hw);
mutex_unlock(&hw->aq.arq_mutex);
@@ -522,7 +510,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/**
* i40e_init_adminq - main initialization routine for Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
@@ -533,8 +521,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- u16 eetrack_lo, eetrack_hi;
i40e_status ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +551,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code)
goto init_adminq_free_asq;
- ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
- &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
- NULL);
- if (ret_code)
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
- goto init_adminq_free_arq;
- }
+ /* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
@@ -600,12 +607,15 @@ init_adminq_exit:
/**
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@@ -616,7 +626,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/**
* i40e_clean_asq - cleans Admin send queue
- * @asq: pointer to the adminq send ring
+ * @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
@@ -659,12 +669,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
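
i40e_asq_done() declares the send queue drained when the hardware head pointer has caught up with the software next_to_use index. The same head/next_to_use arithmetic, with wrap-around, yields the unused-descriptor count used by the clean path; a small illustration (the helper name and the 64-entry ring are made up for the example):

#include <stdio.h>

/* Free slots in a ring of 'count' entries, given the consumer index
 * (head, read back from hardware) and the producer index (next_to_use,
 * tracked in software). One slot is always left unused. */
static unsigned int ring_free(unsigned int head, unsigned int ntu, unsigned int count)
{
	return (head > ntu ? 0 : count) + head - ntu - 1;
}

int main(void)
{
	unsigned int count = 64;

	printf("free when idle:        %u\n", ring_free(10, 10, count)); /* 63 */
	printf("free with 5 pending:   %u\n", ring_free(10, 15, count)); /* 58 */
	return 0;
}
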
@@ -674,7 +684,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
- * @opaque: pointer to info to be used in async cleanup
+ * @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
@@ -854,7 +864,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
/**
@@ -912,7 +922,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
} else {
- memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
@@ -925,6 +935,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +962,16 @@ clean_arq_element_out:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
{
- u32 reg = 0;
-
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
- reg = hw->aq.num_asq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ATQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ATQLEN, reg);
- }
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
- reg = hw->aq.num_arq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ARQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ARQLEN, reg);
- }
}
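
The rewritten i40e_init_adminq() above wraps its first firmware command in a bounded retry: up to ten attempts, 100 ms apart, re-initializing the queues with i40e_resume_aq() between tries, and only the admin-queue timeout error is retried. Reduced to a standalone skeleton (the probe function, its failure mode and the sleep are placeholders; the real loop distinguishes the timeout error from other failures):

#include <stdio.h>
#include <unistd.h>

static int attempts_left = 3;           /* pretend FW is ready on the 3rd try */
static int probe_firmware(void)
{
	return --attempts_left > 0 ? -1 : 0;
}

int main(void)
{
	int ret, retry = 0;

	do {
		ret = probe_firmware();
		if (ret == 0)
			break;          /* success (or a non-retryable error) */
		retry++;
		usleep(100 * 1000);     /* stand-in for msleep(100) */
		/* the driver re-programs the queues here (i40e_resume_aq) */
	} while (retry < 10);

	printf("ret=%d after %d retries\n", ret, retry);
	return ret ? 1 : 0;
}
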
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e47..993f7685a911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -32,20 +31,20 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc))[i]))
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
- void *desc; /* Descriptor ring memory */
- void *details; /* ASQ details */
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
- u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
/* used for queue tracking */
u32 head;
u32 tail;
+ u32 len;
};
/* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
};
#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
@@ -94,9 +94,6 @@ struct i40e_adminq_info {
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
- struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
- struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
-
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
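
With desc_buf, cmd_buf and dma_head folded into i40e_adminq_ring, the I40E_ADMINQ_DESC and I40E_ADMINQ_DETAILS macros simply cast the ring's void * backing store to the element type and index it. The cast-and-index idiom on its own, using a toy descriptor type rather than the real i40e_aq_desc:

#include <stdlib.h>
#include <stdio.h>

struct toy_desc { unsigned short flags, opcode; unsigned int param; };

/* Same shape as: (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) */
#define TOY_DESC(base, i)  (&(((struct toy_desc *)(base))[i]))

int main(void)
{
	void *va = calloc(16, sizeof(struct toy_desc));   /* "DMA" backing store */

	if (!va)
		return 1;

	TOY_DESC(va, 3)->opcode = 0x0001;       /* write descriptor 3 */
	printf("desc[3].opcode = 0x%04x\n", TOY_DESC(va, 3)->opcode);

	free(va);
	return 0;
}
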
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f9..7b6374a8f8da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -35,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0001
struct i40e_aq_desc {
__le16 flags;
@@ -137,10 +136,13 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -317,13 +319,15 @@ struct i40e_aqc_get_version {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
- u8 reserved[12];
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -479,7 +483,7 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
@@ -517,6 +521,16 @@ struct i40e_aqc_mac_address_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +653,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- * use the generic i40e_aqc_switch_seid descriptor format
- * use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
@@ -664,7 +680,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1026,7 +1041,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1179,33 +1196,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} v4;
struct {
u8 data[16];
- } v6;
- } ipaddr;
+ } v6;
+ } ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
- __le32 key_low;
- __le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved[14];
+ u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
@@ -1548,7 +1578,7 @@ struct i40e_aqc_module_desc {
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
@@ -1582,6 +1612,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
@@ -1914,22 +1948,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
- u8 protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
- u8 reserved[12];
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +1982,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
- u8 multiple_entries;
- u8 tunnels_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
+
struct i40e_aqc_tunnel_key_structure {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2101,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9dc..926811ad44ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975a..e7f38b57834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -43,20 +42,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
- case I40E_SFP_XL710_DEVICE_ID:
- case I40E_SFP_X710_DEVICE_ID:
- case I40E_QEMU_DEVICE_ID:
- case I40E_KX_A_DEVICE_ID:
- case I40E_KX_B_DEVICE_ID:
- case I40E_KX_C_DEVICE_ID:
- case I40E_KX_D_DEVICE_ID:
- case I40E_QSFP_A_DEVICE_ID:
- case I40E_QSFP_B_DEVICE_ID:
- case I40E_QSFP_C_DEVICE_ID:
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_VF_DEVICE_ID:
- case I40E_VF_HV_DEVICE_ID:
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -75,7 +74,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
- * @cap: pointer to adminq command descriptor
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
*
* Dumps debug log about adminq command with descriptor contents.
@@ -126,6 +126,43 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
*
@@ -142,14 +179,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
i40e_status status = 0;
u32 reg;
- hw->phy.get_link_info = true;
-
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
i40e_set_mac_type(hw);
switch (hw->mac.type) {
@@ -160,6 +189,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
break;
}
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
status = i40e_init_nvm(hw);
return status;
}
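
The PF id derivation above follows from ARI: with Alternative Routing-ID Interpretation enabled, the device number becomes part of an 8-bit function number, so the id is (device << 3) | function; without ARI it is just the function number. A quick standalone check of that arithmetic (the capability-register read is faked away):

#include <stdio.h>
#include <stdint.h>

static uint8_t pf_id_from_bus(unsigned int device, unsigned int func, int ari_enabled)
{
	return ari_enabled ? (uint8_t)((device << 3) | func) : (uint8_t)func;
}

int main(void)
{
	/* device 2, function 1: ARI gives PF 17, legacy numbering gives PF 1 */
	printf("ARI: %u, legacy: %u\n",
	       pf_id_from_bus(2, 1, 1), pf_id_from_bus(2, 1, 0));
	return 0;
}
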
@@ -210,8 +254,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
cmd_data->command_flags = cpu_to_le16(flags);
- memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
- memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
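
The memcpy pair is replaced by explicit packing: the first two MAC bytes land in the 16-bit SAH field and the remaining four in the 32-bit SAL field, each then converted with cpu_to_le*(). The byte arithmetic on its own, verified standalone (the cpu_to_le conversion is dropped here and the example address is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	uint16_t sah = (uint16_t)(((uint16_t)mac[0] << 8) | mac[1]);
	uint32_t sal = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		       ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];

	printf("SAH=0x%04x SAL=0x%08x\n", sah, sal);   /* 0x001b, 0x213c4d5e */
	return 0;
}
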
@@ -240,32 +287,53 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
**/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
-
- /* Make sure it is not a multicast address */
- if (I40E_IS_MULTICAST(mac_addr)) {
- hw_dbg(hw, "MAC address is multicast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- } else if (I40E_IS_BROADCAST(mac_addr)) {
- hw_dbg(hw, "MAC address is broadcast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
- hw_dbg(hw, "MAC address is all zeros\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
}
- return status;
+
+ return media;
}
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 10
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -275,7 +343,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
**/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
- u32 wait_cnt = 0;
+ u32 cnt = 0;
+ u32 cnt1 = 0;
u32 reg = 0;
u32 grst_del;
@@ -285,7 +354,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -296,17 +365,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
return I40E_ERR_RESET_FAILED;
}
- /* Determine the PF number based on the PCI fn */
- hw->pf_id = (u8)hw->bus.func;
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW Reset complete timed out\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
/* If there was a Global Reset in progress when we got here,
* we don't need to do the PF Reset
*/
- if (!wait_cnt) {
+ if (!cnt) {
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
- for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ for (; cnt; cnt--) {
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
@@ -319,6 +408,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
}
i40e_clear_pxe_mode(hw);
+
return 0;
}
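
i40e_pf_reset() now polls twice with a bounded count: first for any global reset to clear, then for the CORE_DONE and GLOBAL_DONE bits in GLNVM_ULD before deciding whether a PF software reset is still needed. The poll-until-ready-or-give-up shape, reduced to standalone C (the register read, the bit values and the sleep are stand-ins; the sketch insists on both bits, which is what the loop itself waits for):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define CORE_DONE   0x1u
#define GLOBAL_DONE 0x2u

static int polls;                               /* fake device readiness */
static uint32_t read_uld(void)
{
	return ++polls >= 4 ? (CORE_DONE | GLOBAL_DONE) : 0;
}

int main(void)
{
	uint32_t reg = 0;
	int cnt;

	for (cnt = 0; cnt < 10; cnt++) {
		reg = read_uld() & (CORE_DONE | GLOBAL_DONE);
		if (reg == (CORE_DONE | GLOBAL_DONE))
			break;
		usleep(10000);          /* stand-in for usleep_range(10000, 20000) */
	}
	if (reg != (CORE_DONE | GLOBAL_DONE)) {
		fprintf(stderr, "firmware never became ready\n");
		return 1;
	}
	printf("ready after %d polls\n", cnt + 1);
	return 0;
}
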
@@ -335,9 +425,47 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+ if (hw->revision_id == 0) {
+ /* As a workaround, clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
}
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -349,24 +477,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 gpio_val = 0;
u32 mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
-
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
@@ -376,60 +500,45 @@ u32 i40e_led_get(struct i40e_hw *hw)
/**
* i40e_led_set - set new on/off mode
* @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * if this function is used to turn on the blink it should
+ * be used to disable the blink when restoring the original state.
**/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 gpio_val = 0;
- u32 led_mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
+ if (mode & 0xfffffff0)
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
- gpio_val |= led_mode;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
}
}
/* Admin command wrappers */
-/**
- * i40e_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
-
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
/**
* i40e_aq_set_link_restart_an
@@ -490,15 +599,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
goto aq_get_link_info_exit;
/* save off old link status information */
- memcpy(&hw->phy.link_info_old, hw_link_info,
- sizeof(struct i40e_link_status));
+ hw->phy.link_info_old = *hw_link_info;
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -519,7 +629,7 @@ aq_get_link_info_exit:
/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Add a VSI context to the hardware.
@@ -571,7 +681,8 @@ aq_add_vsi_exit:
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -665,7 +776,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
/**
* i40e_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
@@ -673,8 +784,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -683,7 +794,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -707,7 +818,7 @@ aq_get_vsi_params_exit:
/**
* i40e_aq_update_vsi_params
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Update a VSI context.
@@ -717,13 +828,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -810,7 +921,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
/**
* i40e_aq_send_driver_version
* @hw: pointer to the hw struct
- * @event: driver event: driver ok, start or stop
* @dv: driver's major, minor version
* @cmd_details: pointer to command details structure or NULL
*
@@ -873,6 +983,7 @@ i40e_get_link_status_exit:
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
* @cmd_details: pointer to command details structure or NULL
*
@@ -881,7 +992,8 @@ i40e_get_link_status_exit:
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -907,6 +1019,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
cmd->veb_flags = cpu_to_le16(veb_flags);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -922,10 +1038,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* @hw: pointer to the hw struct
* @veb_seid: the SEID of the VEB to query
* @switch_id: the uplink switch id
- * @floating_veb: set to true if the VEB is floating
+ * @floating: set to true if the VEB is floating
* @statistic_index: index of the stats counter block for this VEB
* @vebs_used: number of VEB's used by function
- * @vebs_unallocated: total VEB's not reserved by any function
+ * @vebs_free: total VEB's not reserved by any function
* @cmd_details: pointer to command details structure or NULL
*
* This retrieves the parameters for a particular VEB, specified by
@@ -1059,89 +1175,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
@@ -1519,8 +1557,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
- i40e_status status = 0;
struct i40e_aq_desc desc;
+ i40e_status status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -1681,6 +1719,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header length in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_delete_element - Delete switch element
* @hw: pointer to the hw struct
* @seid: the SEID to delete from the switch
@@ -1709,6 +1804,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
* @hw: pointer to the hw struct
* @seid: seid for the physical port/switching component/vsi
@@ -1787,6 +1904,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
}
/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
* i40e_aq_query_vsi_bw_config - Query VSI BW configuration
* @hw: pointer to the hw struct
* @seid: seid of the VSI
@@ -2039,3 +2190,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return 0;
}
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: true to add the control packet filter, false to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On completion it updates the perfect filter counts in the stats
+ * member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = cpu_to_le16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ memcpy(cmd->mac, mac_addr, ETH_ALEN);
+
+ cmd->etype = cpu_to_le16(ethtype);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->seid = cpu_to_le16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+ stats->etype_used = le16_to_cpu(resp->etype_used);
+ stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+ stats->etype_free = le16_to_cpu(resp->etype_free);
+ }
+
+ return status;
+}
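A usage sketch, not from this patch, assuming flags of 0 and queue 0: steer LLDP frames (Ethertype 0x88cc) to the LAN VSI so a software agent can process them, mirroring the debugfs lldp handling added later in this series.

static int my_redirect_lldp_to_sw(struct i40e_pf *pf)
{
	struct i40e_control_filter_stats stats = {};
	i40e_status status;

	status = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						I40E_ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, true, &stats, NULL);
	if (status)
		return -EIO;

	dev_info(&pf->pdev->dev, "mac+etype filters in use: %u\n",
		 stats.mac_etype_used);
	return 0;
}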
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
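A probe-time sketch, not from this patch: the Link Status word is read through the generic pcie_capability_read_word() helper and handed to i40e_set_pci_config_data().

static void my_cache_pcie_link_info(struct i40e_hw *hw, struct pci_dev *pdev)
{
	u16 link_status = 0;

	/* PCI_EXP_LNKSTA holds the negotiated link width and speed */
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}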
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 000000000000..50730141bb7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_ieee_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
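A worked example of the priority-table unpacking, not from this patch: each octet carries two 3-bit TC assignments, PRIO_1 in bits 6:4 and PRIO_0 in bits 2:0, so the byte 0x21 maps priority 2*i to TC 2 and priority 2*i+1 to TC 1.

static void my_unpack_prio_octet(u8 octet, u8 *even_prio_tc, u8 *odd_prio_tc)
{
	/* the even-numbered priority lives in the high nibble */
	*even_prio_tc = (octet & I40E_IEEE_ETS_PRIO_1_MASK) >>
			I40E_IEEE_ETS_PRIO_1_SHIFT;
	*odd_prio_tc = (octet & I40E_IEEE_ETS_PRIO_0_MASK) >>
			I40E_IEEE_ETS_PRIO_0_SHIFT;
}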
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
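A decode sketch, not from this patch: typelength is a 16-bit network-order word with the 7-bit TLV type in bits 15:9 and the 9-bit payload length in bits 8:0, so 0xFE18 decodes to type 127 (organization specific) and length 24.

static void my_decode_tlv_header(__be16 typelength, u16 *type, u16 *len)
{
	u16 tl = ntohs(typelength);

	*type = (tl & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT;
	*len = (tl & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT;
}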
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update the parsed IEEE DCB data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update the parsed DCB data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported; all other
+ * organization specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (tlv) {
+ typelength = ntohs(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ if (type == I40E_TLV_TYPE_END)
+ break;/* END TLV break out */
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
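A minimal probe-time sketch, not from this patch, mirroring the I40E_FLAG_DCB_ENABLED checks used by the dcbnl code below:

static void my_probe_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	if (i40e_init_dcb(hw))
		return;		/* DCB unsupported or the DCBX query failed */

	/* only enable DCB handling once firmware DCBX has a configuration */
	if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
	    hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
}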
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 000000000000..34cf1c30c7ff
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 000000000000..6e8103abfd0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, I40E_PRTDCB_GENC);
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+ I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ i40e_get_pfc_delay(hw, &pfc->delay);
+
+ /* Get Requests/Indications */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @netdev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @netdev: the corresponding netdev
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+ u8 *perm_addr)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < dev->addr_len; i++)
+ perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+ for (j = 0; j < dev->addr_len; j++, i++)
+ perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = i40e_dcbnl_ieee_getets,
+ .ieee_getpfc = i40e_dcbnl_ieee_getpfc,
+ .getdcbx = i40e_dcbnl_getdcbx,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate event for
+ * other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ struct dcb_app sapp;
+ u8 prio, tc_map;
+ int i;
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ dcbxcfg = &hw->local_dcbx_config;
+
+ /* Set up all the App TLVs if DCBx is negotiated */
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ prio = dcbxcfg->app[i].priority;
+ tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_config.enabled_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].protocolid;
+ sapp.priority = prio;
+ dcb_ieee_setapp(dev, &sapp);
+ }
+ }
+
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+ struct i40e_ieee_app_priority_table *app)
+{
+ struct net_device *dev = vsi->netdev;
+ struct dcb_app sapp;
+
+ if (!dev)
+ return -EINVAL;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->protocolid;
+ sapp.priority = app->priority;
+ return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding pf
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int v, err;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] && pf->vsi[v]->netdev) {
+ err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ if (err)
+ dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ __func__, pf->vsi[v]->seid,
+ err, app->selector,
+ app->protocolid, app->priority);
+ }
+ }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->protocolid == cfg->app[i].protocolid &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding pf
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_ieee_app_priority_table app;
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ app = dcbxcfg->app[i];
+ /* The APP is no longer available, delete it */
+ if (!i40e_dcbnl_find_app(new_cfg, &app))
+ i40e_dcbnl_del_app(pf, &app);
+ }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ /* Do not setup DCB NL ops for MFP mode */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ dev->dcbnl_ops = &dcbnl_ops;
+
+ /* Set initial IEEE DCB settings */
+ i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef4cb1cf31f2..da22c3fa2c00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -192,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc, len);
+ memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
p += len;
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc, len);
+ memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
p += len;
i40e_dbg_dump_data_len = buflen;
@@ -362,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
}
/**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
**/
@@ -516,10 +515,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->stats.bytes,
rx_ring->rx_stats.non_eop_descs);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
i,
- rx_ring->rx_stats.alloc_rx_page_failed,
- rx_ring->rx_stats.alloc_rx_buff_failed);
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
@@ -533,6 +532,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
struct i40e_adminq_ring *ring;
struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
int i;
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
/* first the send (command) ring, then the receive (event) ring */
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
}
@@ -759,27 +752,25 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
struct i40e_pf *pf, bool is_rx_ring)
{
- union i40e_rx_desc *ds;
+ struct i40e_tx_desc *txd;
+ union i40e_rx_desc *rxd;
struct i40e_ring ring;
struct i40e_vsi *vsi;
int i;
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
- dev_info(&pf->pdev->dev,
- "vsi %d not found\n", vsi_seid);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
return;
}
if (is_rx_ring)
@@ -790,22 +781,27 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
for (i = 0; i < ring.count; i++) {
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, i);
- else
- ds = (union i40e_rx_desc *)
- I40E_TX_DESC(&ring, i);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n", i,
- ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, txd->buffer_addr,
+ txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- i, ds->read.pkt_addr,
- ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
}
} else if (cnt == 3) {
if (desc_n >= ring.count || desc_n < 0) {
@@ -813,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"descriptor %d not found\n", desc_n);
return;
}
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, desc_n);
- else
- ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
- vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ txd->buffer_addr, txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
} else {
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
}
@@ -979,8 +977,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
- dev_info(&pf->pdev->dev,
- "%d: can't find veb\n", seid);
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
@@ -1006,6 +1003,22 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
+ * @pf: the pf that would be altered
+ * @flag: flag that needs enabling or disabling
+ * @enable: Enable/disable FD SD/ATR
+ **/
+static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+{
+ if (enable)
+ pf->flags |= flag;
+ else
+ pf->flags &= ~flag;
+ dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+}
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1022,8 +1035,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- u8 *print_buf_start;
- u8 *print_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1048,11 +1059,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count = cmd_buf_tmp - cmd_buf + 1;
}
- print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
- if (!print_buf_start)
- goto command_write_done;
- print_buf = print_buf_start;
-
if (strncmp(cmd_buf, "add vsi", 7) == 0) {
vsi_seid = -1;
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1104,7 +1110,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
- "add relay: vsi VSI %d not found\n", vsi_seid);
+ "add relay: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
@@ -1462,20 +1468,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "empr", 4) == 0) {
+ dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
- cnt = sscanf(&cmd_buf[4], "%x", &address);
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
goto command_write_done;
@@ -1494,7 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
- cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
goto command_write_done;
@@ -1512,7 +1522,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
address, value);
} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
- cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1539,6 +1549,118 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
}
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[11],
+ "%hx %hx %hx %hx %x %x %x %x %x %x",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+ u16 buffer_len;
+ u8 *buff;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
@@ -1564,7 +1686,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1588,19 +1710,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len = min_t(u16,
packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
- dev_info(&pf->pdev->dev, "FD raw packet:\n");
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
&fd_data.raw_packet[i]);
j += 3;
- snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
}
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+ print_hex_dump(KERN_INFO, "FD raw packet: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ fd_data.raw_packet, packet_len, true);
ret = i40e_program_fdir_filter(&fd_data, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
@@ -1612,6 +1730,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
fd_data.raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+ } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1622,8 +1748,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, true, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, false, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ /* Continue and start FW LLDP anyways */
+ }
+
ret = i40e_aq_start_lldp(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1631,10 +1784,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1652,22 +1809,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (local) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
u16 llen, rlen;
- int ret, i;
+ int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
@@ -1686,17 +1836,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (remote) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1721,7 +1864,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
}
} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
- u16 buffer_len, i, bytes;
+ u16 buffer_len, bytes;
u16 module;
u32 offset;
u16 *buff;
@@ -1775,16 +1918,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"Read NVM module=0x%x offset=0x%x words=%d\n",
module, offset, buffer_len);
- for (i = 0; i < buffer_len; i++) {
- if ((i % 16) == 0) {
- snprintf(print_buf, 11, "\n0x%08x: ",
- offset + i);
- print_buf += 11;
- }
- snprintf(print_buf, 5, "%04x ", buff[i]);
- print_buf += 5;
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
}
kfree(buff);
buff = NULL;
@@ -1814,8 +1951,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " fd-atr off\n");
+ dev_info(&pf->pdev->dev, " fd-atr on\n");
+ dev_info(&pf->pdev->dev, " fd-sb off\n");
+ dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
@@ -1828,9 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
command_write_done:
kfree(cmd_buf);
cmd_buf = NULL;
- kfree(print_buf_start);
- print_buf = NULL;
- print_buf_start = NULL;
return count;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde6..b2380daef8c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -68,16 +67,16 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -119,7 +118,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f4526..0b5911652084 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,10 +30,10 @@
#include "i40e_type.h"
enum i40e_lb_mode {
- I40E_LB_MODE_NONE = 0,
- I40E_LB_MODE_PHY_LOCAL,
- I40E_LB_MODE_PHY_REMOTE,
- I40E_LB_MODE_MAC_LOCAL,
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1b86138fa9e1..b1d7d8c5cb9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -109,6 +108,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define I40E_QUEUE_STATS_LEN(n) \
@@ -193,28 +194,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -256,12 +277,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +352,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- int first_word, last_word;
- u16 i, eeprom_len;
- u16 *eeprom_buff;
- int ret_val = 0;
-
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
- ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
- eeprom_buff);
- if (eeprom_len == 0) {
- kfree(eeprom_buff);
- return -EACCES;
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
}
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+ ret_val = i40e_aq_read_nvm(hw, 0x0,
+ eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ len,
+ (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto release_nvm;
+ }
+ }
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+ i40e_release_nvm(hw);
+ memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
kfree(eeprom_buff);
-
return ret_val;
}
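The rewritten i40e_get_eeprom above reads the NVM through the admin queue in 4 KB chunks, marking the final chunk so firmware can close the transaction, instead of the old word-at-a-time shadow-RAM read. A standalone sketch of just the sector-splitting arithmetic, with read_chunk() standing in for i40e_aq_read_nvm():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVM_SECTOR_SIZE 4096u

static int read_chunk(uint32_t offset, uint32_t len, bool last_command)
{
	printf("read offset=%u len=%u last=%d\n", offset, len, last_command);
	return 0;	/* pretend the hardware read succeeded */
}

static int read_nvm(uint32_t offset, uint32_t total_len)
{
	uint32_t sectors = total_len / NVM_SECTOR_SIZE;
	uint32_t len = NVM_SECTOR_SIZE;
	bool last = false;

	if (total_len % NVM_SECTOR_SIZE)
		sectors++;			/* partial tail sector */

	for (uint32_t i = 0; i < sectors; i++) {
		if (i == sectors - 1) {
			len = total_len - NVM_SECTOR_SIZE * i;	/* short last chunk */
			last = true;				/* lets firmware end the transaction */
		}
		int err = read_chunk(offset + NVM_SECTOR_SIZE * i, len, last);

		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	return read_nvm(0, 10000);	/* two full sectors plus a 1808-byte tail */
}
```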
@@ -368,8 +409,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
-
- return hw->nvm.sr_size * 2;
+ u32 val;
+
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ /* register returns value in power of 2, 64Kbyte chunks. */
+ val = (64 * 1024) * (1 << val);
+ return val;
}
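i40e_get_eeprom_len now derives the reported size from the flash-size field of GLPCI_LBARCTRL, which encodes the size as a power-of-two multiple of 64 KB. A tiny illustration of that decode; the mask and shift values below are placeholders, not the real register layout:

```c
#include <stdint.h>
#include <stdio.h>

#define FL_SIZE_SHIFT 10			/* placeholder bit position */
#define FL_SIZE_MASK  (0x7u << FL_SIZE_SHIFT)

static uint32_t flash_size_bytes(uint32_t lbarctrl)
{
	uint32_t exp = (lbarctrl & FL_SIZE_MASK) >> FL_SIZE_SHIFT;

	return (64u * 1024u) << exp;	/* 64 KB chunks, power-of-two count */
}

int main(void)
{
	/* field value 3 -> 64 KB << 3 = 524288 bytes (512 KB) */
	printf("%u\n", flash_size_bytes(3u << FL_SIZE_SHIFT));
	return 0;
}
```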
static void i40e_get_drvinfo(struct net_device *netdev,
@@ -418,15 +465,19 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
if ((new_tx_count == vsi->tx_rings[0]->count) &&
@@ -699,11 +750,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
static int i40e_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
- return ethtool_op_get_ts_info(dev, info);
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pf->ptp_clock)
+ info->phc_index = ptp_clock_index(pf->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
}
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
*data = 0;
else
@@ -712,36 +796,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
return *data;
}
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_reg_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_eeprom_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ usleep_range(1000, 2000);
+ *data = (swc_old == pf->sw_int_count);
return *data;
}
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "loopback test not implemented\n");
+ *data = 0;
return *data;
}
@@ -752,42 +851,38 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
- netdev_info(netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
- netdev_info(netdev, "link test starting\n");
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- netdev_info(netdev, "register test starting\n");
- if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "eeprom test starting\n");
- if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "interrupt test starting\n");
- if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "loopback test starting\n");
- if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
- netdev_info(netdev, "online test starting\n");
/* Online tests */
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Offline only tests, not run in online; pass by default */
@@ -795,16 +890,53 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
-
- clear_bit(__I40E_TESTING, &pf->state);
}
+
+ netif_info(pf, drv, netdev, "testing finished\n");
}
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (((1 << hw->port) & wol_nvm_bits))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
}
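Both WoL callbacks above key off a per-port disable bit in the NVM wake-on-LAN word: if bit hw->port is set, Wake-on-LAN is unavailable for that port, otherwise only WAKE_MAGIC is offered. A small illustration of the bit test with a made-up NVM word and a four-port device:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit N set in the NVM word means WoL is disabled for port N. */
static bool wol_supported(uint16_t wol_nvm_bits, unsigned int port)
{
	return !((1u << port) & wol_nvm_bits);
}

int main(void)
{
	uint16_t nvm_word = 0x000A;	/* made-up value: ports 1 and 3 disabled */

	for (unsigned int port = 0; port < 4; port++)
		printf("port %u: WoL %s\n", port,
		       wol_supported(nvm_word, port) ? "supported" : "disabled");
	return 0;
}
```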
static int i40e_nway_reset(struct net_device *netdev)
@@ -838,13 +970,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
pf->led_status = i40e_led_get(hw);
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF);
+ i40e_led_set(hw, 0xF, false);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0);
+ i40e_led_set(hw, 0x0, false);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status);
+ i40e_led_set(hw, pf->led_status, false);
break;
}
@@ -1003,6 +1135,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = 10;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1142,6 +1275,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
* i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
* a specific flow spec
@@ -1162,6 +1296,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1192,6 +1332,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
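The packet[] array added to i40e_add_del_fdir_udpv4 above is a 42-byte Ethernet + IPv4 + UDP header template (14 + 20 + 8 bytes) with only the fixed fields pre-filled; the driver then patches the addresses and ports from the ethtool flow spec before programming the filter. The 54-byte TCP and 34-byte IP-only templates later in this patch follow the same pattern. A field-by-field reconstruction of the same 42 bytes, as a sketch rather than the driver's code:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t pkt[42];

	memset(pkt, 0, sizeof(pkt));
	pkt[12] = 0x08; pkt[13] = 0x00;	/* EtherType: IPv4 */
	pkt[14] = 0x45;			/* IP version 4, header length 5 words */
	pkt[17] = 0x1c;			/* IP total length 28 = 20 (IP) + 8 (UDP) */
	pkt[20] = 0x40;			/* IP flags: don't fragment */
	pkt[22] = 0x40;			/* TTL 64 */
	pkt[23] = 0x11;			/* protocol 17 = UDP */
	/* bytes 26..33: src/dst IP, bytes 34..37: src/dst UDP port - patched
	 * in from the flow spec, left zero in the template */

	for (size_t i = 0; i < sizeof(pkt); i++)
		printf("%02x%c", pkt[i], (i % 14 == 13) ? '\n' : ' ');
	putchar('\n');
	return 0;
}
```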
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
* i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
* a specific flow spec
@@ -1211,6 +1352,14 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
+ /* Dummy packet */
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1218,6 +1367,15 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1232,9 +1390,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, ret);
}
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1268,6 +1423,7 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
* a specific flow spec
@@ -1287,7 +1443,11 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
bool err = false;
int ret;
int i;
+ char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1356,8 +1516,8 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
fd_data.flex_off = 0;
fd_data.pctype = 0;
fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = 0;
- fd_data.fd_status = 0;
+ fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
fd_data.cnt_index = 0;
fd_data.fd_id = 0;
@@ -1400,6 +1560,7 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
return ret;
}
+
/**
* i40e_set_rxnfc - command to set RX flow classification rules
* @netdev: network interface device structure
@@ -1431,6 +1592,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+ /* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current channels enabled and max supported etc.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channels count.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channels count may not be the same as requested by the user
+ * since it gets rounded down to a power of 2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 0)
+ return 0;
+ else
+ return -EINVAL;
+}
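As the kernel-doc above notes, the requested combined channel count is capped at the RSS maximum and effectively rounded down to a power of two when the queues are rebuilt. A small illustration of that rounding, using a local helper in place of the kernel's rounddown_pow_of_two():

```c
#include <stdio.h>

/* Largest power of two that is <= n (n must be > 0). */
static unsigned int rounddown_pow2(unsigned int n)
{
	unsigned int p = 1;

	while ((p << 1) && (p << 1) <= n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requests[] = { 1, 3, 6, 8, 13 };

	for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
		printf("requested %u -> %u queues\n",
		       requests[i], rounddown_pow2(requests[i]));
	return 0;	/* prints 1, 2, 4, 8, 8 */
}
```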
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1439,6 +1688,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
.get_ringparam = i40e_get_ringparam,
@@ -1455,6 +1705,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..bf2d4cc5b569 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
sd_entry->u.pd_table.pd_entry =
(struct i40e_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
- memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.pd_table.pd_page_addr = mem;
} else {
- memcpy(&sd_entry->u.bp.addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.addr = mem;
sd_entry->u.bp.sd_pd_index = sd_index;
}
/* initialize the sd entry */
@@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
if (ret_code)
goto exit;
- memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.addr = mem;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e9..0cd4701234f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -117,7 +116,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
@@ -139,7 +137,6 @@ struct i40e_hmc_info {
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
@@ -160,7 +157,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
- * @hmc_fn_id: hmc function id
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c79..d5d98fe2691d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -486,8 +485,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
/* Make one big object, a single SD */
info.count = 1;
ret_code = i40e_create_lan_hmc_object(hw, &info);
- if ((ret_code) &&
- (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
goto try_type_paged;
else if (ret_code)
goto configure_lan_hmc_out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff35006077..341de925a298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -113,8 +112,8 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 12b0932204ba..b901371ca361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,9 @@
/* Local includes */
#include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -36,22 +38,24 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 30
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -61,16 +65,16 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
- {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
/* required last entry */
{0, }
};
@@ -354,6 +358,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
if (!vsi->tx_rings)
return stats;
@@ -416,7 +423,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i]->stats));
@@ -427,6 +434,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -461,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
{
u64 new_data;
- if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
} else {
@@ -577,10 +585,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -778,8 +787,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1065,7 +1074,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
+ is_vf, is_netdev))
return NULL;
}
}
@@ -1207,6 +1216,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1260,6 +1273,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
+ u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
@@ -1281,6 +1295,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ /* Number of queues per enabled TC */
+ num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1288,30 +1305,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
int pow, num_qps;
- vsi->tc_config.tc_info[i].qoffset = offset;
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (i == 0)
- qcount = pf->rss_size;
- else
- qcount = pf->num_tc_qps;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
default:
- qcount = vsi->alloc_queue_pairs;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = num_tc_qps;
WARN_ON(i != 0);
break;
}
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
/* find the power-of-2 of the number of queue pairs */
- num_qps = vsi->tc_config.tc_info[i].qcount;
+ num_qps = qcount;
pow = 0;
- while (num_qps &&
- ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ while (num_qps && ((1 << pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1321,7 +1333,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
(offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
- offset += vsi->tc_config.tc_info[i].qcount;
+ offset += qcount;
} else {
/* TC is not enabled so set the offset to
* default queue and allocate one queue
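The queue-map hunks above split the VSI's queue pairs evenly across the enabled TCs (rounded down to a power of two) and encode each TC's slice in the qmap word as a queue offset plus a power-of-two size exponent. A sketch of that bookkeeping; the shift values stand in for the I40E_AQ_VSI_TC_QUE_* constants and are placeholders here:

```c
#include <stdint.h>
#include <stdio.h>

#define QUE_OFFSET_SHIFT 0	/* placeholder bit positions */
#define QUE_NUMBER_SHIFT 9

static uint16_t build_qmap(uint16_t offset, uint16_t qcount)
{
	uint16_t pow = 0, n = qcount;

	/* find the smallest pow with (1 << pow) >= qcount */
	while (n && (1u << pow) < qcount) {
		pow++;
		n >>= 1;
	}
	return (offset << QUE_OFFSET_SHIFT) | (pow << QUE_NUMBER_SHIFT);
}

int main(void)
{
	uint16_t offset = 0;
	uint16_t qcounts[] = { 8, 4, 2 };	/* e.g. three enabled TCs */

	for (unsigned int tc = 0; tc < 3; tc++) {
		printf("TC%u: offset=%u qcount=%u qmap=0x%04x\n",
		       tc, offset, qcounts[tc], build_qmap(offset, qcounts[tc]));
		offset += qcounts[tc];
	}
	return 0;
}
```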
@@ -1497,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1567,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1638,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1690,6 +1698,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
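The new i40e_ioctl hook dispatches the standard hwtstamp ioctls to the PTP code. For reference, this is roughly how user space drives that path; the interface name is an example and error handling is kept minimal:

```c
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp transmitted packets */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* timestamp received PTPv2 events */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("applied: tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}
```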
+
+/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -1771,7 +1800,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1797,13 +1825,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1824,7 +1845,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
return -ENOMEM;
}
}
+ }
+ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+ if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev)) {
@@ -1840,10 +1864,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1859,7 +1886,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1870,12 +1896,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1918,6 +1938,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -2008,8 +2032,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2032,8 +2057,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -2066,8 +2092,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i]->desc)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
i40e_free_tx_resources(vsi->tx_rings[i]);
}
@@ -2100,8 +2129,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i]->desc)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
}
@@ -2121,7 +2153,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring->atr_count = 0;
} else {
@@ -2130,6 +2162,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* initialize XPS */
if (ring->q_vector && ring->netdev &&
+ vsi->tc_config.numtc <= 1 &&
!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -2141,8 +2174,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED));
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2176,7 +2210,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ if (vsi->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -2243,7 +2280,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
@@ -2477,6 +2517,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
@@ -2485,8 +2526,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2532,6 +2573,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
@@ -2584,23 +2638,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
- struct i40e_q_vector *q_vector = data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- pr_info("fdir ring cleaning needed\n");
-
- return IRQ_HANDLED;
-}
-
-/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -2740,20 +2777,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2793,14 +2831,31 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER)
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ else if (val == I40E_RESET_GLOBR)
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ else if (val == I40E_RESET_EMPR)
pf->empr_count++;
}
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ i40e_ptp_tx_hwtstamp(pf);
+ prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
+ }
+
+ wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
+ }
+
/* If a critical error is pending we have no choice but to reset the
* device.
* Report and mask out any remaining unexpected interrupts.
@@ -2809,22 +2864,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2832,6 +2884,94 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_irq_dynamic_enable_icr0(pf);
}
+ return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+
+
+ /* move to the next desc and buffer to clean */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ tx_ring->q_vector->v_idx + vsi->base_vector);
+ }
+ return budget > 0;
+}
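i40e_clean_fdir_tx_irq above uses a common ring-walking idiom: next_to_clean is biased downward by the ring size so the hot loop can advance with a bare i++ and detect wrap with !i, and the bias is added back before the index is stored. A standalone sketch of just that index arithmetic on an 8-entry ring:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	uint16_t next_to_clean = 6;	/* example starting descriptor */
	uint16_t i = next_to_clean;
	int to_clean = 5;		/* pretend 5 descriptors completed */

	i -= RING_COUNT;		/* bias: i is now "negative" (wraps as unsigned) */
	while (to_clean--) {
		/* ... unmap/clean descriptor (i + RING_COUNT) here ... */
		printf("cleaning slot %u\n", (uint16_t)(i + RING_COUNT));
		i++;
		if (!i)			/* walked past the end of the ring */
			i -= RING_COUNT;
	}
	i += RING_COUNT;		/* un-bias before storing back */
	next_to_clean = i;
	printf("next_to_clean=%u\n", next_to_clean);	/* prints 3 */
	return 0;
}
```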
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
return IRQ_HANDLED;
}
@@ -2974,28 +3114,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
+ if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
- if (enable)
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
- else
+ } else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
@@ -3019,6 +3151,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
@@ -3091,9 +3226,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
- int ret;
+ int ret = 0;
/* do rx first for enable and last for disable */
if (request) {
@@ -3102,10 +3237,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
return ret;
ret = i40e_vsi_control_tx(vsi, request);
} else {
- ret = i40e_vsi_control_tx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_rx(vsi, request);
+ /* Ignore return value, we need to shutdown whatever we can */
+ i40e_vsi_control_tx(vsi, request);
+ i40e_vsi_control_rx(vsi, request);
}
return ret;
@@ -3131,7 +3265,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i]->num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
@@ -3543,7 +3678,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* Get the VSI level BW configuration per TC */
aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3754,6 +3889,149 @@ out:
}
/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs or already enabled TCs just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "veb bw config failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return ret;
+}
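(Illustrative aside, not part of the patch.) With relative credits and an equal share per enabled TC, a hypothetical caller enabling TC0 and TC1 would end up with a 50/50 split:

    u8 enabled_tc = (1 << 0) | (1 << 1);    /* TC0 + TC1 */
    ret = i40e_veb_config_tc(veb, enabled_tc);
    /* bw_data sent to FW: tc_valid_bits = 0x3,
     * tc_bw_share_credits = { 1, 1, 0, 0, 0, 0, 0, 0 }
     */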
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has already quiesced all the VSIs before calling
+ * this function.
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Will try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = i40e_pf_get_default_tc(pf);
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Will try to configure as many components as possible */
+ } else {
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ if (pf->hw.func_caps.npar_enable)
+ goto out;
+
+ /* Get the initial DCB configuration */
+ err = i40e_init_dcb(hw);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ goto out;
+
+ } else {
+ /* When status is not DISABLED, DCBX is managed by the firmware */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ }
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
* i40e_up_complete - Finish the last steps of bringing up a connection
* @vsi: the VSI being configured
**/
@@ -3957,22 +4235,28 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4054,6 +4338,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -4091,6 +4393,144 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Convert/store the DCBX data from LLDPDU temporarily */
+ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+ ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ if (ret) {
+ /* Error in LLDPDU parsing; return */
+ dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+ /* Overwrite the new configuration */
+ *dcbx_cfg = tmp_dcbx_cfg;
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Reconfiguration needed; quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Configuration changed; update the VEB/VSIs */
+ i40e_dcb_reconfigure(pf);
+
+ i40e_pf_unquiesce_all_vsi(pf);
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
@@ -4245,6 +4685,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
}
/**
@@ -4326,6 +4769,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
+
+ i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
/**
@@ -4336,6 +4781,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4358,7 +4804,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4366,6 +4812,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4429,6 +4878,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4454,15 +4904,23 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
case i40e_aqc_opc_lldp_update_mib:
dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ ret = i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
@@ -4592,6 +5050,9 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ /* increment MSI-X count because current FW skips one */
+ pf->hw.func_caps.num_msix_vectors++;
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4603,57 +5064,89 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
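(Worked example with hypothetical capability values, not from the patch.) With func_caps.fcoe = 1 and func_caps.num_vfs = 32, DEF_NUM_VSI evaluates to 1 + 1 + 32 = 34, so an A0 part (revision_id == 0) reporting num_vsis = 32 would have num_vsis raised to 34 by the check above.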
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
/**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
* @pf: board private structure
**/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
- /* find existing or make new FDIR VSI */
+ /* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
+ break;
+ }
+ }
+
+ /* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
- return;
+ goto err_vsi;
}
new_vsi = true;
}
- WARN_ON(vsi->base_queue != I40E_FDIR_RING);
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
err = i40e_vsi_setup_tx_resources(vsi);
- if (!err)
- err = i40e_vsi_setup_rx_resources(vsi);
- if (!err)
- err = i40e_vsi_configure(vsi);
- if (!err && new_vsi) {
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ if (new_vsi) {
char int_name[IFNAMSIZ + 9];
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
dev_driver_string(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
- }
- if (!err)
+ if (err)
+ goto err_setup_rx;
err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+ }
clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ return;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+err_vsi:
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ i40e_vsi_clear(vsi);
}
/**
@@ -4673,26 +5166,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for pf Reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
+ return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- i40e_vc_notify_reset(pf);
+ if (i40e_check_asq_alive(hw))
+ i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -4704,6 +5196,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -4731,13 +5244,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4751,8 +5257,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+#ifdef CONFIG_I40E_DCB
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+ goto end_core_reset;
+ }
+#endif /* CONFIG_I40E_DCB */
+
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4831,6 +5345,22 @@ end_core_reset:
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4911,6 +5441,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ const int vxlan_hdr_qwords = 4;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ vxlan_hdr_qwords,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -4929,6 +5505,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -5006,6 +5585,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: pointer to the VSI
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
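(Illustrative sketch of the resulting layout, not part of the patch.) The single kzalloc() holds both pointer arrays back to back, so rx_rings simply aliases the second half and one kfree(vsi->tx_rings) releases everything:

    tx_rings -> | tx[0] ... tx[Q-1] | rx[0] ... rx[Q-1] |
    rx_rings -----------------------^      (Q = vsi->alloc_queue_pairs)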
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
@@ -5017,8 +5632,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
- int sz_vectors;
- int sz_rings;
int vsi_idx;
int i;
@@ -5068,22 +5681,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- /* allocate memory for ring pointers */
- sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
- vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
goto err_rings;
- }
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
- /* allocate memory for q_vector pointers */
- sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
- }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5092,8 +5692,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
ret = vsi_idx;
goto unlock_pf;
-err_vectors:
- kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
@@ -5103,6 +5701,26 @@ unlock_pf:
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ *
+ * Frees the Tx/Rx ring pointer array and, if requested, the
+ * q_vector pointer array.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5138,9 +5756,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- /* free the ring and vector containers */
- kfree(vsi->q_vectors);
- kfree(vsi->tx_rings);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5158,18 +5774,17 @@ free_vsi:
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
- if (vsi->tx_rings[0])
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
-
- return 0;
+ }
}
/**
@@ -5186,6 +5801,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
+ /* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -5289,19 +5905,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors;
+ * use rss_size instead in the calculation since that
+ * is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
/* Scale down if necessary, and the rings will share vectors */
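(Worked example with hypothetical values, not from the patch.) With num_lan_qps = 64, rss_size_max = 64 and rss_size = 8 (eight online CPUs), num_lan_msix becomes 64 - (64 - 8) = 8, so v_budget starts at 1 + 8 = 9 before the VMDq vectors and the optional Flow Director sideband vector are added.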
@@ -5437,14 +6056,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -5513,15 +6131,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5530,16 +6148,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -5568,6 +6177,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
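(Worked example, illustrative values only.) A request for 10 queues on a PF with rss_size_max = 16 is clamped to 10 by min_t() and then rounded down to 8 by rounddown_pow_of_two(); only if 8 differs from the current rss_size does the prep-for-reset / rebuild / re-hash sequence run, and 8 is what the caller gets back.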
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5582,6 +6219,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5593,39 +6231,47 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Depending on the PF configuration, the RSS maximum might
+ * end up larger than the number of available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+ pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
- if (pf->hw.func_caps.dcb)
- pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
- else
- pf->num_tc_qps = 0;
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
- if (pf->hw.func_caps.fd) {
- /* FW/NVM is not yet fixed in this regard */
- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
- pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Enabled\n");
- pf->fdir_pf_filter_count =
- pf->hw.func_caps.fd_filters_guaranteed;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Disabled in MFP mode\n");
}
- } else {
- pf->fdir_pf_filter_count = 0;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
}
if (pf->hw.func_caps.vmdq) {
@@ -5634,12 +6280,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5647,6 +6287,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
+ dev_info(&pf->pdev->dev,
+ "Number of VFs being requested for PF[%d] = %d\n",
+ pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -5701,6 +6344,104 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
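(Sketch of the port-table lifecycle, pulling together the two notifiers above and the sync subtask shown earlier; not part of the patch.)

    /* .ndo_add_vxlan_port: record the port and mark its slot pending */
    pf->vxlan_ports[next_idx] = port;
    pf->pending_vxlan_bitmap |= (1 << next_idx);
    pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;

    /* later, i40e_sync_vxlan_filters_subtask() in the service task:
     *   vxlan_ports[i] != 0 -> i40e_aq_add_udp_tunnel()
     *   vxlan_ports[i] == 0 -> i40e_aq_del_udp_tunnel()
     *                          (the slot was zeroed by .ndo_del_vxlan_port
     *                           to mark it for deletion)
     */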
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -5710,6 +6451,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
+ .ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -5722,6 +6464,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
};
/**
@@ -5732,6 +6478,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5781,6 +6528,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -5814,10 +6562,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
- /* there is no HW VSI for FDIR */
- if (vsi->type == I40E_VSI_FDIR)
- return;
-
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
return;
}
@@ -5901,12 +6645,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
case I40E_VSI_FDIR:
- /* no queue mapping or actual HW VSI needed */
- vsi->info.valid_sections = 0;
- vsi->seid = 0;
- vsi->id = 0;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
- return 0;
break;
case I40E_VSI_VMDQ2:
@@ -6133,6 +6877,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the VSI
+ *
+ * This re-allocates a VSI's queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6212,6 +7019,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (v_idx < 0)
goto err_alloc;
vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
vsi->type = type;
vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
@@ -6220,7 +7029,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+ vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
vsi->seid, ret);
@@ -6246,6 +7056,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
goto err_netdev;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
/* fall through */
case I40E_VSI_FDIR:
@@ -6503,12 +7317,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6773,11 +7589,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6790,14 +7608,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_pf_reset_stats(pf);
- /* fdir VSI must happen first to be sure it gets queue 0, but only
- * if there is enough room for the fdir VSI
- */
- if (pf->num_lan_qps > 1)
- i40e_fdir_setup(pf);
-
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6808,19 +7620,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6830,6 +7638,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_sb_setup(pf);
+
/* Setup static PF queue filter control settings */
ret = i40e_setup_pf_filter_control(pf);
if (ret) {
@@ -6848,37 +7658,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
- return ret;
-}
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
- int num_tc0;
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
- num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
- num_tc0 = rounddown_pow_of_two(num_tc0);
+ goto fc_complete;
- return num_tc0;
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
+ i40e_ptp_init(pf);
+
+ return ret;
}
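(Compact summary of the mapping the switch above implements; illustrative, not part of the patch.)

    current_mode            TFCE (PRTDCB_FCCFG)    RFCE (PRTDCB_MFLCN)
    I40E_FC_FULL                    1                      1
    I40E_FC_TX_PAUSE                1                      0
    I40E_FC_RX_PAUSE                0                      1
    I40E_FC_NONE / DEFAULT          0                      0

When autonegotiation did not complete, the no_autoneg path clears both bits and leaves it to the user to re-enable flow control.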
/**
@@ -6887,12 +7728,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
**/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
- int accum_tc_size;
int queues_left;
pf->num_lan_qps = 0;
- pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
- accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -6900,99 +7738,45 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
- (queues_left == 1)) {
-
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_DCB_ENABLED))) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save num_tc_qps queues for TCs 1 thru 7 and the rest
- * are set up for RSS in TC0
- */
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
- return;
- }
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- queues_left -= 1; /* save 1 queue for FD */
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
- return;
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
+ pf->num_lan_qps = pf->rss_size_max;
+ queues_left -= pf->num_lan_qps;
+ }
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save 1 queue for TCs 1 thru 7,
- * 1 queue for flow director,
- * and the rest are set up for RSS in TC0
- */
- queues_left -= 1;
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
- return;
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else {
- dev_info(&pf->pdev->dev,
- "Invalid configuration, flags=0x%08llx\n", pf->flags);
- return;
}
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
- pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
- pf->num_vf_qps));
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
@@ -7003,6 +7787,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
+ pf->queues_left = queues_left;
return;
}
@@ -7024,7 +7809,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -7053,6 +7838,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
@@ -7118,6 +7905,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -7142,8 +7941,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+ dev_info(&pdev->dev,
+ "warning: NVM version not supported, supported version: %02x.%02x\n",
+ I40E_CURRENT_NVM_VERSION_HI,
+ I40E_CURRENT_NVM_VERSION_LO);
+ }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7152,6 +7961,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
goto err_adminq_setup;
@@ -7178,7 +7988,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
@@ -7188,6 +7998,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto err_init_dcb;
+ }
+#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7198,6 +8016,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
@@ -7212,7 +8034,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7250,6 +8072,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_flush(hw);
}
+ pfs_found++;
+
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
@@ -7263,15 +8087,41 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
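(Background numbers for scale, not from the patch.) A PCIe Gen3 lane runs at 8 GT/s with 128b/130b encoding, roughly 985 MB/s per lane, so the x8 link the check expects offers on the order of 7.9 GB/s each way; falling back to x4, or to a 5 GT/s (Gen2) link, cuts that roughly in half or worse, which is what the two warnings flag.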
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
+#ifdef CONFIG_I40E_DCB
+err_init_dcb:
+#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -7313,6 +8163,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_dbg_pf_exit(pf);
+ i40e_ptp_stop(pf);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -7356,7 +8208,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7413,7 +8264,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7476,9 +8331,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
}
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7490,6 +8439,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8a..73f95b081927 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -166,15 +165,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @data: word read from the Shadow RAM.
*
* Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -211,29 +210,6 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM word.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
- *
- * Reads 16 bit word from the Shadow RAM. Each read is preceded
- * with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- i40e_status ret_code = 0;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_srctl(hw, offset, data);
- i40e_release_nvm(hw);
- }
-
- return ret_code;
-}
-
-/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer.
* @hw: pointer to the HW structure.
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -250,36 +226,25 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
i40e_status ret_code = 0;
u16 index, word;
- u32 time;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- /* Loop thru the selected region. */
- for (word = 0; word < *words; word++) {
- index = offset + word;
- ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
- if (ret_code)
- break;
- /* Check if we didn't exceeded the semaphore timeout. */
- time = rd32(hw, I40E_GLVFGEN_TIMER);
- if (time >= hw->nvm.hw_semaphore_timeout) {
- ret_code = I40E_ERR_TIMEOUT;
- hw_dbg(hw, "NVM read error: timeout.\n");
- break;
- }
- }
- /* Update the number of words read from the Shadow RAM. */
- *words = word;
- /* Release the NVM ownership. */
- i40e_release_nvm(hw);
+ /* Loop thru the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code)
+ break;
}
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+
return ret_code;
}
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
* This function calculate SW Checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
@@ -297,14 +262,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u32 i = 0;
/* read pointer to VPD area */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +296,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
break;
}
- ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -358,7 +323,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
{
i40e_status ret_code = 0;
u16 checksum_sr = 0;
- u16 checksum_local;
+ u16 checksum_local = 0;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
@@ -371,7 +336,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
- i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e3..ecd0f0b663c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc900..ed91f93ede2b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
@@ -71,8 +70,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -95,9 +92,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -106,7 +103,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +117,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -164,11 +156,19 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 header_len,
+ u8 protocol_index, u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
@@ -179,6 +179,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -207,8 +216,6 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -222,6 +229,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
/* prototype for functions used for SW locks */
@@ -236,4 +244,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 000000000000..e33ec6c842b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
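
The three INCVAL constants above are consistent with a 32.32 fixed-point encoding of the per-tick period in nanoseconds (this reading is an inference from the values, not something the patch itself states): 0x0199999999 / 2^32 is about 1.6 ns, 0x0333333333 / 2^32 about 3.2 ns, and 0x2000000000 / 2^32 exactly 32 ns, matching the periods in the comment. A standalone sketch that checks this, assuming that interpretation:

    /* Hypothetical check of the 32.32 fixed-point reading of the INCVAL
     * constants; illustrative only, not driver code.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint64_t incval[] = {
    		0x0199999999ULL,	/* 40Gb link or no link */
    		0x0333333333ULL,	/* 10Gb link */
    		0x2000000000ULL,	/* 1Gb link */
    	};
    	unsigned int i;

    	for (i = 0; i < 3; i++)
    		printf("period = %.3f ns\n",
    		       (double)incval[i] / 4294967296.0); /* divide by 2^32 */
    	return 0;
    }
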
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PTP_TX_TIMEOUT (HZ * 15)
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers and stores them in a
+ * timespec. However, since the registers are 64 bits of nanoseconds, we must
+ * convert the result to a timespec before we can return.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ /* The timer latches on the lowest register read. */
+ lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ *ts = ns_to_timespec(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 ns = timespec_to_ns(ts);
+
+ /* The timer will not update until the high register is written, so
+ * write the low register first.
+ */
+ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+ u64 timestamp)
+{
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+ u64 adj, freq, diff;
+ int neg_adj = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ smp_mb(); /* Force any pending update before accessing. */
+ adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+ freq = adj;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ if (neg_adj)
+ adj -= diff;
+ else
+ adj += diff;
+
+ wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+ return 0;
+}
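
A quick worked example of the scaling above (illustrative numbers, not from the patch): with the 40Gb base increment 0x0199999999 (about 6,871,947,673) and ppb = 1000, freq = base * 1000 and diff = freq / 1e9 is roughly 6,872, so the programmed increment becomes base + 6,872. That is a relative change of about 6,872 / 6.87e9, i.e. one part per million, so the PHC runs 1 ppm fast, as requested.
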
+
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current time of the PHC by the offset indicated by delta, in
+ * nanoseconds.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct timespec now, then = ns_to_timespec(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+ i40e_ptp_read(pf, &now);
+ now = timespec_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_read(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_write(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
+ * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
+ * the stack in the skb.
+ */
+static void i40e_ptp_tx_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_tx_work);
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat_0;
+
+ if (!pf->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(pf->ptp_tx_start +
+ I40E_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ pf->tx_hwtstamp_timeouts++;
+ dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
+ prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
+ if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
+ i40e_ptp_tx_hwtstamp(pf);
+ else
+ schedule_work(&pf->ptp_tx_work);
+}
+
+/**
+ * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function simply returns -EOPNOTSUPP for any request.
+ **/
+static int i40e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state where it is
+ * unable to timestamp any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_ring *rx_ring;
+ unsigned long rx_event;
+ u32 prttsyn_stat;
+ int n;
+
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ /* Unless all four receive timestamp registers are latched, we are not
+ * concerned about a possible PTP Rx hang, so just update the last-check
+ * time and exit.
+ */
+ if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+ pf->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event. */
+ rx_event = pf->last_rx_ptp_check;
+ for (n = 0; n < vsi->num_queue_pairs; n++) {
+ rx_ring = vsi->rx_rings[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXTIME register to clear the latch */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->last_rx_ptp_check = jiffies;
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&vsi->back->pdev->dev,
+ "%s: clearing Rx timestamp hang",
+ __func__);
+ }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before returning it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+ skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers are in ns, so we must convert the result
+ * first.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+ u32 prttsyn_stat, hi, lo;
+ struct i40e_hw *hw;
+ u64 ns;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * doing Tx timestamping, check if Rx timestamping is configured.
+ */
+ if (!pf->ptp_rx)
+ return;
+
+ hw = &pf->hw;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ if (!(prttsyn_stat & (1 << index)))
+ return;
+
+ lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+ hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info;
+ struct i40e_hw *hw = &pf->hw;
+ u64 incval;
+
+ hw_link_info = &hw->phy.link_info;
+
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+ switch (hw_link_info->link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ incval = I40E_PTP_10GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ incval = I40E_PTP_1GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ dev_warn(&pf->pdev->dev,
+ "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
+ __func__);
+ incval = 0;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ default:
+ incval = I40E_PTP_40GB_INCVAL;
+ break;
+ }
+
+ /* Write the new increment value into the increment register. The
+ * hardware will not update the clock until both registers have been
+ * written.
+ */
+ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+ /* Update the base adjustment value. */
+ ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ smp_mb(); /* Force the above update. */
+}
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping config
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * keep a shadow copy of the timestamp settings rather than attempting to
+ * deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &pf->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping config
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct hwtstamp_config *config = &pf->tstamp_config;
+ u32 pf_id, tsyntype, regval;
+
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Confirm that 1588 is supported on this PF. */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ pf->ptp_tx = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ pf->ptp_tx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pf->ptp_rx = false;
+ tsyntype = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out all 1588-related registers to clear and unlatch them. */
+ rd32(hw, I40E_PRTTSYN_STAT_0);
+ rd32(hw, I40E_PRTTSYN_TXTIME_H);
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable/disable the Tx timestamp interrupt based on user input. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ if (pf->ptp_tx)
+ regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ else
+ regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+ if (pf->ptp_tx)
+ regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ else
+ regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+ /* There is no simple on/off switch for Rx. To "disable" Rx support,
+ * ignore any received timestamps, rather than turn off the clock.
+ */
+ if (pf->ptp_rx) {
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+ }
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
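
For context on how this path is reached from user space, here is a minimal hedged sketch using the standard SIOCSHWTSTAMP interface; the interface name "eth0" is a placeholder and the program is an illustration, not part of the patch. The value reported back in rx_filter shows the "upgrade to a broader filter" behavior described in the comment above.

    /* User-space sketch: request Tx timestamps and PTPv2 L4 Sync Rx
     * timestamps; the driver may widen rx_filter (e.g. to
     * HWTSTAMP_FILTER_PTP_V2_EVENT) and reports that back in the struct.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
    	struct hwtstamp_config cfg;
    	struct ifreq ifr;
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	if (fd < 0)
    		return 1;

    	memset(&cfg, 0, sizeof(cfg));
    	cfg.tx_type = HWTSTAMP_TX_ON;
    	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
    	ifr.ifr_data = (char *)&cfg;

    	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
    		perror("SIOCSHWTSTAMP");
    		close(fd);
    		return 1;
    	}

    	/* The driver reports the filter it actually programmed. */
    	printf("tx_type = %d, rx_filter = %d\n", cfg.tx_type, cfg.rx_filter);
    	close(fd);
    	return 0;
    }
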
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * @pf: Board private structure
+ *
+ * This function registers the device clock as a PHC. If it is successful, it
+ * starts the clock in the hardware.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+
+ strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+ pf->ptp_caps.owner = THIS_MODULE;
+ pf->ptp_caps.max_adj = 999999999;
+ pf->ptp_caps.n_ext_ts = 0;
+ pf->ptp_caps.pps = 0;
+ pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+ pf->ptp_caps.gettime = i40e_ptp_gettime;
+ pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.enable = i40e_ptp_enable;
+
+ /* Attempt to register the clock before enabling the hardware. */
+ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+ if (IS_ERR(pf->ptp_clock)) {
+ pf->ptp_clock = NULL;
+ dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+ __func__);
+ } else {
+ struct timespec ts;
+ u32 regval;
+
+ spin_lock_init(&pf->tmreg_lock);
+ INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
+
+ dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
+ netdev->name);
+ pf->flags |= I40E_FLAG_PTP;
+
+ /* Ensure the clocks are running. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ /* Set the increment value per clock tick. */
+ i40e_ptp_set_increment(pf);
+
+ /* reset the tstamp_config */
+ memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+
+ /* Set the clock value. */
+ ts = ktime_to_timespec(ktime_get_real());
+ i40e_ptp_settime(&pf->ptp_caps, &ts);
+ }
+}
+
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required after initialization by
+ * clearing out the PTP state and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+ pf->flags &= ~I40E_FLAG_PTP;
+ pf->ptp_tx = false;
+ pf->ptp_rx = false;
+
+ cancel_work_sync(&pf->ptp_tx_work);
+ if (pf->ptp_tx_skb) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ }
+
+ if (pf->ptp_clock) {
+ ptp_clock_unregister(pf->ptp_clock);
+ pf->ptp_clock = NULL;
+ dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28b..1d40f425acf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,6 +27,10 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1636,7 +1649,7 @@
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1786,20 @@
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2052,28 @@
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2209,12 @@
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2425,7 @@
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2504,6 @@
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3183,6 @@
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3219,6 @@
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3280,8 @@
#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3431,9 @@
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3519,7 @@
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3536,7 @@
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4288,10 @@
#define I40E_PFPM_APM 0x000B8080
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
#define I40E_PFPM_WUC 0x0006B200
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4512,21 @@
#define I40E_VFMSIX_PBA 0x00002000
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4586,6 @@
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4658,13 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac573..5f9cac55aa55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f1f03bc5c729..d4bb482b1a7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -77,7 +76,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
- tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
@@ -129,15 +127,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ /* set the timestamp */
+ tx_buf->time_stamp = jiffies;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -768,7 +774,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
@@ -782,7 +788,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
@@ -792,7 +798,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
}
@@ -807,7 +813,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
@@ -860,12 +866,25 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* @skb: skb currently being received and modified
* @rx_status: status value of last descriptor in packet
* @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
u32 rx_status,
- u32 rx_error)
+ u32 rx_error,
+ u16 rx_ptype)
{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -873,13 +892,47 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* IP or L4 checksum error */
+	/* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+	/* IP or L4 or outermost IP checksum error */
if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
vsi->back->hw_csum_rx_error++;
return;
}
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
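The hunk above verifies the outer UDPv4 checksum of VXLAN-encapsulated frames in software, since the hardware only validates the inner headers (the check is skipped when the UDP_0 status bit reports a zero outer checksum). A minimal user-space sketch of the same verification, with hypothetical helper names (inet_csum, outer_udp_csum_bad) rather than the kernel's udp_csum/csum_tcpudp_magic: run the ones'-complement sum over the 12-byte IPv4 pseudo-header plus the whole UDP datagram, stored checksum included; a valid packet folds to zero.

    #include <stdint.h>
    #include <stddef.h>

    /* ones'-complement sum over len bytes, folded and complemented */
    static uint16_t inet_csum(const uint8_t *buf, size_t len)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i + 1 < len; i += 2)
                    sum += (uint32_t)buf[i] << 8 | buf[i + 1];
            if (i < len)
                    sum += (uint32_t)buf[i] << 8;   /* pad the odd byte with zero */
            while (sum >> 16)                       /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* buf: 12-byte IPv4 pseudo-header (saddr, daddr, 0, proto, UDP length)
     * followed by the UDP header and payload; returns nonzero on mismatch.
     */
    static int outer_udp_csum_bad(const uint8_t *buf, size_t len)
    {
            return inet_csum(buf, len) != 0;
    }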
@@ -891,13 +944,15 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc)
{
- if (ring->netdev->features & NETIF_F_RXHASH) {
- if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
- I40E_RX_DESC_FLTSTAT_RSS_HASH)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- }
- return 0;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
}
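i40e_rx_hash now expresses the FLTSTAT == RSS-hash test as one precomputed mask so the raw little-endian descriptor word can be tested without byte-swapping it per packet. A standalone sketch of why the two forms are equivalent, assuming FLTSTAT is the 2-bit field at bit 12 and the RSS-hash value sets both bits (macro names here are placeholders):

    #include <stdint.h>
    #include <stdbool.h>

    #define FLTSTAT_SHIFT    12
    #define FLTSTAT_RSS_HASH 3ULL          /* both bits of the 2-bit field set */

    /* previous style: shift the field down, then compare */
    static bool rss_valid_shift(uint64_t qword)
    {
            return ((qword >> FLTSTAT_SHIFT) & 0x3) == FLTSTAT_RSS_HASH;
    }

    /* new style: test both bits in place with a constant mask; in the driver
     * the mask is built with cpu_to_le64() so the __le64 descriptor word can
     * be tested as-is, with no per-packet byte swap.
     */
    static bool rss_valid_mask(uint64_t qword)
    {
            const uint64_t rss_mask = FLTSTAT_RSS_HASH << FLTSTAT_SHIFT;

            return (qword & rss_mask) == rss_mask;
    }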
/**
@@ -918,11 +973,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u64 qword;
+ u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -938,18 +994,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
skb = rx_bi->skb;
prefetch(skb->data);
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
- >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_bi->skb = NULL;
/* This memory barrier is needed to keep us from reading
@@ -1030,13 +1088,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
@@ -1059,8 +1125,8 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
}
rx_ring->next_to_clean = i;
@@ -1173,7 +1239,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
@@ -1268,7 +1334,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -1333,7 +1399,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
return err;
}
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
@@ -1359,10 +1425,50 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
- | ((u64)cd_tso_len
- << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
- | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ *
+ * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+ struct i40e_pf *pf;
+
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return 0;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (tx_flags & I40E_TX_FLAGS_TSO)
+ return 0;
+
+ /* only timestamp the outbound packet if the user has requested it and
+ * we are not already transmitting a packet to be timestamped
+ */
+ pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (pf->ptp_tx && !pf->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_skb = skb_get(skb);
+ } else {
+ return 0;
+ }
+
+ *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+ pf->ptp_tx_start = jiffies;
+ schedule_work(&pf->ptp_tx_work);
+
return 1;
}
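i40e_tsyn() allows at most one outstanding Tx timestamp: the request is dropped unless PTP Tx timestamping is enabled and no other skb is already parked in pf->ptp_tx_skb, and TSO frames are never timestamped. A reduced sketch of that single-slot claim, with hypothetical names standing in for the pf fields:

    #include <stdbool.h>
    #include <stddef.h>

    struct tstamp_slot {
            bool  enabled;      /* user enabled Tx timestamps (pf->ptp_tx)  */
            void *in_flight;    /* packet currently awaiting its timestamp  */
    };

    /* returns true when the caller should set the TSYN bit in the context
     * descriptor; a real driver also takes an skb reference and arms a
     * timeout so a lost timestamp cannot hold the slot forever.
     */
    static bool tstamp_try_claim(struct tstamp_slot *s, void *pkt,
                                 bool wants_tstamp, bool is_tso)
    {
            if (!wants_tstamp || is_tso)        /* TSO frames are never stamped */
                    return false;
            if (!s->enabled || s->in_flight)    /* feature off or slot taken    */
                    return false;
            s->in_flight = pkt;                 /* claim the single slot        */
            return true;
    }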
@@ -1660,6 +1766,7 @@ dma_error:
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
/* Check again in a case another CPU has just made room available. */
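The barrier documented above is the usual stop-queue/re-check handshake: publish the stopped state before re-reading the free-descriptor count, so a concurrent clean-up either sees the stop or has already freed enough room. A reduced sketch of the pattern, with placeholder callbacks standing in for netif_stop_subqueue()/netif_wake_subqueue() and I40E_DESC_UNUSED():

    /* returns 0 when the caller may transmit, -1 when the ring is truly full */
    static int maybe_stop_tx(int needed, int (*descs_free)(void),
                             void (*stop)(void), void (*wake)(void))
    {
            if (descs_free() >= needed)
                    return 0;               /* common case: plenty of room     */

            stop();                         /* publish "queue stopped" first   */
            /* smp_mb() in the driver: order the stop against the re-read      */
            if (descs_free() < needed)
                    return -1;              /* still full; caller returns BUSY */

            wake();                         /* clean-up freed space meanwhile  */
            return 0;
    }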
@@ -1741,6 +1848,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tsyn;
int tso;
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -1756,9 +1864,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == __constant_htons(ETH_P_IPV6))
+ else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
@@ -1771,6 +1879,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+ if (tsyn)
+ tx_flags |= I40E_TX_FLAGS_TSYN;
+
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index db55d9947f15..d5349698e513 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,11 +24,13 @@
*
******************************************************************************/
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
-#define I40E_MAX_ITR 0x07FF
-#define I40E_MIN_ITR 0x0001
-#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
#define I40E_MAX_IRATE 0x03F
#define I40E_MIN_IRATE 0x001
#define I40E_IRATE_USEC_RESOLUTION 4
@@ -49,10 +50,43 @@
#define I40E_QUEUE_END_OF_LIST 0x7FF
-#define I40E_ITR_NONE 3
-#define I40E_RX_ITR 0
-#define I40E_TX_ITR 1
-#define I40E_PE_ITR 2
+/* This enum matches the hardware bits and is meant to be used with the
+ * DYN_CTLN and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
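The enum above exists because ITR_INDX fields in the interrupt-control registers take one of the three ITR timer indexes, while the value 3 (ITR_NONE) means "leave every ITR timer alone". A rough sketch of how such a field would be composed, with placeholder bit positions (the real shift/mask names live in i40e_register.h):

    enum dyn_idx { IDX_ITR0 = 0, IDX_ITR1 = 1, IDX_ITR2 = 2, IDX_ITR_NONE = 3 };

    #define DYN_CTL_INTENA        (1u << 0)    /* placeholder bit position */
    #define DYN_CTL_ITR_IDX_SHIFT 3            /* placeholder bit position */

    /* re-enable an interrupt; pass IDX_ITR_NONE to leave the ITR timers alone */
    static unsigned int dyn_ctl_value(enum dyn_idx idx)
    {
            return DYN_CTL_INTENA | ((unsigned int)idx << DYN_CTL_ITR_IDX_SHIFT);
    }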
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
@@ -102,6 +136,7 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -139,8 +174,8 @@ struct i40e_tx_queue_stats {
struct i40e_rx_queue_stats {
u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
@@ -214,6 +249,8 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
+ unsigned long last_rx_timestamp;
+
bool ring_active; /* is ring online or not */
/* stats structs */
@@ -262,3 +299,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02f..181a825d3160 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -36,38 +35,31 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_SFP_XL710_DEVICE_ID 0x1572
-#define I40E_SFP_X710_DEVICE_ID 0x1573
-#define I40E_QEMU_DEVICE_ID 0x1574
-#define I40E_KX_A_DEVICE_ID 0x157F
-#define I40E_KX_B_DEVICE_ID 0x1580
-#define I40E_KX_C_DEVICE_ID 0x1581
-#define I40E_KX_D_DEVICE_ID 0x1582
-#define I40E_QSFP_A_DEVICE_ID 0x1583
-#define I40E_QSFP_B_DEVICE_ID 0x1584
-#define I40E_QSFP_C_DEVICE_ID 0x1585
-#define I40E_VF_DEVICE_ID 0x154C
-#define I40E_VF_HV_DEVICE_ID 0x1571
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define I40E_IS_MULTICAST(address) \
- (bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address) \
- ((((u8 *)(address))[0] == ((u8)0xff)) && \
- (((u8 *)(address))[1] == ((u8)0xff)))
-
/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
@@ -75,8 +67,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -85,9 +75,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
@@ -101,10 +92,10 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
@@ -134,6 +125,7 @@ enum i40e_media_type {
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
@@ -171,6 +163,7 @@ struct i40e_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
};
@@ -236,9 +229,9 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -500,18 +493,26 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
};
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@@ -547,28 +548,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
@@ -852,10 +857,7 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Value 0-25 are reserved for future use */
- I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
- I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
- I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ /* Note: Values 0-28 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
@@ -864,8 +866,7 @@ enum i40e_filter_pctype {
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Value 37 is reserved for future use */
- I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ /* Note: Values 37-38 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
@@ -877,7 +878,8 @@ enum i40e_filter_pctype {
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
- /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
@@ -1014,6 +1016,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
@@ -1138,17 +1141,4 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xF
-struct i40e_lldp_variables {
- u16 length;
- u16 adminstatus;
- u16 msgfasttx;
- u16 msgtxinterval;
- u16 txparams;
- u16 timers;
- u16 crc8;
-};
-
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac7..22a1b69cd646 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
*/
struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
u8 pad[2];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 07596982a477..b9d1c1c8ca5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -70,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -102,130 +101,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
}
/**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
* i40e_config_irq_link_list
* @vf: pointer to the vf info
* @vsi_idx: index of VSI in PF struct
@@ -260,23 +135,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
goto irq_list_done;
}
tempmap = vecmap->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
- vsi_queue_id =
- find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
}
tempmap = vecmap->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
next_q = find_first_bit(&linklistmap,
@@ -307,7 +176,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP *
I40E_VIRTCHNL_SUPPORTED_QTYPES),
next_q + 1);
- if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ if (next_q <
+ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
@@ -499,7 +369,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
int ret = 0;
@@ -513,14 +382,32 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_index = vsi->idx;
vf->lan_vsi_id = vsi->id;
dev_info(&pf->pdev->dev,
- "LAN VSI index %d, VSI id %d\n",
- vsi->idx, vsi->id);
+ "VF %d assigned LAN VSI index %d, VSI id %d\n",
+ vf->vf_id, vsi->idx, vsi->id);
+ /* If the port VLAN has been configured and then the
+ * VF driver was removed then the VSI port VLAN
+ * configuration was destroyed. Check if there is
+ * a port VLAN and restore the VSI configuration if
+ * needed.
+ */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- 0, true, false);
+ vf->port_vlan_id, true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF MAC addr\n");
+ f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
}
+
if (!f) {
dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
ret = -ENOMEM;
@@ -534,150 +421,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
- /* accept bcast pkts. by default */
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
- vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
- ret = -EINVAL;
- }
-
error_alloc_vsi_res:
return ret;
}
/**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
- int ret = -ENOENT;
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, reg_idx, msix_vf;
- bool rsd = false;
- u16 pf_queue_id;
- int i, j;
-
- /* warn the VF */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
- /* PF triggers VFR only when VF requests, in case of
- * VFLR, HW triggers VFR
- */
- if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
- reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
- }
-
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 4; i++) {
- /* vf reset requires driver to first reset the
- * vf & than poll the status register to make sure
- * that the requested op was completed
- * successfully
- */
- udelay(10);
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
- rsd = true;
- break;
- }
- }
-
- if (!rsd)
- dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
- vf->vf_id);
-
- /* fast disable qps */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- }
-
- /* Queue enable/disable requires driver to
- * first reset the vf & than poll the status register
- * to make sure that the requested op was completed
- * successfully
- */
- udelay(10);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- }
-
- /* clear the irq settings */
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
- else
- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- wr32(hw, reg_idx, reg);
- i40e_flush(hw);
- }
- /* disable interrupts so the VF starts in a known state */
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
- else
- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- i40e_flush(hw);
- }
-
- /* set the defaults for the rqctl & tqctl registers */
- reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
- I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
- wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
- wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
- }
-
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- i40e_flush(hw);
-
- return ret;
-}
-
-/**
* i40e_enable_vf_mappings
* @vf: pointer to the vf info
*
@@ -756,6 +504,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
static void i40e_free_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_index) {
@@ -763,6 +514,34 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
/* reset some of the state varibles keeping
* track of the resources
*/
@@ -804,6 +583,111 @@ error_alloc:
return ret;
}
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
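i40e_quiesce_vf_pci() polls the VF's PCIe Device Status through the CIAA/CIAD indirect config-space window until the Transactions Pending bit (0x20) clears, giving up after roughly 100 us. The bounded-poll pattern, reduced to a standalone sketch with a callback standing in for the indirect register read:

    #include <stdint.h>

    #define TRANS_PENDING 0x20u   /* PCIe Device Status: Transactions Pending */

    /* returns 0 once all outstanding requests have retired, -1 on timeout */
    static int wait_transactions_clear(uint32_t (*read_status)(void *ctx), void *ctx)
    {
            int i;

            for (i = 0; i < 100; i++) {
                    if (!(read_status(ctx) & TRANS_PENDING))
                            return 0;
                    /* udelay(1) in the driver; the busy-wait is elided here */
            }
            return -1;            /* caller logs "PCI transactions stuck" */
    }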
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ int i;
+ u32 reg;
+
+ /* warn the VF */
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 100; i++) {
+ /* vf reset requires driver to first reset the
+	 * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* On initial reset, we won't have any queues */
+ if (vf->lan_vsi_index == 0)
+ goto complete_reset;
+
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+ /* reallocate vf resources to reset the VSI state */
+ i40e_free_vf_res(vf);
+ mdelay(10);
+ i40e_alloc_vf_res(vf);
+ i40e_enable_vf_mappings(vf);
+
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ i40e_flush(hw);
+}
+
/**
* i40e_vfs_are_assigned
* @pf: pointer to the pf structure
@@ -816,7 +700,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
struct pci_dev *vfdev;
/* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);
while (vfdev) {
/* if we don't own it we don't care */
if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
@@ -826,12 +710,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
}
vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- I40E_VF_DEVICE_ID,
+ I40E_DEV_ID_VF,
vfdev);
}
return false;
}
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
/**
* i40e_free_vfs
@@ -842,17 +796,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
void i40e_free_vfs(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int i;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
if (!pf->vf)
return;
/* Disable interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, I40E_PFINT_DYN_CTL0, 0);
- i40e_flush(hw);
+ i40e_irq_dynamic_disable_icr0(pf);
+ mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
@@ -861,20 +818,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
- pf->num_alloc_vfs = 0;
- if (!i40e_vfs_are_assigned(pf))
+ if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
- else
+ /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ }
+ i40e_disable_pf_switch_lb(pf);
+ } else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
+ }
/* Re-enable interrupt 0. */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
- i40e_flush(hw);
+ i40e_irq_dynamic_enable_icr0(pf);
}
#ifdef CONFIG_PCI_IOV
@@ -890,6 +852,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
struct i40e_vf *vfs;
int i, ret = 0;
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -913,11 +878,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
- ret = i40e_alloc_vf_res(&vfs[i]);
- i40e_reset_vf(&vfs[i], true);
- if (ret)
- break;
+ /* vf resources get allocated during reset */
+ i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
@@ -925,10 +887,13 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -1009,6 +974,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -1028,8 +994,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
- aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
+ aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1110,10 @@ err:
* unlike other virtchnl messages, pf driver
* doesn't send the response back to the vf
**/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return -ENOENT;
-
- return i40e_reset_vf(vf, false);
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ i40e_reset_vf(vf, false);
}
/**
@@ -1291,27 +1255,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* lookout for the invalid queue index */
tempmap = map->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
tempmap = map->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1337,8 +1295,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1310,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1334,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1349,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
@@ -1554,7 +1393,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
- memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1563,6 +1402,40 @@ error_param:
}
/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check that the requested address is not broadcast or all zeros and,
+ * if it is, return an invalid MAC address error code. Then check
+ * whether the VF has permission to add or delete unicast MAC address
+ * filters and return -EPERM if it does not.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+ struct i40e_pf *pf = vf->pf;
+ int ret = 0;
+
+ if (is_broadcast_ether_addr(macaddr) ||
+ is_zero_ether_addr(macaddr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ }
+ return ret;
+}
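
The decision above reduces to two address sanity tests plus a comparison against the administratively assigned MAC. A minimal userspace sketch of the same logic follows; the helper names and error values are illustrative stand-ins rather than the driver's own, and plain memcmp() replaces the kernel's ether_addr helpers.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Illustrative stand-ins for the driver's return codes. */
#define ERR_INVALID_MAC	-2
#define ERR_PERM	-1

static bool is_zero_addr(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];
	return memcmp(a, zero, ETH_ALEN) == 0;
}

static bool is_bcast_addr(const unsigned char *a)
{
	static const unsigned char bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	return memcmp(a, bcast, ETH_ALEN) == 0;
}

static bool is_mcast_addr(const unsigned char *a)
{
	return a[0] & 1;	/* group bit is the LSB of the first octet */
}

/* Reject bad addresses first, then enforce the administratively set MAC
 * unless the VF is asking for exactly that address.
 */
static int check_vf_permission(bool pf_set_mac,
			       const unsigned char *admin_mac,
			       const unsigned char *req)
{
	if (is_bcast_addr(req) || is_zero_addr(req))
		return ERR_INVALID_MAC;
	if (pf_set_mac && !is_mcast_addr(req) &&
	    memcmp(req, admin_mac, ETH_ALEN) != 0)
		return ERR_PERM;
	return 0;
}

int main(void)
{
	unsigned char admin[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char other[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x02 };

	printf("%d %d\n", check_vf_permission(true, admin, admin),
	       check_vf_permission(true, admin, other));	/* 0 -1 */
	return 0;
}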
+
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1577,24 +1450,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
- if (is_broadcast_ether_addr(al->list[i].addr) ||
- is_zero_ether_addr(al->list[i].addr)) {
- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
- al->list[i].addr);
- aq_ret = I40E_ERR_PARAM;
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
goto error_param;
- }
}
vsi = pf->vsi[vsi_id];
@@ -1603,7 +1472,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (f) {
+ if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
@@ -1615,7 +1484,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1627,7 +1496,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1645,15 +1514,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
+ }
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1668,7 +1547,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1777,30 +1656,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1920,19 +1775,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_vf *vf = &(pf->vf[vf_id]);
struct i40e_hw *hw = &pf->hw;
+ int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
+ if (local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+ vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
return ret;
}
- wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1801,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_get_vf_resources_msg(vf);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- ret = i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf_msg(vf);
+ ret = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1834,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev,
- "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -2015,19 +1873,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- if (i40e_reset_vf(vf, true))
- dev_err(&pf->pdev->dev,
- "Unable to reset the VF %d\n", vf_id);
- /* free up vf resources to destroy vsi state */
- i40e_free_vf_res(vf);
-
- /* allocate new vf resources with the default state */
- if (i40e_alloc_vf_res(vf))
- dev_err(&pf->pdev->dev,
- "Unable to allocate VF resources %d\n",
- vf_id);
-
- i40e_enable_vf_mappings(vf);
+ i40e_reset_vf(vf, true);
}
}
@@ -2183,6 +2029,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2229,6 +2076,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ dev_err(&pf->pdev->dev,
+ "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+ vf_id);
+
+ /* Check for condition where there was already a port VLAN ID
+ * filter set and now it is being deleted by setting it to zero.
+ * Before deleting all the old VLAN filters we must add new ones
+ * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+ * MAC addresses deleted.
+ */
+ if (!(vlan_id || qos) && vsi->info.pvid)
+ ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
if (vsi->info.pvid) {
/* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
@@ -2243,7 +2104,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
- i40e_vlan_stripping_disable(vsi);
+ i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2257,12 +2118,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
vsi->back->hw.aq.asq_last_status);
goto error_pvid;
}
+ /* Kill non-vlan MAC filters - ignore error return since
+ * there might not be any non-vlan MAC filters.
+ */
+ i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
}
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:
@@ -2294,7 +2163,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vf *vf;
@@ -2318,11 +2186,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- /* first entry of the list is the default ethernet address */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
- break;
- }
+ memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
ivi->tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf3040..cc1feee36e12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -104,7 +105,7 @@ int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 000000000000..e09be37a07a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,33 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+## Makefile for the Intel(R) 40GbE VF driver
+#
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+ i40e_txrx.o i40e_common.o i40e_adminq.o
+
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 000000000000..5470ce95936e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,927 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ }
+}
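
The routine above only records which head, tail and length register offsets the rest of the code should use, depending on whether it runs as a VF or a PF. A small standalone sketch of the same selection pattern, with made-up offset values:

#include <stdio.h>

enum mac_type { MAC_VF, MAC_PF };

struct queue_regs {
	unsigned int head, tail, len;
};

/* Pick the register set once at init time; later code goes through it. */
static struct queue_regs select_asq_regs(enum mac_type type)
{
	if (type == MAC_VF)
		return (struct queue_regs){ 0x8300, 0x8400, 0x8100 };	/* illustrative */
	return (struct queue_regs){ 0x80300, 0x80400, 0x80100 };	/* illustrative */
}

int main(void)
{
	struct queue_regs r = select_asq_regs(MAC_VF);

	printf("head=0x%x tail=0x%x len=0x%x\n", r.head, r.tail, r.len);
	return 0;
}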
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
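
The unwind label above implements a common allocate-all-or-nothing pattern: on a failure at element i, free elements i-1 down to 0, then the tracking array. A compact standalone version of the same pattern, using malloc()/free() in place of the DMA allocators:

#include <stdlib.h>

/* Allocate n buffers of the given size; on any failure, release everything
 * already allocated (skip the element that failed, then walk backwards).
 */
static int alloc_all_or_nothing(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i])
			goto unwind;
	}
	return 0;

unwind:
	i--;			/* don't try to free the one that failed */
	for (; i >= 0; i--)
		free(bufs[i]);
	return -1;
}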
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
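
Since the base-address registers are 32 bits wide, the 64-bit DMA address of each ring is programmed as a high/low pair of writes. A standalone sketch of the split performed by the kernel's upper_32_bits()/lower_32_bits() helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t pa = 0x0000001234567000ULL;	/* example ring base address */

	printf("BAH=0x%08x BAL=0x%08x\n",
	       (unsigned int)upper_32(pa), (unsigned int)lower_32(pa));
	return 0;
}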
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ *     - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ *     - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40evf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+ return ret_code;
+}
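
In other words, a caller is expected to fill in the four sizing fields before the init call; everything else (locks, register offsets, ring and buffer memory) is set up here. A usage sketch under that assumption, with illustrative entry counts and buffer sizes and a hypothetical wrapper name, compiled against the driver's own headers:

static int adminq_bringup_example(struct i40e_hw *hw)
{
	/* illustrative values; the driver picks its own */
	hw->aq.num_asq_entries = 32;
	hw->aq.num_arq_entries = 32;
	hw->aq.asq_buf_size = 4096;
	hw->aq.arq_buf_size = 4096;

	/* returns I40E_ERR_CONFIG if any of the fields above is zero */
	return i40evf_init_adminq(hw);
}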
+
+/**
+ * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (i40evf_check_asq_alive(hw))
+ i40evf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks */
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
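
The cleaning loop above is plain ring arithmetic: advance next_to_clean toward the hardware head, wrapping at count. A standalone sketch of just that walk (the per-descriptor callback and the memset of the cleaned slots are omitted):

#include <stdio.h>

static unsigned int clean_ring(unsigned int *next_to_clean, unsigned int head,
			       unsigned int count)
{
	unsigned int ntc = *next_to_clean;
	unsigned int cleaned = 0;

	while (head != ntc) {
		ntc++;
		if (ntc == count)
			ntc = 0;
		cleaned++;
	}
	*next_to_clean = ntc;
	return cleaned;
}

int main(void)
{
	unsigned int ntc = 30;

	/* 32-entry ring, hardware head already wrapped around to slot 2 */
	printf("reclaimed %u slots\n", clean_ring(&ntc, 2, 32));	/* 4 */
	return 0;
}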
+
+/**
+ * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * i40evf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40evf_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40evf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
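
The write-back wait above is a bounded busy-poll: check the completion condition, wait a fixed step, and give up once the accumulated delay reaches I40E_ASQ_CMD_TIMEOUT. A generic standalone sketch of that shape, with usleep() standing in for the kernel's udelay():

#include <stdbool.h>
#include <unistd.h>

static bool poll_until(bool (*done)(void *), void *arg,
		       unsigned int delay_us, unsigned int timeout_us)
{
	unsigned int total = 0;

	do {
		if (done(arg))
			return true;
		usleep(delay_us);
		total += delay_us;
	} while (total < timeout_us);

	return done(arg);	/* one final check, as the code above does */
}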
+
+/**
+ * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40evf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40evf_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
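
The *pending computation above counts how many descriptors still sit between the just-cleaned index and the hardware head, allowing for one wrap of the ring. The same expression in isolation, with a couple of worked cases:

#include <stdio.h>

static int arq_pending(int ntc, int ntu, int count)
{
	return (ntc > ntu ? count : 0) + (ntu - ntc);
}

int main(void)
{
	printf("%d\n", arq_pending(3, 7, 16));	/* 4, no wrap */
	printf("%d\n", arq_pending(14, 2, 16));	/* 4, head wrapped past the end */
	return 0;
}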
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 000000000000..8f72c31d95cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..f7cea1bca38d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2153 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
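
The flags word is assembled by OR-ing these single-bit values, exactly as the ARQ buffer setup earlier in this patch does with BUF and LB. A tiny standalone composition check, with local redefinitions of the two bits used:

#include <stdio.h>

#define AQ_FLAG_LB	(1 << 9)	/* 0x200  */
#define AQ_FLAG_BUF	(1 << 12)	/* 0x1000 */

int main(void)
{
	unsigned int flags = AQ_FLAG_BUF;
	unsigned int buf_size = 1024, large_buf = 512;

	if (buf_size > large_buf)
		flags |= AQ_FLAG_LB;
	printf("flags = 0x%04x\n", flags);	/* 0x1200 */
	return 0;
}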
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
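
The size check is a build-time assertion: if the structure does not have the stated size, the enum initializer divides by zero and compilation fails; if it does, the enum is simply never used. The same mechanism applied to a toy 16-byte command structure, outside the driver:

#include <stdint.h>

struct demo_cmd {
	uint32_t param0;
	uint32_t param1;
	uint32_t addr_high;
	uint32_t addr_low;
};	/* 16 bytes, the same budget as params.raw */

#define CHECK_STRUCT_LEN(n, X) enum check_static_assert_##X \
	{ check_static_assert_val_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

CHECK_STRUCT_LEN(16, demo_cmd);	/* compiles; change a field type and it won't */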
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
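+
+/* Example (illustrative sketch, not part of this header): the indirect buffer
+ * returned for 0x000A/0x000B holds an array of
+ * i40e_aqc_list_capabilities_element_resp entries, so a caller might walk it
+ * roughly like
+ *
+ *	for (i = 0; i < le32_to_cpu(cmd->count); i++)
+ *		if (le16_to_cpu(elem[i].id) == I40E_AQ_CAP_ID_MSIX)
+ *			num_msix = le32_to_cpu(elem[i].number);
+ *
+ * where "cmd" overlays the descriptor and "elem" points at the buffer.
+ */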
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
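+
+/* Example (illustrative): addr_high/addr_low in the command point at a DMA
+ * buffer that firmware fills with the structure above; command_flags in the
+ * completed descriptor reports which addresses are valid, so a caller might do
+ *
+ *	if (flags & I40E_AQC_LAN_ADDR_VALID)
+ *		memcpy(mac, addrs->pf_lan_mac, 6);
+ *
+ * ("flags", "mac" and "addrs" are the caller's variables, assumed here.)
+ */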
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements are returned in the response buffer
+ * the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
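+
+/* Hypothetical helper (illustration only, not used by this driver): program a
+ * port VLAN in the VSI context above before an Add/Update VSI command; the
+ * VLAN section is marked valid so firmware applies it.
+ */
+static inline void
+i40e_aqc_vsi_ctx_set_pvid(struct i40e_aqc_vsi_properties_data *ctx, u16 vid)
+{
+	ctx->pvid = cpu_to_le16(vid);
+	ctx->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+			       I40E_AQ_VSI_PVLAN_INSERT_PVID |
+			       I40E_AQ_VSI_PVLAN_EMOD_STR;
+	ctx->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+}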
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+	__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+					I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
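+
+/* Example (illustrative sketch): for Add MAC-VLAN the descriptor carries the
+ * target VSI in seid[0] together with a valid bit, e.g.
+ *
+ *	cmd->num_addresses = cpu_to_le16(count);
+ *	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | vsi_seid);
+ *	cmd->seid[1] = 0;
+ *	cmd->seid[2] = 0;
+ *
+ * while the array of i40e_aqc_add_macvlan_element_data is passed as the
+ * indirect buffer ("count" and "vsi_seid" are the caller's values).
+ */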
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
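+
+/* Example (illustrative): to change only unicast promiscuous mode on a VSI,
+ * set the same bit in both fields; bits left clear in valid_flags are not
+ * touched by firmware:
+ *
+ *	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->seid = cpu_to_le16(vsi_seid);
+ */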
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+	__le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+	__le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
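+
+/* Hypothetical helper (illustration only): link_speed fields in the commands
+ * below are bitmaps built from the shifts above; decoding a single-bit value
+ * to Mb/s could look like this.
+ */
+static inline u32 i40e_aq_link_speed_to_mbps(enum i40e_aq_link_speed speed)
+{
+	switch (speed) {
+	case I40E_LINK_SPEED_100MB:
+		return 100;
+	case I40E_LINK_SPEED_1GB:
+		return 1000;
+	case I40E_LINK_SPEED_10GB:
+		return 10000;
+	case I40E_LINK_SPEED_20GB:
+		return 20000;
+	case I40E_LINK_SPEED_40GB:
+		return 40000;
+	default:
+		return 0;
+	}
+}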
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
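+
+/* Example (illustrative): after 0x0607 completes, the same descriptor comes
+ * back filled in, so a caller typically checks
+ *
+ *	bool link_up = resp->link_info & I40E_AQ_LINK_UP;
+ *	u8 speed = resp->link_speed;	(an i40e_aq_link_speed bitmap)
+ *
+ * and may set I40E_AQ_LSE_ENABLE in command_flags when sending the command
+ * to turn on link status change events.
+ */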
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
+ u8 variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
+ u8 udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
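+
+/* Example (illustrative): to register a VXLAN port the caller fills
+ *
+ *	cmd->udp_port = cpu_to_le16(port);
+ *	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ *
+ * and the completion below reports which filter entry index was used
+ * ("port" is the caller's UDP port number, assumed here).
+ */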
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+	u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 000000000000..d8654fb9e525
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 000000000000..7b13953b28c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_SFP_X710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_KX_D:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ struct i40e_asq_cmd_details details;
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
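
A minimal caller sketch (not part of the patch) of the reset sequence that the i40e_vf_reset() comment above prescribes: request the reset, then shut the admin queue down and, optionally, bring it back up. The helper name request_vf_reset() is invented for illustration; the three functions it calls are the ones declared in i40e_prototype.h later in this series.

    #include "i40e_prototype.h"

    /* hypothetical helper, illustration only */
    static i40e_status request_vf_reset(struct i40e_hw *hw)
    {
            i40e_status status;

            /* ask the PF to reset this VF; no reply will be sent back */
            status = i40e_vf_reset(hw);
            if (status)
                    return status;

            /* the admin queue must be torn down immediately afterwards... */
            status = i40evf_shutdown_adminq(hw);
            if (status)
                    return status;

            /* ...and may optionally be reinitialized once the reset completes */
            return i40evf_init_adminq(hw);
    }
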
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 000000000000..cb97b3eed440
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equal to the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: physical address of the backing memory
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
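
The FIND_*_INDEX_LIMIT macros above turn an object range into an exclusive range of segment and page descriptor indices. A minimal sketch (illustration only, not part of the patch; show_hmc_ranges() is an invented helper, and i40e_type.h from this series is assumed to supply the basic types) of how a create or delete path might use them:

    #include "i40e_type.h"  /* assumed to provide u32/u64 and the HMC memory types */
    #include "i40e_hmc.h"

    static void show_hmc_ranges(struct i40e_hmc_info *hmc_info, u32 rsrc_type,
                                u32 start_idx, u32 count)
    {
            u32 sd_idx, sd_limit;
            u32 pd_idx, pd_limit;

            /* segment descriptors backing objects [start_idx, start_idx + count) */
            I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, start_idx, count,
                                     &sd_idx, &sd_limit);

            /* page descriptors backing the same object range */
            I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, start_idx, count,
                                     &pd_idx, &pd_limit);

            /* both limits are exclusive: valid SDs are sd_idx..sd_limit - 1 */
    }
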
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 000000000000..17e42ca26d0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
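
Taken together, the prototypes above imply a lifecycle: size the HMC, back it with memory, program the per-queue contexts, and tear it down on exit. A minimal sketch (illustration only, not part of the patch; the queue counts, context values and the helper name lan_hmc_bringup_example() are invented):

    #include "i40e_type.h"  /* assumed to provide i40e_status and struct i40e_hw */
    #include "i40e_lan_hmc.h"

    static i40e_status lan_hmc_bringup_example(struct i40e_hw *hw)
    {
            struct i40e_hmc_obj_txq txq_ctx = { .new_context = 1, .qlen = 512 };
            i40e_status status;

            /* size the HMC for the LAN queues this function will own */
            status = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
            if (status)
                    return status;

            /* back the objects with memory, preferring direct (2MB) pages */
            status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
            if (status)
                    goto err;

            /* program one Tx queue context; Rx contexts work the same way */
            status = i40e_set_lan_tx_queue_context(hw, 0, &txq_ctx);
            if (status)
                    goto err;

            return 0;

    err:
            i40e_shutdown_lan_hmc(hw);
            return status;
    }
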
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 000000000000..622f373b745d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file provides the glue between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+ __attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
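
The rd32()/wr32() accessors defined here are used together with the _SHIFT/_MASK pairs from i40e_register.h (next in this patch) to pick individual fields out of a register. A minimal sketch (illustration only, not part of the patch; read_reset_type() is an invented helper, and struct i40e_hw with its hw_addr member is assumed to come from i40e_type.h elsewhere in the series):

    #include "i40e_type.h"          /* struct i40e_hw; pulls in this osdep header */
    #include "i40e_register.h"

    static u32 read_reset_type(struct i40e_hw *hw)
    {
            u32 val = rd32(hw, I40E_GLGEN_RSTAT);

            /* mask the field out of the register, then shift it down to bit 0 */
            return (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >>
                   I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
    }
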
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 000000000000..7841573a58c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. They live here
+ * mostly because they are needed even before init has
+ * completed and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
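
A minimal receive-side sketch (illustration only, not part of the patch) of how the admin receive queue might be drained with i40evf_clean_arq_element(); the helper name drain_arq_example() is invented, and struct i40e_arq_event_info is assumed to be defined in i40e_adminq.h elsewhere in this series:

    #include "i40e_adminq.h"
    #include "i40e_prototype.h"

    static void drain_arq_example(struct i40e_hw *hw,
                                  struct i40e_arq_event_info *event)
    {
            u16 pending;
            i40e_status status;

            do {
                    /* copy the next descriptor (and message, if any) into *event */
                    status = i40evf_clean_arq_element(hw, event, &pending);
                    if (status)
                            break;
                    /* a real driver would dispatch on the opcode here */
            } while (pending);
    }
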
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 000000000000..30af953cf106
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
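Editor's note on usage (illustrative, not part of the patch): each per-queue macro above takes an absolute LAN queue index and expands to a register offset, and each field is manipulated through its paired _SHIFT/_MASK define. A minimal sketch of enabling a TX queue and ringing its tail doorbell, assuming the driver's rd32()/wr32() MMIO accessors, an i40e_hw handle, and a queue that has already been configured:

	/* Illustrative only: request-enable TX queue 'q', then post work by
	 * writing the tail doorbell.  'hw', 'q' and 'next_to_use' are assumed
	 * to come from the surrounding driver code.
	 */
	u32 qtx_ena = rd32(hw, I40E_QTX_ENA(q));

	qtx_ena |= I40E_QTX_ENA_QENA_REQ_MASK;	/* ask HW to enable the queue */
	wr32(hw, I40E_QTX_ENA(q), qtx_ena);

	/* once hardware reports QENA_STAT set, descriptors may be posted */
	wr32(hw, I40E_QTX_TAIL(q), next_to_use);	/* tail doorbell */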
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
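Editor's note (illustrative, not part of the patch): the GLNVM_SRCTL/GLNVM_SRDATA pair above is the Shadow RAM access window; a read is started by programming the word address and the START bit, then polling DONE and extracting RDDATA. A hedged sketch, assuming rd32()/wr32() accessors, an i40e_hw handle, and an arbitrary polling budget:

	/* Illustrative only: read one 16-bit Shadow RAM word through SRCTL/SRDATA. */
	static int example_read_sr_word(struct i40e_hw *hw, u16 offset, u16 *data)
	{
		u32 srctl, srdata;
		int retry;

		/* program the word address and kick off the read (WRITE bit left 0) */
		srctl = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			I40E_GLNVM_SRCTL_START_MASK;
		wr32(hw, I40E_GLNVM_SRCTL, srctl);

		/* poll for completion; the retry budget is an arbitrary example value */
		for (retry = 0; retry < 100000; retry++) {
			srctl = rd32(hw, I40E_GLNVM_SRCTL);
			if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
				srdata = rd32(hw, I40E_GLNVM_SRDATA);
				*data = (u16)((srdata & I40E_GLNVM_SRDATA_RDDATA_MASK) >>
					      I40E_GLNVM_SRDATA_RDDATA_SHIFT);
				return 0;
			}
		}
		return -EIO;	/* timed out waiting for DONE */
	}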
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
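Editor's note (illustrative, not part of the patch): read-only capability registers such as I40E_GLPCI_LINKCAP above are decoded with the same mask-then-shift pattern; rd32() and the hw handle are again assumed:

	/* Illustrative only: decode two fields of GLPCI_LINKCAP. */
	u32 linkcap = rd32(hw, I40E_GLPCI_LINKCAP);
	u32 max_link_width = (linkcap & I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK) >>
			     I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT;
	u32 link_speeds = (linkcap & I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK) >>
			  I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT;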
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
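The register block above follows one convention throughout: every field gets a pre-shifted _MASK plus a matching _SHIFT. A minimal sketch of how a consumer would decode such a field using these macros; the helper and function names below are illustrative only and are not part of this patch:

	/* illustrative only, not part of the patch */
	static inline u32 i40e_example_get_field(u32 reg_val, u32 mask, u32 shift)
	{
		return (reg_val & mask) >> shift;	/* masks are already shifted */
	}

	static void i40e_example_decode_pfalloc(u32 reg_val)
	{
		u32 first_vf = i40e_example_get_field(reg_val,
						      I40E_PF_VT_PFALLOC_FIRSTVF_MASK,
						      I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT);
		u32 last_vf = i40e_example_get_field(reg_val,
						     I40E_PF_VT_PFALLOC_LASTVF_MASK,
						     I40E_PF_VT_PFALLOC_LASTVF_SHIFT);

		pr_info("PF owns VFs %u..%u\n", first_vf, last_vf);
	}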
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 000000000000..7c08cc2e339b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
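All failure codes are negative, so callers can simply compare against I40E_SUCCESS. A hedged sketch of the caller-side convention; the function below is hypothetical and only shows the usage pattern:

	/* hypothetical caller, illustrative only */
	static int i40e_example_check_status(enum i40e_status_code status)
	{
		if (status != I40E_SUCCESS) {
			pr_err("operation failed, i40e status %d\n", status);
			return -EIO;
		}
		return 0;
	}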
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 000000000000..ffdb01d853db
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1575 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+
+#include "i40evf.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
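build_ctob() packs the command, header offset, buffer size and L2 tag into the descriptor's single 64-bit cmd_type_offset_bsz word at the I40E_TXD_QW1_*_SHIFT positions. A minimal sketch of filling the final descriptor of an untagged, no-offload frame; the function name is illustrative, and the real transmit path also programs offsets, offloads and multi-fragment chains:

	/* illustrative only, not part of the patch */
	static void i40e_example_fill_last_desc(struct i40e_tx_desc *tx_desc,
						dma_addr_t dma, unsigned int size)
	{
		tx_desc->buffer_addr = cpu_to_le64(dma);
		/* EOP + RS: end of packet, request a completion report */
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TXD_CMD,
							  0 /* td_offset */,
							  size, 0 /* td_tag */);
	}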
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40evf_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
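The ternary handles the ring wrapping past the end: once next_to_use has wrapped below next_to_clean, the ring size is added back before subtracting. A worked example, assuming a 512-entry ring (numbers are illustrative):

	/*
	 * wrapped:   count = 512, next_to_clean = 500, next_to_use = 10
	 *            ntu = 10 + 512 = 522, pending = 522 - 500 = 22
	 * unwrapped: next_to_clean = 100, next_to_use = 130
	 *            ntu = 130,            pending = 130 - 100 = 30
	 */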
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
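Note how the loop above biases the index: `i -= tx_ring->count` right after loading next_to_clean makes the wrap test the cheap `unlikely(!i)` check instead of a compare against the ring size, and `i += tx_ring->count` at the end converts back. A short illustration, assuming a 512-entry ring:

	/*
	 * next_to_clean = 510, count = 512  ->  loop starts with i = -2
	 * after cleaning two descriptors i hits 0; the !i branch rebases
	 * i to -512 and points tx_buf/tx_desc back at the ring start;
	 * on exit, i += count turns the biased value into a real index.
	 */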
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttle rate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+ switch (rc->itr) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+ case I40E_BULK_LATENCY:
+ if (bytes_per_int <= 20)
+ rc->latency_range = I40E_LOW_LATENCY;
+ break;
+ }
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
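When the latency range changes, the final assignment does not jump straight to the new ITR; it blends old and new so the interrupt rate ramps over several interrupts. A worked example of the smoothing formula (raw ITR register values, illustrative):

	/*
	 * rc->itr = 100 (current), new_itr = 20 (target)
	 * new_itr = (10 * 20 * 100) / ((9 * 20) + 100) = 20000 / 280 = 71
	 * the next pass starts from 71 and moves closer to 20 again, so the
	 * rate converges on the target instead of switching in one step.
	 */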
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40evf_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value
+ * @rx_ring: ring to bump
+ * @val: new next_to_use value to write to the hardware tail register
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
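+ /* page_offset is XORed so consecutive mappings alternate
+ * between the two halves of the page; the other half may
+ * still be referenced by an skb sent up the stack.
+ */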
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error,
+ u16 rx_ptype)
+{
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ __sum16 csum;
+ struct iphdr *iph;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ return;
+
+ /* IP or L4 or outmost IP checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ if (ipv4_tunnel &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver; the hardware does not do it for us.
+ * Since the L3L4P bit was set we assume a valid IHL value (>= 5),
+ * so the total length of the IPv4 header is IHL * 4 bytes.
+ */
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+ u16 rx_ptype;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ int budget_per_ring;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+ return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and set up correspondingly several generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to quad word 1 of the Tx context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ }
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct skb_frag_struct *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0
+ * if there are not enough descriptors available in this ring, since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
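+ /* the extra 3 passed to i40e_maybe_stop_tx below covers the context
+ * descriptor and the two-descriptor gap described above
+ */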
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso;
+ if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ }
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames; hardware padding only works
+ * beyond this point, so pad short frames in software
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 000000000000..10bf49e18d7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,296 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
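+/* e.g. I40E_ITR_20K = 0x19 = 25 register units of 2 usec each, so
+ * ITR_REG_TO_USEC(0x19) = 50 usec between interrupts, i.e. roughly
+ * 20000 interrupts per second.
+ */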
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
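+/* e.g. with 4 KiB pages TXD_USE_COUNT(PAGE_SIZE) == 1, so with the common
+ * MAX_SKB_FRAGS of 17 DESC_NEEDED works out to 21 (one descriptor per
+ * fragment plus slack for the head, the context descriptor and the gap).
+ */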
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
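+/* The VLAN TCI lives in the upper 16 bits of tx_flags (hence the shift
+ * of 16); the top three of those bits are the 802.1p priority, which is
+ * what the VLAN_PRIO mask and shift extract.
+ */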
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+ /* array of pointers to rings */
+ struct i40e_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 000000000000..3bffac06592f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1152 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_KX_D 0x1582
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
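+/* e.g. I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) = (18000 * 1000) / 2 =
+ * 9000000 ticks of the 2 usec global timer.
+ */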
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define ETH_ALEN 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
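+/* e.g. count = 512, next_to_clean = 10, next_to_use = 500 gives
+ * 512 + 10 - 500 - 1 = 21 unused descriptors; the trailing -1 keeps one
+ * slot empty so a full ring is never mistaken for an empty one.
+ */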
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
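
Taken together, the QW1 shift/mask pairs above describe how a receive path unpacks the writeback status_error_len quadword. A minimal sketch (not part of this patch), assuming rx_desc points at a completed descriptor of the 32-byte union type used by this driver:

	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		       I40E_RXD_QW1_ERROR_SHIFT;
	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		      I40E_RXD_QW1_PTYPE_SHIFT;
	u16 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
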
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
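
These QW1 shifts are how a transmit path composes cmd_type_offset_bsz for a data descriptor. A minimal sketch (not part of this patch), where dma, td_cmd, td_offset, size, and td_tag are assumed local values:

	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			    ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			    ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			    ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			    ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
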
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
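
A minimal sketch (not part of this patch) of how the QW0/QW1 fields above combine into a struct i40e_filter_program_desc; fdir_desc, queue, and pctype are assumed locals, and only the add/update case is shown:

	u32 fpt, dcc;

	fpt = ((u32)queue << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	fpt |= ((u32)pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
	dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	dcc |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	       I40E_TXD_FLTR_QW1_DEST_SHIFT;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
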
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
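
A simplified sketch (not part of this patch) of the rule stated above: summing every Shadow RAM word, including the checksum word itself, modulo 16 bits must give I40E_SR_SW_CHECKSUM_BASE. The driver's real verification routine applies additional module exclusions; the helper name here is hypothetical.

	static bool i40e_sr_sw_checksum_ok(const u16 *sr_words, u32 nwords)
	{
		u16 sum = 0;
		u32 i;

		for (i = 0; i < nwords; i++)
			sum += sr_words[i];	/* includes the checksum word */

		return sum == I40E_SR_SW_CHECKSUM_BASE;
	}
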
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 000000000000..ccf45d04b7ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in the i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF for the
+ * following ops.
+ */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using
+ * the following op.
+ */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
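
A minimal sketch (not part of this patch) of the compatibility rule described above; pf_ver is assumed to hold the PF's reply, the helper name is hypothetical, and the driver's actual check is performed in i40evf_verify_api_ver():

	static int check_pf_api_ver(struct i40e_virtchnl_version_info *pf_ver)
	{
		if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
			return -EINVAL;	/* major mismatch: VF cannot operate */
		if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
			pr_warn("i40evf: PF/VF minor API version mismatch\n");
		return 0;
	}
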
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to the PF with no parameters.
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to the PF with no parameters.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
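
A minimal sketch (not part of this patch) of the polling described above, assuming the standard rd32() accessor and the I40E_VFGEN_RSTAT_VFR_STATE_MASK definition from the register header; the helper name is hypothetical:

	static int wait_for_vf_reset_done(struct i40e_hw *hw)
	{
		int i;

		for (i = 0; i < 100; i++) {
			u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
				    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

			/* 0xDEADBEEF masks down to I40E_VFR_UNKNOWN; keep polling */
			if (rstat == I40E_VFR_COMPLETED ||
			    rstat == I40E_VFR_VFACTIVE)
				return 0;
			usleep_range(10, 20);
		}
		return -EBUSY;
	}
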
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 000000000000..ff6529b288a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,321 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+#include <linux/sctp.h>
+
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+ struct i40evf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ unsigned long state;
+ int base_vector;
+ u16 work_limit;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
+#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
+#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+ struct i40evf_adapter *adapter;
+ struct i40e_vsi *vsi;
+ struct napi_struct napi;
+ unsigned long reg_idx;
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+ u32 ring_mask;
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ int v_idx; /* vector index in list */
+ char name[IFNAMSIZ + 9];
+ cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
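
For example, a target of 20,000 interrupts per second maps to a register value of 1000000000 / (20000 * 256) = 195, and running 195 back through the same expression gives roughly 20,032 interrupts per second, which is why the comment notes that the same math works in both directions.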
+
+#define I40EVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
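
As a worked example of I40EVF_DESC_UNUSED: with count = 512, next_to_clean = 5 and next_to_use = 10 it evaluates to 512 + 5 - 10 - 1 = 506 free descriptors, while a wrapped ring with next_to_clean = 10 and next_to_use = 5 gives 0 + 10 - 5 - 1 = 4.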
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+ __I40EVF_STARTUP, /* driver loaded, probe complete */
+ __I40EVF_FAILED, /* PF communication failed. Fatal. */
+ __I40EVF_REMOVE, /* driver is being unloaded */
+ __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_SW, /* got resources, setting up structs */
+ /* Below here, watchdog is running */
+ __I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_TESTING, /* in ethtool self-test */
+ __I40EVF_RESETTING, /* in reset */
+ __I40EVF_RUNNING, /* opened, working */
+};
+
+enum i40evf_critical_section_t {
+ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work init_task;
+ struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct list_head vlan_filter_list;
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ struct list_head mac_filter_list;
+#ifdef DEBUG
+ bool detect_tx_hung;
+#endif /* DEBUG */
+
+ /* RX */
+ struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+ int txd_count;
+ int rxd_count;
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+
+ u32 init_state;
+ volatile unsigned long flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+ /* flags for admin queue service task */
+ u32 aq_required;
+ u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in i40e_vf.h */
+ struct i40e_hw hw;
+
+ enum i40evf_state_t state;
+ volatile unsigned long crit_section;
+ u64 tx_busy;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+ bool link_up;
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ u16 msg_enable;
+ struct i40e_eth_stats current_stats;
+ struct i40e_vsi vsi;
+ u32 aq_wait_count;
+};
+
+struct i40evf_info {
+ enum i40e_mac_type mac;
+ unsigned int flags;
+};
+
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 000000000000..b0b1f4bf5ac0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,390 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+
+struct i40evf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+ I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+ I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+ I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+ I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ I40EVF_STAT("rx_discards", current_stats.rx_discards),
+ I40EVF_STAT("rx_errors", current_stats.rx_errors),
+ I40EVF_STAT("rx_missed", current_stats.rx_missed),
+ I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+ I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+ I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+ I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ I40EVF_STAT("tx_discards", current_stats.tx_discards),
+ I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN \
+ (((struct i40evf_adapter *) \
+ netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
+#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
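
The factor of 4 in I40EVF_QUEUE_STATS_LEN matches the four per-queue values emitted by i40evf_get_ethtool_stats() below: tx packets, tx bytes, rx packets, and rx bytes for each queue pair.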
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ /* In the future the VF will be able to query the PF for
+ * some information - for now use a dummy value
+ */
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+
+ return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return I40EVF_STATS_LEN;
+ else
+ return -ENOTSUPP;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+ char *p;
+
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+ data[i] = *(u64 *)p;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->tx_rings[j]->stats.packets;
+ data[i++] = adapter->tx_rings[j]->stats.bytes;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->rx_rings[j]->stats.packets;
+ data[i++] = adapter->rx_rings[j]->stats.bytes;
+ }
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ if (sset == ETH_SS_STATS) {
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, i40evf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+ strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[0];
+ struct i40e_ring *rx_ring = adapter->rx_rings[0];
+
+ ring->rx_max_pending = I40EVF_MAX_RXD;
+ ring->tx_max_pending = I40EVF_MAX_TXD;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40EVF_MIN_TXD,
+ I40EVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40EVF_MIN_RXD,
+ I40EVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->txd_count) &&
+ (new_rx_count == adapter->rxd_count))
+ return 0;
+
+ adapter->txd_count = new_tx_count;
+ adapter->rxd_count = new_rx_count;
+
+ if (netif_running(netdev))
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_q_vector *q_vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
+ vsi->work_limit = ec->tx_max_coalesced_frames;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
+ | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops i40evf_ethtool_ops = {
+ .get_settings = i40evf_get_settings,
+ .get_drvinfo = i40evf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = i40evf_get_ringparam,
+ .set_ringparam = i40evf_set_ringparam,
+ .get_strings = i40evf_get_strings,
+ .get_ethtool_stats = i40evf_get_ethtool_stats,
+ .get_sset_count = i40evf_get_sset_count,
+ .get_msglevel = i40evf_get_msglevel,
+ .set_msglevel = i40evf_set_msglevel,
+ .get_coalesce = i40evf_get_coalesce,
+ .set_coalesce = i40evf_set_coalesce,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 000000000000..f5caf4419243
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2353 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+ "Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.11"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+ "Copyright (c) 2013 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+
+ return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw: pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+ char buf[512];
+ va_list argptr;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(argptr, fmt_str);
+ vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+ va_end(argptr);
+
+ /* the debug string is already formatted with a newline */
+ pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_timeout_count++;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+ synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+ int i;
+ struct i40e_hw *hw = &adapter->hw;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ synchronize_irq(adapter->msix_entries[i].vector);
+ }
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+
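+ /* vector 0 is reserved for the admin queue, so queue vectors start at 1 */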
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << (i - 1))) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ }
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+ u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ uint32_t dyn_ctl;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << i)) {
+ dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+ dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ }
+ }
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @flush: if true, perform a register read to flush the write
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ i40evf_irq_enable_queues(adapter, ~0);
+
+ if (flush)
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 val;
+ u32 ena_mask;
+
+ /* handle non-queue interrupts */
+ val = rd32(hw, I40E_VFINT_ICR01);
+ ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+
+ val = rd32(hw, I40E_VFINT_DYN_CTL01);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule work on the private workqueue */
+ schedule_work(&adapter->adminq_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ rx_ring->vsi = &adapter->vsi;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ tx_ring->vsi = &adapter->vsi;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. New mapping
+ * configurations should be added here.
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+ int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ /* The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == (rxr_remaining * 2)) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /* If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ i40evf_map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+ return err;
+}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ * @basename: device name used to build the per-vector interrupt names
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+ int vector, err, q_vectors;
+ int rx_int_idx = 0, tx_int_idx = 0;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for the non-queue vectors */
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+ adapter->q_vector[vector]);
+ }
+ return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ sprintf(adapter->name[0], "i40evf:mbx");
+ err = request_irq(adapter->msix_entries[0].vector,
+ &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "request_irq for msix_aq failed: %d\n", err);
+ free_irq(adapter->msix_entries[0].vector, netdev);
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+ int i;
+ int q_vectors;
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (i = 0; i < q_vectors; i++) {
+ irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+ NULL);
+ free_irq(adapter->msix_entries[i+1].vector,
+ adapter->q_vector[i]);
+ }
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ int rx_buf_len;
+
+
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = I40E_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ }
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (vlan == f->vlan)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (NULL == f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new VLAN filter\n",
+ __func__);
+ return NULL;
+ }
+ f->vlan = vlan;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &adapter->vlan_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+
+ return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ }
+ return;
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (i40evf_add_vlan(adapter, vid) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a MAC filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
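+ /* busy-wait for the critical section bit; filter list updates must be
+ * serialized with the watchdog and reset tasks
+ */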
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+
+ f = i40evf_find_filter(adapter, macaddr);
+ if (NULL == f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f) {
+ dev_info(&adapter->pdev->dev,
+ "%s: no memory for new filter\n", __func__);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ return NULL;
+ }
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+
+ list_add(&f->list, &adapter->mac_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ }
+
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_mac_filter *f;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ f = i40evf_add_filter(adapter, addr->sa_data);
+ if (f) {
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f, *ftmp;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ i40evf_add_filter(adapter, uca->addr);
+ }
+ netdev_for_each_mc_addr(mca, netdev) {
+ i40evf_add_filter(adapter, mca->addr);
+ }
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ bool found = false;
+
+ if (f->macaddr[0] & 0x01) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+ }
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ napi = &q_vector->napi;
+ napi_enable(napi);
+ }
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ i40evf_set_rx_mode(netdev);
+
+ i40evf_configure_tx(adapter);
+ i40evf_configure_rx(adapter);
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
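+ /* pre-fill the Rx rings and bump the tail pointers so the hardware
+ * has buffers available as soon as the queues are enabled
+ */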
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *ring = adapter->rx_rings[i];
+ i40evf_alloc_rx_buffers(ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, ring->tail);
+ }
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+ adapter->state = __I40EVF_RUNNING;
+ clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_napi_enable_all(adapter);
+
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+
+ /* remove all MAC filters from the VSI */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->remove = true;
+ }
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+
+ netif_tx_disable(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ i40evf_irq_disable(adapter);
+
+ i40evf_napi_disable_all(adapter);
+
+ netif_carrier_off(netdev);
+
+ i40evf_clean_all_tx_rings(adapter);
+ i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 0) Other (Admin Queue and link, mostly)
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting IRQs.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = -EIO;
+ } else {
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vsi_res)
+ return;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ if (adapter->tx_rings[i])
+ kfree_rcu(adapter->tx_rings[i], rcu);
+ adapter->tx_rings[i] = NULL;
+ adapter->rx_rings[i] = NULL;
+ }
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
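+ /* each queue pair's Tx and Rx rings are allocated as a single block;
+ * the Rx ring lives at &tx_ring[1] and is freed with its Tx ring
+ */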
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
+
+ tx_ring->queue_index = i;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->count = I40EVF_DEFAULT_TXD;
+ adapter->tx_rings[i] = tx_ring;
+
+ rx_ring = &tx_ring[1];
+ rx_ring->queue_index = i;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->count = I40EVF_DEFAULT_RXD;
+ adapter->rx_rings[i] = rx_ring;
+ }
+
+ return 0;
+
+err_out:
+ i40evf_free_queues(adapter);
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ int vector, v_budget;
+ int pairs = 0;
+ int err = 0;
+
+ if (!adapter->vsi_res) {
+ err = -EIO;
+ goto out;
+ }
+ pairs = adapter->vsi_res->num_queue_pairs;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter.
+ */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ adapter->netdev->real_num_tx_queues = pairs;
+ return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct i40e_q_vector *q_vector;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->vsi = &adapter->vsi;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ i40evf_napi_poll, 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ int err;
+
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = i40evf_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
+ (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+ "Disabled", adapter->vsi_res->num_queue_pairs);
+
+ return 0;
+err_alloc_queues:
+ i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ schedule_work(&adapter->watchdog_task);
+ /* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ watchdog_task);
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (adapter->state < __I40EVF_DOWN)
+ goto watchdog_done;
+
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto watchdog_done;
+
+ /* check for unannounced reset */
+ if ((adapter->state != __I40EVF_RESETTING) &&
+ (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
+ __func__);
+ goto watchdog_done;
+ }
+
+ /* Process admin queue tasks. After init, everything gets done
+ * here so we don't race on the admin queue.
+ */
+ if (adapter->aq_pending)
+ goto watchdog_done;
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+ i40evf_map_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+ i40evf_add_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ i40evf_add_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+ i40evf_del_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ i40evf_del_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ i40evf_configure_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+ i40evf_enable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_request_stats(adapter);
+
+ i40evf_irq_enable(adapter, true);
+ i40evf_fire_sw_int(adapter, 0xFF);
+watchdog_done:
+ if (adapter->aq_required)
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + msecs_to_jiffies(20));
+ else
+ mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Hash type is configured by the PF - we just supply the key */
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+ if (j == adapter->vsi_res->num_queue_pairs)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j &
+ ((0x1 << 8) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, reset_task);
+ struct i40e_hw *hw = &adapter->hw;
+ int i = 0, err;
+ uint32_t rstat_val;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ udelay(500);
+
+ /* wait until the reset is complete */
+ for (i = 0; i < 20; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_COMPLETED)
+ break;
+ else
+ mdelay(100);
+ }
+ if (i == 20) {
+ /* reset never finished */
+ dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
+ __func__, rstat_val);
+ /* carry on anyway */
+ }
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_RESETTING;
+
+ /* kill and reinit the admin queue */
+ if (i40evf_shutdown_adminq(hw))
+ dev_warn(&adapter->pdev->dev,
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
+ err = i40evf_init_adminq(hw);
+ if (err)
+ dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ i40evf_map_queues(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
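+ /* the rings lost their context during the reset, so rebuild and
+ * restart them if the interface was up
+ */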
+ if (netif_running(adapter->netdev)) {
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_irq_enable(adapter, true);
+ }
+ return;
+reset_err:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, adminq_task);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u16 pending;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
+ __func__);
+ return;
+ }
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ do {
+ ret = i40evf_clean_arq_element(hw, &event, &pending);
+ if (ret)
+ break; /* No event to process or error cleaning ARQ */
+
+ i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf,
+ event.msg_size);
+ if (pending != 0) {
+ dev_info(&adapter->pdev->dev,
+ "%s: ARQ: Pending events %d\n",
+ __func__, pending);
+ memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ }
+ } while (pending);
+
+ /* re-enable Admin queue interrupt cause */
+ i40evf_misc_irq_enable(adapter);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Tx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Rx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->state != __I40EVF_DOWN)
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /* clear any pending interrupts, may auto mask */
+ err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_irq_enable(adapter, true);
+
+ return 0;
+
+err_req_irq:
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+ i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+ i40evf_free_all_tx_resources(adapter);
+
+ return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* signal that we are down to the interrupt handler */
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ adapter->state = __I40EVF_RESETTING;
+
+ i40evf_down(adapter);
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_irq_enable(adapter, true);
+ return;
+
+err_reinit:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+ .ndo_open = i40evf_open,
+ .ndo_stop = i40evf_close,
+ .ndo_start_xmit = i40evf_xmit_frame,
+ .ndo_get_stats = i40evf_get_stats,
+ .ndo_set_rx_mode = i40evf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40evf_set_mac,
+ .ndo_change_mtu = i40evf_change_mtu,
+ .ndo_tx_timeout = i40evf_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+ u32 rstat;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ rstat = rd32(hw, I40E_VFGEN_RSTAT);
+ if (rstat == I40E_VFR_VFACTIVE)
+ return 0;
+ udelay(10);
+ }
+ return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ init_task.work);
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+ struct i40e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err, bufsz;
+
+ switch (adapter->state) {
+ case __I40EVF_STARTUP:
+ /* driver loaded, probe complete */
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
+ __func__, err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+ hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+ hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+ err = i40evf_init_adminq(hw);
+ if (err) {
+ dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+ goto err;
+ }
+ err = i40evf_send_api_ver(adapter);
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
+ __func__, err);
+ i40evf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ goto restart;
+ break;
+ case __I40EVF_INIT_VERSION_CHECK:
+ if (!i40evf_asq_done(hw))
+ goto err;
+
+ /* aq msg sent, awaiting reply */
+ err = i40evf_verify_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_vf_config_msg(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ err);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ goto restart;
+ break;
+ case __I40EVF_INIT_GET_RESOURCES:
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res) {
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ goto err;
+ }
+ }
+ err = i40evf_get_vf_config(adapter);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto restart;
+ if (err) {
+ dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
+ __func__, err);
+ goto err_alloc;
+ }
+ adapter->state = __I40EVF_INIT_SW;
+ break;
+ default:
+ goto err_alloc;
+ }
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ goto err_alloc;
+ }
+
+ adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+ adapter->txd_count = I40EVF_DEFAULT_TXD;
+ adapter->rxd_count = I40EVF_DEFAULT_RXD;
+
+ netdev->netdev_ops = &i40evf_netdev_ops;
+ i40evf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO;
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev,
+ "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
+ random_ether_addr(adapter->hw.mac.addr);
+ }
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (NULL == f)
+ goto err_sw_init;
+
+ memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ list_add(&f->list, &adapter->mac_filter_list);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ i40evf_map_rings_to_vectors(adapter);
+ i40evf_configure_rss(adapter);
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+ adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.netdev = adapter->netdev;
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+
+ dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_misc_irq_enable(adapter);
+ return;
+restart:
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(50));
+ return;
+
+err_register:
+ i40evf_free_misc_irq(adapter);
+err_sw_init:
+ i40evf_reset_interrupt_capability(adapter);
+ adapter->state = __I40EVF_FAILED;
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ /* Things went into the weeds, so try again later */
+ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
+ adapter->state = __I40EVF_FAILED;
+ return; /* do not reschedule */
+ }
+ schedule_delayed_work(&adapter->init_task, HZ * 3);
+ return;
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+
+#endif
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct i40evf_adapter *adapter = NULL;
+ struct i40e_hw *hw = NULL;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = true;
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ pci_using_dac = false;
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
+ __func__, err);
+ err = -EIO;
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, i40evf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ MAX_TX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->state = __I40EVF_STARTUP;
+
+ /* Call save state here because it relies on the adapter struct. */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+ INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
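+ /* the rest of device setup (admin queue, VF resources, netdev
+ * registration) happens asynchronously in i40evf_init_task
+ */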
+ schedule_delayed_work(&adapter->init_task, 10);
+
+ return 0;
+
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ i40evf_down(adapter);
+ rtnl_unlock();
+ }
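+ /* Release interrupt resources; they are reacquired in i40evf_resume. */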
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+ struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+ return err;
+ }
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ adapter->state = __I40EVF_REMOVE;
+
+ if (adapter->num_msix_vectors) {
+ i40evf_misc_irq_disable(adapter);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ flush_scheduled_work();
+
+ i40evf_free_misc_irq(adapter);
+
+ i40evf_reset_interrupt_capability(adapter);
+ }
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+ int ret;
+
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..e6978d79e62b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,772 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+ op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
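+ /* The PF reports its status in the low cookie of the descriptor. */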
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
+
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Busy waits until response is received from PF,
+ * with maximum timeout. Response from PF is returned in the buffer for further
+ * processing by the caller.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err;
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ event.msg_size = len;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error returned from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid response from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
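+ /* Copy the PF's response into the preallocated VF resource buffer,
+ * truncating to the space available.
+ */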
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+ i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+ int pairs = adapter->vsi_res->num_queue_pairs;
+ int i, len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = kzalloc(len, GFP_ATOMIC);
+ if (!vqci) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vqci->vsi_id = adapter->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (i = 0; i < pairs; i++) {
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi++;
+ }
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ kfree(vqci);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
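+ /* Enable every queue pair on the VSI with a single bitmap. */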
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_irq_map_info *vimi;
+ int v_idx, q_vectors, len;
+ struct i40e_q_vector *q_vector;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (adapter->num_msix_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ vimi = kzalloc(len, GFP_ATOMIC);
+ if (!vimi) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ vimi->num_vectors = adapter->num_msix_vectors;
+ /* Queue vectors first */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+ vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+ vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ }
+ /* Misc vector last - this is only for AdminQ messages */
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = 0;
+ vimi->vecmap[v_idx].txq_map = 0;
+ vimi->vecmap[v_idx].rxq_map = 0;
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
+ kfree(vimi);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
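+ /* Send only as many filters as will fit in a single AQ buffer. */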
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove) {
+ memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+ struct i40e_virtchnl_promisc_info vpi;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* no error message, this isn't crucial */
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ /* queue maps are ignored for this message - only the vsi is used */
+ if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs)))
+ /* if the request failed, don't lock out others */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
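+ /* Events arrive unsolicited from the PF and are handled separately
+ * from replies to our own requests.
+ */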
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up && !netif_carrier_ok(netdev)) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else if (!adapter->link_up) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ adapter->state = __I40EVF_RESETTING;
+ schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev,
+ "%s: hardware reset pending\n", __func__);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
+ break;
+ }
+ return;
+ }
+ if (v_opcode != adapter->current_op) {
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ __func__, adapter->current_op, v_opcode);
+ /* We're probably completely screwed at this point, but clear
+ * the current op and try to carry on....
+ */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+ if (v_retval) {
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
+ }
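+ /* Clear the pending flag for the request that just completed. */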
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS: {
+ struct i40e_eth_stats *stats =
+ (struct i40e_eth_stats *)msg;
+ adapter->net_stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ adapter->net_stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ adapter->net_stats.rx_bytes = stats->rx_bytes;
+ adapter->net_stats.tx_bytes = stats->tx_bytes;
+ adapter->net_stats.rx_errors = stats->rx_errors;
+ adapter->net_stats.tx_errors = stats->tx_errors;
+ adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.tx_dropped = stats->tx_discards;
+ adapter->current_stats = *stats;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+ /* enable transmits */
+ i40evf_irq_enable(adapter, true);
+ netif_tx_start_all_queues(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
+ break;
+ } /* switch v_opcode */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df826..06df6928f44c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -113,6 +113,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
+ * igb_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = igb_check_for_link_82575(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
@@ -189,6 +242,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ igb_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -365,6 +441,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
+ if (mac->type == e1000_82580) {
+ switch (hw->device_id) {
+ /* feature not supported on these id's */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ break;
+ default:
+ hw->dev_spec._82575.mas_capable = true;
+ break;
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ceda..0571b973be80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -205,6 +205,11 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
@@ -532,6 +537,17 @@
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 2e166b22d52b..ab99e2b582a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -533,6 +533,9 @@ struct e1000_dev_spec_82575 {
bool clear_semaphore_once;
struct e1000_sfp_flags eth_flags;
bool module_plugged;
+ u8 media_port;
+ bool media_changed;
+ bool mas_capable;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5e9ed89403aa..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
struct igb_adapter;
@@ -67,6 +68,7 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +129,9 @@ struct vf_data_storage {
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -337,8 +339,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif
@@ -355,7 +359,7 @@ struct igb_adapter {
unsigned int flags;
unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
@@ -440,7 +444,7 @@ struct igb_adapter {
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon_buff;
bool ets;
#endif
struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +454,8 @@ struct igb_adapter {
u8 rss_indir_tbl[IGB_RETA_SIZE];
unsigned long link_check_timeout;
+ int copper_tries;
+ struct e1000_info ei;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -462,6 +468,16 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+#define IGB_FLAG_MEDIA_RESET (1 << 10)
+#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+#define IGB_FLAG_MAS_ENABLE (1 << 12)
+#define IGB_FLAG_HAS_MSIX (1 << 13)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0X0001
+#define IGB_MAS_ENABLE_1 0X0002
+#define IGB_MAS_ENABLE_2 0X0004
+#define IGB_MAS_ENABLE_3 0X0008
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c3143da497c8..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
msleep(10);
/* Unhook test interrupt handler */
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
free_irq(adapter->msix_entries[0].vector, adapter);
else
free_irq(irq, adapter);
@@ -1983,6 +1983,10 @@ static void igb_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__IGB_TESTING, &adapter->state);
+
+ /* can't do offline tests on media switching devices */
+ if (adapter->hw.dev_spec._82575.mas_capable)
+ eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -2929,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
ch->max_combined = igb_max_channels(adapter);
/* Report info for other vector */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..e0af5bc61613 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -117,29 +117,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *igb_attr;
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IGB_HWMON_TYPE_LOC:
igb_attr->dev_attr.show = igb_hwmon_show_location;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IGB_HWMON_TYPE_TEMP:
igb_attr->dev_attr.show = igb_hwmon_show_temp;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IGB_HWMON_TYPE_CAUTION:
igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IGB_HWMON_TYPE_MAX:
igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -154,30 +154,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
igb_attr->dev_attr.attr.mode = S_IRUGO;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
- return rc;
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
}
static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
}
/* called from igb_main.c */
@@ -189,11 +175,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
- struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +187,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Don't create thermal hwmon interface if no sensors present */
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
+ if (rc)
goto exit;
- }
- adapter->i2c_client = client;
- /* Allocation space for max attributes
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->igb_hwmon_buff = igb_hwmon;
for (i = 0; i < E1000_MAX_SENSORS; i++) {
@@ -240,11 +208,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
+ }
+
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device.\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
+ igb_hwmon->groups[0] = &igb_hwmon->group;
+ igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ client->name,
+ igb_hwmon,
+ igb_hwmon->groups);
+ if (IS_ERR(hwmon_dev)) {
+ rc = PTR_ERR(hwmon_dev);
+ goto err;
}
goto exit;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 025e5f4b7481..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* igb_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->flags & IGB_FLAG_HAS_MSI)
+ pci_disable_msi(adapter->pdev);
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
if (!msix)
goto msi_only;
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- goto msi_only;
-
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err = 0;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter);
if (!err)
goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int vector = 0, i;
free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wr32(E1000_IAM, 0);
wr32(E1000_IMC, ~0);
wrfl();
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1607,6 +1621,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
+ * igb_check_swap_media - Detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ connsw = rd32(E1000_CONNSW);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (!swap_now)
+ return;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to fiber/serdes\n");
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to copper\n");
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ netdev_err(adapter->netdev,
+ "AMS: Invalid media type found, returning\n");
+ break;
+ }
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
**/
@@ -1623,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
else
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1719,6 +1800,37 @@ void igb_reinit_locked(struct igb_adapter *adapter)
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = 0;
+
+ connsw = rd32(E1000_CONNSW);
+ if (!(hw->phy.media_type == e1000_media_type_copper))
+ return ret_val;
+
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ wr32(E1000_CONNSW, connsw);
+ wrfl();
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ netdev_info(adapter->netdev,
+ "MAS: Unable to configure feature, disabling..\n");
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1830,6 +1942,16 @@ void igb_reset(struct igb_adapter *adapter)
hw->mac.ops.reset_hw(hw);
wr32(E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ /* need to resetup here after media swap */
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(&pdev->dev,
+ "Error enabling Media Auto Sense\n");
+ }
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -1976,6 +2098,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ default:
+ /* Shouldn't get here */
+ netdev_err(adapter->netdev,
+ "MAS: Invalid port configuration, returning\n");
+ break;
+ }
+}
+
+/**
* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
**/
@@ -2022,7 +2196,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
@@ -2079,11 +2252,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
+ hw->hw_addr = pci_iomap(pdev, 0, 0);
if (!hw->hw_addr)
goto err_ioremap;
@@ -2093,8 +2263,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
/* PCI config space info */
hw->vendor_id = pdev->vendor;
@@ -2350,6 +2520,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = false;
}
#endif
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+ if (hw->dev_spec._82575.mas_capable)
+ igb_init_mas(adapter);
+
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2382,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
switch (hw->mac.type) {
@@ -2470,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries || num_vfs > 7) {
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
goto out;
}
@@ -3935,6 +4110,7 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
@@ -3945,7 +4121,21 @@ static void igb_watchdog_task(struct work_struct *work)
link = false;
}
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = rd32(E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4026,8 +4216,27 @@ static void igb_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
@@ -4056,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
@@ -5977,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
@@ -7344,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
@@ -7842,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
if (netif_running(netdev))
igb_close(netdev);
- igb_clear_interrupt_scheme(adapter);
+ igb_reset_interrupt_capability(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
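The igb probe changes above replace the explicit pci_resource_start()/ioremap() pair with pci_iomap(), which maps the whole BAR when the requested length is 0. A minimal sketch of the same mapping step for a generic PCI driver (the function name is illustrative, not part of this patch):

#include <linux/pci.h>

/* Map BAR 0 in full; pci_iounmap() is the matching teardown call. */
static int example_map_bar0(struct pci_dev *pdev, u8 __iomem **hw_addr)
{
	*hw_addr = pci_iomap(pdev, 0, 0);	/* bar 0, len 0 = whole BAR */
	if (!*hw_addr)
		return -EIO;
	return 0;
}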
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 04bf22e5ee31..675435fc2e53 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 2224cc2edf13..1180cd59b570 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..0186ea2969fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -424,9 +424,10 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
- } else
+ } else {
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -458,9 +459,10 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
- } else
+ } else {
/* preserve yield marks */
q_vector->state |= IXGBE_QV_STATE_POLL;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -552,8 +554,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
@@ -583,6 +587,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -740,6 +749,7 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
u16 bd_number;
@@ -775,7 +785,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
- struct hwmon_buff ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
@@ -796,6 +806,7 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636..edda6814108c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
goto out;
}
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d259dc76604e..f2e3919750ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -124,24 +124,65 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define IXGBE_FAILED_READ_REG 0xffffffffU
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
- writel((u32) (val >> 32), (addr + 4));
-#endif
+static inline bool ixgbe_removed(void __iomem *addr)
+{
+ return unlikely(!addr);
+}
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+ if (ixgbe_removed(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+#endif
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (ixgbe_removed(reg_addr))
+ return;
+ writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
+
+static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+ ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+ ixgbe_read_reg((a), (reg) + ((offset) << 2))
+
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
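The ixgbe_common.h rework above routes every MMIO access through inline helpers: writes become no-ops once hw->hw_addr has been cleared, and a read that comes back as all-ones triggers ixgbe_check_remove() to confirm whether the adapter has been surprise-removed. A hedged, generic sketch of the underlying pattern (illustrative names, not the ixgbe implementation):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_FAILED_READ	0xffffffffU

/* Read a register; an all-ones value on PCIe usually means the device
 * is gone, so report that to the caller for further checking.
 */
static inline u32 example_read_reg(u8 __iomem *base, u32 reg, bool *maybe_removed)
{
	u32 value;

	if (!base) {			/* BAR already torn down */
		*maybe_removed = true;
		return EXAMPLE_FAILED_READ;
	}
	value = readl(base + reg);
	if (value == EXAMPLE_FAILED_READ)
		*maybe_removed = true;	/* verify against a known-good register */
	return value;
}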
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4e7c9b098b58..043307024c4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1342,61 +1342,61 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
static const u32 test_pattern[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
- before = readl(adapter->hw.hw_addr + reg);
- writel((test_pattern[pat] & write),
- (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
e_err(drv, "pattern test reg %04X failed: got "
"0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
- writel(before, adapter->hw.hw_addr + reg);
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, adapter->hw.hw_addr + reg);
+ ixgbe_write_reg(&adapter->hw, reg, before);
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
u32 val, before;
- before = readl(adapter->hw.hw_addr + reg);
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return 1;
+ }
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err(drv, "set/check reg %04X test failed: got 0x%08X "
"expected 0x%08X\n", reg, (val & mask), (write & mask));
*data = reg;
- writel(before, (adapter->hw.hw_addr + reg));
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, (adapter->hw.hw_addr + reg));
- return 0;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
-#define REG_PATTERN_TEST(reg, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(drv, "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
@@ -1419,10 +1419,10 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writeable on newer MACs.
*/
- before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
- value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
- after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
+ value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
+ after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
e_err(drv, "failed STATUS register test got: 0x%08X "
"expected: 0x%08X\n", after, value);
@@ -1430,7 +1430,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
return 1;
}
/* restore previous status */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
/*
* Perform the remainder of the register test, looping through
@@ -1438,38 +1438,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ (test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -1954,6 +1963,15 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(hw, "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index f58db453a97e..08726177a3eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -585,7 +585,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
struct dma_pool *pool;
char pool_name[32];
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5bcc870f8367..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -64,7 +64,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.15.1-k"
+#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -278,10 +278,41 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
schedule_work(&adapter->service_task);
}
+static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ e_dev_err("Adapter removed\n");
+ ixgbe_service_event_schedule(adapter);
+}
+
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* If the register that just returned IXGBE_FAILED_READ_REG was
+ * itself the status register, skip the extra status read: this
+ * avoids a redundant access and, more importantly, blocks any
+ * potential recursion.
+ */
+ if (reg == IXGBE_STATUS) {
+ ixgbe_remove_adapter(hw);
+ return;
+ }
+ value = ixgbe_read_reg(hw, IXGBE_STATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbe_remove_adapter(hw);
+}
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1314,7 +1345,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbe_write_tail(rx_ring, val);
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -2969,7 +3000,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
/*
* set WTHRESH to encourage burst writeback, it should not be set
@@ -3308,6 +3339,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3332,6 +3365,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -3372,7 +3407,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -4572,6 +4607,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ smp_mb__before_clear_bit();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4656,6 +4692,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* lock SFP init bit to prevent race conditions with the watchdog */
while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
usleep_range(1000, 2000);
@@ -4783,7 +4821,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBE_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -5028,7 +5067,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
- if (max_vfs > 63) {
+ if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
adapter->num_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
} else {
@@ -5874,8 +5913,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
u64 eics = 0;
int i;
- /* If we're down or resetting, just bail */
+ /* If we're down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6122,8 +6162,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
- /* if interface is down do nothing */
+ /* if interface is down, removing or resetting, do nothing */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6341,8 +6382,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
- /* If we're already down or resetting, just bail */
+ /* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6350,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6362,6 +6406,15 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbe_down(adapter);
+ rtnl_unlock();
+ }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -6693,7 +6746,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbe_write_tail(tx_ring, i);
return;
dma_error:
@@ -6828,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
@@ -6854,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -6867,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq + f->offset;
#else
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
#endif
}
@@ -7874,6 +7927,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -7965,8 +8019,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
- pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:
#endif
@@ -8182,7 +8236,7 @@ err_register:
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8210,7 +8264,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
ixgbe_dbg_adapter_exit(adapter);
- set_bit(__IXGBE_DOWN, &adapter->state);
+ set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -8249,7 +8303,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->ixgbe_ieee_ets);
#endif
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d4a64e665398..cc3101afd29f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -27,8 +27,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 39217e5ff7dc..132557c318f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -29,7 +29,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_phy.h"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 72084f70adbb..dff0977876f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -148,7 +148,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
@@ -257,7 +257,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* PF. The PCI bus driver already checks for other values out of
* range.
*/
- if (num_vfs > 63) {
+ if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
err = -EPERM;
goto err_out;
}
@@ -631,11 +631,14 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u32 reg, reg_offset, vf_shift;
u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -654,6 +657,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* force drop enable for all VF Rx queues */
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= 1 << vf_shift;
@@ -684,6 +698,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+ /*
+ * Reset the VF's TDWBAL and TDWBAH registers,
+ * which are not cleared by an FLR
+ */
+ for (i = 0; i < q_per_pool; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+ }
+
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
if (!is_zero_ether_addr(vf_mac)) {
@@ -717,8 +740,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
}
if (adapter->vfinfo[vf].pf_set_mac &&
- memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- ETH_ALEN)) {
+ !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4713f9fc7f46..8bd29190514e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -28,6 +28,11 @@
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
+/* The ixgbe driver limits the maximum number of VFs that can be enabled
+ * to 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *ixgbe_attr;
- n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
- ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+ ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IXGBE_HWMON_TYPE_LOC:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IXGBE_HWMON_TYPE_TEMP:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IXGBE_HWMON_TYPE_CAUTION:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IXGBE_HWMON_TYPE_MAX:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
ixgbe_attr->dev_attr.store = NULL;
ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+ sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &ixgbe_attr->dev_attr);
+ adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
- if (rc == 0)
- ++adapter->ixgbe_hwmon_buff.n_hwmon;
+ ++adapter->ixgbe_hwmon_buff->n_hwmon;
- return rc;
+ return 0;
}
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
- if (adapter->ixgbe_hwmon_buff.device)
- hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
}
/* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
- struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
/* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
goto exit;
- /*
- * Allocation space for max attributs
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = IXGBE_MAX_SENSORS * 4;
- ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!ixgbe_hwmon->hwmon_list) {
+ ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+ GFP_KERNEL);
+ if (ixgbe_hwmon == NULL) {
rc = -ENOMEM;
- goto err;
- }
-
- ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(ixgbe_hwmon->device)) {
- rc = PTR_ERR(ixgbe_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
/*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
}
- goto exit;
+ ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+ ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
-err:
- ixgbe_sysfs_del_adapter(adapter);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ "ixgbe",
+ ixgbe_hwmon,
+ ixgbe_hwmon->groups);
+ if (IS_ERR(hwmon_dev))
+ rc = PTR_ERR(hwmon_dev);
exit:
return rc;
}
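The sysfs rewrite above stops calling device_create_file() per attribute and instead collects the attributes into a single group handed to devm_hwmon_device_register_with_groups(), so the hwmon core creates and removes the files and devres frees the buffer on detach. A minimal sketch of that registration pattern (the names and the constant temperature are placeholders, not the driver's code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static ssize_t example_show_temp(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* millidegrees Celsius */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, example_show_temp, NULL, 0);

static struct attribute *example_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL,					/* list must be NULL-terminated */
};
ATTRIBUTE_GROUPS(example);

static int example_register_hwmon(struct device *parent, void *drvdata)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_groups(parent, "example",
							   drvdata,
							   example_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}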
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7c19e969576f..0d39cfc4a3bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1980,9 +1980,10 @@ enum {
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
@@ -2173,6 +2174,14 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
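As a worked example of the translated register macros above (the numbers are purely illustrative): with q_per_pool = 2, vf_number = 3 and vf_q_index = 1, IXGBE_PVFTDWBALn expands to IXGBE_PVFTDWBAL(2 * 3 + 1) = 0x06038 + 0x40 * 7 = 0x061F8, i.e. the Tx descriptor write-back low register for the eighth pool-translated queue.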
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
@@ -277,4 +278,21 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+ {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+ {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
#ifdef BP_EXTENDED_STATS
{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -303,20 +303,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
/* clone ring and setup updated count */
- tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i] = *adapter->tx_ring[i];
tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
- }
+ err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(&tx_ring[i]);
+ }
- vfree(tx_ring);
- tx_ring = NULL;
+ vfree(tx_ring);
+ tx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -329,20 +329,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
- rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i] = *adapter->rx_ring[i];
rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
+ err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(&rx_ring[i]);
+ }
- vfree(rx_ring);
- rx_ring = NULL;
+ vfree(rx_ring);
+ rx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Tx */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
- adapter->tx_ring[i] = tx_ring[i];
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+ *adapter->tx_ring[i] = tx_ring[i];
}
adapter->tx_ring_count = new_tx_count;
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Rx */
if (rx_ring) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
- adapter->rx_ring[i] = rx_ring[i];
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+ *adapter->rx_ring[i] = rx_ring[i];
}
adapter->rx_ring_count = new_rx_count;
@@ -382,7 +380,7 @@ clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i].bp_yields;
- rx_cleaned += adapter->rx_ring[i].bp_cleaned;
- rx_yields += adapter->rx_ring[i].bp_yields;
+ rx_yields += adapter->rx_ring[i]->stats.yields;
+ rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+ rx_yields += adapter->rx_ring[i]->stats.yields;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i].bp_yields;
- tx_cleaned += adapter->tx_ring[i].bp_cleaned;
- tx_yields += adapter->tx_ring[i].bp_yields;
+ tx_yields += adapter->tx_ring[i]->stats.yields;
+ tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+ tx_yields += adapter->tx_ring[i]->stats.yields;
}
adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8971e2d0a984..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
union ixgbe_adv_tx_desc *next_to_watch;
- u16 length;
- u16 mapped_as_page;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
dma_addr_t dma;
};
+struct ixgbevf_stats {
+ u64 packets;
+ u64 bytes;
+#ifdef BP_EXTENDED_STATS
+ u64 yields;
+ u64 misses;
+ u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 csum_err;
+};
+
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
@@ -70,31 +96,27 @@ struct ixgbevf_ring {
unsigned int next_to_use;
unsigned int next_to_clean;
- int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
- u64 total_bytes;
- u64 total_packets;
- struct u64_stats_sync syncp;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
- u64 bp_yields;
- u64 bp_misses;
- u64 bp_cleaned;
-#endif
+ struct ixgbevf_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ixgbevf_tx_queue_stats tx_stats;
+ struct ixgbevf_rx_queue_stats rx_stats;
+ };
- u16 head;
- u16 tail;
+ u64 hw_csum_rx_error;
+ u8 __iomem *tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
u16 rx_buf_len;
+ int queue_index; /* needed for multiqueue queue management */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -125,8 +147,6 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -188,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->tx.ring->bp_yields++;
+ q_vector->tx.ring->stats.yields++;
#endif
} else {
/* we don't care if someone yielded */
@@ -223,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
rc = false;
#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->bp_yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
} else {
/* preserve yield marks */
@@ -262,6 +282,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_OWNED)
rc = false;
+ q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -315,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -328,25 +348,18 @@ struct ixgbevf_adapter {
u32 eims_other;
/* TX */
- struct ixgbevf_ring *tx_ring; /* One per active queue */
int num_tx_queues;
+ struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
/* RX */
- struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
+ struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -356,6 +369,9 @@ struct ixgbevf_adapter {
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
+
+ struct msix_entry *msix_entries;
/* OS defined structs */
struct net_device *netdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- struct ixgbevf_hw_stats stats;
+ u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
+ struct ixgbevf_hw_stats stats;
+
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
u32 link_speed;
bool link_up;
- struct work_struct watchdog_task;
-
spinlock_t mbx_lock;
+
+ struct work_struct watchdog_task;
};
enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);
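The ixgbevf.h restructuring above moves per-ring packet and byte counts into a dedicated stats block guarded by a u64_stats_sync, so 32-bit readers can take a consistent 64-bit snapshot. A hedged sketch of the usual reader-side pattern (a generic example, not the driver's own aggregation code):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Retry the snapshot if a writer updated the counters mid-read. */
static void example_fetch_stats(struct example_ring_stats *s,
				u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}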
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 92ef4cb5a8e8..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.11.3-k"
+#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -95,13 +95,15 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
+ rx_ring->next_to_use = val;
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -109,7 +111,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
@@ -143,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer
- *tx_buffer_info)
-{
- if (tx_buffer_info->dma) {
- if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
- else
+ struct ixgbevf_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- tx_buffer_info->dma = 0;
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- if (tx_buffer_info->skb) {
- dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
- }
- tx_buffer_info->time_stamp = 0;
- /* tx_buffer_info must be completely set up in the transmit path */
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR 14
@@ -185,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, count = 0;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = tx_ring->count / 2;
+ unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
- i = tx_ring->next_to_clean;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- eop_desc = tx_buffer_info->next_to_watch;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
do {
- bool cleaned = false;
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -212,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* clear next_to_watch to prevent false hangs */
- tx_buffer_info->next_to_watch = NULL;
+ tx_buffer->next_to_watch = NULL;
- for ( ; !cleaned; count++) {
- struct sk_buff *skb;
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- cleaned = (tx_desc == eop_desc);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
- ixgbevf_unmap_and_free_tx_resource(tx_ring,
- tx_buffer_info);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
- tx_desc->wb.status = 0;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
- eop_desc = tx_buffer_info->next_to_watch;
- } while (count < tx_ring->count);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
+
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
}
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
-
- return count < tx_ring->count;
+ return !!budget;
}
/**
@@ -341,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
@@ -349,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- ring->hw_csum_rx_good++;
}
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
**/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
int cleaned_count)
{
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
-
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
if (!bi->skb) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
- if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ if (!skb)
goto no_buffers;
- }
+
bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
+ bi->dma = dma_map_single(rx_ring->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
dev_kfree_skb(skb);
bi->skb = NULL;
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(rx_ring->dev, "Rx DMA map failed\n");
break;
}
}
@@ -402,14 +420,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
- }
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ if (rx_ring->next_to_use != i)
+ ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
skb->next = next_buffer->skb;
IXGBE_CB(skb->next)->prev = skb;
- adapter->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- ether_addr_equal(adapter->netdev->dev_addr,
+ ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(adapter, rx_ring,
- cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
@@ -532,11 +545,11 @@ next_desc:
cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
- ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
if (found)
- ring->bp_cleaned += found;
+ ring->stats.cleaned += found;
else
- ring->bp_misses++;
+ ring->stats.misses++;
#endif
if (found)
break;
@@ -848,8 +861,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->rx_ring[r_idx].next = q_vector->rx.ring;
- q_vector->rx.ring = &a->rx_ring[r_idx];
+ a->rx_ring[r_idx]->next = q_vector->rx.ring;
+ q_vector->rx.ring = a->rx_ring[r_idx];
q_vector->rx.count++;
}
@@ -858,8 +871,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->tx_ring[t_idx].next = q_vector->tx.ring;
- q_vector->tx.ring = &a->tx_ring[t_idx];
+ a->tx_ring[t_idx]->next = q_vector->tx.ring;
+ q_vector->tx.ring = a->tx_ring[t_idx];
q_vector->tx.count++;
}
@@ -1087,6 +1100,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
}
/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+ /* disable head writeback */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+ (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on-chip descriptors, which is
+ * currently 40.
+ */
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* Setting PTHRESH to 32 improves performance */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
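/* Editorial illustration, not part of the patch above: a minimal sketch of
 * how the TXDCTL value written in ixgbevf_configure_tx_ring() is composed.
 * The shift positions mirror the constants used in the function; the helper
 * name is assumed for illustration only.
 */
static inline u32 example_vf_txdctl(u32 pthresh, u32 hthresh, u32 wthresh)
{
	u32 txdctl = IXGBE_TXDCTL_ENABLE;	/* queue enable bit */

	txdctl |= wthresh << 16;	/* descriptors held back before write-back */
	txdctl |= hthresh << 8;		/* host threshold */
	txdctl |= pthresh;		/* prefetch threshold */

	return txdctl;	/* example_vf_txdctl(32, 1, 8) matches the value programmed above */
}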
+/**
* ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
* @adapter: board private structure
*
@@ -1094,31 +1171,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
- u64 tdba;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u32 i;
/* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_VFTDH(j);
- adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -1129,7 +1186,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
srrctl = IXGBE_SRRCTL_DROP_EN;
@@ -1187,7 +1244,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ ixgbevf_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ ixgbevf_configure_srrctl(adapter, reg_idx);
+
+ /* prevent DMA from exceeding buffer space available */
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ ixgbevf_rx_desc_queue_enable(adapter, ring);
+ ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
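/* Editorial illustration, not part of the patch above: a minimal sketch of
 * the refill count passed to ixgbevf_alloc_rx_buffers() above.  The driver's
 * ixgbevf_desc_unused() helper computes it along these lines; the function
 * name here is assumed.
 */
static inline u16 example_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}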
/**
@@ -1198,33 +1341,17 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- u64 rdba;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j;
- u32 rdlen;
+ int i;
ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
- (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_VFRDH(j);
- adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-
- ixgbevf_configure_srrctl(adapter, j);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1366,69 +1493,54 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err;
- ixgbevf_set_rx_mode(netdev);
+ spin_lock_bh(&adapter->mbx_lock);
- ixgbevf_restore_vlan(adapter);
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
- ixgbevf_configure_tx(adapter);
- ixgbevf_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring,
- ixgbevf_desc_unused(ring));
- }
-}
+ spin_unlock_bh(&adapter->mbx_lock);
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- int j = adapter->rx_ring[rxr].reg_idx;
+ if (err)
+ return err;
- do {
- usleep_range(1000, 2000);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0]->reg_idx = def_q;
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
- rxr);
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* if we have a bad config abort request queue reset */
+ if (adapter->num_rx_queues != num_rx_queues) {
+ /* force mailbox timeout to prevent further messages */
+ hw->mbx.timeout = 0;
+
+ /* wait for watchdog to come around and bail us out */
+ adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ }
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ return 0;
}
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring)
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ ixgbevf_configure_dcb(adapter);
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+ ixgbevf_set_rx_mode(adapter->netdev);
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ ixgbevf_restore_vlan(adapter);
- if (!wait_loop)
- hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
- reg_idx);
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1493,37 +1605,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- u32 txdctl, rxdctl;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
- if (hw->mac.type == ixgbe_mac_X540_vf) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
- IXGBE_RXDCTL_RLPML_EN);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
- ixgbevf_rx_desc_queue_enable(adapter, i);
- }
ixgbevf_configure_msix(adapter);
@@ -1549,85 +1630,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
-
- /* allocate resources on the ring */
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
- kfree(rx_ring);
- return err;
- }
- }
-
- /* free the existing rings and queues */
- ixgbevf_free_all_rx_resources(adapter);
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- /* reset ring to vector mapping */
- ixgbevf_reset_q_vectors(adapter);
- ixgbevf_map_rings_to_vectors(adapter);
-
- return 0;
-}
-
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_reset_queues(adapter);
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1640,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
@@ -1659,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -1680,23 +1683,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned long size;
@@ -1715,14 +1708,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
memset(tx_ring->tx_buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -1734,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -1746,22 +1731,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl;
- int i, j;
+ int i;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+ ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1782,10 +1766,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+ IXGBE_TXDCTL_SWFLSH);
}
netif_carrier_off(netdev);
@@ -1889,9 +1873,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ int err;
+
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1)
+ adapter->num_rx_queues = num_tcs;
}
/**
@@ -1904,40 +1907,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
- int i;
+ struct ixgbevf_ring *ring;
+ int rx = 0, tx = 0;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
+ for (; tx < adapter->num_tx_queues; tx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->reg_idx = tx;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
- /* reg_idx may be remapped later by DCB config */
- adapter->tx_ring[i].reg_idx = i;
- adapter->tx_ring[i].dev = &adapter->pdev->dev;
- adapter->tx_ring[i].netdev = adapter->netdev;
+ adapter->tx_ring[tx] = ring;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
- adapter->rx_ring[i].reg_idx = i;
- adapter->rx_ring[i].dev = &adapter->pdev->dev;
- adapter->rx_ring[i].netdev = adapter->netdev;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->reg_idx = rx;
+
+ adapter->rx_ring[rx] = ring;
}
return 0;
-err_rx_ring_allocation:
- kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx) {
+ kfree(adapter->tx_ring[--tx]);
+ adapter->tx_ring[tx] = NULL;
+ }
+
+ while (rx) {
+ kfree(adapter->rx_ring[--rx]);
+ adapter->rx_ring[rx] = NULL;
+ }
return -ENOMEM;
}
@@ -2128,6 +2141,17 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -2258,11 +2282,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->hw_csum_rx_error +=
- adapter->rx_ring[i].hw_csum_rx_error;
- adapter->hw_csum_rx_good +=
- adapter->rx_ring[i].hw_csum_rx_good;
- adapter->rx_ring[i].hw_csum_rx_error = 0;
- adapter->rx_ring[i].hw_csum_rx_good = 0;
+ adapter->rx_ring[i]->hw_csum_rx_error;
+ adapter->rx_ring[i]->hw_csum_rx_error = 0;
}
}
@@ -2340,6 +2361,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ ixgbevf_queue_reset_subtask(adapter);
+
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
/*
@@ -2408,22 +2431,22 @@ pf_has_reset:
/**
* ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_tx_ring(adapter, tx_ring);
+ ixgbevf_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
tx_ring->dma);
tx_ring->desc = NULL;
@@ -2440,23 +2463,18 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
-
+ if (adapter->tx_ring[i]->desc)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2468,13 +2486,11 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
return 0;
err:
@@ -2500,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2513,40 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
- goto alloc_failed;
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- goto alloc_failed;
- }
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ if (!rx_ring->desc)
+ goto err;
return 0;
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -2565,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2577,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_rx_ring(adapter, rx_ring);
+ ixgbevf_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2609,66 +2615,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
-}
-
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
- }
-
- /* free the existing ring and queues */
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- return 0;
+ if (adapter->rx_ring[i]->desc)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2714,11 +2662,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- /* setup queue reg_idx and Rx queue count */
- err = ixgbevf_setup_queues(adapter);
- if (err)
- goto err_setup_queues;
-
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2756,7 +2699,6 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2788,6 +2730,34 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+
+ if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+}
+
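/* Editorial illustration, not part of the patch above: a minimal sketch of
 * the request/consume flow the new subtask creates.  ixgbevf_configure_dcb()
 * only raises a flag; the watchdog later runs ixgbevf_queue_reset_subtask(),
 * which rebuilds the interrupt/queue scheme while the interface is closed.
 */
/* request side, in ixgbevf_configure_dcb(): */
adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

/* consume side, from the watchdog task: */
if (adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED) {
	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);
}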
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
@@ -2810,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct ixgbevf_tx_buffer *first,
+ u8 *hdr_len)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
@@ -2836,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
IPPROTO_TCP,
0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM |
+ IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM;
}
/* compute header lengths */
@@ -2849,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len += l4len;
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2857,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
@@ -2865,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2888,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
}
break;
}
@@ -2916,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
break;
}
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
}
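/* Editorial illustration, not part of the patch above: a minimal sketch of
 * the vlan_macip_lens word handed to ixgbevf_tx_ctxtdesc(), following the
 * assembly in ixgbevf_tso(); ixgbevf_tx_csum() builds the same word but only
 * adds the network header length for CHECKSUM_PARTIAL skbs.  The helper name
 * is assumed.
 */
static inline u32 example_vlan_macip_lens(struct sk_buff *skb, u32 tx_flags)
{
	u32 val = skb_network_header_len(skb);		/* IP header length */

	val |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	val |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;	/* VLAN tag, if any */

	return val;
}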
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int len;
- unsigned int total = skb->len;
- unsigned int offset = 0, size;
- int count = 0;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
- int i;
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
- i = tx_ring->next_to_use;
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
- len = min(skb_headlen(skb), total);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(tx_ring->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
- goto dma_error;
+ /* set segmentation enable bits for TSO/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
+ return cmd_type;
+}
- for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
- frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)skb_frag_size(frag), total);
- offset = 0;
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- tx_buffer_info->length = size;
- tx_buffer_info->dma =
- skb_frag_dma_map(tx_ring->dev, frag,
- offset, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev,
- tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->mapped_as_page = true;
+ /* use index 1 context for TSO/FSO/FCOE */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- if (total == 0)
- break;
- }
+ /* Check Context must be set if Tx switch is enabled, which it
+ * always is for the case where virtual functions are running
+ */
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i = i - 1;
- tx_ring->tx_buffer_info[i].skb = skb;
+ tx_desc->read.olinfo_status = olinfo_status;
+}
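/* Editorial illustration, not part of the patch above: how the two helpers
 * combine for a TSO frame, mirroring their use in ixgbevf_tx_map() and the
 * flag handling in ixgbevf_tso().  'size' and 'hdr_len' are assumed locals.
 */
u32 tx_flags = IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4;

ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
tx_desc->read.cmd_type_len = ixgbevf_tx_cmd_type(tx_flags) |
			     cpu_to_le32(size);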
- return count;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ const u8 hdr_len)
+{
+ dma_addr_t dma;
+ struct sk_buff *skb = first->skb;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
-dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- /* clear timestamp and dma mappings for failed tx_buffer_info map */
- tx_buffer_info->dma = 0;
- count--;
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbevf_tx_cmd_type(tx_flags);
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
- i += tx_ring->count;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- return count;
-}
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, unsigned int first, u32 paylen,
- u8 hdr_len)
-{
- union ixgbe_adv_tx_desc *tx_desc = NULL;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 olinfo_status = 0, cmd_type_len = 0;
- unsigned int i;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+ for (;;) {
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
- cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+ }
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (likely(!data_len))
+ break;
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- /* use index 1 context for tso */
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
- }
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- /*
- * Check Context must be set if Tx switch is enabled, which it
- * always is for case where virtual functions are running
- */
- olinfo_status |= IXGBE_ADVTXD_CC;
+ size = skb_frag_size(frag);
+ data_len -= size;
- olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- i = tx_ring->next_to_use;
- while (count--) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- i++;
- if (i == tx_ring->count)
- i = 0;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+
+ frag++;
}
- tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+ /* set the timestamp */
+ first->time_stamp = jiffies;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier (wmb) to make certain all of the
+ * status bits have been updated before next_to_watch is written.
*/
wmb();
- tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
tx_ring->next_to_use = i;
}
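/* Editorial illustration, not part of the patch above: descriptor accounting
 * that matches the splitting loop in ixgbevf_tx_map().  A buffer longer than
 * IXGBE_MAX_DATA_PER_TXD spans several data descriptors; the driver's
 * TXD_USE_COUNT() macro performs an equivalent calculation, the helper name
 * here is assumed.
 */
static inline u16 example_descs_for_len(unsigned int len)
{
	return DIV_ROUND_UP(len, IXGBE_MAX_DATA_PER_TXD);
}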
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -3107,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
+
return 0;
}
@@ -3121,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_tx_buffer *first;
struct ixgbevf_ring *tx_ring;
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
+ u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 hdr_len = 0;
u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
/*
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3152,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += skb_shinfo(skb)->nr_frags;
#endif
if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
- adapter->tx_busy++;
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- first = tx_ring->next_to_use;
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ ixgbevf_tx_csum(tx_ring, first);
- if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
- else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ ixgbevf_tx_map(tx_ring, first, hdr_len);
- ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags),
- first, skb->len, hdr_len);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
- writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+ return NETDEV_TX_OK;
- ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -3289,8 +3282,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
- struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -3349,22 +3342,22 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = &adapter->rx_ring[i];
+ ring = adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = &adapter->tx_ring[i];
+ ring = adapter->tx_ring[i];
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
@@ -3595,9 +3588,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 4a5e3b0f712e..d74f5f4e5782 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -39,7 +39,6 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index ec94a20d7099..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -9,8 +9,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2011 John Crispin <blogic@openwrt.org>
*/
@@ -620,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a49e81bdf8e8..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
+ depends on HAS_IOMEM
select PHYLIB
---help---
This driver supports the MDIO interface found in the network
@@ -42,12 +43,12 @@ config MVMDIO
This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
- tristate "Marvell Armada 370/XP network interface support"
- depends on MACH_ARMADA_370_XP
+ tristate "Marvell Armada 370/38x/XP network interface support"
+ depends on PLAT_ORION
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA XP and ARMADA 370 SoC family.
+ Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
Note that this driver is distinct from the mv643xx_eth
driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 61088a6a9424..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2067,23 +2066,6 @@ static inline void oom_timer_wrapper(unsigned long data)
napi_schedule(&mp->napi);
}
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
- int data;
-
- data = phy_read(mp->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(mp->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(mp->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void port_start(struct mv643xx_eth_private *mp)
{
u32 pscr;
@@ -2096,8 +2078,9 @@ static void port_start(struct mv643xx_eth_private *mp)
struct ethtool_cmd cmd;
mv643xx_eth_get_settings(mp->dev, &cmd);
- phy_reset(mp);
+ phy_init_hw(mp->phy);
mv643xx_eth_set_settings(mp->dev, &cmd);
+ phy_start(mp->phy);
}
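/* Editorial illustration, not part of the patch above: phy_init_hw() now
 * performs the BMCR soft reset that the removed phy_reset() open-coded, plus
 * any registered PHY fixups.  The error handling shown here is an assumption,
 * not taken from the driver.
 */
if (phy_init_hw(mp->phy))
	netdev_warn(mp->dev, "PHY reset failed\n");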
/*
@@ -2293,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
-
+ if (mp->phy)
+ phy_stop(mp->phy);
free_irq(dev->irq, dev);
port_reset(mp);
@@ -2764,8 +2748,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct phy_device *phy = mp->phy;
- phy_reset(mp);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c4eeb69a5bee..fd409d76b811 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,7 +17,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5f0d72e5e33..8d76fca7fde7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -101,16 +103,56 @@
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
#define MVNETA_INTR_NEW_CAUSE 0x25a0
-#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK 0x25a4
+
+/* bits 0..7 = TXQ SENT, one bit per queue.
+ * bits 8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit 29 = OLD_REG_SUM, see old reg ?
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports
+ * bit 31 = MISC_SUM, one bit for 4 ports
+ */
+#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
+#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
+
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
+
+/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4
+
+#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
+#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
+#define MVNETA_CAUSE_PTP BIT(4)
+
+#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
+#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
+#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
+#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
+#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
+#define MVNETA_CAUSE_PRBS_ERR BIT(12)
+#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
+#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
+
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
+#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
+
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
@@ -121,7 +163,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@@ -176,9 +218,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD 10
-
/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64
@@ -221,27 +260,25 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
-struct mvneta_stats {
+struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
- u64 packets;
- u64 bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
struct mvneta_port {
int pkt_size;
+ unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
- struct timer_list tx_done_timer;
struct net_device *dev;
u32 cause_rx_tx;
struct napi_struct napi;
- /* Flags */
- unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT 0
-
/* Napi weight */
int weight;
@@ -250,8 +287,7 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- struct mvneta_stats tx_stats;
- struct mvneta_stats rx_stats;
+ struct mvneta_pcpu_stats *stats;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -410,6 +446,8 @@ static int txq_number = 8;
static int rxq_def;
+static int rx_copybreak __read_mostly = 256;
+
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -461,21 +499,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
+ int cpu;
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
- do {
- start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
- stats->rx_packets = pp->rx_stats.packets;
- stats->rx_bytes = pp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ cpu_stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
- do {
- start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
- stats->tx_packets = pp->tx_stats.packets;
- stats->tx_bytes = pp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
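
The per-CPU aggregation above relies on the u64_stats_fetch_begin/retry pair to read a consistent snapshot of each CPU's counters. A simplified, single-threaded analogue of that retry pattern (the struct and field names here are invented for the sketch) looks like this:

#include <stdio.h>

/* even seq = stable, odd seq = writer in progress */
struct pcpu_stats {
	unsigned int seq;
	unsigned long long rx_packets;
	unsigned long long rx_bytes;
};

int main(void)
{
	struct pcpu_stats cpu0 = { .seq = 2, .rx_packets = 10, .rx_bytes = 640 };
	unsigned long long rx_packets, rx_bytes;
	unsigned int start;

	do {
		start = cpu0.seq;
		rx_packets = cpu0.rx_packets;
		rx_bytes = cpu0.rx_bytes;
	} while ((start & 1) || start != cpu0.seq); /* retry on concurrent update */

	printf("rx: %llu packets, %llu bytes\n", rx_packets, rx_bytes);
	return 0;
}
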
@@ -487,14 +533,14 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
/* Rx descriptors helper methods */
-/* Checks whether the given RX descriptor is both the first and the
- * last descriptor for the RX packet. Each RX packet is currently
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
* received through a single RX descriptor, so not having each RX
* descriptor with its first and last bits set is an error
*/
-static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+static int mvneta_rxq_desc_is_first_last(u32 status)
{
- return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
MVNETA_RXD_FIRST_LAST_DESC;
}
@@ -570,6 +616,7 @@ mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
int rx_desc = rxq->next_desc_to_proc;
rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
return rxq->descs + rx_desc;
}
@@ -665,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PSC_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -1100,17 +1118,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
txq->done_pkts_coal = value;
}
-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
- if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
- pp->tx_done_timer.expires = jiffies +
- msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
- add_timer(&pp->tx_done_timer);
- }
-}
-
-
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
u32 phys_addr, u32 cookie)
@@ -1204,10 +1211,10 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ if (!mvneta_rxq_desc_is_first_last(status)) {
netdev_err(pp->dev,
"bad rx status %08x (buffer oversize), size=%d\n",
- rx_desc->status, rx_desc->data_size);
+ status, rx_desc->data_size);
return;
}
@@ -1231,13 +1238,12 @@ static void mvneta_rx_error(struct mvneta_port *pp,
}
}
-/* Handle RX checksum offload */
-static void mvneta_rx_csum(struct mvneta_port *pp,
- struct mvneta_rx_desc *rx_desc,
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
- (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ if ((status & MVNETA_RXD_L3_IP4) &&
+ (status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -1246,13 +1252,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from the tx_done reg. <cause> must not be zero. The return value is always
+ * a valid queue that matches the first bit set in <cause>.
+ */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
u32 cause)
{
int queue = fls(cause) - 1;
- return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+ return &pp->txqs[queue];
}
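
As a rough illustration of the policy above: fls() returns the 1-based index of the highest set bit, so pending TX queues are serviced from the highest index downwards. The standalone sketch below (fls_demo() is a stand-in for the kernel's fls(), built on __builtin_clz) behaves like the loop in mvneta_tx_done_gbe():

#include <stdio.h>

/* userspace stand-in for the kernel's fls(): 1-based index of the MSB */
static int fls_demo(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int cause = 0x06;	/* TXQ1 and TXQ2 have sent descriptors */

	while (cause) {
		int queue = fls_demo(cause) - 1;

		printf("servicing txq %d\n", queue);	/* 2, then 1 */
		cause &= ~(1u << queue);
	}
	return 0;
}
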
/* Free tx queue skbuffs */
@@ -1278,15 +1287,16 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
}
/* Handle end of transmission */
-static int mvneta_txq_done(struct mvneta_port *pp,
+static void mvneta_txq_done(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done;
tx_done = mvneta_txq_sent_desc_proc(pp, txq);
- if (tx_done == 0)
- return tx_done;
+ if (!tx_done)
+ return;
+
mvneta_txq_bufs_free(pp, txq, tx_done);
txq->count -= tx_done;
@@ -1295,8 +1305,22 @@ static int mvneta_txq_done(struct mvneta_port *pp,
if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
netif_tx_wake_queue(nq);
}
+}
- return tx_done;
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pp->frag_size);
+ else
+ return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+ if (likely(pp->frag_size <= PAGE_SIZE))
+ put_page(virt_to_head_page(data));
+ else
+ kfree(data);
}
/* Refill processing */
@@ -1305,22 +1329,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
{
dma_addr_t phys_addr;
- struct sk_buff *skb;
+ void *data;
- skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
- if (!skb)
+ data = mvneta_frag_alloc(pp);
+ if (!data)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ phys_addr = dma_map_single(pp->dev->dev.parent, data,
MVNETA_RX_BUF_SIZE(pp->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
+ mvneta_frag_free(pp, data);
return -ENOMEM;
}
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
-
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
return 0;
}
@@ -1374,9 +1397,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
- struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+ void *data = (void *)rx_desc->buf_cookie;
- dev_kfree_skb_any(skb);
+ mvneta_frag_free(pp, data);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
@@ -1391,6 +1414,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
{
struct net_device *dev = pp->dev;
int rx_done, rx_filled;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
/* Get number of received packets */
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1405,53 +1430,89 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
while (rx_done < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
+ unsigned char *data;
u32 rx_status;
int rx_bytes, err;
- prefetch(rx_desc);
rx_done++;
rx_filled++;
rx_status = rx_desc->status;
- skb = (struct sk_buff *)rx_desc->buf_cookie;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (unsigned char *)rx_desc->buf_cookie;
- if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ err_drop_frame:
dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
- mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
- (u32)skb);
+ /* leave the descriptor untouched */
continue;
}
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ if (rx_bytes <= rx_copybreak) {
+ /* better copy a small frame and not unmap the DMA region */
+ skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+ if (unlikely(!skb))
+ goto err_drop_frame;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes,
+ DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, rx_bytes),
+ data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ mvneta_rx_csum(pp, rx_status, skb);
+ napi_gro_receive(&pp->napi, skb);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* leave the descriptor and buffer untouched */
+ continue;
+ }
+
+ skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+ if (!skb)
+ goto err_drop_frame;
+
+ dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- u64_stats_update_begin(&pp->rx_stats.syncp);
- pp->rx_stats.packets++;
- pp->rx_stats.bytes += rx_bytes;
- u64_stats_update_end(&pp->rx_stats.syncp);
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_desc, skb);
+ mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
/* Refill processing */
err = mvneta_rx_refill(pp, rx_desc);
if (err) {
- netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ netdev_err(dev, "Linux processing - Can't refill\n");
rxq->missed++;
rx_filled--;
}
}
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += rcvd_pkts;
+ stats->rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
@@ -1582,25 +1643,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- u64_stats_update_begin(&pp->tx_stats.syncp);
- pp->tx_stats.packets++;
- pp->tx_stats.bytes += skb->len;
- u64_stats_update_end(&pp->tx_stats.syncp);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
- if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
- mvneta_txq_done(pp, txq);
-
- /* If after calling mvneta_txq_done, count equals
- * frags, we need to set the timer
- */
- if (txq->count == frags && frags > 0)
- mvneta_add_tx_done_timer(pp);
-
return NETDEV_TX_OK;
}
@@ -1620,33 +1673,26 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
txq->txq_get_index = 0;
}
-/* handle tx done - called from tx done timer callback */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
- int *tx_todo)
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TX_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
struct mvneta_tx_queue *txq;
- u32 tx_done = 0;
struct netdev_queue *nq;
- *tx_todo = 0;
- while (cause_tx_done != 0) {
+ while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
- if (!txq)
- break;
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, smp_processor_id());
- if (txq->count) {
- tx_done += mvneta_txq_done(pp, txq);
- *tx_todo += txq->count;
- }
+ if (txq->count)
+ mvneta_txq_done(pp, txq);
__netif_tx_unlock(nq);
cause_tx_done &= ~((1 << txq->id));
}
-
- return tx_done;
}
/* Compute crc8 of the specified address, using a unique algorithm,
@@ -1876,14 +1922,20 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* Read cause register */
cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
- MVNETA_RX_INTR_MASK(rxq_number);
+ (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+ /* Release Tx descriptors */
+ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+ mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+ cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+ }
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
cause_rx_tx |= pp->cause_rx_tx;
if (rxq_number > 1) {
- while ((cause_rx_tx != 0) && (budget > 0)) {
+ while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
int count;
struct mvneta_rx_queue *rxq;
/* get rx queue number from cause_rx_tx */
@@ -1915,7 +1967,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
local_irq_save(flags);
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
local_irq_restore(flags);
}
@@ -1923,56 +1975,19 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct mvneta_port *pp = netdev_priv(dev);
- int tx_done = 0, tx_todo = 0;
-
- if (!netif_running(dev))
- return ;
-
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
- tx_done = mvneta_tx_done_gbe(pp,
- (((1 << txq_number) - 1) &
- MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
- &tx_todo);
- if (tx_todo > 0)
- mvneta_add_tx_done_timer(pp);
-}
-
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- struct net_device *dev = pp->dev;
int i;
for (i = 0; i < num; i++) {
- struct sk_buff *skb;
- struct mvneta_rx_desc *rx_desc;
- unsigned long phys_addr;
-
- skb = dev_alloc_skb(pp->pkt_size);
- if (!skb) {
- netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+ if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+ netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
__func__, rxq->id, i, num);
break;
}
-
- rx_desc = rxq->descs + i;
- memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
- phys_addr = dma_map_single(dev->dev.parent, skb->head,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
- dev_kfree_skb(skb);
- break;
- }
-
- mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
}
/* Add this number of RX descriptors as non occupied (ready to
@@ -2192,7 +2207,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number));
+ MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
phy_start(pp->phy_dev);
netif_tx_start_all_queues(pp->dev);
@@ -2225,16 +2240,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_rx_reset(pp);
}
-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
- struct mvneta_port *pp = netdev_priv(dev);
-
- netdev_info(dev, "tx timeout\n");
- mvneta_stop_dev(pp);
- mvneta_start_dev(pp);
-}
-
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
@@ -2282,6 +2287,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_cleanup_rxqs(pp);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -2429,6 +2436,8 @@ static int mvneta_open(struct net_device *dev)
mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -2478,8 +2487,6 @@ static int mvneta_stop(struct net_device *dev)
free_irq(dev->irq, pp);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
- del_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
return 0;
}
@@ -2615,7 +2622,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_set_rx_mode = mvneta_set_rx_mode,
.ndo_set_mac_address = mvneta_set_mac_addr,
.ndo_change_mtu = mvneta_change_mtu,
- .ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
};
@@ -2723,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ else
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
- mvneta_gmac_rgmii_set(pp, 1);
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
@@ -2741,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
static int mvneta_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram_target_info;
+ struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
u32 phy_addr;
@@ -2751,6 +2761,7 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
+ int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2792,9 +2803,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
- u64_stats_init(&pp->tx_stats.syncp);
- u64_stats_init(&pp->rx_stats.syncp);
-
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2807,12 +2815,31 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk);
- pp->base = of_iomap(dn, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto err_clk;
+ }
+
+ pp->base = devm_ioremap_resource(&pdev->dev, res);
- if (pp->base == NULL) {
+ if (IS_ERR(pp->base)) {
+ err = PTR_ERR(pp->base);
+ goto err_clk;
+ }
+
+ /* Alloc per-cpu stats */
+ pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ if (!pp->stats) {
err = -ENOMEM;
goto err_clk;
}
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ stats = per_cpu_ptr(pp->stats, cpu);
+ u64_stats_init(&stats->syncp);
+ }
+
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
@@ -2828,11 +2855,6 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- pp->tx_done_timer.data = (unsigned long)dev;
- pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
- init_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2842,7 +2864,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_init(pp, phy_addr);
if (err < 0) {
dev_err(&pdev->dev, "can't init eth hal\n");
- goto err_unmap;
+ goto err_free_stats;
}
mvneta_port_power_up(pp, phy_mode);
@@ -2872,8 +2894,8 @@ static int mvneta_probe(struct platform_device *pdev)
err_deinit:
mvneta_deinit(pp);
-err_unmap:
- iounmap(pp->base);
+err_free_stats:
+ free_percpu(pp->stats);
err_clk:
clk_disable_unprepare(pp->clk);
err_free_irq:
@@ -2892,7 +2914,7 @@ static int mvneta_remove(struct platform_device *pdev)
unregister_netdev(dev);
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
- iounmap(pp->base);
+ free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
free_netdev(dev);
@@ -2924,3 +2946,4 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
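
A sketch of the copybreak policy added above, with the kernel specifics stripped out: frames at or below the threshold are copied into a fresh buffer so the existing DMA mapping can be reused, while larger frames are handed up without a copy. The 256-byte default mirrors the rx_copybreak module parameter; receive_frame() and the buffer handling are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int rx_copybreak = 256;	/* same default as the module parameter above */

static void receive_frame(const unsigned char *data, int len)
{
	if (len <= rx_copybreak) {
		/* small frame: copy it, keep the original DMA buffer in place */
		unsigned char *copy = malloc(len);

		if (!copy)
			return;
		memcpy(copy, data, len);
		printf("copied %d-byte frame\n", len);
		free(copy);
	} else {
		/* large frame: pass the original buffer up, refill the ring later */
		printf("zero-copy %d-byte frame\n", len);
	}
}

int main(void)
{
	unsigned char frame[1500] = { 0 };

	receive_frame(frame, 64);	/* copied */
	receive_frame(frame, 1400);	/* zero-copy */
	return 0;
}
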
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fff62460185c..b358c2f6f4bd 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -19,11 +19,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -321,23 +319,6 @@ static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
wrl(pep, PHY_ADDRESS, reg_data);
}
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
- int data;
-
- data = phy_read(pep->phy, MII_BMCR);
- if (data < 0)
- return;
-
- data |= BMCR_RESET;
- if (phy_write(pep->phy, MII_BMCR, data) < 0)
- return;
-
- do {
- data = phy_read(pep->phy, MII_BMCR);
- } while (data >= 0 && data & BMCR_RESET);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -646,7 +627,7 @@ static void eth_port_start(struct net_device *dev)
struct ethtool_cmd cmd;
pxa168_get_settings(pep->dev, &cmd);
- ethernet_phy_reset(pep);
+ phy_init_hw(pep->phy);
pxa168_set_settings(pep->dev, &cmd);
}
@@ -1383,7 +1364,6 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
struct phy_device *phy = pep->phy;
- ethernet_phy_reset(pep);
phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 43aa7acd84a6..55a37ae11440 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2495,7 +2495,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
- skb->rxhash = re->skb->rxhash;
+ skb_copy_hash(skb, re->skb);
skb->vlan_proto = re->skb->vlan_proto;
skb->vlan_tci = re->skb->vlan_tci;
@@ -2503,7 +2503,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
length, PCI_DMA_FROMDEVICE);
re->skb->vlan_proto = 0;
re->skb->vlan_tci = 0;
- re->skb->rxhash = 0;
+ skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
}
@@ -2723,7 +2723,7 @@ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->rxhash = le32_to_cpu(status);
+ skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
}
/* Process status response ring */
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(dev);
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
sky2_show_addr(dev);
if (hw->ports > 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index eb520ab64014..563495d8975a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,6 +6,7 @@ config MLX4_EN
tristate "Mellanox Technologies 10Gbit Ethernet support"
depends on PCI
select MLX4_CORE
+ select PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 06fef5b44f77..c3ad464d0627 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -71,9 +71,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
return obj;
}
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
- mlx4_bitmap_free_range(bitmap, obj, 1);
+ mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
@@ -118,11 +118,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
+ if (!use_rr) {
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ }
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
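
A toy first-fit allocator, sketched under the assumption that it captures the intent of the use_rr flag above: when round-robin reuse is not wanted, freeing an entry pulls the search cursor back so the freed index is handed out again right away. alloc_slot()/free_slot() are invented names and the bitmap->top handling is omitted.

#include <stdio.h>

#define SLOTS 8
static unsigned char used[SLOTS];
static int last;

static int alloc_slot(void)
{
	int i, idx;

	for (i = 0; i < SLOTS; i++) {
		idx = (last + i) % SLOTS;
		if (!used[idx]) {
			used[idx] = 1;
			last = idx + 1;
			return idx;
		}
	}
	return -1;
}

static void free_slot(int idx, int use_rr)
{
	used[idx] = 0;
	if (!use_rr && idx < last)
		last = idx;	/* next alloc_slot() starts from the freed index */
}

int main(void)
{
	int a = alloc_slot(), b = alloc_slot();

	free_slot(a, 0);
	printf("a=%d b=%d realloc=%d\n", a, b, alloc_slot());	/* realloc == a */
	return 0;
}
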
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1e9970d2f0f3..0d02fba94536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1371,6 +1371,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
},
+ {
+ .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 22fcbe78311c..0487121e4a0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,7 +34,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
@@ -187,7 +186,7 @@ err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
- mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
@@ -217,7 +216,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index fd6441071319..abaf6bb22416 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -42,6 +42,10 @@ int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
int port_up = 0;
int err = 0;
+ if (priv->hwtstamp_config.tx_type == tx_type &&
+ priv->hwtstamp_config.rx_filter == rx_filter)
+ return 0;
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
@@ -103,19 +107,191 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
+ unsigned long flags;
u64 nsec;
+ read_lock_irqsave(&mdev->clock_lock, flags);
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+ read_unlock_irqrestore(&mdev->clock_lock, flags);
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = ns_to_ktime(nsec);
}
+/**
+ * mlx4_en_remove_timestamp - disable PTP device
+ * @mdev: board private structure
+ *
+ * Stop the PTP support.
+ **/
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+{
+ if (mdev->ptp_clock) {
+ ptp_clock_unregister(mdev->ptp_clock);
+ mdev->ptp_clock = NULL;
+ mlx4_info(mdev, "removed PHC\n");
+ }
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+ mdev->overflow_period);
+ unsigned long flags;
+
+ if (timeout) {
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+ mdev->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+ mult = mdev->nominal_c_mult;
+ adj = mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_read(&mdev->clock);
+ mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
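
The scaling above boils down to diff = nominal_mult * |delta| / 1e9, which is then added to or subtracted from the nominal multiplier. A quick standalone check with made-up numbers (in the driver the nominal multiplier comes from clocksource_khz2mult() at init time):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nominal_mult = 131072;	/* made-up nominal cycle-counter multiplier */
	int32_t delta = 50000;		/* request: speed the clock up by 50 ppm */
	int neg_adj = delta < 0;
	uint64_t adj = (uint64_t)nominal_mult * (uint32_t)(neg_adj ? -delta : delta);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	printf("mult %u -> %u\n", nominal_mult,
	       neg_adj ? nominal_mult - diff : nominal_mult + diff);
	return 0;
}
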
+
+/**
+ * mlx4_en_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ now = timecounter_read(&mdev->clock);
+ now += delta;
+ timecounter_init(&mdev->clock, &mdev->cycles, now);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ ns = timecounter_read(&mdev->clock);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+ ptp_clock_info);
+ u64 ns = timespec_to_ns(ts);
+ unsigned long flags;
+
+ /* reset the timecounter */
+ write_lock_irqsave(&mdev->clock_lock, flags);
+ timecounter_init(&mdev->clock, &mdev->cycles, ns);
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mlx4_en_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+ .gettime = mlx4_en_phc_gettime,
+ .settime = mlx4_en_phc_settime,
+ .enable = mlx4_en_phc_enable,
+};
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
u64 ns;
+ rwlock_init(&mdev->clock_lock);
+
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
@@ -127,9 +303,12 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
mdev->cycles.shift = 14;
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ mdev->nominal_c_mult = mdev->cycles.mult;
+ write_lock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
@@ -137,15 +316,18 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
mdev->overflow_period = ns;
-}
-void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
-{
- bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
- if (timeout) {
- timecounter_read(&mdev->clock);
- mdev->last_overflow_check = jiffies;
+ mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(mdev->ptp_clock)) {
+ mdev->ptp_clock = NULL;
+ mlx4_err(mdev, "ptp_clock_register failed\n");
+ } else {
+ mlx4_info(mdev, "registered PHC clock\n");
}
+
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3a098cc4d349..70e95324a97d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (!cq->is_tx) {
+ if (cq->is_tx) {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+ NAPI_POLL_WEIGHT);
+ } else {
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
- napi_enable(&cq->napi);
}
+ napi_enable(&cq->napi);
+
return 0;
}
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
+ napi_disable(&cq->napi);
if (!cq->is_tx) {
- napi_disable(&cq->napi);
napi_hash_del(&cq->napi);
synchronize_rcu();
- netif_napi_del(&cq->napi);
}
+ netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0596f9f85a0e..3e8d33605fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1193,6 +1193,9 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+
+ if (mdev->ptp_clock)
+ info->phc_index = ptp_clock_index(mdev->ptp_clock);
}
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0d087b03a7b0..d357bf5a4686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
+ case MLX4_DEV_EVENT_SLAVE_INIT:
+ case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
@@ -196,6 +199,9 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a112a6b..84a96f70dfb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
memset(&dst_mac[ETH_ALEN], 0, 2);
}
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+ int qpn, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ rule.port = priv->port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ if (err) {
+ en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+ return err;
+ }
+ en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+ return 0;
+}
+
+
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
unsigned char *mac, int *qpn, u64 *reg_id)
{
@@ -585,6 +632,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
if (err)
goto steer_err;
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
@@ -599,6 +651,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
steer_err:
@@ -642,6 +697,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
}
}
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -682,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
+ if (err)
+ return err;
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+ err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
+ &priv->tunnel_reg_id);
return err;
}
}
@@ -782,7 +850,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
@@ -797,7 +865,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
- if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
@@ -1044,6 +1112,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to detach multicast address\n");
+ if (mclist->tunnel_reg_id) {
+ err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to detach multicast address\n");
+ }
+
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
@@ -1061,6 +1135,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
if (err)
en_err(priv, "Fail to attach multicast address\n");
+ err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+ &mclist->tunnel_reg_id);
+ if (err)
+ en_err(priv, "Failed to attach multicast address\n");
}
}
}
@@ -1598,6 +1676,15 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto tx_err;
+ }
+ }
+
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -1713,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
+ if (mclist->tunnel_reg_id)
+ mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
@@ -1910,8 +1999,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX, node))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE, node))
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ priv->base_tx_qpn + i,
+ prof->tx_ring_size, TXBB_SIZE,
+ node, i))
goto err;
}
@@ -2025,7 +2116,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -2084,11 +2175,21 @@ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+ sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ return mlx4_en_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -2154,6 +2255,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int i;
+ u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+ if (!phys_port_id)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(phys_port_id);
+ for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+ ppid->id[i] = phys_port_id & 0xff;
+ phys_port_id >>= 8;
+ }
+ return 0;
+}
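
The loop above serialises the 64-bit physical port id most-significant byte first into the netdev_phys_port_id buffer. The same conversion in isolation, using a made-up sample id:

#include <stdio.h>
#include <stdint.h>

#define PORT_ID_BYTE_LEN 8

int main(void)
{
	uint64_t phys_port_id = 0x0002c903001d2bf0ULL;	/* made-up example id */
	uint8_t id[PORT_ID_BYTE_LEN];
	int i;

	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}

	for (i = 0; i < PORT_ID_BYTE_LEN; i++)
		printf("%02x", id[i]);	/* prints 0002c903001d2bf0 */
	printf("\n");
	return 0;
}
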
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2179,6 +2301,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2207,6 +2330,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2365,6 +2489,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2394,6 +2525,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ if (err) {
+ en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+ err);
+ goto out;
+ }
+ }
+
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index d3f508697a3d..f1a5500ff72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -68,6 +68,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
+
+ if (!is_tx && !rss &&
+ (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
+ en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
+ context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 07a1d0fbae47..890922c1c8ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -631,6 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int ip_summed;
int factor = priv->cqe_factor;
u64 timestamp;
+ bool l2_tunnel;
if (!priv->port_up)
return 0;
@@ -709,6 +710,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
@@ -721,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - not an IP fragment
* - no LLS polling in progress
*/
- if (!mlx4_en_cq_ll_polling(cq) &&
+ if (!mlx4_en_cq_busy_polling(cq) &&
(dev->features & NETIF_F_GRO)) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb)
@@ -738,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l2_tunnel)
+ gro_skb->encapsulation = 1;
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -747,7 +752,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
if (dev->features & NETIF_F_RXHASH)
- gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
@@ -788,8 +795,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (l2_tunnel)
+ skb->encapsulation = 1;
+
if (dev->features & NETIF_F_RXHASH)
- skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+ skb_set_hash(skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) &&
@@ -804,8 +816,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_mark_napi_id(skb, &cq->napi);
- /* Push it up the stack */
- netif_receive_skb(skb);
+ if (!mlx4_en_cq_busy_polling(cq))
+ napi_gro_receive(&cq->napi, skb);
+ else
+ netif_receive_skb(skb);
next:
for (nr = 0; nr < priv->num_frags; nr++)
@@ -1053,6 +1067,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
+ rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
+ }
+
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a7fcd593b2db..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
+#include <linux/ip.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -55,7 +56,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
- u16 stride, int node)
+ u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
@@ -140,6 +141,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = true;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+ if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+ cpumask_set_cpu(queue_index, &ring->affinity_mask);
*pring = ring;
return 0;
@@ -206,6 +211,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
+ if (!user_prio && cpu_online(ring->queue_index))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
return err;
}
@@ -317,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
return tx_info->nr_txbb;
}
@@ -354,7 +362,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +382,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
u32 bytes = 0;
int factor = priv->cqe_factor;
u64 timestamp = 0;
+ int done = 0;
if (!priv->port_up)
- return;
+ return 0;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@@ -383,7 +394,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
- cons_index & size)) {
+ cons_index & size) && (done < budget)) {
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -421,7 +432,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
- } while (ring_index != new_index);
+ } while ((++done < budget) && (ring_index != new_index));
++cons_index;
index = cons_index & size_mask;
@@ -447,6 +458,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
netif_tx_wake_queue(ring->tx_queue);
priv->port_stats.wake_queue++;
}
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +466,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- mlx4_en_process_tx_cq(cq->dev, cq);
- mlx4_en_arm_cq(priv, cq);
+ if (priv->port_up)
+ napi_schedule(&cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
}
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done < budget) {
+ /* Done for now */
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return done;
+ }
+ return budget;
+}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -528,7 +561,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
int real_size;
if (skb_is_gso(skb)) {
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->encapsulation)
+ *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ else
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
ALIGN(*lso_header_size + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
@@ -593,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -605,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (vlan_tx_tag_present(skb))
up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
- return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+ return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
@@ -828,6 +864,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->inl = 1;
}
+ if (skb->encapsulation) {
+ struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+ if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
+ else
+ op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+ }
+
ring->prod += nr_txbb;
/* If we used a bounce buffer then copy descriptor back into place */
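
The queue selection in mlx4_en_select_queue() above folds the stack's fallback pick into the group of rings that matches the frame's VLAN priority. With assumed sample values (8 rings per priority level), the arithmetic works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned int rings_p_up = 8;		/* assumed TX rings per priority level */
	unsigned int fallback_pick = 13;	/* whatever fallback(dev, skb) returned */
	unsigned int up = 2;			/* VLAN priority of the frame */
	unsigned int queue = fallback_pick % rings_p_up + up * rings_p_up;

	printf("selected tx queue %u\n", queue);	/* 13 mod 8 + 2 * 8 = 21 */
	return 0;
}
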
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c9cdb2a2c596..8992b38578d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -963,7 +962,7 @@ err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
@@ -1018,7 +1017,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
eq->page_list[i].map);
kfree(eq->page_list);
- mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
mlx4_free_cmd_mailbox(dev, mailbox);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 194928214606..7e2995ecea6f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -129,12 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support",
+ [3] = "Device managed flow steering support",
[4] = "Automatic MAC reassignment support",
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
- [8] = "Dynamic QP updates support"
+ [8] = "Dynamic QP updates support",
+ [9] = "Device managed flow steering IPoIB support",
+ [10] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -207,25 +209,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
- field = 0;
- /* ensure force vlan and force mac bits are not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- /* ensure that phy_wqe_gid bit is not set */
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ /* Set the nic_info bit to mark support for the new fields */
+ field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
field = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +245,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+ MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +396,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
+ MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
err = -EPROTONOSUPPORT;
@@ -433,6 +438,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+ MLX4_GET(func_cap->phys_port_id, outbox,
+ QUERY_FUNC_CAP_PHYS_PORT_ID);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -513,6 +522,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
+#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -529,6 +539,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
+#define QUERY_DEV_CAP_VXLAN 0x9e
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -603,6 +614,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
@@ -694,6 +708,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ if (field & 1<<3)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -842,13 +859,18 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ /* For guests, disable vxlan tunneling */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
+ field &= 0xf7;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* For guests, disable mw type 2 */
- MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
@@ -860,6 +882,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, field,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
}
+
+ /* turn off ipoib managed steering for guests */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ field &= ~0x80;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+
return 0;
}
@@ -1267,6 +1295,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
@@ -1425,6 +1454,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* set parser VXLAN attributes */
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+ u8 parser_params = 0;
+ MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
+ }
+
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
MLX4_CMD_NATIVE);
@@ -1713,6 +1748,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+ u8 port;
+ u32 *outbox;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ u32 guid_hi, guid_lo;
+ int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H 0x14
+#define MOD_STAT_CFG_GUID_L 0x1c
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Fail to get port %d uplink guid\n",
+ port);
+ ret = err;
+ } else {
+ MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+ MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+ dev->caps.phys_port_id[port] = (u64)guid_lo |
+ (u64)guid_hi << 32;
+ }
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
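The new mlx4_get_phys_port_id() reads two 32-bit big-endian words out of the MOD_STAT_CFG mailbox (offsets 0x14 and 0x1c per the defines above) and glues them into one 64-bit identifier. A self-contained userspace sketch of that extraction, using ntohl() as a stand-in for the kernel's be32_to_cpu():

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t get_be32_at(const void *outbox, size_t off)
{
	uint32_t tmp;

	memcpy(&tmp, (const uint8_t *)outbox + off, sizeof(tmp));
	return ntohl(tmp);		/* mailbox fields are big-endian */
}

static uint64_t read_port_guid(const void *outbox)
{
	uint32_t hi = get_be32_at(outbox, 0x14);	/* MOD_STAT_CFG_GUID_H */
	uint32_t lo = get_be32_at(outbox, 0x1c);	/* MOD_STAT_CFG_GUID_L */

	return (uint64_t)lo | ((uint64_t)hi << 32);
}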
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index a0a368b7c939..6811ee00ba7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u8 physical_port;
u8 port_flags;
+ u8 flags1;
+ u64 phys_port_id;
};
struct mlx4_adapter {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 01fc6515384d..d413e60071d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" To activate device managed"
" flow steering when available, set to -1");
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
- "Enable 64 byte CQEs/EQEs when the FW supports this");
+ "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define HCA_GLOBAL_CAP_MASK 0
@@ -150,6 +150,8 @@ struct mlx4_port_config {
struct pci_dev *pdev;
};
+static atomic_t pf_loading = ATOMIC_INIT(0);
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -388,6 +390,84 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+
+static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ u32 lnkcap1, lnkcap2;
+ int err1, err2;
+
+#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
+ err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+ if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *speed = PCIE_SPEED_8_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ if (!err1) {
+ *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+ if (!lnkcap2) { /* pre-r3.0 */
+ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+ *speed = PCIE_SPEED_5_0GT;
+ else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+ *speed = PCIE_SPEED_2_5GT;
+ }
+ }
+
+ if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
+ return err1 ? err1 :
+ err2 ? err2 : -EINVAL;
+ }
+ return 0;
+}
+
+static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
+{
+ enum pcie_link_width width, width_cap;
+ enum pci_bus_speed speed, speed_cap;
+ int err;
+
+#define PCIE_SPEED_STR(speed) \
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+ "Unknown")
+
+ err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
+ if (err) {
+ mlx4_warn(dev,
+ "Unable to determine PCIe device BW capabilities\n");
+ return;
+ }
+
+ err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+ if (err || speed == PCI_SPEED_UNKNOWN ||
+ width == PCIE_LNK_WIDTH_UNKNOWN) {
+ mlx4_warn(dev,
+ "Unable to determine PCI device chain minimum BW\n");
+ return;
+ }
+
+ if (width != width_cap || speed != speed_cap)
+ mlx4_warn(dev,
+ "PCIe BW is different than device's capability\n");
+
+ mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
+ PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+ mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
+ width, width_cap);
+ return;
+}
+
/*The function checks if there are live vf, return the num of them*/
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
@@ -606,6 +686,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ dev->caps.phys_port_id[i] = func_cap.phys_port_id;
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
@@ -670,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
has_eth_port = true;
}
- if (has_ib_port)
+ if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
request_module_nowait(IB_DRV_NAME);
if (has_eth_port)
request_module_nowait(EN_DRV_NAME);
@@ -1328,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 slave_read;
u32 cmd_channel_ver;
+ if (atomic_read(&pf_loading)) {
+ mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
mlx4_warn(dev, "Sending reset\n");
@@ -1443,6 +1529,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
mlx4_log_num_mgm_entry_size);
}
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+ else
+ dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+ mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
+ == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1483,6 +1582,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
choose_steering_mode(dev, &dev_cap);
+ choose_tunnel_offload_mode(dev, &dev_cap);
+
+ err = mlx4_get_phys_port_id(dev);
+ if (err)
+ mlx4_err(dev, "Fail to get physical port id\n");
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -1654,7 +1758,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2222,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (num_vfs) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
+
+ atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, num_vfs);
+ atomic_dec(&pf_loading);
+
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
@@ -2287,6 +2395,12 @@ slave_start:
goto err_mfunc;
}
+ /* Check if the device is functioning at its maximum possible speed.
+ * There is no return code for this call; just warn the user in case the
+ * PCI Express device's capabilities are under-satisfied by the bus.
+ */
+ mlx4_check_pcie_caps(dev);
+
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
@@ -2567,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- int ret = __mlx4_init_one(pdev, 0);
+ const struct pci_device_id *id;
+ int ret;
+
+ id = pci_match_id(mlx4_pci_table, pdev);
+ ret = __mlx4_init_one(pdev, id->driver_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
@@ -2581,6 +2699,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
+ .shutdown = mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
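The pf_loading counter added in main.c closes a probe-ordering race: while the PF is still inside pci_enable_sriov() (which can synchronously probe the VFs), a VF probe returns -EPROBE_DEFER so the driver core retries it later. A condensed sketch of the idea, with hypothetical helper names rather than the driver's own entry points:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/pci.h>

static atomic_t pf_busy = ATOMIC_INIT(0);

static int my_vf_init(void)			/* hypothetical VF-side init */
{
	if (atomic_read(&pf_busy))
		return -EPROBE_DEFER;		/* PF not ready: ask the core to retry */
	/* ... normal VF initialization ... */
	return 0;
}

static int my_pf_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	int err;

	atomic_inc(&pf_busy);
	err = pci_enable_sriov(pdev, num_vfs);	/* may probe VFs synchronously */
	atomic_dec(&pf_busy);
	return err;
}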
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index acf9d5f1f922..db7dc0b6667d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
- struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+ struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return NULL;
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
+
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
int i;
+ if (port < 1 || port > dev->caps.num_ports)
+ return false;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int loc, i;
int err;
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -674,7 +697,8 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
- [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -699,7 +723,9 @@ static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
- sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_VXLAN] =
+ sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -764,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
break;
+ case MLX4_NET_TRANS_RULE_ID_VXLAN:
+ rule_hw->vxlan.vni =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+ rule_hw->vxlan.vni_mask =
+ cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+ break;
+
default:
return -EINVAL;
}
@@ -895,6 +928,23 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+ u32 max_range_qpn)
+{
+ int err;
+ u64 in_param;
+
+ in_param = ((u64) min_range_qpn) << 32;
+ in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
+
+ err = mlx4_cmd(dev, in_param, 0, 0,
+ MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
+
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -996,7 +1046,7 @@ out:
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
@@ -1087,7 +1137,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- amgm_index - dev->caps.num_mgms);
+ amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
@@ -1107,7 +1157,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
- index - dev->caps.num_mgms);
+ index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
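The new MLX4_NET_TRANS_RULE_ID_VXLAN case shifts the 24-bit VNI up by one byte before writing it into the hardware rule, presumably to match the VXLAN header layout where the VNI occupies the upper three bytes of a big-endian word followed by a reserved byte. A small worked example, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>

static uint32_t pack_vni_be(uint32_t vni_be)
{
	/* VNI 0x123456 (big-endian input) becomes bytes 12 34 56 00 */
	return htonl(ntohl(vni_be) << 8);
}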
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e582a41a802b..7aec6c833973 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -51,8 +51,8 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "1.1"
-#define DRV_RELDATE "Dec, 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb, 2014"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
@@ -783,6 +783,11 @@ enum {
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
};
+enum {
+ MLX4_NO_RR = 0,
+ MLX4_USE_RR = 1,
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -844,9 +849,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+ int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
@@ -1236,6 +1242,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d5758adceaa2..b57e8c87a34e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -56,8 +57,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "Dec 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -255,6 +256,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
+ u8 queue_index;
+ cpumask_t affinity_mask;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
@@ -373,10 +376,14 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ rwlock_t clock_lock;
+ u32 nominal_c_mult;
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
@@ -434,6 +441,7 @@ struct mlx4_en_mc_list {
enum mlx4_en_mclist_act action;
u8 addr[ETH_ALEN];
u64 reg_id;
+ u64 tunnel_reg_id;
};
struct mlx4_en_frag_info {
@@ -565,7 +573,7 @@ struct mlx4_en_priv {
struct list_head filters;
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
-
+ u64 tunnel_reg_id;
};
enum mlx4_en_wol {
@@ -653,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
return cq->state & CQ_USER_PEND;
@@ -683,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
return false;
}
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
return false;
}
@@ -715,12 +723,13 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride, int node);
+ int qpn, u32 size, u16 stride,
+ int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -742,6 +751,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
@@ -787,6 +797,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index b3ee9bafff5e..24835853b753 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -346,7 +345,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 84cfb40bf451..74216071201f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -59,7 +58,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
@@ -96,7 +95,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+ mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@@ -164,7 +163,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
- mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..a58bcbf1b806 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (!table->refs[i])
+ continue;
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
+
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
@@ -800,6 +820,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+enum {
+ VXLAN_ENABLE_MODIFY = 1 << 7,
+ VXLAN_STEERING_MODIFY = 1 << 6,
+
+ VXLAN_ENABLE = 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+ u32 reserved1;
+ u8 modify_flags;
+ u8 reserved2;
+ u8 enable_flags;
+ u8 steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+ int err;
+ u32 in_mod;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_vxlan_context *context;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof(*context));
+
+ context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+ context->enable_flags = VXLAN_ENABLE;
+ context->steering = steering;
+
+ in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
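mlx4_SET_PORT_VXLAN() is exported for use elsewhere in the driver; a hypothetical call site could look like the fragment below. The steering value is shown as a placeholder, not the actual attribute the Ethernet driver passes:

static int enable_vxlan_steering(struct mlx4_dev *dev, u8 port)
{
	u8 steering = 0;	/* placeholder: device-specific steering attribute */

	return mlx4_SET_PORT_VXLAN(dev, port, steering);
}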
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2715e61dbb74..61d64ebffd56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,7 +35,6 @@
#include <linux/gfp.h>
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -250,7 +249,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
- mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2f3f2bc7f283..57428a0cb9dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1340,43 +1340,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_CQ_BUSY:
- err = -EBUSY;
- break;
-
- case RES_CQ_ALLOCATED:
- if (r->com.state != RES_CQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- else
- err = 0;
- break;
-
- case RES_CQ_HW:
- if (r->com.state != RES_CQ_ALLOCATED)
- err = -EINVAL;
- else
- err = 0;
- break;
-
- default:
+ } else if (state == RES_CQ_ALLOCATED) {
+ if (r->com.state != RES_CQ_HW)
err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_CQ_BUSY;
- if (cq)
- *cq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -1385,7 +1371,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
- enum res_cq_states state, struct res_srq **srq)
+ enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -1394,39 +1380,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
- if (!r)
+ if (!r) {
err = -ENOENT;
- else if (r->com.owner != slave)
+ } else if (r->com.owner != slave) {
err = -EPERM;
- else {
- switch (state) {
- case RES_SRQ_BUSY:
+ } else if (state == RES_SRQ_ALLOCATED) {
+ if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
- break;
-
- case RES_SRQ_ALLOCATED:
- if (r->com.state != RES_SRQ_HW)
- err = -EINVAL;
- else if (atomic_read(&r->ref_count))
- err = -EBUSY;
- break;
-
- case RES_SRQ_HW:
- if (r->com.state != RES_SRQ_ALLOCATED)
- err = -EINVAL;
- break;
-
- default:
- err = -EINVAL;
- }
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
+ err = -EINVAL;
+ }
- if (!err) {
- r->com.from_state = r->com.state;
- r->com.to_state = state;
- r->com.state = RES_SRQ_BUSY;
- if (srq)
- *srq = r;
- }
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3634,7 +3606,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
- if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+ if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
@@ -3844,6 +3816,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
+
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 8fdf23753779..98faf870b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
@@ -117,7 +116,7 @@ err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
- mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
@@ -145,7 +144,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
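Every mlx4_bitmap_free()/mlx4_bitmap_free_range() caller touched by this patch now states whether the freed ID may be recycled round-robin (MLX4_USE_RR) or should become immediately reusable (MLX4_NO_RR). A conceptual sketch of that distinction follows; it is not the mlx4 allocator itself:

#include <stdbool.h>
#include <stdint.h>

struct id_bitmap {
	unsigned long map[4];	/* 256 ids on a 64-bit host */
	uint32_t last;		/* allocation cursor, scans forward */
};

static void id_free(struct id_bitmap *b, uint32_t id, bool use_rr)
{
	b->map[id / (8 * sizeof(unsigned long))] &=
		~(1UL << (id % (8 * sizeof(unsigned long))));

	/* Round-robin mode leaves the cursor alone, so a just-freed id is not
	 * handed out again right away; otherwise rewind to favor low ids. */
	if (!use_rr && id < b->last)
		b->last = id;
}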
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8675d26a678b..405c4fbcd0ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -32,7 +32,6 @@
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index c2d660be6f76..43c5f4809526 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -201,10 +201,23 @@ EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- int type, struct mlx5_cq_modify_params *params)
+ struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
- return -ENOSYS;
+ struct mlx5_modify_cq_mbox_out out;
+ int err;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
+ err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
}
+EXPORT_SYMBOL(mlx5_core_modify_cq);
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 80f6d127257a..10e1f1a18255 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -275,7 +275,7 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- int index)
+ int index, int *is_str)
{
struct mlx5_query_qp_mbox_out *out;
struct mlx5_qp_context *ctx;
@@ -293,19 +293,40 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
goto out;
}
+ *is_str = 0;
ctx = &out->ctx;
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = be32_to_cpu(ctx->flags) >> 28;
+ param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ *is_str = 1;
break;
case QP_XPORT:
- param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
+ param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ *is_str = 1;
break;
case QP_MTU:
- param = ctx->mtu_msgmax >> 5;
+ switch (ctx->mtu_msgmax >> 5) {
+ case IB_MTU_256:
+ param = 256;
+ break;
+ case IB_MTU_512:
+ param = 512;
+ break;
+ case IB_MTU_1024:
+ param = 1024;
+ break;
+ case IB_MTU_2048:
+ param = 2048;
+ break;
+ case IB_MTU_4096:
+ param = 4096;
+ break;
+ default:
+ param = 0;
+ }
break;
case QP_N_RECV:
param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
@@ -414,6 +435,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
struct mlx5_field_desc *desc;
struct mlx5_rsc_debug *d;
char tbuf[18];
+ int is_str = 0;
u64 field;
int ret;
@@ -424,7 +446,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
case MLX5_DBG_RSC_QP:
- field = qp_read_field(d->dev, d->object, desc->i);
+ field = qp_read_field(d->dev, d->object, desc->i, &is_str);
break;
case MLX5_DBG_RSC_EQ:
@@ -440,7 +462,12 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
return -EINVAL;
}
- ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
+ if (is_str)
+ ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+ else
+ ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
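The debugfs change smuggles a pointer to a constant state/type string through the same u64 slot that otherwise carries a number, with is_str telling the reader which printf format to apply. A standalone illustration of the pattern:

#include <stdint.h>
#include <stdio.h>

static uint64_t read_field(int want_name, int *is_str)
{
	*is_str = want_name;
	if (want_name)
		return (uint64_t)(uintptr_t)"RTS";	/* pointer carried as u64 */
	return 0x1f;					/* plain numeric field */
}

static void show_field(uint64_t v, int is_str)
{
	if (is_str)
		printf("%s\n", (const char *)(uintptr_t)v);
	else
		printf("0x%llx\n", (unsigned long long)v);
}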
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 40a9f5ed814d..23b7e2d35a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,8 +46,8 @@
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
@@ -460,7 +460,10 @@ disable_msix:
err_stop_poll:
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return err;
+ }
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
@@ -503,7 +506,10 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- mlx5_cmd_teardown_hca(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+ return;
+ }
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 37b6ad1f9a1b..d59790a82bc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -99,7 +99,7 @@ enum {
enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
- MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096,
+ MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
@@ -192,10 +192,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
struct fw_page *fp;
unsigned n;
- if (list_empty(&dev->priv.free_list)) {
+ if (list_empty(&dev->priv.free_list))
return -ENOMEM;
- mlx5_core_warn(dev, "\n");
- }
fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
@@ -208,7 +206,7 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
if (!fp->free_count)
list_del(&fp->list);
- *addr = fp->addr + n * 4096;
+ *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
return 0;
}
@@ -224,14 +222,15 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
return;
}
- n = (addr & ~PAGE_MASK) % 4096;
+ n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
} else if (fwp->free_count == 1) {
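The free_4k() fix matters on architectures where the host PAGE_SIZE is larger than the adapter's 4 KiB page: the old expression (offset % 4096) is always zero, while the new one yields the 4 KiB chunk index within the host page. A worked example assuming 64 KiB host pages:

#include <assert.h>
#include <stdint.h>

#define HOST_PAGE_SIZE	65536UL
#define HOST_PAGE_MASK	(~(HOST_PAGE_SIZE - 1))
#define ADAPTER_SHIFT	12

int main(void)
{
	uint64_t base = 0x12340000UL;		/* host-page aligned DMA address */
	uint64_t addr = base + 3 * 4096;	/* third 4 KiB chunk inside the page */

	/* old formula: always 0, so only chunk 0 was ever marked free */
	assert(((addr & ~HOST_PAGE_MASK) % 4096) == 0);
	/* fixed formula: offset within the host page, in 4 KiB units */
	assert(((addr & ~HOST_PAGE_MASK) >> ADAPTER_SHIFT) == 3);
	return 0;
}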
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index f6afe7b5a675..8c9ac870ecb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -57,7 +57,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
in->arg = cpu_to_be32(arg);
in->register_id = cpu_to_be16(reg_num);
err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
- sizeof(out) + size_out);
+ sizeof(*out) + size_out);
if (err)
goto ex2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 54faf8bfcaf4..510576213dd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -74,7 +74,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_out dout;
int err;
- memset(&dout, 0, sizeof(dout));
+ memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
@@ -84,7 +84,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
}
if (out.hdr.status) {
- pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
+ mlx5_core_warn(dev, "current num of QPs 0x%x\n",
+ atomic_read(&dev->num_qps));
return mlx5_cmd_status_to_err(&out.hdr);
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 106eb972f2ac..16435b3cfa9f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a9eb8..e0c92e0e5e1d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
};
static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
+ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ if (ret == -EPROBE_DEFER)
+ goto err_reg;
+ } else {
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator enable fail: %d\n",
+ ret);
+ goto err_reg_en;
+ }
+ }
+
+
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
err_netdev:
free_irq(ndev->irq, ks);
-err_id:
err_irq:
+err_id:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_disable(ks->vdd_reg);
+err_reg_en:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_put(ks->vdd_reg);
+err_reg:
free_netdev(ndev);
return ret;
}
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
+ if (!IS_ERR(priv->vdd_reg)) {
+ regulator_disable(priv->vdd_reg);
+ regulator_put(priv->vdd_reg);
+ }
free_netdev(priv->netdev);
return 0;
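The ks8851 probe treats the "vdd" supply as optional: a missing supply is tolerated, a supply that is described but not yet available propagates -EPROBE_DEFER, and the error paths undo only what was actually acquired. A compressed sketch of the acquire step, using an illustrative helper rather than the driver's own probe function:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int my_get_vdd(struct device *dev, struct regulator **reg)
{
	*reg = regulator_get_optional(dev, "vdd");
	if (IS_ERR(*reg)) {
		if (PTR_ERR(*reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* supply declared but not ready yet */
		return 0;			/* no supply described: run without it */
	}
	return regulator_enable(*reg);		/* supply present: power the chip */
}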
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ddd252a3da9c..ce84dc289c8f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4128,10 +4128,10 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->override_addr, mac_addr))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
+ if (ether_addr_equal(hw->address[i], mac_addr))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
@@ -4149,7 +4149,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ if (ether_addr_equal(hw->address[i], mac_addr)) {
memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
@@ -5853,15 +5853,12 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
- int rc;
int result = 0;
struct mii_ioctl_data *data = if_mii(ifr);
if (down_interruptible(&priv->proc_sem))
return -ERESTARTSYS;
- /* assume success */
- rc = 0;
switch (cmd) {
/* Get address of MII PHY in use. */
case SIOCGMIIPHY:
@@ -7104,8 +7101,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
ETH_ALEN);
else {
memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
- if (!memcmp(sw->other_addr, hw->override_addr,
- ETH_ALEN))
+ if (ether_addr_equal(sw->other_addr, hw->override_addr))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index cbd013379252..5020fd47825d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 79257f71c5d9..a5512a97cc4d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -24,7 +24,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 04b3ec1352f1..9e4ddbba7036 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -37,7 +37,6 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d3b47003a575..dbccf1de49ec 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ChangeLog
@@ -2236,7 +2235,6 @@ out_disable:
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
out:
return err;
}
@@ -2260,7 +2258,6 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
- pci_set_drvdata(pci_dev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index fbe5363cb89c..089b713b9f7b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2148,7 +2148,7 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
* __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f9876ea8c8bf..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,6 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -507,7 +508,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* if rss is disabled/enabled, so key off of that.
*/
if (ext_info.rth_value)
- skb->rxhash = ext_info.rth_value;
+ skb_set_hash(skb, ext_info.rth_value,
+ PKT_HASH_TYPE_L3);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -724,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
int vpath_idx = 0;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath = NULL;
- struct __vxge_hw_device *hldev;
-
- hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -1429,7 +1428,7 @@ vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
return status;
}
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ while (!ether_addr_equal(mac->macaddr, macaddr)) {
status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
macaddr, macmask);
if (status != VXGE_HW_OK)
@@ -1970,7 +1969,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -2441,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = pci_get_drvdata(vdev->pdev);
-
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
@@ -3189,7 +3185,7 @@ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
return status;
}
-static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
{
struct hwtstamp_config config;
int i;
@@ -3250,6 +3246,21 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
return 0;
}
+static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = (vdev->rx_hwts ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -3263,19 +3274,15 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct vxgedev *vdev = netdev_priv(dev);
- int ret;
switch (cmd) {
case SIOCSHWTSTAMP:
- ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
- if (ret)
- return ret;
- break;
+ return vxge_hwtstamp_set(vdev, rq->ifr_data);
+ case SIOCGHWTSTAMP:
+ return vxge_hwtstamp_get(vdev, rq->ifr_data);
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 36ca40f8f249..3a79d93b8445 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -427,7 +427,6 @@ void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
}
void vxge_initialize_ethtool_ops(struct net_device *ndev);
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
/* #define VXGE_DEBUG_INIT: debug for initialization functions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 99749bd07d72..9e1aaa7f36bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1956,8 +1956,7 @@ exit:
* @vid: vlan id to be added for this vpath into the list
*
* Adds the given vlan id into the list for this vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_delete
*
*/
enum vxge_hw_status
@@ -1979,45 +1978,13 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be deleted for this vpath from the list
*
* Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_add
*
*/
enum vxge_hw_status
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
index 4a518a3b131c..ba6f833bb059 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -1918,9 +1918,6 @@ vxge_hw_ring_rxd_post_post(
struct __vxge_hw_ring *ring_handle,
void *rxdh);
-enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
-
void
vxge_hw_ring_rxd_post_post_wmb(
struct __vxge_hw_ring *ring_handle,
@@ -2186,11 +2183,6 @@ vxge_hw_vpath_vid_add(
u64 vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index e6f0a4366f90..31eb911e4763 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e8b9514718b..70cf97fe67f2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -59,7 +58,6 @@
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
-#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -6020,7 +6018,6 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
out_error:
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
- pci_set_drvdata(pci_dev, NULL);
out_freering:
free_rings(dev);
out_unmap:
@@ -6091,7 +6088,6 @@ static void nv_remove(struct pci_dev *pci_dev)
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
free_netdev(dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index ba3ca18611f7..422d9b51ac24 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a9003071d51..2a55d6d53ee6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index ff3ad70935a6..51250363566b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057b..91ce07c8306c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_API_H_
#define _PCH_GBE_API_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f0ceb89af931..826f0ccdc23c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
#include "pch_gbe_api.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 27ffe0ebf0a6..464e91058c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index cf7c9b3a255b..08d4be616064 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 8b7ff75fc8e0..a5cad5ea9436 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 0cbe69206e04..95ad0151ad02 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _PCH_GBE_PHY_H_
#define _PCH_GBE_PHY_H_
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 07a890eb72ad..9a6cb482dcd0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1053,7 +1053,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size;
+ int data_size, yf_size;
u8 *buf_addr;
if(!desc->result_status)
@@ -1070,6 +1070,9 @@ static int yellowfin_rx(struct net_device *dev)
__func__, frame_status);
if (--boguscnt < 0)
break;
+
+ yf_size = sizeof(struct yellowfin_desc);
+
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
@@ -1096,12 +1099,12 @@ static int yellowfin_rx(struct net_device *dev)
if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
} else if ((yp->flags & HasMACAddrBug) &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- dev->dev_addr, 6) != 0 &&
- memcmp(le32_to_cpu(yp->rx_ring_dma +
- entry*sizeof(struct yellowfin_desc)),
- "\377\377\377\377\377\377", 6) != 0) {
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ dev->dev_addr) &&
+ !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+ entry * yf_size),
+ "\377\377\377\377\377\377")) {
if (bogus_rx++ == 0)
netdev_warn(dev, "Bad frame to %pM\n",
buf_addr);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index dbaa49e58b0c..9abf70d74b31 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -13,11 +13,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index f2749d46c125..a5807703ab96 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PASEMI_MAC_H
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0efe..25fae568261f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f4..e14e60c88381 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution
# in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 9adcdbb49476..6e426ae94692 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 1bcaf45aa864..6f6be57f4690 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98a..87e073c6e291 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 0c64c82b9acf..a310c2f6502a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 67efe754367d..db4280ce9c09 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -663,7 +661,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
list_for_each(head, del_list) {
cur = list_entry(head, nx_mac_list_t, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
list_move_tail(head, &adapter->mac_list);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df03..7433c4d21601 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index cc68657f0536..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cfebf99..70849dea32b1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0758b9435358..2eabd44f8914 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f2a7c7166e24..f19f81cde134 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 52
-#define QLCNIC_LINUX_VERSIONID "5.3.52"
+#define _QLCNIC_LINUX_SUBVERSION 55
+#define QLCNIC_LINUX_VERSIONID "5.3.55"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
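As a worked example of the version encoding above, plugging in the bumped 5.3.55 values gives 0x050337; a tiny standalone check:

#include <stdio.h>

int main(void)
{
	/* Mirrors QLCNIC_DRIVER_VERSION for major 5, minor 3, subversion 55. */
	unsigned int ver = (5 << 16) | (3 << 8) | 55;

	printf("0x%06x\n", ver);	/* prints 0x050337 */
	return 0;
}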
@@ -105,6 +105,8 @@
#define QLCNIC_DEF_TX_RINGS 4
#define QLCNIC_MAX_VNIC_TX_RINGS 4
#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
enum qlcnic_queue_type {
QLCNIC_TX_QUEUE = 1,
@@ -115,6 +117,10 @@ enum qlcnic_queue_type {
#define QLCNIC_VNIC_MODE 0xFF
#define QLCNIC_DEFAULT_MODE 0x0
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -365,6 +371,7 @@ struct qlcnic_rx_buffer {
*/
#define QLCNIC_INTR_COAL_TYPE_RX 1
#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
@@ -374,7 +381,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
-#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_DEV_INFO_SIZE 2
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -462,8 +469,10 @@ struct qlcnic_hardware_context {
u16 max_rx_ques;
u16 max_mtu;
u32 msg_enable;
- u16 act_pci_func;
+ u16 total_nic_func;
u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
u32 capabilities;
u32 extra_capability[3];
@@ -791,9 +800,10 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_VLAN_ADD 3
#define QLCNIC_MAC_VLAN_DEL 4
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
struct list_head list;
uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
};
/* MAC Learn */
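The rename above turns the plain MAC list entry into a MAC/VLAN pair. A minimal sketch of queuing one such entry on a list (hypothetical helper, not part of this patch; assumes the usual <linux/slab.h> and <linux/list.h> facilities):

/* Hypothetical example, not driver code: allocate and queue one entry. */
static int example_add_mac_vlan(struct list_head *mac_list,
				const u8 *addr, u16 vlan_id)
{
	struct qlcnic_mac_vlan_list *cur;

	cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
	if (!cur)
		return -ENOMEM;

	memcpy(cur->mac_addr, addr, ETH_ALEN);	/* mac_addr is ETH_ALEN + 2 bytes */
	cur->vlan_id = vlan_id;
	list_add_tail(&cur->list, mac_list);

	return 0;
}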
@@ -860,7 +870,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
-#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -954,6 +964,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -963,6 +974,9 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
+
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -1047,6 +1061,9 @@ struct qlcnic_adapter {
u8 drv_tx_rings; /* max tx rings supported by driver */
u8 drv_sds_rings; /* max sds rings supported by driver */
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
u8 rx_csum;
u8 portnum;
@@ -1072,6 +1089,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
bool drv_mac_learn;
bool fdb_mac_learn;
+ bool rx_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
@@ -1260,7 +1278,7 @@ struct qlcnic_pci_func_cfg {
u16 port_num;
u8 pci_func;
u8 func_state;
- u8 def_mac_addr[6];
+ u8 def_mac_addr[ETH_ALEN];
};
struct qlcnic_npar_func_cfg {
@@ -1462,8 +1480,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
-void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
-void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
@@ -1499,16 +1515,11 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
-void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1543,9 +1554,7 @@ int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
-void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1558,13 +1567,11 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
/* Functions from qlcnic_ethtool.c */
int qlcnic_check_loopback_buff(unsigned char *, u8 []);
int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
-int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1573,10 +1580,9 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
-int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
void qlcnic_set_drv_version(struct qlcnic_adapter *);
@@ -1605,11 +1611,8 @@ void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1617,7 +1620,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
@@ -1632,15 +1635,15 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
-void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
-int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_schedule_multi(struct net_device *);
-void qlcnic_vf_add_mc_list(struct net_device *, u16);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
/*
* QLOGIC Board information
@@ -1674,11 +1677,8 @@ static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
- dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
- else
- dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
- adapter->drv_tx_rings);
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
@@ -1744,7 +1744,8 @@ struct qlcnic_hardware_ops {
int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
void (*napi_enable) (struct qlcnic_adapter *);
void (*napi_disable) (struct qlcnic_adapter *);
- void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
int (*config_rss) (struct qlcnic_adapter *, int);
int (*config_hw_lro) (struct qlcnic_adapter *, int);
int (*config_loopback) (struct qlcnic_adapter *, u8);
@@ -1759,6 +1760,15 @@ struct qlcnic_hardware_ops {
pci_channel_state_t);
pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1931,9 +1941,10 @@ static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->napi_disable(adapter);
}
-static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
{
- adapter->ahw->hw_ops->config_intr_coal(adapter);
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
}
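The wrapper above now takes the ethtool parameters and propagates the callback's return code. A minimal sketch of how an ethtool .set_coalesce handler could forward through it (hypothetical function name; the driver's real handler in qlcnic_ethtool.c is not shown in this hunk):

static int example_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ethcoal)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);

	/* Dispatches to the 82xx or 83xx implementation via hw_ops. */
	return qlcnic_config_intr_coalesce(adapter, ethcoal);
}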
static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
@@ -1985,6 +1996,11 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
{
if (adapter->ahw->hw_ops->read_phys_port_id)
@@ -2027,6 +2043,54 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
}
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
@@ -2036,10 +2100,10 @@ static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
/* When operating in multi Tx mode, the driver needs to write 0x1
* to the src register, instead of 0x0, to disable receiving interrupts.
*/
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2048,13 +2112,42 @@ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
writel(0, sds_ring->crb_intr_mask);
}
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
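The wrappers above reach the ASIC-specific helpers through the hw_ops table and skip the call when a hook is left unset. A minimal, generic sketch of that indirection pattern (all names hypothetical):

/* Hypothetical, self-contained illustration of the hw_ops indirection. */
struct example_hw_ops {
	void (*enable_sds_intr)(void *adapter, void *sds_ring);
};

static void example_enable_sds_intr(const struct example_hw_ops *ops,
				    void *adapter, void *sds_ring)
{
	if (ops->enable_sds_intr)	/* optional hook: no-op when unimplemented */
		ops->enable_sds_intr(adapter, sds_ring);
}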
/* When operating in multi Tx mode, the driver needs to write 0x0
* to the src register, instead of 0x1, to enable receiving interrupts.
*/
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2140,4 +2233,26 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f776f99f7915..27c4f131863b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,8 +13,26 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -34,7 +52,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
- {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@ -68,7 +86,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
- {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -180,6 +198,11 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.io_error_detected = qlcnic_83xx_io_error_detected,
.io_slot_reset = qlcnic_83xx_io_slot_reset,
.io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
};
@@ -267,11 +290,22 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
{
- int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
num_msix = adapter->drv_sds_rings;
/* account for AEN interrupt MSI-X based interrupts */
@@ -280,29 +314,45 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
num_msix += adapter->drv_tx_rings;
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- num_msix = adapter->ahw->num_msix;
- else {
- if (qlcnic_sriov_vf_check(adapter))
- return -EINVAL;
- num_msix = 1;
+ return num_msix;
+}
+
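qlcnic_83xx_calculate_msix_vector() above counts one vector per SDS ring, one for the AEN/mailbox interrupt, and, when Tx interrupts are not shared, one per Tx ring. A minimal sketch of the same arithmetic with assumed example values:

/* Illustration only: 4 SDS rings + 1 AEN vector + 4 Tx rings = 9 vectors. */
static int example_msix_count(int sds_rings, int tx_rings, int tx_intr_shared)
{
	int num_msix = sds_rings;

	num_msix += 1;			/* AEN/mailbox interrupt */
	if (!tx_intr_shared)
		num_msix += tx_rings;	/* one vector per Tx ring */

	return num_msix;
}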
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
}
+
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
sizeof(struct qlcnic_intrpt_config));
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- /* MSI-X enablement failed, use legacy interrupt */
- adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
- adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
- adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
- adapter->msix_entries[0].vector = adapter->pdev->irq;
- dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
- }
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_legacy(adapter);
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -312,35 +362,22 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
ahw->intr_tbl[i].id = i;
ahw->intr_tbl[i].src = 0;
}
+
return 0;
}
-inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
writel(0, adapter->tgt_mask_reg);
}
-inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
if (adapter->tgt_mask_reg)
writel(1, adapter->tgt_mask_reg);
}
-/* Enable MSI-x and INT-x interrupts */
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(0, sds_ring->crb_intr_mask);
-}
-
-/* Disable MSI-x and INT-x interrupts */
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(1, sds_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
*adapter)
{
u32 mask;
@@ -477,7 +514,7 @@ irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -634,10 +671,10 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
return status;
}
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@ -869,7 +906,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
return;
}
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
@@ -1276,8 +1313,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
/* send the mailbox command*/
err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
- dev_err(&adapter->pdev->dev,
- "Failed to create Tx ctx in firmware 0x%x\n", err);
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
goto out;
}
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
@@ -1288,8 +1325,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
- tx->ctx_id, mbx_out->state);
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
out:
qlcnic_free_mbx_args(&cmd);
return err;
@@ -1341,7 +1379,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1366,7 +1404,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_disable_intr(adapter, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1386,6 +1424,33 @@ out:
netif_device_attach(netdev);
}
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return;
+}
+
int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
u32 beacon)
{
@@ -1498,8 +1563,7 @@ int qlcnic_83xx_set_led(struct net_device *netdev,
return err;
}
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
- int enable)
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_cmd_args cmd;
int status;
@@ -1507,21 +1571,21 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
if (qlcnic_sriov_vf_check(adapter))
return;
- if (enable) {
+ if (enable)
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_INIT_NIC_FUNC);
- if (status)
- return;
-
- cmd.req.arg[1] = BIT_0 | BIT_31;
- } else {
+ else
status = qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_STOP_NIC_FUNC);
- if (status)
- return;
- cmd.req.arg[1] = BIT_0 | BIT_31;
- }
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
status = qlcnic_issue_cmd(adapter, &cmd);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1531,7 +1595,7 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
qlcnic_free_mbx_args(&cmd);
}
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1548,7 +1612,7 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_args cmd;
int err;
@@ -1590,7 +1654,9 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
u32 *interface_id)
{
if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
} else {
if (!qlcnic_sriov_vf_check(adapter))
*interface_id = adapter->recv_ctx->context_id << 16;
@@ -1617,7 +1683,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
err = qlcnic_issue_cmd(adapter, cmd);
if (!err)
return err;
@@ -1711,7 +1781,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
ahw->extend_lb_time = 0;
}
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
@@ -1780,7 +1850,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = ahw->port_config, max_wait_count;
@@ -2015,8 +2085,8 @@ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
}
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
- u8 type, struct qlcnic_cmd_args *cmd)
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
{
switch (type) {
case QLCNIC_SET_STATION_MAC:
@@ -2060,37 +2130,130 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
return err;
}
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
{
- int err;
- u16 temp;
- struct qlcnic_cmd_args cmd;
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
- if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
- return;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
if (err)
- return;
+ return err;
- if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
- temp = adapter->recv_ctx->context_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
- temp = coal->rx_time_us;
- cmd.req.arg[2] = coal->rx_packets | temp << 16;
- } else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
- temp = adapter->tx_ring->ctx_id;
- cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
- temp = coal->tx_time_us;
- cmd.req.arg[2] = coal->tx_packets | temp << 16;
- }
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
cmd.req.arg[3] = coal->flag;
+
err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS)
- dev_info(&adapter->pdev->dev,
- "Failed to send interrupt coalescence parameters\n");
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+ "Invalid Interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
}
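The selection logic above programs only the side whose parameters actually changed: if the Rx pair still matches the stored values only Tx is updated, if the Tx pair matches only Rx is updated, and otherwise both are reprogrammed. A compact restatement of that decision (illustrative only, reusing the coalescing type constants and field names shown earlier in this patch):

/* Illustration of the type selection above; not driver code. */
static u8 example_pick_coal_type(const struct qlcnic_nic_intr_coalesce *coal,
				 const struct ethtool_coalesce *ethcoal)
{
	if (coal->rx_time_us == ethcoal->rx_coalesce_usecs &&
	    coal->rx_packets == ethcoal->rx_max_coalesced_frames)
		return QLCNIC_INTR_COAL_TYPE_TX;	/* only Tx values changed */

	if (coal->tx_time_us == ethcoal->tx_coalesce_usecs &&
	    coal->tx_packets == ethcoal->tx_max_coalesced_frames)
		return QLCNIC_INTR_COAL_TYPE_RX;	/* only Rx values changed */

	return QLCNIC_INTR_COAL_TYPE_RX_TX;		/* both changed */
}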
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
@@ -2119,7 +2282,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
qlcnic_advert_link_change(adapter, link_status);
}
-irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
@@ -2145,36 +2308,6 @@ out:
return IRQ_HANDLED;
}
-int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
-{
- int err = -EIO;
- struct qlcnic_cmd_args cmd;
-
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- dev_err(&adapter->pdev->dev,
- "%s: Error, invoked by non management func\n",
- __func__);
- return err;
- }
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
- if (err)
- return err;
-
- cmd.req.arg[1] = (port & 0xf) | BIT_4;
- err = qlcnic_issue_cmd(adapter, &cmd);
-
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
- err);
- err = -EIO;
- }
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-
-}
-
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *nic)
{
@@ -2268,11 +2401,37 @@ out:
return err;
}
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
struct qlcnic_cmd_args cmd;
int i, err = 0, j = 0;
u32 temp;
@@ -2283,16 +2442,20 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
- ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
- for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
- if (pci_info->type == QLCNIC_TYPE_NIC)
- ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
pci_info->default_port = temp;
i++;
@@ -2310,6 +2473,13 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
return err;
@@ -3459,7 +3629,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3481,7 +3651,7 @@ int qlcnic_83xx_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlc_83xx_idc *idc = &ahw->idc;
@@ -3834,8 +4004,8 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
return 0;
}
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3856,7 +4026,7 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
@@ -3879,7 +4049,7 @@ disconnect:
return PCI_ERS_RESULT_DISCONNECT;
}
-void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index a6a33508e401..f92485ca21d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -324,6 +324,11 @@ struct qlc_83xx_idc {
char **name;
};
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
/* Device States */
enum qlcnic_83xx_states {
QLC_83XX_IDC_DEV_UNKNOWN,
@@ -376,6 +381,8 @@ enum qlcnic_83xx_states {
/* LED configuration settings */
#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
#define QLC_83XX_LED_RATE 0xff
#define QLC_83XX_LED_ACT (1 << 10)
#define QLC_83XX_LED_MOD (0 << 13)
@@ -518,6 +525,11 @@ enum qlc_83xx_ext_regs {
QLC_83XX_ASIC_TEMP,
};
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_INIT_FW_RESOURCES BIT_31
+
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -532,17 +544,13 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
@@ -563,32 +571,22 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
- struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
struct qlcnic_info *);
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
-irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
- struct qlcnic_host_sds_ring *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
-int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
@@ -610,9 +608,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
int qlcnic_83xx_init(struct qlcnic_adapter *, int);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
@@ -620,7 +616,6 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
@@ -648,9 +643,6 @@ int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
-int qlcnic_83xx_shutdown(struct pci_dev *);
-int qlcnic_83xx_resume(struct qlcnic_adapter *);
int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
@@ -658,9 +650,4 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
- pci_channel_state_t);
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
-void qlcnic_83xx_io_resume(struct pci_dev *);
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 918e18ddf038..90a2dda351ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -39,6 +39,9 @@
static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
/* Template header */
struct qlc_83xx_reset_hdr {
@@ -380,7 +383,7 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
qlcnic_up(adapter, netdev);
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
return 0;
}
@@ -614,8 +617,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_sriov_pf_reinit(adapter);
if (err)
@@ -1529,7 +1531,7 @@ static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
return -EIO;
}
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
{
int err;
@@ -1602,7 +1604,7 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
}
}
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
{
struct qlcnic_hardware_context *ahw = p_dev->ahw;
u32 addr, count, prev_ver, curr_ver;
@@ -1946,7 +1948,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
p_dev->ahw->reset.seq_index = index;
}
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
{
p_dev->ahw->reset.seq_index = 0;
@@ -2029,7 +2031,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_info nic_info;
@@ -2213,9 +2215,9 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- struct qlcnic_dcb *dcb;
int err = 0;
+ adapter->rx_mac_learn = false;
ahw->msix_supported = !!qlcnic_use_msi_x;
qlcnic_83xx_init_rings(adapter);
@@ -2265,8 +2267,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
err = qlcnic_83xx_configure_opmode(adapter);
@@ -2279,11 +2280,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
-
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
return 0;
@@ -2314,7 +2310,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 734d28602ac3..be7d7a62cc0d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -8,7 +8,7 @@
#include "qlcnic.h"
#include "qlcnic_hw.h"
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
{
if (lock) {
if (qlcnic_83xx_lock_driver(adapter))
@@ -107,7 +107,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
npar = adapter->npars;
- for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
npar->pci_func, npar->active, npar->type,
npar->phy_port, npar->min_bw, npar->max_bw,
@@ -115,7 +115,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
}
dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
+ ahw->max_pci_func, ahw->total_nic_func);
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -224,10 +224,14 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 859cb161fc63..64dcbf33d8f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -91,18 +91,6 @@ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
cmd->rsp.arg = NULL;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
-{
- int i;
-
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
- if (adapter->npars[i].pci_func == pci_func)
- return i;
- }
-
- return -1;
-}
-
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
@@ -966,13 +954,15 @@ out_free_dma:
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
struct qlcnic_pci_info *pci_info)
{
- int err = 0, i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info_le);
- size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+ int err = 0, i;
pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
&pci_info_dma_t, GFP_KERNEL);
@@ -989,14 +979,16 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
cmd.req.arg[3] = pci_size;
err = qlcnic_issue_cmd(adapter, &cmd);
- adapter->ahw->act_pci_func = 0;
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
pci_info->type = le16_to_cpu(npar->type);
- if (pci_info->type == QLCNIC_TYPE_NIC)
- adapter->ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -1011,6 +1003,14 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
qlcnic_free_mbx_args(&cmd);
out_free_dma:
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
@@ -1203,7 +1203,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
@@ -1236,15 +1236,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
int err;
u32 arg1;
- struct qlcnic_cmd_args cmd;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
- if (port >= QLCNIC_MAX_PCI_FUNC)
+ if (port >= ahw->max_vnic_func)
goto err_ret;
} else if (func_esw == QLCNIC_STATS_ESWITCH) {
if (port >= QLCNIC_NIU_MAX_XG_PORTS)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 86bca7c14f99..7d4f54912bad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -15,7 +15,6 @@
#define QLC_DCB_GET_MAP(V) (1 << V)
-#define QLC_DCB_AEN_BIT 0x2
#define QLC_DCB_FW_VER 0x2
#define QLC_DCB_MAX_TC 0x8
#define QLC_DCB_MAX_APP 0x8
@@ -71,7 +70,6 @@ static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
@@ -179,7 +177,6 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
- .register_aen = qlcnic_83xx_dcb_register_aen,
.aen_handler = qlcnic_83xx_dcb_aen_handler,
};
@@ -260,6 +257,9 @@ int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
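+ /* DCB is not supported on SR-IOV VF functions */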
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
if (!dcb)
return -ENOMEM;
@@ -280,7 +280,6 @@ static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
return;
adapter = dcb->adapter;
- qlcnic_dcb_register_aen(dcb, 0);
while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
@@ -304,7 +303,6 @@ static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
qlcnic_dcb_get_hw_capability(dcb);
qlcnic_dcb_get_cee_cfg(dcb);
- qlcnic_dcb_register_aen(dcb, 1);
}
static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
@@ -642,29 +640,6 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
-{
- u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
- struct qlcnic_adapter *adapter = dcb->adapter;
- struct qlcnic_cmd_args cmd;
- int err;
-
- err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
- if (err)
- return err;
-
- cmd.req.arg[1] = QLC_DCB_AEN_BIT;
-
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
- (flag ? "register" : "unregister"), err);
-
- qlcnic_free_mbx_args(&cmd);
-
- return err;
-}
-
static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
u32 *val = data;
@@ -832,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
!type->tc_param_valid)
return;
- if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
return;
tc_cfg = &type->tc_cfg[tc];
@@ -868,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
!type->tc_param_valid)
return;
- if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
return;
pgcfg = &type->pg_cfg[pgid];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index c04ae0cdc108..3cf4a10fbe1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -25,7 +25,6 @@ struct qlcnic_dcb_ops {
int (*get_hw_capability) (struct qlcnic_dcb *);
int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
void (*init_dcbnl_ops) (struct qlcnic_dcb *);
- int (*register_aen) (struct qlcnic_dcb *, bool);
void (*aen_handler) (struct qlcnic_dcb *, void *);
int (*get_cee_cfg) (struct qlcnic_dcb *);
void (*get_info) (struct qlcnic_dcb *);
@@ -103,13 +102,6 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
return 0;
}
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
-{
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(dcb, flag);
-}
-
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
{
if (dcb && dcb->ops->aen_handler)
@@ -121,4 +113,10 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
if (dcb && dcb->ops->init_dcbnl_ops)
dcb->ops->init_dcbnl_ops(dcb);
}
+
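+/* Attach DCB support and clear the DCB ops if the attach fails */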
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 6b08194aa0d4..acee1a5d80c6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -229,7 +229,7 @@ static const u32 ext_diag_registers[] = {
-1
};
-#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_MGMT_API_VERSION 3
#define QLCNIC_ETHTOOL_REGS_VER 4
static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@ -278,21 +278,8 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
}
-static int
-qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_get_settings(adapter, ecmd);
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_get_settings(adapter, ecmd);
-
- return -EIO;
-}
-
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
- struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
@@ -433,6 +420,20 @@ skip:
return 0;
}
+static int qlcnic_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
@@ -527,6 +528,9 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
if (qlcnic_82xx_check(adapter))
i = qlcnic_82xx_get_registers(adapter, regs_buff);
else
@@ -732,6 +736,7 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->rx_count);
return err;
}
+ adapter->drv_rss_rings = channel->rx_count;
}
if (channel->tx_count) {
@@ -742,10 +747,12 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count);
return err;
}
+ adapter->drv_tss_rings = channel->tx_count;
}
- err = qlcnic_setup_rings(adapter, channel->rx_count,
- channel->tx_count);
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
adapter->drv_sds_rings, adapter->drv_tx_rings);
@@ -1052,7 +1059,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int drv_tx_rings = adapter->drv_tx_rings;
@@ -1491,9 +1498,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_nic_intr_coalesce *coal;
- u32 rx_coalesce_usecs, rx_max_frames;
- u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
@@ -1503,82 +1508,31 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
- ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
- coal = &adapter->ahw->coal;
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
- if (qlcnic_83xx_check(adapter)) {
- if (!ethcoal->tx_coalesce_usecs ||
- !ethcoal->tx_max_coalesced_frames ||
- !ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- } else {
- tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
- tx_max_frames = ethcoal->tx_max_coalesced_frames;
- rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
- rx_max_frames = ethcoal->rx_max_coalesced_frames;
- coal->flag = 0;
-
- if ((coal->rx_time_us == rx_coalesce_usecs) &&
- (coal->rx_packets == rx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_TX;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
- (coal->tx_packets == tx_max_frames)) {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- } else {
- coal->type = QLCNIC_INTR_COAL_TYPE_RX;
- coal->rx_time_us = rx_coalesce_usecs;
- coal->rx_packets = rx_max_frames;
- coal->tx_time_us = tx_coalesce_usecs;
- coal->tx_packets = tx_max_frames;
- }
- }
- } else {
- if (!ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- coal->flag = QLCNIC_INTR_DEFAULT;
- coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- } else {
- coal->flag = 0;
- coal->rx_time_us = ethcoal->rx_coalesce_usecs;
- coal->rx_packets = ethcoal->rx_max_coalesced_frames;
- }
- }
-
- qlcnic_config_intr_coalesce(adapter);
-
- return 0;
+ return err;
}
static int qlcnic_get_intr_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d262211b03b3..34e467b239a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,6 @@ struct qlcnic_legacy_intr_set {
};
#define QLCNIC_MSIX_BASE 0x132110
-#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
#define FLASH_ROM_WINDOW 0x42110030
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6f7f60c09f07..03d18a0be6ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -455,14 +455,14 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
int err = -EINVAL;
/* Delete MAC from the existing list */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
if (err)
@@ -477,17 +477,18 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
/* look up if already exists */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
return 0;
}
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
if (cur == NULL)
return -ENOMEM;
@@ -499,11 +500,12 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
return -EIO;
}
+ cur->vlan_id = vlan;
list_add_tail(&cur->list, &adapter->mac_list);
return 0;
}
-void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -516,8 +518,7 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (!qlcnic_sriov_vf_check(adapter))
- qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
@@ -526,15 +527,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
+ } else if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
-
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
@@ -545,14 +542,15 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (!qlcnic_sriov_vf_check(adapter)) {
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
- !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
- }
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
@@ -561,16 +559,17 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_vlan_list *cur;
struct netdev_hw_addr *ha;
- struct qlcnic_mac_list_s *cur;
+ size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
if (qlcnic_sriov_vf_check(adapter)) {
if (!netdev_mc_empty(netdev)) {
netdev_for_each_mc_addr(ha, netdev) {
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
- GFP_ATOMIC);
+ temp = sizeof(struct qlcnic_mac_vlan_list);
+ cur = kzalloc(temp, GFP_ATOMIC);
if (cur == NULL)
break;
memcpy(cur->mac_addr,
@@ -605,11 +604,11 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
qlcnic_sre_macaddr_change(adapter,
cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -756,10 +755,7 @@ int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
return 0;
}
-/*
- * Send the interrupt coalescing parameter set by ethtool to the card.
- */
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -781,10 +777,32 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parametrs\n");
+
+ return rv;
}
-#define QLCNIC_ENABLE_IPV4_LRO 1
-#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
@@ -948,7 +966,7 @@ int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
u64 word;
@@ -1247,7 +1265,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
return 0;
}
-void
+static void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1258,7 +1276,7 @@ qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
mutex_unlock(&adapter->ahw->mem_lock);
}
-void
+static void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1494,7 +1512,7 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
return 0;
}
-int
+static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
@@ -1534,19 +1552,34 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
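+/* Query the firmware LED status and cache the current beacon state */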
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int err;
+ u8 beacon_state;
+ int err = 0;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
- if (!err) {
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (!err)
- *h_state = cmd.rsp.arg[1];
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
}
- qlcnic_free_mbx_args(&cmd);
- return err;
+
+ return;
}
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 13303e7d1ed7..63d75617d445 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -162,16 +162,18 @@ struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
u64 *uaddr, u16 vlan_id);
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
__be32, int);
@@ -181,9 +183,6 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index ad1531ae3aa8..54ebf300332a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -124,41 +124,16 @@
#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
- struct qlcnic_host_rds_ring *, u16, u16);
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
-inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(0x0, tx_ring->crb_intr_mask);
-}
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
-
-static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test)
- writel(1, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(0, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
-{
- writel(1, tx_ring->crb_intr_mask);
-}
-
-static inline u8 qlcnic_mac_hash(u64 mac)
-{
- return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
@@ -202,7 +177,7 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
struct hlist_node *n;
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
tmp_fil->vlan_id == vlan_id)
return tmp_fil;
}
@@ -210,8 +185,8 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
return NULL;
}
-void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
- int loopback_pkt, u16 vlan_id)
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
@@ -221,8 +196,11 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
u8 hindex, op;
int ret;
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) &
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
(adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
@@ -322,31 +300,47 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct net_device *netdev = adapter->netdev;
+ u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *n;
struct hlist_head *head;
- struct net_device *netdev = adapter->netdev;
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct hlist_node *n;
u64 src_addr = 0;
u16 vlan_id = 0;
- u8 hindex;
+ u8 hindex, hval;
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
+ if (!qlcnic_sriov_pf_check(adapter)) {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+ } else {
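+ /* On the SR-IOV PF, learn filters per MAC and VLAN pair */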
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (vlan_tx_tag_present(skb)) {
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
+ !vlan_id)
+ return;
+ }
if (adapter->fhash.fnum >= adapter->fhash.fmax) {
adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac addresses\n",
- adapter->fhash.fmax);
+ netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
+ adapter->fhash.fmax, adapter->fhash.fnum);
return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
@@ -689,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
- /* Do not advertise Link up if the port is in loopback mode */
- if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ adapter->ahw->linkup = 1;
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
return;
+ }
netdev_info(netdev, "NIC Link is up\n");
- adapter->ahw->linkup = 1;
netif_carrier_on(netdev);
}
}
@@ -862,7 +861,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
@@ -903,7 +902,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1015,9 +1014,9 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *ring,
- u16 index, u16 cksum)
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
@@ -1156,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1236,7 +1235,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
return buffer;
}
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_adapter *adapter = sds_ring->adapter;
@@ -1466,8 +1465,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
@@ -1535,13 +1533,12 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ !adapter->ahw->diag_test) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
@@ -1562,7 +1559,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1572,7 +1569,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
qlcnic_check_multi_tx(adapter)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_int(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -1601,7 +1598,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
int index, length, cksum, is_lb_pkt;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -1619,19 +1617,19 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
-
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
skb_put(skb, length);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1666,15 +1664,16 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
int l2_hdr_offset, l4_hdr_offset;
int index, is_lb_pkt;
u16 lro_length, length, data_offset, gso_size;
- u16 vid = 0xffff, t_vid;
+ u16 vid = 0xffff;
+ int err;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_83xx_hndl(sts_data[0]);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1688,12 +1687,6 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- t_vid = 0;
- is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
- }
if (qlcnic_83xx_is_tstamp(sts_data[1]))
data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
else
@@ -1702,7 +1695,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
skb_put(skb, lro_length + data_offset);
skb_pull(skb, l2_hdr_offset);
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
adapter->stats.rxdropped++;
dev_kfree_skb(skb);
return buffer;
@@ -1832,7 +1832,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1855,7 +1855,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1874,7 +1874,7 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
if (work_done) {
napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -1892,7 +1892,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -1912,7 +1912,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_enable_intr(adapter, sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
@@ -1920,7 +1920,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
- qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
}
}
@@ -1938,7 +1938,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
- qlcnic_83xx_disable_intr(adapter, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
@@ -1947,7 +1947,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
- qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ qlcnic_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
napi_disable(&tx_ring->napi);
}
@@ -2027,8 +2027,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_tx_rings(adapter);
}
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
- int ring, u64 sts_data[])
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct sk_buff *skb;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 550791b8fbae..1222865cfb73 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -81,6 +81,16 @@ static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,12 +318,12 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -337,7 +347,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -546,6 +556,11 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_error_detected = qlcnic_82xx_io_error_detected,
.io_slot_reset = qlcnic_82xx_io_slot_reset,
.io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -588,9 +603,6 @@ void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
QLCNIC_TX_QUEUE);
else
adapter->drv_tx_rings = tx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
- adapter->drv_tx_rings);
}
void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
@@ -601,25 +613,79 @@ void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
QLCNIC_RX_QUEUE);
else
adapter->drv_sds_rings = rx_cnt;
-
- dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
- adapter->drv_sds_rings);
}
-int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int drv_tx_rings, drv_sds_rings, tx_vector;
- int err = -1, i;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+restore:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
- if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- drv_tx_rings = 0;
- tx_vector = 0;
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
} else {
- drv_tx_rings = adapter->drv_tx_rings;
- tx_vector = 1;
+ netdev_info(adapter->netdev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+ goto restore;
+
+ err = -EIO;
}
+ return err;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = -1, vector;
+
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
sizeof(struct msix_entry),
@@ -628,48 +694,43 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
- enable_msix:
- for (i = 0; i < num_msix; i++)
- adapter->msix_entries[i].entry = i;
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- if (qlcnic_83xx_check(adapter)) {
- adapter->ahw->num_msix = num_msix;
- /* subtract mail box and tx ring vectors */
- adapter->drv_sds_rings = num_msix -
- drv_tx_rings - 1;
- } else {
- adapter->ahw->num_msix = num_msix;
- if (qlcnic_check_multi_tx(adapter) &&
- !adapter->ahw->diag_test &&
- (adapter->drv_tx_rings > 1))
- drv_sds_rings = num_msix - drv_tx_rings;
- else
- drv_sds_rings = num_msix;
-
- adapter->drv_sds_rings = drv_sds_rings;
- }
+ adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
} else if (err > 0) {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
- if (qlcnic_83xx_check(adapter)) {
- if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
- return err;
- err -= drv_tx_rings + 1;
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
- num_msix += drv_tx_rings + 1;
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -EIO;
} else {
- num_msix = rounddown_pow_of_two(err);
- if (qlcnic_check_multi_tx(adapter))
- num_msix += drv_tx_rings;
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -EIO;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
}
if (num_msix) {
@@ -680,14 +741,29 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
}
} else {
dev_info(&pdev->dev,
- "Unable to allocate %d MSI-X interrupt vectors\n",
- num_msix);
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
}
}
return err;
}
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
+}
+
static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
int err = 0;
@@ -722,25 +798,30 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
int num_msix, err = 0;
- num_msix = adapter->drv_sds_rings;
-
- if (qlcnic_check_multi_tx(adapter))
- num_msix += adapter->drv_tx_rings;
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
- err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM)
- return err;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_disable_multi_tx(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
- return err;
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -800,25 +881,26 @@ static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int ret;
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- switch (adapter->ahw->port_type) {
+ switch (ahw->port_type) {
case QLCNIC_GBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
break;
case QLCNIC_XGBE:
- adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
break;
}
return 0;
}
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -846,12 +928,13 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -859,7 +942,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (ret)
goto err_pci_info;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = ahw->total_nic_func;
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
act_pci_func, GFP_KERNEL);
@@ -875,10 +958,10 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < ahw->max_vnic_func; i++) {
pfn = pci_info[i].id;
- if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1346,7 +1429,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (!adapter->npars[i].eswitch_status)
continue;
@@ -1409,7 +1492,7 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
if (!adapter->npars[i].eswitch_status)
@@ -1484,7 +1567,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -1685,6 +1768,33 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
}
}
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
@@ -1717,7 +1827,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
- qlcnic_config_intr_coalesce(adapter);
+ qlcnic_config_def_intr_coalesce(adapter);
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
@@ -1728,6 +1838,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
+ netif_tx_start_all_queues(netdev);
return 0;
}
@@ -1862,7 +1973,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1885,7 +1996,6 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -1894,15 +2004,7 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto err_out;
}
- /* Initialize interrupt coalesce parameters */
- ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
- ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
- ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
- if (qlcnic_83xx_check(adapter)) {
- ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
- ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
- }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -1963,7 +2065,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1995,7 +2097,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
return 0;
}
@@ -2032,10 +2134,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u16 act_pci_fn = ahw->act_pci_func;
+ u16 act_pci_fn = ahw->total_nic_func;
u16 count;
ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@ -2211,7 +2313,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2289,7 +2390,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
- adapter->ahw->revision_id = pdev->revision;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
if (qlcnic_mac_learn == FDB_MAC_LEARN)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@ -2333,10 +2435,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->flags |= QLCNIC_NEED_FLR;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2365,6 +2463,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_dcb_enable(adapter->dcb);
+
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -2498,13 +2598,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter->dcb);
-
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ qlcnic_83xx_initialize_nic(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_detach_mailbox_work(adapter);
@@ -2512,6 +2610,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
kfree(ahw->fw_info);
}
+ qlcnic_dcb_free(adapter->dcb);
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -2606,14 +2706,8 @@ static int qlcnic_open(struct net_device *netdev)
err = __qlcnic_up(adapter, netdev);
if (err)
- goto err_out;
-
- netif_tx_start_all_queues(netdev);
-
- return 0;
+ qlcnic_detach(adapter);
-err_out:
- qlcnic_detach(adapter);
return err;
}
@@ -2640,7 +2734,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = adapter->ahw->total_nic_func;
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
@@ -2737,12 +2831,58 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
return rv;
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct cmd_desc_type0 *tx_desc_info;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ tx_desc_info = &tx_ring->desc_head[i];
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ &tx_ring->desc_head[i],
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_host_tx_ring *tx_ring;
int ring;
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_done(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2755,22 +2895,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- netdev_info(netdev, "Tx ring=%d\n", ring);
- netdev_info(netdev,
- "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
- readl(tx_ring->crb_cmd_producer),
- tx_ring->sw_consumer,
- le32_to_cpu(*(tx_ring->hw_consumer)));
- netdev_info(netdev,
- "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
- tx_ring->tx_stats.xmit_finished,
- tx_ring->tx_stats.xmit_called,
- tx_ring->tx_stats.xmit_on,
- tx_ring->tx_stats.xmit_off);
- }
+ qlcnic_dump_tx_rings(adapter);
adapter->ahw->reset_context = 1;
}
}
@@ -2793,7 +2918,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2832,7 +2957,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -2880,17 +3005,39 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ recv_ctx = adapter->recv_ctx;
- disable_irq(adapter->irq);
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_intr(adapter->irq, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_schedule(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ /* Only Multi-Tx queue capable devices need to
+ * schedule NAPI for TX rings
+ */
+ if ((qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_schedule(&tx_ring->napi);
+ }
}
- enable_irq(adapter->irq);
}
#endif
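
The poll-controller hunk above replaces the old disable_irq()/qlcnic_intr() sequence with per-ring interrupt masking plus NAPI scheduling. A minimal, generic sketch of that pattern follows; it is not the driver's exact code, and the example_* names (including the example_mask_ring_irq() helper) are placeholders for the qlcnic_disable_sds_intr()/qlcnic_disable_tx_intr() calls shown in the diff.

#include <linux/netdevice.h>

struct example_rx_ring {
	struct napi_struct napi;
	/* device-specific ring state would live here */
};

struct example_adapter {
	struct example_rx_ring *rx_rings;
	int num_rx_rings;
};

static void example_mask_ring_irq(struct example_adapter *adapter,
				  struct example_rx_ring *ring)
{
	/* hypothetical: write the ring's interrupt-mask register here */
}

static void example_poll_controller(struct net_device *netdev)
{
	struct example_adapter *adapter = netdev_priv(netdev);
	int i;

	if (!netif_running(netdev))
		return;

	for (i = 0; i < adapter->num_rx_rings; i++) {
		struct example_rx_ring *ring = &adapter->rx_rings[i];

		/* mask the ring's IRQ, then let the normal NAPI poll drain it */
		example_mask_ring_irq(adapter, ring);
		napi_schedule(&ring->napi);
	}
}
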
@@ -3286,7 +3433,8 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -3581,8 +3729,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3612,13 +3760,13 @@ pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3716,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
strcpy(buf, "Tx");
}
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
@@ -3726,12 +3874,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return -EINVAL;
}
- if (ring_cnt < 2) {
- netdev_err(netdev,
- "%s rings value should not be lower than 2\n", buf);
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -3754,7 +3896,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
return 0;
}
-int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
@@ -3775,12 +3917,6 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
qlcnic_teardown_intr(adapter);
- /* compute and set default and max tx/sds rings */
- qlcnic_set_tx_ring_count(adapter, tx_cnt);
- qlcnic_set_sds_ring_count(adapter, rx_cnt);
-
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-
err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3788,9 +3924,10 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
return err;
}
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ qlcnic_83xx_initialize_nic(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
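
For context on the retry logic in qlcnic_setup_tss_rss_intr() and qlcnic_enable_msix() above: with the older pci_enable_msix() interface used throughout this diff, a positive return value reports how many vectors the platform can actually provide, so the driver shrinks its request (to a power of two, since the RSS/TSS ring counts must be powers of two) and tries again. The sketch below is illustrative only and not the driver's code; example_enable_msix() is a hypothetical name.

#include <linux/pci.h>
#include <linux/log2.h>
#include <linux/errno.h>

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int i, err;

	while (nvec > 0) {
		for (i = 0; i < nvec; i++)
			entries[i].entry = i;

		err = pci_enable_msix(pdev, entries, nvec);
		if (err == 0)
			return nvec;	/* all requested vectors granted */
		if (err < 0)
			return err;	/* hard failure (e.g. -ENOMEM, -EINVAL) */

		/*
		 * err > 0: only 'err' vectors are available.  Shrink the
		 * request to a power of two and retry, mirroring the
		 * rounddown_pow_of_two() fallback in the hunks above.
		 */
		nvec = rounddown_pow_of_two(err);
	}

	return -ENOSPC;
}
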
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 0daf660e12a1..396bd1fd1d27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -126,8 +126,8 @@ struct qlcnic_vport {
u16 handle;
u16 max_tx_bw;
u16 min_tx_bw;
+ u16 pvid;
u8 vlan_mode;
- u16 vlan;
u8 qos;
bool spoofchk;
u8 mac[6];
@@ -137,6 +137,8 @@ struct qlcnic_vf_info {
u8 pci_func;
u16 rx_ctx_id;
u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
unsigned long state;
struct completion ch_free_cmpl;
struct work_struct trans_work;
@@ -149,6 +151,7 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
+ struct mutex vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
@@ -185,7 +188,6 @@ void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
@@ -195,8 +197,13 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
-int qlcnic_sriov_vf_shutdown(struct pci_dev *);
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 21a4b274d2e4..0638c1810d54 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -35,7 +35,10 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -68,6 +71,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.change_l2_filter = qlcnic_83xx_change_l2_filter,
.get_board_info = qlcnic_83xx_get_port_info,
.free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
};
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
@@ -176,6 +181,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
+ mutex_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -276,6 +282,11 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
if (qlcnic_sriov_pf_check(adapter))
qlcnic_sriov_pf_cleanup(adapter);
@@ -416,10 +427,15 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
if (!sriov->any_vlan)
return 0;
- sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
if (!sriov->allowed_vlans)
@@ -432,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
- struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
@@ -473,14 +488,12 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
- if (err)
- return err;
-
if (qlcnic_83xx_get_port_info(adapter))
return -EIO;
@@ -500,7 +513,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
- struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -538,10 +550,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- dcb = adapter->dcb;
-
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1417,7 +1428,7 @@ cleanup_transaction:
return rsp;
}
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
@@ -1447,18 +1458,27 @@ out:
return ret;
}
-void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_list_s *cur;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head, tmp_list;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ vf = &adapter->ahw->sriov->vf_info[0];
INIT_LIST_HEAD(&tmp_list);
head = &adapter->vf_mc_list;
netif_addr_lock_bh(netdev);
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
list_move(&cur->list, &tmp_list);
}
@@ -1466,8 +1486,28 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
while (!list_empty(&tmp_list)) {
cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_list_s, list);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+ struct qlcnic_mac_vlan_list, list);
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ } else {
+ mutex_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id) {
+ qlcnic_nic_add_mac(adapter, bcast_addr,
+ vlan_id);
+ qlcnic_nic_add_mac(adapter,
+ cur->mac_addr,
+ vlan_id);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter)) {
+ qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+ }
+ }
list_del(&cur->list);
kfree(cur);
}
@@ -1490,13 +1530,24 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u16 vlan;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- vlan = adapter->ahw->sriov->vlan;
- __qlcnic_set_multi(netdev, vlan);
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev);
+
+ qlcnic_nic_set_promisc(adapter, mode);
}
static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1584,8 +1635,6 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter->dcb);
-
return 0;
err_out_term_channel:
@@ -1833,18 +1882,60 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
-static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
- u16 vlan = sriov->vlan;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
u8 allowed = 0;
int i;
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
return -EINVAL;
if (enable) {
- if (vlan)
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
return -EINVAL;
if (sriov->any_vlan) {
@@ -1857,24 +1948,54 @@ static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
return -EINVAL;
}
} else {
- if (!vlan || vlan != vid)
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
return -EINVAL;
}
return 0;
}
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return;
+}
+
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
if (vid == 0)
return 0;
- ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
if (ret)
return ret;
@@ -1894,11 +2015,11 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
qlcnic_free_mac_list(adapter);
if (enable)
- sriov->vlan = vid;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
- sriov->vlan = 0;
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_sriov_vf_set_multi(adapter->netdev);
+ qlcnic_set_multi(adapter->netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -1908,21 +2029,19 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
struct list_head *head = &adapter->mac_list;
- struct qlcnic_mac_list_s *cur;
- u16 vlan;
-
- vlan = adapter->ahw->sriov->vlan;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
- qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
- vlan, QLCNIC_MAC_DEL);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
-int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -1946,7 +2065,7 @@ int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
return 0;
}
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
struct net_device *netdev = adapter->netdev;
@@ -1972,3 +2091,70 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ mutex_lock(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ mutex_unlock(&vf->vlan_list_lock);
+ return err;
+}
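
The helpers added above (qlcnic_sriov_alloc_vlans(), qlcnic_sriov_add_vlan_id(), qlcnic_sriov_del_vlan_id(), qlcnic_sriov_check_any_vlan()) replace the single sriov->vlan field with a fixed-size, per-VF slot array guarded by vlan_list_lock. Below is a minimal sketch of how a consumer is expected to walk that table when programming MAC filters; it is not the driver's exact code, and the example_* names stand in for qlcnic_nic_add_mac()/qlcnic_sriov_cfg_vf_def_mac().

#include <linux/mutex.h>
#include <linux/types.h>

struct example_vf {
	struct mutex vlan_list_lock;	/* guards sriov_vlans[] and num_vlan */
	u16 *sriov_vlans;		/* num_allowed_vlans slots, 0 = free */
	int num_vlan;
};

static void example_program_mac(const u8 *mac, u16 vlan_id)
{
	/* hypothetical: issue the MAC/VLAN filter mailbox command */
}

static void example_program_mac_for_all_vlans(struct example_vf *vf,
					      int num_allowed_vlans,
					      const u8 *mac)
{
	bool has_vlan;
	int i;

	mutex_lock(&vf->vlan_list_lock);
	has_vlan = vf->num_vlan != 0;
	for (i = 0; i < num_allowed_vlans; i++) {
		u16 vlan_id = vf->sriov_vlans[i];

		if (vlan_id)			/* skip unused slots */
			example_program_mac(mac, vlan_id);
	}
	mutex_unlock(&vf->vlan_list_lock);

	/* no guest VLANs configured: program the untagged (VLAN 0) filter */
	if (!has_vlan)
		example_program_mac(mac, 0);
}
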
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 024f8161d2fe..e5277a632671 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -9,9 +9,12 @@
#include "qlcnic.h"
#include <linux/types.h>
-#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -64,9 +67,10 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_resources *res = &sriov->ff_max;
- u32 temp, num_vf_macs, num_vfs, max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
int ret = -EIO, vpid, id;
struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
if (vpid < 0)
@@ -75,16 +79,25 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
num_vfs = sriov->num_vfs;
max = num_vfs + 1;
info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = 1;
+
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
- num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
if (adapter->ahw->pci_func == func) {
- temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
- info->max_rx_ucast_mac_filters = temp;
- temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
- info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
@@ -93,8 +106,12 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
vp = sriov->vf_info[id].vp;
info->min_tx_bw = vp->min_tx_bw;
info->max_tx_bw = vp->max_tx_bw;
- info->max_rx_ucast_mac_filters = num_vf_macs;
- info->max_tx_mac_filters = num_vf_macs;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
info->max_tx_ques = QLCNIC_SINGLE_RING;
}
@@ -133,6 +150,25 @@ static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
}
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
struct qlcnic_info *npar_info)
{
@@ -166,6 +202,7 @@ static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
dev_info(&adapter->pdev->dev,
"\n\ttotal_pf: %d,\n"
@@ -310,6 +347,28 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
}
+/* On configuring VF flood bit, PFD will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
u8 func, u8 enable)
{
@@ -404,6 +463,8 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
qlcnic_sriov_pf_disable(adapter);
+ qlcnic_sriov_free_vlans(adapter);
+
qlcnic_sriov_pf_cleanup(adapter);
/* After disabling SRIOV re-init the driver in default mode
@@ -435,6 +496,12 @@ static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
if (err)
return err;
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
if (err)
goto disable_vlan_filtering;
@@ -512,6 +579,8 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
+ qlcnic_sriov_alloc_vlans(adapter);
+
err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
@@ -609,7 +678,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
if (vp->vlan_mode == QLC_PVID_MODE) {
cmd.req.arg[2] |= BIT_6;
- cmd.req.arg[3] |= vp->vlan << 8;
+ cmd.req.arg[3] |= vp->pvid << 8;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -644,10 +713,13 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
u16 func = vf->pci_func;
+ size_t size;
int err;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -657,8 +729,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
- if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -680,20 +756,23 @@ err_out:
}
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp,
- u16 func, u16 vlan, u8 op)
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
u8 *addr;
int err;
u32 *buf;
int vpid;
+ vp = vf->vp;
+
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
return -ENOMEM;
- vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
goto out;
@@ -737,6 +816,35 @@ static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
return 0;
}
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ mutex_lock(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ mutex_unlock(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_cmd_args *cmd)
{
@@ -744,7 +852,6 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
struct qlcnic_adapter *adapter = vf->adapter;
struct qlcnic_rcv_mbx_out *mbx_out;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_create_rx_ctx(cmd);
if (err) {
@@ -755,12 +862,10 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
cmd->req.arg[6] = vf->vp->handle;
err = qlcnic_issue_cmd(adapter, cmd);
- vlan = vf->vp->vlan;
if (!err) {
mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
vf->rx_ctx_id = mbx_out->ctx_id;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_ADD);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
} else {
vf->rx_ctx_id = 0;
}
@@ -844,7 +949,6 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err;
- u16 vlan;
err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
if (err) {
@@ -852,9 +956,7 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
return err;
}
- vlan = vf->vp->vlan;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
- vlan, QLCNIC_MAC_DEL);
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
cmd->req.arg[1] |= vf->vp->handle << 16;
err = qlcnic_issue_cmd(adapter, cmd);
@@ -1121,7 +1223,7 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] &= ~0x7;
new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd->req.arg[3] |= vp->vlan << 16;
+ cmd->req.arg[3] |= vp->pvid << 16;
cmd->req.arg[1] |= new_op;
}
@@ -1191,8 +1293,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
@@ -1206,10 +1310,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
- cmd->rsp.arg[2] = 1 << 16;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
break;
case QLC_PVID_MODE:
- cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
break;
}
@@ -1217,24 +1321,27 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
}
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vf_info *vf)
-
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
- if (!vp->vlan)
+ if (!qlcnic_sriov_check_any_vlan(vf))
return -EINVAL;
+ vlan = cmd->req.arg[1] >> 16;
if (!vf->rx_ctx_id) {
- vp->vlan = 0;
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
return 0;
}
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_DEL);
- vp->vlan = 0;
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
return 0;
}
@@ -1242,32 +1349,37 @@ static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
int err = -EIO;
+ u16 vlan;
- if (vp->vlan)
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
return err;
+ vlan = cmd->req.arg[1] >> 16;
+
if (!vf->rx_ctx_id) {
- vp->vlan = cmd->req.arg[1] >> 16;
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return 0;
}
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_DEL);
- if (err)
- return err;
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
- vp->vlan = cmd->req.arg[1] >> 16;
- err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- vp->vlan, QLCNIC_MAC_ADD);
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
if (err) {
- qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
- 0, QLCNIC_MAC_ADD);
- vp->vlan = 0;
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
}
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
return err;
}
@@ -1290,7 +1402,7 @@ static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
if (op)
err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
else
- err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
return err;
@@ -1300,8 +1412,6 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
- QLCNIC_CMD_DCB_QUERY_CAP,
- QLCNIC_CMD_DCB_QUERY_PARAM,
QLCNIC_CMD_INIT_NIC_FUNC,
QLCNIC_CMD_STOP_NIC_FUNC,
};
@@ -1597,7 +1707,8 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
}
if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
- vp->vlan = 0;
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@ -1767,20 +1878,22 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return -EOPNOTSUPP;
}
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
switch (vlan) {
case 4095:
- vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
vp->vlan_mode = QLC_NO_VLAN_MODE;
- vp->vlan = 0;
vp->qos = 0;
break;
default:
vp->vlan_mode = QLC_PVID_MODE;
- vp->vlan = vlan;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
vp->qos = qos;
+ vp->pvid = vlan;
}
netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@ -1795,7 +1908,7 @@ static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
switch (vp->vlan_mode) {
case QLC_PVID_MODE:
- vlan = vp->vlan;
+ vlan = vp->pvid;
break;
case QLC_GUEST_VLAN_MODE:
vlan = MAX_VLAN_ID;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 1a9f8a400e50..3d64113a35af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -6,7 +6,6 @@
*/
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
@@ -127,6 +126,8 @@ static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
if (kstrtoul(buf, 2, &h_beacon))
return -EINVAL;
+ qlcnic_get_beacon_state(adapter);
+
if (ahw->beacon_state == h_beacon)
return len;
@@ -158,7 +159,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
- u8 h_beacon_state, b_state, b_rate;
+ u8 b_state, b_rate;
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -168,18 +169,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
if (err)
return err;
- if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
- err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (err) {
- netdev_err(adapter->netdev,
- "Failed to get current beacon state\n");
- } else {
- if (h_beacon_state == QLCNIC_BEACON_DISABLE)
- ahw->beacon_state = 0;
- else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
- ahw->beacon_state = 2;
- }
- }
+ qlcnic_get_beacon_state(adapter);
if (ahw->beacon_state == b_state)
return len;
@@ -360,10 +350,28 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 count = 0;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return ahw->total_nic_func;
+
+ if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
+ count = QLC_DEFAULT_VNIC_COUNT;
+ else
+ count = ahw->max_vnic_func;
+
+ return count;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+
+ for (i = 0; i < pci_func_count; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
@@ -382,7 +390,6 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
-
if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
@@ -439,6 +446,8 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -455,17 +464,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ int i, pm_cfg_size;
u8 pci_func;
- if (size != sizeof(pm_cfg))
+ pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
+ if (size != pm_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&pm_cfg, 0,
- sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, pm_cfg_size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -477,26 +488,26 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
- memcpy(buf, &pm_cfg, size);
-
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
u32 op_mode;
u8 pci_func;
- int i, ret;
if (qlcnic_82xx_check(adapter))
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
else
- op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ if (pci_func >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -600,6 +611,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -629,16 +642,19 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ size_t esw_cfg_size;
u8 i, pci_func;
- if (size != sizeof(esw_cfg))
+ esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
+ if (size != esw_cfg_size)
return QL_STATUS_INVALID_PARAM;
- memset(&esw_cfg, 0,
- sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, esw_cfg_size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
pci_func = adapter->npars[i].pci_func;
if (!adapter->npars[i].active)
continue;
@@ -650,9 +666,6 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
-
- memcpy(buf, &esw_cfg, size);
-
return size;
}
@@ -711,6 +724,8 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -726,27 +741,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ size_t np_cfg_size;
int i, ret;
- if (size != sizeof(np_cfg))
+ np_cfg_size = pci_func_count * sizeof(*np_cfg);
+ if (size != np_cfg_size)
return QL_STATUS_INVALID_PARAM;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(&np_cfg, 0,
- sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+ memset(buf, 0, np_cfg_size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < pci_func_count; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
-
if (!adapter->npars[i].eswitch_status)
continue;
-
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
@@ -756,8 +772,6 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
-
- memcpy(buf, &np_cfg, size);
return size;
}
@@ -769,6 +783,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -778,7 +793,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -869,12 +884,13 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= pci_func_count)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -898,14 +914,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+ struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
+ size_t pci_cfg_sz;
int i, ret;
- if (size != sizeof(pci_cfg))
+ pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
+ if (size != pci_cfg_sz)
return QL_STATUS_INVALID_PARAM;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -915,19 +934,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- memset(&pci_cfg, 0,
- sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ for (i = 0; i < pci_func_count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
- memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
}
@@ -1269,7 +1286,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_bridged_mode);
}
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1308,7 +1325,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create eswitch stats sysfs entry");
}
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
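The qlcnic_sysfs.c hunks above replace fixed QLCNIC_MAX_PCI_FUNC stack arrays with buffers sized from the adapter's run-time function count and filled through the caller's buffer directly. Below is a minimal user-space sketch of that sizing pattern only; struct func_cfg, read_func_cfg() and the field names are hypothetical stand-ins, not the driver's types.

/* Sketch: size a binary sysfs-style read by the run-time function count
 * and fill the caller's buffer in place (no stack array + memcpy).
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>

struct func_cfg {		/* stand-in for the per-function config record */
	uint8_t pci_func;
	uint8_t action;
};

static ssize_t read_func_cfg(char *buf, size_t size, unsigned int func_count)
{
	struct func_cfg *cfg = (struct func_cfg *)buf;
	size_t want = func_count * sizeof(*cfg);
	unsigned int i;

	if (size != want)		/* reject mismatched buffer sizes */
		return -1;

	memset(buf, 0, want);		/* zero the caller's buffer directly */
	for (i = 0; i < func_count; i++) {
		cfg[i].pci_func = (uint8_t)i;
		cfg[i].action = 1;
	}
	return (ssize_t)want;
}

int main(void)
{
	char buf[4 * sizeof(struct func_cfg)];
	ssize_t n = read_func_cfg(buf, sizeof(buf), 4);

	printf("read %zd bytes for 4 functions\n", n);
	return 0;
}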
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 03517478e589..ef332708e5f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2248,7 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 6bc5db703920..829be21f97b2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
ql_queue_fw_error(qdev);
}
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
{
int i, status;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8dee1beb9854..c3c514e332b5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f705aeeba767..656c65ddadb4 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -6,7 +6,6 @@
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -4766,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1e49ec5b2232..819b74cefd64 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -34,7 +34,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -222,6 +221,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_READ))
break;
+ udelay(1);
}
if (limit < 0)
@@ -245,6 +245,7 @@ static int r6040_phy_write(void __iomem *ioaddr,
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_WRITE))
break;
+ udelay(1);
}
return (limit < 0) ? -ETIMEDOUT : 0;
@@ -834,8 +835,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < MISR)
- descptr->len = MISR;
+ if (skb->len < ETH_ZLEN)
+ descptr->len = ETH_ZLEN;
else
descptr->len = skb->len;
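The r6040 hunks above add a one-microsecond delay to the MDIO busy-wait loops and pad short frames to ETH_ZLEN. Here is a self-contained sketch of the bounded poll-with-delay pattern; MDIO_BUSY, read_status() and the retry limit are assumptions made for the example, and usleep() stands in for udelay().

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#define MDIO_BUSY 0x0800		/* hypothetical busy bit */

static unsigned int fake_reg = MDIO_BUSY;

static unsigned int read_status(void)
{
	/* pretend the hardware clears the busy bit after a few polls */
	static int polls;

	if (++polls >= 3)
		fake_reg &= ~MDIO_BUSY;
	return fake_reg;
}

static int wait_not_busy(void)
{
	int limit = 2048;

	while (limit-- > 0) {
		if (!(read_status() & MDIO_BUSY))
			return 0;
		usleep(1);	/* give the PHY time instead of a hot spin */
	}
	return -ETIMEDOUT;	/* limit exhausted */
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return 0;
}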
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c737f0ea5de7..3ff7bc3e7a23 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -21,7 +21,6 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
@@ -210,7 +209,7 @@ static const struct {
[RTL_GIGA_MAC_VER_16] =
_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_18] =
_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_19] =
@@ -7119,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
mutex_init(&tp->wk.mutex);
+ u64_stats_init(&tp->rx_stats.syncp);
+ u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index a30c4395b232..9e757c792d84 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A7790.
+ R8A7740, R8A777x and R8A779x.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d256ce19d4de..040cb94e8219 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2013 Renesas Solutions Corp.
@@ -13,15 +12,11 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -148,6 +143,65 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
+static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
+
+ [ECMR] = 0x0500,
+ [RFLR] = 0x0508,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008C,
+};
+
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[ECMR] = 0x0300,
[RFLR] = 0x0308,
@@ -314,12 +368,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
-static int sh_eth_is_gether(struct sh_eth_private *mdp)
+static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
- if (mdp->reg_offset == sh_eth_offset_gigabit)
- return 1;
- else
- return 0;
+ return mdp->reg_offset == sh_eth_offset_gigabit;
+}
+
+static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
+{
+ return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
@@ -395,8 +451,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
-/* R8A7790 */
-static struct sh_eth_cpu_data r8a7790_data = {
+/* R8A7790/1 */
+static struct sh_eth_cpu_data r8a779x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
@@ -646,8 +702,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tx_check = EESR_TC1 | EESR_FTC,
- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
.apr = 1,
@@ -705,6 +761,38 @@ static struct sh_eth_cpu_data r8a7740_data = {
.shift_rd0 = 1,
};
+/* R7S72100 */
+static struct sh_eth_cpu_data r7s72100_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+
+ .register_type = SH_ETH_REG_FAST_RZ,
+
+ .ecsr_value = ECSR_ICD,
+ .ecsipr_value = ECSIPR_ICDIP,
+ .eesipr_value = 0xff7f009f,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+ EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = RMCR_RNC,
+
+ .no_psr = 1,
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .hw_crc = 1,
+ .tsu = 1,
+ .shift_rd0 = 1,
+};
+
static struct sh_eth_cpu_data sh7619_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
@@ -732,7 +820,7 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
if (!cd->fcftr_value)
- cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
DEFAULT_FIFO_F_D_RFD;
if (!cd->fdr_value)
@@ -771,7 +859,7 @@ static int sh_eth_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret = 0;
- if (sh_eth_is_gether(mdp)) {
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
@@ -849,20 +937,17 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
return x;
}
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
+/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
- (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
- (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
- (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
-/*
- * Get MAC address from SuperH MAC address register
+/* Get MAC address from SuperH MAC address register
*
* SuperH's Ethernet device doesn't have 'ROM' to MAC address.
* This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
@@ -885,7 +970,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
return EDTRR_TRNS_GETHER;
else
return EDTRR_TRNS_ETHER;
@@ -1019,8 +1104,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- mdp->cur_rx = mdp->cur_tx = 0;
- mdp->dirty_rx = mdp->dirty_tx = 0;
+ mdp->cur_rx = 0;
+ mdp->cur_tx = 0;
+ mdp->dirty_rx = 0;
+ mdp->dirty_tx = 0;
memset(mdp->rx_ring, 0, rx_ringsize);
@@ -1033,7 +1120,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -1046,7 +1133,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
@@ -1067,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
- if (sh_eth_is_gether(mdp))
+ if (sh_eth_is_gether(mdp) ||
+ sh_eth_is_rz_fast_ether(mdp))
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
@@ -1081,8 +1170,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int rx_ringsize, tx_ringsize, ret = 0;
- /*
- * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
* the first 2 bytes, and +16 gets room for the status word from the
* card.
@@ -1257,7 +1345,7 @@ static int sh_eth_txfree(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
- int freeNum = 0;
+ int free_num = 0;
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
@@ -1271,7 +1359,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
- freeNum++;
+ free_num++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= mdp->num_tx_ring - 1)
@@ -1280,7 +1368,7 @@ static int sh_eth_txfree(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
}
- return freeNum;
+ return free_num;
}
/* Packet receive function */
@@ -1313,12 +1401,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
- /*
- * In case of almost all GETHER/ETHERs, the Receive Frame State
+ /* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740's GETHER, the RFS
- * bits are from bit 25 to bit 16. So, the driver needs right
- * shifting by 16.
+ * bit 0. However, in case of the R8A7740, R8A779x, and
+ * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
desc_status >>= 16;
@@ -1374,7 +1461,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -1392,10 +1479,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE)
- mdp->cur_rx = mdp->dirty_rx =
- (sh_eth_read(ndev, RDFAR) -
- sh_eth_read(ndev, RDLAR)) >> 4;
+ if (intr_status & EESR_RDE) {
+ u32 count = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
+
+ mdp->cur_rx = count;
+ mdp->dirty_rx = count;
+ }
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
@@ -1438,17 +1528,17 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK))
+ if (!(link_stat & PHY_ST_LINK)) {
sh_eth_rcv_snd_disable(ndev);
- else {
+ } else {
/* Link Up */
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
- ~DMAC_M_ECI, EESIPR);
- /*clear int */
+ ~DMAC_M_ECI, EESIPR);
+ /* clear int */
sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
- ECSR);
+ ECSR);
sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
- DMAC_M_ECI, EESIPR);
+ DMAC_M_ECI, EESIPR);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1517,11 +1607,11 @@ ignore_link:
if (intr_status & mask) {
/* Tx error */
u32 edtrr = sh_eth_read(ndev, EDTRR);
+
/* dmesg */
- dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
- intr_status, mdp->cur_tx);
- dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- mdp->dirty_tx, (u32) ndev->state, edtrr);
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1644,7 +1734,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
}
if (!mdp->link) {
sh_eth_write(ndev,
- (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1671,7 +1762,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
struct phy_device *phydev = NULL;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id , mdp->phy_id);
+ mdp->mii_bus->id, mdp->phy_id);
mdp->link = 0;
mdp->speed = 0;
@@ -1685,8 +1776,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached phy %i to driver %s\n",
- phydev->addr, phydev->drv->name);
+ dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1703,15 +1794,13 @@ static int sh_eth_phy_start(struct net_device *ndev)
if (ret)
return ret;
- /* reset phy - this also wakes it from PDOWN */
- phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
phy_start(mdp->phydev);
return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1725,7 +1814,7 @@ static int sh_eth_get_settings(struct net_device *ndev,
}
static int sh_eth_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
@@ -1801,7 +1890,7 @@ static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int i = 0;
@@ -1818,7 +1907,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
- sizeof(sh_eth_gstrings_stats));
+ sizeof(sh_eth_gstrings_stats));
break;
}
}
@@ -1953,9 +2042,10 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp))
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+ if (netif_msg_timer(mdp)) {
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
+ ndev->name, (int)sh_eth_read(ndev, EESR));
+ }
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2065,6 +2155,9 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (sh_eth_is_rz_fast_ether(mdp))
+ return &ndev->stats;
+
pm_runtime_get_sync(&mdp->pdev->dev);
ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
@@ -2088,8 +2181,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
}
/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
- int cmd)
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
@@ -2209,7 +2301,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
sh_eth_tsu_read_entry(reg_offset, c_addr);
- if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2344,8 +2436,7 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- /*
- * Initial condition is MCT = 1, PRM = 0.
+ /* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
@@ -2411,8 +2502,7 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
mdp->vlan_num_ids++;
- /*
- * The controller has one VLAN tag HW filter. So, if the filter is
+ /* The controller has one VLAN tag HW filter. So, if the filter is
* already enabled, the driver disables it and the filte
*/
if (mdp->vlan_num_ids > 1) {
@@ -2449,6 +2539,11 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
+ if (sh_eth_is_rz_fast_ether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ return;
+ }
+
sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
@@ -2528,7 +2623,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
@@ -2541,6 +2636,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
@@ -2566,6 +2663,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
+ case SH_ETH_REG_FAST_RZ:
+ reg_offset = sh_eth_offset_fast_rz;
+ break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
@@ -2739,7 +2839,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
@@ -2777,8 +2877,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
- /*
- * Runtime PM callback shared between ->runtime_suspend()
+ /* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
@@ -2805,9 +2904,11 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+ { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
- { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
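The sh_eth.c changes above add the sh_eth_offset_fast_rz register table and key new behaviour off sh_eth_is_rz_fast_ether(), which simply compares against the table pointer chosen at probe time. A minimal sketch of that table-selection pattern follows; the register names and offsets are invented for illustration.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum regs { REG_CTRL, REG_STATUS, REG_MAX };

static const uint16_t offsets_gigabit[REG_MAX] = {
	[REG_CTRL]   = 0x0000,
	[REG_STATUS] = 0x0028,
};

static const uint16_t offsets_fast_rz[REG_MAX] = {
	[REG_CTRL]   = 0x0400,
	[REG_STATUS] = 0x0428,
};

struct priv {
	const uint16_t *reg_offset;	/* chosen once from platform data */
};

static bool is_fast_rz(const struct priv *p)
{
	return p->reg_offset == offsets_fast_rz;	/* compare table identity */
}

int main(void)
{
	struct priv p = { .reg_offset = offsets_fast_rz };

	printf("STATUS offset 0x%04x, fast-rz=%d\n",
	       p.reg_offset[REG_STATUS], is_fast_rz(&p));
	return 0;
}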
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f32c1692d310..6075915b88ec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,5 +1,4 @@
-/*
- * SuperH Ethernet device driver
+/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
@@ -12,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -159,6 +155,7 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -171,10 +168,9 @@ enum {
#define SH2_SH3_SKB_RX_ALIGN 2
#endif
-/*
- * Register's bits
+/* Register's bits
*/
-/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
enum EDSR_BIT {
EDSR_ENT = 0x01, EDSR_ENR = 0x02,
};
@@ -199,7 +195,7 @@ enum DMAC_T_BIT {
EDTRR_TRNS_ETHER = 0x01,
};
-/* EDRRR*/
+/* EDRRR */
enum EDRRR_R_BIT {
EDRRR_R = 0x01,
};
@@ -422,8 +418,7 @@ enum TSU_FWSLC_BIT {
#define TSU_VTAG_ENABLE 0x80000000
#define TSU_VTAG_VID_MASK 0x00000fff
-/*
- * The sh ether Tx buffer descriptors.
+/* The sh ether Tx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_txdesc {
@@ -437,10 +432,9 @@ struct sh_eth_txdesc {
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
-/*
- * The sh ether Rx buffer descriptors.
+/* The sh ether Rx buffer descriptors.
* This structure should be 20 bytes.
*/
struct sh_eth_rxdesc {
@@ -454,7 +448,7 @@ struct sh_eth_rxdesc {
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
@@ -480,16 +474,16 @@ struct sh_eth_cpu_data {
unsigned long eesr_err_check;
/* hardware features */
- unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
+ unsigned long irq_flags; /* IRQ configuration flags */
+ unsigned no_psr:1; /* EtherC DO NOT have PSR */
+ unsigned apr:1; /* EtherC have APR */
+ unsigned mpr:1; /* EtherC have MPR */
+ unsigned tpauser:1; /* EtherC have TPAUSER */
+ unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
+ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC have RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
@@ -511,14 +505,14 @@ struct sh_eth_private {
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- spinlock_t lock;
- u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
- u32 rx_buf_sz; /* Based on MTU+slack. */
+ u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
/* MII transceiver section. */
- u32 phy_id; /* PHY ID */
+ u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
int link;
@@ -526,8 +520,8 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- int port; /* for TSU */
- int vlan_num_ids; /* for VLAN tag filter */
+ int port; /* for TSU */
+ int vlan_num_ids; /* for VLAN tag filter */
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
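The sh_eth.h hunks above switch the Tx/Rx descriptor structs to the __aligned(2) __packed spelling of the same attributes. The sketch below shows the underlying attribute pattern with a compile-time size check; the fields, their layout, and the 16-byte size are made up for the example and are not the sh_eth descriptor layout.

#include <stdio.h>
#include <stdint.h>

struct txdesc {
	uint32_t status;		/* TD0 (hypothetical layout) */
	uint16_t pad0;
	uint16_t buffer_length;		/* TD1 */
	uint32_t addr;			/* TD2 */
	uint32_t pad1;
} __attribute__((aligned(2), packed));

/* packing removes padding, so the struct has exactly the size we expect */
_Static_assert(sizeof(struct txdesc) == 16,
	       "descriptor layout must match the hardware");

int main(void)
{
	printf("sizeof(struct txdesc) = %zu\n", sizeof(struct txdesc));
	return 0;
}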
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a99739c5142c..1f4449ad8900 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c76571886011..69e4fd21adb4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
@@ -356,7 +355,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
if (pkt_status & SEEQ_RSTAT_FIG) {
/* Packet is OK. */
/* We don't want to receive our own packets */
- if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
if (len > rx_copybreak) {
skb = rd->skb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
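The sgiseeq_rx() hunk above replaces an open-coded memcmp() with ether_addr_equal() when dropping the device's own frames. A tiny sketch of that comparison follows; mac_equal() is a hypothetical stand-in for the kernel helper.

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static bool mac_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;	/* true when addresses match */
}

int main(void)
{
	uint8_t dev_addr[ETH_ALEN]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t frame_src[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	/* e.g. skip frames the interface transmitted itself */
	if (mac_equal(frame_src, dev_addr))
		printf("own frame, skip\n");
	return 0;
}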
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..174a92f5fe51 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
+#include "selftest.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -52,31 +53,31 @@ struct efx_ef10_filter_table {
struct {
unsigned long spec; /* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX 32
-#define EFX_EF10_FILTER_STACK_MC_MAX 256
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct {
u8 addr[ETH_ALEN];
u16 id;
- } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
- stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
- int stack_uc_count; /* negative for PROMISC */
- int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+ } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+ dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count; /* negative for PROMISC */
+ int dev_mc_count; /* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ efx_ptp_probe(efx, NULL);
+
return 0;
fail3:
@@ -277,11 +280,17 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
/* -EALREADY means nothing to free, so ignore */
if (rc == -EALREADY)
rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
return rc;
}
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ efx_ptp_remove(efx);
+
efx_mcdi_mon_remove(efx);
- /* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
if (nic_data->wc_membase)
@@ -669,10 +679,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_indir_table(efx);
+ efx_ef10_rx_push_rss_config(efx);
return 0;
}
+static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -703,6 +724,19 @@ static int efx_ef10_map_reset_flags(u32 *flags)
return -EINVAL;
}
+static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc = efx_mcdi_reset(efx, reset_type);
+
+ /* If it was a port reset, trigger reallocation of MC resources.
+ * Note that on an MC reset nothing needs to be done now because we'll
+ * detect the MC reset later and handle it then.
+ */
+ if (reset_type == RESET_TYPE_ALL && !rc)
+ efx_ef10_reset_mc_allocations(efx);
+ return rc;
+}
+
#define EF10_DMA_STAT(ext_name, mcdi_name) \
[EF10_STAT_ ## ext_name] = \
{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
@@ -764,8 +798,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -834,8 +868,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
(1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
(1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
- (1ULL << EF10_STAT_rx_dp_emerg_wait))
+ (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -901,6 +935,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
stats[EF10_STAT_rx_good_bytes] =
stats[EF10_STAT_rx_bytes] -
stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1067,10 +1102,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
nic_data->warm_boot_count = rc;
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_filters = true;
- nic_data->must_restore_piobufs = true;
- nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx_ef10_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -1241,8 +1273,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
return;
fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+ tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1288,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
tx_queue->queue);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1265,7 +1297,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1408,12 +1441,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
- netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+ netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1461,8 +1494,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -1481,13 +1515,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
-
- return;
-
-fail:
- WARN_ON(true);
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1530,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
efx_rx_queue_index(rx_queue));
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1510,7 +1539,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1677,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
/* IRQ return is ignored */
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1669,7 +1691,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
- rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc && rc != -EALREADY)
@@ -1678,7 +1700,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
return;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1717,8 +1740,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
unsigned int rx_desc_ptr;
- WARN_ON(rx_queue->scatter_n == 0);
-
netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
"scattered RX aborted (dropping %u buffers)\n",
rx_queue->scatter_n);
@@ -1754,7 +1775,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
- WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+ netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
rx_queue = efx_channel_get_rx_queue(channel);
@@ -1765,17 +1789,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
/* detect rx abort */
if (unlikely(n_descs == rx_queue->scatter_n)) {
- WARN_ON(rx_bytes != 0);
+ if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+ netdev_WARN(efx->net_dev,
+ "invalid RX abort: scatter_n=%u event="
+ EFX_QWORD_FMT "\n",
+ rx_queue->scatter_n,
+ EFX_QWORD_VAL(*event));
efx_ef10_handle_rx_abort(rx_queue);
return 0;
}
- if (unlikely(rx_queue->scatter_n != 0)) {
- /* Scattered packet completions cannot be
- * merged, so something has gone wrong.
- */
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
efx_ef10_handle_rx_bad_lbits(
rx_queue, next_ptr_lbits,
(rx_queue->removed_count +
@@ -1901,7 +1935,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
* events, so efx_process_channel() won't refill the
* queue. Refill it here
*/
- efx_fast_push_rx_descriptors(&channel->rx_queue);
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
break;
default:
netif_err(efx, hw, efx->net_dev,
@@ -2232,7 +2266,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
(spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2257,6 +2293,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
outbuf, sizeof(outbuf), NULL);
if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
return rc;
}
@@ -2326,10 +2364,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_EF10_FILTER_FLAG_BUSY)
break;
if (spec->priority < saved_spec->priority &&
- !(saved_spec->priority ==
- EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK)) {
+ spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
@@ -2383,11 +2418,13 @@ found:
*/
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
/* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
rc = ins_index;
goto out_unlock;
}
@@ -2427,8 +2464,11 @@ found:
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
@@ -2497,13 +2537,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
}
/* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, bool stack_requested)
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
{
unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2527,26 +2567,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spin_unlock_bh(&efx->filter_lock);
schedule();
}
+
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority > priority ||
- (!stack_requested &&
+ if (!spec ||
+ (!by_index &&
efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
}
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = 0;
+ goto out_unlock;
+ }
+
+ if (!(priority_mask & (1U << spec->priority))) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
- /* Reset steering of a stack-owned filter */
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
struct efx_filter_spec new_spec = *spec;
- new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK);
+ EFX_FILTER_FLAG_RX_RSS);
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2574,6 +2629,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
}
}
+
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
out_unlock:
@@ -2586,7 +2642,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2612,10 +2669,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
return rc;
}
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
- /* TODO */
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ return rc;
+ }
+
+ return 0;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2716,8 +2787,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
rc = -EBUSY;
goto fail_unlock;
}
- EFX_WARN_ON_PARANOID(saved_spec->flags &
- EFX_FILTER_FLAG_RX_STACK);
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto fail_unlock;
@@ -3027,8 +3096,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
table->entry[filter_idx].handle);
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
-
- WARN_ON(rc != 0);
+ if (rc)
+ netdev_WARN(efx->net_dev,
+ "filter_idx=%#x handle=%#llx\n",
+ filter_idx,
+ table->entry[filter_idx].handle);
kfree(spec);
}
@@ -3052,15 +3124,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
- n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
for (i = 0; i < n; i++) {
- filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
}
spin_unlock_bh(&efx->filter_lock);
@@ -3069,28 +3141,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
*/
netif_addr_lock_bh(net_dev);
if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
- table->stack_uc_count = -1;
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->dev_uc_count = -1;
} else {
- table->stack_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
ETH_ALEN);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->stack_uc_list[i].addr,
+ memcpy(table->dev_uc_list[i].addr,
uc->addr, ETH_ALEN);
i++;
}
}
if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
- table->stack_mc_count = -1;
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->dev_mc_count = -1;
} else {
- table->stack_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->stack_mc_list[0].addr);
+ table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->stack_mc_list[i].addr,
+ memcpy(table->dev_mc_list[i].addr,
mc->addr, ETH_ALEN);
i++;
}
@@ -3098,90 +3170,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
netif_addr_unlock_bh(net_dev);
/* Insert/renew unicast filters */
- if (table->stack_uc_count >= 0) {
- for (i = 0; i < table->stack_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count >= 0) {
+ for (i = 0; i < table->dev_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_uc_list[i].addr);
+ table->dev_uc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to unicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_uc_list[i].id);
- table->stack_uc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_uc_list[i].id);
+ table->dev_uc_count = -1;
break;
}
- table->stack_uc_list[i].id = rc;
+ table->dev_uc_list[i].id = rc;
}
}
- if (table->stack_uc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_uc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_uc_count = 0;
+ table->dev_uc_count = 0;
} else {
- table->stack_uc_list[0].id = rc;
+ table->dev_uc_list[0].id = rc;
}
}
/* Insert/renew multicast filters */
- if (table->stack_mc_count >= 0) {
- for (i = 0; i < table->stack_mc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count >= 0) {
+ for (i = 0; i < table->dev_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->stack_mc_list[i].addr);
+ table->dev_mc_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
/* Fall back to multicast-promisc */
while (i--)
efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_REQUIRED,
- table->stack_mc_list[i].id);
- table->stack_mc_count = -1;
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_mc_list[i].id);
+ table->dev_mc_count = -1;
break;
}
- table->stack_mc_list[i].id = rc;
+ table->dev_mc_list[i].id = rc;
}
}
- if (table->stack_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
- EFX_FILTER_FLAG_RX_RSS |
- EFX_FILTER_FLAG_RX_STACK,
+ if (table->dev_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
0);
efx_filter_set_mc_def(&spec);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
WARN_ON(1);
- table->stack_mc_count = 0;
+ table->dev_mc_count = 0;
} else {
- table->stack_mc_list[0].id = rc;
+ table->dev_mc_list[0].id = rc;
}
}
/* Remove filters that weren't renewed. Since nothing else
- * changes the STACK_OLD flag or removes these filters, we
+ * changes the AUTO_OLD flag or removes these filters, we
* don't need to hold the filter_lock while scanning for
* these filters.
*/
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_STACK_OLD) {
- if (efx_ef10_filter_remove_internal(efx,
- EFX_FILTER_PRI_REQUIRED,
- i, true) < 0)
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ if (efx_ef10_filter_remove_internal(
+ efx, 1U << EFX_FILTER_PRI_AUTO,
+ i, true) < 0)
remove_failed = true;
}
}
@@ -3195,6 +3263,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "BIST returned unknown result %u", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
#ifdef CONFIG_SFC_MTD
struct efx_ef10_nvram_type_info {
@@ -3213,6 +3362,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
@@ -3320,6 +3470,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
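To make the handler above concrete, here is a hedged userspace sketch (not taken from this patch; the interface name and error handling are placeholders) of the standard SIOCSHWTSTAMP request it services. On EF10 any PTP filter request is widened, so the granted filter reads back as HWTSTAMP_FILTER_ALL.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Request hardware RX timestamps for PTPv2/UDP traffic on one interface.
 * Returns the filter actually granted (HWTSTAMP_FILTER_ALL on EF10) or -1.
 */
int request_hw_timestamps(int sock_fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* The driver writes back the filter it actually applied. */
	return cfg.rx_filter;
}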
const struct efx_nic_type efx_hunt_a0_nic_type = {
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe,
@@ -3329,13 +3592,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.fini = efx_port_dummy_op_void,
.map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
- .reset = efx_mcdi_reset,
+ .reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3609,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.get_wol = efx_ef10_get_wol,
.set_wol = efx_ef10_set_wol,
.resume_wol = efx_port_dummy_op_void,
- /* TODO: test_chip */
+ .test_chip = efx_ef10_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = efx_ef10_mcdi_request,
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3360,7 +3624,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_push_rss_config = efx_ef10_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3397,11 +3661,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3677,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
};
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index fd844b53e385..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
/**************************************************************************
*
* Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
@@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
@@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
@@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
@@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
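For scale, the constants defined earlier bound this loop: BIST_WAIT_DELAY_COUNT polls of BIST_WAIT_DELAY_MS each give the other function's BIST at most 100 × 100 ms = 10 s to complete before the warning fires and the flag is cleared anyway.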
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
@@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx,
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
- pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
@@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
@@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
- pci_set_drvdata(pci_dev, NULL);
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..dbd7b78fe01c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
/* Filters */
/**
@@ -134,17 +137,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- return efx->type->filter_clear_rx(efx, priority);
-}
-
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MC_BIST,
RESET_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..229428915aa8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
"eventq.int", NULL);
}
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
@@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT;
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
@@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
strlcpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ efx_ptp_describe_stats(efx, strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
break;
}
}
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
spin_unlock_bh(&efx->stats_lock);
+
+ efx_ptp_update_stats(efx, data);
}
static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -583,7 +591,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
@@ -596,7 +604,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
return -EINVAL;
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
@@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx_nic_push_rx_indir_table(efx);
+ efx->type->rx_push_rss_config(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..18d6f761f4d0 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
}
/**************************************************************************
*
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
* EEPROM/flash
*
**************************************************************************
@@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
struct falcon_board *board;
int rc;
+ efx->primary = efx; /* only one usable function per controller */
+
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
@@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ falcon_b0_rx_push_rss_config(efx);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
}
+/* We don't actually pull stats on falcon. Wait 10ms so that
+ * they arrive when we call this just after start_stats.
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
void falcon_stop_nic_stats(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = efx_port_dummy_op_void,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
@@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = falcon_b0_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..f72489a105ca 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_farch_rx_push_indir_table(efx);
-
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
}
static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
- struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
{
/* If there's only one channel then disable RSS for non VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
- spec->priority = EFX_FILTER_PRI_REQUIRED;
- spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ spec->priority = EFX_FILTER_PRI_AUTO;
+ spec->flags = (EFX_FILTER_FLAG_RX |
(efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
rc = -EEXIST;
goto out;
}
- if (spec.priority < saved_spec->priority &&
- !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
- saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ if (spec.priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
- if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
- /* Just make sure it won't be removed */
- saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
- rc = 0;
- goto out;
- }
- /* Retain the RX_STACK flag */
- spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+ saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
}
/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
if (!test_bit(filter_idx, table->used_bitmap) ||
- spec->priority > priority)
+ spec->priority != priority)
return -ENOENT;
- if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ efx_farch_filter_init_rx_auto(efx, spec);
efx_farch_filter_push_rx_config(efx);
} else {
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- efx_farch_filter_remove(efx, table, filter_idx, priority);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+ efx_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
spin_unlock_bh(&efx->filter_lock);
}
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
priority);
+ return 0;
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
spec = &table->spec[i];
spec->type = EFX_FARCH_FILTER_UC_DEF + i;
- efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_init_rx_auto(efx, spec);
__set_bit(i, table->used_bitmap);
}
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 63c77a557178..3ef298d3c47e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
/**
* enum efx_filter_priority - priority of a hardware filter specification
* @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum efx_filter_priority {
EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
EFX_FILTER_PRI_MANUAL,
EFX_FILTER_PRI_REQUIRED,
};
@@ -78,19 +82,18 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- * network stack. The filter must have a priority of
- * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
- * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- * request with priority %EFX_FILTER_PRI_MANUAL will reset the
- * steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
- EFX_FILTER_FLAG_RX_STACK = 0x04,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 4b0bd8a1514d..eb59abb57e85 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
unsigned int cmd;
size_t inlen;
size_t outlen;
+ bool quiet;
efx_mcdi_async_completer *complete;
unsigned long cookie;
/* followed by request/response buffer */
@@ -101,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
netif_err(efx, probe, efx->net_dev,
"Host already registered with MCPU\n");
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
return 0;
}
@@ -191,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
@@ -402,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
struct efx_nic *efx = mcdi->efx;
struct efx_mcdi_async_param *async;
- size_t hdr_len, data_len;
+ size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
int rc;
if (cmpxchg(&mcdi->state,
@@ -444,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
outbuf = (efx_dword_t *)(async + 1);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
async->complete(efx, async->cookie, rc, outbuf, data_len);
kfree(async);
@@ -519,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
return 0;
}
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- int rc;
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
- if (rc)
- return rc;
- return efx_mcdi_rpc_finish(efx, cmd, inlen,
- outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
}
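A minimal caller-side sketch of that convention follows (MC_CMD_EXAMPLE and the tolerated -ENOENT case are hypothetical); efx_mcdi_read_assertion() further down uses the same shape.

/* Sketch only: run the command quietly, then log just the errors the
 * caller does not expect.
 */
static int example_quiet_caller(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, 8);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_EXAMPLE, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
	if (rc && rc != -ENOENT)
		efx_mcdi_display_error(efx, MC_CMD_EXAMPLE, 0,
				       outbuf, outlen, rc);
	return rc;
}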
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -543,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
}
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context. It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen, size_t outlen,
- efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
struct efx_mcdi_async_param *async;
@@ -581,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
if (rc)
return rc;
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
GFP_ATOMIC);
if (!async)
@@ -589,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
async->cmd = cmd;
async->inlen = inlen;
async->outlen = outlen;
+ async->quiet = quiet;
async->complete = complete;
async->cookie = cookie;
memcpy(async + 1, inbuf, inlen);
@@ -617,79 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
return rc;
}
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
+
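A hedged usage sketch of the async API documented above (the command, lengths and cookie are illustrative; the completer signature matches the call made from efx_mcdi_complete_async()).

/* Sketch only: the completion handler runs in atomic context exactly once. */
static void example_async_complete(struct efx_nic *efx, unsigned long cookie,
				   int rc, efx_dword_t *outbuf,
				   size_t outlen_actual)
{
	if (rc)
		netif_dbg(efx, hw, efx->net_dev,
			  "async MCDI (cookie %lu) failed: rc=%d\n",
			  cookie, rc);
}

static int example_issue_async(struct efx_nic *efx)
{
	/* No input parameters and no response payload expected. */
	return efx_mcdi_rpc_async(efx, MC_CMD_EXAMPLE, NULL, 0, 0,
				  example_async_complete, 0);
}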
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
+
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
-
- if (mcdi->mode == MCDI_MODE_POLL)
- rc = efx_mcdi_poll(efx);
- else
- rc = efx_mcdi_await_completion(efx);
-
- if (rc != 0) {
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
-
- if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
- netif_err(efx, hw, efx->net_dev,
- "MCDI request was completed without an event\n");
- rc = 0;
- }
-
- /* Close the race with efx_mcdi_ev_cpl() executing just too late
- * and completing a request we've just cancelled, by ensuring
- * that the seqno check therein fails.
- */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- ++mcdi->credits;
- spin_unlock_bh(&mcdi->iface_lock);
- }
-
- if (rc == 0) {
- size_t hdr_len, data_len;
-
- /* At the very least we need a memory barrier here to ensure
- * we pick up changes from efx_mcdi_ev_cpl(). Protect against
- * a spurious efx_mcdi_ev_cpl() running concurrently by
- * acquiring the iface_lock. */
- spin_lock_bh(&mcdi->iface_lock);
- rc = mcdi->resprc;
- hdr_len = mcdi->resp_hdr_len;
- data_len = mcdi->resp_data_len;
- spin_unlock_bh(&mcdi->iface_lock);
-
- BUG_ON(rc > 0);
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
- if (rc == 0) {
- efx->type->mcdi_read_response(efx, outbuf, hdr_len,
- min(outlen, data_len));
- if (outlen_actual != NULL)
- *outlen_actual = data_len;
- } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
- ; /* Don't reset if MC_CMD_REBOOT returns EIO */
- else if (rc == -EIO || rc == -EINTR) {
- netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
- -rc);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
- } else
- netif_dbg(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
- if (rc == -EIO || rc == -EINTR) {
- msleep(MCDI_STATUS_SLEEP_MS);
- efx_mcdi_poll_reboot(efx);
- mcdi->new_epoch = true;
- }
- }
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
- efx_mcdi_release(mcdi);
- return rc;
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -834,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
spin_unlock(&mcdi->iface_lock);
}
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -867,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- netif_info(efx, hw, efx->net_dev,
- "MC Scheduler error address=0x%x\n", data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, -EIO);
break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
break;
@@ -886,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
case MCDI_EVENT_CODE_TX_FLUSH:
case MCDI_EVENT_CODE_RX_FLUSH:
/* Two flush events will be sent: one to the same event
@@ -1000,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
/* We currently assume we have control of the external link
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
if (driver_operating &&
- outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
- (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1097,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1220,7 +1366,7 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1235,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
retry = 2;
do {
MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
- if (rc)
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
return rc;
+ }
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EIO;
@@ -1319,17 +1469,18 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
}
-static int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_func(struct efx_nic *efx)
{
- int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+ MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
return rc;
}
@@ -1347,7 +1498,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1368,7 +1518,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
- return efx_mcdi_reset_port(efx);
+ return efx_mcdi_reset_func(efx);
}
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
@@ -1449,13 +1599,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1496,13 +1639,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
int rc;
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1532,13 +1668,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1558,14 +1687,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1585,13 +1710,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1609,13 +1727,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1630,13 +1741,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 15816cacb548..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition {
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
+ u32 fn_flags;
};
#ifdef CONFIG_SFC_MCDI_MON
@@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
size_t inlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
*/
#define MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
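The max_t() here presumably guarantees room for an MCDI error response (error code plus argument, 8 bytes) even when the command's nominal output is shorter, so the buffer can later be handed to efx_mcdi_display_error(); for example:

/* Nominal 4-byte response, but sizeof(outbuf) is still 8 bytes */
MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 4);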
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d72ad4fc3617..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
EFX_HWMON_IN, /* voltage */
EFX_HWMON_CURR, /* current */
EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
};
static const struct {
@@ -33,13 +42,13 @@ static const struct {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
SENSOR(IN_1V0, "1.0V supply", IN, -1),
SENSOR(IN_1V2, "1.2V supply", IN, -1),
SENSOR(IN_1V8, "1.8V supply", IN, -1),
@@ -47,36 +56,42 @@ static const struct {
SENSOR(IN_3V3, "3.3V supply", IN, -1),
SENSOR(IN_12V0, "12.0V supply", IN, -1),
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
- SENSOR(IN_VREF, "ref. voltage", IN, -1),
- SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
- SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
- SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
- SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
- SENSOR(FAN_0, NULL, COOL, -1),
- SENSOR(FAN_1, NULL, COOL, -1),
- SENSOR(FAN_2, NULL, COOL, -1),
- SENSOR(FAN_3, NULL, COOL, -1),
- SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
SENSOR(NIC_POWER, "Board power use", POWER, -1),
SENSOR(IN_0V9, "0.9V supply", IN, -1),
- SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
- SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
- SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
- SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
- SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
- SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
- SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
SENSOR(CONTROLLER_VPTAT_EXTADC,
- "Controller int. temp. raw (at ADC)", IN, -1),
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
- "Controller int. temp. (via ADC)", TEMP, -1),
+ "Controller die temp. (ext. ADC)", TEMP, -1),
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
unsigned int type, state, value;
- const char *name = NULL, *state_txt;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
/* Deal gracefully with the board having more sensors than we
* know about, but do not expect new sensor states. */
- if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
if (!name)
name = "No sensor name available";
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
netif_err(efx, hw, efx->net_dev,
- "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- type, name, state_txt, value);
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
}
#ifdef CONFIG_SFC_MCDI_MON
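With the unit table above, a sensor event message now carries a unit suffix; an illustrative example line (sensor number, state and value made up):

Sensor 0 (Controller board temp.) reports condition 'warning' for value 85 degC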
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
#define MC_CMD_ERR_MAC_EXIST 0x1009
/* Slave core not present */
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
#define MC_CMD_ERR_CODE_OFST 0
@@ -390,6 +392,8 @@
* AOE_ERR_DATA)
*/
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
/* enum: the MC has detected an uncorrectable error */
#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -481,15 +489,32 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
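A PTP_TIME event carries only the major value plus bits 26:19 of the minor value, so the receiver has to splice those bits into a recently synchronised full minor value; a very rough sketch of that splice (wrap-around between the sync point and the event is deliberately ignored, and last_minor is an assumed variable):

/* Sketch: merge the 8 event bits into a recent full minor value */
static u32 example_expand_ptp_minor(u32 last_minor, u32 minor_26_19)
{
	return (last_minor & ~(0xffu << 19)) | (minor_26_19 << 19);
}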
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
#define FCDI_EVENT_CODE_TIMED_READ 0x5
/* enum: One or more PPS IN events */
#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define FCDI_EXTENDED_EVENT_LENMIN 16
-#define FCDI_EXTENDED_EVENT_LENMAX 248
-#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
/***********************************/
@@ -642,6 +683,10 @@
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Entering the main image via a copy of a single word from and to this
* address indicates that it should not attempt to start the datapath CPUs.
* This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x16
+#define MC_CMD_PTP_OP_MAX 0x1b
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
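A minimal sketch of issuing the subscribe operation for one event queue; the command/op field name follows the MC_CMD_PTP_IN_CMD_OFST comments above, and the wrapper itself is illustrative, not part of this patch:

static int example_ptp_time_subscribe(struct efx_nic *efx, u32 evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);

	MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, evq);
	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}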
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1088,15 +1203,29 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
/* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
/* enum: Timestamp trigger GPIO not working */
#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
@@ -1198,6 +1341,62 @@
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware), a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware), a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+
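For the SECONDS_27FRACTION format the minor value counts 2^-27 s ticks, so converting it to nanoseconds is a multiply and shift; a small sketch (the 64-bit intermediate avoids overflow):

static u32 example_s27_minor_to_ns(u32 minor)
{
	/* minor is a fraction of a second in units of 2^-27 s */
	return (u32)(((u64)minor * NSEC_PER_SEC) >> 27);
}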
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1923,6 +2122,8 @@
#define MC_CMD_MEDIA_SFP_PLUS 0x5
/* enum: 10GBaseT. */
#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
/* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
/* MC_CMD_SENSOR_ENTRY_LEN 8 */
/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
/* ID */
@@ -4226,7 +4452,7 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -6826,6 +7076,10 @@
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
@@ -6942,39 +7196,68 @@
/***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
/***********************************/
@@ -7026,6 +7309,15 @@
#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
/* Requested operation */
@@ -7135,6 +7512,37 @@
/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
/***********************************/
/* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
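A minimal sketch of querying the licensed state of one application (e.g. LICENSED_APP_ID_PTP) with the definitions above; the helper itself is illustrative only:

static bool example_app_licensed(struct efx_nic *efx, u32 app_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc || outlen < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)
		return false;
	return MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) ==
	       MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED;
}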
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
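A minimal sketch of building the request for the simple single-queue, non-promiscuous case using the fields defined above (the wrapper itself, and passing 0xffffffff for the unused RSS context, are assumptions):

static int example_set_port_sniff(struct efx_nic *efx, u32 rxq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);

	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS, 0);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
	return efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}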
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
- goto fail;
+ return rc;
if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
MC_CMD_MDIO_STATUS_GOOD)
return -EIO;
return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
}
static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
efx->link_state.up = false;
- } else {
+ else
efx_mcdi_phy_decode_link(
efx, &efx->link_state,
MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- }
return !efx_link_state_equal(&efx->link_state, &old_state);
}
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return;
- }
ecmd->lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
+ if (rc)
return true;
- }
return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
- int period = enable ? 1000 : 0;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, !!enable,
MAC_STATS_IN_CLEAR, clear,
- MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
MAC_STATS_IN_PERIODIC_CLEAR, 0,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
return rc;
}
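
The new efx_stats_action argument folds the old dma_addr/dma_len/enable parameters into a single switch: the DMA address now always comes from efx->stats_buffer, and the change/enable/period/dma_len values are derived from the action. The sketch below is a standalone userspace illustration of that derivation only; STATS_DMA_LEN is a placeholder (the real length is MC_CMD_MAC_NSTATS * sizeof(u64) from mcdi_pcol.h) and show_action() is not a driver function.

#include <stdio.h>

enum efx_stats_action {
        EFX_STATS_ENABLE,
        EFX_STATS_DISABLE,
        EFX_STATS_PULL,
};

/* Placeholder for MC_CMD_MAC_NSTATS * sizeof(u64); value is illustrative. */
#define STATS_DMA_LEN 512u

static void show_action(enum efx_stats_action action)
{
        int change = action == EFX_STATS_PULL ? 0 : 1;
        int enable = action == EFX_STATS_ENABLE ? 1 : 0;
        int period = action == EFX_STATS_ENABLE ? 1000 : 0;
        unsigned int dma_len = action != EFX_STATS_DISABLE ? STATS_DMA_LEN : 0;

        printf("action=%d PERIODIC_CHANGE=%d PERIODIC_ENABLE=%d PERIOD_MS=%d dma_len=%u\n",
               action, change, enable, period, dma_len);
}

int main(void)
{
        show_action(EFX_STATS_ENABLE);  /* start periodic DMA, 1000 ms period */
        show_action(EFX_STATS_DISABLE); /* stop periodic DMA, no buffer */
        show_action(EFX_STATS_PULL);    /* one-shot DMA, periodic state unchanged */
        return 0;
}
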
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
}
int efx_mcdi_port_probe(struct efx_nic *efx)
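
efx_mcdi_mac_pull_stats() above works by invalidating the generation-end marker in the stats DMA buffer, issuing a one-shot EFX_STATS_PULL, then busy-waiting for the firmware to rewrite the marker, giving up after 10 attempts of 100 us. A minimal userspace sketch of that bounded-poll pattern follows; it is an assumption-laden illustration, not driver code: udelay() becomes usleep(), the marker is a plain volatile variable rather than a DMA buffer, and the return code exists only to make the sketch observable (the driver itself returns void and carries on even if the wait times out).

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define GENERATION_INVALID 0xffffffffffffffffULL
#define WAIT_US            100
#define WAIT_ATTEMPTS      10

/* In the driver this lives in the stats DMA buffer and is written by the MC;
 * here it is just a variable a stub "firmware" could update.
 */
static volatile uint64_t generation_marker = GENERATION_INVALID;

static int wait_for_stats(void)
{
        int attempts = WAIT_ATTEMPTS;

        while (generation_marker == GENERATION_INVALID && attempts-- != 0)
                usleep(WAIT_US);

        return generation_marker != GENERATION_INVALID ? 0 : -1;
}

int main(void)
{
        generation_marker = 1;  /* pretend the dump completed immediately */
        printf("stats %s\n", wait_for_stats() == 0 ? "arrived" : "timed out");
        return 0;
}
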
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 542a0d252ae0..af2b8c59a903 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
+struct hwtstamp_config;
struct efx_self_tests;
@@ -287,12 +288,9 @@ struct efx_rx_buffer {
* Used to facilitate sharing dma mappings between recycled rx buffers
* and those passed up to the kernel.
*
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- * When refcnt falls to zero, the page is unmapped for dma
* @dma_addr: The dma address of this page.
*/
struct efx_rx_page_state {
- unsigned refcnt;
dma_addr_t dma_addr;
unsigned int __pad[0] ____cacheline_aligned;
@@ -362,10 +360,11 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-enum efx_rx_alloc_method {
- RX_ALLOC_METHOD_AUTO = 0,
- RX_ALLOC_METHOD_SKB = 1,
- RX_ALLOC_METHOD_PAGE = 2,
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
};
/**
@@ -407,6 +406,9 @@ enum efx_rx_alloc_method {
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
*/
struct efx_channel {
struct efx_nic *efx;
@@ -445,6 +447,10 @@ struct efx_channel {
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
};
/**
@@ -520,15 +526,6 @@ enum nic_state {
STATE_RECOVERY = 3, /* device recovering from PCI error */
};
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
/* Forward declaration */
struct efx_nic;
@@ -651,6 +648,13 @@ struct vfdi_status;
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -694,6 +698,8 @@ struct vfdi_status;
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@@ -763,6 +769,7 @@ struct vfdi_status;
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -777,6 +784,9 @@ struct efx_nic {
/* The following fields should be written very rarely */
char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
struct pci_dev *pci_dev;
unsigned int port_num;
const struct efx_nic_type *type;
@@ -828,6 +838,7 @@ struct efx_nic {
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
+ int rx_packet_ts_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -852,10 +863,14 @@ struct efx_nic {
struct work_struct mac_work;
bool port_enabled;
+ bool mc_bist_for_other_fn;
bool port_initialized;
struct net_device *net_dev;
struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
const struct efx_phy_operations *phy_op;
@@ -907,6 +922,8 @@ struct efx_nic {
struct efx_ptp_data *ptp_data;
+ char *vpd_sn;
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -959,6 +976,7 @@ struct efx_mtd_partition {
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
@@ -997,7 +1015,7 @@ struct efx_mtd_partition {
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1017,7 +1035,8 @@ struct efx_mtd_partition {
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1037,6 +1056,12 @@ struct efx_mtd_partition {
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ * must validate and update rx_filter.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1046,6 +1071,7 @@ struct efx_mtd_partition {
* @max_dma_mask: Maximum possible DMA mask
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1055,6 +1081,7 @@ struct efx_mtd_partition {
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1077,6 +1104,7 @@ struct efx_nic_type {
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1105,7 +1133,7 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_indir_table)(struct efx_nic *efx);
+ void (*rx_push_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1130,8 +1158,8 @@ struct efx_nic_type {
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
- void (*filter_clear_rx)(struct efx_nic *efx,
- enum efx_filter_priority priority);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
@@ -1155,6 +1183,9 @@ struct efx_nic_type {
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
int revision;
unsigned int txd_ptr_tbl_base;
@@ -1165,6 +1196,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
@@ -1173,6 +1205,7 @@ struct efx_nic_type {
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
}
}
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
+
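
The intent of efx_nic_fix_nodesc_drop_stat() is easier to see with numbers: any increase in the hardware no-descriptor drop counter seen while the interface is down (or on the first update after it comes up) is accumulated into rx_nodesc_drops_while_down and subtracted from what gets reported upward, so only drops that happen while the interface is up are visible. The sketch below reworks the same arithmetic as a standalone userspace function under an assumed "interface up" flag; the struct and function names are mine, not the driver's.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct nodesc_state {
        uint64_t total;      /* raw hardware counter, monotonic */
        uint64_t while_down; /* drops accumulated while interface was down */
        bool prev_up;        /* interface state at the previous update */
};

static uint64_t fix_nodesc_drops(struct nodesc_state *s, bool up, uint64_t raw)
{
        /* If down, or this is the first update after coming up, the delta
         * since the last update is attributed to the down period.
         */
        if (!up || !s->prev_up)
                s->while_down += raw - s->total;
        s->total = raw;
        s->prev_up = up;
        return raw - s->while_down;  /* value reported to the stack */
}

int main(void)
{
        struct nodesc_state s = { 0 };

        /* 5 drops while down, 2 more racing with the interface coming up,
         * then 3 genuine drops while up: reported values are 0, 0, 3.
         */
        printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, false, 5));
        printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, true, 7));
        printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, true, 10));
        return 0;
}
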
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 91c63ec79c5f..a001fae1a8d7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -412,8 +412,8 @@ enum {
EF10_STAT_rx_dp_q_disabled_packets,
EF10_STAT_rx_dp_di_dropped_packets,
EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_emerg_fetch,
- EF10_STAT_rx_dp_emerg_wait,
+ EF10_STAT_rx_dp_hlb_fetch,
+ EF10_STAT_rx_dp_hlb_wait,
EF10_STAT_COUNT
};
@@ -554,12 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
@@ -678,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority, u32 filter_id,
struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -747,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- efx->type->rx_push_indir_table(efx);
-}
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
@@ -774,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3dd39dcfe36b..d7a36829649a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
#define SYNCHRONISATION_GRANULARITY_NS 200
/* Minimum permitted length of a (corrected) synchronisation time */
-#define MIN_SYNCHRONISATION_NS 120
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
/* Maximum permitted length of a (corrected) synchronisation time */
#define MAX_SYNCHRONISATION_NS 1000
@@ -195,26 +195,29 @@ struct efx_ptp_event_rx {
/**
* struct efx_ptp_timeset - Synchronisation between host and MC
* @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
* @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
* host end time being seen
* @window: Difference of host_end and host_start
* @valid: Whether this timeset is valid
*/
struct efx_ptp_timeset {
u32 host_start;
- u32 seconds;
- u32 nanoseconds;
+ u32 major;
+ u32 minor;
u32 host_end;
- u32 waitns;
+ u32 wait;
u32 window; /* Derived: end - start, allowing for wrap */
};
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
* @rxq: Receive queue (awaiting timestamps)
* @txq: Transmit queue
* @evt_list: List of MC receive events awaiting packets
@@ -231,41 +234,42 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
* @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
* @pps_work: pps work task for handling pps events
* @pps_workwq: pps work queue
* @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
* @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
* allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
* @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @fast_syncs: Number of synchronisations requiring short delay
* @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
* @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- * Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
*/
struct efx_ptp_data {
+ struct efx_nic *efx;
struct efx_channel *channel;
+ bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
struct list_head evt_list;
@@ -282,14 +286,22 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
- unsigned last_sync_ns;
- unsigned base_sync_ns;
- bool base_sync_valid;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -297,6 +309,16 @@ struct efx_ptp_data {
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -309,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ memset(outbuf, 0, sizeof(outbuf));
+ }
+ efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is s and ns */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
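
A worked example helps with the s27 constants above: the Huntington minor word counts in units of 2^-27 s, so half a second is 0x4000000 ticks, and both conversion directions are a rounded multiply-and-shift rather than a 64-bit divide. The standalone sketch below re-derives the two minor-word conversions outside the driver so the constants can be checked numerically; it mirrors the defines above, but the helper names are mine and the seconds/carry handling is omitted.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC     1000000000ULL
#define S27_TO_NS_SHIFT  27
#define NS_TO_S27_MULT   (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
#define NS_TO_S27_SHIFT  (63 - S27_TO_NS_SHIFT)

static uint32_t ns_to_s27(uint32_t ns)
{
        /* Rounded multiply-and-shift equivalent of ns * 2^27 / 1e9 */
        return (uint32_t)(((uint64_t)ns * NS_TO_S27_MULT +
                           (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
}

static uint32_t s27_to_ns(uint32_t minor)
{
        /* Rounded equivalent of minor * 1e9 / 2^27 */
        return (uint32_t)(((uint64_t)minor * NSEC_PER_SEC +
                           (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
}

int main(void)
{
        /* Half a second: 500000000 ns <-> 0x4000000 ticks */
        printf("500000000 ns -> 0x%x ticks\n", ns_to_s27(500000000U));
        printf("0x4000000 ticks -> %u ns\n", s27_to_ns(0x4000000U));
        /* A round trip stays within one tick, i.e. about 7.45 ns */
        printf("12345678 ns -> %u ns\n", s27_to_ns(ns_to_s27(12345678U)));
        return 0;
}
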
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs, i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information, store it. For older firmware that
+ * does not implement the extended command, use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
- efx->ptp_data->channel->channel);
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
/* Disable MCDI PTP support.
@@ -332,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
}
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -404,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
unsigned start_ns, end_ns;
timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
- timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
- timeset->nanoseconds = MCDI_DWORD(data,
- PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
- timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
/* Ignore seconds */
start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -437,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
MCDI_VAR_ARRAY_LEN(response_length,
PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
- unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
struct timespec delta;
+ ktime_t mc_time;
if (number_readings == 0)
return -EAGAIN;
- /* Read the set of results and increment stats for any results that
- * appera to be erroneous.
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC times when it finishes reading the
+ * host time so the corrected window time should be fairly constant
+ * for a given platform. Increment stats for any results that appear
+ * to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- }
- /* Find the last good host-MC synchronization result. The MC times
- * when it finishes reading the host time so the corrected window time
- * should be fairly constant for a given platform.
- */
- total = 0;
- for (i = 0; i < number_readings; i++)
- if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
- unsigned win;
-
- win = ptp->timeset[i].window - ptp->timeset[i].waitns;
- if (win >= MIN_SYNCHRONISATION_NS &&
- win < MAX_SYNCHRONISATION_NS) {
- total += ptp->timeset[i].window;
- ngood++;
- last_good = i;
- }
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+ * times. If it is smaller than this then this is most likely
+ * to be a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
}
+ }
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns\n",
- ptp->base_sync_ns);
+ "PTP no suitable synchronisations\n");
return -EAGAIN;
}
- /* Average minimum this synchronisation */
- ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
- if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
- ptp->base_sync_valid = true;
- ptp->base_sync_ns = ptp->last_sync_ns;
- }
+ /* Convert the NIC time into kernel time. No correction is required -
+ * this time is the output of a firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
/* Calculate delay from actual PPS to last_time */
- delta.tv_nsec =
- ptp->timeset[last_good].nanoseconds +
+ delta = ktime_to_timespec(mc_time);
+ delta.tv_nsec +=
last_time->ts_real.tv_nsec -
(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
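
Each reading is classified from two quantities: the raw host window (host_end - host_start) and the corrected window (the raw window minus the MC's reported wait, converted from NIC ticks back to nanoseconds). The standalone sketch below applies the same thresholds to sample values; the constants match this file (with the default minimum), but classify() and the strings it returns are mine, added only to make the three failure counters concrete.

#include <stdio.h>

#define SYNCHRONISATION_GRANULARITY_NS  200
#define DEFAULT_MIN_SYNCHRONISATION_NS  120
#define MAX_SYNCHRONISATION_NS          1000

static const char *classify(int window_ns, int wait_ns)
{
        int corrected = window_ns - wait_ns;

        if (window_ns < SYNCHRONISATION_GRANULARITY_NS)
                return "invalid_sync_windows";
        if (corrected >= MAX_SYNCHRONISATION_NS)
                return "oversize_sync_windows";
        if (corrected < DEFAULT_MIN_SYNCHRONISATION_NS)
                return "undersize_sync_windows";
        return "good (candidate for last_good)";
}

int main(void)
{
        printf("%s\n", classify(150, 10));    /* invalid  */
        printf("%s\n", classify(1500, 300));  /* oversize */
        printf("%s\n", classify(300, 250));   /* undersize */
        printf("%s\n", classify(400, 150));   /* good */
        return 0;
}
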
@@ -553,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
loops++;
}
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
if (ACCESS_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
@@ -561,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
synch_buf, sizeof(synch_buf),
&response_length);
- if (rc == 0)
+ if (rc == 0) {
rc = efx_ptp_process_times(efx, synch_buf, response_length,
&last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if the synchronize fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
return rc;
}
@@ -602,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
memset(&timestamps, 0, sizeof(timestamps));
- timestamps.hwtstamp = ktime_set(
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
- MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
skb_tstamp_tx(skb, &timestamps);
@@ -622,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
struct list_head *cursor;
struct list_head *next;
+ if (ptp->rx_ts_inline)
+ return;
+
/* Drop time-expired events */
spin_lock_bh(&ptp->evt_lock);
if (!list_empty(&ptp->evt_list)) {
@@ -655,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
struct efx_ptp_match *match;
enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
spin_lock_bh(&ptp->evt_lock);
evts_waiting = !list_empty(&ptp->evt_list);
spin_unlock_bh(&ptp->evt_lock);
@@ -696,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- bool rc = false;
struct sk_buff *skb;
while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -713,13 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (efx_ptp_match_rx(efx, skb) ==
PTP_PACKET_STATE_MATCHED) {
- rc = true;
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- if (net_ratelimit())
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ ++ptp->rx_no_timestamp;
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -727,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
break;
}
}
-
- return rc;
}
/* Complete processing of a received packet */
@@ -739,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
local_bh_enable();
}
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_filter_spec rxfilter;
int rc;
- ptp->reset_required = false;
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
@@ -778,23 +1090,37 @@ static int efx_ptp_start(struct efx_nic *efx)
goto fail;
ptp->rxfilter_general = rc;
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
rc = efx_ptp_enable(efx);
if (rc != 0)
- goto fail2;
+ goto fail;
ptp->evt_frag_idx = 0;
ptp->current_adjfreq = 0;
- ptp->rxfilter_installed = true;
return 0;
-fail2:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
-
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
@@ -810,13 +1136,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
rc = efx_ptp_disable(efx);
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
- }
+ efx_ptp_remove_multicast_filters(efx);
/* Make sure RX packets are really delivered */
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -844,7 +1164,7 @@ static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
container_of(work, struct efx_ptp_data, pps_work);
- struct efx_nic *efx = ptp->channel->efx;
+ struct efx_nic *efx = ptp->efx;
struct ptp_clock_event ptp_evt;
if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -855,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
-/* Process any pending transmissions and timestamp any received packets.
- */
static void efx_ptp_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp_data =
container_of(work, struct efx_ptp_data, work);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
struct sk_buff *skb;
struct sk_buff_head tempq;
@@ -874,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_drop_time_expired_events(efx);
__skb_queue_head_init(&tempq);
- if (efx_ptp_process_events(efx, &tempq) ||
- !skb_queue_empty(&ptp_data->txq)) {
+ efx_ptp_process_events(efx, &tempq);
- while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
- }
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
}
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime = efx_phc_gettime,
+ .settime = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
- struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp;
int rc = 0;
unsigned int pos;
- channel->irq_moderation = 0;
- channel->rx_queue.core_index = 0;
-
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
return -ENOMEM;
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
- ptp->channel = channel;
skb_queue_head_init(&ptp->rxq);
skb_queue_head_init(&ptp->txq);
ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -929,33 +1255,32 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
ptp->evt_overflow = false;
- ptp->phc_clock_info.owner = THIS_MODULE;
- snprintf(ptp->phc_clock_info.name,
- sizeof(ptp->phc_clock_info.name),
- "%pm", efx->net_dev->perm_addr);
- ptp->phc_clock_info.max_adj = MAX_PPB;
- ptp->phc_clock_info.n_alarm = 0;
- ptp->phc_clock_info.n_ext_ts = 0;
- ptp->phc_clock_info.n_per_out = 0;
- ptp->phc_clock_info.pps = 1;
- ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
- ptp->phc_clock_info.adjtime = efx_phc_adjtime;
- ptp->phc_clock_info.gettime = efx_phc_gettime;
- ptp->phc_clock_info.settime = efx_phc_settime;
- ptp->phc_clock_info.enable = efx_phc_enable;
-
- ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
- &efx->pci_dev->dev);
- if (IS_ERR(ptp->phc_clock)) {
- rc = PTR_ERR(ptp->phc_clock);
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
goto fail3;
- }
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
ptp->nic_ts_enabled = false;
@@ -976,14 +1301,27 @@ fail1:
return rc;
}
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
if (!efx->ptp_data)
return;
- (void)efx_ptp_disable(channel->efx);
+ (void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
cancel_work_sync(&efx->ptp_data->pps_work);
@@ -991,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- ptp_clock_unregister(efx->ptp_data->phc_clock);
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
destroy_workqueue(efx->ptp_data->workwq);
- destroy_workqueue(efx->ptp_data->pps_workwq);
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
}
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
static void efx_ptp_get_channel_name(struct efx_channel *channel,
char *buf, size_t len)
{
@@ -1080,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* Does this packet require timestamping? */
if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
- struct skb_shared_hwtstamps *timestamps;
-
match->state = PTP_PACKET_STATE_UNMATCHED;
- /* Clear all timestamps held: filled in later */
- timestamps = skb_hwtstamps(skb);
- memset(timestamps, 0, sizeof(*timestamps));
-
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
@@ -1132,8 +1471,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
- unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
@@ -1177,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
- bool enable_wanted = false;
- unsigned int new_mode;
int rc;
if (init->flags)
@@ -1188,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
- new_mode = efx->ptp_data->mode;
- /* Determine whether any PTP HW operations are required */
- switch (init->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V1;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Although these three are accepted only IPV4 packets will be
- * timestamped
- */
- init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
- enable_wanted = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- /* Non-IP + IPv6 timestamping not supported */
- return -ERANGE;
- break;
- default:
- return -ERANGE;
- }
-
- if (init->tx_type != HWTSTAMP_TX_OFF)
- enable_wanted = true;
-
- /* Old versions of the firmware do not support the improved
- * UUID filtering option (SF bug 33070). If the firmware does
- * not accept the enhanced mode, fall back to the standard PTP
- * v2 UUID filtering.
- */
- rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
- if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
- rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
- if (rc != 0)
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
return rc;
efx->ptp_data->config = *init;
-
return 0;
}
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
if (!ptp)
return;
@@ -1252,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
{
struct hwtstamp_config config;
int rc;
@@ -1283,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
? -EFAULT : 0;
}
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
struct efx_ptp_data *ptp = efx->ptp_data;
@@ -1302,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
struct efx_ptp_event_rx *evt = NULL;
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
if (ptp->evt_frag_idx != 3) {
ptp_event_failure(efx, 3);
return;
@@ -1320,9 +1627,10 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
MCDI_EVENT_SRC) << 8) |
(EFX_QWORD_FIELD(ptp->evt_frags[0],
MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = ktime_set(
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1360,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
struct efx_ptp_data *ptp = efx->ptp_data;
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+ if (!ptp) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ return;
+ }
+
if (!ptp->enabled)
return;
@@ -1397,12 +1712,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
}
}
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* if sync events have been disabled then we want to silently ignore
+ * this event, so throw away result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* it's outside tolerance in both directions. this might be
+ * indicative of us missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
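
The reconstruction above can be exercised with a small standalone sketch. It mirrors the constants in this patch (2^27 minor ticks per second, a quarter-second sync window, one-tenth-second fuzz) but is a simplified user-space model of the logic, not the driver code itself:

    /* Sketch: recover a packet's full (major, minor) timestamp from its
     * 27-bit minor value plus the most recent sync event.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define TICKS_PER_SEC   (1u << 27)              /* MINOR_TICKS_PER_SECOND */
    #define SYNC_WINDOW     (TICKS_PER_SEC / 4)     /* 4 sync events per second */
    #define FUZZ_TICKS      (TICKS_PER_SEC / 10)

    static int reconstruct(uint32_t sync_major, uint32_t sync_minor,
                           uint32_t pkt_minor, uint32_t *pkt_major)
    {
            uint32_t diff = (pkt_minor - sync_minor) & (TICKS_PER_SEC - 1);
            uint32_t carry = (sync_minor + diff > TICKS_PER_SEC) ? 1 : 0;

            if (diff <= SYNC_WINDOW + FUZZ_TICKS)
                    *pkt_major = sync_major + carry;        /* packet after sync */
            else if (diff >= TICKS_PER_SEC - FUZZ_TICKS)
                    *pkt_major = sync_major - 1 + carry;    /* events crossed */
            else
                    return -1;                              /* out of tolerance */
            return 0;
    }

    int main(void)
    {
            uint32_t major;
            /* sync event at 100.9 s; packet arrives 0.2 s later (101.1 s) */
            if (!reconstruct(100, TICKS_PER_SEC - TICKS_PER_SEC / 10,
                             TICKS_PER_SEC / 10, &major))
                    printf("packet major = %u\n", major);   /* prints 101 */
            return 0;
    }
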
+
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1432,18 +1834,20 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
+ u32 nic_major, nic_minor;
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
- struct timespec delta_ts = ns_to_timespec(delta);
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
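
ns_to_nic_time is a function pointer on ptp_data whose assignment is not shown in this hunk. The sketch below shows one plausible conversion for the 2^27-ticks-per-second sub-second format implied by efx_ptp_s27_to_ktime() and MINOR_TICKS_PER_SECOND; it is an assumption for illustration, not the driver's actual implementation:

    /* Assumed conversion: signed ns delta -> seconds ("major") plus
     * 2^-27 s ticks ("minor"), keeping the sub-second part non-negative.
     */
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000LL

    static void ns_to_s27(int64_t ns, uint32_t *major, uint32_t *minor)
    {
            int64_t secs = ns / NSEC_PER_SEC;
            int64_t rem  = ns - secs * NSEC_PER_SEC;

            if (rem < 0) {
                    secs -= 1;
                    rem += NSEC_PER_SEC;
            }
            *major = (uint32_t)secs;
            *minor = (uint32_t)((rem << 27) / NSEC_PER_SEC);
    }
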
@@ -1453,10 +1857,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data *ptp_data = container_of(ptp,
struct efx_ptp_data,
phc_clock_info);
- struct efx_nic *efx = ptp_data->channel->efx;
+ struct efx_nic *efx = ptp_data->efx;
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
+ ktime_t kt;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1466,8 +1871,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
if (rc != 0)
return rc;
- ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
- ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec(kt);
return 0;
}
@@ -1519,7 +1926,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.keep_eventq = false,
};
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
{
/* Check whether PTP is implemented on this NIC. The DISABLE
* operation will succeed if and only if it is implemented.
@@ -1533,9 +1940,15 @@ void efx_ptp_start_datapath(struct efx_nic *efx)
{
if (efx_ptp_restart(efx))
netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
}
void efx_ptp_stop_datapath(struct efx_nic *efx)
{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
efx_ptp_stop(efx);
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42488df1f4ec..48588ddf81b0 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
do {
- rc = efx_init_rx_buffers(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
@@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(efx, eh);
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
if (unlikely(skb == NULL))
return NULL;
EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
- memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
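
The buffer arithmetic above can be pictured as follows (a sketch of the copy/reserve/put sequence, using the field names from the patch):

    /*
     *  initial skb->data
     *  v
     *  | rx_ip_align | rx_prefix_size | hdr_len (copied packet headers) |
     *                                 ^
     *                                 skb->data after skb_reserve()
     *
     * Copying the hardware RX prefix along with the headers keeps it
     * immediately before the MAC header, which appears to matter because
     * the EF10 inline timestamp is read at a fixed offset from the MAC
     * header (see efx_rx_buf_timestamp_minor() earlier in this patch).
     */
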
@@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ efx_rx_skb_attach_timestamp(channel, skb);
+
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
return rc_reset;
}
- if ((tests->registers < 0) && !rc_test)
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
int eventq_dma[EFX_MAX_CHANNELS];
int eventq_int[EFX_MAX_CHANNELS];
/* offline tests */
+ int memory;
int registers;
int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..23f3a6f7737a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
/**************************************************************************
*
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering. */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**************************************************************************
+ *
* Device reset
*
**************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
- efx_ptp_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -273,6 +321,31 @@ fail1:
return rc;
}
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ efx_farch_rx_push_indir_table(efx);
+}
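
For reference, the key layout written by the helper above, as byte offsets into rx_hash_key (the 16-byte register width follows from sizeof(efx_oword_t); W = FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8):

    /*
     *  rx_hash_key[ 0..15]      -> FR_BZ_RX_RSS_TKEY       (IPv4 hash key)
     *  rx_hash_key[ 0..15]      -> FR_CZ_RX_RSS_IPV6_REG1  (IPv6 key, low part)
     *  rx_hash_key[16..31]      -> FR_CZ_RX_RSS_IPV6_REG2
     *  rx_hash_key[32..32+W-1]  -> FR_CZ_RX_RSS_IPV6_REG3, together with the
     *                              two IPv6 hash enable bits (THASH / IP_THASH)
     */
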
+
/* This call performs hardware-specific global initialisation, such as
* defining the descriptor cache sizes and number of RSS channels.
* It does not set up any buffers, descriptor rings or event queues.
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- /* Set hash key for IPv4 */
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
- /* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
- 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, efx->rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
- EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
- FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
- efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ siena_rx_push_rss_config(efx);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +896,6 @@ fail:
/**************************************************************************
*
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
* Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
@@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
@@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_push_rss_config = siena_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = {
NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
finish_packet:
+#endif
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ffa78432164d..7984ad05357d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -30,7 +30,6 @@
#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 513ed8b1ba58..5564a5fa3385 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -10,7 +10,6 @@
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 975dc2d8e548..ff57a46388ee 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -576,7 +576,6 @@ err_unmap_tx:
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
- pci_set_drvdata(pci_dev, NULL);
pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
@@ -2427,7 +2426,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
pci_release_regions(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 0f096a890059..c50fb08c9905 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* watchdog = TX watchdog timeout
@@ -55,7 +54,6 @@ static const char version[] =
)
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 9965da39281b..04b35f55df97 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -15,8 +15,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN9118
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 8ef70d9c20c1..c7a4868571f9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8bf29eb4a5a0..839c0e6cca01 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -19,8 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Arguments:
* io = for the base address
@@ -66,7 +65,6 @@ static const char version[] =
#endif
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1895,7 +1893,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
SMC_SELECT_BANK(lp, 1);
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
- if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+ if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
CARDNAME, ioaddr, val);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 749654b976bc..47dce918eb0f 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -18,8 +18,7 @@
. GNU General Public License for more details.
.
. You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
.
. Information contained in this file was obtained from the LAN91C111
. manual from SMC. To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8564f23a6796..6382b7c416f4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
* Rewritten, heavily based on smsc911x simple driver by SMSC.
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d39a03..23953957fed8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************/
#ifndef __SMSC911X_H__
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index f433d97aa097..d3b967aff9e0 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
@@ -1541,7 +1540,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- err = pci_enable_wake(pdev, 0, 0);
+ err = pci_enable_wake(pdev, PCI_D0, 0);
if (err)
netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
err);
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a2..c63c76381af6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
***************************************************************************
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6e52c0f74cd9..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
select PHYLIB
select CRC32
select PTP_1588_CLOCK
+ select RESET_CONTROLLER
---help---
	  This is the driver for the Ethernet IPs built around a
Synopsys IP Core and only tested on the STMicroelectronics
@@ -25,6 +26,28 @@ config STMMAC_PLATFORM
If unsure, say N.
+config DWMAC_SUNXI
+ bool "Allwinner GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_SUNXI
+ default y
+ ---help---
+ Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+	  stmmac device driver. This driver is used for the A20/A31
+	  GMAC ethernet controllers.
+
+config DWMAC_STI
+ bool "STi GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_STI
+ default y
+ ---help---
+ Support for ethernet controller on STi SOCs.
+
+ This selects STi SoC glue layer support for the stmmac
+	  device driver. This driver is used for the GMAC ethernet
+	  controller on STi series SoCs.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 356a9dd32be7..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
+stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d234ab540b29..c553f6b5a913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -51,6 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -62,7 +63,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len -= bmax;
i++;
} else {
@@ -73,7 +73,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
len = 0;
}
}
@@ -152,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
sizeof(struct dma_desc)));
}
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
.init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc94f202a43e..74610f3aca9e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,7 +29,6 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/module.h>
-#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -293,6 +292,8 @@ struct dma_features {
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
+#define JUMBO_LEN 9000
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -369,7 +370,7 @@ struct stmmac_dma_ops {
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (void __iomem *ioaddr);
+ void (*core_init) (void __iomem *ioaddr, int mtu);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc) (void __iomem *ioaddr);
/* Dump MAC registers */
@@ -418,20 +419,13 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct stmmac_ring_mode_ops {
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*init_desc3) (struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
- int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
void (*clean_desc3) (void *priv, struct dma_desc *p);
};
@@ -440,8 +434,7 @@ struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
- const struct stmmac_ring_mode_ops *ring;
- const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
@@ -459,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
+/**
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+/**
+ * STi GMAC glue logic.
+ * --------------------
+ *
+ * _
+ * | \
+ * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * phyclk | |___________________________________________
+ * | | | (phyclk-in)
+ * --------|1 / |
+ * int-clk |_ / |
+ * | _
+ * | | \
+ * |_______|1 \ ETH_SEL_TX_RETIME_CLK
+ * | |___________________________
+ * | | (tx-retime-clk)
+ * _______|0 /
+ * | |_ /
+ * _ |
+ * | \ |
+ * --------|0 \ |
+ * clk_125 | |__|
+ * | | ETH_SEL_TXCLK_NOT_CLK125
+ * --------|1 /
+ * txclk |_ /
+ *
+ *
+ * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII, where the 50MHz
+ * clock can be generated either by the PHY or by the MAC.
+ * This bit is configured by the "st,ext-phyclk" property.
+ *
+ * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125MHz
+ * clock comes either from the clk-125 pin or from the txclk pin. This choice
+ * is driven entirely by the board wiring and is configured by the
+ * "st,tx-retime-src" property.
+ *
+ * TXCLK configuration is different for different phy interface modes
+ * and changes according to link speed in modes like RGMII.
+ *
+ * Below table summarizes the clock requirement and clock sources for
+ * supported phy interface modes with link speeds.
+ * ________________________________________________
+ *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
+ * ------------------------------------------------
+ *| MII | n/a | 25Mhz |
+ *| | | txclk |
+ * ------------------------------------------------
+ *| GMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | txclk |
+ * ------------------------------------------------
+ *| RGMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | clkgen |
+ * ------------------------------------------------
+ *| RMII | n/a | 25Mhz |
+ *| | |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * TX lines are always retimed with a clk, which can vary depending
+ * on the board configuration. Below is the table of these bits
+ * in eth configuration register depending on source of retime clk.
+ *
+ *---------------------------------------------------------------
+ * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125|
+ *---------------------------------------------------------------
+ * txclk | 0 | n/a | 1 |
+ *---------------------------------------------------------------
+ * ck_125| 0 | n/a | 0 |
+ *---------------------------------------------------------------
+ * phyclk| 1 | 0 | n/a |
+ *---------------------------------------------------------------
+ * clkgen| 1 | 1 | n/a |
+ *---------------------------------------------------------------
+ */
+
+ /* Register definition */
+
+ /* 3 bits [8:6]
+ * [6:6] ETH_SEL_TXCLK_NOT_CLK125
+ * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * [8:8] ETH_SEL_TX_RETIME_CLK
+ *
+ */
+
+#define TX_RETIME_SRC_MASK GENMASK(8, 6)
+#define ETH_SEL_TX_RETIME_CLK BIT(8)
+#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
+
+#define ENMII_MASK GENMASK(5, 5)
+#define ENMII BIT(5)
+
+/**
+ * 3 bits [4:2]
+ * 000-GMII/MII
+ * 001-RGMII
+ * 010-SGMII
+ * 100-RMII
+*/
+#define MII_PHY_SEL_MASK GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII BIT(4)
+#define ETH_PHY_SEL_SGMII BIT(3)
+#define ETH_PHY_SEL_RGMII BIT(2)
+#define ETH_PHY_SEL_GMII 0x0
+#define ETH_PHY_SEL_MII 0x0
+
+#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
+ iface == PHY_INTERFACE_MODE_RGMII_ID || \
+ iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+ iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+ iface == PHY_INTERFACE_MODE_GMII)
+
+struct sti_dwmac {
+ int interface;
+ bool ext_phyclk;
+ bool is_tx_retime_src_clk_125;
+ struct clk *clk;
+ int reg;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static u32 phy_intf_sels[] = {
+ [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+ [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+ [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+ [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+ TX_RETIME_SRC_NA = 0,
+ TX_RETIME_SRC_TXCLK = 1,
+ TX_RETIME_SRC_CLK_125,
+ TX_RETIME_SRC_PHYCLK,
+ TX_RETIME_SRC_CLKGEN,
+};
+
+static const char *const tx_retime_srcs[] = {
+ [TX_RETIME_SRC_NA] = "",
+ [TX_RETIME_SRC_TXCLK] = "txclk",
+ [TX_RETIME_SRC_CLK_125] = "clk_125",
+ [TX_RETIME_SRC_PHYCLK] = "phyclk",
+ [TX_RETIME_SRC_CLKGEN] = "clkgen",
+};
+
+static u32 tx_retime_val[] = {
+ [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
+ [TX_RETIME_SRC_CLK_125] = 0x0,
+ [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
+ [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
+ ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
+{
+ u32 src = 0, freq = 0;
+
+ if (spd == SPEED_100) {
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
+ dwmac->interface == PHY_INTERFACE_MODE_GMII) {
+ src = TX_RETIME_SRC_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (dwmac->ext_phyclk) {
+ src = TX_RETIME_SRC_PHYCLK;
+ } else {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 50000000;
+ }
+
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 25000000;
+ }
+
+ if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
+ clk_set_rate(dwmac->clk, freq);
+
+ } else if (spd == SPEED_1000) {
+ if (dwmac->is_tx_retime_src_clk_125)
+ src = TX_RETIME_SRC_CLK_125;
+ else
+ src = TX_RETIME_SRC_TXCLK;
+ }
+
+ regmap_update_bits(dwmac->regmap, dwmac->reg,
+ TX_RETIME_SRC_MASK, tx_retime_val[src]);
+}
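
As a worked example of the selection above: an RGMII board with "st,tx-retime-src" = "clk_125" running at 1000 Mbit resolves to TX_RETIME_SRC_CLK_125, whose tx_retime_val[] entry is 0x0, so all three retime bits ([8:6]) are cleared in the syscon register. The same board dropping to 100 Mbit resolves to TX_RETIME_SRC_CLKGEN, which sets ETH_SEL_TX_RETIME_CLK | ETH_SEL_INTERNAL_NOTEXT_PHYCLK and, if the optional "sti-ethclk" clock was found, re-rates it to 25 MHz.
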
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ if (dwmac->clk)
+ clk_disable_unprepare(dwmac->clk);
+}
+
+static void sti_fix_mac_speed(void *priv, unsigned int spd)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ setup_retime_src(dwmac, spd);
+
+ return;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int err;
+
+ if (!np)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
+ if (!res)
+ return -ENODATA;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ dwmac->dev = dev;
+ dwmac->interface = of_get_phy_mode(np);
+ dwmac->regmap = regmap;
+ dwmac->reg = res->start;
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+ dwmac->is_tx_retime_src_clk_125 = false;
+
+ if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+ const char *rs;
+
+ err = of_property_read_string(np, "st,tx-retime-src", &rs);
+ if (err < 0) {
+ dev_err(dev, "st,tx-retime-src not specified\n");
+ return err;
+ }
+
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->is_tx_retime_src_clk_125 = true;
+ }
+
+ dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+
+ if (IS_ERR(dwmac->clk))
+ dwmac->clk = NULL;
+
+ return 0;
+}
+
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+ struct regmap *regmap = dwmac->regmap;
+ int iface = dwmac->interface;
+ u32 reg = dwmac->reg;
+ u32 val, spd;
+
+ if (dwmac->clk)
+ clk_prepare_enable(dwmac->clk);
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+ val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+ if (IS_PHY_IF_MODE_GBIT(iface))
+ spd = SPEED_1000;
+ else
+ spd = SPEED_100;
+
+ setup_retime_src(dwmac, spd);
+
+ return 0;
+}
+
+static void *sti_dwmac_setup(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac;
+ int ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sti_dwmac_parse_data(dwmac, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data sti_gmac_data = {
+ .fix_mac_speed = sti_fix_mac_speed,
+ .setup = sti_dwmac_setup,
+ .init = sti_dwmac_init,
+ .exit = sti_dwmac_exit,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
new file mode 100644
index 000000000000..771cd15fca18
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -0,0 +1,140 @@
+/**
+ * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
+ *
+ * Copyright (C) 2013 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+
+struct sunxi_priv_data {
+ int interface;
+ int clk_enabled;
+ struct clk *tx_clk;
+ struct regulator *regulator;
+};
+
+static void *sun7i_gmac_setup(struct platform_device *pdev)
+{
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return gmac->tx_clk;
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ return gmac;
+}
+
+#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
+#define SUN7I_GMAC_MII_RATE 25000000
+
+static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret)
+ return ret;
+ }
+
+ /* Set GMAC interface port mode
+ *
+	 * The GMAC TX clock lines are configured by setting the clock
+	 * rate (which relies on the clock driver's auto-reparenting
+	 * feature) and then enabling or disabling the clock.
+ */
+ if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+
+ return 0;
+}
+
+static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static void sun7i_fix_speed(void *priv, unsigned int speed)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ /* only GMII mode requires us to reconfigure the clock lines */
+ if (gmac->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+}
+
+/* of_data specifying hardware features and callbacks.
+ * hardware features were copied from Allwinner drivers. */
+const struct stmmac_of_data sun7i_gmac_data = {
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .fix_mac_speed = sun7i_fix_speed,
+ .setup = sun7i_gmac_setup,
+ .init = sun7i_gmac_init,
+ .exit = sun7i_gmac_exit,
+};
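
For context, both new glue layers hook their speed handlers in through the fix_mac_speed callback; the rough call flow, as assumed from the stmmac platform glue model, is:

    /*
     *  PHY link change
     *    -> stmmac core link adjustment path
     *      -> plat->fix_mac_speed(bsp_priv, speed)
     *        -> sun7i_fix_speed() / sti_fix_mac_speed()
     */
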
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c12aabb8cf93..f37d90f114f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,11 +126,8 @@ enum power_event {
#define GMAC_ANE_PSE (3 << 7)
#define GMAC_ANE_PSE_SHIFT 7
- /* GMAC Configuration defines */
-#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
-
/* GMAC Configuration defines */
+#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
@@ -156,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cdd926832e27..b3e148ef5683 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -32,10 +32,15 @@
#include <asm/io.h>
#include "dwmac1000.h"
-static void dwmac1000_core_init(void __iomem *ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + GMAC_CONTROL);
value |= GMAC_CORE_INIT;
+ if (mtu > 1500)
+ value |= GMAC_CONTROL_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONTROL_JE;
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
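
Worked example of the new MTU handling in dwmac1000_core_init(): with mtu = 1500 only GMAC_CORE_INIT is programmed; mtu = 1800 additionally sets GMAC_CONTROL_2K (IEEE 802.3as 2K frames); mtu = 4000 sets both GMAC_CONTROL_2K and GMAC_CONTROL_JE (jumbo enable). Note that GMAC_CONTROL_JE was removed from GMAC_CORE_INIT in the dwmac1000.h hunk above, so jumbo support is now enabled only when the MTU requires it.
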
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 5857d677dac1..2ff767bcfdd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(void __iomem *ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
{
u32 value = readl(ioaddr + MAC_CONTROL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 1ef9d8a555aa..650a4be6bce5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -58,6 +58,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
wmb();
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
@@ -73,7 +74,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE);
wmb();
priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
@@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- if (unlikely(priv->plat->has_gmac))
- /* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
/* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu)
return ret;
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 22f89ffdfd95..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
+#include <linux/reset.h>
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
@@ -91,6 +92,7 @@ struct stmmac_priv {
int wolopts;
int wol_irq;
struct clk *stmmac_clk;
+ struct reset_control *stmmac_rst;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -105,21 +107,19 @@ struct stmmac_priv {
unsigned int default_addend;
u32 adv_ts;
int use_riwt;
+ int irq_wake;
spinlock_t ptp_lock;
};
-extern int phyaddr;
-
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
+int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_freeze(struct net_device *ndev);
-int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
@@ -130,6 +130,12 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_STMMAC_PLATFORM
+#ifdef CONFIG_DWMAC_SUNXI
+extern const struct stmmac_of_data sun7i_gmac_data;
+#endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 797b56a0efc4..8543e1cfd55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -43,6 +43,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -50,9 +51,9 @@
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
+#include <linux/reset.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
-#define JUMBO_LEN 9000
/* Module parameters */
#define TX_TIMEO 5000
@@ -64,7 +65,7 @@ static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
-int phyaddr = -1;
+static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -91,8 +92,8 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
-static int buf_sz = DMA_BUFFER_SIZE;
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -135,8 +136,8 @@ static void stmmac_verify_args(void)
dma_rxsize = DMA_RX_SIZE;
if (unlikely(dma_txsize < 0))
dma_txsize = DMA_TX_SIZE;
- if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
flow_ctrl = FLOW_AUTO;
else if (likely(flow_ctrl < 0))
@@ -285,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
+ int tx_lpi_timer = priv->tx_lpi_timer;
+
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
+ if (phy_init_eee(priv->phydev, 1)) {
+			/* Handle the case where EEE can no longer be
+			 * supported at run time (for example because the
+			 * link partner's capabilities have changed).
+			 * In that case the driver disables its own timers.
+ */
+ if (priv->eee_active) {
+ pr_debug("stmmac: disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+ tx_lpi_timer);
+ }
+ priv->eee_active = 0;
goto out;
-
+ }
+ /* Activate the EEE and start timers */
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -299,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_timer(priv->ioaddr,
STMMAC_DEFAULT_LIT_LS,
- priv->tx_lpi_timer);
+ tx_lpi_timer);
} else
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->ioaddr,
priv->phydev->link);
- pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
ret = true;
}
@@ -332,7 +348,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
/* exit if skb doesn't support hw tstamp */
- if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
if (priv->adv_ts)
@@ -776,6 +792,7 @@ static int stmmac_init_phy(struct net_device *dev)
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
+ int max_speed = priv->plat->max_speed;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
@@ -800,7 +817,8 @@ static int stmmac_init_phy(struct net_device *dev)
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII))
+ (interface == PHY_INTERFACE_MODE_RMII) ||
+ (max_speed < 1000 && max_speed > 0))
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
@@ -883,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
- else if (mtu >= DMA_BUFFER_SIZE)
+ else if (mtu > DEFAULT_BUFSIZE)
ret = BUF_SIZE_2KiB;
else
- ret = DMA_BUFFER_SIZE;
+ ret = DEFAULT_BUFSIZE;
return ret;
}
@@ -948,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
p->des2 = priv->rx_skbuff_dma[i];
- if ((priv->mode == STMMAC_RING_MODE) &&
+ if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->ring->init_desc3(p);
+ priv->hw->mode->init_desc3(p);
return 0;
}
@@ -981,79 +999,18 @@ static int init_dma_desc_rings(struct net_device *dev)
unsigned int bfsize = 0;
int ret = -ENOMEM;
- /* Set the max buffer size according to the DESC mode
- * and the MTU. Note that RING mode allows 16KiB bsize.
- */
- if (priv->mode == STMMAC_RING_MODE)
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ if (priv->hw->mode->set_16kib_bfsize)
+ bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+ priv->dma_buf_sz = bfsize;
+
if (netif_msg_probe(priv))
pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
txsize, rxsize, bfsize);
- if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
-
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
-
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- goto err_dma;
- }
- }
-
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- goto err_rx_skbuff_dma;
-
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1079,21 +1036,20 @@ static int init_dma_desc_rings(struct net_device *dev)
}
priv->cur_rx = 0;
priv->dirty_rx = (unsigned int)(i - rxsize);
- priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
- priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
- priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
} else {
- priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
- priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
}
}
@@ -1121,30 +1077,6 @@ static int init_dma_desc_rings(struct net_device *dev)
err_init_rx_buffers:
while (--i >= 0)
stmmac_free_rx_buffers(priv, i);
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
-err_rx_skbuff_dma:
- if (priv->extend_desc) {
- dma_free_coherent(priv->device, priv->dma_tx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- }
-err_dma:
return ret;
}
@@ -1161,25 +1093,107 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
int i;
for (i = 0; i < priv->dma_tx_size; i++) {
- if (priv->tx_skbuff[i] != NULL) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
+ struct dma_desc *p;
- if (priv->tx_skbuff_dma[i])
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i]) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[i] = 0;
+ }
+
+ if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i] = 0;
}
}
}
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ int ret = -ENOMEM;
+
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ return -ENOMEM;
+
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ }
+
+ return 0;
+
+err_dma:
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+ return ret;
+}
+
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
/* Release the DMA TX/RX socket buffers */
@@ -1286,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = 0;
}
- priv->hw->ring->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
@@ -1522,9 +1536,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
+ pr_info("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
}
- pr_warn("%s: device MAC address %pM\n", priv->dev->name,
- priv->dev->dev_addr);
}
/**
@@ -1589,6 +1603,86 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
+ * stmmac_hw_setup: set up the MAC in a usable state.
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function sets up the IP in a usable state.
+ * Return value:
+ * 0 on success and a negative error code, as defined in errno.h,
+ * on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ return ret;
+ }
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ pr_err("%s: DMA engine initialization failed\n", __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* If required, perform hw setup of the bus. */
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_set_mac(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warn("%s: failed debugFS registration\n", __func__);
+#endif
+ /* Start the ball rolling... */
+ pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
+ }
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+
+ priv->eee_enabled = stmmac_eee_init(priv);
+
+ stmmac_init_tx_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ }
+
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
+ return 0;
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -1602,8 +1696,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- clk_prepare_enable(priv->stmmac_clk);
-
stmmac_check_ether_addr(priv);
if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
@@ -1616,33 +1708,29 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- ret = init_dma_desc_rings(dev);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
}
- /* DMA initialization and SW reset */
- ret = stmmac_init_dma_engine(priv);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
- pr_err("%s: DMA engine initialization failed\n", __func__);
+ pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
- /* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
-
- /* If required, perform hw setup of the bus. */
- if (priv->plat->bus_setup)
- priv->plat->bus_setup(priv->ioaddr);
-
- /* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->ioaddr);
+ if (priv->phydev)
+ phy_start(priv->phydev);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -1675,55 +1763,6 @@ static int stmmac_open(struct net_device *dev)
}
}
- /* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
-
- /* Set the HW DMA mode and the COE */
- stmmac_dma_operation_mode(priv);
-
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
- stmmac_mmc_setup(priv);
-
- ret = stmmac_init_ptp(priv);
- if (ret)
- pr_warn("%s: failed PTP initialisation\n", __func__);
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(dev);
- if (ret < 0)
- pr_warn("%s: failed debugFS registration\n", __func__);
-#endif
- /* Start the ball rolling... */
- pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
-
- /* Dump DMA/MAC registers */
- if (netif_msg_hw(priv)) {
- priv->hw->mac->dump_regs(priv->ioaddr);
- priv->hw->dma->dump_regs(priv->ioaddr);
- }
-
- if (priv->phydev)
- phy_start(priv->phydev);
-
- priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
-
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
- if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
- priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
- }
-
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
-
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1794,7 +1833,6 @@ static int stmmac_release(struct net_device *dev)
#ifdef CONFIG_STMMAC_DEBUG_FS
stmmac_exit_fs();
#endif
- clk_disable_unprepare(priv->stmmac_clk);
stmmac_release_ptp(priv);
@@ -1818,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int enh_desc = priv->plat->enh_desc;
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1844,35 +1883,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
- priv->tx_skbuff[entry] = skb;
-
/* To program the descriptors according to the size of the frame */
- if (priv->mode == STMMAC_RING_MODE) {
- is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->ring->jumbo_frm(priv, skb,
- csum_insertion);
- } else {
- is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->chain->jumbo_frm(priv, skb,
- csum_insertion);
- }
+ if (enh_desc)
+ is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
- } else
+ } else {
desc = first;
+ entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ }
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
+ priv->tx_skbuff[entry] = NULL;
entry = (++priv->cur_tx) % txsize;
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -1882,7 +1912,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
- priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@@ -1890,6 +1919,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
}
+ priv->tx_skbuff[entry] = skb;
+
/* Finalize the latest segment. */
priv->hw->desc->close_tx_desc(desc);
@@ -1951,6 +1982,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ethhdr *ehdr;
+ u16 vlanid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+ NETIF_F_HW_VLAN_CTAG_RX &&
+ !__vlan_get_tag(skb, &vlanid)) {
+ /* pop the vlan tag */
+ ehdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+ }
+}
+
+
/**
* stmmac_rx_refill: refill used skb preallocated buffers
* @priv: driver private structure
@@ -1986,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->ring->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(priv, p);
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
@@ -2102,6 +2150,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
print_pkt(skb->data, frame_len);
}
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe))
@@ -2229,6 +2279,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+ if (priv->plat->maxmtu < max_mtu)
+ max_mtu = priv->plat->maxmtu;
+
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
return -EINVAL;
@@ -2276,6 +2329,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
@@ -2582,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* To use the chained or ring mode */
if (chain_mode) {
- priv->hw->chain = &chain_mode_ops;
+ priv->hw->mode = &chain_mode_ops;
pr_info(" Chain mode enabled\n");
priv->mode = STMMAC_CHAIN_MODE;
} else {
- priv->hw->ring = &ring_mode_ops;
+ priv->hw->mode = &ring_mode_ops;
pr_info(" Ring mode enabled\n");
priv->mode = STMMAC_RING_MODE;
}
@@ -2680,10 +2736,32 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
+ priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_clk)) {
+ dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+ __func__);
+ ret = PTR_ERR(priv->stmmac_clk);
+ goto error_clk_get;
+ }
+ clk_prepare_enable(priv->stmmac_clk);
+
+ priv->stmmac_rst = devm_reset_control_get(priv->device,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_rst)) {
+ if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error_hw_init;
+ }
+ dev_info(priv->device, "no reset control found\n");
+ priv->stmmac_rst = NULL;
+ }
+ if (priv->stmmac_rst)
+ reset_control_deassert(priv->stmmac_rst);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
- goto error_free_netdev;
+ goto error_hw_init;
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -2721,12 +2799,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
- if (IS_ERR(priv->stmmac_clk)) {
- pr_warn("%s: warning: cannot get CSR clock\n", __func__);
- goto error_clk_get;
- }
-
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Otherwise, the driver will try to
@@ -2754,15 +2826,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
return priv;
error_mdio_register:
- clk_put(priv->stmmac_clk);
-error_clk_get:
unregister_netdev(ndev);
error_netdev_register:
netif_napi_del(&priv->napi);
-error_free_netdev:
+error_hw_init:
+ clk_disable_unprepare(priv->stmmac_clk);
+error_clk_get:
free_netdev(ndev);
- return NULL;
+ return ERR_PTR(ret);
}
/**
@@ -2786,6 +2858,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ if (priv->stmmac_rst)
+ reset_control_assert(priv->stmmac_rst);
+ clk_disable_unprepare(priv->stmmac_clk);
free_netdev(ndev);
return 0;
@@ -2817,10 +2892,12 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_clear_descriptors(priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
- else {
+ priv->irq_wake = 1;
+ } else {
stmmac_set_mac(priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
/* Disable the clock in case PWM is off */
clk_disable_unprepare(priv->stmmac_clk);
}
@@ -2844,18 +2921,21 @@ int stmmac_resume(struct net_device *ndev)
* this bit because it can generate problems while resuming
* from other devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device))
+ if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->ioaddr, 0);
- else
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
clk_prepare_enable(priv->stmmac_clk);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
netif_device_attach(ndev);
- /* Enable the MAC and DMA */
- stmmac_set_mac(priv->ioaddr, true);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
+ stmmac_hw_setup(ndev);
napi_enable(&priv->napi);
@@ -2868,22 +2948,6 @@ int stmmac_resume(struct net_device *ndev)
return 0;
}
-
-int stmmac_freeze(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_release(ndev);
-}
-
-int stmmac_restore(struct net_device *ndev)
-{
- if (!ndev || !netif_running(ndev))
- return 0;
-
- return stmmac_open(ndev);
-}
#endif /* CONFIG_PM */
/* Driver can be configured w/ and w/o both PCI and Platf drivers
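
The probe-path changes above follow a common pattern worth spelling out: acquire the CSR clock and an optional reset control with devm_* helpers, treat -EPROBE_DEFER as an error to retry later, and report failure to the caller through ERR_PTR() rather than NULL. A minimal sketch of that pattern under those assumptions; foo_priv and foo_probe are illustrative names, not part of the driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>
#include <linux/slab.h>

struct foo_priv {
	struct clk *clk;
	struct reset_control *rst;
};

/* Sketch only: grab the clock and an optional reset line, undo on failure. */
static struct foo_priv *foo_probe(struct device *dev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return ERR_CAST(priv->clk);

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ERR_PTR(ret);

	priv->rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(priv->rst)) {
		if (PTR_ERR(priv->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_disable_clk;
		}
		priv->rst = NULL;	/* the reset line is optional */
	}
	if (priv->rst)
		reset_control_deassert(priv->rst);

	return priv;

err_disable_clk:
	clk_disable_unprepare(priv->clk);
	return ERR_PTR(ret);
}
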
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fe7bc9903867..a468eb107823 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -128,7 +128,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
* @bus: points to the mii_bus structure
* Description: reset the MII bus
*/
-static int stmmac_mdio_reset(struct mii_bus *bus)
+int stmmac_mdio_reset(struct mii_bus *bus)
{
#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
@@ -166,7 +166,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
udelay(data->delays[1]);
gpio_set_value(reset_gpio, active_low ? 1 : 0);
udelay(data->delays[2]);
- gpio_free(reset_gpio);
}
}
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644d80ece067..291608924849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,9 +26,9 @@
#include <linux/pci.h>
#include "stmmac.h"
-struct plat_stmmacenet_data plat_dat;
-struct stmmac_mdio_bus_data mdio_data;
-struct stmmac_dma_cfg dma_cfg;
+static struct plat_stmmacenet_data plat_dat;
+static struct stmmac_mdio_bus_data mdio_data;
+static struct stmmac_dma_cfg dma_cfg;
static void stmmac_default_data(void)
{
@@ -100,9 +100,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
stmmac_default_data();
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(priv);
goto err_out;
}
priv->dev->irq = pdev->irq;
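
The PCI glue is the caller side of that ERR_PTR() convention: instead of testing for NULL it now propagates whatever error the core probe produced. In sketch form, with the hypothetical foo_probe() from the note above:

	priv = foo_probe(&pdev->dev);
	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* propagates -EPROBE_DEFER and friends */
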
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 51c9069ef405..8fb32a80f1c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -26,8 +26,28 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_device.h>
#include "stmmac.h"
+static const struct of_device_id stmmac_dt_ids[] = {
+#ifdef CONFIG_DWMAC_SUNXI
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+#endif
+#ifdef CONFIG_DWMAC_STI
+ { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
+#endif
+ /* SoC specific glue layers should come before generic bindings */
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
#ifdef CONFIG_OF
static int stmmac_probe_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
@@ -35,23 +55,63 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
+ const struct of_device_id *device;
if (!np)
return -ENODEV;
+ device = of_match_device(stmmac_dt_ids, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ if (device->data) {
+ const struct stmmac_of_data *data = device->data;
+ plat->has_gmac = data->has_gmac;
+ plat->enh_desc = data->enh_desc;
+ plat->tx_coe = data->tx_coe;
+ plat->rx_coe = data->rx_coe;
+ plat->bugged_jumbo = data->bugged_jumbo;
+ plat->pmt = data->pmt;
+ plat->riwt_off = data->riwt_off;
+ plat->fix_mac_speed = data->fix_mac_speed;
+ plat->bus_setup = data->bus_setup;
+ plat->setup = data->setup;
+ plat->free = data->free;
+ plat->init = data->init;
+ plat->exit = data->exit;
+ }
+
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
+ /* Get max speed of operation from device tree */
+ if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+ plat->max_speed = -1;
+
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
plat->bus_id = 0;
- of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr);
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ /* "snps,phy-addr" is not a standard property. Mark it as deprecated
+ * and warn of its use. Remove this when phy node support is added.
+ */
+ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode");
+
+ /* Set the maxmtu to a default of JUMBO_LEN in case the
+ * parameter is not present in the device tree.
+ */
+ plat->maxmtu = JUMBO_LEN;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -60,6 +120,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
of_device_is_compatible(np, "snps,dwmac")) {
+ /* Note that the ePAPR v1.1 spec defines this parameter as
+ * max-frame-size, but it is actually used as the IEEE
+ * definition of MAC client data, i.e. the MTU. The
+ * specification is confusing: the property is named
+ * max-frame-size, yet its usage examples are clearly MTUs.
+ */
+ of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
plat->has_gmac = 1;
plat->pmt = 1;
}
@@ -140,17 +208,24 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
}
}
+ /* Custom setup (if needed) */
+ if (plat_dat->setup) {
+ plat_dat->bsp_priv = plat_dat->setup(pdev);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+ }
+
/* Custom initialisation (if needed)*/
if (plat_dat->init) {
- ret = plat_dat->init(pdev);
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
if (unlikely(ret))
return ret;
}
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (!priv) {
+ if (IS_ERR(priv)) {
pr_err("%s: main driver probe failed", __func__);
- return -ENODEV;
+ return PTR_ERR(priv);
}
/* Get MAC address if available (DT) */
@@ -199,7 +274,10 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
int ret = stmmac_dvr_remove(ndev);
if (priv->plat->exit)
- priv->plat->exit(pdev);
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
+
+ if (priv->plat->free)
+ priv->plat->free(pdev, priv->plat->bsp_priv);
return ret;
}
@@ -207,64 +285,34 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int stmmac_pltfr_suspend(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pltfr_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
-
- return stmmac_resume(ndev);
-}
-
-int stmmac_pltfr_freeze(struct device *dev)
-{
int ret;
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_freeze(ndev);
- if (plat_dat->exit)
- plat_dat->exit(pdev);
+ ret = stmmac_suspend(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
}
-int stmmac_pltfr_restore(struct device *dev)
+static int stmmac_pltfr_resume(struct device *dev)
{
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (plat_dat->init)
- plat_dat->init(pdev);
+ if (priv->plat->init)
+ priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_restore(ndev);
+ return stmmac_resume(ndev);
}
-static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- .suspend = stmmac_pltfr_suspend,
- .resume = stmmac_pltfr_resume,
- .freeze = stmmac_pltfr_freeze,
- .thaw = stmmac_pltfr_restore,
- .restore = stmmac_pltfr_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
-static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
+ stmmac_pltfr_suspend, stmmac_pltfr_resume);
struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
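
Dropping the open-coded dev_pm_ops together with the separate freeze/restore helpers works because SIMPLE_DEV_PM_OPS() wires the same suspend/resume pair into every system-sleep slot, so hibernation reuses the ordinary suspend/resume paths. Roughly what the macro produces when CONFIG_PM_SLEEP is enabled (a sketch of the expansion, not a verbatim copy of the pm.h header; the foo_* names are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

/* Approximate expansion of SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume) */
static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
	.freeze = foo_suspend,
	.thaw = foo_resume,
	.poweroff = foo_suspend,
	.restore = foo_resume,
};
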
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b4d50d74ba18..df8d383acf48 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f57..882ce168a799 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* vendor id: 0x108E (Sun Microsystems, Inc.)
* device id: 0xabba (Cassini)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 388540fcb977..8e2266e1f260 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3493,10 +3493,12 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
rh = (struct rx_pkt_hdr1 *) skb->data;
if (np->dev->features & NETIF_F_RXHASH)
- skb->rxhash = ((u32)rh->hashval2_0 << 24 |
- (u32)rh->hashval2_1 << 16 |
- (u32)rh->hashval1_1 << 8 |
- (u32)rh->hashval1_2 << 0);
+ skb_set_hash(skb,
+ ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0),
+ PKT_HASH_TYPE_L3);
skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 7217ee5d6273..206c1063815a 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -13,7 +13,6 @@
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b5655b79bd3b..c2799dc46325 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -24,7 +24,6 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3df56840a3b9..1c24a8f368bd 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -751,7 +751,7 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
struct vnet_mcast_entry *m;
for (m = vp->mcast_list; m; m = m->next) {
- if (!memcmp(m->addr, addr, ETH_ALEN))
+ if (ether_addr_equal(m->addr, addr))
return m;
}
return NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4f1d2549130e..2ead87759ab4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1764,7 +1764,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
/* We reclaimed resources, so in case the Q is stopped by xmit callback,
- * we resume the transmition and use tx_lock to synchronize with xmit.*/
+ * we resume the transmission and use tx_lock to synchronize with xmit.*/
spin_lock(&priv->tx_lock);
priv->tx_level += tx_level;
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 2dc16b6efaf0..73f74f369437 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -17,7 +17,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5330fd298705..7d6d8ec676c8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -541,14 +541,93 @@ static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
return slave_num;
}
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_ale *ale = priv->ale;
+ int i;
+
+ if (priv->data.dual_emac) {
+ bool flag = false;
+
+ /* Enabling promiscuous mode for one interface affects
+ * both interfaces, as they share the same hardware
+ * resource.
+ */
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+ flag = true;
+
+ if (!enable && flag) {
+ enable = true;
+ dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ } else {
+ if (enable) {
+ unsigned long timeout = jiffies + HZ;
+
+ /* Disable Learn for all ports */
+ for (i = 0; i < priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 1);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 1);
+ }
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ break;
+ } while (time_after(timeout, jiffies));
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+ priv->host_port);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Stop flooding unicast packets to the host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+ /* Enable Learn for all ports */
+ for (i = 0; i < priv->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 0);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 0);
+ }
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ }
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
- dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+ cpsw_set_promiscious(ndev, true);
return;
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
}
/* Clear all mcast from ALE */
@@ -582,7 +661,7 @@ static void cpsw_intr_disable(struct cpsw_priv *priv)
return;
}
-void cpsw_tx_handler(void *token, int len, int status)
+static void cpsw_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
@@ -599,7 +678,7 @@ void cpsw_tx_handler(void *token, int len, int status)
dev_kfree_skb_any(skb);
}
-void cpsw_rx_handler(void *token, int len, int status)
+static void cpsw_rx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct sk_buff *new_skb;
@@ -1085,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
+ u32 slave_port;
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -1257,29 +1342,6 @@ fail:
return NETDEV_TX_BUSY;
}
-static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
-{
- /*
- * The switch cannot operate in promiscuous mode without substantial
- * headache. For promiscuous mode to work, we would need to put the
- * ALE in bypass mode and route all traffic to the host port.
- * Subsequently, the host will need to operate as a "bridge", learn,
- * and flood as needed. For now, we simply complain here and
- * do nothing about it :-)
- */
- if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
- dev_err(&ndev->dev, "promiscuity ignored!\n");
-
- /*
- * The switch cannot filter multicast traffic unless it is configured
- * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
- * whole bunch of additional logic that this driver does not implement
- * at present.
- */
- if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
- dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
-}
-
#ifdef CONFIG_TI_CPTS
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
@@ -1331,7 +1393,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}
-static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpts *cpts = priv->cpts;
@@ -1392,6 +1454,24 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpts *cpts = priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (priv->version != CPSW_VERSION_1 &&
+ priv->version != CPSW_VERSION_2)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = (cpts->rx_enable ?
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
#endif /*CONFIG_TI_CPTS*/
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1406,7 +1486,9 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
switch (cmd) {
#ifdef CONFIG_TI_CPTS
case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_ioctl(dev, req);
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
#endif
case SIOCGMIIPHY:
data->phy_id = priv->slaves[slave_no].phy->addr;
@@ -1555,7 +1637,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
- .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -1803,14 +1884,29 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+
+ if (strncmp(mdio->name, "gpio", 4) == 0) {
+ /* GPIO bitbang MDIO driver attached */
+ struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, bus->id, phyid);
+ } else {
+ /* davinci MDIO driver attached */
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ }
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
slave_data->phy_if = of_get_phy_mode(slave_node);
+ if (slave_data->phy_if < 0) {
+ pr_err("Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ return slave_data->phy_if;
+ }
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2133,12 +2229,8 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- if (cpts_register(&pdev->dev, priv->cpts,
- data->cpts_clock_mult, data->cpts_clock_shift))
- dev_err(priv->dev, "error registering cpts device\n");
-
- cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- ss_res->start, ndev->irq);
+ cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+ &ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
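
The new SIOCGHWTSTAMP handler added here (and in the tilegx and ixp4xx drivers further down) has the same shape in every driver: report the currently programmed hardware timestamping configuration back to user space, complementing the existing SIOCSHWTSTAMP setter. A generic sketch under that assumption; tx_ts_enabled and rx_ts_enabled stand in for whatever driver state actually tracks the configuration:

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Sketch only: return the active timestamping config to user space. */
static int foo_hwtstamp_get(struct net_device *dev, struct ifreq *ifr,
			    bool tx_ts_enabled, bool rx_ts_enabled)
{
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = rx_ts_enabled ? HWTSTAMP_FILTER_ALL
				      : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
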
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7fa60d6092ed..7f893069c418 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -163,7 +163,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
if (cpsw_ale_get_vlan_id(ale_entry) != vid)
continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
- if (memcmp(entry_addr, addr, 6) == 0)
+ if (ether_addr_equal(entry_addr, addr))
return idx;
}
return -ENOENT;
@@ -477,6 +477,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_P0_UNI_FLOOD] = {
+ .name = "port0_unicast_flood",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_VLAN_NOLEARN] = {
.name = "vlan_nolearn",
.offset = ALE_CONTROL,
@@ -573,6 +581,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_PORT_NO_SA_UPDATE] = {
+ .name = "no_source_update",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_PORT_MCAST_LIMIT] = {
.name = "mcast_limit",
.offset = ALE_PORTCTL,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 30daa1265f0c..de409c33b250 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -34,6 +34,7 @@ enum cpsw_ale_control {
ALE_ENABLE,
ALE_CLEAR,
ALE_AGEOUT,
+ ALE_P0_UNI_FLOOD,
ALE_VLAN_NOLEARN,
ALE_NO_PORT_VLAN,
ALE_OUI_DENY,
@@ -47,6 +48,7 @@ enum cpsw_ale_control {
ALE_PORT_DROP_UNTAGGED,
ALE_PORT_DROP_UNKNOWN_VLAN,
ALE_PORT_NOLEARN,
+ ALE_PORT_NO_SA_UPDATE,
ALE_PORT_UNKNOWN_VLAN_MEMBER,
ALE_PORT_UNKNOWN_MCAST_FLOOD,
ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 90a79462c869..88ef27067bf2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -81,7 +81,7 @@ struct cpdma_desc {
};
struct cpdma_desc_pool {
- u32 phys;
+ phys_addr_t phys;
u32 hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
@@ -219,8 +219,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
{
if (!desc)
return 0;
- return pool->hw_addr + (__force dma_addr_t)desc -
- (__force dma_addr_t)pool->iomap;
+ return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
@@ -356,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
int i;
spin_lock_irqsave(&ctlr->lock, flags);
- if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ if (ctlr->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@@ -892,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
- if (chan->state != CPDMA_STATE_ACTIVE) {
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
@@ -972,7 +971,7 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
-struct cpdma_control_info controls[] = {
+static struct cpdma_control_info controls[] = {
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164a0434..8f0e69ce07ca 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
- int ret;
+ int q, m, ret;
+ int res_num = 0, irq_num = 0;
int i = 0;
- int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
}
/* Request IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+ res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+ if (request_irq(irq_num, emac_irq, 0, ndev->name,
+ ndev)) {
+ dev_err(emac_dev,
+ "DaVinci EMAC: request_irq() failed\n");
+ ret = -EBUSY;
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
- for (i = res->start; i <= res->end; i++) {
- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- 0, ndev->name, ndev))
goto rollback;
+ }
}
- k++;
+ res_num++;
}
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
return 0;
-rollback:
-
- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
- ret = -EBUSY;
err:
+ emac_int_disable(priv);
+ napi_disable(&priv->napi);
+
+rollback:
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
+
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
+ cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
return ret;
}
@@ -1659,6 +1680,9 @@ err:
*/
static int emac_dev_stop(struct net_device *ndev)
{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
if (priv->phydev)
phy_disconnect(priv->phydev);
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
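
The davinci_emac open path above switches from devm_request_irq() to plain request_irq() with explicit rollback, and the counter juggling (res_num/irq_num) is easier to see as a generic pattern: request every interrupt listed in the platform IRQ resources, and on failure free exactly the ones already requested, in reverse. A simplified sketch; foo_request_irqs and its arguments are illustrative:

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Sketch only: request all IRQ resources of a platform device, undoing
 * the partially completed work if any request fails.
 */
static int foo_request_irqs(struct platform_device *pdev, void *dev_id,
			    irq_handler_t handler)
{
	struct resource *res;
	int i, irq, ret = 0;

	for (i = 0; (res = platform_get_resource(pdev, IORESOURCE_IRQ, i)); i++) {
		for (irq = res->start; irq <= res->end; irq++) {
			ret = request_irq(irq, handler, 0, pdev->name, dev_id);
			if (ret)
				goto rollback;
		}
	}
	return 0;

rollback:
	/* Free the IRQs already requested from the current resource... */
	while (--irq >= (int)res->start)
		free_irq(irq, dev_id);
	/* ...then every IRQ from the earlier, fully requested resources. */
	while (--i >= 0) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, dev_id);
	}
	return ret;
}
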
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4ec92659a100..0cca9dec5d82 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -82,7 +82,7 @@ struct davinci_mdio_regs {
} user[0];
};
-struct mdio_platform_data default_pdata = {
+static const struct mdio_platform_data default_pdata = {
.bus_freq = DEF_OUT_FREQ,
};
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 4083ba8839e1..f59a6c265331 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,20 +9,10 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
+ select PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
To compile this driver as a module, choose M here: the module
will be called tile_net.
-
-config PTP_1588_CLOCK_TILEGX
- tristate "Tilera TILE-Gx mPIPE as PTP clock"
- select PTP_1588_CLOCK
- depends on TILE_NET
- depends on TILEGX
- ---help---
- This driver adds support for using the mPIPE as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0e9fb3301b11..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -187,10 +187,8 @@ struct tile_net_priv {
int echannel;
/* mPIPE instance, 0 or 1. */
int instance;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* The timestamp config. */
struct hwtstamp_config stamp_cfg;
-#endif
};
static struct mpipe_data {
@@ -229,14 +227,12 @@ static struct mpipe_data {
int first_bucket;
int num_buckets;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
/* PTP-specific data. */
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
/* Lock for ptp accessors. */
struct mutex ptp_lock;
-#endif
} mpipe_data[NR_MPIPE_MAX] = {
[0 ... (NR_MPIPE_MAX - 1)] {
@@ -451,20 +447,17 @@ static void tile_net_provide_needed_buffers(void)
static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
gxio_mpipe_idesc_t *idesc)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
idesc->time_stamp_ns);
}
-#endif
}
/* Get TX timestamp, and store it in the skb. */
static void tile_tx_timestamp(struct sk_buff *skb, int instance)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct skb_shared_info *shtx = skb_shinfo(skb);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
@@ -477,14 +470,11 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
}
/* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
+static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct hwtstamp_config config;
struct tile_net_priv *priv = netdev_priv(dev);
@@ -530,9 +520,17 @@ static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
priv->stamp_cfg = config;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
+}
+
+static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
+ sizeof(priv->stamp_cfg)))
+ return -EFAULT;
+
+ return 0;
}
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -814,8 +812,6 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
-
/* PTP clock operations. */
static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -882,12 +878,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.enable = ptp_mpipe_enable,
};
-#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
-
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
struct timespec ts;
getnstimeofday(&ts);
@@ -899,16 +892,13 @@ static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
if (IS_ERR(md->ptp_clock))
netdev_err(dev, "ptp_clock_register failed %ld\n",
PTR_ERR(md->ptp_clock));
-#endif
}
/* Initialize PTP fields in a new device. */
static void init_ptp_dev(struct tile_net_priv *priv)
{
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-#endif
}
/* Helper functions for "tile_net_update()". */
@@ -2081,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return smp_processor_id();
}
@@ -2099,7 +2089,9 @@ static void tile_net_tx_timeout(struct net_device *dev)
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
if (cmd == SIOCSHWTSTAMP)
- return tile_hwtstamp_ioctl(dev, rq, cmd);
+ return tile_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return tile_hwtstamp_get(dev, rq);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index f7f2ef49c0c1..d899d0072ae0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1739,12 +1739,14 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
GELIC_CARD_PORT_STATUS_CHANGED;
- if (gelic_card_init_chain(card, &card->tx_chain,
- card->descr, GELIC_NET_TX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->tx_chain,
+ card->descr, GELIC_NET_TX_DESCRIPTORS);
+ if (result)
goto fail_alloc_tx;
- if (gelic_card_init_chain(card, &card->rx_chain,
- card->descr + GELIC_NET_TX_DESCRIPTORS,
- GELIC_NET_RX_DESCRIPTORS))
+ result = gelic_card_init_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
+ GELIC_NET_RX_DESCRIPTORS);
+ if (result)
goto fail_alloc_rx;
/* head of chain */
@@ -1754,7 +1756,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
card->rx_top, card->tx_top, sizeof(struct gelic_descr),
GELIC_NET_RX_DESCRIPTORS);
/* allocate rx skbs */
- if (gelic_card_alloc_rx_skbs(card))
+ result = gelic_card_alloc_rx_skbs(card);
+ if (result)
goto fail_alloc_skbs;
spin_lock_init(&card->tx_lock);
@@ -1772,7 +1775,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
}
#ifdef CONFIG_GELIC_WIRELESS
- if (gelic_wl_driver_probe(card)) {
+ result = gelic_wl_driver_probe(card);
+ if (result) {
dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
goto fail_setup_netdev;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 1322546d92ac..88e9c73cebc0 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -38,7 +38,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -1170,19 +1169,12 @@ static int tc35815_tx_full(struct net_device *dev)
static void tc35815_restart(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
+ int ret;
if (lp->phy_dev) {
- int timeout;
-
- phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
- timeout = 100;
- while (--timeout) {
- if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
- break;
- udelay(1);
- }
- if (!timeout)
- printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+ ret = phy_init_hw(lp->phy_dev);
+ if (ret)
+ printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}
spin_lock_bh(&lp->rx_lock);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c4dbf981804b..47eeb3abf7f7 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/net.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6d..4a03c594b2b1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -16,9 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc6b865..6ac20a6738f4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
dev_err(&pdev->dev,
"32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out;
+ goto err_out_pci_disable;
}
/* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out;
+ goto err_out_pci_disable;
}
pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out;
+ goto err_out_pci_disable;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1084,6 +1084,8 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
+err_out_pci_disable:
+ pci_disable_device(pdev);
err_out:
return rc;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2166e879a096..a4347508031c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f9293da19e26..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -22,11 +22,11 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -601,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+ ++lp->tx_bd_ci;
+ lp->tx_bd_ci %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
@@ -687,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -703,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
}
@@ -775,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
- lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+ ++lp->rx_bd_ci;
+ lp->rx_bd_ci %= RX_BD_NUM;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fefb8cd5eb65..36052b98b3fc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bdd20b888cf6..7c81ffb861e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* ALTERNATIVELY, this driver may be distributed under the terms of
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index bcc224a83734..25283f17d82f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -373,7 +373,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
@@ -417,6 +417,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ struct port *port = netdev_priv(netdev);
+
+ cfg.flags = 0;
+ cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (port->hwts_rx_en) {
+ case 0:
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case PTP_SLAVE_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ break;
+ case PTP_MASTER_MODE:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
@@ -959,8 +985,12 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(dev, req, cmd);
+ if (cpu_is_ixp46x()) {
+ if (cmd == SIOCSHWTSTAMP)
+ return hwtstamp_set(dev, req);
+ if (cmd == SIOCGHWTSTAMP)
+ return hwtstamp_get(dev, req);
+ }
return phy_mii_ioctl(port->phydev, req, cmd);
}
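Reviewer note: the ixp4xx change adds a SIOCGHWTSTAMP read-back path alongside the existing SIOCSHWTSTAMP handler. A hedged user-space sketch of what the new get request looks like from the caller's side; the interface name "eth0" is assumed, and the headers must be recent enough to define SIOCGHWTSTAMP.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&cfg;

	/* SIOCGHWTSTAMP reads the current timestamping config without changing it. */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}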
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 0b40e1c46f07..eb78203cd58e 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -206,7 +206,6 @@
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
@@ -241,12 +240,6 @@ static char version[] =
*/
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
-#ifdef CONFIG_PCI
-#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define DFX_BUS_PCI(dev) 0
-#endif
-
#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
@@ -436,7 +429,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
static void dfx_get_bars(struct device *bdev,
resource_size_t *bar_start, resource_size_t *bar_len)
{
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -518,7 +511,7 @@ static const struct net_device_ops dfx_netdev_ops = {
static int dfx_register(struct device *bdev)
{
static int version_disp;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
@@ -667,7 +660,7 @@ static void dfx_bus_init(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -813,7 +806,7 @@ static void dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
u8 val;
@@ -967,7 +960,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -1877,7 +1870,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
@@ -3579,7 +3572,7 @@ static void dfx_unregister(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
- int dfx_bus_pci = DFX_BUS_PCI(bdev);
+ int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
resource_size_t bar_start = 0; /* pointer to port */
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index f83993590174..7d3779ae7377 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -23,6 +23,7 @@
#include "h/smc.h"
#include "h/supern_2.h"
#include <linux/bitrev.h>
+#include <linux/etherdevice.h>
#ifndef lint
static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
@@ -55,14 +56,14 @@ static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
-#define CHECK_NPP() { unsigned k = 10000 ;\
+#define CHECK_NPP() { unsigned int k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
-#define CHECK_CAM() { unsigned k = 10 ;\
+#define CHECK_CAM() { unsigned int k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
@@ -356,25 +357,25 @@ static void set_formac_addr(struct s_smc *smc)
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
- outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
- outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
- outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
- outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+ outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
- outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+ outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
- outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+ outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
- outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
- outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+ outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
+ outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
}
static void set_int(char *p, int l)
@@ -394,10 +395,10 @@ static void set_int(char *p, int l)
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
- unsigned off, int len)
+ unsigned int off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
-/* unsigned off; start address within buffer memory */
+/* unsigned int off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
@@ -1082,7 +1083,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
slot = tb ;
continue ;
}
- if (memcmp((char *)&tb->a,(char *)own,6))
+ if (!ether_addr_equal((char *)&tb->a, (char *)own))
continue ;
return tb;
}
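Reviewer note: the fplustm.c hunk swaps a memcmp() for ether_addr_equal(); the sense is inverted (memcmp() returns 0 on a match, the helper returns true), which is why the `!` moves. A hedged stand-alone sketch of an equivalent comparison; mac_equal() below is a plain re-implementation for illustration, not the kernel's optimized helper.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Illustrative equivalent of the kernel helper: true when two 6-byte
 * MAC addresses match. The in-kernel versions are optimized, but the
 * semantics are the same. */
static bool mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Old style: if (memcmp(a, b, ETH_ALEN))  -> addresses differ.
	 * New style: if (!mac_equal(a, b))        -> addresses differ. */
	printf("match: %d\n", mac_equal(a, b));
	return 0;
}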
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f6..4ee360d2dc62 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -92,33 +92,33 @@
union rx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned rx_length :16 ; /* frame length lower/upper byte */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
#else
- unsigned rx_msvalid:1 ; /* memory status valid */
- unsigned rx_msrabt :1 ; /* memory status receive abort */
- unsigned rx_reserv1:1 ; /* reserved */
- unsigned rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
- unsigned rx_seac2 :1 ; /* frame-error E-indicator */
- unsigned rx_seac1 :1 ; /* address-match A-indicator */
- unsigned rx_seac0 :1 ; /* frame-copied C-indicator */
- unsigned rx_sfrmerr:1 ; /* received frame not valid */
- unsigned rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
- unsigned rx_sfrmty :3 ; /* frame type bits */
- unsigned rx_erfbb :2 ; /* received frame byte boundary */
- unsigned rx_reserv2:2 ; /* reserved */
- unsigned rx_length :16 ; /* frame length lower/upper byte */
+ unsigned int rx_msvalid:1 ; /* memory status valid */
+ unsigned int rx_msrabt :1 ; /* memory status receive abort */
+ unsigned int rx_reserv1:1 ; /* reserved */
+ unsigned int rx_ssrcrtg:1 ; /* == 1 SA has MSB set */
+ unsigned int rx_seac2 :1 ; /* frame-error E-indicator */
+ unsigned int rx_seac1 :1 ; /* address-match A-indicator */
+ unsigned int rx_seac0 :1 ; /* frame-copied C-indicator */
+ unsigned int rx_sfrmerr:1 ; /* received frame not valid */
+ unsigned int rx_sadrrg :1 ; /* DA == MA or broad-/multicast */
+ unsigned int rx_sfrmty :3 ; /* frame type bits */
+ unsigned int rx_erfbb :2 ; /* received frame byte boundary */
+ unsigned int rx_reserv2:2 ; /* reserved */
+ unsigned int rx_length :16 ; /* frame length lower/upper byte */
#endif
} r ;
long i ;
@@ -162,23 +162,23 @@ union rx_descr {
union tx_descr {
struct {
#ifdef LITTLE_ENDIAN
- unsigned tx_length:16 ; /* frame length lower/upper byte */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_more :1 ; /* more frame in chain */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_more :1 ; /* more frame in chain */
#else
- unsigned tx_more :1 ; /* more frame in chain */
- unsigned tx_pat1 :2 ; /* must be TXP1 */
- unsigned tx_rpxm :2 ; /* byte offset */
- unsigned tx_xdone :1 ; /* give up token */
- unsigned tx_nfcs :1 ; /* no frame check sequence */
- unsigned tx_xmtabt:1 ; /* transmit abort */
- unsigned tx_res :8 ; /* reserved (bit 16..23) */
- unsigned tx_length:16 ; /* frame length lower/upper byte */
+ unsigned int tx_more :1 ; /* more frame in chain */
+ unsigned int tx_pat1 :2 ; /* must be TXP1 */
+ unsigned int tx_rpxm :2 ; /* byte offset */
+ unsigned int tx_xdone :1 ; /* give up token */
+ unsigned int tx_nfcs :1 ; /* no frame check sequence */
+ unsigned int tx_xmtabt:1 ; /* transmit abort */
+ unsigned int tx_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tx_length:16 ; /* frame length lower/upper byte */
#endif
} t ;
long i ;
@@ -202,13 +202,13 @@ union tx_descr {
union tx_pointer {
struct t {
#ifdef LITTLE_ENDIAN
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
#else
- unsigned tp_pattern:8 ; /* fixed pattern (bit 24..31) */
- unsigned tp_res :8 ; /* reserved (bit 16..23) */
- unsigned tp_pointer:16 ; /* pointer to tx_descr (low/high) */
+ unsigned int tp_pattern:8 ; /* fixed pattern (bit 24..31) */
+ unsigned int tp_res :8 ; /* reserved (bit 16..23) */
+ unsigned int tp_pointer:16 ; /* pointer to tx_descr (low/high) */
#endif
} t ;
long i ;
diff --git a/drivers/net/fddi/skfp/h/targetos.h b/drivers/net/fddi/skfp/h/targetos.h
index 53bacc107160..355194251ff8 100644
--- a/drivers/net/fddi/skfp/h/targetos.h
+++ b/drivers/net/fddi/skfp/h/targetos.h
@@ -48,7 +48,6 @@
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
-#include <linux/init.h>
// is redefined by linux, but we need our definition
#undef ADDR
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 713d303a06a9..d5f58121b2e2 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -351,7 +351,6 @@ static void skfp_remove_one(struct pci_dev *pdev)
free_netdev(p);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12f..9edada85ed02 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -900,7 +900,7 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
rdf->version.v_pad2 = 0 ;
/* set P13 */
- if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+ if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header))
len = frame_len ;
else
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27a..cc27dea3414e 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -73,7 +73,7 @@ void smt_init_evc(struct s_smc *smc)
{
struct s_srf_evc *evc ;
const struct evc_init *init ;
- int i ;
+ unsigned int i ;
int index ;
int offset ;
@@ -84,7 +84,7 @@ void smt_init_evc(struct s_smc *smc)
evc = smc->evcs ;
init = evc_inits ;
- for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+ for (i = 0 ; i < MAX_INIT_EVC ; i++) {
for (index = 0 ; index < init->n ; index++) {
evc->evc_code = init->code ;
evc->evc_para = init->para ;
@@ -98,7 +98,7 @@ void smt_init_evc(struct s_smc *smc)
init++ ;
}
- if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+ if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
}
@@ -139,7 +139,7 @@ void smt_init_evc(struct s_smc *smc)
offset++ ;
}
#ifdef DEBUG
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (!evc->evc_cond_state) {
SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
@@ -160,10 +160,10 @@ void smt_init_evc(struct s_smc *smc)
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
{
- int i ;
+ unsigned int i ;
struct s_srf_evc *evc ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
return evc;
}
@@ -335,9 +335,9 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
static void clear_all_rep(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
evc->evc_rep_required = FALSE ;
if (SMT_IS_CONDITION(evc->evc_code))
*evc->evc_cond_state = FALSE ;
@@ -348,10 +348,10 @@ static void clear_all_rep(struct s_smc *smc)
static void clear_reported(struct s_smc *smc)
{
struct s_srf_evc *evc ;
- int i ;
+ unsigned int i ;
smc->srf.any_report = FALSE ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (*evc->evc_cond_state == FALSE)
evc->evc_rep_required = FALSE ;
@@ -375,7 +375,7 @@ static void smt_send_srf(struct s_smc *smc)
struct s_srf_evc *evc ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SMbuf *mb ;
- int i ;
+ unsigned int i ;
static const struct fddi_addr SMT_SRF_DA = {
{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
@@ -405,7 +405,7 @@ static void smt_send_srf(struct s_smc *smc)
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
- for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+ for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_rep_required) {
smt_add_para(smc,&pcon,evc->evc_para,
(int)evc->evc_index,0) ;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1450e33fc250..66e2b19ef709 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -662,7 +662,8 @@ static int sixpack_open(struct tty_struct *tty)
tty->receive_room = 65536;
/* Now we're ready to register. */
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free;
tnc_init(sp);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f91bf0ddf031..d50b23cf9ea9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -208,7 +208,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
eth = eth_hdr(skb);
if (!(bpq->acpt_addr[0] & 0x01) &&
- memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
+ !ether_addr_equal(eth->h_source, bpq->acpt_addr))
goto drop_unlock;
if (skb_cow(skb, sizeof(struct ethhdr)))
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8e01c457015b..8a6c720a4cc9 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -9,8 +9,7 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
* Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 00ed75155ce8..e580583f196d 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -213,10 +212,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
- if (pdev) {
+ if (pdev)
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- }
out2:
free_netdev(dev);
out3:
@@ -244,7 +241,6 @@ static void rr_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e6fe0d80d612..7b594ce3f21d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -12,8 +12,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -463,7 +462,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2b0480416b31..03a2c6e17158 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -137,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
if (net_device->recv_buf) {
/* Free up the receive buffer */
- free_pages((unsigned long)net_device->recv_buf,
- get_order(net_device->recv_buf_size));
+ vfree(net_device->recv_buf);
net_device->recv_buf = NULL;
}
@@ -164,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
return -ENODEV;
ndev = net_device->ndev;
- net_device->recv_buf =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(net_device->recv_buf_size));
+ net_device->recv_buf = vzalloc(net_device->recv_buf_size);
if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive "
"buffer of size %d\n", net_device->recv_buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 71baeb3ed905..d6fce9750b95 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -89,8 +88,12 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
int ret = 0;
+ netif_carrier_off(net);
+
/* Open up the device */
ret = rndis_filter_open(device_obj);
if (ret != 0) {
@@ -100,6 +103,11 @@ static int netvsc_open(struct net_device *net)
netif_start_queue(net);
+ nvdev = hv_get_drvdata(device_obj);
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(net);
+
return ret;
}
@@ -230,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct net_device *net;
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
net_device = hv_get_drvdata(device_obj);
+ rdev = net_device->extension;
+
+ rdev->link_state = status != 1;
+
net = net_device->ndev;
- if (!net) {
- netdev_err(net, "got link status but net device "
- "not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED)
return;
- }
+ ndev_ctx = netdev_priv(net);
if (status == 1) {
- netif_carrier_on(net);
- ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
- netif_carrier_off(net);
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
}
}
@@ -389,17 +398,35 @@ static const struct net_device_ops device_ops = {
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
* another netif_notify_peers() into a delayed work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
*/
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
+ bool notify;
+
+ rtnl_lock();
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rdev = net_device->extension;
net = net_device->ndev;
- netdev_notify_peers(net);
+
+ if (rdev->link_state) {
+ netif_carrier_off(net);
+ notify = false;
+ } else {
+ netif_carrier_on(net);
+ notify = true;
+ }
+
+ rtnl_unlock();
+
+ if (notify)
+ netdev_notify_peers(net);
}
@@ -415,13 +442,12 @@ static int netvsc_probe(struct hv_device *dev,
if (!net)
return -ENOMEM;
- /* Set initial state */
netif_carrier_off(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
- INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
net->netdev_ops = &device_ops;
@@ -444,13 +470,13 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- netif_carrier_on(net);
-
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
free_netdev(net);
+ } else {
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
}
return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..b54fd257652b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -11,8 +11,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
@@ -244,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev,
return ret;
}
+static void rndis_set_link_state(struct rndis_device *rdev,
+ struct rndis_request *request)
+{
+ u32 link_status;
+ struct rndis_query_complete *query_complete;
+
+ query_complete = &request->response_msg.msg.query_complete;
+
+ if (query_complete->status == RNDIS_STATUS_SUCCESS &&
+ query_complete->info_buflen == sizeof(u32)) {
+ memcpy(&link_status, (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset), sizeof(u32));
+ rdev->link_state = link_status != 0;
+ }
+}
+
static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_message *resp)
{
@@ -273,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp,
resp->msg_len);
+ if (request->request_msg.ndis_msg_type ==
+ RNDIS_MSG_QUERY && request->request_msg.msg.
+ query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
+ rndis_set_link_state(dev, request);
} else {
netdev_err(ndev,
"rndis response buffer overflow "
@@ -621,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
- dev->link_state = (link_status != 0) ? true : false;
return ret;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 2cbe1c249996..a30258aad139 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
int rc;
unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->irq_busy) {
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return -EBUSY;
}
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
might_sleep();
@@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work)
static irqreturn_t at86rf230_isr(int irq, void *data)
{
struct at86rf230_local *lp = data;
+ unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
lp->irq_busy = 1;
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
schedule_work(&lp->irqwork);
@@ -987,7 +988,6 @@ err_gpio_dir:
err_slp_tr:
gpio_free(lp->rstn);
err_rstn:
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
return rc;
@@ -1006,7 +1006,6 @@ static int at86rf230_remove(struct spi_device *spi)
gpio_free(lp->slp_tr);
gpio_free(lp->rstn);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
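Reviewer note: the at86rf230 hunks switch to spin_lock_irqsave() because the same lock is taken from the hard-IRQ handler; a plain spin_lock() in process context could deadlock if the interrupt fires on the same CPU while the lock is held. A hedged kernel-style sketch of the pattern (lock and flag names are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(state_lock);
static int irq_busy;

/* Process context: local interrupts must be disabled while holding the
 * lock, because my_isr() below also takes it. */
static int try_start_xmit(void)
{
	unsigned long flags;
	int busy;

	spin_lock_irqsave(&state_lock, flags);
	busy = irq_busy;
	spin_unlock_irqrestore(&state_lock, flags);

	return busy ? -EBUSY : 0;
}

static irqreturn_t my_isr(int irq, void *data)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	irq_busy = 1;
	spin_unlock_irqrestore(&state_lock, flags);

	return IRQ_HANDLED;
}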
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index c6e46d6e9f75..246befa4ba05 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -715,7 +715,6 @@ static int mrf24j40_remove(struct spi_device *spi)
* complete? */
/* Clean up the SPI stuff. */
- spi_set_drvdata(spi, NULL);
kfree(devrec->buf);
kfree(devrec);
return 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c14d39bf32d0..d7b2e947184b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
- dev->vlan_features |= IFB_FEATURES;
+ dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2a30193d0d50..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -62,8 +62,6 @@ config SIR_BFIN_PIO
bool "PIO mode"
endchoice
-comment "Dongle support"
-
config SH_SIR
tristate "SuperH SIR on UART"
depends on IRDA && SUPERH && \
@@ -74,6 +72,8 @@ config SH_SIR
Say Y here if your want to enable SIR function on SuperH UART
devices.
+comment "Dongle support"
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
-config EP7211_DONGLE
- tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
- help
- Say Y here if you want to build support for the Cirrus logic
- EP7211 chipset's infrared module.
-
config KSDAZZLE_DONGLE
tristate "KingSun Dazzle IrDA-USB dongle"
depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 7a1f684edcb5..5f91e3e01c04 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -15,11 +15,9 @@
* for more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc. All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn on the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn off the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon &= ~SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = clps711x_dongle_open,
- .close = clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
- return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
- return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
- .driver = {
- .name = "sir-clps711x",
- .owner = THIS_MODULE,
- },
- .probe = clps711x_sir_probe,
- .remove = clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
index a908df7c4b9d..019a3e848bcb 100644
--- a/drivers/net/irda/esi-sir.c
+++ b/drivers/net/irda/esi-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f9a86bdb12fa..925b78cc9797 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -58,7 +58,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
sirdev_put_instance(priv->dev);
/* Stop tty */
- irtty_stop_receiver(tty, TRUE);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty->ops->stop)
tty->ops->stop(tty);
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 7b4833874ef5..96fe3659012d 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -64,7 +64,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 5f3aeac3f86d..e6b3804edacd 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -116,7 +116,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 2d4b6a1ab202..37f23a189b35 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -80,7 +80,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index d6d9d2e5ad49..6827777cbeea 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index e91216452379..a9a81358477b 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 3f138ca88670..16f8ffb50e04 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -48,7 +48,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 75714bc71030..f237136f3827 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -22,9 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index ff45cd0d60e8..c96b46b2c3a8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 8d9ae5a086d5..cadf52e22464 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
+ err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 2a9930e6e2af..cfbabb63f5cc 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 0dcdf1592f6b..282120430f12 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -34,9 +34,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 317b7fd69bb3..4829fa22cb29 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -25,9 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 876e709b65ba..dd1bd1060ec9 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -41,7 +41,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 9abaec27f962..2900af091c2d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -17,8 +17,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
@@ -408,7 +407,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
}
@@ -442,7 +440,6 @@ static void via_remove_one(struct pci_dev *pdev)
if (self->rx_buff.head)
dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
- pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index f903a6a2dcb7..7ce820ecc361 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -18,8 +18,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
* Comment:
* jul/08/2002 : Rx buffer length should use Rx ring ptr.
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c5bd58b4d8a8..485006604bbc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -15,9 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
@@ -1695,7 +1693,6 @@ out_freedev:
out_disable:
pci_disable_device(pdev);
out:
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -1721,8 +1718,6 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
free_netdev(ndev);
- pci_set_drvdata(pdev, NULL);
-
IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index a076eb125349..56399204e68c 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ac24c27b4b2d..c5011e078e1b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -39,7 +39,6 @@
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index bc8faaec33f5..1831fb7cd017 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -120,7 +120,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return vlan->forward(dev, skb);
+ return dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +128,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return vlan->receive(skb);
+ return netif_rx(skb);
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -251,7 +251,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- ret = vlan->receive(skb);
+ ret = netif_rx(skb);
out:
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@ -290,8 +290,8 @@ xmit_world:
return dev_queue_xmit(skb);
}
-netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
@@ -305,7 +305,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
}
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct macvlan_pcpu_stats *pcpu_stats;
+ struct vlan_pcpu_stats *pcpu_stats;
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
@@ -317,7 +317,6 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
}
return ret;
}
-EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -507,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
+#define ALWAYS_ON_FEATURES \
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -540,19 +542,19 @@ static int macvlan_init(struct net_device *dev)
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= ALWAYS_ON_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct macvlan_pcpu_stats *mvlstats;
+ struct vlan_pcpu_stats *mvlstats;
mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
u64_stats_init(&mvlstats->syncp);
}
@@ -578,7 +580,7 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
struct macvlan_dev *vlan = netdev_priv(dev);
if (vlan->pcpu_stats) {
- struct macvlan_pcpu_stats *p;
+ struct vlan_pcpu_stats *p;
u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
@@ -700,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
features = netdev_increment_features(vlan->lowerdev->features,
features,
mask);
- features |= NETIF_F_LLTX;
+ features |= ALWAYS_ON_FEATURES;
return features;
}
@@ -814,10 +816,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
}
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- int (*receive)(struct sk_buff *skb),
- int (*forward)(struct net_device *dev,
- struct sk_buff *skb))
+ struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -831,13 +830,11 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (lowerdev == NULL)
return -ENODEV;
- /* When creating macvlans on top of other macvlans - use
+ /* When creating macvlans or macvtaps on top of other macvlans - use
* the real device as the lowerdev.
*/
- if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
- struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
- lowerdev = lowervlan->lowerdev;
- }
+ if (netif_is_macvlan(lowerdev))
+ lowerdev = macvlan_dev_real_dev(lowerdev);
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu;
@@ -861,8 +858,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
- vlan->receive = receive;
- vlan->forward = forward;
vlan->set_features = MACVLAN_FEATURES;
vlan->mode = MACVLAN_MODE_VEPA;
@@ -887,14 +882,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
- goto destroy_port;
-
+ goto unregister_netdev;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+unregister_netdev:
+ unregister_netdevice(dev);
destroy_port:
port->count -= 1;
if (!port->count)
@@ -907,9 +903,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_newlink);
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- return macvlan_common_newlink(src_net, dev, tb, data,
- netif_rx,
- dev_forward_skb);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2a89da080317..ff111a89e17f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
@@ -70,6 +69,11 @@ static const struct proto_ops macvtap_socket_ops;
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -219,7 +223,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
/* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
+ rxq = skb_get_hash(skb);
if (rxq) {
tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
goto out;
@@ -271,24 +275,27 @@ static void macvtap_del_queues(struct net_device *dev)
sock_put(&qlist[j]->sk);
}
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct macvlan_dev *vlan;
+ struct macvtap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ vlan = macvtap_get_vlan_rcu(dev);
+ if (!vlan)
+ return RX_HANDLER_PASS;
+
+ q = macvtap_get_queue(dev, skb);
if (!q)
- goto drop;
+ return RX_HANDLER_PASS;
if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
goto drop;
- skb->dev = dev;
+ skb_push(skb, ETH_HLEN);
+
/* Apply the forward feature mask so that we perform segmentation
* according to users wishes. This only works if VNET_HDR is
* enabled.
@@ -320,22 +327,13 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
wake_up:
wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
- return NET_RX_SUCCESS;
+ return RX_HANDLER_CONSUMED;
drop:
+ /* Count errors/drops only here, thus don't care about args. */
+ macvlan_count_rx(vlan, 0, 0, 0);
kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
-{
- skb_push(skb, ETH_HLEN);
- return macvtap_forward(skb->dev, skb);
+ return RX_HANDLER_CONSUMED;
}
static int macvtap_get_minor(struct macvlan_dev *vlan)
@@ -385,6 +383,8 @@ static int macvtap_newlink(struct net *src_net,
struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ int err;
+
INIT_LIST_HEAD(&vlan->queue_list);
/* Since macvlan supports all offloads by default, make
@@ -392,16 +392,20 @@ static int macvtap_newlink(struct net *src_net,
*/
vlan->tap_features = TUN_OFFLOADS;
+ err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+ if (err)
+ return err;
+
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- return macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
+ netdev_rx_handler_unregister(dev);
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -588,7 +592,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
return 0;
}
-static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
struct virtio_net_hdr *vnet_hdr)
{
memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -619,8 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
-
- return 0;
}
/* Get packet from user space buffer */
@@ -727,9 +729,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan) {
- local_bh_disable();
- macvlan_start_xmit(skb, vlan->dev);
- local_bh_enable();
+ skb->dev = vlan->dev;
+ dev_queue_xmit(skb);
} else {
kfree_skb(skb);
}
@@ -778,9 +779,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
- ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
- if (ret)
- return ret;
+ macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
@@ -824,7 +823,7 @@ done:
return ret ? ret : total;
}
-static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+static ssize_t macvtap_do_read(struct macvtap_queue *q,
const struct iovec *iv, unsigned long len,
int noblock)
{
@@ -875,7 +874,7 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -1109,7 +1108,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ ret = macvtap_do_read(q, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
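
The macvtap hunks above drop the old macvlan forward/receive callbacks and instead attach to the lower device through the generic rx_handler hook. A minimal sketch of that pattern follows; the "mydrv" names are hypothetical, and only the handler signature, the RX_HANDLER_* return values and the netdev_rx_handler_register()/netdev_rx_handler_unregister() calls are the real API.

        /* Sketch only, not the macvtap code itself. */
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        struct mydrv_priv {
                struct sk_buff_head rxq;        /* packets stolen from the stack */
        };

        static rx_handler_result_t mydrv_handle_frame(struct sk_buff **pskb)
        {
                struct sk_buff *skb = *pskb;
                struct mydrv_priv *priv = rcu_dereference(skb->dev->rx_handler_data);

                if (!priv)
                        return RX_HANDLER_PASS; /* not ours, let the stack keep it */

                /* Consume the packet, e.g. queue it for a character device. */
                skb_queue_tail(&priv->rxq, skb);
                return RX_HANDLER_CONSUMED;
        }

        static int mydrv_attach(struct net_device *dev, struct mydrv_priv *priv)
        {
                skb_queue_head_init(&priv->rxq);
                /* Must run under rtnl_lock(); fails if a handler is already set. */
                return netdev_rx_handler_register(dev, mydrv_handle_frame, priv);
        }

        static void mydrv_detach(struct net_device *dev)
        {
                netdev_rx_handler_unregister(dev);
        }

In the hunks above, macvtap registers its handler in macvtap_newlink() before macvlan_common_newlink() and unregisters it first thing in macvtap_dellink(), mirroring this attach/detach pairing.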
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 8403316eb02b..3e027ed0b3bb 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -342,34 +342,6 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
/**
- * mdio45_ethtool_spauseparam_an - set auto-negotiated pause parameters
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * This function assumes that the PHY has an auto-negotiation MMD. It
- * will enable and disable advertising of flow control as appropriate.
- */
-void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
- const struct ethtool_pauseparam *ecmd)
-{
- int adv, old_adv;
-
- WARN_ON(!(mdio->mmds & MDIO_DEVS_AN));
-
- old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE);
- adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
- mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
- (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
- if (adv != old_adv) {
- mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, adv);
- mdio45_nway_restart(mdio);
- }
-}
-EXPORT_SYMBOL(mdio45_ethtool_spauseparam_an);
-
-/**
* mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
* @mdio: MDIO interface
* @mii_data: MII ioctl data structure
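
The deleted mdio45_ethtool_spauseparam_an() above rewrote the clause-45 autonegotiation advertisement register from an ethtool pause request. Its core computation, restated as a standalone hedged sketch for reference; the helper name and parameters here are illustrative, only mii_advertise_flowctrl(), the FLOW_CTRL_* flags and the ADVERTISE_PAUSE_* bits come from <linux/mii.h>.

        #include <linux/ethtool.h>
        #include <linux/mii.h>

        static u16 pause_advert_word(u16 old_adv, const struct ethtool_pauseparam *pp)
        {
                int cap = (pp->rx_pause ? FLOW_CTRL_RX : 0) |
                          (pp->tx_pause ? FLOW_CTRL_TX : 0);

                /* Replace only the two pause bits, keep the rest as advertised. */
                return (old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
                       mii_advertise_flowctrl(cap);
        }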
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 313a0377f68f..b57ce0cc9657 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -92,8 +92,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_CIS8201_IMASK,
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
else
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 383e8338ad86..d2c08f625a41 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -72,7 +72,7 @@ static int dm9161_config_intr(struct phy_device *phydev)
if (temp < 0)
return temp;
- if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+ if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
temp &= ~(MII_DM9161_INTR_STOP);
else
temp |= MII_DM9161_INTR_STOP;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7490b6c866e6..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
- evnt |= EVNT_RISE;
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ evnt |= EVNT_FALL;
+ else
+ evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
@@ -851,8 +854,8 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- return (rxts->msgtype == (*msgtype & 0xf) &&
- rxts->seqid == ntohs(*seqid));
+ return rxts->msgtype == (*msgtype & 0xf) &&
+ rxts->seqid == ntohs(*seqid);
}
static void dp83640_free_clocks(void)
@@ -1003,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
} else
list_add_tail(&dp83640->list, &clock->phylist);
- if (clock->chosen && !list_empty(&clock->phylist))
- recalibrate(clock);
- else
- enable_broadcast(dp83640->phydev, clock->page, 1);
-
dp83640_clock_put(clock);
return 0;
@@ -1058,6 +1056,21 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_config_init(struct phy_device *phydev)
+{
+ struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_clock *clock = dp83640->clock;
+
+ if (clock->chosen && !list_empty(&clock->phylist))
+ recalibrate(clock);
+ else
+ enable_broadcast(phydev, clock->page, 1);
+
+ enable_status_frames(phydev, true);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ return 0;
+}
+
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1208,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
- enable_status_frames(phydev, true);
- ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
- }
-
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
@@ -1281,6 +1289,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
/* fall through */
case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
@@ -1330,6 +1339,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .config_init = dp83640_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
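
The ptp_dp83640_enable() hunk above makes the external-timestamp pin honour PTP_FALLING_EDGE from the request flags instead of always arming the rising edge. A hedged user-space sketch of exercising that path through the PTP character device follows; the /dev/ptp0 path and channel index 0 are assumptions for illustration, while struct ptp_extts_request, the PTP_* flags and the PTP_EXTTS_REQUEST ioctl are the standard UAPI from <linux/ptp_clock.h>.

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/ptp_clock.h>

        int main(void)
        {
                struct ptp_extts_request req;
                int fd = open("/dev/ptp0", O_RDWR);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                memset(&req, 0, sizeof(req));
                req.index = 0;                  /* external timestamp channel 0 */
                req.flags = PTP_ENABLE_FEATURE | PTP_FALLING_EDGE;

                if (ioctl(fd, PTP_EXTTS_REQUEST, &req)) {
                        perror("PTP_EXTTS_REQUEST");
                        close(fd);
                        return 1;
                }

                /* Timestamps are then read() back as struct ptp_extts_event records. */
                close(fd);
                return 0;
        }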
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index b5ddd5077a80..97bf58bf4939 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
static int ip175c_config_init(struct phy_device *phydev)
{
int err, i;
- static int full_reset_performed = 0;
+ static int full_reset_performed;
if (full_reset_performed == 0) {
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ff2e45e9cb54..9108f3191701 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -82,7 +82,7 @@ static int lxt970_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
err = phy_write(phydev, MII_LXT970_IER, 0);
@@ -114,7 +114,7 @@ static int lxt971_config_intr(struct phy_device *phydev)
{
int err;
- if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
err = phy_write(phydev, MII_LXT971_IER, 0);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e3c778ea9bf..bd37e45c89c0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -894,6 +894,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -907,6 +909,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -920,6 +924,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -933,6 +939,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = {.owner = THIS_MODULE,},
},
{
@@ -946,6 +954,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -961,6 +971,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.get_wol = &m88e1318_get_wol,
.set_wol = &m88e1318_set_wol,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -974,6 +986,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -987,6 +1001,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1000,6 +1016,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1013,6 +1031,8 @@ static struct phy_driver marvell_drivers[] = {
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
{
@@ -1026,6 +1046,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
};
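
The marvell.c hunks above simply wire the generic helpers into each phy_driver entry. Roughly, genphy_suspend() parks the PHY by setting BMCR_PDOWN and genphy_resume() clears it again under the phydev lock; the sketch below is an illustrative restatement of that behaviour (the function name is made up, see phy_device.c for the authoritative helpers).

        #include <linux/mii.h>
        #include <linux/phy.h>

        static int example_phy_power(struct phy_device *phydev, bool down)
        {
                int bmcr;

                mutex_lock(&phydev->lock);
                bmcr = phy_read(phydev, MII_BMCR);
                if (bmcr < 0) {
                        mutex_unlock(&phydev->lock);
                        return bmcr;
                }
                if (down)
                        bmcr |= BMCR_PDOWN;     /* what genphy_suspend() amounts to */
                else
                        bmcr &= ~BMCR_PDOWN;    /* what genphy_resume() amounts to */
                bmcr = phy_write(phydev, MII_BMCR, bmcr);
                mutex_unlock(&phydev->lock);

                return bmcr;
        }

With .suspend/.resume populated, the mdio bus PM callbacks and the phy_suspend()/phy_resume() wrappers added later in this diff can actually power these PHYs down when the attached netdev cannot wake the system.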
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 8004acbef2c9..e701433bf52f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index a5741cb0304e..f1fc51f655d9 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -8,7 +8,6 @@
*/
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index d2dd9e473e2c..096695163491 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -10,7 +10,6 @@
#include <linux/device.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/of_gpio.h>
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index f8e305d8da76..1656785ff339 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -15,7 +15,6 @@
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 6aee02ed97ac..a51ed92fbada 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -10,7 +10,6 @@
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 18969b3ad8bb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -13,7 +13,6 @@
*/
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -171,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-mdio" },
+
+ /* Deprecated */
{ .compatible = "allwinner,sun4i-mdio" },
{ }
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 56178761ce93..71e49000fbf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/mdio_bus.c
- *
- * MDIO Bus interface
+/* MDIO Bus interface
*
* Author: Andy Fleming
*
@@ -36,10 +33,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* mdiobus_alloc_size - allocate a mii_bus structure
@@ -139,8 +136,7 @@ int mdiobus_register(struct mii_bus *bus)
int i, err;
if (NULL == bus || NULL == bus->name ||
- NULL == bus->read ||
- NULL == bus->write)
+ NULL == bus->read || NULL == bus->write)
return -EINVAL;
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
@@ -154,6 +150,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
+ put_device(&bus->dev);
return -EINVAL;
}
@@ -214,9 +211,7 @@ EXPORT_SYMBOL(mdiobus_unregister);
*/
void mdiobus_free(struct mii_bus *bus)
{
- /*
- * For compatibility with error handling in drivers.
- */
+ /* For compatibility with error handling in drivers. */
if (bus->state == MDIOBUS_ALLOCATED) {
kfree(bus);
return;
@@ -316,8 +311,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
if (phydrv->match_phy_device)
return phydrv->match_phy_device(phydev);
- return ((phydrv->phy_id & phydrv->phy_id_mask) ==
- (phydev->phy_id & phydrv->phy_id_mask));
+ return (phydrv->phy_id & phydrv->phy_id_mask) ==
+ (phydev->phy_id & phydrv->phy_id_mask);
}
#ifdef CONFIG_PM
@@ -335,15 +330,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return true;
- /*
- * Don't suspend PHY if the attched netdev parent may wakeup.
+ /* Don't suspend PHY if the attached netdev parent may wakeup.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
return false;
- /*
- * Also don't suspend PHY if the netdev itself may wakeup. This
+ /* Also don't suspend PHY if the netdev itself may wakeup. This
* is the case for devices w/o underlaying pwr. mgmt. aware bus,
* e.g. SoC devices.
*/
@@ -358,8 +351,7 @@ static int mdio_bus_suspend(struct device *dev)
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
- /*
- * We must stop the state machine manually, otherwise it stops out of
+ /* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
@@ -388,7 +380,7 @@ static int mdio_bus_resume(struct device *dev)
no_resume:
if (phydev->attached_dev && phydev->adjust_link)
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
@@ -410,12 +402,12 @@ static int mdio_bus_restore(struct device *dev)
phydev->link = 0;
phydev->state = PHY_UP;
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
return 0;
}
-static struct dev_pm_ops mdio_bus_pm_ops = {
+static const struct dev_pm_ops mdio_bus_pm_ops = {
.suspend = mdio_bus_suspend,
.resume = mdio_bus_resume,
.freeze = mdio_bus_suspend,
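
The mdiobus_register() hunk above adds a put_device() on the device_register() error path. That follows the general driver-core rule: once device_register() has been called, the embedded kobject owns the structure, so a failed registration must be released with put_device() (which invokes the release callback) rather than freed directly. A generic hedged sketch of the idiom, with illustrative names not taken from mdio_bus.c:

        #include <linux/device.h>
        #include <linux/slab.h>

        struct my_bus {
                struct device dev;
        };

        static void my_bus_release(struct device *dev)
        {
                kfree(container_of(dev, struct my_bus, dev));
        }

        static struct my_bus *my_bus_create(struct device *parent)
        {
                struct my_bus *bus = kzalloc(sizeof(*bus), GFP_KERNEL);

                if (!bus)
                        return NULL;

                bus->dev.parent = parent;
                bus->dev.release = my_bus_release;
                dev_set_name(&bus->dev, "my_bus");

                if (device_register(&bus->dev)) {
                        put_device(&bus->dev);  /* release() frees the memory */
                        return NULL;
                }
                return bus;
        }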
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 26fa05a472b4..5a8993b0cafc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -81,14 +81,14 @@ static int ksz_config_flags(struct phy_device *phydev)
}
static int kszphy_extended_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ u32 regnum, u16 val)
{
phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
}
static int kszphy_extended_read(struct phy_device *phydev,
- u32 regnum)
+ u32 regnum)
{
phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 98434b84f041..76d96b9ebcdb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy.c
- *
- * Framework for configuring and reading PHY devices
+/* Framework for configuring and reading PHY devices
* Based on code in sungem_phy.c and gianfar_phy.c
*
* Author: Andy Fleming
@@ -23,7 +20,6 @@
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -36,11 +32,11 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
-
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
-#include <asm/io.h>
+
#include <asm/irq.h>
-#include <asm/uaccess.h>
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -48,13 +44,14 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- if (phydev->link)
+ if (phydev->link) {
pr_info("%s - Link is Up - %d/%s\n",
dev_name(&phydev->dev),
phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
+ } else {
pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ }
}
EXPORT_SYMBOL(phy_print_status);
@@ -69,12 +66,10 @@ EXPORT_SYMBOL(phy_print_status);
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
- int err = 0;
-
if (phydev->drv->ack_interrupt)
- err = phydev->drv->ack_interrupt(phydev);
+ return phydev->drv->ack_interrupt(phydev);
- return err;
+ return 0;
}
/**
@@ -86,13 +81,11 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
- int err = 0;
-
phydev->interrupts = interrupts;
if (phydev->drv->config_intr)
- err = phydev->drv->config_intr(phydev);
+ return phydev->drv->config_intr(phydev);
- return err;
+ return 0;
}
@@ -106,15 +99,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval;
-
- retval = phy_read(phydev, MII_BMSR);
+ int retval = phy_read(phydev, MII_BMSR);
return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
/* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value */
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
struct phy_setting {
int speed;
int duplex;
@@ -172,13 +164,12 @@ static const struct phy_setting settings[] = {
* of that setting. Returns the index of the last setting if
* none of the others match.
*/
-static inline int phy_find_setting(int speed, int duplex)
+static inline unsigned int phy_find_setting(int speed, int duplex)
{
- int idx = 0;
+ unsigned int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
- (settings[idx].speed != speed ||
- settings[idx].duplex != duplex))
+ (settings[idx].speed != speed || settings[idx].duplex != duplex))
idx++;
return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@ -194,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex)
* the mask in features. Returns the index of the last setting
* if nothing else matches.
*/
-static inline int phy_find_valid(int idx, u32 features)
+static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
idx++;
@@ -213,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features)
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
- int idx;
+ unsigned int idx;
/* Sanitize settings based on PHY capabilities */
if ((features & SUPPORTED_Autoneg) == 0)
@@ -245,8 +236,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
if (cmd->phy_address != phydev->addr)
return -EINVAL;
- /* We make sure that we don't pass unsupported
- * values in to the PHY */
+ /* We make sure that we don't pass unsupported values in to the PHY */
cmd->advertising &= phydev->supported;
/* Verify the settings we care about. */
@@ -289,6 +279,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->supported = phydev->supported;
cmd->advertising = phydev->advertising;
+ cmd->lp_advertising = phydev->lp_advertising;
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
@@ -312,8 +303,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
* PHYCONTROL layer. It changes registers without regard to
* current state. Use at own risk.
*/
-int phy_mii_ioctl(struct phy_device *phydev,
- struct ifreq *ifr, int cmd)
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
@@ -326,25 +316,24 @@ int phy_mii_ioctl(struct phy_device *phydev,
case SIOCGMIIREG:
mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
mii_data->reg_num);
- break;
+ return 0;
case SIOCSMIIREG:
if (mii_data->phy_id == phydev->addr) {
- switch(mii_data->reg_num) {
+ switch (mii_data->reg_num) {
case MII_BMCR:
- if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+ if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
phydev->autoneg = AUTONEG_DISABLE;
else
phydev->autoneg = AUTONEG_ENABLE;
- if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+ if (!phydev->autoneg && (val & BMCR_FULLDPLX))
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if ((!phydev->autoneg) &&
- (val & BMCR_SPEED1000))
+ if (!phydev->autoneg && (val & BMCR_SPEED1000))
phydev->speed = SPEED_1000;
- else if ((!phydev->autoneg) &&
- (val & BMCR_SPEED100))
+ else if (!phydev->autoneg &&
+ (val & BMCR_SPEED100))
phydev->speed = SPEED_100;
break;
case MII_ADVERTISE:
@@ -360,12 +349,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
mii_data->reg_num, val);
if (mii_data->reg_num == MII_BMCR &&
- val & BMCR_RESET &&
- phydev->drv->config_init) {
- phy_scan_fixups(phydev);
- phydev->drv->config_init(phydev);
- }
- break;
+ val & BMCR_RESET)
+ return phy_init_hw(phydev);
+ return 0;
case SIOCSHWTSTAMP:
if (phydev->drv->hwtstamp)
@@ -375,8 +361,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
EXPORT_SYMBOL(phy_mii_ioctl);
@@ -399,7 +383,6 @@ int phy_start_aneg(struct phy_device *phydev)
phy_sanitize_settings(phydev);
err = phydev->drv->config_aneg(phydev);
-
if (err < 0)
goto out_unlock;
@@ -419,25 +402,18 @@ out_unlock:
}
EXPORT_SYMBOL(phy_start_aneg);
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
- * @handler: callback function for state change notifications
*
* Description: The PHY infrastructure can run a state machine
* which tracks whether the PHY is starting up, negotiating,
* etc. This function starts the timer which tracks the state
- * of the PHY. If you want to be notified when the state changes,
- * pass in the callback @handler, otherwise, pass NULL. If you
- * want to maintain your own state machine, do not call this
- * function.
+ * of the PHY. If you want to maintain your own state machine,
+ * do not call this function.
*/
-void phy_start_machine(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+void phy_start_machine(struct phy_device *phydev)
{
- phydev->adjust_state = handler;
-
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
@@ -457,8 +433,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- phydev->adjust_state = NULL;
}
/**
@@ -495,7 +469,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
/* The MDIO bus is not allowed to be written in interrupt
* context, so we need to disable the irq here. A work
* queue will write the PHY to disable and clear the
- * interrupt, and then reenable the irq line. */
+ * interrupt, and then reenable the irq line.
+ */
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
@@ -510,16 +485,12 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
*/
static int phy_enable_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_clear_interrupt(phydev);
+ int err = phy_clear_interrupt(phydev);
if (err < 0)
return err;
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- return err;
+ return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}
/**
@@ -532,13 +503,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
/* Disable PHY interrupts */
err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-
if (err)
goto phy_err;
/* Clear the interrupt */
err = phy_clear_interrupt(phydev);
-
if (err)
goto phy_err;
@@ -562,8 +531,6 @@ phy_err:
*/
int phy_start_interrupts(struct phy_device *phydev)
{
- int err = 0;
-
atomic_set(&phydev->irq_disable, 0);
if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
phydev) < 0) {
@@ -573,9 +540,7 @@ int phy_start_interrupts(struct phy_device *phydev)
return 0;
}
- err = phy_enable_interrupts(phydev);
-
- return err;
+ return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
@@ -585,24 +550,20 @@ EXPORT_SYMBOL(phy_start_interrupts);
*/
int phy_stop_interrupts(struct phy_device *phydev)
{
- int err;
-
- err = phy_disable_interrupts(phydev);
+ int err = phy_disable_interrupts(phydev);
if (err)
phy_error(phydev);
free_irq(phydev->irq, phydev);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but we do not really care about what would
* be done, except from enable_irq(), so cancel any work
* possibly pending and take care of the matter below.
*/
cancel_work_sync(&phydev->phy_queue);
- /*
- * If work indeed has been cancelled, disable_irq() will have
+ /* If work indeed has been cancelled, disable_irq() will have
* been left unbalanced from phy_interrupt() and enable_irq()
* has to be called so that other devices on the line work.
*/
@@ -613,14 +574,12 @@ int phy_stop_interrupts(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_stop_interrupts);
-
/**
* phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
* @work: work_struct that describes the work to be done
*/
void phy_change(struct work_struct *work)
{
- int err;
struct phy_device *phydev =
container_of(work, struct phy_device, phy_queue);
@@ -628,9 +587,7 @@ void phy_change(struct work_struct *work)
!phydev->drv->did_interrupt(phydev))
goto ignore;
- err = phy_disable_interrupts(phydev);
-
- if (err)
+ if (phy_disable_interrupts(phydev))
goto phy_err;
mutex_lock(&phydev->lock);
@@ -642,16 +599,13 @@ void phy_change(struct work_struct *work)
enable_irq(phydev->irq);
/* Reenable interrupts */
- if (PHY_HALTED != phydev->state)
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
- if (err)
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
goto irq_enable_err;
/* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-
return;
ignore:
@@ -690,13 +644,12 @@ void phy_stop(struct phy_device *phydev)
out_unlock:
mutex_unlock(&phydev->lock);
- /*
- * Cannot call flush_scheduled_work() here as desired because
+ /* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
* will not reenable interrupts.
*/
}
-
+EXPORT_SYMBOL(phy_stop);
/**
* phy_start - start or restart a PHY device
@@ -713,20 +666,19 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
- case PHY_READY:
- phydev->state = PHY_UP;
- break;
- case PHY_HALTED:
- phydev->state = PHY_RESUMING;
- default:
- break;
+ case PHY_STARTING:
+ phydev->state = PHY_PENDING;
+ break;
+ case PHY_READY:
+ phydev->state = PHY_UP;
+ break;
+ case PHY_HALTED:
+ phydev->state = PHY_RESUMING;
+ default:
+ break;
}
mutex_unlock(&phydev->lock);
}
-EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
/**
@@ -738,160 +690,132 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0;
+ int needs_aneg = 0, do_suspend = 0;
int err = 0;
mutex_lock(&phydev->lock);
- if (phydev->adjust_state)
- phydev->adjust_state(phydev->attached_dev);
+ switch (phydev->state) {
+ case PHY_DOWN:
+ case PHY_STARTING:
+ case PHY_READY:
+ case PHY_PENDING:
+ break;
+ case PHY_UP:
+ needs_aneg = 1;
- switch(phydev->state) {
- case PHY_DOWN:
- case PHY_STARTING:
- case PHY_READY:
- case PHY_PENDING:
- break;
- case PHY_UP:
- needs_aneg = 1;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
- phydev->link_timeout = PHY_AN_TIMEOUT;
+ break;
+ case PHY_AN:
+ err = phy_read_status(phydev);
+ if (err < 0)
+ break;
+ /* If the link is down, give up on negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
break;
- case PHY_AN:
- err = phy_read_status(phydev);
+ }
- if (err < 0)
- break;
+ /* Check if negotiation is done. Break if there's an error */
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
- /* If the link is down, give up on
- * negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- break;
- }
+ /* If AN is done, we're running */
+ if (err > 0) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
- /* Check if negotiation is done. Break
- * if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
+ } else if (0 == phydev->link_timeout--) {
+ needs_aneg = 1;
+ /* If we have the magic_aneg bit, we try again */
+ if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
-
- } else if (0 == phydev->link_timeout--) {
- needs_aneg = 1;
- /* If we have the magic_aneg bit,
- * we try again */
- if (phydev->drv->flags & PHY_HAS_MAGICANEG)
- break;
- }
+ }
+ break;
+ case PHY_NOLINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_NOLINK:
- err = phy_read_status(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
+ case PHY_FORCING:
+ err = genphy_update_link(phydev);
+ if (err)
break;
- case PHY_FORCING:
- err = genphy_update_link(phydev);
-
- if (err)
- break;
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- if (0 == phydev->link_timeout--)
- needs_aneg = 1;
- }
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ if (0 == phydev->link_timeout--)
+ needs_aneg = 1;
+ }
- phydev->adjust_link(phydev->attached_dev);
- break;
- case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
- */
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ case PHY_RUNNING:
+ /* Only register a CHANGE if we are
+ * polling or ignoring interrupts
+ */
+ if (!phy_interrupt_is_valid(phydev))
+ phydev->state = PHY_CHANGELINK;
+ break;
+ case PHY_CHANGELINK:
+ err = phy_read_status(phydev);
+ if (err)
break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ }
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else {
- phydev->state = PHY_NOLINK;
- netif_carrier_off(phydev->attached_dev);
- }
+ phydev->adjust_link(phydev->attached_dev);
+ if (phy_interrupt_is_valid(phydev))
+ err = phy_config_interrupt(phydev,
+ PHY_INTERRUPT_ENABLED);
+ break;
+ case PHY_HALTED:
+ if (phydev->link) {
+ phydev->link = 0;
+ netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
-
- if (phy_interrupt_is_valid(phydev))
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
- break;
- case PHY_HALTED:
- if (phydev->link) {
- phydev->link = 0;
- netif_carrier_off(phydev->attached_dev);
- phydev->adjust_link(phydev->attached_dev);
- }
+ do_suspend = 1;
+ }
+ break;
+ case PHY_RESUMING:
+ err = phy_clear_interrupt(phydev);
+ if (err)
break;
- case PHY_RESUMING:
-
- err = phy_clear_interrupt(phydev);
- if (err)
- break;
-
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+ if (err)
+ break;
- if (err)
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ err = phy_aneg_done(phydev);
+ if (err < 0)
break;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're
- * still waiting for AN */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- netif_carrier_on(phydev->attached_dev);
- } else
- phydev->state = PHY_NOLINK;
- phydev->adjust_link(phydev->attached_dev);
- } else {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
+ /* err > 0 if AN is done.
+ * Otherwise, it's 0, and we're still waiting for AN
+ */
+ if (err > 0) {
err = phy_read_status(phydev);
if (err)
break;
@@ -899,11 +823,28 @@ void phy_state_machine(struct work_struct *work)
if (phydev->link) {
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
- } else
+ } else {
phydev->state = PHY_NOLINK;
+ }
phydev->adjust_link(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
}
- break;
+ } else {
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ }
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
}
mutex_unlock(&phydev->lock);
@@ -911,11 +852,14 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
+ if (do_suspend)
+ phy_suspend(phydev);
+
if (err < 0)
phy_error(phydev);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- PHY_STATE_TIME * HZ);
+ PHY_STATE_TIME * HZ);
}
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@ -957,14 +901,10 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
int addr)
{
- u32 ret;
-
mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
- ret = bus->read(bus, addr, MII_MMD_DATA);
-
- return ret;
+ return bus->read(bus, addr, MII_MMD_DATA);
}
/**
@@ -1004,8 +944,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
- int ret = -EPROTONOSUPPORT;
-
/* According to 802.3az,the EEE is supported only in full duplex-mode.
* Also EEE feature is active when core is operating with MII, GMII
* or RGMII.
@@ -1016,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
(phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
- int idx, status;
+ int status;
+ unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1031,7 +970,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
- goto eee_exit;
+ return -EPROTONOSUPPORT;
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
@@ -1050,7 +989,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
- goto eee_exit;
+ return -EPROTONOSUPPORT;
if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
@@ -1067,11 +1006,10 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
MDIO_MMD_PCS, phydev->addr, val);
}
- ret = 0; /* EEE supported */
+ return 0; /* EEE supported */
}
-eee_exit:
- return ret;
+ return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
@@ -1086,7 +1024,6 @@ int phy_get_eee_err(struct phy_device *phydev)
{
return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
MDIO_MMD_PCS, phydev->addr);
-
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1136,9 +1073,8 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val;
+ int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
- val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
phydev->addr, val);
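
The phy.c hunks above remove the notification callback from phy_start_machine() and drop the adjust_state hook from the state machine, so link-change notification now flows exclusively through the adjust_link handler a MAC driver hands to phy_connect(). A hedged sketch of the caller side after this change; the device names, bus_id string and handler are made up, while phy_connect(), phy_start() and the no-argument phy_start_machine() are the library calls shown in the diff.

        #include <linux/netdevice.h>
        #include <linux/phy.h>

        static void mymac_adjust_link(struct net_device *ndev)
        {
                struct phy_device *phydev = ndev->phydev;

                if (phydev->link)
                        netdev_info(ndev, "link up, %d/%s\n", phydev->speed,
                                    phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
                else
                        netdev_info(ndev, "link down\n");
        }

        static int mymac_open(struct net_device *ndev)
        {
                struct phy_device *phydev;

                /* adjust_link is the only state-change notification left */
                phydev = phy_connect(ndev, "mdio-bus:01", mymac_adjust_link,
                                     PHY_INTERFACE_MODE_RGMII);
                if (IS_ERR(phydev))
                        return PTR_ERR(phydev);

                phy_start(phydev);      /* phy_connect() already started the machine */
                return 0;
        }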
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d6447b3f7409..2f6989b1e0dc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy_device.c
- *
- * Framework for finding and configuring PHYs.
+/* Framework for finding and configuring PHYs.
* Also contains generic PHY driver
*
* Author: Andy Fleming
@@ -33,10 +30,11 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
@@ -53,31 +51,31 @@ static void phy_device_release(struct device *dev)
kfree(to_phy_device(dev));
}
-static struct phy_driver genphy_driver;
-extern int mdio_bus_init(void);
-extern void mdio_bus_exit(void);
+enum genphy_driver {
+ GENPHY_DRV_1G,
+ GENPHY_DRV_10G,
+ GENPHY_DRV_MAX
+};
+
+static struct phy_driver genphy_driver[GENPHY_DRV_MAX];
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface);
-
-/*
- * Creates a new phy_fixup and adds it to the list
+/**
+ * phy_register_fixup - creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
* @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
- * It can also be PHY_ANY_UID
+ * It can also be PHY_ANY_UID
* @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
- * comparison
+ * comparison
* @run: The actual code to be run when a matching PHY is found
*/
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
- struct phy_fixup *fixup;
+ struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
- fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
if (!fixup)
return -ENOMEM;
@@ -96,7 +94,7 @@ EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
}
@@ -104,14 +102,13 @@ EXPORT_SYMBOL(phy_register_fixup_for_uid);
/* Registers a fixup to be run on the PHY with id string bus_id */
int phy_register_fixup_for_id(const char *bus_id,
- int (*run)(struct phy_device *))
+ int (*run)(struct phy_device *))
{
return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
}
EXPORT_SYMBOL(phy_register_fixup_for_id);
-/*
- * Returns 1 if fixup matches phydev in bus_id and phy_uid.
+/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
* Fixups can be set to match any in one or more fields.
*/
static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
@@ -121,7 +118,7 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
return 0;
if ((fixup->phy_uid & fixup->phy_uid_mask) !=
- (phydev->phy_id & fixup->phy_uid_mask))
+ (phydev->phy_id & fixup->phy_uid_mask))
if (fixup->phy_uid != PHY_ANY_UID)
return 0;
@@ -129,16 +126,14 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
}
/* Runs any matching fixups for this phydev */
-int phy_scan_fixups(struct phy_device *phydev)
+static int phy_scan_fixups(struct phy_device *phydev)
{
struct phy_fixup *fixup;
mutex_lock(&phy_fixup_lock);
list_for_each_entry(fixup, &phy_fixup_list, list) {
if (phy_needs_fixup(phydev, fixup)) {
- int err;
-
- err = fixup->run(phydev);
+ int err = fixup->run(phydev);
if (err < 0) {
mutex_unlock(&phy_fixup_lock);
@@ -150,25 +145,24 @@ int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
- bool is_c45, struct phy_c45_device_ids *c45_ids)
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
- /* We allocate the device, and initialize the
- * default values */
+ /* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-
if (NULL == dev)
- return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
+ return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
dev->dev.release = phy_device_release;
dev->speed = 0;
dev->duplex = -1;
- dev->pause = dev->asym_pause = 0;
+ dev->pause = 0;
+ dev->asym_pause = 0;
dev->link = 1;
dev->interface = PHY_INTERFACE_MODE_GMII;
@@ -192,14 +186,15 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
INIT_WORK(&dev->phy_queue, phy_change);
/* Request the appropriate module unconditionally; don't
- bother trying to do so only if it isn't already loaded,
- because that gets complicated. A hotplug event would have
- done an unconditional modprobe anyway.
- We don't do normal hotplug because it won't work for MDIO
- -- because it relies on the device staying around for long
- enough for the driver to get loaded. With MDIO, the NIC
- driver will get bored and give up as soon as it finds that
- there's no driver _already_ loaded. */
+ * bother trying to do so only if it isn't already loaded,
+ * because that gets complicated. A hotplug event would have
+ * done an unconditional modprobe anyway.
+ * We don't do normal hotplug because it won't work for MDIO
+ * -- because it relies on the device staying around for long
+ * enough for the driver to get loaded. With MDIO, the NIC
+ * driver will get bored and give up as soon as it finds that
+ * there's no driver _already_ loaded.
+ */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
device_initialize(&dev->dev);
@@ -299,10 +294,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
if (is_c45)
return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
- /* Grab the bits from PHYIR1, and put them
- * in the upper half */
+ /* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
-
if (phy_reg < 0)
return -EIO;
@@ -310,7 +303,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-
if (phy_reg < 0)
return -EIO;
@@ -320,7 +312,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
}
/**
- * get_phy_device - reads the specified PHY device and returns its @phy_device struct
+ * get_phy_device - reads the specified PHY device and returns its @phy_device
+ * struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @is_c45: If true the PHY uses the 802.3 clause 45 protocol
@@ -331,7 +324,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
struct phy_c45_device_ids c45_ids = {0};
- struct phy_device *dev = NULL;
u32 phy_id = 0;
int r;
@@ -343,9 +335,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
-
- return dev;
+ return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
EXPORT_SYMBOL(get_phy_device);
@@ -357,14 +347,17 @@ int phy_device_register(struct phy_device *phydev)
{
int err;
- /* Don't register a phy if one is already registered at this
- * address */
+ /* Don't register a phy if one is already registered at this address */
if (phydev->bus->phy_map[phydev->addr])
return -EINVAL;
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
- phy_scan_fixups(phydev);
+ err = phy_init_hw(phydev);
+ if (err) {
+ pr_err("PHY %d failed to initialize\n", phydev->addr);
+ goto out;
+ }
err = device_add(&phydev->dev);
if (err) {
@@ -409,7 +402,7 @@ EXPORT_SYMBOL(phy_find_first);
* this function.
*/
static void phy_prepare_link(struct phy_device *phydev,
- void (*handler)(struct net_device *))
+ void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
}
@@ -432,7 +425,7 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
return rc;
phy_prepare_link(phydev, handler);
- phy_start_machine(phydev, NULL);
+ phy_start_machine(phydev);
if (phydev->irq > 0)
phy_start_interrupts(phydev);
@@ -455,16 +448,17 @@ EXPORT_SYMBOL(phy_connect_direct);
* choose to call only the subset of functions which provide
* the desired functionality.
*/
-struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *),
- phy_interface_t interface)
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface)
{
struct phy_device *phydev;
struct device *d;
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -481,7 +475,8 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
EXPORT_SYMBOL(phy_connect);
/**
- * phy_disconnect - disable interrupts, stop state machine, and detach a PHY device
+ * phy_disconnect - disable interrupts, stop state machine, and detach a PHY
+ * device
* @phydev: target phy_device struct
*/
void phy_disconnect(struct phy_device *phydev)
@@ -490,13 +485,53 @@ void phy_disconnect(struct phy_device *phydev)
phy_stop_interrupts(phydev);
phy_stop_machine(phydev);
-
+
phydev->adjust_link = NULL;
phy_detach(phydev);
}
EXPORT_SYMBOL(phy_disconnect);
+/**
+ * phy_poll_reset - Safely wait until a PHY reset has properly completed
+ * @phydev: The PHY device to poll
+ *
+ * Description: According to IEEE 802.3, Section 2, Subsection 22.2.4.1.1, as
+ * published in 2008, a PHY reset may take up to 0.5 seconds. The MII BMCR
+ * register must be polled until the BMCR_RESET bit clears.
+ *
+ * Furthermore, any attempts to write to PHY registers may have no effect
+ * or even generate MDIO bus errors until this is complete.
+ *
+ * Some PHYs (such as the Marvell 88E1111) don't entirely conform to the
+ * standard and do not fully reset after the BMCR_RESET bit is set, and may
+ * even *REQUIRE* a soft-reset to properly restart autonegotiation. In an
+ * effort to support such broken PHYs, this function is separate from the
+ * standard phy_init_hw() which will zero all the other bits in the BMCR
+ * and reapply all driver-specific and board-specific fixups.
+ */
+static int phy_poll_reset(struct phy_device *phydev)
+{
+ /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ } while (ret & BMCR_RESET && --retries);
+ if (ret & BMCR_RESET)
+ return -ETIMEDOUT;
+
+ /* Some chips (smsc911x) may still need up to another 1ms after the
+ * BMCR_RESET bit is cleared before they are usable.
+ */
+ msleep(1);
+ return 0;
+}
+
int phy_init_hw(struct phy_device *phydev)
{
int ret;
@@ -504,12 +539,21 @@ int phy_init_hw(struct phy_device *phydev)
if (!phydev->drv || !phydev->drv->config_init)
return 0;
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_poll_reset(phydev);
+ if (ret < 0)
+ return ret;
+
ret = phy_scan_fixups(phydev);
if (ret < 0)
return ret;
return phydev->drv->config_init(phydev);
}
+EXPORT_SYMBOL(phy_init_hw);
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
@@ -520,26 +564,25 @@ int phy_init_hw(struct phy_device *phydev)
*
* Description: Called by drivers to attach to a particular PHY
* device. The phy_device is found, and properly hooked up
- * to the phy_driver. If no driver is attached, then the
- * genphy_driver is used. The phy_device is given a ptr to
+ * to the phy_driver. If no driver is attached, then a
+ * generic driver is used. The phy_device is given a ptr to
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
int err;
/* Assume that if there is no driver, that it doesn't
- * exist, and we should use the genphy driver. */
+ * exist, and we should use the genphy driver.
+ */
if (NULL == d->driver) {
- if (phydev->is_c45) {
- pr_err("No driver for phy %x\n", phydev->phy_id);
- return -ENODEV;
- }
-
- d->driver = &genphy_driver.driver;
+ if (phydev->is_c45)
+ d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
+ else
+ d->driver = &genphy_driver[GENPHY_DRV_1G].driver;
err = d->driver->probe(d);
if (err >= 0)
@@ -565,13 +608,17 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
- * (dev_flags and interface) */
+ * (dev_flags and interface)
+ */
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
+ phy_resume(phydev);
+
return err;
}
+EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -582,8 +629,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* Description: Same as phy_attach_direct() except that a PHY bus_id
* string is passed instead of a pointer to a struct phy_device.
*/
-struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, phy_interface_t interface)
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -591,7 +638,8 @@ struct phy_device *phy_attach(struct net_device *dev,
int rc;
/* Search the list of PHY devices on the mdio bus for the
- * PHY with the requested name */
+ * PHY with the requested name
+ */
d = bus_find_device_by_name(bus, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
@@ -613,18 +661,48 @@ EXPORT_SYMBOL(phy_attach);
*/
void phy_detach(struct phy_device *phydev)
{
+ int i;
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_suspend(phydev);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
- * real driver could be loaded */
- if (phydev->dev.driver == &genphy_driver.driver)
- device_release_driver(&phydev->dev);
+ * real driver could be loaded
+ */
+ for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) {
+ if (phydev->dev.driver == &genphy_driver[i].driver) {
+ device_release_driver(&phydev->dev);
+ break;
+ }
+ }
}
EXPORT_SYMBOL(phy_detach);
+int phy_suspend(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ /* If the device has WOL enabled, we cannot suspend the PHY */
+ phy_ethtool_get_wol(phydev, &wol);
+ if (wol.wolopts)
+ return -EBUSY;
+
+ if (phydrv->suspend)
+ return phydrv->suspend(phydev);
+ return 0;
+}
+
+int phy_resume(struct phy_device *phydev)
+{
+ struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+
+ if (phydrv->resume)
+ return phydrv->resume(phydev);
+ return 0;
+}
/* Generic PHY support and helper functions */
@@ -640,20 +718,19 @@ EXPORT_SYMBOL(phy_detach);
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv;
+ int oldadv, adv, bmsr;
int err, changed = 0;
- /* Only allow advertising what
- * this PHY supports */
+ /* Only allow advertising what this PHY supports */
phydev->advertising &= phydev->supported;
advertise = phydev->advertising;
/* Setup standard advertisement */
- oldadv = adv = phy_read(phydev, MII_ADVERTISE);
-
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
+ oldadv = adv;
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
adv |= ethtool_adv_to_mii_adv_t(advertise);
@@ -666,26 +743,36 @@ static int genphy_config_advert(struct phy_device *phydev)
changed = 1;
}
- /* Configure gigabit if it's supported */
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
- oldadv = adv = phy_read(phydev, MII_CTRL1000);
+ bmsr = phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
- if (adv < 0)
- return adv;
+ /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
+ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+ /* Configure gigabit if it's supported */
+ adv = phy_read(phydev, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
- if (adv != oldadv) {
- err = phy_write(phydev, MII_CTRL1000, adv);
+ oldadv = adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (err < 0)
- return err;
+ if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full)) {
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+ if (adv != oldadv)
changed = 1;
- }
}
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
+
return changed;
}
@@ -699,10 +786,10 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int err;
int ctl = 0;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (SPEED_1000 == phydev->speed)
ctl |= BMCR_SPEED1000;
@@ -711,10 +798,8 @@ int genphy_setup_forced(struct phy_device *phydev)
if (DUPLEX_FULL == phydev->duplex)
ctl |= BMCR_FULLDPLX;
-
- err = phy_write(phydev, MII_BMCR, ctl);
- return err;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
@@ -724,25 +809,20 @@ EXPORT_SYMBOL(genphy_setup_forced);
*/
int genphy_restart_aneg(struct phy_device *phydev)
{
- int ctl;
-
- ctl = phy_read(phydev, MII_BMCR);
+ int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Don't isolate the PHY if we're negotiating */
- ctl &= ~(BMCR_ISOLATE);
+ ctl &= ~BMCR_ISOLATE;
- ctl = phy_write(phydev, MII_BMCR, ctl);
-
- return ctl;
+ return phy_write(phydev, MII_BMCR, ctl);
}
EXPORT_SYMBOL(genphy_restart_aneg);
-
/**
* genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
@@ -759,13 +839,12 @@ int genphy_config_aneg(struct phy_device *phydev)
return genphy_setup_forced(phydev);
result = genphy_config_advert(phydev);
-
if (result < 0) /* error */
return result;
-
if (result == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated? */
+ * begin with? Or maybe phy was isolated?
+ */
int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
@@ -776,7 +855,8 @@ int genphy_config_aneg(struct phy_device *phydev)
}
/* Only restart aneg if we are advertising something different
- * than we were before. */
+ * than we were before.
+ */
if (result > 0)
result = genphy_restart_aneg(phydev);
@@ -784,6 +864,11 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+static int gen10g_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
/**
* genphy_update_link - update link status in @phydev
* @phydev: target phy_device struct
@@ -798,13 +883,11 @@ int genphy_update_link(struct phy_device *phydev)
/* Do a fake read */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
-
if (status < 0)
return status;
@@ -832,65 +915,70 @@ int genphy_read_status(struct phy_device *phydev)
int err;
int lpa;
int lpagb = 0;
+ int common_adv;
+ int common_adv_gb = 0;
- /* Update the link, but return if there
- * was an error */
+ /* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ phydev->lp_advertising = 0;
+
if (AUTONEG_ENABLE == phydev->autoneg) {
if (phydev->supported & (SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full)) {
lpagb = phy_read(phydev, MII_STAT1000);
-
if (lpagb < 0)
return lpagb;
adv = phy_read(phydev, MII_CTRL1000);
-
if (adv < 0)
return adv;
- lpagb &= adv << 2;
+ phydev->lp_advertising =
+ mii_stat1000_to_ethtool_lpa_t(lpagb);
+ common_adv_gb = lpagb & adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
-
if (lpa < 0)
return lpa;
- adv = phy_read(phydev, MII_ADVERTISE);
+ phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
- lpa &= adv;
+ common_adv = lpa & adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
- if (lpagb & LPA_1000FULL)
+ if (common_adv_gb & LPA_1000FULL)
phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
-
- if (lpa & LPA_100FULL)
+
+ if (common_adv & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
- if (lpa & LPA_10FULL)
+ if (common_adv & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
- if (phydev->duplex == DUPLEX_FULL){
+ if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
} else {
int bmcr = phy_read(phydev, MII_BMCR);
+
if (bmcr < 0)
return bmcr;
@@ -906,27 +994,55 @@ int genphy_read_status(struct phy_device *phydev)
else
phydev->speed = SPEED_10;
- phydev->pause = phydev->asym_pause = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
}
return 0;
}
EXPORT_SYMBOL(genphy_read_status);
+static int gen10g_read_status(struct phy_device *phydev)
+{
+ int devad, reg;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+
+ phydev->link = 1;
+
+ /* For now just lie and say it's 10G all the time */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) {
+ if (!(mmd_mask & 1))
+ continue;
+
+ /* Read twice because link state is latched and a
+ * read moves the current state into the register
+ */
+ phy_read_mmd(phydev, devad, MDIO_STAT1);
+ reg = phy_read_mmd(phydev, devad, MDIO_STAT1);
+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
u32 features;
/* For now, I'll claim that the generic driver supports
- * all possible port types */
+ * all possible port types
+ */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
-
if (val < 0)
return val;
@@ -944,7 +1060,6 @@ static int genphy_config_init(struct phy_device *phydev)
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
-
if (val < 0)
return val;
@@ -959,6 +1074,16 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+
+static int gen10g_config_init(struct phy_device *phydev)
+{
+ /* Temporarily just say we support everything */
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
int genphy_suspend(struct phy_device *phydev)
{
int value;
@@ -966,7 +1091,7 @@ int genphy_suspend(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -974,6 +1099,11 @@ int genphy_suspend(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_suspend);
+static int gen10g_suspend(struct phy_device *phydev)
+{
+ return 0;
+}
+
int genphy_resume(struct phy_device *phydev)
{
int value;
@@ -981,7 +1111,7 @@ int genphy_resume(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, (value & ~BMCR_PDOWN));
+ phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
mutex_unlock(&phydev->lock);
@@ -989,6 +1119,11 @@ int genphy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_resume);
+static int gen10g_resume(struct phy_device *phydev)
+{
+ return 0;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -999,22 +1134,18 @@ EXPORT_SYMBOL(genphy_resume);
*/
static int phy_probe(struct device *dev)
{
- struct phy_device *phydev;
- struct phy_driver *phydrv;
- struct device_driver *drv;
+ struct phy_device *phydev = to_phy_device(dev);
+ struct device_driver *drv = phydev->dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
int err = 0;
- phydev = to_phy_device(dev);
-
- drv = phydev->dev.driver;
- phydrv = to_phy_driver(drv);
phydev->drv = phydrv;
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -1024,7 +1155,8 @@ static int phy_probe(struct device *dev)
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
- * or both of these values */
+ * or both of these values
+ */
phydev->supported = phydrv->features;
phydev->advertising = phydrv->features;
@@ -1037,14 +1169,11 @@ static int phy_probe(struct device *dev)
mutex_unlock(&phydev->lock);
return err;
-
}
static int phy_remove(struct device *dev)
{
- struct phy_device *phydev;
-
- phydev = to_phy_device(dev);
+ struct phy_device *phydev = to_phy_device(dev);
mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
@@ -1071,7 +1200,6 @@ int phy_driver_register(struct phy_driver *new_driver)
new_driver->driver.remove = phy_remove;
retval = driver_register(&new_driver->driver);
-
if (retval) {
pr_err("%s: Error %d in registering driver\n",
new_driver->name, retval);
@@ -1110,13 +1238,14 @@ EXPORT_SYMBOL(phy_driver_unregister);
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
- for (i = 0; i < n; i++) {
+
+ for (i = 0; i < n; i++)
phy_driver_unregister(drv + i);
- }
}
EXPORT_SYMBOL(phy_drivers_unregister);
-static struct phy_driver genphy_driver = {
+static struct phy_driver genphy_driver[] = {
+{
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
@@ -1126,8 +1255,19 @@ static struct phy_driver genphy_driver = {
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .driver = {.owner= THIS_MODULE, },
-};
+ .driver = { .owner = THIS_MODULE, },
+}, {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic 10G PHY",
+ .config_init = gen10g_config_init,
+ .features = 0,
+ .config_aneg = gen10g_config_aneg,
+ .read_status = gen10g_read_status,
+ .suspend = gen10g_suspend,
+ .resume = gen10g_resume,
+ .driver = {.owner = THIS_MODULE, },
+} };
static int __init phy_init(void)
{
@@ -1137,7 +1277,8 @@ static int __init phy_init(void)
if (rc)
return rc;
- rc = phy_driver_register(&genphy_driver);
+ rc = phy_drivers_register(genphy_driver,
+ ARRAY_SIZE(genphy_driver));
if (rc)
mdio_bus_exit();
@@ -1146,7 +1287,8 @@ static int __init phy_init(void)
static void __exit phy_exit(void)
{
- phy_driver_unregister(&genphy_driver);
+ phy_drivers_unregister(genphy_driver,
+ ARRAY_SIZE(genphy_driver));
mdio_bus_exit();
}
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f3bea1346021..4cf5fb922e59 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -171,14 +170,14 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf,
static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
{
- return (ks8995_read(ks, buf, addr, 1) != 1);
+ return ks8995_read(ks, buf, addr, 1) != 1;
}
static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
{
char buf = val;
- return (ks8995_write(ks, &buf, addr, 1) != 1);
+ return ks8995_write(ks, &buf, addr, 1) != 1;
}
/* ------------------------------------------------------------------------ */
@@ -325,7 +324,6 @@ static int ks8995_probe(struct spi_device *spi)
return 0;
err_drvdata:
- spi_set_drvdata(spi, NULL);
kfree(ks);
return err;
}
@@ -337,7 +335,6 @@ static int ks8995_remove(struct spi_device *spi)
ks8995 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
- spi_set_drvdata(spi, NULL);
kfree(ks8995);
return 0;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 7b4ff35c8bf7..040b8978d6ca 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -547,9 +547,9 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb,dev->hard_header_len);
eth = eth_hdr(skb);
- if(*eth->h_dest&1)
+ if(is_multicast_ether_addr(eth->h_dest))
{
- if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 9a1849a83e2a..911b21602ff2 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -27,8 +27,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* Changelog:
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 82ee6ed954cb..2ea7efd11857 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -131,12 +131,12 @@ static inline struct pppoe_net *pppoe_pernet(struct net *net)
static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
{
- return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
+ return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
}
static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
{
- return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
+ return a->sid == sid && ether_addr_equal(a->remote, addr);
}
#if 8 % PPPOE_HASH_BITS
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b75ae5bde673..c8624a8235ab 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -2034,6 +2034,10 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
+
+ /* Don't allow team devices to change network namespaces. */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -2851,7 +2855,7 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
team_compute_features(port->team);
break;
- case NETDEV_CHANGEMTU:
+ case NETDEV_PRECHANGEMTU:
/* Forbid changing the MTU of the underlying device */
return NOTIFY_BAD;
case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 7f032e211343..cd2f692b8074 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -13,20 +13,14 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
-static u32 random_N(unsigned int N)
-{
- return reciprocal_divide(prandom_u32(), N);
-}
-
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
- port_index = random_N(team->en_port_count);
+ port_index = prandom_u32_max(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ecec8029c5e8..26f8635b027d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
@@ -110,7 +111,7 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
* the netdevice to fit in one page, so we can make sure the memory
* allocation succeeds. TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +120,7 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
/* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
* tap_filter were kept in tun_struct since they were used for filtering for the
* netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +153,7 @@ struct tun_flow_entry {
struct tun_struct *tun;
u32 rxhash;
+ u32 rps_rxhash;
int queue_index;
unsigned long updated;
};
@@ -220,6 +222,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
+ e->rps_rxhash = 0;
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +235,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
+ sock_rps_reset_flow_hash(e->rps_rxhash);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -325,6 +329,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
/* TODO: keep queueing to old queue until it's empty? */
e->queue_index = queue_index;
e->updated = jiffies;
+ sock_rps_record_flow_hash(e->rps_rxhash);
} else {
spin_lock_bh(&tun->lock);
if (!tun_flow_find(head, rxhash) &&
@@ -341,15 +346,27 @@ unlock:
rcu_read_unlock();
}
+/**
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+ if (unlikely(e->rps_rxhash != hash)) {
+ sock_rps_reset_flow_hash(e->rps_rxhash);
+ e->rps_rxhash = hash;
+ }
+}
+
/* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards (e.g. 82599) choose
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application moves between processors, we may get a
* different rxq no. here. If we could not get rxhash, then we would
* hope the rxq no. may help here.
*/
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
@@ -359,12 +376,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
rcu_read_lock();
numqueues = ACCESS_ONCE(tun->numqueues);
- txq = skb_get_rxhash(skb);
+ txq = skb_get_hash(skb);
if (txq) {
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e)
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
- else
+ } else
/* use multiply and shift instead of expensive divide */
txq = ((u64)txq * numqueues) >> 32;
} else if (likely(skb_rx_queue_recorded(skb))) {
@@ -532,7 +550,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
err = 0;
- /* Re-attach the filter to presist device */
+ /* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
@@ -721,14 +739,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
int txq = skb->queue_mapping;
struct tun_file *tfile;
+ u32 numqueues = 0;
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
+ numqueues = ACCESS_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
- if (txq >= tun->numqueues)
+ if (txq >= numqueues)
goto drop;
+ if (numqueues == 1) {
+ /* Select queue was not called for the skbuff, so we extract the
+ * RPS hash and save it into the flow_table here.
+ */
+ __u32 rxhash;
+
+ rxhash = skb_get_hash(skb);
+ if (rxhash) {
+ struct tun_flow_entry *e;
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+ rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
+ }
+ }
+
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
BUG_ON(!tfile);
@@ -746,8 +782,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Limit the number of packets queued by dividing txq length with the
* number of queues.
*/
- if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
- >= dev->tx_queue_len / tun->numqueues)
+ if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+ >= dev->tx_queue_len)
goto drop;
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
@@ -820,9 +856,9 @@ static void tun_poll_controller(struct net_device *dev)
* Tun only receives frames when:
* 1) the char device endpoint gets data from user space
* 2) the tun socket gets a sendmsg call from user space
- * Since both of those are syncronous operations, we are guaranteed
+ * Since both of those are synchronous operations, we are guaranteed
* never to have pending data when we poll for it
- * so theres nothing to do here but return.
+ * so there is nothing to do here but return.
* We need this though so netpoll recognizes us as an interface that
* supports polling, which enables bridge devices in virt setups to
* still use netconsole
@@ -1147,7 +1183,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
- rxhash = skb_get_rxhash(skb);
+ rxhash = skb_get_hash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
@@ -1292,8 +1328,7 @@ done:
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- struct kiocb *iocb, const struct iovec *iv,
- ssize_t len, int noblock)
+ const struct iovec *iv, ssize_t len, int noblock)
{
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
@@ -1356,7 +1391,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, iv, len,
+ ret = tun_do_read(tun, tfile, iv, len,
file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
@@ -1457,7 +1492,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+ ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
@@ -1651,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
@@ -2194,6 +2231,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct tun_struct *tun;
+ struct ifreq ifr;
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ rtnl_lock();
+ tun = tun_get(f);
+ if (tun)
+ tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ rtnl_unlock();
+
+ if (tun)
+ tun_put(tun);
+
+ return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -2208,7 +2266,10 @@ static const struct file_operations tun_fops = {
#endif
.open = tun_chr_open,
.release = tun_chr_close,
- .fasync = tun_chr_fasync
+ .fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = tun_chr_show_fdinfo,
+#endif
};
static struct miscdevice tun_miscdev = {
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 47b0f732b0b1..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -92,11 +92,12 @@ config USB_RTL8150
module will be called rtl8150.
config USB_RTL8152
- tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+ tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
help
This option adds support for Realtek RTL8152 based USB 2.0
- 10/100 Ethernet adapters.
+ 10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
+ Ethernet adapters.
To compile this driver as a module, choose M here: the
module will be called r8152.
@@ -291,6 +292,21 @@ config USB_NET_SR9700
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9800
+ tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ ---help---
+ Say Y if you want to use a 100Mbps USB Ethernet device based on
+ the CoreChip-sz SR9800 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on eth0, if it is the only ethernet device, or perhaps on
+ eth1, if you have a PCI or ISA ethernet card installed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sr9800.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..e2797f1e1b31 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,10 +11,11 @@ obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
+obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index bdaa12d07a12..5d049d00c2d7 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASIX_H
@@ -28,7 +27,6 @@
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 577c72d5f369..5c55f11572ba 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 386a3df53678..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
@@ -918,7 +917,8 @@ static const struct driver_info ax88178_info = {
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 723b3879ecc2..5f18fcb8dcc7 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asix.h"
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 8e8d0fcd4979..054e59ca6946 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -1030,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- if (usb_device_no_sg_constraint(dev->udev))
- dev->can_dma_sg = 1;
-
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
- if (dev->can_dma_sg) {
- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- }
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1119,6 +1110,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
skb_trim(skb, skb->len - 4);
memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
le32_to_cpus(&rx_hdr);
@@ -1392,6 +1387,19 @@ static const struct driver_info ax88178a_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info dlink_dub1312_info = {
+ .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info sitecom_info = {
.description = "Sitecom USB 3.0 to Gigabit Adapter",
.bind = ax88179_bind,
@@ -1418,6 +1426,19 @@ static const struct driver_info samsung_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info lenovo_info = {
+ .description = "Lenovo OneLinkDock Gigabit LAN",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1428,6 +1449,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
+ USB_DEVICE(0x2001, 0x4a00),
+ .driver_info = (unsigned long)&dlink_dub1312_info,
+}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
USB_DEVICE(0x0df6, 0x0072),
.driver_info = (unsigned long)&sitecom_info,
@@ -1435,6 +1460,10 @@ static const struct usb_device_id products[] = {
/* Samsung USB Ethernet Adapter */
USB_DEVICE(0x04e8, 0xa100),
.driver_info = (unsigned long)&samsung_info,
+}, {
+ /* Lenovo OneLinkDock Gigabit LAN */
+ USB_DEVICE(0x17ef, 0x304b),
+ .driver_info = (unsigned long)&lenovo_info,
},
{ },
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index df507e6dbb9c..630caf48f63a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -24,15 +24,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 08d55b6bf272..f7180f8db39e 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -14,12 +14,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2023f3ea891e..bd363b27e854 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -14,15 +14,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -487,6 +485,7 @@ static const struct driver_info wwan_info = {
#define ZTE_VENDOR_ID 0x19D2
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
+#define SAMSUNG_VENDOR_ID 0x04e8
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -653,6 +652,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Samsung USB Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e15ec2b12035..d350d2795e10 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -39,7 +39,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
@@ -69,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
@@ -83,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ncm_parm,
- sizeof(ncm_parm));
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
/* there are some minor differences in NCM and MBIM defaults */
if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -147,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -163,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
- /* Adding a pad byte here simplifies the handling in
- * cdc_ncm_fill_tx_frame, by making tx_max always
- * represent the real skb max size.
- */
- if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
-
}
/*
@@ -440,6 +430,10 @@ advance:
goto error2;
}
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev))
+ goto error2;
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
@@ -454,12 +448,6 @@ advance:
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev)) {
- dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
- goto error2;
- }
-
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
@@ -476,6 +464,15 @@ advance:
dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
+ /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+ * outside the sane range. Adding a pad byte here if necessary
+ * simplifies the handling in cdc_ncm_fill_tx_frame, making
+ * tx_max always represent the real skb max size.
+ */
+ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
return 0;
error2:
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 0d1fe89ae0bd..91f0919fe278 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -13,13 +13,11 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 1e207f086b75..3eed708a6182 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -14,12 +14,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e80219877730..6e9c344c7a20 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index a7e3f4e55bf3..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -14,15 +14,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -86,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u32 size;
u32 count;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
header = (struct gl_header *) skb->data;
// get the packet count of the received skb
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
+ D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
/* sanity check */
if (!serial) {
D1("serial == NULL");
return;
- } else if (status) {
+ }
+ if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
}
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
D1("Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->port.count == 0)
return;
- if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
- fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
- /* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
- put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
- } else if (status == -ENOENT || status == -ECONNRESET) {
- /* Unlinked - check for throttled port. */
- D2("Port %d, successfully unlinked urb", serial->minor);
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- hso_resubmit_rx_bulk_urb(serial, urb);
- spin_unlock(&serial->serial_lock);
- } else {
- D2("Port %d, status = %d for read urb", serial->minor, status);
- return;
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+ /* Valid data, handle RX data */
+ spin_lock(&serial->serial_lock);
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+ put_rxbuf_data_and_resubmit_bulk_urb(serial);
+ spin_unlock(&serial->serial_lock);
}
/*
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ace9e74ffbdd..4ff70b22c6ee 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index ff8594d8dd2d..421934c83f1c 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -45,7 +45,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 6866eae3e388..5662babf0583 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -15,7 +15,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index afb117c16d2d..a359d3bb7c5b 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -25,8 +25,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
****************************************************************/
@@ -46,7 +45,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 808d6506da41..acfcc32b323d 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index f54637828574..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -36,14 +36,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -528,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
- if (skb->len == 0) {
- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 93e0716a118c..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -366,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct nc_trailer *trailer;
u16 hdr_len, packet_len;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
if (!(skb->len & 0x01)) {
netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 0fcc8e65a068..3d18bb0eee85 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;
- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
@@ -730,6 +732,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 51073721e224..adb12f349a61 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -7,7 +7,6 @@
*
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -24,9 +23,9 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
+#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
-#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
#define R8152_PHY_ID 32
@@ -39,15 +38,24 @@
#define PLA_RXFIFO_CTRL2 0xc0a8
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
+#define PLA_TEREDO_CFG 0xc0bc
#define PLA_MAR 0xcd00
+#define PLA_BACKUP 0xd000
#define PAL_BDC_CR 0xd1a0
+#define PLA_TEREDO_TIMER 0xd2cc
+#define PLA_REALWOW_TIMER 0xd2e8
#define PLA_LEDSEL 0xdd90
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
+#define PLA_BOOT_CTRL 0xe004
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
#define PLA_MAC_PWR_CTRL 0xe0c0
+#define PLA_MAC_PWR_CTRL2 0xe0ca
+#define PLA_MAC_PWR_CTRL3 0xe0cc
+#define PLA_MAC_PWR_CTRL4 0xe0ce
+#define PLA_WDT6_CTRL 0xe428
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
@@ -73,16 +81,25 @@
#define PLA_BP_5 0xfc32
#define PLA_BP_6 0xfc34
#define PLA_BP_7 0xfc36
+#define PLA_BP_EN 0xfc38
+#define USB_U2P3_CTRL 0xb460
#define USB_DEV_STAT 0xb808
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
#define USB_RX_BUF_TH 0xd40c
#define USB_USB_TIMER 0xd428
+#define USB_RX_EARLY_AGG 0xd42c
#define USB_PM_CTRL_STATUS 0xd432
#define USB_TX_DMA 0xd434
+#define USB_TOLERANCE 0xd490
+#define USB_LPM_CTRL 0xd41a
#define USB_UPS_CTRL 0xd800
+#define USB_MISC_0 0xd81a
+#define USB_POWER_CUT 0xd80a
+#define USB_AFE_CTRL2 0xd824
+#define USB_WDT11_CTRL 0xe43c
#define USB_BP_BA 0xfc26
#define USB_BP_0 0xfc28
#define USB_BP_1 0xfc2a
@@ -92,14 +109,30 @@
#define USB_BP_5 0xfc32
#define USB_BP_6 0xfc34
#define USB_BP_7 0xfc36
+#define USB_BP_EN 0xfc38
/* OCP Registers */
#define OCP_ALDPS_CONFIG 0x2010
#define OCP_EEE_CONFIG1 0x2080
#define OCP_EEE_CONFIG2 0x2092
#define OCP_EEE_CONFIG3 0x2094
+#define OCP_BASE_MII 0xa400
#define OCP_EEE_AR 0xa41a
#define OCP_EEE_DATA 0xa41c
+#define OCP_PHY_STATUS 0xa420
+#define OCP_POWER_CFG 0xa430
+#define OCP_EEE_CFG 0xa432
+#define OCP_SRAM_ADDR 0xa436
+#define OCP_SRAM_DATA 0xa438
+#define OCP_DOWN_SPEED 0xa442
+#define OCP_EEE_CFG2 0xa5d0
+#define OCP_ADC_CFG 0xbc06
+
+/* SRAM Register */
+#define SRAM_LPF_CFG 0x8012
+#define SRAM_10M_AMP1 0x8080
+#define SRAM_10M_AMP2 0x8082
+#define SRAM_IMPEDANCE 0x8084
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -116,14 +149,17 @@
#define RXFIFO_THR2_FULL 0x00000060
#define RXFIFO_THR2_HIGH 0x00000038
#define RXFIFO_THR2_OOB 0x0000004a
+#define RXFIFO_THR2_NORMAL 0x00a0
/* PLA_RXFIFO_CTRL2 */
#define RXFIFO_THR3_FULL 0x00000078
#define RXFIFO_THR3_HIGH 0x00000048
#define RXFIFO_THR3_OOB 0x0000005a
+#define RXFIFO_THR3_NORMAL 0x0110
/* PLA_TXFIFO_CTRL */
#define TXFIFO_THR_NORMAL 0x00400008
+#define TXFIFO_THR_NORMAL2 0x01000008
/* PLA_FMC */
#define FMC_FCR_MCU_EN 0x0001
@@ -131,6 +167,9 @@
/* PLA_EEEP_CR */
#define EEEP_CR_EEEP_TX 0x0002
+/* PLA_WDT6_CTRL */
+#define WDT6_SET_MODE 0x0010
+
/* PLA_TCR0 */
#define TCR0_TX_EMPTY 0x0800
#define TCR0_AUTO_FIFO 0x0080
@@ -168,6 +207,12 @@
/* PLA_CFG_WOL */
#define MAGIC_EN 0x0001
+/* PLA_TEREDO_CFG */
+#define TEREDO_SEL 0x8000
+#define TEREDO_WAKE_MASK 0x7f00
+#define TEREDO_RS_EVENT_MASK 0x00fe
+#define OOB_TEREDO_EN 0x0001
+
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
@@ -185,6 +230,25 @@
#define D3_CLK_GATED_EN 0x00004000
#define MCU_CLK_RATIO 0x07010f07
#define MCU_CLK_RATIO_MASK 0x0f0f0f0f
+#define ALDPS_SPDWN_RATIO 0x0f87
+
+/* PLA_MAC_PWR_CTRL2 */
+#define EEE_SPDWN_RATIO 0x8007
+
+/* PLA_MAC_PWR_CTRL3 */
+#define PKT_AVAIL_SPDWN_EN 0x0100
+#define SUSPEND_SPDWN_EN 0x0004
+#define U1U2_SPDWN_EN 0x0002
+#define L1_SPDWN_EN 0x0001
+
+/* PLA_MAC_PWR_CTRL4 */
+#define PWRSAVE_SPDWN_EN 0x1000
+#define RXDV_SPDWN_EN 0x0800
+#define TX10MIDLE_EN 0x0100
+#define TP100_SPDWN_EN 0x0020
+#define TP500_SPDWN_EN 0x0010
+#define TP1000_SPDWN_EN 0x0008
+#define EEE_SPDWN_EN 0x0001
/* PLA_GPHY_INTR_IMR */
#define GPHY_STS_MSK 0x0001
@@ -199,6 +263,9 @@
#define EEE_RX_EN 0x0001
#define EEE_TX_EN 0x0002
+/* PLA_BOOT_CTRL */
+#define AUTOLOAD_DONE 0x0002
+
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
@@ -208,7 +275,9 @@
#define TX_AGG_MAX_THRESHOLD 0x03
/* USB_RX_BUF_TH */
-#define RX_BUF_THR 0x7a120180
+#define RX_THR_SUPPER 0x0c350180
+#define RX_THR_HIGH 0x7a120180
+#define RX_THR_SLOW 0xffff0180
/* USB_TX_DMA */
#define TEST_MODE_DISABLE 0x00000001
@@ -218,17 +287,55 @@
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
-#define RWSUME_INDICATE 0x0001
+#define RESUME_INDICATE 0x0001
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
+/* USB_U2P3_CTRL */
+#define U2P3_ENABLE 0x0001
+
+/* USB_POWER_CUT */
+#define PWR_EN 0x0001
+#define PHASE2_EN 0x0008
+
+/* USB_MISC_0 */
+#define PCUT_STATUS 0x0001
+
+/* USB_RX_EARLY_AGG */
+#define EARLY_AGG_SUPPER 0x0e832981
+#define EARLY_AGG_HIGH 0x0e837a12
+#define EARLY_AGG_SLOW 0x0e83ffff
+
+/* USB_WDT11_CTRL */
+#define TIMER11_EN 0x0001
+
+/* USB_LPM_CTRL */
+#define LPM_TIMER_MASK 0x0c
+#define LPM_TIMER_500MS 0x04 /* 500 ms */
+#define LPM_TIMER_500US 0x0c /* 500 us */
+
+/* USB_AFE_CTRL2 */
+#define SEN_VAL_MASK 0xf800
+#define SEN_VAL_NORMAL 0xa000
+#define SEL_RXIDLE 0x0100
+
/* OCP_ALDPS_CONFIG */
#define ENPWRSAVE 0x8000
#define ENPDNPS 0x0200
#define LINKENA 0x0100
#define DIS_SDSAVE 0x0010
+/* OCP_PHY_STATUS */
+#define PHY_STAT_MASK 0x0007
+#define PHY_STAT_LAN_ON 3
+#define PHY_STAT_PWRDN 5
+
+/* OCP_POWER_CFG */
+#define EEE_CLKDIV_EN 0x8000
+#define EN_ALDPS 0x0004
+#define EN_10M_PLLOFF 0x0001
+
/* OCP_EEE_CONFIG1 */
#define RG_TXLPI_MSK_HFDUP 0x8000
#define RG_MATCLR_EN 0x4000
@@ -263,7 +370,36 @@
#define EEE_ADDR 0x003C
#define EEE_DATA 0x0002
+/* OCP_EEE_CFG */
+#define CTAP_SHORT_EN 0x0040
+#define EEE10_EN 0x0010
+
+/* OCP_DOWN_SPEED */
+#define EN_10M_BGOFF 0x0080
+
+/* OCP_EEE_CFG2 */
+#define MY1000_EEE 0x0004
+#define MY100_EEE 0x0002
+
+/* OCP_ADC_CFG */
+#define CKADSEL_L 0x0100
+#define ADC_EN 0x0080
+#define EN_EMI_L 0x0040
+
+/* SRAM_LPF_CFG */
+#define LPF_AUTO_TUNE 0x8000
+
+/* SRAM_10M_AMP1 */
+#define GDAC_IB_UPALL 0x0008
+
+/* SRAM_10M_AMP2 */
+#define AMP_DN 0x0200
+
+/* SRAM_IMPEDANCE */
+#define RX_DRIVING_MASK 0x6000
+
enum rtl_register_content {
+ _1000bps = 0x10,
_100bps = 0x08,
_10bps = 0x04,
LINK_STATUS = 0x02,
@@ -273,6 +409,9 @@ enum rtl_register_content {
#define RTL8152_MAX_TX 10
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
+#define CRC_SIZE 4
+#define TX_ALIGN 4
+#define RX_ALIGN 8
#define INTR_LINK 0x0004
@@ -302,6 +441,10 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define PRODUCT_ID_RTL8152 0x8152
+#define PRODUCT_ID_RTL8153 0x8153
+
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define PRODUCT_ID_SAMSUNG 0xa101
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -363,6 +506,15 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+
+ struct rtl_ops {
+ void (*init)(struct r8152 *);
+ int (*enable)(struct r8152 *);
+ void (*disable)(struct r8152 *);
+ void (*down)(struct r8152 *);
+ void (*unload)(struct r8152 *);
+ } rtl_ops;
+
int intr_interval;
u32 msg_enable;
u32 tx_qlen;
@@ -375,7 +527,11 @@ struct r8152 {
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
- RTL_VER_02
+ RTL_VER_02,
+ RTL_VER_03,
+ RTL_VER_04,
+ RTL_VER_05,
+ RTL_VER_MAX
};
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -427,8 +583,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
- u16 limit = 64;
- int ret = 0;
+ u16 limit = 64;
+ int ret = 0;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -467,9 +623,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 size, void *data, u16 type)
{
- int ret;
- u16 byteen_start, byteen_end, byen;
- u16 limit = 512;
+ int ret;
+ u16 byteen_start, byteen_end, byen;
+ u16 limit = 512;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
@@ -653,45 +809,54 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
-static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
+static u16 ocp_reg_read(struct r8152 *tp, u16 addr)
{
- u32 ocp_data;
- int i;
+ u16 ocp_base, ocp_index;
- ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
- (value & 0xffff);
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
+ }
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (!(ocp_data & PHYAR_FLAG))
- break;
+static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ u16 ocp_base, ocp_index;
+
+ ocp_base = addr & 0xf000;
+ if (ocp_base != tp->ocp_base) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+ tp->ocp_base = ocp_base;
}
- udelay(20);
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
-static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
{
- u32 ocp_data;
- int i;
-
- ocp_data = (reg_addr & 0x1f) << 16;
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+ ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value);
+}
- for (i = 20; i > 0; i--) {
- udelay(25);
- ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
- if (ocp_data & PHYAR_FLAG)
- break;
- }
- udelay(20);
+static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+{
+ return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2);
+}
- if (!(ocp_data & PHYAR_FLAG))
- return -EAGAIN;
+static void sram_write(struct r8152 *tp, u16 addr, u16 data)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ ocp_reg_write(tp, OCP_SRAM_DATA, data);
+}
- return (u16)(ocp_data & 0xffff);
+static u16 sram_read(struct r8152 *tp, u16 addr)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ return ocp_reg_read(tp, OCP_SRAM_DATA);
}
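
For reference, a minimal standalone sketch (illustration only, not part of the patch) of the address math used by ocp_reg_read()/ocp_reg_write() above: the upper nibble of the 16-bit OCP address selects the page written to PLA_OCP_GPHY_BASE (cached in tp->ocp_base), and the low 12 bits are accessed through the 0xb000 window. OCP_EEE_CFG from the defines above is used as the example address.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the paging arithmetic in ocp_reg_read()/ocp_reg_write(). */
static void ocp_split(uint16_t addr, uint16_t *base, uint16_t *index)
{
	*base  = addr & 0xf000;            /* page written to PLA_OCP_GPHY_BASE */
	*index = (addr & 0x0fff) | 0xb000; /* offset inside the 0xb000 window */
}

int main(void)
{
	uint16_t base, index;

	ocp_split(0xa432 /* OCP_EEE_CFG */, &base, &index);
	printf("base=0x%04x index=0x%04x\n", base, index); /* 0xa000 / 0xb432 */

	/* MII registers are reached the same way, at OCP_BASE_MII + reg * 2. */
	return 0;
}
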
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
@@ -715,20 +880,6 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
r8152_mdio_write(tp, reg, val);
}
-static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
-
- ocp_base = addr & 0xf000;
- if (ocp_base != tp->ocp_base) {
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
- tp->ocp_base = ocp_base;
- }
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
-}
-
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
@@ -814,10 +965,12 @@ static void read_bulk_callback(struct urb *urb)
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
- pr_warn_ratelimited("may be reset is needed?..\n");
+ if (net_ratelimit())
+ netdev_warn(netdev, "maybe reset is needed?\n");
break;
default:
- pr_warn_ratelimited("Rx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(netdev, "Rx status %d\n", status);
break;
}
@@ -850,7 +1003,8 @@ static void write_bulk_callback(struct urb *urb)
stats = rtl8152_get_stats(tp->netdev);
if (status) {
- pr_warn_ratelimited("Tx status %d\n", status);
+ if (net_ratelimit())
+ netdev_warn(tp->netdev, "Tx status %d\n", status);
stats->tx_errors += agg->skb_num;
} else {
stats->tx_packets += agg->skb_num;
@@ -927,17 +1081,17 @@ resubmit:
netif_device_detach(tp->netdev);
else if (res)
netif_err(tp, intr, tp->netdev,
- "can't resubmit intr, status %d\n", res);
+ "can't resubmit intr, status %d\n", res);
}
static inline void *rx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 8);
+ return (void *)ALIGN((uintptr_t)data, RX_ALIGN);
}
static inline void *tx_agg_align(void *data)
{
- return (void *)ALIGN((uintptr_t)data, 4);
+ return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
}
static void free_all_mem(struct r8152 *tp)
@@ -945,40 +1099,28 @@ static void free_all_mem(struct r8152 *tp)
int i;
for (i = 0; i < RTL8152_MAX_RX; i++) {
- if (tp->rx_info[i].urb) {
- usb_free_urb(tp->rx_info[i].urb);
- tp->rx_info[i].urb = NULL;
- }
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
- if (tp->rx_info[i].buffer) {
- kfree(tp->rx_info[i].buffer);
- tp->rx_info[i].buffer = NULL;
- tp->rx_info[i].head = NULL;
- }
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = NULL;
}
for (i = 0; i < RTL8152_MAX_TX; i++) {
- if (tp->tx_info[i].urb) {
- usb_free_urb(tp->tx_info[i].urb);
- tp->tx_info[i].urb = NULL;
- }
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
- if (tp->tx_info[i].buffer) {
- kfree(tp->tx_info[i].buffer);
- tp->tx_info[i].buffer = NULL;
- tp->tx_info[i].head = NULL;
- }
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = NULL;
}
- if (tp->intr_urb) {
- usb_free_urb(tp->intr_urb);
- tp->intr_urb = NULL;
- }
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
- if (tp->intr_buff) {
- kfree(tp->intr_buff);
- tp->intr_buff = NULL;
- }
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
}
static int alloc_all_mem(struct r8152 *tp)
@@ -1006,7 +1148,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != rx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1031,7 +1174,8 @@ static int alloc_all_mem(struct r8152 *tp)
if (buf != tx_agg_align(buf)) {
kfree(buf);
- buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL,
+ node);
if (!buf)
goto err1;
}
@@ -1231,7 +1375,7 @@ static void rx_bottom(struct r8152 *tp)
stats = rtl8152_get_stats(netdev);
- pkt_len -= 4; /* CRC */
+ pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
@@ -1246,7 +1390,7 @@ static void rx_bottom(struct r8152 *tp)
stats->rx_packets++;
stats->rx_bytes += pkt_len;
- rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
len_used += sizeof(struct rx_desc);
@@ -1336,7 +1480,7 @@ static void rtl8152_tx_timeout(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int i;
- netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
+ netif_warn(tp, tx_err, netdev, "Tx timeout\n");
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_unlink_urb(tp->tx_info[i].urb);
}
@@ -1449,13 +1593,11 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
-static int rtl8152_enable(struct r8152 *tp)
+static void rtl_set_eee_plus(struct r8152 *tp)
{
u32 ocp_data;
- int i, ret;
u8 speed;
- set_tx_qlen(tp);
speed = rtl8152_get_speed(tp);
if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
@@ -1466,6 +1608,12 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
+}
+
+static int rtl_enable(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i, ret;
r8152b_reset_packet_filter(tp);
@@ -1487,6 +1635,47 @@ static int rtl8152_enable(struct r8152 *tp)
return ret;
}
+static int rtl8152_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+
+ return rtl_enable(tp);
+}
+
+static void r8153_set_rx_agg(struct r8152 *tp)
+{
+ u8 speed;
+
+ speed = rtl8152_get_speed(tp);
+ if (speed & _1000bps) {
+ if (tp->udev->speed == USB_SPEED_SUPER) {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_SUPPER);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SUPPER);
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+ RX_THR_HIGH);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_HIGH);
+ }
+ } else {
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+ EARLY_AGG_SLOW);
+ }
+}
+
+static int rtl8153_enable(struct r8152 *tp)
+{
+ set_tx_qlen(tp);
+ rtl_set_eee_plus(tp);
+ r8153_set_rx_agg(tp);
+
+ return rtl_enable(tp);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
@@ -1596,7 +1785,7 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
@@ -1613,8 +1802,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -1685,15 +1874,269 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
LINKENA | DIS_SDSAVE);
}
+static void r8153_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+
+ if (tp->version == RTL_VER_03) {
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data &= ~CTAP_SHORT_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ }
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data |= EN_10M_BGOFF;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_10M_PLLOFF;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ data = sram_read(tp, SRAM_IMPEDANCE);
+ data &= ~RX_DRIVING_MASK;
+ sram_write(tp, SRAM_IMPEDANCE, data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ data = sram_read(tp, SRAM_LPF_CFG);
+ data |= LPF_AUTO_TUNE;
+ sram_write(tp, SRAM_LPF_CFG, data);
+
+ data = sram_read(tp, SRAM_10M_AMP1);
+ data |= GDAC_IB_UPALL;
+ sram_write(tp, SRAM_10M_AMP1, data);
+ data = sram_read(tp, SRAM_10M_AMP2);
+ data |= AMP_DN;
+ sram_write(tp, SRAM_10M_AMP2, data);
+}
+
+static void r8153_u1u2en(struct r8152 *tp, int enable)
+{
+ u8 u1u2[8];
+
+ if (enable)
+ memset(u1u2, 0xff, sizeof(u1u2));
+ else
+ memset(u1u2, 0x00, sizeof(u1u2));
+
+ usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+ if (enable)
+ ocp_data |= U2P3_ENABLE;
+ else
+ ocp_data &= ~U2P3_ENABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, int enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~(PWR_EN | PHASE2_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8153_first_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data |= RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ r8153_teredo_off(tp);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data &= ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+ r8153_hw_phy_cfg(tp);
+
+ rtl8152_nic_reset(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data &= ~MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data &= ~CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
+ ocp_data |= TCR0_AUTO_FIFO;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
+
+ rtl8152_nic_reset(tp);
+
+ /* rx share fifo credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
+ /* TX share fifo free credit full threshold */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~RX_AGG_DISABLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+}
+
+static void r8153_enter_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data &= ~NOW_IS_OOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ rtl8152_disable(tp);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= RE_INIT_LL;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ mdelay(1);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~TEREDO_WAKE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+ ocp_data |= CPCR_RX_VLAN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data |= ALDPS_PROXY_MODE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+}
+
+static void r8153_disable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+}
+
+static void r8153_enable_aldps(struct r8152 *tp)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+}
+
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
- u16 bmcr, anar;
+ u16 bmcr, anar, gbcr;
int ret = 0;
cancel_delayed_work_sync(&tp->schedule);
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
+ if (tp->mii.supports_gmii) {
+ gbcr = r8152_mdio_read(tp, MII_CTRL1000);
+ gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ } else {
+ gbcr = 0;
+ }
if (autoneg == AUTONEG_DISABLE) {
if (speed == SPEED_10) {
@@ -1702,6 +2145,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
} else if (speed == SPEED_100) {
bmcr = BMCR_SPEED100;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ bmcr = BMCR_SPEED1000;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
} else {
ret = -EINVAL;
goto out;
@@ -1723,6 +2169,16 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_100HALF;
}
+ } else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+ if (duplex == DUPLEX_FULL) {
+ anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ } else {
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_100HALF;
+ gbcr |= ADVERTISE_1000HALF;
+ }
} else {
ret = -EINVAL;
goto out;
@@ -1731,6 +2187,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (tp->mii.supports_gmii)
+ r8152_mdio_write(tp, MII_CTRL1000, gbcr);
+
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
@@ -1752,6 +2211,15 @@ static void rtl8152_down(struct r8152 *tp)
r8152b_enable_aldps(tp);
}
+static void rtl8153_down(struct r8152 *tp)
+{
+ r8153_u1u2en(tp, 0);
+ r8153_power_cut_en(tp, 0);
+ r8153_disable_aldps(tp);
+ r8153_enter_oob(tp);
+ r8153_enable_aldps(tp);
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -1762,7 +2230,7 @@ static void set_carrier(struct r8152 *tp)
if (speed & LINK_STATUS) {
if (!(tp->speed & LINK_STATUS)) {
- rtl8152_enable(tp);
+ tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
}
@@ -1770,7 +2238,7 @@ static void set_carrier(struct r8152 *tp)
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
}
}
@@ -1802,20 +2270,21 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
- netif_warn(tp, ifup, netdev,
- "intr_urb submit failed: %d\n", res);
- return res;
+ netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
+ res);
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
- tp->speed = 0;
- netif_carrier_off(netdev);
- netif_start_queue(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
return res;
}
@@ -1825,12 +2294,12 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
- rtl8152_disable(tp);
+ tp->rtl_ops.disable(tp);
tasklet_enable(&tp->tl);
return res;
@@ -1851,9 +2320,16 @@ static void rtl_clear_bp(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
}
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
static void r8152b_enable_eee(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
ocp_data |= EEE_RX_EN | EEE_TX_EN;
@@ -1874,6 +2350,22 @@ static void r8152b_enable_eee(struct r8152 *tp)
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
+static void r8153_enable_eee(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG);
+ data |= EEE10_EN;
+ ocp_reg_write(tp, OCP_EEE_CFG, data);
+ data = ocp_reg_read(tp, OCP_EEE_CFG2);
+ data |= MY1000_EEE | MY100_EEE;
+ ocp_reg_write(tp, OCP_EEE_CFG2, data);
+}
+
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
@@ -1909,7 +2401,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
+ ocp_data &= ~RESUME_INDICATE;
ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
r8152b_exit_oob(tp);
@@ -1943,6 +2435,75 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
+static void r8153_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ int i;
+
+ r8153_u1u2en(tp, 0);
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
+ break;
+ msleep(20);
+ }
+
+ r8153_u2p3en(tp, 0);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
+ ocp_data &= ~TIMER11_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
+
+ r8153_clear_bp(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
+ ocp_data &= ~LED_MODE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
+ ocp_data &= ~LPM_TIMER_MASK;
+ if (tp->udev->speed == USB_SPEED_SUPER)
+ ocp_data |= LPM_TIMER_500US;
+ else
+ ocp_data |= LPM_TIMER_500MS;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
+ ocp_data &= ~SEN_VAL_MASK;
+ ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
+
+ r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, 1);
+
+ r8153_first_init(tp);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+ PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+ U1U2_SPDWN_EN | L1_SPDWN_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+ PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+ TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
+ EEE_SPDWN_EN);
+
+ r8153_enable_eee(tp);
+ r8153_enable_aldps(tp);
+ r8152b_enable_fc(tp);
+
+ r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+}
+
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -1956,7 +2517,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
tasklet_disable(&tp->tl);
}
- rtl8152_down(tp);
+ tp->rtl_ops.down(tp);
return 0;
}
@@ -1965,10 +2526,12 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -2072,6 +2635,18 @@ static void r8152b_get_version(struct r8152 *tp)
case 0x4c10:
tp->version = RTL_VER_02;
break;
+ case 0x5c00:
+ tp->version = RTL_VER_03;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c10:
+ tp->version = RTL_VER_04;
+ tp->mii.supports_gmii = 1;
+ break;
+ case 0x5c20:
+ tp->version = RTL_VER_05;
+ tp->mii.supports_gmii = 1;
+ break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
@@ -2079,6 +2654,80 @@ static void r8152b_get_version(struct r8152 *tp)
}
}
+static void rtl8152_unload(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ if (tp->version != RTL_VER_01) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ ocp_data |= POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+static void rtl8153_unload(struct r8152 *tp)
+{
+ r8153_power_cut_en(tp, 1);
+}
+
+static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+{
+ struct rtl_ops *ops = &tp->rtl_ops;
+ int ret = -ENODEV;
+
+ switch (id->idVendor) {
+ case VENDOR_ID_REALTEK:
+ switch (id->idProduct) {
+ case PRODUCT_ID_RTL8152:
+ ops->init = r8152b_init;
+ ops->enable = rtl8152_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8152_down;
+ ops->unload = rtl8152_unload;
+ ret = 0;
+ break;
+ case PRODUCT_ID_RTL8153:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case VENDOR_ID_SAMSUNG:
+ switch (id->idProduct) {
+ case PRODUCT_ID_SAMSUNG:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8152_disable;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+
+ return ret;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2092,9 +2741,10 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENODEV;
}
+ usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
- dev_err(&intf->dev, "Out of memory");
+ dev_err(&intf->dev, "Out of memory\n");
return -ENOMEM;
}
@@ -2102,12 +2752,17 @@ static int rtl8152_probe(struct usb_interface *intf,
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
- INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
-
tp->udev = udev;
tp->netdev = netdev;
tp->intf = intf;
+
+ ret = rtl_ops_init(tp, id);
+ if (ret)
+ goto out;
+
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+ INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -2124,7 +2779,7 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.supports_gmii = 0;
r8152b_get_version(tp);
- r8152b_init(tp);
+ tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
ret = alloc_all_mem(tp);
@@ -2135,11 +2790,11 @@ static int rtl8152_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
- netif_err(tp, probe, netdev, "couldn't register the device");
+ netif_err(tp, probe, netdev, "couldn't register the device\n");
goto out1;
}
- netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
+ netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2150,21 +2805,6 @@ out:
return ret;
}
-static void rtl8152_unload(struct r8152 *tp)
-{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RWSUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-}
-
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -2174,7 +2814,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
set_bit(RTL8152_UNPLUG, &tp->flags);
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
- rtl8152_unload(tp);
+ tp->rtl_ops.unload(tp);
free_all_mem(tp);
free_netdev(tp->netdev);
}
@@ -2183,6 +2823,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
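
For reference, a compact summary (illustration only, not part of the patch) of how the entries in rtl8152_table above are routed by rtl_ops_init(): both Realtek 0x0bda:0x8153 and the Samsung-branded 0x04e8:0xa101 device get the RTL8153 callbacks, while 0x0bda:0x8152 keeps the original RTL8152 ones.

#include <stdio.h>
#include <stdint.h>

/* Vendor/product pairs from rtl8152_table and the ops chosen in rtl_ops_init(). */
struct id_ops { uint16_t vendor, product; const char *ops; };

static const struct id_ops table[] = {
	{ 0x0bda, 0x8152, "r8152b_init / rtl8152_enable / rtl8152_down / rtl8152_unload" },
	{ 0x0bda, 0x8153, "r8153_init / rtl8153_enable / rtl8153_down / rtl8153_unload" },
	{ 0x04e8, 0xa101, "r8153_init / rtl8153_enable / rtl8153_down / rtl8153_unload" },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%04x:%04x -> %s\n", table[i].vendor, table[i].product,
		       table[i].ops);
	return 0;
}
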
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
deleted file mode 100644
index 2df2f4fb42a7..000000000000
--- a/drivers/net/usb/r815x.c
+++ /dev/null
@@ -1,256 +0,0 @@
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/usb/usbnet.h>
-
-#define RTL815x_REQT_READ 0xc0
-#define RTL815x_REQT_WRITE 0x40
-#define RTL815x_REQ_GET_REGS 0x05
-#define RTL815x_REQ_SET_REGS 0x05
-
-#define MCU_TYPE_PLA 0x0100
-#define OCP_BASE 0xe86c
-#define BASE_MII 0xa400
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-
-#define R815x_PHY_ID 32
-#define REALTEK_VENDOR_ID 0x0bda
-
-
-static int pla_read_word(struct usb_device *udev, u16 index)
-{
- int ret;
- u8 shift = index & 2;
- __le32 *tmp;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- index &= ~3;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out2;
-
- ret = __le32_to_cpu(*tmp);
- ret >>= (shift * 8);
- ret &= 0xffff;
-
-out2:
- kfree(tmp);
- return ret;
-}
-
-static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
-{
- __le32 *tmp;
- u32 mask = 0xffff;
- u16 byen = BYTE_EN_WORD;
- u8 shift = index & 2;
- int ret;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- data &= mask;
-
- if (shift) {
- byen <<= shift;
- mask <<= (shift * 8);
- data <<= (shift * 8);
- index &= ~3;
- }
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out3;
-
- data |= __le32_to_cpu(*tmp) & ~mask;
- *tmp = __cpu_to_le32(data);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
- 500);
-
-out3:
- kfree(tmp);
- return ret;
-}
-
-static int ocp_reg_read(struct usbnet *dev, u16 addr)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_read_word(dev->udev, ocp_index);
-
-out:
- return ret;
-}
-
-static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out1;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_write_word(dev->udev, ocp_index, data);
-
-out1:
- return ret;
-}
-
-static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
-{
- struct usbnet *dev = netdev_priv(netdev);
- int ret;
-
- if (phy_id != R815x_PHY_ID)
- return -EINVAL;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return -ENODEV;
-
- ret = ocp_reg_read(dev, BASE_MII + reg * 2);
-
- usb_autopm_put_interface(dev->intf);
- return ret;
-}
-
-static
-void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
-
- if (phy_id != R815x_PHY_ID)
- return;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return;
-
- ocp_reg_write(dev, BASE_MII + reg * 2, val);
-
- usb_autopm_put_interface(dev->intf);
-}
-
-static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 1;
-
- return status;
-}
-
-static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 0;
-
- return status;
-}
-
-static const struct driver_info r8152_info = {
- .description = "RTL8152 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8152_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct driver_info r8153_info = {
- .description = "RTL8153 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8153_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct usb_device_id products[] = {
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
- .driver_info = 0,
-#else
- .driver_info = (unsigned long) &r8152_info,
-#endif
-},
-
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
- .driver_info = 0,
-#else
- .driver_info = (unsigned long) &r8153_info,
-#endif
-},
-
- { }, /* END */
-};
-MODULE_DEVICE_TABLE(usb, products);
-
-static struct usb_driver r815x_driver = {
- .name = "r815x",
- .id_table = products,
- .probe = usbnet_probe,
- .disconnect = usbnet_disconnect,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .reset_resume = usbnet_resume,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(r815x_driver);
-
-MODULE_AUTHOR("Hayes Wang");
-MODULE_DESCRIPTION("Realtek USB ECM device");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cc49aac70224..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -13,11 +13,9 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -494,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6cbdac67f3a0..da2c4583bd2d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -6,7 +6,6 @@
* version 2 as published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a79e9d334928..a251588762ec 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -21,8 +21,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define DRIVER_VERSION "v.2.0"
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 66ebbacf066f..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -13,14 +13,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -2108,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 67eba39e6ee2..2c7ea8fd184f 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3f38ba868f61..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -13,14 +13,12 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1725,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index f360ee372554..526faa0c44e6 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*****************************************************************************/
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 7ec3e0ee0783..99b69af14274 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..b94a0fbb8b3b
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,874 @@
+/* CoreChip-sz SR9800 single-chip USB 2.0 Ethernet devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+ size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *sr_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "%s : Bad Header Length\n",
+ __func__);
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "%s : Bad RX Length %d\n",
+ __func__, size);
+ return 0;
+ }
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!sr_skb)
+ return 0;
+
+ skb_put(sr_skb, size);
+ memcpy(sr_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, sr_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+ skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 padbytes = 0xffff0000;
+ u32 packet_len;
+ int padlen;
+
+ padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+ if ((headroom < 4) || (tailroom < padlen)) {
+ skb->data = memmove(skb->head + 4, skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+
+ return skb;
+}
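+
+/* Illustration only, not part of the patch: a standalone sketch of the SR9800
+ * framing used above. sr_tx_fixup() prepends a 32-bit header whose low half is
+ * the payload length and whose high half is its bitwise complement, and
+ * sr_rx_fixup() rejects frames where the two halves disagree.
+ *
+ *	#include <stdio.h>
+ *	#include <stdint.h>
+ *
+ *	static uint32_t sr_build_header(uint16_t len)
+ *	{
+ *		return (uint32_t)(((len ^ 0xffffu) << 16) | len);
+ *	}
+ *
+ *	static int sr_header_ok(uint32_t header)
+ *	{
+ *		uint16_t size = header & 0x7ff;
+ *
+ *		return size == ((~header >> 16) & 0x07ff);
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		uint32_t hdr = sr_build_header(60);
+ *
+ *		printf("header=0x%08x valid=%d\n", (unsigned)hdr, sr_header_ok(hdr));
+ *		// header=0xffc3003c valid=1
+ *		return 0;
+ *	}
+ */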
+
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+ struct sr9800_int_data *event;
+ int link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ link = event->link & 0x01;
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+
+ return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+ __func__, ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+ *((__le16 *)buf));
+
+ ret = buf[1];
+
+out:
+ return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset:%02x\n",
+ ret);
+
+ return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+ ret);
+ goto out;
+ }
+
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net,
+ "Error reading Medium Status register:%02x\n", ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+ mode, ret);
+ return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+ value, ret);
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/* SR9800 have a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= SR_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > SR_MAX_MCAST) {
+ rx_ctl |= SR_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+ SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= SR_RX_CTL_AM;
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+ int phy_reg;
+ u32 phy_id;
+ int i;
+
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
+ return 0;
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+ if (phy_reg < 0)
+ return 0;
+
+ phy_id |= (phy_reg & 0xffff);
+
+ return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & SR_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & SR_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= SR_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= SR_MONITOR_MAGIC;
+
+ if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 *ebuf = (__le16 *)data;
+ int ret;
+ int i;
+
+ /* Crude hack to ensure that we don't overwrite memory
+ * if an odd length is supplied
+ */
+ if (eeprom->len % 2)
+ return -EINVAL;
+
+ eeprom->magic = SR_EEPROM_MAGIC;
+
+ /* sr9800 returns 2 bytes from eeprom on read */
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+ 0, 2, &ebuf[i]);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+ .get_drvinfo = sr_get_drvinfo,
+ .get_link = sr_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = sr_get_wol,
+ .set_wol = sr_set_wol,
+ .get_eeprom_len = sr_get_eeprom_len,
+ .get_eeprom = sr_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 mode;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ mode = SR9800_MEDIUM_DEFAULT;
+
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+ mode &= ~SR_MEDIUM_PS;
+
+ if (ecmd.duplex != DUPLEX_FULL)
+ mode &= ~SR_MEDIUM_FD;
+
+ netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+ __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+ sr_write_medium_mode(dev, mode);
+
+ return 0;
+}
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+ u16 rx_ctl;
+ int ret;
+
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ mii_nway_restart(&dev->mii);
+
+ ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+ if (ret < 0)
+ goto out;
+
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+ SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+ SR9800_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = sr_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+ ret = sr_write_gpio(dev,
+ SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+ if (ret < 0)
+ goto out;
+
+ embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ if (embd_phy) {
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+ if (ret < 0)
+ goto out;
+ }
+
+ msleep(150);
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = sr_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr_ioctl,
+ .ndo_set_rx_mode = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+ int ret;
+
+ /* set the embedded Ethernet PHY in power-down state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(600);
+
+ /* clear the embedded Ethernet PHY reset and power-down bits */
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 led01_mux, led23_mux;
+ int ret, embd_phy;
+ u32 phyid;
+ u16 rx_ctl;
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev, intf);
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+ * AA : MFA0(LED0)
+ * BB : MFA1(LED1)
+ * CC : MFA2(LED2), Reserved for SR9800
+ * DD : MFA3(LED3), Reserved for SR9800
+ */
+ led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+ led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+ ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+ goto out;
+ }
+
+ /* Get the MAC address */
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+ dev->net->dev_addr);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = sr_mdio_read;
+ dev->mii.mdio_write = sr_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = sr_get_phy_addr(dev);
+
+ dev->net->netdev_ops = &sr9800_netdev_ops;
+ dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+ /* Reset the PHY to normal operation mode */
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init PHY routine */
+ ret = sr9800_phy_powerup(dev);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = sr_get_phyid(dev);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+ /* medium mode setting */
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->udev->speed == USB_SPEED_HIGH) {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+ } else {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+ }
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
+ dev->rx_urb_size);
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct driver_info sr9800_driver_info = {
+ .description = "CoreChip SR9800 USB 2.0 Ethernet",
+ .bind = sr9800_bind,
+ .status = sr_status,
+ .link_reset = sr9800_link_reset,
+ .reset = sr9800_reset,
+ .flags = DRIVER_FLAG,
+ .rx_fixup = sr_rx_fixup,
+ .tx_fixup = sr_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
+ .driver_info = (unsigned long) &sr9800_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _SR9800_H
+#define _SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII 0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG 0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG 0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII 0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM 0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM 0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE 0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE 0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL 0x0f
+#define SR_RX_CTL_PRO (1 << 0)
+#define SR_RX_CTL_AMALL (1 << 1)
+#define SR_RX_CTL_SEP (1 << 2)
+#define SR_RX_CTL_AB (1 << 3)
+#define SR_RX_CTL_AM (1 << 4)
+#define SR_RX_CTL_AP (1 << 5)
+#define SR_RX_CTL_ARP (1 << 6)
+#define SR_RX_CTL_SO (1 << 7)
+#define SR_RX_CTL_RH1M (1 << 8)
+#define SR_RX_CTL_RH2M (1 << 9)
+#define SR_RX_CTL_RH3M (1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL 0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012 0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012 0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID 0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID 0x14
+/* command : Multicast Filter Array Read Reg */
+#define SR_CMD_READ_MULTI_FILTER 0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER 0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID 0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS 0x1a
+#define SR_MONITOR_LINK (1 << 1)
+#define SR_MONITOR_MAGIC (1 << 2)
+#define SR_MONITOR_HSFS (1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
+#define SR_MEDIUM_GM (1 << 0)
+#define SR_MEDIUM_FD (1 << 1)
+#define SR_MEDIUM_AC (1 << 2)
+#define SR_MEDIUM_ENCK (1 << 3)
+#define SR_MEDIUM_RFC (1 << 4)
+#define SR_MEDIUM_TFC (1 << 5)
+#define SR_MEDIUM_JFE (1 << 6)
+#define SR_MEDIUM_PF (1 << 7)
+#define SR_MEDIUM_RE (1 << 8)
+#define SR_MEDIUM_PS (1 << 9)
+#define SR_MEDIUM_RSV (1 << 10)
+#define SR_MEDIUM_SBP (1 << 11)
+#define SR_MEDIUM_SM (1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE 0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE 0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS 0x1e
+#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
+#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
+#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
+#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
+#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
+#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
+#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
+#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS 0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET 0x20
+#define SR_SWRESET_CLEAR 0x00
+#define SR_SWRESET_RR (1 << 0)
+#define SR_SWRESET_RT (1 << 1)
+#define SR_SWRESET_PRTE (1 << 2)
+#define SR_SWRESET_PRL (1 << 3)
+#define SR_SWRESET_BZ (1 << 4)
+#define SR_SWRESET_IPRL (1 << 5)
+#define SR_SWRESET_IPPD (1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS 0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT 0x22
+/* command : BULK in Buffer Size Reg */
+#define SR_CMD_BULKIN_SIZE 0x2A
+/* command : LED_MUX Control Reg */
+#define SR_CMD_LED_MUX 0x70
+#define SR_LED_MUX_TX_ACTIVE (1 << 0)
+#define SR_LED_MUX_RX_ACTIVE (1 << 1)
+#define SR_LED_MUX_COLLISION (1 << 2)
+#define SR_LED_MUX_DUP_COL (1 << 3)
+#define SR_LED_MUX_DUP (1 << 4)
+#define SR_LED_MUX_SPEED (1 << 5)
+#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
+#define SR_LED_MUX_LINK (1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define SR_MCAST_FILTER_SIZE 8
+#define SR_MAX_MCAST 64
+
+/* IPG0/1/2 Default Value */
+#define SR9800_IPG0_DEFAULT 0x15
+#define SR9800_IPG1_DEFAULT 0x0c
+#define SR9800_IPG2_DEFAULT 0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT \
+ (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+ SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+ SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL \
+ (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC 0xdeadbeef
+#define SR9800_EEPROM_LEN 0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION "11-Nov-2013"
+#define DRIVER_NAME "CoreChips"
+#define DRIVER_FLAG \
+ (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K 0
+#define SR9800_MAX_BULKIN_4K 1
+#define SR9800_MAX_BULKIN_6K 2
+#define SR9800_MAX_BULKIN_8K 3
+#define SR9800_MAX_BULKIN_16K 4
+#define SR9800_MAX_BULKIN_20K 5
+#define SR9800_MAX_BULKIN_24K 6
+#define SR9800_MAX_BULKIN_32K 7
+
+static const struct { unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+ /* 2k */
+ {2048, 0x8000, 0x8001},
+ /* 4k */
+ {4096, 0x8100, 0x8147},
+ /* 6k */
+ {6144, 0x8200, 0x81EB},
+ /* 8k */
+ {8192, 0x8300, 0x83D7},
+ /* 16k */
+ {16384, 0x8400, 0x851E},
+ /* 20k */
+ {20480, 0x8500, 0x8666},
+ /* 24k */
+ {24576, 0x8600, 0x87AE},
+ /* 32k */
+ {32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+ u8 multi_filter[SR_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+ __le16 res1;
+ u8 link;
+ __le16 res2;
+ u8 status;
+ __le16 res3;
+} __packed;
+
+#endif /* _SR9800_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aba04f561760..f9e96c427558 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -543,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet
- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -575,13 +576,6 @@ static void rx_complete (struct urb *urb)
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;
/* stalls need manual reset. this is rare ... except that
@@ -758,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
- add_wait_queue(&unlink_wakeup, &wait);
+ add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@@ -779,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
- dev->wait = NULL;
- remove_wait_queue(&unlink_wakeup, &wait);
+ remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval;
+ int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -797,6 +788,8 @@ int usbnet_stop (struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
+ /* to not race resume */
+ pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@@ -823,6 +816,9 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@@ -1443,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
- wake_up (dev->wait);
- }
+ /* waiting for all pending urbs to complete?
+ * only then can we forgo submitting anew
+ */
+ if (waitqueue_active(&dev->wait)) {
+ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+ wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@@ -1586,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@@ -1797,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- /* handle remote wakeup ASAP */
- if (!dev->wait &&
- netif_device_present(dev->net) &&
+ /* handle remote wakeup ASAP
+ * we cannot race against stop
+ */
+ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 35c90307d473..6aaa6eb9df72 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -13,15 +13,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2ec2041b62d4..c0e7c64765ab 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -285,7 +285,11 @@ static void veth_setup(struct net_device *dev)
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5d776447d9c3..470b01f3e7b4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
//#define DEBUG
#include <linux/netdevice.h>
@@ -27,6 +26,7 @@
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/average.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -37,11 +37,18 @@ module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
-#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
- sizeof(struct virtio_net_hdr_mrg_rxbuf), \
- L1_CACHE_BYTES))
#define GOOD_COPY_LEN 128
+/* Weight used for the RX packet size EWMA. The average packet size is used to
+ * determine the packet buffer size when refilling RX rings. As the entire RX
+ * ring may be refilled at once, the weight is chosen so that the EWMA will be
+ * insensitive to short-term, transient changes in packet size.
+ */
+#define RECEIVE_AVG_WEIGHT 64
+
+/* Minimum alignment for mergeable packet buffers. */
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
@@ -73,12 +80,15 @@ struct receive_queue {
struct napi_struct napi;
- /* Number of input buffers, and max we've ever had. */
- unsigned int num, max;
-
/* Chain pages by the private ptr. */
struct page *pages;
+ /* Average packet length for mergeable receive buffers. */
+ struct ewma mrg_avg_pkt_len;
+
+ /* Page frag for packet buffer allocation. */
+ struct page_frag alloc_frag;
+
/* RX: fragments + linear part + virtio header */
struct scatterlist sg[MAX_SKB_FRAGS + 2];
@@ -127,11 +137,6 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
- /* Page_frag for GFP_KERNEL packet buffer allocation when we run
- * low on memory.
- */
- struct page_frag alloc_frag;
-
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
@@ -222,6 +227,24 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
+static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
+{
+ unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
+ return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
+}
+
+static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
+{
+ return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
+}
+
+static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
+{
+ unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
+ return (unsigned long)buf | (size - 1);
+}
+
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
struct page *page, unsigned int offset,
@@ -330,38 +353,34 @@ err:
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct receive_queue *rq,
- void *buf,
+ unsigned long ctx,
unsigned int len)
{
+ void *buf = mergeable_ctx_to_buf_address(ctx);
struct skb_vnet_hdr *hdr = buf;
int num_buf = hdr->mhdr.num_buffers;
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
- struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
- MERGE_BUFFER_LEN);
+ unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+
+ struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
struct sk_buff *curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
-
while (--num_buf) {
int num_skb_frags;
- buf = virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!buf)) {
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf, hdr->mhdr.num_buffers);
dev->stats.rx_length_errors++;
goto err_buf;
}
- if (unlikely(len > MERGE_BUFFER_LEN)) {
- pr_debug("%s: rx error: merge buffer too long\n",
- dev->name);
- len = MERGE_BUFFER_LEN;
- }
+ buf = mergeable_ctx_to_buf_address(ctx);
page = virt_to_head_page(buf);
- --rq->num;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -377,37 +396,38 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb->truesize += nskb->truesize;
num_skb_frags = 0;
}
+ truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
if (curr_skb != head_skb) {
head_skb->data_len += len;
head_skb->len += len;
- head_skb->truesize += MERGE_BUFFER_LEN;
+ head_skb->truesize += truesize;
}
offset = buf - page_address(page);
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
- len, MERGE_BUFFER_LEN);
+ len, truesize);
} else {
skb_add_rx_frag(curr_skb, num_skb_frags, page,
- offset, len, MERGE_BUFFER_LEN);
+ offset, len, truesize);
}
}
+ ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
put_page(page);
while (--num_buf) {
- buf = virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!buf)) {
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
dev->stats.rx_length_errors++;
break;
}
- page = virt_to_head_page(buf);
+ page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
put_page(page);
- --rq->num;
}
err_buf:
dev->stats.rx_dropped++;
@@ -426,17 +446,20 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
+ if (vi->mergeable_rx_bufs) {
+ unsigned long ctx = (unsigned long)buf;
+ void *base = mergeable_ctx_to_buf_address(ctx);
+ put_page(virt_to_head_page(base));
+ } else if (vi->big_packets) {
give_pages(rq, buf);
- else
+ } else {
dev_kfree_skb(buf);
+ }
return;
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, rq, buf, len);
+ skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
else if (vi->big_packets)
skb = receive_big(dev, rq, buf, len);
else
@@ -577,28 +600,45 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
return err;
}
+static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+{
+ const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ unsigned int len;
+
+ len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+ GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
+ return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+}
+
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
- char *buf = NULL;
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ char *buf;
+ unsigned long ctx;
int err;
+ unsigned int len, hole;
- if (gfp & __GFP_WAIT) {
- if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
- gfp)) {
- buf = (char *)page_address(vi->alloc_frag.page) +
- vi->alloc_frag.offset;
- get_page(vi->alloc_frag.page);
- vi->alloc_frag.offset += MERGE_BUFFER_LEN;
- }
- } else {
- buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
- }
- if (!buf)
+ len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
+ if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
return -ENOMEM;
- sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ ctx = mergeable_buf_to_ctx(buf, len);
+ get_page(alloc_frag->page);
+ alloc_frag->offset += len;
+ hole = alloc_frag->size - alloc_frag->offset;
+ if (hole < len) {
+ /* To avoid internal fragmentation, if there is very likely not
+ * enough space for another buffer, add the remaining space to
+ * the current buffer. This extra space is not included in
+ * the truesize stored in ctx.
+ */
+ len += hole;
+ alloc_frag->offset += hole;
+ }
+
+ sg_init_one(rq->sg, buf, len);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -618,6 +658,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
int err;
bool oom;
+ gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(rq, gfp);
@@ -629,12 +670,8 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
oom = err == -ENOMEM;
if (err)
break;
- ++rq->num;
} while (rq->vq->num_free);
- if (unlikely(rq->num > rq->max))
- rq->max = rq->num;
- if (unlikely(!virtqueue_kick(rq->vq)))
- return false;
+ virtqueue_kick(rq->vq);
return !oom;
}
@@ -700,11 +737,10 @@ again:
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
receive_buf(rq, buf, len);
- --rq->num;
received++;
}
- if (rq->num < rq->max / 2) {
+ if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
if (!try_fill_recv(rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
@@ -840,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+ if (unlikely(err)) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -849,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
return NETDEV_TX_OK;
}
+ virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -874,16 +911,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
+ * never fail unless improperly formatted.
*/
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *out,
- struct scatterlist *in)
+ struct scatterlist *out)
{
struct scatterlist *sgs[4], hdr, stat;
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = ~0;
- unsigned out_num = 0, in_num = 0, tmp;
+ unsigned out_num = 0, tmp;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -896,16 +932,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
if (out)
sgs[out_num++] = out;
- if (in)
- sgs[out_num + in_num++] = in;
/* Add return status. */
sg_init_one(&stat, &status, sizeof(status));
- sgs[out_num + in_num++] = &stat;
+ sgs[out_num] = &stat;
- BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
- BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
- < 0);
+ BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+ virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
return status == VIRTIO_NET_OK;
@@ -935,8 +968,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
sg_init_one(&sg, addr->sa_data, dev->addr_len);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &sg, NULL)) {
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
@@ -1009,7 +1041,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
- VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+ VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
@@ -1027,7 +1059,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
sg_init_one(&sg, &s, sizeof(s));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
@@ -1067,7 +1099,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
void *buf;
int i;
- /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+ /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
@@ -1077,16 +1109,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_one(sg, &promisc, sizeof(promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_PROMISC,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
sg_init_one(sg, &allmulti, sizeof(allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
- VIRTIO_NET_CTRL_RX_ALLMULTI,
- sg, NULL))
+ VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
@@ -1122,8 +1152,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_TABLE_SET,
- sg, NULL))
+ VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
kfree(buf);
@@ -1138,7 +1167,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_ADD, &sg))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
return 0;
}
@@ -1152,7 +1181,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
sg_init_one(&sg, &vid, sizeof(vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+ VIRTIO_NET_CTRL_VLAN_DEL, &sg))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
return 0;
}
@@ -1386,6 +1415,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
}
}
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+ int i;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (vi->rq[i].alloc_frag.page)
+ put_page(vi->rq[i].alloc_frag.page);
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -1401,15 +1438,16 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
+ if (vi->mergeable_rx_bufs) {
+ unsigned long ctx = (unsigned long)buf;
+ void *base = mergeable_ctx_to_buf_address(ctx);
+ put_page(virt_to_head_page(base));
+ } else if (vi->big_packets) {
give_pages(&vi->rq[i], buf);
- else
+ } else {
dev_kfree_skb(buf);
- --vi->rq[i].num;
+ }
}
- BUG_ON(vi->rq[i].num != 0);
}
}
@@ -1516,6 +1554,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
napi_weight);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
+ ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
}
@@ -1552,6 +1591,33 @@ err:
return ret;
}
+#ifdef CONFIG_SYSFS
+static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attribute, char *buf)
+{
+ struct virtnet_info *vi = netdev_priv(queue->dev);
+ unsigned int queue_index = get_netdev_rx_queue_index(queue);
+ struct ewma *avg;
+
+ BUG_ON(queue_index >= vi->max_queue_pairs);
+ avg = &vi->rq[queue_index].mrg_avg_pkt_len;
+ return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
+}
+
+static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
+ __ATTR_RO(mergeable_rx_buffer_size);
+
+static struct attribute *virtio_net_mrg_rx_attrs[] = {
+ &mergeable_rx_buffer_size_attribute.attr,
+ NULL
+};
+
+static const struct attribute_group virtio_net_mrg_rx_group = {
+ .name = "virtio_net",
+ .attrs = virtio_net_mrg_rx_attrs
+};
+#endif
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
@@ -1645,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev)
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1666,6 +1733,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (err)
goto free_stats;
+#ifdef CONFIG_SYSFS
+ if (vi->mergeable_rx_bufs)
+ dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
+#endif
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
@@ -1680,7 +1751,8 @@ static int virtnet_probe(struct virtio_device *vdev)
try_fill_recv(&vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].num == 0) {
+ if (vi->rq[i].vq->num_free ==
+ virtqueue_get_vring_size(vi->rq[i].vq)) {
free_unused_bufs(vi);
err = -ENOMEM;
goto free_recv_bufs;
@@ -1714,9 +1786,8 @@ free_recv_bufs:
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
+ free_receive_page_frags(vi);
virtnet_del_vqs(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
free_stats:
free_percpu(vi->stats);
free:
@@ -1733,6 +1804,8 @@ static void remove_vq_common(struct virtnet_info *vi)
free_receive_bufs(vi);
+ free_receive_page_frags(vi);
+
virtnet_del_vqs(vi);
}
@@ -1750,8 +1823,6 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7e2788c488ed..0fa3b44f7342 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1235,7 +1235,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH))
- ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+ skb_set_hash(ctx->skb,
+ le32_to_cpu(rcd->rssHash),
+ PKT_HASH_TYPE_L3);
#endif
skb_put(ctx->skb, rcd->len);
@@ -1760,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_all_intrs(adapter);
-
- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
- vmxnet3_enable_all_intrs(adapter);
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -3132,7 +3143,6 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 12040a35d95d..190569d02450 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -37,7 +37,6 @@
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
-#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ed384fee76ac..1236812c7be6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -40,6 +40,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
+#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
@@ -468,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
-
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
@@ -554,13 +554,104 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct vxlanhdr *vh, *vh2;
+ struct ethhdr *eh, *eh2;
+ unsigned int hlen, off_vx, off_eth;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_vx = skb_gro_offset(skb);
+ hlen = off_vx + sizeof(*vh);
+ vh = skb_gro_header_fast(skb, off_vx);
+ if (skb_gro_header_hard(skb, hlen)) {
+ vh = skb_gro_header_slow(skb, hlen, off_vx);
+ if (unlikely(!vh))
+ goto out;
+ }
+ skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
+ off_eth = skb_gro_offset(skb);
+ hlen = off_eth + sizeof(*eh);
+ eh = skb_gro_header_fast(skb, off_eth);
+ if (skb_gro_header_hard(skb, hlen)) {
+ eh = skb_gro_header_slow(skb, hlen, off_eth);
+ if (unlikely(!eh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ vh2 = (struct vxlanhdr *)(p->data + off_vx);
+ eh2 = (struct ethhdr *)(p->data + off_eth);
+ if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct ethhdr *eh;
+ struct packet_offload *ptype;
+ __be16 type;
+ int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
+ int err = -ENOSYS;
+
+ eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
+
+ rcu_read_unlock();
+ return err;
+}
+
/* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct sock *sk)
+static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
struct net_device *dev;
+ struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;
+ int err;
+
+ if (sa_family == AF_INET) {
+ err = udp_add_offload(&vs->udp_offloads);
+ if (err)
+ pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
+ }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -572,9 +663,10 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
}
/* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct sock *sk)
+static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
struct net_device *dev;
+ struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
__be16 port = inet_sk(sk)->inet_sport;
@@ -586,6 +678,9 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
port);
}
rcu_read_unlock();
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
@@ -741,10 +836,9 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
return -EINVAL;
*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
- tdev = dev_get_by_index(net, *ifindex);
+ tdev = __dev_get_by_index(net, *ifindex);
if (!tdev)
return -EADDRNOTAVAIL;
- dev_put(tdev);
} else {
*ifindex = 0;
}
@@ -916,17 +1010,32 @@ static bool vxlan_snoop(struct net_device *dev,
}
/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ /* The vxlan_sock is only used by dev, leaving group has
+ * no effect on other vxlan devices.
+ */
+ if (atomic_read(&dev->vn_sock->refcnt) == 1)
+ return false;
+
list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev))
+ if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- remote_ip))
- return true;
+ if (vxlan->vn_sock != dev->vn_sock)
+ continue;
+
+ if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ &dev->default_dst.remote_ip))
+ continue;
+
+ if (vxlan->default_dst.remote_ifindex !=
+ dev->default_dst.remote_ifindex)
+ continue;
+
+ return true;
}
return false;
@@ -949,7 +1058,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
rcu_assign_sk_user_data(vs->sock->sk, NULL);
- vxlan_notify_del_rx_port(sk);
+ vxlan_notify_del_rx_port(vs);
spin_unlock(&vn->sock_lock);
queue_work(vxlan_wq, &vs->del_work);
@@ -1047,6 +1156,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!vs)
goto drop;
+ /* If the NIC driver gave us an encapsulated packet
+ * with the encapsulation mark, the device checksummed it
+ * for us. Otherwise force the upper layers to verify it.
+ */
+ if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
+ !skb->encapsulation)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->encapsulation = 0;
+
vs->rcv(vs, skb, vxh->vx_vni);
return 0;
@@ -1066,7 +1185,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
struct vxlan_dev *vxlan;
- struct pcpu_tstats *stats;
+ struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
__u32 vni;
int err = 0;
@@ -1105,17 +1224,6 @@ static void vxlan_rcv(struct vxlan_sock *vs,
skb_reset_network_header(skb);
- /* If the NIC driver gave us an encapsulated packet with
- * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
- * leave the CHECKSUM_UNNECESSARY, the device checksummed it
- * for us. Otherwise force the upper layers to verify it.
- */
- if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
- !(vxlan->dev->features & NETIF_F_RXCSUM))
- skb->ip_summed = CHECKSUM_NONE;
-
- skb->encapsulation = 0;
-
if (oip6)
err = IP6_ECN_decapsulate(oip6, skb);
if (oip)
@@ -1210,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
neigh_release(n);
+ if (reply == NULL)
+ goto out;
+
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1231,15 +1342,103 @@ out:
}
#if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ struct neighbour *n, bool isrouter)
+{
+ struct net_device *dev = request->dev;
+ struct sk_buff *reply;
+ struct nd_msg *ns, *na;
+ struct ipv6hdr *pip6;
+ u8 *daddr;
+ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+ int ns_olen;
+ int i, len;
+
+ if (dev == NULL)
+ return NULL;
+
+ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+ sizeof(*na) + na_olen + dev->needed_tailroom;
+ reply = alloc_skb(len, GFP_ATOMIC);
+ if (reply == NULL)
+ return NULL;
+
+ reply->protocol = htons(ETH_P_IPV6);
+ reply->dev = dev;
+ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+ skb_push(reply, sizeof(struct ethhdr));
+ skb_set_mac_header(reply, 0);
+
+ ns = (struct nd_msg *)skb_transport_header(request);
+
+ daddr = eth_hdr(request)->h_source;
+ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }
+
+ /* Ethernet header */
+ ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+ ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+ reply->protocol = htons(ETH_P_IPV6);
+
+ skb_pull(reply, sizeof(struct ethhdr));
+ skb_set_network_header(reply, 0);
+ skb_put(reply, sizeof(struct ipv6hdr));
+
+ /* IPv6 header */
+
+ pip6 = ipv6_hdr(reply);
+ memset(pip6, 0, sizeof(struct ipv6hdr));
+ pip6->version = 6;
+ pip6->priority = ipv6_hdr(request)->priority;
+ pip6->nexthdr = IPPROTO_ICMPV6;
+ pip6->hop_limit = 255;
+ pip6->daddr = ipv6_hdr(request)->saddr;
+ pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+ skb_pull(reply, sizeof(struct ipv6hdr));
+ skb_set_transport_header(reply, 0);
+
+ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na)+na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ na->icmph.icmp6_router = isrouter;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+ ether_addr_copy(&na->opt[2], n->ha);
+ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+ na->opt[1] = na_olen >> 3;
+
+ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+ csum_partial(na, sizeof(*na)+na_olen, 0));
+
+ pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+ skb_push(reply, sizeof(struct ipv6hdr));
+
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return reply;
+}
+
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct neighbour *n;
- union vxlan_addr ipa;
+ struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
- struct nd_msg *msg;
- struct inet6_dev *in6_dev = NULL;
+ struct neighbour *n;
+ struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@@ -1252,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
- if (ipv6_addr_loopback(daddr) ||
- ipv6_addr_is_multicast(daddr))
- goto out;
-
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
+ struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@@ -1278,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
- !!in6_dev->cnf.forwarding,
- true, false, false);
+ reply = vxlan_na_create(skb, n,
+ !!(f ? f->flags & NTF_ROUTER : 0));
+
neigh_release(n);
+
+ if (reply == NULL)
+ goto out;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+
} else if (vxlan->flags & VXLAN_F_L3MISS) {
- ipa.sin6.sin6_addr = *daddr;
- ipa.sa.sa_family = AF_INET6;
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = msg->target,
+ .sa.sa_family = AF_INET6,
+ };
+
vxlan_ip_miss(dev, &ipa);
}
@@ -1366,20 +1576,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
}
-static void vxlan_sock_put(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
-{
- skb_orphan(skb);
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = vxlan_sock_put;
-}
-
/* Compute source port for outgoing packet
* first choice to use L4 flow hash since it will spread
* better and maybe available from hardware
@@ -1390,7 +1586,7 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
unsigned int range = (port_max - port_min) + 1;
u32 hash;
- hash = skb_get_rxhash(skb);
+ hash = skb_get_hash(skb);
if (!hash)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
@@ -1499,8 +1695,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1557,8 +1751,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
uh->len = htons(skb->len);
uh->check = 0;
- vxlan_set_owner(vs->sock->sk, skb);
-
err = handle_offloads(skb);
if (err)
return err;
@@ -1572,11 +1764,12 @@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
{
- struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
@@ -1770,7 +1963,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct ethhdr *eth;
bool did_rsc = false;
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
skb_reset_mac_header(skb);
@@ -1812,7 +2005,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -1820,12 +2013,19 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;
+ if (!fdst) {
+ fdst = rdst;
+ continue;
+ }
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, dev, rdst, did_rsc);
}
- dev_kfree_skb(skb);
+ if (fdst)
+ vxlan_xmit_one(skb, dev, fdst, did_rsc);
+ else
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1882,12 +2082,12 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_sock *vs;
int i;
- dev->tstats = alloc_percpu(struct pcpu_tstats);
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
for_each_possible_cpu(i) {
- struct pcpu_tstats *vxlan_stats;
+ struct pcpu_sw_netstats *vxlan_stats;
vxlan_stats = per_cpu_ptr(dev->tstats, i);
u64_stats_init(&vxlan_stats->syncp);
}
@@ -1935,7 +2135,6 @@ static void vxlan_uninit(struct net_device *dev)
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1943,8 +2142,7 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1983,7 +2181,7 @@ static int vxlan_stop(struct net_device *dev)
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+ !vxlan_group_used(vn, vxlan)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -2001,6 +2199,29 @@ static void vxlan_set_multicast_list(struct net_device *dev)
{
}
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev;
+ int max_mtu;
+
+ lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ if (lowerdev == NULL)
+ return eth_change_mtu(dev, new_mtu);
+
+ if (dst->remote_ip.sa.sa_family == AF_INET6)
+ max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+ else
+ max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ if (new_mtu < 68 || new_mtu > max_mtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops vxlan_netdev_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
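The vxlan_change_mtu() hunk above caps the VXLAN MTU at the lower device's MTU minus the encapsulation overhead, which is larger for an IPv6 underlay. A minimal standalone sketch of the same bound check, assuming the usual headroom values (outer Ethernet 14 + IPv4 20 or IPv6 40 + UDP 8 + VXLAN 8); the constants and helper names below are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Assumed encapsulation overhead, mirroring VXLAN_HEADROOM / VXLAN6_HEADROOM:
 * outer Ethernet (14) + outer IPv4 (20) or IPv6 (40) + UDP (8) + VXLAN (8). */
#define SKETCH_VXLAN_HEADROOM   (14 + 20 + 8 + 8)   /* 50 bytes, IPv4 underlay */
#define SKETCH_VXLAN6_HEADROOM  (14 + 40 + 8 + 8)   /* 70 bytes, IPv6 underlay */

/* Return true if new_mtu is acceptable for a VXLAN device whose lower
 * device has lowerdev_mtu, as in the vxlan_change_mtu() hunk above. */
static bool vxlan_mtu_ok(int new_mtu, int lowerdev_mtu, bool ipv6_underlay)
{
	int max_mtu = lowerdev_mtu -
		      (ipv6_underlay ? SKETCH_VXLAN6_HEADROOM : SKETCH_VXLAN_HEADROOM);

	return new_mtu >= 68 && new_mtu <= max_mtu;  /* 68 = minimum IPv4 MTU */
}

int main(void)
{
	/* With a 1500-byte lower MTU, 1450 fits over an IPv4 underlay but not over IPv6. */
	printf("%d %d\n", vxlan_mtu_ok(1450, 1500, false),
			  vxlan_mtu_ok(1450, 1500, true));
	return 0;
}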
@@ -2009,7 +2230,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fdb_add = vxlan_fdb_add,
@@ -2278,7 +2499,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct sock *sk;
unsigned int h;
- vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+ vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
return ERR_PTR(-ENOMEM);
@@ -2303,9 +2524,14 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
vs->data = data;
rcu_assign_sk_user_data(vs->sock->sk, vs);
+ /* Initialize the vxlan udp offloads structure */
+ vs->udp_offloads.port = port;
+ vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
+ vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
+
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
- vxlan_notify_add_rx_port(sk);
+ vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
@@ -2630,6 +2856,44 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.fill_info = vxlan_fill_info,
};
+static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
+ struct net_device *dev)
+{
+ struct vxlan_dev *vxlan, *next;
+ LIST_HEAD(list_kill);
+
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ /* In case we created vxlan device with carrier
+ * and we lose the carrier due to module unload
+ * we also need to remove vxlan device. In other
+ * cases, it's not necessary and remote_ifindex
+ * is 0 here, so no matches.
+ */
+ if (dst->remote_ifindex == dev->ifindex)
+ vxlan_dellink(vxlan->dev, &list_kill);
+ }
+
+ unregister_netdevice_many(&list_kill);
+}
+
+static int vxlan_lowerdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+
+ if (event == NETDEV_UNREGISTER)
+ vxlan_handle_lowerdev_unregister(vn, dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vxlan_notifier_block __read_mostly = {
+ .notifier_call = vxlan_lowerdev_event,
+};
+
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2644,22 +2908,8 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
-static __net_exit void vxlan_exit_net(struct net *net)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- struct vxlan_dev *vxlan;
- LIST_HEAD(list);
-
- rtnl_lock();
- list_for_each_entry(vxlan, &vn->vxlan_list, next)
- unregister_netdevice_queue(vxlan->dev, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
-}
-
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
@@ -2674,18 +2924,23 @@ static int __init vxlan_init_module(void)
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
- rc = register_pernet_device(&vxlan_net_ops);
+ rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
- rc = rtnl_link_register(&vxlan_link_ops);
+ rc = register_netdevice_notifier(&vxlan_notifier_block);
if (rc)
goto out2;
- return 0;
+ rc = rtnl_link_register(&vxlan_link_ops);
+ if (rc)
+ goto out3;
+ return 0;
+out3:
+ unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
- unregister_pernet_device(&vxlan_net_ops);
+ unregister_pernet_subsys(&vxlan_net_ops);
out1:
destroy_workqueue(vxlan_wq);
return rc;
@@ -2695,13 +2950,15 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
+ unregister_netdevice_notifier(&vxlan_notifier_block);
destroy_workqueue(vxlan_wq);
- unregister_pernet_device(&vxlan_net_ops);
- rcu_barrier();
+ unregister_pernet_subsys(&vxlan_net_ops);
+ /* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
+MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");
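The reworked module init/exit above registers the pernet subsystem, then the netdevice notifier, then the rtnl link ops, and unwinds in reverse order through the out3/out2/out1 labels when a later step fails. A small standalone sketch of that goto-unwind shape, with stub functions standing in for the real registration calls:

#include <stdio.h>

/* Stubs standing in for register_pernet_subsys(), register_netdevice_notifier()
 * and rtnl_link_register(); return 0 on success, nonzero on failure. */
static int reg_pernet(void)   { puts("pernet registered");   return 0; }
static int reg_notifier(void) { puts("notifier registered"); return 0; }
static int reg_link_ops(void) { puts("link ops registered"); return -1; /* simulate failure */ }

static void unreg_pernet(void)   { puts("pernet unregistered"); }
static void unreg_notifier(void) { puts("notifier unregistered"); }

/* Register in order, unwind in reverse on failure - the same shape as the
 * out3/out2/out1 labels in vxlan_init_module() above. */
static int init_module_sketch(void)
{
	int rc;

	rc = reg_pernet();
	if (rc)
		goto out1;
	rc = reg_notifier();
	if (rc)
		goto out2;
	rc = reg_link_ops();
	if (rc)
		goto out3;
	return 0;

out3:
	unreg_notifier();
out2:
	unreg_pernet();
out1:
	return rc;
}

int main(void)
{
	return init_module_sketch() ? 1 : 0;
}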
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len)
{
struct frhdr hdr;
- struct dlci_local *dlp;
unsigned int hlen;
char *dest;
- dlp = netdev_priv(dev);
-
hdr.control = FRAD_I_UI;
switch (type)
{
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
- dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 851dc7b7e8b0..288610df205c 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -699,8 +699,6 @@ static void dscc4_free1(struct pci_dev *pdev)
for (i = 0; i < dev_per_card; i++)
unregister_hdlc_device(dscc4_to_dev(root + i));
- pci_set_drvdata(pdev, NULL);
-
for (i = 0; i < dev_per_card; i++)
free_netdev(root[i].dev);
kfree(root);
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 62f01b74cbd6..dc334c85d966 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -29,7 +29,6 @@
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index e4f539ad071b..10963e8f4b39 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -159,7 +159,7 @@ typedef struct {
/* Packet Descriptor Status bits */
#define ST_TX_EOM 0x80 /* End of frame */
-#define ST_TX_EOT 0x01 /* End of transmition */
+#define ST_TX_EOT 0x01 /* End of transmission */
#define ST_RX_EOM 0x80 /* End of frame */
#define ST_RX_SHORT 0x40 /* Short frame */
@@ -211,7 +211,7 @@ typedef struct {
#define CTL_NORTS 0x01
#define CTL_IDLE 0x10 /* Transmit an idle pattern */
-#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmition */
+#define CTL_UDRNC 0x20 /* Idle after CRC or FCS+flag transmission */
#define ST0_TXRDY 0x02 /* TX ready */
#define ST0_RXRDY 0x01 /* RX ready */
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 6269a09c7369..e92ecf1d3314 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -29,7 +29,6 @@
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h
index 96567c2dc4db..22137ee669cf 100644
--- a/drivers/net/wan/hd64572.h
+++ b/drivers/net/wan/hd64572.h
@@ -218,7 +218,7 @@ typedef struct {
#define ST_TX_EOM 0x80 /* End of frame */
#define ST_TX_UNDRRUN 0x08
#define ST_TX_OWNRSHP 0x02
-#define ST_TX_EOT 0x01 /* End of transmition */
+#define ST_TX_EOT 0x01 /* End of transmission */
#define ST_RX_EOM 0x80 /* End of frame */
#define ST_RX_SHORT 0x40 /* Short frame */
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 7ef435bab425..b2fe9bb89633 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -49,7 +49,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
-#include <linux/init.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
@@ -973,7 +972,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hdlcdev:
- pci_set_drvdata(pdev, NULL);
kfree(sc);
err_kzalloc:
pci_release_regions(pdev);
@@ -995,7 +993,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
@@ -2126,7 +2123,7 @@ bug_out:
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- lmc_trace(dev, "lmc_driver_timout out");
+ lmc_trace(dev, "lmc_driver_timeout out");
}
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 53efc57fcace..5b72f7f8c516 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -281,7 +281,6 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ddbce54040e2..fe4e3ece3c42 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -260,7 +260,6 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 388ddf60a66d..1b89ecf0959e 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -57,6 +57,7 @@
#include <net/net_namespace.h>
#include <net/arp.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4c0a69779b89..f76aa9081585 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -542,7 +542,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
kfree(card);
}
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index cfce83e1f273..f35f93c31b09 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -15,7 +15,6 @@
* more details.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/skbuff.h>
@@ -1314,7 +1313,7 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
if (!(changes & BSS_CHANGED_BSSID))
return;
- if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(conf->bssid, priv->bssid)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
@@ -1866,7 +1865,6 @@ static int adm8211_probe(struct pci_dev *pdev,
dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- dev->channel_change_time = 1000;
dev->max_signal = 100; /* FIXME: find better value */
dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 14128fd265ac..7e9ede6c5798 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -23,7 +23,6 @@
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 34c8a33cac06..99b3bfa717d5 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1721,7 +1721,7 @@ static void at76_mac80211_tx(struct ieee80211_hw *hw,
* following workaround is necessary. If the TX frame is an
* authentication frame extract the bssid and send the CMD_JOIN. */
if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
- if (!ether_addr_equal(priv->bssid, mgmt->bssid)) {
+ if (!ether_addr_equal_64bits(priv->bssid, mgmt->bssid)) {
memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
ieee80211_queue_work(hw, &priv->work_join_bssid);
dev_kfree_skb_any(skb);
@@ -2112,7 +2112,6 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
priv->pm_period = 0;
/* unit us */
- priv->hw->channel_change_time = 100000;
return priv;
}
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 280fc3d53a36..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -25,7 +25,6 @@
* that and only has minimal functionality.
*/
#include <linux/compiler.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -1765,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
(CyberTAN Technology) */
AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e0ba7cd14252..b59cfbe0276b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -17,6 +17,7 @@
#ifndef ATH_H
#define ATH_H
+#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/spinlock.h>
@@ -165,6 +166,7 @@ struct ath_common {
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
void ath_hw_setbssidmask(struct ath_common *common);
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 82e8088ca9b4..a6f5285235af 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -37,3 +37,10 @@ config ATH10K_TRACING
---help---
Select this to enable the ath10k tracing infrastructure.
+config ATH10K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e46951b8fb92..d44d618b05f9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -243,6 +243,16 @@ static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
misc_ie_addr | CE_ERROR_MASK);
}
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 misc_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + MISC_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+ misc_ie_addr & ~CE_ERROR_MASK);
+}
+
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
@@ -731,7 +741,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id, ret;
u32 intr_summary;
@@ -741,7 +750,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
intr_summary = CE_INTERRUPT_SUMMARY(ar);
- for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
if (intr_summary & (1 << ce_id))
intr_summary &= ~(1 << ce_id);
else
@@ -783,22 +792,25 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
ath10k_pci_sleep(ar);
}
-void ath10k_ce_disable_interrupts(struct ath10k *ar)
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id, ret;
ret = ath10k_pci_wake(ar);
if (ret)
- return;
+ return ret;
- for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ce_state->ctrl_addr;
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
+
ath10k_pci_sleep(ar);
+
+ return 0;
}
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
@@ -1047,9 +1059,19 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
const struct ce_attr *attr)
{
struct ath10k_ce_pipe *ce_state;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
int ret;
+ /*
+ * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
ret = ath10k_pci_wake(ar);
if (ret)
return NULL;
@@ -1057,7 +1079,7 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- return NULL;
+ goto out;
}
if (attr->src_nentries) {
@@ -1066,7 +1088,8 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
- return NULL;
+ ce_state = NULL;
+ goto out;
}
}
@@ -1076,15 +1099,13 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
- return NULL;
+ ce_state = NULL;
+ goto out;
}
}
- /* Enable CE error interrupts */
- ath10k_ce_error_intr_enable(ar, ctrl_addr);
-
+out:
ath10k_pci_sleep(ar);
-
return ce_state;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 15d45b5b7615..67dbde6a5c74 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -234,7 +234,7 @@ void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-void ath10k_ce_disable_interrupts(struct ath10k *ar);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 1129994fb105..3b59af3bddf4 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -597,10 +597,8 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
- if (!uart_print) {
- ath10k_info("UART prints disabled\n");
+ if (!uart_print)
return 0;
- }
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
if (ret) {
@@ -645,8 +643,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
ar->hw_params = *hw_params;
- ath10k_info("Hardware name %s version 0x%x\n",
- ar->hw_params.name, ar->target_version);
+ ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
return 0;
}
@@ -664,7 +662,8 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
- /* this can happen if driver is being unloaded */
+ /* this can happen if driver is being unloaded
+ * or if the crash happens during FW probing */
ath10k_warn("cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
@@ -737,8 +736,6 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
- ath10k_debug_destroy(ar);
-
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
@@ -786,21 +783,30 @@ int ath10k_core_start(struct ath10k *ar)
goto err;
}
- status = ath10k_htc_wait_target(&ar->htc);
- if (status)
+ status = ath10k_hif_start(ar);
+ if (status) {
+ ath10k_err("could not start HIF: %d\n", status);
goto err_wmi_detach;
+ }
+
+ status = ath10k_htc_wait_target(&ar->htc);
+ if (status) {
+ ath10k_err("failed to connect to HTC: %d\n", status);
+ goto err_hif_stop;
+ }
status = ath10k_htt_attach(ar);
if (status) {
ath10k_err("could not attach htt (%d)\n", status);
- goto err_wmi_detach;
+ goto err_hif_stop;
}
status = ath10k_init_connect_htc(ar);
if (status)
goto err_htt_detach;
- ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+ ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ar->hw->wiphy->fw_version);
status = ath10k_wmi_cmd_init(ar);
if (status) {
@@ -826,12 +832,23 @@ int ath10k_core_start(struct ath10k *ar)
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
+ if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
+ ar->hw_params.name, ar->target_version,
+ ar->hw->wiphy->fw_version, ar->fw_api,
+ ar->htt.target_version_major,
+ ar->htt.target_version_minor);
+
+ __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
+
return 0;
err_disconnect_htc:
ath10k_htc_stop(&ar->htc);
err_htt_detach:
ath10k_htt_detach(&ar->htt);
+err_hif_stop:
+ ath10k_hif_stop(ar);
err_wmi_detach:
ath10k_wmi_detach(ar);
err:
@@ -985,6 +1002,8 @@ void ath10k_core_unregister(struct ath10k *ar)
ath10k_mac_unregister(ar);
ath10k_core_free_firmware_files(ar);
+
+ ath10k_debug_destroy(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0934f7633de3..ade1781c7186 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -30,6 +30,7 @@
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
+#include "../dfs_pattern_detector.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,7 +44,7 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
-#define ATH10K_MAX_NUM_MGMT_PENDING 16
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
struct ath10k;
@@ -192,6 +193,14 @@ struct ath10k_target_stats {
};
+struct ath10k_dfs_stats {
+ u32 phy_errors;
+ u32 pulses_total;
+ u32 pulses_detected;
+ u32 pulses_discarded;
+ u32 radar_detected;
+};
+
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
struct ath10k_peer {
@@ -244,6 +253,9 @@ struct ath10k_vif {
u8 bssid[ETH_ALEN];
} ibss;
} u;
+
+ u8 fixed_rate;
+ u8 fixed_nss;
};
struct ath10k_vif_iter {
@@ -261,6 +273,10 @@ struct ath10k_debug {
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
+ struct ath10k_dfs_stats dfs_stats;
+ struct ath_dfs_pool_stats dfs_pool_stats;
+
+ u32 fw_dbglog_mask;
};
enum ath10k_state {
@@ -295,10 +311,19 @@ enum ath10k_fw_features {
/* firmware support tx frame management over WMI, otherwise it's HTT */
ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+ /* Firmware does not support P2P */
+ ATH10K_FW_FEATURE_NO_P2P = 3,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
+enum ath10k_dev_flags {
+ /* Indicates that the ath10k device is in the CAC phase of DFS */
+ ATH10K_CAC_RUNNING,
+ ATH10K_FLAG_FIRST_BOOT_DONE,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -392,6 +417,8 @@ struct ath10k {
bool monitor_enabled;
bool monitor_present;
unsigned int filter_flags;
+ unsigned long dev_flags;
+ u32 dfs_block_radar_events;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
@@ -410,6 +437,9 @@ struct ath10k {
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
+ /* number of created peers; protected by data_lock */
+ int num_peers;
+
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
@@ -428,6 +458,8 @@ struct ath10k {
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
+ struct dfs_pattern_detector *dfs_detector;
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 760ff2289e3c..6debd281350a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -614,6 +614,61 @@ static const struct file_operations fops_htt_stats_mask = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n",
+ ar->debug.fw_dbglog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.fw_dbglog_mask = mask;
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ if (ret) {
+ ath10k_warn("dbglog cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -625,6 +680,14 @@ int ath10k_debug_start(struct ath10k *ar)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+ if (ar->debug.fw_dbglog_mask) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+ if (ret)
+ /* not serious */
+ ath10k_warn("failed to enable dbglog during start: %d",
+ ret);
+ }
+
return 0;
}
@@ -639,6 +702,86 @@ void ath10k_debug_stop(struct ath10k *ar)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
}
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+
+ ieee80211_radar_detected(ar->hw);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath10k_write_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define ATH10K_DFS_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval = 0, len = 0;
+ const int size = 8000;
+ struct ath10k *ar = file->private_data;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!ar->dfs_detector) {
+ len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+ goto exit;
+ }
+
+ ar->debug.dfs_pool_stats =
+ ar->dfs_detector->get_stats(ar->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ ATH10K_DFS_STAT("reported phy errors", phy_errors);
+ ATH10K_DFS_STAT("pulse events reported", pulses_total);
+ ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+ ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+ ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+ ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+ ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+ ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+ ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+ ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+ ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = ath10k_read_dfs_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -667,6 +810,23 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_htt_stats_mask);
+ debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_dbglog);
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_simulate_radar);
+
+ debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+ ar->debug.debugfs_phy,
+ &ar->dfs_block_radar_events);
+
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_dfs_stats);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3cfe3ee90dbe..1773c36c71a0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -33,6 +33,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -53,6 +54,8 @@ void ath10k_debug_read_service_map(struct ath10k *ar,
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -82,6 +85,9 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edae50b52806..edc57ab505c8 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -191,6 +191,11 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ if (!skb) {
+ ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+ return 0;
+ }
+
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
@@ -534,14 +539,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
- reinit_completion(&htc->ctl_resp);
-
- status = ath10k_hif_start(htc->ar);
- if (status) {
- ath10k_err("could not start HIF (%d)\n", status);
- goto err_start;
- }
-
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status <= 0) {
@@ -549,15 +546,13 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
status = -ETIMEDOUT;
ath10k_err("ctl_resp never came in (%d)\n", status);
- goto err_target;
+ return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err("Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
-
- status = -ECOMM;
- goto err_target;
+ return -ECOMM;
}
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
@@ -567,8 +562,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
- status = -ECOMM;
- goto err_target;
+ return -ECOMM;
}
htc->total_transmit_credits = credit_count;
@@ -581,9 +575,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
- status = -ECOMM;
ath10k_err("Invalid credit size received\n");
- goto err_target;
+ return -ECOMM;
}
ath10k_htc_setup_target_buffer_assignments(htc);
@@ -600,14 +593,10 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err("could not connect to htc service (%d)\n", status);
- goto err_target;
+ return status;
}
return 0;
-err_target:
- ath10k_hif_stop(htc->ar);
-err_start:
- return status;
}
int ath10k_htc_connect_service(struct ath10k_htc *htc,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 5f7eeebc5432..69697af59ce0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,8 +104,8 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_info("htt target version %d.%d\n",
- htt->target_version_major, htt->target_version_minor);
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1a337e93b7e9..b93ae355bc08 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1182,6 +1182,8 @@ struct htt_rx_info {
u32 info2;
} rate;
bool fcs_err;
+ bool amsdu_more;
+ bool mic_err;
};
struct ath10k_htt {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 90d4f74c28d7..fe8bd1b59f0e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -659,23 +659,6 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
memcpy(hdr_buf, hdr, hdr_len);
hdr = (struct ieee80211_hdr *)hdr_buf;
- /* FIXME: Hopefully this is a temporary measure.
- *
- * Reporting individual A-MSDU subframes means each reported frame
- * shares the same sequence number.
- *
- * mac80211 drops frames it recognizes as duplicates, i.e.
- * retransmission flag is set and sequence number matches sequence
- * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
- * "Duplicate detection and recovery")
- *
- * To avoid frames being dropped clear retransmission flag for all
- * received A-MSDUs.
- *
- * Worst case: actual duplicate frames will be reported but this should
- * still be handled gracefully by other OSI/ISO layers. */
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
-
first = skb;
while (skb) {
void *decap_hdr;
@@ -746,6 +729,9 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
skb = skb->next;
info->skb->next = NULL;
+ if (skb)
+ info->amsdu_more = true;
+
ath10k_process_rx(htt->ar, info);
}
@@ -852,6 +838,20 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
return false;
}
+static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+ return true;
+
+ return false;
+}
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
@@ -959,6 +959,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
+ if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_htt_rx_free_msdu_chain(msdu_head);
+ continue;
+ }
+
/* FIXME: we do not support chaining yet.
* this needs investigation */
if (msdu_chaining) {
@@ -969,6 +974,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
+ info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d9335e9d0d04..f1d36d2d2723 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -85,16 +85,13 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
- u8 pipe;
-
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
- /* At the beginning free queue number should hint us the maximum
- * queue length */
- pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
- htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
- pipe);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
+ htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ else
+ htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8aeb46d9b534..f1505a25d810 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -115,6 +115,7 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_PEERS_MAX 128
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
@@ -269,6 +270,7 @@ enum ath10k_mcast2ucast_mode {
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 97ac8c87cba2..776e364eadcd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -322,12 +322,19 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
- if (ret)
+ if (ret) {
+ ath10k_warn("Failed to create wmi peer: %i\n", ret);
return ret;
+ }
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
- if (ret)
+ if (ret) {
+ ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
return ret;
+ }
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers++;
+ spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -373,6 +380,10 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ret)
return ret;
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+
return 0;
}
@@ -392,6 +403,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
list_del(&peer->list);
kfree(peer);
+ ar->num_peers--;
}
spin_unlock_bh(&ar->data_lock);
}
@@ -407,6 +419,7 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
+ ar->num_peers = 0;
spin_unlock_bh(&ar->data_lock);
}
@@ -450,15 +463,19 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.channel.mode = chan_to_phymode(&conf->chandef);
- arg.channel.min_power = channel->max_power * 3;
- arg.channel.max_power = channel->max_power * 4;
- arg.channel.max_reg_power = channel->max_reg_power * 4;
- arg.channel.max_antenna_gain = channel->max_antenna_gain;
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
arg.ssid = arvif->vif->bss_conf.ssid;
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -516,6 +533,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
lockdep_assert_held(&ar->conf_mutex);
+ if (!ar->monitor_present) {
+ ath10k_warn("mac montor stop -- monitor is not present\n");
+ return -EINVAL;
+ }
+
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -523,11 +545,13 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
/* TODO setup this dynamically, what in case we
don't have any vifs? */
arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
- arg.channel.min_power = channel->max_power * 3;
- arg.channel.max_power = channel->max_power * 4;
- arg.channel.max_reg_power = channel->max_reg_power * 4;
- arg.channel.max_antenna_gain = channel->max_antenna_gain;
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
@@ -566,6 +590,16 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ if (!ar->monitor_present) {
+ ath10k_warn("mac montor stop -- monitor is not present\n");
+ return -EINVAL;
+ }
+
+ if (!ar->monitor_enabled) {
+ ath10k_warn("mac montor stop -- monitor is not enabled\n");
+ return -EINVAL;
+ }
+
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev down failed: %d\n", ret);
@@ -647,6 +681,107 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
return ret;
}
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_create(ar);
+ if (ret) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+ ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_destroy(ar);
+ return ret;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* CAC is not running - do nothing */
+ if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+ return 0;
+
+ ath10k_monitor_stop(ar);
+ ath10k_monitor_destroy(ar);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+}
+
+static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+{
+ switch (dfs_state) {
+ case NL80211_DFS_USABLE:
+ return "USABLE";
+ case NL80211_DFS_UNAVAILABLE:
+ return "UNAVAILABLE";
+ case NL80211_DFS_AVAILABLE:
+ return "AVAILABLE";
+ default:
+ WARN_ON(1);
+ return "bug";
+ }
+}
+
+static void ath10k_config_radar_detection(struct ath10k *ar)
+{
+ struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
+ bool radar = ar->hw->conf.radar_enabled;
+ bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
+ enum nl80211_dfs_state dfs_state = chan->dfs_state;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
+ chan->center_freq, radar, chan_radar,
+ ath10k_dfs_state(dfs_state));
+
+ /*
+ * It's safe to call it even if CAC is not started.
+ * This call here guarantees changing channel, etc. will stop CAC.
+ */
+ ath10k_stop_cac(ar);
+
+ if (!radar)
+ return;
+
+ if (!chan_radar)
+ return;
+
+ if (dfs_state != NL80211_DFS_USABLE)
+ return;
+
+ ret = ath10k_start_cac(ar);
+ if (ret) {
+ /*
+ * Not possible to start CAC on current channel so starting
+ * radiation is not allowed, make this channel DFS_UNAVAILABLE
+ * by indicating that radar was detected.
+ */
+ ath10k_warn("failed to start CAC (%d)\n", ret);
+ ieee80211_radar_detected(ar->hw);
+ }
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1351,19 +1486,22 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->allow_vht = true;
ch->allow_ibss =
- !(channel->flags & IEEE80211_CHAN_NO_IBSS);
+ !(channel->flags & IEEE80211_CHAN_NO_IR);
ch->ht40plus =
!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
- passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+ ch->chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
ch->freq = channel->center_freq;
- ch->min_power = channel->max_power * 3;
- ch->max_power = channel->max_power * 4;
- ch->max_reg_power = channel->max_reg_power * 4;
- ch->max_antenna_gain = channel->max_antenna_gain;
+ ch->min_power = 0;
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain * 2;
ch->reg_class_id = 0; /* FIXME */
/* FIXME: why use only legacy modes, why not any
@@ -1423,9 +1561,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath10k *ar = hw->priv;
+ bool result;
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+ ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_ON)
ath10k_regd_update(ar);
@@ -1714,8 +1863,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
break;
ret = ath10k_wmi_mgmt_tx(ar, skb);
- if (ret)
+ if (ret) {
ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
}
}
@@ -1889,6 +2040,7 @@ void ath10k_halt(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_stop_cac(ar);
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -1943,7 +2095,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
@@ -1998,15 +2150,40 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
+ u32 param;
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
- conf->chandef.chan->center_freq);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac config channel %d mhz flags 0x%x\n",
+ conf->chandef.chan->center_freq,
+ conf->chandef.chan->flags);
+
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
spin_unlock_bh(&ar->data_lock);
+
+ ath10k_config_radar_detection(ar);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
+ hw->conf.power_level);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ hw->conf.power_level * 2);
+ if (ret)
+ ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+ hw->conf.power_level, ret);
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ hw->conf.power_level * 2);
+ if (ret)
+ ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+ hw->conf.power_level, ret);
}
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -2037,7 +2214,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
enum wmi_sta_powersave_param param;
int ret = 0;
- u32 value;
+ u32 value, param_id;
int bit;
u32 vdev_param;
@@ -2049,6 +2226,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+ INIT_LIST_HEAD(&arvif->list);
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
@@ -2128,6 +2306,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ath10k_warn("Failed to create peer for AP: %d\n", ret);
goto err_vdev_delete;
}
+
+ param_id = ar->wmi.pdev_param->sta_kickout_th;
+
+ /* Disable STA KICKOUT functionality in FW */
+ ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
+ if (ret)
+ ath10k_warn("Failed to disable STA KICKOUT\n");
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2265,8 +2450,14 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
+ /* Monitor must not be started if it wasn't created first.
+ * Promiscuous mode may be started on a non-monitor interface - in
+ * such case the monitor vdev is not created so starting the
+ * monitor makes no sense. Since ath10k uses no special RX filters
+ * (only BSS filter in STA mode) there's no need for any special
+ * action here. */
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- !ar->monitor_enabled) {
+ !ar->monitor_enabled && ar->monitor_present) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
ar->monitor_vdev_id);
@@ -2274,7 +2465,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if (ret)
ath10k_warn("Unable to start monitor mode\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- ar->monitor_enabled) {
+ ar->monitor_enabled && ar->monitor_present) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
ar->monitor_vdev_id);
@@ -2360,8 +2551,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
+ ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
+ info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2542,6 +2733,44 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *key)
+{
+ u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+ int ret;
+
+ /* 10.1 firmware branch requires default key index to be set to group
+ * key index after installing it. Otherwise FW/HW Txes corrupted
+ * frames with multi-vif APs. This is not required for main firmware
+ * branch (e.g. 636).
+ *
+ * FIXME: This has been tested only in AP. It remains unknown if this
+ * is required for multi-vif STA interfaces on 10.1 */
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ return;
+
+ if (cmd != SET_KEY)
+ return;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ key->keyidx);
+ if (ret)
+ ath10k_warn("failed to set group key as default key: %d\n",
+ ret);
+}
+
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -2603,6 +2832,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
+ ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
if (peer && cmd == SET_KEY)
@@ -2627,6 +2858,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int max_num_peers;
int ret = 0;
mutex_lock(&ar->conf_mutex);
@@ -2637,14 +2869,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
+ else
+ max_num_peers = TARGET_NUM_PEERS;
+
+ if (ar->num_peers >= max_num_peers) {
+ ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+ ar->num_peers, max_num_peers);
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
ath10k_dbg(ATH10K_DBG_MAC,
- "mac vdev %d peer create %pM (new sta)\n",
- arvif->vdev_id, sta->addr);
+ "mac vdev %d peer create %pM (new sta) num_peers %d\n",
+ arvif->vdev_id, sta->addr, ar->num_peers);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
@@ -2689,7 +2933,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
}
-
+exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -3095,6 +3339,307 @@ exit:
return ret;
}
+/* Helper table for legacy fixed_rate/bitrate_mask */
+static const u8 cck_ofdm_rate[] = {
+ /* CCK */
+ 3, /* 1Mbps */
+ 2, /* 2Mbps */
+ 1, /* 5.5Mbps */
+ 0, /* 11Mbps */
+ /* OFDM */
+ 3, /* 6Mbps */
+ 7, /* 9Mbps */
+ 2, /* 12Mbps */
+ 6, /* 18Mbps */
+ 1, /* 24Mbps */
+ 5, /* 36Mbps */
+ 0, /* 48Mbps */
+ 4, /* 54Mbps */
+};
+
+/* Check if only one bit set */
+static int ath10k_check_single_mask(u32 mask)
+{
+ int bit;
+
+ bit = ffs(mask);
+ if (!bit)
+ return 0;
+
+ mask &= ~BIT(bit - 1);
+ if (mask)
+ return 2;
+
+ return 1;
+}
+
+static bool
+ath10k_default_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ u32 legacy = 0x00ff;
+ u8 ht = 0xff, i;
+ u16 vht = 0x3ff;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ legacy = 0x00fff;
+ vht = 0;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ break;
+ default:
+ return false;
+ }
+
+ if (mask->control[band].legacy != legacy)
+ return false;
+
+ for (i = 0; i < ar->num_rf_chains; i++)
+ if (mask->control[band].ht_mcs[i] != ht)
+ return false;
+
+ for (i = 0; i < ar->num_rf_chains; i++)
+ if (mask->control[band].vht_mcs[i] != vht)
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_nss)
+{
+ int ht_nss = 0, vht_nss = 0, i;
+
+ /* check legacy */
+ if (ath10k_check_single_mask(mask->control[band].legacy))
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (mask->control[band].ht_mcs[i] == 0xff)
+ continue;
+ else if (mask->control[band].ht_mcs[i] == 0x00)
+ break;
+ else
+ return false;
+ }
+
+ ht_nss = i;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (mask->control[band].vht_mcs[i] == 0x03ff)
+ continue;
+ else if (mask->control[band].vht_mcs[i] == 0x0000)
+ break;
+ else
+ return false;
+ }
+
+ vht_nss = i;
+
+ if (ht_nss > 0 && vht_nss > 0)
+ return false;
+
+ if (ht_nss)
+ *fixed_nss = ht_nss;
+ else if (vht_nss)
+ *fixed_nss = vht_nss;
+ else
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ enum wmi_rate_preamble *preamble)
+{
+ int legacy = 0, ht = 0, vht = 0, i;
+
+ *preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ /* check legacy */
+ legacy = ath10k_check_single_mask(mask->control[band].legacy);
+ if (legacy > 1)
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
+ if (ht > 1)
+ return false;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
+ if (vht > 1)
+ return false;
+
+ /* Currently we support only one fixed_rate */
+ if ((legacy + ht + vht) != 1)
+ return false;
+
+ if (ht)
+ *preamble = WMI_RATE_PREAMBLE_HT;
+ else if (vht)
+ *preamble = WMI_RATE_PREAMBLE_VHT;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+ u8 rate = 0, pream = 0, nss = 0, i;
+ enum wmi_rate_preamble preamble;
+
+ /* Check if single rate correct */
+ if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
+ return false;
+
+ pream = preamble;
+
+ switch (preamble) {
+ case WMI_RATE_PREAMBLE_CCK:
+ case WMI_RATE_PREAMBLE_OFDM:
+ i = ffs(mask->control[band].legacy) - 1;
+
+ if (band == IEEE80211_BAND_2GHZ && i < 4)
+ pream = WMI_RATE_PREAMBLE_CCK;
+
+ if (band == IEEE80211_BAND_5GHZ)
+ i += 4;
+
+ if (i >= ARRAY_SIZE(cck_ofdm_rate))
+ return false;
+
+ rate = cck_ofdm_rate[i];
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ break;
+
+ if (i == IEEE80211_HT_MCS_MASK_LEN)
+ return false;
+
+ rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+ nss = i;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ break;
+
+ if (i == NL80211_VHT_NSS_MAX)
+ return false;
+
+ rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ nss = i;
+ break;
+ }
+
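+ /* The fixed_rate vdev parameter packs the preamble into bits [7:6],
+ * the NSS into bits [5:4] and the rate/MCS index into bits [3:0]. */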
+ *fixed_nss = nss + 1;
+ nss <<= 4;
+ pream <<= 6;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+ pream, nss, rate);
+
+ *fixed_rate = pream | nss | rate;
+
+ return true;
+}
+
+static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+ /* First check the full NSS mask to see if we can simply limit NSS */
+ if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
+ return true;
+
+ /* Next, check whether a single rate is set */
+ return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
+}
+
+static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+ u8 fixed_rate,
+ u8 fixed_nss)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->fixed_rate == fixed_rate &&
+ arvif->fixed_nss == fixed_nss)
+ goto exit;
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_rate);
+ if (ret) {
+ ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+ fixed_rate, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_rate = fixed_rate;
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_nss);
+
+ if (ret) {
+ ath10k_warn("Could not set fixed_nss param %d: %d\n",
+ fixed_nss, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_nss = fixed_nss;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
+ u8 fixed_rate = WMI_FIXED_RATE_NONE;
+ u8 fixed_nss = ar->num_rf_chains;
+
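+ /* A default (all-rates) mask clears any fixed rate setting; any other
+ * mask must reduce to either a plain NSS limit or a single fixed rate. */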
+ if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+ if (!ath10k_get_fixed_rate_nss(mask, band,
+ &fixed_rate,
+ &fixed_nss))
+ return -EINVAL;
+ }
+
+ return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -3117,6 +3662,7 @@ static const struct ieee80211_ops ath10k_ops = {
.tx_last_beacon = ath10k_tx_last_beacon,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_set_bitrate_mask,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@@ -3249,12 +3795,37 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
},
};
-static const struct ieee80211_iface_combination ath10k_if_comb = {
- .limits = ath10k_if_limits,
- .n_limits = ARRAY_SIZE(ath10k_if_limits),
- .max_interfaces = 8,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+ {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ {
+ .limits = ath10k_10x_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
};
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
@@ -3433,9 +4004,12 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_AP);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
@@ -3465,7 +4039,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
- ar->hw->channel_change_time = 5000;
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -3478,11 +4051,28 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->queues = 4;
- ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
- ar->hw->wiphy->n_iface_combinations = 1;
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
+ }
ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ /* Init ath dfs pattern detector */
+ ar->ath_common.debug_mask = ATH_DBG_DFS;
+ ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+ ath10k_warn("dfs pattern detector init failed\n");
+ }
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
@@ -3518,6 +4108,9 @@ void ath10k_mac_unregister(struct ath10k *ar)
{
ieee80211_unregister_hw(ar->hw);
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9e86a811086f..29fd197d1fd8 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/bitops.h>
#include "core.h"
#include "debug.h"
@@ -32,10 +33,21 @@
#include "ce.h"
#include "pci.h"
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
static unsigned int ath10k_target_ps;
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -52,10 +64,16 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
-static void ath10k_pci_device_reset(struct ath10k *ar);
-static int ath10k_pci_reset_target(struct ath10k *ar);
-static int ath10k_pci_start_intr(struct ath10k *ar);
-static void ath10k_pci_stop_intr(struct ath10k *ar);
+static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer);
+static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -200,6 +218,87 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
/* CE7 used only by Host */
};
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+ u32 cause;
+
+ /* Check if the shared legacy irq is for us */
+ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+ return true;
+
+ return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+ /* IMPORTANT: the INTR_CLR register has to be set after
+ * INTR_ENABLE is set to 0, otherwise the interrupt cannot
+ * really be cleared. */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->num_msi_intrs == 0) {
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ }
+
+ tasklet_schedule(&ar_pci->early_irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_pci_request_early_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
+ * first interrupt from the irq vector is triggered in all cases for FW
+ * indication/errors */
+ ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
+ IRQF_SHARED, "ath10k_pci (early)", ar);
+ if (ret) {
+ ath10k_warn("failed to request early irq: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_free_early_irq(struct ath10k *ar)
+{
+ free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@@ -526,17 +625,6 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar)
return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
-static void ath10k_pci_wait(struct ath10k *ar)
-{
- int n = 100;
-
- while (n-- && !ath10k_pci_target_is_awake(ar))
- msleep(10);
-
- if (n < 0)
- ath10k_warn("Unable to wakeup target\n");
-}
-
int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -723,7 +811,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
flags);
if (ret)
- ath10k_warn("CE send failed: %p\n", nbuf);
+ ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
return ret;
}
@@ -750,9 +838,10 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
ar->fw_version_build);
host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
- if (ath10k_pci_diag_read_mem(ar, host_addr,
- &reg_dump_area, sizeof(u32)) != 0) {
- ath10k_warn("could not read hi_failure_state\n");
+ ret = ath10k_pci_diag_read_mem(ar, host_addr,
+ &reg_dump_area, sizeof(u32));
+ if (ret) {
+ ath10k_err("failed to read FW dump area address: %d\n", ret);
return;
}
@@ -762,7 +851,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
&reg_dump_values[0],
REG_DUMP_COUNT_QCA988X * sizeof(u32));
if (ret != 0) {
- ath10k_err("could not dump FW Dump Area\n");
+ ath10k_err("failed to read FW dump area: %d\n", ret);
return;
}
@@ -777,7 +866,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
reg_dump_values[i + 2],
reg_dump_values[i + 3]);
- ieee80211_queue_work(ar->hw, &ar->restart_work);
+ queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -815,53 +904,41 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
sizeof(ar_pci->msg_callbacks_current));
}
-static int ath10k_pci_start_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
- int i, pipe_num, completions, disable_interrupts;
+ int i, pipe_num, completions;
spin_lock_init(&ar_pci->compl_lock);
INIT_LIST_HEAD(&ar_pci->compl_process);
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_init(&pipe_info->pipe_lock);
INIT_LIST_HEAD(&pipe_info->compl_free);
/* Handle Diagnostic CE specially */
- if (pipe_info->ce_hdl == ce_diag)
+ if (pipe_info->ce_hdl == ar_pci->ce_diag)
continue;
attr = &host_ce_config_wlan[pipe_num];
completions = 0;
- if (attr->src_nentries) {
- disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_send_done,
- disable_interrupts);
+ if (attr->src_nentries)
completions += attr->src_nentries;
- }
- if (attr->dest_nentries) {
- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_recv_data);
+ if (attr->dest_nentries)
completions += attr->dest_nentries;
- }
-
- if (completions == 0)
- continue;
for (i = 0; i < completions; i++) {
compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
- ath10k_pci_stop_ce(ar);
+ ath10k_pci_cleanup_ce(ar);
return -ENOMEM;
}
@@ -873,20 +950,55 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_stop_ce(struct ath10k *ar)
+static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
- int i;
+ const struct ce_attr *attr;
+ struct ath10k_pci_pipe *pipe_info;
+ int pipe_num, disable_interrupts;
- ath10k_ce_disable_interrupts(ar);
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+
+ /* Handle Diagnostic CE specially */
+ if (pipe_info->ce_hdl == ar_pci->ce_diag)
+ continue;
+
+ attr = &host_ce_config_wlan[pipe_num];
+
+ if (attr->src_nentries) {
+ disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+ ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_send_done,
+ disable_interrupts);
+ }
+
+ if (attr->dest_nentries)
+ ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+ ath10k_pci_ce_recv_data);
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
- /* Cancel the pending tasklet */
tasklet_kill(&ar_pci->intr_tq);
+ tasklet_kill(&ar_pci->msi_fw_err);
+ tasklet_kill(&ar_pci->early_irq_tasklet);
for (i = 0; i < CE_COUNT; i++)
tasklet_kill(&ar_pci->pipe_info[i].intr);
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_compl *compl;
+ struct sk_buff *skb;
/* Mark pending completions as aborted, so that upper layers free up
* their associated resources */
@@ -920,7 +1032,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
spin_unlock_bh(&ar_pci->compl_lock);
/* Free unused completions for each pipe. */
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
spin_lock_bh(&pipe_info->pipe_lock);
@@ -974,8 +1086,8 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
- ath10k_warn("Unable to post recv buffer for pipe: %d\n",
- compl->pipe_info->pipe_num);
+ ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+ compl->pipe_info->pipe_num, ret);
break;
}
@@ -1114,7 +1226,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
for (i = 0; i < num; i++) {
skb = dev_alloc_skb(pipe_info->buf_sz);
if (!skb) {
- ath10k_warn("could not allocate skbuff for pipe %d\n",
+ ath10k_warn("failed to allocate skbuff for pipe %d\n",
num);
ret = -ENOMEM;
goto err;
@@ -1127,7 +1239,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
- ath10k_warn("could not dma map skbuff\n");
+ ath10k_warn("failed to DMA map sk_buff\n");
dev_kfree_skb_any(skb);
ret = -EIO;
goto err;
@@ -1142,7 +1254,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
ce_data);
if (ret) {
- ath10k_warn("could not enqueue to pipe %d (%d)\n",
+ ath10k_warn("failed to enqueue to pipe %d: %d\n",
num, ret);
goto err;
}
@@ -1162,7 +1274,7 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
const struct ce_attr *attr;
int pipe_num, ret = 0;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
attr = &host_ce_config_wlan[pipe_num];
@@ -1172,8 +1284,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
ret = ath10k_pci_post_rx_pipe(pipe_info,
attr->dest_nentries - 1);
if (ret) {
- ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
- pipe_num);
+ ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+ pipe_num, ret);
for (; pipe_num >= 0; pipe_num--) {
pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1189,23 +1301,58 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret;
+ int ret, ret_early;
- ret = ath10k_pci_start_ce(ar);
+ ath10k_pci_free_early_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+
+ ret = ath10k_pci_alloc_compl(ar);
if (ret) {
- ath10k_warn("could not start CE (%d)\n", ret);
- return ret;
+ ath10k_warn("failed to allocate CE completions: %d\n", ret);
+ goto err_early_irq;
+ }
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+ ret);
+ goto err_free_compl;
+ }
+
+ ret = ath10k_pci_setup_ce_irq(ar);
+ if (ret) {
+ ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+ goto err_stop;
}
/* Post buffers once to start things off. */
ret = ath10k_pci_post_rx(ar);
if (ret) {
- ath10k_warn("could not post rx pipes (%d)\n", ret);
- return ret;
+ ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+ ret);
+ goto err_stop;
}
ar_pci->started = 1;
return 0;
+
+err_stop:
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_stop_ce(ar);
+ ath10k_pci_process_ce(ar);
+err_free_compl:
+ ath10k_pci_cleanup_ce(ar);
+err_early_irq:
+ /* Though there should be no interrupts (the device was reset),
+ * power_down() expects the early IRQ to be installed as part of the
+ * driver lifecycle. */
+ ret_early = ath10k_pci_request_early_irq(ar);
+ if (ret_early)
+ ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
+
+ return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
@@ -1271,6 +1418,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
* Indicate the completion to higer layer to free
* the buffer
*/
+
+ if (!netbuf) {
+ ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
+ ce_hdl->id);
+ continue;
+ }
+
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
@@ -1291,7 +1445,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1306,7 +1460,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
struct ath10k_pci_pipe *pipe_info;
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
if (pipe_info->ce_hdl) {
ath10k_ce_deinit(pipe_info->ce_hdl);
@@ -1316,27 +1470,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
}
}
-static void ath10k_pci_disable_irqs(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
-
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- disable_irq(ar_pci->pdev->irq + i);
-}
-
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
- /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
- * by ath10k_pci_start_intr(). */
- ath10k_pci_disable_irqs(ar);
+ ret = ath10k_ce_disable_interrupts(ar);
+ if (ret)
+ ath10k_warn("failed to disable CE interrupts: %d\n", ret);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
ath10k_pci_stop_ce(ar);
+ ret = ath10k_pci_request_early_irq(ar);
+ if (ret)
+ ath10k_warn("failed to re-enable early irq: %d\n", ret);
+
/* At this point, asynchronous threads are stopped, the target should
* not DMA nor interrupt. We process the leftovers and then free
* everything else up. */
@@ -1345,6 +1497,13 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
+ /* Make sure the device won't access any structures on the host by
+ * resetting it. The device was fed with PCI CE ringbuffer
+ * configuration during init. If ringbuffers are freed and the device
+ * were to access them this could lead to memory corruption on the
+ * host. */
+ ath10k_pci_device_reset(ar);
+
ar_pci->started = 0;
}
@@ -1363,6 +1522,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *treq, *tresp = NULL;
int ret = 0;
+ might_sleep();
+
if (resp && !resp_len)
return -EINVAL;
@@ -1403,14 +1564,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
if (ret)
goto err_resp;
- ret = wait_for_completion_timeout(&xfer.done,
- BMI_COMMUNICATION_TIMEOUT_HZ);
- if (ret <= 0) {
+ ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+ if (ret) {
u32 unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
- ret = -ETIMEDOUT;
ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
&unused_nbytes, &unused_id);
} else {
@@ -1478,6 +1637,25 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
complete(&xfer->done);
}
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer)
+{
+ unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
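+ /* BMI runs before CE interrupts are set up, so poll the send and
+ * receive pipes directly until the transfer completes or times out. */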
+ while (time_before_eq(jiffies, timeout)) {
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+ if (completion_done(&xfer->done))
+ return 0;
+
+ schedule();
+ }
+
+ return -ETIMEDOUT;
+}
+
/*
* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
@@ -1587,7 +1765,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
CORE_CTRL_ADDRESS,
&core_ctrl);
if (ret) {
- ath10k_warn("Unable to read core ctrl\n");
+ ath10k_warn("failed to read core_ctrl: %d\n", ret);
return ret;
}
@@ -1597,10 +1775,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
CORE_CTRL_ADDRESS,
core_ctrl);
- if (ret)
- ath10k_warn("Unable to set interrupt mask\n");
+ if (ret) {
+ ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath10k_pci_init_config(struct ath10k *ar)
@@ -1751,7 +1932,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
const struct ce_attr *attr;
int pipe_num;
- for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
@@ -1759,7 +1940,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
if (pipe_info->ce_hdl == NULL) {
- ath10k_err("Unable to initialize CE for pipe: %d\n",
+ ath10k_err("failed to initialize CE for pipe: %d\n",
pipe_num);
/* It is safe to call it here. It checks if ce_hdl is
@@ -1768,31 +1949,18 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
return -1;
}
- if (pipe_num == ar_pci->ce_count - 1) {
+ if (pipe_num == CE_COUNT - 1) {
/*
* Reserve the ultimate CE for
* diagnostic Window support
*/
- ar_pci->ce_diag =
- ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+ ar_pci->ce_diag = pipe_info->ce_hdl;
continue;
}
pipe_info->buf_sz = (size_t) (attr->src_sz_max);
}
- /*
- * Initially, establish CE completion handlers for use with BMI.
- * These are overwritten with generic handlers after we exit BMI phase.
- */
- pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
- ath10k_pci_bmi_send_done, 0);
-
- pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
- ath10k_pci_bmi_recv_data);
-
return 0;
}
@@ -1828,14 +1996,9 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ const char *irq_mode;
int ret;
- ret = ath10k_pci_start_intr(ar);
- if (ret) {
- ath10k_err("could not start interrupt handling (%d)\n", ret);
- goto err;
- }
-
/*
* Bring the target up cleanly.
*
@@ -1846,39 +2009,80 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- ath10k_pci_device_reset(ar);
-
- ret = ath10k_pci_reset_target(ar);
- if (ret)
- goto err_irq;
+ ret = ath10k_pci_device_reset(ar);
+ if (ret) {
+ ath10k_err("failed to reset target: %d\n", ret);
+ goto err;
+ }
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
/* Force AWAKE forever */
ath10k_do_pci_wake(ar);
ret = ath10k_pci_ce_init(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to initialize CE: %d\n", ret);
goto err_ps;
+ }
- ret = ath10k_pci_init_config(ar);
- if (ret)
+ ret = ath10k_ce_disable_interrupts(ar);
+ if (ret) {
+ ath10k_err("failed to disable CE interrupts: %d\n", ret);
goto err_ce;
+ }
- ret = ath10k_pci_wake_target_cpu(ar);
+ ret = ath10k_pci_init_irq(ar);
if (ret) {
- ath10k_err("could not wake up target CPU (%d)\n", ret);
+ ath10k_err("failed to init irqs: %d\n", ret);
goto err_ce;
}
+ ret = ath10k_pci_request_early_irq(ar);
+ if (ret) {
+ ath10k_err("failed to request early irq: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_err("failed to wait for target to init: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err("failed to setup init config: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err("could not wake up target CPU: %d\n", ret);
+ goto err_free_early_irq;
+ }
+
+ if (ar_pci->num_msi_intrs > 1)
+ irq_mode = "MSI-X";
+ else if (ar_pci->num_msi_intrs == 1)
+ irq_mode = "MSI";
+ else
+ irq_mode = "legacy";
+
+ if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("pci irq %s\n", irq_mode);
+
return 0;
+err_free_early_irq:
+ ath10k_pci_free_early_irq(ar);
+err_deinit_irq:
+ ath10k_pci_deinit_irq(ar);
err_ce:
ath10k_pci_ce_deinit(ar);
+ ath10k_pci_device_reset(ar);
err_ps:
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
-err_irq:
- ath10k_pci_stop_intr(ar);
err:
return ret;
}
@@ -1887,7 +2091,10 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_pci_stop_intr(ar);
+ ath10k_pci_free_early_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_device_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2023,25 +2230,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (ar_pci->num_msi_intrs == 0) {
- /*
- * IMPORTANT: INTR_CLR regiser has to be set after
- * INTR_ENABLE is set to 0, otherwise interrupt can not be
- * really cleared.
- */
- iowrite32(0, ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_CLR_ADDRESS));
- /*
- * IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer.
- */
- (void) ioread32(ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
}
tasklet_schedule(&ar_pci->intr_tq);
@@ -2049,6 +2241,34 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void ath10k_pci_early_irq_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 fw_ind;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+ ret);
+ return;
+ }
+
+ fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+ if (fw_ind & FW_IND_EVENT_PENDING) {
+ ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+ fw_ind & ~FW_IND_EVENT_PENDING);
+
+ /* Some structures are unavailable during early boot or at
+ * driver teardown so just print that the device has crashed. */
+ ath10k_warn("device crashed - no diagnostics available\n");
+ }
+
+ ath10k_pci_sleep(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+}
+
static void ath10k_pci_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
@@ -2057,40 +2277,22 @@ static void ath10k_pci_tasklet(unsigned long data)
ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
ath10k_ce_per_engine_service_any(ar);
- if (ar_pci->num_msi_intrs == 0) {
- /* Enable Legacy PCI line interrupts */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- /*
- * IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer
- */
- (void) ioread32(ar_pci->mem +
- (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- }
+ /* Re-enable legacy irq that was disabled in the irq handler */
+ if (ar_pci->num_msi_intrs == 0)
+ ath10k_pci_enable_legacy_irq(ar);
}
-static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret;
- int i;
-
- ret = pci_enable_msi_block(ar_pci->pdev, num);
- if (ret)
- return ret;
+ int ret, i;
ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("request_irq(%d) failed %d\n",
+ ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-
- pci_disable_msi(ar_pci->pdev);
return ret;
}
@@ -2099,44 +2301,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
ath10k_pci_per_engine_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("request_irq(%d) failed %d\n",
+ ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
ar_pci->pdev->irq + i, ret);
for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
free_irq(ar_pci->pdev->irq + i, ar);
free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- pci_disable_msi(ar_pci->pdev);
return ret;
}
}
- ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
return 0;
}
-static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ret = pci_enable_msi(ar_pci->pdev);
- if (ret < 0)
- return ret;
-
ret = request_irq(ar_pci->pdev->irq,
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret < 0) {
- pci_disable_msi(ar_pci->pdev);
+ if (ret) {
+ ath10k_warn("failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
return ret;
}
- ath10k_info("MSI interrupt handling\n");
return 0;
}
-static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -2144,112 +2340,165 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
ret = request_irq(ar_pci->pdev->irq,
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret < 0)
+ if (ret) {
+ ath10k_warn("failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
return ret;
+ }
- /*
- * Make sure to wake the Target before enabling Legacy
- * Interrupt.
- */
- iowrite32(PCIE_SOC_WAKE_V_MASK,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
+ return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_pci_wait(ar);
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ return ath10k_pci_request_irq_legacy(ar);
+ case 1:
+ return ath10k_pci_request_irq_msi(ar);
+ case MSI_NUM_REQUEST:
+ return ath10k_pci_request_irq_msix(ar);
+ }
- /*
- * A potential race occurs here: The CORE_BASE write
- * depends on target correctly decoding AXI address but
- * host won't know when target writes BAR to CORE_CTRL.
- * This write might get lost if target has NOT written BAR.
- * For now, fix the race by repeating the write in below
- * synchronization checking.
- */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- ath10k_info("legacy interrupt handling\n");
- return 0;
+ ath10k_warn("unknown irq configuration upon request\n");
+ return -EINVAL;
}
-static int ath10k_pci_start_intr(struct ath10k *ar)
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ /* There's at least one interrupt regardless of whether it's a legacy INTR
+ * or MSI or MSI-X */
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int num = MSI_NUM_REQUEST;
- int ret;
int i;
- tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+ tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long) ar);
+ (unsigned long)ar);
+ tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+ (unsigned long)ar);
for (i = 0; i < CE_COUNT; i++) {
ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr,
- ath10k_pci_ce_tasklet,
+ tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
(unsigned long)&ar_pci->pipe_info[i]);
}
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
+ ar_pci->features);
+ int ret;
- if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
- num = 1;
+ ath10k_pci_init_irq_tasklets(ar);
- if (num > 1) {
- ret = ath10k_pci_start_intr_msix(ar, num);
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
+ !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+ ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+
+ /* Try MSI-X */
+ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
+ ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+ ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
if (ret == 0)
- goto exit;
+ return 0;
+ if (ret > 0)
+ pci_disable_msi(ar_pci->pdev);
- ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
- num = 1;
+ /* fall-through */
}
- if (num == 1) {
- ret = ath10k_pci_start_intr_msi(ar);
+ /* Try MSI */
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ ar_pci->num_msi_intrs = 1;
+ ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
- goto exit;
+ return 0;
- ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
- ret);
- num = 0;
+ /* fall-through */
}
- ret = ath10k_pci_start_intr_legacy(ar);
+ /* Try legacy irq
+ *
+ * A potential race occurs here: The CORE_BASE write
+ * depends on target correctly decoding AXI address but
+ * host won't know when target writes BAR to CORE_CTRL.
+ * This write might get lost if target has NOT written BAR.
+ * For now, fix the race by repeating the write in below
+ * synchronization checking. */
+ ar_pci->num_msi_intrs = 0;
-exit:
- ar_pci->num_msi_intrs = num;
- ar_pci->ce_count = CE_COUNT;
- return ret;
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+ ath10k_pci_sleep(ar);
+
+ return 0;
}
-static void ath10k_pci_stop_intr(struct ath10k *ar)
+static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
+ int ret;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn("failed to wake target: %d\n", ret);
+ return ret;
+ }
- if (ar_pci->num_msi_intrs > 0)
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_sleep(ar);
+
+ return 0;
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ return ath10k_pci_deinit_irq_legacy(ar);
+ case 1:
+ /* fall-through */
+ case MSI_NUM_REQUEST:
pci_disable_msi(ar_pci->pdev);
+ return 0;
+ }
+
+ ath10k_warn("unknown irq configuration upon deinit\n");
+ return -EINVAL;
}
-static int ath10k_pci_reset_target(struct ath10k *ar)
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int wait_limit = 300; /* 3 sec */
+ int ret;
- /* Wait for Target to finish initialization before we proceed. */
- iowrite32(PCIE_SOC_WAKE_V_MASK,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- ath10k_pci_wait(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n", ret);
+ return ret;
+ }
while (wait_limit-- &&
!(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
@@ -2264,34 +2513,26 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
}
if (wait_limit < 0) {
- ath10k_err("Target stalled\n");
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
- return -EIO;
+ ath10k_err("target stalled\n");
+ ret = -EIO;
+ goto out;
}
- iowrite32(PCIE_SOC_WAKE_RESET,
- ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
-
- return 0;
+out:
+ ath10k_pci_sleep(ar);
+ return ret;
}
-static void ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_device_reset(struct ath10k *ar)
{
- int i;
+ int i, ret;
u32 val;
- if (!SOC_GLOBAL_RESET_ADDRESS)
- return;
-
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_V_MASK);
- for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_target_is_awake(ar))
- break;
- msleep(1);
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n",
+ ret);
+ return ret;
}
/* Put Target, including PCIe, into RESET. */
@@ -2317,7 +2558,8 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
msleep(1);
}
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_do_pci_sleep(ar);
+ return 0;
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2374,7 +2616,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
if (!ar) {
- ath10k_err("ath10k_core_create failed!\n");
+ ath10k_err("failed to create driver core\n");
ret = -EINVAL;
goto err_ar_pci;
}
@@ -2393,20 +2635,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
*/
ret = pci_assign_resource(pdev, BAR_NUM);
if (ret) {
- ath10k_err("cannot assign PCI space: %d\n", ret);
+ ath10k_err("failed to assign PCI space: %d\n", ret);
goto err_ar;
}
ret = pci_enable_device(pdev);
if (ret) {
- ath10k_err("cannot enable PCI device: %d\n", ret);
+ ath10k_err("failed to enable PCI device: %d\n", ret);
goto err_ar;
}
/* Request MMIO resources */
ret = pci_request_region(pdev, BAR_NUM, "ath");
if (ret) {
- ath10k_err("PCI MMIO reservation error: %d\n", ret);
+ ath10k_err("failed to request MMIO region: %d\n", ret);
goto err_device;
}
@@ -2416,13 +2658,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("32-bit DMA not available: %d\n", ret);
+ ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
goto err_region;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("cannot enable 32-bit consistent DMA\n");
+ ath10k_err("failed to set consistent DMA mask to 32-bit\n");
goto err_region;
}
@@ -2439,7 +2681,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
/* Arrange for access to Target SoC registers. */
mem = pci_iomap(pdev, BAR_NUM, 0);
if (!mem) {
- ath10k_err("PCI iomap error\n");
+ ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
ret = -EIO;
goto err_master;
}
@@ -2451,11 +2693,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("Failed to get chip id: %d\n", ret);
- return ret;
+ goto err_iomap;
}
- chip_id = ath10k_pci_read32(ar,
- RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
ath10k_do_pci_sleep(ar);
@@ -2463,7 +2704,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ret = ath10k_core_register(ar, chip_id);
if (ret) {
- ath10k_err("could not register driver core (%d)\n", ret);
+ ath10k_err("failed to register driver core: %d\n", ret);
goto err_iomap;
}
@@ -2529,7 +2770,7 @@ static int __init ath10k_pci_init(void)
ret = pci_register_driver(&ath10k_pci_driver);
if (ret)
- ath10k_err("pci_register_driver failed [%d]\n", ret);
+ ath10k_err("failed to register PCI driver: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 52fb7b973571..a4f32038c440 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -198,9 +198,7 @@ struct ath10k_pci {
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
-
- /* Number of Copy Engines supported */
- unsigned int ce_count;
+ struct tasklet_struct early_irq_tasklet;
int started;
@@ -318,6 +316,16 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
+static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 90817ddc92ba..4eb2ecbc06ef 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -182,6 +182,27 @@ TRACE_EVENT(ath10k_htt_stats,
)
);
+TRACE_EVENT(ath10k_wmi_dbglog,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 5ae373a1e294..74f45fa6f428 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -75,6 +75,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
@@ -183,7 +184,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
TODO check this */
mcs = (info2 >> 4) & 0x0F;
- nss = (info1 >> 10) & 0x07;
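+ /* the NSS value here is zero-based, so convert it to a 1-based stream count */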
+ nss = ((info1 >> 10) & 0x07) + 1;
bw = info1 & 3;
sgi = info2 & 1;
@@ -230,12 +231,15 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
~IEEE80211_FCTL_PROTECTED);
}
- if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ if (info->mic_err)
status->flag |= RX_FLAG_MMIC_ERROR;
if (info->fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (info->amsdu_more)
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
status->signal = info->signal;
spin_lock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ccf3597fd9e2..712a606a080a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -16,6 +16,7 @@
*/
#include <linux/skbuff.h>
+#include <linux/ctype.h>
#include "core.h"
#include "htc.h"
@@ -674,10 +675,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
- if (ret) {
- dev_kfree_skb_any(skb);
+ if (ret)
return ret;
- }
/* TODO: report tx status to mac80211 - temporary just ACK */
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -877,6 +876,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr;
u32 rx_status;
u32 channel;
@@ -909,6 +909,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ATH10K_DBG_MGMT,
"event mgmt rx status %08x\n", rx_status);
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
dev_kfree_skb(skb);
return 0;
@@ -924,7 +929,25 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- status->band = phy_mode_to_band(phy_mode);
+ /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ * MODE_11B. This means phy_mode is not a reliable source for the band
+ * of mgmt rx. */
+
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+
+ if (ch) {
+ status->band = ch->band;
+
+ if (phy_mode == MODE_11B &&
+ status->band == IEEE80211_BAND_5GHZ)
+ ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ } else {
+ ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
+ status->band = phy_mode_to_band(phy_mode);
+ }
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
@@ -934,7 +957,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
- if (fc & IEEE80211_FCTL_PROTECTED) {
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However in case of PMF it delivers decrypted
+ * frames with Protected Bit set. */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !ieee80211_is_auth(hdr->frame_control)) {
status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(fc &
@@ -1044,9 +1071,14 @@ static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
-static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ skb->len);
+
+ trace_ath10k_wmi_dbglog(skb->data, skb->len);
+
+ return 0;
}
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
@@ -1383,9 +1415,259 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct pulse_event pe;
+ u64 tsf64;
+ u8 rssi, width;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ /* report event to DFS pattern detector */
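+ /* rebuild the full 64-bit TSF: the upper 32 bits come from the
+ * combined event, the lower 32 bits from this event's timestamp */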
+ tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = event->hdr.rssi_combined;
+
+ /* the hardware stores this as an 8 bit signed value,
+ * set it to zero if the number is negative
+ */
+ if (rssi & 0x80)
+ rssi = 0;
+
+ pe.ts = tsf64;
+ pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.width = width;
+ pe.rssi = rssi;
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+ /* Radar event reporting can be suppressed via the debugfs file
+ dfs_block_radar_events */
+ if (ar->dfs_block_radar_events) {
+ ath10k_info("DFS Radar detected, but ignored as requested\n");
+ return;
+ }
+
+ ieee80211_radar_detected(ar->hw);
+}
+
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ struct phyerr_fft_report *fftr,
+ u64 tsf)
+{
+ u32 reg0, reg1;
+ u8 rssi, peak_mag;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+ rssi = event->hdr.rssi_combined;
+
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+ /* false event detection */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+ ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ struct phyerr_tlv *tlv;
+ struct phyerr_radar_report *rr;
+ struct phyerr_fft_report *fftr;
+ u8 *tlv_buf;
+
+ buf_len = __le32_to_cpu(event->hdr.buf_len);
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+ event->hdr.phy_err_code, event->hdr.rssi_combined,
+ __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ return;
+
+ ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn("too short buf for tlv header (%d)\n", i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ ath10k_dbg(ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+ ath10k_warn("too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+ ath10k_dfs_radar_report(ar, event, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+ ath10k_warn("too short fft report (%d)\n", i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+ res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
+ if (res)
+ return;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_single_phyerr_rx_event *event,
+ u64 tsf)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
+}
+
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+ struct wmi_comb_phyerr_rx_event *comb_event;
+ struct wmi_single_phyerr_rx_event *event;
+ u32 count, i, buf_len, phy_err_code;
+ u64 tsf;
+ int left_len = skb->len;
+
+ ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+ /* Check if combined event available */
+ if (left_len < sizeof(*comb_event)) {
+ ath10k_warn("wmi phyerr combined event wrong len\n");
+ return;
+ }
+
+ left_len -= sizeof(*comb_event);
+
+ /* Check number of included events */
+ comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
+ count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
+
+ tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+ tsf <<= 32;
+ tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event phyerr count %d tsf64 0x%llX\n",
+ count, tsf);
+
+ event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+ for (i = 0; i < count; i++) {
+ /* Check if we can read event header */
+ if (left_len < sizeof(*event)) {
+ ath10k_warn("single event (%d) wrong head len\n", i);
+ return;
+ }
+
+ left_len -= sizeof(*event);
+
+ buf_len = __le32_to_cpu(event->hdr.buf_len);
+ phy_err_code = event->hdr.phy_err_code;
+
+ if (left_len < buf_len) {
+ ath10k_warn("single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+ left_len -= buf_len;
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+ ath10k_wmi_event_dfs(ar, event, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+ ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+ ath10k_wmi_event_dfs(ar, event, tsf);
+ ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ break;
+ default:
+ break;
+ }
+
+ event += sizeof(*event) + buf_len;
+ }
}
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
@@ -1400,9 +1682,37 @@ static void ath10k_wmi_event_profile_match(struct ath10k *ar,
}
static void ath10k_wmi_event_debug_print(struct ath10k *ar,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+ char buf[101], c;
+ int i;
+
+ for (i = 0; i < sizeof(buf) - 1; i++) {
+ if (i >= skb->len)
+ break;
+
+ c = skb->data[i];
+
+ if (c == '\0')
+ break;
+
+ if (isascii(c) && isprint(c))
+ buf[i] = c;
+ else
+ buf[i] = '.';
+ }
+
+ if (i == sizeof(buf) - 1)
+ ath10k_warn("wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (skb->data[i - 1] == '\n')
+ i--;
+
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
@@ -2062,6 +2372,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
{
struct wmi_set_channel_cmd *cmd;
struct sk_buff *skb;
+ u32 ch_flags = 0;
if (arg->passive)
return -EINVAL;
@@ -2070,10 +2381,14 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
if (!skb)
return -ENOMEM;
+ if (arg->chan_radar)
+ ch_flags |= WMI_CHAN_FLAG_DFS;
+
cmd = (struct wmi_set_channel_cmd *)skb->data;
cmd->chan.mhz = __cpu_to_le32(arg->freq);
cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
cmd->chan.mode = arg->mode;
+ cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->min_power;
cmd->chan.max_power = arg->max_power;
cmd->chan.reg_power = arg->max_reg_power;
@@ -2211,7 +2526,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- __cpu_to_le32(ar->wmi.num_mem_chunks));
+ ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2224,10 +2539,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%x\n",
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
i,
- cmd->host_mem_chunks[i].size,
- cmd->host_mem_chunks[i].ptr);
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2302,7 +2617,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- __cpu_to_le32(ar->wmi.num_mem_chunks));
+ ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2315,10 +2630,10 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%x\n",
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
i,
- cmd->host_mem_chunks[i].size,
- cmd->host_mem_chunks[i].ptr);
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2622,6 +2937,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
+ u32 ch_flags = 0;
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
@@ -2648,6 +2964,8 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
flags |= WMI_VDEV_START_PMF_ENABLED;
+ if (arg->channel.chan_radar)
+ ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -2669,6 +2987,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
__cpu_to_le32(arg->channel.band_center_freq1);
cmd->chan.mode = arg->channel.mode;
+ cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->channel.min_power;
cmd->chan.max_power = arg->channel.max_power;
cmd->chan.reg_power = arg->channel.max_reg_power;
@@ -2676,9 +2995,10 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
cmd->chan.antenna_max = arg->channel.max_antenna_gain;
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
- "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
- arg->channel.mode, flags, arg->channel.max_power);
+ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
+ "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ cmd->chan.flags, arg->channel.max_power);
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
@@ -3012,6 +3332,8 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
flags |= WMI_CHAN_FLAG_ALLOW_VHT;
if (ch->ht40plus)
flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (ch->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
ci->mhz = __cpu_to_le32(ch->freq);
ci->band_center_freq1 = __cpu_to_le32(ch->freq);
@@ -3094,6 +3416,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
+ int ret;
skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
if (!skb)
@@ -3106,7 +3429,11 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+ if (ret)
+ dev_kfree_skb(skb);
+
+ return ret;
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -3175,3 +3502,40 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
type, delay_ms);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
+
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
+{
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back to defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le32(module_enable);
+ cmd->module_valid = __cpu_to_le32(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 78c991aec7f9..4b5e7d3d32b6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -893,6 +893,7 @@ struct wmi_channel {
union {
__le32 reginfo0;
struct {
+ /* note: power unit is 0.5 dBm */
u8 min_power;
u8 max_power;
u8 reg_power;
@@ -915,7 +916,8 @@ struct wmi_channel_arg {
bool allow_ht;
bool allow_vht;
bool ht40plus;
- /* note: power unit is 1/4th of dBm */
+ bool chan_radar;
+ /* note: power unit is 0.5 dBm */
u32 min_power;
u32 max_power;
u32 max_reg_power;
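(Given the 0.5 dBm unit noted above, a limit of, say, 23 dBm would presumably be passed as 46 in these power fields.)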
@@ -1977,6 +1979,10 @@ struct wmi_mgmt_rx_event_v2 {
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+#define PHY_ERROR_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_RADAR 0x05
+
struct wmi_single_phyerr_rx_hdr {
/* TSF timestamp */
__le32 tsf_timestamp;
@@ -2068,6 +2074,87 @@ struct wmi_comb_phyerr_rx_event {
u8 bufp[0];
} __packed;
+#define PHYERR_TLV_SIG 0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
+
+struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+ __le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0
+
+struct phyerr_fft_report {
+ __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+ __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
+
+
+struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+ u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE 50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40
+
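(These are the constants used by the false-pulse filter in ath10k_dfs_fft_report() above: a pulse is discarded when its combined RSSI is exactly 50 and the FFT peak magnitude is below 2 * 40 = 80.)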
struct wmi_mgmt_tx_hdr {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@@ -2233,7 +2320,12 @@ enum wmi_pdev_param {
* 0: no protection 1:use CTS-to-self 2: use RTS/CTS
*/
WMI_PDEV_PARAM_PROTECTION_MODE,
- /* Dynamic bandwidth 0: disable 1: enable */
+ /*
+ * Dynamic bandwidth - 0: disable, 1: enable
+ *
+ * When enabled, HW rate control tries different bandwidths when
+ * retransmitting frames.
+ */
WMI_PDEV_PARAM_DYNAMIC_BW,
/* Non aggregate / 11g sw retry threshold. 0 - disable */
WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
@@ -2911,6 +3003,18 @@ struct wmi_vdev_install_key_arg {
const void *key_data;
};
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss - b5:b4 - ss number (0 means 1 ss)
+ * - rate_mcs - b3:b0 - as below
+ * CCK: 0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ * 4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ * HT/VHT: MCS index
+ */
+
/* Preamble types to be used with VDEV fixed rate configuration */
enum wmi_rate_preamble {
WMI_RATE_PREAMBLE_OFDM,
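As a worked example of the fixed-rate byte layout documented in the comment above (illustrative only, not part of the patch), a single-stream OFDM 24 Mbps fixed rate would be composed roughly as:

    /* Sketch, assuming the b7:b6 / b5:b4 / b3:b0 layout above */
    u8 rate = (WMI_RATE_PREAMBLE_OFDM << 6) |  /* preamble, b7:b6 */
              (0 << 4) |                       /* nss, b5:b4 (0 => 1 ss) */
              1;                               /* rate_mcs, b3:b0: OFDM 24Mbps */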
@@ -3998,6 +4102,54 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_ms;
} __packed;
+enum ath10k_dbglog_level {
+ ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+ ATH10K_DBGLOG_LEVEL_INFO = 1,
+ ATH10K_DBGLOG_LEVEL_WARN = 2,
+ ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000
+
+/*
+ * Note: this is a cleaned up version of the struct the firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config */
+ __le32 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le32 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
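To make the field layout above more concrete, here is a rough sketch (not part of the patch) of how a config word and its valid mask could be assembled, assuming ath10k's SM() pack macro:

    /* Sketch only: enable reporting and set the minimum log level to INFO */
    u32 cfg = SM(1, ATH10K_DBGLOG_CFG_REPORTING_ENABLE) |
              SM(ATH10K_DBGLOG_LEVEL_INFO, ATH10K_DBGLOG_CFG_LOG_LVL);
    u32 cfg_valid = ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK |
                    ATH10K_DBGLOG_CFG_LOG_LVL_MASK;

ath10k_wmi_dbglog_cfg() above follows the same pattern, only with the verbose/warn levels.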
#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4075,5 +4227,6 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 69f58b073e85..ef35da84f63b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1238,14 +1238,11 @@ static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
struct ieee80211_rx_status *rxs)
{
- struct ath_common *common = ath5k_hw_common(ah);
u64 tsf, bc_tstamp;
u32 hw_tu;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- if (ieee80211_is_beacon(mgmt->frame_control) &&
- le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
- ether_addr_equal(mgmt->bssid, common->curbssid)) {
+ if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
/*
* Received an IBSS beacon with the same BSSID. Hardware *must*
* have updated the local TSF. We have to work around various
@@ -1301,23 +1298,6 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
}
}
-static void
-ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
-{
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ath_common *common = ath5k_hw_common(ah);
-
- /* only beacons from our BSSID */
- if (!ieee80211_is_beacon(mgmt->frame_control) ||
- !ether_addr_equal(mgmt->bssid, common->curbssid))
- return;
-
- ewma_add(&ah->ah_beacon_rssi_avg, rssi);
-
- /* in IBSS mode we should keep RSSI statistics per neighbour */
- /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
-}
-
/*
* Compute padding position. skb must contain an IEEE 802.11 frame
*/
@@ -1390,6 +1370,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
+ struct ath_common *common = ath5k_hw_common(ah);
ath5k_remove_padding(skb);
@@ -1442,11 +1423,13 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
trace_ath5k_rx(ah, skb);
- ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);
+ if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
+ ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
- /* check beacons in IBSS mode */
- if (ah->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_check_ibss_tsf(ah, skb, rxs);
+ /* check beacons in IBSS mode */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(ah, skb, rxs);
+ }
ieee80211_rx(ah->hw, skb);
}
@@ -2549,7 +2532,6 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
hw->wiphy->available_antennas_rx = 0x3;
hw->extra_tx_headroom = 2;
- hw->channel_change_time = 5000;
/*
* Mark the device as detached to avoid processing
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ba200b24be64..e6c52f7c26e7 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -616,7 +616,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* SISRs will also clear PISR so no need to worry here.
*/
- pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
+ /* XXX: There seems to be an issue on some cards
+ * where the tx interrupt flags are not updated
+ * in PISR even though all Tx interrupt bits
+ * are cleared in the SISRs. Since we handle all
+ * Tx queues together it shouldn't be an
+ * issue if we also clear the Tx interrupt flags
+ * in PISR to avoid that.
+ */
+ pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
+ (pisr & AR5K_INT_TX_ALL);
/*
* Write to clear them...
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
if (ah->ah_version == AR5K_AR5210) {
- srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
} else {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 2437ad26949d..fd4c89df67e1 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1109,7 +1109,9 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
(mode == WMI_11G_HT20) ?
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
+ mutex_lock(&vif->wdev.mtx);
cfg80211_ch_switch_notify(vif->ndev, &chandef);
+ mutex_unlock(&vif->wdev.mtx);
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -3169,12 +3171,15 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
}
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
struct ath6kl *ar = ath6kl_priv(vif->ndev);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ unsigned int wait = params->wait;
+ bool no_cck = params->no_cck;
u32 id, freq;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 546d5da0b894..4f16d79c9eb1 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2754,9 +2754,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
mask->control[band].legacy << 4;
/* copy mcs rate mask */
- mcsrate = mask->control[band].mcs[1];
+ mcsrate = mask->control[band].ht_mcs[1];
mcsrate <<= 8;
- mcsrate |= mask->control[band].mcs[0];
+ mcsrate |= mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 28;
}
@@ -2806,7 +2806,7 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
mask->control[band].legacy << 4;
/* copy mcs rate mask */
- mcsrate = mask->control[band].mcs[0];
+ mcsrate = mask->control[band].ht_mcs[0];
ratemask[band] |= mcsrate << 12;
ratemask[band] |= mcsrate << 20;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 32f139e2e897..7b96b3e5712d 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -65,6 +65,14 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_STATION_STATISTICS
+ bool "Detailed station statistics"
+ depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
+ select MAC80211_DEBUGFS
+ default n
+ ---help---
+ This option enables detailed statistics for associated stations.
+
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@@ -86,7 +94,7 @@ config ATH9K_DFS_CERTIFIED
config ATH9K_TX99
bool "Atheros ath9k TX99 testing support"
- depends on CFG80211_CERTIFICATION_ONUS
+ depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled on systems undergoing
@@ -104,6 +112,14 @@ config ATH9K_TX99
be evaluated to meet the RF exposure limits set forth in the
governmental SAR regulations.
+config ATH9K_WOW
+ bool "Wake on Wireless LAN support (EXPERIMENTAL)"
+ depends on ATH9K && PM
+ default n
+ ---help---
+ This option enables Wake on Wireless LAN support for certain cards.
+ Currently, AR9462 is supported.
+
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6205ef5a9321..a40e5c5d7418 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,11 +11,15 @@ ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
-ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
- dfs.o
-ath9k-$(CONFIG_PM_SLEEP) += wow.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
+ath9k-$(CONFIG_ATH9K_WOW) += wow.o
+
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
+ spectral.o
+
+ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -41,6 +45,8 @@ ath9k_hw-y:= \
ar9003_eeprom.o \
ar9003_paprd.o
+ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
+
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bd048cc69a33..a3668433dc02 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -724,14 +724,14 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
struct ath_ant_comb *antcomb = &sc->ant_comb;
int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
int curr_main_set;
- int main_rssi = rs->rs_rssi_ctl0;
- int alt_rssi = rs->rs_rssi_ctl1;
+ int main_rssi = rs->rs_rssi_ctl[0];
+ int alt_rssi = rs->rs_rssi_ctl[1];
int rx_ant_conf, main_ant_conf;
bool short_scan = false, ret;
- rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+ rx_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_CURRENT_SHIFT) &
ATH_ANT_RX_MASK;
- main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+ main_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_MAIN_SHIFT) &
ATH_ANT_RX_MASK;
if (alt_rssi >= antcomb->low_rssi_thresh) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 5c95fd9e9c9e..d480d2f3e185 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -32,12 +32,8 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
return 0;
}
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280);
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280);
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
@@ -387,6 +383,20 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
}
}
+static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ ah->config.hw_hang_checks |= HW_BB_RIFS_HANG;
+ ah->config.hw_hang_checks |= HW_BB_DFS_HANG;
+ }
+
+ if (AR_SREV_9280(ah))
+ ah->config.hw_hang_checks |= HW_BB_RX_CLEAR_STUCK_HANG;
+
+ if (AR_SREV_5416(ah) || AR_SREV_9100(ah) || AR_SREV_9160(ah))
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+}
+
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
@@ -399,6 +409,7 @@ int ar9002_hw_attach_ops(struct ath_hw *ah)
return ret;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9002_hw_init_hang_checks;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index a366d6b4626f..741b38ddcb37 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -29,7 +29,8 @@ static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
((struct ath_desc*) ds)->ds_link = ds_link;
}
-static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
u32 isr = 0;
u32 mask2 = 0;
@@ -170,7 +171,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
return true;
if (sync_cause) {
- ath9k_debug_sync_cause(common, sync_cause);
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
fatal_int =
(sync_cause &
(AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f087117b2e6b..9a2afa2c690b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -201,7 +201,6 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 7546b9a7dcbf..0a6163e9248c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -303,7 +303,7 @@ static const u32 ar9300_2p2_mac_postamble[][5] = {
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x00008120, 0x18f04800, 0x18f04800, 0x18f04810, 0x18f04810},
{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
@@ -352,7 +352,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -378,9 +378,9 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
@@ -401,7 +401,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -459,7 +459,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a3e8, 0x20202020},
{0x0000a3ec, 0x20202020},
{0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000246},
+ {0x0000a3f4, 0x00000000},
{0x0000a3f8, 0x0c9bd380},
{0x0000a3fc, 0x000f0f01},
{0x0000a400, 0x8fa91f01},
@@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5e88442e, 0x5e88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x628a4431, 0x628a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
@@ -644,7 +644,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d4, 0x000050d4, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
@@ -1086,8 +1086,8 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
{0x0000b088, 0x19191c1e},
{0x0000b08c, 0x12141417},
{0x0000b090, 0x07070e0e},
@@ -1385,9 +1385,9 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x000081f8, 0x00000000},
{0x000081fc, 0x00000000},
{0x00008240, 0x00100000},
- {0x00008244, 0x0010f424},
+ {0x00008244, 0x0010f400},
{0x00008248, 0x00000800},
- {0x0000824c, 0x0001e848},
+ {0x0000824c, 0x0001e800},
{0x00008250, 0x00000000},
{0x00008254, 0x00000000},
{0x00008258, 0x00000000},
@@ -1726,16 +1726,30 @@ static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0825365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0821365e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
+static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9300_2p2_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
#endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
new file mode 100644
index 000000000000..59cf738f70df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_BUFFALO_H
+#define INITVALS_9003_BUFFALO_H
+
+static const u32 ar9300Modes_high_power_tx_gain_table_buffalo[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+#endif /* INITVALS_9003_BUFFALO_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 22934d3ca544..a352128c40ad 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -326,6 +326,224 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->supp_cals = IQ_MISMATCH_CAL;
}
+#define OFF_UPPER_LT 24
+#define OFF_LOWER_LT 7
+
+static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
+ bool txiqcal_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ch0_done, osdac_ch0, dc_off_ch0_i1, dc_off_ch0_q1, dc_off_ch0_i2,
+ dc_off_ch0_q2, dc_off_ch0_i3, dc_off_ch0_q3;
+ int ch1_done, osdac_ch1, dc_off_ch1_i1, dc_off_ch1_q1, dc_off_ch1_i2,
+ dc_off_ch1_q2, dc_off_ch1_i3, dc_off_ch1_q3;
+ int ch2_done, osdac_ch2, dc_off_ch2_i1, dc_off_ch2_q1, dc_off_ch2_i2,
+ dc_off_ch2_q2, dc_off_ch2_i3, dc_off_ch2_q3;
+ bool status;
+ u32 temp, val;
+
+ /*
+ * Clear offset and IQ calibration, run AGC cal.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "AGC cal without offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ /*
+ * Allow only offset calibration and disable the others
+ * (Carrier Leak calibration, TX Filter calibration and
+ * Peak Detector offset calibration).
+ */
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+
+ ch0_done = 0;
+ ch1_done = 0;
+ ch2_done = 0;
+
+ while ((ch0_done == 0) || (ch1_done == 0) || (ch2_done == 0)) {
+ osdac_ch0 = (REG_READ(ah, AR_PHY_65NM_CH0_BB1) >> 30) & 0x3;
+ osdac_ch1 = (REG_READ(ah, AR_PHY_65NM_CH1_BB1) >> 30) & 0x3;
+ osdac_ch2 = (REG_READ(ah, AR_PHY_65NM_CH2_BB1) >> 30) & 0x3;
+
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "DC offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * High gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (1 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q1 = (temp >> 21) & 0x1f;
+
+ /*
+ * Low gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (2 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q2 = (temp >> 21) & 0x1f;
+
+ /*
+ * Loopback.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (3 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q3 = (temp >> 21) & 0x1f;
+
+ if ((dc_off_ch0_i1 > OFF_UPPER_LT) || (dc_off_ch0_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i2 > OFF_UPPER_LT) || (dc_off_ch0_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i3 > OFF_UPPER_LT) || (dc_off_ch0_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q1 > OFF_UPPER_LT) || (dc_off_ch0_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q2 > OFF_UPPER_LT) || (dc_off_ch0_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q3 > OFF_UPPER_LT) || (dc_off_ch0_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch0 == 3) {
+ ch0_done = 1;
+ } else {
+ osdac_ch0++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH0_BB1) & 0x3fffffff;
+ val |= (osdac_ch0 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB1, val);
+
+ ch0_done = 0;
+ }
+ } else {
+ ch0_done = 1;
+ }
+
+ if ((dc_off_ch1_i1 > OFF_UPPER_LT) || (dc_off_ch1_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i2 > OFF_UPPER_LT) || (dc_off_ch1_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i3 > OFF_UPPER_LT) || (dc_off_ch1_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q1 > OFF_UPPER_LT) || (dc_off_ch1_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q2 > OFF_UPPER_LT) || (dc_off_ch1_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q3 > OFF_UPPER_LT) || (dc_off_ch1_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch1 == 3) {
+ ch1_done = 1;
+ } else {
+ osdac_ch1++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH1_BB1) & 0x3fffffff;
+ val |= (osdac_ch1 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB1, val);
+
+ ch1_done = 0;
+ }
+ } else {
+ ch1_done = 1;
+ }
+
+ if ((dc_off_ch2_i1 > OFF_UPPER_LT) || (dc_off_ch2_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i2 > OFF_UPPER_LT) || (dc_off_ch2_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i3 > OFF_UPPER_LT) || (dc_off_ch2_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q1 > OFF_UPPER_LT) || (dc_off_ch2_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q2 > OFF_UPPER_LT) || (dc_off_ch2_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q3 > OFF_UPPER_LT) || (dc_off_ch2_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch2 == 3) {
+ ch2_done = 1;
+ } else {
+ osdac_ch2++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH2_BB1) & 0x3fffffff;
+ val |= (osdac_ch2 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB1, val);
+
+ ch2_done = 0;
+ }
+ } else {
+ ch2_done = 1;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * We don't need to check txiqcal_done here since it is always
+ * set for AR9550.
+ */
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+
+ return true;
+}
+
/*
* solve 4x4 linear equation used in loopback iq cal.
*/
@@ -347,7 +565,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
const s32 result_shift = 1 << 15;
struct ath_common *common = ath9k_hw_common(ah);
- f2 = (f1 * f1 + f3 * f3) / result_shift;
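+ /*
+ * Scale f1 and f3 down by 8 before squaring to avoid 32-bit
+ * overflow; the extra factor of 64 is compensated by shifting
+ * right by 9 instead of by 15 (result_shift).
+ */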
+ f2 = ((f1 >> 3) * (f1 >> 3) + (f3 >> 3) * (f3 >> 3)) >> 9;
if (!f2) {
ath_dbg(common, CALIBRATE, "Divide by 0\n");
@@ -437,8 +655,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x800)
- i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+ if (i2_p_q2_a0_d1 > 0x1000)
+ i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -482,6 +700,19 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return false;
}
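+ /*
+ * Sanity check the measured powers before using them as divisors:
+ * i2+q2 for the first gain setting must lie within [1024, 2047],
+ * the others must be non-negative, and every i2+q2 term must exceed
+ * the corresponding i2-q2 and IQ correlation terms.
+ */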
+ if ((i2_p_q2_a0_d0 < 1024) || (i2_p_q2_a0_d0 > 2047) ||
+ (i2_p_q2_a1_d0 < 0) || (i2_p_q2_a1_d1 < 0) ||
+ (i2_p_q2_a0_d0 <= i2_m_q2_a0_d0) ||
+ (i2_p_q2_a0_d0 <= iq_corr_a0_d0) ||
+ (i2_p_q2_a0_d1 <= i2_m_q2_a0_d1) ||
+ (i2_p_q2_a0_d1 <= iq_corr_a0_d1) ||
+ (i2_p_q2_a1_d0 <= i2_m_q2_a1_d0) ||
+ (i2_p_q2_a1_d0 <= iq_corr_a1_d0) ||
+ (i2_p_q2_a1_d1 <= i2_m_q2_a1_d1) ||
+ (i2_p_q2_a1_d1 <= iq_corr_a1_d1)) {
+ return false;
+ }
+
mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
@@ -898,7 +1129,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
- int offset[8], total = 0, test;
+ int offset[8] = {0}, total = 0, test;
int agc_out, i;
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
@@ -923,12 +1154,18 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
- if (is_2g)
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
- else
+
+ if (AR_SREV_9330_11(ah)) {
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
+ } else {
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+ }
for (i = 6; i > 0; i--) {
offset[i] = BIT(i - 1);
@@ -964,9 +1201,9 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
-static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan,
- bool run_rtt_cal)
+static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
@@ -1040,14 +1277,14 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
}
}
-static bool ar9003_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
bool is_reusable = true, status = true;
- bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
+ bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
@@ -1119,22 +1356,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
- run_agc_cal = true;
- sep_iq_cal = true;
}
skip_tx_iqcal:
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
- if (sep_iq_cal) {
- txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
- udelay(5);
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
- }
-
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
/* Disable BB_active */
@@ -1155,7 +1382,7 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ ar9003_hw_do_pcoem_manual_peak_cal(ah, chan, run_rtt_cal);
}
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
@@ -1228,13 +1455,117 @@ skip_tx_iqcal:
return true;
}
+static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txiqcal_done = false;
+ bool is_reusable = true, status = true;
+ bool run_agc_cal = false, sep_iq_cal = false;
+
+ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (ah->enabled_cals & TX_CL_CAL) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ run_agc_cal = true;
+ }
+
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+ goto skip_tx_iqcal;
+
+ /* Do Tx IQ Calibration */
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+
+ /*
+ * On AR9485 and later chips, Tx IQ cal runs as part of the
+ * AGC calibration; among the SoC chips, this applies to AR9550.
+ */
+ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+ txiqcal_done = true;
+ run_agc_cal = true;
+ } else {
+ sep_iq_cal = true;
+ run_agc_cal = true;
+ }
+
+ /*
+ * In the SoC family, this will run for AR9300, AR9331 and AR9340.
+ */
+ if (sep_iq_cal) {
+ txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
+ if (AR_SREV_9550(ah) && IS_CHAN_2GHZ(chan)) {
+ if (!ar9003_hw_dynamic_osdac_selection(ah, txiqcal_done))
+ return false;
+ }
+
+skip_tx_iqcal:
+ if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+ if (AR_SREV_9330_11(ah))
+ ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ }
+
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+
+ /* Revert chainmask to runtime parameters */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ if (caldata)
+ caldata->CalValid = 0;
+
+ return true;
+}
+
void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
+ else
+ priv_ops->init_cal = ar9003_hw_init_cal_soc;
+
priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
- priv_ops->init_cal = ar9003_hw_init_cal;
priv_ops->setup_calibration = ar9003_hw_setup_calibration;
ops->calibrate = ar9003_hw_calibrate;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 130657db5c43..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -131,6 +131,7 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -138,7 +139,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -333,6 +334,7 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -707,6 +709,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -714,7 +717,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -909,6 +912,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1284,6 +1288,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1291,7 +1296,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -1486,6 +1491,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1861,6 +1867,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -1868,7 +1875,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -2063,6 +2070,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -2437,6 +2445,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -2444,7 +2453,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0},
+ .future = {0, 0},
.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
@@ -2639,6 +2648,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
.xlna_bias_strength = 0,
.futureModal = {
0, 0, 0, 0, 0, 0, 0,
@@ -3588,7 +3598,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
- } else if (AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
@@ -3965,7 +3975,7 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
- if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
return;
if (eep->baseEepHeader.featureEnable & 0x40) {
@@ -4020,7 +4030,10 @@ static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
if (!(eep->baseEepHeader.featureEnable & 0x80))
return;
- if (!AR_SREV_9300(ah) && !AR_SREV_9340(ah) && !AR_SREV_9580(ah))
+ if (!AR_SREV_9300(ah) &&
+ !AR_SREV_9340(ah) &&
+ !AR_SREV_9580(ah) &&
+ !AR_SREV_9531(ah))
return;
xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
@@ -4111,6 +4124,37 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
}
}
+static void ar9003_hw_apply_minccapwr_thresh(struct ath_hw *ah,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ const u_int32_t cca_ctrl[AR9300_MAX_CHAINS] = {
+ AR_PHY_CCA_CTRL_0,
+ AR_PHY_CCA_CTRL_1,
+ AR_PHY_CCA_CTRL_2,
+ };
+ int chain;
+ u32 val;
+
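+ /*
+ * Program the per-chain minimum CCA power (extended thresh62) from
+ * the EEPROM noise floor threshold, but only when the corresponding
+ * misc_enable bit (bit 2 for 2 GHz, bit 3 for 5 GHz) is set.
+ */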
+ if (is2ghz) {
+ if (!(eep->base_ext1.misc_enable & BIT(2)))
+ return;
+ } else {
+ if (!(eep->base_ext1.misc_enable & BIT(3)))
+ return;
+ }
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ val = ar9003_modal_header(ah, is2ghz)->noiseFloorThreshCh[chain];
+ REG_RMW_FIELD(ah, cca_ctrl[chain],
+ AR_PHY_EXT_CCA0_THRESH62_1, val);
+ }
+
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -4122,9 +4166,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
- if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah))
ar9003_hw_internal_regulator_apply(ah);
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_apply_minccapwr_thresh(ah, is2ghz);
ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
ar9003_hw_thermometer_apply(ah);
ar9003_hw_thermo_cal_apply(ah);
@@ -4746,7 +4791,7 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
}
tempslope:
- if (AR_SREV_9550(ah)) {
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
/*
* AR955x has tempSlope register for each chain.
* Check whether temp_compensation feature is enabled or not.
@@ -5020,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
break;
}
}
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
return twiceMaxEdgePower;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 0e5daa58a4fc..694ca2e680e5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -270,10 +270,20 @@ struct cal_ctl_data_5g {
u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
} __packed;
+#define MAX_BASE_EXTENSION_FUTURE 2
+
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[3];
- u8 tempslopextension[8];
+ u8 future[MAX_BASE_EXTENSION_FUTURE];
+ /*
+ * misc_enable:
+ *
+ * BIT 0 - TX Gain Cap enable.
+ * BIT 1 - Uncompressed Checksum enable.
+ * BIT 2/3 - MinCCApwr enable 2g/5g.
+ */
+ u8 misc_enable;
+ int8_t tempslopextension[8];
int8_t quick_drop_low;
int8_t quick_drop_high;
} __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 20e49095db2a..ec1da0cc25f5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -17,6 +17,7 @@
#include "hw.h"
#include "ar9003_mac.h"
#include "ar9003_2p2_initvals.h"
+#include "ar9003_buffalo_initvals.h"
#include "ar9485_initvals.h"
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
@@ -26,6 +27,8 @@
#include "ar9462_2p0_initvals.h"
#include "ar9462_2p1_initvals.h"
#include "ar9565_1p0_initvals.h"
+#include "ar9565_1p1_initvals.h"
+#include "ar953x_initvals.h"
/* General hardware code for the AR9003 hardware family */
@@ -148,7 +151,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9340Modes_high_ob_db_tx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9340Modes_fast_clock_1p0);
+ ar9340Modes_fast_clock_1p0);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9340_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9340_1p0_baseband_postamble_dfs_channel);
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
@@ -223,6 +230,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p1_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -247,18 +258,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p0_soc_postamble);
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0);
+ ar9462_2p0_common_rx_gain);
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_pciephy_clkreq_disable_L1_2p0);
+ ar9462_2p0_pciephy_clkreq_disable_L1);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_pciephy_clkreq_disable_L1_2p0);
+ ar9462_2p0_pciephy_clkreq_disable_L1);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9462_modes_fast_clock_2p0);
+ ar9462_2p0_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
@@ -298,6 +309,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar955x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ qca953x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ qca953x_1p0_mac_postamble);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca953x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca953x_1p0_baseband_postamble);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ qca953x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ qca953x_1p0_radio_postamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ qca953x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ qca953x_1p0_soc_postamble);
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ qca953x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -330,7 +366,46 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9580_1p0_low_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9580_1p0_modes_fast_clock);
+ ar9580_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9580_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9580_1p0_baseband_postamble_dfs_channel);
+ } else if (AR_SREV_9565_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9565_1p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9565_1p1_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9565_1p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9565_1p1_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9565_1p1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9565_1p1_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9565_1p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9565_1p1_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9565_1p1_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p1_baseband_core_txfir_coeff_japan_2484);
} else if (AR_SREV_9565(ah)) {
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9565_1p0_mac_core);
@@ -411,7 +486,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9300Modes_fast_clock_2p2);
+ ar9300Modes_fast_clock_2p2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9300_2p2_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9300_2p2_baseband_postamble_dfs_channel);
}
}
@@ -432,6 +511,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -440,7 +522,10 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
ar9462_2p1_modes_low_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_low_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_low_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -469,12 +554,22 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table);
- else if (AR_SREV_9462_21(ah))
+ else if (AR_SREV_9531(ah)) {
+ if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+ } else if (AR_SREV_9462_21(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_2p1_modes_high_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_high_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_high_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_high_ob_db_tx_gain_table);
@@ -500,6 +595,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -525,12 +623,20 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_high_power_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_power_tx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9565_1p0_modes_high_power_tx_gain_table);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_power_tx_gain_table_2p2);
+ else {
+ if (ah->config.tx_gain_buffalo)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_buffalo);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_2p2);
+ }
}
static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
@@ -546,7 +652,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
ar9462_2p1_modes_mix_ob_db_tx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_mix_ob_db_tx_gain_table_2p0);
+ ar9462_2p0_modes_mix_ob_db_tx_gain);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -581,6 +687,13 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
ar9580_1p0_type6_tx_gain_table);
}
+static void ar9003_tx_gain_table_mode7(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340_cus227_tx_gain_table_1p0);
+}
+
typedef void (*ath_txgain_tab)(struct ath_hw *ah);
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -593,6 +706,7 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
ar9003_tx_gain_table_mode4,
ar9003_tx_gain_table_mode5,
ar9003_tx_gain_table_mode6,
+ ar9003_tx_gain_table_mode7,
};
int idx = ar9003_hw_get_tx_gain_idx(ah);
@@ -621,6 +735,11 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar955x_1p0_common_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_rx_gain_table);
@@ -629,7 +748,10 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9462_2p1_common_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0);
+ ar9462_2p0_common_rx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_Common_rx_gain_table);
@@ -657,15 +779,23 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar9462_2p1_common_wo_xlna_rx_gain);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_wo_xlna_rx_gain_table_2p0);
+ ar9462_2p0_common_wo_xlna_rx_gain);
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_common_wo_xlna_rx_gain_table);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_common_wo_xlna_rx_gain_table);
@@ -687,7 +817,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_2p0);
+ ar9462_2p0_common_mixed_rx_gain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
ar9462_2p0_baseband_core_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
@@ -701,12 +831,12 @@ static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
{
if (AR_SREV_9462_21(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_2p1_common_5g_xlna_only_rx_gain);
+ ar9462_2p1_common_5g_xlna_only_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p1_baseband_postamble_5g_xlna);
} else if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_2p0_5g_xlna_only_rxgain);
+ ar9462_2p0_common_5g_xlna_only_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
@@ -750,6 +880,9 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
+ unsigned int i;
+ struct ar5416IniArray *array;
+
/*
* Increase L1 Entry Latency. Some WB222 boards don't have
* this change in eeprom/OTP.
@@ -775,19 +908,125 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
* Configure PCIE after Ini init. SERDES values now come from ini file
* This enables PCIe low power mode.
*/
- if (ah->config.pcieSerDesWrite) {
- unsigned int i;
- struct ar5416IniArray *array;
+ array = power_off ? &ah->iniPcieSerdes :
+ &ah->iniPcieSerdesLowPower;
+
+ for (i = 0; i < array->ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(array, i, 0),
+ INI_RA(array, i, 1));
+ }
+}
+
+static void ar9003_hw_init_hang_checks(struct ath_hw *ah)
+{
+ /*
+ * All chips support detection of BB/MAC hangs.
+ */
+ ah->config.hw_hang_checks |= HW_BB_WATCHDOG;
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+
+ /*
+ * This is not required for AR9580 1.0
+ */
+ if (AR_SREV_9300_22(ah))
+ ah->config.hw_hang_checks |= HW_PHYRESTART_CLC_WAR;
+
+ if (AR_SREV_9330(ah))
+ ah->bb_watchdog_timeout_ms = 85;
+ else
+ ah->bb_watchdog_timeout_ms = 25;
+}
- array = power_off ? &ah->iniPcieSerdes :
- &ah->iniPcieSerdesLowPower;
+/*
+ * MAC HW hang check
+ * =================
+ *
+ * Signature: dcu_chain_state is 0x6 and dcu_complete_state is 0x1.
+ *
+ * The state of each DCU chain (mapped to TX queues) is available from these
+ * DMA debug registers:
+ *
+ * Chain 0 state : Bits 4:0 of AR_DMADBG_4
+ * Chain 1 state : Bits 9:5 of AR_DMADBG_4
+ * Chain 2 state : Bits 14:10 of AR_DMADBG_4
+ * Chain 3 state : Bits 19:15 of AR_DMADBG_4
+ * Chain 4 state : Bits 24:20 of AR_DMADBG_4
+ * Chain 5 state : Bits 29:25 of AR_DMADBG_4
+ * Chain 6 state : Bits 4:0 of AR_DMADBG_5
+ * Chain 7 state : Bits 9:5 of AR_DMADBG_5
+ * Chain 8 state : Bits 14:10 of AR_DMADBG_5
+ * Chain 9 state : Bits 19:15 of AR_DMADBG_5
+ *
+ * The DCU chain state "0x6" means "WAIT_FRDONE" - wait for TX frame to be done.
+ */
+
+#define NUM_STATUS_READS 50
+
+static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+{
+ u32 dma_dbg_chain, dma_dbg_complete;
+ u8 dcu_chain_state, dcu_complete_state;
+ int i;
+
+ for (i = 0; i < NUM_STATUS_READS; i++) {
+ if (queue < 6)
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+ else
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
+
+ dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+
+ dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
+ dcu_complete_state = dma_dbg_complete & 0x3;
+
+ if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+ return false;
+ }
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "MAC Hang signature found for queue: %d\n", queue);
+
+ return true;
+}
+
+static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ u32 dma_dbg_4, dma_dbg_5, dma_dbg_6, chk_dbg;
+ u8 dcu_chain_state, dcu_complete_state;
+ bool dcu_wait_frdone = false;
+ unsigned long chk_dcu = 0;
+ unsigned int i = 0;
+
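+ /*
+ * Snapshot the DMA debug registers, collect every queue whose DCU
+ * chain state is WAIT_FRDONE (0x6) while the completion state is 0x1,
+ * and then confirm the hang signature for each candidate with
+ * repeated reads in ath9k_hw_verify_hang().
+ */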
+ dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+ dma_dbg_5 = REG_READ(ah, AR_DMADBG_5);
+ dma_dbg_6 = REG_READ(ah, AR_DMADBG_6);
+
+ dcu_complete_state = dma_dbg_6 & 0x3;
+ if (dcu_complete_state != 0x1)
+ goto exit;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (i < 6)
+ chk_dbg = dma_dbg_4;
+ else
+ chk_dbg = dma_dbg_5;
+
+ dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
+ if (dcu_chain_state == 0x6) {
+ dcu_wait_frdone = true;
+ chk_dcu |= BIT(i);
+ }
+ }
- for (i = 0; i < array->ia_rows; i++) {
- REG_WRITE(ah,
- INI_RA(array, i, 0),
- INI_RA(array, i, 1));
+ if ((dcu_complete_state == 0x1) && dcu_wait_frdone) {
+ for_each_set_bit(i, &chk_dcu, ATH9K_NUM_TX_QUEUES) {
+ if (ath9k_hw_verify_hang(ah, i))
+ return true;
}
}
+exit:
+ return false;
}
/* Sets up the AR9003 hardware family callbacks */
@@ -798,6 +1037,8 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
ar9003_hw_init_mode_regs(ah);
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
+ priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
ops->config_pci_powersave = ar9003_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index f6c5c1b50471..729ffbf07343 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,7 +175,8 @@ static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
-static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
u32 isr = 0;
u32 mask2 = 0;
@@ -310,7 +311,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_mci_get_isr(ah, masked);
if (sync_cause) {
- ath9k_debug_sync_cause(common, sync_cause);
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
fatal_int =
(sync_cause &
(AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
@@ -476,12 +478,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
/* XXX: Keycache */
rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
- rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
- rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
- rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
- rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
- rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
- rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+ rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);
if (rxsp->status11 & AR_RxKeyIdxValid)
rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index d39b79f5e841..09facba1dc6d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -103,7 +103,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
} else {
channelSel = CHANSEL_2G(freq) >> 1;
}
- } else if (AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
if (ah->is_clk_25mhz)
div = 75;
else
@@ -118,7 +118,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
- if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) &&
ah->is_clk_25mhz) {
channelSel = freq / 75;
chan_frac = ((freq % 75) * 0x20000) / 75;
@@ -641,11 +641,12 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
else
ah->enabled_cals &= ~TX_IQ_CAL;
- if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
- ah->enabled_cals |= TX_CL_CAL;
- else
- ah->enabled_cals &= ~TX_CL_CAL;
}
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -809,10 +810,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
/*
* TXGAIN initvals.
*/
- if (AR_SREV_9550(ah)) {
- int modes_txgain_index;
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ int modes_txgain_index = 1;
+
+ if (AR_SREV_9550(ah))
+ modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
- modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
if (modes_txgain_index < 0)
return -EINVAL;
@@ -1331,6 +1334,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
+ unsigned int regWrites = 0;
u32 radar_0 = 0, radar_1 = 0;
if (!conf) {
@@ -1357,6 +1361,11 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
else
REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+
+ if (AR_SREV_9300(ah) || AR_SREV_9340(ah) || AR_SREV_9580(ah)) {
+ REG_WRITE_ARRAY(&ah->ini_dfs,
+ IS_CHAN_HT40(ah->curchan) ? 2 : 1, regWrites);
+ }
}
static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
@@ -1807,6 +1816,68 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
}
+/*
+ * Baseband Watchdog signatures:
+ *
+ * 0x04000539: BB hang when operating in HT40 DFS Channel.
+ * Full chip reset is not required, but a recovery
+ * mechanism is needed.
+ *
+ * 0x1300000a: Related to CAC deafness.
+ * Chip reset is not required.
+ *
+ * 0x0400000a: Related to CAC deafness.
+ * Full chip reset is required.
+ *
+ * 0x04000b09: RX state machine gets into an illegal state
+ * when a packet with unsupported rate is received.
+ * Full chip reset is required and PHY_RESTART has
+ * to be disabled.
+ *
+ * 0x04000409: Packet stuck on receive.
+ * Full chip reset is required for all chips except AR9340.
+ */
+
+/*
+ * ar9003_hw_bb_watchdog_check(): Returns true if a chip reset is required.
+ */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah)
+{
+ u32 val;
+
+ switch (ah->bb_watchdog_last_status) {
+ case 0x04000539:
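+ /*
+ * Recover from the HT40 DFS hang by briefly forcing the radar
+ * FIRPWR threshold to 0x7f and then restoring the DFS default
+ * (AR9300_DFS_FIRPWR); no chip reset is needed.
+ */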
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= (~AR_PHY_RADAR_0_FIRPWR);
+ val |= SM(0x7f, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+ udelay(1);
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= ~AR_PHY_RADAR_0_FIRPWR;
+ val |= SM(AR9300_DFS_FIRPWR, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+
+ return false;
+ case 0x1300000a:
+ return false;
+ case 0x0400000a:
+ case 0x04000b09:
+ return true;
+ case 0x04000409:
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+ return false;
+ else
+ return true;
+ default:
+ /*
+ * For any other unknown signatures, do a
+ * full chip reset.
+ */
+ return true;
+ }
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_check);
+
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1923,6 +1994,7 @@ EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
{
+ u8 result;
u32 val;
/* While receiving unsupported rate frame rx state machine
@@ -1930,15 +2002,13 @@ void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
* state, BB would go hang. If RXSM is in 0xb state after
* first bb panic, ensure to disable the phy_restart.
*/
- if (!((MS(ah->bb_watchdog_last_status,
- AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
- ah->bb_hang_rx_ofdm))
- return;
+ result = MS(ah->bb_watchdog_last_status, AR_PHY_WATCHDOG_RX_OFDM_SM);
- ah->bb_hang_rx_ofdm = true;
- val = REG_READ(ah, AR_PHY_RESTART);
- val &= ~AR_PHY_RESTART_ENA;
-
- REG_WRITE(ah, AR_PHY_RESTART, val);
+ if ((result == 0xb) || ah->bb_hang_rx_ofdm) {
+ ah->bb_hang_rx_ofdm = true;
+ val = REG_READ(ah, AR_PHY_RESTART);
+ val &= ~AR_PHY_RESTART_ENA;
+ REG_WRITE(ah, AR_PHY_RESTART, val);
+ }
}
EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 2af667beb273..fd090b1f2d0f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -270,7 +270,7 @@
#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
-#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_CCA_CTRL_0 (AR_AGC_BASE + 0x20)
#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
/*
@@ -338,17 +338,17 @@
#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
@@ -397,6 +397,8 @@
#define AR9280_PHY_CCA_THRESH62_S 12
#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_EXT_CCA0_THRESH62_1 0x000001FF
+#define AR_PHY_EXT_CCA0_THRESH62_1_S 0
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
@@ -667,6 +669,16 @@
#define AR_PHY_65NM_CH1_RXTX4 0x1650c
#define AR_PHY_65NM_CH2_RXTX4 0x1690c
+#define AR_PHY_65NM_CH0_BB1 0x16140
+#define AR_PHY_65NM_CH0_BB2 0x16144
+#define AR_PHY_65NM_CH0_BB3 0x16148
+#define AR_PHY_65NM_CH1_BB1 0x16540
+#define AR_PHY_65NM_CH1_BB2 0x16544
+#define AR_PHY_65NM_CH1_BB3 0x16548
+#define AR_PHY_65NM_CH2_BB1 0x16940
+#define AR_PHY_65NM_CH2_BB2 0x16944
+#define AR_PHY_65NM_CH2_BB3 0x16948
+
#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000
#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19
#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004
@@ -1331,4 +1343,6 @@
#define AR_PHY_65NM_RXRF_AGC_AGC_OUT 0x00000004
#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S 2
+#define AR9300_DFS_FIRPWR -28
+
#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
new file mode 100644
index 000000000000..81c88dd606dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "hw-ops.h"
+
+const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
+ return "Magic pattern";
+ if (wow_event & AH_WOW_USER_PATTERN_EN)
+ return "User pattern";
+ if (wow_event & AH_WOW_LINK_CHANGE)
+ return "Link change";
+ if (wow_event & AH_WOW_BEACON_MISS)
+ return "Beacon miss";
+
+ return "unknown reason";
+}
+EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ /* set rx disable bit */
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return;
+ }
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+ u32 ctl[13] = {0};
+ u32 data_word[KAL_NUM_DATA_WORDS];
+ u8 i;
+ u32 wow_ka_data_word0;
+
+ memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+ memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+ /* set the transmit buffer */
+ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+ ctl[1] = 0;
+ ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
+ ctl[4] = 0;
+ ctl[7] = (ah->txchainmask) << 2;
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
+
+ for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
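+ /*
+ * Build the keep-alive frame: a null data frame addressed to the
+ * AP (ToDS set), with the BSSID and station MAC packed into
+ * 32-bit words for the hardware keep-alive buffer.
+ */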
+ data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+ (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+ data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+ (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+ data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+ (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+ data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+ if (AR_SREV_9462_20(ah)) {
+ /* AR9462 2.0 has an extra descriptor word (time based
+ * discard) compared to other chips */
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+ wow_ka_data_word0 = AR_WOW_TXBUF(13);
+ } else {
+ wow_ka_data_word0 = AR_WOW_TXBUF(12);
+ }
+
+ for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+ REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+
+}
+
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
+{
+ int i;
+ u32 pattern_val, mask_val;
+ u32 set, clr;
+
+ /* FIXME: should check count by querying the hardware capability */
+ if (pattern_count >= MAX_NUM_PATTERN)
+ return;
+
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+
+ /* set the registers for pattern */
+ for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+ memcpy(&pattern_val, user_pattern, 4);
+ REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+ pattern_val);
+ user_pattern += 4;
+ }
+
+ /* set the registers for mask */
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+ memcpy(&mask_val, user_mask, 4);
+ REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+ user_mask += 4;
+ }
+
+ /* set the pattern length to be matched
+ *
+ * AR_WOW_LENGTH1_REG1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH1_REG2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * the below logic writes out the new
+ * pattern length for the corresponding
+ * pattern_count, while masking out the
+ * other fields
+ */
+
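+ /* Record this pattern in the event mask so a match is reported as a wake reason */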
+ ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+
+ if (pattern_count < 4) {
+ /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN1_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH1_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+ } else {
+ /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN2_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH2_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ }
+
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ u32 wow_status = 0;
+ u32 val = 0, rval;
+
+ /*
+ * read the WoW status register to know
+ * the wakeup reason
+ */
+ rval = REG_READ(ah, AR_WOW_PATTERN);
+ val = AR_WOW_STATUS(rval);
+
+ /*
+ * mask only the WoW events that we have enabled. Sometimes
+ * we have spurious WoW events from the AR_WOW_PATTERN
+ * register. This mask will clean it up.
+ */
+
+ val &= ah->wow_event_mask;
+
+ if (val) {
+ if (val & AR_WOW_MAGIC_PAT_FOUND)
+ wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+ if (AR_WOW_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+ if (val & AR_WOW_KEEP_ALIVE_FAIL)
+ wow_status |= AH_WOW_LINK_CHANGE;
+ if (val & AR_WOW_BEACON_FAIL)
+ wow_status |= AH_WOW_BEACON_MISS;
+ }
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip to
+ * generate next wow signal.
+ * disable D3 before accessing other registers ?
+ */
+
+ /* do we need to check the bit value 0x01000000 (7-10) ?? */
+ REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ AR_PMCTRL_PWR_STATE_D1D3);
+
+ /*
+ * clear all events
+ */
+ REG_WRITE(ah, AR_WOW_PATTERN,
+ AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+
+ /*
+ * restore the beacon threshold to init value
+ */
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ /*
+ * Restore the way the PCI-E reset, Power-On-Reset and external
+ * PCIE_POR_SHORT pins are tied, back to their original values.
+ * Just before WoW sleep we untied the PCI-E reset from our
+ * chip's Power-On-Reset so that a PCI-E reset from the bus
+ * would not reset the chip.
+ */
+ if (ah->is_pciexpress)
+ ath9k_hw_configpcipowersave(ah, false);
+
+ ah->wow_event_mask = 0;
+
+ return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 set, clr;
+
+ /*
+ * wow_event_mask is a mask to the AR_WOW_PATTERN register to
+ * indicate which WoW events we have enabled. The WoW events
+ * are from the 'pattern_enable' in this function and
+ * 'pattern_count' of ath9k_hw_wow_apply_pattern()
+ */
+ wow_event_mask = ah->wow_event_mask;
+
+ /*
+ * Untie Power-on-Reset from the PCI-E-Reset. When we are in
+ * WOW sleep, we do not want a reset from the PCI-E bus to disturb
+ * our hw state.
+ */
+ if (ah->is_pciexpress) {
+ /*
+ * We need to untie the internal POR (power-on-reset)
+ * from the external PCI-E reset, and also tie
+ * the PCI-E Phy reset to the PCI-E reset.
+ */
+ set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+ REG_RMW(ah, AR_WA, set, clr);
+ }
+
+ /*
+ * set the power states appropriately and enable PME
+ */
+ set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip
+ * to generate next wow signal.
+ */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ clr = AR_PMCTRL_WOW_PME_CLR;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ /*
+ * Setup for:
+ * - beacon misses
+ * - magic pattern
+ * - keep alive timeout
+ * - pattern matching
+ */
+
+ /*
+ * Program default values for pattern backoff, aifs/slot/KAL count,
+ * beacon miss timeout, KAL timeout, etc.
+ */
+ set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
+ REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+
+ set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
+ REG_SET_BIT(ah, AR_WOW_COUNT, set);
+
+ if (pattern_enable & AH_WOW_BEACON_MISS)
+ set = AR_WOW_BEACON_TIMO;
+ /* We are not using beacon miss, program a large value */
+ else
+ set = AR_WOW_BEACON_TIMO_MAX;
+
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+
+ /*
+ * Keep alive timeout in ms, except for AR9280
+ */
+ if (!pattern_enable)
+ set = AR_WOW_KEEP_ALIVE_NEVER;
+ else
+ set = KAL_TIMEOUT * 32;
+
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+
+ /*
+ * Keep alive delay is based on the 'power on clock',
+ * and is therefore programmed in usec.
+ */
+ set = KAL_DELAY * 1000;
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+
+ /*
+ * Create keep alive pattern to respond to beacons
+ */
+ ath9k_wow_create_keep_alive_pattern(ah);
+
+ /*
+ * Configure MAC WoW Registers
+ */
+ set = 0;
+ /* Send keep alive timeouts anyway */
+ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+ if (pattern_enable & AH_WOW_LINK_CHANGE)
+ wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+ else
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
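+ /*
+ * Note: the assignment below unconditionally disables keep-alive
+ * failure detection, overriding the link-change check above.
+ */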
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+
+ /*
+ * We are relying on a bmiss failure; ensure the threshold is
+ * high enough to prevent false positives.
+ */
+ REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+ AR_WOW_BMISSTHRESHOLD);
+
+ set = 0;
+ clr = 0;
+
+ if (pattern_enable & AH_WOW_BEACON_MISS) {
+ set = AR_WOW_BEACON_FAIL_EN;
+ wow_event_mask |= AR_WOW_BEACON_FAIL;
+ } else {
+ clr = AR_WOW_BEACON_FAIL_EN;
+ }
+
+ REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+
+ set = 0;
+ clr = 0;
+ /*
+ * Enable the magic packet registers
+ */
+ if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+ set = AR_WOW_MAGIC_EN;
+ wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+ } else {
+ clr = AR_WOW_MAGIC_EN;
+ }
+ set |= AR_WOW_MAC_INTR_EN;
+ REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
+
+ /*
+ * Set the power states appropriately and enable PME
+ */
+ clr = 0;
+ set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+
+ clr = AR_PCIE_PM_CTRL_ENA;
+ REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+
+ /*
+ * this is needed to prevent the chip waking up
+ * the host within 3-4 seconds with certain
+ * platform/BIOS. The fix is to enable
+ * D1 & D3 to match original definition and
+ * also match the OTP value. Anyway this
+ * is more related to SW WOW.
+ */
+ clr = AR_PMCTRL_PWR_STATE_D1D3;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ /* to bring down WOW power low margin */
+ set = BIT(13);
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+ /* HW WoW */
+ clr = BIT(5);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+
+ ath9k_hw_set_powermode_wow_sleep(ah);
+ ah->wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 6e1756bc3833..f76139bbb74f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -18,6 +18,10 @@
#ifndef INITVALS_9330_1P1_H
#define INITVALS_9330_1P1_H
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_modes_high_power_tx_gain_1p1 ar9331_modes_lowest_ob_db_tx_gain_1p1
+
static const u32 ar9331_1p1_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
@@ -55,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -252,7 +256,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
{0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
{0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d4, 0x000050d4},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
@@ -337,8 +341,6 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
static const u32 ar9331_1p1_xtal_25M[][2] = {
/* Addr allmodes */
{0x00007038, 0x000002f8},
@@ -373,17 +375,17 @@ static const u32 ar9331_1p1_radio_core[][2] = {
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db4db6c},
+ {0x000160c8, 0x6db6db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10800000},
+ {0x00016140, 0x50804000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
- {0x00016280, 0x01000015},
+ {0x00016280, 0x01001015},
{0x00016284, 0x14d20000},
{0x00016288, 0x00318000},
{0x0001628c, 0x50000000},
@@ -622,12 +624,12 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x210d0401},
+ {0x0000a3a0, 0xab9a7144},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0x3c3c003d},
+ {0x0000a3ac, 0x30310030},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -686,100 +688,18 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x0000a7dc, 0x00000001},
};
-static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
+static const u32 ar9331_1p1_mac_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
- {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
- {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
- {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
- {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
- {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
- {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
- {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
- {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
-#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
-
static const u32 ar9331_1p1_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007020, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 57ed8a112173..0ac8be96097f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -18,6 +18,28 @@
#ifndef INITVALS_9330_1P2_H
#define INITVALS_9330_1P2_H
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
+
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
+
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
+
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
+
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
@@ -103,57 +125,6 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
{0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
};
-#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
-
-#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
-
-#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
-
-static const u32 ar9331_1p2_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
- {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
- {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
- {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
- {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
- {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
- {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
- {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
- {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
- {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
- {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
- {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
- {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
- {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
- {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
static const u32 ar9331_1p2_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -219,24 +190,318 @@ static const u32 ar9331_1p2_radio_core[][2] = {
{0x000163d4, 0x00000000},
};
-#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
-
-#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
-
-#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
-
-#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
-
-#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
-
-#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
-
-#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
-
-#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+static const u32 ar9331_1p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
-#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+static const u32 ar9331_1p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
-#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
+static const u32 ar9331_common_rx_gain_1p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 25db9215985a..a01f0edb6518 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -18,6 +18,20 @@
#ifndef INITVALS_9340_H
#define INITVALS_9340_H
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
+
+#define ar9340Common_rx_gain_table_1p0 ar9300Common_rx_gain_table_2p2
+
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9340_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9340_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
static const u32 ar9340_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
@@ -100,8 +114,6 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
-#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
-
static const u32 ar9340_1p0_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -215,16 +227,12 @@ static const u32 ar9340_1p0_radio_core_40M[][2] = {
{0x0000824c, 0x0001e800},
};
-#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
-
-#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
-
static const u32 ar9340_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009828, 0x06903081, 0x06903081, 0x09103881, 0x09103881},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
@@ -340,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x210d0401},
+ {0x0000a3a0, 0xab9a7144},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
@@ -714,266 +722,6 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
{0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
};
-static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x23232323},
- {0x0000b084, 0x21232323},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -1437,8 +1185,6 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000083d0, 0x000101ff},
};
-#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
-
static const u32 ar9340_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007008, 0x00000000},
@@ -1447,4 +1193,106 @@ static const u32 ar9340_1p0_soc_preamble[][2] = {
{0x00007038, 0x000004c2},
};
+static const u32 ar9340_cus227_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2c022220, 0x2c022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x30022222, 0x30022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x35022225, 0x35022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3b02222a, 0x3b02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3f02222c, 0x3f02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x0000a3a4, 0x00000011, 0x00000011, 0x00000011, 0x00000011},
+ {0x0000a3a8, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c},
+ {0x0000a3ac, 0x30303030, 0x30303030, 0x30303030, 0x30303030},
+};
+
#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 092b9d412e7f..1b6b4d0cfa97 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -20,7 +20,15 @@
/* AR9462 2.0 */
-static const u32 ar9462_modes_fast_clock_2p0[][3] = {
+#define ar9462_2p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9462_2p0_common_wo_xlna_rx_gain ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9462_2p0_common_5g_xlna_only_rxgain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9462_2p0_modes_fast_clock[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x00001030, 0x00000268, 0x000004d0},
{0x00001070, 0x0000018c, 0x00000318},
@@ -33,13 +41,6 @@ static const u32 ar9462_modes_fast_clock_2p0[][3] = {
{0x0000a254, 0x00000898, 0x00001130},
};
-static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18253ede},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003780c},
-};
-
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -56,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -95,11 +96,11 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
-static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_rx_gain[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
@@ -359,20 +360,13 @@ static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
{0x0000b1fc, 0x00000196},
};
-static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
+static const u32 ar9462_2p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18213ede},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0003780c},
};
-static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18212ede},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003780c},
-};
-
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
@@ -380,274 +374,7 @@ static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
-static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_low_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -879,7 +606,7 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
};
-static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_mix_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -942,7 +669,7 @@ static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
};
-static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_high_ob_db_tx_gain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -1240,19 +967,7 @@ static const u32 ar9462_2p0_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9462_2p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_mixed_rx_gain[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
@@ -1517,266 +1232,6 @@ static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
};
-static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
/* Addr allmodes */
{0x00009fd0, 0x0a2d6b93},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index 57fc5f459d0a..dc3adda46e8b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -20,6 +20,44 @@
/* AR9462 2.1 */
+#define ar9462_2p1_mac_postamble ar9462_2p0_mac_postamble
+
+#define ar9462_2p1_baseband_core ar9462_2p0_baseband_core
+
+#define ar9462_2p1_radio_core ar9462_2p0_radio_core
+
+#define ar9462_2p1_radio_postamble ar9462_2p0_radio_postamble
+
+#define ar9462_2p1_soc_postamble ar9462_2p0_soc_postamble
+
+#define ar9462_2p1_radio_postamble_sys2ant ar9462_2p0_radio_postamble_sys2ant
+
+#define ar9462_2p1_common_rx_gain ar9462_2p0_common_rx_gain
+
+#define ar9462_2p1_common_mixed_rx_gain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p1_common_5g_xlna_only_rxgain ar9462_2p0_common_5g_xlna_only_rxgain
+
+#define ar9462_2p1_baseband_core_mix_rxgain ar9462_2p0_baseband_core_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_mix_rxgain ar9462_2p0_baseband_postamble_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_5g_xlna ar9462_2p0_baseband_postamble_5g_xlna
+
+#define ar9462_2p1_common_wo_xlna_rx_gain ar9462_2p0_common_wo_xlna_rx_gain
+
+#define ar9462_2p1_modes_low_ob_db_tx_gain ar9462_2p0_modes_low_ob_db_tx_gain
+
+#define ar9462_2p1_modes_high_ob_db_tx_gain ar9462_2p0_modes_high_ob_db_tx_gain
+
+#define ar9462_2p1_modes_mix_ob_db_tx_gain ar9462_2p0_modes_mix_ob_db_tx_gain
+
+#define ar9462_2p1_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+#define ar9462_2p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
+#define ar9462_2p1_pciephy_clkreq_disable_L1 ar9462_2p0_pciephy_clkreq_disable_L1
+
static const u32 ar9462_2p1_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -183,168 +221,6 @@ static const u32 ar9462_2p1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9462_2p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_2p1_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a9f6b},
- {0x0000980c, 0x04900000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x32440bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098e4, 0x01ffffff},
- {0x000098e8, 0x01ffffff},
- {0x000098ec, 0x01ffffff},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009bf0, 0x80000000},
- {0x00009c04, 0xff55ff55},
- {0x00009c08, 0x0320ff55},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c0040b},
- {0x00009d18, 0x00000000},
- {0x00009e08, 0x0038230c},
- {0x00009e24, 0x990bb515},
- {0x00009e28, 0x0c6f0000},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x15262820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c555c2},
- {0x00009e58, 0xfd857722},
- {0x00009e5c, 0xe9198724},
- {0x00009fc0, 0x803e4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x00009fd0, 0x0a193b93},
- {0x0000a20c, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0c9bd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
- {0x0000a418, 0x2d001dce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00100000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x05000080},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a454, 0x07000000},
- {0x0000a644, 0xbfad9d74},
- {0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00002037},
- {0x0000a670, 0x03020100},
- {0x0000a674, 0x09080504},
- {0x0000a678, 0x0d0c0b0a},
- {0x0000a67c, 0x13121110},
- {0x0000a680, 0x31301514},
- {0x0000a684, 0x35343332},
- {0x0000a688, 0x00000036},
- {0x0000a690, 0x00000838},
- {0x0000a6b0, 0x0000000a},
- {0x0000a6b4, 0x00512c01},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000000},
- {0x0000a7f0, 0x80000000},
- {0x0000a8d0, 0x004b6a8e},
- {0x0000a8d4, 0x00000820},
- {0x0000a8dc, 0x00000000},
- {0x0000a8f0, 0x00000000},
- {0x0000a8f4, 0x00000000},
- {0x0000abf0, 0x80000000},
- {0x0000b2d0, 0x00000080},
- {0x0000b2d4, 0x00000000},
- {0x0000b2ec, 0x00000000},
- {0x0000b2f0, 0x00000000},
- {0x0000b2f4, 0x00000000},
- {0x0000b2f8, 0x00000000},
- {0x0000b408, 0x0e79e5c0},
- {0x0000b40c, 0x00820820},
- {0x0000b420, 0x00000000},
- {0x0000b6b0, 0x0000000a},
- {0x0000b6b4, 0x00000001},
-};
-
static const u32 ar9462_2p1_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -404,72 +280,6 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
-static const u32 ar9462_2p1_radio_core[][2] = {
- /* Addr allmodes */
- {0x00016000, 0x36db6db6},
- {0x00016004, 0x6db6db40},
- {0x00016008, 0x73f00000},
- {0x0001600c, 0x00000000},
- {0x00016010, 0x6d820001},
- {0x00016040, 0x7f80fff8},
- {0x0001604c, 0x2699e04f},
- {0x00016050, 0x6db6db6c},
- {0x00016058, 0x6c200000},
- {0x00016080, 0x000c0000},
- {0x00016084, 0x9a68048c},
- {0x00016088, 0x54214514},
- {0x0001608c, 0x1203040b},
- {0x00016090, 0x24926490},
- {0x00016098, 0xd2888888},
- {0x000160a0, 0x0a108ffe},
- {0x000160a4, 0x812fc491},
- {0x000160a8, 0x423c8000},
- {0x000160b4, 0x92000000},
- {0x000160b8, 0x0285dddc},
- {0x000160bc, 0x02908888},
- {0x000160c0, 0x00adb6d0},
- {0x000160c4, 0x6db6db60},
- {0x000160c8, 0x6db6db6c},
- {0x000160cc, 0x0de6c1b0},
- {0x00016100, 0x3fffbe04},
- {0x00016104, 0xfff80000},
- {0x00016108, 0x00200400},
- {0x00016110, 0x00000000},
- {0x00016144, 0x02084080},
- {0x00016148, 0x000080c0},
- {0x00016280, 0x050a0001},
- {0x00016284, 0x3d841418},
- {0x00016288, 0x00000000},
- {0x0001628c, 0xe3000000},
- {0x00016290, 0xa1005080},
- {0x00016294, 0x00000020},
- {0x00016298, 0x54a82900},
- {0x00016340, 0x121e4276},
- {0x00016344, 0x00300000},
- {0x00016400, 0x36db6db6},
- {0x00016404, 0x6db6db40},
- {0x00016408, 0x73f00000},
- {0x0001640c, 0x00000000},
- {0x00016410, 0x6c800001},
- {0x00016440, 0x7f80fff8},
- {0x0001644c, 0x4699e04f},
- {0x00016450, 0x6db6db6c},
- {0x00016500, 0x3fffbe04},
- {0x00016504, 0xfff80000},
- {0x00016508, 0x00200400},
- {0x00016510, 0x00000000},
- {0x00016544, 0x02084080},
- {0x00016548, 0x000080c0},
-};
-
-static const u32 ar9462_2p1_radio_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
- {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
- {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
- {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
-};
-
static const u32 ar9462_2p1_soc_preamble[][2] = {
/* Addr allmodes */
{0x000040a4, 0x00a0c9c9},
@@ -478,1297 +288,4 @@ static const u32 ar9462_2p1_soc_preamble[][2] = {
{0x00007038, 0x000004c2},
};
-static const u32 ar9462_2p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
-};
-
-static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-};
-
-static const u32 ar9462_2p1_common_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
- /* Addr allmodes */
- {0x00009fd0, 0x0a2d6b93},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
- {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
- {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
- {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
-};
-
-static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
- {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
- {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
- {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
- {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
- {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
- {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
- {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
- {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
- {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
- {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
- {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
- {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-};
-
-static const u32 ar9462_2p1_modes_fast_clock[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000400b, 0x00004016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
#endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 7c1845221e1c..ce83ce47a1ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,17 +20,11 @@
/* AR9485 1.1 */
-static const u32 ar9485_1_1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+#define ar9485_1_1_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
/* Addr allmodes */
@@ -546,100 +540,6 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
- {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
- {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
- {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
- {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
- {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
- {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
@@ -1323,13 +1223,6 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18013e5e},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
new file mode 100644
index 000000000000..3c9113d9b1bc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_953X_H
+#define INITVALS_953X_H
+
+#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define qca953x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+
+static const u32 qca953x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f3d7},
+ {0x00008248, 0x00000852},
+ {0x0000824c, 0x0001e7ae},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 qca953x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01884061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b91},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x1f020503},
+ {0x0000a39c, 0x29180c03},
+ {0x0000a3a0, 0x9a8b6844},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x6a6a6a6a},
+ {0x0000a3ac, 0x6a6a6a6a},
+ {0x0000a3b0, 0x00c8641a},
+ {0x0000a3b4, 0x0000001a},
+ {0x0000a3b8, 0x0088642a},
+ {0x0000a3bc, 0x000001fa},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce42108},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce73908},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce738e7},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x08000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca953x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x3f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x8036db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f080a},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d8},
+ {0x000160c4, 0x24b6db6c},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6db6fb7c},
+ {0x000160d0, 0x6db6da44},
+ {0x00016100, 0x07ff8001},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080d8},
+ {0x00016280, 0x01000901},
+ {0x00016284, 0x15d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x3f80fff8},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x8036db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x07ff8001},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x000080d8},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+};
+
+static const u32 qca953x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+ {0x00016104, 0xb7a00001, 0xb7a00001, 0xfff80005, 0xfff80005},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016504, 0xb7a00001, 0xb7a00001, 0xfff80001, 0xfff80001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca953x_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007000, 0x00000000},
+ {0x00007004, 0x00000000},
+ {0x00007008, 0x00000000},
+ {0x0000700c, 0x00000000},
+ {0x0000701c, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007024, 0x00000000},
+ {0x00007028, 0x00000000},
+ {0x0000702c, 0x00000000},
+ {0x00007030, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000000},
+};
+
+static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffe9ccc},
+ {0x0000a2e4, 0xffffe0f0},
+ {0x0000a2e8, 0xfffcff00},
+ {0x0000a410, 0x000050da},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x0f00000a},
+ {0x0000a514, 0x1300000c},
+ {0x0000a518, 0x1700000e},
+ {0x0000a51c, 0x1b000064},
+ {0x0000a520, 0x1f000242},
+ {0x0000a524, 0x23000229},
+ {0x0000a528, 0x270002a2},
+ {0x0000a52c, 0x2c001203},
+ {0x0000a530, 0x30001803},
+ {0x0000a534, 0x33000881},
+ {0x0000a538, 0x38001809},
+ {0x0000a53c, 0x3a000814},
+ {0x0000a540, 0x3f001a0c},
+ {0x0000a544, 0x43001a0e},
+ {0x0000a548, 0x46001812},
+ {0x0000a54c, 0x49001884},
+ {0x0000a550, 0x4d001e84},
+ {0x0000a554, 0x50001e69},
+ {0x0000a558, 0x550006f4},
+ {0x0000a55c, 0x59000ad3},
+ {0x0000a560, 0x5e000ad5},
+ {0x0000a564, 0x61001ced},
+ {0x0000a568, 0x660018d4},
+ {0x0000a56c, 0x660018d4},
+ {0x0000a570, 0x660018d4},
+ {0x0000a574, 0x660018d4},
+ {0x0000a578, 0x660018d4},
+ {0x0000a57c, 0x660018d4},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x0300ca02},
+ {0x0000a614, 0x00000e04},
+ {0x0000a618, 0x03014000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x03014000},
+ {0x0000a628, 0x03804c05},
+ {0x0000a62c, 0x0701de06},
+ {0x0000a630, 0x07819c07},
+ {0x0000a634, 0x0701dc07},
+ {0x0000a638, 0x0701dc07},
+ {0x0000a63c, 0x0701dc07},
+ {0x0000b2dc, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffe9ccc},
+ {0x0000b2e4, 0xffffe0f0},
+ {0x0000b2e8, 0xfffcff00},
+ {0x00016044, 0x010002d4},
+ {0x00016048, 0x66482400},
+ {0x00016280, 0x01000015},
+ {0x00016444, 0x010002d4},
+ {0x00016448, 0x66482400},
+};
+
+static const u32 qca953x_1p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050d6},
+ {0x0000a500, 0x00060020},
+ {0x0000a504, 0x04060060},
+ {0x0000a508, 0x080600a0},
+ {0x0000a50c, 0x0c068020},
+ {0x0000a510, 0x10068060},
+ {0x0000a514, 0x140680a0},
+ {0x0000a518, 0x18090040},
+ {0x0000a51c, 0x1b090080},
+ {0x0000a520, 0x1f0900c0},
+ {0x0000a524, 0x240c0041},
+ {0x0000a528, 0x280d0021},
+ {0x0000a52c, 0x2d0f0061},
+ {0x0000a530, 0x310f00a1},
+ {0x0000a534, 0x350e00a2},
+ {0x0000a538, 0x360e80a2},
+ {0x0000a53c, 0x380f00a2},
+ {0x0000a540, 0x3b0e00a3},
+ {0x0000a544, 0x3d110083},
+ {0x0000a548, 0x3e1100a3},
+ {0x0000a54c, 0x401100e3},
+ {0x0000a550, 0x421380e3},
+ {0x0000a554, 0x431780e3},
+ {0x0000a558, 0x461f80e3},
+ {0x0000a55c, 0x461f80e3},
+ {0x0000a560, 0x461f80e3},
+ {0x0000a564, 0x461f80e3},
+ {0x0000a568, 0x461f80e3},
+ {0x0000a56c, 0x461f80e3},
+ {0x0000a570, 0x461f80e3},
+ {0x0000a574, 0x461f80e3},
+ {0x0000a578, 0x461f80e3},
+ {0x0000a57c, 0x461f80e3},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x0301cc07},
+ {0x0000a630, 0x0301cc07},
+ {0x0000a634, 0x0301cc07},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index ccc5b6c99add..74d8bc05b317 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -20,6 +20,14 @@
/* AR955X 1.0 */
+#define ar955x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar955x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define ar955x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar955x_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar955x_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
@@ -37,13 +45,6 @@ static const u32 ar955x_1p0_radio_postamble[][5] = {
{0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
};
-static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
static const u32 ar955x_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
@@ -473,266 +474,6 @@ static const u32 ar955x_1p0_mac_core[][2] = {
{0x000083d0, 0x8c7901ff},
};
-static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x23232323},
- {0x0000b084, 0x21232323},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar955x_1p0_baseband_core[][2] = {
/* Addr allmodes */
{0x00009800, 0xafe68e30},
@@ -891,266 +632,6 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
-
static const u32 ar955x_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x00007000, 0x00000000},
@@ -1263,11 +744,6 @@ static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
{0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
};
-static const u32 ar955x_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
-
static const u32 ar955x_1p0_modes_fast_clock[][3] = {
/* Addr 5G_HT20 5G_HT40 */
{0x00001030, 0x00000268, 0x000004d0},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index a8c757b6124f..10d4a6cb1c3b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -20,6 +20,12 @@
/* AR9565 1.0 */
+#define ar9565_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9565_1p0_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar9565_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -182,18 +188,6 @@ static const u32 ar9565_1p0_mac_core[][2] = {
{0x000083d0, 0x800301ff},
};
-static const u32 ar9565_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
static const u32 ar9565_1p0_baseband_core[][2] = {
/* Addr allmodes */
{0x00009800, 0xafe68e30},
@@ -711,66 +705,6 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
{0x0000b1fc, 0x00000196},
};
-static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
- {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
- {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
- {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
- {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18212ede},
@@ -1231,11 +1165,4 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
new file mode 100644
index 000000000000..56810539971e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P1_H
+#define INITVALS_9565_1P1_H
+
+/* AR9565 1.1 */
+
+#define ar9565_1p1_mac_core ar9565_1p0_mac_core
+
+#define ar9565_1p1_mac_postamble ar9565_1p0_mac_postamble
+
+#define ar9565_1p1_baseband_core ar9565_1p0_baseband_core
+
+#define ar9565_1p1_baseband_postamble ar9565_1p0_baseband_postamble
+
+#define ar9565_1p1_radio_core ar9565_1p0_radio_core
+
+#define ar9565_1p1_soc_preamble ar9565_1p0_soc_preamble
+
+#define ar9565_1p1_soc_postamble ar9565_1p0_soc_postamble
+
+#define ar9565_1p1_Common_rx_gain_table ar9565_1p0_Common_rx_gain_table
+
+#define ar9565_1p1_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_Modes_lowest_ob_db_tx_gain_table
+
+#define ar9565_1p1_pciephy_clkreq_disable_L1 ar9565_1p0_pciephy_clkreq_disable_L1
+
+#define ar9565_1p1_modes_fast_clock ar9565_1p0_modes_fast_clock
+
+#define ar9565_1p1_common_wo_xlna_rx_gain_table ar9565_1p0_common_wo_xlna_rx_gain_table
+
+#define ar9565_1p1_modes_low_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_ob_db_tx_gain_table ar9565_1p0_modes_high_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_power_tx_gain_table ar9565_1p0_modes_high_power_tx_gain_table
+
+#define ar9565_1p1_baseband_core_txfir_coeff_japan_2484 ar9565_1p0_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9565_1p1_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+#endif /* INITVALS_9565_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index bdee2ed67219..e6aec2c0207f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -20,18 +20,34 @@
/* AR9580 1.0 */
+#define ar9580_1p0_soc_preamble ar9300_2p2_soc_preamble
+
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9580_1p0_radio_core ar9300_2p2_radio_core
+
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+
#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
static const u32 ar9580_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
- {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
- {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
@@ -41,12 +57,10 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
{0x0000980c, 0x04900000},
- {0x00009814, 0x3280c00a},
- {0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x6400a290},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x0d000600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
@@ -67,7 +81,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -198,8 +212,6 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
-
static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -306,7 +318,112 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
+static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016288, 0x05a2040a, 0x05a2040a, 0x05a20408, 0x05a20408},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -414,8 +531,6 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
static const u32 ar9580_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -679,14 +794,6 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
-
-#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-
-#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
-
-#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
-
static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
@@ -761,165 +868,271 @@ static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_soc_preamble[][2] = {
+static const u32 ar9580_1p0_rx_gain_table[][2] = {
/* Addr allmodes */
- {0x000040a4, 0x00a0c1c9},
- {0x00007008, 0x00000000},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000008},
-};
-
-#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
-
-static const u32 ar9580_1p0_radio_core[][2] = {
- /* Addr allmodes */
- {0x00016000, 0x36db6db6},
- {0x00016004, 0x6db6db40},
- {0x00016008, 0x73f00000},
- {0x0001600c, 0x00000000},
- {0x00016040, 0x7f80fff8},
- {0x0001604c, 0x76d005b5},
- {0x00016050, 0x556cf031},
- {0x00016054, 0x13449440},
- {0x00016058, 0x0c51c92c},
- {0x0001605c, 0x3db7fffc},
- {0x00016060, 0xfffffffc},
- {0x00016064, 0x000f0278},
- {0x0001606c, 0x6db60000},
- {0x00016080, 0x00000000},
- {0x00016084, 0x0e48048c},
- {0x00016088, 0x54214514},
- {0x0001608c, 0x119f481e},
- {0x00016090, 0x24926490},
- {0x00016098, 0xd2888888},
- {0x000160a0, 0x0a108ffe},
- {0x000160a4, 0x812fc370},
- {0x000160a8, 0x423c8000},
- {0x000160b4, 0x92480080},
- {0x000160c0, 0x00adb6d0},
- {0x000160c4, 0x6db6db60},
- {0x000160c8, 0x6db6db6c},
- {0x000160cc, 0x01e6c000},
- {0x00016100, 0x3fffbe01},
- {0x00016104, 0xfff80000},
- {0x00016108, 0x00080010},
- {0x00016144, 0x02084080},
- {0x00016148, 0x00000000},
- {0x00016280, 0x058a0001},
- {0x00016284, 0x3d840208},
- {0x00016288, 0x05a20408},
- {0x0001628c, 0x00038c07},
- {0x00016290, 0x00000004},
- {0x00016294, 0x458aa14f},
- {0x00016380, 0x00000000},
- {0x00016384, 0x00000000},
- {0x00016388, 0x00800700},
- {0x0001638c, 0x00800700},
- {0x00016390, 0x00800700},
- {0x00016394, 0x00000000},
- {0x00016398, 0x00000000},
- {0x0001639c, 0x00000000},
- {0x000163a0, 0x00000001},
- {0x000163a4, 0x00000001},
- {0x000163a8, 0x00000000},
- {0x000163ac, 0x00000000},
- {0x000163b0, 0x00000000},
- {0x000163b4, 0x00000000},
- {0x000163b8, 0x00000000},
- {0x000163bc, 0x00000000},
- {0x000163c0, 0x000000a0},
- {0x000163c4, 0x000c0000},
- {0x000163c8, 0x14021402},
- {0x000163cc, 0x00001402},
- {0x000163d0, 0x00000000},
- {0x000163d4, 0x00000000},
- {0x00016400, 0x36db6db6},
- {0x00016404, 0x6db6db40},
- {0x00016408, 0x73f00000},
- {0x0001640c, 0x00000000},
- {0x00016440, 0x7f80fff8},
- {0x0001644c, 0x76d005b5},
- {0x00016450, 0x556cf031},
- {0x00016454, 0x13449440},
- {0x00016458, 0x0c51c92c},
- {0x0001645c, 0x3db7fffc},
- {0x00016460, 0xfffffffc},
- {0x00016464, 0x000f0278},
- {0x0001646c, 0x6db60000},
- {0x00016500, 0x3fffbe01},
- {0x00016504, 0xfff80000},
- {0x00016508, 0x00080010},
- {0x00016544, 0x02084080},
- {0x00016548, 0x00000000},
- {0x00016780, 0x00000000},
- {0x00016784, 0x00000000},
- {0x00016788, 0x00800700},
- {0x0001678c, 0x00800700},
- {0x00016790, 0x00800700},
- {0x00016794, 0x00000000},
- {0x00016798, 0x00000000},
- {0x0001679c, 0x00000000},
- {0x000167a0, 0x00000001},
- {0x000167a4, 0x00000001},
- {0x000167a8, 0x00000000},
- {0x000167ac, 0x00000000},
- {0x000167b0, 0x00000000},
- {0x000167b4, 0x00000000},
- {0x000167b8, 0x00000000},
- {0x000167bc, 0x00000000},
- {0x000167c0, 0x000000a0},
- {0x000167c4, 0x000c0000},
- {0x000167c8, 0x14021402},
- {0x000167cc, 0x00001402},
- {0x000167d0, 0x00000000},
- {0x000167d4, 0x00000000},
- {0x00016800, 0x36db6db6},
- {0x00016804, 0x6db6db40},
- {0x00016808, 0x73f00000},
- {0x0001680c, 0x00000000},
- {0x00016840, 0x7f80fff8},
- {0x0001684c, 0x76d005b5},
- {0x00016850, 0x556cf031},
- {0x00016854, 0x13449440},
- {0x00016858, 0x0c51c92c},
- {0x0001685c, 0x3db7fffc},
- {0x00016860, 0xfffffffc},
- {0x00016864, 0x000f0278},
- {0x0001686c, 0x6db60000},
- {0x00016900, 0x3fffbe01},
- {0x00016904, 0xfff80000},
- {0x00016908, 0x00080010},
- {0x00016944, 0x02084080},
- {0x00016948, 0x00000000},
- {0x00016b80, 0x00000000},
- {0x00016b84, 0x00000000},
- {0x00016b88, 0x00800700},
- {0x00016b8c, 0x00800700},
- {0x00016b90, 0x00800700},
- {0x00016b94, 0x00000000},
- {0x00016b98, 0x00000000},
- {0x00016b9c, 0x00000000},
- {0x00016ba0, 0x00000001},
- {0x00016ba4, 0x00000001},
- {0x00016ba8, 0x00000000},
- {0x00016bac, 0x00000000},
- {0x00016bb0, 0x00000000},
- {0x00016bb4, 0x00000000},
- {0x00016bb8, 0x00000000},
- {0x00016bbc, 0x00000000},
- {0x00016bc0, 0x000000a0},
- {0x00016bc4, 0x000c0000},
- {0x00016bc8, 0x14021402},
- {0x00016bcc, 0x00001402},
- {0x00016bd0, 0x00000000},
- {0x00016bd4, 0x00000000},
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
};
static const u32 ar9580_1p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009814, 0x3280c00a, 0x3280c00a, 0x3280c00a, 0x3280c00a},
+ {0x00009818, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
@@ -956,7 +1169,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -994,4 +1207,13 @@ static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
{0x00004044, 0x00000000},
};
+static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009814, 0x3400c00f, 0x3400c00f},
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009828, 0x06903080, 0x06903080},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
#endif /* INITVALS_9580_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 60a5da53668f..b5ac32cfbeb8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -27,40 +27,15 @@
#include "common.h"
#include "mci.h"
#include "dfs.h"
-
-/*
- * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
- * should rely on this file or its contents.
- */
+#include "spectral.h"
struct ath_node;
+struct ath_rate_table;
-/* Macro to expand scalars to 64-bit objects */
-
-#define ito64(x) (sizeof(x) == 1) ? \
- (((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 2) ? \
- (((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 4) ? \
- (((unsigned long long int)(x)) & 0xffffffff) : \
- (unsigned long long int)(x))
-
-/* increment with wrap-around */
-#define INCR(_l, _sz) do { \
- (_l)++; \
- (_l) &= ((_sz) - 1); \
- } while (0)
-
-/* decrement with wrap-around */
-#define DECR(_l, _sz) do { \
- (_l)--; \
- (_l) &= ((_sz) - 1); \
- } while (0)
-
-#define TSF_TO_TU(_h,_l) \
- ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-
-#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+extern struct ieee80211_ops ath9k_ops;
+extern int ath9k_modparam_nohwcrypt;
+extern int led_blink;
+extern bool is_ath9k_unloaded;
struct ath_config {
u16 txpowlimit;
@@ -70,6 +45,17 @@ struct ath_config {
/* Descriptor Management */
/*************************/
+#define ATH_TXSTATUS_RING_SIZE 512
+
+/* Macro to expand scalars to 64-bit objects */
+#define ito64(x) (sizeof(x) == 1) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 2) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 4) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
+
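For reference, a small user-space sketch (illustrative only, not part of the patch) of what the relocated ito64() macro does: it masks a scalar to its own width, based on sizeof, before widening it to 64 bits.

	/* Illustrative user-space sketch only; ito64 copied from the hunk above. */
	#include <stdio.h>

	#define ito64(x) (sizeof(x) == 1) ? \
		(((unsigned long long int)(x)) & (0xff)) : \
		(sizeof(x) == 2) ? \
		(((unsigned long long int)(x)) & 0xffff) : \
		((sizeof(x) == 4) ? \
		(((unsigned long long int)(x)) & 0xffffffff) : \
		(unsigned long long int)(x))

	int main(void)
	{
		unsigned char  c = 0xab;
		unsigned short s = 0xabcd;
		unsigned int   i = 0xabcdef01u;

		/* Each scalar is masked to its own width, then widened to 64 bits. */
		printf("%llx %llx %llx\n", ito64(c), ito64(s), ito64(i));
		return 0;
	}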
#define ATH_TXBUF_RESET(_bf) do { \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
@@ -77,23 +63,6 @@ struct ath_config {
sizeof(struct ath_buf_state)); \
} while (0)
-/**
- * enum buffer_type - Buffer type flags
- *
- * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
- * @BUF_AGGR: Indicates whether the buffer can be aggregated
- * (used in aggregation scheduling)
- */
-enum buffer_type {
- BUF_AMPDU = BIT(0),
- BUF_AGGR = BIT(1),
-};
-
-#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
-#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-
-#define ATH_TXSTATUS_RING_SIZE 512
-
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
@@ -113,11 +82,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* RX / TX */
/***********/
+#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+
+/* increment with wrap-around */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
#define ATH_RXBUF 512
#define ATH_TXBUF 512
#define ATH_TXBUF_RESERVE 5
#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
+#define ATH_MAX_SW_RETRIES 30
#define TID_TO_WME_AC(_tid) \
((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
@@ -133,6 +111,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_MIN_QDEPTH 2
/* minimum h/w qdepth for non-aggregated traffic */
#define ATH_NON_AGGR_MIN_QDEPTH 8
+#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_TXFIFO_DEPTH 8
+#define ATH_TX_ERROR 0x01
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
@@ -165,11 +146,10 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
-#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_HT_RATE(rate) (rate & 0x80)
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
-#define ATH_TX_COMPLETE_POLL_INT 1000
-
-#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -214,6 +194,21 @@ struct ath_rxbuf {
dma_addr_t bf_buf_addr;
};
+/**
+ * enum buffer_type - Buffer type flags
+ *
+ * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
+ * @BUF_AGGR: Indicates whether the buffer can be aggregated
+ * (used in aggregation scheduling)
+ */
+enum buffer_type {
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+};
+
+#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -269,6 +264,10 @@ struct ath_node {
bool sleeping;
bool no_ps_filter;
+
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+ struct ath_rx_rate_stats rx_rate_stats;
+#endif
};
struct ath_tx_control {
@@ -278,7 +277,6 @@ struct ath_tx_control {
struct ieee80211_sta *sta;
};
-#define ATH_TX_ERROR 0x01
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -372,6 +370,22 @@ struct ath_vif {
struct ath_buf *av_bcbuf;
};
+struct ath9k_vif_iter_data {
+ u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ bool has_hw_macaddr;
+
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+};
+
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
+
/*******************/
/* Beacon Handling */
/*******************/
@@ -387,6 +401,9 @@ struct ath_vif {
#define ATH_DEFAULT_BMISS_LIMIT 10
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
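As a quick sanity check (illustrative only, not part of the patch), TSF_TO_TU() folds a 64-bit TSF, passed as high/low 32-bit halves in microseconds, into time units of 1024 us:

	/* Illustrative user-space sketch; TSF_TO_TU copied from the hunk above. */
	#include <stdio.h>
	#include <stdint.h>

	#define TSF_TO_TU(_h,_l) \
		((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))

	int main(void)
	{
		uint64_t tsf = 5242880ULL;             /* 5120 TU * 1024 us */
		uint32_t tu  = TSF_TO_TU(tsf >> 32, tsf);

		printf("%u\n", tu);                    /* prints 5120 */
		return 0;
	}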
struct ath_beacon_config {
int beacon_interval;
u16 listen_interval;
@@ -420,12 +437,10 @@ struct ath_beacon {
};
void ath9k_beacon_tasklet(unsigned long data);
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
u32 changed);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc);
@@ -440,17 +455,14 @@ bool ath9k_csa_is_finished(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
-#define ATH_ANI_MAX_SKIP_COUNT 10
-
-#define ATH_PAPRD_TIMEOUT 100 /* msecs */
-#define ATH_PLL_WORK_INTERVAL 100
+#define ATH_ANI_MAX_SKIP_COUNT 10
+#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
void ath_tx_complete_poll_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
-void ath_hw_check(struct work_struct *work);
+bool ath_hw_check(struct ath_softc *sc);
void ath_hw_pll_work(struct work_struct *work);
-void ath_rx_poll(unsigned long data);
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
void ath_start_ani(struct ath_softc *sc);
@@ -459,6 +471,7 @@ void ath_check_ani(struct ath_softc *sc);
int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
+void ath_ps_full_sleep(unsigned long data);
/**********/
/* BTCOEX */
@@ -476,20 +489,19 @@ enum bt_op_flags {
};
struct ath_btcoex {
- bool hw_timer_enabled;
spinlock_t btcoex_lock;
struct timer_list period_timer; /* Timer for BT period */
+ struct timer_list no_stomp_timer;
u32 bt_priority_cnt;
unsigned long bt_priority_time;
unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
- u32 btcoex_no_stomp; /* in usec */
+ u32 btcoex_no_stomp; /* in msec */
u32 btcoex_period; /* in msec */
- u32 btscan_no_stomp; /* in usec */
+ u32 btscan_no_stomp; /* in msec */
u32 duty_cycle;
u32 bt_wait_time;
int rssi_count;
- struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
u8 stomp_audio;
};
@@ -537,12 +549,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-struct ath9k_wow_pattern {
- u8 pattern_bytes[MAX_PATTERN_SIZE];
- u8 mask_bytes[MAX_PATTERN_SIZE];
- u32 pattern_len;
-};
-
/********************/
/* LED Control */
/********************/
@@ -570,6 +576,40 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
}
#endif
+/************************/
+/* Wake on Wireless LAN */
+/************************/
+
+struct ath9k_wow_pattern {
+ u8 pattern_bytes[MAX_PATTERN_SIZE];
+ u8 mask_bytes[MAX_PATTERN_SIZE];
+ u32 pattern_len;
+};
+
+#ifdef CONFIG_ATH9K_WOW
+void ath9k_init_wow(struct ieee80211_hw *hw);
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath9k_resume(struct ieee80211_hw *hw);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+#else
+static inline void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+}
+static inline int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ return 0;
+}
+static inline int ath9k_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+static inline void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+}
+#endif /* CONFIG_ATH9K_WOW */
+
/*******************************/
/* Antenna diversity/combining */
/*******************************/
@@ -642,19 +682,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_PCI_AR9565_1ANT 0x0080
#define ATH9K_PCI_AR9565_2ANT 0x0100
#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
+#define ATH9K_PCI_KILLER 0x0400
/*
* Default cache line size, in bytes.
* Used when PCI device not fully initialized by bootrom/BIOS
*/
#define DEFAULT_CACHELINE 32
-#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 30
-#define ATH_CHAN_MAX 255
-
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
-#define ATH_RATE_DUMMY_MARKER 0
+#define MAX_GTT_CNT 5
enum sc_op_flags {
SC_OP_INVALID,
@@ -673,37 +710,6 @@ enum sc_op_flags {
#define PS_BEACON_SYNC BIT(4)
#define PS_WAIT_FOR_ANI BIT(5)
-struct ath_rate_table;
-
-struct ath9k_vif_iter_data {
- u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
- u8 mask[ETH_ALEN]; /* bssid mask */
- bool has_hw_macaddr;
-
- int naps; /* number of AP vifs */
- int nmeshes; /* number of mesh vifs */
- int nstations; /* number of station vifs */
- int nwds; /* number of WDS vifs */
- int nadhocs; /* number of adhoc vifs */
-};
-
-/* enum spectral_mode:
- *
- * @SPECTRAL_DISABLED: spectral mode is disabled
- * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
- * something else.
- * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
- * is performed manually.
- * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
- * during a channel scan.
- */
-enum spectral_mode {
- SPECTRAL_DISABLED = 0,
- SPECTRAL_BACKGROUND,
- SPECTRAL_MANUAL,
- SPECTRAL_CHANSCAN,
-};
-
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -721,14 +727,14 @@ struct ath_softc {
spinlock_t sc_pcu_lock;
struct mutex mutex;
struct work_struct paprd_work;
- struct work_struct hw_check_work;
struct work_struct hw_reset_work;
struct completion paprd_complete;
+ wait_queue_head_t tx_wait;
- unsigned int hw_busy_count;
unsigned long sc_flags;
unsigned long driver_data;
+ u8 gtt_cnt;
u32 intrstatus;
u16 ps_flags; /* PS_* */
u16 curtxpow;
@@ -759,7 +765,7 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
- struct timer_list rx_poll_timer;
+ struct timer_list sleep_timer;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
@@ -784,199 +790,54 @@ struct ath_softc {
bool tx99_state;
s16 tx99_power;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
atomic_t wow_got_bmiss_intr;
atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
u32 wow_intr_before_sleep;
#endif
};
-#define SPECTRAL_SCAN_BITMASK 0x10
-/* Radar info packet format, used for DFS and spectral formats. */
-struct ath_radar_info {
- u8 pulse_length_pri;
- u8 pulse_length_ext;
- u8 pulse_bw_info;
-} __packed;
-
-/* The HT20 spectral data has 4 bytes of additional information at it's end.
- *
- * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: all bins max_magnitude[9:2]
- * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_mag_info {
- u8 all_bins[3];
- u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_NUM_BINS 56
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data by -1/+2. This struct is for reference only.
- */
-struct ath_ht20_fft_packet {
- u8 data[SPECTRAL_HT20_NUM_BINS];
- struct ath_ht20_mag_info mag_info;
- struct ath_radar_info radar_info;
-} __packed;
-
-#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
-
-/* Dynamic 20/40 mode:
- *
- * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: lower bins max_magnitude[9:2]
- * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
- * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: upper bins max_magnitude[9:2]
- * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_40_mag_info {
- u8 lower_bins[3];
- u8 upper_bins[3];
- u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_40_NUM_BINS 128
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data. This struct is for reference only.
- */
-struct ath_ht20_40_fft_packet {
- u8 data[SPECTRAL_HT20_40_NUM_BINS];
- struct ath_ht20_40_mag_info mag_info;
- struct ath_radar_info radar_info;
-} __packed;
-
-
-#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
-
-/* grabs the max magnitude from the all/upper/lower bins */
-static inline u16 spectral_max_magnitude(u8 *bins)
-{
- return (bins[0] & 0xc0) >> 6 |
- (bins[1] & 0xff) << 2 |
- (bins[2] & 0x03) << 10;
-}
+/********/
+/* TX99 */
+/********/
-/* return the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+#ifdef CONFIG_ATH9K_TX99
+void ath9k_tx99_init_debug(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+#else
+static inline void ath9k_tx99_init_debug(struct ath_softc *sc)
{
- s8 m = (bins[2] & 0xfc) >> 2;
-
- /* TODO: this still doesn't always report the right values ... */
- if (m > 32)
- m |= 0xe0;
- else
- m &= ~0xe0;
-
- return m + 29;
}
-
-/* return the bitmap weight from the all/upper/lower bins */
-static inline u8 spectral_bitmap_weight(u8 *bins)
+static inline int ath9k_tx99_send(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
- return bins[0] & 0x3f;
+ return 0;
}
-
-/* FFT sample format given to userspace via debugfs.
- *
- * Please keep the type/length at the front position and change
- * other fields after adding another sample type
- *
- * TODO: this might need rework when switching to nl80211-based
- * interface.
- */
-enum ath_fft_sample_type {
- ATH_FFT_SAMPLE_HT20 = 1,
- ATH_FFT_SAMPLE_HT20_40,
-};
-
-struct fft_sample_tlv {
- u8 type; /* see ath_fft_sample */
- __be16 length;
- /* type dependent data follows */
-} __packed;
-
-struct fft_sample_ht20 {
- struct fft_sample_tlv tlv;
-
- u8 max_exp;
-
- __be16 freq;
- s8 rssi;
- s8 noise;
-
- __be16 max_magnitude;
- u8 max_index;
- u8 bitmap_weight;
-
- __be64 tsf;
-
- u8 data[SPECTRAL_HT20_NUM_BINS];
-} __packed;
-
-struct fft_sample_ht20_40 {
- struct fft_sample_tlv tlv;
-
- u8 channel_type;
- __be16 freq;
-
- s8 lower_rssi;
- s8 upper_rssi;
-
- __be64 tsf;
-
- s8 lower_noise;
- s8 upper_noise;
-
- __be16 lower_max_magnitude;
- __be16 upper_max_magnitude;
-
- u8 lower_max_index;
- u8 upper_max_index;
-
- u8 lower_bitmap_weight;
- u8 upper_bitmap_weight;
-
- u8 max_exp;
-
- u8 data[SPECTRAL_HT20_40_NUM_BINS];
-} __packed;
-
-int ath9k_tx99_init(struct ath_softc *sc);
-void ath9k_tx99_deinit(struct ath_softc *sc);
-int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_tx_control *txctl);
-
-void ath9k_tasklet(unsigned long data);
-int ath_cabq_update(struct ath_softc *);
+#endif /* CONFIG_ATH9K_TX99 */
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
{
common->bus_ops->read_cachesize(common, csz);
}
-extern struct ieee80211_ops ath9k_ops;
-extern int ath9k_modparam_nohwcrypt;
-extern int led_blink;
-extern bool is_ath9k_unloaded;
-
+void ath9k_tasklet(unsigned long data);
+int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
+int ath_reset(struct ath_softc *sc);
+void ath_cancel_work(struct ath_softc *sc);
+void ath_restart_work(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
- enum spectral_mode spectral_mode);
-
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+void ath_start_rfkill_poll(struct ath_softc *sc);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_ps_wakeup(struct ath_softc *sc);
+void ath9k_ps_restore(struct ath_softc *sc);
#ifdef CONFIG_ATH9K_PCI
int ath_pci_init(void);
@@ -994,15 +855,4 @@ static inline int ath_ahb_init(void) { return 0; };
static inline void ath_ahb_exit(void) {};
#endif
-void ath9k_ps_wakeup(struct ath_softc *sc);
-void ath9k_ps_restore(struct ath_softc *sc);
-
-u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
-
-void ath_start_rfkill_poll(struct ath_softc *sc);
-void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ath9k_vif_iter_data *iter_data);
-
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 17be35392bb4..2e8bba0eb361 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -274,18 +274,19 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
return slot;
}
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- u64 tsfadjust;
+ u32 tsfadjust;
if (avp->av_bslot == 0)
return;
- tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+ tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(tsfadjust);
ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->av_bslot);
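The hunk above changes the order of operations: the old code divided the slot offset by ATH_BCBUF while still in whole TUs, so any remainder was truncated before the conversion to microseconds; the new code converts to microseconds first and divides afterwards. A small arithmetic sketch of the difference (illustrative only; TU_TO_USEC(x) == x * 1024 and ATH_BCBUF == 8 are assumptions, not taken from this section):

	/* Illustrative arithmetic only, with assumed macro values. */
	#include <stdio.h>

	#define TU_TO_USEC(x)	((x) * 1024)
	#define ATH_BCBUF	8

	int main(void)
	{
		unsigned int beacon_interval = 100;	/* in TU */
		unsigned int bslot = 1;

		/* Old order: divide in TU first, then convert; truncates to 12288 us. */
		unsigned int old_us = TU_TO_USEC(beacon_interval * bslot / ATH_BCBUF);
		/* New order: convert to us first, then divide; yields 12800 us. */
		unsigned int new_us = TU_TO_USEC(beacon_interval * bslot) / ATH_BCBUF;

		printf("old=%u new=%u\n", old_us, new_us);
		return 0;
	}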
@@ -336,8 +337,14 @@ void ath9k_beacon_tasklet(unsigned long data)
ath9k_hw_check_nav(ah);
- if (!ath9k_hw_check_alive(ah))
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+ /*
+ * If the previous beacon has not been transmitted
+ * and a MAC/BB hang has been identified, return
+ * here because a chip reset would have been
+ * initiated.
+ */
+ if (!ath_hw_check(sc))
+ return;
if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
ath_dbg(common, BSTUCK,
@@ -431,6 +438,33 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
}
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+ return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
+ unsigned int interval)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int offset;
+
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
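The helper added above reduces a 64-bit TSF modulo a beacon interval given in TU without applying % to the full 64-bit value: the TSF is split into a sub-TU microsecond remainder, a low TU part and a high part, each part is reduced separately, and the pieces are recombined. A user-space sketch (illustrative only) that checks the decomposition against plain 64-bit arithmetic, assuming the interval stays well below 1024 TU so the intermediate shift cannot overflow 32 bits:

	/* Illustrative user-space check of the TU-modulo decomposition above,
	 * assuming 1 TU == 1024 us and a divisor well below 1024 TU. */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
	{
		uint32_t tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;

		tsf_mod = tsf & (1024 - 1);        /* sub-TU microseconds      */
		tsf_hi  = tsf >> 32;               /* upper half of the TSF    */
		tsf_lo  = (uint32_t)tsf >> 10;     /* lower half, in TU        */

		mod_hi  = tsf_hi % div_tu;
		mod_lo  = ((mod_hi << 22) + tsf_lo) % div_tu;

		return (mod_lo << 10) | tsf_mod;   /* tsf mod (div_tu TU), in us */
	}

	int main(void)
	{
		uint64_t tsf = 0x123456789abcULL;  /* arbitrary TSF value in us */
		uint32_t bintval = 100;            /* beacon interval in TU     */

		/* The decomposition must agree with plain 64-bit arithmetic. */
		printf("%u %u\n", mod_tsf64_tu(tsf, bintval),
		       (uint32_t)(tsf % ((uint64_t)bintval * 1024)));
		return 0;
	}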
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -446,7 +480,8 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
intval /= ATH_BCBUF;
- nexttbtt = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
@@ -458,7 +493,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
(conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval, true);
+ ath9k_beacon_init(sc, nexttbtt, intval, false);
}
/*
@@ -475,11 +510,9 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
- int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount;
- u32 nexttbtt = 0, intval, tsftu;
+ int dtim_intval, sleepduration;
+ u32 nexttbtt = 0, intval;
u64 tsf;
- int num_beacons, offset, dtim_dec_count, cfp_dec_count;
/* No need to configure beacon if we are not associated */
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
@@ -492,53 +525,25 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
intval = conf->beacon_interval;
/*
- * Setup dtim and cfp parameters according to
+ * Setup dtim parameters according to
* last beacon we received (which may be none).
*/
- dtimperiod = conf->dtim_period;
- dtimcount = conf->dtim_count;
- if (dtimcount >= dtimperiod) /* NB: sanity check */
- dtimcount = 0;
- cfpperiod = 1; /* NB: no PCF support yet */
- cfpcount = 0;
-
+ dtim_intval = intval * conf->dtim_period;
sleepduration = conf->listen_interval * intval;
/*
* Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim+cfp state for the result.
+ * TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
- num_beacons = tsftu / intval + 1;
- offset = tsftu % intval;
- nexttbtt = tsftu - offset;
- if (offset)
- nexttbtt += intval;
-
- /* DTIM Beacon every dtimperiod Beacon */
- dtim_dec_count = num_beacons % dtimperiod;
- /* CFP every cfpperiod DTIM Beacon */
- cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
- if (dtim_dec_count)
- cfp_dec_count++;
-
- dtimcount -= dtim_dec_count;
- if (dtimcount < 0)
- dtimcount += dtimperiod;
-
- cfpcount -= cfp_dec_count;
- if (cfpcount < 0)
- cfpcount += cfpperiod;
-
- bs.bs_intval = intval;
+ nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
+
+ bs.bs_intval = TU_TO_USEC(intval);
+ bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
bs.bs_nexttbtt = nexttbtt;
- bs.bs_dtimperiod = dtimperiod*intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
- bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
- bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
- bs.bs_cfpmaxduration = 0;
+ bs.bs_nextdtim = nexttbtt;
+ if (conf->dtim_period > 1)
+ bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
/*
* Calculate the number of consecutive beacons to miss* before taking
@@ -566,18 +571,16 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
* XXX fixed at 100ms
*/
- bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ sleepduration));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
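With this rewrite the STA beacon-state fields are kept in microseconds rather than TU. A short worked sketch of the values the new code produces (illustrative only; TU_TO_USEC(x) == x * 1024, beacon_interval == 100 TU, dtim_period == 2 and listen_interval == 1 are assumed inputs):

	/* Illustrative arithmetic only, mirroring the assignments above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int intval = 100;                      /* beacon interval, TU */
		unsigned int bs_intval = intval * 1024;         /* 102400 us */
		unsigned int bs_dtimperiod = 2 * bs_intval;     /* 204800 us */
		unsigned int sleepduration = 1 * intval;        /* listen_interval * intval, TU */
		unsigned int ms_tu = 100 * 1000 / 1024;         /* IEEE80211_MS_TO_TU(100) = 97 TU */
		unsigned int rounded = ((ms_tu + sleepduration - 1) /
					sleepduration) * sleepduration;  /* roundup -> 100 TU */
		unsigned int bs_sleepduration = rounded * 1024; /* 102400 us */

		/* 102400 us is below bs_dtimperiod, so the clamp above does not trigger. */
		printf("%u %u %u\n", bs_intval, bs_dtimperiod, bs_sleepduration);
		return 0;
	}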
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, BEACON,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
@@ -600,25 +603,11 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
intval = TU_TO_USEC(conf->beacon_interval);
- if (conf->ibss_creator) {
+ if (conf->ibss_creator)
nexttbtt = intval;
- } else {
- u32 tbtt, offset, tsftu;
- u64 tsf;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * sync'd TSF.
- */
- tsf = ath9k_hw_gettsf64(ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- offset = tsftu % conf->beacon_interval;
- tbtt = tsftu - offset;
- if (offset)
- tbtt += conf->beacon_interval;
-
- nexttbtt = TU_TO_USEC(tbtt);
- }
+ else
+ nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
@@ -640,7 +629,8 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
set_bit(SC_OP_BEACONS, &sc->sc_flags);
}
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
@@ -711,12 +701,17 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
unsigned long flags;
bool skip_beacon = false;
+ if (vif->type == NL80211_IFTYPE_AP)
+ ath9k_set_tsfadjust(sc, vif);
+
+ if (!ath9k_allow_beacon_config(sc, vif))
+ return;
+
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
return;
-
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 9963b0bf9f72..3dfc2c7f1f07 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -66,7 +66,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
};
- u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
if (AR_SREV_9300_20_OR_LATER(ah))
@@ -88,11 +87,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
-
- for (i = 0; i < 32; i++) {
- idx = (debruijn32 << i) >> 27;
- ah->hw_gen_timers.gen_timer_index[idx] = i;
- }
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index a7e5a05b2eff..768c733cad31 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -98,10 +98,8 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
{
struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
- u8 chan_idx;
- chan_idx = curchan->hw_value;
- channel = &ah->channels[chan_idx];
+ channel = &ah->channels[curchan->hw_value];
ath9k_cmn_update_ichannel(channel, chandef);
return channel;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 83a2c59f680b..ab7264c1d8f7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-#include <linux/relay.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -27,6 +26,47 @@
#define REG_READ_D(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+ if (sync_cause)
+ sc->debug.stats.istats.sync_cause_all++;
+ if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+ sc->debug.stats.istats.sync_rtc_irq++;
+ if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+ sc->debug.stats.istats.sync_mac_irq++;
+ if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+ sc->debug.stats.istats.eeprom_illegal_access++;
+ if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+ sc->debug.stats.istats.apb_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+ sc->debug.stats.istats.pci_mode_conflict++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+ sc->debug.stats.istats.host1_fatal++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+ sc->debug.stats.istats.host1_perr++;
+ if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+ sc->debug.stats.istats.trcv_fifo_perr++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+ sc->debug.stats.istats.radm_cpl_ep++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_dllp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_tlp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+ sc->debug.stats.istats.radm_cpl_ecrc_err++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+ sc->debug.stats.istats.radm_cpl_timeout++;
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ sc->debug.stats.istats.local_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+ sc->debug.stats.istats.pm_access++;
+ if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+ sc->debug.stats.istats.mac_awake++;
+ if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+ sc->debug.stats.istats.mac_asleep++;
+ if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+ sc->debug.stats.istats.mac_sleep_access++;
+}
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -903,14 +943,10 @@ static const struct file_operations fops_reset = {
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
-
#define RXS_ERR(s, e) \
do { \
len += scnprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
+ "%18s : %10u\n", s, \
sc->debug.stats.rxstats.e);\
} while (0)
@@ -923,6 +959,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
+ RXS_ERR("PKTS-ALL", rx_pkts_all);
+ RXS_ERR("BYTES-ALL", rx_bytes_all);
+ RXS_ERR("BEACONS", rx_beacons);
+ RXS_ERR("FRAGS", rx_frags);
+ RXS_ERR("SPECTRAL", rx_spectral);
+
RXS_ERR("CRC ERR", crc_err);
RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
RXS_ERR("PHY ERR", phy_err);
@@ -930,43 +972,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
- RXS_ERR("RX-LENGTH-ERR", rx_len_err);
- RXS_ERR("RX-OOM-ERR", rx_oom_err);
- RXS_ERR("RX-RATE-ERR", rx_rate_err);
- RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
-
- PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
- RXS_ERR("RX-Pkts-All", rx_pkts_all);
- RXS_ERR("RX-Bytes-All", rx_bytes_all);
- RXS_ERR("RX-Beacons", rx_beacons);
- RXS_ERR("RX-Frags", rx_frags);
- RXS_ERR("RX-Spectral", rx_spectral);
+ RXS_ERR("LENGTH-ERR", rx_len_err);
+ RXS_ERR("OOM-ERR", rx_oom_err);
+ RXS_ERR("RATE-ERR", rx_rate_err);
+ RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
if (len > size)
len = size;
@@ -977,7 +986,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
return retval;
#undef RXS_ERR
-#undef PHY_ERR
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
@@ -1016,293 +1024,67 @@ static const struct file_operations fops_recv = {
.llseek = default_llseek,
};
-static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char *mode = "";
- unsigned int len;
-
- switch (sc->spectral_mode) {
- case SPECTRAL_DISABLED:
- mode = "disable";
- break;
- case SPECTRAL_BACKGROUND:
- mode = "background";
- break;
- case SPECTRAL_CHANSCAN:
- mode = "chanscan";
- break;
- case SPECTRAL_MANUAL:
- mode = "manual";
- break;
- }
- len = strlen(mode);
- return simple_read_from_buffer(user_buf, count, ppos, mode, len);
-}
-
-static ssize_t write_file_spec_scan_ctl(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- ssize_t len;
-
- if (config_enabled(CONFIG_ATH9K_TX99))
- return -EOPNOTSUPP;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
-
- if (strncmp("trigger", buf, 7) == 0) {
- ath9k_spectral_scan_trigger(sc->hw);
- } else if (strncmp("background", buf, 9) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
- ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
- } else if (strncmp("chanscan", buf, 8) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
- ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
- } else if (strncmp("manual", buf, 6) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
- ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
- } else if (strncmp("disable", buf, 7) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
- ath_dbg(common, CONFIG, "spectral scan: disabled\n");
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations fops_spec_scan_ctl = {
- .read = read_file_spec_scan_ctl,
- .write = write_file_spec_scan_ctl,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_short_repeat(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_short_repeat(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 1)
- return -EINVAL;
-
- sc->spec_config.short_repeat = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_short_repeat = {
- .read = read_file_spectral_short_repeat,
- .write = write_file_spectral_short_repeat,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_count(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.count);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_count(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 255)
- return -EINVAL;
-
- sc->spec_config.count = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_count = {
- .read = read_file_spectral_count,
- .write = write_file_spectral_count,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_period(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->spec_config.period);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_period(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0 || val > 255)
- return -EINVAL;
-
- sc->spec_config.period = val;
- return count;
-}
-
-static const struct file_operations fops_spectral_period = {
- .read = read_file_spectral_period,
- .write = write_file_spectral_period,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
+#define PHY_ERR(s, p) \
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
-static ssize_t read_file_spectral_fft_period(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
- len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
-static ssize_t write_file_spectral_fft_period(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- unsigned long val;
- char buf[32];
- ssize_t len;
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
+ if (len > size)
+ len = size;
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
- if (val < 0 || val > 15)
- return -EINVAL;
+ return retval;
- sc->spec_config.fft_period = val;
- return count;
+#undef PHY_ERR
}
-static const struct file_operations fops_spectral_fft_period = {
- .read = read_file_spectral_fft_period,
- .write = write_file_spectral_fft_period,
+static const struct file_operations fops_phy_err = {
+ .read = read_file_phy_err,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
-static struct dentry *create_buf_file_handler(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- struct dentry *buf_file;
-
- buf_file = debugfs_create_file(filename, mode, parent, buf,
- &relay_file_operations);
- *is_global = 1;
- return buf_file;
-}
-
-static int remove_buf_file_handler(struct dentry *dentry)
-{
- debugfs_remove(dentry);
-
- return 0;
-}
-
-void ath_debug_send_fft_sample(struct ath_softc *sc,
- struct fft_sample_tlv *fft_sample_tlv)
-{
- int length;
- if (!sc->rfs_chan_spec_scan)
- return;
-
- length = __be16_to_cpu(fft_sample_tlv->length) +
- sizeof(*fft_sample_tlv);
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
-}
-
-static struct rchan_callbacks rfs_spec_scan_cb = {
- .create_buf_file = create_buf_file_handler,
- .remove_buf_file = remove_buf_file_handler,
-};
-
-
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1569,86 +1351,6 @@ static const struct file_operations fops_btcoex = {
};
#endif
-static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_node *an = file->private_data;
- struct ath_softc *sc = an->sc;
- struct ath_atx_tid *tid;
- struct ath_atx_ac *ac;
- struct ath_txq *txq;
- u32 len = 0, size = 4096;
- char *buf;
- size_t retval;
- int tidno, acno;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- if (!an->sta->ht_cap.ht_supported) {
- len = scnprintf(buf, size, "%s\n",
- "HT not supported");
- goto exit;
- }
-
- len = scnprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
-
- len += scnprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
-
- for (acno = 0, ac = &an->ac[acno];
- acno < IEEE80211_NUM_ACS; acno++, ac++) {
- txq = ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
- ath_txq_unlock(sc, txq);
- }
-
- len += scnprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
-
- for (tidno = 0, tid = &an->tid[tidno];
- tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- txq = tid->ac->txq;
- ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
- ath_txq_unlock(sc, txq);
- }
-exit:
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_node_stat = {
- .read = read_file_node_stat,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
-}
-
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1772,117 +1474,9 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
void ath9k_deinit_debug(struct ath_softc *sc)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
- }
-}
-
-static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[3];
- unsigned int len;
-
- len = sprintf(buf, "%d\n", sc->tx99_state);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- char buf[32];
- bool start;
- ssize_t len;
- int r;
-
- if (sc->nvifs > 1)
- return -EOPNOTSUPP;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- if (strtobool(buf, &start))
- return -EINVAL;
-
- if (start == sc->tx99_state) {
- if (!start)
- return count;
- ath_dbg(common, XMIT, "Resetting TX99\n");
- ath9k_tx99_deinit(sc);
- }
-
- if (!start) {
- ath9k_tx99_deinit(sc);
- return count;
- }
-
- r = ath9k_tx99_init(sc);
- if (r)
- return r;
-
- return count;
-}
-
-static const struct file_operations fops_tx99 = {
- .read = read_file_tx99,
- .write = write_file_tx99,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_tx99_power(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "%d (%d dBm)\n",
- sc->tx99_power,
- sc->tx99_power / 2);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ ath9k_spectral_deinit_debug(sc);
}
-static ssize_t write_file_tx99_power(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- int r;
- u8 tx_power;
-
- r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
- if (r)
- return r;
-
- if (tx_power > MAX_RATE_POWER)
- return -EINVAL;
-
- sc->tx99_power = tx_power;
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
- ath9k_ps_restore(sc);
-
- return count;
-}
-
-static const struct file_operations fops_tx99_power = {
- .read = read_file_tx99_power,
- .write = write_file_tx99_power,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1899,6 +1493,8 @@ int ath9k_init_debug(struct ath_hw *ah)
#endif
ath9k_dfs_init_debug(sc);
+ ath9k_tx99_init_debug(sc);
+ ath9k_spectral_init_debug(sc);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
@@ -1922,6 +1518,8 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
+ debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_phy_err);
debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->rxchainmask);
debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1945,23 +1543,6 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
- sc->rfs_chan_spec_scan = relay_open("spectral_scan",
- sc->debug.debugfs_phy,
- 1024, 256, &rfs_spec_scan_cb,
- NULL);
- debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spec_scan_ctl);
- debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spectral_short_repeat);
- debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_count);
- debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_spectral_period);
- debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_spectral_fft_period);
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
@@ -1974,15 +1555,6 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
- if (config_enabled(CONFIG_ATH9K_TX99) &&
- AR_SREV_9300_20_OR_LATER(ah)) {
- debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_tx99);
- debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
- &fops_tx99_power);
- }
return 0;
}
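For reference, not part of the patch: every debugfs entry registered above ("recv", "phy_err", and the rest) follows the same read-handler pattern, and the new phy_err file is that pattern applied to the PHY error counters split out of read_file_recv(). A minimal sketch with a hypothetical counter and file name:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

static u32 example_counter;	/* hypothetical stat exported below */

static ssize_t read_file_example(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	unsigned int len = 0, size = 256;
	ssize_t retval;
	char *buf;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	len += scnprintf(buf + len, size - len, "%22s : %10u\n",
			 "EXAMPLE", example_counter);

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}

static const struct file_operations fops_example = {
	.read = read_file_example,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* registered from an init path that owns the parent dentry, e.g.:
 * debugfs_create_file("example", S_IRUSR, parent, NULL, &fops_example);
 */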
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index d6e3fa4299a4..cc7a025d833e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -27,11 +27,13 @@ struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#else
#define TX_STAT_INC(q, c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
#define RESET_STAT_INC(sc, type) do { } while (0)
#define ANT_STAT_INC(i, c) do { } while (0)
#define ANT_LNA_INC(i, c) do { } while (0)
@@ -42,6 +44,7 @@ enum ath_reset_type {
RESET_TYPE_BB_WATCHDOG,
RESET_TYPE_FATAL_INT,
RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_GTT,
RESET_TYPE_TX_HANG,
RESET_TYPE_PLL_HANG,
RESET_TYPE_MAC_HANG,
@@ -201,7 +204,23 @@ struct ath_tx_stats {
TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+struct ath_rx_rate_stats {
+ struct {
+ u32 ht20_cnt;
+ u32 ht40_cnt;
+ u32 sgi_cnt;
+ u32 lgi_cnt;
+ } ht_stats[24];
+
+ struct {
+ u32 ofdm_cnt;
+ } ofdm_stats[8];
+
+ struct {
+ u32 cck_lp_cnt;
+ u32 cck_sp_cnt;
+ } cck_stats[4];
+};
/**
* struct ath_rx_stats - RX Statistics
@@ -292,14 +311,12 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
-void ath_debug_send_fft_sample(struct ath_softc *sc,
- struct fft_sample_tlv *fft_sample);
void ath9k_debug_stat_ant(struct ath_softc *sc,
struct ath_hw_antcomb_conf *div_ant_conf,
int main_rssi_avg, int alt_rssi_avg);
-#else
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause);
-#define RX_STAT_INC(c) /* NOP */
+#else
static inline int ath9k_init_debug(struct ath_hw *ah)
{
@@ -331,6 +348,23 @@ static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
}
+static inline void
+ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+}
+
#endif /* CONFIG_ATH9K_DEBUGFS */
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb);
+#else
+static inline void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_ATH9K_STATION_STATISTICS */
+
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
new file mode 100644
index 000000000000..d76e6e0120d2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*************/
+/* node_aggr */
+/*************/
+
+static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ u32 len = 0, size = 4096;
+ char *buf;
+ size_t retval;
+ int tidno, acno;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!an->sta->ht_cap.ht_supported) {
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
+ goto exit;
+ }
+
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
+
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
+
+ for (acno = 0, ac = &an->ac[acno];
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
+ txq = ac->txq;
+ ath_txq_lock(sc, txq);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
+ ath_txq_unlock(sc, txq);
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ txq = tid->ac->txq;
+ ath_txq_lock(sc, txq);
+ if (tid->active) {
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno,
+ tid->seq_start,
+ tid->seq_next,
+ tid->baw_size,
+ tid->baw_head,
+ tid->baw_tail,
+ tid->bar_index,
+ tid->sched,
+ tid->paused);
+ }
+ ath_txq_unlock(sc, txq);
+ }
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_node_aggr = {
+ .read = read_file_node_aggr,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*************/
+/* node_recv */
+/*************/
+
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_rx_status *rxs;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+
+ an = (struct ath_node *) sta->drv_priv;
+ rstats = &an->rx_rate_stats;
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ if (IS_HT_RATE(rs->rs_rate)) {
+ if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
+ goto exit;
+
+ if (rxs->flag & RX_FLAG_40MHZ)
+ rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
+
+ if (rxs->flag & RX_FLAG_SHORT_GI)
+ rstats->ht_stats[rxs->rate_idx].sgi_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].lgi_cnt++;
+
+ goto exit;
+ }
+
+ if (IS_CCK_RATE(rs->rs_rate)) {
+ if (rxs->flag & RX_FLAG_SHORTPRE)
+ rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++;
+ else
+ rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++;
+
+ goto exit;
+ }
+
+ if (IS_OFDM_RATE(rs->rs_rate)) {
+ if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+ rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
+ else
+ rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
+ }
+exit:
+ rcu_read_unlock();
+}
+
+#define PRINT_CCK_RATE(str, i, sp) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ (sp) ? rstats->cck_stats[i].cck_sp_cnt : \
+ rstats->cck_stats[i].cck_lp_cnt); \
+ } while (0)
+
+#define PRINT_OFDM_RATE(str, i) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ rstats->ofdm_stats[i].ofdm_cnt); \
+ } while (0)
+
+static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta = an->sta;
+ enum ieee80211_band band;
+ u32 len = 0, size = 4096;
+ char *buf;
+ size_t retval;
+ int i;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ band = ah->curchan->chan->band;
+ rstats = &an->rx_rate_stats;
+
+ if (!sta->ht_cap.ht_supported)
+ goto legacy;
+
+ len += scnprintf(buf + len, size - len,
+ "%24s%10s%10s%10s\n",
+ "HT20", "HT40", "SGI", "LGI");
+
+ for (i = 0; i < 24; i++) {
+ len += scnprintf(buf + len, size - len,
+ "%8s%3u : %10u%10u%10u%10u\n",
+ "MCS", i,
+ rstats->ht_stats[i].ht20_cnt,
+ rstats->ht_stats[i].ht40_cnt,
+ rstats->ht_stats[i].sgi_cnt,
+ rstats->ht_stats[i].lgi_cnt);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+legacy:
+ if (band == IEEE80211_BAND_2GHZ) {
+ PRINT_CCK_RATE("CCK-1M/LP", 0, false);
+ PRINT_CCK_RATE("CCK-2M/LP", 1, false);
+ PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
+ PRINT_CCK_RATE("CCK-11M/LP", 3, false);
+
+ PRINT_CCK_RATE("CCK-2M/SP", 1, true);
+ PRINT_CCK_RATE("CCK-5.5M/SP", 2, true);
+ PRINT_CCK_RATE("CCK-11M/SP", 3, true);
+ }
+
+ PRINT_OFDM_RATE("OFDM-6M", 0);
+ PRINT_OFDM_RATE("OFDM-9M", 1);
+ PRINT_OFDM_RATE("OFDM-12M", 2);
+ PRINT_OFDM_RATE("OFDM-18M", 3);
+ PRINT_OFDM_RATE("OFDM-24M", 4);
+ PRINT_OFDM_RATE("OFDM-36M", 5);
+ PRINT_OFDM_RATE("OFDM-48M", 6);
+ PRINT_OFDM_RATE("OFDM-54M", 7);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+#undef PRINT_OFDM_RATE
+#undef PRINT_CCK_RATE
+
+static const struct file_operations fops_node_recv = {
+ .read = read_file_node_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+
+ debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
+ debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+}
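For reference, not part of the patch: in ath_debug_rate_stats() above, the OFDM counters are indexed with rate_idx - 4 on 2 GHz because mac80211's 2 GHz legacy rate table lists the four CCK rates (1, 2, 5.5, 11 Mbps) before the eight OFDM rates, while the 5 GHz table starts at OFDM 6 Mbps. A sketch of that mapping under those assumptions (helper name hypothetical, enum from the <net/mac80211.h> of this era):

/* Sketch only: map a legacy rate_idx to an ofdm_stats[] slot, -1 for CCK. */
static int ofdm_stat_index(enum ieee80211_band band, int rate_idx)
{
	if (band == IEEE80211_BAND_2GHZ) {
		if (rate_idx < 4)	/* CCK 1/2/5.5/11M */
			return -1;
		return rate_idx - 4;	/* OFDM 6M..54M -> 0..7 */
	}

	return rate_idx;		/* 5 GHz: table starts at OFDM 6M */
}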
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 7187d3671512..857bb28b3894 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -158,8 +158,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
return;
}
- ard.rssi = rs->rs_rssi_ctl0;
- ard.ext_rssi = rs->rs_rssi_ext0;
+ ard.rssi = rs->rs_rssi_ctl[0];
+ ard.ext_rssi = rs->rs_rssi_ext[0];
/*
* hardware stores this as 8 bit signed value.
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b4091716e9b3..07b806c56c56 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -1085,31 +1085,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
-#define EEP_MAP4K_SPURCHAN \
- (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
- struct ath_common *common = ath9k_hw_common(ah);
-
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_MAP4K_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_MAP4K_SPURCHAN
+ return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
}
const struct eeprom_ops eep_4k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index e1d0c217c104..5ba1385c9838 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -1004,31 +1004,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
{
-#define EEP_MAP9287_SPURCHAN \
- (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
-
- struct ath_common *common = ath9k_hw_common(ah);
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_MAP9287_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_MAP9287_SPURCHAN
+ return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
}
const struct eeprom_ops eep_ar9287_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 39107e31e79a..3218ca994746 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1348,31 +1348,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
-#define EEP_DEF_SPURCHAN \
- (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
- struct ath_common *common = ath9k_hw_common(ah);
-
- u16 spur_val = AR_NO_SPUR;
-
- ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
- switch (ah->config.spurmode) {
- case SPUR_DISABLE:
- break;
- case SPUR_ENABLE_IOCTL:
- spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
- spur_val);
- break;
- case SPUR_ENABLE_EEPROM:
- spur_val = EEP_DEF_SPURCHAN;
- break;
- }
-
- return spur_val;
-
-#undef EEP_DEF_SPURCHAN
+ return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
}
const struct eeprom_ops eep_def_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index c34f21241da9..b1956bf6e01e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -157,36 +157,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
}
}
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 trig_timeout,
- u32 timer_period)
-{
- ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
-
- if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_disable_interrupts(ah);
- ah->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_disable_interrupts(ah);
- ah->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
- }
-}
-
static void ath_mci_ftp_adjust(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -257,19 +227,9 @@ static void ath_btcoex_period_timer(unsigned long data)
spin_unlock_bh(&btcoex->btcoex_lock);
- /*
- * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
- * ensure that we properly convert btcoex_period to usec
- * for any comparision with (btcoex/btscan_)no_stomp.
- */
- if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
- timer_period * 10);
- btcoex->hw_timer_enabled = true;
- }
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp)
+ mod_timer(&btcoex->no_stomp_timer,
+ jiffies + msecs_to_jiffies(timer_period));
ath9k_ps_restore(sc);
@@ -282,7 +242,7 @@ skip_hw_wakeup:
* Generic tsf based hw timer which configures weight
* registers to time slice between wlan and bt traffic
*/
-static void ath_btcoex_no_stomp_timer(void *arg)
+static void ath_btcoex_no_stomp_timer(unsigned long arg)
{
struct ath_softc *sc = (struct ath_softc *)arg;
struct ath_hw *ah = sc->sc_ah;
@@ -311,24 +271,18 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
- btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
(unsigned long) sc);
+ setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
+ (unsigned long) sc);
spin_lock_init(&btcoex->btcoex_lock);
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
return 0;
}
@@ -343,10 +297,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -363,24 +314,16 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
void ath9k_btcoex_timer_pause(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
}
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- if (btcoex->hw_timer_enabled) {
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
- btcoex->hw_timer_enabled = false;
- }
+ del_timer_sync(&btcoex->no_stomp_timer);
}
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -400,12 +343,6 @@ u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
- if (status & ATH9K_INT_GENTIMER)
- ath_gen_timer_isr(sc->sc_ah);
-
if (status & ATH9K_INT_MCI)
ath_mci_intr(sc);
}
@@ -447,10 +384,6 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- if ((sc->btcoex.no_stomp_timer) &&
- ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
-
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
}
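For reference, not part of the patch: the gpio.c hunks above replace the hardware generic-timer plumbing for the btcoex no-stomp timer with a plain kernel timer, so no GENTIMER interrupt handling is needed for it any more. The pattern being adopted, in the setup_timer() style of this kernel generation (surrounding structure hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct btcoex_sketch {
	struct timer_list no_stomp_timer;
	u32 period_ms;
};

/* old-style timer callback: takes the unsigned long passed to setup_timer() */
static void no_stomp_timer_fn(unsigned long arg)
{
	struct btcoex_sketch *bt = (struct btcoex_sketch *)arg;

	/* duty-cycle work would run here, in softirq context */
	(void)bt;
}

static void btcoex_sketch_init(struct btcoex_sketch *bt)
{
	setup_timer(&bt->no_stomp_timer, no_stomp_timer_fn, (unsigned long)bt);
}

static void btcoex_sketch_arm(struct btcoex_sketch *bt)
{
	/* mod_timer() (re)arms the timer relative to now */
	mod_timer(&bt->no_stomp_timer,
		  jiffies + msecs_to_jiffies(bt->period_ms));
}

static void btcoex_sketch_stop(struct btcoex_sketch *bt)
{
	/* safe even if the timer is not pending */
	del_timer_sync(&bt->no_stomp_timer);
}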
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 055d7c25e090..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
struct ath9k_htc_sta {
u8 index;
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
};
#define ATH9K_HTC_RXBUF 256
@@ -600,10 +602,15 @@ void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
#ifdef CONFIG_MAC80211_LEDS
+void ath9k_configure_leds(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
void ath9k_led_work(struct work_struct *work);
#else
+static inline void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+}
+
static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index e0c03bd64182..8b5757734596 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -70,11 +70,11 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
struct ath9k_beacon_state bs;
enum ath9k_int imask = 0;
int dtimperiod, dtimcount, sleepduration;
- int cfpperiod, cfpcount, bmiss_timeout;
+ int bmiss_timeout;
u32 nexttbtt = 0, intval, tsftu;
__be32 htc_imask = 0;
u64 tsf;
- int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int num_beacons, offset, dtim_dec_count;
int ret __attribute__ ((unused));
u8 cmd_rsp;
@@ -84,7 +84,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
/*
- * Setup dtim and cfp parameters according to
+ * Setup dtim parameters according to
* last beacon we received (which may be none).
*/
dtimperiod = bss_conf->dtim_period;
@@ -93,8 +93,6 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
dtimcount = 1;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0;
- cfpperiod = 1; /* NB: no PCF support yet */
- cfpcount = 0;
sleepduration = intval;
if (sleepduration <= 0)
@@ -102,7 +100,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/*
* Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim+cfp state for the result.
+ * TSF and calculate dtim state for the result.
*/
tsf = ath9k_hw_gettsf64(priv->ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
@@ -115,26 +113,14 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* DTIM Beacon every dtimperiod Beacon */
dtim_dec_count = num_beacons % dtimperiod;
- /* CFP every cfpperiod DTIM Beacon */
- cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
- if (dtim_dec_count)
- cfp_dec_count++;
-
dtimcount -= dtim_dec_count;
if (dtimcount < 0)
dtimcount += dtimperiod;
- cfpcount -= cfp_dec_count;
- if (cfpcount < 0)
- cfpcount += cfpperiod;
-
- bs.bs_intval = intval;
- bs.bs_nexttbtt = nexttbtt;
- bs.bs_dtimperiod = dtimperiod*intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
- bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
- bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
- bs.bs_cfpmaxduration = 0;
+ bs.bs_intval = TU_TO_USEC(intval);
+ bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
+ bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
/*
* Calculate the number of consecutive beacons to miss* before taking
@@ -161,7 +147,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
* XXX fixed at 100ms
*/
- bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ sleepduration));
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = bs.bs_dtimperiod;
@@ -170,10 +157,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, CONFIG,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration);
/* Set the computed STA beacon timers */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 105582d6b714..50f74a2a4cf8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -255,6 +255,17 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
cancel_work_sync(&priv->led_work);
}
+
+void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+}
+
void ath9k_init_leds(struct ath9k_htc_priv *priv)
{
int ret;
@@ -268,11 +279,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
else
priv->ah->led_pin = ATH_LED_PIN_DEF;
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+ ath9k_configure_leds(priv);
snprintf(priv->led_name, sizeof(priv->led_name),
"ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c3676bf1d6c4..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
@@ -748,7 +754,6 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
hw->queues = 4;
- hw->channel_change_time = 5000;
hw->max_listen_interval = 1;
hw->vif_data_size = sizeof(struct ath9k_htc_vif);
@@ -1000,6 +1005,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
priv->ah->hw_version.usbdev);
+ ath9k_configure_leds(priv);
+
return ret;
}
#endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
ath9k_htc_init_rate(priv, sta);
+ }
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_sta *ista;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
+ cancel_work_sync(&ista->rc_update_work);
+
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- ista = (struct ath9k_htc_sta *) sta->drv_priv;
htc_sta_drain(priv->htc, ista->index);
ret = ath9k_htc_remove_station(priv, vif, sta);
ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 changed)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_rate trate;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- ath9k_htc_setup_rate(priv, sta, &trate);
- if (!ath9k_htc_send_rate_cmd(priv, &trate))
- ath_dbg(common, CONFIG,
- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
- else
- ath_dbg(common, CONFIG,
- "Unable to update supported rates for sta: %pM\n",
- sta->addr);
- }
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
- ath9k_htc_ps_restore(priv);
- mutex_unlock(&priv->mutex);
+ schedule_work(&ista->rc_update_work);
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
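For reference, not part of the patch: sta_rc_update now only schedules rc_update_work; the rate command path takes priv->mutex and goes over HTC, which may sleep, while mac80211 can call the rc_update hook from contexts where sleeping is not allowed, so deferring to process context is the likely motivation (an inference from the code, not stated here). The workqueue pattern in isolation:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sta_sketch {
	struct work_struct rc_update_work;
	/* back-pointers the work item needs are set before first use */
};

static void rc_update_work_fn(struct work_struct *work)
{
	struct sta_sketch *s = container_of(work, struct sta_sketch,
					    rc_update_work);

	/* sleeping operations (mutexes, bus I/O) are fine in process context */
	(void)s;
}

/* at station add time */
static void sta_sketch_init(struct sta_sketch *s)
{
	INIT_WORK(&s->rc_update_work, rc_update_work_fn);
}

/* from the atomic callback: queue only, never sleep */
static void sta_sketch_kick(struct sta_sketch *s)
{
	schedule_work(&s->rc_update_work);
}

/* at station remove time, before the containing object goes away */
static void sta_sketch_teardown(struct sta_sketch *s)
{
	cancel_work_sync(&s->rc_update_work);
}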
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index c028df76b564..12e0f32a4905 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1075,9 +1075,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
last_rssi = priv->rx.last_rssi;
- if (ieee80211_is_beacon(hdr->frame_control) &&
- !is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid)) {
+ if (ath_is_mybeacon(common, hdr)) {
s8 rssi = rxbuf->rxstatus.rs_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 4f9378ddf07f..a47ea8423f1e 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -49,9 +49,10 @@ static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
}
-static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
{
- return ath9k_hw_ops(ah)->get_isr(ah, masked);
+ return ath9k_hw_ops(ah)->get_isr(ah, masked, sync_cause_p);
}
static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
@@ -106,6 +107,21 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
/* Private hardware call ops */
+static inline void ath9k_hw_init_hang_checks(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_hang_checks(ah);
+}
+
+static inline bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_mac_hang(ah);
+}
+
+static inline bool ath9k_hw_detect_bb_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_bb_hang(ah);
+}
+
/* PHY ops */
static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
@@ -231,4 +247,31 @@ static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
}
+static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static inline u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static inline void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
+static inline void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
#endif /* ATH9K_HW_OPS_H */
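For reference, not part of the patch: the inlines moved into hw-ops.h above follow one convention, a NULL check only for callbacks that some chip families may leave unimplemented (init_mode_gain_regs, ani_cache_ini_regs), while mandatory ones (init_cal_settings, compute_pll_control) are called unconditionally. A minimal sketch of that convention with hypothetical names:

struct sketch_private_ops {
	void (*mandatory_op)(void *ctx);	/* every family provides this */
	void (*optional_op)(void *ctx);		/* may be NULL */
};

static inline void sketch_mandatory_op(const struct sketch_private_ops *ops,
				       void *ctx)
{
	ops->mandatory_op(ctx);
}

static inline void sketch_optional_op(const struct sketch_private_ops *ops,
				      void *ctx)
{
	if (!ops->optional_op)
		return;

	ops->optional_op(ctx);
}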
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8918035da3a3..9078a6c5a74e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -17,6 +17,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -35,99 +37,6 @@ MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-static int __init ath9k_init(void)
-{
- return 0;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- return;
-}
-module_exit(ath9k_exit);
-
-/* Private hardware callbacks */
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- ath9k_hw_private_ops(ah)->init_cal_settings(ah);
-}
-
-static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
- return;
-
- ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
-}
-
-static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
-{
- /* You will not have this callback if using the old ANI */
- if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
- return;
-
- ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
-}
-
-/********************/
-/* Helper Functions */
-/********************/
-
-#ifdef CONFIG_ATH9K_DEBUGFS
-
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
-{
- struct ath_softc *sc = common->priv;
- if (sync_cause)
- sc->debug.stats.istats.sync_cause_all++;
- if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
- sc->debug.stats.istats.sync_rtc_irq++;
- if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
- sc->debug.stats.istats.sync_mac_irq++;
- if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
- sc->debug.stats.istats.eeprom_illegal_access++;
- if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
- sc->debug.stats.istats.apb_timeout++;
- if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
- sc->debug.stats.istats.pci_mode_conflict++;
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
- sc->debug.stats.istats.host1_fatal++;
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
- sc->debug.stats.istats.host1_perr++;
- if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
- sc->debug.stats.istats.trcv_fifo_perr++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
- sc->debug.stats.istats.radm_cpl_ep++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
- sc->debug.stats.istats.radm_cpl_dllp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
- sc->debug.stats.istats.radm_cpl_tlp_abort++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
- sc->debug.stats.istats.radm_cpl_ecrc_err++;
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
- sc->debug.stats.istats.radm_cpl_timeout++;
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- sc->debug.stats.istats.local_timeout++;
- if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
- sc->debug.stats.istats.pm_access++;
- if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
- sc->debug.stats.istats.mac_awake++;
- if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
- sc->debug.stats.istats.mac_asleep++;
- if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
- sc->debug.stats.istats.mac_sleep_access++;
-}
-#endif
-
-
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -336,6 +245,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
return;
+ case AR9300_DEVID_AR953X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -437,23 +349,22 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
static void ath9k_hw_init_config(struct ath_hw *ah)
{
- int i;
+ struct ath_common *common = ath9k_hw_common(ah);
ah->config.dma_beacon_response_time = 1;
ah->config.sw_beacon_response_time = 6;
- ah->config.additional_swba_backoff = 0;
- ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_clock_req = 0;
ah->config.analog_shiftreg = 1;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- ah->config.spurchans[i][0] = AR_NO_SPUR;
- ah->config.spurchans[i][1] = AR_NO_SPUR;
- }
-
ah->config.rx_intr_mitigation = true;
- ah->config.pcieSerDesWrite = true;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.rimt_last = 500;
+ ah->config.rimt_first = 2000;
+ } else {
+ ah->config.rimt_last = 250;
+ ah->config.rimt_first = 700;
+ }
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -473,6 +384,24 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
*/
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+ !ah->is_pciexpress)) {
+ ah->config.serialize_regmode = SER_REG_MODE_ON;
+ } else {
+ ah->config.serialize_regmode = SER_REG_MODE_OFF;
+ }
+ }
+
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
+ ah->config.serialize_regmode);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
+ else
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
}
static void ath9k_hw_init_defaults(struct ath_hw *ah)
@@ -485,16 +414,24 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
- ah->atim_window = 0;
- ah->sta_id1_defaults =
- AR_STA_ID1_CRPT_MIC_ENABLE |
- AR_STA_ID1_MCAST_KSRCH;
+ ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE |
+ AR_STA_ID1_MCAST_KSRCH;
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
+
ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
+
+ ah->ani_function = ATH9K_ANI_ALL;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
+ else
+ ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
}
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
@@ -548,11 +485,11 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
* EEPROM needs to be initialized before we do this.
* This is required for regulatory compliance.
*/
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
if ((regdmn & 0xF0) == CTL_FCC) {
- ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
- ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ;
}
}
@@ -576,6 +513,31 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_read_revisions(ah);
+ switch (ah->hw_version.macVersion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ case AR_SREV_VERSION_9300:
+ case AR_SREV_VERSION_9330:
+ case AR_SREV_VERSION_9485:
+ case AR_SREV_VERSION_9340:
+ case AR_SREV_VERSION_9462:
+ case AR_SREV_VERSION_9550:
+ case AR_SREV_VERSION_9565:
+ case AR_SREV_VERSION_9531:
+ break;
+ default:
+ ath_err(common,
+ "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
+ ah->hw_version.macVersion, ah->hw_version.macRev);
+ return -EOPNOTSUPP;
+ }
+
/*
* Read back AR_WA into a permanent copy and set bits 14 and 17.
* We need to do this to avoid RMW of this register. We cannot
@@ -609,50 +571,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
- if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
- if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
- !ah->is_pciexpress)) {
- ah->config.serialize_regmode =
- SER_REG_MODE_ON;
- } else {
- ah->config.serialize_regmode =
- SER_REG_MODE_OFF;
- }
- }
-
- ath_dbg(common, RESET, "serialize_regmode is %d\n",
- ah->config.serialize_regmode);
-
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
- else
- ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
-
- switch (ah->hw_version.macVersion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- case AR_SREV_VERSION_9300:
- case AR_SREV_VERSION_9330:
- case AR_SREV_VERSION_9485:
- case AR_SREV_VERSION_9340:
- case AR_SREV_VERSION_9462:
- case AR_SREV_VERSION_9550:
- case AR_SREV_VERSION_9565:
- break;
- default:
- ath_err(common,
- "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
- ah->hw_version.macVersion, ah->hw_version.macRev);
- return -EOPNOTSUPP;
- }
-
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
AR_SREV_9330(ah) || AR_SREV_9550(ah))
ah->is_pciexpress = false;
@@ -660,10 +578,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
ath9k_hw_init_cal_settings(ah);
- ah->ani_function = ATH9K_ANI_ALL;
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
-
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
@@ -682,15 +596,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return r;
}
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
- ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
- else
- ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
-
- if (AR_SREV_9330(ah))
- ah->bb_watchdog_timeout_ms = 85;
- else
- ah->bb_watchdog_timeout_ms = 25;
+ ath9k_hw_init_hang_checks(ah);
common->state = ATH_HW_INITIALIZED;
@@ -723,6 +629,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9462:
case AR9485_DEVID_AR1111:
case AR9300_DEVID_AR9565:
+ case AR9300_DEVID_AR953X:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -858,7 +765,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
- } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -868,9 +775,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
if (ah->is_clk_25mhz) {
- pll2_divint = 0x54;
- pll2_divfrac = 0x1eb85;
- refdiv = 3;
+ if (AR_SREV_9531(ah)) {
+ pll2_divint = 0x1c;
+ pll2_divfrac = 0xa3d2;
+ refdiv = 1;
+ } else {
+ pll2_divint = 0x54;
+ pll2_divfrac = 0x1eb85;
+ refdiv = 3;
+ }
} else {
if (AR_SREV_9340(ah)) {
pll2_divint = 88;
@@ -884,7 +797,10 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
- regval |= (0x1 << 16);
+ if (AR_SREV_9531(ah))
+ regval |= (0x1 << 22);
+ else
+ regval |= (0x1 << 16);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
udelay(100);
@@ -894,14 +810,33 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
regval = REG_READ(ah, AR_PHY_PLL_MODE);
if (AR_SREV_9340(ah))
- regval = (regval & 0x80071fff) | (0x1 << 30) |
- (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
+ regval = (regval & 0x80071fff) |
+ (0x1 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x18 << 19);
+ else if (AR_SREV_9531(ah))
+ regval = (regval & 0x01c00fff) |
+ (0x1 << 31) |
+ (0x2 << 29) |
+ (0xa << 25) |
+ (0x1 << 19) |
+ (0x6 << 12);
else
- regval = (regval & 0x80071fff) | (0x3 << 30) |
- (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
+ regval = (regval & 0x80071fff) |
+ (0x3 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
- REG_WRITE(ah, AR_PHY_PLL_MODE,
- REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
+ if (AR_SREV_9531(ah))
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
+ else
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
udelay(1000);
}
@@ -1281,6 +1216,42 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
*coef_exponent = coef_exp - 16;
}
+/* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues.
+ */
+static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
+{
+ int i, npend = 0;
+
+ for (i = 0; i < AR_NUM_QCU; i++) {
+ npend = ath9k_hw_numtxpending(ah, i);
+ if (npend)
+ break;
+ }
+
+ if (ah->external_reset &&
+ (npend || type == ATH9K_RESET_COLD)) {
+ int reset_err = 0;
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "reset MAC via external reset\n");
+
+ reset_err = ah->external_reset();
+ if (reset_err) {
+ ath_err(ath9k_hw_common(ah),
+ "External reset failed, err=%d\n",
+ reset_err);
+ return false;
+ }
+
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+ }
+
+ return true;
+}
+
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1331,38 +1302,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
if (AR_SREV_9330(ah)) {
- int npend = 0;
- int i;
-
- /* AR9330 WAR:
- * call external reset function to reset WMAC if:
- * - doing a cold reset
- * - we have pending frames in the TX queues
- */
-
- for (i = 0; i < AR_NUM_QCU; i++) {
- npend = ath9k_hw_numtxpending(ah, i);
- if (npend)
- break;
- }
-
- if (ah->external_reset &&
- (npend || type == ATH9K_RESET_COLD)) {
- int reset_err = 0;
-
- ath_dbg(ath9k_hw_common(ah), RESET,
- "reset MAC via external reset\n");
-
- reset_err = ah->external_reset();
- if (reset_err) {
- ath_err(ath9k_hw_common(ah),
- "External reset failed, err=%d\n",
- reset_err);
- return false;
- }
-
- REG_WRITE(ah, AR_RTC_RESET, 1);
- }
+ if (!ath9k_hw_ar9330_reset_war(ah, type))
+ return false;
}
if (ath9k_hw_mci_is_enabled(ah))
@@ -1372,7 +1313,12 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REGWRITE_BUFFER_FLUSH(ah);
- udelay(50);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ udelay(50);
+ else if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(100);
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
@@ -1408,8 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
REGWRITE_BUFFER_FLUSH(ah);
- if (!AR_SREV_9300_20_OR_LATER(ah))
- udelay(2);
+ udelay(2);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
@@ -1485,7 +1430,6 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
if (AR_SREV_9330(ah))
ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
- ath9k_hw_set_rfmode(ah, chan);
return true;
}
@@ -1501,8 +1445,9 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
- mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
+ u32 flags_diff = chan->channelFlags ^ ah->curchan->channelFlags;
+ band_switch = !!(flags_diff & CHANNEL_5GHZ);
+ mode_diff = !!(flags_diff & ~CHANNEL_HT);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1573,76 +1518,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
}
}
-static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
- int *hang_state, int *hang_pos)
-{
- static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
- u32 chain_state, dcs_pos, i;
-
- for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
- chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
- for (i = 0; i < 3; i++) {
- if (chain_state == dcu_chain_state[i]) {
- *hang_state = chain_state;
- *hang_pos = dcs_pos;
- return true;
- }
- }
- }
- return false;
-}
-
-#define DCU_COMPLETE_STATE 1
-#define DCU_COMPLETE_STATE_MASK 0x3
-#define NUM_STATUS_READS 50
-static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
-{
- u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
- u32 i, hang_pos, hang_state, num_state = 6;
-
- comp_state = REG_READ(ah, AR_DMADBG_6);
-
- if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
- ath_dbg(ath9k_hw_common(ah), RESET,
- "MAC Hang signature not found at DCU complete\n");
- return false;
- }
-
- chain_state = REG_READ(ah, dcs_reg);
- if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
- goto hang_check_iter;
-
- dcs_reg = AR_DMADBG_5;
- num_state = 4;
- chain_state = REG_READ(ah, dcs_reg);
- if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
- goto hang_check_iter;
-
- ath_dbg(ath9k_hw_common(ah), RESET,
- "MAC Hang signature 1 not found\n");
- return false;
-
-hang_check_iter:
- ath_dbg(ath9k_hw_common(ah), RESET,
- "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
- chain_state, comp_state, hang_state, hang_pos);
-
- for (i = 0; i < NUM_STATUS_READS; i++) {
- chain_state = REG_READ(ah, dcs_reg);
- chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
- comp_state = REG_READ(ah, AR_DMADBG_6);
-
- if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
- DCU_COMPLETE_STATE) ||
- (chain_state != hang_state))
- return false;
- }
-
- ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
-
- return true;
-}
-
void ath9k_hw_check_nav(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1659,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav);
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
- u32 reg;
+ u32 reg, last_val;
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
@@ -1667,9 +1542,14 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
if (AR_SREV_9285_12_OR_LATER(ah))
return true;
+ last_val = REG_READ(ah, AR_OBS_BUS_1);
do {
reg = REG_READ(ah, AR_OBS_BUS_1);
+ if (reg != last_val)
+ return true;
+ udelay(1);
+ last_val = reg;
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
@@ -1717,7 +1597,6 @@ static void ath9k_hw_reset_opmode(struct ath_hw *ah,
REG_RMW(ah, AR_STA_ID1, macStaId1
| AR_STA_ID1_RTS_USE_DEF
- | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults,
~AR_STA_ID1_SADH_MASK);
ath_hw_setbssidmask(common);
@@ -1776,7 +1655,7 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
}
#ifdef __BIG_ENDIAN
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9550(ah))
+ AR_SREV_9550(ah) || AR_SREV_9531(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1814,7 +1693,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
 * If cross-band fcc is not supported, bail out if channelFlags differ.
*/
if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
- chan->channelFlags != ah->curchan->channelFlags)
+ ((chan->channelFlags ^ ah->curchan->channelFlags) & ~CHANNEL_HT))
goto fail;
if (!ath9k_hw_check_alive(ah))
@@ -1855,10 +1734,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool fastcc)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct timespec ts;
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
+ s64 usec = 0;
int r;
bool start_mci_reset = false;
bool save_fullsleep = ah->chip_fullsleep;
@@ -1901,10 +1782,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
- /* For chips on which RTC reset is done, save TSF before it gets cleared */
- if (AR_SREV_9100(ah) ||
- (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
- tsf = ath9k_hw_gettsf64(ah);
+ /* Save TSF before chip reset, a cold reset clears it */
+ tsf = ath9k_hw_gettsf64(ah);
+ getrawmonotonic(&ts);
+ usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1937,8 +1818,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- if (tsf)
- ath9k_hw_settsf64(ah, tsf);
+ getrawmonotonic(&ts);
+ usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
+ ath9k_hw_settsf64(ah, tsf + usec);
if (AR_SREV_9280_20_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
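Note on the two TSF hunks above: the usec variable first records the raw-monotonic time taken just before the reset and is then reused for the elapsed reset time, so the timer is restored as old TSF + (time after reset - time before reset) in microseconds. The TSF is therefore kept monotonic across every reset, not only on the AR9100/AR9280 paths that previously saved it.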
@@ -1950,6 +1832,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ ath9k_hw_set_rfmode(ah, chan);
+
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
@@ -2005,8 +1889,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
- REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
- REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
}
if (ah->config.tx_intr_mitigation) {
@@ -2044,10 +1928,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_bb_watchdog_config(ah);
+
+ if (ah->config.hw_hang_checks & HW_PHYRESTART_CLC_WAR)
ar9003_hw_disable_phy_restart(ah);
- }
ath9k_hw_apply_gpio_override(ah);
@@ -2171,7 +2056,10 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- udelay(50);
+ if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(50);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
@@ -2260,9 +2148,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
- TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
- flags |= AR_NDP_TIMER_EN;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
@@ -2283,7 +2168,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
- REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2300,12 +2184,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
-
- REG_WRITE(ah, AR_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval));
- REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval));
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, bs->bs_nexttbtt);
+ REG_WRITE(ah, AR_BEACON_PERIOD, bs->bs_intval);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, bs->bs_intval);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2333,9 +2214,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_NEXT_DTIM,
- TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
- REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
+ REG_WRITE(ah, AR_NEXT_DTIM, bs->bs_nextdtim - SLEEP_SLOP);
+ REG_WRITE(ah, AR_NEXT_TIM, nextTbtt - SLEEP_SLOP);
REG_WRITE(ah, AR_SLEEP1,
SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
@@ -2349,8 +2229,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_SLEEP2,
SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
- REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
- REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REG_WRITE(ah, AR_TIM_PERIOD, beaconintval);
+ REG_WRITE(ah, AR_DTIM_PERIOD, dtimperiod);
REGWRITE_BUFFER_FLUSH(ah);
@@ -2608,13 +2488,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
- /*
- * Fast channel change across bands is available
- * only for AR9462 and AR9565.
- */
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
-
return 0;
}
@@ -2986,20 +2859,6 @@ static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
/* HW generic timer primitives */
-/* compute and clear index of rightmost 1 */
-static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
-{
- u32 b;
-
- b = *mask;
- b &= (0-b);
- *mask &= ~b;
- b *= debruijn32;
- b >>= 27;
-
- return timer_table->gen_timer_index[b];
-}
-
u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
return REG_READ(ah, AR_TSF_L32);
@@ -3015,6 +2874,10 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
+ if ((timer_index < AR_FIRST_NDP_TIMER) ||
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
+
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
if (timer == NULL)
return NULL;
@@ -3032,23 +2895,13 @@ EXPORT_SYMBOL(ath_gen_timer_alloc);
void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
- u32 trig_timeout,
+ u32 timer_next,
u32 timer_period)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
- u32 tsf, timer_next;
-
- BUG_ON(!timer_period);
-
- set_bit(timer->index, &timer_table->timer_mask.timer_bits);
+ u32 mask = 0;
- tsf = ath9k_hw_gettsf32(ah);
-
- timer_next = tsf + trig_timeout;
-
- ath_dbg(ath9k_hw_common(ah), BTCOEX,
- "current tsf %x period %x timer_next %x\n",
- tsf, timer_period, timer_next);
+ timer_table->timer_mask |= BIT(timer->index);
/*
* Program generic timer registers
@@ -3074,10 +2927,19 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
(1 << timer->index));
}
- /* Enable both trigger and thresh interrupt masks */
- REG_SET_BIT(ah, AR_IMR_S5,
- (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
- SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
+ if (timer->trigger)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_TRIG);
+ if (timer->overflow)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_THRESH);
+
+ REG_SET_BIT(ah, AR_IMR_S5, mask);
+
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
@@ -3085,11 +2947,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
- if ((timer->index < AR_FIRST_NDP_TIMER) ||
- (timer->index >= ATH_MAX_GEN_TIMER)) {
- return;
- }
-
/* Clear generic timer enable bits. */
REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
@@ -3109,7 +2966,12 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
- clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
+ timer_table->timer_mask &= ~BIT(timer->index);
+
+ if (timer_table->timer_mask == 0) {
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
@@ -3130,32 +2992,32 @@ void ath_gen_timer_isr(struct ath_hw *ah)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 trigger_mask, thresh_mask, index;
+ unsigned long trigger_mask, thresh_mask;
+ unsigned int index;
/* get hardware generic timer interrupt status */
trigger_mask = ah->intr_gen_timer_trigger;
thresh_mask = ah->intr_gen_timer_thresh;
- trigger_mask &= timer_table->timer_mask.val;
- thresh_mask &= timer_table->timer_mask.val;
-
- trigger_mask &= ~thresh_mask;
+ trigger_mask &= timer_table->timer_mask;
+ thresh_mask &= timer_table->timer_mask;
- while (thresh_mask) {
- index = rightmost_index(timer_table, &thresh_mask);
+ for_each_set_bit(index, &thresh_mask, ARRAY_SIZE(timer_table->timers)) {
timer = timer_table->timers[index];
- BUG_ON(!timer);
- ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
- index);
+ if (!timer)
+ continue;
+ if (!timer->overflow)
+ continue;
+
+ trigger_mask &= ~BIT(index);
timer->overflow(timer->arg);
}
- while (trigger_mask) {
- index = rightmost_index(timer_table, &trigger_mask);
+ for_each_set_bit(index, &trigger_mask, ARRAY_SIZE(timer_table->timers)) {
timer = timer_table->timers[index];
- BUG_ON(!timer);
- ath_dbg(common, BTCOEX,
- "Gen timer[%d] trigger\n", index);
+ if (!timer)
+ continue;
+ if (!timer->trigger)
+ continue;
timer->trigger(timer->arg);
}
}
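The generic-timer rework above also changes the ath9k_hw_gen_timer_start() contract: the third argument is now an absolute TSF trigger value (timer_next) rather than a timeout relative to the current TSF. A caller-side sketch of the adjustment, with timer, trig_timeout and timer_period as assumed local names (not part of this patch):

	/* old call: ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
	 * new call: the caller computes the absolute trigger time itself */
	u32 tsf = ath9k_hw_gettsf32(ah);

	ath9k_hw_gen_timer_start(ah, timer, tsf + trig_timeout, timer_period);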
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index a2c9a5dbac6b..0acd4b5a4892 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -52,6 +52,7 @@
#define AR9300_DEVID_QCA955X 0x0038
#define AR9485_DEVID_AR1111 0x0037
#define AR9300_DEVID_AR9565 0x0036
+#define AR9300_DEVID_AR953X 0x003d
#define AR5416_AR9100_DEVID 0x000b
@@ -168,7 +169,7 @@
#define CAB_TIMEOUT_VAL 10
#define BEACON_TIMEOUT_VAL 10
#define MIN_BEACON_TIMEOUT_VAL 1
-#define SLEEP_SLOP 3
+#define SLEEP_SLOP TU_TO_USEC(3)
#define INIT_CONFIG_STATUS 0x00000000
#define INIT_RSSI_THR 0x00000700
@@ -277,14 +278,25 @@ struct ath9k_hw_capabilities {
u8 txs_len;
};
+#define AR_NO_SPUR 0x8000
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+
+enum ath9k_hw_hang_checks {
+ HW_BB_WATCHDOG = BIT(0),
+ HW_PHYRESTART_CLC_WAR = BIT(1),
+ HW_BB_RIFS_HANG = BIT(2),
+ HW_BB_DFS_HANG = BIT(3),
+ HW_BB_RX_CLEAR_STUCK_HANG = BIT(4),
+ HW_MAC_HANG = BIT(5),
+};
+
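The new hw_hang_checks bitmask is meant to be filled in by a per-family init_hang_checks private op (the hook is added to ath_hw_private_ops further down; the actual AR9002/AR9003 implementations live in files outside this diff). A hypothetical sketch of such a hook, only to show how the bits are consumed by the interrupt and reset paths above:

	/* illustration only, not part of the patch: a family that needs the
	 * BB watchdog and the MAC hang detector would advertise them here */
	static void example_hw_init_hang_checks(struct ath_hw *ah)
	{
		ah->config.hw_hang_checks |= HW_BB_WATCHDOG | HW_MAC_HANG;
	}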
struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
- int additional_swba_backoff;
- int ack_6mb;
u32 cwm_ignore_extcca;
- bool pcieSerDesWrite;
- u8 pcie_clock_req;
u32 pcie_waen;
u8 analog_shiftreg;
u32 ofdm_trig_low;
@@ -295,20 +307,11 @@ struct ath9k_ops_config {
int serialize_regmode;
bool rx_intr_mitigation;
bool tx_intr_mitigation;
-#define SPUR_DISABLE 0
-#define SPUR_ENABLE_IOCTL 1
-#define SPUR_ENABLE_EEPROM 2
-#define AR_SPUR_5413_1 1640
-#define AR_SPUR_5413_2 1200
-#define AR_NO_SPUR 0x8000
-#define AR_BASE_FREQ_2GHZ 2300
-#define AR_BASE_FREQ_5GHZ 4900
-#define AR_SPUR_FEEQ_BOUND_HT40 19
-#define AR_SPUR_FEEQ_BOUND_HT20 10
- int spurmode;
- u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
u16 ani_poll_interval; /* ANI poll interval in ms */
+ u16 hw_hang_checks;
+ u16 rimt_first;
+ u16 rimt_last;
/* Platform specific config */
u32 aspm_l1_fix;
@@ -317,6 +320,7 @@ struct ath9k_ops_config {
bool xatten_margin_cfg;
bool alt_mingainidx;
bool no_pll_pwrsave;
+ bool tx_gain_buffalo;
};
enum ath9k_int {
@@ -460,10 +464,6 @@ struct ath9k_beacon_state {
u32 bs_intval;
#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
u32 bs_dtimperiod;
- u16 bs_cfpperiod;
- u16 bs_cfpmaxduration;
- u32 bs_cfpnext;
- u16 bs_timoffset;
u16 bs_bmissthreshold;
u32 bs_sleepduration;
u32 bs_tsfoor_threshold;
@@ -499,12 +499,6 @@ struct ath9k_hw_version {
#define AR_GENTMR_BIT(_index) (1 << (_index))
-/*
- * Using de Bruijin sequence to look up 1's index in a 32 bit number
- * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
- */
-#define debruijn32 0x077CB531U
-
struct ath_gen_timer_configuration {
u32 next_addr;
u32 period_addr;
@@ -520,12 +514,8 @@ struct ath_gen_timer {
};
struct ath_gen_timer_table {
- u32 gen_timer_index[32];
struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
- union {
- unsigned long timer_bits;
- u16 val;
- } timer_mask;
+ u16 timer_mask;
};
struct ath_hw_antcomb_conf {
@@ -596,6 +586,10 @@ struct ath_hw_radar_conf {
* register settings through the register initialization.
*/
struct ath_hw_private_ops {
+ void (*init_hang_checks)(struct ath_hw *ah);
+ bool (*detect_mac_hang)(struct ath_hw *ah);
+ bool (*detect_bb_hang)(struct ath_hw *ah);
+
/* Calibration ops */
void (*init_cal_settings)(struct ath_hw *ah);
bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
@@ -690,7 +684,8 @@ struct ath_hw_ops {
struct ath9k_channel *chan,
u8 rxchainmask,
bool longcal);
- bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p);
void (*set_txdesc)(struct ath_hw *ah, void *ds,
struct ath_tx_info *i);
int (*proc_txdesc)(struct ath_hw *ah, void *ds,
@@ -786,7 +781,6 @@ struct ath_hw {
u32 txurn_interrupt_mask;
atomic_t intr_ref_cnt;
bool chip_fullsleep;
- u32 atim_window;
u32 modes_index;
/* Calibration */
@@ -865,6 +859,7 @@ struct ath_hw {
u32 gpio_mask;
u32 gpio_val;
+ struct ar5416IniArray ini_dfs;
struct ar5416IniArray iniModes;
struct ar5416IniArray iniCommon;
struct ar5416IniArray iniBB_RfGain;
@@ -921,7 +916,7 @@ struct ath_hw {
/* Enterprise mode cap */
u32 ent_mode;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
u32 wow_event_mask;
#endif
bool is_clk_25mhz;
@@ -1017,13 +1012,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-#ifdef CONFIG_ATH9K_DEBUGFS
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
-#else
-static inline void ath9k_debug_sync_cause(struct ath_common *common,
- u32 sync_cause) {}
-#endif
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -1058,6 +1046,7 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
* Code specific to AR9003, we stuff these here to avoid callbacks
* for older families
*/
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
@@ -1127,7 +1116,7 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
const char *ath9k_hw_wow_event_to_string(u32 wow_event);
void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
u8 *user_mask, int pattern_count,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 710192ed27ed..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
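The new ps_enable parameter defaults to 0, so powersave (IEEE80211_HW_SUPPORTS_PS, set conditionally in the capability hunk below) stays off unless the module is loaded with it, e.g. modprobe ath9k ps_enable=1; the 0444 permission makes it read-only at runtime.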
@@ -470,7 +474,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -554,7 +557,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->spec_config.fft_period = 0xF;
}
-static void ath9k_init_platform(struct ath_softc *sc)
+static void ath9k_init_pcoem_platform(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -589,6 +592,9 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
ath_info(common, "WB335 2-ANT card detected\n");
+ if (sc->driver_data & ATH9K_PCI_KILLER)
+ ath_info(common, "Killer Wireless card detected\n");
+
/*
* Some WB335 cards do not support antenna diversity. Since
* we use a hardcoded value for AR9565 instead of using the
@@ -661,6 +667,27 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
+static int ath9k_init_soc_platform(struct ath_softc *sc)
+{
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+ struct ath_hw *ah = sc->sc_ah;
+ int ret = 0;
+
+ if (!pdata)
+ return 0;
+
+ if (pdata->eeprom_name) {
+ ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->tx_gain_buffalo)
+ ah->config.tx_gain_buffalo = true;
+
+ return ret;
+}
+
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
@@ -681,13 +708,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
- atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
pCap = &ah->caps;
common = ath9k_hw_common(ah);
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
sc->tx99_power = MAX_RATE_POWER + 1;
+ init_waitqueue_head(&sc->tx_wait);
if (!pdata) {
ah->ah_flags |= AH_USE_EEPROM;
@@ -713,7 +740,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
/*
* Platform quirks.
*/
- ath9k_init_platform(sc);
+ ath9k_init_pcoem_platform(sc);
+
+ ret = ath9k_init_soc_platform(sc);
+ if (ret)
+ return ret;
/*
* Enable WLAN/BT RX Antenna diversity only when:
@@ -727,7 +758,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
-
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
@@ -735,11 +765,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
+ setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
- setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
/*
* Cache line size is used to size and align various
@@ -748,12 +777,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
- if (pdata && pdata->eeprom_name) {
- ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
- if (ret)
- return ret;
- }
-
/* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
if (ret)
@@ -851,6 +874,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
static const struct ieee80211_iface_limit if_dfs_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_ADHOC) },
};
@@ -873,16 +899,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-#ifdef CONFIG_PM
-static const struct wiphy_wowlan_support ath9k_wowlan_support = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
- .n_patterns = MAX_NUM_USER_PATTERN,
- .pattern_min_len = 1,
- .pattern_max_len = MAX_PATTERN_SIZE,
-};
-#endif
-
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -890,13 +907,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@@ -931,19 +950,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-#ifdef CONFIG_PM_SLEEP
- if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
- (sc->driver_data & ATH9K_PCI_WOW) &&
- device_can_wakeup(sc->dev))
- hw->wiphy->wowlan = &ath9k_wowlan_support;
-
- atomic_set(&sc->wow_sleep_proc_intr, -1);
- atomic_set(&sc->wow_got_bmiss_intr, -1);
-#endif
-
hw->queues = 4;
hw->max_rates = 4;
- hw->channel_change_time = 5000;
hw->max_listen_interval = 1;
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
@@ -966,6 +974,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&sc->sbands[IEEE80211_BAND_5GHZ];
+ ath9k_init_wow(hw);
ath9k_reload_chainmask_settings(sc);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -1064,6 +1073,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+ del_timer_sync(&sc->sleep_timer);
ath9k_hw_deinit(sc->sc_ah);
if (sc->dfs_detector != NULL)
sc->dfs_detector->exit(sc->dfs_detector);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index aed7e29dc50f..30dcef5aba10 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -65,50 +65,26 @@ void ath_tx_complete_poll_work(struct work_struct *work)
/*
* Checks if the BB/MAC is hung.
*/
-void ath_hw_check(struct work_struct *work)
+bool ath_hw_check(struct ath_softc *sc)
{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long flags;
- int busy;
- u8 is_alive, nbeacon = 1;
enum ath_reset_type type;
+ bool is_alive;
ath9k_ps_wakeup(sc);
+
is_alive = ath9k_hw_check_alive(sc->sc_ah);
- if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
- goto out;
- else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+ if (!is_alive) {
ath_dbg(common, RESET,
- "DCU stuck is detected. Schedule chip reset\n");
+ "HW hang detected, schedule chip reset\n");
type = RESET_TYPE_MAC_HANG;
- goto sched_reset;
- }
-
- spin_lock_irqsave(&common->cc_lock, flags);
- busy = ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
- busy, sc->hw_busy_count + 1);
- if (busy >= 99) {
- if (++sc->hw_busy_count >= 3) {
- type = RESET_TYPE_BB_HANG;
- goto sched_reset;
- }
- } else if (busy >= 0) {
- sc->hw_busy_count = 0;
- nbeacon = 3;
+ ath9k_queue_reset(sc, type);
}
- ath_start_rx_poll(sc, nbeacon);
- goto out;
-
-sched_reset:
- ath9k_queue_reset(sc, type);
-out:
ath9k_ps_restore(sc);
+
+ return is_alive;
}
/*
@@ -162,29 +138,6 @@ void ath_hw_pll_work(struct work_struct *work)
}
/*
- * RX Polling - monitors baseband hangs.
- */
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
-{
- if (!AR_SREV_9300(sc->sc_ah))
- return;
-
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
- return;
-
- mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
- (nbeacon * sc->cur_beacon_conf.beacon_interval));
-}
-
-void ath_rx_poll(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
-
- if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-}
-
-/*
* PA Pre-distortion.
*/
static void ath_paprd_activate(struct ath_softc *sc)
@@ -409,10 +362,10 @@ void ath_ani_calibrate(unsigned long data)
/* Call ANI routine if necessary */
if (aniflag) {
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock(&common->cc_lock);
ath9k_hw_ani_monitor(ah, ah->curchan);
ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock(&common->cc_lock);
}
/* Perform calibration if necessary */
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6a18f9d3e9cc..5f727588ca27 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -481,8 +481,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
| AR_Q_MISC_CBR_INCR_DIS0);
value = (qi->tqi_readyTime -
(ah->config.sw_beacon_response_time -
- ah->config.dma_beacon_response_time) -
- ah->config.additional_swba_backoff) * 1024;
+ ah->config.dma_beacon_response_time)) * 1024;
REG_WRITE(ah, AR_QRDYTIMECFG(q),
value | AR_Q_RDYTIMECFG_EN);
REG_SET_BIT(ah, AR_DMISC(q),
@@ -550,25 +549,25 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
rs->rs_rssi = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
- rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
} else {
rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
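Folding the six per-chain RSSI fields into rs_rssi_ctl[3]/rs_rssi_ext[3] (see the matching mac.h hunk below) lets consumers loop over chains. A hypothetical consumer fragment, not part of the patch:

	int chain, sum = 0;

	for (chain = 0; chain < 3; chain++)
		sum += rs->rs_rssi_ctl[chain];	/* e.g. sum the ctl-chain RSSI values */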
@@ -923,11 +922,29 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask2 |= AR_IMR_S2_CST;
}
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG) {
+ mask |= AR_IMR_BCNMISC;
+ mask2 |= AR_IMR_S2_BB_WATCHDOG;
+ }
+ }
+
ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
- ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
+ AR_IMR_S2_DTIM |
+ AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND |
+ AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR |
+ AR_IMR_S2_GTT |
+ AR_IMR_S2_CST);
+
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG)
+ ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
+ }
+
ah->imrs2_reg |= mask2;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e3eed81f2439..10271373a0cd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -133,12 +133,8 @@ struct ath_rx_status {
u8 rs_rate;
u8 rs_antenna;
u8 rs_more;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_isaggr;
u8 rs_firstaggr;
u8 rs_moreaggr;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 21aa09e0e825..5924f72dd493 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -82,6 +82,22 @@ static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
return ret;
}
+void ath_ps_full_sleep(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool reset;
+
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock(&common->cc_lock);
+
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
+
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+}
+
void ath9k_ps_wakeup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -92,6 +108,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
if (++sc->ps_usecount != 1)
goto unlock;
+ del_timer_sync(&sc->sleep_timer);
power_mode = sc->sc_ah->power_mode;
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
@@ -117,17 +134,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
enum ath9k_power_mode mode;
unsigned long flags;
- bool reset;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (--sc->ps_usecount != 0)
goto unlock;
if (sc->ps_idle) {
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
- mode = ATH9K_PM_FULL_SLEEP;
- } else if (sc->ps_enabled &&
+ mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
+ goto unlock;
+ }
+
+ if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
@@ -153,7 +170,6 @@ void ath9k_ps_restore(struct ath_softc *sc)
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
- cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
@@ -163,13 +179,13 @@ static void __ath_cancel_work(struct ath_softc *sc)
#endif
}
-static void ath_cancel_work(struct ath_softc *sc)
+void ath_cancel_work(struct ath_softc *sc)
{
__ath_cancel_work(sc);
cancel_work_sync(&sc->hw_reset_work);
}
-static void ath_restart_work(struct ath_softc *sc)
+void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
@@ -177,7 +193,6 @@ static void ath_restart_work(struct ath_softc *sc)
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
- ath_start_rx_poll(sc, 3);
ath_start_ani(sc);
}
@@ -187,11 +202,7 @@ static bool ath_prepare_reset(struct ath_softc *sc)
bool ret = true;
ieee80211_stop_queues(sc->hw);
-
- sc->hw_busy_count = 0;
ath_stop_ani(sc);
- del_timer_sync(&sc->rx_poll_timer);
-
ath9k_hw_disable_interrupts(ah);
if (!ath_drain_all_txq(sc))
@@ -247,6 +258,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
}
}
+ sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
return true;
@@ -319,7 +331,6 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
struct ieee80211_hw *hw = sc->hw;
struct ath9k_channel *hchan;
struct ieee80211_channel *chan = chandef->chan;
- unsigned long flags;
bool offchannel;
int pos = chan->hw_value;
int old_pos = -1;
@@ -337,9 +348,9 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
chan->center_freq, chandef->width);
/* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock_bh(&common->cc_lock);
ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
ath9k_cmn_get_channel(hw, ah, chandef);
@@ -410,12 +421,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
an->vif = vif;
ath_tx_node_init(sc, an);
-
- if (sta->ht_cap.ht_supported) {
- an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
- an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
- }
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -437,14 +442,8 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
- if ((status & ATH9K_INT_FATAL) ||
- (status & ATH9K_INT_BB_WATCHDOG)) {
-
- if (status & ATH9K_INT_FATAL)
- type = RESET_TYPE_FATAL_INT;
- else
- type = RESET_TYPE_BB_WATCHDOG;
-
+ if (status & ATH9K_INT_FATAL) {
+ type = RESET_TYPE_FATAL_INT;
ath9k_queue_reset(sc, type);
/*
@@ -456,6 +455,41 @@ void ath9k_tasklet(unsigned long data)
goto out;
}
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG)) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ ar9003_hw_bb_watchdog_dbg_info(ah);
+ spin_unlock(&common->cc_lock);
+
+ if (ar9003_hw_bb_watchdog_check(ah)) {
+ type = RESET_TYPE_BB_WATCHDOG;
+ ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY,
+ "BB_WATCHDOG: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
+ if (status & ATH9K_INT_GTT) {
+ sc->gtt_cnt++;
+
+ if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
+ type = RESET_TYPE_TX_GTT;
+ ath9k_queue_reset(sc, type);
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY,
+ "GTT: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
@@ -483,12 +517,26 @@ void ath9k_tasklet(unsigned long data)
}
if (status & ATH9K_INT_TX) {
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ /*
+ * For EDMA chips, TX completion is enabled for the
+ * beacon queue, so if a beacon has been transmitted
+ * successfully after a GTT interrupt, the GTT counter
+ * gets reset to zero here.
+ */
+ sc->gtt_cnt = 0;
+
ath_tx_edma_tasklet(sc);
- else
+ } else {
ath_tx_tasklet(sc);
+ }
+
+ wake_up(&sc->tx_wait);
}
+ if (status & ATH9K_INT_GENTIMER)
+ ath_gen_timer_isr(sc->sc_ah);
+
ath9k_btcoex_handle_interrupt(sc, status);
/* re-enable hardware interrupt */
@@ -511,14 +559,15 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_TX | \
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
+ ATH9K_INT_GTT | \
ATH9K_INT_TSFOOR | \
ATH9K_INT_GENTIMER | \
ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
enum ath9k_int status;
+ u32 sync_cause = 0;
bool sched = false;
/*
@@ -545,7 +594,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* bits we haven't explicitly enabled so we mask the
 * value to ensure we only process bits we requested.
*/
- ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
+ ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+ ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
/*
@@ -569,25 +619,19 @@ irqreturn_t ath_isr(int irq, void *dev)
!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
- (status & ATH9K_INT_BB_WATCHDOG)) {
-
- spin_lock(&common->cc_lock);
- ath_hw_cycle_counters_update(common);
- ar9003_hw_bb_watchdog_dbg_info(ah);
- spin_unlock(&common->cc_lock);
-
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG))
goto chip_reset;
- }
-#ifdef CONFIG_PM_SLEEP
+
+#ifdef CONFIG_ATH9K_WOW
if (status & ATH9K_INT_BMISS) {
if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
- ath_dbg(common, ANY, "during WoW we got a BMISS\n");
atomic_inc(&sc->wow_got_bmiss_intr);
atomic_dec(&sc->wow_sleep_proc_intr);
}
}
#endif
+
if (status & ATH9K_INT_SWBA)
tasklet_schedule(&sc->bcon_tasklet);
@@ -627,7 +671,7 @@ chip_reset:
#undef SCHED_INTR
}
-static int ath_reset(struct ath_softc *sc)
+int ath_reset(struct ath_softc *sc)
{
int r;
@@ -705,12 +749,19 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ah->imask |= ATH9K_INT_RXHP |
- ATH9K_INT_RXLP |
- ATH9K_INT_BB_WATCHDOG;
+ ATH9K_INT_RXLP;
else
ah->imask |= ATH9K_INT_RX;
- ah->imask |= ATH9K_INT_GTT;
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
+ ah->imask |= ATH9K_INT_BB_WATCHDOG;
+
+ /*
+ * Enable GTT interrupts only for AR9003/AR9004 chips
+ * for now.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
@@ -735,6 +786,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
*/
ath9k_cmn_init_crypto(sc->sc_ah);
+ ath9k_hw_reset_tsf(ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
mutex_unlock(&sc->mutex);
@@ -831,7 +884,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
- del_timer_sync(&sc->rx_poll_timer);
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
@@ -1636,13 +1688,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
- (changed & BSS_CHANGED_BEACON_INT)) {
- if (ah->opmode == NL80211_IFTYPE_AP &&
- bss_conf->enable_beacon)
- ath9k_set_tsfadjust(sc, vif);
- if (ath9k_allow_beacon_config(sc, vif))
- ath9k_beacon_config(sc, vif, changed);
- }
+ (changed & BSS_CHANGED_BEACON_INT))
+ ath9k_beacon_config(sc, vif, changed);
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
@@ -1767,13 +1814,12 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
- unsigned long flags;
int pos;
if (config_enabled(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
- spin_lock_irqsave(&common->cc_lock, flags);
+ spin_lock_bh(&common->cc_lock);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1787,7 +1833,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
return -ENOENT;
}
@@ -1795,7 +1841,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
pos = chan->hw_value;
memcpy(survey, &sc->survey[pos], sizeof(*survey));
survey->channel = chan;
- spin_unlock_irqrestore(&common->cc_lock, flags);
+ spin_unlock_bh(&common->cc_lock);
return 0;
}
@@ -1818,13 +1864,31 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
mutex_unlock(&sc->mutex);
}
+static bool ath9k_has_tx_pending(struct ath_softc *sc)
+{
+ int i, npend;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ if (!sc->tx.txq[i].axq_depth)
+ continue;
+
+ npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ if (npend)
+ break;
+ }
+
+ return !!npend;
+}
+
static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int timeout = 200; /* ms */
- int i, j;
+ int timeout = HZ / 5; /* 200 ms */
bool drain_txq;
mutex_lock(&sc->mutex);
@@ -1842,25 +1906,9 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- for (j = 0; j < timeout; j++) {
- bool npend = false;
-
- if (j)
- usleep_range(1000, 2000);
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
-
- npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
-
- if (npend)
- break;
- }
-
- if (!npend)
- break;
- }
+ if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+ timeout) > 0)
+ drop = false;
if (drop) {
ath9k_ps_wakeup(sc);
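The flush rework replaces the 1 ms polling loop with a waitqueue: the TX tasklet above calls wake_up(&sc->tx_wait) after completions, and ath9k_flush() sleeps until the queues drain or 200 ms pass. A condensed sketch of the pairing (field names as in the patch):

	/* completion side (ath9k_tasklet) */
	wake_up(&sc->tx_wait);

	/* flush side: wait_event_timeout() returns > 0 when the condition
	 * became true before the timeout, so nothing is left to drop */
	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc), HZ / 5) > 0)
		drop = false;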
@@ -2022,333 +2070,6 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static void ath9k_wow_map_triggers(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan,
- u32 *wow_triggers)
-{
- if (wowlan->disconnect)
- *wow_triggers |= AH_WOW_LINK_CHANGE |
- AH_WOW_BEACON_MISS;
- if (wowlan->magic_pkt)
- *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
-
- if (wowlan->n_patterns)
- *wow_triggers |= AH_WOW_USER_PATTERN_EN;
-
- sc->wow_enabled = *wow_triggers;
-
-}
-
-static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int pattern_count = 0;
- int i, byte_cnt;
- u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
- u8 dis_deauth_mask[MAX_PATTERN_SIZE];
-
- memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
- memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
-
- /*
- * Create Disassociate / Deauthenticate packet filter
- *
- * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
- * +--------------+----------+---------+--------+--------+----
- * + Frame Control+ Duration + DA + SA + BSSID +
- * +--------------+----------+---------+--------+--------+----
- *
- * The above is the management frame format for disassociate/
- * deauthenticate pattern, from this we need to match the first byte
- * of 'Frame Control' and DA, SA, and BSSID fields
- * (skipping the 2nd byte of FC and the Duration field).
- *
- * Disassociate pattern
- * --------------------
- * Frame control = 00 00 1010
- * DA, SA, BSSID = x:x:x:x:x:x
- * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- * | x:x:x:x:x:x -- 22 bytes
- *
- * Deauthenticate pattern
- * ----------------------
- * Frame control = 00 00 1100
- * DA, SA, BSSID = x:x:x:x:x:x
- * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
- * | x:x:x:x:x:x -- 22 bytes
- */
-
- /* Create Disassociate Pattern first */
-
- byte_cnt = 0;
-
- /* Fill out the mask with all FF's */
-
- for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
- dis_deauth_mask[i] = 0xff;
-
- /* copy the first byte of frame control field */
- dis_deauth_pattern[byte_cnt] = 0xa0;
- byte_cnt++;
-
- /* skip 2nd byte of frame control and Duration field */
- byte_cnt += 3;
-
- /*
- * need not match the destination mac address, it can be a broadcast
- * mac address or a unicast to this station
- */
- byte_cnt += 6;
-
- /* copy the source mac address */
- memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
- byte_cnt += 6;
-
- /* copy the bssid, its same as the source mac address */
-
- memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
- /* Create Disassociate pattern mask */
-
- dis_deauth_mask[0] = 0xfe;
- dis_deauth_mask[1] = 0x03;
- dis_deauth_mask[2] = 0xc0;
-
- ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
-
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
-
- pattern_count++;
- /*
- * for de-authenticate pattern, only the first byte of the frame
- * control field gets changed from 0xA0 to 0xC0
- */
- dis_deauth_pattern[0] = 0xC0;
-
- ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
- pattern_count, byte_cnt);
-
-}
-
-static void ath9k_wow_add_pattern(struct ath_softc *sc,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_wow_pattern *wow_pattern = NULL;
- struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
- int mask_len;
- s8 i = 0;
-
- if (!wowlan->n_patterns)
- return;
-
- /*
- * Add the new user configured patterns
- */
- for (i = 0; i < wowlan->n_patterns; i++) {
-
- wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
-
- if (!wow_pattern)
- return;
-
- /*
- * TODO: convert the generic user space pattern to
- * appropriate chip specific/802.11 pattern.
- */
-
- mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
- memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
- memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
- memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
- patterns[i].pattern_len);
- memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
- wow_pattern->pattern_len = patterns[i].pattern_len;
-
- /*
- * just need to take care of deauth and disassoc patterns,
- * make sure we don't overwrite them.
- */
-
- ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
- wow_pattern->mask_bytes,
- i + 2,
- wow_pattern->pattern_len);
- kfree(wow_pattern);
-
- }
-
-}
-
-static int ath9k_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_triggers_enabled = 0;
- int ret = 0;
-
- mutex_lock(&sc->mutex);
-
- ath_cancel_work(sc);
- ath_stop_ani(sc);
- del_timer_sync(&sc->rx_poll_timer);
-
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
- ath_dbg(common, ANY, "Device not present\n");
- ret = -EINVAL;
- goto fail_wow;
- }
-
- if (WARN_ON(!wowlan)) {
- ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
- ret = -EINVAL;
- goto fail_wow;
- }
-
- if (!device_can_wakeup(sc->dev)) {
- ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
- ret = 1;
- goto fail_wow;
- }
-
- /*
- * none of the sta vifs are associated
- * and we are not currently handling multivif
- * cases, for instance we have to separately
- * configure 'keep alive frame' for each
- * STA.
- */
-
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- ath_dbg(common, WOW, "None of the STA vifs are associated\n");
- ret = 1;
- goto fail_wow;
- }
-
- if (sc->nvifs > 1) {
- ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
- ret = 1;
- goto fail_wow;
- }
-
- ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
-
- ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
- wow_triggers_enabled);
-
- ath9k_ps_wakeup(sc);
-
- ath9k_stop_btcoex(sc);
-
- /*
- * Enable wake up on receiving disassoc/deauth
- * frame by default.
- */
- ath9k_wow_add_disassoc_deauth_pattern(sc);
-
- if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
- ath9k_wow_add_pattern(sc, wowlan);
-
- spin_lock_bh(&sc->sc_pcu_lock);
- /*
- * To avoid false wake, we enable beacon miss interrupt only
- * when we go to sleep. We save the current interrupt mask
- * so we can restore it after the system wakes up
- */
- sc->wow_intr_before_sleep = ah->imask;
- ah->imask &= ~ATH9K_INT_GLOBAL;
- ath9k_hw_disable_interrupts(ah);
- ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- /*
- * we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock
- */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
-
- ath9k_hw_wow_enable(ah, wow_triggers_enabled);
-
- ath9k_ps_restore(sc);
- ath_dbg(common, ANY, "WoW enabled in ath9k\n");
- atomic_inc(&sc->wow_sleep_proc_intr);
-
-fail_wow:
- mutex_unlock(&sc->mutex);
- return ret;
-}
-
-static int ath9k_resume(struct ieee80211_hw *hw)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 wow_status;
-
- mutex_lock(&sc->mutex);
-
- ath9k_ps_wakeup(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- ath9k_hw_disable_interrupts(ah);
- ah->imask = sc->wow_intr_before_sleep;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- wow_status = ath9k_hw_wow_wakeup(ah);
-
- if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
- /*
- * some devices may not pick beacon miss
- * as the reason they woke up so we add
- * that here for that shortcoming.
- */
- wow_status |= AH_WOW_BEACON_MISS;
- atomic_dec(&sc->wow_got_bmiss_intr);
- ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
- }
-
- atomic_dec(&sc->wow_sleep_proc_intr);
-
- if (wow_status) {
- ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
- ath9k_hw_wow_event_to_string(wow_status), wow_status);
- }
-
- ath_restart_work(sc);
- ath9k_start_btcoex(sc);
-
- ath9k_ps_restore(sc);
- mutex_unlock(&sc->mutex);
-
- return 0;
-}
-
-static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
-{
- struct ath_softc *sc = hw->priv;
-
- mutex_lock(&sc->mutex);
- device_init_wakeup(sc->dev, 1);
- device_set_wakeup_enable(sc->dev, enabled);
- mutex_unlock(&sc->mutex);
-}
-
-#endif
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
@@ -2374,134 +2095,6 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
sc->csa_vif = vif;
}
-static void ath9k_tx99_stop(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_drain_all_txq(sc);
- ath_startrecv(sc);
-
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
-
- ieee80211_wake_queues(sc->hw);
-
- kfree_skb(sc->tx99_skb);
- sc->tx99_skb = NULL;
- sc->tx99_state = false;
-
- ath9k_hw_tx99_stop(sc->sc_ah);
- ath_dbg(common, XMIT, "TX99 stopped\n");
-}
-
-static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
-{
- static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
- 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
- 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
- 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
- 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
- 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
- 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
- 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
- u32 len = 1200;
- struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *tx_info;
- struct sk_buff *skb;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- skb_put(skb, len);
-
- memset(skb->data, 0, len);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
- hdr->duration_id = 0;
-
- memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
-
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
-
- tx_info = IEEE80211_SKB_CB(skb);
- memset(tx_info, 0, sizeof(*tx_info));
- tx_info->band = hw->conf.chandef.chan->band;
- tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
- tx_info->control.vif = sc->tx99_vif;
-
- memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
-
- return skb;
-}
-
-void ath9k_tx99_deinit(struct ath_softc *sc)
-{
- ath_reset(sc);
-
- ath9k_ps_wakeup(sc);
- ath9k_tx99_stop(sc);
- ath9k_ps_restore(sc);
-}
-
-int ath9k_tx99_init(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_tx_control txctl;
- int r;
-
- if (sc->sc_flags & SC_OP_INVALID) {
- ath_err(common,
- "driver is in invalid state unable to use TX99");
- return -EINVAL;
- }
-
- sc->tx99_skb = ath9k_build_tx99_skb(sc);
- if (!sc->tx99_skb)
- return -ENOMEM;
-
- memset(&txctl, 0, sizeof(txctl));
- txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
-
- ath_reset(sc);
-
- ath9k_ps_wakeup(sc);
-
- ath9k_hw_disable_interrupts(ah);
- atomic_set(&ah->intr_ref_cnt, -1);
- ath_drain_all_txq(sc);
- ath_stoprecv(sc);
-
- sc->tx99_state = true;
-
- ieee80211_stop_queues(hw);
-
- if (sc->tx99_power == MAX_RATE_POWER + 1)
- sc->tx99_power = MAX_RATE_POWER;
-
- ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
- r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
- if (r) {
- ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
- return r;
- }
-
- ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
- sc->tx99_power,
- sc->tx99_power / 2);
-
- /* We leave the hardware awake as it will be chugging on */
-
- return 0;
-}
-
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2532,7 +2125,7 @@ struct ieee80211_ops ath9k_ops = {
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
.suspend = ath9k_suspend,
.resume = ath9k_resume,
.set_wakeup = ath9k_set_wakeup,
@@ -2544,7 +2137,7 @@ struct ieee80211_ops ath9k_ops = {
.get_et_strings = ath9k_get_et_strings,
#endif
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
.sta_add_debugfs = ath9k_sta_add_debugfs,
#endif
.sw_scan_start = ath9k_sw_scan_start,
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 0ac1b5f04256..71799fcade54 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -200,7 +200,7 @@ skip_tuning:
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
- btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
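/*
 * Example: with btcoex_period = 40000 and duty_cycle = 55 this now
 * yields 40000 * (100 - 55) / 100 = 18000 for the no-stomp window,
 * the same value the old "* 1000" form produced for a 40 ms period;
 * this suggests btcoex_period is now kept in microseconds.
 */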
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5656fce4ff5..55724b02316b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,19 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+
+ /* Killer Wireless (3x3) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2000),
+ .driver_data = ATH9K_PCI_KILLER },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2001),
+ .driver_data = ATH9K_PCI_KILLER },
+
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
/* PCI-E CUS198 */
@@ -354,6 +367,13 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x1783),
.driver_data = ATH9K_PCI_WOW },
+ /* Killer Wireless (2x2) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2003),
+ .driver_data = ATH9K_PCI_KILLER },
+
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
@@ -392,6 +412,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
+ 0x06B2),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0842),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
0x6671),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -404,6 +434,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x1B9A, /* XAVI */
0x2812),
.driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A1),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
/* WB335 1-ANT / Antenna Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -448,13 +488,18 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
- PCI_VENDOR_ID_AZWAVE,
- 0x213A),
+ 0x11AD, /* LITEON */
+ 0x06A2),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
- PCI_VENDOR_ID_LENOVO,
- 0x3026),
+ 0x11AD, /* LITEON */
+ 0x0682),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
@@ -468,38 +513,41 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x2005),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_DELL,
- 0x020E),
+ 0x020C),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
- /* WB335 2-ANT */
+ /* WB335 2-ANT / Antenna-Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411A),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411B),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411C),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411D),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411E),
- .driver_data = ATH9K_PCI_AR9565_2ANT },
-
- /* WB335 2-ANT / Antenna-Diversity */
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
@@ -527,11 +575,31 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x11AD, /* LITEON */
+ 0x0832),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0692),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
PCI_VENDOR_ID_AZWAVE,
0x2130),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2182),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x144F, /* ASKEY */
0x7202),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -542,9 +610,49 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
0x185F, /* WNC */
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA120),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE07F),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE081),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x4026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x85F2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E AR9565 (WB335) */
{ PCI_VDEVICE(ATHEROS, 0x0036),
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 95ddca5495d4..82e340d3ec60 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,7 +15,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -420,7 +419,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
- if (AR_SREV_9550(sc->sc_ah))
+ if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
rfilt |= ATH9K_RX_FILTER_4ADDRESS;
return rfilt;
@@ -733,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
/*
- * mark descriptor as zero-length and set the 'more'
- * flag to ensure that both buffers get discarded
+ * Re-check previous descriptor, in case it has been filled
+ * in the meantime.
*/
- rs->rs_datalen = 0;
- rs->rs_more = true;
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ /*
+ * mark descriptor as zero-length and set the 'more'
+ * flag to ensure that both buffers get discarded
+ */
+ rs->rs_datalen = 0;
+ rs->rs_more = true;
+ }
}
list_del(&bf->list);
@@ -851,20 +857,15 @@ static int ath9k_process_rate(struct ath_common *common,
enum ieee80211_band band;
unsigned int i = 0;
struct ath_softc __maybe_unused *sc = common->priv;
+ struct ath_hw *ah = sc->sc_ah;
- band = hw->conf.chandef.chan->band;
+ band = ah->curchan->chan->band;
sband = hw->wiphy->bands[band];
- switch (hw->conf.chandef.width) {
- case NL80211_CHAN_WIDTH_5:
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
rxs->flag |= RX_FLAG_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
rxs->flag |= RX_FLAG_10MHZ;
- break;
- default:
- break;
- }
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
@@ -906,6 +907,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
+ int i, j;
/*
* RSSI is not available for subframes in an A-MPDU.
@@ -924,6 +926,20 @@ static void ath9k_process_rssi(struct ath_common *common,
return;
}
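+ /*
+ * Report per-chain signal strength below: rs_rssi_ctl[] is relative
+ * to the noise floor, so ah->noise is added to obtain an absolute
+ * dBm value for mac80211's chain_signal[].
+ */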
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
/*
* Update Beacon RSSI, this is used by ANI.
*/
@@ -960,201 +976,6 @@ static void ath9k_process_tsf(struct ath_rx_status *rs,
rxs->mactime += 0x100000000ULL;
}
-#ifdef CONFIG_ATH9K_DEBUGFS
-static s8 fix_rssi_inv_only(u8 rssi_val)
-{
- if (rssi_val == 128)
- rssi_val = 0;
- return (s8) rssi_val;
-}
-#endif
-
-/* returns 1 if this was a spectral frame, even if not handled. */
-static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
-{
-#ifdef CONFIG_ATH9K_DEBUGFS
- struct ath_hw *ah = sc->sc_ah;
- u8 num_bins, *bins, *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample_20;
- struct fft_sample_ht20_40 fft_sample_40;
- struct fft_sample_tlv *tlv;
- struct ath_radar_info *radar_info;
- int len = rs->rs_datalen;
- int dc_pos;
- u16 fft_len, length, freq = ah->curchan->chan->center_freq;
- enum nl80211_channel_type chan_type;
-
- /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
- * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
- * yet, but this is supposed to be possible as well.
- */
- if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
- rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
- rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
- return 0;
-
- /* check if spectral scan bit is set. This does not have to be checked
- * if received through a SPECTRAL phy error, but shouldn't hurt.
- */
- radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
- if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
- return 0;
-
- chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_40_NUM_BINS;
- bins = (u8 *)fft_sample_40.data;
- } else {
- fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
- num_bins = SPECTRAL_HT20_NUM_BINS;
- bins = (u8 *)fft_sample_20.data;
- }
-
- /* Variation in the data length is possible and will be fixed later */
- if ((len > fft_len + 2) || (len < fft_len - 1))
- return 1;
-
- switch (len - fft_len) {
- case 0:
- /* length correct, nothing to do. */
- memcpy(bins, vdata, num_bins);
- break;
- case -1:
- /* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, num_bins - 1);
- bins[0] = vdata[0];
- break;
- case 2:
- /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
- memcpy(bins, vdata, 30);
- bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], num_bins - 31);
- break;
- case 1:
- /* MAC added 2 extra bytes AND first byte is missing. */
- bins[0] = vdata[0];
- memcpy(&bins[1], vdata, 30);
- bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], num_bins - 32);
- break;
- default:
- return 1;
- }
-
- /* DC value (value in the middle) is the blind spot of the spectral
- * sample and invalid, interpolate it.
- */
- dc_pos = num_bins / 2;
- bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- s8 lower_rssi, upper_rssi;
- s16 ext_nf;
- u8 lower_max_index, upper_max_index;
- u8 lower_bitmap_w, upper_bitmap_w;
- u16 lower_mag, upper_mag;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_ht20_40_mag_info *mag_info;
-
- if (caldata)
- ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
- caldata->nfCalHist[3].privNF);
- else
- ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
- length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
- fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
- fft_sample_40.tlv.length = __cpu_to_be16(length);
- fft_sample_40.freq = __cpu_to_be16(freq);
- fft_sample_40.channel_type = chan_type;
-
- if (chan_type == NL80211_CHAN_HT40PLUS) {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
-
- fft_sample_40.lower_noise = ah->noise;
- fft_sample_40.upper_noise = ext_nf;
- } else {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-
- fft_sample_40.lower_noise = ext_nf;
- fft_sample_40.upper_noise = ah->noise;
- }
- fft_sample_40.lower_rssi = lower_rssi;
- fft_sample_40.upper_rssi = upper_rssi;
-
- mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
- lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
- fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins);
- fft_sample_40.lower_max_index = lower_max_index;
- fft_sample_40.upper_max_index = upper_max_index;
- lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
- upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
- fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
- fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
- fft_sample_40.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_40.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_40;
- } else {
- u8 max_index, bitmap_w;
- u16 magnitude;
- struct ath_ht20_mag_info *mag_info;
-
- length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
- fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample_20.tlv.length = __cpu_to_be16(length);
- fft_sample_20.freq = __cpu_to_be16(freq);
-
- fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample_20.noise = ah->noise;
-
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins);
- fft_sample_20.max_index = max_index;
- bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample_20.bitmap_weight = bitmap_w;
- fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_20.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_20;
- }
-
- ath_debug_send_fft_sample(sc, tlv);
- return 1;
-#else
- return 0;
-#endif
-}
-
-static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (ieee80211_is_beacon(hdr->frame_control)) {
- RX_STAT_INC(rx_beacons);
- if (!is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid))
- return true;
- }
-
- return false;
-}
-
/*
* For Decrypt or Demic errors, we only mark packet status here and always push
* up the frame up to let mac80211 handle the actual error case, be it no
@@ -1171,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
- int ret = 0;
/*
* Discard corrupt descriptors which are marked in
* ath_get_next_rx_buf().
*/
- sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
- return -EINVAL;
+ goto corrupt;
+
+ sc->rx.discard_next = false;
/*
* Discard zero-length packets.
*/
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
+ /*
+ * rs_status follows rs_datalen so if rs_datalen is too large
+ * we can take a hint that hardware corrupted it, so ignore
+ * those frames.
+ */
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
/* Only use status info from the last fragment */
@@ -1210,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* This is different from the other corrupt descriptor
* condition handled above.
*/
- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
- ret = -EINVAL;
- goto exit;
- }
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+ goto corrupt;
hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
@@ -1229,34 +1048,34 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ return -EINVAL;
- rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
- if (rx_stats->is_mybeacon) {
- sc->hw_busy_count = 0;
- ath_start_rx_poll(sc, 3);
+ if (ath_is_mybeacon(common, hdr)) {
+ RX_STAT_INC(rx_beacons);
+ rx_stats->is_mybeacon = true;
}
- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
- ret =-EINVAL;
- goto exit;
- }
+ /*
+ * This shouldn't happen, but have a safety check anyway.
+ */
+ if (WARN_ON(!ah->curchan))
+ return -EINVAL;
+
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ return -EINVAL;
ath9k_process_rssi(common, hw, rx_stats, rx_status);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
@@ -1266,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
sc->rx.num_pkts++;
#endif
-exit:
- sc->rx.discard_next = false;
- return ret;
+ return 0;
+
+corrupt:
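+ /*
+ * If the corrupt frame was split across buffers (rs_more set),
+ * make sure the following buffer is discarded as well.
+ */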
+ sc->rx.discard_next = rx_stats->rs_more;
+ return -EINVAL;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
@@ -1521,8 +1342,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_antenna_check(sc, &rs);
-
ath9k_apply_ampdu_details(sc, &rs, rxs);
+ ath_debug_rate_stats(sc, &rs, skb);
ieee80211_rx(hw, skb);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index a13b2d143d9e..b1fd3fa84983 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -304,6 +304,7 @@
#define AR_IMR_S2 0x00ac
#define AR_IMR_S2_QCU_TXURN 0x000003FF
#define AR_IMR_S2_QCU_TXURN_S 0
+#define AR_IMR_S2_BB_WATCHDOG 0x00010000
#define AR_IMR_S2_CST 0x00400000
#define AR_IMR_S2_GTT 0x00800000
#define AR_IMR_S2_TIM 0x01000000
@@ -809,7 +810,12 @@
#define AR_SREV_REVISION_9462_21 3
#define AR_SREV_VERSION_9565 0x2C0
#define AR_SREV_REVISION_9565_10 0
+#define AR_SREV_REVISION_9565_101 1
+#define AR_SREV_REVISION_9565_11 2
#define AR_SREV_VERSION_9550 0x400
+#define AR_SREV_VERSION_9531 0x500
+#define AR_SREV_REVISION_9531_10 0
+#define AR_SREV_REVISION_9531_11 1
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -881,9 +887,6 @@
#define AR_SREV_9330(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
-#define AR_SREV_9330_10(_ah) \
- (AR_SREV_9330((_ah)) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_10))
#define AR_SREV_9330_11(_ah) \
(AR_SREV_9330((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_11))
@@ -927,10 +930,18 @@
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
-
#define AR_SREV_9565_10(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+#define AR_SREV_9565_101(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
+#define AR_SREV_9565_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
+#define AR_SREV_9565_11_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
#define AR_SREV_9550(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
@@ -938,11 +949,19 @@
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
-
#define AR_SREV_9580_10(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10))
+#define AR_SREV_9531(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531))
+#define AR_SREV_9531_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_10))
+#define AR_SREV_9531_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11))
+
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
(AR_SREV_9580(_ah))
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
new file mode 100644
index 000000000000..99f4de95c264
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "ath9k.h"
+
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+}
+
+static void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+ if (!sc->rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
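+
+/*
+ * Each sample reaches user space through the relay channel as a TLV:
+ * a fft_sample_tlv header followed by the type-specific payload, so a
+ * reader can skip record types it does not understand.
+ */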
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
+ struct ath_radar_info *radar_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
+
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ */
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
+
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
+
+ switch (len - fft_len) {
+ case 0:
+ /* length correct, nothing to do. */
+ memcpy(bins, vdata, num_bins);
+ break;
+ case -1:
+ /* first byte missing, duplicate it. */
+ memcpy(&bins[1], vdata, num_bins - 1);
+ bins[0] = vdata[0];
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+ memcpy(bins, vdata, 30);
+ bins[30] = vdata[31];
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte is missing. */
+ bins[0] = vdata[0];
+ memcpy(&bins[1], vdata, 30);
+ bins[31] = vdata[31];
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
+ break;
+ default:
+ return 1;
+ }
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and invalid, interpolate it.
+ */
+ dc_pos = num_bins / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
+
+ ath_debug_send_fft_sample(sc, tlv);
+
+ return 1;
+}
+
+/*********************/
+/* spectral_scan_ctl */
+/*********************/
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *mode = "";
+ unsigned int len;
+
+ switch (sc->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ ssize_t len;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_spectral_scan_trigger(sc->hw);
+ } else if (strncmp("background", buf, 9) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
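+
+/*
+ * Typical usage from user space (the debugfs path below is an example
+ * and depends on the debugfs mount point and phy name):
+ *
+ *   echo background > /sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl
+ *   echo trigger    > /sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl
+ *   (read FFT samples from the spectral_scan0 relay file)
+ *   echo disable    > /sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl
+ */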
+
+/*************************/
+/* spectral_short_repeat */
+/*************************/
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ sc->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/******************/
+/* spectral_count */
+/******************/
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* spectral_period */
+/*******************/
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/***********************/
+/* spectral_fft_period */
+/***********************/
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 15)
+ return -EINVAL;
+
+ sc->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* Relay interface */
+/*******************/
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+/*********************/
+/* Debug Init/Deinit */
+/*********************/
+
+void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+{
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
+}
+
+void ath9k_spectral_init_debug(struct ath_softc *sc)
+{
+ sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+ sc->debug.debugfs_phy,
+ 1024, 256, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_period",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period",
+ S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_fft_period);
+}
diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/spectral.h
new file mode 100644
index 000000000000..ead63412ee1a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+ */
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+};
+
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_NUM_BINS 56
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_40_NUM_BINS 128
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+}
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+ s8 m = (bins[2] & 0xfc) >> 2;
+
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+ else
+ m &= ~0xe0;
+
+ return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
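+
+/*
+ * Worked example for the helpers above, with bins[] = { 0x7f, 0xa5, 0x82 }
+ * (values chosen purely for illustration):
+ *
+ *   spectral_bitmap_weight(): 0x7f & 0x3f = 63
+ *   spectral_max_magnitude(): (0x40 >> 6) | (0xa5 << 2) | (0x02 << 10)
+ *                             = 0x001 | 0x294 | 0x800 = 0xa95 = 2709
+ *   spectral_max_index():     (0x82 & 0xfc) >> 2 = 32, masked down to 0,
+ *                             plus 29 = 29
+ */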
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length at the front position and change
+ * other fields after adding another sample type
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
+};
+
+struct fft_sample_tlv {
+ u8 type; /* see ath_fft_sample_type */
+ __be16 length;
+ /* type dependent data follows */
+} __packed;
+
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+
+ u8 max_exp;
+
+ __be16 freq;
+ s8 rssi;
+ s8 noise;
+
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+
+ __be64 tsf;
+
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+void ath9k_spectral_init_debug(struct ath_softc *sc);
+void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode);
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf);
+#else
+static inline int ath_process_fft(struct ath_softc *sc,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_DEBUGFS */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
new file mode 100644
index 000000000000..b686a7498450
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ rate = &tx_info->control.rates[0];
+ tx_info->band = hw->conf.chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+ rate->count = 1;
+ if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ if (IS_CHAN_HT40(ah->curchan))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
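+
+/*
+ * The frame built above is a 1200-byte data frame addressed to the
+ * device's own permanent MAC address, marked NO_ACK and limited to a
+ * single rate (MCS if the current channel is HT). Only the first
+ * sizeof(PN9Data) bytes of the payload carry the PN9 pseudo-random
+ * sequence; the rest of the buffer stays zeroed. TX99 mode keeps
+ * retransmitting this one frame for continuous-transmit RF testing.
+ */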
+
+static void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+static int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_err(common,
+ "driver is in invalid state unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ /* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
+ if (sc->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
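+ /* tx99_power is kept in half-dBm steps, hence the divide by two for dBm. */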
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_tx99_init_debug(struct ath_softc *sc)
+{
+ if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+ return;
+
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+}
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 81c88dd606dc..1b3230fa3651 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,409 +14,347 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/export.h>
#include "ath9k.h"
-#include "reg.h"
-#include "hw-ops.h"
-const char *ath9k_hw_wow_event_to_string(u32 wow_event)
-{
- if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
- return "Magic pattern";
- if (wow_event & AH_WOW_USER_PATTERN_EN)
- return "User pattern";
- if (wow_event & AH_WOW_LINK_CHANGE)
- return "Link change";
- if (wow_event & AH_WOW_BEACON_MISS)
- return "Beacon miss";
-
- return "unknown reason";
-}
-EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_USER_PATTERN,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
-static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+static void ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan,
+ u32 *wow_triggers)
{
- struct ath_common *common = ath9k_hw_common(ah);
+ if (wowlan->disconnect)
+ *wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
+ if (wowlan->magic_pkt)
+ *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
- REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+ if (wowlan->n_patterns)
+ *wow_triggers |= AH_WOW_USER_PATTERN_EN;
- /* set rx disable bit */
- REG_WRITE(ah, AR_CR, AR_CR_RXD);
-
- if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
- ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
- REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
- return;
- }
+ sc->wow_enabled = *wow_triggers;
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
}
-static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
- u32 ctl[13] = {0};
- u32 data_word[KAL_NUM_DATA_WORDS];
- u8 i;
- u32 wow_ka_data_word0;
-
- memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
- memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
-
- /* set the transmit buffer */
- ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
- ctl[1] = 0;
- ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
- ctl[4] = 0;
- ctl[7] = (ah->txchainmask) << 2;
- ctl[2] = 0xf << 16; /* tx_tries 0 */
-
- for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
- data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
- (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
- data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
- (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
- data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
- (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
- data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
- (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
- data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
- (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
- data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
-
- if (AR_SREV_9462_20(ah)) {
- /* AR9462 2.0 has an extra descriptor word (time based
- * discard) compared to other chips */
- REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
- wow_ka_data_word0 = AR_WOW_TXBUF(13);
- } else {
- wow_ka_data_word0 = AR_WOW_TXBUF(12);
- }
-
- for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
- REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
-}
-
-void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
- u8 *user_mask, int pattern_count,
- int pattern_len)
-{
- int i;
- u32 pattern_val, mask_val;
- u32 set, clr;
+ int pattern_count = 0;
+ int i, byte_cnt;
+ u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+ u8 dis_deauth_mask[MAX_PATTERN_SIZE];
- /* FIXME: should check count by querying the hardware capability */
- if (pattern_count >= MAX_NUM_PATTERN)
- return;
-
- REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
-
- /* set the registers for pattern */
- for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
- memcpy(&pattern_val, user_pattern, 4);
- REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
- pattern_val);
- user_pattern += 4;
- }
-
- /* set the registers for mask */
- for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
- memcpy(&mask_val, user_mask, 4);
- REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
- user_mask += 4;
- }
+ memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+ memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
- /* set the pattern length to be matched
+ /*
+ * Create Disassociate / Deauthenticate packet filter
+ *
+ * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
+ * +--------------+----------+---------+--------+--------+----
+ * + Frame Control+ Duration + DA + SA + BSSID +
+ * +--------------+----------+---------+--------+--------+----
*
- * AR_WOW_LENGTH1_REG1
- * bit 31:24 pattern 0 length
- * bit 23:16 pattern 1 length
- * bit 15:8 pattern 2 length
- * bit 7:0 pattern 3 length
+ * The above is the management frame format for disassociate/
+ * deauthenticate patterns; from this we need to match the first byte
+ * of 'Frame Control' and the DA, SA, and BSSID fields
+ * (skipping the 2nd byte of FC and the Duration field).
*
- * AR_WOW_LENGTH1_REG2
- * bit 31:24 pattern 4 length
- * bit 23:16 pattern 5 length
- * bit 15:8 pattern 6 length
- * bit 7:0 pattern 7 length
+ * Disassociate pattern
+ * --------------------
+ * Frame control = 00 00 1010
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
*
- * the below logic writes out the new
- * pattern length for the corresponding
- * pattern_count, while masking out the
- * other fields
+ * Deauthenticate pattern
+ * ----------------------
+ * Frame control = 00 00 1100
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
*/
- ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
-
- if (pattern_count < 4) {
- /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
- set = (pattern_len & AR_WOW_LENGTH_MAX) <<
- AR_WOW_LEN1_SHIFT(pattern_count);
- clr = AR_WOW_LENGTH1_MASK(pattern_count);
- REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
- } else {
- /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
- set = (pattern_len & AR_WOW_LENGTH_MAX) <<
- AR_WOW_LEN2_SHIFT(pattern_count);
- clr = AR_WOW_LENGTH2_MASK(pattern_count);
- REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
- }
+ /* Create Disassociate Pattern first */
-}
-EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+ byte_cnt = 0;
-u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
-{
- u32 wow_status = 0;
- u32 val = 0, rval;
+ /* Fill out the mask with all FF's */
- /*
- * read the WoW status register to know
- * the wakeup reason
- */
- rval = REG_READ(ah, AR_WOW_PATTERN);
- val = AR_WOW_STATUS(rval);
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+ dis_deauth_mask[i] = 0xff;
- /*
- * mask only the WoW events that we have enabled. Sometimes
- * we have spurious WoW events from the AR_WOW_PATTERN
- * register. This mask will clean it up.
- */
+ /* copy the first byte of the frame control field */
+ dis_deauth_pattern[byte_cnt] = 0xa0;
+ byte_cnt++;
- val &= ah->wow_event_mask;
-
- if (val) {
- if (val & AR_WOW_MAGIC_PAT_FOUND)
- wow_status |= AH_WOW_MAGIC_PATTERN_EN;
- if (AR_WOW_PATTERN_FOUND(val))
- wow_status |= AH_WOW_USER_PATTERN_EN;
- if (val & AR_WOW_KEEP_ALIVE_FAIL)
- wow_status |= AH_WOW_LINK_CHANGE;
- if (val & AR_WOW_BEACON_FAIL)
- wow_status |= AH_WOW_BEACON_MISS;
- }
+ /* skip 2nd byte of frame control and Duration field */
+ byte_cnt += 3;
/*
- * set and clear WOW_PME_CLEAR registers for the chip to
- * generate next wow signal.
- * disable D3 before accessing other registers ?
+ * We need not match the destination MAC address; it can be a broadcast
+ * MAC address or a unicast to this station.
*/
+ byte_cnt += 6;
- /* do we need to check the bit value 0x01000000 (7-10) ?? */
- REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
- AR_PMCTRL_PWR_STATE_D1D3);
+ /* copy the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
- /*
- * clear all events
- */
- REG_WRITE(ah, AR_WOW_PATTERN,
- AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+ byte_cnt += 6;
- /*
- * restore the beacon threshold to init value
- */
- REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+ /* copy the bssid, it's the same as the source mac address */
+
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ /* Create Disassociate pattern mask */
+
+ dis_deauth_mask[0] = 0xfe;
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
+
+ ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+ pattern_count++;
/*
- * Restore the way the PCI-E reset, Power-On-Reset, external
- * PCIE_POR_SHORT pins are tied to its original value.
- * Previously just before WoW sleep, we untie the PCI-E
- * reset to our Chip's Power On Reset so that any PCI-E
- * reset from the bus will not reset our chip
+ * For the deauthenticate pattern, only the first byte of the frame
+ * control field changes from 0xA0 to 0xC0.
*/
- if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, false);
+ dis_deauth_pattern[0] = 0xC0;
- ah->wow_event_mask = 0;
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
- return wow_status;
}
-EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
-void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+static void ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
{
- u32 wow_event_mask;
- u32 set, clr;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_wow_pattern *wow_pattern = NULL;
+ struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int mask_len;
+ s8 i = 0;
- /*
- * wow_event_mask is a mask to the AR_WOW_PATTERN register to
- * indicate which WoW events we have enabled. The WoW events
- * are from the 'pattern_enable' in this function and
- * 'pattern_count' of ath9k_hw_wow_apply_pattern()
- */
- wow_event_mask = ah->wow_event_mask;
+ if (!wowlan->n_patterns)
+ return;
/*
- * Untie Power-on-Reset from the PCI-E-Reset. When we are in
- * WOW sleep, we do want the Reset from the PCI-E to disturb
- * our hw state
+ * Add the new user configured patterns
*/
- if (ah->is_pciexpress) {
+ for (i = 0; i < wowlan->n_patterns; i++) {
+
+ wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
+
+ if (!wow_pattern)
+ return;
+
+ /*
+ * TODO: convert the generic user space pattern to
+ * an appropriate chip-specific/802.11 pattern.
+ */
+
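+ /* cfg80211 supplies the mask as one bit per pattern byte, hence the round-up. */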
+ mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+ memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
+ memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
+ patterns[i].pattern_len);
+ memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
+ wow_pattern->pattern_len = patterns[i].pattern_len;
+
/*
- * we need to untie the internal POR (power-on-reset)
- * to the external PCI-E reset. We also need to tie
- * the PCI-E Phy reset to the PCI-E reset.
+ * We just need to take care of the deauth and disassoc patterns
+ * and make sure we don't overwrite them.
*/
- set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
- clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
- REG_RMW(ah, AR_WA, set, clr);
+
+ ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
+ wow_pattern->mask_bytes,
+ i + 2,
+ wow_pattern->pattern_len);
+ kfree(wow_pattern);
+
}
- /*
- * set the power states appropriately and enable PME
- */
- set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
- AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+}
- /*
- * set and clear WOW_PME_CLEAR registers for the chip
- * to generate next wow signal.
- */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
- clr = AR_PMCTRL_WOW_PME_CLR;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_triggers_enabled = 0;
+ int ret = 0;
- /*
- * Setup for:
- * - beacon misses
- * - magic pattern
- * - keep alive timeout
- * - pattern matching
- */
+ mutex_lock(&sc->mutex);
- /*
- * Program default values for pattern backoff, aifs/slot/KAL count,
- * beacon miss timeout, KAL timeout, etc.
- */
- set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
- REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+ ath_cancel_work(sc);
+ ath_stop_ani(sc);
- set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
- AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
- AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
- REG_SET_BIT(ah, AR_WOW_COUNT, set);
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
- if (pattern_enable & AH_WOW_BEACON_MISS)
- set = AR_WOW_BEACON_TIMO;
- /* We are not using beacon miss, program a large value */
- else
- set = AR_WOW_BEACON_TIMO_MAX;
+ if (WARN_ON(!wowlan)) {
+ ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
- REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+ if (!device_can_wakeup(sc->dev)) {
+ ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+ ret = 1;
+ goto fail_wow;
+ }
/*
- * Keep alive timo in ms except AR9280
+ * Bail out if none of the STA vifs are associated; we also
+ * do not handle multi-vif cases yet since, for instance,
+ * the 'keep alive frame' would have to be configured
+ * separately for each STA.
*/
- if (!pattern_enable)
- set = AR_WOW_KEEP_ALIVE_NEVER;
- else
- set = KAL_TIMEOUT * 32;
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ if (sc->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
+
+ ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
+ wow_triggers_enabled);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_stop_btcoex(sc);
/*
- * Keep alive delay in us. based on 'power on clock',
- * therefore in usec
+ * Enable wakeup on receiving disassoc/deauth
+ * frames by default.
*/
- set = KAL_DELAY * 1000;
- REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+ ath9k_wow_add_disassoc_deauth_pattern(sc);
+
+ if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
+ ath9k_wow_add_pattern(sc, wowlan);
+ spin_lock_bh(&sc->sc_pcu_lock);
/*
- * Create keep alive pattern to respond to beacons
+ * To avoid false wakeups, we enable the beacon miss interrupt
+ * only when we go to sleep. We save the current interrupt mask
+ * so we can restore it after the system wakes up.
*/
- ath9k_wow_create_keep_alive_pattern(ah);
+ sc->wow_intr_before_sleep = ah->imask;
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
/*
- * Configure MAC WoW Registers
+ * We can now sync the IRQ and kill any running tasklets, since we
+ * have already disabled interrupts and are not holding a spin lock.
*/
- set = 0;
- /* Send keep alive timeouts anyway */
- clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
- if (pattern_enable & AH_WOW_LINK_CHANGE)
- wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
- else
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ ath9k_hw_wow_enable(ah, wow_triggers_enabled);
- set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
- REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+ ath9k_ps_restore(sc);
+ ath_dbg(common, ANY, "WoW enabled in ath9k\n");
+ atomic_inc(&sc->wow_sleep_proc_intr);
- /*
- * we are relying on a bmiss failure. ensure we have
- * enough threshold to prevent false positives
- */
- REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
- AR_WOW_BMISSTHRESHOLD);
+fail_wow:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
+
+int ath9k_resume(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_status;
- set = 0;
- clr = 0;
+ mutex_lock(&sc->mutex);
- if (pattern_enable & AH_WOW_BEACON_MISS) {
- set = AR_WOW_BEACON_FAIL_EN;
- wow_event_mask |= AR_WOW_BEACON_FAIL;
- } else {
- clr = AR_WOW_BEACON_FAIL_EN;
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = sc->wow_intr_before_sleep;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ wow_status = ath9k_hw_wow_wakeup(ah);
+
+ if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
+ /*
+ * Some devices may not report beacon miss as the
+ * reason they woke up, so we add it here to cover
+ * that shortcoming.
+ */
+ wow_status |= AH_WOW_BEACON_MISS;
+ atomic_dec(&sc->wow_got_bmiss_intr);
+ ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
}
- REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+ atomic_dec(&sc->wow_sleep_proc_intr);
- set = 0;
- clr = 0;
- /*
- * Enable the magic packet registers
- */
- if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
- set = AR_WOW_MAGIC_EN;
- wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
- } else {
- clr = AR_WOW_MAGIC_EN;
+ if (wow_status) {
+ ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
+ ath9k_hw_wow_event_to_string(wow_status), wow_status);
}
- set |= AR_WOW_MAC_INTR_EN;
- REG_RMW(ah, AR_WOW_PATTERN, set, clr);
- REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
- AR_WOW_PATTERN_SUPPORTED);
+ ath_restart_work(sc);
+ ath9k_start_btcoex(sc);
- /*
- * Set the power states appropriately and enable PME
- */
- clr = 0;
- set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
- AR_PMCTRL_PWR_PM_CTRL_ENA;
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
- clr = AR_PCIE_PM_CTRL_ENA;
- REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+ return 0;
+}
- /*
- * this is needed to prevent the chip waking up
- * the host within 3-4 seconds with certain
- * platform/BIOS. The fix is to enable
- * D1 & D3 to match original definition and
- * also match the OTP value. Anyway this
- * is more related to SW WOW.
- */
- clr = AR_PMCTRL_PWR_STATE_D1D3;
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath_softc *sc = hw->priv;
- set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ mutex_lock(&sc->mutex);
+ device_init_wakeup(sc->dev, 1);
+ device_set_wakeup_enable(sc->dev, enabled);
+ mutex_unlock(&sc->mutex);
+}
- REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
- /* to bring down WOW power low margin */
- set = BIT(13);
- REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
- /* HW WoW */
- clr = BIT(5);
- REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
+ (sc->driver_data & ATH9K_PCI_WOW) &&
+ device_can_wakeup(sc->dev))
+ hw->wiphy->wowlan = &ath9k_wowlan_support;
- ath9k_hw_set_powermode_wow_sleep(ah);
- ah->wow_event_mask = wow_event_mask;
+ atomic_set(&sc->wow_sleep_proc_intr, -1);
+ atomic_set(&sc->wow_got_bmiss_intr, -1);
}
-EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b5a19e098f2d..55897d508a76 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -47,8 +47,6 @@ static u16 bits_per_symbol[][2] = {
{ 260, 540 }, /* 7: 64-QAM 5/6 */
};
-#define IS_HT_RATE(_rate) ((_rate) & 0x80)
-
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
@@ -174,14 +172,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
static struct ath_atx_tid *
ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr;
- u8 tidno = 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- tidno = ieee80211_get_qos_ctl(hdr)[0];
-
- tidno &= IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
return ATH_AN_2_TID(an, tidno);
}
@@ -781,11 +772,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (bt_aggr_limit)
aggr_limit = bt_aggr_limit;
- /*
- * h/w can accept aggregates up to 16 bit lengths (65535).
- * The IE, however can hold up to 65536, which shows up here
- * as zero. Ignore 65536 since we are constrained by hw.
- */
if (tid->an->maxampdu)
aggr_limit = min(aggr_limit, tid->an->maxampdu);
@@ -1410,8 +1396,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* has already been added.
*/
if (sta->ht_cap.ht_supported) {
- an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
+ an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor)) - 1;
density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
an->mpdudensity = density;
}
@@ -1458,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- if (!tid->sched)
- continue;
-
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
+ if (!tid->sched) {
+ ath_txq_unlock(sc, txq);
+ continue;
+ }
+
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
@@ -1790,6 +1778,9 @@ bool ath_drain_all_txq(struct ath_softc *sc)
if (!ATH_TXQ_SETUP(sc, i))
continue;
+ if (!sc->tx.txq[i].axq_depth)
+ continue;
+
if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
npend |= BIT(i);
}
@@ -2072,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
ATH_TXBUF_RESET(bf);
- if (tid) {
+ if (tid && ieee80211_is_data_present(hdr->frame_control)) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2195,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq->stopped = true;
}
+ if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
+
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an &&
ieee80211_is_data_present(hdr->frame_control)) {
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
@@ -2753,6 +2745,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
}
}
+#ifdef CONFIG_ATH9K_TX99
+
int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
@@ -2795,3 +2789,5 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
return 0;
}
+
+#endif /* CONFIG_ATH9K_TX99 */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 3d70cd277fd7..1c0af9cd9a85 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 349fa22a921a..4c8cdb097b65 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
@@ -1968,18 +1967,6 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
return -ENOMEM;
ar->num_channels = chans;
- /*
- * I measured this, a bandswitch takes roughly
- * 135 ms and a frequency switch about 80.
- *
- * FIXME: measure these values again once EEPROM settings
- * are used, that will influence them!
- */
- if (bands == 2)
- ar->hw->channel_change_time = 135 * 1000;
- else
- ar->hw->channel_change_time = 80 * 1000;
-
regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
/* second part of wiphy init */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index e935f61c7fad..536bc46a2912 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
@@ -520,6 +519,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
+ struct ath_common *common = &ar->common;
u8 *tim;
u8 tim_len;
bool cam;
@@ -527,17 +527,13 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
return;
- /* check if this really is a beacon */
- if (!ieee80211_is_beacon(hdr->frame_control))
- return;
-
/* min. beacon length + FCS_LEN */
if (len <= 40 + FCS_LEN)
return;
+ /* check if this really is a beacon */
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) ||
- !ar->common.curaid)
+ if (!ath_is_mybeacon(common, hdr) || !common->curaid)
return;
ar->ps.last_beacon = jiffies;
@@ -602,8 +598,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
- ether_addr_equal(bar->ra, entry_bar->ta) &&
- ether_addr_equal(bar->ta, entry_bar->ra)) {
+ ether_addr_equal_64bits(bar->ra, entry_bar->ta) &&
+ ether_addr_equal_64bits(bar->ta, entry_bar->ra)) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e3f696ee4d23..4cadfd48ffdf 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -37,7 +37,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 8e99540cd90e..8b0ac14d5c32 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -59,6 +59,14 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
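+/* Return true if the frame is a beacon sent by our currently configured BSSID. */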
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr)
+{
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal_64bits(hdr->addr3, common->curbssid);
+}
+EXPORT_SYMBOL(ath_is_mybeacon);
+
void ath_printk(const char *level, const struct ath_common* common,
const char *fmt, ...)
{
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 1217c52ab28e..e5e905910db4 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -37,17 +37,18 @@ static int __ath_regd_init(struct ath_regulatory *reg);
/* We enable active scan on these a case by case basis by regulatory domain */
#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN)
+ NL80211_RRF_NO_IR)
#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
+ NL80211_RRF_NO_IR | \
+ NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
ATH9K_2GHZ_CH12_13, \
@@ -113,6 +114,87 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
}
};
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return false;
+ if (!dynamic_country_user_possible(reg))
+ return false;
+ return true;
+}
+
static inline bool is_wwr_sku(u16 regd)
{
return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
@@ -177,118 +259,139 @@ static bool ath_is_radar_freq(u16 center_freq)
return (center_freq >= 5260 && center_freq <= 5700);
}
+static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *ch)
+{
+ const struct ieee80211_reg_rule *reg_rule;
+
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
+ if (IS_ERR(reg_rule))
+ return;
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_clear_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_clear_no_ir_chan(wiphy, ch);
+}
+
+static void ath_force_no_ir_chan(struct ieee80211_channel *ch)
+{
+ ch->flags |= IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_no_ir_chan(ch);
+}
+
+static void
+__ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *ch)
+{
+ if (ath_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ default:
+ if (ch->beacon_found)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+ }
+}
+
/*
- * N.B: These exception rules do not apply radar freqs.
+ * These exception rules do not apply to radar frequencies.
*
- * - We enable adhoc (or beaconing) if allowed by 11d
- * - We enable active scan if the channel is allowed by 11d
+ * - We enable initiating radiation if the country IE says it's fine:
* - If no country IE has been processed and a we determine we have
- * received a beacon on a channel we can enable active scan and
- * adhoc (or beaconing).
+ * received a beacon on a channel we can enable initiating radiation.
*/
static void
ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-
if (!wiphy->bands[band])
continue;
-
sband = wiphy->bands[band];
-
for (i = 0; i < sband->n_channels; i++) {
-
ch = &sband->channels[i];
-
- if (ath_is_radar_freq(ch->center_freq) ||
- (ch->flags & IEEE80211_CHAN_RADAR))
- continue;
-
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (IS_ERR(reg_rule))
- continue;
- /*
- * If 11d had a rule for this channel ensure
- * we enable adhoc/beaconing if it allows us to
- * use it. Note that we would have disabled it
- * by applying our static world regdomain by
- * default during init, prior to calling our
- * regulatory_hint().
- */
- if (!(reg_rule->flags &
- NL80211_RRF_NO_IBSS))
- ch->flags &=
- ~IEEE80211_CHAN_NO_IBSS;
- if (!(reg_rule->flags &
- NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
- } else {
- if (ch->beacon_found)
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
- }
+ __ath_reg_apply_beaconing_flags(wiphy, reg,
+ initiator, ch);
}
}
-
}
-/* Allows active scan scan on Ch 12 and 13 */
+/**
+ * ath_reg_apply_ir_flags()
+ * @wiphy: the wiphy to use
+ * @reg: the ath regulatory structure
+ * @initiator: the regulatory hint initiator
+ *
+ * If no country IE has been received, always enable passive scan
+ * and no-ibss on these channels. This is only done for specific
+ * regulatory SKUs.
+ *
+ * If a country IE has been received, check its rule for this
+ * channel first before enabling active scan. The passive scan
+ * would have been enforced by the initial processing of our
+ * custom regulatory domain.
+ */
static void
-ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+ath_reg_apply_ir_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator)
{
struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- const struct ieee80211_reg_rule *reg_rule;
sband = wiphy->bands[IEEE80211_BAND_2GHZ];
if (!sband)
return;
- /*
- * If no country IE has been received always enable active scan
- * on these channels. This is only done for specific regulatory SKUs
- */
- if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- ch = &sband->channels[11]; /* CH 12 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- ch = &sband->channels[12]; /* CH 13 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- return;
- }
-
- /*
- * If a country IE has been received check its rule for this
- * channel first before enabling active scan. The passive scan
- * would have been enforced by the initial processing of our
- * custom regulatory domain.
- */
-
- ch = &sband->channels[11]; /* CH 12 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
- }
-
- ch = &sband->channels[12]; /* CH 13 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
- if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (!ath_reg_dyn_country_user_allow(reg))
+ break;
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ default:
+ ath_force_no_ir_freq(wiphy, 2467);
+ ath_force_no_ir_freq(wiphy, 2472);
}
}
@@ -320,8 +423,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -335,12 +437,15 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
case 0x66:
case 0x67:
case 0x6C:
- ath_reg_apply_beaconing_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
break;
case 0x68:
- ath_reg_apply_beaconing_flags(wiphy, initiator);
- ath_reg_apply_active_scan_flags(wiphy, initiator);
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+ ath_reg_apply_ir_flags(wiphy, reg, initiator);
break;
+ default:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
}
}
@@ -393,89 +498,6 @@ static void ath_reg_dyn_country(struct wiphy *wiphy,
reg_initiator_name(request->initiator));
}
-static bool dynamic_country_user_possible(struct ath_regulatory *reg)
-{
- if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
- return true;
-
- switch (reg->country_code) {
- case CTRY_UNITED_STATES:
- case CTRY_JAPAN1:
- case CTRY_JAPAN2:
- case CTRY_JAPAN3:
- case CTRY_JAPAN4:
- case CTRY_JAPAN5:
- case CTRY_JAPAN6:
- case CTRY_JAPAN7:
- case CTRY_JAPAN8:
- case CTRY_JAPAN9:
- case CTRY_JAPAN10:
- case CTRY_JAPAN11:
- case CTRY_JAPAN12:
- case CTRY_JAPAN13:
- case CTRY_JAPAN14:
- case CTRY_JAPAN15:
- case CTRY_JAPAN16:
- case CTRY_JAPAN17:
- case CTRY_JAPAN18:
- case CTRY_JAPAN19:
- case CTRY_JAPAN20:
- case CTRY_JAPAN21:
- case CTRY_JAPAN22:
- case CTRY_JAPAN23:
- case CTRY_JAPAN24:
- case CTRY_JAPAN25:
- case CTRY_JAPAN26:
- case CTRY_JAPAN27:
- case CTRY_JAPAN28:
- case CTRY_JAPAN29:
- case CTRY_JAPAN30:
- case CTRY_JAPAN31:
- case CTRY_JAPAN32:
- case CTRY_JAPAN33:
- case CTRY_JAPAN34:
- case CTRY_JAPAN35:
- case CTRY_JAPAN36:
- case CTRY_JAPAN37:
- case CTRY_JAPAN38:
- case CTRY_JAPAN39:
- case CTRY_JAPAN40:
- case CTRY_JAPAN41:
- case CTRY_JAPAN42:
- case CTRY_JAPAN43:
- case CTRY_JAPAN44:
- case CTRY_JAPAN45:
- case CTRY_JAPAN46:
- case CTRY_JAPAN47:
- case CTRY_JAPAN48:
- case CTRY_JAPAN49:
- case CTRY_JAPAN50:
- case CTRY_JAPAN51:
- case CTRY_JAPAN52:
- case CTRY_JAPAN53:
- case CTRY_JAPAN54:
- case CTRY_JAPAN55:
- case CTRY_JAPAN56:
- case CTRY_JAPAN57:
- case CTRY_JAPAN58:
- case CTRY_JAPAN59:
- return false;
- }
-
- return true;
-}
-
-static void ath_reg_dyn_country_user(struct wiphy *wiphy,
- struct ath_regulatory *reg,
- struct regulatory_request *request)
-{
- if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
- return;
- if (!dynamic_country_user_possible(reg))
- return;
- ath_reg_dyn_country(wiphy, reg, request);
-}
-
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
@@ -508,7 +530,8 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
case NL80211_REGDOM_SET_BY_DRIVER:
break;
case NL80211_REGDOM_SET_BY_USER:
- ath_reg_dyn_country_user(wiphy, reg, request);
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_dyn_country(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
ath_reg_dyn_country(wiphy, reg, request);
@@ -609,7 +632,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
const struct ieee80211_regdomain *regd;
wiphy->reg_notifier = reg_notifier;
- wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
+ REGULATORY_CUSTOM_REG;
if (ath_is_world_regd(reg)) {
/*
@@ -617,7 +641,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
* saved on the wiphy orig_* parameters
*/
regd = ath_world_regdomain(reg);
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_FOLLOW_POWER;
} else {
/*
* This gets applied in the case of the absence of CRDA,
@@ -626,6 +650,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
*/
regd = ath_default_world_regdomain();
}
+
wiphy_apply_custom_regulatory(wiphy, regd);
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index c02dbc618724..3c2ef0c32f72 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2644,7 +2644,7 @@ struct wcn36xx_hal_trigger_ba_rsp_candidate {
struct add_ba_info ba_info[STACFG_MAX_TC];
} __packed;
-struct wcn36xx_hal_trigget_ba_req_candidate {
+struct wcn36xx_hal_trigger_ba_req_candidate {
u8 sta_index;
u8 tid_bitmap;
} __packed;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7839b31e4826..e64a6784079e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -641,7 +641,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(skb);
}
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (changed & BSS_CHANGED_BEACON_ENABLED ||
+ changed & BSS_CHANGED_BEACON) {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed beacon enabled %d\n",
bss_conf->enable_beacon);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 366339421d4f..750626b0e22d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -115,6 +115,22 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
}
}
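+/* HT defaults used together with wcn36xx_set_default_rates() when no per-station capability info is available. */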
+static void wcn36xx_smd_set_sta_default_ht_params(
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ sta_params->ht_capable = 1;
+ sta_params->tx_channel_width_set = 1;
+ sta_params->lsig_txop_protection = 1;
+ sta_params->max_ampdu_size = 3;
+ sta_params->max_ampdu_density = 5;
+ sta_params->max_amsdu_size = 0;
+ sta_params->sgi_20Mhz = 1;
+ sta_params->sgi_40mhz = 1;
+ sta_params->green_field_capable = 1;
+ sta_params->delayed_ba_support = 0;
+ sta_params->dsss_cck_mode_40mhz = 1;
+}
+
static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -172,6 +188,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sizeof(priv_sta->supported_rates));
} else {
wcn36xx_set_default_rates(&sta_params->supported_rates);
+ wcn36xx_smd_set_sta_default_ht_params(sta_params);
}
}
@@ -1134,14 +1151,14 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
/* STA */
bss->oper_mode = 1;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
- } else if (vif->type == NL80211_IFTYPE_AP) {
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
/* AP */
bss->oper_mode = 0;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
- } else if (vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) {
+ } else if (vif->type == NL80211_IFTYPE_ADHOC) {
bss->bss_type = WCN36XX_HAL_IBSS_MODE;
/* STA */
@@ -1292,7 +1309,11 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
/* TODO need to find out why this is needed? */
- msg_body.tim_ie_offset = tim_off+4;
+ if (vif->type == NL80211_IFTYPE_MESH_POINT)
+ /* mesh beacons don't need this, so push it further down */
+ msg_body.tim_ie_offset = 256;
+ else
+ msg_body.tim_ie_offset = tim_off+4;
msg_body.p2p_ie_offset = p2p_off;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1838,7 +1859,7 @@ out:
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
- struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1849,7 +1870,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
- candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
(wcn->hal_buf + sizeof(msg_body));
candidate->sta_index = sta_index;
candidate->tid_bitmap = 1;
@@ -2039,22 +2060,28 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
- mutex_lock(&wcn->hal_ind_mutex);
msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
- if (msg_ind) {
- msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
- memcpy(msg_ind->msg, buf, len);
- list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
- queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
- wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ if (!msg_ind)
+ goto nomem;
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ if (!msg_ind->msg) {
+ kfree(msg_ind);
+nomem:
+ /*
+ * FIXME: Do something smarter than just
+ * printing an error.
+ */
+ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
+ msg_header->msg_type);
+ break;
}
+ memcpy(msg_ind->msg, buf, len);
+ mutex_lock(&wcn->hal_ind_mutex);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
mutex_unlock(&wcn->hal_ind_mutex);
- if (msg_ind)
- break;
- /* FIXME: Do something smarter then just printing an error. */
- wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
- msg_header->msg_type);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 58b63833e8e7..8fa5cbace5ab 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -54,7 +54,7 @@ enum wcn36xx_debug_mask {
};
#define wcn36xx_err(fmt, arg...) \
- printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
#define wcn36xx_warn(fmt, arg...) \
printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 8205d3e4ab66..10919f95a83c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -156,6 +156,19 @@ void wil6210_enable_irq(struct wil6210_priv *wil)
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICC));
+ /* interrupt moderation parameters */
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ iowrite32(0, wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+ } else {
+ iowrite32(WIL6210_ITR_TRSH,
+ wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ iowrite32(BIT_DMA_ITR_CNT_CRL_EN,
+ wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+ }
+
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d505b2676a73..0b0975d88b43 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
+#include <linux/prefetch.h>
#include "wil6210.h"
#include "wmi.h"
@@ -377,6 +378,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
}
skb_trim(skb, dmalen);
+ prefetch(skb->data);
+
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
@@ -673,9 +676,12 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+
switch (skb->protocol) {
case cpu_to_be16(ETH_P_IP):
protocol = ip_hdr(skb)->protocol;
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
break;
case cpu_to_be16(ETH_P_IPV6):
protocol = ipv6_hdr(skb)->nexthdr;
@@ -701,8 +707,6 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
}
d->dma.ip_length = skb_network_header_len(skb);
- d->dma.b11 = ETH_HLEN; /* MAC header length */
- d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
/* Enable TCP/UDP checksum */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
/* Calculate pseudo-header */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index c4a51638736a..1f91eaf95bbe 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -39,6 +39,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
+#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
/* Hardware definitions begin */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 0d950f209dae..bf93ea859f2d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -28,8 +28,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
@@ -39,7 +39,6 @@
******************************************************************************/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -4278,8 +4277,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with AtmelMACFW; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
****************************************************************************/
/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
diff --git a/drivers/net/wireless/atmel.h b/drivers/net/wireless/atmel.h
index b9b3e5b76544..96f7318cbb04 100644
--- a/drivers/net/wireless/atmel.h
+++ b/drivers/net/wireless/atmel.h
@@ -15,8 +15,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 522572219217..4cfb4d99ced0 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -24,15 +24,14 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index c1b159ebcffe..5cd97e3cbee3 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -15,14 +15,13 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with Atmel wireless lan drivers; if not, see
+ <http://www.gnu.org/licenses/>.
******************************************************************************/
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include "atmel.h"
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7f3d461f7e8d..54376fddfaf9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -731,8 +731,6 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
- /* a completion event structure needed if this call is asynchronous */
- struct completion fw_load_complete;
/* a pointer to the firmware object */
const struct firmware *blob;
/* The type of firmware to request. */
@@ -809,6 +807,8 @@ enum {
struct b43_wldev {
struct b43_bus_dev *dev;
struct b43_wl *wl;
+ /* completion event structure used when firmware is loaded asynchronously */
+ struct completion fw_load_complete;
/* The device initialization status.
* Use b43_status() to query. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccd24f0acb8d..c75237eb55a1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2070,6 +2070,7 @@ void b43_do_release_fw(struct b43_firmware_file *fw)
static void b43_release_firmware(struct b43_wldev *dev)
{
+ complete(&dev->fw_load_complete);
b43_do_release_fw(&dev->fw.ucode);
b43_do_release_fw(&dev->fw.pcm);
b43_do_release_fw(&dev->fw.initvals);
@@ -2095,7 +2096,7 @@ static void b43_fw_cb(const struct firmware *firmware, void *context)
struct b43_request_fw_context *ctx = context;
ctx->blob = firmware;
- complete(&ctx->fw_load_complete);
+ complete(&ctx->dev->fw_load_complete);
}
int b43_do_request_fw(struct b43_request_fw_context *ctx,
@@ -2142,7 +2143,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
}
if (async) {
/* do this part asynchronously */
- init_completion(&ctx->fw_load_complete);
+ init_completion(&ctx->dev->fw_load_complete);
err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
ctx->dev->dev->dev, GFP_KERNEL,
ctx, b43_fw_cb);
@@ -2150,12 +2151,11 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
pr_err("Unable to load firmware\n");
return err;
}
- /* stall here until fw ready */
- wait_for_completion(&ctx->fw_load_complete);
+ wait_for_completion(&ctx->dev->fw_load_complete);
if (ctx->blob)
goto fw_ready;
/* On some ARM systems, the async request will fail, but the next sync
- * request works. For this reason, we dall through here
+ * request works. For this reason, we fall through here
*/
}
err = request_firmware(&ctx->blob, ctx->fwname,
@@ -2424,6 +2424,7 @@ error:
static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
static void b43_one_core_detach(struct b43_bus_dev *dev);
+static int b43_rng_init(struct b43_wl *wl);
static void b43_request_firmware(struct work_struct *work)
{
@@ -2475,6 +2476,10 @@ start_ieee80211:
goto err_one_core_detach;
wl->hw_registred = true;
b43_leds_register(wl->current_dev);
+
+ /* Register HW RNG driver */
+ b43_rng_init(wl);
+
goto out;
err_one_core_detach:
@@ -4636,9 +4641,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
return;
- /* Unregister HW RNG driver */
- b43_rng_exit(dev->wl);
-
b43_set_status(dev, B43_STAT_UNINIT);
/* Stop the microcode PSM. */
@@ -4795,9 +4797,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_status(dev, B43_STAT_INITIALIZED);
- /* Register HW RNG driver */
- b43_rng_init(dev->wl);
-
out:
return err;
@@ -5464,6 +5463,9 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_one_core_detach(wldev->dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
b43_leds_unregister(wl);
ieee80211_free_hw(wl->hw);
@@ -5541,6 +5543,9 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_one_core_detach(dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 4ae63f4ddfb2..50e5ddb12fb3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -821,10 +821,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ) {
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_freq_to_channel_5ghz(chanid);
+ status.freq = b43_channel_to_freq_5ghz(chanid);
} else {
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_freq_to_channel_2ghz(chanid);
+ status.freq = b43_channel_to_freq_2ghz(chanid);
}
break;
default:
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 572668821862..349c77605231 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3919,6 +3919,7 @@ static void b43legacy_remove(struct ssb_device *dev)
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
cancel_work_sync(&wl->firmware_load);
+ complete(&wldev->fw_load_complete);
B43legacy_WARN_ON(!wl);
if (!wldev->fw.ucode)
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 54e36fcb3954..fcfed6b99a62 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -4,13 +4,12 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
depends on MAC80211
- depends on BCMA
+ depends on BCMA_POSSIBLE
+ select BCMA
select NEW_LEDS if BCMA_DRIVER_GPIO
select LEDS_CLASS if BCMA_DRIVER_GPIO
select BRCMUTIL
select FW_LOADER
- select CRC_CCITT
- select CRC8
select CORDIC
---help---
This module adds support for PCIe wireless adapters based on Broadcom
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 8e9b1221b32c..57cddee03252 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -28,14 +28,15 @@ brcmfmac-objs += \
fweh.o \
fwsignal.o \
p2p.o \
- dhd_cdc.o \
+ proto.o \
+ bcdc.o \
dhd_common.o \
dhd_linux.o \
+ nvram.o \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
bcmsdh.o \
- bcmsdh_sdmmc.o \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
new file mode 100644
index 000000000000..c229210d50ba
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "fwsignal.h"
+#include "dhd_dbg.h"
+#include "tracepoint.h"
+#include "proto.h"
+#include "bcdc.h"
+
+struct brcmf_proto_bcdc_dcmd {
+ __le32 cmd; /* dongle command value */
+ __le32 len; /* lower 16: output buflen;
+ * upper 16: input buflen (excludes header) */
+ __le32 flags; /* flag defns given below */
+ __le32 status; /* status code returned from the device */
+};
+
+/* BCDC flag definitions */
+#define BCDC_DCMD_ERROR 0x01 /* 1=cmd failed */
+#define BCDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */
+#define BCDC_DCMD_IF_MASK 0xF000 /* I/F index */
+#define BCDC_DCMD_IF_SHIFT 12
+#define BCDC_DCMD_ID_MASK 0xFFFF0000 /* id and cmd pairing */
+#define BCDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */
+#define BCDC_DCMD_ID(flags) \
+ (((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT)
+
+/*
+ * BCDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define BCDC_HEADER_LEN 4
+#define BCDC_PROTO_VER 2 /* Protocol version */
+#define BCDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BCDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+#define BCDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
+#define BCDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
+#define BCDC_PRIORITY_MASK 0x7
+#define BCDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
+#define BCDC_FLAG2_IF_SHIFT 0
+
+#define BCDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BCDC_FLAG2_IF_MASK) >> BCDC_FLAG2_IF_SHIFT))
+#define BCDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BCDC_FLAG2_IF_MASK) | \
+ ((idx) << BCDC_FLAG2_IF_SHIFT)))
+
+/**
+ * struct brcmf_proto_bcdc_header - BCDC header format
+ *
+ * @flags: flags contain protocol and checksum info.
+ * @priority: 802.1d priority and USB flow control info (bit 4:7).
+ * @flags2: additional flags containing dongle interface index.
+ * @data_offset: start of packet data. header is followed by firmware signals.
+ */
+struct brcmf_proto_bcdc_header {
+ u8 flags;
+ u8 priority;
+ u8 flags2;
+ u8 data_offset;
+};
+
+/*
+ * maximum length of firmware signal data between
+ * the BCDC header and packet data in the tx path.
+ */
+#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
+
+#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
+#define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE
+ * (amount of header that might be added)
+ * plus any space that might be needed
+ * for bus alignment padding.
+ */
+struct brcmf_bcdc {
+ u16 reqid;
+ u8 bus_header[BUS_HEADER_LEN];
+ struct brcmf_proto_bcdc_dcmd msg;
+ unsigned char buf[BRCMF_DCMD_MAXLEN];
+};
+
+
+static int
+brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+ uint len, bool set)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ u32 flags;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
+
+ msg->cmd = cpu_to_le32(cmd);
+ msg->len = cpu_to_le32(len);
+ flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
+ if (set)
+ flags |= BCDC_DCMD_SET;
+ flags = (flags & ~BCDC_DCMD_IF_MASK) |
+ (ifidx << BCDC_DCMD_IF_SHIFT);
+ msg->flags = cpu_to_le32(flags);
+
+ if (buf)
+ memcpy(bcdc->buf, buf, len);
+
+ len += sizeof(*msg);
+ if (len > BRCMF_TX_IOCTL_MAX_MSG_SIZE)
+ len = BRCMF_TX_IOCTL_MAX_MSG_SIZE;
+
+ /* Send request */
+ return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len);
+}
+
+static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
+{
+ int ret;
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+
+ brcmf_dbg(BCDC, "Enter\n");
+ len += sizeof(struct brcmf_proto_bcdc_dcmd);
+ do {
+ ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg,
+ len);
+ if (ret < 0)
+ break;
+ } while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id);
+
+ return ret;
+}
+
+static int
+brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ void *info;
+ int ret = 0, retries = 0;
+ u32 id, flags;
+
+ brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
+ if (ret < 0) {
+ brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
+ ret);
+ goto done;
+ }
+
+retry:
+ /* wait for interrupt and get first fragment */
+ ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+ if ((id < bcdc->reqid) && (++retries < RETRIES))
+ goto retry;
+ if (id != bcdc->reqid) {
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check info buffer */
+ info = (void *)&msg[1];
+
+ /* Copy info buffer */
+ if (buf) {
+ if (ret < (int)len)
+ len = ret;
+ memcpy(buf, info, len);
+ }
+
+ /* Check the ERROR flag */
+ if (flags & BCDC_DCMD_ERROR)
+ ret = le32_to_cpu(msg->status);
+
+done:
+ return ret;
+}
+
+static int
+brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
+{
+ struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+ struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+ int ret = 0;
+ u32 flags, id;
+
+ brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+ ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
+ if (ret < 0)
+ goto done;
+
+ ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+ if (ret < 0)
+ goto done;
+
+ flags = le32_to_cpu(msg->flags);
+ id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+ if (id != bcdc->reqid) {
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Check the ERROR flag */
+ if (flags & BCDC_DCMD_ERROR)
+ ret = le32_to_cpu(msg->status);
+
+done:
+ return ret;
+}
+
+static void
+brcmf_proto_bcdc_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bcdc_header *h;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ /* Push BDC header used to convey priority for buses that don't */
+ skb_push(pktbuf, BCDC_HEADER_LEN);
+
+ h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+ h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT);
+ if (pktbuf->ip_summed == CHECKSUM_PARTIAL)
+ h->flags |= BCDC_FLAG_SUM_NEEDED;
+
+ h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->data_offset = offset;
+ BCDC_SET_IF_IDX(h, ifidx);
+ trace_brcmf_bcdchdr(pktbuf->data);
+}
+
+static int
+brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *pktbuf)
+{
+ struct brcmf_proto_bcdc_header *h;
+
+ brcmf_dbg(BCDC, "Enter\n");
+
+ /* Pop BCDC header used to convey priority for buses that don't */
+ if (pktbuf->len <= BCDC_HEADER_LEN) {
+ brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
+ pktbuf->len, BCDC_HEADER_LEN);
+ return -EBADE;
+ }
+
+ trace_brcmf_bcdchdr(pktbuf->data);
+ h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+ *ifidx = BCDC_GET_IF_IDX(h);
+ if (*ifidx >= BRCMF_MAX_IFS) {
+ brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
+ return -EBADE;
+ }
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
+ * bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+ */
+ if (*ifidx)
+ (*ifidx)++;
+
+ if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
+ BCDC_PROTO_VER) {
+ brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ return -EBADE;
+ }
+
+ if (h->flags & BCDC_FLAG_SUM_GOOD) {
+ brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags);
+ pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ pktbuf->priority = h->priority & BCDC_PRIORITY_MASK;
+
+ skb_pull(pktbuf, BCDC_HEADER_LEN);
+ if (do_fws)
+ brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
+ else
+ skb_pull(pktbuf, h->data_offset << 2);
+
+ if (pktbuf->len == 0)
+ return -ENODATA;
+ return 0;
+}
+
+static int
+brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *pktbuf)
+{
+ brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf);
+ return brcmf_bus_txdata(drvr->bus_if, pktbuf);
+}
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc;
+
+ bcdc = kzalloc(sizeof(*bcdc), GFP_ATOMIC);
+ if (!bcdc)
+ goto fail;
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) {
+ brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n");
+ goto fail;
+ }
+
+ drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
+ drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
+ drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+ drvr->proto->txdata = brcmf_proto_bcdc_txdata;
+ drvr->proto->pd = bcdc;
+
+ drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
+ drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
+ sizeof(struct brcmf_proto_bcdc_dcmd);
+ return 0;
+
+fail:
+ kfree(bcdc);
+ return -ENOMEM;
+}
+
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+{
+ kfree(drvr->proto->pd);
+ drvr->proto->pd = NULL;
+}
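A minimal userspace sketch (not part of this patch) of how the BCDC dcmd flags word is packed by brcmf_proto_bcdc_msg() above and how the request id is later recovered with BCDC_DCMD_ID() when matching a response; the constants are restated from bcdc.c, and main() is only there to keep the example self-contained.

/* sketch only: mirrors the flag packing in brcmf_proto_bcdc_msg() */
#include <stdint.h>
#include <stdio.h>

#define BCDC_DCMD_SET		0x02		/* 0=get, 1=set cmd */
#define BCDC_DCMD_IF_MASK	0xF000		/* I/F index */
#define BCDC_DCMD_IF_SHIFT	12
#define BCDC_DCMD_ID_MASK	0xFFFF0000	/* id and cmd pairing */
#define BCDC_DCMD_ID_SHIFT	16
#define BCDC_DCMD_ID(flags) \
	(((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT)

int main(void)
{
	uint16_t reqid = 7;	/* incremented for every dcmd request */
	uint32_t ifidx = 1;	/* dongle interface index */
	uint32_t flags;

	flags = (uint32_t)reqid << BCDC_DCMD_ID_SHIFT;	/* id in upper 16 bits */
	flags |= BCDC_DCMD_SET;				/* this is a set, not a get */
	flags = (flags & ~BCDC_DCMD_IF_MASK) | (ifidx << BCDC_DCMD_IF_SHIFT);

	/* the response path compares this id against the stored reqid */
	printf("flags=0x%08x id=%u\n", (unsigned)flags,
	       (unsigned)BCDC_DCMD_ID(flags));
	return 0;
}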
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
new file mode 100644
index 000000000000..17e8c039ff32
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_BCDC_H
+#define BRCMFMAC_BCDC_H
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+
+
+#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3e10b801eee8..fa35b23bbaa7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -17,16 +17,23 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
#include <defs.h>
#include <brcm_hw_ids.h>
@@ -36,11 +43,19 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
+#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+#define DMA_ALIGN_MASK 0x03
-static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
+#define SDIO_FUNC1_BLOCKSIZE 64
+#define SDIO_FUNC2_BLOCKSIZE 512
+/* Maximum milliseconds to wait for F2 to come up */
+#define SDIO_WAIT_F2RDY 3000
+
+
+static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -55,27 +70,46 @@ static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
sdiodev->irq_en = false;
}
- brcmf_sdbrcm_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus);
return IRQ_HANDLED;
}
-static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(INTR, "IB intr triggered\n");
- brcmf_sdbrcm_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus);
}
/* dummy handler for SDIO function 2 interrupt */
-static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
+static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+{
+ bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+ is_err = atomic_read(&sdiodev->suspend);
+#endif
+ return is_err;
+}
+
+static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq)
+{
+#ifdef CONFIG_PM_SLEEP
+ int retry = 0;
+ while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+ wait_event_timeout(*wq, false, HZ/100);
+#endif
+}
+
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
u8 data;
@@ -85,7 +119,7 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
sdiodev->pdata->oob_irq_nr);
ret = request_irq(sdiodev->pdata->oob_irq_nr,
- brcmf_sdio_oob_irqhandler,
+ brcmf_sdiod_oob_irqhandler,
sdiodev->pdata->oob_irq_flags,
"brcmf_oob_intr",
&sdiodev->func[1]->dev);
@@ -109,36 +143,36 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_host(sdiodev->func[1]);
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+ data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
sdio_release_host(sdiodev->func[1]);
} else {
brcmf_dbg(SDIO, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
- sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
- sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
+ sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func[1]);
}
return 0;
}
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(SDIO, "Entering\n");
if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
sdio_claim_host(sdiodev->func[1]);
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
- brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
sdio_release_host(sdiodev->func[1]);
if (sdiodev->oob_irq_requested) {
@@ -161,29 +195,150 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
return 0;
}
+static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
+ uint regaddr, u8 byte)
+{
+ int err_ret;
+
+ /*
+ * Can only directly write to some F0 registers.
+ * Handle the CCCR_IENx and CCCR_ABORT commands
+ * as a special case.
+ */
+ if ((regaddr == SDIO_CCCR_ABORT) ||
+ (regaddr == SDIO_CCCR_IENx))
+ sdio_writeb(func, byte, regaddr, &err_ret);
+ else
+ sdio_f0_writeb(func, byte, regaddr, &err_ret);
+
+ return err_ret;
+}
+
+static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
+ u32 addr, u8 regsz, void *data, bool write)
+{
+ struct sdio_func *func;
+ int ret;
+
+ brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ write, fn, addr, regsz);
+
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* only allow byte access on F0 */
+ if (WARN_ON(regsz > 1 && !fn))
+ return -EINVAL;
+ func = sdiodev->func[fn];
+
+ switch (regsz) {
+ case sizeof(u8):
+ if (write) {
+ if (fn)
+ sdio_writeb(func, *(u8 *)data, addr, &ret);
+ else
+ ret = brcmf_sdiod_f0_writeb(func, addr,
+ *(u8 *)data);
+ } else {
+ if (fn)
+ *(u8 *)data = sdio_readb(func, addr, &ret);
+ else
+ *(u8 *)data = sdio_f0_readb(func, addr, &ret);
+ }
+ break;
+ case sizeof(u16):
+ if (write)
+ sdio_writew(func, *(u16 *)data, addr, &ret);
+ else
+ *(u16 *)data = sdio_readw(func, addr, &ret);
+ break;
+ case sizeof(u32):
+ if (write)
+ sdio_writel(func, *(u32 *)data, addr, &ret);
+ else
+ *(u32 *)data = sdio_readl(func, addr, &ret);
+ break;
+ default:
+ brcmf_err("invalid size: %d\n", regsz);
+ break;
+ }
+
+ if (ret) {
+ /*
+ * SleepCSR register access can fail when
+ * waking up the device so reduce this noise
+ * in the logs.
+ */
+ if (addr != SBSDIO_FUNC1_SLEEPCSR)
+ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+ else
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+ }
+ return ret;
+}
+
+static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 regsz, void *data, bool write)
+{
+ u8 func_num;
+ s32 retry = 0;
+ int ret;
+
+ if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ return -ENOMEDIUM;
+
+ /*
+ * figure out how to read the register based on address range
+ * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+ * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0)
+ func_num = SDIO_FUNC_0;
+ else
+ func_num = SDIO_FUNC_1;
+
+ do {
+ if (!write)
+ memset(data, 0, regsz);
+ /* on a retry, wait 1 ms till the bus settles down */
+ if (retry)
+ usleep_range(1000, 2000);
+ ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+ data, write);
+ } while (ret != 0 && ret != -ENOMEDIUM &&
+ retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (ret == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ else if (ret != 0)
+ brcmf_err("failed with %d\n", ret);
+
+ return ret;
+}
+
static int
-brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
int err = 0, i;
u8 addr[3];
- s32 retry;
+
+ if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+ return -ENOMEDIUM;
addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
for (i = 0; i < 3; i++) {
- retry = 0;
- do {
- if (retry)
- usleep_range(1000, 2000);
- err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
- SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
- &addr[i]);
- } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
+ err = brcmf_sdiod_regrw_helper(sdiodev,
+ SBSDIO_FUNC1_SBADDRLOW + i,
+ sizeof(u8), &addr[i], true);
if (err) {
- brcmf_err("failed at addr:0x%0x\n",
+ brcmf_err("failed at addr: 0x%0x\n",
SBSDIO_FUNC1_SBADDRLOW + i);
break;
}
@@ -193,13 +348,13 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
}
static int
-brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
if (err)
return err;
@@ -214,62 +369,14 @@ brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
return 0;
}
-int
-brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write)
-{
- u8 func_num, reg_size;
- s32 retry = 0;
- int ret;
-
- /*
- * figure out how to read the register based on address range
- * 0x00 ~ 0x7FF: function 0 CCCR and FBR
- * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
- * The rest: function 1 silicon backplane core registers
- */
- if ((addr & ~REG_F0_REG_MASK) == 0) {
- func_num = SDIO_FUNC_0;
- reg_size = 1;
- } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
- func_num = SDIO_FUNC_1;
- reg_size = 1;
- } else {
- func_num = SDIO_FUNC_1;
- reg_size = 4;
-
- ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
- if (ret)
- goto done;
- }
-
- do {
- if (!write)
- memset(data, 0, reg_size);
- if (retry) /* wait for 1 ms till bus get settled down */
- usleep_range(1000, 2000);
- if (reg_size == 1)
- ret = brcmf_sdioh_request_byte(sdiodev, write,
- func_num, addr, data);
- else
- ret = brcmf_sdioh_request_word(sdiodev, write,
- func_num, addr, data, 4);
- } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-done:
- if (ret != 0)
- brcmf_err("failed with %d\n", ret);
-
- return ret;
-}
-
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
u8 data;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ false);
brcmf_dbg(SDIO, "data:0x%02x\n", data);
if (ret)
@@ -278,52 +385,63 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
return data;
}
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
u32 data;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ if (retval)
+ goto done;
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ false);
brcmf_dbg(SDIO, "data:0x%08x\n", data);
+done:
if (ret)
*ret = retval;
return data;
}
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 data, int *ret)
{
int retval;
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
-
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ true);
if (ret)
*ret = retval;
}
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
u32 data, int *ret)
{
int retval;
brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
- retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+ retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ if (retval)
+ goto done;
+ retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+ true);
+done:
if (ret)
*ret = retval;
}
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
bool write, u32 addr, struct sk_buff *pkt)
{
unsigned int req_sz;
+ int err;
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
return -EIO;
/* Single skb use the standard mmc interface */
@@ -331,22 +449,22 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
req_sz &= (uint)~3;
if (write)
- return sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt->data)),
- req_sz);
+ err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt->data)), req_sz);
else if (fn == 1)
- return sdio_memcpy_fromio(sdiodev->func[fn],
- ((u8 *)(pkt->data)),
- addr, req_sz);
+ err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
+ addr, req_sz);
else
/* function 2 read is FIFO operation */
- return sdio_readsb(sdiodev->func[fn],
- ((u8 *)(pkt->data)), addr,
- req_sz);
+ err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
+ req_sz);
+ if (err == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+ return err;
}
/**
- * brcmf_sdio_sglist_rw - SDIO interface function for block data access
+ * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -357,9 +475,9 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr,
- struct sk_buff_head *pktlist)
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
unsigned int max_req_sz, orig_offset, dst_offset;
@@ -377,8 +495,8 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (!pktlist->qlen)
return -EINVAL;
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
- if (brcmf_pm_resume_error(sdiodev))
+ brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_sdiod_pm_resume_error(sdiodev))
return -EIO;
target_list = pktlist;
@@ -485,7 +603,11 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
- if (ret != 0) {
+ if (ret == -ENOMEDIUM) {
+ brcmf_bus_change_state(sdiodev->bus_if,
+ BRCMF_BUS_NOMEDIUM);
+ break;
+ } else if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n",
write ? "write" : "read", ret);
ret = -EIO;
@@ -525,9 +647,7 @@ exit:
return ret;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
int err;
@@ -539,7 +659,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
return -EIO;
}
- err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
if (!err)
memcpy(buf, mypkt->data, nbytes);
@@ -547,50 +667,47 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
return err;
}
-int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt)
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
- uint width;
+ u32 addr = sdiodev->sbwad;
int err = 0;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pkt->len);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
goto done;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
done:
return err;
}
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen)
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq, uint totlen)
{
struct sk_buff *glom_skb;
struct sk_buff *skb;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err = 0;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pktq->qlen);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
+ addr, pktq->qlen);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
goto done;
if (pktq->qlen == 1)
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+ glom_skb);
if (err)
goto done;
@@ -599,18 +716,17 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
skb_pull(glom_skb, skb->len);
}
} else
- err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
+ pktq);
done:
return err;
}
-int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -622,48 +738,47 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
memcpy(mypkt->data, buf, nbytes);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (!err)
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
+ mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
}
-int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq)
{
struct sk_buff *skb;
- uint width;
+ u32 addr = sdiodev->sbwad;
int err;
- brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pktq->qlen);
+ brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
- width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
if (err)
return err;
if (pktq->qlen == 1 || !sdiodev->sg_support)
skb_queue_walk(pktq, skb) {
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+ err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
+ addr, skb);
if (err)
break;
}
else
- err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
+ pktq);
return err;
}
int
-brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size)
+brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size)
{
int bcmerror = 0;
struct sk_buff *pkt;
@@ -690,7 +805,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Do the transfer(s) */
while (size) {
/* Set the backplane window to include the start address */
- bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
+ bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
if (bcmerror)
break;
@@ -704,8 +819,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, pkt);
+ bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
@@ -727,7 +842,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
/* Return the window to backplane enumeration space for core access */
- if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
+ if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
brcmf_err("FAILED to set window back to 0x%x\n",
sdiodev->sbwad);
@@ -736,67 +851,335 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return bcmerror;
}
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
char t_func = (char)fn;
brcmf_dbg(SDIO, "Enter\n");
/* issue abort cmd52 command through F0 */
- brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
- SDIO_CCCR_ABORT, &t_func);
+ brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
+ sizeof(t_func), &t_func, true);
brcmf_dbg(SDIO, "Exit\n");
return 0;
}
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
- u32 regs = 0;
+ if (sdiodev->bus) {
+ brcmf_sdio_remove(sdiodev->bus);
+ sdiodev->bus = NULL;
+ }
+
+ /* Disable Function 2 */
+ sdio_claim_host(sdiodev->func[2]);
+ sdio_disable_func(sdiodev->func[2]);
+ sdio_release_host(sdiodev->func[2]);
+
+ /* Disable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+ sdiodev->sbwad = 0;
+
+ return 0;
+}
+
+static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+{
+ struct sdio_func *func;
+ struct mmc_host *host;
+ uint max_blocks;
int ret = 0;
- ret = brcmf_sdioh_attach(sdiodev);
- if (ret)
+ sdiodev->num_funcs = 2;
+
+ sdio_claim_host(sdiodev->func[1]);
+
+ ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ if (ret) {
+ brcmf_err("Failed to set F1 blocksize\n");
+ sdio_release_host(sdiodev->func[1]);
+ goto out;
+ }
+ ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ if (ret) {
+ brcmf_err("Failed to set F2 blocksize\n");
+ sdio_release_host(sdiodev->func[1]);
+ goto out;
+ }
+
+ /* increase F2 timeout */
+ sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+
+ /* Enable Function 1 */
+ ret = sdio_enable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+ if (ret) {
+ brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
+ }
- regs = SI_ENUM_BASE;
+ /*
+ * determine host related variables after brcmf_sdiod_probe()
+ * as func->cur_blksize is properly set and F2 init has been
+ * completed successfully.
+ */
+ func = sdiodev->func[2];
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
+ sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
- brcmf_err("device attach failed\n");
ret = -ENODEV;
goto out;
}
out:
if (ret)
- brcmf_sdio_remove(sdiodev);
+ brcmf_sdiod_remove(sdiodev);
return ret;
}
-EXPORT_SYMBOL(brcmf_sdio_probe);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
- sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ int err;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
- if (sdiodev->bus) {
- brcmf_sdbrcm_disconnect(sdiodev->bus);
- sdiodev->bus = NULL;
+ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_dbg(SDIO, "Class=%x\n", func->class);
+ brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+
+ /* Consume func num 1 but don't do anything with it. */
+ if (func->num == 1)
+ return 0;
+
+ /* Ignore anything but func 2 */
+ if (func->num != 2)
+ return -ENODEV;
+
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev) {
+ kfree(bus_if);
+ return -ENOMEM;
}
- brcmf_sdioh_detach(sdiodev);
+ /* store refs to functions used. mmc_card does
+ * not hold the F0 function pointer.
+ */
+ sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
+ sdiodev->func[0]->num = 0;
+ sdiodev->func[1] = func->card->sdio_func[0];
+ sdiodev->func[2] = func;
+
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv.sdio = sdiodev;
+ bus_if->proto_type = BRCMF_PROTO_BCDC;
+ dev_set_drvdata(&func->dev, bus_if);
+ dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+ sdiodev->dev = &sdiodev->func[1]->dev;
+ sdiodev->pdata = brcmfmac_sdio_pdata;
+
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+
+ brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+ err = brcmf_sdiod_probe(sdiodev);
+ if (err) {
+ brcmf_err("F2 error, probe failed %d...\n", err);
+ goto fail;
+ }
- sdiodev->sbwad = 0;
+ brcmf_dbg(SDIO, "F2 init completed...\n");
+ return 0;
+
+fail:
+ dev_set_drvdata(&func->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ kfree(sdiodev->func[0]);
+ kfree(sdiodev);
+ kfree(bus_if);
+ return err;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
+
+ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(SDIO, "Function: %d\n", func->num);
+
+ if (func->num != 1 && func->num != 2)
+ return;
+
+ bus_if = dev_get_drvdata(&func->dev);
+ if (bus_if) {
+ sdiodev = bus_if->bus_priv.sdio;
+ brcmf_sdiod_remove(sdiodev);
+
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
+ kfree(bus_if);
+ kfree(sdiodev->func[0]);
+ kfree(sdiodev);
+ }
+
+ brcmf_dbg(SDIO, "Exit\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_ops_sdio_suspend(struct device *dev)
+{
+ mmc_pm_flag_t sdio_flags;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+
+ brcmf_dbg(SDIO, "\n");
+
+ atomic_set(&sdiodev->suspend, true);
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ brcmf_err("Host can't keep power while suspended\n");
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_err("Failed to set pm_flags\n");
+ return ret;
+ }
+
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+ return ret;
+}
+
+static int brcmf_ops_sdio_resume(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+ brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_ops_sdio_suspend,
+ .resume = brcmf_ops_sdio_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM_SLEEP
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+#endif /* CONFIG_PM_SLEEP */
+};
+
+static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+{
+ brcmf_dbg(SDIO, "Enter\n");
+
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
+
+ if (brcmfmac_sdio_pdata->power_on)
+ brcmfmac_sdio_pdata->power_on();
return 0;
}
-EXPORT_SYMBOL(brcmf_sdio_remove);
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+static int brcmf_sdio_pd_remove(struct platform_device *pdev)
{
- if (enable)
- brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ brcmf_dbg(SDIO, "Enter\n");
+
+ if (brcmfmac_sdio_pdata->power_off)
+ brcmfmac_sdio_pdata->power_off();
+
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+ return 0;
+}
+
+static struct platform_driver brcmf_sdio_pd = {
+ .remove = brcmf_sdio_pd_remove,
+ .driver = {
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+void brcmf_sdio_register(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+ if (ret)
+ brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
+void brcmf_sdio_exit(void)
+{
+ brcmf_dbg(SDIO, "Enter\n");
+
+ if (brcmfmac_sdio_pdata)
+ platform_driver_unregister(&brcmf_sdio_pd);
else
- brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
+
+void __init brcmf_sdio_init(void)
+{
+ int ret;
+
+ brcmf_dbg(SDIO, "Enter\n");
+
+ ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+ if (ret == -ENODEV)
+ brcmf_dbg(SDIO, "No platform data available.\n");
}
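A minimal userspace sketch (not part of this patch) of the access pattern used by brcmf_sdiod_regrw_helper() above: addresses in the 0x00-0x7FF CCCR/FBR range go through SDIO function 0, everything else through function 1, and a failed transfer is retried a bounded number of times with a short pause. The do_transfer callback, the REG_F0_REG_MASK value and the standalone layout are assumptions made purely for illustration.

/* sketch only: function selection plus bounded retry, as in the helper above */
#include <stdint.h>
#include <unistd.h>

#define REG_F0_REG_MASK		0x7FF	/* assumed from the 0x00 ~ 0x7FF comment */
#define ACCESS_RETRY_LIMIT	2

typedef int (*xfer_fn)(uint8_t fn, uint32_t addr, void *data,
		       uint8_t regsz, int write);

static int regrw_sketch(xfer_fn do_transfer, uint32_t addr, void *data,
			uint8_t regsz, int write)
{
	/* function 0 only for the CCCR/FBR window, function 1 otherwise */
	uint8_t fn = ((addr & ~(uint32_t)REG_F0_REG_MASK) == 0) ? 0 : 1;
	int retry = 0, ret;

	do {
		if (retry)
			usleep(1000);	/* driver uses usleep_range(1000, 2000) */
		ret = do_transfer(fn, addr, data, regsz, write);
	} while (ret != 0 && retry++ < ACCESS_RETRY_LIMIT);

	return ret;
}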
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
deleted file mode 100644
index abc9ceca70f3..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* request_irq() */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <net/cfg80211.h>
-
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include "sdio_host.h"
-#include "sdio_chip.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
-
-#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-
-#define DMA_ALIGN_MASK 0x03
-
-#define SDIO_FUNC1_BLOCKSIZE 64
-#define SDIO_FUNC2_BLOCKSIZE 512
-
-/* devices we support, null terminated */
-static const struct sdio_device_id brcmf_sdmmc_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
- SDIO_DEVICE_ID_BROADCOM_4335_4339)},
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-
-static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-
-
-bool
-brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
-{
- bool is_err = false;
-#ifdef CONFIG_PM_SLEEP
- is_err = atomic_read(&sdiodev->suspend);
-#endif
- return is_err;
-}
-
-void
-brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
-{
-#ifdef CONFIG_PM_SLEEP
- int retry = 0;
- while (atomic_read(&sdiodev->suspend) && retry++ != 30)
- wait_event_timeout(*wq, false, HZ/100);
-#endif
-}
-
-static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
- uint regaddr, u8 *byte)
-{
- struct sdio_func *sdfunc = sdiodev->func[0];
- int err_ret;
-
- /*
- * Can only directly write to some F0 registers.
- * Handle F2 enable/disable and Abort command
- * as a special case.
- */
- if (regaddr == SDIO_CCCR_IOEx) {
- sdfunc = sdiodev->func[2];
- if (sdfunc) {
- if (*byte & SDIO_FUNC_ENABLE_2) {
- /* Enable Function 2 */
- err_ret = sdio_enable_func(sdfunc);
- if (err_ret)
- brcmf_err("enable F2 failed:%d\n",
- err_ret);
- } else {
- /* Disable Function 2 */
- err_ret = sdio_disable_func(sdfunc);
- if (err_ret)
- brcmf_err("Disable F2 failed:%d\n",
- err_ret);
- }
- } else {
- err_ret = -ENOENT;
- }
- } else if ((regaddr == SDIO_CCCR_ABORT) ||
- (regaddr == SDIO_CCCR_IENx)) {
- sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
- GFP_KERNEL);
- if (!sdfunc)
- return -ENOMEM;
- sdfunc->num = 0;
- sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
- kfree(sdfunc);
- } else if (regaddr < 0xF0) {
- brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
- err_ret = -EPERM;
- } else {
- sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
- }
-
- return err_ret;
-}
-
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
- uint regaddr, u8 *byte)
-{
- int err_ret;
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- if (rw && func == 0) {
- /* handle F0 separately */
- err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
- } else {
- if (rw) /* CMD52 Write */
- sdio_writeb(sdiodev->func[func], *byte, regaddr,
- &err_ret);
- else if (func == 0) {
- *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
- &err_ret);
- } else {
- *byte = sdio_readb(sdiodev->func[func], regaddr,
- &err_ret);
- }
- }
-
- if (err_ret)
- brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
- rw ? "write" : "read", func, regaddr, *byte, err_ret);
-
- return err_ret;
-}
-
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint func, uint addr, u32 *word,
- uint nbytes)
-{
- int err_ret = -EIO;
-
- if (func == 0) {
- brcmf_err("Only CMD52 allowed to F0\n");
- return -EINVAL;
- }
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- rw, func, addr, nbytes);
-
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
- if (brcmf_pm_resume_error(sdiodev))
- return -EIO;
-
- if (rw) { /* CMD52 Write */
- if (nbytes == 4)
- sdio_writel(sdiodev->func[func], *word, addr,
- &err_ret);
- else if (nbytes == 2)
- sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
- addr, &err_ret);
- else
- brcmf_err("Invalid nbytes: %d\n", nbytes);
- } else { /* CMD52 Read */
- if (nbytes == 4)
- *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
- else if (nbytes == 2)
- *word = sdio_readw(sdiodev->func[func], addr,
- &err_ret) & 0xFFFF;
- else
- brcmf_err("Invalid nbytes: %d\n", nbytes);
- }
-
- if (err_ret)
- brcmf_err("Failed to %s word, Err: 0x%08x\n",
- rw ? "write" : "read", err_ret);
-
- return err_ret;
-}
-
-static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
-{
- /* read 24 bits and return valid 17 bit addr */
- int i, ret;
- u32 scratch, regdata;
- __le32 scratch_le;
- u8 *ptr = (u8 *)&scratch_le;
-
- for (i = 0; i < 3; i++) {
- regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
- if (ret != 0)
- brcmf_err("Can't read!\n");
-
- *ptr++ = (u8) regdata;
- regaddr++;
- }
-
- /* Only the lower 17-bits are valid */
- scratch = le32_to_cpu(scratch_le);
- scratch &= 0x0001FFFF;
- return scratch;
-}
-
-static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
-{
- int err_ret;
- u32 fbraddr;
- u8 func;
-
- brcmf_dbg(SDIO, "\n");
-
- /* Get the Card's common CIS address */
- sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
- SDIO_CCCR_CIS);
- brcmf_dbg(SDIO, "Card's Common CIS Ptr = 0x%x\n",
- sdiodev->func_cis_ptr[0]);
-
- /* Get the Card's function CIS (for each function) */
- for (fbraddr = SDIO_FBR_BASE(1), func = 1;
- func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
- sdiodev->func_cis_ptr[func] =
- brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
- brcmf_dbg(SDIO, "Function %d CIS Ptr = 0x%x\n",
- func, sdiodev->func_cis_ptr[func]);
- }
-
- /* Enable Function 1 */
- err_ret = sdio_enable_func(sdiodev->func[1]);
- if (err_ret)
- brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
-
- return false;
-}
-
-/*
- * Public entry points & extern's
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
-{
- int err_ret = 0;
-
- brcmf_dbg(SDIO, "\n");
-
- sdiodev->num_funcs = 2;
-
- sdio_claim_host(sdiodev->func[1]);
-
- err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
- if (err_ret) {
- brcmf_err("Failed to set F1 blocksize\n");
- goto out;
- }
-
- err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
- if (err_ret) {
- brcmf_err("Failed to set F2 blocksize\n");
- goto out;
- }
-
- brcmf_sdioh_enablefuncs(sdiodev);
-
-out:
- sdio_release_host(sdiodev->func[1]);
- brcmf_dbg(SDIO, "Done\n");
- return err_ret;
-}
-
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
-{
- brcmf_dbg(SDIO, "\n");
-
- /* Disable Function 2 */
- sdio_claim_host(sdiodev->func[2]);
- sdio_disable_func(sdiodev->func[2]);
- sdio_release_host(sdiodev->func[2]);
-
- /* Disable Function 1 */
- sdio_claim_host(sdiodev->func[1]);
- sdio_disable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
-
-}
-
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int err;
- struct brcmf_sdio_dev *sdiodev;
- struct brcmf_bus *bus_if;
- struct mmc_host *host;
- uint max_blocks;
-
- brcmf_dbg(SDIO, "Enter\n");
- brcmf_dbg(SDIO, "Class=%x\n", func->class);
- brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
- brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
- brcmf_dbg(SDIO, "Function#: %d\n", func->num);
-
- /* Consume func num 1 but dont do anything with it. */
- if (func->num == 1)
- return 0;
-
- /* Ignore anything but func 2 */
- if (func->num != 2)
- return -ENODEV;
-
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
- if (!bus_if)
- return -ENOMEM;
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev) {
- kfree(bus_if);
- return -ENOMEM;
- }
-
- sdiodev->func[0] = func->card->sdio_func[0];
- sdiodev->func[1] = func->card->sdio_func[0];
- sdiodev->func[2] = func;
-
- sdiodev->bus_if = bus_if;
- bus_if->bus_priv.sdio = sdiodev;
- dev_set_drvdata(&func->dev, bus_if);
- dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
- sdiodev->dev = &sdiodev->func[1]->dev;
- sdiodev->pdata = brcmfmac_sdio_pdata;
-
- atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->request_byte_wait);
- init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_buffer_wait);
-
- brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
- err = brcmf_sdio_probe(sdiodev);
- if (err) {
- brcmf_err("F2 error, probe failed %d...\n", err);
- goto fail;
- }
-
- /*
- * determine host related variables after brcmf_sdio_probe()
- * as func->cur_blksize is properly set and F2 init has been
- * completed successfully.
- */
- host = func->card->host;
- sdiodev->sg_support = host->max_segs > 1;
- max_blocks = min_t(uint, host->max_blk_count, 511u);
- sdiodev->max_request_size = min_t(uint, host->max_req_size,
- max_blocks * func->cur_blksize);
- sdiodev->max_segment_count = min_t(uint, host->max_segs,
- SG_MAX_SINGLE_ALLOC);
- sdiodev->max_segment_size = host->max_seg_size;
- brcmf_dbg(SDIO, "F2 init completed...\n");
- return 0;
-
-fail:
- dev_set_drvdata(&func->dev, NULL);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- kfree(sdiodev);
- kfree(bus_if);
- return err;
-}
-
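The size clamping in brcmf_ops_sdio_probe() above caps a single SDIO request by both the host controller limits and a 511-block ceiling. A minimal worked sketch with hypothetical host limits (65535 blocks, 512-byte F2 block size, 524288-byte maximum request), not taken from this patch:

	uint max_blocks = min_t(uint, 65535, 511u);		/* -> 511 */
	uint max_request_size = min_t(uint, 524288,
				      max_blocks * 512);	/* -> min(524288, 261632) = 261632 */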
-static void brcmf_ops_sdio_remove(struct sdio_func *func)
-{
- struct brcmf_bus *bus_if;
- struct brcmf_sdio_dev *sdiodev;
-
- brcmf_dbg(SDIO, "Enter\n");
- brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
- brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
- brcmf_dbg(SDIO, "Function: %d\n", func->num);
-
- if (func->num != 1 && func->num != 2)
- return;
-
- bus_if = dev_get_drvdata(&func->dev);
- if (bus_if) {
- sdiodev = bus_if->bus_priv.sdio;
- brcmf_sdio_remove(sdiodev);
-
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
-
- kfree(bus_if);
- kfree(sdiodev);
- }
-
- brcmf_dbg(SDIO, "Exit\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int brcmf_sdio_suspend(struct device *dev)
-{
- mmc_pm_flag_t sdio_flags;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- int ret = 0;
-
- brcmf_dbg(SDIO, "\n");
-
- atomic_set(&sdiodev->suspend, true);
-
- sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_err("Host can't keep power while suspended\n");
- return -EINVAL;
- }
-
- ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- brcmf_err("Failed to set pm_flags\n");
- return ret;
- }
-
- brcmf_sdio_wdtmr_enable(sdiodev, false);
-
- return ret;
-}
-
-static int brcmf_sdio_resume(struct device *dev)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-
- brcmf_sdio_wdtmr_enable(sdiodev, true);
- atomic_set(&sdiodev->suspend, false);
- return 0;
-}
-
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_sdio_suspend,
- .resume = brcmf_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
-
-static struct sdio_driver brcmf_sdmmc_driver = {
- .probe = brcmf_ops_sdio_probe,
- .remove = brcmf_ops_sdio_remove,
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
- .drv = {
- .pm = &brcmf_sdio_pm_ops,
- },
-#endif /* CONFIG_PM_SLEEP */
-};
-
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
-
- if (brcmfmac_sdio_pdata->power_on)
- brcmfmac_sdio_pdata->power_on();
-
- return 0;
-}
-
-static int brcmf_sdio_pd_remove(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- if (brcmfmac_sdio_pdata->power_off)
- brcmfmac_sdio_pdata->power_off();
-
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-
- return 0;
-}
-
-static struct platform_driver brcmf_sdio_pd = {
- .remove = brcmf_sdio_pd_remove,
- .driver = {
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-void brcmf_sdio_register(void)
-{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
-}
-
-void brcmf_sdio_exit(void)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- if (brcmfmac_sdio_pdata)
- platform_driver_unregister(&brcmf_sdio_pd);
- else
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-}
-
-void __init brcmf_sdio_init(void)
-{
- int ret;
-
- brcmf_dbg(SDIO, "Enter\n");
-
- ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
- if (ret == -ENODEV)
- brcmf_dbg(SDIO, "No platform data available.\n");
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 899a2ada5b82..939d6b132922 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -21,481 +21,33 @@
#ifndef _BRCMF_H_
#define _BRCMF_H_
-#define BRCMF_VERSION_STR "4.218.248.5"
-
#include "fweh.h"
-/*******************************************************************************
- * IO codes that are interpreted by dongle firmware
- ******************************************************************************/
-#define BRCMF_C_GET_VERSION 1
-#define BRCMF_C_UP 2
-#define BRCMF_C_DOWN 3
-#define BRCMF_C_SET_PROMISC 10
-#define BRCMF_C_GET_RATE 12
-#define BRCMF_C_GET_INFRA 19
-#define BRCMF_C_SET_INFRA 20
-#define BRCMF_C_GET_AUTH 21
-#define BRCMF_C_SET_AUTH 22
-#define BRCMF_C_GET_BSSID 23
-#define BRCMF_C_GET_SSID 25
-#define BRCMF_C_SET_SSID 26
-#define BRCMF_C_TERMINATED 28
-#define BRCMF_C_GET_CHANNEL 29
-#define BRCMF_C_SET_CHANNEL 30
-#define BRCMF_C_GET_SRL 31
-#define BRCMF_C_SET_SRL 32
-#define BRCMF_C_GET_LRL 33
-#define BRCMF_C_SET_LRL 34
-#define BRCMF_C_GET_RADIO 37
-#define BRCMF_C_SET_RADIO 38
-#define BRCMF_C_GET_PHYTYPE 39
-#define BRCMF_C_SET_KEY 45
-#define BRCMF_C_SET_PASSIVE_SCAN 49
-#define BRCMF_C_SCAN 50
-#define BRCMF_C_SCAN_RESULTS 51
-#define BRCMF_C_DISASSOC 52
-#define BRCMF_C_REASSOC 53
-#define BRCMF_C_SET_ROAM_TRIGGER 55
-#define BRCMF_C_SET_ROAM_DELTA 57
-#define BRCMF_C_GET_BCNPRD 75
-#define BRCMF_C_SET_BCNPRD 76
-#define BRCMF_C_GET_DTIMPRD 77
-#define BRCMF_C_SET_DTIMPRD 78
-#define BRCMF_C_SET_COUNTRY 84
-#define BRCMF_C_GET_PM 85
-#define BRCMF_C_SET_PM 86
-#define BRCMF_C_GET_CURR_RATESET 114
-#define BRCMF_C_GET_AP 117
-#define BRCMF_C_SET_AP 118
-#define BRCMF_C_GET_RSSI 127
-#define BRCMF_C_GET_WSEC 133
-#define BRCMF_C_SET_WSEC 134
-#define BRCMF_C_GET_PHY_NOISE 135
-#define BRCMF_C_GET_BSS_INFO 136
-#define BRCMF_C_GET_BANDLIST 140
-#define BRCMF_C_SET_SCB_TIMEOUT 158
-#define BRCMF_C_GET_PHYLIST 180
-#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
-#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
-#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
-#define BRCMF_C_GET_VALID_CHANNELS 217
-#define BRCMF_C_GET_KEY_PRIMARY 235
-#define BRCMF_C_SET_KEY_PRIMARY 236
-#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
-#define BRCMF_C_GET_VAR 262
-#define BRCMF_C_SET_VAR 263
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define WLC_PHY_TYPE_A 0
-#define WLC_PHY_TYPE_B 1
-#define WLC_PHY_TYPE_G 2
-#define WLC_PHY_TYPE_N 4
-#define WLC_PHY_TYPE_LP 5
-#define WLC_PHY_TYPE_SSN 6
-#define WLC_PHY_TYPE_HT 7
-#define WLC_PHY_TYPE_LCN 8
-#define WLC_PHY_TYPE_NULL 0xf
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
-
-/* size of brcmf_scan_params not including variable length array */
-#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
-
-/* primary (ie tx) key */
-#define BRCMF_PRIMARY_KEY (1 << 1)
-
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
-#define BRCMF_ESCAN_REQ_VERSION 1
-
-#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
-
-#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
-#define BRCMF_STA_ASSOC 0x10 /* Associated */
-
-#define BRCMF_E_STATUS_SUCCESS 0
-#define BRCMF_E_STATUS_FAIL 1
-#define BRCMF_E_STATUS_TIMEOUT 2
-#define BRCMF_E_STATUS_NO_NETWORKS 3
-#define BRCMF_E_STATUS_ABORT 4
-#define BRCMF_E_STATUS_NO_ACK 5
-#define BRCMF_E_STATUS_UNSOLICITED 6
-#define BRCMF_E_STATUS_ATTEMPT 7
-#define BRCMF_E_STATUS_PARTIAL 8
-#define BRCMF_E_STATUS_NEWSCAN 9
-#define BRCMF_E_STATUS_NEWASSOC 10
-#define BRCMF_E_STATUS_11HQUIET 11
-#define BRCMF_E_STATUS_SUPPRESS 12
-#define BRCMF_E_STATUS_NOCHANS 13
-#define BRCMF_E_STATUS_CS_ABORT 15
-#define BRCMF_E_STATUS_ERROR 16
-
-#define BRCMF_E_REASON_INITIAL_ASSOC 0
-#define BRCMF_E_REASON_LOW_RSSI 1
-#define BRCMF_E_REASON_DEAUTH 2
-#define BRCMF_E_REASON_DISASSOC 3
-#define BRCMF_E_REASON_BCNS_LOST 4
-#define BRCMF_E_REASON_MINTXRATE 9
-#define BRCMF_E_REASON_TXFAIL 10
-
-#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
-#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
-#define BRCMF_E_REASON_DIRECTED_ROAM 6
-#define BRCMF_E_REASON_TSPEC_REJECTED 7
-#define BRCMF_E_REASON_BETTER_AP 8
-
-#define BRCMF_E_PRUNE_ENCR_MISMATCH 1
-#define BRCMF_E_PRUNE_BCAST_BSSID 2
-#define BRCMF_E_PRUNE_MAC_DENY 3
-#define BRCMF_E_PRUNE_MAC_NA 4
-#define BRCMF_E_PRUNE_REG_PASSV 5
-#define BRCMF_E_PRUNE_SPCT_MGMT 6
-#define BRCMF_E_PRUNE_RADAR 7
-#define BRCMF_E_RSN_MISMATCH 8
-#define BRCMF_E_PRUNE_NO_COMMON_RATES 9
-#define BRCMF_E_PRUNE_BASIC_RATES 10
-#define BRCMF_E_PRUNE_CIPHER_NA 12
-#define BRCMF_E_PRUNE_KNOWN_STA 13
-#define BRCMF_E_PRUNE_WDS_PEER 15
-#define BRCMF_E_PRUNE_QBSS_LOAD 16
-#define BRCMF_E_PRUNE_HOME_AP 17
-
-#define BRCMF_E_SUP_OTHER 0
-#define BRCMF_E_SUP_DECRYPT_KEY_DATA 1
-#define BRCMF_E_SUP_BAD_UCAST_WEP128 2
-#define BRCMF_E_SUP_BAD_UCAST_WEP40 3
-#define BRCMF_E_SUP_UNSUP_KEY_LEN 4
-#define BRCMF_E_SUP_PW_KEY_CIPHER 5
-#define BRCMF_E_SUP_MSG3_TOO_MANY_IE 6
-#define BRCMF_E_SUP_MSG3_IE_MISMATCH 7
-#define BRCMF_E_SUP_NO_INSTALL_FLAG 8
-#define BRCMF_E_SUP_MSG3_NO_GTK 9
-#define BRCMF_E_SUP_GRP_KEY_CIPHER 10
-#define BRCMF_E_SUP_GRP_MSG1_NO_GTK 11
-#define BRCMF_E_SUP_GTK_DECRYPT_FAIL 12
-#define BRCMF_E_SUP_SEND_FAIL 13
-#define BRCMF_E_SUP_DEAUTH 14
-
-#define BRCMF_E_IF_ADD 1
-#define BRCMF_E_IF_DEL 2
-#define BRCMF_E_IF_CHANGE 3
-
-#define BRCMF_E_IF_FLAG_NOIF 1
-
-#define BRCMF_E_IF_ROLE_STA 0
-#define BRCMF_E_IF_ROLE_AP 1
-#define BRCMF_E_IF_ROLE_WDS 2
-
-#define BRCMF_E_LINK_BCN_LOSS 1
-#define BRCMF_E_LINK_DISASSOC 2
-#define BRCMF_E_LINK_ASSOC_REC 3
-#define BRCMF_E_LINK_BSSCFG_DIS 4
-
/* Small, medium and maximum buffer size for dcmd
*/
#define BRCMF_DCMD_SMLEN 256
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
-#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
-
-/* Pattern matching filter. Specifies an offset within received packets to
- * start matching, the pattern to match, the size of the pattern, and a bitmask
- * that indicates which bits within the pattern should be matched.
- */
-struct brcmf_pkt_filter_pattern_le {
- /*
- * Offset within received packet to start pattern matching.
- * Offset '0' is the first byte of the ethernet header.
- */
- __le32 offset;
- /* Size of the pattern. Bitmask must be the same size.*/
- __le32 size_bytes;
- /*
- * Variable length mask and pattern data. mask starts at offset 0.
- * Pattern immediately follows mask.
- */
- u8 mask_and_pattern[1];
-};
-
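The mask-and-pattern layout described in the comment above (mask first, pattern immediately after, each size_bytes long) can be sketched as follows. This is a hypothetical example, not part of the patch; it mirrors the default filter string "100 0 0 0 0x01 0x00" used elsewhere in the driver:

	u8 buf[sizeof(struct brcmf_pkt_filter_pattern_le) + 1];
	struct brcmf_pkt_filter_pattern_le *pat = (void *)buf;

	pat->offset = cpu_to_le32(0);	  /* match from byte 0 of the ethernet header */
	pat->size_bytes = cpu_to_le32(1); /* mask and pattern are one byte each */
	pat->mask_and_pattern[0] = 0x01;  /* mask: test only the multicast bit */
	pat->mask_and_pattern[1] = 0x00;  /* pattern: the bit must be clear */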
-/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
-struct brcmf_pkt_filter_le {
- __le32 id; /* Unique filter id, specified by app. */
- __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
- __le32 negate_match; /* Negate the result of filter matches */
- union { /* Filter definitions */
- struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
- } u;
-};
-
-/* IOVAR "pkt_filter_enable" parameter. */
-struct brcmf_pkt_filter_enable_le {
- __le32 id; /* Unique filter id */
- __le32 enable; /* Enable/disable bool */
-};
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in struct brcmf_scan_results)
+/* IOCTLs from host to device are limited in length. A device can only handle
+ * up to an ethernet frame size. This limitation is to be enforced by the protocol layer.
*/
-struct brcmf_bss_info_le {
- __le32 version; /* version field */
- __le32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- u8 BSSID[ETH_ALEN];
- __le16 beacon_period; /* units are Kusec */
- __le16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- __le32 count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- __le16 chanspec; /* chanspec for bss */
- __le16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- __le16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
-
- u8 n_cap; /* BSS is 802.11N Capable */
- /* 802.11N BSS Capabilities (based on HT_CAP_*): */
- __le32 nbss_cap;
- u8 ctl_ch; /* 802.11N BSS control channel number */
- __le32 reserved32[1]; /* Reserved for expansion of BSS properties */
- u8 flags; /* flags */
- u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+#define BRCMF_TX_IOCTL_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
- __le16 ie_offset; /* offset at which IEs start, from beginning */
- __le32 ie_length; /* byte length of Information Elements */
-	__le16 SNR;			/* average SNR during frame reception */
- /* Add new fields here */
- /* variable length Information Elements */
-};
-
-struct brcm_rateset_le {
- /* # rates in this set */
- __le32 count;
- /* rates in 500kbps units w/hi bit set if basic */
- u8 rates[BRCMF_MAXRATES_IN_SET];
-};
-
-struct brcmf_ssid {
- u32 SSID_len;
- unsigned char SSID[32];
-};
-
-struct brcmf_ssid_le {
- __le32 SSID_len;
- unsigned char SSID[32];
-};
-
-struct brcmf_scan_params_le {
- struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
- u8 bssid[ETH_ALEN]; /* default: bcast */
- s8 bss_type; /* default: any,
- * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
- */
- u8 scan_type; /* flags, 0 use default */
- __le32 nprobes; /* -1 use default, number of probes per channel */
- __le32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- __le32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- __le32 home_time; /* -1 use default, dwell time for the
- * home channel between channel scans
- */
- __le32 channel_num; /* count of channels and ssids that follow
- *
- * low half is count of channels in
- * channel_list, 0 means default (use all
- * available channels)
- *
- * high half is entries in struct brcmf_ssid
- * array that follows channel_list, aligned for
- * s32 (4 bytes) meaning an odd channel count
- * implies a 2-byte pad between end of
- * channel_list and first ssid
- *
- * if ssid count is zero, single ssid in the
- * fixed parameter portion is assumed, otherwise
- * ssid in the fixed portion is ignored
- */
- __le16 channel_list[1]; /* list of chanspecs */
-};
-
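The channel_num encoding documented above packs the channel count into the low half and the SSID count into the high half of one 32-bit word, using BRCMF_SCAN_PARAMS_COUNT_MASK and BRCMF_SCAN_PARAMS_NSSID_SHIFT defined earlier in this header. A minimal sketch, assuming params_le points at a struct brcmf_scan_params_le and using illustrative counts:

	u32 n_channels = 3, n_ssids = 2;

	params_le->channel_num =
		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
			    (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));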
-struct brcmf_scan_results {
- u32 buflen;
- u32 version;
- u32 count;
- struct brcmf_bss_info_le bss_info_le[];
-};
-
-struct brcmf_escan_params_le {
- __le32 version;
- __le16 action;
- __le16 sync_id;
- struct brcmf_scan_params_le params_le;
-};
-
-struct brcmf_escan_result_le {
- __le32 buflen;
- __le32 version;
- __le16 sync_id;
- __le16 bss_count;
- struct brcmf_bss_info_le bss_info_le;
-};
-
-#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
- sizeof(struct brcmf_bss_info_le))
-
-/* used for association with a specific BSSID and chanspec list */
-struct brcmf_assoc_params_le {
- /* 00:00:00:00:00:00: broadcast scan */
- u8 bssid[ETH_ALEN];
- /* 0: all available channels, otherwise count of chanspecs in
- * chanspec_list */
- __le32 chanspec_num;
- /* list of chanspecs */
- __le16 chanspec_list[1];
-};
-
-/* used for join with or without a specific bssid and channel list */
-struct brcmf_join_params {
- struct brcmf_ssid_le ssid_le;
- struct brcmf_assoc_params_le params_le;
-};
-
-/* scan params for extended join */
-struct brcmf_join_scan_params_le {
- u8 scan_type; /* 0 use default, active or passive scan */
- __le32 nprobes; /* -1 use default, nr of probes per channel */
- __le32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- __le32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- __le32 home_time; /* -1 use default, dwell time for the home
- * channel between channel scans
- */
-};
-
-/* extended join params */
-struct brcmf_ext_join_params_le {
- struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
- struct brcmf_join_scan_params_le scan_le;
- struct brcmf_assoc_params_le assoc_le;
-};
-
-struct brcmf_wsec_key {
- u32 index; /* key index */
- u32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- u32 pad_1[18];
- u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u32 flags; /* misc flags */
- u32 pad_2[3];
- u32 iv_initialized; /* has IV been initialized already? */
- u32 pad_3;
- /* Rx IV */
- struct {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
- } rxiv;
- u32 pad_4[2];
- u8 ea[ETH_ALEN]; /* per station */
-};
-
-/*
- * dongle requires same struct as above but with fields in little endian order
- */
-struct brcmf_wsec_key_le {
- __le32 index; /* key index */
- __le32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- __le32 pad_1[18];
- __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- __le32 flags; /* misc flags */
- __le32 pad_2[3];
- __le32 iv_initialized; /* has IV been initialized already? */
- __le32 pad_3;
- /* Rx IV */
- struct {
- __le32 hi; /* upper 32 bits of IV */
- __le16 lo; /* lower 16 bits of IV */
- } rxiv;
- __le32 pad_4[2];
- u8 ea[ETH_ALEN]; /* per station */
-};
-
-/* Used to get specific STA parameters */
-struct brcmf_scb_val_le {
- __le32 val;
- u8 ea[ETH_ALEN];
-};
-
-/* channel encoding */
-struct brcmf_channel_info_le {
- __le32 hw_channel;
- __le32 target_channel;
- __le32 scan_channel;
-};
-
-struct brcmf_sta_info_le {
- __le16 ver; /* version of this struct */
- __le16 len; /* length in bytes of this structure */
- __le16 cap; /* sta's advertised capabilities */
- __le32 flags; /* flags defined below */
- __le32 idle; /* time since data pkt rx'd from sta */
- u8 ea[ETH_ALEN]; /* Station address */
- __le32 count; /* # rates in this set */
- u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
- /* w/hi bit set if basic */
- __le32 in; /* seconds elapsed since associated */
- __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
- __le32 tx_pkts; /* # of packets transmitted */
- __le32 tx_failures; /* # of packets failed */
- __le32 rx_ucast_pkts; /* # of unicast packets received */
- __le32 rx_mcast_pkts; /* # of multicast packets received */
- __le32 tx_rate; /* Rate of last successful tx frame */
- __le32 rx_rate; /* Rate of last successful rx frame */
-	__le32 rx_decrypt_succeeds;	/* # of packets decrypted successfully */
-	__le32 rx_decrypt_failures;	/* # of packet decryption failures */
-};
-
-struct brcmf_chanspec_list {
- __le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
-};
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
-/*
- * WLC_E_PROBRESP_MSG
- * WLC_E_P2P_PROBREQ_MSG
- * WLC_E_ACTION_FRAME_RX
+/* Length of firmware version string stored for
+ * ethtool driver info which uses 32 bytes as well.
*/
-struct brcmf_rx_mgmt_data {
- __be16 version;
- __be16 chanspec;
- __be32 rssi;
- __be32 mactime;
- __be32 rate;
-};
+#define BRCMF_DRIVER_FIRMWARE_VERSION_LEN 32
/* Bus independent dongle command */
struct brcmf_dcmd {
@@ -535,7 +87,7 @@ struct brcmf_fws_info; /* firmware signalling info */
struct brcmf_pub {
	/* Linkage pointers */
struct brcmf_bus *bus_if;
- struct brcmf_proto *prot;
+ struct brcmf_proto *proto;
struct brcmf_cfg80211_info *config;
/* Internal brcmf items */
@@ -544,7 +96,7 @@ struct brcmf_pub {
u8 wme_dp; /* wme discard priority */
/* Dongle media info */
- unsigned long drv_version; /* Version of dongle-resident driver */
+ char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
/* Multicast data packets sent to dongle */
@@ -566,14 +118,6 @@ struct brcmf_pub {
#endif
};
-struct brcmf_if_event {
- u8 ifidx;
- u8 action;
- u8 flags;
- u8 bssidx;
- u8 role;
-};
-
/* forward declarations */
struct brcmf_cfg80211_vif;
struct brcmf_fws_mac_descriptor;
@@ -635,16 +179,6 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
/* Return pointer to interface name */
char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-/* Query dongle */
-int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
-
-/* Remove any protocol-specific data header. */
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *rxp);
-
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr);
@@ -655,4 +189,7 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
bool success);
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a6eb09e5d46f..c4535616064e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,13 +17,23 @@
#ifndef _BRCMF_BUS_H_
#define _BRCMF_BUS_H_
+#include "dhd_dbg.h"
+
/* The level of bus communication with the dongle */
enum brcmf_bus_state {
+ BRCMF_BUS_UNKNOWN, /* Not determined yet */
+ BRCMF_BUS_NOMEDIUM, /* No medium access to dongle */
BRCMF_BUS_DOWN, /* Not ready for frame transfers */
BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
BRCMF_BUS_DATA /* Ready for frame transfers */
};
+/* The type of bus protocol used for communication with the dongle */
+enum brcmf_bus_protocol_type {
+ BRCMF_PROTO_BCDC,
+ BRCMF_PROTO_MSGBUF
+};
+
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -34,6 +44,7 @@ struct brcmf_bus_dcmd {
/**
* struct brcmf_bus_ops - bus callback operations.
*
+ * @preinit: execute bus/device specific dongle init commands (optional).
* @init: prepare for communication with dongle.
* @stop: clear pending frames, disable data flow.
* @txdata: send a data frame to the dongle. When the data
@@ -51,6 +62,7 @@ struct brcmf_bus_dcmd {
* indicated otherwise these callbacks are mandatory.
*/
struct brcmf_bus_ops {
+ int (*preinit)(struct device *dev);
int (*init)(struct device *dev);
void (*stop)(struct device *dev);
int (*txdata)(struct device *dev, struct sk_buff *skb);
@@ -63,6 +75,7 @@ struct brcmf_bus_ops {
* struct brcmf_bus - interface structure between common and bus layer
*
* @bus_priv: pointer to private bus device.
+ * @proto_type: protocol type, bcdc or msgbuf
* @dev: device pointer of bus device.
* @drvr: public driver information.
* @state: operational state of the bus interface.
@@ -78,6 +91,7 @@ struct brcmf_bus {
struct brcmf_sdio_dev *sdio;
struct brcmf_usbdev *usb;
} bus_priv;
+ enum brcmf_bus_protocol_type proto_type;
struct device *dev;
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
@@ -85,7 +99,6 @@ struct brcmf_bus {
unsigned long tx_realloc;
u32 chip;
u32 chiprev;
- struct list_head dcmd_list;
struct brcmf_bus_ops *ops;
};
@@ -93,6 +106,13 @@ struct brcmf_bus {
/*
* callback wrappers
*/
+static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
+{
+ if (!bus->ops->preinit)
+ return 0;
+ return bus->ops->preinit(bus->dev);
+}
+
static inline int brcmf_bus_init(struct brcmf_bus *bus)
{
return bus->ops->init(bus->dev);
@@ -128,6 +148,23 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
return bus->ops->gettxq(bus->dev);
}
+
+static inline bool brcmf_bus_ready(struct brcmf_bus *bus)
+{
+ return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA;
+}
+
+static inline void brcmf_bus_change_state(struct brcmf_bus *bus,
+ enum brcmf_bus_state new_state)
+{
+ /* NOMEDIUM is permanent */
+ if (bus->state == BRCMF_BUS_NOMEDIUM)
+ return;
+
+ brcmf_dbg(TRACE, "%d -> %d\n", bus->state, new_state);
+ bus->state = new_state;
+}
+
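The new brcmf_bus_change_state() helper makes BRCMF_BUS_NOMEDIUM sticky: once a bus driver has reported the medium gone, later transitions are ignored. A hypothetical call sequence showing the effect:

	brcmf_bus_change_state(bus, BRCMF_BUS_NOMEDIUM);	/* card removed */
	brcmf_bus_change_state(bus, BRCMF_BUS_DATA);		/* ignored, state stays NOMEDIUM */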
/*
* interface functions from common layer
*/
@@ -139,7 +176,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
@@ -151,6 +188,9 @@ void brcmf_txflowblock(struct device *dev, bool state);
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
int brcmf_bus_start(struct device *dev);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data,
+ u32 len);
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
deleted file mode 100644
index dd85401063cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*******************************************************************************
- * Communicates with the dongle by using dcmd codes.
- * For certain dcmd codes, the dongle interprets string data from the host.
- ******************************************************************************/
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-
-#include "dhd.h"
-#include "dhd_proto.h"
-#include "dhd_bus.h"
-#include "fwsignal.h"
-#include "dhd_dbg.h"
-#include "tracepoint.h"
-
-struct brcmf_proto_cdc_dcmd {
- __le32 cmd; /* dongle command value */
- __le32 len; /* lower 16: output buflen;
- * upper 16: input buflen (excludes header) */
- __le32 flags; /* flag defns given below */
- __le32 status; /* status code returned from the device */
-};
-
-/* Max valid buffer size that can be sent to the dongle */
-#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
-
-/* CDC flag definitions */
-#define CDC_DCMD_ERROR 0x01 /* 1=cmd failed */
-#define CDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */
-#define CDC_DCMD_IF_MASK 0xF000 /* I/F index */
-#define CDC_DCMD_IF_SHIFT 12
-#define CDC_DCMD_ID_MASK	0xFFFF0000	/* id and cmd pairing */
-#define CDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */
-#define CDC_DCMD_ID(flags) \
- (((flags) & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT)
-
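The CDC_DCMD_* masks above pack the request id, the interface index and the set/get direction into the single 32-bit flags word of struct brcmf_proto_cdc_dcmd, as brcmf_proto_cdc_set_dcmd() does further down. A sketch with example values (request id 7, "set" on interface 2), for illustration only:

	u32 flags;

	flags = (7 << CDC_DCMD_ID_SHIFT) | CDC_DCMD_SET;
	flags = (flags & ~CDC_DCMD_IF_MASK) | (2 << CDC_DCMD_IF_SHIFT);
	/* on the response path, CDC_DCMD_ID(flags) recovers the id, here 7 */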
-/*
- * BDC header - Broadcom specific extension of CDC.
- * Used on data packets to convey priority across USB.
- */
-#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 2 /* Protocol version */
-#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
-#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
-#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
-#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
-#define BDC_PRIORITY_MASK 0x7
-#define BDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
-#define BDC_FLAG2_IF_SHIFT 0
-
-#define BDC_GET_IF_IDX(hdr) \
- ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
-#define BDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
- ((idx) << BDC_FLAG2_IF_SHIFT)))
-
-/**
- * struct brcmf_proto_bdc_header - BDC header format
- *
- * @flags: flags contain protocol and checksum info.
- * @priority: 802.1d priority and USB flow control info (bit 4:7).
- * @flags2: additional flags containing dongle interface index.
- * @data_offset: start of packet data. header is followed by firmware signals.
- */
-struct brcmf_proto_bdc_header {
- u8 flags;
- u8 priority;
- u8 flags2;
- u8 data_offset;
-};
-
-/*
- * maximum length of firmware signal data between
- * the BDC header and packet data in the tx path.
- */
-#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
-
-#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN	(16+64)		/* Must be at least SDPCM_RESERVE
-					 * (amount of header that might be added)
- * plus any space that might be needed
- * for bus alignment padding.
- */
-#define ROUND_UP_MARGIN	2048	/* Biggest bus block size possible for
-				 * round-off at the end of the buffer.
-				 * Currently this is SDIO.
- */
-
-struct brcmf_proto {
- u16 reqid;
- u8 bus_header[BUS_HEADER_LEN];
- struct brcmf_proto_cdc_dcmd msg;
- unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
-};
-
-static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *prot = drvr->prot;
- int len = le32_to_cpu(prot->msg.len) +
- sizeof(struct brcmf_proto_cdc_dcmd);
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* NOTE : cdc->msg.len holds the desired length of the buffer to be
- * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
- * is actually sent to the dongle
- */
- if (len > CDC_MAX_MSG_SIZE)
- len = CDC_MAX_MSG_SIZE;
-
- /* Send request */
- return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&prot->msg, len);
-}
-
-static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
-{
- int ret;
- struct brcmf_proto *prot = drvr->prot;
-
- brcmf_dbg(CDC, "Enter\n");
- len += sizeof(struct brcmf_proto_cdc_dcmd);
- do {
- ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
- len);
- if (ret < 0)
- break;
- } while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
-
- return ret;
-}
-
-int
-brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
- void *info;
- int ret = 0, retries = 0;
- u32 id, flags;
-
- brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- flags = (++prot->reqid << CDC_DCMD_ID_SHIFT);
- flags = (flags & ~CDC_DCMD_IF_MASK) |
- (ifidx << CDC_DCMD_IF_SHIFT);
- msg->flags = cpu_to_le32(flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0) {
- brcmf_err("brcmf_proto_cdc_msg failed w/status %d\n",
- ret);
- goto done;
- }
-
-retry:
- /* wait for interrupt and get first fragment */
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
- if ((id < prot->reqid) && (++retries < RETRIES))
- goto retry;
- if (id != prot->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), id, prot->reqid);
- ret = -EINVAL;
- goto done;
- }
-
- /* Check info buffer */
- info = (void *)&msg[1];
-
- /* Copy info buffer */
- if (buf) {
- if (ret < (int)len)
- len = ret;
- memcpy(buf, info, len);
- }
-
- /* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
-done:
- return ret;
-}
-
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
-{
- struct brcmf_proto *prot = drvr->prot;
- struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
- int ret = 0;
- u32 flags, id;
-
- brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
- memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
- msg->cmd = cpu_to_le32(cmd);
- msg->len = cpu_to_le32(len);
- flags = (++prot->reqid << CDC_DCMD_ID_SHIFT) | CDC_DCMD_SET;
- flags = (flags & ~CDC_DCMD_IF_MASK) |
- (ifidx << CDC_DCMD_IF_SHIFT);
- msg->flags = cpu_to_le32(flags);
-
- if (buf)
- memcpy(prot->buf, buf, len);
-
- ret = brcmf_proto_cdc_msg(drvr);
- if (ret < 0)
- goto done;
-
- ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
- if (ret < 0)
- goto done;
-
- flags = le32_to_cpu(msg->flags);
- id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
- if (id != prot->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(drvr, ifidx), id, prot->reqid);
- ret = -EINVAL;
- goto done;
- }
-
- /* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
-done:
- return ret;
-}
-
-static bool pkt_sum_needed(struct sk_buff *skb)
-{
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-static void pkt_set_sum_good(struct sk_buff *skb, bool x)
-{
- skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
-}
-
-void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* Push BDC header used to convey priority for buses that don't */
- skb_push(pktbuf, BDC_HEADER_LEN);
-
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
- if (pkt_sum_needed(pktbuf))
- h->flags |= BDC_FLAG_SUM_NEEDED;
-
- h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
- h->flags2 = 0;
- h->data_offset = offset;
- BDC_SET_IF_IDX(h, ifidx);
- trace_brcmf_bdchdr(pktbuf->data);
-}
-
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *pktbuf)
-{
- struct brcmf_proto_bdc_header *h;
-
- brcmf_dbg(CDC, "Enter\n");
-
- /* Pop BDC header used to convey priority for buses that don't */
-
- if (pktbuf->len <= BDC_HEADER_LEN) {
- brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
- pktbuf->len, BDC_HEADER_LEN);
- return -EBADE;
- }
-
- trace_brcmf_bdchdr(pktbuf->data);
- h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
- *ifidx = BDC_GET_IF_IDX(h);
- if (*ifidx >= BRCMF_MAX_IFS) {
- brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
- return -EBADE;
- }
- /* The ifidx is the idx to map to matching netdev/ifp. When receiving
- * events this is easy because it contains the bssidx which maps
-	 * 1-on-1 to the netdev/ifp. But for data frames the ifidx is received.
-	 * bssidx 1 is used for p2p0 and no data can be received or
-	 * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
- */
- if (*ifidx)
- (*ifidx)++;
-
- if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
- BDC_PROTO_VER) {
- brcmf_err("%s: non-BDC packet received, flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags);
- return -EBADE;
- }
-
- if (h->flags & BDC_FLAG_SUM_GOOD) {
- brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
- brcmf_ifname(drvr, *ifidx), h->flags);
- pkt_set_sum_good(pktbuf, true);
- }
-
- pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
-
- skb_pull(pktbuf, BDC_HEADER_LEN);
- if (do_fws)
- brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
- else
- skb_pull(pktbuf, h->data_offset << 2);
-
- if (pktbuf->len == 0)
- return -ENODATA;
- return 0;
-}
-
-int brcmf_proto_attach(struct brcmf_pub *drvr)
-{
- struct brcmf_proto *cdc;
-
- cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
- if (!cdc)
- goto fail;
-
- /* ensure that the msg buf directly follows the cdc msg struct */
- if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
- brcmf_err("struct brcmf_proto is not correctly defined\n");
- goto fail;
- }
-
- drvr->prot = cdc;
- drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
- drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
- return 0;
-
-fail:
- kfree(cdc);
- return -ENOMEM;
-}
-
-/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
-void brcmf_proto_detach(struct brcmf_pub *drvr)
-{
- kfree(drvr->prot);
- drvr->prot = NULL;
-}
-
-void brcmf_proto_stop(struct brcmf_pub *drvr)
-{
- /* Nothing to do for CDC */
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 9431af2465f3..6a8983a1fb9c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -21,9 +21,9 @@
#include <brcmu_utils.h>
#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil.h"
+#include "fwil_types.h"
#include "tracepoint.h"
#define PKTFILTER_BUF_SIZE 128
@@ -32,15 +32,6 @@
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
-#ifdef DEBUG
-static const char brcmf_version[] =
- "Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on "
- __DATE__ " at " __TIME__;
-#else
-static const char brcmf_version[] =
- "Dongle Host Driver, version " BRCMF_VERSION_STR;
-#endif
-
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
@@ -257,8 +248,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
u8 buf[BRCMF_DCMD_SMLEN];
char *ptr;
s32 err;
- struct brcmf_bus_dcmd *cmdlst;
- struct list_head *cur, *q;
	/* retrieve mac address */
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -281,9 +270,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
}
ptr = (char *)buf;
strsep(&ptr, "\n");
+
/* Print fw version info */
brcmf_err("Firmware version = %s\n", buf);
+ /* locate firmware version number for ethtool */
+ ptr = strrchr(buf, ' ') + 1;
+ strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
/*
* Setup timeout if Beacons are lost and roam is off to report
* link down
@@ -342,17 +336,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
0, true);
- /* set bus specific command if there is any */
- list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
- cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
- if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
- brcmf_fil_iovar_data_set(ifp, cmdlst->name,
- cmdlst->param,
- cmdlst->param_len);
- }
- list_del(cur);
- kfree(cmdlst);
- }
+ /* do bus specific preinit here */
+ err = brcmf_bus_preinit(ifp->drvr->bus_if);
done:
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 0f9e9057e7dd..03fe8aca4d32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,7 +22,6 @@
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
-#include "tracepoint.h"
static struct dentry *root_folder;
@@ -42,6 +41,40 @@ void brcmf_debugfs_exit(void)
root_folder = NULL;
}
+static
+ssize_t brcmf_debugfs_chipinfo_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_pub *drvr = f->private_data;
+ struct brcmf_bus *bus = drvr->bus_if;
+ char buf[40];
+ int res;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf), "chip: %x(%u) rev %u\n",
+ bus->chip, bus->chip, bus->chiprev);
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_chipinfo_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_debugfs_chipinfo_read
+};
+
+static int brcmf_debugfs_create_chipinfo(struct brcmf_pub *drvr)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("chipinfo", S_IRUGO, dentry, drvr,
+ &brcmf_debugfs_chipinfo_ops);
+ return 0;
+}
+
int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
struct device *dev = drvr->bus_if->dev;
@@ -50,6 +83,7 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr)
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+ brcmf_debugfs_create_chipinfo(drvr);
return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 0af1f5dc583a..ef52ed7abc69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -33,7 +33,7 @@
#define BRCMF_USB_VAL 0x00002000
#define BRCMF_SCAN_VAL 0x00004000
#define BRCMF_CONN_VAL 0x00008000
-#define BRCMF_CDC_VAL 0x00010000
+#define BRCMF_BCDC_VAL 0x00010000
#define BRCMF_SDIO_VAL 0x00020000
/* set default print format */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 64e9cff241b9..d4d966beb840 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -24,13 +24,13 @@
#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil_types.h"
#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
+#include "proto.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -592,28 +592,6 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
return &ifp->stats;
}
-/*
- * Set current toe component enables in toe_ol iovar,
- * and set toe global enable iovar
- */
-static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
-{
- s32 err;
-
- err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
- if (err < 0) {
- brcmf_err("Setting toe_ol failed, %d\n", err);
- return err;
- }
-
- err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
- if (err < 0)
- brcmf_err("Setting toe failed, %d\n", err);
-
- return err;
-
-}
-
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -621,8 +599,8 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct brcmf_pub *drvr = ifp->drvr;
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "%lu",
- drvr->drv_version);
+ snprintf(info->version, sizeof(info->version), "n/a");
+ strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
}
@@ -631,124 +609,6 @@ static const struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
-static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
-{
- struct brcmf_pub *drvr = ifp->drvr;
- struct ethtool_drvinfo info;
- char drvname[sizeof(info.driver)];
- u32 cmd;
- struct ethtool_value edata;
- u32 toe_cmpnt, csum_dir;
- int ret;
-
- brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
-
- /* all ethtool calls start with a cmd word */
- if (copy_from_user(&cmd, uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (cmd) {
- case ETHTOOL_GDRVINFO:
- /* Copy out any request driver name */
- if (copy_from_user(&info, uaddr, sizeof(info)))
- return -EFAULT;
- strncpy(drvname, info.driver, sizeof(info.driver));
- drvname[sizeof(info.driver) - 1] = '\0';
-
- /* clear struct for return */
- memset(&info, 0, sizeof(info));
- info.cmd = cmd;
-
- /* if requested, identify ourselves */
- if (strcmp(drvname, "?dhd") == 0) {
- sprintf(info.driver, "dhd");
- strcpy(info.version, BRCMF_VERSION_STR);
- }
- /* report dongle driver type */
- else
- sprintf(info.driver, "wl");
-
- sprintf(info.version, "%lu", drvr->drv_version);
- if (copy_to_user(uaddr, &info, sizeof(info)))
- return -EFAULT;
- brcmf_dbg(TRACE, "given %*s, returning %s\n",
- (int)sizeof(drvname), drvname, info.driver);
- break;
-
- /* Get toe offload components from dongle */
- case ETHTOOL_GRXCSUM:
- case ETHTOOL_GTXCSUM:
- ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- edata.cmd = cmd;
- edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
-
- if (copy_to_user(uaddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
-
- /* Set toe offload components in dongle */
- case ETHTOOL_SRXCSUM:
- case ETHTOOL_STXCSUM:
- if (copy_from_user(&edata, uaddr, sizeof(edata)))
- return -EFAULT;
-
- /* Read the current settings, update and write back */
- ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
- if (ret < 0)
- return ret;
-
- csum_dir =
- (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- if (edata.data != 0)
- toe_cmpnt |= csum_dir;
- else
- toe_cmpnt &= ~csum_dir;
-
- ret = brcmf_toe_set(ifp, toe_cmpnt);
- if (ret < 0)
- return ret;
-
- /* If setting TX checksum mode, tell Linux the new mode */
- if (cmd == ETHTOOL_STXCSUM) {
- if (edata.data)
- ifp->ndev->features |= NETIF_F_IP_CSUM;
- else
- ifp->ndev->features &= ~NETIF_F_IP_CSUM;
- }
-
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
- int cmd)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
-
- if (!drvr->iflist[ifp->bssidx])
- return -1;
-
- if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(ifp, ifr->ifr_data);
-
- return -EOPNOTSUPP;
-}
-
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -769,7 +629,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_bus *bus_if = drvr->bus_if;
u32 toe_ol;
- s32 ret = 0;
brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
@@ -788,21 +647,20 @@ static int brcmf_netdev_open(struct net_device *ndev)
else
ndev->features &= ~NETIF_F_IP_CSUM;
- /* Allow transmit calls */
- netif_start_queue(ndev);
if (brcmf_cfg80211_up(ndev)) {
brcmf_err("failed to bring up cfg80211\n");
- return -1;
+ return -EIO;
}
- return ret;
+ /* Allow transmit calls */
+ netif_start_queue(ndev);
+ return 0;
}
static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_open = brcmf_netdev_open,
.ndo_stop = brcmf_netdev_stop,
.ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
.ndo_start_xmit = brcmf_netdev_start_xmit,
.ndo_set_mac_address = brcmf_netdev_set_mac_address,
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
@@ -844,7 +702,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
- ndev->destructor = free_netdev;
+ ndev->destructor = brcmf_cfg80211_free_netdev;
return 0;
fail:
@@ -868,13 +726,6 @@ static int brcmf_net_p2p_stop(struct net_device *ndev)
return brcmf_cfg80211_down(ndev);
}
-static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
- struct ifreq *ifr, int cmd)
-{
- brcmf_dbg(TRACE, "Enter\n");
- return 0;
-}
-
static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -887,7 +738,6 @@ static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
static const struct net_device_ops brcmf_netdev_ops_p2p = {
.ndo_open = brcmf_net_p2p_open,
.ndo_stop = brcmf_net_p2p_stop,
- .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
@@ -1009,14 +859,12 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
}
/* unregister will take care of freeing it */
unregister_netdev(ifp->ndev);
- if (bssidx == 0)
- brcmf_cfg80211_detach(drvr->config);
} else {
kfree(ifp);
}
}
-int brcmf_attach(uint bus_hdrlen, struct device *dev)
+int brcmf_attach(struct device *dev)
{
struct brcmf_pub *drvr = NULL;
int ret = 0;
@@ -1031,7 +879,7 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
mutex_init(&drvr->proto_block);
/* Link to bus module */
- drvr->hdrlen = bus_hdrlen;
+ drvr->hdrlen = 0;
drvr->bus_if = dev_get_drvdata(dev);
drvr->bus_if->drvr = drvr;
@@ -1048,8 +896,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
/* attach firmware event handler */
brcmf_fweh_attach(drvr);
- INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
-
return ret;
fail:
@@ -1088,7 +934,7 @@ int brcmf_bus_start(struct device *dev)
p2p_ifp = NULL;
/* signal bus ready */
- bus_if->state = BRCMF_BUS_DATA;
+ brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);
/* Bus is ready, do any initialization */
ret = brcmf_c_preinit_dcmds(ifp);
@@ -1115,8 +961,7 @@ int brcmf_bus_start(struct device *dev)
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
- if (drvr->config)
- brcmf_cfg80211_detach(drvr->config);
+ brcmf_cfg80211_detach(drvr->config);
if (drvr->fws) {
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
@@ -1138,14 +983,21 @@ fail:
return 0;
}
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ if (drvr) {
+ drvr->hdrlen += len;
+ }
+}
+
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr) {
- /* Stop the protocol module */
- brcmf_proto_stop(drvr);
-
/* Stop the bus module */
brcmf_bus_stop(drvr->bus_if);
}
@@ -1177,6 +1029,8 @@ void brcmf_detach(struct device *dev)
/* stop firmware event handling */
brcmf_fweh_detach(drvr);
+ brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
if (drvr->iflist[i]) {
@@ -1184,10 +1038,11 @@ void brcmf_detach(struct device *dev)
brcmf_del_if(drvr, i);
}
+ brcmf_cfg80211_detach(drvr->config);
+
brcmf_bus_detach(drvr);
- if (drvr->prot)
- brcmf_proto_detach(drvr);
+ brcmf_proto_detach(drvr);
brcmf_fws_deinit(drvr);
@@ -1196,6 +1051,14 @@ void brcmf_detach(struct device *dev)
kfree(drvr);
}
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_if *ifp = bus_if->drvr->iflist[0];
+
+ return brcmf_fil_iovar_data_set(ifp, name, data, len);
+}
+
static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
return atomic_read(&ifp->pend_8021x_cnt);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
deleted file mode 100644
index 53c6e710f2cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_PROTO_H_
-#define _BRCMF_PROTO_H_
-
-/*
- * Exported from the brcmf protocol module (brcmf_cdc)
- */
-
-/* Linkage, sets prot link and updates hdrlen in pub */
-int brcmf_proto_attach(struct brcmf_pub *drvr);
-
-/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-void brcmf_proto_detach(struct brcmf_pub *drvr);
-
-/* Stop protocol: sync w/dongle state. */
-void brcmf_proto_stop(struct brcmf_pub *drvr);
-
-/* Add any protocol-specific data header.
- * Caller must reserve prot_hdrlen prepend space.
- */
-void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
- struct sk_buff *txp);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
-
-#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index b02953c4ade7..ddaa9efd053d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -32,6 +32,7 @@
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/moduleparam.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -40,6 +41,7 @@
#include <soc.h>
#include "sdio_host.h"
#include "sdio_chip.h"
+#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -110,6 +112,8 @@ struct rte_console {
#define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */
+#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+
#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading
@@ -257,9 +261,6 @@ struct rte_console {
#define MAX_HDR_READ (1 << 6)
#define MAX_RX_DATASZ 2048
-/* Maximum milliseconds to wait for F2 to come up */
-#define BRCMF_WAIT_F2RDY 3000
-
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
* for HT availability, it could take a couple hundred ms more, so
@@ -360,15 +361,15 @@ struct brcmf_sdio_hdrinfo {
u16 len_left;
u16 len_nxtfrm;
u8 dat_offset;
+ bool lastfrm;
+ u16 tail_pad;
};
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
- struct chip_info *ci; /* Chip info struct */
- char *vars; /* Variables (from CIS and/or other) */
- uint varsz; /* Size of variables buffer */
+ struct brcmf_chip *ci; /* Chip info struct */
u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
@@ -384,7 +385,7 @@ struct brcmf_sdio {
u8 tx_seq; /* Transmit sequence number (next) */
u8 tx_max; /* Maximum transmit sequence allowed */
- u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
+ u8 *hdrbuf; /* buffer for handling rx frame */
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u8 rx_seq; /* Receive sequence number (expected) */
struct brcmf_sdio_hdrinfo cur_read;
@@ -455,6 +456,9 @@ struct brcmf_sdio {
bool sleeping; /* SDIO bus sleeping */
u8 tx_hdrlen; /* sdio bus header length for tx packet */
+ bool txglom; /* host tx glomming enable flag */
+ u16 head_align; /* buffer pointer alignment */
+ u16 sgentry_align; /* scatter-gather buffer alignment */
};
/* clkstate */
@@ -479,6 +483,10 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
+static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,
BRCMF_SDIO_FT_SUPER,
@@ -499,6 +507,10 @@ enum brcmf_sdio_frmtype {
#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin"
+#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
+#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
+#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -514,6 +526,10 @@ MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
@@ -537,11 +553,13 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
- { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+ { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
};
-static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
enum brcmf_firmware_type type)
{
const struct firmware *fw;
@@ -606,8 +624,8 @@ r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
int ret;
- *regvar = brcmf_sdio_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + offset, &ret);
return ret;
}
@@ -618,15 +636,15 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
int ret;
- brcmf_sdio_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ brcmf_sdiod_regwl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + reg_offset,
+ regval, &ret);
return ret;
}
static int
-brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
+brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
u8 wr_val = 0, rd_val, cmp_val, bmask;
int err = 0;
@@ -636,8 +654,8 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
if (err) {
brcmf_err("SDIO_AOS KSO write error: %d\n", err);
return err;
@@ -667,15 +685,15 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
	 * a single write attempt may fail, so
	 * read it back until it matches the written value
*/
- rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- &err);
+ rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ &err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
try_cnt, MAX_KSO_ATTEMPTS, err);
udelay(KSO_WAIT_US);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
return err;
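
The KSO handshake above is a write-then-poll-readback retry: the first write may be lost while the device is still in AOS sleep, so the value is rewritten and read back until the masked bits match or MAX_KSO_ATTEMPTS is exhausted. Below is a minimal standalone sketch of that pattern; reg_read()/reg_write() and wake_latency are invented stand-ins for the SLEEPCSR accesses, not driver API.

#include <stdio.h>

#define MAX_ATTEMPTS 64

/* simulated device register: writes do not latch until the core is awake */
static unsigned char sleepcsr;
static int wake_latency = 3;

static unsigned char reg_read(void)
{
	if (wake_latency > 0) {
		wake_latency--;
		return 0;		/* not awake yet: stale read */
	}
	return sleepcsr;
}

static void reg_write(unsigned char v)
{
	if (wake_latency <= 0)
		sleepcsr = v;		/* only sticks once awake */
}

int main(void)
{
	unsigned char wr_val = 0x01, bmask = 0x01;
	int try_cnt = 0;

	reg_write(wr_val);		/* 1st write may go nowhere */
	do {
		if ((reg_read() & bmask) == (wr_val & bmask))
			break;		/* read-back matches: done */
		reg_write(wr_val);	/* retry the write */
	} while (++try_cnt < MAX_ATTEMPTS);

	printf("settled after %d retries\n", try_cnt);
	return 0;
}

The same shape (bounded retries, verify by read-back) is what the driver loop implements against SBSDIO_FUNC1_SLEEPCSR.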
@@ -686,7 +704,7 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
+static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -706,16 +724,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_err("HT Avail read error: %d\n", err);
return -EBADE;
@@ -724,8 +742,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_err("Devctl error setting CA: %d\n",
err);
@@ -733,28 +751,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (time_after(jiffies, timeout))
break;
else
@@ -787,16 +805,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_err("Failed access turning clock off: %d\n",
@@ -808,7 +826,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
+static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(SDIO, "Enter\n");
@@ -821,7 +839,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
+static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef DEBUG
uint oldstate = bus->clkstate;
@@ -832,7 +850,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
/* Early exit if we're already there */
if (bus->clkstate == target) {
if (target == CLK_AVAIL) {
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
bus->activity = true;
}
return 0;
@@ -842,32 +860,32 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
case CLK_AVAIL:
/* Make sure SD clock is available */
if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
+ brcmf_sdio_sdclk(bus, true);
/* Now request HT Avail on the backplane */
- brcmf_sdbrcm_htclk(bus, true, pendok);
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_htclk(bus, true, pendok);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
bus->activity = true;
break;
case CLK_SDONLY:
/* Remove HT request, or bring up SD clock */
if (bus->clkstate == CLK_NONE)
- brcmf_sdbrcm_sdclk(bus, true);
+ brcmf_sdio_sdclk(bus, true);
else if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
+ brcmf_sdio_htclk(bus, false, false);
else
brcmf_err("request for %d -> %d\n",
bus->clkstate, target);
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
break;
case CLK_NONE:
/* Make sure to remove HT request */
if (bus->clkstate == CLK_AVAIL)
- brcmf_sdbrcm_htclk(bus, false, false);
+ brcmf_sdio_htclk(bus, false, false);
/* Now remove the SD clock */
- brcmf_sdbrcm_sdclk(bus, false);
- brcmf_sdbrcm_wd_timer(bus, 0);
+ brcmf_sdio_sdclk(bus, false);
+ brcmf_sdio_wd_timer(bus, 0);
break;
}
#ifdef DEBUG
@@ -878,7 +896,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
}
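
brcmf_sdio_clkctl() is essentially a three-state clock machine (CLK_NONE, CLK_SDONLY, CLK_AVAIL): the SD clock must be up before HT is requested, and HT must be dropped before the SD clock is removed. A small standalone sketch of just that transition ordering, with sdclk()/htclk() as stand-ins for the real register sequences:

#include <stdio.h>
#include <stdbool.h>

enum clkstate { CLK_NONE, CLK_SDONLY, CLK_AVAIL };

static enum clkstate state = CLK_NONE;

static void sdclk(bool on) { printf("  sd clock %s\n", on ? "on" : "off"); }
static void htclk(bool on) { printf("  ht clock %s\n", on ? "on" : "off"); }

static void clkctl(enum clkstate target)
{
	if (state == target)
		return;

	switch (target) {
	case CLK_AVAIL:
		if (state == CLK_NONE)
			sdclk(true);	/* SD clock must be up first */
		htclk(true);		/* then request HT on the backplane */
		break;
	case CLK_SDONLY:
		if (state == CLK_NONE)
			sdclk(true);	/* bring up SD clock only */
		else if (state == CLK_AVAIL)
			htclk(false);	/* or drop the HT request */
		break;
	case CLK_NONE:
		if (state == CLK_AVAIL)
			htclk(false);	/* remove HT request first */
		sdclk(false);		/* then remove the SD clock */
		break;
	}
	state = target;
}

int main(void)
{
	printf("-> CLK_AVAIL\n");  clkctl(CLK_AVAIL);
	printf("-> CLK_SDONLY\n"); clkctl(CLK_SDONLY);
	printf("-> CLK_NONE\n");   clkctl(CLK_NONE);
	return 0;
}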
static int
-brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -901,13 +919,13 @@ brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus)))
return -EBUSY;
- err = brcmf_sdbrcm_kso_control(bus, false);
+ err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
if (!err)
- brcmf_sdbrcm_wd_timer(bus, 0);
+ brcmf_sdio_wd_timer(bus, 0);
} else {
bus->idlecount = 0;
- err = brcmf_sdbrcm_kso_control(bus, true);
+ err = brcmf_sdio_kso_control(bus, true);
}
if (!err) {
/* Change state */
@@ -925,16 +943,16 @@ end:
/* control clocks */
if (sleep) {
if (!bus->sr_enabled)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
+ brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
} else {
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
return err;
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1010,7 +1028,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
+static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1022,18 +1040,18 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCHI, &err);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCHI, &err);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1063,14 +1081,10 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
/* Clear partial in any case */
bus->cur_read.len = 0;
-
- /* If we can't reach the device, signal failure */
- if (err)
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
struct sk_buff *p;
uint total;
@@ -1081,7 +1095,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
return total;
}
-static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
{
struct sk_buff *cur, *next;
@@ -1097,10 +1111,18 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
* host and WiFi dongle which contains information needed for SDIO core and
* firmware
*
- * It consists of 2 parts: hw header and software header
+ * It consists of 3 parts: hardware header, hardware extension header and
+ * software header
* hardware header (frame tag) - 4 bytes
* Byte 0~1: Frame length
* Byte 2~3: Checksum, bit-wise inverse of frame length
+ * hardware extension header - 8 bytes
+ * Tx glom mode only, N/A for Rx or normal Tx
+ * Byte 0~1: Packet length excluding hw frame tag
+ * Byte 2: Reserved
+ * Byte 3: Frame flags, bit 0: last frame indication
+ * Byte 4~5: Reserved
+ * Byte 6~7: Tail padding length
* software header - 8 bytes
* Byte 0: Rx/Tx sequence number
* Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
@@ -1111,6 +1133,7 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
* Byte 6~7: Reserved
*/
#define SDPCM_HWHDR_LEN 4
+#define SDPCM_HWEXT_LEN 8
#define SDPCM_SWHDR_LEN 8
#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
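
To make the layout described in the comment above concrete, here is a hedged standalone sketch that builds the 4-byte frame tag (length plus its bit-wise inverse) and a 12-byte header, then applies the same `(u16)(~(len ^ checksum))` sanity check the driver uses on receive. The channel/offset bit positions are illustrative only, not the driver's SDPCM_* values.

#include <stdio.h>
#include <stdint.h>

/* illustrative positions; the real SDPCM_* shifts/masks live in the driver */
#define CHAN_SHIFT	8
#define DOFF_SHIFT	16

static void put_le16(uint8_t *p, uint16_t v) { p[0] = v & 0xff; p[1] = v >> 8; }
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff; p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff; p[3] = (v >> 24) & 0xff;
}
static uint16_t get_le16(const uint8_t *p) { return p[0] | (p[1] << 8); }

int main(void)
{
	uint8_t hdr[12];	/* 4-byte hw frame tag + 8-byte sw header */
	uint16_t len = 1536;
	uint32_t sw;

	/* hardware header (frame tag): length and its bit-wise inverse */
	put_le16(hdr, len);
	put_le16(hdr + 2, ~len);

	/* software header: seq number, channel, data offset (bit layout illustrative) */
	sw = 7 | (2u << CHAN_SHIFT) | (12u << DOFF_SHIFT);
	put_le32(hdr + 4, sw);
	put_le32(hdr + 8, 0);

	/* receiver-side sanity check, same idea as the hdparse test */
	if ((uint16_t)~(get_le16(hdr) ^ get_le16(hdr + 2)))
		printf("frame tag checksum mismatch\n");
	else
		printf("frame tag ok, len %u\n", get_le16(hdr));
	return 0;
}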
/* software header */
@@ -1147,7 +1170,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
- trace_brcmf_sdpcm_hdr(false, header);
+ trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
/* hw header */
len = get_unaligned_le16(header);
@@ -1160,7 +1183,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if ((u16)(~(len ^ checksum))) {
brcmf_err("HW header checksum error\n");
bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
return -EIO;
}

if (len < SDPCM_HDRLEN) {
@@ -1192,7 +1215,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
bus->sdcnt.rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
rd->len = 0;
return -EPROTO;
}
@@ -1211,7 +1234,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
rd->len = 0;
return -ENXIO;
}
@@ -1260,25 +1283,34 @@ static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *hd_info)
{
- u32 sw_header;
+ u32 hdrval;
+ u8 hdr_offset;
brcmf_sdio_update_hwhdr(header, hd_info->len);
-
- sw_header = bus->tx_seq;
- sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
- SDPCM_CHANNEL_MASK;
- sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
- SDPCM_DOFFSET_MASK;
- *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
- *(((__le32 *)header) + 2) = 0;
- trace_brcmf_sdpcm_hdr(true, header);
+ hdr_offset = SDPCM_HWHDR_LEN;
+
+ if (bus->txglom) {
+ hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
+ *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+ hdrval = (u16)hd_info->tail_pad << 16;
+ *(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
+ hdr_offset += SDPCM_HWEXT_LEN;
+ }
+
+ hdrval = hd_info->seq_num;
+ hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK;
+ hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK;
+ *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+ *(((__le32 *)(header + hdr_offset)) + 1) = 0;
+ trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
}
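
When tx glomming is enabled, the packing above inserts the 8-byte hardware extension header between the frame tag and the software header: word 0 carries the frame length excluding the frame tag with the last-frame flag in byte 3 bit 0, and word 1 carries the tail padding length in bytes 6~7. A tiny sketch of those two words, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t frm_len  = 1544;	/* whole frame incl. 4-byte frame tag */
	uint16_t tail_pad = 24;		/* padding appended for sg alignment  */
	int      lastfrm  = 1;		/* last frame in the glom chain       */

	/* word 0: length excluding the hw frame tag, last-frame flag in bit 24 */
	uint32_t w0 = (uint32_t)(frm_len - 4) | ((uint32_t)lastfrm << 24);
	/* word 1: tail padding length in bytes 6~7 of the extension header */
	uint32_t w1 = (uint32_t)tail_pad << 16;

	printf("hwext word0 = 0x%08x, word1 = 0x%08x\n", w0, w1);
	return 0;
}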
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
- u32 align = 0;
u16 sublen;
struct sk_buff *pfirst, *pnext;
@@ -1293,11 +1325,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- if (bus->sdiodev->pdata)
- align = bus->sdiodev->pdata->sd_sgentry_align;
- if (align < 4)
- align = 4;
-
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = pnext = NULL;
@@ -1321,9 +1348,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pnext = NULL;
break;
}
- if (sublen % align) {
+ if (sublen % bus->sgentry_align) {
brcmf_err("sublen %d not multiple of %d\n",
- sublen, align);
+ sublen, bus->sgentry_align);
}
totlen += sublen;
@@ -1336,7 +1363,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = brcmu_pkt_buf_get_skb(sublen + align);
+ pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
if (pnext == NULL) {
brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
@@ -1345,7 +1372,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
- pkt_align(pnext, sublen, align);
+ pkt_align(pnext, sublen, bus->sgentry_align);
}
/* If all allocations succeeded, save packet chain
@@ -1360,7 +1387,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
pfirst = pnext = NULL;
} else {
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
num = 0;
}
@@ -1383,16 +1410,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
pfirst = skb_peek(&bus->glom);
- dlen = (u16) brcmf_sdbrcm_glom_len(bus);
+ dlen = (u16) brcmf_sdio_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
	 * packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
+ errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
+ &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1403,12 +1429,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
return 0;
@@ -1456,12 +1482,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
skb_push(pfirst, sfdoff);
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = 0;
@@ -1505,8 +1531,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
- bool *pending)
+static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
+ bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
@@ -1527,7 +1553,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
+static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1535,7 +1561,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
u8 *buf = NULL, *rbuf;
@@ -1549,9 +1575,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
goto done;
rbuf = bus->rxbuf;
- pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
+ pad = ((unsigned long)rbuf % bus->head_align);
if (pad)
- rbuf += (BRCMF_SDALIGN - pad);
+ rbuf += (bus->head_align - pad);
/* Copy the already-read portion over */
memcpy(buf, hdr, BRCMF_FIRSTREAD);
@@ -1565,19 +1591,15 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((len + pad) < bus->sdiodev->bus_if->maxctl))
rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ } else if (rdlen % bus->head_align) {
+ rdlen += bus->head_align - (rdlen % bus->head_align);
}
- /* Satisfy length-alignment requirements */
- if (rdlen & (ALIGNMENT - 1))
- rdlen = roundup(rdlen, ALIGNMENT);
-
/* Drop if the read is too big or it exceeds our maximum */
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
goto done;
}
@@ -1585,15 +1607,12 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdcnt.rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
goto done;
}
	/* Read the remainder of the frame body */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, rbuf, rdlen);
+ sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
@@ -1601,7 +1620,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_err("read %d control bytes failed: %d\n",
rdlen, sdret);
bus->sdcnt.rxc_errors++;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
goto done;
} else
memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
@@ -1626,19 +1645,19 @@ gotpkt:
done:
/* Awake any waiters */
- brcmf_sdbrcm_dcmd_resp_wake(bus);
+ brcmf_sdio_dcmd_resp_wake(bus);
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
+static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
if (*pad <= bus->roundup && *pad < bus->blocksize &&
*rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
*rdlen += *pad;
- } else if (*rdlen % BRCMF_SDALIGN) {
- *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
+ } else if (*rdlen % bus->head_align) {
+ *rdlen += bus->head_align - (*rdlen % bus->head_align);
}
}
@@ -1658,8 +1677,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxpending = true;
for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft &&
- bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if);
rd->seq_num++, rxleft--) {
/* Handle glomming separately */
@@ -1667,7 +1685,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
+ cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rd->seq_num += cnt - 1;
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
@@ -1678,17 +1696,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
			/* read header first for unknown frame length */
sdio_claim_host(bus->sdiodev->func[1]);
if (!rd->len) {
- ret = brcmf_sdcard_recv_buf(bus->sdiodev,
- bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- bus->rxhdr,
- BRCMF_FIRSTREAD);
+ ret = brcmf_sdiod_recv_buf(bus->sdiodev,
+ bus->rxhdr, BRCMF_FIRSTREAD);
bus->sdcnt.f2rxhdrs++;
if (ret < 0) {
brcmf_err("RXHEADER FAILED: %d\n",
ret);
bus->sdcnt.rx_hdrfail++;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
continue;
}
@@ -1707,9 +1722,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
if (rd->channel == SDPCM_CONTROL_CHANNEL) {
- brcmf_sdbrcm_read_control(bus, bus->rxhdr,
- rd->len,
- rd->dat_offset);
+ brcmf_sdio_read_control(bus, bus->rxhdr,
+ rd->len,
+ rd->dat_offset);
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
rd->len_nxtfrm = 0;
@@ -1723,23 +1738,22 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
head_read = BRCMF_FIRSTREAD;
}
- brcmf_pad(bus, &pad, &rd->len_left);
+ brcmf_sdio_pad(bus, &pad, &rd->len_left);
pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
- BRCMF_SDALIGN);
+ bus->head_align);
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- brcmf_sdbrcm_rxfail(bus, false,
+ brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
continue;
}
skb_pull(pkt, head_read);
- pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
+ pkt_align(pkt, rd->len_left, bus->head_align);
- ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pkt);
+ ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
bus->sdcnt.f2rxdata++;
sdio_release_host(bus->sdiodev->func[1]);
@@ -1748,7 +1762,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, true,
+ brcmf_sdio_rxfail(bus, true,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
continue;
@@ -1773,7 +1787,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len,
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
- brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
@@ -1795,7 +1809,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Force retry w/normal header read */
rd->len = 0;
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmf_sdio_rxfail(bus, false, true);
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
@@ -1820,7 +1834,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdio_rxfail(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
}
/* prepare the descriptor for the next read */
@@ -1864,13 +1878,36 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
+brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
return;
}
+static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
+{
+ u16 head_pad;
+ u8 *dat_buf;
+
+ dat_buf = (u8 *)(pkt->data);
+
+ /* Check head padding */
+ head_pad = ((unsigned long)dat_buf % bus->head_align);
+ if (head_pad) {
+ if (skb_headroom(pkt) < head_pad) {
+ bus->sdiodev->bus_if->tx_realloc++;
+ head_pad = 0;
+ if (skb_cow(pkt, head_pad))
+ return -ENOMEM;
+ }
+ skb_push(pkt, head_pad);
+ dat_buf = (u8 *)(pkt->data);
+ memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+ }
+ return head_pad;
+}
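+
+The head alignment added above amounts to pushing the payload pointer back to a bus-alignment boundary and zeroing the pad together with the reserved header area. A minimal standalone sketch of the arithmetic (addresses and sizes are made up; skb_push() is only referenced in the comment):
+
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void)
+{
+	unsigned int head_align = 8;	/* e.g. bus->head_align */
+	unsigned int tx_hdrlen  = 12;	/* space reserved for the SDPCM header */
+	uintptr_t data = 0x1003;	/* made-up, misaligned payload address */
+
+	unsigned int head_pad = data % head_align;
+	if (head_pad) {
+		data -= head_pad;	/* skb_push() moves data back by head_pad */
+		/* the driver then zeroes head_pad + tx_hdrlen bytes at data */
+	}
+	printf("head_pad %u, aligned data %#lx (zero %u header bytes)\n",
+	       head_pad, (unsigned long)data, head_pad + tx_hdrlen);
+	return 0;
+}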
+
/**
* struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
* bus layer usage.
@@ -1880,37 +1917,47 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
/* bit mask of data length chopped from the previous packet */
#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
-static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
struct sk_buff_head *pktq,
- struct sk_buff *pkt, uint chan)
+ struct sk_buff *pkt, u16 total_len)
{
+ struct brcmf_sdio_dev *sdiodev;
struct sk_buff *pkt_pad;
- u16 tail_pad, tail_chop, sg_align;
+ u16 tail_pad, tail_chop, chain_pad;
unsigned int blksize;
- u8 *dat_buf;
- int ntail;
+ bool lastfrm;
+ int ntail, ret;
+ sdiodev = bus->sdiodev;
blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
- sg_align = 4;
- if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
- sg_align = sdiodev->pdata->sd_sgentry_align;
/* sg entry alignment should be a divisor of block size */
- WARN_ON(blksize % sg_align);
+ WARN_ON(blksize % bus->sgentry_align);
/* Check tail padding */
- pkt_pad = NULL;
- tail_chop = pkt->len % sg_align;
- tail_pad = sg_align - tail_chop;
- tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+ lastfrm = skb_queue_is_last(pktq, pkt);
+ tail_pad = 0;
+ tail_chop = pkt->len % bus->sgentry_align;
+ if (tail_chop)
+ tail_pad = bus->sgentry_align - tail_chop;
+ chain_pad = (total_len + tail_pad) % blksize;
+ if (lastfrm && chain_pad)
+ tail_pad += blksize - chain_pad;
if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
- pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+ bus->head_align);
if (pkt_pad == NULL)
return -ENOMEM;
+ ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
+ if (unlikely(ret < 0)) {
+ kfree_skb(pkt_pad);
+ return ret;
+ }
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
} else {
ntail = pkt->data_len + tail_pad -
@@ -1920,14 +1967,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
if (skb_linearize(pkt))
return -ENOMEM;
- dat_buf = (u8 *)(pkt->data);
__skb_put(pkt, tail_pad);
}
- if (pkt_pad)
- return pkt->len + tail_chop;
- else
- return pkt->len - tail_pad;
+ return tail_pad;
}
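
The tail padding returned above has two components: each subframe is padded to the scatter-gather entry alignment, and the last frame of the chain is additionally padded so the whole chain lands on an SDIO block boundary. A worked standalone example of that arithmetic, with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int sgentry_align = 8, blksize = 512;
	unsigned int pkt_len = 1502;		/* this packet's length            */
	unsigned int total_len = 3050;		/* chain length so far, incl. pkt  */
	int lastfrm = 1;

	unsigned int tail_chop = pkt_len % sgentry_align;		/* 6   */
	unsigned int tail_pad  = tail_chop ? sgentry_align - tail_chop : 0; /* 2 */
	unsigned int chain_pad = (total_len + tail_pad) % blksize;	/* 492 */

	if (lastfrm && chain_pad)
		tail_pad += blksize - chain_pad;	/* pad chain up to 3072 */

	printf("tail_chop %u, tail_pad %u, padded chain %u\n",
	       tail_chop, tail_pad, total_len + tail_pad);
	return 0;
}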
/**
@@ -1946,58 +1989,66 @@ static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
- u16 head_pad, head_align;
+ u16 head_pad, total_len;
struct sk_buff *pkt_next;
- u8 *dat_buf;
- int err;
+ u8 txseq;
+ int ret;
struct brcmf_sdio_hdrinfo hd_info = {0};
- /* SDIO ADMA requires at least 32 bit alignment */
- head_align = 4;
- if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
- head_align = bus->sdiodev->pdata->sd_head_align;
+ txseq = bus->tx_seq;
+ total_len = 0;
+ skb_queue_walk(pktq, pkt_next) {
+ /* alignment packet inserted in previous
+ * loop cycle can be skipped as it is
+ * already properly aligned and does not
+ * need an sdpcm header.
+ */
+ if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ continue;
- pkt_next = pktq->next;
- dat_buf = (u8 *)(pkt_next->data);
+ /* align packet data pointer */
+ ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
+ if (ret < 0)
+ return ret;
+ head_pad = (u16)ret;
+ if (head_pad)
+ memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
- /* Check head padding */
- head_pad = ((unsigned long)dat_buf % head_align);
- if (head_pad) {
- if (skb_headroom(pkt_next) < head_pad) {
- bus->sdiodev->bus_if->tx_realloc++;
- head_pad = 0;
- if (skb_cow(pkt_next, head_pad))
- return -ENOMEM;
- }
- skb_push(pkt_next, head_pad);
- dat_buf = (u8 *)(pkt_next->data);
- memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
- }
+ total_len += pkt_next->len;
- if (bus->sdiodev->sg_support && pktq->qlen > 1) {
- err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
- pkt_next, chan);
- if (err < 0)
- return err;
- hd_info.len = (u16)err;
- } else {
hd_info.len = pkt_next->len;
- }
-
- hd_info.channel = chan;
- hd_info.dat_offset = head_pad + bus->tx_hdrlen;
-
- /* Now fill the header */
- brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
-
- if (BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
- else if (BRCMF_HDRS_ON())
- brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
- "Tx Header:\n");
+ hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
+ if (bus->txglom && pktq->qlen > 1) {
+ ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
+ pkt_next, total_len);
+ if (ret < 0)
+ return ret;
+ hd_info.tail_pad = (u16)ret;
+ total_len += (u16)ret;
+ }
+ hd_info.channel = chan;
+ hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+ hd_info.seq_num = txseq++;
+
+ /* Now fill the header */
+ brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
+
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+ brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+ "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+ brcmf_dbg_hex_dump(true, pkt_next,
+ head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+ }
+ /* Hardware length tag of the first packet should be total
+ * length of the chain (including padding)
+ */
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
return 0;
}
@@ -2015,6 +2066,7 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
u8 *hdr;
u32 dat_offset;
+ u16 tail_pad;
u32 dummy_flags, chop_len;
struct sk_buff *pkt_next, *tmp, *pkt_prev;
@@ -2024,42 +2076,41 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
- memcpy(pkt_prev->data + pkt_prev->len,
- pkt_next->data, chop_len);
skb_put(pkt_prev, chop_len);
}
__skb_unlink(pkt_next, pktq);
brcmu_pkt_buf_free_skb(pkt_next);
} else {
- hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+ hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
dat_offset = le32_to_cpu(*(__le32 *)hdr);
dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
SDPCM_DOFFSET_SHIFT;
skb_pull(pkt_next, dat_offset);
+ if (bus->txglom) {
+ tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
+ skb_trim(pkt_next, pkt_next->len - tail_pad);
+ }
}
}
}
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
- uint chan)
+static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+ uint chan)
{
int ret;
int i;
- struct sk_buff_head localq;
+ struct sk_buff *pkt_next, *tmp;
brcmf_dbg(TRACE, "Enter\n");
- __skb_queue_head_init(&localq);
- __skb_queue_tail(&localq, pkt);
- ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+ ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
if (ret)
goto done;
sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &localq);
+ ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
if (ret < 0) {
@@ -2068,57 +2119,71 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
ret);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
-
}
sdio_release_host(bus->sdiodev->func[1]);
- if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
done:
- brcmf_sdio_txpkt_postp(bus, &localq);
- __skb_dequeue_tail(&localq);
- brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
+ brcmf_sdio_txpkt_postp(bus, pktq);
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+ __skb_unlink(pkt_next, pktq);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+ }
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
+static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
+ struct sk_buff_head pktq;
u32 intstatus = 0;
- int ret = 0, prec_out;
+ int ret = 0, prec_out, i;
uint cnt = 0;
- u8 tx_prec_map;
+ u8 tx_prec_map, pkt_num;
brcmf_dbg(TRACE, "Enter\n");
tx_prec_map = ~bus->flowcontrol;
/* Send frames until the limit or some other event */
- for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+ pkt_num = 1;
+ __skb_queue_head_init(&pktq);
+ if (bus->txglom)
+ pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+ brcmf_sdio_txglomsz);
+ pkt_num = min_t(u32, pkt_num,
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
spin_lock_bh(&bus->txqlock);
- pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
- if (pkt == NULL) {
- spin_unlock_bh(&bus->txqlock);
- break;
+ for (i = 0; i < pkt_num; i++) {
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+ &prec_out);
+ if (pkt == NULL)
+ break;
+ __skb_queue_tail(&pktq, pkt);
}
spin_unlock_bh(&bus->txqlock);
+ if (i == 0)
+ break;
- ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
+ ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+ cnt += i;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2146,7 +2211,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
}
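
With glomming, brcmf_sdio_sendfromq() now dequeues up to pkt_num frames per pass, bounded by the transmit credit window (tx_max - tx_seq), the txglomsz module parameter, and the number of frames actually queued. A small sketch of that bound, assuming illustrative values:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int tx_max = 40, tx_seq = 33;	/* credit window: 7 frames */
	unsigned int txglomsz = 32;		/* module parameter limit  */
	unsigned int queued = 12;		/* frames waiting in txq   */
	int glom = 1;

	unsigned int pkt_num = 1;
	if (glom)
		pkt_num = min_u((unsigned char)(tx_max - tx_seq), txglomsz);
	pkt_num = min_u(pkt_num, queued);

	printf("dequeue %u frame(s) this pass\n", pkt_num);	/* prints 7 */
	return 0;
}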
-static void brcmf_sdbrcm_bus_stop(struct device *dev)
+static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
@@ -2163,62 +2228,57 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_bus_sleep(bus, false, false);
+ if (bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(sdiodev->func[1]);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdio_bus_sleep(bus, false, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Force backplane clocks to assure F2 interrupt propagates */
+ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if (!err)
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ if (err)
+ brcmf_err("Failed to force clock for F2: err %d\n",
+ err);
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
- /* Change our idea of bus state */
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus));
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ sdio_release_host(sdiodev->func[1]);
}
- if (err)
- brcmf_err("Failed to force clock for F2: err %d\n", err);
-
- /* Turn off the bus (F2), free any pending packets */
- brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
- NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus));
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
- sdio_release_host(bus->sdiodev->func[1]);
-
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
/* Clear any held glomming stuff */
if (bus->glomd)
brcmu_pkt_buf_free_skb(bus->glomd);
- brcmf_sdbrcm_free_glom(bus);
+ brcmf_sdio_free_glom(bus);
/* Clear rx control and wake any waiters */
spin_lock_bh(&bus->rxctl_lock);
bus->rxlen = 0;
spin_unlock_bh(&bus->rxctl_lock);
- brcmf_sdbrcm_dcmd_resp_wake(bus);
+ brcmf_sdio_dcmd_resp_wake(bus);
/* Reset some F2 state stuff */
bus->rxskip = false;
bus->tx_seq = bus->rx_seq = 0;
}
-static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
unsigned long flags;
@@ -2243,7 +2303,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, intstatus);
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+ val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
val = 0;
@@ -2253,7 +2313,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
/* Clear interrupts */
if (val) {
- ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
}
@@ -2267,7 +2327,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
return ret;
}
-static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
unsigned long intstatus;
@@ -2286,48 +2346,29 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- brcmf_err("error reading DEVCTL: %d\n", err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (err) {
- brcmf_err("error reading CSR: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
- if (err) {
- brcmf_err("error reading DEVCTL: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
- if (err) {
- brcmf_err("error writing DEVCTL: %d\n",
- err);
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- }
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
bus->clkstate = CLK_AVAIL;
}
}
/* Make sure backplane clock is on */
- brcmf_sdbrcm_bus_sleep(bus, false, true);
+ brcmf_sdio_bus_sleep(bus, false, true);
/* Pending interrupt indicates new device status */
if (atomic_read(&bus->ipend) > 0) {
@@ -2358,7 +2399,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Handle host mailbox indication */
if (intstatus & I_HMB_HOST_INT) {
intstatus &= ~I_HMB_HOST_INT;
- intstatus |= brcmf_sdbrcm_hostmail(bus);
+ intstatus |= brcmf_sdio_hostmail(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
@@ -2403,16 +2444,15 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
set_bit(n, (unsigned long *)&bus->intstatus.counter);
}
- brcmf_sdbrcm_clrintr(bus);
+ brcmf_sdio_clrintr(bus);
if (data_ok(bus) && bus->ctrl_frame_stat &&
(bus->clkstate == CLK_AVAIL)) {
int i;
sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len);
+ err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+ (u32)bus->ctrl_frame_len);
if (err < 0) {
/* On failure, abort the command and
@@ -2421,20 +2461,20 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, &err);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI,
- &err);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO,
- &err);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ &err);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2445,7 +2485,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
- brcmf_sdbrcm_wait_event_wakeup(bus);
+ brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2453,13 +2493,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
&& data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
- framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+ framecnt = brcmf_sdio_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
- if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
brcmf_err("failed backplane access over SDIO, halting operation\n");
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
atomic_set(&bus->intstatus, 0);
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
@@ -2475,12 +2514,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->activity = false;
brcmf_dbg(SDIO, "idle state\n");
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, true, false);
+ brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
}
}
-static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
+static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -2489,7 +2528,7 @@ static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
return &bus->txq;
}
-static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
+static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
@@ -2545,7 +2584,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
#ifdef DEBUG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
+static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2558,8 +2597,8 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read console log struct */
addr = bus->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
- sizeof(c->log_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
+ sizeof(c->log_le));
if (rv < 0)
return rv;
@@ -2584,7 +2623,7 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
/* Read the console buffer */
addr = le32_to_cpu(c->log_le.buf);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
if (rv < 0)
return rv;
@@ -2622,14 +2661,13 @@ break2:
}
#endif /* DEBUG */
-static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len);
+ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2637,18 +2675,18 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
ret);
bus->sdcnt.tx_sderrs++;
- brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if (hi == 0 && lo == 0)
break;
@@ -2662,10 +2700,10 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
}
static int
-brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
- u16 len;
+ u16 len, pad;
uint retries = 0;
u8 doff = 0;
int ret = -1;
@@ -2681,41 +2719,45 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
len = (msglen += bus->tx_hdrlen);
/* Add alignment padding (optional for ctl frames) */
- doff = ((unsigned long)frame % BRCMF_SDALIGN);
+ doff = ((unsigned long)frame % bus->head_align);
if (doff) {
frame -= doff;
len += doff;
msglen += doff;
memset(frame, 0, doff + bus->tx_hdrlen);
}
- /* precondition: doff < BRCMF_SDALIGN */
+ /* precondition: doff < bus->head_align */
doff += bus->tx_hdrlen;
/* Round send length to next SDIO block */
+ pad = 0;
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad > bus->roundup) || (pad >= bus->blocksize))
+ pad = 0;
+ } else if (len % bus->head_align) {
+ pad = bus->head_align - (len % bus->head_align);
}
-
- /* Satisfy length-alignment requirements */
- if (len & (ALIGNMENT - 1))
- len = roundup(len, ALIGNMENT);
+ len += pad;
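+
+For control frames the length is rounded up to the next SDIO block only when the frame already exceeds one block and the pad stays within bus->roundup; otherwise it is padded to the head alignment, and the pad is recorded in the header's tail-pad field. A worked standalone example of that roundup, with made-up sizes:
+
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned int blocksize = 512, roundup = 512, head_align = 4;
+	unsigned int len = 1038;		/* header + message length */
+	unsigned int pad = 0;
+
+	if (blocksize && len > blocksize) {
+		pad = blocksize - (len % blocksize);	/* 512 - 14 = 498 */
+		if (pad > roundup || pad >= blocksize)
+			pad = 0;			/* too costly: send as-is */
+	} else if (len % head_align) {
+		pad = head_align - (len % head_align);
+	}
+
+	printf("len %u -> %u (pad %u)\n", len, len + pad, pad);
+	return 0;
+}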
/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
/* Make sure backplane clock is on */
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, false, false);
+ brcmf_sdio_bus_sleep(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
hd_info.len = (u16)msglen;
hd_info.channel = SDPCM_CONTROL_CHANNEL;
hd_info.dat_offset = doff;
+ hd_info.seq_num = bus->tx_seq;
+ hd_info.lastfrm = true;
+ hd_info.tail_pad = pad;
brcmf_sdio_hdpack(bus, frame, &hd_info);
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(frame, len);
+
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
bus->tx_max, bus->tx_seq);
@@ -2746,7 +2788,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
do {
sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_tx_frame(bus, frame, len);
+ ret = brcmf_sdio_tx_frame(bus, frame, len);
sdio_release_host(bus->sdiodev->func[1]);
} while (ret < 0 && retries++ < TXRETRIES);
}
@@ -2756,7 +2798,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_dbg(INFO, "idle\n");
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ brcmf_sdio_clkctl(bus, CLK_NONE, true);
sdio_release_host(bus->sdiodev->func[1]);
}
@@ -2790,8 +2832,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, false, false);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2811,8 +2853,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
}
/* Read hndrte_shared structure */
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
if (rv < 0)
return rv;
@@ -2848,22 +2890,22 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain console information from device memory */
addr = sh->console_addr + offsetof(struct rte_console, log_le);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_ptr = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_size = le32_to_cpu(sh_val);
addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
- (u8 *)&sh_val, sizeof(u32));
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
if (rv < 0)
return rv;
console_index = le32_to_cpu(sh_val);
@@ -2877,8 +2919,8 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
/* obtain the console data from device */
conbuf[console_size] = '\0';
- rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
- console_size);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
+ console_size);
if (rv < 0)
goto done;
@@ -2915,8 +2957,8 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
return 0;
}
- error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
- sizeof(struct brcmf_trap_info));
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
+ sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
@@ -2959,14 +3001,14 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
sdio_claim_host(bus->sdiodev->func[1]);
if (sh->assert_file_addr != 0) {
- error = brcmf_sdio_ramrw(bus->sdiodev, false,
- sh->assert_file_addr, (u8 *)file, 80);
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+ sh->assert_file_addr, (u8 *)file, 80);
if (error < 0)
return error;
}
if (sh->assert_exp_addr != 0) {
- error = brcmf_sdio_ramrw(bus->sdiodev, false,
- sh->assert_exp_addr, (u8 *)expr, 80);
+ error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+ sh->assert_exp_addr, (u8 *)expr, 80);
if (error < 0)
return error;
}
@@ -2978,7 +3020,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
return simple_read_from_buffer(data, count, &pos, buf, res);
}
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
int error;
struct sdpcm_shared sh;
@@ -2999,8 +3041,8 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
return 0;
}
-static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
- size_t count, loff_t *ppos)
+static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
+ size_t count, loff_t *ppos)
{
int error = 0;
struct sdpcm_shared sh;
@@ -3041,7 +3083,7 @@ static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
struct brcmf_sdio *bus = f->private_data;
int res;
- res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+ res = brcmf_sdio_died_dump(bus, data, count, ppos);
if (res > 0)
*ppos += res;
return (ssize_t)res;
@@ -3066,7 +3108,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
}
#else
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
return 0;
}
@@ -3077,7 +3119,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
#endif /* DEBUG */
static int
-brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
@@ -3090,7 +3132,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_dbg(TRACE, "Enter\n");
/* Wait until control frame is available */
- timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+ timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
spin_lock_bh(&bus->rxctl_lock);
rxlen = bus->rxlen;
@@ -3107,13 +3149,13 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_err("resumed on timeout\n");
- brcmf_sdbrcm_checkdied(bus);
+ brcmf_sdio_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
} else {
brcmf_dbg(CTL, "resumed for unknown reason?\n");
- brcmf_sdbrcm_checkdied(bus);
+ brcmf_sdio_checkdied(bus);
}
if (rxlen)
@@ -3124,46 +3166,69 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
+#ifdef DEBUG
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+ u8 *ram_data, uint ram_sz)
{
- struct chip_info *ci = bus->ci;
-
- /* To enter download state, disable ARM and reset SOCRAM.
- * To exit download state, simply reset ARM (default is RAM boot).
- */
- if (enter) {
- bus->alp_only = true;
+ char *ram_cmp;
+ int err;
+ bool ret = true;
+ int address;
+ int offset;
+ int len;
- brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
- } else {
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
- bus->varsz))
- return false;
+ /* read back and verify */
+ brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
+ ram_sz);
+ ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
+ /* no compare buffer available; cannot verify, assume the image is good */
+ if (!ram_cmp)
+ return true;
- /* Allow HT Clock now that the ARM is running. */
- bus->alp_only = false;
-
- bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
+ address = ram_addr;
+ offset = 0;
+ while (offset < ram_sz) {
+ len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
+ ram_sz - offset;
+ err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
+ if (err) {
+ brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+ err, len, address);
+ ret = false;
+ break;
+ } else if (memcmp(ram_cmp, &ram_data[offset], len)) {
+ brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
+ offset, len);
+ ret = false;
+ break;
+ }
+ offset += len;
+ address += len;
}
+ kfree(ram_cmp);
+
+ return ret;
+}
+#else /* DEBUG */
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+ u8 *ram_data, uint ram_sz)
+{
return true;
}
+#endif /* DEBUG */
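
The DEBUG-only verifier above re-reads the downloaded image in MEMBLOCK-sized chunks and memcmp()s each chunk against the source buffer, stopping at the first read error or mismatch. A minimal, self-contained sketch of that chunking pattern (CHUNK and read_fn are stand-ins, not the driver's API):

    /* Hedged sketch of chunked read-back verification; not driver code. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define CHUNK 2048                      /* assumed stand-in for MEMBLOCK */

    static bool verify_image(int (*read_fn)(unsigned int addr, void *buf, size_t len),
                             unsigned int base, const unsigned char *image, size_t size)
    {
            unsigned char tmp[CHUNK];
            size_t off = 0;

            while (off < size) {
                    size_t len = (size - off > CHUNK) ? CHUNK : size - off;

                    if (read_fn(base + off, tmp, len))
                            return false;   /* read error */
                    if (memcmp(tmp, image + off, len))
                            return false;   /* block differs from source */
                    off += len;
            }
            return true;
    }
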
-static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
+ const struct firmware *fw)
{
- const struct firmware *fw;
int err;
int offset;
int address;
int len;
- fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
- if (fw == NULL)
- return -ENOENT;
-
- if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
- BRCMF_MAX_CORENUM)
- memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+ brcmf_dbg(TRACE, "Enter\n");
err = 0;
offset = 0;
@@ -3171,148 +3236,113 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
while (offset < fw->size) {
len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
fw->size - offset;
- err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
err, len, address);
- goto failure;
+ return err;
}
offset += len;
address += len;
}
-
-failure:
- release_firmware(fw);
+ if (!err)
+ if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
-/*
- * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL.
- * Removes carriage returns, empty lines, comment lines, and converts
- * newlines to NULs.
- * Shortens buffer as needed and pads with NULs. End of buffer is marked
- * by two NULs.
-*/
-
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
- const struct firmware *nv)
-{
- char *varbuf;
- char *dp;
- bool findNewline;
- int column;
- int ret = 0;
- uint buf_len, n, len;
-
- len = nv->size;
- varbuf = vmalloc(len);
- if (!varbuf)
- return -ENOMEM;
-
- memcpy(varbuf, nv->data, len);
- dp = varbuf;
-
- findNewline = false;
- column = 0;
-
- for (n = 0; n < len; n++) {
- if (varbuf[n] == 0)
- break;
- if (varbuf[n] == '\r')
- continue;
- if (findNewline && varbuf[n] != '\n')
- continue;
- findNewline = false;
- if (varbuf[n] == '#') {
- findNewline = true;
- continue;
- }
- if (varbuf[n] == '\n') {
- if (column == 0)
- continue;
- *dp++ = 0;
- column = 0;
- continue;
- }
- *dp++ = varbuf[n];
- column++;
- }
- buf_len = dp - varbuf;
- while (dp < varbuf + n)
- *dp++ = 0;
-
- kfree(bus->vars);
- /* roundup needed for download to device */
- bus->varsz = roundup(buf_len + 1, 4);
- bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
- if (bus->vars == NULL) {
- bus->varsz = 0;
- ret = -ENOMEM;
- goto err;
- }
+static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+ const struct firmware *nv)
+{
+ void *vars;
+ u32 varsz;
+ int address;
+ int err;
- /* copy the processed variables and add null termination */
- memcpy(bus->vars, varbuf, buf_len);
- bus->vars[buf_len] = 0;
-err:
- vfree(varbuf);
- return ret;
-}
+ brcmf_dbg(TRACE, "Enter\n");
-static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
-{
- const struct firmware *nv;
- int ret;
+ vars = brcmf_nvram_strip(nv, &varsz);
- nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
- if (nv == NULL)
- return -ENOENT;
+ if (vars == NULL)
+ return -EINVAL;
- ret = brcmf_process_nvram_vars(bus, nv);
+ address = bus->ci->ramsize - varsz + bus->ci->rambase;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+ if (err)
+ brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
+ err, varsz, address);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+ err = -EIO;
- release_firmware(nv);
+ brcmf_nvram_free(vars);
- return ret;
+ return err;
}
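
The stripped NVRAM block is placed at the very end of device RAM: address = ramsize - varsz + rambase. A worked example with purely illustrative numbers (rambase and ramsize are assumptions, not a real chip's values):

    u32 rambase = 0x180000;                /* assumed start of device RAM   */
    u32 ramsize = 768 * 1024;              /* assumed RAM size              */
    u32 varsz   = 2048;                    /* size from brcmf_nvram_strip() */
    u32 address = ramsize - varsz + rambase;   /* == 0x23f800, top of RAM   */
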
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
{
- int bcmerror = -1;
+ int bcmerror = -EFAULT;
+ const struct firmware *fw;
+ u32 rstvec;
+
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- if (!brcmf_sdbrcm_download_state(bus, true)) {
- brcmf_err("error placing ARM core in reset\n");
+ brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+
+ fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL) {
+ bcmerror = -ENOENT;
goto err;
}
- if (brcmf_sdbrcm_download_code_file(bus)) {
+ rstvec = get_unaligned_le32(fw->data);
+ brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+
+ bcmerror = brcmf_sdio_download_code_file(bus, fw);
+ release_firmware(fw);
+ if (bcmerror) {
brcmf_err("dongle image file download failed\n");
goto err;
}
- if (brcmf_sdbrcm_download_nvram(bus)) {
+ fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (fw == NULL) {
+ bcmerror = -ENOENT;
+ goto err;
+ }
+
+ bcmerror = brcmf_sdio_download_nvram(bus, fw);
+ release_firmware(fw);
+ if (bcmerror) {
brcmf_err("dongle nvram file download failed\n");
goto err;
}
/* Take arm out of reset */
- if (!brcmf_sdbrcm_download_state(bus, false)) {
+ if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
+ /* Allow HT Clock now that the ARM is running. */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
bcmerror = 0;
err:
+ brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
+ sdio_release_host(bus->sdiodev->func[1]);
return bcmerror;
}
-static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
+static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
{
- u32 addr, reg;
+ u32 addr, reg, pmu_cc3_mask = ~0;
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -3320,49 +3350,61 @@ static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
if (bus->ci->pmurev < 17)
return false;
- /* read PMU chipcontrol register 3*/
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
+ switch (bus->ci->chip) {
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
+ brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
- return (bool)reg;
+ addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
+ reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
}
-static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
+static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
u8 val;
brcmf_dbg(TRACE, "Enter\n");
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
- &err);
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
- val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
/* Add CMD14 Support */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
- &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+ (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ &err);
if (err) {
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
return;
}
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HT, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HT, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
return;
@@ -3374,7 +3416,7 @@ static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
}
/* enable KSO bit */
-static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
+static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
u8 val;
int err = 0;
@@ -3385,8 +3427,7 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (bus->ci->c_inf[1].rev < 12)
return 0;
- val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- &err);
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3395,8 +3436,8 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3407,31 +3448,66 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
}
-static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_bus_preinit(struct device *dev)
{
- bool ret;
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ uint pad_size;
+ u32 value;
+ u8 idx;
+ int err;
- ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
+ /* the commands below use the terms tx and rx from
+ * a device perspective, i.e. bus:txglom affects the
+ * bus transfers from device to host.
+ */
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ if (bus->ci->c_inf[idx].rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ value = 0;
+ err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+ sizeof(u32));
+ } else {
+ /* otherwise, set txglomalign */
+ value = 4;
+ if (sdiodev->pdata)
+ value = sdiodev->pdata->sd_sgentry_align;
+ /* SDIO ADMA requires at least 32 bit alignment */
+ value = max_t(u32, value, 4);
+ err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
+ sizeof(u32));
+ }
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+ if (err < 0)
+ goto done;
- sdio_release_host(bus->sdiodev->func[1]);
+ bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+ if (sdiodev->sg_support) {
+ bus->txglom = false;
+ value = 1;
+ pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+ err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
+ &value, sizeof(u32));
+ if (err < 0) {
+ /* bus:rxglom is allowed to fail */
+ err = 0;
+ } else {
+ bus->txglom = true;
+ bus->tx_hdrlen += SDPCM_HWEXT_LEN;
+ }
+ }
+ brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
- return ret;
+done:
+ return err;
}
-static int brcmf_sdbrcm_bus_init(struct device *dev)
+static int brcmf_sdio_bus_init(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- unsigned long timeout;
- u8 ready, enable;
int err, ret = 0;
u8 saveclk;
@@ -3439,8 +3515,11 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* try to download image and nvram to the dongle */
if (bus_if->state == BRCMF_BUS_DOWN) {
- if (!(brcmf_sdbrcm_download_firmware(bus)))
- return -1;
+ bus->alp_only = true;
+ err = brcmf_sdio_download_firmware(bus);
+ if (err)
+ return err;
+ bus->alp_only = false;
}
if (!bus->sdiodev->bus_if->drvr)
@@ -3448,21 +3527,21 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Start the watchdog timer */
bus->sdcnt.tickcnt = 0;
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
if (bus->clkstate != CLK_AVAIL)
goto exit;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -3472,56 +3551,42 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
offsetof(struct sdpcmd_regs, tosbmailboxdata));
- enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
-
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+ err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
- timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
- ready = 0;
- while (enable != ready) {
- ready = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_IORx, NULL);
- if (time_after(jiffies, timeout))
- break;
- else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
- /* prevent busy waiting if it takes too long */
- msleep_interruptible(20);
- }
- brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
/* If F2 successfully enabled, set core and enable interrupts */
- if (ready == enable) {
+ if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask,
offsetof(struct sdpcmd_regs, hostintmask));
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
- enable = SDIO_FUNC_ENABLE_1;
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+ sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
ret = -ENODEV;
}
- if (brcmf_sdbrcm_sr_capable(bus)) {
- brcmf_sdbrcm_sr_init(bus);
+ if (brcmf_sdio_sr_capable(bus)) {
+ brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
}
if (ret == 0) {
- ret = brcmf_sdio_intr_register(bus->sdiodev);
+ ret = brcmf_sdiod_intr_register(bus->sdiodev);
if (ret != 0)
brcmf_err("intr register failed:%d\n", ret);
}
/* If we didn't come up, turn off backplane clock */
- if (bus_if->state != BRCMF_BUS_DATA)
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ if (ret != 0)
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
exit:
sdio_release_host(bus->sdiodev->func[1]);
@@ -3529,10 +3594,8 @@ exit:
return ret;
}
-void brcmf_sdbrcm_isr(void *arg)
+void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
-
brcmf_dbg(TRACE, "Enter\n");
if (!bus) {
@@ -3540,7 +3603,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
brcmf_err("bus is down. we have nothing to do\n");
return;
}
@@ -3551,7 +3614,6 @@ void brcmf_sdbrcm_isr(void *arg)
else
if (brcmf_sdio_intr_rstatus(bus)) {
brcmf_err("failed backplane access\n");
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* Disable additional interrupts (is this needed now)? */
@@ -3562,7 +3624,7 @@ void brcmf_sdbrcm_isr(void *arg)
queue_work(bus->brcmf_wq, &bus->datawork);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
+static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
@@ -3586,9 +3648,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
u8 devpend;
sdio_claim_host(bus->sdiodev->func[1]);
- devpend = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_INTx,
- NULL);
+ devpend = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_INTx,
+ NULL);
sdio_release_host(bus->sdiodev->func[1]);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
@@ -3618,8 +3680,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count -= bus->console_interval;
sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on */
- brcmf_sdbrcm_bus_sleep(bus, false, false);
- if (brcmf_sdbrcm_readconsole(bus) < 0)
+ brcmf_sdio_bus_sleep(bus, false, false);
+ if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
sdio_release_host(bus->sdiodev->func[1]);
@@ -3633,11 +3695,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->idlecount = 0;
if (bus->activity) {
bus->activity = false;
- brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
} else {
brcmf_dbg(SDIO, "idle\n");
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_bus_sleep(bus, true, false);
+ brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
}
}
@@ -3652,38 +3714,13 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
datawork);
while (atomic_read(&bus->dpc_tskcnt)) {
- brcmf_sdbrcm_dpc(bus);
+ brcmf_sdio_dpc(bus);
atomic_dec(&bus->dpc_tskcnt);
}
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(bus->rxbuf);
- bus->rxctl = bus->rxbuf = NULL;
- bus->rxlen = 0;
-}
-
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->sdiodev->bus_if->maxctl) {
- bus->rxblen =
- roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
- ALIGNMENT) + BRCMF_SDALIGN;
- bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
- if (!(bus->rxbuf))
- return false;
- }
-
- return true;
-}
-
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
+brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
u8 clkctl = 0;
int err = 0;
@@ -3691,23 +3728,21 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
u32 reg_val;
u32 drivestrength;
- bus->alp_only = true;
-
sdio_claim_host(bus->sdiodev->func[1]);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
if (!err)
- clkctl = brcmf_sdio_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3715,12 +3750,17 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
goto fail;
}
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ /* SDIO register access works, so move the
+ * bus state from UNKNOWN to DOWN.
+ */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
brcmf_err("brcmf_sdio_chip_attach failed!\n");
goto fail;
}
- if (brcmf_sdbrcm_kso_init(bus)) {
+ if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
goto fail;
}
@@ -3739,33 +3779,33 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
}
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdio_regrb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_regrb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, &err);
if (err)
goto fail;
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdio_regwb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_regwb(bus->sdiodev,
+ SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
if (err)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
pmucontrol);
- reg_val = brcmf_sdio_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+ reg_addr,
+ &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdio_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev,
+ reg_addr,
+ reg_val,
+ &err);
if (err)
goto fail;
@@ -3774,9 +3814,13 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+ /* allocate header buffer */
+ bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
+ if (!bus->hdrbuf)
+ return false;
/* Locate an appropriately-aligned portion of hdrbuf */
bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
- BRCMF_SDALIGN);
+ bus->head_align);
/* Set the poll and/or interrupt flags */
bus->intr = true;
@@ -3791,42 +3835,8 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- bus->rxflow = false;
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- sdio_release_host(bus->sdiodev->func[1]);
-
- /* ...and initialize clock/power states */
- bus->clkstate = CLK_SDONLY;
- bus->idletime = BRCMF_IDLE_INTERVAL;
- bus->idleclock = BRCMF_IDLE_ACTIVE;
-
- /* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
- bus->roundup = min(max_roundup, bus->blocksize);
-
- /* SR state */
- bus->sleeping = false;
- bus->sr_enabled = false;
-
- return true;
-}
-
static int
-brcmf_sdbrcm_watchdog_thread(void *data)
+brcmf_sdio_watchdog_thread(void *data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
@@ -3836,7 +3846,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus);
+ brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
} else
@@ -3846,7 +3856,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
}
static void
-brcmf_sdbrcm_watchdog(unsigned long data)
+brcmf_sdio_watchdog(unsigned long data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
@@ -3859,73 +3869,23 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->ci) {
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- sdio_release_host(bus->sdiodev->func[1]);
- brcmf_sdio_chip_detach(&bus->ci);
- if (bus->vars && bus->varsz)
- kfree(bus->vars);
- bus->vars = NULL;
- }
-
- brcmf_dbg(TRACE, "Disconnected\n");
-}
-
-/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus) {
- /* De-register interrupt handler */
- brcmf_sdio_intr_unregister(bus->sdiodev);
-
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
- if (bus->sdiodev->bus_if->drvr) {
- brcmf_detach(bus->sdiodev->dev);
- brcmf_sdbrcm_release_dongle(bus);
- }
-
- brcmf_sdbrcm_release_malloc(bus);
-
- kfree(bus);
- }
-
- brcmf_dbg(TRACE, "Disconnected\n");
-}
-
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
- .stop = brcmf_sdbrcm_bus_stop,
- .init = brcmf_sdbrcm_bus_init,
- .txdata = brcmf_sdbrcm_bus_txdata,
- .txctl = brcmf_sdbrcm_bus_txctl,
- .rxctl = brcmf_sdbrcm_bus_rxctl,
- .gettxq = brcmf_sdbrcm_bus_gettxq,
+ .stop = brcmf_sdio_bus_stop,
+ .preinit = brcmf_sdio_bus_preinit,
+ .init = brcmf_sdio_bus_init,
+ .txdata = brcmf_sdio_bus_txdata,
+ .txctl = brcmf_sdio_bus_txctl,
+ .rxctl = brcmf_sdio_bus_rxctl,
+ .gettxq = brcmf_sdio_bus_gettxq,
};
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_sdio *bus;
- struct brcmf_bus_dcmd *dlst;
- u32 dngl_txglom;
- u32 txglomalign = 0;
- u8 idx;
brcmf_dbg(TRACE, "Enter\n");
- /* We make an assumption about address window mappings:
- * regsva == SI_ENUM_BASE*/
-
/* Allocate private bus interface state */
bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
@@ -3939,6 +3899,18 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQ_WRAP - 1;
+ /* platform specific configuration:
+ * alignments must be at least 4 bytes for ADMA
+ */
+ bus->head_align = ALIGNMENT;
+ bus->sgentry_align = ALIGNMENT;
+ if (sdiodev->pdata) {
+ if (sdiodev->pdata->sd_head_align > ALIGNMENT)
+ bus->head_align = sdiodev->pdata->sd_head_align;
+ if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
+ bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
+ }
+
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
if (bus->brcmf_wq == NULL) {
@@ -3947,8 +3919,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
}
/* attempt to attach to the dongle */
- if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
- brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
+ if (!(brcmf_sdio_probe_attach(bus))) {
+ brcmf_err("brcmf_sdio_probe_attach failed\n");
goto fail;
}
@@ -3960,11 +3932,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Set up the watchdog timer */
init_timer(&bus->timer);
bus->timer.data = (unsigned long)bus;
- bus->timer.function = brcmf_sdbrcm_watchdog;
+ bus->timer.function = brcmf_sdio_watchdog;
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
- bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_watchdog");
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
@@ -3983,50 +3955,52 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
/* Attach to the common layer, reserve hdr space */
- ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
+ ret = brcmf_attach(bus->sdiodev->dev);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
}
/* Allocate buffers */
- if (!(brcmf_sdbrcm_probe_malloc(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
- goto fail;
+ if (bus->sdiodev->bus_if->maxctl) {
+ bus->rxblen =
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + bus->head_align;
+ bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+ if (!(bus->rxbuf)) {
+ brcmf_err("rxbuf allocation failed\n");
+ goto fail;
+ }
}
- if (!(brcmf_sdbrcm_probe_init(bus))) {
- brcmf_err("brcmf_sdbrcm_probe_init failed\n");
- goto fail;
- }
+ sdio_claim_host(bus->sdiodev->func[1]);
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+
+ bus->rxflow = false;
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
+ /* SR state */
+ bus->sleeping = false;
+ bus->sr_enabled = false;
brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
- /* sdio bus core specific dcmd */
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
- if (dlst) {
- if (bus->ci->c_inf[idx].rev < 12) {
- /* for sdio core rev < 12, disable txgloming */
- dngl_txglom = 0;
- dlst->name = "bus:txglom";
- dlst->param = (char *)&dngl_txglom;
- dlst->param_len = sizeof(u32);
- } else {
- /* otherwise, set txglomalign */
- if (sdiodev->pdata)
- txglomalign = sdiodev->pdata->sd_sgentry_align;
- /* SDIO ADMA requires at least 32 bit alignment */
- if (txglomalign < 4)
- txglomalign = 4;
- dlst->name = "bus:txglomalign";
- dlst->param = (char *)&txglomalign;
- dlst->param_len = sizeof(u32);
- }
- list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
- }
-
/* if firmware path present try to download and bring up bus */
ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
@@ -4037,24 +4011,54 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
return bus;
fail:
- brcmf_sdbrcm_release(bus);
+ brcmf_sdio_remove(bus);
return NULL;
}
-void brcmf_sdbrcm_disconnect(void *ptr)
+/* Detach and free everything */
+void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
-
brcmf_dbg(TRACE, "Enter\n");
- if (bus)
- brcmf_sdbrcm_release(bus);
+ if (bus) {
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
+ }
+
+ if (bus->ci) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+ /* Leave the device in a state where it is
+ * 'quiet'. This is done by putting it in
+ * download_state which essentially resets
+ * all necessary cores.
+ */
+ msleep(20);
+ brcmf_sdio_chip_enter_download(bus->sdiodev,
+ bus->ci);
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
+ brcmf_sdio_chip_detach(&bus->ci);
+ }
+
+ kfree(bus->rxbuf);
+ kfree(bus->hdrbuf);
+ kfree(bus);
+ }
brcmf_dbg(TRACE, "Disconnected\n");
}
-void
-brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid) {
@@ -4065,7 +4069,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
}
/* don't start the wd until fw is loaded */
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+ if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
return;
if (wdtick) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 14bc24dc5bae..51b53a73d074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -122,6 +122,52 @@ enum brcmf_fweh_event_code {
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
#define BRCMF_EVENT_MSG_GROUP 0x04
+/* status field values in struct brcmf_event_msg */
+#define BRCMF_E_STATUS_SUCCESS 0
+#define BRCMF_E_STATUS_FAIL 1
+#define BRCMF_E_STATUS_TIMEOUT 2
+#define BRCMF_E_STATUS_NO_NETWORKS 3
+#define BRCMF_E_STATUS_ABORT 4
+#define BRCMF_E_STATUS_NO_ACK 5
+#define BRCMF_E_STATUS_UNSOLICITED 6
+#define BRCMF_E_STATUS_ATTEMPT 7
+#define BRCMF_E_STATUS_PARTIAL 8
+#define BRCMF_E_STATUS_NEWSCAN 9
+#define BRCMF_E_STATUS_NEWASSOC 10
+#define BRCMF_E_STATUS_11HQUIET 11
+#define BRCMF_E_STATUS_SUPPRESS 12
+#define BRCMF_E_STATUS_NOCHANS 13
+#define BRCMF_E_STATUS_CS_ABORT 15
+#define BRCMF_E_STATUS_ERROR 16
+
+/* reason field values in struct brcmf_event_msg */
+#define BRCMF_E_REASON_INITIAL_ASSOC 0
+#define BRCMF_E_REASON_LOW_RSSI 1
+#define BRCMF_E_REASON_DEAUTH 2
+#define BRCMF_E_REASON_DISASSOC 3
+#define BRCMF_E_REASON_BCNS_LOST 4
+#define BRCMF_E_REASON_MINTXRATE 9
+#define BRCMF_E_REASON_TXFAIL 10
+
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
+#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
+#define BRCMF_E_REASON_DIRECTED_ROAM 6
+#define BRCMF_E_REASON_TSPEC_REJECTED 7
+#define BRCMF_E_REASON_BETTER_AP 8
+
+/* action field values for brcmf_ifevent */
+#define BRCMF_E_IF_ADD 1
+#define BRCMF_E_IF_DEL 2
+#define BRCMF_E_IF_CHANGE 3
+
+/* flag field values for brcmf_ifevent */
+#define BRCMF_E_IF_FLAG_NOIF 1
+
+/* role field values for brcmf_ifevent */
+#define BRCMF_E_IF_ROLE_STA 0
+#define BRCMF_E_IF_ROLE_AP 1
+#define BRCMF_E_IF_ROLE_WDS 2
+
/**
* definitions for event packet validation.
*/
@@ -160,6 +206,14 @@ struct brcmf_event_msg {
u8 bsscfgidx;
};
+struct brcmf_if_event {
+ u8 ifidx;
+ u8 action;
+ u8 flags;
+ u8 bssidx;
+ u8 role;
+};
+
typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
const struct brcmf_event_msg *evtmsg,
void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 04f395930d86..22adbe311d20 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -27,6 +27,7 @@
#include "dhd_dbg.h"
#include "tracepoint.h"
#include "fwil.h"
+#include "proto.h"
#define MAX_HEX_DUMP_LEN 64
@@ -46,11 +47,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
- len);
+ err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
else
- err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
- len);
+ err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
if (err >= 0)
err = 0;
@@ -69,7 +68,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -87,7 +86,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&ifp->drvr->proto_block);
@@ -156,7 +155,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
sizeof(drvr->proto_buf));
@@ -196,7 +195,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&drvr->proto_block);
return err;
@@ -279,7 +278,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
drvr->proto_buf, sizeof(drvr->proto_buf));
@@ -318,7 +317,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
}
brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
- min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
mutex_unlock(&drvr->proto_block);
return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 16eb8202fb1e..77eae86e55c2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -17,6 +17,67 @@
#ifndef _fwil_h_
#define _fwil_h_
+/*******************************************************************************
+ * Dongle command codes that are interpreted by firmware
+ ******************************************************************************/
+#define BRCMF_C_GET_VERSION 1
+#define BRCMF_C_UP 2
+#define BRCMF_C_DOWN 3
+#define BRCMF_C_SET_PROMISC 10
+#define BRCMF_C_GET_RATE 12
+#define BRCMF_C_GET_INFRA 19
+#define BRCMF_C_SET_INFRA 20
+#define BRCMF_C_GET_AUTH 21
+#define BRCMF_C_SET_AUTH 22
+#define BRCMF_C_GET_BSSID 23
+#define BRCMF_C_GET_SSID 25
+#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_TERMINATED 28
+#define BRCMF_C_GET_CHANNEL 29
+#define BRCMF_C_SET_CHANNEL 30
+#define BRCMF_C_GET_SRL 31
+#define BRCMF_C_SET_SRL 32
+#define BRCMF_C_GET_LRL 33
+#define BRCMF_C_SET_LRL 34
+#define BRCMF_C_GET_RADIO 37
+#define BRCMF_C_SET_RADIO 38
+#define BRCMF_C_GET_PHYTYPE 39
+#define BRCMF_C_SET_KEY 45
+#define BRCMF_C_SET_PASSIVE_SCAN 49
+#define BRCMF_C_SCAN 50
+#define BRCMF_C_SCAN_RESULTS 51
+#define BRCMF_C_DISASSOC 52
+#define BRCMF_C_REASSOC 53
+#define BRCMF_C_SET_ROAM_TRIGGER 55
+#define BRCMF_C_SET_ROAM_DELTA 57
+#define BRCMF_C_GET_BCNPRD 75
+#define BRCMF_C_SET_BCNPRD 76
+#define BRCMF_C_GET_DTIMPRD 77
+#define BRCMF_C_SET_DTIMPRD 78
+#define BRCMF_C_SET_COUNTRY 84
+#define BRCMF_C_GET_PM 85
+#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_CURR_RATESET 114
+#define BRCMF_C_GET_AP 117
+#define BRCMF_C_SET_AP 118
+#define BRCMF_C_GET_RSSI 127
+#define BRCMF_C_GET_WSEC 133
+#define BRCMF_C_SET_WSEC 134
+#define BRCMF_C_GET_PHY_NOISE 135
+#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_GET_BANDLIST 140
+#define BRCMF_C_SET_SCB_TIMEOUT 158
+#define BRCMF_C_GET_PHYLIST 180
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_GET_VALID_CHANNELS 217
+#define BRCMF_C_GET_KEY_PRIMARY 235
+#define BRCMF_C_SET_KEY_PRIMARY 236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
+#define BRCMF_C_GET_VAR 262
+#define BRCMF_C_SET_VAR 263
+
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index ecabb04f33c3..af17a5bc8b83 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -29,6 +29,24 @@
#define BRCMF_ARP_OL_HOST_AUTO_REPLY 0x00000004
#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
+#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
+
+#define BRCMF_STA_ASSOC 0x10 /* Associated */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+
+/* primary (ie tx) key */
+#define BRCMF_PRIMARY_KEY (1 << 1)
+#define DOT11_BSSTYPE_ANY 2
+#define BRCMF_ESCAN_REQ_VERSION 1
+
+#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
@@ -90,4 +108,290 @@ enum brcmf_tdls_manual_ep_ops {
BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
};
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern_le {
+ /*
+ * Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ __le32 offset;
+ /* Size of the pattern. Bitmask must be the same size.*/
+ __le32 size_bytes;
+ /*
+ * Variable length mask and pattern data. mask starts at offset 0.
+ * Pattern immediately follows mask.
+ */
+ u8 mask_and_pattern[1];
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter_le {
+ __le32 id; /* Unique filter id, specified by app. */
+ __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ __le32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
+ } u;
+};
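
For illustration, a hedged sketch of how the flexible mask_and_pattern tail is filled when installing a pattern filter: the mask occupies the first size_bytes, the pattern the next size_bytes. The filter id, type value and the EtherType match below are assumptions, not taken from this patch:

    u32 psize = 2;                                   /* bytes to match */
    u32 buflen = offsetof(struct brcmf_pkt_filter_le,
                          u.pattern.mask_and_pattern) + 2 * psize;
    struct brcmf_pkt_filter_le *filt = kzalloc(buflen, GFP_KERNEL);

    if (filt) {
            filt->id = cpu_to_le32(100);             /* arbitrary filter id    */
            filt->type = cpu_to_le32(0);             /* assumed: pattern match */
            filt->u.pattern.offset = cpu_to_le32(12);        /* EtherType field */
            filt->u.pattern.size_bytes = cpu_to_le32(psize);
            memset(filt->u.pattern.mask_and_pattern, 0xff, psize);  /* mask    */
            filt->u.pattern.mask_and_pattern[psize] = 0x08;         /* 0x0800  */
            filt->u.pattern.mask_and_pattern[psize + 1] = 0x00;     /* = IPv4  */
    }
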
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable_le {
+ __le32 id; /* Unique filter id */
+ __le32 enable; /* Enable/disable bool */
+};
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info_le {
+ __le32 version; /* version field */
+ __le32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ u8 BSSID[ETH_ALEN];
+ __le16 beacon_period; /* units are Kusec */
+ __le16 capability; /* Capability information */
+ u8 SSID_len;
+ u8 SSID[32];
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ __le16 chanspec; /* chanspec for bss */
+ __le16 atim_window; /* units are Kusec */
+ u8 dtim_period; /* DTIM period */
+ __le16 RSSI; /* receive signal strength (in dBm) */
+ s8 phy_noise; /* noise (in dBm) */
+
+ u8 n_cap; /* BSS is 802.11N Capable */
+ /* 802.11N BSS Capabilities (based on HT_CAP_*): */
+ __le32 nbss_cap;
+ u8 ctl_ch; /* 802.11N BSS control channel number */
+ __le32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ u8 flags; /* flags */
+ u8 reserved[3]; /* Reserved for expansion of BSS properties */
+ u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ __le16 ie_offset; /* offset at which IEs start, from beginning */
+ __le32 ie_length; /* byte length of Information Elements */
+ __le16 SNR; /* average SNR during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+};
+
+struct brcm_rateset_le {
+ /* # rates in this set */
+ __le32 count;
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[BRCMF_MAXRATES_IN_SET];
+};
+
+struct brcmf_ssid {
+ u32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_ssid_le {
+ __le32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_scan_params_le {
+ struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
+ u8 bssid[ETH_ALEN]; /* default: bcast */
+ s8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ u8 scan_type; /* flags, 0 use default */
+ __le32 nprobes; /* -1 use default, number of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the
+ * home channel between channel scans
+ */
+ __le32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in
+ * channel_list, 0 means default (use all
+ * available channels)
+ *
+ * high half is entries in struct brcmf_ssid
+ * array that follows channel_list, aligned for
+ * s32 (4 bytes) meaning an odd channel count
+ * implies a 2-byte pad between end of
+ * channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the
+ * fixed parameter portion is assumed, otherwise
+ * ssid in the fixed portion is ignored
+ */
+ __le16 channel_list[1]; /* list of chanspecs */
+};
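
The channel_num encoding described in the comment above can be built with the BRCMF_SCAN_PARAMS_COUNT_MASK / BRCMF_SCAN_PARAMS_NSSID_SHIFT definitions earlier in this hunk; a small sketch with illustrative counts:

    u32 n_channels = 11;
    u32 n_ssids = 2;
    __le32 channel_num = cpu_to_le32(
            (n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
            (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
    /* host-order value 0x0002000b: 2 SSIDs in the high half, 11 channels low */
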
+
+struct brcmf_scan_results {
+ u32 buflen;
+ u32 version;
+ u32 count;
+ struct brcmf_bss_info_le bss_info_le[];
+};
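
Since each bss_info record carries its own length (IEs included), consumers step through the vector by advancing length bytes per record, as the comment on brcmf_bss_info_le requires. A hedged sketch of that walk, given a struct brcmf_scan_results *results filled in by firmware (bounds checking omitted):

    struct brcmf_bss_info_le *bi = results->bss_info_le;
    u32 i;

    for (i = 0; i < results->count; i++) {
            /* ... inspect bi, e.g. bi->SSID, IEs at (u8 *)bi + ie_offset ... */
            bi = (struct brcmf_bss_info_le *)((u8 *)bi +
                                              le32_to_cpu(bi->length));
    }
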
+
+struct brcmf_escan_params_le {
+ __le32 version;
+ __le16 action;
+ __le16 sync_id;
+ struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+ __le32 buflen;
+ __le32 version;
+ __le16 sync_id;
+ __le16 bss_count;
+ struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+ sizeof(struct brcmf_bss_info_le))
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params_le {
+ /* 00:00:00:00:00:00: broadcast scan */
+ u8 bssid[ETH_ALEN];
+ /* 0: all available channels, otherwise count of chanspecs in
+ * chanspec_list */
+ __le32 chanspec_num;
+ /* list of chanspecs */
+ __le16 chanspec_list[1];
+};
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+ struct brcmf_ssid_le ssid_le;
+ struct brcmf_assoc_params_le params_le;
+};
+
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+ u8 scan_type; /* 0 use default, active or passive scan */
+ __le32 nprobes; /* -1 use default, nr of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the home
+ * channel between channel scans
+ */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+ struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
+ struct brcmf_join_scan_params_le scan_le;
+ struct brcmf_assoc_params_le assoc_le;
+};
+
+struct brcmf_wsec_key {
+ u32 index; /* key index */
+ u32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ u32 pad_1[18];
+ u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ u32 flags; /* misc flags */
+ u32 pad_2[3];
+ u32 iv_initialized; /* has IV been initialized already? */
+ u32 pad_3;
+ /* Rx IV */
+ struct {
+ u32 hi; /* upper 32 bits of IV */
+ u16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ u32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/*
+ * dongle requires same struct as above but with fields in little endian order
+ */
+struct brcmf_wsec_key_le {
+ __le32 index; /* key index */
+ __le32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ __le32 pad_1[18];
+ __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ __le32 flags; /* misc flags */
+ __le32 pad_2[3];
+ __le32 iv_initialized; /* has IV been initialized already? */
+ __le32 pad_3;
+ /* Rx IV */
+ struct {
+ __le32 hi; /* upper 32 bits of IV */
+ __le16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ __le32 pad_4[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val_le {
+ __le32 val;
+ u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+struct brcmf_sta_info_le {
+ __le16 ver; /* version of this struct */
+ __le16 len; /* length in bytes of this structure */
+ __le16 cap; /* sta's advertised capabilities */
+ __le32 flags; /* flags defined below */
+ __le32 idle; /* time since data pkt rx'd from sta */
+ u8 ea[ETH_ALEN]; /* Station address */
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
+ /* w/hi bit set if basic */
+ __le32 in; /* seconds elapsed since associated */
+ __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+ __le32 tx_pkts; /* # of packets transmitted */
+ __le32 tx_failures; /* # of packets failed */
+ __le32 rx_ucast_pkts; /* # of unicast packets received */
+ __le32 rx_mcast_pkts; /* # of multicast packets received */
+ __le32 tx_rate; /* Rate of last successful tx frame */
+ __le32 rx_rate; /* Rate of last successful rx frame */
+ __le32 rx_decrypt_succeeds; /* # of packets decrypted successfully */
+ __le32 rx_decrypt_failures; /* # of packets that failed decryption */
+};
+
+struct brcmf_chanspec_list {
+ __le32 count; /* # of entries */
+ __le32 element[1]; /* variable length uint32 list */
+};
+
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+ __be16 version;
+ __be16 chanspec;
+ __be32 rssi;
+ __be32 mactime;
+ __be32 rate;
+};
+
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index d0cd0bf95c5a..c3e7d76dbf35 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -27,7 +27,6 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "dhd.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
#include "fwil.h"
@@ -36,6 +35,7 @@
#include "fwsignal.h"
#include "p2p.h"
#include "wl_cfg80211.h"
+#include "proto.h"
/**
* DOC: Firmware Signalling
@@ -105,6 +105,7 @@ static struct {
};
#undef BRCMF_FWS_TLV_DEF
+
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
int i;
@@ -123,6 +124,12 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
#endif /* DEBUG */
/*
+ * The PKTTAG tlv has additional bytes when firmware-signalling
+ * mode has REUSESEQ flag set.
+ */
+#define BRCMF_FWS_TYPE_SEQ_LEN 2
+
+/*
* flags used to enable tlv signalling from firmware.
*/
#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
@@ -147,8 +154,15 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
#define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02
-#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
-#define BRCMF_FWS_RET_OK_SCHEDULE 1
+#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
+#define BRCMF_FWS_RET_OK_SCHEDULE 1
+
+#define BRCMF_FWS_MODE_REUSESEQ_SHIFT 3 /* seq reuse */
+#define BRCMF_FWS_MODE_SET_REUSESEQ(x, val) ((x) = \
+ ((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \
+ (((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT))
+#define BRCMF_FWS_MODE_GET_REUSESEQ(x) \
+ (((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1)
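
A quick sketch of how the two mode-bit macros above behave; nothing here is driver code, the values only show the bit flow:

    u8 mode = 0, extra = 0;

    BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);       /* mode == 0x08, bit 3 set      */
    if (BRCMF_FWS_MODE_GET_REUSESEQ(mode))
            extra = BRCMF_FWS_TYPE_SEQ_LEN;     /* PKTTAG carries 2 extra bytes */
    BRCMF_FWS_MODE_SET_REUSESEQ(mode, 0);       /* bit cleared, mode == 0x00    */
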
/**
* enum brcmf_fws_skb_state - indicates processing state of skb.
@@ -171,6 +185,7 @@ enum brcmf_fws_skb_state {
* @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
+ * @htod_seq: 16-bit original sequence number of a suppressed packet.
* @state: transmit state of the packet.
* @mac: descriptor related to destination for this packet.
*
@@ -181,6 +196,7 @@ struct brcmf_skbuff_cb {
u16 bus_flags;
u16 if_flags;
u32 htod;
+ u16 htod_seq;
enum brcmf_fws_skb_state state;
struct brcmf_fws_mac_descriptor *mac;
};
@@ -257,6 +273,22 @@ struct brcmf_skbuff_cb {
BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK 0x2000
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT 13
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK 0x1000
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT 12
+#define BRCMF_SKB_HTOD_SEQ_NR_MASK 0x0fff
+#define BRCMF_SKB_HTOD_SEQ_NR_SHIFT 0
+
+#define brcmf_skb_htod_seq_set_field(skb, field, value) \
+ brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
+#define brcmf_skb_htod_seq_get_field(skb, field) \
+ brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+ BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT)
+
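
The 16-bit htod_seq word introduced above packs a 12-bit sequence number plus two origin flags; expanding the helper macros by hand (with an illustrative number) gives:

    u16 seq = 0;

    brcmu_maskset16(&seq, BRCMF_SKB_HTOD_SEQ_NR_MASK,
                    BRCMF_SKB_HTOD_SEQ_NR_SHIFT, 0x123);   /* sequence number  */
    brcmu_maskset16(&seq, BRCMF_SKB_HTOD_SEQ_FROMFW_MASK,
                    BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT, 1);   /* seq came from fw */
    /* seq == 0x2123: bit 13 (FROMFW) set, bits 0-11 hold the number */
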
#define BRCMF_FWS_TXSTAT_GENERATION_MASK 0x80000000
#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT 31
#define BRCMF_FWS_TXSTAT_FLAGS_MASK 0x78000000
@@ -265,8 +297,8 @@ struct brcmf_skbuff_cb {
#define BRCMF_FWS_TXSTAT_FIFO_SHIFT 24
#define BRCMF_FWS_TXSTAT_HSLOT_MASK 0x00FFFF00
#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT 8
-#define BRCMF_FWS_TXSTAT_PKTID_MASK 0x00FFFFFF
-#define BRCMF_FWS_TXSTAT_PKTID_SHIFT 0
+#define BRCMF_FWS_TXSTAT_FREERUN_MASK 0x000000FF
+#define BRCMF_FWS_TXSTAT_FREERUN_SHIFT 0
#define brcmf_txstatus_get_field(txs, field) \
brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
@@ -443,6 +475,7 @@ struct brcmf_fws_info {
unsigned long borrow_defer_timestamp;
bool bus_flow_blocked;
bool creditmap_received;
+ u8 mode;
};
/*
@@ -805,20 +838,23 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
+static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u8 *wlh;
u16 data_offset = 0;
u8 fillers;
__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+ __le16 pktseq = cpu_to_le16(brcmf_skbcb(skb)->htod_seq);
- brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
+ brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
- le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
+ (le32_to_cpu(pkttag) >> 8) & 0xffff,
+ brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
if (entry->send_tim_signal)
data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
+ data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
fillers = round_up(data_offset, 4) - data_offset;
@@ -830,7 +866,12 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
memcpy(&wlh[2], &pkttag, sizeof(pkttag));
- wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+ wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
+ memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
+ sizeof(pktseq));
+ }
+ wlh += wlh[1] + 2;
if (entry->send_tim_signal) {
entry->send_tim_signal = 0;
@@ -846,9 +887,7 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
if (fillers)
memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
- brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
- data_offset >> 2, skb);
- return 0;
+ return (u8)(data_offset >> 2);
}
static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
@@ -856,10 +895,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
int fifo, bool send_immediately)
{
struct sk_buff *skb;
- struct brcmf_bus *bus;
struct brcmf_skbuff_cb *skcb;
s32 err;
u32 len;
+ u8 data_offset;
+ int ifidx;
/* check delayedQ and suppressQ in one call using bitmap */
if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
@@ -875,6 +915,7 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
/* create a dummy packet and sent that. The traffic */
/* bitmap info will automatically be attached to that packet */
len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+ BRCMF_FWS_TYPE_SEQ_LEN +
BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
4 + fws->drvr->hdrlen;
skb = brcmu_pkt_buf_get_skb(len);
@@ -884,13 +925,13 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb = brcmf_skbcb(skb);
skcb->mac = entry;
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
- bus = fws->drvr->bus_if;
- err = brcmf_fws_hdrpush(fws, skb);
- if (err == 0) {
- brcmf_fws_unlock(fws);
- err = brcmf_bus_txdata(bus, skb);
- brcmf_fws_lock(fws);
- }
+ skcb->htod = 0;
+ skcb->htod_seq = 0;
+ data_offset = brcmf_fws_hdrpush(fws, skb);
+ ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+ brcmf_fws_unlock(fws);
+ err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+ brcmf_fws_lock(fws);
if (err)
brcmu_pkt_buf_free_skb(skb);
return true;
@@ -1172,8 +1213,13 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
{
int prec = 2 * fifo;
u32 *qfull_stat = &fws->stats.delayq_full_error;
-
struct brcmf_fws_mac_descriptor *entry;
+ struct pktq *pq;
+ struct sk_buff_head *queue;
+ struct sk_buff *p_head;
+ struct sk_buff *p_tail;
+ u32 fr_new;
+ u32 fr_compare;
entry = brcmf_skbcb(p)->mac;
if (entry == NULL) {
@@ -1185,9 +1231,55 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
prec += 1;
qfull_stat = &fws->stats.supprq_full_error;
- }
- if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
+ /* Fix out-of-order delivery of frames. Don't assume the frame */
+ /* can be inserted at the end; look for the correct position. */
+ pq = &entry->psq;
+ if (pktq_full(pq) || pktq_pfull(pq, prec)) {
+ *qfull_stat += 1;
+ return -ENFILE;
+ }
+ queue = &pq->q[prec].skblist;
+
+ p_head = skb_peek(queue);
+ p_tail = skb_peek_tail(queue);
+ fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);
+
+ while (p_head != p_tail) {
+ fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+ FREERUN);
+ /* be sure to handle wrap of 256 */
+ if (((fr_new > fr_compare) &&
+ ((fr_new - fr_compare) < 128)) ||
+ ((fr_new < fr_compare) &&
+ ((fr_compare - fr_new) > 128)))
+ break;
+ p_tail = skb_queue_prev(queue, p_tail);
+ }
+ /* Position found. Determine what to do */
+ if (p_tail == NULL) {
+ /* empty list */
+ __skb_queue_tail(queue, p);
+ } else {
+ fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+ FREERUN);
+ if (((fr_new > fr_compare) &&
+ ((fr_new - fr_compare) < 128)) ||
+ ((fr_new < fr_compare) &&
+ ((fr_compare - fr_new) > 128))) {
+ /* After tail */
+ __skb_queue_after(queue, p_tail, p);
+ } else {
+ /* Before tail */
+ __skb_insert(p, p_tail->prev, p_tail, queue);
+ }
+ }
+
+ /* Complete the counters and statistics */
+ pq->len++;
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+ } else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
*qfull_stat += 1;
return -ENFILE;
}
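In the hunk above, suppressed frames are re-queued ordered by their 8-bit FREERUN counter, and the paired comparisons (fr_new > fr_compare with a difference below 128, or the wrapped variant) exist only to survive the counter wrapping at 256. The same test collapses to one modulo-256 subtraction; a small self-contained sketch (the helper name is mine, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* true if 8-bit sequence number a comes after b, modulo 256 (equal -> false) */
static int seq_after(uint8_t a, uint8_t b)
{
	uint8_t d = (uint8_t)(a - b);

	return d != 0 && d < 128;
}

int main(void)
{
	printf("%d %d %d\n",
	       seq_after(2, 250),   /* 1: counter wrapped, 2 is still newer */
	       seq_after(250, 2),   /* 0: older */
	       seq_after(10, 5));   /* 1: plain case */
	return 0;
}

Equal counters fall through to the "before tail" branch in the driver, just as seq_after() returning false would.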
@@ -1277,7 +1369,8 @@ done:
}
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *skb, u32 genbit)
+ struct sk_buff *skb, u32 genbit,
+ u16 seq)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
@@ -1297,9 +1390,19 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
entry->generation = genbit;
ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (ret == 0)
+ if (ret == 0) {
+ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+ brcmf_skbcb(skb)->htod_seq = seq;
+ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+ } else {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+ }
ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
skb);
+ }
+
if (ret != 0) {
/* suppress q is full or hdrpull failed, drop this packet */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
@@ -1317,7 +1420,7 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
- u32 genbit)
+ u32 genbit, u16 seq)
{
u32 fifo;
int ret;
@@ -1360,8 +1463,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
if (entry->suppressed && entry->suppr_transit_count)
entry->suppr_transit_count--;
- brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
- skcb->htod);
+ brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
+ skcb->htod, seq);
/* pick up the implicit credit from this packet */
fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
@@ -1374,7 +1477,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
brcmf_fws_macdesc_return_req_credit(skb);
if (!remove_from_hanger)
- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
+ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
+ seq);
if (remove_from_hanger || ret)
brcmf_txfinalize(fws->drvr, skb, true);
@@ -1406,10 +1510,12 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
__le32 status_le;
+ __le16 seq_le;
u32 status;
u32 hslot;
u32 genbit;
u8 flags;
+ u16 seq;
fws->stats.txs_indicate++;
memcpy(&status_le, data, sizeof(status_le));
@@ -1417,9 +1523,16 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
flags = brcmf_txstatus_get_field(status, FLAGS);
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+ memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
+ sizeof(seq_le));
+ seq = le16_to_cpu(seq_le);
+ } else {
+ seq = 0;
+ }
brcmf_fws_lock(fws);
- brcmf_fws_txs_process(fws, flags, hslot, genbit);
+ brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
@@ -1603,15 +1716,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
return 0;
}
-static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
struct brcmf_fws_mac_descriptor *entry = skcb->mac;
u8 flags;
- brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
- brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
+ if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
+ brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
/*
@@ -1621,7 +1734,7 @@ static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
}
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
- brcmf_fws_hdrpush(fws, p);
+ return brcmf_fws_hdrpush(fws, p);
}
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
@@ -1652,7 +1765,7 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
fws->stats.rollback_failed++;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
- hslot, 0);
+ hslot, 0, 0);
} else {
fws->stats.rollback_success++;
brcmf_fws_return_credits(fws, fifo, 1);
@@ -1689,20 +1802,21 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct brcmf_fws_mac_descriptor *entry;
- struct brcmf_bus *bus = fws->drvr->bus_if;
int rc;
u8 ifidx;
+ u8 data_offset;
entry = skcb->mac;
if (IS_ERR(entry))
return PTR_ERR(entry);
- brcmf_fws_precommit_skb(fws, fifo, skb);
+ data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
entry->transit_count++;
if (entry->suppressed)
entry->suppr_transit_count++;
+ ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
brcmf_fws_unlock(fws);
- rc = brcmf_bus_txdata(bus, skb);
+ rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
@@ -1732,6 +1846,8 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
int rc, hslot;
+ skcb->htod = 0;
+ skcb->htod_seq = 0;
hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
@@ -1757,7 +1873,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
if (!skb->priority)
- skb->priority = cfg80211_classify8021d(skb);
+ skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
if (pae)
@@ -1861,10 +1977,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
&skb, true);
ifidx = brcmf_skb_if_flags_get_field(skb,
INDEX);
- brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
- /* Use bus module to send data frame */
+ /* Use proto layer to send data frame */
brcmf_fws_unlock(fws);
- ret = brcmf_bus_txdata(drvr->bus_if, skb);
+ ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_fws_lock(fws);
if (ret < 0)
brcmf_txfinalize(drvr, skb, false);
@@ -1908,6 +2023,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
struct brcmf_fws_info *fws;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
+ u32 mode;
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
@@ -1966,6 +2082,18 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+ /* Enable seq number reuse, if supported */
+ if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) {
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
+ mode = 0;
+ BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
+ if (brcmf_fil_iovar_int_set(drvr->iflist[0],
+ "wlfc_mode", mode) == 0) {
+ BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
+ }
+ }
+ }
+
brcmf_fws_hanger_init(&fws->hanger);
brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
@@ -2022,7 +2150,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
}
brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
brcmf_fws_unlock(fws);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
new file mode 100644
index 000000000000..d5ef86db631b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+
+#include "nvram.h"
+
+/* brcmf_nvram_strip(): takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens the buffer as needed and pads with
+ * NULs. The end of the buffer is completed with a token encoding its length.
+ */
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
+{
+ u8 *nvram;
+ u32 i;
+ u32 len;
+ u32 column;
+ u8 val;
+ bool comment;
+ u32 token;
+ __le32 token_le;
+
+ /* Alloc for extra 0 byte + roundup by 4 + length field */
+ nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
+ if (!nvram)
+ return NULL;
+
+ len = 0;
+ column = 0;
+ comment = false;
+ for (i = 0; i < nv->size; i++) {
+ val = nv->data[i];
+ if (val == 0)
+ break;
+ if (val == '\r')
+ continue;
+ if (comment && (val != '\n'))
+ continue;
+ comment = false;
+ if (val == '#') {
+ comment = true;
+ continue;
+ }
+ if (val == '\n') {
+ if (column == 0)
+ continue;
+ nvram[len] = 0;
+ len++;
+ column = 0;
+ continue;
+ }
+ nvram[len] = val;
+ len++;
+ column++;
+ }
+ column = len;
+ *new_length = roundup(len + 1, 4);
+ while (column != *new_length) {
+ nvram[column] = 0;
+ column++;
+ }
+
+ token = *new_length / 4;
+ token = (~token << 16) | (token & 0x0000FFFF);
+ token_le = cpu_to_le32(token);
+
+ memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+ *new_length += sizeof(token_le);
+
+ return nvram;
+}
+
+void brcmf_nvram_free(void *nvram)
+{
+ kfree(nvram);
+}
+
+
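Usage sketch for the two helpers above, as a bus driver is expected to call them (the request_firmware() path, device pointer and firmware file name here are illustrative, not taken from this patch): strip the raw nvram text, download the stripped image, then free it. The appended token packs the image size in words in the low 16 bits and its bitwise complement in the high 16 bits.

/* illustrative only -- error handling trimmed, names are placeholders */
const struct firmware *nv;
void *vars;
u32 varsz;

if (request_firmware(&nv, "brcm/brcmfmac-sdio.txt", dev) == 0) {
	vars = brcmf_nvram_strip(nv, &varsz);  /* padded + length token appended */
	release_firmware(nv);                  /* strip() made its own copy */
	if (vars) {
		/* ... download vars/varsz into device RAM ... */
		brcmf_nvram_free(vars);
	}
}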
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
new file mode 100644
index 000000000000..d454580928c9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_NVRAM_H
+#define BRCMFMAC_NVRAM_H
+
+
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
+void brcmf_nvram_free(void *nvram);
+
+
+#endif /* BRCMFMAC_NVRAM_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 4a2293041821..fc4f98b275d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -812,7 +812,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
struct ieee80211_channel *chan = request->channels[i];
if (chan->flags & (IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_PASSIVE_SCAN))
+ IEEE80211_CHAN_NO_IR))
continue;
chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf,
@@ -1243,7 +1243,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
IEEE80211_P2P_ATTR_DEVICE_ID,
p2p_dev_addr, sizeof(p2p_dev_addr));
if ((err >= 0) &&
- (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) {
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
@@ -1380,8 +1380,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
(brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr,
- ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
afx_hdl->peer_chan);
@@ -1865,7 +1864,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
cfg->d11inf.decchspec(&ch);
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
- (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
afx_hdl->peer_chan = ch.chnum;
brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
afx_hdl->peer_chan);
@@ -1956,21 +1955,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) {
brcmf_err("set p2p_disc error\n");
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
/* obtain bsscfg index for P2P discovery */
err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
if (err < 0) {
brcmf_err("retrieving discover bsscfg index failed\n");
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
/* Verify that firmware uses same bssidx as driver !! */
if (p2p_ifp->bssidx != bssidx) {
brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
bssidx, p2p_ifp->bssidx);
- brcmf_free_vif(cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
goto exit;
}
@@ -1998,7 +1997,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p);
/* remove discovery interface */
- brcmf_free_vif(p2p->cfg, vif);
+ brcmf_free_vif(vif);
p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
}
/* just set it all to zero */
@@ -2223,7 +2222,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
return &p2p_vif->wdev;
fail:
- brcmf_free_vif(p2p->cfg, p2p_vif);
+ brcmf_free_vif(p2p_vif);
return ERR_PTR(err);
}
@@ -2232,31 +2231,12 @@ fail:
*
* @vif: virtual interface object to delete.
*/
-static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg,
+static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
struct brcmf_cfg80211_vif *vif)
{
cfg80211_unregister_wdev(&vif->wdev);
- cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- brcmf_free_vif(cfg, vif);
-}
-
-/**
- * brcmf_p2p_free_p2p_if() - free up net device related data.
- *
- * @ndev: net device that needs to be freed.
- */
-static void brcmf_p2p_free_p2p_if(struct net_device *ndev)
-{
- struct brcmf_cfg80211_info *cfg;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_if *ifp;
-
- ifp = netdev_priv(ndev);
- cfg = ifp->drvr->config;
- vif = ifp->vif;
-
- brcmf_free_vif(cfg, vif);
- free_netdev(ifp->ndev);
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ brcmf_free_vif(vif);
}
/**
@@ -2336,8 +2316,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
brcmf_err("Registering netdevice failed\n");
goto fail;
}
- /* override destructor */
- ifp->ndev->destructor = brcmf_p2p_free_p2p_if;
cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
/* Disable firmware roaming for P2P interface */
@@ -2350,7 +2328,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return &ifp->vif->wdev;
fail:
- brcmf_free_vif(cfg, vif);
+ brcmf_free_vif(vif);
return ERR_PTR(err);
}
@@ -2359,8 +2337,6 @@ fail:
*
* @wiphy: wiphy device of interface.
* @wdev: wireless device of interface.
- *
- * TODO: not yet supported.
*/
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
{
@@ -2386,7 +2362,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
break;
case NL80211_IFTYPE_P2P_DEVICE:
- brcmf_p2p_delete_p2pdev(cfg, vif);
+ brcmf_p2p_delete_p2pdev(p2p, vif);
return 0;
default:
return -ENOTSUPP;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
new file mode 100644
index 000000000000..b6b464184946
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "proto.h"
+#include "bcdc.h"
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_proto *proto;
+
+ proto = kzalloc(sizeof(*proto), GFP_ATOMIC);
+ if (!proto)
+ goto fail;
+
+ drvr->proto = proto;
+ /* BCDC is the only protocol supported for the moment */
+ if (brcmf_proto_bcdc_attach(drvr))
+ goto fail;
+
+ if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
+ (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL)) {
+ brcmf_err("Not all proto handlers have been installed\n");
+ goto fail;
+ }
+ return 0;
+
+fail:
+ kfree(proto);
+ drvr->proto = NULL;
+ return -ENOMEM;
+}
+
+void brcmf_proto_detach(struct brcmf_pub *drvr)
+{
+ if (drvr->proto) {
+ brcmf_proto_bcdc_detach(drvr);
+ kfree(drvr->proto);
+ drvr->proto = NULL;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
new file mode 100644
index 000000000000..482fb0ba4a30
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PROTO_H
+#define BRCMFMAC_PROTO_H
+
+struct brcmf_proto {
+ int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *skb);
+ int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
+ int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+ uint len);
+ int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
+ struct sk_buff *skb);
+ void *pd;
+};
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
+
+static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+ u8 *ifidx, struct sk_buff *skb)
+{
+ return drvr->proto->hdrpull(drvr, do_fws, ifidx, skb);
+}
+static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len)
+{
+ return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
+ u8 offset, struct sk_buff *skb)
+{
+ return drvr->proto->txdata(drvr, ifidx, offset, skb);
+}
+
+
+#endif /* BRCMFMAC_PROTO_H */
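The proto layer introduced here is a plain ops table: brcmf_proto_attach() allocates struct brcmf_proto, the concrete protocol (only BCDC at this point) fills in the four handlers, and the inline wrappers above dispatch to them, which is why fwsignal.c could swap brcmf_bus_txdata() for brcmf_proto_txdata() earlier in this diff. A hedged sketch of what a back-end's attach is assumed to look like (function and type names are placeholders, not the real BCDC code):

/* sketch: a protocol back-end hooking its handlers into drvr->proto */
static int my_proto_attach(struct brcmf_pub *drvr)
{
	struct my_proto_state *st = kzalloc(sizeof(*st), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;

	drvr->proto->hdrpull    = my_proto_hdrpull;
	drvr->proto->query_dcmd = my_proto_query_dcmd;
	drvr->proto->set_dcmd   = my_proto_set_dcmd;
	drvr->proto->txdata     = my_proto_txdata;  /* reached via brcmf_proto_txdata() */
	drvr->proto->pd         = st;               /* back-end private data */
	return 0;
}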
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 2096a14ef1fb..82bf3c5d3cdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -50,6 +51,9 @@
#define BCM43143_CORE_ARM_BASE 0x18003000
#define BCM43143_RAMSIZE 0x70000
+/* All D11 cores, ID 0x812 */
+#define BCM43xx_CORE_D11_BASE 0x18001000
+
#define SBCOREREV(sbidh) \
((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
((sbidh) & SSB_IDHIGH_RCLO))
@@ -65,6 +69,10 @@
/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@@ -83,6 +91,24 @@ static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
{0, 0x1}
};
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
{16, 0x7},
@@ -92,7 +118,7 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
};
u8
-brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
@@ -105,22 +131,22 @@ brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
static u32
brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh),
+ NULL);
return SBCOREREV(regdata);
}
static u32
brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u8 idx;
@@ -131,7 +157,7 @@ brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
static bool
brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
@@ -140,9 +166,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
if (idx == BRCMF_MAX_CORENUM)
return false;
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
@@ -150,7 +176,7 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
static bool
brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid)
+ struct brcmf_chip *ci, u16 coreid)
{
u32 regdata;
u8 idx;
@@ -160,13 +186,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
if (idx == BRCMF_MAX_CORENUM)
return false;
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
@@ -174,7 +200,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits)
{
u32 regdata, base;
u8 idx;
@@ -182,130 +209,126 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
base = ci->c_inf[idx].base;
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
return;
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata | SSB_TMSLOW_REJECT, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
udelay(1);
- SPINWAIT((brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
+ SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL) &
+ SSB_TMSHIGH_BUSY), 100000);
+
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_err("core state still busy\n");
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
+ SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL) &
+ SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbtmstatelow), NULL);
udelay(10);
/* clear the initiator reject bit */
- regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
}
}
/* leave reset and reject asserted */
- brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
udelay(1);
}
static void
brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits)
{
u8 idx;
u32 regdata;
+ u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
+ wrapbase = ci->c_inf[idx].wrapbase;
+
/* if core is already in reset, just return */
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
- /* ensure no pending backplane operation
- * 300uc should be sufficient for backplane ops to be finish
- * extra 10ms is taken into account for firmware load stage
- * after 10300us carry on disabling the core anyway
- */
- SPINWAIT(brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
- NULL), 10300);
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
- NULL);
- if (regdata)
- brcmf_err("disabling core 0x%x with reset status %x\n",
- coreid, regdata);
-
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- udelay(1);
+ /* configure reset */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
+ /* put in reset */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET, NULL);
usleep_range(10, 20);
+ /* wait till reset is 1 */
+ SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* post reset configure */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
static void
brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits)
{
u32 regdata;
u8 idx;
@@ -318,93 +341,91 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
* Must do the disable sequence first to work for
* arbitrary current core state.
*/
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, 0);
+ brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
+ in_resetbits);
/*
* Now do the initialization sequence.
* set reset while enabling the clock and
* forcing them on throughout the core
*/
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+ NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* clear any serror */
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ 0, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ NULL);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdio_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
+ brcmf_sdiod_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+ NULL);
/* clear reset and allow it to propagate throughout the core */
- brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* leave clock enabled */
- brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
}
static void
brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits)
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits)
{
u8 idx;
u32 regdata;
+ u32 wrapbase;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
+ wrapbase = ci->c_inf[idx].wrapbase;
+
/* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
-
- /* now do initialization sequence */
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 0, NULL);
- regdata = brcmf_sdio_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- udelay(1);
+ brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
+ in_resetbits);
- brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- core_bits | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- udelay(1);
+ while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
+ BCMA_RESET_CTL_RESET) {
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
+ usleep_range(40, 60);
+ }
+
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
+ BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
}
#ifdef DEBUG
/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
u8 core_idx;
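Summarising the ai_coredisable()/ai_resetcore() rework in the hunk above (the SSB variants keep their old flow), since the new pre/in/post reset bits are easy to lose track of:

/*
 * AI (AXI) reset handshake after this change:
 *
 *   coredisable(pre_resetbits, in_resetbits):
 *     1. bail out if BCMA_RESET_CTL already reads RESET
 *     2. IOCTL     <- pre_resetbits | FGC | CLK
 *     3. RESET_CTL <- RESET, short sleep, spin until it reads back as RESET
 *     4. IOCTL     <- pre_resetbits | FGC | CLK   (re-applied while in reset)
 *
 *   resetcore(pre_resetbits, in_resetbits, post_resetbits):
 *     5. run coredisable() to reach a known state
 *     6. loop writing RESET_CTL <- 0 until the RESET bit actually clears
 *     7. IOCTL     <- post_resetbits | CLK        (leave the core clocked)
 *
 * The D11 core gets PHYRESET/PHYCLOCKEN passed through these bits in the
 * cm3/cr4 enterdl hunks further down.
 */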
@@ -431,172 +452,213 @@ static int brcmf_sdio_chip_cichk(struct chip_info *ci)
return 0;
}
#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
{
return 0;
}
#endif
static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
+ struct brcmf_chip *ci)
{
u32 regdata;
- int ret;
+ u32 socitype;
/* Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
+ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
* For different chiptypes or old sdio hosts w/o chipcommon,
* other ways of recognition should be added here.
*/
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = regs;
- regdata = brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipid),
- NULL);
+ regdata = brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(SI_ENUM_BASE, chipid),
+ NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
ci->chiprev >= 2)
ci->chip = BCM4339_CHIP_ID;
- ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
- brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+ brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->ramsize = 0x90000;
- break;
- case BCM4329_CHIP_ID:
+ if (socitype == SOCI_SB) {
+ if (ci->chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_sdio_sb_iscoreup;
+ ci->corerev = brcmf_sdio_sb_corerev;
+ ci->coredisable = brcmf_sdio_sb_coredisable;
+ ci->resetcore = brcmf_sdio_sb_resetcore;
+
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = SI_ENUM_BASE;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
ci->ramsize = BCM4329_RAMSIZE;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("chipid 0x%x is not supported\n", ci->chip);
- return -ENODEV;
- }
-
- ret = brcmf_sdio_chip_cichk(ci);
- if (ret)
- return ret;
-
- switch (ci->socitype) {
- case SOCI_SB:
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
- break;
- case SOCI_AI:
+ } else if (socitype == SOCI_AI) {
ci->iscoreup = brcmf_sdio_ai_iscoreup;
ci->corerev = brcmf_sdio_ai_corerev;
ci->coredisable = brcmf_sdio_ai_coredisable;
ci->resetcore = brcmf_sdio_ai_resetcore;
- break;
- default:
- brcmf_err("socitype %u not supported\n", ci->socitype);
+
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = SI_ENUM_BASE;
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM43143_CHIP_ID:
+ ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
+ ci->c_inf[0].cib = 0x2b000000;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
+ ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
+ ci->c_inf[1].cib = 0x18000000;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
+ ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
+ ci->c_inf[2].cib = 0x14000000;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->c_inf[3].cib = 0x07000000;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2a084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0e004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x14080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x29004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0d004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x13080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2b084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x0f004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x01084411;
+ ci->c_inf[3].id = BCMA_CORE_80211;
+ ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0a004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x08080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->c_inf[4].id = BCMA_CORE_80211;
+ ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+ ci->ramsize = 0x3C000;
+ break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->c_inf[3].id = BCMA_CORE_80211;
+ ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+ ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("AXI chip is not supported\n");
+ return -ENODEV;
+ }
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
return -ENODEV;
}
- return 0;
+ return brcmf_sdio_chip_cichk(ci);
}
static int
@@ -607,7 +669,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
@@ -615,8 +677,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -624,8 +686,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
@@ -635,18 +697,18 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
u32 base = ci->c_inf[0].base;
@@ -654,16 +716,16 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
+ ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, capabilities),
+ NULL);
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
ci->pmucaps =
- brcmf_sdio_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
+ brcmf_sdiod_regrl(sdiodev,
+ CORE_CC_REG(base, pmucapabilities),
+ NULL);
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
@@ -677,19 +739,18 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
* Make sure any on-chip ARM is off (in case strapping is wrong),
* or downloaded code was already running.
*/
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
}
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs)
+ struct brcmf_chip **ci_ptr)
{
int ret;
- struct chip_info *ci;
+ struct brcmf_chip *ci;
brcmf_dbg(TRACE, "Enter\n");
- /* alloc chip_info_t */
- ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
if (!ci)
return -ENOMEM;
@@ -697,16 +758,16 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
if (ret != 0)
goto err;
- ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci);
if (ret != 0)
goto err;
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+ 0, NULL);
+ brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+ 0, NULL);
*ci_ptr = ci;
return 0;
@@ -717,7 +778,7 @@ err:
}
void
-brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -736,7 +797,7 @@ static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
void
brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 drivestrength)
+ struct brcmf_chip *ci, u32 drivestrength)
{
const struct sdiod_drive_str *str_tab = NULL;
u32 str_mask;
@@ -757,6 +818,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
str_mask = 0x00003800;
str_shift = 11;
break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
/* note: 43143 does not support tristate */
i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
@@ -769,6 +835,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_chip_name(ci->chip, chn, 8),
drivestrength);
break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
default:
brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
brcmf_sdio_chip_name(ci->chip, chn, 8),
@@ -784,119 +855,31 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdio_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdio_regrl(sdiodev, addr, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdio_regwl(sdiodev, addr, cc_data_temp, NULL);
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
}
}
-#ifdef DEBUG
-static bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
- char *nvram_dat, uint nvram_sz)
-{
- char *nvram_ularray;
- int err;
- bool ret = true;
-
- /* read back and verify */
- brcmf_dbg(INFO, "Compare NVRAM dl & ul; size=%d\n", nvram_sz);
- nvram_ularray = kmalloc(nvram_sz, GFP_KERNEL);
- /* do not proceed while no memory but */
- if (!nvram_ularray)
- return true;
-
- /* Upload image to verify downloaded contents. */
- memset(nvram_ularray, 0xaa, nvram_sz);
-
- /* Read the vars list to temp buffer for comparison */
- err = brcmf_sdio_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
- nvram_sz);
- if (err) {
- brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
- err, nvram_sz, nvram_addr);
- } else if (memcmp(nvram_dat, nvram_ularray, nvram_sz)) {
- brcmf_err("Downloaded NVRAM image is corrupted\n");
- ret = false;
- }
- kfree(nvram_ularray);
-
- return ret;
-}
-#else /* DEBUG */
-static inline bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
- char *nvram_dat, uint nvram_sz)
-{
- return true;
-}
-#endif /* DEBUG */
-
-static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
-{
- int err;
- u32 nvram_addr;
- u32 token;
- __le32 token_le;
-
- nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase;
-
- /* Write the vars list */
- err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
- if (err) {
- brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
- err, nvram_sz, nvram_addr);
- return false;
- }
-
- if (!brcmf_sdio_chip_verifynvram(sdiodev, nvram_addr,
- nvram_dat, nvram_sz))
- return false;
-
- /* generate token:
- * nvram size, converted to words, in lower 16-bits, checksum
- * in upper 16-bits.
- */
- token = nvram_sz / 4;
- token = (~token << 16) | (token & 0x0000FFFF);
- token_le = cpu_to_le32(token);
-
- brcmf_dbg(INFO, "RAM size: %d\n", ci->ramsize);
- brcmf_dbg(INFO, "nvram is placed at %d, size %d, token=0x%08x\n",
- nvram_addr, nvram_sz, token);
-
- /* Write the length token to the last word */
- if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
- (u8 *)&token_le, 4))
- return false;
-
- return true;
-}
-
static void
brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
- u32 zeros = 0;
-
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0);
-
- /* clear length token */
- brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+ D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
}
-static bool
-brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci)
{
u8 core_idx;
u32 reg_addr;
@@ -906,56 +889,64 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
return false;
}
- if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
- return false;
-
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
return true;
}
static inline void
brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4,
- ARMCR4_BCMA_IOCTL_CPUHALT);
+ u8 idx;
+ u32 regdata;
+ u32 wrapbase;
+ idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
+
+ wrapbase = ci->c_inf[idx].wrapbase;
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+ regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
+ ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+ D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
}
-static bool
-brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
- char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 rstvec)
{
u8 core_idx;
u32 reg_addr;
- if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
- return false;
-
/* clear all interrupts */
core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
reg_addr = ci->c_inf[core_idx].base;
reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
/* Write reset vector to address 0 */
- brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
- sizeof(ci->rst_vec));
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
/* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0);
+ ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
+ 0, 0);
return true;
}
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci)
+ struct brcmf_chip *ci)
{
u8 arm_core_idx;
@@ -969,15 +960,13 @@ void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
}
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz)
+ struct brcmf_chip *ci, u32 rstvec)
{
u8 arm_core_idx;
arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat,
- nvram_sz);
+ return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, nvram_dat, nvram_sz);
+ return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
}
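A note on the callback signatures used throughout this file: coredisable() and resetcore() now take BCMA_IOCTL bit masks for the separate phases of a core reset instead of a single core_bits value (prototypes are in the sdio_chip.h hunk below). Reading the 802.11 core reset in the download-entry paths above against the new prototype gives the mapping sketched here; the ordering follows the prototype, while the interpretation that the bits are applied before, during and after the reset is an assumption based on the parameter names:

        /* ci->resetcore(sdiodev, ci, BCMA_CORE_80211, ...) above */
        pre_resetbits  = D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN;
        in_resetbits   = D11_BCMA_IOCTL_PHYCLOCKEN;
        post_resetbits = D11_BCMA_IOCTL_PHYCLOCKEN;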
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 507c61c991fa..fb0614329ede 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,15 +54,7 @@
#define BRCMF_MAX_CORENUM 6
-/* SDIO device ID */
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
-
-struct chip_core_info {
+struct brcmf_core {
u16 id;
u16 rev;
u32 base;
@@ -71,27 +63,28 @@ struct chip_core_info {
u32 cib;
};
-struct chip_info {
+struct brcmf_chip {
u32 chip;
u32 chiprev;
- u32 socitype;
/* core info */
/* always put chipcommon core at 0, bus core at 1 */
- struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+ struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
u32 pmurev;
u32 pmucaps;
u32 ramsize;
u32 rambase;
u32 rst_vec; /* reset vector for ARM CR4 core */
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
u16 coreid);
void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits);
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits);
void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u16 coreid, u32 core_bits);
+ struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+ u32 in_resetbits, u32 post_resetbits);
};
struct sbconfig {
@@ -224,15 +217,15 @@ struct sdpcmd_regs {
};
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs);
-void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+ struct brcmf_chip **ci_ptr);
+void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+ struct brcmf_chip *ci,
+ u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci);
+ struct brcmf_chip *ci);
bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz);
+ struct brcmf_chip *ci, u32 rstvec);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index fc0d4f0129db..092e9c824992 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -164,11 +164,9 @@ struct brcmf_sdio;
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
- u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
u32 sbwad; /* Save backplane window address */
- void *bus;
+ struct brcmf_sdio *bus;
atomic_t suspend; /* suspend flag */
- wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
wait_queue_head_t request_buffer_wait;
struct device *dev;
@@ -185,22 +183,19 @@ struct brcmf_sdio_dev {
};
/* Register/deregister interrupt handler. */
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
- int *ret);
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
- int *ret);
-int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
- * addr: backplane address (i.e. >= regsva from attach)
* flags: backplane width, address increment, sync/async
* buf: pointer to memory data buffer
* nbytes: number of bytes to transfer to/from buf
@@ -210,17 +205,14 @@ int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
*/
-int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-
-int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq, uint totlen);
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq);
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq, uint totlen);
/* Flags bits */
@@ -236,43 +228,16 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
* nbytes: number of bytes to transfer to/from buf
* Returns 0 or error code.
*/
-int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
- u8 *buf, uint nbytes);
-int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
- u8 *data, uint size);
+int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-
-/* platform specific/high level functions */
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
-
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
-
-/* read or write one byte using cmd52 */
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u8 *byte);
-
-/* read or write 2/4 bytes using cmd53 */
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
- uint addr, u32 *word, uint nbyte);
-
-/* Watchdog timer interface for pm ops */
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdbrcm_disconnect(void *ptr);
-void brcmf_sdbrcm_isr(void *arg);
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdio_remove(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus);
-void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index 3c67529b9074..4d7d51f95716 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -89,7 +89,7 @@ TRACE_EVENT(brcmf_hexdump,
TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
);
-TRACE_EVENT(brcmf_bdchdr,
+TRACE_EVENT(brcmf_bcdchdr,
TP_PROTO(void *data),
TP_ARGS(data),
TP_STRUCT__entry(
@@ -107,24 +107,35 @@ TRACE_EVENT(brcmf_bdchdr,
memcpy(__get_dynamic_array(signal),
(u8 *)data + 4, __entry->siglen);
),
- TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+ TP_printk("bcdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
+#ifndef SDPCM_RX
+#define SDPCM_RX 0
+#endif
+#ifndef SDPCM_TX
+#define SDPCM_TX 1
+#endif
+#ifndef SDPCM_GLOM
+#define SDPCM_GLOM 2
+#endif
+
TRACE_EVENT(brcmf_sdpcm_hdr,
- TP_PROTO(bool tx, void *data),
- TP_ARGS(tx, data),
+ TP_PROTO(u8 dir, void *data),
+ TP_ARGS(dir, data),
TP_STRUCT__entry(
- __field(u8, tx)
+ __field(u8, dir)
__field(u16, len)
- __array(u8, hdr, 12)
+ __dynamic_array(u8, hdr, dir == SDPCM_GLOM ? 20 : 12)
),
TP_fast_assign(
- memcpy(__entry->hdr, data, 12);
- __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
- __entry->tx = tx ? 1 : 0;
+ memcpy(__get_dynamic_array(hdr), data, dir == SDPCM_GLOM ? 20 : 12);
+ __entry->len = *(u8 *)data | (*((u8 *)data + 1) << 8);
+ __entry->dir = dir;
),
- TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
- __entry->len, __entry->hdr[4])
+ TP_printk("sdpcm: %s len %u, seq %d",
+ __entry->dir == SDPCM_RX ? "RX" : "TX",
+ __entry->len, ((u8 *)__get_dynamic_array(hdr))[4])
);
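The reworked tracepoint above records a direction instead of a tx flag and captures 20 header bytes for glommed superframes, 12 otherwise. Hypothetical call sites, assuming the usual trace_<event>() wrappers that TRACE_EVENT() generates (the actual callers are not part of this hunk):

        trace_brcmf_sdpcm_hdr(SDPCM_RX, hdr);         /* captures a 12-byte header */
        trace_brcmf_sdpcm_hdr(SDPCM_GLOM, superhdr);  /* captures a 20-byte glom header */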
#ifdef CONFIG_BRCM_TRACING
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 422f44c63175..24f65cd53859 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -522,10 +522,10 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
/* update state of upper layer */
if (state == BRCMFMAC_USB_STATE_DOWN) {
brcmf_dbg(USB, "DBUS is down\n");
- bcmf_bus->state = BRCMF_BUS_DOWN;
+ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
} else if (state == BRCMFMAC_USB_STATE_UP) {
brcmf_dbg(USB, "DBUS is up\n");
- bcmf_bus->state = BRCMF_BUS_DATA;
+ brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
} else {
brcmf_dbg(USB, "DBUS current state=%d\n", state);
}
@@ -1253,9 +1253,10 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->ops = &brcmf_usb_bus_ops;
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
+ bus->proto_type = BRCMF_PROTO_BCDC;
/* Attach to the common driver interface */
- ret = brcmf_attach(0, dev);
+ ret = brcmf_attach(dev);
if (ret) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@@ -1454,7 +1455,7 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_attach(0, devinfo->dev))
+ if (!brcmf_attach(devinfo->dev))
return brcmf_bus_start(&usb->dev);
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 571f013cebbb..d7718a5fa2f0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -202,9 +202,9 @@ static struct ieee80211_supported_band __wl_band_5ghz_a = {
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags
- * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for
- * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't
- * start p2p operations on 5GHz channels. All the changes in world regulatory
+ * NL80211_RRF_NO_IR for 5GHz channels (for 36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p
+ * operations on 5GHz channels. All the changes in world regulatory
* domain are to be done here.
*/
static const struct ieee80211_regdomain brcmf_regdom = {
@@ -1095,10 +1095,10 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
BRCMF_C_DISASSOC, NULL, 0);
if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
- cfg80211_disconnected(vif->wdev.netdev, 0,
- NULL, 0, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
+ cfg80211_disconnected(vif->wdev.netdev, 0, NULL, 0, GFP_KERNEL);
+
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
@@ -1758,6 +1758,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+ cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
@@ -2556,8 +2557,8 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
ch_bss.band == ch_bss_info_le.band &&
bss_info_le->SSID_len == bss->SSID_len &&
!memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
- if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
- (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+ if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) ==
+ (bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL)) {
s16 bss_rssi = le16_to_cpu(bss->RSSI);
s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
@@ -2566,13 +2567,13 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
*/
if (bss_info_rssi > bss_rssi)
bss->RSSI = bss_info_le->RSSI;
- } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
- (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+ } else if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) &&
+ (bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL) == 0) {
/* preserve the on-channel rssi measurement
* if the new measurement is off channel
*/
bss->RSSI = bss_info_le->RSSI;
- bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+ bss->flags |= BRCMF_BSS_RSSI_ON_CHANNEL;
}
return 1;
}
@@ -2988,6 +2989,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
}
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(cfg, wiphy, ifp, request);
if (err) {
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
@@ -3973,11 +3975,12 @@ brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
static int
brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
const struct ieee80211_mgmt *mgmt;
struct brcmf_cfg80211_vif *vif;
s32 err = 0;
@@ -4341,7 +4344,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
brcmf_dbg(INFO, "Registering custom regulatory\n");
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
err = wiphy_register(wiphy);
if (err < 0) {
@@ -4358,9 +4361,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_cfg80211_vif *vif;
- if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
- return ERR_PTR(-ENOSPC);
-
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
vif = kzalloc(sizeof(*vif), GFP_KERNEL);
@@ -4377,21 +4377,25 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
list_add_tail(&vif->list, &cfg->vif_list);
- cfg->vif_cnt++;
return vif;
}
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
- struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
list_del(&vif->list);
- cfg->vif_cnt--;
-
kfree(vif);
- if (!cfg->vif_cnt) {
- wiphy_unregister(cfg->wiphy);
- wiphy_free(cfg->wiphy);
- }
+}
+
+void brcmf_cfg80211_free_netdev(struct net_device *ndev)
+{
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
+
+ ifp = netdev_priv(ndev);
+ vif = ifp->vif;
+
+ brcmf_free_vif(vif);
+ free_netdev(ndev);
}
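brcmf_free_vif() no longer drops the wiphy; that moves into brcmf_cfg80211_detach() further down. The new brcmf_cfg80211_free_netdev() helper ties the vif lifetime to its net_device, presumably by being installed as the netdev destructor elsewhere in the series (an assumed hookup, not shown in this hunk):

        ndev->destructor = brcmf_cfg80211_free_netdev;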
static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
@@ -4978,20 +4982,20 @@ cfg80211_p2p_attach_out:
wl_deinit_priv(cfg);
cfg80211_attach_out:
- brcmf_free_vif(cfg, vif);
+ brcmf_free_vif(vif);
return NULL;
}
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_cfg80211_vif *tmp;
+ if (!cfg)
+ return;
- wl_deinit_priv(cfg);
+ WARN_ON(!list_empty(&cfg->vif_list));
+ wiphy_unregister(cfg->wiphy);
brcmf_btcoex_detach(cfg);
- list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
- brcmf_free_vif(cfg, vif);
- }
+ wl_deinit_priv(cfg);
+ wiphy_free(cfg->wiphy);
}
static s32
@@ -5086,7 +5090,8 @@ dongle_scantime_out:
}
-static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
+static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
+ u32 bw_cap[])
{
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct ieee80211_channel *band_chan_arr;
@@ -5099,7 +5104,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
enum ieee80211_band band;
u32 channel;
u32 *n_cnt;
- bool ht40_allowed;
u32 index;
u32 ht40_flag;
bool update;
@@ -5132,18 +5136,17 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
array_size = ARRAY_SIZE(__wl_2ghz_channels);
n_cnt = &__wl_band_2ghz.n_channels;
band = IEEE80211_BAND_2GHZ;
- ht40_allowed = (bw_cap == WLC_N_BW_40ALL);
} else if (ch.band == BRCMU_CHAN_BAND_5G) {
band_chan_arr = __wl_5ghz_a_channels;
array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
n_cnt = &__wl_band_5ghz_a.n_channels;
band = IEEE80211_BAND_5GHZ;
- ht40_allowed = !(bw_cap == WLC_N_BW_20ALL);
} else {
- brcmf_err("Invalid channel Sepc. 0x%x.\n", ch.chspec);
+ brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
}
- if (!ht40_allowed && ch.bw == BRCMU_CHAN_BW_40)
+ if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_40)
continue;
update = false;
for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
@@ -5161,7 +5164,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- if (ch.bw == BRCMU_CHAN_BW_40 && ht40_allowed) {
+ brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
+ ch.chnum, band_chan_arr[index].center_freq,
+ ch.bw, ch.sb);
+ if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
*/
@@ -5197,10 +5203,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
if (channel & WL_CHAN_RADAR)
band_chan_arr[index].flags |=
(IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS);
+ IEEE80211_CHAN_NO_IR);
if (channel & WL_CHAN_PASSIVE)
band_chan_arr[index].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
if (!update)
@@ -5212,6 +5218,46 @@ exit:
return err;
}
+static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+{
+ u32 band, mimo_bwcap;
+ int err;
+
+ band = WLC_BAND_2G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[IEEE80211_BAND_2GHZ] = band;
+ band = WLC_BAND_5G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[IEEE80211_BAND_5GHZ] = band;
+ return;
+ }
+ WARN_ON(1);
+ return;
+ }
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+ mimo_bwcap = 0;
+ err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+ if (err)
+ /* assume 20MHz if firmware does not give a clue */
+ mimo_bwcap = WLC_N_BW_20ALL;
+
+ switch (mimo_bwcap) {
+ case WLC_N_BW_40ALL:
+ bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20IN2G_40IN5G:
+ bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+ /* fall-thru */
+ case WLC_N_BW_20ALL:
+ bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ break;
+ default:
+ brcmf_err("invalid mimo_bw_cap value\n");
+ }
+}
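The fall-throughs in the switch above accumulate bits, so the legacy mimo_bw_cap values expand into the new per-band masks as follows (derived from the switch together with the WLC_BW_*_BIT defines added to brcmu_wifi.h later in this diff):

        /* WLC_N_BW_20ALL         -> 2GHz: 20      5GHz: 20
         * WLC_N_BW_20IN2G_40IN5G -> 2GHz: 20      5GHz: 40|20
         * WLC_N_BW_40ALL         -> 2GHz: 40|20   5GHz: 40|20
         */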
static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
@@ -5220,13 +5266,13 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
s32 phy_list;
u32 band_list[3];
u32 nmode;
- u32 bw_cap = 0;
+ u32 bw_cap[2] = { 0, 0 };
s8 phy;
s32 err;
u32 nband;
s32 i;
- struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
- s32 index;
+ struct ieee80211_supported_band *bands[2] = { NULL, NULL };
+ struct ieee80211_supported_band *band;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
&phy_list, sizeof(phy_list));
@@ -5252,11 +5298,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
if (err) {
brcmf_err("nmode error (%d)\n", err);
} else {
- err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap);
- if (err)
- brcmf_err("mimo_bw_cap error (%d)\n", err);
+ brcmf_get_bwcap(ifp, bw_cap);
}
- brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap);
+ brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
+ bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
err = brcmf_construct_reginfo(cfg, bw_cap);
if (err) {
@@ -5265,40 +5310,33 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
}
nband = band_list[0];
- memset(bands, 0, sizeof(bands));
for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
- index = -1;
+ band = NULL;
if ((band_list[i] == WLC_BAND_5G) &&
- (__wl_band_5ghz_a.n_channels > 0)) {
- index = IEEE80211_BAND_5GHZ;
- bands[index] = &__wl_band_5ghz_a;
- if ((bw_cap == WLC_N_BW_40ALL) ||
- (bw_cap == WLC_N_BW_20IN2G_40IN5G))
- bands[index]->ht_cap.cap |=
- IEEE80211_HT_CAP_SGI_40;
- } else if ((band_list[i] == WLC_BAND_2G) &&
- (__wl_band_2ghz.n_channels > 0)) {
- index = IEEE80211_BAND_2GHZ;
- bands[index] = &__wl_band_2ghz;
- if (bw_cap == WLC_N_BW_40ALL)
- bands[index]->ht_cap.cap |=
- IEEE80211_HT_CAP_SGI_40;
- }
+ (__wl_band_5ghz_a.n_channels > 0))
+ band = &__wl_band_5ghz_a;
+ else if ((band_list[i] == WLC_BAND_2G) &&
+ (__wl_band_2ghz.n_channels > 0))
+ band = &__wl_band_2ghz;
+ else
+ continue;
- if ((index >= 0) && nmode) {
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
- bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
- bands[index]->ht_cap.ht_supported = true;
- bands[index]->ht_cap.ampdu_factor =
- IEEE80211_HT_MAX_AMPDU_64K;
- bands[index]->ht_cap.ampdu_density =
- IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial
- * stream
- */
- bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ band->ht_cap.ht_supported = true;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ /* An HT shall support all EQM rates for one spatial
+ * stream
+ */
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ bands[band->band] = band;
}
wiphy = cfg_to_wiphy(cfg);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index d9bdaf9a72d0..2dc6a074e8ed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -412,7 +412,6 @@ struct brcmf_cfg80211_info {
struct work_struct escan_timeout_work;
u8 *escan_ioctl_buf;
struct list_head vif_list;
- u8 vif_cnt;
struct brcmf_cfg80211_vif_event vif_event;
struct completion vif_disabled;
struct brcmu_d11inf d11inf;
@@ -487,8 +486,7 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype type,
bool pm_block);
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
- struct brcmf_cfg80211_vif *vif);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
@@ -507,5 +505,6 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
bool fw_abort);
void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+void brcmf_cfg80211_free_netdev(struct net_device *ndev);
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index cc87926f5055..635ae034c7e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -59,23 +59,18 @@
#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
static const struct ieee80211_regdomain brcms_regdom_x2 = {
.n_reg_rules = 6,
@@ -395,7 +390,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
brcms_b_set_chanspec(wlc->hw, chanspec,
- !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
+ !!(ch->flags & IEEE80211_CHAN_NO_IR),
&txpwr);
}
@@ -657,8 +652,8 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -684,18 +679,15 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- rule = freq_reg_info(wiphy, ch->center_freq);
+ rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(rule))
continue;
- if (!(rule->flags & NL80211_RRF_NO_IBSS))
- ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
- if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
} else if (ch->beacon_found) {
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
}
@@ -775,8 +767,8 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
}
wlc->wiphy->reg_notifier = brcms_reg_notifier;
- wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_STRICT_REGULATORY;
+ wlc->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index edc5d105ff98..925034b80e9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -125,13 +125,13 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
CHAN2GHZ(10, 2457, IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(11, 2462, IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(12, 2467,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(13, 2472,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(14, 2484,
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
IEEE80211_CHAN_NO_OFDM)
};
@@ -144,51 +144,51 @@ static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
CHAN5GHZ(48, IEEE80211_CHAN_NO_HT40PLUS),
/* UNII-2 */
CHAN5GHZ(52,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(56,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(60,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(64,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
/* MID */
CHAN5GHZ(100,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(104,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(108,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(112,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(116,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(120,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(124,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(128,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(132,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(136,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
CHAN5GHZ(140,
- IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS |
+ IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS |
IEEE80211_CHAN_NO_HT40MINUS),
/* UNII-3 */
CHAN5GHZ(149, IEEE80211_CHAN_NO_HT40MINUS),
@@ -1071,7 +1071,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
hw->max_rates = 2; /* Primary rate and 1 fallback rate */
/* channel change time is dependent on chip and band */
- hw->channel_change_time = 7 * 1000;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8138f1cff4e5..9417cb5a2553 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7108,7 +7108,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
struct sk_buff *p,
struct ieee80211_rx_status *rx_status)
{
- int preamble;
int channel;
u32 rspec;
unsigned char *plcp;
@@ -7191,7 +7190,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
/* Determine short preamble and rate_idx */
- preamble = 0;
if (is_cck_rate(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
rx_status->flag |= RX_FLAG_SHORTPRE;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 84113ea16f84..6fa5d4863782 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,6 +41,7 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM43362_CHIP_ID 43362
#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 0505cc065e0d..7ca2aa1035b2 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -82,6 +82,20 @@
#define WLC_N_BW_40ALL 1
#define WLC_N_BW_20IN2G_40IN5G 2
+#define WLC_BW_20MHZ_BIT BIT(0)
+#define WLC_BW_40MHZ_BIT BIT(1)
+#define WLC_BW_80MHZ_BIT BIT(2)
+#define WLC_BW_160MHZ_BIT BIT(3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT| \
+ WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
/* band types */
#define WLC_BAND_AUTO 0 /* auto-select */
#define WLC_BAND_5G 1 /* 5 Ghz */
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
index ebdcdf44f155..d3acc85932a5 100644
--- a/drivers/net/wireless/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/cw1200/cw1200_sdio.c
@@ -108,9 +108,9 @@ static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
struct hwbus_priv *self = dev_id;
if (self->core) {
- sdio_claim_host(self->func);
+ cw1200_sdio_lock(self);
cw1200_irq_handler(self->core);
- sdio_release_host(self->func);
+ cw1200_sdio_unlock(self);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index acdff0f7f952..5a9ffd3a6a6c 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -14,7 +14,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 090f01577dd2..3e78cc3ccb78 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
@@ -302,7 +301,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- hw->channel_change_time = 1000; /* TODO: find actual value */
hw->queues = 4;
priv->rts_threshold = -1;
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
index b37abb9f0453..6907c8fd4578 100644
--- a/drivers/net/wireless/cw1200/pm.c
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -225,7 +225,7 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
cw1200_set_pm(priv, &priv->powersave_mode);
if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
!priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
- goto revert3;
+ goto revert4;
}
}
@@ -254,11 +254,11 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* Stop serving thread */
if (cw1200_bh_suspend(priv))
- goto revert4;
+ goto revert5;
ret = timer_pending(&priv->mcast_timeout);
if (ret)
- goto revert5;
+ goto revert6;
/* Store suspend state */
pm_state->suspend_state = state;
@@ -280,9 +280,9 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return 0;
-revert5:
+revert6:
WARN_ON(cw1200_bh_resume(priv));
-revert4:
+revert5:
cw1200_resume_work(priv, &priv->bss_loss_work,
state->bss_loss_tmo);
cw1200_resume_work(priv, &priv->join_timeout,
@@ -291,6 +291,7 @@ revert4:
state->direct_probe);
cw1200_resume_work(priv, &priv->link_id_gc_work,
state->link_id_gc);
+revert4:
kfree(state);
revert3:
wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index ee3c19037aac..9afcd4ce3368 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -173,8 +173,9 @@ void cw1200_scan_work(struct work_struct *work)
cw1200_set_pm(priv, &priv->powersave_mode);
if (priv->scan.status < 0)
- wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
- priv->scan.status);
+ wiphy_warn(priv->hw->wiphy,
+ "[SCAN] Scan failed (%d).\n",
+ priv->scan.status);
else if (priv->scan.req)
wiphy_dbg(priv->hw->wiphy,
"[SCAN] Scan completed.\n");
@@ -197,9 +198,9 @@ void cw1200_scan_work(struct work_struct *work)
if ((*it)->band != first->band)
break;
if (((*it)->flags ^ first->flags) &
- IEEE80211_CHAN_PASSIVE_SCAN)
+ IEEE80211_CHAN_NO_IR)
break;
- if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
(*it)->max_power != first->max_power)
break;
}
@@ -210,7 +211,7 @@ void cw1200_scan_work(struct work_struct *work)
else
scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
scan.num_probes =
- (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+ (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
scan.num_ssids = priv->scan.n_ssids;
scan.ssids = &priv->scan.ssids[0];
scan.num_channels = it - priv->scan.curr;
@@ -233,7 +234,7 @@ void cw1200_scan_work(struct work_struct *work)
}
for (i = 0; i < scan.num_channels; ++i) {
scan.ch[i].number = priv->scan.curr[i]->hw_value;
- if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) {
scan.ch[i].min_chan_time = 50;
scan.ch[i].max_chan_time = 100;
} else {
@@ -241,7 +242,7 @@ void cw1200_scan_work(struct work_struct *work)
scan.ch[i].max_chan_time = 25;
}
}
- if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
priv->scan.output_power != first->max_power) {
priv->scan.output_power = first->max_power;
wsm_set_output_power(priv,
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 010b252be584..103f7bce8932 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "cw1200.h"
#include "sta.h"
@@ -555,8 +556,8 @@ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
pr_debug("[STA] multicast: %pM\n", ha->addr);
memcpy(&priv->multicast_filter.macaddrs[count],
ha->addr, ETH_ALEN);
- if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
- memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+ if (!ether_addr_equal(ha->addr, broadcast_ipv4) &&
+ !ether_addr_equal(ha->addr, broadcast_ipv6))
priv->has_multicast_subscription = true;
count++;
}
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index e824d4d4a18d..0bd541175ecd 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1166,8 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
return;
} else if (ieee80211_is_beacon(frame->frame_control) &&
!arg->status && priv->vif &&
- !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
- ETH_ALEN)) {
+ ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
const u8 *tim_ie;
u8 *ies = ((struct ieee80211_mgmt *)
(skb->data))->u.beacon.variable;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index d39e3e24077b..599f30f22841 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -563,7 +563,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
* or own non-standard frame with 4th address after payload */
- if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
+ if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) &&
(hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
@@ -622,12 +622,12 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
/* check that the frame is unicast frame to us */
if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+ ether_addr_equal(hdr->addr3, dev->dev_addr)) {
/* ToDS frame with own addr BSSID and DA */
} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
/* FromDS frame with own addr as DA */
} else
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 344a981a052e..8bde77689469 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,5 +1,6 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "hostap_80211.h"
#include "hostap_common.h"
@@ -103,8 +104,7 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
} else if (local->iw_mode == IW_MODE_INFRA &&
(local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
- memcmp(skb->data + ETH_ALEN, dev->dev_addr,
- ETH_ALEN) != 0) {
+ !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
/* AP client mode: send frames with foreign src addr
* using 4-addr WDS frames */
use_wds = WDS_COMPLIANT_FRAME;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d6033a8e5dea..596525528f50 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
#include "hostap_wlan.h"
#include "hostap.h"
@@ -106,13 +107,12 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
s = ap->sta_hash[STA_HASH(sta->addr)];
if (s == NULL) return;
- if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(s->addr, sta->addr)) {
ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
return;
}
- while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
- != 0)
+ while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
s = s->hnext;
if (s->hnext != NULL)
s->hnext = s->hnext->hnext;
@@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
if (!sta->ap && sta->u.sta.challenge)
kfree(sta->u.sta.challenge);
- del_timer(&sta->timer);
+ del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
kfree(sta);
@@ -435,7 +435,7 @@ int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
entry = list_entry(ptr, struct mac_entry, list);
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
list_del(ptr);
kfree(entry);
mac_restrictions->entries--;
@@ -459,7 +459,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
spin_lock_bh(&mac_restrictions->lock);
list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
- if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, mac)) {
found = 1;
break;
}
@@ -957,7 +957,7 @@ static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
struct sta_info *s;
s = ap->sta_hash[STA_HASH(sta)];
- while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
+ while (s != NULL && !ether_addr_equal(s->addr, sta))
s = s->hnext;
return s;
}
@@ -1391,7 +1391,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
status_code = __le16_to_cpu(*pos);
pos++;
- if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
+ if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
txt = "authentication denied";
resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
@@ -1935,7 +1935,7 @@ static void handle_pspoll(local_info_t *local,
PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP,
"handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
hdr->addr1);
@@ -2230,7 +2230,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
@@ -2267,13 +2267,13 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
goto done;
}
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
" not own MAC\n", hdr->addr1);
goto done;
}
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
" not own MAC\n", hdr->addr3);
goto done;
@@ -3035,7 +3035,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
if (!wds) {
/* FromDS frame - not for us; probably
* broadcast/multicast in another BSS - drop */
- if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
printk(KERN_DEBUG "Odd.. FromDS packet "
"received with own BSSID\n");
hostap_dump_rx_80211(dev->name, skb, rx_stats);
@@ -3044,7 +3044,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
goto out;
}
} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ ether_addr_equal(hdr->addr1, dev->dev_addr)) {
if (local->hostapd) {
prism2_rx_80211(local->apdev, skb, rx_stats,
@@ -3073,7 +3073,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
/* If BSSID (Addr3) is foreign, this frame is a normal
* broadcast frame from an IBSS network. Drop it silently.
* If BSSID is own, report the dropping of this frame. */
- if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
printk(KERN_DEBUG "%s: dropped received packet from %pM"
" with no ToDS flag "
"(type=0x%02x, subtype=0x%02x)\n", dev->name,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 56cd01ca8ad0..9f825f2620da 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -1,7 +1,6 @@
#define PRISM2_PCCARD
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/wait.h>
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index c275dc1623fe..6df3ee561d52 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2175,7 +2175,7 @@ static void hostap_tx_callback(local_info_t *local,
struct hostap_tx_callback_info *cb;
/* Make sure that frame was from us. */
- if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
+ if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) {
printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
local->dev->name);
return;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index e5090309824e..3e5fa7872b64 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -655,7 +655,7 @@ static int hostap_join_ap(struct net_device *dev)
if (!local->last_scan_results)
break;
entry = &local->last_scan_results[i];
- if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(local->preferred_ap, entry->bssid)) {
req.channel = entry->chid;
break;
}
@@ -1978,7 +1978,7 @@ static inline int prism2_translate_scan(local_info_t *local,
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
- if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
+ if (ether_addr_equal(bss->bssid, scan->bssid)) {
bss->included = 1;
current_ev = __prism2_translate_scan(
local, info, scan, bss, current_ev,
@@ -2567,7 +2567,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
local->passive_scan_interval = value;
if (timer_pending(&local->passive_scan_timer))
del_timer(&local->passive_scan_timer);
- if (value > 0) {
+ if (value > 0 && value < INT_MAX / HZ) {
local->passive_scan_timer.expires = jiffies +
local->passive_scan_interval * HZ;
add_timer(&local->passive_scan_timer);
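The added upper bound keeps the expiry computation above inside int range: with value < INT_MAX / HZ, the product value * HZ cannot overflow before it is added to jiffies (a worked check, assuming value and HZ are plain ints):

        value * HZ < (INT_MAX / HZ) * HZ <= INT_MAX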
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index a1257c92afc4..67db34e56d7e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -155,8 +155,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr,
if (prism2_wds_special_addr(iface->u.wds.remote_addr))
empty = iface;
- else if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
match = iface;
break;
}
@@ -214,8 +213,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
if (iface->type != HOSTAP_INTERFACE_WDS)
continue;
- if (memcmp(iface->u.wds.remote_addr, remote_addr,
- ETH_ALEN) == 0) {
+ if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
selected = iface;
break;
}
@@ -1085,7 +1083,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
if (local->iw_mode != IW_MODE_INFRA ||
is_zero_ether_addr(local->bssid) ||
- memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
+ ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44"))
return 0;
ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 05ca3402dca7..91158e2e961c 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -5,7 +5,6 @@
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index c3d067ee4db9..3bf530d9a40f 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
void hostap_remove_proc(local_info_t *local)
{
- remove_proc_subtree(local->ddev->name, hostap_proc);
+ proc_remove(local->proc);
}
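Editor's note: hostap_remove_proc() now drops the saved proc_dir_entry directly with proc_remove() instead of looking the directory up again by name via remove_proc_subtree(); this assumes the entry pointer was stored when the directory was created. A hedged sketch of the pairing, with hypothetical names:

    #include <linux/proc_fs.h>

    /* Illustrative pairing only; "my_state" and its fields are made up. */
    struct my_state {
            struct proc_dir_entry *proc;
    };

    static void my_init_proc(struct my_state *st, const char *name)
    {
            st->proc = proc_mkdir(name, NULL);   /* keep the entry we created */
    }

    static void my_remove_proc(struct my_state *st)
    {
            proc_remove(st->proc);               /* removes the entry and its children */
    }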
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f8ab193009cd..3aba49259ef1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1930,10 +1930,10 @@ static int ipw2100_wdev_init(struct net_device *dev)
bg_band->channels[i].max_power = geo->bg[i].max_power;
if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
bg_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
@@ -6362,7 +6362,6 @@ out:
&ipw2100_attribute_group);
free_libipw(dev, 0);
- pci_set_drvdata(pci_dev, NULL);
}
pci_iounmap(pci_dev, ioaddr);
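Editor's note: several hunks in this series fold the old IEEE80211_CHAN_PASSIVE_SCAN and IEEE80211_CHAN_NO_IBSS channel flags into the single IEEE80211_CHAN_NO_IR ("no initiating radiation") flag. A minimal sketch of the mapping as it appears in these drivers; the function and parameter names are made up:

    #include <net/cfg80211.h>   /* struct ieee80211_channel, IEEE80211_CHAN_* */

    static void example_map_channel_flags(struct ieee80211_channel *chan,
                                          bool passive_only, bool no_ibss)
    {
            if (passive_only)
                    chan->flags |= IEEE80211_CHAN_NO_IR; /* was IEEE80211_CHAN_PASSIVE_SCAN */
            if (no_ibss)
                    chan->flags |= IEEE80211_CHAN_NO_IR; /* was IEEE80211_CHAN_NO_IBSS */
    }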
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 81903e33d5b1..139326065bd9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3012,7 +3012,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_lock_irqsave(&priv->ieee->lock, flags);
list_for_each_safe(element, safe, &priv->ieee->network_list) {
network = list_entry(element, struct libipw_network, list);
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
list_del(element);
list_add_tail(&network->list,
&priv->ieee->network_free_list);
@@ -3921,7 +3921,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++) {
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
+ if (ether_addr_equal(priv->stations[i], bssid)) {
/* Another node is active in network */
priv->missed_adhoc_beacons = 0;
if (!(priv->config & CFG_STATIC_CHANNEL))
@@ -3953,7 +3953,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
int i;
for (i = 0; i < priv->num_stations; i++)
- if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
+ if (ether_addr_equal(priv->stations[i], bssid))
return i;
return IPW_INVALID_STATION;
@@ -5622,7 +5622,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
return 0;
}
- if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ if (ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
"because of the same BSSID match: %pM"
".\n", print_ssid(ssid, network->ssid,
@@ -5849,7 +5849,7 @@ static int ipw_best_network(struct ipw_priv *priv,
}
if ((priv->config & CFG_STATIC_BSSID) &&
- memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+ !ether_addr_equal(network->bssid, priv->bssid)) {
IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
"because of BSSID mismatch: %pM.\n",
print_ssid(ssid, network->ssid,
@@ -6988,7 +6988,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
}
if ((priv->status & STATUS_ASSOCIATED) &&
(priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
- if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
+ if (!ether_addr_equal(network->bssid, priv->bssid))
if (network->capability & WLAN_CAPABILITY_IBSS)
if ((network->ssid_len ==
priv->assoc_network->ssid_len) &&
@@ -8210,29 +8210,29 @@ static int is_network_packet(struct ipw_priv *priv,
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSSID go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr3, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
/* packets from our adapter are dropped (echo) */
- if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
return 0;
/* {broad,multi}cast packets to our BSS go through */
if (is_multicast_ether_addr(header->addr1))
- return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
+ return ether_addr_equal(header->addr2, priv->bssid);
/* packets to our adapter go through */
- return !memcmp(header->addr1, priv->net_dev->dev_addr,
- ETH_ALEN);
+ return ether_addr_equal(header->addr1,
+ priv->net_dev->dev_addr);
}
return 1;
@@ -8260,7 +8260,7 @@ static int is_duplicate_packet(struct ipw_priv *priv,
list_for_each(p, &priv->ibss_mac_hash[index]) {
entry =
list_entry(p, struct ipw_ibss_seq, list);
- if (!memcmp(entry->mac, mac, ETH_ALEN))
+ if (ether_addr_equal(entry->mac, mac))
break;
}
if (p == &priv->ibss_mac_hash[index]) {
@@ -8329,7 +8329,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
IEEE80211_STYPE_PROBE_RESP) ||
(WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
IEEE80211_STYPE_BEACON))) {
- if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
+ if (ether_addr_equal(header->addr3, priv->bssid))
ipw_add_station(priv, header->addr2);
}
@@ -9045,7 +9045,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
}
priv->config |= CFG_STATIC_BSSID;
- if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
IPW_DEBUG_WX("BSSID set to current BSSID.\n");
mutex_unlock(&priv->mutex);
return 0;
@@ -11472,10 +11472,10 @@ static int ipw_wdev_init(struct net_device *dev)
bg_band->channels[i].max_power = geo->bg[i].max_power;
if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
bg_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
bg_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
@@ -11511,10 +11511,10 @@ static int ipw_wdev_init(struct net_device *dev)
a_band->channels[i].max_power = geo->a[i].max_power;
if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
a_band->channels[i].flags |=
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
a_band->channels[i].flags |=
- IEEE80211_CHAN_NO_IBSS;
+ IEEE80211_CHAN_NO_IR;
if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
a_band->channels[i].flags |=
IEEE80211_CHAN_RADAR;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 570d6fb88967..aa301d1eee3c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 9ffe65931b29..a586a85bfcfe 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -874,13 +874,13 @@ void libipw_rx_any(struct libipw_device *ieee,
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
/* our BSS and not from/to DS */
- if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr3, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1))
@@ -889,18 +889,18 @@ void libipw_rx_any(struct libipw_device *ieee,
break;
case IW_MODE_INFRA:
/* our BSS (== from our AP) and from DS */
- if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0)
+ if (ether_addr_equal(hdr->addr2, ieee->bssid))
if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
/* to us */
- else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+ else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
is_packet_for_us = 1;
/* mcast */
else if (is_multicast_ether_addr(hdr->addr1)) {
/* not our own packet bcasted from AP */
- if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN))
+ if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
is_packet_for_us = 1;
}
}
@@ -1468,7 +1468,7 @@ static inline int is_same_network(struct libipw_network *src,
* as one network */
return ((src->ssid_len == dst->ssid_len) &&
(src->channel == dst->channel) &&
- ether_addr_equal(src->bssid, dst->bssid) &&
+ ether_addr_equal_64bits(src->bssid, dst->bssid) &&
!memcmp(src->ssid, dst->ssid, src->ssid_len));
}
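Editor's note: is_same_network() switches to ether_addr_equal_64bits(), which can compare the addresses as a single 64-bit load on capable architectures. That is only safe when each 6-byte address is followed by at least two bytes of readable storage inside its containing structure. A sketch of a layout that satisfies the requirement, with made-up names:

    #include <linux/etherdevice.h>   /* ether_addr_equal_64bits(), ETH_ALEN */

    /* Hypothetical layout: the u16 following each bssid supplies the two
     * extra in-structure bytes the 64-bit comparison is allowed to read. */
    struct example_bss {
            u8  bssid[ETH_ALEN];
            u16 capability;
            u8  ssid_len;
    };

    static bool example_same_bss(const struct example_bss *a,
                                 const struct example_bss *b)
    {
            return ether_addr_equal_64bits(a->bssid, b->bssid);
    }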
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
index f767dd106b09..c1b4441fb8b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -48,7 +48,7 @@ il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
return p;
}
-ssize_t
+static ssize_t
il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -313,7 +313,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -403,7 +403,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index dea3b50d68b9..0487461ae4da 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1595,7 +1595,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
* and use long active_dwell time.
*/
if (!is_active || il_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
scan_ch->type = 0; /* passive */
if (IL_UCODE_API(il->ucode_ver) == 1)
scan_ch->active_dwell =
@@ -2396,8 +2396,7 @@ __il3945_up(struct il_priv *il)
clear_bit(S_RFKILL, &il->status);
else {
set_bit(S_RFKILL, &il->status);
- IL_WARN("Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
+ return -ERFKILL;
}
_il_wr(il, CSR_INT, 0xFFFFFFFF);
@@ -3575,9 +3574,9 @@ il3945_setup_mac(struct il_priv *il)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
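Editor's note: the iwlegacy and dvm setup paths stop setting WIPHY_FLAG_CUSTOM_REGULATORY and WIPHY_FLAG_DISABLE_BEACON_HINTS in wiphy->flags; the equivalent behaviour is requested through wiphy->regulatory_flags as REGULATORY_CUSTOM_REG and REGULATORY_DISABLE_BEACON_HINTS. A minimal sketch of the new wiring, assuming an arbitrary mac80211 driver's setup routine:

    #include <net/mac80211.h>

    /* Illustrative only: regulatory behaviour moves to regulatory_flags. */
    static void example_setup_regulatory(struct ieee80211_hw *hw)
    {
            hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;   /* stays in wiphy->flags */
            hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
                                           REGULATORY_DISABLE_BEACON_HINTS;
    }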
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index aea667b430c3..9a45f6f626f6 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -25,7 +25,6 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index f09e257759d5..d37a6fd90d40 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -466,10 +465,10 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
switch (il->iw_mode) {
case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
/* packets to our IBSS update information */
- return ether_addr_equal(header->addr3, il->bssid);
+ return ether_addr_equal_64bits(header->addr3, il->bssid);
case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
/* packets to our IBSS update information */
- return ether_addr_equal(header->addr2, il->bssid);
+ return ether_addr_equal_64bits(header->addr2, il->bssid);
default:
return 1;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
index c8153fc64f74..e0597bfdddb8 100644
--- a/drivers/net/wireless/iwlegacy/4965-debug.c
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -55,7 +55,7 @@ il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
return p;
}
-ssize_t
+static ssize_t
il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -467,7 +467,7 @@ il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -633,7 +633,7 @@ il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
return ret;
}
-ssize_t
+static ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 3982ab76f375..43f488a8cda2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -805,7 +805,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
}
if (!is_active || il_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ (chan->flags & IEEE80211_CHAN_NO_IR))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -5778,9 +5778,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
/*
* For now, disable PS by default because it affects
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 3ccbaf791b48..4d5e33259ca8 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 777a578294bd..fe47db9c20cd 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index b03e22ef5462..02e8233ccf29 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -3445,10 +3444,10 @@ il_init_geos(struct il_priv *il)
if (il_is_channel_valid(ch)) {
if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+ geo_ch->flags |= IEEE80211_CHAN_NO_IR;
if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ geo_ch->flags |= IEEE80211_CHAN_NO_IR;
if (ch->flags & EEPROM_CHANNEL_RADAR)
geo_ch->flags |= IEEE80211_CHAN_RADAR;
@@ -3746,10 +3745,10 @@ il_full_rxon_required(struct il_priv *il)
/* These items are only settable from the full RXON command */
CHK(!il_is_associated(il));
- CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
- CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
- CHK(!ether_addr_equal(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
+ CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
+ CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
+ CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
+ active->wlap_bssid_addr));
CHK_NEQ(staging->dev_type, active->dev_type);
CHK_NEQ(staging->channel, active->channel);
CHK_NEQ(staging->air_propagation, active->air_propagation);
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index eff26501d60a..344010153196 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -31,7 +31,7 @@
#include "common.h"
-void
+static void
il_clear_traffic_stats(struct il_priv *il)
{
memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
@@ -567,12 +567,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
((channels[i].
- flags & IEEE80211_CHAN_NO_IBSS) ||
+ flags & IEEE80211_CHAN_NO_IR) ||
(channels[i].
flags & IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
@@ -594,12 +594,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
((channels[i].
- flags & IEEE80211_CHAN_NO_IBSS) ||
+ flags & IEEE80211_CHAN_NO_IR) ||
(channels[i].
flags & IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 23d5f0275ce9..562772d85102 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 1b0f0d502568..be1086c87157 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index cfddde194940..aeae4e80ea40 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index ebdac909f0cd..751ae1d10b7f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d94f8ab15004..d2fe2596d54e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -352,12 +352,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
channels[i].max_power,
channels[i].flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ ((channels[i].flags & IEEE80211_CHAN_NO_IR)
|| (channels[i].flags &
IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
+ IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
@@ -375,12 +375,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
channels[i].max_power,
channels[i].flags & IEEE80211_CHAN_RADAR ?
" (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ ((channels[i].flags & IEEE80211_CHAN_NO_IR)
|| (channels[i].flags &
IEEE80211_CHAN_RADAR)) ? "" :
", IBSS",
channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
+ IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 7434d9edf3b7..3441f70d0ff9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 352c6cb7b4f1..7b140e487deb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 33c7e15d24f5..ca4d6692cc4e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 8749dcfe695f..6a0817d9c4fa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 3d5bdc4217a8..576f7ee38ca5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cae4d3182e33..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -28,7 +28,6 @@
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -155,9 +154,9 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
}
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
@@ -322,12 +321,6 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
- /* User space software may expect getting rfkill changes
- * even if interface is down, trans->down will leave the RF
- * kill interrupt enabled
- */
- iwl_trans_stop_hw(priv->trans, false);
-
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -413,9 +406,8 @@ static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_resume_data *resume_data = data;
struct iwl_priv *priv = resume_data->priv;
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - 4 != sizeof(*resume_data->cmd)) {
+ if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
IWL_ERR(priv, "rx wrong size data\n");
return true;
}
@@ -704,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* disabled by default */
+ return false;
+}
+
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -725,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ if (!iwl_enable_rx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -737,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ if (!iwl_enable_tx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
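Editor's note: iwlagn_mac_ampdu_action() now routes the aggregation decisions through the two helpers added above rather than testing the 11n_disable bits inline, and per iwl_enable_tx_ampdu() TX aggregation stays off for dvm unless the user opts in. A sketch of the same decision, with the bit values taken from the MODULE_PARM_DESC change in the iwl-drv.c hunk later in this series; the macro and function names here are illustrative:

    #include <stdbool.h>

    #define EXAMPLE_DISABLE_HT_TXAGG 0x2   /* "2: disable agg TX" */
    #define EXAMPLE_ENABLE_HT_TXAGG  0x8   /* "8 enable agg TX"   */

    static bool example_tx_ampdu_enabled(unsigned int disable_11n)
    {
            if (disable_11n & EXAMPLE_DISABLE_HT_TXAGG)
                    return false;
            if (disable_11n & EXAMPLE_ENABLE_HT_TXAGG)
                    return true;
            return false;   /* dvm default: TX aggregation off */
    }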
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 7aad766865cf..ba1b1ea54252 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -1313,7 +1313,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
}
/* Reset chip to save power until we load uCode during "up". */
- iwl_trans_stop_hw(priv->trans, false);
+ iwl_trans_stop_device(priv->trans);
priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
priv->eeprom_blob,
@@ -1458,7 +1458,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
dev_kfree_skb(priv->beacon_skb);
- iwl_trans_stop_hw(priv->trans, true);
+ iwl_trans_op_mode_leave(priv->trans);
ieee80211_free_hw(priv->hw);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 77cb59712235..b4e61417013a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#include "iwl-debug.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 7b03e1342d47..570d3a5e4670 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index b647e506564c..0977d93b529d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 26fc550cd68c..bdd5644a400b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -389,13 +389,6 @@ struct iwl_lq_sta {
u8 last_bt_traffic;
};
-static inline u8 num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
static inline u8 first_antenna(u8 mask)
{
if (mask & ANT_A)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index d71776dd1e6a..7a1bc1c547e1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portionhelp of the ieee80211 subsystem header files.
@@ -205,8 +205,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 __maybe_unused len =
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ u32 __maybe_unused len = iwl_rx_packet_len(pkt);
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
@@ -457,7 +456,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
const int reg_recalib_period = 60;
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ u32 len = iwl_rx_packet_payload_len(pkt);
__le32 *flag;
struct statistics_general_common *common;
struct statistics_rx_non_phy *rx_non_phy;
@@ -467,8 +466,6 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
struct statistics_tx *tx;
struct statistics_bt_activity *bt_activity;
- len -= sizeof(struct iwl_cmd_header); /* skip header */
-
IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
len);
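Editor's note: the handlers above replace the open-coded le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK computation, followed by subtracting the command header, with the iwl_rx_packet_len()/iwl_rx_packet_payload_len() helpers. A sketch of what those helpers compute, reconstructed from the lines being removed here; treat the exact definitions as an assumption:

    /* Reconstructed for illustration only. */
    static inline u32 example_rx_packet_len(const struct iwl_rx_packet *pkt)
    {
            return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
    }

    static inline u32 example_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
    {
            /* frame size minus the struct iwl_cmd_header at the front */
            return example_rx_packet_len(pkt) - sizeof(pkt->hdr);
    }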
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index d7ce2f12a907..503a81e58185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 35e0ee8b4e5b..be98b913ed58 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -544,7 +544,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
channel = chan->hw_value;
scan_ch->channel = cpu_to_le16(channel);
- if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c3c13ce96eb0..9cdd91cdf661 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
sizeof(priv->tid_data[sta_id][tid]));
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
priv->num_stations--;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index fbeee081ee2f..058c5892c427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
#include "iwl-io.h"
#include "iwl-modparams.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 9356c4b908ca..507726534b84 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 1fef5240e6ad..398dd096674c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
@@ -368,6 +367,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
/* Total # bytes to be transmitted */
@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
- struct ieee80211_tx_info *info;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id;
int tid;
@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
info = IEEE80211_SKB_CB(skb);
memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_resp->txed_2_done;
info->status.ampdu_len = ba_resp->txed;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 63637949a146..cf03ef5619d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,6 @@
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include "iwl-io.h"
#include "iwl-agn-hw.h"
@@ -389,7 +388,6 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
{
struct iwl_priv *priv = data;
struct iwl_calib_hdr *hdr;
- int len;
if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
@@ -397,12 +395,8 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
}
hdr = (struct iwl_calib_hdr *)pkt->data;
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- /* reduce the size by the length field itself */
- len -= sizeof(__le32);
-
- if (iwl_calib_set(priv, hdr, len))
+ if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
IWL_ERR(priv, "Failed to record calibration data %d\n",
hdr->op_code);
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0d2afe098afc..854ba84ccb73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index c727ec7c90a6..3e63323637f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index ecc01e1a61a1..6674f2c4541c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8ac305be68f4..8048de90233f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 3c34a72a5d64..2a59da2ff87a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ static const struct iwl_base_params iwl7000_base_params = {
};
static const struct iwl_ht_params iwl7000_ht_params = {
- .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 6d73f943cefa..7f37fb86837b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 03fd9aa8bfda..1ced525157dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -129,6 +129,12 @@ enum iwl_led_mode {
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+static inline u8 num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
/*
* @max_ll_items: max number of OTP blocks
@@ -156,12 +162,14 @@ struct iwl_base_params {
};
/*
+ * @stbc: support Tx STBC and 1*SS Rx STBC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
* @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
+ const bool stbc;
bool use_rts_for_aggregation;
u8 ht40_bands;
};
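Editor's note: num_of_ant() moves into iwl-config.h so code outside the dvm rate-scaling module can count antennas in an ANT_* mask, and struct iwl_ht_params gains the stbc flag consumed by the eeprom-parse hunk further down. A small sketch of the helper's use; the surrounding function and mask value are made up:

    /* Illustrative only: two-chain mask built from the ANT_* bits above. */
    static u8 example_tx_chain_count(void)
    {
            u8 valid_tx_ant = ANT_A | ANT_B;

            return num_of_ant(valid_tx_ant);   /* -> 2 */
    }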
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index da4eca8b3007..9d325516c42d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -198,7 +198,8 @@
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
- CSR_INT_BIT_ALIVE)
+ CSR_INT_BIT_ALIVE | \
+ CSR_INT_BIT_RX_PERIODIC)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b2bb32a781dd..a75aac986a23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 8f61c717f619..23e7351e02de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 684c416d3493..78bd41bf34b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index ff570027e9dd..75103554cd63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -322,6 +322,41 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
+static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
+{
+ int i, j;
+ struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
+ struct iwl_fw_cipher_scheme *fwcs;
+ struct ieee80211_cipher_scheme *cs;
+ u32 cipher;
+
+ if (len < sizeof(*l) ||
+ len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
+ return -EINVAL;
+
+ for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
+ fwcs = &l->cs[j];
+ cipher = le32_to_cpu(fwcs->cipher);
+
+ /* we skip schemes with zero cipher suite selector */
+ if (!cipher)
+ continue;
+
+ cs = &fw->cs[j++];
+ cs->cipher = cipher;
+ cs->iftype = BIT(NL80211_IFTYPE_STATION);
+ cs->hdr_len = fwcs->hdr_len;
+ cs->pn_len = fwcs->pn_len;
+ cs->pn_off = fwcs->pn_off;
+ cs->key_idx_off = fwcs->key_idx_off;
+ cs->key_idx_mask = fwcs->key_idx_mask;
+ cs->key_idx_shift = fwcs->key_idx_shift;
+ cs->mic_len = fwcs->mic_len;
+ }
+
+ return 0;
+}
+
/*
* Gets uCode section from tlv.
*/
@@ -729,6 +764,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
+ case IWL_UCODE_TLV_CSCHEME:
+ if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
+ goto invalid_tlv_len;
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1247,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
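Editor's note: the new IWL_UCODE_TLV_CSCHEME handler copies firmware-advertised cipher schemes into the driver's array, skipping entries with a zero cipher suite selector, and it validates the TLV length against the advertised entry count before reading any entry. A sketch of just that bound check, mirroring iwl_store_cscheme() above; the function name is made up:

    /* Illustrative only: the TLV must cover the list header and l->size
     * fixed-size entries before any entry is dereferenced. */
    static bool example_cscheme_len_ok(const struct iwl_fw_cscheme_list *l, u32 len)
    {
            return len >= sizeof(*l) &&
                   len >= sizeof(l->size) + l->size * sizeof(l->cs[0]);
    }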
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 429337a2b9a1..592c01e11013 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,7 +67,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 4c887f365908..c44cf1149648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -614,10 +614,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = IEEE80211_CHAN_NO_HT40;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IBSS;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
channel->flags |= IEEE80211_CHAN_RADAR;
@@ -751,6 +751,13 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ }
+
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index d73304a23ec2..e3c7deafabe6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index e5f2e362ab0b..25d0105741db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 8e941f8bd7d6..a6d3bdf82cdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 484d318245fb..9564ae173d06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 6c6c35c5228c..88e2d6eb569f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -125,6 +125,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
IWL_UCODE_TLV_NUM_OF_CPU = 27,
+ IWL_UCODE_TLV_CSCHEME = 28,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 75db087120c3..5f1493c44097 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,9 @@
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -113,7 +116,9 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
};
/* The default calibrate table size if not specified by firmware file */
@@ -209,6 +214,44 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
};
+#define IWL_UCODE_MAX_CS 1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for future use)
+ * @hdr_len: size of the MPDU security header
+ * @pn_len: size of the PN
+ * @pn_off: offset of the PN from the beginning of the security header
+ * @key_idx_off: offset of the key index byte in the security header
+ * @key_idx_mask: bit mask of the key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: MIC length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+ __le32 cipher;
+ u8 flags;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+ u8 hw_cipher;
+} __packed;
+
+/**
+ * struct iwl_fw_cscheme_list - a cipher scheme list
+ * @size: number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwl_fw_cscheme_list {
+ u8 size;
+ struct iwl_fw_cipher_scheme cs[];
+} __packed;
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -224,6 +267,7 @@ enum iwl_fw_phy_cfg {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
* @mvm_fw: indicates this is MVM firmware
+ * @cs: optional external cipher schemes.
*/
struct iwl_fw {
u32 ucode_ver;
@@ -243,6 +287,8 @@ struct iwl_fw {
u32 phy_config;
bool mvm_fw;
+
+ struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
};
static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
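As a reading aid for the key_idx_off/key_idx_mask/key_idx_shift fields documented above, here is a minimal user-space sketch of how a driver could pull the key index out of the MPDU security header. The CCMP-like offsets in main() are assumptions for the example, not values taken from this patch.

#include <stdint.h>
#include <stdio.h>

struct cipher_scheme {		/* only the fields needed for the example */
	uint8_t hdr_len;	/* size of the security header */
	uint8_t key_idx_off;	/* byte offset of the key index */
	uint8_t key_idx_mask;	/* bits holding the key index */
	uint8_t key_idx_shift;	/* shift to right-align the key index */
};

static uint8_t get_key_idx(const struct cipher_scheme *cs,
			   const uint8_t *sec_hdr)
{
	return (sec_hdr[cs->key_idx_off] & cs->key_idx_mask) >>
	       cs->key_idx_shift;
}

int main(void)
{
	/* CCMP-like layout assumed: key index in bits 7..6 of byte 3 */
	const struct cipher_scheme cs = {
		.hdr_len = 8, .key_idx_off = 3,
		.key_idx_mask = 0xc0, .key_idx_shift = 6,
	};
	const uint8_t hdr[8] = { 0, 0, 0, 0x40, 0, 0, 0, 0 };

	printf("key index: %u\n", (unsigned)get_key_idx(&cs, hdr));	/* 1 */
	return 0;
}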
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index ad8e19a56eca..f98175a0d35b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 63d10ec08dbc..c339c1bed080 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index a1f580c0c6c6..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,9 +79,12 @@ enum iwl_power_level {
IWL_POWER_NUM
};
-#define IWL_DISABLE_HT_ALL BIT(0)
-#define IWL_DISABLE_HT_TXAGG BIT(1)
-#define IWL_DISABLE_HT_RXAGG BIT(2)
+enum iwl_disable_11n {
+ IWL_DISABLE_HT_ALL = BIT(0),
+ IWL_DISABLE_HT_TXAGG = BIT(1),
+ IWL_DISABLE_HT_RXAGG = BIT(2),
+ IWL_ENABLE_HT_TXAGG = BIT(3),
+};
/**
* struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
*
* @sw_crypto: using hardware encryption, default = 0
* @disable_11n: disable 11n capabilities, default = 0,
- * use IWL_DISABLE_HT_* constants
+ * use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
* @wd_disable: enable stuck queue check, default = 0
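A small illustrative sketch of how the disable_11n bitmap documented above could be interpreted; the precedence given to the force-enable bit over the disable bits is an assumption for the example, not a statement about the driver's actual policy.

#include <stdbool.h>
#include <stdio.h>

enum disable_11n {
	DISABLE_HT_ALL   = 1 << 0,	/* 1: disable 11n entirely */
	DISABLE_HT_TXAGG = 1 << 1,	/* 2: disable TX aggregation */
	DISABLE_HT_RXAGG = 1 << 2,	/* 4: disable RX aggregation */
	ENABLE_HT_TXAGG  = 1 << 3,	/* 8: force-enable TX aggregation */
};

/* assumed precedence: "all" wins, then the explicit enable bit */
static bool tx_agg_allowed(unsigned int disable_11n)
{
	if (disable_11n & DISABLE_HT_ALL)
		return false;
	if (disable_11n & ENABLE_HT_TXAGG)
		return true;
	return !(disable_11n & DISABLE_HT_TXAGG);
}

int main(void)
{
	unsigned int bitmaps[] = { 0, 2, 8 };

	for (unsigned int i = 0; i < 3; i++)
		printf("11n_disable=%u: TX aggregation %s\n", bitmaps[i],
		       tx_agg_allowed(bitmaps[i]) ? "allowed" : "blocked");
	return 0;
}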
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 940b8a9d5285..b5bc959b1dfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 2e2f1c8c99f9..95af97a6c2cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index b76a9a8fc0b3..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~NVM_CHANNEL_VALID;
+
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
@@ -223,10 +228,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags |= IEEE80211_CHAN_NO_160MHZ;
if (!(ch_flags & NVM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IBSS;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (!(ch_flags & NVM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ channel->flags |= IEEE80211_CHAN_NO_IR;
if (ch_flags & NVM_CHANNEL_RADAR)
channel->flags |= IEEE80211_CHAN_RADAR;
@@ -263,13 +268,19 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap)
{
+ int num_ants = num_of_ant(data->valid_rx_ant);
+
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (num_ants > 1)
+ vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -283,7 +294,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
- if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
+ if (num_ants == 1 ||
+ cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
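The STBC hunks above gate TX STBC on having more than one antenna. The sketch below stands in for num_of_ant() with a plain popcount; the driver's helper and its chain-mask values are not reproduced here.

#include <stdbool.h>
#include <stdio.h>

/* stands in for the driver's num_of_ant(); counts set bits in the chain mask */
static int num_of_ant(unsigned int ant_mask)
{
	return __builtin_popcount(ant_mask);
}

static bool advertise_tx_stbc(unsigned int valid_tx_ant)
{
	/* TX STBC needs at least two transmit chains */
	return num_of_ant(valid_tx_ant) > 1;
}

int main(void)
{
	printf("mask 0x1: %s\n", advertise_tx_stbc(0x1) ? "TX STBC" : "no TX STBC");
	printf("mask 0x3: %s\n", advertise_tx_stbc(0x3) ? "TX STBC" : "no TX STBC");
	return 0;
}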
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 3325059c52d4..0c4399aba8c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 976448a57d02..b5be51f3cd3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -155,14 +155,12 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
+ * @ops - pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
- *
- * @ops - pointer to its own ops
*/
struct iwl_op_mode {
const struct iwl_op_mode_ops *ops;
- const struct iwl_trans *trans;
char op_mode_specific[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 1a405ae6a9c5..fa77d63a277a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index ce983af79644..9ee18d0d2d01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9d9bad..100bd0d79681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -102,6 +102,9 @@
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+/* Device NMI register */
+#define DEVICE_SET_NMI_REG 0x00a01c30
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -274,4 +277,8 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
/*********************** END TX SCHEDULER *************************************/
+/* Oscillator clock */
+#define OSC_CLK (0xa04068)
+#define OSC_CLK_FORCE_CONTROL (0x8)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 143292b4dbbf..1f065cf4a4ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,7 @@
#include "iwl-debug.h"
#include "iwl-config.h"
#include "iwl-fw.h"
+#include "iwl-op-mode.h"
/**
* DOC: Transport layer - what is it ?
@@ -100,8 +101,7 @@
* start_fw
*
* 5) Then when finished (or reset):
- * stop_fw (a.k.a. stop device for the moment)
- * stop_hw
+ * stop_device
*
* 6) Eventually, the free function will be called.
*/
@@ -176,6 +176,16 @@ struct iwl_rx_packet {
u8 data[];
} __packed;
+static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
+{
+ return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
+{
+ return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
+
/**
* enum CMD_MODE - how to send the host commands ?
*
@@ -318,6 +328,24 @@ enum iwl_d3_status {
};
/**
+ * enum iwl_trans_status: transport status flags
+ * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_trans_status {
+ STATUS_SYNC_HCMD_ACTIVE,
+ STATUS_DEVICE_ENABLED,
+ STATUS_TPOWER_PMI,
+ STATUS_INT_ENABLED,
+ STATUS_RFKILL,
+ STATUS_FW_ERROR,
+};
+
+/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
@@ -361,9 +389,7 @@ struct iwl_trans;
*
* @start_hw: starts the HW- from that point on, the HW can send interrupts
* May sleep
- * @stop_hw: stops the HW- from that point on, the HW will be in low power but
- * will still issue interrupt if the HW RF kill is triggered unless
- * op_mode_leaving is true.
+ * @op_mode_leave: Turn off the HW RF kill indication if on
* May sleep
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
@@ -371,8 +397,11 @@ struct iwl_trans;
* @fw_alive: called when the fw sends alive notification. If the fw provides
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
- * @stop_device:stops the whole device (embedded CPU put to reset)
- * May sleep
+ * @stop_device: stops the whole device (embedded CPU put to reset) and stops
+ * the HW. From that point on, the HW will be in low power but will still
+ * issue interrupt if the HW RF kill is triggered. This callback must do
+ * the right thing and not crash even if start_hw() was called but not
+ * start_fw(). May sleep
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
@@ -418,7 +447,7 @@ struct iwl_trans;
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
+ void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
@@ -479,6 +508,7 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
+ * @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -499,6 +529,7 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
+ unsigned long status;
struct device *dev;
u32 hw_rev;
@@ -540,15 +571,14 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
return trans->ops->start_hw(trans);
}
-static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->stop_hw(trans, op_mode_leaving);
+ if (trans->ops->op_mode_leave)
+ trans->ops->op_mode_leave(trans);
- if (op_mode_leaving)
- trans->op_mode = NULL;
+ trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
}
@@ -570,6 +600,7 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+ clear_bit(STATUS_FW_ERROR, &trans->status);
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
@@ -601,6 +632,13 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
{
int ret;
+ if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)))
+ return -ERFKILL;
+
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return -EIO;
@@ -640,6 +678,9 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
@@ -657,9 +698,6 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
- if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
trans->ops->txq_disable(trans, queue);
}
@@ -760,7 +798,8 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
- trans->ops->set_pmi(trans, state);
+ if (trans->ops->set_pmi)
+ trans->ops->set_pmi(trans, state);
}
static inline void
@@ -780,6 +819,16 @@ iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
__release(nic_access);
}
+static inline void iwl_trans_fw_error(struct iwl_trans *trans)
+{
+ if (WARN_ON_ONCE(!trans->op_mode))
+ return;
+
+ /* prevent double restarts due to the same erroneous FW */
+ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
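A user-space sketch of the new iwl_rx_packet_len()/iwl_rx_packet_payload_len() helpers: the frame size sits in the low bits of len_n_flags and the payload length subtracts the command header. The mask value and struct layout below are assumptions for illustration; the driver uses FH_RSCSR_FRAME_SIZE_MSK and its own iwl_rx_packet definitions.

#include <stdint.h>
#include <stdio.h>

/* assumed mask for the example; the driver uses FH_RSCSR_FRAME_SIZE_MSK */
#define FRAME_SIZE_MSK 0x00003fffu

struct rx_packet_hdr {
	uint8_t cmd;
	uint8_t flags;
	uint8_t seq[2];
};

struct rx_packet {
	uint32_t len_n_flags;	/* host byte order here; __le32 in the driver */
	struct rx_packet_hdr hdr;
};

static uint32_t rx_packet_len(const struct rx_packet *pkt)
{
	return pkt->len_n_flags & FRAME_SIZE_MSK;
}

static uint32_t rx_packet_payload_len(const struct rx_packet *pkt)
{
	return rx_packet_len(pkt) - sizeof(pkt->hdr);
}

int main(void)
{
	struct rx_packet pkt = { .len_n_flags = 0xc0000000u | 132 };

	printf("frame len %u, payload len %u\n",
	       (unsigned)rx_packet_len(&pkt),
	       (unsigned)rx_packet_payload_len(&pkt));
	return 0;
}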
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 6d73817850ce..f98ec2b23898 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -1,10 +1,10 @@
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
-iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += led.o tt.o
-iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 93fd1457954b..a1376539d2dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,29 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
return -EINVAL;
+ /*
+ * Update SF - Disable if needed. if this fails, SF might still be on
+ * while many macs are bound, which is forbidden - so fail the binding.
+ */
+ if (iwl_mvm_sf_update(mvm, vif, false))
+ return -EINVAL;
+
return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
}
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
return -EINVAL;
- return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+ ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+
+ if (!ret)
+ if (iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
+
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 75b72a956552..18a895a949d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -294,9 +294,9 @@ static const __le64 iwl_ci_mask[][3] = {
cpu_to_le64(0x0)
},
{
- cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0xFFC0000000ULL),
cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0)
+ cpu_to_le64(0x0ULL)
},
};
@@ -396,7 +396,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
- BT_VALID_TXRX_MAX_FREQ_0);
+ BT_VALID_TXRX_MAX_FREQ_0 |
+ BT_VALID_SYNC_TO_SCO);
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
@@ -514,7 +515,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (IS_ERR_OR_NULL(sta))
return 0;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* nothing to do */
if (mvmsta->bt_reduced_txpower == enable)
@@ -846,7 +847,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
if (IS_ERR_OR_NULL(sta))
return;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
data->num_bss_ifaces++;
@@ -871,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- /* Rssi update while not associated ?! */
- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ /*
+ * Rssi update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
/* No BT - reports should be disabled */
@@ -917,11 +921,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
- BT_LOW_TRAFFIC)
+ BT_HIGH_TRAFFIC)
return LINK_QUAL_AGG_TIME_LIMIT_DEF;
lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
@@ -936,7 +940,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC)
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 4b6d670c3509..036857698565 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index b9b81e881dd0..f36a7ee0267f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -886,8 +886,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
if (err)
return err;
- size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- size -= sizeof(cmd.resp_pkt->hdr);
+ size = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (size < sizeof(__le16)) {
err = -EINVAL;
} else {
@@ -1211,15 +1210,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK;
- if (len >= sizeof(u32) * 2) {
+ len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+ if (len >= sizeof(u32)) {
mvm->d3_test_pme_ptr =
le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- } else if (test) {
- /* in test mode we require the pointer */
- ret = -EIO;
- goto out;
}
#endif
iwl_free_resp(&d3_cfg_cmd);
@@ -1231,10 +1225,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->aux_sta.sta_id = old_aux_sta_id;
mvm_ap_sta->sta_id = old_ap_sta_id;
mvmvif->ap_sta_id = old_ap_sta_id;
- out_noreset:
- kfree(key_data.rsc_tsc);
+
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
+ out_noreset:
+ kfree(key_data.rsc_tsc);
mutex_unlock(&mvm->mutex);
@@ -1537,10 +1532,16 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
.status = status,
};
+ u32 disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
if (!status || !vif->bss_conf.bssid)
return false;
+ if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+ return false;
+
/* find last GTK that we used initially, if any */
gtkdata.find_phase = true;
ieee80211_iter_keys(mvm->hw, vif,
@@ -1665,8 +1666,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
else
status_size = sizeof(struct iwl_wowlan_status_v4);
- len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < status_size) {
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
@@ -1701,8 +1702,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status.wake_packet = status_v4->wake_packet;
}
- if (len - sizeof(struct iwl_cmd_header) !=
- status_size + ALIGN(status.wake_packet_bufsize, 4)) {
+ if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
@@ -1805,6 +1805,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
/* has unlocked the mutex, so skip that */
goto out;
@@ -1861,6 +1865,7 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
return err;
}
mvm->d3_test_active = true;
+ mvm->keep_vif = NULL;
return 0;
}
@@ -1871,10 +1876,14 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
u32 pme_asserted;
while (true) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
- break;
+ /* read pme_ptr if available */
+ if (mvm->d3_test_pme_ptr) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
+ break;
+ }
+
if (msleep_interruptible(100))
break;
}
@@ -1885,6 +1894,10 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
+ /* skip the one we keep connection on */
+ if (_data == vif)
+ return;
+
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_connection_loss(vif);
}
@@ -1911,7 +1924,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, NULL);
+ iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
ieee80211_wake_queues(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000000..0e29cd83a06a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,546 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "debugfs.h"
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_dbgfs_pm_mask param, int val)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+ dbgfs_pm->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ struct ieee80211_hw *hw = mvm->hw;
+ int dtimper = hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ dbgfs_pm->keep_alive_seconds = val;
+ break;
+ }
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ break;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ break;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+ IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+ dbgfs_pm->disable_power_off = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+ IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+ dbgfs_pm->uapsd_misbehaving = val;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_pm_mask param;
+ int val, ret;
+
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+ POWER_LPRX_RSSI_THRESHOLD_MIN)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+ } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 ap_sta_id;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ ap_sta_id = mvmvif->ap_sta_id;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+ mvmvif->id, mvmvif->color);
+ pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+ vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+ for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+ i, mvmvif->queue_params[i].txop,
+ mvmvif->queue_params[i].cw_min,
+ mvmvif->queue_params[i].cw_max,
+ mvmvif->queue_params[i].aifs,
+ mvmvif->queue_params[i].uapsd);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ ap_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "ap_sta_id %d - reduced Tx power %d\n",
+ ap_sta_id,
+ mvm_sta->bt_reduced_txpower);
+ }
+ }
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "idle rx chains %d, active rx chains: %d\n",
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ rcu_read_unlock();
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ dbgfs_bf->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ break;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ break;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ break;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_bf_mask param;
+ int value, ret = 0;
+
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ else
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+ };
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+ else
+ cmd.bf_enable_beacon_filter = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ le32_to_cpu(cmd.bf_debug_flag));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+ le32_to_cpu(cmd.bf_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+ le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[100];
+
+ /*
+	 * Check if the debugfs directory already exists before creating it.
+	 * This may happen when, for example, resetting the hw or during suspend-resume.
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
+ return;
+
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+ mvmvif->mvm = mvm;
+
+ if (!mvmvif->dbgfs_dir) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
+ dbgfs_dir->d_name.name);
+ return;
+ }
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+ S_IRUSR);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
+ S_IRUSR);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
+
+ /*
+ * Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+ */
+ snprintf(buf, 100, "../../../%s/%s/%s/%s",
+ dbgfs_dir->d_parent->d_parent->d_name.name,
+ dbgfs_dir->d_parent->d_name.name,
+ dbgfs_dir->d_name.name,
+ mvmvif->dbgfs_dir->d_name.name);
+
+ mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+ mvm->debugfs_dir, buf);
+ if (!mvmvif->dbgfs_slink)
+ IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
+ dbgfs_dir->d_name.name);
+ return;
+err:
+ IWL_ERR(mvm, "Can't create debugfs entity\n");
+}
+
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ debugfs_remove(mvmvif->dbgfs_slink);
+ mvmvif->dbgfs_slink = NULL;
+
+ debugfs_remove_recursive(mvmvif->dbgfs_dir);
+ mvmvif->dbgfs_dir = NULL;
+}
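
Reviewer note: the pm_params/bf_params write handlers above all follow the same
"name=value" pattern: match a fixed prefix with strncmp(), parse the value with
sscanf(), range-check it, and only then take the mutex and push the update to
the firmware. A minimal, self-contained user-space sketch of that pattern
follows; the parameter table, the ranges and the parse_param() helper are
illustrative only and are not part of the driver.

#include <stdio.h>
#include <string.h>

struct param_desc {
	const char *prefix;	/* e.g. "bf_escape_timer=" */
	int min, max;		/* accepted range, inclusive */
};

/* Hypothetical table; the real bounds come from the IWL_BF_ and IWL_BA_ constants. */
static const struct param_desc params[] = {
	{ "bf_escape_timer=", 0, 1024 },
	{ "ba_escape_timer=", 0, 1024 },
	{ "bf_enable_beacon_filter=", 0, 1 },
};

/* Return the matched table index and store the parsed value, or -1 on error. */
static int parse_param(const char *buf, int *value)
{
	size_t i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		size_t len = strlen(params[i].prefix);

		if (strncmp(buf, params[i].prefix, len))
			continue;
		if (sscanf(buf + len, "%d", value) != 1)
			return -1;
		if (*value < params[i].min || *value > params[i].max)
			return -1;
		return (int)i;
	}
	return -1;
}

int main(void)
{
	int value;
	int idx = parse_param("bf_escape_timer=50", &value);

	if (idx >= 0)
		printf("matched %s value %d\n", params[idx].prefix, value);
	return 0;
}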
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index a8fe6b41f9a3..369d4c90e669 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,30 +63,18 @@
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "debugfs.h"
-struct iwl_dbgfs_mvm_ctx {
- struct iwl_mvm *mvm;
- struct ieee80211_vif *vif;
-};
-
-static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
-
- char buf[16];
- int buf_size, ret;
+ int ret;
u32 scd_q_msk;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
if (sscanf(buf, "%x", &scd_q_msk) != 1)
return -EINVAL;
@@ -99,24 +87,15 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
struct ieee80211_sta *sta;
-
- char buf[8];
- int buf_size, sta_id, drain, ret;
+ int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
return -EIO;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
@@ -144,73 +123,57 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
{
struct iwl_mvm *mvm = file->private_data;
const struct fw_img *img;
- int ofs, len, pos = 0;
- size_t bufsz, ret;
- char *buf;
+ unsigned int ofs, len;
+ size_t ret;
u8 *ptr;
if (!mvm->ucode_loaded)
return -EINVAL;
/* default is to dump the entire data segment */
- if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
- img = &mvm->fw->img[mvm->cur_ucode];
- ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
- len = img->sec[IWL_UCODE_SECTION_DATA].len;
- } else {
+ img = &mvm->fw->img[mvm->cur_ucode];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (mvm->dbgfs_sram_len) {
ofs = mvm->dbgfs_sram_offset;
len = mvm->dbgfs_sram_len;
}
- bufsz = len * 4 + 256;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
ptr = kzalloc(len, GFP_KERNEL);
- if (!ptr) {
- kfree(buf);
+ if (!ptr)
return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
- for (ofs = 0; ofs < len; ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
- bufsz - pos, false);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
- kfree(buf);
kfree(ptr);
return ret;
}
-static ssize_t iwl_dbgfs_sram_write(struct file *file,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[64];
- int buf_size;
+ const struct fw_img *img;
u32 offset, len;
+ u32 img_offset, img_len;
+
+ if (!mvm->ucode_loaded)
+ return -EINVAL;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
+ img = &mvm->fw->img[mvm->cur_ucode];
+ img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
if ((offset & 0x3) || (len & 0x3))
return -EINVAL;
+
+ if (offset + len > img_offset + img_len)
+ return -EINVAL;
+
mvm->dbgfs_sram_offset = offset;
mvm->dbgfs_sram_len = len;
} else {
@@ -267,22 +230,14 @@ static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[64] = {};
- int ret;
- int val;
+ int ret, val;
if (!mvm->ucode_loaded)
return -EIO;
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
if (!strncmp("disable_power_off_d0=", buf, 21)) {
if (sscanf(buf + 21, "%d", &val) != 1)
return -EINVAL;
@@ -302,212 +257,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
return ret ?: count;
}
-static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- enum iwl_dbgfs_pm_mask param, int val)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
-
- dbgfs_pm->mask |= param;
-
- switch (param) {
- case MVM_DEBUGFS_PM_KEEP_ALIVE: {
- struct ieee80211_hw *hw = mvm->hw;
- int dtimper = hw->conf.ps_dtim_period ?: 1;
- int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
-
- IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
- if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
- IWL_WARN(mvm,
- "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
- val * MSEC_PER_SEC, 3 * dtimper_msec);
- }
- dbgfs_pm->keep_alive_seconds = val;
- break;
- }
- case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
- IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
- val ? "enabled" : "disabled");
- dbgfs_pm->skip_over_dtim = val;
- break;
- case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
- IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
- dbgfs_pm->skip_dtim_periods = val;
- break;
- case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
- dbgfs_pm->rx_data_timeout = val;
- break;
- case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
- IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
- dbgfs_pm->tx_data_timeout = val;
- break;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- break;
- case MVM_DEBUGFS_PM_LPRX_ENA:
- IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
- dbgfs_pm->lprx_ena = val;
- break;
- case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
- IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
- dbgfs_pm->lprx_rssi_threshold = val;
- break;
- case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
- IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
- dbgfs_pm->snooze_ena = val;
- break;
- }
-}
-
-static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- enum iwl_dbgfs_pm_mask param;
- char buf[32] = {};
- int val;
- int ret;
-
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- if (!strncmp("keep_alive=", buf, 11)) {
- if (sscanf(buf + 11, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_KEEP_ALIVE;
- } else if (!strncmp("skip_over_dtim=", buf, 15)) {
- if (sscanf(buf + 15, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
- } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
- } else if (!strncmp("rx_data_timeout=", buf, 16)) {
- if (sscanf(buf + 16, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
- } else if (!strncmp("tx_data_timeout=", buf, 16)) {
- if (sscanf(buf + 16, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
- } else if (!strncmp("lprx=", buf, 5)) {
- if (sscanf(buf + 5, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_LPRX_ENA;
- } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
- if (sscanf(buf + 20, "%d", &val) != 1)
- return -EINVAL;
- if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
- POWER_LPRX_RSSI_THRESHOLD_MIN)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
- } else if (!strncmp("snooze_enable=", buf, 14)) {
- if (sscanf(buf + 14, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
- } else {
- return -EINVAL;
- }
-
- mutex_lock(&mvm->mutex);
- iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- char buf[512];
- int bufsz = sizeof(buf);
- int pos;
-
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- u8 ap_sta_id;
- struct ieee80211_chanctx_conf *chanctx_conf;
- char buf[512];
- int bufsz = sizeof(buf);
- int pos = 0;
- int i;
-
- mutex_lock(&mvm->mutex);
-
- ap_sta_id = mvmvif->ap_sta_id;
-
- pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
- mvmvif->id, mvmvif->color);
- pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
- vif->bss_conf.bssid);
- pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
- for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
- i, mvmvif->queue_params[i].txop,
- mvmvif->queue_params[i].cw_min,
- mvmvif->queue_params[i].cw_max,
- mvmvif->queue_params[i].aifs,
- mvmvif->queue_params[i].uapsd);
- }
-
- if (vif->type == NL80211_IFTYPE_STATION &&
- ap_sta_id != IWL_MVM_STATION_COUNT) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvm_sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- mvm_sta = (void *)sta->drv_priv;
- pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
- ap_sta_id, mvm_sta->bt_reduced_txpower);
- }
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "idle rx chains %d, active rx chains: %d\n",
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
- }
- rcu_read_unlock();
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
@@ -783,11 +532,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
}
#undef PRINT_STAT_LE32
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
int ret;
mutex_lock(&mvm->mutex);
@@ -804,6 +551,14 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+
+ return count;
+}
+
static ssize_t
iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
char __user *user_buf,
@@ -828,21 +583,11 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
}
static ssize_t
-iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
- const char __user *user_buf,
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[8];
- int buf_size;
u8 scan_rx_ant;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
-
- /* get the argument from the user and check if it is valid */
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
@@ -850,228 +595,17 @@ iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
return -EINVAL;
- /* change the rx antennas for scan command */
mvm->scan_rx_ant = scan_rx_ant;
return count;
}
-
-static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
- enum iwl_dbgfs_bf_mask param, int value)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
-
- dbgfs_bf->mask |= param;
-
- switch (param) {
- case MVM_DEBUGFS_BF_ENERGY_DELTA:
- dbgfs_bf->bf_energy_delta = value;
- break;
- case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
- dbgfs_bf->bf_roaming_energy_delta = value;
- break;
- case MVM_DEBUGFS_BF_ROAMING_STATE:
- dbgfs_bf->bf_roaming_state = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
- dbgfs_bf->bf_temp_threshold = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
- dbgfs_bf->bf_temp_fast_filter = value;
- break;
- case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
- dbgfs_bf->bf_temp_slow_filter = value;
- break;
- case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
- dbgfs_bf->bf_enable_beacon_filter = value;
- break;
- case MVM_DEBUGFS_BF_DEBUG_FLAG:
- dbgfs_bf->bf_debug_flag = value;
- break;
- case MVM_DEBUGFS_BF_ESCAPE_TIMER:
- dbgfs_bf->bf_escape_timer = value;
- break;
- case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
- dbgfs_bf->ba_enable_beacon_abort = value;
- break;
- case MVM_DEBUGFS_BA_ESCAPE_TIMER:
- dbgfs_bf->ba_escape_timer = value;
- break;
- }
-}
-
-static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- enum iwl_dbgfs_bf_mask param;
- char buf[256];
- int buf_size;
- int value;
- int ret = 0;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (!strncmp("bf_energy_delta=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ENERGY_DELTA_MIN ||
- value > IWL_BF_ENERGY_DELTA_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
- if (sscanf(buf+24, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
- value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
- } else if (!strncmp("bf_roaming_state=", buf, 17)) {
- if (sscanf(buf+17, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ROAMING_STATE_MIN ||
- value > IWL_BF_ROAMING_STATE_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ROAMING_STATE;
- } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
- if (sscanf(buf+18, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
- value > IWL_BF_TEMP_THRESHOLD_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
- } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
- if (sscanf(buf+20, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
- value > IWL_BF_TEMP_FAST_FILTER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
- } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
- if (sscanf(buf+20, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
- value > IWL_BF_TEMP_SLOW_FILTER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
- } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
- if (sscanf(buf+24, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
- } else if (!strncmp("bf_debug_flag=", buf, 14)) {
- if (sscanf(buf+14, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_DEBUG_FLAG;
- } else if (!strncmp("bf_escape_timer=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BF_ESCAPE_TIMER_MIN ||
- value > IWL_BF_ESCAPE_TIMER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
- } else if (!strncmp("ba_escape_timer=", buf, 16)) {
- if (sscanf(buf+16, "%d", &value) != 1)
- return -EINVAL;
- if (value < IWL_BA_ESCAPE_TIMER_MIN ||
- value > IWL_BA_ESCAPE_TIMER_MAX)
- return -EINVAL;
- param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
- } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
- if (sscanf(buf+23, "%d", &value) != 1)
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
- param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
- } else {
- return -EINVAL;
- }
-
- mutex_lock(&mvm->mutex);
- iwl_dbgfs_update_bf(vif, param, value);
- if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
- } else {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
- }
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter =
- cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
- .ba_enable_beacon_abort =
- cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
- };
-
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
- cmd.bf_enable_beacon_filter = cpu_to_le32(1);
- else
- cmd.bf_enable_beacon_filter = 0;
-
- pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
- le32_to_cpu(cmd.bf_roaming_energy_delta));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
- le32_to_cpu(cmd.bf_roaming_state));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
- le32_to_cpu(cmd.bf_temp_threshold));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_fast_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
- le32_to_cpu(cmd.bf_temp_slow_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
- le32_to_cpu(cmd.bf_enable_beacon_filter));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
- le32_to_cpu(cmd.bf_debug_flag));
- pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
- le32_to_cpu(cmd.bf_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
- le32_to_cpu(cmd.ba_escape_timer));
- pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
- le32_to_cpu(cmd.ba_enable_beacon_abort));
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
#ifdef CONFIG_PM_SLEEP
-static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
int store;
- count = min_t(size_t, count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
if (sscanf(buf, "%d", &store) != 1)
return -EINVAL;
@@ -1124,61 +658,33 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
-#define MVM_DEBUGFS_READ_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-}
-
-#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
-#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, vif, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
- } while (0)
-
/* Device wide debugfs entries */
-MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
#ifdef CONFIG_PM_SLEEP
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
-/* Interface specific debugfs entries */
-MVM_DEBUGFS_READ_FILE_OPS(mac_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
-
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
char buf[100];
@@ -1196,6 +702,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
#ifdef CONFIG_PM_SLEEP
@@ -1206,6 +713,19 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
goto err;
#endif
+ if (!debugfs_create_blob("nvm_hw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_hw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_sw", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_sw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_calib", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_calib_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_prod", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_prod_blob))
+ goto err;
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
@@ -1221,72 +741,3 @@ err:
IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
return -ENOMEM;
}
-
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct dentry *dbgfs_dir = vif->debugfs_dir;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
-
- /*
- * Check if debugfs directory already exist before creating it.
- * This may happen when, for example, resetting hw or suspend-resume
- */
- if (!dbgfs_dir || mvmvif->dbgfs_dir)
- return;
-
- mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_data = mvm;
-
- if (!mvmvif->dbgfs_dir) {
- IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
- dbgfs_dir->d_name.name);
- return;
- }
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
- MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
- S_IRUSR);
-
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
-
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvmvif == mvm->bf_allowed_vif)
- MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
- S_IRUSR | S_IWUSR);
-
- /*
- * Create symlink for convenience pointing to interface specific
- * debugfs entries for the driver. For example, under
- * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
- * find
- * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
- */
- snprintf(buf, 100, "../../../%s/%s/%s/%s",
- dbgfs_dir->d_parent->d_parent->d_name.name,
- dbgfs_dir->d_parent->d_name.name,
- dbgfs_dir->d_name.name,
- mvmvif->dbgfs_dir->d_name.name);
-
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
- if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
- dbgfs_dir->d_name.name);
- return;
-err:
- IWL_ERR(mvm, "Can't create debugfs entity\n");
-}
-
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- debugfs_remove(mvmvif->dbgfs_slink);
- mvmvif->dbgfs_slink = NULL;
-
- debugfs_remove_recursive(mvmvif->dbgfs_dir);
- mvmvif->dbgfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000000..e3a9774af495
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+} \
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
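
Reviewer note: for readers of this new header, the following is what
_MVM_DEBUGFS_WRITE_FILE_OPS(example, 16, struct iwl_mvm) would expand to, once a
buffer-based handler iwl_dbgfs_example_write(struct iwl_mvm *, char *, size_t,
loff_t *) has been defined. The "example" name and the 16-byte buffer are
hypothetical; this is a manual macro expansion for illustration, not additional
driver code.

static ssize_t _iwl_dbgfs_example_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_mvm *arg = file->private_data;
	char buf[16] = {};
	size_t buf_size = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* dispatch to the buffer-based handler the driver defines elsewhere */
	return iwl_dbgfs_example_write(arg, buf, buf_size, ppos);
}

static const struct file_operations iwl_dbgfs_example_ops = {
	.write = _iwl_dbgfs_example_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};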
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 4ea5e24ca92d..1b4e54d416b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -127,6 +127,7 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+ BT_VALID_SYNC_TO_SCO = BIT(18),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 4e7dd8cf87dc..8415ff312d0e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 39c3148bdfa8..c405cda1025f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 5cb93ae5cd2f..884c08725308 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,8 @@
* PBW Snoozing enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
*/
enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
@@ -94,6 +96,7 @@ enum iwl_power_flags {
POWER_FLAGS_BT_SCO_ENA = BIT(8),
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
};
#define IWL_POWER_VEC_SIZE 5
@@ -228,6 +231,19 @@ struct iwl_mac_power_cmd {
u8 reserved;
} __packed;
+/*
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * associated AP is identified as improperly implementing uAPSD protocol.
+ * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwl_uapsd_misbehaving_ap_notif {
+ __le32 sta_id;
+ u8 mac_id;
+ u8 reserved[3];
+} __packed;
+
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
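
Reviewer note: a hedged sketch of how the new PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION
payload could be consumed on the host side. The handler name is made up and this
is not the driver's actual notification handler; it only shows how the __le32/u8
fields of struct iwl_uapsd_misbehaving_ap_notif are read.

static void iwl_example_uapsd_misbehaving_notif(struct iwl_mvm *mvm,
						struct iwl_rx_packet *pkt)
{
	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;

	/* sta_id is little-endian on the wire, mac_id is a plain byte */
	IWL_DEBUG_POWER(mvm, "uAPSD misbehaving AP: sta_id %d mac_id %d\n",
			le32_to_cpu(notif->sta_id), notif->mac_id);
}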
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 538f1c7a5966..85057219cc43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -281,8 +281,31 @@ enum {
/* # entries in rate scale table to support Tx retries */
#define LQ_MAX_RETRY_NUM 16
-/* Link quality command flags, only this one is available */
-#define LQ_FLAG_SET_STA_TLC_RTS_MSK BIT(0)
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS 0
+#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS 1
+#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define LQ_FLAG_RTS_BW_SIG_POS 4
+#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW than requested in rates
+ */
+#define LQ_FLAG_DYNAMIC_BW_POS 6
+#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
/**
* struct iwl_lq_cmd - link quality command
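
Reviewer note: a short sketch of how the new LQ flag bit fields might be
combined into a flags word; the chosen color value and the helper name are
illustrative only and do not correspond to code in this patch.

/* Illustrative helper, not part of the driver. */
static u16 iwl_example_lq_flags(void)
{
	u16 flags = 0;

	flags |= (2 << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK;	/* color 2 */
	flags |= LQ_FLAG_RTS_BW_SIG_DYNAMIC;	/* dynamic RTS BW signalling */
	flags |= LQ_FLAG_DYNAMIC_BW_MSK;	/* allow narrower-BW Tx */

	return flags;
}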
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index c3782b48ded1..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
* @match_notify: clients waiting for match found notification
* @pass_match: clients waiting for the results
* @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
*/
struct iwl_scan_offload_profile_cfg {
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
u8 match_notify;
u8 pass_match;
u8 active_clients;
- u8 reserved[3];
+ u8 any_beacon_notify;
+ u8 reserved[2];
} __packed;
/**
@@ -530,14 +532,13 @@ struct iwl_scan_offload_schedule {
/*
* iwl_scan_offload_flags
*
- * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
- * ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
* IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
* on A band.
*/
enum iwl_scan_offload_flags {
- IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID = BIT(0),
+ IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 4aca5933a65d..1b60fdff6a56 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,9 +97,6 @@ enum iwl_sta_flags {
STA_FLG_FLG_ANT_B),
STA_FLG_PS = BIT(8),
- STA_FLG_INVALID = BIT(9),
- STA_FLG_DLP_EN = BIT(10),
- STA_FLG_SET_ALL_KEYS = BIT(11),
STA_FLG_DRAIN_FLOW = BIT(12),
STA_FLG_PAN = BIT(13),
STA_FLG_CLASS_AUTH = BIT(14),
@@ -138,7 +135,14 @@ enum iwl_sta_flags {
/**
* enum iwl_sta_key_flag - key flags for the ADD_STA host command
- * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
* @STA_KEY_FLG_KEYID_MSK: the index of the key
@@ -152,6 +156,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_WEP = (1 << 0),
STA_KEY_FLG_CCM = (2 << 0),
STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_EXT = (4 << 0),
STA_KEY_FLG_CMAC = (6 << 0),
STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
STA_KEY_FLG_EN_MSK = (7 << 0),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index d606197bde8f..b674c2a2b51c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -132,6 +132,7 @@ enum iwl_tx_flags {
#define TX_CMD_SEC_WEP 0x01
#define TX_CMD_SEC_CCM 0x02
#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_EXT 0x04
#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index bad5a552dd8d..989d7dbdca6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -141,6 +141,7 @@ enum {
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
+ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
@@ -183,6 +184,7 @@ enum {
BT_PROFILE_NOTIFICATION = 0xce,
BT_COEX_CI = 0x5d,
+ REPLY_SF_CFG_CMD = 0xd1,
REPLY_BEACON_FILTERING_CMD = 0xd2,
REPLY_DEBUG_CMD = 0xf0,
@@ -1052,6 +1054,7 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
@@ -1131,6 +1134,7 @@ struct iwl_set_calib_default_cmd {
} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
#define MAX_PORT_ID_NUM 2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
/**
* struct iwl_mcast_filter_cmd - configure multicast filter.
@@ -1363,4 +1367,65 @@ struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
struct mvm_statistics_general general;
} __packed;
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+enum iwl_sf_state {
+ SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+ SF_FULL_ON,
+ SF_UNINIT,
+ SF_INIT_OFF,
+ SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+ SF_SCENARIO_SINGLE_UNICAST,
+ SF_SCENARIO_AGG_UNICAST,
+ SF_SCENARIO_MULTICAST,
+ SF_SCENARIO_BA_RESP,
+ SF_SCENARIO_TX_RESP,
+ SF_NUM_SCENARIO
+};
+
+#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwl_sf_cfg_cmd {
+ enum iwl_sf_state state;
+ __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+ __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+ __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
+
#endif /* __fw_api_h__ */
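
Reviewer note: a sketch of how the FULL_ON defaults above could be packed into
struct iwl_sf_cfg_cmd. The helper name, the choice of the SISO watermark for
both transient states, and the assumed [aging, idle] ordering of the timeout
pairs are all assumptions for illustration; the real configuration is built by
iwl_mvm_sf_update().

/* Illustrative only; not the driver's actual Smart Fifo configuration code. */
static void iwl_example_sf_full_on_cfg(struct iwl_sf_cfg_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->state = SF_FULL_ON;

	/* one watermark per transient state (SF_TRANSIENT_STATES_NUMBER == 2) */
	cmd->watermark[0] = cpu_to_le32(SF_W_MARK_SISO);
	cmd->watermark[1] = cpu_to_le32(SF_W_MARK_SISO);

	/* assumed ordering: index 0 = aging timer, index 1 = idle timer */
	cmd->full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][0] =
		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER);
	cmd->full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][1] =
		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER);
}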
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 70e5297646b2..c03d39541f9e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -241,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_complete)
+ if (WARN_ON_ONCE(mvm->init_ucode_complete))
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -287,7 +287,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
IWL_DEBUG_RF_KILL(mvm,
"jump over all phy activities due to RF kill\n");
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
- return 1;
+ ret = 1;
+ goto out;
}
/* Send TX valid antennas before triggering calibrations */
@@ -319,9 +320,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
error:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
- if (!iwlmvm_mod_params.init_dbg) {
- iwl_trans_stop_device(mvm->trans);
- } else if (!mvm->nvm_data) {
+ if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -370,11 +369,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = -ERFKILL;
goto error;
}
- /* should stop & start HW since that INIT image just loaded */
- iwl_trans_stop_hw(mvm->trans, false);
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
+ if (!iwlmvm_mod_params.init_dbg) {
+ /*
+ * should stop and start HW since that INIT
+ * image just loaded
+ */
+ iwl_trans_stop_device(mvm->trans);
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+ }
}
if (iwlmvm_mod_params.init_dbg)
@@ -386,6 +390,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ ret = iwl_mvm_sf_update(mvm, NULL, false);
+ if (ret)
+ IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 2269a9e5cc67..6b4ea6bf8ffe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -103,7 +103,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
return 0;
default:
return -EINVAL;
- };
+ }
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index f41f9b079831..ba723d50939a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,10 +69,10 @@
#include "mvm.h"
const u8 iwl_mvm_ac_to_tx_fifo[] = {
- IWL_MVM_TX_FIFO_BK,
- IWL_MVM_TX_FIFO_BE,
- IWL_MVM_TX_FIFO_VI,
IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_BK,
};
struct iwl_mvm_mac_iface_iterator_data {
@@ -85,35 +85,15 @@ struct iwl_mvm_mac_iface_iterator_data {
bool found_vif;
};
-static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 ac;
- /* Iterator may already find the interface being added -- skip it */
- if (vif == data->vif) {
- data->found_vif = true;
+ /* Skip the interface for which we are trying to assign a tsf_id */
+ if (vif == data->vif)
return;
- }
-
- /* Mark the queues used by the vif */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
- __set_bit(vif->hw_queue[ac], data->used_hw_queues);
-
- if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
- __set_bit(vif->cab_queue, data->used_hw_queues);
-
- /*
- * Mark MAC IDs as used by clearing the available bit, and
- * (below) mark TSFs as used if their existing use is not
- * compatible with the new interface type.
- * No locking or atomic bit operations are needed since the
- * data is on the stack of the caller function.
- */
- __clear_bit(mvmvif->id, data->available_mac_ids);
/*
* The TSF is a hardware/firmware resource, there are 4 and
@@ -135,21 +115,26 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
case NL80211_IFTYPE_STATION:
/*
* The new interface is client, so if the existing one
- * we're iterating is an AP, the TSF should be used to
+ * we're iterating is an AP, and both interfaces have the
+ * same beacon interval, the same TSF should be used to
* avoid drift between the new client and existing AP,
* the existing AP will get drift updates from the new
* client context in this case
*/
if (vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+ (vif->bss_conf.beacon_int ==
+ data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
- return;
+ return;
+ }
}
break;
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so should get drift
+ * The new interface is AP/GO, so in case both interfaces
+ * have the same beacon interval, it should get drift
* updates from an existing client or use the same
* TSF as an existing GO. There's no drift between
* TSFs internally but if they used different TSFs
@@ -159,9 +144,12 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION ||
vif->type == NL80211_IFTYPE_AP) {
if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+ (vif->bss_conf.beacon_int ==
+ data->vif->bss_conf.beacon_int)) {
data->preferred_tsf = mvmvif->tsf_id;
- return;
+ return;
+ }
}
break;
default:
@@ -187,6 +175,39 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
data->preferred_tsf = NUM_TSF_IDS;
}
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 ac;
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark the queues used by the vif */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+ if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+ __set_bit(vif->cab_queue, data->used_hw_queues);
+
+ /* Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /* find a suitable tsf_id */
+ iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
/*
 * Get the mask of the queues used by the vif
*/
@@ -205,6 +226,29 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
return qmask;
}
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_tsf_id_iter, &data);
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+}
+
static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -488,6 +532,40 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
*ofdm_rates = ofdm;
}
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ /* for both sta and ap, ht_operation_mode holds the protection_mode */
+ u8 protection_mode = vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ /* The fw does not distinguish between ht and fat */
+ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+
+ IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+ /*
+ * See section 9.23.3.1 of IEEE 802.11-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ default:
+ IWL_ERR(mvm, "Illegal protection mode %d\n",
+ protection_mode);
+ break;
+ }
+}
+
static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_ctx_cmd *cmd,
@@ -495,6 +573,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *chanctx;
+ bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION);
u8 cck_ack_rates, ofdm_ack_rates;
int i;
@@ -550,18 +630,23 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cpu_to_le32(vif->bss_conf.use_short_slot ?
MAC_FLG_SHORT_SLOT : 0);
- for (i = 0; i < AC_NUM; i++) {
- cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
- cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
- cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
- cmd->ac[i].edca_txop =
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 txf = iwl_mvm_ac_to_tx_fifo[i];
+
+ cmd->ac[txf].cw_min =
+ cpu_to_le16(mvmvif->queue_params[i].cw_min);
+ cmd->ac[txf].cw_max =
+ cpu_to_le16(mvmvif->queue_params[i].cw_max);
+ cmd->ac[txf].edca_txop =
cpu_to_le16(mvmvif->queue_params[i].txop * 32);
- cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+ cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[txf].fifos_mask = BIT(txf);
}
/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
if (vif->type == NL80211_IFTYPE_AP)
- cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
+ BIT(IWL_MVM_TX_FIFO_MCAST);
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -573,16 +658,13 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->protection_flags |=
cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
-
- /*
- * I think that we should enable these 2 flags regardless the HT PROT
- * fields in the HT IE, but I am not sure. Someone knows whom to ask?...
- */
- if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ vif->bss_conf.use_cts_prot,
+ vif->bss_conf.ht_operation_mode);
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
- MAC_PROT_FLG_FAT_PROT);
- }
+ if (ht_enabled)
+ iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
}
@@ -974,7 +1056,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
- u32 rand = (prandom_u32() % (80 - 20)) + 20;
+ u32 rand = (prandom_u32() % (64 - 36)) + 36;
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
@@ -1153,10 +1235,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
- u16 *id = _data;
+ struct iwl_missed_beacons_notif *missed_beacons = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (mvmvif->id == *id)
+ if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
+ return;
+
+ /*
+ * TODO: the threshold should be adjusted based on latency conditions,
+ * and/or in case of a CS flow on one of the other AP vifs.
+ */
+ if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
+ IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
}
@@ -1165,12 +1255,19 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
- u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
+ struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+ le32_to_cpu(mb->mac_id),
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+ le32_to_cpu(mb->num_recvd_beacons),
+ le32_to_cpu(mb->num_expected_beacons));
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
- &id);
+ mb);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 74bc2c8af06d..c35b8661b395 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,9 +199,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,10 +256,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
- NL80211_FEATURE_P2P_GO_OPPPS;
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs[0].cipher) {
+ mvm->hw->n_cipher_schemes = 1;
+ mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+ }
+
#ifdef CONFIG_PM_SLEEP
if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
@@ -321,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -340,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -350,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
break;
case IEEE80211_AMPDU_TX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -398,7 +423,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_trans_stop_device(mvm->trans);
- iwl_trans_stop_hw(mvm->trans, false);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -470,7 +494,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->roc_done_wk);
iwl_trans_stop_device(mvm->trans);
- iwl_trans_stop_hw(mvm->trans, false);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -487,17 +510,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
- int ret;
-
- ret = iwl_mvm_power_disable(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to disable power management\n");
-}
-
static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -520,6 +532,20 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
return NULL;
}
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s8 tx_power)
+{
+ /* FW is in charge of regulatory enforcement */
+ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+ .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+ .pwr_restriction = cpu_to_le16(tx_power),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ sizeof(reduce_txpwr_cmd),
+ &reduce_txpwr_cmd);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -540,26 +566,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * interface.
- * Note: the method below does not count the new interface being added
- * at this moment.
- */
+ /* Counting the number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
- }
/*
* The AP binding flow can be done only after the beacon
@@ -590,11 +599,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- /*
- * Update power state on the new interface. Admittedly, based on
- * mac80211 logics this power update will disable power management
- */
- iwl_mvm_power_update_mode(mvm, vif);
+ iwl_mvm_power_disable(mvm, vif);
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -655,9 +660,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_release:
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
+
+ /* TODO: remove this when legacy PM is discarded */
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_power_update_iterator, mvm);
+
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -743,21 +751,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = NULL;
}
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * Check if only one additional interface remains after removing
- * current one. Update power mode on the remaining interface.
- */
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count == 1) {
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
- }
+
+ /* TODO: remove this when legacy PM is discarded */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_update_iterator, mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -766,23 +766,91 @@ out_release:
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- s8 tx_power)
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
- /* FW is in charge of regulatory enforcement */
- struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
- .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
- .pwr_restriction = cpu_to_le16(tx_power),
+ return 0;
+}
+
+struct iwl_mvm_mc_iter_data {
+ struct iwl_mvm *mvm;
+ int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mc_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ int ret, len;
+
+ /* if we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ cmd->port_id = data->port_id++;
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ if (ret)
+ IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_mc_iter_data iter_data = {
+ .mvm = mvm,
};
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
- sizeof(reduce_txpwr_cmd),
- &reduce_txpwr_cmd);
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+ return;
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mc_iface_iterator, &iter_data);
}
-static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
- return 0;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count = netdev_hw_addr_list_count(mc_list);
+ bool pass_all = false;
+ int len;
+
+ if (addr_count > MAX_MCAST_FILTERING_ADDRESSES) {
+ pass_all = true;
+ addr_count = 0;
+ }
+
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+ return (u64)(unsigned long)cmd;
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr, ETH_ALEN);
+ cmd->count++;
+ }
+
+ return (u64)(unsigned long)cmd;
}
static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
@@ -790,21 +858,22 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- *total_flags = 0;
-}
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
-static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mcast_filter_cmd mcast_filter_cmd = {
- .pass_all = 1,
- };
+ mutex_lock(&mvm->mutex);
- memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ /* replace previous configuration */
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = cmd;
- return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
- sizeof(mcast_filter_cmd),
- &mcast_filter_cmd);
+ if (!cmd)
+ goto out;
+
+ iwl_mvm_recalc_multicast(mvm);
+out:
+ mutex_unlock(&mvm->mutex);
+ *total_flags = 0;
}
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -815,6 +884,14 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the station interface was
+ * added.
+ */
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
@@ -827,7 +904,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
- iwl_mvm_configure_mcast_filter(mvm, vif);
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
@@ -849,7 +925,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_protect_session(mvm, vif, dur, dur,
5 * dur);
}
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ /*
+ * If the update fails, SF might be running in associated
+ * mode while disassociated, which is forbidden.
+ */
+ WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+ "Failed to update SF upon disassociation\n");
+
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
@@ -861,6 +947,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
}
+ iwl_mvm_recalc_multicast(mvm);
+
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -874,6 +962,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update power mode\n");
}
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+ IEEE80211_SMPS_AUTOMATIC);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -881,7 +971,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
+ } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
+ BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
@@ -916,6 +1007,13 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the AP interface was added.
+ */
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
/* Add the mac context */
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
@@ -934,9 +1032,16 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;
+ /* must be set before quota calculations */
+ mvmvif->ap_ibss_active = true;
+
+ /* power update needs to be done before quotas */
+ mvm->bound_vif_cnt++;
+ iwl_mvm_power_update_binding(mvm, vif, true);
+
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
- goto out_rm_bcast;
+ goto out_quota_failed;
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
@@ -947,7 +1052,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
return 0;
-out_rm_bcast:
+out_quota_failed:
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+ mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
@@ -979,6 +1087,10 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
+
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+
iwl_mvm_mac_ctxt_remove(mvm, vif);
mutex_unlock(&mvm->mutex);
@@ -990,6 +1102,22 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
struct ieee80211_bss_conf *bss_conf,
u32 changes)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH;
+ int ret;
+
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (changes & ht_change) {
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+ }
+
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
@@ -1080,7 +1208,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
switch (cmd) {
case STA_NOTIFY_SLEEP:
@@ -1102,6 +1230,28 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
}
}
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+ /*
+ * This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ */
+ mutex_lock(&mvm->mutex);
+ if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-ENOENT));
+ mutex_unlock(&mvm->mutex);
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1149,7 +1299,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
if (ret == 0)
iwl_mvm_rs_rate_init(mvm, sta,
- mvmvif->phy_ctxt->channel->band);
+ mvmvif->phy_ctxt->channel->band,
+ true);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
@@ -1187,6 +1338,17 @@ static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ changed & IEEE80211_RC_NSS_CHANGED)
+ iwl_mvm_sf_update(mvm, vif, false);
+}
+
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -1309,7 +1471,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
*/
return 0;
default:
- return -EOPNOTSUPP;
+ /* currently FW supports only one optional cipher scheme */
+ if (hw->n_cipher_schemes &&
+ hw->cipher_schemes->cipher == key->cipher)
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ else
+ return -EOPNOTSUPP;
}
mutex_lock(&mvm->mutex);
@@ -1515,7 +1682,7 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -1553,13 +1720,14 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
- IEEE80211_CHANCTX_CHANGE_RADAR)),
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
"Cannot change PHY. Ref=%d, changed=0x%X\n",
phy_ctxt->ref, changed))
return;
mutex_lock(&mvm->mutex);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
iwl_mvm_bt_coex_vif_change(mvm);
@@ -1602,7 +1770,13 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out_unlock;
/*
- * Setting the quota at this stage is only required for monitor
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ */
+ mvm->bound_vif_cnt++;
+ iwl_mvm_power_update_binding(mvm, vif, true);
+
+ /* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
* will handle quota settings.
*/
@@ -1617,6 +1791,8 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1648,6 +1824,9 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_binding_remove_vif(mvm, vif);
+ mvm->bound_vif_cnt--;
+ iwl_mvm_power_update_binding(mvm, vif, false);
+
out_unlock:
mvmvif->phy_ctxt = NULL;
mutex_unlock(&mvm->mutex);
@@ -1744,14 +1923,17 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.add_interface = iwl_mvm_mac_add_interface,
.remove_interface = iwl_mvm_mac_remove_interface,
.config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
.configure_filter = iwl_mvm_configure_filter,
.bss_info_changed = iwl_mvm_bss_info_changed,
.hw_scan = iwl_mvm_mac_hw_scan,
.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index fed21ef4162d..2b0ba1fc3c82 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,6 +81,7 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0,
@@ -151,7 +152,7 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
@@ -163,6 +164,8 @@ struct iwl_mvm_power_ops {
struct ieee80211_vif *vif);
int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ void (*power_update_binding)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool assign);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
@@ -181,6 +184,7 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+ MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
};
struct iwl_dbgfs_pm {
@@ -193,6 +197,7 @@ struct iwl_dbgfs_pm {
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
+ bool uapsd_misbehaving;
int mask;
};
@@ -269,8 +274,8 @@ struct iwl_mvm_vif_bf_data {
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
* @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the requests of of differents parts of the driver, regard
- the desired smps mode.
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ * combined on update to yield the overall request to mac80211.
*/
struct iwl_mvm_vif {
u16 id;
@@ -323,14 +328,19 @@ struct iwl_mvm_vif {
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm *mvm;
struct dentry *dbgfs_dir;
struct dentry *dbgfs_slink;
- void *dbgfs_data;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+
+ /* FW identified misbehaving AP */
+ u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+ bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -479,6 +489,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -489,11 +500,19 @@ struct iwl_mvm {
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
+ /* last smart fifo state that was successfully sent to firmware */
+ enum iwl_sf_state sf_state;
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
bool disable_power_off;
bool disable_power_off_d3;
+
+ struct debugfs_blob_wrapper nvm_hw_blob;
+ struct debugfs_blob_wrapper nvm_sw_blob;
+ struct debugfs_blob_wrapper nvm_calib_blob;
+ struct debugfs_blob_wrapper nvm_prod_blob;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -507,12 +526,6 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
- /*
- * This counter of created interfaces is referenced only in conjunction
- * with FW limitation related to power management. Currently PM is
- * supported only on a single interface.
- * IMPORTANT: this variable counts all interfaces except P2P device.
- */
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@@ -531,6 +544,7 @@ struct iwl_mvm {
bool store_d3_resume_sram;
void *d3_resume_sram;
u32 d3_test_pme_ptr;
+ struct ieee80211_vif *keep_vif;
#endif
#endif
@@ -554,6 +568,11 @@ struct iwl_mvm {
u8 aux_queue;
u8 first_agg_queue;
u8 last_agg_queue;
+
+ u8 bound_vif_cnt;
+
+ /* Indicates whether device power save is prevented */
+ bool ps_prevented;
};
/* Extract MVM priv from op_mode and _hw */
@@ -693,6 +712,8 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -750,8 +771,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- u8 flags, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
/* power management */
static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
@@ -773,6 +793,19 @@ static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
return 0;
}
+static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool assign)
+{
+ if (mvm->pm_ops->power_update_binding)
+ mvm->pm_ops->power_update_binding(mvm, vif, assign);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -864,4 +897,8 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool added_vif);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2beffd028b67..35b71af78d02 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -367,16 +367,17 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
+ if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ "Invalid NVM section ID %d\n", section_id)) {
+ ret = -EINVAL;
+ break;
+ }
+
temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
break;
}
- if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
- IWL_ERR(mvm, "Invalid NVM section ID\n");
- ret = -EINVAL;
- break;
- }
mvm->nvm_sections[section_id].data = temp;
mvm->nvm_sections[section_id].length = section_size;
@@ -391,17 +392,16 @@ out:
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
- int i, ret;
- u16 section_id;
+ int i, ret = 0;
struct iwl_nvm_section *sections = mvm->nvm_sections;
IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
- section_id = nvm_to_read[i];
- ret = iwl_nvm_write_section(mvm, section_id,
- sections[section_id].data,
- sections[section_id].length);
+ for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+ if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+ continue;
+ ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+ sections[i].length);
if (ret < 0) {
IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
break;
@@ -443,6 +443,29 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
}
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ switch (section) {
+ case NVM_SECTION_TYPE_HW:
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_SW:
+ mvm->nvm_sw_blob.data = temp;
+ mvm->nvm_sw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_CALIBRATION:
+ mvm->nvm_calib_blob.data = temp;
+ mvm->nvm_calib_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PRODUCTION:
+ mvm->nvm_prod_blob.data = temp;
+ mvm->nvm_prod_blob.size = ret;
+ break;
+ default:
+ WARN(1, "section: %d", section);
+ }
+#endif
}
kfree(nvm_buffer);
if (ret < 0)
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d86083c6f445..a3d43de342d7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -236,6 +236,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
false),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+ RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
};
#undef RX_HANDLER
#define CMD(x) [x] = #x
@@ -307,10 +309,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
+ CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
};
#undef CMD
@@ -341,7 +345,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
- op_mode->trans = trans;
mvm = IWL_OP_MODE_GET_MVM(op_mode);
mvm->dev = trans->dev;
@@ -359,6 +362,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
}
+ mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
@@ -424,7 +428,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* there is no need to unnecessarily power up the NIC at driver load
*/
if (iwlwifi_mod_params.nvm_file) {
- iwl_nvm_init(mvm);
+ err = iwl_nvm_init(mvm);
+ if (err)
+ goto out_free;
} else {
err = iwl_trans_start_hw(mvm->trans);
if (err)
@@ -432,16 +438,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true);
+ iwl_trans_stop_device(trans);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
}
-
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_hw(mvm->trans, false);
}
scan_size = sizeof(struct iwl_scan_cmd) +
@@ -470,11 +473,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_unregister:
ieee80211_unregister_hw(mvm->hw);
+ iwl_mvm_leds_exit(mvm);
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
if (!iwlwifi_mod_params.nvm_file)
- iwl_trans_stop_hw(trans, true);
+ iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -491,12 +495,14 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = NULL;
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
- iwl_trans_stop_hw(mvm->trans, true);
+ iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);
mvm->phy_db = NULL;
@@ -661,6 +667,8 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ if (state && mvm->cur_ucode != IWL_UCODE_INIT)
+ iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index a8652ddd6bed..b7268c0b3333 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 550824aa84ea..d9eab3b7bb9f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <net/mac80211.h>
@@ -186,6 +185,92 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ continue;
+
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
@@ -198,8 +283,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_ac_numbers ac;
- bool tid_found = false;
+ bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -217,7 +301,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+ mvm->ps_prevented)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -227,7 +312,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps)
+ if (!vif->bss_conf.ps || mvmvif->pm_prevented)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -269,81 +354,24 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
- for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
- if (!mvmvif->queue_params[ac].uapsd)
- continue;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-
- cmd->uapsd_ac_flags |= BIT(ac);
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ allow_uapsd = false;
- /* QNDP TID - the highest TID with no admission control */
- if (!tid_found && !mvmvif->queue_params[ac].acm) {
- tid_found = true;
- switch (ac) {
- case IEEE80211_AC_VO:
- cmd->qndp_tid = 6;
- break;
- case IEEE80211_AC_VI:
- cmd->qndp_tid = 5;
- break;
- case IEEE80211_AC_BE:
- cmd->qndp_tid = 0;
- break;
- case IEEE80211_AC_BK:
- cmd->qndp_tid = 1;
- break;
- }
- }
- }
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
- BIT(IEEE80211_AC_VI) |
- BIT(IEEE80211_AC_BE) |
- BIT(IEEE80211_AC_BK))) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
- cmd->snooze_interval =
- cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
- cmd->snooze_window =
- (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
- cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
- cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
- }
-
- cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
-
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
- } else {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
- }
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ allow_uapsd = false;
+ /*
+ * Avoid using uAPSD if P2P client is associated to GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ */
+ if (vif->p2p &&
+ vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT)
+ allow_uapsd = false;
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
- } else {
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
- }
- cmd->heavy_tx_thld_percentage =
- IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
- cmd->heavy_rx_thld_percentage =
- IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
- }
+ if (allow_uapsd)
+ iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
@@ -381,6 +409,13 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags &=
cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
}
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+ u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+ if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+ cmd->flags |= cpu_to_le16(flag);
+ else
+ cmd->flags &= cpu_to_le16(flag);
+ }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
@@ -391,18 +426,11 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION)
return 0;
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
@@ -446,7 +474,7 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
@@ -455,7 +483,8 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+ force_disable)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -472,6 +501,78 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
&cmd);
}
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ return _iwl_mvm_power_update_device(mvm, false);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+ ETH_ALEN))
+ memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *ap_sta_id = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* The ap_sta_id is not expected to change during the current association,
+ * so no explicit protection is needed
+ */
+ if (mvmvif->ap_sta_id == *ap_sta_id)
+ memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN);
+}
+
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+
+ return 0;
+}
+
+static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = _data;
+ int ret;
+
+ mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+
+ ret = iwl_mvm_power_mac_update_mode(mvm, vif);
+ WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+}
+
+static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool assign)
+{
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ int ret = _iwl_mvm_power_update_device(mvm, assign);
+ mvm->ps_prevented = assign;
+ WARN_ONCE(ret, "Failed to update power device state\n");
+ }
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_binding_iterator,
+ mvm);
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
@@ -494,70 +595,58 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
le16_to_cpu(cmd.keep_alive_seconds));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- cmd.skip_dtim_periods);
- if (!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos,
- "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- }
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- cmd.lprx_rssi_threshold);
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- pos +=
- scnprintf(buf+pos, bufsz-pos,
- "rx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.rx_data_timeout_uapsd));
- pos +=
- scnprintf(buf+pos, bufsz-pos,
- "tx_data_timeout_uapsd = %d\n",
- le32_to_cpu(cmd.tx_data_timeout_uapsd));
- pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
- cmd.qndp_tid);
- pos += scnprintf(buf+pos, bufsz-pos,
- "uapsd_ac_flags = 0x%x\n",
- cmd.uapsd_ac_flags);
- pos += scnprintf(buf+pos, bufsz-pos,
- "uapsd_max_sp = %d\n",
- cmd.uapsd_max_sp);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_tx_thld_packets = %d\n",
- cmd.heavy_tx_thld_packets);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_rx_thld_packets = %d\n",
- cmd.heavy_rx_thld_packets);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_tx_thld_percentage = %d\n",
- cmd.heavy_tx_thld_percentage);
- pos += scnprintf(buf+pos, bufsz-pos,
- "heavy_rx_thld_percentage = %d\n",
- cmd.heavy_rx_thld_percentage);
- pos +=
- scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
- 1 : 0);
- }
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos,
- "snooze_interval = %d\n",
- cmd.snooze_interval);
- pos += scnprintf(buf+pos, bufsz-pos,
- "snooze_window = %d\n",
- cmd.snooze_window);
- }
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
}
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+ 1 : 0);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+ cmd.snooze_window);
+
return pos;
}
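
The debugfs dump is flattened from nested if blocks into a straight run of appends with an early return as soon as a feature flag is missing, so every field is printed at a single indentation level. The accumulate-into-a-bounded-buffer pattern looks like the sketch below, where plain snprintf stands in for the kernel's scnprintf (which clamps its return value to what was actually written) and the field values are made up:

    #include <stdio.h>

    /* Append formatted fields into buf and return the number of bytes used.
     * Each optional group of fields is guarded by an early return, mirroring
     * the flattened debugfs dump above.
     */
    static int dump_power_params(char *buf, int bufsz, int pm_enabled, int uapsd_enabled)
    {
        int pos = 0;

        pos += snprintf(buf + pos, bufsz - pos, "keep_alive = %d\n", 25);
        if (!pm_enabled)
            return pos;

        pos += snprintf(buf + pos, bufsz - pos, "skip_over_dtim = %d\n", 1);
        if (!uapsd_enabled)
            return pos;

        pos += snprintf(buf + pos, bufsz - pos, "uapsd_max_sp = %d\n", 2);
        return pos;
    }

    int main(void)
    {
        char buf[256];
        int n = dump_power_params(buf, sizeof(buf), 1, 0);

        printf("%.*s(%d bytes)\n", n, buf, n);
        return 0;
    }
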
@@ -654,6 +743,7 @@ const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
.power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
+ .power_update_binding = _iwl_mvm_power_update_binding,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
index 2ce79bad5845..ef712ae5bc62 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 17e2bc827f9a..ce5db6c4ef7e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,8 +217,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
} else {
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- cmd.quotas[idx].max_duration =
- cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
}
idx++;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index a0b4cc8d9c3b..6abf74e1351f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
*
*****************************************************************************/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -42,33 +41,37 @@
#define RS_NAME "iwl-mvm-rs"
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define RS_LEGACY_RETRIES_PER_RATE 1
+#define RS_HT_VHT_RETRIES_PER_RATE 2
+#define RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define RS_INITIAL_MIMO_NUM_RATES 3
+#define RS_INITIAL_SISO_NUM_RATES 3
+#define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_SISO_NUM_RATES 3
+#define RS_SECONDARY_SISO_RETRIES 1
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
+
static u8 rs_ht_to_legacy[] = {
- [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
- [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
- [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
- [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
- [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
- [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
- [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
- [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
};
static const u8 ant_toggle_lookup[] = {
@@ -126,6 +129,196 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
};
+enum rs_action {
+ RS_ACTION_STAY = 0,
+ RS_ACTION_DOWNSCALE = -1,
+ RS_ACTION_UPSCALE = 1,
+};
+
+enum rs_column_mode {
+ RS_INVALID = 0,
+ RS_LEGACY,
+ RS_SISO,
+ RS_MIMO2,
+};
+
+#define MAX_NEXT_COLUMNS 5
+#define MAX_COLUMN_CHECKS 3
+
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl);
+
+struct rs_tx_column {
+ enum rs_column_mode mode;
+ u8 ant;
+ bool sgi;
+ enum rs_column next_columns[MAX_NEXT_COLUMNS];
+ allow_column_func_t checks[MAX_COLUMN_CHECKS];
+};
+
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return false;
+
+ if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ return true;
+}
+
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ return true;
+}
+
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct rs_rate *rate = &tbl->rate;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+
+ return false;
+}
+
+static const struct rs_tx_column rs_tx_columns[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ },
+ [RS_COLUMN_LEGACY_ANT_B] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2_SGI] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ rs_sgi_allow,
+ },
+ },
+};
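
Each rs_tx_columns entry describes one Tx configuration (modulation mode, antenna, guard interval), the columns worth probing next, and the checks[] predicates that must all pass before the column may be used for a given station. A stripped-down sketch of evaluating such a descriptor; the station capabilities and predicate names are mocked for the example:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_CHECKS 3

    struct demo_sta { bool ht_supported; bool static_smps; };

    typedef bool (*allow_fn)(const struct demo_sta *sta);

    static bool allow_siso(const struct demo_sta *sta) { return sta->ht_supported; }
    static bool allow_mimo(const struct demo_sta *sta)
    {
        return sta->ht_supported && !sta->static_smps;
    }

    struct demo_column {
        const char *name;
        allow_fn checks[MAX_CHECKS];   /* NULL-terminated list of predicates */
    };

    /* A column is usable only if every non-NULL check passes. */
    static bool column_allowed(const struct demo_column *col, const struct demo_sta *sta)
    {
        for (int i = 0; i < MAX_CHECKS && col->checks[i]; i++)
            if (!col->checks[i](sta))
                return false;
        return true;
    }

    int main(void)
    {
        struct demo_column mimo = { "MIMO2", { allow_siso, allow_mimo } };
        struct demo_sta sta = { .ht_supported = true, .static_smps = true };

        printf("%s allowed: %d\n", mimo.name, column_allowed(&mimo, &sta));
        return 0;
    }
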
+
static inline u8 rs_extract_rate(u32 rate_n_flags)
{
/* also works for HT because bits 7:6 are zero there */
@@ -163,28 +356,19 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
return idx;
}
- return -1;
+ return IWL_RATE_INVALID;
}
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags);
-#else
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags)
-{}
-#endif
-
/**
* The following tables contain the expected throughput metrics for all rates
*
@@ -264,6 +448,52 @@ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
#define MCS_INDEX_PER_STREAM (8)
+static const char *rs_pretty_ant(u8 ant)
+{
+ static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+ [ANT_C] = "C",
+ [ANT_AC] = "AC",
+ [ANT_BC] = "BC",
+ [ANT_ABC] = "ABC",
+ };
+
+ if (ant > ANT_ABC)
+ return "UNKNOWN";
+
+ return ant_name[ant];
+}
+
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+{
+ static const char * const lq_types[] = {
+ [LQ_NONE] = "NONE",
+ [LQ_LEGACY_A] = "LEGACY_A",
+ [LQ_LEGACY_G] = "LEGACY_G",
+ [LQ_HT_SISO] = "HT SISO",
+ [LQ_HT_MIMO2] = "HT MIMO",
+ [LQ_VHT_SISO] = "VHT SISO",
+ [LQ_VHT_MIMO2] = "VHT MIMO",
+ };
+
+ if (type < LQ_NONE || type >= LQ_MAX)
+ return "UNKNOWN";
+
+ return lq_types[type];
+}
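
Both pretty-printers use the same idiom: a designated-initializer string table indexed by the enum value, guarded by a bounds check before dereferencing. A tiny self-contained version of that idiom, with the enum trimmed down for the example:

    #include <stdio.h>

    enum lq_type { LQ_NONE, LQ_LEGACY_A, LQ_LEGACY_G, LQ_HT_SISO, LQ_MAX };

    static const char *lq_type_str(enum lq_type t)
    {
        static const char * const names[] = {
            [LQ_NONE]     = "NONE",
            [LQ_LEGACY_A] = "LEGACY_A",
            [LQ_LEGACY_G] = "LEGACY_G",
            [LQ_HT_SISO]  = "HT SISO",
        };

        if (t >= LQ_MAX || !names[t])
            return "UNKNOWN";
        return names[t];
    }

    int main(void)
    {
        printf("%s / %s\n", lq_type_str(LQ_HT_SISO), lq_type_str(LQ_MAX));
        return 0;
    }
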
+
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+ const char *prefix)
+{
+ IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n",
+ prefix, rs_pretty_lq_type(rate->type),
+ rate->index, rs_pretty_ant(rate->ant),
+ rate->bw, rate->sgi);
+}
+
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
{
window->data = 0;
@@ -271,7 +501,6 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->success_ratio = IWL_INVALID_VALUE;
window->counter = 0;
window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -279,30 +508,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
}
-#ifdef CONFIG_MAC80211_DEBUGFS
-/**
- * Program the device to use fixed rate for frame transmit
- * This is for debugging/testing only
- * once the device start use fixed rate, we need to reload the module
- * to being back the normal operation.
- */
-static void rs_program_fix_rate(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta)
-{
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
- }
-}
-#endif
-
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
@@ -428,192 +633,168 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
else
window->average_tpt = IWL_INVALID_VALUE;
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
return 0;
}
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-/* FIXME:RS:remove this function and put the flags statically in the table */
-static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl, int index)
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+ struct rs_rate *rate)
{
- u32 rate_n_flags = 0;
+ u32 ucode_rate = 0;
+ int index = rate->index;
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
RATE_MCS_ANT_ABC_MSK);
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags |= iwl_rates[index].plcp;
+ if (is_legacy(rate)) {
+ ucode_rate |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
- return rate_n_flags;
+ ucode_rate |= RATE_MCS_CCK_MSK;
+ return ucode_rate;
}
- if (is_ht(tbl->lq_type)) {
+ if (is_ht(rate)) {
if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
index = IWL_LAST_HT_RATE;
}
- rate_n_flags |= RATE_MCS_HT_MSK;
+ ucode_rate |= RATE_MCS_HT_MSK;
- if (is_ht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_siso;
- else if (is_ht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
+ if (is_ht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
else
WARN_ON_ONCE(1);
- } else if (is_vht(tbl->lq_type)) {
+ } else if (is_vht(rate)) {
if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
index = IWL_LAST_VHT_RATE;
}
- rate_n_flags |= RATE_MCS_VHT_MSK;
- if (is_vht_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_siso;
- else if (is_vht_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ ucode_rate |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
else
WARN_ON_ONCE(1);
} else {
- IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
}
- rate_n_flags |= tbl->bw;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
+ ucode_rate |= rate->bw;
+ if (rate->sgi)
+ ucode_rate |= RATE_MCS_SGI_MSK;
- return rate_n_flags;
+ return ucode_rate;
}
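
ucode_rate_from_rs_rate() packs the rs_rate fields - antenna, PLCP/rate index, HT/VHT flags, bandwidth and SGI - into the single rate_n_flags word the firmware consumes. The bit-packing idea in isolation, using invented field positions rather than the real RATE_MCS_* layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative field layout; the real RATE_MCS_* masks differ. */
    #define DEMO_IDX_MASK  0x000000ff
    #define DEMO_ANT_POS   8
    #define DEMO_SGI_BIT   (1u << 12)
    #define DEMO_HT_BIT    (1u << 13)

    struct demo_rate { uint8_t index; uint8_t ant; bool sgi; bool ht; };

    static uint32_t pack_rate(const struct demo_rate *r)
    {
        uint32_t v = r->index & DEMO_IDX_MASK;

        v |= (uint32_t)r->ant << DEMO_ANT_POS;
        if (r->sgi)
            v |= DEMO_SGI_BIT;
        if (r->ht)
            v |= DEMO_HT_BIT;
        return v;
    }

    static struct demo_rate unpack_rate(uint32_t v)
    {
        struct demo_rate r = {
            .index = v & DEMO_IDX_MASK,
            .ant = (v >> DEMO_ANT_POS) & 0x3,
            .sgi = !!(v & DEMO_SGI_BIT),
            .ht = !!(v & DEMO_HT_BIT),
        };
        return r;
    }

    int main(void)
    {
        struct demo_rate r = { .index = 7, .ant = 1, .sgi = true, .ht = true };
        uint32_t v = pack_rate(&r);
        struct demo_rate back = unpack_rate(v);

        printf("packed 0x%08x, roundtrip index %d ant %d sgi %d ht %d\n",
               v, back.index, back.ant, back.sgi, back.ht);
        return 0;
    }
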
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+ enum ieee80211_band band,
+ struct rs_rate *rate)
{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+ u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+ u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
u8 nss;
- memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
- *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ memset(rate, 0, sizeof(*rate));
+ rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
+ if (rate->index == IWL_RATE_INVALID)
return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->bw = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
+
+ rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
/* Legacy */
- if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
- !(rate_n_flags & RATE_MCS_VHT_MSK)) {
+ if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+ !(ucode_rate & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_LEGACY_A;
+ rate->type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_LEGACY_G;
+ rate->type = LQ_LEGACY_G;
}
return 0;
}
/* HT or VHT */
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
+ if (ucode_rate & RATE_MCS_SGI_MSK)
+ rate->sgi = true;
- tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+ rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ if (ucode_rate & RATE_MCS_HT_MSK) {
+ nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
RATE_HT_MCS_NSS_POS) + 1;
if (nss == 1) {
- tbl->lq_type = LQ_HT_SISO;
+ rate->type = LQ_HT_SISO;
WARN_ON_ONCE(num_of_ant != 1);
} else if (nss == 2) {
- tbl->lq_type = LQ_HT_MIMO2;
+ rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
} else {
WARN_ON_ONCE(1);
}
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ } else if (ucode_rate & RATE_MCS_VHT_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
if (nss == 1) {
- tbl->lq_type = LQ_VHT_SISO;
+ rate->type = LQ_VHT_SISO;
WARN_ON_ONCE(num_of_ant != 1);
} else if (nss == 2) {
- tbl->lq_type = LQ_VHT_MIMO2;
+ rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
} else {
WARN_ON_ONCE(1);
}
}
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
- WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(tbl->lq_type));
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(rate));
return 0;
}
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
-static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
{
u8 new_ant_type;
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ if (!rate->ant || rate->ant > ANT_ABC)
return 0;
- if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+ if (!rs_is_valid_ant(valid_ant, rate->ant))
return 0;
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
+ new_ant_type = ant_toggle_lookup[rate->ant];
- while ((new_ant_type != tbl->ant_type) &&
+ while ((new_ant_type != rate->ant) &&
!rs_is_valid_ant(valid_ant, new_ant_type))
new_ant_type = ant_toggle_lookup[new_ant_type];
- if (new_ant_type == tbl->ant_type)
+ if (new_ant_type == rate->ant)
return 0;
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ rate->ant = new_ant_type;
+
return 1;
}
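
rs_toggle_antenna() follows ant_toggle_lookup until it lands on another antenna configuration the hardware supports, and reports failure if it wraps back to where it started. The loop is easier to see standalone; the lookup values and validity mask below are illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    enum { ANT_NONE = 0x0, ANT_A = 0x1, ANT_B = 0x2, ANT_AB = 0x3 };

    /* For each configuration, the next one to try. */
    static const unsigned char toggle_lookup[] = {
        [ANT_NONE] = ANT_NONE,
        [ANT_A]    = ANT_B,
        [ANT_B]    = ANT_AB,
        [ANT_AB]   = ANT_A,
    };

    static bool ant_valid(unsigned valid_mask, unsigned ant)
    {
        return (ant & valid_mask) == ant;
    }

    /* Returns true and updates *ant if a different supported antenna was found. */
    static bool toggle_antenna(unsigned valid_mask, unsigned *ant)
    {
        unsigned next = toggle_lookup[*ant];

        while (next != *ant && !ant_valid(valid_mask, next))
            next = toggle_lookup[next];

        if (next == *ant)
            return false;
        *ant = next;
        return true;
    }

    int main(void)
    {
        unsigned ant = ANT_A;

        /* Both chains present: antenna A toggles to antenna B. */
        printf("toggled: %d, new ant: 0x%x\n",
               toggle_antenna(ANT_A | ANT_B, &ant), ant);
        return 0;
    }
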
-/**
- * rs_get_supported_rates - get the available rates
- *
- * if management frame or broadcast frame only return
- * basic available rates.
- *
- */
static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
+ struct rs_rate *rate)
{
- if (is_legacy(rate_type))
+ if (is_legacy(rate))
return lq_sta->active_legacy_rate;
- else if (is_siso(rate_type))
+ else if (is_siso(rate))
return lq_sta->active_siso_rate;
- else if (is_mimo2(rate_type))
+ else if (is_mimo2(rate))
return lq_sta->active_mimo2_rate;
WARN_ON_ONCE(1);
@@ -628,7 +809,7 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
/* 802.11A or ht walks to the next literal adjacent rate in
* the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
int i;
u32 mask;
@@ -676,73 +857,80 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
return (high << 8) | low;
}
-static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
{
- s32 low;
- u16 rate_mask;
+ return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+}
+
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+ */
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ u8 low;
u16 high_low;
- u8 switch_to_legacy = 0;
+ u16 rate_mask;
struct iwl_mvm *mvm = lq_sta->drv;
- /* check if we need to switch from HT to legacy rates.
- * assumption is that mandatory rates (1Mbps or 6Mbps)
- * are always supported (spec demand) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_LEGACY_A;
- else
- tbl->lq_type = LQ_LEGACY_G;
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
+ high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+ rate->type);
+ low = high_low & 0xff;
- if (num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ /* Bottom rate of column reached */
+ if (low == IWL_RATE_INVALID)
+ return true;
- tbl->bw = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
+ rate->index = low;
+ return false;
+}
- rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ struct iwl_mvm *mvm = lq_sta->drv;
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
+ if (is_legacy(rate)) {
+ /* No column to downgrade from Legacy */
+ return;
+ } else if (is_siso(rate)) {
+ /* Downgrade to Legacy if we were in SISO */
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ rate->type = LQ_LEGACY_A;
else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
+ rate->type = LQ_LEGACY_G;
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ rate->index > IWL_RATE_MCS_9_INDEX);
+
+ rate->index = rs_ht_to_legacy[rate->index];
+ } else {
+ /* Downgrade to SISO with same MCS if in MIMO */
+ rate->type = is_vht_mimo2(rate) ?
+ LQ_VHT_SISO : LQ_HT_SISO;
}
- high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
- if (low == IWL_RATE_INVALID)
- low = scale_index;
+ if (num_of_ant(rate->ant) > 1)
+ rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-out:
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
+ /* Relevant in both switching to SISO or Legacy */
+ rate->sgi = false;
+
+ if (!rs_rate_supported(lq_sta, rate))
+ rs_get_lower_rate_in_column(lq_sta, rate);
}
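
When the bottom of a column is reached, the column itself is downgraded: MIMO drops to SISO at the same MCS, SISO drops to a legacy rate (via rs_ht_to_legacy) with SGI cleared and a single antenna, and legacy has nowhere further to go. A compact sketch of that downgrade chain with invented enum and table names:

    #include <stdio.h>

    enum demo_mode { MODE_LEGACY, MODE_SISO, MODE_MIMO2 };

    /* Map an HT/VHT MCS index to a roughly equivalent legacy rate index. */
    static const int mcs_to_legacy[] = { 0, 1, 2, 3, 4, 5, 6, 7, 7, 7 };

    struct demo_rate { enum demo_mode mode; int index; int sgi; };

    static void downgrade_column(struct demo_rate *r)
    {
        switch (r->mode) {
        case MODE_MIMO2:          /* MIMO -> SISO, keep the MCS */
            r->mode = MODE_SISO;
            break;
        case MODE_SISO:           /* SISO -> legacy, translate the index */
            r->mode = MODE_LEGACY;
            r->index = mcs_to_legacy[r->index];
            break;
        case MODE_LEGACY:         /* nothing below legacy */
            break;
        }
        r->sgi = 0;               /* SGI is dropped on any downgrade */
    }

    int main(void)
    {
        struct demo_rate r = { MODE_MIMO2, 8, 1 };

        downgrade_column(&r);
        printf("mode %d index %d sgi %d\n", r.mode, r.index, r.sgi);
        downgrade_column(&r);
        printf("mode %d index %d sgi %d\n", r.mode, r.index, r.sgi);
        return 0;
    }
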
-/*
- * Simple function to compare two rate scale table types
- */
-static bool table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
+/* Simple function to compare two rate scale table types */
+static inline bool rs_rate_match(struct rs_rate *a,
+ struct rs_rate *b)
{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
+ return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
}
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -766,7 +954,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
{
int legacy_success;
int retries;
- int rs_index, mac_index, i;
+ int mac_index, i;
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_lq_cmd *table;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -774,13 +962,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
+ u32 ucode_rate;
+ struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- IWL_DEBUG_RATE_LIMIT(mvm,
- "get frame ack response, update rate scale window\n");
-
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
@@ -808,10 +993,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* to a new "search" mode (which might become the new "active" mode).
*/
table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
if (info->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
+ rate.index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
/* For HT packets, map MCS to PLCP */
@@ -834,19 +1019,19 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
- (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
- (tbl_type.ant_type != info->status.antenna) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
+ (rate.ant != info->status.antenna) ||
+ (!!(ucode_rate & RATE_MCS_HT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
- (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+ (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
+ (rate.index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
+ mac_index, rate.index, ucode_rate);
/*
* Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -855,7 +1040,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+			IWL_DEBUG_RATE(mvm,
+				       "Too many rate mismatches. Send sync LQ. rs_state %d\n",
+				       lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -864,28 +1052,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter = 0;
/* Figure out if rate scale algorithm is in active or search table */
- if (table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ if (rs_rate_match(&rate,
+ &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (table_type_matches(
- &tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ } else if (rs_rate_match(&rate,
+ &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
} else {
IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type,
- tmp_tbl->is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type,
- tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type,
- tbl_type.is_SGI);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &rate, "ACTUAL");
+
/*
* no matching table found, let's by-pass the data collection
* and continue to perform rate scale to find the rate table
@@ -902,15 +1085,14 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
- &rs_index);
- rs_collect_tx_data(curr_tbl, rs_index,
+ ucode_rate = le32_to_cpu(table->rs_table[0]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+ rs_collect_tx_data(curr_tbl, rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
lq_sta->total_success += info->status.ampdu_ack_len;
lq_sta->total_failed += (info->status.ampdu_len -
info->status.ampdu_ack_len);
@@ -927,31 +1109,31 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i]);
- rs_get_tbl_info_from_mcs(tx_rate, info->band,
- &tbl_type, &rs_index);
+ ucode_rate = le32_to_cpu(table->rs_table[i]);
+ rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
*/
- if (table_type_matches(&tbl_type, curr_tbl))
+ if (rs_rate_match(&rate, &curr_tbl->rate))
tmp_tbl = curr_tbl;
- else if (table_type_matches(&tbl_type, other_tbl))
+ else if (rs_rate_match(&rate, &other_tbl->rate))
tmp_tbl = other_tbl;
else
continue;
- rs_collect_tx_data(tmp_tbl, rs_index, 1,
+
+ rs_collect_tx_data(tmp_tbl, rate.index, 1,
i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
lq_sta->total_success += legacy_success;
lq_sta->total_failed += retries + (1 - legacy_success);
}
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
+ lq_sta->last_rate_n_flags = ucode_rate;
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -969,8 +1151,8 @@ done:
static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
struct iwl_lq_sta *lq_sta)
{
- IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+ lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
if (is_legacy) {
lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
@@ -984,37 +1166,31 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
+ lq_sta->visited_columns = 0;
}
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
+static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column,
+ u32 bw)
{
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
- !(is_vht(tbl->lq_type)))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
+ if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+ column->mode != RS_SISO &&
+ column->mode != RS_MIMO2))
+ return expected_tpt_legacy;
/* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
+ if (column->mode == RS_LEGACY)
+ return expected_tpt_legacy;
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
* (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type)) {
- switch (tbl->bw) {
+ if (column->mode == RS_SISO) {
+ switch (bw) {
case RATE_MCS_CHAN_WIDTH_20:
ht_tbl_pointer = expected_tpt_siso_20MHz;
break;
@@ -1027,8 +1203,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
default:
WARN_ON_ONCE(1);
}
- } else if (is_mimo2(tbl->lq_type)) {
- switch (tbl->bw) {
+ } else if (column->mode == RS_MIMO2) {
+ switch (bw) {
case RATE_MCS_CHAN_WIDTH_20:
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
break;
@@ -1045,14 +1221,23 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
WARN_ON_ONCE(1);
}
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
+ if (!column->sgi && !lq_sta->is_agg) /* Normal */
+ return ht_tbl_pointer[0];
+ else if (column->sgi && !lq_sta->is_agg) /* SGI */
+ return ht_tbl_pointer[1];
+ else if (!column->sgi && lq_sta->is_agg) /* AGG */
+ return ht_tbl_pointer[2];
else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
+ return ht_tbl_pointer[3];
+}
+
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct rs_rate *rate = &tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+
+ tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
/*
@@ -1089,7 +1274,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
while (1) {
high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
- tbl->lq_type);
+ tbl->rate.type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
@@ -1110,7 +1295,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
* "active" throughput (under perfect conditions).
*/
if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
+ ((active_sr > RS_SR_FORCE_DECREASE) &&
(active_sr <= IWL_RATE_HIGH_TH) &&
(tpt_tbl[rate] <= active_tpt))) ||
((active_sr >= IWL_RATE_SCALE_SWITCH) &&
@@ -1157,417 +1342,14 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
}
-/* Move to the next action and wrap around to the first action in case
- * we're at the last action. Assumes actions start at 0.
- */
-static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
- u8 last_action)
-{
- BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
- BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
-
- tbl->action = (tbl->action + 1) % (last_action + 1);
-}
-
-static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta *sta)
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
- tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ return RATE_MCS_CHAN_WIDTH_80;
else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
- tbl->bw = RATE_MCS_CHAN_WIDTH_40;
- else
- tbl->bw = RATE_MCS_CHAN_WIDTH_20;
-}
-
-static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
-
- if (is_ht20(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- return true;
- if (is_ht40(tbl) && (ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- return true;
- if (is_ht80(tbl) && (vht_cap->cap &
- IEEE80211_VHT_CAP_SHORT_GI_80))
- return true;
-
- return false;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- rs_set_bw_from_sta(tbl, sta);
- rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
-
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
- tbl->current_rate);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int rs_switch_to_siso(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
-
- tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- rs_set_bw_from_sta(tbl, sta);
- rs_set_expected_tpt_table(lq_sta, tbl);
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
- tbl->current_rate);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int rs_move_legacy_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- int ret;
- u8 update_search_tbl_counter = 0;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
-
- if (tx_chains_num <= 1)
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_AB;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int rs_move_siso_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- u8 update_search_tbl_counter = 0;
- int ret;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
- !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
- tbl->action = IWL_SISO_SWITCH_ANTENNA;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA:
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
- if (tx_chains_num <= 1)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
- BT_MBOX_MSG(&mvm->last_bt_notif, 3,
- TRAFFIC_LOAD) == 0)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2:
- IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_AB;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!rs_sgi_allowed(tbl, sta))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl, index);
- update_search_tbl_counter = 1;
- goto out;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
- search_tbl->ant_type = ANT_B;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!rs_sgi_allowed(tbl, sta))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl, index);
- update_search_tbl_counter = 1;
- goto out;
- default:
- WARN_ON_ONCE(1);
- }
- rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
+ return RATE_MCS_CHAN_WIDTH_40;
- return 0;
+ return RATE_MCS_CHAN_WIDTH_20;
}
/*
@@ -1591,13 +1373,13 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
tbl = &(lq_sta->lq_info[active_tbl]);
/* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
/* Elapsed time using current modulation mode */
if (lq_sta->flush_timer)
flush_interval_passed =
time_after(jiffies,
(unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
+ RS_STAY_IN_COLUMN_TIMEOUT));
/*
* Check if we should allow search for new modulation mode.
@@ -1619,10 +1401,14 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
flush_interval_passed);
/* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+ IWL_DEBUG_RATE(mvm,
+ "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = 0;
+ /* mark the current column as visited */
+ lq_sta->visited_columns = BIT(tbl->column);
/*
* Else if we've used this modulation mode enough repetitions
* (regardless of elapsed time or success/failure), reset
@@ -1646,7 +1432,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
/* If transitioning to allow "search", reset all history
* bitmaps and stats in active table (this will become the new
* "search" table). */
- if (!lq_sta->stay_in_tbl) {
+ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
+ IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
}
@@ -1659,15 +1446,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index)
+ struct rs_rate *rate)
{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
}
static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
@@ -1686,6 +1468,250 @@ static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
return tid;
}
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ int i, j, n;
+ enum rs_column next_col_id;
+ const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+ const struct rs_tx_column *next_col;
+ allow_column_func_t allow_func;
+ u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
+ s32 *expected_tpt_tbl;
+ s32 tpt, max_expected_tpt;
+
+ for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+ next_col_id = curr_col->next_columns[i];
+
+ if (next_col_id == RS_COLUMN_INVALID)
+ continue;
+
+ if (lq_sta->visited_columns & BIT(next_col_id)) {
+ IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+ next_col_id);
+ continue;
+ }
+
+ next_col = &rs_tx_columns[next_col_id];
+
+ if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+ next_col_id, valid_ants, next_col->ant);
+ continue;
+ }
+
+ for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+ allow_func = next_col->checks[j];
+ if (allow_func && !allow_func(mvm, sta, tbl))
+ break;
+ }
+
+ if (j != MAX_COLUMN_CHECKS) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: not allowed (check %d failed)\n",
+ next_col_id, j);
+
+ continue;
+ }
+
+ tpt = lq_sta->last_tpt / 100;
+ expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+ tbl->rate.bw);
+ if (WARN_ON_ONCE(!expected_tpt_tbl))
+ continue;
+
+ max_expected_tpt = 0;
+ for (n = 0; n < IWL_RATE_COUNT; n++)
+ if (expected_tpt_tbl[n] > max_expected_tpt)
+ max_expected_tpt = expected_tpt_tbl[n];
+
+ if (tpt >= max_expected_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
+ continue;
+ }
+
+ break;
+ }
+
+ if (i == MAX_NEXT_COLUMNS)
+ return RS_COLUMN_INVALID;
+
+ IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
+
+ return next_col_id;
+}
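
Column selection walks the current column's next_columns list and skips candidates that were already visited in this search cycle (a bitmask), that need antennas the chip lacks, that fail their checks[], or whose best-case expected throughput cannot beat what the station already gets. The visited-columns bookkeeping is the part worth isolating; the column ids and next-column table below are illustrative:

    #include <stdio.h>

    #define MAX_NEXT 5
    #define COL_INVALID -1

    enum { COL_LEGACY_A, COL_LEGACY_B, COL_SISO_A, COL_MIMO2, NUM_COLS };

    static const int next_cols[NUM_COLS][MAX_NEXT] = {
        [COL_LEGACY_A] = { COL_LEGACY_B, COL_SISO_A, COL_MIMO2, COL_INVALID, COL_INVALID },
        [COL_LEGACY_B] = { COL_LEGACY_A, COL_SISO_A, COL_MIMO2, COL_INVALID, COL_INVALID },
        [COL_SISO_A]   = { COL_MIMO2, COL_LEGACY_A, COL_INVALID, COL_INVALID, COL_INVALID },
        [COL_MIMO2]    = { COL_SISO_A, COL_INVALID, COL_INVALID, COL_INVALID, COL_INVALID },
    };

    /* Pick the next unvisited candidate column, or COL_INVALID when the cycle is done. */
    static int next_column(int current, unsigned *visited)
    {
        for (int i = 0; i < MAX_NEXT; i++) {
            int cand = next_cols[current][i];

            if (cand == COL_INVALID || (*visited & (1u << cand)))
                continue;
            *visited |= 1u << cand;
            return cand;
        }
        return COL_INVALID;
    }

    int main(void)
    {
        unsigned visited = 1u << COL_LEGACY_A;   /* start in legacy antenna A */
        int col = COL_LEGACY_A;

        while ((col = next_column(col, &visited)) != COL_INVALID)
            printf("try column %d\n", col);
        return 0;
    }
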
+
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ enum rs_column col_id)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct rs_rate *rate = &search_tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[col_id];
+ const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u16 rate_mask = 0;
+ u32 rate_idx = 0;
+
+ memcpy(search_tbl, tbl, sz);
+
+ rate->sgi = column->sgi;
+ rate->ant = column->ant;
+
+ if (column->mode == RS_LEGACY) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate_mask = lq_sta->active_legacy_rate;
+ } else if (column->mode == RS_SISO) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ rate_mask = lq_sta->active_siso_rate;
+ } else if (column->mode == RS_MIMO2) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ rate_mask = lq_sta->active_mimo2_rate;
+ } else {
+ WARN_ON_ONCE("Bad column mode");
+ }
+
+ rate->bw = rs_bw_from_sta_bw(sta);
+ search_tbl->column = col_id;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+
+ lq_sta->visited_columns |= BIT(col_id);
+
+ /* Get the best matching rate if we're changing modes. e.g.
+ * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+ */
+ if (curr_column->mode != column->mode) {
+ rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+ rate_mask, rate->index);
+
+ if ((rate_idx == IWL_RATE_INVALID) ||
+ !(BIT(rate_idx) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm,
+				       "cannot switch with index %d rate mask %x\n",
+ rate_idx, rate_mask);
+
+ goto err;
+ }
+
+ rate->index = rate_idx;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+ col_id, rate->index);
+
+ return 0;
+
+err:
+ rate->type = LQ_NONE;
+ return -1;
+}
+
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ s32 sr, int low, int high,
+ int current_tpt,
+ int low_tpt, int high_tpt)
+{
+ enum rs_action action = RS_ACTION_STAY;
+
+ /* Too many failures, decrease rate */
+ if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(mvm,
+ "decrease rate because of low SR\n");
+ action = RS_ACTION_DOWNSCALE;
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+ "Good SR and no high rate measurement. "
+ "Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ } else if (low != IWL_RATE_INVALID) {
+ IWL_DEBUG_RATE(mvm,
+ "Remain in current rate\n");
+ action = RS_ACTION_STAY;
+ }
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it!
+ */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Both high and low are worse. "
+ "Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance.
+ */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate is better and good "
+ "SR. Increate rate\n");
+ action = RS_ACTION_UPSCALE;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate isn't better OR "
+ "no good SR. Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "Lower rate is better. "
+ "Decrease rate\n");
+ action = RS_ACTION_DOWNSCALE;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ IWL_DEBUG_RATE(mvm,
+ "Lower rate isn't better and "
+ "good SR. Increase rate\n");
+ action = RS_ACTION_UPSCALE;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it.
+ */
+ if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low])))) {
+ IWL_DEBUG_RATE(mvm,
+ "Sanity check failed. Maintain rate\n");
+ action = RS_ACTION_STAY;
+ }
+
+ return action;
+}
+
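rs_get_rate_action() works on the driver's fixed-point success-ratio scale (success_ratio is "percent * 128" per the field comment in rs.h, so 12800 means 100%), which is how RS_SR_FORCE_DECREASE = 1920 and IWL_RATE_INCREASE_TH = 6400 decode to 15% and 50%. A minimal userspace sketch of that scale, assuming the usual 12800 * success / attempts computation:

#include <stdio.h>

#define RS_SR_FORCE_DECREASE	1920	/* 15% */
#define IWL_RATE_INCREASE_TH	6400	/* 50% */
#define IWL_RATE_HIGH_TH	10880	/* 85% */

/* success ratio on the "percent * 128" scale (assumed formula) */
static int success_ratio(int success, int attempts)
{
	return attempts ? (12800 * success) / attempts : -1;
}

int main(void)
{
	int sr = success_ratio(40, 64);		/* 62.5% -> 8000 */

	printf("sr=%d force_decrease=%d may_increase=%d very_good=%d\n",
	       sr,
	       sr <= RS_SR_FORCE_DECREASE,	/* 0 */
	       sr >= IWL_RATE_INCREASE_TH,	/* 1 */
	       sr > IWL_RATE_HIGH_TH);		/* 0 */
	return 0;
}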
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -1705,20 +1731,19 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
u32 fail_count;
- s8 scale_action = 0;
+ enum rs_action scale_action = RS_ACTION_STAY;
u16 rate_mask;
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
s32 sr;
u8 tid = IWL_MAX_TID_COUNT;
+ u8 prev_agg = lq_sta->is_agg;
struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data;
-
- IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+ struct rs_rate *rate;
/* Send management frames and NO_ACK data using lowest rate. */
/* TODO: this could probably be improved.. */
@@ -1726,8 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -1751,45 +1774,29 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
+
+ if (prev_agg != lq_sta->is_agg) {
+ IWL_DEBUG_RATE(mvm,
+ "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+ prev_agg, lq_sta->is_agg);
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ }
/* current tx rate */
index = lq_sta->last_txrate_idx;
- IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
/* rates available for this association, and for modulation mode */
- rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
- IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else {
- rate_scale_index_msk = rate_mask;
- }
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
+ if (!(BIT(index) & rate_mask)) {
IWL_ERR(mvm, "Current Rate is not valid\n");
if (lq_sta->search_better_tbl) {
/* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
+ rate->type = LQ_NONE;
lq_sta->search_better_tbl = 0;
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+ rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
}
return;
}
@@ -1806,6 +1813,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
index = lq_sta->max_rate_idx;
update_lq = 1;
window = &(tbl->win[index]);
+ IWL_DEBUG_RATE(mvm,
+ "Forcing user max rate %d\n",
+ index);
goto lq_update;
}
@@ -1822,8 +1832,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
IWL_DEBUG_RATE(mvm,
- "LQ: still below TH. succ=%d total=%d for index %d\n",
- window->success_counter, window->counter, index);
+ "(%s: %d): Test Window: succ %d total %d\n",
+ rs_pretty_lq_type(rate->type),
+ index, window->success_counter, window->counter);
/* Can't calculate this yet; not enough history */
window->average_tpt = IWL_INVALID_VALUE;
@@ -1838,8 +1849,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* actual average throughput */
if (window->average_tpt != ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(mvm,
- "expected_tpt should have been calculated by now\n");
window->average_tpt = ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128);
}
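Because success_ratio is percent * 128, the rounded (x + 64) / 128 above leaves average_tpt expressed as "success percentage times expected throughput", which is also why rs_get_rate_action() compares current_tpt against 100 * expected_tpt[low], i.e. 100% of the lower rate's expectation. A quick standalone check of the rounding:

#include <stdio.h>

int main(void)
{
	int success_ratio = 8000;	/* 62.5% on the percent * 128 scale */
	int expected_tpt = 202;		/* arbitrary expected-throughput value */

	/* same rounding as above: yields 12625 == 62.5 * 202 */
	int average_tpt = (success_ratio * expected_tpt + 64) / 128;

	printf("average_tpt=%d\n", average_tpt);
	return 0;
}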
@@ -1851,34 +1860,33 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* continuing to use the setup that we've been trying. */
if (window->average_tpt > lq_sta->last_tpt) {
IWL_DEBUG_RATE(mvm,
- "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "SWITCHING TO NEW TABLE SR: %d "
+ "cur-tpt %d old-tpt %d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
/* Swap tables; "search" becomes "active" */
lq_sta->active_tbl = active_tbl;
current_tpt = window->average_tpt;
/* Else poor success; go back to mode in "active" table */
} else {
IWL_DEBUG_RATE(mvm,
- "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+ "GOING BACK TO THE OLD TABLE: SR %d "
+ "cur-tpt %d old-tpt %d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
/* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
+ rate->type = LQ_NONE;
/* Revert to "active" table */
active_tbl = lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
/* Revert to "active" rate and throughput info */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ index = tbl->rate.index;
current_tpt = lq_sta->last_tpt;
/* Need to set up a new rate table in uCode */
@@ -1894,8 +1902,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
/* (Else) not in search of better modulation mode, try for better
* starting rate, while staying in this mode. */
- high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
- tbl->lq_type);
+ high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
@@ -1913,118 +1920,58 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (high != IWL_RATE_INVALID)
high_tpt = tbl->win[high].average_tpt;
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(mvm,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(mvm,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
+ IWL_DEBUG_RATE(mvm,
+ "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+ rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+ low, high, low_tpt, high_tpt);
- if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
- if (lq_sta->last_bt_traffic >
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
- /*
- * don't set scale_action, don't want to scale up if
- * the rate scale doesn't otherwise think that is a
- * good idea.
- */
- } else if (lq_sta->last_bt_traffic <=
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
- scale_action = -1;
- }
- }
- lq_sta->last_bt_traffic =
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+ scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+ current_tpt, low_tpt, high_tpt);
- if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
- /* search for a new modulation */
+ /* Force a search in case BT doesn't like us being in MIMO */
+ if (is_mimo(rate) &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+ IWL_DEBUG_RATE(mvm,
+ "BT Coex forbids MIMO. Search for new config\n");
rs_stay_in_table(lq_sta, true);
goto lq_update;
}
switch (scale_action) {
- case -1:
+ case RS_ACTION_DOWNSCALE:
/* Decrease starting rate, update uCode's rate table */
if (low != IWL_RATE_INVALID) {
update_lq = 1;
index = low;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the bottom rate. Can't decrease\n");
}
break;
- case 1:
+ case RS_ACTION_UPSCALE:
/* Increase starting rate, update uCode's rate table */
if (high != IWL_RATE_INVALID) {
update_lq = 1;
index = high;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the top rate. Can't increase\n");
}
break;
- case 0:
+ case RS_ACTION_STAY:
/* No change */
default:
break;
}
- IWL_DEBUG_RATE(mvm,
- "choose rate scale index %d action %d low %d high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
lq_update:
/* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+ if (update_lq) {
+ tbl->rate.index = index;
+ rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+ }
rs_stay_in_table(lq_sta, false);
@@ -2035,20 +1982,29 @@ lq_update:
* 3) Allowing a new search
*/
if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
+ lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+ && window->counter) {
+ enum rs_column next_column;
+
/* Save current throughput to compare with "search" throughput*/
lq_sta->last_tpt = current_tpt;
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- rs_move_legacy_other(mvm, lq_sta, sta, index);
- else if (is_siso(tbl->lq_type))
- rs_move_siso_to_other(mvm, lq_sta, sta, index);
- else if (is_mimo2(tbl->lq_type))
- rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
- else
- WARN_ON_ONCE(1);
+ IWL_DEBUG_RATE(mvm,
+ "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+ update_lq, done_search, lq_sta->rs_state,
+ window->counter);
+
+ next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+ if (next_column != RS_COLUMN_INVALID) {
+ int ret = rs_switch_to_column(mvm, lq_sta, sta,
+ next_column);
+ if (!ret)
+ lq_sta->search_better_tbl = 1;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
+ }
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@@ -2058,36 +2014,31 @@ lq_update:
rs_rate_scale_clear_window(&(tbl->win[i]));
/* Use new "search" start rate */
- index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+ index = tbl->rate.index;
- IWL_DEBUG_RATE(mvm,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+ rs_dump_rate(mvm, &tbl->rate,
+ "Switch to SEARCH TABLE:");
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
} else {
done_search = 1;
}
}
- if (done_search && !lq_sta->stay_in_tbl) {
+ if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
/* If the "active" (non-search) mode was legacy,
* and we've tried switching antennas,
* but we haven't been able to try HT modes (not available),
* stay with best antenna legacy modulation for a while
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
- lq_sta->action_counter > tbl1->max_search) {
+ if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
rs_set_stay_in_table(mvm, 1, lq_sta);
- }
-
+ } else {
/* If we're in an HT mode, and all 3 mode switch actions
* have been tried and compared, stay in this best modulation
* mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
@@ -2105,7 +2056,6 @@ lq_update:
}
out:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
}
@@ -2126,12 +2076,12 @@ out:
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ bool init)
{
struct iwl_scale_tbl_info *tbl;
- int rate_idx;
+ struct rs_rate *rate;
int i;
- u32 rate;
u8 active_tbl = 0;
u8 valid_tx_ant;
@@ -2148,27 +2098,30 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
if ((i < 0) || (i >= IWL_RATE_COUNT))
i = 0;
- rate = iwl_rates[i].plcp;
- tbl->ant_type = first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
+ rate->index = i;
+ rate->ant = first_antenna(valid_tx_ant);
+ rate->sgi = false;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ if (band == IEEE80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
- rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
- if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+ WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+ if (rate->ant == ANT_A)
+ tbl->column = RS_COLUMN_LEGACY_ANT_A;
+ else
+ tbl->column = RS_COLUMN_LEGACY_ANT_B;
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
- tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
}
static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
@@ -2182,8 +2135,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
- IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
-
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
@@ -2242,11 +2193,59 @@ static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
return -1;
}
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_lq_sta *lq_sta)
+{
+ int i;
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* Disable MCS9 as a workaround */
+ if (i == IWL_RATE_MCS_9_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ if (sta->rx_nss < 2)
+ return;
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* Disable MCS9 as a workaround */
+ if (i == IWL_RATE_MCS_9_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+}
+
/*
* Called after adding a new station to initialize rate scaling
*/
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band)
+ enum ieee80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -2259,6 +2258,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
lq_sta = &sta_priv->lq_sta;
+ memset(lq_sta, 0, sizeof(*lq_sta));
+
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = sta_priv->sta_id;
@@ -2268,7 +2269,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
IWL_DEBUG_RATE(mvm,
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2308,27 +2308,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_vht = false;
} else {
- int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
- if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
- for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
- if (i == IWL_RATE_9M_INDEX)
- continue;
-
- lq_sta->active_siso_rate |= BIT(i);
- }
- }
-
- highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
- if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
- for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
- if (i == IWL_RATE_9M_INDEX)
- continue;
-
- lq_sta->active_mimo2_rate |= BIT(i);
- }
- }
-
- /* TODO: avoid MCS9 in 20Mhz which isn't valid for 11ac */
+ rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
}
@@ -2341,15 +2321,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- lq_sta->lq.dual_stream_ant_msk =
- iwl_fw_valid_tx_ant(mvm->fw) &
- ~first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (!lq_sta->lq.dual_stream_ant_msk) {
- lq_sta->lq.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) == 2) {
- lq_sta->lq.dual_stream_ant_msk =
- iwl_fw_valid_tx_ant(mvm->fw);
- }
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2364,121 +2336,184 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->dbg_fixed_rate = 0;
#endif
- rs_initialize_lq(mvm, sta, lq_sta, band);
+ rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
+static void rs_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed)
+{
+ u8 tid;
+ struct iwl_op_mode *op_mode =
+ (struct iwl_op_mode *)mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /* Stop any ongoing aggregations as rs starts off assuming no agg */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_stop_tx_ba_session(sta, tid);
+
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+ struct iwl_lq_cmd *lq_cmd,
+ enum ieee80211_band band,
+ u32 ucode_rate)
+{
+ struct rs_rate rate;
+ int i;
+ int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+ __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+
+ for (i = 0; i < num_rates; i++)
+ lq_cmd->rs_table[i] = ucode_rate_le32;
+
+ rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+
+ if (is_mimo(&rate))
+ lq_cmd->mimo_delim = num_rates - 1;
+ else
+ lq_cmd->mimo_delim = 0;
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate,
+ __le32 *rs_table, int *rs_table_index,
+ int num_rates, int num_retries,
+ u8 valid_tx_ant, bool toggle_ant)
+{
+ int i, j;
+ __le32 ucode_rate;
+ bool bottom_reached = false;
+ int prev_rate_idx = rate->index;
+ int end = LINK_QUAL_MAX_RETRY_NUM;
+ int index = *rs_table_index;
+
+ for (i = 0; i < num_rates && index < end; i++) {
+ ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
+ for (j = 0; j < num_retries && index < end; j++, index++)
+ rs_table[index] = ucode_rate;
+
+ if (toggle_ant)
+ rs_toggle_antenna(valid_tx_ant, rate);
+
+ prev_rate_idx = rate->index;
+ bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+ if (bottom_reached && !is_legacy(rate))
+ break;
+ }
+
+ if (!bottom_reached)
+ rate->index = prev_rate_idx;
+
+ *rs_table_index = index;
+}
+
+/* Building the rate table is non-trivial. When we're in the MIMO2/VHT/80Mhz/SGI
+ * column, the rate table should look like this:
+ *
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+ */
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
+ struct rs_rate rate;
+ int num_rates, num_retries, index = 0;
u8 valid_tx_ant = 0;
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ bool toggle_ant = false;
- /* Override starting rate (index 0) if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ memcpy(&rate, initial_rate, sizeof(rate));
- /* Interpret new_rate (rate_n_flags) */
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
+ valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
+ if (is_siso(&rate)) {
+ num_rates = RS_INITIAL_SISO_NUM_RATES;
+ num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+ } else if (is_mimo(&rate)) {
+ num_rates = RS_INITIAL_MIMO_NUM_RATES;
+ num_retries = RS_HT_VHT_RETRIES_PER_RATE;
} else {
- repeat_rate = min(IWL_HT_NUMBER_TRY,
- LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+ num_rates = RS_INITIAL_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ toggle_ant = true;
}
- lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
-
- if (num_of_ant(tbl_type.ant_type) == 1)
- lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
- else if (num_of_ant(tbl_type.ant_type) == 2)
- lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
- /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (mvm)
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (mvm &&
- rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
- /* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ rs_get_lower_rate_down_column(lq_sta, &rate);
- /* Fill next table entry */
- lq_cmd->rs_table[index] =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
+ if (is_siso(&rate)) {
+ num_rates = RS_SECONDARY_SISO_NUM_RATES;
+ num_retries = RS_SECONDARY_SISO_RETRIES;
+ } else if (is_legacy(&rate)) {
+ num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
+ } else {
+ WARN_ON_ONCE(1);
+ }
- rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->mimo_delim = index;
-
- /* Get next rate */
- new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (mvm &&
- rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
+ toggle_ant = true;
- /* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_LEGACY_A
- * or LQ_LEGACY_G.
- */
- use_ht_possible = 0;
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
- /* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate);
+ rs_get_lower_rate_down_column(lq_sta, &rate);
- /* Fill next table entry */
- lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+ num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = RS_LEGACY_RETRIES_PER_RATE;
- index++;
- repeat_rate--;
- }
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
+}
+
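Put together, the three rs_fill_rates_for_column() calls above produce the shape documented in the table before this function: the initial column's steps repeated per rate, a short secondary column, then legacy entries until the 16-slot rs_table is full. A standalone sketch of that fill pattern using plain integers (step and retry counts taken from the example table, not from the RS_* constants):

#include <stdio.h>

#define TABLE_SIZE 16

static int fill(int *tbl, int idx, int start, int steps, int retries)
{
	int i, j, rate = start;

	for (i = 0; i < steps && idx < TABLE_SIZE; i++, rate--)
		for (j = 0; j < retries && idx < TABLE_SIZE; j++)
			tbl[idx++] = rate;
	return idx;
}

int main(void)
{
	int tbl[TABLE_SIZE], i, idx = 0;

	idx = fill(tbl, idx, 9, 3, 2);		/* MIMO2: MCS 9..7, twice each */
	idx = fill(tbl, idx, 7, 3, 1);		/* SISO:  MCS 7..5, once each  */
	idx = fill(tbl, idx, 6, TABLE_SIZE, 1);	/* legacy: step down to the end */

	for (i = 0; i < TABLE_SIZE; i++)
		printf("rate[%d] step %d\n", i, tbl[i]);
	return 0;
}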
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ u8 ant = initial_rate->ant;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->dbg_fixed_rate) {
+ rs_build_rates_table_from_fixed(mvm, lq_cmd,
+ lq_sta->band,
+ lq_sta->dbg_fixed_rate);
+ ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS;
+ } else
+#endif
+ rs_build_rates_table(mvm, lq_sta, initial_rate);
+
+ if (num_of_ant(ant) == 1)
+ lq_cmd->single_stream_ant_msk = ant;
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
@@ -2512,31 +2547,83 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags)
+static int rs_pretty_print_rate(char *buf, const u32 rate)
{
- struct iwl_mvm *mvm;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
- mvm = lq_sta->drv;
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(mvm,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
- }
+ char *type, *bw;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+ if (!(rate & RATE_MCS_HT_MSK) &&
+ !(rate & RATE_MCS_VHT_MSK)) {
+ int index = iwl_hwrate_to_plcp_idx(rate);
+
+ return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs[index].mbps);
+ }
+
+ if (rate & RATE_MCS_VHT_MSK) {
+ type = "VHT";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HT_MSK) {
+ type = "HT";
+ mcs = rate & RATE_HT_MCS_INDEX_MSK;
} else {
- IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+ type = "Unknown"; /* shouldn't happen */
+ }
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ bw = "20Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ bw = "40Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ bw = "80Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ bw = "160Mhz";
+ break;
+ default:
+ bw = "BAD BW";
+ }
+
+ return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "",
+ (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+}
+
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only; once the device starts using a
+ * fixed rate, the module must be reloaded to restore normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 Mbps, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 Mbps, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 Mbps, no 9, no CCK */
+
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ struct rs_rate rate;
+ rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
+ lq_sta->band, &rate);
+ rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate);
+ iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
}
}
@@ -2572,15 +2659,14 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
char *buff;
int desc = 0;
int i = 0;
- int index = 0;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
+ struct rs_rate *rate = &tbl->rate;
mvm = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
+ buff = kmalloc(2048, GFP_KERNEL);
if (!buff)
return -ENOMEM;
@@ -2595,23 +2681,23 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" :
- is_vht(tbl->lq_type) ? "VHT" : "HT");
- if (is_ht(tbl->lq_type)) {
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
+ if (!is_legacy(rate)) {
desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ (is_siso(rate)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
- (is_ht20(tbl)) ? "20MHz" :
- (is_ht40(tbl)) ? "40MHz" :
- (is_ht80(tbl)) ? "80Mhz" : "BAD BW");
+ (is_ht20(rate)) ? "20MHz" :
+ (is_ht40(rate)) ? "40MHz" :
+ (is_ht80(rate)) ? "80Mhz" : "BAD BW");
desc += sprintf(buff+desc, " %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
+ (rate->sgi) ? "SGI" : "NGI",
(lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
desc += sprintf(buff+desc,
- "general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
lq_sta->lq.flags,
lq_sta->lq.mimo_delim,
lq_sta->lq.single_stream_ant_msk,
@@ -2631,19 +2717,10 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.initial_rate_index[3]);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i]));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i]),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i, le32_to_cpu(lq_sta->lq.rs_table[i]),
- iwl_rate_mcs[index].mbps,
- iwl_rate_mcs[index].mcs);
- }
+ u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+
+ desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff+desc, r);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -2665,6 +2742,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int i, j;
ssize_t ret;
struct iwl_scale_tbl_info *tbl;
+ struct rs_rate *rate;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
@@ -2673,16 +2751,17 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
for (i = 0; i < LQ_SIZE; i++) {
tbl = &(lq_sta->lq_info[i]);
+ rate = &tbl->rate;
desc += sprintf(buff+desc,
"%s type=%d SGI=%d BW=%s DUP=0\n"
- "rate=0x%X\n",
+ "index=%d\n",
lq_sta->active_tbl == i ? "*" : "x",
- tbl->lq_type,
- tbl->is_SGI,
- is_ht20(tbl) ? "20Mhz" :
- is_ht40(tbl) ? "40Mhz" :
- is_ht80(tbl) ? "80Mhz" : "ERR",
- tbl->current_rate);
+ rate->type,
+ rate->sgi,
+ is_ht20(rate) ? "20Mhz" :
+ is_ht40(rate) ? "40Mhz" :
+ is_ht80(rate) ? "80Mhz" : "ERR",
+ rate->index);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
@@ -2746,6 +2825,7 @@ static struct rate_control_ops rs_mvm_ops = {
.free = rs_free,
.alloc_sta = rs_alloc_sta,
.free_sta = rs_free_sta,
+ .rate_update = rs_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_add_debugfs,
.remove_sta_debugfs = rs_remove_debugfs,
@@ -2778,13 +2858,13 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
if (enable) {
if (mvmsta->tx_protection == 0)
- lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags |= LQ_FLAG_USE_RTS_MSK;
mvmsta->tx_protection++;
} else {
mvmsta->tx_protection--;
if (mvmsta->tx_protection == 0)
- lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+ lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
}
- return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+ return iwl_mvm_send_lq_cmd(mvm, lq, false);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 5d5344f7070b..7bc6404f6986 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -155,38 +155,7 @@ enum {
#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
#define IWL_RATE_HIGH_TH 10880 /* 85% */
#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
-
-/* possible actions when in legacy mode */
-enum {
- IWL_LEGACY_SWITCH_ANTENNA,
- IWL_LEGACY_SWITCH_SISO,
- IWL_LEGACY_SWITCH_MIMO2,
- IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
- IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
-};
-
-/* possible actions when in siso mode */
-enum {
- IWL_SISO_SWITCH_ANTENNA,
- IWL_SISO_SWITCH_MIMO2,
- IWL_SISO_SWITCH_GI,
- IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
- IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
-};
-
-/* possible actions when in mimo mode */
-enum {
- IWL_MIMO2_SWITCH_SISO_A,
- IWL_MIMO2_SWITCH_SISO_B,
- IWL_MIMO2_SWITCH_GI,
- IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
- IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
-};
-
-#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
+#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
@@ -224,22 +193,45 @@ enum iwl_table_type {
LQ_MAX,
};
-#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
-#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
-#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
-#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
-#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
-#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
-#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
-#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
-#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
-
-#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
-#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
-#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
+struct rs_rate {
+ int index;
+ enum iwl_table_type type;
+ u8 ant;
+ u32 bw;
+ bool sgi;
+};
+
+
+#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
+ ((type) == LQ_LEGACY_A))
+#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
+#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
+#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
+#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_mimo(type) (is_type_mimo2(type))
+#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
+#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
+#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
+
+#define is_legacy(rate) is_type_legacy((rate)->type)
+#define is_ht_siso(rate) is_type_ht_siso((rate)->type)
+#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type)
+#define is_vht_siso(rate) is_type_vht_siso((rate)->type)
+#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type)
+#define is_siso(rate) is_type_siso((rate)->type)
+#define is_mimo2(rate) is_type_mimo2((rate)->type)
+#define is_mimo(rate) is_type_mimo((rate)->type)
+#define is_ht(rate) is_type_ht((rate)->type)
+#define is_vht(rate) is_type_vht((rate)->type)
+#define is_a_band(rate) is_type_a_band((rate)->type)
+#define is_g_band(rate) is_type_g_band((rate)->type)
+
+#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
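A short usage sketch of the new rs_rate container together with the wrapper macros above (assumes rs.h and the firmware rate/antenna definitions are in scope; the starting index is illustrative and assumed to be defined alongside the IWL_RATE_MCS_*_INDEX constants used earlier in this diff):

struct rs_rate r = {
	.index = IWL_RATE_MCS_7_INDEX,	/* illustrative starting index */
	.type = LQ_VHT_MIMO2,
	.ant = ANT_AB,
	.bw = RATE_MCS_CHAN_WIDTH_80,
	.sgi = true,
};

/* For this rate: is_mimo(&r), is_vht(&r) and is_ht80(&r) are all true,
 * while is_legacy(&r) and is_siso(&r) are false.
 */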
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -257,7 +249,23 @@ struct iwl_rate_scale_data {
s32 success_ratio; /* per-cent * 128 */
s32 counter; /* number of frames attempted */
s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
+};
+
+/* Possible Tx columns
+ * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
+ */
+enum rs_column {
+ RS_COLUMN_LEGACY_ANT_A = 0,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_MIMO2_SGI,
+
+ RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_INVALID,
};
/**
@@ -267,17 +275,18 @@ struct iwl_rate_scale_data {
* one for "active", and one for "search".
*/
struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
- u8 max_search; /* maximun number of tables we can search */
+ struct rs_rate rate;
+ enum rs_column column;
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
+enum {
+ RS_STATE_SEARCH_CYCLE_STARTED,
+ RS_STATE_SEARCH_CYCLE_ENDED,
+ RS_STATE_STAY_IN_COLUMN,
+};
+
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
@@ -285,8 +294,7 @@ struct iwl_scale_tbl_info {
*/
struct iwl_lq_sta {
u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 rs_state; /* RS_STATE_* */
u8 search_better_tbl; /* 1: currently trying alternate mode */
s32 last_tpt;
@@ -299,12 +307,13 @@ struct iwl_lq_sta {
u32 total_success; /* total successful frames, any/all rates */
u64 flush_timer; /* time staying in mode before new search */
- u8 action_counter; /* # mode-switch actions tried */
+ u32 visited_columns; /* Bitmask marking which Tx columns were
+ * explored during a search cycle
+ */
bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
u16 active_legacy_rate;
u16 active_siso_rate;
u16 active_mimo2_rate;
@@ -328,32 +337,11 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
- /* BT traffic this sta was last updated in */
- u8 last_bt_traffic;
-};
-
-enum iwl_bt_coex_profile_traffic_load {
- IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
- IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
- IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
-/*
- * There are no more even though below is a u8, the
- * indication from the BT device only has two bits.
- */
};
-
-static inline u8 num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band);
+ enum ieee80211_band band, bool init);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3a1f3982109d..a85b60f7e67e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -251,6 +251,12 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
+ case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
default:
IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index dff7592e1ff8..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,9 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define LONG_OUT_TIME_PERIOD 600
+#define SHORT_OUT_TIME_PERIOD 200
+#define SUSPEND_TIME_PERIOD 100
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
@@ -87,20 +90,22 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
+ u32 flags, bool is_assoc)
{
- if (vif->bss_conf.assoc)
- return cpu_to_le32(200 * 1024);
- else
+ if (!is_assoc)
return 0;
+ if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+ return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
+ return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
}
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
+ bool is_assoc)
{
- if (!vif->bss_conf.assoc)
+ if (!is_assoc)
return 0;
-
- return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
+ return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
}
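The new out-of-channel budgets are given in TUs and converted with ieee80211_tu_to_usec(); since 1 TU is 1024 usec, the three periods above come out to 614400, 204800 and 102400 usec. A trivial standalone check:

#include <stdio.h>

int main(void)
{
	printf("LONG_OUT_TIME_PERIOD:  %d usec\n", 600 * 1024);	/* 614400 */
	printf("SHORT_OUT_TIME_PERIOD: %d usec\n", 200 * 1024);	/* 204800 */
	printf("SUSPEND_TIME_PERIOD:   %d usec\n", 100 * 1024);	/* 102400 */
	return 0;
}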
static inline __le32
@@ -192,7 +197,7 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
chan->type = cpu_to_le32(type);
- if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
@@ -262,6 +267,15 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return (u16)len;
}
+static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *is_assoc = data;
+
+ if (vif->bss_conf.assoc)
+ *is_assoc = true;
+}
+
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
@@ -274,6 +288,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+ bool is_assoc = false;
int ret;
u32 status;
int ssid_len = 0;
@@ -289,13 +304,17 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
-
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_vif_assoc_iterator,
+ &is_assoc);
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
- cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
- cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+ is_assoc);
+ cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
@@ -325,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
@@ -454,13 +474,18 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return;
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ieee80211_scan_completed(mvm->hw, true);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ return;
+ }
+
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
scan_abort_notif,
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
- CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the fw_restart flow */
@@ -522,6 +547,12 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_offload_cmd *scan)
{
+ bool is_assoc = false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_vif_assoc_iterator,
+ &is_assoc);
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -529,8 +560,9 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
- scan->max_out_time = cpu_to_le32(200 * 1024);
- scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+ is_assoc);
+ scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
@@ -642,7 +674,7 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
- if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
@@ -776,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
profile = &profile_cfg->profiles[i];
@@ -817,11 +851,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, filter len %d\n",
req->n_match_sets);
- scan_req.flags |=
- cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
} else {
IWL_DEBUG_SCAN(mvm,
"Sending Scheduled scan without filtering\n");
+ scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000000..8401627c0030
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -0,0 +1,291 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ u8 sta_vif_ap_sta_id;
+ enum iwl_sf_state sta_vif_state;
+ int num_active_macs;
+};
+
+/*
+ * Count bound interfaces which are not p2p, besides data->ignore_vif.
+ * If a bound station vif exists, its AP sta_id and SF state are recorded in data.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_active_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
+ vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ data->num_active_macs++;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+ if (vif->bss_conf.assoc)
+ data->sta_vif_state = SF_FULL_ON;
+ else
+ data->sta_vif_state = SF_INIT_OFF;
+ }
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in SF_FULL_ON state.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER),
+ cpu_to_le32(SF_BA_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_sta *sta)
+{
+ int i, j, watermark;
+
+ sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+ /*
+ * If we are in the association flow, check the antenna configuration
+ * capabilities of the AP station and choose the watermark accordingly.
+ */
+ if (sta) {
+ if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ switch (sta->rx_nss) {
+ case 1:
+ watermark = SF_W_MARK_SISO;
+ break;
+ case 2:
+ watermark = SF_W_MARK_MIMO2;
+ break;
+ default:
+ watermark = SF_W_MARK_MIMO3;
+ break;
+ }
+ } else {
+ watermark = SF_W_MARK_LEGACY;
+ }
+ /* default watermark value for unassociated mode. */
+ } else {
+ watermark = SF_W_MARK_MIMO2;
+ }
+ sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+ for (i = 0; i < SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+ BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+ sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+ sizeof(sf_full_timeout));
+}
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
+ enum iwl_sf_state new_state)
+{
+ struct iwl_sf_cfg_cmd sf_cmd = {
+ .state = new_state,
+ };
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ /*
+ * If an associated AP sta changed its antenna configuration, the state
+ * will remain FULL_ON but SF parameters need to be reconsidered.
+ */
+ if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+ return 0;
+
+ switch (new_state) {
+ case SF_UNINIT:
+ break;
+ case SF_FULL_ON:
+ if (sta_id == IWL_MVM_STATION_COUNT) {
+ IWL_ERR(mvm,
+ "No station: Cannot switch SF to FULL_ON\n");
+ return -EINVAL;
+ }
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ iwl_mvm_fill_sf_command(&sf_cmd, sta);
+ rcu_read_unlock();
+ break;
+ case SF_INIT_OFF:
+ iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+ new_state);
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ if (!ret)
+ mvm->sf_state = new_state;
+
+ return ret;
+}
+
+/*
+ * Update Smart fifo:
+ * Count bound interfaces that are not to be removed, ignoring p2p devices,
+ * and set new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+ bool remove_vif)
+{
+ enum iwl_sf_state new_state;
+ u8 sta_id = IWL_MVM_STATION_COUNT;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct iwl_mvm_active_iface_iterator_data data = {
+ .ignore_vif = changed_vif,
+ .sta_vif_state = SF_UNINIT,
+ .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+ };
+
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
+ return 0;
+
+ /*
+ * Ignore the call if we are in HW Restart flow, or if the handled
+ * vif is a p2p device.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+ return 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bound_iface_iterator,
+ &data);
+
+ /* If changed_vif exists and is not to be removed, add to the count */
+ if (changed_vif && !remove_vif)
+ data.num_active_macs++;
+
+ switch (data.num_active_macs) {
+ case 0:
+ /* If there are no active macs - change state to SF_INIT_OFF */
+ new_state = SF_INIT_OFF;
+ break;
+ case 1:
+ if (remove_vif) {
+ /* The one active mac left is of type station
+ * and we filled the relevant data during iteration
+ */
+ new_state = data.sta_vif_state;
+ sta_id = data.sta_vif_ap_sta_id;
+ } else {
+ if (WARN_ON(!changed_vif))
+ return -EINVAL;
+ if (changed_vif->type != NL80211_IFTYPE_STATION) {
+ new_state = SF_UNINIT;
+ } else if (changed_vif->bss_conf.assoc) {
+ mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+ sta_id = mvmvif->ap_sta_id;
+ new_state = SF_FULL_ON;
+ } else {
+ new_state = SF_INIT_OFF;
+ }
+ }
+ break;
+ default:
+ /* If there are multiple active macs - change to SF_UNINIT */
+ new_state = SF_UNINIT;
+ }
+ return iwl_mvm_sf_config(mvm, sta_id, new_state);
+}
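The state selection in iwl_mvm_sf_update() boils down to a small decision table keyed on the number of bound non-P2P MACs. The sketch below restates that table outside the driver; the helper name and its standalone form are illustrative only, not part of the patch.

/* Illustrative restatement of the Smart Fifo state decision (hypothetical). */
static enum iwl_sf_state sf_state_for(int num_macs, bool is_sta, bool assoc)
{
	if (num_macs == 0)
		return SF_INIT_OFF;	/* no bound interfaces left */
	if (num_macs > 1)
		return SF_UNINIT;	/* several MACs: Smart Fifo disabled */
	if (!is_sta)
		return SF_UNINIT;	/* single bound interface, not a station */
	return assoc ? SF_FULL_ON : SF_INIT_OFF;
}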
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 329952363a54..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -452,8 +452,15 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
- /* This station is in use */
- if (!IS_ERR(sta))
+ /*
+ * This station is in use or RCU-removed; the latter happens in
+ * managed mode, where mac80211 removes the station before we
+ * can remove it from firmware (we can only do that after the
+ * MAC is marked unassociated), and possibly while the deauth
+ * frame to disconnect from the AP is still queued. Then, the
+ * station pointer is -ENOENT when the last skb is reclaimed.
+ */
+ if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
continue;
if (PTR_ERR(sta) == -EINVAL) {
@@ -645,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- static const u8 *baddr = _baddr;
+ const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
@@ -840,7 +847,7 @@ static const u8 tid_to_ac[] = {
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
int txq_id;
@@ -895,7 +902,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
int queue, fifo, ret;
u16 ssn;
@@ -932,26 +939,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- if (mvm->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch to RTS/CTS if it is the prefer protection
- * method for HT traffic
- * this function also sends the LQ command
- */
- return iwl_mvm_tx_protection(mvm, mvmsta, true);
- /*
- * TODO: remove the TLC_RTS flag when we tear down the last
- * AGG session (agg_tids_count in DVM)
- */
- }
-
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
int err;
@@ -1023,7 +1017,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
enum iwl_mvm_agg_state old_state;
@@ -1123,8 +1117,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
- WARN_ON(1);
- return -EINVAL;
+ key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
}
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
@@ -1288,8 +1282,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
0, NULL, CMD_SYNC);
break;
default:
- IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
- ret = -EINVAL;
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
+ sta_id, 0, NULL, CMD_SYNC);
}
if (ret)
@@ -1416,7 +1410,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
@@ -1438,7 +1432,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
u16 sleep_state_flags =
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4dfc359a4bdd..4968d0237dc5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -298,6 +298,12 @@ struct iwl_mvm_sta {
bool tt_tx_protection;
};
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
/**
* struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
* broadcast)
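The new iwl_mvm_sta_from_mac80211() accessor replaces the open-coded (void *)sta->drv_priv casts throughout the rest of this series. A typical call site then looks roughly like the following sketch (illustrative, not taken from the patch):

/* Sketch: fetching the driver-private station data via the new accessor. */
static u8 example_sta_id(struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	return mvmsta->sta_id;	/* drv_priv holds the iwl_mvm_sta */
}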
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
index eb74391d91ca..0241665925f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/testmode.h
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 95ce4b601fef..b4c2abaa297b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -249,12 +249,12 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_time_event_data *te_data = data;
struct iwl_time_event_resp *resp;
- int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
return true;
- if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
return true;
}
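The check above now compares against the payload length only, because iwl_rx_packet_payload_len() is expected to strip the packet header from the size encoded in len_n_flags. A sketch of the assumed semantics (the example_ helpers are stand-ins, not the real declarations):

/* Assumed semantics of the length helpers used above (sketch). */
static inline u32 example_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 example_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return example_packet_len(pkt) - sizeof(pkt->hdr);
}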
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index d9c8d6cfa2db..4a61c8c02372 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 1f3282dff513..3afa6b6bf835 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ static void check_exit_ctkill(struct work_struct *work)
iwl_trans_start_hw(mvm->trans);
temp = check_nic_temperature(mvm);
- iwl_trans_stop_hw(mvm->trans, false);
+ iwl_trans_stop_device(mvm->trans);
if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
@@ -388,7 +388,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (enable == mvmsta->tt_tx_protection)
continue;
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 43d97c33a75a..76ee486039d7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -253,8 +253,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
break;
default:
- IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
+ tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
}
}
@@ -276,6 +275,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return NULL;
memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = TX_CMD;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
@@ -361,7 +361,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 txq_id = info->hw_queue;
bool is_data_qos = false, is_ampdu = false;
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
fc = hdr->frame_control;
if (WARN_ON_ONCE(!mvmsta))
@@ -390,7 +390,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
is_data_qos = true;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
}
@@ -407,13 +406,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
}
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
- tid, txq_id, seq_number);
+ tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
if (is_data_qos && !ieee80211_has_morefrags(fc))
- mvmsta->tid_data[tid].seq_number = seq_number;
+ mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
spin_unlock(&mvmsta->lock);
@@ -432,7 +431,7 @@ drop:
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 tid)
{
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
struct ieee80211_vif *vif = mvmsta->vif;
@@ -660,9 +659,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL here; that would mean the station had been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
- if (!IS_ERR_OR_NULL(sta)) {
- mvmsta = (void *)sta->drv_priv;
+ if (!IS_ERR(sta)) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS) {
struct iwl_mvm_tid_data *tid_data =
@@ -676,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->lock);
}
} else {
- sta = NULL;
mvmsta = NULL;
}
@@ -684,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
- atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
- if (mvmsta) {
- /*
- * If there are no pending frames for this STA, notify
- * mac80211 that this station can go to sleep in its
- * STA table.
- */
- if (mvmsta->vif->type == NL80211_IFTYPE_AP)
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- /*
- * We might very well have taken mvmsta pointer while
- * the station was being removed. The remove flow might
- * have seen a pending_frame (because we didn't take
- * the lock) even if now the queues are drained. So make
- * really sure now that this the station is not being
- * removed. If it is, run the drain worker to remove it.
- */
- spin_lock_bh(&mvmsta->lock);
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (IS_ERR_OR_NULL(sta)) {
- /*
- * Station disappeared in the meantime:
- * so we are draining.
- */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
- spin_unlock_bh(&mvmsta->lock);
- } else if (!mvmsta) {
- /* Tx response without STA, so we are draining */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
+ if (txq_id >= mvm->first_agg_queue)
+ goto out;
+
+ /* We can't free more than one frame at once on a shared queue */
+ WARN_ON(skb_freed > 1);
+
+ /* If we still have frames pending for this STA, there is nothing to do here */
+ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+ goto out;
+
+ if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ * If mvmsta is not NULL, sta is valid.
+ */
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
}
+ if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+ /*
+ * We are draining and this was the last packet: pre_rcu_remove has
+ * already been called, and synchronize_net may already have completed,
+ * so iwl_mvm_rm_sta cannot be relied on to see the Tx queues empty.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+
+out:
rcu_read_unlock();
}
@@ -793,7 +793,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
}
@@ -822,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
- struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id, tid, freed;
-
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
@@ -849,7 +845,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
- mvmsta = (void *)sta->drv_priv;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
@@ -888,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* The packet was transmitted successfully; failures come as single
+ * frames because, before failing a frame, the firmware transmits it
+ * without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- info = IEEE80211_SKB_CB(skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
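Two of the Tx-path changes above revolve around the 802.11 seq_ctrl layout: the per-TID counter is advanced by 0x10 only after iwl_trans_tx() accepts the frame, and IEEE80211_SEQ_TO_SN() shifts the counter down for the debug print. A standalone sketch of that arithmetic (illustrative only):

#include <stdint.h>

/* seq_ctrl keeps the sequence number in bits 4..15 and the fragment number
 * in bits 0..3, so one frame corresponds to an increment of 0x10 (sketch,
 * mirrors IEEE80211_SCTL_SEQ / IEEE80211_SEQ_TO_SN). */
static uint16_t example_seq_to_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl & 0xfff0) >> 4;
}

static uint16_t example_next_seq_ctrl(uint16_t seq_ctrl)
{
	return (seq_ctrl + 0x10) & 0xfff0;	/* keep fragment bits clear */
}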
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index ed69e9b78e82..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -168,8 +168,8 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
goto out_free_resp;
}
- resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
ret = -EIO;
goto out_free_resp;
}
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
@@ -486,22 +488,18 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
* this case to clear the state indicating that station creation is in
* progress.
*/
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- u8 flags, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = flags,
+ .flags = init ? CMD_SYNC : CMD_ASYNC,
.data = { lq, },
};
if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
return -EINVAL;
- if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
- return -EINVAL;
-
return iwl_mvm_send_cmd(mvm, &cmd);
}
@@ -522,6 +520,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int i;
lockdep_assert_held(&mvm->mutex);
+
+ /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
+ if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+ return;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
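With the simplified iwl_mvm_send_lq_cmd() above, callers no longer choose command flags themselves; the init argument alone selects CMD_SYNC or CMD_ASYNC. A call site then looks roughly like this sketch (the wrapper is illustrative, not part of the patch):

/* Sketch of the new calling convention for the LQ command. */
static int example_update_rates(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvmsta, bool init)
{
	/* init == true  -> CMD_SYNC, used while setting the station up
	 * init == false -> CMD_ASYNC, safe for the rate-scaling hot path */
	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, init);
}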
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index e6272546395a..3872ead75488 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,6 +297,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
@@ -350,24 +353,30 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 051268c037b1..e851f26fd44c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -256,13 +256,13 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
* @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -274,7 +274,6 @@ struct iwl_trans_pcie {
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
int ict_index;
- u32 inta;
bool use_ict;
struct isr_statistics isr_stats;
@@ -296,7 +295,6 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
- unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
u8 n_no_reclaim_cmds;
@@ -313,24 +311,7 @@ struct iwl_trans_pcie {
/*protect hw register */
spinlock_t reg_lock;
-};
-
-/**
- * enum iwl_pcie_status: status of the PCIe transport
- * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
- * @STATUS_DEVICE_ENABLED: APM is enabled
- * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
- * @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
- * @STATUS_FW_ERROR: the fw is in error state
- */
-enum iwl_pcie_status {
- STATUS_HCMD_ACTIVE,
- STATUS_DEVICE_ENABLED,
- STATUS_TPOWER_PMI,
- STATUS_INT_ENABLED,
- STATUS_RFKILL,
- STATUS_FW_ERROR,
+ bool cmd_in_flight;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -363,7 +344,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -399,8 +380,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -417,14 +397,18 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -477,12 +461,31 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
-static inline void iwl_nic_error(struct iwl_trans *trans)
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- iwl_op_mode_nic_error(trans->op_mode);
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
#endif /* __iwl_trans_int_pcie_h__ */
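Moving __iwl_trans_pcie_set_bits_mask() and its wrappers into the header makes them usable from the other pcie files. Typical read-modify-write use looks like the sketch below; the register and flag names come from the existing CSR definitions, while the wrapper function itself is only illustrative:

/* Sketch: toggling a CSR flag with the helpers now exposed by internal.h. */
static void example_mac_access(struct iwl_trans *trans, bool request)
{
	if (request)
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	else
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}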
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index be3995afa9d0..08c23d497a02 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- unsigned long flags;
u32 reg;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxq->need_update == 0)
goto exit_unlock;
@@ -162,11 +161,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = (rxq->write & ~0x7);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
} else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
/* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+ if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -193,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->need_update = 0;
exit_unlock:
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -212,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
* If the device isn't enabled - not need to try to add buffers...
@@ -222,10 +217,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
@@ -242,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -251,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -273,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -311,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -332,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
@@ -343,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
@@ -382,13 +376,12 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_rxq_restock(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
@@ -514,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
@@ -522,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
@@ -538,16 +530,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
rxq->need_update = 1;
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
return 0;
}
@@ -556,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -567,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
cancel_work_sync(&trans_pcie->rx_replenish);
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
@@ -592,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -625,7 +615,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
@@ -694,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -715,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -791,7 +781,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans_pcie->wait_command_queue);
return;
@@ -800,14 +790,95 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_pcie_dump_csr(trans);
iwl_dump_fh(trans, NULL);
- /* set the ERROR bit before we wake up the caller */
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- wake_up(&trans_pcie->wait_command_queue);
-
local_bh_disable();
- iwl_nic_error(trans);
+ /* iwl_trans_fw_error() sets the STATUS_FW_ERROR bit; this must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
local_bh_enable();
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+/* Interrupt cause reading using the ICT table. With this mechanism the
+ * driver stops reading the INTA register to discover the device's
+ * interrupts, since reading that register is expensive. Instead, the device
+ * writes interrupt causes into the ICT table in DRAM, increments its index
+ * and fires an interrupt; the driver then ORs all ICT entries from the
+ * current index up to the first zero entry. That is the set of interrupts
+ * to service; the driver clears the consumed entries and updates the index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
@@ -817,12 +888,61 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- unsigned long flags;
u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* Read the interrupt cause from the ICT (DRAM) table if it is in use,
+ * otherwise fall back to the legacy INTA register read.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -835,19 +955,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
-
- inta = trans_pcie->inta;
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- /* saved interrupt in inta variable now we can reset trans_pcie->inta */
- trans_pcie->inta = 0;
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -894,14 +1008,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
wake_up(&trans_pcie->wait_command_queue);
} else {
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -1005,7 +1119,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
@@ -1022,11 +1136,6 @@ out:
*
******************************************************************************/
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
@@ -1051,7 +1160,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1063,17 +1172,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
return -EINVAL;
}
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
- (unsigned long long)trans_pcie->ict_tbl_dma);
-
- IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
-
- /* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- trans_pcie->ict_index = 0;
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
- /* add periodic RX interrupt */
- trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
return 0;
}
@@ -1084,12 +1186,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- unsigned long flags;
if (!trans_pcie->ict_tbl)
return;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1106,124 +1207,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta, inta_mask;
- irqreturn_t ret = IRQ_NONE;
-
- lockdep_assert_held(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the irq thread will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- if (inta & (~inta_mask)) {
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)...Ack and ignore\n",
- inta & (~inta_mask));
- iwl_write32(trans, CSR_INT, inta & (~inta_mask));
- inta &= inta_mask;
- }
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- return IRQ_HANDLED;
- }
-
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans,
- "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask,
- iwl_read32(trans, CSR_FH_INT_STATUS));
-
- trans_pcie->inta |= inta;
- /* the thread will service interrupts and re-enable them */
- if (likely(inta))
- return IRQ_WAKE_THREAD;
-
- ret = IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- return ret;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
-{
- struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
- u32 inta;
- u32 val = 0;
- u32 read;
- unsigned long flags;
- irqreturn_t ret = IRQ_NONE;
if (!trans)
return IRQ_NONE;
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (unlikely(!trans_pcie->use_ict)) {
- ret = iwl_pcie_isr(irq, data);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
- }
-
- trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
@@ -1231,73 +1234,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
*/
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In other words, bit 15 is set if bit 18 or bit 19 is set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
- inta, trans_pcie->inta_mask, val);
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
- iwl_read32(trans, CSR_INT_MASK));
-
- inta &= trans_pcie->inta_mask;
- trans_pcie->inta |= inta;
-
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta)) {
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_WAKE_THREAD;
- }
-
- ret = IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
+ return IRQ_WAKE_THREAD;
}
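
The scan described in the removed ICT handler above (OR all table entries from the current index up to the first zero entry, clearing each one and wrapping the index) can be summarized in a short, self-contained sketch. This is an illustration only, not the driver's code: the table size, the helper name and the omission of the little-endian conversion the real table needs are all assumptions.

#include <stdint.h>

#define ICT_COUNT 4096			/* assumed power-of-two table size */

/* Hypothetical helper: scan the ICT table starting at *index, OR together
 * all non-zero entries, clear them, and rebuild a CSR_INT-style bitmap. */
static uint32_t ict_scan(uint32_t *tbl, unsigned int *index)
{
	uint32_t val = 0;
	uint32_t read = tbl[*index];

	if (!read)
		return 0;			/* nothing to service */

	do {
		val |= read;
		tbl[*index] = 0;		/* hand the slot back to the device */
		*index = (*index + 1) & (ICT_COUNT - 1);
		read = tbl[*index];
	} while (read);

	if (val == 0xffffffffu)			/* bogus value, ignore it */
		val = 0;

	/* low byte stays in place, the second byte maps to the top half,
	 * mirroring the "(0xff & val) | ((0xff00 & val) << 16)" step above */
	return (val & 0xff) | ((val & 0xff00) << 16);
}
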
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index cde9c16f6e4f..f9507807b486 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,33 +75,6 @@
#include "iwl-agn-hw.h"
#include "internal.h"
-static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
-{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -150,7 +123,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
*/
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
@@ -206,6 +178,28 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
goto out;
}
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+ * This is a bit of an abuse - the workaround is needed for 7260 / 3160
+ * only, so we key off host_interrupt_operation_mode even though it is
+ * not otherwise related to this oscillator setting.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
/*
* Enable DMA clock and wait for it to stabilize.
*
@@ -223,7 +217,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/* Clear the interrupt in APMG if the NIC is in RFKILL */
iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
- set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
out:
return ret;
@@ -249,10 +243,9 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
@@ -273,13 +266,12 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_apm_init(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_set_pwr(trans, false);
@@ -582,7 +574,6 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -592,16 +583,14 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return -EIO;
}
- clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
-
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -640,12 +629,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@@ -657,7 +648,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -677,21 +668,45 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- iwl_enable_rfkill_int(trans);
+ spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/* clear all status bits */
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -776,7 +791,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@@ -787,7 +801,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
}
/* Reset the entire device */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(10, 15);
@@ -798,53 +812,30 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
}
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
-
- if (!op_mode_leaving) {
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- }
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -928,12 +919,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (state)
- set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
else
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
@@ -944,6 +933,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -983,6 +975,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
}
}
+out:
/*
* Fool sparse by faking we release the lock - sparse will
* track nic_access anyway.
@@ -1004,6 +997,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
*/
__acquire(&trans_pcie->reg_lock);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
@@ -1013,6 +1009,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
+out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -1457,7 +1454,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
- .stop_hw = iwl_trans_pcie_stop_hw,
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
@@ -1609,7 +1606,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
if (err) {
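
The rfkill handling added to stop_device above follows one simple pattern: remember the switch state on entry, re-read it after interrupts have been re-enabled, and only notify the op_mode when the state actually changed, which bounds the stop_device -> rfkill notification -> stop_device recursion. A minimal sketch of that pattern, with made-up helper names standing in for iwl_is_rfkill_set() and iwl_op_mode_hw_rf_kill():

#include <stdbool.h>

/* Illustrative only; the real driver also updates the STATUS_RFKILL bit. */
static void stop_device_rfkill_recheck(bool was_hw_rfkill,
				       bool (*read_rfkill)(void),
				       void (*notify_op_mode)(bool on))
{
	bool hw_rfkill = read_rfkill();

	/* Only report a change; an unchanged state means an op_mode that
	 * called us from its rfkill notification is not re-entered. */
	if (hw_rfkill != was_hw_rfkill)
		notify_op_mode(hw_rfkill);
}
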
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 0adde919a258..3d549008b3e2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
}
/*
@@ -289,21 +289,21 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
*/
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
if (txq->need_update == 0)
return;
- if (trans->cfg->base_params->shadow_reg_enable) {
+ if (trans->cfg->base_params->shadow_reg_enable ||
+ txq_id == trans_pcie->cmd_queue) {
/* shadow register enabled */
iwl_write32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
} else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
/* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+ if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/* wake up nic if it's powered down ...
* uCode will wake up, and interrupt us again, so next
* time we'll skip this part. */
@@ -739,10 +739,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
- unsigned long flags;
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_txq_set_sched(trans, 0);
@@ -759,13 +758,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
return 0;
- }
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -867,7 +872,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
- unsigned long flags;
bool alloc = false;
if (!trans_pcie->txq) {
@@ -877,7 +881,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -886,7 +890,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -1005,6 +1009,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
@@ -1023,10 +1028,20 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
}
}
+ if (q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ WARN_ON(!trans_pcie->cmd_in_flight);
+ trans_pcie->cmd_in_flight = false;
+ __iwl_trans_pcie_clear_bit(trans,
+ CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
iwl_pcie_txq_progress(trans_pcie, txq);
}
@@ -1143,8 +1158,15 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
- WARN_ONCE(1, "queue %d not used", txq_id);
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
return;
}
@@ -1178,12 +1200,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
- int i;
+ int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
@@ -1381,10 +1404,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned.
+ */
+ if (!trans_pcie->cmd_in_flight) {
+ trans_pcie->cmd_in_flight = true;
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ trans_pcie->cmd_in_flight = false;
+ idx = -EIO;
+ goto out;
+ }
+ }
+
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
@@ -1449,12 +1500,12 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
- if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
wake_up(&trans_pcie->wait_command_queue);
@@ -1466,7 +1517,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1494,13 +1544,12 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
- int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
"Command %s: a command is already active!\n",
get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
@@ -1511,64 +1560,39 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
- while (timeout > 0) {
- unsigned long flags;
-
- timeout -= COMMAND_POKE_TIMEOUT;
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
- COMMAND_POKE_TIMEOUT);
- if (ret)
- break;
- /* poke the device - it may have lost the command */
- if (iwl_trans_grab_nic_access(trans, true, &flags)) {
- iwl_trans_release_nic_access(trans, &flags);
- IWL_DEBUG_INFO(trans,
- "Tried to wake NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- } else {
- IWL_ERR(trans, "Failed to poke NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- break;
- }
- }
-
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_txq *txq =
- &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(trans_pcie, cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ get_cmd_string(trans_pcie, cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
- "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- IWL_DEBUG_INFO(trans,
- "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- ret = -ETIMEDOUT;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -ETIMEDOUT;
- iwl_nic_error(trans);
+ iwl_trans_fw_error(trans);
- goto cancel;
- }
+ goto cancel;
}
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
get_cmd_string(trans_pcie, cmd->id));
dump_stack();
@@ -1577,7 +1601,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -1614,13 +1638,8 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
- return -EIO;
-
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -1674,7 +1693,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
- dev_cmd->hdr.cmd = REPLY_TX;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
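
The cmd_in_flight handling introduced above keeps the NIC awake for as long as at least one host command is queued: the first enqueued command asserts MAC_ACCESS_REQ and polls for the MAC clock, and the request is dropped again once the command queue drains. A rough sketch of that bookkeeping follows; the helpers are placeholders, locking and the real register accesses are omitted.

#include <stdbool.h>

struct cmdq {
	bool cmd_in_flight;
	int read_ptr, write_ptr;
};

/* wake_nic() stands in for "set MAC_ACCESS_REQ and poll for the MAC clock",
 * sleep_nic() for clearing MAC_ACCESS_REQ again. */
static int hcmd_enqueue(struct cmdq *q, int (*wake_nic)(void))
{
	if (!q->cmd_in_flight) {
		q->cmd_in_flight = true;
		if (wake_nic() < 0) {		/* NIC did not wake up in time */
			q->cmd_in_flight = false;
			return -1;		/* -EIO in the driver */
		}
	}
	q->write_ptr++;				/* command handed to the NIC */
	return 0;
}

static void hcmd_reclaim(struct cmdq *q, void (*sleep_nic)(void))
{
	q->read_ptr++;
	if (q->read_ptr == q->write_ptr && q->cmd_in_flight) {
		q->cmd_in_flight = false;
		sleep_nic();			/* queue drained, let the NIC sleep */
	}
}
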
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 91f2ca90c70f..1a554a685e91 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -8,9 +8,8 @@
Ltd. under the terms of the GNU General Public License Version 2, June 1991
(the "License"). You may use, redistribute and/or modify this File in
accordance with the terms and conditions of the License, a copy of which
- is available along with the File in the license.txt file or by writing to
- the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+ is available along with the File in the license.txt file or on the worldwide
+ web at http://www.gnu.org/licenses/gpl.txt.
THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 116f4aba08d6..cb6d189bc3e6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
id = *pos++;
elen = *pos++;
left -= 2;
- if (elen > left || elen == 0) {
+ if (elen > left) {
lbs_deb_scan("scan response: invalid IE fmt\n");
goto done;
}
@@ -1268,14 +1268,9 @@ static struct cfg80211_scan_request *
_new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
{
struct cfg80211_scan_request *creq = NULL;
- int i, n_channels = 0;
+ int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
enum ieee80211_band band;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (wiphy->bands[band])
- n_channels += wiphy->bands[band]->n_channels;
- }
-
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
GFP_ATOMIC);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 178b222b3ce1..65f18f1e869c 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -248,7 +248,7 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
/* handle unexpected PS SLEEP event */
if (priv->psstate == PS_STATE_FULL_POWER) {
lbs_deb_cmd(
- "EVENT: in FULL POWER mode, ignoreing PS_SLEEP\n");
+ "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n");
break;
}
priv->psstate = PS_STATE_PRE_SLEEP;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 991238afd1b6..58c6ee5de98f 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -849,7 +849,7 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
card->started = true;
/* Tell PM core that we don't need the card to be
* powered now */
- pm_runtime_put_noidle(&func->dev);
+ pm_runtime_put(&func->dev);
}
}
@@ -907,8 +907,8 @@ static int if_sdio_power_on(struct if_sdio_card *card)
sdio_release_host(func);
ret = if_sdio_prog_firmware(card);
if (ret) {
- sdio_disable_func(func);
- return ret;
+ sdio_claim_host(func);
+ goto disable;
}
return 0;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 83669151bb82..f11728a866ff 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -93,7 +93,6 @@ static void free_if_spi_card(struct if_spi_card *card)
list_del(&packet->list);
kfree(packet);
}
- spi_set_drvdata(card->spi, NULL);
kfree(card);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a1b32ee9594a..69d4c3179d04 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -159,10 +159,15 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(5725-10, 5850+10, 40, 0, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
}
};
+static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
+ &hwsim_world_regdom_custom_01,
+ &hwsim_world_regdom_custom_02,
+};
+
struct hwsim_vif_priv {
u32 magic;
u8 bssid[ETH_ALEN];
@@ -321,8 +326,52 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+};
+
+static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
+ { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb[] = {
+ {
+ .limits = hwsim_if_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
static spinlock_t hwsim_radio_lock;
static struct list_head hwsim_radios;
+static int hwsim_radio_idx;
+
+static struct platform_driver mac80211_hwsim_driver = {
+ .driver = {
+ .name = "mac80211_hwsim",
+ .owner = THIS_MODULE,
+ },
+};
struct mac80211_hwsim_data {
struct list_head list;
@@ -332,8 +381,10 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct ieee80211_iface_combination if_combination;
struct mac_address addresses[2];
+ int channels, idx;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -353,7 +404,6 @@ struct mac80211_hwsim_data {
} ps;
bool ps_poll_pending;
struct dentry *debugfs;
- struct dentry *debugfs_ps;
struct sk_buff_head pending; /* packets pending */
/*
@@ -362,7 +412,6 @@ struct mac80211_hwsim_data {
* radio can be in more then one group.
*/
u64 group;
- struct dentry *debugfs_group;
int power_level;
@@ -403,21 +452,179 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
- [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC,
- .len = 6*sizeof(u8) },
- [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC,
- .len = 6*sizeof(u8) },
+ [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
[HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
- .len = IEEE80211_TX_MAX_RATES*sizeof(
- struct hwsim_tx_rate)},
+ .len = IEEE80211_TX_MAX_RATES *
+ sizeof(struct hwsim_tx_rate)},
[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
+ [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 },
+ [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
+ [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+ [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
+ [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
};
+static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct ieee80211_channel *chan);
+
+/* sysfs attributes */
+static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct sk_buff *skb;
+ struct ieee80211_pspoll *pspoll;
+
+ if (!vp->assoc)
+ return;
+
+ wiphy_debug(data->hw->wiphy,
+ "%s: send PS-Poll to %pM for aid %d\n",
+ __func__, vp->bssid, vp->aid);
+
+ skb = dev_alloc_skb(sizeof(*pspoll));
+ if (!skb)
+ return;
+ pspoll = (void *) skb_put(skb, sizeof(*pspoll));
+ pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_PSPOLL |
+ IEEE80211_FCTL_PM);
+ pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
+ memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
+ memcpy(pspoll->ta, mac, ETH_ALEN);
+
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
+}
+
+static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
+ struct ieee80211_vif *vif, int ps)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+
+ if (!vp->assoc)
+ return;
+
+ wiphy_debug(data->hw->wiphy,
+ "%s: send data::nullfunc to %pM ps=%d\n",
+ __func__, vp->bssid, ps);
+
+ skb = dev_alloc_skb(sizeof(*hdr));
+ if (!skb)
+ return;
+ hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ (ps ? IEEE80211_FCTL_PM : 0));
+ hdr->duration_id = cpu_to_le16(0);
+ memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
+ memcpy(hdr->addr2, mac, ETH_ALEN);
+ memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
+}
+
+
+static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ hwsim_send_nullfunc(data, mac, vif, 1);
+}
+
+static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = dat;
+ hwsim_send_nullfunc(data, mac, vif, 0);
+}
+
+static int hwsim_fops_ps_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->ps;
+ return 0;
+}
+
+static int hwsim_fops_ps_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ enum ps_mode old_ps;
+
+ if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
+ val != PS_MANUAL_POLL)
+ return -EINVAL;
+
+ old_ps = data->ps;
+ data->ps = val;
+
+ if (val == PS_MANUAL_POLL) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_ps_poll, data);
+ data->ps_poll_pending = true;
+ } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_ps,
+ data);
+ } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
+ ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ hwsim_send_nullfunc_no_ps,
+ data);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+ "%llu\n");
+
+static int hwsim_write_simulate_radar(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+
+ ieee80211_radar_detected(data->hw);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
+ hwsim_write_simulate_radar, "%llu\n");
+
+static int hwsim_fops_group_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->group;
+ return 0;
+}
+
+static int hwsim_fops_group_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ data->group = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
+ hwsim_fops_group_read, hwsim_fops_group_write,
+ "%llx\n");
+
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -641,7 +848,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
}
if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
- sizeof(struct mac_address), data->addresses[1].addr))
+ ETH_ALEN, data->addresses[1].addr))
goto nla_put_failure;
/* We get the skb->data */
@@ -880,7 +1087,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (channels == 1) {
+ if (data->channels == 1) {
channel = data->channel;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
@@ -908,7 +1115,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (control->sta)
hwsim_check_sta_magic(control->sta);
- if (rctbl)
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -1015,7 +1222,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
{
u32 _pid = ACCESS_ONCE(wmediumd_portid);
- if (rctbl) {
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
txi->control.rates,
@@ -1052,7 +1259,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
- if (rctbl)
+ if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
ieee80211_get_tx_rates(vif, NULL, skb,
info->control.rates,
ARRAY_SIZE(info->control.rates));
@@ -1143,7 +1350,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->channel = conf->chandef.chan;
- WARN_ON(data->channel && channels > 1);
+ WARN_ON(data->channel && data->channels > 1);
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
@@ -1390,8 +1597,6 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
[HWSIM_TM_ATTR_PS] = { .type = NLA_U32 },
};
-static int hwsim_fops_ps_write(void *dat, u64 val);
-
static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
@@ -1493,7 +1698,7 @@ static void hw_scan_work(struct work_struct *work)
req->channels[hwsim->scan_chan_idx]->center_freq);
hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
- if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+ if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR ||
!req->n_ssids) {
dwell = 120;
} else {
@@ -1702,8 +1907,7 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_chanctx_magic(ctx);
}
-static struct ieee80211_ops mac80211_hwsim_ops =
-{
+static const struct ieee80211_ops mac80211_hwsim_ops = {
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
.stop = mac80211_hwsim_stop,
@@ -1728,208 +1932,290 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.set_tsf = mac80211_hwsim_set_tsf,
};
+static struct ieee80211_ops mac80211_hwsim_mchan_ops;
-static void mac80211_hwsim_free(void)
+static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
+ const struct ieee80211_regdomain *regd,
+ bool reg_strict)
{
- struct list_head tmplist, *i, *tmp;
- struct mac80211_hwsim_data *data, *tmpdata;
-
- INIT_LIST_HEAD(&tmplist);
+ int err;
+ u8 addr[ETH_ALEN];
+ struct mac80211_hwsim_data *data;
+ struct ieee80211_hw *hw;
+ enum ieee80211_band band;
+ const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
+ int idx;
spin_lock_bh(&hwsim_radio_lock);
- list_for_each_safe(i, tmp, &hwsim_radios)
- list_move(i, &tmplist);
+ idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- list_for_each_entry_safe(data, tmpdata, &tmplist, list) {
- debugfs_remove(data->debugfs_group);
- debugfs_remove(data->debugfs_ps);
- debugfs_remove(data->debugfs);
- ieee80211_unregister_hw(data->hw);
- device_release_driver(data->dev);
- device_unregister(data->dev);
- ieee80211_free_hw(data->hw);
+ if (channels > 1)
+ ops = &mac80211_hwsim_mchan_ops;
+ hw = ieee80211_alloc_hw(sizeof(*data), ops);
+ if (!hw) {
+ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+ data = hw->priv;
+ data->hw = hw;
+
+ data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx);
+ if (IS_ERR(data->dev)) {
+ printk(KERN_DEBUG
+ "mac80211_hwsim: device_create failed (%ld)\n",
+ PTR_ERR(data->dev));
+ err = -ENOMEM;
+ goto failed_drvdata;
+ }
+ data->dev->driver = &mac80211_hwsim_driver.driver;
+ err = device_bind_driver(data->dev);
+ if (err != 0) {
+ printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
+ err);
+ goto failed_hw;
}
- class_destroy(hwsim_class);
-}
-
-static struct platform_driver mac80211_hwsim_driver = {
- .driver = {
- .name = "mac80211_hwsim",
- .owner = THIS_MODULE,
- },
-};
-
-static const struct net_device_ops hwsim_netdev_ops = {
- .ndo_start_xmit = hwsim_mon_xmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void hwsim_mon_setup(struct net_device *dev)
-{
- dev->netdev_ops = &hwsim_netdev_ops;
- dev->destructor = free_netdev;
- ether_setup(dev);
- dev->tx_queue_len = 0;
- dev->type = ARPHRD_IEEE80211_RADIOTAP;
- memset(dev->dev_addr, 0, ETH_ALEN);
- dev->dev_addr[0] = 0x12;
-}
+ skb_queue_head_init(&data->pending);
-static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct sk_buff *skb;
- struct ieee80211_pspoll *pspoll;
+ SET_IEEE80211_DEV(hw, data->dev);
+ memset(addr, 0, ETH_ALEN);
+ addr[0] = 0x02;
+ addr[3] = idx >> 8;
+ addr[4] = idx;
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
+
+ data->channels = channels;
+ data->idx = idx;
+
+ if (data->channels > 1) {
+ hw->wiphy->max_scan_ssids = 255;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+ /* For channels > 1 DFS is not allowed */
+ hw->wiphy->n_iface_combinations = 1;
+ hw->wiphy->iface_combinations = &data->if_combination;
+ data->if_combination = hwsim_if_comb[0];
+ data->if_combination.num_different_channels = data->channels;
+ } else {
+ hw->wiphy->iface_combinations = hwsim_if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
+ }
- if (!vp->assoc)
- return;
+ INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
+ INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
+
+ hw->queues = 5;
+ hw->offchannel_tx_hw_queue = 4;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+ hw->flags = IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (rctbl)
+ hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+ /* ask mac80211 to reserve space for magic */
+ hw->vif_data_size = sizeof(struct hwsim_vif_priv);
+ hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+ hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
+
+ memcpy(data->channels_2ghz, hwsim_channels_2ghz,
+ sizeof(hwsim_channels_2ghz));
+ memcpy(data->channels_5ghz, hwsim_channels_5ghz,
+ sizeof(hwsim_channels_5ghz));
+ memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband = &data->bands[band];
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ sband->channels = data->channels_2ghz;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
+ sband->bitrates = data->rates;
+ sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ sband->channels = data->channels_5ghz;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
+ sband->bitrates = data->rates + 4;
+ sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+ break;
+ default:
+ continue;
+ }
- wiphy_debug(data->hw->wiphy,
- "%s: send PS-Poll to %pM for aid %d\n",
- __func__, vp->bssid, vp->aid);
+ sband->ht_cap.ht_supported = true;
+ sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+ sband->ht_cap.ampdu_factor = 0x3;
+ sband->ht_cap.ampdu_density = 0x6;
+ memset(&sband->ht_cap.mcs, 0,
+ sizeof(sband->ht_cap.mcs));
+ sband->ht_cap.mcs.rx_mask[0] = 0xff;
+ sband->ht_cap.mcs.rx_mask[1] = 0xff;
+ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ hw->wiphy->bands[band] = sband;
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
+ }
- skb = dev_alloc_skb(sizeof(*pspoll));
- if (!skb)
- return;
- pspoll = (void *) skb_put(skb, sizeof(*pspoll));
- pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
- IEEE80211_STYPE_PSPOLL |
- IEEE80211_FCTL_PM);
- pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
- memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
- memcpy(pspoll->ta, mac, ETH_ALEN);
+ /* By default all radios belong to the first group */
+ data->group = 1;
+ mutex_init(&data->mutex);
- rcu_read_lock();
- mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
- rcu_read_unlock();
-}
+ /* Enable frame retransmissions for lossy channels */
+ hw->max_rates = 4;
+ hw->max_rate_tries = 11;
-static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
- struct ieee80211_vif *vif, int ps)
-{
- struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
+ if (reg_strict)
+ hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ if (regd) {
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wiphy_apply_custom_regulatory(hw->wiphy, regd);
+ /* give the regulatory workqueue a chance to run */
+ schedule_timeout_interruptible(1);
+ }
- if (!vp->assoc)
- return;
+ err = ieee80211_register_hw(hw);
+ if (err < 0) {
+ printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
+ err);
+ goto failed_hw;
+ }
- wiphy_debug(data->hw->wiphy,
- "%s: send data::nullfunc to %pM ps=%d\n",
- __func__, vp->bssid, ps);
+ wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
- skb = dev_alloc_skb(sizeof(*hdr));
- if (!skb)
- return;
- hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- (ps ? IEEE80211_FCTL_PM : 0));
- hdr->duration_id = cpu_to_le16(0);
- memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
- memcpy(hdr->addr2, mac, ETH_ALEN);
- memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ if (reg_alpha2)
+ regulatory_hint(hw->wiphy, reg_alpha2);
- rcu_read_lock();
- mac80211_hwsim_tx_frame(data->hw, skb,
- rcu_dereference(vif->chanctx_conf)->def.chan);
- rcu_read_unlock();
-}
+ data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
+ debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
+ debugfs_create_file("group", 0666, data->debugfs, data,
+ &hwsim_fops_group);
+ if (data->channels == 1)
+ debugfs_create_file("dfs_simulate_radar", 0222,
+ data->debugfs,
+ data, &hwsim_simulate_radar);
+ tasklet_hrtimer_init(&data->beacon_timer,
+ mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
-static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- hwsim_send_nullfunc(data, mac, vif, 1);
-}
+ spin_lock_bh(&hwsim_radio_lock);
+ list_add_tail(&data->list, &hwsim_radios);
+ spin_unlock_bh(&hwsim_radio_lock);
+ return idx;
-static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mac80211_hwsim_data *data = dat;
- hwsim_send_nullfunc(data, mac, vif, 0);
+failed_hw:
+ device_unregister(data->dev);
+failed_drvdata:
+ ieee80211_free_hw(hw);
+failed:
+ return err;
}
-
-static int hwsim_fops_ps_read(void *dat, u64 *val)
+static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
{
- struct mac80211_hwsim_data *data = dat;
- *val = data->ps;
- return 0;
+ debugfs_remove_recursive(data->debugfs);
+ ieee80211_unregister_hw(data->hw);
+ device_release_driver(data->dev);
+ device_unregister(data->dev);
+ ieee80211_free_hw(data->hw);
}
-static int hwsim_fops_ps_write(void *dat, u64 val)
+static void mac80211_hwsim_free(void)
{
- struct mac80211_hwsim_data *data = dat;
- enum ps_mode old_ps;
-
- if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
- val != PS_MANUAL_POLL)
- return -EINVAL;
-
- old_ps = data->ps;
- data->ps = val;
+ struct mac80211_hwsim_data *data;
- if (val == PS_MANUAL_POLL) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_ps,
- data);
- } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
- ieee80211_iterate_active_interfaces(data->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- hwsim_send_nullfunc_no_ps,
- data);
+ spin_lock_bh(&hwsim_radio_lock);
+ while ((data = list_first_entry_or_null(&hwsim_radios,
+ struct mac80211_hwsim_data,
+ list))) {
+ list_del(&data->list);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_destroy_radio(data);
+ spin_lock_bh(&hwsim_radio_lock);
}
-
- return 0;
+ spin_unlock_bh(&hwsim_radio_lock);
+ class_destroy(hwsim_class);
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
- "%llu\n");
-
-
-static int hwsim_fops_group_read(void *dat, u64 *val)
-{
- struct mac80211_hwsim_data *data = dat;
- *val = data->group;
- return 0;
-}
+static const struct net_device_ops hwsim_netdev_ops = {
+ .ndo_start_xmit = hwsim_mon_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
-static int hwsim_fops_group_write(void *dat, u64 val)
+static void hwsim_mon_setup(struct net_device *dev)
{
- struct mac80211_hwsim_data *data = dat;
- data->group = val;
- return 0;
+ dev->netdev_ops = &hwsim_netdev_ops;
+ dev->destructor = free_netdev;
+ ether_setup(dev);
+ dev->tx_queue_len = 0;
+ dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ memset(dev->dev_addr, 0, ETH_ALEN);
+ dev->dev_addr[0] = 0x12;
}
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
- hwsim_fops_group_read, hwsim_fops_group_write,
- "%llx\n");
-
-static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
- struct mac_address *addr)
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
{
struct mac80211_hwsim_data *data;
bool _found = false;
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr,
- sizeof(struct mac_address)) == 0) {
+ if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
_found = true;
break;
}
@@ -1952,27 +2238,26 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
struct hwsim_tx_rate *tx_attempts;
unsigned long ret_skb_ptr;
struct sk_buff *skb, *tmp;
- struct mac_address *src;
+ const u8 *src;
unsigned int hwsim_flags;
-
int i;
bool found = false;
+ if (info->snd_portid != wmediumd_portid)
+ return -EINVAL;
+
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
- !info->attrs[HWSIM_ATTR_FLAGS] ||
- !info->attrs[HWSIM_ATTR_COOKIE] ||
- !info->attrs[HWSIM_ATTR_TX_INFO])
+ !info->attrs[HWSIM_ATTR_FLAGS] ||
+ !info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
- src = (struct mac_address *)nla_data(
- info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
+ src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
-
ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
data2 = get_hwsim_data_ref_from_addr(src);
-
- if (data2 == NULL)
+ if (!data2)
goto out;
/* look for the skb matching the cookie passed back from user */
@@ -2029,38 +2314,37 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
- struct mac_address *dst;
+ const u8 *dst;
int frame_data_len;
- char *frame_data;
+ void *frame_data;
struct sk_buff *skb = NULL;
+ if (info->snd_portid != wmediumd_portid)
+ return -EINVAL;
+
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
!info->attrs[HWSIM_ATTR_RX_RATE] ||
!info->attrs[HWSIM_ATTR_SIGNAL])
goto out;
- dst = (struct mac_address *)nla_data(
- info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
-
+ dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
- frame_data = (char *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+ frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
/* Allocate new skb here */
skb = alloc_skb(frame_data_len, GFP_KERNEL);
if (skb == NULL)
goto err;
- if (frame_data_len <= IEEE80211_MAX_DATA_LEN) {
- /* Copy the data */
- memcpy(skb_put(skb, frame_data_len), frame_data,
- frame_data_len);
- } else
+ if (frame_data_len > IEEE80211_MAX_DATA_LEN)
goto err;
- data2 = get_hwsim_data_ref_from_addr(dst);
+ /* Copy the data */
+ memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
- if (data2 == NULL)
+ data2 = get_hwsim_data_ref_from_addr(dst);
+ if (!data2)
goto out;
/* check if radio is configured properly */
@@ -2068,7 +2352,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (data2->idle || !data2->started)
goto out;
- /*A frame is received from user space*/
+ /* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
rx_status.freq = data2->channel->center_freq;
rx_status.band = data2->channel->band;
@@ -2090,8 +2374,24 @@ out:
static int hwsim_register_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
- if (info == NULL)
- goto out;
+ struct mac80211_hwsim_data *data;
+ int chans = 1;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list)
+ chans = max(chans, data->channels);
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ /* In the future we should revise the userspace API and allow it
+ * to set a flag that it does support multi-channel, then we can
+ * let this pass conditionally on the flag.
+ * For current userspace, prohibit it since it won't work right.
+ */
+ if (chans > 1)
+ return -EOPNOTSUPP;
+
+ if (wmediumd_portid)
+ return -EBUSY;
wmediumd_portid = info->snd_portid;
@@ -2099,9 +2399,53 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
"switching to wmediumd mode with pid %d\n", info->snd_portid);
return 0;
-out:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
- return -EINVAL;
+}
+
+static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ unsigned int chans = channels;
+ const char *alpha2 = NULL;
+ const struct ieee80211_regdomain *regd = NULL;
+ bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+
+ if (info->attrs[HWSIM_ATTR_CHANNELS])
+ chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
+ if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
+ alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
+
+ if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
+ u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
+
+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+ return -EINVAL;
+ regd = hwsim_world_regdom_custom[idx];
+ }
+
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+}
+
+static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct mac80211_hwsim_data *data;
+ int idx;
+
+ if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx != idx)
+ continue;
+ list_del(&data->list);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_destroy_radio(data);
+ return 0;
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ return -ENODEV;
}
/* Generic Netlink operations array */
@@ -2122,6 +2466,18 @@ static const struct genl_ops hwsim_ops[] = {
.policy = hwsim_genl_policy,
.doit = hwsim_tx_info_frame_received_nl,
},
+ {
+ .cmd = HWSIM_CMD_CREATE_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_create_radio_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = HWSIM_CMD_DESTROY_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_destroy_radio_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -2150,10 +2506,6 @@ static int hwsim_init_netlink(void)
{
int rc;
- /* userspace test API hasn't been adjusted for multi-channel */
- if (channels > 1)
- return 0;
-
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
@@ -2173,77 +2525,36 @@ failure:
static void hwsim_exit_netlink(void)
{
- int ret;
-
- /* userspace test API hasn't been adjusted for multi-channel */
- if (channels > 1)
- return;
-
- printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
/* unregister the notifier */
netlink_unregister_notifier(&hwsim_netlink_notifier);
/* unregister the family */
- ret = genl_unregister_family(&hwsim_genl_family);
- if (ret)
- printk(KERN_DEBUG "mac80211_hwsim: "
- "unregister family %i\n", ret);
+ genl_unregister_family(&hwsim_genl_family);
}
-static const struct ieee80211_iface_limit hwsim_if_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
- { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
-};
-
-static struct ieee80211_iface_combination hwsim_if_comb = {
- .limits = hwsim_if_limits,
- .n_limits = ARRAY_SIZE(hwsim_if_limits),
- .max_interfaces = 2048,
- .num_different_channels = 1,
-};
-
static int __init init_mac80211_hwsim(void)
{
- int i, err = 0;
- u8 addr[ETH_ALEN];
- struct mac80211_hwsim_data *data;
- struct ieee80211_hw *hw;
- enum ieee80211_band band;
+ int i, err;
- if (radios < 1 || radios > 100)
+ if (radios < 0 || radios > 100)
return -EINVAL;
if (channels < 1)
return -EINVAL;
- if (channels > 1) {
- hwsim_if_comb.num_different_channels = channels;
- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
- mac80211_hwsim_ops.cancel_hw_scan =
- mac80211_hwsim_cancel_hw_scan;
- mac80211_hwsim_ops.sw_scan_start = NULL;
- mac80211_hwsim_ops.sw_scan_complete = NULL;
- mac80211_hwsim_ops.remain_on_channel =
- mac80211_hwsim_roc;
- mac80211_hwsim_ops.cancel_remain_on_channel =
- mac80211_hwsim_croc;
- mac80211_hwsim_ops.add_chanctx =
- mac80211_hwsim_add_chanctx;
- mac80211_hwsim_ops.remove_chanctx =
- mac80211_hwsim_remove_chanctx;
- mac80211_hwsim_ops.change_chanctx =
- mac80211_hwsim_change_chanctx;
- mac80211_hwsim_ops.assign_vif_chanctx =
- mac80211_hwsim_assign_vif_chanctx;
- mac80211_hwsim_ops.unassign_vif_chanctx =
- mac80211_hwsim_unassign_vif_chanctx;
- }
+ mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
+ mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+ mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
+ mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
+ mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
+ mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+ mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+ mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+ mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+ mac80211_hwsim_mchan_ops.assign_vif_chanctx =
+ mac80211_hwsim_assign_vif_chanctx;
+ mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
+ mac80211_hwsim_unassign_vif_chanctx;
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -2255,348 +2566,116 @@ static int __init init_mac80211_hwsim(void)
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto failed_unregister_driver;
+ goto out_unregister_driver;
}
- memset(addr, 0, ETH_ALEN);
- addr[0] = 0x02;
-
for (i = 0; i < radios; i++) {
- printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
- i);
- hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
- if (!hw) {
- printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
- "failed\n");
- err = -ENOMEM;
- goto failed;
- }
- data = hw->priv;
- data->hw = hw;
-
- data->dev = device_create(hwsim_class, NULL, 0, hw,
- "hwsim%d", i);
- if (IS_ERR(data->dev)) {
- printk(KERN_DEBUG
- "mac80211_hwsim: device_create failed (%ld)\n",
- PTR_ERR(data->dev));
- err = -ENOMEM;
- goto failed_drvdata;
- }
- data->dev->driver = &mac80211_hwsim_driver.driver;
- err = device_bind_driver(data->dev);
- if (err != 0) {
- printk(KERN_DEBUG
- "mac80211_hwsim: device_bind_driver failed (%d)\n",
- err);
- goto failed_hw;
- }
-
- skb_queue_head_init(&data->pending);
+ const char *reg_alpha2 = NULL;
+ const struct ieee80211_regdomain *regd = NULL;
+ bool reg_strict = false;
- SET_IEEE80211_DEV(hw, data->dev);
- addr[3] = i >> 8;
- addr[4] = i;
- memcpy(data->addresses[0].addr, addr, ETH_ALEN);
- memcpy(data->addresses[1].addr, addr, ETH_ALEN);
- data->addresses[1].addr[0] |= 0x40;
- hw->wiphy->n_addresses = 2;
- hw->wiphy->addresses = data->addresses;
-
- hw->wiphy->iface_combinations = &hwsim_if_comb;
- hw->wiphy->n_iface_combinations = 1;
-
- if (channels > 1) {
- hw->wiphy->max_scan_ssids = 255;
- hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
- hw->wiphy->max_remain_on_channel_duration = 1000;
- }
-
- INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
- INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
-
- hw->channel_change_time = 1;
- hw->queues = 5;
- hw->offchannel_tx_hw_queue = 4;
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
-
- hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_QUEUE_CONTROL;
- if (rctbl)
- hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
-
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
- hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
- /* ask mac80211 to reserve space for magic */
- hw->vif_data_size = sizeof(struct hwsim_vif_priv);
- hw->sta_data_size = sizeof(struct hwsim_sta_priv);
- hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
-
- memcpy(data->channels_2ghz, hwsim_channels_2ghz,
- sizeof(hwsim_channels_2ghz));
- memcpy(data->channels_5ghz, hwsim_channels_5ghz,
- sizeof(hwsim_channels_5ghz));
- memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
-
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- struct ieee80211_supported_band *sband = &data->bands[band];
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- sband->channels = data->channels_2ghz;
- sband->n_channels =
- ARRAY_SIZE(hwsim_channels_2ghz);
- sband->bitrates = data->rates;
- sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
- break;
- case IEEE80211_BAND_5GHZ:
- sband->channels = data->channels_5ghz;
- sband->n_channels =
- ARRAY_SIZE(hwsim_channels_5ghz);
- sband->bitrates = data->rates + 4;
- sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
- break;
- default:
- continue;
- }
-
- sband->ht_cap.ht_supported = true;
- sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
- sband->ht_cap.ampdu_factor = 0x3;
- sband->ht_cap.ampdu_density = 0x6;
- memset(&sband->ht_cap.mcs, 0,
- sizeof(sband->ht_cap.mcs));
- sband->ht_cap.mcs.rx_mask[0] = 0xff;
- sband->ht_cap.mcs.rx_mask[1] = 0xff;
- sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-
- hw->wiphy->bands[band] = sband;
-
- sband->vht_cap.vht_supported = true;
- sband->vht_cap.cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_RXLDPC |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
- sband->vht_cap.vht_mcs.rx_mcs_map =
- cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
- sband->vht_cap.vht_mcs.tx_mcs_map =
- sband->vht_cap.vht_mcs.rx_mcs_map;
- }
- /* By default all radios are belonging to the first group */
- data->group = 1;
- mutex_init(&data->mutex);
-
- /* Enable frame retransmissions for lossy channels */
- hw->max_rates = 4;
- hw->max_rate_tries = 11;
-
- /* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
- case HWSIM_REGTEST_DISABLED:
- case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
- case HWSIM_REGTEST_DRIVER_REG_ALL:
case HWSIM_REGTEST_DIFF_COUNTRY:
- /*
- * Nothing to be done for driver regulatory domain
- * hints prior to ieee80211_register_hw()
- */
- break;
- case HWSIM_REGTEST_WORLD_ROAM:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- }
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD:
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD_2:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- } else if (i == 1) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_02);
- }
- break;
- case HWSIM_REGTEST_STRICT_ALL:
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- case HWSIM_REGTEST_STRICT_FOLLOW:
- case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
- if (i == 0)
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- case HWSIM_REGTEST_ALL:
- if (i == 0) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_01);
- } else if (i == 1) {
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy_apply_custom_regulatory(hw->wiphy,
- &hwsim_world_regdom_custom_02);
- } else if (i == 4)
- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
- break;
- default:
- break;
- }
-
- /* give the regulatory workqueue a chance to run */
- if (regtest)
- schedule_timeout_interruptible(1);
- err = ieee80211_register_hw(hw);
- if (err < 0) {
- printk(KERN_DEBUG "mac80211_hwsim: "
- "ieee80211_register_hw failed (%d)\n", err);
- goto failed_hw;
- }
-
- /* Work to be done after to ieee80211_register_hw() */
- switch (regtest) {
- case HWSIM_REGTEST_WORLD_ROAM:
- case HWSIM_REGTEST_DISABLED:
+ if (i < ARRAY_SIZE(hwsim_alpha2s))
+ reg_alpha2 = hwsim_alpha2s[i];
break;
case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
if (!i)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ reg_alpha2 = hwsim_alpha2s[0];
break;
- case HWSIM_REGTEST_DRIVER_REG_ALL:
case HWSIM_REGTEST_STRICT_ALL:
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ reg_strict = true;
+ case HWSIM_REGTEST_DRIVER_REG_ALL:
+ reg_alpha2 = hwsim_alpha2s[0];
break;
- case HWSIM_REGTEST_DIFF_COUNTRY:
- if (i < ARRAY_SIZE(hwsim_alpha2s))
- regulatory_hint(hw->wiphy, hwsim_alpha2s[i]);
+ case HWSIM_REGTEST_WORLD_ROAM:
+ if (i == 0)
+ regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
+ regd = &hwsim_world_regdom_custom_01;
+ break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
- /*
- * Nothing to be done for custom world regulatory
- * domains after to ieee80211_register_hw
- */
+ if (i == 0)
+ regd = &hwsim_world_regdom_custom_01;
+ else if (i == 1)
+ regd = &hwsim_world_regdom_custom_02;
break;
case HWSIM_REGTEST_STRICT_FOLLOW:
- if (i == 0)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+ if (i == 0) {
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[0];
+ }
break;
case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
- if (i == 0)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
- else if (i == 1)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
+ if (i == 0) {
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[0];
+ } else if (i == 1) {
+ reg_alpha2 = hwsim_alpha2s[1];
+ }
break;
case HWSIM_REGTEST_ALL:
- if (i == 2)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
- else if (i == 3)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
- else if (i == 4)
- regulatory_hint(hw->wiphy, hwsim_alpha2s[2]);
+ switch (i) {
+ case 0:
+ regd = &hwsim_world_regdom_custom_01;
+ break;
+ case 1:
+ regd = &hwsim_world_regdom_custom_02;
+ break;
+ case 2:
+ reg_alpha2 = hwsim_alpha2s[0];
+ break;
+ case 3:
+ reg_alpha2 = hwsim_alpha2s[1];
+ break;
+ case 4:
+ reg_strict = true;
+ reg_alpha2 = hwsim_alpha2s[2];
+ break;
+ }
break;
default:
break;
}
- wiphy_debug(hw->wiphy, "hwaddr %pm registered\n",
- hw->wiphy->perm_addr);
-
- data->debugfs = debugfs_create_dir("hwsim",
- hw->wiphy->debugfsdir);
- data->debugfs_ps = debugfs_create_file("ps", 0666,
- data->debugfs, data,
- &hwsim_fops_ps);
- data->debugfs_group = debugfs_create_file("group", 0666,
- data->debugfs, data,
- &hwsim_fops_group);
-
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_REALTIME, HRTIMER_MODE_ABS);
-
- list_add_tail(&data->list, &hwsim_radios);
+ err = mac80211_hwsim_create_radio(channels, reg_alpha2,
+ regd, reg_strict);
+ if (err < 0)
+ goto out_free_radios;
}
hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
if (hwsim_mon == NULL) {
err = -ENOMEM;
- goto failed;
+ goto out_free_radios;
}
rtnl_lock();
-
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
- if (err < 0)
- goto failed_mon;
-
+ if (err < 0) {
+ rtnl_unlock();
+ goto out_free_radios;
+ }
err = register_netdevice(hwsim_mon);
- if (err < 0)
- goto failed_mon;
-
+ if (err < 0) {
+ rtnl_unlock();
+ goto out_free_mon;
+ }
rtnl_unlock();
err = hwsim_init_netlink();
if (err < 0)
- goto failed_nl;
+ goto out_free_mon;
return 0;
-failed_nl:
- printk(KERN_DEBUG "mac_80211_hwsim: failed initializing netlink\n");
- return err;
-
-failed_mon:
- rtnl_unlock();
+out_free_mon:
free_netdev(hwsim_mon);
+out_free_radios:
mac80211_hwsim_free();
- return err;
-
-failed_hw:
- device_unregister(data->dev);
-failed_drvdata:
- ieee80211_free_hw(hw);
-failed:
- mac80211_hwsim_free();
-failed_unregister_driver:
+out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
return err;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index afaad5a443b6..2747cce5a269 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -65,6 +65,9 @@ enum hwsim_tx_control_flags {
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters,
+ * returns the radio ID (>= 0) or negative on errors
+ * @HWSIM_CMD_DESTROY_RADIO: destroy a radio
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@@ -72,6 +75,8 @@ enum {
HWSIM_CMD_REGISTER,
HWSIM_CMD_FRAME,
HWSIM_CMD_TX_INFO_FRAME,
+ HWSIM_CMD_CREATE_RADIO,
+ HWSIM_CMD_DESTROY_RADIO,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1)
@@ -94,6 +99,14 @@ enum {
space
* @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array
* @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame
+ * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO
+ * command giving the number of channels supported by the new radio
+ * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO
+ * only to destroy a radio
+ * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatory driver hint
+ * (nla string, length 2)
+ * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
+ * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -108,6 +121,11 @@ enum {
HWSIM_ATTR_SIGNAL,
HWSIM_ATTR_TX_INFO,
HWSIM_ATTR_COOKIE,
+ HWSIM_ATTR_CHANNELS,
+ HWSIM_ATTR_RADIO_ID,
+ HWSIM_ATTR_REG_HINT_ALPHA2,
+ HWSIM_ATTR_REG_CUSTOM_REG,
+ HWSIM_ATTR_REG_STRICT_REG,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
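
The new HWSIM_CMD_CREATE_RADIO/HWSIM_CMD_DESTROY_RADIO commands and attributes documented above are driven from user space over generic netlink. As a rough illustration only (not part of this patch), the following libnl-3 sketch creates one radio with two channels; it assumes the generic netlink family name "MAC80211_HWSIM" and copies the numeric command/attribute values from the enums added above.

/* Hypothetical userspace sketch, not part of this patch: create a hwsim
 * radio over generic netlink using libnl-3. The family name and the
 * numeric values below are assumed to match the enums in
 * mac80211_hwsim.h as extended by this change.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define HWSIM_CMD_CREATE_RADIO	4	/* from the command enum above */
#define HWSIM_ATTR_CHANNELS	9	/* from the attribute enum above */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk || genl_connect(sk))
		return 1;

	family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;

	/* HWSIM_CMD_CREATE_RADIO requires CAP_NET_ADMIN (GENL_ADMIN_PERM) */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_CREATE_RADIO, 0);
	nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0;
}

Destroying a radio works the same way with HWSIM_CMD_DESTROY_RADIO plus a HWSIM_ATTR_RADIO_ID attribute carrying the index reported at creation time.
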
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4d71c7..5d9a8084665d 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
vht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 0b803c05cab3..7db1a89fdd95 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
@@ -483,7 +482,7 @@ mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
- if (!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN) &&
+ if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
tx_ba_tsr_tbl->tid == tid) {
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
flags);
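
Note that the memcmp()-to-helper conversion above does not invert the test: memcmp() returns 0 on a match while ether_addr_equal_unaligned() returns true, so !memcmp(a, b, ETH_ALEN) and the helper are equivalent. A one-line sketch of that equivalence (illustration only, not part of this patch):

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

/* Illustration only: both forms mean "the 6-byte addresses are equal";
 * the helper also tolerates addresses without 2-byte alignment.
 */
static bool ra_matches(const u8 *a, const u8 *b)
{
	return ether_addr_equal_unaligned(a, b); /* == !memcmp(a, b, ETH_ALEN) */
}
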
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 1214c587fd08..63211707f939 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,9 +69,9 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
/* Copy SNAP header */
- snap.snap_type =
- le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
- dt_offset += sizeof(u16);
+ snap.snap_type = ((struct ethhdr *)skb_src->data)->h_proto;
+
+ dt_offset += sizeof(__be16);
memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index f7ff4725506a..ecdf34505b54 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -31,12 +31,12 @@ config MWIFIEX_PCIE
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8797"
+ tristate "Marvell WiFi-Ex Driver for USB8797/8897"
depends on MWIFIEX && USB
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- Avastar 88W8797 chipset with USB interface.
+ 8797/8897 chipset with USB interface.
If you choose to build it as a module, it will be called
mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index aeaea0e3b4c4..8bfc07cd330e 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -50,24 +50,24 @@ static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
/* Channel 12 - 13 */
REG_RULE(2467-10, 2472+10, 20, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 14 */
REG_RULE(2484-10, 2484+10, 20, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_NO_OFDM),
/* Channel 36 - 48 */
REG_RULE(5180-10, 5240+10, 40, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 149 - 165 */
REG_RULE(5745-10, 5825+10, 40, 3, 20,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ NL80211_RRF_NO_IR),
/* Channel 52 - 64 */
REG_RULE(5260-10, 5320+10, 40, 3, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_DFS),
/* Channel 100 - 140 */
REG_RULE(5500-10, 5700+10, 40, 3, 30,
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_IR |
NL80211_RRF_DFS),
}
};
@@ -184,10 +184,10 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
*/
static int
mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct ieee80211_channel *chan, bool offchan,
- unsigned int wait, const u8 *buf, size_t len,
- bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
struct sk_buff *skb;
u16 pkt_len;
const struct ieee80211_mgmt *mgmt;
@@ -222,6 +222,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
mwifiex_queue_tx_pkt(priv, skb);
@@ -537,23 +538,33 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
request->alpha2[0], request->alpha2[1]);
- memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2));
-
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
case NL80211_REGDOM_SET_BY_USER:
- break;
- /* Todo: apply driver specific changes in channel flags based
- on the request initiator if necessary. */
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
break;
+ default:
+ wiphy_err(wiphy, "unknown regdom initiator: %d\n",
+ request->initiator);
+ return;
+ }
+
+ /* Don't send world or same regdom info to firmware */
+ if (strncmp(request->alpha2, "00", 2) &&
+ strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2))) {
+ memcpy(adapter->country_code, request->alpha2,
+ sizeof(request->alpha2));
+ mwifiex_send_domain_info_cmd_fw(wiphy);
+ mwifiex_dnld_txpwr_table(priv);
}
- mwifiex_send_domain_info_cmd_fw(wiphy);
}
/*
@@ -1170,10 +1181,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
else
bitmap_rates[1] = mask->control[band].legacy;
- /* Fill MCS rates */
- bitmap_rates[2] = mask->control[band].mcs[0];
+ /* Fill HT MCS rates */
+ bitmap_rates[2] = mask->control[band].ht_mcs[0];
if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
- bitmap_rates[2] |= mask->control[band].mcs[1] << 8;
+ bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
HostCmd_ACT_GEN_SET, 0, bitmap_rates);
@@ -1968,7 +1979,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
user_scan_cfg->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (chan->flags & IEEE80211_CHAN_NO_IR)
user_scan_cfg->chan_list[i].scan_type =
MWIFIEX_SCAN_TYPE_PASSIVE;
else
@@ -2438,7 +2449,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
ETH_ALEN);
mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
ETH_ALEN;
- mef_entry->filter[filt_num].offset = 14;
+ mef_entry->filter[filt_num].offset = 28;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (filt_num)
mef_entry->filter[filt_num].filt_action = TYPE_OR;
@@ -2666,6 +2677,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
struct wiphy *wiphy;
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
+ u32 thr, retry;
/* create a new wiphy for use with cfg80211 */
wiphy = wiphy_new(&mwifiex_cfg80211_ops,
@@ -2702,9 +2714,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_STRICT_REGULATORY |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->regulatory_flags |=
+ REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
@@ -2754,6 +2767,19 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
country_code);
}
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+ wiphy->frag_threshold = thr;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+ wiphy->rts_threshold = thr;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+ wiphy->retry_short = (u8) retry;
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+ wiphy->retry_long = (u8) retry;
+
adapter->wiphy = wiphy;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index e47f4e3012b8..1ddc8b2e3722 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -312,14 +312,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
}
if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
== MWIFIEX_BSS_ROLE_STA) {
- if (!sleep_cfm_buf->resp_ctrl)
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
/* Response is not needed for sleep
confirm command */
adapter->ps_state = PS_STATE_SLEEP;
else
adapter->ps_state = PS_STATE_SLEEP_CFM;
- if (!sleep_cfm_buf->resp_ctrl &&
+ if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
(adapter->is_hs_configured &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 5c85d7803d00..3a21bd03d6db 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -130,6 +130,7 @@ struct mwifiex_txinfo {
u8 flags;
u8 bss_num;
u8 bss_type;
+ u32 pkt_len;
};
enum mwifiex_wmm_ac_e {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index c8385ec77a86..5fa932d5f905 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -30,7 +30,7 @@ struct rfc_1042_hdr {
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
- u16 snap_type;
+ __be16 snap_type;
};
struct rx_packet_hdr {
@@ -226,7 +226,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
@@ -468,8 +468,6 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
-#define CFG_DATA_TYPE_CAL 2
-
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -610,12 +608,12 @@ struct mwifiex_ie_types_tsf_timestamp {
struct mwifiex_cf_param_set {
u8 cfp_cnt;
u8 cfp_period;
- u16 cfp_max_duration;
- u16 cfp_duration_remaining;
+ __le16 cfp_max_duration;
+ __le16 cfp_duration_remaining;
} __packed;
struct mwifiex_ibss_param_set {
- u16 atim_window;
+ __le16 atim_window;
} __packed;
struct mwifiex_ie_types_ss_param_set {
@@ -627,7 +625,7 @@ struct mwifiex_ie_types_ss_param_set {
} __packed;
struct mwifiex_fh_param_set {
- u16 dwell_time;
+ __le16 dwell_time;
u8 hop_set;
u8 hop_pattern;
u8 hop_index;
@@ -684,10 +682,10 @@ struct host_cmd_ds_802_11_key_material {
} __packed;
struct host_cmd_ds_gen {
- u16 command;
- u16 size;
- u16 seq_num;
- u16 result;
+ __le16 command;
+ __le16 size;
+ __le16 seq_num;
+ __le16 result;
};
#define S_DS_GEN sizeof(struct host_cmd_ds_gen)
@@ -820,8 +818,8 @@ struct ieee_types_cf_param_set {
u8 len;
u8 cfp_cnt;
u8 cfp_period;
- u16 cfp_max_duration;
- u16 cfp_duration_remaining;
+ __le16 cfp_max_duration;
+ __le16 cfp_duration_remaining;
} __packed;
struct ieee_types_ibss_param_set {
@@ -957,7 +955,7 @@ struct mwifiex_hs_config_param {
} __packed;
struct hs_activate_param {
- u16 resp_ctrl;
+ __le16 resp_ctrl;
} __packed;
struct host_cmd_ds_802_11_hs_cfg_enh {
@@ -1131,7 +1129,7 @@ struct host_cmd_ds_802_11_bg_scan_query {
} __packed;
struct host_cmd_ds_802_11_bg_scan_query_rsp {
- u32 report_condition;
+ __le32 report_condition;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
} __packed;
@@ -1230,7 +1228,7 @@ struct mwifiex_ie_types_wmm_queue_status {
struct mwifiex_ie_types_header header;
u8 queue_index;
u8 disabled;
- u16 medium_time;
+ __le16 medium_time;
u8 flow_required;
u8 flow_created;
u32 reserved;
@@ -1310,7 +1308,7 @@ struct mwifiex_ie_types_vht_oper {
u8 chan_center_freq_1;
u8 chan_center_freq_2;
/* Basic MCS set map, each 2 bits stands for a NSS */
- u16 basic_mcs_map;
+ __le16 basic_mcs_map;
} __packed;
struct mwifiex_ie_types_wmmcap {
@@ -1592,12 +1590,6 @@ struct mwifiex_ie_list {
struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
} __packed;
-struct host_cmd_ds_802_11_cfg_data {
- __le16 action;
- __le16 type;
- __le16 data_len;
-} __packed;
-
struct coalesce_filt_field_param {
u8 operation;
u8 operand_len;
@@ -1678,7 +1670,6 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sys_config uap_sys_config;
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
- struct host_cmd_ds_802_11_cfg_data cfg_data;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
} params;
} __packed;
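
The fw.h changes above mostly convert raw u16/u32 firmware fields to __le16/__le32 (and the SNAP ethertype to __be16) so that sparse can flag any access that forgets a byte-order conversion, as in the resp_ctrl fix in cmdevt.c earlier in this patch. A minimal sketch of the pattern, using a hypothetical structure for illustration only:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

/* Hypothetical firmware header, illustration only: fields the firmware
 * defines as little-endian are annotated __le16, so every access must go
 * through cpu_to_le16()/le16_to_cpu() or sparse will complain.
 */
struct example_fw_hdr {
	__le16 command;
	__le16 size;
} __packed;

static void fill_hdr(struct example_fw_hdr *hdr, u16 cmd, u16 len)
{
	hdr->command = cpu_to_le16(cmd);	/* CPU -> wire order */
	hdr->size = cpu_to_le16(len);
}

static bool is_cmd(const struct example_fw_hdr *hdr, u16 cmd)
{
	return le16_to_cpu(hdr->command) == cmd;	/* wire -> CPU order */
}
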
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 6499117fce43..1d0a817f2bf0 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -643,7 +643,8 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
if (priv)
priv->stats.rx_dropped++;
- adapter->if_ops.data_complete(adapter, skb);
+ dev_kfree_skb_any(skb);
+ adapter->if_ops.data_complete(adapter);
}
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 8bb8988c435c..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -648,6 +648,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = skb->len;
/* Record the current time the packet was queued; used to
* determine the amount of time the packet was queued in
@@ -747,9 +748,9 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
- skb->priority = cfg80211_classify8021d(skb);
+ skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
}
@@ -992,12 +993,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_unlock();
}
- priv = adapter->priv[0];
- if (!priv || !priv->wdev)
- goto exit_remove;
-
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
+ wiphy_unregister(adapter->wiphy);
+ wiphy_free(adapter->wiphy);
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 1d72f13adb9d..d8ad554ce39f 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -32,6 +32,7 @@
#include <net/lib80211.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
+#include <linux/of.h>
#include "decl.h"
#include "ioctl.h"
@@ -615,7 +616,7 @@ struct mwifiex_if_ops {
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
- int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
+ int (*data_complete) (struct mwifiex_adapter *);
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
@@ -739,6 +740,7 @@ struct mwifiex_adapter {
u8 scan_delay_cnt;
u8 empty_tx_q_cnt;
const struct firmware *cal_data;
+ struct device_node *dt_node;
/* 11AC */
u32 is_hw_11ac_capable;
@@ -1151,6 +1153,9 @@ void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+ struct device_node *node, const char *prefix);
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 03688aa14e8a..7fe7b53fb17a 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
+ /* If skb allocation failed earlier for this Rx packet,
+ * rx_buf_list[rd_index] would have been left NULL.

+ */
+ if (!skb_data)
+ return -ENOMEM;
+
MWIFIEX_SKB_PACB(skb_data, &buf_pa);
pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE);
@@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
+ mwifiex_pcie_enable_host_int(adapter);
+ if (mwifiex_write_reg(adapter,
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
+ return -1;
+ }
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
adapter->int_status |= pcie_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
- if (pcie_ireg & HOST_INTR_CMD_DONE) {
- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
- (adapter->ps_state == PS_STATE_SLEEP)) {
- mwifiex_pcie_enable_host_int(adapter);
- if (mwifiex_write_reg(adapter,
- PCIE_CPU_INT_EVENT,
- CPU_INTR_SLEEP_CFM_DONE)
- ) {
- dev_warn(adapter->dev,
- "Write register failed\n");
- return;
-
- }
- }
- } else if (!adapter->pps_uapsd_mode &&
- adapter->ps_state == PS_STATE_SLEEP &&
- mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP &&
+ mwifiex_pcie_ok_to_access_hw(adapter)) {
/* Potentially for PCIe we could get other
* interrupts like shared. Don't change power
* state until cookie is set */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8cf7d50a7603..668547c2de84 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -515,14 +515,14 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
- else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ else if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
@@ -1681,7 +1681,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
const u8 *ie_buf;
size_t ie_len;
u16 channel = 0;
- u64 fw_tsf = 0;
+ __le64 fw_tsf = 0;
u16 beacon_size = 0;
u32 curr_bcn_bytes;
u32 freq;
@@ -1815,7 +1815,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
ie_buf, ie_len, rssi, GFP_KERNEL);
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
- bss_priv->fw_tsf = fw_tsf;
+ bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
if (priv->media_connected &&
!memcmp(bssid,
priv->curr_bss_params.bss_descriptor
@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
curr_bss->ht_info_offset);
if (curr_bss->bcn_vht_cap)
- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_cap_offset);
+ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_cap_offset);
if (curr_bss->bcn_vht_oper)
- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_info_offset);
+ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_info_offset);
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 2181ee283d82..9208a8816b80 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -354,7 +354,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
}
if (hs_activate) {
hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
- hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
+ hs_cfg->params.hs_activate.resp_ctrl = cpu_to_le16(RESP_NEEDED);
} else {
hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
@@ -1156,30 +1156,62 @@ static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
return d - dst;
}
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+ struct device_node *node, const char *prefix)
+{
+#ifdef CONFIG_OF
+ struct property *prop;
+ size_t len = strlen(prefix);
+ int ret;
+
+ /* look for all matching property names */
+ for_each_property_of_node(node, prop) {
+ if (len > strlen(prop->name) ||
+ strncmp(prop->name, prefix, len))
+ continue;
+
+ /* property header is 6 bytes, data must fit in cmd buffer */
+ if (prop && prop->value && prop->length > 6 &&
+ prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0,
+ prop);
+ if (ret)
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
/* This function prepares command of set_cfg_data. */
static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action)
+ struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
struct mwifiex_adapter *adapter = priv->adapter;
- u32 len, cal_data_offset;
- u8 *tmp_cmd = (u8 *)cmd;
+ struct property *prop = data_buf;
+ u32 len;
+ u8 *data = (u8 *)cmd + S_DS_GEN;
+ int ret;
- cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
- if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
+ if (prop) {
+ len = prop->length;
+ ret = of_property_read_u8_array(adapter->dt_node, prop->name,
+ data, len);
+ if (ret)
+ return ret;
+ dev_dbg(adapter->dev,
+ "download cfg_data from device tree: %s\n", prop->name);
+ } else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
- adapter->cal_data->size,
- (u8 *)(tmp_cmd + cal_data_offset));
- else
+ adapter->cal_data->size, data);
+ dev_dbg(adapter->dev, "download cfg_data from config file\n");
+ } else {
return -1;
-
- cfg_data->action = cpu_to_le16(cmd_action);
- cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
- cfg_data->data_len = cpu_to_le16(len);
+ }
cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
- cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
+ cmd->size = cpu_to_le16(S_DS_GEN + len);
return 0;
}
@@ -1267,7 +1299,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
break;
case HostCmd_CMD_CFG_DATA:
- ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
+ ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, data_buf);
break;
case HostCmd_CMD_MAC_CONTROL:
ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
@@ -1527,7 +1559,19 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* Download calibration data to firmware */
+ /* Download calibration data to firmware.
+ * The cal-data can be read from device tree and/or
+ * a configuration file and downloaded to firmware.
+ */
+ adapter->dt_node =
+ of_find_node_by_name(NULL, "marvell_cfgdata");
+ if (adapter->dt_node) {
+ ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
+ "marvell,caldata");
+ if (ret)
+ return -1;
+ }
+
if (adapter->cal_data) {
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 2675ca7f8d14..24523e4015cb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -338,8 +338,7 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
if (!data_buf)
return -1;
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)((u8 *)data_buf);
pg = (struct mwifiex_power_group *)
((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
length = le16_to_cpu(pg_tlv_hdr->length);
@@ -383,19 +382,25 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
struct mwifiex_types_power_group *pg_tlv_hdr;
struct mwifiex_power_group *pg;
u16 action = le16_to_cpu(txp_cfg->action);
+ u16 tlv_buf_left;
- switch (action) {
- case HostCmd_ACT_GEN_GET:
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) txp_cfg +
- sizeof(struct host_cmd_ds_txpwr_cfg));
+ pg_tlv_hdr = (struct mwifiex_types_power_group *)
+ ((u8 *)txp_cfg +
+ sizeof(struct host_cmd_ds_txpwr_cfg));
- pg = (struct mwifiex_power_group *)
- ((u8 *) pg_tlv_hdr +
- sizeof(struct mwifiex_types_power_group));
+ pg = (struct mwifiex_power_group *)
+ ((u8 *)pg_tlv_hdr +
+ sizeof(struct mwifiex_types_power_group));
+ tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*txp_cfg);
+ if (tlv_buf_left <
+ le16_to_cpu(pg_tlv_hdr->length) + sizeof(*pg_tlv_hdr))
+ return 0;
+
+ switch (action) {
+ case HostCmd_ACT_GEN_GET:
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
- mwifiex_get_power_level(priv, txp_cfg);
+ mwifiex_get_power_level(priv, pg_tlv_hdr);
priv->tx_power_level = (u16) pg->power_min;
break;
@@ -404,14 +409,6 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
if (!le32_to_cpu(txp_cfg->mode))
break;
- pg_tlv_hdr = (struct mwifiex_types_power_group *)
- ((u8 *) txp_cfg +
- sizeof(struct host_cmd_ds_txpwr_cfg));
-
- pg = (struct mwifiex_power_group *)
- ((u8 *) pg_tlv_hdr +
- sizeof(struct mwifiex_types_power_group));
-
if (pg->power_max == pg->power_min)
priv->tx_power_level = (u16) pg->power_min;
break;
@@ -785,8 +782,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
}
/* If BSSID is diff, modify current BSS parameters */
- if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
- ibss_coal_resp->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(priv->curr_bss_params.bss_descriptor.mac_address, ibss_coal_resp->bssid)) {
/* BSSID */
memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
ibss_coal_resp->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index a09398fe9e2a..c5cb2ed19ec2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -184,6 +184,16 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+ if (priv->adapter->dt_node) {
+ char txpwr[] = {"marvell,00_txpwrlimit"};
+
+ memcpy(&txpwr[8], priv->adapter->country_code, 2);
+ mwifiex_dnld_dt_cfgdata(priv, priv->adapter->dt_node, txpwr);
+ }
+}
+
static int mwifiex_process_country_ie(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
@@ -205,6 +215,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
return 0;
}
+ if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
+ rcu_read_unlock();
+ wiphy_dbg(priv->wdev->wiphy,
+ "11D: skip setting domain info in FW\n");
+ return 0;
+ }
+ memcpy(priv->adapter->country_code, &country_ie[2], 2);
+
domain_info->country_code[0] = country_ie[2];
domain_info->country_code[1] = country_ie[3];
domain_info->country_code[2] = ' ';
@@ -226,6 +244,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
return -1;
}
+ mwifiex_dnld_txpwr_table(priv);
+
return 0;
}
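
In mwifiex_dnld_txpwr_table() above, the memcpy() at offset 8 overwrites the "00" placeholder in "marvell,00_txpwrlimit" with the current country code (8 is the length of the "marvell," prefix), so the driver downloads a per-country device-tree property. A small sketch of the name that gets built (illustration only; the node and property naming come from this patch, the country value is an example):

/* Illustration only: with country_code "US" the driver requests the
 * property "marvell,US_txpwrlimit" from the "marvell_cfgdata" node.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char txpwr[] = "marvell,00_txpwrlimit";
	const char country_code[2] = { 'U', 'S' };	/* example value */

	memcpy(&txpwr[8], country_code, 2);	/* 8 == strlen("marvell,") */
	printf("%s\n", txpwr);			/* marvell,US_txpwrlimit */
	return 0;
}
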
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index bb22664923ef..4651d676df38 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -36,12 +36,12 @@ mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
struct sk_buff *skb)
{
const struct mwifiex_arp_eth_header *arp;
- struct ethhdr *eth_hdr;
+ struct ethhdr *eth;
struct ipv6hdr *ipv6;
struct icmp6hdr *icmpv6;
- eth_hdr = (struct ethhdr *)skb->data;
- switch (ntohs(eth_hdr->h_proto)) {
+ eth = (struct ethhdr *)skb->data;
+ switch (ntohs(eth->h_proto)) {
case ETH_P_ARP:
arp = (void *)(skb->data + sizeof(struct ethhdr));
if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
@@ -87,16 +87,19 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
struct rxpd *local_rx_pd;
int hdr_chop;
- struct ethhdr *eth_hdr;
- u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+ struct ethhdr *eth;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_hdr = (void *)local_rx_pd +
le16_to_cpu(local_rx_pd->rx_pkt_offset);
- if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+ if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
@@ -106,7 +109,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
* To create the Ethernet II, just move the src, dst address
* right before the snap_type.
*/
- eth_hdr = (struct ethhdr *)
+ eth = (struct ethhdr *)
((u8 *) &rx_pkt_hdr->eth803_hdr
+ sizeof(rx_pkt_hdr->eth803_hdr) +
sizeof(rx_pkt_hdr->rfc1042_hdr)
@@ -114,14 +117,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
- sizeof(rx_pkt_hdr->eth803_hdr.h_source)
- sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
- memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
- sizeof(eth_hdr->h_source));
- memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
- sizeof(eth_hdr->h_dest));
+ memcpy(eth->h_source, rx_pkt_hdr->eth803_hdr.h_source,
+ sizeof(eth->h_source));
+ memcpy(eth->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
+ sizeof(eth->h_dest));
/* Chop off the rxpd + the excess memory from the 802.2/llc/snap
header that was removed. */
- hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
+ hdr_chop = (u8 *) eth - (u8 *) local_rx_pd;
} else {
/* Chop off the rxpd */
hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
@@ -185,12 +188,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
skb->len, rx_pkt_offset, rx_pkt_length);
priv->stats.rx_dropped++;
-
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
-
+ dev_kfree_skb_any(skb);
return ret;
}
@@ -226,7 +224,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
* directly to os. Don't pass thru rx reordering
*/
if (!IS_11N_ENABLED(priv) ||
- memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
+ !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
mwifiex_process_rx_packet(priv, skb);
return ret;
}
@@ -244,12 +242,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
ta, (u8) rx_pkt_type, skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
- }
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+ dev_kfree_skb_any(skb);
if (ret)
priv->stats.rx_dropped++;
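
The widened rfc1042 check above follows the usual 802.2/LLC/SNAP-to-Ethernet II decapsulation rule (compare cfg80211's ieee80211_data_to_8023()): frames carrying the RFC 1042 OUI are converted unless the payload is AppleTalk AARP or IPX, and frames carrying the Bridge-Tunnel OUI (used precisely for those two protocols) are converted as well. A standalone sketch of the same predicate, illustration only, assuming the rfc1042_header and bridge_tunnel_header arrays exported by cfg80211:

#include <linux/ieee80211.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* True when a 6-byte LLC/SNAP prefix plus its ethertype (network byte
 * order) should be replaced by a plain Ethernet II header. Illustration
 * only, not part of this patch.
 */
static bool llc_snap_to_ethii(const u8 *snap, __be16 proto)
{
	if (!memcmp(snap, bridge_tunnel_header, sizeof(bridge_tunnel_header)))
		return true;	/* Bridge-Tunnel OUI: AARP/IPX encapsulation */

	return !memcmp(snap, rfc1042_header, sizeof(rfc1042_header)) &&
	       proto != htons(ETH_P_AARP) && proto != htons(ETH_P_IPX);
}
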
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7b581af24f5f..354d64c9606f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -148,6 +148,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
+ tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
skb_push(skb, sizeof(struct txpd));
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 8f923d0d2ba6..37f26afd4314 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -40,6 +40,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct rxpd *local_rx_pd;
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+ int ret;
local_rx_pd = (struct rxpd *) (skb->data);
/* Get the BSS number from rxpd, get corresponding priv */
@@ -58,9 +59,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
rx_info->bss_type = priv->bss_type;
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- return mwifiex_process_uap_rx_packet(priv, skb);
+ ret = mwifiex_process_uap_rx_packet(priv, skb);
+ else
+ ret = mwifiex_process_sta_rx_packet(priv, skb);
+
+ /* Decrement RX pending counter for each packet */
+ if (adapter->if_ops.data_complete)
+ adapter->if_ops.data_complete(adapter);
- return mwifiex_process_sta_rx_packet(priv, skb);
+ return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -105,7 +112,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
switch (ret) {
case -ENOSR:
- dev_err(adapter->dev, "data: -ENOSR is returned\n");
+ dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -168,7 +175,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
- priv->stats.tx_bytes += skb->len;
+ priv->stats.tx_bytes += tx_info->pkt_len;
if (priv->tx_timeout_cnt)
priv->tx_timeout_cnt = 0;
} else {
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 92f76d655e6c..3c74eb254927 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -98,7 +98,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
int hdr_chop;
struct timeval tv;
struct ethhdr *p_ethhdr;
- u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
uap_rx_pd = (struct uap_rxpd *)(skb->data);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -112,8 +111,12 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
return;
}
- if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
- rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+ if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
/* Replace the 803 header and rfc1042 header (llc/snap) with
* an Ethernet II header, keep the src/dst and snap_type
* (ethertype).
@@ -144,7 +147,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
}
- /* Chop off the leading header bytes so the it points
+ /* Chop off the leading header bytes so that it points
* to the start of either the reconstructed EthII frame
* or the 802.2/llc/snap frame.
*/
@@ -176,6 +179,19 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+ if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
+ /* Update bridge packet statistics as the
+ * packet is not going to kernel/upper layer.
+ */
+ priv->stats.rx_bytes += skb->len;
+ priv->stats.rx_packets++;
+
+ /* Sending bridge packet to TX queue, so save the packet
+ * length in TXCB to update statistics in TX complete.
+ */
+ tx_info->pkt_len = skb->len;
+ }
+
do_gettimeofday(&tv);
skb->tstamp = timeval_to_ktime(tv);
mwifiex_wmm_add_buf_txqueue(priv, skb);
@@ -264,12 +280,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
le16_to_cpu(uap_rx_pd->rx_pkt_length));
priv->stats.rx_dropped++;
-
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
-
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -323,12 +334,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
uap_rx_pd->priority, ta, pkt_type,
skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter, skb);
- else
- dev_kfree_skb_any(skb);
- }
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+ dev_kfree_skb_any(skb);
if (ret)
priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edf5b7a24900..208748804a55 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,15 +22,19 @@
#define USB_VERSION "1.0"
-static const char usbdriver_name[] = "usb8797";
-
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
- {USB_DEVICE(USB8797_VID, USB8797_PID_1)},
- {USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
+ /* 8797 */
+ {USB_DEVICE(USB8XXX_VID, USB8797_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+ /* 8897 */
+ {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0xff)},
{ } /* Terminating entry */
@@ -343,10 +347,20 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
id_vendor, id_product, bcd_device);
/* PID_1 is used for firmware downloading only */
- if (id_product == USB8797_PID_1)
- card->usb_boot_state = USB8797_FW_DNLD;
- else
- card->usb_boot_state = USB8797_FW_READY;
+ switch (id_product) {
+ case USB8797_PID_1:
+ case USB8897_PID_1:
+ card->usb_boot_state = USB8XXX_FW_DNLD;
+ break;
+ case USB8797_PID_2:
+ case USB8897_PID_2:
+ card->usb_boot_state = USB8XXX_FW_READY;
+ break;
+ default:
+ pr_warning("unknown id_product %#x\n", id_product);
+ card->usb_boot_state = USB8XXX_FW_DNLD;
+ break;
+ }
card->udev = udev;
card->intf = intf;
@@ -511,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
MWIFIEX_BSS_ROLE_ANY),
MWIFIEX_ASYNC_CMD);
-#ifdef CONFIG_PM
- /* Resume handler may be called due to remote wakeup,
- * force to exit suspend anyway
- */
- usb_disable_autosuspend(card->udev);
-#endif /* CONFIG_PM */
-
return 0;
}
@@ -551,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver mwifiex_usb_driver = {
- .name = usbdriver_name,
+ .name = "mwifiex_usb",
.probe = mwifiex_usb_probe,
.disconnect = mwifiex_usb_disconnect,
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
- .supports_autosuspend = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -755,9 +761,20 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
- strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
usb_card = card;
+ switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+ case USB8897_PID_1:
+ case USB8897_PID_2:
+ strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
+ break;
+ case USB8797_PID_1:
+ case USB8797_PID_2:
+ default:
+ strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
+ break;
+ }
+
return 0;
}
@@ -773,7 +790,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
{
int ret = 0;
u8 *firmware = fw->fw_buf, *recv_buff;
- u32 retries = USB8797_FW_MAX_RETRY, dlen;
+ u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
struct fw_data *fwdata;
struct fw_sync_header sync_fw;
@@ -875,7 +892,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
continue;
}
- retries = USB8797_FW_MAX_RETRY;
+ retries = USB8XXX_FW_MAX_RETRY;
break;
}
fw_seqnum++;
@@ -899,13 +916,13 @@ static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
int ret;
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
- if (card->usb_boot_state == USB8797_FW_DNLD) {
+ if (card->usb_boot_state == USB8XXX_FW_DNLD) {
ret = mwifiex_prog_fw_w_helper(adapter, fw);
if (ret)
return -1;
/* Boot state changes after successful firmware download */
- if (card->usb_boot_state == USB8797_FW_DNLD)
+ if (card->usb_boot_state == USB8XXX_FW_DNLD)
return -1;
}
@@ -938,11 +955,9 @@ static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
return 0;
}
-static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
- struct sk_buff *skb)
+static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
{
atomic_dec(&adapter->rx_pending);
- dev_kfree_skb_any(skb);
return 0;
}
@@ -1041,4 +1056,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
MODULE_VERSION(USB_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
+MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 98c4316cd1a9..15b73d12e998 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -22,19 +22,23 @@
#include <linux/usb.h>
-#define USB8797_VID 0x1286
+#define USB8XXX_VID 0x1286
+
#define USB8797_PID_1 0x2043
#define USB8797_PID_2 0x2044
+#define USB8897_PID_1 0x2045
+#define USB8897_PID_2 0x2046
-#define USB8797_FW_DNLD 1
-#define USB8797_FW_READY 2
-#define USB8797_FW_MAX_RETRY 3
+#define USB8XXX_FW_DNLD 1
+#define USB8XXX_FW_READY 2
+#define USB8XXX_FW_MAX_RETRY 3
#define MWIFIEX_TX_DATA_URB 6
#define MWIFIEX_RX_DATA_URB 6
#define MWIFIEX_USB_TIMEOUT 100
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
+#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 5d9e150f4111..9b82e225880c 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -191,6 +191,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
if (!skb)
return -1;
+ priv->stats.rx_bytes += skb->len;
+ priv->stats.rx_packets++;
+
skb->dev = priv->netdev;
skb->protocol = eth_type_trans(skb, priv->netdev);
skb->ip_summed = CHECKSUM_NONE;
@@ -217,8 +220,6 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- priv->stats.rx_bytes += skb->len;
- priv->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
else
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 13eaeed03898..981cf6e7c73b 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
mwifiex_wmm_delete_all_ralist(priv);
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
- if (priv->adapter->if_ops.clean_pcie_ring)
+ if (priv->adapter->if_ops.clean_pcie_ring &&
+ !priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b953ad621e0b..4987c3f942ce 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -9,7 +9,6 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -1258,7 +1257,7 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
{
return priv->capture_beacon &&
ieee80211_is_beacon(wh->frame_control) &&
- ether_addr_equal(wh->addr3, priv->capture_bssid);
+ ether_addr_equal_64bits(wh->addr3, priv->capture_bssid);
}
static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
@@ -5893,8 +5892,6 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0;
- hw->channel_change_time = 10;
-
hw->queues = MWL8K_TX_WMM_QUEUES;
/* Set rssi values to dBm */
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 75c15bc7b34c..43790fbea0e0 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -40,7 +40,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include "hermes.h"
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index d21d95939316..c0a27377d9e2 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index bdfe637953f4..f9805c9353d2 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -52,7 +52,6 @@
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index e2264bc12ebf..b60048c95e0a 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index d43e3740e45d..0fe67d2da208 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/sort.h>
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index b3879fbf5368..bc065e8e348b 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 3837e1eec5f4..1f6fd5ff5531 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 067e6f2fd050..eede90b63f84 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
@@ -757,7 +756,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
- dev->channel_change_time = 1000; /* TODO: find actual value */
priv->beacon_req_id = cpu_to_le32(0);
priv->tx_stats[P54_QUEUE_BEACON].limit = 1;
priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1;
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index e3ed893b5aaf..aedfaf24f386 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -20,8 +20,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index f9a07b0d83ac..d411de409050 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index e328d3058c41..6e635cfa24c8 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -12,7 +12,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f95de0d16216..153c61539ec8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -17,7 +17,6 @@
*/
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>
@@ -308,7 +307,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
return;
/* only consider beacons from the associated BSSID */
- if (!ether_addr_equal(hdr->addr3, priv->bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, priv->bssid))
return;
tim = p54_find_ie(skb, WLAN_EID_TIM);
@@ -587,7 +586,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
chan = priv->curchan;
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
- survey->noise = clamp_t(s8, priv->noise, -128, 127);
+ survey->noise = clamp(priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
index 02fc67bccbd0..333c1a2f882e 100644
--- a/drivers/net/wireless/prism54/isl_38xx.c
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
index 19c33d313734..547ab885610b 100644
--- a/drivers/net/wireless/prism54/isl_38xx.h
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8863a6cb2388..78fa64d3f223 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -25,6 +24,7 @@
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -1861,7 +1861,7 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
list_for_each_entry(entry, &acl->mac_list, _list) {
- if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
+ if (ether_addr_equal(entry->addr, addr->sa_data)) {
list_del(&entry->_list);
acl->size--;
kfree(entry);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index a34bceb6e3cd..842a2549facc 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index 59e31258d450..83fec557997e 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e05d9b4c8317..931cf440ff18 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -914,7 +913,6 @@ islpci_setup(struct pci_dev *pdev)
do_islpci_free_memory:
islpci_free_memory(priv);
do_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
return NULL;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index c40403877f97..f6f088e05fe4 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 799e148d0370..674658f2e6ef 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 6ca30a5b7bfb..80f50f1bc6f2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 9e68e0cb718e..1105a12dbde8 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -199,7 +198,6 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
do_unregister_netdev:
unregister_netdev(ndev);
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
do_pci_clear_mwi:
@@ -247,7 +245,6 @@ prism54_remove(struct pci_dev *pdev)
/* free the PCI memory and unmap the remapped page */
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
priv = NULL;
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 9f19cceab487..0de14dfa68cc 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0db93db9b675..700c434c8803 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 056af38e72e3..47b34bfe890a 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/oid_mgt.h b/drivers/net/wireless/prism54/oid_mgt.h
index 92c8a2d4acd8..cf5141df8474 100644
--- a/drivers/net/wireless/prism54/oid_mgt.h
+++ b/drivers/net/wireless/prism54/oid_mgt.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
index aa1d1747784f..bc1401eb4b9d 100644
--- a/drivers/net/wireless/prism54/prismcompat.h
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -11,8 +11,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 9b557a1bb7f8..cbf0a589d32a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -17,8 +17,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Changes:
* Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8169a85c4498..5028557aa18a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Portions of this file are based on NDISwrapper project,
* Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
@@ -27,7 +26,6 @@
// #define VERBOSE // more; success messages
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 38ed9a3e44c8..4ccfef5094e0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index e4b07f0aa3cc..0fd3a9d01a60 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 0ac5c589ddce..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1880,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 9c10068e4987..573e87bcc553 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 85acc79f68b8..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -26,7 +24,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1709,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
+ /*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 1b91a4cef965..afba0739c3b8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index aab6b5e4f5dd..a394a9a95919 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -21,9 +21,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 776aff3678ff..41d4a8167dc3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -24,9 +24,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -6453,7 +6451,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
- rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
@@ -6466,7 +6464,8 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
else
rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
@@ -6486,10 +6485,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
- else
- rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
@@ -6510,16 +6506,26 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
- rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x42);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
- rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
- else
- rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd5);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xb5);
+ }
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
@@ -6602,7 +6608,6 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
- rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
@@ -7453,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u32 reg;
/*
- * Disable powersaving as default on PCI devices.
+ * Disable powersaving as default.
*/
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index a94ba447e63c..3019db637a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RT2800LIB_H
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
index a8cc736b5063..de4790b41be7 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
index 6a10de3eee3e..b63312ce3f27 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b504455b4fec..a5b32ca2cf0f 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -20,9 +20,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index a81c9ee281c0..9dfef4607d6b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -20,9 +20,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index 1359227ca411..f6d1bf5be006 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Module: rt2800soc
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a81ceb61d746..42a2e06512f2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -18,9 +18,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -31,7 +29,6 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -767,7 +764,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Overwrite TX done handler
*/
- PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
+ INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
return 0;
}
@@ -992,6 +989,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
{ USB_DEVICE(0x07d1, 0x3c17) },
+ { USB_DEVICE(0x2001, 0x3317) },
{ USB_DEVICE(0x2001, 0x3c1b) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 671ea3592610..ea7cac095997 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -17,9 +17,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e4ba2ce0f212..e3b885d8f7db 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -15,9 +15,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 8cb43f8f3efc..1122dc44c9fd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 3db0d99d9da7..a2fd05ba25ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7f7baae5ae02..2e3d1645e68b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index e11d39bdfef7..e65712c235bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9dd92a700442..2bde6729f5e6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -567,10 +565,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
#undef TID_CHECK
- if (!ether_addr_equal(ba->ra, entry->ta))
+ if (!ether_addr_equal_64bits(ba->ra, entry->ta))
continue;
- if (!ether_addr_equal(ba->ta, entry->ra))
+ if (!ether_addr_equal_64bits(ba->ta, entry->ra))
continue;
/* Mark BAR since we received the according BA */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 063ebcce97f8..4c0e01b5d515 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 1b4254b4272d..fbae2799e3ee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 997a6c89e66e..c681d04b506c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 3b46f0c3332a..b2c5269570da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 7f40ab8e1bd8..fb7c349ccc9c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c2b3b6629188..9b941c0c1264 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2183e7978399..ddeb5a709aa3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
index 64b06c6abe58..6f236ea180aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
index cda3dbcf7ead..701c3127efb9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 25da20e7e1f3..d93db4b0371b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -156,8 +154,6 @@ exit_release_regions:
exit_disable_device:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, NULL);
-
return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);
@@ -177,7 +173,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
/*
* Free the PCI device data.
*/
- pci_set_drvdata(pci_dev, NULL);
pci_disable_device(pci_dev);
pci_release_regions(pci_dev);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 60d90b20f8b9..bc0ca5f58f38 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a5d38e8ad9e4..5642ccceca7c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -15,9 +15,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ebe117224979..c48125be0e34 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 6f867eec49cc..3cc541d13d67 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 9271a5fce0a8..69a0cdadb07f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 474cbfc1efc7..9948d355e9a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 4e121627925d..10572452cc21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -14,9 +14,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 323ca7b2b095..e7bcf62347d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index a5b69cb49012..24402984ee57 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -27,7 +25,6 @@
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 9bc6b6044e34..1442075a8382 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1baf9c896dcd..a140170b1eb3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -27,7 +25,6 @@
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7577e0ba3877..4a4f235466d1 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -13,9 +13,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index a91506b12a62..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -108,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+ dma_addr_t mapping;
while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -129,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
+ mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(new_skb);
+ dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+ goto done;
+ }
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -159,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) =
- pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *((dma_addr_t *) skb->cb) = mapping;
}
done:
@@ -267,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+
+ }
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
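For reference, the error-handling pattern the rtl8180 hunks above introduce is, in isolation, roughly the following. This is a minimal sketch using the same legacy PCI DMA API as the driver (assumes <linux/pci.h>, <linux/skbuff.h>, <linux/errno.h>); the function name is illustrative, not taken from the file:

/* Sketch: map a TX buffer and bail out cleanly if the mapping fails,
 * instead of handing an invalid DMA address to the hardware.
 */
static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, skb->data, skb->len,
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *mapping)) {
		/* The descriptor is never armed; drop the frame. */
		kfree_skb(skb);
		dev_err(&pdev->dev, "TX DMA mapping error\n");
		return -ENOMEM;
	}
	return 0;
}

The RX path in the hunk follows the same idea: map the replacement skb first, and only unmap and replace the old buffer once the new mapping is known to be good.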
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index dc845693f321..b1bfee738937 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -19,7 +19,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index a63c443c3c6f..eebf23976524 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -18,7 +18,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index ee638d0749d6..d60a5f399022 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index 7614d9ccc729..959b049827de 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -19,7 +19,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 9a6edb0c014e..fd78df813a85 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -20,7 +20,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -416,7 +415,7 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
struct rtl8187_rx_info *info;
int ret = 0;
- while (skb_queue_len(&priv->rx_queue) < 16) {
+ while (skb_queue_len(&priv->rx_queue) < 32) {
skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
#ifndef RTL8187_H
#define RTL8187_H
+#include <linux/cache.h>
+
#include "rtl818x.h"
#include "leds.h"
@@ -139,7 +141,10 @@ struct rtl8187_priv {
u8 aifsn[4];
u8 rfkill_mask;
struct {
- __le64 buf;
+ union {
+ __le64 buf;
+ u8 dummy1[L1_CACHE_BYTES];
+ } ____cacheline_aligned;
struct sk_buff_head queue;
} b_tx_status; /* This queue is used by both -b and non-b devices */
struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
u8 bits8;
__le16 bits16;
__le32 bits32;
- } *io_dmabuf;
+ u8 dummy2[L1_CACHE_BYTES];
+ } *io_dmabuf ____cacheline_aligned;
bool rfkill_off;
u16 seqno;
};
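The rtl8187.h change above pads the small control buffers out to a full cache line so that USB DMA into them cannot share a cache line with neighbouring fields. As a standalone sketch (struct and field names are illustrative), the pattern looks like:

#include <linux/cache.h>
#include <linux/types.h>

struct example_ctrl_buf {
	union {
		__le64 buf;                 /* actual payload */
		u8 pad[L1_CACHE_BYTES];     /* pad to a full cache line */
	} ____cacheline_aligned;            /* start on a cache-line boundary */
};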
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index a26193a04447..5ecf18ed67b8 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -16,7 +16,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/usb.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index ff784072fb42..93bb384eb001 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -353,7 +353,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
/* TODO: Correct this value for our hw */
/* TODO: define these hard code value */
- hw->channel_change_time = 100;
hw->max_listen_interval = 10;
hw->max_rate_tries = 4;
/* hw->max_rates = 1; */
@@ -1293,7 +1292,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->link_info.bcn_rx_inperiod++;
@@ -1437,7 +1436,8 @@ void rtl_watchdog_wq_callback(void *data)
/* if we can't recv beacon for 6s, we should
* reconnect this AP
*/
- if (rtlpriv->link_info.roam_times >= 3) {
+ if ((rtlpriv->link_info.roam_times >= 3) &&
+ !is_zero_ether_addr(rtlpriv->mac80211.bssid)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"AP off, try to reconnect now\n");
rtlpriv->link_info.roam_times = 0;
@@ -1780,7 +1780,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
if (rtl_find_221_ie(hw, data, len))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 0e510f73041a..0276153c72cc 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -295,7 +295,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Does STA already exist? */
for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
- if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+ if (ether_addr_equal_unaligned(addr, sta_addr))
return i;
}
/* Get a free CAM entry. */
@@ -335,7 +335,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
if (((bitmap & BIT(0)) == BIT(0)) &&
- (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+ (ether_addr_equal_unaligned(addr, sta_addr))) {
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 210ce7cd94d8..2d337a0c3df0 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -46,10 +46,20 @@ void rtl_fw_cb(const struct firmware *firmware, void *context)
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
+ if (rtlpriv->cfg->alt_fw_name) {
+ err = request_firmware(&firmware,
+ rtlpriv->cfg->alt_fw_name,
+ rtlpriv->io.dev);
+ pr_info("Loading alternative firmware %s\n",
+ rtlpriv->cfg->alt_fw_name);
+ if (!err)
+ goto found_alt;
+ }
pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0;
return;
}
+found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Firmware is too big!\n");
@@ -184,6 +194,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
rtlpriv->cfg->maps
[RTL_IBSS_INT_MASKS]);
}
+ mac->link_state = MAC80211_LINKED;
break;
case NL80211_IFTYPE_ADHOC:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
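The rtl_fw_cb() hunk above adds a fallback request when the preferred firmware image is missing. In the driver the first request is issued asynchronously via request_firmware_nowait() and only the fallback runs synchronously inside the callback; a simplified, synchronous sketch of the same flow (helper name illustrative, assumes <linux/firmware.h>):

static int example_load_fw(struct device *dev, const char *fw_name,
			   const char *alt_fw_name,
			   const struct firmware **fw)
{
	int err;

	/* Preferred image first, then the alternative name if one is set. */
	err = request_firmware(fw, fw_name, dev);
	if (err && alt_fw_name) {
		pr_info("Loading alternative firmware %s\n", alt_fw_name);
		err = request_firmware(fw, alt_fw_name, dev);
	}
	if (err)
		pr_err("Firmware %s not available\n", fw_name);
	return err;
}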
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5a53195d016b..d7aa165fe677 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -688,8 +688,6 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 0d81f766fd0f..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
- return 1;
+ return false;
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
@@ -478,7 +478,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->psc.last_beacon = jiffies;
@@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
return;
/* check if this really is a beacon */
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index d7d0d4948b01..a4eb9b271438 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -59,30 +59,26 @@ static struct country_code_to_enum_rd allCountries[] = {
*/
#define RTL819x_2GHZ_CH12_13 \
REG_RULE(2467-10, 2472+10, 40, 0, 20,\
- NL80211_RRF_PASSIVE_SCAN)
+ NL80211_RRF_NO_IR)
#define RTL819x_2GHZ_CH14 \
REG_RULE(2484-10, 2484+10, 40, 0, 20, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_OFDM)
+ NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM)
/* 5G chan 36 - chan 64*/
#define RTL819x_5GHZ_5150_5350 \
REG_RULE(5150-10, 5350+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
/* 5G chan 100 - chan 165*/
#define RTL819x_5GHZ_5470_5850 \
REG_RULE(5470-10, 5850+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
/* 5G chan 149 - chan 165*/
#define RTL819x_5GHZ_5725_5850 \
REG_RULE(5725-10, 5850+10, 40, 0, 30, \
- NL80211_RRF_PASSIVE_SCAN | \
- NL80211_RRF_NO_IBSS)
+ NL80211_RRF_NO_IR)
#define RTL819x_5GHZ_ALL \
(RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850)
@@ -172,7 +168,8 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
(ch->flags & IEEE80211_CHAN_RADAR))
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(reg_rule))
continue;
@@ -185,16 +182,11 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
*regulatory_hint().
*/
- if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
- ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
- if (!(reg_rule->
- flags & NL80211_RRF_PASSIVE_SCAN))
- ch->flags &=
- ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
} else {
if (ch->beacon_found)
- ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN);
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
}
@@ -219,11 +211,11 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
ch = &sband->channels[11]; /* CH 12 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
ch = &sband->channels[12]; /* CH 13 */
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
return;
}
@@ -235,19 +227,19 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
ch = &sband->channels[11]; /* CH 12 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
ch = &sband->channels[12]; /* CH 13 */
- reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
if (!IS_ERR(reg_rule)) {
- if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
- if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
@@ -284,8 +276,7 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
*/
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ IEEE80211_CHAN_NO_IR;
}
}
@@ -354,9 +345,9 @@ static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
wiphy->reg_notifier = reg_notifier;
- wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
- wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
- wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
regd = _rtl_regdomain_select(reg);
wiphy_apply_custom_regulatory(wiphy, regd);
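The regd.c hunks above track the cfg80211 regulatory API change: NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS collapse into NL80211_RRF_NO_IR, freq_reg_info() now takes a frequency in kHz, and the wiphy flags move to regulatory_flags. A condensed sketch of the updated per-channel usage (function name illustrative):

static void example_clear_no_ir(struct wiphy *wiphy,
				struct ieee80211_channel *ch)
{
	const struct ieee80211_reg_rule *rule;

	/* freq_reg_info() wants kHz; ch->center_freq is in MHz. */
	rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
	if (IS_ERR(rule))
		return;

	/* NL80211_RRF_NO_IR replaces the old PASSIVE_SCAN/NO_IBSS pair. */
	if (!(rule->flags & NL80211_RRF_NO_IR))
		ch->flags &= ~IEEE80211_CHAN_NO_IR;
}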
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index 21a5cf060677..a6184b6e1d57 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -1078,7 +1078,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtldm->swing_flag_ofdm = true;
}
- if (rtldm->swing_idx_cck != rtldm->swing_idx_cck) {
+ if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) {
rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck;
rtldm->swing_flag_cck = true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index e9caa5d4cff0..eb78fd8607f7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -158,6 +158,42 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
+static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+
+void dm_restorepowerindex(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+
+ for (index = 0; index < 6; index++)
+ rtl_write_byte(rtlpriv, power_index_reg[index],
+ rtlpriv->dm.powerindex_backup[index]);
+}
+EXPORT_SYMBOL_GPL(dm_restorepowerindex);
+
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+
+ for (index = 0; index < 6; index++)
+ rtl_write_byte(rtlpriv, power_index_reg[index], value);
+}
+EXPORT_SYMBOL_GPL(dm_writepowerindex);
+
+void dm_savepowerindex(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index;
+ u8 tmp;
+
+ for (index = 0; index < 6; index++) {
+ tmp = rtl_read_byte(rtlpriv, power_index_reg[index]);
+ rtlpriv->dm.powerindex_backup[index] = tmp;
+ }
+}
+EXPORT_SYMBOL_GPL(dm_savepowerindex);
+
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -180,7 +216,12 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
- dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+ dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
+
+ dm_digtable->forbidden_igi = DM_DIG_MIN;
+ dm_digtable->large_fa_hit = 0;
+ dm_digtable->recover_cnt = 0;
+ dm_digtable->dig_dynamic_min = 0x25;
}
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
@@ -206,7 +247,9 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
}
- return (u8) rssi_val_min;
+ if (rssi_val_min > 100)
+ rssi_val_min = 100;
+ return (u8)rssi_val_min;
}
static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -224,9 +267,17 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
+
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
@@ -271,12 +322,14 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
value_igi++;
else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
+
if (value_igi > DM_DIG_FA_UPPER)
value_igi = DM_DIG_FA_UPPER;
else if (value_igi < DM_DIG_FA_LOWER)
value_igi = DM_DIG_FA_LOWER;
+
if (rtlpriv->falsealm_cnt.cnt_all > 10000)
- value_igi = 0x32;
+ value_igi = DM_DIG_FA_UPPER;
dm_digtable->cur_igvalue = value_igi;
rtl92c_dm_write_dig(hw);
@@ -286,32 +339,80 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *digtable = &rtlpriv->dm_digtable;
+ u32 isbt;
+
+ /* modify DIG lower bound, deal with abnormally large false alarm */
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ digtable->large_fa_hit++;
+ if (digtable->forbidden_igi < digtable->cur_igvalue) {
+ digtable->forbidden_igi = digtable->cur_igvalue;
+ digtable->large_fa_hit = 1;
+ }
- if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
- if ((digtable->back_val - 2) < digtable->back_range_min)
- digtable->back_val = digtable->back_range_min;
- else
- digtable->back_val -= 2;
- } else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
- if ((digtable->back_val + 2) > digtable->back_range_max)
- digtable->back_val = digtable->back_range_max;
- else
- digtable->back_val += 2;
+ if (digtable->large_fa_hit >= 3) {
+ if ((digtable->forbidden_igi + 1) >
+ digtable->rx_gain_max)
+ digtable->rx_gain_min = digtable->rx_gain_max;
+ else
+ digtable->rx_gain_min = (digtable->forbidden_igi + 1);
+ digtable->recover_cnt = 3600; /* 3600=2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (digtable->recover_cnt != 0) {
+ digtable->recover_cnt--;
+ } else {
+ if (digtable->large_fa_hit == 0) {
+ if ((digtable->forbidden_igi-1) < DM_DIG_MIN) {
+ digtable->forbidden_igi = DM_DIG_MIN;
+ digtable->rx_gain_min = DM_DIG_MIN;
+ } else {
+ digtable->forbidden_igi--;
+ digtable->rx_gain_min = digtable->forbidden_igi + 1;
+ }
+ } else if (digtable->large_fa_hit == 3) {
+ digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ if (rtlpriv->falsealm_cnt.cnt_all < 250) {
+ isbt = rtl_read_byte(rtlpriv, 0x4fd) & 0x01;
+
+ if (!isbt) {
+ if (rtlpriv->falsealm_cnt.cnt_all >
+ digtable->fa_lowthresh) {
+ if ((digtable->back_val - 2) <
+ digtable->back_range_min)
+ digtable->back_val = digtable->back_range_min;
+ else
+ digtable->back_val -= 2;
+ } else if (rtlpriv->falsealm_cnt.cnt_all <
+ digtable->fa_lowthresh) {
+ if ((digtable->back_val + 2) >
+ digtable->back_range_max)
+ digtable->back_val = digtable->back_range_max;
+ else
+ digtable->back_val += 2;
+ }
+ } else {
+ digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ }
+ } else {
+ /* Adjust initial gain by false alarm */
+ if (rtlpriv->falsealm_cnt.cnt_all > 1000)
+ digtable->cur_igvalue = digtable->pre_igvalue + 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 750)
+ digtable->cur_igvalue = digtable->pre_igvalue + 1;
+ else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+ digtable->cur_igvalue = digtable->pre_igvalue - 1;
}
- if ((digtable->rssi_val_min + 10 - digtable->back_val) >
- digtable->rx_gain_max)
+ /* Check initial gain by upper/lower bound */
+ if (digtable->cur_igvalue > digtable->rx_gain_max)
digtable->cur_igvalue = digtable->rx_gain_max;
- else if ((digtable->rssi_val_min + 10 -
- digtable->back_val) < digtable->rx_gain_min)
- digtable->cur_igvalue = digtable->rx_gain_min;
- else
- digtable->cur_igvalue = digtable->rssi_val_min + 10 -
- digtable->back_val;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "rssi_val_min = %x back_val %x\n",
- digtable->rssi_val_min, digtable->back_val);
+ if (digtable->cur_igvalue < digtable->rx_gain_min)
+ digtable->cur_igvalue = digtable->rx_gain_min;
rtl92c_dm_write_dig(hw);
}
@@ -329,7 +430,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
multi_sta = true;
if (!multi_sta ||
- dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
+ dm_digtable->cursta_cstate == DIG_STA_DISCONNECT) {
initialized = false;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
@@ -375,7 +476,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
"presta_cstate = %x, cursta_cstate = %x\n",
dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
-
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
@@ -383,6 +483,8 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
+ if (dm_digtable->rssi_val_min > 100)
+ dm_digtable->rssi_val_min = 100;
rtl92c_dm_ctrl_initgain_by_rssi(hw);
}
} else {
@@ -398,11 +500,12 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+ if (dm_digtable->rssi_val_min > 100)
+ dm_digtable->rssi_val_min = 100;
if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
if (dm_digtable->rssi_val_min <= 25)
@@ -424,48 +527,14 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
}
if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
- if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
- if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
- dm_digtable->cur_cck_fa_state =
- CCK_FA_STAGE_High;
- else
- dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
-
- if (dm_digtable->pre_cck_fa_state !=
- dm_digtable->cur_cck_fa_state) {
- if (dm_digtable->cur_cck_fa_state ==
- CCK_FA_STAGE_Low)
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0x83);
- else
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
- 0xcd);
-
- dm_digtable->pre_cck_fa_state =
- dm_digtable->cur_cck_fa_state;
- }
-
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd7);
- } else {
+ if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) ||
+ (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
+ else
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
- rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
- if (IS_92C_SERIAL(rtlhal->version))
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
- MASKBYTE2, 0xd3);
- }
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
}
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
- dm_digtable->cur_cck_pd_state);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
- IS_92C_SERIAL(rtlhal->version));
}
static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
@@ -482,6 +551,8 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
else
dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
+
rtl92c_dm_initial_gain_sta(hw);
rtl92c_dm_initial_gain_multi_sta(hw);
rtl92c_dm_cck_packet_detection_thresh(hw);
@@ -493,23 +564,26 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
if (rtlpriv->dm.dm_initialgain_enable == false)
return;
- if (dm_digtable->dig_enable_flag == false)
+ if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
return;
rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
}
static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- rtlpriv->dm.dynamic_txpower_enable = false;
-
+ if (rtlpriv->rtlhal.interface == INTF_USB &&
+ rtlpriv->rtlhal.board_type & 0x1) {
+ dm_savepowerindex(hw);
+ rtlpriv->dm.dynamic_txpower_enable = true;
+ } else {
+ rtlpriv->dm.dynamic_txpower_enable = false;
+ }
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
@@ -524,9 +598,14 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
dm_digtable->back_val);
- dm_digtable->cur_igvalue += 2;
- if (dm_digtable->cur_igvalue > 0x3f)
- dm_digtable->cur_igvalue = 0x3f;
+ if (rtlpriv->rtlhal.interface == INTF_USB &&
+ !dm_digtable->dig_enable_flag) {
+ dm_digtable->pre_igvalue = 0x17;
+ return;
+ }
+ dm_digtable->cur_igvalue -= 1;
+ if (dm_digtable->cur_igvalue < DM_DIG_MIN)
+ dm_digtable->cur_igvalue = DM_DIG_MIN;
if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -536,11 +615,47 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
}
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING,
+ "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->rssi_val_min, dm_digtable->back_val,
+ dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
+ dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_AP) {
+ /* TODO: Handle ADHOC and AP Mode */
+ }
+
+ if (tmpentry_max_pwdb != 0)
+ rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
+ else
+ rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+
+ if (tmpentry_min_pwdb != 0xff)
+ rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
+ else
+ rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+
+/* TODO:
+ * if (mac->opmode == NL80211_IFTYPE_STATION) {
+ * if (rtlpriv->rtlhal.fw_ready) {
+ * u32 param = (u32)(rtlpriv->dm.undec_sm_pwdb << 16);
+ * rtl8192c_set_rssi_cmd(hw, param);
+ * }
+ * }
+ */
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -750,6 +865,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
+ /* Handle USB High PA boards */
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
@@ -1140,22 +1256,22 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
- static u8 initialize;
- static u32 reg_874, reg_c70, reg_85c, reg_a74;
- if (initialize == 0) {
- reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- MASKDWORD) & 0x1CC000) >> 14;
+ if (!rtlpriv->reg_init) {
+ rtlpriv->reg_874 = (rtl_get_bbreg(hw,
+ RFPGA0_XCD_RFINTERFACESW,
+ MASKDWORD) & 0x1CC000) >> 14;
- reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
- MASKDWORD) & BIT(3)) >> 3;
+ rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+ MASKDWORD) & BIT(3)) >> 3;
- reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
- MASKDWORD) & 0xFF000000) >> 24;
+ rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ MASKDWORD) & 0xFF000000) >> 24;
- reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+ rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
+ 0xF000) >> 12;
- initialize = 1;
+ rtlpriv->reg_init = true;
}
if (!bforce_in_normal) {
@@ -1192,12 +1308,12 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
} else {
rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
- 0x1CC000, reg_874);
+ 0x1CC000, rtlpriv->reg_874);
rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
- reg_c70);
+ rtlpriv->reg_c70);
rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
- reg_85c);
- rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+ rtlpriv->reg_85c);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
}
@@ -1213,6 +1329,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ /* Determine the minimum RSSI */
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
@@ -1241,6 +1358,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
dm_pstable->rssi_val_min);
}
+ /* Power Saving for 92C */
if (IS_92C_SERIAL(rtlhal->version))
;/* rtl92c_dm_1r_cca(hw); */
else
@@ -1252,12 +1370,23 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtlpriv->dm.dm_flag = DYNAMIC_FUNC_DISABLE | DYNAMIC_FUNC_DIG;
+ rtlpriv->dm.undec_sm_pwdb = -1;
+ rtlpriv->dm.undec_sm_cck = -1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtl92c_dm_diginit(hw);
+
+ rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
rtl92c_dm_init_dynamic_txpower(hw);
+
rtl92c_dm_init_edca_turbo(hw);
rtl92c_dm_init_rate_adaptive_mask(hw);
+ rtlpriv->dm.dm_flag |= DYNAMIC_FUNC_SS;
rtl92c_dm_initialize_txpower_tracking(hw);
rtl92c_dm_init_dynamic_bb_powersaving(hw);
+
+ rtlpriv->dm.ofdm_pkt_cnt = 0;
+ rtlpriv->dm.dm_rssi_sel = RSSI_DEFAULT;
}
EXPORT_SYMBOL(rtl92c_dm_init);
@@ -1308,7 +1437,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
- rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
@@ -1328,8 +1457,16 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_NORMAL)
+ dm_restorepowerindex(hw);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ dm_writepowerindex(hw, 0x14);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ dm_writepowerindex(hw, 0x10);
}
-
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
@@ -1400,12 +1537,6 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
- /* Set Tx Power according to BT status. */
- if (undec_sm_pwdb >= 30)
- curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
- else if (undec_sm_pwdb < 25)
- curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
-
/* Check BT state related to BT_Idle in B/G mode. */
if (undec_sm_pwdb < 15)
curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 518e208c0180..4f232a063636 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,6 +91,17 @@
#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define DYNAMIC_FUNC_DISABLE 0x0
+#define DYNAMIC_FUNC_DIG BIT(0)
+#define DYNAMIC_FUNC_HP BIT(1)
+#define DYNAMIC_FUNC_SS BIT(2) /*Tx Power Tracking*/
+#define DYNAMIC_FUNC_BT BIT(3)
+#define DYNAMIC_FUNC_ANT_DIV BIT(4)
+
+#define RSSI_CCK 0
+#define RSSI_OFDM 1
+#define RSSI_DEFAULT 2
+
struct swat_t {
u8 failure_cnt;
u8 try_flag;
@@ -167,5 +178,8 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 0c0e78263a66..9e32ac8a4425 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1147,6 +1147,12 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
+ u32 iqk_bb_reg_92C[9] = {
+ 0xc04, 0xc08, 0x874, 0xb68,
+ 0xb6c, 0x870, 0x860, 0x864,
+ 0x800
+ };
+
const u32 retrycount = 2;
if (t == 0) {
@@ -1157,6 +1163,8 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
rtlphy->adda_backup, 16);
_rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
+ _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg_92C,
+ rtlphy->iqk_bb_backup, 9);
}
_rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0) {
@@ -1167,14 +1175,18 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
if (!rtlphy->rfpi_enable)
_rtl92c_phy_pi_mode_switch(hw, true);
- if (t == 0) {
- rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
- rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
- rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
- }
+
+ rtl_set_bbreg(hw, 0x800, BIT(24), 0x0);
+
rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+ rtl_set_bbreg(hw, 0x870, BIT(10), 0x1);
+ rtl_set_bbreg(hw, 0x870, BIT(26), 0x1);
+ rtl_set_bbreg(hw, 0x860, BIT(10), 0x0);
+ rtl_set_bbreg(hw, 0x864, BIT(10), 0x0);
+
if (is2t) {
rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
@@ -1239,13 +1251,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
}
}
- rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
- rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
- rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+
rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
- if (is2t)
- rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
if (t != 0) {
if (!rtlphy->rfpi_enable)
_rtl92c_phy_pi_mode_switch(hw, false);
@@ -1253,6 +1261,15 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
rtlphy->adda_backup, 16);
_rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
+ _rtl92c_phy_reload_adda_registers(hw, iqk_bb_reg_92C,
+ rtlphy->iqk_bb_backup, 9);
+
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+ if (is2t)
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
bool is92c;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpci->being_init_adapter = true;
+
+ /* Since this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
return err;
}
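The rtl92ce_hw_init() hunk above wraps a long-running init path so that other devices keep being serviced while it runs, as the added comment explains. The underlying pattern, as a minimal sketch (function name illustrative, assumes <linux/irqflags.h>):

static int example_long_init(struct device *dev)
{
	unsigned long flags;
	int err = 0;

	/* Remember the caller's irq state, then re-enable interrupts so
	 * the rest of the system is serviced during this slow (~350 ms)
	 * initialisation. Safe here because this device's own interrupts
	 * are only enabled in a later step.
	 */
	local_save_flags(flags);
	local_irq_enable();

	/* ... lengthy hardware initialisation goes here ... */

	/* Put the irq state back exactly as the caller left it. */
	local_irq_restore(flags);
	return err;
}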
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 16a0b9e59acf..c16209a336ea 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -101,6 +101,15 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_NORMAL)
+ dm_restorepowerindex(hw);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ dm_writepowerindex(hw, 0x14);
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ dm_writepowerindex(hw, 0x10);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
index d947e7d350bb..fafa6bac2a3f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -30,3 +30,6 @@
#include "../rtl8192ce/dm.h"
void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 189ba124a8c6..468bf73cc883 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1022,7 +1022,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
if (ppsc->rfpwr_state == ERFON) {
rtl92c_phy_set_rfpath_switch(hw, 1);
if (iqk_initialized) {
- rtl92c_phy_iq_calibrate(hw, false);
+ rtl92c_phy_iq_calibrate(hw, true);
} else {
rtl92c_phy_iq_calibrate(hw, false);
iqk_initialized = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 34e56308301e..0c09240eadcc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -120,6 +120,7 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u16 regval;
+ u32 regval32;
u8 b_reg_hwparafile = 1;
_rtl92c_phy_init_bb_rf_register_definition(hw);
@@ -135,8 +136,11 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
} else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
FEN_BB_GLB_RSTn | FEN_BBRSTB);
- rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
}
+ regval32 = rtl_read_dword(rtlpriv, 0x87c);
+ rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31)));
+ if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
if (b_reg_hwparafile == 1)
rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 2119313a737b..b878d56d2f4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
- if (turbo_scanoff) {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- if (rtlhal->interface == INTF_USB) {
- if (tx_agc[idx1] > 0x20 &&
- rtlefuse->external_pa)
- tx_agc[idx1] = 0x20;
- }
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ if (rtlhal->interface == INTF_USB) {
+ if (tx_agc[idx1] > 0x20 &&
+ rtlefuse->external_pa)
+ tx_agc[idx1] = 0x20;
}
}
} else {
@@ -107,7 +105,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
- } else{
+ } else {
for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
tx_agc[idx1] = ppowerlevel[idx1] |
(ppowerlevel[idx1] << 8) |
@@ -373,7 +371,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset == RTXAGC_B_MCS07_MCS04)
regoffset = 0xc98;
for (i = 0; i < 3; i++) {
- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ if (i != 2)
+ writeVal = (writeVal > 8) ?
+ (writeVal - 8) : 0;
+ else
+ writeVal = (writeVal > 6) ?
+ (writeVal - 6) : 0;
rtl_write_byte(rtlpriv, (u32)(regoffset + i),
(u8)writeVal);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 9936de716ad5..c61311084d7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -50,6 +50,9 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
@@ -69,14 +72,21 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
"Can't alloc buffer for fw\n");
return 1;
}
-
+ if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
+ !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ } else {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ }
+ /* provide name of alternative file */
+ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
rtlpriv->cfg->fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
-
-
return err;
}
@@ -307,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
index 966be519edb8..7903c154de00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -36,7 +36,7 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
0x804, 0x00000003,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
- 0x810, 0x10005388,
+ 0x810, 0x10000330,
0x814, 0x020c3d10,
0x818, 0x02200385,
0x81c, 0x00000000,
@@ -110,22 +110,22 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
0xc44, 0x000100b7,
0xc48, 0xec020107,
0xc4c, 0x007f037f,
- 0xc50, 0x6954341e,
+ 0xc50, 0x69543420,
0xc54, 0x43bc0094,
- 0xc58, 0x6954341e,
+ 0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x00000000,
0xc64, 0x5116848b,
0xc68, 0x47c00bff,
0xc6c, 0x00000036,
0xc70, 0x2c7f000d,
- 0xc74, 0x0186115b,
+ 0xc74, 0x2186115b,
0xc78, 0x0000001f,
0xc7c, 0x00b99612,
0xc80, 0x40000100,
0xc84, 0x20f60000,
0xc88, 0x40000100,
- 0xc8c, 0x20200000,
+ 0xc8c, 0xa0e40000,
0xc90, 0x00121820,
0xc94, 0x00000000,
0xc98, 0x00121820,
@@ -226,7 +226,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0x804, 0x00000001,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
- 0x810, 0x10005388,
+ 0x810, 0x10000330,
0x814, 0x020c3d10,
0x818, 0x02200385,
0x81c, 0x00000000,
@@ -300,9 +300,9 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0xc44, 0x000100b7,
0xc48, 0xec020107,
0xc4c, 0x007f037f,
- 0xc50, 0x6954341e,
+ 0xc50, 0x69543420,
0xc54, 0x43bc0094,
- 0xc58, 0x6954341e,
+ 0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x00000000,
0xc64, 0x5116848b,
@@ -340,7 +340,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
0xce4, 0x00000000,
0xce8, 0x37644302,
0xcec, 0x2f97d40c,
- 0xd00, 0x00080740,
+ 0xd00, 0x00000740,
0xd04, 0x00020401,
0xd08, 0x0000907f,
0xd0c, 0x20010201,
@@ -633,17 +633,17 @@ u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
0x012, 0x00071000,
0x012, 0x000b0000,
0x012, 0x000fc000,
- 0x013, 0x000287af,
+ 0x013, 0x000287b3,
0x013, 0x000244b7,
0x013, 0x000204ab,
0x013, 0x0001c49f,
0x013, 0x00018493,
- 0x013, 0x00014297,
- 0x013, 0x00010295,
- 0x013, 0x0000c298,
- 0x013, 0x0000819c,
- 0x013, 0x000040a8,
- 0x013, 0x0000001c,
+ 0x013, 0x0001429b,
+ 0x013, 0x00010299,
+ 0x013, 0x0000c29c,
+ 0x013, 0x000081a0,
+ 0x013, 0x000040ac,
+ 0x013, 0x00000020,
0x014, 0x0001944c,
0x014, 0x00059444,
0x014, 0x0009944c,
@@ -932,10 +932,10 @@ u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
0x608, 0x0000000e,
0x609, 0x0000002a,
0x652, 0x00000020,
- 0x63c, 0x0000000a,
- 0x63d, 0x0000000e,
- 0x63e, 0x0000000a,
- 0x63f, 0x0000000e,
+ 0x63c, 0x00000008,
+ 0x63d, 0x00000008,
+ 0x63e, 0x0000000c,
+ 0x63f, 0x0000000c,
0x66e, 0x00000005,
0x700, 0x00000021,
0x701, 0x00000043,
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
index 8ed31744a054..4f083fc1d360 100644
--- a/drivers/net/wireless/rtlwifi/stats.c
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -176,6 +176,7 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
struct rtl_sta_info *drv_priv = NULL;
struct ieee80211_sta *sta = NULL;
long undec_sm_pwdb;
+ long undec_sm_cck;
rcu_read_lock();
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
@@ -185,12 +186,16 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
if (sta) {
drv_priv = (struct rtl_sta_info *) sta->drv_priv;
undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
+ undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck;
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ undec_sm_cck = rtlpriv->dm.undec_sm_cck;
}
if (undec_sm_pwdb < 0)
undec_sm_pwdb = pstatus->rx_pwdb_all;
+ if (undec_sm_cck < 0)
+ undec_sm_cck = pstatus->rx_pwdb_all;
if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
@@ -200,6 +205,15 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
(pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
+ if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) {
+ undec_sm_cck = (((undec_sm_cck) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undec_sm_cck = undec_sm_cck + 1;
+ } else {
+ undec_sm_cck = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
if (sta) {
drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
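The stats.c hunk mirrors the existing exponential smoothing for the new CCK value: each sample is blended as new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR, with a negative value meaning "no history yet". A small sketch of that update (helper name illustrative):

/* Exponential moving average used for the undecorated signal strength:
 * weight the previous value by (N - 1)/N and the new sample by 1/N.
 */
static long example_smooth(long prev, long sample, long factor)
{
	if (prev < 0)		/* no history yet: seed with the sample */
		return sample;
	return (prev * (factor - 1) + sample) / factor;
}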
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 6e2b5c5c83c8..4933f02ce1d5 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -475,14 +475,14 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
if (unicast)
rtlpriv->link_info.num_rx_inperiod++;
}
+ /* static bcn for roaming */
+ rtl_beacon_statistic(hw, skb);
}
}
@@ -517,8 +517,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
rtlpriv->stats.rxbytesunicast += skb->len;
}
- rtl_is_special_data(hw, skb, false);
-
if (ieee80211_is_data(fc)) {
rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
@@ -553,7 +551,7 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
-#define __RX_SKB_MAX_QUEUED 32
+#define __RX_SKB_MAX_QUEUED 64
static void _rtl_rx_work(unsigned long param)
{
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 0c65386fa30d..8c647391bedf 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1033,6 +1033,7 @@ struct rtl_ht_agg {
struct rssi_sta {
long undec_sm_pwdb;
+ long undec_sm_cck;
};
struct rtl_tid_data {
@@ -1323,8 +1324,10 @@ struct fast_ant_training {
struct rtl_dm {
/*PHY status for Dynamic Management */
long entry_min_undec_sm_pwdb;
+ long undec_sm_cck;
long undec_sm_pwdb; /*out dm */
long entry_max_undec_sm_pwdb;
+ s32 ofdm_pkt_cnt;
bool dm_initialgain_enable;
bool dynamic_txpower_enable;
bool current_turbo_edca;
@@ -1339,6 +1342,7 @@ struct rtl_dm {
bool inform_fw_driverctrldm;
bool current_mrc_switch;
u8 txpowercount;
+ u8 powerindex_backup[6];
u8 thermalvalue_rxgain;
u8 thermalvalue_iqk;
@@ -1350,7 +1354,9 @@ struct rtl_dm {
bool done_txpower;
u8 dynamic_txhighpower_lvl; /*Tx high power level */
u8 dm_flag; /*Indicate each dynamic mechanism's status. */
+ u8 dm_flag_tmp;
u8 dm_type;
+ u8 dm_rssi_sel;
u8 txpower_track_control;
bool interrupt_migration;
bool disable_tx_int;
@@ -1804,6 +1810,7 @@ struct rtl_hal_cfg {
bool write_readback;
char *name;
char *fw_name;
+ char *alt_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -1948,6 +1955,7 @@ struct dig_t {
u8 pre_ccastate;
u8 cur_ccasate;
u8 large_fa_hit;
+ u8 dig_dynamic_min;
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
@@ -2028,22 +2036,15 @@ struct rtl_priv {
struct dig_t dm_digtable;
struct ps_t dm_pstable;
- /* section shared by individual drivers */
- union {
- struct { /* data buffer pointer for USB reads */
- __le32 *usb_data;
- int usb_data_index;
- bool initialized;
- };
- struct { /* section for 8723ae */
- bool reg_init; /* true if regs saved */
- u32 reg_874;
- u32 reg_c70;
- u32 reg_85c;
- u32 reg_a74;
- bool bt_operation_on;
- };
- };
+ u32 reg_874;
+ u32 reg_c70;
+ u32 reg_85c;
+ u32 reg_a74;
+ bool reg_init; /* true if regs saved */
+ bool bt_operation_on;
+ __le32 *usb_data;
+ int usb_data_index;
+ bool initialized;
bool enter_ps; /* true when entering PS */
u8 rate_mask[5];
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index db6430c1a084..5a4ec56c83d0 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -18,10 +18,8 @@ int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
wl1251_debug(DEBUG_ACX, "acx frame rates");
rates = kzalloc(sizeof(*rates), GFP_KERNEL);
- if (!rates) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rates)
+ return -ENOMEM;
rates->tx_ctrl_frame_rate = ctrl_rate;
rates->tx_ctrl_frame_mod = ctrl_mod;
@@ -49,10 +47,8 @@ int wl1251_acx_station_id(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx dot11_station_id");
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mac)
+ return -ENOMEM;
for (i = 0; i < ETH_ALEN; i++)
mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
@@ -74,10 +70,8 @@ int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id)
wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id);
default_key = kzalloc(sizeof(*default_key), GFP_KERNEL);
- if (!default_key) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!default_key)
+ return -ENOMEM;
default_key->id = key_id;
@@ -104,10 +98,8 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
wl1251_debug(DEBUG_ACX, "acx wake up conditions");
wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
- if (!wake_up) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!wake_up)
+ return -ENOMEM;
wake_up->wake_up_event = wake_up_event;
wake_up->listen_interval = listen_interval;
@@ -132,16 +124,13 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
wl1251_debug(DEBUG_ACX, "acx sleep auth");
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
- if (!auth) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!auth)
+ return -ENOMEM;
auth->sleep_auth = sleep_auth;
ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
-out:
kfree(auth);
return ret;
}
@@ -154,10 +143,8 @@ int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len)
wl1251_debug(DEBUG_ACX, "acx fw rev");
rev = kzalloc(sizeof(*rev), GFP_KERNEL);
- if (!rev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rev)
+ return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev));
if (ret < 0) {
@@ -191,10 +178,8 @@ int wl1251_acx_tx_power(struct wl1251 *wl, int power)
return -EINVAL;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->current_tx_power = power * 10;
@@ -209,7 +194,7 @@ out:
return ret;
}
-int wl1251_acx_feature_cfg(struct wl1251 *wl)
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options)
{
struct acx_feature_config *feature;
int ret;
@@ -217,13 +202,11 @@ int wl1251_acx_feature_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx feature cfg");
feature = kzalloc(sizeof(*feature), GFP_KERNEL);
- if (!feature) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!feature)
+ return -ENOMEM;
- /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->data_flow_options = 0;
+ /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE can be set */
+ feature->data_flow_options = data_flow_options;
feature->options = 0;
ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG,
@@ -261,10 +244,8 @@ int wl1251_acx_data_path_params(struct wl1251 *wl,
wl1251_debug(DEBUG_ACX, "acx data path params");
params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!params)
+ return -ENOMEM;
params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE;
params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE;
@@ -309,10 +290,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time)
wl1251_debug(DEBUG_ACX, "acx rx msdu life time");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->lifetime = life_time;
ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
@@ -335,10 +314,8 @@ int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter)
wl1251_debug(DEBUG_ACX, "acx rx config");
rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
- if (!rx_config) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rx_config)
+ return -ENOMEM;
rx_config->config_options = config;
rx_config->filter_options = filter;
@@ -363,10 +340,8 @@ int wl1251_acx_pd_threshold(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx data pd threshold");
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!pd)
+ return -ENOMEM;
/* FIXME: threshold value not set */
@@ -389,10 +364,8 @@ int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
wl1251_debug(DEBUG_ACX, "acx slot");
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!slot)
+ return -ENOMEM;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -408,7 +381,8 @@ out:
return ret;
}
-int wl1251_acx_group_address_tbl(struct wl1251 *wl)
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+ void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -416,15 +390,13 @@ int wl1251_acx_group_address_tbl(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx group address tbl");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
/* MAC filtering */
- acx->enabled = 0;
- acx->num_groups = 0;
- memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+ acx->enabled = enable;
+ acx->num_groups = mc_list_len;
+ memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
acx, sizeof(*acx));
@@ -444,10 +416,8 @@ int wl1251_acx_service_period_timeout(struct wl1251 *wl)
int ret;
rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL);
- if (!rx_timeout) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rx_timeout)
+ return -ENOMEM;
wl1251_debug(DEBUG_ACX, "acx service period timeout");
@@ -475,10 +445,8 @@ int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold)
wl1251_debug(DEBUG_ACX, "acx rts threshold");
rts = kzalloc(sizeof(*rts), GFP_KERNEL);
- if (!rts) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rts)
+ return -ENOMEM;
rts->threshold = rts_threshold;
@@ -501,10 +469,8 @@ int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
wl1251_debug(DEBUG_ACX, "acx beacon filter opt");
beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
- if (!beacon_filter) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!beacon_filter)
+ return -ENOMEM;
beacon_filter->enable = enable_filter;
beacon_filter->max_num_beacons = 0;
@@ -530,10 +496,8 @@ int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx beacon filter table");
ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL);
- if (!ie_table) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!ie_table)
+ return -ENOMEM;
/* configure default beacon pass-through rules */
ie_table->num_ie = 1;
@@ -560,10 +524,8 @@ int wl1251_acx_conn_monit_params(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
@@ -589,10 +551,8 @@ int wl1251_acx_sg_enable(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx sg enable");
pta = kzalloc(sizeof(*pta), GFP_KERNEL);
- if (!pta) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!pta)
+ return -ENOMEM;
pta->enable = SG_ENABLE;
@@ -615,10 +575,8 @@ int wl1251_acx_sg_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx sg cfg");
param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!param)
+ return -ENOMEM;
/* BT-WLAN coext parameters */
param->min_rate = RATE_INDEX_24MBPS;
@@ -669,10 +627,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx cca threshold");
detection = kzalloc(sizeof(*detection), GFP_KERNEL);
- if (!detection) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!detection)
+ return -ENOMEM;
detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
detection->tx_energy_detection = 0;
@@ -682,7 +638,6 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
if (ret < 0)
wl1251_warning("failed to set cca threshold: %d", ret);
-out:
kfree(detection);
return ret;
}
@@ -695,10 +650,8 @@ int wl1251_acx_bcn_dtim_options(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx bcn dtim options");
bb = kzalloc(sizeof(*bb), GFP_KERNEL);
- if (!bb) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!bb)
+ return -ENOMEM;
bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
@@ -724,10 +677,8 @@ int wl1251_acx_aid(struct wl1251 *wl, u16 aid)
wl1251_debug(DEBUG_ACX, "acx aid");
acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL);
- if (!acx_aid) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx_aid)
+ return -ENOMEM;
acx_aid->aid = aid;
@@ -750,10 +701,8 @@ int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask)
wl1251_debug(DEBUG_ACX, "acx event mbox mask");
mask = kzalloc(sizeof(*mask), GFP_KERNEL);
- if (!mask) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mask)
+ return -ENOMEM;
/* high event mask is unused */
mask->high_event_mask = 0xffffffff;
@@ -805,10 +754,8 @@ int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
wl1251_debug(DEBUG_ACX, "acx_set_preamble");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->preamble = preamble;
@@ -832,10 +779,8 @@ int wl1251_acx_cts_protect(struct wl1251 *wl,
wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->ctsprotect = ctsprotect;
@@ -856,10 +801,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
int ret;
tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
- if (!tsf_info) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!tsf_info)
+ return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_TSF_INFO,
tsf_info, sizeof(*tsf_info));
@@ -900,19 +843,22 @@ int wl1251_acx_rate_policies(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx rate policies");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
/* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = 1;
+ acx->rate_class_cnt = 2;
acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].aflags = 0;
+ /* no-retry rate class */
+ acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
+ acx->rate_class[1].short_retry_limit = 0;
+ acx->rate_class[1].long_retry_limit = 0;
+ acx->rate_class[1].aflags = 0;
+
ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("Setting of rate policies failed: %d", ret);
@@ -932,10 +878,8 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl)
wl1251_debug(DEBUG_ACX, "acx mem cfg");
mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
- if (!mem_conf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mem_conf)
+ return -ENOMEM;
/* memory config */
mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
@@ -979,10 +923,8 @@ int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->tbtt = tbtt;
acx->dtim = dtim;
@@ -1008,10 +950,8 @@ int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
wl1251_debug(DEBUG_ACX, "acx bet enable");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->enable = mode;
acx->max_consecutive = max_consecutive;
@@ -1027,6 +967,32 @@ out:
return ret;
}
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address)
+{
+ struct wl1251_acx_arp_filter *acx;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->version = ACX_IPV4_VERSION;
+ acx->enable = enable;
+
+ if (enable)
+ memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
+
+ ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER,
+ acx, sizeof(*acx));
+ if (ret < 0)
+ wl1251_warning("failed to set arp ip filter: %d", ret);
+
+ kfree(acx);
+ return ret;
+}
+
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
@@ -1037,11 +1003,8 @@ int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
"aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->ac = ac;
acx->cw_min = cw_min;
@@ -1073,11 +1036,8 @@ int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
ps_scheme, ack_policy);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
- if (!acx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!acx)
+ return -ENOMEM;
acx->queue = queue;
acx->type = type;
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index c2ba100f9b1a..2bdec38699f4 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -350,8 +350,8 @@ struct acx_slot {
} __packed;
-#define ADDRESS_GROUP_MAX (8)
-#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX (8)
+#define ACX_MC_ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
struct acx_dot11_grp_addr_tbl {
struct acx_header header;
@@ -359,7 +359,7 @@ struct acx_dot11_grp_addr_tbl {
u8 enabled;
u8 num_groups;
u8 pad[2];
- u8 mac_table[ADDRESS_GROUP_MAX_LEN];
+ u8 mac_table[ACX_MC_ADDRESS_GROUP_MAX_LEN];
} __packed;
@@ -1232,6 +1232,20 @@ struct wl1251_acx_bet_enable {
u8 padding[2];
} __packed;
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1251_acx_arp_filter {
+ struct acx_header header;
+ u8 version; /* The IP version: 4 - IPv4, 6 - IPv6.*/
+ u8 enable; /* 1 - ARP filtering is enabled, 0 - disabled */
+ u8 padding[2];
+ u8 address[16]; /* The IP address used to filter ARP packets.
+ ARP packets that do not match this address are
dropped. When the IP version is 4, the last 12
bytes of the address are ignored. */
+} __packed;
+
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1440,7 +1454,7 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth);
int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len);
int wl1251_acx_tx_power(struct wl1251 *wl, int power);
-int wl1251_acx_feature_cfg(struct wl1251 *wl);
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options);
int wl1251_acx_mem_map(struct wl1251 *wl,
struct acx_header *mem_map, size_t len);
int wl1251_acx_data_path_params(struct wl1251 *wl,
@@ -1449,7 +1463,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time);
int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter);
int wl1251_acx_pd_threshold(struct wl1251 *wl);
int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
-int wl1251_acx_group_address_tbl(struct wl1251 *wl);
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+ void *mc_list, u32 mc_list_len);
int wl1251_acx_service_period_timeout(struct wl1251 *wl);
int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
@@ -1473,6 +1488,7 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
u8 max_consecutive);
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address);
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop);
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
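A minimal sketch of how the reworked group address table and the new ARP IP filter helper are meant to be used together (illustrative only; the helper name and the multicast entry are hypothetical, while the wl1251_acx_* signatures match the declarations above -- the real call sites are in main.c further below):

	/* Illustrative only -- mirrors the call sites added to main.c below. */
	#include <linux/etherdevice.h>
	#include "wl1251.h"
	#include "acx.h"

	static int example_apply_filters(struct wl1251 *wl, bool assoc, __be32 ip)
	{
		/* hypothetical entry: the all-hosts group 224.0.0.1 mapped to a MAC */
		u8 mc_list[1][ETH_ALEN] = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
		int ret;

		/* MAC-level multicast filtering, at most ACX_MC_ADDRESS_GROUP_MAX entries */
		ret = wl1251_acx_group_address_tbl(wl, true, mc_list, 1);
		if (ret < 0)
			return ret;

		/* ARP filtering is only useful with a single IPv4 address while associated */
		return wl1251_acx_arp_ip_filter(wl, assoc, ip);
	}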
diff --git a/drivers/net/wireless/ti/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
index a2e5241382da..2000cd536077 100644
--- a/drivers/net/wireless/ti/wl1251/boot.c
+++ b/drivers/net/wireless/ti/wl1251/boot.c
@@ -299,7 +299,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
- BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
+ BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID |
+ PS_REPORT_EVENT_ID;
ret = wl1251_event_unmask(wl);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 6822b845efc1..223649bcaa5a 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crc7.h>
+#include <linux/etherdevice.h>
#include "wl1251.h"
#include "reg.h"
@@ -203,11 +204,11 @@ out:
return ret;
}
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
- u16 cmd_rx, cmd_tx;
+ u16 cmd_rx;
wl1251_debug(DEBUG_CMD, "cmd data path");
@@ -219,13 +220,10 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
cmd->channel = channel;
- if (enable) {
+ if (enable)
cmd_rx = CMD_ENABLE_RX;
- cmd_tx = CMD_ENABLE_TX;
- } else {
+ else
cmd_rx = CMD_DISABLE_RX;
- cmd_tx = CMD_DISABLE_TX;
- }
ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
if (ret < 0) {
@@ -237,17 +235,38 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
enable ? "start" : "stop", channel);
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
+{
+ struct cmd_enabledisable_path *cmd;
+ int ret;
+ u16 cmd_tx;
+
+ wl1251_debug(DEBUG_CMD, "cmd data path");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->channel = channel;
+
+ if (enable)
+ cmd_tx = CMD_ENABLE_TX;
+ else
+ cmd_tx = CMD_DISABLE_TX;
+
ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
- if (ret < 0) {
+ if (ret < 0)
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
- goto out;
- }
-
- wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ else
+ wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
+ enable ? "start" : "stop", channel);
-out:
kfree(cmd);
return ret;
}
@@ -410,7 +429,9 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
struct wl1251_cmd_scan *cmd;
int i, ret = 0;
- wl1251_debug(DEBUG_CMD, "cmd scan");
+ wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
+
+ WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
@@ -421,6 +442,13 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
CFG_RX_MGMT_EN |
CFG_RX_BCN_EN);
cmd->params.scan_options = 0;
+ /*
+ * Use a high-priority scan when not associated to work around a fw
+ * issue that causes never-ending scans (sometimes 20+ minutes).
+ * Note: this bug may be caused by the fw's DTIM handling.
+ */
+ if (is_zero_ether_addr(wl->bssid))
+ cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index ee4f2b391822..d824ff978311 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -35,7 +35,8 @@ int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
void *bitmap, u16 bitmap_len, u8 bitmap_control);
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable);
int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_interval);
int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode);
@@ -167,6 +168,11 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_OPT_PASSIVE 1
+#define WL1251_SCAN_OPT_5GHZ_BAND 2
+#define WL1251_SCAN_OPT_TRIGGERD_SCAN 4
+#define WL1251_SCAN_OPT_PRIORITY_HIGH 8
+
#define WL1251_SCAN_MIN_DURATION 30000
#define WL1251_SCAN_MAX_DURATION 60000
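A short sketch of how the newly defined scan option bits combine with the zero-BSSID check added to wl1251_cmd_scan() above (the helper name is hypothetical; the flags and the is_zero_ether_addr() test come from this patch):

	/* Illustrative only -- mirrors the scan_options logic in cmd.c above. */
	#include <linux/etherdevice.h>
	#include "wl1251.h"
	#include "cmd.h"

	static u16 example_scan_options(struct wl1251 *wl, bool passive)
	{
		u16 options = 0;

		if (passive)
			options |= WL1251_SCAN_OPT_PASSIVE;

		/* Not associated yet (zero BSSID): use a high-priority scan to
		 * work around the never-ending-scan firmware issue noted in cmd.c.
		 */
		if (is_zero_ether_addr(wl->bssid))
			options |= WL1251_SCAN_OPT_PRIORITY_HIGH;

		return options;
	}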
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 74ae8e1c2e33..db0105313745 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -46,6 +46,43 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
return ret;
}
+#define WL1251_PSM_ENTRY_RETRIES 3
+static int wl1251_event_ps_report(struct wl1251 *wl,
+ struct event_mailbox *mbox)
+{
+ int ret = 0;
+
+ wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status);
+
+ switch (mbox->ps_status) {
+ case EVENT_ENTER_POWER_SAVE_FAIL:
+ wl1251_debug(DEBUG_PSM, "PSM entry failed");
+
+ if (wl->station_mode != STATION_POWER_SAVE_MODE) {
+ /* remain in active mode */
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+ if (wl->psm_entry_retry < WL1251_PSM_ENTRY_RETRIES) {
+ wl->psm_entry_retry++;
+ ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ } else {
+ wl1251_error("Power save entry failed, giving up");
+ wl->psm_entry_retry = 0;
+ }
+ break;
+ case EVENT_ENTER_POWER_SAVE_SUCCESS:
+ case EVENT_EXIT_POWER_SAVE_FAIL:
+ case EVENT_EXIT_POWER_SAVE_SUCCESS:
+ default:
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+ return ret;
+}
+
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
{
wl1251_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -80,7 +117,14 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
- if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+ if (vector & PS_REPORT_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
+ ret = wl1251_event_ps_report(wl, mbox);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
index 30eb5d150bf7..88570a5cd042 100644
--- a/drivers/net/wireless/ti/wl1251/event.h
+++ b/drivers/net/wireless/ti/wl1251/event.h
@@ -112,6 +112,13 @@ struct event_mailbox {
u8 padding[19];
} __packed;
+enum {
+ EVENT_ENTER_POWER_SAVE_FAIL = 0,
+ EVENT_ENTER_POWER_SAVE_SUCCESS,
+ EVENT_EXIT_POWER_SAVE_FAIL,
+ EVENT_EXIT_POWER_SAVE_SUCCESS,
+};
+
int wl1251_event_unmask(struct wl1251 *wl);
void wl1251_event_mbox_config(struct wl1251 *wl);
int wl1251_event_handle(struct wl1251 *wl, u8 mbox);
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 89b43d35473c..1d799bffaa9f 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -33,7 +33,7 @@ int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
{
int ret;
- ret = wl1251_acx_feature_cfg(wl);
+ ret = wl1251_acx_feature_cfg(wl, 0);
if (ret < 0) {
wl1251_warning("couldn't set feature config");
return ret;
@@ -127,7 +127,7 @@ int wl1251_hw_init_phy_config(struct wl1251 *wl)
if (ret < 0)
return ret;
- ret = wl1251_acx_group_address_tbl(wl);
+ ret = wl1251_acx_group_address_tbl(wl, true, NULL, 0);
if (ret < 0)
return ret;
@@ -394,8 +394,13 @@ int wl1251_hw_init(struct wl1251 *wl)
if (ret < 0)
goto out_free_data_path;
- /* Enable data path */
- ret = wl1251_cmd_data_path(wl, wl->channel, 1);
+ /* Enable rx data path */
+ ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+ if (ret < 0)
+ goto out_free_data_path;
+
+ /* Enable tx data path */
+ ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
if (ret < 0)
goto out_free_data_path;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3291ffa95273..757e25784a8a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -28,6 +28,7 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include "wl1251.h"
#include "wl12xx_80211.h"
@@ -479,10 +480,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl->next_tx_complete = 0;
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
+ wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
+ wl->monitor_present = false;
+ wl->joined = false;
wl1251_debugfs_reset(wl);
@@ -521,7 +525,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ if (!ether_addr_equal_unaligned(wl->mac_addr, vif->addr)) {
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
@@ -542,6 +546,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
wl->vif = NULL;
+ memset(wl->bssid, 0, ETH_ALEN);
mutex_unlock(&wl->mutex);
}
@@ -566,6 +571,11 @@ static int wl1251_build_qos_null_data(struct wl1251 *wl)
sizeof(template));
}
+static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
+{
+ return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
+}
+
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1251 *wl = hw->priv;
@@ -575,8 +585,10 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(
conf->chandef.chan->center_freq);
- wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1251_debug(DEBUG_MAC80211,
+ "mac80211 config ch %d monitor %s psm %s power %d",
channel,
+ conf->flags & IEEE80211_CONF_MONITOR ? "on" : "off",
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level);
@@ -586,16 +598,44 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ u32 mode;
+
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ wl->monitor_present = true;
+ mode = DF_SNIFF_MODE_ENABLE | DF_ENCRYPTION_DISABLE;
+ } else {
+ wl->monitor_present = false;
+ mode = 0;
+ }
+
+ ret = wl1251_acx_feature_cfg(wl, mode);
+ if (ret < 0)
+ goto out_sleep;
+ }
+
if (channel != wl->channel) {
wl->channel = channel;
- ret = wl1251_join(wl, wl->bss_type, wl->channel,
- wl->beacon_int, wl->dtim_period);
+ /*
+ * Use the ENABLE_RX command for channel switching when no
+ * interface is present (monitor mode only).
+ * This leaves the tx path disabled in firmware, whereas
+ * the usual JOIN command seems to transmit some frames
+ * at firmware level.
+ */
+ if (wl->vif == NULL) {
+ wl->joined = false;
+ ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+ } else {
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ }
if (ret < 0)
goto out_sleep;
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
+ if (wl1251_can_do_pm(conf, wl) && !wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm enabled");
wl->psm_requested = true;
@@ -611,8 +651,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
goto out_sleep;
- } else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ } else if (!wl1251_can_do_pm(conf, wl) && wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
@@ -648,6 +687,16 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->power_level = conf->power_level;
}
+ /*
+ * Tell the stack that the connection is lost because hw encryption
+ * isn't supported in monitor mode.
+ * This requires temporarily enabling the hw connection monitor
+ * flag.
+ if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
+ wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
+ ieee80211_connection_loss(wl->vif);
+ }
+
out_sleep:
wl1251_ps_elp_sleep(wl);
@@ -657,6 +706,44 @@ out:
return ret;
}
+struct wl1251_filter_params {
+ bool enabled;
+ int mc_list_length;
+ u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct wl1251_filter_params *fp;
+ struct netdev_hw_addr *ha;
+ struct wl1251 *wl = hw->priv;
+
+ if (unlikely(wl->state == WL1251_STATE_OFF))
+ return 0;
+
+ fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+ if (!fp) {
+ wl1251_error("Out of memory setting filters.");
+ return 0;
+ }
+
+ /* update multicast filtering parameters */
+ fp->mc_list_length = 0;
+ if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+ fp->enabled = false;
+ } else {
+ fp->enabled = true;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(fp->mc_list[fp->mc_list_length],
+ ha->addr, ETH_ALEN);
+ fp->mc_list_length++;
+ }
+ }
+
+ return (u64)(unsigned long)fp;
+}
+
#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
@@ -667,8 +754,9 @@ out:
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
- unsigned int *total,u64 multicast)
+ unsigned int *total, u64 multicast)
{
+ struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1251 *wl = hw->priv;
int ret;
@@ -677,9 +765,11 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
*total &= WL1251_SUPPORTED_FILTERS;
changed &= WL1251_SUPPORTED_FILTERS;
- if (changed == 0)
+ if (changed == 0) {
/* no filters which we support changed */
+ kfree(fp);
return;
+ }
mutex_lock(&wl->mutex);
@@ -716,6 +806,15 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (*total & (FIF_ALLMULTI | FIF_PROMISC_IN_BSS))
+ ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out;
+
/* send filters to firmware */
wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
@@ -723,6 +822,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
out:
mutex_unlock(&wl->mutex);
+ kfree(fp);
}
/* HW encryption */
@@ -802,12 +902,12 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&wl->mutex);
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
-
switch (cmd) {
case SET_KEY:
+ if (wl->monitor_present) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
wl_cmd->key_action = KEY_ADD_OR_REPLACE;
break;
case DISABLE_KEY:
@@ -818,6 +918,10 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
}
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
if (ret < 0) {
wl1251_error("Set KEY type failed");
@@ -930,6 +1034,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
req->n_channels, WL1251_SCAN_NUM_PROBES);
if (ret < 0) {
+ wl1251_debug(DEBUG_SCAN, "scan failed %d", ret);
wl->scanning = false;
goto out_idle;
}
@@ -977,6 +1082,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct wl1251 *wl = hw->priv;
struct sk_buff *beacon, *skb;
+ bool enable;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1023,6 +1129,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
+ /* Disable the temporarily enabled hw connection monitor flag */
+ wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
+
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
@@ -1075,6 +1184,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ __be32 addr = bss_conf->arp_addr_list[0];
+ WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+
+ enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+
+ if (ret < 0)
+ goto out_sleep;
+ }
+
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif);
if (!beacon)
@@ -1245,6 +1365,7 @@ static const struct ieee80211_ops wl1251_ops = {
.add_interface = wl1251_op_add_interface,
.remove_interface = wl1251_op_remove_interface,
.config = wl1251_op_config,
+ .prepare_multicast = wl1251_op_prepare_multicast,
.configure_filter = wl1251_op_configure_filter,
.tx = wl1251_op_tx,
.set_key = wl1251_op_set_key,
@@ -1347,7 +1468,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
@@ -1401,7 +1521,10 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
+ wl->monitor_present = false;
+ wl->joined = false;
wl->scanning = false;
+ wl->bss_type = MAX_BSS_TYPE;
wl->default_key = 0;
wl->listen_int = 1;
wl->rx_counter = 0;
@@ -1413,6 +1536,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
wl->psm_requested = false;
+ wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
@@ -1478,3 +1602,4 @@ MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_FIRMWARE(WL1251_FW_NAME);
+MODULE_FIRMWARE(WL1251_NVS_NAME);
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 23289d49dd31..cde0eaf99714 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -83,7 +83,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
status->flag |= RX_FLAG_MACTIME_START;
- if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
+ if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
@@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
/* The actual length doesn't include the target's alignment */
- skb->len = desc->length - PLCP_HEADER_LENGTH;
+ skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
fc = (u16 *)skb->data;
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 28121c590a2b..81de83c6fcf6 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -28,6 +28,7 @@
#include "tx.h"
#include "ps.h"
#include "io.h"
+#include "event.h"
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
@@ -89,8 +90,12 @@ static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
/* 802.11 packets */
tx_hdr->control.packet_type = 0;
- if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+ /* Also disable retry and ACK policy for injected packets */
+ if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (control->flags & IEEE80211_TX_CTL_INJECTED)) {
+ tx_hdr->control.rate_policy = 1;
tx_hdr->control.ack_policy = 1;
+ }
tx_hdr->control.tx_complete = 1;
@@ -277,6 +282,26 @@ static void wl1251_tx_trigger(struct wl1251 *wl)
TX_STATUS_DATA_OUT_COUNT_MASK;
}
+static void enable_tx_for_packet_injection(struct wl1251 *wl)
+{
+ int ret;
+
+ ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0) {
+ wl1251_warning("join failed");
+ return;
+ }
+
+ ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
+ if (ret < 0) {
+ wl1251_warning("join timeout");
+ return;
+ }
+
+ wl->joined = true;
+}
+
/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
@@ -287,6 +312,9 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key) {
+ if (unlikely(wl->monitor_present))
+ return -EINVAL;
+
idx = info->control.hw_key->hw_key_idx;
if (unlikely(wl->default_key != idx)) {
ret = wl1251_acx_default_key(wl, idx);
@@ -295,6 +323,10 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
}
}
+ /* Enable tx path in monitor mode for packet injection */
+ if ((wl->vif == NULL) && !wl->joined)
+ enable_tx_for_packet_injection(wl);
+
ret = wl1251_tx_path_status(wl);
if (ret < 0)
return ret;
@@ -394,6 +426,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(result->status == TX_SUCCESS))
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 2c3bd1bff3f6..235617a7716d 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -93,6 +93,7 @@ enum {
} while (0)
#define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+ CFG_MC_FILTER_EN | \
CFG_BSSID_FILTER_EN)
#define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN | \
@@ -303,6 +304,8 @@ struct wl1251 {
u8 bss_type;
u8 listen_int;
int channel;
+ bool monitor_present;
+ bool joined;
void *target_mem_map;
struct acx_data_path_params_resp *data_path;
@@ -368,6 +371,9 @@ struct wl1251 {
/* PSM mode requested */
bool psm_requested;
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
u16 beacon_int;
u8 dtim_period;
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 4a0bbb13806b..7541bd1a4a4b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -47,7 +47,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
* In active scans, we only scan channels not
* marked as passive.
*/
- (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+ (passive || !(flags & IEEE80211_CHAN_NO_IR))) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 34d9dfff2ad3..9b2ecf52449f 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1688,7 +1688,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
if (channel->flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_PASSIVE_SCAN))
+ IEEE80211_CHAN_NO_IR))
continue;
ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0368b9cbfb89..b46b3116cc55 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -91,8 +91,7 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
continue;
if (ch->flags & IEEE80211_CHAN_RADAR)
- ch->flags |= IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_PASSIVE_SCAN;
+ ch->flags |= IEEE80211_CHAN_NO_IR;
}
@@ -4458,6 +4457,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if ((changed & BSS_CHANGED_TXPOWER) &&
+ bss_conf->txpower != wlvif->power_level) {
+
+ ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
+ if (ret < 0)
+ goto out;
+
+ wlvif->power_level = bss_conf->txpower;
+ }
+
if (is_ap)
wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
else
@@ -5711,7 +5720,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->channel_change_time = 10000;
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 7ed86203304b..1e3d51cd673a 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -188,16 +188,14 @@ wlcore_scan_get_channels(struct wl1271 *wl,
flags = req_channels[i]->flags;
if (force_passive)
- flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+ flags |= IEEE80211_CHAN_NO_IR;
if ((req_channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
- !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
-
-
+ !!(flags & IEEE80211_CHAN_NO_IR) == passive)) {
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -220,7 +218,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
(band == IEEE80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
- (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ (flags & IEEE80211_CHAN_NO_IR) &&
!force_passive) {
/* pactive channels treated as DFS */
channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
@@ -243,8 +241,8 @@ wlcore_scan_get_channels(struct wl1271 *wl,
max_dwell_time_active,
flags & IEEE80211_CHAN_RADAR ?
", DFS" : "",
- flags & IEEE80211_CHAN_PASSIVE_SCAN ?
- ", PASSIVE" : "");
+ flags & IEEE80211_CHAN_NO_IR ?
+ ", NO-IR" : "");
j++;
}
}
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 38d2089f338a..d24d4a958c67 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/kernel.h>
@@ -44,6 +43,7 @@
#include <linux/string.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/iw_handler.h>
@@ -673,8 +673,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
matchflag = 1;
if (matchflag) {
for (i = 0; i < this->bss_cnt; i++) {
- if (!memcmp(this->bss_set[i].bssid,
- sig.bssid, ETH_ALEN)) {
+ if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
matchflag = 0;
break;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 71ab320fae82..73a49b868035 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This file implements all the hardware specific functions for the ZD1211
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 7ab922209b25..b03786c9f3aa 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_CHIP_H
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 9a1b013f81be..41bd755bc135 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_DEF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c6208a7988e4..e7af261e9198 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -16,8 +16,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/netdevice.h>
@@ -533,9 +532,8 @@ void zd_mac_tx_failed(struct urb *urb)
tx_hdr = (struct ieee80211_hdr *)skb->data;
/* we skip all frames not matching the reported destination */
- if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+ if (unlikely(!ether_addr_equal(tx_hdr->addr1, tx_status->mac)))
continue;
- }
/* we skip all frames not matching the reported final rate */
@@ -998,7 +996,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
continue;
tx_hdr = (struct ieee80211_hdr *)skb->data;
- if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
+ if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1)))
{
found = 1;
break;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index c01eca859f95..5a484235308f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_MAC_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index c875ee05e22e..dc179c414518 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 725b7c99b23d..8f14e25e1041 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_RF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 12babcb633c3..99aed7d78952 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 385c670d1293..5fea485be574 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 784d9ccb8fef..a93f657a41c7 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index c4d324e19c24..61b924027356 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 84d94f572a46..a912dc051111 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 45e3bb28a01c..a9075f225178 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ZD_USB_H
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index c47794b9d42f..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,12 +143,7 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
-
- /* Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
- */
- RING_IDX rx_req_cons_peek;
+ RING_IDX rx_last_skb_slots;
/* This array is allocated seperately as it is large */
struct gnttab_copy *grant_copy_op;
@@ -205,8 +200,6 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_rx_ring_full(struct xenvif *vif);
-
int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
@@ -218,21 +211,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_check_rx_xenvif(struct xenvif *vif);
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of slots is available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
extern bool separate_tx_rx_irq;
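The new prototypes above replace the old peek-ahead accounting with an on-demand slot check. A condensed sketch of the intended transmit-side flow, assuming the same struct xenvif fields used in this patch (the full implementation follows in the interface.c and netback.c hunks below):

	/* Illustrative only -- condensed from the xenvif_start_xmit() rework below. */
	#include "common.h"

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct xenvif *vif = netdev_priv(dev);
		/* one slot for the header, one per frag, one extra for GSO metadata */
		int needed = 1 + skb_shinfo(skb)->nr_frags + (skb_is_gso(skb) ? 1 : 0);

		/* If the skb may not fit, stop the queue so the ring can drain;
		 * xenvif_rx_ring_slots_available() also arms req_event so the
		 * backend is notified when the frontend posts more requests.
		 */
		if (!xenvif_rx_ring_slots_available(vif, needed))
			xenvif_stop_queue(vif);

		skb_queue_tail(&vif->rx_queue, skb);	/* the kthread copies it to the ring */
		xenvif_kick_thread(vif);

		return NETDEV_TX_OK;
	}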
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..301cc037fda8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
- return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
@@ -105,8 +100,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
+ xenvif_kick_thread(vif);
return IRQ_HANDLED;
}
@@ -122,24 +116,34 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ int min_slots_needed;
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL)
+ if (vif->task == NULL || !xenvif_schedulable(vif))
goto drop;
- /* Drop the packet if the target domain has no receive buffers. */
- if (!xenvif_rx_schedulable(vif))
- goto drop;
+ /* At a minimum we'll need one slot for the header and one for each
+ * frag.
+ */
+ min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
- /* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+ /* If the skb is GSO then we'll also need an extra slot for the
+ * metadata.
+ */
+ if (skb_is_gso(skb))
+ min_slots_needed++;
- if (vif->can_queue && xenvif_must_stop_queue(vif))
- netif_stop_queue(dev);
+ /* If the skb can't possibly fit in the remaining slots
+ * then turn off the queue to give the ring a chance to
+ * drain.
+ */
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ xenvif_stop_queue(vif);
- xenvif_queue_tx_skb(vif, skb);
+ skb_queue_tail(&vif->rx_queue, skb);
+ xenvif_kick_thread(vif);
return NETDEV_TX_OK;
@@ -149,12 +153,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
- if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
- netif_wake_queue(vif->dev);
-}
-
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +386,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (err < 0)
goto err;
+ init_waitqueue_head(&vif->wq);
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +420,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- init_waitqueue_head(&vif->wq);
task = kthread_create(xenvif_kthread,
(void *)vif, "%s", vif->dev->name);
if (IS_ERR(task)) {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 78425554a537..438d0c09b7e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,7 +39,6 @@
#include <linux/udp.h>
#include <net/tcp.h>
-#include <net/ip6_checksum.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -138,36 +137,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
vif->pending_prod + vif->pending_cons;
}
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
- int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+ RING_IDX prod, cons;
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
- max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
- return max;
-}
+ do {
+ prod = vif->rx.sring->req_prod;
+ cons = vif->rx.req_cons;
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
- RING_IDX peek = vif->rx_req_cons_peek;
- RING_IDX needed = max_required_rx_slots(vif);
+ if (prod - cons >= needed)
+ return true;
- return ((vif->rx.sring->req_prod - peek) < needed) ||
- ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+ vif->rx.sring->req_event = prod + 1;
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
- if (!xenvif_rx_ring_full(vif))
- return 0;
-
- vif->rx.sring->req_event = vif->rx_req_cons_peek +
- max_required_rx_slots(vif);
- mb(); /* request notification /then/ check the queue */
+ /* Make sure event is visible before we check prod
+ * again.
+ */
+ mb();
+ } while (vif->rx.sring->req_prod != prod);
- return xenvif_rx_ring_full(vif);
+ return false;
}
/*
@@ -210,93 +199,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
return false;
}
-struct xenvif_count_slot_state {
- unsigned long copy_off;
- bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
- unsigned long offset, unsigned long size,
- struct xenvif_count_slot_state *state)
-{
- unsigned count = 0;
-
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- unsigned long bytes;
-
- bytes = PAGE_SIZE - offset;
-
- if (bytes > size)
- bytes = size;
-
- if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
- count++;
- state->copy_off = 0;
- }
-
- if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
- state->copy_off += bytes;
-
- offset += bytes;
- size -= bytes;
-
- if (offset == PAGE_SIZE)
- offset = 0;
-
- state->head = false;
- }
-
- return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
- struct xenvif_count_slot_state state;
- unsigned int count;
- unsigned char *data;
- unsigned i;
-
- state.head = true;
- state.copy_off = 0;
-
- /* Slot for the first (partial) page of data. */
- count = 1;
-
- /* Need a slot for the GSO prefix for GSO extra data? */
- if (skb_shinfo(skb)->gso_size)
- count++;
-
- data = skb->data;
- while (data < skb_tail_pointer(skb)) {
- unsigned long offset = offset_in_page(data);
- unsigned long size = PAGE_SIZE - offset;
-
- if (data + size > skb_tail_pointer(skb))
- size = skb_tail_pointer(skb) - data;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
-
- data += size;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
- count += xenvif_count_frag_slots(vif, offset, size, &state);
- }
- return count;
-}
-
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
@@ -338,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- else
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
@@ -436,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- int gso_size;
old_meta_prod = npo->meta_prod;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- gso_size = skb_shinfo(skb)->gso_size;
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- gso_size = skb_shinfo(skb)->gso_size;
- } else {
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- gso_size = 0;
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
/* Set up a GSO prefix descriptor, if necessary */
@@ -456,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -466,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
} else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
@@ -557,12 +455,12 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
}
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
{
s8 status;
u16 flags;
@@ -571,11 +469,9 @@ void xenvif_rx_action(struct xenvif *vif)
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
- int nr_frags;
- int count;
unsigned long offset;
struct skb_cb_overlay *sco;
- int need_to_notify = 0;
+ bool need_to_notify = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -584,29 +480,47 @@ void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
- count = 0;
-
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- vif = netdev_priv(skb->dev);
- nr_frags = skb_shinfo(skb)->nr_frags;
+ RING_IDX max_slots_needed;
+ int i;
+
+ /* We need a cheap worst-case estimate for the number of
+ * slots we'll use.
+ */
+
+ max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+ skb_headlen(skb),
+ PAGE_SIZE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned int size;
+ size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ }
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+ max_slots_needed++;
+
+ /* If the skb may not fit then bail out now */
+ if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+ skb_queue_head(&vif->rx_queue, skb);
+ need_to_notify = true;
+ vif->rx_last_skb_slots = max_slots_needed;
+ break;
+ } else
+ vif->rx_last_skb_slots = 0;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
- count += nr_frags + 1;
+ BUG_ON(sco->meta_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
-
- /* Filled the batch queue? */
- /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
- break;
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
if (!npo.copy_prod)
- return;
+ goto done;
BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
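
The admission test added earlier in this hunk relies on a cheap worst-case slot estimate:
one slot per page spanned by the linear area, one per page of every frag, plus one for a
GSO extra descriptor. A stand-alone sketch of that arithmetic, with made-up sizes and
PAGE_SIZE assumed to be 4096, is:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical description of an skb: linear area plus frag sizes. */
struct fake_skb {
	unsigned int head_offset;    /* offset of skb->data within its page */
	unsigned int head_len;       /* skb_headlen()                       */
	unsigned int nr_frags;
	unsigned int frag_size[4];
	bool is_gso;
};

static unsigned int max_slots_needed(const struct fake_skb *skb)
{
	unsigned int i, slots;

	slots = DIV_ROUND_UP(skb->head_offset + skb->head_len, PAGE_SIZE);
	for (i = 0; i < skb->nr_frags; i++)
		slots += DIV_ROUND_UP(skb->frag_size[i], PAGE_SIZE);
	if (skb->is_gso)
		slots++;             /* room for the GSO extra descriptor */
	return slots;
}

int main(void)
{
	struct fake_skb skb = {
		.head_offset = 100, .head_len = 5000,
		.nr_frags = 2, .frag_size = { 9000, 1000 },
		.is_gso = true,
	};
	/* 2 (linear) + 3 (frag 0) + 1 (frag 1) + 1 (GSO) = 7 */
	printf("%u\n", max_slots_needed(&skb));
	return 0;
}

If fewer than this many request slots are free, the skb is put back on rx_queue and the
count is remembered in rx_last_skb_slots so rx_work_todo() can wait for exactly that much
space.
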
@@ -614,8 +528,6 @@ void xenvif_rx_action(struct xenvif *vif)
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
- vif = netdev_priv(skb->dev);
-
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
@@ -678,28 +590,15 @@ void xenvif_rx_action(struct xenvif *vif)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- if (ret)
- need_to_notify = 1;
-
- xenvif_notify_tx_completion(vif);
+ need_to_notify |= !!ret;
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
+done:
if (need_to_notify)
notify_remote_via_irq(vif->rx_irq);
-
- /* More work to do? */
- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- skb_queue_tail(&vif->rx_queue, skb);
-
- xenvif_kick_thread(vif);
}
void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1141,265 +1040,14 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ /* gso_segs will be calculated later */
return 0;
}
-static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
- unsigned int max)
-{
- if (skb_headlen(skb) >= len)
- return 0;
-
- /* If we need to pullup then pullup to the max, so we
- * won't need to do it again.
- */
- if (max > skb->len)
- max = skb->len;
-
- if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
- return -ENOMEM;
-
- if (skb_headlen(skb) < len)
- return -EPROTO;
-
- return 0;
-}
-
-/* This value should be large enough to cover a tagged ethernet header plus
- * maximally sized IP and TCP or UDP headers.
- */
-#define MAX_IP_HDR_LEN 128
-
-static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
- int recalculate_partial_csum)
-{
- unsigned int off;
- bool fragment;
- int err;
-
- fragment = false;
-
- err = maybe_pull_tail(skb,
- sizeof(struct iphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
- fragment = true;
-
- off = ip_hdrlen(skb);
-
- err = -EPROTO;
-
- if (fragment)
- goto out;
-
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct tcphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check))) {
- err = -EPROTO;
- goto out;
- }
-
- if (recalculate_partial_csum)
- tcp_hdr(skb)->check =
- ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
- break;
- case IPPROTO_UDP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct udphdr),
- MAX_IP_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check))) {
- err = -EPROTO;
- goto out;
- }
-
- if (recalculate_partial_csum)
- udp_hdr(skb)->check =
- ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
- break;
- default:
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
-}
-
-/* This value should be large enough to cover a tagged ethernet header plus
- * an IPv6 header, all options, and a maximal TCP or UDP header.
- */
-#define MAX_IPV6_HDR_LEN 256
-
-#define OPT_HDR(type, skb, off) \
- (type *)(skb_network_header(skb) + (off))
-
-static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
- int recalculate_partial_csum)
-{
- int err;
- u8 nexthdr;
- unsigned int off;
- unsigned int len;
- bool fragment;
- bool done;
-
- fragment = false;
- done = false;
-
- off = sizeof(struct ipv6hdr);
-
- err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- nexthdr = ipv6_hdr(skb)->nexthdr;
-
- len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
- while (off <= len && !done) {
- switch (nexthdr) {
- case IPPROTO_DSTOPTS:
- case IPPROTO_HOPOPTS:
- case IPPROTO_ROUTING: {
- struct ipv6_opt_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct ipv6_opt_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
- nexthdr = hp->nexthdr;
- off += ipv6_optlen(hp);
- break;
- }
- case IPPROTO_AH: {
- struct ip_auth_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct ip_auth_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct ip_auth_hdr, skb, off);
- nexthdr = hp->nexthdr;
- off += ipv6_authlen(hp);
- break;
- }
- case IPPROTO_FRAGMENT: {
- struct frag_hdr *hp;
-
- err = maybe_pull_tail(skb,
- off +
- sizeof(struct frag_hdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- hp = OPT_HDR(struct frag_hdr, skb, off);
-
- if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
- fragment = true;
-
- nexthdr = hp->nexthdr;
- off += sizeof(struct frag_hdr);
- break;
- }
- default:
- done = true;
- break;
- }
- }
-
- err = -EPROTO;
-
- if (!done || fragment)
- goto out;
-
- switch (nexthdr) {
- case IPPROTO_TCP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct tcphdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check))) {
- err = -EPROTO;
- goto out;
- }
-
- if (recalculate_partial_csum)
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
- break;
- case IPPROTO_UDP:
- err = maybe_pull_tail(skb,
- off + sizeof(struct udphdr),
- MAX_IPV6_HDR_LEN);
- if (err < 0)
- goto out;
-
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check))) {
- err = -EPROTO;
- goto out;
- }
-
- if (recalculate_partial_csum)
- udp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
- break;
- default:
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
-}
-
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
- int err = -EPROTO;
- int recalculate_partial_csum = 0;
+ bool recalculate_partial_csum = false;
/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
@@ -1409,19 +1057,14 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
vif->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
+ recalculate_partial_csum = true;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- if (skb->protocol == htons(ETH_P_IP))
- err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
- else if (skb->protocol == htons(ETH_P_IPV6))
- err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
-
- return err;
+ return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
@@ -1687,6 +1330,20 @@ static int xenvif_tx_submit(struct xenvif *vif)
skb_probe_transport_header(skb, 0);
+ /* If the packet is GSO then we will have just set up the
+ * transport header offset in checksum_setup so it's now
+ * straightforward to calculate gso_segs.
+ */
+ if (skb_is_gso(skb)) {
+ int mss = skb_shinfo(skb)->gso_size;
+ int hdrlen = skb_transport_header(skb) -
+ skb_mac_header(skb) +
+ tcp_hdrlen(skb);
+
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdrlen, mss);
+ }
+
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
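
With gso_segs no longer forced by the set_skb_gso path, the backend recomputes it here as
ceil(payload / MSS) once checksum_setup() has located the transport header. The same
arithmetic on hypothetical example numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example: 1448-byte MSS, 65226-byte packet,
	 * 14-byte Ethernet + 20-byte IP + 32-byte TCP header.
	 */
	unsigned int mss = 1448;
	unsigned int skb_len = 65226;
	unsigned int hdrlen = 14 + 20 + 32;

	unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdrlen, mss);

	printf("gso_segs = %u\n", gso_segs);   /* 45 */
	return 0;
}
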
@@ -1811,7 +1468,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue);
+ return !skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1861,8 +1519,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
- vif->rx_req_cons_peek = 0;
-
return 0;
err:
@@ -1870,9 +1526,24 @@ err:
return err;
}
+void xenvif_stop_queue(struct xenvif *vif)
+{
+ if (!vif->can_queue)
+ return;
+
+ netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+ if (xenvif_schedulable(vif))
+ netif_wake_queue(vif->dev);
+}
+
int xenvif_kthread(void *data)
{
struct xenvif *vif = data;
+ struct sk_buff *skb;
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
@@ -1881,12 +1552,20 @@ int xenvif_kthread(void *data)
if (kthread_should_stop())
break;
- if (rx_work_todo(vif))
+ if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
+ if (skb_queue_empty(&vif->rx_queue) &&
+ netif_queue_stopped(vif->dev))
+ xenvif_start_queue(vif);
+
cond_resched();
}
+ /* Bin any remaining skbs */
+ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
return 0;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f0358992b04f..7a206cffb062 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -15,8 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e59acb1daa23..e30d80033cbc 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
} tx_skbs[NET_TX_RING_SIZE];
grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ struct page *grant_tx_page[NET_TX_RING_SIZE];
unsigned tx_skb_freelist;
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ np->grant_tx_page[id] = NULL;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = page;
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -617,7 +622,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
- gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+ XEN_NETIF_GSO_TYPE_TCPV6 :
+ XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -809,15 +816,18 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return -EINVAL;
}
- /* Currently only TCPv4 S.O. is supported. */
- if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
+ gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
if (net_ratelimit())
pr_warn("Bad GSO type %d\n", gso->u.gso.type);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_type =
+ (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
+ SKB_GSO_TCPV4 :
+ SKB_GSO_TCPV6;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -859,9 +869,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
- struct iphdr *iph;
- int err = -EPROTO;
- int recalculate_partial_csum = 0;
+ bool recalculate_partial_csum = false;
/*
* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -873,54 +881,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
struct netfront_info *np = netdev_priv(dev);
np->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
+ recalculate_partial_csum = true;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
- iph = (void *)skb->data;
-
- switch (iph->protocol) {
- case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
- offsetof(struct tcphdr, check)))
- goto out;
-
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
- IPPROTO_TCP, 0);
- }
- break;
- case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
- offsetof(struct udphdr, check)))
- goto out;
-
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
- udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
- IPPROTO_UDP, 0);
- }
- break;
- default:
- if (net_ratelimit())
- pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
- iph->protocol);
- goto out;
- }
-
- err = 0;
-
-out:
- return err;
+ return skb_checksum_setup(skb, recalculate_partial_csum);
}
static int handle_incoming_queue(struct net_device *dev,
@@ -939,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
+ skb_reset_network_header(skb);
if (checksum_setup(dev, skb)) {
kfree_skb(skb);
@@ -1122,10 +1091,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
continue;
skb = np->tx_skbs[i].skb;
- gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
- GNTMAP_readonly);
- gnttab_release_grant_reference(&np->gref_tx_head,
- np->grant_tx_ref[i]);
+ get_page(np->grant_tx_page[i]);
+ gnttab_end_foreign_access(np->grant_tx_ref[i],
+ GNTMAP_readonly,
+ (unsigned long)page_address(np->grant_tx_page[i]));
+ np->grant_tx_page[i] = NULL;
np->grant_tx_ref[i] = GRANT_INVALID_REF;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
dev_kfree_skb_irq(skb);
@@ -1134,78 +1104,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
static void xennet_release_rx_bufs(struct netfront_info *np)
{
- struct mmu_update *mmu = np->rx_mmu;
- struct multicall_entry *mcl = np->rx_mcl;
- struct sk_buff_head free_list;
- struct sk_buff *skb;
- unsigned long mfn;
- int xfer = 0, noxfer = 0, unused = 0;
int id, ref;
- dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
- __func__);
- return;
-
- skb_queue_head_init(&free_list);
-
spin_lock_bh(&np->rx_lock);
for (id = 0; id < NET_RX_RING_SIZE; id++) {
- ref = np->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF) {
- unused++;
- continue;
- }
+ struct sk_buff *skb;
+ struct page *page;
skb = np->rx_skbs[id];
- mfn = gnttab_end_foreign_transfer_ref(ref);
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
- np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
- if (0 == mfn) {
- skb_shinfo(skb)->nr_frags = 0;
- dev_kfree_skb(skb);
- noxfer++;
+ if (!skb)
continue;
- }
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Remap the page. */
- const struct page *page =
- skb_frag_page(&skb_shinfo(skb)->frags[0]);
- unsigned long pfn = page_to_pfn(page);
- void *vaddr = page_address(page);
-
- MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
- mfn_pte(mfn, PAGE_KERNEL),
- 0);
- mcl++;
- mmu->ptr = ((u64)mfn << PAGE_SHIFT)
- | MMU_MACHPHYS_UPDATE;
- mmu->val = pfn;
- mmu++;
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF)
+ continue;
- set_phys_to_machine(pfn, mfn);
- }
- __skb_queue_tail(&free_list, skb);
- xfer++;
- }
+ page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
- dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
- __func__, xfer, noxfer, unused);
+ /* gnttab_end_foreign_access() needs a page ref until
+ * foreign access is ended (which may be deferred).
+ */
+ get_page(page);
+ gnttab_end_foreign_access(ref, 0,
+ (unsigned long)page_address(page));
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
- if (xfer) {
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Do all the remapping work and M2P updates. */
- MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
- NULL, DOMID_SELF);
- mcl++;
- HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
- }
+ kfree_skb(skb);
}
- __skb_queue_purge(&free_list);
-
spin_unlock_bh(&np->rx_lock);
}
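
Both the TX and RX release paths above now take an extra page reference before calling
gnttab_end_foreign_access(), because revoking foreign access may be deferred and the page
must not return to the allocator until that completes. A generic user-space sketch of the
same "pin before handing off to a deferred release" pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted buffer. */
struct buf {
	int refs;
	char data[64];
};

static void buf_put(struct buf *b)
{
	if (--b->refs == 0) {
		printf("freeing %p\n", (void *)b);
		free(b);
	}
}

/* Stand-in for a release that may complete much later (deferred work). */
static void deferred_release(struct buf *b)
{
	/* ... queued work eventually runs here ... */
	buf_put(b);             /* drops the reference taken by the caller */
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->refs = 1;            /* owner's reference                       */

	b->refs++;              /* pin: reference for the deferred release */
	deferred_release(b);

	buf_put(b);             /* owner drops its own reference           */
	return 0;
}
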
@@ -1233,6 +1160,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
features &= ~NETIF_F_SG;
}
+ if (features & NETIF_F_IPV6_CSUM) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-ipv6-csum-offload", "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_IPV6_CSUM;
+ }
+
if (features & NETIF_F_TSO) {
if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
"feature-gso-tcpv4", "%d", &val) < 0)
@@ -1242,6 +1178,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
features &= ~NETIF_F_TSO;
}
+ if (features & NETIF_F_TSO6) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv6", "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_TSO6;
+ }
+
return features;
}
@@ -1358,6 +1303,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
for (i = 0; i < NET_RX_RING_SIZE; i++) {
np->rx_skbs[i] = NULL;
np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ np->grant_tx_page[i] = NULL;
}
/* A grant for every tx ring slot */
@@ -1380,7 +1326,9 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_napi_add(netdev, &np->napi, xennet_poll, 64);
netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_GSO_ROBUST;
- netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
/*
* Assume that all hw features are available for now. This set
@@ -1758,6 +1706,19 @@ again:
goto abort_transaction;
}
+ err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
+ "1");
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
err = xenbus_transaction_end(xbt, 0);
if (err) {
if (err == -EAGAIN)
@@ -1872,7 +1833,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
@@ -1887,6 +1847,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
@@ -2115,7 +2079,7 @@ static int __init netif_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_nic_devices())
return -ENODEV;
pr_info("Initialising Xen virtual ethernet driver\n");
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index c1fb20603338..fe20e1cc0545 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -58,5 +58,6 @@ config NFC_PORT100
source "drivers/nfc/pn544/Kconfig"
source "drivers/nfc/microread/Kconfig"
+source "drivers/nfc/nfcmrvl/Kconfig"
endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index c715fe8582a8..56ab822ba03d 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
+obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 85f90090cc1d..11c7cbdade66 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -129,7 +127,7 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
- kfree(skb);
+ kfree_skb(skb);
return;
}
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 696e3467eccc..df85cd3d9db0 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 72fafec3d460..2d1395be64ae 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index 970ded6bfcf5..f868333271aa 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
index 64b447a1c5bf..f538641431a2 100644
--- a/drivers/nfc/microread/microread.h
+++ b/drivers/nfc/microread/microread.h
@@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LOCAL_MICROREAD_H_
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
new file mode 100644
index 000000000000..5e18afd9abe2
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -0,0 +1,23 @@
+config NFC_MRVL
+ tristate "Marvell NFC driver support"
+ depends on NFC_NCI
+ help
+ The core driver to support Marvell NFC devices.
+
+ This driver is required if you want to support
+ Marvell NFC device 8897.
+
+ Say Y here to compile the Marvell NFC driver into the kernel or
+ say M to compile it as a module.
+
+config NFC_MRVL_USB
+ tristate "Marvell NFC-over-USB driver"
+ depends on NFC_MRVL && USB
+ help
+ Marvell NFC-over-USB driver.
+
+ This driver provides support for Marvell NFC-over-USB devices:
+ 8897.
+
+ Say Y here to compile support for the Marvell NFC-over-USB driver
+ into the kernel or say M to compile it as a module.
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
new file mode 100644
index 000000000000..97a0de72dc01
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for NFCMRVL NCI based NFC driver
+#
+
+nfcmrvl-y += main.o
+obj-$(CONFIG_NFC_MRVL) += nfcmrvl.o
+
+nfcmrvl_usb-y += usb.o
+obj-$(CONFIG_NFC_MRVL_USB) += nfcmrvl_usb.o
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
new file mode 100644
index 000000000000..85e8bcf98693
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -0,0 +1,165 @@
+/*
+ * Marvell NFC driver: major functions
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+#define VERSION "1.0"
+
+static int nfcmrvl_nci_open(struct nci_dev *ndev)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+ int err;
+
+ if (test_and_set_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return 0;
+
+ err = priv->if_ops->nci_open(priv);
+
+ if (err)
+ clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags);
+
+ return err;
+}
+
+static int nfcmrvl_nci_close(struct nci_dev *ndev)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+
+ if (!test_and_clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return 0;
+
+ priv->if_ops->nci_close(priv);
+
+ return 0;
+}
+
+static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+
+ nfc_info(priv->dev, "send entry, len %d\n", skb->len);
+
+ skb->dev = (void *)ndev;
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
+ return -EBUSY;
+
+ return priv->if_ops->nci_send(priv, skb);
+}
+
+static int nfcmrvl_nci_setup(struct nci_dev *ndev)
+{
+ __u8 val;
+
+ val = NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED;
+ nci_set_config(ndev, NFCMRVL_NOT_ALLOWED_ID, 1, &val);
+ val = NFCMRVL_GPIO_PIN_NFC_ACTIVE;
+ nci_set_config(ndev, NFCMRVL_ACTIVE_ID, 1, &val);
+ val = NFCMRVL_EXT_COEX_ENABLE;
+ nci_set_config(ndev, NFCMRVL_EXT_COEX_ID, 1, &val);
+
+ return 0;
+}
+
+static struct nci_ops nfcmrvl_nci_ops = {
+ .open = nfcmrvl_nci_open,
+ .close = nfcmrvl_nci_close,
+ .send = nfcmrvl_nci_send,
+ .setup = nfcmrvl_nci_setup,
+};
+
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev)
+{
+ struct nfcmrvl_private *priv;
+ int rc;
+ u32 protocols;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->drv_data = drv_data;
+ priv->if_ops = ops;
+ priv->dev = dev;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, 0, 0);
+ if (!priv->ndev) {
+ nfc_err(dev, "nci_allocate_device failed");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ nci_set_drvdata(priv->ndev, priv);
+
+ rc = nci_register_device(priv->ndev);
+ if (rc) {
+ nfc_err(dev, "nci_register_device failed %d", rc);
+ nci_free_device(priv->ndev);
+ goto error;
+ }
+
+ nfc_info(dev, "registered with nci successfully\n");
+ return priv;
+
+error:
+ kfree(priv);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_register_dev);
+
+void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
+{
+ struct nci_dev *ndev = priv->ndev;
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+ kfree(priv);
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev);
+
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count)
+{
+ struct sk_buff *skb;
+
+ skb = nci_skb_alloc(priv->ndev, count, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, count), data, count);
+ nci_recv_frame(priv->ndev, skb);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
new file mode 100644
index 000000000000..54c4a956bd45
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -0,0 +1,48 @@
+/**
+ * Marvell NFC driver
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+/* Define private flags: */
+#define NFCMRVL_NCI_RUNNING 1
+
+#define NFCMRVL_EXT_COEX_ID 0xE0
+#define NFCMRVL_NOT_ALLOWED_ID 0xE1
+#define NFCMRVL_ACTIVE_ID 0xE2
+#define NFCMRVL_EXT_COEX_ENABLE 1
+#define NFCMRVL_GPIO_PIN_NFC_NOT_ALLOWED 0xA
+#define NFCMRVL_GPIO_PIN_NFC_ACTIVE 0xB
+#define NFCMRVL_NCI_MAX_EVENT_SIZE 260
+
+struct nfcmrvl_private {
+ struct nci_dev *ndev;
+ unsigned long flags;
+ void *drv_data;
+ struct device *dev;
+ struct nfcmrvl_if_ops *if_ops;
+};
+
+struct nfcmrvl_if_ops {
+ int (*nci_open) (struct nfcmrvl_private *priv);
+ int (*nci_close) (struct nfcmrvl_private *priv);
+ int (*nci_send) (struct nfcmrvl_private *priv, struct sk_buff *skb);
+};
+
+void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
+int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, void *data, int count);
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+ struct nfcmrvl_if_ops *ops,
+ struct device *dev);
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
new file mode 100644
index 000000000000..3221ca37d6c9
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -0,0 +1,459 @@
+/**
+ * Marvell NFC-over-USB driver: USB interface related functions
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+#define VERSION "1.0"
+
+static struct usb_device_id nfcmrvl_table[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x1286, 0x2046, 0xff) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, nfcmrvl_table);
+
+#define NFCMRVL_USB_BULK_RUNNING 1
+#define NFCMRVL_USB_SUSPENDING 2
+
+struct nfcmrvl_usb_drv_data {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ unsigned long flags;
+ struct work_struct waker;
+ struct usb_anchor tx_anchor;
+ struct usb_anchor bulk_anchor;
+ struct usb_anchor deferred;
+ int tx_in_flight;
+ /* protects tx_in_flight */
+ spinlock_t txlock;
+ struct usb_endpoint_descriptor *bulk_tx_ep;
+ struct usb_endpoint_descriptor *bulk_rx_ep;
+ int suspend_count;
+ struct nfcmrvl_private *priv;
+};
+
+static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ unsigned long flags;
+ int rv;
+
+ spin_lock_irqsave(&drv_data->txlock, flags);
+ rv = test_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ if (!rv)
+ drv_data->tx_in_flight++;
+ spin_unlock_irqrestore(&drv_data->txlock, flags);
+
+ return rv;
+}
+
+static void nfcmrvl_bulk_complete(struct urb *urb)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = urb->context;
+ int err;
+
+ dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d",
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
+ return;
+
+ if (!urb->status) {
+ if (nfcmrvl_nci_recv_frame(drv_data->priv, urb->transfer_buffer,
+ urb->actual_length) < 0)
+ nfc_err(&drv_data->udev->dev, "corrupted Rx packet");
+ }
+
+ if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags))
+ return;
+
+ usb_anchor_urb(urb, &drv_data->bulk_anchor);
+ usb_mark_last_busy(drv_data->udev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p failed to resubmit (%d)", urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int
+nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags)
+{
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size = NFCMRVL_NCI_MAX_EVENT_SIZE;
+
+ if (!drv_data->bulk_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvbulkpipe(drv_data->udev,
+ drv_data->bulk_rx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, drv_data->udev, pipe, buf, size,
+ nfcmrvl_bulk_complete, drv_data);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_mark_last_busy(drv_data->udev);
+ usb_anchor_urb(urb, &drv_data->bulk_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err) {
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p submission failed (%d)", urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static void nfcmrvl_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct nci_dev *ndev = (struct nci_dev *)skb->dev;
+ struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+
+ nfc_info(priv->dev, "urb %p status %d count %d",
+ urb, urb->status, urb->actual_length);
+
+ spin_lock(&drv_data->txlock);
+ drv_data->tx_in_flight--;
+ spin_unlock(&drv_data->txlock);
+
+ kfree(urb->setup_packet);
+ kfree_skb(skb);
+}
+
+static int nfcmrvl_usb_nci_open(struct nfcmrvl_private *priv)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ int err;
+
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ return err;
+
+ drv_data->intf->needs_remote_wakeup = 1;
+
+ err = nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL);
+ if (err)
+ goto failed;
+
+ set_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+ nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL);
+
+ usb_autopm_put_interface(drv_data->intf);
+ return 0;
+
+failed:
+ usb_autopm_put_interface(drv_data->intf);
+ return err;
+}
+
+static void nfcmrvl_usb_stop_traffic(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ usb_kill_anchored_urbs(&drv_data->bulk_anchor);
+}
+
+static int nfcmrvl_usb_nci_close(struct nfcmrvl_private *priv)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ int err;
+
+ cancel_work_sync(&drv_data->waker);
+
+ clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+
+ nfcmrvl_usb_stop_traffic(drv_data);
+ usb_kill_anchored_urbs(&drv_data->tx_anchor);
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ goto failed;
+
+ drv_data->intf->needs_remote_wakeup = 0;
+ usb_autopm_put_interface(drv_data->intf);
+
+failed:
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+ return 0;
+}
+
+static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv,
+ struct sk_buff *skb)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data;
+ struct urb *urb;
+ unsigned int pipe;
+ int err;
+
+ if (!drv_data->bulk_tx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndbulkpipe(drv_data->udev,
+ drv_data->bulk_tx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, drv_data->udev, pipe, skb->data, skb->len,
+ nfcmrvl_tx_complete, skb);
+
+ err = nfcmrvl_inc_tx(drv_data);
+ if (err) {
+ usb_anchor_urb(urb, &drv_data->deferred);
+ schedule_work(&drv_data->waker);
+ err = 0;
+ goto done;
+ }
+
+ usb_anchor_urb(urb, &drv_data->tx_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ if (err != -EPERM && err != -ENODEV)
+ nfc_err(&drv_data->udev->dev,
+ "urb %p submission failed (%d)", urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ } else {
+ usb_mark_last_busy(drv_data->udev);
+ }
+
+done:
+ usb_free_urb(urb);
+ return err;
+}
+
+static struct nfcmrvl_if_ops usb_ops = {
+ .nci_open = nfcmrvl_usb_nci_open,
+ .nci_close = nfcmrvl_usb_nci_close,
+ .nci_send = nfcmrvl_usb_nci_send,
+};
+
+static void nfcmrvl_waker(struct work_struct *work)
+{
+ struct nfcmrvl_usb_drv_data *drv_data =
+ container_of(work, struct nfcmrvl_usb_drv_data, waker);
+ int err;
+
+ err = usb_autopm_get_interface(drv_data->intf);
+ if (err)
+ return;
+
+ usb_autopm_put_interface(drv_data->intf);
+}
+
+static int nfcmrvl_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct nfcmrvl_usb_drv_data *drv_data;
+ struct nfcmrvl_private *priv;
+ int i;
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ nfc_info(&udev->dev, "intf %p id %p", intf, id);
+
+ drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!drv_data->bulk_tx_ep &&
+ usb_endpoint_is_bulk_out(ep_desc)) {
+ drv_data->bulk_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!drv_data->bulk_rx_ep &&
+ usb_endpoint_is_bulk_in(ep_desc)) {
+ drv_data->bulk_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!drv_data->bulk_tx_ep || !drv_data->bulk_rx_ep)
+ return -ENODEV;
+
+ drv_data->udev = udev;
+ drv_data->intf = intf;
+
+ INIT_WORK(&drv_data->waker, nfcmrvl_waker);
+ spin_lock_init(&drv_data->txlock);
+
+ init_usb_anchor(&drv_data->tx_anchor);
+ init_usb_anchor(&drv_data->bulk_anchor);
+ init_usb_anchor(&drv_data->deferred);
+
+ priv = nfcmrvl_nci_register_dev(drv_data, &usb_ops,
+ &drv_data->udev->dev);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ drv_data->priv = priv;
+ priv->dev = &drv_data->udev->dev;
+
+ usb_set_intfdata(intf, drv_data);
+
+ return 0;
+}
+
+static void nfcmrvl_disconnect(struct usb_interface *intf)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+
+ if (!drv_data)
+ return;
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ nfcmrvl_nci_unregister_dev(drv_data->priv);
+
+ usb_set_intfdata(drv_data->intf, NULL);
+}
+
+#ifdef CONFIG_PM
+static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ if (drv_data->suspend_count++)
+ return 0;
+
+ spin_lock_irq(&drv_data->txlock);
+ if (!(PMSG_IS_AUTO(message) && drv_data->tx_in_flight)) {
+ set_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+ } else {
+ spin_unlock_irq(&drv_data->txlock);
+ drv_data->suspend_count--;
+ return -EBUSY;
+ }
+
+ nfcmrvl_usb_stop_traffic(drv_data);
+ usb_kill_anchored_urbs(&drv_data->tx_anchor);
+
+ return 0;
+}
+
+static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
+{
+ struct urb *urb;
+ int err;
+
+ while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err)
+ break;
+
+ drv_data->tx_in_flight++;
+ }
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+}
+
+static int nfcmrvl_resume(struct usb_interface *intf)
+{
+ struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf);
+ int err = 0;
+
+ nfc_info(&drv_data->udev->dev, "intf %p", intf);
+
+ if (--drv_data->suspend_count)
+ return 0;
+
+ if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags))
+ goto done;
+
+ if (test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) {
+ err = nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO);
+ if (err) {
+ clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags);
+ goto failed;
+ }
+
+ nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO);
+ }
+
+ spin_lock_irq(&drv_data->txlock);
+ nfcmrvl_play_deferred(drv_data);
+ clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+
+ return 0;
+
+failed:
+ usb_scuttle_anchored_urbs(&drv_data->deferred);
+done:
+ spin_lock_irq(&drv_data->txlock);
+ clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags);
+ spin_unlock_irq(&drv_data->txlock);
+
+ return err;
+}
+#endif
+
+static struct usb_driver nfcmrvl_usb_driver = {
+ .name = "nfcmrvl",
+ .probe = nfcmrvl_probe,
+ .disconnect = nfcmrvl_disconnect,
+#ifdef CONFIG_PM
+ .suspend = nfcmrvl_suspend,
+ .resume = nfcmrvl_resume,
+ .reset_resume = nfcmrvl_resume,
+#endif
+ .id_table = nfcmrvl_table,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+ .soft_unbind = 1,
+};
+module_usb_driver(nfcmrvl_usb_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-USB driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 71308645593f..683671a71c7e 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -22,8 +22,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/platform_device.h>
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 2daf04c07338..cf1a87bb74f8 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -523,6 +521,9 @@ static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
if (frame->ccid.type != 0x83)
return false;
+ if (!frame->ccid.datalen)
+ return false;
+
if (frame->data[frame->ccid.datalen - 2] == 0x63)
return false;
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index b158ee1c2ac6..d6185ff2f87b 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index ee67de50c36f..330cd4031009 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 74cfa0a88b9e..3df4a109cfad 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -13,9 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -197,42 +195,42 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9e, 0xaa}, 0x01},
- {{0x9b, 0xd1}, 0x0d},
- {{0x9b, 0xd2}, 0x24},
- {{0x9b, 0xd3}, 0x0a},
- {{0x9b, 0xd4}, 0x22},
- {{0x9b, 0xd5}, 0x08},
- {{0x9b, 0xd6}, 0x1e},
- {{0x9b, 0xdd}, 0x1c},
+ {{0x9b, 0xd1}, 0x17},
+ {{0x9b, 0xd2}, 0x58},
+ {{0x9b, 0xd3}, 0x10},
+ {{0x9b, 0xd4}, 0x47},
+ {{0x9b, 0xd5}, 0x0c},
+ {{0x9b, 0xd6}, 0x37},
+ {{0x9b, 0xdd}, 0x33},
- {{0x9b, 0x84}, 0x13},
- {{0x99, 0x81}, 0x7f},
- {{0x99, 0x31}, 0x70},
+ {{0x9b, 0x84}, 0x00},
+ {{0x99, 0x81}, 0x79},
+ {{0x99, 0x31}, 0x79},
{{0x98, 0x00}, 0x3f},
- {{0x9f, 0x09}, 0x00},
+ {{0x9f, 0x09}, 0x02},
{{0x9f, 0x0a}, 0x05},
{{0x9e, 0xd1}, 0xa1},
- {{0x99, 0x23}, 0x00},
-
- {{0x9e, 0x74}, 0x80},
+ {{0x99, 0x23}, 0x01},
+ {{0x9e, 0x74}, 0x00},
+ {{0x9e, 0x90}, 0x00},
{{0x9f, 0x28}, 0x10},
- {{0x9f, 0x35}, 0x14},
+ {{0x9f, 0x35}, 0x04},
- {{0x9f, 0x36}, 0x60},
+ {{0x9f, 0x36}, 0x11},
{{0x9c, 0x31}, 0x00},
- {{0x9c, 0x32}, 0xc8},
+ {{0x9c, 0x32}, 0x00},
- {{0x9c, 0x19}, 0x40},
+ {{0x9c, 0x19}, 0x0a},
- {{0x9c, 0x1a}, 0x40},
+ {{0x9c, 0x1a}, 0x0a},
{{0x9c, 0x0c}, 0x00},
@@ -242,13 +240,13 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
{{0x9c, 0x13}, 0x00},
- {{0x98, 0xa2}, 0x0e},
+ {{0x98, 0xa2}, 0x09},
- {{0x98, 0x93}, 0x40},
+ {{0x98, 0x93}, 0x00},
- {{0x98, 0x7d}, 0x02},
+ {{0x98, 0x7d}, 0x08},
{{0x98, 0x7e}, 0x00},
- {{0x9f, 0xc8}, 0x01},
+ {{0x9f, 0xc8}, 0x00},
};
struct hw_config *p = hw_config;
int count = ARRAY_SIZE(hw_config);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 01020e585443..491bf45da358 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -12,9 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LOCAL_PN544_H_
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 8a0571eb2627..a8555f81cbba 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1509,6 +1509,7 @@ static void port100_disconnect(struct usb_interface *interface)
usb_free_urb(dev->in_urb);
usb_free_urb(dev->out_urb);
+ usb_put_dev(dev->udev);
kfree(dev->cmd);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index c6973f101a3e..889005fa4d04 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -7,14 +7,6 @@ config OF
menu "Device Tree and Open Firmware support"
depends on OF
-config PROC_DEVICETREE
- bool "Support for device tree in /proc"
- depends on PROC_FS && !SPARC
- help
- This option adds a device-tree directory under /proc which contains
- an image of the device tree that the kernel copies from Open
- Firmware or other boot firmware. If unsure, say Y here.
-
config OF_SELFTEST
bool "Device Tree Runtime self tests"
depends on OF_IRQ
@@ -44,6 +36,10 @@ config OF_DYNAMIC
config OF_ADDRESS
def_bool y
depends on !SPARC
+ select OF_ADDRESS_PCI if PCI
+
+config OF_ADDRESS_PCI
+ bool
config OF_IRQ
def_bool y
@@ -75,4 +71,10 @@ config OF_MTD
depends on MTD
def_bool y
+config OF_RESERVED_MEM
+ depends on OF_EARLY_FLATTREE
+ bool
+ help
+ Helpers to allow for reservation of memory regions
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index efd05102c405..ed9660adad77 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
+obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..cb4242a69cd5 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -91,7 +91,7 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
return IORESOURCE_MEM;
}
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
/*
* PCI bus specific translator
*/
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
@@ -165,7 +166,9 @@ static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
+#endif /* CONFIG_OF_ADDRESS_PCI */
+#ifdef CONFIG_PCI
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{
@@ -355,7 +358,7 @@ static unsigned int of_bus_isa_get_flags(const __be32 *addr)
*/
static struct of_bus of_busses[] = {
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
/* PCI */
{
.name = "pci",
@@ -366,7 +369,7 @@ static struct of_bus of_busses[] = {
.translate = of_bus_pci_translate,
.get_flags = of_bus_pci_get_flags,
},
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_OF_ADDRESS_PCI */
/* ISA */
{
.name = "isa",
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f807d0edabf3..4557a142c752 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -21,8 +21,10 @@
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/proc_fs.h>
#include "of_private.h"
@@ -35,6 +37,12 @@ struct device_node *of_chosen;
struct device_node *of_aliases;
static struct device_node *of_stdout;
+static struct kset *of_kset;
+
+/*
+ * Used to protect of_aliases, but also overloaded to hold off the addition
+ * of nodes to sysfs.
+ */
DEFINE_MUTEX(of_aliases_mutex);
/* use when traversing tree through the allnext, child, sibling,
@@ -92,14 +100,14 @@ int __weak of_node_to_nid(struct device_node *np)
struct device_node *of_node_get(struct device_node *node)
{
if (node)
- kref_get(&node->kref);
+ kobject_get(&node->kobj);
return node;
}
EXPORT_SYMBOL(of_node_get);
-static inline struct device_node *kref_to_device_node(struct kref *kref)
+static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
{
- return container_of(kref, struct device_node, kref);
+ return container_of(kobj, struct device_node, kobj);
}
/**
@@ -109,16 +117,15 @@ static inline struct device_node *kref_to_device_node(struct kref *kref)
* In of_node_put() this function is passed to kref_put()
* as the destructor.
*/
-static void of_node_release(struct kref *kref)
+static void of_node_release(struct kobject *kobj)
{
- struct device_node *node = kref_to_device_node(kref);
+ struct device_node *node = kobj_to_device_node(kobj);
struct property *prop = node->properties;
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
dump_stack();
- kref_init(&node->kref);
return;
}
@@ -151,11 +158,154 @@ static void of_node_release(struct kref *kref)
void of_node_put(struct device_node *node)
{
if (node)
- kref_put(&node->kref, of_node_release);
+ kobject_put(&node->kobj);
}
EXPORT_SYMBOL(of_node_put);
+#else
+static void of_node_release(struct kobject *kobj)
+{
+ /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
#endif /* CONFIG_OF_DYNAMIC */
+struct kobj_type of_node_ktype = {
+ .release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct property *pp = container_of(bin_attr, struct property, attr);
+ return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+ const char *name = orig_name;
+ struct kernfs_node *kn;
+ int i = 0;
+
+ /* don't be a hero. After 16 tries give up */
+ while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
+ sysfs_put(kn);
+ if (name != orig_name)
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ }
+
+ if (name != orig_name)
+ pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ kobject_name(kobj), name);
+ return name;
+}
+
+static int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ int rc;
+
+ /* Important: Don't leak passwords */
+ bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+ sysfs_bin_attr_init(&pp->attr);
+ pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+ pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
+ pp->attr.size = secure ? 0 : pp->length;
+ pp->attr.read = of_node_property_read;
+
+ rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+ WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
+ return rc;
+}
+
+static int __of_node_add(struct device_node *np)
+{
+ const char *name;
+ struct property *pp;
+ int rc;
+
+ np->kobj.kset = of_kset;
+ if (!np->parent) {
+ /* Nodes without parents are new top level trees */
+ rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+ } else {
+ name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+ if (!name || !name[0])
+ return -EINVAL;
+
+ rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
+ }
+ if (rc)
+ return rc;
+
+ for_each_property_of_node(np, pp)
+ __of_add_property_sysfs(np, pp);
+
+ return 0;
+}
+
+int of_node_add(struct device_node *np)
+{
+ int rc = 0;
+
+ BUG_ON(!of_node_is_initialized(np));
+
+ /*
+ * Grab the mutex here so that in a race condition between of_init() and
+ * of_node_add(), node addition will still be consistent.
+ */
+ mutex_lock(&of_aliases_mutex);
+ if (of_kset)
+ rc = __of_node_add(np);
+ else
+ /* This scenario may be perfectly valid, but report it anyway */
+ pr_info("of_node_add(%s) before of_init()\n", np->full_name);
+ mutex_unlock(&of_aliases_mutex);
+ return rc;
+}
+
+#if defined(CONFIG_OF_DYNAMIC)
+static void of_node_remove(struct device_node *np)
+{
+ struct property *pp;
+
+ BUG_ON(!of_node_is_initialized(np));
+
+ /* only remove properties if on sysfs */
+ if (of_node_is_attached(np)) {
+ for_each_property_of_node(np, pp)
+ sysfs_remove_bin_file(&np->kobj, &pp->attr);
+ kobject_del(&np->kobj);
+ }
+
+ /* finally remove the kobj_init ref */
+ of_node_put(np);
+}
+#endif
+
+static int __init of_init(void)
+{
+ struct device_node *np;
+
+ /* Create the kset, and register existing nodes */
+ mutex_lock(&of_aliases_mutex);
+ of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
+ if (!of_kset) {
+ mutex_unlock(&of_aliases_mutex);
+ return -ENOMEM;
+ }
+ for_each_of_allnodes(np)
+ __of_node_add(np);
+ mutex_unlock(&of_aliases_mutex);
+
+ /* Symlink in /proc as required by userspace ABI */
+ if (of_allnodes)
+ proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
+
+ return 0;
+}
+core_initcall(of_init);
+
static struct property *__of_find_property(const struct device_node *np,
const char *name, int *lenp)
{
@@ -342,27 +492,72 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
EXPORT_SYMBOL(of_get_cpu_node);
-/** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
+/**
+ * __of_device_is_compatible() - Check if the node matches given constraints
+ * @device: pointer to node
+ * @compat: required compatible string, NULL or "" for any match
+ * @type: required device_type value, NULL or "" for any match
+ * @name: required node name, NULL or "" for any match
+ *
+ * Checks if the given @compat, @type and @name strings match the
+ * properties of the given @device. A constraint can be skipped by
+ * passing NULL or an empty string for that constraint.
+ *
+ * Returns 0 for no match, and a positive integer on match. The return
+ * value is a relative score with larger values indicating better
+ * matches. The score is weighted so that the most specific compatible value
+ * gets the highest score. Matching type is next, followed by matching
+ * name. Practically speaking, this results in the following priority
+ * order for matches:
+ *
+ * 1. specific compatible && type && name
+ * 2. specific compatible && type
+ * 3. specific compatible && name
+ * 4. specific compatible
+ * 5. general compatible && type && name
+ * 6. general compatible && type
+ * 7. general compatible && name
+ * 8. general compatible
+ * 9. type && name
+ * 10. type
+ * 11. name
*/
static int __of_device_is_compatible(const struct device_node *device,
- const char *compat)
-{
- const char* cp;
- int cplen, l;
+ const char *compat, const char *type, const char *name)
+{
+ struct property *prop;
+ const char *cp;
+ int index = 0, score = 0;
+
+ /* Compatible match has highest priority */
+ if (compat && compat[0]) {
+ prop = __of_find_property(device, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp), index++) {
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
+ score = INT_MAX/2 - (index << 2);
+ break;
+ }
+ }
+ if (!score)
+ return 0;
+ }
- cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ /* Matching type is better than matching name */
+ if (type && type[0]) {
+ if (!device->type || of_node_cmp(type, device->type))
+ return 0;
+ score += 2;
}
- return 0;
+ /* Matching name is a bit better than not */
+ if (name && name[0]) {
+ if (!device->name || of_node_cmp(name, device->name))
+ return 0;
+ score++;
+ }
+
+ return score;
}
/** Checks if the given "compat" string matches one of the strings in
@@ -375,7 +570,7 @@ int of_device_is_compatible(const struct device_node *device,
int res;
raw_spin_lock_irqsave(&devtree_lock, flags);
- res = __of_device_is_compatible(device, compat);
+ res = __of_device_is_compatible(device, compat, NULL, NULL);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
@@ -415,6 +610,9 @@ static int __of_device_is_available(const struct device_node *device)
const char *status;
int statlen;
+ if (!device)
+ return 0;
+
status = __of_get_property(device, "status", &statlen);
if (status == NULL)
return 1;
@@ -678,10 +876,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
- if (__of_device_is_compatible(np, compatible) &&
+ if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
}
@@ -731,25 +926,22 @@ static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
+ const struct of_device_id *best_match = NULL;
+ int score, best_score = 0;
+
if (!matches)
return NULL;
- while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
- int match = 1;
- if (matches->name[0])
- match &= node->name
- && !strcmp(matches->name, node->name);
- if (matches->type[0])
- match &= node->type
- && !strcmp(matches->type, node->type);
- if (matches->compatible[0])
- match &= __of_device_is_compatible(node,
- matches->compatible);
- if (match)
- return matches;
- matches++;
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
+ score = __of_device_is_compatible(node, matches->compatible,
+ matches->type, matches->name);
+ if (score > best_score) {
+ best_match = matches;
+ best_score = score;
+ }
}
- return NULL;
+
+ return best_match;
}
/**
@@ -862,6 +1054,38 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it. Returns the number of elements on success, -EINVAL if
+ * the property does not exist or its length is not a multiple of elem_size,
+ * and -ENODATA if the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ if (prop->length % elem_size != 0) {
+ pr_err("size of %s in node %s is not a multiple of %d\n",
+ propname, np->full_name, elem_size);
+ return -EINVAL;
+ }
+
+ return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
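A short usage sketch for the new helper; the "pins" property name and the caller are hypothetical, the point is only that the element size drives the length validation described above:

#include <linux/of.h>

/* Sketch: count how many u32 cells a hypothetical "pins" property holds.
 * Per the kernel-doc above this yields -EINVAL for a missing property or a
 * length that is not a multiple of sizeof(u32), and -ENODATA for a
 * property with no value. */
static int count_pins(struct device_node *np)
{
	return of_property_count_elems_of_size(np, "pins", sizeof(u32));
}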
+
+/**
* of_find_property_value_of_size
*
* @np: device node from which the property value is to be read.
@@ -1456,6 +1680,10 @@ static int of_property_notify(int action, struct device_node *np,
{
struct of_prop_reconfig pr;
+ /* only call notifiers if the node is attached */
+ if (!of_node_is_attached(np))
+ return 0;
+
pr.dn = np;
pr.prop = prop;
return of_reconfig_notify(action, &pr);
@@ -1469,11 +1697,31 @@ static int of_property_notify(int action, struct device_node *np,
#endif
/**
+ * __of_add_property - Add a property to a node without lock operations
+ */
+static int __of_add_property(struct device_node *np, struct property *prop)
+{
+ struct property **next;
+
+ prop->next = NULL;
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0)
+ /* duplicate ! don't insert it */
+ return -EEXIST;
+
+ next = &(*next)->next;
+ }
+ *next = prop;
+
+ return 0;
+}
+
+/**
* of_add_property - Add a property to a node
*/
int of_add_property(struct device_node *np, struct property *prop)
{
- struct property **next;
unsigned long flags;
int rc;
@@ -1481,27 +1729,16 @@ int of_add_property(struct device_node *np, struct property *prop)
if (rc)
return rc;
- prop->next = NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
- /* duplicate ! don't insert it */
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return -1;
- }
- next = &(*next)->next;
- }
- *next = prop;
+ rc = __of_add_property(np, prop);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ if (rc)
+ return rc;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
+ if (of_node_is_attached(np))
+ __of_add_property_sysfs(np, prop);
- return 0;
+ return rc;
}
/**
@@ -1541,11 +1778,11 @@ int of_remove_property(struct device_node *np, struct property *prop)
if (!found)
return -ENODEV;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to remove the proc node as well */
- if (np->pde)
- proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
+ /* at early boot, bail here and defer setup to of_init() */
+ if (!of_kset)
+ return 0;
+
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
return 0;
}
@@ -1591,16 +1828,17 @@ int of_update_property(struct device_node *np, struct property *newprop)
next = &(*next)->next;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ if (rc)
+ return rc;
+
+ /* Update the sysfs attribute */
+ if (oldprop)
+ sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
+ __of_add_property_sysfs(np, newprop);
if (!found)
return -ENODEV;
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
return 0;
}
@@ -1635,22 +1873,6 @@ int of_reconfig_notify(unsigned long action, void *p)
return notifier_to_errno(rc);
}
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_add_proc_dt_entry(struct device_node *dn)
-{
- struct proc_dir_entry *ent;
-
- ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
- if (ent)
- proc_device_tree_add_node(dn, ent);
-}
-#else
-static void of_add_proc_dt_entry(struct device_node *dn)
-{
- return;
-}
-#endif
-
/**
* of_attach_node - Plug a device node into the tree and global list.
*/
@@ -1668,24 +1890,13 @@ int of_attach_node(struct device_node *np)
np->allnext = of_allnodes;
np->parent->child = np;
of_allnodes = np;
+ of_node_clear_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
- of_add_proc_dt_entry(np);
+ of_node_add(np);
return 0;
}
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_remove_proc_dt_entry(struct device_node *dn)
-{
- proc_remove(dn->pde);
-}
-#else
-static void of_remove_proc_dt_entry(struct device_node *dn)
-{
- return;
-}
-#endif
-
/**
* of_detach_node - "Unplug" a node from the device tree.
*
@@ -1741,7 +1952,7 @@ int of_detach_node(struct device_node *np)
of_node_set_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
- of_remove_proc_dt_entry(np);
+ of_node_remove(np);
return rc;
}
#endif /* defined(CONFIG_OF_DYNAMIC) */
@@ -1777,9 +1988,9 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
of_chosen = of_find_node_by_path("/chosen@0");
if (of_chosen) {
- const char *name;
-
- name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ const char *name = of_get_property(of_chosen, "stdout-path", NULL);
+ if (!name)
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name)
of_stdout = of_find_node_by_path(name);
}
@@ -1940,3 +2151,153 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
return NULL;
}
+
+/**
+ * of_graph_parse_endpoint() - parse common endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the OF endpoint data structure
+ *
+ * The caller should hold a reference to @node.
+ */
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ struct device_node *port_node = of_get_parent(node);
+
+ WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
+ __func__, node->full_name);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+
+ endpoint->local_node = node;
+ /*
+ * It doesn't matter whether the two calls below succeed.
+ * If they don't then the default value 0 is used.
+ */
+ of_property_read_u32(port_node, "reg", &endpoint->port);
+ of_property_read_u32(node, "reg", &endpoint->id);
+
+ of_node_put(port_node);
+
+ return 0;
+}
+EXPORT_SYMBOL(of_graph_parse_endpoint);
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is not decremented, the caller have to use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *endpoint;
+ struct device_node *port = NULL;
+
+ if (!parent)
+ return NULL;
+
+ if (!prev) {
+ struct device_node *node;
+ /*
+ * It's the first call, we have to find a port subnode
+ * within this node or within an optional 'ports' node.
+ */
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ port = of_get_child_by_name(parent, "port");
+
+ if (port) {
+ /* Found a port, get an endpoint. */
+ endpoint = of_get_next_child(port, NULL);
+ of_node_put(port);
+ } else {
+ endpoint = NULL;
+ }
+
+ if (!endpoint)
+ pr_err("%s(): no endpoint nodes specified for %s\n",
+ __func__, parent->full_name);
+ of_node_put(node);
+
+ return endpoint;
+ }
+
+ port = of_get_parent(prev);
+ if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
+ __func__, prev->full_name))
+ return NULL;
+
+ /* Avoid dropping prev node refcount to 0. */
+ of_node_get(prev);
+ endpoint = of_get_next_child(port, prev);
+ if (endpoint) {
+ of_node_put(port);
+ return endpoint;
+ }
+
+ /* No more endpoints under this port, try the next one. */
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (of_node_cmp(port->name, "port"));
+
+ /* Pick up the first endpoint in this port. */
+ endpoint = of_get_next_child(port, NULL);
+ of_node_put(port);
+
+ return endpoint;
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ struct device_node *np;
+ unsigned int depth;
+
+ /* Get remote endpoint node. */
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+
+ /* Walk 3 levels up only if there is a 'ports' node. */
+ for (depth = 3; depth && np; depth--) {
+ np = of_get_next_parent(np);
+ if (depth == 2 && of_node_cmp(np->name, "ports"))
+ break;
+ }
+ return np;
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+ struct device_node *np;
+
+ /* Get remote endpoint node. */
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+ if (!np)
+ return NULL;
+ return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
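A hedged sketch of how a consumer might walk the graph with the helpers added above; the caller, the logging, and what is done with the remote node are placeholders for illustration:

#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/printk.h>

/* Sketch: visit every endpoint below @dev_node, parse its port/id pair and
 * look up the device on the far side of the link. */
static void walk_graph(struct device_node *dev_node)
{
	struct device_node *prev = NULL, *ep, *remote;
	struct of_endpoint endpoint;

	while ((ep = of_graph_get_next_endpoint(dev_node, prev))) {
		/* per the kernel-doc, the helper does not drop prev for us */
		of_node_put(prev);
		prev = ep;

		of_graph_parse_endpoint(ep, &endpoint);
		pr_info("endpoint at port %u, id %u\n",
			endpoint.port, endpoint.id);

		remote = of_graph_get_remote_port_parent(ep);
		if (remote) {
			pr_info("  linked to %s\n", remote->full_name);
			of_node_put(remote);
		}
	}
	of_node_put(prev);
}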
diff --git a/drivers/of/device.c b/drivers/of/device.c
index f685e55e0717..dafb9736ab9b 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -85,6 +85,9 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
int cplen, i;
ssize_t tsize, csize, repend;
+ if ((!dev) || (!dev->of_node))
+ return -ENODEV;
+
/* Name & Type */
csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
dev->of_node->type);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 758b4f8b30b7..fa16a912a927 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -202,6 +204,7 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
__alignof__(struct device_node));
if (allnextpp) {
char *fn;
+ of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
if (new_format) {
/* rebuild full path for new format */
@@ -232,7 +235,6 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
dad->next->sibling = np;
dad->next = np;
}
- kref_init(&np->kref);
}
/* process properties */
while (1) {
@@ -440,6 +442,129 @@ struct boot_param_header *initial_boot_params;
#ifdef CONFIG_OF_EARLY_FLATTREE
/**
+ * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
+ */
+static int __init __reserved_mem_reserve_reg(unsigned long node,
+ const char *uname)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t base, size;
+ unsigned long len;
+ __be32 *prop;
+ int nomap, first = 1;
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop)
+ return -ENOENT;
+
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ while (len >= t_len) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (base && size &&
+ early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
+ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+ else
+ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+
+ len -= t_len;
+ if (first) {
+ fdt_reserved_mem_save_node(node, uname, base, size);
+ first = 0;
+ }
+ }
+ return 0;
+}
+
+/**
+ * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
+ * in /reserved-memory match the values supported by the current implementation,
+ * and check that the 'ranges' property has been provided
+ */
+static int __reserved_mem_check_root(unsigned long node)
+{
+ __be32 *prop;
+
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+ if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
+ return -EINVAL;
+
+ prop = of_get_flat_dt_prop(node, "ranges", NULL);
+ if (!prop)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * __fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ */
+static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ static int found;
+ const char *status;
+ int err;
+
+ if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
+ if (__reserved_mem_check_root(node) != 0) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ /* break scan */
+ return 1;
+ }
+ found = 1;
+ /* scan next node */
+ return 0;
+ } else if (!found) {
+ /* scan next node */
+ return 0;
+ } else if (found && depth < 2) {
+ /* scanning of /reserved-memory has been finished */
+ return 1;
+ }
+
+ status = of_get_flat_dt_prop(node, "status", NULL);
+ if (status && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0)
+ return 0;
+
+ err = __reserved_mem_reserve_reg(node, uname);
+ if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
+ fdt_reserved_mem_save_node(node, uname, 0, 0);
+
+ /* scan next node */
+ return 0;
+}
+
+/**
+ * early_init_fdt_scan_reserved_mem() - create reserved memory regions
+ *
+ * This function grabs memory from the early allocator for exclusive device use
+ * as defined in the device tree. It should be called by arch-specific code
+ * once the early allocator (i.e. memblock) has been fully activated.
+ */
+void __init early_init_fdt_scan_reserved_mem(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
+ fdt_init_reserved_mem();
+}
+
+/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function
* @data: context data pointer
@@ -856,6 +981,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
+int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ if (memblock_is_region_reserved(base, size))
+ return -EBUSY;
+ if (nomap)
+ return memblock_remove(base, size);
+ return memblock_reserve(base, size);
+}
+
/*
* called from unflatten_device_tree() to bootstrap devicetree itself
* Architectures can override this definition if memblock isn't used
@@ -864,6 +999,14 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __va(memblock_alloc(size, align));
}
+#else
+int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+{
+ pr_err("Reserved memory not supported, ignoring range 0x%llx - 0x%llx%s\n",
+ base, size, nomap ? " (nomap)" : "");
+ return -ENOSYS;
+}
#endif
bool __init early_init_dt_scan(void *params)
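A minimal sketch of where the new reserved-memory hook is intended to sit in an architecture's early boot path, assuming memblock is already usable; arch_setup_memory() is a made-up name for whatever the platform's memory init function is:

#include <linux/init.h>
#include <linux/of_fdt.h>

/* Sketch: reserve firmware-described regions right after memblock has been
 * populated and before any large early allocations are made. */
static void __init arch_setup_memory(void)
{
	early_init_dt_scan_nodes();		/* adds /memory ranges to memblock */
	early_init_fdt_scan_reserved_mem();	/* honours /reserved-memory */
}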
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 27212402c532..9bcf2cf19357 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -216,6 +216,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
goto fail;
}
+ if (!of_device_is_available(newpar))
+ match = 0;
+
/* Get #interrupt-cells and #address-cells of new
* parent
*/
@@ -435,7 +438,8 @@ void __init of_irq_init(const struct of_device_id *matches)
INIT_LIST_HEAD(&intc_parent_list);
for_each_matching_node(np, matches) {
- if (!of_find_property(np, "interrupt-controller", NULL))
+ if (!of_find_property(np, "interrupt-controller", NULL) ||
+ !of_device_is_available(np))
continue;
/*
* Here, we allocate and populate an intc_desc with the node
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d5a57a9e329c..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -22,6 +22,77 @@
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_LICENSE("GPL");
+static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ /* The default values for phydev->supported are provided by the PHY
+ * driver "features" member, we want to reset to sane defaults fist
+ * before supporting higher speeds.
+ */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
+
+ switch (max_speed) {
+ default:
+ return;
+
+ case SPEED_1000:
+ phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_100:
+ phydev->supported |= PHY_100BT_FEATURES;
+ case SPEED_10:
+ phydev->supported |= PHY_10BT_FEATURES;
+ }
+}
+
+static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
+ u32 addr)
+{
+ struct phy_device *phy;
+ bool is_c45;
+ int rc;
+ u32 max_speed = 0;
+
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return 1;
+
+ rc = irq_of_parse_and_map(child, 0);
+ if (rc > 0) {
+ phy->irq = rc;
+ if (mdio->irq)
+ mdio->irq[addr] = rc;
+ } else {
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
+ }
+
+ /* Associate the OF node with the device structure so it
+ * can be looked up later */
+ of_node_get(child);
+ phy->dev.of_node = child;
+
+ /* All data is now stored in the phy struct;
+ * register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ of_node_put(child);
+ return 1;
+ }
+
+ /* Set phydev->supported based on the "max-speed" property
+ * if present */
+ if (!of_property_read_u32(child, "max-speed", &max_speed))
+ of_set_phy_supported(phy, max_speed);
+
+ dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
+ child->name, addr);
+
+ return 0;
+}
+
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -32,11 +103,10 @@ MODULE_LICENSE("GPL");
*/
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
- struct phy_device *phy;
struct device_node *child;
const __be32 *paddr;
u32 addr;
- bool is_c45, scanphys = false;
+ bool scanphys = false;
int rc, i, len;
/* Mask out all PHYs from auto probing. Instead the PHYs listed in
@@ -67,44 +137,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
addr = be32_to_cpup(paddr);
- if (addr >= 32) {
+ if (addr >= PHY_MAX_ADDR) {
dev_err(&mdio->dev, "%s PHY address %i is too large\n",
child->full_name, addr);
continue;
}
- if (mdio->irq) {
- mdio->irq[addr] = irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
- phy = get_phy_device(mdio, addr, is_c45);
-
- if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev,
- "cannot get PHY at address %i\n",
- addr);
- continue;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct; register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
- }
-
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
}
if (!scanphys)
@@ -117,9 +158,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (paddr)
continue;
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
/* skip already registered PHYs */
if (mdio->phy_map[addr])
@@ -129,34 +167,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
- phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
continue;
-
- if (mdio->irq) {
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = PHY_POLL;
- }
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->dev.of_node = child;
-
- /* All data is now stored in the phy struct;
- * register it */
- rc = phy_device_register(phy);
- if (rc) {
- phy_device_free(phy);
- of_node_put(child);
- continue;
- }
-
- dev_info(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
- break;
}
}
@@ -247,3 +260,23 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
return IS_ERR(phy) ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_connect_fixed_link);
+
+/**
+ * of_phy_attach - Attach to a PHY without starting the state machine
+ * @dev: pointer to net_device claiming the phy
+ * @phy_np: Node pointer for the PHY
+ * @flags: flags to pass to the PHY
+ * @iface: PHY data interface type
+ */
+struct phy_device *of_phy_attach(struct net_device *dev,
+ struct device_node *phy_np, u32 flags,
+ phy_interface_t iface)
+{
+ struct phy_device *phy = of_phy_find_device(phy_np);
+
+ if (!phy)
+ return NULL;
+
+ return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
+}
+EXPORT_SYMBOL(of_phy_attach);
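A usage sketch for of_phy_attach(); the "phy-handle" property, the fixed RGMII interface and the MAC driver context are assumptions for illustration, and the declaration is assumed to land in of_mdio.h as part of this series:

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Sketch: attach to the PHY referenced by a hypothetical "phy-handle"
 * phandle without starting the PHY state machine. */
static struct phy_device *example_attach(struct net_device *ndev,
					 struct device_node *mac_node)
{
	struct device_node *phy_node;
	struct phy_device *phydev;

	phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
	if (!phy_node)
		return NULL;

	phydev = of_phy_attach(ndev, phy_node, 0, PHY_INTERFACE_MODE_RGMII);
	of_node_put(phy_node);
	return phydev;
}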
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 8f9be2e09937..a3df3428dac6 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -13,8 +13,8 @@
/**
* It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * into the device tree binding of 'phy-mode' or 'phy-connection-type',
+ * so that Ethernet device driver can get phy interface from device tree.
*/
static const char *phy_modes[] = {
[PHY_INTERFACE_MODE_NA] = "",
@@ -30,14 +30,16 @@ static const char *phy_modes[] = {
[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
[PHY_INTERFACE_MODE_RTBI] = "rtbi",
[PHY_INTERFACE_MODE_SMII] = "smii",
+ [PHY_INTERFACE_MODE_XGMII] = "xgmii",
};
/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
- * The function gets phy interface string from property 'phy-mode',
- * and return its index in phy_modes table, or errno in error case.
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes table,
+ * or a negative errno on error.
*/
int of_get_phy_mode(struct device_node *np)
{
@@ -46,6 +48,8 @@ int of_get_phy_mode(struct device_node *np)
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
+ err = of_property_read_string(np, "phy-connection-type", &pm);
+ if (err < 0)
return err;
for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
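A small sketch of how a MAC driver would consume the fallback added above; the wrapper name is made up and error handling is kept minimal:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>

/* Sketch: of_get_phy_mode() now tries "phy-mode" first and falls back to
 * "phy-connection-type"; a negative return means neither property was
 * present or recognized. */
static int example_get_iface(struct device_node *np, phy_interface_t *iface)
{
	int mode = of_get_phy_mode(np);

	if (mode < 0)
		return mode;

	*iface = mode;
	return 0;
}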
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
new file mode 100644
index 000000000000..daaaf935911d
--- /dev/null
+++ b/drivers/of/of_reserved_mem.c
@@ -0,0 +1,217 @@
+/*
+ * Device tree based initialization code for reserved memory.
+ *
+ * Copyright (c) 2013, The Linux Foundation. All Rights Reserved.
+ * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ * Author: Josh Cartwright <joshc@codeaurora.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+#include <linux/of_reserved_mem.h>
+
+#define MAX_RESERVED_REGIONS 16
+static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static int reserved_mem_count;
+
+#if defined(CONFIG_HAVE_MEMBLOCK)
+#include <linux/memblock.h>
+int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ phys_addr_t *res_base)
+{
+ /*
+ * We use __memblock_alloc_base() because memblock_alloc_base()
+ * panic()s on allocation failure.
+ */
+ phys_addr_t base = __memblock_alloc_base(size, align, end);
+ if (!base)
+ return -ENOMEM;
+
+ /*
+ * Check if the allocated region fits into the start..end window
+ */
+ if (base < start) {
+ memblock_free(base, size);
+ return -ENOMEM;
+ }
+
+ *res_base = base;
+ if (nomap)
+ return memblock_remove(base, size);
+ return 0;
+}
+#else
+int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ phys_addr_t *res_base)
+{
+ pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
+ size, nomap ? " (nomap)" : "");
+ return -ENOSYS;
+}
+#endif
+
+/**
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
+ */
+void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size)
+{
+ struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
+
+ if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
+ pr_err("Reserved memory: not enough space all defined regions.\n");
+ return;
+ }
+
+ rmem->fdt_node = node;
+ rmem->name = uname;
+ rmem->base = base;
+ rmem->size = size;
+
+ reserved_mem_count++;
+ return;
+}
+
+/**
+ * __reserved_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
+ * and 'alloc-ranges' properties
+ */
+static int __init __reserved_mem_alloc_size(unsigned long node,
+ const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ phys_addr_t start = 0, end = 0;
+ phys_addr_t base = 0, align = 0, size;
+ unsigned long len;
+ __be32 *prop;
+ int nomap;
+ int ret;
+
+ prop = of_get_flat_dt_prop(node, "size", &len);
+ if (!prop)
+ return -EINVAL;
+
+ if (len != dt_root_size_cells * sizeof(__be32)) {
+ pr_err("Reserved memory: invalid size property in '%s' node.\n",
+ uname);
+ return -EINVAL;
+ }
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
+ prop = of_get_flat_dt_prop(node, "alignment", &len);
+ if (prop) {
+ if (len != dt_root_addr_cells * sizeof(__be32)) {
+ pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
+ uname);
+ return -EINVAL;
+ }
+ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ }
+
+ prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
+ if (prop) {
+
+ if (len % t_len != 0) {
+ pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
+ uname);
+ return -EINVAL;
+ }
+
+ base = 0;
+
+ while (len > 0) {
+ start = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ end = start + dt_mem_next_cell(dt_root_size_cells,
+ &prop);
+
+ ret = early_init_dt_alloc_reserved_memory_arch(size,
+ align, start, end, nomap, &base);
+ if (ret == 0) {
+ pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ uname, &base,
+ (unsigned long)size / SZ_1M);
+ break;
+ }
+ len -= t_len;
+ }
+
+ } else {
+ ret = early_init_dt_alloc_reserved_memory_arch(size, align,
+ 0, 0, nomap, &base);
+ if (ret == 0)
+ pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ uname, &base, (unsigned long)size / SZ_1M);
+ }
+
+ if (base == 0) {
+ pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
+ uname);
+ return -ENOMEM;
+ }
+
+ *res_base = base;
+ *res_size = size;
+
+ return 0;
+}
+
+static const struct of_device_id __rmem_of_table_sentinel
+ __used __section(__reservedmem_of_table_end);
+
+/**
+ * __reserved_mem_init_node() - call region specific reserved memory init code
+ */
+static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
+{
+ extern const struct of_device_id __reservedmem_of_table[];
+ const struct of_device_id *i;
+
+ for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
+ reservedmem_of_init_fn initfn = i->data;
+ const char *compat = i->compatible;
+
+ if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
+ continue;
+
+ if (initfn(rmem, rmem->fdt_node, rmem->name) == 0) {
+ pr_info("Reserved memory: initialized node %s, compatible id %s\n",
+ rmem->name, compat);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ */
+void __init fdt_init_reserved_mem(void)
+{
+ int i;
+ for (i = 0; i < reserved_mem_count; i++) {
+ struct reserved_mem *rmem = &reserved_mem[i];
+ unsigned long node = rmem->fdt_node;
+ int err = 0;
+
+ if (rmem->size == 0)
+ err = __reserved_mem_alloc_size(node, rmem->name,
+ &rmem->base, &rmem->size);
+ if (err == 0)
+ __reserved_mem_init_node(rmem);
+ }
+}
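For completeness, a hedged sketch of the consumer side that __reserved_mem_init_node() walks over; the RESERVEDMEM_OF_DECLARE() registration macro is assumed to be provided by the companion of_reserved_mem.h header in this series, and the compatible string is made up:

#include <linux/init.h>
#include <linux/of_reserved_mem.h>
#include <linux/printk.h>

/* Sketch: region-specific setup called once the region's base and size are
 * known; returning 0 marks the region as successfully initialized. */
static int __init example_rmem_init(struct reserved_mem *rmem,
				    unsigned long node, const char *uname)
{
	pr_info("example pool '%s': base %pa, size %pa\n",
		uname, &rmem->base, &rmem->size);
	return 0;
}
RESERVEDMEM_OF_DECLARE(example, "vendor,example-pool", example_rmem_init);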
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 7b666736c168..36b4035881b0 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -176,11 +176,10 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return NULL;
dp = prom_early_alloc(sizeof(*dp));
+ of_node_init(dp);
of_pdt_incr_unique_id(dp);
dp->parent = parent;
- kref_init(&dp->kref);
-
dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index e21012bde639..ae4450070503 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -30,6 +30,67 @@ static struct selftest_results {
} \
}
+static void __init of_selftest_dynamic(void)
+{
+ struct device_node *np;
+ struct property *prop;
+
+ np = of_find_node_by_path("/testcase-data");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ /* Array of 4 properties for the purpose of testing */
+ prop = kzalloc(sizeof(*prop) * 4, GFP_KERNEL);
+ if (!prop) {
+ selftest(0, "kzalloc() failed\n");
+ return;
+ }
+
+ /* Add a new property - should pass */
+ prop->name = "new-property";
+ prop->value = "new-property-data";
+ prop->length = strlen(prop->value);
+ selftest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
+
+ /* Try to add an existing property - should fail */
+ prop++;
+ prop->name = "new-property";
+ prop->value = "new-property-data-should-fail";
+ prop->length = strlen(prop->value);
+ selftest(of_add_property(np, prop) != 0,
+ "Adding an existing property should have failed\n");
+
+ /* Try to modify an existing property - should pass */
+ prop->value = "modify-property-data-should-pass";
+ prop->length = strlen(prop->value);
+ selftest(of_update_property(np, prop) == 0,
+ "Updating an existing property should have passed\n");
+
+ /* Try to modify non-existent property - should pass */
+ prop++;
+ prop->name = "modify-property";
+ prop->value = "modify-missing-property-data-should-pass";
+ prop->length = strlen(prop->value);
+ selftest(of_update_property(np, prop) == 0,
+ "Updating a missing property should have passed\n");
+
+ /* Remove property - should pass */
+ selftest(of_remove_property(np, prop) == 0,
+ "Removing a property should have passed\n");
+
+ /* Adding very large property - should pass */
+ prop++;
+ prop->name = "large-property-PAGE_SIZEx8";
+ prop->length = PAGE_SIZE * 8;
+ prop->value = kzalloc(prop->length, GFP_KERNEL);
+ selftest(prop->value != NULL, "Unable to allocate large buffer\n");
+ if (prop->value)
+ selftest(of_add_property(np, prop) == 0,
+ "Adding a large property should have passed\n");
+}
+
static void __init of_selftest_parse_phandle_with_args(void)
{
struct device_node *np;
@@ -300,6 +361,72 @@ static void __init of_selftest_parse_interrupts_extended(void)
of_node_put(np);
}
+static struct of_device_id match_node_table[] = {
+ { .data = "A", .name = "name0", }, /* Name alone is lowest priority */
+ { .data = "B", .type = "type1", }, /* followed by type alone */
+
+ { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
+ { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
+ { .data = "Cc", .name = "name2", .type = "type2", },
+
+ { .data = "E", .compatible = "compat3" },
+ { .data = "G", .compatible = "compat2", },
+ { .data = "H", .compatible = "compat2", .name = "name5", },
+ { .data = "I", .compatible = "compat2", .type = "type1", },
+ { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
+ { .data = "K", .compatible = "compat2", .name = "name9", },
+ {}
+};
+
+static struct {
+ const char *path;
+ const char *data;
+} match_node_tests[] = {
+ { .path = "/testcase-data/match-node/name0", .data = "A", },
+ { .path = "/testcase-data/match-node/name1", .data = "B", },
+ { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
+ { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
+ { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
+ { .path = "/testcase-data/match-node/name3", .data = "E", },
+ { .path = "/testcase-data/match-node/name4", .data = "G", },
+ { .path = "/testcase-data/match-node/name5", .data = "H", },
+ { .path = "/testcase-data/match-node/name6", .data = "G", },
+ { .path = "/testcase-data/match-node/name7", .data = "I", },
+ { .path = "/testcase-data/match-node/name8", .data = "J", },
+ { .path = "/testcase-data/match-node/name9", .data = "K", },
+};
+
+static void __init of_selftest_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
+ np = of_find_node_by_path(match_node_tests[i].path);
+ if (!np) {
+ selftest(0, "missing testcase node %s\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ match = of_match_node(match_node_table, np);
+ if (!match) {
+ selftest(0, "%s didn't match anything\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ if (strcmp(match->data, match_node_tests[i].data) != 0) {
+ selftest(0, "%s got wrong match. expected %s, got %s\n",
+ match_node_tests[i].path, match_node_tests[i].data,
+ (const char *)match->data);
+ continue;
+ }
+ selftest(1, "passed");
+ }
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -312,10 +439,12 @@ static int __init of_selftest(void)
of_node_put(np);
pr_info("start of selftest - you will see error messages\n");
+ of_selftest_dynamic();
of_selftest_parse_phandle_with_args();
of_selftest_property_match_string();
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
+ of_selftest_match_node();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644
index 000000000000..3a5b75a8e4d7
--- /dev/null
+++ b/drivers/of/testcase-data/testcases.dtsi
@@ -0,0 +1,3 @@
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
new file mode 100644
index 000000000000..c843720bd3e5
--- /dev/null
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
@@ -0,0 +1,58 @@
+
+/ {
+ testcase-data {
+ interrupts {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ test_intc0: intc0 {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ test_intc1: intc1 {
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ test_intc2: intc2 {
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ test_intmap0: intmap0 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ interrupt-map = <1 &test_intc0 9>,
+ <2 &test_intc1 10 11 12>,
+ <3 &test_intc2 13 14>,
+ <4 &test_intc2 15 16>;
+ };
+
+ test_intmap1: intmap1 {
+ #interrupt-cells = <2>;
+ interrupt-map = <0x5000 1 2 &test_intc0 15>;
+ };
+
+ interrupts0 {
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts1 {
+ interrupt-parent = <&test_intmap0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts-extended0 {
+ reg = <0x5000 0x100>;
+ interrupts-extended = <&test_intc0 1>,
+ <&test_intc1 2 3 4>,
+ <&test_intc2 5 6>,
+ <&test_intmap0 1>,
+ <&test_intmap0 2>,
+ <&test_intmap0 3>,
+ <&test_intmap1 1 2>;
+ };
+ };
+ };
+};
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/testcase-data/tests-match.dtsi
new file mode 100644
index 000000000000..c9e541129534
--- /dev/null
+++ b/drivers/of/testcase-data/tests-match.dtsi
@@ -0,0 +1,19 @@
+
+/ {
+ testcase-data {
+ match-node {
+ name0 { };
+ name1 { device_type = "type1"; };
+ a { name2 { device_type = "type1"; }; };
+ b { name2 { }; };
+ c { name2 { device_type = "type2"; }; };
+ name3 { compatible = "compat3"; };
+ name4 { compatible = "compat2", "compat3"; };
+ name5 { compatible = "compat2", "compat3"; };
+ name6 { compatible = "compat1", "compat2", "compat3"; };
+ name7 { compatible = "compat2"; device_type = "type1"; };
+ name8 { compatible = "compat2"; device_type = "type1"; };
+ name9 { compatible = "compat2"; };
+ };
+ };
+};
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
new file mode 100644
index 000000000000..788a4c24b8f5
--- /dev/null
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
@@ -0,0 +1,42 @@
+
+/ {
+ testcase-data {
+ security-password = "password";
+ duplicate-name = "duplicate";
+ duplicate-name { };
+ phandle-tests {
+ provider0: provider0 {
+ #phandle-cells = <0>;
+ };
+
+ provider1: provider1 {
+ #phandle-cells = <1>;
+ };
+
+ provider2: provider2 {
+ #phandle-cells = <2>;
+ };
+
+ provider3: provider3 {
+ #phandle-cells = <3>;
+ };
+
+ consumer-a {
+ phandle-list = <&provider1 1>,
+ <&provider2 2 0>,
+ <0>,
+ <&provider3 4 4 3>,
+ <&provider2 5 100>,
+ <&provider0>,
+ <&provider1 7>;
+ phandle-list-names = "first", "second", "third";
+
+ phandle-list-bad-phandle = <12345678 0 0>;
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider3 0>;
+ empty-property;
+ unterminated-string = [40 41 42 43];
+ };
+ };
+ };
+};
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 6a83ee1e9178..3fa66244ce32 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -905,7 +905,8 @@ int parport_claim_or_block(struct pardevice *dev)
/* If dev->waiting is clear now, an interrupt
gave us the port and we would deadlock if we slept. */
if (dev->waiting) {
- interruptible_sleep_on (&dev->wait_q);
+ wait_event_interruptible(dev->wait_q,
+ !dev->waiting);
if (signal_pending (current)) {
return -EINTR;
}
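The parport hunk above is one instance of a general conversion: interruptible_sleep_on() can miss a wakeup that happens between testing the condition and going to sleep, while wait_event_interruptible() re-tests the condition as part of the sleep. A generic sketch of the pattern with made-up names:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

/* Sketch: sleep until demo_ready becomes true or a signal arrives. */
static int demo_wait(void)
{
	if (wait_event_interruptible(demo_wq, demo_ready))
		return -ERESTARTSYS;
	return 0;
}

/* Producer side: set the condition first, then wake any waiters. */
static void demo_complete(void)
{
	demo_ready = 1;
	wake_up_interruptible(&demo_wq);
}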
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 17d2b07ee67c..e04fe2d9df3b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -33,21 +33,14 @@ obj-$(CONFIG_PCI_IOV) += iov.o
#
# Some architectures use the generic PCI setup functions
#
-obj-$(CONFIG_X86) += setup-bus.o
-obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
-obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
-obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PARISC) += setup-bus.o
-obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PPC) += setup-bus.o
-obj-$(CONFIG_FRV) += setup-bus.o
-obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
-obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_MN10300) += setup-bus.o
-obj-$(CONFIG_MICROBLAZE) += setup-bus.o
-obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
-obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
-obj-$(CONFIG_M68K) += setup-bus.o setup-irq.o
+obj-$(CONFIG_ALPHA) += setup-irq.o
+obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-irq.o
+obj-$(CONFIG_SUPERH) += setup-irq.o
+obj-$(CONFIG_MIPS) += setup-irq.o
+obj-$(CONFIG_TILE) += setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-irq.o
+obj-$(CONFIG_M68K) += setup-irq.o
#
# ACPI Related PCI FW Functions
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc502c5..fb8aed307c28 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -132,7 +132,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
- resource_size_t min, unsigned int type_mask,
+ resource_size_t min, unsigned long type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
@@ -144,7 +144,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
struct resource *r, avail;
resource_size_t max;
- type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
@@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
avail = *r;
pci_clip_resource_to_region(bus, &avail, region);
- if (!resource_size(&avail))
- continue;
/*
* "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
@@ -202,7 +200,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
*/
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
- resource_size_t min, unsigned int type_mask,
+ resource_size_t min, unsigned long type_mask,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 06ace6248c61..47aaf22d814e 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -32,11 +32,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
bridge->release_data = release_data;
}
-static bool resource_contains(struct resource *res1, struct resource *res2)
-{
- return res1->start <= res2->start && res1->end >= res2->end;
-}
-
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
{
@@ -45,9 +40,6 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
resource_size_t offset = 0;
list_for_each_entry(window, &bridge->windows, list) {
- if (resource_type(res) != resource_type(window->res))
- continue;
-
if (resource_contains(window->res, res)) {
offset = window->offset;
break;
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 47d46c6d8468..a6f67ec8882f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -27,7 +27,7 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM && (ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST)
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index e8663a8c3406..ee082509b0ba 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -424,20 +424,40 @@ static void imx6_pcie_reset_phy(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- u32 rc, ltssm, rx_valid;
+ u32 rc, debug_r0, rx_valid;
+ int count = 5;
/*
- * Test if the PHY reports that the link is up and also that
- * the link training finished. It might happen that the PHY
- * reports the link is already up, but the link training bit
- * is still set, so make sure to check the training is done
- * as well here.
+ * Test if the PHY reports that the link is up and also that the LTSSM
+ * training finished. There are three possible states of the link when
+ * this code is called:
+ * 1) The link is DOWN (unlikely)
+ * The link didn't come up yet for some reason. This usually means
+ * we have a real problem somewhere. Reset the PHY and exit. This
+ * state calls for inspection of the DEBUG registers.
+ * 2) The link is UP, but still in LTSSM training
+ * Wait for the training to finish, which should take a very short
+ * time. If the training does not finish, we have a problem and we
+ * need to inspect the DEBUG registers. If the training does finish,
+ * the link is up and operating correctly.
+ * 3) The link is UP and no longer in LTSSM training
+ * The link is up and operating correctly.
*/
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
- if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
- !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
- return 1;
-
+ while (1) {
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
+ break;
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
+ return 1;
+ if (!count--)
+ break;
+ dev_dbg(pp->dev, "Link is up, but still in training\n");
+ /*
+ * Wait a little bit, then re-check if the link finished
+ * the training.
+ */
+ usleep_range(1000, 2000);
+ }
/*
* From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
* Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
@@ -446,15 +466,16 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
* to gen2 is stuck
*/
pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
- ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+ debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
if (rx_valid & 0x01)
return 0;
- if (ltssm != 0x0d)
+ if ((debug_r0 & 0x3f) != 0x0d)
return 0;
dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+ dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
imx6_pcie_reset_phy(pp);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13478ecd4113..d3d1cfd51e09 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -60,14 +60,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-/*
- * This product ID is registered by Marvell, and used when the Marvell
- * SoC is not the root complex, but an endpoint on the PCIe bus. It is
- * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
- * bridge.
- */
-#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
-
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
@@ -109,7 +101,9 @@ struct mvebu_pcie {
struct mvebu_pcie_port *ports;
struct msi_chip *msi;
struct resource io;
+ char io_name[30];
struct resource realio;
+ char mem_name[30];
struct resource mem;
struct resource busn;
int nports;
@@ -388,7 +382,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
bridge->class = PCI_CLASS_BRIDGE_PCI;
bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->cache_line_size = 0x10;
@@ -679,10 +674,30 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int i;
+ int domain = 0;
+
+#ifdef CONFIG_PCI_DOMAINS
+ domain = sys->domain;
+#endif
+
+ snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
+ domain);
+ pcie->mem.name = pcie->mem_name;
- if (resource_size(&pcie->realio) != 0)
+ snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
+ pcie->realio.name = pcie->io_name;
+
+ if (request_resource(&iomem_resource, &pcie->mem))
+ return 0;
+
+ if (resource_size(&pcie->realio) != 0) {
+ if (request_resource(&ioport_resource, &pcie->realio)) {
+ release_resource(&pcie->mem);
+ return 0;
+ }
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
+ }
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
@@ -804,7 +819,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
for (i = 0; i < nranges; i++) {
u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range, 2);
+ u32 slot = of_read_number(range + 1, 1);
u64 cpuaddr = of_read_number(range + na, pna);
unsigned long rtype;
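The mvebu_get_tgt_attr() fix reads the slot from the second cell of each range entry instead of folding two cells into a single 64-bit value. A hedged illustration of of_read_number() with made-up cell contents:

__be32 cells[2] = {
        cpu_to_be32(0x82000000),        /* flags cell (illustrative) */
        cpu_to_be32(0x00000004),        /* slot cell  (illustrative) */
};

u64 wrong = of_read_number(cells, 2);           /* 0x8200000000000004: flags mixed in */
u64 slot  = of_read_number(cells + 1, 1);       /* 0x4: just the slot number */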
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index ceec147baec3..fd3e3ab56509 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
/* AHB-PCI Bridge PCI communication registers */
@@ -39,9 +40,26 @@
#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT (1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
+#define RCAR_PCI_INT_REMABORT (1 << 2)
+#define RCAR_PCI_INT_PERR (1 << 3)
+#define RCAR_PCI_INT_SIGSERR (1 << 4)
+#define RCAR_PCI_INT_RESERR (1 << 5)
+#define RCAR_PCI_INT_WIN1ERR (1 << 12)
+#define RCAR_PCI_INT_WIN2ERR (1 << 13)
#define RCAR_PCI_INT_A (1 << 16)
#define RCAR_PCI_INT_B (1 << 17)
#define RCAR_PCI_INT_PME (1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_REMABORT | \
+ RCAR_PCI_INT_PERR | \
+ RCAR_PCI_INT_SIGSERR | \
+ RCAR_PCI_INT_RESERR | \
+ RCAR_PCI_INT_WIN1ERR | \
+ RCAR_PCI_INT_WIN2ERR)
#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
@@ -74,9 +92,6 @@
#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
-/* Number of internal PCI controllers */
-#define RCAR_PCI_NR_CONTROLLERS 3
-
struct rcar_pci_priv {
struct device *dev;
void __iomem *reg;
@@ -84,6 +99,7 @@ struct rcar_pci_priv {
struct resource mem_res;
struct resource *cfg_res;
int irq;
+ unsigned long window_size;
};
/* PCI configuration space operations */
@@ -102,6 +118,10 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
if (slot > 2)
return NULL;
+ /* the bridge logic only has registers up to 0x40 */
+ if (slot == 0x0 && where >= 0x40)
+ return NULL;
+
val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
@@ -156,7 +176,7 @@ static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
}
/* PCI interrupt mapping */
-static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pci_sys_data *sys = dev->bus->sysdata;
struct rcar_pci_priv *priv = sys->private_data;
@@ -164,8 +184,48 @@ static int __init rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return priv->irq;
}
+#ifdef CONFIG_PCI_DEBUG
+/* if debugging is enabled, attach an error-handling irq to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+ struct rcar_pci_priv *priv = pw;
+ u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+ if (status & RCAR_PCI_INT_ALLERRORS) {
+ dev_err(priv->dev, "error irq: status %08x\n", status);
+
+ /* clear the error(s) */
+ iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+ priv->reg + RCAR_PCI_INT_STATUS_REG);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ IRQF_SHARED, "error irq", priv);
+ if (ret) {
+ dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ return;
+ }
+
+ val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+ val |= RCAR_PCI_INT_ALLERRORS;
+ iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+#endif
+
/* PCI host controller setup */
-static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
+static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
void __iomem *reg = priv->reg;
@@ -183,10 +243,31 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
iowrite32(val, reg + RCAR_USBCTR_REG);
udelay(4);
- /* De-assert reset and set PCIAHB window1 size to 1GB */
+ /* De-assert reset and reset PCIAHB window1 size */
val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
- iowrite32(val | RCAR_USBCTR_PCIAHB_WIN1_1G, reg + RCAR_USBCTR_REG);
+
+ /* Setup PCIAHB window1 size */
+ switch (priv->window_size) {
+ case SZ_2G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+ break;
+ case SZ_1G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+ break;
+ case SZ_512M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+ break;
+ default:
+ pr_warn("unknown window size %ld - defaulting to 256M\n",
+ priv->window_size);
+ priv->window_size = SZ_256M;
+ /* fall-through */
+ case SZ_256M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+ break;
+ }
+ iowrite32(val, reg + RCAR_USBCTR_REG);
/* Configure AHB master and slave modes */
iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
@@ -197,7 +278,7 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
RCAR_PCI_ARBITER_PCIBP_MODE;
iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
- /* PCI-AHB mapping: 0x40000000-0x80000000 */
+ /* PCI-AHB mapping: 0x40000000 base */
iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
reg + RCAR_PCIAHB_WIN1_CTR_REG);
@@ -224,10 +305,15 @@ static int __init rcar_pci_setup(int nr, struct pci_sys_data *sys)
iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
reg + RCAR_PCI_INT_ENABLE_REG);
+ if (priv->irq > 0)
+ rcar_pci_setup_errirq(priv);
+
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
+ /* Setup bus number based on platform device id */
+ sys->busnr = to_platform_device(priv->dev)->id;
return 1;
}
@@ -236,48 +322,13 @@ static struct pci_ops rcar_pci_ops = {
.write = rcar_pci_write_config,
};
-static struct hw_pci rcar_hw_pci __initdata = {
- .map_irq = rcar_pci_map_irq,
- .ops = &rcar_pci_ops,
- .setup = rcar_pci_setup,
-};
-
-static int rcar_pci_count __initdata;
-
-static int __init rcar_pci_add_controller(struct rcar_pci_priv *priv)
-{
- void **private_data;
- int count;
-
- if (rcar_hw_pci.nr_controllers < rcar_pci_count)
- goto add_priv;
-
- /* (Re)allocate private data pointer array if needed */
- count = rcar_pci_count + RCAR_PCI_NR_CONTROLLERS;
- private_data = kzalloc(count * sizeof(void *), GFP_KERNEL);
- if (!private_data)
- return -ENOMEM;
-
- rcar_pci_count = count;
- if (rcar_hw_pci.private_data) {
- memcpy(private_data, rcar_hw_pci.private_data,
- rcar_hw_pci.nr_controllers * sizeof(void *));
- kfree(rcar_hw_pci.private_data);
- }
-
- rcar_hw_pci.private_data = private_data;
-
-add_priv:
- /* Add private data pointer to the array */
- rcar_hw_pci.private_data[rcar_hw_pci.nr_controllers++] = priv;
- return 0;
-}
-
-static int __init rcar_pci_probe(struct platform_device *pdev)
+static int rcar_pci_probe(struct platform_device *pdev)
{
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
+ struct hw_pci hw;
+ void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
@@ -308,31 +359,34 @@ static int __init rcar_pci_probe(struct platform_device *pdev)
priv->reg = reg;
priv->dev = &pdev->dev;
- return rcar_pci_add_controller(priv);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "no valid irq found\n");
+ return priv->irq;
+ }
+
+ priv->window_size = SZ_1G;
+
+ hw_private[0] = priv;
+ memset(&hw, 0, sizeof(hw));
+ hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.private_data = hw_private;
+ hw.map_irq = rcar_pci_map_irq;
+ hw.ops = &rcar_pci_ops;
+ hw.setup = rcar_pci_setup;
+ pci_common_init_dev(&pdev->dev, &hw);
+ return 0;
}
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
},
+ .probe = rcar_pci_probe,
};
-static int __init rcar_pci_init(void)
-{
- int retval;
-
- retval = platform_driver_probe(&rcar_pci_driver, rcar_pci_probe);
- if (!retval)
- pci_common_init(&rcar_hw_pci);
-
- /* Private data pointer array is not needed any more */
- kfree(rcar_hw_pci.private_data);
- rcar_hw_pci.private_data = NULL;
-
- return retval;
-}
-
-subsys_initcall(rcar_pci_init);
+module_platform_driver(rcar_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
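The window-size switch above falls back to 256M for unrecognized values and reports the size it actually programmed. A compact sketch of that mapping, with WIN1_* as stand-ins for the RCAR_USBCTR_PCIAHB_WIN1_* bits:

static u32 win1_size_bits(unsigned long *size)
{
        switch (*size) {
        case SZ_2G:   return WIN1_2G;
        case SZ_1G:   return WIN1_1G;
        case SZ_512M: return WIN1_512M;
        default:
                *size = SZ_256M;        /* record the size actually programmed */
                /* fall through */
        case SZ_256M: return WIN1_256M;
        }
}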
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index b8ba2f794559..330f7e3a32dd 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -25,7 +25,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -39,6 +38,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tegra-cpuidle.h>
@@ -259,10 +259,13 @@ struct tegra_pcie {
struct clk *pex_clk;
struct clk *afi_clk;
- struct clk *pcie_xclk;
struct clk *pll_e;
struct clk *cml_clk;
+ struct reset_control *pex_rst;
+ struct reset_control *afi_rst;
+ struct reset_control *pcie_xrst;
+
struct tegra_msi msi;
struct list_head ports;
@@ -858,7 +861,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
pads_writel(pcie, value, PADS_CTL);
/* take the PCIe interface module out of reset */
- tegra_periph_reset_deassert(pcie->pcie_xclk);
+ reset_control_deassert(pcie->pcie_xrst);
/* finally enable PCIe */
value = afi_readl(pcie, AFI_CONFIGURATION);
@@ -891,9 +894,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
/* TODO: disable and unprepare clocks? */
- tegra_periph_reset_assert(pcie->pcie_xclk);
- tegra_periph_reset_assert(pcie->afi_clk);
- tegra_periph_reset_assert(pcie->pex_clk);
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -921,9 +924,9 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
int err;
- tegra_periph_reset_assert(pcie->pcie_xclk);
- tegra_periph_reset_assert(pcie->afi_clk);
- tegra_periph_reset_assert(pcie->pex_clk);
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -952,13 +955,14 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
}
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk);
+ pcie->pex_clk,
+ pcie->pex_rst);
if (err) {
dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
return err;
}
- tegra_periph_reset_deassert(pcie->afi_clk);
+ reset_control_deassert(pcie->afi_rst);
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
@@ -996,10 +1000,6 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
- pcie->pcie_xclk = devm_clk_get(pcie->dev, "pcie_xclk");
- if (IS_ERR(pcie->pcie_xclk))
- return PTR_ERR(pcie->pcie_xclk);
-
pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
@@ -1013,6 +1013,23 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
return 0;
}
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+ pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_rst))
+ return PTR_ERR(pcie->pex_rst);
+
+ pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_rst))
+ return PTR_ERR(pcie->afi_rst);
+
+ pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ if (IS_ERR(pcie->pcie_xrst))
+ return PTR_ERR(pcie->pcie_xrst);
+
+ return 0;
+}
+
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
@@ -1025,6 +1042,12 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
+ err = tegra_pcie_resets_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
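The Tegra conversion swaps the clock-handle based tegra_periph_reset_*() calls for the generic reset controller API; "pex", "afi" and "pcie_x" are the names used in the resets DT property. A minimal consumer sketch of that API (device and reset name here are only illustrative):

#include <linux/reset.h>

static int bring_up_block(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get(dev, "afi");       /* name from the DT binding */
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        reset_control_assert(rst);
        /* ... enable clocks / power the domain here ... */
        return reset_control_deassert(rst);
}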
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 17ce88f79d2b..509a29d84509 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -294,14 +294,12 @@ no_valid_irq:
static void clear_irq(unsigned int irq)
{
unsigned int pos, nvec;
- struct irq_desc *desc;
struct msi_desc *msi;
struct pcie_port *pp;
struct irq_data *data = irq_get_irq_data(irq);
/* get the port structure */
- desc = irq_to_desc(irq);
- msi = irq_desc_get_msi_desc(desc);
+ msi = irq_data_get_msi(data);
pp = sys_to_pcie(msi->dev->bus->sysdata);
if (!pp) {
BUG();
@@ -800,7 +798,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup RC BARs */
dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
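The RC BAR change writes 0 to PCI_BASE_ADDRESS_1; one plausible reading is that BAR0 is programmed as a 64-bit memory BAR (low bits 0x4), so BAR1 holds its upper address bits and should not carry a type value of its own. A small decode of those low bits using the standard pci_regs.h masks:

u32 bar0 = 0x00000004;
bool is_io    = bar0 & PCI_BASE_ADDRESS_SPACE_IO;               /* false: memory BAR */
bool is_64bit = (bar0 & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
                PCI_BASE_ADDRESS_MEM_TYPE_64;                   /* true: BAR1 is the upper half */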
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index b6162be4df40..2b859249303b 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -93,7 +93,6 @@ struct acpiphp_slot {
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
struct slot *slot;
- struct mutex crit_sect;
u8 device; /* pci device# */
u32 flags; /* see below */
@@ -117,20 +116,30 @@ struct acpiphp_func {
};
struct acpiphp_context {
- acpi_handle handle;
+ struct acpi_hotplug_context hp;
struct acpiphp_func func;
struct acpiphp_bridge *bridge;
unsigned int refcount;
};
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_context, hp);
+}
+
static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
{
return container_of(func, struct acpiphp_context, func);
}
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+ return func_to_context(func)->hp.self;
+}
+
static inline acpi_handle func_to_handle(struct acpiphp_func *func)
{
- return func_to_context(func)->handle;
+ return func_to_acpi_device(func)->handle;
}
/*
@@ -158,7 +167,6 @@ struct acpiphp_attention_info
#define FUNC_HAS_STA (0x00000001)
#define FUNC_HAS_EJ0 (0x00000002)
-#define FUNC_HAS_DCK (0x00000004)
/* function prototypes */
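The new helpers above embed struct acpi_hotplug_context in the acpiphp context and recover the outer structure with container_of(). A generic sketch of that embed-and-recover pattern (struct and function names are illustrative):

struct outer {
        struct acpi_hotplug_context hp;         /* embedded member */
        unsigned int refcount;
};

static struct outer *outer_from_hp(struct acpi_hotplug_context *hp)
{
        return container_of(hp, struct outer, hp);
}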
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ee26bac2d378..bccc27ee1030 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -58,71 +58,59 @@
static LIST_HEAD(bridge_list);
static DEFINE_MUTEX(bridge_mutex);
-static DEFINE_MUTEX(acpiphp_context_lock);
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event(acpi_handle handle, u32 type, void *data);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
static void free_bridge(struct kref *kref);
-static void acpiphp_context_handler(acpi_handle handle, void *context)
-{
- /* Intentionally empty. */
-}
-
/**
* acpiphp_init_context - Create hotplug context and grab a reference to it.
- * @handle: ACPI object handle to create the context for.
+ * @adev: ACPI device object to create the context for.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
-static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
{
struct acpiphp_context *context;
- acpi_status status;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return NULL;
- context->handle = handle;
context->refcount = 1;
- status = acpi_attach_data(handle, acpiphp_context_handler, context);
- if (ACPI_FAILURE(status)) {
- kfree(context);
- return NULL;
- }
+ acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
+ acpiphp_post_dock_fixup);
return context;
}
/**
* acpiphp_get_context - Get hotplug context and grab a reference to it.
- * @handle: ACPI object handle to get the context for.
+ * @adev: ACPI device object to get the context for.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
-static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
{
- struct acpiphp_context *context = NULL;
- acpi_status status;
- void *data;
+ struct acpiphp_context *context;
- status = acpi_get_data(handle, acpiphp_context_handler, &data);
- if (ACPI_SUCCESS(status)) {
- context = data;
- context->refcount++;
- }
+ if (!adev->hp)
+ return NULL;
+
+ context = to_acpiphp_context(adev->hp);
+ context->refcount++;
return context;
}
/**
* acpiphp_put_context - Drop a reference to ACPI hotplug context.
- * @handle: ACPI object handle to put the context for.
+ * @context: ACPI hotplug context to drop a reference to.
*
* The context object is removed if there are no more references to it.
*
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
*/
static void acpiphp_put_context(struct acpiphp_context *context)
{
@@ -130,7 +118,7 @@ static void acpiphp_put_context(struct acpiphp_context *context)
return;
WARN_ON(context->bridge);
- acpi_detach_data(context->handle, acpiphp_context_handler);
+ context->hp.self->hp = NULL;
kfree(context);
}
@@ -144,6 +132,27 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
kref_put(&bridge->ref, free_bridge);
}
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+{
+ struct acpiphp_context *context;
+
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
+ if (!context || context->func.parent->is_going_away) {
+ acpi_unlock_hp_context();
+ return NULL;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return context;
+}
+
+static void acpiphp_let_context_go(struct acpiphp_context *context)
+{
+ put_bridge(context->func.parent);
+}
+
static void free_bridge(struct kref *kref)
{
struct acpiphp_context *context;
@@ -151,7 +160,7 @@ static void free_bridge(struct kref *kref)
struct acpiphp_slot *slot, *next;
struct acpiphp_func *func, *tmp;
- mutex_lock(&acpiphp_context_lock);
+ acpi_lock_hp_context();
bridge = container_of(kref, struct acpiphp_bridge, ref);
@@ -175,31 +184,32 @@ static void free_bridge(struct kref *kref)
pci_dev_put(bridge->pci_dev);
kfree(bridge);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
}
-/*
- * the _DCK method can do funny things... and sometimes not
- * hah-hah funny.
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
*
- * TBD - figure out a way to only call fixups for
- * systems that require them.
+ * TBD - figure out a way to only call fixups for systems that require them.
*/
-static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
{
- struct acpiphp_context *context = data;
- struct pci_bus *bus = context->func.slot->bus;
+ struct acpiphp_context *context = acpiphp_grab_context(adev);
+ struct pci_bus *bus;
u32 buses;
- if (!bus->self)
+ if (!context)
return;
+ bus = context->func.slot->bus;
+ if (!bus->self)
+ goto out;
+
/* fixup bad _DCK function that rewrites
* secondary bridge on slot
*/
- pci_read_config_dword(bus->self,
- PCI_PRIMARY_BUS,
- &buses);
+ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
if (((buses >> 8) & 0xff) != bus->busn_res.start) {
buses = (buses & 0xff000000)
@@ -208,13 +218,10 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
| ((unsigned int)(bus->busn_res.end) << 16);
pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
}
-}
-
-static const struct acpi_dock_ops acpiphp_dock_ops = {
- .fixup = post_dock_fixups,
- .handler = hotplug_event,
-};
+ out:
+ acpiphp_let_context_go(context);
+}
/* Check whether the PCI device is managed by native PCIe hotplug driver */
static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
@@ -245,26 +252,19 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
return true;
}
-static void acpiphp_dock_init(void *data)
-{
- struct acpiphp_context *context = data;
-
- get_bridge(context->func.parent);
-}
-
-static void acpiphp_dock_release(void *data)
-{
- struct acpiphp_context *context = data;
-
- put_bridge(context->func.parent);
-}
-
-/* callback routine to register each ACPI PCI slot object */
-static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
- void **rv)
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+ void **rv)
{
struct acpiphp_bridge *bridge = data;
struct acpiphp_context *context;
+ struct acpi_device *adev;
struct acpiphp_slot *slot;
struct acpiphp_func *newfunc;
acpi_status status = AE_OK;
@@ -274,9 +274,6 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
struct pci_dev *pdev = bridge->pci_dev;
u32 val;
- if (pdev && device_is_managed_by_native_pciehp(pdev))
- return AE_OK;
-
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
@@ -284,31 +281,34 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
"can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_init_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_init_context(adev);
if (!context) {
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
acpi_handle_err(handle, "No hotplug context\n");
return AE_NOT_EXIST;
}
newfunc = &context->func;
newfunc->function = function;
newfunc->parent = bridge;
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
- if (acpi_has_method(handle, "_EJ0"))
+ /*
+ * If this is a dock device, its _EJ0 should be executed by the dock
+ * notify handler after calling _DCK.
+ */
+ if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
newfunc->flags = FUNC_HAS_EJ0;
if (acpi_has_method(handle, "_STA"))
newfunc->flags |= FUNC_HAS_STA;
- if (acpi_has_method(handle, "_DCK"))
- newfunc->flags |= FUNC_HAS_DCK;
-
/* search for objects that share the same slot */
list_for_each_entry(slot, &bridge->slots, node)
if (slot->device == device)
@@ -316,19 +316,26 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
if (!slot) {
- status = AE_NO_MEMORY;
- goto err;
+ acpi_lock_hp_context();
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return AE_NO_MEMORY;
}
slot->bus = bridge->pci_bus;
slot->device = device;
INIT_LIST_HEAD(&slot->funcs);
- mutex_init(&slot->crit_sect);
list_add_tail(&slot->node, &bridge->slots);
- /* Register slots for ejectable functions only. */
- if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
+ /*
+ * Expose slots to user space for functions that have _EJ0 or _RMV or
+ * are located in dock stations. Do not expose them for devices handled
+ * by the native PCIe hotplug (PCIeHP), because that code is supposed to
+ * expose slots to user space in those cases.
+ */
+ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+ && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
unsigned long long sun;
int retval;
@@ -362,44 +369,16 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
&val, 60*1000))
slot->flags |= SLOT_ENABLED;
- if (is_dock_device(handle)) {
- /* we don't want to call this device's _EJ0
- * because we want the dock notify handler
- * to call it after it calls _DCK
- */
- newfunc->flags &= ~FUNC_HAS_EJ0;
- if (register_hotplug_dock_device(handle,
- &acpiphp_dock_ops, context,
- acpiphp_dock_init, acpiphp_dock_release))
- pr_debug("failed to register dock device\n");
- }
-
- /* install notify handler */
- if (!(newfunc->flags & FUNC_HAS_DCK)) {
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event,
- context);
- if (ACPI_FAILURE(status))
- acpi_handle_err(handle,
- "failed to install notify handler\n");
- }
-
return AE_OK;
-
- err:
- mutex_lock(&acpiphp_context_lock);
- acpiphp_put_context(context);
- mutex_unlock(&acpiphp_context_lock);
- return status;
}
-static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
+static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
{
struct acpiphp_context *context;
struct acpiphp_bridge *bridge = NULL;
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
if (context) {
bridge = context->bridge;
if (bridge)
@@ -407,7 +386,7 @@ static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
acpiphp_put_context(context);
}
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
return bridge;
}
@@ -415,22 +394,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
struct acpiphp_func *func;
- acpi_status status;
list_for_each_entry(slot, &bridge->slots, node) {
list_for_each_entry(func, &slot->funcs, sibling) {
- acpi_handle handle = func_to_handle(func);
-
- if (is_dock_device(handle))
- unregister_hotplug_dock_device(handle);
+ struct acpi_device *adev = func_to_acpi_device(func);
- if (!(func->flags & FUNC_HAS_DCK)) {
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event);
- if (ACPI_FAILURE(status))
- pr_err("failed to remove notify handler\n");
- }
+ acpi_lock_hp_context();
+ adev->hp->notify = NULL;
+ adev->hp->fixup = NULL;
+ acpi_unlock_hp_context();
}
slot->flags |= SLOT_IS_GOING_AWAY;
if (slot->slot)
@@ -441,7 +413,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
+ acpi_lock_hp_context();
bridge->is_going_away = true;
+ acpi_unlock_hp_context();
}
/**
@@ -450,7 +424,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
*/
static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
/*
@@ -463,41 +437,14 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
*/
max = bus->busn_res.start;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
if (n > max)
max = n;
}
return max;
}
-/**
- * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
- * @handle: ACPI device object handle to start from.
- */
-static void acpiphp_bus_trim(acpi_handle handle)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
- if (adev)
- acpi_bus_trim(adev);
-}
-
-/**
- * acpiphp_bus_add - Scan ACPI namespace subtree.
- * @handle: ACPI object handle to start the scan from.
- */
-static void acpiphp_bus_add(acpi_handle handle)
-{
- struct acpi_device *adev = NULL;
-
- acpi_bus_scan(handle);
- acpi_bus_get_device(handle, &adev);
- if (adev)
- acpi_device_set_power(adev, ACPI_STATE_D0);
-}
-
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
@@ -537,9 +484,13 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func_to_handle(func));
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
+ acpi_bus_scan(adev->handle);
+ if (acpi_device_enumerated(adev))
+ acpi_device_set_power(adev, ACPI_STATE_D0);
+ }
return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
}
@@ -604,32 +555,15 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
}
}
-/* return first device in slot, acquiring a reference on it */
-static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
-{
- struct pci_bus *bus = slot->bus;
- struct pci_dev *dev;
- struct pci_dev *ret = NULL;
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (PCI_SLOT(dev->devfn) == slot->device) {
- ret = pci_dev_get(dev);
- break;
- }
- up_read(&pci_bus_sem);
-
- return ret;
-}
-
/**
* disable_slot - disable a slot
* @slot: ACPI PHP slot
*/
static void disable_slot(struct acpiphp_slot *slot)
{
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *prev;
struct acpiphp_func *func;
- struct pci_dev *pdev;
/*
* enable_slot() enumerates all functions in this device via
@@ -637,22 +571,18 @@ static void disable_slot(struct acpiphp_slot *slot)
* methods (_EJ0, etc.) or not. Therefore, we remove all functions
* here.
*/
- while ((pdev = dev_in_slot(slot))) {
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
+ list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ pci_stop_and_remove_bus_device(dev);
list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_trim(func_to_handle(func));
+ acpi_bus_trim(func_to_acpi_device(func));
slot->flags &= (~SLOT_ENABLED);
}
-static bool acpiphp_no_hotplug(acpi_handle handle)
+static bool acpiphp_no_hotplug(struct acpi_device *adev)
{
- struct acpi_device *adev = NULL;
-
- acpi_bus_get_device(handle, &adev);
return adev && adev->flags.no_hotplug;
}
@@ -661,7 +591,7 @@ static bool slot_no_hotplug(struct acpiphp_slot *slot)
struct acpiphp_func *func;
list_for_each_entry(func, &slot->funcs, sibling)
- if (acpiphp_no_hotplug(func_to_handle(func)))
+ if (acpiphp_no_hotplug(func_to_acpi_device(func)))
return true;
return false;
@@ -709,40 +639,48 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
*/
static void trim_stale_devices(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
struct pci_bus *bus = dev->subordinate;
bool alive = false;
- if (handle) {
+ if (adev) {
acpi_status status;
unsigned long long sta;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
- || acpiphp_no_hotplug(handle);
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
+ || acpiphp_no_hotplug(adev);
}
- if (!alive) {
- u32 v;
+ if (!alive)
+ alive = pci_device_is_present(dev);
- /* Check if the device responds. */
- alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
- }
if (!alive) {
pci_stop_and_remove_bus_device(dev);
- if (handle)
- acpiphp_bus_trim(handle);
+ if (adev)
+ acpi_bus_trim(adev);
} else if (bus) {
struct pci_dev *child, *tmp;
/* The device is a bridge. so check the bus below it. */
pm_runtime_get_sync(&dev->dev);
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
trim_stale_devices(child);
pm_runtime_put(&dev->dev);
@@ -768,13 +706,12 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
struct pci_bus *bus = slot->bus;
struct pci_dev *dev, *tmp;
- mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
- list_for_each_entry_safe(dev, tmp, &bus->devices,
- bus_list)
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->device)
trim_stale_devices(dev);
@@ -783,7 +720,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
} else {
disable_slot(slot);
}
- mutex_unlock(&slot->crit_sect);
}
}
@@ -805,7 +741,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -823,186 +759,112 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
* ACPI event handlers
*/
-void acpiphp_check_host_bridge(acpi_handle handle)
+void acpiphp_check_host_bridge(struct acpi_device *adev)
{
struct acpiphp_bridge *bridge;
- bridge = acpiphp_handle_to_bridge(handle);
+ bridge = acpiphp_dev_to_bridge(adev);
if (bridge) {
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
put_bridge(bridge);
}
}
static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
-static void hotplug_event(acpi_handle handle, u32 type, void *data)
+static void hotplug_event(u32 type, struct acpiphp_context *context)
{
- struct acpiphp_context *context = data;
+ acpi_handle handle = context->hp.self->handle;
struct acpiphp_func *func = &context->func;
+ struct acpiphp_slot *slot = func->slot;
struct acpiphp_bridge *bridge;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
- mutex_lock(&acpiphp_context_lock);
+ acpi_lock_hp_context();
bridge = context->bridge;
if (bridge)
get_bridge(bridge);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ pci_lock_rescan_remove();
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
- pr_debug("%s: Bus check notify on %s\n", __func__, objname);
- pr_debug("%s: re-enumerating slots under %s\n",
- __func__, objname);
- if (bridge) {
+ acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+ if (bridge)
acpiphp_check_bridge(bridge);
- } else {
- struct acpiphp_slot *slot = func->slot;
-
- if (slot->flags & SLOT_IS_GOING_AWAY)
- break;
-
- mutex_lock(&slot->crit_sect);
+ else if (!(slot->flags & SLOT_IS_GOING_AWAY))
enable_slot(slot);
- mutex_unlock(&slot->crit_sect);
- }
+
break;
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
- pr_debug("%s: Device check notify on %s\n", __func__, objname);
+ acpi_handle_debug(handle, "Device check in %s()\n", __func__);
if (bridge) {
acpiphp_check_bridge(bridge);
- } else {
- struct acpiphp_slot *slot = func->slot;
- int ret;
-
- if (slot->flags & SLOT_IS_GOING_AWAY)
- break;
-
+ } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
/*
* Check if anything has changed in the slot and rescan
* from the parent if that's the case.
*/
- mutex_lock(&slot->crit_sect);
- ret = acpiphp_rescan_slot(slot);
- mutex_unlock(&slot->crit_sect);
- if (ret)
+ if (acpiphp_rescan_slot(slot))
acpiphp_check_bridge(func->parent);
}
break;
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
- pr_debug("%s: Device eject notify on %s\n", __func__, objname);
- acpiphp_disable_and_eject_slot(func->slot);
+ acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+ acpiphp_disable_and_eject_slot(slot);
break;
}
+ pci_unlock_rescan_remove();
if (bridge)
put_bridge(bridge);
}
-static void hotplug_event_work(void *data, u32 type)
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
{
- struct acpiphp_context *context = data;
- acpi_handle handle = context->handle;
-
- acpi_scan_lock_acquire();
- pci_lock_rescan_remove();
+ struct acpiphp_context *context;
- hotplug_event(handle, type, context);
+ context = acpiphp_grab_context(adev);
+ if (!context)
+ return -ENODATA;
- pci_unlock_rescan_remove();
- acpi_scan_lock_release();
- acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
- put_bridge(context->func.parent);
+ hotplug_event(type, context);
+ acpiphp_let_context_go(context);
+ return 0;
}
/**
- * handle_hotplug_event - handle ACPI hotplug event
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @data: pointer to acpiphp_context structure
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
*
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
-{
- struct acpiphp_context *context;
- u32 ost_code = ACPI_OST_SC_SUCCESS;
-
- switch (type) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- break;
- case ACPI_NOTIFY_EJECT_REQUEST:
- ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
- acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
- break;
-
- case ACPI_NOTIFY_DEVICE_WAKE:
- return;
-
- case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- acpi_handle_err(handle, "Device cannot be configured due "
- "to a frequency mismatch\n");
- goto out;
-
- case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- acpi_handle_err(handle, "Device cannot be configured due "
- "to a bus mode mismatch\n");
- goto out;
-
- case ACPI_NOTIFY_POWER_FAULT:
- acpi_handle_err(handle, "Device has suffered a power fault\n");
- goto out;
-
- default:
- acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
- ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
- goto out;
- }
-
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
- if (context && !WARN_ON(context->handle != handle)) {
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- acpi_hotplug_execute(hotplug_event_work, context, type);
- mutex_unlock(&acpiphp_context_lock);
- return;
- }
- mutex_unlock(&acpiphp_context_lock);
- ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-
- out:
- acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-}
-
-/*
- * Create hotplug slots for the PCI bus.
- * It should always return 0 to avoid skipping following notifiers.
+ * A "slot" is an object associated with a PCI device number. All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
*/
void acpiphp_enumerate_slots(struct pci_bus *bus)
{
struct acpiphp_bridge *bridge;
+ struct acpi_device *adev;
acpi_handle handle;
acpi_status status;
if (acpiphp_disabled)
return;
- handle = ACPI_HANDLE(bus->bridge);
- if (!handle)
+ adev = ACPI_COMPANION(bus->bridge);
+ if (!adev)
return;
+ handle = adev->handle;
bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
if (!bridge) {
acpi_handle_err(handle, "No memory for bridge object\n");
@@ -1030,10 +892,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
* parent is going to be handled by pciehp, in which case this
* bridge is not interesting to us either.
*/
- mutex_lock(&acpiphp_context_lock);
- context = acpiphp_get_context(handle);
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
if (!context) {
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
put_device(&bus->dev);
pci_dev_put(bridge->pci_dev);
kfree(bridge);
@@ -1043,17 +905,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
context->bridge = bridge;
/* Get a reference to the parent bridge. */
get_bridge(context->func.parent);
- mutex_unlock(&acpiphp_context_lock);
+ acpi_unlock_hp_context();
}
- /* must be added to the list prior to calling register_slot */
+ /* Must be added to the list prior to calling acpiphp_add_context(). */
mutex_lock(&bridge_mutex);
list_add(&bridge->list, &bridge_list);
mutex_unlock(&bridge_mutex);
/* register all slot objects under this bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- register_slot, NULL, bridge, NULL);
+ acpiphp_add_context, NULL, bridge, NULL);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "failed to register slots\n");
cleanup_bridge(bridge);
@@ -1061,7 +923,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
}
}
-/* Destroy hotplug slots associated with the PCI bus */
+/**
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
+ */
void acpiphp_remove_slots(struct pci_bus *bus)
{
struct acpiphp_bridge *bridge;
@@ -1092,13 +957,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
if (slot->flags & SLOT_IS_GOING_AWAY)
return -ENODEV;
- mutex_lock(&slot->crit_sect);
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
enable_slot(slot);
- mutex_unlock(&slot->crit_sect);
-
pci_unlock_rescan_remove();
return 0;
}
@@ -1114,8 +976,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
if (slot->flags & SLOT_IS_GOING_AWAY)
return -ENODEV;
- mutex_lock(&slot->crit_sect);
-
/* unconfigure all functions */
disable_slot(slot);
@@ -1129,7 +989,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
break;
}
- mutex_unlock(&slot->crit_sect);
return 0;
}
@@ -1137,9 +996,15 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
{
int ret;
+ /*
+ * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+ * acpiphp_disable_and_eject_slot() will be synchronized properly.
+ */
+ acpi_scan_lock_acquire();
pci_lock_rescan_remove();
ret = acpiphp_disable_and_eject_slot(slot);
pci_unlock_rescan_remove();
+ acpi_scan_lock_release();
return ret;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index ecfac7e72d91..8dcccffd6e21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -31,12 +31,11 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <acpi/acpi_bus.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
-#include <asm/uaccess.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <asm/uaccess.h>
#include "acpiphp.h"
#include "../pci.h"
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 31273e155e6c..037e2612c5bd 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -920,12 +920,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
- if (bus_cap & 20) {
+ if (bus_cap & 0x20) {
dbg("bus max supports 66MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
- if (bus_cap & 10) {
+ if (bus_cap & 0x10) {
dbg("bus max supports 66MHz PCI\n");
bus->max_bus_speed = PCI_SPEED_66MHz;
break;
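The cpqphp fix replaces decimal literals with the intended hex masks: decimal 20 is 0x14, so the old test looked at bits 2 and 4 rather than bit 5. A one-line comparison (the bus_cap value is illustrative):

u16 bus_cap = 0x20;                     /* 66MHz PCI-X capable, for the sake of the example */
bool before = bus_cap & 20;             /* 0x20 & 0x14 == 0: capability missed   */
bool after  = bus_cap & 0x20;           /* 0x20 & 0x20 != 0: capability detected */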
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ccb0925bcd7b..8a66866b8cf1 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -76,6 +76,7 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct delayed_work work; /* work for button event */
struct mutex lock;
+ struct mutex hotplug_lock;
struct workqueue_struct *wq;
};
@@ -109,6 +110,8 @@ struct controller {
#define INT_BUTTON_PRESS 7
#define INT_BUTTON_RELEASE 8
#define INT_BUTTON_CANCEL 9
+#define INT_LINK_UP 10
+#define INT_LINK_DOWN 11
#define STATIC_STATE 0
#define BLINKINGON_STATE 1
@@ -132,6 +135,7 @@ u8 pciehp_handle_attention_button(struct slot *p_slot);
u8 pciehp_handle_switch_change(struct slot *p_slot);
u8 pciehp_handle_presence_change(struct slot *p_slot);
u8 pciehp_handle_power_fault(struct slot *p_slot);
+void pciehp_handle_linkstate_change(struct slot *p_slot);
int pciehp_configure_device(struct slot *p_slot);
int pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
@@ -153,6 +157,7 @@ void pciehp_green_led_on(struct slot *slot);
void pciehp_green_led_off(struct slot *slot);
void pciehp_green_led_blink(struct slot *slot);
int pciehp_check_link_status(struct controller *ctrl);
+bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_reset_slot(struct slot *slot, int probe);
@@ -162,8 +167,6 @@ static inline const char *slot_name(struct slot *slot)
}
#ifdef CONFIG_ACPI
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>
void __init pciehp_acpi_slot_detection_init(void);
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index eddddd447d0d..20fea57d2149 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -112,6 +112,7 @@ static struct pcie_port_service_driver __initdata dummy_driver = {
static int __init select_detection_mode(void)
{
struct dummy_slot *slot, *tmp;
+
if (pcie_port_service_register(&dummy_driver))
return PCIEHP_DETECT_ACPI;
pcie_port_service_unregister(&dummy_driver);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 53b58debc288..0e0a2fff20a3 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -108,6 +108,7 @@ static int init_slot(struct controller *ctrl)
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
goto out;
+
ops->enable_slot = enable_slot;
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
@@ -283,8 +284,11 @@ static int pciehp_probe(struct pcie_device *dev)
slot = ctrl->slot;
pciehp_get_adapter_status(slot, &occupied);
pciehp_get_power_status(slot, &poweron);
- if (occupied && pciehp_force)
+ if (occupied && pciehp_force) {
+ mutex_lock(&slot->hotplug_lock);
pciehp_enable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
+ }
/* If empty slot's power status is on, turn power off */
if (!occupied && poweron && POWER_CTRL(ctrl))
pciehp_power_off_slot(slot);
@@ -328,10 +332,12 @@ static int pciehp_resume (struct pcie_device *dev)
/* Check if slot is occupied */
pciehp_get_adapter_status(slot, &status);
+ mutex_lock(&slot->hotplug_lock);
if (status)
pciehp_enable_slot(slot);
else
pciehp_disable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
return 0;
}
#endif /* PM */
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 50628487597d..c75e6a678dcc 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -150,6 +150,27 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
return 1;
}
+void pciehp_handle_linkstate_change(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Link Status Change */
+ ctrl_dbg(ctrl, "Data Link Layer State change\n");
+
+ if (pciehp_check_link_active(ctrl)) {
+ ctrl_info(ctrl, "slot(%s): Link Up event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_UP;
+ } else {
+ ctrl_info(ctrl, "slot(%s): Link Down event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_DOWN;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+}
+
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@@ -212,7 +233,8 @@ static int board_added(struct slot *p_slot)
if (retval) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
pci_domain_nr(parent), parent->number);
- goto err_exit;
+ if (retval != -EEXIST)
+ goto err_exit;
}
pciehp_green_led_on(p_slot);
@@ -255,6 +277,9 @@ static int remove_board(struct slot *p_slot)
struct power_work_info {
struct slot *p_slot;
struct work_struct work;
+ unsigned int req;
+#define DISABLE_REQ 0
+#define ENABLE_REQ 1
};
/**
@@ -269,30 +294,38 @@ static void pciehp_power_thread(struct work_struct *work)
struct power_work_info *info =
container_of(work, struct power_work_info, work);
struct slot *p_slot = info->p_slot;
+ int ret;
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
- case POWEROFF_STATE:
- mutex_unlock(&p_slot->lock);
+ switch (info->req) {
+ case DISABLE_REQ:
ctrl_dbg(p_slot->ctrl,
"Disabling domain:bus:device=%04x:%02x:00\n",
pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
pciehp_disable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
- break;
- case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot))
+ break;
+ case ENABLE_REQ:
+ ctrl_dbg(p_slot->ctrl,
+ "Enabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
+ ret = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ if (ret)
pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
+ mutex_unlock(&p_slot->lock);
break;
default:
break;
}
- mutex_unlock(&p_slot->lock);
kfree(info);
}
@@ -315,9 +348,11 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
p_slot->state = POWEROFF_STATE;
+ info->req = DISABLE_REQ;
break;
case BLINKINGON_STATE:
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
break;
default:
kfree(info);
@@ -364,11 +399,10 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
+ if (p_slot->state == BLINKINGOFF_STATE)
pciehp_green_led_on(p_slot);
- } else {
+ else
pciehp_green_led_off(p_slot);
- }
pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
"due to button press\n", slot_name(p_slot));
@@ -407,14 +441,81 @@ static void handle_surprise_event(struct slot *p_slot)
INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus)
+ if (!getstatus) {
p_slot->state = POWEROFF_STATE;
- else
+ info->req = DISABLE_REQ;
+ } else {
p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
+ }
queue_work(p_slot->wq, &info->work);
}
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_link_event(struct slot *p_slot, u32 event)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
+ INIT_WORK(&info->work, pciehp_power_thread);
+
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
+ /* Fall through */
+ case STATIC_STATE:
+ p_slot->state = event == INT_LINK_UP ?
+ POWERON_STATE : POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ break;
+ case POWERON_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event ignored on slot(%s): already powering on\n",
+ slot_name(p_slot));
+ kfree(info);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event queued on slot(%s): currently getting powered on\n",
+ slot_name(p_slot));
+ p_slot->state = POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ }
+ break;
+ case POWEROFF_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event queued on slot(%s): currently getting powered off\n",
+ slot_name(p_slot));
+ p_slot->state = POWERON_STATE;
+ queue_work(p_slot->wq, &info->work);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event ignored on slot(%s): already powering off\n",
+ slot_name(p_slot));
+ kfree(info);
+ }
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot(%s)\n",
+ slot_name(p_slot));
+ kfree(info);
+ break;
+ }
+}
+
static void interrupt_event_handler(struct work_struct *work)
{
struct event_info *info = container_of(work, struct event_info, work);
@@ -433,12 +534,23 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- case INT_PRESENCE_OFF:
if (!HP_SUPR_RM(ctrl))
break;
+ ctrl_dbg(ctrl, "Surprise Insertion\n");
+ handle_surprise_event(p_slot);
+ break;
+ case INT_PRESENCE_OFF:
+ /*
+ * Regardless of surprise capability, we definitely need to
+ * remove a card that has been pulled out!
+ */
ctrl_dbg(ctrl, "Surprise Removal\n");
handle_surprise_event(p_slot);
break;
+ case INT_LINK_UP:
+ case INT_LINK_DOWN:
+ handle_link_event(p_slot, info->event_type);
+ break;
default:
break;
}
@@ -447,6 +559,9 @@ static void interrupt_event_handler(struct work_struct *work)
kfree(info);
}
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
@@ -479,13 +594,15 @@ int pciehp_enable_slot(struct slot *p_slot)
pciehp_get_latch_status(p_slot, &getstatus);
rc = board_added(p_slot);
- if (rc) {
+ if (rc)
pciehp_get_latch_status(p_slot, &getstatus);
- }
+
return rc;
}
-
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
int pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
@@ -494,24 +611,6 @@ int pciehp_disable_slot(struct slot *p_slot)
if (!p_slot->ctrl)
return 1;
- if (!HP_SUPR_RM(p_slot->ctrl)) {
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- ctrl_info(ctrl, "No adapter on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
- if (MRL_SENS(p_slot->ctrl)) {
- pciehp_get_latch_status(p_slot, &getstatus);
- if (getstatus) {
- ctrl_info(ctrl, "Latch open on slot(%s)\n",
- slot_name(p_slot));
- return -ENODEV;
- }
- }
-
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
@@ -536,7 +635,9 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
+ mutex_lock(&p_slot->hotplug_lock);
retval = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
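handle_link_event() above decides, based on the current slot state, whether a link event should queue work for the power thread or be ignored as redundant. The decision, reduced to a stand-alone helper that is not part of the patch:

static bool should_queue(int state, bool link_up)
{
        switch (state) {
        case POWERON_STATE:
                return !link_up;        /* Link Up is redundant while already powering on */
        case POWEROFF_STATE:
                return link_up;         /* Link Down is redundant while already powering off */
        default:
                return true;            /* STATIC/BLINKING* states always act on the event */
        }
}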
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 14acfccb7670..d7d058fa19a4 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -206,7 +206,7 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
mutex_unlock(&ctrl->ctrl_lock);
}
-static bool check_link_active(struct controller *ctrl)
+bool pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
@@ -225,12 +225,12 @@ static void __pcie_wait_link_active(struct controller *ctrl, bool active)
{
int timeout = 1000;
- if (check_link_active(ctrl) == active)
+ if (pciehp_check_link_active(ctrl) == active)
return;
while (timeout > 0) {
msleep(10);
timeout -= 10;
- if (check_link_active(ctrl) == active)
+ if (pciehp_check_link_active(ctrl) == active)
return;
}
ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -242,11 +242,6 @@ static void pcie_wait_link_active(struct controller *ctrl)
__pcie_wait_link_active(ctrl, true);
}
-static void pcie_wait_link_not_active(struct controller *ctrl)
-{
- __pcie_wait_link_active(ctrl, false);
-}
-
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
u32 l;
@@ -332,11 +327,6 @@ static int pciehp_link_enable(struct controller *ctrl)
return __pciehp_link_set(ctrl, true);
}
-static int pciehp_link_disable(struct controller *ctrl)
-{
- return __pciehp_link_set(ctrl, false);
-}
-
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
@@ -508,14 +498,6 @@ void pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
- /* Disable the link at first */
- pciehp_link_disable(ctrl);
- /* wait the link is down */
- if (ctrl->link_active_reporting)
- pcie_wait_link_not_active(ctrl);
- else
- msleep(1000);
-
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
@@ -540,7 +522,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC);
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
detected &= ~intr_loc;
intr_loc |= detected;
if (!intr_loc)
@@ -579,6 +561,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
ctrl->power_fault_detected = 1;
pciehp_handle_power_fault(slot);
}
+
+ if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
+ pciehp_handle_linkstate_change(slot);
+
return IRQ_HANDLED;
}
@@ -596,9 +582,17 @@ void pcie_enable_notification(struct controller *ctrl)
* when it is cleared in the interrupt service routine, and
* next power fault detected interrupt was notified again.
*/
- cmd = PCI_EXP_SLTCTL_PDCE;
+
+ /*
+ * Always enable link events: thus link-up and link-down shall
+ * always be treated as hotplug and unplug respectively. Enable
+ * presence detect only if Attention Button is not present.
+ */
+ cmd = PCI_EXP_SLTCTL_DLLSCE;
if (ATTN_BUTTN(ctrl))
cmd |= PCI_EXP_SLTCTL_ABPE;
+ else
+ cmd |= PCI_EXP_SLTCTL_PDCE;
if (MRL_SENS(ctrl))
cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
@@ -606,7 +600,8 @@ void pcie_enable_notification(struct controller *ctrl)
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
- PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
pcie_write_cmd(ctrl, cmd, mask);
}
@@ -624,33 +619,38 @@ static void pcie_disable_notification(struct controller *ctrl)
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
- * bus reset of the bridge, but if the slot supports surprise removal we need
- * to disable presence detection around the bus reset and clear any spurious
+ * bus reset of the bridge, but at the same time we want to ensure that it is
+ * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
+ * disable link state notification and presence detection change notification
+ * momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
int pciehp_reset_slot(struct slot *slot, int probe)
{
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 stat_mask = 0, ctrl_mask = 0;
if (probe)
return 0;
- if (HP_SUPR_RM(ctrl)) {
- pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
- if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
+ if (!ATTN_BUTTN(ctrl)) {
+ ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
+ stat_mask |= PCI_EXP_SLTSTA_PDC;
}
+ ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
+ stat_mask |= PCI_EXP_SLTSTA_DLLSC;
+
+ pcie_write_cmd(ctrl, 0, ctrl_mask);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
pci_reset_bridge_secondary_bus(ctrl->pcie->port);
- if (HP_SUPR_RM(ctrl)) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_PDC);
- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
- if (pciehp_poll_mode)
- int_poll_timeout(ctrl->poll_timer.data);
- }
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
+ pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
return 0;
}
@@ -687,6 +687,7 @@ static int pcie_init_slot(struct controller *ctrl)
slot->ctrl = ctrl;
mutex_init(&slot->lock);
+ mutex_init(&slot->hotplug_lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index b07d7cc2d697..1b533060ce65 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -50,7 +50,7 @@ int pciehp_configure_device(struct slot *p_slot)
"at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
- ret = -EINVAL;
+ ret = -EEXIST;
goto out;
}
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 2c2930ea06ad..6b2b7dddbbdb 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
struct ioapic {
acpi_handle handle;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 9dce7c5e2a77..de7a74782f92 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -170,97 +170,6 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
pci_dev_put(dev);
}
-static int sriov_migration(struct pci_dev *dev)
-{
- u16 status;
- struct pci_sriov *iov = dev->sriov;
-
- if (!iov->num_VFs)
- return 0;
-
- if (!(iov->cap & PCI_SRIOV_CAP_VFM))
- return 0;
-
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
- if (!(status & PCI_SRIOV_STATUS_VFM))
- return 0;
-
- schedule_work(&iov->mtask);
-
- return 1;
-}
-
-static void sriov_migration_task(struct work_struct *work)
-{
- int i;
- u8 state;
- u16 status;
- struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
-
- for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_MI) {
- writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 1);
- } else if (state == PCI_SRIOV_VFM_MO) {
- virtfn_remove(iov->self, i, 1);
- writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
- state = readb(iov->mstate + i);
- if (state == PCI_SRIOV_VFM_AV)
- virtfn_add(iov->self, i, 0);
- }
- }
-
- pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
- status &= ~PCI_SRIOV_STATUS_VFM;
- pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
-}
-
-static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
-{
- int bir;
- u32 table;
- resource_size_t pa;
- struct pci_sriov *iov = dev->sriov;
-
- if (nr_virtfn <= iov->initial_VFs)
- return 0;
-
- pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
- bir = PCI_SRIOV_VFM_BIR(table);
- if (bir > PCI_STD_RESOURCE_END)
- return -EIO;
-
- table = PCI_SRIOV_VFM_OFFSET(table);
- if (table + nr_virtfn > pci_resource_len(dev, bir))
- return -EIO;
-
- pa = pci_resource_start(dev, bir) + table;
- iov->mstate = ioremap(pa, nr_virtfn);
- if (!iov->mstate)
- return -ENOMEM;
-
- INIT_WORK(&iov->mtask, sriov_migration_task);
-
- iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- return 0;
-}
-
-static void sriov_disable_migration(struct pci_dev *dev)
-{
- struct pci_sriov *iov = dev->sriov;
-
- iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-
- cancel_work_sync(&iov->mtask);
- iounmap(iov->mstate);
-}
-
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
@@ -351,12 +260,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto failed;
}
- if (iov->cap & PCI_SRIOV_CAP_VFM) {
- rc = sriov_enable_migration(dev, nr_virtfn);
- if (rc)
- goto failed;
- }
-
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
iov->num_VFs = nr_virtfn;
@@ -387,9 +290,6 @@ static void sriov_disable(struct pci_dev *dev)
if (!iov->num_VFs)
return;
- if (iov->cap & PCI_SRIOV_CAP_VFM)
- sriov_disable_migration(dev);
-
for (i = 0; i < iov->num_VFs; i++)
virtfn_remove(dev, i, 0);
@@ -688,25 +588,6 @@ void pci_disable_sriov(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_disable_sriov);
/**
- * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
- * @dev: the PCI device
- *
- * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
- *
- * Physical Function driver is responsible to register IRQ handler using
- * VF Migration Interrupt Message Number, and call this function when the
- * interrupt is generated by the hardware.
- */
-irqreturn_t pci_sriov_migration(struct pci_dev *dev)
-{
- if (!dev->is_physfn)
- return IRQ_NONE;
-
- return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
-}
-EXPORT_SYMBOL_GPL(pci_sriov_migration);
-
-/**
* pci_num_vf - return number of VFs associated with a PF device_release_driver
* @dev: the PCI device
*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a0fec6ce571..955ab7990c5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -545,9 +545,15 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
char *name = kmalloc(20, GFP_KERNEL);
+ if (!name)
+ goto error_attrs;
+
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
+ if (!msi_dev_attr) {
+ kfree(name);
goto error_attrs;
+ }
+
sprintf(name, "%d", entry->irq);
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = name;
@@ -589,6 +595,7 @@ error_attrs:
++count;
msi_attr = msi_attrs[count];
}
+ kfree(msi_attrs);
return ret;
}
@@ -959,7 +966,6 @@ EXPORT_SYMBOL(pci_disable_msi);
/**
* pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
-
* This function returns the number of device's MSI-X table entries and
* therefore the number of MSI-X vectors device is capable of sending.
* It returns a negative errno if the device is not capable of sending MSI-X
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 2bdbc0080204..f49abef88485 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -12,9 +12,6 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
@@ -306,10 +303,10 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
}
/* ACPI bus type */
-static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
+static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- bool is_bridge;
+ bool check_children;
u64 addr;
/*
@@ -317,14 +314,12 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
* is set only after acpi_pci_find_device() has been called for the
* given device.
*/
- is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+ check_children = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
- if (!*handle)
- return -ENODEV;
- return 0;
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
+ check_children);
}
static void pci_acpi_setup(struct device *dev)
@@ -367,7 +362,7 @@ static bool pci_acpi_bus_match(struct device *dev)
static struct acpi_bus_type acpi_pci_bus = {
.name = "PCI",
.match = pci_acpi_bus_match,
- .find_device = acpi_pci_find_device,
+ .find_companion = acpi_pci_find_companion,
.setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup,
};
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 25f0bc659164..d911e0c1f359 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -616,15 +616,11 @@ static int pci_pm_prepare(struct device *dev)
int error = 0;
/*
- * PCI devices suspended at run time need to be resumed at this
- * point, because in general it is necessary to reconfigure them for
- * system suspend. Namely, if the device is supposed to wake up the
- * system from the sleep state, we may need to reconfigure it for this
- * purpose. In turn, if the device is not supposed to wake up the
- * system from the sleep state, we'll have to prevent it from signaling
- * wake-up.
+ * Devices having power.ignore_children set may still be necessary for
+ * suspending their children in the next phase of device suspend.
*/
- pm_runtime_resume(dev);
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -654,6 +650,16 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ /*
+ * PCI devices suspended at run time need to be resumed at this point,
+ * because in general it is necessary to reconfigure them for system
+ * suspend. Namely, if the device is supposed to wake up the system
+ * from the sleep state, we may need to reconfigure it for this purpose.
+ * In turn, if the device is not supposed to wake up the system from the
+ * sleep state, we'll have to prevent it from signaling wake-up.
+ */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -808,6 +814,14 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ /*
+ * This used to be done in pci_pm_prepare() for all devices and some
+ * drivers may depend on it, so do it here. Ideally, runtime-suspended
+ * devices should not be touched during freeze/thaw transitions,
+ * however.
+ */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -915,6 +929,9 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ /* The reason to do that is the same as in pci_pm_suspend(). */
+ pm_runtime_resume(dev);
+
pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 6f5d343d251c..45113daaa778 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -29,7 +29,6 @@
#include <linux/nls.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
-#include <acpi/acpi_bus.h>
#include "pci.h"
#define DEVICE_LABEL_DSM 0x07
@@ -162,7 +161,6 @@ static const char device_label_dsm_uuid[] = {
};
enum acpi_attr_enum {
- ACPI_ATTR_NONE = 0,
ACPI_ATTR_LABEL_SHOW,
ACPI_ATTR_INDEX_SHOW,
};
@@ -170,84 +168,61 @@ enum acpi_attr_enum {
static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
{
int len;
- len = utf16s_to_utf8s((const wchar_t *)obj->
- package.elements[1].string.pointer,
- obj->package.elements[1].string.length,
+ len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
+ obj->string.length,
UTF16_LITTLE_ENDIAN,
buf, PAGE_SIZE);
buf[len] = '\n';
}
static int
-dsm_get_label(acpi_handle handle, int func,
- struct acpi_buffer *output,
- char *buf, enum acpi_attr_enum attribute)
+dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr)
{
- struct acpi_object_list input;
- union acpi_object params[4];
- union acpi_object *obj;
- int len = 0;
-
- int err;
-
- input.count = 4;
- input.pointer = params;
- params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(device_label_dsm_uuid);
- params[0].buffer.pointer = (char *)device_label_dsm_uuid;
- params[1].type = ACPI_TYPE_INTEGER;
- params[1].integer.value = 0x02;
- params[2].type = ACPI_TYPE_INTEGER;
- params[2].integer.value = func;
- params[3].type = ACPI_TYPE_PACKAGE;
- params[3].package.count = 0;
- params[3].package.elements = NULL;
-
- err = acpi_evaluate_object(handle, "_DSM", &input, output);
- if (err)
+ acpi_handle handle;
+ union acpi_object *obj, *tmp;
+ int len = -1;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
return -1;
- obj = (union acpi_object *)output->pointer;
-
- switch (obj->type) {
- case ACPI_TYPE_PACKAGE:
- if (obj->package.count != 2)
- break;
- len = obj->package.elements[0].integer.value;
- if (buf) {
- if (attribute == ACPI_ATTR_INDEX_SHOW)
- scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[0].integer.value);
- else if (attribute == ACPI_ATTR_LABEL_SHOW)
- dsm_label_utf16s_to_utf8s(obj, buf);
- kfree(output->pointer);
- return strlen(buf);
- }
- kfree(output->pointer);
- return len;
- break;
- default:
- kfree(output->pointer);
+ obj = acpi_evaluate_dsm(handle, device_label_dsm_uuid, 0x2,
+ DEVICE_LABEL_DSM, NULL);
+ if (!obj)
+ return -1;
+
+ tmp = obj->package.elements;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
+ tmp[0].type == ACPI_TYPE_INTEGER &&
+ tmp[1].type == ACPI_TYPE_STRING) {
+ /*
+ * The second string element is optional even when
+ * this _DSM is implemented; when not implemented,
+ * this entry must return a null string.
+ */
+ if (attr == ACPI_ATTR_INDEX_SHOW)
+ scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+ else if (attr == ACPI_ATTR_LABEL_SHOW)
+ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+ len = strlen(buf) > 0 ? strlen(buf) : -1;
}
- return -1;
+
+ ACPI_FREE(obj);
+
+ return len;
}
static bool
device_has_dsm(struct device *dev)
{
acpi_handle handle;
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
handle = ACPI_HANDLE(dev);
-
if (!handle)
- return FALSE;
+ return false;
- if (dsm_get_label(handle, DEVICE_LABEL_DSM, &output, NULL,
- ACPI_ATTR_NONE) > 0)
- return TRUE;
-
- return FALSE;
+ return !!acpi_check_dsm(handle, device_label_dsm_uuid, 0x2,
+ 1 << DEVICE_LABEL_DSM);
}
static umode_t
@@ -266,44 +241,13 @@ acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
static ssize_t
acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle handle;
- int length;
-
- handle = ACPI_HANDLE(dev);
-
- if (!handle)
- return -1;
-
- length = dsm_get_label(handle, DEVICE_LABEL_DSM,
- &output, buf, ACPI_ATTR_LABEL_SHOW);
-
- if (length < 1)
- return -1;
-
- return length;
+ return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
}
static ssize_t
acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle handle;
- int length;
-
- handle = ACPI_HANDLE(dev);
-
- if (!handle)
- return -1;
-
- length = dsm_get_label(handle, DEVICE_LABEL_DSM,
- &output, buf, ACPI_ATTR_INDEX_SHOW);
-
- if (length < 0)
- return -1;
-
- return length;
-
+ return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
}
static struct device_attribute acpi_attr_label = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 276ef9c18802..4e0acefb7565 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -351,28 +351,17 @@ static struct device_attribute dev_rescan_attr = __ATTR(rescan,
(S_IWUSR|S_IWGRP),
NULL, dev_rescan_store);
-static void remove_callback(struct device *dev)
-{
- pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
-}
-
static ssize_t
-remove_store(struct device *dev, struct device_attribute *dummy,
+remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret = 0;
unsigned long val;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
- /* An attribute cannot be unregistered by one of its own methods,
- * so we have to use this roundabout approach.
- */
- if (val)
- ret = device_schedule_callback(dev, remove_callback);
- if (ret)
- count = ret;
+ if (val && device_remove_file_self(dev, attr))
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
return count;
}
static struct device_attribute dev_remove_attr = __ATTR(remove,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1febe90831b4..7325d43bf030 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -108,12 +108,12 @@ static bool pcie_ari_disabled;
*/
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
- struct list_head *tmp;
+ struct pci_bus *tmp;
unsigned char max, n;
max = bus->busn_res.end;
- list_for_each(tmp, &bus->children) {
- n = pci_bus_max_busnr(pci_bus_b(tmp));
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
if(n > max)
max = n;
}
@@ -401,33 +401,40 @@ EXPORT_SYMBOL_GPL(pci_find_ht_capability);
* @res: child resource record for which parent is sought
*
* For given resource region of given device, return the resource
- * region of parent bus the given region is contained in or where
- * it should be allocated from.
+ * region of parent bus the given region is contained in.
*/
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
+ struct resource *r;
int i;
- struct resource *best = NULL, *r;
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
- if (res->start && !(res->start >= r->start && res->end <= r->end))
- continue; /* Not contained */
- if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
- continue; /* Wrong type */
- if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
- return r; /* Exact match */
- /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
- if (r->flags & IORESOURCE_PREFETCH)
- continue;
- /* .. but we can put a prefetchable resource inside a non-prefetchable one */
- if (!best)
- best = r;
+ if (res->start && resource_contains(r, res)) {
+
+ /*
+ * If the window is prefetchable but the BAR is
+ * not, the allocator made a mistake.
+ */
+ if (r->flags & IORESOURCE_PREFETCH &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ return NULL;
+
+ /*
+ * If we're below a transparent bridge, there may
+ * be both a positively-decoded aperture and a
+ * subtractively-decoded region that contain the BAR.
+ * We want the positively-decoded one, so this depends
+ * on pci_bus_for_each_resource() giving us those
+ * first.
+ */
+ return r;
+ }
}
- return best;
+ return NULL;
}
/**
@@ -1178,9 +1185,16 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
+{
+ return pci_enable_resources(dev, bars);
+}
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
+ u16 cmd;
+ u8 pin;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
@@ -1190,6 +1204,17 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
return 0;
}
@@ -1611,29 +1636,27 @@ static void pci_pme_list_scan(struct work_struct *work)
struct pci_pme_device *pme_dev, *n;
mutex_lock(&pci_pme_list_mutex);
- if (!list_empty(&pci_pme_list)) {
- list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
- if (pme_dev->dev->pme_poll) {
- struct pci_dev *bridge;
-
- bridge = pme_dev->dev->bus->self;
- /*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
- */
- if (bridge && bridge->current_state != PCI_D0)
- continue;
- pci_pme_wakeup(pme_dev->dev, NULL);
- } else {
- list_del(&pme_dev->list);
- kfree(pme_dev);
- }
+ list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+ if (pme_dev->dev->pme_poll) {
+ struct pci_dev *bridge;
+
+ bridge = pme_dev->dev->bus->self;
+ /*
+ * If bridge is in low power state, the
+ * configuration space of subordinate devices
+ * may be not accessible
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
}
- if (!list_empty(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
}
+ if (!list_empty(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -2180,21 +2203,18 @@ void pci_request_acs(void)
}
/**
- * pci_enable_acs - enable ACS if hardware support it
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
-void pci_enable_acs(struct pci_dev *dev)
+static int pci_std_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
u16 ctrl;
- if (!pci_acs_enable)
- return;
-
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
- return;
+ return -ENODEV;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2212,6 +2232,23 @@ void pci_enable_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_std_enable_acs(dev))
+ return;
+
+ pci_dev_specific_enable_acs(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -4237,6 +4274,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
"Rounding up size of resource #%d to %#llx.\n",
i, (unsigned long long)size);
}
+ r->flags |= IORESOURCE_UNSET;
r->end = size - 1;
r->start = 0;
}
@@ -4250,6 +4288,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
+ r->flags |= IORESOURCE_UNSET;
r->end = resource_size(r) - 1;
r->start = 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4df38df224f4..6bd082299e31 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,8 +1,6 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#include <linux/workqueue.h>
-
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
@@ -240,8 +238,6 @@ struct pci_sriov {
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for VF bus */
- struct work_struct mtask; /* VF Migration task */
- u8 __iomem *mstate; /* VF Migration State Array */
};
#ifdef CONFIG_PCI_ATS
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 04796c056d12..ef09f5f2fe6c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -252,6 +252,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
/* Address above 32-bit boundary; disable the BAR */
pci_write_config_dword(dev, pos, 0);
pci_write_config_dword(dev, pos + 4, 0);
+ res->flags |= IORESOURCE_UNSET;
region.start = 0;
region.end = sz64;
bar_disabled = true;
@@ -731,22 +732,6 @@ struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *de
return child;
}
-static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
-{
- struct pci_bus *parent = child->parent;
-
- /* Attempts to fix that up are really dangerous unless
- we're going to re-assign all bus numbers. */
- if (!pcibios_assign_all_busses())
- return;
-
- while (parent->parent && parent->busn_res.end < max) {
- parent->busn_res.end = max;
- pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
- parent = parent->parent;
- }
-}
-
/*
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
@@ -782,7 +767,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
/* Check if setup is sensible at all */
if (!pass &&
(primary != bus->number || secondary <= bus->number ||
- secondary > subordinate)) {
+ secondary > subordinate || subordinate > bus->busn_res.end)) {
dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
secondary, subordinate);
broken = 1;
@@ -805,11 +790,10 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
goto out;
/*
- * If we already got to this bus through a different bridge,
- * don't re-add it. This can happen with the i450NX chipset.
- *
- * However, we continue to descend down the hierarchy and
- * scan remaining child buses.
+ * The bus might already exist for two reasons: Either we are
+ * rescanning the bus or the bus is reachable through more than
+ * one bridge. The second case can happen with the i450NX
+ * chipset.
*/
child = pci_find_bus(pci_domain_nr(bus), secondary);
if (!child) {
@@ -822,17 +806,19 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
cmax = pci_scan_child_bus(child);
- if (cmax > max)
- max = cmax;
- if (child->busn_res.end > max)
- max = child->busn_res.end;
+ if (cmax > subordinate)
+ dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
+ subordinate, cmax);
+ /* subordinate should equal child->busn_res.end */
+ if (subordinate > max)
+ max = subordinate;
} else {
/*
* We need to assign a number to this bus which we always
* do in the second pass.
*/
if (!pass) {
- if (pcibios_assign_all_busses() || broken)
+ if (pcibios_assign_all_busses() || broken || is_cardbus)
/* Temporarily disable forwarding of the
configuration cycles on all bridges in
this bus segment to avoid possible
@@ -844,19 +830,25 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
goto out;
}
+ if (max >= bus->busn_res.end) {
+ dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
+ max, &bus->busn_res);
+ goto out;
+ }
+
/* Clear errors */
pci_write_config_word(dev, PCI_STATUS, 0xffff);
- /* Prevent assigning a bus number that already exists.
- * This can happen when a bridge is hot-plugged, so in
- * this case we only re-scan this bus. */
+ /* The bus will already exist if we are rescanning */
child = pci_find_bus(pci_domain_nr(bus), max+1);
if (!child) {
- child = pci_add_new_bus(bus, dev, ++max);
+ child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max, 0xff);
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
}
+ max++;
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->busn_res.start) << 8)
@@ -878,20 +870,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
- /*
- * Adjust subordinate busnr in parent buses.
- * We do this before scanning for children because
- * some devices may not be detected if the bios
- * was lazy.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
- /* Now we can scan all subordinate buses... */
max = pci_scan_child_bus(child);
- /*
- * now fix it up again since we have found
- * the real value of max.
- */
- pci_fixup_parent_subordinate_busnr(child, max);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
@@ -922,11 +901,15 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
}
}
max += i;
- pci_fixup_parent_subordinate_busnr(child, max);
}
/*
* Set the subordinate bus number to its real value.
*/
+ if (max > bus->busn_res.end) {
+ dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
+ max, &bus->busn_res);
+ max = bus->busn_res.end;
+ }
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -1125,10 +1108,10 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
/*
- * Do the ugly legacy mode stuff here rather than broken chip
- * quirk code. Legacy mode ATA controllers have fixed
- * addresses. These are not always echoed in BAR0-3, and
- * BAR0-3 in a few cases contain junk!
+ * Do the ugly legacy mode stuff here rather than broken chip
+ * quirk code. Legacy mode ATA controllers have fixed
+ * addresses. These are not always echoed in BAR0-3, and
+ * BAR0-3 in a few cases contain junk!
*/
if (class == PCI_CLASS_STORAGE_IDE) {
u8 progif;
@@ -1139,11 +1122,15 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[0];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ res);
region.start = 0x3F6;
region.end = 0x3F6;
res = &dev->resource[1];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ res);
}
if ((progif & 4) == 0) {
region.start = 0x170;
@@ -1151,11 +1138,15 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[2];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ res);
region.start = 0x376;
region.end = 0x376;
res = &dev->resource[3];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ res);
}
}
break;
@@ -1208,18 +1199,6 @@ static void pci_release_capabilities(struct pci_dev *dev)
pci_free_cap_save_buffers(dev);
}
-static void pci_free_resources(struct pci_dev *dev)
-{
- int i;
-
- pci_cleanup_rom(dev);
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *res = dev->resource + i;
- if (res->parent)
- release_resource(res);
- }
-}
-
/**
* pci_release_dev - free a pci device structure when all users of it are finished.
* @dev: device that's been disconnected
@@ -1229,14 +1208,9 @@ static void pci_free_resources(struct pci_dev *dev)
*/
static void pci_release_dev(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
-
- down_write(&pci_bus_sem);
- list_del(&pci_dev->bus_list);
- up_write(&pci_bus_sem);
-
- pci_free_resources(pci_dev);
+ struct pci_dev *pci_dev;
+ pci_dev = to_pci_dev(dev);
pci_release_capabilities(pci_dev);
pci_release_of_node(pci_dev);
pcibios_release_device(pci_dev);
@@ -1852,7 +1826,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
res->flags |= IORESOURCE_PCI_FIXED;
}
- conflict = insert_resource_conflict(parent_res, res);
+ conflict = request_resource_conflict(parent_res, res);
if (conflict)
dev_printk(KERN_DEBUG, &b->dev,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5cb726c193de..e7292065a1b1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -296,6 +296,7 @@ static void quirk_s3_64M(struct pci_dev *dev)
struct resource *r = &dev->resource[0];
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0x3ffffff;
}
@@ -937,6 +938,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C
static void quirk_dunord(struct pci_dev *dev)
{
struct resource *r = &dev->resource [1];
+
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xffffff;
}
@@ -1740,6 +1743,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev)
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xf;
}
@@ -1769,6 +1773,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
dev_info(&dev->dev,
"Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
+ r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xff;
}
@@ -3423,6 +3428,61 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+/*
+ * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+ * actual PCIe ACS capability. This is the list of device IDs known to fall
+ * into that category as provided by Intel in Red Hat bugzilla 1037684.
+ */
+static const u16 pci_quirk_intel_pch_acs_ids[] = {
+ /* Ibexpeak PCH */
+ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
+ 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
+ /* Cougarpoint PCH */
+ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
+ 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
+ /* Pantherpoint PCH */
+ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
+ 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
+ /* Lynxpoint-H PCH */
+ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
+ 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
+ /* Lynxpoint-LP PCH */
+ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
+ 0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
+ /* Wildcat PCH */
+ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
+ 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
+};
+
+static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
+{
+ int i;
+
+ /* Filter out a few obvious non-matches first */
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
+ if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
+ return true;
+
+ return false;
+}
+
+#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
+
+static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
+ INTEL_PCH_ACS_FLAGS : 0;
+
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ return acs_flags & ~flags ? 0 : 1;
+}
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@@ -3434,6 +3494,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0 }
};
@@ -3461,3 +3522,132 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
return -ENOTTY;
}
+
+/* Config space offset of Root Complex Base Address register */
+#define INTEL_LPC_RCBA_REG 0xf0
+/* 31:14 RCBA address */
+#define INTEL_LPC_RCBA_MASK 0xffffc000
+/* RCBA Enable */
+#define INTEL_LPC_RCBA_ENABLE (1 << 0)
+
+/* Backbone Scratch Pad Register */
+#define INTEL_BSPR_REG 0x1104
+/* Backbone Peer Non-Posted Disable */
+#define INTEL_BSPR_REG_BPNPD (1 << 8)
+/* Backbone Peer Posted Disable */
+#define INTEL_BSPR_REG_BPPD (1 << 9)
+
+/* Upstream Peer Decode Configuration Register */
+#define INTEL_UPDCR_REG 0x1114
+/* 5:0 Peer Decode Enable bits */
+#define INTEL_UPDCR_REG_MASK 0x3f
+
+static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
+{
+ u32 rcba, bspr, updcr;
+ void __iomem *rcba_mem;
+
+ /*
+ * Read the RCBA register from the LPC (D31:F0). PCH root ports
+ * are D28:F* and therefore get probed before LPC, thus we can't
+ * use pci_get_slot/pci_read_config_dword here.
+ */
+ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
+ INTEL_LPC_RCBA_REG, &rcba);
+ if (!(rcba & INTEL_LPC_RCBA_ENABLE))
+ return -EINVAL;
+
+ rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ PAGE_ALIGN(INTEL_UPDCR_REG));
+ if (!rcba_mem)
+ return -ENOMEM;
+
+ /*
+ * The BSPR can disallow peer cycles, but it's set by soft strap and
+ * therefore read-only. If both posted and non-posted peer cycles are
+ * disallowed, we're ok. If either are allowed, then we need to use
+ * the UPDCR to disable peer decodes for each port. This provides the
+ * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
+ */
+ bspr = readl(rcba_mem + INTEL_BSPR_REG);
+ bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
+ if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
+ updcr = readl(rcba_mem + INTEL_UPDCR_REG);
+ if (updcr & INTEL_UPDCR_REG_MASK) {
+ dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
+ updcr &= ~INTEL_UPDCR_REG_MASK;
+ writel(updcr, rcba_mem + INTEL_UPDCR_REG);
+ }
+ }
+
+ iounmap(rcba_mem);
+ return 0;
+}
+
+/* Miscellaneous Port Configuration register */
+#define INTEL_MPC_REG 0xd8
+/* MPC: Invalid Receive Bus Number Check Enable */
+#define INTEL_MPC_REG_IRBNCE (1 << 26)
+
+static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
+{
+ u32 mpc;
+
+ /*
+ * When enabled, the IRBNCE bit of the MPC register enables the
+ * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
+ * ensures that requester IDs fall within the bus number range
+ * of the bridge. Enable if not already.
+ */
+ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
+ if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
+ dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
+ mpc |= INTEL_MPC_REG_IRBNCE;
+ pci_write_config_word(dev, INTEL_MPC_REG, mpc);
+ }
+}
+
+static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
+{
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ if (pci_quirk_enable_intel_lpc_acs(dev)) {
+ dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
+ return 0;
+ }
+
+ pci_quirk_enable_intel_rp_mpc_acs(dev);
+
+ dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
+
+ dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
+static const struct pci_dev_enable_acs {
+ u16 vendor;
+ u16 device;
+ int (*enable_acs)(struct pci_dev *dev);
+} pci_dev_enable_acs[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { 0 }
+};
+
+void pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ const struct pci_dev_enable_acs *i;
+ int ret;
+
+ for (i = pci_dev_enable_acs; i->enable_acs; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->enable_acs(dev);
+ if (ret >= 0)
+ return;
+ }
+ }
+}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 4ff36bfa785e..8bd76c9ba21c 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -3,6 +3,18 @@
#include <linux/pci-aspm.h>
#include "pci.h"
+static void pci_free_resources(struct pci_dev *dev)
+{
+ int i;
+
+ pci_cleanup_rom(dev);
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = dev->resource + i;
+ if (res->parent)
+ release_resource(res);
+ }
+}
+
static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
@@ -25,6 +37,11 @@ static void pci_destroy_dev(struct pci_dev *dev)
device_del(&dev->dev);
+ down_write(&pci_bus_sem);
+ list_del(&dev->bus_list);
+ up_write(&pci_bus_sem);
+
+ pci_free_resources(dev);
put_device(&dev->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 5d595724e5f4..c1839450d4d6 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -197,8 +197,10 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
void pci_cleanup_rom(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
if (res->flags & IORESOURCE_ROM_COPY) {
kfree((void*)(unsigned long)res->start);
+ res->flags |= IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_ROM_COPY;
res->start = 0;
res->end = 0;
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 3ff2ac7c14e2..4a1b972efe7f 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -54,14 +54,14 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
- struct pci_bus* child;
- struct list_head *tmp;
+ struct pci_bus *child;
+ struct pci_bus *tmp;
if(bus->number == busnr)
return bus;
- list_for_each(tmp, &bus->children) {
- child = pci_do_find_bus(pci_bus_b(tmp), busnr);
+ list_for_each_entry(tmp, &bus->children, node) {
+ child = pci_do_find_bus(tmp, busnr);
if(child)
return child;
}
@@ -111,7 +111,7 @@ pci_find_next_bus(const struct pci_bus *from)
down_read(&pci_bus_sem);
n = from ? from->node.next : pci_root_buses.next;
if (n != &pci_root_buses)
- b = pci_bus_b(n);
+ b = list_entry(n, struct pci_bus, node);
up_read(&pci_bus_sem);
return b;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 5c060b152ce6..7eed671d5586 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -44,6 +44,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (!res->flags)
return;
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
/*
* Ignore non-moveable resources. This might be legacy resources for
* which no functional BAR register exists or another important
@@ -101,11 +104,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (disable)
pci_write_config_word(dev, PCI_COMMAND, cmd);
-
- res->flags &= ~IORESOURCE_UNSET;
- dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -113,18 +111,23 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
struct resource *res = &dev->resource[resource];
struct resource *root, *conflict;
+ if (res->flags & IORESOURCE_UNSET) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n",
+ resource, res);
+ return -EINVAL;
+ }
+
root = pci_find_parent_resource(dev, res);
if (!root) {
- dev_info(&dev->dev, "no compatible bridge window for %pR\n",
- res);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+ resource, res);
return -EINVAL;
}
conflict = request_resource_conflict(root, res);
if (conflict) {
- dev_info(&dev->dev,
- "address space collision: %pR conflicts with %s %pR\n",
- res, conflict->name, conflict);
+ dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+ resource, res, conflict->name, conflict);
return -EBUSY;
}
@@ -263,6 +266,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
resource_size_t align, size;
int ret;
+ res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
dev_info(&dev->dev, "BAR %d: can't assign %pR "
@@ -282,6 +286,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
ret = pci_revert_fw_address(res, dev, resno, size);
if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
@@ -297,6 +302,7 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
resource_size_t new_size;
int ret;
+ res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
"\n", resno, res);
@@ -307,6 +313,7 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (!ret) {
+ res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
@@ -336,9 +343,15 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
+ if (r->flags & IORESOURCE_UNSET) {
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n",
+ i, r);
+ return -EINVAL;
+ }
+
if (!r->parent) {
- dev_err(&dev->dev, "device not available "
- "(can't reserve %pR)\n", r);
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n",
+ i, r);
return -EINVAL;
}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d1cd60f51f87..179b8edc2262 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
+#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
#define INVALID_GRANT_REF (0)
@@ -1146,6 +1147,9 @@ static int __init pcifront_init(void)
if (!xen_pv_domain() || xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
pci_frontend_registrar(1 /* enable */);
return xenbus_register_frontend(&xenpci_driver);
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 6eecd7cddf57..54d3089d157b 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -125,9 +125,6 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
if (freqs->new < freqs->old)
sa1100_pcmcia_set_mecr(skt, freqs->new);
break;
- case CPUFREQ_RESUMECHANGE:
- sa1100_pcmcia_set_mecr(skt, freqs->new);
- break;
}
return 0;
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8485761e76af..946f90ef6020 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1076,7 +1076,7 @@ static void yenta_config_init(struct yenta_socket *socket)
*/
static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
{
- struct list_head *tmp;
+ struct pci_bus *sibling;
unsigned char upper_limit;
/*
* We only check and fix the parent bridge: All systems which need
@@ -1095,18 +1095,18 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
/* stay within the limits of the bus range of the parent: */
upper_limit = bridge_to_fix->parent->busn_res.end;
- /* check the bus ranges of all silbling bridges to prevent overlap */
- list_for_each(tmp, &bridge_to_fix->parent->children) {
- struct pci_bus *silbling = pci_bus_b(tmp);
+ /* check the bus ranges of all sibling bridges to prevent overlap */
+ list_for_each_entry(sibling, &bridge_to_fix->parent->children,
+ node) {
/*
- * If the silbling has a higher secondary bus number
+ * If the sibling has a higher secondary bus number
* and its secondary is equal or smaller than our
* current upper limit, set the new upper limit to
- * the bus number below the silbling's range:
+ * the bus number below the sibling's range:
*/
- if (silbling->busn_res.start > bridge_to_fix->busn_res.end
- && silbling->busn_res.start <= upper_limit)
- upper_limit = silbling->busn_res.start - 1;
+ if (sibling->busn_res.start > bridge_to_fix->busn_res.end
+ && sibling->busn_res.start <= upper_limit)
+ upper_limit = sibling->busn_res.start - 1;
}
/* Show that the wanted subordinate number is not possible: */
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b901c472d7f3..8d3c49cc500f 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,7 +5,7 @@
menu "PHY Subsystem"
config GENERIC_PHY
- tristate "PHY Core"
+ bool "PHY Core"
help
Generic PHY support.
@@ -17,28 +17,55 @@ config GENERIC_PHY
config PHY_EXYNOS_MIPI_VIDEO
tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+ depends on HAS_IOMEM
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ select GENERIC_PHY
+ default y if ARCH_S5PV210 || ARCH_EXYNOS
help
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
and EXYNOS SoCs.
config PHY_MVEBU_SATA
def_bool y
- depends on ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE
depends on OF
select GENERIC_PHY
+config OMAP_CONTROL_PHY
+ tristate "OMAP CONTROL PHY Driver"
+ help
+ Enable this to add support for the PHY part present in the control
+ module. This driver has API to power on the USB2 PHY and to write to
+ the mailbox. The mailbox is present only in omap4 and the register to
+ power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
+ additional register to power on USB3 PHY/SATA PHY/PCIE PHY
+ (PIPE3 PHY).
+
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
depends on USB_PHY
select GENERIC_PHY
- select OMAP_CONTROL_USB
+ select OMAP_CONTROL_PHY
+ depends on OMAP_OCP2SCP
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
The USB OTG controller communicates with the comparator using this
driver.
+config TI_PIPE3
+ tristate "TI PIPE3 PHY Driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ select GENERIC_PHY
+ select OMAP_CONTROL_PHY
+ depends on OMAP_OCP2SCP
+ help
+ Enable this to support the PIPE3 PHY that is part of TI SOCs. This
+ driver takes care of all the PHY functionality apart from comparator.
+ This driver interacts with the "OMAP Control PHY Driver" to power
+ on/off the PHY.
+
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
@@ -53,6 +80,8 @@ config TWL4030_USB
config PHY_EXYNOS_DP_VIDEO
tristate "EXYNOS SoC series Display Port PHY driver"
depends on OF
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default ARCH_EXYNOS
select GENERIC_PHY
help
Support for Display Port PHY found on Samsung EXYNOS SoCs.
@@ -60,7 +89,81 @@ config PHY_EXYNOS_DP_VIDEO
config BCM_KONA_USB2_PHY
tristate "Broadcom Kona USB2 PHY Driver"
depends on GENERIC_PHY
+ depends on HAS_IOMEM
help
Enable this to support the Broadcom Kona USB 2.0 PHY.
+config PHY_EXYNOS5250_SATA
+ tristate "Exynos5250 Sata SerDes/PHY driver"
+ depends on SOC_EXYNOS5250
+ depends on HAS_IOMEM
+ depends on OF
+ select GENERIC_PHY
+ select I2C
+ select I2C_S3C2410
+ select MFD_SYSCON
+ help
+ Enable this to support SATA SerDes/Phy found on Samsung's
+ Exynos5250 based SoCs. This SerDes/Phy supports SATA 1.5 Gb/s,
+ SATA 3.0 Gb/s, SATA 6.0 Gb/s speeds. It supports one SATA host
+ port to accept one SATA device.
+
+config PHY_SUN4I_USB
+ tristate "Allwinner sunxi SoC USB PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the transceiver that is part of Allwinner
+ sunxi SoCs.
+
+ This driver controls the entire USB PHY block, both the USB OTG
+ parts, as well as the 2 regular USB 2 host PHYs.
+
+config PHY_SAMSUNG_USB2
+ tristate "Samsung USB 2.0 PHY driver"
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the Samsung USB 2.0 PHY driver for Samsung
+ SoCs. This driver provides the interface for USB 2.0 PHY. Support for
+ particular SoCs has to be enabled in addition to this driver. Number
+ and type of supported phys depends on the SoC.
+
+config PHY_EXYNOS4210_USB2
+ bool "Support for Exynos 4210"
+ depends on PHY_SAMSUNG_USB2
+ depends on CPU_EXYNOS4210
+ help
+ Enable USB PHY support for Exynos 4210. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 4210 four
+ phys are available - device, host, HSIC0 and HSIC1.
+
+config PHY_EXYNOS4X12_USB2
+ bool "Support for Exynos 4x12"
+ depends on PHY_SAMSUNG_USB2
+ depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ help
+ Enable USB PHY support for Exynos 4x12. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 4x12 four
+ phys are available - device, host, HSIC0 and HSIC1.
+
+config PHY_EXYNOS5250_USB2
+ bool "Support for Exynos 5250"
+ depends on PHY_SAMSUNG_USB2
+ depends on SOC_EXYNOS5250
+ help
+ Enable USB PHY support for Exynos 5250. This option requires that
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled in the driver. In case of Exynos 5250 four
+ phys are available - device, host, HSIC0 and HSIC.
+
+config PHY_XGENE
+ tristate "APM X-Gene 15Gbps PHY support"
+ depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for APM X-Gene SoC multi-purpose PHY.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index b57c25371cca..2faf78edc864 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -7,5 +7,14 @@ obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
+obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
+obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
+obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
+obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-samsung-usb2.o
+obj-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
+obj-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
+obj-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
+obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
index efc5c1a13a5d..e94f5a6a5645 100644
--- a/drivers/phy/phy-bcm-kona-usb2.c
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -128,10 +128,8 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id bcm_kona_usb2_dt_ids[] = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867c1257..623b71c54b3e 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -173,6 +176,8 @@ int phy_init(struct phy *phy)
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->init_count;
@@ -187,6 +192,9 @@ int phy_exit(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -212,6 +220,9 @@ int phy_power_on(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -223,6 +234,8 @@ int phy_power_on(struct phy *phy)
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->power_count;
mutex_unlock(&phy->mutex);
@@ -240,6 +253,9 @@ int phy_power_off(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
mutex_lock(&phy->mutex);
if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
@@ -258,8 +274,8 @@ int phy_power_off(struct phy *phy)
EXPORT_SYMBOL_GPL(phy_power_off);
/**
- * of_phy_get() - lookup and obtain a reference to a phy by phandle
- * @dev: device that requests this phy
+ * _of_phy_get() - lookup and obtain a reference to a phy by phandle
+ * @np: device_node for which to get the phy
* @index: the index of the phy
*
* Returns the phy associated with the given phandle value,
@@ -268,20 +284,17 @@ EXPORT_SYMBOL_GPL(phy_power_off);
 * not yet loaded. This function uses the of_xlate callback provided
* while registering the phy_provider to find the phy instance.
*/
-static struct phy *of_phy_get(struct device *dev, int index)
+static struct phy *_of_phy_get(struct device_node *np, int index)
{
int ret;
struct phy_provider *phy_provider;
struct phy *phy = NULL;
struct of_phandle_args args;
- ret = of_parse_phandle_with_args(dev->of_node, "phys", "#phy-cells",
+ ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
index, &args);
- if (ret) {
- dev_dbg(dev, "failed to get phy in %s node\n",
- dev->of_node->full_name);
+ if (ret)
return ERR_PTR(-ENODEV);
- }
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
@@ -301,6 +314,36 @@ err0:
}
/**
+ * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
+ * @np: device_node for which to get the phy
+ * @con_id: name of the phy from device's point of view
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * -ENODEV if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *of_phy_get(struct device_node *np, const char *con_id)
+{
+ struct phy *phy = NULL;
+ int index = 0;
+
+ if (con_id)
+ index = of_property_match_string(np, "phy-names", con_id);
+
+ phy = _of_phy_get(np, index);
+ if (IS_ERR(phy))
+ return phy;
+
+ if (!try_module_get(phy->ops->owner))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&phy->dev);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(of_phy_get);
+
+/**
* phy_put() - release the PHY
* @phy: the phy returned by phy_get()
*
@@ -308,7 +351,7 @@ err0:
*/
void phy_put(struct phy *phy)
{
- if (IS_ERR(phy))
+ if (!phy || IS_ERR(phy))
return;
module_put(phy->ops->owner);
@@ -328,6 +371,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
{
int r;
+ if (!phy)
+ return;
+
r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
@@ -388,18 +434,12 @@ struct phy *phy_get(struct device *dev, const char *string)
if (dev->of_node) {
index = of_property_match_string(dev->of_node, "phy-names",
string);
- phy = of_phy_get(dev, index);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
+ phy = _of_phy_get(dev->of_node, index);
} else {
phy = phy_lookup(dev, string);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
}
+ if (IS_ERR(phy))
+ return phy;
if (!try_module_get(phy->ops->owner))
return ERR_PTR(-EPROBE_DEFER);
@@ -411,6 +451,27 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * NULL if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -441,6 +502,61 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_get);
/**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, the release function is invoked on the devres
+ * data and then the devres data is freed. This differs from devm_phy_get()
+ * in that, if the phy does not exist, it is not considered an error and
+ * -ENODEV is not returned. Instead a NULL phy is returned, which can be
+ * passed to all other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = devm_phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
+
+/**
+ * devm_of_phy_get() - lookup and obtain a reference to a phy.
+ * @dev: device that requests this phy
+ * @np: node containing the phy
+ * @con_id: name of the phy from device's point of view
+ *
+ * Gets the phy using of_phy_get(), and associates a device with it using
+ * devres. On driver detach, the release function is invoked on the devres
+ * data and then the devres data is freed.
+ */
+struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
+ const char *con_id)
+{
+ struct phy **ptr, *phy;
+
+ ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ phy = of_phy_get(np, con_id);
+ if (!IS_ERR(phy)) {
+ *ptr = phy;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_get);
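
Taken together, the consumer-facing additions above (phy_optional_get(), devm_phy_optional_get(), devm_of_phy_get(), and the NULL-tolerant phy_init()/phy_power_on()/phy_power_off()) are meant to be used roughly as in the sketch below. This is an illustrative consumer, not code from this patch; the driver name foo and the con_id "sata" are made up.

	/* Hypothetical consumer sketch; foo and the "sata" con_id are illustrative. */
	#include <linux/err.h>
	#include <linux/phy/phy.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct phy *phy;
		int ret;

		/* NULL (not an error) is returned when no "sata" phy is described. */
		phy = devm_phy_optional_get(&pdev->dev, "sata");
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		/* These calls are no-ops when phy == NULL, so no extra checks are needed. */
		ret = phy_init(phy);
		if (ret)
			return ret;

		ret = phy_power_on(phy);
		if (ret) {
			phy_exit(phy);
			return ret;
		}

		return 0;
	}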
+
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 1dbe6ce7b2ce..0786fef842e7 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -76,10 +76,6 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
@@ -87,6 +83,10 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
}
phy_set_drvdata(phy, state);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 0c5efab11af1..7f139326a642 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -134,11 +134,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, state);
spin_lock_init(&state->slock);
- phy_provider = devm_of_phy_provider_register(dev,
- exynos_mipi_video_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev,
&exynos_mipi_video_phy_ops, NULL);
@@ -152,6 +147,11 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, &state->phys[i]);
}
+ phy_provider = devm_of_phy_provider_register(dev,
+ exynos_mipi_video_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
new file mode 100644
index 000000000000..236a52ad94eb
--- /dev/null
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -0,0 +1,261 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 4210 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+
+/* PHY power control */
+#define EXYNOS_4210_UPHYPWR 0x0
+
+#define EXYNOS_4210_UPHYPWR_PHY0_SUSPEND BIT(0)
+#define EXYNOS_4210_UPHYPWR_PHY0_PWR BIT(3)
+#define EXYNOS_4210_UPHYPWR_PHY0_OTG_PWR BIT(4)
+#define EXYNOS_4210_UPHYPWR_PHY0_SLEEP BIT(5)
+#define EXYNOS_4210_UPHYPWR_PHY0 ( \
+ EXYNOS_4210_UPHYPWR_PHY0_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_PHY0_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY0_OTG_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY0_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_PHY1_SUSPEND BIT(6)
+#define EXYNOS_4210_UPHYPWR_PHY1_PWR BIT(7)
+#define EXYNOS_4210_UPHYPWR_PHY1_SLEEP BIT(8)
+#define EXYNOS_4210_UPHYPWR_PHY1 ( \
+ EXYNOS_4210_UPHYPWR_PHY1_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_PHY1_PWR | \
+ EXYNOS_4210_UPHYPWR_PHY1_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_HSIC0_SUSPEND BIT(9)
+#define EXYNOS_4210_UPHYPWR_HSIC0_SLEEP BIT(10)
+#define EXYNOS_4210_UPHYPWR_HSIC0 ( \
+ EXYNOS_4210_UPHYPWR_HSIC0_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_HSIC0_SLEEP)
+
+#define EXYNOS_4210_UPHYPWR_HSIC1_SUSPEND BIT(11)
+#define EXYNOS_4210_UPHYPWR_HSIC1_SLEEP BIT(12)
+#define EXYNOS_4210_UPHYPWR_HSIC1 ( \
+ EXYNOS_4210_UPHYPWR_HSIC1_SUSPEND | \
+ EXYNOS_4210_UPHYPWR_HSIC1_SLEEP)
+
+/* PHY clock control */
+#define EXYNOS_4210_UPHYCLK 0x4
+
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_MASK (0x3 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_OFFSET 0
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_48MHZ (0x0 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_24MHZ (0x3 << 0)
+#define EXYNOS_4210_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0)
+
+#define EXYNOS_4210_UPHYCLK_PHY0_ID_PULLUP BIT(2)
+#define EXYNOS_4210_UPHYCLK_PHY0_COMMON_ON BIT(4)
+#define EXYNOS_4210_UPHYCLK_PHY1_COMMON_ON BIT(7)
+
+/* PHY reset control */
+#define EXYNOS_4210_UPHYRST 0x8
+
+#define EXYNOS_4210_URSTCON_PHY0 BIT(0)
+#define EXYNOS_4210_URSTCON_OTG_HLINK BIT(1)
+#define EXYNOS_4210_URSTCON_OTG_PHYLINK BIT(2)
+#define EXYNOS_4210_URSTCON_PHY1_ALL BIT(3)
+#define EXYNOS_4210_URSTCON_PHY1_P0 BIT(4)
+#define EXYNOS_4210_URSTCON_PHY1_P1P2 BIT(5)
+#define EXYNOS_4210_URSTCON_HOST_LINK_ALL BIT(6)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P0 BIT(7)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P1 BIT(8)
+#define EXYNOS_4210_URSTCON_HOST_LINK_P2 BIT(9)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_4210_USB_ISOL_DEVICE_OFFSET 0x704
+#define EXYNOS_4210_USB_ISOL_DEVICE BIT(0)
+#define EXYNOS_4210_USB_ISOL_HOST_OFFSET 0x708
+#define EXYNOS_4210_USB_ISOL_HOST BIT(0)
+
+/* USB PHY1 floating prevention */
+#define EXYNOS_4210_UPHY1CON 0x34
+#define EXYNOS_4210_UPHY1CON_FLOAT_PREVENTION 0x1
+
+/* Mode switching: USB Device <-> Host */
+#define EXYNOS_4210_MODE_SWITCH_OFFSET 0x21c
+#define EXYNOS_4210_MODE_SWITCH_MASK 1
+#define EXYNOS_4210_MODE_SWITCH_DEVICE 0
+#define EXYNOS_4210_MODE_SWITCH_HOST 1
+
+enum exynos4210_phy_id {
+ EXYNOS4210_DEVICE,
+ EXYNOS4210_HOST,
+ EXYNOS4210_HSIC0,
+ EXYNOS4210_HSIC1,
+ EXYNOS4210_NUM_PHYS,
+};
+
+/*
+ * exynos4210_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos4210_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ switch (rate) {
+ case 12 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_12MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_24MHZ;
+ break;
+ case 48 * MHZ:
+ *reg = EXYNOS_4210_UPHYCLK_PHYFSEL_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
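
For orientation, the rate_to_clk() callback above is hooked into exynos4210_usb2_phy_config at the end of this file and is expected to be invoked once by the common phy-samsung-usb2 driver to derive drv->ref_reg_val from the reference clock rate. The sketch below shows only that presumed call pattern; drv->ref_clk and drv->cfg are assumed fields of the common driver structure (declared in phy-samsung-usb2.h, which is not part of this hunk).

	/*
	 * Illustrative sketch, not code from this patch: how the common driver
	 * presumably turns the reference clock rate into ref_reg_val, which
	 * exynos4210_phy_pwr() later writes into the PHYFSEL field of UPHYCLK.
	 * Requires <linux/clk.h>; drv->ref_clk and drv->cfg are assumptions.
	 */
	static int sketch_set_ref_clock(struct samsung_usb2_phy_driver *drv)
	{
		unsigned long rate = clk_get_rate(drv->ref_clk);

		return drv->cfg->rate_to_clk(rate, &drv->ref_reg_val);
	}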
+
+static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4210_DEVICE:
+ offset = EXYNOS_4210_USB_ISOL_DEVICE_OFFSET;
+ mask = EXYNOS_4210_USB_ISOL_DEVICE;
+ break;
+ case EXYNOS4210_HOST:
+ offset = EXYNOS_4210_USB_ISOL_HOST_OFFSET;
+ mask = EXYNOS_4210_USB_ISOL_HOST;
+ break;
+ default:
+ return;
+ }
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
+
+static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 rstbits = 0;
+ u32 phypwr = 0;
+ u32 rst;
+ u32 pwr;
+ u32 clk;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4210_DEVICE:
+ phypwr = EXYNOS_4210_UPHYPWR_PHY0;
+ rstbits = EXYNOS_4210_URSTCON_PHY0;
+ break;
+ case EXYNOS4210_HOST:
+ phypwr = EXYNOS_4210_UPHYPWR_PHY1;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_ALL |
+ EXYNOS_4210_URSTCON_PHY1_P0 |
+ EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_ALL |
+ EXYNOS_4210_URSTCON_HOST_LINK_P0;
+ writel(on, drv->reg_phy + EXYNOS_4210_UPHY1CON);
+ break;
+ case EXYNOS4210_HSIC0:
+ phypwr = EXYNOS_4210_UPHYPWR_HSIC0;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_P1;
+ break;
+ case EXYNOS4210_HSIC1:
+ phypwr = EXYNOS_4210_UPHYPWR_HSIC1;
+ rstbits = EXYNOS_4210_URSTCON_PHY1_P1P2 |
+ EXYNOS_4210_URSTCON_HOST_LINK_P2;
+ break;
+ }
+
+ if (on) {
+ clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK);
+ clk &= ~EXYNOS_4210_UPHYCLK_PHYFSEL_MASK;
+ clk |= drv->ref_reg_val << EXYNOS_4210_UPHYCLK_PHYFSEL_OFFSET;
+ writel(clk, drv->reg_phy + EXYNOS_4210_UPHYCLK);
+
+ pwr = readl(drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ pwr &= ~phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4210_UPHYPWR);
+
+ rst = readl(drv->reg_phy + EXYNOS_4210_UPHYRST);
+ rst |= rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4210_UPHYRST);
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4210_UPHYRST);
+ /*
+ * The following delay is necessary for the reset sequence to be
+ * completed.
+ */
+ udelay(80);
+ } else {
+ pwr = readl(drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ pwr |= phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4210_UPHYPWR);
+ }
+}
+
+static int exynos4210_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ /* Order of initialisation is important - first power then isolation */
+ exynos4210_phy_pwr(inst, 1);
+ exynos4210_isol(inst, 0);
+
+ return 0;
+}
+
+static int exynos4210_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ exynos4210_isol(inst, 1);
+ exynos4210_phy_pwr(inst, 0);
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos4210_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS4210_DEVICE,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS4210_HOST,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS4210_HSIC0,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS4210_HSIC1,
+ .power_on = exynos4210_power_on,
+ .power_off = exynos4210_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
+ .has_mode_switch = 0,
+ .num_phys = EXYNOS4210_NUM_PHYS,
+ .phys = exynos4210_phys,
+ .rate_to_clk = exynos4210_rate_to_clk,
+};
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
new file mode 100644
index 000000000000..d92a7cc5698a
--- /dev/null
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -0,0 +1,328 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 4x12 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+
+/* PHY power control */
+#define EXYNOS_4x12_UPHYPWR 0x0
+
+#define EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND BIT(0)
+#define EXYNOS_4x12_UPHYPWR_PHY0_PWR BIT(3)
+#define EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR BIT(4)
+#define EXYNOS_4x12_UPHYPWR_PHY0_SLEEP BIT(5)
+#define EXYNOS_4x12_UPHYPWR_PHY0 ( \
+ EXYNOS_4x12_UPHYPWR_PHY0_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_PHY0_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY0_OTG_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY0_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND BIT(6)
+#define EXYNOS_4x12_UPHYPWR_PHY1_PWR BIT(7)
+#define EXYNOS_4x12_UPHYPWR_PHY1_SLEEP BIT(8)
+#define EXYNOS_4x12_UPHYPWR_PHY1 ( \
+ EXYNOS_4x12_UPHYPWR_PHY1_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_PHY1_PWR | \
+ EXYNOS_4x12_UPHYPWR_PHY1_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND BIT(9)
+#define EXYNOS_4x12_UPHYPWR_HSIC0_PWR BIT(10)
+#define EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP BIT(11)
+#define EXYNOS_4x12_UPHYPWR_HSIC0 ( \
+ EXYNOS_4x12_UPHYPWR_HSIC0_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_HSIC0_PWR | \
+ EXYNOS_4x12_UPHYPWR_HSIC0_SLEEP)
+
+#define EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND BIT(12)
+#define EXYNOS_4x12_UPHYPWR_HSIC1_PWR BIT(13)
+#define EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP BIT(14)
+#define EXYNOS_4x12_UPHYPWR_HSIC1 ( \
+ EXYNOS_4x12_UPHYPWR_HSIC1_SUSPEND | \
+ EXYNOS_4x12_UPHYPWR_HSIC1_PWR | \
+ EXYNOS_4x12_UPHYPWR_HSIC1_SLEEP)
+
+/* PHY clock control */
+#define EXYNOS_4x12_UPHYCLK 0x4
+
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK (0x7 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET 0
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6 (0x0 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ (0x1 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2 (0x3 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ (0x4 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ (0x5 << 0)
+#define EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ (0x7 << 0)
+
+#define EXYNOS_4x12_UPHYCLK_PHY0_ID_PULLUP BIT(3)
+#define EXYNOS_4x12_UPHYCLK_PHY0_COMMON_ON BIT(4)
+#define EXYNOS_4x12_UPHYCLK_PHY1_COMMON_ON BIT(7)
+
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_MASK (0x7f << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_OFFSET 10
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_12MHZ (0x24 << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_15MHZ (0x1c << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_16MHZ (0x1a << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_19MHZ2 (0x15 << 10)
+#define EXYNOS_4x12_UPHYCLK_HSIC_REFCLK_20MHZ (0x14 << 10)
+
+/* PHY reset control */
+#define EXYNOS_4x12_UPHYRST 0x8
+
+#define EXYNOS_4x12_URSTCON_PHY0 BIT(0)
+#define EXYNOS_4x12_URSTCON_OTG_HLINK BIT(1)
+#define EXYNOS_4x12_URSTCON_OTG_PHYLINK BIT(2)
+#define EXYNOS_4x12_URSTCON_HOST_PHY BIT(3)
+#define EXYNOS_4x12_URSTCON_PHY1 BIT(4)
+#define EXYNOS_4x12_URSTCON_HSIC0 BIT(5)
+#define EXYNOS_4x12_URSTCON_HSIC1 BIT(6)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_ALL BIT(7)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P0 BIT(8)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P1 BIT(9)
+#define EXYNOS_4x12_URSTCON_HOST_LINK_P2 BIT(10)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_4x12_USB_ISOL_OFFSET 0x704
+#define EXYNOS_4x12_USB_ISOL_OTG BIT(0)
+#define EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET 0x708
+#define EXYNOS_4x12_USB_ISOL_HSIC0 BIT(0)
+#define EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET 0x70c
+#define EXYNOS_4x12_USB_ISOL_HSIC1 BIT(0)
+
+/* Mode switching: USB Device <-> Host */
+#define EXYNOS_4x12_MODE_SWITCH_OFFSET 0x21c
+#define EXYNOS_4x12_MODE_SWITCH_MASK 1
+#define EXYNOS_4x12_MODE_SWITCH_DEVICE 0
+#define EXYNOS_4x12_MODE_SWITCH_HOST 1
+
+enum exynos4x12_phy_id {
+ EXYNOS4x12_DEVICE,
+ EXYNOS4x12_HOST,
+ EXYNOS4x12_HSIC0,
+ EXYNOS4x12_HSIC1,
+ EXYNOS4x12_NUM_PHYS,
+};
+
+/*
+ * exynos4x12_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos4x12_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ /* EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK */
+
+ switch (rate) {
+ case 9600 * KHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_9MHZ6;
+ break;
+ case 10 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_10MHZ;
+ break;
+ case 12 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_12MHZ;
+ break;
+ case 19200 * KHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_19MHZ2;
+ break;
+ case 20 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_20MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_24MHZ;
+ break;
+ case 50 * MHZ:
+ *reg = EXYNOS_4x12_UPHYCLK_PHYFSEL_50MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4x12_DEVICE:
+ case EXYNOS4x12_HOST:
+ offset = EXYNOS_4x12_USB_ISOL_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_OTG;
+ break;
+ case EXYNOS4x12_HSIC0:
+ offset = EXYNOS_4x12_USB_ISOL_HSIC0_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_HSIC0;
+ break;
+ case EXYNOS4x12_HSIC1:
+ offset = EXYNOS_4x12_USB_ISOL_HSIC1_OFFSET;
+ mask = EXYNOS_4x12_USB_ISOL_HSIC1;
+ break;
+ default:
+ return;
+ }
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
+
+static void exynos4x12_setup_clk(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 clk;
+
+ clk = readl(drv->reg_phy + EXYNOS_4x12_UPHYCLK);
+ clk &= ~EXYNOS_4x12_UPHYCLK_PHYFSEL_MASK;
+ clk |= drv->ref_reg_val << EXYNOS_4x12_UPHYCLK_PHYFSEL_OFFSET;
+ writel(clk, drv->reg_phy + EXYNOS_4x12_UPHYCLK);
+}
+
+static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 rstbits = 0;
+ u32 phypwr = 0;
+ u32 rst;
+ u32 pwr;
+ u32 mode = 0;
+ u32 switch_mode = 0;
+
+ switch (inst->cfg->id) {
+ case EXYNOS4x12_DEVICE:
+ phypwr = EXYNOS_4x12_UPHYPWR_PHY0;
+ rstbits = EXYNOS_4x12_URSTCON_PHY0;
+ mode = EXYNOS_4x12_MODE_SWITCH_DEVICE;
+ switch_mode = 1;
+ break;
+ case EXYNOS4x12_HOST:
+ phypwr = EXYNOS_4x12_UPHYPWR_PHY1;
+ rstbits = EXYNOS_4x12_URSTCON_HOST_PHY;
+ mode = EXYNOS_4x12_MODE_SWITCH_HOST;
+ switch_mode = 1;
+ break;
+ case EXYNOS4x12_HSIC0:
+ phypwr = EXYNOS_4x12_UPHYPWR_HSIC0;
+ rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
+ EXYNOS_4x12_URSTCON_HOST_LINK_P0 |
+ EXYNOS_4x12_URSTCON_HOST_PHY;
+ break;
+ case EXYNOS4x12_HSIC1:
+ phypwr = EXYNOS_4x12_UPHYPWR_HSIC1;
+ rstbits = EXYNOS_4x12_URSTCON_HSIC1 |
+ EXYNOS_4x12_URSTCON_HOST_LINK_P1;
+ break;
+ }
+
+ if (on) {
+ if (switch_mode)
+ regmap_update_bits(drv->reg_sys,
+ EXYNOS_4x12_MODE_SWITCH_OFFSET,
+ EXYNOS_4x12_MODE_SWITCH_MASK, mode);
+
+ pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ pwr &= ~phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+
+ rst = readl(drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ rst |= rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + EXYNOS_4x12_UPHYRST);
+ /*
+ * The following delay is necessary for the reset sequence to be
+ * completed.
+ */
+ udelay(80);
+ } else {
+ pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ pwr |= phypwr;
+ writel(pwr, drv->reg_phy + EXYNOS_4x12_UPHYPWR);
+ }
+}
+
+static int exynos4x12_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+
+ inst->enabled = 1;
+ exynos4x12_setup_clk(inst);
+ exynos4x12_phy_pwr(inst, 1);
+ exynos4x12_isol(inst, 0);
+
+ /* Power on the device PHY, as it is necessary for HSIC to work */
+ if (inst->cfg->id == EXYNOS4x12_HSIC0) {
+ struct samsung_usb2_phy_instance *device =
+ &drv->instances[EXYNOS4x12_DEVICE];
+ exynos4x12_phy_pwr(device, 1);
+ exynos4x12_isol(device, 0);
+ }
+
+ return 0;
+}
+
+static int exynos4x12_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ struct samsung_usb2_phy_instance *device =
+ &drv->instances[EXYNOS4x12_DEVICE];
+
+ inst->enabled = 0;
+ exynos4x12_isol(inst, 1);
+ exynos4x12_phy_pwr(inst, 0);
+
+ if (inst->cfg->id == EXYNOS4x12_HSIC0 && !device->enabled) {
+ exynos4x12_isol(device, 1);
+ exynos4x12_phy_pwr(device, 0);
+ }
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS4x12_DEVICE,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS4x12_HOST,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS4x12_HSIC0,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS4x12_HSIC1,
+ .power_on = exynos4x12_power_on,
+ .power_off = exynos4x12_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config = {
+ .has_mode_switch = 1,
+ .num_phys = EXYNOS4x12_NUM_PHYS,
+ .phys = exynos4x12_phys,
+ .rate_to_clk = exynos4x12_rate_to_clk,
+};
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/phy-exynos5250-sata.c
new file mode 100644
index 000000000000..c9361b731fa6
--- /dev/null
+++ b/drivers/phy/phy-exynos5250-sata.c
@@ -0,0 +1,251 @@
+/*
+ * Samsung SATA SerDes (PHY) driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Authors: Girish K S <ks.giri@samsung.com>
+ * Yuvaraj Kumar C D <yuvaraj.cd@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define SATAPHY_CONTROL_OFFSET 0x0724
+#define EXYNOS5_SATAPHY_PMU_ENABLE BIT(0)
+#define EXYNOS5_SATA_RESET 0x4
+#define RESET_GLOBAL_RST_N BIT(0)
+#define RESET_CMN_RST_N BIT(1)
+#define RESET_CMN_BLOCK_RST_N BIT(2)
+#define RESET_CMN_I2C_RST_N BIT(3)
+#define RESET_TX_RX_PIPE_RST_N BIT(4)
+#define RESET_TX_RX_BLOCK_RST_N BIT(5)
+#define RESET_TX_RX_I2C_RST_N (BIT(6) | BIT(7))
+#define LINK_RESET 0xf0000
+#define EXYNOS5_SATA_MODE0 0x10
+#define SATA_SPD_GEN3 BIT(1)
+#define EXYNOS5_SATA_CTRL0 0x14
+#define CTRL0_P0_PHY_CALIBRATED_SEL BIT(9)
+#define CTRL0_P0_PHY_CALIBRATED BIT(8)
+#define EXYNOS5_SATA_PHSATA_CTRLM 0xe0
+#define PHCTRLM_REF_RATE BIT(1)
+#define PHCTRLM_HIGH_SPEED BIT(0)
+#define EXYNOS5_SATA_PHSATA_STATM 0xf0
+#define PHSTATM_PLL_LOCKED BIT(0)
+
+#define PHY_PLL_TIMEOUT (usecs_to_jiffies(1000))
+
+struct exynos_sata_phy {
+ struct phy *phy;
+ struct clk *phyclk;
+ void __iomem *regs;
+ struct regmap *pmureg;
+ struct i2c_client *client;
+};
+
+static int wait_for_reg_status(void __iomem *base, u32 reg, u32 checkbit,
+ u32 status)
+{
+ unsigned long timeout = jiffies + PHY_PLL_TIMEOUT;
+
+ while (time_before(jiffies, timeout)) {
+ if ((readl(base + reg) & checkbit) == status)
+ return 0;
+ }
+
+ return -EFAULT;
+}
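
As a hedged aside, on kernels that provide <linux/iopoll.h> the same bounded poll can be expressed with readl_poll_timeout(); the sketch below is only an equivalent formulation (and it returns -ETIMEDOUT rather than -EFAULT on timeout), not part of this patch.

	/* Equivalent poll using readl_poll_timeout(); requires <linux/iopoll.h>. */
	static int sketch_wait_pll_locked(struct exynos_sata_phy *sata_phy)
	{
		u32 val;

		return readl_poll_timeout(sata_phy->regs + EXYNOS5_SATA_PHSATA_STATM,
					  val, val & PHSTATM_PLL_LOCKED, 10, 1000);
	}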
+
+static int exynos_sata_phy_power_on(struct phy *phy)
+{
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, true);
+
+}
+
+static int exynos_sata_phy_power_off(struct phy *phy)
+{
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ return regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, false);
+
+}
+
+static int exynos_sata_phy_init(struct phy *phy)
+{
+ u32 val = 0;
+ int ret = 0;
+ u8 buf[] = { 0x3a, 0x0b };
+ struct exynos_sata_phy *sata_phy = phy_get_drvdata(phy);
+
+ ret = regmap_update_bits(sata_phy->pmureg, SATAPHY_CONTROL_OFFSET,
+ EXYNOS5_SATAPHY_PMU_ENABLE, true);
+ if (ret != 0)
+ dev_err(&sata_phy->phy->dev, "phy init failed\n");
+
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_GLOBAL_RST_N | RESET_CMN_RST_N | RESET_CMN_BLOCK_RST_N
+ | RESET_CMN_I2C_RST_N | RESET_TX_RX_PIPE_RST_N
+ | RESET_TX_RX_BLOCK_RST_N | RESET_TX_RX_I2C_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= LINK_RESET;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+ val &= ~PHCTRLM_REF_RATE;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+
+ /* High speed enable for Gen3 */
+ val = readl(sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+ val |= PHCTRLM_HIGH_SPEED;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_PHSATA_CTRLM);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_CTRL0);
+ val |= CTRL0_P0_PHY_CALIBRATED_SEL | CTRL0_P0_PHY_CALIBRATED;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_CTRL0);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_MODE0);
+ val |= SATA_SPD_GEN3;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_MODE0);
+
+ ret = i2c_master_send(sata_phy->client, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ /* release cmu reset */
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val &= ~RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ val = readl(sata_phy->regs + EXYNOS5_SATA_RESET);
+ val |= RESET_CMN_RST_N;
+ writel(val, sata_phy->regs + EXYNOS5_SATA_RESET);
+
+ ret = wait_for_reg_status(sata_phy->regs,
+ EXYNOS5_SATA_PHSATA_STATM,
+ PHSTATM_PLL_LOCKED, 1);
+ if (ret < 0)
+ dev_err(&sata_phy->phy->dev,
+ "PHY PLL locking failed\n");
+ return ret;
+}
+
+static struct phy_ops exynos_sata_phy_ops = {
+ .init = exynos_sata_phy_init,
+ .power_on = exynos_sata_phy_power_on,
+ .power_off = exynos_sata_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int exynos_sata_phy_probe(struct platform_device *pdev)
+{
+ struct exynos_sata_phy *sata_phy;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ struct device_node *node;
+ int ret = 0;
+
+ sata_phy = devm_kzalloc(dev, sizeof(*sata_phy), GFP_KERNEL);
+ if (!sata_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ sata_phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sata_phy->regs))
+ return PTR_ERR(sata_phy->regs);
+
+ sata_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(sata_phy->pmureg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(sata_phy->pmureg);
+ }
+
+ node = of_parse_phandle(dev->of_node,
+ "samsung,exynos-sataphy-i2c-phandle", 0);
+ if (!node)
+ return -EINVAL;
+
+ sata_phy->client = of_find_i2c_device_by_node(node);
+ if (!sata_phy->client)
+ return -EPROBE_DEFER;
+
+ dev_set_drvdata(dev, sata_phy);
+
+ sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
+ if (IS_ERR(sata_phy->phyclk)) {
+ dev_err(dev, "failed to get clk for PHY\n");
+ return PTR_ERR(sata_phy->phyclk);
+ }
+
+ ret = clk_prepare_enable(sata_phy->phyclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable source clk\n");
+ return ret;
+ }
+
+ sata_phy->phy = devm_phy_create(dev, &exynos_sata_phy_ops, NULL);
+ if (IS_ERR(sata_phy->phy)) {
+ clk_disable_unprepare(sata_phy->phyclk);
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(sata_phy->phy);
+ }
+
+ phy_set_drvdata(sata_phy->phy, sata_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ clk_disable_unprepare(sata_phy->phyclk);
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id exynos_sata_phy_of_match[] = {
+ { .compatible = "samsung,exynos5250-sata-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_sata_phy_of_match);
+
+static struct platform_driver exynos_sata_phy_driver = {
+ .probe = exynos_sata_phy_probe,
+ .driver = {
+ .of_match_table = exynos_sata_phy_of_match,
+ .name = "samsung,sata-phy",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(exynos_sata_phy_driver);
+
+MODULE_DESCRIPTION("Samsung SerDes PHY driver");
+MODULE_LICENSE("GPL V2");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Yuvaraj C D <yuvaraj.cd@samsung.com>");
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
new file mode 100644
index 000000000000..94179afda951
--- /dev/null
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -0,0 +1,404 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - Exynos 5250 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+#define EXYNOS_5250_REFCLKSEL_CRYSTAL 0x0
+#define EXYNOS_5250_REFCLKSEL_XO 0x1
+#define EXYNOS_5250_REFCLKSEL_CLKCORE 0x2
+
+#define EXYNOS_5250_FSEL_9MHZ6 0x0
+#define EXYNOS_5250_FSEL_10MHZ 0x1
+#define EXYNOS_5250_FSEL_12MHZ 0x2
+#define EXYNOS_5250_FSEL_19MHZ2 0x3
+#define EXYNOS_5250_FSEL_20MHZ 0x4
+#define EXYNOS_5250_FSEL_24MHZ 0x5
+#define EXYNOS_5250_FSEL_50MHZ 0x7
+
+/* Normal host */
+#define EXYNOS_5250_HOSTPHYCTRL0 0x0
+
+#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL BIT(31)
+#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT 19
+#define EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_MASK \
+ (0x3 << EXYNOS_5250_HOSTPHYCTRL0_REFCLKSEL_SHIFT)
+#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT 16
+#define EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK \
+ (0x7 << EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT)
+#define EXYNOS_5250_HOSTPHYCTRL0_TESTBURNIN BIT(11)
+#define EXYNOS_5250_HOSTPHYCTRL0_RETENABLE BIT(10)
+#define EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N BIT(9)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_MASK (0x3 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_DUAL (0x0 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ID0 (0x1 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_VATESTENB_ANALOGTEST (0x2 << 7)
+#define EXYNOS_5250_HOSTPHYCTRL0_SIDDQ BIT(6)
+#define EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP BIT(5)
+#define EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND BIT(4)
+#define EXYNOS_5250_HOSTPHYCTRL0_WORDINTERFACE BIT(3)
+#define EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST BIT(2)
+#define EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST BIT(1)
+#define EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST BIT(0)
+
+/* HSIC0 & HSIC1 */
+#define EXYNOS_5250_HSICPHYCTRL1 0x10
+#define EXYNOS_5250_HSICPHYCTRL2 0x20
+
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_MASK (0x3 << 23)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT (0x2 << 23)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_MASK (0x7f << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 (0x24 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_15 (0x1c << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_16 (0x1a << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_19_2 (0x15 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_20 (0x14 << 16)
+#define EXYNOS_5250_HSICPHYCTRLX_SIDDQ BIT(6)
+#define EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP BIT(5)
+#define EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND BIT(4)
+#define EXYNOS_5250_HSICPHYCTRLX_WORDINTERFACE BIT(3)
+#define EXYNOS_5250_HSICPHYCTRLX_UTMISWRST BIT(2)
+#define EXYNOS_5250_HSICPHYCTRLX_PHYSWRST BIT(0)
+
+/* EHCI control */
+#define EXYNOS_5250_HOSTEHCICTRL 0x30
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN BIT(29)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 BIT(28)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 BIT(27)
+#define EXYNOS_5250_HOSTEHCICTRL_ENAINCR16 BIT(26)
+#define EXYNOS_5250_HOSTEHCICTRL_AUTOPPDONOVRCUREN BIT(25)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT 19
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL0_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT 13
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL1_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT 7
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_MASK \
+ (0x3f << EXYNOS_5250_HOSTEHCICTRL_FLADJVAL2_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT 1
+#define EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_MASK \
+ (0x1 << EXYNOS_5250_HOSTEHCICTRL_FLADJVALHOST_SHIFT)
+#define EXYNOS_5250_HOSTEHCICTRL_SIMULATIONMODE BIT(0)
+
+/* OHCI control */
+#define EXYNOS_5250_HOSTOHCICTRL 0x34
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT 1
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_MASK \
+ (0x3ff << EXYNOS_5250_HOSTOHCICTRL_FRAMELENVAL_SHIFT)
+#define EXYNOS_5250_HOSTOHCICTRL_FRAMELENVALEN BIT(0)
+
+/* USBOTG */
+#define EXYNOS_5250_USBOTGSYS 0x38
+#define EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET BIT(14)
+#define EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG BIT(13)
+#define EXYNOS_5250_USBOTGSYS_PHY_SW_RST BIT(12)
+#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT 9
+#define EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK \
+ (0x3 << EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT)
+#define EXYNOS_5250_USBOTGSYS_ID_PULLUP BIT(8)
+#define EXYNOS_5250_USBOTGSYS_COMMON_ON BIT(7)
+#define EXYNOS_5250_USBOTGSYS_FSEL_SHIFT 4
+#define EXYNOS_5250_USBOTGSYS_FSEL_MASK \
+ (0x3 << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT)
+#define EXYNOS_5250_USBOTGSYS_FORCE_SLEEP BIT(3)
+#define EXYNOS_5250_USBOTGSYS_OTGDISABLE BIT(2)
+#define EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG BIT(1)
+#define EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND BIT(0)
+
+/* Isolation, configured in the power management unit */
+#define EXYNOS_5250_USB_ISOL_OTG_OFFSET 0x704
+#define EXYNOS_5250_USB_ISOL_OTG BIT(0)
+#define EXYNOS_5250_USB_ISOL_HOST_OFFSET 0x708
+#define EXYNOS_5250_USB_ISOL_HOST BIT(0)
+
+/* Mode switch register */
+#define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230
+#define EXYNOS_5250_MODE_SWITCH_MASK 1
+#define EXYNOS_5250_MODE_SWITCH_DEVICE 0
+#define EXYNOS_5250_MODE_SWITCH_HOST 1
+
+enum exynos5250_phy_id {
+ EXYNOS5250_DEVICE,
+ EXYNOS5250_HOST,
+ EXYNOS5250_HSIC0,
+ EXYNOS5250_HSIC1,
+ EXYNOS5250_NUM_PHYS,
+};
+
+/*
+ * exynos5250_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int exynos5250_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ /* EXYNOS_5250_FSEL_MASK */
+
+ switch (rate) {
+ case 9600 * KHZ:
+ *reg = EXYNOS_5250_FSEL_9MHZ6;
+ break;
+ case 10 * MHZ:
+ *reg = EXYNOS_5250_FSEL_10MHZ;
+ break;
+ case 12 * MHZ:
+ *reg = EXYNOS_5250_FSEL_12MHZ;
+ break;
+ case 19200 * KHZ:
+ *reg = EXYNOS_5250_FSEL_19MHZ2;
+ break;
+ case 20 * MHZ:
+ *reg = EXYNOS_5250_FSEL_20MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = EXYNOS_5250_FSEL_24MHZ;
+ break;
+ case 50 * MHZ:
+ *reg = EXYNOS_5250_FSEL_50MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 offset;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ offset = EXYNOS_5250_USB_ISOL_OTG_OFFSET;
+ mask = EXYNOS_5250_USB_ISOL_OTG;
+ break;
+ case EXYNOS5250_HOST:
+ offset = EXYNOS_5250_USB_ISOL_HOST_OFFSET;
+ mask = EXYNOS_5250_USB_ISOL_HOST;
+ break;
+ default:
+ return;
+ }
+
+ regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
+}
+
+static int exynos5250_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 ctrl0;
+ u32 otg;
+ u32 ehci;
+ u32 ohci;
+ u32 hsic;
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ regmap_update_bits(drv->reg_sys,
+ EXYNOS_5250_MODE_SWITCH_OFFSET,
+ EXYNOS_5250_MODE_SWITCH_MASK,
+ EXYNOS_5250_MODE_SWITCH_DEVICE);
+
+ /* OTG configuration */
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ /* The clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
+ otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
+ /* Reset */
+ otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
+ otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE;
+ /* Ref clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
+ otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
+ EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ udelay(100);
+ otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE);
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+
+
+ break;
+ case EXYNOS5250_HOST:
+ case EXYNOS5250_HSIC0:
+ case EXYNOS5250_HSIC1:
+ /* Host registers configuration */
+ ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ /* The clock */
+ ctrl0 &= ~EXYNOS_5250_HOSTPHYCTRL0_FSEL_MASK;
+ ctrl0 |= drv->ref_reg_val <<
+ EXYNOS_5250_HOSTPHYCTRL0_FSEL_SHIFT;
+
+ /* Reset */
+ ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL |
+ EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP);
+ ctrl0 |= EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_COMMON_ON_N;
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ udelay(10);
+ ctrl0 &= ~(EXYNOS_5250_HOSTPHYCTRL0_LINKSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_UTMISWRST);
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+
+ /* OTG configuration */
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ /* The clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_FSEL_MASK;
+ otg |= drv->ref_reg_val << EXYNOS_5250_USBOTGSYS_FSEL_SHIFT;
+ /* Reset */
+ otg &= ~(EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG);
+ otg |= EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_OTGDISABLE;
+ /* Ref clock */
+ otg &= ~EXYNOS_5250_USBOTGSYS_REFCLKSEL_MASK;
+ otg |= EXYNOS_5250_REFCLKSEL_CLKCORE <<
+ EXYNOS_5250_USBOTGSYS_REFCLKSEL_SHIFT;
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ udelay(10);
+ otg &= ~(EXYNOS_5250_USBOTGSYS_PHY_SW_RST |
+ EXYNOS_5250_USBOTGSYS_LINK_SW_RST_UOTG |
+ EXYNOS_5250_USBOTGSYS_PHYLINK_SW_RESET);
+
+ /* HSIC phy configuration */
+ hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
+ EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
+ EXYNOS_5250_HSICPHYCTRLX_PHYSWRST);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ udelay(10);
+ hsic &= ~EXYNOS_5250_HSICPHYCTRLX_PHYSWRST;
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ /*
+ * The following delay is necessary for the reset sequence to be
+ * completed.
+ */
+ udelay(80);
+
+ /* Enable EHCI DMA burst */
+ ehci = readl(drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
+ ehci |= EXYNOS_5250_HOSTEHCICTRL_ENAINCRXALIGN |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR4 |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR8 |
+ EXYNOS_5250_HOSTEHCICTRL_ENAINCR16;
+ writel(ehci, drv->reg_phy + EXYNOS_5250_HOSTEHCICTRL);
+
+ /* OHCI settings */
+ ohci = readl(drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
+ /* Following code is based on the old driver */
+ ohci |= 0x1 << 3;
+ writel(ohci, drv->reg_phy + EXYNOS_5250_HOSTOHCICTRL);
+
+ break;
+ }
+ inst->enabled = 1;
+ exynos5250_isol(inst, 0);
+
+ return 0;
+}
+
+static int exynos5250_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 ctrl0;
+ u32 otg;
+ u32 hsic;
+
+ inst->enabled = 0;
+ exynos5250_isol(inst, 1);
+
+ switch (inst->cfg->id) {
+ case EXYNOS5250_DEVICE:
+ otg = readl(drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ otg |= (EXYNOS_5250_USBOTGSYS_FORCE_SUSPEND |
+ EXYNOS_5250_USBOTGSYS_SIDDQ_UOTG |
+ EXYNOS_5250_USBOTGSYS_FORCE_SLEEP);
+ writel(otg, drv->reg_phy + EXYNOS_5250_USBOTGSYS);
+ break;
+ case EXYNOS5250_HOST:
+ ctrl0 = readl(drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ ctrl0 |= (EXYNOS_5250_HOSTPHYCTRL0_SIDDQ |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESUSPEND |
+ EXYNOS_5250_HOSTPHYCTRL0_FORCESLEEP |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRST |
+ EXYNOS_5250_HOSTPHYCTRL0_PHYSWRSTALL);
+ writel(ctrl0, drv->reg_phy + EXYNOS_5250_HOSTPHYCTRL0);
+ break;
+ case EXYNOS5250_HSIC0:
+ case EXYNOS5250_HSIC1:
+ hsic = (EXYNOS_5250_HSICPHYCTRLX_REFCLKDIV_12 |
+ EXYNOS_5250_HSICPHYCTRLX_REFCLKSEL_DEFAULT |
+ EXYNOS_5250_HSICPHYCTRLX_SIDDQ |
+ EXYNOS_5250_HSICPHYCTRLX_FORCESLEEP |
+ EXYNOS_5250_HSICPHYCTRLX_FORCESUSPEND
+ );
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL1);
+ writel(hsic, drv->reg_phy + EXYNOS_5250_HSICPHYCTRL2);
+ break;
+ }
+
+ return 0;
+}
+
+
+static const struct samsung_usb2_common_phy exynos5250_phys[] = {
+ {
+ .label = "device",
+ .id = EXYNOS5250_DEVICE,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "host",
+ .id = EXYNOS5250_HOST,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "hsic0",
+ .id = EXYNOS5250_HSIC0,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {
+ .label = "hsic1",
+ .id = EXYNOS5250_HSIC1,
+ .power_on = exynos5250_power_on,
+ .power_off = exynos5250_power_off,
+ },
+ {},
+};
+
+const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
+ .has_mode_switch = 1,
+ .num_phys = EXYNOS5250_NUM_PHYS,
+ .phys = exynos5250_phys,
+ .rate_to_clk = exynos5250_rate_to_clk,
+};
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d43786f62437..d70ecd6a1b3f 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -99,17 +99,17 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- phy_provider = devm_of_phy_provider_register(&pdev->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
	/* The boot loader may have left it on. Turn it off. */
phy_mvebu_sata_power_off(phy);
diff --git a/drivers/usb/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index e7253182e47d..311b4f9a5132 100644
--- a/drivers/usb/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -1,5 +1,5 @@
/*
- * omap-control-usb.c - The USB part of control module.
+ * omap-control-phy.c - The PHY part of control module.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
* This program is free software; you can redistribute it and/or modify
@@ -24,36 +24,36 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
/**
- * omap_control_usb_phy_power - power on/off the phy using control module reg
+ * omap_control_phy_power - power on/off the phy using control module reg
* @dev: the control module device
* @on: 0 or 1, based on powering on or off the PHY
*/
-void omap_control_usb_phy_power(struct device *dev, int on)
+void omap_control_phy_power(struct device *dev, int on)
{
u32 val;
unsigned long rate;
- struct omap_control_usb *control_usb;
+ struct omap_control_phy *control_phy;
if (IS_ERR(dev) || !dev) {
pr_err("%s: invalid device\n", __func__);
return;
}
- control_usb = dev_get_drvdata(dev);
- if (!control_usb) {
- dev_err(dev, "%s: invalid control usb device\n", __func__);
+ control_phy = dev_get_drvdata(dev);
+ if (!control_phy) {
+ dev_err(dev, "%s: invalid control phy device\n", __func__);
return;
}
- if (control_usb->type == OMAP_CTRL_TYPE_OTGHS)
+ if (control_phy->type == OMAP_CTRL_TYPE_OTGHS)
return;
- val = readl(control_usb->power);
+ val = readl(control_phy->power);
- switch (control_usb->type) {
+ switch (control_phy->type) {
case OMAP_CTRL_TYPE_USB2:
if (on)
val &= ~OMAP_CTRL_DEV_PHY_PD;
@@ -62,19 +62,20 @@ void omap_control_usb_phy_power(struct device *dev, int on)
break;
case OMAP_CTRL_TYPE_PIPE3:
- rate = clk_get_rate(control_usb->sys_clk);
+ rate = clk_get_rate(control_phy->sys_clk);
rate = rate/1000000;
if (on) {
- val &= ~(OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK |
- OMAP_CTRL_USB_PWRCTL_CLK_FREQ_MASK);
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWERON <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
- val |= rate << OMAP_CTRL_USB_PWRCTL_CLK_FREQ_SHIFT;
+ val &= ~(OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK |
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK);
+ val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
+ val |= rate <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
} else {
- val &= ~OMAP_CTRL_USB_PWRCTL_CLK_CMD_MASK;
- val |= OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF <<
- OMAP_CTRL_USB_PWRCTL_CLK_CMD_SHIFT;
+ val &= ~OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK;
+ val |= OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF <<
+ OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT;
}
break;
@@ -100,66 +101,66 @@ void omap_control_usb_phy_power(struct device *dev, int on)
break;
default:
dev_err(dev, "%s: type %d not recognized\n",
- __func__, control_usb->type);
+ __func__, control_phy->type);
break;
}
- writel(val, control_usb->power);
+ writel(val, control_phy->power);
}
-EXPORT_SYMBOL_GPL(omap_control_usb_phy_power);
+EXPORT_SYMBOL_GPL(omap_control_phy_power);
/**
 * omap_control_usb_host_mode - set AVALID and VBUSVALID, and ground the ID pin
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that a usb
* device has been connected.
*/
-static void omap_control_usb_host_mode(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_host_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND);
val |= OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
 * omap_control_usb_device_mode - set AVALID and VBUSVALID, and put the ID pin
 * in high impedance
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core that it has been
* connected to a usb host.
*/
-static void omap_control_usb_device_mode(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_device_mode(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~OMAP_CTRL_DEV_SESSEND;
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_AVALID |
OMAP_CTRL_DEV_VBUSVALID;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
 * omap_control_usb_set_sessionend - enable SESSIONEND and put IDDIG in high
 * impedance
- * @ctrl_usb: struct omap_control_usb *
+ * @ctrl_phy: struct omap_control_phy *
*
* Writes to the mailbox register to notify the usb core it's now in
* disconnected state.
*/
-static void omap_control_usb_set_sessionend(struct omap_control_usb *ctrl_usb)
+static void omap_control_usb_set_sessionend(struct omap_control_phy *ctrl_phy)
{
u32 val;
- val = readl(ctrl_usb->otghs_control);
+ val = readl(ctrl_phy->otghs_control);
val &= ~(OMAP_CTRL_DEV_AVALID | OMAP_CTRL_DEV_VBUSVALID);
val |= OMAP_CTRL_DEV_IDDIG | OMAP_CTRL_DEV_SESSEND;
- writel(val, ctrl_usb->otghs_control);
+ writel(val, ctrl_phy->otghs_control);
}
/**
@@ -174,30 +175,30 @@ static void omap_control_usb_set_sessionend(struct omap_control_usb *ctrl_usb)
void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode)
{
- struct omap_control_usb *ctrl_usb;
+ struct omap_control_phy *ctrl_phy;
if (IS_ERR(dev) || !dev)
return;
- ctrl_usb = dev_get_drvdata(dev);
+ ctrl_phy = dev_get_drvdata(dev);
- if (!ctrl_usb) {
- dev_err(dev, "Invalid control usb device\n");
+ if (!ctrl_phy) {
+ dev_err(dev, "Invalid control phy device\n");
return;
}
- if (ctrl_usb->type != OMAP_CTRL_TYPE_OTGHS)
+ if (ctrl_phy->type != OMAP_CTRL_TYPE_OTGHS)
return;
switch (mode) {
case USB_MODE_HOST:
- omap_control_usb_host_mode(ctrl_usb);
+ omap_control_usb_host_mode(ctrl_phy);
break;
case USB_MODE_DEVICE:
- omap_control_usb_device_mode(ctrl_usb);
+ omap_control_usb_device_mode(ctrl_phy);
break;
case USB_MODE_DISCONNECT:
- omap_control_usb_set_sessionend(ctrl_usb);
+ omap_control_usb_set_sessionend(ctrl_phy);
break;
default:
dev_vdbg(dev, "invalid omap control usb mode\n");
@@ -207,13 +208,13 @@ EXPORT_SYMBOL_GPL(omap_control_usb_set_mode);
#ifdef CONFIG_OF
-static const enum omap_control_usb_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
-static const enum omap_control_usb_type usb2_data = OMAP_CTRL_TYPE_USB2;
-static const enum omap_control_usb_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
-static const enum omap_control_usb_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
-static const enum omap_control_usb_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
+static const enum omap_control_phy_type otghs_data = OMAP_CTRL_TYPE_OTGHS;
+static const enum omap_control_phy_type usb2_data = OMAP_CTRL_TYPE_USB2;
+static const enum omap_control_phy_type pipe3_data = OMAP_CTRL_TYPE_PIPE3;
+static const enum omap_control_phy_type dra7usb2_data = OMAP_CTRL_TYPE_DRA7USB2;
+static const enum omap_control_phy_type am437usb2_data = OMAP_CTRL_TYPE_AM437USB2;
-static const struct of_device_id omap_control_usb_id_table[] = {
+static const struct of_device_id omap_control_phy_id_table[] = {
{
.compatible = "ti,control-phy-otghs",
.data = &otghs_data,
@@ -227,93 +228,93 @@ static const struct of_device_id omap_control_usb_id_table[] = {
.data = &pipe3_data,
},
{
- .compatible = "ti,control-phy-dra7usb2",
+ .compatible = "ti,control-phy-usb2-dra7",
.data = &dra7usb2_data,
},
{
- .compatible = "ti,control-phy-am437usb2",
+ .compatible = "ti,control-phy-usb2-am437",
.data = &am437usb2_data,
},
{},
};
-MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
+MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
#endif
-static int omap_control_usb_probe(struct platform_device *pdev)
+static int omap_control_phy_probe(struct platform_device *pdev)
{
struct resource *res;
const struct of_device_id *of_id;
- struct omap_control_usb *control_usb;
+ struct omap_control_phy *control_phy;
- of_id = of_match_device(of_match_ptr(omap_control_usb_id_table),
- &pdev->dev);
+ of_id = of_match_device(of_match_ptr(omap_control_phy_id_table),
+ &pdev->dev);
if (!of_id)
return -EINVAL;
- control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb),
+ control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
- if (!control_usb) {
- dev_err(&pdev->dev, "unable to alloc memory for control usb\n");
+ if (!control_phy) {
+ dev_err(&pdev->dev, "unable to alloc memory for control phy\n");
return -ENOMEM;
}
- control_usb->dev = &pdev->dev;
- control_usb->type = *(enum omap_control_usb_type *)of_id->data;
+ control_phy->dev = &pdev->dev;
+ control_phy->type = *(enum omap_control_phy_type *)of_id->data;
- if (control_usb->type == OMAP_CTRL_TYPE_OTGHS) {
+ if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"otghs_control");
- control_usb->otghs_control = devm_ioremap_resource(
+ control_phy->otghs_control = devm_ioremap_resource(
&pdev->dev, res);
- if (IS_ERR(control_usb->otghs_control))
- return PTR_ERR(control_usb->otghs_control);
+ if (IS_ERR(control_phy->otghs_control))
+ return PTR_ERR(control_phy->otghs_control);
} else {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"power");
- control_usb->power = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(control_usb->power)) {
+ control_phy->power = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(control_phy->power)) {
dev_err(&pdev->dev, "Couldn't get power register\n");
- return PTR_ERR(control_usb->power);
+ return PTR_ERR(control_phy->power);
}
}
- if (control_usb->type == OMAP_CTRL_TYPE_PIPE3) {
- control_usb->sys_clk = devm_clk_get(control_usb->dev,
+ if (control_phy->type == OMAP_CTRL_TYPE_PIPE3) {
+ control_phy->sys_clk = devm_clk_get(control_phy->dev,
"sys_clkin");
- if (IS_ERR(control_usb->sys_clk)) {
+ if (IS_ERR(control_phy->sys_clk)) {
pr_err("%s: unable to get sys_clkin\n", __func__);
return -EINVAL;
}
}
- dev_set_drvdata(control_usb->dev, control_usb);
+ dev_set_drvdata(control_phy->dev, control_phy);
return 0;
}
-static struct platform_driver omap_control_usb_driver = {
- .probe = omap_control_usb_probe,
+static struct platform_driver omap_control_phy_driver = {
+ .probe = omap_control_phy_probe,
.driver = {
- .name = "omap-control-usb",
+ .name = "omap-control-phy",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(omap_control_usb_id_table),
+ .of_match_table = of_match_ptr(omap_control_phy_id_table),
},
};
-static int __init omap_control_usb_init(void)
+static int __init omap_control_phy_init(void)
{
- return platform_driver_register(&omap_control_usb_driver);
+ return platform_driver_register(&omap_control_phy_driver);
}
-subsys_initcall(omap_control_usb_init);
+subsys_initcall(omap_control_phy_init);
-static void __exit omap_control_usb_exit(void)
+static void __exit omap_control_phy_exit(void)
{
- platform_driver_unregister(&omap_control_usb_driver);
+ platform_driver_unregister(&omap_control_phy_driver);
}
-module_exit(omap_control_usb_exit);
+module_exit(omap_control_phy_exit);
-MODULE_ALIAS("platform: omap_control_usb");
+MODULE_ALIAS("platform: omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc.");
-MODULE_DESCRIPTION("OMAP Control Module USB Driver");
+MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index bfc5c337f99a..a2205a841e5e 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -21,16 +21,19 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/usb/omap_usb.h>
+#include <linux/phy/omap_usb.h>
#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
#include <linux/phy/phy.h>
#include <linux/of_platform.h>
+#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
+#define USB2PHY_ANA_CONFIG1 0x4c
+
/**
* omap_usb2_set_comparator - links the comparator present in the system with
* this phy
@@ -98,65 +101,116 @@ static int omap_usb_set_peripheral(struct usb_otg *otg,
return 0;
}
-static int omap_usb2_suspend(struct usb_phy *x, int suspend)
+static int omap_usb_power_off(struct phy *x)
{
- struct omap_usb *phy = phy_to_omapusb(x);
- int ret;
+ struct omap_usb *phy = phy_get_drvdata(x);
- if (suspend && !phy->is_suspended) {
- omap_control_usb_phy_power(phy->control_dev, 0);
- pm_runtime_put_sync(phy->dev);
- phy->is_suspended = 1;
- } else if (!suspend && phy->is_suspended) {
- ret = pm_runtime_get_sync(phy->dev);
- if (ret < 0) {
- dev_err(phy->dev, "get_sync failed with err %d\n", ret);
- return ret;
- }
- omap_control_usb_phy_power(phy->control_dev, 1);
- phy->is_suspended = 0;
- }
+ omap_control_phy_power(phy->control_dev, 0);
return 0;
}
-static int omap_usb_power_off(struct phy *x)
+static int omap_usb_power_on(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
- omap_control_usb_phy_power(phy->control_dev, 0);
+ omap_control_phy_power(phy->control_dev, 1);
return 0;
}
-static int omap_usb_power_on(struct phy *x)
+static int omap_usb_init(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
-
- omap_control_usb_phy_power(phy->control_dev, 1);
+ u32 val;
+
+ if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
+ /*
+ * Reduce the sensitivity of the internal PHY by enabling the
+ * DISCON_BYP_LATCH of the USB2PHY_ANA_CONFIG1 register. This
+ * resolves issues with certain devices which can otherwise
+ * be prone to false disconnects.
+ */
+ val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1);
+ val |= USB2PHY_DISCON_BYP_LATCH;
+ omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
+ }
return 0;
}
static struct phy_ops ops = {
+ .init = omap_usb_init,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static const struct usb_phy_data omap_usb2_data = {
+ .label = "omap_usb2",
+ .flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS,
+};
+
+static const struct usb_phy_data omap5_usb2_data = {
+ .label = "omap5_usb2",
+ .flags = 0,
+};
+
+static const struct usb_phy_data dra7x_usb2_data = {
+ .label = "dra7x_usb2",
+ .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
+};
+
+static const struct usb_phy_data am437x_usb2_data = {
+ .label = "am437x_usb2",
+ .flags = 0,
+};
+
+static const struct of_device_id omap_usb2_id_table[] = {
+ {
+ .compatible = "ti,omap-usb2",
+ .data = &omap_usb2_data,
+ },
+ {
+ .compatible = "ti,omap5-usb2",
+ .data = &omap5_usb2_data,
+ },
+ {
+ .compatible = "ti,dra7x-usb2",
+ .data = &dra7x_usb2_data,
+ },
+ {
+ .compatible = "ti,am437x-usb2",
+ .data = &am437x_usb2_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
+#endif
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct phy *generic_phy;
+ struct resource *res;
struct phy_provider *phy_provider;
struct usb_otg *otg;
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
+ const struct of_device_id *of_id;
+ struct usb_phy_data *phy_data;
+
+ of_id = of_match_device(of_match_ptr(omap_usb2_id_table), &pdev->dev);
- if (!node)
+ if (!of_id)
return -EINVAL;
+ phy_data = (struct usb_phy_data *)of_id->data;
+
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
@@ -172,15 +226,17 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
- phy->phy.label = "omap-usb2";
- phy->phy.set_suspend = omap_usb2_suspend;
+ phy->phy.label = phy_data->label;
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
- phy_provider = devm_of_phy_provider_register(phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
+ phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
+ }
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
@@ -195,14 +251,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
}
phy->control_dev = &control_pdev->dev;
-
- phy->is_suspended = 1;
- omap_control_usb_phy_power(phy->control_dev, 0);
+ omap_control_phy_power(phy->control_dev, 0);
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
- otg->set_vbus = omap_usb_set_vbus;
- otg->start_srp = omap_usb_start_srp;
+ if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
+ otg->set_vbus = omap_usb_set_vbus;
+ if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
+ otg->start_srp = omap_usb_start_srp;
otg->phy = &phy->phy;
platform_set_drvdata(pdev, phy);
@@ -214,6 +270,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, phy);
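+ /* Register the phy provider only after the phy itself has been created,
+ * so that a consumer looking it up cannot be handed back an as yet
+ * non-existent phy. */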
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
@@ -297,14 +358,6 @@ static const struct dev_pm_ops omap_usb2_pm_ops = {
#define DEV_PM_OPS NULL
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id omap_usb2_id_table[] = {
- { .compatible = "ti,omap-usb2" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
-#endif
-
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove = omap_usb2_remove,
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
new file mode 100644
index 000000000000..8a8c6bc8709a
--- /dev/null
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -0,0 +1,228 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include "phy-samsung-usb2.h"
+
+static int samsung_usb2_phy_power_on(struct phy *phy)
+{
+ struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy);
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ int ret;
+
+ dev_dbg(drv->dev, "Request to power_on \"%s\" usb phy\n",
+ inst->cfg->label);
+ ret = clk_prepare_enable(drv->clk);
+ if (ret)
+ goto err_main_clk;
+ ret = clk_prepare_enable(drv->ref_clk);
+ if (ret)
+ goto err_instance_clk;
+ if (inst->cfg->power_on) {
+ spin_lock(&drv->lock);
+ ret = inst->cfg->power_on(inst);
+ spin_unlock(&drv->lock);
+ }
+
+ return 0;
+
+err_instance_clk:
+ clk_disable_unprepare(drv->clk);
+err_main_clk:
+ return ret;
+}
+
+static int samsung_usb2_phy_power_off(struct phy *phy)
+{
+ struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy);
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ int ret = 0;
+
+ dev_dbg(drv->dev, "Request to power_off \"%s\" usb phy\n",
+ inst->cfg->label);
+ if (inst->cfg->power_off) {
+ spin_lock(&drv->lock);
+ ret = inst->cfg->power_off(inst);
+ spin_unlock(&drv->lock);
+ }
+ clk_disable_unprepare(drv->ref_clk);
+ clk_disable_unprepare(drv->clk);
+ return ret;
+}
+
+static struct phy_ops samsung_usb2_phy_ops = {
+ .power_on = samsung_usb2_phy_power_on,
+ .power_off = samsung_usb2_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *samsung_usb2_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct samsung_usb2_phy_driver *drv;
+
+ drv = dev_get_drvdata(dev);
+ if (!drv)
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(args->args[0] >= drv->cfg->num_phys))
+ return ERR_PTR(-ENODEV);
+
+ return drv->instances[args->args[0]].phy;
+}
+
+static const struct of_device_id samsung_usb2_phy_of_match[] = {
+#ifdef CONFIG_PHY_EXYNOS4210_USB2
+ {
+ .compatible = "samsung,exynos4210-usb2-phy",
+ .data = &exynos4210_usb2_phy_config,
+ },
+#endif
+#ifdef CONFIG_PHY_EXYNOS4X12_USB2
+ {
+ .compatible = "samsung,exynos4x12-usb2-phy",
+ .data = &exynos4x12_usb2_phy_config,
+ },
+#endif
+#ifdef CONFIG_PHY_EXYNOS5250_USB2
+ {
+ .compatible = "samsung,exynos5250-usb2-phy",
+ .data = &exynos5250_usb2_phy_config,
+ },
+#endif
+ { },
+};
+
+static int samsung_usb2_phy_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct samsung_usb2_phy_config *cfg;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *mem;
+ struct samsung_usb2_phy_driver *drv;
+ int i, ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(dev, "This driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ match = of_match_node(samsung_usb2_phy_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(dev, "of_match_node() failed\n");
+ return -EINVAL;
+ }
+ cfg = match->data;
+
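+ /* The driver state and the per-phy instance array are carved out of one
+ * allocation; instances[] is a trailing array sized by the number of phys
+ * the matched configuration describes. */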
+ drv = devm_kzalloc(dev, sizeof(struct samsung_usb2_phy_driver) +
+ cfg->num_phys * sizeof(struct samsung_usb2_phy_instance),
+ GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drv);
+ spin_lock_init(&drv->lock);
+
+ drv->cfg = cfg;
+ drv->dev = dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_phy = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(drv->reg_phy)) {
+ dev_err(dev, "Failed to map register memory (phy)\n");
+ return PTR_ERR(drv->reg_phy);
+ }
+
+ drv->reg_pmu = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "samsung,pmureg-phandle");
+ if (IS_ERR(drv->reg_pmu)) {
+ dev_err(dev, "Failed to map PMU registers (via syscon)\n");
+ return PTR_ERR(drv->reg_pmu);
+ }
+
+ if (drv->cfg->has_mode_switch) {
+ drv->reg_sys = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "samsung,sysreg-phandle");
+ if (IS_ERR(drv->reg_sys)) {
+ dev_err(dev, "Failed to map system registers (via syscon)\n");
+ return PTR_ERR(drv->reg_sys);
+ }
+ }
+
+ drv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(drv->clk)) {
+ dev_err(dev, "Failed to get clock of phy controller\n");
+ return PTR_ERR(drv->clk);
+ }
+
+ drv->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(drv->ref_clk)) {
+ dev_err(dev, "Failed to get reference clock for the phy controller\n");
+ return PTR_ERR(drv->ref_clk);
+ }
+
+ drv->ref_rate = clk_get_rate(drv->ref_clk);
+ if (drv->cfg->rate_to_clk) {
+ ret = drv->cfg->rate_to_clk(drv->ref_rate, &drv->ref_reg_val);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < drv->cfg->num_phys; i++) {
+ char *label = drv->cfg->phys[i].label;
+ struct samsung_usb2_phy_instance *p = &drv->instances[i];
+
+ dev_dbg(dev, "Creating phy \"%s\"\n", label);
+ p->phy = devm_phy_create(dev, &samsung_usb2_phy_ops, NULL);
+ if (IS_ERR(p->phy)) {
+ dev_err(drv->dev, "Failed to create usb2_phy \"%s\"\n",
+ label);
+ return PTR_ERR(p->phy);
+ }
+
+ p->cfg = &drv->cfg->phys[i];
+ p->drv = drv;
+ phy_set_bus_width(p->phy, 8);
+ phy_set_drvdata(p->phy, p);
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ samsung_usb2_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(drv->dev, "Failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver samsung_usb2_phy_driver = {
+ .probe = samsung_usb2_phy_probe,
+ .driver = {
+ .of_match_table = samsung_usb2_phy_of_match,
+ .name = "samsung-usb2-phy",
+ .owner = THIS_MODULE,
+ }
+};
+
+module_platform_driver(samsung_usb2_phy_driver);
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC USB PHY driver");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h
new file mode 100644
index 000000000000..45b3170652bd
--- /dev/null
+++ b/drivers/phy/phy-samsung-usb2.h
@@ -0,0 +1,67 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PHY_EXYNOS_USB2_H
+#define _PHY_EXYNOS_USB2_H
+
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define KHZ 1000
+#define MHZ (KHZ * KHZ)
+
+struct samsung_usb2_phy_driver;
+struct samsung_usb2_phy_instance;
+struct samsung_usb2_phy_config;
+
+struct samsung_usb2_phy_instance {
+ const struct samsung_usb2_common_phy *cfg;
+ struct phy *phy;
+ struct samsung_usb2_phy_driver *drv;
+ bool enabled;
+};
+
+struct samsung_usb2_phy_driver {
+ const struct samsung_usb2_phy_config *cfg;
+ struct clk *clk;
+ struct clk *ref_clk;
+ unsigned long ref_rate;
+ u32 ref_reg_val;
+ struct device *dev;
+ void __iomem *reg_phy;
+ struct regmap *reg_pmu;
+ struct regmap *reg_sys;
+ spinlock_t lock;
+ struct samsung_usb2_phy_instance instances[0];
+};
+
+struct samsung_usb2_common_phy {
+ int (*power_on)(struct samsung_usb2_phy_instance *);
+ int (*power_off)(struct samsung_usb2_phy_instance *);
+ unsigned int id;
+ char *label;
+};
+
+
+struct samsung_usb2_phy_config {
+ const struct samsung_usb2_common_phy *phys;
+ int (*rate_to_clk)(unsigned long, u32 *);
+ unsigned int num_phys;
+ bool has_mode_switch;
+};
+
+extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config;
+extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config;
+extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config;
+#endif
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
new file mode 100644
index 000000000000..e6e6c4ba7145
--- /dev/null
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -0,0 +1,331 @@
+/*
+ * Allwinner sun4i USB phy driver
+ *
+ * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ * Modelled after: Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define REG_ISCR 0x00
+#define REG_PHYCTL 0x04
+#define REG_PHYBIST 0x08
+#define REG_PHYTUNE 0x0c
+
+#define PHYCTL_DATA BIT(7)
+
+#define SUNXI_AHB_ICHR8_EN BIT(10)
+#define SUNXI_AHB_INCR4_BURST_EN BIT(9)
+#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
+#define SUNXI_ULPI_BYPASS_EN BIT(0)
+
+/* Common Control Bits for Both PHYs */
+#define PHY_PLL_BW 0x03
+#define PHY_RES45_CAL_EN 0x0c
+
+/* Private Control Bits for Each PHY */
+#define PHY_TX_AMPLITUDE_TUNE 0x20
+#define PHY_TX_SLEWRATE_TUNE 0x22
+#define PHY_VBUSVALID_TH_SEL 0x25
+#define PHY_PULLUP_RES_SEL 0x27
+#define PHY_OTG_FUNC_EN 0x28
+#define PHY_VBUS_DET_EN 0x29
+#define PHY_DISCON_TH_SEL 0x2a
+
+#define MAX_PHYS 3
+
+struct sun4i_usb_phy_data {
+ struct clk *clk;
+ void __iomem *base;
+ struct mutex mutex;
+ int num_phys;
+ u32 disc_thresh;
+ struct sun4i_usb_phy {
+ struct phy *phy;
+ void __iomem *pmu;
+ struct regulator *vbus;
+ struct reset_control *reset;
+ int index;
+ } phys[MAX_PHYS];
+};
+
+#define to_sun4i_usb_phy_data(phy) \
+ container_of((phy), struct sun4i_usb_phy_data, phys[(phy)->index])
+
+static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
+ int len)
+{
+ struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
+ u32 temp, usbc_bit = BIT(phy->index * 2);
+ int i;
+
+ mutex_lock(&phy_data->mutex);
+
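+ /* Each of the len data bits is loaded serially: program the bit's
+ * address (addr + i) into PHYCTL, drive the data bit on PHYCTL_DATA,
+ * then pulse this phy's usbc bit to latch it before shifting on. */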
+ for (i = 0; i < len; i++) {
+ temp = readl(phy_data->base + REG_PHYCTL);
+
+ /* clear the address portion */
+ temp &= ~(0xff << 8);
+
+ /* set the address */
+ temp |= ((addr + i) << 8);
+ writel(temp, phy_data->base + REG_PHYCTL);
+
+ /* set the data bit and clear usbc bit */
+ temp = readb(phy_data->base + REG_PHYCTL);
+ if (data & 0x1)
+ temp |= PHYCTL_DATA;
+ else
+ temp &= ~PHYCTL_DATA;
+ temp &= ~usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ /* pulse usbc_bit */
+ temp = readb(phy_data->base + REG_PHYCTL);
+ temp |= usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ temp = readb(phy_data->base + REG_PHYCTL);
+ temp &= ~usbc_bit;
+ writeb(temp, phy_data->base + REG_PHYCTL);
+
+ data >>= 1;
+ }
+ mutex_unlock(&phy_data->mutex);
+}
+
+static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
+{
+ u32 bits, reg_value;
+
+ if (!phy->pmu)
+ return;
+
+ bits = SUNXI_AHB_ICHR8_EN | SUNXI_AHB_INCR4_BURST_EN |
+ SUNXI_AHB_INCRX_ALIGN_EN | SUNXI_ULPI_BYPASS_EN;
+
+ reg_value = readl(phy->pmu);
+
+ if (enable)
+ reg_value |= bits;
+ else
+ reg_value &= ~bits;
+
+ writel(reg_value, phy->pmu);
+}
+
+static int sun4i_usb_phy_init(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(data->clk);
+ return ret;
+ }
+
+ /* Adjust the PHY's transmit amplitude and slew rate */
+ sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
+
+ /* Disconnect threshold adjustment */
+ sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL, data->disc_thresh, 2);
+
+ sun4i_usb_phy_passby(phy, 1);
+
+ return 0;
+}
+
+static int sun4i_usb_phy_exit(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+ sun4i_usb_phy_passby(phy, 0);
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int sun4i_usb_phy_power_on(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+ int ret = 0;
+
+ if (phy->vbus)
+ ret = regulator_enable(phy->vbus);
+
+ return ret;
+}
+
+static int sun4i_usb_phy_power_off(struct phy *_phy)
+{
+ struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+
+ if (phy->vbus)
+ regulator_disable(phy->vbus);
+
+ return 0;
+}
+
+static struct phy_ops sun4i_usb_phy_ops = {
+ .init = sun4i_usb_phy_init,
+ .exit = sun4i_usb_phy_exit,
+ .power_on = sun4i_usb_phy_power_on,
+ .power_off = sun4i_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *sun4i_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
+
+ if (WARN_ON(args->args[0] == 0 || args->args[0] >= data->num_phys))
+ return ERR_PTR(-ENODEV);
+
+ return data->phys[args->args[0]].phy;
+}
+
+static int sun4i_usb_phy_probe(struct platform_device *pdev)
+{
+ struct sun4i_usb_phy_data *data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *pmu = NULL;
+ struct phy_provider *phy_provider;
+ struct reset_control *reset;
+ struct regulator *vbus;
+ struct resource *res;
+ struct phy *phy;
+ char name[16];
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->mutex);
+
+ if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy"))
+ data->num_phys = 2;
+ else
+ data->num_phys = 3;
+
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+ data->disc_thresh = 3;
+ else
+ data->disc_thresh = 2;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->clk = devm_clk_get(dev, "usb_phy");
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "could not get usb_phy clock\n");
+ return PTR_ERR(data->clk);
+ }
+
+ /* Skip 0, 0 is the phy for otg which is not yet supported. */
+ for (i = 1; i < data->num_phys; i++) {
+ snprintf(name, sizeof(name), "usb%d_vbus", i);
+ vbus = devm_regulator_get_optional(dev, name);
+ if (IS_ERR(vbus)) {
+ if (PTR_ERR(vbus) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ vbus = NULL;
+ }
+
+ snprintf(name, sizeof(name), "usb%d_reset", i);
+ reset = devm_reset_control_get(dev, name);
+ if (IS_ERR(reset)) {
+ dev_err(dev, "failed to get reset %s\n", name);
+ return PTR_ERR(reset);
+ }
+
+ if (i) { /* No pmu for usbc0 */
+ snprintf(name, sizeof(name), "pmu%d", i);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+ pmu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmu))
+ return PTR_ERR(pmu);
+ }
+
+ phy = devm_phy_create(dev, &sun4i_usb_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY %d\n", i);
+ return PTR_ERR(phy);
+ }
+
+ data->phys[i].phy = phy;
+ data->phys[i].pmu = pmu;
+ data->phys[i].vbus = vbus;
+ data->phys[i].reset = reset;
+ data->phys[i].index = i;
+ phy_set_drvdata(phy, &data->phys[i]);
+ }
+
+ dev_set_drvdata(dev, data);
+ phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-usb-phy" },
+ { .compatible = "allwinner,sun5i-a13-usb-phy" },
+ { .compatible = "allwinner,sun7i-a20-usb-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
+
+static struct platform_driver sun4i_usb_phy_driver = {
+ .probe = sun4i_usb_phy_probe,
+ .driver = {
+ .of_match_table = sun4i_usb_phy_of_match,
+ .name = "sun4i-usb-phy",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(sun4i_usb_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner sun4i USB phy driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
new file mode 100644
index 000000000000..591367654613
--- /dev/null
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -0,0 +1,470 @@
+/*
+ * phy-ti-pipe3 - PIPE3 PHY driver.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/phy/phy.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/phy/omap_control_phy.h>
+#include <linux/of_platform.h>
+
+#define PLL_STATUS 0x00000004
+#define PLL_GO 0x00000008
+#define PLL_CONFIGURATION1 0x0000000C
+#define PLL_CONFIGURATION2 0x00000010
+#define PLL_CONFIGURATION3 0x00000014
+#define PLL_CONFIGURATION4 0x00000020
+
+#define PLL_REGM_MASK 0x001FFE00
+#define PLL_REGM_SHIFT 0x9
+#define PLL_REGM_F_MASK 0x0003FFFF
+#define PLL_REGM_F_SHIFT 0x0
+#define PLL_REGN_MASK 0x000001FE
+#define PLL_REGN_SHIFT 0x1
+#define PLL_SELFREQDCO_MASK 0x0000000E
+#define PLL_SELFREQDCO_SHIFT 0x1
+#define PLL_SD_MASK 0x0003FC00
+#define PLL_SD_SHIFT 10
+#define SET_PLL_GO 0x1
+#define PLL_LDOPWDN BIT(15)
+#define PLL_TICOPWDN BIT(16)
+#define PLL_LOCK 0x2
+#define PLL_IDLE 0x1
+
+/*
+ * This is an empirical value that works; the actual value required for the
+ * PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status to be correctly reflected in
+ * the PIPE3PHY_PLL_STATUS register still needs to be confirmed.
+ */
+#define PLL_IDLE_TIME 100 /* in milliseconds */
+#define PLL_LOCK_TIME 100 /* in milliseconds */
+
+struct pipe3_dpll_params {
+ u16 m;
+ u8 n;
+ u8 freq:3;
+ u8 sd;
+ u32 mf;
+};
+
+struct pipe3_dpll_map {
+ unsigned long rate;
+ struct pipe3_dpll_params params;
+};
+
+struct ti_pipe3 {
+ void __iomem *pll_ctrl_base;
+ struct device *dev;
+ struct device *control_dev;
+ struct clk *wkupclk;
+ struct clk *sys_clk;
+ struct clk *refclk;
+ struct pipe3_dpll_map *dpll_map;
+};
+
+static struct pipe3_dpll_map dpll_map_usb[] = {
+ {12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
+ {16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
+ {19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
+ {20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
+ {26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
+ {38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
+ { }, /* Terminator */
+};
+
+static struct pipe3_dpll_map dpll_map_sata[] = {
+ {12000000, {1000, 7, 4, 6, 0} }, /* 12 MHz */
+ {16800000, {714, 7, 4, 6, 0} }, /* 16.8 MHz */
+ {19200000, {625, 7, 4, 6, 0} }, /* 19.2 MHz */
+ {20000000, {600, 7, 4, 6, 0} }, /* 20 MHz */
+ {26000000, {461, 7, 4, 6, 0} }, /* 26 MHz */
+ {38400000, {312, 7, 4, 6, 0} }, /* 38.4 MHz */
+ { }, /* Terminator */
+};
+
+static inline u32 ti_pipe3_readl(void __iomem *addr, unsigned offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static inline void ti_pipe3_writel(void __iomem *addr, unsigned offset,
+ u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
+
+static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
+{
+ unsigned long rate;
+ struct pipe3_dpll_map *dpll_map = phy->dpll_map;
+
+ rate = clk_get_rate(phy->sys_clk);
+
+ for (; dpll_map->rate; dpll_map++) {
+ if (rate == dpll_map->rate)
+ return &dpll_map->params;
+ }
+
+ dev_err(phy->dev, "No DPLL configuration for %lu Hz SYS CLK\n", rate);
+
+ return NULL;
+}
+
+static int ti_pipe3_power_off(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+
+ omap_control_phy_power(phy->control_dev, 0);
+
+ return 0;
+}
+
+static int ti_pipe3_power_on(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+
+ omap_control_phy_power(phy->control_dev, 1);
+
+ return 0;
+}
+
+static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
+{
+ u32 val;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(PLL_LOCK_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (val & PLL_LOCK)
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_LOCK)) {
+ dev_err(phy->dev, "DPLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
+{
+ u32 val;
+ struct pipe3_dpll_params *dpll_params;
+
+ dpll_params = ti_pipe3_get_dpll_params(phy);
+ if (!dpll_params)
+ return -EINVAL;
+
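+ /* Program the divider fields (N, M, fractional M and the sigma-delta
+ * divider) and the DCO frequency selection from the table entry matching
+ * the SYS CLK rate, then set PLL_GO and wait for the DPLL to lock. */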
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGN_MASK;
+ val |= dpll_params->n << PLL_REGN_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val &= ~PLL_SELFREQDCO_MASK;
+ val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
+ val &= ~PLL_REGM_MASK;
+ val |= dpll_params->m << PLL_REGM_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
+ val &= ~PLL_REGM_F_MASK;
+ val |= dpll_params->mf << PLL_REGM_F_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
+
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
+ val &= ~PLL_SD_MASK;
+ val |= dpll_params->sd << PLL_SD_SHIFT;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
+
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
+
+ return ti_pipe3_dpll_wait_lock(phy);
+}
+
+static int ti_pipe3_init(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+ u32 val;
+ int ret = 0;
+
+ /* Bring it out of IDLE if it is IDLE */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ if (val & PLL_IDLE) {
+ val &= ~PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+ ret = ti_pipe3_dpll_wait_lock(phy);
+ }
+
+ /* Program the DPLL only if not locked */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if (!(val & PLL_LOCK))
+ if (ti_pipe3_dpll_program(phy))
+ return -EINVAL;
+
+ return ret;
+}
+
+static int ti_pipe3_exit(struct phy *x)
+{
+ struct ti_pipe3 *phy = phy_get_drvdata(x);
+ u32 val;
+ unsigned long timeout;
+
+ /* SATA DPLL can't be powered down due to Errata i783 */
+ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+ return 0;
+
+ /* Put DPLL in IDLE mode */
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+ val |= PLL_IDLE;
+ ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
+
+ /* wait for LDO and Oscillator to power down */
+ timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+ do {
+ cpu_relax();
+ val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+ if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+ break;
+ } while (!time_after(jiffies, timeout));
+
+ if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+ dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+ val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+static struct phy_ops ops = {
+ .init = ti_pipe3_init,
+ .exit = ti_pipe3_exit,
+ .power_on = ti_pipe3_power_on,
+ .power_off = ti_pipe3_power_off,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_pipe3_id_table[];
+#endif
+
+static int ti_pipe3_probe(struct platform_device *pdev)
+{
+ struct ti_pipe3 *phy;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(ti_pipe3_id_table), &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(&pdev->dev, "unable to alloc mem for TI PIPE3 PHY\n");
+ return -ENOMEM;
+ }
+
+ phy->dpll_map = (struct pipe3_dpll_map *)match->data;
+ if (!phy->dpll_map) {
+ dev_err(&pdev->dev, "no DPLL data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl");
+ phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->pll_ctrl_base))
+ return PTR_ERR(phy->pll_ctrl_base);
+
+ phy->dev = &pdev->dev;
+
+ if (!of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+
+ phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
+ if (IS_ERR(phy->wkupclk)) {
+ dev_err(&pdev->dev, "unable to get wkupclk\n");
+ return PTR_ERR(phy->wkupclk);
+ }
+
+ phy->refclk = devm_clk_get(phy->dev, "refclk");
+ if (IS_ERR(phy->refclk)) {
+ dev_err(&pdev->dev, "unable to get refclk\n");
+ return PTR_ERR(phy->refclk);
+ }
+ } else {
+ phy->wkupclk = ERR_PTR(-ENODEV);
+ phy->refclk = ERR_PTR(-ENODEV);
+ }
+
+ phy->sys_clk = devm_clk_get(phy->dev, "sysclk");
+ if (IS_ERR(phy->sys_clk)) {
+ dev_err(&pdev->dev, "unable to get sysclk\n");
+ return -EINVAL;
+ }
+
+ control_node = of_parse_phandle(node, "ctrl-module", 0);
+ if (!control_node) {
+ dev_err(&pdev->dev, "Failed to get control device phandle\n");
+ return -EINVAL;
+ }
+
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ return -EINVAL;
+ }
+
+ phy->control_dev = &control_pdev->dev;
+
+ omap_control_phy_power(phy->control_dev, 0);
+
+ platform_set_drvdata(pdev, phy);
+ pm_runtime_enable(phy->dev);
+
+ generic_phy = devm_phy_create(phy->dev, &ops, NULL);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ pm_runtime_get(&pdev->dev);
+
+ return 0;
+}
+
+static int ti_pipe3_remove(struct platform_device *pdev)
+{
+ if (!pm_runtime_suspended(&pdev->dev))
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int ti_pipe3_runtime_suspend(struct device *dev)
+{
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ if (!IS_ERR(phy->wkupclk))
+ clk_disable_unprepare(phy->wkupclk);
+ if (!IS_ERR(phy->refclk))
+ clk_disable_unprepare(phy->refclk);
+
+ return 0;
+}
+
+static int ti_pipe3_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct ti_pipe3 *phy = dev_get_drvdata(dev);
+
+ if (!IS_ERR(phy->refclk)) {
+ ret = clk_prepare_enable(phy->refclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
+ goto err1;
+ }
+ }
+
+ if (!IS_ERR(phy->wkupclk)) {
+ ret = clk_prepare_enable(phy->wkupclk);
+ if (ret) {
+ dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
+ goto err2;
+ }
+ }
+
+ return 0;
+
+err2:
+ if (!IS_ERR(phy->refclk))
+ clk_disable_unprepare(phy->refclk);
+
+err1:
+ return ret;
+}
+
+static const struct dev_pm_ops ti_pipe3_pm_ops = {
+ SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
+ ti_pipe3_runtime_resume, NULL)
+};
+
+#define DEV_PM_OPS (&ti_pipe3_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_pipe3_id_table[] = {
+ {
+ .compatible = "ti,phy-usb3",
+ .data = dpll_map_usb,
+ },
+ {
+ .compatible = "ti,omap-usb3",
+ .data = dpll_map_usb,
+ },
+ {
+ .compatible = "ti,phy-pipe3-sata",
+ .data = dpll_map_sata,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_pipe3_id_table);
+#endif
+
+static struct platform_driver ti_pipe3_driver = {
+ .probe = ti_pipe3_probe,
+ .remove = ti_pipe3_remove,
+ .driver = {
+ .name = "ti-pipe3",
+ .owner = THIS_MODULE,
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(ti_pipe3_id_table),
+ },
+};
+
+module_platform_driver(ti_pipe3_driver);
+
+MODULE_ALIAS("platform: ti_pipe3");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI PIPE3 phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index daf65e68aaab..2e0e9b3774c8 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -338,7 +338,7 @@ static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
dev_err(twl->dev, "unsupported T2 transceiver mode %d\n",
mode);
break;
- };
+ }
}
static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
@@ -661,7 +661,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct phy_init_data *init_data = NULL;
- twl = devm_kzalloc(&pdev->dev, sizeof *twl, GFP_KERNEL);
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
return -ENOMEM;
@@ -676,7 +676,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
return -EINVAL;
}
- otg = devm_kzalloc(&pdev->dev, sizeof *otg, GFP_KERNEL);
+ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
@@ -695,11 +695,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
- phy_provider = devm_of_phy_provider_register(twl->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(twl->dev, &ops, init_data);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
@@ -708,6 +703,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
phy_set_drvdata(phy, twl);
+ phy_provider = devm_of_phy_provider_register(twl->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
new file mode 100644
index 000000000000..4aa1ccd1511f
--- /dev/null
+++ b/drivers/phy/phy-xgene.c
@@ -0,0 +1,1750 @@
+/*
+ * AppliedMicro X-Gene Multi-purpose PHY driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ * Tuan Phan <tphan@apm.com>
+ * Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The APM X-Gene PHY consists of two PLL clock macros (CMU) and lanes.
+ * The first PLL clock macro is used for internal reference clock. The second
+ * PLL clock macro is used to generate the clock for the PHY. This driver
+ * configures the first PLL CMU, the second PLL CMU, and programs the PHY to
+ * operate according to the mode of operation. The first PLL CMU is only
+ * required if internal clock is enabled.
+ *
+ * Logical layout of the HW module units:
+ *
+ *          -----------------
+ *          |   Internal    |    |------|
+ *          |  Ref PLL CMU  |----|      |     -------------     ---------
+ *          -----------------    | MUX  |-----|PHY PLL CMU|-----| Serdes|
+ *                               |      |     -------------     ---------
+ *          External Clock ------|      |
+ *                               |------|
+ *
+ * The Ref PLL CMU CSR (Configuration System Registers) is accessed
+ * indirectly from the SDS offset at 0x2000. It is only required for
+ * internal reference clock.
+ * The PHY PLL CMU CSR is accessed indirectly from the SDS offset at 0x0000.
+ * The Serdes CSR is accessed indirectly from the SDS offset at 0x0400.
+ *
+ * The Ref PLL CMU can be located within the same PHY IP or outside the PHY IP
+ * due to a shared Ref PLL CMU. For a PHY whose Ref PLL CMU is shared with
+ * another IP, it is located outside the PHY IP. This is the case for the PHY
+ * located at 0x1f23a000 (SATA Port 4/5). For such a PHY, another resource is
+ * required to locate the SDS/Ref PLL CMU module and to have the clock for
+ * that IP enabled.
+ *
+ * Currently, this driver only supports Gen3 SATA mode with external clock.
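+ *
+ * As a rough sketch, an indirect access programs the target offset and a
+ * read or write command into SATA_ENET_SDS_IND_CMD_REG, exchanges the data
+ * through SATA_ENET_SDS_IND_WDATA_REG or SATA_ENET_SDS_IND_RDATA_REG, and
+ * treats the command-done bit as the completion signal.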
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/clk.h>
+
+/* Max 2 lanes per a PHY unit */
+#define MAX_LANE 2
+
+/* Register offset inside the PHY */
+#define SERDES_PLL_INDIRECT_OFFSET 0x0000
+#define SERDES_PLL_REF_INDIRECT_OFFSET 0x2000
+#define SERDES_INDIRECT_OFFSET 0x0400
+#define SERDES_LANE_STRIDE 0x0200
+
+/* Some default Serdes parameters */
+#define DEFAULT_SATA_TXBOOST_GAIN { 0x1e, 0x1e, 0x1e }
+#define DEFAULT_SATA_TXEYEDIRECTION { 0x0, 0x0, 0x0 }
+#define DEFAULT_SATA_TXEYETUNING { 0xa, 0xa, 0xa }
+#define DEFAULT_SATA_SPD_SEL { 0x1, 0x3, 0x7 }
+#define DEFAULT_SATA_TXAMP { 0x8, 0x8, 0x8 }
+#define DEFAULT_SATA_TXCN1 { 0x2, 0x2, 0x2 }
+#define DEFAULT_SATA_TXCN2 { 0x0, 0x0, 0x0 }
+#define DEFAULT_SATA_TXCP1 { 0xa, 0xa, 0xa }
+
+#define SATA_SPD_SEL_GEN3 0x7
+#define SATA_SPD_SEL_GEN2 0x3
+#define SATA_SPD_SEL_GEN1 0x1
+
+#define SSC_DISABLE 0
+#define SSC_ENABLE 1
+
+#define FBDIV_VAL_50M 0x77
+#define REFDIV_VAL_50M 0x1
+#define FBDIV_VAL_100M 0x3B
+#define REFDIV_VAL_100M 0x0
+
+/* SATA Clock/Reset CSR */
+#define SATACLKENREG 0x00000000
+#define SATA0_CORE_CLKEN 0x00000002
+#define SATA1_CORE_CLKEN 0x00000004
+#define SATASRESETREG 0x00000004
+#define SATA_MEM_RESET_MASK 0x00000020
+#define SATA_MEM_RESET_RD(src) (((src) & 0x00000020) >> 5)
+#define SATA_SDS_RESET_MASK 0x00000004
+#define SATA_CSR_RESET_MASK 0x00000001
+#define SATA_CORE_RESET_MASK 0x00000002
+#define SATA_PMCLK_RESET_MASK 0x00000010
+#define SATA_PCLK_RESET_MASK 0x00000008
+
+/* SDS CSR used for PHY Indirect access */
+#define SATA_ENET_SDS_PCS_CTL0 0x00000000
+#define REGSPEC_CFG_I_TX_WORDMODE0_SET(dst, src) \
+ (((dst) & ~0x00070000) | (((u32) (src) << 16) & 0x00070000))
+#define REGSPEC_CFG_I_RX_WORDMODE0_SET(dst, src) \
+ (((dst) & ~0x00e00000) | (((u32) (src) << 21) & 0x00e00000))
+#define SATA_ENET_SDS_CTL0 0x0000000c
+#define REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(dst, src) \
+ (((dst) & ~0x00007fff) | (((u32) (src)) & 0x00007fff))
+#define SATA_ENET_SDS_CTL1 0x00000010
+#define CFG_I_SPD_SEL_CDR_OVR1_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src)) & 0x0000000f))
+#define SATA_ENET_SDS_RST_CTL 0x00000024
+#define SATA_ENET_SDS_IND_CMD_REG 0x0000003c
+#define CFG_IND_WR_CMD_MASK 0x00000001
+#define CFG_IND_RD_CMD_MASK 0x00000002
+#define CFG_IND_CMD_DONE_MASK 0x00000004
+#define CFG_IND_ADDR_SET(dst, src) \
+ (((dst) & ~0x003ffff0) | (((u32) (src) << 4) & 0x003ffff0))
+#define SATA_ENET_SDS_IND_RDATA_REG 0x00000040
+#define SATA_ENET_SDS_IND_WDATA_REG 0x00000044
+#define SATA_ENET_CLK_MACRO_REG 0x0000004c
+#define I_RESET_B_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src)) & 0x00000001))
+#define I_PLL_FBDIV_SET(dst, src) \
+ (((dst) & ~0x001ff000) | (((u32) (src) << 12) & 0x001ff000))
+#define I_CUSTOMEROV_SET(dst, src) \
+ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
+#define O_PLL_LOCK_RD(src) (((src) & 0x40000000) >> 30)
+#define O_PLL_READY_RD(src) (((src) & 0x80000000) >> 31)
+
+/* PLL Clock Macro Unit (CMU) CSR accessing from SDS indirectly */
+#define CMU_REG0 0x00000
+#define CMU_REG0_PLL_REF_SEL_MASK 0x00002000
+#define CMU_REG0_PLL_REF_SEL_SET(dst, src) \
+ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
+#define CMU_REG0_PDOWN_MASK 0x00004000
+#define CMU_REG0_CAL_COUNT_RESOL_SET(dst, src) \
+ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
+#define CMU_REG1 0x00002
+#define CMU_REG1_PLL_CP_SET(dst, src) \
+ (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
+#define CMU_REG1_PLL_MANUALCAL_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG1_PLL_CP_SEL_SET(dst, src) \
+ (((dst) & ~0x000003e0) | (((u32) (src) << 5) & 0x000003e0))
+#define CMU_REG1_REFCLK_CMOS_SEL_MASK 0x00000001
+#define CMU_REG1_REFCLK_CMOS_SEL_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG2 0x00004
+#define CMU_REG2_PLL_REFDIV_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define CMU_REG2_PLL_LFRES_SET(dst, src) \
+ (((dst) & ~0x0000001e) | (((u32) (src) << 1) & 0x0000001e))
+#define CMU_REG2_PLL_FBDIV_SET(dst, src) \
+ (((dst) & ~0x00003fe0) | (((u32) (src) << 5) & 0x00003fe0))
+#define CMU_REG3 0x00006
+#define CMU_REG3_VCOVARSEL_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
+#define CMU_REG3_VCO_MOMSEL_INIT_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define CMU_REG3_VCO_MANMOMSEL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define CMU_REG4 0x00008
+#define CMU_REG5 0x0000a
+#define CMU_REG5_PLL_LFSMCAP_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define CMU_REG5_PLL_LOCK_RESOLUTION_SET(dst, src) \
+ (((dst) & ~0x0000000e) | (((u32) (src) << 1) & 0x0000000e))
+#define CMU_REG5_PLL_LFCAP_SET(dst, src) \
+ (((dst) & ~0x00003000) | (((u32) (src) << 12) & 0x00003000))
+#define CMU_REG5_PLL_RESETB_MASK 0x00000001
+#define CMU_REG6 0x0000c
+#define CMU_REG6_PLL_VREGTRIM_SET(dst, src) \
+ (((dst) & ~0x00000600) | (((u32) (src) << 9) & 0x00000600))
+#define CMU_REG6_MAN_PVT_CAL_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define CMU_REG7 0x0000e
+#define CMU_REG7_PLL_CALIB_DONE_RD(src) ((0x00004000 & (u32) (src)) >> 14)
+#define CMU_REG7_VCO_CAL_FAIL_RD(src) ((0x00000c00 & (u32) (src)) >> 10)
+#define CMU_REG8 0x00010
+#define CMU_REG9 0x00012
+#define CMU_REG9_WORD_LEN_8BIT 0x000
+#define CMU_REG9_WORD_LEN_10BIT 0x001
+#define CMU_REG9_WORD_LEN_16BIT 0x002
+#define CMU_REG9_WORD_LEN_20BIT 0x003
+#define CMU_REG9_WORD_LEN_32BIT 0x004
+#define CMU_REG9_WORD_LEN_40BIT 0x005
+#define CMU_REG9_WORD_LEN_64BIT 0x006
+#define CMU_REG9_WORD_LEN_66BIT 0x007
+#define CMU_REG9_TX_WORD_MODE_CH1_SET(dst, src) \
+ (((dst) & ~0x00000380) | (((u32) (src) << 7) & 0x00000380))
+#define CMU_REG9_TX_WORD_MODE_CH0_SET(dst, src) \
+ (((dst) & ~0x00000070) | (((u32) (src) << 4) & 0x00000070))
+#define CMU_REG9_PLL_POST_DIVBY2_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG9_VBG_BYPASSB_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define CMU_REG9_IGEN_BYPASS_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define CMU_REG10 0x00014
+#define CMU_REG10_VREG_REFSEL_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG11 0x00016
+#define CMU_REG12 0x00018
+#define CMU_REG12_STATE_DELAY9_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define CMU_REG13 0x0001a
+#define CMU_REG14 0x0001c
+#define CMU_REG15 0x0001e
+#define CMU_REG16 0x00020
+#define CMU_REG16_PVT_DN_MAN_ENA_MASK 0x00000001
+#define CMU_REG16_PVT_UP_MAN_ENA_MASK 0x00000002
+#define CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(dst, src) \
+ (((dst) & ~0x0000001c) | (((u32) (src) << 2) & 0x0000001c))
+#define CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define CMU_REG16_BYPASS_PLL_LOCK_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define CMU_REG17 0x00022
+#define CMU_REG17_PVT_CODE_R2A_SET(dst, src) \
+ (((dst) & ~0x00007f00) | (((u32) (src) << 8) & 0x00007f00))
+#define CMU_REG17_RESERVED_7_SET(dst, src) \
+ (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
+#define CMU_REG17_PVT_TERM_MAN_ENA_MASK 0x00008000
+#define CMU_REG18 0x00024
+#define CMU_REG19 0x00026
+#define CMU_REG20 0x00028
+#define CMU_REG21 0x0002a
+#define CMU_REG22 0x0002c
+#define CMU_REG23 0x0002e
+#define CMU_REG24 0x00030
+#define CMU_REG25 0x00032
+#define CMU_REG26 0x00034
+#define CMU_REG26_FORCE_PLL_LOCK_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define CMU_REG27 0x00036
+#define CMU_REG28 0x00038
+#define CMU_REG29 0x0003a
+#define CMU_REG30 0x0003c
+#define CMU_REG30_LOCK_COUNT_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define CMU_REG30_PCIE_MODE_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define CMU_REG31 0x0003e
+#define CMU_REG32 0x00040
+#define CMU_REG32_FORCE_VCOCAL_START_MASK 0x00004000
+#define CMU_REG32_PVT_CAL_WAIT_SEL_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define CMU_REG32_IREF_ADJ_SET(dst, src) \
+ (((dst) & ~0x00000180) | (((u32) (src) << 7) & 0x00000180))
+#define CMU_REG33 0x00042
+#define CMU_REG34 0x00044
+#define CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(dst, src) \
+ (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
+#define CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(dst, src) \
+ (((dst) & ~0x00000f00) | (((u32) (src) << 8) & 0x00000f00))
+#define CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(dst, src) \
+ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
+#define CMU_REG35 0x00046
+#define CMU_REG35_PLL_SSC_MOD_SET(dst, src) \
+ (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
+#define CMU_REG36 0x00048
+#define CMU_REG36_PLL_SSC_EN_SET(dst, src) \
+ (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
+#define CMU_REG36_PLL_SSC_VSTEP_SET(dst, src) \
+ (((dst) & ~0x0000ffc0) | (((u32) (src) << 6) & 0x0000ffc0))
+#define CMU_REG36_PLL_SSC_DSMSEL_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define CMU_REG37 0x0004a
+#define CMU_REG38 0x0004c
+#define CMU_REG39 0x0004e
+
+/* PHY lane CSR accessing from SDS indirectly */
+#define RXTX_REG0 0x000
+#define RXTX_REG0_CTLE_EQ_HR_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG0_CTLE_EQ_QR_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG0_CTLE_EQ_FR_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG1 0x002
+#define RXTX_REG1_RXACVCM_SET(dst, src) \
+ (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
+#define RXTX_REG1_CTLE_EQ_SET(dst, src) \
+ (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
+#define RXTX_REG1_RXVREG1_SET(dst, src) \
+ (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
+#define RXTX_REG1_RXIREF_ADJ_SET(dst, src) \
+ (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
+#define RXTX_REG2 0x004
+#define RXTX_REG2_VTT_ENA_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG2_TX_FIFO_ENA_SET(dst, src) \
+ (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
+#define RXTX_REG2_VTT_SEL_SET(dst, src) \
+ (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
+#define RXTX_REG4 0x008
+#define RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK 0x00000040
+#define RXTX_REG4_TX_DATA_RATE_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define RXTX_REG4_TX_WORD_MODE_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG5 0x00a
+#define RXTX_REG5_TX_CN1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG5_TX_CP1_SET(dst, src) \
+ (((dst) & ~0x000007e0) | (((u32) (src) << 5) & 0x000007e0))
+#define RXTX_REG5_TX_CN2_SET(dst, src) \
+ (((dst) & ~0x0000001f) | (((u32) (src) << 0) & 0x0000001f))
+#define RXTX_REG6 0x00c
+#define RXTX_REG6_TXAMP_CNTL_SET(dst, src) \
+ (((dst) & ~0x00000780) | (((u32) (src) << 7) & 0x00000780))
+#define RXTX_REG6_TXAMP_ENA_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG6_RX_BIST_ERRCNT_RD_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define RXTX_REG6_TX_IDLE_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG6_RX_BIST_RESYNC_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG7 0x00e
+#define RXTX_REG7_RESETB_RXD_MASK 0x00000100
+#define RXTX_REG7_RESETB_RXA_MASK 0x00000080
+#define RXTX_REG7_BIST_ENA_RX_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG7_RX_WORD_MODE_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG8 0x010
+#define RXTX_REG8_CDR_LOOP_ENA_SET(dst, src) \
+ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
+#define RXTX_REG8_CDR_BYPASS_RXLOS_SET(dst, src) \
+ (((dst) & ~0x00000800) | (((u32) (src) << 11) & 0x00000800))
+#define RXTX_REG8_SSC_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000200) | (((u32) (src) << 9) & 0x00000200))
+#define RXTX_REG8_SD_VREF_SET(dst, src) \
+ (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
+#define RXTX_REG8_SD_DISABLE_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG7 0x00e
+#define RXTX_REG7_RESETB_RXD_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG7_RESETB_RXA_SET(dst, src) \
+ (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
+#define RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK 0x00004000
+#define RXTX_REG7_LOOP_BACK_ENA_CTLE_SET(dst, src) \
+ (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
+#define RXTX_REG11 0x016
+#define RXTX_REG11_PHASE_ADJUST_LIMIT_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG12 0x018
+#define RXTX_REG12_LATCH_OFF_ENA_SET(dst, src) \
+ (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
+#define RXTX_REG12_SUMOS_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG12_RX_DET_TERM_ENABLE_MASK 0x00000002
+#define RXTX_REG12_RX_DET_TERM_ENABLE_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG13 0x01a
+#define RXTX_REG14 0x01c
+#define RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(dst, src) \
+ (((dst) & ~0x0000003f) | (((u32) (src) << 0) & 0x0000003f))
+#define RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(dst, src) \
+ (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
+#define RXTX_REG26 0x034
+#define RXTX_REG26_PERIOD_ERROR_LATCH_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG26_BLWC_ENA_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG21 0x02a
+#define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG21_XO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(src) ((0x0000000f & (u32)(src)))
+#define RXTX_REG22 0x02c
+#define RXTX_REG22_SO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG22_EO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(src) ((0x0000000f & (u32)(src)))
+#define RXTX_REG23 0x02e
+#define RXTX_REG23_DE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG23_XE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG24 0x030
+#define RXTX_REG24_EE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
+#define RXTX_REG24_SE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
+#define RXTX_REG27 0x036
+#define RXTX_REG28 0x038
+#define RXTX_REG31 0x03e
+#define RXTX_REG38 0x04c
+#define RXTX_REG38_CUSTOMER_PINMODE_INV_SET(dst, src) \
+ (((dst) & 0x0000fffe) | (((u32) (src) << 1) & 0x0000fffe))
+#define RXTX_REG39 0x04e
+#define RXTX_REG40 0x050
+#define RXTX_REG41 0x052
+#define RXTX_REG42 0x054
+#define RXTX_REG43 0x056
+#define RXTX_REG44 0x058
+#define RXTX_REG45 0x05a
+#define RXTX_REG46 0x05c
+#define RXTX_REG47 0x05e
+#define RXTX_REG48 0x060
+#define RXTX_REG49 0x062
+#define RXTX_REG50 0x064
+#define RXTX_REG51 0x066
+#define RXTX_REG52 0x068
+#define RXTX_REG53 0x06a
+#define RXTX_REG54 0x06c
+#define RXTX_REG55 0x06e
+#define RXTX_REG61 0x07a
+#define RXTX_REG61_ISCAN_INBERT_SET(dst, src) \
+ (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
+#define RXTX_REG61_LOADFREQ_SHIFT_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(dst, src) \
+ (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
+#define RXTX_REG61_SPD_SEL_CDR_SET(dst, src) \
+ (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
+#define RXTX_REG62 0x07c
+#define RXTX_REG62_PERIOD_H1_QLATCH_SET(dst, src) \
+ (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
+#define RXTX_REG81 0x0a2
+#define RXTX_REG89_MU_TH7_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG89_MU_TH8_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG89_MU_TH9_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG96 0x0c0
+#define RXTX_REG96_MU_FREQ1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG96_MU_FREQ2_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG96_MU_FREQ3_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG99 0x0c6
+#define RXTX_REG99_MU_PHASE1_SET(dst, src) \
+ (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
+#define RXTX_REG99_MU_PHASE2_SET(dst, src) \
+ (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
+#define RXTX_REG99_MU_PHASE3_SET(dst, src) \
+ (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
+#define RXTX_REG102 0x0cc
+#define RXTX_REG102_FREQLOOP_LIMIT_SET(dst, src) \
+ (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
+#define RXTX_REG114 0x0e4
+#define RXTX_REG121 0x0f2
+#define RXTX_REG121_SUMOS_CAL_CODE_RD(src) ((0x0000003e & (u32)(src)) >> 0x1)
+#define RXTX_REG125 0x0fa
+#define RXTX_REG125_PQ_REG_SET(dst, src) \
+ (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
+#define RXTX_REG125_SIGN_PQ_SET(dst, src) \
+ (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
+#define RXTX_REG125_SIGN_PQ_2C_SET(dst, src) \
+ (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
+#define RXTX_REG125_PHZ_MANUALCODE_SET(dst, src) \
+ (((dst) & ~0x0000007c) | (((u32) (src) << 2) & 0x0000007c))
+#define RXTX_REG125_PHZ_MANUAL_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG127 0x0fe
+#define RXTX_REG127_FORCE_SUM_CAL_START_MASK 0x00000002
+#define RXTX_REG127_FORCE_LAT_CAL_START_MASK 0x00000004
+#define RXTX_REG127_FORCE_SUM_CAL_START_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG127_FORCE_LAT_CAL_START_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG127_LATCH_MAN_CAL_ENA_SET(dst, src) \
+ (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
+#define RXTX_REG127_DO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG127_XO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG128 0x100
+#define RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(dst, src) \
+ (((dst) & ~0x0000000c) | (((u32) (src) << 2) & 0x0000000c))
+#define RXTX_REG128_EO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG128_SO_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG129 0x102
+#define RXTX_REG129_DE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG129_XE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG130 0x104
+#define RXTX_REG130_EE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
+#define RXTX_REG130_SE_LATCH_MANCAL_SET(dst, src) \
+ (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
+#define RXTX_REG145 0x122
+#define RXTX_REG145_TX_IDLE_SATA_SET(dst, src) \
+ (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
+#define RXTX_REG145_RXES_ENA_SET(dst, src) \
+ (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
+#define RXTX_REG145_RXDFE_CONFIG_SET(dst, src) \
+ (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
+#define RXTX_REG145_RXVWES_LATENA_SET(dst, src) \
+ (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
+#define RXTX_REG147 0x126
+#define RXTX_REG148 0x128
+
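The *_SET() and *_RD() helpers above are plain mask-and-shift macros, so their effect can be checked with ordinary integer arithmetic. A minimal standalone sketch follows; the macro bodies are copied from the definitions above, and the input values are made-up examples rather than hardware defaults:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /* Copied verbatim from the definitions above */
    #define RXTX_REG8_SD_VREF_SET(dst, src) \
    	(((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
    #define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)

    int main(void)
    {
    	/* Writing 0x4 into the SD_VREF field (bits 7:4) of a zeroed register */
    	assert(RXTX_REG8_SD_VREF_SET(0x0000, 0x4) == 0x40);
    	/* Reading the DO latch calibration code (bits 15:10) out of 0x8c00 */
    	assert(RXTX_REG21_DO_LATCH_CALOUT_RD(0x8c00) == 0x23);
    	return 0;
    }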
+/* Clock macro type */
+enum cmu_type_t {
+ REF_CMU = 0, /* Clock macro is the internal reference clock */
+ PHY_CMU = 1, /* Clock macro is the PLL for the Serdes */
+};
+
+enum mux_type_t {
+ MUX_SELECT_ATA = 0, /* Switch the MUX to ATA */
+ MUX_SELECT_SGMMII = 0, /* Switch the MUX to SGMII */
+};
+
+enum clk_type_t {
+ CLK_EXT_DIFF = 0, /* External differential */
+ CLK_INT_DIFF = 1, /* Internal differential */
+ CLK_INT_SING = 2, /* Internal single ended */
+};
+
+enum phy_mode {
+ MODE_SATA = 0, /* List them for simple reference */
+ MODE_SGMII = 1,
+ MODE_PCIE = 2,
+ MODE_USB = 3,
+ MODE_XFI = 4,
+ MODE_MAX
+};
+
+struct xgene_sata_override_param {
+ u32 speed[MAX_LANE]; /* Index for override parameter per lane */
+ u32 txspeed[3]; /* Tx speed */
+ u32 txboostgain[MAX_LANE*3]; /* Tx freq boost and gain control */
+ u32 txeyetuning[MAX_LANE*3]; /* Tx eye tuning */
+ u32 txeyedirection[MAX_LANE*3]; /* Tx eye tuning direction */
+ u32 txamplitude[MAX_LANE*3]; /* Tx amplitude control */
+ u32 txprecursor_cn1[MAX_LANE*3]; /* Tx emphasis taps 1st pre-cursor */
+ u32 txprecursor_cn2[MAX_LANE*3]; /* Tx emphasis taps 2nd pre-cursor */
+ u32 txpostcursor_cp1[MAX_LANE*3]; /* Tx emphasis taps post-cursor */
+};
+
+struct xgene_phy_ctx {
+ struct device *dev;
+ struct phy *phy;
+ enum phy_mode mode; /* Mode of operation */
+ enum clk_type_t clk_type; /* Input clock selection */
+ void __iomem *sds_base; /* PHY CSR base addr */
+ struct clk *clk; /* Optional clock */
+
+ /* Override Serdes parameters */
+ struct xgene_sata_override_param sata_param;
+};
+
+/*
+ * For chips earlier than the A3 version, enable this flag.
+ * To enable, pass the boot argument phy_xgene.preA3Chip=1
+ */
+static int preA3Chip;
+MODULE_PARM_DESC(preA3Chip, "Enable pre-A3 chip support (1=enable 0=disable)");
+module_param_named(preA3Chip, preA3Chip, int, 0444);
+
+static void sds_wr(void __iomem *csr_base, u32 indirect_cmd_reg,
+ u32 indirect_data_reg, u32 addr, u32 data)
+{
+ unsigned long deadline = jiffies + HZ;
+ u32 val;
+ u32 cmd;
+
+ cmd = CFG_IND_WR_CMD_MASK | CFG_IND_CMD_DONE_MASK;
+ cmd = CFG_IND_ADDR_SET(cmd, addr);
+ writel(data, csr_base + indirect_data_reg);
+ readl(csr_base + indirect_data_reg); /* Force a barrier */
+ writel(cmd, csr_base + indirect_cmd_reg);
+ readl(csr_base + indirect_cmd_reg); /* Force a barrier */
+ do {
+ val = readl(csr_base + indirect_cmd_reg);
+ } while (!(val & CFG_IND_CMD_DONE_MASK) &&
+ time_before(jiffies, deadline));
+ if (!(val & CFG_IND_CMD_DONE_MASK))
+ pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
+ csr_base + indirect_cmd_reg, addr, data);
+}
+
+static void sds_rd(void __iomem *csr_base, u32 indirect_cmd_reg,
+ u32 indirect_data_reg, u32 addr, u32 *data)
+{
+ unsigned long deadline = jiffies + HZ;
+ u32 val;
+ u32 cmd;
+
+ cmd = CFG_IND_RD_CMD_MASK | CFG_IND_CMD_DONE_MASK;
+ cmd = CFG_IND_ADDR_SET(cmd, addr);
+ writel(cmd, csr_base + indirect_cmd_reg);
+ readl(csr_base + indirect_cmd_reg); /* Force a barrier */
+ do {
+ val = readl(csr_base + indirect_cmd_reg);
+ } while (!(val & CFG_IND_CMD_DONE_MASK) &&
+ time_before(jiffies, deadline));
+ *data = readl(csr_base + indirect_data_reg);
+ if (!(val & CFG_IND_CMD_DONE_MASK))
+ pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
+ csr_base + indirect_cmd_reg, addr, *data);
+}
+
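For orientation, the two helpers above are combined with the field macros in a read-modify-write pattern throughout the rest of the file. A hedged sketch of that idiom is shown below; the cmu_*/serdes_* wrappers defined next do the same thing, except that they first add the appropriate indirect offset (and lane stride) to reg:

    /* Illustration only: update one field of an indirectly-mapped register */
    static void example_rmw_sd_vref(void __iomem *csr_base, u32 reg, u32 vref)
    {
    	u32 val;

    	/* Fetch the current register value through the indirect window */
    	sds_rd(csr_base, SATA_ENET_SDS_IND_CMD_REG,
    	       SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
    	/* Modify only the SD_VREF field, leaving the other bits intact */
    	val = RXTX_REG8_SD_VREF_SET(val, vref);
    	/* Write the merged value back */
    	sds_wr(csr_base, SATA_ENET_SDS_IND_CMD_REG,
    	       SATA_ENET_SDS_IND_WDATA_REG, reg, val);
    }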
+static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+
+ if (cmu_type == REF_CMU)
+ reg += SERDES_PLL_REF_INDIRECT_OFFSET;
+ else
+ reg += SERDES_PLL_INDIRECT_OFFSET;
+ sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_WDATA_REG, reg, data);
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
+ pr_debug("CMU WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data, val);
+}
+
+static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 *data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+
+ if (cmu_type == REF_CMU)
+ reg += SERDES_PLL_REF_INDIRECT_OFFSET;
+ else
+ reg += SERDES_PLL_INDIRECT_OFFSET;
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, data);
+ pr_debug("CMU RD addr 0x%X value 0x%08X\n", reg, *data);
+}
+
+static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val |= bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val &= ~bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
+static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val &= ~bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
+static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
+ u32 reg, u32 bits)
+{
+ u32 val;
+
+ cmu_rd(ctx, cmu_type, reg, &val);
+ val |= bits;
+ cmu_wr(ctx, cmu_type, reg, val);
+}
+
+static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+
+ reg += SERDES_INDIRECT_OFFSET;
+ reg += lane * SERDES_LANE_STRIDE;
+ sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_WDATA_REG, reg, data);
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
+ pr_debug("SERDES WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data,
+ val);
+}
+
+static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data)
+{
+ void __iomem *sds_base = ctx->sds_base;
+
+ reg += SERDES_INDIRECT_OFFSET;
+ reg += lane * SERDES_LANE_STRIDE;
+ sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
+ SATA_ENET_SDS_IND_RDATA_REG, reg, data);
+ pr_debug("SERDES RD addr 0x%X value 0x%08X\n", reg, *data);
+}
+
+static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
+ u32 bits)
+{
+ u32 val;
+
+ serdes_rd(ctx, lane, reg, &val);
+ val &= ~bits;
+ serdes_wr(ctx, lane, reg, val);
+}
+
+static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
+ u32 bits)
+{
+ u32 val;
+
+ serdes_rd(ctx, lane, reg, &val);
+ val |= bits;
+ serdes_wr(ctx, lane, reg, val);
+}
+
+static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+
+ /* Set the reset sequence delay for TX ready assertion */
+ cmu_rd(ctx, cmu_type, CMU_REG12, &val);
+ val = CMU_REG12_STATE_DELAY9_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG12, val);
+ /* Set the programmable stage delays between various enable stages */
+ cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222);
+ cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225);
+
+ /* Configure clock type */
+ if (clk_type == CLK_EXT_DIFF) {
+ /* Select external clock mux */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ val = CMU_REG0_PLL_REF_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+ /* Select CMOS as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev, "Set external reference clock\n");
+ } else if (clk_type == CLK_INT_DIFF) {
+ /* Select internal clock mux */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ val = CMU_REG0_PLL_REF_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+ /* Select CMOS as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev, "Set internal reference clock\n");
+ } else if (clk_type == CLK_INT_SING) {
+ /*
+ * NOTE: This clock type is NOT supported for controllers
+ * whose internal clock is shared with the PCIe controller.
+ *
+ * Select internal clock mux
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ /* Select CML as reference clock */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ dev_dbg(ctx->dev,
+ "Set internal single ended reference clock\n");
+ }
+}
+
+static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+ int ref_100MHz;
+
+ if (cmu_type == REF_CMU) {
+ /* Set VCO calibration voltage threshold */
+ cmu_rd(ctx, cmu_type, CMU_REG34, &val);
+ val = CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(val, 0x7);
+ val = CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(val, 0xc);
+ val = CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(val, 0x3);
+ val = CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(val, 0x8);
+ cmu_wr(ctx, cmu_type, CMU_REG34, val);
+ }
+
+ /* Set the VCO calibration counter */
+ cmu_rd(ctx, cmu_type, CMU_REG0, &val);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x4);
+ else
+ val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG0, val);
+
+ /* Configure PLL for calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_PLL_CP_SET(val, 0x1);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG1_PLL_CP_SEL_SET(val, 0x5);
+ else
+ val = CMU_REG1_PLL_CP_SEL_SET(val, 0x3);
+ if (cmu_type == REF_CMU)
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
+ else
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+
+ if (cmu_type != REF_CMU)
+ cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+
+ /* Configure the PLL for either 100MHz or 50MHz */
+ cmu_rd(ctx, cmu_type, CMU_REG2, &val);
+ if (cmu_type == REF_CMU) {
+ val = CMU_REG2_PLL_LFRES_SET(val, 0xa);
+ ref_100MHz = 1;
+ } else {
+ val = CMU_REG2_PLL_LFRES_SET(val, 0x3);
+ if (clk_type == CLK_EXT_DIFF)
+ ref_100MHz = 0;
+ else
+ ref_100MHz = 1;
+ }
+ if (ref_100MHz) {
+ val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_100M);
+ val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_100M);
+ } else {
+ val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_50M);
+ val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_50M);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG2, val);
+
+ /* Configure the VCO */
+ cmu_rd(ctx, cmu_type, CMU_REG3, &val);
+ if (cmu_type == REF_CMU) {
+ val = CMU_REG3_VCOVARSEL_SET(val, 0x3);
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x10);
+ } else {
+ val = CMU_REG3_VCOVARSEL_SET(val, 0xF);
+ if (preA3Chip)
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x15);
+ else
+ val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x1a);
+ val = CMU_REG3_VCO_MANMOMSEL_SET(val, 0x15);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG3, val);
+
+ /* Disable force PLL lock */
+ cmu_rd(ctx, cmu_type, CMU_REG26, &val);
+ val = CMU_REG26_FORCE_PLL_LOCK_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG26, val);
+
+ /* Setup PLL loop filter */
+ cmu_rd(ctx, cmu_type, CMU_REG5, &val);
+ val = CMU_REG5_PLL_LFSMCAP_SET(val, 0x3);
+ val = CMU_REG5_PLL_LFCAP_SET(val, 0x3);
+ if (cmu_type == REF_CMU || !preA3Chip)
+ val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x7);
+ else
+ val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x4);
+ cmu_wr(ctx, cmu_type, CMU_REG5, val);
+
+ /* Enable or disable manual calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG6, &val);
+ val = CMU_REG6_PLL_VREGTRIM_SET(val, preA3Chip ? 0x0 : 0x2);
+ val = CMU_REG6_MAN_PVT_CAL_SET(val, preA3Chip ? 0x1 : 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG6, val);
+
+ /* Configure lane for 20-bits */
+ if (cmu_type == PHY_CMU) {
+ cmu_rd(ctx, cmu_type, CMU_REG9, &val);
+ val = CMU_REG9_TX_WORD_MODE_CH1_SET(val,
+ CMU_REG9_WORD_LEN_20BIT);
+ val = CMU_REG9_TX_WORD_MODE_CH0_SET(val,
+ CMU_REG9_WORD_LEN_20BIT);
+ val = CMU_REG9_PLL_POST_DIVBY2_SET(val, 0x1);
+ if (!preA3Chip) {
+ val = CMU_REG9_VBG_BYPASSB_SET(val, 0x0);
+ val = CMU_REG9_IGEN_BYPASS_SET(val, 0x0);
+ }
+ cmu_wr(ctx, cmu_type, CMU_REG9, val);
+
+ if (!preA3Chip) {
+ cmu_rd(ctx, cmu_type, CMU_REG10, &val);
+ val = CMU_REG10_VREG_REFSEL_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG10, val);
+ }
+ }
+
+ cmu_rd(ctx, cmu_type, CMU_REG16, &val);
+ val = CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(val, 0x1);
+ val = CMU_REG16_BYPASS_PLL_LOCK_SET(val, 0x1);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x4);
+ else
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG16, val);
+
+ /* Configure for SATA */
+ cmu_rd(ctx, cmu_type, CMU_REG30, &val);
+ val = CMU_REG30_PCIE_MODE_SET(val, 0x0);
+ val = CMU_REG30_LOCK_COUNT_SET(val, 0x3);
+ cmu_wr(ctx, cmu_type, CMU_REG30, val);
+
+ /* Disable state machine bypass */
+ cmu_wr(ctx, cmu_type, CMU_REG31, 0xF);
+
+ cmu_rd(ctx, cmu_type, CMU_REG32, &val);
+ val = CMU_REG32_PVT_CAL_WAIT_SEL_SET(val, 0x3);
+ if (cmu_type == REF_CMU || preA3Chip)
+ val = CMU_REG32_IREF_ADJ_SET(val, 0x3);
+ else
+ val = CMU_REG32_IREF_ADJ_SET(val, 0x1);
+ cmu_wr(ctx, cmu_type, CMU_REG32, val);
+
+ /* Set VCO calibration threshold */
+ if (cmu_type != REF_CMU && preA3Chip)
+ cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27);
+ else
+ cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c);
+
+ /* Set CTLE Override and override waiting from state machine */
+ cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F);
+}
+
+static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type)
+{
+ u32 val;
+
+ /* Set SSC modulation value */
+ cmu_rd(ctx, cmu_type, CMU_REG35, &val);
+ val = CMU_REG35_PLL_SSC_MOD_SET(val, 98);
+ cmu_wr(ctx, cmu_type, CMU_REG35, val);
+
+ /* Enable SSC, set vertical step and DSM value */
+ cmu_rd(ctx, cmu_type, CMU_REG36, &val);
+ val = CMU_REG36_PLL_SSC_VSTEP_SET(val, 30);
+ val = CMU_REG36_PLL_SSC_EN_SET(val, 1);
+ val = CMU_REG36_PLL_SSC_DSMSEL_SET(val, 1);
+ cmu_wr(ctx, cmu_type, CMU_REG36, val);
+
+ /* Reset the PLL */
+ cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+ cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+
+ /* Force VCO calibration to restart */
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+}
+
+static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx)
+{
+ u32 val;
+ u32 reg;
+ int i;
+ int lane;
+
+ for (lane = 0; lane < MAX_LANE; lane++) {
+ serdes_wr(ctx, lane, RXTX_REG147, 0x6);
+
+ /* Set boost control for quarter, half, and full rate */
+ serdes_rd(ctx, lane, RXTX_REG0, &val);
+ val = RXTX_REG0_CTLE_EQ_HR_SET(val, 0x10);
+ val = RXTX_REG0_CTLE_EQ_QR_SET(val, 0x10);
+ val = RXTX_REG0_CTLE_EQ_FR_SET(val, 0x10);
+ serdes_wr(ctx, lane, RXTX_REG0, val);
+
+ /* Set boost control value */
+ serdes_rd(ctx, lane, RXTX_REG1, &val);
+ val = RXTX_REG1_RXACVCM_SET(val, 0x7);
+ val = RXTX_REG1_CTLE_EQ_SET(val,
+ ctx->sata_param.txboostgain[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ serdes_wr(ctx, lane, RXTX_REG1, val);
+
+ /*
+ * Latch VTT value based on the termination to ground and
+ * enable TX FIFO
+ */
+ serdes_rd(ctx, lane, RXTX_REG2, &val);
+ val = RXTX_REG2_VTT_ENA_SET(val, 0x1);
+ val = RXTX_REG2_VTT_SEL_SET(val, 0x1);
+ val = RXTX_REG2_TX_FIFO_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG2, val);
+
+ /* Configure Tx for 20-bits */
+ serdes_rd(ctx, lane, RXTX_REG4, &val);
+ val = RXTX_REG4_TX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
+ serdes_wr(ctx, lane, RXTX_REG4, val);
+
+ if (!preA3Chip) {
+ serdes_rd(ctx, lane, RXTX_REG1, &val);
+ val = RXTX_REG1_RXVREG1_SET(val, 0x2);
+ val = RXTX_REG1_RXIREF_ADJ_SET(val, 0x2);
+ serdes_wr(ctx, lane, RXTX_REG1, val);
+ }
+
+ /* Set pre-emphasis first 1 and 2, and post-emphasis values */
+ serdes_rd(ctx, lane, RXTX_REG5, &val);
+ val = RXTX_REG5_TX_CN1_SET(val,
+ ctx->sata_param.txprecursor_cn1[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG5_TX_CP1_SET(val,
+ ctx->sata_param.txpostcursor_cp1[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG5_TX_CN2_SET(val,
+ ctx->sata_param.txprecursor_cn2[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ serdes_wr(ctx, lane, RXTX_REG5, val);
+
+ /* Set TX amplitude value */
+ serdes_rd(ctx, lane, RXTX_REG6, &val);
+ val = RXTX_REG6_TXAMP_CNTL_SET(val,
+ ctx->sata_param.txamplitude[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG6_TXAMP_ENA_SET(val, 0x1);
+ val = RXTX_REG6_TX_IDLE_SET(val, 0x0);
+ val = RXTX_REG6_RX_BIST_RESYNC_SET(val, 0x0);
+ val = RXTX_REG6_RX_BIST_ERRCNT_RD_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG6, val);
+
+ /* Configure Rx for 20-bits */
+ serdes_rd(ctx, lane, RXTX_REG7, &val);
+ val = RXTX_REG7_BIST_ENA_RX_SET(val, 0x0);
+ val = RXTX_REG7_RX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
+ serdes_wr(ctx, lane, RXTX_REG7, val);
+
+ /* Set CDR and LOS values and enable Rx SSC */
+ serdes_rd(ctx, lane, RXTX_REG8, &val);
+ val = RXTX_REG8_CDR_LOOP_ENA_SET(val, 0x1);
+ val = RXTX_REG8_CDR_BYPASS_RXLOS_SET(val, 0x0);
+ val = RXTX_REG8_SSC_ENABLE_SET(val, 0x1);
+ val = RXTX_REG8_SD_DISABLE_SET(val, 0x0);
+ val = RXTX_REG8_SD_VREF_SET(val, 0x4);
+ serdes_wr(ctx, lane, RXTX_REG8, val);
+
+ /* Set phase adjust upper/lower limits */
+ serdes_rd(ctx, lane, RXTX_REG11, &val);
+ val = RXTX_REG11_PHASE_ADJUST_LIMIT_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG11, val);
+
+ /* Enable Latch Off; disable SUMOS and Tx termination */
+ serdes_rd(ctx, lane, RXTX_REG12, &val);
+ val = RXTX_REG12_LATCH_OFF_ENA_SET(val, 0x1);
+ val = RXTX_REG12_SUMOS_ENABLE_SET(val, 0x0);
+ val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG12, val);
+
+ /* Set period error latch to 512T and enable BWL */
+ serdes_rd(ctx, lane, RXTX_REG26, &val);
+ val = RXTX_REG26_PERIOD_ERROR_LATCH_SET(val, 0x0);
+ val = RXTX_REG26_BLWC_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG26, val);
+
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0);
+
+ /* Set DFE loop preset value */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x0);
+
+ /* Set Eye Monitor counter width to 12-bit */
+ serdes_rd(ctx, lane, RXTX_REG61, &val);
+ val = RXTX_REG61_ISCAN_INBERT_SET(val, 0x1);
+ val = RXTX_REG61_LOADFREQ_SHIFT_SET(val, 0x0);
+ val = RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG61, val);
+
+ serdes_rd(ctx, lane, RXTX_REG62, &val);
+ val = RXTX_REG62_PERIOD_H1_QLATCH_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG62, val);
+
+ /* Set BW select tap X for DFE loop */
+ for (i = 0; i < 9; i++) {
+ reg = RXTX_REG81 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG89_MU_TH7_SET(val, 0xe);
+ val = RXTX_REG89_MU_TH8_SET(val, 0xe);
+ val = RXTX_REG89_MU_TH9_SET(val, 0xe);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ /* Set BW select tap X for frequency adjust loop */
+ for (i = 0; i < 3; i++) {
+ reg = RXTX_REG96 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG96_MU_FREQ1_SET(val, 0x10);
+ val = RXTX_REG96_MU_FREQ2_SET(val, 0x10);
+ val = RXTX_REG96_MU_FREQ3_SET(val, 0x10);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ /* Set BW select tap X for phase adjust loop */
+ for (i = 0; i < 3; i++) {
+ reg = RXTX_REG99 + i * 2;
+ serdes_rd(ctx, lane, reg, &val);
+ val = RXTX_REG99_MU_PHASE1_SET(val, 0x7);
+ val = RXTX_REG99_MU_PHASE2_SET(val, 0x7);
+ val = RXTX_REG99_MU_PHASE3_SET(val, 0x7);
+ serdes_wr(ctx, lane, reg, val);
+ }
+
+ serdes_rd(ctx, lane, RXTX_REG102, &val);
+ val = RXTX_REG102_FREQLOOP_LIMIT_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG102, val);
+
+ serdes_wr(ctx, lane, RXTX_REG114, 0xffe0);
+
+ serdes_rd(ctx, lane, RXTX_REG125, &val);
+ val = RXTX_REG125_SIGN_PQ_SET(val,
+ ctx->sata_param.txeyedirection[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG125_PQ_REG_SET(val,
+ ctx->sata_param.txeyetuning[lane * 3 +
+ ctx->sata_param.speed[lane]]);
+ val = RXTX_REG125_PHZ_MANUAL_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG125, val);
+
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x0);
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ serdes_rd(ctx, lane, RXTX_REG128, &val);
+ val = RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(val, 0x3);
+ serdes_wr(ctx, lane, RXTX_REG128, val);
+
+ serdes_rd(ctx, lane, RXTX_REG145, &val);
+ val = RXTX_REG145_RXDFE_CONFIG_SET(val, 0x3);
+ val = RXTX_REG145_TX_IDLE_SATA_SET(val, 0x0);
+ if (preA3Chip) {
+ val = RXTX_REG145_RXES_ENA_SET(val, 0x1);
+ val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x1);
+ } else {
+ val = RXTX_REG145_RXES_ENA_SET(val, 0x0);
+ val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x0);
+ }
+ serdes_wr(ctx, lane, RXTX_REG145, val);
+
+ /*
+ * Set Rx LOS filter clock rate, sample rate, and threshold
+ * windows
+ */
+ for (i = 0; i < 4; i++) {
+ reg = RXTX_REG148 + i * 2;
+ serdes_wr(ctx, lane, reg, 0xFFFF);
+ }
+ }
+}
+
+static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ void __iomem *csr_serdes = ctx->sds_base;
+ int loop;
+ u32 val;
+
+ /* Release PHY main reset */
+ writel(0xdf, csr_serdes + SATA_ENET_SDS_RST_CTL);
+ readl(csr_serdes + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+
+ if (cmu_type != REF_CMU) {
+ cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
+ /*
+ * As per PHY design spec, the PLL reset requires a minimum
+ * of 800us.
+ */
+ usleep_range(800, 1000);
+
+ cmu_rd(ctx, cmu_type, CMU_REG1, &val);
+ val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG1, val);
+ /*
+ * As per PHY design spec, the PLL auto calibration requires
+ * a minimum of 800us.
+ */
+ usleep_range(800, 1000);
+
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+ /*
+ * As per PHY design spec, the PLL requires a minimum of
+ * 800us to settle.
+ */
+ usleep_range(800, 1000);
+ }
+
+ if (!preA3Chip)
+ goto skip_manual_cal;
+
+ /*
+ * Configure the termination resistor calibration.
+ * The serial receive pins, RXP/RXN, have a termination resistor
+ * that must be calibrated.
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x12);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG17,
+ CMU_REG17_PVT_TERM_MAN_ENA_MASK);
+ /*
+ * The serial transmit pins, TXP/TXN, have Pull-UP and Pull-DOWN
+ * resistors that are required to be calibrated.
+ * Configure the pull DOWN calibration
+ */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x29);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
+ CMU_REG16_PVT_DN_MAN_ENA_MASK);
+ /* Configure the pull UP calibration */
+ cmu_rd(ctx, cmu_type, CMU_REG17, &val);
+ val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x28);
+ val = CMU_REG17_RESERVED_7_SET(val, 0x0);
+ cmu_wr(ctx, cmu_type, CMU_REG17, val);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
+ CMU_REG16_PVT_UP_MAN_ENA_MASK);
+
+skip_manual_cal:
+ /* Poll the PLL calibration completion status for at least 1 ms */
+ loop = 100;
+ do {
+ cmu_rd(ctx, cmu_type, CMU_REG7, &val);
+ if (CMU_REG7_PLL_CALIB_DONE_RD(val))
+ break;
+ /*
+ * As per PHY design spec, PLL calibration status requires
+ * a minimum of 10us to be updated.
+ */
+ usleep_range(10, 100);
+ } while (--loop > 0);
+
+ cmu_rd(ctx, cmu_type, CMU_REG7, &val);
+ dev_dbg(ctx->dev, "PLL calibration %s\n",
+ CMU_REG7_PLL_CALIB_DONE_RD(val) ? "done" : "failed");
+ if (CMU_REG7_VCO_CAL_FAIL_RD(val)) {
+ dev_err(ctx->dev,
+ "PLL calibration failed due to VCO failure\n");
+ return -1;
+ }
+ dev_dbg(ctx->dev, "PLL calibration successful\n");
+
+ cmu_rd(ctx, cmu_type, CMU_REG15, &val);
+ dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not ");
+ return 0;
+}
+
+static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx,
+ enum cmu_type_t cmu_type,
+ enum clk_type_t clk_type)
+{
+ u32 val;
+
+ dev_dbg(ctx->dev, "Reset VCO and re-start again\n");
+ if (cmu_type == PHY_CMU) {
+ cmu_rd(ctx, cmu_type, CMU_REG16, &val);
+ val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
+ cmu_wr(ctx, cmu_type, CMU_REG16, val);
+ }
+
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK);
+ cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
+ CMU_REG32_FORCE_VCOCAL_START_MASK);
+}
+
+static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx,
+ enum clk_type_t clk_type, int ssc_enable)
+{
+ void __iomem *sds_base = ctx->sds_base;
+ u32 val;
+ int i;
+
+ /* Configure the PHY for operation */
+ dev_dbg(ctx->dev, "Reset PHY\n");
+ /* Place PHY into reset */
+ writel(0x0, sds_base + SATA_ENET_SDS_RST_CTL);
+ val = readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+ /* Release PHY lane from reset (active high) */
+ writel(0x20, sds_base + SATA_ENET_SDS_RST_CTL);
+ readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+ /* Release all PHY module out of reset except PHY main reset */
+ writel(0xde, sds_base + SATA_ENET_SDS_RST_CTL);
+ readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
+
+ /* Set the operation speed */
+ val = readl(sds_base + SATA_ENET_SDS_CTL1);
+ val = CFG_I_SPD_SEL_CDR_OVR1_SET(val,
+ ctx->sata_param.txspeed[ctx->sata_param.speed[0]]);
+ writel(val, sds_base + SATA_ENET_SDS_CTL1);
+
+ dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n");
+ val = readl(sds_base + SATA_ENET_SDS_CTL0);
+ val = REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(val, 0x4421);
+ writel(val, sds_base + SATA_ENET_SDS_CTL0);
+
+ /* Configure the clock macro unit (CMU) clock type */
+ xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type);
+
+ /* Configure the clock macro */
+ xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type);
+
+ /* Enable SSC if enabled */
+ if (ssc_enable)
+ xgene_phy_ssc_enable(ctx, PHY_CMU);
+
+ /* Configure PHY lanes */
+ xgene_phy_sata_cfg_lanes(ctx);
+
+ /* Set Rx/Tx 20-bit */
+ val = readl(sds_base + SATA_ENET_SDS_PCS_CTL0);
+ val = REGSPEC_CFG_I_RX_WORDMODE0_SET(val, 0x3);
+ val = REGSPEC_CFG_I_TX_WORDMODE0_SET(val, 0x3);
+ writel(val, sds_base + SATA_ENET_SDS_PCS_CTL0);
+
+ /* Start PLL calibration and retry up to 10 times on failure */
+ i = 10;
+ do {
+ if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type))
+ break;
+ /* If failed, toggle the VCO power signal and start again */
+ xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type);
+ } while (--i > 0);
+ /* Even on failure, allow execution to continue anyway */
+ if (i <= 0)
+ dev_err(ctx->dev, "PLL calibration failed\n");
+
+ return 0;
+}
+
+static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
+ enum clk_type_t clk_type,
+ int ssc_enable)
+{
+ int rc;
+
+ dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type);
+
+ if (ctx->mode == MODE_SATA) {
+ rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable);
+ if (rc)
+ return rc;
+ } else {
+ dev_err(ctx->dev, "Un-supported customer pin mode %d\n",
+ ctx->mode);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Receiver Offset Calibration:
+ *
+ * Calibrate the receiver signal path offset in two steps - summer and
+ * latch calibrations
+ */
+static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
+{
+ int i;
+ struct {
+ u32 reg;
+ u32 val;
+ } serdes_reg[] = {
+ {RXTX_REG38, 0x0},
+ {RXTX_REG39, 0xff00},
+ {RXTX_REG40, 0xffff},
+ {RXTX_REG41, 0xffff},
+ {RXTX_REG42, 0xffff},
+ {RXTX_REG43, 0xffff},
+ {RXTX_REG44, 0xffff},
+ {RXTX_REG45, 0xffff},
+ {RXTX_REG46, 0xffff},
+ {RXTX_REG47, 0xfffc},
+ {RXTX_REG48, 0x0},
+ {RXTX_REG49, 0x0},
+ {RXTX_REG50, 0x0},
+ {RXTX_REG51, 0x0},
+ {RXTX_REG52, 0x0},
+ {RXTX_REG53, 0x0},
+ {RXTX_REG54, 0x0},
+ {RXTX_REG55, 0x0},
+ };
+
+ /* Start SUMMER calibration */
+ serdes_setbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_SUM_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the Summer calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+ serdes_clrbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_SUM_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the auto calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+
+ /* Start latch calibration */
+ serdes_setbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_LAT_CAL_START_MASK);
+ /*
+ * As per PHY design spec, the latch calibration requires a minimum
+ * of 100us to complete.
+ */
+ usleep_range(100, 500);
+ serdes_clrbits(ctx, lane, RXTX_REG127,
+ RXTX_REG127_FORCE_LAT_CAL_START_MASK);
+
+ /* Configure the PHY lane for calibration */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x7);
+ serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
+ serdes_clrbits(ctx, lane, RXTX_REG4,
+ RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK);
+ serdes_clrbits(ctx, lane, RXTX_REG7,
+ RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK);
+ for (i = 0; i < ARRAY_SIZE(serdes_reg); i++)
+ serdes_wr(ctx, lane, serdes_reg[i].reg,
+ serdes_reg[i].val);
+}
+
+static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane)
+{
+ /* Reset digital Rx */
+ serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
+ /* As per PHY design spec, the reset requires a minimum of 100us. */
+ usleep_range(100, 150);
+ serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
+}
+
+static int xgene_phy_get_avg(int accum, int samples)
+{
+ return (accum + (samples / 2)) / samples;
+}
+
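xgene_phy_get_avg() above is a round-to-nearest integer average; plain truncating division would bias the calibration codes downward by up to one step. A small hedged check with made-up accumulator values:

    #include <assert.h>

    /*
     * Same arithmetic as xgene_phy_get_avg(): adding samples/2 before dividing
     * turns C's truncating division into round-to-nearest.
     */
    static int avg_round_nearest(int accum, int samples)
    {
    	return (accum + (samples / 2)) / samples;
    }

    int main(void)
    {
    	assert(avg_round_nearest(57, 10) == 6);	/* 5.7 rounds up   */
    	assert(avg_round_nearest(53, 10) == 5);	/* 5.3 rounds down */
    	return 0;
    }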
+static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane)
+{
+ int max_loop = 10;
+ int avg_loop = 0;
+ int lat_do = 0, lat_xo = 0, lat_eo = 0, lat_so = 0;
+ int lat_de = 0, lat_xe = 0, lat_ee = 0, lat_se = 0;
+ int sum_cal = 0;
+ int lat_do_itr, lat_xo_itr, lat_eo_itr, lat_so_itr;
+ int lat_de_itr, lat_xe_itr, lat_ee_itr, lat_se_itr;
+ int sum_cal_itr;
+ int fail_even;
+ int fail_odd;
+ u32 val;
+
+ dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n",
+ lane);
+
+ /* Enable RX Hi-Z termination */
+ serdes_setbits(ctx, lane, RXTX_REG12,
+ RXTX_REG12_RX_DET_TERM_ENABLE_MASK);
+ /* Turn off DFE */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0000);
+ /* DFE Presets to zero */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x0000);
+
+ /*
+ * Receiver Offset Calibration:
+ * Calibrate the receiver signal path offset in two steps - summer
+ * and latch calibration.
+ * Run the receiver offset calibration multiple times to determine
+ * the average value to use.
+ */
+ while (avg_loop < max_loop) {
+ /* Start the calibration */
+ xgene_phy_force_lat_summer_cal(ctx, lane);
+
+ serdes_rd(ctx, lane, RXTX_REG21, &val);
+ lat_do_itr = RXTX_REG21_DO_LATCH_CALOUT_RD(val);
+ lat_xo_itr = RXTX_REG21_XO_LATCH_CALOUT_RD(val);
+ fail_odd = RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG22, &val);
+ lat_eo_itr = RXTX_REG22_EO_LATCH_CALOUT_RD(val);
+ lat_so_itr = RXTX_REG22_SO_LATCH_CALOUT_RD(val);
+ fail_even = RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG23, &val);
+ lat_de_itr = RXTX_REG23_DE_LATCH_CALOUT_RD(val);
+ lat_xe_itr = RXTX_REG23_XE_LATCH_CALOUT_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG24, &val);
+ lat_ee_itr = RXTX_REG24_EE_LATCH_CALOUT_RD(val);
+ lat_se_itr = RXTX_REG24_SE_LATCH_CALOUT_RD(val);
+
+ serdes_rd(ctx, lane, RXTX_REG121, &val);
+ sum_cal_itr = RXTX_REG121_SUMOS_CAL_CODE_RD(val);
+
+ /* Check for failure. If passed, sum them for averaging */
+ if ((fail_even == 0 || fail_even == 1) &&
+ (fail_odd == 0 || fail_odd == 1)) {
+ lat_do += lat_do_itr;
+ lat_xo += lat_xo_itr;
+ lat_eo += lat_eo_itr;
+ lat_so += lat_so_itr;
+ lat_de += lat_de_itr;
+ lat_xe += lat_xe_itr;
+ lat_ee += lat_ee_itr;
+ lat_se += lat_se_itr;
+ sum_cal += sum_cal_itr;
+
+ dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop);
+ dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
+ lat_do_itr, lat_xo_itr, lat_eo_itr,
+ lat_so_itr);
+ dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
+ lat_de_itr, lat_xe_itr, lat_ee_itr,
+ lat_se_itr);
+ dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr);
+ ++avg_loop;
+ } else {
+ dev_err(ctx->dev,
+ "Receiver calibration failed at %d loop\n",
+ avg_loop);
+ }
+ xgene_phy_reset_rxd(ctx, lane);
+ }
+
+ /* Update latch manual calibration with average value */
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_DO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_do, max_loop));
+ val = RXTX_REG127_XO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_xo, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ serdes_rd(ctx, lane, RXTX_REG128, &val);
+ val = RXTX_REG128_EO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_eo, max_loop));
+ val = RXTX_REG128_SO_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_so, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG128, val);
+
+ serdes_rd(ctx, lane, RXTX_REG129, &val);
+ val = RXTX_REG129_DE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_de, max_loop));
+ val = RXTX_REG129_XE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_xe, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG129, val);
+
+ serdes_rd(ctx, lane, RXTX_REG130, &val);
+ val = RXTX_REG130_EE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_ee, max_loop));
+ val = RXTX_REG130_SE_LATCH_MANCAL_SET(val,
+ xgene_phy_get_avg(lat_se, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG130, val);
+
+ /* Update SUMMER calibration with average value */
+ serdes_rd(ctx, lane, RXTX_REG14, &val);
+ val = RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(val,
+ xgene_phy_get_avg(sum_cal, max_loop));
+ serdes_wr(ctx, lane, RXTX_REG14, val);
+
+ dev_dbg(ctx->dev, "Average Value:\n");
+ dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
+ xgene_phy_get_avg(lat_do, max_loop),
+ xgene_phy_get_avg(lat_xo, max_loop),
+ xgene_phy_get_avg(lat_eo, max_loop),
+ xgene_phy_get_avg(lat_so, max_loop));
+ dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
+ xgene_phy_get_avg(lat_de, max_loop),
+ xgene_phy_get_avg(lat_xe, max_loop),
+ xgene_phy_get_avg(lat_ee, max_loop),
+ xgene_phy_get_avg(lat_se, max_loop));
+ dev_dbg(ctx->dev, "SUM 0x%x\n",
+ xgene_phy_get_avg(sum_cal, max_loop));
+
+ serdes_rd(ctx, lane, RXTX_REG14, &val);
+ val = RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(val, 0x1);
+ serdes_wr(ctx, lane, RXTX_REG14, val);
+ dev_dbg(ctx->dev, "Enable Manual Summer calibration\n");
+
+ serdes_rd(ctx, lane, RXTX_REG127, &val);
+ val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x1);
+ dev_dbg(ctx->dev, "Enable Manual Latch calibration\n");
+ serdes_wr(ctx, lane, RXTX_REG127, val);
+
+ /* Disable RX Hi-Z termination */
+ serdes_rd(ctx, lane, RXTX_REG12, &val);
+ val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0);
+ serdes_wr(ctx, lane, RXTX_REG12, val);
+ /* Turn on DFE */
+ serdes_wr(ctx, lane, RXTX_REG28, 0x0007);
+ /* Set DFE preset */
+ serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
+}
+
+static int xgene_phy_hw_init(struct phy *phy)
+{
+ struct xgene_phy_ctx *ctx = phy_get_drvdata(phy);
+ int rc;
+ int i;
+
+ rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE);
+ if (rc) {
+ dev_err(ctx->dev, "PHY initialize failed %d\n", rc);
+ return rc;
+ }
+
+ /* Setup clock properly after PHY configuration */
+ if (!IS_ERR(ctx->clk)) {
+ /* HW requires a toggle of the clock */
+ clk_prepare_enable(ctx->clk);
+ clk_disable_unprepare(ctx->clk);
+ clk_prepare_enable(ctx->clk);
+ }
+
+ /* Compute average value */
+ for (i = 0; i < MAX_LANE; i++)
+ xgene_phy_gen_avg_val(ctx, i);
+
+ dev_dbg(ctx->dev, "PHY initialized\n");
+ return 0;
+}
+
+static const struct phy_ops xgene_phy_ops = {
+ .init = xgene_phy_hw_init,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *xgene_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xgene_phy_ctx *ctx = dev_get_drvdata(dev);
+
+ if (args->args_count <= 0)
+ return ERR_PTR(-EINVAL);
+ if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ ctx->mode = args->args[0];
+ return ctx->phy;
+}
+
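For context, a hedged sketch of the consumer side follows. The "sata-phy" con-id and the function name are assumptions, not part of this patch: a SATA host driver would look up this PHY through the generic PHY framework, and the single phandle argument in its device tree node (0 for MODE_SATA) is what xgene_phy_xlate() above records in ctx->mode before returning the PHY; phy_init() then ends up in xgene_phy_hw_init().

    #include <linux/phy/phy.h>

    static int example_consumer_attach(struct device *dev)
    {
    	struct phy *phy;
    	int ret;

    	/* "sata-phy" is a hypothetical con-id matching a phy-names entry */
    	phy = devm_phy_get(dev, "sata-phy");
    	if (IS_ERR(phy))
    		return PTR_ERR(phy);

    	/* Calls xgene_phy_ops.init, i.e. xgene_phy_hw_init() */
    	ret = phy_init(phy);
    	if (ret)
    		return ret;

    	return 0;
    }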
+static void xgene_phy_get_param(struct platform_device *pdev,
+ const char *name, u32 *buffer,
+ int count, u32 *default_val,
+ u32 conv_factor)
+{
+ int i;
+
+ if (!of_property_read_u32_array(pdev->dev.of_node, name, buffer,
+ count)) {
+ for (i = 0; i < count; i++)
+ buffer[i] /= conv_factor;
+ return;
+ }
+ /* Property does not exist, load defaults */
+ for (i = 0; i < count; i++)
+ buffer[i] = default_val[i % 3];
+}
+
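A hedged illustration of the fallback path in xgene_phy_get_param() above: when the optional DT property is absent, the six-entry table (three speed entries per lane) is filled by repeating the three-entry default triplet, with no conv_factor scaling; values read from the DT, by contrast, are divided by conv_factor. The numbers below are placeholders, not real defaults:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;

    int main(void)
    {
    	u32 buffer[6];
    	u32 default_val[3] = { 0x1, 0x2, 0x3 };	/* hypothetical Gen1/2/3 values */
    	int i;

    	/* Same fill loop as the "property does not exist" branch above */
    	for (i = 0; i < 6; i++)
    		buffer[i] = default_val[i % 3];

    	assert(buffer[0] == 0x1 && buffer[3] == 0x1);	/* lane 0 / lane 1, Gen1 */
    	assert(buffer[2] == 0x3 && buffer[5] == 0x3);	/* lane 0 / lane 1, Gen3 */
    	return 0;
    }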
+static int xgene_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct xgene_phy_ctx *ctx;
+ struct resource *res;
+ int rc = 0;
+ u32 default_spd[] = DEFAULT_SATA_SPD_SEL;
+ u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN;
+ u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION;
+ u32 default_txeye_tuning[] = DEFAULT_SATA_TXEYETUNING;
+ u32 default_txamp[] = DEFAULT_SATA_TXAMP;
+ u32 default_txcn1[] = DEFAULT_SATA_TXCN1;
+ u32 default_txcn2[] = DEFAULT_SATA_TXCN2;
+ u32 default_txcp1[] = DEFAULT_SATA_TXCP1;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->sds_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->sds_base)) {
+ rc = PTR_ERR(ctx->sds_base);
+ goto error;
+ }
+
+ /* Retrieve optional clock */
+ ctx->clk = clk_get(&pdev->dev, NULL);
+
+ /* Load override parameters */
+ xgene_phy_get_param(pdev, "apm,tx-eye-tuning",
+ ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1);
+ xgene_phy_get_param(pdev, "apm,tx-eye-direction",
+ ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1);
+ xgene_phy_get_param(pdev, "apm,tx-boost-gain",
+ ctx->sata_param.txboostgain, 6, default_txboost_gain, 1);
+ xgene_phy_get_param(pdev, "apm,tx-amplitude",
+ ctx->sata_param.txamplitude, 6, default_txamp, 13300);
+ xgene_phy_get_param(pdev, "apm,tx-pre-cursor1",
+ ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-pre-cursor2",
+ ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-post-cursor",
+ ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200);
+ xgene_phy_get_param(pdev, "apm,tx-speed",
+ ctx->sata_param.txspeed, 3, default_spd, 1);
+ for (i = 0; i < MAX_LANE; i++)
+ ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
+
+ ctx->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctx);
+
+ ctx->phy = devm_phy_create(ctx->dev, &xgene_phy_ops, NULL);
+ if (IS_ERR(ctx->phy)) {
+ dev_dbg(&pdev->dev, "Failed to create PHY\n");
+ rc = PTR_ERR(ctx->phy);
+ goto error;
+ }
+ phy_set_drvdata(ctx->phy, ctx);
+
+ phy_provider = devm_of_phy_provider_register(ctx->dev,
+ xgene_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ rc = PTR_ERR(phy_provider);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return rc;
+}
+
+static const struct of_device_id xgene_phy_of_match[] = {
+ {.compatible = "apm,xgene-phy",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_phy_of_match);
+
+static struct platform_driver xgene_phy_driver = {
+ .probe = xgene_phy_probe,
+ .driver = {
+ .name = "xgene-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = xgene_phy_of_match,
+ },
+};
+module_platform_driver(xgene_phy_driver);
+
+MODULE_DESCRIPTION("APM X-Gene Multi-Purpose PHY driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be361b7cd30f..06cee0189f3e 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,14 +217,14 @@ config PINCTRL_IMX28
select PINCTRL_MXS
config PINCTRL_MSM
- tristate
+ bool
select PINMUX
select PINCONF
select GENERIC_PINCONF
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
- depends on GPIOLIB && OF && OF_IRQ
+ depends on GPIOLIB && OF
select PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);
/* Add the pinctrl handle to the global list */
+ mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+ mutex_unlock(&pinctrl_list_mutex);
return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
- pinmux_init_device_debugfs(device_root, pctldev);
- pinconf_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->pmxops)
+ pinmux_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->confops)
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 340fb4e6c600..eda13de2e7c0 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -186,7 +186,9 @@ int pinctrl_dt_to_map(struct pinctrl *p)
/* CONFIG_OF enabled, p->dev not instantiated from DT */
if (!np) {
- dev_dbg(p->dev, "no of_node; not parsing pinctrl DT\n");
+ if (of_have_populated_dt())
+ dev_dbg(p->dev,
+ "no of_node; not parsing pinctrl DT\n");
return 0;
}
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index 366fa541ee91..cc298fade93a 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -8,6 +8,7 @@ config PINCTRL_MVEBU
config PINCTRL_DOVE
bool
select PINCTRL_MVEBU
+ select MFD_SYSCON
config PINCTRL_KIRKWOOD
bool
@@ -17,6 +18,14 @@ config PINCTRL_ARMADA_370
bool
select PINCTRL_MVEBU
+config PINCTRL_ARMADA_375
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_38X
+ bool
+ select PINCTRL_MVEBU
+
config PINCTRL_ARMADA_XP
bool
select PINCTRL_MVEBU
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index 37c253297af0..bc1b9f14f539 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -2,4 +2,6 @@ obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o
obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o
obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
+obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
+obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index ae1f760cbdd2..670e5b01c678 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -23,6 +23,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -373,7 +385,7 @@ static struct of_device_id armada_370_pinctrl_of_match[] = {
};
static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
- MPP_REG_CTRL(0, 65),
+ MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
@@ -385,6 +397,12 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 370 */
soc->controls = mv88f6710_mpp_controls;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
new file mode 100644
index 000000000000..db078fe7ace6
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -0,0 +1,459 @@
+/*
+ * Marvell Armada 375 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_375_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_375_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad2"),
+ MPP_FUNCTION(0x2, "spi0", "cs1"),
+ MPP_FUNCTION(0x3, "spi1", "cs1"),
+ MPP_FUNCTION(0x5, "nand", "io2")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad3"),
+ MPP_FUNCTION(0x2, "spi0", "mosi"),
+ MPP_FUNCTION(0x3, "spi1", "mosi"),
+ MPP_FUNCTION(0x5, "nand", "io3")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad4"),
+ MPP_FUNCTION(0x2, "ptp", "eventreq"),
+ MPP_FUNCTION(0x3, "led", "c0"),
+ MPP_FUNCTION(0x4, "audio", "sdi"),
+ MPP_FUNCTION(0x5, "nand", "io4"),
+ MPP_FUNCTION(0x6, "spi1", "mosi")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad5"),
+ MPP_FUNCTION(0x2, "ptp", "triggen"),
+ MPP_FUNCTION(0x3, "led", "p3"),
+ MPP_FUNCTION(0x4, "audio", "mclk"),
+ MPP_FUNCTION(0x5, "nand", "io5"),
+ MPP_FUNCTION(0x6, "spi1", "miso")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad6"),
+ MPP_FUNCTION(0x2, "spi0", "miso"),
+ MPP_FUNCTION(0x3, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "nand", "io6")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad7"),
+ MPP_FUNCTION(0x2, "spi0", "cs2"),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x5, "nand", "io7"),
+ MPP_FUNCTION(0x6, "spi1", "miso")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad0"),
+ MPP_FUNCTION(0x3, "led", "p1"),
+ MPP_FUNCTION(0x4, "audio", "rclk"),
+ MPP_FUNCTION(0x5, "nand", "io0")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad1"),
+ MPP_FUNCTION(0x2, "ptp", "clk"),
+ MPP_FUNCTION(0x3, "led", "p2"),
+ MPP_FUNCTION(0x4, "audio", "extclk"),
+ MPP_FUNCTION(0x5, "nand", "io1")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev ", "bootcs"),
+ MPP_FUNCTION(0x2, "spi0", "cs0"),
+ MPP_FUNCTION(0x3, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "nand", "ce")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "nf", "wen"),
+ MPP_FUNCTION(0x2, "spi0", "sck"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x5, "nand", "we")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "nf", "ren"),
+ MPP_FUNCTION(0x2, "dram", "vttctrl"),
+ MPP_FUNCTION(0x3, "led", "c1"),
+ MPP_FUNCTION(0x5, "nand", "re"),
+ MPP_FUNCTION(0x6, "spi1", "sck")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "a0"),
+ MPP_FUNCTION(0x3, "led", "c2"),
+ MPP_FUNCTION(0x4, "audio", "sdo"),
+ MPP_FUNCTION(0x5, "nand", "cle")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "a1"),
+ MPP_FUNCTION(0x4, "audio", "bclk"),
+ MPP_FUNCTION(0x5, "nand", "ale")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "readyn"),
+ MPP_FUNCTION(0x2, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x3, "pcie1", "rstoutn"),
+ MPP_FUNCTION(0x5, "nand", "rb"),
+ MPP_FUNCTION(0x6, "spi1", "mosi")),
+ MPP_MODE(14,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "i2c0", "sda"),
+ MPP_FUNCTION(0x3, "uart1", "txd")),
+ MPP_MODE(15,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "i2c0", "sck"),
+ MPP_FUNCTION(0x3, "uart1", "rxd")),
+ MPP_MODE(16,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart0", "txd")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart0", "rxd")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "intn")),
+ MPP_MODE(19,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "rstn")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "pclk")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "fsync")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "drx")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "tdm", "dtx")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p0"),
+ MPP_FUNCTION(0x2, "ge1", "rxd0"),
+ MPP_FUNCTION(0x3, "sd", "cmd"),
+ MPP_FUNCTION(0x4, "uart0", "rts"),
+ MPP_FUNCTION(0x5, "spi0", "cs0"),
+ MPP_FUNCTION(0x6, "dev", "cs1")),
+ MPP_MODE(25,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p2"),
+ MPP_FUNCTION(0x2, "ge1", "rxd1"),
+ MPP_FUNCTION(0x3, "sd", "d0"),
+ MPP_FUNCTION(0x4, "uart0", "cts"),
+ MPP_FUNCTION(0x5, "spi0", "mosi"),
+ MPP_FUNCTION(0x6, "dev", "cs2")),
+ MPP_MODE(26,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxd2"),
+ MPP_FUNCTION(0x3, "sd", "d2"),
+ MPP_FUNCTION(0x4, "uart1", "rts"),
+ MPP_FUNCTION(0x5, "spi0", "cs1"),
+ MPP_FUNCTION(0x6, "led", "c1")),
+ MPP_MODE(27,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxd3"),
+ MPP_FUNCTION(0x3, "sd", "d1"),
+ MPP_FUNCTION(0x4, "uart1", "cts"),
+ MPP_FUNCTION(0x5, "spi0", "miso"),
+ MPP_FUNCTION(0x6, "led", "c2")),
+ MPP_MODE(28,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p3"),
+ MPP_FUNCTION(0x2, "ge1", "txctl"),
+ MPP_FUNCTION(0x3, "sd", "clk"),
+ MPP_FUNCTION(0x5, "dram", "vttctrl")),
+ MPP_MODE(29,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x2, "ge1", "rxclk"),
+ MPP_FUNCTION(0x3, "sd", "d3"),
+ MPP_FUNCTION(0x5, "spi0", "sck"),
+ MPP_FUNCTION(0x6, "pcie0", "rstoutn")),
+ MPP_MODE(30,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd0"),
+ MPP_FUNCTION(0x3, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "led", "p3"),
+ MPP_FUNCTION(0x6, "ptp", "eventreq")),
+ MPP_MODE(31,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd1"),
+ MPP_FUNCTION(0x3, "spi1", "mosi"),
+ MPP_FUNCTION(0x5, "led", "p0")),
+ MPP_MODE(32,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd2"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x4, "ptp", "triggen"),
+ MPP_FUNCTION(0x5, "led", "c0")),
+ MPP_MODE(33,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txd3"),
+ MPP_FUNCTION(0x3, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "led", "p2")),
+ MPP_MODE(34,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "txclkout"),
+ MPP_FUNCTION(0x3, "spi1", "sck"),
+ MPP_FUNCTION(0x5, "led", "c1")),
+ MPP_MODE(35,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge1", "rxctl"),
+ MPP_FUNCTION(0x3, "spi1", "cs1"),
+ MPP_FUNCTION(0x4, "spi0", "cs2"),
+ MPP_FUNCTION(0x5, "led", "p1")),
+ MPP_MODE(36,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x5, "led", "c2")),
+ MPP_MODE(37,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+ MPP_FUNCTION(0x2, "tdm", "intn"),
+ MPP_FUNCTION(0x4, "ge", "mdc")),
+ MPP_MODE(38,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+ MPP_FUNCTION(0x4, "ge", "mdio")),
+ MPP_MODE(39,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "ref", "clkout"),
+ MPP_FUNCTION(0x5, "led", "p3")),
+ MPP_MODE(40,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "uart1", "txd"),
+ MPP_FUNCTION(0x5, "led", "p0")),
+ MPP_MODE(41,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "uart1", "rxd"),
+ MPP_FUNCTION(0x5, "led", "p1")),
+ MPP_MODE(42,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x4, "led", "c0"),
+ MPP_FUNCTION(0x6, "ptp", "clk")),
+ MPP_MODE(43,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "sata0", "prsnt"),
+ MPP_FUNCTION(0x4, "dram", "vttctrl"),
+ MPP_FUNCTION(0x5, "led", "c1")),
+ MPP_MODE(44,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "sata0", "prsnt")),
+ MPP_MODE(45,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "spi0", "cs2"),
+ MPP_FUNCTION(0x4, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x5, "led", "c2"),
+ MPP_FUNCTION(0x6, "spi1", "cs2")),
+ MPP_MODE(46,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p0"),
+ MPP_FUNCTION(0x2, "ge0", "txd0"),
+ MPP_FUNCTION(0x3, "ge1", "txd0"),
+ MPP_FUNCTION(0x6, "dev", "wen1")),
+ MPP_MODE(47,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p1"),
+ MPP_FUNCTION(0x2, "ge0", "txd1"),
+ MPP_FUNCTION(0x3, "ge1", "txd1"),
+ MPP_FUNCTION(0x5, "ptp", "triggen"),
+ MPP_FUNCTION(0x6, "dev", "ale0")),
+ MPP_MODE(48,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p2"),
+ MPP_FUNCTION(0x2, "ge0", "txd2"),
+ MPP_FUNCTION(0x3, "ge1", "txd2"),
+ MPP_FUNCTION(0x6, "dev", "ale1")),
+ MPP_MODE(49,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "p3"),
+ MPP_FUNCTION(0x2, "ge0", "txd3"),
+ MPP_FUNCTION(0x3, "ge1", "txd3"),
+ MPP_FUNCTION(0x6, "dev", "a2")),
+ MPP_MODE(50,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c0"),
+ MPP_FUNCTION(0x2, "ge0", "rxd0"),
+ MPP_FUNCTION(0x3, "ge1", "rxd0"),
+ MPP_FUNCTION(0x5, "ptp", "eventreq"),
+ MPP_FUNCTION(0x6, "dev", "ad12")),
+ MPP_MODE(51,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c1"),
+ MPP_FUNCTION(0x2, "ge0", "rxd1"),
+ MPP_FUNCTION(0x3, "ge1", "rxd1"),
+ MPP_FUNCTION(0x6, "dev", "ad8")),
+ MPP_MODE(52,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "led", "c2"),
+ MPP_FUNCTION(0x2, "ge0", "rxd2"),
+ MPP_FUNCTION(0x3, "ge1", "rxd2"),
+ MPP_FUNCTION(0x5, "i2c0", "sda"),
+ MPP_FUNCTION(0x6, "dev", "ad9")),
+ MPP_MODE(53,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie1", "rstoutn"),
+ MPP_FUNCTION(0x2, "ge0", "rxd3"),
+ MPP_FUNCTION(0x3, "ge1", "rxd3"),
+ MPP_FUNCTION(0x5, "i2c0", "sck"),
+ MPP_FUNCTION(0x6, "dev", "ad10")),
+ MPP_MODE(54,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x2, "ge0", "rxctl"),
+ MPP_FUNCTION(0x3, "ge1", "rxctl"),
+ MPP_FUNCTION(0x6, "dev", "ad11")),
+ MPP_MODE(55,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "rxclk"),
+ MPP_FUNCTION(0x3, "ge1", "rxclk"),
+ MPP_FUNCTION(0x6, "dev", "cs0")),
+ MPP_MODE(56,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "txclkout"),
+ MPP_FUNCTION(0x3, "ge1", "txclkout"),
+ MPP_FUNCTION(0x6, "dev", "oe")),
+ MPP_MODE(57,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ge0", "txctl"),
+ MPP_FUNCTION(0x3, "ge1", "txctl"),
+ MPP_FUNCTION(0x6, "dev", "wen0")),
+ MPP_MODE(58,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "led", "c0")),
+ MPP_MODE(59,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x4, "led", "c1")),
+ MPP_MODE(60,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x4, "led", "c2"),
+ MPP_FUNCTION(0x6, "dev", "ad13")),
+ MPP_MODE(61,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c1", "sda"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x3, "spi1", "cs2"),
+ MPP_FUNCTION(0x4, "led", "p0"),
+ MPP_FUNCTION(0x6, "dev", "ad14")),
+ MPP_MODE(62,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c1", "sck"),
+ MPP_FUNCTION(0x4, "led", "p1"),
+ MPP_FUNCTION(0x6, "dev", "ad15")),
+ MPP_MODE(63,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ptp", "triggen"),
+ MPP_FUNCTION(0x4, "led", "p2"),
+ MPP_FUNCTION(0x6, "dev", "burst")),
+ MPP_MODE(64,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "dram", "vttctrl"),
+ MPP_FUNCTION(0x4, "led", "p3")),
+ MPP_MODE(65,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "sata1", "prsnt")),
+ MPP_MODE(66,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x2, "ptp", "eventreq"),
+ MPP_FUNCTION(0x4, "spi1", "cs3"),
+ MPP_FUNCTION(0x5, "pcie0", "rstoutn"),
+ MPP_FUNCTION(0x6, "dev", "cs3")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
+
+static struct of_device_id armada_375_pinctrl_of_match[] = {
+ { .compatible = "marvell,mv88f6720-pinctrl" },
+ { },
+};
+
+static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 69, NULL, armada_375_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 3),
+};
+
+static int armada_375_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ soc->variant = 0; /* no variants for Armada 375 */
+ soc->controls = mv88f6720_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv88f6720_mpp_controls);
+ soc->modes = mv88f6720_mpp_modes;
+ soc->nmodes = ARRAY_SIZE(mv88f6720_mpp_modes);
+ soc->gpioranges = mv88f6720_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv88f6720_mpp_gpio_ranges);
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_375_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_375_pinctrl_driver = {
+ .driver = {
+ .name = "armada-375-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
+ },
+ .probe = armada_375_pinctrl_probe,
+ .remove = armada_375_pinctrl_remove,
+};
+
+module_platform_driver(armada_375_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 375 pinctrl driver");
+MODULE_LICENSE("GPL v2");
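
The armada_375_mpp_ctrl callbacks delegate to the common default_mpp_ctrl_get()/default_mpp_ctrl_set() helpers added to pinctrl-mvebu.h later in this series, which pack eight 4-bit function selectors into each 32-bit MPP register. Below is a minimal stand-alone sketch of that offset/shift arithmetic; the pin number (66) and selector (0x6, "dev"/"cs3" in the table above) are illustrative values only, and the snippet is not kernel code.

#include <stdio.h>

#define MVEBU_MPPS_PER_REG	8
#define MVEBU_MPP_BITS		4
#define MVEBU_MPP_MASK		0xf

int main(void)
{
	unsigned pid = 66;	/* MPP 66, selector 0x6 = "dev"/"cs3" in the table above */
	unsigned config = 0x6;
	unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;	/* 0x20 */
	unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;	/* 8 */

	printf("MPP%u: register offset 0x%x, field bits [%u:%u], value 0x%x\n",
	       pid, off, shift + MVEBU_MPP_BITS - 1, shift,
	       (config & MVEBU_MPP_MASK) << shift);
	return 0;
}
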
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
new file mode 100644
index 000000000000..1049f82fb62f
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -0,0 +1,462 @@
+/*
+ * Marvell Armada 380/385 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_38x_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_38x_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+enum {
+ V_88F6810 = BIT(0),
+ V_88F6820 = BIT(1),
+ V_88F6828 = BIT(2),
+ V_88F6810_PLUS = (V_88F6810 | V_88F6820 | V_88F6828),
+ V_88F6820_PLUS = (V_88F6820 | V_88F6828),
+};
+
+static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "rxd", V_88F6810_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua0", "txd", V_88F6810_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sck", V_88F6810_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c0", "sda", V_88F6810_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge", "mdc", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge", "mdio", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txclkout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge0", "crs", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs3", V_88F6810_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad9", V_88F6810_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad10", V_88F6810_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad11", V_88F6810_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad12", V_88F6810_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad13", V_88F6810_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad14", V_88F6810_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad15", V_88F6810_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen1", V_88F6810_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdc slave", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "mosi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdio slave", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie0", "clkreq", V_88F6810_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sata1", "prsnt", V_88F6810_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "rxerr", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "trig_gen", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi0", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "col", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "event_req", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6810_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ge0", "txclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6810_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "cmd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "bootcs", V_88F6810_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "mosi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad0", V_88F6810_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad2", V_88F6810_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d4", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ready", V_88F6810_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d5", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs0", V_88F6810_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d6", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs1", V_88F6810_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "spi0", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txclkout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d7", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "cs2", V_88F6810_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad5", V_88F6810_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale0", V_88F6810_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "oen", V_88F6810_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ale1", V_88F6810_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "txctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "wen0", V_88F6810_PLUS)),
+ MPP_MODE(33,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad3", V_88F6810_PLUS)),
+ MPP_MODE(34,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad1", V_88F6810_PLUS)),
+ MPP_MODE(35,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a1", V_88F6810_PLUS)),
+ MPP_MODE(36,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "trig_gen", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a0", V_88F6810_PLUS)),
+ MPP_MODE(37,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "clk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad8", V_88F6810_PLUS)),
+ MPP_MODE(38,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ptp", "event_req", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ref", "clk_out0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad4", V_88F6810_PLUS)),
+ MPP_MODE(39,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "a2", V_88F6810_PLUS)),
+ MPP_MODE(40,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxd3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "sd0", "d2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad6", V_88F6810_PLUS)),
+ MPP_MODE(41,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rxd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge1", "rxctl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs3", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "burst/last", V_88F6810_PLUS)),
+ MPP_MODE(42,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "txd", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "ua0", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "ad7", V_88F6810_PLUS)),
+ MPP_MODE(43,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "dev", "clkout", V_88F6810_PLUS)),
+ MPP_MODE(44,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(4, "sata3", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(5, "pcie0", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(45,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(46,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
+ MPP_MODE(47,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(4, "spi1", "cs2", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sata3", "prsnt", V_88F6828)),
+ MPP_MODE(48,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "pclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "mclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d4", V_88F6810_PLUS)),
+ MPP_MODE(49,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata2", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(2, "sata3", "prsnt", V_88F6828),
+ MPP_VAR_FUNCTION(3, "tdm2c", "fsync", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "lrclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d5", V_88F6810_PLUS)),
+ MPP_MODE(50,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "drx", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "extclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "cmd", V_88F6810_PLUS)),
+ MPP_MODE(51,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "dtx", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "sdo", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "m", "decc_err", V_88F6810_PLUS)),
+ MPP_MODE(52,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "intn", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "sdi", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d6", V_88F6810_PLUS)),
+ MPP_MODE(53,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "tdm2c", "rstn", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "audio", "bclk", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d7", V_88F6810_PLUS)),
+ MPP_MODE(54,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d3", V_88F6810_PLUS)),
+ MPP_MODE(55,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "cts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdio", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs1", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d0", V_88F6810_PLUS)),
+ MPP_MODE(56,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "ua1", "rts", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "ge", "mdc", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "mosi", V_88F6810_PLUS)),
+ MPP_MODE(57,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "clk", V_88F6810_PLUS)),
+ MPP_MODE(58,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie1", "clkreq", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sck", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie2", "clkreq", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "miso", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d1", V_88F6810_PLUS)),
+ MPP_MODE(59,
+ MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(2, "i2c1", "sda", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
+ MPP_VAR_FUNCTION(4, "spi1", "cs0", V_88F6810_PLUS),
+ MPP_VAR_FUNCTION(5, "sd0", "d2", V_88F6810_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_38x_pinctrl_info;
+
+static struct of_device_id armada_38x_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,mv88f6810-pinctrl",
+ .data = (void *) V_88F6810,
+ },
+ {
+ .compatible = "marvell,mv88f6820-pinctrl",
+ .data = (void *) V_88F6820,
+ },
+ {
+ .compatible = "marvell,mv88f6828-pinctrl",
+ .data = (void *) V_88F6828,
+ },
+ { },
+};
+
+static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, armada_38x_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 27),
+};
+
+static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
+ const struct of_device_id *match =
+ of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
+ struct resource *res;
+
+ if (!match)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ soc->variant = (unsigned) match->data & 0xff;
+ soc->controls = armada_38x_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
+ soc->gpioranges = armada_38x_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(armada_38x_mpp_gpio_ranges);
+ soc->modes = armada_38x_mpp_modes;
+ soc->nmodes = armada_38x_mpp_controls[0].npins;
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_38x_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_38x_pinctrl_driver = {
+ .driver = {
+ .name = "armada-38x-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
+ },
+ .probe = armada_38x_pinctrl_probe,
+ .remove = armada_38x_pinctrl_remove,
+};
+
+module_platform_driver(armada_38x_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 38x pinctrl driver");
+MODULE_LICENSE("GPL v2");
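
Each MPP_VAR_FUNCTION entry above carries a variant bitmask, and the probe routine stores the matched compatible's variant in soc->variant. The following stand-alone sketch shows only the mask arithmetic, assuming — as the variant-tagged table implies — that a setting applies to a SoC only when its mask includes the probed variant bit; it is not kernel code.

#include <stdio.h>

/* Variant bits as defined in the armada_38x driver above */
#define V_88F6810	(1u << 0)
#define V_88F6820	(1u << 1)
#define V_88F6828	(1u << 2)
#define V_88F6810_PLUS	(V_88F6810 | V_88F6820 | V_88F6828)
#define V_88F6820_PLUS	(V_88F6820 | V_88F6828)

int main(void)
{
	unsigned soc_variant = V_88F6810;	/* e.g. probing on a 88F6810 */

	/* MPP12 function 3 ("pcie1"/"rstout") is tagged V_88F6820_PLUS above */
	unsigned setting_mask = V_88F6820_PLUS;

	printf("pcie1 rstout on MPP12 available: %s\n",
	       (setting_mask & soc_variant) ? "yes" : "no");	/* "no" on 6810 */
	return 0;
}
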
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 843a51f9d129..de311129f7a0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -33,6 +33,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_xp_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
enum armada_xp_variant {
V_MV78230 = BIT(0),
V_MV78260 = BIT(1),
@@ -366,7 +378,7 @@ static struct of_device_id armada_xp_pinctrl_of_match[] = {
};
static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
- MPP_REG_CTRL(0, 48),
+ MPP_FUNC_CTRL(0, 48, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
@@ -375,7 +387,7 @@ static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
- MPP_REG_CTRL(0, 66),
+ MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
@@ -385,7 +397,7 @@ static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
- MPP_REG_CTRL(0, 66),
+ MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
@@ -399,10 +411,16 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
+ struct resource *res;
if (!match)
return -ENODEV;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
soc->variant = (unsigned) match->data & 0xff;
switch (soc->variant) {
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 47268393af34..3b022178a566 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -18,107 +18,122 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
#include "pinctrl-mvebu.h"
-#define DOVE_SB_REGS_VIRT_BASE IOMEM(0xfde00000)
-#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0200)
-#define DOVE_PMU_MPP_GENERAL_CTRL (DOVE_MPP_VIRT_BASE + 0x10)
-#define DOVE_AU0_AC97_SEL BIT(16)
-#define DOVE_PMU_SIGNAL_SELECT_0 (DOVE_SB_REGS_VIRT_BASE + 0xd802C)
-#define DOVE_PMU_SIGNAL_SELECT_1 (DOVE_SB_REGS_VIRT_BASE + 0xd8030)
-#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define DOVE_TWSI_ENABLE_OPTION1 BIT(7)
-#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE + 0xe8030)
-#define DOVE_TWSI_ENABLE_OPTION2 BIT(20)
-#define DOVE_TWSI_ENABLE_OPTION3 BIT(21)
-#define DOVE_TWSI_OPTION3_GPIO BIT(22)
-#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE + 0xe8034)
-#define DOVE_SSP_ON_AU1 BIT(0)
-#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xe803c)
-#define DOVE_AU1_SPDIFO_GPIO_EN BIT(1)
-#define DOVE_NAND_GPIO_EN BIT(0)
-#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0400)
-#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40)
-#define DOVE_SPI_GPIO_SEL BIT(5)
-#define DOVE_UART1_GPIO_SEL BIT(4)
-#define DOVE_AU1_GPIO_SEL BIT(3)
-#define DOVE_CAM_GPIO_SEL BIT(2)
-#define DOVE_SD1_GPIO_SEL BIT(1)
-#define DOVE_SD0_GPIO_SEL BIT(0)
-
-#define MPPS_PER_REG 8
-#define MPP_BITS 4
-#define MPP_MASK 0xf
+/* Internal registers can be configured at any 1 MiB aligned address */
+#define INT_REGS_MASK ~(SZ_1M - 1)
+#define MPP4_REGS_OFFS 0xd0440
+#define PMU_REGS_OFFS 0xd802c
+#define GC_REGS_OFFS 0xe802c
+
+/* MPP Base registers */
+#define PMU_MPP_GENERAL_CTRL 0x10
+#define AU0_AC97_SEL BIT(16)
+
+/* MPP Control 4 register */
+#define SPI_GPIO_SEL BIT(5)
+#define UART1_GPIO_SEL BIT(4)
+#define AU1_GPIO_SEL BIT(3)
+#define CAM_GPIO_SEL BIT(2)
+#define SD1_GPIO_SEL BIT(1)
+#define SD0_GPIO_SEL BIT(0)
+
+/* PMU Signal Select registers */
+#define PMU_SIGNAL_SELECT_0 0x00
+#define PMU_SIGNAL_SELECT_1 0x04
+
+/* Global Config regmap registers */
+#define GLOBAL_CONFIG_1 0x00
+#define TWSI_ENABLE_OPTION1 BIT(7)
+#define GLOBAL_CONFIG_2 0x04
+#define TWSI_ENABLE_OPTION2 BIT(20)
+#define TWSI_ENABLE_OPTION3 BIT(21)
+#define TWSI_OPTION3_GPIO BIT(22)
+#define SSP_CTRL_STATUS_1 0x08
+#define SSP_ON_AU1 BIT(0)
+#define MPP_GENERAL_CONFIG 0x10
+#define AU1_SPDIFO_GPIO_EN BIT(1)
+#define NAND_GPIO_EN BIT(0)
#define CONFIG_PMU BIT(4)
-static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static void __iomem *mpp_base;
+static void __iomem *mpp4_base;
+static void __iomem *pmu_base;
+static struct regmap *gconfmap;
+
+static int dove_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int dove_mpp_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
- if (pmu & (1 << ctrl->pid)) {
- func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
- *config = (func >> shift) & MPP_MASK;
- *config |= CONFIG_PMU;
- } else {
- func = readl(DOVE_MPP_VIRT_BASE + off);
- *config = (func >> shift) & MPP_MASK;
- }
+ if ((pmu & BIT(pid)) == 0)
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+
+ func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+ *config = (func >> shift) & MVEBU_MPP_MASK;
+ *config |= CONFIG_PMU;
+
return 0;
}
-static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
- if (config & CONFIG_PMU) {
- writel(pmu | (1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
- func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
- func &= ~(MPP_MASK << shift);
- func |= (config & MPP_MASK) << shift;
- writel(func, DOVE_PMU_SIGNAL_SELECT_0 + off);
- } else {
- writel(pmu & ~(1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
- func = readl(DOVE_MPP_VIRT_BASE + off);
- func &= ~(MPP_MASK << shift);
- func |= (config & MPP_MASK) << shift;
- writel(func, DOVE_MPP_VIRT_BASE + off);
+ if ((config & CONFIG_PMU) == 0) {
+ writel(pmu & ~BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ return default_mpp_ctrl_set(mpp_base, pid, config);
}
+
+ writel(pmu | BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+ func &= ~(MVEBU_MPP_MASK << shift);
+ func |= (config & MVEBU_MPP_MASK) << shift;
+ writel(func, pmu_base + PMU_SIGNAL_SELECT_0 + off);
+
return 0;
}
-static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
- switch (ctrl->pid) {
+ switch (pid) {
case 24: /* mpp_camera */
- mask = DOVE_CAM_GPIO_SEL;
+ mask = CAM_GPIO_SEL;
break;
case 40: /* mpp_sdio0 */
- mask = DOVE_SD0_GPIO_SEL;
+ mask = SD0_GPIO_SEL;
break;
case 46: /* mpp_sdio1 */
- mask = DOVE_SD1_GPIO_SEL;
+ mask = SD1_GPIO_SEL;
break;
case 58: /* mpp_spi0 */
- mask = DOVE_SPI_GPIO_SEL;
+ mask = SPI_GPIO_SEL;
break;
case 62: /* mpp_uart1 */
- mask = DOVE_UART1_GPIO_SEL;
+ mask = UART1_GPIO_SEL;
break;
default:
return -EINVAL;
@@ -129,27 +144,26 @@ static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
return 0;
}
-static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
- switch (ctrl->pid) {
+ switch (pid) {
case 24: /* mpp_camera */
- mask = DOVE_CAM_GPIO_SEL;
+ mask = CAM_GPIO_SEL;
break;
case 40: /* mpp_sdio0 */
- mask = DOVE_SD0_GPIO_SEL;
+ mask = SD0_GPIO_SEL;
break;
case 46: /* mpp_sdio1 */
- mask = DOVE_SD1_GPIO_SEL;
+ mask = SD1_GPIO_SEL;
break;
case 58: /* mpp_spi0 */
- mask = DOVE_SPI_GPIO_SEL;
+ mask = SPI_GPIO_SEL;
break;
case 62: /* mpp_uart1 */
- mask = DOVE_UART1_GPIO_SEL;
+ mask = UART1_GPIO_SEL;
break;
default:
return -EINVAL;
@@ -159,74 +173,69 @@ static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
if (config)
mpp4 |= mask;
- writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
+ writel(mpp4, mpp4_base);
return 0;
}
-static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+ unsigned int gmpp;
- *config = ((gmpp & DOVE_NAND_GPIO_EN) != 0);
+ regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+ *config = ((gmpp & NAND_GPIO_EN) != 0);
return 0;
}
-static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
-
- gmpp &= ~DOVE_NAND_GPIO_EN;
- if (config)
- gmpp |= DOVE_NAND_GPIO_EN;
-
- writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
-
+ regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+ NAND_GPIO_EN,
+ (config) ? NAND_GPIO_EN : 0);
return 0;
}
-static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_audio0_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
- *config = ((pmu & DOVE_AU0_AC97_SEL) != 0);
+ *config = ((pmu & AU0_AC97_SEL) != 0);
return 0;
}
-static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_audio0_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
- pmu &= ~DOVE_AU0_AC97_SEL;
+ pmu &= ~AU0_AC97_SEL;
if (config)
- pmu |= DOVE_AU0_AC97_SEL;
- writel(pmu, DOVE_PMU_MPP_GENERAL_CTRL);
+ pmu |= AU0_AC97_SEL;
+ writel(pmu, mpp_base + PMU_MPP_GENERAL_CTRL);
return 0;
}
-static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
- unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int mpp4 = readl(mpp4_base);
+ unsigned int sspc1;
+ unsigned int gmpp;
+ unsigned int gcfg2;
+
+ regmap_read(gconfmap, SSP_CTRL_STATUS_1, &sspc1);
+ regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+ regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
*config = 0;
- if (mpp4 & DOVE_AU1_GPIO_SEL)
+ if (mpp4 & AU1_GPIO_SEL)
*config |= BIT(3);
- if (sspc1 & DOVE_SSP_ON_AU1)
+ if (sspc1 & SSP_ON_AU1)
*config |= BIT(2);
- if (gmpp & DOVE_AU1_SPDIFO_GPIO_EN)
+ if (gmpp & AU1_SPDIFO_GPIO_EN)
*config |= BIT(1);
- if (gcfg2 & DOVE_TWSI_OPTION3_GPIO)
+ if (gcfg2 & TWSI_OPTION3_GPIO)
*config |= BIT(0);
/* SSP/TWSI only if I2S1 not set*/
@@ -238,35 +247,24 @@ static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
return 0;
}
-static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
- unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
- unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int mpp4 = readl(mpp4_base);
- /*
- * clear all audio1 related bits before configure
- */
- gcfg2 &= ~DOVE_TWSI_OPTION3_GPIO;
- gmpp &= ~DOVE_AU1_SPDIFO_GPIO_EN;
- sspc1 &= ~DOVE_SSP_ON_AU1;
- mpp4 &= ~DOVE_AU1_GPIO_SEL;
-
- if (config & BIT(0))
- gcfg2 |= DOVE_TWSI_OPTION3_GPIO;
- if (config & BIT(1))
- gmpp |= DOVE_AU1_SPDIFO_GPIO_EN;
- if (config & BIT(2))
- sspc1 |= DOVE_SSP_ON_AU1;
+ mpp4 &= ~AU1_GPIO_SEL;
if (config & BIT(3))
- mpp4 |= DOVE_AU1_GPIO_SEL;
-
- writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
- writel(sspc1, DOVE_SSP_CTRL_STATUS_1);
- writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
- writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+ mpp4 |= AU1_GPIO_SEL;
+ writel(mpp4, mpp4_base);
+
+ regmap_update_bits(gconfmap, SSP_CTRL_STATUS_1,
+ SSP_ON_AU1,
+ (config & BIT(2)) ? SSP_ON_AU1 : 0);
+ regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+ AU1_SPDIFO_GPIO_EN,
+ (config & BIT(1)) ? AU1_SPDIFO_GPIO_EN : 0);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+ TWSI_OPTION3_GPIO,
+ (config & BIT(0)) ? TWSI_OPTION3_GPIO : 0);
return 0;
}
@@ -276,11 +274,11 @@ static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
* break other functions. If you require all mpps as gpio
* enforce gpio setting by pinctrl mapping.
*/
-static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
+static int dove_audio1_ctrl_gpio_req(unsigned pid)
{
unsigned long config;
- dove_audio1_ctrl_get(ctrl, &config);
+ dove_audio1_ctrl_get(pid, &config);
switch (config) {
case 0x02: /* i2s1 : gpio[56:57] */
@@ -303,76 +301,62 @@ static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
}
/* mpp[52:57] has gpio pins capable of in and out */
-static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl *ctrl, u8 pid,
- bool input)
+static int dove_audio1_ctrl_gpio_dir(unsigned pid, bool input)
{
if (pid < 52 || pid > 57)
return -ENOTSUPP;
return 0;
}
-static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
- unsigned long *config)
+static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
{
- unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ unsigned int gcfg1;
+ unsigned int gcfg2;
+
+ regmap_read(gconfmap, GLOBAL_CONFIG_1, &gcfg1);
+ regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
*config = 0;
- if (gcfg1 & DOVE_TWSI_ENABLE_OPTION1)
+ if (gcfg1 & TWSI_ENABLE_OPTION1)
*config = 1;
- else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION2)
+ else if (gcfg2 & TWSI_ENABLE_OPTION2)
*config = 2;
- else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION3)
+ else if (gcfg2 & TWSI_ENABLE_OPTION3)
*config = 3;
return 0;
}
-static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
- unsigned long config)
+static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
{
- unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
- unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
-
- gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
- gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
+ unsigned int gcfg1 = 0;
+ unsigned int gcfg2 = 0;
switch (config) {
case 1:
- gcfg1 |= DOVE_TWSI_ENABLE_OPTION1;
+ gcfg1 = TWSI_ENABLE_OPTION1;
break;
case 2:
- gcfg2 |= DOVE_TWSI_ENABLE_OPTION2;
+ gcfg2 = TWSI_ENABLE_OPTION2;
break;
case 3:
- gcfg2 |= DOVE_TWSI_ENABLE_OPTION3;
+ gcfg2 = TWSI_ENABLE_OPTION3;
break;
}
- writel(gcfg1, DOVE_GLOBAL_CONFIG_1);
- writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_1,
+ TWSI_ENABLE_OPTION1,
+ gcfg1);
+ regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+ TWSI_ENABLE_OPTION2 | TWSI_ENABLE_OPTION3,
+ gcfg2);
return 0;
}
static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 0, "mpp0", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(1, 1, "mpp1", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(2, 2, "mpp2", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(3, 3, "mpp3", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(4, 4, "mpp4", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(5, 5, "mpp5", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(6, 6, "mpp6", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(7, 7, "mpp7", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(8, 8, "mpp8", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(9, 9, "mpp9", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(10, 10, "mpp10", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(11, 11, "mpp11", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(12, 12, "mpp12", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(13, 13, "mpp13", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(14, 14, "mpp14", dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(15, 15, "mpp15", dove_pmu_mpp_ctrl),
- MPP_REG_CTRL(16, 23),
+ MPP_FUNC_CTRL(0, 15, NULL, dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(16, 23, NULL, dove_mpp_ctrl),
MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
@@ -772,8 +756,17 @@ static struct of_device_id dove_pinctrl_of_match[] = {
{ }
};
+static struct regmap_config gc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 5,
+};
+
static int dove_pinctrl_probe(struct platform_device *pdev)
{
+ struct resource *res, *mpp_res;
+ struct resource fb_res;
const struct of_device_id *match =
of_match_device(dove_pinctrl_of_match, &pdev->dev);
pdev->dev.platform_data = (void *)match->data;
@@ -789,6 +782,59 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, mpp_res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
+ /* prepare fallback resource */
+ memcpy(&fb_res, mpp_res, sizeof(struct resource));
+ fb_res.start = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_warn(&pdev->dev, "falling back to hardcoded MPP4 resource\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + MPP4_REGS_OFFS, 0x4);
+ res = &fb_res;
+ }
+
+ mpp4_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp4_base))
+ return PTR_ERR(mpp4_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_warn(&pdev->dev, "falling back to hardcoded PMU resource\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + PMU_REGS_OFFS, 0x8);
+ res = &fb_res;
+ }
+
+ pmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmu_base))
+ return PTR_ERR(pmu_base);
+
+ gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
+ if (IS_ERR(gconfmap)) {
+ void __iomem *gc_base;
+
+ dev_warn(&pdev->dev, "falling back to hardcoded global registers\n");
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
+ gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
+ if (IS_ERR(gc_base))
+ return PTR_ERR(gc_base);
+ gconfmap = devm_regmap_init_mmio(&pdev->dev,
+ gc_base, &gc_regmap_config);
+ if (IS_ERR(gconfmap))
+ return PTR_ERR(gconfmap);
+ }
+
+ /* Warn on any missing DT resource */
+ if (fb_res.start)
+ dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
+
return mvebu_pinctrl_probe(pdev);
}
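
When the optional second and third MEM resources are missing from the DT, the Dove probe rebuilds them from the first (MPP) resource by masking down to the 1 MiB-aligned internal-register base and adding the fixed offsets. A stand-alone sketch of that fallback arithmetic follows; the MPP start address used here (0xf10d0200) is a hypothetical example, not a value taken from this patch.

#include <stdio.h>

#define SZ_1M		0x00100000
#define INT_REGS_MASK	(~(SZ_1M - 1))
#define PMU_REGS_OFFS	0xd802c

int main(void)
{
	/* Hypothetical physical start of the first (MPP) MEM resource */
	unsigned long mpp_start = 0xf10d0200;

	/* Mask down to the internal-register base, then add the PMU offset */
	unsigned long pmu_start = (mpp_start & INT_REGS_MASK) + PMU_REGS_OFFS;

	printf("PMU fallback resource: 0x%lx, size 0x8\n", pmu_start);
	return 0;
}
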
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 6b504b5935a5..0d0211a1a0b0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -21,6 +21,18 @@
#include "pinctrl-mvebu.h"
+static void __iomem *mpp_base;
+
+static int kirkwood_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+ return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int kirkwood_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+ return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
#define V(f6180, f6190, f6192, f6281, f6282, dx4122) \
((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
(f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
@@ -359,7 +371,7 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
};
static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
- MPP_REG_CTRL(0, 29),
+ MPP_FUNC_CTRL(0, 29, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
@@ -367,7 +379,7 @@ static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
- MPP_REG_CTRL(0, 35),
+ MPP_FUNC_CTRL(0, 35, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
@@ -376,7 +388,7 @@ static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
};
static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
- MPP_REG_CTRL(0, 49),
+ MPP_FUNC_CTRL(0, 49, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
@@ -456,9 +468,16 @@ static struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
+ struct resource *res;
const struct of_device_id *match =
of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
pdev->dev.platform_data = (void *)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mpp_base))
+ return PTR_ERR(mpp_base);
+
return mvebu_pinctrl_probe(pdev);
}
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 0fd1ad31fbf9..9908374f8f92 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -50,7 +50,6 @@ struct mvebu_pinctrl {
struct device *dev;
struct pinctrl_dev *pctldev;
struct pinctrl_desc desc;
- void __iomem *base;
struct mvebu_pinctrl_group *groups;
unsigned num_groups;
struct mvebu_pinctrl_function *functions;
@@ -138,43 +137,6 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
return NULL;
}
-/*
- * Common mpp pin configuration registers on MVEBU are
- * registers of eight 4-bit values for each mpp setting.
- * Register offset and bit mask are calculated accordingly below.
- */
-static int mvebu_common_mpp_get(struct mvebu_pinctrl *pctl,
- struct mvebu_pinctrl_group *grp,
- unsigned long *config)
-{
- unsigned pin = grp->gid;
- unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
-
- *config = readl(pctl->base + off);
- *config >>= shift;
- *config &= MPP_MASK;
-
- return 0;
-}
-
-static int mvebu_common_mpp_set(struct mvebu_pinctrl *pctl,
- struct mvebu_pinctrl_group *grp,
- unsigned long config)
-{
- unsigned pin = grp->gid;
- unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
- unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
- unsigned long reg;
-
- reg = readl(pctl->base + off);
- reg &= ~(MPP_MASK << shift);
- reg |= (config << shift);
- writel(reg, pctl->base + off);
-
- return 0;
-}
-
static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned gid, unsigned long *config)
{
@@ -184,10 +146,7 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!grp->ctrl)
return -EINVAL;
- if (grp->ctrl->mpp_get)
- return grp->ctrl->mpp_get(grp->ctrl, config);
-
- return mvebu_common_mpp_get(pctl, grp, config);
+ return grp->ctrl->mpp_get(grp->pins[0], config);
}
static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -202,11 +161,7 @@ static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
for (i = 0; i < num_configs; i++) {
- if (grp->ctrl->mpp_set)
- ret = grp->ctrl->mpp_set(grp->ctrl, configs[i]);
- else
- ret = mvebu_common_mpp_set(pctl, grp, configs[i]);
-
+ ret = grp->ctrl->mpp_set(grp->pins[0], configs[i]);
if (ret)
return ret;
} /* for each config */
@@ -347,7 +302,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_req)
- return grp->ctrl->mpp_gpio_req(grp->ctrl, offset);
+ return grp->ctrl->mpp_gpio_req(offset);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -370,7 +325,7 @@ static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_dir)
- return grp->ctrl->mpp_gpio_dir(grp->ctrl, offset, input);
+ return grp->ctrl->mpp_gpio_dir(offset, input);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -593,11 +548,12 @@ static int mvebu_pinctrl_build_functions(struct platform_device *pdev,
int mvebu_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
- struct resource *res;
struct mvebu_pinctrl *pctl;
- void __iomem *base;
struct pinctrl_pin_desc *pdesc;
unsigned gid, n, k;
+ unsigned size, noname = 0;
+ char *noname_buf;
+ void *p;
int ret;
if (!soc || !soc->controls || !soc->modes) {
@@ -605,11 +561,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
GFP_KERNEL);
if (!pctl) {
@@ -623,7 +574,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->desc.pmxops = &mvebu_pinmux_ops;
pctl->desc.confops = &mvebu_pinconf_ops;
pctl->variant = soc->variant;
- pctl->base = base;
pctl->dev = &pdev->dev;
platform_set_drvdata(pdev, pctl);
@@ -633,33 +583,23 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->desc.npins = 0;
for (n = 0; n < soc->ncontrols; n++) {
struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
- char *names;
pctl->desc.npins += ctrl->npins;
- /* initial control pins */
+ /* initialize control's pins[] array */
for (k = 0; k < ctrl->npins; k++)
ctrl->pins[k] = ctrl->pid + k;
- /* special soc specific control */
- if (ctrl->mpp_get || ctrl->mpp_set) {
- if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
- dev_err(&pdev->dev, "wrong soc control info\n");
- return -EINVAL;
- }
+ /*
+ * We allow passing controls with a NULL name, which we treat
+ * as a range of one-pin groups using the generic mvebu register
+ * controls.
+ */
+ if (!ctrl->name) {
+ pctl->num_groups += ctrl->npins;
+ noname += ctrl->npins;
+ } else {
pctl->num_groups += 1;
- continue;
}
-
- /* generic mvebu register control */
- names = devm_kzalloc(&pdev->dev, ctrl->npins * 8, GFP_KERNEL);
- if (!names) {
- dev_err(&pdev->dev, "failed to alloc mpp names\n");
- return -ENOMEM;
- }
- for (k = 0; k < ctrl->npins; k++)
- sprintf(names + 8*k, "mpp%d", ctrl->pid+k);
- ctrl->name = names;
- pctl->num_groups += ctrl->npins;
}
pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
@@ -673,12 +613,17 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pdesc[n].number = n;
pctl->desc.pins = pdesc;
- pctl->groups = devm_kzalloc(&pdev->dev, pctl->num_groups *
- sizeof(struct mvebu_pinctrl_group), GFP_KERNEL);
- if (!pctl->groups) {
- dev_err(&pdev->dev, "failed to alloc pinctrl groups\n");
+ /*
+ * Allocate the group array together with the name buffers for
+ * the unnamed groups in a single chunk.
+ */
+ size = pctl->num_groups * sizeof(*pctl->groups) + noname * 8;
+ p = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to alloc group data\n");
return -ENOMEM;
}
+ pctl->groups = p;
+ noname_buf = p + pctl->num_groups * sizeof(*pctl->groups);
/* assign mpp controls to groups */
gid = 0;
@@ -690,17 +635,26 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->groups[gid].pins = ctrl->pins;
pctl->groups[gid].npins = ctrl->npins;
- /* generic mvebu register control maps to a number of groups */
- if (!ctrl->mpp_get && !ctrl->mpp_set) {
+ /*
+ * We treat unnamed controls as a range of one-pin groups
+ * with generic mvebu register controls. Use one group for
+ * each pin in this range and assign a default group name.
+ */
+ if (!ctrl->name) {
+ pctl->groups[gid].name = noname_buf;
pctl->groups[gid].npins = 1;
+ sprintf(noname_buf, "mpp%d", ctrl->pid+0);
+ noname_buf += 8;
for (k = 1; k < ctrl->npins; k++) {
gid++;
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
- pctl->groups[gid].name = &ctrl->name[8*k];
+ pctl->groups[gid].name = noname_buf;
pctl->groups[gid].pins = &ctrl->pins[k];
pctl->groups[gid].npins = 1;
+ sprintf(noname_buf, "mpp%d", ctrl->pid+k);
+ noname_buf += 8;
}
}
gid++;
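
Unnamed controls now expand into one group per pin, each with a generated "mpp%d" name stored in an 8-byte slot carved out of the same allocation as the group array. A stand-alone sketch of that sizing, using the 60-pin Armada 38x control above as an illustrative example (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned npins = 60;			/* MPP_FUNC_CTRL(0, 59, NULL, ...) covers 60 pins */
	unsigned name_bytes = npins * 8;	/* one 8-byte "mpp%d" slot per unnamed pin */
	char buf[8];

	snprintf(buf, sizeof(buf), "mpp%d", 59);	/* longest name needed here */
	printf("extra name bytes: %u, sample name: \"%s\"\n", name_bytes, buf);
	return 0;
}
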
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index 90bd3beee860..65a98e6f7265 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -28,20 +28,19 @@
* between two or more different settings, e.g. assign mpp pin 13 to
* uart1 or sata.
*
- * If optional mpp_get/_set functions are set these are used to get/set
- * a specific mode. Otherwise it is assumed that the mpp control is based
- * on 4-bit groups in subsequent registers. The optional mpp_gpio_req/_dir
- * functions can be used to allow pin settings with varying gpio pins.
+ * The mpp_get/_set functions are mandatory and are used to get/set a
+ * specific mode. The optional mpp_gpio_req/_dir functions can be used
+ * to allow pin settings with varying gpio pins.
*/
struct mvebu_mpp_ctrl {
const char *name;
u8 pid;
u8 npins;
unsigned *pins;
- int (*mpp_get)(struct mvebu_mpp_ctrl *ctrl, unsigned long *config);
- int (*mpp_set)(struct mvebu_mpp_ctrl *ctrl, unsigned long config);
- int (*mpp_gpio_req)(struct mvebu_mpp_ctrl *ctrl, u8 pid);
- int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl *ctrl, u8 pid, bool input);
+ int (*mpp_get)(unsigned pid, unsigned long *config);
+ int (*mpp_set)(unsigned pid, unsigned long config);
+ int (*mpp_gpio_req)(unsigned pid);
+ int (*mpp_gpio_dir)(unsigned pid, bool input);
};
/**
@@ -114,18 +113,6 @@ struct mvebu_pinctrl_soc_info {
int ngpioranges;
};
-#define MPP_REG_CTRL(_idl, _idh) \
- { \
- .name = NULL, \
- .pid = _idl, \
- .npins = _idh - _idl + 1, \
- .pins = (unsigned[_idh - _idl + 1]) { }, \
- .mpp_get = NULL, \
- .mpp_set = NULL, \
- .mpp_gpio_req = NULL, \
- .mpp_gpio_dir = NULL, \
- }
-
#define MPP_FUNC_CTRL(_idl, _idh, _name, _func) \
{ \
.name = _name, \
@@ -186,6 +173,34 @@ struct mvebu_pinctrl_soc_info {
.npins = _npins, \
}
+#define MVEBU_MPPS_PER_REG 8
+#define MVEBU_MPP_BITS 4
+#define MVEBU_MPP_MASK 0xf
+
+static inline int default_mpp_ctrl_get(void __iomem *base, unsigned int pid,
+ unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ *config = (readl(base + off) >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
+ unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long reg;
+
+ reg = readl(base + off) & ~(MVEBU_MPP_MASK << shift);
+ writel(reg | (config << shift), base + off);
+
+ return 0;
+}
+
int mvebu_pinctrl_probe(struct platform_device *pdev);
int mvebu_pinctrl_remove(struct platform_device *pdev);
diff --git a/drivers/pinctrl/pinctrl-adi2-bf54x.c b/drivers/pinctrl/pinctrl-adi2-bf54x.c
index ea9d9ab9cda1..008a29e92e56 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf54x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf54x.c
@@ -309,39 +309,6 @@ static const unsigned keys_8x8_pins[] = {
GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7,
};
-static const struct adi_pin_group adi_pin_groups[] = {
- ADI_PIN_GROUP("uart0grp", uart0_pins),
- ADI_PIN_GROUP("uart1grp", uart1_pins),
- ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
- ADI_PIN_GROUP("uart2grp", uart2_pins),
- ADI_PIN_GROUP("uart3grp", uart3_pins),
- ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins),
- ADI_PIN_GROUP("rsi0grp", rsi0_pins),
- ADI_PIN_GROUP("spi0grp", spi0_pins),
- ADI_PIN_GROUP("spi1grp", spi1_pins),
- ADI_PIN_GROUP("twi0grp", twi0_pins),
- ADI_PIN_GROUP("twi1grp", twi1_pins),
- ADI_PIN_GROUP("rotarygrp", rotary_pins),
- ADI_PIN_GROUP("can0grp", can0_pins),
- ADI_PIN_GROUP("can1grp", can1_pins),
- ADI_PIN_GROUP("smc0grp", smc0_pins),
- ADI_PIN_GROUP("sport0grp", sport0_pins),
- ADI_PIN_GROUP("sport1grp", sport1_pins),
- ADI_PIN_GROUP("sport2grp", sport2_pins),
- ADI_PIN_GROUP("sport3grp", sport3_pins),
- ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
- ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
- ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
- ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
- ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
- ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
- ADI_PIN_GROUP("atapigrp", atapi_pins),
- ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins),
- ADI_PIN_GROUP("nfc0grp", nfc0_pins),
- ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins),
- ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins),
-};
-
static const unsigned short uart0_mux[] = {
P_UART0_TX, P_UART0_RX,
0
@@ -513,6 +480,39 @@ static const unsigned short keys_8x8_mux[] = {
0
};
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+ ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+ ADI_PIN_GROUP("uart2grp", uart2_pins, uart2_mux),
+ ADI_PIN_GROUP("uart3grp", uart3_pins, uart3_mux),
+ ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins, uart3_ctsrts_mux),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+ ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+ ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+ ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+ ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+ ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+ ADI_PIN_GROUP("can1grp", can1_pins, can1_mux),
+ ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+ ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+ ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+ ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+ ADI_PIN_GROUP("sport3grp", sport3_pins, sport3_mux),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+ ADI_PIN_GROUP("atapigrp", atapi_pins, atapi_mux),
+ ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins, atapi_alter_mux),
+ ADI_PIN_GROUP("nfc0grp", nfc0_pins, nfc0_mux),
+ ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins, keys_4x4_mux),
+ ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins, keys_8x8_mux),
+};
+
static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
@@ -532,49 +532,45 @@ static const char * const sport0grp[] = { "sport0grp" };
static const char * const sport1grp[] = { "sport1grp" };
static const char * const sport2grp[] = { "sport2grp" };
static const char * const sport3grp[] = { "sport3grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+ "ppi0_16bgrp",
+ "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+ "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp" };
static const char * const atapigrp[] = { "atapigrp" };
static const char * const atapialtergrp[] = { "atapialtergrp" };
static const char * const nfc0grp[] = { "nfc0grp" };
-static const char * const keys_4x4grp[] = { "keys_4x4grp" };
-static const char * const keys_8x8grp[] = { "keys_8x8grp" };
+static const char * const keysgrp[] = { "keys_4x4grp",
+ "keys_8x8grp" };
static const struct adi_pmx_func adi_pmx_functions[] = {
- ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
- ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
- ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
- ADI_PMX_FUNCTION("uart2", uart2grp, uart2_mux),
- ADI_PMX_FUNCTION("uart3", uart3grp, uart3_mux),
- ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp, uart3_ctsrts_mux),
- ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
- ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
- ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
- ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
- ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
- ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
- ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
- ADI_PMX_FUNCTION("can1", can1grp, can1_mux),
- ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
- ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
- ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
- ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
- ADI_PMX_FUNCTION("sport3", sport3grp, sport3_mux),
- ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
- ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
- ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
- ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
- ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
- ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
- ADI_PMX_FUNCTION("atapi", atapigrp, atapi_mux),
- ADI_PMX_FUNCTION("atapi_alter", atapialtergrp, atapi_alter_mux),
- ADI_PMX_FUNCTION("nfc0", nfc0grp, nfc0_mux),
- ADI_PMX_FUNCTION("keys_4x4", keys_4x4grp, keys_4x4_mux),
- ADI_PMX_FUNCTION("keys_8x8", keys_8x8grp, keys_8x8_mux),
+ ADI_PMX_FUNCTION("uart0", uart0grp),
+ ADI_PMX_FUNCTION("uart1", uart1grp),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+ ADI_PMX_FUNCTION("uart2", uart2grp),
+ ADI_PMX_FUNCTION("uart3", uart3grp),
+ ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp),
+ ADI_PMX_FUNCTION("spi0", spi0grp),
+ ADI_PMX_FUNCTION("spi1", spi1grp),
+ ADI_PMX_FUNCTION("twi0", twi0grp),
+ ADI_PMX_FUNCTION("twi1", twi1grp),
+ ADI_PMX_FUNCTION("rotary", rotarygrp),
+ ADI_PMX_FUNCTION("can0", can0grp),
+ ADI_PMX_FUNCTION("can1", can1grp),
+ ADI_PMX_FUNCTION("smc0", smc0grp),
+ ADI_PMX_FUNCTION("sport0", sport0grp),
+ ADI_PMX_FUNCTION("sport1", sport1grp),
+ ADI_PMX_FUNCTION("sport2", sport2grp),
+ ADI_PMX_FUNCTION("sport3", sport3grp),
+ ADI_PMX_FUNCTION("ppi0", ppi0grp),
+ ADI_PMX_FUNCTION("ppi1", ppi1grp),
+ ADI_PMX_FUNCTION("ppi2", ppi2grp),
+ ADI_PMX_FUNCTION("atapi", atapigrp),
+ ADI_PMX_FUNCTION("atapi_alter", atapialtergrp),
+ ADI_PMX_FUNCTION("nfc0", nfc0grp),
+ ADI_PMX_FUNCTION("keys", keysgrp),
};
static const struct adi_pinctrl_soc_data adi_bf54x_soc = {
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index bf57aea2826c..4cb59fe9be70 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -259,37 +259,6 @@ static const unsigned lp3_pins[] = {
GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15,
};
-static const struct adi_pin_group adi_pin_groups[] = {
- ADI_PIN_GROUP("uart0grp", uart0_pins),
- ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins),
- ADI_PIN_GROUP("uart1grp", uart1_pins),
- ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
- ADI_PIN_GROUP("rsi0grp", rsi0_pins),
- ADI_PIN_GROUP("eth0grp", eth0_pins),
- ADI_PIN_GROUP("eth1grp", eth1_pins),
- ADI_PIN_GROUP("spi0grp", spi0_pins),
- ADI_PIN_GROUP("spi1grp", spi1_pins),
- ADI_PIN_GROUP("twi0grp", twi0_pins),
- ADI_PIN_GROUP("twi1grp", twi1_pins),
- ADI_PIN_GROUP("rotarygrp", rotary_pins),
- ADI_PIN_GROUP("can0grp", can0_pins),
- ADI_PIN_GROUP("smc0grp", smc0_pins),
- ADI_PIN_GROUP("sport0grp", sport0_pins),
- ADI_PIN_GROUP("sport1grp", sport1_pins),
- ADI_PIN_GROUP("sport2grp", sport2_pins),
- ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
- ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
- ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
- ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
- ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
- ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
- ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins),
- ADI_PIN_GROUP("lp0grp", lp0_pins),
- ADI_PIN_GROUP("lp1grp", lp1_pins),
- ADI_PIN_GROUP("lp2grp", lp2_pins),
- ADI_PIN_GROUP("lp3grp", lp3_pins),
-};
-
static const unsigned short uart0_mux[] = {
P_UART0_TX, P_UART0_RX,
0
@@ -446,6 +415,37 @@ static const unsigned short lp3_mux[] = {
0
};
+static const struct adi_pin_group adi_pin_groups[] = {
+ ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+ ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins, uart0_ctsrts_mux),
+ ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+ ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+ ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+ ADI_PIN_GROUP("eth0grp", eth0_pins, eth0_mux),
+ ADI_PIN_GROUP("eth1grp", eth1_pins, eth1_mux),
+ ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+ ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+ ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+ ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+ ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+ ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+ ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+ ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+ ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+ ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+ ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+ ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+ ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+ ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+ ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+ ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+ ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins, ppi2_16b_mux),
+ ADI_PIN_GROUP("lp0grp", lp0_pins, lp0_mux),
+ ADI_PIN_GROUP("lp1grp", lp1_pins, lp1_mux),
+ ADI_PIN_GROUP("lp2grp", lp2_pins, lp2_mux),
+ ADI_PIN_GROUP("lp3grp", lp3_pins, lp3_mux),
+};
+
static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart0ctsrtsgrp[] = { "uart0ctsrtsgrp" };
static const char * const uart1grp[] = { "uart1grp" };
@@ -463,47 +463,43 @@ static const char * const smc0grp[] = { "smc0grp" };
static const char * const sport0grp[] = { "sport0grp" };
static const char * const sport1grp[] = { "sport1grp" };
static const char * const sport2grp[] = { "sport2grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
-static const char * const ppi2_16bgrp[] = { "ppi2_16bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+ "ppi0_16bgrp",
+ "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+ "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp",
+ "ppi2_16bgrp" };
static const char * const lp0grp[] = { "lp0grp" };
static const char * const lp1grp[] = { "lp1grp" };
static const char * const lp2grp[] = { "lp2grp" };
static const char * const lp3grp[] = { "lp3grp" };
static const struct adi_pmx_func adi_pmx_functions[] = {
- ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
- ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp, uart0_ctsrts_mux),
- ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
- ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
- ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
- ADI_PMX_FUNCTION("eth0", eth0grp, eth0_mux),
- ADI_PMX_FUNCTION("eth1", eth1grp, eth1_mux),
- ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
- ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
- ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
- ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
- ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
- ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
- ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
- ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
- ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
- ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
- ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
- ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
- ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
- ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
- ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
- ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
- ADI_PMX_FUNCTION("ppi2_16b", ppi2_16bgrp, ppi2_16b_mux),
- ADI_PMX_FUNCTION("lp0", lp0grp, lp0_mux),
- ADI_PMX_FUNCTION("lp1", lp1grp, lp1_mux),
- ADI_PMX_FUNCTION("lp2", lp2grp, lp2_mux),
- ADI_PMX_FUNCTION("lp3", lp3grp, lp3_mux),
+ ADI_PMX_FUNCTION("uart0", uart0grp),
+ ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp),
+ ADI_PMX_FUNCTION("uart1", uart1grp),
+ ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+ ADI_PMX_FUNCTION("rsi0", rsi0grp),
+ ADI_PMX_FUNCTION("eth0", eth0grp),
+ ADI_PMX_FUNCTION("eth1", eth1grp),
+ ADI_PMX_FUNCTION("spi0", spi0grp),
+ ADI_PMX_FUNCTION("spi1", spi1grp),
+ ADI_PMX_FUNCTION("twi0", twi0grp),
+ ADI_PMX_FUNCTION("twi1", twi1grp),
+ ADI_PMX_FUNCTION("rotary", rotarygrp),
+ ADI_PMX_FUNCTION("can0", can0grp),
+ ADI_PMX_FUNCTION("smc0", smc0grp),
+ ADI_PMX_FUNCTION("sport0", sport0grp),
+ ADI_PMX_FUNCTION("sport1", sport1grp),
+ ADI_PMX_FUNCTION("sport2", sport2grp),
+ ADI_PMX_FUNCTION("ppi0", ppi0grp),
+ ADI_PMX_FUNCTION("ppi1", ppi1grp),
+ ADI_PMX_FUNCTION("ppi2", ppi2grp),
+ ADI_PMX_FUNCTION("lp0", lp0grp),
+ ADI_PMX_FUNCTION("lp1", lp1grp),
+ ADI_PMX_FUNCTION("lp2", lp2grp),
+ ADI_PMX_FUNCTION("lp3", lp3grp),
};
static const struct adi_pinctrl_soc_data adi_bf60x_soc = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index 7a39562c3e42..200ea1e72d40 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -89,6 +89,19 @@ struct gpio_port_saved {
u32 mux;
};
+/*
+ * struct gpio_pint_saved - PINT registers saved in PM operations
+ *
+ * @assign: ASSIGN register
+ * @edge_set: EDGE_SET register
+ * @invert_set: INVERT_SET register
+ */
+struct gpio_pint_saved {
+ u32 assign;
+ u32 edge_set;
+ u32 invert_set;
+};
+
/**
* struct gpio_pint - Pin interrupt controller device. Multiple ADI GPIO
* banks can be mapped into one Pin interrupt controller.
@@ -114,7 +127,7 @@ struct gpio_pint {
int irq;
struct irq_domain *domain[2];
struct gpio_pint_regs *regs;
- struct adi_pm_pint_save saved_data;
+ struct gpio_pint_saved saved_data;
int map_count;
spinlock_t lock;
@@ -160,7 +173,7 @@ struct adi_pinctrl {
struct gpio_port {
struct list_head node;
void __iomem *base;
- unsigned int irq_base;
+ int irq_base;
unsigned int width;
struct gpio_port_t *regs;
struct gpio_port_saved saved_data;
@@ -605,8 +618,8 @@ static struct pinctrl_ops adi_pctrl_ops = {
.get_group_pins = adi_get_group_pins,
};
-static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id,
+ unsigned group_id)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
struct gpio_port *port;
@@ -614,7 +627,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned long flags;
unsigned short *mux, pin;
- mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+ mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
while (*mux) {
pin = P_IDENT(*mux);
@@ -628,7 +641,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
spin_lock_irqsave(&port->lock, flags);
portmux_setup(port, pin_to_offset(range, pin),
- P_FUNCT2MUX(*mux));
+ P_FUNCT2MUX(*mux));
port_setup(port, pin_to_offset(range, pin), false);
mux++;
@@ -638,8 +651,8 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
+static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned func_id,
+ unsigned group_id)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
struct gpio_port *port;
@@ -647,7 +660,7 @@ static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned long flags;
unsigned short *mux, pin;
- mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+ mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
while (*mux) {
pin = P_IDENT(*mux);
diff --git a/drivers/pinctrl/pinctrl-adi2.h b/drivers/pinctrl/pinctrl-adi2.h
index 1f06f8df1fa3..3ca29738213f 100644
--- a/drivers/pinctrl/pinctrl-adi2.h
+++ b/drivers/pinctrl/pinctrl-adi2.h
@@ -21,13 +21,15 @@ struct adi_pin_group {
const char *name;
const unsigned *pins;
const unsigned num;
+ const unsigned short *mux;
};
-#define ADI_PIN_GROUP(n, p) \
+#define ADI_PIN_GROUP(n, p, m) \
{ \
.name = n, \
.pins = p, \
.num = ARRAY_SIZE(p), \
+ .mux = m, \
}
/**
@@ -41,15 +43,13 @@ struct adi_pmx_func {
const char *name;
const char * const *groups;
const unsigned num_groups;
- const unsigned short *mux;
};
-#define ADI_PMX_FUNCTION(n, g, m) \
+#define ADI_PMX_FUNCTION(n, g) \
{ \
.name = n, \
.groups = g, \
.num_groups = ARRAY_SIZE(g), \
- .mux = m, \
}
/**
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b9790e..5d24aaec5dbc 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1137,6 +1137,17 @@ static void at91_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_free_gpio(gpio);
}
+static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
+ void __iomem *pio = at91_gpio->regbase;
+ unsigned mask = 1 << offset;
+ u32 osr;
+
+ osr = readl_relaxed(pio + PIO_OSR);
+ return !(osr & mask);
+}
+
static int at91_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
@@ -1286,22 +1297,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1310,7 +1321,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
@@ -1325,6 +1336,31 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
return 0;
}
+static unsigned int gpio_irq_startup(struct irq_data *d)
+{
+ struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+ unsigned pin = d->hwirq;
+ int ret;
+
+ ret = gpio_lock_as_irq(&at91_gpio->chip, pin);
+ if (ret) {
+ dev_err(at91_gpio->chip.dev, "unable to lock pin %lu IRQ\n",
+ d->hwirq);
+ return ret;
+ }
+ gpio_irq_unmask(d);
+ return 0;
+}
+
+static void gpio_irq_shutdown(struct irq_data *d)
+{
+ struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+ unsigned pin = d->hwirq;
+
+ gpio_irq_mask(d);
+ gpio_unlock_as_irq(&at91_gpio->chip, pin);
+}
+
#ifdef CONFIG_PM
static u32 wakeups[MAX_GPIO_BANKS];
@@ -1399,6 +1435,8 @@ void at91_pinctrl_gpio_resume(void)
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
+ .irq_startup = gpio_irq_startup,
+ .irq_shutdown = gpio_irq_shutdown,
.irq_disable = gpio_irq_mask,
.irq_mask = gpio_irq_mask,
.irq_unmask = gpio_irq_unmask,
@@ -1543,6 +1581,7 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
static struct gpio_chip at91_gpio_template = {
.request = at91_gpio_request,
.free = at91_gpio_free,
+ .get_direction = at91_gpio_get_direction,
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 665b96bc0c3a..bf2b3f655469 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -60,6 +60,10 @@
#define BYT_NGPIO_NCORE 28
#define BYT_NGPIO_SUS 44
+#define BYT_SCORE_ACPI_UID "1"
+#define BYT_NCORE_ACPI_UID "2"
+#define BYT_SUS_ACPI_UID "3"
+
/*
* Baytrail gpio controller consists of three separate sub-controllers called
* SCORE, NCORE and SUS. The sub-controllers are identified by their acpi UID.
@@ -102,17 +106,17 @@ static unsigned const sus_pins[BYT_NGPIO_SUS] = {
static struct pinctrl_gpio_range byt_ranges[] = {
{
- .name = "1", /* match with acpi _UID in probe */
+ .name = BYT_SCORE_ACPI_UID, /* match with acpi _UID in probe */
.npins = BYT_NGPIO_SCORE,
.pins = score_pins,
},
{
- .name = "2",
+ .name = BYT_NCORE_ACPI_UID,
.npins = BYT_NGPIO_NCORE,
.pins = ncore_pins,
},
{
- .name = "3",
+ .name = BYT_SUS_ACPI_UID,
.npins = BYT_NGPIO_SUS,
.pins = sus_pins,
},
@@ -145,9 +149,41 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
return vg->reg_base + reg_offset + reg;
}
+static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+{
+ /* SCORE pins 92-93 */
+ if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
+ offset >= 92 && offset <= 93)
+ return true;
+
+ /* SUS pins 11-21 */
+ if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
+ offset >= 11 && offset <= 21)
+ return true;
+
+ return false;
+}
+
static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct byt_gpio *vg = to_byt_gpio(chip);
+ void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+ u32 value;
+ bool special;
+
+ /*
+ * In most cases, func pin mux 000 means GPIO function.
+ * But on some pins, func pin mux 001 represents the
+ * GPIO function. Only allow the user to export a pin whose
+ * func pin mux is preset to the GPIO function by the BIOS/FW.
+ */
+ value = readl(reg) & BYT_PIN_MUX;
+ special = is_special_pin(vg, offset);
+ if ((special && value != 1) || (!special && value)) {
+ dev_err(&vg->pdev->dev,
+ "pin %u cannot be used as GPIO.\n", offset);
+ return -EINVAL;
+ }
pm_runtime_get(&vg->pdev->dev);
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53f99b0..eb2500212147 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev)
}
static struct of_device_id capri_pinctrl_of_match[] = {
- { .compatible = "brcm,capri-pinctrl", },
+ { .compatible = "brcm,bcm11351-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 155b1b3a0e7a..07c81306f2f3 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -1042,6 +1042,88 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
},
};
+/* pin banks of exynos5260 pin-controller 0 */
+static struct samsung_pin_bank exynos5260_pin_banks0[] = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0a0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpb4", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpb5", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpd1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x160, "gpd2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(5, 0x1a0, "gpe1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1e0, "gpf1", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x200, "gpk0", 0x40),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5260 pin-controller 1 */
+static struct samsung_pin_bank exynos5260_pin_banks1[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpc4", 0x10),
+};
+
+/* pin banks of exynos5260 pin-controller 2 */
+static struct samsung_pin_bank exynos5260_pin_banks2[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5260_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .weint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .label = "exynos5260-gpio-ctrl0",
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5260_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5260-gpio-ctrl1",
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5260_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .label = "exynos5260-gpio-ctrl2",
+ },
+};
+
/* pin banks of exynos5420 pin-controller 0 */
static struct samsung_pin_bank exynos5420_pin_banks0[] = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 4779b8e0eee8..e118fb121e02 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -491,7 +491,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin->mux_mode |= IOMUXC_CONFIG_SION;
pin->config = config & ~IMX_PAD_SION;
- dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[i].name,
+ dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[pin_id].name,
pin->mux_mode, pin->config);
}
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde1b51d..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;
- dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
- reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
- int offset = pin_id % 16;
+ int offset = (pin_id % 16) * 2;
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
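The 2-bit accessors pack 16 pins per 32-bit register, two bits per pin, with port pins 16..31 spilling into the following register. A worked sketch of the addressing under that assumed layout; the helper names are illustrative only:

/* e.g. pin_id 21: shift = (21 % 16) * 2 = 10 bits, and the register
 * address gains 0x04 because 21 % 32 >= 16.
 */
static unsigned int imx1_2bit_shift(unsigned int pin_id)
{
	return (pin_id % 16) * 2;
}

static unsigned int imx1_2bit_reg_extra(unsigned int pin_id)
{
	return (pin_id % 32 >= 16) ? 0x04 : 0x00;
}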
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
index ef2bf3126da6..343f421c7696 100644
--- a/drivers/pinctrl/pinctrl-msm.c
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -28,7 +28,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include "core.h"
@@ -50,7 +49,6 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
- * @wake_irqs: Bitmap of irqs with requested as wakeup source.
* @soc: Reference to soc_data of platform specific data.
* @regs: Base address for the TLMM register map.
*/
@@ -65,7 +63,6 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
- DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs;
@@ -203,42 +200,29 @@ static const struct pinmux_ops msm_pinmux_ops = {
static int msm_config_reg(struct msm_pinctrl *pctrl,
const struct msm_pingroup *g,
unsigned param,
- s16 *reg,
unsigned *mask,
unsigned *bit)
{
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- *reg = g->ctl_reg;
- *bit = g->pull_bit;
- *mask = 3;
- break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- *reg = g->ctl_reg;
- *bit = g->pull_bit;
- *mask = 3;
- break;
case PIN_CONFIG_BIAS_PULL_UP:
- *reg = g->ctl_reg;
*bit = g->pull_bit;
*mask = 3;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- *reg = g->ctl_reg;
*bit = g->drv_bit;
*mask = 7;
break;
+ case PIN_CONFIG_OUTPUT:
+ *bit = g->oe_bit;
+ *mask = 1;
+ break;
default:
dev_err(pctrl->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
- if (*reg < 0) {
- dev_err(pctrl->dev, "Config param %04x not supported on group %s\n",
- param, g->name);
- return -ENOTSUPP;
- }
-
return 0;
}
@@ -261,8 +245,10 @@ static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
#define MSM_PULL_DOWN 1
#define MSM_PULL_UP 3
-static const unsigned msm_regval_to_drive[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
-static const unsigned msm_drive_to_regval[] = { -1, -1, 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7 };
+static unsigned msm_regval_to_drive(u32 val)
+{
+ return (val + 1) * 2;
+}
static int msm_config_group_get(struct pinctrl_dev *pctldev,
unsigned int group,
@@ -274,17 +260,16 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
unsigned mask;
unsigned arg;
unsigned bit;
- s16 reg;
int ret;
u32 val;
g = &pctrl->soc->groups[group];
- ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ ret = msm_config_reg(pctrl, g, param, &mask, &bit);
if (ret < 0)
return ret;
- val = readl(pctrl->regs + reg);
+ val = readl(pctrl->regs + g->ctl_reg);
arg = (val >> bit) & mask;
/* Convert register value to pinconf value */
@@ -299,7 +284,15 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
arg = arg == MSM_PULL_UP;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = msm_regval_to_drive[arg];
+ arg = msm_regval_to_drive(arg);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ /* Pin is not output */
+ if (!arg)
+ return -EINVAL;
+
+ val = readl(pctrl->regs + g->io_reg);
+ arg = !!(val & BIT(g->in_bit));
break;
default:
dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -324,7 +317,6 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
unsigned mask;
unsigned arg;
unsigned bit;
- s16 reg;
int ret;
u32 val;
int i;
@@ -335,7 +327,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+ ret = msm_config_reg(pctrl, g, param, &mask, &bit);
if (ret < 0)
return ret;
@@ -352,10 +344,24 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
/* Check for invalid values */
- if (arg >= ARRAY_SIZE(msm_drive_to_regval))
+ if (arg > 16 || arg < 2 || (arg % 2) != 0)
arg = -1;
else
- arg = msm_drive_to_regval[arg];
+ arg = (arg / 2) - 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ /* set output value */
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readl(pctrl->regs + g->io_reg);
+ if (arg)
+ val |= BIT(g->out_bit);
+ else
+ val &= ~BIT(g->out_bit);
+ writel(val, pctrl->regs + g->io_reg);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ /* enable output */
+ arg = 1;
break;
default:
dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -370,10 +376,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
}
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + reg);
+ val = readl(pctrl->regs + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
- writel(val, pctrl->regs + reg);
+ writel(val, pctrl->regs + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
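The drive-strength lookup tables are replaced with plain arithmetic: register values 0..7 correspond to 2..16 mA in 2 mA steps. A minimal sketch of the inverse mapping that msm_config_group_set() now open-codes; the helper name is an assumption, not part of the patch:

static int msm_drive_to_regval(unsigned int ma)
{
	/* only even values between 2 mA and 16 mA are representable */
	if (ma < 2 || ma > 16 || (ma % 2) != 0)
		return -1;
	return (ma / 2) - 1;	/* e.g. 8 mA -> register value 3 */
}

msm_regval_to_drive() above performs the forward direction, (val + 1) * 2.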
@@ -402,8 +408,6 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -424,8 +428,6 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -452,8 +454,6 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return -EINVAL;
val = readl(pctrl->regs + g->io_reg);
return !!(val & BIT(g->in_bit));
@@ -467,8 +467,6 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
u32 val;
g = &pctrl->soc->groups[offset];
- if (WARN_ON(g->io_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -534,7 +532,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
pull = (ctl_reg >> g->pull_bit) & 3;
seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
- seq_printf(s, " %dmA", msm_regval_to_drive[drive]);
+ seq_printf(s, " %dmA", msm_regval_to_drive(drive));
seq_printf(s, " %s", pulls[pull]);
}
@@ -617,8 +615,6 @@ static void msm_gpio_irq_mask(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_cfg_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -640,8 +636,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_status_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -667,8 +661,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_status_reg < 0))
- return;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -693,8 +685,6 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pctrl = irq_data_get_irq_chip_data(d);
g = &pctrl->soc->groups[d->hwirq];
- if (WARN_ON(g->intr_cfg_reg < 0))
- return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
@@ -783,22 +773,12 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct msm_pinctrl *pctrl;
unsigned long flags;
- unsigned ngpio;
pctrl = irq_data_get_irq_chip_data(d);
- ngpio = pctrl->chip.ngpio;
spin_lock_irqsave(&pctrl->lock, flags);
- if (on) {
- if (bitmap_empty(pctrl->wake_irqs, ngpio))
- enable_irq_wake(pctrl->irq);
- set_bit(d->hwirq, pctrl->wake_irqs);
- } else {
- clear_bit(d->hwirq, pctrl->wake_irqs);
- if (bitmap_empty(pctrl->wake_irqs, ngpio))
- disable_irq_wake(pctrl->irq);
- }
+ irq_set_irq_wake(pctrl->irq, on);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -869,6 +849,12 @@ static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
@@ -876,10 +862,14 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
int ret;
int i;
int r;
+ unsigned ngpio = pctrl->soc->ngpios;
+
+ if (WARN_ON(ngpio > MAX_NR_GPIO))
+ return -EINVAL;
chip = &pctrl->chip;
chip->base = 0;
- chip->ngpio = pctrl->soc->ngpios;
+ chip->ngpio = ngpio;
chip->label = dev_name(pctrl->dev);
chip->dev = pctrl->dev;
chip->owner = THIS_MODULE;
@@ -907,6 +897,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
for (i = 0; i < chip->ngpio; i++) {
irq = irq_create_mapping(pctrl->domain, i);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, pctrl);
}
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
index 206e782e2daa..8fbe9fb19f36 100644
--- a/drivers/pinctrl/pinctrl-msm.h
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -13,10 +13,7 @@
#ifndef __PINCTRL_MSM_H__
#define __PINCTRL_MSM_H__
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/machine.h>
+struct pinctrl_pin_desc;
/**
* struct msm_function - a pinmux function
diff --git a/drivers/pinctrl/pinctrl-msm8x74.c b/drivers/pinctrl/pinctrl-msm8x74.c
index f944bf2172ef..dde5529807aa 100644
--- a/drivers/pinctrl/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/pinctrl-msm8x74.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
#include "pinctrl-msm.h"
@@ -406,6 +405,7 @@ enum msm8x74_functions {
MSM_MUX_blsp_i2c6,
MSM_MUX_blsp_i2c11,
MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_spi8,
MSM_MUX_blsp_uart2,
MSM_MUX_blsp_uart8,
MSM_MUX_slimbus,
@@ -416,6 +416,9 @@ static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
static const char * const blsp_i2c6_groups[] = { "gpio29", "gpio30" };
static const char * const blsp_i2c11_groups[] = { "gpio83", "gpio84" };
static const char * const blsp_spi1_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3" };
+static const char * const blsp_spi8_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48"
+};
static const char * const blsp_uart2_groups[] = { "gpio4", "gpio5" };
static const char * const blsp_uart8_groups[] = { "gpio45", "gpio46" };
static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
@@ -425,6 +428,7 @@ static const struct msm_function msm8x74_functions[] = {
FUNCTION(blsp_i2c6),
FUNCTION(blsp_i2c11),
FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi8),
FUNCTION(blsp_uart2),
FUNCTION(blsp_uart8),
FUNCTION(slimbus),
@@ -476,10 +480,10 @@ static const struct msm_pingroup msm8x74_groups[] = {
PINGROUP(42, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(43, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(44, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(45, NA, blsp_uart8, NA, NA, NA, NA, NA),
- PINGROUP(46, NA, blsp_uart8, NA, NA, NA, NA, NA),
- PINGROUP(47, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(48, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(46, blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+ PINGROUP(47, blsp_spi8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, blsp_spi8, NA, NA, NA, NA, NA, NA),
PINGROUP(49, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(50, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(51, NA, NA, NA, NA, NA, NA, NA),
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index cd2b1a1c9275..cec7762cf335 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008,2009 STMicroelectronics
* Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
* Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
- * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +33,6 @@
#include <linux/pinctrl/pinconf.h>
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/pinctrl-nomadik.h>
#include "pinctrl-nomadik.h"
#include "core.h"
@@ -45,6 +44,221 @@
* Symbols in this file are called "nmk_gpio" for "nomadik gpio"
*/
+/*
+ * pin configurations are represented by 32-bit integers:
+ *
+ * bit 0.. 8 - Pin Number (512 Pins Maximum)
+ * bit 9..10 - Alternate Function Selection
+ * bit 11..12 - Pull up/down state
+ * bit 13 - Sleep mode behaviour
+ * bit 14 - Direction
+ * bit 15 - Value (if output)
+ * bit 16..18 - SLPM pull up/down state
+ * bit 19..20 - SLPM direction
+ * bit 21..22 - SLPM Value (if output)
+ * bit 23..24 - PDIS value (if input)
+ * bit 25 - Low EMI mode
+ * bit 26 - Gpio mode
+ * bit 27 - Sleep mode
+ *
+ * to facilitate the definition, the following macros are provided
+ *
+ * PIN_CFG_DEFAULT - default config (0):
+ * pull up/down = disabled
+ * sleep mode = input/wakeup
+ * direction = input
+ * value = low
+ * SLPM direction = same as normal
+ * SLPM pull = same as normal
+ * SLPM value = same as normal
+ *
+ * PIN_CFG - default config with alternate function
+ */
+
+typedef unsigned long pin_cfg_t;
+
+#define PIN_NUM_MASK 0x1ff
+#define PIN_NUM(x) ((x) & PIN_NUM_MASK)
+
+#define PIN_ALT_SHIFT 9
+#define PIN_ALT_MASK (0x3 << PIN_ALT_SHIFT)
+#define PIN_ALT(x) (((x) & PIN_ALT_MASK) >> PIN_ALT_SHIFT)
+#define PIN_GPIO (NMK_GPIO_ALT_GPIO << PIN_ALT_SHIFT)
+#define PIN_ALT_A (NMK_GPIO_ALT_A << PIN_ALT_SHIFT)
+#define PIN_ALT_B (NMK_GPIO_ALT_B << PIN_ALT_SHIFT)
+#define PIN_ALT_C (NMK_GPIO_ALT_C << PIN_ALT_SHIFT)
+
+#define PIN_PULL_SHIFT 11
+#define PIN_PULL_MASK (0x3 << PIN_PULL_SHIFT)
+#define PIN_PULL(x) (((x) & PIN_PULL_MASK) >> PIN_PULL_SHIFT)
+#define PIN_PULL_NONE (NMK_GPIO_PULL_NONE << PIN_PULL_SHIFT)
+#define PIN_PULL_UP (NMK_GPIO_PULL_UP << PIN_PULL_SHIFT)
+#define PIN_PULL_DOWN (NMK_GPIO_PULL_DOWN << PIN_PULL_SHIFT)
+
+#define PIN_SLPM_SHIFT 13
+#define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT)
+#define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT)
+#define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT)
+#define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT)
+/* These two replace the above in DB8500v2+ */
+#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP PIN_SLPM_WAKEUP_DISABLE
+
+#define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */
+#define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */
+
+#define PIN_DIR_SHIFT 14
+#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
+#define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
+#define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT)
+#define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT)
+
+#define PIN_VAL_SHIFT 15
+#define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT)
+#define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
+#define PIN_VAL_LOW (0 << PIN_VAL_SHIFT)
+#define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT)
+
+#define PIN_SLPM_PULL_SHIFT 16
+#define PIN_SLPM_PULL_MASK (0x7 << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL(x) \
+ (((x) & PIN_SLPM_PULL_MASK) >> PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_NONE \
+ ((1 + NMK_GPIO_PULL_NONE) << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_UP \
+ ((1 + NMK_GPIO_PULL_UP) << PIN_SLPM_PULL_SHIFT)
+#define PIN_SLPM_PULL_DOWN \
+ ((1 + NMK_GPIO_PULL_DOWN) << PIN_SLPM_PULL_SHIFT)
+
+#define PIN_SLPM_DIR_SHIFT 19
+#define PIN_SLPM_DIR_MASK (0x3 << PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR(x) \
+ (((x) & PIN_SLPM_DIR_MASK) >> PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR_INPUT ((1 + 0) << PIN_SLPM_DIR_SHIFT)
+#define PIN_SLPM_DIR_OUTPUT ((1 + 1) << PIN_SLPM_DIR_SHIFT)
+
+#define PIN_SLPM_VAL_SHIFT 21
+#define PIN_SLPM_VAL_MASK (0x3 << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL(x) \
+ (((x) & PIN_SLPM_VAL_MASK) >> PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT)
+
+#define PIN_SLPM_PDIS_SHIFT 23
+#define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS(x) \
+ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT)
+
+#define PIN_LOWEMI_SHIFT 25
+#define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI(x) (((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT)
+
+#define PIN_GPIOMODE_SHIFT 26
+#define PIN_GPIOMODE_MASK (0x1 << PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE(x) (((x) & PIN_GPIOMODE_MASK) >> PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE_DISABLED (0 << PIN_GPIOMODE_SHIFT)
+#define PIN_GPIOMODE_ENABLED (1 << PIN_GPIOMODE_SHIFT)
+
+#define PIN_SLEEPMODE_SHIFT 27
+#define PIN_SLEEPMODE_MASK (0x1 << PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE(x) (((x) & PIN_SLEEPMODE_MASK) >> PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE_DISABLED (0 << PIN_SLEEPMODE_SHIFT)
+#define PIN_SLEEPMODE_ENABLED (1 << PIN_SLEEPMODE_SHIFT)
+
+
+/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
+#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
+#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
+#define PIN_INPUT_NOPULL (PIN_DIR_INPUT | PIN_PULL_NONE)
+#define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW)
+#define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH)
+
+#define PIN_SLPM_INPUT_PULLDOWN (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_DOWN)
+#define PIN_SLPM_INPUT_PULLUP (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_UP)
+#define PIN_SLPM_INPUT_NOPULL (PIN_SLPM_DIR_INPUT | PIN_SLPM_PULL_NONE)
+#define PIN_SLPM_OUTPUT_LOW (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_LOW)
+#define PIN_SLPM_OUTPUT_HIGH (PIN_SLPM_DIR_OUTPUT | PIN_SLPM_VAL_HIGH)
+
+#define PIN_CFG_DEFAULT (0)
+
+#define PIN_CFG(num, alt) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt))
+
+#define PIN_CFG_INPUT(num, alt, pull) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt | PIN_INPUT_##pull))
+
+#define PIN_CFG_OUTPUT(num, alt, val) \
+ (PIN_CFG_DEFAULT |\
+ (PIN_NUM(num) | PIN_##alt | PIN_OUTPUT_##val))
+
+/*
+ * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
+ * the "gpio" namespace for generic and cross-machine functions
+ */
+
+#define GPIO_BLOCK_SHIFT 5
+#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+/* These appear in DB8540 and later ASICs */
+#define NMK_GPIO_EDGELEVEL 0x5C
+#define NMK_GPIO_LEVEL 0x60
+
+
+/* Pull up/down values */
+enum nmk_gpio_pull {
+ NMK_GPIO_PULL_NONE,
+ NMK_GPIO_PULL_UP,
+ NMK_GPIO_PULL_DOWN,
+};
+
+/* Sleep mode */
+enum nmk_gpio_slpm {
+ NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
+};
+
+/*
+ * Platform data to register a block: only the initial gpio/irq number.
+ */
+struct nmk_gpio_platform_data {
+ char *name;
+ int first_gpio;
+ int first_irq;
+ int num_gpio;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
+ bool supports_sleepmode;
+};
+
struct nmk_gpio_chip {
struct gpio_chip chip;
struct irq_domain *domain;
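The PIN_CFG* helpers added in the hunk above pack a whole pin description into one pin_cfg_t word. A small usage sketch; the pin number 42 and the variable name are arbitrary examples:

/* pin 42 on alternate function A, configured as an input with pull-up */
static const pin_cfg_t example_cfg = PIN_CFG_INPUT(42, ALT_A, PULLUP);
/* equivalent to PIN_NUM(42) | PIN_ALT_A | PIN_DIR_INPUT | PIN_PULL_UP */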
@@ -1026,7 +1240,7 @@ static const struct irq_domain_ops nmk_gpio_irq_simple_ops = {
static int nmk_gpio_probe(struct platform_device *dev)
{
- struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
+ struct nmk_gpio_platform_data *pdata;
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
@@ -1034,32 +1248,24 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct clk *clk;
int secondary_irq;
void __iomem *base;
- int irq_start = 0;
int irq;
int ret;
- if (!pdata && !np) {
- dev_err(&dev->dev, "No platform data or device tree found\n");
- return -ENODEV;
- }
-
- if (np) {
- pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "st,supports-sleepmode", NULL))
- pdata->supports_sleepmode = true;
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
- dev_err(&dev->dev, "gpio-bank property not found\n");
- return -EINVAL;
- }
+ if (of_get_property(np, "st,supports-sleepmode", NULL))
+ pdata->supports_sleepmode = true;
- pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
- pdata->num_gpio = NMK_GPIO_PER_CHIP;
+ if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
+ dev_err(&dev->dev, "gpio-bank property not found\n");
+ return -EINVAL;
}
+ pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
+ pdata->num_gpio = NMK_GPIO_PER_CHIP;
+
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
@@ -1107,10 +1313,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
-
-#ifdef CONFIG_OF_GPIO
chip->of_node = np;
-#endif
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
@@ -1122,10 +1325,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- if (!np)
- irq_start = pdata->first_irq;
nmk_chip->domain = irq_domain_add_simple(np,
- NMK_GPIO_PER_CHIP, irq_start,
+ NMK_GPIO_PER_CHIP, 0,
&nmk_gpio_irq_simple_ops, nmk_chip);
if (!nmk_chip->domain) {
dev_err(&dev->dev, "failed to create irqdomain\n");
@@ -1834,35 +2035,36 @@ static const struct of_device_id nmk_pinctrl_match[] = {
{},
};
-static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int nmk_pinctrl_suspend(struct device *dev)
{
struct nmk_pinctrl *npct;
- npct = platform_get_drvdata(pdev);
+ npct = dev_get_drvdata(dev);
if (!npct)
return -EINVAL;
return pinctrl_force_sleep(npct->pctl);
}
-static int nmk_pinctrl_resume(struct platform_device *pdev)
+static int nmk_pinctrl_resume(struct device *dev)
{
struct nmk_pinctrl *npct;
- npct = platform_get_drvdata(pdev);
+ npct = dev_get_drvdata(dev);
if (!npct)
return -EINVAL;
return pinctrl_force_default(npct->pctl);
}
+#endif
static int nmk_pinctrl_probe(struct platform_device *pdev)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
+ const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
struct device_node *prcm_np;
struct nmk_pinctrl *npct;
- struct resource *res;
unsigned int version = 0;
int i;
@@ -1870,16 +2072,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (!npct)
return -ENOMEM;
- if (platid)
- version = platid->driver_data;
- else if (np) {
- const struct of_device_id *match;
-
- match = of_match_device(nmk_pinctrl_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- version = (unsigned int) match->data;
- }
+ match = of_match_device(nmk_pinctrl_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ version = (unsigned int) match->data;
/* Poke in other ASIC variants here */
if (version == PINCTRL_NMK_STN8815)
@@ -1889,17 +2085,9 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
if (version == PINCTRL_NMK_DB8540)
nmk_pinctrl_db8540_init(&npct->soc);
- if (np) {
- prcm_np = of_parse_phandle(np, "prcm", 0);
- if (prcm_np)
- npct->prcm_base = of_iomap(prcm_np, 0);
- }
-
- /* Allow platform passed information to over-write DT. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- npct->prcm_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ prcm_np = of_parse_phandle(np, "prcm", 0);
+ if (prcm_np)
+ npct->prcm_base = of_iomap(prcm_np, 0);
if (!npct->prcm_base) {
if (version == PINCTRL_NMK_STN8815) {
dev_info(&pdev->dev,
@@ -1958,25 +2146,18 @@ static struct platform_driver nmk_gpio_driver = {
.probe = nmk_gpio_probe,
};
-static const struct platform_device_id nmk_pinctrl_id[] = {
- { "pinctrl-stn8815", PINCTRL_NMK_STN8815 },
- { "pinctrl-db8500", PINCTRL_NMK_DB8500 },
- { "pinctrl-db8540", PINCTRL_NMK_DB8540 },
- { }
-};
+static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
+ nmk_pinctrl_suspend,
+ nmk_pinctrl_resume);
static struct platform_driver nmk_pinctrl_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pinctrl-nomadik",
.of_match_table = nmk_pinctrl_match,
+ .pm = &nmk_pinctrl_pm_ops,
},
.probe = nmk_pinctrl_probe,
- .id_table = nmk_pinctrl_id,
-#ifdef CONFIG_PM
- .suspend = nmk_pinctrl_suspend,
- .resume = nmk_pinctrl_resume,
-#endif
};
static int __init nmk_gpio_init(void)
diff --git a/drivers/pinctrl/pinctrl-nomadik.h b/drivers/pinctrl/pinctrl-nomadik.h
index bcd4191e10ea..d8215f1e70c7 100644
--- a/drivers/pinctrl/pinctrl-nomadik.h
+++ b/drivers/pinctrl/pinctrl-nomadik.h
@@ -1,13 +1,23 @@
#ifndef PINCTRL_PINCTRL_NOMADIK_H
#define PINCTRL_PINCTRL_NOMADIK_H
-#include <linux/platform_data/pinctrl-nomadik.h>
-
/* Package definitions */
#define PINCTRL_NMK_STN8815 0
#define PINCTRL_NMK_DB8500 1
#define PINCTRL_NMK_DB8540 2
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+#define NMK_GPIO_ALT_CX_SHIFT 2
+#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+
#define PRCM_GPIOCR_ALTCX(pin_num,\
altc1_used, altc1_ri, altc1_cb,\
altc2_used, altc2_ri, altc2_cb,\
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 47ec2e8741e4..0324d4cb19b2 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -1120,6 +1120,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4x12_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
+ { .compatible = "samsung,exynos5260-pinctrl",
+ .data = (void *)exynos5260_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
.data = (void *)exynos5420_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 30622d9afa2e..bab9c2122556 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -254,6 +254,7 @@ struct samsung_pmx_func {
extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
+extern struct samsung_pin_ctrl exynos5260_pin_ctrl[];
extern struct samsung_pin_ctrl exynos5420_pin_ctrl[];
extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index de6459628b4f..81075f2a1d3f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -662,6 +662,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
+ case PIN_CONFIG_LOW_POWER_MODE:
default:
*config = data;
break;
@@ -699,6 +700,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_DRIVE_STRENGTH:
case PIN_CONFIG_SLEW_RATE:
+ case PIN_CONFIG_LOW_POWER_MODE:
shift = ffs(func->conf[i].mask) - 1;
data &= ~func->conf[i].mask;
data |= (arg << shift) & func->conf[i].mask;
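The hunk above routes PIN_CONFIG_LOW_POWER_MODE through the same generic mask/shift packing as the other per-property conf entries. A minimal, self-contained sketch of that packing follows; the 0x6000 mask and the argument value in the comments are assumptions for illustration, not values taken from the driver.

#include <strings.h>	/* ffs() */

/* Pack a pinconf argument into the register field selected by mask,
 * mirroring the data manipulation in pcs_pinconf_set() above. */
static unsigned int pack_conf_field(unsigned int reg, unsigned int mask,
				    unsigned int arg)
{
	unsigned int shift = ffs(mask) - 1;	/* e.g. ffs(0x6000) - 1 = 13 */

	reg &= ~mask;				/* clear the old field value */
	reg |= (arg << shift) & mask;		/* arg = 2 -> field bits = 0x4000 */
	return reg;
}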
@@ -1101,6 +1103,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
+ { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
};
struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 320c27363cc8..bd725b0a4341 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -13,7 +13,12 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
@@ -266,11 +271,59 @@ struct st_pctl_group {
struct st_pinconf *pin_conf;
};
+/*
+ * Edge triggers are not supported at the hardware level; they are emulated
+ * in software by exploiting the level trigger support in hardware.
+ * Software uses a virtual register (EDGE_CONF) for the edge trigger
+ * configuration of each GPIO pin in a GPIO bank.
+ *
+ * Each bank has a 32-bit EDGE_CONF register, divided into eight 4-bit
+ * fields, one per pin in the bank.
+ *
+ * Bit allocation per pin is:
+ * Bits: [0 - 3] | [4 - 7] | [8 - 11] | ... ... ... ... | [28 - 31]
+ * --------------------------------------------------------
+ * | pin-0 | pin-1 | pin-2 | ... ... ... ... | pin-7 |
+ * --------------------------------------------------------
+ *
+ * A pin can have one of the following values in its edge configuration field.
+ *
+ * ------- ----------------------------
+ * [0-3] - Description
+ * ------- ----------------------------
+ * 0000 - No edge IRQ.
+ * 0001 - Falling edge IRQ.
+ * 0010 - Rising edge IRQ.
+ * 0011 - Rising and Falling edge IRQ.
+ * ------- ----------------------------
+ */
+
+#define ST_IRQ_EDGE_CONF_BITS_PER_PIN 4
+#define ST_IRQ_EDGE_MASK 0xf
+#define ST_IRQ_EDGE_FALLING BIT(0)
+#define ST_IRQ_EDGE_RISING BIT(1)
+#define ST_IRQ_EDGE_BOTH (BIT(0) | BIT(1))
+
+#define ST_IRQ_RISING_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_RISING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_FALLING_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_FALLING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_BOTH_EDGE_CONF(pin) \
+ (ST_IRQ_EDGE_BOTH << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_EDGE_CONF(conf, pin) \
+ (conf >> (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN) & ST_IRQ_EDGE_MASK)
+
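A minimal sketch of the 4-bits-per-pin packing that the macros above implement; the helper function names are illustrative, not part of the driver.

/* Set pin 3 of an empty configuration word to "both edges" (0x3):
 * edge_conf_set(0, 3, 0x3) returns 0x00003000. */
static u32 edge_conf_set(u32 conf, unsigned int pin, u32 edge)
{
	conf &= ~((u32)ST_IRQ_EDGE_MASK << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN));
	conf |= edge << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN);
	return conf;
}

/* Read back one pin's field, as ST_IRQ_EDGE_CONF() does. */
static u32 edge_conf_get(u32 conf, unsigned int pin)
{
	return (conf >> (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN)) & ST_IRQ_EDGE_MASK;
}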
struct st_gpio_bank {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range range;
void __iomem *base;
struct st_pio_control pc;
+ struct irq_domain *domain;
+ unsigned long irq_edge_conf;
+ spinlock_t lock;
};
struct st_pinctrl {
@@ -284,6 +337,7 @@ struct st_pinctrl {
int ngroups;
struct regmap *regmap;
const struct st_pctl_data *data;
+ void __iomem *irqmux_base;
};
/* SOC specific data */
@@ -330,12 +384,25 @@ static unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250, 1500,
static const struct st_pctl_data stih416_data = {
.rt_style = st_retime_style_dedicated,
.input_delays = stih416_delays,
- .ninput_delays = 14,
+ .ninput_delays = ARRAY_SIZE(stih416_delays),
.output_delays = stih416_delays,
- .noutput_delays = 14,
+ .noutput_delays = ARRAY_SIZE(stih416_delays),
.alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
};
+static const struct st_pctl_data stih407_flashdata = {
+ .rt_style = st_retime_style_none,
+ .input_delays = stih416_delays,
+ .ninput_delays = ARRAY_SIZE(stih416_delays),
+ .output_delays = stih416_delays,
+ .noutput_delays = ARRAY_SIZE(stih416_delays),
+ .alt = 0,
+ .oe = -1, /* Not Available */
+ .pu = -1, /* Not Available */
+ .od = 60,
+ .rt = 100,
+};
+
/* Low level functions.. */
static inline int st_gpio_bank(int gpio)
{
@@ -356,25 +423,29 @@ static void st_pinconf_set_config(struct st_pio_control *pc,
unsigned int oe_value, pu_value, od_value;
unsigned long mask = BIT(pin);
- regmap_field_read(output_enable, &oe_value);
- regmap_field_read(pull_up, &pu_value);
- regmap_field_read(open_drain, &od_value);
-
- /* Clear old values */
- oe_value &= ~mask;
- pu_value &= ~mask;
- od_value &= ~mask;
-
- if (config & ST_PINCONF_OE)
- oe_value |= mask;
- if (config & ST_PINCONF_PU)
- pu_value |= mask;
- if (config & ST_PINCONF_OD)
- od_value |= mask;
-
- regmap_field_write(output_enable, oe_value);
- regmap_field_write(pull_up, pu_value);
- regmap_field_write(open_drain, od_value);
+ if (output_enable) {
+ regmap_field_read(output_enable, &oe_value);
+ oe_value &= ~mask;
+ if (config & ST_PINCONF_OE)
+ oe_value |= mask;
+ regmap_field_write(output_enable, oe_value);
+ }
+
+ if (pull_up) {
+ regmap_field_read(pull_up, &pu_value);
+ pu_value &= ~mask;
+ if (config & ST_PINCONF_PU)
+ pu_value |= mask;
+ regmap_field_write(pull_up, pu_value);
+ }
+
+ if (open_drain) {
+ regmap_field_read(open_drain, &od_value);
+ od_value &= ~mask;
+ if (config & ST_PINCONF_OD)
+ od_value |= mask;
+ regmap_field_write(open_drain, od_value);
+ }
}
static void st_pctl_set_function(struct st_pio_control *pc,
@@ -385,6 +456,9 @@ static void st_pctl_set_function(struct st_pio_control *pc,
int pin = st_gpio_pin(pin_id);
int offset = pin * 4;
+ if (!alt)
+ return;
+
regmap_field_read(alt, &val);
val &= ~(0xf << offset);
val |= function << offset;
@@ -522,17 +596,23 @@ static void st_pinconf_get_direction(struct st_pio_control *pc,
{
unsigned int oe_value, pu_value, od_value;
- regmap_field_read(pc->oe, &oe_value);
- regmap_field_read(pc->pu, &pu_value);
- regmap_field_read(pc->od, &od_value);
+ if (pc->oe) {
+ regmap_field_read(pc->oe, &oe_value);
+ if (oe_value & BIT(pin))
+ ST_PINCONF_PACK_OE(*config);
+ }
- if (oe_value & BIT(pin))
- ST_PINCONF_PACK_OE(*config);
- if (pu_value & BIT(pin))
- ST_PINCONF_PACK_PU(*config);
- if (od_value & BIT(pin))
- ST_PINCONF_PACK_OD(*config);
+ if (pc->pu) {
+ regmap_field_read(pc->pu, &pu_value);
+ if (pu_value & BIT(pin))
+ ST_PINCONF_PACK_PU(*config);
+ }
+ if (pc->od) {
+ regmap_field_read(pc->od, &od_value);
+ if (od_value & BIT(pin))
+ ST_PINCONF_PACK_OD(*config);
+ }
}
static int st_pinconf_get_retime_packed(struct st_pinctrl *info,
@@ -1051,8 +1131,21 @@ static int st_pctl_dt_setup_retime(struct st_pinctrl *info,
return -EINVAL;
}
-static int st_parse_syscfgs(struct st_pinctrl *info,
- int bank, struct device_node *np)
+
+static struct regmap_field *st_pc_get_value(struct device *dev,
+ struct regmap *regmap, int bank,
+ int data, int lsb, int msb)
+{
+ struct reg_field reg = REG_FIELD((data + bank) * 4, lsb, msb);
+
+ if (data < 0)
+ return NULL;
+
+ return devm_regmap_field_alloc(dev, regmap, reg);
+}
+
+static void st_parse_syscfgs(struct st_pinctrl *info, int bank,
+ struct device_node *np)
{
const struct st_pctl_data *data = info->data;
/**
@@ -1062,29 +1155,21 @@ static int st_parse_syscfgs(struct st_pinctrl *info,
*/
int lsb = (bank%4) * ST_GPIO_PINS_PER_BANK;
int msb = lsb + ST_GPIO_PINS_PER_BANK - 1;
- struct reg_field alt_reg = REG_FIELD((data->alt + bank) * 4, 0, 31);
- struct reg_field oe_reg = REG_FIELD((data->oe + bank/4) * 4, lsb, msb);
- struct reg_field pu_reg = REG_FIELD((data->pu + bank/4) * 4, lsb, msb);
- struct reg_field od_reg = REG_FIELD((data->od + bank/4) * 4, lsb, msb);
struct st_pio_control *pc = &info->banks[bank].pc;
struct device *dev = info->dev;
struct regmap *regmap = info->regmap;
- pc->alt = devm_regmap_field_alloc(dev, regmap, alt_reg);
- pc->oe = devm_regmap_field_alloc(dev, regmap, oe_reg);
- pc->pu = devm_regmap_field_alloc(dev, regmap, pu_reg);
- pc->od = devm_regmap_field_alloc(dev, regmap, od_reg);
-
- if (IS_ERR(pc->alt) || IS_ERR(pc->oe) ||
- IS_ERR(pc->pu) || IS_ERR(pc->od))
- return -EINVAL;
+ pc->alt = st_pc_get_value(dev, regmap, bank, data->alt, 0, 31);
+ pc->oe = st_pc_get_value(dev, regmap, bank/4, data->oe, lsb, msb);
+ pc->pu = st_pc_get_value(dev, regmap, bank/4, data->pu, lsb, msb);
+ pc->od = st_pc_get_value(dev, regmap, bank/4, data->od, lsb, msb);
 /* retime available for all pins by default */
pc->rt_pin_mask = 0xff;
of_property_read_u32(np, "st,retime-pin-mask", &pc->rt_pin_mask);
st_pctl_dt_setup_retime(info, bank, pc);
- return 0;
+ return;
}
/*
@@ -1200,6 +1285,194 @@ static int st_pctl_parse_functions(struct device_node *np,
return 0;
}
+static int st_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
+ int irq = -ENXIO;
+
+ if (offset < chip->ngpio)
+ irq = irq_find_mapping(bank->domain, offset);
+
+ dev_info(chip->dev, "%s: request IRQ for GPIO %d, return %d\n",
+ chip->label, offset + chip->base, irq);
+ return irq;
+}
+
+static void st_gpio_irq_mask(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), bank->base + REG_PIO_CLR_PMASK);
+}
+
+static void st_gpio_irq_unmask(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
+}
+
+static unsigned int st_gpio_irq_startup(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ if (gpio_lock_as_irq(&bank->gpio_chip, d->hwirq))
+ dev_err(bank->gpio_chip.dev,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+
+ st_gpio_irq_unmask(d);
+
+ return 0;
+}
+
+static void st_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ st_gpio_irq_mask(d);
+ gpio_unlock_as_irq(&bank->gpio_chip, d->hwirq);
+}
+
+static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
+{
+ struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ int comp, pin = d->hwirq;
+ u32 val;
+ u32 pin_edge_conf = 0;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ comp = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ comp = 0;
+ pin_edge_conf = ST_IRQ_FALLING_EDGE_CONF(pin);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ comp = 1;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ comp = 1;
+ pin_edge_conf = ST_IRQ_RISING_EDGE_CONF(pin);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ comp = st_gpio_get(&bank->gpio_chip, pin);
+ pin_edge_conf = ST_IRQ_BOTH_EDGE_CONF(pin);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->irq_edge_conf &= ~(ST_IRQ_EDGE_MASK << (
+ pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN));
+ bank->irq_edge_conf |= pin_edge_conf;
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ val = readl(bank->base + REG_PIO_PCOMP);
+ val &= ~BIT(pin);
+ val |= (comp << pin);
+ writel(val, bank->base + REG_PIO_PCOMP);
+
+ return 0;
+}
+
+/*
+ * As edge triggers are not supported at the hardware level, they are
+ * emulated in software by exploiting the level trigger support in hardware.
+ *
+ * Steps for detecting a rising edge interrupt in software:
+ *
+ * Step 1: CONFIGURE the pin to detect level LOW interrupts.
+ *
+ * Step 2: DETECT the level LOW interrupt and, in the irqmux/gpio-bank
+ * interrupt handler, if the value of the pin is low, CONFIGURE the pin for
+ * level HIGH interrupts. Do NOT call the actual interrupt handler for the
+ * pin at this stage.
+ *
+ * Step 3: DETECT the level HIGH interrupt and, in the irqmux/gpio-bank
+ * interrupt handler, if the value of the pin is HIGH, CONFIGURE the pin for
+ * level LOW interrupts and then DISPATCH the interrupt to the interrupt
+ * handler of the pin.
+ *
+ * step-1 ________ __________
+ * | | step-3
+ * | |
+ * step-2 |_____|
+ *
+ * A falling edge is detected in the same way.
+ *
+ */
+static void __gpio_irq_handler(struct st_gpio_bank *bank)
+{
+ unsigned long port_in, port_mask, port_comp, active_irqs;
+ unsigned long bank_edge_mask, flags;
+ int n, val, ecfg;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank_edge_mask = bank->irq_edge_conf;
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ for (;;) {
+ port_in = readl(bank->base + REG_PIO_PIN);
+ port_comp = readl(bank->base + REG_PIO_PCOMP);
+ port_mask = readl(bank->base + REG_PIO_PMASK);
+
+ active_irqs = (port_in ^ port_comp) & port_mask;
+
+ if (active_irqs == 0)
+ break;
+
+ for_each_set_bit(n, &active_irqs, BITS_PER_LONG) {
+ /* check if we are detecting fake edges ... */
+ ecfg = ST_IRQ_EDGE_CONF(bank_edge_mask, n);
+
+ if (ecfg) {
+ /* edge detection. */
+ val = st_gpio_get(&bank->gpio_chip, n);
+
+ writel(BIT(n),
+ val ? bank->base + REG_PIO_SET_PCOMP :
+ bank->base + REG_PIO_CLR_PCOMP);
+
+ if (ecfg != ST_IRQ_EDGE_BOTH &&
+ !((ecfg & ST_IRQ_EDGE_FALLING) ^ val))
+ continue;
+ }
+
+ generic_handle_irq(irq_find_mapping(bank->domain, n));
+ }
+ }
+}
+
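A compact, editorial restatement of the emulation described above, as a stand-alone state machine; pin_is_high and dispatch() are placeholders, not driver symbols.

/* Rising-edge emulation with only level interrupts available. */
enum edge_emul_state { ARMED_FOR_LOW, ARMED_FOR_HIGH };	/* step 1: start ARMED_FOR_LOW */

static void edge_emul_level_irq(enum edge_emul_state *state, int pin_is_high,
				void (*dispatch)(void))
{
	if (*state == ARMED_FOR_LOW && !pin_is_high) {
		/* Step 2: low level seen - re-arm for level HIGH, no dispatch. */
		*state = ARMED_FOR_HIGH;
	} else if (*state == ARMED_FOR_HIGH && pin_is_high) {
		/* Step 3: low-to-high transition complete - re-arm and dispatch. */
		*state = ARMED_FOR_LOW;
		dispatch();
	}
}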
+static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ /* interrupt dedicated per bank */
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct st_gpio_bank *bank = irq_get_handler_data(irq);
+
+ chained_irq_enter(chip, desc);
+ __gpio_irq_handler(bank);
+ chained_irq_exit(chip, desc);
+}
+
+static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ struct st_pinctrl *info = irq_get_handler_data(irq);
+ unsigned long status;
+ int n;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl(info->irqmux_base);
+
+ for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+ __gpio_irq_handler(&info->banks[n]);
+
+ chained_irq_exit(chip, desc);
+}
+
static struct gpio_chip st_gpio_template = {
.request = st_gpio_request,
.free = st_gpio_free,
@@ -1210,6 +1483,34 @@ static struct gpio_chip st_gpio_template = {
.ngpio = ST_GPIO_PINS_PER_BANK,
.of_gpio_n_cells = 1,
.of_xlate = st_gpio_xlate,
+ .to_irq = st_gpio_to_irq,
+};
+
+static struct irq_chip st_gpio_irqchip = {
+ .name = "GPIO",
+ .irq_mask = st_gpio_irq_mask,
+ .irq_unmask = st_gpio_irq_unmask,
+ .irq_set_type = st_gpio_irq_set_type,
+ .irq_startup = st_gpio_irq_startup,
+ .irq_shutdown = st_gpio_irq_shutdown,
+};
+
+static int st_gpio_irq_domain_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct st_gpio_bank *bank = h->host_data;
+
+ irq_set_chip(virq, &st_gpio_irqchip);
+ irq_set_handler(virq, handle_simple_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, bank);
+
+ return 0;
+}
+
+static struct irq_domain_ops st_gpio_irq_ops = {
+ .map = st_gpio_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,
@@ -1219,8 +1520,8 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct pinctrl_gpio_range *range = &bank->range;
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
- struct resource res;
- int err;
+ struct resource res, irq_res;
+ int gpio_irq = 0, err, i;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1233,6 +1534,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
bank->gpio_chip.base = bank_num * ST_GPIO_PINS_PER_BANK;
bank->gpio_chip.ngpio = ST_GPIO_PINS_PER_BANK;
bank->gpio_chip.of_node = np;
+ spin_lock_init(&bank->lock);
of_property_read_string(np, "st,bank-name", &range->name);
bank->gpio_chip.label = range->name;
@@ -1248,6 +1550,51 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
}
dev_info(dev, "%s bank added.\n", range->name);
+ /**
+ * A GPIO bank can have one of two possible types of interrupt
+ * wiring.
+ *
+ * The first type is via an irqmux: a single interrupt is shared by
+ * multiple GPIO banks, which reduces the overall number of interrupts
+ * required. All these banks belong to a single pin controller.
+ * _________
+ * | |----> [gpio-bank (n) ]
+ * | |----> [gpio-bank (n + 1)]
+ * [irqN]-- | irq-mux |----> [gpio-bank (n + 2)]
+ * | |----> [gpio-bank (... )]
+ * |_________|----> [gpio-bank (n + 7)]
+ *
+ * The second type has a dedicated interrupt per GPIO bank.
+ *
+ * [irqN]----> [gpio-bank (n)]
+ */
+
+ if (of_irq_to_resource(np, 0, &irq_res)) {
+ gpio_irq = irq_res.start;
+ irq_set_chained_handler(gpio_irq, st_gpio_irq_handler);
+ irq_set_handler_data(gpio_irq, bank);
+ }
+
+ if (info->irqmux_base > 0 || gpio_irq > 0) {
+ /* Setup IRQ domain */
+ bank->domain = irq_domain_add_linear(np,
+ ST_GPIO_PINS_PER_BANK,
+ &st_gpio_irq_ops, bank);
+ if (!bank->domain) {
+ dev_err(dev, "Failed to add irq domain for %s\n",
+ np->full_name);
+ } else {
+ for (i = 0; i < ST_GPIO_PINS_PER_BANK; i++) {
+ if (irq_create_mapping(bank->domain, i) < 0)
+ dev_err(dev,
+ "Failed to map IRQ %i\n", i);
+ }
+ }
+
+ } else {
+ dev_info(dev, "No IRQ support for %s bank\n", np->full_name);
+ }
+
return 0;
}
@@ -1264,6 +1611,10 @@ static struct of_device_id st_pctl_of_match[] = {
{ .compatible = "st,stih416-rear-pinctrl", .data = &stih416_data},
{ .compatible = "st,stih416-fvdp-fe-pinctrl", .data = &stih416_data},
{ .compatible = "st,stih416-fvdp-lite-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-sbc-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-front-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-rear-pinctrl", .data = &stih416_data},
+ { .compatible = "st,stih407-flash-pinctrl", .data = &stih407_flashdata},
{ /* sentinel */ }
};
@@ -1276,6 +1627,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
int grp_index = 0;
+ int irq = 0;
+ struct resource *res;
st_pctl_dt_child_count(info, np);
if (!info->nbanks) {
@@ -1306,6 +1659,21 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
}
info->data = of_match_node(st_pctl_of_match, np)->data;
+ irq = platform_get_irq(pdev, 0);
+
+ if (irq > 0) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "irqmux");
+ info->irqmux_base = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(info->irqmux_base))
+ return PTR_ERR(info->irqmux_base);
+
+ irq_set_chained_handler(irq, st_gpio_irqmux_handler);
+ irq_set_handler_data(irq, info);
+
+ }
+
pctl_desc->npins = info->nbanks * ST_GPIO_PINS_PER_BANK;
pdesc = devm_kzalloc(&pdev->dev,
sizeof(*pdesc) * pctl_desc->npins, GFP_KERNEL);
diff --git a/drivers/pinctrl/pinctrl-sunxi-pins.h b/drivers/pinctrl/pinctrl-sunxi-pins.h
index 6fd8d4d95140..3d6066988a72 100644
--- a/drivers/pinctrl/pinctrl-sunxi-pins.h
+++ b/drivers/pinctrl/pinctrl-sunxi-pins.h
@@ -1932,27 +1932,27 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D1 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D0 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* CLK */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* CMD */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D3 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x4, "mmc0")), /* D2 */
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 9ccf681dad2f..f9fabe9bf47d 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
spin_lock_irqsave(&pctl->lock, flags);
regval = readl(pctl->membase + reg);
- regval &= ~IRQ_CFG_IRQ_MASK;
+ regval &= ~(IRQ_CFG_IRQ_MASK << index);
writel(regval | (mode << index), pctl->membase + reg);
spin_unlock_irqrestore(&pctl->lock, flags);
@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
if (reg) {
int irqoffset;
+ chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
generic_handle_irq(pin_irq);
}
+ chained_irq_exit(chip, desc);
}
}
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index 01c494f8a14f..552b0e97077a 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin)
static inline u32 sunxi_irq_cfg_reg(u16 irq)
{
- u8 reg = irq / IRQ_CFG_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
return reg + IRQ_CFG_REG;
}
@@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
{
- u8 reg = irq / IRQ_CTRL_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
return reg + IRQ_CTRL_REG;
}
@@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
static inline u32 sunxi_irq_status_reg(u16 irq)
{
- u8 reg = irq / IRQ_STATUS_IRQ_PER_REG;
+ u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
return reg + IRQ_STATUS_REG;
}
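The three one-line fixes above all convert a register index into a byte offset. A worked sketch, assuming (as in this driver family) eight IRQs per 32-bit configuration register:

/* Byte offset of the config register holding a given IRQ's 4-bit field.
 * For irq = 10 and irqs_per_reg = 8: register index 1, byte offset 0x04.
 * The old code returned the index (1) and so addressed the wrong register. */
static u32 irq_cfg_byte_offset(u16 irq, unsigned int irqs_per_reg)
{
	return irq / irqs_per_reg * 0x04;
}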
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..65458096f41e 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -39,6 +39,7 @@ struct tegra_pmx {
struct pinctrl_dev *pctl;
const struct tegra_pinctrl_soc_data *soc;
+ const char **group_pins;
int nbanks;
void __iomem **regs;
@@ -620,6 +621,8 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
struct tegra_pmx *pmx;
struct resource *res;
int i;
+ const char **group_pins;
+ int fn, gn, gfn;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx) {
@@ -629,6 +632,41 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
pmx->dev = &pdev->dev;
pmx->soc = soc_data;
+ /*
+ * Each mux group will appear in 4 functions' list of groups.
+ * This over-allocates slightly, since not all groups are mux groups.
+ */
+ pmx->group_pins = devm_kzalloc(&pdev->dev,
+ soc_data->ngroups * 4 * sizeof(*pmx->group_pins),
+ GFP_KERNEL);
+ if (!pmx->group_pins)
+ return -ENOMEM;
+
+ group_pins = pmx->group_pins;
+ for (fn = 0; fn < soc_data->nfunctions; fn++) {
+ struct tegra_function *func = &soc_data->functions[fn];
+
+ func->groups = group_pins;
+
+ for (gn = 0; gn < soc_data->ngroups; gn++) {
+ const struct tegra_pingroup *g = &soc_data->groups[gn];
+
+ if (g->mux_reg == -1)
+ continue;
+
+ for (gfn = 0; gfn < 4; gfn++)
+ if (g->funcs[gfn] == fn)
+ break;
+ if (gfn == 4)
+ continue;
+
+ BUG_ON(group_pins - pmx->group_pins >=
+ soc_data->ngroups * 4);
+ *group_pins++ = g->name;
+ func->ngroups++;
+ }
+ }
+
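A short restatement of the sizing argument in the comment above; the example numbers are hypothetical.

/* Each mux group selects among exactly 4 functions, so a group's name can
 * appear in at most 4 functions' group lists. With e.g. ngroups = 250, of
 * which 200 are mux groups, 250 * 4 = 1000 slots are allocated and at most
 * 200 * 4 = 800 are consumed by the loop above. */
static size_t group_name_slots(size_t ngroups)
{
	return ngroups * 4;	/* safe upper bound used for the allocation */
}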
tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
tegra_pinctrl_desc.name = dev_name(&pdev->dev);
tegra_pinctrl_desc.pins = pmx->soc->pins;
@@ -645,7 +683,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
- return -ENODEV;
+ return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/pinctrl-tegra.h
index 817f7061dc4c..6053832d433e 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/pinctrl-tegra.h
@@ -72,7 +72,7 @@ enum tegra_pinconf_tristate {
*/
struct tegra_function {
const char *name;
- const char * const *groups;
+ const char **groups;
unsigned ngroups;
};
@@ -193,7 +193,7 @@ struct tegra_pinctrl_soc_data {
unsigned ngpios;
const struct pinctrl_pin_desc *pins;
unsigned npins;
- const struct tegra_function *functions;
+ struct tegra_function *functions;
unsigned nfunctions;
const struct tegra_pingroup *groups;
unsigned ngroups;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 93c9e3899d5e..63fe7619d3ff 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -1,10 +1,8 @@
/*
- * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
+ * Pinctrl data for the NVIDIA Tegra114 pinmux
*
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
- * Author: Pritesh Raithatha <praithatha@nvidia.com>
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -13,9 +11,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -203,8 +198,8 @@
#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 _GPIO(245)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
@@ -212,8 +207,11 @@
#define TEGRA_PIN_PWR_INT_N _PIN(2)
#define TEGRA_PIN_RESET_OUT_N _PIN(3)
#define TEGRA_PIN_OWR _PIN(4)
+#define TEGRA_PIN_JTAG_RTCK _PIN(5)
+#define TEGRA_PIN_CLK_32K_IN _PIN(6)
+#define TEGRA_PIN_GMI_CLK_LB _PIN(7)
-static const struct pinctrl_pin_desc tegra114_pins[] = {
+static const struct pinctrl_pin_desc tegra114_pins[] = {
PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
@@ -385,9 +383,12 @@ static const struct pinctrl_pin_desc tegra114_pins[] = {
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
- PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+ PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
};
static const unsigned clk_32k_out_pa0_pins[] = {
@@ -1074,10 +1075,6 @@ static const unsigned cpu_pwr_req_pins[] = {
TEGRA_PIN_CPU_PWR_REQ,
};
-static const unsigned owr_pins[] = {
- TEGRA_PIN_OWR,
-};
-
static const unsigned pwr_int_n_pins[] = {
TEGRA_PIN_PWR_INT_N,
};
@@ -1086,6 +1083,22 @@ static const unsigned reset_out_n_pins[] = {
TEGRA_PIN_RESET_OUT_N,
};
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
+};
+
+static const unsigned jtag_rtck_pins[] = {
+ TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
static const unsigned drive_ao1_pins[] = {
TEGRA_PIN_KB_ROW0_PR0,
TEGRA_PIN_KB_ROW1_PR1,
@@ -1127,7 +1140,6 @@ static const unsigned drive_at1_pins[] = {
TEGRA_PIN_GMI_AD13_PH5,
TEGRA_PIN_GMI_AD14_PH6,
TEGRA_PIN_GMI_AD15_PH7,
-
TEGRA_PIN_GMI_IORDY_PI5,
TEGRA_PIN_GMI_CS7_N_PI6,
};
@@ -1141,15 +1153,12 @@ static const unsigned drive_at2_pins[] = {
TEGRA_PIN_GMI_AD5_PG5,
TEGRA_PIN_GMI_AD6_PG6,
TEGRA_PIN_GMI_AD7_PG7,
-
TEGRA_PIN_GMI_WR_N_PI0,
TEGRA_PIN_GMI_OE_N_PI1,
TEGRA_PIN_GMI_CS6_N_PI3,
TEGRA_PIN_GMI_RST_N_PI4,
TEGRA_PIN_GMI_WAIT_PI7,
-
TEGRA_PIN_GMI_DQS_P_PJ3,
-
TEGRA_PIN_GMI_ADV_N_PK0,
TEGRA_PIN_GMI_CLK_PK1,
TEGRA_PIN_GMI_CS4_N_PK2,
@@ -1342,14 +1351,37 @@ static const unsigned drive_uda_pins[] = {
};
static const unsigned drive_dev3_pins[] = {
- TEGRA_PIN_CLK3_OUT_PEE0,
- TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned drive_cec_pins[] = {
+};
+
+static const unsigned drive_at6_pins[] = {
+};
+
+static const unsigned drive_dap5_pins[] = {
+};
+
+static const unsigned drive_usb_vbus_en_pins[] = {
+};
+
+static const unsigned drive_ao3_pins[] = {
+};
+
+static const unsigned drive_hv0_pins[] = {
+};
+
+static const unsigned drive_sdio4_pins[] = {
+};
+
+static const unsigned drive_ao0_pins[] = {
};
enum tegra_mux {
TEGRA_MUX_BLINK,
TEGRA_MUX_CEC,
TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK,
TEGRA_MUX_CLK12,
TEGRA_MUX_CPU,
TEGRA_MUX_DAP,
@@ -1394,6 +1426,7 @@ enum tegra_mux {
TEGRA_MUX_RSVD2,
TEGRA_MUX_RSVD3,
TEGRA_MUX_RSVD4,
+ TEGRA_MUX_RTCK,
TEGRA_MUX_SDMMC1,
TEGRA_MUX_SDMMC2,
TEGRA_MUX_SDMMC3,
@@ -1425,944 +1458,16 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
};
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
- "sdmmc1_wp_n_pv3",
- "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
- "clk1_req_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
- "clk1_req_pee2",
-};
-
-static const char * const dap2_groups[] = {
- "clk1_out_pw4",
- "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
- "uart3_rts_n_pc0",
- "pu3",
- "pu4",
- "pu5",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_col3_pq3",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
- "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "gmi_wait_pi7",
- "gmi_ad8_ph0",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
-};
-
-static const char * const emc_dll_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
-};
-
-static const char * const extperiph1_groups[] = {
- "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "gmi_wp_n_pc7",
-
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gmi_cs6_n_pi3",
- "gmi_rst_n_pi4",
- "gmi_iordy_pi5",
- "gmi_cs7_n_pi6",
- "gmi_wait_pi7",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_dqs_p_pj3",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs4_n_pk2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const gmi_alt_groups[] = {
- "gmi_wp_n_pc7",
- "gmi_cs3_n_pk4",
- "gmi_a16_pj7",
-};
-
-static const char * const hda_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pcc1",
- "pbb0",
- "pbb7",
- "pcc2",
-};
-
-static const char * const irda_groups[] = {
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const kbc_groups[] = {
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const nand_groups[] = {
- "gmi_wp_n_pc7",
- "gmi_wait_pi7",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gmi_dqs_p_pj3",
- "gmi_rst_n_pi4",
-};
-
-static const char * const nand_alt_groups[] = {
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "kb_col4_pq4",
- "owr",
- "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
- "sdmmc1_dat2_py5",
- "uart3_rts_n_pc0",
- "pu3",
- "gmi_ad8_ph0",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
- "sdmmc1_dat1_py6",
- "pu4",
- "gmi_ad9_ph1",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
- "pu5",
- "gmi_ad10_ph2",
- "kb_col3_pq3",
- "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
- "pu6",
- "gmi_ad11_ph3",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
- "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "pv1",
- "hdmi_int_pn7",
- "pu1",
- "pu2",
- "gmi_wp_n_pc7",
- "gmi_adv_n_pk0",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_wr_n_pi0",
- "gmi_oe_n_pi1",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x7_aud_px7",
-
- "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "gmi_iordy_pi5",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat7_paa7",
- "pcc1",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "spdif_out_pk5",
- "gpio_x1_aud_px1",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "gpio_w2_aud_pw2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
-};
-
-static const char * const rsvd3_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "pu0",
- "pu1",
- "pu2",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_din_pp5",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "clk_32k_out_pa0",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "clk1_req_pee2",
- "clk1_out_pw4",
- "spdif_out_pk5",
- "spdif_in_pk6",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "hdmi_cec_pee3",
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
-};
-
-static const char * const rsvd4_groups[] = {
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "pu0",
- "pu1",
- "pu2",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_rst_n_pi4",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "cam_mclk_pcc0",
- "pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc2",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "clk_32k_out_pa0",
- "sys_clk_req_pz5",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "owr",
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "clk1_req_pee2",
- "clk1_out_pw4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc1_groups[] = {
-
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart3_cts_n_pa1",
- "kb_col5_pq5",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
- "gmi_iordy_pi5",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs7_n_pi6",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_dqs_p_pj3",
-};
-
-static const char * const sdmmc3_groups[] = {
- "kb_col4_pq4",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "hdmi_cec_pee3",
- "sdmmc3_cd_n_pv2",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
- "gmi_cs1_n_pj2",
- "gmi_oe_n_pi1",
- "clk_32k_out_pa0",
- "hdmi_cec_pee3",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "spdif_in_pk6",
- "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "gmi_wait_pi7",
- "gmi_cs6_n_pi3",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_a19_pk7",
- "gmi_wr_n_pi0",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x6_aud_px6",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const sysclk_groups[] = {
- "sys_clk_req_pz5",
-};
-
-static const char * const trace_groups[] = {
- "gmi_iordy_pi5",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs4_n_pk2",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gmi_dqs_p_pj3",
-};
-
-static const char * const uarta_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat1_pb6",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const uartb_groups[] = {
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
-};
-
-static const char * const uartd_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
- "pv0",
- "pu6",
- "gmi_cs0_n_pj0",
- "gmi_cs4_n_pk2",
- "gmi_ad11_ph3",
- "kb_col0_pq0",
- "spdif_in_pk6",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb6",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
- "pbb0",
-};
-
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra114_functions[] = {
+static struct tegra_function tegra114_functions[] = {
FUNCTION(blink),
FUNCTION(cec),
FUNCTION(cldvfs),
+ FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
FUNCTION(dap),
@@ -2407,6 +1512,7 @@ static const struct tegra_function tegra114_functions[] = {
FUNCTION(rsvd2),
FUNCTION(rsvd3),
FUNCTION(rsvd4),
+ FUNCTION(rtck),
FUNCTION(sdmmc1),
FUNCTION(sdmmc2),
FUNCTION(sdmmc3),
@@ -2438,11 +1544,11 @@ static const struct tegra_function tegra114_functions[] = {
FUNCTION(vi_alt3),
};
-#define DRV_PINGROUP_REG_START 0x868 /* bank 0 */
-#define PINGROUP_REG_START 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_START)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
{ \
@@ -2484,13 +1590,14 @@ static const struct tegra_function tegra114_functions[] = {
.drvtype_reg = -1, \
}
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
- drvdn_b, drvdn_w, drvup_b, drvup_w, \
- slwr_b, slwr_w, slwf_b, slwf_w, \
- drvtype) \
+ drvdn_b, drvdn_w, drvup_b, drvup_w, \
+ slwr_b, slwr_w, slwf_b, slwf_w, \
+ drvtype) \
{ \
.name = "drive_" #pg_name, \
.pins = drive_##pg_name##_pins, \
@@ -2503,7 +1610,7 @@ static const struct tegra_function tegra114_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -2516,14 +1623,13 @@ static const struct tegra_function tegra114_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
- .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r), \
.drvtype_bank = 0, \
.drvtype_bit = 6, \
}
static const struct tegra_pingroup tegra114_groups[] = {
/* pg_name, f0, f1, f2, f3, safe, r, od, ior, rcv_sel */
- /* FIXME: Fill in correct data in safe column */
PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, ULPI, 0x3000, N, N, N),
PINGROUP(ulpi_data1_po2, SPI3, HSI, UARTA, ULPI, ULPI, 0x3004, N, N, N),
PINGROUP(ulpi_data2_po3, SPI3, HSI, UARTA, ULPI, ULPI, 0x3008, N, N, N),
@@ -2635,6 +1741,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(pbb6, VGP6, DISPLAYA, DISPLAYB, RSVD4, RSVD4, 0x32a4, N, N, N),
PINGROUP(pbb7, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32a8, N, N, N),
PINGROUP(pcc2, I2S4, RSVD2, RSVD3, RSVD4, RSVD4, 0x32ac, N, N, N),
+ PINGROUP(jtag_rtck, RTCK, RSVD2, RSVD3, RSVD4, RTCK, 0x32b0, N, N, N),
PINGROUP(pwr_i2c_scl_pz6, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b4, Y, N, N),
PINGROUP(pwr_i2c_sda_pz7, I2CPWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x32b8, Y, N, N),
PINGROUP(kb_row0_pr0, KBC, RSVD2, RSVD3, RSVD4, RSVD4, 0x32bc, N, N, N),
@@ -2661,6 +1768,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(core_pwr_req, PWRON, RSVD2, RSVD3, RSVD4, RSVD4, 0x3324, N, N, N),
PINGROUP(cpu_pwr_req, CPU, RSVD2, RSVD3, RSVD4, RSVD4, 0x3328, N, N, N),
PINGROUP(pwr_int_n, PMI, RSVD2, RSVD3, RSVD4, RSVD4, 0x332c, N, N, N),
+ PINGROUP(clk_32k_in, CLK, RSVD2, RSVD3, RSVD4, CLK, 0x3330, N, N, N),
PINGROUP(owr, OWR, RSVD2, RSVD3, RSVD4, RSVD4, 0x3334, N, N, Y),
PINGROUP(dap1_fs_pn0, I2S0, HDA, GMI, RSVD4, RSVD4, 0x3338, N, N, N),
PINGROUP(dap1_din_pn1, I2S0, HDA, GMI, RSVD4, RSVD4, 0x333c, N, N, N),
@@ -2697,38 +1805,48 @@ static const struct tegra_pingroup tegra114_groups[] = {
PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, RSVD4, 0x33f8, Y, N, N),
PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x33fc, N, N, N),
PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, RSVD4, 0x3400, N, N, N),
+ PINGROUP(gmi_clk_lb, SDMMC2, NAND, GMI, RSVD4, GMI, 0x3404, N, N, N),
PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD3, 0x3408, N, N, N),
/* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
- DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(at1, 0x870, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at2, 0x874, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at3, 0x878, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at4, 0x87c, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
- DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
- DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
- DRV_PINGROUP(ddc, 0x8fc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gma, 0x900, 2, 3, 4, 14, 5, 20, 5, 28, 2, 30, 2, Y),
- DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao1, 0x868, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao2, 0x86c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at1, 0x870, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at2, 0x874, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at3, 0x878, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at4, 0x87c, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(at5, 0x880, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev1, 0x884, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cdev2, 0x888, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap1, 0x890, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap2, 0x894, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap3, 0x898, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dap4, 0x89c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dbg, 0x8a0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio3, 0x8b0, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(spi, 0x8b4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uaa, 0x8b8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uab, 0x8bc, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart2, 0x8c0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uart3, 0x8c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(sdio1, 0x8ec, 2, 3, -1, 12, 7, 20, 7, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ddc, 0x8fc, 2, 3, -1, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gma, 0x900, 2, 3, -1, 14, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gme, 0x910, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmf, 0x914, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmg, 0x918, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(gmh, 0x91c, 2, 3, 4, 14, 5, 19, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(owr, 0x920, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(uda, 0x924, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(dev3, 0x92c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(cec, 0x938, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(at6, 0x994, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, Y),
+ DRV_PINGROUP(dap5, 0x998, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(usb_vbus_en, 0x99c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao3, 0x9a0, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(hv0, 0x9a4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(sdio4, 0x9a8, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao0, 0x9ac, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
};
static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index c20e0e1dda83..73773706755b 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -1,7 +1,7 @@
/*
* Pinctrl data for the NVIDIA Tegra124 pinmux
*
- * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -212,8 +212,8 @@
#define TEGRA_PIN_PFF2 _GPIO(250)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
@@ -325,13 +325,13 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW10 PS3"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW10 PS4"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW10 PS5"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW10 PS6"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW10 PS7"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW10 PT0"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW10 PT1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW16 PT0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW17 PT1"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
@@ -406,16 +406,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
- PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
- PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
- PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
- PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
- PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
};
@@ -1138,6 +1138,7 @@ static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
};
+
static const unsigned dp_hpd_pff0_pins[] = {
TEGRA_PIN_DP_HPD_PFF0,
};
@@ -1158,24 +1159,24 @@ static const unsigned cpu_pwr_req_pins[] = {
TEGRA_PIN_CPU_PWR_REQ,
};
-static const unsigned owr_pins[] = {
- TEGRA_PIN_OWR,
-};
-
static const unsigned pwr_int_n_pins[] = {
TEGRA_PIN_PWR_INT_N,
};
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
static const unsigned reset_out_n_pins[] = {
TEGRA_PIN_RESET_OUT_N,
};
-static const unsigned clk_32k_in_pins[] = {
- TEGRA_PIN_CLK_32K_IN,
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
};
-static const unsigned gmi_clk_lb_pins[] = {
- TEGRA_PIN_GMI_CLK_LB,
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
};
static const unsigned jtag_rtck_pins[] = {
@@ -1441,15 +1442,15 @@ static const unsigned drive_gpv_pins[] = {
TEGRA_PIN_PFF2,
};
-static const unsigned drive_cec_pins[] = {
- TEGRA_PIN_HDMI_CEC_PEE3,
-};
-
static const unsigned drive_dev3_pins[] = {
TEGRA_PIN_CLK3_OUT_PEE0,
TEGRA_PIN_CLK3_REQ_PEE1,
};
+static const unsigned drive_cec_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
static const unsigned drive_at6_pins[] = {
TEGRA_PIN_PK1,
TEGRA_PIN_PK3,
@@ -1496,8 +1497,10 @@ static const unsigned drive_ao4_pins[] = {
enum tegra_mux {
TEGRA_MUX_BLINK,
+ TEGRA_MUX_CCLA,
TEGRA_MUX_CEC,
TEGRA_MUX_CLDVFS,
+ TEGRA_MUX_CLK,
TEGRA_MUX_CLK12,
TEGRA_MUX_CPU,
TEGRA_MUX_DAP,
@@ -1507,6 +1510,7 @@ enum tegra_mux {
TEGRA_MUX_DISPLAYA,
TEGRA_MUX_DISPLAYA_ALT,
TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DP,
TEGRA_MUX_DTV,
TEGRA_MUX_EXTPERIPH1,
TEGRA_MUX_EXTPERIPH2,
@@ -1528,6 +1532,9 @@ enum tegra_mux {
TEGRA_MUX_IRDA,
TEGRA_MUX_KBC,
TEGRA_MUX_OWR,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_PE1,
TEGRA_MUX_PMI,
TEGRA_MUX_PWM0,
TEGRA_MUX_PWM1,
@@ -1539,6 +1546,8 @@ enum tegra_mux {
TEGRA_MUX_RSVD2,
TEGRA_MUX_RSVD3,
TEGRA_MUX_RSVD4,
+ TEGRA_MUX_RTCK,
+ TEGRA_MUX_SATA,
TEGRA_MUX_SDMMC1,
TEGRA_MUX_SDMMC2,
TEGRA_MUX_SDMMC3,
@@ -1551,6 +1560,8 @@ enum tegra_mux {
TEGRA_MUX_SPI4,
TEGRA_MUX_SPI5,
TEGRA_MUX_SPI6,
+ TEGRA_MUX_SYS,
+ TEGRA_MUX_TMDS,
TEGRA_MUX_TRACE,
TEGRA_MUX_UARTA,
TEGRA_MUX_UARTB,
@@ -1569,1134 +1580,19 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT3,
TEGRA_MUX_VIMCLK2,
TEGRA_MUX_VIMCLK2_ALT,
- TEGRA_MUX_SATA,
- TEGRA_MUX_CCLA,
- TEGRA_MUX_PE0,
- TEGRA_MUX_PE,
- TEGRA_MUX_PE1,
- TEGRA_MUX_DP,
- TEGRA_MUX_RTCK,
- TEGRA_MUX_SYS,
- TEGRA_MUX_CLK,
- TEGRA_MUX_TMDS,
-};
-
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
- "ph2",
- "ph3",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
- "sdmmc1_wp_n_pv3",
- "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
- "dap_mclk1_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
- "dap_mclk1_pee2",
-};
-
-static const char * const dap2_groups[] = {
- "dap_mclk1_pw4",
- "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "ph1",
- "pi4",
- "pbb3",
- "pbb4",
- "pbb5",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_col3_pq3",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
- "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_sclk_pp3",
-
- "pu3",
- "pu4",
- "pu5",
-
- "pbb3",
- "pbb4",
- "pbb6",
-
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
-
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "pi7",
- "ph0",
- "ph6",
- "ph7",
-};
-
-static const char * const extperiph1_groups[] = {
- "dap_mclk1_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
-
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
-
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-
- "pc7",
-
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
-
- "ph0",
- "ph1",
- "ph2",
- "ph3",
- "ph4",
- "ph5",
- "ph6",
- "ph7",
-
- "pi0",
- "pi1",
- "pi2",
- "pi3",
- "pi4",
- "pi5",
- "pi6",
- "pi7",
-
- "pj0",
- "pj2",
-
- "pk0",
- "pk1",
- "pk2",
- "pk3",
- "pk4",
-
- "pj7",
- "pb0",
- "pb1",
- "pk7",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "gmi_clk_lb",
-
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-
- "dap2_fs_pa2",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_sclk_pa3",
-
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
-};
-
-static const char * const gmi_alt_groups[] = {
- "pc7",
- "pk4",
- "pj7",
-};
-
-static const char * const hda_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "spdif_in_pk6",
- "spdif_out_pk5",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_fs_pp4",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pcc1",
- "pbb6",
- "pbb7",
- "pcc2",
-};
-
-static const char * const irda_groups[] = {
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "kb_row11_ps3",
- "kb_row12_ps4",
-};
-
-static const char * const kbc_groups[] = {
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row16_pt0",
- "kb_row17_pt1",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "kb_col4_pq4",
- "owr",
- "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
- "sdmmc1_dat2_py5",
- "uart3_rts_n_pc0",
- "pu3",
- "ph0",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
- "sdmmc1_dat1_py6",
- "pu4",
- "ph1",
- "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
- "pu5",
- "ph2",
- "kb_col3_pq3",
- "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
- "pu6",
- "ph3",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
- "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "pv0",
- "pv1",
-
- "hdmi_int_pn7",
- "pu1",
- "pu2",
- "pc7",
- "pi7",
- "pk0",
- "pj0",
- "pj2",
- "pk2",
- "pi3",
- "pi6",
-
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
-
- "pi0",
- "pi1",
-
- "gpio_x7_aud_px7",
-
- "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
- "pv0",
- "pv1",
-
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "clk2_out_pee0",
- "clk2_req_pee1",
- "pc7",
- "pi5",
- "pj0",
- "pj2",
-
- "pk4",
- "pk2",
- "pi3",
- "pi6",
- "pg0",
- "pg1",
- "pg5",
- "pg6",
- "pg7",
-
- "ph4",
- "ph5",
- "pj7",
- "pb0",
- "pb1",
- "pk7",
- "pi0",
- "pi1",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat7_paa7",
- "pcc1",
- "pbb6",
- "pbb7",
- "pcc2",
- "jtag_rtck",
-
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
-
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "spdif_in_pk6",
- "spdif_out_pk5",
- "gpio_x1_aud_px1",
-
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "gpio_w2_aud_pw2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "gmi_clk_lb",
- "reset_out_n",
- "kb_row16_pt0",
- "kb_row17_pt1",
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const rsvd3_groups[] = {
- "dap3_sclk_pp3",
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
-
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "pu6",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "dap4_din_pp5",
- "dap4_sclk_pp7",
-
- "clk3_out_pee0",
- "clk3_req_pee1",
-
- "sdmmc4_dat5_paa5",
- "gpio_pcc1",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb5",
- "pbb7",
- "jtag_rtck",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row15_ps7",
-
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "dap_mclk1_pw4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "reset_out_n",
- "kb_row16_pt0",
- "kb_row17_pt1",
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const rsvd4_groups[] = {
- "dap3_dout_pp2",
- "pv0",
- "pv1",
- "sdmmc1_clk_pz0",
-
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "hdmi_int_pn7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "pu0",
- "pu1",
- "pu2",
-
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
-
- "dap4_fs_pp4",
- "dap4_dout_pp6",
- "dap4_din_pp5",
- "dap4_sclk_pp7",
-
- "clk3_out_pee0",
- "clk3_req_pee1",
-
- "pi5",
- "pk1",
- "pk2",
- "pg0",
- "pg1",
- "pg2",
- "pg3",
- "ph4",
- "ph5",
- "pb0",
- "pb1",
- "pk7",
- "pi0",
- "pi1",
- "pi2",
-
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-
- "jtag_rtck",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
-
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col5_pq5",
-
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "pwr_int_n",
- "clk_32k_in",
- "owr",
-
- "dap1_fs_pn0",
- "dap1_din_pn1",
- "dap1_sclk_pn3",
- "dap_mclk1_req_pee2",
- "dap_mclk1_pw5",
-
- "dap2_fs_pa2",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_sclk_pa3",
-
- "dvfs_pwm_px0",
- "dvfs_clk_px2",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
-
- "gpio_x5_aud_px5",
- "gpio_x7_aud_px7",
-
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
- "pex_wake_n_pdd3",
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
- "hdmi_cec_pee3",
-
- "sdmmc3_cd_n_pv2",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "sdmmc3_clk_lb_out_pee4",
- "sdmmc3_clk_lb_in_pee5",
- "gmi_clk_lb",
-
- "dp_hpd_pff0",
- "usb_vbus_en2_pff1",
- "pff2",
-};
-
-static const char * const sdmmc1_groups[] = {
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
- "clk2_out_pw5",
- "clk2_req_pcc",
- "uart3_cts_n_pa1",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
- "pi5",
- "pk1",
- "pk3",
- "pk4",
- "pi6",
- "ph4",
- "ph5",
- "ph6",
- "ph7",
- "pi2",
- "cam_mclk_pcc0",
- "pcc1",
- "pbb0",
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc2",
- "gmi_clk_lb",
-};
-
-static const char * const sdmmc3_groups[] = {
- "pk0",
- "pcc2",
-
- "kb_col4_pq4",
- "kb_col5_pq5",
-
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-
- "sdmmc3_cd_n_pv2",
- "sdmmc3_clk_lb_in_pee5",
- "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
- "pk0",
- "pj2",
- "kb_row15_ps7",
- "clk_32k_out_pa0",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "spdif_in_pk6",
- "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "gpio_x3_aud_px3",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "gpio_x4_aud_px4",
- "gpio_x5_aud_px5",
- "gpio_x6_aud_px6",
- "gpio_x7_aud_px7",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
-
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
-
- "pi3",
- "pg4",
- "pg5",
- "pg6",
- "pg7",
- "ph3",
- "pi4",
- "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "dap3_fs_pp0",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
- "dvfs_pwm_px0",
- "gpio_x1_aud_px1",
- "gpio_x3_aud_px3",
- "dvfs_clk_px2",
- "gpio_x6_aud_px6",
- "gpio_w2_aud_pw2",
- "gpio_w3_aud_pw3",
-};
-
-static const char * const trace_groups[] = {
- "pi2",
- "pi4",
- "pi7",
- "ph0",
- "ph6",
- "ph7",
- "pg2",
- "pg3",
- "pk1",
- "pk3",
-};
-
-static const char * const uarta_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat3_py4",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat0_py7",
-
-
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
-
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "kb_row10_ps2",
- "kb_col3_pq3",
- "kb_col4_pq4",
-
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat1_pb6",
- "sdmmc1_wp_n_pv3",
-
-};
-
-static const char * const uartb_groups[] = {
- "uart2_rts_n_pj6",
- "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_txd_pw6",
- "uart3_rxd_pw7",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "kb_row16_pt0",
- "kn_row17_pt1",
-};
-
-static const char * const uartd_groups[] = {
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "pj7",
- "pb0",
- "pb1",
- "pk7",
- "kb_col6_pq6",
- "kb_col7_pq7",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
- "pj0",
- "usb_vbus_en0_pn4",
- "usb_vbus_en1_pn5",
- "usb_vbus_en2_pff1",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb0",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
-};
-
-static const char * const vimclk2_groups[] = {
- "pbb0",
-};
-
-static const char * const vimclk2_alt_groups[] = {
- "pbb0",
-};
-
-static const char * const sata_groups[] = {
- "dap_mclk1_req_pee2",
- "dap1_dout_pn2",
- "pff2",
-};
-
-static const char * const ccla_groups[] = {
- "pk3",
-};
-
-static const char * const rtck_groups[] = {
- "jtag_rtck",
-};
-
-static const char * const sys_groups[] = {
- "kb_row3_pr3",
-};
-
-static const char * const pe0_groups[] = {
- "pex_l0_rst_n_pdd1",
- "pex_l0_clkreq_n_pdd2",
-};
-
-static const char * const pe_groups[] = {
- "pex_wake_n_pdd3",
-};
-
-static const char * const pe1_groups[] = {
- "pex_l1_rst_n_pdd5",
- "pex_l1_clkreq_n_pdd6",
-};
-
-static const char * const dp_groups[] = {
- "dp_hpd_pff0",
-};
-
-static const char * const clk_groups[] = {
- "clk_32k_in",
-};
-
-static const char * const tmds_groups[] = {
- "pg4",
- "ph1",
- "ph2",
};
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra124_functions[] = {
+static struct tegra_function tegra124_functions[] = {
FUNCTION(blink),
+ FUNCTION(ccla),
FUNCTION(cec),
FUNCTION(cldvfs),
+ FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
FUNCTION(dap),
@@ -2706,6 +1602,7 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(displaya),
FUNCTION(displaya_alt),
FUNCTION(displayb),
+ FUNCTION(dp),
FUNCTION(dtv),
FUNCTION(extperiph1),
FUNCTION(extperiph2),
@@ -2727,6 +1624,9 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(irda),
FUNCTION(kbc),
FUNCTION(owr),
+ FUNCTION(pe),
+ FUNCTION(pe0),
+ FUNCTION(pe1),
FUNCTION(pmi),
FUNCTION(pwm0),
FUNCTION(pwm1),
@@ -2738,6 +1638,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(rsvd2),
FUNCTION(rsvd3),
FUNCTION(rsvd4),
+ FUNCTION(rtck),
+ FUNCTION(sata),
FUNCTION(sdmmc1),
FUNCTION(sdmmc2),
FUNCTION(sdmmc3),
@@ -2750,6 +1652,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(spi4),
FUNCTION(spi5),
FUNCTION(spi6),
+ FUNCTION(sys),
+ FUNCTION(tmds),
FUNCTION(trace),
FUNCTION(uarta),
FUNCTION(uartb),
@@ -2768,23 +1672,13 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(vi_alt3),
FUNCTION(vimclk2),
FUNCTION(vimclk2_alt),
- FUNCTION(sata),
- FUNCTION(ccla),
- FUNCTION(pe0),
- FUNCTION(pe),
- FUNCTION(pe1),
- FUNCTION(dp),
- FUNCTION(rtck),
- FUNCTION(sys),
- FUNCTION(clk),
- FUNCTION(tmds),
};
-#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
-#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
{ \
@@ -2792,12 +1686,12 @@ static const struct tegra_function tegra124_functions[] = {
.pins = pg_name##_pins, \
.npins = ARRAY_SIZE(pg_name##_pins), \
.funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
}, \
- .func_safe = TEGRA_MUX_ ## f_safe, \
+ .func_safe = TEGRA_MUX_##f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
.mux_bank = 1, \
.mux_bit = 0, \
@@ -2826,8 +1720,9 @@ static const struct tegra_function tegra124_functions[] = {
.drvtype_reg = -1, \
}
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_A)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
drvdn_b, drvdn_w, drvup_b, drvup_w, \
@@ -2845,7 +1740,7 @@ static const struct tegra_function tegra124_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -2858,7 +1753,7 @@ static const struct tegra_function tegra124_functions[] = {
.slwr_width = slwr_w, \
.slwf_bit = slwf_b, \
.slwf_width = slwf_w, \
- .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r), \
+ .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r), \
.drvtype_bank = 0, \
.drvtype_bit = 6, \
}
@@ -2909,8 +1804,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(pu4, PWM1, UARTA, GMI, DISPLAYB, PWM1, 0x3194, N, N, N),
PINGROUP(pu5, PWM2, UARTA, GMI, DISPLAYB, PWM2, 0x3198, N, N, N),
PINGROUP(pu6, PWM3, UARTA, RSVD3, GMI, RSVD3, 0x319c, N, N, N),
- PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
- PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
+ PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
+ PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
PINGROUP(dap4_fs_pp4, I2S3, GMI, DTV, RSVD4, I2S3, 0x31a8, N, N, N),
PINGROUP(dap4_din_pp5, I2S3, GMI, RSVD3, RSVD4, I2S3, 0x31ac, N, N, N),
PINGROUP(dap4_dout_pp6, I2S3, GMI, DTV, RSVD4, I2S3, 0x31b0, N, N, N),
@@ -2964,9 +1859,9 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(sdmmc4_dat4_paa4, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3270, N, Y, N),
PINGROUP(sdmmc4_dat5_paa5, SDMMC4, SPI3, RSVD3, RSVD4, SDMMC4, 0x3274, N, Y, N),
PINGROUP(sdmmc4_dat6_paa6, SDMMC4, SPI3, GMI, RSVD4, SDMMC4, 0x3278, N, Y, N),
- PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD1, GMI, RSVD4, SDMMC4, 0x327c, N, Y, N),
+ PINGROUP(sdmmc4_dat7_paa7, SDMMC4, RSVD2, GMI, RSVD4, SDMMC4, 0x327c, N, Y, N),
PINGROUP(cam_mclk_pcc0, VI, VI_ALT1, VI_ALT3, SDMMC2, VI, 0x3284, N, N, N),
- PINGROUP(pcc1, I2S4, RSVD1, RSVD3, SDMMC2, I2S4, 0x3288, N, N, N),
+ PINGROUP(pcc1, I2S4, RSVD2, RSVD3, SDMMC2, I2S4, 0x3288, N, N, N),
PINGROUP(pbb0, VGP6, VIMCLK2, SDMMC2, VIMCLK2_ALT, VGP6, 0x328c, N, N, N),
PINGROUP(cam_i2c_scl_pbb1, VGP1, I2C3, RSVD3, SDMMC2, VGP1, 0x3290, Y, N, N),
PINGROUP(cam_i2c_sda_pbb2, VGP2, I2C3, RSVD3, SDMMC2, VGP2, 0x3294, Y, N, N),
@@ -3047,8 +1942,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI1, 0x33f0, N, N, N),
PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f4, Y, N, N),
PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f8, Y, N, N),
- PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
- PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
PINGROUP(gmi_clk_lb, SDMMC2, RSVD2, GMI, RSVD4, SDMMC2, 0x3404, N, N, N),
PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD1, 0x3408, N, N, N),
PINGROUP(kb_row16_pt0, KBC, RSVD2, RSVD3, UARTC, KBC, 0x340c, N, N, N),
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index fcfb7d012c5b..e0b504088387 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -1894,637 +1894,12 @@ enum tegra_mux {
TEGRA_MUX_XIO,
};
-static const char * const ahb_clk_groups[] = {
- "cdev2",
-};
-
-static const char * const apb_clk_groups[] = {
- "cdev2",
-};
-
-static const char * const audio_sync_groups[] = {
- "cdev1",
-};
-
-static const char * const crt_groups[] = {
- "crtp",
- "lm1",
-};
-
-static const char * const dap1_groups[] = {
- "dap1",
-};
-
-static const char * const dap2_groups[] = {
- "dap2",
-};
-
-static const char * const dap3_groups[] = {
- "dap3",
-};
-
-static const char * const dap4_groups[] = {
- "dap4",
-};
-
-static const char * const dap5_groups[] = {
- "gme",
-};
-
-static const char * const displaya_groups[] = {
- "lcsn",
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lm1",
- "lpp",
- "lpw0",
- "lpw1",
- "lpw2",
- "lsc0",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "lspi",
- "lvp0",
- "lvp1",
- "lvs",
-};
-
-static const char * const displayb_groups[] = {
- "lcsn",
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lm1",
- "lpp",
- "lpw0",
- "lpw1",
- "lpw2",
- "lsc0",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "lspi",
- "lvp0",
- "lvp1",
- "lvs",
-};
-
-static const char * const emc_test0_dll_groups[] = {
- "kbca",
-};
-
-static const char * const emc_test1_dll_groups[] = {
- "kbcc",
-};
-
-static const char * const gmi_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "dap1",
- "dap2",
- "dap4",
- "gma",
- "gmb",
- "gmc",
- "gmd",
- "gme",
- "gpu",
- "irrx",
- "irtx",
- "pta",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "uca",
- "ucb",
-};
-
-static const char * const gmi_int_groups[] = {
- "gmb",
-};
-
-static const char * const hdmi_groups[] = {
- "hdint",
- "lpw0",
- "lpw2",
- "lsc1",
- "lsck",
- "lsda",
- "lspi",
- "pta",
-};
-
-static const char * const i2cp_groups[] = {
- "i2cp",
-};
-
-static const char * const i2c1_groups[] = {
- "rm",
- "spdi",
- "spdo",
- "spig",
- "spih",
-};
-
-static const char * const i2c2_groups[] = {
- "ddc",
- "pta",
-};
-
-static const char * const i2c3_groups[] = {
- "dtf",
-};
-
-static const char * const ide_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "gmb",
-};
-
-static const char * const irda_groups[] = {
- "uad",
-};
-
-static const char * const kbc_groups[] = {
- "kbca",
- "kbcb",
- "kbcc",
- "kbcd",
- "kbce",
- "kbcf",
-};
-
-static const char * const mio_groups[] = {
- "kbcb",
- "kbcd",
- "kbcf",
-};
-
-static const char * const mipi_hs_groups[] = {
- "uaa",
- "uab",
-};
-
-static const char * const nand_groups[] = {
- "ata",
- "atb",
- "atc",
- "atd",
- "ate",
- "gmb",
- "gmd",
- "kbca",
- "kbcb",
- "kbcc",
- "kbcd",
- "kbce",
- "kbcf",
-};
-
-static const char * const osc_groups[] = {
- "cdev1",
- "cdev2",
-};
-
-static const char * const owr_groups[] = {
- "kbce",
- "owc",
- "uac",
-};
-
-static const char * const pcie_groups[] = {
- "gpv",
- "slxa",
- "slxk",
-};
-
-static const char * const plla_out_groups[] = {
- "cdev1",
-};
-
-static const char * const pllc_out1_groups[] = {
- "csus",
-};
-
-static const char * const pllm_out1_groups[] = {
- "cdev1",
-};
-
-static const char * const pllp_out2_groups[] = {
- "csus",
-};
-
-static const char * const pllp_out3_groups[] = {
- "csus",
-};
-
-static const char * const pllp_out4_groups[] = {
- "cdev2",
-};
-
-static const char * const pwm_groups[] = {
- "gpu",
- "sdb",
- "sdc",
- "sdd",
- "ucb",
-};
-
-static const char * const pwr_intr_groups[] = {
- "pmc",
-};
-
-static const char * const pwr_on_groups[] = {
- "pmc",
-};
-
-static const char * const rsvd1_groups[] = {
- "dta",
- "dtb",
- "dtc",
- "dtd",
- "dte",
- "gmd",
- "gme",
-};
-
-static const char * const rsvd2_groups[] = {
- "crtp",
- "dap1",
- "dap3",
- "dap4",
- "ddc",
- "dtb",
- "dtc",
- "dte",
- "dtf",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "owc",
- "rm",
- "sdio1",
- "spdi",
- "spdo",
- "uac",
- "uca",
- "uda",
-};
-
-static const char * const rsvd3_groups[] = {
- "crtp",
- "dap2",
- "dap3",
- "ddc",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "ld17",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lm1",
- "lpp",
- "lpw1",
- "lvp0",
- "lvp1",
- "owc",
- "pmc",
- "rm",
- "uac",
-};
-
-static const char * const rsvd4_groups[] = {
- "ata",
- "ate",
- "crtp",
- "dap3",
- "dap4",
- "ddc",
- "dta",
- "dtc",
- "dtd",
- "dtf",
- "gpu",
- "gpu7",
- "gpv",
- "hdint",
- "i2cp",
- "kbce",
- "lcsn",
- "ld0",
- "ld1",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld17",
- "ldc",
- "ldi",
- "lhp0",
- "lhp1",
- "lhp2",
- "lhs",
- "lm0",
- "lpp",
- "lpw1",
- "lsc0",
- "lsdi",
- "lvp0",
- "lvp1",
- "lvs",
- "owc",
- "pmc",
- "pta",
- "rm",
- "spif",
- "uac",
- "uca",
- "ucb",
-};
-
-static const char * const rtck_groups[] = {
- "gpu7",
-};
-
-static const char * const sdio1_groups[] = {
- "sdio1",
-};
-
-static const char * const sdio2_groups[] = {
- "dap1",
- "dta",
- "dtd",
- "kbca",
- "kbcb",
- "kbcd",
- "spdi",
- "spdo",
-};
-
-static const char * const sdio3_groups[] = {
- "sdb",
- "sdc",
- "sdd",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
-};
-
-static const char * const sdio4_groups[] = {
- "atb",
- "atc",
- "atd",
- "gma",
- "gme",
-};
-
-static const char * const sflash_groups[] = {
- "gmc",
- "gmd",
-};
-
-static const char * const spdif_groups[] = {
- "slxc",
- "slxd",
- "spdi",
- "spdo",
- "uad",
-};
-
-static const char * const spi1_groups[] = {
- "dtb",
- "dte",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "spif",
- "uda",
-};
-
-static const char * const spi2_groups[] = {
- "sdb",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
- "spia",
- "spib",
- "spic",
- "spid",
- "spie",
- "spif",
- "spig",
- "spih",
- "uab",
-};
-
-static const char * const spi2_alt_groups[] = {
- "spid",
- "spie",
- "spig",
- "spih",
-};
-
-static const char * const spi3_groups[] = {
- "gma",
- "lcsn",
- "lm0",
- "lpw0",
- "lpw2",
- "lsc1",
- "lsck",
- "lsda",
- "lsdi",
- "sdc",
- "sdd",
- "spia",
- "spib",
- "spic",
- "spif",
- "spig",
- "spih",
- "uaa",
-};
-
-static const char * const spi4_groups[] = {
- "gmc",
- "irrx",
- "irtx",
- "slxa",
- "slxc",
- "slxd",
- "slxk",
- "uad",
-};
-
-static const char * const trace_groups[] = {
- "kbcc",
- "kbcf",
-};
-
-static const char * const twc_groups[] = {
- "dap2",
- "sdc",
-};
-
-static const char * const uarta_groups[] = {
- "gpu",
- "irrx",
- "irtx",
- "sdb",
- "sdd",
- "sdio1",
- "uaa",
- "uab",
- "uad",
-};
-
-static const char * const uartb_groups[] = {
- "irrx",
- "irtx",
-};
-
-static const char * const uartc_groups[] = {
- "uca",
- "ucb",
-};
-
-static const char * const uartd_groups[] = {
- "gmc",
- "uda",
-};
-
-static const char * const uarte_groups[] = {
- "gma",
- "sdio1",
-};
-
-static const char * const ulpi_groups[] = {
- "uaa",
- "uab",
- "uda",
-};
-
-static const char * const vi_groups[] = {
- "dta",
- "dtb",
- "dtc",
- "dtd",
- "dte",
- "dtf",
-};
-
-static const char * const vi_sensor_clk_groups[] = {
- "csus",
-};
-
-static const char * const xio_groups[] = {
- "ld0",
- "ld1",
- "ld10",
- "ld11",
- "ld12",
- "ld13",
- "ld14",
- "ld15",
- "ld16",
- "ld2",
- "ld3",
- "ld4",
- "ld5",
- "ld6",
- "ld7",
- "ld8",
- "ld9",
- "lhs",
- "lsc0",
- "lspi",
- "lvs",
-};
-
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra20_functions[] = {
+static struct tegra_function tegra20_functions[] = {
FUNCTION(ahb_clk),
FUNCTION(apb_clk),
FUNCTION(audio_sync),
@@ -2881,18 +2256,7 @@ static struct platform_driver tegra20_pinctrl_driver = {
.probe = tegra20_pinctrl_probe,
.remove = tegra_pinctrl_remove,
};
-
-static int __init tegra20_pinctrl_init(void)
-{
- return platform_driver_register(&tegra20_pinctrl_driver);
-}
-arch_initcall(tegra20_pinctrl_init);
-
-static void __exit tegra20_pinctrl_exit(void)
-{
- platform_driver_unregister(&tegra20_pinctrl_driver);
-}
-module_exit(tegra20_pinctrl_exit);
+module_platform_driver(tegra20_pinctrl_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 pinctrl driver");
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index 2300deba25bd..41d24f5c2854 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -25,7 +25,7 @@
* Most pins affected by the pinmux can also be GPIOs. Define these first.
* These must match how the GPIO driver names/numbers its pins.
*/
-#define _GPIO(offset) (offset)
+#define _GPIO(offset) (offset)
#define TEGRA_PIN_CLK_32K_OUT_PA0 _GPIO(0)
#define TEGRA_PIN_UART3_CTS_N_PA1 _GPIO(1)
@@ -277,8 +277,8 @@
#define TEGRA_PIN_PEE7 _GPIO(247)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_PEE7 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_PEE7 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CLK_32K_IN _PIN(0)
@@ -2015,1253 +2015,13 @@ enum tegra_mux {
TEGRA_MUX_VI_ALT2,
TEGRA_MUX_VI_ALT3,
};
-static const char * const blink_groups[] = {
- "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
- "hdmi_cec_pee3",
- "owr",
-};
-
-static const char * const clk_12m_out_groups[] = {
- "pv3",
-};
-
-static const char * const clk_32k_in_groups[] = {
- "clk_32k_in",
-};
-
-static const char * const core_pwr_req_groups[] = {
- "core_pwr_req",
-};
-
-static const char * const cpu_pwr_req_groups[] = {
- "cpu_pwr_req",
-};
-
-static const char * const crt_groups[] = {
- "crt_hsync_pv6",
- "crt_vsync_pv7",
-};
-
-static const char * const dap_groups[] = {
- "clk1_req_pee2",
- "clk2_req_pcc5",
-};
-
-static const char * const ddr_groups[] = {
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_vsync_pd6",
-};
-
-static const char * const dev3_groups[] = {
- "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr0_pb2",
- "lcd_pwr1_pc1",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_vsync_pj4",
- "lcd_wr_n_pz3",
-};
-
-static const char * const displayb_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr0_pb2",
- "lcd_pwr1_pc1",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_vsync_pj4",
- "lcd_wr_n_pz3",
-};
-
-static const char * const dtv_groups[] = {
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
-};
-
-static const char * const extperiph1_groups[] = {
- "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
- "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
- "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "spi1_cs0_n_px6",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs0_n_px3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
-};
-
-static const char * const gmi_alt_groups[] = {
- "gmi_a16_pj7",
- "gmi_cs3_n_pk4",
- "gmi_cs7_n_pi6",
- "gmi_wp_n_pc7",
-};
-
-static const char * const hda_groups[] = {
- "clk1_req_pee2",
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "spdif_in_pk6",
-};
-
-static const char * const hdcp_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "lcd_pwr0_pb2",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdout_pn5",
- "lcd_wr_n_pz3",
-};
-
-static const char * const hdmi_groups[] = {
- "hdmi_int_pn7",
-};
-
-static const char * const hsi_groups[] = {
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
-};
-
-static const char * const i2c2_groups[] = {
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat4_paa4",
-};
-
-static const char * const i2c4_groups[] = {
- "ddc_scl_pv4",
- "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
-};
-
-static const char * const i2s2_groups[] = {
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const invalid_groups[] = {
- "kb_row3_pr3",
- "sdmmc4_clk_pcc4",
-};
-
-static const char * const kbc_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
-};
-
-static const char * const mio_groups[] = {
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
-};
-
-static const char * const nand_groups[] = {
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "kb_row4_pr4",
- "kb_row5_pr5",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
-};
-
-static const char * const nand_alt_groups[] = {
- "gmi_cs6_n_pi3",
- "gmi_cs7_n_pi6",
- "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
- "pu0",
- "pv2",
- "kb_row5_pr5",
- "owr",
-};
-
-static const char * const pcie_groups[] = {
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
-};
-
-static const char * const pwm0_groups[] = {
- "gmi_ad8_ph0",
- "pu3",
- "sdmmc3_dat3_pb4",
- "sdmmc3_dat5_pd0",
- "uart3_rts_n_pc0",
-};
-
-static const char * const pwm1_groups[] = {
- "gmi_ad9_ph1",
- "pu4",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat4_pd1",
-};
-
-static const char * const pwm2_groups[] = {
- "gmi_ad10_ph2",
- "pu5",
- "sdmmc3_clk_pa6",
-};
-
-static const char * const pwm3_groups[] = {
- "gmi_ad11_ph3",
- "pu6",
- "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwr_int_n_groups[] = {
- "pwr_int_n",
-};
-
-static const char * const rsvd1_groups[] = {
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs0_n_pj0",
- "gmi_cs1_n_pj2",
- "gmi_cs2_n_pk3",
- "gmi_cs3_n_pk4",
- "gmi_cs4_n_pk2",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_wait_pi7",
- "gmi_wp_n_pc7",
- "gmi_wr_n_pi0",
- "pu1",
- "pu2",
- "pv0",
- "pv1",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "vi_pclk_pt0",
-};
-
-static const char * const rsvd2_groups[] = {
- "clk1_out_pw4",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap3_din_pp1",
- "dap3_dout_pp2",
- "dap3_fs_pp0",
- "dap3_sclk_pp3",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc4_rst_n_pcc3",
- "spdif_out_pk5",
- "sys_clk_req_pz5",
- "uart3_cts_n_pa1",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
- "vi_d0_pt4",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_hsync_pd7",
- "vi_vsync_pd6",
-};
-
-static const char * const rsvd3_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "clk1_out_pw4",
- "clk1_req_pee2",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap2_din_pa4",
- "dap2_dout_pa5",
- "dap2_fs_pa2",
- "dap2_sclk_pa3",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "pbb0",
- "pbb7",
- "pcc1",
- "pcc2",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row3_pr3",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr1_pc1",
- "lcd_vsync_pj4",
- "owr",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc4_rst_n_pcc3",
- "sys_clk_req_pz5",
-};
-
-static const char * const rsvd4_groups[] = {
- "clk1_out_pw4",
- "clk1_req_pee2",
- "clk2_out_pw5",
- "clk2_req_pcc5",
- "clk3_out_pee0",
- "clk3_req_pee1",
- "clk_32k_in",
- "clk_32k_out_pa0",
- "core_pwr_req",
- "cpu_pwr_req",
- "crt_hsync_pv6",
- "crt_vsync_pv7",
- "dap4_din_pp5",
- "dap4_dout_pp6",
- "dap4_fs_pp4",
- "dap4_sclk_pp7",
- "ddc_scl_pv4",
- "ddc_sda_pv5",
- "gen1_i2c_scl_pc4",
- "gen1_i2c_sda_pc5",
- "gen2_i2c_scl_pt5",
- "gen2_i2c_sda_pt6",
- "gmi_a19_pk7",
- "gmi_ad0_pg0",
- "gmi_ad1_pg1",
- "gmi_ad10_ph2",
- "gmi_ad11_ph3",
- "gmi_ad12_ph4",
- "gmi_ad13_ph5",
- "gmi_ad14_ph6",
- "gmi_ad15_ph7",
- "gmi_ad2_pg2",
- "gmi_ad3_pg3",
- "gmi_ad4_pg4",
- "gmi_ad5_pg5",
- "gmi_ad6_pg6",
- "gmi_ad7_pg7",
- "gmi_ad8_ph0",
- "gmi_ad9_ph1",
- "gmi_adv_n_pk0",
- "gmi_clk_pk1",
- "gmi_cs2_n_pk3",
- "gmi_cs4_n_pk2",
- "gmi_dqs_pi2",
- "gmi_iordy_pi5",
- "gmi_oe_n_pi1",
- "gmi_rst_n_pi4",
- "gmi_wait_pi7",
- "gmi_wr_n_pi0",
- "pcc2",
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "pv0",
- "pv1",
- "pv2",
- "pv3",
- "hdmi_cec_pee3",
- "hdmi_int_pn7",
- "jtag_rtck_pu7",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_row0_pr0",
- "kb_row1_pr1",
- "kb_row2_pr2",
- "kb_row4_pr4",
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_d0_pe0",
- "lcd_d1_pe1",
- "lcd_d10_pf2",
- "lcd_d11_pf3",
- "lcd_d12_pf4",
- "lcd_d13_pf5",
- "lcd_d14_pf6",
- "lcd_d15_pf7",
- "lcd_d16_pm0",
- "lcd_d17_pm1",
- "lcd_d18_pm2",
- "lcd_d19_pm3",
- "lcd_d2_pe2",
- "lcd_d20_pm4",
- "lcd_d21_pm5",
- "lcd_d22_pm6",
- "lcd_d23_pm7",
- "lcd_d3_pe3",
- "lcd_d4_pe4",
- "lcd_d5_pe5",
- "lcd_d6_pe6",
- "lcd_d7_pe7",
- "lcd_d8_pf0",
- "lcd_d9_pf1",
- "lcd_dc0_pn6",
- "lcd_dc1_pd2",
- "lcd_de_pj1",
- "lcd_hsync_pj3",
- "lcd_m1_pw1",
- "lcd_pclk_pb3",
- "lcd_pwr1_pc1",
- "lcd_sdin_pz2",
- "lcd_vsync_pj4",
- "owr",
- "pex_l0_clkreq_n_pdd2",
- "pex_l0_prsnt_n_pdd0",
- "pex_l0_rst_n_pdd1",
- "pex_l1_clkreq_n_pdd6",
- "pex_l1_prsnt_n_pdd4",
- "pex_l1_rst_n_pdd5",
- "pex_l2_clkreq_n_pcc7",
- "pex_l2_prsnt_n_pdd7",
- "pex_l2_rst_n_pcc6",
- "pex_wake_n_pdd3",
- "pwr_i2c_scl_pz6",
- "pwr_i2c_sda_pz7",
- "pwr_int_n",
- "spi1_miso_px7",
- "sys_clk_req_pz5",
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_pclk_pt0",
- "vi_vsync_pd6",
-};
-
-static const char * const rtck_groups[] = {
- "jtag_rtck_pu7",
-};
-
-static const char * const sata_groups[] = {
- "gmi_cs6_n_pi3",
-};
-
-static const char * const sdmmc1_groups[] = {
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
-};
-
-static const char * const sdmmc2_groups[] = {
- "dap1_din_pn1",
- "dap1_dout_pn2",
- "dap1_fs_pn0",
- "dap1_sclk_pn3",
- "kb_row10_ps2",
- "kb_row11_ps3",
- "kb_row12_ps4",
- "kb_row13_ps5",
- "kb_row14_ps6",
- "kb_row15_ps7",
- "kb_row6_pr6",
- "kb_row7_pr7",
- "kb_row8_ps0",
- "kb_row9_ps1",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "vi_d1_pd5",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_pclk_pt0",
-};
-
-static const char * const sdmmc3_groups[] = {
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
-};
-
-static const char * const sdmmc4_groups[] = {
- "cam_i2c_scl_pbb1",
- "cam_i2c_sda_pbb2",
- "cam_mclk_pcc0",
- "pbb0",
- "pbb3",
- "pbb4",
- "pbb5",
- "pbb6",
- "pbb7",
- "pcc1",
- "sdmmc4_clk_pcc4",
- "sdmmc4_cmd_pt7",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "sdmmc4_dat4_paa4",
- "sdmmc4_dat5_paa5",
- "sdmmc4_dat6_paa6",
- "sdmmc4_dat7_paa7",
- "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const spdif_groups[] = {
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "spdif_in_pk6",
- "spdif_out_pk5",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const spi1_groups[] = {
- "spi1_cs0_n_px6",
- "spi1_miso_px7",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const spi2_groups[] = {
- "sdmmc3_cmd_pa7",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "spi1_cs0_n_px6",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs0_n_px3",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const spi2_alt_groups[] = {
- "spi1_cs0_n_px6",
- "spi1_miso_px7",
- "spi1_mosi_px4",
- "spi1_sck_px5",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
-};
-
-static const char * const spi3_groups[] = {
- "sdmmc3_clk_pa6",
- "sdmmc3_dat0_pb7",
- "sdmmc3_dat1_pb6",
- "sdmmc3_dat2_pb5",
- "sdmmc3_dat3_pb4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
- "spi1_miso_px7",
- "spi2_cs0_n_px3",
- "spi2_cs1_n_pw2",
- "spi2_cs2_n_pw3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
-};
-
-static const char * const spi4_groups[] = {
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "sdmmc3_dat4_pd1",
- "sdmmc3_dat5_pd0",
- "sdmmc3_dat6_pd3",
- "sdmmc3_dat7_pd4",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const spi5_groups[] = {
- "lcd_cs0_n_pn4",
- "lcd_cs1_n_pw0",
- "lcd_pwr0_pb2",
- "lcd_pwr2_pc6",
- "lcd_sck_pz4",
- "lcd_sdin_pz2",
- "lcd_sdout_pn5",
- "lcd_wr_n_pz3",
-};
-
-static const char * const spi6_groups[] = {
- "spi2_cs0_n_px3",
- "spi2_miso_px1",
- "spi2_mosi_px0",
- "spi2_sck_px2",
-};
-
-static const char * const sysclk_groups[] = {
- "sys_clk_req_pz5",
-};
-
-static const char * const test_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
-};
-
-static const char * const trace_groups[] = {
- "kb_col0_pq0",
- "kb_col1_pq1",
- "kb_col2_pq2",
- "kb_col3_pq3",
- "kb_col4_pq4",
- "kb_col5_pq5",
- "kb_col6_pq6",
- "kb_col7_pq7",
- "kb_row4_pr4",
- "kb_row5_pr5",
-};
-
-static const char * const uarta_groups[] = {
- "pu0",
- "pu1",
- "pu2",
- "pu3",
- "pu4",
- "pu5",
- "pu6",
- "sdmmc1_clk_pz0",
- "sdmmc1_cmd_pz1",
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7",
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
-};
-
-static const char * const uartb_groups[] = {
- "uart2_cts_n_pj5",
- "uart2_rts_n_pj6",
- "uart2_rxd_pc3",
- "uart2_txd_pc2",
-};
-
-static const char * const uartc_groups[] = {
- "uart3_cts_n_pa1",
- "uart3_rts_n_pc0",
- "uart3_rxd_pw7",
- "uart3_txd_pw6",
-};
-
-static const char * const uartd_groups[] = {
- "gmi_a16_pj7",
- "gmi_a17_pb0",
- "gmi_a18_pb1",
- "gmi_a19_pk7",
- "ulpi_clk_py0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const uarte_groups[] = {
- "sdmmc1_dat0_py7",
- "sdmmc1_dat1_py6",
- "sdmmc1_dat2_py5",
- "sdmmc1_dat3_py4",
- "sdmmc4_dat0_paa0",
- "sdmmc4_dat1_paa1",
- "sdmmc4_dat2_paa2",
- "sdmmc4_dat3_paa3",
-};
-
-static const char * const ulpi_groups[] = {
- "ulpi_clk_py0",
- "ulpi_data0_po1",
- "ulpi_data1_po2",
- "ulpi_data2_po3",
- "ulpi_data3_po4",
- "ulpi_data4_po5",
- "ulpi_data5_po6",
- "ulpi_data6_po7",
- "ulpi_data7_po0",
- "ulpi_dir_py1",
- "ulpi_nxt_py2",
- "ulpi_stp_py3",
-};
-
-static const char * const vgp1_groups[] = {
- "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
- "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
- "pbb3",
- "sdmmc4_dat5_paa5",
-};
-
-static const char * const vgp4_groups[] = {
- "pbb4",
- "sdmmc4_dat6_paa6",
-};
-
-static const char * const vgp5_groups[] = {
- "pbb5",
- "sdmmc4_dat7_paa7",
-};
-
-static const char * const vgp6_groups[] = {
- "pbb6",
- "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const vi_groups[] = {
- "cam_mclk_pcc0",
- "vi_d0_pt4",
- "vi_d1_pd5",
- "vi_d10_pt2",
- "vi_d11_pt3",
- "vi_d2_pl0",
- "vi_d3_pl1",
- "vi_d4_pl2",
- "vi_d5_pl3",
- "vi_d6_pl4",
- "vi_d7_pl5",
- "vi_d8_pl6",
- "vi_d9_pl7",
- "vi_hsync_pd7",
- "vi_mclk_pt1",
- "vi_pclk_pt0",
- "vi_vsync_pd6",
-};
-
-static const char * const vi_alt1_groups[] = {
- "cam_mclk_pcc0",
- "vi_mclk_pt1",
-};
-
-static const char * const vi_alt2_groups[] = {
- "vi_mclk_pt1",
-};
-
-static const char * const vi_alt3_groups[] = {
- "cam_mclk_pcc0",
- "vi_mclk_pt1",
-};
#define FUNCTION(fname) \
{ \
.name = #fname, \
- .groups = fname##_groups, \
- .ngroups = ARRAY_SIZE(fname##_groups), \
}
-static const struct tegra_function tegra30_functions[] = {
+static struct tegra_function tegra30_functions[] = {
FUNCTION(blink),
FUNCTION(cec),
FUNCTION(clk_12m_out),
@@ -3345,11 +2105,11 @@ static const struct tegra_function tegra30_functions[] = {
FUNCTION(vi_alt3),
};
-#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
-#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
{ \
@@ -3357,12 +2117,12 @@ static const struct tegra_function tegra30_functions[] = {
.pins = pg_name##_pins, \
.npins = ARRAY_SIZE(pg_name##_pins), \
.funcs = { \
- TEGRA_MUX_ ## f0, \
- TEGRA_MUX_ ## f1, \
- TEGRA_MUX_ ## f2, \
- TEGRA_MUX_ ## f3, \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
}, \
- .func_safe = TEGRA_MUX_ ## f_safe, \
+ .func_safe = TEGRA_MUX_##f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
.mux_bank = 1, \
.mux_bit = 0, \
@@ -3389,6 +2149,9 @@ static const struct tegra_function tegra30_functions[] = {
.drvtype_reg = -1, \
}
+#define DRV_PINGROUP_REG_Y(r) ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r) -1
+
#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b, \
drvdn_b, drvdn_w, drvup_b, drvup_w, \
slwr_b, slwr_w, slwf_b, slwf_w) \
@@ -3404,7 +2167,7 @@ static const struct tegra_function tegra30_functions[] = {
.lock_reg = -1, \
.ioreset_reg = -1, \
.rcv_sel_reg = -1, \
- .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
+ .drv_reg = DRV_PINGROUP_REG_Y(r), \
.drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
@@ -3422,7 +2185,6 @@ static const struct tegra_function tegra30_functions[] = {
static const struct tegra_pingroup tegra30_groups[] = {
/* pg_name, f0, f1, f2, f3, safe, r, od, ior */
- /* FIXME: Fill in correct data in safe column */
PINGROUP(clk_32k_out_pa0, BLINK, RSVD2, RSVD3, RSVD4, RSVD4, 0x331c, N, N),
PINGROUP(uart3_cts_n_pa1, UARTC, RSVD2, GMI, RSVD4, RSVD4, 0x317c, N, N),
PINGROUP(dap2_fs_pa2, I2S1, HDA, RSVD3, GMI, RSVD3, 0x3358, N, N),
@@ -3735,6 +2497,7 @@ static struct of_device_id tegra30_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra30-pinmux", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
static struct platform_driver tegra30_pinctrl_driver = {
.driver = {
@@ -3745,20 +2508,8 @@ static struct platform_driver tegra30_pinctrl_driver = {
.probe = tegra30_pinctrl_probe,
.remove = tegra_pinctrl_remove,
};
-
-static int __init tegra30_pinctrl_init(void)
-{
- return platform_driver_register(&tegra30_pinctrl_driver);
-}
-arch_initcall(tegra30_pinctrl_init);
-
-static void __exit tegra30_pinctrl_exit(void)
-{
- platform_driver_unregister(&tegra30_pinctrl_driver);
-}
-module_exit(tegra30_pinctrl_exit);
+module_platform_driver(tegra30_pinctrl_driver);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra30 pinctrl driver");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index c381ae63c508..48093719167a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -2260,6 +2260,42 @@ static const unsigned int msiof0_tx_pins[] = {
static const unsigned int msiof0_tx_mux[] = {
MSIOF0_TXD_MARK,
};
+
+static const unsigned int msiof0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+ MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+ MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+ MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 29),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+ MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+ MSIOF0_TXD_B_MARK,
+};
/* - MSIOF1 ----------------------------------------------------------------- */
static const unsigned int msiof1_clk_pins[] = {
/* SCK */
@@ -2303,6 +2339,42 @@ static const unsigned int msiof1_tx_pins[] = {
static const unsigned int msiof1_tx_mux[] = {
MSIOF1_TXD_MARK,
};
+
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 16),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
/* - MSIOF2 ----------------------------------------------------------------- */
static const unsigned int msiof2_clk_pins[] = {
/* SCK */
@@ -2389,6 +2461,58 @@ static const unsigned int msiof3_tx_pins[] = {
static const unsigned int msiof3_tx_mux[] = {
MSIOF3_TXD_MARK,
};
+
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rx_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_tx_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+ SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3231,6 +3355,13 @@ static const unsigned int usb0_pins[] = {
static const unsigned int usb0_mux[] = {
USB0_PWEN_MARK, USB0_OVC_VBUS_MARK,
};
+static const unsigned int usb0_ovc_vbus_pins[] = {
+ /* OVC/VBUS */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int usb0_ovc_vbus_mux[] = {
+ USB0_OVC_VBUS_MARK,
+};
/* - USB1 ------------------------------------------------------------------- */
static const unsigned int usb1_pins[] = {
/* PWEN, OVC */
@@ -3653,12 +3784,22 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof0_ss2),
SH_PFC_PIN_GROUP(msiof0_rx),
SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
SH_PFC_PIN_GROUP(msiof1_clk),
SH_PFC_PIN_GROUP(msiof1_sync),
SH_PFC_PIN_GROUP(msiof1_ss1),
SH_PFC_PIN_GROUP(msiof1_ss2),
SH_PFC_PIN_GROUP(msiof1_rx),
SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
SH_PFC_PIN_GROUP(msiof2_clk),
SH_PFC_PIN_GROUP(msiof2_sync),
SH_PFC_PIN_GROUP(msiof2_ss1),
@@ -3671,6 +3812,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof3_ss2),
SH_PFC_PIN_GROUP(msiof3_rx),
SH_PFC_PIN_GROUP(msiof3_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_rx_b),
+ SH_PFC_PIN_GROUP(msiof3_tx_b),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -3789,6 +3937,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(tpu0_to2),
SH_PFC_PIN_GROUP(tpu0_to3),
SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc_vbus),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
VIN_DATA_PIN_GROUP(vin0_data, 24),
@@ -3941,6 +4090,11 @@ static const char * const msiof0_groups[] = {
"msiof0_ss2",
"msiof0_rx",
"msiof0_tx",
+ "msiof0_clk_b",
+ "msiof0_ss1_b",
+ "msiof0_ss2_b",
+ "msiof0_rx_b",
+ "msiof0_tx_b",
};
static const char * const msiof1_groups[] = {
@@ -3950,6 +4104,11 @@ static const char * const msiof1_groups[] = {
"msiof1_ss2",
"msiof1_rx",
"msiof1_tx",
+ "msiof1_clk_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_rx_b",
+ "msiof1_tx_b",
};
static const char * const msiof2_groups[] = {
@@ -3968,6 +4127,16 @@ static const char * const msiof3_groups[] = {
"msiof3_ss2",
"msiof3_rx",
"msiof3_tx",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_rx_b",
+ "msiof3_tx_b",
+};
+
+static const char * const qspi_groups[] = {
+ "qspi_ctrl",
+ "qspi_data2",
+ "qspi_data4",
};
static const char * const scif0_groups[] = {
@@ -4134,6 +4303,7 @@ static const char * const tpu0_groups[] = {
static const char * const usb0_groups[] = {
"usb0",
+ "usb0_ovc_vbus",
};
static const char * const usb1_groups[] = {
@@ -4213,6 +4383,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(qspi),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 77d103fe39d9..5186d70c49d4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -89,7 +89,8 @@ enum {
/* GPSR6 */
FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
- FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+ FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
+ FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+ PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
/* IPSR0 */
PINMUX_IPSR_DATA(IP0_0, D0),
@@ -1943,6 +1945,50 @@ static const unsigned int i2c4_c_pins[] = {
static const unsigned int i2c4_c_mux[] = {
SCL4_C_MARK, SDA4_C_MARK,
};
+/* - I2C7 ------------------------------------------------------------------- */
+static const unsigned int i2c7_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int i2c7_mux[] = {
+ SCL7_MARK, SDA7_MARK,
+};
+static const unsigned int i2c7_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int i2c7_b_mux[] = {
+ SCL7_B_MARK, SDA7_B_MARK,
+};
+static const unsigned int i2c7_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int i2c7_c_mux[] = {
+ SCL7_C_MARK, SDA7_C_MARK,
+};
+/* - I2C8 ------------------------------------------------------------------- */
+static const unsigned int i2c8_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+};
+static const unsigned int i2c8_mux[] = {
+ SCL8_MARK, SDA8_MARK,
+};
+static const unsigned int i2c8_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int i2c8_b_mux[] = {
+ SCL8_B_MARK, SDA8_B_MARK,
+};
+static const unsigned int i2c8_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 22), RCAR_GP_PIN(6, 23),
+};
+static const unsigned int i2c8_c_mux[] = {
+ SCL8_C_MARK, SDA8_C_MARK,
+};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
/* IRQ */
@@ -2049,6 +2095,92 @@ static const unsigned int msiof0_tx_pins[] = {
static const unsigned int msiof0_tx_mux[] = {
MSIOF0_TXD_MARK,
};
+
+static const unsigned int msiof0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+ MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof0_sync_b_mux[] = {
+ MSIOF0_SYNC_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+ MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+ MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 21),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+ MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+ MSIOF0_TXD_B_MARK,
+};
+
+static const unsigned int msiof0_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 26),
+};
+static const unsigned int msiof0_clk_c_mux[] = {
+ MSIOF0_SCK_C_MARK,
+};
+static const unsigned int msiof0_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof0_sync_c_mux[] = {
+ MSIOF0_SYNC_C_MARK,
+};
+static const unsigned int msiof0_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int msiof0_ss1_c_mux[] = {
+ MSIOF0_SS1_C_MARK,
+};
+static const unsigned int msiof0_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 28),
+};
+static const unsigned int msiof0_ss2_c_mux[] = {
+ MSIOF0_SS2_C_MARK,
+};
+static const unsigned int msiof0_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 29),
+};
+static const unsigned int msiof0_rx_c_mux[] = {
+ MSIOF0_RXD_C_MARK,
+};
+static const unsigned int msiof0_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 30),
+};
+static const unsigned int msiof0_tx_c_mux[] = {
+ MSIOF0_TXD_C_MARK,
+};
/* - MSIOF1 ----------------------------------------------------------------- */
static const unsigned int msiof1_clk_pins[] = {
/* SCK */
@@ -2092,6 +2224,143 @@ static const unsigned int msiof1_tx_pins[] = {
static const unsigned int msiof1_tx_mux[] = {
MSIOF1_TXD_MARK,
};
+
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 29),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 30),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+ MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 31),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(7, 18),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(7, 17),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
+
+static const unsigned int msiof1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+ MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+ MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof1_rx_c_mux[] = {
+ MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof1_tx_c_mux[] = {
+ MSIOF1_TXD_C_MARK,
+};
+
+static const unsigned int msiof1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 28),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+ MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 30),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+ MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 29),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+ MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_rx_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 27),
+};
+static const unsigned int msiof1_rx_d_mux[] = {
+ MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_tx_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 26),
+};
+static const unsigned int msiof1_tx_d_mux[] = {
+ MSIOF1_TXD_D_MARK,
+};
+
+static const unsigned int msiof1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+ MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+ MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_rx_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof1_rx_e_mux[] = {
+ MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_tx_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof1_tx_e_mux[] = {
+ MSIOF1_TXD_E_MARK,
+};
/* - MSIOF2 ----------------------------------------------------------------- */
static const unsigned int msiof2_clk_pins[] = {
/* SCK */
@@ -2135,6 +2404,197 @@ static const unsigned int msiof2_tx_pins[] = {
static const unsigned int msiof2_tx_mux[] = {
MSIOF2_TXD_MARK,
};
+
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_rx_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 17),
+};
+static const unsigned int msiof2_rx_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_tx_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 16),
+};
+static const unsigned int msiof2_tx_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+
+static const unsigned int msiof2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+ MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+ MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_rx_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof2_rx_c_mux[] = {
+ MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_tx_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof2_tx_c_mux[] = {
+ MSIOF2_TXD_C_MARK,
+};
+
+static const unsigned int msiof2_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+ MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+ MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+ MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+ MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_rx_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof2_rx_d_mux[] = {
+ MSIOF2_RXD_D_MARK,
+};
+static const unsigned int msiof2_tx_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof2_tx_d_mux[] = {
+ MSIOF2_TXD_D_MARK,
+};
+
+static const unsigned int msiof2_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 15),
+};
+static const unsigned int msiof2_clk_e_mux[] = {
+ MSIOF2_SCK_E_MARK,
+};
+static const unsigned int msiof2_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof2_sync_e_mux[] = {
+ MSIOF2_SYNC_E_MARK,
+};
+static const unsigned int msiof2_rx_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(7, 14),
+};
+static const unsigned int msiof2_rx_e_mux[] = {
+ MSIOF2_RXD_E_MARK,
+};
+static const unsigned int msiof2_tx_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(7, 13),
+};
+static const unsigned int msiof2_tx_e_mux[] = {
+ MSIOF2_TXD_E_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+ SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+ MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
+
+static const unsigned int qspi_ctrl_b_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int qspi_ctrl_b_mux[] = {
+ SPCLK_B_MARK, SSL_B_MARK,
+};
+static const unsigned int qspi_data2_b_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2),
+};
+static const unsigned int qspi_data2_b_mux[] = {
+ MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
+};
+static const unsigned int qspi_data4_b_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int qspi_data4_b_mux[] = {
+ MOSI_IO0_B_MARK, MISO_IO1_B_MARK, IO2_B_MARK, IO3_B_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3123,6 +3583,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c4),
SH_PFC_PIN_GROUP(i2c4_b),
SH_PFC_PIN_GROUP(i2c4_c),
+ SH_PFC_PIN_GROUP(i2c7),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(i2c7_c),
+ SH_PFC_PIN_GROUP(i2c8),
+ SH_PFC_PIN_GROUP(i2c8_b),
+ SH_PFC_PIN_GROUP(i2c8_c),
SH_PFC_PIN_GROUP(intc_irq0),
SH_PFC_PIN_GROUP(intc_irq1),
SH_PFC_PIN_GROUP(intc_irq2),
@@ -3137,18 +3603,75 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof0_ss2),
SH_PFC_PIN_GROUP(msiof0_rx),
SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_sync_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
+ SH_PFC_PIN_GROUP(msiof0_clk_c),
+ SH_PFC_PIN_GROUP(msiof0_sync_c),
+ SH_PFC_PIN_GROUP(msiof0_ss1_c),
+ SH_PFC_PIN_GROUP(msiof0_ss2_c),
+ SH_PFC_PIN_GROUP(msiof0_rx_c),
+ SH_PFC_PIN_GROUP(msiof0_tx_c),
SH_PFC_PIN_GROUP(msiof1_clk),
SH_PFC_PIN_GROUP(msiof1_sync),
SH_PFC_PIN_GROUP(msiof1_ss1),
SH_PFC_PIN_GROUP(msiof1_ss2),
SH_PFC_PIN_GROUP(msiof1_rx),
SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_rx_c),
+ SH_PFC_PIN_GROUP(msiof1_tx_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_rx_d),
+ SH_PFC_PIN_GROUP(msiof1_tx_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_rx_e),
+ SH_PFC_PIN_GROUP(msiof1_tx_e),
SH_PFC_PIN_GROUP(msiof2_clk),
SH_PFC_PIN_GROUP(msiof2_sync),
SH_PFC_PIN_GROUP(msiof2_ss1),
SH_PFC_PIN_GROUP(msiof2_ss2),
SH_PFC_PIN_GROUP(msiof2_rx),
SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_rx_b),
+ SH_PFC_PIN_GROUP(msiof2_tx_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_rx_c),
+ SH_PFC_PIN_GROUP(msiof2_tx_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_rx_d),
+ SH_PFC_PIN_GROUP(msiof2_tx_d),
+ SH_PFC_PIN_GROUP(msiof2_clk_e),
+ SH_PFC_PIN_GROUP(msiof2_sync_e),
+ SH_PFC_PIN_GROUP(msiof2_rx_e),
+ SH_PFC_PIN_GROUP(msiof2_tx_e),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(qspi_ctrl_b),
+ SH_PFC_PIN_GROUP(qspi_data2_b),
+ SH_PFC_PIN_GROUP(qspi_data4_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_data_b),
SH_PFC_PIN_GROUP(scif0_data_c),
@@ -3335,6 +3858,18 @@ static const char * const i2c4_groups[] = {
"i2c4_c",
};
+static const char * const i2c7_groups[] = {
+ "i2c7",
+ "i2c7_b",
+ "i2c7_c",
+};
+
+static const char * const i2c8_groups[] = {
+ "i2c8",
+ "i2c8_b",
+ "i2c8_c",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq1",
@@ -3356,6 +3891,18 @@ static const char * const msiof0_groups[] = {
"msiof0_ss2",
"msiof0_rx",
"msiof0_tx",
+ "msiof0_clk_b",
+ "msiof0_sync_b",
+ "msiof0_ss1_b",
+ "msiof0_ss2_b",
+ "msiof0_rx_b",
+ "msiof0_tx_b",
+ "msiof0_clk_c",
+ "msiof0_sync_c",
+ "msiof0_ss1_c",
+ "msiof0_ss2_c",
+ "msiof0_rx_c",
+ "msiof0_tx_c",
};
static const char * const msiof1_groups[] = {
@@ -3365,6 +3912,25 @@ static const char * const msiof1_groups[] = {
"msiof1_ss2",
"msiof1_rx",
"msiof1_tx",
+ "msiof1_clk_b",
+ "msiof1_sync_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_rx_b",
+ "msiof1_tx_b",
+ "msiof1_clk_c",
+ "msiof1_sync_c",
+ "msiof1_rx_c",
+ "msiof1_tx_c",
+ "msiof1_clk_d",
+ "msiof1_sync_d",
+ "msiof1_ss1_d",
+ "msiof1_rx_d",
+ "msiof1_tx_d",
+ "msiof1_clk_e",
+ "msiof1_sync_e",
+ "msiof1_rx_e",
+ "msiof1_tx_e",
};
static const char * const msiof2_groups[] = {
@@ -3374,6 +3940,35 @@ static const char * const msiof2_groups[] = {
"msiof2_ss2",
"msiof2_rx",
"msiof2_tx",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_rx_b",
+ "msiof2_tx_b",
+ "msiof2_clk_c",
+ "msiof2_sync_c",
+ "msiof2_rx_c",
+ "msiof2_tx_c",
+ "msiof2_clk_d",
+ "msiof2_sync_d",
+ "msiof2_ss1_d",
+ "msiof2_ss2_d",
+ "msiof2_rx_d",
+ "msiof2_tx_d",
+ "msiof2_clk_e",
+ "msiof2_sync_e",
+ "msiof2_rx_e",
+ "msiof2_tx_e",
+};
+
+static const char * const qspi_groups[] = {
+ "qspi_ctrl",
+ "qspi_data2",
+ "qspi_data4",
+ "qspi_ctrl_b",
+ "qspi_data2_b",
+ "qspi_data4_b",
};
static const char * const scif0_groups[] = {
@@ -3566,11 +4161,14 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(i2c8),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(qspi),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -3825,7 +4423,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_11_FN, FN_IP13_25,
GP_6_10_FN, FN_IP13_24_23,
GP_6_9_FN, FN_IP13_22,
- 0, 0,
+ GP_6_8_FN, FN_SD1_CLK,
GP_6_7_FN, FN_IP13_21_19,
GP_6_6_FN, FN_IP13_18_16,
GP_6_5_FN, FN_IP13_15,
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 2b9f32065920..c4dd3d5cf9c3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -1,7 +1,8 @@
/*
* pinctrl pads, groups, functions for CSR SiRFatlasVI
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -529,6 +530,40 @@ static const struct sirfsoc_padmux usp0_padmux = {
static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
+ .muxmask = usp0_only_utfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(6),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
+
+static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
+ .muxmask = usp0_only_urfs_muxmask,
+ .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+ .funcmask = BIT(1) | BIT(2) | BIT(9),
+ .funcval = 0,
+};
+
+static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
+
static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
{
.group = 1,
@@ -905,6 +940,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
usp0_uart_nostreamctrl_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
+ SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
usp1_uart_nostreamctrl_pins),
@@ -953,6 +990,9 @@ static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
static const char * const usp0_uart_nostreamctrl_grp[] = {
"usp0_uart_nostreamctrl_grp" };
static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
+static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
+
static const char * const usp1grp[] = { "usp1grp" };
static const char * const usp1_uart_nostreamctrl_grp[] = {
"usp1_uart_nostreamctrl_grp" };
@@ -1003,6 +1043,10 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
usp0_uart_nostreamctrl_grp,
usp0_uart_nostreamctrl_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp,
+ usp0_only_utfs_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp,
+ usp0_only_urfs_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
usp1_uart_nostreamctrl_grp,
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b42651d76a..8aa76f0776d7 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -1,7 +1,8 @@
/*
* pinctrl pads, groups, functions for CSR SiRFprimaII
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -413,7 +414,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
.funcval = 0,
};
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask spi1_muxmask[] = {
{
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index a0d6152701cd..5f3adb87c1ef 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -1,7 +1,8 @@
/*
* pinmux driver for CSR SiRFprimaII
*
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
*
* Licensed under GPLv2 or later.
*/
@@ -598,7 +599,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
dev_err(bank->chip.gc.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -611,7 +612,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
sirfsoc_gpio_irq_mask(d);
- gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+ gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
}
static struct irq_chip sirfsoc_irq_chip = {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af9c232..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;
- configs[0] = pull;
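+ /* Map the devicetree pull value (0 = none, 1 = pull-down, 2 = pull-up)
+  * to the corresponding pinconf bias parameter. */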
+ switch (pull) {
+ case 0:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ break;
+ case 1:
+ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+ }
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index b13303e75a34..440ed776efd4 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -25,4 +25,18 @@ config CHROMEOS_LAPTOP
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_laptop.
+config CHROMEOS_PSTORE
+ tristate "Chrome OS pstore support"
+ ---help---
+ This module instantiates the persistent storage on x86 ChromeOS
+ devices. It can be used to store away console logs and crash
+ information across reboots.
+
+ The range of memory used is 0xf00000-0x1000000, traditionally
+ the memory used to back VGA controller memory.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_pstore.
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 015e9195e226..2b860ca7450f 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 3e5b4497a1d0..7f3aad0e115c 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -27,6 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#define ATMEL_TP_I2C_ADDR 0x4b
#define ATMEL_TP_I2C_BL_ADDR 0x25
@@ -40,7 +41,7 @@ static struct i2c_client *als;
static struct i2c_client *tp;
static struct i2c_client *ts;
-const char *i2c_adapter_names[] = {
+static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
@@ -53,20 +54,33 @@ enum i2c_adapter_type {
I2C_ADAPTER_PANEL,
};
-static struct i2c_board_info __initdata cyapa_device = {
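+/* One board peripheral: the setup callback to run and the I2C adapter it sits on. */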
+struct i2c_peripheral {
+ int (*add)(enum i2c_adapter_type type);
+ enum i2c_adapter_type type;
+};
+
+#define MAX_I2C_PERIPHERALS 3
+
+struct chromeos_laptop {
+ struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
+};
+
+static struct chromeos_laptop *cros_laptop;
+
+static struct i2c_board_info cyapa_device = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
};
-static struct i2c_board_info __initdata isl_als_device = {
+static struct i2c_board_info isl_als_device = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
};
-static struct i2c_board_info __initdata tsl2583_als_device = {
+static struct i2c_board_info tsl2583_als_device = {
I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
};
-static struct i2c_board_info __initdata tsl2563_als_device = {
+static struct i2c_board_info tsl2563_als_device = {
I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
};
@@ -89,7 +103,7 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = {
.config_length = 0,
};
-static struct i2c_board_info __initdata atmel_224s_tp_device = {
+static struct i2c_board_info atmel_224s_tp_device = {
I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
.platform_data = &atmel_224s_tp_platform_data,
.flags = I2C_CLIENT_WAKE,
@@ -110,13 +124,13 @@ static struct mxt_platform_data atmel_1664s_platform_data = {
.config_length = 0,
};
-static struct i2c_board_info __initdata atmel_1664s_device = {
+static struct i2c_board_info atmel_1664s_device = {
I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
.platform_data = &atmel_1664s_platform_data,
.flags = I2C_CLIENT_WAKE,
};
-static struct i2c_client __init *__add_probed_i2c_device(
+static struct i2c_client *__add_probed_i2c_device(
const char *name,
int bus,
struct i2c_board_info *info,
@@ -169,7 +183,7 @@ static struct i2c_client __init *__add_probed_i2c_device(
return client;
}
-static int __init __find_i2c_adap(struct device *dev, void *data)
+static int __find_i2c_adap(struct device *dev, void *data)
{
const char *name = data;
static const char *prefix = "i2c-";
@@ -180,7 +194,7 @@ static int __init __find_i2c_adap(struct device *dev, void *data)
return (strncmp(adapter->name, name, strlen(name)) == 0);
}
-static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+static int find_i2c_adapter_num(enum i2c_adapter_type type)
{
struct device *dev = NULL;
struct i2c_adapter *adapter;
@@ -189,8 +203,9 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
__find_i2c_adap);
if (!dev) {
- pr_err("%s: i2c adapter %s not found on system.\n", __func__,
- name);
+ /* Adapters may appear later. Deferred probing will retry */
+ pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
+ name);
return -ENODEV;
}
adapter = to_i2c_adapter(dev);
@@ -205,7 +220,7 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
* Returns NULL if no devices found.
* See Documentation/i2c/instantiating-devices for more information.
*/
-static __init struct i2c_client *add_probed_i2c_device(
+static struct i2c_client *add_probed_i2c_device(
const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info,
@@ -222,7 +237,7 @@ static __init struct i2c_client *add_probed_i2c_device(
* info->addr.
* Returns NULL if no device found.
*/
-static __init struct i2c_client *add_i2c_device(const char *name,
+static struct i2c_client *add_i2c_device(const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info)
{
@@ -233,161 +248,259 @@ static __init struct i2c_client *add_i2c_device(const char *name,
addr_list);
}
-
-static struct i2c_client __init *add_smbus_device(const char *name,
- struct i2c_board_info *info)
+static int setup_cyapa_tp(enum i2c_adapter_type type)
{
- return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
-}
+ if (tp)
+ return 0;
-static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
-{
- /* add cyapa touchpad on smbus */
- tp = add_smbus_device("trackpad", &cyapa_device);
- return 0;
+ /* add cyapa touchpad */
+ tp = add_i2c_device("trackpad", type, &cyapa_device);
+ return (!tp) ? -EAGAIN : 0;
}
-static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+static int setup_atmel_224s_tp(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
ATMEL_TP_I2C_ADDR,
I2C_CLIENT_END };
+ if (tp)
+ return 0;
- /* add atmel mxt touchpad on VGA DDC GMBus */
- tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+ /* add atmel mxt touchpad */
+ tp = add_probed_i2c_device("trackpad", type,
&atmel_224s_tp_device, addr_list);
- return 0;
+ return (!tp) ? -EAGAIN : 0;
}
-static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
ATMEL_TS_I2C_ADDR,
I2C_CLIENT_END };
+ if (ts)
+ return 0;
- /* add atmel mxt touch device on PANEL GMBus */
- ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+ /* add atmel mxt touch device */
+ ts = add_probed_i2c_device("touchscreen", type,
&atmel_1664s_device, addr_list);
- return 0;
+ return (!ts) ? -EAGAIN : 0;
}
-
-static int __init setup_isl29018_als(const struct dmi_system_id *id)
+static int setup_isl29018_als(enum i2c_adapter_type type)
{
+ if (als)
+ return 0;
+
/* add isl29018 light sensor */
- als = add_smbus_device("lightsensor", &isl_als_device);
- return 0;
+ als = add_i2c_device("lightsensor", type, &isl_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_isl29023_als(const struct dmi_system_id *id)
+static int setup_tsl2583_als(enum i2c_adapter_type type)
{
- /* add isl29023 light sensor on Panel GMBus */
- als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
- &isl_als_device);
- return 0;
+ if (als)
+ return 0;
+
+ /* add tsl2583 light sensor */
+ als = add_i2c_device(NULL, type, &tsl2583_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+static int setup_tsl2563_als(enum i2c_adapter_type type)
{
- /* add tsl2583 light sensor on smbus */
- als = add_smbus_device(NULL, &tsl2583_als_device);
- return 0;
+ if (als)
+ return 0;
+
+ /* add tsl2563 light sensor */
+ als = add_i2c_device(NULL, type, &tsl2563_als_device);
+ return (!als) ? -EAGAIN : 0;
}
-static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
{
- /* add tsl2563 light sensor on smbus */
- als = add_smbus_device(NULL, &tsl2563_als_device);
- return 0;
+ cros_laptop = (void *)id->driver_data;
+ pr_debug("DMI Matched %s.\n", id->ident);
+
+ /* Indicate to dmi_scan that processing is done. */
+ return 1;
}
-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
- {
- .ident = "Samsung Series 5 550 - Touchpad",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
- },
- .callback = setup_cyapa_smbus_tp,
+static int chromeos_laptop_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
+ struct i2c_peripheral *i2c_dev;
+
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+ /* No more peripherals. */
+ if (i2c_dev->add == NULL)
+ break;
+
+ /* Add the device. Set -EPROBE_DEFER on any failure */
+ if (i2c_dev->add(i2c_dev->type))
+ ret = -EPROBE_DEFER;
+ }
+
+ return ret;
+}
+
+static struct chromeos_laptop samsung_series_5_550 = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ /* Light Sensor. */
+ { .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
},
- {
- .ident = "Chromebook Pixel - Touchscreen",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
- },
- .callback = setup_atmel_1664s_ts,
+};
+
+static struct chromeos_laptop samsung_series_5 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop chromebook_pixel = {
+ .i2c_peripherals = {
+ /* Touch Screen. */
+ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
+ /* Touchpad. */
+ { .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
+ /* Light Sensor. */
+ { .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
+ },
+};
+
+static struct chromeos_laptop acer_c7_chromebook = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop acer_ac700 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
},
+};
+
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+ },
+};
+
+static struct chromeos_laptop cr48 = {
+ .i2c_peripherals = {
+ /* Light Sensor. */
+ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+ },
+};
+
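+/*
+ * DMI table helper: route a match to chromeos_laptop_dmi_matched() and
+ * stash the board descriptor in driver_data.
+ */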
+#define _CBDD(board_) \
+ .callback = chromeos_laptop_dmi_matched, \
+ .driver_data = (void *)&board_
+
+static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
{
- .ident = "Chromebook Pixel - Touchpad",
+ .ident = "Samsung Series 5 550",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
},
- .callback = setup_atmel_224s_tp,
+ _CBDD(samsung_series_5_550),
},
{
- .ident = "Samsung Series 5 550 - Light Sensor",
+ .ident = "Samsung Series 5",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
},
- .callback = setup_isl29018_als,
+ _CBDD(samsung_series_5),
},
{
- .ident = "Chromebook Pixel - Light Sensor",
+ .ident = "Chromebook Pixel",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
},
- .callback = setup_isl29023_als,
+ _CBDD(chromebook_pixel),
},
{
- .ident = "Acer C7 Chromebook - Touchpad",
+ .ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
},
- .callback = setup_cyapa_smbus_tp,
+ _CBDD(acer_c7_chromebook),
},
{
- .ident = "HP Pavilion 14 Chromebook - Touchpad",
+ .ident = "Acer AC700",
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
- .callback = setup_cyapa_smbus_tp,
+ _CBDD(acer_ac700),
},
{
- .ident = "Samsung Series 5 - Light Sensor",
+ .ident = "HP Pavilion 14 Chromebook",
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
},
- .callback = setup_tsl2583_als,
+ _CBDD(hp_pavilion_14_chromebook),
},
{
- .ident = "Cr-48 - Light Sensor",
+ .ident = "Cr-48",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
},
- .callback = setup_tsl2563_als,
- },
- {
- .ident = "Acer AC700 - Light Sensor",
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- .callback = setup_tsl2563_als,
+ _CBDD(cr48),
},
{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+static struct platform_device *cros_platform_device;
+
+static struct platform_driver cros_platform_driver = {
+ .driver = {
+ .name = "chromeos_laptop",
+ .owner = THIS_MODULE,
+ },
+ .probe = chromeos_laptop_probe,
+};
+
static int __init chromeos_laptop_init(void)
{
+ int ret;
if (!dmi_check_system(chromeos_laptop_dmi_table)) {
pr_debug("%s unsupported system.\n", __func__);
return -ENODEV;
}
+
+ ret = platform_driver_register(&cros_platform_driver);
+ if (ret)
+ return ret;
+
+ cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
+ if (!cros_platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+
+ ret = platform_device_add(cros_platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
return 0;
+
+fail_platform_device2:
+ platform_device_put(cros_platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&cros_platform_driver);
+ return ret;
}
static void __exit chromeos_laptop_exit(void)
@@ -398,6 +511,9 @@ static void __exit chromeos_laptop_exit(void)
i2c_unregister_device(tp);
if (ts)
i2c_unregister_device(ts);
+
+ platform_device_unregister(cros_platform_device);
+ platform_driver_unregister(&cros_platform_driver);
}
module_init(chromeos_laptop_init);
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
new file mode 100644
index 000000000000..e0e0e65cf442
--- /dev/null
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -0,0 +1,101 @@
+/*
+ * chromeos_pstore.c - Driver to instantiate Chromebook ramoops device
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_ram.h>
+
+static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with GOOGLE as vendor and
+ * coreboot as bios vendor. No other systems with this
+ * combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {
+ /*
+ * The first Samsung Chromebox and Chromebook Series 5 550 use
+ * coreboot but with Samsung as the system vendor.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {
+ /* x86-alex, the first Samsung Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ },
+ {
+ /* x86-mario, the Cr-48 pilot device from Google. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ },
+ {
+ /* x86-zgb, the first Acer Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
+
+/*
+ * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
+ * range untouched across reboots, so we use that to store our pstore
+ * contents for panic logs, etc.
+ */
+static struct ramoops_platform_data chromeos_ramoops_data = {
+ .mem_size = 0x100000,
+ .mem_address = 0xf00000,
+ .record_size = 0x20000,
+ .console_size = 0x20000,
+ .ftrace_size = 0x20000,
+ .dump_oops = 1,
+};
+
+static struct platform_device chromeos_ramoops = {
+ .name = "ramoops",
+ .dev = {
+ .platform_data = &chromeos_ramoops_data,
+ },
+};
+
+static int __init chromeos_pstore_init(void)
+{
+ if (dmi_check_system(chromeos_pstore_dmi_table))
+ return platform_device_register(&chromeos_ramoops);
+
+ return -ENODEV;
+}
+
+static void __exit chromeos_pstore_exit(void)
+{
+ platform_device_unregister(&chromeos_ramoops);
+}
+
+module_init(chromeos_pstore_init);
+module_exit(chromeos_pstore_exit);
+
+MODULE_DESCRIPTION("Chrome OS pstore module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d9dcd37b5a52..5f67843c7fb7 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -27,8 +27,6 @@ config ACER_WMI
depends on ACPI_WMI
select INPUT_SPARSEKMAP
# Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- # but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
select ACPI_VIDEO if ACPI
---help---
This is a driver for newer Acer (and Wistron) laptops. It adds
@@ -197,6 +195,17 @@ config HP_ACCEL
To compile this driver as a module, choose M here: the module will
be called hp_accel.
+config HP_WIRELESS
+ tristate "HP WIRELESS"
+ depends on ACPI
+ depends on INPUT
+ help
+ This driver provides support for the new HP wireless button for Windows 8.
+ On such systems the driver should load automatically (via ACPI alias).
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp-wireless.
+
config HP_WMI
tristate "HP WMI extras"
depends on ACPI_WMI
@@ -808,4 +817,12 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config INTEL_BAYTRAIL_MBI
+ tristate
+ depends on PCI
+ ---help---
+ Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
+ Interface. This is a requirement for systems that need to configure
+ the PUNIT for power management features such as RAPL.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index f0e6aa407ffb..9b87cfc42b84 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
+obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
@@ -54,3 +55,4 @@ obj-$(CONFIG_INTEL_RST) += intel-rst.o
obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
+obj-$(CONFIG_INTEL_BAYTRAIL_MBI) += intel_baytrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c9076bdaf2c1..c91f69b39db4 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -41,8 +41,6 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-
-#include <acpi/acpi_drivers.h>
#include <acpi/video.h>
MODULE_AUTHOR("Carlos Corbacho");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 594323a926cf..7f4dc6f51f8a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -53,8 +53,7 @@
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#define ASUS_LAPTOP_VERSION "0.42"
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 6fe268f6af91..c5e082fb82fa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -45,8 +45,7 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/video.h>
#include "asus-wmi.h"
@@ -184,7 +183,6 @@ struct asus_wmi {
struct input_dev *inputdev;
struct backlight_device *backlight_device;
- struct device *hwmon_device;
struct platform_device *platform_device;
struct led_classdev wlan_led;
@@ -1073,20 +1071,12 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
-
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "asus\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_temp1_input.attr,
NULL
};
@@ -1100,9 +1090,9 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
int dev_id = -1;
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
- if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
+ if (attr == &dev_attr_pwm1.attr)
dev_id = ASUS_WMI_DEVID_FAN_CTRL;
- else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+ else if (attr == &dev_attr_temp1_input.attr)
dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
if (dev_id != -1) {
@@ -1137,35 +1127,20 @@ static struct attribute_group hwmon_attribute_group = {
.is_visible = asus_hwmon_sysfs_is_visible,
.attrs = hwmon_attributes
};
-
-static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
-{
- struct device *hwmon;
-
- hwmon = asus->hwmon_device;
- if (!hwmon)
- return;
- sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- asus->hwmon_device = NULL;
-}
+__ATTRIBUTE_GROUPS(hwmon_attribute);
static int asus_wmi_hwmon_init(struct asus_wmi *asus)
{
struct device *hwmon;
- int result;
- hwmon = hwmon_device_register(&asus->platform_device->dev);
+ hwmon = hwmon_device_register_with_groups(&asus->platform_device->dev,
+ "asus", asus,
+ hwmon_attribute_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
- dev_set_drvdata(hwmon, asus);
- asus->hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
- if (result)
- asus_wmi_hwmon_exit(asus);
- return result;
+ return 0;
}
/*
@@ -1836,7 +1811,6 @@ fail_backlight:
fail_rfkill:
asus_wmi_led_exit(asus);
fail_leds:
- asus_wmi_hwmon_exit(asus);
fail_hwmon:
asus_wmi_input_exit(asus);
fail_input:
@@ -1854,7 +1828,6 @@ static int asus_wmi_remove(struct platform_device *device)
wmi_remove_notify_handler(asus->driver->event_guid);
asus_wmi_backlight_exit(asus);
asus_wmi_input_exit(asus);
- asus_wmi_hwmon_exit(asus);
asus_wmi_led_exit(asus);
asus_wmi_rfkill_exit(asus);
asus_wmi_debugfs_exit(asus);
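Note: the asus-wmi hunks above (and the compal-laptop and eeepc-laptop hunks further down) all make the same conversion, from hwmon_device_register() plus a hand-rolled sysfs group to the groups-aware registration helper. A minimal sketch of the resulting pattern follows; the "example" driver, attribute and function names are hypothetical and only illustrate the API, they are not taken from any of these patches.

/* Sketch only: "example" names are hypothetical, not from the patches above. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

static ssize_t example_temp1(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* a real driver reads hardware here */
}
static DEVICE_ATTR(temp1_input, S_IRUGO, example_temp1, NULL);

static struct attribute *example_hwmon_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_hwmon);	/* provides example_hwmon_groups */

static int example_hwmon_init(struct device *parent, void *drvdata)
{
	struct device *hwmon;

	/* One call sets the hwmon name, drvdata and attribute groups;
	 * no separate sysfs_create_group()/unregister cleanup is needed. */
	hwmon = hwmon_device_register_with_groups(parent, "example",
						  drvdata, example_hwmon_groups);
	if (IS_ERR(hwmon))
		return PTR_ERR(hwmon);
	return 0;
}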
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 6dfa8d3b4eec..70d355a9ae2c 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -21,14 +21,13 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/rfkill.h>
MODULE_LICENSE("GPL");
-
struct cmpc_accel {
int sensitivity;
int g_select;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index eaa78edb1f4e..7297df2ebf50 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -173,8 +173,7 @@
/* ======= */
struct compal_data{
/* Fan control */
- struct device *hwmon_dev;
- int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by moterboard */
+ int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by motherboard */
unsigned char curr_pwm;
/* Power supply */
@@ -402,15 +401,6 @@ SIMPLE_MASKED_STORE_SHOW(wake_up_wlan, WAKE_UP_ADDR, WAKE_UP_WLAN)
SIMPLE_MASKED_STORE_SHOW(wake_up_key, WAKE_UP_ADDR, WAKE_UP_KEY)
SIMPLE_MASKED_STORE_SHOW(wake_up_mouse, WAKE_UP_ADDR, WAKE_UP_MOUSE)
-
-/* General hwmon interface */
-static ssize_t hwmon_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", DRIVER_NAME);
-}
-
-
/* Fan control interface */
static ssize_t pwm_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -665,55 +655,55 @@ static DEVICE_ATTR(wake_up_key,
static DEVICE_ATTR(wake_up_mouse,
0644, wake_up_mouse_show, wake_up_mouse_store);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, hwmon_name_show, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, label_vga, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN, NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store, 1);
-static SENSOR_DEVICE_ATTR(pwm1_enable,
- S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store, 0);
-
-static struct attribute *compal_attributes[] = {
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_show, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu, NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local, NULL);
+static DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge, NULL);
+static DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga, NULL);
+static DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN, NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu, NULL);
+static DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local, NULL);
+static DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS, NULL);
+static DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL);
+static DEVICE_ATTR(temp5_label, S_IRUGO, label_vga, NULL);
+static DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN, NULL);
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store);
+static DEVICE_ATTR(pwm1_enable,
+ S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store);
+
+static struct attribute *compal_platform_attrs[] = {
&dev_attr_wake_up_pme.attr,
&dev_attr_wake_up_modem.attr,
&dev_attr_wake_up_lan.attr,
&dev_attr_wake_up_wlan.attr,
&dev_attr_wake_up_key.attr,
&dev_attr_wake_up_mouse.attr,
- /* Maybe put the sensor-stuff in a separate hwmon-driver? That way,
- * the hwmon sysfs won't be cluttered with the above files. */
- &sensor_dev_attr_name.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- &sensor_dev_attr_temp5_label.dev_attr.attr,
- &sensor_dev_attr_temp6_label.dev_attr.attr,
NULL
};
+static struct attribute_group compal_platform_attr_group = {
+ .attrs = compal_platform_attrs
+};
-static struct attribute_group compal_attribute_group = {
- .attrs = compal_attributes
+static struct attribute *compal_hwmon_attrs[] = {
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp2_input.attr,
+ &dev_attr_temp3_input.attr,
+ &dev_attr_temp4_input.attr,
+ &dev_attr_temp5_input.attr,
+ &dev_attr_temp6_input.attr,
+ &dev_attr_temp1_label.attr,
+ &dev_attr_temp2_label.attr,
+ &dev_attr_temp3_label.attr,
+ &dev_attr_temp4_label.attr,
+ &dev_attr_temp5_label.attr,
+ &dev_attr_temp6_label.attr,
+ NULL
};
+ATTRIBUTE_GROUPS(compal_hwmon);
static int compal_probe(struct platform_device *);
static int compal_remove(struct platform_device *);
@@ -1021,30 +1011,28 @@ static int compal_probe(struct platform_device *pdev)
{
int err;
struct compal_data *data;
+ struct device *hwmon_dev;
if (!extra_features)
return 0;
/* Fan control */
- data = kzalloc(sizeof(struct compal_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
initialize_fan_control_data(data);
- err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
- if (err) {
- kfree(data);
+ err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ if (err)
return err;
- }
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj,
- &compal_attribute_group);
- kfree(data);
- return err;
+ hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRIVER_NAME, data,
+ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
}
/* Power supply */
@@ -1054,6 +1042,10 @@ static int compal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
return 0;
+
+remove:
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ return err;
}
static void __exit compal_cleanup(void)
@@ -1080,12 +1072,9 @@ static int compal_remove(struct platform_device *pdev)
pwm_disable_control();
data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
power_supply_unregister(&data->psy);
- kfree(data);
-
- sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
return 0;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index c608b1d33f4a..fed4111ac31a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -559,19 +559,45 @@ static void dell_update_rfkill(struct work_struct *ignored)
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & 0x20)
+ return false;
+
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ switch (data) {
+ case 0x8:
+ schedule_delayed_work(&dell_rfkill_work,
+ round_jiffies_relative(HZ / 4));
+ break;
+ }
+ extended = false;
+ }
+
+ return false;
+}
static int __init dell_setup_rfkill(void)
{
- int status;
- int ret;
+ int status, ret, whitelisted;
const char *product;
/*
- * rfkill causes trouble on various non Latitudes, according to Dell
- * actually testing the rfkill functionality is only done on Latitudes.
+ * rfkill support causes trouble on various models, mostly Inspirons.
+ * So we whitelist certain series, and don't support rfkill on others.
*/
+ whitelisted = 0;
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
+ if (product && (strncmp(product, "Latitude", 8) == 0 ||
+ strncmp(product, "Precision", 9) == 0))
+ whitelisted = 1;
+ if (!force_rfkill && !whitelisted)
return 0;
get_buffer();
@@ -633,7 +659,16 @@ static int __init dell_setup_rfkill(void)
goto err_wwan;
}
+ ret = i8042_install_filter(dell_laptop_i8042_filter);
+ if (ret) {
+ pr_warn("Unable to install key filter\n");
+ goto err_filter;
+ }
+
return 0;
+err_filter:
+ if (wwan_rfkill)
+ rfkill_unregister(wwan_rfkill);
err_wwan:
rfkill_destroy(wwan_rfkill);
if (bluetooth_rfkill)
@@ -684,7 +719,7 @@ static int dell_send_intensity(struct backlight_device *bd)
out:
release_buffer();
- return 0;
+ return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
@@ -755,30 +790,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
-{
- static bool extended;
-
- if (str & 0x20)
- return false;
-
- if (unlikely(data == 0xe0)) {
- extended = true;
- return false;
- } else if (unlikely(extended)) {
- switch (data) {
- case 0x8:
- schedule_delayed_work(&dell_rfkill_work,
- round_jiffies_relative(HZ / 4));
- break;
- }
- extended = false;
- }
-
- return false;
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -828,12 +839,6 @@ static int __init dell_init(void)
goto fail_rfkill;
}
- ret = i8042_install_filter(dell_laptop_i8042_filter);
- if (ret) {
- pr_warn("Unable to install key filter\n");
- goto fail_filter;
- }
-
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
@@ -885,7 +890,6 @@ static int __init dell_init(void)
fail_backlight:
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
dell_cleanup_rfkill();
fail_rfkill:
free_page((unsigned long)bufferpage);
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index bcf8cc6b5537..dbc97a33bbc8 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 60e0900bc117..390e8e33d5e3 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -32,7 +32,6 @@
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
#include <linux/dmi.h>
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 7029cba7025b..399e8c562192 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -28,8 +28,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
@@ -166,7 +165,6 @@ struct eeepc_laptop {
struct platform_device *platform_device;
struct acpi_device *device; /* the device we are in */
- struct device *hwmon_device;
struct backlight_device *backlight_device;
struct input_dev *inputdev;
@@ -1069,7 +1067,7 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
{ \
return store_sys_hwmon(_get, buf, count); \
} \
- static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+ static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name);
EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
@@ -1077,55 +1075,26 @@ EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "eeepc\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
+static struct attribute *hwmon_attrs[] = {
+ &dev_attr_pwm1.attr,
+ &dev_attr_fan1_input.attr,
+ &dev_attr_pwm1_enable.attr,
NULL
};
-
-static struct attribute_group hwmon_attribute_group = {
- .attrs = hwmon_attributes
-};
-
-static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
-{
- struct device *hwmon;
-
- hwmon = eeepc->hwmon_device;
- if (!hwmon)
- return;
- sysfs_remove_group(&hwmon->kobj,
- &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- eeepc->hwmon_device = NULL;
-}
+ATTRIBUTE_GROUPS(hwmon);
static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
+ struct device *dev = &eeepc->platform_device->dev;
struct device *hwmon;
- int result;
- hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+ hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
+ hwmon_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register eeepc hwmon device\n");
- eeepc->hwmon_device = NULL;
return PTR_ERR(hwmon);
}
- eeepc->hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj,
- &hwmon_attribute_group);
- if (result)
- eeepc_hwmon_exit(eeepc);
- return result;
+ return 0;
}
/*
@@ -1481,7 +1450,6 @@ static int eeepc_acpi_add(struct acpi_device *device)
fail_rfkill:
eeepc_led_exit(eeepc);
fail_led:
- eeepc_hwmon_exit(eeepc);
fail_hwmon:
eeepc_input_exit(eeepc);
fail_input:
@@ -1501,7 +1469,6 @@ static int eeepc_acpi_remove(struct acpi_device *device)
eeepc_backlight_exit(eeepc);
eeepc_rfkill_exit(eeepc);
eeepc_input_exit(eeepc);
- eeepc_hwmon_exit(eeepc);
eeepc_led_exit(eeepc);
eeepc_platform_exit(eeepc);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index af67e6e56ebb..6112933f6278 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -33,7 +33,7 @@
#include <linux/input/sparse-keymap.h>
#include <linux/dmi.h>
#include <linux/fb.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include "asus-wmi.h"
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 9d30d69aa78f..e6f336270c21 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -66,7 +66,6 @@
#include <linux/backlight.h>
#include <linux/input.h>
#include <linux/kfifo.h>
-#include <linux/video_output.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -633,7 +632,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
static int acpi_fujitsu_add(struct acpi_device *device)
{
- int result = 0;
int state = 0;
struct input_dev *input;
int error;
@@ -669,8 +667,8 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
- if (result) {
+ error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+ if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
@@ -700,7 +698,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();
- return result;
+ return 0;
err_unregister_input_dev:
input_unregister_device(input);
@@ -708,7 +706,7 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_stop:
- return result;
+ return error;
}
static int acpi_fujitsu_remove(struct acpi_device *device)
@@ -831,8 +829,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
- if (result) {
+ error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+ if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
@@ -907,7 +905,7 @@ err_free_input_dev:
err_free_fifo:
kfifo_free(&fujitsu_hotkey->fifo);
err_stop:
- return result;
+ return error;
}
static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
new file mode 100644
index 000000000000..415348fc1210
--- /dev/null
+++ b/drivers/platform/x86/hp-wireless.c
@@ -0,0 +1,132 @@
+/*
+ * hp-wireless button for Windows 8
+ *
+ * Copyright (C) 2014 Alex Hung <alex.hung@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+
+static struct input_dev *hpwl_input_dev;
+
+static const struct acpi_device_id hpwl_ids[] = {
+ {"HPQ6001", 0},
+ {"", 0},
+};
+
+static int hp_wireless_input_setup(void)
+{
+ int err;
+
+ hpwl_input_dev = input_allocate_device();
+ if (!hpwl_input_dev)
+ return -ENOMEM;
+
+ hpwl_input_dev->name = "HP Wireless hotkeys";
+ hpwl_input_dev->phys = "hpq6001/input0";
+ hpwl_input_dev->id.bustype = BUS_HOST;
+ hpwl_input_dev->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_RFKILL, hpwl_input_dev->keybit);
+
+ err = input_register_device(hpwl_input_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(hpwl_input_dev);
+ return err;
+}
+
+static void hp_wireless_input_destroy(void)
+{
+ input_unregister_device(hpwl_input_dev);
+}
+
+static void hpwl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ if (event != 0x80) {
+ pr_info("Received unknown event (0x%x)\n", event);
+ return;
+ }
+
+ input_report_key(hpwl_input_dev, KEY_RFKILL, 1);
+ input_sync(hpwl_input_dev);
+ input_report_key(hpwl_input_dev, KEY_RFKILL, 0);
+ input_sync(hpwl_input_dev);
+}
+
+static int hpwl_add(struct acpi_device *device)
+{
+ int err;
+
+ err = hp_wireless_input_setup();
+ return err;
+}
+
+static int hpwl_remove(struct acpi_device *device)
+{
+ hp_wireless_input_destroy();
+ return 0;
+}
+
+static struct acpi_driver hpwl_driver = {
+ .name = "hp-wireless",
+ .owner = THIS_MODULE,
+ .ids = hpwl_ids,
+ .ops = {
+ .add = hpwl_add,
+ .remove = hpwl_remove,
+ .notify = hpwl_notify,
+ },
+};
+
+static int __init hpwl_init(void)
+{
+ int err;
+
+ pr_info("Initializing HPQ6001 module\n");
+ err = acpi_bus_register_driver(&hpwl_driver);
+ if (err) {
+ pr_err("Unable to register HP wireless control driver.\n");
+ goto error_acpi_register;
+ }
+
+ return 0;
+
+error_acpi_register:
+ return err;
+}
+
+static void __exit hpwl_exit(void)
+{
+ pr_info("Exiting HPQ6001 module\n");
+ acpi_bus_unregister_driver(&hpwl_driver);
+}
+
+module_init(hpwl_init);
+module_exit(hpwl_exit);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index a8e43cf70fac..3dc934438c28 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -36,7 +36,7 @@
#include <linux/uaccess.h>
#include <linux/leds.h>
#include <linux/atomic.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include "../../misc/lis3lv02d/lis3lv02d.h"
#define DRIVER_NAME "hp_accel"
@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
static struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
@@ -106,7 +107,7 @@ int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
{
struct acpi_device *dev = lis3->bus_priv;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
@@ -129,7 +130,7 @@ int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
*
* Returns 0 on success.
*/
-int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
{
struct acpi_device *dev = lis3->bus_priv;
unsigned long long ret; /* Not used when writing */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 19ec95147f69..6dd060a0bb65 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index a2083a9e5662..d45bca34bf1b 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 1838400dc036..04cf5dffdfd9 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
new file mode 100644
index 000000000000..f96626b17260
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.c
@@ -0,0 +1,224 @@
+/*
+ * Baytrail IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements BayTrail-specific access to this interface.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include "intel_baytrail.h"
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev; /* one mbi device */
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCRX_OFFSET, mcrx);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+ }
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+
+ result = pci_read_config_dword(mbi_pdev,
+ BT_MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto iosf_mbi_read_err;
+
+ return 0;
+
+iosf_mbi_read_err:
+ dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+ result);
+ return result;
+}
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCRX_OFFSET, mcrx);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+ }
+
+ result = pci_write_config_dword(mbi_pdev,
+ BT_MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto iosf_mbi_write_err;
+
+ return 0;
+
+iosf_mbi_write_err:
+ dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+ result);
+ return result;
+}
+
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_read);
+
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_write);
+
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ BUG_ON(port == BT_MBI_UNIT_GFX);
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+ mcrx = offset & BT_MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(bt_mbi_modify);
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init bt_mbi_init(void)
+{
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit bt_mbi_exit(void)
+{
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ if (mbi_pdev) {
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+ }
+}
+
+module_init(bt_mbi_init);
+module_exit(bt_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
new file mode 100644
index 000000000000..8bcc311262e9
--- /dev/null
+++ b/drivers/platform/x86/intel_baytrail.h
@@ -0,0 +1,90 @@
+/*
+ * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
+ */
+
+#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
+#define INTEL_BAYTRAIL_MBI_SYMS_H
+
+#define BT_MBI_MCR_OFFSET 0xD0
+#define BT_MBI_MDR_OFFSET 0xD4
+#define BT_MBI_MCRX_OFFSET 0xD8
+
+#define BT_MBI_RD_MASK 0xFEFFFFFF
+#define BT_MBI_WR_MASK 0x01000000
+
+#define BT_MBI_MASK_HI 0xFFFFFF00
+#define BT_MBI_MASK_LO 0x000000FF
+#define BT_MBI_ENABLE 0xF0
+
+/* BT-SB unit access methods */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/**
+ * bt_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * bt_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * bt_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
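For illustration of the interface documented above, a minimal consumer sketch follows; it is not part of the patch. The register offset and the bit being set are hypothetical, only the function names and the unit/opcode macros come from this header.

/* Sketch only: MY_PUNIT_REG and the bit being set are hypothetical. */
#include <linux/types.h>
#include <linux/printk.h>
#include "intel_baytrail.h"

#define MY_PUNIT_REG	0x00	/* hypothetical PUNIT register offset */

static int example_punit_set_bit0(void)
{
	u32 val;
	int ret;

	/* Unmasked read of a PMC-unit register over the sideband mailbox. */
	ret = bt_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ, MY_PUNIT_REG, &val);
	if (ret)
		return ret;
	pr_debug("punit reg 0x%x = 0x%x\n", MY_PUNIT_REG, val);

	/* Masked write: set bit 0 and leave the other bits untouched. */
	return bt_mbi_modify(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE,
			     MY_PUNIT_REG, 0x1, 0x1);
}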
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 11244f8703c4..e8b46d2c468c 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -36,10 +36,8 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
-
#include <linux/thermal.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Thomas Sujith");
MODULE_AUTHOR("Zhang Rui");
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index f6f18cde0f11..4bc960416785 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -50,9 +50,6 @@
#include <linux/platform_device.h>
#include <linux/dmi.h>
#include <linux/rfkill.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
#define DRIVER_NAME "intel_oaktrail"
#define DRIVER_VERSION "0.4ac1"
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 60ea476a9130..76ca094ed012 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -62,12 +62,10 @@
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-enum {
- SCU_IPC_LINCROFT,
- SCU_IPC_PENWELL,
- SCU_IPC_CLOVERVIEW,
- SCU_IPC_TANGIER,
-};
+#define PCI_DEVICE_ID_LINCROFT 0x082a
+#define PCI_DEVICE_ID_PENWELL 0x080e
+#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
+#define PCI_DEVICE_ID_TANGIER 0x11a0
/* intel scu ipc driver data*/
struct intel_scu_ipc_pdata_t {
@@ -78,35 +76,29 @@ struct intel_scu_ipc_pdata_t {
u8 irq_mode;
};
-static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
- [SCU_IPC_LINCROFT] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 0,
- },
- [SCU_IPC_PENWELL] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 1,
- },
- [SCU_IPC_CLOVERVIEW] = {
- .ipc_base = 0xff11c000,
- .i2c_base = 0xff12b000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 1,
- },
- [SCU_IPC_TANGIER] = {
- .ipc_base = 0xff009000,
- .i2c_base = 0xff00d000,
- .ipc_len = 0x100,
- .i2c_len = 0x10,
- .irq_mode = 0,
- },
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
+};
+
+/* Penwell and Cloverview */
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
+ .ipc_base = 0xff11c000,
+ .i2c_base = 0xff12b000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 1,
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
+ .ipc_base = 0xff009000,
+ .i2c_base = 0xff00d000,
+ .ipc_len = 0x100,
+ .i2c_len = 0x10,
+ .irq_mode = 0,
};
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
@@ -583,15 +575,14 @@ static irqreturn_t ioc(int irq, void *dev_id)
*/
static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err, pid;
+ int err;
struct intel_scu_ipc_pdata_t *pdata;
resource_size_t pci_resource;
if (ipcdev.pdev) /* We support only one SCU */
return -EBUSY;
- pid = id->driver_data;
- pdata = &intel_scu_ipc_pdata[pid];
+ pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
ipcdev.pdev = pci_dev_get(dev);
ipcdev.irq_mode = pdata->irq_mode;
@@ -650,11 +641,21 @@ static void ipc_remove(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
- {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
- {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
- {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
- {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
- { 0,}
+ {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
+ (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
+ (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
+ (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+ }, {
+ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
+ (kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
+ }, {
+ 0,
+ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
index 0aea63b3729a..f4bad83053a9 100644
--- a/drivers/platform/x86/mxm-wmi.c
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/mxm-wmi.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Dave Airlie");
MODULE_DESCRIPTION("MXM WMI Driver");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 3008fd20572e..609d38779b26 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -125,12 +125,10 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-
#ifndef ACPI_HOTKEY_COMPONENT
#define ACPI_HOTKEY_COMPONENT 0x10000000
#endif
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
index 47ae0c47d4b5..c9f6e511daa6 100644
--- a/drivers/platform/x86/pvpanic.c
+++ b/drivers/platform/x86/pvpanic.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index cae7098e9b0d..5413f62d2e61 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define SAMSUNGQ10_BL_MAX_INTENSITY 7
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index fb233ae7bb0e..8f8551a63cc0 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -61,9 +61,6 @@
#include <linux/workqueue.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
#include <linux/sonypi.h>
#include <linux/sony-laptop.h>
#include <linux/rfkill.h>
@@ -71,6 +68,7 @@
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
+#include <asm/uaccess.h>
#define dprintk(fmt, ...) \
do { \
@@ -791,7 +789,7 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
{
int ret = 0;
- size_t len = len;
+ size_t len;
union acpi_object *object = __call_snc_method(handle, name, value);
if (!object)
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 9b93fdb61ed7..6a6ea28a7e51 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -32,9 +32,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 58b0274d24cc..94bb6157c957 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -61,7 +61,6 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-
#include <linux/nvram.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -74,21 +73,16 @@
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/rfkill.h>
-#include <asm/uaccess.h>
-
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
-
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include <linux/thinkpad_acpi.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
-
-#include <acpi/acpi_drivers.h>
-
-#include <linux/pci_ids.h>
-
-#include <linux/thinkpad_acpi.h>
+#include <asm/uaccess.h>
/* ThinkPad CMOS commands */
#define TP_CMOS_VOLUME_DOWN 0
@@ -6782,8 +6776,9 @@ static int __init volume_create_alsa_mixer(void)
struct snd_kcontrol *ctl_mute;
int rc;
- rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
- sizeof(struct tpacpi_alsa_data), &card);
+ rc = snd_card_new(&tpacpi_pdev->dev,
+ alsa_index, alsa_id, THIS_MODULE,
+ sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
pr_err("Failed to create ALSA card structures: %d\n", rc);
return 1;
@@ -6834,7 +6829,6 @@ static int __init volume_create_alsa_mixer(void)
}
data->ctl_mute_id = &ctl_mute->id;
- snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
if (rc < 0) {
pr_err("Failed to register ALSA card: %d\n", rc);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 7fce391818d3..90dd7645a9e5 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -54,11 +54,9 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/i8042.h>
-
+#include <linux/acpi.h>
#include <asm/uaccess.h>
-#include <acpi/acpi_drivers.h>
-
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
@@ -151,6 +149,7 @@ static const struct acpi_device_id toshiba_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
static const struct key_entry toshiba_acpi_keymap[] = {
+ { KE_KEY, 0x9e, { KEY_RFKILL } },
{ KE_KEY, 0x101, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 74dd01ae343b..2cb1ea62b4a7 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -23,14 +23,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
MODULE_LICENSE("GPL");
-
static int toshiba_bt_rfkill_add(struct acpi_device *device);
static int toshiba_bt_rfkill_remove(struct acpi_device *device);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c2e7b2657aeb..43d13295e63d 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -37,8 +37,6 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 4b1377bd5944..49cbccec6e2d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -18,8 +18,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/input.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#define MODULE_NAME "xo15-ebook"
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index bc00693d0c79..874c236ac1a7 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -239,6 +239,7 @@ int pnp_add_card(struct pnp_card *card)
error = device_register(&card->dev);
if (error) {
dev_err(&card->dev, "could not register (err=%d)\n", error);
+ put_device(&card->dev);
return error;
}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 14655a0f0431..9f611cbbc294 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -24,7 +24,6 @@
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
-#include <acpi/acpi_bus.h>
#include "../base.h"
#include "pnpacpi.h"
@@ -242,6 +241,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
struct pnp_dev *dev;
char *pnpid;
struct acpi_hardware_id *id;
+ int error;
/* Skip devices that are already bound */
if (device->physical_node_count)
@@ -300,10 +300,16 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
/* clear out the damaged flags */
if (!dev->active)
pnp_init_resources(dev);
- pnp_add_device(dev);
+
+ error = pnp_add_device(dev);
+ if (error) {
+ put_device(&dev->dev);
+ return error;
+ }
+
num++;
- return AE_OK;
+ return 0;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
@@ -329,20 +335,15 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
-static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
+static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
{
- struct device *adev;
- struct acpi_device *acpi;
-
- adev = bus_find_device(&acpi_bus_type, NULL,
- to_pnp_dev(dev), acpi_pnp_match);
- if (!adev)
- return -ENODEV;
+ dev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev),
+ acpi_pnp_match);
+ if (!dev)
+ return NULL;
- acpi = to_acpi_device(adev);
- *handle = acpi->handle;
- put_device(adev);
- return 0;
+ put_device(dev);
+ return to_acpi_device(dev);
}
/* complete initialization of a PNPACPI device includes having
@@ -356,7 +357,7 @@ static bool acpi_pnp_bus_match(struct device *dev)
static struct acpi_bus_type __initdata acpi_pnp_bus = {
.name = "PNP",
.match = acpi_pnp_bus_match,
- .find_device = acpi_pnp_find_device,
+ .find_companion = acpi_pnp_find_companion,
};
int pnpacpi_disabled __initdata;
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index 3e60225b0227..051ef9699777 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -1,7 +1,6 @@
#ifndef ACPI_PNP_H
#define ACPI_PNP_H
-#include <acpi/acpi_bus.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 167f3d00c916..66977ebf13b3 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
struct resource r = {0};
int i, flags;
- if (acpi_dev_resource_memory(res, &r)
- || acpi_dev_resource_io(res, &r)
- || acpi_dev_resource_address_space(res, &r)
+ if (acpi_dev_resource_address_space(res, &r)
|| acpi_dev_resource_ext_address_space(res, &r)) {
pnp_add_resource(dev, &r);
return AE_OK;
@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
}
switch (res->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (acpi_dev_resource_memory(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
+ case ACPI_RESOURCE_TYPE_IO:
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ if (acpi_dev_resource_io(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 769d265b221b..deb7f4bcdb7b 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -21,7 +21,7 @@
#include "pnpbios.h"
-static struct {
+__visible struct {
u16 offset;
u16 segment;
} pnp_bios_callpoint;
@@ -41,6 +41,7 @@ asmlinkage void pnp_bios_callfunc(void);
__asm__(".text \n"
__ALIGN_STR "\n"
+ ".globl pnp_bios_callfunc\n"
"pnp_bios_callfunc:\n"
" pushl %edx \n"
" pushl %ecx \n"
@@ -66,9 +67,9 @@ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
* after PnP BIOS oopses.
*/
-u32 pnp_bios_fault_esp;
-u32 pnp_bios_fault_eip;
-u32 pnp_bios_is_utter_crap = 0;
+__visible u32 pnp_bios_fault_esp;
+__visible u32 pnp_bios_fault_eip;
+__visible u32 pnp_bios_is_utter_crap = 0;
static spinlock_t pnp_bios_lock;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 9b86a01af631..074569e77d22 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -312,18 +312,19 @@ static int __init insert_device(struct pnp_bios_node *node)
struct list_head *pos;
struct pnp_dev *dev;
char id[8];
+ int error;
/* check if the device is already added */
list_for_each(pos, &pnpbios_protocol.devices) {
dev = list_entry(pos, struct pnp_dev, protocol_list);
if (dev->number == node->handle)
- return -1;
+ return -EEXIST;
}
pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
if (!dev)
- return -1;
+ return -ENOMEM;
pnpbios_parse_data_stream(dev, node);
dev->active = pnp_is_active(dev);
@@ -342,7 +343,12 @@ static int __init insert_device(struct pnp_bios_node *node)
if (!dev->active)
pnp_init_resources(dev);
- pnp_add_device(dev);
+ error = pnp_add_device(dev);
+ if (error) {
+ put_device(&dev->dev);
+ return error;
+ }
+
pnpbios_interface_attach_device(node);
return 0;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index d95e101ffb43..01712cbfd92e 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -31,7 +31,7 @@ static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some
* option registration
*/
-struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
+static struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
unsigned int option_flags)
{
struct pnp_option *option;
@@ -385,7 +385,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
* device is active because it itself may be in use */
if (!dev->active) {
if (request_irq(*irq, pnp_test_handler,
- IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL))
+ IRQF_PROBE_SHARED, "pnp", NULL))
return 0;
free_irq(*irq, NULL);
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 0196acf05231..ba6975123071 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -384,6 +384,7 @@ config AB8500_BM
config BATTERY_GOLDFISH
tristate "Goldfish battery driver"
depends on GOLDFISH || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(isp->phy);
goto fail0;
}
- if (!isp->phy)
- goto fail0;
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 3c6768378a94..61b51e17d932 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -834,7 +834,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
}
static const struct x86_cpu_id energy_unit_quirk_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
{}
};
@@ -947,11 +947,11 @@ static void package_power_limit_irq_restore(int package_id)
}
static const struct x86_cpu_id rapl_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
- { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
- { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
- { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
- { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+ { X86_VENDOR_INTEL, 6, 0x2a},/* Sandy Bridge */
+ { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
+ { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
+ { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
/* TODO: Add more CPU IDs after testing */
{}
};
@@ -1147,6 +1147,11 @@ static int rapl_check_domain(int cpu, int domain)
if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
return -ENODEV;
+ /* PP1/uncore/graphics domain may not be active at the time of
+ * driver loading. So skip further checks.
+ */
+ if (domain == RAPL_DOMAIN_PP1)
+ return 0;
/* energy counters roll slowly on some domains */
while (++retry < 10) {
usleep_range(10000, 15000);
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index fb7300837fee..bc1e5139ba29 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -699,8 +699,6 @@ int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
BUG_ON(!bytes);
- PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
-
spin_lock_irqsave(&priv->rx_list.lock, flags);
if (priv->rx_list.bytes_held >= bytes) {
dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
@@ -1052,7 +1050,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
INIT_LIST_HEAD(&priv->rx_list.head);
spin_lock_init(&priv->rx_list.lock);
- INIT_WORK(&priv->rx_list.work.work, NULL);
+ INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
priv->rx_list.work.trigger = 0;
priv->rx_list.work.dev = dev;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5be73ba0519a..5a7910e61e17 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -73,6 +73,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM
select PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 7acab93d5a47..22f2f2857b82 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -41,6 +41,15 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_ATMEL
+ tristate "Atmel PWM support"
+ depends on ARCH_AT91
+ help
+ Generic PWM framework driver for Atmel SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel.
+
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
depends on ATMEL_TCLIB && OF
@@ -122,7 +131,8 @@ config PWM_MXS
config PWM_PCA9685
tristate "NXP PCA9685 PWM driver"
- depends on OF && REGMAP_I2C
+ depends on OF && I2C
+ select REGMAP_I2C
help
Generic PWM framework driver for NXP PCA9685 LED controller.
@@ -149,7 +159,7 @@ config PWM_PXA
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
This driver exposes the Timer Pulse Unit (TPU) PWM controller found
in Renesas chips through the PWM API.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 4abf337dcd02..d8906ec69976 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 2ca95042a0b9..a80471399c20 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -808,12 +808,12 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
if (test_bit(PWMF_REQUESTED, &pwm->flags))
- seq_printf(s, " requested");
+ seq_puts(s, " requested");
if (test_bit(PWMF_ENABLED, &pwm->flags))
- seq_printf(s, " enabled");
+ seq_puts(s, " enabled");
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
}
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
new file mode 100644
index 000000000000..bf4144a14661
--- /dev/null
+++ b/drivers/pwm/pwm-atmel.c
@@ -0,0 +1,395 @@
+/*
+ * Driver for Atmel Pulse Width Modulation Controller
+ *
+ * Copyright (C) 2013 Atmel Corporation
+ * Bo Shen <voice.shen@atmel.com>
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/* The following are the global registers of the PWM controller */
+#define PWM_ENA 0x04
+#define PWM_DIS 0x08
+#define PWM_SR 0x0C
+/* Bit field in SR */
+#define PWM_SR_ALL_CH_ON 0x0F
+
+/* The following are the per-channel PWM registers */
+#define PWM_CH_REG_OFFSET 0x200
+#define PWM_CH_REG_SIZE 0x20
+
+#define PWM_CMR 0x0
+/* Bit field in CMR */
+#define PWM_CMR_CPOL (1 << 9)
+#define PWM_CMR_UPD_CDTY (1 << 10)
+
+/* The following registers are for PWM v1 */
+#define PWMV1_CDTY 0x04
+#define PWMV1_CPRD 0x08
+#define PWMV1_CUPD 0x10
+
+/* The following registers are for PWM v2 */
+#define PWMV2_CDTY 0x04
+#define PWMV2_CDTYUPD 0x08
+#define PWMV2_CPRD 0x0C
+#define PWMV2_CPRDUPD 0x10
+
+/*
+ * Max value for duty and period
+ *
+ * Although the duty and period registers are 32 bits wide,
+ * only the lower 16 bits are used.
+ */
+#define PWM_MAX_DTY 0xFFFF
+#define PWM_MAX_PRD 0xFFFF
+#define PRD_MAX_PRES 10
+
+struct atmel_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+
+ void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd);
+};
+
+static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_pwm_chip, chip);
+}
+
+static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip,
+ unsigned long offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void atmel_pwm_writel(struct atmel_pwm_chip *chip,
+ unsigned long offset, unsigned long val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
+static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip,
+ unsigned int ch, unsigned long offset)
+{
+ unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
+
+ return readl_relaxed(chip->base + base + offset);
+}
+
+static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
+ unsigned int ch, unsigned long offset,
+ unsigned long val)
+{
+ unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
+
+ writel_relaxed(val, chip->base + base + offset);
+}
+
+static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ unsigned long clk_rate, prd, dty;
+ unsigned long long div;
+ unsigned int pres = 0;
+ int ret;
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
+ dev_err(chip->dev, "cannot change PWM period while enabled\n");
+ return -EBUSY;
+ }
+
+ clk_rate = clk_get_rate(atmel_pwm->clk);
+ div = clk_rate;
+
+ /* Calculate the period cycles */
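+ /*
+ * prd = clk_rate / 2^pres * period_ns / NSEC_PER_SEC; the prescaler
+ * is raised until the result fits the 16-bit period register.
+ */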
+ while (div > PWM_MAX_PRD) {
+ div = clk_rate / (1 << pres);
+ div = div * period_ns;
+ /* 1 s = 1000000000 ns */
+ do_div(div, 1000000000);
+
+ if (pres++ > PRD_MAX_PRES) {
+ dev_err(chip->dev, "pres exceeds the maximum value\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Calculate the duty cycles */
+ prd = div;
+ div *= duty_ns;
+ do_div(div, period_ns);
+ dty = div;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, pres);
+ atmel_pwm->config(chip, pwm, dty, prd);
+
+ clk_disable(atmel_pwm->clk);
+ return ret;
+}
+
+static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ unsigned int val;
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * If the PWM channel is enabled, write the new duty cycle to the
+ * update register; bit 10 of CMR must be cleared for the update to apply
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty);
+
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+ val &= ~PWM_CMR_UPD_CDTY;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
+ } else {
+ /*
+ * If the PWM channel is disabled, write the values to the duty and
+ * period registers directly.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd);
+ }
+}
+
+static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /*
+ * If the PWM channel is enabled, use the duty update register
+ * to change the duty cycle.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTYUPD, dty);
+ } else {
+ /*
+ * If the PWM channel is disabled, write the values to the duty and
+ * period registers directly.
+ */
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTY, dty);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CPRD, prd);
+ }
+}
+
+static int atmel_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ u32 val;
+ int ret;
+
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ val &= ~PWM_CMR_CPOL;
+ else
+ val |= PWM_CMR_CPOL;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
+
+ clk_disable(atmel_pwm->clk);
+
+ return 0;
+}
+
+static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ int ret;
+
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
+
+ return 0;
+}
+
+static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+
+ atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
+
+ clk_disable(atmel_pwm->clk);
+}
+
+static const struct pwm_ops atmel_pwm_ops = {
+ .config = atmel_pwm_config,
+ .set_polarity = atmel_pwm_set_polarity,
+ .enable = atmel_pwm_enable,
+ .disable = atmel_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+struct atmel_pwm_data {
+ void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long dty, unsigned long prd);
+};
+
+static const struct atmel_pwm_data atmel_pwm_data_v1 = {
+ .config = atmel_pwm_config_v1,
+};
+
+static const struct atmel_pwm_data atmel_pwm_data_v2 = {
+ .config = atmel_pwm_config_v2,
+};
+
+static const struct platform_device_id atmel_pwm_devtypes[] = {
+ {
+ .name = "at91sam9rl-pwm",
+ .driver_data = (kernel_ulong_t)&atmel_pwm_data_v1,
+ }, {
+ .name = "sama5d3-pwm",
+ .driver_data = (kernel_ulong_t)&atmel_pwm_data_v2,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
+
+static const struct of_device_id atmel_pwm_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-pwm",
+ .data = &atmel_pwm_data_v1,
+ }, {
+ .compatible = "atmel,sama5d3-pwm",
+ .data = &atmel_pwm_data_v2,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
+
+static inline const struct atmel_pwm_data *
+atmel_pwm_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_device(atmel_pwm_dt_ids, &pdev->dev);
+ if (!match)
+ return NULL;
+
+ return match->data;
+ } else {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+
+ return (struct atmel_pwm_data *)id->driver_data;
+ }
+}
+
+static int atmel_pwm_probe(struct platform_device *pdev)
+{
+ const struct atmel_pwm_data *data;
+ struct atmel_pwm_chip *atmel_pwm;
+ struct resource *res;
+ int ret;
+
+ data = atmel_pwm_get_driver_data(pdev);
+ if (!data)
+ return -ENODEV;
+
+ atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
+ if (!atmel_pwm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(atmel_pwm->base))
+ return PTR_ERR(atmel_pwm->base);
+
+ atmel_pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(atmel_pwm->clk))
+ return PTR_ERR(atmel_pwm->clk);
+
+ ret = clk_prepare(atmel_pwm->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare PWM clock\n");
+ return ret;
+ }
+
+ atmel_pwm->chip.dev = &pdev->dev;
+ atmel_pwm->chip.ops = &atmel_pwm_ops;
+
+ if (pdev->dev.of_node) {
+ atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ atmel_pwm->chip.of_pwm_n_cells = 3;
+ }
+
+ atmel_pwm->chip.base = -1;
+ atmel_pwm->chip.npwm = 4;
+ atmel_pwm->config = data->config;
+
+ ret = pwmchip_add(&atmel_pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ platform_set_drvdata(pdev, atmel_pwm);
+
+ return ret;
+
+unprepare_clk:
+ clk_unprepare(atmel_pwm->clk);
+ return ret;
+}
+
+static int atmel_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
+
+ clk_unprepare(atmel_pwm->clk);
+
+ return pwmchip_remove(&atmel_pwm->chip);
+}
+
+static struct platform_driver atmel_pwm_driver = {
+ .driver = {
+ .name = "atmel-pwm",
+ .of_match_table = of_match_ptr(atmel_pwm_dt_ids),
+ },
+ .id_table = atmel_pwm_devtypes,
+ .probe = atmel_pwm_probe,
+ .remove = atmel_pwm_remove,
+};
+module_platform_driver(atmel_pwm_driver);
+
+MODULE_ALIAS("platform:atmel-pwm");
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("Atmel PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 33aa4461e1ce..e593e9c45c51 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -224,7 +224,7 @@ static struct platform_driver ep93xx_pwm_driver = {
module_platform_driver(ep93xx_pwm_driver);
MODULE_DESCRIPTION("Cirrus Logic EP93xx PWM driver");
-MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
- "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_ALIAS("platform:ep93xx-pwm");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 0a2ede3c3932..9c46209e1d02 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -165,13 +165,12 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
struct jz4740_pwm_chip *jz4740;
- int ret;
jz4740 = devm_kzalloc(&pdev->dev, sizeof(*jz4740), GFP_KERNEL);
if (!jz4740)
return -ENOMEM;
- jz4740->clk = clk_get(NULL, "ext");
+ jz4740->clk = devm_clk_get(&pdev->dev, "ext");
if (IS_ERR(jz4740->clk))
return PTR_ERR(jz4740->clk);
@@ -180,29 +179,16 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
- ret = pwmchip_add(&jz4740->chip);
- if (ret < 0) {
- clk_put(jz4740->clk);
- return ret;
- }
-
platform_set_drvdata(pdev, jz4740);
- return 0;
+ return pwmchip_add(&jz4740->chip);
}
static int jz4740_pwm_remove(struct platform_device *pdev)
{
struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&jz4740->chip);
- if (ret < 0)
- return ret;
- clk_put(jz4740->clk);
-
- return 0;
+ return pwmchip_remove(&jz4740->chip);
}
static struct platform_driver jz4740_pwm_driver = {
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 8a843a04c224..a40b9c34e9ff 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -52,8 +52,10 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
- if (test_and_set_bit(offset, &lp3943->pin_used))
+ if (test_and_set_bit(offset, &lp3943->pin_used)) {
+ kfree(pwm_map);
return ERR_PTR(-EBUSY);
+ }
}
return pwm_map;
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index a4d2164aaf55..8d995731cef8 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*
* 2008-02-13 initial version
- * eric miao <eric.miao@marvell.com>
+ * eric miao <eric.miao@marvell.com>
*/
#include <linux/module.h>
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
+#include <linux/of_device.h>
#include <asm/div64.h>
@@ -124,6 +125,46 @@ static struct pwm_ops pxa_pwm_ops = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OF
+/*
+ * Device tree users must create one device instance for each pwm channel.
+ * Hence we dispense with the HAS_SECONDARY_PWM and "tell" the original driver
+ * code that this is a single channel pxa25x-pwm. Currently all devices are
+ * supported identically.
+ */
+static struct of_device_id pwm_of_match[] = {
+ { .compatible = "marvell,pxa250-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa270-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa168-pwm", .data = &pwm_id_table[0]},
+ { .compatible = "marvell,pxa910-pwm", .data = &pwm_id_table[0]},
+ { }
+};
+MODULE_DEVICE_TABLE(of, pwm_of_match);
+#else
+#define pwm_of_match NULL
+#endif
+
+static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
+{
+ const struct of_device_id *id = of_match_device(pwm_of_match, dev);
+
+ return id ? id->data : NULL;
+}
+
+static struct pwm_device *
+pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ pwm = pwm_request_from_chip(pc, 0, NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[0]);
+
+ return pwm;
+}
+
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -131,6 +172,12 @@ static int pwm_probe(struct platform_device *pdev)
struct resource *r;
int ret = 0;
+ if (IS_ENABLED(CONFIG_OF) && id == NULL)
+ id = pxa_pwm_get_id_dt(&pdev->dev);
+
+ if (id == NULL)
+ return -EINVAL;
+
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (pwm == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
@@ -146,6 +193,11 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.base = -1;
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+ if (IS_ENABLED(CONFIG_OF)) {
+ pwm->chip.of_xlate = pxa_pwm_of_xlate;
+ pwm->chip.of_pwm_n_cells = 1;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pwm->mmio_base))
@@ -176,6 +228,7 @@ static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.owner = THIS_MODULE,
+ .of_match_table = pwm_of_match,
},
.probe = pwm_probe,
.remove = pwm_remove,
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 4e5c3d13d4f8..032092c7a6ae 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -279,7 +279,6 @@ static int ecap_pwm_remove(struct platform_device *pdev)
pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
pm_runtime_put_sync(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index a4d8f519d965..aee4471424d1 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -360,8 +360,8 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Enable TBCLK before enabling PWM device */
ret = clk_enable(pc->tbclk);
if (ret) {
- pr_err("Failed to enable TBCLK for %s\n",
- dev_name(pc->chip.dev));
+ dev_err(chip->dev, "Failed to enable TBCLK for %s\n",
+ dev_name(pc->chip.dev));
return ret;
}
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 8c20332d4825..4bd0c639e16d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -169,15 +169,7 @@ static struct attribute *pwm_attrs[] = {
&dev_attr_polarity.attr,
NULL
};
-
-static const struct attribute_group pwm_attr_group = {
- .attrs = pwm_attrs,
-};
-
-static const struct attribute_group *pwm_attr_groups[] = {
- &pwm_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(pwm);
static void pwm_export_release(struct device *child)
{
@@ -205,7 +197,7 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
export->child.release = pwm_export_release;
export->child.parent = parent;
export->child.devt = MKDEV(0, 0);
- export->child.groups = pwm_attr_groups;
+ export->child.groups = pwm_groups;
dev_set_name(&export->child, "pwm%u", pwm->hwpwm);
ret = device_register(&export->child);
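The ATTRIBUTE_GROUPS(pwm) helper used above replaces the deleted boilerplate; roughly (a simplified sketch of the macro from include/linux/sysfs.h, not its verbatim definition) it expands to:

static const struct attribute_group pwm_group = {
	.attrs = pwm_attrs,
};

static const struct attribute_group *pwm_groups[] = {
	&pwm_group,
	NULL,
};

which provides the pwm_groups array that export->child.groups now points at.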
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index b4b0d83f9ef6..7061ac0ad428 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
struct list_head free_list;
dma_cookie_t completed_cookie;
struct tasklet_struct tasklet;
+ bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 502663f5f7c6..91245f5dbe81 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
- tasklet_schedule(&bdma_chan->tasklet);
+ if (bdma_chan->active)
+ tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
}
#endif /* CONFIG_PCI_MSI */
- tasklet_enable(&bdma_chan->tasklet);
+ bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@ err_out:
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
BUG_ON(!list_empty(&bdma_chan->active_list));
BUG_ON(!list_empty(&bdma_chan->queue));
- tasklet_disable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector);
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector);
+ } else
+#endif
+ synchronize_irq(priv->pdev->irq);
+
+ tasklet_kill(&bdma_chan->tasklet);
spin_lock_bh(&bdma_chan->lock);
list_splice_init(&bdma_chan->free_list, &list);
spin_unlock_bh(&bdma_chan->lock);
- tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
bdma_chan->dchan.cookie = 1;
bdma_chan->dchan.chan_id = i;
bdma_chan->id = i;
+ bdma_chan->active = false;
spin_lock_init(&bdma_chan->lock);
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
(unsigned long)bdma_chan);
- tasklet_disable(&bdma_chan->tasklet);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
}
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index d333f7eac106..7a721d67e6ac 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -310,10 +310,8 @@ static int pm800_regulator_probe(struct platform_device *pdev)
pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
GFP_KERNEL);
- if (!pm800_data) {
- dev_err(&pdev->dev, "Failed to allocate pm800_regualtors");
+ if (!pm800_data)
return -ENOMEM;
- }
pm800_data->map = chip->subchip->regmap_power;
pm800_data->chip = chip;
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index f704d83c93c4..337634ad0562 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -2,7 +2,7 @@
* Regulators driver for Marvell 88PM8607
*
* Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -78,7 +78,7 @@ static const unsigned int BUCK2_suspend_table[] = {
};
static const unsigned int BUCK3_table[] = {
- 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
@@ -89,7 +89,7 @@ static const unsigned int BUCK3_table[] = {
};
static const unsigned int BUCK3_suspend_table[] = {
- 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
@@ -322,7 +322,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
- nproot = of_find_node_by_name(nproot, "regulators");
+ nproot = of_get_child_by_name(nproot, "regulators");
if (!nproot) {
dev_err(&pdev->dev, "failed to find regulators node\n");
return -ENODEV;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index db9ae6fa2404..1cd8584a7b88 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -70,6 +70,14 @@ config REGULATOR_88PM8607
help
This driver supports 88PM8607 voltage regulator chips.
+config REGULATOR_ACT8865
+ tristate "Active-semi act8865 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls an active-semi act8865 voltage output
+ regulator via I2C bus.
+
config REGULATOR_AD5398
tristate "Analog Devices AD5398/AD5821 regulators"
depends on I2C
@@ -131,6 +139,14 @@ config REGULATOR_AS3722
AS3722 PMIC. This will enable support for all the software
controllable DCDC/LDO regulators.
+config REGULATOR_BCM590XX
+ tristate "Broadcom BCM590xx PMU Regulators"
+ depends on MFD_BCM590XX
+ help
+ This driver provides support for the voltage regulators on the
+ BCM590xx PMUs. This will enable support for the software
+ controllable LDO/Switching regulators.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -249,6 +265,13 @@ config REGULATOR_LP8788
help
This driver supports LP8788 voltage regulator chip.
+config REGULATOR_MAX14577
+ tristate "Maxim 14577 regulator"
+ depends on MFD_MAX14577
+ help
+ This driver controls a Maxim 14577 regulator via I2C bus.
+ The regulators include safeout LDO and current regulator 'CHARGER'.
+
config REGULATOR_MAX1586
tristate "Maxim 1586/1587 voltage regulator"
depends on I2C
@@ -384,12 +407,12 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Support regulators on Freescale PFUZE100 PMIC"
+ tristate "Freescale PFUZE100/PFUZE200 regulator driver"
depends on I2C
select REGMAP_I2C
help
- Say y here to support the regulators found on the Freescale PFUZE100
- PMIC.
+ Say y here to support the regulators found on the Freescale
+ PFUZE100/PFUZE200 PMIC.
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
@@ -401,13 +424,21 @@ config REGULATOR_RC5T583
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
+config REGULATOR_S2MPA01
+ tristate "Samsung S2MPA01 voltage regulator"
+ depends on MFD_SEC_CORE
+ help
+ This driver controls Samsung S2MPA01 voltage output regulator
+ via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
+
config REGULATOR_S2MPS11
- tristate "Samsung S2MPS11 voltage regulator"
+ tristate "Samsung S2MPS11/S2MPS14 voltage regulator"
depends on MFD_SEC_CORE
help
- This driver supports a Samsung S2MPS11 voltage output regulator
- via I2C bus. S2MPS11 is comprised of high efficient Buck converters
- including Dual-Phase Buck converter, Buck-Boost converter, various LDOs.
+ This driver supports a Samsung S2MPS11/S2MPS14 voltage output
+ regulator via I2C bus. The chip comprises highly efficient Buck
+ converters including Dual-Phase Buck converter, Buck-Boost converter,
+ various LDOs.
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
@@ -417,6 +448,12 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_ST_PWM
+ tristate "STMicroelectronics PWM voltage regulator"
+ depends on ARCH_STI
+ help
+ This driver supports ST's PWM controlled voltage regulators.
+
config REGULATOR_TI_ABB
tristate "TI Adaptive Body Bias on-chip LDO"
depends on ARCH_OMAP
@@ -498,6 +535,15 @@ config REGULATOR_TPS65217
voltage regulators. It supports software based voltage control
for different voltage domains
+config REGULATOR_TPS65218
+ tristate "TI TPS65218 Power regulators"
+ depends on MFD_TPS65218 && OF
+ help
+ This driver supports TPS65218 voltage regulator chips. TPS65218
+ provides six step-down converters and one general-purpose LDO
+ voltage regulator. It supports software-based voltage control
+ for different voltage domains.
+
config REGULATOR_TPS6524X
tristate "TI TPS6524X Power regulators"
depends on SPI
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 01c597ea1744..f0fe0c50b59c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -14,11 +14,13 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
+obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
+obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -35,6 +37,7 @@ obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
+obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
@@ -55,8 +58,10 @@ obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_ST_PWM) += st-pwm.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
@@ -65,6 +70,7 @@ obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index f70a9bfa5ff2..c873ee0082cf 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -99,6 +99,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
static struct regulator_ops aat2870_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
.get_voltage_sel = aat2870_ldo_get_voltage_sel,
.enable = aat2870_ldo_enable,
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct ab3100_platform_data *plfdata,
struct regulator_init_data *init_data,
struct device_node *np,
- int id)
+ unsigned long id)
{
struct regulator_desc *desc;
struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
err = ab3100_regulator_register(
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
- (int) ab3100_regulator_matches[i].driver_data);
+ (unsigned long)ab3100_regulator_matches[i].driver_data);
if (err) {
ab3100_regulators_remove(pdev);
return err;
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 603f192e84f1..c625468c7f2c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -2998,37 +2998,6 @@ static void abx500_get_regulator_info(struct ab8500 *ab8500)
}
}
-static int ab8500_regulator_init_registers(struct platform_device *pdev,
- int id, int mask, int value)
-{
- struct ab8500_reg_init *reg_init = abx500_regulator.init;
- int err;
-
- BUG_ON(value & ~mask);
- BUG_ON(mask & ~reg_init[id].mask);
-
- /* initialize register */
- err = abx500_mask_and_set_register_interruptible(
- &pdev->dev,
- reg_init[id].bank,
- reg_init[id].addr,
- mask, value);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Failed to initialize 0x%02x, 0x%02x.\n",
- reg_init[id].bank,
- reg_init[id].addr);
- return err;
- }
- dev_vdbg(&pdev->dev,
- " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
- reg_init[id].bank,
- reg_init[id].addr,
- mask, value);
-
- return 0;
-}
-
static int ab8500_regulator_register(struct platform_device *pdev,
struct regulator_init_data *init_data,
int id, struct device_node *np)
@@ -3036,7 +3005,6 @@ static int ab8500_regulator_register(struct platform_device *pdev,
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
- int err;
/* assign per-regulator data */
info = &abx500_regulator.info[id];
@@ -3058,17 +3026,12 @@ static int ab8500_regulator_register(struct platform_device *pdev,
}
/* register regulator with framework */
- info->regulator = regulator_register(&info->desc, &config);
+ info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
if (IS_ERR(info->regulator)) {
- err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- /* when we fail, un-register all earlier regulators */
- while (--id >= 0) {
- info = &abx500_regulator.info[id];
- regulator_unregister(info->regulator);
- }
- return err;
+ return PTR_ERR(info->regulator);
}
return 0;
@@ -3095,9 +3058,7 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
- struct ab8500_platform_data *ppdata;
- struct ab8500_regulator_platform_data *pdata;
- int i, err;
+ int err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
@@ -3106,83 +3067,20 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
abx500_get_regulator_info(ab8500);
- if (np) {
- err = of_regulator_match(&pdev->dev, np,
- abx500_regulator.match,
- abx500_regulator.match_size);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Error parsing regulator init data: %d\n", err);
- return err;
- }
-
- err = ab8500_regulator_of_probe(pdev, np);
- return err;
- }
-
- ppdata = dev_get_platdata(ab8500->dev);
- if (!ppdata) {
- dev_err(&pdev->dev, "null parent pdata\n");
- return -EINVAL;
- }
-
- pdata = ppdata->regulator;
- if (!pdata) {
- dev_err(&pdev->dev, "null pdata\n");
- return -EINVAL;
- }
-
- /* make sure the platform data has the correct size */
- if (pdata->num_regulator != abx500_regulator.info_size) {
- dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
- return -EINVAL;
- }
-
- /* initialize debug (initial state is recorded with this call) */
- err = ab8500_regulator_debug_init(pdev);
- if (err)
+ err = of_regulator_match(&pdev->dev, np,
+ abx500_regulator.match,
+ abx500_regulator.match_size);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Error parsing regulator init data: %d\n", err);
return err;
-
- /* initialize registers */
- for (i = 0; i < pdata->num_reg_init; i++) {
- int id, mask, value;
-
- id = pdata->reg_init[i].id;
- mask = pdata->reg_init[i].mask;
- value = pdata->reg_init[i].value;
-
- /* check for configuration errors */
- BUG_ON(id >= abx500_regulator.init_size);
-
- err = ab8500_regulator_init_registers(pdev, id, mask, value);
- if (err < 0)
- return err;
}
-
- /* register all regulators */
- for (i = 0; i < abx500_regulator.info_size; i++) {
- err = ab8500_regulator_register(pdev, &pdata->regulator[i],
- i, NULL);
- if (err < 0)
- return err;
- }
-
- return 0;
+ return ab8500_regulator_of_probe(pdev, np);
}
static int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i, err;
-
- for (i = 0; i < abx500_regulator.info_size; i++) {
- struct ab8500_regulator_info *info = NULL;
- info = &abx500_regulator.info[i];
-
- dev_vdbg(rdev_get_dev(info->regulator),
- "%s-remove\n", info->desc.name);
-
- regulator_unregister(info->regulator);
- }
+ int err;
/* remove regulator debug */
err = ab8500_regulator_debug_exit(pdev);
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
new file mode 100644
index 000000000000..b92d7dd01a18
--- /dev/null
+++ b/drivers/regulator/act8865-regulator.c
@@ -0,0 +1,344 @@
+/*
+ * act8865-regulator.c - Voltage regulation for the active-semi ACT8865
+ * http://www.active-semi.com/sheets/ACT8865_Datasheet.pdf
+ *
+ * Copyright (C) 2013 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/act8865.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
+
+/*
+ * ACT8865 Global Register Map.
+ */
+#define ACT8865_SYS_MODE 0x00
+#define ACT8865_SYS_CTRL 0x01
+#define ACT8865_DCDC1_VSET1 0x20
+#define ACT8865_DCDC1_VSET2 0x21
+#define ACT8865_DCDC1_CTRL 0x22
+#define ACT8865_DCDC2_VSET1 0x30
+#define ACT8865_DCDC2_VSET2 0x31
+#define ACT8865_DCDC2_CTRL 0x32
+#define ACT8865_DCDC3_VSET1 0x40
+#define ACT8865_DCDC3_VSET2 0x41
+#define ACT8865_DCDC3_CTRL 0x42
+#define ACT8865_LDO1_VSET 0x50
+#define ACT8865_LDO1_CTRL 0x51
+#define ACT8865_LDO2_VSET 0x54
+#define ACT8865_LDO2_CTRL 0x55
+#define ACT8865_LDO3_VSET 0x60
+#define ACT8865_LDO3_CTRL 0x61
+#define ACT8865_LDO4_VSET 0x64
+#define ACT8865_LDO4_CTRL 0x65
+
+/*
+ * Field Definitions.
+ */
+#define ACT8865_ENA 0x80 /* ON - [7] */
+#define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */
+
+/*
+ * ACT8865 voltage number
+ */
+#define ACT8865_VOLTAGE_NUM 64
+
+struct act8865 {
+ struct regmap *regmap;
+};
+
+static const struct regmap_config act8865_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
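+/*
+ * Selector decode for the ranges below:
+ *   0..23  -> 0.600 V .. 1.175 V in 25 mV steps
+ *   24..47 -> 1.200 V .. 2.350 V in 50 mV steps
+ *   48..63 -> 2.400 V .. 3.900 V in 100 mV steps
+ */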
+static const struct regulator_linear_range act8865_volatge_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
+ REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
+};
+
+static struct regulator_ops act8865_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc act8865_reg[] = {
+ {
+ .name = "DCDC_REG1",
+ .id = ACT8865_ID_DCDC1,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC1_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC1_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC_REG2",
+ .id = ACT8865_ID_DCDC2,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC2_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC2_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "DCDC_REG3",
+ .id = ACT8865_ID_DCDC3,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_DCDC3_VSET1,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_DCDC3_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG1",
+ .id = ACT8865_ID_LDO1,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO1_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO1_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG2",
+ .id = ACT8865_ID_LDO2,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO2_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO2_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG3",
+ .id = ACT8865_ID_LDO3,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO3_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO3_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO_REG4",
+ .id = ACT8865_ID_LDO4,
+ .ops = &act8865_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ACT8865_VOLTAGE_NUM,
+ .linear_ranges = act8865_volatge_ranges,
+ .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
+ .vsel_reg = ACT8865_LDO4_VSET,
+ .vsel_mask = ACT8865_VSEL_MASK,
+ .enable_reg = ACT8865_LDO4_CTRL,
+ .enable_mask = ACT8865_ENA,
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id act8865_dt_ids[] = {
+ { .compatible = "active-semi,act8865" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, act8865_dt_ids);
+
+static struct of_regulator_match act8865_matches[] = {
+ [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"},
+ [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"},
+ [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"},
+ [ACT8865_ID_LDO1] = { .name = "LDO_REG1"},
+ [ACT8865_ID_LDO2] = { .name = "LDO_REG2"},
+ [ACT8865_ID_LDO3] = { .name = "LDO_REG3"},
+ [ACT8865_ID_LDO4] = { .name = "LDO_REG4"},
+};
+
+static int act8865_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct act8865_platform_data *pdata)
+{
+ int matched, i;
+ struct device_node *np;
+ struct act8865_regulator_data *regulator;
+
+ np = of_get_child_by_name(dev->of_node, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return -EINVAL;
+ }
+
+ matched = of_regulator_match(dev, np,
+ act8865_matches, ARRAY_SIZE(act8865_matches));
+ of_node_put(np);
+ if (matched <= 0)
+ return matched;
+
+ pdata->regulators = devm_kzalloc(dev,
+ sizeof(struct act8865_regulator_data) *
+ ARRAY_SIZE(act8865_matches), GFP_KERNEL);
+ if (!pdata->regulators)
+ return -ENOMEM;
+
+ pdata->num_regulators = matched;
+ regulator = pdata->regulators;
+
+ for (i = 0; i < ARRAY_SIZE(act8865_matches); i++) {
+ regulator->id = i;
+ regulator->name = act8865_matches[i].name;
+ regulator->platform_data = act8865_matches[i].init_data;
+ of_node[i] = act8865_matches[i].of_node;
+ regulator++;
+ }
+
+ return 0;
+}
+#else
+static inline int act8865_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct act8865_platform_data *pdata)
+{
+ return 0;
+}
+#endif
+
+static int act8865_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regulator_dev *rdev;
+ struct device *dev = &client->dev;
+ struct act8865_platform_data *pdata = dev_get_platdata(dev);
+ struct regulator_config config = { };
+ struct act8865 *act8865;
+ struct device_node *of_node[ACT8865_REG_NUM];
+ int i, id;
+ int ret = -EINVAL;
+ int error;
+
+ if (dev->of_node && !pdata) {
+ const struct of_device_id *id;
+ struct act8865_platform_data pdata_of;
+
+ id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
+ if (!id)
+ return -ENODEV;
+
+ ret = act8865_pdata_from_dt(dev, of_node, &pdata_of);
+ if (ret < 0)
+ return ret;
+
+ pdata = &pdata_of;
+ }
+
+ if (pdata->num_regulators > ACT8865_REG_NUM) {
+ dev_err(dev, "Too many regulators found!\n");
+ return -EINVAL;
+ }
+
+ act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
+ if (!act8865)
+ return -ENOMEM;
+
+ act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
+ if (IS_ERR(act8865->regmap)) {
+ error = PTR_ERR(act8865->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ /* Finally register devices */
+ for (i = 0; i < ACT8865_REG_NUM; i++) {
+
+ id = pdata->regulators[i].id;
+
+ config.dev = dev;
+ config.init_data = pdata->regulators[i].platform_data;
+ config.of_node = of_node[i];
+ config.driver_data = act8865;
+ config.regmap = act8865->regmap;
+
+ rdev = devm_regulator_register(&client->dev, &act8865_reg[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register %s\n",
+ act8865_reg[id].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ i2c_set_clientdata(client, act8865);
+
+ return 0;
+}
+
+static const struct i2c_device_id act8865_ids[] = {
+ { "act8865", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, act8865_ids);
+
+static struct i2c_driver act8865_pmic_driver = {
+ .driver = {
+ .name = "act8865",
+ .owner = THIS_MODULE,
+ },
+ .probe = act8865_pmic_probe,
+ .id_table = act8865_ids,
+};
+
+module_i2c_driver(act8865_pmic_driver);
+
+MODULE_DESCRIPTION("active-semi act8865 voltage regulator driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index c734d0980826..7c397bb81e01 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -34,6 +34,9 @@
#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
+#define LDO_POWER_GATE 0x00
+#define LDO_FET_FULL_ON 0x1f
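+/*
+ * Writing LDO_POWER_GATE as the voltage selector switches the LDO off;
+ * LDO_FET_FULL_ON drives the pass FET fully on (bypass).
+ */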
+
struct anatop_regulator {
const char *name;
u32 control_reg;
@@ -48,19 +51,10 @@ struct anatop_regulator {
int max_voltage;
struct regulator_desc rdesc;
struct regulator_init_data *initdata;
+ bool bypass;
+ int sel;
};
-static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
- unsigned selector)
-{
- struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-
- if (!anatop_reg->control_reg)
- return -ENOTSUPP;
-
- return regulator_set_voltage_sel_regmap(reg, selector);
-}
-
static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
unsigned int old_sel,
unsigned int new_sel)
@@ -87,22 +81,99 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
return ret;
}
-static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
+static int anatop_regmap_enable(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
- if (!anatop_reg->control_reg)
- return -ENOTSUPP;
+ sel = anatop_reg->bypass ? LDO_FET_FULL_ON : anatop_reg->sel;
+ return regulator_set_voltage_sel_regmap(reg, sel);
+}
+
+static int anatop_regmap_disable(struct regulator_dev *reg)
+{
+ return regulator_set_voltage_sel_regmap(reg, LDO_POWER_GATE);
+}
+
+static int anatop_regmap_is_enabled(struct regulator_dev *reg)
+{
+ return regulator_get_voltage_sel_regmap(reg) != LDO_POWER_GATE;
+}
+
+static int anatop_regmap_core_set_voltage_sel(struct regulator_dev *reg,
+ unsigned selector)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int ret;
+
+ if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg)) {
+ anatop_reg->sel = selector;
+ return 0;
+ }
+
+ ret = regulator_set_voltage_sel_regmap(reg, selector);
+ if (!ret)
+ anatop_reg->sel = selector;
+ return ret;
+}
+
+static int anatop_regmap_core_get_voltage_sel(struct regulator_dev *reg)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+
+ if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg))
+ return anatop_reg->sel;
return regulator_get_voltage_sel_regmap(reg);
}
+static int anatop_regmap_get_bypass(struct regulator_dev *reg, bool *enable)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
+
+ sel = regulator_get_voltage_sel_regmap(reg);
+ if (sel == LDO_FET_FULL_ON)
+ WARN_ON(!anatop_reg->bypass);
+ else if (sel != LDO_POWER_GATE)
+ WARN_ON(anatop_reg->bypass);
+
+ *enable = anatop_reg->bypass;
+ return 0;
+}
+
+static int anatop_regmap_set_bypass(struct regulator_dev *reg, bool enable)
+{
+ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+ int sel;
+
+ if (enable == anatop_reg->bypass)
+ return 0;
+
+ sel = enable ? LDO_FET_FULL_ON : anatop_reg->sel;
+ anatop_reg->bypass = enable;
+
+ return regulator_set_voltage_sel_regmap(reg, sel);
+}
+
static struct regulator_ops anatop_rops = {
- .set_voltage_sel = anatop_regmap_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
+static struct regulator_ops anatop_core_rops = {
+ .enable = anatop_regmap_enable,
+ .disable = anatop_regmap_disable,
+ .is_enabled = anatop_regmap_is_enabled,
+ .set_voltage_sel = anatop_regmap_core_set_voltage_sel,
.set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
- .get_voltage_sel = anatop_regmap_get_voltage_sel,
+ .get_voltage_sel = anatop_regmap_core_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
+ .get_bypass = anatop_regmap_get_bypass,
+ .set_bypass = anatop_regmap_set_bypass,
};
static int anatop_regulator_probe(struct platform_device *pdev)
@@ -116,18 +187,16 @@ static int anatop_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulator_config config = { };
int ret = 0;
+ u32 val;
initdata = of_get_regulator_init_data(dev, np);
sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
if (!sreg)
return -ENOMEM;
sreg->initdata = initdata;
- sreg->name = kstrdup(of_get_property(np, "regulator-name", NULL),
- GFP_KERNEL);
+ sreg->name = of_get_property(np, "regulator-name", NULL);
rdesc = &sreg->rdesc;
- memset(rdesc, 0, sizeof(*rdesc));
rdesc->name = sreg->name;
- rdesc->ops = &anatop_rops;
rdesc->type = REGULATOR_VOLTAGE;
rdesc->owner = THIS_MODULE;
@@ -143,37 +212,37 @@ static int anatop_regulator_probe(struct platform_device *pdev)
&sreg->control_reg);
if (ret) {
dev_err(dev, "no anatop-reg-offset property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-vol-bit-width",
&sreg->vol_bit_width);
if (ret) {
dev_err(dev, "no anatop-vol-bit-width property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-vol-bit-shift",
&sreg->vol_bit_shift);
if (ret) {
dev_err(dev, "no anatop-vol-bit-shift property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-min-bit-val",
&sreg->min_bit_val);
if (ret) {
dev_err(dev, "no anatop-min-bit-val property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-min-voltage",
&sreg->min_voltage);
if (ret) {
dev_err(dev, "no anatop-min-voltage property set\n");
- goto anatop_probe_end;
+ return ret;
}
ret = of_property_read_u32(np, "anatop-max-voltage",
&sreg->max_voltage);
if (ret) {
dev_err(dev, "no anatop-max-voltage property set\n");
- goto anatop_probe_end;
+ return ret;
}
/* read LDO ramp up setting, only for core reg */
@@ -199,32 +268,35 @@ static int anatop_regulator_probe(struct platform_device *pdev)
config.of_node = pdev->dev.of_node;
config.regmap = sreg->anatop;
+ /* Only core regulators have the ramp up delay configuration. */
+ if (sreg->control_reg && sreg->delay_bit_width) {
+ rdesc->ops = &anatop_core_rops;
+
+ ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
+ if (ret) {
+ dev_err(dev, "failed to read initial state\n");
+ return ret;
+ }
+
+ sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift;
+ if (sreg->sel == LDO_FET_FULL_ON) {
+ sreg->sel = 0;
+ sreg->bypass = true;
+ }
+ } else {
+ rdesc->ops = &anatop_rops;
+ }
+
/* register regulator */
rdev = devm_regulator_register(dev, rdesc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n",
rdesc->name);
- ret = PTR_ERR(rdev);
- goto anatop_probe_end;
+ return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
-anatop_probe_end:
- if (ret)
- kfree(sreg->name);
-
- return ret;
-}
-
-static int anatop_regulator_remove(struct platform_device *pdev)
-{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
- struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
- const char *name = sreg->name;
-
- kfree(name);
-
return 0;
}
@@ -240,7 +312,6 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = anatop_regulator_remove,
};
static int __init anatop_regulator_init(void)
@@ -259,3 +330,4 @@ MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>");
MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
MODULE_DESCRIPTION("ANATOP Regulator driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:anatop_regulator");
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 4f6c2055f6b2..b1033d30b504 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = {
.vsel_reg = ARIZONA_LDO1_CONTROL_1,
.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
- .bypass_reg = ARIZONA_LDO1_CONTROL_1,
- .bypass_mask = ARIZONA_LDO1_BYPASS,
.min_uV = 900000,
- .uV_step = 50000,
- .n_voltages = 7,
+ .uV_step = 25000,
+ .n_voltages = 13,
.enable_time = 500,
.owner = THIS_MODULE,
@@ -189,10 +187,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
int ret;
ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
- if (ldo1 == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo1)
return -ENOMEM;
- }
ldo1->arizona = arizona;
@@ -203,6 +199,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
*/
switch (arizona->type) {
case WM5102:
+ case WM8997:
desc = &arizona_ldo1_hc;
ldo1->init_data = arizona_ldo1_dvfs;
break;
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index fd3154d86901..6fdd9bf6927f 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -28,8 +28,6 @@
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
-#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f
-
struct arizona_micsupp {
struct regulator_dev *regulator;
struct arizona *arizona;
@@ -40,42 +38,6 @@ struct arizona_micsupp {
struct work_struct check_cp_work;
};
-static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector > ARIZONA_MICSUPP_MAX_SELECTOR)
- return -EINVAL;
-
- if (selector == ARIZONA_MICSUPP_MAX_SELECTOR)
- return 3300000;
- else
- return (selector * 50000) + 1700000;
-}
-
-static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- unsigned int voltage;
- int selector;
-
- if (min_uV < 1700000)
- min_uV = 1700000;
-
- if (min_uV > 3200000)
- selector = ARIZONA_MICSUPP_MAX_SELECTOR;
- else
- selector = DIV_ROUND_UP(min_uV - 1700000, 50000);
-
- if (selector < 0)
- return -EINVAL;
-
- voltage = arizona_micsupp_list_voltage(rdev, selector);
- if (voltage < min_uV || voltage > max_uV)
- return -EINVAL;
-
- return selector;
-}
-
static void arizona_micsupp_check_cp(struct work_struct *work)
{
struct arizona_micsupp *micsupp =
@@ -145,8 +107,8 @@ static struct regulator_ops arizona_micsupp_ops = {
.disable = arizona_micsupp_disable,
.is_enabled = regulator_is_enabled_regmap,
- .list_voltage = arizona_micsupp_list_voltage,
- .map_voltage = arizona_micsupp_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -155,11 +117,16 @@ static struct regulator_ops arizona_micsupp_ops = {
.set_bypass = arizona_micsupp_set_bypass,
};
+static const struct regulator_linear_range arizona_micsupp_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 0x1e, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x1f, 0x1f, 0),
+};
+
static const struct regulator_desc arizona_micsupp = {
.name = "MICVDD",
.supply_name = "CPVDD",
.type = REGULATOR_VOLTAGE,
- .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1,
+ .n_voltages = 32,
.ops = &arizona_micsupp_ops,
.vsel_reg = ARIZONA_LDO2_CONTROL_1,
@@ -169,6 +136,9 @@ static const struct regulator_desc arizona_micsupp = {
.bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
.bypass_mask = ARIZONA_CPMIC_BYPASS,
+ .linear_ranges = arizona_micsupp_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ranges),
+
.enable_time = 3000,
.owner = THIS_MODULE,
@@ -234,10 +204,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
int ret;
micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
- if (micsupp == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!micsupp)
return -ENOMEM;
- }
micsupp->arizona = arizona;
INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
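The switch to the regulator_list_voltage_linear_range()/regulator_map_voltage_linear_range() helpers above leans entirely on the arizona_micsupp_ranges table. Below is a minimal standalone sketch of the selector-to-voltage mapping those two ranges describe, mirroring (not reusing) the core helper; the struct and function names are invented for illustration:

struct micsupp_range_sketch {
	unsigned int min_uV;
	unsigned int min_sel;
	unsigned int max_sel;
	unsigned int uV_step;
};

static const struct micsupp_range_sketch micsupp_ranges_sketch[] = {
	{ .min_uV = 1700000, .min_sel = 0x00, .max_sel = 0x1e, .uV_step = 50000 },
	{ .min_uV = 3300000, .min_sel = 0x1f, .max_sel = 0x1f, .uV_step = 0 },
};

static int micsupp_list_voltage_sketch(unsigned int selector)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(micsupp_ranges_sketch); i++) {
		const struct micsupp_range_sketch *r = &micsupp_ranges_sketch[i];

		if (selector >= r->min_sel && selector <= r->max_sel)
			return r->min_uV + (selector - r->min_sel) * r->uV_step;
	}

	return -EINVAL;	/* selectors above 0x1f are out of range */
}

Selector 0x1e maps to 3200000 uV and 0x1f to the fixed 3300000 uV step, matching the hand-rolled list_voltage/map_voltage pair that this hunk removes.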
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index c77a58478cca..b47283f91e2d 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -191,7 +191,7 @@ static int as3711_regulator_parse_dt(struct device *dev,
{
struct as3711_regulator_pdata *pdata = dev_get_platdata(dev);
struct device_node *regulators =
- of_find_node_by_name(dev->parent->of_node, "regulators");
+ of_get_child_by_name(dev->parent->of_node, "regulators");
struct of_regulator_match *match;
int ret, i;
@@ -221,7 +221,6 @@ static int as3711_regulator_probe(struct platform_device *pdev)
{
struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
- struct regulator_init_data *reg_data;
struct regulator_config config = {.dev = &pdev->dev,};
struct as3711_regulator *reg = NULL;
struct as3711_regulator *regs;
@@ -246,22 +245,14 @@ static int as3711_regulator_probe(struct platform_device *pdev)
regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM *
sizeof(struct as3711_regulator), GFP_KERNEL);
- if (!regs) {
- dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+ if (!regs)
return -ENOMEM;
- }
for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
- reg_data = pdata->init_data[id];
-
- /* No need to register if there is no regulator data */
- if (!reg_data)
- continue;
-
reg = &regs[id];
reg->reg_info = ri;
- config.init_data = reg_data;
+ config.init_data = pdata->init_data[id];
config.driver_data = reg;
config.regmap = as3711->regmap;
config.of_node = of_node[id];
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index b9f1d24c6812..85585219ce82 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -99,7 +99,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK,
.control_reg = AS3722_SD0_CONTROL_REG,
.mode_mask = AS3722_SD0_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_SD1,
@@ -112,7 +111,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK,
.control_reg = AS3722_SD1_CONTROL_REG,
.mode_mask = AS3722_SD1_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_SD2,
@@ -181,7 +179,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
.sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK,
.control_reg = AS3722_SD6_CONTROL_REG,
.mode_mask = AS3722_SD6_MODE_FAST,
- .n_voltages = AS3722_SD0_VSEL_MAX + 1,
},
{
.regulator_id = AS3722_REGULATOR_ID_LDO0,
@@ -595,6 +592,22 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
return as3722_update_bits(as3722, reg, mask, val);
}
+static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
+{
+ int err;
+ unsigned val;
+
+ err = as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val);
+ if (err < 0) {
+ dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
+ AS3722_FUSE7_REG, err);
+ return false;
+ }
+ if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE)
+ return true;
+ return false;
+}
+
static const struct regulator_linear_range as3722_sd2345_ranges[] = {
REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
@@ -706,6 +719,7 @@ static int as3722_get_regulator_dt_data(struct platform_device *pdev,
ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches,
ARRAY_SIZE(as3722_regulator_matches));
+ of_node_put(np);
if (ret < 0) {
dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n",
ret);
@@ -820,9 +834,19 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_sd016_extcntrl_ops;
else
ops = &as3722_sd016_ops;
- as3722_regs->desc[id].min_uV = 610000;
+ if (id == AS3722_REGULATOR_ID_SD0 &&
+ as3722_sd0_is_low_voltage(as3722_regs)) {
+ as3722_regs->desc[id].n_voltages =
+ AS3722_SD0_VSEL_LOW_VOL_MAX + 1;
+ as3722_regs->desc[id].min_uV = 410000;
+ } else {
+ as3722_regs->desc[id].n_voltages =
+					AS3722_SD0_VSEL_MAX + 1;
+ as3722_regs->desc[id].min_uV = 610000;
+ }
as3722_regs->desc[id].uV_step = 10000;
as3722_regs->desc[id].linear_min_sel = 1;
+ as3722_regs->desc[id].enable_time = 600;
break;
case AS3722_REGULATOR_ID_SD2:
case AS3722_REGULATOR_ID_SD3:
@@ -842,9 +866,6 @@ static int as3722_regulator_probe(struct platform_device *pdev)
ops = &as3722_ldo_extcntrl_ops;
else
ops = &as3722_ldo_ops;
- as3722_regs->desc[id].min_uV = 825000;
- as3722_regs->desc[id].uV_step = 25000;
- as3722_regs->desc[id].linear_min_sel = 1;
as3722_regs->desc[id].enable_time = 500;
as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
as3722_regs->desc[id].n_linear_ranges =
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
new file mode 100644
index 000000000000..ab08ca7cfb08
--- /dev/null
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -0,0 +1,403 @@
+/*
+ * Broadcom BCM590xx regulator driver
+ *
+ * Copyright 2014 Linaro Limited
+ * Author: Matt Porter <mporter@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/bcm590xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Register defs */
+#define BCM590XX_RFLDOPMCTRL1 0x60
+#define BCM590XX_IOSR1PMCTRL1 0x7a
+#define BCM590XX_IOSR2PMCTRL1 0x7c
+#define BCM590XX_CSRPMCTRL1 0x7e
+#define BCM590XX_SDSR1PMCTRL1 0x82
+#define BCM590XX_SDSR2PMCTRL1 0x86
+#define BCM590XX_MSRPMCTRL1 0x8a
+#define BCM590XX_VSRPMCTRL1 0x8e
+#define BCM590XX_REG_ENABLE BIT(7)
+
+#define BCM590XX_RFLDOCTRL 0x96
+#define BCM590XX_CSRVOUT1 0xc0
+#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
+#define BCM590XX_SR_VSEL_MASK GENMASK(5, 0)
+
+/* LDO regulator IDs */
+#define BCM590XX_REG_RFLDO 0
+#define BCM590XX_REG_CAMLDO1 1
+#define BCM590XX_REG_CAMLDO2 2
+#define BCM590XX_REG_SIMLDO1 3
+#define BCM590XX_REG_SIMLDO2 4
+#define BCM590XX_REG_SDLDO 5
+#define BCM590XX_REG_SDXLDO 6
+#define BCM590XX_REG_MMCLDO1 7
+#define BCM590XX_REG_MMCLDO2 8
+#define BCM590XX_REG_AUDLDO 9
+#define BCM590XX_REG_MICLDO 10
+#define BCM590XX_REG_USBLDO 11
+#define BCM590XX_REG_VIBLDO 12
+
+/* DCDC regulator IDs */
+#define BCM590XX_REG_CSR 13
+#define BCM590XX_REG_IOSR1 14
+#define BCM590XX_REG_IOSR2 15
+#define BCM590XX_REG_MSR 16
+#define BCM590XX_REG_SDSR1 17
+#define BCM590XX_REG_SDSR2 18
+#define BCM590XX_REG_VSR 19
+
+#define BCM590XX_NUM_REGS 20
+
+#define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR)
+
+struct bcm590xx_board {
+ struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS];
+};
+
+/* LDO group A: supported voltages in microvolts */
+static const unsigned int ldo_a_table[] = {
+ 1200000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
+
+/* LDO group C: supported voltages in microvolts */
+static const unsigned int ldo_c_table[] = {
+ 3100000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
+
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_csr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+ REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
+ REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
+};
+
+/* DCDC group IOSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_iosr1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
+ REGULATOR_LINEAR_RANGE(900000, 54, 63, 0),
+};
+
+/* DCDC group SDSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_sdsr1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+ REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
+ REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
+};
+
+struct bcm590xx_info {
+ const char *name;
+ const char *vin_name;
+ u8 n_voltages;
+ const unsigned int *volt_table;
+ u8 n_linear_ranges;
+ const struct regulator_linear_range *linear_ranges;
+};
+
+#define BCM590XX_REG_TABLE(_name, _table) \
+ { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(_table), \
+ .volt_table = _table, \
+ }
+
+#define BCM590XX_REG_RANGES(_name, _ranges) \
+ { \
+ .name = #_name, \
+ .n_linear_ranges = ARRAY_SIZE(_ranges), \
+ .linear_ranges = _ranges, \
+ }
+
+static struct bcm590xx_info bcm590xx_regs[] = {
+ BCM590XX_REG_TABLE(rfldo, ldo_a_table),
+ BCM590XX_REG_TABLE(camldo1, ldo_c_table),
+ BCM590XX_REG_TABLE(camldo2, ldo_c_table),
+ BCM590XX_REG_TABLE(simldo1, ldo_a_table),
+ BCM590XX_REG_TABLE(simldo2, ldo_a_table),
+ BCM590XX_REG_TABLE(sdldo, ldo_c_table),
+ BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
+ BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
+ BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
+ BCM590XX_REG_TABLE(audldo, ldo_a_table),
+ BCM590XX_REG_TABLE(micldo, ldo_a_table),
+ BCM590XX_REG_TABLE(usbldo, ldo_a_table),
+ BCM590XX_REG_TABLE(vibldo, ldo_c_table),
+ BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
+ BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
+ BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
+ BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
+};
+
+struct bcm590xx_reg {
+ struct regulator_desc *desc;
+ struct bcm590xx *mfd;
+ struct bcm590xx_info **info;
+};
+
+static int bcm590xx_get_vsel_register(int id)
+{
+ if (BCM590XX_REG_IS_LDO(id))
+ return BCM590XX_RFLDOCTRL + id;
+ else
+ return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
+}
+
+static int bcm590xx_get_enable_register(int id)
+{
+ int reg = 0;
+
+ if (BCM590XX_REG_IS_LDO(id))
+ reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
+ else
+ switch (id) {
+ case BCM590XX_REG_CSR:
+ reg = BCM590XX_CSRPMCTRL1;
+ break;
+ case BCM590XX_REG_IOSR1:
+ reg = BCM590XX_IOSR1PMCTRL1;
+ break;
+ case BCM590XX_REG_IOSR2:
+ reg = BCM590XX_IOSR2PMCTRL1;
+ break;
+ case BCM590XX_REG_MSR:
+ reg = BCM590XX_MSRPMCTRL1;
+ break;
+ case BCM590XX_REG_SDSR1:
+ reg = BCM590XX_SDSR1PMCTRL1;
+ break;
+ case BCM590XX_REG_SDSR2:
+ reg = BCM590XX_SDSR2PMCTRL1;
+ break;
+ };
+
+ return reg;
+}
+
+static struct regulator_ops bcm590xx_ops_ldo = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static struct regulator_ops bcm590xx_ops_dcdc = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+#define BCM590XX_MATCH(_name, _id) \
+ { \
+ .name = #_name, \
+ .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \
+ }
+
+static struct of_regulator_match bcm590xx_matches[] = {
+ BCM590XX_MATCH(rfldo, RFLDO),
+ BCM590XX_MATCH(camldo1, CAMLDO1),
+ BCM590XX_MATCH(camldo2, CAMLDO2),
+ BCM590XX_MATCH(simldo1, SIMLDO1),
+ BCM590XX_MATCH(simldo2, SIMLDO2),
+ BCM590XX_MATCH(sdldo, SDLDO),
+ BCM590XX_MATCH(sdxldo, SDXLDO),
+ BCM590XX_MATCH(mmcldo1, MMCLDO1),
+ BCM590XX_MATCH(mmcldo2, MMCLDO2),
+ BCM590XX_MATCH(audldo, AUDLDO),
+ BCM590XX_MATCH(micldo, MICLDO),
+ BCM590XX_MATCH(usbldo, USBLDO),
+ BCM590XX_MATCH(vibldo, VIBLDO),
+ BCM590XX_MATCH(csr, CSR),
+ BCM590XX_MATCH(iosr1, IOSR1),
+ BCM590XX_MATCH(iosr2, IOSR2),
+ BCM590XX_MATCH(msr, MSR),
+ BCM590XX_MATCH(sdsr1, SDSR1),
+ BCM590XX_MATCH(sdsr2, SDSR2),
+ BCM590XX_MATCH(vsr, VSR),
+};
+
+static struct bcm590xx_board *bcm590xx_parse_dt_reg_data(
+ struct platform_device *pdev,
+ struct of_regulator_match **bcm590xx_reg_matches)
+{
+ struct bcm590xx_board *data;
+ struct device_node *np = pdev->dev.parent->of_node;
+ struct device_node *regulators;
+ struct of_regulator_match *matches = bcm590xx_matches;
+ int count = ARRAY_SIZE(bcm590xx_matches);
+ int idx = 0;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "of node not found\n");
+ return NULL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate regulator board data\n");
+ return NULL;
+ }
+
+ np = of_node_get(np);
+ regulators = of_get_child_by_name(np, "regulators");
+ if (!regulators) {
+ dev_warn(&pdev->dev, "regulator node not found\n");
+ return NULL;
+ }
+
+ ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ of_node_put(regulators);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+ ret);
+ return NULL;
+ }
+
+ *bcm590xx_reg_matches = matches;
+
+ for (idx = 0; idx < count; idx++) {
+ if (!matches[idx].init_data || !matches[idx].of_node)
+ continue;
+
+ data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data;
+ }
+
+ return data;
+}
+
+static int bcm590xx_probe(struct platform_device *pdev)
+{
+ struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
+ struct bcm590xx_board *pmu_data = NULL;
+ struct bcm590xx_reg *pmu;
+ struct regulator_config config = { };
+ struct bcm590xx_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct of_regulator_match *bcm590xx_reg_matches = NULL;
+ int i;
+
+ pmu_data = bcm590xx_parse_dt_reg_data(pdev,
+ &bcm590xx_reg_matches);
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu) {
+ dev_err(&pdev->dev, "Memory allocation failed for pmu\n");
+ return -ENOMEM;
+ }
+
+ pmu->mfd = bcm590xx;
+
+ platform_set_drvdata(pdev, pmu);
+
+ pmu->desc = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+ sizeof(struct regulator_desc), GFP_KERNEL);
+ if (!pmu->desc) {
+ dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+ return -ENOMEM;
+ }
+
+ pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+ sizeof(struct bcm590xx_info *), GFP_KERNEL);
+ if (!pmu->info) {
+ dev_err(&pdev->dev, "Memory alloc fails for info\n");
+ return -ENOMEM;
+ }
+
+ info = bcm590xx_regs;
+
+ for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
+ if (pmu_data)
+ reg_data = pmu_data->bcm590xx_pmu_init_data[i];
+ else
+ reg_data = NULL;
+
+ /* Register the regulators */
+ pmu->info[i] = info;
+
+ pmu->desc[i].name = info->name;
+ pmu->desc[i].supply_name = info->vin_name;
+ pmu->desc[i].id = i;
+ pmu->desc[i].volt_table = info->volt_table;
+ pmu->desc[i].n_voltages = info->n_voltages;
+ pmu->desc[i].linear_ranges = info->linear_ranges;
+ pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
+
+ if (BCM590XX_REG_IS_LDO(i)) {
+ pmu->desc[i].ops = &bcm590xx_ops_ldo;
+ pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
+ } else {
+ pmu->desc[i].ops = &bcm590xx_ops_dcdc;
+ pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
+ }
+
+ pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
+ pmu->desc[i].enable_is_inverted = true;
+ pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
+ pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
+ pmu->desc[i].type = REGULATOR_VOLTAGE;
+ pmu->desc[i].owner = THIS_MODULE;
+
+ config.dev = bcm590xx->dev;
+ config.init_data = reg_data;
+ config.driver_data = pmu;
+ config.regmap = bcm590xx->regmap;
+
+ if (bcm590xx_reg_matches)
+ config.of_node = bcm590xx_reg_matches[i].of_node;
+
+ rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(bcm590xx->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver bcm590xx_regulator_driver = {
+ .driver = {
+ .name = "bcm590xx-vregs",
+ .owner = THIS_MODULE,
+ },
+ .probe = bcm590xx_probe,
+};
+module_platform_driver(bcm590xx_regulator_driver);
+
+MODULE_AUTHOR("Matt Porter <mporter@linaro.org>");
+MODULE_DESCRIPTION("BCM590xx voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bcm590xx-vregs");
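A quick arithmetic check of the register lookup helpers in this new driver, using only the #defines earlier in the file:

/*
 * LDO case, e.g. SIMLDO2 (id 4):
 *	vsel:   BCM590XX_RFLDOCTRL    + 4     = 0x96 + 0x04 = 0x9a
 *	enable: BCM590XX_RFLDOPMCTRL1 + 4 * 2 = 0x60 + 0x08 = 0x68
 *
 * DCDC case, e.g. SDSR1 (id 17, with BCM590XX_REG_CSR = 13):
 *	vsel:   BCM590XX_CSRVOUT1 + (17 - 13) * 3 = 0xc0 + 0x0c = 0xcc
 *	enable: taken from the switch, i.e. BCM590XX_SDSR1PMCTRL1 = 0x82
 */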
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d85f31385b24..9a09f3cdbabb 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev,
return 0;
}
+static int _regulator_do_enable(struct regulator_dev *rdev);
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
- ops->enable) {
- ret = ops->enable(rdev);
- if (ret < 0) {
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
rdev_err(rdev, "failed to enable\n");
goto out;
}
@@ -1272,6 +1273,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (r->dev.parent &&
node == r->dev.of_node)
return r;
+ *ret = -EPROBE_DEFER;
+ return NULL;
} else {
/*
* If we couldn't even get the node then it's
@@ -1312,7 +1315,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = -EPROBE_DEFER;
+ int ret;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1322,6 +1325,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (dev)
devname = dev_name(dev);
+ if (have_full_constraints())
+ ret = -ENODEV;
+ else
+ ret = -EPROBE_DEFER;
+
mutex_lock(&regulator_list_mutex);
rdev = regulator_dev_lookup(dev, id, &ret);
@@ -1334,9 +1342,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* If we have return value from dev_lookup fail, we do not expect to
* succeed, so, quit with appropriate error value
*/
- if (ret && ret != -ENODEV) {
+ if (ret && ret != -ENODEV)
goto out;
- }
if (!devname)
devname = "deviceless";
@@ -1351,8 +1358,9 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
rdev = dummy_regulator_rdev;
goto found;
- } else {
- dev_err(dev, "dummy supplies not allowed\n");
+ /* Don't log an error when called from regulator_get_optional() */
+ } else if (!have_full_constraints() || exclusive) {
+ dev_warn(dev, "dummy supplies not allowed\n");
}
mutex_unlock(&regulator_list_mutex);
@@ -1900,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
trace_regulator_disable_complete(rdev_get_name(rdev));
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
return 0;
}
@@ -1925,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev_err(rdev, "failed to disable\n");
return ret;
}
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
rdev->use_count = 0;
@@ -1977,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
- /* force disable */
- if (rdev->desc->ops->disable) {
- /* ah well, who wants to live forever... */
- ret = rdev->desc->ops->disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
- return ret;
- }
- /* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
- REGULATOR_EVENT_DISABLE, NULL);
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to force disable\n");
+ return ret;
}
- return ret;
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
+
+ return 0;
}
/**
@@ -2134,7 +2138,7 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled);
* @regulator: regulator source
*
* Returns positive if the regulator driver backing the source/client
- * can change its voltage, false otherwise. Usefull for detecting fixed
+ * can change its voltage, false otherwise. Useful for detecting fixed
* or dummy regulators and disabling voltage change logic in the client
* driver.
*/
@@ -2244,7 +2248,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
- return (min_uV <= ret && ret <= max_uV);
+ return min_uV <= ret && ret <= max_uV;
else
return ret;
}
@@ -2395,6 +2399,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
int old_min_uV, old_max_uV;
+ int current_uV;
mutex_lock(&rdev->mutex);
@@ -2405,6 +2410,19 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
goto out;
+ /* If we're trying to set a range that overlaps the current voltage,
+	 * return successfully even though the regulator does not support
+ * changing the voltage.
+ */
+ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ current_uV = _regulator_get_voltage(rdev);
+ if (min_uV <= current_uV && current_uV <= max_uV) {
+ regulator->min_uV = min_uV;
+ regulator->max_uV = max_uV;
+ goto out;
+ }
+ }
+
/* sanity check */
if (!rdev->desc->ops->set_voltage &&
!rdev->desc->ops->set_voltage_sel) {
@@ -2416,7 +2434,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
-
+
/* restore original values in case of error */
old_min_uV = regulator->min_uV;
old_max_uV = regulator->max_uV;
@@ -2430,7 +2448,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
if (ret < 0)
goto out2;
-
+
out:
mutex_unlock(&rdev->mutex);
return ret;
@@ -3623,23 +3641,18 @@ int regulator_suspend_finish(void)
mutex_lock(&regulator_list_mutex);
list_for_each_entry(rdev, &regulator_list, list) {
- struct regulator_ops *ops = rdev->desc->ops;
-
mutex_lock(&rdev->mutex);
- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
- ops->enable) {
- error = ops->enable(rdev);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ error = _regulator_do_enable(rdev);
if (error)
ret = error;
} else {
if (!have_full_constraints())
goto unlock;
- if (!ops->disable)
- goto unlock;
if (!_regulator_is_enabled(rdev))
goto unlock;
- error = ops->disable(rdev);
+ error = _regulator_do_disable(rdev);
if (error)
ret = error;
}
@@ -3813,7 +3826,7 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (!ops->disable || (c && c->always_on))
+ if (c && c->always_on)
continue;
mutex_lock(&rdev->mutex);
@@ -3834,10 +3847,9 @@ static int __init regulator_init_complete(void)
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
- ret = ops->disable(rdev);
- if (ret != 0) {
+ ret = _regulator_do_disable(rdev);
+ if (ret != 0)
rdev_err(rdev, "couldn't disable: %d\n", ret);
- }
} else {
/* The intention is that in future we will
* assume that full constraints are provided
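A hypothetical consumer-side view of the overlap check added to regulator_set_voltage() above; "vdd" and the fixed 1.8 V supply are assumptions for illustration, not taken from this patch:

static int example_consumer_probe(struct device *dev)
{
	struct regulator *vdd;

	vdd = devm_regulator_get(dev, "vdd");	/* assumed fixed 1.8 V supply */
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/*
	 * 1.7 V - 1.9 V brackets the current 1.8 V output, so this now
	 * succeeds even though REGULATOR_CHANGE_VOLTAGE is not allowed
	 * for this supply; a request such as 3.3 V - 3.3 V would still
	 * be rejected (typically with -EPERM), as before the patch.
	 */
	return regulator_set_voltage(vdd, 1700000, 1900000);
}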
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 3adeaeffc485..fdb6ea8ae7e6 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -240,6 +240,31 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
+static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_sel,
+ unsigned int new_sel)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int id = rdev_get_id(rdev);
+ int ret = 0;
+
+ /* The DVC controlled LDOs and DCDCs ramp with 6.25mV/µs after enabling
+ * the activate bit.
+ */
+ switch (id) {
+ case DA9052_ID_BUCK1:
+ case DA9052_ID_BUCK2:
+ case DA9052_ID_BUCK3:
+ case DA9052_ID_LDO2:
+ case DA9052_ID_LDO3:
+ ret = (new_sel - old_sel) * info->step_uV / 6250;
+ break;
+ }
+
+ return ret;
+}
+
static struct regulator_ops da9052_dcdc_ops = {
.get_current_limit = da9052_dcdc_get_current_limit,
.set_current_limit = da9052_dcdc_set_current_limit,
@@ -248,6 +273,7 @@ static struct regulator_ops da9052_dcdc_ops = {
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -258,6 +284,7 @@ static struct regulator_ops da9052_ldo_ops = {
.map_voltage = da9052_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = da9052_regulator_set_voltage_sel,
+ .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -401,7 +428,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
if (!nproot)
return -ENODEV;
- nproot = of_find_node_by_name(nproot, "regulators");
+ nproot = of_get_child_by_name(nproot, "regulators");
if (!nproot)
return -ENODEV;
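The new da9052_regulator_set_voltage_time_sel() callback above reduces to a single division. As a worked example (the 25 mV step is an assumed value; the real one comes from info->step_uV): raising BUCK1 by 8 selector steps of 25000 uV gives (8 * 25000) / 6250 = 32, i.e. a 32 us settling delay at the documented 6.25 mV/us ramp.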
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..9516317e1a9f 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -19,6 +19,8 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
@@ -446,6 +448,9 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
struct da9055_regulator_info *info = regulator->info;
int ret = 0;
+ if (!pdata)
+ return 0;
+
if (pdata->gpio_ren && pdata->gpio_ren[id]) {
char name[18];
int gpio_mux = pdata->gpio_ren[id];
@@ -530,6 +535,59 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
return NULL;
}
+#ifdef CONFIG_OF
+static struct of_regulator_match da9055_reg_matches[] = {
+ { .name = "BUCK1", },
+ { .name = "BUCK2", },
+ { .name = "LDO1", },
+ { .name = "LDO2", },
+ { .name = "LDO3", },
+ { .name = "LDO4", },
+ { .name = "LDO5", },
+ { .name = "LDO6", },
+};
+
+static int da9055_regulator_dt_init(struct platform_device *pdev,
+ struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ int regid)
+{
+ struct device_node *nproot, *np;
+ int ret;
+
+ nproot = of_node_get(pdev->dev.parent->of_node);
+ if (!nproot)
+ return -ENODEV;
+
+ np = of_get_child_by_name(nproot, "regulators");
+ if (!np)
+ return -ENODEV;
+
+ ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1);
+ of_node_put(nproot);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error matching regulator: %d\n", ret);
+ return ret;
+ }
+
+ config->init_data = da9055_reg_matches[regid].init_data;
+ config->of_node = da9055_reg_matches[regid].of_node;
+
+ if (!config->of_node)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int da9055_regulator_dt_init(struct platform_device *pdev,
+ struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ int regid)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
static int da9055_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
@@ -538,9 +596,6 @@ static int da9055_regulator_probe(struct platform_device *pdev)
struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
int ret, irq;
- if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
- return -ENODEV;
-
regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator),
GFP_KERNEL);
if (!regulator)
@@ -557,8 +612,14 @@ static int da9055_regulator_probe(struct platform_device *pdev)
config.driver_data = regulator;
config.regmap = da9055->regmap;
- if (pdata && pdata->regulators)
+ if (pdata && pdata->regulators) {
config.init_data = pdata->regulators[pdev->id];
+ } else {
+ ret = da9055_regulator_dt_init(pdev, regulator, &config,
+ pdev->id);
+ if (ret < 0)
+ return ret;
+ }
ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
if (ret < 0)
@@ -576,7 +637,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 56727eb745df..7c9461d13313 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,3 +1,4 @@
+
/*
* Regulator driver for DA9063 PMIC series
*
@@ -60,7 +61,8 @@ struct da9063_regulator_info {
.desc.ops = &da9063_ldo_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
- .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
+ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
+ + (DA9063_V##regl_name##_BIAS)), \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_LDO_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
@@ -363,7 +365,7 @@ static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
sel <<= ffs(rdev->desc->vsel_mask) - 1;
@@ -664,7 +666,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct device_node *node;
int i, n, num;
- node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!node) {
dev_err(&pdev->dev, "Regulators device node not found\n");
return ERR_PTR(-ENODEV);
@@ -672,6 +674,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
num = of_regulator_match(&pdev->dev, node, da9063_matches,
ARRAY_SIZE(da9063_matches));
+ of_node_put(node);
if (num < 0) {
dev_err(&pdev->dev, "Failed to match regulators\n");
return ERR_PTR(-EINVAL);
@@ -708,7 +711,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
struct platform_device *pdev,
struct of_regulator_match **da9063_reg_matches)
{
- da9063_reg_matches = NULL;
+ *da9063_reg_matches = NULL;
return ERR_PTR(-ENODEV);
}
#endif
@@ -754,7 +757,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Error while reading BUCKs configuration\n");
- return -EIO;
+ return ret;
}
bcores_merged = val & DA9063_BCORE_MERGE;
bmem_bio_merged = val & DA9063_BUCK_MERGE;
@@ -773,10 +776,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
size = sizeof(struct da9063_regulators) +
n_regulators * sizeof(struct da9063_regulator);
regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!regulators) {
- dev_err(&pdev->dev, "No memory for regulators\n");
+ if (!regulators)
return -ENOMEM;
- }
regulators->n_regulators = n_regulators;
platform_set_drvdata(pdev, regulators);
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 6f5ecbe1132e..7a320dd11c46 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -134,11 +134,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
int error;
chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
- if (NULL == chip) {
- dev_err(&i2c->dev,
- "Cannot kzalloc memory for regulator structure\n");
+ if (!chip)
return -ENOMEM;
- }
chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config);
if (IS_ERR(chip->regmap)) {
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index a53c11a529d5..617c1adca816 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -263,6 +263,8 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
.ops = &db8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .fixed_uV = 1800000,
+ .n_voltages = 1,
},
.exclude_from_power_state = true,
},
@@ -431,17 +433,11 @@ static int db8500_regulator_register(struct platform_device *pdev,
config.of_node = np;
/* register with the regulator framework */
- info->rdev = regulator_register(&info->desc, &config);
+ info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register %s: err %i\n",
info->desc.name, err);
-
- /* if failing, unregister all earlier regulators */
- while (--id >= 0) {
- info = &dbx500_regulator_info[id];
- regulator_unregister(info->rdev);
- }
return err;
}
@@ -530,20 +526,8 @@ static int db8500_regulator_probe(struct platform_device *pdev)
static int db8500_regulator_remove(struct platform_device *pdev)
{
- int i;
-
ux500_regulator_debug_exit();
- for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
- struct dbx500_regulator_info *info;
- info = &dbx500_regulator_info[i];
-
- dev_vdbg(rdev_get_dev(info->rdev),
- "regulator-%s-remove\n", info->desc.name);
-
- regulator_unregister(info->rdev);
- }
-
return 0;
}
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index ce89f7848a57..2d16b9f16de7 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -78,6 +78,7 @@ static struct ux500_regulator_debug {
void ux500_regulator_suspend_debug(void)
{
int i;
+
for (i = 0; i < rdebug.num_regulators; i++)
rdebug.state_before_suspend[i] =
rdebug.regulator_array[i].is_enabled;
@@ -86,6 +87,7 @@ void ux500_regulator_suspend_debug(void)
void ux500_regulator_resume_debug(void)
{
int i;
+
for (i = 0; i < rdebug.num_regulators; i++)
rdebug.state_after_suspend[i] =
rdebug.regulator_array[i].is_enabled;
@@ -127,9 +129,9 @@ static int ux500_regulator_status_print(struct seq_file *s, void *p)
int i;
/* print dump header */
- err = seq_printf(s, "ux500-regulator status:\n");
+ err = seq_puts(s, "ux500-regulator status:\n");
if (err < 0)
- dev_err(dev, "seq_printf overflow\n");
+ dev_err(dev, "seq_puts overflow\n");
err = seq_printf(s, "%31s : %8s : %8s\n", "current",
"before", "after");
@@ -202,18 +204,12 @@ ux500_regulator_debug_init(struct platform_device *pdev,
rdebug.num_regulators = num_regulators;
rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
- if (!rdebug.state_before_suspend) {
- dev_err(&pdev->dev,
- "could not allocate memory for saving state\n");
+ if (!rdebug.state_before_suspend)
goto exit_destroy_power_state;
- }
rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
- if (!rdebug.state_after_suspend) {
- dev_err(&pdev->dev,
- "could not allocate memory for saving state\n");
+ if (!rdebug.state_after_suspend)
goto exit_free;
- }
dbx500_regulator_testcase(regulator_info, num_regulators);
return 0;
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index df9f42524abb..2436db9e2ca3 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -25,7 +25,11 @@
struct regulator_dev *dummy_regulator_rdev;
-static struct regulator_init_data dummy_initdata;
+static struct regulator_init_data dummy_initdata = {
+ .constraints = {
+ .always_on = 1,
+ },
+};
static struct regulator_ops dummy_ops;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 7ca3d9e3b0fe..714fd9a89aa1 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -90,11 +90,11 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return 0;
ret = regulator_map_voltage_linear(rdev, uV, uV);
if (ret < 0)
- return -EINVAL;
+ return ret;
ret = regmap_update_bits(di->regmap, di->sleep_reg,
VSEL_NSEL_MASK, ret);
if (ret < 0)
- return -EINVAL;
+ return ret;
/* Cache the sleep voltage setting.
* Might not be the real voltage which is rounded */
di->sleep_vol_cache = uV;
@@ -244,10 +244,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
GFP_KERNEL);
- if (!di) {
- dev_err(&client->dev, "Failed to allocate device info data!\n");
+ if (!di)
return -ENOMEM;
- }
+
di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
if (IS_ERR(di->regmap)) {
dev_err(&client->dev, "Failed to allocate regmap!\n");
@@ -260,14 +259,14 @@ static int fan53555_regulator_probe(struct i2c_client *client,
ret = regmap_read(di->regmap, FAN53555_ID1, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip ID!\n");
- return -ENODEV;
+ return ret;
}
di->chip_id = val & DIE_ID;
/* Get chip revision */
ret = regmap_read(di->regmap, FAN53555_ID2, &val);
if (ret < 0) {
dev_err(&client->dev, "Failed to get chip Rev!\n");
- return -ENODEV;
+ return ret;
}
di->chip_rev = val & DIE_REV;
dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n",
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 5ea64b94341c..c61f7e97e4f8 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -130,17 +130,15 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
GFP_KERNEL);
- if (drvdata == NULL) {
- dev_err(&pdev->dev, "Failed to allocate device data\n");
- ret = -ENOMEM;
- goto err;
- }
+ if (!drvdata)
+ return -ENOMEM;
- drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ drvdata->desc.name = devm_kstrdup(&pdev->dev,
+ config->supply_name,
+ GFP_KERNEL);
if (drvdata->desc.name == NULL) {
dev_err(&pdev->dev, "Failed to allocate supply name\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
@@ -149,13 +147,13 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.enable_time = config->startup_delay;
if (config->input_supply) {
- drvdata->desc.supply_name = kstrdup(config->input_supply,
- GFP_KERNEL);
+ drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+ config->input_supply,
+ GFP_KERNEL);
if (!drvdata->desc.supply_name) {
dev_err(&pdev->dev,
"Failed to allocate input supply\n");
- ret = -ENOMEM;
- goto err_name;
+ return -ENOMEM;
}
}
@@ -186,11 +184,12 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
cfg.driver_data = drvdata;
cfg.of_node = pdev->dev.of_node;
- drvdata->dev = regulator_register(&drvdata->desc, &cfg);
+ drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
+ &cfg);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_input;
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
@@ -199,24 +198,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.fixed_uV);
return 0;
-
-err_input:
- kfree(drvdata->desc.supply_name);
-err_name:
- kfree(drvdata->desc.name);
-err:
- return ret;
-}
-
-static int reg_fixed_voltage_remove(struct platform_device *pdev)
-{
- struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
-
- regulator_unregister(drvdata->dev);
- kfree(drvdata->desc.supply_name);
- kfree(drvdata->desc.name);
-
- return 0;
}
#if defined(CONFIG_OF)
@@ -229,7 +210,6 @@ MODULE_DEVICE_TABLE(of, fixed_of_match);
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
- .remove = reg_fixed_voltage_remove,
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 234960dc9607..989b23b377c0 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -136,7 +136,6 @@ static struct gpio_regulator_config *
of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
{
struct gpio_regulator_config *config;
- struct property *prop;
const char *regtype;
int proplen, gpio, i;
int ret;
@@ -172,22 +171,35 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
if (!config->gpios)
return ERR_PTR(-ENOMEM);
+ proplen = of_property_count_u32_elems(np, "gpios-states");
+ /* optional property */
+ if (proplen < 0)
+ proplen = 0;
+
+ if (proplen > 0 && proplen != config->nr_gpios) {
+ dev_warn(dev, "gpios <-> gpios-states mismatch\n");
+ proplen = 0;
+ }
+
for (i = 0; i < config->nr_gpios; i++) {
gpio = of_get_named_gpio(np, "gpios", i);
if (gpio < 0)
break;
config->gpios[i].gpio = gpio;
+ if (proplen > 0) {
+ of_property_read_u32_index(np, "gpios-states", i, &ret);
+ if (ret)
+ config->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
+ }
}
/* Fetch states. */
- prop = of_find_property(np, "states", NULL);
- if (!prop) {
+ proplen = of_property_count_u32_elems(np, "states");
+ if (proplen < 0) {
dev_err(dev, "No 'states' property found\n");
return ERR_PTR(-EINVAL);
}
- proplen = prop->length / sizeof(int);
-
config->states = devm_kzalloc(dev,
sizeof(struct gpio_regulator_state)
* (proplen / 2),
@@ -196,24 +208,25 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
return ERR_PTR(-ENOMEM);
for (i = 0; i < proplen / 2; i++) {
- config->states[i].value =
- be32_to_cpup((int *)prop->value + (i * 2));
- config->states[i].gpios =
- be32_to_cpup((int *)prop->value + (i * 2 + 1));
+ of_property_read_u32_index(np, "states", i * 2,
+ &config->states[i].value);
+ of_property_read_u32_index(np, "states", i * 2 + 1,
+ &config->states[i].gpios);
}
config->nr_states = i;
+ config->type = REGULATOR_VOLTAGE;
ret = of_property_read_string(np, "regulator-type", &regtype);
- if (ret < 0) {
- dev_err(dev, "Missing 'regulator-type' property\n");
- return ERR_PTR(-EINVAL);
+ if (ret >= 0) {
+ if (!strncmp("voltage", regtype, 7))
+ config->type = REGULATOR_VOLTAGE;
+ else if (!strncmp("current", regtype, 7))
+ config->type = REGULATOR_CURRENT;
+ else
+ dev_warn(dev, "Unknown regulator-type '%s'\n",
+ regtype);
}
- if (!strncmp("voltage", regtype, 7))
- config->type = REGULATOR_VOLTAGE;
- else if (!strncmp("current", regtype, 7))
- config->type = REGULATOR_CURRENT;
-
return config;
}
@@ -238,10 +251,8 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
- if (drvdata == NULL) {
- dev_err(&pdev->dev, "Failed to allocate device data\n");
+ if (drvdata == NULL)
return -ENOMEM;
- }
drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
if (drvdata->desc.name == NULL) {
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index e221a271ba56..cbc39096c78d 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -37,10 +37,17 @@ int regulator_is_enabled_regmap(struct regulator_dev *rdev)
if (ret != 0)
return ret;
- if (rdev->desc->enable_is_inverted)
- return (val & rdev->desc->enable_mask) == 0;
- else
- return (val & rdev->desc->enable_mask) != 0;
+ val &= rdev->desc->enable_mask;
+
+ if (rdev->desc->enable_is_inverted) {
+ if (rdev->desc->enable_val)
+ return val != rdev->desc->enable_val;
+ return val == 0;
+ } else {
+ if (rdev->desc->enable_val)
+ return val == rdev->desc->enable_val;
+ return val != 0;
+ }
}
EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
@@ -57,10 +64,13 @@ int regulator_enable_regmap(struct regulator_dev *rdev)
{
unsigned int val;
- if (rdev->desc->enable_is_inverted)
- val = 0;
- else
- val = rdev->desc->enable_mask;
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->disable_val;
+ } else {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val);
@@ -80,10 +90,13 @@ int regulator_disable_regmap(struct regulator_dev *rdev)
{
unsigned int val;
- if (rdev->desc->enable_is_inverted)
- val = rdev->desc->enable_mask;
- else
- val = 0;
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ } else {
+ val = rdev->desc->disable_val;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val);
@@ -419,10 +432,13 @@ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
{
unsigned int val;
- if (enable)
- val = rdev->desc->bypass_mask;
- else
- val = 0;
+ if (enable) {
+ val = rdev->desc->bypass_val_on;
+ if (!val)
+ val = rdev->desc->bypass_mask;
+ } else {
+ val = rdev->desc->bypass_val_off;
+ }
return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
rdev->desc->bypass_mask, val);
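A minimal sketch of a descriptor that relies on the enable_val/disable_val handling added to the regmap helpers above; the register layout and names are invented for illustration and do not come from any driver in this series. The example models a two-bit control field where 0x3 means enabled and 0x1 means disabled, so neither state is all-zeros or equal to the full mask:

static struct regulator_ops example_regmap_ops = {
	.enable		= regulator_enable_regmap,
	.disable	= regulator_disable_regmap,
	.is_enabled	= regulator_is_enabled_regmap,
};

static const struct regulator_desc example_desc = {
	.name		= "EXAMPLE",
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &example_regmap_ops,
	.enable_reg	= 0x10,		/* hypothetical register address */
	.enable_mask	= 0x03,		/* two-bit mode field */
	.enable_val	= 0x03,		/* written on enable, compared by is_enabled */
	.disable_val	= 0x01,		/* written on disable */
};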
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 947c05ffe0ab..66fd2330dca0 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -25,8 +25,6 @@ struct lp3971 {
struct device *dev;
struct mutex io_lock;
struct i2c_client *i2c;
- int num_regulators;
- struct regulator_dev **rdev;
};
static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg);
@@ -329,7 +327,7 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
return -EIO;
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*dest = ret;
return 0;
@@ -383,42 +381,27 @@ static int setup_regulators(struct lp3971 *lp3971,
{
int i, err;
- lp3971->num_regulators = pdata->num_regulators;
- lp3971->rdev = kcalloc(pdata->num_regulators,
- sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!lp3971->rdev) {
- err = -ENOMEM;
- goto err_nomem;
- }
-
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct regulator_config config = { };
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
+ struct regulator_dev *rdev;
config.dev = lp3971->dev;
config.init_data = reg->initdata;
config.driver_data = lp3971;
- lp3971->rdev[i] = regulator_register(&regulators[reg->id],
- &config);
- if (IS_ERR(lp3971->rdev[i])) {
- err = PTR_ERR(lp3971->rdev[i]);
+ rdev = devm_regulator_register(lp3971->dev,
+ &regulators[reg->id], &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
dev_err(lp3971->dev, "regulator init failed: %d\n",
err);
- goto error;
+ return err;
}
}
return 0;
-
-error:
- while (--i >= 0)
- regulator_unregister(lp3971->rdev[i]);
- kfree(lp3971->rdev);
- lp3971->rdev = NULL;
-err_nomem:
- return err;
}
static int lp3971_i2c_probe(struct i2c_client *i2c,
@@ -460,19 +443,6 @@ static int lp3971_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int lp3971_i2c_remove(struct i2c_client *i2c)
-{
- struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < lp3971->num_regulators; i++)
- regulator_unregister(lp3971->rdev[i]);
-
- kfree(lp3971->rdev);
-
- return 0;
-}
-
static const struct i2c_device_id lp3971_i2c_id[] = {
{ "lp3971", 0 },
{ }
@@ -485,7 +455,6 @@ static struct i2c_driver lp3971_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3971_i2c_probe,
- .remove = lp3971_i2c_remove,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 093e6f44ff8a..aea485afcc1a 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -22,8 +22,6 @@ struct lp3972 {
struct device *dev;
struct mutex io_lock;
struct i2c_client *i2c;
- int num_regulators;
- struct regulator_dev **rdev;
};
/* LP3972 Control Registers */
@@ -478,41 +476,27 @@ static int setup_regulators(struct lp3972 *lp3972,
{
int i, err;
- lp3972->num_regulators = pdata->num_regulators;
- lp3972->rdev = kcalloc(pdata->num_regulators,
- sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!lp3972->rdev) {
- err = -ENOMEM;
- goto err_nomem;
- }
-
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
struct regulator_config config = { };
+ struct regulator_dev *rdev;
config.dev = lp3972->dev;
config.init_data = reg->initdata;
config.driver_data = lp3972;
- lp3972->rdev[i] = regulator_register(&regulators[reg->id],
- &config);
- if (IS_ERR(lp3972->rdev[i])) {
- err = PTR_ERR(lp3972->rdev[i]);
+ rdev = devm_regulator_register(lp3972->dev,
+ &regulators[reg->id], &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
dev_err(lp3972->dev, "regulator init failed: %d\n",
err);
- goto error;
+ return err;
}
}
return 0;
-error:
- while (--i >= 0)
- regulator_unregister(lp3972->rdev[i]);
- kfree(lp3972->rdev);
- lp3972->rdev = NULL;
-err_nomem:
- return err;
}
static int lp3972_i2c_probe(struct i2c_client *i2c,
@@ -557,18 +541,6 @@ static int lp3972_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int lp3972_i2c_remove(struct i2c_client *i2c)
-{
- struct lp3972 *lp3972 = i2c_get_clientdata(i2c);
- int i;
-
- for (i = 0; i < lp3972->num_regulators; i++)
- regulator_unregister(lp3972->rdev[i]);
- kfree(lp3972->rdev);
-
- return 0;
-}
-
static const struct i2c_device_id lp3972_i2c_id[] = {
{ "lp3972", 0 },
{ }
@@ -581,7 +553,6 @@ static struct i2c_driver lp3972_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3972_i2c_probe,
- .remove = lp3972_i2c_remove,
.id_table = lp3972_i2c_id,
};
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 2e4734ff79fc..2e022aabd951 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -211,7 +211,7 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
if (ret)
- return -EINVAL;
+ return ret;
val = (val & mask) >> shift;
if (val >= size)
@@ -229,7 +229,7 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
u8 addr, val;
if (time_step_us < 0)
- return -EINVAL;
+ return time_step_us;
switch (rid) {
case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
new file mode 100644
index 000000000000..ed60baaeceec
--- /dev/null
+++ b/drivers/regulator/max14577.c
@@ -0,0 +1,269 @@
+/*
+ * max14577.c - Regulator driver for the Maxim 14577
+ *
+ * Copyright (C) 2013,2014 Samsung Electronics
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/max14577.h>
+#include <linux/mfd/max14577-private.h>
+#include <linux/regulator/of_regulator.h>
+
+static int max14577_reg_is_enabled(struct regulator_dev *rdev)
+{
+ int rid = rdev_get_id(rdev);
+ struct regmap *rmap = rdev->regmap;
+ u8 reg_data;
+
+ switch (rid) {
+ case MAX14577_CHARGER:
+ max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL2, &reg_data);
+ if ((reg_data & CHGCTRL2_MBCHOSTEN_MASK) == 0)
+ return 0;
+ max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, &reg_data);
+ if ((reg_data & STATUS3_CGMBC_MASK) == 0)
+ return 0;
+ /* MBCHOSTEN and CGMBC are on */
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
+{
+ u8 reg_data;
+ struct regmap *rmap = rdev->regmap;
+
+ if (rdev_get_id(rdev) != MAX14577_CHARGER)
+ return -EINVAL;
+
+ max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
+
+ if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
+ return MAX14577_REGULATOR_CURRENT_LIMIT_MIN;
+
+ reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >>
+ CHGCTRL4_MBCICHWRCH_SHIFT);
+ return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
+ reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP;
+}
+
+static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ int i, current_bits = 0xf;
+ u8 reg_data;
+
+ if (rdev_get_id(rdev) != MAX14577_CHARGER)
+ return -EINVAL;
+
+ if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX ||
+ max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN)
+ return -EINVAL;
+
+ if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) {
+ /* Less than 200 mA, so set 90mA (turn only Low Bit off) */
+ u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT;
+ return max14577_update_reg(rdev->regmap,
+ MAX14577_CHG_REG_CHG_CTRL4,
+ CHGCTRL4_MBCICHWRCL_MASK, reg_data);
+ }
+
+	/* max_uA is in range: <LIMIT_HIGH_START, infinite>, so search for
+ * valid current starting from LIMIT_MAX. */
+ for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX;
+ i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START;
+ i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) {
+ if (i <= max_uA)
+ break;
+ current_bits--;
+ }
+ BUG_ON(current_bits < 0); /* Cannot happen */
+ /* Turn Low Bit on (use range 200mA-950 mA) */
+ reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT;
+ /* and set proper High Bits */
+ reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT;
+
+ return max14577_update_reg(rdev->regmap, MAX14577_CHG_REG_CHG_CTRL4,
+ CHGCTRL4_MBCICHWRCL_MASK | CHGCTRL4_MBCICHWRCH_MASK,
+ reg_data);
+}
+
+static struct regulator_ops max14577_safeout_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static struct regulator_ops max14577_charger_ops = {
+ .is_enabled = max14577_reg_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_current_limit = max14577_reg_get_current_limit,
+ .set_current_limit = max14577_reg_set_current_limit,
+};
+
+static const struct regulator_desc supported_regulators[] = {
+ [MAX14577_SAFEOUT] = {
+ .name = "SAFEOUT",
+ .id = MAX14577_SAFEOUT,
+ .ops = &max14577_safeout_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE,
+ .enable_reg = MAX14577_REG_CONTROL2,
+ .enable_mask = CTRL2_SFOUTORD_MASK,
+ },
+ [MAX14577_CHARGER] = {
+ .name = "CHARGER",
+ .id = MAX14577_CHARGER,
+ .ops = &max14577_charger_ops,
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .enable_reg = MAX14577_CHG_REG_CHG_CTRL2,
+ .enable_mask = CHGCTRL2_MBCHOSTEN_MASK,
+ },
+};
+
+#ifdef CONFIG_OF
+static struct of_regulator_match max14577_regulator_matches[] = {
+ { .name = "SAFEOUT", },
+ { .name = "CHARGER", },
+};
+
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *np;
+
+ np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
+ return -EINVAL;
+ }
+
+ ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
+ MAX14577_REG_MAX);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
+ else
+ ret = 0;
+
+ of_node_put(np);
+
+ return ret;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return max14577_regulator_matches[index].init_data;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return max14577_regulator_matches[index].of_node;
+}
+#else /* CONFIG_OF */
+static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return NULL;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+
+static int max14577_regulator_probe(struct platform_device *pdev)
+{
+ struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
+ struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
+ int i, ret;
+ struct regulator_config config = {};
+
+ ret = max14577_regulator_dt_parse_pdata(pdev);
+ if (ret)
+ return ret;
+
+ config.dev = &pdev->dev;
+ config.regmap = max14577->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+ struct regulator_dev *regulator;
+ /*
+ * Index of supported_regulators[] is also the id and must
+ * match index of pdata->regulators[].
+ */
+ if (pdata && pdata->regulators) {
+ config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].of_node;
+ } else {
+ config.init_data = match_init_data(i);
+ config.of_node = match_of_node(i);
+ }
+
+ regulator = devm_regulator_register(&pdev->dev,
+ &supported_regulators[i], &config);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev,
+ "Regulator init failed for ID %d with error: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static struct platform_driver max14577_regulator_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "max14577-regulator",
+ },
+ .probe = max14577_regulator_probe,
+};
+
+static int __init max14577_regulator_init(void)
+{
+ BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
+ MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf !=
+ MAX14577_REGULATOR_CURRENT_LIMIT_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX);
+
+ return platform_driver_register(&max14577_regulator_driver);
+}
+subsys_initcall(max14577_regulator_init);
+
+static void __exit max14577_regulator_exit(void)
+{
+ platform_driver_unregister(&max14577_regulator_driver);
+}
+module_exit(max14577_regulator_exit);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 14577 regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index e242dd316d36..d23d0577754b 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -46,8 +46,6 @@ struct max1586_data {
unsigned int v3_curr_sel;
unsigned int v6_curr_sel;
-
- struct regulator_dev *rdev[0];
};
/*
@@ -162,14 +160,12 @@ static struct regulator_desc max1586_reg[] = {
static int max1586_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct regulator_dev **rdev;
struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max1586_data *max1586;
int i, id;
- max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
- sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
+ max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data),
GFP_KERNEL);
if (!max1586)
return -ENOMEM;
@@ -186,8 +182,9 @@ static int max1586_pmic_probe(struct i2c_client *client,
max1586->v3_curr_sel = 24; /* 1.3V */
max1586->v6_curr_sel = 0;
- rdev = max1586->rdev;
for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
+ struct regulator_dev *rdev;
+
id = pdata->subdevs[i].id;
if (!pdata->subdevs[i].platform_data)
continue;
@@ -207,12 +204,12 @@ static int max1586_pmic_probe(struct i2c_client *client,
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
- rdev[i] = devm_regulator_register(&client->dev,
+ rdev = devm_regulator_register(&client->dev,
&max1586_reg[id], &config);
- if (IS_ERR(rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
max1586_reg[id].name);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index ae001ccf26f4..ef1af2debbd2 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -65,7 +65,6 @@ enum max77686_ramp_rate {
};
struct max77686_data {
- struct regulator_dev *rdev[MAX77686_REGULATORS];
unsigned int opmode[MAX77686_REGULATORS];
};
@@ -400,7 +399,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
unsigned int i;
pmic_np = iodev->dev->of_node;
- regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "voltage-regulators");
if (!regulators_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -410,8 +409,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
- dev_err(&pdev->dev,
- "could not allocate memory for regulator data\n");
+ of_node_put(regulators_np);
return -ENOMEM;
}
@@ -425,6 +423,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
}
pdata->regulators = rdata;
+ of_node_put(regulators_np);
return 0;
}
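The recurring cleanup in this series is worth spelling out: of_find_node_by_name() walks the whole tree from the given node (and drops the reference passed in), while of_get_child_by_name() looks only at direct children and returns a node whose reference the caller must release with of_node_put() on every exit path, including the error paths fixed here. A minimal sketch of the resulting pattern, with placeholder function and table names (not part of the patch):

    static int parse_regulators(struct device *dev, struct device_node *parent,
                                struct of_regulator_match *matches, int num)
    {
            struct device_node *np;
            int ret;

            /* Direct child lookup; takes a reference on the returned node. */
            np = of_get_child_by_name(parent, "regulators");
            if (!np)
                    return -EINVAL;

            ret = of_regulator_match(dev, np, matches, num);

            /* Drop the reference on both the success and the error path. */
            of_node_put(np);

            return ret < 0 ? ret : 0;
    }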
@@ -474,16 +473,18 @@ static int max77686_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, max77686);
for (i = 0; i < MAX77686_REGULATORS; i++) {
+ struct regulator_dev *rdev;
+
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
max77686->opmode[i] = regulators[i].enable_mask;
- max77686->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
- if (IS_ERR(max77686->rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"regulator init failed for %d\n", i);
- return PTR_ERR(max77686->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index feb20bf4ccab..653a58b49cdf 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -34,13 +34,6 @@
#define CHGIN_ILIM_STEP_20mA 20000
-struct max77693_pmic_dev {
- struct device *dev;
- struct max77693_dev *iodev;
- int num_regulators;
- struct regulator_dev **rdev;
-};
-
/* CHARGER regulator ops */
/* CHARGER regulator uses two bits for enabling */
static int max77693_chg_is_enabled(struct regulator_dev *rdev)
@@ -138,6 +131,7 @@ static struct regulator_ops max77693_charger_ops = {
.n_voltages = 4, \
.ops = &max77693_safeout_ops, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
.volt_table = max77693_safeout_table, \
.vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \
.vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \
@@ -169,19 +163,22 @@ static int max77693_pmic_dt_parse_rdata(struct device *dev,
struct max77693_regulator_data *tmp;
int i, matched = 0;
- np = of_find_node_by_name(dev->parent->of_node, "regulators");
+ np = of_get_child_by_name(dev->parent->of_node, "regulators");
if (!np)
return -EINVAL;
rmatch = devm_kzalloc(dev,
sizeof(*rmatch) * ARRAY_SIZE(regulators), GFP_KERNEL);
- if (!rmatch)
+ if (!rmatch) {
+ of_node_put(np);
return -ENOMEM;
+ }
for (i = 0; i < ARRAY_SIZE(regulators); i++)
rmatch[i].name = regulators[i].name;
matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(regulators));
+ of_node_put(np);
if (matched <= 0)
return matched;
*rdata = devm_kzalloc(dev, sizeof(**rdata) * matched, GFP_KERNEL);
@@ -228,7 +225,6 @@ static int max77693_pmic_init_rdata(struct device *dev,
static int max77693_pmic_probe(struct platform_device *pdev)
{
struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max77693_pmic_dev *max77693_pmic;
struct max77693_regulator_data *rdata = NULL;
int num_rdata, i;
struct regulator_config config;
@@ -239,39 +235,22 @@ static int max77693_pmic_probe(struct platform_device *pdev)
return -ENODEV;
}
- max77693_pmic = devm_kzalloc(&pdev->dev,
- sizeof(struct max77693_pmic_dev),
- GFP_KERNEL);
- if (!max77693_pmic)
- return -ENOMEM;
-
- max77693_pmic->rdev = devm_kzalloc(&pdev->dev,
- sizeof(struct regulator_dev *) * num_rdata,
- GFP_KERNEL);
- if (!max77693_pmic->rdev)
- return -ENOMEM;
-
- max77693_pmic->dev = &pdev->dev;
- max77693_pmic->iodev = iodev;
- max77693_pmic->num_regulators = num_rdata;
-
config.dev = &pdev->dev;
config.regmap = iodev->regmap;
- config.driver_data = max77693_pmic;
- platform_set_drvdata(pdev, max77693_pmic);
- for (i = 0; i < max77693_pmic->num_regulators; i++) {
+ for (i = 0; i < num_rdata; i++) {
int id = rdata[i].id;
+ struct regulator_dev *rdev;
config.init_data = rdata[i].initdata;
config.of_node = rdata[i].of_node;
- max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&regulators[id], &config);
- if (IS_ERR(max77693_pmic->rdev[i])) {
- dev_err(max77693_pmic->dev,
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
"Failed to initialize regulator-%d\n", id);
- return PTR_ERR(max77693_pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 7f049c92ee52..3172da847d24 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -49,7 +49,6 @@
#define MAX8649_RAMP_DOWN (1 << 1)
struct max8649_regulator_info {
- struct regulator_dev *regulator;
struct device *dev;
struct regmap *regmap;
@@ -154,6 +153,7 @@ static int max8649_regulator_probe(struct i2c_client *client,
{
struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
struct max8649_regulator_info *info = NULL;
+ struct regulator_dev *regulator;
struct regulator_config config = { };
unsigned int val;
unsigned char data;
@@ -234,12 +234,12 @@ static int max8649_regulator_probe(struct i2c_client *client,
config.driver_data = info;
config.regmap = info->regmap;
- info->regulator = devm_regulator_register(&client->dev, &dcdc_desc,
+ regulator = devm_regulator_register(&client->dev, &dcdc_desc,
&config);
- if (IS_ERR(info->regulator)) {
+ if (IS_ERR(regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(regulator);
}
return 0;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 8d94d3d7f97f..2fc411188794 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -81,16 +81,17 @@ enum {
struct max8660 {
struct i2c_client *client;
u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
- struct regulator_dev *rdev[];
};
static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
{
- static const u8 max8660_addresses[MAX8660_N_REGS] =
- { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
+ static const u8 max8660_addresses[MAX8660_N_REGS] = {
+ 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80
+ };
int ret;
u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
+
dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
max8660_addresses[reg], reg_val);
@@ -112,6 +113,7 @@ static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 val = max8660->shadow_regs[MAX8660_OVER1];
u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
return !!(val & mask);
}
@@ -119,6 +121,7 @@ static int max8660_dcdc_enable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
}
@@ -126,15 +129,16 @@ static int max8660_dcdc_disable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
+
return max8660_write(max8660, MAX8660_OVER1, mask, 0);
}
static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
u8 selector = max8660->shadow_regs[reg];
+
return selector;
}
@@ -207,6 +211,7 @@ static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 val = max8660->shadow_regs[MAX8660_OVER2];
u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
return !!(val & mask);
}
@@ -214,6 +219,7 @@ static int max8660_ldo67_enable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
}
@@ -221,15 +227,16 @@ static int max8660_ldo67_disable(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
+
return max8660_write(max8660, MAX8660_OVER2, mask, 0);
}
static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
+
return selector;
}
@@ -330,7 +337,7 @@ static int max8660_pdata_from_dt(struct device *dev,
struct max8660_subdev_data *sub;
struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
- np = of_find_node_by_name(dev->of_node, "regulators");
+ np = of_get_child_by_name(dev->of_node, "regulators");
if (!np) {
dev_err(dev, "missing 'regulators' subnode in DT\n");
return -EINVAL;
@@ -340,6 +347,7 @@ static int max8660_pdata_from_dt(struct device *dev,
rmatch[i].name = max8660_reg[i].name;
matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+ of_node_put(np);
if (matched <= 0)
return matched;
@@ -373,7 +381,6 @@ static inline int max8660_pdata_from_dt(struct device *dev,
static int max8660_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct regulator_dev **rdev;
struct device *dev = &client->dev;
struct max8660_platform_data *pdata = dev_get_platdata(dev);
struct regulator_config config = { };
@@ -406,14 +413,11 @@ static int max8660_probe(struct i2c_client *client,
return -EINVAL;
}
- max8660 = devm_kzalloc(dev, sizeof(struct max8660) +
- sizeof(struct regulator_dev *) * MAX8660_V_END,
- GFP_KERNEL);
+ max8660 = devm_kzalloc(dev, sizeof(struct max8660), GFP_KERNEL);
if (!max8660)
return -ENOMEM;
max8660->client = client;
- rdev = max8660->rdev;
if (pdata->en34_is_high) {
/* Simulate always on */
@@ -481,6 +485,7 @@ static int max8660_probe(struct i2c_client *client,
/* Finally register devices */
for (i = 0; i < pdata->num_subdevs; i++) {
+ struct regulator_dev *rdev;
id = pdata->subdevs[i].id;
@@ -489,13 +494,13 @@ static int max8660_probe(struct i2c_client *client,
config.of_node = of_node[i];
config.driver_data = max8660;
- rdev[i] = devm_regulator_register(&client->dev,
+ rdev = devm_regulator_register(&client->dev,
&max8660_reg[id], &config);
- if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(&client->dev, "failed to register %s\n",
max8660_reg[id].name);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 0c5fe6c6ac26..9623e9e290bf 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -34,7 +34,6 @@
struct max8907_regulator {
struct regulator_desc desc[MAX8907_NUM_REGULATORS];
- struct regulator_dev *rdev[MAX8907_NUM_REGULATORS];
};
#define REG_MBATT() \
@@ -231,7 +230,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
if (!np)
return 0;
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulators node not found\n");
return -EINVAL;
@@ -292,10 +291,9 @@ static int max8907_regulator_probe(struct platform_device *pdev)
return ret;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "Failed to alloc pmic\n");
+ if (!pmic)
return -ENOMEM;
- }
+
platform_set_drvdata(pdev, pmic);
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
@@ -311,6 +309,8 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX8907_NUM_REGULATORS; i++) {
+ struct regulator_dev *rdev;
+
config.dev = pdev->dev.parent;
if (pdata)
idata = pdata->init_data[i];
@@ -350,13 +350,13 @@ static int max8907_regulator_probe(struct platform_device *pdev)
pmic->desc[i].ops = &max8907_out5v_hwctl_ops;
}
- pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+ rdev = devm_regulator_register(&pdev->dev,
&pmic->desc[i], &config);
- if (IS_ERR(pmic->rdev[i])) {
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
pmic->desc[i].name);
- return PTR_ERR(pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 759510789e71..dad2bcd14e96 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -36,9 +36,7 @@
struct max8925_regulator_info {
struct regulator_desc desc;
- struct regulator_dev *regulator;
struct i2c_client *i2c;
- struct max8925_chip *chip;
int vol_reg;
int enable_reg;
@@ -251,10 +249,11 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
{
struct device_node *nproot, *np;
int rcount;
+
nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
- np = of_find_node_by_name(nproot, "regulators");
+ np = of_get_child_by_name(nproot, "regulators");
if (!np) {
dev_err(&pdev->dev, "failed to find regulators node\n");
return -ENODEV;
@@ -264,7 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
&max8925_regulator_matches[ridx], 1);
of_node_put(np);
if (rcount < 0)
- return -ENODEV;
+ return rcount;
config->init_data = max8925_regulator_matches[ridx].init_data;
config->of_node = max8925_regulator_matches[ridx].of_node;
@@ -303,7 +302,6 @@ static int max8925_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
ri->i2c = chip->i2c;
- ri->chip = chip;
config.dev = &pdev->dev;
config.driver_data = ri;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 788e5ae2af1b..d920f5a32ec8 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -48,9 +48,7 @@ enum {
struct max8952_data {
struct i2c_client *client;
- struct device *dev;
struct max8952_platform_data *pdata;
- struct regulator_dev *rdev;
bool vid0;
bool vid1;
@@ -59,6 +57,7 @@ struct max8952_data {
static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
{
int ret = i2c_smbus_read_byte_data(max8952->client, reg);
+
if (ret > 0)
ret &= 0xff;
@@ -144,10 +143,8 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
int i;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "Failed to allocate platform data\n");
+ if (!pd)
return NULL;
- }
pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
@@ -199,6 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max8952_data *max8952;
+ struct regulator_dev *rdev;
int ret = 0, err = 0;
@@ -219,10 +217,9 @@ static int max8952_pmic_probe(struct i2c_client *client,
return -ENOMEM;
max8952->client = client;
- max8952->dev = &client->dev;
max8952->pdata = pdata;
- config.dev = max8952->dev;
+ config.dev = &client->dev;
config.init_data = pdata->reg_data;
config.driver_data = max8952;
config.of_node = client->dev.of_node;
@@ -231,11 +228,11 @@ static int max8952_pmic_probe(struct i2c_client *client,
if (pdata->reg_data->constraints.boot_on)
config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- max8952->rdev = regulator_register(&regulator, &config);
+ rdev = devm_regulator_register(&client->dev, &regulator, &config);
- if (IS_ERR(max8952->rdev)) {
- ret = PTR_ERR(max8952->rdev);
- dev_err(max8952->dev, "regulator init failed (%d)\n", ret);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&client->dev, "regulator init failed (%d)\n", ret);
return ret;
}
@@ -263,7 +260,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
err = 3;
if (err) {
- dev_warn(max8952->dev, "VID0/1 gpio invalid: "
+ dev_warn(&client->dev, "VID0/1 gpio invalid: "
"DVS not available.\n");
max8952->vid0 = 0;
max8952->vid1 = 0;
@@ -274,7 +271,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
/* Disable Pulldown of EN only */
max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
- dev_err(max8952->dev, "DVS modes disabled because VID0 and VID1"
+ dev_err(&client->dev, "DVS modes disabled because VID0 and VID1"
" do not have proper controls.\n");
} else {
/*
@@ -321,9 +318,6 @@ static int max8952_pmic_remove(struct i2c_client *client)
{
struct max8952_data *max8952 = i2c_get_clientdata(client);
struct max8952_platform_data *pdata = max8952->pdata;
- struct regulator_dev *rdev = max8952->rdev;
-
- regulator_unregister(rdev);
gpio_free(pdata->gpio_vid0);
gpio_free(pdata->gpio_vid1);
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 892aa1e5b96c..dbedf1768db0 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -93,7 +93,6 @@
struct max8973_chip {
struct device *dev;
struct regulator_desc desc;
- struct regulator_dev *rdev;
struct regmap *regmap;
bool enable_external_control;
int dvs_gpio;
@@ -379,10 +378,8 @@ static int max8973_probe(struct i2c_client *client,
}
max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
- if (!max) {
- dev_err(&client->dev, "Memory allocation for max failed\n");
+ if (!max)
return -ENOMEM;
- }
max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
if (IS_ERR(max->regmap)) {
@@ -474,7 +471,6 @@ static int max8973_probe(struct i2c_client *client,
return ret;
}
- max->rdev = rdev;
return 0;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 2d618fc9c1af..90b4c530dee5 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -38,7 +38,6 @@ struct max8997_data {
struct device *dev;
struct max8997_dev *iodev;
int num_regulators;
- struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
bool buck1_gpiodvs;
@@ -924,7 +923,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -937,7 +936,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
of_node_put(regulators_np);
- dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
return -ENOMEM;
}
@@ -1030,10 +1028,10 @@ static int max8997_pmic_probe(struct platform_device *pdev)
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct max8997_data *max8997;
struct i2c_client *i2c;
- int i, ret, size, nr_dvs;
+ int i, ret, nr_dvs;
u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
if (!pdata) {
@@ -1052,12 +1050,6 @@ static int max8997_pmic_probe(struct platform_device *pdev)
if (!max8997)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * pdata->num_regulators;
- max8997->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!max8997->rdev)
- return -ENOMEM;
-
- rdev = max8997->rdev;
max8997->dev = &pdev->dev;
max8997->iodev = iodev;
max8997->num_regulators = pdata->num_regulators;
@@ -1205,12 +1197,12 @@ static int max8997_pmic_probe(struct platform_device *pdev)
config.driver_data = max8997;
config.of_node = pdata->regulators[i].reg_node;
- rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
- &config);
- if (IS_ERR(rdev[i])) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[id],
+ &config);
+ if (IS_ERR(rdev)) {
dev_err(max8997->dev, "regulator init failed for %d\n",
id);
- return PTR_ERR(rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ae3f0656feb0..961091b46557 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -40,7 +40,6 @@ struct max8998_data {
struct device *dev;
struct max8998_dev *iodev;
int num_regulators;
- struct regulator_dev **rdev;
u8 buck1_vol[4]; /* voltages for selection */
u8 buck2_vol[2];
unsigned int buck1_idx; /* index to last changed voltage */
@@ -674,8 +673,10 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
- if (!rdata)
+ if (!rdata) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
pdata->regulators = rdata;
for (i = 0; i < ARRAY_SIZE(regulators); ++i) {
@@ -692,6 +693,9 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
}
pdata->num_regulators = rdata - pdata->regulators;
+ of_node_put(reg_np);
+ of_node_put(regulators_np);
+
ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
if (ret)
return -EINVAL;
@@ -741,10 +745,10 @@ static int max8998_pmic_probe(struct platform_device *pdev)
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8998_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct max8998_data *max8998;
struct i2c_client *i2c;
- int i, ret, size;
+ int i, ret;
unsigned int v;
if (!pdata) {
@@ -763,12 +767,6 @@ static int max8998_pmic_probe(struct platform_device *pdev)
if (!max8998)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * pdata->num_regulators;
- max8998->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!max8998->rdev)
- return -ENOMEM;
-
- rdev = max8998->rdev;
max8998->dev = &pdev->dev;
max8998->iodev = iodev;
max8998->num_regulators = pdata->num_regulators;
@@ -872,13 +870,12 @@ static int max8998_pmic_probe(struct platform_device *pdev)
config.init_data = pdata->regulators[i].initdata;
config.driver_data = max8998;
- rdev[i] = devm_regulator_register(&pdev->dev,
- &regulators[index], &config);
- if (IS_ERR(rdev[i])) {
- ret = PTR_ERR(rdev[i]);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[index],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
dev_err(max8998->dev, "regulator %s init failed (%d)\n",
regulators[index].name, ret);
- rdev[i] = NULL;
return ret;
}
}
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 96c9f80d9550..f374fa57220f 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -274,25 +274,25 @@ static struct mc13xxx_regulator mc13892_regulators[] = {
MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
- MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,
mc13892_vpll),
- MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
mc13892_vdig),
- MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,
mc13892_vsd),
- MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vusb2),
- MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,
mc13892_vvideo),
- MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \
+ MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,
mc13892_vaudio),
- MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
mc13892_vcam),
- MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen1),
- MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen2),
- MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \
+ MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,
mc13892_vgen3),
MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
@@ -476,8 +476,8 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
}
mc13xxx_lock(priv->mc13xxx);
- ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask,
- reg_value);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ mask, reg_value);
mc13xxx_unlock(priv->mc13xxx);
return ret;
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index da4859282302..05b971726ffa 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -167,8 +167,10 @@ int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
struct device_node *parent;
int num;
- of_node_get(pdev->dev.parent->of_node);
- parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!pdev->dev.parent->of_node)
+ return -ENODEV;
+
+ parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return -ENODEV;
@@ -187,8 +189,10 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct device_node *parent, *child;
int i, parsed = 0;
- of_node_get(pdev->dev.parent->of_node);
- parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!pdev->dev.parent->of_node)
+ return NULL;
+
+ parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return NULL;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index d7da1c15a6da..134f90ec9ca1 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -105,7 +105,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
- .name = "pcf50633-regltr",
+ .name = "pcf50633-regulator",
},
.probe = pcf50633_regulator_probe,
};
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8b5e4c712a01..67e678c4301c 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,6 +56,8 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
+enum chips { PFUZE100, PFUZE200 };
+
struct pfuze_regulator {
struct regulator_desc desc;
unsigned char stby_reg;
@@ -63,6 +65,7 @@ struct pfuze_regulator {
};
struct pfuze_chip {
+ int chip_id;
struct regmap *regmap;
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
@@ -78,21 +81,23 @@ static const int pfuze100_vsnvs[] = {
};
static const struct i2c_device_id pfuze_device_id[] = {
- {.name = "pfuze100"},
- {},
+ {.name = "pfuze100", .driver_data = PFUZE100},
+ {.name = "pfuze200", .driver_data = PFUZE200},
+ { }
};
MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
static const struct of_device_id pfuze_dt_ids[] = {
- { .compatible = "fsl,pfuze100" },
- {},
+ { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
+ { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+ { }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
unsigned int ramp_bits;
int ret;
@@ -139,14 +144,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
};
-#define PFUZE100_FIXED_REG(_name, base, voltage) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = 1, \
.ops = &pfuze100_fixed_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (voltage), \
.enable_reg = (base), \
@@ -154,14 +159,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
}, \
}
-#define PFUZE100_SW_REG(_name, base, min, max, step) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_SW_REG(_chip, _name, base, min, max, step) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name,\
.n_voltages = ((max) - (min)) / (step) + 1, \
.ops = &pfuze100_sw_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (min), \
.uV_step = (step), \
@@ -172,14 +177,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x3f, \
}
-#define PFUZE100_SWB_REG(_name, base, mask, voltages) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(voltages), \
.ops = &pfuze100_swb_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.volt_table = voltages, \
.vsel_reg = (base), \
@@ -187,14 +192,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
}, \
}
-#define PFUZE100_VGEN_REG(_name, base, min, max, step) \
- [PFUZE100_ ## _name] = { \
+#define PFUZE100_VGEN_REG(_chip, _name, base, min, max, step) \
+ [_chip ## _ ## _name] = { \
.desc = { \
.name = #_name, \
.n_voltages = ((max) - (min)) / (step) + 1, \
.ops = &pfuze100_ldo_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PFUZE100_ ## _name, \
+ .id = _chip ## _ ## _name, \
.owner = THIS_MODULE, \
.min_uV = (min), \
.uV_step = (step), \
@@ -207,25 +212,45 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}
+/* PFUZE100 */
static struct pfuze_regulator pfuze100_regulators[] = {
- PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
- PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
- PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
- PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
- PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
- PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
- PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000),
- PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
- PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
- PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
- PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+ PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE100, SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
+ PFUZE100_SWB_REG(PFUZE100, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE100, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(PFUZE100, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
+static struct pfuze_regulator pfuze200_regulators[] = {
+ PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(PFUZE200, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+ PFUZE100_SWB_REG(PFUZE200, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE200, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(PFUZE200, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
+static struct pfuze_regulator *pfuze_regulators;
+
#ifdef CONFIG_OF
+/* PFUZE100 */
static struct of_regulator_match pfuze100_matches[] = {
{ .name = "sw1ab", },
{ .name = "sw1c", },
@@ -244,24 +269,56 @@ static struct of_regulator_match pfuze100_matches[] = {
{ .name = "vgen6", },
};
+/* PFUZE200 */
+static struct of_regulator_match pfuze200_matches[] = {
+ { .name = "sw1ab", },
+ { .name = "sw2", },
+ { .name = "sw3a", },
+ { .name = "sw3b", },
+ { .name = "swbst", },
+ { .name = "vsnvs", },
+ { .name = "vrefddr", },
+ { .name = "vgen1", },
+ { .name = "vgen2", },
+ { .name = "vgen3", },
+ { .name = "vgen4", },
+ { .name = "vgen5", },
+ { .name = "vgen6", },
+};
+
+static struct of_regulator_match *pfuze_matches;
+
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
{
struct device *dev = chip->dev;
struct device_node *np, *parent;
int ret;
- np = of_node_get(dev->parent->of_node);
+ np = of_node_get(dev->of_node);
if (!np)
- return 0;
+ return -EINVAL;
- parent = of_find_node_by_name(np, "regulators");
+ parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
return -EINVAL;
}
- ret = of_regulator_match(dev, parent, pfuze100_matches,
- ARRAY_SIZE(pfuze100_matches));
+ switch (chip->chip_id) {
+ case PFUZE200:
+ pfuze_matches = pfuze200_matches;
+ ret = of_regulator_match(dev, parent, pfuze200_matches,
+ ARRAY_SIZE(pfuze200_matches));
+ break;
+
+ case PFUZE100:
+ default:
+ pfuze_matches = pfuze100_matches;
+ ret = of_regulator_match(dev, parent, pfuze100_matches,
+ ARRAY_SIZE(pfuze100_matches));
+ break;
+ }
of_node_put(parent);
if (ret < 0) {
@@ -275,12 +332,12 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
static inline struct regulator_init_data *match_init_data(int index)
{
- return pfuze100_matches[index].init_data;
+ return pfuze_matches[index].init_data;
}
static inline struct device_node *match_of_node(int index)
{
- return pfuze100_matches[index].of_node;
+ return pfuze_matches[index].of_node;
}
#else
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -308,22 +365,23 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
if (ret)
return ret;
- switch (value & 0x0f) {
- /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */
- case 0x8:
- dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
- case 0x0:
- break;
- default:
- dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
- return -ENODEV;
+ if (((value & 0x0f) == 0x8) && (pfuze_chip->chip_id == PFUZE100)) {
+ /*
+ * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
+ * as ID=8 in PFUZE100
+ */
+ dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
+ } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+ /* device ID does not match the configured chip_id */
+ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+ return -ENODEV;
}
ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
if (ret)
return ret;
dev_info(pfuze_chip->dev,
- "Full lay: %x, Metal lay: %x\n",
+ "Full layer: %x, Metal layer: %x\n",
(value & 0xf0) >> 4, value & 0x0f);
ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
@@ -350,17 +408,31 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
dev_get_platdata(&client->dev);
struct regulator_config config = { };
int i, ret;
+ const struct of_device_id *match;
+ u32 regulator_num;
+ u32 sw_check_start, sw_check_end;
pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
GFP_KERNEL);
if (!pfuze_chip)
return -ENOMEM;
- i2c_set_clientdata(client, pfuze_chip);
-
- memcpy(pfuze_chip->regulator_descs, pfuze100_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ if (client->dev.of_node) {
+ match = of_match_device(of_match_ptr(pfuze_dt_ids),
+ &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ pfuze_chip->chip_id = (int)(long)match->data;
+ } else if (id) {
+ pfuze_chip->chip_id = id->driver_data;
+ } else {
+ dev_err(&client->dev, "No dts match or id table match found\n");
+ return -ENODEV;
+ }
+ i2c_set_clientdata(client, pfuze_chip);
pfuze_chip->dev = &client->dev;
pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
@@ -377,11 +449,34 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
return ret;
}
+ /* select the right regulator table after identifying the device */
+ switch (pfuze_chip->chip_id) {
+ case PFUZE200:
+ pfuze_regulators = pfuze200_regulators;
+ regulator_num = ARRAY_SIZE(pfuze200_regulators);
+ sw_check_start = PFUZE200_SW2;
+ sw_check_end = PFUZE200_SW3B;
+ break;
+
+ case PFUZE100:
+ default:
+ pfuze_regulators = pfuze100_regulators;
+ regulator_num = ARRAY_SIZE(pfuze100_regulators);
+ sw_check_start = PFUZE100_SW2;
+ sw_check_end = PFUZE100_SW4;
+ break;
+ }
+ dev_info(&client->dev, "pfuze%s found.\n",
+ (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+
+ memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
+ sizeof(pfuze_chip->regulator_descs));
+
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
return ret;
- for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) {
+ for (i = 0; i < regulator_num; i++) {
struct regulator_init_data *init_data;
struct regulator_desc *desc;
int val;
@@ -394,7 +489,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
init_data = match_init_data(i);
/* SW2~SW4 high bit check and modify the voltage value table */
- if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) {
+ if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
if (val & 0x40) {
desc->min_uV = 800000;
@@ -408,31 +503,18 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
config.driver_data = pfuze_chip;
config.of_node = match_of_node(i);
- pfuze_chip->regulators[i] = regulator_register(desc, &config);
+ pfuze_chip->regulators[i] =
+ devm_regulator_register(&client->dev, desc, &config);
if (IS_ERR(pfuze_chip->regulators[i])) {
dev_err(&client->dev, "register regulator%s failed\n",
- pfuze100_regulators[i].desc.name);
- ret = PTR_ERR(pfuze_chip->regulators[i]);
- while (--i >= 0)
- regulator_unregister(pfuze_chip->regulators[i]);
- return ret;
+ pfuze_regulators[i].desc.name);
+ return PTR_ERR(pfuze_chip->regulators[i]);
}
}
return 0;
}
-static int pfuze100_regulator_remove(struct i2c_client *client)
-{
- int i;
- struct pfuze_chip *pfuze_chip = i2c_get_clientdata(client);
-
- for (i = 0; i < PFUZE100_MAX_REGULATOR; i++)
- regulator_unregister(pfuze_chip->regulators[i]);
-
- return 0;
-}
-
static struct i2c_driver pfuze_driver = {
.id_table = pfuze_device_id,
.driver = {
@@ -441,11 +523,10 @@ static struct i2c_driver pfuze_driver = {
.of_match_table = pfuze_dt_ids,
},
.probe = pfuze100_regulator_probe,
- .remove = pfuze100_regulator_remove,
};
module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("i2c:pfuze100-regulator");
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index b58affb33143..4c414ae109ae 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -119,7 +119,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
- struct regulator_init_data *reg_data;
struct regulator_config config = { };
struct rc5t583_regulator *reg = NULL;
struct rc5t583_regulator *regs;
@@ -135,19 +134,11 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX *
sizeof(struct rc5t583_regulator), GFP_KERNEL);
- if (!regs) {
- dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+ if (!regs)
return -ENOMEM;
- }
for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
- reg_data = pdata->reg_init_data[id];
-
- /* No need to register if there is no regulator data */
- if (!reg_data)
- continue;
-
reg = &regs[id];
ri = &rc5t583_reg_info[id];
reg->reg_info = ri;
@@ -169,7 +160,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
skip_ext_pwr_config:
config.dev = &pdev->dev;
- config.init_data = reg_data;
+ config.init_data = pdata->reg_init_data[id];
config.driver_data = reg;
config.regmap = rc5t583->regmap;
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
new file mode 100644
index 000000000000..808b3aa7a42c
--- /dev/null
+++ b/drivers/regulator/s2mpa01.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+
+#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
+
+struct s2mpa01_info {
+ int ramp_delay24;
+ int ramp_delay3;
+ int ramp_delay5;
+ int ramp_delay16;
+ int ramp_delay7;
+ int ramp_delay8910;
+};
+
+static int get_ramp_delay(int ramp_delay)
+{
+ unsigned char cnt = 0;
+
+ ramp_delay /= 6250;
+
+ while (true) {
+ ramp_delay = ramp_delay >> 1;
+ if (ramp_delay == 0)
+ break;
+ cnt++;
+ }
+
+ if (cnt > 3)
+ cnt = 3;
+
+ return cnt;
+}
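get_ramp_delay() above reduces the requested ramp rate to the 2-bit register encoding: it divides by 6250 and counts how many times the quotient halves before reaching zero, i.e. floor(log2(ramp_delay / 6250)) clamped to 3, which presumably selects ramp rates of 6.25, 12.5, 25 or 50 mV/us. A standalone sketch of the same computation (illustrative only, not part of the patch):

    #include <stdio.h>

    /* Encoded value n is assumed to select a ramp rate of (6250 << n) uV/us, n in 0..3. */
    static unsigned char encode_ramp(int ramp_uV_per_us)
    {
            unsigned char cnt = 0;

            ramp_uV_per_us /= 6250;
            while (ramp_uV_per_us > 1) {    /* floor(log2(quotient)) */
                    ramp_uV_per_us >>= 1;
                    cnt++;
            }
            return cnt > 3 ? 3 : cnt;
    }

    int main(void)
    {
            /* Prints: 0 1 2 3 */
            printf("%u %u %u %u\n", encode_ramp(6250), encode_ramp(12500),
                   encode_ramp(25000), encode_ramp(100000));
            return 0;
    }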
+
+static int s2mpa01_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+ unsigned int ramp_delay = 0;
+ int old_volt, new_volt;
+
+ switch (rdev->desc->id) {
+ case S2MPA01_BUCK2:
+ case S2MPA01_BUCK4:
+ ramp_delay = s2mpa01->ramp_delay24;
+ break;
+ case S2MPA01_BUCK3:
+ ramp_delay = s2mpa01->ramp_delay3;
+ break;
+ case S2MPA01_BUCK5:
+ ramp_delay = s2mpa01->ramp_delay5;
+ break;
+ case S2MPA01_BUCK1:
+ case S2MPA01_BUCK6:
+ ramp_delay = s2mpa01->ramp_delay16;
+ break;
+ case S2MPA01_BUCK7:
+ ramp_delay = s2mpa01->ramp_delay7;
+ break;
+ case S2MPA01_BUCK8:
+ case S2MPA01_BUCK9:
+ case S2MPA01_BUCK10:
+ ramp_delay = s2mpa01->ramp_delay8910;
+ break;
+ }
+
+ if (ramp_delay == 0)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
+ new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
+
+ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
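s2mpa01_regulator_set_voltage_time_sel() above returns the settle time in microseconds: the voltage delta between the two selectors divided by the effective ramp rate, rounded up. A tiny worked example with hypothetical values (min_uV, uV_step and ramp below are illustrative, not taken from the patch):

    #include <stdio.h>
    #include <stdlib.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int min_uV = 600000, uV_step = 6250;    /* hypothetical descriptor values */
            int ramp = 25000;                       /* hypothetical ramp rate, uV/us */
            int old_sel = 0, new_sel = 64;
            int old_volt = min_uV + uV_step * old_sel;
            int new_volt = min_uV + uV_step * new_sel;

            /* 400000 uV / 25000 uV/us rounded up -> prints: 16 us */
            printf("%d us\n", DIV_ROUND_UP(abs(new_volt - old_volt), ramp));
            return 0;
    }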
+
+static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+ unsigned int ramp_val, ramp_shift, ramp_reg = S2MPA01_REG_RAMP2;
+ unsigned int ramp_enable = 1, enable_shift = 0;
+ int ret;
+
+ switch (rdev->desc->id) {
+ case S2MPA01_BUCK1:
+ enable_shift = S2MPA01_BUCK1_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay16)
+ s2mpa01->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay16;
+
+ ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK2:
+ enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay24)
+ s2mpa01->ramp_delay24 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay24;
+
+ ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK3:
+ enable_shift = S2MPA01_BUCK3_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ s2mpa01->ramp_delay3 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK3_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK4:
+ enable_shift = S2MPA01_BUCK4_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mpa01->ramp_delay24)
+ s2mpa01->ramp_delay24 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay24;
+
+ ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+ ramp_reg = S2MPA01_REG_RAMP1;
+ break;
+ case S2MPA01_BUCK5:
+ s2mpa01->ramp_delay5 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK5_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK6:
+ if (ramp_delay > s2mpa01->ramp_delay16)
+ s2mpa01->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay16;
+
+ ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK7:
+ s2mpa01->ramp_delay7 = ramp_delay;
+ ramp_shift = S2MPA01_BUCK7_RAMP_SHIFT;
+ break;
+ case S2MPA01_BUCK8:
+ case S2MPA01_BUCK9:
+ case S2MPA01_BUCK10:
+ if (ramp_delay > s2mpa01->ramp_delay8910)
+ s2mpa01->ramp_delay8910 = ramp_delay;
+ else
+ ramp_delay = s2mpa01->ramp_delay8910;
+
+ ramp_shift = S2MPA01_BUCK8910_RAMP_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!ramp_enable)
+ goto ramp_disable;
+
+ if (enable_shift) {
+ ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+ 1 << enable_shift, 1 << enable_shift);
+ if (ret) {
+ dev_err(&rdev->dev, "failed to enable ramp rate\n");
+ return ret;
+ }
+ }
+
+ ramp_val = get_ramp_delay(ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
+ ramp_val << ramp_shift);
+
+ramp_disable:
+ return regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+ 1 << enable_shift, 0);
+}
+
+static struct regulator_ops s2mpa01_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops s2mpa01_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = s2mpa01_regulator_set_voltage_time_sel,
+ .set_ramp_delay = s2mpa01_set_ramp_delay,
+};
+
+#define regulator_desc_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPA01_LDO##num, \
+ .ops = &s2mpa01_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_LDO_MIN, \
+ .uV_step = S2MPA01_LDO_STEP1, \
+ .n_voltages = S2MPA01_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPA01_LDO_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+#define regulator_desc_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPA01_LDO##num, \
+ .ops = &s2mpa01_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_LDO_MIN, \
+ .uV_step = S2MPA01_LDO_STEP2, \
+ .n_voltages = S2MPA01_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPA01_LDO_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck1_4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPA01_BUCK##num, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN1, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck5 { \
+ .name = "BUCK5", \
+ .id = S2MPA01_BUCK5, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN2, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B5CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B5CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck6_7(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPA01_BUCK##num, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN1, \
+ .uV_step = S2MPA01_BUCK_STEP1, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B6CTRL2 + (num - 6) * 2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B6CTRL1 + (num - 6) * 2, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck8 { \
+ .name = "BUCK8", \
+ .id = S2MPA01_BUCK8, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN2, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B8CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B8CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck9 { \
+ .name = "BUCK9", \
+ .id = S2MPA01_BUCK9, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN4, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B9CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B9CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+#define regulator_desc_buck10 { \
+ .name = "BUCK10", \
+ .id = S2MPA01_BUCK10, \
+ .ops = &s2mpa01_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPA01_BUCK_MIN3, \
+ .uV_step = S2MPA01_BUCK_STEP2, \
+ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPA01_RAMP_DELAY, \
+ .vsel_reg = S2MPA01_REG_B10CTRL2, \
+ .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPA01_REG_B10CTRL1, \
+ .enable_mask = S2MPA01_ENABLE_MASK \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo2(1),
+ regulator_desc_ldo1(2),
+ regulator_desc_ldo1(3),
+ regulator_desc_ldo1(4),
+ regulator_desc_ldo1(5),
+ regulator_desc_ldo2(6),
+ regulator_desc_ldo1(7),
+ regulator_desc_ldo1(8),
+ regulator_desc_ldo1(9),
+ regulator_desc_ldo1(10),
+ regulator_desc_ldo2(11),
+ regulator_desc_ldo1(12),
+ regulator_desc_ldo1(13),
+ regulator_desc_ldo1(14),
+ regulator_desc_ldo1(15),
+ regulator_desc_ldo1(16),
+ regulator_desc_ldo1(17),
+ regulator_desc_ldo1(18),
+ regulator_desc_ldo1(19),
+ regulator_desc_ldo1(20),
+ regulator_desc_ldo1(21),
+ regulator_desc_ldo2(22),
+ regulator_desc_ldo2(23),
+ regulator_desc_ldo1(24),
+ regulator_desc_ldo1(25),
+ regulator_desc_ldo1(26),
+ regulator_desc_buck1_4(1),
+ regulator_desc_buck1_4(2),
+ regulator_desc_buck1_4(3),
+ regulator_desc_buck1_4(4),
+ regulator_desc_buck5,
+ regulator_desc_buck6_7(6),
+ regulator_desc_buck6_7(7),
+ regulator_desc_buck8,
+ regulator_desc_buck9,
+ regulator_desc_buck10,
+};
+
+static int s2mpa01_pmic_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
+ struct device_node *reg_np = NULL;
+ struct regulator_config config = { };
+ struct s2mpa01_info *s2mpa01;
+ int i;
+
+ s2mpa01 = devm_kzalloc(&pdev->dev, sizeof(*s2mpa01), GFP_KERNEL);
+ if (!s2mpa01)
+ return -ENOMEM;
+
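+ /* Seed the DT match table with names from the regulator descriptors */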
+ for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
+ rdata[i].name = regulators[i].name;
+
+ if (iodev->dev->of_node) {
+ reg_np = of_get_child_by_name(iodev->dev->of_node,
+ "regulators");
+ if (!reg_np) {
+ dev_err(&pdev->dev,
+ "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ of_regulator_match(&pdev->dev, reg_np, rdata,
+ S2MPA01_REGULATOR_MAX);
+ of_node_put(reg_np);
+ }
+
+ platform_set_drvdata(pdev, s2mpa01);
+
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap_pmic;
+ config.driver_data = s2mpa01;
+
+ for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
+ struct regulator_dev *rdev;
+ if (pdata)
+ config.init_data = pdata->regulators[i].initdata;
+ else
+ config.init_data = rdata[i].init_data;
+
+ if (reg_np)
+ config.of_node = rdata[i].of_node;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "regulator init failed for %d\n",
+ i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id s2mpa01_pmic_id[] = {
+ { "s2mpa01-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);
+
+static struct platform_driver s2mpa01_pmic_driver = {
+ .driver = {
+ .name = "s2mpa01-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mpa01_pmic_probe,
+ .id_table = s2mpa01_pmic_id,
+};
+
+module_platform_driver(s2mpa01_pmic_driver);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
+MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 9e61922d8230..68fd54702edb 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1,13 +1,18 @@
/*
* s2mps11.c
*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*
*/
@@ -24,18 +29,21 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
-
-#define S2MPS11_REGULATOR_CNT ARRAY_SIZE(regulators)
+#include <linux/mfd/samsung/s2mps14.h>
struct s2mps11_info {
- struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX];
-
+ unsigned int rdev_num;
int ramp_delay2;
int ramp_delay34;
int ramp_delay5;
int ramp_delay16;
int ramp_delay7810;
int ramp_delay9;
+ /*
+ * One bit for each S2MPS14 regulator, indicating whether suspend
+ * mode was enabled.
+ */
+ unsigned int s2mps14_suspend_state:30;
};
static int get_ramp_delay(int ramp_delay)
@@ -65,13 +73,11 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int ramp_delay = 0;
int old_volt, new_volt;
- switch (rdev->desc->id) {
+ switch (rdev_get_id(rdev)) {
case S2MPS11_BUCK2:
ramp_delay = s2mps11->ramp_delay2;
break;
case S2MPS11_BUCK3:
- ramp_delay = s2mps11->ramp_delay34;
- break;
case S2MPS11_BUCK4:
ramp_delay = s2mps11->ramp_delay34;
break;
@@ -107,7 +113,7 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
unsigned int ramp_enable = 1, enable_shift = 0;
int ret;
- switch (rdev->desc->id) {
+ switch (rdev_get_id(rdev)) {
case S2MPS11_BUCK1:
if (ramp_delay > s2mps11->ramp_delay16)
s2mps11->ramp_delay16 = ramp_delay;
@@ -238,7 +244,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.set_ramp_delay = s2mps11_set_ramp_delay,
};
-#define regulator_desc_ldo1(num) { \
+#define regulator_desc_s2mps11_ldo1(num) { \
.name = "LDO"#num, \
.id = S2MPS11_LDO##num, \
.ops = &s2mps11_ldo_ops, \
@@ -252,7 +258,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_ldo2(num) { \
+#define regulator_desc_s2mps11_ldo2(num) { \
.name = "LDO"#num, \
.id = S2MPS11_LDO##num, \
.ops = &s2mps11_ldo_ops, \
@@ -267,7 +273,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck1_4(num) { \
+#define regulator_desc_s2mps11_buck1_4(num) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -283,7 +289,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck5 { \
+#define regulator_desc_s2mps11_buck5 { \
.name = "BUCK5", \
.id = S2MPS11_BUCK5, \
.ops = &s2mps11_buck_ops, \
@@ -299,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck6_8(num) { \
+#define regulator_desc_s2mps11_buck6_8(num) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -315,7 +321,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck9 { \
+#define regulator_desc_s2mps11_buck9 { \
.name = "BUCK9", \
.id = S2MPS11_BUCK9, \
.ops = &s2mps11_buck_ops, \
@@ -331,7 +337,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_buck10 { \
+#define regulator_desc_s2mps11_buck10 { \
.name = "BUCK10", \
.id = S2MPS11_BUCK10, \
.ops = &s2mps11_buck_ops, \
@@ -347,72 +353,252 @@ static struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-static struct regulator_desc regulators[] = {
- regulator_desc_ldo2(1),
- regulator_desc_ldo1(2),
- regulator_desc_ldo1(3),
- regulator_desc_ldo1(4),
- regulator_desc_ldo1(5),
- regulator_desc_ldo2(6),
- regulator_desc_ldo1(7),
- regulator_desc_ldo1(8),
- regulator_desc_ldo1(9),
- regulator_desc_ldo1(10),
- regulator_desc_ldo2(11),
- regulator_desc_ldo1(12),
- regulator_desc_ldo1(13),
- regulator_desc_ldo1(14),
- regulator_desc_ldo1(15),
- regulator_desc_ldo1(16),
- regulator_desc_ldo1(17),
- regulator_desc_ldo1(18),
- regulator_desc_ldo1(19),
- regulator_desc_ldo1(20),
- regulator_desc_ldo1(21),
- regulator_desc_ldo2(22),
- regulator_desc_ldo2(23),
- regulator_desc_ldo1(24),
- regulator_desc_ldo1(25),
- regulator_desc_ldo1(26),
- regulator_desc_ldo2(27),
- regulator_desc_ldo1(28),
- regulator_desc_ldo1(29),
- regulator_desc_ldo1(30),
- regulator_desc_ldo1(31),
- regulator_desc_ldo1(32),
- regulator_desc_ldo1(33),
- regulator_desc_ldo1(34),
- regulator_desc_ldo1(35),
- regulator_desc_ldo1(36),
- regulator_desc_ldo1(37),
- regulator_desc_ldo1(38),
- regulator_desc_buck1_4(1),
- regulator_desc_buck1_4(2),
- regulator_desc_buck1_4(3),
- regulator_desc_buck1_4(4),
- regulator_desc_buck5,
- regulator_desc_buck6_8(6),
- regulator_desc_buck6_8(7),
- regulator_desc_buck6_8(8),
- regulator_desc_buck9,
- regulator_desc_buck10,
+static const struct regulator_desc s2mps11_regulators[] = {
+ regulator_desc_s2mps11_ldo2(1),
+ regulator_desc_s2mps11_ldo1(2),
+ regulator_desc_s2mps11_ldo1(3),
+ regulator_desc_s2mps11_ldo1(4),
+ regulator_desc_s2mps11_ldo1(5),
+ regulator_desc_s2mps11_ldo2(6),
+ regulator_desc_s2mps11_ldo1(7),
+ regulator_desc_s2mps11_ldo1(8),
+ regulator_desc_s2mps11_ldo1(9),
+ regulator_desc_s2mps11_ldo1(10),
+ regulator_desc_s2mps11_ldo2(11),
+ regulator_desc_s2mps11_ldo1(12),
+ regulator_desc_s2mps11_ldo1(13),
+ regulator_desc_s2mps11_ldo1(14),
+ regulator_desc_s2mps11_ldo1(15),
+ regulator_desc_s2mps11_ldo1(16),
+ regulator_desc_s2mps11_ldo1(17),
+ regulator_desc_s2mps11_ldo1(18),
+ regulator_desc_s2mps11_ldo1(19),
+ regulator_desc_s2mps11_ldo1(20),
+ regulator_desc_s2mps11_ldo1(21),
+ regulator_desc_s2mps11_ldo2(22),
+ regulator_desc_s2mps11_ldo2(23),
+ regulator_desc_s2mps11_ldo1(24),
+ regulator_desc_s2mps11_ldo1(25),
+ regulator_desc_s2mps11_ldo1(26),
+ regulator_desc_s2mps11_ldo2(27),
+ regulator_desc_s2mps11_ldo1(28),
+ regulator_desc_s2mps11_ldo1(29),
+ regulator_desc_s2mps11_ldo1(30),
+ regulator_desc_s2mps11_ldo1(31),
+ regulator_desc_s2mps11_ldo1(32),
+ regulator_desc_s2mps11_ldo1(33),
+ regulator_desc_s2mps11_ldo1(34),
+ regulator_desc_s2mps11_ldo1(35),
+ regulator_desc_s2mps11_ldo1(36),
+ regulator_desc_s2mps11_ldo1(37),
+ regulator_desc_s2mps11_ldo1(38),
+ regulator_desc_s2mps11_buck1_4(1),
+ regulator_desc_s2mps11_buck1_4(2),
+ regulator_desc_s2mps11_buck1_4(3),
+ regulator_desc_s2mps11_buck1_4(4),
+ regulator_desc_s2mps11_buck5,
+ regulator_desc_s2mps11_buck6_8(6),
+ regulator_desc_s2mps11_buck6_8(7),
+ regulator_desc_s2mps11_buck6_8(8),
+ regulator_desc_s2mps11_buck9,
+ regulator_desc_s2mps11_buck10,
+};
+
+static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+{
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ unsigned int val;
+
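+ /*
+ * If suspend mode was requested for this regulator, enable it in
+ * suspend mode instead of turning it fully on.
+ */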
+ if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ val = S2MPS14_ENABLE_SUSPEND;
+ else
+ val = rdev->desc->enable_mask;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+}
+
+static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val;
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+
+ /* LDO3 should always be on and does not support suspend mode */
+ if (rdev_get_id(rdev) == S2MPS14_LDO3)
+ return 0;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+ /*
+ * Don't enable suspend mode if the regulator is already disabled,
+ * because that would briefly turn the regulator back on after
+ * resuming.
+ * However, we still want to set the suspend_state bit for the
+ * regulator in case it gets enabled before the system is suspended.
+ */
+ if (!(val & rdev->desc->enable_mask))
+ return 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, S2MPS14_ENABLE_SUSPEND);
+}
+
+static struct regulator_ops s2mps14_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = s2mps14_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+};
+
+#define regulator_desc_s2mps14_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_800MV, \
+ .uV_step = S2MPS14_LDO_STEP_25MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_1800MV, \
+ .uV_step = S2MPS14_LDO_STEP_25MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_ldo3(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS14_LDO##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_LDO_MIN_800MV, \
+ .uV_step = S2MPS14_LDO_STEP_12_5MV, \
+ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_buck1235(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS14_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_BUCK1235_MIN_600MV, \
+ .uV_step = S2MPS14_BUCK1235_STEP_6_25MV, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+#define regulator_desc_s2mps14_buck4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS14_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS14_BUCK4_MIN_1400MV, \
+ .uV_step = S2MPS14_BUCK4_STEP_12_5MV, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPS14_BUCK4_START_SEL, \
+ .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+static const struct regulator_desc s2mps14_regulators[] = {
+ regulator_desc_s2mps14_ldo3(1),
+ regulator_desc_s2mps14_ldo3(2),
+ regulator_desc_s2mps14_ldo1(3),
+ regulator_desc_s2mps14_ldo1(4),
+ regulator_desc_s2mps14_ldo3(5),
+ regulator_desc_s2mps14_ldo3(6),
+ regulator_desc_s2mps14_ldo1(7),
+ regulator_desc_s2mps14_ldo2(8),
+ regulator_desc_s2mps14_ldo3(9),
+ regulator_desc_s2mps14_ldo3(10),
+ regulator_desc_s2mps14_ldo1(11),
+ regulator_desc_s2mps14_ldo2(12),
+ regulator_desc_s2mps14_ldo2(13),
+ regulator_desc_s2mps14_ldo2(14),
+ regulator_desc_s2mps14_ldo2(15),
+ regulator_desc_s2mps14_ldo2(16),
+ regulator_desc_s2mps14_ldo2(17),
+ regulator_desc_s2mps14_ldo2(18),
+ regulator_desc_s2mps14_ldo1(19),
+ regulator_desc_s2mps14_ldo1(20),
+ regulator_desc_s2mps14_ldo1(21),
+ regulator_desc_s2mps14_ldo3(22),
+ regulator_desc_s2mps14_ldo1(23),
+ regulator_desc_s2mps14_ldo2(24),
+ regulator_desc_s2mps14_ldo2(25),
+ regulator_desc_s2mps14_buck1235(1),
+ regulator_desc_s2mps14_buck1235(2),
+ regulator_desc_s2mps14_buck1235(3),
+ regulator_desc_s2mps14_buck4(4),
+ regulator_desc_s2mps14_buck1235(5),
};
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
- struct of_regulator_match rdata[S2MPS11_REGULATOR_MAX];
+ struct sec_platform_data *pdata = iodev->pdata;
+ struct of_regulator_match *rdata = NULL;
struct device_node *reg_np = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
- int i, ret;
+ int i, ret = 0;
+ const struct regulator_desc *regulators;
+ enum sec_device_type dev_type;
s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
GFP_KERNEL);
if (!s2mps11)
return -ENOMEM;
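+ /* Pick the regulator descriptor table matching the probed PMIC variant */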
+ dev_type = platform_get_device_id(pdev)->driver_data;
+ switch (dev_type) {
+ case S2MPS11X:
+ s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+ regulators = s2mps11_regulators;
+ break;
+ case S2MPS14X:
+ s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+ regulators = s2mps14_regulators;
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device type: %u\n", dev_type);
+ return -EINVAL;
+ }
+
if (!iodev->dev->of_node) {
if (pdata) {
goto common_reg;
@@ -423,16 +609,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < S2MPS11_REGULATOR_CNT; i++)
+ rdata = kzalloc(sizeof(*rdata) * s2mps11->rdev_num, GFP_KERNEL);
+ if (!rdata)
+ return -ENOMEM;
+
+ for (i = 0; i < s2mps11->rdev_num; i++)
rdata[i].name = regulators[i].name;
- reg_np = of_find_node_by_name(iodev->dev->of_node, "regulators");
+ reg_np = of_get_child_by_name(iodev->dev->of_node, "regulators");
if (!reg_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- of_regulator_match(&pdev->dev, reg_np, rdata, S2MPS11_REGULATOR_MAX);
+ of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
+ of_node_put(reg_np);
common_reg:
platform_set_drvdata(pdev, s2mps11);
@@ -440,29 +632,36 @@ common_reg:
config.dev = &pdev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+ for (i = 0; i < s2mps11->rdev_num; i++) {
+ struct regulator_dev *regulator;
+
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
}
- s2mps11->rdev[i] = devm_regulator_register(&pdev->dev,
+ regulator = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
- if (IS_ERR(s2mps11->rdev[i])) {
- ret = PTR_ERR(s2mps11->rdev[i]);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ kfree(rdata);
+
+ return ret;
}
static const struct platform_device_id s2mps11_pmic_id[] = {
- { "s2mps11-pmic", 0},
+ { "s2mps11-pmic", S2MPS11X},
+ { "s2mps14-pmic", S2MPS14X},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
@@ -490,5 +689,5 @@ module_exit(s2mps11_pmic_exit);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver");
+MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index aeb40aad0ae7..f05badabd69e 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -11,11 +11,8 @@
*
*/
-#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -23,6 +20,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
#define S5M8767_OPMODE_NORMAL_MODE 0x1
@@ -120,8 +118,8 @@ static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK4] = &buck_voltage_val2,
[S5M8767_BUCK5] = &buck_voltage_val1,
[S5M8767_BUCK6] = &buck_voltage_val1,
- [S5M8767_BUCK7] = NULL,
- [S5M8767_BUCK8] = NULL,
+ [S5M8767_BUCK7] = &buck_voltage_val3,
+ [S5M8767_BUCK8] = &buck_voltage_val3,
[S5M8767_BUCK9] = &buck_voltage_val3,
};
@@ -169,12 +167,11 @@ static unsigned int s5m8767_opmode_reg[][4] = {
{0x0, 0x3, 0x1, 0x1}, /* BUCK9 */
};
-static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
- int *enable_ctrl)
+static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ int *reg, int *enable_ctrl)
{
- int i, reg_id = rdev_get_id(rdev);
+ int i;
unsigned int mode;
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
switch (reg_id) {
case S5M8767_LDO1 ... S5M8767_LDO2:
@@ -213,52 +210,6 @@ static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
return 0;
}
-static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int mask = 0xc0, enable_ctrl;
- unsigned int val;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret == -EINVAL)
- return 1;
- else if (ret)
- return ret;
-
- ret = sec_reg_read(s5m8767->iodev, reg, &val);
- if (ret)
- return ret;
-
- return (val & mask) == enable_ctrl;
-}
-
-static int s5m8767_reg_enable(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int mask = 0xc0, enable_ctrl;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret)
- return ret;
-
- return sec_reg_update(s5m8767->iodev, reg, enable_ctrl, mask);
-}
-
-static int s5m8767_reg_disable(struct regulator_dev *rdev)
-{
- struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- int ret, reg;
- int mask = 0xc0, enable_ctrl;
-
- ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
- if (ret)
- return ret;
-
- return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
-}
-
static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
{
int reg;
@@ -408,18 +359,21 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
static struct regulator_ops s5m8767_ops = {
.list_voltage = regulator_list_voltage_linear,
- .is_enabled = s5m8767_reg_is_enabled,
- .enable = s5m8767_reg_enable,
- .disable = s5m8767_reg_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
};
static struct regulator_ops s5m8767_buck78_ops = {
- .is_enabled = s5m8767_reg_is_enabled,
- .enable = s5m8767_reg_enable,
- .disable = s5m8767_reg_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
};
#define s5m8767_regulator_desc(_name) { \
@@ -478,6 +432,66 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK9),
};
+/*
+ * Enable GPIO control over BUCK9 in regulator_config for that regulator.
+ */
+static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
+ struct sec_regulator_data *rdata,
+ struct regulator_config *config)
+{
+ int i, mode = 0;
+
+ if (rdata->id != S5M8767_BUCK9)
+ return;
+
+ /* Check if opmode for regulator matches S5M8767_ENCTRL_USE_GPIO */
+ for (i = 0; i < s5m8767->num_regulators; i++) {
+ const struct sec_opmode_data *opmode = &s5m8767->opmode[i];
+ if (opmode->id == rdata->id) {
+ mode = s5m8767_opmode_reg[rdata->id][opmode->mode];
+ break;
+ }
+ }
+ if (mode != S5M8767_ENCTRL_USE_GPIO) {
+ dev_warn(s5m8767->dev,
+ "ext-control for %s: mismatched op_mode (%x), ignoring\n",
+ rdata->reg_node->name, mode);
+ return;
+ }
+
+ if (!gpio_is_valid(rdata->ext_control_gpio)) {
+ dev_warn(s5m8767->dev,
+ "ext-control for %s: GPIO not valid, ignoring\n",
+ rdata->reg_node->name);
+ return;
+ }
+
+ config->ena_gpio = rdata->ext_control_gpio;
+ config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+}
+
+/*
+ * Turn on GPIO control over BUCK9.
+ */
+static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767,
+ struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+ int ret, reg, enable_ctrl;
+
+ if (id != S5M8767_BUCK9)
+ return -EINVAL;
+
+ ret = s5m8767_get_register(s5m8767, id, &reg, &enable_ctrl);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ reg, S5M8767_ENCTRL_MASK,
+ S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
+}
+
+
#ifdef CONFIG_OF
static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
struct sec_platform_data *pdata,
@@ -515,6 +529,16 @@ static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
return 0;
}
+static void s5m8767_pmic_dt_parse_ext_control_gpio(struct sec_pmic_dev *iodev,
+ struct sec_regulator_data *rdata,
+ struct device_node *reg_np)
+{
+ rdata->ext_control_gpio = of_get_named_gpio(reg_np,
+ "s5m8767,pmic-ext-control-gpios", 0);
+ if (!gpio_is_valid(rdata->ext_control_gpio))
+ rdata->ext_control_gpio = 0;
+}
+
static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
@@ -530,7 +554,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -541,19 +565,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
- if (!rdata) {
- dev_err(iodev->dev,
- "could not allocate memory for regulator data\n");
+ if (!rdata)
return -ENOMEM;
- }
rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
pdata->num_regulators, GFP_KERNEL);
- if (!rmode) {
- dev_err(iodev->dev,
- "could not allocate memory for regulator mode\n");
+ if (!rmode)
return -ENOMEM;
- }
pdata->regulators = rdata;
pdata->opmode = rmode;
@@ -569,6 +587,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
continue;
}
+ s5m8767_pmic_dt_parse_ext_control_gpio(iodev, rdata, reg_np);
+
rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
&pdev->dev, reg_np);
@@ -586,6 +606,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode++;
}
+ of_node_put(regulators_np);
+
if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
pdata->buck2_gpiodvs = true;
@@ -745,17 +767,20 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck2_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK2DVS2,
+ buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck3_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK3DVS2,
+ buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck4_init);
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
+ regmap_write(s5m8767->iodev->regmap_pmic, S5M8767_REG_BUCK4DVS2,
+ buck_init);
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
@@ -837,76 +862,82 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
- (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
- (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
- sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
- (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1),
- 1 << 1);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK2CTRL, 1 << 1,
+ (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK3CTRL, 1 << 1,
+ (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK4CTRL, 1 << 1,
+ (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1));
}
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS1 + i,
- s5m8767->buck2_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK2DVS1 + i,
+ s5m8767->buck2_vol[i]);
}
if (s5m8767->buck3_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS1 + i,
- s5m8767->buck3_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK3DVS1 + i,
+ s5m8767->buck3_vol[i]);
}
if (s5m8767->buck4_gpiodvs) {
- sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS1 + i,
- s5m8767->buck4_vol[i]);
+ regmap_write(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK4DVS1 + i,
+ s5m8767->buck4_vol[i]);
}
}
if (s5m8767->buck2_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x08, 0x08);
if (s5m8767->buck3_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x04, 0x04);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x04, 0x04);
if (s5m8767->buck4_ramp)
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x02, 0x02);
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP, 0x02, 0x02);
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
+ unsigned int val;
switch (s5m8767->ramp_delay) {
case 5:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x40, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_5;
break;
case 10:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x90, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_10;
break;
case 25:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xd0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_25;
break;
case 50:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xe0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_50;
break;
case 100:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xf0, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_100;
break;
default:
- sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0x90, 0xf0);
+ val = S5M8767_DVS_BUCK_RAMP_10;
}
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_DVSRAMP,
+ S5M8767_DVS_BUCK_RAMP_MASK,
+ val << S5M8767_DVS_BUCK_RAMP_SHIFT);
}
for (i = 0; i < pdata->num_regulators; i++) {
const struct sec_voltage_desc *desc;
int id = pdata->regulators[i].id;
+ int enable_reg, enable_val;
desc = reg_voltage_map[id];
if (desc) {
@@ -920,6 +951,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
regulators[id].vsel_mask = 0x3f;
else
regulators[id].vsel_mask = 0xff;
+
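+ /* Fill in enable register data so the regmap enable/disable helpers work */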
+ s5m8767_get_register(s5m8767, id, &enable_reg,
+ &enable_val);
+ regulators[id].enable_reg = enable_reg;
+ regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ regulators[id].enable_val = enable_val;
}
config.dev = s5m8767->dev;
@@ -927,6 +964,9 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.driver_data = s5m8767;
config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
+ if (pdata->regulators[i].ext_control_gpio)
+ s5m8767_regulator_config_ext_control(s5m8767,
+ &pdata->regulators[i], &config);
rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
&config);
@@ -936,6 +976,16 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
id);
return ret;
}
+
+ if (pdata->regulators[i].ext_control_gpio) {
+ ret = s5m8767_enable_ext_control(s5m8767, rdev[i]);
+ if (ret < 0) {
+ dev_err(s5m8767->dev,
+ "failed to enable gpio control over %s: %d\n",
+ rdev[i]->desc->name, ret);
+ return ret;
+ }
+ }
}
return 0;
diff --git a/drivers/regulator/st-pwm.c b/drivers/regulator/st-pwm.c
new file mode 100644
index 000000000000..e367af1c5f9d
--- /dev/null
+++ b/drivers/regulator/st-pwm.c
@@ -0,0 +1,190 @@
+/*
+ * Regulator driver for ST's PWM Regulators
+ *
+ * Copyright (C) 2014 - STMicroelectronics Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pwm.h>
+
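+/* Fixed PWM period, in nanoseconds, used for all pwm_config() calls */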
+#define ST_PWM_REG_PERIOD 8448
+
+struct st_pwm_regulator_pdata {
+ const struct regulator_desc *desc;
+ struct st_pwm_voltages *duty_cycle_table;
+};
+
+struct st_pwm_regulator_data {
+ const struct st_pwm_regulator_pdata *pdata;
+ struct pwm_device *pwm;
+ bool enabled;
+ int state;
+};
+
+struct st_pwm_voltages {
+ unsigned int uV;
+ unsigned int dutycycle;
+};
+
+static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
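+ /* Return the selector cached by the last set_voltage_sel() call */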
+ return drvdata->state;
+}
+
+static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+ int dutycycle;
+ int ret;
+
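+ /* Scale the table's duty-cycle percentage to the fixed PWM period */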
+ dutycycle = (ST_PWM_REG_PERIOD / 100) *
+ drvdata->pdata->duty_cycle_table[selector].dutycycle;
+
+ ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to configure PWM\n");
+ return ret;
+ }
+
+ drvdata->state = selector;
+
+ if (!drvdata->enabled) {
+ ret = pwm_enable(drvdata->pwm);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to enable PWM\n");
+ return ret;
+ }
+ drvdata->enabled = true;
+ }
+
+ return 0;
+}
+
+static int st_pwm_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+ if (selector >= dev->desc->n_voltages)
+ return -EINVAL;
+
+ return drvdata->pdata->duty_cycle_table[selector].uV;
+}
+
+static struct regulator_ops st_pwm_regulator_voltage_ops = {
+ .set_voltage_sel = st_pwm_regulator_set_voltage_sel,
+ .get_voltage_sel = st_pwm_regulator_get_voltage_sel,
+ .list_voltage = st_pwm_regulator_list_voltage,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static struct st_pwm_voltages b2105_duty_cycle_table[] = {
+ { .uV = 1114000, .dutycycle = 0, },
+ { .uV = 1095000, .dutycycle = 10, },
+ { .uV = 1076000, .dutycycle = 20, },
+ { .uV = 1056000, .dutycycle = 30, },
+ { .uV = 1036000, .dutycycle = 40, },
+ { .uV = 1016000, .dutycycle = 50, },
+ /* WARNING: Values above 50% duty-cycle cause boot failures. */
+};
+
+static const struct regulator_desc b2105_desc = {
+ .name = "b2105-pwm-regulator",
+ .ops = &st_pwm_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(b2105_duty_cycle_table),
+ .supply_name = "pwm",
+};
+
+static const struct st_pwm_regulator_pdata b2105_info = {
+ .desc = &b2105_desc,
+ .duty_cycle_table = b2105_duty_cycle_table,
+};
+
+static struct of_device_id st_pwm_of_match[] = {
+ { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st_pwm_of_match);
+
+static int st_pwm_regulator_probe(struct platform_device *pdev)
+{
+ struct st_pwm_regulator_data *drvdata;
+ struct regulator_dev *regulator;
+ struct regulator_config config = { };
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_match;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Device Tree node missing\n");
+ return -EINVAL;
+ }
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ of_match = of_match_device(st_pwm_of_match, &pdev->dev);
+ if (!of_match) {
+ dev_err(&pdev->dev, "failed to match of device\n");
+ return -ENODEV;
+ }
+ drvdata->pdata = of_match->data;
+
+ config.init_data = of_get_regulator_init_data(&pdev->dev, np);
+ if (!config.init_data)
+ return -ENOMEM;
+
+ config.of_node = np;
+ config.dev = &pdev->dev;
+ config.driver_data = drvdata;
+
+ drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(drvdata->pwm)) {
+ dev_err(&pdev->dev, "Failed to get PWM\n");
+ return PTR_ERR(drvdata->pwm);
+ }
+
+ regulator = devm_regulator_register(&pdev->dev,
+ drvdata->pdata->desc, &config);
+ if (IS_ERR(regulator)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ drvdata->pdata->desc->name);
+ return PTR_ERR(regulator);
+ }
+
+ return 0;
+}
+
+static struct platform_driver st_pwm_regulator_driver = {
+ .driver = {
+ .name = "st-pwm-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st_pwm_of_match),
+ },
+ .probe = st_pwm_regulator_probe,
+};
+
+module_platform_driver(st_pwm_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_DESCRIPTION("ST PWM Regulator Driver");
+MODULE_ALIAS("platform:st_pwm-regulator");
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index f78857bd6a15..a7e152696a02 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -74,7 +74,8 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
config.init_data = of_get_regulator_init_data(&pdev->dev,
pdev->dev.of_node);
- stw481x->vmmc_regulator = regulator_register(&vmmc_regulator, &config);
+ stw481x->vmmc_regulator = devm_regulator_register(&pdev->dev,
+ &vmmc_regulator, &config);
if (IS_ERR(stw481x->vmmc_regulator)) {
dev_err(&pdev->dev,
"error initializing STw481x VMMC regulator\n");
@@ -85,14 +86,6 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int stw481x_vmmc_regulator_remove(struct platform_device *pdev)
-{
- struct stw481x *stw481x = dev_get_platdata(&pdev->dev);
-
- regulator_unregister(stw481x->vmmc_regulator);
- return 0;
-}
-
static const struct of_device_id stw481x_vmmc_match[] = {
{ .compatible = "st,stw481x-vmmc", },
{},
@@ -105,7 +98,6 @@ static struct platform_driver stw481x_vmmc_regulator_driver = {
.of_match_table = stw481x_vmmc_match,
},
.probe = stw481x_vmmc_regulator_probe,
- .remove = stw481x_vmmc_regulator_remove,
};
module_platform_driver(stw481x_vmmc_regulator_driver);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index b187b6bba7ad..a2dabb575b97 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -54,8 +54,8 @@ struct ti_abb_info {
/**
* struct ti_abb_reg - Register description for ABB block
- * @setup_reg: setup register offset from base
- * @control_reg: control register offset from base
+ * @setup_off: setup register offset from base
+ * @control_off: control register offset from base
* @sr2_wtcnt_value_mask: setup register- sr2_wtcnt_value mask
* @fbb_sel_mask: setup register- FBB sel mask
* @rbb_sel_mask: setup register- RBB sel mask
@@ -64,8 +64,8 @@ struct ti_abb_info {
* @opp_sel_mask: control register - mask for mode to operate
*/
struct ti_abb_reg {
- u32 setup_reg;
- u32 control_reg;
+ u32 setup_off;
+ u32 control_off;
/* Setup register fields */
u32 sr2_wtcnt_value_mask;
@@ -83,6 +83,8 @@ struct ti_abb_reg {
* @rdesc: regulator descriptor
* @clk: clock(usually sysclk) supplying ABB block
* @base: base address of ABB block
+ * @setup_reg: setup register of ABB block
+ * @control_reg: control register of ABB block
* @int_base: interrupt register base address
* @efuse_base: (optional) efuse base address for ABB modes
* @ldo_base: (optional) LDOVBB vset override base address
@@ -99,6 +101,8 @@ struct ti_abb {
struct regulator_desc rdesc;
struct clk *clk;
void __iomem *base;
+ void __iomem *setup_reg;
+ void __iomem *control_reg;
void __iomem *int_base;
void __iomem *efuse_base;
void __iomem *ldo_base;
@@ -118,20 +122,18 @@ struct ti_abb {
* ti_abb_rmw() - handy wrapper to set specific register bits
* @mask: mask for register field
* @value: value shifted to mask location and written
- * @offset: offset of register
- * @base: base address
+ * @reg: register address
*
* Return: final register value (may be unused)
*/
-static inline u32 ti_abb_rmw(u32 mask, u32 value, u32 offset,
- void __iomem *base)
+static inline u32 ti_abb_rmw(u32 mask, u32 value, void __iomem *reg)
{
u32 val;
- val = readl(base + offset);
+ val = readl(reg);
val &= ~mask;
val |= (value << __ffs(mask)) & mask;
- writel(val, base + offset);
+ writel(val, reg);
return val;
}
@@ -263,21 +265,19 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
if (ret)
goto out;
- ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, regs->setup_reg,
- abb->base);
+ ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, abb->setup_reg);
switch (info->opp_sel) {
case TI_ABB_SLOW_OPP:
- ti_abb_rmw(regs->rbb_sel_mask, 1, regs->setup_reg, abb->base);
+ ti_abb_rmw(regs->rbb_sel_mask, 1, abb->setup_reg);
break;
case TI_ABB_FAST_OPP:
- ti_abb_rmw(regs->fbb_sel_mask, 1, regs->setup_reg, abb->base);
+ ti_abb_rmw(regs->fbb_sel_mask, 1, abb->setup_reg);
break;
}
/* program next state of ABB ldo */
- ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
- abb->base);
+ ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, abb->control_reg);
/*
* program LDO VBB vset override if needed for !bypass mode
@@ -288,7 +288,7 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
ti_abb_program_ldovbb(dev, abb, info);
/* Initiate ABB ldo change */
- ti_abb_rmw(regs->opp_change_mask, 1, regs->control_reg, abb->base);
+ ti_abb_rmw(regs->opp_change_mask, 1, abb->control_reg);
/* Wait for ABB LDO to complete transition to new Bias setting */
ret = ti_abb_wait_txdone(dev, abb);
@@ -490,8 +490,7 @@ static int ti_abb_init_timings(struct device *dev, struct ti_abb *abb)
dev_dbg(dev, "%s: Clk_rate=%ld, sr2_cnt=0x%08x\n", __func__,
clk_get_rate(abb->clk), sr2_wt_cnt_val);
- ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, regs->setup_reg,
- abb->base);
+ ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, abb->setup_reg);
return 0;
}
@@ -508,32 +507,24 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
struct regulator_init_data *rinit_data)
{
struct ti_abb_info *info;
- const struct property *prop;
- const __be32 *abb_info;
const u32 num_values = 6;
char *pname = "ti,abb_info";
- u32 num_entries, i;
+ u32 i;
unsigned int *volt_table;
- int min_uV = INT_MAX, max_uV = 0;
+ int num_entries, min_uV = INT_MAX, max_uV = 0;
struct regulation_constraints *c = &rinit_data->constraints;
- prop = of_find_property(dev->of_node, pname, NULL);
- if (!prop) {
- dev_err(dev, "No '%s' property?\n", pname);
- return -ENODEV;
- }
-
- if (!prop->value) {
- dev_err(dev, "Empty '%s' property?\n", pname);
- return -ENODATA;
- }
-
/*
* Each abb_info is a set of n-tuple, where n is num_values, consisting
* of voltage and a set of detection logic for ABB information for that
* voltage to apply.
*/
- num_entries = prop->length / sizeof(u32);
+ num_entries = of_property_count_u32_elems(dev->of_node, pname);
+ if (num_entries < 0) {
+ dev_err(dev, "No '%s' property?\n", pname);
+ return num_entries;
+ }
+
if (!num_entries || (num_entries % num_values)) {
dev_err(dev, "All '%s' list entries need %d vals\n", pname,
num_values);
@@ -542,38 +533,38 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
num_entries /= num_values;
info = devm_kzalloc(dev, sizeof(*info) * num_entries, GFP_KERNEL);
- if (!info) {
- dev_err(dev, "Can't allocate info table for '%s' property\n",
- pname);
+ if (!info)
return -ENOMEM;
- }
+
abb->info = info;
volt_table = devm_kzalloc(dev, sizeof(unsigned int) * num_entries,
GFP_KERNEL);
- if (!volt_table) {
- dev_err(dev, "Can't allocate voltage table for '%s' property\n",
- pname);
+ if (!volt_table)
return -ENOMEM;
- }
abb->rdesc.n_voltages = num_entries;
abb->rdesc.volt_table = volt_table;
/* We do not know where the OPP voltage is at the moment */
abb->current_info_idx = -EINVAL;
- abb_info = prop->value;
for (i = 0; i < num_entries; i++, info++, volt_table++) {
u32 efuse_offset, rbb_mask, fbb_mask, vset_mask;
u32 efuse_val;
/* NOTE: num_values should equal to entries picked up here */
- *volt_table = be32_to_cpup(abb_info++);
- info->opp_sel = be32_to_cpup(abb_info++);
- efuse_offset = be32_to_cpup(abb_info++);
- rbb_mask = be32_to_cpup(abb_info++);
- fbb_mask = be32_to_cpup(abb_info++);
- vset_mask = be32_to_cpup(abb_info++);
+ of_property_read_u32_index(dev->of_node, pname, i * num_values,
+ volt_table);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 1, &info->opp_sel);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 2, &efuse_offset);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 3, &rbb_mask);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 4, &fbb_mask);
+ of_property_read_u32_index(dev->of_node, pname,
+ i * num_values + 5, &vset_mask);
dev_dbg(dev,
"[%d]v=%d ABB=%d ef=0x%x rbb=0x%x fbb=0x%x vset=0x%x\n",
@@ -648,8 +639,8 @@ static struct regulator_ops ti_abb_reg_ops = {
/* Default ABB block offsets, IF this changes in future, create new one */
static const struct ti_abb_reg abb_regs_v1 = {
/* WARNING: registers are wrongly documented in TRM */
- .setup_reg = 0x04,
- .control_reg = 0x00,
+ .setup_off = 0x04,
+ .control_off = 0x00,
.sr2_wtcnt_value_mask = (0xff << 8),
.fbb_sel_mask = (0x01 << 2),
@@ -661,8 +652,8 @@ static const struct ti_abb_reg abb_regs_v1 = {
};
static const struct ti_abb_reg abb_regs_v2 = {
- .setup_reg = 0x00,
- .control_reg = 0x04,
+ .setup_off = 0x00,
+ .control_off = 0x04,
.sr2_wtcnt_value_mask = (0xff << 8),
.fbb_sel_mask = (0x01 << 2),
@@ -673,9 +664,20 @@ static const struct ti_abb_reg abb_regs_v2 = {
.opp_sel_mask = (0x03 << 0),
};
+static const struct ti_abb_reg abb_regs_generic = {
+ .sr2_wtcnt_value_mask = (0xff << 8),
+ .fbb_sel_mask = (0x01 << 2),
+ .rbb_sel_mask = (0x01 << 1),
+ .sr2_en_mask = (0x01 << 0),
+
+ .opp_change_mask = (0x01 << 2),
+ .opp_sel_mask = (0x03 << 0),
+};
+
static const struct of_device_id ti_abb_of_match[] = {
{.compatible = "ti,abb-v1", .data = &abb_regs_v1},
{.compatible = "ti,abb-v2", .data = &abb_regs_v2},
+ {.compatible = "ti,abb-v3", .data = &abb_regs_generic},
{ },
};
@@ -722,11 +724,29 @@ static int ti_abb_probe(struct platform_device *pdev)
abb->regs = match->data;
/* Map ABB resources */
- pname = "base-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(abb->base))
- return PTR_ERR(abb->base);
+ if (abb->regs->setup_off || abb->regs->control_off) {
+ pname = "base-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->base))
+ return PTR_ERR(abb->base);
+
+ abb->setup_reg = abb->base + abb->regs->setup_off;
+ abb->control_reg = abb->base + abb->regs->control_off;
+
+ } else {
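+ /*
+ * No fixed register offsets: the setup and control registers are
+ * described as separate memory resources.
+ */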
+ pname = "control-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->control_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->control_reg))
+ return PTR_ERR(abb->control_reg);
+
+ pname = "setup-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ abb->setup_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(abb->setup_reg))
+ return PTR_ERR(abb->setup_reg);
+ }
pname = "int-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
@@ -860,7 +880,7 @@ skip_opt:
platform_set_drvdata(pdev, rdev);
/* Enable the ldo if not already done by bootloader */
- ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->regs->setup_reg, abb->base);
+ ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->setup_reg);
return 0;
}
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index b0a3f0917a27..f31f22e3e1bd 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -70,16 +70,16 @@
#define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1
#define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2
-#define TPS51632_MIN_VOLATGE 500000
-#define TPS51632_MAX_VOLATGE 1520000
-#define TPS51632_VOLATGE_STEP_10mV 10000
-#define TPS51632_VOLATGE_STEP_20mV 20000
+#define TPS51632_MIN_VOLTAGE 500000
+#define TPS51632_MAX_VOLTAGE 1520000
+#define TPS51632_VOLTAGE_STEP_10mV 10000
+#define TPS51632_VOLTAGE_STEP_20mV 20000
#define TPS51632_MAX_VSEL 0x7F
#define TPS51632_MIN_VSEL 0x19
#define TPS51632_DEFAULT_RAMP_DELAY 6000
#define TPS51632_VOLT_VSEL(uV) \
- (DIV_ROUND_UP(uV - TPS51632_MIN_VOLATGE, \
- TPS51632_VOLATGE_STEP_10mV) + \
+ (DIV_ROUND_UP(uV - TPS51632_MIN_VOLTAGE, \
+ TPS51632_VOLTAGE_STEP_10mV) + \
TPS51632_MIN_VSEL)
/* TPS51632 chip information */
@@ -227,10 +227,8 @@ static struct tps51632_regulator_platform_data *
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "Memory alloc failed for platform data\n");
+ if (!pdata)
return NULL;
- }
pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!pdata->reg_init_data) {
@@ -243,9 +241,9 @@ static struct tps51632_regulator_platform_data *
pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");
pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
- TPS51632_MIN_VOLATGE;
+ TPS51632_MIN_VOLTAGE;
pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
- TPS51632_MAX_VOLATGE;
+ TPS51632_MAX_VOLTAGE;
return pdata;
}
#else
@@ -284,32 +282,30 @@ static int tps51632_probe(struct i2c_client *client,
}
if (pdata->enable_pwm_dvfs) {
- if ((pdata->base_voltage_uV < TPS51632_MIN_VOLATGE) ||
- (pdata->base_voltage_uV > TPS51632_MAX_VOLATGE)) {
+ if ((pdata->base_voltage_uV < TPS51632_MIN_VOLTAGE) ||
+ (pdata->base_voltage_uV > TPS51632_MAX_VOLTAGE)) {
dev_err(&client->dev, "Invalid base_voltage_uV setting\n");
return -EINVAL;
}
if ((pdata->max_voltage_uV) &&
- ((pdata->max_voltage_uV < TPS51632_MIN_VOLATGE) ||
- (pdata->max_voltage_uV > TPS51632_MAX_VOLATGE))) {
+ ((pdata->max_voltage_uV < TPS51632_MIN_VOLTAGE) ||
+ (pdata->max_voltage_uV > TPS51632_MAX_VOLTAGE))) {
dev_err(&client->dev, "Invalid max_voltage_uV setting\n");
return -EINVAL;
}
}
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
- if (!tps) {
- dev_err(&client->dev, "Memory allocation failed\n");
+ if (!tps)
return -ENOMEM;
- }
tps->dev = &client->dev;
- tps->desc.name = id->name;
+ tps->desc.name = client->name;
tps->desc.id = 0;
tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
- tps->desc.min_uV = TPS51632_MIN_VOLATGE;
- tps->desc.uV_step = TPS51632_VOLATGE_STEP_10mV;
+ tps->desc.min_uV = TPS51632_MIN_VOLTAGE;
+ tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV;
tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
tps->desc.ops = &tps51632_dcdc_ops;
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index c2c0185a2dcd..a1672044e519 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -299,10 +299,8 @@ static struct tps62360_regulator_platform_data *
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "Memory alloc failed for platform data\n");
+ if (!pdata)
return NULL;
- }
pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!pdata->reg_init_data) {
@@ -360,7 +358,7 @@ static int tps62360_probe(struct i2c_client *client,
dev_err(&client->dev, "Error: No device match found\n");
return -ENODEV;
}
- chip_id = (int)match->data;
+ chip_id = (int)(long)match->data;
if (!pdata)
pdata = of_get_tps62360_platform_data(&client->dev);
} else if (id) {
@@ -377,11 +375,8 @@ static int tps62360_probe(struct i2c_client *client,
}
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
- if (!tps) {
- dev_err(&client->dev, "%s(): Memory allocation failed\n",
- __func__);
+ if (!tps)
return -ENOMEM;
- }
tps->en_discharge = pdata->en_discharge;
tps->en_internal_pulldn = pdata->en_internal_pulldn;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 162a0fae20b3..98e66ce26723 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -359,7 +359,6 @@ static struct regulator_ops tps6507x_pmic_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-#ifdef CONFIG_OF
static struct of_regulator_match tps6507x_matches[] = {
{ .name = "VDCDC1"},
{ .name = "VDCDC2"},
@@ -381,12 +380,10 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
GFP_KERNEL);
- if (!tps_board) {
- dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+ if (!tps_board)
return NULL;
- }
- regulators = of_find_node_by_name(np, "regulators");
+ regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
@@ -396,6 +393,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
matches = tps6507x_matches;
ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
@@ -406,10 +404,8 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
* TPS6507X_NUM_REGULATOR), GFP_KERNEL);
- if (!reg_data) {
- dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
+ if (!reg_data)
return NULL;
- }
tps_board->tps6507x_pmic_init_data = reg_data;
@@ -424,15 +420,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
return tps_board;
}
-#else
-static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
- struct platform_device *pdev,
- struct of_regulator_match **tps6507x_reg_matches)
-{
- *tps6507x_reg_matches = NULL;
- return NULL;
-}
-#endif
+
static int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -453,9 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
- if (!tps_board && tps6507x_dev->dev->of_node)
+ if (IS_ENABLED(CONFIG_OF) && !tps_board &&
+ tps6507x_dev->dev->of_node)
tps_board = tps6507x_parse_dt_reg_data(pdev,
- &tps6507x_reg_matches);
+ &tps6507x_reg_matches);
if (!tps_board)
return -EINVAL;
@@ -481,7 +470,7 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
tps->info[i] = info;
if (init_data->driver_data) {
struct tps6507x_reg_platform_data *data =
- init_data->driver_data;
+ init_data->driver_data;
tps->info[i]->defdcdc_default = data->defdcdc_default;
}
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 676f75548f00..2e92ef68574d 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -168,17 +168,13 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
GFP_KERNEL);
- if (!tps65090_pdata) {
- dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+ if (!tps65090_pdata)
return ERR_PTR(-ENOMEM);
- }
reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
sizeof(*reg_pdata), GFP_KERNEL);
- if (!reg_pdata) {
- dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
+ if (!reg_pdata)
return ERR_PTR(-ENOMEM);
- }
regulators = of_get_child_by_name(np, "regulators");
if (!regulators) {
@@ -188,6 +184,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
ARRAY_SIZE(tps65090_matches));
+ of_node_put(regulators);
if (ret < 0) {
dev_err(&pdev->dev,
"Error parsing regulator init data: %d\n", ret);
@@ -252,10 +249,8 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ if (!pmic)
return -ENOMEM;
- }
for (num = 0; num < TPS65090_REGULATOR_MAX; num++) {
tps_pdata = tps65090_pdata->reg_pdata[num];
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 9ea1bf26bd13..10b78d2b766a 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -187,7 +187,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
struct device_node *regs;
int i, count;
- regs = of_find_node_by_name(node, "regulators");
+ regs = of_get_child_by_name(node, "regulators");
if (!regs)
return NULL;
@@ -202,7 +202,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
return NULL;
for (i = 0; i < count; i++) {
- if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+ if (!reg_matches[i].of_node)
continue;
pdata->tps65217_init_data[i] = reg_matches[i].init_data;
@@ -222,7 +222,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_board *pdata = dev_get_platdata(tps->dev);
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i;
@@ -243,19 +242,9 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps);
for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
-
- reg_data = pdata->tps65217_init_data[i];
-
- /*
- * Regulator API handles empty constraints but not NULL
- * constraints
- */
- if (!reg_data)
- continue;
-
/* Register the regulators */
config.dev = tps->dev;
- config.init_data = reg_data;
+ config.init_data = pdata->tps65217_init_data[i];
config.driver_data = tps;
config.regmap = tps->regmap;
if (tps->dev->of_node)
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
new file mode 100644
index 000000000000..cec72fa71d1d
--- /dev/null
+++ b/drivers/regulator/tps65218-regulator.c
@@ -0,0 +1,285 @@
+/*
+ * tps65218-regulator.c
+ *
+ * Regulator driver for TPS65218 PMIC
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65218.h>
+
+static unsigned int tps65218_ramp_delay = 4000;
+
+enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4, DCDC5, DCDC6, LDO1 };
+
+#define TPS65218_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _er, _em, _t, \
+ _lr, _nlr) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .ops = &_ops, \
+ .n_voltages = _n, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .volt_table = _t, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ } \
+
+#define TPS65218_INFO(_id, _nm, _min, _max) \
+ { \
+ .id = _id, \
+ .name = _nm, \
+ .min_uV = _min, \
+ .max_uV = _max, \
+ }
+
+static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
+ REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
+};
+
+static const struct regulator_linear_range ldo1_dcdc3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x0, 0x1a, 25000),
+ REGULATOR_LINEAR_RANGE(1600000, 0x1b, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
+ REGULATOR_LINEAR_RANGE(1550000, 0x10, 0x34, 50000),
+};
+
+static struct tps_info tps65218_pmic_regs[] = {
+	TPS65218_INFO(0, "DCDC1", 850000, 1675000),
+ TPS65218_INFO(1, "DCDC2", 850000, 1675000),
+ TPS65218_INFO(2, "DCDC3", 900000, 3400000),
+ TPS65218_INFO(3, "DCDC4", 1175000, 3400000),
+ TPS65218_INFO(4, "DCDC5", 1000000, 1000000),
+ TPS65218_INFO(5, "DCDC6", 1800000, 1800000),
+ TPS65218_INFO(6, "LDO1", 900000, 3400000),
+};
+
+#define TPS65218_OF_MATCH(comp, label) \
+ { \
+ .compatible = comp, \
+ .data = &label, \
+ }
+
+static const struct of_device_id tps65218_of_match[] = {
+ TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
+ TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
+ TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps65218_of_match);
+
+static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int ret;
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+	/* Set the voltage based on the vsel value; write protect level is 2 */
+ ret = tps65218_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
+ selector, TPS65218_PROTECT_L1);
+
+	/* Set GO bit for DCDC1/2 to initiate voltage transition */
+ switch (rid) {
+ case TPS65218_DCDC_1:
+ case TPS65218_DCDC_2:
+ ret = tps65218_set_bits(tps, TPS65218_REG_CONTRL_SLEW_RATE,
+ TPS65218_SLEW_RATE_GO,
+ TPS65218_SLEW_RATE_GO,
+ TPS65218_PROTECT_L1);
+ break;
+ }
+
+ return ret;
+}
+
+static int tps65218_pmic_enable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+	/* Enable the regulator; password protection is level 1 */
+ return tps65218_set_bits(tps, dev->desc->enable_reg,
+ dev->desc->enable_mask, dev->desc->enable_mask,
+ TPS65218_PROTECT_L1);
+}
+
+static int tps65218_pmic_disable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+	/* Disable the regulator; password protection is level 1 */
+ return tps65218_clear_bits(tps, dev->desc->enable_reg,
+ dev->desc->enable_mask, TPS65218_PROTECT_L1);
+}
+
+static int tps65218_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector, unsigned int new_selector)
+{
+ int old_uv, new_uv;
+
+ old_uv = regulator_list_voltage_linear_range(rdev, old_selector);
+ if (old_uv < 0)
+ return old_uv;
+
+ new_uv = regulator_list_voltage_linear_range(rdev, new_selector);
+ if (new_uv < 0)
+ return new_uv;
+
+ return DIV_ROUND_UP(abs(old_uv - new_uv), tps65218_ramp_delay);
+}
+
+/* Operations permitted on DCDC1, DCDC2 */
+static struct regulator_ops tps65218_dcdc12_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65218_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = tps65218_set_voltage_time_sel,
+};
+
+/* Operations permitted on DCDC3, DCDC4 and LDO1 */
+static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65218_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+/* Operations permitted on DCDC5, DCDC6 */
+static struct regulator_ops tps65218_dcdc56_pmic_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65218_pmic_enable,
+ .disable = tps65218_pmic_disable,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC1,
+ TPS65218_CONTROL_DCDC1_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC1_EN, NULL,
+ dcdc1_dcdc2_ranges, 2),
+ TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC2,
+ TPS65218_CONTROL_DCDC2_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC2_EN, NULL,
+ dcdc1_dcdc2_ranges, 2),
+ TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, tps65218_ldo1_dcdc34_ops,
+ 64, TPS65218_REG_CONTROL_DCDC3,
+ TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC3_EN, NULL,
+ ldo1_dcdc3_ranges, 2),
+ TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, tps65218_ldo1_dcdc34_ops,
+ 53, TPS65218_REG_CONTROL_DCDC4,
+ TPS65218_CONTROL_DCDC4_MASK,
+ TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC4_EN, NULL,
+ dcdc4_ranges, 2),
+ TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, tps65218_dcdc56_pmic_ops,
+ 1, -1, -1, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC5_EN, NULL, NULL, 0),
+ TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, tps65218_dcdc56_pmic_ops,
+ 1, -1, -1, TPS65218_REG_ENABLE1,
+ TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0),
+ TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REG_CONTROL_DCDC4,
+ TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
+ 2),
+};
+
+static int tps65218_regulator_probe(struct platform_device *pdev)
+{
+ struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_init_data *init_data;
+ const struct tps_info *template;
+ struct regulator_dev *rdev;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+ int id;
+
+ match = of_match_device(tps65218_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ template = match->data;
+ id = template->id;
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+
+ platform_set_drvdata(pdev, tps);
+
+ tps->info[id] = &tps65218_pmic_regs[id];
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tps65218_regulator_driver = {
+ .driver = {
+ .name = "tps65218-pmic",
+ .owner = THIS_MODULE,
+ .of_match_table = tps65218_of_match,
+ },
+ .probe = tps65218_regulator_probe,
+};
+
+module_platform_driver(tps65218_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("TPS65218 voltage regulator driver");
+MODULE_ALIAS("platform:tps65218-pmic");
+MODULE_LICENSE("GPL v2");
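tps65218_set_voltage_time_sel() above converts the difference between two selectors into a settle time by dividing the voltage delta by the fixed ramp rate (4000 in the driver, understood here as microvolts per microsecond). A stand-alone sketch of that arithmetic, with an illustrative example in the comment:

#include <linux/kernel.h>

/*
 * Sketch: settle time in microseconds for a voltage step, assuming a
 * linear ramp of ramp_uv_per_us microvolts per microsecond (the driver
 * above uses a fixed 4000).  Example: stepping 1100000 -> 1200000 uV at
 * 4000 uV/us gives DIV_ROUND_UP(100000, 4000) = 25 us.
 */
static unsigned int example_settle_time_us(int old_uv, int new_uv,
					   unsigned int ramp_uv_per_us)
{
	return DIV_ROUND_UP(abs(old_uv - new_uv), ramp_uv_per_us);
}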
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9f6bfda711b7..5b494db9f95c 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -593,10 +593,9 @@ static int pmic_probe(struct spi_device *spi)
}
hw = devm_kzalloc(&spi->dev, sizeof(struct tps6524x), GFP_KERNEL);
- if (!hw) {
- dev_err(dev, "cannot allocate regulator private data\n");
+ if (!hw)
return -ENOMEM;
- }
+
spi_set_drvdata(spi, hw);
memset(hw, 0, sizeof(struct tps6524x));
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 0485d47f0d8a..32f38a63d944 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -363,10 +363,8 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Memory alloction failed\n");
+ if (!pdata)
return NULL;
- }
for (i = 0; i < num; i++) {
int id;
@@ -398,7 +396,7 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
{
struct tps6586x_regulator *ri = NULL;
struct regulator_config config = { };
- struct regulator_dev **rdev;
+ struct regulator_dev *rdev;
struct regulator_init_data *reg_data;
struct tps6586x_platform_data *pdata;
struct of_regulator_match *tps6586x_reg_matches = NULL;
@@ -418,13 +416,6 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- rdev = devm_kzalloc(&pdev->dev, TPS6586X_ID_MAX_REGULATOR *
- sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- dev_err(&pdev->dev, "Mmemory alloc failed\n");
- return -ENOMEM;
- }
-
version = tps6586x_get_version(pdev->dev.parent);
for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
@@ -451,12 +442,11 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
if (tps6586x_reg_matches)
config.of_node = tps6586x_reg_matches[id].of_node;
- rdev[id] = devm_regulator_register(&pdev->dev, &ri->desc,
- &config);
- if (IS_ERR(rdev[id])) {
+ rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
- return PTR_ERR(rdev[id]);
+ return PTR_ERR(rdev);
}
if (reg_data) {
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index a00132e31ec7..fa7db8847578 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -88,6 +88,11 @@ static const unsigned int VMMC_VSEL_table[] = {
1800000, 2800000, 3000000, 3300000,
};
+/* supported BBCH voltages in microvolts */
+static const unsigned int VBB_VSEL_table[] = {
+ 3000000, 2520000, 3150000, 5000000,
+};
+
struct tps_info {
const char *name;
const char *vin_name;
@@ -183,6 +188,12 @@ static struct tps_info tps65910_regs[] = {
.voltage_table = VMMC_VSEL_table,
.enable_time_us = 100,
},
+ {
+ .name = "vbb",
+ .vin_name = "vcc7",
+ .n_voltages = ARRAY_SIZE(VBB_VSEL_table),
+ .voltage_table = VBB_VSEL_table,
+ },
};
static struct tps_info tps65911_regs[] = {
@@ -339,6 +350,8 @@ static int tps65910_get_ctrl_register(int id)
return TPS65910_VAUX33;
case TPS65910_REG_VMMC:
return TPS65910_VMMC;
+ case TPS65910_REG_VBB:
+ return TPS65910_BBCH;
default:
return -EINVAL;
}
@@ -528,6 +541,10 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
value &= LDO_SEL_MASK;
value >>= LDO_SEL_SHIFT;
break;
+ case TPS65910_REG_VBB:
+ value &= BBCH_BBSEL_MASK;
+ value >>= BBCH_BBSEL_SHIFT;
+ break;
default:
return -EINVAL;
}
@@ -638,6 +655,9 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VMMC:
return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
selector << LDO_SEL_SHIFT);
+ case TPS65910_REG_VBB:
+ return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -669,6 +689,9 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VIO:
return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
selector << LDO_SEL_SHIFT);
+ case TPS65910_REG_VBB:
+ return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -762,6 +785,18 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.map_voltage = regulator_map_voltage_ascend,
};
+static struct regulator_ops tps65910_ops_vbb = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage_sel = tps65910_get_voltage_sel,
+ .set_voltage_sel = tps65910_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
static struct regulator_ops tps65910_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -944,6 +979,7 @@ static struct of_regulator_match tps65910_matches[] = {
{ .name = "vaux2", .driver_data = (void *) &tps65910_regs[10] },
{ .name = "vaux33", .driver_data = (void *) &tps65910_regs[11] },
{ .name = "vmmc", .driver_data = (void *) &tps65910_regs[12] },
+ { .name = "vbb", .driver_data = (void *) &tps65910_regs[13] },
};
static struct of_regulator_match tps65911_matches[] = {
@@ -975,11 +1011,8 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
pmic_plat_data = devm_kzalloc(&pdev->dev, sizeof(*pmic_plat_data),
GFP_KERNEL);
-
- if (!pmic_plat_data) {
- dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+ if (!pmic_plat_data)
return NULL;
- }
np = of_node_get(pdev->dev.parent->of_node);
regulators = of_get_child_by_name(np, "regulators");
@@ -1062,10 +1095,8 @@ static int tps65910_probe(struct platform_device *pdev)
}
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "Memory allocation failed for pmic\n");
+ if (!pmic)
return -ENOMEM;
- }
pmic->mfd = tps65910;
platform_set_drvdata(pdev, pmic);
@@ -1094,24 +1125,18 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->desc = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct regulator_desc), GFP_KERNEL);
- if (!pmic->desc) {
- dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+ if (!pmic->desc)
return -ENOMEM;
- }
pmic->info = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct tps_info *), GFP_KERNEL);
- if (!pmic->info) {
- dev_err(&pdev->dev, "Memory alloc fails for info\n");
+ if (!pmic->info)
return -ENOMEM;
- }
pmic->rdev = devm_kzalloc(&pdev->dev, pmic->num_regulators *
sizeof(struct regulator_dev *), GFP_KERNEL);
- if (!pmic->rdev) {
- dev_err(&pdev->dev, "Memory alloc fails for rdev\n");
+ if (!pmic->rdev)
return -ENOMEM;
- }
for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
i++, info++) {
@@ -1145,6 +1170,10 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].ops = &tps65910_ops_dcdc;
pmic->desc[i].ramp_delay = 5000;
}
+ } else if (i == TPS65910_REG_VBB &&
+ tps65910_chip_id(tps65910) == TPS65910) {
+ pmic->desc[i].ops = &tps65910_ops_vbb;
+ pmic->desc[i].volt_table = info->voltage_table;
} else {
if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops;
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 71f457a42623..26aa6d9c308f 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -115,7 +115,7 @@ static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
ri->rinfo->state_reg, ret);
return ret;
}
- return ((reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON);
+ return (reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON;
}
static int tps80031_reg_enable(struct regulator_dev *rdev)
@@ -693,10 +693,8 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
pmic = devm_kzalloc(&pdev->dev,
TPS80031_REGULATOR_MAX * sizeof(*pmic), GFP_KERNEL);
- if (!pmic) {
- dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ if (!pmic)
return -ENOMEM;
- }
for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
tps_pdata = pdata->regulator_pdata[num];
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 8ebd785485c7..fed28abef419 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -58,7 +58,7 @@ struct twlreg_info {
struct regulator_desc desc;
/* chip specific features */
- unsigned long features;
+ unsigned long features;
/*
* optional override functions for voltage set/get
@@ -1128,7 +1128,7 @@ static int twlreg_probe(struct platform_device *pdev)
if (!initdata)
return -EINVAL;
- info = kmemdup(template, sizeof (*info), GFP_KERNEL);
+ info = kmemdup(template, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 6823e6f2b88a..0d88a82ab2a2 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -469,10 +469,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
@@ -622,10 +620,8 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
@@ -752,18 +748,15 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
return -ENODEV;
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
dcdc->base = res->start;
@@ -788,7 +781,7 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
id + 1, ret);
- goto err;
+ return ret;
}
irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
@@ -799,15 +792,12 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
irq, ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, dcdc);
return 0;
-
-err:
- return ret;
}
static struct platform_driver wm831x_boostp_driver = {
@@ -846,10 +836,8 @@ static int wm831x_epe_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing EPE%d\n", id + 1);
dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
- if (dcdc == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!dcdc)
return -ENOMEM;
- }
dcdc->wm831x = wm831x;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 0339b886df5d..72e385e76a9d 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -165,10 +165,8 @@ static int wm831x_isink_probe(struct platform_device *pdev)
isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
GFP_KERNEL);
- if (isink == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!isink)
return -ENOMEM;
- }
isink->wm831x = wm831x;
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 46d6700467b5..eca0eeb78acd 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -235,10 +235,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
@@ -447,10 +445,8 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
@@ -594,10 +590,8 @@ static int wm831x_alive_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm831x = wm831x;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index de7b9c73e3fa..7ec7c390eeda 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -361,7 +361,7 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
/* all DCDCs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
@@ -574,7 +574,7 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
- return -EINVAL;
+ return sel;
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 71c5911f2e71..c24346db8a71 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -134,10 +134,8 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_ldo), GFP_KERNEL);
- if (ldo == NULL) {
- dev_err(&pdev->dev, "Unable to allocate private data\n");
+ if (!ldo)
return -ENOMEM;
- }
ldo->wm8994 = wm8994;
ldo->supply = wm8994_ldo_consumer[id];
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 1e2d83f2b995..cc29832c9638 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
new file mode 100644
index 000000000000..695bd3496eba
--- /dev/null
+++ b/drivers/reset/reset-sunxi.c
@@ -0,0 +1,175 @@
+/*
+ * Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct sunxi_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct sunxi_reset_data *data = container_of(rcdev,
+ struct sunxi_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg & ~BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct sunxi_reset_data *data = container_of(rcdev,
+ struct sunxi_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg | BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops sunxi_reset_ops = {
+ .assert = sunxi_reset_assert,
+ .deassert = sunxi_reset_deassert,
+};
+
+static int sunxi_reset_init(struct device_node *np)
+{
+ struct sunxi_reset_data *data;
+ struct resource res;
+ resource_size_t size;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_alloc;
+
+ size = resource_size(&res);
+ if (!request_mem_region(res.start, size, np->name)) {
+ ret = -EBUSY;
+ goto err_alloc;
+ }
+
+ data->membase = ioremap(res.start, size);
+ if (!data->membase) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = size * 32;
+ data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.of_node = np;
+ reset_controller_register(&data->rcdev);
+
+ return 0;
+
+err_alloc:
+ kfree(data);
+ return ret;
+};
+
+/*
+ * These are the reset controllers we need to initialize early on in
+ * our system, before we can even think of using a regular device
+ * driver for them.
+ */
+static const struct of_device_id sunxi_early_reset_dt_ids[] __initdata = {
+ { .compatible = "allwinner,sun6i-a31-ahb1-reset", },
+ { /* sentinel */ },
+};
+
+void __init sun6i_reset_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, sunxi_early_reset_dt_ids)
+ sunxi_reset_init(np);
+}
+
+/*
+ * And these are the controllers we can register through the regular
+ * device model.
+ */
+static const struct of_device_id sunxi_reset_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-clock-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
+
+static int sunxi_reset_probe(struct platform_device *pdev)
+{
+ return sunxi_reset_init(pdev->dev.of_node);
+}
+
+static int sunxi_reset_remove(struct platform_device *pdev)
+{
+ struct sunxi_reset_data *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+ iounmap(data->membase);
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_reset_driver = {
+ .probe = sunxi_reset_probe,
+ .remove = sunxi_reset_remove,
+ .driver = {
+ .name = "sunxi-reset",
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_reset_dt_ids,
+ },
+};
+module_platform_driver(sunxi_reset_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
+MODULE_LICENSE("GPL");
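sunxi_reset_assert()/sunxi_reset_deassert() above locate the register and bit for a given reset line by splitting the id into a bank and an offset. A hedged sketch of the same address math, assuming 32 lines per 32-bit register (BITS_PER_LONG is 32 on the ARM platforms this driver targets):

#include <linux/bitops.h>
#include <linux/types.h>

static void example_reset_bit(unsigned long id, unsigned int *reg_offset,
			      u32 *mask)
{
	*reg_offset = (id / 32) * 4;	/* one 32-bit register per bank */
	*mask = BIT(id % 32);		/* bit to clear (assert) or set (deassert) */
}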
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 007730222116..db933decc39c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -212,6 +212,17 @@ config RTC_DRV_DS3232
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
+config RTC_DRV_HYM8563
+ tristate "Haoyu Microelectronics HYM8563"
+ depends on I2C && OF
+ help
+ Say Y to enable support for the HYM8563 I2C RTC chip. Apart
+ from the usual rtc functions it provides a clock output of
+ up to 32kHz.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-hym8563.
+
config RTC_DRV_LP8788
tristate "TI LP8788 RTC driver"
depends on MFD_LP8788
@@ -304,6 +315,17 @@ config RTC_DRV_ISL12022
This driver can also be built as a module. If so, the module
will be called rtc-isl12022.
+config RTC_DRV_ISL12057
+ depends on I2C
+ select REGMAP_I2C
+ tristate "Intersil ISL12057"
+ help
+ If you say yes here you get support for the Intersil ISL12057
+ I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-isl12057.
+
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -626,7 +648,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || PPC || MIPS || SPARC64
default y if X86
help
Say "yes" here to get direct support for the real time clock
@@ -1104,6 +1126,13 @@ config RTC_DRV_SUN4V
If you say Y here you will get support for the Hypervisor
based RTC on SUN4V systems.
+config RTC_DRV_SUNXI
+ tristate "Allwinner sun4i/sun7i RTC"
+ depends on ARCH_SUNXI
+ help
+ If you say Y here you will get support for the RTC found on
+ Allwinner A10/A20.
+
config RTC_DRV_STARFIRE
bool "Starfire RTC"
depends on SPARC64
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 27b4bd884066..b427bf7dd20d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
+obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
+obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
@@ -117,6 +119,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
+obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 02426812bebc..589351ef75d0 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
@@ -157,12 +158,27 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
{
struct rtc_device *rtc;
struct rtc_wkalrm alrm;
- int id, err;
+ int of_id = -1, id = -1, err;
+
+ if (dev->of_node)
+ of_id = of_alias_get_id(dev->of_node, "rtc");
+ else if (dev->parent && dev->parent->of_node)
+ of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
+ if (of_id >= 0) {
+ id = ida_simple_get(&rtc_ida, of_id, of_id + 1,
+ GFP_KERNEL);
+ if (id < 0)
+ dev_warn(dev, "/aliases ID %d not available\n",
+ of_id);
+ }
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
- err = id;
- goto exit;
+ id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+ goto exit;
+ }
}
rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
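The rtc_device_register() change above first tries to reserve the id named by an /aliases entry (rtc0, rtc1, ...) and only falls back to the lowest free id when no alias exists or the requested slot is already taken. A small sketch of that pattern with a hypothetical "example" alias stem:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/of.h>

static DEFINE_IDA(example_ida);

/* Sketch: prefer the index from a DT alias, otherwise take any free id. */
static int example_get_id(struct device *dev)
{
	int of_id = -1, id = -1;

	if (dev->of_node)
		of_id = of_alias_get_id(dev->of_node, "example");

	if (of_id >= 0)
		/* Reserve exactly [of_id, of_id + 1) if it is still free. */
		id = ida_simple_get(&example_ida, of_id, of_id + 1,
				    GFP_KERNEL);

	if (id < 0)
		/* Fall back to the lowest unused id. */
		id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);

	return id;
}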
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 9cfa8170a2d6..4af016985890 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -198,7 +198,7 @@ static int as3722_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- as3722_rtc->rtc = rtc_device_register("as3722", &pdev->dev,
+ as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc",
&as3722_rtc_ops, THIS_MODULE);
if (IS_ERR(as3722_rtc->rtc)) {
ret = PTR_ERR(as3722_rtc->rtc);
@@ -209,28 +209,16 @@ static int as3722_rtc_probe(struct platform_device *pdev)
as3722_rtc->alarm_irq = platform_get_irq(pdev, 0);
dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
- ret = request_threaded_irq(as3722_rtc->alarm_irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL,
as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
"rtc-alarm", as3722_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
as3722_rtc->alarm_irq, ret);
- goto scrub;
+ return ret;
}
disable_irq(as3722_rtc->alarm_irq);
return 0;
-scrub:
- rtc_device_unregister(as3722_rtc->rtc);
- return ret;
-}
-
-static int as3722_rtc_remove(struct platform_device *pdev)
-{
- struct as3722_rtc *as3722_rtc = platform_get_drvdata(pdev);
-
- free_irq(as3722_rtc->alarm_irq, as3722_rtc);
- rtc_device_unregister(as3722_rtc->rtc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -260,7 +248,6 @@ static const struct dev_pm_ops as3722_rtc_pm_ops = {
static struct platform_driver as3722_rtc_driver = {
.probe = as3722_rtc_probe,
- .remove = as3722_rtc_remove,
.driver = {
.name = "as3722-rtc",
.pm = &as3722_rtc_pm_ops,
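The as3722 conversion above swaps rtc_device_register() and request_threaded_irq() for their devm_* counterparts, so the device core undoes both when the device is unbound; that is what lets the scrub label and the whole as3722_rtc_remove() callback disappear. A hedged probe skeleton showing the shape this leaves (the ops table and IRQ handler here are empty placeholders, not part of the patch):

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static const struct rtc_class_ops example_rtc_ops;

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	int irq, ret;

	rtc = devm_rtc_device_register(&pdev->dev, "example-rtc",
				       &example_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					example_irq_handler, IRQF_ONESHOT,
					"example-rtc", rtc);
	if (ret < 0)
		return ret;	/* devm unwinds the RTC registration */

	return 0;
}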
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index a2325bc5e497..cae212f30d65 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -756,11 +756,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
irq_handler_t rtc_cmos_int_handler;
if (is_hpet_enabled()) {
- int err;
-
rtc_cmos_int_handler = hpet_rtc_interrupt;
- err = hpet_register_irq_handler(cmos_interrupt);
- if (err != 0) {
+ retval = hpet_register_irq_handler(cmos_interrupt);
+ if (retval) {
dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
@@ -1175,7 +1173,7 @@ static struct platform_driver cmos_platform_driver = {
.remove = __exit_p(cmos_platform_remove),
.shutdown = cmos_platform_shutdown,
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 80f323731ee2..2dd586a19b59 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -787,7 +787,6 @@ static int ds1305_remove(struct spi_device *spi)
cancel_work_sync(&ds1305->work);
}
- spi_set_drvdata(spi, NULL);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 17b73fdc3b6e..5a1f3b2a8f1e 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -13,12 +13,13 @@
*/
#include <linux/bcd.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/rtc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -215,12 +216,19 @@ static int ds1742_rtc_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id __maybe_unused ds1742_rtc_of_match[] = {
+ { .compatible = "maxim,ds1742", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match);
+
static struct platform_driver ds1742_rtc_driver = {
.probe = ds1742_rtc_probe,
.remove = ds1742_rtc_remove,
.driver = {
.name = "rtc-ds1742",
.owner = THIS_MODULE,
+ .of_match_table = ds1742_rtc_of_match,
},
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
new file mode 100644
index 000000000000..bd628a6f981d
--- /dev/null
+++ b/drivers/rtc/rtc-hym8563.c
@@ -0,0 +1,606 @@
+/*
+ * Haoyu HYM8563 RTC driver
+ *
+ * Copyright (C) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on rtc-HYM8563
+ * Copyright (C) 2010 ROCKCHIP, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+
+#define HYM8563_CTL1 0x00
+#define HYM8563_CTL1_TEST BIT(7)
+#define HYM8563_CTL1_STOP BIT(5)
+#define HYM8563_CTL1_TESTC BIT(3)
+
+#define HYM8563_CTL2 0x01
+#define HYM8563_CTL2_TI_TP BIT(4)
+#define HYM8563_CTL2_AF BIT(3)
+#define HYM8563_CTL2_TF BIT(2)
+#define HYM8563_CTL2_AIE BIT(1)
+#define HYM8563_CTL2_TIE BIT(0)
+
+#define HYM8563_SEC 0x02
+#define HYM8563_SEC_VL BIT(7)
+#define HYM8563_SEC_MASK 0x7f
+
+#define HYM8563_MIN 0x03
+#define HYM8563_MIN_MASK 0x7f
+
+#define HYM8563_HOUR 0x04
+#define HYM8563_HOUR_MASK 0x3f
+
+#define HYM8563_DAY 0x05
+#define HYM8563_DAY_MASK 0x3f
+
+#define HYM8563_WEEKDAY 0x06
+#define HYM8563_WEEKDAY_MASK 0x07
+
+#define HYM8563_MONTH 0x07
+#define HYM8563_MONTH_CENTURY BIT(7)
+#define HYM8563_MONTH_MASK 0x1f
+
+#define HYM8563_YEAR 0x08
+
+#define HYM8563_ALM_MIN 0x09
+#define HYM8563_ALM_HOUR 0x0a
+#define HYM8563_ALM_DAY 0x0b
+#define HYM8563_ALM_WEEK 0x0c
+
+/* Each alarm check can be disabled by setting this bit in the register */
+#define HYM8563_ALM_BIT_DISABLE BIT(7)
+
+#define HYM8563_CLKOUT 0x0d
+#define HYM8563_CLKOUT_DISABLE BIT(7)
+#define HYM8563_CLKOUT_32768 0
+#define HYM8563_CLKOUT_1024 1
+#define HYM8563_CLKOUT_32 2
+#define HYM8563_CLKOUT_1 3
+#define HYM8563_CLKOUT_MASK 3
+
+#define HYM8563_TMR_CTL 0x0e
+#define HYM8563_TMR_CTL_ENABLE BIT(7)
+#define HYM8563_TMR_CTL_4096 0
+#define HYM8563_TMR_CTL_64 1
+#define HYM8563_TMR_CTL_1 2
+#define HYM8563_TMR_CTL_1_60 3
+#define HYM8563_TMR_CTL_MASK 3
+
+#define HYM8563_TMR_CNT 0x0f
+
+struct hym8563 {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
+ bool valid;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+/*
+ * RTC handling
+ */
+
+static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hym8563 *hym8563 = i2c_get_clientdata(client);
+ u8 buf[7];
+ int ret;
+
+ if (!hym8563->valid) {
+ dev_warn(&client->dev, "no valid clock/calendar values available\n");
+ return -EPERM;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
+
+ tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
+ tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
+ tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
+ tm->tm_mday = bcd2bin(buf[3] & HYM8563_DAY_MASK);
+ tm->tm_wday = bcd2bin(buf[4] & HYM8563_WEEKDAY_MASK); /* 0 = Sun */
+ tm->tm_mon = bcd2bin(buf[5] & HYM8563_MONTH_MASK) - 1; /* 0 = Jan */
+ tm->tm_year = bcd2bin(buf[6]) + 100;
+
+ return 0;
+}
+
+static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hym8563 *hym8563 = i2c_get_clientdata(client);
+ u8 buf[7];
+ int ret;
+
+	/* Years >= 2100 are too far in the future, 19XX is too early */
+ if (tm->tm_year < 100 || tm->tm_year >= 200)
+ return -EINVAL;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ buf[1] = bin2bcd(tm->tm_min);
+ buf[2] = bin2bcd(tm->tm_hour);
+ buf[3] = bin2bcd(tm->tm_mday);
+ buf[4] = bin2bcd(tm->tm_wday);
+ buf[5] = bin2bcd(tm->tm_mon + 1);
+
+ /*
+ * While the HYM8563 has a century flag in the month register,
+	 * it does not seem to retain it across a subsequent write/read.
+	 * So we'll limit ourselves to 100 years, starting at 2000 for now.
+ */
+ buf[6] = tm->tm_year - 100;
+
+ /*
+ * CTL1 only contains TEST-mode bits apart from stop,
+ * so no need to read the value first
+ */
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1,
+ HYM8563_CTL1_STOP);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, HYM8563_SEC, 7, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
+ if (ret < 0)
+ return ret;
+
+ hym8563->valid = true;
+
+ return 0;
+}
+
+static int hym8563_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int data;
+
+ data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (data < 0)
+ return data;
+
+ if (enabled)
+ data |= HYM8563_CTL2_AIE;
+ else
+ data &= ~HYM8563_CTL2_AIE;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
+};
+
+static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rtc_time *alm_tm = &alm->time;
+ u8 buf[4];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
+ if (ret < 0)
+ return ret;
+
+	/* The alarm only has minute accuracy */
+ alm_tm->tm_sec = -1;
+
+ alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[0] & HYM8563_MIN_MASK);
+ alm_tm->tm_hour = (buf[1] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[1] & HYM8563_HOUR_MASK);
+ alm_tm->tm_mday = (buf[2] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[2] & HYM8563_DAY_MASK);
+ alm_tm->tm_wday = (buf[3] & HYM8563_ALM_BIT_DISABLE) ?
+ -1 :
+ bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK);
+
+ alm_tm->tm_mon = -1;
+ alm_tm->tm_year = -1;
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & HYM8563_CTL2_AIE)
+ alm->enabled = 1;
+
+ return 0;
+}
+
+static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rtc_time *alm_tm = &alm->time;
+ u8 buf[4];
+ int ret;
+
+ /*
+	 * The alarm has no seconds field, so round any seconds up to the next minute
+ */
+ if (alm_tm->tm_sec) {
+ alm_tm->tm_sec = 0;
+ alm_tm->tm_min++;
+ if (alm_tm->tm_min >= 60) {
+ alm_tm->tm_min = 0;
+ alm_tm->tm_hour++;
+ if (alm_tm->tm_hour >= 24) {
+ alm_tm->tm_hour = 0;
+ alm_tm->tm_mday++;
+ if (alm_tm->tm_mday > 31)
+ alm_tm->tm_mday = 0;
+ }
+ }
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~HYM8563_CTL2_AIE;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
+ if (ret < 0)
+ return ret;
+
+ buf[0] = (alm_tm->tm_min < 60 && alm_tm->tm_min >= 0) ?
+ bin2bcd(alm_tm->tm_min) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[1] = (alm_tm->tm_hour < 24 && alm_tm->tm_hour >= 0) ?
+ bin2bcd(alm_tm->tm_hour) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[2] = (alm_tm->tm_mday <= 31 && alm_tm->tm_mday >= 1) ?
+ bin2bcd(alm_tm->tm_mday) : HYM8563_ALM_BIT_DISABLE;
+
+ buf[3] = (alm_tm->tm_wday < 7 && alm_tm->tm_wday >= 0) ?
+ bin2bcd(alm_tm->tm_wday) : HYM8563_ALM_BIT_DISABLE;
+
+ ret = i2c_smbus_write_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
+ if (ret < 0)
+ return ret;
+
+ return hym8563_rtc_alarm_irq_enable(dev, alm->enabled);
+}
+
+static const struct rtc_class_ops hym8563_rtc_ops = {
+ .read_time = hym8563_rtc_read_time,
+ .set_time = hym8563_rtc_set_time,
+ .alarm_irq_enable = hym8563_rtc_alarm_irq_enable,
+ .read_alarm = hym8563_rtc_read_alarm,
+ .set_alarm = hym8563_rtc_set_alarm,
+};
+
+/*
+ * Handling of the clkout
+ */
+
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_hym8563(_hw) container_of(_hw, struct hym8563, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0 || ret & HYM8563_CLKOUT_DISABLE)
+ return 0;
+
+ ret &= HYM8563_CLKOUT_MASK;
+ return clkout_rates[ret];
+}
+
+static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+ int i;
+
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ ret &= ~HYM8563_CLKOUT_MASK;
+ ret |= i;
+ return i2c_smbus_write_byte_data(client,
+ HYM8563_CLKOUT, ret);
+ }
+
+ return -EINVAL;
+}
+
+static int hym8563_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ ret &= ~HYM8563_CLKOUT_DISABLE;
+ else
+ ret |= HYM8563_CLKOUT_DISABLE;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret);
+}
+
+static int hym8563_clkout_prepare(struct clk_hw *hw)
+{
+ return hym8563_clkout_control(hw, 1);
+}
+
+static void hym8563_clkout_unprepare(struct clk_hw *hw)
+{
+ hym8563_clkout_control(hw, 0);
+}
+
+static int hym8563_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
+ struct i2c_client *client = hym8563->client;
+ int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
+
+ if (ret < 0)
+ return ret;
+
+ return !(ret & HYM8563_CLKOUT_DISABLE);
+}
+
+static const struct clk_ops hym8563_clkout_ops = {
+ .prepare = hym8563_clkout_prepare,
+ .unprepare = hym8563_clkout_unprepare,
+ .is_prepared = hym8563_clkout_is_prepared,
+ .recalc_rate = hym8563_clkout_recalc_rate,
+ .round_rate = hym8563_clkout_round_rate,
+ .set_rate = hym8563_clkout_set_rate,
+};
+
+static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
+{
+ struct i2c_client *client = hym8563->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT,
+ HYM8563_CLKOUT_DISABLE);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ init.name = "hym8563-clkout";
+ init.ops = &hym8563_clkout_ops;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ hym8563->clkout_hw.init = &init;
+
+ /* register the clock */
+ clk = clk_register(&client->dev, &hym8563->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+/*
+ * The alarm interrupt is implemented as a level-low interrupt in the
+ * hym8563, while the timer interrupt uses a falling edge.
+ * We don't use the timer at all, so the interrupt is requested to
+ * use the level-low trigger.
+ */
+static irqreturn_t hym8563_irq(int irq, void *dev_id)
+{
+ struct hym8563 *hym8563 = (struct hym8563 *)dev_id;
+ struct i2c_client *client = hym8563->client;
+ struct mutex *lock = &hym8563->rtc->ops_lock;
+ int data, ret;
+
+ mutex_lock(lock);
+
+ /* Clear the alarm flag */
+
+ data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (data < 0) {
+ dev_err(&client->dev, "%s: error reading i2c data %d\n",
+ __func__, data);
+ goto out;
+ }
+
+ data &= ~HYM8563_CTL2_AF;
+
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: error writing i2c data %d\n",
+ __func__, ret);
+ }
+
+out:
+ mutex_unlock(lock);
+ return IRQ_HANDLED;
+}
+
+static int hym8563_init_device(struct i2c_client *client)
+{
+ int ret;
+
+ /* Clear stop flag if present */
+ ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
+ if (ret < 0)
+ return ret;
+
+ /* Disable alarm and timer interrupts */
+ ret &= ~HYM8563_CTL2_AIE;
+ ret &= ~HYM8563_CTL2_TIE;
+
+ /* Clear any pending alarm and timer flags */
+ if (ret & HYM8563_CTL2_AF)
+ ret &= ~HYM8563_CTL2_AF;
+
+ if (ret & HYM8563_CTL2_TF)
+ ret &= ~HYM8563_CTL2_TF;
+
+ ret &= ~HYM8563_CTL2_TI_TP;
+
+ return i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hym8563_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ if (device_may_wakeup(dev)) {
+ ret = enable_irq_wake(client->irq);
+ if (ret) {
+ dev_err(dev, "enable_irq_wake failed, %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hym8563_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
+
+static int hym8563_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hym8563 *hym8563;
+ int ret;
+
+ hym8563 = devm_kzalloc(&client->dev, sizeof(*hym8563), GFP_KERNEL);
+ if (!hym8563)
+ return -ENOMEM;
+
+ hym8563->client = client;
+ i2c_set_clientdata(client, hym8563);
+
+ device_set_wakeup_capable(&client->dev, true);
+
+ ret = hym8563_init_device(client);
+ if (ret) {
+ dev_err(&client->dev, "could not init device, %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hym8563_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, hym8563);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq %d request failed, %d\n",
+ client->irq, ret);
+ return ret;
+ }
+
+ /* check state of calendar information */
+ ret = i2c_smbus_read_byte_data(client, HYM8563_SEC);
+ if (ret < 0)
+ return ret;
+
+ hym8563->valid = !(ret & HYM8563_SEC_VL);
+ dev_dbg(&client->dev, "rtc information is %s\n",
+ hym8563->valid ? "valid" : "invalid");
+
+ hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
+ &hym8563_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hym8563->rtc))
+ return PTR_ERR(hym8563->rtc);
+
+#ifdef CONFIG_COMMON_CLK
+ hym8563_clkout_register_clk(hym8563);
+#endif
+
+ return 0;
+}
+
+static const struct i2c_device_id hym8563_id[] = {
+ { "hym8563", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, hym8563_id);
+
+static struct of_device_id hym8563_dt_idtable[] = {
+ { .compatible = "haoyu,hym8563" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hym8563_dt_idtable);
+
+static struct i2c_driver hym8563_driver = {
+ .driver = {
+ .name = "rtc-hym8563",
+ .owner = THIS_MODULE,
+ .pm = &hym8563_pm_ops,
+ .of_match_table = hym8563_dt_idtable,
+ },
+ .probe = hym8563_probe,
+ .id_table = hym8563_id,
+};
+
+module_i2c_driver(hym8563_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("HYM8563 RTC driver");
+MODULE_LICENSE("GPL");
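hym8563_clkout_round_rate() above walks the fixed clkout rate table from the highest frequency down and returns the first entry that does not exceed the request. The selection can be sketched independently of the clk framework:

#include <linux/kernel.h>

static const unsigned long example_rates[] = { 32768, 1024, 32, 1 };

/*
 * Sketch: pick the highest supported rate that does not exceed the
 * request, mirroring the table walk in hym8563_clkout_round_rate().
 * Returns 0 when even the lowest rate is above the request.
 */
static unsigned long example_round_rate(unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_rates); i++)
		if (example_rates[i] <= rate)
			return example_rates[i];

	return 0;
}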
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
new file mode 100644
index 000000000000..7854a656628f
--- /dev/null
+++ b/drivers/rtc/rtc-isl12057.c
@@ -0,0 +1,310 @@
+/*
+ * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
+ *
+ * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
+ *
+ * This work is largely based on the Intersil ISL1208 driver developed by
+ * Herbert Valerio Riedel <hvr@gnu.org>.
+ *
+ * Detailed datasheet on which this development is based is available here:
+ *
+ * http://natisbad.org/NAS2/refs/ISL12057.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define DRV_NAME "rtc-isl12057"
+
+/* RTC section */
+#define ISL12057_REG_RTC_SC 0x00 /* Seconds */
+#define ISL12057_REG_RTC_MN 0x01 /* Minutes */
+#define ISL12057_REG_RTC_HR 0x02 /* Hours */
+#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */
+#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */
+#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
+#define ISL12057_REG_RTC_DT 0x04 /* Date */
+#define ISL12057_REG_RTC_MO 0x05 /* Month */
+#define ISL12057_REG_RTC_YR 0x06 /* Year */
+#define ISL12057_RTC_SEC_LEN 7
+
+/* Alarm 1 section */
+#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */
+#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */
+#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */
+#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */
+#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */
+#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */
+#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */
+#define ISL12057_A1_SEC_LEN 4
+
+/* Alarm 2 section */
+#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */
+#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */
+#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */
+#define ISL12057_A2_SEC_LEN 3
+
+/* Control/Status registers */
+#define ISL12057_REG_INT 0x0E
+#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */
+#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */
+#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */
+#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */
+#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */
+#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */
+
+#define ISL12057_REG_SR 0x0F
+#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */
+#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */
+#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */
+
+/* Register memory map length */
+#define ISL12057_MEM_MAP_LEN 0x10
+
+struct isl12057_rtc_data {
+ struct regmap *regmap;
+ struct mutex lock;
+};
+
+static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
+{
+ tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
+ tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
+
+ if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
+ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f);
+ if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
+ tm->tm_hour += 12;
+ } else { /* 24 hour mode */
+ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
+ }
+
+ tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
+ tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
+ tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */
+ tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
+}
+
+static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
+{
+ /*
+ * The clock has an 8 bit wide bcd-coded register for the year.
+ * tm_year is an offset from 1900 and we are interested in the
+ * 2000-2099 range, so any value less than 100 is invalid.
+ */
+ if (tm->tm_year < 100)
+ return -EINVAL;
+
+ regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
+ regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
+ regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
+ regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
+ regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1);
+ regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year - 100);
+ regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
+
+ return 0;
+}
+
+/*
+ * Try and match register bits w/ fixed null values to see whether we
+ * are dealing with an ISL12057. Note: this function is called early
+ * during init and hence does not need mutex protection.
+ */
+static int isl12057_i2c_validate_chip(struct regmap *regmap)
+{
+ u8 regs[ISL12057_MEM_MAP_LEN];
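+ /* Bits expected to always read back as zero in registers 0x00-0x0f */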
+ static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
+ 0xc0, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x60, 0x7c };
+ int ret, i;
+
+ ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
+ if (regs[i] & mask[i]) /* check if bits are cleared */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ISL12057_RTC_SEC_LEN];
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
+ ISL12057_RTC_SEC_LEN);
+ mutex_unlock(&data->lock);
+
+ if (ret) {
+ dev_err(dev, "%s: RTC read failed\n", __func__);
+ return ret;
+ }
+
+ isl12057_rtc_regs_to_tm(tm, regs);
+
+ return rtc_valid_tm(tm);
+}
+
+static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct isl12057_rtc_data *data = dev_get_drvdata(dev);
+ u8 regs[ISL12057_RTC_SEC_LEN];
+ int ret;
+
+ ret = isl12057_rtc_tm_to_regs(regs, tm);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
+ ISL12057_RTC_SEC_LEN);
+ mutex_unlock(&data->lock);
+
+ if (ret)
+ dev_err(dev, "%s: RTC write failed\n", __func__);
+
+ return ret;
+}
+
+/*
+ * Check current RTC status and enable/disable what needs to be. Return 0 if
+ * everything went ok and a negative value upon error. Note: this function
+ * is called early during init and hence does not need mutex protection.
+ */
+static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
+{
+ int ret;
+
+ /* Enable oscillator if not already running */
+ ret = regmap_update_bits(regmap, ISL12057_REG_INT,
+ ISL12057_REG_INT_EOSC, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to enable oscillator\n");
+ return ret;
+ }
+
+ /* Clear oscillator failure bit if needed */
+ ret = regmap_update_bits(regmap, ISL12057_REG_SR,
+ ISL12057_REG_SR_OSF, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to clear oscillator failure bit\n");
+ return ret;
+ }
+
+ /* Clear alarm bit if needed */
+ ret = regmap_update_bits(regmap, ISL12057_REG_SR,
+ ISL12057_REG_SR_A1F, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to clear alarm bit\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = isl12057_rtc_read_time,
+ .set_time = isl12057_rtc_set_time,
+};
+
+static struct regmap_config isl12057_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int isl12057_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct isl12057_rtc_data *data;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "regmap allocation failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = isl12057_i2c_validate_chip(regmap);
+ if (ret)
+ return ret;
+
+ ret = isl12057_check_rtc_status(dev, regmap);
+ if (ret)
+ return ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->lock);
+ data->regmap = regmap;
+ dev_set_drvdata(dev, data);
+
+ rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id isl12057_dt_match[] = {
+ { .compatible = "isl,isl12057" },
+ { },
+};
+#endif
+
+static const struct i2c_device_id isl12057_id[] = {
+ { "isl12057", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isl12057_id);
+
+static struct i2c_driver isl12057_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(isl12057_dt_match),
+ },
+ .probe = isl12057_probe,
+ .id_table = isl12057_id,
+};
+module_i2c_driver(isl12057_driver);
+
+MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
+MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index 8e45b3c4aa2f..3032178bd9e6 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -51,7 +51,7 @@ static irqreturn_t max8907_irq_handler(int irq, void *data)
{
struct max8907_rtc *rtc = data;
- regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -64,7 +64,7 @@ static void regs_to_tm(u8 *regs, struct rtc_time *tm)
bcd2bin(regs[RTC_YEAR1]) - 1900;
tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1;
tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f);
- tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1;
+ tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07);
if (regs[RTC_HOUR] & HOUR_12) {
tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f);
if (tm->tm_hour == 12)
@@ -88,7 +88,7 @@ static void tm_to_regs(struct rtc_time *tm, u8 *regs)
regs[RTC_YEAR1] = bin2bcd(low);
regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
regs[RTC_DATE] = bin2bcd(tm->tm_mday);
- regs[RTC_WEEKDAY] = tm->tm_wday + 1;
+ regs[RTC_WEEKDAY] = tm->tm_wday;
regs[RTC_HOUR] = bin2bcd(tm->tm_hour);
regs[RTC_MIN] = bin2bcd(tm->tm_min);
regs[RTC_SEC] = bin2bcd(tm->tm_sec);
@@ -153,7 +153,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
tm_to_regs(&alrm->time, regs);
/* Disable alarm while we update the target time */
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
if (ret < 0)
return ret;
@@ -163,8 +163,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
if (alrm->enabled)
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL,
- 0x7f, 0x7f);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77);
return ret;
}
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 50c572645546..419874fefa4b 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -391,11 +391,13 @@ static int mxc_rtc_probe(struct platform_device *pdev)
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
dev_err(&pdev->dev, "unable to get clock!\n");
- ret = PTR_ERR(pdata->clk);
- goto exit_free_pdata;
+ return PTR_ERR(pdata->clk);
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
+
rate = clk_get_rate(pdata->clk);
if (rate == 32768)
@@ -447,8 +449,6 @@ static int mxc_rtc_probe(struct platform_device *pdev)
exit_put_clk:
clk_disable_unprepare(pdata->clk);
-exit_free_pdata:
-
return ret;
}
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 1ee514a3972c..9bd842e97749 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -197,10 +197,7 @@ static int pcf2127_probe(struct i2c_client *client,
pcf2127_driver.driver.name,
&pcf2127_rtc_ops, THIS_MODULE);
- if (IS_ERR(pcf2127->rtc))
- return PTR_ERR(pcf2127->rtc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(pcf2127->rtc);
}
static const struct i2c_device_id pcf2127_id[] = {
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 00b0eb7fe166..de8d9c427782 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -52,8 +52,45 @@
#define RX8581_CTRL_STOP 0x02 /* STOP bit */
#define RX8581_CTRL_RESET 0x01 /* RESET bit */
+struct rx8581 {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
+ s32 (*read_block_data)(const struct i2c_client *client, u8 command,
+ u8 length, u8 *values);
+ s32 (*write_block_data)(const struct i2c_client *client, u8 command,
+ u8 length, const u8 *values);
+};
+
static struct i2c_driver rx8581_driver;
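+
+/*
+ * Byte-at-a-time fallbacks used when the I2C adapter lacks block transfer
+ * support; they stand in for the i2c_smbus_*_i2c_block_data() helpers.
+ */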
+static int rx8581_read_block_data(const struct i2c_client *client, u8 command,
+ u8 length, u8 *values)
+{
+ s32 i, data;
+
+ for (i = 0; i < length; i++) {
+ data = i2c_smbus_read_byte_data(client, command + i);
+ if (data < 0)
+ return data;
+ values[i] = data;
+ }
+ return i;
+}
+
+static int rx8581_write_block_data(const struct i2c_client *client, u8 command,
+ u8 length, const u8 *values)
+{
+ s32 i, ret;
+
+ for (i = 0; i < length; i++) {
+ ret = i2c_smbus_write_byte_data(client, command + i,
+ values[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return length;
+}
+
/*
* In the routines that deal directly with the rx8581 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
@@ -62,6 +99,7 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
unsigned char date[7];
int data, err;
+ struct rx8581 *rx8581 = i2c_get_clientdata(client);
/* First we ensure that the "update flag" is not set, we read the
* time and date then re-read the "update flag". If the update flag
@@ -80,14 +118,13 @@ static int rx8581_get_datetime(struct i2c_client *client, struct rtc_time *tm)
err = i2c_smbus_write_byte_data(client,
RX8581_REG_FLAG, (data & ~RX8581_FLAG_UF));
if (err != 0) {
- dev_err(&client->dev, "Unable to write device "
- "flags\n");
+ dev_err(&client->dev, "Unable to write device flags\n");
return -EIO;
}
}
/* Now read time and date */
- err = i2c_smbus_read_i2c_block_data(client, RX8581_REG_SC,
+ err = rx8581->read_block_data(client, RX8581_REG_SC,
7, date);
if (err < 0) {
dev_err(&client->dev, "Unable to read date\n");
@@ -140,6 +177,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int data, err;
unsigned char buf[7];
+ struct rx8581 *rx8581 = i2c_get_clientdata(client);
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -176,7 +214,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
}
/* write register's data */
- err = i2c_smbus_write_i2c_block_data(client, RX8581_REG_SC, 7, buf);
+ err = rx8581->write_block_data(client, RX8581_REG_SC, 7, buf);
if (err < 0) {
dev_err(&client->dev, "Unable to write to date registers\n");
return -EIO;
@@ -231,22 +269,39 @@ static const struct rtc_class_ops rx8581_rtc_ops = {
static int rx8581_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct rtc_device *rtc;
+ struct rx8581 *rx8581;
dev_dbg(&client->dev, "%s\n", __func__);
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)
+ && !i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+ rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL);
+ if (!rx8581)
+ return -ENOMEM;
- rtc = devm_rtc_device_register(&client->dev, rx8581_driver.driver.name,
- &rx8581_rtc_ops, THIS_MODULE);
+ i2c_set_clientdata(client, rx8581);
+ rx8581->client = client;
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ rx8581->read_block_data = i2c_smbus_read_i2c_block_data;
+ rx8581->write_block_data = i2c_smbus_write_i2c_block_data;
+ } else {
+ rx8581->read_block_data = rx8581_read_block_data;
+ rx8581->write_block_data = rx8581_write_block_data;
+ }
- i2c_set_clientdata(client, rtc);
+ dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+
+ rx8581->rtc = devm_rtc_device_register(&client->dev,
+ rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rx8581->rtc)) {
+ dev_err(&client->dev,
+ "unable to register the class device\n");
+ return PTR_ERR(rx8581->rtc);
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7afd373b9595..c4cde9c08f1f 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev)
clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
- ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+ } else {
+ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
}
s3c_rtc_enable(pdev, 0);
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev)
clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
- writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
- if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
- writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+ if (ticnt_en_save) {
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save,
+ s3c_rtc_base + S3C2410_RTCCON);
+ }
+ } else {
+ writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
}
if (device_may_wakeup(dev) && wake_en) {
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index ae8119dc2846..476af93543f6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -639,6 +639,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
s5m_rtc_enable_smpl(info, false);
}
+#ifdef CONFIG_PM_SLEEP
static int s5m_rtc_resume(struct device *dev)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
@@ -660,6 +661,7 @@ static int s5m_rtc_suspend(struct device *dev)
return ret;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
new file mode 100644
index 000000000000..68a35284e5ad
--- /dev/null
+++ b/drivers/rtc/rtc-sunxi.c
@@ -0,0 +1,523 @@
+/*
+ * An RTC driver for Allwinner A10/A20
+ *
+ * Copyright (c) 2013, Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+
+#define SUNXI_LOSC_CTRL 0x0000
+#define SUNXI_LOSC_CTRL_RTC_HMS_ACC BIT(8)
+#define SUNXI_LOSC_CTRL_RTC_YMD_ACC BIT(7)
+
+#define SUNXI_RTC_YMD 0x0004
+
+#define SUNXI_RTC_HMS 0x0008
+
+#define SUNXI_ALRM_DHMS 0x000c
+
+#define SUNXI_ALRM_EN 0x0014
+#define SUNXI_ALRM_EN_CNT_EN BIT(8)
+
+#define SUNXI_ALRM_IRQ_EN 0x0018
+#define SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0)
+
+#define SUNXI_ALRM_IRQ_STA 0x001c
+#define SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0)
+
+#define SUNXI_MASK_DH 0x0000001f
+#define SUNXI_MASK_SM 0x0000003f
+#define SUNXI_MASK_M 0x0000000f
+#define SUNXI_MASK_LY 0x00000001
+#define SUNXI_MASK_D 0x00000ffe
+
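+/*
+ * SUNXI_GET extracts a (mask, shift) bit field from a register value;
+ * SUNXI_SET shifts a value into position so it can be written to that field.
+ */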
+#define SUNXI_GET(x, mask, shift) (((x) & ((mask) << (shift))) \
+ >> (shift))
+
+#define SUNXI_SET(x, mask, shift) (((x) & (mask)) << (shift))
+
+/*
+ * Get date values
+ */
+#define SUNXI_DATE_GET_DAY_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 0)
+#define SUNXI_DATE_GET_MON_VALUE(x) SUNXI_GET(x, SUNXI_MASK_M, 8)
+#define SUNXI_DATE_GET_YEAR_VALUE(x, mask) SUNXI_GET(x, mask, 16)
+
+/*
+ * Get time values
+ */
+#define SUNXI_TIME_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
+#define SUNXI_TIME_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_TIME_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Get alarm values
+ */
+#define SUNXI_ALRM_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
+#define SUNXI_ALRM_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_ALRM_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Set date values
+ */
+#define SUNXI_DATE_SET_DAY_VALUE(x) SUNXI_DATE_GET_DAY_VALUE(x)
+#define SUNXI_DATE_SET_MON_VALUE(x) SUNXI_SET(x, SUNXI_MASK_M, 8)
+#define SUNXI_DATE_SET_YEAR_VALUE(x, mask) SUNXI_SET(x, mask, 16)
+#define SUNXI_LEAP_SET_VALUE(x, shift) SUNXI_SET(x, SUNXI_MASK_LY, shift)
+
+/*
+ * Set time values
+ */
+#define SUNXI_TIME_SET_SEC_VALUE(x) SUNXI_TIME_GET_SEC_VALUE(x)
+#define SUNXI_TIME_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_TIME_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
+
+/*
+ * Set alarm values
+ */
+#define SUNXI_ALRM_SET_SEC_VALUE(x) SUNXI_ALRM_GET_SEC_VALUE(x)
+#define SUNXI_ALRM_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
+#define SUNXI_ALRM_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
+#define SUNXI_ALRM_SET_DAY_VALUE(x) SUNXI_SET(x, SUNXI_MASK_D, 21)
+
+/*
+ * Time unit conversions
+ */
+#define SEC_IN_MIN 60
+#define SEC_IN_HOUR (60 * SEC_IN_MIN)
+#define SEC_IN_DAY (24 * SEC_IN_HOUR)
+
+/*
+ * The year parameter passed to the driver is usually an offset relative to
+ * the year 1900. This macro is used to convert this offset to another one
+ * relative to the minimum year allowed by the hardware.
+ */
+#define SUNXI_YEAR_OFF(x) ((x)->min - 1900)
+
+/*
+ * min and max year are arbitrarily set, considering the limited range of the
+ * hardware register field
+ */
+struct sunxi_rtc_data_year {
+ unsigned int min; /* min year allowed */
+ unsigned int max; /* max year allowed */
+ unsigned int mask; /* mask for the year field */
+ unsigned char leap_shift; /* bit shift to get the leap year */
+};
+
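+/* Index 0 matches "allwinner,sun4i-rtc", index 1 "allwinner,sun7i-a20-rtc". */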
+static struct sunxi_rtc_data_year data_year_param[] = {
+ [0] = {
+ .min = 2010,
+ .max = 2073,
+ .mask = 0x3f,
+ .leap_shift = 22,
+ },
+ [1] = {
+ .min = 1970,
+ .max = 2225,
+ .mask = 0xff,
+ .leap_shift = 24,
+ },
+};
+
+struct sunxi_rtc_dev {
+ struct rtc_device *rtc;
+ struct device *dev;
+ struct sunxi_rtc_data_year *data_year;
+ void __iomem *base;
+ int irq;
+};
+
+static irqreturn_t sunxi_rtc_alarmirq(int irq, void *id)
+{
+ struct sunxi_rtc_dev *chip = (struct sunxi_rtc_dev *) id;
+ u32 val;
+
+ val = readl(chip->base + SUNXI_ALRM_IRQ_STA);
+
+ if (val & SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND) {
+ val |= SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND;
+ writel(val, chip->base + SUNXI_ALRM_IRQ_STA);
+
+ rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void sunxi_rtc_setaie(int to, struct sunxi_rtc_dev *chip)
+{
+ u32 alrm_val = 0;
+ u32 alrm_irq_val = 0;
+
+ if (to) {
+ alrm_val = readl(chip->base + SUNXI_ALRM_EN);
+ alrm_val |= SUNXI_ALRM_EN_CNT_EN;
+
+ alrm_irq_val = readl(chip->base + SUNXI_ALRM_IRQ_EN);
+ alrm_irq_val |= SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN;
+ } else {
+ writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND,
+ chip->base + SUNXI_ALRM_IRQ_STA);
+ }
+
+ writel(alrm_val, chip->base + SUNXI_ALRM_EN);
+ writel(alrm_irq_val, chip->base + SUNXI_ALRM_IRQ_EN);
+}
+
+static int sunxi_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ u32 alrm;
+ u32 alrm_en;
+ u32 date;
+
+ alrm = readl(chip->base + SUNXI_ALRM_DHMS);
+ date = readl(chip->base + SUNXI_RTC_YMD);
+
+ alrm_tm->tm_sec = SUNXI_ALRM_GET_SEC_VALUE(alrm);
+ alrm_tm->tm_min = SUNXI_ALRM_GET_MIN_VALUE(alrm);
+ alrm_tm->tm_hour = SUNXI_ALRM_GET_HOUR_VALUE(alrm);
+
+ alrm_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
+ alrm_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
+ alrm_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
+ chip->data_year->mask);
+
+ alrm_tm->tm_mon -= 1;
+
+ /*
+ * switch from (data_year->min)-relative offset to
+ * a (1900)-relative one
+ */
+ alrm_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
+
+ alrm_en = readl(chip->base + SUNXI_ALRM_IRQ_EN);
+ if (alrm_en & SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN)
+ wkalrm->enabled = 1;
+
+ return 0;
+}
+
+static int sunxi_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date, time;
+
+ /*
+ * read again in case it changes
+ */
+ do {
+ date = readl(chip->base + SUNXI_RTC_YMD);
+ time = readl(chip->base + SUNXI_RTC_HMS);
+ } while ((date != readl(chip->base + SUNXI_RTC_YMD)) ||
+ (time != readl(chip->base + SUNXI_RTC_HMS)));
+
+ rtc_tm->tm_sec = SUNXI_TIME_GET_SEC_VALUE(time);
+ rtc_tm->tm_min = SUNXI_TIME_GET_MIN_VALUE(time);
+ rtc_tm->tm_hour = SUNXI_TIME_GET_HOUR_VALUE(time);
+
+ rtc_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
+ rtc_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
+ rtc_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
+ chip->data_year->mask);
+
+ rtc_tm->tm_mon -= 1;
+
+ /*
+ * switch from (data_year->min)-relative offset to
+ * a (1900)-relative one
+ */
+ rtc_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int sunxi_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ struct rtc_time tm_now;
+ u32 alrm = 0;
+ unsigned long time_now = 0;
+ unsigned long time_set = 0;
+ unsigned long time_gap = 0;
+ unsigned long time_gap_day = 0;
+ unsigned long time_gap_hour = 0;
+ unsigned long time_gap_min = 0;
+ int ret = 0;
+
+ ret = sunxi_rtc_gettime(dev, &tm_now);
+ if (ret < 0) {
+ dev_err(dev, "Error in getting time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(alrm_tm, &time_set);
+ rtc_tm_to_time(&tm_now, &time_now);
+ if (time_set <= time_now) {
+ dev_err(dev, "Date to set in the past\n");
+ return -EINVAL;
+ }
+
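+ /*
+  * The alarm is programmed as a day/hour/minute/second offset from the
+  * current time, so compute the gap to the requested alarm time and
+  * split it into those fields.
+  */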
+ time_gap = time_set - time_now;
+ time_gap_day = time_gap / SEC_IN_DAY;
+ time_gap -= time_gap_day * SEC_IN_DAY;
+ time_gap_hour = time_gap / SEC_IN_HOUR;
+ time_gap -= time_gap_hour * SEC_IN_HOUR;
+ time_gap_min = time_gap / SEC_IN_MIN;
+ time_gap -= time_gap_min * SEC_IN_MIN;
+
+ if (time_gap_day > 255) {
+ dev_err(dev, "Day must be in the range 0 - 255\n");
+ return -EINVAL;
+ }
+
+ sunxi_rtc_setaie(0, chip);
+ writel(0, chip->base + SUNXI_ALRM_DHMS);
+ usleep_range(100, 300);
+
+ alrm = SUNXI_ALRM_SET_SEC_VALUE(time_gap) |
+ SUNXI_ALRM_SET_MIN_VALUE(time_gap_min) |
+ SUNXI_ALRM_SET_HOUR_VALUE(time_gap_hour) |
+ SUNXI_ALRM_SET_DAY_VALUE(time_gap_day);
+ writel(alrm, chip->base + SUNXI_ALRM_DHMS);
+
+ writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
+ writel(SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN, chip->base + SUNXI_ALRM_IRQ_EN);
+
+ sunxi_rtc_setaie(wkalrm->enabled, chip);
+
+ return 0;
+}
+
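+/*
+ * Poll the register at @offset until all bits in @mask read back as set,
+ * giving up after @ms_timeout milliseconds.
+ */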
+static int sunxi_rtc_wait(struct sunxi_rtc_dev *chip, int offset,
+ unsigned int mask, unsigned int ms_timeout)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
+ u32 reg;
+
+ do {
+ reg = readl(chip->base + offset);
+ reg &= mask;
+
+ if (reg == mask)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int sunxi_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date = 0;
+ u32 time = 0;
+ int year;
+
+ /*
+ * the input rtc_tm->tm_year is the offset relative to 1900. We use
+ * the SUNXI_YEAR_OFF macro to rebase it with respect to the min year
+ * allowed by the hardware
+ */
+
+ year = rtc_tm->tm_year + 1900;
+ if (year < chip->data_year->min || year > chip->data_year->max) {
+ dev_err(dev, "rtc only supports year in range %d - %d\n",
+ chip->data_year->min, chip->data_year->max);
+ return -EINVAL;
+ }
+
+ rtc_tm->tm_year -= SUNXI_YEAR_OFF(chip->data_year);
+ rtc_tm->tm_mon += 1;
+
+ date = SUNXI_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+ SUNXI_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
+ SUNXI_DATE_SET_YEAR_VALUE(rtc_tm->tm_year,
+ chip->data_year->mask);
+
+ if (is_leap_year(year))
+ date |= SUNXI_LEAP_SET_VALUE(1, chip->data_year->leap_shift);
+
+ time = SUNXI_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
+ SUNXI_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
+ SUNXI_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
+
+ writel(0, chip->base + SUNXI_RTC_HMS);
+ writel(0, chip->base + SUNXI_RTC_YMD);
+
+ writel(time, chip->base + SUNXI_RTC_HMS);
+
+ /*
+ * After writing the RTC HH-MM-SS register, the
+ * SUNXI_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
+ SUNXI_LOSC_CTRL_RTC_HMS_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -1;
+ }
+
+ writel(date, chip->base + SUNXI_RTC_YMD);
+
+ /*
+ * After writing the RTC YY-MM-DD register, the
+ * SUNXI_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
+ SUNXI_LOSC_CTRL_RTC_YMD_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int sunxi_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
+
+ if (!enabled)
+ sunxi_rtc_setaie(enabled, chip);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sunxi_rtc_ops = {
+ .read_time = sunxi_rtc_gettime,
+ .set_time = sunxi_rtc_settime,
+ .read_alarm = sunxi_rtc_getalarm,
+ .set_alarm = sunxi_rtc_setalarm,
+ .alarm_irq_enable = sunxi_rtc_alarm_irq_enable
+};
+
+static const struct of_device_id sunxi_rtc_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-rtc", .data = &data_year_param[0] },
+ { .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
+
+static int sunxi_rtc_probe(struct platform_device *pdev)
+{
+ struct sunxi_rtc_dev *chip;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chip);
+ chip->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return chip->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, chip->irq, sunxi_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return ret;
+ }
+
+ of_id = of_match_device(sunxi_rtc_dt_ids, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "Unable to setup RTC data\n");
+ return -ENODEV;
+ }
+ chip->data_year = (struct sunxi_rtc_data_year *) of_id->data;
+
+ /* clear the alarm count value */
+ writel(0, chip->base + SUNXI_ALRM_DHMS);
+
+ /* disable alarm, do not generate an irq pending */
+ writel(0, chip->base + SUNXI_ALRM_EN);
+
+ /* disable alarm week/cnt irq, mask it from the cpu */
+ writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
+
+ /* clear alarm week/cnt irq pending */
+ writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND, chip->base +
+ SUNXI_ALRM_IRQ_STA);
+
+ chip->rtc = rtc_device_register("rtc-sunxi", &pdev->dev,
+ &sunxi_rtc_ops, THIS_MODULE);
+ if (IS_ERR(chip->rtc)) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ return PTR_ERR(chip->rtc);
+ }
+
+ dev_info(&pdev->dev, "RTC enabled\n");
+
+ return 0;
+}
+
+static int sunxi_rtc_remove(struct platform_device *pdev)
+{
+ struct sunxi_rtc_dev *chip = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(chip->rtc);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_rtc_driver = {
+ .probe = sunxi_rtc_probe,
+ .remove = sunxi_rtc_remove,
+ .driver = {
+ .name = "sunxi-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = sunxi_rtc_dt_ids,
+ },
+};
+
+module_platform_driver(sunxi_rtc_driver);
+
+MODULE_DESCRIPTION("sunxi RTC driver");
+MODULE_AUTHOR("Carlo Caione <carlo.caione@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index c2e80d7ca5e2..1915464e4cd6 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -479,7 +479,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
u8 rd_reg;
if (irq <= 0)
- goto out1;
+ return ret;
/* Initialize the register map */
if (twl_class_is_4030())
@@ -489,7 +489,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
- goto out1;
+ return ret;
if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
dev_warn(&pdev->dev, "Power up reset detected.\n");
@@ -500,7 +500,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
/* Clear RTC Power up reset and pending alarm interrupts */
ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
- goto out1;
+ return ret;
if (twl_class_is_6030()) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
@@ -512,7 +512,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Enabling TWL-RTC\n");
ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out1;
+ return ret;
/* ensure interrupts are disabled, bootloaders can be strange */
ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
@@ -522,34 +522,29 @@ static int twl_rtc_probe(struct platform_device *pdev)
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
- goto out1;
+ return ret;
device_init_wakeup(&pdev->dev, 1);
- rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
PTR_ERR(rtc));
- goto out1;
+ return PTR_ERR(rtc);
}
- ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev_name(&rtc->dev), rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ twl_rtc_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(&rtc->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
- goto out2;
+ return ret;
}
platform_set_drvdata(pdev, rtc);
return 0;
-
-out2:
- rtc_device_unregister(rtc);
-out1:
- return ret;
}
/*
@@ -559,9 +554,6 @@ out1:
static int twl_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
if (twl_class_is_6030()) {
@@ -571,10 +563,6 @@ static int twl_rtc_remove(struct platform_device *pdev)
REG_INT_MSK_STS_A);
}
-
- free_irq(irq, rtc);
-
- rtc_device_unregister(rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index aabc22c587fb..88c9c92e89fd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -293,7 +293,7 @@ static int rtc_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- rtc1_base = ioremap(res->start, resource_size(res));
+ rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!rtc1_base)
return -EBUSY;
@@ -303,13 +303,14 @@ static int rtc_probe(struct platform_device *pdev)
goto err_rtc1_iounmap;
}
- rtc2_base = ioremap(res->start, resource_size(res));
+ rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!rtc2_base) {
retval = -EBUSY;
goto err_rtc1_iounmap;
}
- rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops,
+ THIS_MODULE);
if (IS_ERR(rtc)) {
retval = PTR_ERR(rtc);
goto err_iounmap_all;
@@ -330,24 +331,24 @@ static int rtc_probe(struct platform_device *pdev)
aie_irq = platform_get_irq(pdev, 0);
if (aie_irq <= 0) {
retval = -EBUSY;
- goto err_device_unregister;
+ goto err_iounmap_all;
}
- retval = request_irq(aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
+ retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
+ "elapsed_time", pdev);
if (retval < 0)
- goto err_device_unregister;
+ goto err_iounmap_all;
pie_irq = platform_get_irq(pdev, 1);
if (pie_irq <= 0) {
retval = -EBUSY;
- goto err_free_irq;
+ goto err_iounmap_all;
}
- retval = request_irq(pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
+ retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
+ "rtclong1", pdev);
if (retval < 0)
- goto err_free_irq;
+ goto err_iounmap_all;
platform_set_drvdata(pdev, rtc);
@@ -358,47 +359,20 @@ static int rtc_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(aie_irq, pdev);
-
-err_device_unregister:
- rtc_device_unregister(rtc);
-
err_iounmap_all:
- iounmap(rtc2_base);
rtc2_base = NULL;
err_rtc1_iounmap:
- iounmap(rtc1_base);
rtc1_base = NULL;
return retval;
}
-static int rtc_remove(struct platform_device *pdev)
-{
- struct rtc_device *rtc;
-
- rtc = platform_get_drvdata(pdev);
- if (rtc)
- rtc_device_unregister(rtc);
-
- free_irq(aie_irq, pdev);
- free_irq(pie_irq, pdev);
- if (rtc1_base)
- iounmap(rtc1_base);
- if (rtc2_base)
- iounmap(rtc2_base);
-
- return 0;
-}
-
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:RTC");
static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
- .remove = rtc_remove,
.driver = {
.name = rtc_name,
.owner = THIS_MODULE,
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22ce6760..9cbc567698ce 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;
dbio->block_number = recid + 1;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e45782692f..2e8e0755070b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int off;
int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
- cidaw += bv->bv_len >> (block->s2b_shift + 9);
+ if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len >> (block->s2b_shift + 9);
#endif
}
/* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
- memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
- dst = copy + bv->bv_offset;
+ dst = copy + bv.bv_offset;
}
- for (off = 0; off < bv->bv_len; off += blksize) {
+ for (off = 0; off < bv.bv_len; off += blksize) {
sector_t trkid = recid;
unsigned int recoffs = sector_div(trkid, blk_per_trk);
rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *idaw_dst;
unsigned int cidaw, cplength, datasize;
unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
{
struct dasd_ccw_req *cqr;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned int trkcount, ctidaw;
unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
}
} else {
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
last_tidaw = itcw_add_tidaw(itcw, 0x00,
- dst, bv->bv_len);
+ dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
unsigned char cmd;
unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- seg_len = bv->bv_len;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
struct dasd_eckd_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)
ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
- memcpy(dst, cda, bv->bv_len);
+ memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c32ba59..2c8e68bf9a1c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
+ if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (block->s2b_shift + 9);
+ count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
- cidaw += bv->bv_len / blksize;
+ if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len / blksize;
#endif
}
/* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
+ dst = page_address(bv.bv_page) + bv.bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
- memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
- dst = copy + bv->bv_offset;
+ dst = copy + bv.bv_offset;
}
- for (off = 0; off < bv->bv_len; off += blksize) {
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Locate record for stupid devices. */
if (private->rdc_data.mode.bits.data_chain == 0) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
struct dasd_fba_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, off;
int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
+ dst = page_address(bv.bv_page) + bv.bv_offset;
+ for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
- memcpy(dst, cda, bv->bv_len);
+ memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..ee0e85abe1fd 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -304,12 +304,6 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info)
return rc;
}
-static void dcssblk_unregister_callback(struct device *dev)
-{
- device_unregister(dev);
- put_device(dev);
-}
-
/*
* device attribute for switching shared/nonshared (exclusive)
* operation (show + store)
@@ -397,7 +391,13 @@ removeseg:
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- rc = device_schedule_callback(dev, dcssblk_unregister_callback);
+ up_write(&dcssblk_devices_sem);
+
+ if (device_remove_file_self(dev, attr)) {
+ device_unregister(dev);
+ put_device(dev);
+ }
+ return rc;
out:
up_write(&dcssblk_devices_sem);
return rc;
@@ -808,18 +808,19 @@ static void
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned long index;
unsigned long page_addr;
unsigned long source_addr;
unsigned long bytes_done;
- int i;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
}
- index = (bio->bi_sector >> 3);
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3);
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- page_address(bvec->bv_page) + bvec->bv_offset;
+ page_address(bvec.bv_page) + bvec.bv_offset;
source_addr = dev_info->start + (index<<12) + bytes_done;
- if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+ if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
// More paranoia.
goto fail;
if (bio_data_dir(bio) == READ) {
memcpy((void*)page_addr, (void*)source_addr,
- bvec->bv_len);
+ bvec.bv_len);
} else {
memcpy((void*)source_addr, (void*)page_addr,
- bvec->bv_len);
+ bvec.bv_len);
}
- bytes_done += bvec->bv_len;
+ bytes_done += bvec.bv_len;
}
bio_endio(bio, 0);
return;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d0ab5019d885..76bed1743db1 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
msb->bs = MSB_BS_4K;
scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
msb->data_addr = (u64) aidaw;
rq_for_each_segment(bv, scmrq->request, iter) {
- WARN_ON(bv->bv_offset);
- msb->blk_count += bv->bv_len >> 12;
- aidaw->data_addr = (u64) page_address(bv->bv_page);
+ WARN_ON(bv.bv_offset);
+ msb->blk_count += bv.bv_len >> 12;
+ aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
}
}
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 27f930cd657f..9aae909d47a5 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
- struct bio_vec *bv;
+ struct bio_vec bv;
int i = 0;
u64 addr;
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
i++;
}
rq_for_each_segment(bv, req, iter) {
- aidaw->data_addr = (u64) page_address(bv->bv_page);
+ aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
i++;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 464dd29d06c0..6969d39f1e2e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
- int i;
- if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+ if ((bio->bi_iter.bi_sector & 7) != 0 ||
+ (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if ((bio->bi_size >> 12) > xdev->size)
+ if ((bio->bi_iter.bi_size >> 12) > xdev->size)
/* Request size exceeds the device size. */
goto fail;
- if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+ if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
- index = (bio->bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, i) {
+ index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+ bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
- kmap(bvec->bv_page) + bvec->bv_offset;
- bytes = bvec->bv_len;
+ kmap(bvec.bv_page) + bvec.bv_offset;
+ bytes = bvec.bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
@@ -257,6 +258,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
unsigned long mem_needed;
unsigned long mem_auto;
unsigned long long size;
+ char *sizes_end;
int mem_auto_no;
int i;
@@ -275,8 +277,8 @@ static int __init xpram_setup_sizes(unsigned long pages)
mem_auto_no = 0;
for (i = 0; i < xpram_devs; i++) {
if (sizes[i]) {
- size = simple_strtoull(sizes[i], &sizes[i], 0);
- switch (sizes[i][0]) {
+ size = simple_strtoull(sizes[i], &sizes_end, 0);
+ switch (*sizes_end) {
case 'g':
case 'G':
size <<= 20;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index eb5d22795c47..5af7f0bd6125 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -922,7 +922,7 @@ static int __init con3215_init(void)
raw3215_freelist = req;
}
- cdev = ccw_device_probe_console();
+ cdev = ccw_device_create_console(&raw3215_ccw_driver);
if (IS_ERR(cdev))
return -ENODEV;
@@ -932,6 +932,12 @@ static int __init con3215_init(void)
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
+ if (ccw_device_enable_console(cdev)) {
+ ccw_device_destroy_console(cdev);
+ raw3215_free_info(raw);
+ raw3215[0] = NULL;
+ return -ENODEV;
+ }
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 699fd3e363df..75ffe9980c3e 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,6 +7,7 @@
* Copyright IBM Corp. 2003, 2009
*/
+#include <linux/module.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -30,6 +31,9 @@
static struct raw3270_fn con3270_fn;
+static bool auto_update = 1;
+module_param(auto_update, bool, 0);
+
/*
* Main 3270 console view data structure.
*/
@@ -204,6 +208,8 @@ con3270_update(struct con3270 *cp)
struct string *s, *n;
int rc;
+ if (!auto_update && !raw3270_view_active(&cp->view))
+ return;
if (cp->view.dev)
raw3270_activate_view(&cp->view);
@@ -529,6 +535,7 @@ con3270_flush(void)
if (!cp->view.dev)
return;
raw3270_pm_unfreeze(&cp->view);
+ raw3270_activate_view(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
@@ -576,7 +583,6 @@ static struct console con3270 = {
static int __init
con3270_init(void)
{
- struct ccw_device *cdev;
struct raw3270 *rp;
void *cbuf;
int i;
@@ -591,10 +597,7 @@ con3270_init(void)
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
- cdev = ccw_device_probe_console();
- if (IS_ERR(cdev))
- return -ENODEV;
- rp = raw3270_setup_console(cdev);
+ rp = raw3270_setup_console();
if (IS_ERR(rp))
return PTR_ERR(rp);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2cdec21e8924..9f849df4381e 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -276,6 +276,15 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
}
int
+raw3270_view_active(struct raw3270_view *view)
+{
+ struct raw3270 *rp = view->dev;
+
+ return rp && rp->view == view &&
+ !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+}
+
+int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
unsigned long flags;
@@ -776,22 +785,37 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
}
#ifdef CONFIG_TN3270_CONSOLE
+/* Tentative definition - see below for actual definition. */
+static struct ccw_driver raw3270_ccw_driver;
+
/*
* Setup 3270 device configured as console.
*/
-struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
+struct raw3270 __init *raw3270_setup_console(void)
{
+ struct ccw_device *cdev;
unsigned long flags;
struct raw3270 *rp;
char *ascebc;
int rc;
+ cdev = ccw_device_create_console(&raw3270_ccw_driver);
+ if (IS_ERR(cdev))
+ return ERR_CAST(cdev);
+
rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+
+ rc = ccw_device_enable_console(cdev);
+ if (rc) {
+ ccw_device_destroy_console(cdev);
+ return ERR_PTR(rc);
+ }
+
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
do {
__raw3270_reset_device(rp);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 7b73ff8c1bd7..e1e41c2861fb 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -173,6 +173,7 @@ int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
int raw3270_reset(struct raw3270_view *);
struct raw3270_view *raw3270_view(struct raw3270_view *);
+int raw3270_view_active(struct raw3270_view *);
/* Reference count inliner for view structures. */
static inline void
@@ -190,7 +191,7 @@ raw3270_put_view(struct raw3270_view *view)
wake_up(&raw3270_wait_queue);
}
-struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
+struct raw3270 *raw3270_setup_console(void);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index cb3c4e05a385..49af8eeb90ea 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -700,3 +700,8 @@ out:
free_page((unsigned long) sccb);
return rc;
}
+
+bool sclp_has_sprp(void)
+{
+ return !!(sclp_fac84 & 0x2);
+}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 82f2c389b4d1..14196ea0fdf3 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -20,7 +20,9 @@ struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
- u8 _reserved0[24 - 11]; /* 11-23 */
+ u8 _reserved0[16 - 11]; /* 11-15 */
+ u16 ncpurl; /* 16-17 */
+ u8 _reserved7[24 - 18]; /* 18-23 */
u8 loadparm[8]; /* 24-31 */
u8 _reserved1[48 - 32]; /* 32-47 */
u64 facilities; /* 48-55 */
@@ -32,13 +34,16 @@ struct read_info_sccb {
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
- u8 _reserved5[4096 - 112]; /* 112-4095 */
+ u8 _reserved5[120 - 112]; /* 112-119 */
+ u16 hcpua; /* 120-121 */
+ u8 _reserved6[4096 - 122]; /* 122-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
+static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
u64 sclp_facilities;
@@ -102,6 +107,15 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
+ if (!sccb->hcpua) {
+ if (MACHINE_IS_VM)
+ sclp_max_cpu = 64;
+ else
+ sclp_max_cpu = sccb->ncpurl;
+ } else {
+ sclp_max_cpu = sccb->hcpua + 1;
+ }
+
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
if (sccb->flags & 0x2)
@@ -129,6 +143,11 @@ unsigned long long sclp_get_rzm(void)
return sclp_rzm;
}
+unsigned int sclp_get_max_cpu(void)
+{
+ return sclp_max_cpu;
+}
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. The sclp_facilities_detect() function retrieves
@@ -184,9 +203,9 @@ static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
- if (sccb->evbuf.blk_cnt != 0)
- return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
- return 0;
+ if (sccb->evbuf.blk_cnt == 0)
+ return 0;
+ return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
@@ -195,6 +214,8 @@ static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
+ if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
+ return 0;
return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 64c467998a90..0efb27f6f199 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -922,8 +922,8 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
}
- urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
- NULL, "%s", node_id);
+ urd->device = device_create(vmur_class, &cdev->dev,
+ urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
TRACE("ur_set_online: device_create rc=%d\n", rc);
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index f055df0b167f..445564c790f6 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv)
EXPORT_SYMBOL(airq_iv_release);
/**
- * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
* @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
*
- * Returns the bit number of the allocated irq, or -1UL if no bit
- * is available or the AIRQ_IV_ALLOC flag has not been specified
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
*/
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
- unsigned long bit;
+ unsigned long bit, i;
- if (!iv->avail)
+ if (!iv->avail || num == 0)
return -1UL;
spin_lock(&iv->lock);
bit = find_first_bit_inv(iv->avail, iv->bits);
- if (bit < iv->bits) {
- clear_bit_inv(bit, iv->avail);
- if (bit >= iv->end)
- iv->end = bit + 1;
- } else
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
+ }
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
bit = -1UL;
spin_unlock(&iv->lock);
return bit;
}
-EXPORT_SYMBOL(airq_iv_alloc_bit);
+EXPORT_SYMBOL(airq_iv_alloc);
/**
- * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * airq_iv_free - free irq bits of an interrupt vector
* @iv: pointer to interrupt vector structure
- * @bit: number of the irq bit to free
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
*/
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
- if (!iv->avail)
+ unsigned long i;
+
+ if (!iv->avail || num == 0)
return;
spin_lock(&iv->lock);
- /* Clear (possibly left over) interrupt bit */
- clear_bit_inv(bit, iv->vector);
- /* Make the bit position available again */
- set_bit_inv(bit, iv->avail);
- if (bit == iv->end - 1) {
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
/* Find new end of bit-field */
- while (--iv->end > 0)
- if (!test_bit_inv(iv->end - 1, iv->avail))
- break;
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
}
spin_unlock(&iv->lock);
}
-EXPORT_SYMBOL(airq_iv_free_bit);
+EXPORT_SYMBOL(airq_iv_free);
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
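The reworked allocator above hands out blocks of consecutive irq bits instead of single bits. A minimal usage sketch under assumed names (an airq_iv vector created elsewhere with AIRQ_IV_ALLOC; the helper below is illustrative and not part of this patch):

#include <linux/errno.h>
#include <asm/airq.h>

/* Sketch only: allocate a block of three consecutive irq bits from an
 * existing vector and release it again.  'iv' is assumed to have been
 * created elsewhere with airq_iv_create(..., AIRQ_IV_ALLOC). */
static int example_use_block(struct airq_iv *iv)
{
	unsigned long first;

	first = airq_iv_alloc(iv, 3);	/* first bit of the block, or -1UL */
	if (first == -1UL)
		return -ENOSPC;		/* no run of three free bits */
	/* ... arm and use bits first .. first + 2 here ... */
	airq_iv_free(iv, first, 3);	/* give the whole block back */
	return 0;
}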
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index fd3367a1dc7a..dfd7bc681c25 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -168,14 +168,12 @@ static ssize_t ccwgroup_online_show(struct device *dev,
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentially created. Saves memory :)
*/
-static void ccwgroup_ungroup_callback(struct device *dev)
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
- device_unregister(dev);
+ device_unregister(&gdev->dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
@@ -195,10 +193,9 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
rc = -EINVAL;
goto out;
}
- /* Note that we cannot unregister the device from one of its
- * attribute methods, so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
out:
if (rc) {
if (rc != -EAGAIN)
@@ -224,6 +221,14 @@ static const struct attribute_group *ccwgroup_attr_groups[] = {
NULL,
};
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
+{
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
+
+ ccwgroup_ungroup(gdev);
+}
+
static void ccwgroup_release(struct device *dev)
{
kfree(to_ccwgroupdev(dev));
@@ -323,6 +328,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
gdev->dev.parent = parent;
@@ -404,10 +410,10 @@ EXPORT_SYMBOL(ccwgroup_create_dev);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
- struct device *dev = data;
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
if (action == BUS_NOTIFY_UNBIND_DRIVER)
- device_schedule_callback(dev, ccwgroup_ungroup_callback);
+ schedule_work(&gdev->ungroup_work);
return NOTIFY_OK;
}
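The ungroup path above moves the actual device removal out of the sysfs store and bus-notifier contexts: the attribute removes itself with device_remove_file_self() and the notifier only queues work. A generic sketch of that deferral pattern follows (illustrative names such as mydev and mydev_request_teardown are assumptions, not this driver's code):

#include <linux/device.h>
#include <linux/workqueue.h>

/* Removal requested from a sysfs store or a bus notifier is pushed to a
 * work item embedded in the object and performed in process context. */
struct mydev {
	struct device dev;
	struct work_struct teardown_work;
};

static void mydev_teardown_workfn(struct work_struct *work)
{
	struct mydev *md = container_of(work, struct mydev, teardown_work);

	/* Off the attribute/notifier call chain now, so unregistering the
	 * device (and with it its attribute files) is safe. */
	device_unregister(&md->dev);
}

static void mydev_init(struct mydev *md)
{
	INIT_WORK(&md->teardown_work, mydev_teardown_workfn);
}

static void mydev_request_teardown(struct mydev *md)
{
	schedule_work(&md->teardown_work);
}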
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f6b9188c5af5..9f0ea6cb6922 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -610,6 +610,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 7b29d0be0ca3..1d3661af7bd8 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -173,8 +173,7 @@ static struct css_driver chsc_subchannel_driver = {
static int __init chsc_init_dbfs(void)
{
- chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
- 16 * sizeof(long));
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..9e058c4657a3 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -28,7 +29,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -54,7 +55,7 @@ debug_info_t *cio_debug_crw_id;
*/
static int __init cio_debug_init(void)
{
- cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
@@ -64,7 +65,7 @@ static int __init cio_debug_init(void)
goto out_unregister;
debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(cio_debug_trace_id, 2);
- cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
@@ -342,8 +343,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
@@ -367,7 +369,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -403,7 +408,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +422,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -444,7 +442,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +453,8 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
@@ -596,8 +585,6 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irq_desc *irq_desc_io;
-
static struct irqaction io_interrupt = {
.name = "IO",
.handler = do_cio_interrupt,
@@ -608,7 +595,6 @@ void __init init_cio_interrupts(void)
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(IO_INTERRUPT, &io_interrupt);
- irq_desc_io = irq_to_desc(IO_INTERRUPT);
}
#ifdef CONFIG_CCW_CONSOLE
@@ -635,7 +621,7 @@ void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
- kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e9d783563cbb..d8d9b5b5cc56 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1571,12 +1571,27 @@ out:
return rc;
}
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
+}
+
#ifdef CONFIG_CCW_CONSOLE
-static int ccw_device_console_enable(struct ccw_device *cdev,
- struct subchannel *sch)
+int __init ccw_device_enable_console(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
int rc;
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
@@ -1609,12 +1624,11 @@ out_unlock:
return rc;
}
-struct ccw_device *ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
struct subchannel *sch;
- int ret;
sch = cio_probe_console();
if (IS_ERR(sch))
@@ -1631,18 +1645,23 @@ struct ccw_device *ccw_device_probe_console(void)
kfree(io_priv);
return cdev;
}
+ cdev->drv = drv;
set_io_private(sch, io_priv);
- ret = ccw_device_console_enable(cdev, sch);
- if (ret) {
- set_io_private(sch, NULL);
- put_device(&sch->dev);
- put_device(&cdev->dev);
- kfree(io_priv);
- return ERR_PTR(ret);
- }
+ ccw_device_set_int_class(cdev);
return cdev;
}
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
/**
* ccw_device_wait_idle() - busy wait for device to become idle
* @cdev: ccw device
@@ -1726,15 +1745,8 @@ ccw_device_probe (struct device *dev)
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
- /* Note: we interpret class 0 in this context as an uninitialized
- * field since it translates to a non-I/O interrupt class. */
- if (cdrv->int_class != 0)
- cdev->private->int_class = cdrv->int_class;
- else
- cdev->private->int_class = IRQIO_CIO;
-
+ ccw_device_set_int_class(cdev);
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
if (ret) {
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
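The rewritten iterators above test the loop index before loading the queue pointer; the old form loaded input_qs[i] in the increment expression even once i had reached nr_input_qs, reading one slot past the used entries. A small standalone illustration of the safe shape (plain userspace C using the GCC statement-expression extension, not kernel code; for_each_val is an invented name):

#include <stdio.h>

/* Same shape as the fixed macros: the element is read only while the
 * index is still below the bound. */
#define for_each_val(arr, n, v, i) \
	for (i = 0; i < (n) && ({ v = (arr)[i]; 1; }); i++)

int main(void)
{
	int vals[3] = { 1, 2, 3 };
	int v, i;

	for_each_val(vals, 3, v, i)
		printf("%d\n", v);
	return 0;
}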
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index dc542e0a3055..0bc91e46395a 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -311,7 +311,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
} __packed * msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen;
+ int replylen, req_sumlen, resp_sumlen;
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
@@ -321,12 +321,34 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
xcRB->request_data_length;
if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+
+ /* Overflow check:
+ sum must be greater than (or equal to) the largest operand */
+ req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if ((CEIL4(xcRB->request_control_blk_length) <=
+ xcRB->request_data_length) ?
+ (req_sumlen < xcRB->request_data_length) :
+ (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ return -EINVAL;
+ }
+
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if (replylen > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+ /* Overflow check:
+ sum must be greater than (or equal to) the largest operand */
+ resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+ (resp_sumlen < xcRB->reply_data_length) :
+ (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ return -EINVAL;
+ }
+
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
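The two checks added above apply the usual wrap-around test for unsigned addition: if a + b overflows, the truncated sum ends up smaller than the larger operand. A standalone sketch of the same test (illustrative only, fixed-width unsigned types rather than the driver's ints):

#include <stdbool.h>
#include <stdint.h>

/* The truncated sum is smaller than the larger operand exactly when the
 * addition wrapped around. */
static bool add_overflows_u32(uint32_t a, uint32_t b)
{
	uint32_t sum = a + b;		/* wraps modulo 2^32 on overflow */

	return sum < (a > b ? a : b);
}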
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index d6297176ab85..1e1fc671f89a 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1,7 +1,7 @@
/*
* ccw based virtio transport
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2014
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -32,6 +32,8 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
+#include <asm/isc.h>
+#include <asm/airq.h>
/*
* virtio related functions
@@ -58,6 +60,9 @@ struct virtio_ccw_device {
unsigned long indicators;
unsigned long indicators2;
struct vq_config_block *config_block;
+ bool is_thinint;
+ bool going_away;
+ void *airq_info;
};
struct vq_info_block {
@@ -72,15 +77,38 @@ struct virtio_feature_desc {
__u8 index;
} __packed;
+struct virtio_thinint_area {
+ unsigned long summary_indicator;
+ unsigned long indicator;
+ u64 bit_nr;
+ u8 isc;
+} __packed;
+
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
void *queue;
struct vq_info_block *info_block;
+ int bit_nr;
struct list_head node;
long cookie;
};
+#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
+
+#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
+#define MAX_AIRQ_AREAS 20
+
+static int virtio_ccw_use_airq = 1;
+
+struct airq_info {
+ rwlock_t lock;
+ u8 summary_indicator;
+ struct airq_struct airq;
+ struct airq_iv *aiv;
+};
+static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -91,6 +119,7 @@ struct virtio_ccw_vq_info {
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_SET_IND_ADAPTER 0x73
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
@@ -102,6 +131,7 @@ struct virtio_ccw_vq_info {
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -109,6 +139,125 @@ static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
return container_of(vdev, struct virtio_ccw_device, vdev);
}
+static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
+{
+ unsigned long i, flags;
+
+ write_lock_irqsave(&info->lock, flags);
+ for (i = 0; i < airq_iv_end(info->aiv); i++) {
+ if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
+ airq_iv_free_bit(info->aiv, i);
+ airq_iv_set_ptr(info->aiv, i, 0);
+ break;
+ }
+ }
+ write_unlock_irqrestore(&info->lock, flags);
+}
+
+static void virtio_airq_handler(struct airq_struct *airq)
+{
+ struct airq_info *info = container_of(airq, struct airq_info, airq);
+ unsigned long ai;
+
+ inc_irq_stat(IRQIO_VAI);
+ read_lock(&info->lock);
+ /* Walk through indicators field, summary indicator active. */
+ for (ai = 0;;) {
+ ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+ if (ai == -1UL)
+ break;
+ vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+ }
+ info->summary_indicator = 0;
+ smp_wmb();
+ /* Walk through indicators field, summary indicator not active. */
+ for (ai = 0;;) {
+ ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+ if (ai == -1UL)
+ break;
+ vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+ }
+ read_unlock(&info->lock);
+}
+
+static struct airq_info *new_airq_info(void)
+{
+ struct airq_info *info;
+ int rc;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+ rwlock_init(&info->lock);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ if (!info->aiv) {
+ kfree(info);
+ return NULL;
+ }
+ info->airq.handler = virtio_airq_handler;
+ info->airq.lsi_ptr = &info->summary_indicator;
+ info->airq.lsi_mask = 0xff;
+ info->airq.isc = VIRTIO_AIRQ_ISC;
+ rc = register_adapter_interrupt(&info->airq);
+ if (rc) {
+ airq_iv_release(info->aiv);
+ kfree(info);
+ return NULL;
+ }
+ return info;
+}
+
+static void destroy_airq_info(struct airq_info *info)
+{
+ if (!info)
+ return;
+
+ unregister_adapter_interrupt(&info->airq);
+ airq_iv_release(info->aiv);
+ kfree(info);
+}
+
+static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ u64 *first, void **airq_info)
+{
+ int i, j;
+ struct airq_info *info;
+ unsigned long indicator_addr = 0;
+ unsigned long bit, flags;
+
+ for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ if (!airq_areas[i])
+ airq_areas[i] = new_airq_info();
+ info = airq_areas[i];
+ if (!info)
+ return 0;
+ write_lock_irqsave(&info->lock, flags);
+ bit = airq_iv_alloc(info->aiv, nvqs);
+ if (bit == -1UL) {
+ /* Not enough vacancies. */
+ write_unlock_irqrestore(&info->lock, flags);
+ continue;
+ }
+ *first = bit;
+ *airq_info = info;
+ indicator_addr = (unsigned long)info->aiv->vector;
+ for (j = 0; j < nvqs; j++) {
+ airq_iv_set_ptr(info->aiv, bit + j,
+ (unsigned long)vqs[j]);
+ }
+ write_unlock_irqrestore(&info->lock, flags);
+ }
+ return indicator_addr;
+}
+
+static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
+{
+ struct virtio_ccw_vq_info *info;
+
+ list_for_each_entry(info, &vcdev->virtqueues, node)
+ drop_airq_indicator(info->vq, vcdev->airq_info);
+}
+
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
unsigned long flags;
@@ -145,6 +294,51 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
return ret ? ret : vcdev->err;
}
+static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+ struct ccw1 *ccw)
+{
+ int ret;
+ unsigned long *indicatorp = NULL;
+ struct virtio_thinint_area *thinint_area = NULL;
+ struct airq_info *airq_info = vcdev->airq_info;
+
+ if (vcdev->is_thinint) {
+ thinint_area = kzalloc(sizeof(*thinint_area),
+ GFP_DMA | GFP_KERNEL);
+ if (!thinint_area)
+ return;
+ thinint_area->summary_indicator =
+ (unsigned long) &airq_info->summary_indicator;
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->count = sizeof(*thinint_area);
+ ccw->cda = (__u32)(unsigned long) thinint_area;
+ } else {
+ indicatorp = kmalloc(sizeof(&vcdev->indicators),
+ GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ return;
+ *indicatorp = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ }
+ /* Deregister indicators from host. */
+ vcdev->indicators = 0;
+ ccw->flags = 0;
+ ret = ccw_io_helper(vcdev, ccw,
+ vcdev->is_thinint ?
+ VIRTIO_CCW_DOING_SET_IND_ADAPTER :
+ VIRTIO_CCW_DOING_SET_IND);
+ if (ret && (ret != -ENODEV))
+ dev_info(&vcdev->cdev->dev,
+ "Failed to deregister indicators (%d)\n", ret);
+ else if (vcdev->is_thinint)
+ virtio_ccw_drop_indicators(vcdev);
+ kfree(indicatorp);
+ kfree(thinint_area);
+}
+
static inline long do_kvm_notify(struct subchannel_id schid,
unsigned long queue_index,
long cookie)
@@ -232,11 +426,13 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
struct ccw1 *ccw;
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
return;
+ virtio_ccw_drop_indicator(vcdev, ccw);
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
@@ -326,6 +522,54 @@ out_err:
return ERR_PTR(err);
}
+static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
+ struct virtqueue *vqs[], int nvqs,
+ struct ccw1 *ccw)
+{
+ int ret;
+ struct virtio_thinint_area *thinint_area = NULL;
+ struct airq_info *info;
+
+ thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ if (!thinint_area) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Try to get an indicator. */
+ thinint_area->indicator = get_airq_indicator(vqs, nvqs,
+ &thinint_area->bit_nr,
+ &vcdev->airq_info);
+ if (!thinint_area->indicator) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ info = vcdev->airq_info;
+ thinint_area->summary_indicator =
+ (unsigned long) &info->summary_indicator;
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = sizeof(*thinint_area);
+ ccw->cda = (__u32)(unsigned long)thinint_area;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
+ if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ /*
+ * The host does not support adapter interrupts
+ * for virtio-ccw, stop trying.
+ */
+ virtio_ccw_use_airq = 0;
+ pr_info("Adapter interrupts unsupported on host\n");
+ } else
+ dev_warn(&vcdev->cdev->dev,
+ "enabling adapter interrupts = %d\n", ret);
+ virtio_ccw_drop_indicators(vcdev);
+ }
+out:
+ kfree(thinint_area);
+ return ret;
+}
+
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
@@ -355,15 +599,23 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
if (!indicatorp)
goto out;
*indicatorp = (unsigned long) &vcdev->indicators;
- /* Register queue indicators with host. */
- vcdev->indicators = 0;
- ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->flags = 0;
- ccw->count = sizeof(vcdev->indicators);
- ccw->cda = (__u32)(unsigned long) indicatorp;
- ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
- if (ret)
- goto out;
+ if (vcdev->is_thinint) {
+ ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
+ if (ret)
+ /* no error, just fall back to legacy interrupts */
+ vcdev->is_thinint = 0;
+ }
+ if (!vcdev->is_thinint) {
+ /* Register queue indicators with host. */
+ vcdev->indicators = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->flags = 0;
+ ccw->count = sizeof(vcdev->indicators);
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+ if (ret)
+ goto out;
+ }
/* Register indicators2 with host for config changes */
*indicatorp = (unsigned long) &vcdev->indicators2;
vcdev->indicators2 = 0;
@@ -636,14 +888,23 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
struct virtqueue *vq;
struct virtio_driver *drv;
+ if (!vcdev)
+ return;
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
/* OK */
}
- if (irb_is_error(irb))
- vcdev->err = -EIO; /* XXX - use real error */
+ if (irb_is_error(irb)) {
+ /* Command reject? */
+ if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+ (irb->ecw[0] & SNS0_CMD_REJECT))
+ vcdev->err = -EOPNOTSUPP;
+ else
+ /* Map everything else to -EIO. */
+ vcdev->err = -EIO;
+ }
if (vcdev->curr_io & activity) {
switch (activity) {
case VIRTIO_CCW_DOING_READ_FEAT:
@@ -656,6 +917,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
case VIRTIO_CCW_DOING_SET_CONF_IND:
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
+ case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
@@ -727,23 +989,46 @@ static int virtio_ccw_probe(struct ccw_device *cdev)
return 0;
}
+static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
+{
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ vcdev = dev_get_drvdata(&cdev->dev);
+ if (!vcdev || vcdev->going_away) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return NULL;
+ }
+ vcdev->going_away = true;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return vcdev;
+}
+
static void virtio_ccw_remove(struct ccw_device *cdev)
{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- if (cdev->online) {
+ if (vcdev && cdev->online)
unregister_virtio_device(&vcdev->vdev);
- dev_set_drvdata(&cdev->dev, NULL);
- }
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
cdev->handler = NULL;
}
static int virtio_ccw_offline(struct ccw_device *cdev)
{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ unsigned long flags;
+ struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
- unregister_virtio_device(&vcdev->vdev);
- dev_set_drvdata(&cdev->dev, NULL);
+ if (vcdev) {
+ unregister_virtio_device(&vcdev->vdev);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ }
return 0;
}
@@ -752,6 +1037,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
{
int ret;
struct virtio_ccw_device *vcdev;
+ unsigned long flags;
vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
if (!vcdev) {
@@ -771,6 +1057,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
goto out_free;
}
+ vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
+
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
@@ -779,7 +1067,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
ret = register_virtio_device(&vcdev->vdev);
@@ -790,7 +1080,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
}
return 0;
out_put:
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
put_device(&vcdev->vdev.dev);
return ret;
out_free:
@@ -928,6 +1220,10 @@ module_init(virtio_ccw_init);
static void __exit virtio_ccw_exit(void)
{
+ int i;
+
ccw_driver_unregister(&virtio_ccw_driver);
+ for (i = 0; i < MAX_AIRQ_AREAS; i++)
+ destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);
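A note on the adapter-interrupt handler added above (virtio_airq_handler): the indicator bits are scanned twice, once while the summary indicator is still set, then again after clearing it behind a write barrier. The second pass catches indicators the host raised between the first scan and the clearing of the summary, so a queue notification cannot be lost in that window. In outline (comment sketch only, names as in the hunk above):

/*
 *	airq_iv_scan(...);	dispatch everything pending; summary still
 *				set, so the host need not raise it again
 *	summary_indicator = 0;
 *	smp_wmb();		publish the clear before re-checking
 *	airq_iv_scan(...);	pick up indicators set in the meantime;
 *				new events now set the summary again
 */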
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 4dfe8c1092da..d28f05d0c75a 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_LCS) += lcs.o
obj-$(CONFIG_CLAW) += claw.o
qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
obj-$(CONFIG_QETH) += qeth.o
-qeth_l2-y += qeth_l2_main.o
+qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 9b333fcf1a4c..ce16d1bdb20a 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -739,8 +739,12 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 4, __func__);
- if (conn && conn->netdev)
- privptr = netdev_priv(conn->netdev);
+ if (!conn || !conn->netdev) {
+ IUCV_DBF_TEXT(data, 2,
+ "Send confirmation for unlinked connection\n");
+ return;
+ }
+ privptr = netdev_priv(conn->netdev);
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 41ef94320ee8..a0de045eb227 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -156,6 +156,27 @@ struct qeth_ipa_info {
__u32 enabled_funcs;
};
+/* SETBRIDGEPORT stuff */
+enum qeth_sbp_roles {
+ QETH_SBP_ROLE_NONE = 0,
+ QETH_SBP_ROLE_PRIMARY = 1,
+ QETH_SBP_ROLE_SECONDARY = 2,
+};
+
+enum qeth_sbp_states {
+ QETH_SBP_STATE_INACTIVE = 0,
+ QETH_SBP_STATE_STANDBY = 1,
+ QETH_SBP_STATE_ACTIVE = 2,
+};
+
+#define QETH_SBP_HOST_NOTIFICATION 1
+
+struct qeth_sbp_info {
+ __u32 supported_funcs;
+ enum qeth_sbp_roles role;
+ __u32 hostnotification:1;
+};
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -672,6 +693,7 @@ struct qeth_card_options {
struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
struct qeth_ipa_info ipa6;
+ struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
int fake_broadcast;
int add_hhlen;
int layer2;
@@ -716,6 +738,8 @@ struct qeth_discipline {
int (*freeze)(struct ccwgroup_device *);
int (*thaw) (struct ccwgroup_device *);
int (*restore)(struct ccwgroup_device *);
+ int (*control_event_handler)(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
};
struct qeth_vlan_vid {
@@ -738,6 +762,12 @@ struct qeth_rx {
int qdio_err;
};
+struct carrier_info {
+ __u8 card_type;
+ __u16 port_mode;
+ __u32 port_speed;
+};
+
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
@@ -851,6 +881,7 @@ extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
+extern struct workqueue_struct *qeth_wq;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
@@ -914,9 +945,15 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
int qeth_get_elements_for_frags(struct sk_buff *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index eb4e1f809feb..a0aff2eb247c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -33,8 +33,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
- [QETH_DBF_MSG] = {"qeth_msg",
- 8, 1, 128, 3, &debug_sprintf_view, NULL},
+ [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
+ &debug_sprintf_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
@@ -68,7 +68,8 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
-static struct workqueue_struct *qeth_wq;
+struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -615,6 +616,13 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
return NULL;
+ case IPA_CMD_SETBRIDGEPORT:
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ if (card->discipline->control_event_handler
+ (card, cmd))
+ return cmd;
+ else
+ return NULL;
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -1652,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -2597,6 +2604,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
out_qdio:
qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qdio_free(CARD_DDEV(card));
return rc;
}
@@ -4602,6 +4610,42 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
+int qeth_query_card_info_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_query_card_info *card_info;
+ struct carrier_info *carrier_info;
+
+ QETH_CARD_TEXT(card, 2, "qcrdincb");
+ carrier_info = (struct carrier_info *)reply->param;
+ cmd = (struct qeth_ipa_cmd *)data;
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ if (cmd->data.setadapterparms.hdr.return_code == 0) {
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
+ }
+
+ qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ return 0;
+}
+
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info)
+{
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "qcrdinfo");
+ if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
+ return -EOPNOTSUPP;
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
+ sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
+ (void *)carrier_info);
+}
+EXPORT_SYMBOL_GPL(qeth_query_card_info);
+
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
switch (card->info.type) {
@@ -4862,9 +4906,11 @@ retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
+ rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
rc = ccw_device_set_online(CARD_RDEV(card));
if (rc)
goto retriable;
@@ -4874,7 +4920,6 @@ retry:
rc = ccw_device_set_online(CARD_DDEV(card));
if (rc)
goto retriable;
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
retriable:
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break1");
@@ -4920,6 +4965,7 @@ retriable:
card->options.ipa4.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
+ card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
qeth_query_ipassists(card, QETH_PROT_IPV4);
if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
@@ -5606,11 +5652,65 @@ void qeth_core_get_drvinfo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
+/* Helper function to fill 'advertising' and 'supported' which are the same. */
+/* Autoneg and full-duplex are supported and advertised unconditionally. */
+/* Always advertise and support all speeds up to the specified maximum, and */
+/* only one specified port type. */
+static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd,
+ int maxspeed, int porttype)
+{
+ int port_sup, port_adv, spd_sup, spd_adv;
+
+ switch (porttype) {
+ case PORT_TP:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ break;
+ case PORT_FIBRE:
+ port_sup = SUPPORTED_FIBRE;
+ port_adv = ADVERTISED_FIBRE;
+ break;
+ default:
+ port_sup = SUPPORTED_TP;
+ port_adv = ADVERTISED_TP;
+ WARN_ON_ONCE(1);
+ }
+
+ /* "Fallthrough" case'es ordered from high to low result in setting */
+ /* flags cumulatively, starting from the specified speed and down to */
+ /* the lowest possible. */
+ spd_sup = 0;
+ spd_adv = 0;
+ switch (maxspeed) {
+ case SPEED_10000:
+ spd_sup |= SUPPORTED_10000baseT_Full;
+ spd_adv |= ADVERTISED_10000baseT_Full;
+ case SPEED_1000:
+ spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+ spd_adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ case SPEED_100:
+ spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
+ spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ case SPEED_10:
+ spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ break;
+ default:
+ spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ WARN_ON_ONCE(1);
+ }
+ ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv;
+ ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup;
+}
+
int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
+ struct carrier_info carrier_info;
if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
link_type = QETH_LINK_TYPE_10GBIT_ETH;
@@ -5618,80 +5718,92 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
link_type = card->info.link_type;
ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_ENABLE;
switch (link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP);
ecmd->speed = SPEED_100;
ecmd->port = PORT_TP;
break;
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH1000:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_10GBIT_ETH:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
ecmd->speed = SPEED_10000;
ecmd->port = PORT_FIBRE;
break;
default:
- ecmd->supported |= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_TP;
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP);
ecmd->speed = SPEED_10;
ecmd->port = PORT_TP;
}
+ /* Check if we can obtain more accurate information. */
+ /* If QUERY_CARD_INFO command is not supported or fails, */
+ /* just return the heuristic values filled in above. */
+ if (qeth_query_card_info(card, &carrier_info) != 0)
+ return 0;
+
+ netdev_dbg(netdev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ carrier_info.card_type,
+ carrier_info.port_mode,
+ carrier_info.port_speed);
+
+ /* Update attributes for which we've obtained more authoritative */
+ /* information, leave the rest the way they were filled above. */
+ switch (carrier_info.card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP);
+ ecmd->port = PORT_TP;
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ break;
+ }
+
+ switch (carrier_info.port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ ecmd->duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ ecmd->duplex = DUPLEX_HALF;
+ break;
+ }
+
+ switch (carrier_info.port_speed) {
+ case CARD_INFO_PORTS_10M:
+ ecmd->speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ ecmd->speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ ecmd->speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ ecmd->speed = SPEED_10000;
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 06c55780005e..7b55768a9592 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -249,10 +249,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
+ {IPA_CMD_SETBRIDGEPORT, "set_bridge_port"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
+ {IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"},
{IPA_CMD_UNKNOWN, "unknown"},
};
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 07085d55f9a1..cf6a90ed42ae 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -104,10 +104,12 @@ enum qeth_ipa_cmds {
IPA_CMD_DELIP = 0xb7,
IPA_CMD_SETADAPTERPARMS = 0xb8,
IPA_CMD_SET_DIAG_ASS = 0xb9,
+ IPA_CMD_SETBRIDGEPORT = 0xbe,
IPA_CMD_CREATE_ADDR = 0xc3,
IPA_CMD_DESTROY_ADDR = 0xc4,
IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
+ IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3,
IPA_CMD_UNKNOWN = 0x00
};
@@ -274,7 +276,24 @@ enum qeth_ipa_set_access_mode_rc {
SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024,
SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028,
};
-
+enum qeth_card_info_card_type {
+ CARD_INFO_TYPE_1G_COPPER_A = 0x61,
+ CARD_INFO_TYPE_1G_FIBRE_A = 0x71,
+ CARD_INFO_TYPE_10G_FIBRE_A = 0x91,
+ CARD_INFO_TYPE_1G_COPPER_B = 0xb1,
+ CARD_INFO_TYPE_1G_FIBRE_B = 0xa1,
+ CARD_INFO_TYPE_10G_FIBRE_B = 0xc1,
+};
+enum qeth_card_info_port_mode {
+ CARD_INFO_PORTM_HALFDUPLEX = 0x0002,
+ CARD_INFO_PORTM_FULLDUPLEX = 0x0003,
+};
+enum qeth_card_info_port_speed {
+ CARD_INFO_PORTS_10M = 0x00000005,
+ CARD_INFO_PORTS_100M = 0x00000006,
+ CARD_INFO_PORTS_1G = 0x00000007,
+ CARD_INFO_PORTS_10G = 0x00000008,
+};
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
@@ -404,6 +423,14 @@ struct qeth_qoat_priv {
char *buffer;
};
+struct qeth_query_card_info {
+ __u8 card_type;
+ __u8 reserved1;
+ __u16 port_mode;
+ __u32 port_speed;
+ __u32 reserved2;
+};
+
struct qeth_ipacmd_setadpparms_hdr {
__u32 supp_hw_cmds;
__u32 reserved1;
@@ -424,6 +451,7 @@ struct qeth_ipacmd_setadpparms {
struct qeth_snmp_cmd snmp;
struct qeth_set_access_ctrl set_access_ctrl;
struct qeth_query_oat query_oat;
+ struct qeth_query_card_info card_info;
__u32 mode;
} data;
} __attribute__ ((packed));
@@ -474,6 +502,124 @@ struct qeth_ipacmd_diagass {
__u8 cdata[64];
} __attribute__ ((packed));
+/* SETBRIDGEPORT IPA Command: *********************************************/
+enum qeth_ipa_sbp_cmd {
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
+ IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L,
+ IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L,
+ IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L,
+ IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L,
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L,
+};
+
+struct net_if_token {
+ __u16 devnum;
+ __u8 cssid;
+ __u8 iid;
+ __u8 ssid;
+ __u8 chpid;
+ __u16 chid;
+} __packed;
+
+struct mac_addr_lnid {
+ __u8 mac[6];
+ __u16 lnid;
+} __packed;
+
+struct qeth_ipacmd_sbp_hdr {
+ __u32 supported_sbp_cmds;
+ __u32 enabled_sbp_cmds;
+ __u16 cmdlength;
+ __u16 reserved1;
+ __u32 command_code;
+ __u16 return_code;
+ __u8 used_total;
+ __u8 seq_no;
+ __u32 reserved2;
+} __packed;
+
+struct qeth_sbp_query_cmds_supp {
+ __u32 supported_cmds;
+ __u32 reserved;
+} __packed;
+
+struct qeth_sbp_reset_role {
+} __packed;
+
+struct qeth_sbp_set_primary {
+ struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_set_secondary {
+} __packed;
+
+struct qeth_sbp_port_entry {
+ __u8 role;
+ __u8 state;
+ __u8 reserved1;
+ __u8 reserved2;
+ struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_query_ports {
+ __u8 primary_bp_supported;
+ __u8 secondary_bp_supported;
+ __u8 num_entries;
+ __u8 entry_length;
+ struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_sbp_state_change {
+ __u8 primary_bp_supported;
+ __u8 secondary_bp_supported;
+ __u8 num_entries;
+ __u8 entry_length;
+ struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_ipacmd_setbridgeport {
+ struct qeth_ipacmd_sbp_hdr hdr;
+ union {
+ struct qeth_sbp_query_cmds_supp query_cmds_supp;
+ struct qeth_sbp_reset_role reset_role;
+ struct qeth_sbp_set_primary set_primary;
+ struct qeth_sbp_set_secondary set_secondary;
+ struct qeth_sbp_query_ports query_ports;
+ struct qeth_sbp_state_change state_change;
+ } data;
+} __packed;
+
+/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
+/* Bitmask for entry->change_code. Both bits may be raised. */
+enum qeth_ipa_addr_change_code {
+ IPA_ADDR_CHANGE_CODE_VLANID = 0x01,
+ IPA_ADDR_CHANGE_CODE_MACADDR = 0x02,
+ IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */
+};
+enum qeth_ipa_addr_change_retcode {
+ IPA_ADDR_CHANGE_RETCODE_OK = 0x0000,
+ IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010,
+};
+enum qeth_ipa_addr_change_lostmask {
+ IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01,
+ IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02,
+};
+
+struct qeth_ipacmd_addr_change_entry {
+ struct net_if_token token;
+ struct mac_addr_lnid addr_lnid;
+ __u8 change_code;
+ __u8 reserved1;
+ __u16 reserved2;
+} __packed;
+
+struct qeth_ipacmd_addr_change {
+ __u8 lost_event_mask;
+ __u8 reserved;
+ __u16 num_entries;
+ struct qeth_ipacmd_addr_change_entry entry[];
+} __packed;
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -503,6 +649,8 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
struct qeth_ipacmd_diagass diagass;
+ struct qeth_ipacmd_setbridgeport sbp;
+ struct qeth_ipacmd_addr_change addrchange;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
new file mode 100644
index 000000000000..0767556404bd
--- /dev/null
+++ b/drivers/s390/net/qeth_l2.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright IBM Corp. 2013
+ * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#ifndef __QETH_L2_H__
+#define __QETH_L2_H__
+
+#include "qeth_core.h"
+
+int qeth_l2_create_device_attributes(struct device *);
+void qeth_l2_remove_device_attributes(struct device *);
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+
+#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ec8ccdae7aba..908d82529ee9 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include "qeth_core.h"
+#include "qeth_l2.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
@@ -32,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
unsigned long));
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -880,6 +886,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_l2_create_device_attributes(&gdev->dev);
INIT_LIST_HEAD(&card->vid_list);
INIT_LIST_HEAD(&card->mc_list);
card->options.layer2 = 1;
@@ -891,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+ qeth_l2_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -986,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
rc = -ENODEV;
goto out_remove;
}
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs)
+ dev_info(&card->gdev->dev,
+ "The device represents a HiperSockets Bridge Capable Port\n");
qeth_trace_features(card);
if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1003,6 +1015,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
} else
card->info.hwtrap = 0;
+ qeth_l2_setup_bridgeport_attrs(card);
+
card->state = CARD_STATE_HARDSETUP;
memset(&card->rx, 0, sizeof(struct qeth_rx));
qeth_print_status_message(card);
@@ -1077,6 +1091,7 @@ out_remove:
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -1118,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -1180,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
@@ -1228,6 +1245,26 @@ out:
return rc;
}
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ switch (cmd->hdr.command) {
+ case IPA_CMD_SETBRIDGEPORT:
+ if (cmd->data.sbp.hdr.command_code ==
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+ qeth_bridge_state_change(card, cmd);
+ return 0;
+ } else
+ return 1;
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ qeth_bridge_host_event(card, cmd);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
struct qeth_discipline qeth_l2_discipline = {
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1241,6 +1278,7 @@ struct qeth_discipline qeth_l2_discipline = {
.freeze = qeth_l2_pm_suspend,
.thaw = qeth_l2_pm_resume,
.restore = qeth_l2_pm_resume,
+ .control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
@@ -1347,6 +1385,594 @@ void qeth_osn_deregister(struct net_device *dev)
}
EXPORT_SYMBOL(qeth_osn_deregister);
+/* SETBRIDGEPORT support, async notifications */
+
+enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
+
+/**
+ * qeth_bridge_emit_host_event() - bridgeport address change notification
+ * @card: qeth_card structure pointer, for udev events.
+ * @evtype: "normal" register/unregister, or abort, or reset. For abort
+ * and reset, token and addr_lnid are unused and may be NULL.
+ * @code: event bitmask: high order bit 0x80 set means removal of an object,
+ * clear means addition of an object;
+ * low bits: 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
+ * @token: "network token" structure identifying physical address of the port.
+ * @addr_lnid: pointer to structure with MAC address and VLAN ID.
+ *
+ * This function is called when registrations and deregistrations are
+ * reported by the hardware, and also when notifications are enabled -
+ * for all currently registered addresses.
+ */
+static void qeth_bridge_emit_host_event(struct qeth_card *card,
+ enum qeth_an_event_type evtype,
+ u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+{
+ char str[7][32];
+ char *env[8];
+ int i = 0;
+
+ switch (evtype) {
+ case anev_reg_unreg:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
+ (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
+ ? "deregister" : "register");
+ env[i] = str[i]; i++;
+ if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
+ snprintf(str[i], sizeof(str[i]), "VLAN=%d",
+ addr_lnid->lnid);
+ env[i] = str[i]; i++;
+ }
+ if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
+ snprintf(str[i], sizeof(str[i]), "MAC=%pM6",
+ &addr_lnid->mac);
+ env[i] = str[i]; i++;
+ }
+ snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
+ token->cssid, token->ssid, token->devnum);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
+ token->chpid);
+ env[i] = str[i]; i++;
+ snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
+ env[i] = str[i]; i++;
+ break;
+ case anev_abort:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
+ env[i] = str[i]; i++;
+ break;
+ case anev_reset:
+ snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
+ env[i] = str[i]; i++;
+ break;
+ }
+ env[i] = NULL;
+ kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
+}
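For illustration only, a minimal sketch (not part of the patch) of how a caller could encode a MAC-plus-VLAN deregistration with the code bitmask documented above; the constants and the emit helper are the ones introduced here, while the wrapper function itself is hypothetical.

/* Hypothetical caller: report removal of a MAC/VLAN pair (sketch only). */
static void example_report_removal(struct qeth_card *card,
				   struct net_if_token *token,
				   struct mac_addr_lnid *addr_lnid)
{
	/* high order bit = removal; 0x01 = VLAN present, 0x02 = MAC present */
	u8 code = IPA_ADDR_CHANGE_CODE_REMOVAL |
		  IPA_ADDR_CHANGE_CODE_MACADDR |
		  IPA_ADDR_CHANGE_CODE_VLANID;

	/* Emits BRIDGEDHOST=deregister plus VLAN=, MAC= and NTOK_* variables. */
	qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
				    token, addr_lnid);
}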
+
+struct qeth_bridge_state_data {
+ struct work_struct worker;
+ struct qeth_card *card;
+ struct qeth_sbp_state_change qports;
+};
+
+static void qeth_bridge_state_change_worker(struct work_struct *work)
+{
+ struct qeth_bridge_state_data *data =
+ container_of(work, struct qeth_bridge_state_data, worker);
+ /* We are only interested in the first entry - local port */
+ struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
+ char env_locrem[32];
+ char env_role[32];
+ char env_state[32];
+ char *env[] = {
+ env_locrem,
+ env_role,
+ env_state,
+ NULL
+ };
+
+ /* Role should not change by itself, but if it did, */
+ /* information from the hardware is authoritative. */
+ mutex_lock(&data->card->conf_mutex);
+ data->card->options.sbp.role = entry->role;
+ mutex_unlock(&data->card->conf_mutex);
+
+ snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ snprintf(env_role, sizeof(env_role), "ROLE=%s",
+ (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ "<INVALID>");
+ snprintf(env_state, sizeof(env_state), "STATE=%s",
+ (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ "<INVALID>");
+ kobject_uevent_env(&data->card->gdev->dev.kobj,
+ KOBJ_CHANGE, env);
+ kfree(data);
+}
+
+static void qeth_bridge_state_change(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ struct qeth_sbp_state_change *qports =
+ &cmd->data.sbp.data.state_change;
+ struct qeth_bridge_state_data *data;
+ int extrasize;
+
+ QETH_CARD_TEXT(card, 2, "brstchng");
+ if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+ QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length);
+ return;
+ }
+ extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
+ data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
+ GFP_ATOMIC);
+ if (!data) {
+ QETH_CARD_TEXT(card, 2, "BPSalloc");
+ return;
+ }
+ INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
+ data->card = card;
+ memcpy(&data->qports, qports,
+ sizeof(struct qeth_sbp_state_change) + extrasize);
+ queue_work(qeth_wq, &data->worker);
+}
+
+struct qeth_bridge_host_data {
+ struct work_struct worker;
+ struct qeth_card *card;
+ struct qeth_ipacmd_addr_change hostevs;
+};
+
+static void qeth_bridge_host_event_worker(struct work_struct *work)
+{
+ struct qeth_bridge_host_data *data =
+ container_of(work, struct qeth_bridge_host_data, worker);
+ int i;
+
+ if (data->hostevs.lost_event_mask) {
+ dev_info(&data->card->gdev->dev,
+"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n",
+ data->card->dev->name,
+ (data->hostevs.lost_event_mask == 0x01)
+ ? "Overflow"
+ : (data->hostevs.lost_event_mask == 0x02)
+ ? "Bridge port state change"
+ : "Unknown reason");
+ mutex_lock(&data->card->conf_mutex);
+ data->card->options.sbp.hostnotification = 0;
+ mutex_unlock(&data->card->conf_mutex);
+ qeth_bridge_emit_host_event(data->card, anev_abort,
+ 0, NULL, NULL);
+ } else
+ for (i = 0; i < data->hostevs.num_entries; i++) {
+ struct qeth_ipacmd_addr_change_entry *entry =
+ &data->hostevs.entry[i];
+ qeth_bridge_emit_host_event(data->card,
+ anev_reg_unreg,
+ entry->change_code,
+ &entry->token, &entry->addr_lnid);
+ }
+ kfree(data);
+}
+
+static void qeth_bridge_host_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ struct qeth_ipacmd_addr_change *hostevs =
+ &cmd->data.addrchange;
+ struct qeth_bridge_host_data *data;
+ int extrasize;
+
+ QETH_CARD_TEXT(card, 2, "brhostev");
+ if (cmd->hdr.return_code != 0x0000) {
+ if (cmd->hdr.return_code == 0x0010) {
+ if (hostevs->lost_event_mask == 0x00)
+ hostevs->lost_event_mask = 0xff;
+ } else {
+ QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+ cmd->hdr.return_code);
+ return;
+ }
+ }
+ extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
+ hostevs->num_entries;
+ data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
+ GFP_ATOMIC);
+ if (!data) {
+ QETH_CARD_TEXT(card, 2, "BPHalloc");
+ return;
+ }
+ INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+ data->card = card;
+ memcpy(&data->hostevs, hostevs,
+ sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+ queue_work(qeth_wq, &data->worker);
+}
+
+/* SETBRIDGEPORT support; sending commands */
+
+struct _qeth_sbp_cbctl {
+ u16 ipa_rc;
+ u16 cmd_rc;
+ union {
+ u32 supported;
+ struct {
+ enum qeth_sbp_roles *role;
+ enum qeth_sbp_states *state;
+ } qports;
+ } data;
+};
+
+/**
+ * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes.
+ * @card: qeth_card structure pointer, for debug messages.
+ * @cbctl: state structure with hardware return codes.
+ * @setcmd: IPA command code
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_bridgeport_makerc(struct qeth_card *card,
+ struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
+{
+ int rc;
+
+ switch (cbctl->ipa_rc) {
+ case IPA_RC_SUCCESS:
+ switch (cbctl->cmd_rc) {
+ case 0x0000:
+ rc = 0;
+ break;
+ case 0x0004:
+ rc = -ENOSYS;
+ break;
+ case 0x000C: /* Not configured as bridge Port */
+ rc = -ENODEV; /* maybe not the best code here? */
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is not configured as a Bridge Port\n");
+ break;
+ case 0x0014: /* Another device is Primary */
+ switch (setcmd) {
+ case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
+ rc = -EEXIST;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets LAN already has a primary Bridge Port\n");
+ break;
+ case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a primary Bridge Port\n");
+ break;
+ default:
+ rc = -EIO;
+ }
+ break;
+ case 0x0018: /* This device is currently Secondary */
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a secondary Bridge Port\n");
+ break;
+ case 0x001C: /* Limit for Secondary devices reached */
+ rc = -EEXIST;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets LAN cannot have more secondary Bridge Ports\n");
+ break;
+ case 0x0024: /* This device is currently Primary */
+ rc = -EBUSY;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is already a primary Bridge Port\n");
+ break;
+ case 0x0020: /* Not authorized by zManager */
+ rc = -EACCES;
+ dev_err(&card->gdev->dev,
+ "The HiperSockets device is not authorized to be a Bridge Port\n");
+ break;
+ default:
+ rc = -EIO;
+ }
+ break;
+ case IPA_RC_NOTSUPP:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_UNSUPPORTED_COMMAND:
+ rc = -ENOSYS;
+ break;
+ default:
+ rc = -EIO;
+ }
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
+ }
+ return rc;
+}
+
+static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ QETH_CARD_TEXT(card, 2, "brqsupcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) {
+ cbctl->data.supported =
+ cmd->data.sbp.data.query_cmds_supp.supported_cmds;
+ } else {
+ cbctl->data.supported = 0;
+ }
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
+ * @card: qeth_card structure pointer.
+ *
+ * Sets the bitmask of supported setbridgeport subfunctions in the qeth_card
+ * structure: card->options.sbp.supported_funcs.
+ */
+static void qeth_bridgeport_query_support(struct qeth_card *card)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl;
+
+ QETH_CARD_TEXT(card, 2, "brqsuppo");
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength =
+ sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_query_cmds_supp);
+ cmd->data.sbp.hdr.command_code =
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
+ (void *)&cbctl) ||
+ qeth_bridgeport_makerc(card, &cbctl,
+ IPA_SBP_QUERY_COMMANDS_SUPPORTED)) {
+ /* a non-zero makerc signifies failure and has already produced messages */
+ card->options.sbp.role = QETH_SBP_ROLE_NONE;
+ return;
+ }
+ card->options.sbp.supported_funcs = cbctl.data.supported;
+}
+
+static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
+ QETH_CARD_TEXT(card, 2, "brqprtcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0))
+ return 0;
+ if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+ cbctl->cmd_rc = 0xffff;
+ QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
+ return 0;
+ }
+ /* first entry contains the state of the local port */
+ if (qports->num_entries > 0) {
+ if (cbctl->data.qports.role)
+ *cbctl->data.qports.role = qports->entry[0].role;
+ if (cbctl->data.qports.state)
+ *cbctl->data.qports.state = qports->entry[0].state;
+ }
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_query_ports() - query local bridgeport status.
+ * @card: qeth_card structure pointer.
+ * @role: Role of the port: 0-none, 1-primary, 2-secondary.
+ * @state: State of the port: 0-inactive, 1-standby, 2-active.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * 'role' and 'state' are not updated in case of hardware operation failure.
+ */
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
+{
+ int rc = 0;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl = {
+ .data = {
+ .qports = {
+ .role = role,
+ .state = state,
+ },
+ },
+ };
+
+ QETH_CARD_TEXT(card, 2, "brqports");
+ if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
+ return -EOPNOTSUPP;
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength =
+ sizeof(struct qeth_ipacmd_sbp_hdr);
+ cmd->data.sbp.hdr.command_code =
+ IPA_SBP_QUERY_BRIDGE_PORTS;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
+ (void *)&cbctl);
+ if (rc)
+ return rc;
+ rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
+ if (rc)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
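As a usage illustration only (not part of this patch), a caller reading back the local port's role and state might look like the sketch below; the sysfs show routine added in qeth_l2_sys.c later in this diff does essentially the same thing under conf_mutex.

/* Sketch: query the local Bridge Port role and state of a card. */
static int example_query_bridgeport(struct qeth_card *card)
{
	enum qeth_sbp_roles role;
	enum qeth_sbp_states state;
	int rc;

	rc = qeth_bridgeport_query_ports(card, &role, &state);
	if (rc)
		return rc;	/* errno already derived by qeth_bridgeport_makerc() */

	dev_info(&card->gdev->dev, "bridgeport role=%d state=%d\n",
		 role, state);
	return 0;
}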
+
+static int qeth_bridgeport_set_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ QETH_CARD_TEXT(card, 2, "brsetrcb");
+ cbctl->ipa_rc = cmd->hdr.return_code;
+ cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+ return 0;
+}
+
+/**
+ * qeth_bridgeport_setrole() - Assign primary role to the port.
+ * @card: qeth_card structure pointer.
+ * @role: Role to assign.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
+{
+ int rc = 0;
+ int cmdlength;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct _qeth_sbp_cbctl cbctl;
+ enum qeth_ipa_sbp_cmd setcmd;
+
+ QETH_CARD_TEXT(card, 2, "brsetrol");
+ switch (role) {
+ case QETH_SBP_ROLE_NONE:
+ setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_reset_role);
+ break;
+ case QETH_SBP_ROLE_PRIMARY:
+ setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_set_primary);
+ break;
+ case QETH_SBP_ROLE_SECONDARY:
+ setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
+ cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+ sizeof(struct qeth_sbp_set_secondary);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!(card->options.sbp.supported_funcs & setcmd))
+ return -EOPNOTSUPP;
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.sbp.hdr.cmdlength = cmdlength;
+ cmd->data.sbp.hdr.command_code = setcmd;
+ cmd->data.sbp.hdr.used_total = 1;
+ cmd->data.sbp.hdr.seq_no = 1;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
+ (void *)&cbctl);
+ if (rc)
+ return rc;
+ rc = qeth_bridgeport_makerc(card, &cbctl, setcmd);
+ return rc;
+}
+
+/**
+ * qeth_anset_makerc() - derive "traditional" error from hardware codes.
+ * @card: qeth_card structure pointer, for debug messages.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
+{
+ int rc;
+
+ if (pnso_rc == 0)
+ switch (response) {
+ case 0x0001:
+ rc = 0;
+ break;
+ case 0x0004:
+ case 0x0100:
+ case 0x0106:
+ rc = -ENOSYS;
+ dev_err(&card->gdev->dev,
+ "Setting address notification failed\n");
+ break;
+ case 0x0107:
+ rc = -EAGAIN;
+ break;
+ default:
+ rc = -EIO;
+ }
+ else
+ rc = -EIO;
+
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
+ }
+ return rc;
+}
+
+static void qeth_bridgeport_an_set_cb(void *priv,
+ enum qdio_brinfo_entry_type type, void *entry)
+{
+ struct qeth_card *card = (struct qeth_card *)priv;
+ struct qdio_brinfo_entry_l2 *l2entry;
+ u8 code;
+
+ if (type != l2_addr_lnid) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ l2entry = (struct qdio_brinfo_entry_l2 *)entry;
+ code = IPA_ADDR_CHANGE_CODE_MACADDR;
+ if (l2entry->addr_lnid.lnid)
+ code |= IPA_ADDR_CHANGE_CODE_VLANID;
+ qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
+ (struct net_if_token *)&l2entry->nit,
+ (struct mac_addr_lnid *)&l2entry->addr_lnid);
+}
+
+/**
+ * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
+ * @card: qeth_card structure pointer.
+ * @enable: 0 - disable, non-zero - enable notifications
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notification udev events for all
+ * currently registered hosts.
+ */
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
+{
+ int rc;
+ u16 response;
+ struct ccw_device *ddev;
+ struct subchannel_id schid;
+
+ if (!card)
+ return -EINVAL;
+ if (!card->options.sbp.supported_funcs)
+ return -EOPNOTSUPP;
+ ddev = CARD_DDEV(card);
+ ccw_device_get_schid(ddev, &schid);
+
+ if (enable) {
+ qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
+ rc = qdio_pnso_brinfo(schid, 1, &response,
+ qeth_bridgeport_an_set_cb, card);
+ } else
+ rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
+ return qeth_anset_makerc(card, rc, response);
+}
+EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
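Again purely as a hedged sketch (hypothetical helper, not in the patch): enabling address notifications follows the same pattern the bridge_hostnotify sysfs store handler below uses, namely calling the export under conf_mutex and caching the result in card->options.sbp.hostnotification.

/* Sketch: turn on bridgeport address notifications for a card. */
static int example_enable_hostnotify(struct qeth_card *card)
{
	int rc;

	mutex_lock(&card->conf_mutex);
	/* Emits a BRIDGEDHOST=reset uevent, then one event per known host. */
	rc = qeth_bridgeport_an_set(card, 1);
	if (!rc)
		card->options.sbp.hostnotification = 1;
	mutex_unlock(&card->conf_mutex);
	return rc;
}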
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
new file mode 100644
index 000000000000..ae1bc04b8653
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright IBM Corp. 2013
+ * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "qeth_l2.h"
+
+#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+
+static int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return (card->state == CARD_STATE_SOFTSETUP) ||
+ (card->state == CARD_STATE_UP);
+}
+
+static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf,
+ int show_state)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE;
+ int rc = 0;
+ char *word;
+
+ if (!card)
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card) &&
+ card->options.sbp.supported_funcs)
+ rc = qeth_bridgeport_query_ports(card,
+ &card->options.sbp.role, &state);
+ if (!rc) {
+ if (show_state)
+ switch (state) {
+ case QETH_SBP_STATE_INACTIVE:
+ word = "inactive"; break;
+ case QETH_SBP_STATE_STANDBY:
+ word = "standby"; break;
+ case QETH_SBP_STATE_ACTIVE:
+ word = "active"; break;
+ default:
+ rc = -EIO;
+ }
+ else
+ switch (card->options.sbp.role) {
+ case QETH_SBP_ROLE_NONE:
+ word = "none"; break;
+ case QETH_SBP_ROLE_PRIMARY:
+ word = "primary"; break;
+ case QETH_SBP_ROLE_SECONDARY:
+ word = "secondary"; break;
+ default:
+ rc = -EIO;
+ }
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
+ card->options.sbp.role, state);
+ else
+ rc = sprintf(buf, "%s\n", word);
+ }
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc;
+}
+
+static ssize_t qeth_bridge_port_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
+}
+
+static ssize_t qeth_bridge_port_role_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
+ enum qeth_sbp_roles role;
+
+ if (!card)
+ return -EINVAL;
+ if (sysfs_streq(buf, "primary"))
+ role = QETH_SBP_ROLE_PRIMARY;
+ else if (sysfs_streq(buf, "secondary"))
+ role = QETH_SBP_ROLE_SECONDARY;
+ else if (sysfs_streq(buf, "none"))
+ role = QETH_SBP_ROLE_NONE;
+ else
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card)) {
+ rc = qeth_bridgeport_setrole(card, role);
+ if (!rc)
+ card->options.sbp.role = role;
+ } else
+ card->options.sbp.role = role;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
+ qeth_bridge_port_role_store);
+
+static ssize_t qeth_bridge_port_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
+}
+
+static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show,
+ NULL);
+
+static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int enabled;
+
+ if (!card)
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ enabled = card->options.sbp.hostnotification;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
+ int enable;
+
+ if (!card)
+ return -EINVAL;
+
+ if (sysfs_streq(buf, "0"))
+ enable = 0;
+ else if (sysfs_streq(buf, "1"))
+ enable = 1;
+ else
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (qeth_card_hw_is_reachable(card)) {
+ rc = qeth_bridgeport_an_set(card, enable);
+ if (!rc)
+ card->options.sbp.hostnotification = enable;
+ } else
+ card->options.sbp.hostnotification = enable;
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_hostnotify, 0644,
+ qeth_bridgeport_hostnotification_show,
+ qeth_bridgeport_hostnotification_store);
+
+static struct attribute *qeth_l2_bridgeport_attrs[] = {
+ &dev_attr_bridge_role.attr,
+ &dev_attr_bridge_state.attr,
+ &dev_attr_bridge_hostnotify.attr,
+ NULL,
+};
+
+static struct attribute_group qeth_l2_bridgeport_attr_group = {
+ .attrs = qeth_l2_bridgeport_attrs,
+};
+
+int qeth_l2_create_device_attributes(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
+}
+
+void qeth_l2_remove_device_attributes(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
+}
+
+/**
+ * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
+ * @card: qeth_card structure pointer
+ *
+ * Note: this function is called with conf_mutex held by the caller
+ */
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
+{
+ int rc;
+
+ if (!card)
+ return;
+ if (!card->options.sbp.supported_funcs)
+ return;
+ if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+ /* Conditional to avoid spurious error messages */
+ qeth_bridgeport_setrole(card, card->options.sbp.role);
+ /* Let the callback function refresh the stored role value. */
+ qeth_bridgeport_query_ports(card,
+ &card->options.sbp.role, NULL);
+ }
+ if (card->options.sbp.hostnotification) {
+ rc = qeth_bridgeport_an_set(card, 1);
+ if (rc)
+ card->options.sbp.hostnotification = 0;
+ } else
+ qeth_bridgeport_an_set(card, 0);
+}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1b0b2761f8d..3524d34ff694 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3447,6 +3447,7 @@ out_remove:
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
@@ -3593,6 +3596,13 @@ out:
return rc;
}
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ return 1;
+}
+
struct qeth_discipline qeth_l3_discipline = {
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3616,7 @@ struct qeth_discipline qeth_l3_discipline = {
.freeze = qeth_l3_pm_suspend,
.thaw = qeth_l3_pm_resume,
.restore = qeth_l3_pm_resume,
+ .control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index c1441ed282eb..c7763e482eb2 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index fc1339cf91ac..7c71e7b4febf 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -9,7 +9,6 @@
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/major.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h> /* request_region */
#include <linux/slab.h>
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index ddbe5a9e713d..af15a2fdab5e 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/ioport.h>
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index d9f268f23774..25c738e9ef19 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -9,7 +9,6 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
-#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6b4678a7900a..4ccb5d869389 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -507,7 +507,6 @@ static int jsflash_init(void)
}
/* Let us be really paranoid for modifications to probing code. */
- /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */
if (sparc_cpu_model != sun4m) {
/* We must be on sun4m because we use MMU Bypass ASI. */
return -ENXIO;
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index b0aae0536d58..b7acafc85099 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/of.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1f02003ea08d..c8bd092fc945 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -204,9 +204,9 @@ config SCSI_MULTI_LUN
Some devices support more than one LUN (Logical Unit Number) in order
to allow access to several media, e.g. CD jukebox, USB card reader,
mobile phone in mass storage mode. This option forces the kernel to
- probe for all LUNs by default. This setting can be overriden by
+ probe for all LUNs by default. This setting can be overridden by
max_luns boot/module parameter. Note that this option does not affect
- devices conforming to SCSI-3 or higher as they can explicitely report
+ devices conforming to SCSI-3 or higher as they can explicitly report
their number of LUNs. It is safe to say Y here unless you have one of
those rare devices which reacts in an unexpected way when probed for
multiple LUNs.
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 1e9d6ad9302b..bcd223868227 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -584,7 +584,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
NCR5380_setup(instance);
for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
+ if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9323d058706b..eaaf8705a5f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30200
+# define AAC_DRIVER_BUILD 30300
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dada38aeacc0..5c6a8703f535 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -480,7 +480,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
- u32 var;
+ u32 var = 0;
if (!(dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
@@ -500,13 +500,14 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
if (bled && (bled != -ETIMEDOUT))
return -EINVAL;
}
- if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+ if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
rx_writel(dev, MUnit.reserved2, 3);
msleep(5000); /* Delay 5 seconds */
var = 0x00000001;
}
- if (var != 0x00000001)
+ if (bled && (var != 0x00000001))
return -EINVAL;
+ ssleep(5);
if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
return -ENODEV;
if (startup_timeout < 300)
@@ -646,7 +647,7 @@ int _aac_rx_init(struct aac_dev *dev)
dev->sync_mode = 0; /* sync. mode not supported */
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 2244f315f33b..e66477c98240 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -387,8 +387,7 @@ int aac_sa_init(struct aac_dev *dev)
goto error_irq;
dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED,
- "aacraid", (void *)dev ) < 0) {
+ IRQF_SHARED, "aacraid", (void *)dev) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7e17107643d4..9c65aed26212 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -647,7 +647,7 @@ int aac_src_init(struct aac_dev *dev)
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
@@ -804,7 +804,7 @@ int aac_srcv_init(struct aac_dev *dev)
goto error_iounmap;
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
- IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ IRQF_SHARED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 3f7b6fee0a74..e86eb6a921fc 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -857,7 +857,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
SETPORT(SIMODE0, 0);
SETPORT(SIMODE1, 0);
- if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+ if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
goto out_host_put;
}
@@ -891,7 +891,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
SETPORT(SSTAT0, 0x7f);
SETPORT(SSTAT1, 0xef);
- if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+ if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
goto out_host_put;
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
index 9df9e2ce3538..8373447bd7d3 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -209,7 +209,6 @@ struct instruction {
#define AIC_OP_JC16 0x9105
#define AIC_OP_JNC16 0x9205
#define AIC_OP_CALL16 0x9305
-#define AIC_OP_CALL16 0x9305
/* Page extension is low three bits of second opcode byte. */
#define AIC_OP_JMPF 0xA005
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f6a30b8e5f9..652b41b4ddbd 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
- dma_addr_t dma_coherent_handle;
+
/*
********************************************************************
** here we need to tell iop 331 our freeccb.HighPart
** if freeccb.HighPart is not zero
********************************************************************
*/
- dma_coherent_handle = acb->dma_coherent_handle;
- cdb_phyaddr = (uint32_t)(dma_coherent_handle);
- cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+ cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+ cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
/*
***********************************************************************
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba1869d366..059ff477a398 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ec->irqaddr = ashost->fast + INT_REG;
ec->irqmask = 0x0a;
- ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+ ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
if (ret) {
printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778376c5..f8e060900052 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_unmap;
}
- ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+ ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f29055b..abc66f5263ec 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
goto out_free;
ret = request_irq(ec->irq, cumanascsi_2_intr,
- IRQF_DISABLED, "cumanascsi2", info);
+ 0, "cumanascsi2", info);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593615e9..5e1b73e1b743 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
goto out_free;
ret = request_irq(ec->irq, powertecscsi_intr,
- IRQF_DISABLED, "powertec", info);
+ 0, "powertec", info);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a3e6c8a3ff0f..296c936cc03c 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -90,6 +90,7 @@
#include <linux/init.h>
#include <linux/nvram.h>
#include <linux/bitops.h>
+#include <linux/wait.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
@@ -549,8 +550,10 @@ static void falcon_get_lock(void)
local_irq_save(flags);
- while (!in_irq() && falcon_got_lock && stdma_others_waiting())
- sleep_on(&falcon_fairness_wait);
+ wait_event_cmd(falcon_fairness_wait,
+ in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
+ local_irq_restore(flags),
+ local_irq_save(flags));
while (!falcon_got_lock) {
if (in_irq())
@@ -562,7 +565,10 @@ static void falcon_get_lock(void)
falcon_trying_lock = 0;
wake_up(&falcon_try_wait);
} else {
- sleep_on(&falcon_try_wait);
+ wait_event_cmd(falcon_try_wait,
+ falcon_got_lock && !falcon_trying_lock,
+ local_irq_restore(flags),
+ local_irq_save(flags));
}
}
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 2e28f6c419fe..1bfb0bd01198 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -98,6 +98,14 @@ struct be_mcc_obj {
struct be_queue_info cq;
};
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING 0x01
+#define MCC_TAG_STATE_TIMEOUT 0x02
+ uint8_t tag_state;
+ struct be_dma_mem tag_mem_state;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -122,6 +130,8 @@ struct be_ctrl_info {
unsigned short mcc_alloc_index;
unsigned short mcc_free_index;
unsigned int mcc_tag_available;
+
+ struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
};
#include "be_cmds.h"
@@ -129,6 +139,7 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY 250
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 3338391b64de..1432ed5e9fc6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -138,7 +138,7 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
* @phba: Driver private structure
* @tag: Tag for the MBX Command
* @wrb: the WRB used for the MBX Command
- * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
*
* Waits for MBX completion with the passed TAG.
*
@@ -148,21 +148,26 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
**/
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
uint32_t tag, struct be_mcc_wrb **wrb,
- void *cmd_hdr)
+ struct be_dma_mem *mbx_cmd_mem)
{
int rc = 0;
uint32_t mcc_tag_response;
uint16_t status = 0, addl_status = 0, wrb_num = 0;
struct be_mcc_wrb *temp_wrb;
- struct be_cmd_req_hdr *ioctl_hdr;
- struct be_cmd_resp_hdr *ioctl_resp_hdr;
+ struct be_cmd_req_hdr *mbx_hdr;
+ struct be_cmd_resp_hdr *mbx_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
if (beiscsi_error(phba)) {
free_mcc_tag(&phba->ctrl, tag);
- return -EIO;
+ return -EPERM;
}
+ /* Set MBX Tag state to Active */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
@@ -171,56 +176,71 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
BEISCSI_HOST_MBX_TIMEOUT));
if (rc <= 0) {
+ struct be_dma_mem *tag_mem;
+ /* Set MBX Tag state to timeout */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Store resource addr to be freed later */
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+ if (mbx_cmd_mem) {
+ tag_mem->size = mbx_cmd_mem->size;
+ tag_mem->va = mbx_cmd_mem->va;
+ tag_mem->dma = mbx_cmd_mem->dma;
+ } else
+ tag_mem->size = 0;
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
- rc = -EBUSY;
-
- /* decrement the mccq used count */
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
-
- goto release_mcc_tag;
- } else
+ return -EBUSY;
+ } else {
rc = 0;
+ /* Set MBX Tag state to completed */
+ spin_lock(&phba->ctrl.mbox_lock);
+ phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+ }
mcc_tag_response = phba->ctrl.mcc_numtag[tag];
status = (mcc_tag_response & CQE_STATUS_MASK);
addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
CQE_STATUS_ADDL_SHIFT);
- if (cmd_hdr) {
- ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+ if (mbx_cmd_mem) {
+ mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
} else {
wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
CQE_STATUS_WRB_SHIFT;
temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
- ioctl_hdr = embedded_payload(temp_wrb);
+ mbx_hdr = embedded_payload(temp_wrb);
if (wrb)
*wrb = temp_wrb;
}
if (status || addl_status) {
- beiscsi_log(phba, KERN_ERR,
+ beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Failed for "
"Subsys : %d Opcode : %d with "
"Status : %d and Extd_Status : %d\n",
- ioctl_hdr->subsystem,
- ioctl_hdr->opcode,
+ mbx_hdr->subsystem,
+ mbx_hdr->opcode,
status, addl_status);
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+ mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : Insufficent Buffer Error "
"Resp_Len : %d Actual_Resp_Len : %d\n",
- ioctl_resp_hdr->response_length,
- ioctl_resp_hdr->actual_resp_len);
+ mbx_resp_hdr->response_length,
+ mbx_resp_hdr->actual_resp_len);
rc = -EAGAIN;
goto release_mcc_tag;
@@ -319,6 +339,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
u16 compl_status, extd_status;
unsigned short tag;
@@ -338,7 +359,32 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
- wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+ if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+ struct be_dma_mem *tag_mem;
+ tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Completion for timeout Command "
+ "from FW\n");
+ /* Check if memory needs to be freed */
+ if (tag_mem->size)
+ pci_free_consistent(ctrl->pdev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+
+ /* Change tag state */
+ spin_lock(&phba->ctrl.mbox_lock);
+ ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+ spin_unlock(&phba->ctrl.mbox_lock);
+
+ /* Free MCC Tag */
+ free_mcc_tag(ctrl, tag);
+ }
+
return 0;
}
@@ -354,8 +400,23 @@ static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
return NULL;
}
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on the adapter state, the appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint32_t iscsi_err_flag;
+
+ if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+ iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+ else
+ iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
iscsi_session_failure(cls_session->dd_data, iscsi_err_flag);
}
@@ -386,18 +447,6 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
}
}
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
- u16 num_popped)
-{
- u32 val = 0;
- val |= qid & DB_CQ_RING_ID_MASK;
- if (arm)
- val |= 1 << DB_CQ_REARM_SHIFT;
- val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
- iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
-
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
struct be_mcc_compl *compl;
@@ -428,7 +477,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
}
if (num)
- beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+ hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
return status;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 627ebbe0172c..7cf7f99ee442 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -103,7 +103,7 @@ struct be_mcc_compl {
/********** MCC door bell ************/
#define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF /* bits 0 - 15 */
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
@@ -709,7 +709,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
/* ISCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
@@ -1017,8 +1018,8 @@ struct be_mcc_wrb_context {
int *users_final_status;
} __packed;
-#define DB_DEF_PDU_RING_ID_MASK 0x3FF /* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */
#define DB_DEF_PDU_REARM_SHIFT 14
#define DB_DEF_PDU_EVENT_SHIFT 15
#define DB_DEF_PDU_CQPROC_SHIFT 16
@@ -1317,4 +1318,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 889066d9d6fb..a3df43324c98 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -793,7 +793,7 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
break;
case BE2ISCSI_LINK_SPEED_100MBPS:
- ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+ ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
break;
case BE2ISCSI_LINK_SPEED_1GBPS:
ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
@@ -1153,16 +1153,18 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
return -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BS_%d : mgmt_open_connection Failed");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ if (ret != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
beiscsi_free_ep(beiscsi_ep);
- return -EBUSY;
+ return ret;
}
ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
@@ -1359,6 +1361,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
free_ep:
+ msleep(BEISCSI_LOGOUT_SYNC_DELAY);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f375051483a..0d822297aa80 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -228,24 +228,25 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
struct invalidate_command_table *inv_tbl;
struct be_dma_mem nonemb_cmd;
unsigned int cid, tag, num_invalidate;
+ int rc;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!aborted_task || !aborted_task->sc) {
/* we raced */
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
aborted_io_task = aborted_task->dd_data;
if (!aborted_io_task->scsi_cmnd) {
/* raced or invalid command */
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/* Invalidate WRB Posted for this Task */
AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
aborted_io_task->pwrb_handle->pwrb,
@@ -285,9 +286,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+
return iscsi_eh_abort(sc);
}
@@ -303,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
struct invalidate_command_table *inv_tbl;
struct be_dma_mem nonemb_cmd;
unsigned int cid, tag, i, num_invalidate;
+ int rc;
/* invalidate iocbs */
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return FAILED;
}
conn = session->leadconn;
@@ -325,7 +329,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
continue;
- if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+ if (sc->device->lun != abrt_task->sc->device->lun)
continue;
/* Invalidate WRB Posted for this Task */
@@ -338,7 +342,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
num_invalidate++;
inv_tbl++;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
inv_tbl = phba->inv_tbl;
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -363,9 +367,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
return FAILED;
}
- beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ if (rc != -EBUSY)
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
}
@@ -674,8 +679,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
}
pci_set_master(pcidev);
- if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+ pci_disable_device(pcidev);
+ return ret;
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev,
+ DMA_BIT_MASK(32));
+ }
+ } else {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
pci_disable_device(pcidev);
@@ -804,14 +820,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
unsigned char rearm, unsigned char event)
{
u32 val = 0;
- val |= id & DB_EQ_RING_ID_MASK;
+
if (rearm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clr_interrupt)
val |= 1 << DB_EQ_CLR_SHIFT;
if (event)
val |= 1 << DB_EQ_EVNT_SHIFT;
+
val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+ /* Setting lower order EQ_ID Bits */
+ val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order EQ_ID Bits */
+ val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+ DB_EQ_RING_ID_HIGH_MASK)
+ << DB_EQ_HIGH_SET_SHIFT);
+
iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
@@ -873,7 +898,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
struct be_queue_info *cq;
unsigned int num_eq_processed;
struct be_eq_obj *pbe_eq;
- unsigned long flags;
pbe_eq = dev_id;
eq = &pbe_eq->q;
@@ -882,31 +906,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
phba = pbe_eq->phba;
num_eq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
-
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
- } else {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
- if (pbe_eq->todo_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
}
if (num_eq_processed)
@@ -927,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
struct hwi_context_memory *phwi_context;
struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- struct be_queue_info *cq;
struct be_queue_info *mcc;
unsigned long flags, index;
unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -953,72 +960,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)
num_ioeq_processed = 0;
num_mcceq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) == mcc->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- num_mcceq_processed++;
- } else {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
- num_ioeq_processed++;
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- }
- if (num_ioeq_processed || num_mcceq_processed) {
- if (pbe_eq->todo_mcc_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
-
- if ((num_mcceq_processed) && (!num_ioeq_processed))
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed) , 1, 1);
- else
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed), 0, 1);
-
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- } else {
- cq = &phwi_context->be_cq[0];
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
-
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) != cq->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- } else {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) == mcc->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = true;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ num_mcceq_processed++;
+ } else {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
num_ioeq_processed++;
}
- if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ }
+ if (num_ioeq_processed || num_mcceq_processed) {
+ if (pbe_eq->todo_mcc_cq)
queue_work(phba->wq, &pbe_eq->work_cqs);
- if (num_ioeq_processed) {
+ if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
- num_ioeq_processed, 1, 1);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- }
+ (num_ioeq_processed +
+ num_mcceq_processed) , 1, 1);
+ else
+ hwi_ring_eq_db(phba, eq->id, 0,
+ (num_ioeq_processed +
+ num_mcceq_processed), 0, 1);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -1093,15 +1068,25 @@ free_msix_irqs:
return ret;
}
-static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
unsigned char rearm, unsigned char event)
{
u32 val = 0;
- val |= id & DB_CQ_RING_ID_MASK;
+
if (rearm)
val |= 1 << DB_CQ_REARM_SHIFT;
+
val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+ /* Setting lower order CQ_ID Bits */
+ val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+ /* Setting Higher order CQ_ID Bits */
+ val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+ DB_CQ_RING_ID_HIGH_MASK)
+ << DB_CQ_HIGH_SET_SHIFT);
+
iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
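For reference, a small standalone sketch (not part of the patch) of how the CQ doorbell value is now assembled: CQ_ID bits 0-9 land in doorbell bits 0-9 and CQ_ID bits 10-14 in doorbell bits 11-15. It assumes only the DB_CQ_* macros added to be_main.h further down; the id and count values are made up, and the rearm bit is omitted for brevity.
#include <stdio.h>
#define DB_CQ_RING_ID_LOW_MASK   0x3FF	/* bits 0 - 9 */
#define DB_CQ_RING_ID_HIGH_MASK  0x1F	/* bits 11 - 15 */
#define DB_CQ_HIGH_SET_SHIFT     11
#define DB_CQ_HIGH_FEILD_SHIFT   10
#define DB_CQ_NUM_POPPED_SHIFT   16
int main(void)
{
	unsigned int id = 0x4D2, num_processed = 32;
	unsigned int val = 0;
	/* entries processed since the last doorbell */
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	/* low 10 bits of the CQ id */
	val |= id & DB_CQ_RING_ID_LOW_MASK;
	/* remaining CQ id bits, shifted into doorbell bits 11-15 */
	val |= ((id >> DB_CQ_HIGH_FEILD_SHIFT) & DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT;
	printf("doorbell value for CQ id 0x%x: 0x%x\n", id, val);
	return 0;
}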
@@ -1150,9 +1135,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
return 1;
}
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
return 0;
}
@@ -1342,8 +1327,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
resid = csol_cqe->res_cnt;
if (!task->sc) {
- if (io_task->scsi_cmnd)
+ if (io_task->scsi_cmnd) {
scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
return;
}
@@ -1380,6 +1367,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
conn->rxdata_octets += resid;
unmap:
scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
@@ -1568,7 +1556,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
pwrb = pwrb_handle->pwrb;
type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
switch (type) {
case HWH_TYPE_IO:
case HWH_TYPE_IO_RD:
@@ -1607,7 +1595,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
}
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
@@ -4360,12 +4348,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
goto boot_freemem;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BM_%d : beiscsi_get_session_info Failed");
- goto boot_freemem;
+
+ if (ret != -EBUSY)
+ goto boot_freemem;
+ else
+ return ret;
}
session_resp = nonemb_cmd.va ;
@@ -4625,6 +4617,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
spin_unlock(&phba->io_sgl_lock);
io_task->psgl_handle = NULL;
}
+
+ if (io_task->scsi_cmnd) {
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ io_task->scsi_cmnd = NULL;
+ }
} else {
if (!beiscsi_conn->login_in_progress)
beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
@@ -4646,9 +4643,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
* login/startup related tasks.
*/
beiscsi_conn->login_in_progress = 0;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
beiscsi_cleanup_task(task);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
@@ -5216,11 +5213,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
}
pci_disable_msix(phba->pcidev);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
if (unload_state == BEISCSI_CLEAN_UNLOAD) {
destroy_workqueue(phba->wq);
@@ -5273,6 +5269,8 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
return;
}
+ phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+ iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_disable_device(pcidev);
}
@@ -5429,32 +5427,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5594,6 +5578,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_tag[i] = i + 1;
phba->ctrl.mcc_numtag[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
+ memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+ sizeof(struct beiscsi_mcc_tag_state));
}
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
@@ -5614,32 +5600,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5668,11 +5640,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
free_blkenbld:
destroy_workqueue(phba->wq);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 31fa27b4a9b2..9380b55bdeaf 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.0.659.0"
+#define BUILD_STR "10.2.125.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -97,9 +97,14 @@
#define INVALID_SESS_HANDLE 0xFFFFFFFF
+/**
+ * Adapter States
+ **/
#define BE_ADAPTER_LINK_UP 0x001
#define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
+#define BE_ADAPTER_STATE_SHUTDOWN 0x008
+
#define BEISCSI_CLEAN_UNLOAD 0x01
#define BEISCSI_EEH_UNLOAD 0x02
@@ -135,11 +140,15 @@
#define DB_RXULP0_OFFSET 0xA0
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Higher Order EQ_ID bits */
+#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT 11
+#define DB_EQ_HIGH_FEILD_SHIFT 9
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -147,7 +156,12 @@
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
-#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */
+/* Higher Order CQ_ID bits */
+#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT 11
+#define DB_CQ_HIGH_FEILD_SHIFT 10
+
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -821,6 +835,9 @@ void beiscsi_process_all_cqs(struct work_struct *work);
void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
struct iscsi_task *task);
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int num_processed,
+ unsigned char rearm, unsigned char event);
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index b2fcac78feaa..088bdf752cfa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -828,22 +828,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+
+ if (resp_buf)
+ memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
if (rc) {
- /* Check if the IOCTL needs to be re-issued */
+ /* Check if the MBX Cmd needs to be re-issued */
if (rc == -EAGAIN)
return rc;
- beiscsi_log(phba, KERN_ERR,
+ beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
- goto free_cmd;
+ if (rc != -EBUSY)
+ goto free_cmd;
+ else
+ return rc;
}
-
- if (resp_buf)
- memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
@@ -1348,7 +1351,6 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
{
int rc;
unsigned int tag;
- struct be_mcc_wrb *wrb = NULL;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
@@ -1358,7 +1360,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e15de6e..315d6d6dcfc8 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3878,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
bfa_trc(sfp, sfp->data_valid);
if (sfp->data_valid) {
u32 size = sizeof(struct sfp_mem_s);
- u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+ u8 *des = (u8 *) &(sfp->sfpmem);
memcpy(des, sfp->dbuf_kva, size);
}
/*
@@ -6851,7 +6851,7 @@ static u32
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg_u dev_status;
- u32 status;
+ int status;
u32 ret_status;
int i;
@@ -6899,7 +6899,7 @@ static u32
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
- u32 status;
+ int status;
/*
* len must be multiple of 4 and not exceeding fifo size
@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
@@ -7021,7 +7021,8 @@ bfa_status_t
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
- u32 n, status;
+ u32 n;
+ int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 157f6044a9bb..8994fb857ee9 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2304,8 +2304,10 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (bfa_fcport_is_dport(&bfad->bfa))
+ if (bfa_fcport_is_dport(&bfad->bfa)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_DPORT_ERR;
+ }
if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9967f9c14851..f067332bf763 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -73,9 +73,14 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
break;
- case BFI_IOIM_STS_ABORTED:
case BFI_IOIM_STS_TIMEDOUT:
+ host_status = DID_TIME_OUT;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
case BFI_IOIM_STS_PATHTOV:
+ host_status = DID_TRANSPORT_DISRUPTED;
+ cmnd->result = ScsiResult(host_status, 0);
+ break;
default:
host_status = DID_ERROR;
cmnd->result = ScsiResult(host_status, 0);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1ebf3fb683e6..6a976657b475 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.4.1"
+#define BNX2FC_VERSION "2.4.2"
#define PFX "bnx2fc: "
@@ -367,6 +367,7 @@ struct bnx2fc_rport {
atomic_t num_active_ios;
u32 flush_in_prog;
unsigned long timestamp;
+ unsigned long retry_delay_timestamp;
struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
struct list_head active_cmd_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9b948505d118..6287f6a8b79d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Sep 17, 2013"
+#define DRV_MODULE_RELDATE "Dec 11, 2013"
static char version[] =
@@ -850,6 +850,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
return;
default:
@@ -2389,6 +2392,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
+ /* Ensure ALL destroy work has been completed before return */
+ flush_workqueue(bnx2fc_wq);
+
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed880891cb7c..32a5e0a2a669 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return FAILED;
}
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
- memset(mp_req->req_buf, 0, PAGE_SIZE);
- memset(mp_req->resp_buf, 0, PAGE_SIZE);
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;
@@ -1871,7 +1871,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rc = SCSI_MLQUEUE_TARGET_BUSY;
goto exit_qcmd;
}
-
+ if (tgt->retry_delay_timestamp) {
+ if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+ tgt->retry_delay_timestamp = 0;
+ } else {
+ /* If retry_delay timer is active, flow off the ML */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ }
io_req = bnx2fc_cmd_alloc(tgt);
if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1961,6 +1969,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
" fcp_resid = 0x%x\n",
io_req->cdb_status, io_req->fcp_resid);
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+ io_req->cdb_status == SAM_STAT_BUSY) {
+ /* Set retry_delay_timestamp to jiffies plus the
+ * retry_delay_timer value (in units of 100 ms) */
+ tgt->retry_delay_timestamp = jiffies +
+ fcp_rsp->retry_delay_timer * HZ / 10;
+ }
+
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
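As a quick worked example of the retry-delay handling added above (illustrative only; the HZ and retry_delay_timer values are made up): the FCP_RSP retry_delay_timer field is in units of 100 ms, so the driver converts it to jiffies with "* HZ / 10" and flows off the target until that time has passed.
#include <stdio.h>
int main(void)
{
	unsigned long hz = 250;		/* example CONFIG_HZ */
	unsigned long retry_delay = 5;	/* retry_delay_timer, 100 ms units */
	unsigned long delay_jiffies = retry_delay * hz / 10;
	/* 5 * 250 / 10 == 125 jiffies == 500 ms of flow-off */
	printf("flow off target for %lu jiffies (%lu ms)\n",
	       delay_jiffies, retry_delay * 100);
	return 0;
}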
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177dfb53..6870cf6781d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -386,6 +386,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_prod_idx = 0x8000;
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
+ tgt->retry_delay_timestamp = 0;
if (rdata->flags & FC_RP_FLAGS_RETRY &&
rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
@@ -673,7 +674,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map SQ */
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
- tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +688,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
- tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +702,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
- tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +714,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq, 0, tgt->rq_mem_size);
- tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
- tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +727,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
- num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -731,13 +736,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map XFERQ */
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
- tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +755,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
- tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +768,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
- (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
- (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
@@ -777,7 +782,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
- num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -786,7 +791,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map ConnDB */
@@ -805,8 +810,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map LCQ */
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
- tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);
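The queue sizing changes above all use the same round-up idiom; a standalone sketch of it (assuming the 4 KiB cnic page size defined in cnic_if.h, with an arbitrary example size):
#include <stdio.h>
#define CNIC_PAGE_BITS	12
#define CNIC_PAGE_SIZE	(1UL << CNIC_PAGE_BITS)
#define CNIC_PAGE_MASK	(~(CNIC_PAGE_SIZE - 1))
int main(void)
{
	unsigned long q_mem_size = 10000;	/* arbitrary example size */
	/* round up to a whole number of cnic pages */
	q_mem_size = (q_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
	printf("%lu bytes = %lu cnic pages\n",
	       q_mem_size, q_mem_size / CNIC_PAGE_SIZE);
	return 0;
}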
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23df4b4f..b5ffd280a1ae 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
* yield integral num of page buffers
*/
/* adjust SQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg;
else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust CQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
if (hba->max_cqes < num_elements_per_pg)
hba->max_cqes = num_elements_per_pg;
else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust RQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
if (hba->max_rqes < num_elements_per_pg)
hba->max_rqes = num_elements_per_pg;
else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
/* SQ page table */
memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
- num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.sq_phys;
if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* RQ page table */
memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
- num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.rq_phys;
if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* CQ page table */
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
- num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.cq_phys;
if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
}
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for SQ which is page aligned */
ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
ep->qp.sq_mem_size =
- (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for CQ which is page aligned */
ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
ep->qp.cq_mem_size =
- (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for RQ which is page aligned */
ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
ep->qp.rq_mem_size =
- (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
- ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
if (en_tcp_dack)
iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
iscsi_init.num_tasks_per_conn = hba->max_sqes;
- iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_num_wqes = hba->max_sqes;
iscsi_init.cq_log_wqes_per_page =
- (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+ (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
iscsi_init.cq_num_wqes = hba->max_cqes;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
iscsi_init.rq_num_wqes = hba->max_rqes;
@@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
u32 datalen = 0;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->back_lock);
task = iscsi_itt_to_task(conn,
resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task)
@@ -1432,7 +1432,7 @@ done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
fail:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
return 0;
}
@@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
int pad_len;
login = (struct bnx2i_login_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
@@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_buf,
bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
int pad_len;
text = (struct bnx2i_text_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
goto done;
@@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_wr_ptr -
bnx2i_conn->gen_pdu.resp_buf);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
struct iscsi_tm_rsp *resp_hdr;
tmf_cqe = (struct bnx2i_tmf_response *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
if (!task)
@@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
struct iscsi_logout_rsp *resp_hdr;
logout = (struct bnx2i_logout_response *) cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
if (!task)
@@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return 0;
}
@@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
struct iscsi_task *task;
nop_in = (struct bnx2i_nop_in_msg *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
if (task)
__iscsi_put_task(task);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
/**
@@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
nop_in = (struct bnx2i_nop_in_msg *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = nop_in->op_code;
@@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
}
done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return tgt_async_nop;
}
@@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
return;
}
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
resp_hdr->opcode = async_cqe->op_code;
@@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
(struct iscsi_hdr *)resp_hdr, NULL, 0);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
@@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
} else
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = reject->op_code;
@@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
reject->data_length);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
}
/**
@@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
struct iscsi_task *task;
cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
if (!task)
printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
@@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
int rc = 0;
int cpu;
- spin_lock(&session->lock);
+ spin_lock(&session->back_lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task || !task->sc) {
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
return -EINVAL;
}
sc = task->sc;
@@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
else
cpu = sc->request->cpu;
- spin_unlock(&session->lock);
+ spin_unlock(&session->back_lock);
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7d5b03..166543f7ef55 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
struct iscsi_bd *mp_bdt;
u64 addr;
- hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
goto out;
}
- hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ CNIC_PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
- mp_bdt->buffer_length = PAGE_SIZE;
+ mp_bdt->buffer_length = CNIC_PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
@@ -565,12 +566,12 @@ out:
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
if (hba->mp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
}
if (hba->dummy_buffer) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
struct bnx2i_conn *bnx2i_conn)
{
if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.resp_bd_tbl,
bnx2i_conn->gen_pdu.resp_bd_dma);
bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (bnx2i_conn->gen_pdu.req_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
bnx2i_conn->gen_pdu.req_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
goto login_req_bd_tbl_failure;
bnx2i_conn->gen_pdu.resp_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
return 0;
login_resp_bd_tbl_failure:
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -1169,10 +1170,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
if (task->state == ISCSI_TASK_ABRT_TMF) {
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->back_lock);
wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->back_lock);
}
bnx2i_iscsi_unmap_sg_list(task->dd_data);
}
@@ -2059,7 +2060,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
goto out;
if (session) {
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
if (session->state == ISCSI_STATE_LOGGING_OUT) {
if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
@@ -2075,7 +2076,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
} else
close = 1;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
}
bnx2i_ep->state = EP_STATE_DISCONN_START;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a9f84238a53..e8ee5e5fe0ef 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -175,52 +175,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
sizeof(struct fw_ofld_tx_data_wr));
}
-
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct cxgbi_device *cdev,
- struct l2t_entry *l2t)
-{
- struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (lldi->filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
@@ -248,8 +202,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct cpl_act_open_req *req =
(struct cpl_act_open_req *)skb->head;
- req = (struct cpl_act_open_req *)skb->head;
-
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
@@ -258,7 +210,9 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t));
opt2 |= 1 << 22;
req->opt2 = cpu_to_be32(opt2);
@@ -271,8 +225,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- req = (struct cpl_t5_act_open_req *)skb->head;
-
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
@@ -281,7 +233,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+ req->params = cpu_to_be64(V_FILTER_TUPLE(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
opt2 |= 1 << 31;
req->opt2 = cpu_to_be32(opt2);
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index d01f01604140..eb29fe7eaf49 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,7 @@ found:
/* With interrupts enabled, it will sometimes hang when doing heavy
* reads. So better not enable them until I figure it out. */
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+ if (request_irq(instance->irq, dtc_intr, 0,
"dtc", instance)) {
printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 94de88955a99..ebf57364df91 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1221,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
/* Board detected, allocate its IRQ */
if (request_irq(irq, do_interrupt_handler,
- IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+ (subversion == ESA) ? IRQF_SHARED : 0,
driver_name, (void *)&sha[j])) {
printk("%s: unable to allocate IRQ %u, detaching.\n", name,
irq);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 1663173cdb91..8319d2b417b8 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -687,7 +687,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev
return 0;
if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
- if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
+ if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
reg_IRQ[gc->IRQ]++;
if (!gc->IRQ_TR)
reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */
@@ -921,7 +921,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
for (i = 0; i < MAXIRQ; i++)
if (reg_IRQ[i])
- request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
+ request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
HBA_ptr = first_HBA;
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index b9750e296d71..6776931e25d4 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -231,7 +231,7 @@ use_legacy_interrupts:
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
- unsigned long flags = IRQF_DISABLED;
+ unsigned long flags = 0;
if (a->intr_mode == INTR_MODE_LEGACY)
flags |= IRQF_SHARED;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 9bf285df58dd..a82030aa8577 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -165,13 +165,9 @@ static int esas2r_log_master(const long level,
/*
* Put a line break at the end of the formatted string so that
- * we don't wind up with run-on messages. only append if there
- * is enough space in the buffer.
+ * we don't wind up with run-on messages.
*/
- if (strlen(event_buffer) < buflen)
- strcat(buffer, "\n");
-
- printk(event_buffer);
+ printk("%s\n", event_buffer);
spin_unlock_irqrestore(&event_buffer_lock, flags);
}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5cec6c60ca22..7176365e916b 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -461,7 +461,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
if (instance->irq != SCSI_IRQ_NONE)
if (request_irq(instance->irq, generic_NCR5380_intr,
- IRQF_DISABLED, "NCR5380", instance)) {
+ 0, "NCR5380", instance)) {
printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ce5ef0190bad..0f1ae13ce7c7 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4711,7 +4711,7 @@ static int __init gdth_isa_probe_one(u32 isa_bios)
printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
isa_bios, ha->irq, ha->drq);
- error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
if (error) {
printk("GDT-ISA: Unable to allocate IRQ\n");
goto out_host_put;
@@ -4843,7 +4843,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
eisa_slot >> 12, ha->irq);
- error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+ error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
if (error) {
printk("GDT-EISA: Unable to allocate IRQ\n");
goto out_host_put;
@@ -4979,7 +4979,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
ha->irq);
error = request_irq(ha->irq, gdth_interrupt,
- IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+ IRQF_SHARED, "gdth", ha);
if (error) {
printk("GDT-PCI: Unable to allocate IRQ\n");
goto out_host_put;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f28ea070d3df..3cbb57a8b846 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -398,7 +398,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->ordered_tag = sht->ordered_tag;
shost->no_write_same = sht->no_write_same;
- if (shost_eh_deadline == -1)
+ if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
shost->eh_deadline = -1;
else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
shost_printk(KERN_WARNING, shost,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 868318a7067c..8cf4a0c69baf 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
-#include <linux/kthread.h>
#include <linux/jiffies.h>
+#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -118,6 +118,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+ {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -163,6 +168,11 @@ static struct board_type products[] = {
{0x21C7103C, "Smart Array", &SA5_access},
{0x21C8103C, "Smart Array", &SA5_access},
{0x21C9103C, "Smart Array", &SA5_access},
+ {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+ {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+ {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+ {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+ {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -182,8 +192,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
+#define VPD_PAGE (1 << 8)
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
@@ -204,7 +215,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
- int nsgs, int *bucket_map);
+ int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -280,6 +297,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
return 1;
}
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int status, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &status) != 1)
+ return -EINVAL;
+ h = shost_to_hba(shost);
+ h->acciopath_status = !!status;
+ dev_warn(&h->pdev->dev,
+ "hpsa: HP SSD Smart Path %s via sysfs update.\n",
+ h->acciopath_status ? "enabled" : "disabled");
+ return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int debug_level, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ return -EINVAL;
+ if (debug_level < 0)
+ debug_level = 0;
+ h = shost_to_hba(shost);
+ h->raid_offload_debug = debug_level;
+ dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+ h->raid_offload_debug);
+ return count;
+}
+
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -327,6 +393,17 @@ static ssize_t host_show_transport_mode(struct device *dev,
"performant" : "simple");
}
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+ (h->acciopath_status == 1) ? "enabled" : "disabled");
+}
+
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"1(ADM)", "UNKNOWN"
};
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ int offload_enabled;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ offload_enabled = hdev->offload_enabled;
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+ host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+ host_show_hp_ssd_smart_path_status,
+ host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+ host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
+ &dev_attr_hp_ssd_smart_path_enabled,
NULL,
};
@@ -530,6 +644,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
+ &dev_attr_hp_ssd_smart_path_status,
+ &dev_attr_raid_offload_debug,
NULL,
};
@@ -570,6 +686,9 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
struct reply_pool *rq = &h->reply_queue[q];
unsigned long flags;
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return h->access.command_completed(h, q);
+
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h, q);
@@ -590,6 +709,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
return a;
}
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
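A minimal sketch of how those tag bits pack, using a made-up block fetch table value of 3 and the 110b command type described above:

	u32 performant = 1 | (3 << 1);            /* bit 0 + fetch entry in bits 1-3 */
	u32 ioaccel1   = 1 | (3 << 1) | (6 << 4); /* same, plus command type 110b in bits 4-6 */
	u32 ioaccel2   = 3;                       /* pull count only; submitted via its own register */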
/* set_performant_mode: Modify the tag for cciss performant
* set bit 0 for pull model, bits 3-1 for block fetch
* register number
@@ -598,12 +743,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (likely(h->msix_vector))
+ if (likely(h->msix_vector > 0))
c->Header.ReplyQueue =
raw_smp_processor_id() % h->nreply_queues;
}
}
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit (bit 0)
+ * - pull count (bits 1-3)
+ * - command type (bits 4-6)
+ */
+ c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+ IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit not used in ioaccel mode 2
+ * - pull count (bits 0-3)
+ * - command type isn't needed for ioaccel2
+ */
+ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
static int is_firmware_flash_cmd(u8 *cdb)
{
return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
@@ -638,7 +818,16 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
{
unsigned long flags;
- set_performant_mode(h, c);
+ switch (c->cmd_type) {
+ case CMD_IOACCEL1:
+ set_ioaccel1_performant_mode(h, c);
+ break;
+ case CMD_IOACCEL2:
+ set_ioaccel2_performant_mode(h, c);
+ break;
+ default:
+ set_performant_mode(h, c);
+ }
dial_down_lockup_detection_during_fw_flash(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+
+ /* Raid offload parameters changed. */
+ h->dev[entry]->offload_config = new_entry->offload_config;
+ h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+ h->dev[entry]->raid_map = new_entry->raid_map;
+
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
*/
if (dev1->raid_level != dev2->raid_level)
return 1;
+ if (dev1->offload_config != dev2->offload_config)
+ return 1;
+ if (dev1->offload_enabled != dev2->offload_enabled)
+ return 1;
return 0;
}
@@ -932,6 +1133,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_UPDATED;
return DEVICE_SAME;
} else {
+ /* Keep offline devices offline */
+ if (needle->volume_offline)
+ return DEVICE_NOT_FOUND;
return DEVICE_CHANGED;
}
}
@@ -940,6 +1144,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_NOT_FOUND;
}
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct offline_device_entry *device;
+ unsigned long flags;
+
+ /* Check to see if device is already on the list */
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_entry(device, &h->offline_device_list, offline_list) {
+ if (memcmp(device->scsi3addr, scsi3addr,
+ sizeof(device->scsi3addr)) == 0) {
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+ /* Device is not on the list, add it. */
+ device = kmalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ return;
+ }
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_add_tail(&device->offline_list, &h->offline_device_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *sd)
+{
+ if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ switch (sd->volume_offline) {
+ case HPSA_LV_OK:
+ break;
+ case HPSA_LV_UNDERGOING_ERASE:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ }
+}
+
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
@@ -1004,6 +1312,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
for (i = 0; i < nsds; i++) {
if (!sd[i]) /* if already added above. */
continue;
+
+ /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+ * as the SCSI mid-layer does not handle such devices well.
+ * It relentlessly loops sending TUR at 3Hz, then READ(10)
+ * at 160Hz, and prevents the system from coming up.
+ */
+ if (sd[i]->volume_offline) {
+ hpsa_show_volume_status(h, sd[i]);
+ dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+ h->scsi_host->host_no,
+ sd[i]->bus, sd[i]->target, sd[i]->lun);
+ continue;
+ }
+
device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h->ndevices, &entry);
if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
}
spin_unlock_irqrestore(&h->devlock, flags);
+ /* Monitor devices which are in one of several NOT READY states to be
+ * brought online later. This must be done without holding h->devlock,
+ * so don't touch h->dev[]
+ */
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ if (sd[i]->volume_offline)
+ hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+ }
+
/* Don't notify scsi mid layer of any changes the first time through
* (or if there are no changes) scsi_scan_host will do it later the
* first time through.
@@ -1187,11 +1520,163 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
+
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ struct CommandList *c,
+ struct scsi_cmnd *cmd,
+ struct io_accel2_cmd *c2)
+{
+ int data_len;
+ int retry = 0;
+
+ switch (c2->error_data.serv_response) {
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with check condition.\n",
+ "HP SSD Smart Path");
+ if (c2->error_data.data_present !=
+ IOACCEL2_SENSE_DATA_PRESENT)
+ break;
+ /* copy the sense data */
+ data_len = c2->error_data.sense_data_len;
+ if (data_len > SCSI_SENSE_BUFFERSIZE)
+ data_len = SCSI_SENSE_BUFFERSIZE;
+ if (data_len > sizeof(c2->error_data.sense_data_buff))
+ data_len =
+ sizeof(c2->error_data.sense_data_buff);
+ memcpy(cmd->sense_buffer,
+ c2->error_data.sense_data_buff, data_len);
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with BUSY status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with reservation conflict.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+ /* Make scsi midlayer do unlimited retries */
+ cmd->result = DID_IMM_RETRY << 16;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with aborted status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with unrecognized status: 0x%02x\n",
+ "HP SSD Smart Path", c2->error_data.status);
+ retry = 1;
+ break;
+ }
+ break;
+ case IOACCEL2_SERV_RESPONSE_FAILURE:
+ /* don't expect to get here. */
+ dev_warn(&h->pdev->dev,
+ "unexpected delivery or target failure, status = 0x%02x\n",
+ c2->error_data.status);
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+ dev_warn(&h->pdev->dev, "task management function rejected.\n");
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+ dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: Unrecognized server response: 0x%02x\n",
+ "HP SSD Smart Path",
+ c2->error_data.serv_response);
+ retry = 1;
+ break;
+ }
+
+ return retry; /* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ struct hpsa_scsi_dev_t *dev)
+{
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ int raid_retry = 0;
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+ c2->error_data.status == 0)) {
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+ c2->error_data.serv_response ==
+ IOACCEL2_SERV_RESPONSE_FAILURE) {
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ dev_warn(&h->pdev->dev,
+ "%s: Path is unavailable, retrying on standard path.\n",
+ "HP SSD Smart Path");
+ else
+ dev_warn(&h->pdev->dev,
+ "%s: Error 0x%02x, retrying on standard path.\n",
+ "HP SSD Smart Path", c2->error_data.status);
+
+ dev->offload_enabled = 0;
+ h->drv_req_rescan = 1; /* schedule controller for a rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
+ /* If error found, disable Smart Path, schedule a rescan,
+ * and force a retry on the standard path.
+ */
+ if (raid_retry) {
+ dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
+ "HP SSD Smart Path");
+ dev->offload_enabled = 0; /* Disable Smart Path */
+ h->drv_req_rescan = 1; /* schedule controller rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ }
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+}
+
static void complete_scsi_command(struct CommandList *cp)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
struct ErrorInfo *ei;
+ struct hpsa_scsi_dev_t *dev;
unsigned char sense_key;
unsigned char asc; /* additional sense code */
@@ -1201,13 +1686,19 @@ static void complete_scsi_command(struct CommandList *cp)
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
h = cp->h;
+ dev = cmd->device->hostdata;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
- if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+ if ((cp->cmd_type == CMD_SCSI) &&
+ (cp->Header.SGTotal > h->max_cmd_sg_entries))
hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+
+ if (cp->cmd_type == CMD_IOACCEL2)
+ return process_ioaccel2_completion(h, cp, cmd, dev);
+
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@ static void complete_scsi_command(struct CommandList *cp)
return;
}
+ /* For I/O accelerator commands, copy over some fields to the normal
+ * CISS header used below for error handling.
+ */
+ if (cp->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+ cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
+ cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+ cp->Header.Tag.lower = c->Tag.lower;
+ cp->Header.Tag.upper = c->Tag.upper;
+ memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+ memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+ if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ }
+
/* an error has occurred */
switch (ei->CommandStatus) {
@@ -1389,6 +1906,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
+ case CMD_IOACCEL_DISABLED:
+ /* This only handles the direct pass-through case since RAID
+ * offload is handled above. Just attempt a retry.
+ */
+ cmd->result = DID_SOFT_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "cp %p had HP SSD Smart Path error\n", cp);
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@ static int hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Addr.upper =
(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
+ cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
return 0;
@@ -1490,17 +2016,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
-static void hpsa_scsi_interpret_error(struct CommandList *cp)
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+ struct CommandList *c)
{
- struct ErrorInfo *ei;
+ const u8 *cdb = c->Request.CDB;
+ const u8 *lun = c->Header.LUN.LunAddrBytes;
+
+ dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+ " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ txt, lun[0], lun[1], lun[2], lun[3],
+ lun[4], lun[5], lun[6], lun[7],
+ cdb[0], cdb[1], cdb[2], cdb[3],
+ cdb[4], cdb[5], cdb[6], cdb[7],
+ cdb[8], cdb[9], cdb[10], cdb[11],
+ cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+ struct CommandList *cp)
+{
+ const struct ErrorInfo *ei = cp->err_info;
struct device *d = &cp->h->pdev->dev;
+ const u8 *sd = ei->SenseInfo;
- ei = cp->err_info;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
- dev_warn(d, "cmd %p has completed with errors\n", cp);
- dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
- ei->ScsiStatus);
+ hpsa_print_cmd(h, "SCSI status", cp);
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+ dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+ sd[2] & 0x0f, sd[12], sd[13]);
+ else
+ dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- dev_info(d, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
- dev_warn(d, "cp %p has completed with data overrun\n", cp);
+ hpsa_print_cmd(h, "overrun condition", cp);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's
* to non-existent targets as invalid commands.
*/
- dev_warn(d, "cp %p is reported invalid (probably means "
- "target device no longer present)\n", cp);
- /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
- print_cmd(cp); */
+ hpsa_print_cmd(h, "invalid command", cp);
+ dev_warn(d, "probably means device no longer present\n");
}
break;
case CMD_PROTOCOL_ERR:
- dev_warn(d, "cp %p has protocol error \n", cp);
+ hpsa_print_cmd(h, "protocol error", cp);
break;
case CMD_HARDWARE_ERR:
- /* cmd->result = DID_ERROR << 16; */
- dev_warn(d, "cp %p had hardware error\n", cp);
+ hpsa_print_cmd(h, "hardware error", cp);
break;
case CMD_CONNECTION_LOST:
- dev_warn(d, "cp %p had connection lost\n", cp);
+ hpsa_print_cmd(h, "connection lost", cp);
break;
case CMD_ABORTED:
- dev_warn(d, "cp %p was aborted\n", cp);
+ hpsa_print_cmd(h, "aborted", cp);
break;
case CMD_ABORT_FAILED:
- dev_warn(d, "cp %p reports abort failed\n", cp);
+ hpsa_print_cmd(h, "abort failed", cp);
break;
case CMD_UNSOLICITED_ABORT:
- dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+ hpsa_print_cmd(h, "unsolicited abort", cp);
break;
case CMD_TIMEOUT:
- dev_warn(d, "cp %p timed out\n", cp);
+ hpsa_print_cmd(h, "timed out", cp);
break;
case CMD_UNABORTABLE:
- dev_warn(d, "Command unabortable\n");
+ hpsa_print_cmd(h, "unabortable", cp);
break;
default:
- dev_warn(d, "cp %p returned unknown status %x\n", cp,
+ hpsa_print_cmd(h, "unknown status", cp);
+ dev_warn(d, "Unknown command status %x\n",
ei->CommandStatus);
}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
- unsigned char page, unsigned char *buf,
+ u16 page, unsigned char *buf,
unsigned char bufsize)
{
int rc = IO_OK;
@@ -1577,7 +2120,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
out:
@@ -1585,7 +2128,39 @@ out:
return rc;
}
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char page,
+ struct bmic_controller_parameters *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
int rc = IO_OK;
struct CommandList *c;
@@ -1599,14 +2174,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
}
/* fill_cmd can't fail here, no data buffer to map. */
- (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
- NULL, 0, 0, scsi3addr, TYPE_MSG);
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ scsi3addr, TYPE_MSG);
+ c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
if (ei->CommandStatus != 0) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
@@ -1632,6 +2208,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
return;
}
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+ struct raid_map_data *map_buff)
+{
+ struct raid_map_disk_data *dd = &map_buff->data[0];
+ int map, row, col;
+ u16 map_cnt, row_cnt, disks_per_row;
+
+ if (rc != 0)
+ return;
+
+ /* Show details only if debugging has been activated. */
+ if (h->raid_offload_debug < 2)
+ return;
+
+ dev_info(&h->pdev->dev, "structure_size = %u\n",
+ le32_to_cpu(map_buff->structure_size));
+ dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+ le32_to_cpu(map_buff->volume_blk_size));
+ dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->volume_blk_cnt));
+ dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+ map_buff->phys_blk_shift);
+ dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+ map_buff->parity_rotation_shift);
+ dev_info(&h->pdev->dev, "strip_size = %u\n",
+ le16_to_cpu(map_buff->strip_size));
+ dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_starting_blk));
+ dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_blk_cnt));
+ dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->data_disks_per_row));
+ dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->metadata_disks_per_row));
+ dev_info(&h->pdev->dev, "row_cnt = %u\n",
+ le16_to_cpu(map_buff->row_cnt));
+ dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+ le16_to_cpu(map_buff->layout_map_count));
+ dev_info(&h->pdev->dev, "flags = %u\n",
+ le16_to_cpu(map_buff->flags));
+ if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+ dev_info(&h->pdev->dev, "encrypytion = ON\n");
+ else
+ dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+ dev_info(&h->pdev->dev, "dekindex = %u\n",
+ le16_to_cpu(map_buff->dekindex));
+
+ map_cnt = le16_to_cpu(map_buff->layout_map_count);
+ for (map = 0; map < map_cnt; map++) {
+ dev_info(&h->pdev->dev, "Map%u:\n", map);
+ row_cnt = le16_to_cpu(map_buff->row_cnt);
+ for (row = 0; row < row_cnt; row++) {
+ dev_info(&h->pdev->dev, " Row%u:\n", row);
+ disks_per_row =
+ le16_to_cpu(map_buff->data_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " D%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ disks_per_row =
+ le16_to_cpu(map_buff->metadata_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " M%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ }
+ }
+}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+ __attribute__((unused)) int rc,
+ __attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif
+
+static int hpsa_get_raid_map(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc = 0;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+ if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+ sizeof(this_device->raid_map), 0,
+ scsi3addr, TYPE_CMD)) {
+ dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+ cmd_special_free(h, c);
+ return -ENOMEM;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ cmd_special_free(h, c);
+ return -1;
+ }
+ cmd_special_free(h, c);
+
+ /* @todo in the future, dynamically allocate RAID map memory */
+ if (le32_to_cpu(this_device->raid_map.structure_size) >
+ sizeof(this_device->raid_map)) {
+ dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+ rc = -1;
+ }
+ hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+ return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+ unsigned char scsi3addr[], u8 page)
+{
+ int rc;
+ int i;
+ int pages;
+ unsigned char *buf, bufsize;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Get the size of the page list first */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_unsupported;
+ pages = buf[3];
+ if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+ bufsize = pages + HPSA_VPD_HEADER_SZ;
+ else
+ bufsize = 255;
+
+ /* Get the whole VPD page list */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, bufsize);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ for (i = 1; i <= pages; i++)
+ if (buf[3 + i] == page)
+ goto exit_supported;
+exit_unsupported:
+ kfree(buf);
+ return 0;
+exit_supported:
+ kfree(buf);
+ return 1;
+}
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc;
+ unsigned char *buf;
+ u8 ioaccel_status;
+
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+ goto out;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+ if (rc != 0)
+ goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ this_device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (this_device->offload_config) {
+ this_device->offload_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ if (hpsa_get_raid_map(h, scsi3addr, this_device))
+ this_device->offload_enabled = 0;
+ }
+out:
+ kfree(buf);
+ return;
+}
+
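For example, an ioaccel-status byte of 0x03 in that VPD page sets both offload_config and offload_enabled (the latter cleared again if the subsequent RAID map fetch fails), while 0x01 marks the volume as configured for offload but not currently enabled.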
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
@@ -1644,7 +2418,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
@@ -1678,8 +2452,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
+ } else {
+ if (buf->extended_response_flag != extended_response) {
+ dev_err(&h->pdev->dev,
+ "report luns requested format %u, got %u\n",
+ extended_response,
+ buf->extended_response_flag);
+ rc = -1;
+ }
}
out:
cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
device->lun = lun;
}
+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ int rc;
+ int status;
+ int size;
+ unsigned char *buf;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+ /* Does controller have VPD for logical volume status? */
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+ goto exit_failed;
+ }
+
+ /* Get the size of the VPD return buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ size = buf[3];
+
+ /* Now get the whole VPD buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, size + HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ status = buf[4]; /* status byte */
+
+ kfree(buf);
+ return status;
+exit_failed:
+ kfree(buf);
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ * 0 (not offline)
+ * -1 (offline for unknown reasons)
+ * # (integer code indicating one of several NOT READY states
+ * describing why a volume is to be kept offline)
+ */
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct CommandList *c;
+ unsigned char *sense, sense_key, asc, ascq;
+ int ldstat = 0;
+ u16 cmd_status;
+ u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+ c = cmd_alloc(h);
+ if (!c)
+ return 0;
+ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ sense = c->err_info->SenseInfo;
+ sense_key = sense[2];
+ asc = sense[12];
+ ascq = sense[13];
+ cmd_status = c->err_info->CommandStatus;
+ scsi_status = c->err_info->ScsiStatus;
+ cmd_free(h, c);
+ /* Is the volume 'not ready'? */
+ if (cmd_status != CMD_TARGET_STATUS ||
+ scsi_status != SAM_STAT_CHECK_CONDITION ||
+ sense_key != NOT_READY ||
+ asc != ASC_LUN_NOT_READY) {
+ return 0;
+ }
+
+ /* Determine the reason for not ready state */
+ ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+ /* Keep volume offline in certain cases: */
+ switch (ldstat) {
+ case HPSA_LV_UNDERGOING_ERASE:
+ case HPSA_LV_UNDERGOING_RPI:
+ case HPSA_LV_PENDING_RPI:
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ return ldstat;
+ case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+ /* If VPD status page isn't available,
+ * use ASC/ASCQ to determine state
+ */
+ if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+ (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+ return ldstat;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
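As an example of the flow above, a TEST UNIT READY that fails with CHECK CONDITION, sense key NOT READY, ASC 0x04 on a volume whose status VPD page reports HPSA_LV_UNDERGOING_RPI returns that code; adjust_hpsa_scsi_table() then keeps the volume off the SCSI mid-layer and puts it on the offline-device monitor list.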
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
sizeof(this_device->device_id));
if (this_device->devtype == TYPE_DISK &&
- is_logical_dev_addr_mode(scsi3addr))
+ is_logical_dev_addr_mode(scsi3addr)) {
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
- else
+ if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+ hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+ this_device->volume_offline =
+ hpsa_volume_offline(h, scsi3addr);
+ } else {
this_device->raid_level = RAID_UNKNOWN;
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+ this_device->volume_offline = 0;
+ }
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
@@ -1878,6 +2779,105 @@ static int add_ext_target_dev(struct ctlr_info *h,
}
/*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ * 1. Extract ioaccel2 handle from the command.
+ * 2. Find a matching ioaccel2 handle from list of physical disks.
+ * 3. Return:
+ * 1 and set scsi3addr to address of matching physical disk
+ * 0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+ struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+ struct ReportExtendedLUNdata *physicals = NULL;
+ int responsesize = 24; /* size of physical extended response */
+ int extended = 2; /* flag forces reporting 'other dev info'. */
+ int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+ u32 nphysicals = 0; /* number of reported physical devs */
+ int found = 0; /* found match (1) or not (0) */
+ u32 find; /* handle we need to match */
+ int i;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+ struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+ u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+
+ if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+ return 0; /* no match */
+
+ /* point to the ioaccel2 device handle */
+ c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+ if (c2a == NULL)
+ return 0; /* no match */
+
+ scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+ if (scmd == NULL)
+ return 0; /* no match */
+
+ d = scmd->device->hostdata;
+ if (d == NULL)
+ return 0; /* no match */
+
+ it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
+ scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+ find = c2a->scsi_nexus;
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ __func__, scsi_nexus,
+ d->device_id[0], d->device_id[1], d->device_id[2],
+ d->device_id[3], d->device_id[4], d->device_id[5],
+ d->device_id[6], d->device_id[7], d->device_id[8],
+ d->device_id[9], d->device_id[10], d->device_id[11],
+ d->device_id[12], d->device_id[13], d->device_id[14],
+ d->device_id[15]);
+
+ /* Get the list of physical devices */
+ physicals = kzalloc(reportsize, GFP_KERNEL);
+ if (physicals == NULL)
+ return 0; /* no match */
+ if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
+ reportsize, extended)) {
+ dev_err(&h->pdev->dev,
+ "Can't lookup %s device handle: report physical LUNs failed.\n",
+ "HP SSD Smart Path");
+ kfree(physicals);
+ return 0;
+ }
+ nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+ responsesize;
+
+ /* find ioaccel2 handle in list of physicals: */
+ for (i = 0; i < nphysicals; i++) {
+ /* handle is in bytes 28-31 of each lun */
+ if (memcmp(&((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20], &find, 4) != 0) {
+ continue; /* didn't match */
+ }
+ found = 1;
+ memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][0], 8);
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ __func__, find,
+ ((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20],
+ scsi3addr[0], scsi3addr[1], scsi3addr[2],
+ scsi3addr[3], scsi3addr[4], scsi3addr[5],
+ scsi3addr[6], scsi3addr[7]);
+ break; /* found it */
+ }
+
+ kfree(physicals);
+ if (found)
+ return 1;
+ else
+ return 0;
+
+}
+/*
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
* logdev. The number of luns in physdev and logdev are returned in
* *nphysicals and *nlogicals, respectively.
@@ -1885,14 +2885,26 @@ static int add_ext_target_dev(struct ctlr_info *h,
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
int reportlunsize,
- struct ReportLUNdata *physdev, u32 *nphysicals,
+ struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
struct ReportLUNdata *logdev, u32 *nlogicals)
{
- if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+ int physical_entry_size = 8;
+
+ *physical_mode = 0;
+
+ /* For I/O accelerator mode we need to read physical device handles */
+ if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+ h->transMethod & CFGTBL_Trans_io_accel2) {
+ *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
+ physical_entry_size = 24;
+ }
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
+ *physical_mode)) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
- *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+ *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
+ physical_entry_size;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
}
u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
- int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+ int nphysicals, int nlogicals,
+ struct ReportExtendedLUNdata *physdev_list,
struct ReportLUNdata *logdev_list)
{
/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
return NULL;
}
+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+ int rc;
+ struct bmic_controller_parameters *ctlr_params;
+ ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+ GFP_KERNEL);
+
+ if (!ctlr_params)
+ return 0;
+ rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+ sizeof(struct bmic_controller_parameters));
+ if (rc != 0) {
+ kfree(ctlr_params);
+ return 0;
+ }
+ rc = ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+ kfree(ctlr_params);
+ return rc;
+}
+
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
* tell which devices we already know about, vs. new
* devices, vs. disappearing devices.
*/
- struct ReportLUNdata *physdev_list = NULL;
+ struct ReportExtendedLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
+ int physical_mode = 0;
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
- int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+ int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
+ u8 rescan_hba_mode;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
}
memset(lunzerobits, 0, sizeof(lunzerobits));
- if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
- logdev_list, &nlogicals))
+ rescan_hba_mode = hpsa_hba_mode_enabled(h);
+
+ if (!h->hba_mode_enabled && rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+ else if (h->hba_mode_enabled && !rescan_hba_mode)
+ dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+ h->hba_mode_enabled = rescan_hba_mode;
+
+ if (hpsa_gather_lun_info(h, reportlunsize,
+ (struct ReportLUNdata *) physdev_list, &nphysicals,
+ &physical_mode, logdev_list, &nlogicals))
goto out;
/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ncurrent++;
break;
case TYPE_DISK:
- if (i < nphysicals)
+ if (h->hba_mode_enabled) {
+ /* never use raid mapper in HBA mode */
+ this_device->offload_enabled = 0;
+ ncurrent++;
break;
- ncurrent++;
+ } else if (h->acciopath_status) {
+ if (i >= nphysicals) {
+ ncurrent++;
+ break;
+ }
+ } else {
+ if (i < nphysicals)
+ break;
+ ncurrent++;
+ break;
+ }
+ if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
+ memcpy(&this_device->ioaccel_handle,
+ &lunaddrbytes[20],
+ sizeof(this_device->ioaccel_handle));
+ ncurrent++;
+ }
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
@@ -2136,7 +3198,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
curr_sg->Len = len;
- curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
curr_sg++;
}
@@ -2160,6 +3222,726 @@ sglist_finished:
return 0;
}
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+ int is_write = 0;
+ u32 block;
+ u32 block_cnt;
+
+ /* Perform some CDB fixups if needed using 10 byte reads/writes only */
+ switch (cdb[0]) {
+ case WRITE_6:
+ case WRITE_12:
+ is_write = 1;
+ case READ_6:
+ case READ_12:
+ if (*cdb_len == 6) {
+ block = (((u32) cdb[2]) << 8) | cdb[3];
+ block_cnt = cdb[4];
+ } else {
+ BUG_ON(*cdb_len != 12);
+ block = (((u32) cdb[2]) << 24) |
+ (((u32) cdb[3]) << 16) |
+ (((u32) cdb[4]) << 8) |
+ cdb[5];
+ block_cnt =
+ (((u32) cdb[6]) << 24) |
+ (((u32) cdb[7]) << 16) |
+ (((u32) cdb[8]) << 8) |
+ cdb[9];
+ }
+ if (block_cnt > 0xffff)
+ return IO_ACCEL_INELIGIBLE;
+
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (block >> 24);
+ cdb[3] = (u8) (block >> 16);
+ cdb[4] = (u8) (block >> 8);
+ cdb[5] = (u8) (block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (block_cnt >> 8);
+ cdb[8] = (u8) (block_cnt);
+ cdb[9] = 0;
+ *cdb_len = 10;
+ break;
+ }
+ return 0;
+}
+
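As a worked example of the fixup above (values are arbitrary), a 6-byte READ of LBA 0x1234 for 8 blocks is rewritten in place as a 10-byte READ:

	u8 cdb[16] = { READ_6, 0x00, 0x12, 0x34, 0x08, 0x00 };
	int cdb_len = 6;

	fixup_ioaccel_cdb(cdb, &cdb_len);
	/* cdb_len is now 10 and cdb[] holds
	 * { READ_10, 0, 0x00, 0x00, 0x12, 0x34, 0, 0x00, 0x08, 0 } */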
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+ unsigned int len;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ u64 addr64;
+ int use_sg, i;
+ struct SGDescriptor *curr_sg;
+ u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+ /* TODO: implement chaining support */
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ return IO_ACCEL_INELIGIBLE;
+
+ BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ return IO_ACCEL_INELIGIBLE;
+
+ c->cmd_type = CMD_IOACCEL1;
+
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (use_sg) {
+ curr_sg = cp->SG;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+ curr_sg->Addr.upper =
+ (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+ curr_sg->Len = len;
+
+ if (i == (scsi_sg_count(cmd) - 1))
+ curr_sg->Ext = HPSA_SG_LAST;
+ else
+ curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg++;
+ }
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ control |= IOACCEL1_CONTROL_DATA_IN;
+ break;
+ case DMA_NONE:
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ control |= IOACCEL1_CONTROL_NODATAXFER;
+ }
+
+ c->Header.SGList = use_sg;
+ /* Fill out the command structure to submit */
+ cp->dev_handle = ioaccel_handle & 0xFFFF;
+ cp->transfer_len = total_len;
+ cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
+ (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
+ cp->control = control;
+ memcpy(cp->CDB, cdb, cdb_len);
+ memcpy(cp->CISS_LUN, scsi3addr, 8);
+ /* Tag was already set at init time. */
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+ return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+ cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c, struct io_accel2_cmd *cp)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ u64 first_block;
+
+ BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+ /* Are we doing encryption on this device */
+ if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+ return;
+ /* Set the data encryption key index. */
+ cp->dekindex = map->dekindex;
+
+ /* Set the encryption enable flag, encoded into direction field. */
+ cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+ /* Set encryption tweak values based on logical block address
+ * If block size is 512, tweak value is LBA.
+ * For other block sizes, tweak is (LBA * block size)/ 512)
+ */
+ switch (cmd->cmnd[0]) {
+ /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_6:
+ case READ_6:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ case WRITE_10:
+ case READ_10:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+ case WRITE_12:
+ case READ_12:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ cp->tweak_upper = 0;
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ case WRITE_16:
+ case READ_16:
+ if (map->volume_blk_size == 512) {
+ cp->tweak_lower =
+ (((u32) cmd->cmnd[6]) << 24) |
+ (((u32) cmd->cmnd[7]) << 16) |
+ (((u32) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ cp->tweak_upper =
+ (((u32) cmd->cmnd[2]) << 24) |
+ (((u32) cmd->cmnd[3]) << 16) |
+ (((u32) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ } else {
+ first_block =
+ (((u64) cmd->cmnd[2]) << 56) |
+ (((u64) cmd->cmnd[3]) << 48) |
+ (((u64) cmd->cmnd[4]) << 40) |
+ (((u64) cmd->cmnd[5]) << 32) |
+ (((u64) cmd->cmnd[6]) << 24) |
+ (((u64) cmd->cmnd[7]) << 16) |
+ (((u64) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ first_block = (first_block * map->volume_blk_size)/512;
+ cp->tweak_lower = (u32)first_block;
+ cp->tweak_upper = (u32)(first_block >> 32);
+ }
+ break;
+ default:
+ dev_err(&h->pdev->dev,
+ "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+ __func__);
+ BUG();
+ break;
+ }
+}
+
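A quick check of the tweak arithmetic above, assuming a hypothetical volume with a 4096-byte block size: a READ(10) starting at LBA 0x1000 yields a tweak of (0x1000 * 4096) / 512 = 0x8000, so tweak_lower = 0x8000 and tweak_upper = 0; with 512-byte blocks the tweak is simply the LBA itself.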
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+ struct ioaccel2_sg_element *curr_sg;
+ int use_sg, i;
+ struct scatterlist *sg;
+ u64 addr64;
+ u32 len;
+ u32 total_len = 0;
+
+ if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+ return IO_ACCEL_INELIGIBLE;
+
+ if (fixup_ioaccel_cdb(cdb, &cdb_len))
+ return IO_ACCEL_INELIGIBLE;
+ c->cmd_type = CMD_IOACCEL2;
+ /* Adjust the DMA address to point to the accelerated command buffer */
+ c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+ (c->cmdindex * sizeof(*cp));
+ BUG_ON(c->busaddr & 0x0000007F);
+
+ memset(cp, 0, sizeof(*cp));
+ cp->IU_type = IOACCEL2_IU_TYPE;
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (use_sg) {
+ BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+ curr_sg = cp->sg;
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ total_len += len;
+ curr_sg->address = cpu_to_le64(addr64);
+ curr_sg->length = cpu_to_le32(len);
+ curr_sg->reserved[0] = 0;
+ curr_sg->reserved[1] = 0;
+ curr_sg->reserved[2] = 0;
+ curr_sg->chain_indicator = 0;
+ curr_sg++;
+ }
+
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_OUT;
+ break;
+ case DMA_FROM_DEVICE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_DATA_IN;
+ break;
+ case DMA_NONE:
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ break;
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+ } else {
+ cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+ cp->direction |= IOACCEL2_DIR_NO_DATA;
+ }
+
+ /* Set encryption parameters, if necessary */
+ set_encrypt_ioaccel2(h, c, cp);
+
+ cp->scsi_nexus = ioaccel_handle;
+ cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT;
+ memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+ /* fill in sg elements */
+ cp->sg_count = (u8) use_sg;
+
+ cp->data_len = cpu_to_le32(total_len);
+ cp->err_ptr = cpu_to_le64(c->busaddr +
+ offsetof(struct io_accel2_cmd, error_data));
+ cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+
+ enqueue_cmd_and_start_io(h, c);
+ return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr)
+{
+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr);
+ else
+ return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+ cdb, cdb_len, scsi3addr);
+}
+
+static void raid_map_helper(struct raid_map_data *map,
+ int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+ if (offload_to_mirror == 0) {
+ /* use physical disk in the first mirrored group. */
+ *map_index %= map->data_disks_per_row;
+ return;
+ }
+ do {
+ /* determine mirror group that *map_index indicates */
+ *current_group = *map_index / map->data_disks_per_row;
+ if (offload_to_mirror == *current_group)
+ continue;
+ if (*current_group < (map->layout_map_count - 1)) {
+ /* select map index from next group */
+ *map_index += map->data_disks_per_row;
+ (*current_group)++;
+ } else {
+ /* select map index from first group */
+ *map_index %= map->data_disks_per_row;
+ *current_group = 0;
+ }
+ } while (offload_to_mirror != *current_group);
+}
+
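To see the rotation above in action, take a hypothetical 3-way mirror with layout_map_count = 3 and data_disks_per_row = 1: starting from map_index 0 with offload_to_mirror = 2, the loop steps the index through groups 0, 1 and 2 and leaves map_index = 2, selecting the drive in the third mirror group; offload_to_mirror = 0 short-circuits to the first group.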
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct scsi_cmnd *cmd = c->scsi_cmd;
+ struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+ struct raid_map_data *map = &dev->raid_map;
+ struct raid_map_disk_data *dd = &map->data[0];
+ int is_write = 0;
+ u32 map_index;
+ u64 first_block, last_block;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row, last_row;
+ u32 first_row_offset, last_row_offset;
+ u32 first_column, last_column;
+ u64 r0_first_row, r0_last_row;
+ u32 r5or6_blocks_per_row;
+ u64 r5or6_first_row, r5or6_last_row;
+ u32 r5or6_first_row_offset, r5or6_last_row_offset;
+ u32 r5or6_first_column, r5or6_last_column;
+ u32 total_disks_per_row;
+ u32 stripesize;
+ u32 first_group, last_group, current_group;
+ u32 map_row;
+ u32 disk_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_len;
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+ int offload_to_mirror;
+
+ BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+ /* check for valid opcode, get LBA and block count */
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ is_write = 1;
+ case READ_6:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 8) |
+ cmd->cmnd[3];
+ block_cnt = cmd->cmnd[4];
+ break;
+ case WRITE_10:
+ is_write = 1;
+ case READ_10:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[7]) << 8) |
+ cmd->cmnd[8];
+ break;
+ case WRITE_12:
+ is_write = 1;
+ case READ_12:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 24) |
+ (((u64) cmd->cmnd[3]) << 16) |
+ (((u64) cmd->cmnd[4]) << 8) |
+ cmd->cmnd[5];
+ block_cnt =
+ (((u32) cmd->cmnd[6]) << 24) |
+ (((u32) cmd->cmnd[7]) << 16) |
+ (((u32) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ break;
+ case WRITE_16:
+ is_write = 1;
+ case READ_16:
+ first_block =
+ (((u64) cmd->cmnd[2]) << 56) |
+ (((u64) cmd->cmnd[3]) << 48) |
+ (((u64) cmd->cmnd[4]) << 40) |
+ (((u64) cmd->cmnd[5]) << 32) |
+ (((u64) cmd->cmnd[6]) << 24) |
+ (((u64) cmd->cmnd[7]) << 16) |
+ (((u64) cmd->cmnd[8]) << 8) |
+ cmd->cmnd[9];
+ block_cnt =
+ (((u32) cmd->cmnd[10]) << 24) |
+ (((u32) cmd->cmnd[11]) << 16) |
+ (((u32) cmd->cmnd[12]) << 8) |
+ cmd->cmnd[13];
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+ }
+ BUG_ON(block_cnt == 0);
+ last_block = first_block + block_cnt - 1;
+
+ /* check for write to non-RAID-0 */
+ if (is_write && dev->raid_level != 0)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* check for invalid block or wraparound */
+ if (last_block >= map->volume_blk_cnt || last_block < first_block)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* calculate stripe information for the request */
+ blocks_per_row = map->data_disks_per_row * map->strip_size;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, blocks_per_row);
+ last_row = tmpdiv;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ tmpdiv = first_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ first_column = tmpdiv;
+ tmpdiv = last_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ last_column = tmpdiv;
+#else
+ first_row = first_block / blocks_per_row;
+ last_row = last_block / blocks_per_row;
+ first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+ last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+ first_column = first_row_offset / map->strip_size;
+ last_column = last_row_offset / map->strip_size;
+#endif
+
+ /* if this isn't a single row/column then give to the controller */
+ if ((first_row != last_row) || (first_column != last_column))
+ return IO_ACCEL_INELIGIBLE;
+
+ /* proceeding with driver mapping */
+ total_disks_per_row = map->data_disks_per_row +
+ map->metadata_disks_per_row;
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ map->row_cnt;
+ map_index = (map_row * total_disks_per_row) + first_column;
+
+ switch (dev->raid_level) {
+ case HPSA_RAID_0:
+ break; /* nothing special to do */
+ case HPSA_RAID_1:
+ /* Handles load balance across RAID 1 members.
+ * (2-drive R1 and R10 with even # of drives.)
+ * Appropriate for SSDs, not optimal for HDDs
+ */
+ BUG_ON(map->layout_map_count != 2);
+ if (dev->offload_to_mirror)
+ map_index += map->data_disks_per_row;
+ dev->offload_to_mirror = !dev->offload_to_mirror;
+ break;
+ case HPSA_RAID_ADM:
+ /* Handles N-way mirrors (R1-ADM)
+	 * and R10 with # of drives divisible by 3.
+ */
+ BUG_ON(map->layout_map_count != 3);
+
+ offload_to_mirror = dev->offload_to_mirror;
+ raid_map_helper(map, offload_to_mirror,
+ &map_index, &current_group);
+ /* set mirror group to use next time */
+ offload_to_mirror =
+ (offload_to_mirror >= map->layout_map_count - 1)
+ ? 0 : offload_to_mirror + 1;
+ /* FIXME: remove after debug/dev */
+ BUG_ON(offload_to_mirror >= map->layout_map_count);
+ dev_warn(&h->pdev->dev,
+ "DEBUG: Using physical disk map index %d from mirror group %d\n",
+ map_index, offload_to_mirror);
+ dev->offload_to_mirror = offload_to_mirror;
+ /* Avoid direct use of dev->offload_to_mirror within this
+ * function since multiple threads might simultaneously
+ * increment it beyond the range of dev->layout_map_count -1.
+ */
+ break;
+ case HPSA_RAID_5:
+ case HPSA_RAID_6:
+ if (map->layout_map_count <= 1)
+ break;
+
+ /* Verify first and last block are in same RAID group */
+ r5or6_blocks_per_row =
+ map->strip_size * map->data_disks_per_row;
+ BUG_ON(r5or6_blocks_per_row == 0);
+ stripesize = r5or6_blocks_per_row * map->layout_map_count;
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_group = do_div(tmpdiv, stripesize);
+ tmpdiv = first_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ first_group = tmpdiv;
+ tmpdiv = last_block;
+ last_group = do_div(tmpdiv, stripesize);
+ tmpdiv = last_group;
+ (void) do_div(tmpdiv, r5or6_blocks_per_row);
+ last_group = tmpdiv;
+#else
+ first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+ last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+ if (first_group != last_group)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ (void) do_div(tmpdiv, stripesize);
+ first_row = r5or6_first_row = r0_first_row = tmpdiv;
+ tmpdiv = last_block;
+ (void) do_div(tmpdiv, stripesize);
+ r5or6_last_row = r0_last_row = tmpdiv;
+#else
+ first_row = r5or6_first_row = r0_first_row =
+ first_block / stripesize;
+ r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+ if (r5or6_first_row != r5or6_last_row)
+ return IO_ACCEL_INELIGIBLE;
+
+
+ /* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+ tmpdiv = first_block;
+ first_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = first_row_offset;
+ first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+ r5or6_first_row_offset = first_row_offset;
+ tmpdiv = last_block;
+ r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+ tmpdiv = r5or6_last_row_offset;
+ r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+ tmpdiv = r5or6_first_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ first_column = r5or6_first_column = tmpdiv;
+ tmpdiv = r5or6_last_row_offset;
+ (void) do_div(tmpdiv, map->strip_size);
+ r5or6_last_column = tmpdiv;
+#else
+ first_row_offset = r5or6_first_row_offset =
+ (u32)((first_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ r5or6_last_row_offset =
+ (u32)((last_block % stripesize) %
+ r5or6_blocks_per_row);
+
+ first_column = r5or6_first_column =
+ r5or6_first_row_offset / map->strip_size;
+ r5or6_last_column =
+ r5or6_last_row_offset / map->strip_size;
+#endif
+ if (r5or6_first_column != r5or6_last_column)
+ return IO_ACCEL_INELIGIBLE;
+
+ /* Request is eligible */
+ map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+ map->row_cnt;
+
+ map_index = (first_group *
+ (map->row_cnt * total_disks_per_row)) +
+ (map_row * total_disks_per_row) + first_column;
+ break;
+ default:
+ return IO_ACCEL_INELIGIBLE;
+ }
+
+ disk_handle = dd[map_index].ioaccel_handle;
+ disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
+ (first_row_offset - (first_column * map->strip_size));
+ disk_block_cnt = block_cnt;
+
+ /* handle differing logical/physical block sizes */
+ if (map->phys_blk_shift) {
+ disk_block <<= map->phys_blk_shift;
+ disk_block_cnt <<= map->phys_blk_shift;
+ }
+ BUG_ON(disk_block_cnt > 0xffff);
+
+ /* build the new CDB for the physical disk I/O */
+ if (disk_block > 0xffffffff) {
+ cdb[0] = is_write ? WRITE_16 : READ_16;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 56);
+ cdb[3] = (u8) (disk_block >> 48);
+ cdb[4] = (u8) (disk_block >> 40);
+ cdb[5] = (u8) (disk_block >> 32);
+ cdb[6] = (u8) (disk_block >> 24);
+ cdb[7] = (u8) (disk_block >> 16);
+ cdb[8] = (u8) (disk_block >> 8);
+ cdb[9] = (u8) (disk_block);
+ cdb[10] = (u8) (disk_block_cnt >> 24);
+ cdb[11] = (u8) (disk_block_cnt >> 16);
+ cdb[12] = (u8) (disk_block_cnt >> 8);
+ cdb[13] = (u8) (disk_block_cnt);
+ cdb[14] = 0;
+ cdb[15] = 0;
+ cdb_len = 16;
+ } else {
+ cdb[0] = is_write ? WRITE_10 : READ_10;
+ cdb[1] = 0;
+ cdb[2] = (u8) (disk_block >> 24);
+ cdb[3] = (u8) (disk_block >> 16);
+ cdb[4] = (u8) (disk_block >> 8);
+ cdb[5] = (u8) (disk_block);
+ cdb[6] = 0;
+ cdb[7] = (u8) (disk_block_cnt >> 8);
+ cdb[8] = (u8) (disk_block_cnt);
+ cdb[9] = 0;
+ cdb_len = 10;
+ }
+ return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+ dev->scsi3addr);
+}
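Everything above reduces to integer division against the RAID map geometry. A self-contained walk-through of the 64-bit path with hypothetical RAID 0 numbers (strip_size, disk counts and the LBA range are invented for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical geometry, loosely shaped like raid_map_data. */
	uint16_t strip_size = 128;		/* blocks per strip */
	uint16_t data_disks_per_row = 4;
	uint16_t metadata_disks_per_row = 0;	/* RAID 0: no parity/mirror disks */
	uint16_t row_cnt = 1;
	uint8_t  parity_rotation_shift = 0;
	uint64_t disk_starting_blk = 0;

	uint64_t first_block = 1000, last_block = 1007;	/* an 8-block request */

	uint32_t blocks_per_row = data_disks_per_row * strip_size;		/* 512 */
	uint64_t first_row = first_block / blocks_per_row;			/* 1 */
	uint64_t last_row = last_block / blocks_per_row;			/* 1 */
	uint32_t first_row_offset = first_block - first_row * blocks_per_row;	/* 488 */
	uint32_t last_row_offset = last_block - last_row * blocks_per_row;	/* 495 */
	uint32_t first_column = first_row_offset / strip_size;			/* 3 */
	uint32_t last_column = last_row_offset / strip_size;			/* 3 */

	if (first_row != last_row || first_column != last_column) {
		printf("ineligible: spans more than one strip\n");
		return 0;
	}

	uint32_t total_disks_per_row = data_disks_per_row + metadata_disks_per_row;
	uint32_t map_row = (uint32_t)(first_row >> parity_rotation_shift) % row_cnt;	/* 0 */
	uint32_t map_index = map_row * total_disks_per_row + first_column;		/* 3 */
	uint64_t disk_block = disk_starting_blk + first_row * strip_size +
			      (first_row_offset - first_column * strip_size);		/* 232 */

	printf("map_index=%u disk_block=%llu\n",
	       map_index, (unsigned long long)disk_block);
	return 0;
}

The request lands entirely in row 1, column 3, so it is eligible and gets rewritten as an 8-block READ_10 at block 232 of the physical disk found at map->data[3].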
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
unsigned char scsi3addr[8];
struct CommandList *c;
unsigned long flags;
+ int rc = 0;
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+
+ /* Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
+ */
+ if (likely(cmd->retries == 0 &&
+ cmd->request->cmd_type == REQ_TYPE_FS &&
+ h->acciopath_status)) {
+ if (dev->offload_enabled) {
+ rc = hpsa_scsi_ioaccel_raid_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on ioaccel path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ } else if (dev->ioaccel_handle) {
+ rc = hpsa_scsi_ioaccel_direct_map(h, c);
+ if (rc == 0)
+ return 0; /* Sent on direct map path */
+ if (rc < 0) { /* scsi_dma_map failed. */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ }
+ }
+
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
+static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+{
+ unsigned long flags;
+
+ /*
+ * Don't let rescans be initiated on a controller known
+ * to be locked up. If the controller locks up *during*
+ * a rescan, that thread is probably hosed, but at least
+ * we can prevent new rescan threads from piling up on a
+ * locked up controller.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+ if (unlikely(h->lockup_detected)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ spin_lock_irqsave(&h->scan_lock, flags);
+ h->scan_finished = 1;
+ wake_up_all(&h->scan_wait_queue);
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return 0;
+}
+
static void hpsa_scan_start(struct Scsi_Host *sh)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
+ if (do_not_scan_if_controller_locked_up(h))
+ return;
+
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
h->scan_finished = 0; /* mark scan as in progress */
spin_unlock_irqrestore(&h->scan_lock, flags);
+ if (do_not_scan_if_controller_locked_up(h))
+ return;
+
hpsa_update_scsi_devices(h, h->scsi_host->host_no);
spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
- sh->cmd_per_lun = h->nr_cmds;
+ if (h->hba_mode_enabled)
+ sh->cmd_per_lun = 7;
+ else
+ sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)
static int wait_for_device_to_become_ready(struct ctlr_info *h,
unsigned char lunaddr[])
{
- int rc = 0;
+ int rc;
int count = 0;
int waittime = 1; /* seconds */
struct CommandList *c;
@@ -2392,6 +4234,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
*/
msleep(1000 * waittime);
count++;
+ rc = 0; /* Device ready. */
/* Increase wait time with each try, up to a point. */
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
/* send a reset to the SCSI LUN which the command was sent to */
- rc = hpsa_send_reset(h, dev->scsi3addr);
+ rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
return SUCCESS;
@@ -2471,12 +4314,36 @@ static void swizzle_abort_tag(u8 *tag)
tag[7] = original_tag[4];
}
+static void hpsa_get_tag(struct ctlr_info *h,
+ struct CommandList *c, u32 *taglower, u32 *tagupper)
+{
+ if (c->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+ &h->ioaccel_cmd_pool[c->cmdindex];
+ *tagupper = cm1->Tag.upper;
+ *taglower = cm1->Tag.lower;
+ return;
+ }
+ if (c->cmd_type == CMD_IOACCEL2) {
+ struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+ &h->ioaccel2_cmd_pool[c->cmdindex];
+ /* upper tag not used in ioaccel2 mode */
+ memset(tagupper, 0, sizeof(*tagupper));
+ *taglower = cm2->Tag;
+ return;
+ }
+ *tagupper = c->Header.Tag.upper;
+ *taglower = c->Header.Tag.lower;
+}
+
+
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
struct CommandList *abort, int swizzle)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
+ u32 tagupper, taglower;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
@@ -2490,8 +4357,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
- __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+ __func__, tagupper, taglower);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
@@ -2503,15 +4371,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
break;
default:
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
- __func__, abort->Header.Tag.upper,
- abort->Header.Tag.lower);
- hpsa_scsi_interpret_error(c);
+ __func__, tagupper, taglower);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
break;
}
cmd_special_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
- abort->Header.Tag.upper, abort->Header.Tag.lower);
+ dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+ __func__, tagupper, taglower);
return rc;
}
@@ -2565,6 +4432,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
return NULL;
}
+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ * -1 on failure
+ */
+
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct CommandList *abort)
+{
+ int rc = IO_OK;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+ unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+ unsigned char *psa = &phys_scsi3addr[0];
+
+ /* Get a pointer to the hpsa logical device. */
+ scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+ dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+ if (dev == NULL) {
+ dev_warn(&h->pdev->dev,
+ "Cannot abort: no device pointer for command.\n");
+ return -1; /* not abortable */
+ }
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+ scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+ scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+ if (!dev->offload_enabled) {
+ dev_warn(&h->pdev->dev,
+ "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+ return -1; /* not abortable */
+ }
+
+ /* Incoming scsi3addr is logical addr. We need physical disk addr. */
+ if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+ dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+ return -1; /* not abortable */
+ }
+
+ /* send the reset */
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return rc; /* failed to reset */
+ }
+
+ /* wait for device to recover */
+ if (wait_for_device_to_become_ready(h, psa) != 0) {
+ dev_warn(&h->pdev->dev,
+ "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+ return -1; /* failed to recover */
+ }
+
+ /* device recovered */
+ dev_info(&h->pdev->dev,
+ "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ psa[0], psa[1], psa[2], psa[3],
+ psa[4], psa[5], psa[6], psa[7]);
+
+ return rc; /* success */
+}
+
/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
* tell which kind we're dealing with, so we send the abort both ways. There
* shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
struct CommandList *c;
int rc = 0, rc2 = 0;
+	/* ioaccel2 commands should be aborted via the
+ * accelerated path, since RAID path is unaware of these commands,
+ * but underlying firmware can't handle abort TMF.
+ * Change abort to physical device reset.
+ */
+ if (abort->cmd_type == CMD_IOACCEL2)
+ return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
/* we do not expect to find the swizzled tag in our queue, but
* check anyway just to be sure the assumptions which make this
* the case haven't become wrong.
@@ -2616,6 +4568,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
char msg[256]; /* For debug messaging. */
int ml = 0;
+ u32 tagupper, taglower;
/* Find the controller of the command to be aborted */
h = sdev_to_hba(sc->device);
@@ -2648,9 +4601,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
msg);
return FAILED;
}
-
- ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
- abort->Header.Tag.upper, abort->Header.Tag.lower);
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
+ ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
as = (struct scsi_cmnd *) abort->scsi_cmd;
if (as != NULL)
ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
@@ -2776,6 +4728,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
return NULL;
memset(c, 0, sizeof(*c));
+ c->cmd_type = CMD_SCSI;
c->cmdindex = -1;
c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
@@ -3038,7 +4991,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
- c->SG[0].Ext = 0; /* we are not chaining*/
+ c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (iocommand.buf_size > 0)
@@ -3168,8 +5121,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
- /* we are not chaining */
- c->SG[i].Ext = 0;
+ c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
}
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
@@ -3304,7 +5256,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type)
{
int pci_dir = XFER_NONE;
@@ -3327,9 +5279,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
switch (cmd) {
case HPSA_INQUIRY:
/* are we trying to read a vital product page */
- if (page_code != 0) {
+ if (page_code & VPD_PAGE) {
c->Request.CDB[1] = 0x01;
- c->Request.CDB[2] = page_code;
+ c->Request.CDB[2] = (page_code & 0xff);
}
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
break;
+ case HPSA_GET_RAID_MAP:
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_CISS_READ;
+ c->Request.CDB[1] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+ case BMIC_SENSE_CONTROLLER_PARAMETERS:
+ c->Request.CDBLen = 10;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_READ;
+ c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ break;
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
@@ -3562,7 +5536,8 @@ static inline void finish_cmd(struct CommandList *c)
spin_unlock_irqrestore(&h->lock, flags);
dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
- if (likely(c->cmd_type == CMD_SCSI))
+ if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+ || c->cmd_type == CMD_IOACCEL2))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
@@ -4169,21 +6144,24 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
+ h->msix_vector = MAX_REPLY_QUEUES;
err = pci_enable_msix(h->pdev, hpsa_msix_entries,
- MAX_REPLY_QUEUES);
- if (!err) {
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
- h->intr[i] = hpsa_msix_entries[i].vector;
- h->msix_vector = 1;
- return;
- }
+ h->msix_vector);
if (err > 0) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
- goto default_int_mode;
+ h->msix_vector = err;
+ err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+ h->msix_vector);
+ }
+ if (!err) {
+ for (i = 0; i < h->msix_vector; i++)
+ h->intr[i] = hpsa_msix_entries[i].vector;
+ return;
} else {
dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
err);
+ h->msix_vector = 0;
goto default_int_mode;
}
}
@@ -4336,6 +6314,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+ h->fw_support = readl(&(h->cfgtable->misc_fw_support));
/*
	 * Limit in-command s/g elements to 32 to save dma'able memory.
	 * However spec says if 0, use 31
@@ -4352,6 +6331,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)
/* Find out what task management functions are supported and cache */
h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+ if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+ if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+ dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
+static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+ int i;
+ u32 doorbell_value;
+ unsigned long flags;
+ /* wait until the clear_event_notify bit 6 is cleared by controller. */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+ break;
+ /* delay and try again */
+ msleep(20);
+ }
+}
+
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
@@ -4420,18 +6420,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
return -ENOTSUPP;
h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
/* Update the field, and then ring the doorbell */
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
print_cfg_table(&h->pdev->dev, h->cfgtable);
- if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- dev_warn(&h->pdev->dev,
- "unable to get board into simple mode\n");
- return -ENODEV;
- }
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+ goto error;
h->transMethod = CFGTBL_Trans_Simple;
return 0;
+error:
+ dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+ return -ENODEV;
}
static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
if (h->errinfo_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct io_accel1_cmd),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@ static int hpsa_request_irq(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i;
- if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+ if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
/* If performant mode and MSI-X, use multiple reply queues */
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ for (i = 0; i < h->msix_vector; i++)
rc = request_irq(h->intr[i], msixhandler,
0, h->devname,
&h->q[i]);
} else {
/* Use single reply pool */
- if (h->msix_vector || h->msi_vector) {
+ if (h->msix_vector > 0 || h->msi_vector) {
rc = request_irq(h->intr[h->intr_mode],
msixhandler, 0, h->devname,
&h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@ static void free_irqs(struct ctlr_info *h)
return;
}
- for (i = 0; i < MAX_REPLY_QUEUES; i++)
+ for (i = 0; i < h->msix_vector; i++)
free_irq(h->intr[i], &h->q[i]);
}
@@ -4681,6 +6691,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
hpsa_free_irqs_and_disable_msix(h);
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
+ kfree(h->ioaccel1_blockFetchTable);
kfree(h->blockFetchTable);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@ static void detect_controller_lockup(struct ctlr_info *h)
h->last_heartbeat_timestamp = now;
}
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+ int i;
+ char *event_type;
+
+ /* Clear the driver-requested rescan flag */
+ h->drv_req_rescan = 0;
+
+ /* Ask the controller to clear the events we're handling. */
+ if ((h->transMethod & (CFGTBL_Trans_io_accel1
+ | CFGTBL_Trans_io_accel2)) &&
+ (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+ h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+ event_type = "state change";
+ if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+ event_type = "configuration change";
+ /* Stop sending new RAID offload reqs via the IO accelerator */
+ scsi_block_requests(h->scsi_host);
+ for (i = 0; i < h->ndevices; i++)
+ h->dev[i]->offload_enabled = 0;
+ hpsa_drain_accel_commands(h);
+ /* Set 'accelerator path config change' bit */
+ dev_warn(&h->pdev->dev,
+ "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+ h->events, event_type);
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ /* Set the "clear event notify field update" bit 6 */
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ /* Wait until ctlr clears 'clear event notify field', bit 6 */
+ hpsa_wait_for_clear_event_notify_ack(h);
+ scsi_unblock_requests(h->scsi_host);
+ } else {
+ /* Acknowledge controller notification events. */
+ writel(h->events, &(h->cfgtable->clear_event_notify));
+ writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
+#endif
+ }
+ return;
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+ if (h->drv_req_rescan)
+ return 1;
+
+ if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+ return 0;
+
+ h->events = readl(&(h->cfgtable->event_notify));
+ return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+ unsigned long flags;
+ struct offline_device_entry *d;
+ struct list_head *this, *tmp;
+
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_safe(this, tmp, &h->offline_device_list) {
+ d = list_entry(this, struct offline_device_entry,
+ offline_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ if (!hpsa_volume_offline(h, d->scsi3addr))
+ return 1;
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return 0;
+}
+
+
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
unsigned long flags;
@@ -4768,6 +6865,15 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
detect_controller_lockup(h);
if (h->lockup_detected)
return;
+
+ if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+ scsi_host_get(h->scsi_host);
+ h->drv_req_rescan = 0;
+ hpsa_ack_ctlr_events(h);
+ hpsa_scan_start(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ }
+
spin_lock_irqsave(&h->lock, flags);
if (h->remove_in_progress) {
spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@ reinit_after_soft_reset:
 * the 5 lower bits of the address are used by the hardware and by
 * the driver. See comments in hpsa.h for more info.
*/
-#define COMMANDLIST_ALIGNMENT 32
+#define COMMANDLIST_ALIGNMENT 128
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -4817,7 +6923,9 @@ reinit_after_soft_reset:
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
+ INIT_LIST_HEAD(&h->offline_device_list);
spin_lock_init(&h->lock);
+ spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
spin_lock_init(&h->passthru_count_lock);
rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
h->ndevices = 0;
+ h->hba_mode_enabled = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@ reinit_after_soft_reset:
goto reinit_after_soft_reset;
}
+ /* Enable Accelerated IO path at driver layer */
+ h->acciopath_status = 1;
+
+ h->drv_req_rescan = 0;
+
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
@@ -5034,6 +7148,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->reply_pool, h->reply_pool_dhandle);
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
+ kfree(h->ioaccel1_blockFetchTable);
+ kfree(h->ioaccel2_blockFetchTable);
kfree(h->hba_inquiry_data);
pci_disable_device(pdev);
pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@ static struct pci_driver hpsa_pci_driver = {
* bits of the command address.
*/
static void calc_bucket_map(int bucket[], int num_buckets,
- int nsgs, int *bucket_map)
+ int nsgs, int min_blocks, int *bucket_map)
{
int i, j, b, size;
- /* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
/* Note, bucket_map must have nsgs+1 entries. */
for (i = 0; i <= nsgs; i++) {
/* Compute size of a command with i SG entries */
- size = i + MINIMUM_TRANSFER_BLOCKS;
+ size = i + min_blocks;
b = num_buckets; /* Assume the biggest bucket */
/* Find the bucket that is just big enough */
- for (j = 0; j < 8; j++) {
+ for (j = 0; j < num_buckets; j++) {
if (bucket[j] >= size) {
b = j;
break;
@@ -5098,10 +7211,16 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
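calc_bucket_map() assigns each possible SG-entry count the smallest block-fetch bucket that can hold it. The loop is plain C with no kernel dependencies, so it can be exercised directly; the table below is the performant-mode bft[] with SG_ENTRIES_IN_CMD assumed to be 32 (so the last bucket is 36):

#include <stdio.h>

/* The same search as the driver's calc_bucket_map(), reproduced for illustration. */
static void calc_bucket_map(int bucket[], int num_buckets,
		int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + min_blocks;	/* blocks needed for a command with i SG entries */
		b = num_buckets;	/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;	/* first bucket that is just big enough */
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 36};
	int map[33];
	int i;

	calc_bucket_map(bft, 8, 32, 4, map);
	for (i = 0; i <= 32; i += 8)
		printf("nsgs=%2d -> bucket %d (%d blocks)\n", i, map[i], bft[map[i]]);
	return 0;
}

So a command with 8 SG entries needs 12 blocks and maps to bucket 4, while a fully loaded 32-entry command maps to the last, 36-block bucket.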
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
int i;
unsigned long register_value;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ (trans_support & CFGTBL_Trans_use_short_tags) |
+ CFGTBL_Trans_enable_directed_msix |
+ (trans_support & (CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_io_accel2));
+ struct access_method access = SA5_performant_access;
/* This is a bit complicated. There are 8 registers on
* the controller which we write to to tell it 8 different
@@ -5121,6 +7240,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+ int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19,
+ HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+ BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+ BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+ 16 * MIN_IOACCEL2_BFT_ENTRY);
+ BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
- SG_ENTRIES_IN_CMD, h->blockFetchTable);
+ SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
@@ -5150,9 +7279,22 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
&h->transtable->RepQAddr[i].lower);
}
- writel(CFGTBL_Trans_Performant | use_short_tags |
- CFGTBL_Trans_enable_directed_msix,
- &(h->cfgtable->HostWrite.TransportRequest));
+ writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+ writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+ /*
+ * enable outbound interrupt coalescing in accelerator mode;
+ */
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ access = SA5_ioaccel_mode1_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ access = SA5_ioaccel_mode2_access;
+ writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+ writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+ }
+ }
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
return;
}
/* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ h->access = access;
+ h->transMethod = transMethod;
+
+ if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+ (trans_support & CFGTBL_Trans_io_accel2)))
+ return;
+
+ if (trans_support & CFGTBL_Trans_io_accel1) {
+ /* Set up I/O accelerator mode */
+ for (i = 0; i < h->nreply_queues; i++) {
+ writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+ h->reply_queue[i].current_entry =
+ readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+ }
+ bft[7] = h->ioaccel_maxsg + 8;
+ calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+ h->ioaccel1_blockFetchTable);
+
+ /* initialize all reply queue entries to unused */
+ memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+ h->reply_pool_size);
+
+ /* set all the constant fields in the accelerator command
+ * frames once at init time to save CPU cycles later.
+ */
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+ cp->function = IOACCEL1_FUNCTION_SCSIIO;
+ cp->err_info = (u32) (h->errinfo_pool_dhandle +
+ (i * sizeof(struct ErrorInfo)));
+ cp->err_info_len = sizeof(struct ErrorInfo);
+ cp->sgl_offset = IOACCEL1_SGLOFFSET;
+ cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+ cp->timeout_sec = 0;
+ cp->ReplyQueue = 0;
+ cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+ DIRECT_LOOKUP_BIT;
+ cp->Tag.upper = 0;
+ cp->host_addr.lower =
+ (u32) (h->ioaccel_cmd_pool_dhandle +
+ (i * sizeof(struct io_accel1_cmd)));
+ cp->host_addr.upper = 0;
+ }
+ } else if (trans_support & CFGTBL_Trans_io_accel2) {
+ u64 cfg_offset, cfg_base_addr_index;
+ u32 bft2_offset, cfg_base_addr;
+ int rc;
+
+ rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+ &cfg_base_addr_index, &cfg_offset);
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+ bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+ calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+ 4, h->ioaccel2_blockFetchTable);
+ bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+ BUILD_BUG_ON(offsetof(struct CfgTable,
+ io_accel_request_size_offset) != 0xb8);
+ h->ioaccel2_bft2_regs =
+ remap_pci_mem(pci_resource_start(h->pdev,
+ cfg_base_addr_index) +
+ cfg_offset + bft2_offset,
+ ARRAY_SIZE(bft2) *
+ sizeof(*h->ioaccel2_bft2_regs));
+ for (i = 0; i < ARRAY_SIZE(bft2); i++)
+ writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+ }
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ hpsa_wait_for_mode_change_ack(h);
+}
+
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+ /* Command structures must be aligned on a 128-byte boundary
+ * because the 7 lower bits of the address are used by the
+ * hardware.
+ */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+ BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+ IOACCEL1_COMMANDLIST_ALIGNMENT);
+ h->ioaccel_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ &(h->ioaccel_cmd_pool_dhandle));
+
+ h->ioaccel1_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel_cmd_pool == NULL) ||
+ (h->ioaccel1_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+ kfree(h->ioaccel1_blockFetchTable);
+ return 1;
+}
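The 128-byte alignment demanded just above exists because the controller and driver stash information in the low bits of each command's bus address; when both the pool base and the element size honor the alignment, every element address has its bottom seven bits clear. A small userspace sketch of that invariant (the struct, sizes and posix_memalign() use are illustrative, not how the driver allocates):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define CMD_ALIGNMENT 128	/* mirrors IOACCEL1_COMMANDLIST_ALIGNMENT */

/* Toy stand-in for a command frame, padded to a multiple of 128 bytes. */
struct toy_cmd {
	uint8_t payload[96];
	uint8_t pad[32];
};

/* Compile-time equivalent of the BUILD_BUG_ON() above. */
_Static_assert(sizeof(struct toy_cmd) % CMD_ALIGNMENT == 0,
	       "pool elements would drift off the 128-byte grid");

int main(void)
{
	void *pool;
	int i;

	if (posix_memalign(&pool, CMD_ALIGNMENT, 16 * sizeof(struct toy_cmd)))
		return 1;
	for (i = 0; i < 16; i++) {
		uintptr_t addr = (uintptr_t)pool + (uintptr_t)i * sizeof(struct toy_cmd);
		if (addr & (CMD_ALIGNMENT - 1)) {	/* low 7 bits must stay clear */
			printf("slot %d misaligned\n", i);
			return 1;
		}
	}
	printf("all 16 slots are %d-byte aligned\n", CMD_ALIGNMENT);
	free(pool);
	return 0;
}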
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+ /* Allocate ioaccel2 mode command blocks and block fetch table */
+
+ h->ioaccel_maxsg =
+ readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+ if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+ h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+ BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+ IOACCEL2_COMMANDLIST_ALIGNMENT);
+ h->ioaccel2_cmd_pool =
+ pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ &(h->ioaccel2_cmd_pool_dhandle));
+
+ h->ioaccel2_blockFetchTable =
+ kmalloc(((h->ioaccel_maxsg + 1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->ioaccel2_cmd_pool == NULL) ||
+ (h->ioaccel2_blockFetchTable == NULL))
+ goto clean_up;
+
+ memset(h->ioaccel2_cmd_pool, 0,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+ return 0;
+
+clean_up:
+ if (h->ioaccel2_cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+ kfree(h->ioaccel2_blockFetchTable);
+ return 1;
}
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ unsigned long transMethod = CFGTBL_Trans_Performant |
+ CFGTBL_Trans_use_short_tags;
int i;
if (hpsa_simple_mode)
return;
+	/* Check for I/O accelerator mode support */
+	trans_support = readl(&(h->cfgtable->TransportSupport));
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+ transMethod |= CFGTBL_Trans_io_accel1 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+ goto clean_up;
+ } else {
+ if (trans_support & CFGTBL_Trans_io_accel2) {
+ transMethod |= CFGTBL_Trans_io_accel2 |
+ CFGTBL_Trans_enable_directed_msix;
+ if (ioaccel2_alloc_cmds_and_bft(h))
+ goto clean_up;
+ }
+ }
+
+ /* TODO, check that this next line h->nreply_queues is correct */
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
- h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
+ h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h,
- trans_support & CFGTBL_Trans_use_short_tags);
-
+ hpsa_enter_performant_mode(h, trans_support);
return;
clean_up:
@@ -5212,6 +7515,31 @@ clean_up:
kfree(h->blockFetchTable);
}
+static int is_accelerated_cmd(struct CommandList *c)
+{
+ return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+ struct CommandList *c = NULL;
+ unsigned long flags;
+ int accel_cmds_out;
+
+ do { /* wait for all outstanding commands to drain out */
+ accel_cmds_out = 0;
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(c, &h->cmpQ, list)
+ accel_cmds_out += is_accelerated_cmd(c);
+ list_for_each_entry(c, &h->reqQ, list)
+ accel_cmds_out += is_accelerated_cmd(c);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (accel_cmds_out <= 0)
+ break;
+ msleep(100);
+ } while (1);
+}
+
/*
* This is it. Register the PCI driver information for the cards we control
* the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@ static void __exit hpsa_cleanup(void)
pci_unregister_driver(&hpsa_pci_driver);
}
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+ VERIFY_OFFSET(structure_size, 0);
+ VERIFY_OFFSET(volume_blk_size, 4);
+ VERIFY_OFFSET(volume_blk_cnt, 8);
+ VERIFY_OFFSET(phys_blk_shift, 16);
+ VERIFY_OFFSET(parity_rotation_shift, 17);
+ VERIFY_OFFSET(strip_size, 18);
+ VERIFY_OFFSET(disk_starting_blk, 20);
+ VERIFY_OFFSET(disk_blk_cnt, 28);
+ VERIFY_OFFSET(data_disks_per_row, 36);
+ VERIFY_OFFSET(metadata_disks_per_row, 38);
+ VERIFY_OFFSET(row_cnt, 40);
+ VERIFY_OFFSET(layout_map_count, 42);
+ VERIFY_OFFSET(flags, 44);
+ VERIFY_OFFSET(dekindex, 46);
+ /* VERIFY_OFFSET(reserved, 48 */
+ VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+ VERIFY_OFFSET(IU_type, 0);
+ VERIFY_OFFSET(direction, 1);
+ VERIFY_OFFSET(reply_queue, 2);
+ /* VERIFY_OFFSET(reserved1, 3); */
+ VERIFY_OFFSET(scsi_nexus, 4);
+ VERIFY_OFFSET(Tag, 8);
+ VERIFY_OFFSET(cdb, 16);
+ VERIFY_OFFSET(cciss_lun, 32);
+ VERIFY_OFFSET(data_len, 40);
+ VERIFY_OFFSET(cmd_priority_task_attr, 44);
+ VERIFY_OFFSET(sg_count, 45);
+ /* VERIFY_OFFSET(reserved3 */
+ VERIFY_OFFSET(err_ptr, 48);
+ VERIFY_OFFSET(err_len, 56);
+ /* VERIFY_OFFSET(reserved4 */
+ VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+ BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+ VERIFY_OFFSET(dev_handle, 0x00);
+ VERIFY_OFFSET(reserved1, 0x02);
+ VERIFY_OFFSET(function, 0x03);
+ VERIFY_OFFSET(reserved2, 0x04);
+ VERIFY_OFFSET(err_info, 0x0C);
+ VERIFY_OFFSET(reserved3, 0x10);
+ VERIFY_OFFSET(err_info_len, 0x12);
+ VERIFY_OFFSET(reserved4, 0x13);
+ VERIFY_OFFSET(sgl_offset, 0x14);
+ VERIFY_OFFSET(reserved5, 0x15);
+ VERIFY_OFFSET(transfer_len, 0x1C);
+ VERIFY_OFFSET(reserved6, 0x20);
+ VERIFY_OFFSET(io_flags, 0x24);
+ VERIFY_OFFSET(reserved7, 0x26);
+ VERIFY_OFFSET(LUN, 0x34);
+ VERIFY_OFFSET(control, 0x3C);
+ VERIFY_OFFSET(CDB, 0x40);
+ VERIFY_OFFSET(reserved8, 0x50);
+ VERIFY_OFFSET(host_context_flags, 0x60);
+ VERIFY_OFFSET(timeout_sec, 0x62);
+ VERIFY_OFFSET(ReplyQueue, 0x64);
+ VERIFY_OFFSET(reserved9, 0x65);
+ VERIFY_OFFSET(Tag, 0x68);
+ VERIFY_OFFSET(host_addr, 0x70);
+ VERIFY_OFFSET(CISS_LUN, 0x78);
+ VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
module_init(hpsa_init);
module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c328349c83..44235a27e1b6 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,6 +46,15 @@ struct hpsa_scsi_dev_t {
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char raid_level; /* from inquiry page 0xC1 */
+ unsigned char volume_offline; /* discovered via TUR or VPD */
+ u32 ioaccel_handle;
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ * offload request to mirror drive
+ */
+ struct raid_map_data raid_map; /* I/O accelerator RAID map */
+
};
struct reply_pool {
@@ -55,6 +64,46 @@ struct reply_pool {
u32 current_entry;
};
+#pragma pack(1)
+struct bmic_controller_parameters {
+ u8 led_flags;
+ u8 enable_command_list_verification;
+ u8 backed_out_write_drives;
+ u16 stripes_for_parity;
+ u8 parity_distribution_mode_flags;
+ u16 max_driver_requests;
+ u16 elevator_trend_count;
+ u8 disable_elevator;
+ u8 force_scan_complete;
+ u8 scsi_transfer_mode;
+ u8 force_narrow;
+ u8 rebuild_priority;
+ u8 expand_priority;
+ u8 host_sdb_asic_fix;
+ u8 pdpi_burst_from_host_disabled;
+ char software_name[64];
+ char hardware_name[32];
+ u8 bridge_revision;
+ u8 snapshot_priority;
+ u32 os_specific;
+ u8 post_prompt_timeout;
+ u8 automatic_drive_slamming;
+ u8 reserved1;
+ u8 nvram_flags;
+ u8 cache_nvram_flags;
+ u8 drive_config_flags;
+ u16 reserved2;
+ u8 temp_warning_level;
+ u8 temp_shutdown_level;
+ u8 temp_condition_reset;
+ u8 max_coalesce_commands;
+ u32 max_coalesce_delay;
+ u8 orca_password[4];
+ u8 access_id[16];
+ u8 reserved[356];
+};
+#pragma pack()
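+#pragma pack(1) is what keeps every field of this structure at the exact byte offset the firmware expects; without it the compiler would insert padding in front of the u16/u32 members. A toy comparison (hypothetical two-field struct, typical ABIs assumed, not part of the driver):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Natural layout: most ABIs pad the u8 so the u16 starts on a 2-byte boundary. */
struct natural {
	uint8_t  flags;
	uint16_t stripes_for_parity;
};

#pragma pack(1)
struct packed {
	uint8_t  flags;
	uint16_t stripes_for_parity;	/* now at offset 1, matching a byte-exact wire format */
};
#pragma pack()

int main(void)
{
	printf("natural: sizeof=%zu offsetof=%zu\n",
	       sizeof(struct natural), offsetof(struct natural, stripes_for_parity));
	printf("packed:  sizeof=%zu offsetof=%zu\n",
	       sizeof(struct packed), offsetof(struct packed, stripes_for_parity));
	return 0;
}

On a typical ABI the natural version reports sizeof=4/offset=2 while the packed version reports sizeof=3/offset=1, which is why bmic_controller_parameters is wrapped in the pack(1) pragma.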
+
struct ctlr_info {
int ctlr;
char devname[8];
@@ -80,6 +129,7 @@ struct ctlr_info {
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
+ char hba_mode_enabled;
/* queue and queue Info */
struct list_head reqQ;
@@ -95,6 +145,10 @@ struct ctlr_info {
/* pointers to command and error info pool */
struct CommandList *cmd_pool;
dma_addr_t cmd_pool_dhandle;
+ struct io_accel1_cmd *ioaccel_cmd_pool;
+ dma_addr_t ioaccel_cmd_pool_dhandle;
+ struct io_accel2_cmd *ioaccel2_cmd_pool;
+ dma_addr_t ioaccel2_cmd_pool_dhandle;
struct ErrorInfo *errinfo_pool;
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
@@ -128,7 +182,14 @@ struct ctlr_info {
u8 nreply_queues;
dma_addr_t reply_pool_dhandle;
u32 *blockFetchTable;
+ u32 *ioaccel1_blockFetchTable;
+ u32 *ioaccel2_blockFetchTable;
+ u32 *ioaccel2_bft2_regs;
unsigned char *hba_inquiry_data;
+ u32 driver_support;
+ u32 fw_support;
+ int ioaccel_support;
+ int ioaccel_maxsg;
u64 last_intr_timestamp;
u32 last_heartbeat;
u64 last_heartbeat_timestamp;
@@ -161,7 +222,35 @@ struct ctlr_info {
#define HPSATMF_LOG_QRY_TASK (1 << 23)
#define HPSATMF_LOG_QRY_TSET (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
+ u32 events;
+#define CTLR_STATE_CHANGE_EVENT (1 << 0)
+#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
+#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
+#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
+#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
+#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
+#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
+
+#define RESCAN_REQUIRED_EVENT_BITS \
+ (CTLR_STATE_CHANGE_EVENT | \
+ CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+ CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
+ CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
+ CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
+ CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
+ spinlock_t offline_device_lock;
+ struct list_head offline_device_list;
+ int acciopath_status;
+ int drv_req_rescan; /* flag for driver to request rescan event */
+ int raid_offload_debug;
};
+
+struct offline_device_entry {
+ unsigned char scsi3addr[8];
+ struct list_head offline_list;
+};
+
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
@@ -242,6 +331,14 @@ struct ctlr_info {
#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0
+
+/*
+ * Inbound Post Queue offsets for IO Accelerator Mode 2
+ */
+#define IOACCEL2_INBOUND_POSTQ_32 0x48
+#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
+#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4
+
/*
Send the command to the hardware
*/
@@ -254,6 +351,18 @@ static void SA5_submit_command(struct ctlr_info *h,
(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
+static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+ c->Header.Tag.lower);
+ if (c->cmd_type == CMD_IOACCEL2)
+ writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+ else
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
/*
* This card is the opposite of the other cards.
* 0 turns interrupts on...
@@ -387,6 +496,50 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
+#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
+
+static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
+ true : false;
+}
+
+#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
+#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
+#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
+#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
+
+static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+{
+ u64 register_value;
+ struct reply_pool *rq = &h->reply_queue[q];
+ unsigned long flags;
+
+ BUG_ON(q >= h->nreply_queues);
+
+ register_value = rq->head[rq->current_entry];
+ if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
+ rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
+ if (++rq->current_entry == rq->size)
+ rq->current_entry = 0;
+ /*
+ * @todo
+ *
+ * Don't really need to write the new index after each command,
+ * but with current driver design this is easiest.
+ */
+ wmb();
+ writel((q << 24) | rq->current_entry, h->vaddr +
+ IOACCEL_MODE1_CONSUMER_INDEX);
+ spin_lock_irqsave(&h->lock, flags);
+ h->commands_outstanding--;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+ return (unsigned long) register_value;
+}
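The consumer-index write in SA5_ioaccel_mode1_completed() packs the reply-queue number into the top byte and the ring position into the remaining bits. A quick sketch of that encoding (queue and entry values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t  q = 2;			/* hypothetical reply queue number */
	uint32_t current_entry = 37;	/* hypothetical position within that ring */
	uint32_t doorbell = ((uint32_t)q << 24) | current_entry;

	printf("doorbell=0x%08x queue=%u entry=%u\n",
	       doorbell, doorbell >> 24, doorbell & 0x00ffffff);
	return 0;
}

This prints doorbell=0x02000025 queue=2 entry=37, the same value the driver writes to IOACCEL_MODE1_CONSUMER_INDEX.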
+
static struct access_method SA5_access = {
SA5_submit_command,
SA5_intr_mask,
@@ -395,6 +548,22 @@ static struct access_method SA5_access = {
SA5_completed,
};
+static struct access_method SA5_ioaccel_mode1_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_ioaccel_mode1_intr_pending,
+ SA5_ioaccel_mode1_completed,
+};
+
+static struct access_method SA5_ioaccel_mode2_access = {
+ SA5_submit_command_ioaccel2,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
static struct access_method SA5_performant_access = {
SA5_submit_command,
SA5_performant_intr_mask,
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index bfc8c4ea66f8..b5cc7052339f 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
#define SENSEINFOBYTES 32 /* may vary between hbas */
#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
#define HPSA_SG_CHAIN 0x80000000
+#define HPSA_SG_LAST 0x40000000
#define MAXREPLYQS 256
/* Command Status value */
@@ -41,6 +42,8 @@
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
+#define CMD_IOACCEL_DISABLED 0x000E
+
/* Unit Attentions ASC's as defined for the MSA2012sa */
#define POWER_OR_RESET 0x29
@@ -79,8 +82,9 @@
#define ATTR_ACA 0x07
/* cdb type */
-#define TYPE_CMD 0x00
-#define TYPE_MSG 0x01
+#define TYPE_CMD 0x00
+#define TYPE_MSG 0x01
+#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */
/* Message Types */
#define HPSA_TASK_MANAGEMENT 0x00
@@ -125,9 +129,12 @@
#define CFGTBL_AccCmds 0x00000001l
#define DOORBELL_CTLR_RESET 0x00000004l
#define DOORBELL_CTLR_RESET2 0x00000020l
+#define DOORBELL_CLEAR_EVENTS 0x00000040l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_io_accel1 0x00000080l
+#define CFGTBL_Trans_io_accel2 0x00000100l
#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_Trans_enable_directed_msix (1 << 30)
@@ -135,6 +142,28 @@
#define CFGTBL_BusType_Ultra3 0x00000002l
#define CFGTBL_BusType_Fibre1G 0x00000100l
#define CFGTBL_BusType_Fibre2G 0x00000200l
+
+/* VPD Inquiry types */
+#define HPSA_VPD_SUPPORTED_PAGES 0x00
+#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
+#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
+#define HPSA_VPD_LV_STATUS 0xC3
+#define HPSA_VPD_HEADER_SZ 4
+
+/* Logical volume states */
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1
+#define HPSA_LV_OK 0x0
+#define HPSA_LV_UNDERGOING_ERASE 0x0F
+#define HPSA_LV_UNDERGOING_RPI 0x12
+#define HPSA_LV_PENDING_RPI 0x13
+#define HPSA_LV_ENCRYPTED_NO_KEY 0x14
+#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16
+#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define HPSA_LV_PENDING_ENCRYPTION 0x19
+#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A
+
struct vals32 {
u32 lower;
u32 upper;
@@ -162,9 +191,50 @@ struct InquiryData {
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+#define HPSA_REPORT_PHYS_EXTENDED 0x02
+#define HPSA_CISS_READ 0xc0 /* CISS Read */
+#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */
+
+#define RAID_MAP_MAX_ENTRIES 256
+
+struct raid_map_disk_data {
+ u32 ioaccel_handle; /**< Handle to access this disk via the
+ * I/O accelerator */
+ u8 xor_mult[2]; /**< XOR multipliers for this position,
+ * valid for data disks only */
+ u8 reserved[2];
+};
+
+struct raid_map_data {
+ u32 structure_size; /* Size of entire structure in bytes */
+ u32 volume_blk_size; /* bytes / block in the volume */
+ u64 volume_blk_cnt; /* logical blocks on the volume */
+ u8 phys_blk_shift; /* Shift factor to convert between
+ * units of logical blocks and physical
+ * disk blocks */
+ u8 parity_rotation_shift; /* Shift factor to convert between units
+ * of logical stripes and physical
+ * stripes */
+ u16 strip_size; /* blocks used on each disk / stripe */
+ u64 disk_starting_blk; /* First disk block used in volume */
+ u64 disk_blk_cnt; /* disk blocks used by volume / disk */
+ u16 data_disks_per_row; /* data disk entries / row in the map */
+ u16 metadata_disks_per_row; /* mirror/parity disk entries / row
+ * in the map */
+ u16 row_cnt; /* rows in each layout map */
+ u16 layout_map_count; /* layout maps (1 map per mirror/parity
+ * group) */
+ u16 flags; /* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
+ u16 dekindex; /* Data encryption key index. */
+ u8 reserved[16];
+ struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
+};
+
struct ReportLUNdata {
u8 LUNListLength[4];
- u32 reserved;
+ u8 extended_response_flag;
+ u8 reserved[3];
u8 LUN[HPSA_MAX_LUN][8];
};
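
For reference, a much-simplified sketch of how the raid_map_data fields above can turn a logical block number into the ioaccel handle of the disk holding it, assuming a plain striped layout and ignoring the parity rotation, mirroring and endianness handling a real driver needs:

/* Sketch only: logical block -> ioaccel handle for a simple striped
 * volume, using the raid_map_data/raid_map_disk_data fields above.
 * Real code must also honour parity rotation, mirroring, byte order,
 * and use do_div() for 64-bit division on 32-bit kernels.
 */
static u32 raid_map_lookup(const struct raid_map_data *map, u64 lba)
{
	u64 stripe = lba / map->strip_size;
	u32 col    = stripe % map->data_disks_per_row;
	u64 row    = (stripe / map->data_disks_per_row) % map->row_cnt;
	u32 entry  = row * (map->data_disks_per_row +
			    map->metadata_disks_per_row) + col;

	return map->data[entry].ioaccel_handle;
}
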
@@ -187,6 +257,7 @@ struct SenseSubsystem_info {
#define BMIC_CACHE_FLUSH 0xc2
#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
#define BMIC_FLASH_FIRMWARE 0xF7
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
/* Command List Structure */
union SCSI3Addr {
@@ -283,6 +354,8 @@ struct ErrorInfo {
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
+#define CMD_IOACCEL1 0x04
+#define CMD_IOACCEL2 0x05
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
@@ -314,7 +387,6 @@ struct CommandList {
int cmd_type;
long cmdindex;
struct list_head list;
- struct request *rq;
struct completion *waiting;
void *scsi_cmd;
@@ -327,16 +399,183 @@ struct CommandList {
*/
#define IS_32_BIT ((8 - sizeof(long))/4)
#define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (4)
-#define PAD_64 (4)
+#define PAD_32 (40)
+#define PAD_64 (12)
#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
u8 pad[COMMANDLIST_PAD];
};
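
The IS_32_BIT / IS_64_BIT pair above is a compile-time arch selector: sizeof(long) is 4 on 32-bit builds and 8 on 64-bit builds, so exactly one macro evaluates to 1 and COMMANDLIST_PAD resolves to PAD_32 or PAD_64, presumably so sizeof(struct CommandList) stays at the size the controller expects. A tiny stand-alone demonstration of the idiom, reusing the same definitions:

#include <stdio.h>

#define IS_32_BIT ((8 - sizeof(long))/4)	/* 1 on 32-bit, 0 on 64-bit */
#define IS_64_BIT (!IS_32_BIT)
#define PAD_32 (40)
#define PAD_64 (12)
#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)

int main(void)
{
	/* Prints 12 on a 64-bit build, 40 on a 32-bit build. */
	printf("COMMANDLIST_PAD = %zu\n", (size_t)COMMANDLIST_PAD);
	return 0;
}
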
+/* Max S/G elements in I/O accelerator command */
+#define IOACCEL1_MAXSGENTRIES 24
+#define IOACCEL2_MAXSGENTRIES 28
+
+/*
+ * Structure for I/O accelerator (mode 1) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel1_cmd {
+ u16 dev_handle; /* 0x00 - 0x01 */
+ u8 reserved1; /* 0x02 */
+ u8 function; /* 0x03 */
+ u8 reserved2[8]; /* 0x04 - 0x0B */
+ u32 err_info; /* 0x0C - 0x0F */
+ u8 reserved3[2]; /* 0x10 - 0x11 */
+ u8 err_info_len; /* 0x12 */
+ u8 reserved4; /* 0x13 */
+ u8 sgl_offset; /* 0x14 */
+ u8 reserved5[7]; /* 0x15 - 0x1B */
+ u32 transfer_len; /* 0x1C - 0x1F */
+ u8 reserved6[4]; /* 0x20 - 0x23 */
+ u16 io_flags; /* 0x24 - 0x25 */
+ u8 reserved7[14]; /* 0x26 - 0x33 */
+ u8 LUN[8]; /* 0x34 - 0x3B */
+ u32 control; /* 0x3C - 0x3F */
+ u8 CDB[16]; /* 0x40 - 0x4F */
+ u8 reserved8[16]; /* 0x50 - 0x5F */
+ u16 host_context_flags; /* 0x60 - 0x61 */
+ u16 timeout_sec; /* 0x62 - 0x63 */
+ u8 ReplyQueue; /* 0x64 */
+ u8 reserved9[3]; /* 0x65 - 0x67 */
+ struct vals32 Tag; /* 0x68 - 0x6F */
+ struct vals32 host_addr; /* 0x70 - 0x77 */
+ u8 CISS_LUN[8]; /* 0x78 - 0x7F */
+ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
+#define IOACCEL1_PAD_64 0
+#define IOACCEL1_PAD_32 0
+#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
+ IS_64_BIT * IOACCEL1_PAD_64)
+ u8 pad[IOACCEL1_PAD];
+};
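
The comment above requires sizeof(struct io_accel1_cmd) to be a multiple of 128 bytes. One hedged sketch of how such an invariant can be enforced at build time (the constant name is illustrative; hpsa.c may express this differently or not at all):

#include <linux/bug.h>

#define IOACCEL1_COMMANDLIST_ALIGNMENT 128	/* illustrative name */

static inline void hpsa_check_ioaccel1_size(void)
{
	/* Fails the build if padding ever breaks the 128-byte multiple. */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
		     IOACCEL1_COMMANDLIST_ALIGNMENT != 0);
}
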
+
+#define IOACCEL1_FUNCTION_SCSIIO 0x00
+#define IOACCEL1_SGLOFFSET 32
+
+#define IOACCEL1_IOFLAGS_IO_REQ 0x4000
+#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F
+#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16
+
+#define IOACCEL1_CONTROL_NODATAXFER 0x00000000
+#define IOACCEL1_CONTROL_DATA_OUT 0x01000000
+#define IOACCEL1_CONTROL_DATA_IN 0x02000000
+#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800
+#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
+#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000
+#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100
+#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200
+#define IOACCEL1_CONTROL_ACA 0x00000400
+
+#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013
+
+#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060
+
+struct ioaccel2_sg_element {
+ u64 address;
+ u32 length;
+ u8 reserved[3];
+ u8 chain_indicator;
+#define IOACCEL2_CHAIN 0x80
+};
+
+/*
+ * SCSI Response Format structure for IO Accelerator Mode 2
+ */
+struct io_accel2_scsi_response {
+ u8 IU_type;
+#define IOACCEL2_IU_TYPE_SRF 0x60
+ u8 reserved1[3];
+ u8 req_id[4]; /* request identifier */
+ u8 reserved2[4];
+ u8 serv_response; /* service response */
+#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000
+#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001
+#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002
+#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003
+#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004
+#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005
+ u8 status; /* status */
+#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00
+#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02
+#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08
+#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18
+#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
+#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
+#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
+ u8 data_present; /* low 2 bits */
+#define IOACCEL2_NO_DATAPRESENT 0x000
+#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
+#define IOACCEL2_SENSE_DATA_PRESENT 0x002
+#define IOACCEL2_RESERVED 0x003
+ u8 sense_data_len; /* sense/response data length */
+ u8 resid_cnt[4]; /* residual count */
+ u8 sense_data_buff[32]; /* sense/response data buffer */
+};
+
+#define IOACCEL2_64_PAD 76
+#define IOACCEL2_32_PAD 76
+#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
+ IS_64_BIT * IOACCEL2_64_PAD)
+/*
+ * Structure for I/O accelerator (mode 2 or m2) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel2_cmd {
+ u8 IU_type; /* IU Type */
+ u8 direction; /* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */
+ /* 0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */
+ /* 0=off, 1=on */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 reserved1; /* Reserved */
+ u32 scsi_nexus; /* Device Handle */
+ u32 Tag; /* cciss tag, lower 4 bytes only */
+ u32 tweak_lower; /* Encryption tweak, lower 4 bytes */
+ u8 cdb[16]; /* SCSI Command Descriptor Block */
+ u8 cciss_lun[8]; /* 8 byte SCSI address */
+ u32 data_len; /* Total bytes to transfer */
+ u8 cmd_priority_task_attr; /* priority and task attrs */
+#define IOACCEL2_PRIORITY_MASK 0x78
+#define IOACCEL2_ATTR_MASK 0x07
+ u8 sg_count; /* Number of sg elements */
+ u16 dekindex; /* Data encryption key index */
+ u64 err_ptr; /* Error Pointer */
+ u32 err_len; /* Error Length*/
+ u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
+ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
+ struct io_accel2_scsi_response error_data;
+ u8 pad[IOACCEL2_PAD];
+};
+
+/*
+ * defines for Mode 2 command struct
+ * FIXME: this can't be all I need mfm
+ */
+#define IOACCEL2_IU_TYPE 0x40
+#define IOACCEL2_IU_TMF_TYPE 0x41
+#define IOACCEL2_DIR_NO_DATA 0x00
+#define IOACCEL2_DIR_DATA_IN 0x01
+#define IOACCEL2_DIR_DATA_OUT 0x02
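
As the field comments on io_accel2_cmd indicate, its single direction byte packs the transfer direction (bits 0-1), the memory type (bit 2) and the encryption flag (bit 3). A small sketch of composing it from the definitions above (the helper name and parameters are illustrative):

/* Sketch: build the io_accel2_cmd direction byte. */
static u8 ioaccel2_direction_byte(bool write, bool encrypted)
{
	u8 dir = write ? IOACCEL2_DIR_DATA_OUT : IOACCEL2_DIR_DATA_IN;

	if (encrypted)
		dir |= IOACCEL2_DIRECTION_ENCRYPT_MASK;	/* bit 3: encrypted I/O */

	/* memtype bit (IOACCEL2_DIRECTION_MEMTYPE_MASK) left 0 => PCIe */
	return dir;
}
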
+/*
+ * SCSI Task Management Request format for Accelerator Mode 2
+ */
+struct hpsa_tmf_struct {
+ u8 iu_type; /* Information Unit Type */
+ u8 reply_queue; /* Reply Queue ID */
+ u8 tmf; /* Task Management Function */
+ u8 reserved1; /* byte 3 Reserved */
+ u32 it_nexus; /* SCSI I-T Nexus */
+ u8 lun_id[8]; /* LUN ID for TMF request */
+ struct vals32 Tag; /* cciss tag associated w/ request */
+ struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+ u64 error_ptr; /* Error Pointer */
+ u32 error_len; /* Error Length */
+};
+
/* Configuration Table Structure */
struct HostWrite {
u32 TransportRequest;
- u32 Reserved;
+ u32 command_pool_addr_hi;
u32 CoalIntDelay;
u32 CoalIntCount;
};
@@ -344,6 +583,9 @@ struct HostWrite {
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
#define MEMQ_MODE 0x08
+#define IOACCEL_MODE_1 0x80
+
+#define DRIVER_SUPPORT_UA_ENABLE 0x00000001
struct CfgTable {
u8 Signature[4];
@@ -373,8 +615,18 @@ struct CfgTable {
u32 misc_fw_support; /* offset 0x78 */
#define MISC_FW_DOORBELL_RESET (0x02)
#define MISC_FW_DOORBELL_RESET2 (0x010)
+#define MISC_FW_RAID_OFFLOAD_BASIC (0x020)
+#define MISC_FW_EVENT_NOTIFY (0x080)
u8 driver_version[32];
-
+ u32 max_cached_write_size;
+ u8 driver_scratchpad[16];
+ u32 max_error_info_length;
+ u32 io_accel_max_embedded_sg_count;
+ u32 io_accel_request_size_offset;
+ u32 event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+ u32 clear_event_notify;
};
#define NUM_BLOCKFETCH_ENTRIES 8
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bf9eca845166..56f8a861ed72 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -589,7 +589,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
}
err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
- IRQF_DISABLED, "ibmvstgt", target);
+ 0, "ibmvstgt", target);
if (err)
goto req_irq_failed;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index bf028218ac36..b1c4d831137d 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt)
write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
write1_io(0, IO_INTR_MASK); /* allow all ints */
x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
- if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
+ if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
printk("in2000_detect: Unable to allocate IRQ.\n");
detect_count--;
continue;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 280d5af113d1..e5dae7b54d9a 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2931,7 +2931,7 @@ static int initio_probe_one(struct pci_dev *pdev,
shost->base = host->addr;
shost->sg_tablesize = TOTAL_SG_ENTRY;
- error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+ error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
if (error < 0) {
printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
goto out_free_scbs;
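
The ibmvstgt, in2000 and initio hunks above all make the same mechanical change: IRQF_DISABLED has long been a no-op (interrupt handlers already run with local interrupts disabled), so it is dropped and only still-meaningful flags such as IRQF_SHARED are kept. The resulting call shape, with illustrative handler and device names, inside a probe routine:

/* Sketch: shared interrupt request without the obsolete IRQF_DISABLED. */
error = request_irq(pdev->irq, my_isr, IRQF_SHARED, "my_driver", shost);
if (error) {
	dev_err(&pdev->dev, "unable to request IRQ %d\n", pdev->irq);
	goto out_free_resources;
}
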
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a99892..924b0ba74dfe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1143,6 +1143,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
res->add_to_ml = 0;
res->del_from_ml = 0;
res->resetting_device = 0;
+ res->reset_occurred = 0;
res->sdev = NULL;
res->sata_port = NULL;
@@ -2367,6 +2368,42 @@ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_device_error - Log a sis64 device error.
+ * ipr_log_sis64_device_error - Log a sis64 device error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_21_error *error;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+ ipr_err("-----Failing Device Information-----\n");
+ ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
+ be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
+ be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
+ ipr_err("Device Resource Path: %s\n",
+ __ipr_format_res_path(error->res_path,
+ buffer, sizeof(buffer)));
+ error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
+ error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
+ ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
+ ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
+ ipr_err("SCSI Sense Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
+ ipr_err("SCSI Command Descriptor Block: \n");
+ ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
+
+ ipr_err("Additional IOA Data:\n");
+ ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
+}
+
+/**
* ipr_get_error - Find the specfied IOASC in the ipr_error_table.
* @ioasc: IOASC
*
@@ -2467,6 +2504,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_21:
+ ipr_log_sis64_device_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_23:
ipr_log_sis64_config_error(ioa_cfg, hostrcb);
break;
@@ -3630,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
return strlen(buf);
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
}
spin_lock_irqsave(shost->host_lock, lock_flags);
ioa_cfg->iopoll_weight = user_iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5015,6 +5053,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
+ res->reset_occurred = 1;
LEAVE;
return rc ? FAILED : SUCCESS;
@@ -5484,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
return IRQ_NONE;
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
hrrq->toggle_bit) {
if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -6183,8 +6221,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
- if (ipr_is_gscsi(res))
+ if (ipr_is_gscsi(res) && res->reset_occurred) {
+ res->reset_occurred = 0;
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+ }
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
}
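
The new reset_occurred bit connects three hunks in this file: it starts cleared in ipr_init_res_entry(), is set once __ipr_eh_dev_reset() finishes, and is consumed here by the first GSCSI command after the reset, which asks the adapter for a post-reset delay. A compressed view of that handshake (not literal driver code):

/* Sketch of the reset_occurred handshake. */
res->reset_occurred = 0;		/* ipr_init_res_entry()   */
	/* ... error handler resets the device ... */
res->reset_occurred = 1;		/* __ipr_eh_dev_reset()   */
	/* ... next regular command for that device ... */
if (ipr_is_gscsi(res) && res->reset_occurred) {	/* ipr_queuecommand() */
	res->reset_occurred = 0;
	ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
}
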
@@ -8641,6 +8681,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called to tell us that the MMIO
+ * access to the IOA has been restored
+ */
+static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (!ioa_cfg->probe_done)
+ pci_save_state(pdev);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
* ipr_pci_frozen - Called when slot has experienced a PCI bus error.
* @pdev: PCI device struct
*
@@ -8654,7 +8713,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev)
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done)
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8672,11 +8732,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (ioa_cfg->needs_warm_reset)
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- else
- _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
- IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->needs_warm_reset)
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ else
+ _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+ IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -8695,17 +8758,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
- ioa_cfg->sdt_state = ABORT_DUMP;
- ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
- ioa_cfg->in_ioa_bringdown = 1;
- for (i = 0; i < ioa_cfg->hrrq_num; i++) {
- spin_lock(&ioa_cfg->hrrq[i]._lock);
- ioa_cfg->hrrq[i].allow_cmds = 0;
- spin_unlock(&ioa_cfg->hrrq[i]._lock);
- }
- wmb();
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ if (ioa_cfg->probe_done) {
+ if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+ ioa_cfg->sdt_state = ABORT_DUMP;
+ ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
+ ioa_cfg->in_ioa_bringdown = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ } else
+ wake_up_all(&ioa_cfg->eeh_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8725,7 +8791,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
switch (state) {
case pci_channel_io_frozen:
ipr_pci_frozen(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
+ return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_perm_failure:
ipr_pci_perm_failure(pdev);
return PCI_ERS_RESULT_DISCONNECT;
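
Together with the new mmio_enabled callback added earlier in this file, the frozen case now opts in to the lighter recovery path. The sequence the PCI error-recovery core drives, with the return values from these hunks, is roughly:

/*
 * error_detected(pci_channel_io_frozen) -> PCI_ERS_RESULT_CAN_RECOVER
 * mmio_enabled()  -> pci_save_state() if probe has not finished,
 *                    then PCI_ERS_RESULT_NEED_RESET
 * slot_reset()    -> reset the IOA, or wake probe-time waiters,
 *                    then PCI_ERS_RESULT_RECOVERED
 */
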
@@ -8755,6 +8821,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+ ioa_cfg->probe_done = 1;
if (ioa_cfg->needs_hard_reset) {
ioa_cfg->needs_hard_reset = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9030,16 +9097,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
- for (i = 0; i < ioa_cfg->hrrq_num; i++) {
- INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
- INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
- spin_lock_init(&ioa_cfg->hrrq[i]._lock);
- if (i == 0)
- ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
- else
- ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
- }
-
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
@@ -9140,6 +9197,48 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
}
/**
+ * ipr_init_regs - Initialize IOA registers
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ const struct ipr_interrupt_offsets *p;
+ struct ipr_interrupts *t;
+ void __iomem *base;
+
+ p = &ioa_cfg->chip_cfg->regs;
+ t = &ioa_cfg->regs;
+ base = ioa_cfg->hdw_dma_regs;
+
+ t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
+ t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
+ t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
+ t->clr_interrupt_reg = base + p->clr_interrupt_reg;
+ t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
+ t->sense_interrupt_reg = base + p->sense_interrupt_reg;
+ t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
+ t->ioarrin_reg = base + p->ioarrin_reg;
+ t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
+ t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
+ t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
+ t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
+ t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
+ t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
+
+ if (ioa_cfg->sis64) {
+ t->init_feedback_reg = base + p->init_feedback_reg;
+ t->dump_addr_reg = base + p->dump_addr_reg;
+ t->dump_data_reg = base + p->dump_data_reg;
+ t->endian_swap_reg = base + p->endian_swap_reg;
+ }
+}
+
+/**
* ipr_init_ioa_cfg - Initialize IOA config struct
* @ioa_cfg: ioa config struct
* @host: scsi host struct
@@ -9151,9 +9250,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
struct Scsi_Host *host, struct pci_dev *pdev)
{
- const struct ipr_interrupt_offsets *p;
- struct ipr_interrupts *t;
- void __iomem *base;
+ int i;
ioa_cfg->host = host;
ioa_cfg->pdev = pdev;
@@ -9173,6 +9270,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ init_waitqueue_head(&ioa_cfg->eeh_wait_q);
ioa_cfg->sdt_state = INACTIVE;
ipr_initialize_bus_attr(ioa_cfg);
@@ -9183,44 +9281,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+ + ((sizeof(struct ipr_config_table_entry64)
+ * ioa_cfg->max_devs_supported)));
} else {
host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+ + ((sizeof(struct ipr_config_table_entry)
+ * ioa_cfg->max_devs_supported)));
}
+
host->max_channel = IPR_MAX_BUS_TO_SCAN;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
host->can_queue = ioa_cfg->max_cmds;
pci_set_drvdata(pdev, ioa_cfg);
- p = &ioa_cfg->chip_cfg->regs;
- t = &ioa_cfg->regs;
- base = ioa_cfg->hdw_dma_regs;
-
- t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
- t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
- t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
- t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
- t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
- t->clr_interrupt_reg = base + p->clr_interrupt_reg;
- t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
- t->sense_interrupt_reg = base + p->sense_interrupt_reg;
- t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
- t->ioarrin_reg = base + p->ioarrin_reg;
- t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
- t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
- t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
- t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
- t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
- t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
-
- if (ioa_cfg->sis64) {
- t->init_feedback_reg = base + p->init_feedback_reg;
- t->dump_addr_reg = base + p->dump_addr_reg;
- t->dump_data_reg = base + p->dump_data_reg;
- t->endian_swap_reg = base + p->endian_swap_reg;
+ for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
}
}
@@ -9243,54 +9330,63 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+/**
+ * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
+ * during probe time
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * None
+ **/
+static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ if (pci_channel_offline(pdev)) {
+ wait_event_timeout(ioa_cfg->eeh_wait_q,
+ !pci_channel_offline(pdev),
+ IPR_PCI_ERROR_RECOVERY_TIMEOUT);
+ pci_restore_state(pdev);
+ }
+}
+
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
- int i, err, vectors;
+ int i, vectors;
for (i = 0; i < ARRAY_SIZE(entries); ++i)
entries[i].entry = i;
- vectors = ipr_number_of_msix;
-
- while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0) {
- pci_disable_msix(ioa_cfg->pdev);
- return err;
+ vectors = pci_enable_msix_range(ioa_cfg->pdev,
+ entries, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
}
- if (!err) {
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = entries[i].vector;
- ioa_cfg->nvectors = vectors;
- }
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
- return err;
+ return 0;
}
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
- int i, err, vectors;
-
- vectors = ipr_number_of_msix;
+ int i, vectors;
- while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
- vectors = err;
-
- if (err < 0) {
- pci_disable_msi(ioa_cfg->pdev);
- return err;
+ vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
+ if (vectors < 0) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ return vectors;
}
- if (!err) {
- for (i = 0; i < vectors; i++)
- ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
- ioa_cfg->nvectors = vectors;
- }
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
- return err;
+ return 0;
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
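
Both helpers above replace the old retry loop (call pci_enable_msix()/pci_enable_msi_block(), then retry with the smaller count the core reports) with the range API: pci_enable_msix_range() and pci_enable_msi_range() allocate anywhere between minvec and maxvec vectors and return the count actually granted, or a negative errno with nothing left enabled, so the caller needs neither a loop nor a disable call on failure. A stand-alone sketch of the MSI-X pattern for a driver happy with 1 to 16 vectors (struct my_priv and its fields are illustrative):

static int my_driver_enable_msix(struct pci_dev *pdev, struct my_priv *priv)
{
	struct msix_entry entries[16];
	int i, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); i++)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(pdev, entries, 1, ARRAY_SIZE(entries));
	if (vectors < 0)
		return vectors;		/* not even one vector; nothing enabled */

	for (i = 0; i < vectors; i++)
		priv->vec[i] = entries[i].vector;
	priv->nvectors = vectors;
	return 0;
}
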
@@ -9355,7 +9451,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
* ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
* @pdev: PCI device struct
*
- * Description: The return value from pci_enable_msi() can not always be
+ * Description: The return value from pci_enable_msi_range() can not always be
* trusted. This routine sets up and initiates a test interrupt to determine
* if the interrupt is received via the ipr_test_intr() service routine.
* If the tests fails, the driver will fall back to LSI.
@@ -9434,19 +9530,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ENTER;
- if ((rc = pci_enable_device(pdev))) {
- dev_err(&pdev->dev, "Cannot enable adapter\n");
- goto out;
- }
-
dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
-
host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
if (!host) {
dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
rc = -ENOMEM;
- goto out_disable;
+ goto out;
}
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9476,6 +9566,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->revid = pdev->revision;
+ ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
ipr_regs_pci = pci_resource_start(pdev, 0);
rc = pci_request_regions(pdev, IPR_NAME);
@@ -9485,22 +9577,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto out_scsi_host_put;
}
+ rc = pci_enable_device(pdev);
+
+ if (rc || pci_channel_offline(pdev)) {
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ rc = pci_enable_device(pdev);
+ }
+
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable adapter\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ goto out_release_regions;
+ }
+ }
+
ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,
"Couldn't map memory range of registers\n");
rc = -ENOMEM;
- goto out_release_regions;
+ goto out_disable;
}
ioa_cfg->hdw_dma_regs = ipr_regs;
ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
- ipr_init_ioa_cfg(ioa_cfg, host, pdev);
-
- pci_set_master(pdev);
+ ipr_init_regs(ioa_cfg);
if (ioa_cfg->sis64) {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9508,7 +9613,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}
-
} else
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -9522,10 +9626,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Write of cache line size failed\n");
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
rc = -EIO;
goto cleanup_nomem;
}
+ /* Issue MMIO read to ensure card is not in EEH */
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+
if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
dev_err(&pdev->dev, "The max number of MSIX is %d\n",
IPR_MAX_MSIX_VECTORS);
@@ -9544,10 +9653,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
+ pci_set_master(pdev);
+
+ if (pci_channel_offline(pdev)) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
+ pci_set_master(pdev);
+ if (pci_channel_offline(pdev)) {
+ rc = -EIO;
+ goto out_msi_disable;
+ }
+ }
+
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
if (rc == -EOPNOTSUPP) {
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSI) {
ioa_cfg->intr_flag &= ~IPR_USE_MSI;
pci_disable_msi(pdev);
@@ -9577,30 +9698,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(unsigned int)num_online_cpus(),
(unsigned int)IPR_MAX_HRRQ_NUM);
- /* Save away PCI config space for use following IOA reset */
- rc = pci_save_state(pdev);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- dev_err(&pdev->dev, "Failed to save PCI config space\n");
- rc = -EIO;
- goto out_msi_disable;
- }
-
if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
goto out_msi_disable;
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
goto out_msi_disable;
- if (ioa_cfg->sis64)
- ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
- + ((sizeof(struct ipr_config_table_entry64)
- * ioa_cfg->max_devs_supported)));
- else
- ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
- + ((sizeof(struct ipr_config_table_entry)
- * ioa_cfg->max_devs_supported)));
-
rc = ipr_alloc_mem(ioa_cfg);
if (rc < 0) {
dev_err(&pdev->dev,
@@ -9608,6 +9711,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
goto out_msi_disable;
}
+ /* Save away PCI config space for use following IOA reset */
+ rc = pci_save_state(pdev);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
+ dev_err(&pdev->dev, "Failed to save PCI config space\n");
+ rc = -EIO;
+ goto cleanup_nolog;
+ }
+
/*
* If HRRQ updated interrupt is not masked, or reset alert is set,
* the card is in an unknown state and needs a hard reset
@@ -9664,18 +9776,19 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
+ ipr_wait_for_pci_err_recovery(ioa_cfg);
if (ioa_cfg->intr_flag == IPR_USE_MSI)
pci_disable_msi(pdev);
else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
+out_disable:
+ pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
-out_disable:
- pci_disable_device(pdev);
goto out;
}
@@ -9859,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
ioa_cfg->host->max_channel = IPR_VSET_BUS;
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -9889,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg->iopoll_weight = 0;
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
@@ -9994,6 +10105,8 @@ static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
@@ -10005,12 +10118,19 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static const struct pci_error_handlers ipr_err_handler = {
.error_detected = ipr_pci_error_detected,
+ .mmio_enabled = ipr_pci_mmio_enabled,
.slot_reset = ipr_pci_slot_reset,
};
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ce38a22647e..31ed126f7143 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -101,12 +101,16 @@
#define IPR_SUBS_DEV_ID_57D7 0x03FF
#define IPR_SUBS_DEV_ID_57D8 0x03FE
#define IPR_SUBS_DEV_ID_57D9 0x046D
+#define IPR_SUBS_DEV_ID_57DA 0x04CA
#define IPR_SUBS_DEV_ID_57EB 0x0474
#define IPR_SUBS_DEV_ID_57EC 0x0475
#define IPR_SUBS_DEV_ID_57ED 0x0499
#define IPR_SUBS_DEV_ID_57EE 0x049A
#define IPR_SUBS_DEV_ID_57EF 0x049B
#define IPR_SUBS_DEV_ID_57F0 0x049C
+#define IPR_SUBS_DEV_ID_2CCA 0x04C7
+#define IPR_SUBS_DEV_ID_2CD2 0x04C8
+#define IPR_SUBS_DEV_ID_2CCD 0x04C9
#define IPR_NAME "ipr"
/*
@@ -230,6 +234,7 @@
#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
+#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
@@ -897,6 +902,18 @@ struct ipr_hostrcb_type_01_error {
__be32 ioa_data[236];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_21_error {
+ __be32 wwn[4];
+ u8 res_path[8];
+ u8 primary_problem_desc[32];
+ u8 second_problem_desc[32];
+ __be32 sense_data[8];
+ __be32 cdb[4];
+ __be32 residual_trans_length;
+ __be32 length_of_error;
+ __be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_type_02_error {
struct ipr_vpd ioa_vpd;
struct ipr_vpd cfc_vpd;
@@ -1126,6 +1143,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_ff_error type_ff_error;
struct ipr_hostrcb_type_12_error type_12_error;
struct ipr_hostrcb_type_17_error type_17_error;
+ struct ipr_hostrcb_type_21_error type_21_error;
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
@@ -1169,6 +1187,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
+#define IPR_HOST_RCB_OVERLAY_ID_21 0x21
#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
@@ -1252,6 +1271,7 @@ struct ipr_resource_entry {
u8 add_to_ml:1;
u8 del_from_ml:1;
u8 resetting_device:1;
+ u8 reset_occurred:1;
u32 bus; /* AKA channel */
u32 target; /* AKA id */
@@ -1441,6 +1461,7 @@ struct ipr_ioa_cfg {
u8 dump_timeout:1;
u8 cfg_locked:1;
u8 clear_isr:1;
+ u8 probe_done:1;
u8 revid;
@@ -1519,6 +1540,7 @@ struct ipr_ioa_cfg {
wait_queue_head_t reset_wait_q;
wait_queue_head_t msi_wait_q;
+ wait_queue_head_t eeh_wait_q;
struct ipr_dump *dump;
enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 4911310a38f5..22a9bb1abae1 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
}
#define for_each_isci_host(id, ihost, pdev) \
- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
- ihost = to_pci_info(pdev)->hosts[++id])
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
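
The rewritten macro tests the index against SCI_MAX_CONTROLLERS before it dereferences hosts[id]; the old form loaded hosts[++id] in the increment expression, so the final pass read one slot past the end of the array before the bounds test could stop it. Callers are unchanged; a minimal usage illustration (the loop body is illustrative):

struct isci_host *ihost;
int id;

for_each_isci_host(id, ihost, pdev)
	dev_dbg(&pdev->dev, "isci host %d at %p\n", id, ihost);
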
static inline void wait_for_start(struct isci_host *ihost)
{
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d25d0d859f05..695b34e9154e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -66,7 +66,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 1
+#define MIN 2
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 85c77f6b802b..ac879745ef80 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
- u32 port_state = iport->sm.current_state_id;
-
- /* if the PORT'S state is resetting then the link up is from
- * port hard reset in this case, we need to tell the port
- * that link up is recieved
- */
- BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 99d2930b18c8..56e38096f0c4 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2723,13 +2723,9 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
memcpy(resp->ending_fis, fis, sizeof(*fis));
ts->buf_valid_size = sizeof(*resp);
- /* If the device fault bit is set in the status register, then
- * set the sense data and return.
- */
- if (fis->status & ATA_DF)
+ /* If an error is flagged let libata decode the fis */
+ if (ac_err_mask(fis->status))
ts->stat = SAS_PROTO_RESPONSE;
- else if (fis->status & ATA_ERR)
- ts->stat = SAM_STAT_CHECK_CONDITION;
else
ts->stat = SAM_STAT_GOOD;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 0d30ca849e8f..5d6fda72d659 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 14c1c8f6a95e..680bf6f0ce76 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -490,5 +490,6 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
iscsi_boot_remove_kobj(boot_kobj);
kset_unregister(boot_kset->kset);
+ kfree(boot_kset);
}
EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index add6d1566ec8..bfb6d07d87f0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -593,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
sockfd_put(sock);
}
@@ -663,10 +663,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -726,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (!tcp_sw_conn || !tcp_sw_conn->sock) {
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getpeername(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
if (rc)
return rc;
@@ -759,23 +759,26 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
- spin_lock_bh(&session->lock);
+ if (!session)
+ return -ENOTCONN;
+
+ spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
if (!conn) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
if (!tcp_sw_conn->sock) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getsockname(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
if (rc)
return rc;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 40462415291e..5b8605ca42fa 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -110,16 +110,8 @@ static void __iscsi_update_cmdsn(struct iscsi_session *session,
session->exp_cmdsn = exp_cmdsn;
if (max_cmdsn != session->max_cmdsn &&
- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+ !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
session->max_cmdsn = max_cmdsn;
- /*
- * if the window closed with IO queued, then kick the
- * xmit thread
- */
- if (!list_empty(&session->leadconn->cmdqueue) ||
- !list_empty(&session->leadconn->mgmtqueue))
- iscsi_conn_queue_work(session->leadconn);
- }
}
void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
@@ -481,7 +473,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
* iscsi_free_task - free a task
* @task: iscsi cmd task
*
- * Must be called with session lock.
+ * Must be called with session back_lock.
* This function returns the scsi command to scsi-ml or cleans
* up mgmt tasks then returns the task to the pool.
*/
@@ -535,9 +527,10 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- spin_lock_bh(&session->lock);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->back_lock);
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
@@ -546,7 +539,7 @@ EXPORT_SYMBOL_GPL(iscsi_put_task);
* @task: iscsi cmd task
* @state: state to complete task with
*
- * Must be called with session lock.
+ * Must be called with session back_lock.
*/
static void iscsi_complete_task(struct iscsi_task *task, int state)
{
@@ -585,7 +578,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
* This is used when drivers do not need or cannot perform
* lower level pdu processing.
*
- * Called with session lock
+ * Called with session back_lock
*/
void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -602,7 +595,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
/*
- * session lock must be held and if not called for a task that is
+ * session back_lock must be held and if not called for a task that is
* still pending or from the xmit thread, then xmit thread must
* be suspended.
*/
@@ -642,7 +635,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
scsi_in(sc)->resid = scsi_in(sc)->length;
}
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
iscsi_complete_task(task, state);
+ spin_unlock_bh(&conn->session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -780,7 +776,10 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
return task;
free_task:
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
return NULL;
}
@@ -791,10 +790,10 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
struct iscsi_session *session = conn->session;
int err = 0;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1013,13 +1012,13 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected "
"due to DataDigest error.\n",
- rejected_pdu.itt, opcode);
+ opcode, rejected_pdu.itt);
break;
case ISCSI_REASON_IMM_CMD_REJECT:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Too many "
"immediate commands.\n",
- rejected_pdu.itt, opcode);
+ opcode, rejected_pdu.itt);
/*
* We only send one TMF at a time so if the target could not
* handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1030,19 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (opcode != ISCSI_OP_NOOP_OUT)
return 0;
- if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
/*
* nop-out in response to target's nop-out rejected.
* Just resend.
*/
+ /* In RX path we are under back lock */
+ spin_unlock(&conn->session->back_lock);
+ spin_lock(&conn->session->frwd_lock);
iscsi_send_nopout(conn,
(struct iscsi_nopin*)&rejected_pdu);
- else {
+ spin_unlock(&conn->session->frwd_lock);
+ spin_lock(&conn->session->back_lock);
+ } else {
struct iscsi_task *task;
/*
* Our nop as ping got dropped. We know the target
@@ -1059,8 +1063,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
default:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Reason "
- "code 0x%x\n", rejected_pdu.itt,
- rejected_pdu.opcode, reject->reason);
+ "code 0x%x\n", rejected_pdu.opcode,
+ rejected_pdu.itt, reject->reason);
break;
}
return rc;
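
This hunk shows the heart of the session-lock split running through the rest of the libiscsi changes: the completion (RX) path now runs under back_lock, the submission (TX) path under frwd_lock, and back_lock nests inside frwd_lock (see fail_scsi_task above, which takes back_lock while the forward path holds frwd_lock), so an RX-side handler that needs to transmit must drop back_lock before taking frwd_lock. Condensed into a sketch (not literal libiscsi code):

/* Sketch: an RX-side handler that must send a PDU.  back_lock may be
 * taken under frwd_lock, never the other way round, so the RX path
 * drops back_lock first and restores it afterwards.
 */
static void rx_handler_needs_to_send(struct iscsi_conn *conn,
				     struct iscsi_nopin *hdr)
{
	struct iscsi_session *session = conn->session;

	/* entered holding session->back_lock (completion side) */
	spin_unlock(&session->back_lock);
	spin_lock(&session->frwd_lock);		/* submission side */
	iscsi_send_nopout(conn, hdr);
	spin_unlock(&session->frwd_lock);
	spin_lock(&session->back_lock);		/* restore caller's lock */
}
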
@@ -1074,7 +1078,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* This should be used for mgmt tasks like login and nops, or if
* the LDD's itt space does not include the session age.
*
- * The session lock must be held.
+ * The session back_lock must be held.
*/
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
@@ -1103,7 +1107,7 @@ EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
* @datalen: len of data buffer
*
* Completes pdu processing by freeing any resources allocated at
- * queuecommand or send generic. session lock must be held and verify
+ * queuecommand or send generic. session back_lock must be held and verify
* itt must have been called.
*/
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -1140,7 +1144,12 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
break;
+ /* In RX path we are under back lock */
+ spin_unlock(&session->back_lock);
+ spin_lock(&session->frwd_lock);
iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
+ spin_unlock(&session->frwd_lock);
+ spin_lock(&session->back_lock);
break;
case ISCSI_OP_REJECT:
rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -1247,9 +1256,9 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
int rc;
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
@@ -1293,7 +1302,7 @@ EXPORT_SYMBOL_GPL(iscsi_verify_itt);
*
* This should be used for cmd tasks.
*
- * The session lock must be held.
+ * The session back_lock must be held.
*/
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
{
@@ -1323,15 +1332,15 @@ void iscsi_session_failure(struct iscsi_session *session,
struct iscsi_conn *conn;
struct device *dev;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
if (session->state == ISCSI_STATE_TERMINATE || !conn) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return;
}
dev = get_device(&conn->cls_conn->dev);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
if (!dev)
return;
/*
@@ -1351,15 +1360,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return;
}
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1393,15 +1402,18 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
return -ENODATA;
__iscsi_get_task(task);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
conn->task = NULL;
}
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
return rc;
}
@@ -1410,7 +1422,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
* @task: task to requeue
*
* LLDs that need to run a task from the session workqueue should call
- * this. The session lock must be held. This should only be called
+ * this. The session frwd_lock must be held. This should only be called
* by software drivers.
*/
void iscsi_requeue_task(struct iscsi_task *task)
@@ -1441,10 +1453,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
struct iscsi_task *task;
int rc = 0;
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
}
@@ -1465,7 +1477,10 @@ check_mgmt:
struct iscsi_task, running);
list_del_init(&conn->task->running);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
+ spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
continue;
}
@@ -1527,11 +1542,11 @@ check_mgmt:
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
done:
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
return rc;
}
@@ -1600,7 +1615,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
reason = iscsi_session_chkready(cls_session);
if (reason) {
@@ -1686,13 +1701,13 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
session->queued_cmdsn++;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
return 0;
prepd_reject:
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
reject:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
sc->cmnd[0], reason);
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1700,7 +1715,7 @@ reject:
prepd_fault:
iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
fault:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
if (!scsi_bidi_cmnd(sc))
@@ -1748,14 +1763,14 @@ static void iscsi_tmf_timedout(unsigned long data)
struct iscsi_conn *conn = (struct iscsi_conn *)data;
struct iscsi_session *session = conn->session;
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
if (conn->tmf_state == TMF_QUEUED) {
conn->tmf_state = TMF_TIMEDOUT;
ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
wake_up(&conn->ehwait);
}
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
}
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
@@ -1768,10 +1783,10 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
NULL, 0);
if (!task) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
@@ -1781,7 +1796,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
add_timer(&conn->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
/*
@@ -1800,7 +1815,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
del_timer_sync(&conn->tmf_timer);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/* if the session drops it will clean up the task */
if (age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
@@ -1837,7 +1852,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
* iscsi_suspend_queue - suspend iscsi_queuecommand
* @conn: iscsi conn to stop queueing IO on
*
- * This grabs the session lock to make sure no one is in
+ * This grabs the session frwd_lock to make sure no one is in
* xmit_task/queuecommand, and then sets suspend to prevent
* new commands from being queued. This only needs to be called
* by offload drivers that need to sync a path like ep disconnect
@@ -1846,9 +1861,9 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
*/
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
- spin_lock_bh(&conn->session->lock);
+ spin_lock_bh(&conn->session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- spin_unlock_bh(&conn->session->lock);
+ spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1907,7 +1922,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
@@ -2021,7 +2036,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
done:
if (task)
task->last_timeout = jiffies;
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "nh");
return rc;
@@ -2033,7 +2048,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
struct iscsi_session *session = conn->session;
unsigned long recv_timeout, next_timeout = 0, last_recv;
- spin_lock(&session->lock);
+ spin_lock(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN)
goto done;
@@ -2050,7 +2065,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
"last ping %lu, now %lu\n",
conn->ping_timeout, conn->recv_timeout,
last_recv, conn->last_ping, jiffies);
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
}
@@ -2066,7 +2081,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
mod_timer(&conn->transport_timer, next_timeout);
done:
- spin_unlock(&session->lock);
+ spin_unlock(&session->frwd_lock);
}
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
@@ -2096,7 +2111,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
* got the command.
@@ -2104,7 +2119,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
if (!sc->SCp.ptr) {
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
"it completed.\n");
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
@@ -2115,7 +2130,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
*/
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
sc->SCp.phase != session->age) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_EH(session, "failing abort due to dropped "
"session.\n");
@@ -2156,7 +2171,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
switch (conn->tmf_state) {
case TMF_SUCCESS:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* stop tx side in case the target had sent an abort rsp but
* the initiator was still writing out data.
@@ -2167,15 +2182,15 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* good and have never sent us a successful tmf response
* then sent more data for the cmd.
*/
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
fail_scsi_task(task, DID_ABORT);
conn->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto failed_unlocked;
case TMF_NOT_FOUND:
@@ -2194,7 +2209,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
success:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
success_unlocked:
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
sc, task->itt);
@@ -2202,7 +2217,7 @@ success_unlocked:
return SUCCESS;
failed:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
failed_unlocked:
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
task ? task->itt : 0);
@@ -2235,7 +2250,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* Just check if we are not logged in. We cannot check for
* the phase because the reset could come from an ioctl.
@@ -2262,7 +2277,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
@@ -2271,21 +2286,21 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
}
rc = SUCCESS;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto done;
unlock:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
done:
ISCSI_DBG_EH(session, "dev reset result = %s\n",
rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2298,13 +2313,13 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
if (session->leadconn)
wake_up(&session->leadconn->ehwait);
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
@@ -2326,19 +2341,19 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
ISCSI_DBG_EH(session,
"failing session reset: Could not log back into "
"%s, %s [age %d]\n", session->targetname,
conn->persistent_address, session->age);
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
/*
* we drop the lock here but the leadconn cannot be destroyed while
@@ -2355,14 +2370,14 @@ failed:
flush_signals(current);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (session->state == ISCSI_STATE_LOGGED_IN) {
ISCSI_DBG_EH(session,
"session reset succeeded for %s,%s\n",
session->targetname, conn->persistent_address);
} else
goto failed;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
@@ -2398,7 +2413,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
session->targetname);
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
/*
* Just check if we are not logged in. We cannot check for
* the phase because the reset could come from an ioctl.
@@ -2425,7 +2440,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
@@ -2434,21 +2449,21 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
}
rc = SUCCESS;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, -1, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto done;
unlock:
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
done:
ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2746,8 +2761,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->max_r2t = 1;
session->tt = iscsit;
session->dd_data = cls_session->dd_data + sizeof(*session);
+
mutex_init(&session->eh_mutex);
- spin_lock_init(&session->lock);
+ spin_lock_init(&session->frwd_lock);
+ spin_lock_init(&session->back_lock);
/* initialize SCSI PDU commands pool */
if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -2861,14 +2878,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (!kfifo_out(&session->cmdpool.queue,
(void*)&conn->login_task,
sizeof(void*))) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
goto login_task_alloc_fail;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
data = (char *) __get_free_pages(GFP_KERNEL,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -2905,7 +2922,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
del_timer_sync(&conn->transport_timer);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
/*
@@ -2914,7 +2931,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
session->state = ISCSI_STATE_TERMINATE;
wake_up(&conn->ehwait);
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* Block until all in-progress commands for this connection
@@ -2941,16 +2958,19 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
kfree(conn->local_ipaddr);
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
+ spin_unlock_bh(&session->back_lock);
if (session->leadconn == conn)
session->leadconn = NULL;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_destroy_conn(cls_conn);
}
@@ -2987,7 +3007,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
conn->ping_timeout = 5;
}
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_STARTED;
session->state = ISCSI_STATE_LOGGED_IN;
session->queued_cmdsn = session->cmdsn;
@@ -3016,7 +3036,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
default:
break;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
iscsi_unblock_session(session->cls_session);
wake_up(&conn->ehwait);
@@ -3055,9 +3075,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
int old_stop_stage;
mutex_lock(&session->eh_mutex);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (conn->stop_stage == STOP_CONN_TERM) {
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return;
}
@@ -3074,14 +3094,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
old_stop_stage = conn->stop_stage;
conn->stop_stage = flag;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
del_timer_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_STOPPED;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* for connection level recovery we should not calculate
@@ -3102,11 +3122,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
/*
* flush queues.
*/
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
@@ -3133,10 +3153,10 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&session->frwd_lock);
/*
* Unblock xmitworker(), Login Phase will pass through.
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 1d58d5336018..60cb6dc3c6f0 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
* iscsi_tcp_cleanup_task - free tcp_task resources
* @task: iscsi task
*
- * must be called with session lock
+ * must be called with session back_lock
*/
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
@@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
if (!task->sc)
return;
+ spin_lock_bh(&tcp_task->queue2pool);
/* flush task's r2t queues */
while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
@@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
sizeof(void*));
tcp_task->r2t = NULL;
}
+ spin_unlock_bh(&tcp_task->queue2pool);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
@@ -529,6 +531,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
struct iscsi_r2t_info *r2t;
int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ u32 data_length;
+ u32 data_offset;
int rc;
if (tcp_conn->in.datalen) {
@@ -554,40 +558,41 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return 0;
}
- rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
- if (!rc) {
- iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
- "Target has sent more R2Ts than it "
- "negotiated for or driver has leaked.\n");
- return ISCSI_ERR_PROTO;
- }
-
- r2t->exp_statsn = rhdr->statsn;
- r2t->data_length = be32_to_cpu(rhdr->data_length);
- if (r2t->data_length == 0) {
+ data_length = be32_to_cpu(rhdr->data_length);
+ if (data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
return ISCSI_ERR_DATALEN;
}
- if (r2t->data_length > session->max_burst)
+ if (data_length > session->max_burst)
ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
"burst %u. Attempting to execute request.\n",
- r2t->data_length, session->max_burst);
+ data_length, session->max_burst);
- r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+ data_offset = be32_to_cpu(rhdr->data_offset);
+ if (data_offset + data_length > scsi_out(task->sc)->length) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
- "and total length %d\n", r2t->data_length,
- r2t->data_offset, scsi_out(task->sc)->length);
- kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
+ "and total length %d\n", data_length,
+ data_offset, scsi_out(task->sc)->length);
return ISCSI_ERR_DATALEN;
}
+ spin_lock(&tcp_task->pool2queue);
+ rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
+ if (!rc) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
+ "Target has sent more R2Ts than it "
+ "negotiated for or driver has leaked.\n");
+ spin_unlock(&tcp_task->pool2queue);
+ return ISCSI_ERR_PROTO;
+ }
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = data_length;
+ r2t->data_offset = data_offset;
+
r2t->ttt = rhdr->ttt; /* no flip */
r2t->datasn = 0;
r2t->sent = 0;
@@ -595,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
tcp_task->exp_datasn = r2tsn + 1;
kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
+ spin_unlock(&tcp_task->pool2queue);
iscsi_requeue_task(task);
return 0;
@@ -667,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
switch(opcode) {
case ISCSI_OP_SCSI_DATA_IN:
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else
rc = iscsi_tcp_data_in(conn, task);
if (rc) {
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
break;
}
@@ -707,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
rx_hash);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
return rc;
}
rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
break;
case ISCSI_OP_SCSI_CMD_RSP:
if (tcp_conn->in.datalen) {
@@ -721,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
+ spin_unlock(&conn->session->back_lock);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
task->last_xfer = jiffies;
+ spin_lock(&conn->session->frwd_lock);
rc = iscsi_tcp_r2t_rsp(conn, task);
+ spin_unlock(&conn->session->frwd_lock);
} else
rc = ISCSI_ERR_PROTO;
- spin_unlock(&conn->session->lock);
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
@@ -980,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
{
- struct iscsi_session *session = task->conn->session;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t = NULL;
if (iscsi_task_has_unsol_data(task))
r2t = &task->unsol_r2t;
else {
- spin_lock_bh(&session->lock);
+ spin_lock_bh(&tcp_task->queue2pool);
if (tcp_task->r2t) {
r2t = tcp_task->r2t;
/* Continue with this R2T? */
@@ -1009,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
else
r2t = tcp_task->r2t;
}
- spin_unlock_bh(&session->lock);
+ spin_unlock_bh(&tcp_task->queue2pool);
}
return r2t;
@@ -1139,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
+ spin_lock_init(&tcp_task->pool2queue);
+ spin_lock_init(&tcp_task->queue2pool);
}
return 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index d2895836f9fa..766098af4eb7 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -700,46 +700,26 @@ void sas_probe_sata(struct asd_sas_port *port)
}
-static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
- bool retry = false;
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
- int rc;
-
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
- rc = dev->sata_dev.pm_result;
- if (rc == -EAGAIN)
- retry = true;
- else if (rc) {
- /* since we don't have a
- * ->port_{suspend|resume} routine in our
- * ata_port ops, and no entanglements with
- * acpi, suspend should just be mechanical trip
- * through eh, catch cases where these
- * assumptions are invalidated
- */
- WARN_ONCE(1, "failed %s %s error: %d\n", func,
- dev_name(&dev->rphy->dev), rc);
- }
/* if libata failed to power manage the device, tear it down */
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, func, -ENODEV);
}
-
- return retry;
}
void sas_suspend_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
- retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
@@ -751,20 +731,17 @@ void sas_suspend_sata(struct asd_sas_port *port)
if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
continue;
- sata->pm_result = -EIO;
- ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
+ ata_sas_port_suspend(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
- if (sas_ata_flush_pm_eh(port, __func__))
- goto retry;
+ sas_ata_flush_pm_eh(port, __func__);
}
void sas_resume_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
- retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
@@ -776,13 +753,11 @@ void sas_resume_sata(struct asd_sas_port *port)
if (sata->ap->pm_mesg.event == PM_EVENT_ON)
continue;
- sata->pm_result = -EIO;
- ata_sas_port_async_resume(sata->ap, &sata->pm_result);
+ ata_sas_port_resume(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
- if (sas_ata_flush_pm_eh(port, __func__))
- goto retry;
+ sas_ata_flush_pm_eh(port, __func__);
}
/**
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* do we need to support multiple segments? */
- if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
- printk("%s: multiple segments req %u %u, rsp %u %u\n",
- __func__, bio_segments(req->bio), blk_rq_bytes(req),
- bio_segments(rsp->bio), blk_rq_bytes(rsp));
+ if (bio_multiple_segments(req->bio) ||
+ bio_multiple_segments(rsp->bio)) {
+ printk("%s: multiple segments req %u, rsp %u\n",
+ __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
return -EINVAL;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da3aee17faa5..25d0f127424d 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -862,7 +862,7 @@ out:
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
- scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
+ scmd_dbg(cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75ca7451..94a3cafe7197 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@ struct lpfc_sli2_slim;
*/
/* 1 Second */
#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
-/* 5 minutes */
-#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@ struct lpfc_hba {
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_fof;
+ uint32_t cfg_EnableXLane;
+ uint8_t cfg_oas_tgt_wwpn[8];
+ uint8_t cfg_oas_vpt_wwpn[8];
+ uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE 1
+#define OAS_LUN_DISABLE 0
+ uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS 0x01
+ uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT 0x01
+#define OAS_FIND_ANY_TARGET 0x02
+#define OAS_LUN_VALID 0x04
+ uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@ struct lpfc_hba {
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
+ uint32_t cfg_rrq_xri_bitmap_sz;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
@@ -835,6 +848,7 @@ struct lpfc_hba {
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
mempool_t *rrq_pool;
+ mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
@@ -869,7 +883,6 @@ struct lpfc_hba {
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
- unsigned long last_ramp_up_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@ struct lpfc_hba {
atomic_t sdev_cnt;
uint8_t fips_spec_rev;
uint8_t fips_level;
+ spinlock_t devicelock; /* lock for luns list */
+ mempool_t *device_data_mem_pool;
+ struct list_head luns;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc92b93..8d5b6ceec9c9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ * (OAS) is supported.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the OAS support state (0 or 1).
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+ NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+ unsigned int i, j;
+
+ /* Count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+ if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+ ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ return -EINVAL;
+
+ memset(wwn, 0, WWN_SZ);
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ if ((*buf >= 'a') && (*buf <= 'f'))
+ j = ((j << 4) | ((*buf++ - 'a') + 10));
+ else if ((*buf >= 'A') && (*buf <= 'F'))
+ j = ((j << 4) | ((*buf++ - 'A') + 10));
+ else if ((*buf >= '0') && (*buf <= '9'))
+ j = ((j << 4) | (*buf++ - '0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+ return 0;
+}
/**
* lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
* @dev: class device that is converted into a Scsi_host.
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int stat1=0, stat2=0;
- unsigned int i, j, cnt=count;
- u8 wwpn[8];
+ int stat1 = 0, stat2 = 0;
+ unsigned int cnt = count;
+ u8 wwpn[WWN_SZ];
int rc;
if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
+ /* lock setting wwpn, wwnn down */
phba->soft_wwn_enable = 0;
- memset(wwpn, 0, sizeof(wwpn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwpn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (!rc) {
+ /* not able to set wwpn, unlock it */
+ phba->soft_wwn_enable = 1;
+ return rc;
}
+
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
/**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int i, j, cnt=count;
- u8 wwnn[8];
+ unsigned int cnt = count;
+ u8 wwnn[WWN_SZ];
+ int rc;
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
- if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
- ((cnt == 17) && (*buf++ != 'x')) ||
- ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+ if (!phba->soft_wwn_enable)
return -EINVAL;
- /*
- * Allow wwnn to be set many times, as long as the enable is set.
- * However, once the wwpn is set, everything locks.
- */
-
- memset(wwnn, 0, sizeof(wwnn));
-
- /* Validate and store the new name */
- for (i=0, j=0; i < 16; i++) {
- int value;
-
- value = hex_to_bin(*buf++);
- if (value >= 0)
- j = (j << 4) | value;
- else
- return -EINVAL;
- if (i % 2) {
- wwnn[i/2] = j & 0xff;
- j = 0;
- }
+ rc = lpfc_wwn_set(buf, cnt, wwnn);
+ if (!rc) {
+ /* Allow wwnn to be set many times, as long as the enable
+ * is set. However, once the wwpn is set, everything locks.
+ */
+ return rc;
}
+
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ * Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or a wwpn byte is invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+ lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or a wwpn byte is invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ uint8_t wwpn[WWN_SZ];
+ int rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ rc = lpfc_wwn_set(buf, cnt, wwpn);
+ if (rc)
+ return rc;
+
+ memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+ if (wwn_to_u64(wwpn) == 0)
+ phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+ else
+ phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+ phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+ lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
+ * of whether luns will be enabled or disabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
+ * of whether luns will be enabled or disabled
+ * for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the requested state is not 0 or 1
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ int val = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ phba->cfg_oas_lun_state = val;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ * Storage (OAS) lun returned by the
+ * lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+ return -EFAULT;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+ lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ * (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS : 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static size_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+ int rc = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (oas_state) {
+ if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun))
+ rc = -ENOMEM;
+ } else {
+ lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn, lun);
+ }
+ return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ * Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the lun returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified. If a lun is found, its vport wwpn, target wwpn and status are
+ * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+ uint64_t found_lun;
+
+ if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+ return NOT_OAS_ENABLED_LUN;
+ if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+ phba->sli4_hba.oas_next_vpt_wwpn,
+ (struct lpfc_name *)
+ phba->sli4_hba.oas_next_tgt_wwpn,
+ &phba->sli4_hba.oas_next_lun,
+ (struct lpfc_name *)vpt_wwpn,
+ (struct lpfc_name *)tgt_wwpn,
+ &found_lun, lun_status))
+ return found_lun;
+ else
+ return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+ uint8_t tgt_wwpn[], uint64_t lun,
+ uint32_t oas_state)
+{
+
+ int rc;
+
+ rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+ oas_state);
+ return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ uint64_t oas_lun;
+ int len = 0;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+ return -EFAULT;
+
+ oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn,
+ &phba->cfg_oas_lun_status);
+ if (oas_lun != NOT_OAS_ENABLED_LUN)
+ phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+ len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+ return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This function sets the OAS state for lun. Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: value of count.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint64_t scsi_lun;
+ ssize_t rc;
+
+ if (!phba->cfg_EnableXLane)
+ return -EPERM;
+
+ if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+ return -EFAULT;
+
+ if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+ return -EFAULT;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+ return -EINVAL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+ "with oas set to %d\n",
+ wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+ wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+ phba->cfg_oas_tgt_wwpn, scsi_lun,
+ phba->cfg_oas_lun_state);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+ lpfc_oas_lun_show, lpfc_oas_lun_store);
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vector_map_info *cpup;
- int idx, len = 0;
+ int len = 0;
if ((phba->sli_rev != LPFC_SLI_REV4) ||
(phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
break;
}
- cpup = phba->sli4_hba.cpu_map;
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+ while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+ cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+ /* margin should fit in this and the truncated message */
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
- idx, cpup->channel_id, cpup->phys_id,
+ phba->sli4_hba.curr_disp_cpu,
+ cpup->channel_id, cpup->phys_id,
cpup->core_id, cpup->irq);
- cpup++;
+ phba->sli4_hba.curr_disp_cpu++;
+
+ /* display max number of CPUs keeping some margin */
+ if (phba->sli4_hba.curr_disp_cpu <
+ phba->sli4_hba.num_present_cpu &&
+ (len >= (PAGE_SIZE - 64))) {
+ len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+ break;
+ }
}
+
+ if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+ phba->sli4_hba.curr_disp_cpu = 0;
+
return len;
}
@@ -4157,6 +4643,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
/*
+# lpfc_EnableXLane: Enable Express Lane Feature
+# 0x0 Express Lane Feature disabled
+# 0x1 Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
+# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
# 0 = BlockGuard disabled (default)
# 1 = BlockGuard enabled
@@ -4317,6 +4818,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_soft_wwn_enable,
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
+ &dev_attr_lpfc_EnableXLane,
+ &dev_attr_lpfc_XLanePriority,
+ &dev_attr_lpfc_xlane_lun,
+ &dev_attr_lpfc_xlane_tgt,
+ &dev_attr_lpfc_xlane_vpt,
+ &dev_attr_lpfc_xlane_lun_state,
+ &dev_attr_lpfc_xlane_lun_status,
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
+ &dev_attr_lpfc_xlane_supported,
NULL,
};
@@ -5296,11 +5805,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+ lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ phba->cfg_EnableXLane = 0;
+ lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+ memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+ memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+ phba->cfg_oas_lun_state = 0;
+ phba->cfg_oas_lun_status = 0;
+ phba->cfg_oas_flags = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
else
- phba->cfg_poll = lpfc_poll;
+ phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20e2d8..ca2f4ea7cdef 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
switch (opcode) {
case FCOE_OPCODE_READ_FCF:
+ case FCOE_OPCODE_GET_DPORT_RESULTS:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2957 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
nemb_mse, dmabuf);
break;
case FCOE_OPCODE_ADD_FCF:
+ case FCOE_OPCODE_SET_DPORT_MODE:
+ case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2958 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a160d1..a94d4c9dfaa5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys {
#define SLI_CONFIG_SUBSYS_FCOE 0x0C
#define FCOE_OPCODE_READ_FCF 0x08
#define FCOE_OPCODE_ADD_FCF 0x09
+#define FCOE_OPCODE_SET_DPORT_MODE 0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a84239..adda0bf7a244 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
@@ -242,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@ void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
-void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
void lpfc_scsi_dev_block(struct lpfc_hba *);
void
@@ -471,3 +476,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+ struct lpfc_name *,
+ struct lpfc_name *,
+ uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+ struct list_head *list,
+ struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, struct lpfc_name *,
+ struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc952ca6..828c08e9389e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
}
}
+ if (phba->cfg_fof) {
+ /* FOF EQ */
+ qp = phba->sli4_hba.fof_eq;
+ if (!qp)
+ goto out;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\nFOF EQ info: "
+ "EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "EQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ if (phba->cfg_EnableXLane) {
+
+ /* OAS CQ */
+ qp = phba->sli4_hba.oas_cq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tOAS CQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocEQID[%02d]: "
+ "CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tCQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+
+ /* OAS WQ */
+ qp = phba->sli4_hba.oas_wq;
+ if (qp) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tOAS WQ info: ");
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "AssocCQID[%02d]: "
+ "WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid,
+ qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tWQID[%02d], "
+ "QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id,
+ qp->entry_count,
+ qp->entry_size,
+ qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ if (len >= max_cnt)
+ goto too_big;
+ }
+ }
+out:
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -3927,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
char name[64];
uint32_t num, i;
+ bool pport_setup = false;
if (!lpfc_debugfs_enable)
return;
@@ -3947,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup funcX directory for specific HBA PCI function */
snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
+ pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/*
+ * The following section is for additional directories/files for the
+ * physical port.
+ */
+
+ if (!pport_setup)
+ goto debug_failed;
+
+ /*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5f728c..1a6fe524940d 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@ struct lpfc_nodelist {
atomic_t cmd_pending;
uint32_t cmd_qdepth;
unsigned long last_change_time;
- struct lpfc_node_rrqs active_rrqs;
+ unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f0c58d..624fe0b3cc0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint32_t rc, keepDID = 0;
int put_node;
int put_rport;
- struct lpfc_node_rrqs rrq;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
return ndlp;
- memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (active_rrqs_xri_bitmap)
+ memset(active_rrqs_xri_bitmap, 0,
+ phba->cfg_rrq_xri_bitmap_sz);
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
- if (!rc)
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
new_ndlp = lpfc_enable_node(vport, new_ndlp,
NLP_STE_UNUSED_NODE);
- if (!new_ndlp)
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return ndlp;
+ }
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
} else {
keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&rrq.xri_bitmap,
- &new_ndlp->active_rrqs.xri_bitmap,
- sizeof(new_ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
}
lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(new_ndlp->active_rrqs.xri_bitmap,
- &ndlp->active_rrqs.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ memcpy(new_ndlp->active_rrqs_xri_bitmap,
+ ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
lpfc_drop_node(vport, ndlp);
}
else {
@@ -1634,10 +1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
- if (phba->sli_rev == LPFC_SLI_REV4)
- memcpy(&ndlp->active_rrqs.xri_bitmap,
- &rrq.xri_bitmap,
- sizeof(ndlp->active_rrqs.xri_bitmap));
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(ndlp->active_rrqs_xri_bitmap,
+ active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
put_device(&rport->dev);
}
}
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
return new_ndlp;
}
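The plogi-confirm hunks above trade the old on-stack rrq.xri_bitmap copies for a scratch bitmap borrowed from phba->active_rrq_pool, which is why every early return now frees it. A minimal sketch of that borrow/copy/free pattern, assuming the pool and cfg_rrq_xri_bitmap_sz fields added later in this diff:

	unsigned long *tmp_bitmap = NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Borrow a zeroed bitmap sized for max_xri bits. */
		tmp_bitmap = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL);
		if (tmp_bitmap)
			memset(tmp_bitmap, 0, phba->cfg_rrq_xri_bitmap_sz);
	}

	/* ... stage new_ndlp's active RRQ XRIs in tmp_bitmap ... */

	if (tmp_bitmap)
		mempool_free(tmp_bitmap, phba->active_rrq_pool); /* on every exit */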
@@ -2772,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
+
lpfc_nlp_put(ndlp);
return 0;
}
@@ -6193,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr)
spin_lock_irqsave(&vport->work_port_lock, iflag);
tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- if (!tmo_posted)
+ if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
lpfc_worker_wake_up(phba);
return;
}
@@ -6223,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t els_command = 0;
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
-
+ if ((phba->pport->load_flag & FC_UNLOADING))
+ return;
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ if ((phba->pport->load_flag & FC_UNLOADING)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
- list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
list_add_tail(&piocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
@@ -6290,8 +6328,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
- mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ mod_timer(&vport->els_tmofunc,
+ jiffies + msecs_to_jiffies(1000 * timeout));
}
/**
@@ -6317,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
- LIST_HEAD(completions);
+ LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
lpfc_fabric_abort_vport(vport);
+ /*
+ * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
+ * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
+ * ultimately grabs the ring_lock, the driver must splice the list into
+ * a working list and release the locks before calling the abort.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ continue;
+
+ if (piocb->vport != vport)
+ continue;
+ list_add_tail(&piocb->dlist, &abort_list);
+ }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+ /* Abort each iocb on the aborted list and remove the dlist links. */
+ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&piocb->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ spin_unlock_irq(&phba->hbalock);
+ }
+ if (!list_empty(&abort_list))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "3387 abort list for txq not empty\n");
+ INIT_LIST_HEAD(&abort_list);
spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -6343,24 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
- list_move_tail(&piocb->list, &completions);
- }
-
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
- continue;
- }
-
- if (piocb->vport != vport)
- continue;
-
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
}
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
return;
}
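The rewritten flush uses the same two-phase abort pattern as lpfc_els_abort() later in this diff: gather the matching IOCBs onto a private dlist while holding hbalock (plus ring_lock on SLI4), then drop the locks before calling lpfc_sli_issue_abort_iotag(), which re-acquires them. A condensed sketch of that pattern, using the names from the hunk above:

	LIST_HEAD(abort_list);

	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list)
		if (piocb->vport == vport &&
		    !(piocb->iocb_flag & LPFC_IO_LIBDFC))
			list_add_tail(&piocb->dlist, &abort_list);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Issue the aborts outside the ring lock; the abort path retakes it. */
	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&piocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
		spin_unlock_irq(&phba->hbalock);
	}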
@@ -6385,35 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
- LIST_HEAD(completions);
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
-
- lpfc_fabric_abort_hba(phba);
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
- continue;
- list_move_tail(&piocb->list, &completions);
- }
- list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
- continue;
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- }
- spin_unlock_irq(&phba->hbalock);
-
- /* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ struct lpfc_vport *vport;
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(vport);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d9f237..59b51c529ba0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
- if (work_port_events & WORKER_RAMP_UP_QUEUE)
- lpfc_ramp_up_queue_handler(phba);
if (work_port_events & WORKER_DELAYED_DISC_TMO)
lpfc_delayed_disc_timeout_handler(vport);
}
@@ -2545,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (!new_fcf_record) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
- "failed to retrieve a FCF record.\n");
- goto error_out;
+ "failed to retrieve a FCF record. "
+ "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+ phba->fcf.fcf_flag);
+ lpfc_unregister_fcf_rescan(phba);
+ goto out;
}
/* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_map_cnt += count;
break;
case NLP_STE_NPR_NODE:
- vport->fc_npr_cnt += count;
+ if (vport->fc_npr_cnt == 0 && count == -1)
+ vport->fc_npr_cnt = 0;
+ else
+ vport->fc_npr_cnt += count;
break;
}
spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
uint32_t did;
unsigned long flags;
+ unsigned long *active_rrqs_xri_bitmap = NULL;
if (!ndlp)
return NULL;
@@ -4208,12 +4213,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Keep the original DID */
did = ndlp->nlp_DID;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
/* re-initialize ndlp except of ndlp linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
lpfc_initialize_node(vport, ndlp, did);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
@@ -4799,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x\n",
+ "Data: x%p x%x x%x x%x %p\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1);
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
}
@@ -5618,8 +5629,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ ndlp->active_rrqs_xri_bitmap =
+ mempool_alloc(vport->phba->active_rrq_pool,
+ GFP_KERNEL);
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@ lpfc_nlp_release(struct kref *kref)
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
}
@@ -6170,10 +6189,6 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
memcpy(&conn_entry->conn_rec, &conn_rec[i],
sizeof(struct lpfc_fcf_conn_rec));
- conn_entry->conn_rec.vlan_tag =
- conn_entry->conn_rec.vlan_tag;
- conn_entry->conn_rec.flags =
- conn_entry->conn_rec.flags;
list_add_tail(&conn_entry->list,
&phba->fcf_conn_rec_list);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d30ca69..3d9438ce59ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
+#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b116d328..fd79f7de7666 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
+#define cfg_oas_SHIFT 25
+#define cfg_oas_MASK 0x00000001
+#define cfg_oas_WORD word12
#define cfg_loopbk_scope_SHIFT 28
#define cfg_loopbk_scope_MASK 0x0000000f
#define cfg_loopbk_scope_WORD word12
@@ -3322,6 +3325,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
+#define wqe_oas_SHIFT 6
+#define wqe_oas_MASK 0x00000001
+#define wqe_oas_WORD word10
#define wqe_lenloc_SHIFT 7
#define wqe_lenloc_MASK 0x00000003
#define wqe_lenloc_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc85c35..635eeb3d6987 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@ lpfc_rrq_timeout(unsigned long ptr)
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ else
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- lpfc_worker_wake_up(phba);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_worker_wake_up(phba);
}
/**
@@ -1468,7 +1474,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* for handling possible port resource change.
**/
static int
-lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+ bool en_rn_msg)
{
int rc;
uint32_t intr_mode;
@@ -1480,9 +1487,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (!rc) {
/* need reset: attempt for port recovery */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Reset Needed: Attempting Port "
- "Recovery...\n");
+ if (en_rn_msg)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
lpfc_offline(phba);
/* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t reg_err1, reg_err2;
uint32_t uerrlo_reg, uemasklo_reg;
uint32_t pci_rd_rc1, pci_rd_rc2;
+ bool en_rn_msg = true;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
break;
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
- reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3143 Port Down: Firmware Restarted\n");
- else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ "3143 Port Down: Firmware Update "
+ "Detected\n");
+ en_rn_msg = false;
+ } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
"3145 Port Down: Provisioning\n");
/* Check port status register for function reset */
- rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+ en_rn_msg);
if (rc == 0) {
/* don't report event on forced debug dump */
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
+ int fof_vectors = 0;
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+ rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+ if (unlikely(rc))
+ goto out_free_bsmbx;
/* IF Type 0 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
mempool_free(mboxq, phba->mbox_mem_pool);
+
+ /* Verify OAS is supported */
+ lpfc_sli4_oas_verify(phba);
+ if (phba->cfg_fof)
+ fof_vectors = 1;
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -5159,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl =
kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors + phba->cfg_fcp_io_channel)),
+ GFP_KERNEL);
if (!phba->sli4_hba.fcp_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ (fof_vectors +
+ phba->cfg_fcp_io_channel)), GFP_KERNEL);
if (!phba->sli4_hba.msix_entries) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.num_online_cpu = 0;
+ phba->sli4_hba.curr_disp_cpu = 0;
/* Free memory allocated for msi-x interrupt vector entries */
kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize FCF connection rec list */
INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
+
return 0;
}
@@ -6816,6 +6845,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
int cfg_fcp_io_channel;
uint32_t cpu;
uint32_t i = 0;
+ int fof_vectors = phba->cfg_fof ? 1 : 0;
/*
* Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
phba->sli4_hba.num_online_cpu = i;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;
if (i < cfg_fcp_io_channel) {
lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
cfg_fcp_io_channel = i;
}
- if (cfg_fcp_io_channel >
+ if (cfg_fcp_io_channel + fof_vectors >
phba->sli4_hba.max_cfg_param.max_eq) {
if (phba->sli4_hba.max_cfg_param.max_eq <
LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
"available EQs: from %d to %d\n",
cfg_fcp_io_channel,
phba->sli4_hba.max_cfg_param.max_eq);
- cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+ cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+ fof_vectors;
}
/* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
+ /* Create the Queues needed for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_create(phba);
return 0;
out_error:
@@ -7094,6 +7129,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
int idx;
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
+
if (phba->sli4_hba.hba_eq != NULL) {
/* Release HBA event queue */
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.els_cq->queue_id);
+
+ if (phba->cfg_fof) {
+ rc = lpfc_fof_queue_setup(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0549 Failed setup of FOF Queues: "
+ "rc = 0x%x\n", rc);
+ goto out_destroy_els_rq;
+ }
+ }
return 0;
+out_destroy_els_rq:
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
int fcp_qidx;
+ /* Unset the queues created for Flash Optimized Fabric operations */
+ if (phba->cfg_fof)
+ lpfc_fof_queue_destroy(phba);
/* Unset mailbox command work queue */
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
/* Unset ELS work queue */
@@ -8635,6 +8688,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Configure MSI-X capability structure */
vectors = phba->cfg_fcp_io_channel;
+ if (phba->cfg_fof) {
+ phba->sli4_hba.msix_entries[index].entry = index;
+ vectors++;
+ }
enable_msix_vectors:
rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
vectors);
@@ -8664,7 +8721,15 @@ enable_msix_vectors:
phba->sli4_hba.fcp_eq_hdl[index].idx = index;
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
- rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+ if (phba->cfg_fof && (index == (vectors - 1)))
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
+ &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+ (char *)&phba->sli4_hba.handler_name[index],
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ else
+ rc = request_irq(
+ phba->sli4_hba.msix_entries[index].vector,
&lpfc_sli4_hba_intr_handler, IRQF_SHARED,
(char *)&phba->sli4_hba.handler_name[index],
&phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@ enable_msix_vectors:
}
}
+ if (phba->cfg_fof)
+ vectors--;
+
if (vectors != phba->cfg_fcp_io_channel) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3238 Reducing IO channels to match number of "
@@ -8721,7 +8789,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
}
-
+ if (phba->cfg_fof) {
+ free_irq(phba->sli4_hba.msix_entries[index].vector,
+ &phba->sli4_hba.fcp_eq_hdl[index]);
+ }
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -8771,6 +8842,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ }
return 0;
}
@@ -8853,6 +8928,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
fcp_eq_in_use, 1);
}
+ if (phba->cfg_fof) {
+ phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+ phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+ fcp_eq_in_use, 1);
+ }
}
}
return intr_mode;
@@ -9163,6 +9244,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the Flash Optimized Fabric configuration flag is set. Otherwise,
+ * the OAS enable flag is cleared and the memory pool created for OAS device
+ * data is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+ if (!phba->cfg_EnableXLane)
+ return;
+
+ if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+ phba->cfg_fof = 1;
+ } else {
+ phba->cfg_EnableXLane = 0;
+ if (phba->device_data_mem_pool)
+ mempool_destroy(phba->device_data_mem_pool);
+ phba->device_data_mem_pool = NULL;
+ }
+
+ return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ int rc;
+
+ rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+ if (rc)
+ return -ENOMEM;
+
+ if (phba->cfg_EnableXLane) {
+
+ rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+ phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+ if (rc)
+ goto out_oas_cq;
+
+ rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+ phba->sli4_hba.oas_cq, LPFC_FCP);
+ if (rc)
+ goto out_oas_wq;
+
+ phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+ phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+ }
+
+ return 0;
+
+out_oas_wq:
+ if (phba->cfg_EnableXLane)
+ lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+ lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+ return rc;
+
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *qdesc;
+
+ /* Create FOF EQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ phba->sli4_hba.eq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.fof_eq = qdesc;
+
+ if (phba->cfg_EnableXLane) {
+
+ /* Create OAS CQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_cq = qdesc;
+
+ /* Create OAS WQ */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc)
+ goto out_error;
+
+ phba->sli4_hba.oas_wq = qdesc;
+
+ }
+ return 0;
+
+out_error:
+ lpfc_fof_queue_destroy(phba);
+ return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the fof queues created for the FC HBA
+ * operation.
+ *
+ * Return codes
+ * 0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+ /* Release FOF Event queue */
+ if (phba->sli4_hba.fof_eq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+ phba->sli4_hba.fof_eq = NULL;
+ }
+
+ /* Release OAS Completion queue */
+ if (phba->sli4_hba.oas_cq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+ phba->sli4_hba.oas_cq = NULL;
+ }
+
+ /* Release OAS Work queue */
+ if (phba->sli4_hba.oas_wq != NULL) {
+ lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+ phba->sli4_hba.oas_wq = NULL;
+ }
+ return 0;
+}
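Taken together, the three FOF helpers above mirror the existing SLI4 queue lifecycle: lpfc_fof_queue_create() allocates the queue memory during resource setup, lpfc_fof_queue_setup() posts the EQ/CQ/WQ to the port during queue setup, and lpfc_fof_queue_destroy() frees them on teardown or on any earlier failure. A rough ordering sketch, condensed from the lpfc_sli4_queue_* hunks earlier in this diff:

	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);	/* allocate fof_eq/oas_cq/oas_wq */
	...
	if (phba->cfg_fof && lpfc_fof_queue_setup(phba))
		goto out_destroy_els_rq;	/* unwind on firmware setup failure */
	...
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);	/* safe when queues are NULL */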
+
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd7c86d..ed419aad2b1f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
+{
+ size_t bytes;
+ int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+ if (max_xri <= 0)
+ return -ENOMEM;
+ bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+ sizeof(unsigned long);
+ phba->cfg_rrq_xri_bitmap_sz = bytes;
+ phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+ bytes);
+ if (!phba->active_rrq_pool)
+ return -ENOMEM;
+ else
+ return 0;
+}
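The bitmap is rounded up to whole unsigned longs so the test_and_set_bit()/test_and_clear_bit() calls in lpfc_sli.c can operate on it directly. As a worked example, max_xri = 1024 gives ((63 + 1024) / 64) * 8 = 16 * 8 = 128 bytes per node on a 64-bit kernel, and ((31 + 1024) / 32) * 4 = 128 bytes on a 32-bit kernel.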
/**
* lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_drb_pool = NULL;
}
+ if (phba->cfg_EnableXLane) {
+ phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+ LPFC_DEVICE_DATA_POOL_SIZE,
+ sizeof(struct lpfc_device_data));
+ if (!phba->device_data_mem_pool)
+ goto fail_free_hrb_pool;
+ } else {
+ phba->device_data_mem_pool = NULL;
+ }
+
return 0;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
{
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ struct lpfc_device_data *device_data;
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+ mempool_destroy(phba->active_rrq_pool);
+ phba->active_rrq_pool = NULL;
+ }
/* Free mbox memory pool */
mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
+ /* Free Device Data memory pool */
+ if (phba->device_data_mem_pool) {
+ /* Ensure all objects have been returned to the pool */
+ while (!list_empty(&phba->luns)) {
+ device_data = list_first_entry(&phba->luns,
+ struct lpfc_device_data,
+ listentry);
+ list_del(&device_data->listentry);
+ mempool_free(device_data, phba->device_data_mem_pool);
+ }
+ mempool_destroy(phba->device_data_mem_pool);
+ }
+ phba->device_data_mem_pool = NULL;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc361259d6d..c342f6afd747 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- LIST_HEAD(completions);
- LIST_HEAD(txcmplq_completions);
LIST_HEAD(abort_list);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
-
+ /* Clean up all fabric IOs first. */
lpfc_fabric_abort_nport(ndlp);
- /* First check the txq */
+ /*
+ * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+ * of all ELS IOs that need an ABTS. The IOs need to stay on the
+ * txcmplq so that the abort operation completes them successfully.
+ */
spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- /* Check to see if iocb matches the nport we are looking for */
- if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
- /* It matches, so deque and call compl with anp error */
- list_move_tail(&iocb->list, &completions);
- }
- }
-
- /* Next check the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq_completions);
- spin_unlock_irq(&phba->hbalock);
-
- list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
- /* Check to see if iocb matches the nport we are looking for */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+ /* Add to abort_list on NDLP match. */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
list_add_tail(&iocb->dlist, &abort_list);
}
- spin_lock_irq(&phba->hbalock);
- list_splice(&txcmplq_completions, &pring->txcmplq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
+ /* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&phba->hbalock);
}
+ INIT_LIST_HEAD(&abort_list);
+
+ /* Now process the txq */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock(&pring->ring_lock);
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+ list_del_init(&iocb->list);
+ list_add_tail(&iocb->list, &abort_list);
+ }
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &abort_list,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05a5f0a..462453ee0bda 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+ if (vport->phba->cfg_EnableXLane)
+ return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+ else
+ return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
@@ -304,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
unsigned long new_queue_depth, old_queue_depth;
old_queue_depth = sdev->queue_depth;
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ /* change request from sysfs, fall through */
+ case SCSI_QDEPTH_RAMP_UP:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ if (scsi_track_queue_full(sdev, qdepth) == 0)
+ return sdev->queue_depth;
+
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "0711 detected queue full - lun queue "
+ "depth adjusted to %d.\n", sdev->queue_depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
new_queue_depth = sdev->queue_depth;
- rdata = sdev->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sdev);
if (rdata)
lpfc_send_sdev_queuedepth_change_event(phba, vport,
rdata->pnode, sdev->lun,
@@ -377,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
}
/**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time. This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport *vport,
- uint32_t queue_depth)
-{
- unsigned long flags;
- struct lpfc_hba *phba = vport->phba;
- uint32_t evt_posted;
- atomic_inc(&phba->num_cmd_success);
-
- if (vport->cfg_lun_queue_depth <= queue_depth)
- return;
- spin_lock_irqsave(&phba->hbalock, flags);
- if (time_before(jiffies,
- phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
- time_before(jiffies,
- phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return;
- }
- phba->last_ramp_up_time = jiffies;
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- spin_lock_irqsave(&phba->pport->work_port_lock, flags);
- evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
- if (!evt_posted)
- phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
- spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
- if (!evt_posted)
- lpfc_worker_wake_up(phba);
- return;
-}
-
-/**
* lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
* @phba: The Hba for which this call is being executed.
*
@@ -472,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
}
/**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
- struct lpfc_vport **vports;
- struct Scsi_Host *shost;
- struct scsi_device *sdev;
- int i;
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- shost_for_each_device(sdev, shost) {
- if (vports[i]->cfg_lun_queue_depth <=
- sdev->queue_depth)
- continue;
- lpfc_change_queue_depth(sdev,
- sdev->queue_depth+1,
- SCSI_QDEPTH_RAMP_UP);
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
- atomic_set(&phba->num_rsrc_err, 0);
- atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
* lpfc_scsi_dev_block - set all scsi hosts to block state
* @phba: Pointer to HBA context object.
*
@@ -1502,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
/* Next check if we need to match the remote NPortID or WWPN */
- rdata = sc->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(sc->device);
if (rdata && rdata->pnode) {
ndlp = rdata->pnode;
@@ -3507,6 +3457,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+ /*
+ * If the OAS driver feature is enabled and the lun is enabled for
+ * OAS, set the oas iocb related flags.
+ */
+ if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->oas_enabled)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
return 0;
}
@@ -4021,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd;
int result;
- struct scsi_device *tmp_sdev;
int depth;
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
- if (!result)
- lpfc_rampup_queue_depth(vport, queue_depth);
-
- /*
- * Check for queue full. If the lun is reporting queue full, then
- * back off the lun queue depth to prevent target overloads.
- */
- if (result == SAM_STAT_TASK_SET_FULL && pnode &&
- NLP_CHK_NODE_ACT(pnode)) {
- shost_for_each_device(tmp_sdev, shost) {
- if (tmp_sdev->id != scsi_id)
- continue;
- depth = scsi_track_queue_full(tmp_sdev,
- tmp_sdev->queue_depth-1);
- if (depth <= 0)
- continue;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0711 detected queue full - lun queue "
- "depth adjusted to %d.\n", depth);
- lpfc_send_sdev_queuedepth_change_event(phba, vport,
- pnode,
- tmp_sdev->lun,
- depth+1, depth);
- }
- }
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
}
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+ piocb->ulpPU = 0;
+ piocb->un.fcpi.fcpi_parm = 0;
/* ulpTimeout is only one byte */
if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_scsi_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4782,6 +4716,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
atomic_dec(&ndlp->cmd_pending);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "3376 FCP could not issue IOCB err %x"
+ "FCP cmd x%x <%d/%d> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : 0xffff,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
+
+
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned long later;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = cmnd->device->hostdata;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata)
return FAILED;
pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
uint32_t num_to_alloc = 0;
int num_allocated = 0;
uint32_t sdev_cnt;
+ struct lpfc_device_data *device_data;
+ unsigned long flags;
+ struct lpfc_name target_wwpn;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- sdev->hostdata = rport->dd_data;
+ if (phba->cfg_EnableXLane) {
+
+ /*
+ * Check to see if the device data structure for the lun
+ * exists. If not, create one.
+ */
+
+ u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data = __lpfc_get_device_data(phba,
+ &phba->luns,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun);
+ if (!device_data) {
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ device_data = lpfc_create_device_data(phba,
+ &vport->fc_portname,
+ &target_wwpn,
+ sdev->lun, true);
+ if (!device_data)
+ return -ENOMEM;
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_add_tail(&device_data->listentry, &phba->luns);
+ }
+ device_data->rport_data = rport->dd_data;
+ device_data->available = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ sdev->hostdata = device_data;
+ } else {
+ sdev->hostdata = rport->dd_data;
+ }
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
/*
@@ -5623,11 +5612,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_device_data *device_data = sdev->hostdata;
+
atomic_dec(&phba->sdev_cnt);
+ if ((phba->cfg_EnableXLane) && (device_data)) {
+ spin_lock_irqsave(&phba->devicelock, flags);
+ device_data->available = false;
+ if (!device_data->oas_enabled)
+ lpfc_delete_device_data(phba, device_data);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ }
sdev->hostdata = NULL;
return;
}
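The slave_alloc/slave_destroy changes above and the OAS enable/disable routines below share ownership of the entries on phba->luns: "available" tracks whether the SCSI midlayer currently holds the lun, "oas_enabled" tracks the user's OAS selection, and an entry is freed only once both are false. A hypothetical summary of that lifecycle, restating what the four call sites in this diff do:

	/*
	 * lpfc_device_data lifecycle on phba->luns (illustrative only):
	 *
	 *   lpfc_slave_alloc()     available = true,   create entry if missing
	 *   lpfc_slave_destroy()   available = false,  free only if !oas_enabled
	 *   lpfc_enable_oas_lun()  oas_enabled = true,  create entry if missing
	 *   lpfc_disable_oas_lun() oas_enabled = false, free only if !available
	 */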
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ * GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and a pointer to the rport data.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun,
+ bool atomic_create)
+{
+
+ struct lpfc_device_data *lun_info;
+ int memory_flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !(phba->cfg_EnableXLane))
+ return NULL;
+
+ /* Attempt to create the device data to contain lun info */
+
+ if (atomic_create)
+ memory_flags = GFP_ATOMIC;
+ else
+ memory_flags = GFP_KERNEL;
+ lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+ if (!lun_info)
+ return NULL;
+ INIT_LIST_HEAD(&lun_info->listentry);
+ lun_info->rport_data = NULL;
+ memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name));
+ lun_info->device_id.lun = lun;
+ lun_info->oas_enabled = false;
+ lun_info->available = false;
+ return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+ struct lpfc_device_data *lun_info)
+{
+
+ if (unlikely(!phba) || !lun_info ||
+ !(phba->cfg_EnableXLane))
+ return;
+
+ if (!list_empty(&lun_info->listentry))
+ list_del(&lun_info->listentry);
+ mempool_free(lun_info, phba->device_data_mem_pool);
+ return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Point to list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not hold locks, it is the responsibility of the caller
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+ struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+
+ if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return NULL;
+
+ /* Check to see if the lun is already enabled for OAS. */
+
+ list_for_each_entry(lun_info, list, listentry) {
+ if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0) &&
+ (lun_info->device_id.lun == lun))
+ return lun_info;
+ }
+
+ return NULL;
+}
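__lpfc_get_device_data() is the lock-free lookup primitive; as its comment notes, callers must hold phba->devicelock across the lookup and any use of the returned entry, which is how lpfc_slave_alloc() above and lpfc_enable_oas_lun() below use it. A minimal caller sketch under that assumption (the field update is chosen purely for illustration):

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	spin_lock_irqsave(&phba->devicelock, flags);
	lun_info = __lpfc_get_device_data(phba, &phba->luns,
					  &vport->fc_portname,
					  &target_wwpn, lun);
	if (lun_info)
		lun_info->available = true;	/* example update under the lock */
	spin_unlock_irqrestore(&phba->devicelock, flags);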
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target. If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match. If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned. The function will also return the next lun if available.
+ * If the next lun is not found, starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ * false - lun not found or invalid parameters
+ * true - lun found
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+ struct lpfc_name *found_vport_wwpn,
+ struct lpfc_name *found_target_wwpn,
+ uint64_t *found_lun,
+ uint32_t *found_lun_status)
+{
+
+ unsigned long flags;
+ struct lpfc_device_data *lun_info;
+ struct lpfc_device_id *device_id;
+ uint64_t lun;
+ bool found = false;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !starting_lun || !found_vport_wwpn ||
+ !found_target_wwpn || !found_lun || !found_lun_status ||
+ (*starting_lun == NO_MORE_OAS_LUN) ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ lun = *starting_lun;
+ *found_lun = NO_MORE_OAS_LUN;
+ *starting_lun = NO_MORE_OAS_LUN;
+
+ /* Search for the lun or the lun closest in value */
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+ list_for_each_entry(lun_info, &phba->luns, listentry) {
+ if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+ (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+ sizeof(struct lpfc_name)) == 0)) &&
+ (lun_info->oas_enabled)) {
+ device_id = &lun_info->device_id;
+ if ((!found) &&
+ ((lun == FIND_FIRST_OAS_LUN) ||
+ (device_id->lun == lun))) {
+ *found_lun = device_id->lun;
+ memcpy(found_vport_wwpn,
+ &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(found_target_wwpn,
+ &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ if (lun_info->available)
+ *found_lun_status =
+ OAS_LUN_STATUS_EXISTS;
+ else
+ *found_lun_status = 0;
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+ memset(vport_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+ memset(target_wwpn, 0x0,
+ sizeof(struct lpfc_name));
+ found = true;
+ } else if (found) {
+ *starting_lun = device_id->lun;
+ memcpy(vport_wwpn, &device_id->vport_wwpn,
+ sizeof(struct lpfc_name));
+ memcpy(target_wwpn, &device_id->target_wwpn,
+ sizeof(struct lpfc_name));
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return found;
+}
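The starting_lun argument doubles as an iteration cursor: pass FIND_FIRST_OAS_LUN on the first call, and the routine writes back the next OAS-enabled lun (or NO_MORE_OAS_LUN) along with its wwpns, so repeated calls walk every enabled lun. A hypothetical walker, assuming zeroed wwpns are used to mean "any vport/target":

	struct lpfc_name vport_wwpn = { }, target_wwpn = { };
	struct lpfc_name found_vport, found_target;
	uint64_t cursor = FIND_FIRST_OAS_LUN, found_lun;
	uint32_t lun_status;

	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &cursor,
				      &found_vport, &found_target,
				      &found_lun, &lun_status)) {
		/* report found_lun / found_vport / found_target here */
		if (cursor == NO_MORE_OAS_LUN)
			break;		/* no further entries to fetch */
	}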
+
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun has been created.
+ * 2) If found, sets the OAS enabled flag if not set and returns.
+ * 3) Otherwise, creates a device data structure.
+ * 4) If successfully created, indicates the device data is for an OAS lun,
+ * indicates the lun is not available, and adds it to the list of luns.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the device data for the lun has been created */
+ lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ if (!lun_info->oas_enabled)
+ lun_info->oas_enabled = true;
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ /* Create an lun info structure and add to list of luns */
+ lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+ false);
+ if (lun_info) {
+ lun_info->oas_enabled = true;
+ lun_info->available = false;
+ list_add_tail(&lun_info->listentry, &phba->luns);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations. The routine does so by
+ * doing the following:
+ *
+ * 1) Checks to see if the device data for the lun is created.
+ * 2) If present, clears the flag indicating this lun is for OAS.
+ * 3) If the lun is not available to the system, the device data is
+ * freed.
+ *
+ * Return codes:
+ * false - Error
+ * true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+ struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+ struct lpfc_device_data *lun_info;
+ unsigned long flags;
+
+ if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+ !phba->cfg_EnableXLane)
+ return false;
+
+ spin_lock_irqsave(&phba->devicelock, flags);
+
+ /* Check to see if the lun is available. */
+ lun_info = __lpfc_get_device_data(phba,
+ &phba->luns, vport_wwpn,
+ target_wwpn, lun);
+ if (lun_info) {
+ lun_info->oas_enabled = false;
+ if (!lun_info->available)
+ lpfc_delete_device_data(phba, lun_info);
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return true;
+ }
+
+ spin_unlock_irqrestore(&phba->devicelock, flags);
+ return false;
+}
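lpfc_enable_oas_lun() and lpfc_disable_oas_lun() are the externally visible toggles for per-lun OAS state; in the rest of this series they are presumably driven from management or sysfs code that is not part of this excerpt. A hypothetical toggle wrapper showing the intended pairing:

	static bool lpfc_oas_lun_set(struct lpfc_hba *phba,
				     struct lpfc_name *vpt_wwpn,
				     struct lpfc_name *tgt_wwpn,
				     uint64_t lun, bool enable)
	{
		if (enable)
			return lpfc_enable_oas_lun(phba, vpt_wwpn, tgt_wwpn, lun);
		return lpfc_disable_oas_lun(phba, vpt_wwpn, tgt_wwpn, lun);
	}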
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7def493..0120bfccf50b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
};
+struct lpfc_device_id {
+ struct lpfc_name vport_wwpn;
+ struct lpfc_name target_wwpn;
+ uint64_t lun;
+};
+
+struct lpfc_device_data {
+ struct list_head listentry;
+ struct lpfc_rport_data *rport_data;
+ struct lpfc_device_id device_id;
+ bool oas_enabled;
+ bool available;
+};
+
struct fcp_rsp {
uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
+
+#define FIND_FIRST_OAS_LUN 0
+#define NO_MORE_OAS_LUN -1
+#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fda443f..6bb51f8e3c1b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -792,7 +793,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
list_del(&rrq->list);
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- if (!list_empty(&phba->active_rrq_list))
+ if ((!list_empty(&phba->active_rrq_list)) &&
+ (!(phba->pport->load_flag & FC_UNLOADING)))
mod_timer(&phba->rrq_tmr, next_time);
}
@@ -813,7 +816,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
{
if (!ndlp)
return 0;
- if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ return 0;
+ if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
return 1;
else
return 0;
@@ -863,7 +868,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ if (!ndlp->active_rrqs_xri_bitmap)
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+ (!(piocb->vport->load_flag & FC_UNLOADING))) {
if (!piocb->vport)
BUG();
else
@@ -4971,12 +4980,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
LPFC_QUEUE_REARM);
} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
}
+
+ if (phba->cfg_EnableXLane)
+ lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
if (phba->sli4_hba.hba_eq) {
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
+
+ if (phba->cfg_fof)
+ lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
@@ -8032,7 +8048,8 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
struct lpfc_vector_map_info *cpup;
int chann, cpu;
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+ if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+ && phba->cfg_fcp_io_channel > 1) {
cpu = smp_processor_id();
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
+ if (iocbq->iocb_flag & LPFC_IO_OAS) {
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+ if (phba->cfg_XLanePriority) {
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8523,6 +8564,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
- &wqe))
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+ } else {
+ wq = phba->sli4_hba.oas_wq;
+ }
+ if (lpfc_sli4_wq_put(wq, &wqe))
return IOCB_ERROR;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (piocb->iocb_flag & LPFC_IO_FCP) {
- if (unlikely(!phba->sli4_hba.fcp_wq))
- return IOCB_ERROR;
- idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
- piocb->fcp_wqidx = idx;
- ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+ if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+ LPFC_IO_OAS))) {
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
+ idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ piocb->fcp_wqidx = idx;
+ ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+ } else {
+ if (unlikely(!phba->sli4_hba.oas_wq))
+ return IOCB_ERROR;
+ idx = 0;
+ piocb->fcp_wqidx = 0;
+ ring_number = LPFC_FCP_OAS_RING;
+ }
pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ * entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized
+ * Fabric event queue. It checks the MajorCode and MinorCode to determine
+ * whether this is a completion event on a completion queue; if not, an
+ * error is logged and the routine returns. Otherwise, it locates the
+ * corresponding completion queue, processes all the entries on it, rearms
+ * the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ uint16_t cqid;
+ int ecount = 0;
+
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9147 Not a valid completion "
+ "event: majorcode=x%x, minorcode=x%x\n",
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
+ return;
+ }
+
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+ /* Next check for OAS */
+ cq = phba->sli4_hba.oas_cq;
+ if (unlikely(!cq)) {
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9148 OAS completion queue "
+ "does not exist\n");
+ return;
+ }
+
+ if (unlikely(cqid != cq->queue_id)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9149 Miss-matched fast-path compl "
+ "queue id: eqcqid=%d, fcpcqid=%d\n",
+ cqid, cq->queue_id);
+ return;
+ }
+
+ /* Process all the entries to the OAS CQ */
+ while ((cqe = lpfc_sli4_cq_get(cq))) {
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ if (!(++ecount % cq->entry_repost))
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+ }
+
+ /* Track the max number of CQEs processed in 1 EQ */
+ if (ecount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ecount;
+
+ /* Catch the no cq entry condition */
+ if (unlikely(ecount == 0))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9153 No entry from fast-path completion "
+ "queue fcpcqid=%d\n", cq->queue_id);
+
+ /* In any case, flush and re-arm the CQ */
+ lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is called directly from the PCI layer as an interrupt
+ * service routine when a device with the SLI-4 interface spec is enabled
+ * with MSI-X multi-message interrupt mode and there is a Flash Optimized
+ * Fabric IOCB ring event in the HBA. However, when the device is enabled
+ * with either MSI or Pin-IRQ interrupt mode, this function is called as
+ * part of the device-level interrupt handler. When the PCI slot is in
+ * error recovery or the HBA is undergoing initialization, the interrupt
+ * handler does not process the interrupt. Flash Optimized Fabric ring
+ * events are handled in interrupt context. This function is called
+ * without any lock held; it takes the hbalock to access and update SLI
+ * data structures. Note that the EQ and CQ are mapped one-to-one, so the
+ * EQ index equals the CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_queue *eq;
+ struct lpfc_eqe *eqe;
+ unsigned long iflag;
+ int ecount = 0;
+ uint32_t eqidx;
+
+ /* Get the driver's phba structure from the dev_id */
+ fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+ phba = fcp_eq_hdl->phba;
+ eqidx = fcp_eq_hdl->idx;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ eq = phba->sli4_hba.fof_eq;
+ if (unlikely(!eq))
+ return IRQ_NONE;
+
+ /* Check device state for handling interrupt */
+ if (unlikely(lpfc_intr_state_check(phba))) {
+ eq->EQ_badstate++;
+ /* Check again for link_state with lock held */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ /* Flush, clear interrupt, and rearm the EQ */
+ lpfc_sli4_eq_flush(phba, eq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Process all the events on the Flash Optimized Fabric EQ
+ */
+ while ((eqe = lpfc_sli4_eq_get(eq))) {
+ lpfc_sli4_fof_handle_eqe(phba, eqe);
+ if (!(++ecount % eq->entry_repost))
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+ eq->EQ_processed++;
+ }
+
+ /* Track the max number of EQEs processed in 1 intr */
+ if (ecount > eq->EQ_max_eqe)
+ eq->EQ_max_eqe = ecount;
+
+
+ if (unlikely(ecount == 0)) {
+ eq->EQ_no_entry++;
+
+ if (phba->intr_type == MSIX)
+ /* With MSI-X the EQ is not shared, so just warn about the empty EQ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "9145 MSI-X interrupt with no EQE\n");
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "9146 ISR interrupt with no EQE\n");
+ /* Without MSI-X the interrupt line may be shared, so report it unhandled */
+ return IRQ_NONE;
+ }
+ }
+ /* Always clear and re-arm the fast-path EQ */
+ lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ return IRQ_HANDLED;
+}
+
/**
* lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
* @irq: Interrupt number.
@@ -12287,6 +12509,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
hba_handled |= true;
}
+ if (phba->cfg_fof) {
+ hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+ &phba->sli4_hba.fcp_eq_hdl[0]);
+ if (hba_irq_rc == IRQ_HANDLED)
+ hba_handled |= true;
+ }
+
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
@@ -16544,7 +16773,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- struct lpfc_iocbq *piocbq = 0;
+ struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f2478706e..6f04080f4ea8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
+#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
+
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd1a89d..9b8cda866176 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 16
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM 1
+
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
uint32_t if_page_sz;
uint32_t rq_db_window;
uint32_t loopbk_scope;
+ uint32_t oas_supported;
uint32_t eq_pages_max;
uint32_t eqe_size;
uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
uint8_t lnk_no;
};
+#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
+ LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
- uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+ uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
+ struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+ /* Optimized Access Storage specific queues/structures */
+
+ struct lpfc_queue *oas_cq; /* OAS completion queue */
+ struct lpfc_queue *oas_wq; /* OAS Work queue */
+ struct lpfc_sli_ring *oas_ring;
+ uint64_t oas_next_lun;
+ uint8_t oas_next_tgt_wwpn[8];
+ uint8_t oas_next_vpt_wwpn[8];
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
@@ -589,6 +607,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_online_cpu;
uint16_t num_present_cpu;
+ uint16_t curr_disp_cpu;
};
enum lpfc_sge_type {
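
Sizing handler_name by LPFC_SLI4_HANDLER_CNT (FCP channels plus one FOF channel) implies the FOF event queue gets its own named MSI-X vector after the FCP vectors. The lpfc_init.c changes that wire this up are not part of this excerpt, so the sketch below is an assumption about how that slot could be named and registered, not the driver's actual code.

	/* Sketch only: the index math and the name format are assumptions. */
	int fof_idx = phba->cfg_fcp_io_channel;	/* slot after the FCP EQs */
	int rc;

	snprintf((char *)&phba->sli4_hba.handler_name[fof_idx],
		 LPFC_SLI4_HANDLER_NAME_SZ, "lpfc-fof0");
	phba->sli4_hba.fcp_eq_hdl[fof_idx].idx = fof_idx;
	phba->sli4_hba.fcp_eq_hdl[fof_idx].phba = phba;
	rc = request_irq(phba->sli4_hba.msix_entries[fof_idx].vector,
			 &lpfc_sli4_fof_intr_handler, 0,
			 (char *)&phba->sli4_hba.handler_name[fof_idx],
			 &phba->sli4_hba.fcp_eq_hdl[fof_idx]);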
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4e143b..e32cbec70324 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.43"
+#define LPFC_DRIVER_VERSION "8.3.45"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 816db12ef5d5..b7770516f4c2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -531,13 +531,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
int target = 0;
int ldrv_num = 0; /* logical drive number */
-
- /*
- * filter the internal and ioctl commands
- */
- if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
- return (scb_t *)cmd->host_scribble;
-
/*
* We know what channels our logical drives are on - mega_find_card()
*/
@@ -1439,19 +1432,22 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmdid = completed[i];
- if( cmdid == CMDID_INT_CMDS ) { /* internal command */
+ /*
+ * Only free SCBs for the commands coming down from the
+ * mid-layer, not for those issued internally.
+ *
+ * For an internal command, restore the status returned by the
+ * firmware so that the user can interpret it.
+ */
+ if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
- cmd = scb->cmd;
- mbox = (mbox_t *)scb->raw_mbox;
- /*
- * Internal command interface do not fire the extended
- * passthru or 64-bit passthru
- */
- pthru = scb->pthru;
+ list_del_init(&scb->list);
+ scb->state = SCB_FREE;
- }
- else {
+ adapter->int_status = status;
+ complete(&adapter->int_waitq);
+ } else {
scb = &adapter->scb_list[cmdid];
/*
@@ -1640,25 +1636,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmd->result |= (DID_BAD_TARGET << 16)|status;
}
- /*
- * Only free SCBs for the commands coming down from the
- * mid-layer, not for which were issued internally
- *
- * For internal command, restore the status returned by the
- * firmware so that user can interpret it.
- */
- if( cmdid == CMDID_INT_CMDS ) { /* internal command */
- cmd->result = status;
-
- /*
- * Remove the internal command from the pending list
- */
- list_del_init(&scb->list);
- scb->state = SCB_FREE;
- }
- else {
- mega_free_scb(adapter, scb);
- }
+ mega_free_scb(adapter, scb);
/* Add Scsi_Command to end of completed queue */
list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
@@ -4133,23 +4111,15 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
* The last argument is the address of the passthru structure if the command
* to be fired is a passthru command
*
- * lockscope specifies whether the caller has already acquired the lock. Of
- * course, the caller must know which lock we are talking about.
- *
* Note: parameter 'pthru' is null for non-passthru commands.
*/
static int
mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
{
- Scsi_Cmnd *scmd;
- struct scsi_device *sdev;
+ unsigned long flags;
scb_t *scb;
int rval;
- scmd = scsi_allocate_command(GFP_KERNEL);
- if (!scmd)
- return -ENOMEM;
-
/*
* The internal commands share one command id and hence are
* serialized. This is so because we want to reserve maximum number of
@@ -4160,73 +4130,45 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb = &adapter->int_scb;
memset(scb, 0, sizeof(scb_t));
- sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
- scmd->device = sdev;
-
- memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
- scmd->cmnd = adapter->int_cdb;
- scmd->device->host = adapter->host;
- scmd->host_scribble = (void *)scb;
- scmd->cmnd[0] = MEGA_INTERNAL_CMD;
-
- scb->state |= SCB_ACTIVE;
- scb->cmd = scmd;
+ scb->idx = CMDID_INT_CMDS;
+ scb->state |= SCB_ACTIVE | SCB_PENDQ;
memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
/*
* Is it a passthru command
*/
- if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
-
+ if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
scb->pthru = pthru;
- }
-
- scb->idx = CMDID_INT_CMDS;
- megaraid_queue_lck(scmd, mega_internal_done);
+ spin_lock_irqsave(&adapter->lock, flags);
+ list_add_tail(&scb->list, &adapter->pending_list);
+ /*
+ * Check if the HBA is in quiescent state, e.g., during a
+ * delete logical drive operation. If it is, don't run
+ * the pending_list.
+ */
+ if (atomic_read(&adapter->quiescent) == 0)
+ mega_runpendq(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
wait_for_completion(&adapter->int_waitq);
- rval = scmd->result;
- mc->status = scmd->result;
- kfree(sdev);
+ mc->status = rval = adapter->int_status;
/*
* Print a debug message for all failed commands. Applications can use
* this information.
*/
- if( scmd->result && trace_level ) {
+ if (rval && trace_level) {
printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
- mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+ mc->cmd, mc->opcode, mc->subopcode, rval);
}
mutex_unlock(&adapter->int_mtx);
-
- scsi_free_command(GFP_KERNEL, scmd);
-
return rval;
}
-
-/**
- * mega_internal_done()
- * @scmd - internal scsi command
- *
- * Callback routine for internal commands.
- */
-static void
-mega_internal_done(Scsi_Cmnd *scmd)
-{
- adapter_t *adapter;
-
- adapter = (adapter_t *)scmd->device->host->hostdata;
-
- complete(&adapter->int_waitq);
-
-}
-
-
static struct scsi_host_template megaraid_template = {
.module = THIS_MODULE,
.name = "MegaRAID",
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4d0ce4e78dfd..508d65e5a518 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -853,10 +853,10 @@ typedef struct {
u8 sglen; /* f/w supported scatter-gather list length */
- unsigned char int_cdb[MAX_COMMAND_SIZE];
scb_t int_scb;
struct mutex int_mtx; /* To synchronize the internal
commands */
+ int int_status; /* status of internal cmd */
struct completion int_waitq; /* wait queue for internal
cmds */
@@ -1004,7 +1004,6 @@ static int mega_del_logdrv(adapter_t *, int);
static int mega_do_del_logdrv(adapter_t *, int);
static void mega_get_max_sgl(adapter_t *);
static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
-static void mega_internal_done(Scsi_Cmnd *);
static int mega_support_cluster(adapter_t *);
#endif
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f37916..a70692779a16 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
pthru32->dataxferaddr = kioc->buf_paddr;
if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
pthru32->dataxferlen)) {
return (-EFAULT);
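
The new dataxferlen check closes a hole: kioc->buf_vaddr is a kernel buffer sized from the ioctl's declared transfer length (kioc->xferlen) earlier in mimd_to_kioc(), so a larger user-supplied pthru32->dataxferlen would let copy_from_user() overrun it. A minimal sketch of the general validate-then-copy pattern, with illustrative names only:

#include <linux/uaccess.h>

/* Copy a user payload only after checking its claimed length against the
 * size of the kernel destination buffer. */
static int example_copy_user_payload(void *dst, size_t dst_len,
				     const void __user *src, u32 claimed_len)
{
	if (claimed_len > dst_len)	/* reject oversized requests */
		return -EINVAL;
	if (copy_from_user(dst, src, claimed_len))
		return -EFAULT;
	return 0;
}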
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea386ac..32166c2c7854 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.700.06.00-rc1"
-#define MEGASAS_RELDATE "Aug. 31, 2013"
-#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
+#define MEGASAS_VERSION "06.803.01.00-rc1"
+#define MEGASAS_RELDATE "Mar. 10, 2014"
+#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
/*
* Device IDs
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
#define PCI_DEVICE_ID_LSI_INVADER 0x005d
#define PCI_DEVICE_ID_LSI_FURY 0x005f
@@ -559,7 +560,8 @@ struct megasas_ctrl_info {
u8 PCIE:1;
u8 iSCSI:1;
u8 SAS_3G:1;
- u8 reserved_0:4;
+ u8 SRIOV:1;
+ u8 reserved_0:3;
u8 reserved_1[6];
u8 port_count;
u64 port_addr[8];
@@ -839,7 +841,12 @@ struct megasas_ctrl_info {
struct { /*7A4h */
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:11;
+ u32 reserved:5;
+ u32 activePassive:2;
+ u32 supportConfigAutoBalance:1;
+ u32 mpio:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 supportPointInTimeProgress:1;
u32 supportUnevenSpans:1;
u32 dedicatedHotSparesLimited:1;
u32 headlessMode:1;
@@ -886,7 +893,12 @@ struct megasas_ctrl_info {
u32 supportUnevenSpans:1;
- u32 reserved:11;
+ u32 supportPointInTimeProgress:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 mpio:1;
+ u32 supportConfigAutoBalance:1;
+ u32 activePassive:2;
+ u32 reserved:5;
#endif
} adapterOperations2;
@@ -914,8 +926,14 @@ struct megasas_ctrl_info {
} cluster;
char clusterId[16]; /*7D4h */
+ struct {
+ u8 maxVFsSupported; /*0x7E4*/
+ u8 numVFsEnabled; /*0x7E5*/
+ u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+ u8 reserved; /*0x7E7*/
+ } iov;
- u8 pad[0x800-0x7E4]; /*7E4 */
+ u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
} __packed;
/*
@@ -986,7 +1004,9 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
@@ -1347,9 +1367,15 @@ struct megasas_cmd;
union megasas_evt_class_locale {
struct {
+#ifndef __BIG_ENDIAN_BITFIELD
u16 locale;
u8 reserved;
s8 class;
+#else
+ s8 class;
+ u8 reserved;
+ u16 locale;
+#endif
} __attribute__ ((packed)) members;
u32 word;
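
The member order is reversed under __BIG_ENDIAN_BITFIELD so that the struct view and the 32-bit word view of this union describe the same bytes on both endiannesses once the little-endian class/locale word has been converted to CPU order. A sketch of the decode this enables (illustrative helper, not driver code):

static inline s8 example_evt_class(__le32 class_locale_word)
{
	union megasas_evt_class_locale cl;

	/* After the conversion the bytes are in CPU order; with the
	 * endian-aware member layout, 'class' lands on the same byte
	 * whether the host is little- or big-endian. */
	cl.word = le32_to_cpu(class_locale_word);
	return cl.members.class;
}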
@@ -1523,6 +1549,12 @@ struct megasas_instance {
dma_addr_t producer_h;
u32 *consumer;
dma_addr_t consumer_h;
+ struct MR_LD_VF_AFFILIATION *vf_affiliation;
+ dma_addr_t vf_affiliation_h;
+ struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+ dma_addr_t vf_affiliation_111_h;
+ struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+ dma_addr_t hb_host_mem_h;
u32 *reply_queue;
dma_addr_t reply_queue_h;
@@ -1598,10 +1630,73 @@ struct megasas_instance {
unsigned long bar;
long reset_flags;
struct mutex reset_mutex;
+ struct timer_list sriov_heartbeat_timer;
+ char skip_heartbeat_timer_del;
+ u8 requestorId;
+ u64 initiator_sas_address;
+ u64 ld_sas_address[64];
+ char PlasmaFW111;
+ char mpio;
int throttlequeuedepth;
u8 mask_interrupts;
u8 is_imr;
};
+struct MR_LD_VF_MAP {
+ u32 size;
+ union MR_LD_REF ref;
+ u8 ldVfCount;
+ u8 reserved[6];
+ u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+ u32 size;
+ u8 ldCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[9];
+ struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+
+struct IOV_111 {
+ u8 maxVFsSupported;
+ u8 numVFsEnabled;
+ u8 requestorId;
+ u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+ u8 targetId;
+ u8 reserved[3];
+ u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+ u8 vdCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[5];
+ struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+ struct {
+ u32 fwCounter; /* Firmware heart beat counter */
+ struct {
+ u32 debugmode:1; /* 1=Firmware is in debug mode.
+ Heart beat will not be updated. */
+ u32 reserved:31;
+ } debug;
+ u32 reserved_fw[6];
+ u32 driverCounter; /* Driver heart beat counter. 0x20 */
+ u32 reserved_driver[7];
+ } HB;
+ u8 pad[0x400-0x40];
+};
enum {
MEGASAS_HBA_OPERATIONAL = 0,
@@ -1609,6 +1704,7 @@ enum {
MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
MEGASAS_HW_CRITICAL_ERROR = 4,
+ MEGASAS_ADPRESET_SM_POLLING = 5,
MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
};
@@ -1728,7 +1824,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3b7ad10497fe..d84d02c2aad9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.700.06.00-rc1
+ * Version : 06.803.01.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -75,6 +75,10 @@ static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
@@ -122,6 +126,8 @@ static struct pci_device_id megasas_pci_table[] = {
/* xscale IOP */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
/* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+ /* Plasma */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
/* Invader */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
@@ -132,7 +138,7 @@ static struct pci_device_id megasas_pci_table[] = {
MODULE_DEVICE_TABLE(pci, megasas_pci_table);
static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
+struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);
@@ -171,10 +177,15 @@ megasas_get_map_info(struct megasas_instance *instance);
int
megasas_sync_map_info(struct megasas_instance *instance);
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -224,6 +235,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->scmd = NULL;
cmd->frame_count = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
@@ -877,6 +889,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
+ int seconds;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
@@ -891,13 +904,18 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
/*
* Wait for cmd_status to change
*/
- return wait_and_poll(instance, cmd);
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
}
/**
* megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
* @instance: Adapter soft state
* @cmd: Command to be issued
+ * @timeout: Timeout in seconds
*
* This function waits on an event for the command to be returned from ISR.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -905,13 +923,20 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
*/
static int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
+ struct megasas_cmd *cmd, int timeout)
{
+ int ret = 0;
cmd->cmd_status = ENODATA;
instance->instancet->issue_dcmd(instance, cmd);
-
- wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
return 0;
}
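
The timeout argument keeps the old semantics when it is 0 (wait indefinitely) and otherwise returns 1 if the command has not completed within timeout seconds, so callers treat a nonzero return as a timed-out command. A short caller-side sketch mirroring the convention used by the later hunks (the result-handling helper is hypothetical):

	/* 30-second cap, as used by the flush/shutdown paths below. */
	if (megasas_issue_blocked_cmd(instance, cmd, 30))
		dev_err(&instance->pdev->dev,
			"DCMD timed out in %s\n", __func__);
	else
		example_handle_dcmd_result(instance, cmd);	/* hypothetical */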
@@ -920,18 +945,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
* megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
* @instance: Adapter soft state
* @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
*
- * MFI firmware can abort previously issued AEN command (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
* notification). The megasas_issue_blocked_abort_cmd() issues such abort
* cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
*/
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd_to_abort)
+ struct megasas_cmd *cmd_to_abort, int timeout)
{
struct megasas_cmd *cmd;
struct megasas_abort_frame *abort_fr;
+ int ret = 0;
cmd = megasas_get_cmd(instance);
@@ -957,10 +984,18 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
instance->instancet->issue_dcmd(instance, cmd);
- /*
- * Wait for this cmd to complete
- */
- wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
@@ -1514,9 +1549,23 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
spin_lock_irqsave(&instance->hba_lock, flags);
+ /* Check for an mpio path and adjust behavior */
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (megasas_check_mpio_paths(instance, scmd) ==
+ (DID_RESET << 16)) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ done(scmd);
+ return 0;
+ }
+ }
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
- scmd->result = DID_ERROR << 16;
+ scmd->result = DID_NO_CONNECT << 16;
done(scmd);
return 0;
}
@@ -1641,9 +1690,14 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ if (instance->mpio && instance->requestorId)
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
}
@@ -1730,6 +1784,25 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
megasas_check_and_restore_queue_depth(instance);
}
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval after which the timer function is called
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
+
static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
@@ -1752,6 +1825,295 @@ void megasas_do_ocr(struct megasas_instance *instance)
process_fw_state_change_wq(&instance->work_init);
}
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ dma_addr_t new_affiliation_111_h;
+ int ld, retval = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
+ "affiliation: Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ if (instance->PlasmaFW111)
+ memset(instance->vf_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(instance->vf_affiliation, 0,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ if (instance->PlasmaFW111)
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ else
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation && !new_affiliation_111) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ if (instance->PlasmaFW111)
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ if (instance->PlasmaFW111) {
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+ } else {
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+ }
+
+ if (initial) {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_h;
+ } else {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+ }
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
+ else
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (instance->PlasmaFW111) {
+ if (!new_affiliation_111->vdCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive path "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ retval = 1;
+ goto out;
+ }
+ } else {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive "
+ "path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation,
+ new_affiliation,
+ new_affiliation->size);
+ retval = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap +
+ newmap->size);
+ }
+ }
+ }
+out:
+ if (new_affiliation) {
+ if (instance->PlasmaFW111)
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ else
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ }
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ int retval = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (initial) {
+ instance->hb_host_mem =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
+ if (!instance->hb_host_mem) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for "
+ "scsi%d.\n", instance->host->host_no);
+ retval = -ENOMEM;
+ goto out;
+ }
+ memset(instance->hb_host_mem, 0,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+ dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ instance->host->host_no);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ retval = 0;
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD timed out for scsi%d\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+ dcmd->cmd_status,
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+out:
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ mod_timer(&instance->sriov_heartbeat_timer,
+ jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ "completed for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+}
+
/**
* megasas_wait_for_outstanding - Wait for all outstanding cmds
* @instance: Adapter soft state
@@ -2014,9 +2376,10 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
* First wait for all commands to complete
*/
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- ret = megasas_reset_fusion(scmd->device->host);
+ ret = megasas_reset_fusion(scmd->device->host, 1);
else
ret = megasas_generic_reset(scmd);
@@ -2731,6 +3094,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -2755,6 +3120,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -2780,6 +3147,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
(instance->pdev->device
== PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device
+ == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device
== PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device
== PCI_DEVICE_ID_LSI_FURY)) {
@@ -2788,6 +3157,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
if ((instance->pdev->device ==
PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
@@ -3014,6 +3385,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
@@ -3620,6 +3992,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
int i, loop, fw_msix_count = 0;
+ struct IOV_111 *iovPtr;
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3642,6 +4015,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
instance->instancet = &megasas_instance_template_fusion;
@@ -3696,7 +4070,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
- if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
@@ -3763,7 +4138,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->pd_list, 0 ,
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- megasas_get_pd_list(instance);
+ if (megasas_get_pd_list(instance) < 0) {
+ printk(KERN_ERR "megasas: failed to get PD list\n");
+ goto fail_init_adapter;
+ }
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
if (megasas_ld_list_query(instance,
@@ -3807,6 +4185,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
/* adapterOperations2 are converted into CPU arch*/
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
@@ -3819,6 +4198,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
fusion->fast_path_io = 0;
}
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
+ }
+ printk(KERN_WARNING "megaraid_sas: I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
}
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
@@ -3851,6 +4244,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
+ /* Launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 1))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
return 0;
fail_init_adapter:
@@ -3933,16 +4337,19 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
- megasas_issue_blocked_cmd(instance, cmd);
-
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
- eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
- eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
- eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
- eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+ else {
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ }
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
@@ -4018,7 +4425,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
instance->aen_cmd->abort_aen = 1;
ret_val = megasas_issue_blocked_abort_cmd(instance,
instance->
- aen_cmd);
+ aen_cmd, 30);
if (ret_val) {
printk(KERN_DEBUG "megasas: Failed to abort "
@@ -4160,6 +4567,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
/* Fusion only supports host reset */
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
host->hostt->eh_device_reset_handler = NULL;
@@ -4197,6 +4605,19 @@ megasas_set_dma_mask(struct pci_dev *pdev)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
goto fail_set_dma_mask;
}
+ /*
+ * Ensure that all data structures are allocated in 32-bit
+ * memory.
+ */
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ /* Try 32bit DMA mask and 32 bit Consistent dma mask */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dev_info(&pdev->dev, "set 32bit DMA mask"
+ "and 32 bit consistent mask\n");
+ else
+ goto fail_set_dma_mask;
+ }
return 0;
@@ -4212,7 +4633,7 @@ fail_set_dma_mask:
static int megasas_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int rval, pos, i, j;
+ int rval, pos, i, j, cpu;
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
@@ -4272,6 +4693,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
@@ -4368,6 +4790,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->UnevenSpanSupport = 0;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -4380,12 +4803,33 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (megasas_init_fw(instance))
goto fail_init_mfi;
+ if (instance->requestorId) {
+ if (instance->PlasmaFW111) {
+ instance->vf_affiliation_111 =
+ pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h);
+ if (!instance->vf_affiliation_111)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ } else {
+ instance->vf_affiliation =
+ pci_alloc_consistent(pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h);
+ if (!instance->vf_affiliation)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ }
+ }
+
retry_irq_register:
/*
* Register IRQ
*/
if (instance->msix_vectors) {
- for (i = 0 ; i < instance->msix_vectors; i++) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
if (request_irq(instance->msixentry[i].vector,
@@ -4394,14 +4838,22 @@ retry_irq_register:
&instance->irq_context[i])) {
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
- for (j = 0 ; j < i ; j++)
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
+ }
/* Retry irq register for IO_APIC */
instance->msix_vectors = 0;
goto retry_irq_register;
}
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error setting"
+ "affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
instance->irq_context[0].instance = instance;
@@ -4455,13 +4907,17 @@ retry_irq_register:
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
fail_irq:
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
megasas_release_fusion(instance);
@@ -4522,7 +4978,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
- megasas_issue_blocked_cmd(instance, cmd);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
megasas_return_cmd(instance, cmd);
@@ -4549,10 +5007,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
return;
if (instance->aen_cmd)
- megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->aen_cmd, 30);
if (instance->map_update_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->map_update_cmd);
+ instance->map_update_cmd, 30);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4566,7 +5025,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->data_xfer_len = 0;
dcmd->opcode = cpu_to_le32(opcode);
- megasas_issue_blocked_cmd(instance, cmd);
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+ dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
megasas_return_cmd(instance, cmd);
@@ -4590,6 +5051,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4606,9 +5071,12 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -4629,7 +5097,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
static int
megasas_resume(struct pci_dev *pdev)
{
- int rval, i, j;
+ int rval, i, j, cpu;
struct Scsi_Host *host;
struct megasas_instance *instance;
@@ -4673,6 +5141,7 @@ megasas_resume(struct pci_dev *pdev)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
@@ -4701,6 +5170,7 @@ megasas_resume(struct pci_dev *pdev)
* Register IRQ
*/
if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0 ; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
@@ -4710,12 +5180,21 @@ megasas_resume(struct pci_dev *pdev)
&instance->irq_context[i])) {
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
- for (j = 0 ; j < i ; j++)
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
+ }
goto fail_irq;
}
+
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error setting"
+ "affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
} else {
instance->irq_context[0].instance = instance;
@@ -4728,6 +5207,17 @@ megasas_resume(struct pci_dev *pdev)
}
}
+ /* Re-launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
instance->instancet->enable_intr(instance);
instance->unload = 0;
@@ -4782,6 +5272,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4793,6 +5287,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->ev = NULL;
}
+ /* cancel all wait events */
+ wake_up_all(&instance->int_cmd_wait_q);
+
tasklet_kill(&instance->isr_tasklet);
/*
@@ -4811,9 +5308,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -4821,6 +5321,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
megasas_release_fusion(instance);
@@ -4847,6 +5348,24 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
+
+ if (instance->vf_affiliation)
+ pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ instance->vf_affiliation,
+ instance->vf_affiliation_h);
+
+ if (instance->vf_affiliation_111)
+ pci_free_consistent(pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ instance->vf_affiliation_111,
+ instance->vf_affiliation_111_h);
+
+ if (instance->hb_host_mem)
+ pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ instance->hb_host_mem,
+ instance->hb_host_mem_h);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -4868,9 +5387,12 @@ static void megasas_shutdown(struct pci_dev *pdev)
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
- for (i = 0 ; i < instance->msix_vectors; i++)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
+ }
else
free_irq(instance->pdev->irq, &instance->irq_context[0]);
if (instance->msix_vectors)
@@ -5045,7 +5567,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- megasas_issue_blocked_cmd(instance, cmd);
+ megasas_issue_blocked_cmd(instance, cmd, 0);
cmd->sync_cmd = 0;
/*
@@ -5132,6 +5654,16 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
+ /* Adjust ioctl wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Block ioctls in VF mode */
+ if (instance->requestorId && !allow_vf_ioctls) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_ERR "Controller in crit error\n");
error = -ENODEV;
@@ -5441,7 +5973,7 @@ megasas_aen_polling(struct work_struct *work)
u16 pd_index = 0;
u16 ld_index = 0;
int i, j, doscan = 0;
- u32 seq_num;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
if (!instance) {
@@ -5449,6 +5981,23 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
return;
}
+
+ /* Adjust event workqueue thread wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+ for (i = 0; i < wait_time; i++) {
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: %s waiting for "
+ "controller reset to finish for scsi%d\n",
+ __func__, instance->host->host_no);
+ }
+ msleep(1000);
+ }
+
instance->ev = NULL;
host = instance->host;
if (instance->evt_detail) {
@@ -5515,65 +6064,64 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j,
- 0);
-
- if (instance->ld_ids[ld_index] != 0xff) {
- if (sdev1) {
- scsi_device_put(sdev1);
- }
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
}
}
- }
+ doscan = 0;
}
- doscan = 0;
break;
case MR_EVT_LD_CREATED:
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
-
- if (instance->ld_ids[ld_index] !=
- 0xff) {
- if (!sdev1) {
- scsi_add_device(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
}
- }
- if (sdev1) {
- scsi_device_put(sdev1);
+ if (sdev1)
+ scsi_device_put(sdev1);
}
}
+ doscan = 0;
}
- doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
@@ -5591,50 +6139,55 @@ megasas_aen_polling(struct work_struct *work)
}
if (doscan) {
- printk(KERN_INFO "scanning ...\n");
- megasas_get_pd_list(instance);
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ instance->host->host_no);
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
}
}
}
}
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index] != 0xff) {
- if (!sdev1) {
- scsi_add_device(host,
- MEGASAS_MAX_PD_CHANNELS + i,
- j, 0);
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
} else {
- scsi_device_put(sdev1);
- }
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
}
}
}
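
The hunks above repeat one pattern on every IRQ registration and teardown path: each MSI-X vector gets an affinity hint spread round-robin over the online CPUs when it is requested, and the hint is cleared with a NULL mask before free_irq(). A minimal, self-contained sketch of that pattern follows; the helper name, the "my_drv" label and the reduced error handling are illustrative assumptions, not megaraid_sas code.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/pci.h>

static int my_setup_msix_affinity(struct msix_entry *entries, int count,
				  irq_handler_t handler, void *dev_id)
{
	int i, j, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < count; i++) {
		if (request_irq(entries[i].vector, handler, 0,
				"my_drv", dev_id))
			goto fail;
		/* Spread the vectors across online CPUs, one hint each. */
		if (irq_set_affinity_hint(entries[i].vector,
					  get_cpu_mask(cpu)))
			pr_warn("my_drv: affinity hint failed for cpu %d\n",
				cpu);
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* The hint must be dropped before the vector is freed. */
		irq_set_affinity_hint(entries[j].vector, NULL);
		free_irq(entries[j].vector, dev_id);
	}
	return -EBUSY;
}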
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e24b6eb645b5..081bfff12d00 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -143,12 +143,12 @@ u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
- return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
}
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -975,7 +975,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize;
}
- pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(raid->fpIoTimeoutForLd ?
+ raid->fpIoTimeoutForLd :
+ map->raidMap.fpPdIoTimeoutSec);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
pRAID_Context->regLockFlags = (isRead) ?
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f6555921fd7a..22600419ae9f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -62,7 +62,8 @@ megasas_complete_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, u8 alt_status);
int megasas_is_ldio(struct scsi_cmnd *cmd);
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
@@ -81,6 +82,13 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
extern int resetwaittime;
/**
@@ -549,12 +557,13 @@ fail_req_desc:
* For polling, MFI requires the cmd_status to be set to 0xFF before posting.
*/
int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds)
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+ u32 msecs = seconds * 1000;
/*
* Wait for cmd_status to change
@@ -585,7 +594,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
struct megasas_cmd *cmd;
u8 ret;
struct fusion_context *fusion;
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
int i;
struct megasas_header *frame_hdr;
@@ -644,18 +653,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
- init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(ioc_init_handle));
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
- req_desc =
- (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
-
- req_desc->Words = 0;
- req_desc->MFAIo.RequestFlags =
+ req_desc.Words = 0;
+ req_desc.MFAIo.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- cpu_to_le32s((u32 *)&req_desc->MFAIo);
- req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
+ cpu_to_le32s((u32 *)&req_desc.MFAIo);
+ req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
/*
* disable the intr before firing the init frame
@@ -669,10 +678,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
break;
}
- instance->instancet->fire_cmd(instance, req_desc->u.low,
- req_desc->u.high, instance->reg_set);
+ instance->instancet->fire_cmd(instance, req_desc.u.low,
+ req_desc.u.high, instance->reg_set);
- wait_and_poll(instance, cmd);
+ wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
frame_hdr = &cmd->frame->hdr;
if (frame_hdr->cmd_status != 0) {
@@ -723,7 +732,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
if (!fusion) {
megasas_return_cmd(instance, cmd);
- return 1;
+ return -ENXIO;
}
dcmd = &cmd->frame->dcmd;
@@ -1604,13 +1613,15 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
- io_request->IoFlags |=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->IoFlags |= cpu_to_le16(
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
cmd->request_desc->SCSIIO.DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
/*
* If the command is for the tape device, set the
* FP timeout to the os layer timeout value.
@@ -1770,7 +1781,8 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
if (index >= instance->max_fw_cmds) {
printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
- "descriptor\n", index);
+ "descriptor for scsi%d\n", index,
+ instance->host->host_no);
return NULL;
}
fusion = instance->ctrl_context;
@@ -2038,8 +2050,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* If we didn't complete any commands, check for FW fault */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT)
+ if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+ "for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
+ }
}
return IRQ_HANDLED;
@@ -2210,9 +2225,10 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
}
/* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int iotimeout, int *convert)
{
- int i, outstanding, retval = 0;
+ int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state;
for (i = 0; i < resetwaittime; i++) {
@@ -2221,18 +2237,49 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
instance->reg_set) & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
printk(KERN_WARNING "megasas: Found FW in FAULT state,"
- " will reset adapter.\n");
+ " will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ /* If SR-IOV VF mode & heartbeat timeout, don't wait */
+ if (instance->requestorId && !iotimeout) {
retval = 1;
goto out;
}
+ /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+ if (instance->requestorId && iotimeout) {
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ hb_seconds_missed = 0;
+ } else {
+ hb_seconds_missed++;
+ if (hb_seconds_missed ==
+ (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ " Heartbeat never completed "
+ " while polling during I/O "
+ " timeout handling for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ *convert = 1;
+ retval = 1;
+ goto out;
+ }
+ }
+ }
+
outstanding = atomic_read(&instance->fw_outstanding);
if (!outstanding)
goto out;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
- "commands to complete\n", i, outstanding);
+ "commands to complete for scsi%d\n", i,
+ outstanding, instance->host->host_no);
megasas_complete_cmd_dpc_fusion(
(unsigned long)instance);
}
@@ -2241,7 +2288,8 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
if (atomic_read(&instance->fw_outstanding)) {
printk("megaraid_sas: pending commands remain after waiting, "
- "will reset adapter.\n");
+ "will reset adapter scsi%d.\n",
+ instance->host->host_no);
retval = 1;
}
out:
@@ -2263,10 +2311,34 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
reply_desc->Words = ULLONG_MAX;
}
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ int i, j, retval = (DID_RESET << 16);
+
+ if (instance->mpio && instance->requestorId) {
+ for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+ for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->mpio &&
+ megasas_mgmt_info.instance[i]->requestorId
+ &&
+ (megasas_mgmt_info.instance[i]->ld_ids[j]
+ == scmd->device->id)) {
+ retval = (DID_NO_CONNECT << 16);
+ goto out;
+ }
+ }
+out:
+ return retval;
+}
+
/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost)
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
{
- int retval = SUCCESS, i, j, retry = 0;
+ int retval = SUCCESS, i, j, retry = 0, convert = 0;
struct megasas_instance *instance;
struct megasas_cmd_fusion *cmd_fusion;
struct fusion_context *fusion;
@@ -2277,28 +2349,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
+ mutex_lock(&instance->reset_mutex);
+
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
- "returning FAILED.\n");
+ "returning FAILED for scsi%d.\n",
+ instance->host->host_no);
return FAILED;
}
- mutex_lock(&instance->reset_mutex);
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
instance->instancet->disable_intr(instance);
msleep(1000);
/* First try waiting for commands to complete */
- if (megasas_wait_for_outstanding_fusion(instance)) {
+ if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ &convert)) {
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
printk(KERN_WARNING "megaraid_sas: resetting fusion "
- "adapter.\n");
+ "adapter scsi%d.\n", instance->host->host_no);
+ if (convert)
+ iotimeout = 0;
+
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_fw_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
if (cmd_fusion->scmd) {
scsi_dma_unmap(cmd_fusion->scmd);
- cmd_fusion->scmd->result = (DID_RESET << 16);
+ cmd_fusion->scmd->result =
+ megasas_check_mpio_paths(instance,
+ cmd_fusion->scmd);
cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
megasas_return_cmd_fusion(instance, cmd_fusion);
atomic_dec(&instance->fw_outstanding);
@@ -2313,13 +2396,67 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
- ", killing adapter.\n");
+ ", killing adapter scsi%d.\n",
+ instance->host->host_no);
megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
goto out;
}
+ /* Let SR-IOV VF & PF sync up if there was a HB failure */
+ if (instance->requestorId && !iotimeout) {
+ msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+ /* Look for a late HB update after VF settle time */
+ if (abs_state == MFI_STATE_OPERATIONAL &&
+ (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter)) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ "Late FW heartbeat update for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ } else {
+ /* In VF mode, first poll for FW ready */
+ for (i = 0;
+ i < (MEGASAS_RESET_WAIT_TIME * 1000);
+ i += 20) {
+ status_reg =
+ instance->instancet->
+ read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg &
+ MFI_STATE_MASK;
+ if (abs_state == MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas"
+ ": SR-IOV: FW was found"
+ "to be in ready state "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ break;
+ }
+ msleep(20);
+ }
+ if (abs_state != MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "FW not in ready state after %d"
+ " seconds for scsi%d, status_reg = "
+ "0x%x.\n",
+ MEGASAS_RESET_WAIT_TIME,
+ instance->host->host_no,
+ status_reg);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+ }
+ }
+
/* Now try to reset the chip */
for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
@@ -2346,7 +2483,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
readl(&instance->reg_set->fusion_host_diag);
if (retry++ == 100) {
printk(KERN_WARNING "megaraid_sas: "
- "Host diag unlock failed!\n");
+ "Host diag unlock failed! "
+ "for scsi%d\n",
+ instance->host->host_no);
break;
}
}
@@ -2368,7 +2507,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (retry++ == 1000) {
printk(KERN_WARNING "megaraid_sas: "
"Diag reset adapter never "
- "cleared!\n");
+ "cleared for scsi%d!\n",
+ instance->host->host_no);
break;
}
}
@@ -2390,29 +2530,29 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
"state < MFI_STATE_FW_INIT, state = "
- "0x%x\n", abs_state);
+ "0x%x for scsi%d\n", abs_state,
+ instance->host->host_no);
continue;
}
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
printk(KERN_WARNING "megaraid_sas: Failed to "
- "transition controller to ready.\n");
+ "transition controller to ready "
+ "for scsi%d.\n",
+ instance->host->host_no);
continue;
}
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: "
- "megasas_ioc_init_fusion() failed!\n");
+ "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n",
+ instance->host->host_no);
continue;
}
- clear_bit(MEGASAS_FUSION_IN_RESET,
- &instance->reset_flags);
- instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
-
/* Re-fire management commands */
for (j = 0 ; j < instance->max_fw_cmds; j++) {
cmd_fusion = fusion->cmd_list[j];
@@ -2422,7 +2562,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance->
cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->frame->dcmd.opcode ==
- MR_DCMD_LD_MAP_GET_INFO) {
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
megasas_return_cmd(instance,
cmd_mfi);
megasas_return_cmd_fusion(
@@ -2433,11 +2573,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
instance,
cmd_mfi->context.smid
-1);
- if (!req_desc)
+ if (!req_desc) {
printk(KERN_WARNING
"req_desc NULL"
- "\n");
- else {
+ " for scsi%d\n",
+ instance->host->host_no);
+ /* Return leaked MPT
+ frame */
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ } else {
instance->instancet->
fire_cmd(instance,
req_desc->
@@ -2451,6 +2595,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
/* Reset load balance info */
memset(fusion->load_balance_info, 0,
sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2459,18 +2608,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
if (!megasas_get_map_info(instance))
megasas_sync_map_info(instance);
+ /* Restart SR-IOV heartbeat */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
/* Adapter reset completed successfully */
printk(KERN_WARNING "megaraid_sas: Reset "
- "successful.\n");
+ "successful for scsi%d.\n",
+ instance->host->host_no);
retval = SUCCESS;
goto out;
}
/* Reset failed, kill the adapter */
printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
- "adapter.\n");
+ "adapter scsi%d.\n", instance->host->host_no);
megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
retval = FAILED;
} else {
+ /* For VF: Restart HB timer if we didn't OCR */
+ if (instance->requestorId) {
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ }
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2487,7 +2657,7 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
struct megasas_instance *instance =
container_of(work, struct megasas_instance, work_init);
- megasas_reset_fusion(instance->host);
+ megasas_reset_fusion(instance->host, 0);
}
struct megasas_instance_template megasas_instance_template_fusion = {
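
Both the resume and reset paths above re-arm instance->sriov_heartbeat_timer through megasas_start_timer() and only delete it with del_timer_sync() once recovery gives up. A rough sketch of such a self re-arming heartbeat watchdog, using the timer_list API of this kernel generation, is shown below; the my_vf structure, the my_hb_* names and the two-second period are assumptions for illustration, and the real handler presumably compares the firmware/PF counters in hb_host_mem, as the wait_for_outstanding path above does.

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_HB_INTERVAL	(2 * HZ)	/* assumed polling period */

struct my_vf {
	struct timer_list hb_timer;
	u32 fw_counter;		/* advanced by the other end (FW/PF) */
	u32 last_fw_counter;	/* last value the driver saw */
};

static void my_hb_handler(unsigned long data)
{
	struct my_vf *vf = (struct my_vf *)data;

	if (vf->fw_counter == vf->last_fw_counter)
		pr_warn("my_drv: heartbeat stalled, trigger recovery here\n");
	else
		vf->last_fw_counter = vf->fw_counter;

	/* Self re-arming: keeps running until del_timer_sync() at teardown. */
	mod_timer(&vf->hb_timer, jiffies + MY_HB_INTERVAL);
}

static void my_hb_start(struct my_vf *vf)
{
	setup_timer(&vf->hb_timer, my_hb_handler, (unsigned long)vf);
	mod_timer(&vf->hb_timer, jiffies + MY_HB_INTERVAL);
}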
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 35a51397b364..e76af5459a09 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -485,6 +485,9 @@ struct MPI2_IOC_INIT_REQUEST {
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
struct MR_DEV_HANDLE_INFO {
u16 curDevHdl;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637308be..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT2_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(req->bio) > 1) {
+ if (bio_multiple_segments(req->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(req) - 4), pci_dma_out);
} else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
MPI2_SGE_FLAGS_END_OF_LIST);
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
ioc->base_add_sg_single(psge, sgl_flags |
(blk_rq_bytes(rsp) + 4), pci_dma_in);
} else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
le16_to_cpu(mpi_reply->ResponseDataLength);
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (bio_segments(rsp->bio) > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88c6a74..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
Mpi2SmpPassthroughRequest_t *mpi_request;
Mpi2SmpPassthroughReply_t *mpi_reply;
- int rc, i;
+ int rc;
u16 smid;
u32 ioc_state;
unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *pci_addr_out = NULL;
u16 wait_state_count;
struct request *rsp = req->next_rq;
- struct bio_vec *bvec = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
if (!rsp) {
pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ioc->transport_cmds.status = MPT3_CMD_PENDING;
/* Check if the request is split across multiple segments */
- if (req->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(req->bio)) {
u32 offset = 0;
/* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
goto out;
}
- bio_for_each_segment(bvec, req->bio, i) {
+ bio_for_each_segment(bvec, req->bio, iter) {
memcpy(pci_addr_out + offset,
- page_address(bvec->bv_page) + bvec->bv_offset,
- bvec->bv_len);
- offset += bvec->bv_len;
+ page_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* Check if the response needs to be populated across
* multiple segments */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
&pci_dma_in);
if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
psge = &mpi_request->SGL;
- if (req->bio->bi_vcnt > 1)
+ if (bio_multiple_segments(req->bio))
ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
pci_dma_in, (blk_rq_bytes(rsp) + 4));
else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* check if the resp needs to be copied from the allocated
* pci mem */
- if (rsp->bio->bi_vcnt > 1) {
+ if (bio_multiple_segments(rsp->bio)) {
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+ if (bytes_to_copy <= bvec.bv_len) {
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
offset, bytes_to_copy);
break;
} else {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
- offset, bvec->bv_len);
- bytes_to_copy -= bvec->bv_len;
+ memcpy(page_address(bvec.bv_page) +
+ bvec.bv_offset, pci_addr_in +
+ offset, bvec.bv_len);
+ bytes_to_copy -= bvec.bv_len;
}
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
}
} else {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
bio->bi_rw &= ~REQ_WRITE;
or->in.bio = bio;
- or->in.total_bytes = bio->bi_size;
+ or->in.total_bytes = bio->bi_iter.bi_size;
return 0;
}
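
The three files above are mechanical conversions to the immutable-biovec interface: the loop variable of bio_for_each_segment() becomes a struct bio_vec held by value, the cursor becomes a struct bvec_iter, and byte counts move from bio->bi_size to bio->bi_iter.bi_size. A minimal sketch of the new-style walk is given below; my_copy_bio_to_buf() is an illustrative helper, not a kernel API, and like the hunks above it assumes the pages are addressable through page_address().

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy a bio's payload into a flat buffer using the new iterator. */
static void my_copy_bio_to_buf(struct bio *bio, void *buf)
{
	struct bio_vec bvec;		/* by value, not a pointer */
	struct bvec_iter iter;		/* replaces the old integer index */
	u32 offset = 0;

	bio_for_each_segment(bvec, bio, iter) {
		memcpy(buf + offset,
		       page_address(bvec.bv_page) + bvec.bv_offset,
		       bvec.bv_len);
		offset += bvec.bv_len;
	}
	/* offset now equals bio->bi_iter.bi_size as seen at entry. */
}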
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 62f1a6031765..0d78a4d5576c 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+ if (request_irq(instance->irq, pas16_intr, 0,
"pas16", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a04b4ff8c7f6..28b4e8139153 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -323,24 +323,17 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
-#define IB_MEMMAP(c) \
- (*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[IB].virt_ptr + \
+#define IB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[IB].virt_ptr + \
pm8001_ha->evtlog_ib_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
- if (pm8001_ha->chip_id != chip_8001)
- str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
- else
- str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
- if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id != chip_8001))
- pm8001_ha->evtlog_ib_offset = 0;
- if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id == chip_8001))
+ if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
@@ -363,24 +356,17 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
-#define OB_MEMMAP(c) \
- (*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[OB].virt_ptr + \
+#define OB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[OB].virt_ptr + \
pm8001_ha->evtlog_ob_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
- if (pm8001_ha->chip_id != chip_8001)
- str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
- else
- str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
- if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id != chip_8001))
- pm8001_ha->evtlog_ob_offset = 0;
- if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
- && (pm8001_ha->chip_id == chip_8001))
+ if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
@@ -466,7 +452,7 @@ static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- u32 count;
+ ssize_t count;
count = pm80xx_get_fatal_dump(cdev, attr, buf);
return count;
@@ -484,7 +470,7 @@ static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- u32 count;
+ ssize_t count;
count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
return count;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 0a1296a87d66..a97be015e52e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -644,7 +644,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
/* 8081 controllers need BAR shift to access MPI space
* as this is shared with BIOS data */
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -673,7 +673,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
update_outbnd_queue_table(pm8001_ha, i);
/* 8081 controller donot require these operations */
- if (deviceid != 0x8081) {
+ if (deviceid != 0x8081 && deviceid != 0x0042) {
mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
/* 7->130ms, 34->500ms, 119->1.5s */
mpi_set_open_retry_interval_reg(pm8001_ha, 119);
@@ -701,7 +701,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
u32 gst_len_mpistate;
u16 deviceid;
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -2502,11 +2502,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*in order to force CPU ordering*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2522,11 +2518,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2550,11 +2542,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2617,11 +2605,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2641,11 +2625,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2674,20 +2654,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -2796,11 +2765,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2909,20 +2874,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -4467,23 +4421,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
" stat 0x%x but aborted by upper layer "
"\n", task, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- } else if (task->uldd_task) {
- spin_unlock_irqrestore(&task->task_state_lock,
- flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
- return 0;
- } else if (!task->uldd_task) {
+ } else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
return 0;
}
}
@@ -5020,7 +4962,7 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
/* check max is 1 Mbytes */
if ((length > 0x100000) || (gsm_dump_offset & 3) ||
((gsm_dump_offset + length) > 0x1000000))
- return 1;
+ return -EINVAL;
if (pm8001_ha->chip_id == chip_8001)
bar = 2;
@@ -5048,12 +4990,12 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
gsm_base = GSM_BASE;
if (-1 == pm8001_bar4_shift(pm8001_ha,
(gsm_base + shift_value)))
- return 1;
+ return -EIO;
} else {
gsm_base = 0;
if (-1 == pm80xx_bar4_shift(pm8001_ha,
(gsm_base + shift_value)))
- return 1;
+ return -EIO;
}
gsm_dump_offset = (gsm_dump_offset + offset) &
0xFFFF0000;
@@ -5072,13 +5014,8 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
direct_data += sprintf(direct_data, "%08x ", value);
}
/* Shift back to BAR4 original address */
- if (pm8001_ha->chip_id == chip_8001) {
- if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
- return 1;
- } else {
- if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
- return 1;
- }
+ if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+ return -EIO;
pm8001_ha->fatal_forensic_shift_offset += 1024;
if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 73a120d81b4d..c4f31b21feb8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -625,7 +625,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->nvmd_completion = &completion;
if (pm8001_ha->chip_id == chip_8001) {
- if (deviceid == 0x8081) {
+ if (deviceid == 0x8081 || deviceid == 0x0042) {
payload.minor_function = 4;
payload.length = 4096;
} else {
@@ -646,6 +646,9 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
if (deviceid == 0x8081)
pm8001_ha->sas_addr[j] =
payload.func_specific[0x704 + i];
+ else if (deviceid == 0x0042)
+ pm8001_ha->sas_addr[j] =
+ payload.func_specific[0x010 + i];
} else
pm8001_ha->sas_addr[j] =
payload.func_specific[0x804 + i];
@@ -713,11 +716,9 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
number_of_intr = 1;
- flag |= IRQF_DISABLED;
} else {
number_of_intr = PM8001_MAX_MSIX_VEC;
flag &= ~IRQF_SHARED;
- flag |= IRQF_DISABLED;
}
max_entry = sizeof(pm8001_ha->msix_entries) /
@@ -1072,10 +1073,7 @@ err_out_enable:
*/
static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
- {
- PCI_DEVICE(0x117c, 0x0042),
- .driver_data = chip_8001
- },
+ { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
/* Support for SPC/SPCv/SPCve controllers */
{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f50ac44b950e..8a44bc92bc78 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -434,6 +434,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
ccb->n_elem = n_elem;
ccb->ccb_tag = tag;
ccb->task = t;
+ ccb->device = pm8001_dev;
switch (t->task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
@@ -865,13 +866,11 @@ ex_err:
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
- u32 tag;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
spin_lock_irqsave(&pm8001_ha->lock, flags);
- pm8001_tag_alloc(pm8001_ha, &tag);
if (pm8001_dev) {
u32 device_id = pm8001_dev->device_id;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6c5fd5ee22d3..1ee06f21803b 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -708,5 +708,17 @@ ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
+static inline void
+pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task, struct pm8001_ccb_info *ccb,
+ u32 ccb_idx)
+{
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ smp_mb(); /*in order to force CPU ordering*/
+ spin_unlock(&pm8001_ha->lock);
+ task->task_done(task);
+ spin_lock(&pm8001_ha->lock);
+}
+
#endif
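
Every pm8001/pm80xx hunk above collapses the open-coded free, unlock, task_done(), re-lock sequence into the pm8001_ccb_task_free_done() helper added here; because the helper itself drops and re-takes pm8001_ha->lock, it must be called with that lock held. A hypothetical call site is sketched below; my_complete_failed_task() and its surrounding locking are assumptions for illustration, not code from the driver.

#include <linux/spinlock.h>
#include "pm8001_sas.h"

static void my_complete_failed_task(struct pm8001_hba_info *pm8001_ha,
				    struct sas_task *t,
				    struct pm8001_ccb_info *ccb, u32 tag)
{
	spin_lock_irq(&pm8001_ha->lock);
	/* ... fill in t->task_status as the completion paths above do ... */
	pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
	spin_unlock_irq(&pm8001_ha->lock);
}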
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c950dc5c9943..d70587f96184 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -91,7 +91,6 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
- u32 status = 1;
u32 accum_len , reg_val, index, *temp;
unsigned long start;
u8 *direct_data;
@@ -111,13 +110,10 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
- }
- if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,6 +122,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
pm8001_ha->forensic_fatal_step = 0;
pm8001_ha->fatal_bar_loc = 0;
}
+
/* Read until accum_len is retrived */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
@@ -135,7 +132,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
accum_len));
- return status;
+ return -EIO;
}
if (accum_len == 0 || accum_len >= 0x100000) {
pm8001_ha->forensic_info.data_buf.direct_data +=
@@ -178,7 +175,6 @@ moreData:
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
- status = 0;
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -194,7 +190,6 @@ moreData:
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
- status = 0;
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -214,7 +209,6 @@ moreData:
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
- status = 0;
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
@@ -239,7 +233,7 @@ moreData:
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
" = 0x%x\n", reg_val));
- return -1;
+ return -EIO;
}
/* Read the next 64K of the debug data. */
@@ -259,7 +253,6 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_len = 0;
pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
- status = 0;
}
}
@@ -2175,11 +2168,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*in order to force CPU ordering*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2195,11 +2184,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2221,11 +2206,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2288,11 +2269,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2312,11 +2289,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2345,20 +2318,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -2470,11 +2432,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2596,20 +2554,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else if (t->uldd_task) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
- } else if (!t->uldd_task) {
+ } else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- t->task_done(t);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
}
}
@@ -4304,23 +4251,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"\n", task, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
return 0;
- } else if (task->uldd_task) {
- spin_unlock_irqrestore(&task->task_state_lock,
- flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/* ditto */
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
- return 0;
- } else if (!task->uldd_task) {
+ } else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
- mb();/*ditto*/
- spin_unlock_irq(&pm8001_ha->lock);
- task->task_done(task);
- spin_lock_irq(&pm8001_ha->lock);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
return 0;
}
}
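Note: the pm80xx hunks above repeatedly replace the open-coded "free the CCB, issue a barrier, drop the HBA lock, call task_done(), retake the lock" sequence with a single helper, pm8001_ccb_task_free_done(), and collapse the identical uldd_task / !uldd_task branches. The following is a minimal userspace analog of that refactor, not the driver code; pthread_mutex stands in for the HBA spinlock and every identifier here (ha_lock, ccb_task_free, ccb_task_free_done) is illustrative only.

#include <pthread.h>

struct task {
	void (*task_done)(struct task *t);
};

static pthread_mutex_t ha_lock = PTHREAD_MUTEX_INITIALIZER;

static void ccb_task_free(struct task *t)
{
	(void)t;	/* placeholder: release per-command bookkeeping here */
}

/* Caller holds ha_lock; the helper frees the command state and runs the
 * completion callback with the lock temporarily dropped, exactly once. */
static void ccb_task_free_done(struct task *t)
{
	ccb_task_free(t);
	pthread_mutex_unlock(&ha_lock);	/* drop the lock around the callback */
	t->task_done(t);
	pthread_mutex_lock(&ha_lock);	/* retake it before returning */
}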
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 97dabd39b092..158020522dfb 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -379,14 +379,7 @@
#define DEBUG_PRINT_NVRAM 0
#define DEBUG_QLA1280 0
-/*
- * The SGI VISWS is broken and doesn't support MMIO ;-(
- */
-#ifdef CONFIG_X86_VISWS
-#define MEMORY_MAPPED_IO 0
-#else
#define MEMORY_MAPPED_IO 1
-#endif
#include "qla1280.h"
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c7812f..44def6bb4bb0 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 570c7fcc0c4d..07befcf365b8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
+qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fw_dump_template || !ha->fw_dump_template_len)
+ return 0;
+
+ ql_dbg(ql_dbg_user, vha, 0x70e2,
+ "chunk <- off=%llx count=%zx\n", off, count);
+ return memory_read_from_buffer(buf, count, &off,
+ ha->fw_dump_template, ha->fw_dump_template_len);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t size;
+
+ if (off == 0) {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ size = qla27xx_fwdt_template_size(buf);
+ ql_dbg(ql_dbg_user, vha, 0x70d1,
+ "-> allocating fwdt (%x bytes)...\n", size);
+ ha->fw_dump_template = vmalloc(size);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x70d2,
+ "Failed allocate fwdt (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_template_len = size;
+ }
+
+ if (off + count > ha->fw_dump_template_len) {
+ count = ha->fw_dump_template_len - off;
+ ql_dbg(ql_dbg_user, vha, 0x70d3,
+ "chunk -> truncating to %zx bytes.\n", count);
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70d4,
+ "chunk -> off=%llx count=%zx\n", off, count);
+ memcpy(ha->fw_dump_template + off, buf, count);
+
+ if (off + count == ha->fw_dump_template_len) {
+ size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_user, vha, 0x70d5,
+ "-> allocating fwdump (%x bytes)...\n", size);
+ ha->fw_dump = vmalloc(size);
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0x70d6,
+ "Failed allocate fwdump (%x bytes).\n", size);
+ return -ENOMEM;
+ }
+ ha->fw_dump_len = size;
+ }
+
+ return count;
+}
+static struct bin_attribute sysfs_fw_dump_template_attr = {
+ .attr = {
+ .name = "fw_dump_template",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla2x00_sysfs_read_fw_dump_template,
+ .write = qla2x00_sysfs_write_fw_dump_template,
+};
+
+static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ ssize_t rval = 0;
if (ha->optrom_state != QLA_SREADING)
return 0;
- return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
- ha->optrom_region_size);
+ mutex_lock(&ha->optrom_mutex);
+ rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
+ mutex_unlock(&ha->optrom_mutex);
+
+ return rval;
}
static ssize_t
@@ -265,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
+ mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
+ mutex_unlock(&ha->optrom_mutex);
return count;
}
@@ -288,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
-
uint32_t start = 0;
uint32_t size = ha->optrom_size;
int val, valid;
+ ssize_t rval = count;
if (off)
return -EINVAL;
@@ -304,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
if (start > ha->optrom_size)
return -EINVAL;
+ mutex_lock(&ha->optrom_mutex);
switch (val) {
case 0:
if (ha->optrom_state != QLA_SREADING &&
- ha->optrom_state != QLA_SWRITING)
- return -EINVAL;
-
+ ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
ha->optrom_state = QLA_SWAITING;
ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_buffer = NULL;
break;
case 1:
- if (ha->optrom_state != QLA_SWAITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x).\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto out;
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7063,
"HBA not online, failing NVRAM update.\n");
- return -EAGAIN;
+ rval = -EAGAIN;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size);
break;
case 2:
- if (ha->optrom_state != QLA_SWAITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWAITING) {
+ rval = -EINVAL;
+ goto out;
+ }
/*
* We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
"Invalid start region 0x%x/0x%x.\n", start, size);
- return -EINVAL;
+ rval = -EINVAL;
+ goto out;
}
ha->optrom_region_start = start;
@@ -403,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
case 3:
- if (ha->optrom_state != QLA_SWRITING)
- return -EINVAL;
+ if (ha->optrom_state != QLA_SWRITING) {
+ rval = -EINVAL;
+ goto out;
+ }
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
"HBA not online, failing flash update.\n");
- return -EAGAIN;
+ rval = -EAGAIN;
+ goto out;
}
ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size);
break;
default:
- return -EINVAL;
+ rval = -EINVAL;
}
- return count;
+
+out:
+ mutex_unlock(&ha->optrom_mutex);
+ return rval;
}
static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@ static struct sysfs_entry {
int is4GBp_only;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
+ { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
{ "nvram", &sysfs_nvram_attr, },
{ "optrom", &sysfs_optrom_attr, },
{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
+ if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+ continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1187,7 +1299,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n", size);
}
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+ else
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (!IS_P3P_TYPE(vha->hw))
+ return -EINVAL;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ vha->hw->allow_cna_fw_dump = val != 0;
+
+ return strlen(buf);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+ qla2x00_allow_cna_fw_dump_show,
+ qla2x00_allow_cna_fw_dump_store);
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1464,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_diag_requests,
&dev_attr_diag_megabytes,
&dev_attr_fw_dump_size,
+ &dev_attr_allow_cna_fw_dump,
NULL,
};
@@ -1509,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_16GB:
speed = FC_PORTSPEED_16GBIT;
break;
+ case PORT_SPEED_32GB:
+ speed = FC_PORTSPEED_32GBIT;
+ break;
}
fc_host_speed(shost) = speed;
}
@@ -1990,6 +2140,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->flags.delete_progress = 1;
+ qlt_remove_target(ha, vha);
+
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
@@ -2158,6 +2310,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
else if (IS_QLAFX00(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLA27XX(ha))
+ speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+ FC_PORTSPEED_8GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
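Note: the qla_attr.c hunks serialize the optrom sysfs handlers with ha->optrom_mutex and convert every early "return -EINVAL/-ENOMEM/-EAGAIN" in the control write path into "rval = ...; goto out;" so the mutex is released at one exit point. A compact sketch of that single-exit unlock pattern, written as a standalone userspace analog (optrom_ctl_write and optrom_state here are illustrative, not the driver's symbols):

#include <errno.h>
#include <pthread.h>
#include <sys/types.h>

static pthread_mutex_t optrom_mutex = PTHREAD_MUTEX_INITIALIZER;
static int optrom_state;	/* 0 = waiting, nonzero = reading/writing */

static ssize_t optrom_ctl_write(int val, size_t count)
{
	ssize_t rval = (ssize_t)count;	/* success: report bytes consumed */

	pthread_mutex_lock(&optrom_mutex);
	switch (val) {
	case 0:				/* return to the waiting state */
		if (optrom_state == 0) {
			rval = -EINVAL;
			goto out;
		}
		optrom_state = 0;
		break;
	default:
		rval = -EINVAL;		/* unknown command */
	}
out:
	pthread_mutex_unlock(&optrom_mutex);	/* single unlock point for every path */
	return rval;
}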
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e6b7ee..71ff340f6de4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
if (ha->flags.nic_core_reset_hdlr_active)
return -EBUSY;
+ mutex_lock(&ha->optrom_mutex);
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
- if (rval)
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
return rval;
+ }
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
bsg_job->job_done(bsg_job);
return rval;
}
@@ -1465,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
+ mutex_lock(&ha->optrom_mutex);
rval = qla2x00_optrom_setup(bsg_job, vha, 1);
- if (rval)
+ if (rval) {
+ mutex_unlock(&ha->optrom_mutex);
return rval;
+ }
/* Set the isp82xx_no_md_cap not to capture minidump */
ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
+ mutex_unlock(&ha->optrom_mutex);
bsg_job->job_done(bsg_job);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f553bb1..97255f7c3975 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x015b | 0x4b,0xba,0xfa |
- * | | | 0x0x015a |
- * | Mailbox commands | 0x1187 | 0x111a-0x111b |
- * | | | 0x1155-0x1158 |
- * | | | 0x1018-0x1019 |
+ * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
+ * | | | 0x0144,0x0146 |
+ * | | | 0x015b-0x0160 |
+ * | | | 0x016e-0x0170 |
+ * | Mailbox commands | 0x1187 | 0x1018-0x1019 |
+ * | | | 0x10ca |
* | | | 0x1115-0x1116 |
- * | | | 0x10ca |
+ * | | | 0x111a-0x111b |
+ * | | | 0x1155-0x1158 |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2016 |
@@ -32,18 +34,17 @@
* | | | 0x5047,0x5052 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
+ * | | | 0x507b |
* | Timer Routines | 0x6012 | |
- * | User Space Interactions | 0x70e1 | 0x7018,0x702e, |
- * | | | 0x7020,0x7024, |
- * | | | 0x7039,0x7045, |
- * | | | 0x7073-0x7075, |
- * | | | 0x707b,0x708c, |
- * | | | 0x70a5,0x70a6, |
- * | | | 0x70a8,0x70ab, |
- * | | | 0x70ad-0x70ae, |
- * | | | 0x70d1-0x70db, |
- * | | | 0x7047,0x703b |
- * | | | 0x70de-0x70df, |
+ * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
+ * | | | 0x7020,0x7024 |
+ * | | | 0x7039,0x7045 |
+ * | | | 0x7073-0x7075 |
+ * | | | 0x70a5-0x70a6 |
+ * | | | 0x70a8,0x70ab |
+ * | | | 0x70ad-0x70ae |
+ * | | | 0x70d7-0x70db |
+ * | | | 0x70de-0x70df |
* | Task Management | 0x803d | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
@@ -59,7 +60,11 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd010 | |
+ * | Misc | 0xd2ff | 0xd017-0xd019 |
+ * | | | 0xd020 |
+ * | | | 0xd02e-0xd0ff |
+ * | | | 0xd101-0xd1fe |
+ * | | | 0xd212-0xd2fe |
* | Target Mode | 0xe070 | 0xe021 |
* | Target Mode Management | 0xf072 | 0xf002-0xf003 |
* | | | 0xf046-0xf049 |
@@ -104,7 +109,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
return ptr + (rsp->length * sizeof(response_t));
}
-static int
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0, mb1;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ dwords = qla2x00_gid_list_size(ha) / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
+
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+ WRT_REG_WORD(&reg->mailbox9, 0);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ ha->flags.mbox_int = 0;
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_REG_WORD(&reg->mailbox0);
+ mb1 = RD_REG_WORD(&reg->mailbox1);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+ ha->flags.mbox_int = 1;
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+ return rval;
+}
+
+int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
@@ -139,6 +224,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ ha->flags.mbox_int = 0;
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
udelay(5);
}
+ ha->flags.mbox_int = 1;
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < dwords; idx++)
- ram[cnt + idx] = swab32(dump[idx]);
+ ram[cnt + idx] = IS_QLA27XX(ha) ?
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]);
} else {
rval = QLA_FUNCTION_FAILED;
}
@@ -208,7 +296,7 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
return buf;
}
-static inline int
+int
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
{
int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
return rval;
}
-static int
+int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
device_reg_t __iomem *reg;
- if (!ha->mqenable || IS_QLA83XX(ha))
+ if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
return ptr;
mq = ptr;
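Note: qla27xx_dump_mpi_ram(), added above, issues a mailbox command and then spins on the host status register for up to 6,000,000 iterations with a 5 microsecond delay, copying the RAM out in gid-list-sized chunks. The sketch below shows only the bounded poll-for-completion idiom in self-contained userspace C; status_reg, DONE_BIT and wait_for_done are stand-ins, and usleep() plays the role of udelay().

#include <stdbool.h>
#include <unistd.h>

#define DONE_BIT 0x1u

static volatile unsigned int status_reg;	/* stand-in for a device status register */

static bool wait_for_done(unsigned long iterations)
{
	while (iterations--) {
		if (status_reg & DONE_BIT)	/* completion bit observed */
			return true;
		usleep(5);			/* analog of the driver's udelay(5) */
	}
	return false;				/* budget exhausted: treat as failure */
}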
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4f8b6c..cc961040f8b1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+ uint32_t, void **);
+extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 41d6491d7bd9..6a106136716c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@ typedef union {
struct device_reg_25xxmq isp25mq;
struct device_reg_82xx isp82;
struct device_reg_fx00 ispfx00;
-} device_reg_t;
+} __iomem device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@ struct mbx_cmd_32 {
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
-
+#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -938,6 +938,7 @@ struct mbx_cmd_32 {
*/
#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
#define MBC_READ_SERDES 0x4 /* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */
#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
-
-struct init_cb_fx {
- uint16_t version;
- uint16_t reserved_1[13];
- __le16 request_q_outpointer;
- __le16 response_q_inpointer;
- uint16_t reserved_2[2];
- __le16 response_q_length;
- __le16 request_q_length;
- uint16_t reserved_3[2];
- __le32 request_q_address[2];
- __le32 response_q_address[2];
- uint16_t reserved_4[4];
- uint8_t response_q_msivec;
- uint8_t reserved_5[19];
- uint16_t interrupt_delay_timer;
- uint16_t reserved_6;
- uint32_t fwoptions1;
- uint32_t fwoptions2;
- uint32_t fwoptions3;
- uint8_t reserved_7[24];
-};
-
-
/*
* Get Link Status mailbox command return buffer.
*/
@@ -2172,6 +2149,7 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_SPEED_4GB 0x8
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
+#define FDMI_PORT_SPEED_32GB 0x40
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@ struct bidi_statistics {
#define QLA_MQ_SIZE 32
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
- ((ha->mqenable || IS_QLA83XX(ha)) ? \
+ ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
((void __iomem *)ha->iobase))
#define QLA_REQ_QUE_ID(tag) \
@@ -2750,6 +2728,13 @@ struct qlfc_fw {
uint32_t len;
};
+struct scsi_qlt_host {
+ void *target_lport_ptr;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+ struct qla_tgt *qla_tgt;
+};
+
struct qlt_hw_data {
/* Protected by hw lock */
uint32_t enable_class_2:1;
@@ -2765,15 +2750,11 @@ struct qlt_hw_data {
uint32_t __iomem *atio_q_in;
uint32_t __iomem *atio_q_out;
- void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt *qla_tgt;
struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
- struct mutex tgt_mutex;
- struct mutex tgt_host_action_mutex;
int saved_set;
uint16_t saved_exchange_count;
@@ -2815,7 +2796,6 @@ struct qla_hw_data {
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
- uint32_t port0 :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
@@ -2846,7 +2826,7 @@ struct qla_hw_data {
spinlock_t hardware_lock ____cacheline_aligned;
int bars;
int mem_only;
- device_reg_t __iomem *iobase; /* Base I/O address */
+ device_reg_t *iobase; /* Base I/O address */
resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
@@ -2865,8 +2845,8 @@ struct qla_hw_data {
uint32_t rsp_que_off;
/* Multi queue data structs */
- device_reg_t __iomem *mqiobase;
- device_reg_t __iomem *msixbase;
+ device_reg_t *mqiobase;
+ device_reg_t *msixbase;
uint16_t msix_count;
uint8_t mqenable;
struct req_que **req_q_map;
@@ -2902,6 +2882,7 @@ struct qla_hw_data {
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
#define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
@@ -2925,6 +2906,7 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2945,7 +2927,8 @@ struct qla_hw_data {
#define DT_ISP8031 BIT_16
#define DT_ISPFX00 BIT_17
#define DT_ISP8044 BIT_18
-#define DT_ISP_LAST (DT_ISP8044 << 1)
+#define DT_ISP2071 BIT_19
+#define DT_ISP_LAST (DT_ISP2071 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2975,6 +2958,7 @@ struct qla_hw_data {
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2983,6 +2967,7 @@ struct qla_hw_data {
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
+#define IS_QLA27XX(ha) (IS_QLA2071(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2991,12 +2976,13 @@ struct qla_hw_data {
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
- IS_QLA8044(ha))
+ IS_QLA8044(ha) || IS_QLA27XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -3006,7 +2992,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha))
#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
@@ -3131,6 +3118,9 @@ struct qla_hw_data {
uint16_t fw_xcb_count;
uint16_t fw_iocb_count;
+ uint32_t fw_shared_ram_start;
+ uint32_t fw_shared_ram_end;
+
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
uint16_t fw_seriallink_options24[4];
@@ -3139,6 +3129,9 @@ struct qla_hw_data {
uint32_t mpi_capabilities;
uint8_t phy_version[3];
+ /* Firmware dump template */
+ void *fw_dump_template;
+ uint32_t fw_dump_template_len;
/* Firmware dump information. */
struct qla2xxx_fw_dump *fw_dump;
uint32_t fw_dump_len;
@@ -3181,6 +3174,7 @@ struct qla_hw_data {
#define QLA_SWRITING 2
uint32_t optrom_region_start;
uint32_t optrom_region_size;
+ struct mutex optrom_mutex;
/* PCI expansion ROM image information. */
#define ROM_CODE_TYPE_BIOS 0
@@ -3307,6 +3301,7 @@ struct qla_hw_data {
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
+ int allow_cna_fw_dump;
};
/*
@@ -3435,6 +3430,7 @@ typedef struct scsi_qla_host {
#define VP_ERR_FAB_LOGOUT 4
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
+ struct scsi_qlt_host vha_tgt;
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a29294b62..32ab80957688 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
goto out;
if (!ha->fce)
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa905a0..3a7353eaccbd 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@ struct qla_flt_header {
#define FLT_REG_NVRAM_0 0x15
#define FLT_REG_VPD_1 0x16
#define FLT_REG_NVRAM_1 0x17
+#define FLT_REG_VPD_2 0xD4
+#define FLT_REG_NVRAM_2 0xD5
+#define FLT_REG_VPD_3 0xD6
+#define FLT_REG_NVRAM_3 0xD7
#define FLT_REG_FDT 0x1a
#define FLT_REG_FLT 0x1c
#define FLT_REG_HW_EVENT_0 0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f426628a0a5..e665e8109933 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
dma_addr_t);
extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
extern int
qla24xx_abort_target(struct fc_port *, unsigned int, int);
extern int
@@ -511,6 +512,16 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int);
extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_command(srb_t *);
extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b32d9a..e377f9d2f92a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
+ else if (IS_QLA27XX(ha))
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+ FDMI_PORT_SPEED_8GB);
else if (IS_QLA25XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
break;
+ case PORT_SPEED_32GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+ break;
default:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_10:
list[i].fp_speed = PORT_SPEED_16GB;
break;
+ case BIT_8:
+ list[i].fp_speed = PORT_SPEED_32GB;
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4facf7f..38aeb54cd9d8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@ done:
}
static void
-qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
+qla2x00_tmf_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- uint32_t flags;
- uint16_t lun;
- int rval;
-
- if (!test_bit(UNLOADING, &vha->dpc_flags)) {
- flags = iocb->u.tmf.flags;
- lun = (uint16_t)iocb->u.tmf.lun;
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
- /* Issue Marker IOCB */
- rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ complete(&tmf->u.tmf.comp);
+}
- if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
- "TM IOCB failed (%x).\n", rval);
- }
- }
- sp->free(sp->fcport->vha, sp);
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ complete(&tmf->u.tmf.comp);
}
int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
+ struct srb_iocb *tm_iocb;
srb_t *sp;
- struct srb_iocb *tcf;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
- rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ tm_iocb = &sp->u.iocb_cmd;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- tcf = &sp->u.iocb_cmd;
- tcf->u.tmf.flags = tm_flags;
- tcf->u.tmf.lun = lun;
- tcf->u.tmf.data = tag;
- tcf->timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_tm_cmd_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ tm_iocb->u.tmf.flags = flags;
+ tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.data = tag;
+ sp->done = qla2x00_tmf_sp_done;
+ tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+
+ rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+ if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ flags = tm_iocb->u.tmf.flags;
+ lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ qla2x00_marker(vha, vha->hw->req_q_map[0],
+ vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+ }
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
return rval;
+}
+
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ abt->u.abt.comp_status = CS_TIMEOUT;
+ complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ complete(&abt->u.abt.comp);
+}
+
+static int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+ scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ fc_port_t *fcport = cmd_sp->fcport;
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+ sp->done = qla24xx_abort_sp_done;
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, target_id=%x\n",
+ cmd_sp->handle, fcport->tgt_id);
+
+ wait_for_completion(&abt_iocb->u.abt.comp);
+
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(vha, sp);
done:
return rval;
}
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+ unsigned long flags = 0;
+
+ uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (handle == req->num_outstanding_cmds) {
+ /* Command not found. */
+ return QLA_FUNCTION_FAILED;
+ }
+ if (sp->type == SRB_FXIOCB_DCMD)
+ return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_ABORT_IOCTL);
+
+ return qla24xx_async_abort_cmd(sp);
+}
+
void
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -1379,7 +1476,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
}
ha->fw_dumped = 0;
- fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA27XX(ha))
+ goto try_fce;
+
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
fixed_size = sizeof(struct qla2100_fw_dump);
} else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
else
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
@@ -1412,9 +1515,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
goto try_eft;
+try_fce:
+ if (ha->fce)
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
+
+ /* Allocate memory for Fibre Channel Event Buffer. */
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
@@ -1442,7 +1552,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+
try_eft:
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
/* Allocate memory for Extended Trace Buffer. */
tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
GFP_KERNEL);
@@ -1469,15 +1584,28 @@ try_eft:
ha->eft_dma = tc_dma;
ha->eft = tc;
}
+
cont_alloc:
+ if (IS_QLA27XX(ha)) {
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x00ba,
+ "Failed missing fwdump template\n");
+ return;
+ }
+ dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+ ql_dbg(ql_dbg_init, vha, 0x00fa,
+ "-> allocating fwdump (%x bytes)...\n", dump_size);
+ goto allocate;
+ }
+
req_q_size = req->length * sizeof(request_t);
rsp_q_size = rsp->length * sizeof(response_t);
-
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
ha->chain_offset = dump_size;
dump_size += mq_size + fce_size;
+allocate:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@ cont_alloc:
}
return;
}
+ ha->fw_dump_len = dump_size;
ql_dbg(ql_dbg_init, vha, 0x00c5,
"Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
- ha->fw_dump_len = dump_size;
+ if (IS_QLA27XX(ha))
+ return;
+
ha->fw_dump->signature[0] = 'Q';
ha->fw_dump->signature[1] = 'L';
ha->fw_dump->signature[2] = 'G';
@@ -1718,9 +1849,6 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
- if (IS_QLA83XX(ha))
- goto skip_fac_check;
-
if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
@@ -1733,8 +1861,8 @@ enable_82xx_npiv:
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
-skip_fac_check:
- if (IS_QLA83XX(ha)) {
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -1933,7 +2061,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv = ha->nvram;
/* Determine NVRAM starting address. */
- if (ha->flags.port0) {
+ if (ha->port_no == 0) {
ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
ha->vpd_base = FA_NVRAM_VPD0_ADDR;
} else {
ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
}
+
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
@@ -4842,7 +4971,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + ha->port_no;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
segments--;
}
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+ "Loading fwdump template from %x\n", faddr);
+ qla24xx_read_flash_data(vha, dcode, faddr, 7);
+ risc_size = be32_to_cpu(dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0162,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0163,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0164,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ faddr += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0165,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0166,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*dcode)) {
+ ql_log(ql_log_warn, vha, 0x0167,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+default_template:
+ ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0169,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x016a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*dcode);
+ memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(dcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x016b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x016c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
return rval;
}
@@ -5231,7 +5453,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
uint32_t risc_size;
uint32_t i;
struct fw_blob *blob;
- uint32_t *fwcode, fwclen;
+ const uint32_t *fwcode;
+ uint32_t fwclen;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -5263,7 +5486,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(fwcode[i + 4]);
@@ -5277,7 +5500,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0095,
"Firmware data: %08x %08x %08x %08x.\n",
dcode[0], dcode[1], dcode[2], dcode[3]);
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
while (segments && rval == QLA_SUCCESS) {
@@ -5291,8 +5514,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ql_log(ql_log_fatal, vha, 0x0096,
"Unable to verify integrity of firmware image "
"(%Zd).\n", blob->fw->size);
-
- goto fail_fw_integrity;
+ return QLA_FUNCTION_FAILED;
}
fragment = 0;
@@ -5326,10 +5548,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Next segment. */
segments--;
}
+
+ if (!IS_QLA27XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ ql_dbg(ql_dbg_init, vha, 0x171,
+ "Loading fwdump template from %x\n",
+ (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+ risc_size = be32_to_cpu(fwcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x172,
+ "-> array size %x dwords\n", risc_size);
+ if (risc_size == 0 || risc_size == ~0)
+ goto default_template;
+
+ dlen = (risc_size - 8) * sizeof(*fwcode);
+ ql_dbg(ql_dbg_init, vha, 0x0173,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x0174,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto default_template;
+ }
+
+ fwcode += 7;
+ risc_size -= 8;
+ dcode = ha->fw_dump_template;
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = le32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0175,
+ "Failed fwdump template validate\n");
+ goto default_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(dcode);
+ ql_dbg(ql_dbg_init, vha, 0x0176,
+ "-> template size %x bytes\n", dlen);
+ if (dlen > risc_size * sizeof(*fwcode)) {
+ ql_log(ql_log_warn, vha, 0x0177,
+ "Failed fwdump template exceeds array by %x bytes\n",
+ (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+ goto default_template;
+ }
+ ha->fw_dump_template_len = dlen;
return rval;
-fail_fw_integrity:
- return QLA_FUNCTION_FAILED;
+default_template:
+ ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+
+ dlen = qla27xx_fwdt_template_default_size();
+ ql_dbg(ql_dbg_init, vha, 0x0179,
+ "-> template allocating %x bytes...\n", dlen);
+ ha->fw_dump_template = vmalloc(dlen);
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x017a,
+ "Failed fwdump template allocate %x bytes.\n", risc_size);
+ goto failed_template;
+ }
+
+ dcode = ha->fw_dump_template;
+ risc_size = dlen / sizeof(*fwcode);
+ fwcode = qla27xx_fwdt_template_default();
+ for (i = 0; i < risc_size; i++)
+ dcode[i] = be32_to_cpu(fwcode[i]);
+
+ if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+ ql_log(ql_log_warn, vha, 0x017b,
+ "Failed fwdump template validate\n");
+ goto failed_template;
+ }
+
+ dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+ ql_dbg(ql_dbg_init, vha, 0x017c,
+ "-> template size %x bytes\n", dlen);
+ ha->fw_dump_template_len = dlen;
+ return rval;
+
+failed_template:
+ ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
+ return rval;
}
int
@@ -5605,7 +5917,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->port_name[0] = 0x21;
- nv->port_name[1] = 0x00 + ha->port_no;
+ nv->port_name[1] = 0x00 + ha->port_no + 1;
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
@@ -5639,7 +5951,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->enode_mac[2] = 0xDD;
nv->enode_mac[3] = 0x04;
nv->enode_mac[4] = 0x05;
- nv->enode_mac[5] = 0x06 + ha->port_no;
+ nv->enode_mac[5] = 0x06 + ha->port_no + 1;
rval = 1;
}
@@ -5677,7 +5989,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
icb->enode_mac[2] = 0xDD;
icb->enode_mac[3] = 0x04;
icb->enode_mac[4] = 0x05;
- icb->enode_mac[5] = 0x06 + ha->port_no;
+ icb->enode_mac[5] = 0x06 + ha->port_no + 1;
}
/* Use extended-initialization control block. */
@@ -5780,7 +6092,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* if not running MSI-X we need handshaking on interrupts */
- if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
/* Enable ZIO. */
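Note: the qla_init.c hunks rework qla2x00_async_tm_cmd() (and the new qla24xx_async_abort_cmd()) so the issuing thread blocks on a completion that is signalled by either the done callback or the timeout handler, which records CS_TIMEOUT first. A rough userspace analog of that completion primitive is sketched below, assuming pthread mutex/condvar in place of the kernel's struct completion; the struct must be initialized (e.g. with PTHREAD_MUTEX_INITIALIZER / PTHREAD_COND_INITIALIZER) before use, and the names mirror the kernel API only for readability.

#include <pthread.h>
#include <stdbool.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);		/* wake the waiter */
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)			/* guard against spurious wakeups */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}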
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 46b9307e8be4..e607568bce49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -488,7 +488,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
@@ -524,7 +524,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24 = NULL;
- struct mrk_entry_fx00 *mrkfx = NULL;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -541,15 +540,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
- if (IS_QLAFX00(ha)) {
- mrkfx = (struct mrk_entry_fx00 *) mrk;
- mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
- mrkfx->handle_hi = 0;
- mrkfx->tgt_id = cpu_to_le16(loop_id);
- mrkfx->lun[1] = LSB(lun);
- mrkfx->lun[2] = MSB(lun);
- host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
- } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
@@ -1823,7 +1814,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; req->num_outstanding_cmds; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
if (handle == req->num_outstanding_cmds)
handle = 1;
@@ -1848,7 +1839,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
- if (ha->mqenable || IS_QLA83XX(ha))
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2585,29 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
+void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+ struct srb_iocb *aio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+
+ memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+ abt_iocb->entry_type = ABORT_IOCB_TYPE;
+ abt_iocb->entry_count = 1;
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->handle_to_abort =
+ cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ abt_iocb->vp_index = vha->vp_idx;
+ abt_iocb->req_que_no = cpu_to_le16(req->id);
+ /* Send the command to the firmware */
+ wmb();
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -2647,7 +2661,9 @@ qla2x00_start_sp(srb_t *sp)
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_ABT_CMD:
- qlafx00_abort_iocb(sp, pkt);
+ IS_QLAFX00(ha) ?
+ qlafx00_abort_iocb(sp, pkt) :
+ qla24xx_abort_iocb(sp, pkt);
break;
default:
break;
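Note: one small fix in the qla_iocb.c hunks changes "for (index = 1; req->num_outstanding_cmds; index++)" to test "index < req->num_outstanding_cmds", so the scan for a free handle is actually bounded by the table size. A minimal sketch of the corrected scan, using hypothetical names (find_free_handle, slots) rather than the driver's data structures:

#include <stddef.h>

static int find_free_handle(void **slots, size_t nslots, size_t start)
{
	size_t handle = start;

	for (size_t index = 1; index < nslots; index++) {	/* bounded, unlike the old loop */
		handle++;
		if (handle == nslots)
			handle = 1;		/* slot 0 is never handed out in this sketch */
		if (slots[handle] == NULL)
			return (int)handle;
	}
	return -1;				/* no free slot */
}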
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9bc86b9e86b1..95314ef2e505 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -356,15 +356,16 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
- static const char * const link_speeds[] = {
- "1", "2", "?", "4", "8", "16", "10"
+ static const char *const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "32", "10"
};
+#define QLA_LAST_SPEED 7
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return link_speeds[0];
else if (speed == 0x13)
- return link_speeds[6];
- else if (speed < 6)
+ return link_speeds[QLA_LAST_SPEED];
+ else if (speed < QLA_LAST_SPEED)
return link_speeds[speed];
else
return link_speeds[LS_UNKNOWN];
@@ -649,7 +650,7 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
RD_REG_WORD(&reg24->mailbox7) : 0;
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -666,7 +667,7 @@ skip_rio:
vha->device_flags |= DFLG_DEV_FAILED;
} else {
/* Check to see if MPI timeout occurred */
- if ((mbx & MBX_3) && (ha->flags.port0))
+ if ((mbx & MBX_3) && (ha->port_no == 0))
set_bit(MPI_RESET_NEEDED,
&vha->dpc_flags);
@@ -1497,8 +1498,7 @@ logio_done:
}
static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
- struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
const char func[] = "TMF-IOCB";
const char *type;
@@ -1506,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
srb_t *sp;
struct srb_iocb *iocb;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
- int error = 1;
sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
if (!sp)
@@ -1515,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb = &sp->u.iocb_cmd;
type = sp->name;
fcport = sp->fcport;
+ iocb->u.tmf.data = QLA_SUCCESS;
if (sts->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5038,
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
- } else if (!(le16_to_cpu(sts->scsi_status) &
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ } else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- ql_log(ql_log_warn, fcport->vha, 0x503a,
- "Async-%s error - hdl=%x no response info(%x).\n",
- type, sp->handle, sts->scsi_status);
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_log(ql_log_warn, fcport->vha, 0x503b,
- "Async-%s error - hdl=%x not enough response(%d).\n",
- type, sp->handle, sts->rsp_data_len);
- } else if (sts->data[3]) {
- ql_log(ql_log_warn, fcport->vha, 0x503c,
- "Async-%s error - hdl=%x response(%x).\n",
- type, sp->handle, sts->data[3]);
- } else {
- error = 0;
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ ql_log(ql_log_warn, fcport->vha, 0x503b,
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
+ } else if (sts->data[3]) {
+ ql_log(ql_log_warn, fcport->vha, 0x503c,
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
+ iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+ }
}
- if (error) {
- iocb->u.tmf.data = error;
+ if (iocb->u.tmf.data != QLA_SUCCESS)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
(uint8_t *)sts, sizeof(*sts));
- }
sp->done(vha, sp, 0);
}
@@ -2025,6 +2022,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ /* Task Management completion. */
+ if (sp->type == SRB_TM_CMD) {
+ qla24xx_tm_iocb_entry(vha, req, pkt);
+ return;
+ }
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -2425,6 +2428,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
}
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct abort_entry_24xx *pkt)
+{
+ const char func[] = "ABT_IOCB";
+ srb_t *sp;
+ struct srb_iocb *abt;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ abt = &sp->u.iocb_cmd;
+ abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ sp->done(vha, sp, 0);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2474,10 +2494,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
- case TSK_MGMT_IOCB_TYPE:
- qla24xx_tm_iocb_entry(vha, rsp->req,
- (struct tsk_mgmt_entry *)pkt);
- break;
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
break;
@@ -2497,6 +2513,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
* from falling into default case
*/
break;
+ case ABORT_IOCB_TYPE:
+ qla24xx_abort_iocb_entry(vha, rsp->req,
+ (struct abort_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2525,7 +2545,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
return;
rval = QLA_SUCCESS;
@@ -2880,6 +2901,7 @@ static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
+#define ATIO_VECTOR 2
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
@@ -2936,36 +2958,49 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < ha->msix_count; i++) {
+ for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- ret = request_irq(qentry->vector,
- qla83xx_msix_entries[i].handler,
- 0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
- } else {
+ else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
0, msix_entries[i].name, rsp);
- }
- if (ret) {
- ql_log(ql_log_fatal, vha, 0x00cb,
- "MSI-X: unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
- qla24xx_disable_msix(ha);
- ha->mqenable = 0;
- goto msix_out;
- }
+ if (ret)
+ goto msix_register_fail;
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+ /*
+ * If target mode is enabled, also request the vector for the ATIO
+ * queue.
+ */
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ qentry = &ha->msix_entries[ATIO_VECTOR];
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[ATIO_VECTOR].handler,
+ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
+msix_register_fail:
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
(ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
ha->mqenable = 1;
@@ -2989,12 +3024,13 @@ int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int ret = QLA_FUNCTION_FAILED;
- device_reg_t __iomem *reg = ha->iobase;
+ device_reg_t *reg = ha->iobase;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -3029,7 +3065,8 @@ skip_msix:
"Falling back-to MSI mode -%d.\n", ret);
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+ !IS_QLA27XX(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b94511ae0051..2528709c4add 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint8_t abort_active;
uint8_t io_lock_on;
uint16_t command = 0;
@@ -468,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mcp->mb[4] = (nv->enhanced_features &
EXTENDED_BB_CREDITS);
@@ -539,6 +540,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+ if (IS_QLA27XX(ha))
+ mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -574,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
}
+ if (IS_QLA27XX(ha)) {
+ ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
+ ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+ }
failed:
if (rval != QLA_SUCCESS) {
@@ -1225,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
}
/* 1 and 2 should normally be captured. */
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* mb3 is additional info about the installed SFP. */
mcp->in_mb |= MBX_3;
mcp->buf_size = size;
@@ -2349,7 +2356,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
mcp->in_mb |= MBX_12;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -2590,6 +2597,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (ql2xasynctmfenable)
+ return qla24xx_async_abort_command(sp);
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -3032,7 +3042,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
- !IS_QLA83XX(vha->hw))
+ !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3662,7 +3672,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->mb[12] = req->qos;
mcp->mb[11] = req->vp_idx;
mcp->mb[13] = req->rid;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = req->id;
@@ -3676,9 +3686,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS * 2;
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA83XX(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
mcp->out_mb |= MBX_15;
/* debug q create issue in SR-IOV */
mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3687,7 +3697,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(req->req_q_in, 0);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
WRT_REG_DWORD(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3725,7 +3735,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[5] = rsp->length;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->mb[15] = 0;
mcp->mb[4] = rsp->id;
@@ -3742,7 +3752,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA81XX(ha)) {
mcp->out_mb |= MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
- } else if (IS_QLA83XX(ha)) {
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
mcp->in_mb |= MBX_1;
/* debug q create issue in SR-IOV */
@@ -3809,7 +3819,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3840,7 +3851,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -3874,7 +3886,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+ !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4545,7 +4558,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
- if (IS_QLA83XX(ha))
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -4574,7 +4587,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+ !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -5070,7 +5084,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5145,7 +5159,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
struct qla_hw_data *ha = vha->hw;
unsigned long retry_max_time = jiffies + (2 * HZ);
- if (!IS_QLA83XX(ha))
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a72df701fb38..f0a852257f99 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint32_t cnt;
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -754,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ba6f8b139c98..0aaf6a9c87d3 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -40,7 +40,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg;
+ device_reg_t *reg;
uint8_t abort_active;
uint8_t io_lock_on;
uint16_t command = 0;
@@ -631,20 +631,6 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- struct init_cb_fx *icb;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
-
- /* Setup ring parameters in initialization control block. */
- icb = (struct init_cb_fx *)ha->init_cb;
- icb->request_q_outpointer = __constant_cpu_to_le16(0);
- icb->response_q_inpointer = __constant_cpu_to_le16(0);
- icb->request_q_length = cpu_to_le16(req->length);
- icb->response_q_length = cpu_to_le16(rsp->length);
- icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
- icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
- icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
- icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
WRT_REG_DWORD(&reg->req_q_in, 0);
WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -699,78 +685,16 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void
-qlafx00_tmf_iocb_timeout(void *data)
-{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
- tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
- complete(&tmf->u.tmf.comp);
-}
-
-static void
-qlafx00_tmf_sp_done(void *data, void *ptr, int res)
-{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
- complete(&tmf->u.tmf.comp);
-}
-
-static int
-qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
- uint32_t lun, uint32_t tag)
-{
- scsi_qla_host_t *vha = fcport->vha;
- struct srb_iocb *tm_iocb;
- srb_t *sp;
- int rval = QLA_FUNCTION_FAILED;
-
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
- if (!sp)
- goto done;
-
- tm_iocb = &sp->u.iocb_cmd;
- sp->type = SRB_TM_CMD;
- sp->name = "tmf";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
- tm_iocb->u.tmf.flags = flags;
- tm_iocb->u.tmf.lun = lun;
- tm_iocb->u.tmf.data = tag;
- sp->done = qlafx00_tmf_sp_done;
- tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
- init_completion(&tm_iocb->u.tmf.comp);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_async, vha, 0x507b,
- "Task management command issued target_id=%x\n",
- fcport->tgt_id);
-
- wait_for_completion(&tm_iocb->u.tmf.comp);
-
- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
- sp->free(vha, sp);
-done:
- return rval;
-}
-
int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
- return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}
int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
- return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
int
@@ -997,6 +921,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
break;
default:
+ if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+ break;
+
/* If fw is apparently not ready. In order to continue,
* we might need to issue Mbox cmd, but the problem is
* that the DoorBell vector values that come with the
@@ -2014,7 +1941,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
} else if (fx_type == FXDISC_ABORT_IOCTL)
fdisc->u.fxiocb.result =
- (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ?
+ (fdisc->u.fxiocb.result ==
+ cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
rval = le32_to_cpu(fdisc->u.fxiocb.result);
@@ -2034,94 +1962,6 @@ done:
return rval;
}
-static void
-qlafx00_abort_iocb_timeout(void *data)
-{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
- complete(&abt->u.abt.comp);
-}
-
-static void
-qlafx00_abort_sp_done(void *data, void *ptr, int res)
-{
- srb_t *sp = (srb_t *)ptr;
- struct srb_iocb *abt = &sp->u.iocb_cmd;
-
- complete(&abt->u.abt.comp);
-}
-
-static int
-qlafx00_async_abt_cmd(srb_t *cmd_sp)
-{
- scsi_qla_host_t *vha = cmd_sp->fcport->vha;
- fc_port_t *fcport = cmd_sp->fcport;
- struct srb_iocb *abt_iocb;
- srb_t *sp;
- int rval = QLA_FUNCTION_FAILED;
-
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
- if (!sp)
- goto done;
-
- abt_iocb = &sp->u.iocb_cmd;
- sp->type = SRB_ABT_CMD;
- sp->name = "abort";
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
- abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
- sp->done = qlafx00_abort_sp_done;
- abt_iocb->timeout = qlafx00_abort_iocb_timeout;
- init_completion(&abt_iocb->u.abt.comp);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, target_id=%x\n",
- cmd_sp->handle, fcport->tgt_id);
-
- wait_for_completion(&abt_iocb->u.abt.comp);
-
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
- sp->free(vha, sp);
-done:
- return rval;
-}
-
-int
-qlafx00_abort_command(srb_t *sp)
-{
- unsigned long flags = 0;
-
- uint32_t handle;
- fc_port_t *fcport = sp->fcport;
- struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
- if (req->outstanding_cmds[handle] == sp)
- break;
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
- /* Command not found. */
- return QLA_FUNCTION_FAILED;
- }
- if (sp->type == SRB_FXIOCB_DCMD)
- return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
- FXDISC_ABORT_IOCTL);
-
- return qlafx00_async_abt_cmd(sp);
-}
-
/*
* qlafx00_initialize_adapter
* Initialize board.
@@ -2150,7 +1990,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
vha->device_flags = DFLG_NO_CABLE;
vha->dpc_flags = 0;
vha->flags.management_server_logged_in = 0;
- vha->marker_needed = 0;
ha->isp_abort_cnt = 0;
ha->beacon_blink_led = 0;
@@ -2354,8 +2193,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
fstatus.ioctl_flags = pkt->fw_iotcl_flags;
fstatus.ioctl_data = pkt->dataword_r;
fstatus.adapid = pkt->adapid;
- fstatus.adapid_hi = pkt->adapid_hi;
- fstatus.reserved_2 = pkt->reserved_1;
+ fstatus.reserved_2 = pkt->dataword_r_extra;
fstatus.res_count = pkt->residuallen;
fstatus.status = pkt->status;
fstatus.seq_number = pkt->seq_no;
@@ -2804,7 +2642,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
const char func[] = "ERROR-IOCB";
- uint16_t que = MSW(pkt->handle);
+ uint16_t que = 0;
struct req_que *req = NULL;
int res = DID_ERROR << 16;
@@ -2833,16 +2671,22 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
{
struct sts_entry_fx00 *pkt;
response_t *lptr;
+ uint16_t lreq_q_in = 0;
+ uint16_t lreq_q_out = 0;
- while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
- RESPONSE_PROCESSED) {
+ lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
+
+ while (lreq_q_in != lreq_q_out) {
lptr = rsp->ring_ptr;
memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
sizeof(rsp->rsp_pkt));
pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
rsp->ring_index++;
+ lreq_q_out++;
if (rsp->ring_index == rsp->length) {
+ lreq_q_out = 0;
rsp->ring_index = 0;
rsp->ring_ptr = rsp->ring;
} else {
@@ -2854,7 +2698,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
qlafx00_error_entry(vha, rsp,
(struct sts_entry_fx00 *)pkt, pkt->entry_status,
pkt->entry_type);
- goto next_iter;
continue;
}
@@ -2888,10 +2731,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
pkt->entry_type, pkt->entry_status);
break;
}
-next_iter:
- WRT_REG_DWORD((void __iomem *)&lptr->signature,
- RESPONSE_PROCESSED);
- wmb();
}
/* Adjust ring index */
@@ -2926,9 +2765,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2985,7 +2824,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ uint32_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2995,10 +2834,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint16_t __iomem *)&reg->mailbox17;
+ wptr = (uint32_t __iomem *)&reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
wptr++;
}
}
@@ -3025,6 +2864,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
struct rsp_que *rsp;
unsigned long flags;
uint32_t clr_intr = 0;
+ uint32_t intr_stat = 0;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3046,34 +2886,26 @@ qlafx00_intr_handler(int irq, void *dev_id)
stat = QLAFX00_RD_INTR_REG(ha);
if (qla2x00_check_reg_for_disconnect(vha, stat))
break;
- if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+ intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+ if (!intr_stat)
break;
- switch (stat & QLAFX00_HST_INT_STS_BITS) {
- case QLAFX00_INTR_MB_CMPLT:
- case QLAFX00_INTR_MB_RSP_CMPLT:
- case QLAFX00_INTR_MB_ASYNC_CMPLT:
- case QLAFX00_INTR_ALL_CMPLT:
+ if (stat & QLAFX00_INTR_MB_CMPLT) {
mb[0] = RD_REG_WORD(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
- break;
- case QLAFX00_INTR_ASYNC_CMPLT:
- case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+ }
+ if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
- break;
- case QLAFX00_INTR_RSP_CMPLT:
+ }
+ if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
qlafx00_process_response_queue(vha, rsp);
clr_intr |= QLAFX00_INTR_RSP_CMPLT;
- break;
- default:
- ql_dbg(ql_dbg_async, vha, 0x507a,
- "Unrecognized interrupt type (%d).\n", stat);
- break;
}
+
QLAFX00_CLR_INTR_REG(ha, clr_intr);
QLAFX00_RD_INTR_REG(ha);
}
@@ -3223,17 +3055,6 @@ qlafx00_start_scsi(srb_t *sp)
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
- /* Forcing marker needed for now */
- vha->marker_needed = 0;
-
- /* Send marker if required */
- if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
- return QLA_FUNCTION_FAILED;
- vha->marker_needed = 0;
- }
-
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3284,7 +3105,9 @@ qlafx00_start_scsi(srb_t *sp)
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
- lcmd_pkt.handle_hi = 0;
+ lcmd_pkt.reserved_0 = 0;
+ lcmd_pkt.port_path_ctrl = 0;
+ lcmd_pkt.reserved_1 = 0;
lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
@@ -3364,8 +3187,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- tm_iocb.handle_hi = 0;
- tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+ tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 6cd7072cc0ff..e529dfaeb854 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -22,13 +22,16 @@ struct cmd_type_7_fx00 {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint32_t handle_hi;
+ uint8_t reserved_0;
+ uint8_t port_path_ctrl;
+ uint16_t reserved_1;
__le16 tgt_idx; /* Target Idx. */
uint16_t timeout; /* Command timeout. */
__le16 dseg_count; /* Data segment count. */
- uint16_t scsi_rsp_dsd_len;
+ uint8_t scsi_rsp_dsd_len;
+ uint8_t reserved_2;
struct scsi_lun lun; /* LUN (LE). */
@@ -47,30 +50,6 @@ struct cmd_type_7_fx00 {
uint32_t dseg_0_len; /* Data segment 0 length. */
};
-/*
- * ISP queue - marker entry structure definition.
- */
-struct mrk_entry_fx00 {
- uint8_t entry_type; /* Entry type. */
- uint8_t entry_count; /* Entry count. */
- uint8_t handle_count; /* Handle count. */
- uint8_t entry_status; /* Entry Status. */
-
- uint32_t handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
-
- uint16_t tgt_id; /* Target ID. */
-
- uint8_t modifier; /* Modifier (7-0). */
- uint8_t reserved_1;
-
- uint8_t reserved_2[5];
-
- uint8_t lun[8]; /* FCP LUN (BE). */
- uint8_t reserved_3[36];
-};
-
-
#define STATUS_TYPE_FX00 0x01 /* Status entry. */
struct sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
@@ -79,7 +58,7 @@ struct sts_entry_fx00 {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
+ uint32_t reserved_3; /* System handle. */
__le16 comp_status; /* Completion status. */
uint16_t reserved_0; /* OX_ID used by the firmware. */
@@ -102,7 +81,7 @@ struct sts_entry_fx00 {
struct multi_sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
- uint8_t sys_define; /* System defined. */
+ uint8_t entry_count; /* Entry count. */
uint8_t handle_count;
uint8_t entry_status;
@@ -118,15 +97,13 @@ struct tsk_mgmt_entry_fx00 {
__le32 handle; /* System handle. */
- uint32_t handle_hi; /* System handle. */
+ uint32_t reserved_0;
__le16 tgt_id; /* Target Idx. */
uint16_t reserved_1;
-
- uint16_t delay; /* Activity delay in seconds. */
-
- __le16 timeout; /* Command timeout. */
+ uint16_t reserved_3;
+ uint16_t reserved_4;
struct scsi_lun lun; /* LUN (LE). */
@@ -144,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t entry_status; /* Entry Status. */
__le32 handle; /* System handle. */
- __le32 handle_hi; /* System handle. */
+ __le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
__le32 abort_handle; /* System handle. */
- __le32 abort_handle_hi; /* System handle. */
+ __le32 reserved_2;
__le16 req_que_no;
uint8_t reserved_1[38];
@@ -171,8 +148,7 @@ struct ioctl_iocb_entry_fx00 {
__le32 dataword_r; /* Data word returned */
uint32_t adapid; /* Adapter ID */
- uint32_t adapid_hi; /* Adapter ID high */
- uint32_t reserved_1;
+ uint32_t dataword_r_extra;
__le32 seq_no;
uint8_t reserved_2[20];
@@ -360,11 +336,7 @@ struct config_info_data {
#define QLAFX00_INTR_MB_CMPLT 0x1
#define QLAFX00_INTR_RSP_CMPLT 0x2
-#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
#define QLAFX00_INTR_ASYNC_CMPLT 0x4
-#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
-#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
-#define QLAFX00_INTR_ALL_CMPLT 0x7
#define QLAFX00_MBA_SYSTEM_ERR 0x8002
#define QLAFX00_MBA_TEMP_OVER 0x8005
@@ -548,4 +520,7 @@ struct mr_data_fx00 {
/* Max conncurrent IOs that can be queued */
#define QLAFX00_MAX_CANQUEUE 1024
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1e6ba4a369e2..5511e24b1f11 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1664,10 +1664,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
/* Mapping of IO base pointer */
if (IS_QLA8044(ha)) {
ha->iobase =
- (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase);
} else if (IS_QLA82XX(ha)) {
ha->iobase =
- (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase +
0xbc000 + (ha->pdev->devfn << 11));
}
@@ -4502,3 +4502,20 @@ exit:
qla82xx_idc_unlock(ha);
return rval;
}
+
+void
+qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla82xx_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla82xx_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index b45f4d78ee36..86cf10815db0 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1578,8 +1578,8 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
do {
if (time_after_eq(jiffies, dev_init_timeout)) {
ql_log(ql_log_info, vha, 0xb0c4,
- "%s: Non Reset owner DEV INIT "
- "TIMEOUT!\n", __func__);
+ "%s: Non Reset owner: Reset Ack Timeout!\n",
+ __func__);
break;
}
@@ -2014,8 +2014,6 @@ qla8044_watchdog(struct scsi_qla_host *vha)
/* don't poll if reset is going on or FW hang in quiescent state */
if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@@ -2254,7 +2252,7 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
if (r_addr & 0xf) {
ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
- "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+ "[%s]: Read addr 0x%x not 16 bytes aligned\n",
__func__, r_addr);
return QLA_FUNCTION_FAILED;
}
@@ -3715,3 +3713,19 @@ exit_isp_reset:
return rval;
}
+void
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->allow_cna_fw_dump)
+ return;
+
+ scsi_block_requests(vha->host);
+ ha->flags.isp82xx_no_md_cap = 1;
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ qla2x00_wait_for_chip_reset(vha);
+ scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 89a53002b585..19e99cc33724 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
- " Enable T10-CRC-DIF "
- " Default is 0 - No DIF Support. 1 - Enable it"
- ", 2 - Enable DIF for all types, except Type 0.");
+ " Enable T10-CRC-DIF:\n"
+ " Default is 2.\n"
+ " 0 -- No DIF Support\n"
+ " 1 -- Enable DIF for all types\n"
+ " 2 -- Enable DIF for all types, except Type 0.\n");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
" Enable T10-CRC-DIF Error isolation by HBA:\n"
- " Default is 1.\n"
+ " Default is 2.\n"
" 0 -- Error isolation disabled\n"
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
@@ -1975,7 +1977,7 @@ static struct isp_operations qla82xx_isp_ops = {
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
- .fw_dump = qla24xx_fw_dump,
+ .fw_dump = qla82xx_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
@@ -2013,11 +2015,11 @@ static struct isp_operations qla8044_isp_ops = {
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = NULL,
.write_nvram = NULL,
- .fw_dump = qla24xx_fw_dump,
+ .fw_dump = qla8044_fw_dump,
.beacon_on = qla82xx_beacon_on,
.beacon_off = qla82xx_beacon_off,
.beacon_blink = NULL,
- .read_optrom = qla82xx_read_optrom_data,
+ .read_optrom = qla8044_read_optrom_data,
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
@@ -2078,7 +2080,7 @@ static struct isp_operations qlafx00_isp_ops = {
.intr_handler = qlafx00_intr_handler,
.enable_intrs = qlafx00_enable_intrs,
.disable_intrs = qlafx00_disable_intrs,
- .abort_command = qlafx00_abort_command,
+ .abort_command = qla24xx_async_abort_command,
.target_reset = qlafx00_abort_target,
.lun_reset = qlafx00_lun_reset,
.fabric_login = NULL,
@@ -2102,6 +2104,44 @@ static struct isp_operations qlafx00_isp_ops = {
.initialize_adapter = qlafx00_initialize_adapter,
};
+static struct isp_operations qla27xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla27xx_fwdump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
@@ -2223,21 +2263,29 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
case PCI_DEVICE_ID_QLOGIC_ISPF001:
ha->device_type |= DT_ISPFX00;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2071:
+ ha->device_type |= DT_ISP2071;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
- ha->port_no = !(ha->portnum & 1);
- else
+ ha->port_no = ha->portnum & 1;
+ else {
/* Get adapter physical port no from interrupt pin register. */
pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA27XX(ha))
+ ha->port_no--;
+ else
+ ha->port_no = !(ha->port_no & 1);
+ }
- if (ha->port_no & 1)
- ha->flags.port0 = 1;
- else
- ha->flags.port0 = 0;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
"device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
- ha->device_type, ha->flags.port0, ha->fw_srisc_address);
+ ha->device_type, ha->port_no, ha->fw_srisc_address);
}
static void
@@ -2297,7 +2345,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2334,13 +2383,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->vport_slock);
mutex_init(&ha->selflogin_lock);
+ mutex_init(&ha->optrom_mutex);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
- IS_QLA83XX(ha))
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
pdev->needs_freset = 1;
ha->prev_topology = 0;
@@ -2488,7 +2538,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
req_length = REQUEST_ENTRY_CNT_FX00;
rsp_length = RESPONSE_ENTRY_CNT_FX00;
- ha->init_cb_size = sizeof(struct init_cb_fx);
ha->isp_ops = &qlafx00_isp_ops;
ha->port_down_retry_count = 30; /* default value */
ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
@@ -2497,6 +2546,22 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mr.fw_hbt_en = 1;
ha->mr.host_info_resend = false;
ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
+ } else if (IS_QLA27XX(ha)) {
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla27xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2536,7 +2601,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.enable_64bit_addressing ? "enable" :
"disable");
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
- if (!ret) {
+ if (ret) {
ql_log_pci(ql_log_fatal, pdev, 0x0031,
"Failed to allocate memory for adapter, aborting.\n");
@@ -2561,10 +2626,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
- if (IS_QLAFX00(ha))
- host->can_queue = QLAFX00_MAX_CANQUEUE;
- else
- host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
else
@@ -2587,11 +2648,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!IS_QLA82XX(ha))
host->sg_tablesize = QLA_SG_ALL;
}
- ql_dbg(ql_dbg_init, base_vha, 0x0032,
- "can_queue=%d, req=%p, "
- "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
- host->can_queue, base_vha->req,
- base_vha->mgmt_svr_loop_id, host->sg_tablesize);
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
@@ -2646,7 +2702,7 @@ que_init:
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable || IS_QLA83XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2707,6 +2763,16 @@ que_init:
goto probe_failed;
}
+ if (IS_QLAFX00(ha))
+ host->can_queue = QLAFX00_MAX_CANQUEUE;
+ else
+ host->can_queue = req->num_outstanding_cmds - 10;
+
+ ql_dbg(ql_dbg_init, base_vha, 0x0032,
+ "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
if (ha->mqenable) {
if (qla25xx_setup_mode(base_vha)) {
ql_log(ql_log_warn, base_vha, 0x00ec,
@@ -2887,9 +2953,9 @@ probe_hw_failed:
iospace_config_failed:
if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ iounmap((device_reg_t *)ha->nx_pcibase);
if (!ql2xdbwr)
- iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
} else {
if (ha->iobase)
iounmap(ha->iobase);
@@ -3020,9 +3086,9 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
{
if (IS_QLA82XX(ha)) {
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ iounmap((device_reg_t *)ha->nx_pcibase);
if (!ql2xdbwr)
- iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ iounmap((device_reg_t *)ha->nxdb_wr_ptr);
} else {
if (ha->iobase)
iounmap(ha->iobase);
@@ -3033,7 +3099,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if (IS_QLA83XX(ha) && ha->msixbase)
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -3447,7 +3513,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -3478,10 +3544,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
- "loop_id_map=%p. \n", ha->loop_id_map);
+ "loop_id_map=%p.\n", ha->loop_id_map);
}
- return 1;
+ return 0;
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -3562,22 +3628,28 @@ static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
{
if (ha->fce)
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
+ dma_free_coherent(&ha->pdev->dev,
+ FCE_SIZE, ha->fce, ha->fce_dma);
- if (ha->fw_dump) {
- if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+ if (ha->eft)
+ dma_free_coherent(&ha->pdev->dev,
+ EFT_SIZE, ha->eft, ha->eft_dma);
+
+ if (ha->fw_dump)
vfree(ha->fw_dump);
- }
+ if (ha->fw_dump_template)
+ vfree(ha->fw_dump_template);
+
ha->fce = NULL;
ha->fce_dma = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dump = NULL;
ha->fw_dumped = 0;
ha->fw_dump_reading = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_len = 0;
+ ha->fw_dump_template = NULL;
+ ha->fw_dump_template_len = 0;
}
/*
@@ -5242,7 +5314,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 10
+#define FW_BLOBS 11
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -5253,6 +5325,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP82XX 7
#define FW_ISP2031 8
#define FW_ISP8031 9
+#define FW_ISP2071 10
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5264,6 +5337,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP82XX "ql8200_fw.bin"
#define FW_FILE_ISP2031 "ql2600_fw.bin"
#define FW_FILE_ISP8031 "ql8300_fw.bin"
+#define FW_FILE_ISP2071 "ql2700_fw.bin"
+
static DEFINE_MUTEX(qla_fw_lock);
@@ -5278,6 +5353,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP82XX, },
{ .name = FW_FILE_ISP2031, },
{ .name = FW_FILE_ISP8031, },
+ { .name = FW_FILE_ISP2071, },
};
struct fw_blob *
@@ -5306,6 +5382,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP2031];
} else if (IS_QLA8031(ha)) {
blob = &qla_fw_blobs[FW_ISP8031];
+ } else if (IS_QLA2071(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2071];
} else {
return NULL;
}
@@ -5635,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bd56cde795fc..f28123e8ed65 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,7 +568,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
else if (IS_P3P_TYPE(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
- } else if (IS_QLA83XX(ha)) {
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_83;
goto end;
}
@@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
/* Assign FCP prio region since older adapters may not have FLT, or
FCP prio region in it's FLT.
*/
- ha->flt_region_fcp_prio = ha->flags.port0 ?
+ ha->flt_region_fcp_prio = (ha->port_no == 0) ?
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
@@ -743,47 +743,71 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd_nvram = start;
if (IS_P3P_TYPE(ha))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
ha->flt_region_vpd = start;
break;
case FLT_REG_NVRAM_0:
if (IS_QLA8031(ha))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_1:
if (IS_QLA8031(ha))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_2:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_3:
+ if (!IS_QLA27XX(ha))
+ break;
+ if (ha->port_no == 3)
ha->flt_region_nvram = start;
break;
case FLT_REG_FDT:
ha->flt_region_fdt = start;
break;
case FLT_REG_NPIV_CONF_0:
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_NPIV_CONF_1:
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_GOLD_FW:
ha->flt_region_gold_fw = start;
break;
case FLT_REG_FCP_PRIO_0:
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_fcp_prio = start;
break;
case FLT_REG_FCP_PRIO_1:
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_fcp_prio = start;
break;
case FLT_REG_BOOT_CODE_82XX:
@@ -813,13 +837,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_FCOE_NVRAM_0:
if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
ha->flt_region_nvram = start;
break;
case FLT_REG_FCOE_NVRAM_1:
if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
- if (!ha->flags.port0)
+ if (ha->port_no == 1)
ha->flt_region_nvram = start;
break;
}
@@ -832,12 +856,12 @@ no_flash_data:
ha->flt_region_fw = def_fw[def];
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
- ha->flt_region_vpd = ha->flags.port0 ?
+ ha->flt_region_vpd = (ha->port_no == 0) ?
def_vpd0[def] : def_vpd1[def];
- ha->flt_region_nvram = ha->flags.port0 ?
+ ha->flt_region_nvram = (ha->port_no == 0) ?
def_nvram0[def] : def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
- ha->flt_region_npiv_conf = ha->flags.port0 ?
+ ha->flt_region_npiv_conf = (ha->port_no == 0) ?
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
ql_dbg(ql_dbg_init, vha, 0x004a,
@@ -989,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1192,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_hw_data *ha = vha->hw;
/* Prepare burst-capable write on supported ISPs. */
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha)) &&
!(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
@@ -1675,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
if (!IS_QLA83XX(ha))
goto out;
- if (ha->flags.port0)
+ if (ha->port_no == 0)
led_select_value = QLA83XX_LED_PORT0;
else
led_select_value = QLA83XX_LED_PORT1;
@@ -2332,7 +2357,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
*/
rest_addr = 0xffff;
sec_mask = 0x10000;
- break;
+ break;
}
/*
* ST m29w010b part - 16kb sector size
@@ -2558,7 +2583,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t faddr, left, burst;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
goto try_fast;
if (offset & 0xfff)
goto slow_read;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 38a1257e76e1..0cb73074c199 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -590,7 +590,7 @@ static struct qla_tgt_sess *qlt_create_sess(
/* Check to avoid double sessions */
spin_lock_irqsave(&ha->hardware_lock, flags);
- list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+ list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
sess_list_entry) {
if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
@@ -627,7 +627,7 @@ static struct qla_tgt_sess *qlt_create_sess(
return NULL;
}
- sess->tgt = ha->tgt.qla_tgt;
+ sess->tgt = vha->vha_tgt.qla_tgt;
sess->vha = vha;
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
@@ -635,7 +635,7 @@ static struct qla_tgt_sess *qlt_create_sess(
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
- sess, ha->tgt.qla_tgt);
+ sess, vha->vha_tgt.qla_tgt);
be_sid[0] = sess->s_id.b.domain;
be_sid[1] = sess->s_id.b.area;
@@ -662,8 +662,8 @@ static struct qla_tgt_sess *qlt_create_sess(
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
spin_lock_irqsave(&ha->hardware_lock, flags);
- list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
- ha->tgt.qla_tgt->sess_count++;
+ list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+ vha->vha_tgt.qla_tgt->sess_count++;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -682,7 +682,7 @@ static struct qla_tgt_sess *qlt_create_sess(
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
unsigned long flags;
@@ -692,6 +692,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!tgt || (fcport->port_type != FCT_INITIATOR))
return;
+ if (qla_ini_mode_enabled(vha))
+ return;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (tgt->tgt_stop) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -701,9 +704,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_create_sess(vha, fcport, false);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
} else {
@@ -739,7 +742,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
unsigned long flags;
@@ -787,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
}
/* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&qla_tgt_mutex);
+ if (!vha->fc_vport) {
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ bool npiv_vports;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ npiv_vports = (fc_host->npiv_vports_inuse);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ if (npiv_vports) {
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+ }
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
- dump_stack();
- return;
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
}
ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -806,12 +824,13 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
* Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
* Lock is needed, because we still can get an incoming packet.
*/
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stop = 1;
qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&qla_tgt_mutex);
flush_delayed_work(&tgt->sess_del_work);
@@ -838,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+ return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -845,20 +865,21 @@ EXPORT_SYMBOL(qlt_stop_phase1);
void qlt_stop_phase2(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
unsigned long flags;
if (tgt->tgt_stopped) {
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
"Already in tgt->tgt_stopped state\n");
dump_stack();
return;
}
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
"Waiting for %d IRQ commands to complete (tgt %p)",
tgt->irq_cmd_count, tgt);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
while (tgt->irq_cmd_count != 0) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -868,9 +889,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -878,14 +899,14 @@ EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
- struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = tgt->vha;
- if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
qlt_stop_phase2(tgt);
- ha->tgt.qla_tgt = NULL;
+ vha->vha_tgt.qla_tgt = NULL;
- ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
"Release of tgt %p finished\n", tgt);
kfree(tgt);
@@ -949,8 +970,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
return;
}
- if (ha->tgt.qla_tgt != NULL)
- ha->tgt.qla_tgt->notify_ack_expected++;
+ if (vha->vha_tgt.qla_tgt != NULL)
+ vha->vha_tgt.qla_tgt->notify_ack_expected++;
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
@@ -1054,7 +1075,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
/* Other bytes are zero */
}
- ha->tgt.qla_tgt->abts_resp_expected++;
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
qla2x00_start_iocbs(vha, vha->req);
}
@@ -1206,7 +1227,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+ rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
if (rc != 0) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@ -2157,8 +2178,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd, void *ctio)
{
struct qla_tgt_srr_ctio *sc;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_srr_imm *imm;
tgt->ctio_srr_id++;
@@ -2474,7 +2494,7 @@ static void qlt_do_work(struct work_struct *work)
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess = NULL;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
@@ -2507,10 +2527,10 @@ static void qlt_do_work(struct work_struct *work)
goto out_term;
}
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (!sess)
goto out_term;
@@ -2576,8 +2596,7 @@ out_term:
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) {
@@ -2593,11 +2612,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- INIT_LIST_HEAD(&cmd->cmd_list);
-
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
- cmd->tgt = ha->tgt.qla_tgt;
+ cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
INIT_WORK(&cmd->work, qlt_do_work);
@@ -2723,7 +2740,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
uint32_t lun, unpacked_lun;
int lun_size, fn;
- tgt = ha->tgt.qla_tgt;
+ tgt = vha->vha_tgt.qla_tgt;
lun = a->u.isp24.fcp_cmnd.lun;
lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@ -2797,7 +2814,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
"qla_target(%d): task abort for unexisting "
"session\n", vha->vp_idx);
- return qlt_sched_sess_work(ha->tgt.qla_tgt,
+ return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
}
@@ -2810,7 +2827,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
- struct qla_hw_data *ha = vha->hw;
int res = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
@@ -2828,7 +2844,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
case ELS_PDISC:
case ELS_ADISC:
{
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
@@ -3186,7 +3202,8 @@ restart:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
"SRR cmd %p (se_cmd %p, tag %d, op %x), "
"sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
- se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm);
@@ -3202,8 +3219,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
struct qla_tgt_srr_imm *imm;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_srr_ctio *sctio;
tgt->imm_srr_id++;
@@ -3313,7 +3329,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
case IMM_NTFY_LIP_LINK_REINIT:
{
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
"qla_target(%d): LINK REINIT (loop %#x, "
"subcode %x)\n", vha->vp_idx,
@@ -3489,7 +3505,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
if (unlikely(tgt == NULL)) {
@@ -3591,7 +3607,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@ -3794,7 +3810,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
uint16_t *mailbox)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@ -3924,14 +3940,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
uint8_t *s_id)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
fc_port_t *fcport = NULL;
int rc, global_resets;
uint16_t loop_id = 0;
retry:
- global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ global_resets =
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
if (rc != 0) {
@@ -3958,12 +3974,13 @@ retry:
return NULL;
if (global_resets !=
- atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
"qla_target(%d): global reset during session discovery "
"(counter was %d, new %d), retrying", vha->vp_idx,
global_resets,
- atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ atomic_read(&vha->vha_tgt.
+ qla_tgt->tgt_global_resets_count));
goto retry;
}
@@ -3998,10 +4015,10 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!sess)
@@ -4052,10 +4069,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- mutex_lock(&ha->tgt.tgt_mutex);
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&ha->tgt.tgt_mutex);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!sess)
@@ -4141,9 +4158,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
}
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
- "Registering target for host %ld(%p)", base_vha->host_no, ha);
+ "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
- BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+ BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
if (!tgt) {
@@ -4171,7 +4188,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
- ha->tgt.qla_tgt = tgt;
+ base_vha->vha_tgt.qla_tgt = tgt;
ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
"qla_target(%d): using 64 Bit PCI addressing",
@@ -4182,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+ if (base_vha->fc_vport)
+ return 0;
+
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
@@ -4192,16 +4212,20 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
- if (!ha->tgt.qla_tgt)
+ if (!vha->vha_tgt.qla_tgt)
return 0;
+ if (vha->fc_vport) {
+ qlt_release(vha->vha_tgt.qla_tgt);
+ return 0;
+ }
mutex_lock(&qla_tgt_mutex);
- list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+ list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
vha->host_no, ha);
- qlt_release(ha->tgt.qla_tgt);
+ qlt_release(vha->vha_tgt.qla_tgt);
return 0;
}
@@ -4235,8 +4259,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
* @callback: lport initialization callback for tcm_qla2xxx code
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
*/
-int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
- int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+ u64 npiv_wwpn, u64 npiv_wwnn,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
struct qla_tgt *tgt;
struct scsi_qla_host *vha;
@@ -4255,19 +4280,22 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
if (!host)
continue;
- if (ha->tgt.tgt_ops != NULL)
- continue;
-
if (!(host->hostt->supported_mode & MODE_TARGET))
continue;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (host->active_mode & MODE_TARGET) {
+ if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
host->host_no);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue;
}
+ if (tgt->tgt_stop) {
+ pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!scsi_host_get(host)) {
@@ -4276,23 +4304,16 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
" qla2xxx scsi_host\n");
continue;
}
- qlt_lport_dump(vha, wwpn, b);
+ qlt_lport_dump(vha, phys_wwpn, b);
if (memcmp(vha->port_name, b, WWN_SIZE)) {
scsi_host_put(host);
continue;
}
- /*
- * Setup passed parameters ahead of invoking callback
- */
- ha->tgt.tgt_ops = qla_tgt_ops;
- ha->tgt.target_lport_ptr = target_lport_ptr;
- rc = (*callback)(vha);
- if (rc != 0) {
- ha->tgt.tgt_ops = NULL;
- ha->tgt.target_lport_ptr = NULL;
+ rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+ if (rc != 0)
scsi_host_put(host);
- }
+
mutex_unlock(&qla_tgt_mutex);
return rc;
}
@@ -4314,7 +4335,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
/*
* Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
*/
- ha->tgt.target_lport_ptr = NULL;
+ vha->vha_tgt.target_lport_ptr = NULL;
ha->tgt.tgt_ops = NULL;
/*
* Release the Scsi_Host reference for the underlying qla2xxx host
@@ -4376,8 +4397,9 @@ void
qlt_enable_vha(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -4392,9 +4414,14 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qlt_set_mode(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- qla2x00_wait_for_hba_online(vha);
+ if (vha->vp_idx) {
+ qla24xx_disable_vp(vha);
+ qla24xx_enable_vp(vha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
+ qla2x00_wait_for_hba_online(base_vha);
+ }
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -4407,7 +4434,7 @@ void
qlt_disable_vha(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
if (!tgt) {
@@ -4438,8 +4465,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
if (!qla_tgt_mode_enabled(vha))
return;
- mutex_init(&ha->tgt.tgt_mutex);
- mutex_init(&ha->tgt.tgt_host_action_mutex);
+ vha->vha_tgt.qla_tgt = NULL;
+
+ mutex_init(&vha->vha_tgt.tgt_mutex);
+ mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
qlt_clear_mode(vha);
@@ -4450,6 +4479,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
* assigning the value appropriately.
*/
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+ qlt_add_target(ha, vha);
}
void
@@ -4768,8 +4799,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
}
- mutex_init(&ha->tgt.tgt_mutex);
- mutex_init(&ha->tgt.tgt_host_action_mutex);
+ mutex_init(&base_vha->vha_tgt.tgt_mutex);
+ mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
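
For context: qlt_stop_phase1() now returns an int so callers can back off (-EPERM) when NPIV vports are still in use or the target is already stopping, instead of stopping unconditionally. A minimal caller sketch, assuming a hypothetical helper built inside the driver tree (not part of this patch):

#include "qla_def.h"
#include "qla_target.h"

/* Hypothetical helper: stop target mode, deferring when phase 1
 * refuses (e.g. NPIV vports still in use -> -EPERM). */
static void example_qlt_shutdown(struct scsi_qla_host *vha)
{
	int rc;

	rc = qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	if (rc) {
		pr_warn("qlt_stop_phase1 deferred: %d\n", rc);
		return;
	}
	qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}

Checking the return before proceeding mirrors what tcm_qla2xxx_undepend_tpg() does further down in this patch.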
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b33e411f28a0..ce33d8c26406 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
- struct list_head cmd_list;
struct atio_from_isp atio;
};
@@ -932,8 +931,8 @@ void qlt_disable_vha(struct scsi_qla_host *);
*/
extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
- int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+ int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct qla_tgt_sess *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
@@ -1002,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
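
The qlt_lport_register() prototype above now takes the opaque target_lport_ptr plus physical and NPIV WWNs, and the callback receives them back along with the matched scsi_qla_host. A sketch of a minimal fabric-side registration under the new signature, using hypothetical names (my_lport_register_cb, my_register); the in-tree user is tcm_qla2xxx_lport_register_cb later in this patch:

#include "qla_def.h"
#include "qla_target.h"

/* Hypothetical callback: wire the opaque lport pointer into the
 * matched vha (the real tcm_qla2xxx callback also sets ha->tgt.tgt_ops). */
static int my_lport_register_cb(struct scsi_qla_host *vha,
	void *target_lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn)
{
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	return 0;
}

/* Physical-port registration passes 0 for the NPIV WWPN/WWNN. */
static int my_register(void *lport, u64 phys_wwpn)
{
	return qlt_lport_register(lport, phys_wwpn, 0, 0,
				  my_lport_register_cb);
}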
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 000000000000..a804e9b744bb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,909 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_tmpl.h"
+
+/* note: the default template is stored in big-endian byte order */
+static const uint32_t ql27xx_fwdt_default_template[] = {
+ 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
+ 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x04010000, 0x14000000, 0x00000000,
+ 0x02000000, 0x44000000, 0x09010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x00600000, 0x00000000,
+ 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
+ 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
+ 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+ 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
+ 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
+ 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
+ 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
+ 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
+ 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
+ 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+ 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
+ 0x00010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
+ 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
+ 0x00000000, 0x02000000, 0x01000000, 0x00000200,
+ 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
+ 0x02000000, 0x02000000, 0x00001000, 0x00000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x01000000, 0x07010000, 0x18000000,
+ 0x00000000, 0x02000000, 0x00000000, 0x02000000,
+ 0x07010000, 0x18000000, 0x00000000, 0x02000000,
+ 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
+ 0x00000000, 0x02000000, 0x00000000, 0xff000000,
+ 0x10000000, 0x00000000, 0x00000080,
+};
+
+static inline void __iomem *
+qla27xx_isp_reg(struct scsi_qla_host *vha)
+{
+ return &vha->hw->iobase->isp24;
+}
+
+static inline void
+qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le16 *)buf = cpu_to_le16(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insert32(uint32_t value, void *buf, ulong *len)
+{
+ if (buf) {
+ buf += *len;
+ *(__le32 *)buf = cpu_to_le32(value);
+ }
+ *len += sizeof(value);
+}
+
+static inline void
+qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
+{
+ ulong cnt = size;
+
+ if (buf && mem) {
+ buf += *len;
+ while (cnt >= sizeof(uint32_t)) {
+ *(__le32 *)buf = cpu_to_le32p(mem);
+ buf += sizeof(uint32_t);
+ mem += sizeof(uint32_t);
+ cnt -= sizeof(uint32_t);
+ }
+ if (cnt)
+ memcpy(buf, mem, cnt);
+ }
+ *len += size;
+}
+
+static inline void
+qla27xx_read8(void *window, void *buf, ulong *len)
+{
+ uint8_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_BYTE((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd011,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read16(void *window, void *buf, ulong *len)
+{
+ uint16_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_WORD((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd012,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read32(void *window, void *buf, ulong *len)
+{
+ uint32_t value = ~0;
+
+ if (buf) {
+ value = RD_REG_DWORD((__iomem void *)window);
+ ql_dbg(ql_dbg_misc, NULL, 0xd013,
+ "%s: -> %x\n", __func__, value);
+ }
+ qla27xx_insert32(value, buf, len);
+}
+
+static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+{
+ return
+ (width == 1) ? qla27xx_read8 :
+ (width == 2) ? qla27xx_read16 :
+ qla27xx_read32;
+}
+
+static inline void
+qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, void *buf, ulong *len)
+{
+ void *window = (void *)reg + offset;
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd014,
+ "%s: @%x\n", __func__, offset);
+ }
+ qla27xx_insert32(offset, buf, len);
+ qla27xx_read32(window, buf, len);
+}
+
+static inline void
+qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
+ uint offset, uint32_t data, void *buf)
+{
+ __iomem void *window = reg + offset;
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd015,
+ "%s: @%x <- %x\n", __func__, offset, data);
+ WRT_REG_DWORD(window, data);
+ }
+}
+
+static inline void
+qla27xx_read_window(__iomem struct device_reg_24xx *reg,
+ uint32_t base, uint offset, uint count, uint width, void *buf,
+ ulong *len)
+{
+ void *window = (void *)reg + offset;
+ void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+
+ if (buf) {
+ ql_dbg(ql_dbg_misc, NULL, 0xd016,
+ "%s: base=%x offset=%x count=%x width=%x\n",
+ __func__, base, offset, count, width);
+ }
+ qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+ while (count--) {
+ qla27xx_insert32(base, buf, len);
+ readn(window, buf, len);
+ window += width;
+ base += width;
+ }
+}
+
+static inline void
+qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
+{
+ if (buf)
+ ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+}
+
+static int
+qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd100,
+ "%s: nop [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd1ff,
+ "%s: end [%lx]\n", __func__, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ /* terminate */
+ return true;
+}
+
+static int
+qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd200,
+ "%s: rdio t1 [%lx]\n", __func__, *len);
+ qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
+ ent->t256.reg_count, ent->t256.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd201,
+ "%s: wrio t1 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd202,
+ "%s: rdio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
+ qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
+ ent->t258.reg_count, ent->t258.reg_width, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd203,
+ "%s: wrio t2 [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
+ qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
+ qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd204,
+ "%s: rdpci [%lx]\n", __func__, *len);
+ qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd205,
+ "%s: wrpci [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords;
+ ulong start;
+ ulong end;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd206,
+ "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
+ start = ent->t262.start_addr;
+ end = ent->t262.end_addr;
+
+ if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+ ;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+ end = vha->hw->fw_memory_size;
+ if (buf)
+ ent->t262.end_addr = end;
+ } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+ start = vha->hw->fw_shared_ram_start;
+ end = vha->hw->fw_shared_ram_end;
+ if (buf) {
+ ent->t262.start_addr = start;
+ ent->t262.end_addr = end;
+ }
+ } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+ ql_dbg(ql_dbg_misc, vha, 0xd021,
+ "%s: unsupported ddr ram\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd022,
+ "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ if (end < start) {
+ ql_dbg(ql_dbg_misc, vha, 0xd023,
+ "%s: bad range (start=%x end=%x)\n", __func__,
+ ent->t262.end_addr, ent->t262.start_addr);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ dwords = end - start + 1;
+ if (buf) {
+ ql_dbg(ql_dbg_misc, vha, 0xd024,
+ "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+ buf += *len;
+ qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ uint count = 0;
+ uint i;
+ uint length;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd207,
+ "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
+ if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
+ if (req || !buf) {
+ length = req ?
+ req->length : REQUEST_ENTRY_CNT_24XX;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(req ? req->ring : NULL,
+ length * sizeof(*req->ring), buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+ if (rsp || !buf) {
+ length = rsp ?
+ rsp->length : RESPONSE_ENTRY_CNT_MQ;
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(rsp ? rsp->ring : NULL,
+ length * sizeof(*rsp->ring), buf, len);
+ count++;
+ }
+ }
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+ ql_dbg(ql_dbg_misc, vha, 0xd025,
+ "%s: unsupported atio queue\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd026,
+ "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+ qla27xx_skip_entry(ent, buf);
+ goto done;
+ }
+
+ if (buf)
+ ent->t263.num_queues = count;
+done:
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd208,
+ "%s: getfce [%lx]\n", __func__, *len);
+ if (vha->hw->fce) {
+ if (buf) {
+ ent->t264.fce_trace_size = FCE_SIZE;
+ ent->t264.write_pointer = vha->hw->fce_wr;
+ ent->t264.base_pointer = vha->hw->fce_dma;
+ ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
+ ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
+ ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
+ ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
+ ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
+ ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
+ }
+ qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd027,
+ "%s: missing fce\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd209,
+ "%s: pause risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_pause_risc(reg);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20a,
+ "%s: reset risc [%lx]\n", __func__, *len);
+ if (buf)
+ qla24xx_soft_reset(vha->hw);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20b,
+ "%s: dis intr [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20c,
+ "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
+ if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ if (vha->hw->eft) {
+ if (buf) {
+ ent->t268.buf_size = EFT_SIZE;
+ ent->t268.start_addr = vha->hw->eft_dma;
+ }
+ qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing eft\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
+ ql_dbg(ql_dbg_misc, vha, 0xd029,
+ "%s: unsupported exchange offload buffer\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02a,
+ "%s: unsupported extended login buffer\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+ qla27xx_skip_entry(ent, buf);
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd20d,
+ "%s: scratch [%lx]\n", __func__, *len);
+ qla27xx_insert32(0xaaaaaaaa, buf, len);
+ qla27xx_insert32(0xbbbbbbbb, buf, len);
+ qla27xx_insert32(0xcccccccc, buf, len);
+ qla27xx_insert32(0xdddddddd, buf, len);
+ qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
+ if (buf)
+ ent->t269.scratch_size = 5 * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ void *window = (void *)reg + 0xc4;
+ ulong dwords = ent->t270.count;
+ ulong addr = ent->t270.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20e,
+ "%s: rdremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ while (dwords--) {
+ qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_read32(window, buf, len);
+ addr++;
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+ ulong addr = ent->t271.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd20f,
+ "%s: wrremreg [%lx]\n", __func__, *len);
+ qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+ qla27xx_read_reg(reg, 0xc4, buf, len);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_write_reg(reg, 0xc0, addr, buf);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t272.count;
+ ulong start = ent->t272.addr;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd210,
+ "%s: rdremram [%lx]\n", __func__, *len);
+ if (buf) {
+ ql_dbg(ql_dbg_misc, vha, 0xd02c,
+ "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+ buf += *len;
+ qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
+ }
+ *len += dwords * sizeof(uint32_t);
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ulong dwords = ent->t273.count;
+ ulong addr = ent->t273.addr;
+ uint32_t value;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd211,
+ "%s: pcicfg [%lx]\n", __func__, *len);
+ while (dwords--) {
+ value = ~0;
+ if (pci_read_config_dword(vha->hw->pdev, addr, &value))
+ ql_dbg(ql_dbg_misc, vha, 0xd02d,
+ "%s: failed pcicfg read at %lx\n", __func__, addr);
+ qla27xx_insert32(addr, buf, len);
+ qla27xx_insert32(value, buf, len);
+ addr += 4;
+ }
+
+ return false;
+}
+
+static int
+qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+ ql_dbg(ql_dbg_misc, vha, 0xd2ff,
+ "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+ qla27xx_skip_entry(ent, buf);
+
+ return false;
+}
+
+struct qla27xx_fwdt_entry_call {
+ int type;
+ int (*call)(
+ struct scsi_qla_host *,
+ struct qla27xx_fwdt_entry *,
+ void *,
+ ulong *);
+};
+
+static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
+ { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
+ { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
+ { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
+ { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
+ { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
+ { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
+ { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
+ { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
+ { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
+ { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
+ { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
+ { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
+ { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
+ { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
+ { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
+ { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
+ { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
+ { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
+ { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
+ { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
+ { -1 , qla27xx_fwdt_entry_other }
+};
+
+static inline int (*qla27xx_find_entry(int type))
+ (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+{
+ struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+
+ while (list->type != -1 && list->type != type)
+ list++;
+
+ return list->call;
+}
+
+static inline void *
+qla27xx_next_entry(void *p)
+{
+ struct qla27xx_fwdt_entry *ent = p;
+
+ return p + ent->hdr.entry_size;
+}
+
+static void
+qla27xx_walk_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
+{
+ struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
+ ulong count = tmp->entry_count;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd01a,
+ "%s: entry count %lx\n", __func__, count);
+ while (count--) {
+ if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
+ break;
+ ent = qla27xx_next_entry(ent);
+ }
+ ql_dbg(ql_dbg_misc, vha, 0xd01b,
+ "%s: len=%lx\n", __func__, *len);
+}
+
+static void
+qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
+{
+ tmp->capture_timestamp = jiffies;
+}
+
+static void
+qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
+{
+ uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
+ int rval = 0;
+
+ rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+ v+0, v+1, v+2, v+3, v+4, v+5);
+
+ tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
+ tmp->driver_info[1] = v[5] << 8 | v[4];
+ tmp->driver_info[2] = 0x12345678;
+}
+
+static void
+qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
+ struct scsi_qla_host *vha)
+{
+ tmp->firmware_version[0] = vha->hw->fw_major_version;
+ tmp->firmware_version[1] = vha->hw->fw_minor_version;
+ tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[3] =
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
+ tmp->firmware_version[4] =
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+}
+
+static void
+ql27xx_edit_template(struct scsi_qla_host *vha,
+ struct qla27xx_fwdt_template *tmp)
+{
+ qla27xx_time_stamp(tmp);
+ qla27xx_driver_info(tmp);
+ qla27xx_firmware_info(tmp, vha);
+}
+
+static inline uint32_t
+qla27xx_template_checksum(void *p, ulong size)
+{
+ uint32_t *buf = p;
+ uint64_t sum = 0;
+
+ size /= sizeof(*buf);
+
+ while (size--)
+ sum += *buf++;
+
+ sum = (sum & 0xffffffff) + (sum >> 32);
+
+ return ~sum;
+}
+
+static inline int
+qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+{
+ return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+}
+
+static inline int
+qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
+{
+ return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+}
+
+static void
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ tmp = memcpy(vha->hw->fw_dump, tmp, len);
+ ql27xx_edit_template(vha, tmp);
+ qla27xx_walk_template(vha, tmp, tmp, &len);
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = 1;
+ }
+}
+
+ulong
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+{
+ struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+ ulong len = 0;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+ len = tmp->template_size;
+ qla27xx_walk_template(vha, tmp, NULL, &len);
+ }
+
+ return len;
+}
+
+ulong
+qla27xx_fwdt_template_size(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ return tmp->template_size;
+}
+
+ulong
+qla27xx_fwdt_template_default_size(void)
+{
+ return sizeof(ql27xx_fwdt_default_template);
+}
+
+const void *
+qla27xx_fwdt_template_default(void)
+{
+ return ql27xx_fwdt_default_template;
+}
+
+int
+qla27xx_fwdt_template_valid(void *p)
+{
+ struct qla27xx_fwdt_template *tmp = p;
+
+ if (!qla27xx_verify_template_header(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01c,
+ "%s: template type %x\n", __func__, tmp->template_type);
+ return false;
+ }
+
+ if (!qla27xx_verify_template_checksum(tmp)) {
+ ql_log(ql_log_warn, NULL, 0xd01d,
+ "%s: failed template checksum\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ ulong flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+ if (!vha->hw->fw_dump)
+ ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
+ else if (!vha->hw->fw_dump_template)
+ ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+ else
+ qla27xx_execute_fwdt_template(vha);
+
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
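
The template checksum used above sums the blob as 32-bit words into a 64-bit accumulator, folds the carry once and inverts, so a well-formed template (including its stored checksum field) sums to zero. A standalone userspace sketch of the same arithmetic, with a hypothetical helper name:

#include <stdint.h>
#include <stddef.h>

/* Mirrors qla27xx_template_checksum(): 64-bit sum of 32-bit words,
 * carries folded once, result inverted.  Verification succeeds when
 * the value computed over the whole template is 0. */
static uint32_t fwdt_checksum(const void *p, size_t size)
{
	const uint32_t *buf = p;
	uint64_t sum = 0;

	for (size /= sizeof(*buf); size; size--)
		sum += *buf++;
	sum = (sum & 0xffffffff) + (sum >> 32);
	return (uint32_t)~sum;
}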
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 000000000000..c9d2fff4d964
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,205 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_DMP27_H__
+#define __QLA_DMP27_H__
+
+#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
+
+struct __packed qla27xx_fwdt_template {
+ uint32_t template_type;
+ uint32_t entry_offset;
+ uint32_t template_size;
+ uint32_t reserved_1;
+
+ uint32_t entry_count;
+ uint32_t template_version;
+ uint32_t capture_timestamp;
+ uint32_t template_checksum;
+
+ uint32_t reserved_2;
+ uint32_t driver_info[3];
+
+ uint32_t saved_state[16];
+
+ uint32_t reserved_3[8];
+ uint32_t firmware_version[5];
+};
+
+#define TEMPLATE_TYPE_FWDUMP 99
+
+#define ENTRY_TYPE_NOP 0
+#define ENTRY_TYPE_TMP_END 255
+#define ENTRY_TYPE_RD_IOB_T1 256
+#define ENTRY_TYPE_WR_IOB_T1 257
+#define ENTRY_TYPE_RD_IOB_T2 258
+#define ENTRY_TYPE_WR_IOB_T2 259
+#define ENTRY_TYPE_RD_PCI 260
+#define ENTRY_TYPE_WR_PCI 261
+#define ENTRY_TYPE_RD_RAM 262
+#define ENTRY_TYPE_GET_QUEUE 263
+#define ENTRY_TYPE_GET_FCE 264
+#define ENTRY_TYPE_PSE_RISC 265
+#define ENTRY_TYPE_RST_RISC 266
+#define ENTRY_TYPE_DIS_INTR 267
+#define ENTRY_TYPE_GET_HBUF 268
+#define ENTRY_TYPE_SCRATCH 269
+#define ENTRY_TYPE_RDREMREG 270
+#define ENTRY_TYPE_WRREMREG 271
+#define ENTRY_TYPE_RDREMRAM 272
+#define ENTRY_TYPE_PCICFG 273
+
+#define CAPTURE_FLAG_PHYS_ONLY BIT_0
+#define CAPTURE_FLAG_PHYS_VIRT BIT_1
+
+#define DRIVER_FLAG_SKIP_ENTRY BIT_7
+
+struct __packed qla27xx_fwdt_entry {
+ struct __packed {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t reserved_1;
+
+ uint8_t capture_flags;
+ uint8_t reserved_2[2];
+ uint8_t driver_flags;
+ } hdr;
+ union __packed {
+ struct __packed {
+ } t0;
+
+ struct __packed {
+ } t255;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ } t256;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ } t257;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint8_t reg_width;
+ uint16_t reg_count;
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint8_t reserved[3];
+ uint32_t bank;
+ } t258;
+
+ struct __packed {
+ uint32_t base_addr;
+ uint32_t write_data;
+ uint8_t reserved[2];
+ uint8_t pci_offset;
+ uint8_t banksel_offset;
+ uint32_t bank;
+ } t259;
+
+ struct __packed {
+ uint8_t pci_addr;
+ uint8_t reserved[3];
+ } t260;
+
+ struct __packed {
+ uint8_t pci_addr;
+ uint8_t reserved[3];
+ uint32_t write_data;
+ } t261;
+
+ struct __packed {
+ uint8_t ram_area;
+ uint8_t reserved[3];
+ uint32_t start_addr;
+ uint32_t end_addr;
+ } t262;
+
+ struct __packed {
+ uint32_t num_queues;
+ uint8_t queue_type;
+ uint8_t reserved[3];
+ } t263;
+
+ struct __packed {
+ uint32_t fce_trace_size;
+ uint64_t write_pointer;
+ uint64_t base_pointer;
+ uint32_t fce_enable_mb0;
+ uint32_t fce_enable_mb2;
+ uint32_t fce_enable_mb3;
+ uint32_t fce_enable_mb4;
+ uint32_t fce_enable_mb5;
+ uint32_t fce_enable_mb6;
+ } t264;
+
+ struct __packed {
+ } t265;
+
+ struct __packed {
+ } t266;
+
+ struct __packed {
+ uint8_t pci_offset;
+ uint8_t reserved[3];
+ uint32_t data;
+ } t267;
+
+ struct __packed {
+ uint8_t buf_type;
+ uint8_t reserved[3];
+ uint32_t buf_size;
+ uint64_t start_addr;
+ } t268;
+
+ struct __packed {
+ uint32_t scratch_size;
+ } t269;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t270;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t data;
+ } t271;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t272;
+
+ struct __packed {
+ uint32_t addr;
+ uint32_t count;
+ } t273;
+ };
+};
+
+#define T262_RAM_AREA_CRITICAL_RAM 1
+#define T262_RAM_AREA_EXTERNAL_RAM 2
+#define T262_RAM_AREA_SHARED_RAM 3
+#define T262_RAM_AREA_DDR_RAM 4
+
+#define T263_QUEUE_TYPE_REQ 1
+#define T263_QUEUE_TYPE_RSP 2
+#define T263_QUEUE_TYPE_ATIO 3
+
+#define T268_BUF_TYPE_EXTD_TRACE 1
+#define T268_BUF_TYPE_EXCH_BUFOFF 2
+#define T268_BUF_TYPE_EXTD_LOGIN 3
+
+#endif
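
Each ENTRY_TYPE_* constant above has a matching handler row in ql27xx_fwdt_entry_call_list in qla_tmpl.c; unknown types fall through to qla27xx_fwdt_entry_other. A hedged sketch of how an additional entry type could be wired in (hypothetical type value and handler, not part of this patch):

/* Hypothetical new entry type; real values come from the firmware
 * dump template specification. */
#define ENTRY_TYPE_EXAMPLE	274

static int
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	/* capture nothing for now, just mark the entry skipped */
	qla27xx_skip_entry(ent, buf);
	return false;		/* keep walking the template */
}

/* ...plus one row ahead of the { -1, qla27xx_fwdt_entry_other }
 * terminator in ql27xx_fwdt_entry_call_list:
 *	{ ENTRY_TYPE_EXAMPLE, qla27xx_fwdt_entry_t274 },
 */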
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 31d19535b015..e36b94712544 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.06.00.12-k"
+#define QLA2XXX_VERSION "8.07.00.02-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 6
+#define QLA_DRIVER_MINOR_VER 7
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7eb19be35d46..788c4fe2b0c9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,16 +53,6 @@
struct workqueue_struct *tcm_qla2xxx_free_wq;
struct workqueue_struct *tcm_qla2xxx_cmd_wq;
-static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
@@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn(
*wwnn = 0;
/* count may include a LF at end of string */
- if (name[cnt-1] == '\n')
+ if (name[cnt-1] == '\n' || name[cnt-1] == 0)
cnt--;
/* validate we have enough characters for WWPN */
@@ -192,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
return 0;
}
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
- u64 wwpn, u64 wwnn)
-{
- u8 b[8], b2[8];
-
- put_unaligned_be64(wwpn, b);
- put_unaligned_be64(wwnn, b2);
- return snprintf(buf, len,
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
- b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
return "qla2xxx_npiv";
@@ -237,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
return lport->lport_naa_name;
}
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
-
- return &lport->lport_npiv_name[0];
-}
-
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -777,6 +744,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
+ if (!sess)
+ return;
+
assert_spin_locked(&sess->vha->hw->hardware_lock);
kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
@@ -948,16 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
atomic_read(&tpg->lport_tpg_enabled));
}
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(base_vha);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 0);
+ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
- struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
- struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
- struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -972,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL;
}
-
if (op) {
- atomic_set(&tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(vha);
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
} else {
- if (!ha->tgt.qla_tgt) {
- pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
- return -ENODEV;
- }
- atomic_set(&tpg->lport_tpg_enabled, 0);
- qlt_stop_phase1(ha->tgt.qla_tgt);
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
}
+ init_completion(&tpg->tpg_base_comp);
+ schedule_work(&tpg->tpg_base_work);
+ wait_for_completion(&tpg->tpg_base_comp);
+ if (op) {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return -ENODEV;
+ } else {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EPERM;
+ }
return count;
}
@@ -1011,7 +1015,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
return ERR_PTR(-EINVAL);
- if (!lport->qla_npiv_vp && (tpgt != 1)) {
+ if ((tpgt != 1)) {
pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
return ERR_PTR(-ENOSYS);
}
@@ -1038,11 +1042,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
kfree(tpg);
return NULL;
}
- /*
- * Setup local TPG=1 pointer for non NPIV mode.
- */
- if (lport->qla_npiv_vp == NULL)
- lport->tpg_1 = tpg;
+
+ lport->tpg_1 = tpg;
return &tpg->se_tpg;
}
@@ -1053,24 +1054,75 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
/*
* Call into qla2x_target.c LLD logic to shutdown the active
* FC Nexuses and disable target mode operation for this qla_hw_data
*/
- if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
- qlt_stop_phase1(ha->tgt.qla_tgt);
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
core_tpg_deregister(se_tpg);
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
- if (lport->qla_npiv_vp == NULL)
- lport->tpg_1 = NULL;
-
+ lport->tpg_1 = NULL;
kfree(tpg);
}
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+ &tcm_qla2xxx_npiv_tpg_enable.attr,
+ NULL,
+};
+
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
@@ -1095,12 +1147,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
tpg->lport = lport;
tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+
ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
+ lport->tpg_1 = tpg;
return &tpg->se_tpg;
}
@@ -1111,13 +1173,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
const uint8_t *s_id)
{
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
u32 key;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1221,13 +1282,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
const uint16_t loop_id)
{
- struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_nacl *nacl;
struct tcm_qla2xxx_fc_loopid *fc_loopid;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1341,6 +1401,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
struct se_session *se_sess;
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_lport *lport;
@@ -1357,7 +1418,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
se_nacl = se_sess->se_node_acl;
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1391,7 +1452,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
unsigned char port_name[36];
unsigned long flags;
- lport = ha->tgt.target_lport_ptr;
+ lport = vha->vha_tgt.target_lport_ptr;
if (!lport) {
pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
dump_stack();
@@ -1455,7 +1516,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
- struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
@@ -1562,15 +1624,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return 0;
}
-static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
{
struct qla_hw_data *ha = vha->hw;
- struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
/*
- * Setup local pointer to vha, NPIV VP pointer (if present) and
- * vha->tcm_lport pointer
+ * Setup tgt_ops, local pointer to vha and target_lport_ptr
*/
- lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+ ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+ vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = vha;
return 0;
@@ -1602,8 +1667,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport(
if (ret != 0)
goto out;
- ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
- tcm_qla2xxx_lport_register_cb, lport);
+ ret = qlt_lport_register(lport, wwpn, 0, 0,
+ tcm_qla2xxx_lport_register_cb);
if (ret != 0)
goto out_lport;
@@ -1621,7 +1686,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
struct scsi_qla_host *vha = lport->qla_vha;
- struct qla_hw_data *ha = vha->hw;
struct se_node_acl *node;
u32 key = 0;
@@ -1630,8 +1694,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
* shutdown of struct qla_tgt after the call to
* qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
*/
- if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
- qlt_stop_phase2(ha->tgt.qla_tgt);
+ if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
qlt_lport_deregister(vha);
@@ -1642,17 +1706,79 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
kfree(lport);
}
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+ void *target_lport_ptr,
+ u64 npiv_wwpn, u64 npiv_wwnn)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *sh = base_vha->host;
+ struct scsi_qla_host *npiv_vha;
+ struct tcm_qla2xxx_lport *lport =
+ (struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct tcm_qla2xxx_lport *base_lport =
+ (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+ struct tcm_qla2xxx_tpg *base_tpg;
+ struct fc_vport_identifiers vport_id;
+
+ if (!qla_tgt_mode_enabled(base_vha)) {
+ pr_err("qla2xxx base_vha not enabled for target mode\n");
+ return -EPERM;
+ }
+
+ if (!base_lport || !base_lport->tpg_1 ||
+ !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+ pr_err("qla2xxx base_lport or tpg_1 not available\n");
+ return -EPERM;
+ }
+ base_tpg = base_lport->tpg_1;
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ vport = fc_vport_create(sh, 0, &vport_id);
+ if (!vport) {
+ pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+ return -ENODEV;
+ }
+ /*
+ * Setup local pointer to NPIV vhba + target_lport_ptr
+ */
+ npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+ npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+ lport->qla_vha = npiv_vha;
+ scsi_host_get(npiv_vha->host);
+ return 0;
+}
+
+
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_qla2xxx_lport *lport;
- u64 npiv_wwpn, npiv_wwnn;
+ u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, tmp[128];
int ret;
- if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
- &npiv_wwpn, &npiv_wwnn) < 0)
+ snprintf(tmp, 128, "%s", name);
+
+ p = strchr(tmp, '@');
+ if (!p) {
+ pr_err("Unable to locate NPIV '@' seperator\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *p++ = '\0';
+
+ if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+ &npiv_wwpn, &npiv_wwnn) < 0)
return ERR_PTR(-EINVAL);
lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
@@ -1662,16 +1788,21 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
}
lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn;
- tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
- TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
-/* FIXME: tcm_qla2xxx_npiv_make_lport */
- ret = -ENOSYS;
+ ret = tcm_qla2xxx_init_lport(lport);
if (ret != 0)
goto out;
+ ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+ tcm_qla2xxx_lport_register_npiv_cb);
+ if (ret != 0)
+ goto out_lport;
+
return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
out:
kfree(lport);
return ERR_PTR(ret);
@@ -1681,14 +1812,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
- struct Scsi_Host *sh = vha->host;
+ struct scsi_qla_host *npiv_vha = lport->qla_vha;
+ struct qla_hw_data *ha = npiv_vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ scsi_host_put(npiv_vha->host);
/*
- * Notify libfc that we want to release the lport->npiv_vport
+ * Notify libfc that we want to release the vha->fc_vport
*/
- fc_vport_terminate(lport->npiv_vport);
-
- scsi_host_put(sh);
+ fc_vport_terminate(npiv_vha->fc_vport);
+ scsi_host_put(base_vha->host);
kfree(lport);
}
@@ -1763,20 +1896,22 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
- .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
.tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
- .tpg_check_demo_mode = tcm_qla2xxx_check_false,
- .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
- .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
- .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
@@ -1871,7 +2006,8 @@ static int tcm_qla2xxx_register_configfs(void)
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
*/
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+ npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
+ tcm_qla2xxx_npiv_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 771f7b816443..33aaac8c7d59 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h"
@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
+ /* Items for dealing with configfs_depend_item */
+ struct completion tpg_base_comp;
+ struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
@@ -62,20 +63,14 @@ struct tcm_qla2xxx_lport {
char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted naa WWPN for VPD page 83 etc */
char lport_naa_name[TCM_QLA2XXX_NAMELEN];
- /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
- char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
/* Pointer to struct scsi_qla_host from qla2xxx LLD */
struct scsi_qla_host *qla_vha;
- /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
- struct scsi_qla_host *qla_npiv_vp;
/* Pointer to struct qla_tgt pointer */
struct qla_tgt lport_qla_tgt;
- /* Pointer to struct fc_vport for NPIV vport from libfc */
- struct fc_vport *npiv_vport;
/* Pointer to TPG=1 for non NPIV mode */
struct tcm_qla2xxx_tpg *tpg_1;
/* Returned by tcm_qla2xxx_make_lport() */
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 8196c2f7915c..2eba35365920 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -465,7 +465,7 @@ int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
}
/* Recovery Failed, some other function
* has the lock, wait for 2secs and retry */
- ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timout\n",
+ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
__func__, ha->func_num);
timeout = 0;
}
@@ -1304,12 +1304,24 @@ static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
int ret_val = QLA_SUCCESS;
+ uint32_t idc_ctrl;
qla4_83xx_process_stop_seq(ha);
- /* Collect minidump*/
- if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
+ /*
+ * Collect minidump.
+ * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
+ * don't collect minidump
+ */
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
+ __func__);
+ } else {
qla4_8xxx_get_minidump(ha);
+ }
qla4_83xx_process_init_seq(ha);
@@ -1664,3 +1676,23 @@ void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
__qla4_83xx_disable_pause(ha);
ha->isp_ops->idc_unlock(ha);
}
+
+/**
+ * qla4_83xx_is_detached - Check if we are marked invisible.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4_83xx_is_detached(struct scsi_qla_host *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+ if (test_bit(AF_INIT_DONE, &ha->flags) &&
+ !(drv_active & (1 << ha->func_num))) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
+ __func__, drv_active));
+ return QLA_SUCCESS;
+ }
+
+ return QLA_ERROR;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 04a0027dbca0..9f92cbf96477 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -517,7 +517,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
(ha->idc_extend_tmo * HZ))) {
ha->notify_idc_comp = 0;
ha->notify_link_up_comp = 0;
- ql4_printk(KERN_WARNING, ha, "%s: IDC Complete notification not received",
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
__func__);
status = QLA_ERROR;
goto exit_wait;
@@ -538,7 +538,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
if (!wait_for_completion_timeout(&ha->link_up_comp,
(IDC_COMP_TOV * HZ))) {
ha->notify_link_up_comp = 0;
- ql4_printk(KERN_WARNING, ha, "%s: LINK UP notification not received",
+ ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
__func__);
status = QLA_ERROR;
goto exit_wait;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aa67bb9a4426..73a502288bde 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -194,7 +194,7 @@
#define ADAPTER_INIT_TOV 30
#define ADAPTER_RESET_TOV 180
#define EXTEND_CMD_TOV 60
-#define WAIT_CMD_TOV 30
+#define WAIT_CMD_TOV 5
#define EH_WAIT_CMD_TOV 120
#define FIRMWARE_UP_TOV 60
#define RESET_FIRMWARE_TOV 30
@@ -297,6 +297,8 @@ struct ddb_entry {
/* Driver Re-login */
unsigned long flags; /* DDB Flags */
+#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */
+
uint16_t default_relogin_timeout; /* Max time to wait for
* relogin to complete */
atomic_t retry_relogin_timer; /* Min Time between relogins
@@ -580,7 +582,6 @@ struct scsi_qla_host {
#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
-#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
@@ -595,10 +596,10 @@ struct scsi_qla_host {
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
#define DPC_LINK_CHANGED 18 /* 0x00040000 */
-#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
-#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
-#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
-#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
+#define DPC_RESET_ACTIVE 20 /* 0x00100000 */
+#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/
+#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
+#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
#define DPC_RESTORE_ACB 24 /* 0x01000000 */
struct Scsi_Host *host; /* pointer to host data */
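
The hunk above only corrects the hexadecimal values in the comments; the DPC_* flags themselves are bit numbers used with set_bit()/test_bit(), so each comment should simply read 1 << bit. A minimal standalone check of that arithmetic (not part of the patch):

	/* prints 0x00100000 0x00200000 0x00400000 0x00800000 for bits 20..23 */
	#include <stdio.h>

	int main(void)
	{
		int nr;

		for (nr = 20; nr <= 23; nr++)
			printf("bit %d -> 0x%08lx\n", nr, 1UL << nr);
		return 0;
	}
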
@@ -768,6 +769,7 @@ struct scsi_qla_host {
uint32_t fw_dump_capture_mask;
void *fw_dump_tmplt_hdr;
uint32_t fw_dump_tmplt_size;
+ uint32_t fw_dump_skip_size;
struct completion mbx_intr_comp;
@@ -910,7 +912,8 @@ static inline int is_qla80XX(struct scsi_qla_host *ha)
static inline int is_aer_supported(struct scsi_qla_host *ha)
{
return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
- (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
+ (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
}
static inline int adapter_up(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 8d4092b33c07..209853ce0bbc 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -390,6 +390,7 @@ struct qla_flt_region {
#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
#define MBOX_CMD_CONN_OPEN 0x0074
#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
+#define DDB_NOT_LOGGED_IN 0x09
#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
#define LOGOUT_OPTION_RELOGIN 0x0004
#define LOGOUT_OPTION_FREE_DDB 0x0008
@@ -505,9 +506,9 @@ struct qla_flt_region {
#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
-#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
-#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
-#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
+#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B
+#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C
+#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
@@ -528,14 +529,14 @@ struct qla_flt_region {
#define ACB_CONFIG_DISABLE 0x00
#define ACB_CONFIG_SET 0x01
-/* ACB State Defines */
-#define ACB_STATE_UNCONFIGURED 0x00
-#define ACB_STATE_INVALID 0x01
-#define ACB_STATE_ACQUIRING 0x02
-#define ACB_STATE_TENTATIVE 0x03
-#define ACB_STATE_DEPRICATED 0x04
-#define ACB_STATE_VALID 0x05
-#define ACB_STATE_DISABLING 0x06
+/* ACB/IP Address State Defines */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
/* FLASH offsets */
#define FLASH_SEGMENT_IFCB 0x04000000
@@ -698,14 +699,6 @@ struct addr_ctrl_blk {
uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
uint8_t ipv6_addr0_state; /* 223 */
uint8_t ipv6_addr1_state; /* 224 */
-#define IP_ADDRSTATE_UNCONFIGURED 0
-#define IP_ADDRSTATE_INVALID 1
-#define IP_ADDRSTATE_ACQUIRING 2
-#define IP_ADDRSTATE_TENTATIVE 3
-#define IP_ADDRSTATE_DEPRICATED 4
-#define IP_ADDRSTATE_PREFERRED 5
-#define IP_ADDRSTATE_DISABLING 6
-
uint8_t ipv6_dflt_rtr_state; /* 225 */
#define IPV6_RTRSTATE_UNKNOWN 0
#define IPV6_RTRSTATE_MANUAL 1
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d67c50e0b896..b1a19cd8d5b2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -279,6 +279,8 @@ int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
+int qla4_83xx_is_detached(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 7456eeb2e58a..28fbece7e08f 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -959,13 +959,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
-exit_init_hba:
- if (is_qla80XX(ha) && (status == QLA_ERROR)) {
- /* Since interrupts are registered in start_firmware for
- * 80XX, release them here if initialize_adapter fails */
- qla4xxx_free_irqs(ha);
- }
+exit_init_hba:
DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
return status;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a3c8bc7706c2..b1925d195f41 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
- spin_lock(&conn->session->lock);
+ spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_task(conn, itt);
- spin_unlock(&conn->session->lock);
+ spin_unlock(&conn->session->back_lock);
if (task == NULL) {
ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
@@ -635,6 +635,18 @@ static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
}
}
+static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
+ uint32_t *mbox_sts)
+{
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
+ &mbox_sts[2], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
+ &mbox_sts[3], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
+ &mbox_sts[4], sizeof(uint32_t));
+ memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
+ &mbox_sts[5], sizeof(uint32_t));
+}
/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
@@ -781,27 +793,44 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[3]);
/* mbox_sts[2] = Old ACB state
* mbox_sts[3] = new ACB state */
- if ((mbox_sts[3] == ACB_STATE_VALID) &&
- ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
- (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
+ if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
+ ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
+ (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
- } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID)) {
+ } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
+ (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
- } else if (mbox_sts[3] == ACB_STATE_DISABLING) {
+ } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
ha->host_no, __func__);
- } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
+ } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
complete(&ha->disable_acb_comp);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
ha->host_no, __func__);
}
break;
+ case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
+ case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
+ case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
+ /* No action */
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
+ ha->host_no, mbox_status));
+ break;
+
+ case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, IPv6 ERROR, "
+ "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ break;
+
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
case MBOX_ASTS_DNS:
/* No action */
@@ -939,6 +968,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
ha->host_no, mbox_sts[0]));
+ qla4xxx_default_router_changed(ha, mbox_sts);
break;
case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
@@ -1022,7 +1052,8 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status)
{
/* Process response queue interrupt. */
- if (intr_status & HSRX_RISC_IOCB_INT)
+ if ((intr_status & HSRX_RISC_IOCB_INT) &&
+ test_bit(AF_INIT_DONE, &ha->flags))
qla4xxx_process_response_queue(ha);
/* Process mailbox/asynch event interrupt.*/
@@ -1399,6 +1430,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
struct scsi_qla_host *ha = dev_id;
unsigned long flags;
+ int intr_status;
uint32_t ival = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1412,8 +1444,15 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
qla4xxx_process_response_queue(ha);
writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
} else {
- qla4xxx_process_response_queue(ha);
- writel(0, &ha->qla4_82xx_reg->host_int);
+ intr_status = readl(&ha->qla4_82xx_reg->host_status);
+ if (intr_status & HSRX_RISC_IOCB_INT) {
+ qla4xxx_process_response_queue(ha);
+ writel(0, &ha->qla4_82xx_reg->host_int);
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
+ __func__);
+ goto exit_msix_rsp_q;
+ }
}
ha->isr_count++;
exit_msix_rsp_q:
@@ -1488,6 +1527,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
int ret;
+ int rval = QLA_ERROR;
if (is_qla40XX(ha))
goto try_intx;
@@ -1568,9 +1608,10 @@ irq_attached:
set_bit(AF_IRQ_ATTACHED, &ha->flags);
ha->host->irq = ha->pdev->irq;
ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
- __func__, ha->pdev->irq);
+ __func__, ha->pdev->irq);
+ rval = QLA_SUCCESS;
irq_not_attached:
- return ret;
+ return rval;
}
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 9ae8ca3b69f9..0a6b782d6fdb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -212,9 +212,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->host_no, __func__));
goto mbox_exit;
}
- DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
- " Scheduling Adapter Reset\n", ha->host_no,
- mbx_cmd[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
+ ha->host_no, mbx_cmd[0]);
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -251,15 +250,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
break;
case MBOX_STS_BUSY:
- DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
- ha->host_no, __func__, mbx_cmd[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+ ha->host_no, __func__, mbx_cmd[0]);
ha->mailbox_timeout_count++;
break;
default:
- DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
- "sts = %08X ****\n", ha->host_no, __func__,
- mbx_cmd[0], mbx_sts[0]));
+ ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
+ ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
+ mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
+ mbx_sts[5], mbx_sts[6], mbx_sts[7]);
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -383,7 +383,6 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
- mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
QLA_SUCCESS) {
@@ -648,9 +647,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
goto exit_init_fw_cb;
}
- /* Initialize request and response queues. */
- qla4xxx_init_rings(ha);
-
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
@@ -1002,6 +998,10 @@ int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
"%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
"failed sts %04X %04X", __func__,
mbox_sts[0], mbox_sts[1]));
+ if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
+ (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
+ set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+ }
}
return status;
@@ -1918,6 +1918,7 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
mbox_sts[0], mbox_sts[1], mbox_sts[2]));
} else {
if (is_qla8042(ha) &&
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
(mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
/*
* Disable ACB mailbox command takes time to complete
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d001202d3565..63328c812b70 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2383,6 +2383,11 @@ static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
"scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
ha->host_no, index, entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask));
+ /* If the driver encounters a new entry type that it cannot process,
+ * it should just skip the entry and adjust the total buffer size by
+ * subtracting the skipped bytes from it.
+ */
+ ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
}
/* ISP83xx functions to process new minidump entries... */
@@ -2590,6 +2595,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
uint64_t now;
uint32_t timestamp;
+ ha->fw_dump_skip_size = 0;
if (!ha->fw_dump) {
ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
__func__, ha->host_no);
@@ -2761,7 +2767,7 @@ skip_nxt_entry:
entry_hdr->entry_size);
}
- if (data_collected != ha->fw_dump_size) {
+ if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
data_collected, ha->fw_dump_size);
@@ -2820,63 +2826,35 @@ void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
int rval = QLA_ERROR;
- int i, timeout;
- uint32_t old_count, count, idc_ctrl;
- int need_reset = 0, peg_stuck = 1;
+ int i;
+ uint32_t old_count, count;
+ int need_reset = 0;
need_reset = ha->isp_ops->need_reset(ha);
- old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-
- for (i = 0; i < 10; i++) {
- timeout = msleep_interruptible(200);
- if (timeout) {
- qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- return rval;
- }
-
- count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
- if (count != old_count)
- peg_stuck = 0;
- }
if (need_reset) {
/* We are trying to perform a recovery here. */
- if (peg_stuck)
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
ha->isp_ops->rom_lock_recovery(ha);
- goto dev_initialize;
} else {
- /* Start of day for this ha context. */
- if (peg_stuck) {
- /* Either we are the first or recovery in progress. */
- ha->isp_ops->rom_lock_recovery(ha);
- goto dev_initialize;
- } else {
- /* Firmware already running. */
- rval = QLA_SUCCESS;
- goto dev_ready;
+ old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+ count = qla4_8xxx_rd_direct(ha,
+ QLA8XXX_PEG_ALIVE_COUNTER);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
}
+ ha->isp_ops->rom_lock_recovery(ha);
}
-dev_initialize:
/* set to DEV_INITIALIZING */
ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_INITIALIZING);
- /*
- * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
- * reset it after device goes to INIT state.
- */
- if (is_qla8032(ha) || is_qla8042(ha)) {
- idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
- if (idc_ctrl & GRACEFUL_RESET_BIT1) {
- qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
- (idc_ctrl & ~GRACEFUL_RESET_BIT1));
- set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
- }
- }
-
ha->isp_ops->idc_unlock(ha);
if (is_qla8022(ha))
@@ -3209,6 +3187,10 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);
+ /* Initialize request and response queues. */
+ if (retval == QLA_SUCCESS)
+ qla4xxx_init_rings(ha);
+
if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);
@@ -3836,3 +3818,24 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
msix_out:
return ret;
}
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+ int status = QLA_SUCCESS;
+
+ /* Don't retry adapter initialization if IRQ allocation failed */
+ if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+ __func__);
+ status = QLA_ERROR;
+ goto exit_init_adapter_failure;
+ }
+
+ /* Since interrupts are registered in start_firmware for
+ * 8xxx, release them here if initialize_adapter fails
+ * and retry adapter initialization */
+ qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+ return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c21adc338cf1..459b9f7186fd 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1670,16 +1670,13 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!shost) {
ret = -ENXIO;
- printk(KERN_ERR "%s: shost is NULL\n",
- __func__);
+ pr_err("%s: shost is NULL\n", __func__);
return ERR_PTR(ret);
}
ha = iscsi_host_priv(shost);
-
ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
if (!ep) {
ret = -ENOMEM;
@@ -1699,6 +1696,9 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
(char *)&addr6->sin6_addr));
+ } else {
+ ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+ __func__);
}
qla_ep->host = shost;
@@ -1712,9 +1712,9 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
struct scsi_qla_host *ha;
int ret = 0;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
+ DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
@@ -1724,7 +1724,13 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ struct qla_endpoint *qla_ep;
+ struct scsi_qla_host *ha;
+
+ qla_ep = ep->dd_data;
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
iscsi_destroy_endpoint(ep);
}
@@ -1734,8 +1740,11 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
{
struct qla_endpoint *qla_ep = ep->dd_data;
struct sockaddr *dst_addr;
+ struct scsi_qla_host *ha;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -1766,13 +1775,13 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
int ret;
dma_addr_t iscsi_stats_dma;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
-
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
/* Allocate memory */
ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
@@ -2100,7 +2109,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
else
init_fw_cb->ipv6_tcp_opts &=
- cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+ cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
break;
case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
if (iface_param->iface_num & 0x1)
@@ -2297,7 +2307,8 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
else
init_fw_cb->ipv4_tcp_opts &=
- cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE);
+ cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+ 0xFFFF);
break;
case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
if (iface_param->iface_num & 0x1)
@@ -3045,7 +3056,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct sockaddr *dst_addr;
int ret;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!ep) {
printk(KERN_ERR "qla4xxx: missing ep.\n");
return NULL;
@@ -3054,6 +3064,8 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
qla_ep = ep->dd_data;
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
ret = qla4xxx_get_ddb_index(ha, &ddb_index);
if (ret == QLA_ERROR)
@@ -3074,6 +3086,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
ddb_entry->sess = cls_sess;
ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
ddb_entry->ddb_change = qla4xxx_ddb_change;
+ clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -3092,10 +3105,11 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
uint32_t ddb_state;
int ret;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+ ha->host_no));
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
@@ -3123,7 +3137,8 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
-
+ if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3141,17 +3156,23 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
- if (!cls_conn)
+ if (!cls_conn) {
+ pr_info("%s: Can not create connection for conn_idx = %u\n",
+ __func__, conn_idx);
return NULL;
+ }
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
+ ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+ conn_idx));
return cls_conn;
}
@@ -3162,8 +3183,16 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct iscsi_endpoint *ep;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct iscsi_session *sess;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_session->sid, cls_conn->cid));
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
@@ -3186,10 +3215,11 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
int ret = 0;
int status = QLA_SUCCESS;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+ cls_sess->sid, cls_conn->cid));
/* Check if we have matching FW DDB, if yes then do not
* login to this target. This could cause target to logout previous
@@ -3263,10 +3293,11 @@ static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
struct ddb_entry *ddb_entry;
int options;
- DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+ cls_conn->cid));
options = LOGOUT_OPTION_CLOSE_SESSION;
if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
@@ -4372,6 +4403,11 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
uint32_t dev_state;
uint32_t idc_ctrl;
+ if (is_qla8032(ha) &&
+ (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+ WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+ __func__, ha->func_num);
+
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -4554,11 +4590,19 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
uint32_t index = 0;
unsigned long flags;
struct scsi_cmnd *cmd;
+ unsigned long wtime;
+ uint32_t wtmo;
+
+ if (is_qla40XX(ha))
+ wtmo = WAIT_CMD_TOV;
+ else
+ wtmo = ha->nx_reset_timeout / 2;
- unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
+ wtime = jiffies + (wtmo * HZ);
- DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
- "complete\n", WAIT_CMD_TOV));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Wait up to %u seconds for cmds to complete\n",
+ wtmo));
while (!time_after_eq(jiffies, wtime)) {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -4861,11 +4905,11 @@ chip_reset:
qla4xxx_cmd_wait(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
- qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s - Performing chip reset..\n",
ha->host_no, __func__));
status = ha->isp_ops->reset_chip(ha);
+ qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
}
/* Flush any pending ddb changed AENs */
@@ -4881,8 +4925,21 @@ recover_ha_init_adapter:
ssleep(6);
/* NOTE: AF_ONLINE flag set upon successful completion of
- * qla4xxx_initialize_adapter */
+ * qla4xxx_initialize_adapter */
status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ status = qla4_8xxx_check_init_adapter_retry(ha);
+ if (status == QLA_ERROR) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
+ ha->host_no, __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ goto exit_recover;
+ }
+ }
}
/* Retry failed adapter initialization, if necessary
@@ -5228,9 +5285,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
container_of(work, struct scsi_qla_host, dpc_work);
int status = QLA_ERROR;
- DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
- "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
- ha->host_no, __func__, ha->flags, ha->dpc_flags))
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+ ha->host_no, __func__, ha->flags, ha->dpc_flags));
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -8681,11 +8738,8 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
/* Dont retry adapter initialization if IRQ allocation failed */
- if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
- ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
- __func__);
+ if (is_qla80XX(ha) && (status == QLA_ERROR))
goto skip_retry_init;
- }
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -8709,6 +8763,10 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
continue;
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+ if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+ if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+ goto skip_retry_init;
+ }
}
skip_retry_init:
@@ -8857,10 +8915,56 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int options;
+ int status;
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+ goto clear_ddb;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto clear_ddb;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (status == QLA_ERROR)
+ goto free_ddb;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto free_ddb;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+free_ddb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry;
- int options;
int idx;
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
@@ -8869,13 +8973,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
if ((ddb_entry != NULL) &&
(ddb_entry->ddb_type == FLASH_DDB)) {
- options = LOGOUT_OPTION_CLOSE_SESSION;
- if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
- == QLA_ERROR)
- ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
- __func__);
-
- qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ qla4xxx_destroy_ddb(ha, ddb_entry);
/*
* we have decremented the reference count of the driver
* when we setup the session to have the driver unload
@@ -9136,14 +9234,15 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
int ret = SUCCESS;
int wait = 0;
- ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
- ha->host_no, id, lun, cmd);
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n",
+ ha->host_no, id, lun, cmd, cmd->cmnd[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n",
+ ha->host_no, id, lun);
return SUCCESS;
}
kref_get(&srb->srb_ref);
@@ -9560,28 +9659,36 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
}
fn = PCI_FUNC(ha->pdev->devfn);
- while (fn > 0) {
- fn--;
- ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
- "func %x\n", ha->host_no, __func__, fn);
- /* Get the pci device given the domain, bus,
- * slot/function number */
- other_pdev =
- pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
- ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
- fn));
-
- if (!other_pdev)
- continue;
+ if (is_qla8022(ha)) {
+ while (fn > 0) {
+ fn--;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+ ha->host_no, __func__, fn);
+ /* Get the pci device given the domain, bus,
+ * slot/function number */
+ other_pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(ha->pdev->bus),
+ ha->pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+ fn));
+
+ if (!other_pdev)
+ continue;
- if (atomic_read(&other_pdev->enable_cnt)) {
- ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
- "func in enabled state%x\n", ha->host_no,
- __func__, fn);
+ if (atomic_read(&other_pdev->enable_cnt)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
+ ha->host_no, __func__, fn);
+ pci_dev_put(other_pdev);
+ break;
+ }
pci_dev_put(other_pdev);
- break;
}
- pci_dev_put(other_pdev);
+ } else {
+ /* this case is meant for ISP83xx/ISP84xx only */
+ if (qla4_83xx_can_perform_reset(ha)) {
+ /* reset fn as iSCSI is going to perform the reset */
+ fn = 0;
+ }
}
/* The first function on the card, the reset owner will
@@ -9615,6 +9722,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"FAILED\n", ha->host_no, __func__);
+ qla4xxx_free_irqs(ha);
ha->isp_ops->idc_lock(ha);
qla4_8xxx_clear_drv_active(ha);
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
@@ -9642,6 +9750,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);
+ else
+ qla4xxx_free_irqs(ha);
ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9b2946658683..c6ba0a6b8458 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.04.00-k3"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k4"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d8afec8317cf..c4d632c27a3e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -161,47 +161,20 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
static DEFINE_MUTEX(host_cmd_pool_mutex);
/**
- * scsi_pool_alloc_command - internal function to get a fully allocated command
- * @pool: slab pool to allocate the command from
- * @gfp_mask: mask for the allocation
- *
- * Returns a fully allocated command (with the allied sense buffer) or
- * NULL on failure
- */
-static struct scsi_cmnd *
-scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd;
-
- cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
- if (!cmd)
- return NULL;
-
- cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
- gfp_mask | pool->gfp_mask);
- if (!cmd->sense_buffer) {
- kmem_cache_free(pool->cmd_slab, cmd);
- return NULL;
- }
-
- return cmd;
-}
-
-/**
- * scsi_pool_free_command - internal function to release a command
- * @pool: slab pool to allocate the command from
+ * scsi_host_free_command - internal function to release a command
+ * @shost: host to free the command for
* @cmd: command to release
*
* the command must previously have been allocated by
- * scsi_pool_alloc_command.
+ * scsi_host_alloc_command.
*/
static void
-scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
- struct scsi_cmnd *cmd)
+scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-
kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
kmem_cache_free(pool->cmd_slab, cmd);
}
@@ -217,22 +190,32 @@ scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
static struct scsi_cmnd *
scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
+ struct scsi_host_cmd_pool *pool = shost->cmd_pool;
struct scsi_cmnd *cmd;
- cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+ cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
if (!cmd)
- return NULL;
+ goto fail;
+
+ cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+ gfp_mask | pool->gfp_mask);
+ if (!cmd->sense_buffer)
+ goto fail_free_cmd;
if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
-
- if (!cmd->prot_sdb) {
- scsi_pool_free_command(shost->cmd_pool, cmd);
- return NULL;
- }
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
}
return cmd;
+
+fail_free_sense:
+ kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+fail_free_cmd:
+ kmem_cache_free(pool->cmd_slab, cmd);
+fail:
+ return NULL;
}
/**
@@ -284,27 +267,19 @@ EXPORT_SYMBOL_GPL(__scsi_get_command);
*/
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+ unsigned long flags;
- /* Bail if we can't get a reference to the device */
- if (!get_device(&dev->sdev_gendev))
+ if (unlikely(cmd == NULL))
return NULL;
- cmd = __scsi_get_command(dev->host, gfp_mask);
-
- if (likely(cmd != NULL)) {
- unsigned long flags;
-
- cmd->device = dev;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
- cmd->jiffies_at_alloc = jiffies;
- } else
- put_device(&dev->sdev_gendev);
-
+ cmd->device = dev;
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ cmd->jiffies_at_alloc = jiffies;
return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
@@ -313,25 +288,22 @@ EXPORT_SYMBOL(scsi_get_command);
* __scsi_put_command - Free a struct scsi_cmnd
* @shost: dev->host
* @cmd: Command to free
- * @dev: parent scsi device
*/
-void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
- struct device *dev)
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
- /* changing locks here, don't need to restore the irq state */
- spin_lock_irqsave(&shost->free_list_lock, flags);
if (unlikely(list_empty(&shost->free_list))) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
+ spin_lock_irqsave(&shost->free_list_lock, flags);
+ if (list_empty(&shost->free_list)) {
+ list_add(&cmd->list, &shost->free_list);
+ cmd = NULL;
+ }
+ spin_unlock_irqrestore(&shost->free_list_lock, flags);
}
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
if (likely(cmd != NULL))
- scsi_pool_free_command(shost->cmd_pool, cmd);
-
- put_device(dev);
+ scsi_host_free_command(shost, cmd);
}
EXPORT_SYMBOL(__scsi_put_command);
@@ -345,7 +317,6 @@ EXPORT_SYMBOL(__scsi_put_command);
*/
void scsi_put_command(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
unsigned long flags;
/* serious error if the command hasn't come from a device list */
@@ -356,50 +327,107 @@ void scsi_put_command(struct scsi_cmnd *cmd)
cancel_delayed_work(&cmd->abort_work);
- __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
+ __scsi_put_command(cmd->device->host, cmd);
}
EXPORT_SYMBOL(scsi_put_command);
-static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
+static struct scsi_host_cmd_pool *
+scsi_find_host_cmd_pool(struct Scsi_Host *shost)
+{
+ if (shost->hostt->cmd_size)
+ return shost->hostt->cmd_pool;
+ if (shost->unchecked_isa_dma)
+ return &scsi_cmd_dma_pool;
+ return &scsi_cmd_pool;
+}
+
+static void
+scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
{
+ kfree(pool->sense_name);
+ kfree(pool->cmd_name);
+ kfree(pool);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
+ struct scsi_host_cmd_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
+ pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
+ if (!pool->cmd_name || !pool->sense_name) {
+ scsi_free_host_cmd_pool(pool);
+ return NULL;
+ }
+
+ pool->slab_flags = SLAB_HWCACHE_ALIGN;
+ if (shost->unchecked_isa_dma) {
+ pool->slab_flags |= SLAB_CACHE_DMA;
+ pool->gfp_mask = __GFP_DMA;
+ }
+ return pool;
+}
+
+static struct scsi_host_cmd_pool *
+scsi_get_host_cmd_pool(struct Scsi_Host *shost)
+{
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_host_cmd_pool *retval = NULL, *pool;
+ size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
+
/*
* Select a command slab for this host and create it if not
* yet existent.
*/
mutex_lock(&host_cmd_pool_mutex);
- pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
- &scsi_cmd_pool;
+ pool = scsi_find_host_cmd_pool(shost);
+ if (!pool) {
+ pool = scsi_alloc_host_cmd_pool(shost);
+ if (!pool)
+ goto out;
+ }
+
if (!pool->users) {
- pool->cmd_slab = kmem_cache_create(pool->cmd_name,
- sizeof(struct scsi_cmnd), 0,
+ pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
pool->slab_flags, NULL);
if (!pool->cmd_slab)
- goto fail;
+ goto out_free_pool;
pool->sense_slab = kmem_cache_create(pool->sense_name,
SCSI_SENSE_BUFFERSIZE, 0,
pool->slab_flags, NULL);
- if (!pool->sense_slab) {
- kmem_cache_destroy(pool->cmd_slab);
- goto fail;
- }
+ if (!pool->sense_slab)
+ goto out_free_slab;
}
pool->users++;
retval = pool;
- fail:
+out:
mutex_unlock(&host_cmd_pool_mutex);
return retval;
+
+out_free_slab:
+ kmem_cache_destroy(pool->cmd_slab);
+out_free_pool:
+ if (hostt->cmd_size)
+ scsi_free_host_cmd_pool(pool);
+ goto out;
}
-static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
{
+ struct scsi_host_template *hostt = shost->hostt;
struct scsi_host_cmd_pool *pool;
mutex_lock(&host_cmd_pool_mutex);
- pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
- &scsi_cmd_pool;
+ pool = scsi_find_host_cmd_pool(shost);
+
/*
* This may happen if a driver has a mismatched get and put
* of the command pool; the driver should be implicated in
@@ -410,67 +438,13 @@ static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
if (!--pool->users) {
kmem_cache_destroy(pool->cmd_slab);
kmem_cache_destroy(pool->sense_slab);
+ if (hostt->cmd_size)
+ scsi_free_host_cmd_pool(pool);
}
mutex_unlock(&host_cmd_pool_mutex);
}
/**
- * scsi_allocate_command - get a fully allocated SCSI command
- * @gfp_mask: allocation mask
- *
- * This function is for use outside of the normal host based pools.
- * It allocates the relevant command and takes an additional reference
- * on the pool it used. This function *must* be paired with
- * scsi_free_command which also has the identical mask, otherwise the
- * free pool counts will eventually go wrong and you'll trigger a bug.
- *
- * This function should *only* be used by drivers that need a static
- * command allocation at start of day for internal functions.
- */
-struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
-{
- struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
- if (!pool)
- return NULL;
-
- return scsi_pool_alloc_command(pool, gfp_mask);
-}
-EXPORT_SYMBOL(scsi_allocate_command);
-
-/**
- * scsi_free_command - free a command allocated by scsi_allocate_command
- * @gfp_mask: mask used in the original allocation
- * @cmd: command to free
- *
- * Note: using the original allocation mask is vital because that's
- * what determines which command pool we use to free the command. Any
- * mismatch will cause the system to BUG eventually.
- */
-void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
-{
- struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
- /*
- * this could trigger if the mask to scsi_allocate_command
- * doesn't match this mask. Otherwise we're guaranteed that this
- * succeeds because scsi_allocate_command must have taken a reference
- * on the pool
- */
- BUG_ON(!pool);
-
- scsi_pool_free_command(pool, cmd);
- /*
- * scsi_put_host_cmd_pool is called twice; once to release the
- * reference we took above, and once to release the reference
- * originally taken by scsi_allocate_command
- */
- scsi_put_host_cmd_pool(gfp_mask);
- scsi_put_host_cmd_pool(gfp_mask);
-}
-EXPORT_SYMBOL(scsi_free_command);
-
-/**
* scsi_setup_command_freelist - Setup the command freelist for a scsi host.
* @shost: host to allocate the freelist for.
*
@@ -482,14 +456,13 @@ EXPORT_SYMBOL(scsi_free_command);
*/
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
- struct scsi_cmnd *cmd;
const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+ struct scsi_cmnd *cmd;
spin_lock_init(&shost->free_list_lock);
INIT_LIST_HEAD(&shost->free_list);
- shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
-
+ shost->cmd_pool = scsi_get_host_cmd_pool(shost);
if (!shost->cmd_pool)
return -ENOMEM;
@@ -498,7 +471,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
*/
cmd = scsi_host_alloc_command(shost, gfp_mask);
if (!cmd) {
- scsi_put_host_cmd_pool(gfp_mask);
+ scsi_put_host_cmd_pool(shost);
shost->cmd_pool = NULL;
return -ENOMEM;
}
@@ -524,10 +497,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
list_del_init(&cmd->list);
- scsi_pool_free_command(shost->cmd_pool, cmd);
+ scsi_host_free_command(shost, cmd);
}
shost->cmd_pool = NULL;
- scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
+ scsi_put_host_cmd_pool(shost);
}
#ifdef CONFIG_SCSI_LOGGING
@@ -954,7 +927,7 @@ EXPORT_SYMBOL(scsi_track_queue_full);
* This is an internal helper function. You probably want to use
* scsi_get_vpd_page instead.
*
- * Returns 0 on success or a negative error number.
+ * Returns size of the vpd page on success or a negative error number.
*/
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
u8 page, unsigned len)
@@ -962,6 +935,9 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
int result;
unsigned char cmd[16];
+ if (len < 4)
+ return -EINVAL;
+
cmd[0] = INQUIRY;
cmd[1] = 1; /* EVPD */
cmd[2] = page;
@@ -976,13 +952,13 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
len, NULL, 30 * HZ, 3, NULL);
if (result)
- return result;
+ return -EIO;
/* Sanity check that we got the page back that we asked for */
if (buffer[1] != page)
return -EIO;
- return 0;
+ return get_unaligned_be16(&buffer[2]) + 4;
}
/**
@@ -1009,18 +985,18 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
- if (result)
+ if (result < 4)
goto fail;
/* If the user actually wanted this page, we can skip the rest */
if (page == 0)
return 0;
- for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
- if (buf[i + 4] == page)
+ for (i = 4; i < min(result, buf_len); i++)
+ if (buf[i] == page)
goto found;
- if (i < buf[3] && i >= buf_len - 4)
+ if (i < result && i >= buf_len)
/* ran off the end of the buffer, give us benefit of doubt */
goto found;
/* The device claims it doesn't support the requested page */
@@ -1028,7 +1004,7 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
found:
result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
- if (result)
+ if (result < 0)
goto fail;
return 0;
@@ -1039,6 +1015,93 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
/**
+ * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
+ * @sdev: The device to ask
+ *
+ * Attach the 'Device Identification' VPD page (0x83) and the
+ * 'Unit Serial Number' VPD page (0x80) to a SCSI device
+ * structure. This information can be used to identify the device
+ * uniquely.
+ */
+void scsi_attach_vpd(struct scsi_device *sdev)
+{
+ int result, i;
+ int vpd_len = SCSI_VPD_PG_LEN;
+ int pg80_supported = 0;
+ int pg83_supported = 0;
+ unsigned char *vpd_buf;
+
+ if (sdev->skip_vpd_pages)
+ return;
+retry_pg0:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ /* Ask for all the pages supported by this device */
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg0;
+ }
+
+ for (i = 4; i < result; i++) {
+ if (vpd_buf[i] == 0x80)
+ pg80_supported = 1;
+ if (vpd_buf[i] == 0x83)
+ pg83_supported = 1;
+ }
+ kfree(vpd_buf);
+ vpd_len = SCSI_VPD_PG_LEN;
+
+ if (pg80_supported) {
+retry_pg80:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg80;
+ }
+ sdev->vpd_pg80_len = result;
+ sdev->vpd_pg80 = vpd_buf;
+ vpd_len = SCSI_VPD_PG_LEN;
+ }
+
+ if (pg83_supported) {
+retry_pg83:
+ vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!vpd_buf)
+ return;
+
+ result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
+ if (result < 0) {
+ kfree(vpd_buf);
+ return;
+ }
+ if (result > vpd_len) {
+ vpd_len = result;
+ kfree(vpd_buf);
+ goto retry_pg83;
+ }
+ sdev->vpd_pg83_len = result;
+ sdev->vpd_pg83 = vpd_buf;
+ }
+}
+
+/**
* scsi_report_opcode - Find out if a given command opcode is supported
* @sdev: scsi device to query
* @buffer: scratch buffer (must be at least 20 bytes long)
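scsi_attach_vpd() above caches the complete VPD pages, 4-byte header included, in sdev->vpd_pg80/sdev->vpd_pg83; the ses.c hunk later in this patch walks vpd_pg83 in exactly this way. A minimal sketch of such a consumer, using only the fields added here and a hypothetical helper name:

    /* Sketch: walk the designation descriptors cached by scsi_attach_vpd().
     * The descriptor layout (4-byte header, then 'len' designator bytes)
     * follows SPC; the helper name is illustrative.
     */
    static void my_dump_designators(struct scsi_device *sdev)
    {
    	unsigned char *desc;

    	if (!sdev->vpd_pg83_len)
    		return;

    	desc = sdev->vpd_pg83 + 4;		/* skip the 4-byte page header */
    	while (desc + 4 <= sdev->vpd_pg83 + sdev->vpd_pg83_len) {
    		u8 assoc = (desc[1] >> 4) & 0x3;	/* 0 = LU, 1 = target port */
    		u8 len = desc[3];

    		dev_info(&sdev->sdev_gendev,
    			 "designator: assoc %u, len %u\n", assoc, len);
    		desc += len + 4;		/* advance to the next descriptor */
    	}
    }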
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2decc6417518..f3e9cc038d1d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -64,6 +64,7 @@ static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
+#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
@@ -195,6 +196,7 @@ static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static bool scsi_debug_removable = DEF_REMOVABLE;
+static bool scsi_debug_clustering;
static int scsi_debug_cmnd_count = 0;
@@ -1780,7 +1782,6 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("%s: REF check failed on sector %lu\n",
__func__, (unsigned long)sector);
- dif_errors++;
return 0x03;
}
return 0;
@@ -1789,23 +1790,27 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
unsigned int sectors, bool read)
{
- unsigned int i, resid;
- struct scatterlist *psgl;
+ size_t resid;
void *paddr;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
+ struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
- int len = min(psgl->length, resid);
+ sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+
+ while (sg_miter_next(&miter) && resid > 0) {
+ size_t len = min(miter.length, resid);
void *start = dif_store(sector);
- int rest = 0;
+ size_t rest = 0;
if (dif_store_end < start + len)
rest = start + len - dif_store_end;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+ paddr = miter.addr;
if (read)
memcpy(paddr, start, len - rest);
@@ -1821,8 +1826,8 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sector += len / sizeof(*dif_storep);
resid -= len;
- kunmap_atomic(paddr);
}
+ sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
@@ -1832,7 +1837,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
struct sd_dif_tuple *sdt;
sector_t sector;
- for (i = 0; i < sectors; i++) {
+ for (i = 0; i < sectors; i++, ei_lba++) {
int ret;
sector = start_sec + i;
@@ -1846,8 +1851,6 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
dif_errors++;
return ret;
}
-
- ei_lba++;
}
dif_copy_prot(SCpnt, start_sec, sectors, true);
@@ -1886,17 +1889,19 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
return check_condition_result;
}
+ read_lock_irqsave(&atomic_rw, iflags);
+
/* DIX + T10 DIF */
if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
if (prot_ret) {
+ read_unlock_irqrestore(&atomic_rw, iflags);
mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- read_lock_irqsave(&atomic_rw, iflags);
ret = do_device_access(SCpnt, devip, lba, num, 0);
read_unlock_irqrestore(&atomic_rw, iflags);
if (ret == -1)
@@ -1931,55 +1936,62 @@ void dump_sector(unsigned char *buf, int len)
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
- int i, j, ret;
+ int ret;
struct sd_dif_tuple *sdt;
- struct scatterlist *dsgl;
- struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
- void *daddr, *paddr;
+ void *daddr;
sector_t sector = start_sec;
int ppage_offset;
+ int dpage_offset;
+ struct sg_mapping_iter diter;
+ struct sg_mapping_iter piter;
BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
- ppage_offset = 0;
-
- /* For each data page */
- scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
- daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
-
- /* For each sector-sized chunk in data page */
- for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
+ sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
+ scsi_prot_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+ sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ /* For each protection page */
+ while (sg_miter_next(&piter)) {
+ dpage_offset = 0;
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+ for (ppage_offset = 0; ppage_offset < piter.length;
+ ppage_offset += sizeof(struct sd_dif_tuple)) {
/* If we're at the end of the current
- * protection page advance to the next one
+ * data page advance to the next one
*/
- if (ppage_offset >= psgl->length) {
- kunmap_atomic(paddr);
- psgl = sg_next(psgl);
- BUG_ON(psgl == NULL);
- paddr = kmap_atomic(sg_page(psgl))
- + psgl->offset;
- ppage_offset = 0;
+ if (dpage_offset >= diter.length) {
+ if (WARN_ON(!sg_miter_next(&diter))) {
+ ret = 0x01;
+ goto out;
+ }
+ dpage_offset = 0;
}
- sdt = paddr + ppage_offset;
+ sdt = piter.addr + ppage_offset;
+ daddr = diter.addr + dpage_offset;
- ret = dif_verify(sdt, daddr + j, sector, ei_lba);
+ ret = dif_verify(sdt, daddr, sector, ei_lba);
if (ret) {
- dump_sector(daddr + j, scsi_debug_sector_size);
+ dump_sector(daddr, scsi_debug_sector_size);
goto out;
}
sector++;
ei_lba++;
- ppage_offset += sizeof(struct sd_dif_tuple);
+ dpage_offset += scsi_debug_sector_size;
}
-
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ diter.consumed = dpage_offset;
+ sg_miter_stop(&diter);
}
+ sg_miter_stop(&piter);
dif_copy_prot(SCpnt, start_sec, sectors, false);
dix_writes++;
@@ -1988,8 +2000,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
out:
dif_errors++;
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ sg_miter_stop(&diter);
+ sg_miter_stop(&piter);
return ret;
}
@@ -2089,17 +2101,19 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
if (ret)
return ret;
+ write_lock_irqsave(&atomic_rw, iflags);
+
/* DIX + T10 DIF */
if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
if (prot_ret) {
+ write_unlock_irqrestore(&atomic_rw, iflags);
mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
return illegal_condition_result;
}
}
- write_lock_irqsave(&atomic_rw, iflags);
ret = do_device_access(SCpnt, devip, lba, num, 1);
if (scsi_debug_lbp())
map_region(lba, num);
@@ -2178,6 +2192,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
struct unmap_block_desc *desc;
unsigned int i, payload_len, descriptors;
int ret;
+ unsigned long iflags;
ret = check_readiness(scmd, 1, devip);
if (ret)
@@ -2199,6 +2214,8 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
desc = (void *)&buf[8];
+ write_lock_irqsave(&atomic_rw, iflags);
+
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
unsigned int num = get_unaligned_be32(&desc[i].blocks);
@@ -2213,6 +2230,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
ret = 0;
out:
+ write_unlock_irqrestore(&atomic_rw, iflags);
kfree(buf);
return ret;
@@ -2313,36 +2331,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
unsigned int num, struct sdebug_dev_info *devip)
{
- int i, j, ret = -1;
+ int j;
unsigned char *kaddr, *buf;
unsigned int offset;
- struct scatterlist *sg;
struct scsi_data_buffer *sdb = scsi_in(scp);
+ struct sg_mapping_iter miter;
/* better not to use temporary buffer. */
buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
- if (!buf)
- return ret;
+ if (!buf) {
+ mk_sense_buffer(devip, NOT_READY,
+ LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ return check_condition_result;
+ }
scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
offset = 0;
- for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
- kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
- if (!kaddr)
- goto out;
+ sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
- for (j = 0; j < sg->length; j++)
- *(kaddr + sg->offset + j) ^= *(buf + offset + j);
+ while (sg_miter_next(&miter)) {
+ kaddr = miter.addr;
+ for (j = 0; j < miter.length; j++)
+ *(kaddr + j) ^= *(buf + offset + j);
- offset += sg->length;
- kunmap_atomic(kaddr);
+ offset += miter.length;
}
- ret = 0;
-out:
+ sg_miter_stop(&miter);
kfree(buf);
- return ret;
+ return 0;
}
/* When timer goes off this function is called. */
@@ -2744,6 +2763,7 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
*/
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
@@ -2787,6 +2807,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -3248,7 +3269,7 @@ static struct attribute *sdebug_drv_attrs[] = {
};
ATTRIBUTE_GROUPS(sdebug_drv);
-struct device *pseudo_primary;
+static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
@@ -3934,6 +3955,8 @@ static int sdebug_driver_probe(struct device * dev)
sdbg_host = to_sdebug_host(dev);
sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ if (scsi_debug_clustering)
+ sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
printk(KERN_ERR "%s: scsi_register failed\n", __func__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 78b004da2885..771c16bfdbac 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2288,6 +2288,11 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
if (scsi_autopm_get_host(shost) < 0)
return FAILED;
+ if (!get_device(&dev->sdev_gendev)) {
+ rtn = FAILED;
+ goto out_put_autopm_host;
+ }
+
scmd = scsi_get_command(dev, GFP_KERNEL);
blk_rq_init(NULL, &req);
scmd->request = &req;
@@ -2345,6 +2350,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scsi_run_host_queues(shost);
scsi_next_command(scmd);
+out_put_autopm_host:
scsi_autopm_put_host(shost);
return rtn;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..5681c05ac506 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,28 +75,6 @@ struct kmem_cache *scsi_sdb_cache;
*/
#define SCSI_QUEUE_DELAY 3
-/*
- * Function: scsi_unprep_request()
- *
- * Purpose: Remove all preparation done for a request, including its
- * associated scsi_cmnd, so that it can be requeued.
- *
- * Arguments: req - request to unprepare
- *
- * Lock status: Assumed that no locks are held upon entry.
- *
- * Returns: Nothing.
- */
-static void scsi_unprep_request(struct request *req)
-{
- struct scsi_cmnd *cmd = req->special;
-
- blk_unprep_request(req);
- req->special = NULL;
-
- scsi_put_command(cmd);
-}
-
/**
* __scsi_queue_insert - private queue insertion
* @cmd: The SCSI command being requeued
@@ -385,29 +363,12 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
return 0;
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
- *
- * Notes: The previous command was completely finished, start
- * a new one if possible.
- */
-static void scsi_run_queue(struct request_queue *q)
+static void scsi_starved_list_run(struct Scsi_Host *shost)
{
- struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost;
LIST_HEAD(starved_list);
+ struct scsi_device *sdev;
unsigned long flags;
- shost = sdev->host;
- if (scsi_target(sdev)->single_lun)
- scsi_single_lun_run(sdev);
-
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->starved_list, &starved_list);
@@ -459,6 +420,28 @@ static void scsi_run_queue(struct request_queue *q)
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function: scsi_run_queue()
+ *
+ * Purpose: Select a proper request queue to serve next
+ *
+ * Arguments: q - last request's queue
+ *
+ * Returns: Nothing
+ *
+ * Notes: The previous command was completely finished, start
+ * a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev = q->queuedata;
+
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+ if (!list_empty(&sdev->host->starved_list))
+ scsi_starved_list_run(sdev->host);
blk_run_queue(q);
}
@@ -497,16 +480,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
struct request *req = cmd->request;
unsigned long flags;
- /*
- * We need to hold a reference on the device to avoid the queue being
- * killed after the unlock and before scsi_run_queue is invoked which
- * may happen because scsi_unprep_request() puts the command which
- * releases its reference on the device.
- */
- get_device(&sdev->sdev_gendev);
-
spin_lock_irqsave(q->queue_lock, flags);
- scsi_unprep_request(req);
+ blk_unprep_request(req);
+ req->special = NULL;
+ scsi_put_command(cmd);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -520,13 +497,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
- /* need to hold a reference on the device before we let go of the cmd */
- get_device(&sdev->sdev_gendev);
-
scsi_put_command(cmd);
scsi_run_queue(q);
- /* ok to remove device now */
put_device(&sdev->sdev_gendev);
}
@@ -788,6 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
char *description = NULL;
+ unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -989,6 +963,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_FAIL;
}
+ if (action != ACTION_FAIL &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ action = ACTION_FAIL;
+ description = "Command timed out";
+ }
+
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
@@ -1111,6 +1091,7 @@ err_exit:
scsi_release_buffers(cmd);
cmd->request->special = NULL;
scsi_put_command(cmd);
+ put_device(&cmd->device->sdev_gendev);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1121,9 +1102,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
struct scsi_cmnd *cmd;
if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (!get_device(&sdev->sdev_gendev))
+ return NULL;
+
cmd = scsi_get_command(sdev, GFP_ATOMIC);
- if (unlikely(!cmd))
+ if (unlikely(!cmd)) {
+ put_device(&sdev->sdev_gendev);
return NULL;
+ }
req->special = cmd;
} else {
cmd = req->special;
@@ -1286,6 +1273,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
struct scsi_cmnd *cmd = req->special;
scsi_release_buffers(cmd);
scsi_put_command(cmd);
+ put_device(&cmd->device->sdev_gendev);
req->special = NULL;
}
break;
@@ -1543,16 +1531,14 @@ static void scsi_softirq_done(struct request *rq)
* Lock status: IO request lock assumed to be held when called.
*/
static void scsi_request_fn(struct request_queue *q)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *req;
- if(!get_device(&sdev->sdev_gendev))
- /* We must be tearing the block queue down already */
- return;
-
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
@@ -1641,7 +1627,7 @@ static void scsi_request_fn(struct request_queue *q)
goto out_delay;
}
- goto out;
+ return;
not_ready:
spin_unlock_irq(shost->host_lock);
@@ -1660,12 +1646,6 @@ static void scsi_request_fn(struct request_queue *q)
out_delay:
if (sdev->device_busy == 0)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
-out:
- /* must be careful here...if we trigger the ->remove() function
- * we cannot be holding the q lock */
- spin_unlock_irq(q->queue_lock);
- put_device(&sdev->sdev_gendev);
- spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
@@ -1684,7 +1664,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
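The scsi_io_completion() change above bounds retries by wall-clock time rather than by retry count alone. The same check in isolation, as a small sketch with a hypothetical helper name:

    #include <linux/jiffies.h>

    /* True once (allowed + 1) full command timeouts have elapsed since the
     * command was allocated; e.g. allowed = 3 retries and a 30 s timeout
     * give the command two minutes before any retry disposition is
     * overridden to ACTION_FAIL ("Command timed out").
     */
    static bool my_retry_budget_spent(unsigned long jiffies_at_alloc,
    				  unsigned int timeout, int allowed)
    {
    	unsigned long wait_for = (allowed + 1) * (unsigned long)timeout;

    	return time_before(jiffies_at_alloc + wait_for, jiffies);
    }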
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 307a81137607..27f96d5b7680 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(dev->parent);
unsigned long flags;
+ starget->state = STARGET_DEL;
transport_destroy_device(dev);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->hostt->target_destroy)
@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
}
/**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs. Note: we can rely on being in user context here because target
+ * reaping is only done from places where scsi device visibility is being
+ * removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+ struct scsi_target *starget
+ = container_of(kref, struct scsi_target, reap_ref);
+
+ /*
+ * if we get here and the target is still in the CREATED state that
+ * means it was allocated but never made visible (because a scan
+ * turned up no LUNs), so don't call device_del() on it.
+ */
+ if (starget->state != STARGET_CREATED) {
+ transport_remove_device(&starget->dev);
+ device_del(&starget->dev);
+ }
+ scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
* scsi_alloc_target - allocate a new or find an existing target
* @parent: parent of the target (need not be a scsi host)
* @channel: target channel number (zero if no channels)
@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
+ shost->transportt->target_size;
struct scsi_target *starget;
struct scsi_target *found_target;
- int error;
+ int error, ref_got;
starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
}
dev = &starget->dev;
device_initialize(dev);
- starget->reap_ref = 1;
+ kref_init(&starget->reap_ref);
dev->parent = get_device(parent);
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
return starget;
found:
- found_target->reap_ref++;
+ /*
+ * release routine already fired if kref is zero, so if we can still
+ * take the reference, the target must be alive. If we can't, it must
+ * be dying and we need to wait for a new target
+ */
+ ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
spin_unlock_irqrestore(shost->host_lock, flags);
- if (found_target->state != STARGET_DEL) {
+ if (ref_got) {
put_device(dev);
return found_target;
}
- /* Unfortunately, we found a dying target; need to
- * wait until it's dead before we can get a new one */
+ /*
+ * Unfortunately, we found a dying target; need to wait until it's
+ * dead before we can get a new one. There is an anomaly here. We
+ * *should* call scsi_target_reap() to balance the kref_get() of the
+ * reap_ref above. However, since the target is being released, it's
+ * already invisible and the reap_ref is irrelevant. If we call
+ * scsi_target_reap() we might spuriously do another device_del() on
+ * an already invisible target.
+ */
put_device(&found_target->dev);
- flush_scheduled_work();
+ /*
+ * length of time is irrelevant here, we just want to yield the CPU
+ * for a tick to avoid busy waiting for the target to die.
+ */
+ msleep(1);
goto retry;
}
-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
- struct scsi_target *starget =
- container_of(work, struct scsi_target, ew.work);
-
- transport_remove_device(&starget->dev);
- device_del(&starget->dev);
- scsi_target_destroy(starget);
-}
-
/**
* scsi_target_reap - check to see if target is in use and destroy if not
* @starget: target to be checked
@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
*/
void scsi_target_reap(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
- enum scsi_target_state state;
- int empty = 0;
-
- spin_lock_irqsave(shost->host_lock, flags);
- state = starget->state;
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- empty = 1;
- starget->state = STARGET_DEL;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (!empty)
- return;
-
- BUG_ON(state == STARGET_DEL);
- if (state == STARGET_CREATED)
- scsi_target_destroy(starget);
- else
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ /*
+ * serious problem if this triggers: STARGET_DEL is only set if the
+ * reap_ref drops to zero, so we're trying to do another final put
+ * on an already released kref
+ */
+ BUG_ON(starget->state == STARGET_DEL);
+ scsi_target_reap_ref_put(starget);
}
/**
@@ -946,6 +970,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
}
+ if (sdev->scsi_level >= SCSI_3)
+ scsi_attach_vpd(sdev);
+
sdev->max_queue_depth = sdev->queue_depth;
/*
@@ -1532,6 +1559,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
mutex_unlock(&shost->scan_mutex);
scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(). Target will be destroyed unless
+ * scsi_probe_and_add_lun made an underlying device visible
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
@@ -1612,8 +1643,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
out_reap:
scsi_autopm_put_target(starget);
- /* now determine if the target has any children at all
- * and if not, nuke it */
+ /*
+ * paired with scsi_alloc_target(): determine if the target has
+ * any children at all and if not, nuke it
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9117d0bf408e..074e8cc30955 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -300,7 +300,9 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
int ret = -EINVAL;
unsigned long deadline, flags;
- if (shost->transportt && shost->transportt->eh_strategy_handler)
+ if (shost->transportt &&
+ (shost->transportt->eh_strategy_handler ||
+ !shost->hostt->eh_host_reset_handler))
return ret;
if (!strncmp(buf, "off", strlen("off")))
@@ -383,17 +385,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
struct scsi_device *sdev;
struct device *parent;
- struct scsi_target *starget;
struct list_head *this, *tmp;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
parent = sdev->sdev_gendev.parent;
- starget = to_scsi_target(parent);
spin_lock_irqsave(sdev->host->host_lock, flags);
- starget->reap_ref++;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
list_del(&sdev->starved_entry);
@@ -413,8 +412,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
- scsi_target_reap(scsi_target(sdev));
-
+ kfree(sdev->vpd_pg83);
+ kfree(sdev->vpd_pg80);
kfree(sdev->inquiry);
kfree(sdev);
@@ -579,7 +578,6 @@ static int scsi_sdev_check_buf_bit(const char *buf)
* Create the actual show/store functions and data structures.
*/
sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (queue_depth, "%d\n");
sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
@@ -649,23 +647,12 @@ store_rescan_field (struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
-static void sdev_store_delete_callback(struct device *dev)
-{
- scsi_remove_device(to_scsi_device(dev));
-}
-
static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int rc;
-
- /* An attribute cannot be unregistered by one of its own methods,
- * so we have to use this roundabout approach.
- */
- rc = device_schedule_callback(dev, sdev_store_delete_callback);
- if (rc)
- count = rc;
+ if (device_remove_file_self(dev, attr))
+ scsi_remove_device(to_scsi_device(dev));
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
@@ -723,10 +710,64 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%s\n", name);
}
-static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
+static ssize_t
+store_queue_type_field(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_host_template *sht = sdev->host->hostt;
+ int tag_type = 0, retval;
+ int prev_tag_type = scsi_get_tag_type(sdev);
+
+ if (!sdev->tagged_supported || !sht->change_queue_type)
+ return -EINVAL;
+
+ if (strncmp(buf, "ordered", 7) == 0)
+ tag_type = MSG_ORDERED_TAG;
+ else if (strncmp(buf, "simple", 6) == 0)
+ tag_type = MSG_SIMPLE_TAG;
+ else if (strncmp(buf, "none", 4) != 0)
+ return -EINVAL;
+
+ if (tag_type == prev_tag_type)
+ return count;
+
+ retval = sht->change_queue_type(sdev, tag_type);
+ if (retval < 0)
+ return retval;
+
+ return count;
+}
+
+static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
+ store_queue_type_field);
+
+#define sdev_vpd_pg_attr(_page) \
+static ssize_t \
+show_vpd_##_page(struct file *filp, struct kobject *kobj, \
+ struct bin_attribute *bin_attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ struct device *dev = container_of(kobj, struct device, kobj); \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+ if (!sdev->vpd_##_page) \
+ return -EINVAL; \
+ return memory_read_from_buffer(buf, count, &off, \
+ sdev->vpd_##_page, \
+ sdev->vpd_##_page##_len); \
+} \
+static struct bin_attribute dev_attr_vpd_##_page = { \
+ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
+ .size = 0, \
+ .read = show_vpd_##_page, \
+};
+
+sdev_vpd_pg_attr(pg83);
+sdev_vpd_pg_attr(pg80);
static ssize_t
-show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
+show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
}
@@ -797,46 +838,9 @@ DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
-/* Default template for device attributes. May NOT be modified */
-static struct attribute *scsi_sdev_attrs[] = {
- &dev_attr_device_blocked.attr,
- &dev_attr_type.attr,
- &dev_attr_scsi_level.attr,
- &dev_attr_device_busy.attr,
- &dev_attr_vendor.attr,
- &dev_attr_model.attr,
- &dev_attr_rev.attr,
- &dev_attr_rescan.attr,
- &dev_attr_delete.attr,
- &dev_attr_state.attr,
- &dev_attr_timeout.attr,
- &dev_attr_eh_timeout.attr,
- &dev_attr_iocounterbits.attr,
- &dev_attr_iorequest_cnt.attr,
- &dev_attr_iodone_cnt.attr,
- &dev_attr_ioerr_cnt.attr,
- &dev_attr_modalias.attr,
- REF_EVT(media_change),
- REF_EVT(inquiry_change_reported),
- REF_EVT(capacity_change_reported),
- REF_EVT(soft_threshold_reached),
- REF_EVT(mode_parameter_change_reported),
- REF_EVT(lun_change_reported),
- NULL
-};
-
-static struct attribute_group scsi_sdev_attr_group = {
- .attrs = scsi_sdev_attrs,
-};
-
-static const struct attribute_group *scsi_sdev_attr_groups[] = {
- &scsi_sdev_attr_group,
- NULL
-};
-
static ssize_t
-sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int depth, retval;
struct scsi_device *sdev = to_scsi_device(dev);
@@ -859,10 +863,10 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
return count;
}
+sdev_show_function(queue_depth, "%d\n");
-static struct device_attribute sdev_attr_queue_depth_rw =
- __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
- sdev_store_queue_depth_rw);
+static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
+ sdev_store_queue_depth);
static ssize_t
sdev_show_queue_ramp_up_period(struct device *dev,
@@ -890,40 +894,79 @@ sdev_store_queue_ramp_up_period(struct device *dev,
return period;
}
-static struct device_attribute sdev_attr_queue_ramp_up_period =
- __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
- sdev_show_queue_ramp_up_period,
- sdev_store_queue_ramp_up_period);
+static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+ sdev_show_queue_ramp_up_period,
+ sdev_store_queue_ramp_up_period);
-static ssize_t
-sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
- int tag_type = 0, retval;
- int prev_tag_type = scsi_get_tag_type(sdev);
- if (!sdev->tagged_supported || !sht->change_queue_type)
- return -EINVAL;
- if (strncmp(buf, "ordered", 7) == 0)
- tag_type = MSG_ORDERED_TAG;
- else if (strncmp(buf, "simple", 6) == 0)
- tag_type = MSG_SIMPLE_TAG;
- else if (strncmp(buf, "none", 4) != 0)
- return -EINVAL;
+ if (attr == &dev_attr_queue_depth.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return S_IRUGO;
- if (tag_type == prev_tag_type)
- return count;
+ if (attr == &dev_attr_queue_ramp_up_period.attr &&
+ !sdev->host->hostt->change_queue_depth)
+ return 0;
- retval = sht->change_queue_type(sdev, tag_type);
- if (retval < 0)
- return retval;
+ if (attr == &dev_attr_queue_type.attr &&
+ !sdev->host->hostt->change_queue_type)
+ return S_IRUGO;
- return count;
+ return attr->mode;
}
+/* Default template for device attributes. May NOT be modified */
+static struct attribute *scsi_sdev_attrs[] = {
+ &dev_attr_device_blocked.attr,
+ &dev_attr_type.attr,
+ &dev_attr_scsi_level.attr,
+ &dev_attr_device_busy.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_model.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_delete.attr,
+ &dev_attr_state.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_eh_timeout.attr,
+ &dev_attr_iocounterbits.attr,
+ &dev_attr_iorequest_cnt.attr,
+ &dev_attr_iodone_cnt.attr,
+ &dev_attr_ioerr_cnt.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_queue_depth.attr,
+ &dev_attr_queue_type.attr,
+ &dev_attr_queue_ramp_up_period.attr,
+ REF_EVT(media_change),
+ REF_EVT(inquiry_change_reported),
+ REF_EVT(capacity_change_reported),
+ REF_EVT(soft_threshold_reached),
+ REF_EVT(mode_parameter_change_reported),
+ REF_EVT(lun_change_reported),
+ NULL
+};
+
+static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg83,
+ &dev_attr_vpd_pg80,
+ NULL
+};
+static struct attribute_group scsi_sdev_attr_group = {
+ .attrs = scsi_sdev_attrs,
+ .bin_attrs = scsi_sdev_bin_attrs,
+ .is_visible = scsi_sdev_attr_is_visible,
+};
+
+static const struct attribute_group *scsi_sdev_attr_groups[] = {
+ &scsi_sdev_attr_group,
+ NULL
+};
+
static int scsi_target_add(struct scsi_target *starget)
{
int error;
@@ -946,10 +989,6 @@ static int scsi_target_add(struct scsi_target *starget)
return 0;
}
-static struct device_attribute sdev_attr_queue_type_rw =
- __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
- sdev_store_queue_type_rw);
-
/**
* scsi_sysfs_add_sdev - add scsi device to sysfs
* @sdev: scsi_device to add
@@ -1003,25 +1042,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
- /* create queue files, which may be writable, depending on the host */
- if (sdev->host->hostt->change_queue_depth) {
- error = device_create_file(&sdev->sdev_gendev,
- &sdev_attr_queue_depth_rw);
- error = device_create_file(&sdev->sdev_gendev,
- &sdev_attr_queue_ramp_up_period);
- }
- else
- error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
- if (error)
- return error;
-
- if (sdev->host->hostt->change_queue_type)
- error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
- else
- error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
- if (error)
- return error;
-
error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
if (error)
@@ -1071,6 +1091,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
+ /*
+ * Paired with the kref_get() in scsi_sysfs_device_initialize(). We have
+ * removed sysfs visibility from the device, so make the target
+ * invisible if this was the last device underneath it.
+ */
+ scsi_target_reap(scsi_target(sdev));
+
put_device(dev);
}
@@ -1133,7 +1160,7 @@ void scsi_remove_target(struct device *dev)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
/* assuming new targets arrive at the end */
- starget->reap_ref++;
+ kref_get(&starget->reap_ref);
spin_unlock_irqrestore(shost->host_lock, flags);
if (last)
scsi_target_reap(last);
@@ -1217,6 +1244,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * device can now only be removed via __scsi_remove_device() so hold
+ * the target. Target will be held in CREATED state until something
+ * beneath it becomes visible (in which case it moves to RUNNING)
+ */
+ kref_get(&starget->reap_ref);
}
int scsi_is_sdev_device(const struct device *dev)
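For readability, here is roughly what the sdev_vpd_pg_attr() macro above expands to for page 0x83 (expansion only, no new behaviour):

    static ssize_t
    show_vpd_pg83(struct file *filp, struct kobject *kobj,
    	      struct bin_attribute *bin_attr,
    	      char *buf, loff_t off, size_t count)
    {
    	struct device *dev = container_of(kobj, struct device, kobj);
    	struct scsi_device *sdev = to_scsi_device(dev);

    	if (!sdev->vpd_pg83)
    		return -EINVAL;
    	return memory_read_from_buffer(buf, count, &off,
    				       sdev->vpd_pg83, sdev->vpd_pg83_len);
    }

    static struct bin_attribute dev_attr_vpd_pg83 = {
    	.attr = { .name = "vpd_pg83", .mode = S_IRUGO },
    	.size = 0,
    	.read = show_vpd_pg83,
    };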
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 84a1fdf67864..e51add05fb8d 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -155,7 +155,8 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
__blk_put_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
- __scsi_put_command(shost, cmd, &shost->shost_gendev);
+ __scsi_put_command(shost, cmd);
+ put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4628fd5e0688..f80908f74ca9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -261,6 +261,7 @@ static const struct {
{ FC_PORTSPEED_10GBIT, "10 Gbit" },
{ FC_PORTSPEED_8GBIT, "8 Gbit" },
{ FC_PORTSPEED_16GBIT, "16 Gbit" },
+ { FC_PORTSPEED_32GBIT, "32 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fd8ffe6bcfdd..0102a2d70dd8 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1225,7 +1225,7 @@ struct bus_type iscsi_flashnode_bus = {
* Adds a sysfs entry for the flashnode session attributes
*
* Returns:
- * pointer to allocated flashnode sess on sucess
+ * pointer to allocated flashnode sess on success
* %NULL on failure
*/
struct iscsi_bus_flash_session *
@@ -1423,7 +1423,7 @@ static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
}
/**
- * iscsi_destroy_flashnode_sess - destory flashnode session entry
+ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
* @fnode_sess: pointer to flashnode session entry to be destroyed
*
* Deletes the flashnode session entry and all children flashnode connection
@@ -1453,7 +1453,7 @@ static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
}
/**
- * iscsi_destroy_all_flashnode - destory all flashnode session entries
+ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
* @shost: pointer to host data
*
* Destroys all the flashnode session entries and all corresponding children
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 2700a5a09bd4..d47ffc8d3e43 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -64,10 +64,14 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
/**
* srp_tmo_valid() - check timeout combination validity
+ * @reconnect_delay: Reconnect delay in seconds.
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
+ * @dev_loss_tmo: Device loss timeout in seconds.
*
* The combination of the timeout parameters must be such that SCSI commands
* are finished in a reasonable time. Hence do not allow the fast I/O fail
- * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these
+ * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
+ * exceed that limit if failing I/O fast has been disabled. Furthermore, these
* parameters must be such that multipath can detect failed paths timely.
* Hence do not allow all three parameters to be disabled simultaneously.
*/
@@ -79,6 +83,9 @@ int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
return -EINVAL;
if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
return -EINVAL;
+ if (fast_io_fail_tmo < 0 &&
+ dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
if (dev_loss_tmo >= LONG_MAX / HZ)
return -EINVAL;
if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
@@ -368,6 +375,7 @@ invalid:
/**
* srp_reconnect_work() - reconnect and schedule a new attempt if necessary
+ * @work: Work structure used for scheduling this operation.
*/
static void srp_reconnect_work(struct work_struct *work)
{
@@ -408,6 +416,7 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
/**
* rport_fast_io_fail_timedout() - fast I/O failure timeout handler
+ * @work: Work structure used for scheduling this operation.
*/
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
@@ -426,6 +435,7 @@ static void rport_fast_io_fail_timedout(struct work_struct *work)
/**
* rport_dev_loss_timedout() - device loss timeout handler
+ * @work: Work structure used for scheduling this operation.
*/
static void rport_dev_loss_timedout(struct work_struct *work)
{
@@ -452,42 +462,35 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
lockdep_assert_held(&rport->mutex);
- if (!rport->deleted) {
- delay = rport->reconnect_delay;
- fast_io_fail_tmo = rport->fast_io_fail_tmo;
- dev_loss_tmo = rport->dev_loss_tmo;
- pr_debug("%s current state: %d\n",
- dev_name(&shost->shost_gendev), rport->state);
+ delay = rport->reconnect_delay;
+ fast_io_fail_tmo = rport->fast_io_fail_tmo;
+ dev_loss_tmo = rport->dev_loss_tmo;
+ pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
- if (delay > 0)
- queue_delayed_work(system_long_wq,
- &rport->reconnect_work,
- 1UL * delay * HZ);
- if (fast_io_fail_tmo >= 0 &&
- srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
- pr_debug("%s new state: %d\n",
- dev_name(&shost->shost_gendev),
- rport->state);
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state == SRP_RPORT_LOST)
+ return;
+ if (delay > 0)
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ 1UL * delay * HZ);
+ if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+ if (fast_io_fail_tmo >= 0)
queue_delayed_work(system_long_wq,
&rport->fast_io_fail_work,
1UL * fast_io_fail_tmo * HZ);
- }
if (dev_loss_tmo >= 0)
queue_delayed_work(system_long_wq,
&rport->dev_loss_work,
1UL * dev_loss_tmo * HZ);
- } else {
- pr_debug("%s has already been deleted\n",
- dev_name(&shost->shost_gendev));
- srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST);
- scsi_target_unblock(&shost->shost_gendev,
- SDEV_TRANSPORT_OFFLINE);
}
}
/**
* srp_start_tl_fail_timers() - start the transport layer failure timers
+ * @rport: SRP target port.
*
* Start the transport layer fast I/O failure and device loss timers. Do not
* modify a timer that was already started.
@@ -502,6 +505,7 @@ EXPORT_SYMBOL(srp_start_tl_fail_timers);
/**
* scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
*/
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
@@ -522,6 +526,7 @@ static int scsi_request_fn_active(struct Scsi_Host *shost)
/**
* srp_reconnect_rport() - reconnect to an SRP target port
+ * @rport: SRP target port.
*
* Blocks SCSI command queueing before invoking reconnect() such that
* queuecommand() won't be invoked concurrently with reconnect() from outside
@@ -556,7 +561,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
scsi_target_block(&shost->shost_gendev);
while (scsi_request_fn_active(shost))
msleep(20);
- res = i->f->reconnect(rport);
+ res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
if (res == 0) {
@@ -578,9 +583,9 @@ int srp_reconnect_rport(struct srp_rport *rport)
spin_unlock_irq(shost->host_lock);
} else if (rport->state == SRP_RPORT_RUNNING) {
/*
- * srp_reconnect_rport() was invoked with fast_io_fail
- * off. Mark the port as failed and start the TL failure
- * timers if these had not yet been started.
+ * srp_reconnect_rport() has been invoked with fast_io_fail
+ * and dev_loss off. Mark the port as failed and start the TL
+ * failure timers if these had not yet been started.
*/
__rport_fail_io_fast(rport);
scsi_target_unblock(&shost->shost_gendev,
@@ -599,6 +604,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
/**
* srp_timed_out() - SRP transport intercept of the SCSI timeout EH
+ * @scmd: SCSI command.
*
* If a timeout occurs while an rport is in the blocked state, ask the SCSI
* EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
@@ -622,10 +628,6 @@ static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
- cancel_delayed_work_sync(&rport->reconnect_work);
- cancel_delayed_work_sync(&rport->fast_io_fail_work);
- cancel_delayed_work_sync(&rport->dev_loss_work);
-
put_device(dev->parent);
kfree(rport);
}
@@ -674,6 +676,7 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
/**
* srp_rport_get() - increment rport reference count
+ * @rport: SRP target port.
*/
void srp_rport_get(struct srp_rport *rport)
{
@@ -683,6 +686,7 @@ EXPORT_SYMBOL(srp_rport_get);
/**
* srp_rport_put() - decrement rport reference count
+ * @rport: SRP target port.
*/
void srp_rport_put(struct srp_rport *rport)
{
@@ -780,12 +784,6 @@ void srp_rport_del(struct srp_rport *rport)
device_del(dev);
transport_destroy_device(dev);
- mutex_lock(&rport->mutex);
- if (rport->state == SRP_RPORT_BLOCKED)
- __rport_fail_io_fast(rport);
- rport->deleted = true;
- mutex_unlock(&rport->mutex);
-
put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
@@ -810,6 +808,27 @@ void srp_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(srp_remove_host);
+/**
+ * srp_stop_rport_timers() - stop the transport layer recovery timers
+ * @rport: SRP target port.
+ *
+ * Must be called after srp_remove_host() and scsi_remove_host(). The caller
+ * must hold a reference on the rport (rport->dev) and on the SCSI host
+ * (rport->dev.parent).
+ */
+void srp_stop_rport_timers(struct srp_rport *rport)
+{
+ mutex_lock(&rport->mutex);
+ if (rport->state == SRP_RPORT_BLOCKED)
+ __rport_fail_io_fast(rport);
+ srp_rport_set_state(rport, SRP_RPORT_LOST);
+ mutex_unlock(&rport->mutex);
+
+ cancel_delayed_work_sync(&rport->reconnect_work);
+ cancel_delayed_work_sync(&rport->fast_io_fail_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+}
+EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
+
static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
int result)
{
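A rough consumer-side sketch of the ordering that the srp_stop_rport_timers() kernel-doc above requires; the function, 'shost' and 'rport' are illustrative, error handling is omitted, and the exact sequence in a real initiator driver may differ.

    /* Both remove calls precede srp_stop_rport_timers(), and the references
     * pinned here are only dropped afterwards, per the comment above.
     */
    static void my_remove_target(struct Scsi_Host *shost, struct srp_rport *rport)
    {
    	srp_rport_get(rport);		/* hold rport->dev across teardown */
    	srp_remove_host(shost);
    	scsi_remove_host(shost);
    	srp_stop_rport_timers(rport);
    	srp_rport_put(rport);
    	scsi_host_put(shost);
    }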
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9846c6ab2aaa..89e6c04ac595 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
if (sdkp->device->no_write_same)
return BLKPREP_KILL;
- BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+ BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
@@ -1463,8 +1463,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
sd_print_sense_hdr(sdkp, &sshdr);
/* we need to evaluate the error return */
if (scsi_sense_valid(&sshdr) &&
- /* 0x3a is medium not present */
- sshdr.asc == 0x3a)
+ (sshdr.asc == 0x3a || /* medium not present */
+ sshdr.asc == 0x20)) /* invalid command */
/* this is no error here */
return 0;
@@ -2281,7 +2281,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
set_disk_ro(sdkp->disk, 0);
if (sdp->skip_ms_page_3f) {
- sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
+ sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
return;
}
@@ -2313,7 +2313,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (!scsi_status_is_good(res)) {
- sd_printk(KERN_WARNING, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"Test WP failed, assume Write Enabled\n");
} else {
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
@@ -2381,7 +2381,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (!data.header_length) {
modepage = 6;
first_len = 0;
- sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Missing header in MODE_SENSE response\n");
}
/* that went OK, now ask for the proper length */
@@ -2394,7 +2395,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (len < 3)
goto bad_sense;
else if (len > SD_BUF_SIZE) {
- sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
"data from %d to %d bytes\n", len, SD_BUF_SIZE);
len = SD_BUF_SIZE;
}
@@ -2417,8 +2418,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
/* We're interested only in the first 3 bytes.
*/
if (len - offset <= 2) {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode parameter "
+ "data\n");
goto defaults;
} else {
modepage = page_code;
@@ -2432,14 +2434,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
else if (!spf && len - offset > 1)
offset += 2 + buffer[offset+1];
else {
- sd_printk(KERN_ERR, sdkp, "Incomplete "
- "mode parameter data\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Incomplete mode "
+ "parameter data\n");
goto defaults;
}
}
}
- sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
goto defaults;
Page_found:
@@ -2453,7 +2456,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
- sd_printk(KERN_NOTICE, sdkp,
+ sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
}
@@ -2475,16 +2478,19 @@ bad_sense:
sshdr.sense_key == ILLEGAL_REQUEST &&
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
/* Invalid field in CDB */
- sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
+ sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
else
- sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Asking for cache data failed\n");
defaults:
if (sdp->wce_default_on) {
- sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
+ sd_first_printk(KERN_NOTICE, sdkp,
+ "Assuming drive cache: write back\n");
sdkp->WCE = 1;
} else {
- sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
+ sd_first_printk(KERN_ERR, sdkp,
+ "Assuming drive cache: write through\n");
sdkp->WCE = 0;
}
sdkp->RCD = 0;
@@ -2513,7 +2519,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
if (!scsi_status_is_good(res) || !data.header_length ||
data.length < 6) {
- sd_printk(KERN_WARNING, sdkp,
+ sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
if (scsi_sense_valid(&sshdr))
@@ -2525,7 +2531,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
offset = data.header_length + data.block_descriptor_length;
if ((buffer[offset] & 0x3f) != 0x0a) {
- sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+ sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
return;
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 26895ff247c5..620871efbf0a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -104,6 +104,12 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
(sdsk)->disk->disk_name, ##a) : \
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+#define sd_first_printk(prefix, sdsk, fmt, a...) \
+ do { \
+ if ((sdkp)->first_scan) \
+ sd_printk(prefix, sdsk, fmt, ##a); \
+ } while (0)
+
static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
{
switch (scmd->cmnd[0]) {
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4ea275..a7a691d0af7d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
struct bio *bio;
struct scsi_disk *sdkp;
struct sd_dif_tuple *sdt;
- unsigned int i, j;
u32 phys, virt;
sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
phys = hw_sector & 0xffffffff;
__rq_for_each_bio(bio, rq) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+ unsigned int j;
/* Already remapped? */
if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
break;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (be32_to_cpu(sdt->ref_tag) == virt)
sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
struct scsi_disk *sdkp;
struct bio *bio;
struct sd_dif_tuple *sdt;
- unsigned int i, j, sectors, sector_sz;
+ unsigned int j, sectors, sector_sz;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
phys >>= 3;
__rq_for_each_bio(bio, scmd->request) {
- struct bio_vec *iv;
+ struct bio_vec iv;
+ struct bvec_iter iter;
- virt = bio->bi_integrity->bip_sector & 0xffffffff;
+ virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
- bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page)
- + iv->bv_offset;
+ bip_for_each_vec(iv, bio->bi_integrity, iter) {
+ sdt = kmap_atomic(iv.bv_page)
+ + iv.bv_offset;
- for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+ for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
if (sectors == 0) {
kunmap_atomic(sdt);
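The sd_dif.c hunks above are part of the immutable-biovec conversion: iteration state moves out of the bio_vec array index into a separate struct bvec_iter, bip_for_each_vec() now yields a struct bio_vec by value, and the starting sector of the integrity payload is read from bip_iter.bi_sector. A stripped-down sketch of the resulting loop shape (kernel context assumed; the per-tuple remapping is elided):

	struct bio_vec iv;
	struct bvec_iter iter;

	bip_for_each_vec(iv, bio->bi_integrity, iter) {
		struct sd_dif_tuple *sdt = kmap_atomic(iv.bv_page) + iv.bv_offset;

		/* walk iv.bv_len bytes of protection tuples, remapping ref tags ... */
		kunmap_atomic(sdt);
	}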
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c428cf..80bfece1a2de 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/enclosure.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -448,27 +449,18 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
static void ses_match_to_enclosure(struct enclosure_device *edev,
struct scsi_device *sdev)
{
- unsigned char *buf;
unsigned char *desc;
- unsigned int vpd_len;
struct efd efd = {
.addr = 0,
};
- buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
- if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
- goto free;
-
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- vpd_len = ((buf[2] << 8) | buf[3]) + 4;
- kfree(buf);
- buf = kmalloc(vpd_len, GFP_KERNEL);
- if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
- goto free;
+ if (!sdev->vpd_pg83_len)
+ return;
- desc = buf + 4;
- while (desc < buf + vpd_len) {
+ desc = sdev->vpd_pg83 + 4;
+ while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
enum scsi_protocol proto = desc[0] >> 4;
u8 code_set = desc[0] & 0x0f;
u8 piv = desc[1] & 0x80;
@@ -478,25 +470,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
if (piv && code_set == 1 && assoc == 1
&& proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
- efd.addr = (u64)desc[4] << 56 |
- (u64)desc[5] << 48 |
- (u64)desc[6] << 40 |
- (u64)desc[7] << 32 |
- (u64)desc[8] << 24 |
- (u64)desc[9] << 16 |
- (u64)desc[10] << 8 |
- (u64)desc[11];
+ efd.addr = get_unaligned_be64(&desc[4]);
desc += len + 4;
}
- if (!efd.addr)
- goto free;
+ if (efd.addr) {
+ efd.dev = &sdev->sdev_gendev;
- efd.dev = &sdev->sdev_gendev;
-
- enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
- free:
- kfree(buf);
+ enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+ }
}
static int ses_intf_add(struct device *cdev,
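The ses.c change above stops issuing its own INQUIRY for VPD page 0x83 and instead walks the copy the SCSI core already caches in sdev->vpd_pg83, and it replaces the open-coded byte-by-byte assembly of the SAS address with get_unaligned_be64() from <asm/unaligned.h>. A small illustration of the helper on a designator (the wrapper function is hypothetical):

	#include <asm/unaligned.h>

	/* desc[4..11] holds the 8-byte big-endian SAS address of the designator. */
	static u64 example_sas_address(const unsigned char *desc)
	{
		return get_unaligned_be64(&desc[4]);
	}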
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a1d6986261a3..afc834e172c6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2198,12 +2198,19 @@ static int st_set_options(struct scsi_tape *STp, long options)
struct st_modedef *STm;
char *name = tape_name(STp);
struct cdev *cd0, *cd1;
+ struct device *d0, *d1;
STm = &(STp->modes[STp->current_mode]);
if (!STm->defined) {
- cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1];
+ cd0 = STm->cdevs[0];
+ cd1 = STm->cdevs[1];
+ d0 = STm->devs[0];
+ d1 = STm->devs[1];
memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
- STm->cdevs[0] = cd0; STm->cdevs[1] = cd1;
+ STm->cdevs[0] = cd0;
+ STm->cdevs[1] = cd1;
+ STm->devs[0] = d0;
+ STm->devs[1] = d1;
modes_defined = 1;
DEBC(printk(ST_DEB_MSG
"%s: Initialized mode %d definition from mode 0\n",
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 17d740427240..9969fa1ef7c4 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp = sdevice->hostdata;
+ if (!memp)
+ return;
+
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index f1e4b4148c75..a4abce9d526e 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -259,7 +259,7 @@ found:
instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
+ if (request_irq(instance->irq, t128_intr, 0, "t128",
instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 9c216e563568..5a03bb3bcfef 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -873,7 +873,7 @@ static int port_detect \
/* Board detected, allocate its IRQ */
if (request_irq(irq, do_interrupt_handler,
- IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+ (subversion == ESA) ? IRQF_SHARED : 0,
driver_name, (void *) &sha[j])) {
printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
goto freelock;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c3173dced870..16bfd50cd3fe 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -956,6 +956,10 @@ static void virtscsi_remove(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ unregister_hotcpu_notifier(&vscsi->nb);
virtscsi_remove_vqs(vdev);
return 0;
}
@@ -964,8 +968,17 @@ static int virtscsi_restore(struct virtio_device *vdev)
{
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
+ int err;
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ return err;
+
+ err = register_hotcpu_notifier(&vscsi->nb);
+ if (err)
+ vdev->config->del_vqs(vdev);
- return virtscsi_init(vdev, vscsi);
+ return err;
}
#endif
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9755ec0e812..c88e1468aad7 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's para-virtualized SCSI HBA.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -32,6 +32,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
#include "vmw_pvscsi.h"
@@ -44,7 +45,7 @@ MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
-#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
+#define PVSCSI_DEFAULT_QUEUE_DEPTH 254
#define SGL_SIZE PAGE_SIZE
struct pvscsi_sg_list {
@@ -62,6 +63,7 @@ struct pvscsi_ctx {
dma_addr_t dataPA;
dma_addr_t sensePA;
dma_addr_t sglPA;
+ struct completion *abort_cmp;
};
struct pvscsi_adapter {
@@ -71,6 +73,7 @@ struct pvscsi_adapter {
bool use_msi;
bool use_msix;
bool use_msg;
+ bool use_req_threshold;
spinlock_t hw_lock;
@@ -102,18 +105,22 @@ struct pvscsi_adapter {
/* Command line parameters */
-static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_ring_pages;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg = true;
+static bool pvscsi_use_req_threshold = true;
#define PVSCSI_RW (S_IRUSR | S_IWUSR)
module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
- __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
+ "[up to 16 targets],"
+ __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
+ "[for 16+ targets])");
module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
@@ -121,7 +128,7 @@ MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
- __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
+ __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -132,6 +139,10 @@ MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+module_param_named(use_req_threshold, pvscsi_use_req_threshold,
+ bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
+
static const struct pci_device_id pvscsi_pci_tbl[] = {
{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
{ 0 }
@@ -177,6 +188,7 @@ static void pvscsi_release_context(struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
ctx->cmd = NULL;
+ ctx->abort_cmp = NULL;
list_add(&ctx->list, &adapter->cmd_pool);
}
@@ -280,10 +292,15 @@ static int scsi_is_rw(unsigned char op)
static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
unsigned char op)
{
- if (scsi_is_rw(op))
- pvscsi_kick_rw_io(adapter);
- else
+ if (scsi_is_rw(op)) {
+ struct PVSCSIRingsState *s = adapter->rings_state;
+
+ if (!adapter->use_req_threshold ||
+ s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
+ pvscsi_kick_rw_io(adapter);
+ } else {
pvscsi_process_request_ring(adapter);
+ }
}
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
@@ -487,6 +504,35 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
}
}
+static int pvscsi_change_queue_depth(struct scsi_device *sdev,
+ int qdepth,
+ int reason)
+{
+ int max_depth;
+ struct Scsi_Host *shost = sdev->host;
+
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ /*
+ * We support only changing default.
+ */
+ return -EOPNOTSUPP;
+
+ max_depth = shost->can_queue;
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+ if (sdev->inquiry_len > 7)
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->simple_tags, sdev->ordered_tags,
+ sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
+ return sdev->queue_depth;
+}
+
/*
* Pull a completion descriptor off and pass the completion back
* to the SCSI mid layer.
@@ -496,15 +542,27 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
{
struct pvscsi_ctx *ctx;
struct scsi_cmnd *cmd;
+ struct completion *abort_cmp;
u32 btstat = e->hostStatus;
u32 sdstat = e->scsiStatus;
ctx = pvscsi_get_context(adapter, e->context);
cmd = ctx->cmd;
+ abort_cmp = ctx->abort_cmp;
pvscsi_unmap_buffers(adapter, ctx);
pvscsi_release_context(adapter, ctx);
- cmd->result = 0;
+ if (abort_cmp) {
+ /*
+ * The command was requested to be aborted. Just signal that
+ * the request completed and swallow the actual cmd completion
+ * here. The abort handler will post a completion for this
+ * command indicating that it got successfully aborted.
+ */
+ complete(abort_cmp);
+ return;
+ }
+ cmd->result = 0;
if (sdstat != SAM_STAT_GOOD &&
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
@@ -726,6 +784,8 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ int result = SUCCESS;
+ DECLARE_COMPLETION_ONSTACK(abort_cmp);
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
@@ -748,13 +808,40 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
goto out;
}
+ /*
+ * Mark that the command has been requested to be aborted and issue
+ * the abort.
+ */
+ ctx->abort_cmp = &abort_cmp;
+
pvscsi_abort_cmd(adapter, ctx);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ /* Wait for 2 secs for the completion. */
+ wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ spin_lock_irqsave(&adapter->hw_lock, flags);
- pvscsi_process_completion_ring(adapter);
+ if (!completion_done(&abort_cmp)) {
+ /*
+ * Failed to abort the command, unmark the fact that it
+ * was requested to be aborted.
+ */
+ ctx->abort_cmp = NULL;
+ result = FAILED;
+ scmd_printk(KERN_DEBUG, cmd,
+ "Failed to get completion for aborted cmd %p\n",
+ cmd);
+ goto out;
+ }
+
+ /*
+ * Successfully aborted the command.
+ */
+ cmd->result = (DID_ABORT << 16);
+ cmd->scsi_done(cmd);
out:
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- return SUCCESS;
+ return result;
}
/*
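The abort rework above turns pvscsi_abort() into a synchronous handshake with the completion path: the context is tagged with an on-stack completion, the abort is issued, the adapter lock is dropped while waiting up to two seconds, and pvscsi_complete_request() signals the completion instead of finishing the command. A condensed sketch of the two sides (locking and error paths trimmed; identifiers are those used in the patch):

	/* eh_abort_handler side */
	DECLARE_COMPLETION_ONSTACK(abort_cmp);
	ctx->abort_cmp = &abort_cmp;            /* mark the request as being aborted */
	pvscsi_abort_cmd(adapter, ctx);
	wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
	if (!completion_done(&abort_cmp))
		return FAILED;                  /* device never completed it */

	/* completion-ring side (pvscsi_complete_request) */
	if (ctx->abort_cmp) {
		complete(ctx->abort_cmp);       /* swallow the real completion */
		return;
	}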
@@ -911,6 +998,7 @@ static struct scsi_host_template pvscsi_template = {
.dma_boundary = UINT_MAX,
.max_sectors = 0xffff,
.use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = pvscsi_change_queue_depth,
.eh_abort_handler = pvscsi_abort,
.eh_device_reset_handler = pvscsi_device_reset,
.eh_bus_reset_handler = pvscsi_bus_reset,
@@ -1034,6 +1122,34 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
return 1;
}
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+ bool enable)
+{
+ u32 val;
+
+ if (!pvscsi_use_req_threshold)
+ return false;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+ val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+ if (val == -1) {
+ printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+ return false;
+ } else {
+ struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+ cmd_msg.enable = enable;
+ printk(KERN_INFO
+ "vmw_pvscsi: %sabling reqCallThreshold\n",
+ enable ? "en" : "dis");
+ pvscsi_write_cmd_desc(adapter,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+ &cmd_msg, sizeof(cmd_msg));
+ return pvscsi_reg_read(adapter,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+ }
+}
+
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
struct pvscsi_adapter *adapter = devp;
@@ -1236,11 +1352,12 @@ exit:
static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
- struct Scsi_Host *host;
- struct device *dev;
+ struct pvscsi_adapter adapter_temp;
+ struct Scsi_Host *host = NULL;
unsigned int i;
unsigned long flags = 0;
int error;
+ u32 max_id;
error = -ENODEV;
@@ -1258,34 +1375,19 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_disable_device;
}
- pvscsi_template.can_queue =
- min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
- PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- pvscsi_template.cmd_per_lun =
- min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
- host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
- if (!host) {
- printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
- goto out_disable_device;
- }
-
- adapter = shost_priv(host);
+ /*
+ * Use a temporary pvscsi_adapter struct until we know the number of
+ * targets on the adapter; after that, switch to the real allocated
+ * struct.
+ */
+ adapter = &adapter_temp;
memset(adapter, 0, sizeof(*adapter));
adapter->dev = pdev;
- adapter->host = host;
-
- spin_lock_init(&adapter->hw_lock);
-
- host->max_channel = 0;
- host->max_id = 16;
- host->max_lun = 1;
- host->max_cmd_len = 16;
-
adapter->rev = pdev->revision;
if (pci_request_regions(pdev, "vmw_pvscsi")) {
printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
- goto out_free_host;
+ goto out_disable_device;
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1301,7 +1403,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (i == DEVICE_COUNT_RESOURCE) {
printk(KERN_ERR
"vmw_pvscsi: adapter has no suitable MMIO region\n");
- goto out_release_resources;
+ goto out_release_resources_and_disable;
}
adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
@@ -1310,10 +1412,60 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_ERR
"vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
i, PVSCSI_MEM_SPACE_SIZE);
- goto out_release_resources;
+ goto out_release_resources_and_disable;
}
pci_set_master(pdev);
+
+ /*
+ * Ask the device for max number of targets before deciding the
+ * default pvscsi_ring_pages value.
+ */
+ max_id = pvscsi_get_max_targets(adapter);
+ printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+ if (pvscsi_ring_pages == 0)
+ /*
+ * Set the default: 8 ring pages for up to 16 targets,
+ * the maximum number of pages above that.
+ */
+ pvscsi_ring_pages = (max_id > 16) ?
+ PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+ PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+ printk(KERN_INFO
+ "vmw_pvscsi: setting ring_pages to %d\n",
+ pvscsi_ring_pages);
+
+ pvscsi_template.can_queue =
+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ pvscsi_template.cmd_per_lun =
+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+ if (!host) {
+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+ goto out_release_resources_and_disable;
+ }
+
+ /*
+ * Let's use the real pvscsi_adapter struct here onwards.
+ */
+ adapter = shost_priv(host);
+ memset(adapter, 0, sizeof(*adapter));
+ adapter->dev = pdev;
+ adapter->host = host;
+ /*
+ * Copy back what we already have to the allocated adapter struct.
+ */
+ adapter->rev = adapter_temp.rev;
+ adapter->mmioBase = adapter_temp.mmioBase;
+
+ spin_lock_init(&adapter->hw_lock);
+ host->max_channel = 0;
+ host->max_lun = 1;
+ host->max_cmd_len = 16;
+ host->max_id = max_id;
+
pci_set_drvdata(pdev, host);
ll_adapter_reset(adapter);
@@ -1327,13 +1479,6 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/*
- * Ask the device for max number of targets.
- */
- host->max_id = pvscsi_get_max_targets(adapter);
- dev = pvscsi_dev(adapter);
- dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
-
- /*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
@@ -1373,6 +1518,10 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
flags = IRQF_SHARED;
}
+ adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+ printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+ adapter->use_req_threshold ? "en" : "dis");
+
error = request_irq(adapter->irq, pvscsi_isr, flags,
"vmw_pvscsi", adapter);
if (error) {
@@ -1402,12 +1551,15 @@ out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
pvscsi_release_resources(adapter);
-out_free_host:
scsi_host_put(host);
out_disable_device:
pci_disable_device(pdev);
return error;
+
+out_release_resources_and_disable:
+ pvscsi_release_resources(adapter);
+ goto out_disable_device;
}
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 3546e8662e30..ce4588851274 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -1,7 +1,7 @@
/*
* VMware PVSCSI header file
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.5.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
@@ -117,8 +117,9 @@ enum PVSCSICommands {
PVSCSI_CMD_CONFIG = 7,
PVSCSI_CMD_SETUP_MSG_RING = 8,
PVSCSI_CMD_DEVICE_UNPLUG = 9,
+ PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10,
- PVSCSI_CMD_LAST = 10 /* has to be last */
+ PVSCSI_CMD_LAST = 11 /* has to be last */
};
/*
@@ -141,6 +142,14 @@ struct PVSCSICmdDescConfigCmd {
u32 _pad;
} __packed;
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+ u32 enable;
+} __packed;
+
enum PVSCSIConfigPageType {
PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
PVSCSI_CONFIG_PAGE_PHY = 0x1959,
@@ -261,7 +270,9 @@ struct PVSCSIRingsState {
u32 cmpConsIdx;
u32 cmpNumEntriesLog2;
- u8 _pad[104];
+ u32 reqCallThreshold;
+
+ u8 _pad[100];
u32 msgProdIdx;
u32 msgConsIdx;
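Together, the vmw_pvscsi.c and vmw_pvscsi.h hunks add driver-side request coalescing: PVSCSI_CMD_SETUP_REQCALLTHRESHOLD asks the device to publish reqCallThreshold in the shared rings state, and the submission path then rings the doorbell for read/write commands only once enough requests are queued. The decision, condensed from pvscsi_kick_io() above:

	struct PVSCSIRingsState *s = adapter->rings_state;

	/* Kick the device only when the backlog reaches the negotiated threshold. */
	if (!adapter->use_req_threshold ||
	    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
		pvscsi_kick_rw_io(adapter);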
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f9a6e4b0affe..32674236fec7 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@ static int wd7000_init(Adapter * host)
return 0;
- if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+ if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
printk("wd7000_init: can't get IRQ %d.\n", host->irq);
return (0);
}
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
index f5b4ca581541..d277b36eb389 100644
--- a/drivers/sfi/sfi_acpi.c
+++ b/drivers/sfi/sfi_acpi.c
@@ -60,9 +60,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
-#include <acpi/acpi.h>
-
-#include <linux/sfi.h>
+#include <linux/sfi_acpi.h>
#include "sfi_core.h"
/*
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e2dd2fbec5ee..efe1960af2b3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -118,6 +118,13 @@ config SPI_BCM63XX
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+config SPI_BCM63XX_HSSPI
+ tristate "Broadcom BCM63XX HS SPI controller driver"
+ depends on BCM63XX || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCM63XX SoCs.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -143,7 +150,7 @@ config SPI_BUTTERFLY
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
help
This enables dedicated general purpose SPI/Microwire1-compatible
master mode interface (SSI1) for CLPS711X-based CPUs.
@@ -159,7 +166,6 @@ config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on ARCH_DAVINCI || ARCH_KEYSTONE
select SPI_BITBANG
- select TI_EDMA
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
@@ -206,7 +212,6 @@ config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
- default m if IMX_HAVE_PLATFORM_SPI_IMX
help
This enables using the Freescale i.MX SPI controllers in master
mode.
@@ -264,6 +269,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select SPI_BITBANG
+ select REGMAP_MMIO
depends on SOC_VF610 || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
@@ -301,6 +307,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -369,10 +376,23 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
- depends on (SUPERH || ARCH_SHMOBILE) && SH_DMAE_BASE
+ tristate "Renesas RSPI/QSPI controller"
+ depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
+
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_MSM_DT || (ARM && COMPILE_TEST)
+ help
+ The Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO) for
+ the serial peripheral interface (SPI) mini-core. In master mode,
+ SPI supports up to 50 MHz, up to four chip selects, a programmable
+ data path from 4 bits to 32 bits and numerous protocol variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
@@ -407,8 +427,8 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
- depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
- select SPI_BITBANG
+ depends on HAVE_CLK
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -438,6 +458,19 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SUN4I
+ tristate "Allwinner A10 SoCs SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ SPI driver for Allwinner sun4i, sun5i and sun7i SoCs
+
+config SPI_SUN6I
+ tristate "Allwinner A31 SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ This enables using the SPI controller on the Allwinner A31 SoCs.
+
config SPI_MXS
tristate "Freescale MXS SPI controller"
depends on ARCH_MXS
@@ -448,6 +481,7 @@ config SPI_MXS
config SPI_TEGRA114
tristate "NVIDIA Tegra114 SPI Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller
is different than the older SoCs SPI controller and also register interface
@@ -456,6 +490,7 @@ config SPI_TEGRA114
config SPI_TEGRA20_SFLASH
tristate "Nvidia Tegra20 Serial flash Controller"
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for Nvidia Tegra20 Serial flash Controller interface.
The main usecase of this controller is to use spi flash as boot
@@ -464,16 +499,10 @@ config SPI_TEGRA20_SFLASH
config SPI_TEGRA20_SLINK
tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
-config SPI_TI_SSP
- tristate "TI Sequencer Serial Port - SPI Support"
- depends on MFD_TI_SSP
- help
- This selects an SPI master implementation using a TI sequencer
- serial port.
-
config SPI_TOPCLIFF_PCH
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI
@@ -509,6 +538,19 @@ config SPI_XILINX
Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+config SPI_XTENSA_XTFPGA
+ tristate "Xtensa SPI controller for xtfpga"
+ depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ SPI driver for xtfpga SPI master controller.
+
+ This simple SPI master controller is built into xtfpga bitstreams
+ and is used to control the daughterboard audio codec. It always
+ transfers 16-bit words in SPI mode 0, automatically asserting CS on
+ transfer start and deasserting it at the end.
+
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
depends on ARCH_W90X900
@@ -535,7 +577,7 @@ config SPI_DW_MID_DMA
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && HAVE_CLK
+ depends on SPI_DESIGNWARE
#
# There are lots of SPI device types, with sensors and memory
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ab8d8644af0e..bd792669e563 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
@@ -58,6 +59,7 @@ spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
@@ -69,12 +71,14 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
+obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
-obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 595b62cb545d..5b5709a5c957 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -200,7 +199,6 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
static int altera_spi_probe(struct platform_device *pdev)
{
- struct altera_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct altera_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -214,14 +212,14 @@ static int altera_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 16;
master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->dev.of_node = pdev->dev.of_node;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
- if (!hw->bitbang.master)
- return err;
hw->bitbang.chipselect = altera_spi_chipsel;
hw->bitbang.txrx_bufs = altera_spi_txrx;
@@ -247,9 +245,6 @@ static int altera_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- /* find platform data */
- if (!platp)
- hw->bitbang.master->dev.of_node = pdev->dev.of_node;
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 821bf7ac218d..3898b0b9ee77 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -132,9 +131,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_HIGH;
- else
flags |= GPIOF_INIT_LOW;
+ else
+ flags |= GPIOF_INIT_HIGH;
status = gpio_request_one(cdata->gpio, flags,
dev_name(&spi->dev));
@@ -243,21 +242,21 @@ static int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- sp->base = ioremap(r->start, resource_size(r));
+ sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!sp->base) {
ret = -ENXIO;
goto err_put_master;
}
- sp->clk = clk_get(&pdev->dev, "ahb");
+ sp->clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
- goto err_unmap;
+ goto err_put_master;
}
ret = clk_enable(sp->clk);
if (ret)
- goto err_clk_put;
+ goto err_put_master;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
@@ -280,10 +279,6 @@ err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable(sp->clk);
-err_clk_put:
- clk_put(sp->clk);
-err_unmap:
- iounmap(sp->base);
err_put_master:
spi_master_put(sp->bitbang.master);
@@ -297,8 +292,6 @@ static int ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable(sp->clk);
- clk_put(sp->clk);
- iounmap(sp->base);
spi_master_put(sp->bitbang.master);
return 0;
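The spi-altera and spi-ath79 changes above are routine devm conversions: ioremap() and clk_get() become devm_ioremap() and devm_clk_get(), so the matching iounmap()/clk_put() calls and their error-path labels disappear because the resources are released automatically when the device is unbound. A minimal sketch of the resulting probe shape (hypothetical driver, error handling reduced to the essentials):

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;
		struct clk *clk;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!r)
			return -ENODEV;

		base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
		if (!base)
			return -ENXIO;

		clk = devm_clk_get(&pdev->dev, "ahb");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* no iounmap()/clk_put() needed on the error or remove paths */
		return 0;
	}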
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 273db0beb2b8..8005f9869481 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,6 +25,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -189,6 +189,8 @@
*/
#define DMA_MIN_BYTES 16
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
struct atmel_spi_dma {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
@@ -220,17 +222,13 @@ struct atmel_spi {
int irq;
struct clk *clk;
struct platform_device *pdev;
- struct spi_device *stay;
- u8 stopping;
- struct list_head queue;
- struct tasklet_struct tasklet;
struct spi_transfer *current_transfer;
unsigned long current_remaining_bytes;
- struct spi_transfer *next_transfer;
- unsigned long next_remaining_bytes;
int done_status;
+ struct completion xfer_completion;
+
/* scratch buffer */
void *buffer;
dma_addr_t buffer_dma;
@@ -241,6 +239,9 @@ struct atmel_spi {
bool use_pdc;
/* dmaengine data */
struct atmel_spi_dma dma;
+
+ bool keep_cs;
+ bool cs_active;
};
/* Controller-specific per-slave state */
@@ -376,17 +377,6 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
-static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
- struct spi_transfer *xfer)
-{
- return msg->transfers.prev == &xfer->transfer_list;
-}
-
-static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
-{
- return xfer->delay_usecs == 0 && !xfer->cs_change;
-}
-
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
struct dma_slave_config *slave_config,
u8 bits_per_word)
@@ -513,23 +503,20 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
- /* trigger SPI tasklet */
- tasklet_schedule(&as->tasklet);
+ complete(&as->xfer_completion);
}
/*
* Next transfer using PIO.
- * lock is held, spi tasklet is blocked
*/
static void atmel_spi_next_xfer_pio(struct spi_master *master,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
- as->current_remaining_bytes = xfer->len;
-
/* Make sure data is not remaining in RDR */
spi_readl(as, RDR);
while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
@@ -537,13 +524,14 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
cpu_relax();
}
- if (xfer->tx_buf)
+ if (xfer->tx_buf) {
if (xfer->bits_per_word > 8)
- spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
else
- spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
- else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+ } else {
spi_writel(as, TDR, 0);
+ }
dev_dbg(master->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -556,7 +544,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
/*
* Submit next transfer for DMA.
- * lock is held, spi tasklet is blocked
*/
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
@@ -694,74 +681,90 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
*plen = len;
}
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 scbr, csr;
+ unsigned long bus_hz;
+
+ /* v1 chips start out at half the peripheral bus speed. */
+ bus_hz = clk_get_rate(as->clk);
+ if (!atmel_spi_is_v2(as))
+ bus_hz /= 2;
+
+ /*
+ * Calculate the lowest divider that satisfies the
+ * constraint, assuming div32/fdiv/mbz == 0.
+ */
+ if (xfer->speed_hz)
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+ else
+ /*
+ * This can happen if max_speed_hz is zero.
+ * In this case, we set the lowest possible speed.
+ */
+ scbr = 0xff;
+
+ /*
+ * If the resulting divider doesn't fit into the
+ * register bitfield, we can't satisfy the constraint.
+ */
+ if (scbr >= (1 << SPI_SCBR_SIZE)) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz/255);
+ return -EINVAL;
+ }
+ if (scbr == 0) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz);
+ return -EINVAL;
+ }
+ csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = SPI_BFINS(SCBR, scbr, csr);
+ spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+
+ return 0;
+}
+
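atmel_spi_set_xfer_speed() above moves the baud-rate divider calculation from setup time to per-transfer time so that xfer->speed_hz can differ between transfers. A worked example of the arithmetic (numbers are illustrative): with a 133 MHz peripheral clock on a v2 controller and a 5 MHz transfer, scbr = DIV_ROUND_UP(133000000, 5000000) = 27, giving an actual clock of 133 MHz / 27 ≈ 4.93 MHz; the two error checks then reject dividers that overflow the SCBR bitfield or come out as zero.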
/*
* Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
- struct spi_message *msg)
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- u32 len, remaining;
- u32 ieval;
+ u32 len;
dma_addr_t tx_dma, rx_dma;
- if (!as->current_transfer)
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- else if (!as->next_transfer)
- xfer = list_entry(as->current_transfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- else
- xfer = NULL;
-
- if (xfer) {
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-
- len = xfer->len;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- remaining = xfer->len - len;
-
- spi_writel(as, RPR, rx_dma);
- spi_writel(as, TPR, tx_dma);
-
- if (msg->spi->bits_per_word > 8)
- len >>= 1;
- spi_writel(as, RCR, len);
- spi_writel(as, TCR, len);
-
- dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
- xfer, xfer->len, xfer->tx_buf,
- (unsigned long long)xfer->tx_dma, xfer->rx_buf,
- (unsigned long long)xfer->rx_dma);
- } else {
- xfer = as->next_transfer;
- remaining = as->next_remaining_bytes;
- }
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- as->current_transfer = xfer;
- as->current_remaining_bytes = remaining;
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
- if (remaining > 0)
- len = remaining;
- else if (!atmel_spi_xfer_is_last(msg, xfer)
- && atmel_spi_xfer_can_be_chained(xfer)) {
- xfer = list_entry(xfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- len = xfer->len;
- } else
- xfer = NULL;
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
- as->next_transfer = xfer;
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
- if (xfer) {
- u32 total;
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
- total = len;
+ if (as->current_remaining_bytes) {
+ len = as->current_remaining_bytes;
atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
- as->next_remaining_bytes = total - len;
+ as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
@@ -776,11 +779,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
(unsigned long long)xfer->rx_dma);
- ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
- } else {
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
}
/* REVISIT: We're waiting for ENDRX before we start the next
@@ -793,83 +791,11 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, IER, ieval);
+ spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
/*
- * Choose way to submit next transfer and start it.
- * lock is held, spi tasklet is blocked
- */
-static void atmel_spi_dma_next_xfer(struct spi_master *master,
- struct spi_message *msg)
-{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- u32 remaining, len;
-
- remaining = as->current_remaining_bytes;
- if (remaining) {
- xfer = as->current_transfer;
- len = remaining;
- } else {
- if (!as->current_transfer)
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- else
- xfer = list_entry(
- as->current_transfer->transfer_list.next,
- struct spi_transfer, transfer_list);
-
- as->current_transfer = xfer;
- len = xfer->len;
- }
-
- if (atmel_spi_use_dma(as, xfer)) {
- u32 total = len;
- if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
- as->current_remaining_bytes = total - len;
- return;
- } else {
- dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n");
- }
- }
-
- /* use PIO if error appened using DMA */
- atmel_spi_next_xfer_pio(master, xfer);
-}
-
-static void atmel_spi_next_message(struct spi_master *master)
-{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_device *spi;
-
- BUG_ON(as->current_transfer);
-
- msg = list_entry(as->queue.next, struct spi_message, queue);
- spi = msg->spi;
-
- dev_dbg(master->dev.parent, "start message %p for %s\n",
- msg, dev_name(&spi->dev));
-
- /* select chip if it's not still active */
- if (as->stay) {
- if (as->stay != spi) {
- cs_deactivate(as, as->stay);
- cs_activate(as, spi);
- }
- as->stay = NULL;
- } else
- cs_activate(as, spi);
-
- if (as->use_pdc)
- atmel_spi_pdc_next_xfer(master, msg);
- else
- atmel_spi_dma_next_xfer(master, msg);
-}
-
-/*
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
* - If the buffer is valid, so is its DMA address
@@ -924,41 +850,7 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}
-static void
-atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int stay)
-{
- if (!stay || as->done_status < 0)
- cs_deactivate(as, msg->spi);
- else
- as->stay = msg->spi;
-
- list_del(&msg->queue);
- msg->status = as->done_status;
-
- dev_dbg(master->dev.parent,
- "xfer complete: %u bytes transferred\n",
- msg->actual_length);
-
- atmel_spi_unlock(as);
- msg->complete(msg->context);
- atmel_spi_lock(as);
-
- as->current_transfer = NULL;
- as->next_transfer = NULL;
- as->done_status = 0;
-
- /* continue if needed */
- if (list_empty(&as->queue) || as->stopping) {
- if (as->use_pdc)
- atmel_spi_disable_pdc_transfer(as);
- } else {
- atmel_spi_next_message(master);
- }
-}
-
/* Called from IRQ
- * lock is held
*
* Must update "current_remaining_bytes" to keep track of data
* to transfer.
@@ -966,9 +858,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
- u8 *txp;
u8 *rxp;
- u16 *txp16;
u16 *rxp16;
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
@@ -990,96 +880,12 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
} else {
as->current_remaining_bytes--;
}
-
- if (as->current_remaining_bytes) {
- if (xfer->tx_buf) {
- if (xfer->bits_per_word > 8) {
- txp16 = (u16 *)(((u8 *)xfer->tx_buf)
- + xfer_pos + 2);
- spi_writel(as, TDR, *txp16);
- } else {
- txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
- spi_writel(as, TDR, *txp);
- }
- } else {
- spi_writel(as, TDR, 0);
- }
- }
-}
-
-/* Tasklet
- * Called from DMA callback + pio transfer and overrun IRQ.
- */
-static void atmel_spi_tasklet_func(unsigned long data)
-{
- struct spi_master *master = (struct spi_master *)data;
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
-
- dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");
-
- atmel_spi_lock(as);
-
- xfer = as->current_transfer;
-
- if (xfer == NULL)
- /* already been there */
- goto tasklet_out;
-
- msg = list_entry(as->queue.next, struct spi_message, queue);
-
- if (as->current_remaining_bytes == 0) {
- if (as->done_status < 0) {
- /* error happened (overrun) */
- if (atmel_spi_use_dma(as, xfer))
- atmel_spi_stop_dma(as);
- } else {
- /* only update length if no error */
- msg->actual_length += xfer->len;
- }
-
- if (atmel_spi_use_dma(as, xfer))
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
- /* report completed (or erroneous) message */
- atmel_spi_msg_done(master, as, msg, xfer->cs_change);
- } else {
- if (xfer->cs_change) {
- cs_deactivate(as, msg->spi);
- udelay(1);
- cs_activate(as, msg->spi);
- }
-
- /*
- * Not done yet. Submit the next transfer.
- *
- * FIXME handle protocol options for xfer
- */
- atmel_spi_dma_next_xfer(master, msg);
- }
- } else {
- /*
- * Keep going, we still have data to send in
- * the current transfer.
- */
- atmel_spi_dma_next_xfer(master, msg);
- }
-
-tasklet_out:
- atmel_spi_unlock(as);
}
/* Interrupt
*
* No need for locking in this Interrupt handler: done_status is the
- * only information modified. What we need is the update of this field
- * before tasklet runs. This is ensured by using barrier.
+ * only information modified.
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1107,8 +913,6 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
*
* We will also not process any remaning transfers in
* the message.
- *
- * All actions are done in tasklet with done_status indication
*/
as->done_status = -EIO;
smp_wmb();
@@ -1116,7 +920,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- tasklet_schedule(&as->tasklet);
+ complete(&as->xfer_completion);
} else if (pending & SPI_BIT(RDRF)) {
atmel_spi_lock(as);
@@ -1125,11 +929,10 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
xfer = as->current_transfer;
atmel_spi_pump_pio_data(as, xfer);
- if (!as->current_remaining_bytes) {
- /* no more data to xfer, kick tasklet */
+ if (!as->current_remaining_bytes)
spi_writel(as, IDR, pending);
- tasklet_schedule(&as->tasklet);
- }
+
+ complete(&as->xfer_completion);
}
atmel_spi_unlock(as);
@@ -1147,116 +950,35 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
u32 status, pending, imr;
int ret = IRQ_NONE;
- atmel_spi_lock(as);
-
- xfer = as->current_transfer;
- msg = list_entry(as->queue.next, struct spi_message, queue);
-
imr = spi_readl(as, IMR);
status = spi_readl(as, SR);
pending = status & imr;
if (pending & SPI_BIT(OVRES)) {
- int timeout;
ret = IRQ_HANDLED;
spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
| SPI_BIT(OVRES)));
- /*
- * When we get an overrun, we disregard the current
- * transfer. Data will not be copied back from any
- * bounce buffer and msg->actual_len will not be
- * updated with the last xfer.
- *
- * We will also not process any remaning transfers in
- * the message.
- *
- * First, stop the transfer and unmap the DMA buffers.
- */
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
- spi_readl(as, TCR), spi_readl(as, RCR));
-
- /*
- * Clean up DMA registers and make sure the data
- * registers are empty.
- */
- spi_writel(as, RNCR, 0);
- spi_writel(as, TNCR, 0);
- spi_writel(as, RCR, 0);
- spi_writel(as, TCR, 0);
- for (timeout = 1000; timeout; timeout--)
- if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
- break;
- if (!timeout)
- dev_warn(master->dev.parent,
- "timeout waiting for TXEMPTY");
- while (spi_readl(as, SR) & SPI_BIT(RDRF))
- spi_readl(as, RDR);
-
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
as->done_status = -EIO;
- atmel_spi_msg_done(master, as, msg, 0);
+
+ complete(&as->xfer_completion);
+
} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, pending);
- if (as->current_remaining_bytes == 0) {
- msg->actual_length += xfer->len;
-
- if (!msg->is_dma_mapped)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- /* REVISIT: udelay in irq is unfriendly */
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
-
- if (atmel_spi_xfer_is_last(msg, xfer)) {
- /* report completed message */
- atmel_spi_msg_done(master, as, msg,
- xfer->cs_change);
- } else {
- if (xfer->cs_change) {
- cs_deactivate(as, msg->spi);
- udelay(1);
- cs_activate(as, msg->spi);
- }
-
- /*
- * Not done yet. Submit the next transfer.
- *
- * FIXME handle protocol options for xfer
- */
- atmel_spi_pdc_next_xfer(master, msg);
- }
- } else {
- /*
- * Keep going, we still have data to send in
- * the current transfer.
- */
- atmel_spi_pdc_next_xfer(master, msg);
- }
+ complete(&as->xfer_completion);
}
- atmel_spi_unlock(as);
-
return ret;
}
@@ -1264,24 +986,13 @@ static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
struct atmel_spi_device *asd;
- u32 scbr, csr;
+ u32 csr;
unsigned int bits = spi->bits_per_word;
- unsigned long bus_hz;
unsigned int npcs_pin;
int ret;
as = spi_master_get_devdata(spi->master);
- if (as->stopping)
- return -ESHUTDOWN;
-
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_dbg(&spi->dev,
- "setup: invalid chipselect %u (%u defined)\n",
- spi->chip_select, spi->master->num_chipselect);
- return -EINVAL;
- }
-
/* see notes above re chipselect */
if (!atmel_spi_is_v2(as)
&& spi->chip_select == 0
@@ -1290,33 +1001,7 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- /* v1 chips start out at half the peripheral bus speed. */
- bus_hz = clk_get_rate(as->clk);
- if (!atmel_spi_is_v2(as))
- bus_hz /= 2;
-
- if (spi->max_speed_hz) {
- /*
- * Calculate the lowest divider that satisfies the
- * constraint, assuming div32/fdiv/mbz == 0.
- */
- scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
-
- /*
- * If the resulting divider doesn't fit into the
- * register bitfield, we can't satisfy the constraint.
- */
- if (scbr >= (1 << SPI_SCBR_SIZE)) {
- dev_dbg(&spi->dev,
- "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
- spi->max_speed_hz, scbr, bus_hz/255);
- return -EINVAL;
- }
- } else
- /* speed zero means "as slow as possible" */
- scbr = 0xff;
-
- csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
+ csr = SPI_BF(BITS, bits - 8);
if (spi->mode & SPI_CPOL)
csr |= SPI_BIT(CPOL);
if (!(spi->mode & SPI_CPHA))
@@ -1352,19 +1037,13 @@ static int atmel_spi_setup(struct spi_device *spi)
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
- } else {
- atmel_spi_lock(as);
- if (as->stay == spi)
- as->stay = NULL;
- cs_deactivate(as, spi);
- atmel_spi_unlock(as);
}
asd->csr = csr;
dev_dbg(&spi->dev,
- "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
- bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
+ "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+ bits, spi->mode, spi->chip_select, csr);
if (!atmel_spi_is_v2(as))
spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
@@ -1372,103 +1051,207 @@ static int atmel_spi_setup(struct spi_device *spi)
return 0;
}
-static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int atmel_spi_one_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
{
struct atmel_spi *as;
- struct spi_transfer *xfer;
- struct device *controller = spi->master->dev.parent;
+ struct spi_device *spi = msg->spi;
u8 bits;
+ u32 len;
struct atmel_spi_device *asd;
+ int timeout;
+ int ret;
- as = spi_master_get_devdata(spi->master);
-
- dev_dbg(controller, "new message %p submitted for %s\n",
- msg, dev_name(&spi->dev));
+ as = spi_master_get_devdata(master);
- if (unlikely(list_empty(&msg->transfers)))
+ if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
return -EINVAL;
+ }
- if (as->stopping)
- return -ESHUTDOWN;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
- dev_dbg(&spi->dev, "missing rx or tx buf\n");
- return -EINVAL;
+ if (xfer->bits_per_word) {
+ asd = spi->controller_state;
+ bits = (asd->csr >> 4) & 0xf;
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
+ return -ENOPROTOOPT;
}
+ }
+
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting.
+ */
+ if ((!msg->is_dma_mapped)
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
+
+ atmel_spi_set_xfer_speed(as, msg->spi, xfer);
- if (xfer->bits_per_word) {
- asd = spi->controller_state;
- bits = (asd->csr >> 4) & 0xf;
- if (bits != xfer->bits_per_word - 8) {
- dev_dbg(&spi->dev,
- "you can't yet change bits_per_word in transfers\n");
- return -ENOPROTOOPT;
+ as->done_status = 0;
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = xfer->len;
+ while (as->current_remaining_bytes) {
+ reinit_completion(&as->xfer_completion);
+
+ if (as->use_pdc) {
+ atmel_spi_pdc_next_xfer(master, msg, xfer);
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ len = as->current_remaining_bytes;
+ ret = atmel_spi_next_xfer_dma_submit(master,
+ xfer, &len);
+ if (ret) {
+ dev_err(&spi->dev,
+ "unable to use DMA, fallback to PIO\n");
+ atmel_spi_next_xfer_pio(master, xfer);
+ } else {
+ as->current_remaining_bytes -= len;
}
+ } else {
+ atmel_spi_next_xfer_pio(master, xfer);
}
- if (xfer->bits_per_word > 8) {
- if (xfer->len % 2) {
- dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
- return -EINVAL;
- }
+ ret = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(&spi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ as->done_status = -EIO;
+ } else {
+ ret = 0;
}
- /* FIXME implement these protocol options!! */
- if (xfer->speed_hz < spi->max_speed_hz) {
- dev_dbg(&spi->dev, "can't change speed in transfer\n");
- return -ENOPROTOOPT;
+ if (as->done_status)
+ break;
+ }
+
+ if (as->done_status) {
+ if (as->use_pdc) {
+ dev_warn(master->dev.parent,
+ "overrun (%u/%u remaining)\n",
+ spi_readl(as, TCR), spi_readl(as, RCR));
+
+ /*
+ * Clean up DMA registers and make sure the data
+ * registers are empty.
+ */
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ spi_writel(as, RCR, 0);
+ spi_writel(as, TCR, 0);
+ for (timeout = 1000; timeout; timeout--)
+ if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+ break;
+ if (!timeout)
+ dev_warn(master->dev.parent,
+ "timeout waiting for TXEMPTY");
+ while (spi_readl(as, SR) & SPI_BIT(RDRF))
+ spi_readl(as, RDR);
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ atmel_spi_stop_dma(as);
}
- /*
- * DMA map early, for performance (empties dcache ASAP) and
- * better fault reporting.
- */
- if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
- || as->use_pdc)) {
- if (atmel_spi_dma_map_xfer(as, xfer) < 0)
- return -ENOMEM;
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ return 0;
+
+ } else {
+ /* only update length if no error */
+ msg->actual_length += xfer->len;
+ }
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ as->keep_cs = true;
+ } else {
+ as->cs_active = !as->cs_active;
+ if (as->cs_active)
+ cs_activate(as, msg->spi);
+ else
+ cs_deactivate(as, msg->spi);
}
}
-#ifdef VERBOSE
+ return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct atmel_spi *as;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret = 0;
+
+ as = spi_master_get_devdata(master);
+
+ dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+ msg, dev_name(&spi->dev));
+
+ atmel_spi_lock(as);
+ cs_activate(as, spi);
+
+ as->cs_active = true;
+ as->keep_cs = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = atmel_spi_one_transfer(master, msg, xfer);
+ if (ret)
+ goto msg_done;
+ }
+
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_dbg(controller,
- " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ dev_dbg(&spi->dev,
+ " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
xfer, xfer->len,
- xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ xfer->tx_buf, &xfer->tx_dma,
+ xfer->rx_buf, &xfer->rx_dma);
}
-#endif
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
+msg_done:
+ if (!as->keep_cs)
+ cs_deactivate(as, msg->spi);
- atmel_spi_lock(as);
- list_add_tail(&msg->queue, &as->queue);
- if (!as->current_transfer)
- atmel_spi_next_message(spi->master);
atmel_spi_unlock(as);
- return 0;
+ msg->status = as->done_status;
+ spi_finalize_current_message(spi->master);
+
+ return ret;
}
static void atmel_spi_cleanup(struct spi_device *spi)
{
- struct atmel_spi *as = spi_master_get_devdata(spi->master);
struct atmel_spi_device *asd = spi->controller_state;
unsigned gpio = (unsigned) spi->controller_data;
if (!asd)
return;
- atmel_spi_lock(as);
- if (as->stay == spi) {
- as->stay = NULL;
- cs_deactivate(as, spi);
- }
- atmel_spi_unlock(as);
-
spi->controller_state = NULL;
gpio_free(gpio);
kfree(asd);
@@ -1502,6 +1285,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct atmel_spi *as;
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
@@ -1510,7 +1296,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- clk = clk_get(&pdev->dev, "spi_clk");
+ clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -1527,7 +1313,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
- master->transfer = atmel_spi_transfer;
+ master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
platform_set_drvdata(pdev, master);
@@ -1543,7 +1329,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free;
spin_lock_init(&as->lock);
- INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
as->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1555,6 +1340,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->irq = irq;
as->clk = clk;
+ init_completion(&as->xfer_completion);
+
atmel_get_caps(as);
as->use_dma = false;
@@ -1570,14 +1357,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
if (as->use_pdc) {
- ret = request_irq(irq, atmel_spi_pdc_interrupt, 0,
- dev_name(&pdev->dev), master);
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+ 0, dev_name(&pdev->dev), master);
} else {
- tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
- (unsigned long)master);
-
- ret = request_irq(irq, atmel_spi_pio_interrupt, 0,
- dev_name(&pdev->dev), master);
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+ 0, dev_name(&pdev->dev), master);
}
if (ret)
goto out_unmap_regs;
@@ -1603,7 +1387,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_free_dma;
@@ -1617,15 +1401,11 @@ out_free_dma:
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
- free_irq(irq, master);
out_unmap_regs:
out_free_buffer:
- if (!as->use_pdc)
- tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
out_free:
- clk_put(clk);
spi_master_put(master);
return ret;
}
@@ -1634,12 +1414,9 @@ static int atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
- struct spi_message *msg;
- struct spi_transfer *xfer;
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
- as->stopping = 1;
if (as->use_dma) {
atmel_spi_stop_dma(as);
atmel_spi_release_dma(as);
@@ -1650,28 +1427,10 @@ static int atmel_spi_remove(struct platform_device *pdev)
spi_readl(as, SR);
spin_unlock_irq(&as->lock);
- /* Terminate remaining queued transfers */
- list_for_each_entry(msg, &as->queue, queue) {
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!msg->is_dma_mapped
- && (atmel_spi_use_dma(as, xfer)
- || as->use_pdc))
- atmel_spi_dma_unmap_xfer(master, xfer);
- }
- msg->status = -ESHUTDOWN;
- msg->complete(msg->context);
- }
-
- if (!as->use_pdc)
- tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
clk_disable_unprepare(as->clk);
- clk_put(as->clk);
- free_irq(as->irq, master);
-
- spi_unregister_master(master);
return 0;
}
@@ -1681,8 +1440,19 @@ static int atmel_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
clk_disable_unprepare(as->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -1690,9 +1460,18 @@ static int atmel_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
clk_prepare_enable(as->clk);
- return 0;
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
}
static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
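The atmel conversion above replaces the driver's private message queue with the
core's message pump: the controller now implements ->transfer_one_message() and
hands each finished message back with spi_finalize_current_message(). A minimal
sketch of that callback shape, with my_ctlr and my_do_xfer() as hypothetical
placeholders (only the spi_* calls are core API), not the atmel driver's code:

#include <linux/spi/spi.h>

static int my_transfer_one_message(struct spi_master *master,
				   struct spi_message *msg)
{
	struct my_ctlr *ctlr = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = my_do_xfer(ctlr, msg->spi, xfer);	/* hardware specific */
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(master);	/* give the message back */
	return ret;
}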
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index c4141c92bcff..67375a11d4bd 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -55,8 +55,6 @@ struct au1550_spi {
volatile psc_spi_t __iomem *regs;
int irq;
- unsigned freq_max;
- unsigned freq_min;
unsigned len;
unsigned tx_count;
@@ -248,11 +246,8 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
hz = t->speed_hz;
}
- if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) {
- dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n",
- hz);
+ if (!hz)
return -EINVAL;
- }
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
@@ -287,23 +282,6 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static int au1550_spi_setup(struct spi_device *spi)
-{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
-
- if (spi->max_speed_hz == 0)
- spi->max_speed_hz = hw->freq_max;
- if (spi->max_speed_hz > hw->freq_max
- || spi->max_speed_hz < hw->freq_min)
- return -EINVAL;
- /*
- * NOTE: cannot change speed and other hw settings immediately,
- * otherwise sharing of spi bus is not possible,
- * so do not call setupxfer(spi, NULL) here
- */
- return 0;
-}
-
/*
* for dma spi transfers, we have to setup rx channel, otherwise there is
* no reliable way how to recognize that spi transfer is done
@@ -838,7 +816,6 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw->bitbang.master = hw->master;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
- hw->bitbang.master->setup = au1550_spi_setup;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
if (hw->usedma) {
@@ -909,8 +886,9 @@ static int au1550_spi_probe(struct platform_device *pdev)
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
- hw->freq_max = hw->pdata->mainclk_hz / min_div;
- hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
au1550_spi_setup_psc_as_spi(hw);
@@ -999,6 +977,15 @@ static int __init au1550_spi_init(void)
* create memory device with 8 bits dev_devwidth
* needed for proper byte ordering to spi fifo
*/
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ break;
+ default:
+ return -ENODEV;
+ }
+
if (usedma) {
ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
if (!ddma_memid)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 9025edd7dc45..69167456ec1e 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -315,7 +315,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->mode_bits = BCM2835_SPI_MODE_BITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->bus_num = -1;
master->num_chipselect = 3;
master->transfer_one_message = bcm2835_spi_transfer_one;
master->dev.of_node = pdev->dev.of_node;
@@ -347,8 +346,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
- err = request_irq(bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
+ dev_name(&pdev->dev), master);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
@@ -361,13 +360,11 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
- goto out_free_irq;
+ goto out_clk_disable;
}
return 0;
-out_free_irq:
- free_irq(bs->irq, master);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_master_put:
@@ -380,8 +377,6 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- free_irq(bs->irq, master);
-
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644
index 000000000000..5a211e98383b
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -0,0 +1,475 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs;
+ u8 __iomem *fifo;
+
+ u32 speed_hz;
+ u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned cs,
+ bool active)
+{
+ u32 reg;
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ reg &= ~BIT(cs);
+ if (active == !(bs->cs_polarity & BIT(cs)))
+ reg |= BIT(cs);
+
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned chip_select = spi->chip_select;
+ u16 opcode = 0;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
+
+ __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ __raw_writew(opcode | curr_step, bs->fifo);
+
+ /* enable interrupt */
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW,
+ bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+
+ pending -= curr_step;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ /* only change actual polarities if there is no transfer */
+ if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ int dummy_cs;
+ u32 reg;
+
+ /* This controller does not support keeping CS active during idle.
+ * To work around this, we use the following ugly hack:
+ *
+ * a. Invert the target chip select's polarity so it will be active.
+ * b. Select a "dummy" chip select to use as the hardware target.
+ * c. Invert the dummy chip select's polarity so it will be inactive
+ * during the actual transfers.
+ * d. Tell the hardware to send to the dummy chip select. Thanks to
+ * the multiplexed nature of SPI the actual target will receive
+ * the transfer and we see its response.
+ *
+ * e. At the end restore the polarities again to their default values.
+ */
+
+ dummy_cs = !spi->chip_select;
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcm63xx_hsspi_do_txrx(spi, t);
+ if (status)
+ break;
+
+ msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ }
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+ reg |= bs->cs_polarity;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm63xx_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int irq, ret;
+ u32 reg, rate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ return -ENXIO;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ clk = devm_clk_get(dev, "hsspi");
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->regs = regs;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+ mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
+
+ master->bus_num = HSSPI_BUS_NUM;
+ master->num_chipselect = 8;
+ master->setup = bcm63xx_hsspi_setup;
+ master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+
+ if (ret)
+ goto out_put_master;
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_put_master;
+
+ return 0;
+
+out_put_master:
+ spi_master_put(master);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+ .driver = {
+ .name = "bcm63xx-hsspi",
+ .owner = THIS_MODULE,
+ .pm = &bcm63xx_hsspi_pm_ops,
+ },
+ .probe = bcm63xx_hsspi_probe,
+ .remove = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 469ecd876358..0250fa721cea 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -35,8 +34,6 @@
#include <bcm63xx_dev_spi.h>
-#define PFX KBUILD_MODNAME
-
#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
@@ -169,9 +166,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
transfer_list);
}
- len -= prepend_len;
-
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
@@ -205,13 +200,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
if (!timeout)
return -ETIMEDOUT;
- /* read out all data */
- rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
-
- if (do_rx && rx_tail != len)
- return -EIO;
-
- if (!rx_tail)
+ if (!do_rx)
return 0;
len = 0;
@@ -345,25 +334,23 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
- clk = clk_get(dev, "spi");
+ clk = devm_clk_get(dev, "spi");
if (IS_ERR(clk)) {
dev_err(dev, "no clock for device\n");
- ret = PTR_ERR(clk);
- goto out;
+ return PTR_ERR(clk);
}
master = spi_alloc_master(dev, sizeof(*bs));
if (!master) {
dev_err(dev, "out of memory\n");
- ret = -ENOMEM;
- goto out_clk;
+ return -ENOMEM;
}
bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
@@ -408,7 +395,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
/* Initialize hardware */
- clk_prepare_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ goto out_err;
+
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
@@ -427,9 +417,6 @@ out_clk_disable:
clk_disable_unprepare(clk);
out_err:
spi_master_put(master);
-out_clk:
- clk_put(clk);
-out:
return ret;
}
@@ -443,12 +430,11 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
/* HW shutdown */
clk_disable_unprepare(bs->clk);
- clk_put(bs->clk);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bcm63xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -465,29 +451,27 @@ static int bcm63xx_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ int ret;
- clk_prepare_enable(bs->clk);
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
spi_master_resume(master);
return 0;
}
+#endif
static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
- .suspend = bcm63xx_spi_suspend,
- .resume = bcm63xx_spi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
};
-#define BCM63XX_SPI_PM_OPS (&bcm63xx_spi_pm_ops)
-#else
-#define BCM63XX_SPI_PM_OPS NULL
-#endif
-
static struct platform_driver bcm63xx_spi_driver = {
.driver = {
.name = "bcm63xx-spi",
.owner = THIS_MODULE,
- .pm = BCM63XX_SPI_PM_OPS,
+ .pm = &bcm63xx_spi_pm_ops,
},
.probe = bcm63xx_spi_probe,
.remove = bcm63xx_spi_remove,
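The PM rework above relies on SET_SYSTEM_SLEEP_PM_OPS(): the dev_pm_ops
structure always exists and can be referenced unconditionally, while the
callbacks themselves are only built under CONFIG_PM_SLEEP, so the old
BCM63XX_SPI_PM_OPS/NULL indirection is no longer needed. A minimal sketch of
the pattern (the foo_* names are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
#endif

/* Empty when CONFIG_PM_SLEEP is off, so &foo_pm_ops stays valid either way. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};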
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 38941e5920b5..f515c5e9db57 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -8,7 +8,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
index 8f8598834b30..4089d0e0d84e 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-bfin-v3.c
@@ -822,7 +822,8 @@ static int bfin_spi_probe(struct platform_device *pdev)
master->cleanup = bfin_spi_cleanup;
master->setup = bfin_spi_setup;
master->transfer_one_message = bfin_spi_transfer_one_message;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
drv_data = spi_master_get_devdata(master);
drv_data->master = master;
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index f0f195af75d4..55e57c3eb9bd 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -350,7 +350,6 @@ static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
{
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
- struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
@@ -362,9 +361,6 @@ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer, transfer_list);
-
msg->state = NULL;
if (!drv_data->cs_change)
@@ -1030,10 +1026,6 @@ static int bfin_spi_setup(struct spi_device *spi)
}
/* translate common spi framework into our register */
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
- dev_err(&spi->dev, "unsupported spi modes detected\n");
- goto error;
- }
if (spi->mode & SPI_CPOL)
chip->ctl_reg |= BIT_CTL_CPOL;
if (spi->mode & SPI_CPHA)
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3eb..c616e41521be 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -38,7 +38,7 @@
*
* Since this is software, the timings may not be exactly what your board's
* chips need ... there may be several reasons you'd need to tweak timings
- * in these routines, not just make to make it faster or slower to match a
+ * in these routines, not just to make it faster or slower to match a
* particular CPU clock rate.
*/
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index bd222f6b677d..dc7d2c2d643e 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -467,11 +466,9 @@ EXPORT_SYMBOL_GPL(spi_bitbang_start);
/**
* spi_bitbang_stop - stops the task providing spi communication
*/
-int spi_bitbang_stop(struct spi_bitbang *bitbang)
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
-
- return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 8081f96bd1d5..ee4f91ccd8fd 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -309,7 +309,6 @@ done:
static void butterfly_detach(struct parport *p)
{
struct butterfly *pp;
- int status;
/* FIXME this global is ugly ... but, how to quickly get from
* the parport to the "struct butterfly" associated with it?
@@ -321,7 +320,7 @@ static void butterfly_detach(struct parport *p)
butterfly = NULL;
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&pp->bitbang);
+ spi_bitbang_stop(&pp->bitbang);
/* turn off VCC */
parport_write_data(pp->port, 0);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 6f03d7e6435d..4cd62f636547 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -1,7 +1,7 @@
/*
* CLPS711X SPI bus driver
*
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,158 +11,125 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-clps711x.h>
-#include <mach/hardware.h>
-
#define DRIVER_NAME "spi-clps711x"
-struct spi_clps711x_data {
- struct completion done;
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct regmap *syscon1;
struct clk *spi_clk;
- u32 max_speed_hz;
u8 *tx_buf;
u8 *rx_buf;
- int count;
+ unsigned int bpw;
int len;
-
- int chipselect[0];
};
static int spi_clps711x_setup(struct spi_device *spi)
{
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
/* We expect that the SPI device is not selected */
- gpio_direction_output(hw->chipselect[spi->chip_select],
- !(spi->mode & SPI_CS_HIGH));
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
return 0;
}
-static void spi_clps711x_setup_mode(struct spi_device *spi)
-{
- /* Setup edge for transfer */
- if (spi->mode & SPI_CPHA)
- clps_writew(clps_readw(SYSCON3) | SYSCON3_ADCCKNSEN, SYSCON3);
- else
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCKNSEN, SYSCON3);
-}
-
-static int spi_clps711x_setup_xfer(struct spi_device *spi,
- struct spi_transfer *xfer)
+static void spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
- u8 bpw = xfer->bits_per_word;
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
- if (bpw != 8) {
- dev_err(&spi->dev, "Unsupported master bus width %i\n", bpw);
- return -EINVAL;
- }
+ struct spi_master *master = spi->master;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
/* Setup SPI frequency divider */
- if (!speed || (speed >= hw->max_speed_hz))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(3), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 2))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(2), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 8))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(1), SYSCON1);
+ if (xfer->speed_hz >= master->max_speed_hz)
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 2))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 8))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1));
else
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(0), SYSCON1);
-
- return 0;
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0));
}
-static int spi_clps711x_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct spi_clps711x_data *hw = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- int status = 0, cs = hw->chipselect[msg->spi->chip_select];
- u32 data;
-
- spi_clps711x_setup_mode(msg->spi);
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (spi_clps711x_setup_xfer(msg->spi, xfer)) {
- status = -EINVAL;
- goto out_xfr;
- }
-
- gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
-
- reinit_completion(&hw->done);
-
- hw->count = 0;
- hw->len = xfer->len;
- hw->tx_buf = (u8 *)xfer->tx_buf;
- hw->rx_buf = (u8 *)xfer->rx_buf;
+ struct spi_device *spi = msg->spi;
- /* Initiate transfer */
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
-
- wait_for_completion(&hw->done);
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
- if (xfer->cs_change ||
- list_is_last(&xfer->transfer_list, &msg->transfers))
- gpio_set_value(cs, !(msg->spi->mode & SPI_CS_HIGH));
+ spi_clps711x_setup_xfer(spi, xfer);
- msg->actual_length += xfer->len;
- }
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
-out_xfr:
- msg->status = status;
- spi_finalize_current_message(master);
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
- return 0;
+ return 1;
}
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
{
- struct spi_clps711x_data *hw = (struct spi_clps711x_data *)dev_id;
- u32 data;
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
/* Handle RX */
- data = clps_readb(SYNCIO);
+ data = readb(hw->syncio);
if (hw->rx_buf)
- hw->rx_buf[hw->count] = (u8)data;
-
- hw->count++;
+ *hw->rx_buf++ = data;
/* Handle TX */
- if (hw->count < hw->len) {
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
} else
- complete(&hw->done);
+ spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
static int spi_clps711x_probe(struct platform_device *pdev)
{
- int i, ret;
- struct spi_master *master;
struct spi_clps711x_data *hw;
struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct resource *res;
+ int i, irq, ret;
if (!pdata) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -174,33 +141,37 @@ static int spi_clps711x_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct spi_clps711x_data) +
- sizeof(int) * pdata->num_chipselect);
- if (!master) {
- dev_err(&pdev->dev, "SPI allocating memory error\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
return -ENOMEM;
+
+ master->cs_gpios = devm_kzalloc(&pdev->dev, sizeof(int) *
+ pdata->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios) {
+ ret = -ENOMEM;
+ goto err_out;
}
master->bus_num = pdev->id;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->num_chipselect = pdata->num_chipselect;
master->setup = spi_clps711x_setup;
- master->transfer_one_message = spi_clps711x_transfer_one_message;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
hw = spi_master_get_devdata(master);
for (i = 0; i < master->num_chipselect; i++) {
- hw->chipselect[i] = pdata->chipselect[i];
- if (!gpio_is_valid(hw->chipselect[i])) {
- dev_err(&pdev->dev, "Invalid CS GPIO %i\n", i);
- ret = -EINVAL;
- goto err_out;
- }
- if (gpio_request(hw->chipselect[i], DRIVER_NAME)) {
+ master->cs_gpios[i] = pdata->chipselect[i];
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
- ret = -EINVAL;
goto err_out;
}
}
@@ -211,67 +182,66 @@ static int spi_clps711x_probe(struct platform_device *pdev)
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
- hw->max_speed_hz = clk_get_rate(hw->spi_clk);
+ master->max_speed_hz = clk_get_rate(hw->spi_clk);
- init_completion(&hw->done);
platform_set_drvdata(pdev, master);
+ hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1");
+ if (IS_ERR(hw->syscon1)) {
+ ret = PTR_ERR(hw->syscon1);
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
/* Disable extended mode due to hardware problems */
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCON, SYSCON3);
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
/* Clear possible pending interrupt */
- clps_readl(SYNCIO);
+ readl(hw->syncio);
- ret = devm_request_irq(&pdev->dev, IRQ_SSEOTI, spi_clps711x_isr, 0,
- dev_name(&pdev->dev), hw);
- if (ret) {
- dev_err(&pdev->dev, "Can't request IRQ\n");
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
goto err_out;
- }
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret) {
dev_info(&pdev->dev,
"SPI bus driver initialized. Master clock %u Hz\n",
- hw->max_speed_hz);
+ master->max_speed_hz);
return 0;
}
dev_err(&pdev->dev, "Failed to register master\n");
err_out:
- while (--i >= 0)
- if (gpio_is_valid(hw->chipselect[i]))
- gpio_free(hw->chipselect[i]);
-
spi_master_put(master);
return ret;
}
-static int spi_clps711x_remove(struct platform_device *pdev)
-{
- int i;
- struct spi_master *master = platform_get_drvdata(pdev);
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
-
- for (i = 0; i < master->num_chipselect; i++)
- if (gpio_is_valid(hw->chipselect[i]))
- gpio_free(hw->chipselect[i]);
-
- return 0;
-}
-
static struct platform_driver clps711x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = spi_clps711x_probe,
- .remove = spi_clps711x_remove,
};
module_platform_driver(clps711x_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cc5b75d10c38..e2fa628e55e7 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -77,8 +77,6 @@ struct mcfqspi {
struct mcfqspi_cs_control *cs_control;
wait_queue_head_t waitq;
-
- struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -135,13 +133,13 @@ static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
{
- return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+ return (mcfqspi->cs_control->setup) ?
mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
}
static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
{
- if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+ if (mcfqspi->cs_control->teardown)
mcfqspi->cs_control->teardown(mcfqspi->cs_control);
}
@@ -300,68 +298,45 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static int mcfqspi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *t;
- int status = 0;
-
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- qmr |= t->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (t->speed_hz)
- qmr |= mcfqspi_qmr_baud(t->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ if (enable)
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
+}
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if (t->bits_per_word == 8)
- mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
- t->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
- t->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
- if (t->cs_change) {
- if (!list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- }
- msg->actual_length += t->len;
- }
- msg->status = status;
- spi_finalize_current_message(master);
-
- return status;
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+ return 0;
}
static int mcfqspi_setup(struct spi_device *spi)
{
- if (spi->chip_select >= spi->master->num_chipselect) {
- dev_dbg(&spi->dev, "%d chip select is out of range\n",
- spi->chip_select);
- return -EINVAL;
- }
-
mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
spi->chip_select, spi->mode & SPI_CS_HIGH);
@@ -388,6 +363,11 @@ static int mcfqspi_probe(struct platform_device *pdev)
return -ENOENT;
}
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
if (master == NULL) {
dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
@@ -397,44 +377,31 @@ static int mcfqspi_probe(struct platform_device *pdev)
mcfqspi = spi_master_get_devdata(master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(&pdev->dev, "platform_get_resource failed\n");
- status = -ENXIO;
- goto fail0;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_dbg(&pdev->dev, "request_mem_region failed\n");
- status = -EBUSY;
+ mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcfqspi->iobase)) {
+ status = PTR_ERR(mcfqspi->iobase);
goto fail0;
}
- mcfqspi->iobase = ioremap(res->start, resource_size(res));
- if (!mcfqspi->iobase) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- status = -ENOMEM;
- goto fail1;
- }
-
mcfqspi->irq = platform_get_irq(pdev, 0);
if (mcfqspi->irq < 0) {
dev_dbg(&pdev->dev, "platform_get_irq failed\n");
status = -ENXIO;
- goto fail2;
+ goto fail0;
}
- status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, 0,
- pdev->name, mcfqspi);
+ status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+ 0, pdev->name, mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "request_irq failed\n");
- goto fail2;
+ goto fail0;
}
- mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
+ mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
if (IS_ERR(mcfqspi->clk)) {
dev_dbg(&pdev->dev, "clk_get failed\n");
status = PTR_ERR(mcfqspi->clk);
- goto fail3;
+ goto fail0;
}
clk_enable(mcfqspi->clk);
@@ -445,42 +412,35 @@ static int mcfqspi_probe(struct platform_device *pdev)
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail4;
+ goto fail1;
}
init_waitqueue_head(&mcfqspi->waitq);
- mcfqspi->dev = &pdev->dev;
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->setup = mcfqspi_setup;
- master->transfer_one_message = mcfqspi_transfer_one_message;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
- status = spi_register_master(master);
+ status = devm_spi_register_master(&pdev->dev, master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail5;
+ goto fail2;
}
- pm_runtime_enable(mcfqspi->dev);
+ pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
-fail5:
- mcfqspi_cs_teardown(mcfqspi);
-fail4:
- clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
-fail3:
- free_irq(mcfqspi->irq, mcfqspi);
fail2:
- iounmap(mcfqspi->iobase);
+ mcfqspi_cs_teardown(mcfqspi);
fail1:
- release_mem_region(res->start, resource_size(res));
+ clk_disable(mcfqspi->clk);
fail0:
spi_master_put(master);
@@ -493,19 +453,13 @@ static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_disable(mcfqspi->dev);
+ pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
mcfqspi_cs_teardown(mcfqspi);
clk_disable(mcfqspi->clk);
- clk_put(mcfqspi->clk);
- free_irq(mcfqspi->irq, mcfqspi);
- iounmap(mcfqspi->iobase);
- release_mem_region(res->start, resource_size(res));
- spi_unregister_master(master);
return 0;
}
@@ -515,8 +469,11 @@ static int mcfqspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
- spi_master_suspend(master);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(mcfqspi->clk);
@@ -528,18 +485,17 @@ static int mcfqspi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spi_master_resume(master);
-
clk_enable(mcfqspi->clk);
- return 0;
+ return spi_master_resume(master);
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_disable(mcfqspi->clk);
@@ -548,7 +504,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_enable(mcfqspi->clk);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 50b2d88c8190..50f750989258 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -396,10 +396,6 @@ static int davinci_spi_setup(struct spi_device *spi)
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
- /* if bits per word length is zero then set it default 8 */
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
if (!(spi->mode & SPI_NO_CS)) {
if ((pdata->chip_sel == NULL) ||
(pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
@@ -806,8 +802,7 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
pdata = &dspi->pdata;
pdata->version = SPI_VERSION_1;
- match = of_match_device(of_match_ptr(davinci_spi_of_match),
- &pdev->dev);
+ match = of_match_device(davinci_spi_of_match, &pdev->dev);
if (!match)
return -ENODEV;
@@ -828,7 +823,6 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
return 0;
}
#else
-#define davinci_spi_of_match NULL
static struct davinci_spi_platform_data
*spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
@@ -853,7 +847,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
- struct resource *r, *mem;
+ struct resource *r;
resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
int i = 0, ret = 0;
@@ -868,10 +862,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
dspi = spi_master_get_devdata(master);
- if (dspi == NULL) {
- ret = -ENOENT;
- goto free_master;
- }
if (dev_get_platdata(&pdev->dev)) {
pdata = dev_get_platdata(&pdev->dev);
@@ -894,39 +884,29 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->pbase = r->start;
- mem = request_mem_region(r->start, resource_size(r), pdev->name);
- if (mem == NULL) {
- ret = -EBUSY;
+ dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
goto free_master;
}
- dspi->base = ioremap(r->start, resource_size(r));
- if (dspi->base == NULL) {
- ret = -ENOMEM;
- goto release_region;
- }
-
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
ret = -EINVAL;
- goto unmap_io;
+ goto free_master;
}
- ret = request_threaded_irq(dspi->irq, davinci_spi_irq, dummy_thread_fn,
- 0, dev_name(&pdev->dev), dspi);
+ ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+ dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
if (ret)
- goto unmap_io;
+ goto free_master;
dspi->bitbang.master = master;
- if (dspi->bitbang.master == NULL) {
- ret = -ENODEV;
- goto irq_free;
- }
- dspi->clk = clk_get(&pdev->dev, NULL);
+ dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto irq_free;
+ goto free_master;
}
clk_prepare_enable(dspi->clk);
@@ -963,8 +943,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_clk;
dev_info(&pdev->dev, "DMA: supported\n");
- dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
- "event queue: %d\n", dma_rx_chan, dma_tx_chan,
+ dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, "
+ "event queue: %d\n", &dma_rx_chan, &dma_tx_chan,
pdata->dma_event_q);
}
@@ -1015,13 +995,6 @@ free_dma:
dma_release_channel(dspi->dma_tx);
free_clk:
clk_disable_unprepare(dspi->clk);
- clk_put(dspi->clk);
-irq_free:
- free_irq(dspi->irq, dspi);
-unmap_io:
- iounmap(dspi->base);
-release_region:
- release_mem_region(dspi->pbase, resource_size(r));
free_master:
spi_master_put(master);
err:
@@ -1041,7 +1014,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
- struct resource *r;
master = platform_get_drvdata(pdev);
dspi = spi_master_get_devdata(master);
@@ -1049,11 +1021,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
clk_disable_unprepare(dspi->clk);
- clk_put(dspi->clk);
- free_irq(dspi->irq, dspi);
- iounmap(dspi->base);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(dspi->pbase, resource_size(r));
spi_master_put(master);
return 0;
@@ -1063,7 +1030,7 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
- .of_match_table = davinci_spi_of_match,
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
.remove = davinci_spi_remove,
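The davinci .of_match_table change above leans on of_match_ptr(): with
CONFIG_OF disabled the macro evaluates to NULL and never references its
argument, so the driver no longer needs its own
"#define davinci_spi_of_match NULL" fallback. Simplified, the helper from
<linux/of.h> behaves like this:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif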
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 168c620947f4..1492f5ee9aaa 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -30,14 +30,13 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
- struct resource *mem, *ioarea;
+ struct resource *mem;
int ret;
- dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
- if (!dwsmmio) {
- ret = -ENOMEM;
- goto err_end;
- }
+ dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
+ GFP_KERNEL);
+ if (!dwsmmio)
+ return -ENOMEM;
dws = &dwsmmio->dws;
@@ -45,80 +44,51 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
- ret = -EINVAL;
- goto err_kfree;
+ return -EINVAL;
}
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "SPI region already claimed\n");
- ret = -EBUSY;
- goto err_kfree;
- }
-
- dws->regs = ioremap_nocache(mem->start, resource_size(mem));
- if (!dws->regs) {
- dev_err(&pdev->dev, "SPI region already mapped\n");
- ret = -ENOMEM;
- goto err_release_reg;
+ dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dws->regs)) {
+ dev_err(&pdev->dev, "SPI region map failed\n");
+ return PTR_ERR(dws->regs);
}
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- ret = dws->irq; /* -ENXIO */
- goto err_unmap;
+ return dws->irq; /* -ENXIO */
}
- dwsmmio->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(dwsmmio->clk)) {
- ret = PTR_ERR(dwsmmio->clk);
- goto err_unmap;
- }
- clk_enable(dwsmmio->clk);
+ dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsmmio->clk))
+ return PTR_ERR(dwsmmio->clk);
+ ret = clk_prepare_enable(dwsmmio->clk);
+ if (ret)
+ return ret;
- dws->parent_dev = &pdev->dev;
- dws->bus_num = 0;
+ dws->bus_num = pdev->id;
dws->num_cs = 4;
dws->max_freq = clk_get_rate(dwsmmio->clk);
- ret = dw_spi_add_host(dws);
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_clk;
+ goto out;
platform_set_drvdata(pdev, dwsmmio);
return 0;
-err_clk:
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- release_mem_region(mem->start, resource_size(mem));
-err_kfree:
- kfree(dwsmmio);
-err_end:
+out:
+ clk_disable_unprepare(dwsmmio->clk);
return ret;
}
static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
- struct resource *mem;
-
- clk_disable(dwsmmio->clk);
- clk_put(dwsmmio->clk);
- dwsmmio->clk = NULL;
+ clk_disable_unprepare(dwsmmio->clk);
dw_spi_remove_host(&dwsmmio->dws);
- iounmap(dwsmmio->dws.regs);
- kfree(dwsmmio);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
return 0;
}
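
The dw-mmio conversion above is the standard devm_* probe shape: allocation, MMIO mapping and clock lookup are all device-managed, so the error path and remove() only undo what devm cannot, here the clock enable. A minimal sketch of that shape, assuming a hypothetical foo platform device:

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct foo_priv {                               /* hypothetical driver state */
                void __iomem *regs;
                struct clk *clk;
        };

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_priv *priv;
                struct resource *mem;
                int ret;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;                 /* nothing to undo; devm frees on detach */

                mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                priv->regs = devm_ioremap_resource(&pdev->dev, mem);
                if (IS_ERR(priv->regs))
                        return PTR_ERR(priv->regs);     /* request + ioremap in one managed call */

                priv->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(priv->clk))
                        return PTR_ERR(priv->clk);

                ret = clk_prepare_enable(priv->clk);    /* the one step devm does not undo here */
                if (ret)
                        return ret;

                platform_set_drvdata(pdev, priv);
                return 0;
        }

        static int foo_remove(struct platform_device *pdev)
        {
                struct foo_priv *priv = platform_get_drvdata(pdev);

                clk_disable_unprepare(priv->clk);
                return 0;
        }
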
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 66fa9955ea14..3f3dc1226edf 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -43,35 +43,25 @@ static int spi_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
- if (!dwpci) {
- ret = -ENOMEM;
- goto err_disable;
- }
+ dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
+ GFP_KERNEL);
+ if (!dwpci)
+ return -ENOMEM;
dwpci->pdev = pdev;
dws = &dwpci->dws;
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
- dws->iolen = pci_resource_len(pdev, pci_bar);
- ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ ret = pcim_iomap_regions(pdev, 1, dev_name(&pdev->dev));
if (ret)
- goto err_kfree;
-
- dws->regs = ioremap_nocache((unsigned long)dws->paddr,
- pci_resource_len(pdev, pci_bar));
- if (!dws->regs) {
- ret = -ENOMEM;
- goto err_release_reg;
- }
+ return ret;
- dws->parent_dev = &pdev->dev;
dws->bus_num = 0;
dws->num_cs = 4;
dws->irq = pdev->irq;
@@ -83,26 +73,17 @@ static int spi_pci_probe(struct pci_dev *pdev,
if (pdev->device == 0x0800) {
ret = dw_spi_mid_init(dws);
if (ret)
- goto err_unmap;
+ return ret;
}
- ret = dw_spi_add_host(dws);
+ ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
- goto err_unmap;
+ return ret;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
- return 0;
-err_unmap:
- iounmap(dws->regs);
-err_release_reg:
- pci_release_region(pdev, pci_bar);
-err_kfree:
- kfree(dwpci);
-err_disable:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
static void spi_pci_remove(struct pci_dev *pdev)
@@ -110,10 +91,6 @@ static void spi_pci_remove(struct pci_dev *pdev)
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
dw_spi_remove_host(&dwpci->dws);
- iounmap(dwpci->dws.regs);
- pci_release_region(pdev, 0);
- kfree(dwpci);
- pci_disable_device(pdev);
}
#ifdef CONFIG_PM
@@ -148,7 +125,7 @@ static int spi_resume(struct pci_dev *pdev)
#define spi_resume NULL
#endif
-static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+static const struct pci_device_id pci_ids[] = {
/* Intel MID platform SPI controller 0 */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
{},
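
The PCI side uses the managed equivalents: pcim_enable_device() and pcim_iomap_regions() undo themselves when probe fails or the driver unbinds, which is why spi_pci_remove() shrinks to just dw_spi_remove_host(); DEFINE_PCI_DEVICE_TABLE() is likewise deprecated in favour of a plain const struct pci_device_id array. A sketch of the idiom with hypothetical IDs:

        #include <linux/module.h>
        #include <linux/pci.h>

        static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                void __iomem *regs;
                int ret;

                ret = pcim_enable_device(pdev);         /* managed: no pci_disable_device() needed */
                if (ret)
                        return ret;

                /* bit 0 of the mask selects BAR 0 */
                ret = pcim_iomap_regions(pdev, 1 << 0, "foo-pci");
                if (ret)
                        return ret;

                regs = pcim_iomap_table(pdev)[0];       /* mapped BAR 0 */
                (void)regs;
                return 0;
        }

        static const struct pci_device_id foo_pci_ids[] = {
                { PCI_DEVICE(0x1234, 0xabcd) },         /* hypothetical vendor/device IDs */
                { }
        };
        MODULE_DEVICE_TABLE(pci, foo_pci_ids);

        static struct pci_driver foo_pci_driver = {
                .name           = "foo-pci",
                .id_table       = foo_pci_ids,
                .probe          = foo_pci_probe,
        };
        module_pci_driver(foo_pci_driver);
        MODULE_LICENSE("GPL");
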
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b897c4adb39d..712ac5629cd4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -276,8 +276,7 @@ static void giveback(struct dw_spi *dws)
queue_work(dws->workqueue, &dws->pump_messages);
spin_unlock_irqrestore(&dws->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
if (!last_transfer->cs_change && dws->cs_control)
@@ -427,7 +426,6 @@ static void pump_transfers(unsigned long data)
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
- dws->cs_change = transfer->cs_change;
dws->len = dws->cur_transfer->len;
if (chip != dws->prev_chip)
cs_change = 1;
@@ -440,12 +438,6 @@ static void pump_transfers(unsigned long data)
if (transfer->speed_hz != speed) {
speed = transfer->speed_hz;
- if (speed > dws->max_freq) {
- printk(KERN_ERR "MRST SPI0: unsupported"
- "freq: %dHz\n", speed);
- message->status = -EIO;
- goto early_exit;
- }
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
@@ -620,9 +612,11 @@ static int dw_spi_setup(struct spi_device *spi)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
- chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ spi_set_ctldata(spi, chip);
}
/*
@@ -667,16 +661,9 @@ static int dw_spi_setup(struct spi_device *spi)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
- spi_set_ctldata(spi, chip);
return 0;
}
-static void dw_spi_cleanup(struct spi_device *spi)
-{
- struct chip_data *chip = spi_get_ctldata(spi);
- kfree(chip);
-}
-
static int init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
@@ -776,18 +763,16 @@ static void spi_hw_init(struct dw_spi *dws)
}
}
-int dw_spi_add_host(struct dw_spi *dws)
+int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_master *master;
int ret;
BUG_ON(dws == NULL);
- master = spi_alloc_master(dws->parent_dev, 0);
- if (!master) {
- ret = -ENOMEM;
- goto exit;
- }
+ master = spi_alloc_master(dev, 0);
+ if (!master)
+ return -ENOMEM;
dws->master = master;
dws->type = SSI_MOTO_SPI;
@@ -797,7 +782,7 @@ int dw_spi_add_host(struct dw_spi *dws)
snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
dws->bus_num);
- ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
+ ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
dws->name, dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
@@ -808,9 +793,9 @@ int dw_spi_add_host(struct dw_spi *dws)
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
- master->cleanup = dw_spi_cleanup;
master->setup = dw_spi_setup;
master->transfer = dw_spi_transfer;
+ master->max_speed_hz = dws->max_freq;
/* Basic HW init */
spi_hw_init(dws);
@@ -836,7 +821,7 @@ int dw_spi_add_host(struct dw_spi *dws)
}
spi_master_set_devdata(master, dws);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_queue_alloc;
@@ -851,10 +836,8 @@ err_queue_alloc:
dws->dma_ops->dma_exit(dws);
err_diable_hw:
spi_enable_chip(dws, 0);
- free_irq(dws->irq, dws);
err_free_master:
spi_master_put(master);
-exit:
return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
@@ -878,10 +861,6 @@ void dw_spi_remove_host(struct dw_spi *dws)
spi_enable_chip(dws, 0);
/* Disable clk */
spi_set_clk(dws, 0);
- free_irq(dws->irq, dws);
-
- /* Disconnect from the SPI framework */
- spi_unregister_master(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9c57c078031e..587643dae11e 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -92,13 +92,11 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_master *master;
struct spi_device *cur_dev;
- struct device *parent_dev;
enum dw_ssi_type type;
char name[16];
void __iomem *regs;
unsigned long paddr;
- u32 iolen;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
@@ -135,7 +133,6 @@ struct dw_spi {
u8 n_bytes; /* current is a 1/2 bytes op */
u8 max_bits_per_word; /* maxim is 16b */
u32 dma_width;
- int cs_change;
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
void (*cs_control)(u32 command);
@@ -231,7 +228,7 @@ struct dw_spi_chip {
void (*cs_control)(u32 command);
};
-extern int dw_spi_add_host(struct dw_spi *dws);
+extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
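
Passing a struct device into dw_spi_add_host() lets the shared core attach its IRQ and master registration to the caller's device via devm_request_irq() and devm_spi_register_master(); that is why free_irq() and spi_unregister_master() drop out of the error and remove paths. A minimal sketch of the registration half, with hypothetical names:

        #include <linux/interrupt.h>
        #include <linux/spi/spi.h>

        static irqreturn_t bar_irq(int irq, void *dev_id)
        {
                return IRQ_HANDLED;                     /* hypothetical handler */
        }

        static int bar_add_host(struct device *dev, int irq)
        {
                struct spi_master *master;
                int ret;

                master = spi_alloc_master(dev, 0);
                if (!master)
                        return -ENOMEM;

                ret = devm_request_irq(dev, irq, bar_irq, IRQF_SHARED,
                                       dev_name(dev), master);
                if (ret) {
                        spi_master_put(master);
                        return ret;
                }

                /* unregistered automatically when 'dev' is unbound */
                ret = devm_spi_register_master(dev, master);
                if (ret)
                        spi_master_put(master);
                return ret;
        }
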
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index d4d3cc534792..be44a3eeb5e8 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -198,7 +198,7 @@ static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
efm32_spi_filltx(ddata);
- init_completion(&ddata->done);
+ reinit_completion(&ddata->done);
efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
@@ -287,17 +287,17 @@ static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
}
-static int efm32_spi_probe_dt(struct platform_device *pdev,
+static void efm32_spi_probe_dt(struct platform_device *pdev,
struct spi_master *master, struct efm32_spi_ddata *ddata)
{
struct device_node *np = pdev->dev.of_node;
u32 location;
int ret;
- if (!np)
- return 1;
-
- ret = of_property_read_u32(np, "location", &location);
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
if (!ret) {
dev_dbg(&pdev->dev, "using location %u\n", location);
} else {
@@ -308,11 +308,6 @@ static int efm32_spi_probe_dt(struct platform_device *pdev,
}
ddata->pdata.location = location;
-
- /* spi core takes care about the bus number using an alias */
- master->bus_num = -1;
-
- return 0;
}
static int efm32_spi_probe(struct platform_device *pdev)
@@ -322,9 +317,14 @@ static int efm32_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct device_node *np = pdev->dev.of_node;
- unsigned int num_cs, i;
+ int num_cs, i;
+
+ if (!np)
+ return -EINVAL;
num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
master = spi_alloc_master(&pdev->dev,
sizeof(*ddata) + num_cs * sizeof(unsigned));
@@ -349,6 +349,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
spin_lock_init(&ddata->lock);
+ init_completion(&ddata->done);
ddata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ddata->clk)) {
@@ -415,23 +416,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- ret = efm32_spi_probe_dt(pdev, master, ddata);
- if (ret > 0) {
- /* not created by device tree */
- const struct efm32_spi_pdata *pdata =
- dev_get_platdata(&pdev->dev);
-
- if (pdata)
- ddata->pdata = *pdata;
- else
- ddata->pdata.location =
- efm32_spi_get_configured_location(ddata);
-
- master->bus_num = pdev->id;
-
- } else if (ret < 0) {
- goto err_disable_clk;
- }
+ efm32_spi_probe_dt(pdev, master, ddata);
efm32_spi_write32(ddata, 0, REG_IEN);
efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
@@ -487,6 +472,9 @@ static int efm32_spi_remove(struct platform_device *pdev)
static const struct of_device_id efm32_spi_dt_ids[] = {
{
+ .compatible = "energymicro,efm32-spi",
+ }, {
+ /* doesn't follow the "vendor,device" scheme, don't use */
.compatible = "efm32,spi",
}, {
/* sentinel */
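
The efm32 change follows the usual completion idiom: init_completion() is called once when the state is set up, and each transfer only calls reinit_completion() on the already-initialised object before waiting on it. A short sketch, hypothetical names:

        #include <linux/completion.h>

        struct baz_state {
                struct completion done;                 /* signalled from the IRQ handler */
        };

        static void baz_init(struct baz_state *st)
        {
                init_completion(&st->done);             /* once, at probe time */
        }

        static int baz_do_transfer(struct baz_state *st)
        {
                reinit_completion(&st->done);           /* per transfer: just reset the counter */
                /* ... start the hardware here ... */
                wait_for_completion(&st->done);
                return 0;
        }
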
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 1bfaed6e4073..2f675d32df0e 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -73,8 +73,6 @@
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @min_rate: minimum clock rate (in Hz) supported by the controller
- * @max_rate: maximum clock rate (in Hz) supported by the controller
* @wait: wait here until given transfer is completed
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
@@ -95,8 +93,6 @@ struct ep93xx_spi {
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- unsigned long min_rate;
- unsigned long max_rate;
struct completion wait;
struct spi_message *current_msg;
size_t tx;
@@ -199,9 +195,9 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
* @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
- unsigned long rate,
- u8 *div_cpsr, u8 *div_scr)
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
{
+ struct spi_master *master = platform_get_drvdata(espi->pdev);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
@@ -210,7 +206,7 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
* controller. Note that minimum value is already checked in
* ep93xx_spi_transfer_one_message().
*/
- rate = clamp(rate, espi->min_rate, espi->max_rate);
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
/*
* Calculate divisors so that we can get speed according the
@@ -735,13 +731,6 @@ static int ep93xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *t;
-
- /* first validate each transfer */
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- if (t->speed_hz < espi->min_rate)
- return -EINVAL;
- }
msg->state = NULL;
msg->status = 0;
@@ -917,8 +906,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
- espi->max_rate = clk_get_rate(espi->clk) / 2;
- espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
espi->sspdr_phys = res->start + SSPDR;
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index c7a74f0ef892..09965f069a1c 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -312,9 +312,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
unsigned int i;
unsigned long flags;
- if (spi->chip_select > 0)
- return -ENODEV;
-
spin_lock_irqsave(&ebu_lock, flags);
if (spi->max_speed_hz >= CLOCK_100M) {
@@ -422,9 +419,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
priv->master = master;
master->mode_bits = SPI_MODE_3;
- master->num_chipselect = 1;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->bus_num = -1;
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
master->transfer_one_message = falcon_sflash_xfer_one;
@@ -433,21 +428,12 @@ static int falcon_sflash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
spi_master_put(master);
return ret;
}
-static int falcon_sflash_remove(struct platform_device *pdev)
-{
- struct falcon_sflash *priv = platform_get_drvdata(pdev);
-
- spi_unregister_master(priv->master);
-
- return 0;
-}
-
static const struct of_device_id falcon_sflash_match[] = {
{ .compatible = "lantiq,sflash-falcon" },
{},
@@ -456,7 +442,6 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
- .remove = falcon_sflash_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 8641b03bdd7a..d565eeee3bd8 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -108,11 +109,11 @@ struct fsl_dspi {
struct spi_bitbang bitbang;
struct platform_device *pdev;
- void __iomem *base;
+ struct regmap *regmap;
int irq;
- struct clk *clk;
+ struct clk *clk;
- struct spi_transfer *cur_transfer;
+ struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
size_t len;
void *tx;
@@ -123,24 +124,17 @@ struct fsl_dspi {
u8 cs;
u16 void_write_data;
- wait_queue_head_t waitq;
- u32 waitflags;
+ wait_queue_head_t waitq;
+ u32 waitflags;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
- return ((readl(dspi->base + SPI_CTAR(dspi->cs)) & SPI_FRAME_BITS_MASK)
- == SPI_FRAME_BITS(8)) ? 0 : 1;
-}
+ unsigned int val;
-static void set_bit_mode(struct fsl_dspi *dspi, unsigned char bits)
-{
- u32 temp;
+ regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
- temp = readl(dspi->base + SPI_CTAR(dspi->cs));
- temp &= ~SPI_FRAME_BITS_MASK;
- temp |= SPI_FRAME_BITS(bits);
- writel(temp, dspi->base + SPI_CTAR(dspi->cs));
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
@@ -188,7 +182,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
*/
if (tx_word && (dspi->len == 1)) {
dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- set_bit_mode(dspi, 8);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
tx_word = 0;
}
@@ -238,7 +233,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
}
- writel(dspi_pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
tx_count++;
}
@@ -253,17 +249,23 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
while ((dspi->rx < dspi->rx_end)
&& (rx_count < DSPI_FIFO_SIZE)) {
if (rx_word) {
+ unsigned int val;
+
if ((dspi->rx_end - dspi->rx) == 1)
break;
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u16 *)dspi->rx = d;
dspi->rx += 2;
} else {
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u8 *)dspi->rx = d;
dspi->rx++;
@@ -295,13 +297,13 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
if (!dspi->tx)
dspi->dataflags |= TRAN_STATE_TX_VOID;
- writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR);
- writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs));
- writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER);
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
if (t->speed_hz)
- writel(dspi->cur_chip->ctar_val,
- dspi->base + SPI_CTAR(dspi->cs));
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
dspi_transfer_write(dspi);
@@ -315,16 +317,20 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
static void dspi_chipselect(struct spi_device *spi, int value)
{
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- u32 pushr = readl(dspi->base + SPI_PUSHR);
+ unsigned int pushr;
+
+ regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
switch (value) {
case BITBANG_CS_ACTIVE:
pushr |= SPI_PUSHR_CONT;
+ break;
case BITBANG_CS_INACTIVE:
pushr &= ~SPI_PUSHR_CONT;
+ break;
}
- writel(pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, pushr);
}
static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
@@ -336,7 +342,8 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
- chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
@@ -347,7 +354,6 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
fmsz = spi->bits_per_word - 1;
} else {
pr_err("Invalid wordsize\n");
- kfree(chip);
return -ENODEV;
}
@@ -373,9 +379,6 @@ static int dspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
return dspi_setup_transfer(spi, NULL);
}
@@ -383,13 +386,15 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
- writel(SPI_SR_EOQF, dspi->base + SPI_SR);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
dspi_transfer_read(dspi);
if (!dspi->len) {
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- set_bit_mode(dspi, 16);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
} else {
@@ -421,7 +426,6 @@ static int dspi_suspend(struct device *dev)
static int dspi_resume(struct device *dev)
{
-
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
@@ -432,8 +436,13 @@ static int dspi_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops dspi_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dspi_suspend, dspi_resume)
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
+static struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
};
static int dspi_probe(struct platform_device *pdev)
@@ -442,6 +451,7 @@ static int dspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct fsl_dspi *dspi;
struct resource *res;
+ void __iomem *base;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -476,12 +486,24 @@ static int dspi_probe(struct platform_device *pdev)
master->bus_num = bus_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dspi->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dspi->base)) {
- ret = PTR_ERR(dspi->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto out_master_put;
}
+ dspi_regmap_config.lock_arg = dspi;
+ dspi_regmap_config.val_format_endian =
+ of_property_read_bool(np, "big-endian")
+ ? REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT;
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ &dspi_regmap_config);
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+ return PTR_ERR(dspi->regmap);
+ }
+
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
@@ -505,7 +527,7 @@ static int dspi_probe(struct platform_device *pdev)
clk_prepare_enable(dspi->clk);
init_waitqueue_head(&dspi->waitq);
- platform_set_drvdata(pdev, dspi);
+ platform_set_drvdata(pdev, master);
ret = spi_bitbang_start(&dspi->bitbang);
if (ret != 0) {
@@ -526,7 +548,8 @@ out_master_put:
static int dspi_remove(struct platform_device *pdev)
{
- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
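
The dspi conversion above swaps raw readl()/writel() on dspi->base for a regmap, which centralises endianness handling (the "big-endian" DT property) and provides helpers such as regmap_update_bits() for read-modify-write sequences like the SPI_FRAME_BITS change. A minimal sketch of an MMIO regmap, with hypothetical register names:

        #include <linux/bitops.h>
        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/regmap.h>

        #define FOO_CTRL        0x00                    /* hypothetical control register */
        #define FOO_CTRL_EN     BIT(0)

        static const struct regmap_config foo_regmap_config = {
                .reg_bits       = 32,
                .val_bits       = 32,
                .reg_stride     = 4,
                .max_register   = 0x88,
        };

        static int foo_init_regmap(struct device *dev, void __iomem *base,
                                   struct regmap **map)
        {
                unsigned int val;
                int ret;

                *map = devm_regmap_init_mmio(dev, base, &foo_regmap_config);
                if (IS_ERR(*map))
                        return PTR_ERR(*map);

                ret = regmap_read(*map, FOO_CTRL, &val);        /* readl() equivalent */
                if (ret)
                        return ret;

                /* read-modify-write in one call, like the frame-size update above */
                return regmap_update_bits(*map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);
        }
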
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 80d8f40f7e05..6fb2b75df821 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -219,13 +219,8 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
- u8 bits_per_word;
int ret;
- bits_per_word = spi->bits_per_word;
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
@@ -705,7 +700,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
goto err;
irq = irq_of_parse_and_map(np, 0);
- if (!ret) {
+ if (!irq) {
ret = -EINVAL;
goto err;
}
@@ -727,6 +722,66 @@ static int of_fsl_espi_remove(struct platform_device *dev)
return mpc8xxx_spi_remove(&dev->dev);
}
+#ifdef CONFIG_PM_SLEEP
+static int of_fsl_espi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int ret;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval &= ~SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static int of_fsl_espi_resume(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int i;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for (i = 0; i < pdata->max_chipselect; i++)
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops espi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
+};
+
static const struct of_device_id of_fsl_espi_match[] = {
{ .compatible = "fsl,mpc8536-espi" },
{}
@@ -738,6 +793,7 @@ static struct platform_driver fsl_espi_driver = {
.name = "fsl_espi",
.owner = THIS_MODULE,
.of_match_table = of_fsl_espi_match,
+ .pm = &espi_pm,
},
.probe = of_fsl_espi_probe,
.remove = of_fsl_espi_remove,
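
The eSPI hunks add system-sleep support in the standard shape: suspend calls spi_master_suspend() and quiesces the hardware, resume reprograms the controller and calls spi_master_resume(), and the callbacks are wrapped in SET_SYSTEM_SLEEP_PM_OPS so they compile away without CONFIG_PM_SLEEP. A skeleton of that wiring, with hypothetical callbacks:

        #include <linux/platform_device.h>
        #include <linux/pm.h>
        #include <linux/spi/spi.h>

        #ifdef CONFIG_PM_SLEEP
        static int foo_suspend(struct device *dev)
        {
                struct spi_master *master = dev_get_drvdata(dev);

                return spi_master_suspend(master);      /* stop the message queue */
        }

        static int foo_resume(struct device *dev)
        {
                struct spi_master *master = dev_get_drvdata(dev);

                /* reprogram controller registers here, then restart the queue */
                return spi_master_resume(master);
        }
        #endif

        static const struct dev_pm_ops foo_pm = {
                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        };

        static struct platform_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .pm     = &foo_pm,
                },
        };
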
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 0b75f26158ab..e5d45fca3551 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -200,7 +200,7 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
const void *prop;
int ret = -ENOMEM;
- pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
@@ -215,15 +215,13 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
- if (pdata->sysclk == -1) {
- ret = -ENODEV;
- goto err;
- }
+ if (pdata->sysclk == -1)
+ return -ENODEV;
}
#else
ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
if (ret)
- goto err;
+ return ret;
#endif
prop = of_get_property(np, "mode", NULL);
@@ -237,8 +235,4 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
-
-err:
- kfree(pinfo);
- return ret;
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 119f7af94537..f35488ed62a9 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -239,12 +239,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
- /* Make sure its a bit width we support [4..16, 32] */
- if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32))
- || (bits_per_word > mpc8xxx_spi->max_bits_per_word))
- return -EINVAL;
-
if (!hz)
hz = spi->max_speed_hz;
@@ -362,18 +356,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
- struct spi_transfer *t;
+ struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
- cs_change = 1;
- status = 0;
+ /* Don't allow changes if CS is active */
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- /* Don't allow changes if CS is active */
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
status = -EINVAL;
+ dev_err(&spi->dev,
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+ return;
+ }
+ }
+ cs_change = 1;
+ status = -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
@@ -641,6 +645,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
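
Instead of open-coding the "[4..16, 32]" check in setup_transfer, the driver now advertises its supported word sizes through master->bits_per_word_mask and lets the SPI core validate each transfer. The mask macros compose as in this sketch (bit N-1 set means N bits per word is accepted):

        #include <linux/spi/spi.h>

        /*
         * SPI_BPW_MASK(n) sets bit (n - 1); SPI_BPW_RANGE_MASK(min, max) sets
         * all bits from (min - 1) to (max - 1).  For a controller capped at
         * 16-bit words:
         */
        static void foo_set_word_sizes(struct spi_master *master)
        {
                master->bits_per_word_mask =
                        (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
                        SPI_BPW_RANGE_MASK(1, 16);
                /* result: bits 3..15 set, i.e. 4..16 bits per word accepted */
        }
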
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 3fb09f981980..09823076df88 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of.h>
@@ -115,17 +114,17 @@ spi_to_pdata(const struct spi_device *spi)
static inline void setsck(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_SCK_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_SCK_GPIO, is_on);
}
static inline void setmosi(const struct spi_device *spi, int is_on)
{
- gpio_set_value(SPI_MOSI_GPIO, is_on);
+ gpio_set_value_cansleep(SPI_MOSI_GPIO, is_on);
}
static inline int getmiso(const struct spi_device *spi)
{
- return !!gpio_get_value(SPI_MISO_GPIO);
+ return !!gpio_get_value_cansleep(SPI_MISO_GPIO);
}
#undef pdata
@@ -229,7 +228,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
if (cs != SPI_GPIO_NO_CHIPSELECT) {
/* SPI is normally active-low */
- gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ gpio_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
}
@@ -250,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* ... otherwise, take it from spi->controller_data
*/
- cs = (unsigned int) spi->controller_data;
+ cs = (unsigned int)(uintptr_t) spi->controller_data;
}
if (!spi->controller_state) {
@@ -503,13 +502,12 @@ static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
- int status;
spi_gpio = platform_get_drvdata(pdev);
pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&spi_gpio->bitbang);
+ spi_bitbang_stop(&spi_gpio->bitbang);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -518,7 +516,7 @@ static int spi_gpio_remove(struct platform_device *pdev)
gpio_free(SPI_SCK_GPIO);
spi_master_put(spi_gpio->bitbang.master);
- return status;
+ return 0;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
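
Two small idioms appear in the spi-gpio hunks: the *_cansleep GPIO accessors are legal here because bitbanged SPI runs in sleepable process context, so chip selects behind slow expanders become usable; and the (unsigned int)(uintptr_t) double cast is the warning-free way to recover a small integer stashed in the void *controller_data pointer on both 32- and 64-bit builds. A sketch of that round trip, hypothetical helpers:

        #include <linux/types.h>

        static inline void *cs_to_ptr(unsigned int cs)
        {
                return (void *)(uintptr_t)cs;           /* store a small integer in a pointer */
        }

        static inline unsigned int ptr_to_cs(const void *p)
        {
                return (unsigned int)(uintptr_t)p;      /* read it back without truncation warnings */
        }
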
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b80f2f70fef7..5daff2054ae4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -206,7 +205,8 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
+ unsigned int *fres)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
@@ -234,6 +234,10 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
+
+ /* Resulting frequency for the SCLK line. */
+ *fres = (fin / (pre + 1)) >> post;
+
return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
@@ -264,6 +268,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+ u32 clk = config->speed_hz, delay;
/*
* The hardware seems to have a race condition when changing modes. The
@@ -275,7 +280,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -297,6 +302,23 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+ /*
+ * Wait until the changes in the configuration register CONFIGREG
+ * propagate into the hardware. It takes exactly one tick of the
+ * SCLK clock, but we will wait two SCLK clock just to be sure. The
+ * effect of the delay it takes for the hardware to apply changes
+ * is noticable if the SCLK clock run very slow. In such a case, if
+ * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+ * be asserted before the SCLK polarity changes, which would disrupt
+ * the SPI communication as the device on the other end would consider
+ * the change of SCLK polarity as a clock tick already.
+ */
+ delay = (2 * 1000000) / clk;
+ if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
return 0;
}
@@ -718,7 +740,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
- init_completion(&spi_imx->xfer_done);
+ reinit_completion(&spi_imx->xfer_done);
spi_imx_push(spi_imx);
@@ -857,12 +879,12 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->irq = platform_get_irq(pdev, 0);
if (spi_imx->irq < 0) {
- ret = -EINVAL;
+ ret = spi_imx->irq;
goto out_master_put;
}
ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
- DRIVER_NAME, spi_imx);
+ dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
goto out_master_put;
@@ -925,8 +947,8 @@ static int spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_ipg);
- clk_disable_unprepare(spi_imx->clk_per);
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
spi_master_put(master);
return 0;
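
The new wait in mx51_ecspi_config() is sized from the effective SCLK frequency returned by mx51_ecspi_clkdiv(): delay = (2 * 1000000) / clk is two SCLK periods expressed in microseconds. As a worked example, clk = 1 MHz gives delay = 2, so udelay(2) busy-waits briefly; clk = 50 kHz gives delay = 40, which is too long to busy-wait, so usleep_range(40, 50) is used instead.
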
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 87676587d783..3822eef2ef9d 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
@@ -40,6 +39,7 @@ struct mpc512x_psc_spi {
unsigned int irq;
u8 bits_per_word;
struct clk *clk_mclk;
+ struct clk *clk_ipg;
u32 mclk_rate;
struct completion txisrdone;
@@ -465,18 +465,14 @@ static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
gpio_set_value(spi->cs_gpio, onoff);
}
-/* bus_num is used only for the case dev->platform_data == NULL */
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
- u32 size, unsigned int irq,
- s16 bus_num)
+ u32 size, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc512x_psc_spi *mps;
struct spi_master *master;
int ret;
void *tempp;
- int psc_num;
- char clk_name[16];
struct clk *clk;
master = spi_alloc_master(dev, sizeof *mps);
@@ -489,7 +485,6 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
if (pdata == NULL) {
mps->cs_control = mpc512x_spi_cs_control;
- master->bus_num = bus_num;
} else {
mps->cs_control = pdata->cs_control;
master->bus_num = pdata->bus_num;
@@ -504,7 +499,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
master->cleanup = mpc512x_psc_spi_cleanup;
master->dev.of_node = dev->of_node;
- tempp = ioremap(regaddr, size);
+ tempp = devm_ioremap(dev, regaddr, size);
if (!tempp) {
dev_err(dev, "could not ioremap I/O port range\n");
ret = -EFAULT;
@@ -513,43 +508,48 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->psc = tempp;
mps->fifo =
(struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
-
- ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
- "mpc512x-psc-spi", mps);
+ ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
if (ret)
goto free_master;
init_completion(&mps->txisrdone);
- psc_num = master->bus_num;
- snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
- clk = devm_clk_get(dev, clk_name);
+ clk = devm_clk_get(dev, "mclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto free_irq;
+ goto free_master;
}
ret = clk_prepare_enable(clk);
if (ret)
- goto free_irq;
+ goto free_master;
mps->clk_mclk = clk;
mps->mclk_rate = clk_get_rate(clk);
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_mclk_clock;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_mclk_clock;
+ mps->clk_ipg = clk;
+
ret = mpc512x_psc_spi_port_config(master, mps);
if (ret < 0)
- goto free_clock;
+ goto free_ipg_clock;
ret = devm_spi_register_master(dev, master);
if (ret < 0)
- goto free_clock;
+ goto free_ipg_clock;
return ret;
-free_clock:
+free_ipg_clock:
+ clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
clk_disable_unprepare(mps->clk_mclk);
-free_irq:
- free_irq(mps->irq, mps);
free_master:
- if (mps->psc)
- iounmap(mps->psc);
spi_master_put(master);
return ret;
@@ -561,9 +561,7 @@ static int mpc512x_psc_spi_do_remove(struct device *dev)
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
clk_disable_unprepare(mps->clk_mclk);
- free_irq(mps->irq, mps);
- if (mps->psc)
- iounmap(mps->psc);
+ clk_disable_unprepare(mps->clk_ipg);
return 0;
}
@@ -572,7 +570,6 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
- s16 id = -1;
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
@@ -581,16 +578,8 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
}
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
- /* get PSC id (0..11, used by port_config) */
- id = of_alias_get_id(op->dev.of_node, "spi");
- if (id < 0) {
- dev_err(&op->dev, "no alias id for %s\n",
- op->dev.of_node->full_name);
- return id;
- }
-
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
- irq_of_parse_and_map(op->dev.of_node, 0), id);
+ irq_of_parse_and_map(op->dev.of_node, 0));
}
static int mpc512x_psc_spi_of_remove(struct platform_device *op)
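
The mpc512x change switches from a clock name built out of the PSC alias id ("psc%d_mclk") to connection-id lookups ("mclk", "ipg") against the device itself, with the second clock unwinding the first on failure. A sketch of the two-clock pattern, hypothetical function name aside:

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        static int foo_get_clocks(struct device *dev, struct clk **mclk,
                                  struct clk **ipg)
        {
                int ret;

                *mclk = devm_clk_get(dev, "mclk");      /* looked up by connection id */
                if (IS_ERR(*mclk))
                        return PTR_ERR(*mclk);
                ret = clk_prepare_enable(*mclk);
                if (ret)
                        return ret;

                *ipg = devm_clk_get(dev, "ipg");
                if (IS_ERR(*ipg)) {
                        ret = PTR_ERR(*ipg);
                        goto err_mclk;
                }
                ret = clk_prepare_enable(*ipg);
                if (ret)
                        goto err_mclk;

                return 0;

        err_mclk:
                clk_disable_unprepare(*mclk);           /* undo the first clock on failure */
                return ret;
        }
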
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 00ba910ab302..3d18d9351185 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 7c675fe83101..aac2a5ddd964 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
@@ -357,20 +356,6 @@ static void mpc52xx_spi_wq(struct work_struct *work)
* spi_master ops
*/
-static int mpc52xx_spi_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word % 8)
- return -EINVAL;
-
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
- return -EINVAL;
-
- if (spi->chip_select >= spi->master->num_chipselect)
- return -EINVAL;
-
- return 0;
-}
-
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
@@ -433,9 +418,9 @@ static int mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = op->dev.of_node;
platform_set_drvdata(op, master);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 3adebfa22e3d..2884f0c2f5f0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -29,7 +29,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -111,14 +110,6 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
return 0;
}
-static int mxs_spi_setup(struct spi_device *dev)
-{
- if (!dev->bits_per_word)
- dev->bits_per_word = 8;
-
- return 0;
-}
-
static u32 mxs_spi_cs_to_reg(unsigned cs)
{
u32 select = 0;
@@ -379,7 +370,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- struct spi_transfer *t, *tmp_t;
+ struct spi_transfer *t;
unsigned int flag;
int status = 0;
@@ -389,7 +380,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
writel(mxs_spi_cs_to_reg(m->spi->chip_select),
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
@@ -481,7 +472,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
if (irq_err < 0)
- return -EINVAL;
+ return irq_err;
base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(base))
@@ -502,7 +493,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
return -ENOMEM;
master->transfer_one_message = mxs_spi_transfer_one;
- master->setup = mxs_spi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e0c32bc69ee2..16e30de650b0 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -38,7 +37,9 @@
/* usi register bit */
#define ENINT (0x01 << 17)
#define ENFLG (0x01 << 16)
+#define SLEEP (0x0f << 12)
#define TXNUM (0x03 << 8)
+#define TXBITLEN (0x1f << 3)
#define TXNEG (0x01 << 2)
#define RXNEG (0x01 << 1)
#define LSB (0x01 << 10)
@@ -57,13 +58,9 @@ struct nuc900_spi {
const unsigned char *tx;
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
- struct spi_device *curdev;
- struct device *dev;
struct nuc900_spi_info *pdata;
spinlock_t lock;
- struct resource *res;
};
static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
@@ -120,19 +117,16 @@ static void nuc900_spi_chipsel(struct spi_device *spi, int value)
}
}
-static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
- unsigned int txnum)
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
- if (!txnum)
- val &= ~TXNUM;
- else
+ if (txnum)
val |= txnum << 0x08;
__raw_writel(val, hw->regs + USI_CNT);
@@ -149,7 +143,7 @@ static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
val |= (txbitlen << 0x03);
@@ -288,12 +282,11 @@ static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
if (sleep)
val |= (sleep << 12);
- else
- val &= ~(0x0f << 12);
+
__raw_writel(val, hw->regs + USI_CNT);
spin_unlock_irqrestore(&hw->lock, flags);
@@ -339,19 +332,18 @@ static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
+ struct resource *res;
int err = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
- hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -363,53 +355,40 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
hw->bitbang.chipselect = nuc900_spi_chipsel;
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
- hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (hw->res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
- goto err_pdata;
- }
-
- hw->ioarea = request_mem_region(hw->res->start,
- resource_size(hw->res), pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
goto err_pdata;
}
- hw->regs = ioremap(hw->res->start, resource_size(hw->res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_iomap;
- }
-
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_irq;
+ goto err_pdata;
}
- err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_irq;
+ goto err_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_clk;
+ goto err_pdata;
}
mfp_set_groupg(&pdev->dev, NULL);
@@ -425,17 +404,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
err_register:
clk_disable(hw->clk);
- clk_put(hw->clk);
-err_clk:
- free_irq(hw->irq, hw);
-err_irq:
- iounmap(hw->regs);
-err_iomap:
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
err_pdata:
spi_master_put(hw->master);
-err_nomem:
return err;
}
@@ -443,18 +413,8 @@ static int nuc900_spi_remove(struct platform_device *dev)
{
struct nuc900_spi *hw = platform_get_drvdata(dev);
- free_irq(hw->irq, hw);
-
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- iounmap(hw->regs);
-
- release_mem_region(hw->res->start, resource_size(hw->res));
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 91c668596202..8998d11c7238 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -153,62 +152,22 @@ static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}
wait_for_completion(&hw->done);
- } else if (txp && rxp) {
- /* we need to tighten the transfer loop */
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx, tx = *txp++;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (rxp) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0,
- hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 rx;
- tiny_spi_wait_txr(hw);
- rx = readb(hw->base + TINY_SPI_TXDATA);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- *rxp++ = rx;
- }
- tiny_spi_wait_txr(hw);
- *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
- }
- tiny_spi_wait_txe(hw);
- *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
- } else if (txp) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(*txp++, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
- u8 tx = *txp++;
- tiny_spi_wait_txr(hw);
- writeb(tx, hw->base + TINY_SPI_TXDATA);
- }
- }
- tiny_spi_wait_txe(hw);
} else {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- if (t->len > 1) {
- writeb(0, hw->base + TINY_SPI_TXDATA);
- for (i = 2; i < t->len; i++) {
+ /* we need to tighten the transfer loop */
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+ for (i = 1; i < t->len; i++) {
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+
+ if (rxp || (i != t->len - 1))
tiny_spi_wait_txr(hw);
- writeb(0, hw->base + TINY_SPI_TXDATA);
- }
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
}
tiny_spi_wait_txe(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
}
+
return t->len;
}
@@ -307,8 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
- if (!hw->bitbang.master)
- return err;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 67249a48b391..c5e2f718eebd 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -11,7 +11,6 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,13 +32,6 @@ struct octeon_spi {
u64 cs_enax;
};
-struct octeon_spi_setup {
- u32 max_speed_hz;
- u8 chip_select;
- u8 mode;
- u8 bits_per_word;
-};
-
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
union cvmx_mpi_sts mpi_sts;
@@ -57,6 +49,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
struct spi_transfer *xfer,
bool last_xfer)
{
+ struct spi_device *spi = msg->spi;
union cvmx_mpi_cfg mpi_cfg;
union cvmx_mpi_tx mpi_tx;
unsigned int clkdiv;
@@ -68,18 +61,11 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
int len;
int i;
- struct octeon_spi_setup *msg_setup = spi_get_ctldata(msg->spi);
-
- speed_hz = msg_setup->max_speed_hz;
- mode = msg_setup->mode;
+ mode = spi->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- if (xfer->speed_hz)
- speed_hz = xfer->speed_hz;
-
- if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
- speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
@@ -93,8 +79,8 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
mpi_cfg.s.cslate = cpha ? 1 : 0;
mpi_cfg.s.enable = 1;
- if (msg_setup->chip_select < 4)
- p->cs_enax |= 1ull << (12 + msg_setup->chip_select);
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
mpi_cfg.u64 |= p->cs_enax;
if (mpi_cfg.u64 != p->last_cfg) {
@@ -114,7 +100,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
mpi_tx.s.leavecs = 1;
mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
@@ -139,7 +125,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
if (last_xfer)
mpi_tx.s.leavecs = xfer->cs_change;
else
@@ -169,17 +155,9 @@ static int octeon_spi_transfer_one_message(struct spi_master *master,
int status = 0;
struct spi_transfer *xfer;
- /*
- * We better have set the configuration via a call to .setup
- * before we get here.
- */
- if (spi_get_ctldata(msg->spi) == NULL) {
- status = -EINVAL;
- goto err;
- }
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
status = r;
@@ -194,41 +172,6 @@ err:
return status;
}
-static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *setup = kzalloc(sizeof(*setup), GFP_KERNEL);
- if (!setup)
- return NULL;
-
- setup->max_speed_hz = spi->max_speed_hz;
- setup->chip_select = spi->chip_select;
- setup->mode = spi->mode;
- setup->bits_per_word = spi->bits_per_word;
- return setup;
-}
-
-static int octeon_spi_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *new_setup;
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
-
- new_setup = octeon_spi_new_setup(spi);
- if (!new_setup)
- return -ENOMEM;
-
- spi_set_ctldata(spi, new_setup);
- kfree(old_setup);
-
- return 0;
-}
-
-static void octeon_spi_cleanup(struct spi_device *spi)
-{
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
- spi_set_ctldata(spi, NULL);
- kfree(old_setup);
-}
-
static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
@@ -257,8 +200,6 @@ static int octeon_spi_probe(struct platform_device *pdev)
p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
- /* Dynamic bus numbering */
- master->bus_num = -1;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA |
SPI_CPOL |
@@ -266,10 +207,9 @@ static int octeon_spi_probe(struct platform_device *pdev)
SPI_LSB_FIRST |
SPI_3WIRE;
- master->setup = octeon_spi_setup;
- master->cleanup = octeon_spi_cleanup;
master->transfer_one_message = octeon_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
master->dev.of_node = pdev->dev.of_node;
err = devm_spi_register_master(&pdev->dev, master);
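
With the per-device octeon_spi_setup state gone, the transfer path reads spi->mode, spi->chip_select and spi->max_speed_hz directly, and master->max_speed_hz lets the SPI core clamp requested speeds to OCTEON_SPI_MAX_CLOCK_HZ instead of the driver doing it. The `xfer->speed_hz ? : spi->max_speed_hz` expression is the GNU ternary with the middle operand omitted, i.e. equivalent to:

        speed_hz = xfer->speed_hz ? xfer->speed_hz : spi->max_speed_hz;
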
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index b6ed82beb01d..e7ffcded4e14 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -83,15 +83,11 @@
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
- struct spi_master *master;
struct clk *ick;
struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
-
- /* State of the SPI */
- unsigned int state;
};
struct omap1_spi100k_cs {
@@ -99,13 +95,6 @@ struct omap1_spi100k_cs {
int word_len;
};
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) \
- val |= mask; \
- else \
- val &= ~mask; \
-} while (0)
-
static void spi100k_enable_clock(struct spi_master *master)
{
unsigned int val;
@@ -139,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
}
spi100k_enable_clock(master);
- writew( data , spi100k->base + SPI_TX_MSB);
+ writew(data , spi100k->base + SPI_TX_MSB);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
@@ -147,7 +136,8 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
spi100k->base + SPI_CTRL);
/* Wait for bit ack send change */
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
udelay(1000);
spi100k_disable_clock(master);
@@ -155,7 +145,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH,dataL;
+ int dataH, dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -168,7 +158,8 @@ static int spi100k_read_data(struct spi_master *master, int len)
SPI_CTRL_RD,
spi100k->base + SPI_CTRL);
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
@@ -204,12 +195,10 @@ static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
static unsigned
omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
unsigned int count, c;
int word_len;
- spi100k = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -221,12 +210,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=1;
+ c -= 1;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
- } while(c);
+ } while (c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
@@ -234,12 +223,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=2;
+ c -= 2;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx++);
+ spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
@@ -247,12 +236,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=4;
+ c -= 4;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx);
+ spi100k_write_data(spi->master, word_len, *tx);
if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx = spi100k_read_data(spi->master, word_len);
+ } while (c);
}
return count - c;
}
@@ -294,7 +283,7 @@ static int omap1_spi100k_setup(struct spi_device *spi)
spi100k = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = spi100k->base + spi->chip_select * 0x14;
@@ -411,14 +400,14 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (!pdev->id)
return -EINVAL;
- master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
- master->bus_num = pdev->id;
+ master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
master->transfer_one_message = omap1_spi100k_transfer_one_message;
@@ -434,7 +423,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
spi100k = spi_master_get_devdata(master);
- spi100k->master = master;
/*
* The memory region base address is taken as the platform_data.
@@ -461,8 +449,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (status < 0)
goto err;
- spi100k->state = SPI_RUNNING;
-
return status;
err:
@@ -470,31 +456,12 @@ err:
return status;
}
-static int omap1_spi100k_remove(struct platform_device *pdev)
-{
- struct spi_master *master;
- struct omap1_spi100k *spi100k;
- struct resource *r;
- int status = 0;
-
- master = platform_get_drvdata(pdev);
- spi100k = spi_master_get_devdata(master);
-
- if (status != 0)
- return status;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- return 0;
-}
-
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
.probe = omap1_spi100k_probe,
- .remove = omap1_spi100k_remove,
};
module_platform_driver(omap1_spi100k_driver);
@@ -502,4 +469,3 @@ module_platform_driver(omap1_spi100k_driver);
MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
MODULE_LICENSE("GPL");
-
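The switch to devm_kzalloc() above means the per-chip-select state needs no matching kfree(), and the essentially empty remove() callback can go as well, since devm-managed allocations are released when the device is unbound. A minimal sketch of the same idiom, assuming a hypothetical foo driver (struct foo_cs and foo_setup are illustrative names only):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct foo_cs {
	void __iomem *base;
	int word_len;
};

static int foo_setup(struct spi_device *spi)
{
	struct foo_cs *cs = spi->controller_state;

	if (!cs) {
		/* lifetime follows &spi->dev, so no explicit kfree() path */
		cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}
	return 0;
}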
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 9313fd3b413d..be2a2e108e2f 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -99,7 +99,6 @@ struct uwire_spi {
};
struct uwire_state {
- unsigned bits_per_word;
unsigned div1_idx;
};
@@ -210,9 +209,8 @@ static void uwire_chipselect(struct spi_device *spi, int value)
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
- struct uwire_state *ust = spi->controller_state;
unsigned len = t->len;
- unsigned bits = ust->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
@@ -220,10 +218,6 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
if (!t->tx_buf && !t->rx_buf)
return 0;
- /* Microwire doesn't read and write concurrently */
- if (t->tx_buf && t->rx_buf)
- return -EPERM;
-
w = spi->chip_select << 10;
w |= CS_CMD;
@@ -322,7 +316,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
struct uwire_state *ust = spi->controller_state;
struct uwire_spi *uwire;
unsigned flags = 0;
- unsigned bits;
unsigned hz;
unsigned long rate;
int div1_idx;
@@ -332,23 +325,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
uwire = spi_master_get_devdata(spi->master);
- if (spi->chip_select > 3) {
- pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select);
- status = -ENODEV;
- goto done;
- }
-
- bits = spi->bits_per_word;
- if (t != NULL && t->bits_per_word)
- bits = t->bits_per_word;
-
- if (bits > 16) {
- pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
- status = -ENODEV;
- goto done;
- }
- ust->bits_per_word = bits;
-
/* mode 0..3, clock inverted separately;
* standard nCS signaling;
* don't treat DI=high as "not ready"
@@ -502,6 +478,7 @@ static int uwire_probe(struct platform_device *pdev)
status = PTR_ERR(uwire->ck);
dev_dbg(&pdev->dev, "no functional clock?\n");
spi_master_put(master);
+ iounmap(uwire_base);
return status;
}
clk_enable(uwire->ck);
@@ -515,7 +492,7 @@ static int uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
@@ -539,14 +516,13 @@ static int uwire_probe(struct platform_device *pdev)
static int uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = platform_get_drvdata(pdev);
- int status;
// FIXME remove all child devices, somewhere ...
- status = spi_bitbang_stop(&uwire->bitbang);
+ spi_bitbang_stop(&uwire->bitbang);
uwire_off(uwire);
iounmap(uwire_base);
- return status;
+ return 0;
}
/* work with hotplug and coldplug */
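With the per-transfer word-size bookkeeping gone, uwire_txrx() now takes the width directly from the transfer (falling back to the device default), and the range check moves into the core via bits_per_word_mask. A small sketch of both pieces, assuming the core rejects widths outside the advertised mask before calling into the driver (xfer_bits is an illustrative helper name):

#include <linux/spi/spi.h>

/* per-transfer override wins; otherwise use the device default */
static u8 xfer_bits(const struct spi_device *spi, const struct spi_transfer *t)
{
	return t->bits_per_word ? t->bits_per_word : spi->bits_per_word;
}

/* probe-time declaration equivalent to the old "bits > 16" check:
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
 * which expands to a mask with bits 0..15 set (1- to 16-bit words).
 */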
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 443df39840bc..2941c5b96ebc 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -22,7 +22,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -45,6 +44,7 @@
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
#define OMAP2_MCSPI_MAX_FIFODEPTH 64
#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT 2000
@@ -89,6 +89,7 @@
#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
@@ -96,6 +97,7 @@
#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
@@ -149,7 +151,7 @@ struct omap2_mcspi_cs {
int word_len;
struct list_head node;
/* Context save and restore shadow register */
- u32 chconf0;
+ u32 chconf0, chctrl0;
};
static inline void mcspi_write_reg(struct spi_master *master,
@@ -157,14 +159,14 @@ static inline void mcspi_write_reg(struct spi_master *master,
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- __raw_writel(val, mcspi->base + idx);
+ writel_relaxed(val, mcspi->base + idx);
}
static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- return __raw_readl(mcspi->base + idx);
+ return readl_relaxed(mcspi->base + idx);
}
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
@@ -172,14 +174,14 @@ static inline void mcspi_write_cs_reg(const struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- __raw_writel(val, cs->base + idx);
+ writel_relaxed(val, cs->base + idx);
}
static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
struct omap2_mcspi_cs *cs = spi->controller_state;
- return __raw_readl(cs->base + idx);
+ return readl_relaxed(cs->base + idx);
}
static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
@@ -230,10 +232,16 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
u32 l;
- l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	/* Flush posted writes */
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
@@ -338,7 +346,7 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
list_for_each_entry(cs, &ctx->cs, node)
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -346,9 +354,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(1000);
- while (!(__raw_readl(reg) & bit)) {
+ while (!(readl_relaxed(reg) & bit)) {
if (time_after(jiffies, timeout)) {
- if (!(__raw_readl(reg) & bit))
+ if (!(readl_relaxed(reg) & bit))
return -ETIMEDOUT;
else
return 0;
@@ -675,7 +683,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %02x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -687,7 +695,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 1 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -701,7 +709,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %02x\n",
word_len, *(rx - 1));
}
@@ -722,7 +730,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %04x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -734,7 +742,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 2 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -748,7 +756,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %04x\n",
word_len, *(rx - 1));
}
@@ -769,7 +777,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
}
dev_vdbg(&spi->dev, "write-%d %08x\n",
word_len, *tx);
- __raw_writel(*tx++, tx_reg);
+ writel_relaxed(*tx++, tx_reg);
}
if (rx != NULL) {
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -781,7 +789,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
if (c == 4 && tx == NULL &&
(l & OMAP2_MCSPI_CHCONF_TURBO)) {
omap2_mcspi_set_enable(spi, 0);
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
if (mcspi_wait_for_reg_bit(chstat_reg,
@@ -795,7 +803,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_set_enable(spi, 0);
}
- *rx++ = __raw_readl(rx_reg);
+ *rx++ = readl_relaxed(rx_reg);
dev_vdbg(&spi->dev, "read-%d %08x\n",
word_len, *(rx - 1));
}
@@ -840,7 +848,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
struct spi_master *spi_cntrl;
- u32 l = 0, div = 0;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
@@ -856,7 +864,17 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
speed_hz = t->speed_hz;
speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
- div = omap2_mcspi_calc_divisor(speed_hz);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
l = mcspi_cached_chconf0(spi);
@@ -885,7 +903,16 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
- l |= div << 2;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
@@ -900,7 +927,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ >> div,
+ speed_hz,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
@@ -972,6 +999,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->base = mcspi->base + spi->chip_select * 0x14;
cs->phys = mcspi->phys + spi->chip_select * 0x14;
cs->chconf0 = 0;
+ cs->chctrl0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
@@ -1057,12 +1085,15 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
status = -EINVAL;
break;
}
- if (par_override || t->speed_hz || t->bits_per_word) {
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
par_override = 1;
status = omap2_mcspi_setup_transfer(spi, t);
if (status < 0)
break;
- if (!t->speed_hz && !t->bits_per_word)
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
par_override = 0;
}
if (cd && cd->cs_per_word) {
@@ -1107,7 +1138,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
/* RX_ONLY mode needs dummy data in TX reg */
if (t->tx_buf == NULL)
- __raw_writel(0, cs->base
+ writel_relaxed(0, cs->base
+ OMAP2_MCSPI_TX0);
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
@@ -1176,16 +1207,12 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
m->actual_length = 0;
m->status = 0;
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers))
- return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned len = t->len;
- if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
- || (len && !(rx_buf || tx_buf))) {
+		if (len && !(rx_buf || tx_buf)) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
@@ -1194,12 +1221,6 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ >> 15);
- return -EINVAL;
- }
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
continue;
@@ -1311,6 +1332,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
platform_set_drvdata(pdev, master);
@@ -1470,9 +1493,9 @@ static int omap2_mcspi_resume(struct device *dev)
* change in account.
*/
cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
- __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
}
pm_runtime_mark_last_busy(mcspi->dev);
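The new CLKG path above trades the power-of-two divider for a 12-bit one-clock-granularity divider (4 bits of CLKD plus 8 bits of EXTCLK), which is also why the dev_dbg() now reports the recomputed speed_hz. A worked sketch in plain C, assuming the 48 MHz reference from OMAP2_MCSPI_MAX_FREQ; a 10 MHz request, for example, now yields 9.6 MHz instead of the 6 MHz the power-of-two divider could manage:

#include <stdio.h>

#define MCSPI_MAX_FREQ    48000000u
#define MCSPI_MAX_DIVIDER 4096u

int main(void)
{
	unsigned int speed_hz = 10000000;	/* example request */
	unsigned int div, clkd, extclk;

	if (speed_hz < MCSPI_MAX_FREQ / MCSPI_MAX_DIVIDER) {
		/* very slow rates keep the legacy power-of-two divider */
		for (clkd = 0; (MCSPI_MAX_FREQ >> clkd) > speed_hz; clkd++)
			;
		printf("power-of-two: clkd=%u -> %u Hz\n",
		       clkd, MCSPI_MAX_FREQ >> clkd);
	} else {
		div = (MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;  /* ceil */
		clkd = (div - 1) & 0xf;		/* low 4 bits -> CHCONF0.CLKD */
		extclk = (div - 1) >> 4;	/* upper 8 bits -> CHCTRL0.EXTCLK */
		printf("1-clock granularity: div=%u clkd=%u extclk=%u -> %u Hz\n",
		       div, clkd, extclk, MCSPI_MAX_FREQ / div);
	}
	return 0;
}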
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 744841e095e4..d018a4aac3a1 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -43,8 +42,6 @@
struct orion_spi {
struct spi_master *master;
void __iomem *base;
- unsigned int max_speed;
- unsigned int min_speed;
struct clk *clk;
};
@@ -75,23 +72,6 @@ orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
writel(val, reg_addr);
}
-static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
-{
- if (size == 16) {
- orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else if (size == 8) {
- orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n",
- size);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
u32 tclk_hz;
@@ -170,7 +150,14 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (rc)
return rc;
- return orion_spi_set_transfer_size(orion_spi, bits_per_word);
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
}
static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
@@ -260,11 +247,9 @@ orion_spi_write_read_16bit(struct spi_device *spi,
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct orion_spi *orion_spi;
unsigned int count;
int word_len;
- orion_spi = spi_master_get_devdata(spi->master);
word_len = spi->bits_per_word;
count = xfer->len;
@@ -310,27 +295,6 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
list_for_each_entry(t, &m->transfers, transfer_list) {
- /* make sure buffer length is even when working in 16
- * bit mode*/
- if ((t->bits_per_word == 16) && (t->len & 1)) {
- dev_err(&spi->dev,
- "message rejected : "
- "odd data length %d while in 16 bit mode\n",
- t->len);
- status = -EIO;
- goto msg_done;
- }
-
- if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev,
- "message rejected : "
- "device min speed (%d Hz) exceeds "
- "required transfer speed (%d Hz)\n",
- orion_spi->min_speed, t->speed_hz);
- status = -EIO;
- goto msg_done;
- }
-
if (par_override || t->speed_hz || t->bits_per_word) {
par_override = 1;
status = orion_spi_setup_transfer(spi, t);
@@ -375,28 +339,6 @@ static int orion_spi_reset(struct orion_spi *orion_spi)
return 0;
}
-static int orion_spi_setup(struct spi_device *spi)
-{
- struct orion_spi *orion_spi;
-
- orion_spi = spi_master_get_devdata(spi->master);
-
- if ((spi->max_speed_hz == 0)
- || (spi->max_speed_hz > orion_spi->max_speed))
- spi->max_speed_hz = orion_spi->max_speed;
-
- if (spi->max_speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
- spi->max_speed_hz);
- return -EINVAL;
- }
-
- /*
- * baudrate & width will be set orion_spi_setup_transfer
- */
- return 0;
-}
-
static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -425,16 +367,16 @@ static int orion_spi_probe(struct platform_device *pdev)
/* we support only mode 0, and no options */
master->mode_bits = SPI_CPHA | SPI_CPOL;
- master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
platform_set_drvdata(pdev, master);
spi = spi_master_get_devdata(master);
spi->master = master;
- spi->clk = clk_get(&pdev->dev, NULL);
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
status = PTR_ERR(spi->clk);
goto out;
@@ -443,8 +385,8 @@ static int orion_spi_probe(struct platform_device *pdev)
clk_prepare(spi->clk);
clk_enable(spi->clk);
tclk_hz = clk_get_rate(spi->clk);
- spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
- spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
+ master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -465,7 +407,6 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_clk:
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
out:
spi_master_put(master);
return status;
@@ -481,7 +422,6 @@ static int orion_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
clk_disable_unprepare(spi->clk);
- clk_put(spi->clk);
return 0;
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 2789b452e711..51d99779682f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -459,9 +459,8 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
pl022->next_msg_cs_active = false;
- last_transfer = list_entry(pl022->cur_msg->transfers.prev,
- struct spi_transfer,
- transfer_list);
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
if (last_transfer->delay_usecs)
@@ -2109,8 +2108,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
GFP_KERNEL);
- pinctrl_pm_select_default_state(dev);
-
/*
* Bus Number Which has been Assigned to this SSP controller
* on this board
@@ -2183,13 +2180,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- status = clk_prepare(pl022->clk);
- if (status) {
- dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
- goto err_clk_prep;
- }
-
- status = clk_enable(pl022->clk);
+ status = clk_prepare_enable(pl022->clk);
if (status) {
dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
goto err_no_clk_en;
@@ -2250,10 +2241,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
err_no_irq:
- clk_disable(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
err_no_clk_en:
- clk_unprepare(pl022->clk);
- err_clk_prep:
err_no_clk:
err_no_ioremap:
amba_release_regions(adev);
@@ -2281,42 +2270,13 @@ pl022_remove(struct amba_device *adev)
if (pl022->master_info->enable_dma)
pl022_dma_remove(pl022);
- clk_disable(pl022->clk);
- clk_unprepare(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
return 0;
}
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
-/*
- * These two functions are used from both suspend/resume and
- * the runtime counterparts to handle external resources like
- * clocks, pins and regulators when going to sleep.
- */
-static void pl022_suspend_resources(struct pl022 *pl022, bool runtime)
-{
- clk_disable(pl022->clk);
-
- if (runtime)
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
- else
- pinctrl_pm_select_sleep_state(&pl022->adev->dev);
-}
-
-static void pl022_resume_resources(struct pl022 *pl022, bool runtime)
-{
- /* First go to the default state */
- pinctrl_pm_select_default_state(&pl022->adev->dev);
- if (!runtime)
- /* Then let's idle the pins until the next transfer happens */
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
-
- clk_enable(pl022->clk);
-}
-#endif
-
-#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
@@ -2328,8 +2288,13 @@ static int pl022_suspend(struct device *dev)
return ret;
}
- pm_runtime_get_sync(dev);
- pl022_suspend_resources(pl022, false);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2340,8 +2305,9 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022, false);
- pm_runtime_put(dev);
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2352,14 +2318,16 @@ static int pl022_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022, true);
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
return 0;
}
@@ -2367,14 +2335,16 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022, true);
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
return 0;
}
#endif
static const struct dev_pm_ops pl022_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
- SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+ SET_PM_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
static struct vendor_data vendor_arm = {
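The open-coded suspend/resume resource helpers removed above collapse into the pm_runtime_force_suspend()/pm_runtime_force_resume() pair, which reuse the runtime-PM callbacks (clock gating plus pinctrl state) for system sleep. A minimal sketch of the resulting shape, assuming a hypothetical foo device with the same runtime callbacks (foo_suspend/foo_resume are illustrative names):

#include <linux/device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)
{
	int ret = pm_runtime_force_suspend(dev);	/* runs runtime-suspend if needed */

	if (!ret)
		pinctrl_pm_select_sleep_state(dev);	/* deepest pin state for sleep */
	return ret;
}

static int foo_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);		/* undoes the forced suspend */
}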
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 5ee56726f8d0..80b8408ac3e3 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 3c0b55125f1e..713af4806f26 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 74bc18775658..3f006d3ed2a8 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -62,7 +62,7 @@ static void ce4100_spi_remove(struct pci_dev *dev)
platform_device_unregister(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
+static const struct pci_device_id ce4100_spi_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index 2916efc7cfe5..e8a26f25d5c0 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 7765b1999537..41185d0557fa 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -362,8 +362,7 @@ static void giveback(struct driver_data *drv_data)
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
/* Delay if requested before any change in chip select */
@@ -1066,6 +1065,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
pdata->num_chipselect = 1;
pdata->enable_dma = true;
+ pdata->tx_chan_id = -1;
+ pdata->rx_chan_id = -1;
return pdata;
}
@@ -1266,7 +1267,7 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 000000000000..b032e8885e24
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License rev 2 and
+ * only rev 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+/* high speed mode is when bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int tx_bytes;
+ int rx_bytes;
+};
+
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
+ loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ u8 *rx_buf = xfer->rx_buf;
+ u32 word, state;
+ int idx, shift, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->rx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ if (!rx_buf) {
+ controller->rx_bytes += w_size;
+ continue;
+ }
+
+ for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (w_size - idx - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
+
+static void spi_qup_fifo_write(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ const u8 *tx_buf = xfer->tx_buf;
+ u32 word, state, data;
+ int idx, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->tx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (state & QUP_OP_OUT_FIFO_FULL)
+ break;
+
+ word = 0;
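+		/* pack tx bytes into the 32-bit FIFO word, first byte in the top bits */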
+ for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+
+ if (!tx_buf) {
+ controller->tx_bytes += w_size;
+ break;
+ }
+
+ data = tx_buf[controller->tx_bytes];
+ word |= data << (BITS_PER_BYTE * (3 - idx));
+ }
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ struct spi_transfer *xfer;
+ u32 opflags, qup_err, spi_err;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ xfer = controller->xfer;
+ controller->xfer = NULL;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+
+ if (!xfer) {
+		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
+ qup_err, spi_err, opflags);
+ return IRQ_HANDLED;
+ }
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->error = error;
+ controller->xfer = xfer;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->rx_bytes == xfer->len || error)
+ complete(&controller->done);
+
+ return IRQ_HANDLED;
+}
+
+
+/* set clock freq ... bits per word */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, mode;
+ int ret, n_words, w_size;
+
+ if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
+		dev_err(controller->dev, "loopback transfer of %d bytes exceeds FIFO size %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+		dev_err(controller->dev, "failed to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
+ w_size = 4;
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+
+ n_words = xfer->len / w_size;
+ controller->w_size = w_size;
+
+ if (n_words <= controller->in_fifo_sz) {
+ mode = QUP_IO_M_MODE_FIFO;
+ writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BLOCK;
+ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+	 * HS_MODE improves signal stability at high SPI clock rates,
+	 * but is invalid in loopback mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+ return 0;
+}
+
+static void spi_qup_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+
+	u32 iocontrol, mask;
+
+	iocontrol = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+	/* Disable auto CS toggle and use manual */
+	iocontrol &= ~SPI_IO_C_MX_CS_MODE;
+	iocontrol |= SPI_IO_C_FORCE_CS;
+
+	iocontrol &= ~SPI_IO_C_CS_SELECT_MASK;
+	iocontrol |= SPI_IO_C_CS_SELECT(spi->chip_select);
+
+	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+
+	if (enable)
+		iocontrol |= mask;
+	else
+		iocontrol &= ~mask;
+
+	writel_relaxed(iocontrol, controller->base + SPI_IO_CONTROL);
+}
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret = -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
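+	/* budget roughly 100x the nominal transfer time (len * 8 bits at speed_hz) */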
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set RUN state\n");
+ goto exit;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
+ dev_warn(controller->dev, "cannot set PAUSE state\n");
+ goto exit;
+ }
+
+ spi_qup_fifo_write(controller, xfer);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set EXECUTE state\n");
+ goto exit;
+ }
+
+ if (!wait_for_completion_timeout(&controller->done, timeout))
+ ret = -ETIMEDOUT;
+exit:
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = NULL;
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+ return ret;
+}
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 data, max_freq, iomode;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+	/* This is an optional parameter */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ return ret;
+ }
+
+ data = readl_relaxed(base + QUP_HW_VERSION);
+
+ if (data < QUP_HW_VERSION_2_1_1) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "v.%08x is not supported\n", data);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->set_cs = spi_qup_set_cs;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
+
+ dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ data, controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
+
+error:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+	/* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+	/* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .owner = THIS_MODULE,
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
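For reference, a plain-C sketch of the FIFO word layout that spi_qup_fifo_read() documents in its inline comment above (a w_size-byte SPI word right-justified in the 32-bit register, e.g. 0x00001234 for a 2-byte word); pack_word()/unpack_word() are illustrative helpers, not driver code:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_word(const uint8_t *buf, int w_size)
{
	uint32_t word = 0;
	int i;

	for (i = 0; i < w_size; i++)		/* first byte ends up highest */
		word |= (uint32_t)buf[i] << (8 * (w_size - 1 - i));
	return word;
}

static void unpack_word(uint32_t word, uint8_t *buf, int w_size)
{
	int i;

	for (i = 0; i < w_size; i++)
		buf[i] = word >> (8 * (w_size - 1 - i));
}

int main(void)
{
	const uint8_t tx[2] = { 0x12, 0x34 };
	uint8_t rx[2];

	printf("packed:   0x%08x\n", (unsigned int)pack_word(tx, 2));	/* 0x00001234 */
	unpack_word(0x00001234, rx, 2);
	printf("unpacked: %02x %02x\n", rx[0], rx[1]);			/* 12 34 */
	return 0;
}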
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 9e829cee7357..1fb0ad213324 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1,7 +1,8 @@
/*
* SH RSPI driver
*
- * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
@@ -25,202 +26,252 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
-#define RSPI_SPCR 0x00
-#define RSPI_SSLP 0x01
-#define RSPI_SPPCR 0x02
-#define RSPI_SPSR 0x03
-#define RSPI_SPDR 0x04
-#define RSPI_SPSCR 0x08
-#define RSPI_SPSSR 0x09
-#define RSPI_SPBR 0x0a
-#define RSPI_SPDCR 0x0b
-#define RSPI_SPCKD 0x0c
-#define RSPI_SSLND 0x0d
-#define RSPI_SPND 0x0e
-#define RSPI_SPCR2 0x0f
-#define RSPI_SPCMD0 0x10
-#define RSPI_SPCMD1 0x12
-#define RSPI_SPCMD2 0x14
-#define RSPI_SPCMD3 0x16
-#define RSPI_SPCMD4 0x18
-#define RSPI_SPCMD5 0x1a
-#define RSPI_SPCMD6 0x1c
-#define RSPI_SPCMD7 0x1e
-
-/*qspi only */
-#define QSPI_SPBFCR 0x18
-#define QSPI_SPBDCR 0x1a
-#define QSPI_SPBMUL0 0x1c
-#define QSPI_SPBMUL1 0x20
-#define QSPI_SPBMUL2 0x24
-#define QSPI_SPBMUL3 0x28
-
-/* SPCR */
-#define SPCR_SPRIE 0x80
-#define SPCR_SPE 0x40
-#define SPCR_SPTIE 0x20
-#define SPCR_SPEIE 0x10
-#define SPCR_MSTR 0x08
-#define SPCR_MODFEN 0x04
-#define SPCR_TXMD 0x02
-#define SPCR_SPMS 0x01
-
-/* SSLP */
-#define SSLP_SSL1P 0x02
-#define SSLP_SSL0P 0x01
-
-/* SPPCR */
-#define SPPCR_MOIFE 0x20
-#define SPPCR_MOIFV 0x10
+#define RSPI_SPCR 0x00 /* Control Register */
+#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
+#define RSPI_SPPCR 0x02 /* Pin Control Register */
+#define RSPI_SPSR 0x03 /* Status Register */
+#define RSPI_SPDR 0x04 /* Data Register */
+#define RSPI_SPSCR 0x08 /* Sequence Control Register */
+#define RSPI_SPSSR 0x09 /* Sequence Status Register */
+#define RSPI_SPBR 0x0a /* Bit Rate Register */
+#define RSPI_SPDCR 0x0b /* Data Control Register */
+#define RSPI_SPCKD 0x0c /* Clock Delay Register */
+#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
+#define RSPI_SPND 0x0e /* Next-Access Delay Register */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
+#define RSPI_SPCMD0 0x10 /* Command Register 0 */
+#define RSPI_SPCMD1 0x12 /* Command Register 1 */
+#define RSPI_SPCMD2 0x14 /* Command Register 2 */
+#define RSPI_SPCMD3 0x16 /* Command Register 3 */
+#define RSPI_SPCMD4 0x18 /* Command Register 4 */
+#define RSPI_SPCMD5 0x1a /* Command Register 5 */
+#define RSPI_SPCMD6 0x1c /* Command Register 6 */
+#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
+#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
+#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
+
+/* QSPI only */
+#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
+#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
+#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
+#define SPCR_SPE 0x40 /* Function Enable */
+#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
+#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
+#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
+#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car M2 only */
+#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
+#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
#define SPPCR_SPOM 0x04
-#define SPPCR_SPLP2 0x02
-#define SPPCR_SPLP 0x01
-
-/* SPSR */
-#define SPSR_SPRF 0x80
-#define SPSR_SPTEF 0x20
-#define SPSR_PERF 0x08
-#define SPSR_MODF 0x04
-#define SPSR_IDLNF 0x02
-#define SPSR_OVRF 0x01
-
-/* SPSCR */
-#define SPSCR_SPSLN_MASK 0x07
-
-/* SPSSR */
-#define SPSSR_SPECM_MASK 0x70
-#define SPSSR_SPCP_MASK 0x07
-
-/* SPDCR */
-#define SPDCR_SPLW 0x20
-#define SPDCR_SPRDTD 0x10
+#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV 0x04 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
+#define SPSR_TEND 0x40 /* Transmit End */
+#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
+#define SPSR_PERF 0x08 /* Parity Error Flag */
+#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
+#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
+#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD SPDCR_SPLW1
+#define SPDCR_SPLBYTE SPDCR_SPLW0
+#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
-#define SPDCR_SLSEL_MASK 0x0c
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
-/* SPCKD */
-#define SPCKD_SCKDL_MASK 0x07
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
-/* SSLND */
-#define SSLND_SLNDL_MASK 0x07
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
-/* SPND */
-#define SPND_SPNDL_MASK 0x07
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
-/* SPCR2 */
-#define SPCR2_PTE 0x08
-#define SPCR2_SPIE 0x04
-#define SPCR2_SPOE 0x02
-#define SPCR2_SPPE 0x01
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
+#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
+#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE 0x01 /* Parity Enable */
-/* SPCMDn */
-#define SPCMD_SCKDEN 0x8000
-#define SPCMD_SLNDEN 0x4000
-#define SPCMD_SPNDEN 0x2000
-#define SPCMD_LSBF 0x1000
-#define SPCMD_SPB_MASK 0x0f00
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
+#define SPCMD_LSBF 0x1000 /* LSB First */
+#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
-#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
#define SPCMD_SPB_32BIT 0x0200
-#define SPCMD_SSLKP 0x0080
-#define SPCMD_SSLA_MASK 0x0030
-#define SPCMD_BRDV_MASK 0x000c
-#define SPCMD_CPOL 0x0002
-#define SPCMD_CPHA 0x0001
-
-/* SPBFCR */
-#define SPBFCR_TXRST 0x80 /* qspi only */
-#define SPBFCR_RXRST 0x40 /* qspi only */
+#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1 0x0040
+#define SPCMD_SPIMOD0 0x0020
+#define SPCMD_SPIMOD_SINGLE 0
+#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
+#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
+#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
+#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+
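+/* Dummy byte clocked out to generate SCLK when there is no TX data */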
+#define DUMMY_DATA 0x00
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_master *master;
- struct list_head queue;
- struct work_struct ws;
wait_queue_head_t wait;
- spinlock_t lock;
struct clk *clk;
- unsigned char spsr;
+ u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
const struct spi_ops *ops;
/* for dmaengine */
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
- int irq;
unsigned dma_width_16bit:1;
unsigned dma_callbacked:1;
+ unsigned byte_access:1;
};
-static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
iowrite8(data, rspi->addr + offset);
}
-static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
iowrite16(data, rspi->addr + offset);
}
-static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
iowrite32(data, rspi->addr + offset);
}
-static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
return ioread8(rspi->addr + offset);
}
-static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
return ioread16(rspi->addr + offset);
}
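+
+/*
+ * SPDR access width differs between variants: RSPI on legacy SH uses 16-bit
+ * accesses, while RSPI on RZ and QSPI use 8-bit accesses. byte_access is set
+ * accordingly by each variant's set_config_register().
+ */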
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
+{
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
+}
+
+static u16 rspi_read_data(const struct rspi_data *rspi)
+{
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
+}
+
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
- int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
- int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
-
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ u16 mode_bits;
};
/*
- * functions for RSPI
+ * functions for RSPI on legacy SH
*/
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -231,8 +282,41 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, RSPI_SPCR2);
/* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
- RSPI_SPCMD0);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
@@ -245,18 +329,18 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
*/
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- u16 spcmd;
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -265,13 +349,13 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Data Length Setting */
if (access_size == 8)
- spcmd = SPCMD_SPB_8BIT;
+ rspi->spcmd |= SPCMD_SPB_8BIT;
else if (access_size == 16)
- spcmd = SPCMD_SPB_16BIT;
- else if (access_size == 32)
- spcmd = SPCMD_SPB_32BIT;
+ rspi->spcmd |= SPCMD_SPB_16BIT;
+ else
+ rspi->spcmd |= SPCMD_SPB_32BIT;
- spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
/* Resets transfer data length */
rspi_write32(rspi, 0, QSPI_SPBMUL0);
@@ -282,9 +366,9 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
/* Sets SPCMD */
- rspi_write16(rspi, spcmd, RSPI_SPCMD0);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in a master mode */
+ /* Enables SPI function in master mode */
rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
return 0;
@@ -292,12 +376,12 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}
-static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}
@@ -308,6 +392,9 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
int ret;
rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
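+ /* Return immediately if the wait condition is already satisfied */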
+ if (rspi->spsr & wait_mask)
+ return 0;
+
rspi_enable_irq(rspi, enable_bit);
ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
if (ret == 0 && !(rspi->spsr & wait_mask))
@@ -316,81 +403,39 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
return 0;
}
-static void rspi_assert_ssl(struct rspi_data *rspi)
-{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
-}
-
-static void rspi_negate_ssl(struct rspi_data *rspi)
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return -ETIMEDOUT;
+ }
+ rspi_write_data(rspi, data);
+ return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_in(struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
-
- data = (u8 *)t->tx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
+ u8 data;
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
-
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
- remain--;
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return -ETIMEDOUT;
}
-
- /* Waiting for the last transmition */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-
- return 0;
+ data = rspi_read_data(rspi);
+ return data;
}
-static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_out_in(struct rspi_data *rspi, u8 data)
{
- int remain = t->len;
- u8 *data;
-
- rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
-
- data = (u8 *)t->tx_buf;
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_write8(rspi, *data++, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_read8(rspi, RSPI_SPDR);
-
- remain--;
- }
+ int ret;
- /* Waiting for the last transmition */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
- return 0;
+ return rspi_data_in(rspi);
}
-#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
-
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
@@ -399,8 +444,8 @@ static void rspi_dma_complete(void *arg)
wake_up_interruptible(&rspi->wait);
}
-static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
- struct dma_chan *chan,
+static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf,
+ unsigned len, struct dma_chan *chan,
enum dma_transfer_direction dir)
{
sg_init_table(sg, 1);
@@ -440,12 +485,13 @@ static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg;
- void *buf = NULL;
+ const void *buf = NULL;
struct dma_async_tx_descriptor *desc;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
+ void *tmp;
/*
* If DMAC bus width is 16-bit, the driver allocates a dummy
* buffer. And, the driver converts original data into the
@@ -454,13 +500,14 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC data: 1st byte, dummy, 2nd byte, dummy ...
*/
len = t->len * 2;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
+ tmp = kmalloc(len, GFP_KERNEL);
+ if (!tmp)
return -ENOMEM;
- rspi_memory_to_8bit(buf, t->tx_buf, t->len);
+ rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
+ buf = tmp;
} else {
len = t->len;
- buf = (void *)t->tx_buf;
+ buf = t->tx_buf;
}
if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
@@ -478,7 +525,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE);
@@ -497,7 +544,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
@@ -508,105 +555,42 @@ end_nomap:
return ret;
}
-static void rspi_receive_init(struct rspi_data *rspi)
+static void rspi_receive_init(const struct rspi_data *rspi)
{
- unsigned char spsr;
+ u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read16(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
- RSPI_SPCR);
+ RSPI_SPSR);
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
-
rspi_receive_init(rspi);
-
- data = (u8 *)t->rx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write16(rspi, 0x00, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 16 or 32-bit access only */
- *data = (u8)rspi_read16(rspi, RSPI_SPDR);
-
- data++;
- remain--;
- }
-
- return 0;
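+ /* Reset the TX and RX buffers, then release the reset */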
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
}
-static void qspi_receive_init(struct rspi_data *rspi)
+static void qspi_receive_init(const struct rspi_data *rspi)
{
- unsigned char spsr;
+ u8 spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read8(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
-}
-
-static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
-{
- int remain = t->len;
- u8 *data;
-
- qspi_receive_init(rspi);
-
- data = (u8 *)t->rx_buf;
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write8(rspi, 0x00, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 8, 16 or 32-bit access */
- *data++ = rspi_read8(rspi, RSPI_SPDR);
- remain--;
- }
-
- return 0;
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
}
-#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
-
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg, sg_dummy;
void *dummy = NULL, *rx_buf = NULL;
struct dma_async_tx_descriptor *desc, *desc_dummy;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
@@ -664,7 +648,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ disable_irq(rspi->rx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
@@ -687,7 +673,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ enable_irq(rspi->rx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
@@ -704,7 +692,7 @@ end_nomap:
return ret;
}
-static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
{
if (t->tx_buf && rspi->chan_tx)
return 1;
@@ -715,97 +703,303 @@ static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
return 0;
}
-static void rspi_work(struct work_struct *work)
+static int rspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
{
- struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
- struct spi_message *mesg;
- struct spi_transfer *t;
- unsigned long flags;
- int ret;
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 spcr, data;
- while (1) {
- spin_lock_irqsave(&rspi->lock, flags);
- if (list_empty(&rspi->queue)) {
- spin_unlock_irqrestore(&rspi->lock, flags);
- break;
- }
- mesg = list_entry(rspi->queue.next, struct spi_message, queue);
- list_del_init(&mesg->queue);
- spin_unlock_irqrestore(&rspi->lock, flags);
-
- rspi_assert_ssl(rspi);
-
- list_for_each_entry(t, &mesg->transfers, transfer_list) {
- if (t->tx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_send_dma(rspi, t);
- else
- ret = send_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- if (t->rx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_receive_dma(rspi, t);
- else
- ret = receive_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- mesg->actual_length += t->len;
+ rspi_receive_init(rspi);
+
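+ /* Select full-duplex (RX) or TX-only mode for this transfer */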
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (rx_buf)
+ spcr &= ~SPCR_TXMD;
+ else
+ spcr |= SPCR_TXMD;
+ rspi_write8(rspi, spcr, RSPI_SPCR);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx_buf++ = ret;
}
- rspi_negate_ssl(rspi);
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
- mesg->status = 0;
- mesg->complete(mesg->context);
+ return 0;
+}
+
+static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (!rspi_is_dma(rspi, xfer))
+ return rspi_transfer_out_in(rspi, xfer);
+
+ if (xfer->tx_buf) {
+ ret = rspi_send_dma(rspi, xfer);
+ if (ret < 0)
+ return ret;
+ }
+ if (xfer->rx_buf)
+ return rspi_receive_dma(rspi, xfer);
+
+ return 0;
+}
+
+static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
+
+ rspi_rz_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
}
- return;
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-error:
- mesg->status = ret;
- mesg->complete(mesg->context);
+ return 0;
+}
+
+static int rspi_rz_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ return rspi_rz_transfer_out_in(rspi, xfer);
+}
+
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
+
+ qspi_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
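+/* TX-only PIO transfer, used for Dual/Quad SPI writes */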
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ const u8 *buf = xfer->tx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_out(rspi, *buf++);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
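+/* RX-only PIO transfer, used for Dual/Quad SPI reads */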
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ u8 *buf = xfer->rx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *buf++ = ret;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
}
static int rspi_setup(struct spi_device *spi)
{
struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
rspi->max_speed_hz = spi->max_speed_hz;
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
set_config_register(rspi, 8);
return 0;
}
-static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- unsigned long flags;
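+ /* Map tx/rx_nbits to the SPCMD SPIMOD (Single/Dual/Quad) and SPRW bits */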
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
- mesg->actual_length = 0;
- mesg->status = -EINPROGRESS;
+ return 0;
+}
- spin_lock_irqsave(&rspi->lock, flags);
- list_add_tail(&mesg->queue, &rspi->queue);
- schedule_work(&rspi->ws);
- spin_unlock_irqrestore(&rspi->lock, flags);
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
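+ /*
+ * Group consecutive transfers that use the same transfer mode into one
+ * sequencer entry (SPCMDn) and program their combined length into the
+ * matching SPBMULn register.
+ */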
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes\n");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
+
+ return 0;
+}
+
+static int rspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
return 0;
}
-static void rspi_cleanup(struct spi_device *spi)
+static int rspi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
}
-static irqreturn_t rspi_irq(int irq, void *_sr)
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
- struct rspi_data *rspi = (struct rspi_data *)_sr;
- unsigned long spsr;
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
irqreturn_t ret = IRQ_NONE;
- unsigned char disable_irq = 0;
+ u8 disable_irq = 0;
rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
@@ -822,10 +1016,40 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
- struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dma_cap_mask_t mask;
struct dma_slave_config cfg;
@@ -887,44 +1111,90 @@ static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
- spi_unregister_master(rspi->master);
rspi_release_dma(rspi);
- free_irq(platform_get_irq(pdev, 0), rspi);
- clk_put(rspi->clk);
- iounmap(rspi->addr);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ master->num_chipselect = num_cs;
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
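+ /* Request an IRQ with a devm-managed "devname:suffix" name */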
+ const char *base = dev_name(dev);
+ size_t len = strlen(base) + strlen(suffix) + 2;
+ char *name = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s:%s", base, suffix);
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct rspi_data *rspi;
- int ret, irq;
- char clk_name[16];
- struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
- const struct platform_device_id *id_entry = pdev->id_entry;
-
- ops = (struct spi_ops *)id_entry->driver_data;
- /* ops parameter check */
- if (!ops->set_config_register) {
- dev_err(&pdev->dev, "there is no set_config_register\n");
- return -ENODEV;
- }
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- return -ENODEV;
- }
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (master == NULL) {
@@ -932,90 +1202,124 @@ static int rspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ of_id = of_match_device(rspi_of_match, &pdev->dev);
+ if (of_id) {
+ ops = of_id->data;
+ ret = rspi_parse_dt(&pdev->dev, master);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
+ master->num_chipselect = 2; /* default */
+ }
+
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+
rspi = spi_master_get_devdata(master);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
rspi->master = master;
- rspi->addr = ioremap(res->start, resource_size(res));
- if (rspi->addr == NULL) {
- dev_err(&pdev->dev, "ioremap error.\n");
- ret = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rspi->addr)) {
+ ret = PTR_ERR(rspi->addr);
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
- rspi->clk = clk_get(&pdev->dev, clk_name);
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
- goto error2;
+ goto error1;
}
- clk_enable(rspi->clk);
- INIT_LIST_HEAD(&rspi->queue);
- spin_lock_init(&rspi->lock);
- INIT_WORK(&rspi->ws, rspi_work);
- init_waitqueue_head(&rspi->wait);
+ pm_runtime_enable(&pdev->dev);
- master->num_chipselect = rspi_pd->num_chipselect;
- if (!master->num_chipselect)
- master->num_chipselect = 2; /* default */
+ init_waitqueue_head(&rspi->wait);
master->bus_num = pdev->id;
master->setup = rspi_setup;
- master->transfer = rspi_transfer;
- master->cleanup = rspi_cleanup;
+ master->auto_runtime_pm = true;
+ master->transfer_one = ops->transfer_one;
+ master->prepare_message = rspi_prepare_message;
+ master->unprepare_message = rspi_unprepare_message;
+ master->mode_bits = ops->mode_bits;
+ master->dev.of_node = pdev->dev.of_node;
+
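+ /*
+ * Prefer dedicated "rx"/"tx" interrupts; fall back to a single
+ * multiplexed interrupt ("mux" by name, or the first IRQ resource).
+ */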
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ goto error2;
+ }
- ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
- goto error3;
+ goto error2;
}
- rspi->irq = irq;
ret = rspi_request_dma(rspi, pdev);
if (ret < 0) {
dev_err(&pdev->dev, "rspi_request_dma failed.\n");
- goto error4;
+ goto error3;
}
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error4;
+ goto error3;
}
dev_info(&pdev->dev, "probed\n");
return 0;
-error4:
- rspi_release_dma(rspi);
- free_irq(irq, rspi);
error3:
- clk_put(rspi->clk);
+ rspi_release_dma(rspi);
error2:
- iounmap(rspi->addr);
+ pm_runtime_disable(&pdev->dev);
error1:
spi_master_put(master);
return ret;
}
-static struct spi_ops rspi_ops = {
- .set_config_register = rspi_set_config_register,
- .send_pio = rspi_send_pio,
- .receive_pio = rspi_receive_pio,
-};
-
-static struct spi_ops qspi_ops = {
- .set_config_register = qspi_set_config_register,
- .send_pio = qspi_send_pio,
- .receive_pio = qspi_receive_pio,
-};
-
static struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
+ { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
{ "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
@@ -1029,6 +1333,7 @@ static struct platform_driver rspi_driver = {
.driver = {
.name = "renesas_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rspi_of_match),
},
};
module_platform_driver(rspi_driver);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 0dc32a11bd3c..bed23384dfab 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -9,7 +9,6 @@
*
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -29,7 +28,6 @@
#include <plat/regs-spi.h>
-#include <plat/fiq.h>
#include <asm/fiq.h>
#include "spi-s3c24xx-fiq.h"
@@ -78,14 +76,12 @@ struct s3c24xx_spi {
unsigned char *rx;
struct clk *clk;
- struct resource *ioarea;
struct spi_master *master;
struct spi_device *curdev;
struct device *dev;
struct s3c2410_spi_info *pdata;
};
-
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -126,25 +122,15 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
{
struct s3c24xx_spi *hw = to_hw(spi);
struct s3c24xx_spi_devstate *cs = spi->controller_state;
- unsigned int bpw;
unsigned int hz;
unsigned int div;
unsigned long clk;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
hz = t ? t->speed_hz : spi->max_speed_hz;
- if (!bpw)
- bpw = 8;
-
if (!hz)
hz = spi->max_speed_hz;
- if (bpw != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
- return -EINVAL;
- }
-
if (spi->mode != cs->mode) {
u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
@@ -517,8 +503,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
- err = -ENOMEM;
- goto err_nomem;
+ return -ENOMEM;
}
hw = spi_master_get_devdata(master);
@@ -548,6 +533,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
/* setup the state for the bitbang driver */
@@ -562,48 +548,32 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
/* find and map our resources */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- err = -ENOENT;
- goto err_no_iores;
- }
-
- hw->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (hw->ioarea == NULL) {
- dev_err(&pdev->dev, "Cannot reserve region\n");
- err = -ENXIO;
- goto err_no_iores;
- }
-
- hw->regs = ioremap(res->start, resource_size(res));
- if (hw->regs == NULL) {
- dev_err(&pdev->dev, "Cannot map IO\n");
- err = -ENXIO;
- goto err_no_iomap;
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
+ goto err_no_pdata;
}
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
- goto err_no_irq;
+ goto err_no_pdata;
}
- err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw);
+ err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
+ pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_no_irq;
+ goto err_no_pdata;
}
- hw->clk = clk_get(&pdev->dev, "spi");
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
err = PTR_ERR(hw->clk);
- goto err_no_clk;
+ goto err_no_pdata;
}
/* setup any gpio we can */
@@ -615,7 +585,8 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
- err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev));
+ err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
+ dev_name(&pdev->dev));
if (err) {
dev_err(&pdev->dev, "Failed to get gpio for cs\n");
goto err_register;
@@ -639,27 +610,10 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
return 0;
err_register:
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(pdata->pin_cs);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- err_no_clk:
- free_irq(hw->irq, hw);
-
- err_no_irq:
- iounmap(hw->regs);
-
- err_no_iomap:
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
- err_no_iores:
err_no_pdata:
spi_master_put(hw->master);
-
- err_nomem:
return err;
}
@@ -668,19 +622,7 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
spi_bitbang_stop(&hw->bitbang);
-
clk_disable(hw->clk);
- clk_put(hw->clk);
-
- free_irq(hw->irq, hw);
- iounmap(hw->regs);
-
- if (hw->set_cs == s3c24xx_spi_gpiocs)
- gpio_free(hw->pdata->pin_cs);
-
- release_resource(hw->ioarea);
- kfree(hw->ioarea);
-
spi_master_put(hw->master);
return 0;
}
@@ -691,6 +633,11 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
static int s3c24xx_spi_suspend(struct device *dev)
{
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -704,7 +651,7 @@ static int s3c24xx_spi_resume(struct device *dev)
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
s3c24xx_spi_initialsetup(hw);
- return 0;
+ return spi_master_resume(hw->master);
}
static const struct dev_pm_ops s3c24xx_spi_pmops = {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 4c4b0a1219a7..f19cd97855e8 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -34,10 +34,6 @@
#include <linux/platform_data/spi-s3c64xx.h>
-#ifdef CONFIG_S3C_DMA
-#include <mach/dma.h>
-#endif
-
#define MAX_SPI_PORTS 3
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
@@ -200,9 +196,6 @@ struct s3c64xx_spi_driver_data {
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
-#ifdef CONFIG_S3C_DMA
- struct samsung_dma_ops *ops;
-#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
bool cs_gpio;
@@ -284,104 +277,8 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-#ifdef CONFIG_S3C_DMA
-/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
- .name = "samsung-spi-dma",
-};
-
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
-{
- struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep info;
- struct samsung_dma_config config;
-
- if (dma->direction == DMA_DEV_TO_MEM) {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, rx_dma);
- config.direction = sdd->rx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
- } else {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, tx_dma);
- config.direction = sdd->tx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
- }
-
- info.cap = DMA_SLAVE;
- info.len = len;
- info.fp = s3c64xx_spi_dmacb;
- info.fp_param = dma;
- info.direction = dma->direction;
- info.buf = buf;
-
- sdd->ops->prepare((enum dma_ch)dma->ch, &info);
- sdd->ops->trigger((enum dma_ch)dma->ch);
-}
-
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
- struct samsung_dma_req req;
- struct device *dev = &sdd->pdev->dev;
-
- sdd->ops = samsung_dma_get_ops();
-
- req.cap = DMA_SLAVE;
- req.client = &s3c64xx_spi_dma_client;
-
- sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->rx_dma.dmach, &req, dev, "rx");
- sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->tx_dma.dmach, &req, dev, "tx");
-
- return 1;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /*
- * If DMA resource was not available during
- * probe, no need to continue with dma requests
- * else Acquire DMA channels
- */
- while (!is_polling(sdd) && !acquire_dma(sdd))
- usleep_range(10000, 11000);
-
- return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- if (!is_polling(sdd)) {
- sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
- &s3c64xx_spi_dma_client);
- sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
- &s3c64xx_spi_dma_client);
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
-{
- sdd->ops->stop((enum dma_ch)dma->ch);
-}
-#else
-
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
@@ -407,8 +304,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dmaengine_slave_config(dma->ch, &config);
}
- desc = dmaengine_prep_slave_single(dma->ch, buf, len,
- dma->direction, DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
@@ -437,6 +334,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out;
}
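+ /* Let the SPI core DMA-map transfer buffers for this channel */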
+ spi->dma_rx = sdd->rx_dma.ch;
sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
(void *)sdd->tx_dma.dmach, dev, "tx");
@@ -445,6 +343,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out_rx;
}
+ spi->dma_tx = sdd->tx_dma.ch;
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -477,12 +376,14 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- dmaengine_terminate_all(dma->ch);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
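+ /* Use DMA only for transfers that do not fit in the FIFO */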
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
-#endif
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
@@ -515,7 +416,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -547,7 +448,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
@@ -555,23 +456,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
- if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
- /* Deselect the last toggled device */
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
- sdd->tgl_spi = NULL;
- }
-
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
@@ -593,112 +477,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
return RX_FIFO_LVL(status, sdd);
}
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
+ u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
- if (dma_mode) {
- val = msecs_to_jiffies(ms) + 10;
- val = wait_for_completion_timeout(&sdd->xfer_completion, val);
- } else {
- u32 status;
- val = msecs_to_loops(ms);
- do {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous transfer completed within the timeout, proceed;
+ * otherwise return -EIO.
+ * DMA TX returns as soon as the data has been written into the FIFO,
+ * without waiting for the actual bus transmission to finish.
+ * DMA RX returns only after the DMA engine has read the data from the
+ * FIFO, which requires the bus transmission to finish, so no extra
+ * wait is needed if the transfer involved RX (with or without TX).
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+ }
+
}
- if (dma_mode) {
- u32 status;
-
- /*
- * If the previous xfer was completed within timeout, then
- * proceed further else return -EIO.
- * DmaTx returns after simply writing data in the FIFO,
- * w/o waiting for real transmission on the bus to finish.
- * DmaRx returns only after Dma read data from FIFO which
- * needs bus transmission to finish, so we don't worry if
- * Xfer involved Rx(with or without Tx).
- */
- if (val && !xfer->rx_buf) {
- val = msecs_to_loops(10);
- status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sdd)
- || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
- && --val) {
- cpu_relax();
- status = readl(regs + S3C64XX_SPI_STATUS);
- }
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
- }
+ return 0;
+}
- /* If timed out while checking rx/tx status return error */
- if (!val)
- return -EIO;
- } else {
- int loops;
- u32 cpy_len;
- u8 *buf;
-
- /* If it was only Tx */
- if (!xfer->rx_buf) {
- sdd->state &= ~TXBUSY;
- return 0;
- }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
- /*
- * If the receive length is bigger than the controller fifo
- * size, calculate the loops and read the fifo as many times.
- * loops = length / max fifo size (calculated by using the
- * fifo mask).
- * For any size less than the fifo size the below code is
- * executed atleast once.
- */
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
- buf = xfer->rx_buf;
- do {
- /* wait for data to be received in the fifo */
- cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
- (loops ? ms : 0));
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
- switch (sdd->cur_bpw) {
- case 32:
- ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 4);
- break;
- case 16:
- ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 2);
- break;
- default:
- ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len);
- break;
- }
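+ /* Busy-wait until the RX FIFO holds the whole transfer or we time out */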
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
- buf = buf + cpy_len;
- } while (loops--);
- sdd->state &= ~RXBUSY;
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
}
- return 0;
-}
+ /*
+ * If the receive length is bigger than the controller FIFO size,
+ * calculate how many times the FIFO has to be drained:
+ * loops = length / max FIFO size (derived from the FIFO mask).
+ * For any length up to the FIFO size the loop below still runs
+ * at least once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi == spi)
- sdd->tgl_spi = NULL;
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ return 0;
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -774,81 +657,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return 0;
-
- /* First mark all xfer unmapped */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- }
-
- /* Map until end or first fail */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, xfer->tx_dma)) {
- dev_err(dev, "dma_map_single Tx failed\n");
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
-
- if (xfer->rx_buf != NULL) {
- xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
- xfer->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, xfer->rx_dma)) {
- dev_err(dev, "dma_map_single Rx failed\n");
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->rx_buf != NULL
- && xfer->rx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->rx_dma,
- xfer->len, DMA_FROM_DEVICE);
-
- if (xfer->tx_buf != NULL
- && xfer->tx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- }
-}
-
static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -866,13 +674,6 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
s3c64xx_spi_config(sdd);
}
- /* Map all the transfers if needed */
- if (s3c64xx_spi_map_mssg(sdd, msg)) {
- dev_err(&spi->dev,
- "Xfer: Unable to map message buffers!\n");
- return -ENOMEM;
- }
-
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
@@ -890,19 +691,12 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
unsigned long flags;
int use_dma;
- reinit_completion(&sdd->xfer_completion);
+ reinit_completion(&sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- return -EIO;
- }
-
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
@@ -927,12 +721,12 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- /* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-
spin_unlock_irqrestore(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -944,10 +738,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+ dmaengine_terminate_all(sdd->tx_dma.ch);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
+ dmaengine_terminate_all(sdd->rx_dma.ch);
}
} else {
flush_fifo(sdd);
@@ -956,16 +750,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
return status;
}
-static int s3c64xx_spi_unprepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
-
- return 0;
-}
-
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
struct spi_device *spi)
{
@@ -1095,14 +879,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_put(&sdd->pdev->dev);
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
@@ -1341,7 +1123,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
- master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
@@ -1350,6 +1131,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
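Editor's note: with the driver's private s3c64xx_spi_map_mssg()/s3c64xx_spi_unmap_mssg() helpers removed, DMA mapping is left to the SPI core, which only maps transfers for which the new can_dma callback returns true. The callback itself is outside this hunk; a minimal sketch of what it presumably looks like, reusing the FIFO-threshold test the removed mapping code applied, is shown below (an illustration under that assumption, not the actual implementation).

/*
 * Hedged sketch: let the core DMA-map only transfers that do not fit in
 * half the FIFO, mirroring the removed
 * "xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1)" check.
 */
static bool s3c64xx_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}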
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 9eda21d739c6..237f2e7a7179 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -183,24 +183,9 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- int bpw;
- uint32_t hz;
-
if (t && t->len + tlen > SC18IS602_BUFSIZ)
return -EINVAL;
- bpw = spi->bits_per_word;
- if (t && t->bits_per_word)
- bpw = t->bits_per_word;
- if (bpw != 8)
- return -EINVAL;
-
- hz = spi->max_speed_hz;
- if (t && t->speed_hz)
- hz = t->speed_hz;
- if (hz == 0)
- return -EINVAL;
-
return 0;
}
@@ -212,22 +197,15 @@ static int sc18is602_transfer_one(struct spi_master *master,
struct spi_transfer *t;
int status = 0;
- /* SC18IS602 does not support CS2 */
- if (hw->id == sc18is602 && spi->chip_select == 2) {
- status = -ENXIO;
- goto error;
- }
-
hw->tlen = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 hz = t->speed_hz ? : spi->max_speed_hz;
bool do_transfer;
status = sc18is602_check_transfer(spi, t, hw->tlen);
if (status < 0)
break;
- status = sc18is602_setup_transfer(hw, hz, spi->mode);
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
if (status < 0)
break;
@@ -245,7 +223,6 @@ static int sc18is602_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
}
-error:
m->status = status;
spi_finalize_current_message(master);
@@ -254,13 +231,13 @@ error:
static int sc18is602_setup(struct spi_device *spi)
{
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
- if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
- return -EINVAL;
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
- return sc18is602_check_transfer(spi, NULL, 0);
+ return 0;
}
static int sc18is602_probe(struct i2c_client *client,
@@ -315,11 +292,14 @@ static int sc18is602_probe(struct i2c_client *client,
}
master->bus_num = client->adapter->nr;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
master->dev.of_node = np;
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
- error = spi_register_master(master);
+ error = devm_spi_register_master(dev, master);
if (error)
goto error_reg;
@@ -330,16 +310,6 @@ error_reg:
return error;
}
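Editor's note: registering min_speed_hz and max_speed_hz lets the SPI core reject out-of-range transfer speeds instead of the driver checking them per transfer. As a rough worked example, assuming the part is clocked from a 7.3728 MHz oscillator (a hypothetical but plausible value; hw->freq actually comes from the probe path), the accepted range would be:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 7372800;	/* assumed oscillator frequency, Hz */

	printf("min_speed_hz = %u\n", freq / 128);	/* 57600 */
	printf("max_speed_hz = %u\n", freq / 4);	/* 1843200 */
	return 0;
}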
-static int sc18is602_remove(struct i2c_client *client)
-{
- struct sc18is602 *hw = i2c_get_clientdata(client);
- struct spi_master *master = hw->master;
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id sc18is602_id[] = {
{ "sc18is602", sc18is602 },
{ "sc18is602b", sc18is602b },
@@ -353,7 +323,6 @@ static struct i2c_driver sc18is602_driver = {
.name = "sc18is602",
},
.probe = sc18is602_probe,
- .remove = sc18is602_remove,
.id_table = sc18is602_id,
};
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 292567ab4c6c..9009456bdf4d 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -46,8 +46,6 @@
/* SPSR */
#define RXFL (1 << 2)
-#define hspi2info(h) (h->dev->platform_data)
-
struct hspi_priv {
void __iomem *addr;
struct spi_master *master;
@@ -113,14 +111,9 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
{
struct spi_device *spi = msg->spi;
struct device *dev = hspi->dev;
- u32 target_rate;
u32 spcr, idiv_clk;
u32 rate, best_rate, min, tmp;
- target_rate = t ? t->speed_hz : 0;
- if (!target_rate)
- target_rate = spi->max_speed_hz;
-
/*
* find best IDIV/CLKCx settings
*/
@@ -140,7 +133,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
- tmp = abs(target_rate - rate);
+ tmp = abs(t->speed_hz - rate);
if (tmp < min) {
min = tmp;
spcr = idiv_clk;
@@ -153,7 +146,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
if (spi->mode & SPI_CPOL)
spcr |= 1 << 6;
- dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
@@ -197,7 +190,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
hspi_write(hspi, SPTBR, tx);
- /* wait recive */
+ /* wait receive */
ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
if (ret < 0)
break;
@@ -230,29 +223,6 @@ static int hspi_transfer_one_message(struct spi_master *master,
return ret;
}
-static int hspi_setup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- if (8 != spi->bits_per_word) {
- dev_err(dev, "bits_per_word should be 8\n");
- return -EIO;
- }
-
- dev_dbg(dev, "%s setup\n", spi->modalias);
-
- return 0;
-}
-
-static void hspi_cleanup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- dev_dbg(dev, "%s cleanup\n", spi->modalias);
-}
-
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -298,22 +268,23 @@ static int hspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- master->num_chipselect = 1;
master->bus_num = pdev->id;
- master->setup = hspi_setup;
- master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error1;
+ goto error2;
}
return 0;
+ error2:
+ pm_runtime_disable(&pdev->dev);
error1:
clk_put(clk);
error0:
@@ -353,4 +324,4 @@ module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_ALIAS("platform:sh_spi");
+MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index c74298cf70e2..e850d03e7190 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -15,59 +15,108 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <asm/unaligned.h>
+
+struct sh_msiof_chipdata {
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 master_flags;
+};
+
struct sh_msiof_spi_priv {
- struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
+ const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
- unsigned long flags;
int tx_fifo_size;
int rx_fifo_size;
};
-#define TMDR1 0x00
-#define TMDR2 0x04
-#define TMDR3 0x08
-#define RMDR1 0x10
-#define RMDR2 0x14
-#define RMDR3 0x18
-#define TSCR 0x20
-#define RSCR 0x22
-#define CTR 0x28
-#define FCTR 0x30
-#define STR 0x40
-#define IER 0x44
-#define TDR1 0x48
-#define TDR2 0x4c
-#define TFDR 0x50
-#define RDR1 0x58
-#define RDR2 0x5c
-#define RFDR 0x60
-
-#define CTR_TSCKE (1 << 15)
-#define CTR_TFSE (1 << 14)
-#define CTR_TXE (1 << 9)
-#define CTR_RXE (1 << 8)
-
-#define STR_TEOF (1 << 23)
-#define STR_REOF (1 << 7)
+#define TMDR1 0x00 /* Transmit Mode Register 1 */
+#define TMDR2 0x04 /* Transmit Mode Register 2 */
+#define TMDR3 0x08 /* Transmit Mode Register 3 */
+#define RMDR1 0x10 /* Receive Mode Register 1 */
+#define RMDR2 0x14 /* Receive Mode Register 2 */
+#define RMDR3 0x18 /* Receive Mode Register 3 */
+#define TSCR 0x20 /* Transmit Clock Select Register */
+#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define CTR 0x28 /* Control Register */
+#define FCTR 0x30 /* FIFO Control Register */
+#define STR 0x40 /* Status Register */
+#define IER 0x44 /* Interrupt Enable Register */
+#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define TFDR 0x50 /* Transmit FIFO Data Register */
+#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define RFDR 0x60 /* Receive FIFO Data Register */
+
+/* TMDR1 and RMDR1 */
+#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
+#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
+#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+/* TMDR1 */
+#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+
+/* TMDR2 and RMDR2 */
+#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+
+/* TSCR and RSCR */
+#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS(i) (((i) - 1) << 8)
+#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0x0000
+#define SCR_BRDV_DIV_4 0x0001
+#define SCR_BRDV_DIV_8 0x0002
+#define SCR_BRDV_DIV_16 0x0003
+#define SCR_BRDV_DIV_32 0x0004
+#define SCR_BRDV_DIV_1 0x0007
+
+/* CTR */
+#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
+#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
+#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
+#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE 0x00000200 /* Transmit Enable */
+#define CTR_RXE 0x00000100 /* Receive Enable */
+
+/* STR and IER */
+#define STR_TEOF 0x00800000 /* Frame Transmission End */
+#define STR_REOF 0x00000080 /* Frame Reception End */
+
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
@@ -131,28 +180,27 @@ static struct {
unsigned short div;
unsigned short scr;
} const sh_msiof_spi_clk_table[] = {
- { 1, 0x0007 },
- { 2, 0x0000 },
- { 4, 0x0001 },
- { 8, 0x0002 },
- { 16, 0x0003 },
- { 32, 0x0004 },
- { 64, 0x1f00 },
- { 128, 0x1f01 },
- { 256, 0x1f02 },
- { 512, 0x1f03 },
- { 1024, 0x1f04 },
+ { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
+ { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
+ { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
+ { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
+ { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
+ { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
+ { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
+ { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
+ { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
+ { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
+ { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
- unsigned long parent_rate,
- unsigned long spi_hz)
+ unsigned long parent_rate, u32 spi_hz)
{
unsigned long div = 1024;
size_t k;
if (!WARN_ON(!spi_hz || !parent_rate))
- div = parent_rate / spi_hz;
+ div = DIV_ROUND_UP(parent_rate, spi_hz);
/* TODO: make more fine grained */
@@ -164,12 +212,13 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
- sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+ if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
}
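Editor's note: the rewritten table expresses each overall divisor as a prescaler (SCR_BRPS, 1-32) combined with a power-of-two divisor (SCR_BRDV), and sh_msiof_spi_set_clk_regs() now rounds the required divisor up before picking the first table entry that is large enough. A small user-space sketch of that lookup, using the same divisor values (a standalone illustration, not driver code):

#include <stdio.h>

/* Overall divisors encoded by sh_msiof_spi_clk_table above (BRPS * BRDV). */
static const unsigned int msiof_divs[] = {
	1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024
};

/* Pick the first table divisor that is at least parent_rate / spi_hz. */
static unsigned int pick_div(unsigned long parent_rate, unsigned long spi_hz)
{
	unsigned long need = (parent_rate + spi_hz - 1) / spi_hz; /* DIV_ROUND_UP */
	unsigned int k;

	for (k = 0; k < sizeof(msiof_divs) / sizeof(msiof_divs[0]) - 1; k++)
		if (msiof_divs[k] >= need)
			break;
	/* like the driver, clamp to the largest divisor if nothing matches */
	return msiof_divs[k];
}

int main(void)
{
	/* e.g. 52 MHz parent clock, 10 MHz requested -> need 6 -> divisor 8 */
	printf("divisor = %u\n", pick_div(52000000, 10000000));
	return 0;
}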
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
u32 cpol, u32 cpha,
- u32 tx_hi_z, u32 lsb_first)
+ u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
u32 tmp;
int edge;
@@ -182,18 +231,26 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
* 1 1 11 11 1 1
*/
sh_msiof_write(p, FCTR, 0);
- sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
- sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
- tmp = 0xa0000000;
- tmp |= cpol << 30; /* TSCKIZ */
- tmp |= cpol << 28; /* RSCKIZ */
+ tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
+ tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, RMDR1, tmp);
+
+ tmp = 0;
+ tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
+ tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << 27; /* TEDG */
- tmp |= edge << 26; /* REDG */
- tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ tmp |= edge << CTR_TEDG_SHIFT;
+ tmp |= edge << CTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
sh_msiof_write(p, CTR, tmp);
}
@@ -201,12 +258,12 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf)
+ if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | 1);
+ sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
if (rx_buf)
sh_msiof_write(p, RMDR2, dr2);
@@ -359,76 +416,45 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}
-static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
-{
- int bits;
-
- bits = t ? t->bits_per_word : 0;
- if (!bits)
- bits = spi->bits_per_word;
- return bits;
-}
-
-static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
- struct spi_transfer *t)
+static int sh_msiof_spi_setup(struct spi_device *spi)
{
- unsigned long hz;
-
- hz = t ? t->speed_hz : 0;
- if (!hz)
- hz = spi->max_speed_hz;
- return hz;
-}
+ struct device_node *np = spi->master->dev.of_node;
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- int bits;
+ if (!np) {
+ /*
+ * Use spi->controller_data for CS (same strategy as spi_gpio),
+ * if any; otherwise let HW control CS
+ */
+ spi->cs_gpio = (uintptr_t)spi->controller_data;
+ }
- /* noting to check hz values against since parent clock is disabled */
+ /* Configure pins before deasserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
- bits = sh_msiof_spi_bits(spi, t);
- if (bits < 8)
- return -EINVAL;
- if (bits > 32)
- return -EINVAL;
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return spi_bitbang_setup_transfer(spi, t);
+ return 0;
}
-static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+static int sh_msiof_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
- int value;
-
- /* chip select is active low unless SPI_CS_HIGH is set */
- if (spi->mode & SPI_CS_HIGH)
- value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
- else
- value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
-
- if (is_on == BITBANG_CS_ACTIVE) {
- if (!test_and_set_bit(0, &p->flags)) {
- pm_runtime_get_sync(&p->pdev->dev);
- clk_enable(p->clk);
- }
-
- /* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST));
- }
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ const struct spi_device *spi = msg->spi;
- /* use spi->controller data for CS (same strategy as spi_gpio) */
- gpio_set_value((unsigned)spi->controller_data, value);
-
- if (is_on == BITBANG_CS_INACTIVE) {
- if (test_and_clear_bit(0, &p->flags)) {
- clk_disable(p->clk);
- pm_runtime_put(&p->pdev->dev);
- }
- }
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+ return 0;
}
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
@@ -482,7 +508,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
/* clear status bits */
sh_msiof_reset_str(p);
- /* shut down frame, tx/tx and clock signals */
+ /* shut down frame, rx/tx and clock signals */
ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
if (rx_buf)
@@ -500,9 +526,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
return ret;
}
-static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
int bits;
@@ -512,7 +540,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int n;
bool swab;
- bits = sh_msiof_spi_bits(spi, t);
+ bits = t->bits_per_word;
if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
bits = 32;
@@ -562,8 +590,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
/* setup clocks (clock already enabled in chipselect()) */
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
- sh_msiof_spi_hz(spi, t));
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
/* transfer in fifo sized chunks */
words = t->len / bytes_per_word;
@@ -583,22 +610,36 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
words -= n;
}
- return bytes_done;
-}
-
-static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits)
-{
- BUG(); /* unused but needed by bitbang code */
return 0;
}
+static const struct sh_msiof_chipdata sh_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .master_flags = 0,
+};
+
+static const struct sh_msiof_chipdata r8a779x_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .master_flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", .data = &sh_data },
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
struct sh_msiof_spi_info *info;
struct device_node *np = dev->of_node;
- u32 num_cs = 0;
+ u32 num_cs = 1;
info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
if (!info) {
@@ -628,6 +669,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
+ const struct of_device_id *of_id;
struct sh_msiof_spi_priv *p;
int i;
int ret;
@@ -635,17 +677,21 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
if (master == NULL) {
dev_err(&pdev->dev, "failed to allocate spi master\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
- if (pdev->dev.of_node)
+
+ of_id = of_match_device(sh_msiof_match, &pdev->dev);
+ if (of_id) {
+ p->chipdata = of_id->data;
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
- else
+ } else {
+ p->chipdata = (const void *)pdev->id_entry->driver_data;
p->info = dev_get_platdata(&pdev->dev);
+ }
if (!p->info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
@@ -655,108 +701,91 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
init_completion(&p->done);
- p->clk = clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i = platform_get_irq(pdev, 0);
- if (!r || i < 0) {
- dev_err(&pdev->dev, "cannot get platform resources\n");
+ if (i < 0) {
+ dev_err(&pdev->dev, "cannot get platform IRQ\n");
ret = -ENOENT;
- goto err2;
+ goto err1;
}
- p->mapbase = ioremap_nocache(r->start, resource_size(r));
- if (!p->mapbase) {
- dev_err(&pdev->dev, "unable to ioremap\n");
- ret = -ENXIO;
- goto err2;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p->mapbase)) {
+ ret = PTR_ERR(p->mapbase);
+ goto err1;
}
- ret = request_irq(i, sh_msiof_spi_irq, 0,
- dev_name(&pdev->dev), p);
+ ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
+ dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "unable to request irq\n");
- goto err3;
+ goto err1;
}
p->pdev = pdev;
pm_runtime_enable(&pdev->dev);
- /* The standard version of MSIOF use 64 word FIFOs */
- p->tx_fifo_size = 64;
- p->rx_fifo_size = 64;
-
/* Platform data may override FIFO sizes */
+ p->tx_fifo_size = p->chipdata->tx_fifo_size;
+ p->rx_fifo_size = p->chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* init master and bitbang code */
+ /* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = 0;
+ master->flags = p->chipdata->master_flags;
master->bus_num = pdev->id;
+ master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = p->info->num_chipselect;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
-
- p->bitbang.master = master;
- p->bitbang.chipselect = sh_msiof_spi_chipselect;
- p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
- p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
- p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
-
- ret = spi_bitbang_start(&p->bitbang);
- if (ret == 0)
- return 0;
+ master->setup = sh_msiof_spi_setup;
+ master->prepare_message = sh_msiof_prepare_message;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->auto_runtime_pm = true;
+ master->transfer_one = sh_msiof_transfer_one;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto err2;
+ }
+
+ return 0;
- pm_runtime_disable(&pdev->dev);
- err3:
- iounmap(p->mapbase);
err2:
- clk_put(p->clk);
+ pm_runtime_disable(&pdev->dev);
err1:
spi_master_put(master);
- err0:
return ret;
}
static int sh_msiof_spi_remove(struct platform_device *pdev)
{
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
- int ret;
-
- ret = spi_bitbang_stop(&p->bitbang);
- if (!ret) {
- pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), p);
- iounmap(p->mapbase);
- clk_put(p->clk);
- spi_master_put(p->bitbang.master);
- }
- return ret;
+ pm_runtime_disable(&pdev->dev);
+ return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", },
- { .compatible = "renesas,sh-mobile-msiof", },
+static struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
{},
};
-MODULE_DEVICE_TABLE(of, sh_msiof_match);
-#endif
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 38eb24df796c..8b44b71f5024 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -14,7 +14,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -109,7 +108,7 @@ static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
- if (sp->info && sp->info->chip_select)
+ if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, dev->chip_select, value);
}
@@ -131,6 +130,11 @@ static int sh_sci_spi_probe(struct platform_device *dev)
platform_set_drvdata(dev, sp);
sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
/* setup spi bitbang adaptor */
sp->bitbang.master = master;
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index c120a70094f2..f6f2c7010177 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -171,7 +171,6 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len)
@@ -213,9 +212,7 @@ static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
}
if (list_is_last(&t->transfer_list, &mesg->transfers)) {
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
ss->cr1 &= ~SPI_SH_TBE;
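Editor's note: the open-coded read/mask/write sequences here and in spi_sh_receive() below are collapsed into the driver's spi_sh_clear_bit() helper. Its body is not part of this diff; assuming it mirrors the existing spi_sh_set_bit() and the read/write helpers visible above, it is presumably along these lines:

/*
 * Presumed shape of the helper used above; the real one lives elsewhere
 * in spi-sh.c and may differ in detail.
 */
static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
			     unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp &= ~val;
	spi_sh_write(ss, tmp, offset);
}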
@@ -239,7 +236,6 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
int remain = t->len;
int cur_len;
unsigned char *data;
- unsigned long tmp;
long ret;
if (t->len > SPI_SH_MAX_BYTE)
@@ -247,9 +243,7 @@ static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
else
spi_sh_write(ss, t->len, SPI_SH_CR3);
- tmp = spi_sh_read(ss, SPI_SH_CR1);
- tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
- spi_sh_write(ss, tmp, SPI_SH_CR1);
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
spi_sh_wait_write_buffer_empty(ss);
@@ -358,9 +352,6 @@ static int spi_sh_setup(struct spi_device *spi)
{
struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
pr_debug("%s: enter\n", __func__);
spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI sycle stop */
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index ed5e501c4652..1a77ad52812f 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -22,7 +22,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
-#include <linux/sirfsoc_dma.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -132,6 +131,8 @@
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
+#define SIRFSOC_MAX_CMD_BYTES 4
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
struct completion rx_done;
@@ -162,6 +163,12 @@ struct sirfsoc_spi {
void *dummypage;
int word_width; /* in bytes */
+ /*
+ * If the TX size is no more than 4 bytes and there is no RX buffer,
+ * use the command model
+ */
+ bool tx_by_cmd;
+
int chipselect[0];
};
@@ -260,6 +267,12 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
+ complete(&sspi->tx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ return IRQ_HANDLED;
+ }
+
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
@@ -310,6 +323,34 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ /*
+ * Fill tx_buf into the command register and wait for the transfer to complete
+ */
+ if (sspi->tx_by_cmd) {
+ u32 cmd;
+ memcpy(&cmd, sspi->tx, t->len);
+
+ if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
+ cmd = cpu_to_be32(cmd) >>
+ ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
+ if (sspi->word_width == 2 && t->len == 4 &&
+ (!(spi->mode & SPI_LSB_FIRST)))
+ cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
+
+ writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
+ writel(SIRFSOC_SPI_FRM_END_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_CMD_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ return 0;
+ }
+
+ return t->len;
+ }
+
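Editor's note: for a TX-only transfer of up to SIRFSOC_MAX_CMD_BYTES the data is packed into the 32-bit command register; for 8-bit words sent MSB-first the conversion above right-aligns the bytes in transmit order. A small user-space sketch of just that packing, assuming a little-endian CPU (which is what the cpu_to_be32() swap targets) and a hypothetical 3-byte command:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() on little-endian hosts */

#define SIRFSOC_MAX_CMD_BYTES	4

int main(void)
{
	const uint8_t tx[] = { 0xa0, 0xb1, 0xc2 };	/* hypothetical 3-byte command */
	size_t len = sizeof(tx);
	uint32_t cmd = 0;

	memcpy(&cmd, tx, len);
	/* 8-bit words, MSB first: same conversion as in the driver above */
	cmd = htonl(cmd) >> ((SIRFSOC_MAX_CMD_BYTES - len) * 8);

	printf("CMD register value: 0x%08x\n", (unsigned int)cmd);	/* 0x00a0b1c2 */
	return 0;
}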
if (sspi->left_tx_word == 1) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_ENA_AUTO_CLR,
@@ -459,11 +500,6 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
sspi->rx_word = spi_sirfsoc_rx_word_u8;
sspi->tx_word = spi_sirfsoc_tx_word_u8;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_BYTE;
- sspi->word_width = 1;
break;
case 12:
case 16:
@@ -471,26 +507,22 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
sspi->rx_word = spi_sirfsoc_rx_word_u16;
sspi->tx_word = spi_sirfsoc_tx_word_u16;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_WORD;
- sspi->word_width = 2;
break;
case 32:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
sspi->rx_word = spi_sirfsoc_rx_word_u32;
sspi->tx_word = spi_sirfsoc_tx_word_u32;
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- SIRFSOC_SPI_FIFO_WIDTH_DWORD;
- sspi->word_width = 4;
break;
default:
BUG();
}
+ sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ sspi->word_width;
+
if (!(spi->mode & SPI_CS_HIGH))
regval |= SIRFSOC_SPI_CS_IDLE_STAT;
if (!(spi->mode & SPI_LSB_FIRST))
@@ -519,6 +551,14 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
+ if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
+ regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
+ SIRFSOC_SPI_CMD_MODE);
+ sspi->tx_by_cmd = true;
+ } else {
+ regval &= ~SIRFSOC_SPI_CMD_MODE;
+ sspi->tx_by_cmd = false;
+ }
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
if (IS_DMA_VALID(t)) {
@@ -536,16 +576,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static int spi_sirfsoc_setup(struct spi_device *spi)
{
- struct sirfsoc_spi *sspi;
-
if (!spi->max_speed_hz)
return -EINVAL;
- sspi = spi_master_get_devdata(spi->master);
-
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
return spi_sirfsoc_setup_transfer(spi, NULL);
}
@@ -555,8 +588,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct spi_master *master;
struct resource *mem_res;
int num_cs, cs_gpio, irq;
- u32 rx_dma_ch, tx_dma_ch;
- dma_cap_mask_t dma_cap_mask;
int i;
int ret;
@@ -567,20 +598,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
goto err_cs;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "sirf,spi-dma-rx-channel", &rx_dma_ch);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get rx dma channel\n");
- goto err_cs;
- }
-
- ret = of_property_read_u32(pdev->dev.of_node,
- "sirf,spi-dma-tx-channel", &tx_dma_ch);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get tx dma channel\n");
- goto err_cs;
- }
-
master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -644,18 +661,13 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- dma_cap_zero(dma_cap_mask);
- dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
-
- sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)rx_dma_ch);
+ sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
if (!sspi->rx_chan) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
ret = -ENODEV;
goto free_master;
}
- sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)tx_dma_ch);
+ sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
if (!sspi->tx_chan) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
ret = -ENODEV;
@@ -731,11 +743,16 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(sspi->clk);
return 0;
@@ -752,15 +769,13 @@ static int spi_sirfsoc_resume(struct device *dev)
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- return 0;
+ return spi_master_resume(master);
}
-
-static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
- .suspend = spi_sirfsoc_suspend,
- .resume = spi_sirfsoc_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
+ spi_sirfsoc_resume);
+
static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,prima2-spi", },
{ .compatible = "sirf,marco-spi", },
@@ -772,9 +787,7 @@ static struct platform_driver spi_sirfsoc_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &spi_sirfsoc_pm_ops,
-#endif
.of_match_table = spi_sirfsoc_of_match,
},
.probe = spi_sirfsoc_probe,
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
new file mode 100644
index 000000000000..d266a8702067
--- /dev/null
+++ b/drivers/spi/spi-sun4i.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN4I_FIFO_DEPTH 64
+
+#define SUN4I_RXDATA_REG 0x00
+
+#define SUN4I_TXDATA_REG 0x04
+
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CTL_CPHA BIT(2)
+#define SUN4I_CTL_CPOL BIT(3)
+#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
+#define SUN4I_CTL_LMTF BIT(6)
+#define SUN4I_CTL_TF_RST BIT(8)
+#define SUN4I_CTL_RF_RST BIT(9)
+#define SUN4I_CTL_XCH BIT(10)
+#define SUN4I_CTL_CS_MASK 0x3000
+#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
+#define SUN4I_CTL_DHB BIT(15)
+#define SUN4I_CTL_CS_MANUAL BIT(16)
+#define SUN4I_CTL_CS_LEVEL BIT(17)
+#define SUN4I_CTL_TP BIT(18)
+
+#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_TC BIT(16)
+
+#define SUN4I_INT_STA_REG 0x10
+
+#define SUN4I_DMA_CTL_REG 0x14
+
+#define SUN4I_WAIT_REG 0x18
+
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_FIFO_STA_REG 0x28
+#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_TF_CNT_BITS 16
+
+struct sun4i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+ reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ reg &= ~SUN4I_CTL_CS_MASK;
+ reg |= SUN4I_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN4I_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN4I_CTL_CS_LEVEL;
+
+ /*
+ * Even though this looks irrelevant since we are supposed to
+ * be controlling the chip select manually, this bit also
+ * controls the levels of the chip select for inactive
+ * devices.
+ *
+ * If we don't set it, the chip select level will go low by
+ * default when the device is idle, which is not really
+ * expected in the common case where the chip select is active
+ * low.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
+ else
+ reg |= SUN4I_CTL_CS_ACTIVE_LOW;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+}
+
+static int sun4i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN4I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
+
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ /* Reset FIFOs */
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN4I_CTL_CPOL;
+ else
+ reg &= ~SUN4I_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN4I_CTL_CPHA;
+ else
+ reg &= ~SUN4I_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN4I_CTL_LMTF;
+ else
+ reg &= ~SUN4I_CTL_LMTF;
+
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN4I_CTL_DHB;
+ else
+ reg |= SUN4I_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN4I_CTL_CS_MANUAL;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated thanks to this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN4I_CLK_CTL_CDR1(div);
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+
+ /* Fill the TX FIFO */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+out:
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
+
+ return ret;
+}
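Editor's note: to make the CDR2-versus-CDR1 choice described in the comment above more concrete, here is a small user-space sketch of the same selection logic with the resulting SPI clock for each path (formulas taken from that comment; the clock rates are hypothetical):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

/* Mirror of the divider selection in sun4i_spi_transfer_one() above. */
static void sun4i_pick_divider(unsigned int mclk_rate, unsigned int speed_hz)
{
	unsigned int div = mclk_rate / (2 * speed_hz);

	if (div <= 255 + 1) {		/* SUN4I_CLK_CTL_CDR2_MASK + 1 */
		if (div > 0)
			div--;
		printf("CDR2 = %3u -> SPI_CLK = %u Hz\n",
		       div, mclk_rate / (2 * (div + 1)));
	} else {			/* CDR1: SPI_CLK = MOD_CLK / 2^(cdr + 1) */
		div = ilog2_u(mclk_rate) - ilog2_u(speed_hz);
		printf("CDR1 = %3u -> SPI_CLK = %u Hz\n",
		       div, mclk_rate / (1u << (div + 1)));
	}
}

int main(void)
{
	sun4i_pick_divider(24000000, 1000000);	/* div = 12  -> CDR2 path, 1 MHz   */
	sun4i_pick_divider(24000000, 40000);	/* div = 300 -> CDR1 path, ~23 kHz */
	return 0;
}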
+
+static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
+{
+ struct sun4i_spi *sspi = dev_id;
+ u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN4I_INT_CTL_TC) {
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun4i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun4i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun4i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun4i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
+ 0, "sun4i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun4i_spi_set_cs;
+ master->transfer_one = sun4i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ /*
+ * This wake-up/shutdown pattern is to be able to have the
+ * device woken up, even if runtime_pm is disabled
+ */
+ ret = sun4i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun4i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun4i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_spi_match[] = {
+ { .compatible = "allwinner,sun4i-a10-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_spi_match);
+
+static const struct dev_pm_ops sun4i_spi_pm_ops = {
+ .runtime_resume = sun4i_spi_runtime_resume,
+ .runtime_suspend = sun4i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun4i_spi_driver = {
+ .probe = sun4i_spi_probe,
+ .remove = sun4i_spi_remove,
+ .driver = {
+ .name = "sun4i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun4i_spi_match,
+ .pm = &sun4i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
new file mode 100644
index 000000000000..b3e3498a7e6f
--- /dev/null
+++ b/drivers/spi/spi-sun6i.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/workqueue.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN6I_FIFO_DEPTH 128
+
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
+#define SUN6I_GBL_CTL_MASTER BIT(1)
+#define SUN6I_GBL_CTL_TP BIT(7)
+#define SUN6I_GBL_CTL_RST BIT(31)
+
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_TFR_CTL_CPHA BIT(0)
+#define SUN6I_TFR_CTL_CPOL BIT(1)
+#define SUN6I_TFR_CTL_SPOL BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK 0x30
+#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
+#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_XCH BIT(31)
+
+#define SUN6I_INT_CTL_REG 0x10
+#define SUN6I_INT_CTL_RF_OVF BIT(8)
+#define SUN6I_INT_CTL_TC BIT(12)
+
+#define SUN6I_INT_STA_REG 0x14
+
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_CTL_RF_RST BIT(15)
+#define SUN6I_FIFO_CTL_TF_RST BIT(31)
+
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_RF_CNT_BITS 0
+#define SUN6I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_TF_CNT_BITS 16
+
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_CLK_CTL_CDR2_MASK 0xff
+#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK 0xf
+#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS BIT(12)
+
+#define SUN6I_BURST_CNT_REG 0x30
+#define SUN6I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_XMIT_CNT_REG 0x34
+#define SUN6I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_BURST_CTL_CNT_REG 0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+struct sun6i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+ struct reset_control *rstc;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+ reg &= SUN6I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN6I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~SUN6I_TFR_CTL_CS_MASK;
+ reg |= SUN6I_TFR_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN6I_TFR_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+}
+
+
+static int sun6i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfers larger than the FIFO */
+ if (tfr->len > SUN6I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
+
+ /* Reset FIFO */
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
+ SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN6I_TFR_CTL_CPOL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN6I_TFR_CTL_CPHA;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN6I_TFR_CTL_FBS;
+ else
+ reg &= ~SUN6I_TFR_CTL_FBS;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN6I_TFR_CTL_DHB;
+ else
+ reg |= SUN6I_TFR_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+ * We have two choices there. Either we can use the clock
+ * divide rate 1, which is calculated thanks to this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ cdr)
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ }
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
+ SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+ /* Fill the TX FIFO */
+ sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+out:
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+{
+ struct sun6i_spi *sspi = dev_id;
+ u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN6I_INT_CTL_TC) {
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun6i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(sspi->rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the device from reset\n");
+ goto err2;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+ SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(sspi->mclk);
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun6i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ reset_control_assert(sspi->rstc);
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun6i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
+ 0, "sun6i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun6i_spi_set_cs;
+ master->transfer_one = sun6i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ sspi->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get reset controller\n");
+ ret = PTR_ERR(sspi->rstc);
+ goto err_free_master;
+ }
+
+ /*
+ * This explicit wake-up (matched by the shutdown in the error
+ * path) makes sure the device is powered up even when runtime
+ * PM is disabled.
+ */
+ ret = sun6i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun6i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+ { .compatible = "allwinner,sun6i-a31-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static const struct dev_pm_ops sun6i_spi_pm_ops = {
+ .runtime_resume = sun6i_spi_runtime_resume,
+ .runtime_suspend = sun6i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun6i_spi_driver = {
+ .probe = sun6i_spi_probe,
+ .remove = sun6i_spi_remove,
+ .driver = {
+ .name = "sun6i-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = sun6i_spi_match,
+ .pm = &sun6i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index aaecfb3ebf58..400649595505 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -17,14 +17,12 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -34,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
#define SPI_COMMAND1 0x000
@@ -54,11 +53,8 @@
#define SPI_CS_SS_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
-#define SPI_CS_POL_INACTIVE 22
-#define SPI_CS_POL_INACTIVE_0 (1 << 22)
-#define SPI_CS_POL_INACTIVE_1 (1 << 23)
-#define SPI_CS_POL_INACTIVE_2 (1 << 24)
-#define SPI_CS_POL_INACTIVE_3 (1 << 25)
+ /* n from 0 to 3 */
+#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define SPI_CS_SEL_0 (0 << 26)
@@ -165,26 +161,21 @@
#define MAX_HOLD_CYCLES 16
#define SPI_DEFAULT_SPEED 25000000
-#define MAX_CHIP_SELECT 4
-#define SPI_FIFO_DEPTH 64
-
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- int dma_req_sel;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
struct spi_device *cs_control;
unsigned cur_pos;
- unsigned cur_len;
unsigned words_per_32bit;
unsigned bytes_per_word;
unsigned curr_dma_words;
@@ -204,12 +195,10 @@ struct tegra_spi_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
- unsigned long packed_size;
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
- u32 spi_cs_timing;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
@@ -227,14 +216,14 @@ struct tegra_spi_data {
static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);
-static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
+static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@@ -245,7 +234,7 @@ static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
- unsigned long val;
+ u32 val;
/* Write 1 to clear status register */
val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
@@ -296,10 +285,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
- unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@@ -313,9 +301,9 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
- x |= (*tx_buf++) << (i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
} else {
@@ -323,10 +311,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
}
@@ -338,9 +326,8 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned i, count;
- unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@@ -350,20 +337,16 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
} else {
- unsigned int rx_mask;
- unsigned int bits_per_word = t->bits_per_word;
-
- rx_mask = (1 << bits_per_word) - 1;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_spi_readl(tspi, SPI_RX_FIFO);
- x &= rx_mask;
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -376,27 +359,24 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
- unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
- x |= ((*tx_buf++) << i * 8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@@ -410,27 +390,21 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
tspi->dma_buf_size, DMA_FROM_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
- unsigned int x;
- unsigned int rx_mask;
- unsigned int bits_per_word = t->bits_per_word;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
- rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = tspi->rx_dma_buf[count];
- x &= rx_mask;
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -490,16 +464,16 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
static int tegra_spi_start_dma_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned int len;
int ret = 0;
- unsigned long status;
+ u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
- dev_err(tspi->dev,
- "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
return -EIO;
}
@@ -564,7 +538,7 @@ static int tegra_spi_start_dma_based_transfer(
static int tegra_spi_start_cpu_based_transfer(
struct tegra_spi_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned cur_words;
if (tspi->cur_direction & DATA_DIR_TX)
@@ -600,15 +574,15 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
- dev_err(tspi->dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+
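+ /*
+ * Request the "rx" or "tx" slave channel named in the DT
+ * dmas/dma-names properties; -EPROBE_DEFER is propagated
+ * quietly so the probe is retried once the DMA controller
+ * driver is available.
+ */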
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
}
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
@@ -619,7 +593,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
return -ENOMEM;
}
- dma_sconfig.slave_id = tspi->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -677,13 +650,13 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
-static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
+static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- unsigned long command1;
+ u32 command1;
int req_mode;
if (speed != tspi->cur_speed) {
@@ -738,7 +711,7 @@ static unsigned long tegra_spi_setup_transfer_one(struct spi_device *spi,
}
static int tegra_spi_start_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, unsigned long command1)
+ struct spi_transfer *t, u32 command1)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
unsigned total_fifo_words;
@@ -763,8 +736,8 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
tspi->command1_reg = command1;
- dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n",
- tspi->def_command1_reg, command1);
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
if (total_fifo_words > SPI_FIFO_DEPTH)
ret = tegra_spi_start_dma_based_transfer(tspi, t);
@@ -776,15 +749,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
- unsigned long val;
+ u32 val;
unsigned long flags;
int ret;
- unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
- SPI_CS_POL_INACTIVE_0,
- SPI_CS_POL_INACTIVE_1,
- SPI_CS_POL_INACTIVE_2,
- SPI_CS_POL_INACTIVE_3,
- };
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
@@ -792,11 +759,6 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
-
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -806,9 +768,9 @@ static int tegra_spi_setup(struct spi_device *spi)
spin_lock_irqsave(&tspi->lock, flags);
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
- val &= ~cs_pol_bit[spi->chip_select];
+ val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
else
- val |= cs_pol_bit[spi->chip_select];
+ val |= SPI_CS_POL_INACTIVE(spi->chip_select);
tspi->def_command1_reg = val;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
spin_unlock_irqrestore(&tspi->lock, flags);
@@ -842,7 +804,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- unsigned long cmd1;
+ u32 cmd1;
reinit_completion(&tspi->xfer_completion);
@@ -884,8 +846,8 @@ complete_xfer:
SPI_COMMAND1);
tegra_spi_transfer_delay(xfer->delay_usecs);
goto exit;
- } else if (msg->transfers.prev == &xfer->transfer_list) {
- /* This is the last transfer in message */
+ } else if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
if (xfer->cs_change)
tspi->cs_control = spi;
else {
@@ -918,9 +880,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
goto exit;
}
@@ -990,9 +952,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
tspi->status_reg);
dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
tspi->command1_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
@@ -1050,21 +1012,6 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static void tegra_spi_parse_dt(struct platform_device *pdev,
- struct tegra_spi_data *tspi)
-{
- struct device_node *np = pdev->dev.of_node;
- u32 of_dma[2];
-
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0)
- tspi->dma_req_sel = of_dma[1];
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_spi_of_match[] = {
{ .compatible = "nvidia,tegra114-spi", },
{}
@@ -1086,15 +1033,15 @@ static int tegra_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
- /* Parse DT */
- tegra_spi_parse_dt(pdev, tspi);
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
master->auto_runtime_pm = true;
tspi->master = master;
@@ -1127,25 +1074,25 @@ static int tegra_spi_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
- if (tspi->dma_req_sel) {
- ret = tegra_spi_init_dma_param(tspi, true);
- if (ret < 0) {
- dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
- goto exit_free_irq;
- }
-
- ret = tegra_spi_init_dma_param(tspi, false);
- if (ret < 0) {
- dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
- goto exit_rx_dma_free;
- }
- tspi->max_buf_size = tspi->dma_buf_size;
- init_completion(&tspi->tx_dma_complete);
- init_completion(&tspi->rx_dma_complete);
- }
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 4dc8e8129459..47869ea636e1 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -22,7 +22,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -32,8 +31,8 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
-#include <linux/clk/tegra.h>
#define SPI_COMMAND 0x000
#define SPI_GO BIT(30)
@@ -118,9 +117,9 @@ struct tegra_sflash_data {
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -148,14 +147,14 @@ struct tegra_sflash_data {
static int tegra_sflash_runtime_suspend(struct device *dev);
static int tegra_sflash_runtime_resume(struct device *dev);
-static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd,
+static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
unsigned long reg)
{
return readl(tsd->base + reg);
}
static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tsd->base + reg);
}
@@ -185,7 +184,7 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
unsigned nbytes;
- unsigned long status;
+ u32 status;
unsigned max_n_32bit = tsd->curr_xfer_words;
u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
@@ -196,11 +195,11 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_TXF_FULL)) {
int i;
- unsigned int x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tsd->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
if (!nbytes)
break;
@@ -214,16 +213,14 @@ static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
- unsigned long status;
+ u32 status;
unsigned int read_words = 0;
u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
status = tegra_sflash_readl(tsd, SPI_STATUS);
while (!(status & SPI_RXF_EMPTY)) {
int i;
- unsigned long x;
-
- x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+ u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
for (i = 0; (i < tsd->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
read_words++;
@@ -236,7 +233,7 @@ static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
static int tegra_sflash_start_cpu_based_transfer(
struct tegra_sflash_data *tsd, struct spi_transfer *t)
{
- unsigned long val = 0;
+ u32 val = 0;
unsigned cur_words;
if (tsd->cur_direction & DATA_DIR_TX)
@@ -266,7 +263,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
{
struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
u32 speed;
- unsigned long command;
+ u32 command;
speed = t->speed_hz;
if (speed != tsd->cur_speed) {
@@ -313,16 +310,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
tegra_sflash_writel(tsd, command, SPI_COMMAND);
tsd->command_reg = command;
- return tegra_sflash_start_cpu_based_transfer(tsd, t);
-}
-
-static int tegra_sflash_setup(struct spi_device *spi)
-{
- struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
- return 0;
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
static int tegra_sflash_transfer_one_message(struct spi_master *master,
@@ -389,9 +377,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
dev_err(tsd->dev,
"CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
tsd->dma_control_reg);
- tegra_periph_reset_assert(tsd->clk);
+ reset_control_assert(tsd->rst);
udelay(2);
- tegra_periph_reset_deassert(tsd->clk);
+ reset_control_deassert(tsd->rst);
complete(&tsd->xfer_completion);
goto exit;
}
@@ -431,15 +419,6 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
return handle_cpu_based_xfer(tsd);
}
-static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd)
-{
- struct device_node *np = tsd->dev->of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tsd->spi_max_frequency))
- tsd->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_sflash_of_match[] = {
{ .compatible = "nvidia,tegra20-sflash", },
{}
@@ -468,11 +447,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->setup = tegra_sflash_setup;
master->transfer_one_message = tegra_sflash_transfer_one_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tsd = spi_master_get_devdata(master);
@@ -480,7 +457,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
- tegra_sflash_parse_dt(tsd);
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tsd->base = devm_ioremap_resource(&pdev->dev, r);
@@ -505,6 +484,13 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tsd->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tsd->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tsd->rst);
+ goto exit_free_irq;
+ }
+
init_completion(&tsd->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
@@ -520,9 +506,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
}
/* Reset controller */
- tegra_periph_reset_assert(tsd->clk);
+ reset_control_assert(tsd->rst);
udelay(2);
- tegra_periph_reset_deassert(tsd->clk);
+ reset_control_deassert(tsd->rst);
tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e66715ba37ed..e3c1b93e45d1 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -23,7 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -33,8 +32,8 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/spi/spi.h>
-#include <linux/clk/tegra.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -167,11 +166,10 @@ struct tegra_slink_data {
spinlock_t lock;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- int dma_req_sel;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -196,7 +194,7 @@ struct tegra_slink_data {
u32 rx_status;
u32 status_reg;
bool is_packed;
- unsigned long packed_size;
+ u32 packed_size;
u32 command_reg;
u32 command2_reg;
@@ -220,14 +218,14 @@ struct tegra_slink_data {
static int tegra_slink_runtime_suspend(struct device *dev);
static int tegra_slink_runtime_resume(struct device *dev);
-static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
+static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
return readl(tspi->base + reg);
}
static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
- unsigned long val, unsigned long reg)
+ u32 val, unsigned long reg)
{
writel(val, tspi->base + reg);
@@ -238,38 +236,30 @@ static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
{
- unsigned long val;
- unsigned long val_write = 0;
+ u32 val_write;
- val = tegra_slink_readl(tspi, SLINK_STATUS);
+ tegra_slink_readl(tspi, SLINK_STATUS);
/* Write 1 to clear status register */
val_write = SLINK_RDY | SLINK_FIFO_ERROR;
tegra_slink_writel(tspi, val_write, SLINK_STATUS);
}
-static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
struct spi_transfer *t)
{
- unsigned long val;
-
switch (tspi->bytes_per_word) {
case 0:
- val = SLINK_PACK_SIZE_4;
- break;
+ return SLINK_PACK_SIZE_4;
case 1:
- val = SLINK_PACK_SIZE_8;
- break;
+ return SLINK_PACK_SIZE_8;
case 2:
- val = SLINK_PACK_SIZE_16;
- break;
+ return SLINK_PACK_SIZE_16;
case 4:
- val = SLINK_PACK_SIZE_32;
- break;
+ return SLINK_PACK_SIZE_32;
default:
- val = 0;
+ return 0;
}
- return val;
}
static unsigned tegra_slink_calculate_curr_xfer_param(
@@ -312,10 +302,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
{
unsigned nbytes;
unsigned tx_empty_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned max_n_32bit;
unsigned i, count;
- unsigned long x;
unsigned int written_words;
unsigned fifo_words_left;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
@@ -329,9 +318,9 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
nbytes = written_words * tspi->bytes_per_word;
max_n_32bit = DIV_ROUND_UP(nbytes, 4);
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; (i < 4) && nbytes; i++, nbytes--)
- x |= (*tx_buf++) << (i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
} else {
@@ -339,10 +328,10 @@ static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
for (count = 0; count < max_n_32bit; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; nbytes && (i < tspi->bytes_per_word);
i++, nbytes--)
- x |= ((*tx_buf++) << i*8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
}
}
@@ -354,9 +343,8 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
unsigned rx_full_count;
- unsigned long fifo_status;
+ u32 fifo_status;
unsigned i, count;
- unsigned long x;
unsigned int read_words = 0;
unsigned len;
u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
@@ -366,7 +354,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
if (tspi->is_packed) {
len = tspi->curr_dma_words * tspi->bytes_per_word;
for (count = 0; count < rx_full_count; count++) {
- x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
@@ -374,7 +362,7 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
read_words += tspi->curr_dma_words;
} else {
for (count = 0; count < rx_full_count; count++) {
- x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -387,27 +375,24 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned len;
-
/* Make the dma buffer to read by cpu */
dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
tspi->dma_buf_size, DMA_TO_DEVICE);
if (tspi->is_packed) {
- len = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
- unsigned int x;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = 0;
+ u32 x = 0;
for (i = 0; consume && (i < tspi->bytes_per_word);
i++, consume--)
- x |= ((*tx_buf++) << i * 8);
+ x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
}
@@ -434,14 +419,10 @@ static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
- unsigned int x;
- unsigned int rx_mask, bits_per_word;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
- bits_per_word = t->bits_per_word;
- rx_mask = (1 << bits_per_word) - 1;
for (count = 0; count < tspi->curr_dma_words; count++) {
- x = tspi->rx_dma_buf[count];
- x &= rx_mask;
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
for (i = 0; (i < tspi->bytes_per_word); i++)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
@@ -501,17 +482,16 @@ static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
static int tegra_slink_start_dma_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
- unsigned long test_val;
+ u32 val;
unsigned int len;
int ret = 0;
- unsigned long status;
+ u32 status;
/* Make sure that Rx and Tx fifo are empty */
status = tegra_slink_readl(tspi, SLINK_STATUS);
if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
- dev_err(tspi->dev,
- "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
return -EIO;
}
@@ -551,9 +531,9 @@ static int tegra_slink_start_dma_based_transfer(
}
/* Wait for tx fifo to be fill before starting slink */
- test_val = tegra_slink_readl(tspi, SLINK_STATUS);
- while (!(test_val & SLINK_TX_FULL))
- test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(status & SLINK_TX_FULL))
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
}
if (tspi->cur_direction & DATA_DIR_RX) {
@@ -587,7 +567,7 @@ static int tegra_slink_start_dma_based_transfer(
static int tegra_slink_start_cpu_based_transfer(
struct tegra_slink_data *tspi, struct spi_transfer *t)
{
- unsigned long val;
+ u32 val;
unsigned cur_words;
val = tspi->packed_size;
@@ -629,15 +609,15 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
- dev_err(tspi->dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
}
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
@@ -648,7 +628,6 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
return -ENOMEM;
}
- dma_sconfig.slave_id = tspi->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -714,8 +693,8 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
u8 bits_per_word;
unsigned total_fifo_words;
int ret;
- unsigned long command;
- unsigned long command2;
+ u32 command;
+ u32 command2;
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
@@ -762,27 +741,24 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
static int tegra_slink_setup(struct spi_device *spi)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
- unsigned long val;
- unsigned long flags;
- int ret;
- unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
+ static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
SLINK_CS_POLARITY,
SLINK_CS_POLARITY1,
SLINK_CS_POLARITY2,
SLINK_CS_POLARITY3,
};
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
spi->bits_per_word,
spi->mode & SPI_CPOL ? "" : "~",
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -884,9 +860,9 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
dev_err(tspi->dev,
"CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
goto exit;
}
@@ -957,9 +933,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
dev_err(tspi->dev,
"DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
tspi->command2_reg, tspi->dma_control_reg);
- tegra_periph_reset_assert(tspi->clk);
+ reset_control_assert(tspi->rst);
udelay(2);
- tegra_periph_reset_deassert(tspi->clk);
+ reset_control_deassert(tspi->rst);
complete(&tspi->xfer_completion);
spin_unlock_irqrestore(&tspi->lock, flags);
return IRQ_HANDLED;
@@ -1017,20 +993,6 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static void tegra_slink_parse_dt(struct tegra_slink_data *tspi)
-{
- struct device_node *np = tspi->dev->of_node;
- u32 of_dma[2];
-
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0)
- tspi->dma_req_sel = of_dma[1];
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static const struct tegra_slink_chip_data tegra30_spi_cdata = {
.cs_hold_time = true,
};
@@ -1076,7 +1038,6 @@ static int tegra_slink_probe(struct platform_device *pdev)
master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
@@ -1085,7 +1046,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
- tegra_slink_parse_dt(tspi);
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -1118,25 +1081,25 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_irq;
}
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
- if (tspi->dma_req_sel) {
- ret = tegra_slink_init_dma_param(tspi, true);
- if (ret < 0) {
- dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
- goto exit_free_irq;
- }
-
- ret = tegra_slink_init_dma_param(tspi, false);
- if (ret < 0) {
- dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
- goto exit_rx_dma_free;
- }
- tspi->max_buf_size = tspi->dma_buf_size;
- init_completion(&tspi->tx_dma_complete);
- init_completion(&tspi->rx_dma_complete);
- }
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
init_completion(&tspi->xfer_completion);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 4396bd448540..6c211d1910b0 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -46,6 +46,8 @@ struct ti_qspi {
struct spi_master *master;
void __iomem *base;
+ void __iomem *ctrl_base;
+ void __iomem *mmap_base;
struct clk *fclk;
struct device *dev;
@@ -54,6 +56,8 @@ struct ti_qspi {
u32 spi_max_frequency;
u32 cmd;
u32 dc;
+
+ bool ctrl_mod;
};
#define QSPI_PID (0x0)
@@ -204,53 +208,36 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
txbuf = t->tx_buf;
cmd = qspi->cmd | QSPI_WR_SNGL;
count = t->len;
- wlen = t->bits_per_word;
+ wlen = t->bits_per_word >> 3; /* in bytes */
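+ /* wlen is 1, 2 or 4, matching the bits_per_word_mask set in probe */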
while (count) {
switch (wlen) {
- case 8:
+ case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
cmd, qspi->dc, *txbuf);
writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 1;
- count -= 1;
break;
- case 16:
+ case 2:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
cmd, qspi->dc, *txbuf);
writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 2;
- count -= 2;
break;
- case 32:
+ case 4:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
cmd, qspi->dc, *txbuf);
writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
- ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
- QSPI_COMPLETION_TIMEOUT);
- if (ret == 0) {
- dev_err(qspi->dev, "write timed out\n");
- return -ETIMEDOUT;
- }
- txbuf += 4;
- count -= 4;
break;
}
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += wlen;
+ count -= wlen;
}
return 0;
@@ -276,7 +263,7 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
break;
}
count = t->len;
- wlen = t->bits_per_word;
+ wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
@@ -288,22 +275,18 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return -ETIMEDOUT;
}
switch (wlen) {
- case 8:
+ case 1:
*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 1;
- count -= 1;
break;
- case 16:
+ case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 2;
- count -= 2;
break;
- case 32:
+ case 4:
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
- rxbuf += 4;
- count -= 4;
break;
}
+ rxbuf += wlen;
+ count -= wlen;
}
return 0;
@@ -417,10 +400,8 @@ out:
static int ti_qspi_runtime_resume(struct device *dev)
{
struct ti_qspi *qspi;
- struct spi_master *master;
- master = dev_get_drvdata(dev);
- qspi = spi_master_get_devdata(master);
+ qspi = dev_get_drvdata(dev);
ti_qspi_restore_ctx(qspi);
return 0;
@@ -437,7 +418,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
struct spi_master *master;
- struct resource *r;
+ struct resource *r, *res_ctrl, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
@@ -448,13 +429,13 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->bus_num = -1;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = ti_qspi_setup;
master->auto_runtime_pm = true;
master->transfer_one_message = ti_qspi_start_transfer_one;
master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
@@ -464,7 +445,34 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
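+ /*
+ * Look each resource up by name first, then fall back to
+ * lookup by index.
+ */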
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ if (r == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ res_mmap = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_mmap");
+ if (res_mmap == NULL) {
+ res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mmap == NULL) {
+ dev_err(&pdev->dev,
+ "memory mapped resource not required\n");
+ }
+ }
+
+ res_ctrl = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_ctrlmod");
+ if (res_ctrl == NULL) {
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res_ctrl == NULL) {
+ dev_dbg(&pdev->dev,
+ "control module resources not required\n");
+ }
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -480,6 +488,23 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
+ if (res_ctrl) {
+ qspi->ctrl_mod = true;
+ qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ }
+
+ if (res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ ret = PTR_ERR(qspi->mmap_base);
+ goto free_master;
+ }
+ }
+
ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
dev_name(&pdev->dev), qspi);
if (ret < 0) {
@@ -516,13 +541,9 @@ free_master:
static int ti_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master;
- struct ti_qspi *qspi;
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
int ret;
- master = platform_get_drvdata(pdev);
- qspi = spi_master_get_devdata(master);
-
ret = pm_runtime_get_sync(qspi->dev);
if (ret < 0) {
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
@@ -534,8 +555,6 @@ static int ti_qspi_remove(struct platform_device *pdev)
pm_runtime_put(qspi->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
return 0;
}
@@ -547,7 +566,7 @@ static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
.remove = ti_qspi_remove,
.driver = {
- .name = "ti,dra7xxx-qspi",
+ .name = "ti-qspi",
.owner = THIS_MODULE,
.pm = &ti_qspi_pm_ops,
.of_match_table = ti_qspi_match,
@@ -559,3 +578,4 @@ module_platform_driver(ti_qspi_driver);
MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
+MODULE_ALIAS("platform:ti-qspi");
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
deleted file mode 100644
index 7d20e121e4c1..000000000000
--- a/drivers/spi/spi-ti-ssp.c
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Sequencer Serial Port (SSP) based SPI master driver
- *
- * Copyright (C) 2010 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ti_ssp.h>
-
-#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)
-
-struct ti_ssp_spi {
- struct spi_master *master;
- struct device *dev;
- spinlock_t lock;
- struct list_head msg_queue;
- struct completion complete;
- bool shutdown;
- struct workqueue_struct *workqueue;
- struct work_struct work;
- u8 mode, bpw;
- int cs_active;
- u32 pc_en, pc_dis, pc_wr, pc_rd;
- void (*select)(int cs);
-};
-
-static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
-{
- u32 ret;
-
- ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret);
- return ret;
-}
-
-static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
-{
- ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL);
-}
-
-static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
- struct spi_transfer *t)
-{
- int count;
-
- if (hw->bpw <= 8) {
- u8 *rx = t->rx_buf;
- const u8 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 1) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else if (hw->bpw <= 16) {
- u16 *rx = t->rx_buf;
- const u16 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 2) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- } else {
- u32 *rx = t->rx_buf;
- const u32 *tx = t->tx_buf;
-
- for (count = 0; count < t->len; count += 4) {
- if (t->tx_buf)
- ti_ssp_spi_tx(hw, *tx++);
- if (t->rx_buf)
- *rx++ = ti_ssp_spi_rx(hw);
- }
- }
-
- msg->actual_length += count; /* bytes transferred */
-
- dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
- t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
- hw->bpw, count, (count < t->len) ? " (under)" : "");
-
- return (count < t->len) ? -EIO : 0; /* left over data */
-}
-
-static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
-{
- cs_active = !!cs_active;
- if (cs_active == hw->cs_active)
- return;
- ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL);
- hw->cs_active = cs_active;
-}
-
-#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \
- cs_en | clk | SSP_COUNT((bits) * 2 - 1))
-
-static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
-{
- int error, idx = 0;
- u32 seqram[16];
- u32 cs_en, cs_dis, clk;
- u32 topbits, botbits;
-
- mode &= MODE_BITS;
- if (mode == hw->mode && bpw == hw->bpw)
- return 0;
-
- cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
- cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH;
- clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW;
-
- /* Construct instructions */
-
- /* Disable Chip Select */
- hw->pc_dis = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk;
-
- /* Enable Chip Select */
- hw->pc_en = idx;
- seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Reads and writes need to be split for bpw > 16 */
- topbits = (bpw > 16) ? 16 : bpw;
- botbits = bpw - topbits;
-
- /* Write */
- hw->pc_wr = idx;
- seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
- if (botbits)
- seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- /* Read */
- hw->pc_rd = idx;
- if (botbits)
- seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
- seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
- seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;
-
- error = ti_ssp_load(hw->dev, 0, seqram, idx);
- if (error < 0)
- return error;
-
- error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
- 0 : SSP_EARLY_DIN));
- if (error < 0)
- return error;
-
- hw->bpw = bpw;
- hw->mode = mode;
-
- return error;
-}
-
-static void ti_ssp_spi_work(struct work_struct *work)
-{
- struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);
-
- spin_lock(&hw->lock);
-
- while (!list_empty(&hw->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int status = 0;
-
- m = container_of(hw->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
-
- spin_unlock(&hw->lock);
-
- spi = m->spi;
-
- if (hw->select)
- hw->select(spi->chip_select);
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- int bpw = spi->bits_per_word;
- int xfer_status;
-
- if (t->bits_per_word)
- bpw = t->bits_per_word;
-
- if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
- break;
-
- ti_ssp_spi_chip_select(hw, 1);
-
- xfer_status = ti_ssp_spi_txrx(hw, m, t);
- if (xfer_status < 0)
- status = xfer_status;
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (t->cs_change)
- ti_ssp_spi_chip_select(hw, 0);
- }
-
- ti_ssp_spi_chip_select(hw, 0);
- m->status = status;
- m->complete(m->context);
-
- spin_lock(&hw->lock);
- }
-
- if (hw->shutdown)
- complete(&hw->complete);
-
- spin_unlock(&hw->lock);
-}
-
-static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct ti_ssp_spi *hw;
- struct spi_transfer *t;
- int error = 0;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- hw = spi_master_get_devdata(spi->master);
-
- if (list_empty(&m->transfers) || !m->complete)
- return -EINVAL;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->len && !(t->rx_buf || t->tx_buf)) {
- dev_err(&spi->dev, "invalid xfer, no buffer\n");
- return -EINVAL;
- }
-
- if (t->len && t->rx_buf && t->tx_buf) {
- dev_err(&spi->dev, "invalid xfer, full duplex\n");
- return -EINVAL;
- }
- }
-
- spin_lock(&hw->lock);
- if (hw->shutdown) {
- error = -ESHUTDOWN;
- goto error_unlock;
- }
- list_add_tail(&m->queue, &hw->msg_queue);
- queue_work(hw->workqueue, &hw->work);
-error_unlock:
- spin_unlock(&hw->lock);
- return error;
-}
-
-static int ti_ssp_spi_probe(struct platform_device *pdev)
-{
- const struct ti_ssp_spi_data *pdata;
- struct ti_ssp_spi *hw;
- struct spi_master *master;
- struct device *dev = &pdev->dev;
- int error = 0;
-
- pdata = dev_get_platdata(dev);
- if (!pdata) {
- dev_err(dev, "platform data not found\n");
- return -EINVAL;
- }
-
- master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
- if (!master) {
- dev_err(dev, "cannot allocate SPI master\n");
- return -ENOMEM;
- }
-
- hw = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, hw);
-
- hw->master = master;
- hw->dev = dev;
- hw->select = pdata->select;
-
- spin_lock_init(&hw->lock);
- init_completion(&hw->complete);
- INIT_LIST_HEAD(&hw->msg_queue);
- INIT_WORK(&hw->work, ti_ssp_spi_work);
-
- hw->workqueue = create_singlethread_workqueue(dev_name(dev));
- if (!hw->workqueue) {
- error = -ENOMEM;
- dev_err(dev, "work queue creation failed\n");
- goto error_wq;
- }
-
- error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
- if (error < 0) {
- dev_err(dev, "io setup failed\n");
- goto error_iosel;
- }
-
- master->bus_num = pdev->id;
- master->num_chipselect = pdata->num_cs;
- master->mode_bits = MODE_BITS;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->transfer = ti_ssp_spi_transfer;
-
- error = spi_register_master(master);
- if (error) {
- dev_err(dev, "master registration failed\n");
- goto error_reg;
- }
-
- return 0;
-
-error_reg:
-error_iosel:
- destroy_workqueue(hw->workqueue);
-error_wq:
- spi_master_put(master);
- return error;
-}
-
-static int ti_ssp_spi_remove(struct platform_device *pdev)
-{
- struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
- int error;
-
- hw->shutdown = 1;
- while (!list_empty(&hw->msg_queue)) {
- error = wait_for_completion_interruptible(&hw->complete);
- if (error < 0) {
- hw->shutdown = 0;
- return error;
- }
- }
- destroy_workqueue(hw->workqueue);
- spi_unregister_master(hw->master);
-
- return 0;
-}
-
-static struct platform_driver ti_ssp_spi_driver = {
- .probe = ti_ssp_spi_probe,
- .remove = ti_ssp_spi_remove,
- .driver = {
- .name = "ti-ssp-spi",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_ssp_spi_driver);
-
-MODULE_DESCRIPTION("SSP SPI Master");
-MODULE_AUTHOR("Cyril Chemparathy");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ti-ssp-spi");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 446131308acb..f406b30af961 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -217,7 +217,7 @@ struct pch_pd_dev_save {
struct pch_spi_board_data *board_dat;
};
-static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
+static const struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
@@ -332,7 +332,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->transfer_active = false;
wake_up(&data->wait);
} else {
- dev_err(&data->master->dev,
+ dev_vdbg(&data->master->dev,
"%s : Transfer is not completed",
__func__);
}
@@ -464,26 +464,6 @@ static void pch_spi_reset(struct spi_master *master)
pch_spi_writereg(master, PCH_SRST, 0x0);
}
-static int pch_spi_setup(struct spi_device *pspi)
-{
- /* check bits per word */
- if (pspi->bits_per_word == 0) {
- pspi->bits_per_word = 8;
- dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
- }
-
- /* Check baud rate setting */
- /* if baud rate of chip is greater than
- max we can support,return error */
- if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
- pspi->max_speed_hz = PCH_MAX_BAUDRATE;
-
- dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
- (pspi->mode) & (SPI_CPOL | SPI_CPHA));
-
- return 0;
-}
-
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
@@ -492,23 +472,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
int retval;
unsigned long flags;
- /* validate spi message and baud rate */
- if (unlikely(list_empty(&pmsg->transfers) == 1)) {
- dev_err(&pspi->dev, "%s list empty\n", __func__);
- retval = -EINVAL;
- goto err_out;
- }
-
- if (unlikely(pspi->max_speed_hz == 0)) {
- dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
- __func__, pspi->max_speed_hz);
- retval = -EINVAL;
- goto err_out;
- }
-
- dev_dbg(&pspi->dev,
- "%s Transfer List not empty. Transfer Speed is set.\n", __func__);
-
spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
@@ -529,10 +492,6 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev,
"%s Tx/Rx buffer valid. Transfer length valid\n",
__func__);
-
- /* if baud rate has been specified validate the same */
- if (transfer->speed_hz > PCH_MAX_BAUDRATE)
- transfer->speed_hz = PCH_MAX_BAUDRATE;
}
spin_unlock_irqrestore(&data->lock, flags);
@@ -921,7 +880,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -936,7 +895,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1157,8 +1116,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->nent = num;
dma->desc_tx = desc_tx;
- dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
- "0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
@@ -1424,10 +1382,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
/* initialize members of SPI master */
master->num_chipselect = PCH_MAX_CS;
- master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->max_speed_hz = PCH_MAX_BAUDRATE;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
@@ -1458,6 +1416,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
pch_spi_set_master_mode(master);
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
@@ -1465,14 +1428,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
goto err_spi_register_master;
}
- if (use_dma) {
- dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
- }
-
return 0;
err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
@@ -1610,8 +1569,7 @@ static struct platform_driver pch_spi_pd_driver = {
.resume = pch_spi_pd_resume
};
-static int pch_spi_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
struct platform_device *pd_dev = NULL;
@@ -1681,6 +1639,8 @@ static int pch_spi_probe(struct pci_dev *pdev,
return 0;
err_platform_device:
+ while (--i >= 0)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
pci_disable_device(pdev);
pci_enable_device:
pci_release_regions(pdev);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 18c9bb2b5f39..820b499816f8 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -80,7 +80,6 @@ struct txx9spi {
void __iomem *membase;
int baseclk;
struct clk *clk;
- u32 max_speed_hz, min_speed_hz;
int last_chipselect;
int last_chipselect_val;
};
@@ -117,9 +116,7 @@ static int txx9spi_setup(struct spi_device *spi)
{
struct txx9spi *c = spi_master_get_devdata(spi->master);
- if (!spi->max_speed_hz
- || spi->max_speed_hz > c->max_speed_hz
- || spi->max_speed_hz < c->min_speed_hz)
+ if (!spi->max_speed_hz)
return -EINVAL;
if (gpio_direction_output(spi->chip_select,
@@ -309,15 +306,8 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
/* check each transfer's parameters */
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
- u8 bits_per_word = t->bits_per_word;
-
if (!t->tx_buf && !t->rx_buf && t->len)
return -EINVAL;
- if (t->len & ((bits_per_word >> 3) - 1))
- return -EINVAL;
- if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
- return -EINVAL;
}
spin_lock_irqsave(&c->lock, flags);
@@ -348,7 +338,7 @@ static int txx9spi_probe(struct platform_device *dev)
INIT_LIST_HEAD(&c->queue);
init_waitqueue_head(&c->waitq);
- c->clk = clk_get(&dev->dev, "spi-baseclk");
+ c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
if (IS_ERR(c->clk)) {
ret = PTR_ERR(c->clk);
c->clk = NULL;
@@ -356,22 +346,16 @@ static int txx9spi_probe(struct platform_device *dev)
}
ret = clk_enable(c->clk);
if (ret) {
- clk_put(c->clk);
c->clk = NULL;
goto exit;
}
c->baseclk = clk_get_rate(c->clk);
- c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
- c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
+ master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
+ master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
- "spi_txx9"))
- goto exit_busy;
- c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
- if (!c->membase)
+ c->membase = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(c->membase))
goto exit_busy;
/* enter config mode */
@@ -415,10 +399,8 @@ exit_busy:
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
- if (c->clk) {
+ if (c->clk)
clk_disable(c->clk);
- clk_put(c->clk);
- }
spi_master_put(master);
return ret;
}
@@ -430,7 +412,6 @@ static int txx9spi_remove(struct platform_device *dev)
destroy_workqueue(c->workqueue);
clk_disable(c->clk);
- clk_put(c->clk);
return 0;
}
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 4258c712ad3c..bb478dccf1d8 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -74,15 +73,13 @@ static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
{
- unsigned int speed;
-
if (t->len > 62)
return -EINVAL;
- speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (t->speed_hz != spi_xcomm->current_speed) {
+ unsigned int divider;
- if (speed != spi_xcomm->current_speed) {
- unsigned int divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, speed);
+ divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
if (divider >= 64)
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
else if (divider >= 16)
@@ -90,7 +87,7 @@ static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
else
*settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
- spi_xcomm->current_speed = speed;
+ spi_xcomm->current_speed = t->speed_hz;
}
if (spi->mode & SPI_CPOL)
@@ -148,8 +145,6 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
int status = 0;
bool is_last;
- is_first = true;
-
spi_xcomm_chipselect(spi_xcomm, spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
@@ -231,22 +226,13 @@ static int spi_xcomm_probe(struct i2c_client *i2c,
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
- ret = spi_register_master(master);
+ ret = devm_spi_register_master(&i2c->dev, master);
if (ret < 0)
spi_master_put(master);
return ret;
}
-static int spi_xcomm_remove(struct i2c_client *i2c)
-{
- struct spi_master *master = i2c_get_clientdata(i2c);
-
- spi_unregister_master(master);
-
- return 0;
-}
-
static const struct i2c_device_id spi_xcomm_ids[] = {
{ "spi-xcomm" },
{ },
@@ -259,7 +245,6 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
- .remove = spi_xcomm_remove,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 6d4ce4615163..a3b0b9944bf0 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -88,10 +87,10 @@ struct xilinx_spi {
const u8 *tx_ptr; /* pointer in the Rx buffer */
int remaining_bytes; /* the number of bytes left to transfer */
u8 bits_per_word;
- unsigned int (*read_fn) (void __iomem *);
- void (*write_fn) (u32, void __iomem *);
- void (*tx_fn) (struct xilinx_spi *);
- void (*rx_fn) (struct xilinx_spi *);
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(u32, void __iomem *);
+ void (*tx_fn)(struct xilinx_spi *);
+ void (*rx_fn)(struct xilinx_spi *);
};
static void xspi_write32(u32 val, void __iomem *addr)
@@ -209,26 +208,11 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
- * supports 8 or 16 bits per word which cannot be changed in software.
- * SPI clock can't be changed in software either.
- * Check for correct bits per word. Chip select delay calculations could be
- * added here as soon as bitbang_work() can be made aware of the delay value.
+ * custom txrx_bufs().
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u8 bits_per_word;
-
- bits_per_word = (t && t->bits_per_word)
- ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != xspi->bits_per_word) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
return 0;
}
@@ -407,6 +391,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
+ master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bits_per_word = bits_per_word;
if (xspi->bits_per_word == 8) {
xspi->tx_fn = xspi_tx8;
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
new file mode 100644
index 000000000000..41e158187f9d
--- /dev/null
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -0,0 +1,170 @@
+/*
+ * Xtensa xtfpga SPI controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XTFPGA_SPI_NAME "xtfpga_spi"
+
+#define XTFPGA_SPI_START 0x0
+#define XTFPGA_SPI_BUSY 0x4
+#define XTFPGA_SPI_DATA 0x8
+
+#define BUSY_WAIT_US 100
+
+struct xtfpga_spi {
+ struct spi_bitbang bitbang;
+ void __iomem *regs;
+ u32 data;
+ unsigned data_sz;
+};
+
+static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+{
+ iowrite32(val, spi->regs + addr);
+}
+
+static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+{
+ return ioread32(spi->regs + addr);
+}
+
+static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+{
+ unsigned i;
+ for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
+ i < BUSY_WAIT_US; ++i)
+ udelay(1);
+ WARN_ON_ONCE(i == BUSY_WAIT_US);
+}
+
+static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 v, u8 bits)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
+ xspi->data_sz += bits;
+ if (xspi->data_sz >= 16) {
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
+ xspi->data >> (xspi->data_sz - 16));
+ xspi->data_sz -= 16;
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
+ xtfpga_spi_wait_busy(xspi);
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ }
+
+ return 0;
+}
+
+static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ WARN_ON(xspi->data_sz != 0);
+ xspi->data_sz = 0;
+}
+
+static int xtfpga_spi_probe(struct platform_device *pdev)
+{
+ struct xtfpga_spi *xspi;
+ struct resource *mem;
+ int ret;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_NO_RX;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->bus_num = pdev->dev.id;
+ master->dev.of_node = pdev->dev.of_node;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xtfpga_spi_chipselect;
+ xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto err;
+ }
+
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ usleep_range(1000, 2000);
+ if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
+ dev_err(&pdev->dev, "Device stuck in busy state\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int xtfpga_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ spi_master_put(master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_spi_of_match[] = {
+ { .compatible = "cdns,xtfpga-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
+#endif
+
+static struct platform_driver xtfpga_spi_driver = {
+ .probe = xtfpga_spi_probe,
+ .remove = xtfpga_spi_remove,
+ .driver = {
+ .name = XTFPGA_SPI_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xtfpga_spi_of_match),
+ },
+};
+module_platform_driver(xtfpga_spi_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 349ebba4b199..4eb9bf02996c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -58,6 +60,11 @@ static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
@@ -114,6 +121,11 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
return 0;
@@ -245,13 +257,12 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- acpi_dev_pm_attach(&spi->dev, true);
- ret = sdrv->probe(spi);
+ acpi_dev_pm_attach(dev, true);
+ ret = sdrv->probe(to_spi_device(dev));
if (ret)
- acpi_dev_pm_detach(&spi->dev, true);
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -259,11 +270,10 @@ static int spi_drv_probe(struct device *dev)
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- ret = sdrv->remove(spi);
- acpi_dev_pm_detach(&spi->dev, true);
+ ret = sdrv->remove(to_spi_device(dev));
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -370,6 +380,17 @@ static void spi_dev_set_name(struct spi_device *spi)
spi->chip_select);
}
+static int spi_dev_check(struct device *dev, void *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *new_spi = data;
+
+ if (spi->master == new_spi->master &&
+ spi->chip_select == new_spi->chip_select)
+ return -EBUSY;
+ return 0;
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -384,7 +405,6 @@ int spi_add_device(struct spi_device *spi)
static DEFINE_MUTEX(spi_add_lock);
struct spi_master *master = spi->master;
struct device *dev = master->dev.parent;
- struct device *d;
int status;
/* Chipselects are numbered 0..max; validate. */
@@ -404,12 +424,10 @@ int spi_add_device(struct spi_device *spi)
*/
mutex_lock(&spi_add_lock);
- d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
- if (d != NULL) {
+ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
- put_device(d);
- status = -EBUSY;
goto done;
}
@@ -562,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->master->set_cs(spi, !enable);
}
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_buf = page_address(vm_page) +
+ ((size_t)buf & ~PAGE_MASK);
+ } else {
+ sg_buf = buf;
+ }
+
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+ int ret;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -573,9 +754,9 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_transfer *xfer;
- bool cur_cs = true;
bool keep_cs = false;
int ret = 0;
+ int ms = 1;
spi_set_cs(msg->spi, true);
@@ -591,8 +772,19 @@ static int spi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (ret > 0)
- wait_for_completion(&master->xfer_completion);
+ if (ret > 0) {
+ ret = 0;
+ ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms += 10; /* some tolerance */
+
+ ms = wait_for_completion_timeout(&master->xfer_completion,
+ msecs_to_jiffies(ms));
+ }
+
+ if (ms == 0) {
+ dev_err(&msg->spi->dev, "SPI transfer timed out\n");
+ msg->status = -ETIMEDOUT;
+ }
trace_spi_transfer_stop(msg, xfer);
@@ -607,8 +799,9 @@ static int spi_transfer_one_message(struct spi_master *master,
&msg->transfers)) {
keep_cs = true;
} else {
- cur_cs = !cur_cs;
- spi_set_cs(msg->spi, cur_cs);
+ spi_set_cs(msg->spi, false);
+ udelay(10);
+ spi_set_cs(msg->spi, true);
}
}
@@ -632,7 +825,7 @@ out:
*
* Called by SPI drivers using the core transfer_one_message()
* implementation to notify it that the current interrupt driven
- * transfer has finised and the next one may be scheduled.
+ * transfer has finished and the next one may be scheduled.
*/
void spi_finalize_current_transfer(struct spi_master *master)
{
@@ -666,6 +859,10 @@ static void spi_pump_messages(struct kthread_work *work)
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
@@ -685,7 +882,7 @@ static void spi_pump_messages(struct kthread_work *work)
}
/* Extract head of queue */
master->cur_msg =
- list_entry(master->queue.next, struct spi_message, queue);
+ list_first_entry(&master->queue, struct spi_message, queue);
list_del_init(&master->cur_msg->queue);
if (master->busy)
@@ -732,6 +929,13 @@ static void spi_pump_messages(struct kthread_work *work)
master->cur_msg_prepared = true;
}
+ ret = spi_map_msg(master, master->cur_msg);
+ if (ret) {
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -791,11 +995,8 @@ struct spi_message *spi_get_next_queued_message(struct spi_master *master)
/* get a pointer to the next message, if any */
spin_lock_irqsave(&master->queue_lock, flags);
- if (list_empty(&master->queue))
- next = NULL;
- else
- next = list_entry(master->queue.next,
- struct spi_message, queue);
+ next = list_first_entry_or_null(&master->queue, struct spi_message,
+ queue);
spin_unlock_irqrestore(&master->queue_lock, flags);
return next;
@@ -822,6 +1023,8 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ spi_unmap_msg(master, mesg);
+
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
@@ -875,7 +1078,7 @@ static int spi_stop_queue(struct spi_master *master)
*/
while ((!list_empty(&master->queue) || master->busy) && limit--) {
spin_unlock_irqrestore(&master->queue_lock, flags);
- msleep(10);
+ usleep_range(10000, 11000);
spin_lock_irqsave(&master->queue_lock, flags);
}
@@ -1355,6 +1558,8 @@ int spi_register_master(struct spi_master *master)
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1580,6 +1785,9 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = spi->master->max_speed_hz;
+
if (spi->master->setup)
status = spi->master->setup(spi);
@@ -1596,19 +1804,14 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
-static int __spi_async(struct spi_device *spi, struct spi_message *message)
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
-
- message->spi = spi;
-
- trace_spi_message_submit(message);
+ int w_size;
if (list_empty(&message->transfers))
return -EINVAL;
- if (!message->complete)
- return -EINVAL;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1639,12 +1842,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
- if (!xfer->speed_hz) {
+
+ if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- xfer->speed_hz = master->max_speed_hz;
- }
+
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
if (master->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
@@ -1655,21 +1859,32 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
}
+ /*
+ * SPI transfer length should be a multiple of the SPI word size,
+ * where the SPI word size is a power-of-two number of bytes.
+ */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
if (xfer->speed_hz && master->min_speed_hz &&
xfer->speed_hz < master->min_speed_hz)
return -EINVAL;
- if (xfer->speed_hz && master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- return -EINVAL;
if (xfer->tx_buf && !xfer->tx_nbits)
xfer->tx_nbits = SPI_NBITS_SINGLE;
if (xfer->rx_buf && !xfer->rx_nbits)
xfer->rx_nbits = SPI_NBITS_SINGLE;
/* check transfer tx/rx_nbits:
- * 1. keep the value is not out of single, dual and quad
- * 2. keep tx/rx_nbits is contained by mode in spi_device
- * 3. if SPI_3WIRE, tx/rx_nbits should be in single
+ * 1. check the value matches one of single, dual and quad
+ * 2. check tx/rx_nbits match the mode in spi_device
*/
if (xfer->tx_buf) {
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
@@ -1682,9 +1897,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_TX_QUAD))
return -EINVAL;
- if ((spi->mode & SPI_3WIRE) &&
- (xfer->tx_nbits != SPI_NBITS_SINGLE))
- return -EINVAL;
}
/* check transfer rx_nbits */
if (xfer->rx_buf) {
@@ -1698,13 +1910,22 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
- if ((spi->mode & SPI_3WIRE) &&
- (xfer->rx_nbits != SPI_NBITS_SINGLE))
- return -EINVAL;
}
}
message->status = -EINPROGRESS;
+
+ return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
return master->transfer(spi, message);
}
@@ -1743,6 +1964,10 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
if (master->bus_lock_flag)
@@ -1791,6 +2016,10 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
ret = __spi_async(spi, message);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d7c6e36021e8..e3bc23bb5883 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -73,7 +73,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
- | SPI_NO_CS | SPI_READY)
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
struct spidev_data {
dev_t devt;
@@ -265,6 +266,8 @@ static int spidev_message(struct spidev_data *spidev,
buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
@@ -359,6 +362,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = __put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
case SPI_IOC_RD_LSB_FIRST:
retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
@@ -372,9 +379,13 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* write requests */
case SPI_IOC_WR_MODE:
- retval = __get_user(tmp, (u8 __user *)arg);
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
@@ -382,18 +393,18 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
tmp |= spi->mode & ~SPI_MODE_MASK;
- spi->mode = (u8)tmp;
+ spi->mode = (u16)tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
- dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
new file mode 100644
index 000000000000..bf1295e19f89
--- /dev/null
+++ b/drivers/spmi/Kconfig
@@ -0,0 +1,27 @@
+#
+# SPMI driver configuration
+#
+menuconfig SPMI
+ tristate "SPMI support"
+ help
+ SPMI (System Power Management Interface) is a two-wire
+ serial interface between baseband and application processors
+ and Power Management Integrated Circuits (PMICs).
+
+if SPMI
+
+config SPMI_MSM_PMIC_ARB
+ tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
+ depends on ARM
+ depends on IRQ_DOMAIN
+ depends on ARCH_QCOM || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on Qualcomm MSM family
+ processors.
+
+ This is required for communicating with Qualcomm PMICs and
+ other devices that have the SPMI interface.
+
+endif
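
For orientation, the controller-side pattern that the new spmi-pmic-arb driver added below follows — allocate a controller, fill in the cmd/read_cmd/write_cmd callbacks, then add it to the core — condenses to the sketch below. This is an illustrative sketch only, not part of the patch set: the names are placeholders, the callback bodies are stubs, and error handling beyond the add path is omitted.

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/spmi.h>

	/* Placeholder private state; a real controller keeps register mappings here. */
	struct example_ctrl_data {
		void __iomem *regs;
	};

	/* Stub callbacks; a real controller drives its hardware in these. */
	static int example_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
	{
		return 0;
	}

	static int example_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
				    u16 addr, u8 *buf, size_t len)
	{
		return 0;
	}

	static int example_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
				     u16 addr, const u8 *buf, size_t len)
	{
		return 0;
	}

	static int example_spmi_probe(struct platform_device *pdev)
	{
		struct spmi_controller *ctrl;
		int err;

		ctrl = spmi_controller_alloc(&pdev->dev,
					     sizeof(struct example_ctrl_data));
		if (!ctrl)
			return -ENOMEM;

		/* Hook up the bus accessors the SPMI core will call. */
		ctrl->cmd = example_cmd;
		ctrl->read_cmd = example_read_cmd;
		ctrl->write_cmd = example_write_cmd;
		platform_set_drvdata(pdev, ctrl);

		err = spmi_controller_add(ctrl);
		if (err)
			spmi_controller_put(ctrl);
		return err;
	}
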
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
new file mode 100644
index 000000000000..fc75104a5aab
--- /dev/null
+++ b/drivers/spmi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for kernel SPMI framework.
+#
+obj-$(CONFIG_SPMI) += spmi.o
+
+obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
new file mode 100644
index 000000000000..246e03a18c94
--- /dev/null
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -0,0 +1,778 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/* PMIC Arbiter configuration registers */
+#define PMIC_ARB_VERSION 0x0000
+#define PMIC_ARB_INT_EN 0x0004
+
+/* PMIC Arbiter channel registers */
+#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
+#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
+#define PMIC_ARB_STATUS(N) (0x0808 + (0x80 * (N)))
+#define PMIC_ARB_WDATA0(N) (0x0810 + (0x80 * (N)))
+#define PMIC_ARB_WDATA1(N) (0x0814 + (0x80 * (N)))
+#define PMIC_ARB_RDATA0(N) (0x0818 + (0x80 * (N)))
+#define PMIC_ARB_RDATA1(N) (0x081C + (0x80 * (N)))
+
+/* Interrupt Controller */
+#define SPMI_PIC_OWNER_ACC_STATUS(M, N) (0x0000 + ((32 * (M)) + (4 * (N))))
+#define SPMI_PIC_ACC_ENABLE(N) (0x0200 + (4 * (N)))
+#define SPMI_PIC_IRQ_STATUS(N) (0x0600 + (4 * (N)))
+#define SPMI_PIC_IRQ_CLEAR(N) (0x0A00 + (4 * (N)))
+
+/* Mapping Table */
+#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
+#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
+#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
+#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
+#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
+#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
+
+#define SPMI_MAPPING_TABLE_LEN 255
+#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
+
+/* Ownership Table */
+#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
+#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)
+
+/* Channel Status fields */
+enum pmic_arb_chnl_status {
+ PMIC_ARB_STATUS_DONE = (1 << 0),
+ PMIC_ARB_STATUS_FAILURE = (1 << 1),
+ PMIC_ARB_STATUS_DENIED = (1 << 2),
+ PMIC_ARB_STATUS_DROPPED = (1 << 3),
+};
+
+/* Command register fields */
+#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
+
+/* Command Opcodes */
+enum pmic_arb_cmd_op_code {
+ PMIC_ARB_OP_EXT_WRITEL = 0,
+ PMIC_ARB_OP_EXT_READL = 1,
+ PMIC_ARB_OP_EXT_WRITE = 2,
+ PMIC_ARB_OP_RESET = 3,
+ PMIC_ARB_OP_SLEEP = 4,
+ PMIC_ARB_OP_SHUTDOWN = 5,
+ PMIC_ARB_OP_WAKEUP = 6,
+ PMIC_ARB_OP_AUTHENTICATE = 7,
+ PMIC_ARB_OP_MSTR_READ = 8,
+ PMIC_ARB_OP_MSTR_WRITE = 9,
+ PMIC_ARB_OP_EXT_READ = 13,
+ PMIC_ARB_OP_WRITE = 14,
+ PMIC_ARB_OP_READ = 15,
+ PMIC_ARB_OP_ZERO_WRITE = 16,
+};
+
+/* Maximum number of supported PMIC peripherals */
+#define PMIC_ARB_MAX_PERIPHS 256
+#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
+#define PMIC_ARB_TIMEOUT_US 100
+#define PMIC_ARB_MAX_TRANS_BYTES (8)
+
+#define PMIC_ARB_APID_MASK 0xFF
+#define PMIC_ARB_PPID_MASK 0xFFF
+
+/* interrupt enable bit */
+#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+
+/**
+ * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ *
+ * @base: address of the PMIC Arbiter core registers.
+ * @intr: address of the SPMI interrupt control registers.
+ * @cnfg: address of the PMIC Arbiter configuration registers.
+ * @lock: lock to synchronize accesses.
+ * @channel: which channel to use for accesses.
+ * @irq: PMIC ARB interrupt.
+ * @ee: the current Execution Environment
+ * @min_apid: minimum APID (used for bounding IRQ search)
+ * @max_apid: maximum APID
+ * @mapping_table: in-memory copy of PPID -> APID mapping table.
+ * @domain: irq domain object for PMIC IRQ domain
+ * @spmic: SPMI controller object
+ * @apid_to_ppid: cached mapping from APID to PPID
+ */
+struct spmi_pmic_arb_dev {
+ void __iomem *base;
+ void __iomem *intr;
+ void __iomem *cnfg;
+ raw_spinlock_t lock;
+ u8 channel;
+ int irq;
+ u8 ee;
+ u8 min_apid;
+ u8 max_apid;
+ u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
+ struct irq_domain *domain;
+ struct spmi_controller *spmic;
+ u16 apid_to_ppid[256];
+};
+
+static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
+{
+ return readl_relaxed(dev->base + offset);
+}
+
+static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+ u32 offset, u32 val)
+{
+ writel_relaxed(val, dev->base + offset);
+}
+
+/**
+ * pa_read_data: reads a pmic-arb register and copies 1..4 bytes to buf
+ * @bc: byte count -1. range: 0..3
+ * @reg: register's address
+ * @buf: output parameter, length must be bc + 1
+ */
+static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+{
+ u32 data = pmic_arb_base_read(dev, reg);
+ memcpy(buf, &data, (bc & 3) + 1);
+}
+
+/**
+ * pa_write_data: writes 1..4 bytes from buf to a pmic-arb register
+ * @bc: byte-count -1. range: 0..3.
+ * @reg: register's address.
+ * @buf: buffer to write. length must be bc + 1.
+ */
+static void
+pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+{
+ u32 data = 0;
+ memcpy(&data, buf, (bc & 3) + 1);
+ pmic_arb_base_write(dev, reg, data);
+}
+
+static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
+{
+ struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+ u32 status = 0;
+ u32 timeout = PMIC_ARB_TIMEOUT_US;
+ u32 offset = PMIC_ARB_STATUS(dev->channel);
+
+ while (timeout--) {
+ status = pmic_arb_base_read(dev, offset);
+
+ if (status & PMIC_ARB_STATUS_DONE) {
+ if (status & PMIC_ARB_STATUS_DENIED) {
+ dev_err(&ctrl->dev,
+ "%s: transaction denied (0x%x)\n",
+ __func__, status);
+ return -EPERM;
+ }
+
+ if (status & PMIC_ARB_STATUS_FAILURE) {
+ dev_err(&ctrl->dev,
+ "%s: transaction failed (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ if (status & PMIC_ARB_STATUS_DROPPED) {
+ dev_err(&ctrl->dev,
+ "%s: transaction dropped (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ return 0;
+ }
+ udelay(1);
+ }
+
+ dev_err(&ctrl->dev,
+ "%s: timeout, status 0x%x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+}
+
+/* Non-data command */
+static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd;
+ int rc;
+
+ /* Check for valid non-data command */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u8 bc = len - 1;
+ u32 cmd;
+ int rc;
+
+ if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "pmic-arb supports 1..%d bytes per trans, but %d requested",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x60 && opc <= 0x7F)
+ opc = PMIC_ARB_OP_READ;
+ else if (opc >= 0x20 && opc <= 0x2F)
+ opc = PMIC_ARB_OP_EXT_READ;
+ else if (opc >= 0x38 && opc <= 0x3F)
+ opc = PMIC_ARB_OP_EXT_READL;
+ else
+ return -EINVAL;
+
+ cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ if (rc)
+ goto done;
+
+ pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel),
+ min_t(u8, bc, 3));
+
+ if (bc > 3)
+ pa_read_data(pmic_arb, buf + 4,
+ PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4);
+
+done:
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ return rc;
+}
+
+static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u8 bc = len - 1;
+ u32 cmd;
+ int rc;
+
+ if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "pmic-arb supports 1..%d bytes per trans, but:%d requested",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x40 && opc <= 0x5F)
+ opc = PMIC_ARB_OP_WRITE;
+ else if (opc >= 0x00 && opc <= 0x0F)
+ opc = PMIC_ARB_OP_EXT_WRITE;
+ else if (opc >= 0x30 && opc <= 0x37)
+ opc = PMIC_ARB_OP_EXT_WRITEL;
+ else if (opc >= 0x80 && opc <= 0xFF)
+ opc = PMIC_ARB_OP_ZERO_WRITE;
+ else
+ return -EINVAL;
+
+ cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+
+ /* Write data to FIFOs */
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel),
+ min_t(u8, bc, 3));
+ if (bc > 3)
+ pa_write_data(pmic_arb, buf + 4,
+ PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4);
+
+ /* Start the transaction */
+ pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
+ rc = pmic_arb_wait_for_done(ctrl);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+enum qpnpint_regs {
+ QPNPINT_REG_RT_STS = 0x10,
+ QPNPINT_REG_SET_TYPE = 0x11,
+ QPNPINT_REG_POLARITY_HIGH = 0x12,
+ QPNPINT_REG_POLARITY_LOW = 0x13,
+ QPNPINT_REG_LATCHED_CLR = 0x14,
+ QPNPINT_REG_EN_SET = 0x15,
+ QPNPINT_REG_EN_CLR = 0x16,
+ QPNPINT_REG_LATCHED_STS = 0x18,
+};
+
+struct spmi_pmic_arb_qpnpint_type {
+ u8 type; /* 1 -> edge */
+ u8 polarity_high;
+ u8 polarity_low;
+} __packed;
+
+/* Simplified accessor functions for irqchip callbacks */
+static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
+ size_t len)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = d->hwirq >> 24;
+ u8 per = d->hwirq >> 16;
+
+ if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = d->hwirq >> 24;
+ u8 per = d->hwirq >> 16;
+
+ if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+{
+ unsigned int irq;
+ u32 status;
+ int id;
+
+ status = readl_relaxed(pa->intr + SPMI_PIC_IRQ_STATUS(apid));
+ while (status) {
+ id = ffs(status) - 1;
+ status &= ~(1 << id);
+ irq = irq_find_mapping(pa->domain,
+ pa->apid_to_ppid[apid] << 16
+ | id << 8
+ | apid);
+ generic_handle_irq(irq);
+ }
+}
+
+static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ void __iomem *intr = pa->intr;
+ int first = pa->min_apid >> 5;
+ int last = pa->max_apid >> 5;
+ u32 status;
+ int i, id;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = first; i <= last; ++i) {
+ status = readl_relaxed(intr +
+ SPMI_PIC_OWNER_ACC_STATUS(pa->ee, i));
+ while (status) {
+ id = ffs(status) - 1;
+ status &= ~(1 << id);
+ periph_interrupt(pa, id + i * 32);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void qpnpint_irq_ack(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ writel_relaxed(1 << irq, pa->intr + SPMI_PIC_IRQ_CLEAR(apid));
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
+}
+
+static void qpnpint_irq_mask(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u32 status;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ if (status & SPMI_PIC_ACC_ENABLE_BIT) {
+ status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
+ writel_relaxed(status, pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ }
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
+}
+
+static void qpnpint_irq_unmask(struct irq_data *d)
+{
+ struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = d->hwirq >> 8;
+ u8 apid = d->hwirq;
+ unsigned long flags;
+ u32 status;
+ u8 data;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
+ writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
+ pa->intr + SPMI_PIC_ACC_ENABLE(apid));
+ }
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
+}
+
+static void qpnpint_irq_enable(struct irq_data *d)
+{
+ u8 irq = d->hwirq >> 8;
+ u8 data;
+
+ qpnpint_irq_unmask(d);
+
+ data = 1 << irq;
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
+}
+
+static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct spmi_pmic_arb_qpnpint_type type;
+ u8 irq = d->hwirq >> 8;
+
+ qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ type.type |= 1 << irq;
+ if (flow_type & IRQF_TRIGGER_RISING)
+ type.polarity_high |= 1 << irq;
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ type.polarity_low |= 1 << irq;
+ } else {
+ if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
+ (flow_type & (IRQF_TRIGGER_LOW)))
+ return -EINVAL;
+
+ type.type &= ~(1 << irq); /* level trig */
+ if (flow_type & IRQF_TRIGGER_HIGH)
+ type.polarity_high |= 1 << irq;
+ else
+ type.polarity_low |= 1 << irq;
+ }
+
+ qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ return 0;
+}
+
+static struct irq_chip pmic_arb_irqchip = {
+ .name = "pmic_arb",
+ .irq_enable = qpnpint_irq_enable,
+ .irq_ack = qpnpint_irq_ack,
+ .irq_mask = qpnpint_irq_mask,
+ .irq_unmask = qpnpint_irq_unmask,
+ .irq_set_type = qpnpint_irq_set_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_SKIP_SET_WAKE,
+};
+
+struct spmi_pmic_arb_irq_spec {
+ unsigned slave:4;
+ unsigned per:8;
+ unsigned irq:3;
+};
+
+static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
+ struct spmi_pmic_arb_irq_spec *spec,
+ u8 *apid)
+{
+ u16 ppid = spec->slave << 8 | spec->per;
+ u32 *mapping_table = pa->mapping_table;
+ int index = 0, i;
+ u32 data;
+
+ for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+ data = mapping_table[index];
+
+ if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
+ if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ return 0;
+ }
+ } else {
+ if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct spmi_pmic_arb_dev *pa = d->host_data;
+ struct spmi_pmic_arb_irq_spec spec;
+ int err;
+ u8 apid;
+
+ dev_dbg(&pa->spmic->dev,
+ "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
+ intspec[0], intspec[1], intspec[2]);
+
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize != 4)
+ return -EINVAL;
+ if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
+ return -EINVAL;
+
+ spec.slave = intspec[0];
+ spec.per = intspec[1];
+ spec.irq = intspec[2];
+
+ err = search_mapping_table(pa, &spec, &apid);
+ if (err)
+ return err;
+
+ pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+
+ /* Keep track of {max,min}_apid for bounding search during interrupt */
+ if (apid > pa->max_apid)
+ pa->max_apid = apid;
+ if (apid < pa->min_apid)
+ pa->min_apid = apid;
+
+ *out_hwirq = spec.slave << 24
+ | spec.per << 16
+ | spec.irq << 8
+ | apid;
+ *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
+
+ dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
+
+ return 0;
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct spmi_pmic_arb_dev *pa = d->host_data;
+
+ dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
+
+ irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_noprobe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
+ .map = qpnpint_irq_domain_map,
+ .xlate = qpnpint_irq_domain_dt_translate,
+};
+
+static int spmi_pmic_arb_probe(struct platform_device *pdev)
+{
+ struct spmi_pmic_arb_dev *pa;
+ struct spmi_controller *ctrl;
+ struct resource *res;
+ u32 channel, ee;
+ int err, i;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
+ if (!ctrl)
+ return -ENOMEM;
+
+ pa = spmi_controller_get_drvdata(ctrl);
+ pa->spmic = ctrl;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ pa->base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->base)) {
+ err = PTR_ERR(pa->base);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
+ pa->intr = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->intr)) {
+ err = PTR_ERR(pa->intr);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
+ pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->cnfg)) {
+ err = PTR_ERR(pa->cnfg);
+ goto err_put_ctrl;
+ }
+
+ pa->irq = platform_get_irq_byname(pdev, "periph_irq");
+ if (pa->irq < 0) {
+ err = pa->irq;
+ goto err_put_ctrl;
+ }
+
+ err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
+ if (err) {
+ dev_err(&pdev->dev, "channel unspecified.\n");
+ goto err_put_ctrl;
+ }
+
+ if (channel > 5) {
+ dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
+ channel);
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->channel = channel;
+
+ err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
+ if (err) {
+ dev_err(&pdev->dev, "EE unspecified.\n");
+ goto err_put_ctrl;
+ }
+
+ if (ee > 5) {
+ dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->ee = ee;
+
+ for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
+ pa->mapping_table[i] = readl_relaxed(
+ pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+
+ /* Initialize max_apid/min_apid to the opposite bounds; they are
+ * updated as interrupts are mapped through the irq domain translation. */
+ pa->max_apid = 0;
+ pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
+
+ platform_set_drvdata(pdev, ctrl);
+ raw_spin_lock_init(&pa->lock);
+
+ ctrl->cmd = pmic_arb_cmd;
+ ctrl->read_cmd = pmic_arb_read_cmd;
+ ctrl->write_cmd = pmic_arb_write_cmd;
+
+ dev_dbg(&pdev->dev, "adding irq domain\n");
+ pa->domain = irq_domain_add_tree(pdev->dev.of_node,
+ &pmic_arb_irq_domain_ops, pa);
+ if (!pa->domain) {
+ dev_err(&pdev->dev, "unable to create irq_domain\n");
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ irq_set_handler_data(pa->irq, pa);
+ irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
+
+ err = spmi_controller_add(ctrl);
+ if (err)
+ goto err_domain_remove;
+
+ dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
+ pmic_arb_base_read(pa, PMIC_ARB_VERSION));
+
+ return 0;
+
+err_domain_remove:
+ irq_set_chained_handler(pa->irq, NULL);
+ irq_set_handler_data(pa->irq, NULL);
+ irq_domain_remove(pa->domain);
+err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return err;
+}
+
+static int spmi_pmic_arb_remove(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+ struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+ spmi_controller_remove(ctrl);
+ irq_set_chained_handler(pa->irq, NULL);
+ irq_set_handler_data(pa->irq, NULL);
+ irq_domain_remove(pa->domain);
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id spmi_pmic_arb_match_table[] = {
+ { .compatible = "qcom,spmi-pmic-arb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);
+
+static struct platform_driver spmi_pmic_arb_driver = {
+ .probe = spmi_pmic_arb_probe,
+ .remove = spmi_pmic_arb_remove,
+ .driver = {
+ .name = "spmi_pmic_arb",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_pmic_arb_match_table,
+ },
+};
+module_platform_driver(spmi_pmic_arb_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_pmic_arb");
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
new file mode 100644
index 000000000000..3b5780710d50
--- /dev/null
+++ b/drivers/spmi/spmi.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spmi.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/spmi/spmi.h>
+
+static DEFINE_IDA(ctrl_ida);
+
+static void spmi_dev_release(struct device *dev)
+{
+ struct spmi_device *sdev = to_spmi_device(dev);
+ kfree(sdev);
+}
+
+static const struct device_type spmi_dev_type = {
+ .release = spmi_dev_release,
+};
+
+static void spmi_ctrl_release(struct device *dev)
+{
+ struct spmi_controller *ctrl = to_spmi_controller(dev);
+ ida_simple_remove(&ctrl_ida, ctrl->nr);
+ kfree(ctrl);
+}
+
+static const struct device_type spmi_ctrl_type = {
+ .release = spmi_ctrl_release,
+};
+
+static int spmi_device_match(struct device *dev, struct device_driver *drv)
+{
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (drv->name)
+ return strncmp(dev_name(dev), drv->name,
+ SPMI_NAME_SIZE) == 0;
+
+ return 0;
+}
+
+/**
+ * spmi_device_add() - add a device previously constructed via spmi_device_alloc()
+ * @sdev: spmi_device to be added
+ */
+int spmi_device_add(struct spmi_device *sdev)
+{
+ struct spmi_controller *ctrl = sdev->ctrl;
+ int err;
+
+ dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
+
+ err = device_add(&sdev->dev);
+ if (err < 0) {
+ dev_err(&sdev->dev, "Can't add %s, status %d\n",
+ dev_name(&sdev->dev), err);
+ goto err_device_add;
+ }
+
+ dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev));
+
+err_device_add:
+ return err;
+}
+EXPORT_SYMBOL_GPL(spmi_device_add);
+
+/**
+ * spmi_device_remove(): remove an SPMI device
+ * @sdev: spmi_device to be removed
+ */
+void spmi_device_remove(struct spmi_device *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(spmi_device_remove);
+
+static inline int
+spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
+{
+ if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->cmd(ctrl, opcode, sid);
+}
+
+static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, u8 *buf, size_t len)
+{
+ if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+}
+
+static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, const u8 *buf, size_t len)
+{
+ if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+}
+
+/**
+ * spmi_register_read() - register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (5-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ *
+ * Reads 1 byte of data from a Slave device register.
+ */
+int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_READ, sdev->usid, addr,
+ buf, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_read);
+
+/**
+ * spmi_ext_register_read() - extended register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 16 bytes).
+ *
+ * Reads up to 16 bytes of data from the extended register space on a
+ * Slave device.
+ */
+int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READ, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_read);
+
+/**
+ * spmi_ext_register_readl() - extended register read long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 8 bytes).
+ *
+ * Reads up to 8 bytes of data from the extended register space on a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
+ size_t len)
+{
+ /* 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_readl);
+
+/**
+ * spmi_register_write() - register write
+ * @sdev: SPMI device
+ * @addr: slave register address (5-bit address).
+ * @data: the byte of data to be transferred to the Slave register.
+ *
+ * Writes 1 byte of data to a Slave device register.
+ */
+int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_WRITE, sdev->usid, addr,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_write);
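
As a rough usage sketch (the register offset 0x05, the bit mask, and the helper name are hypothetical, not part of this patch), a slave driver holding a struct spmi_device could drive these byte-wide accessors like so:

/* Hypothetical illustration of the 5-bit-address accessors above. */
static int example_toggle_enable(struct spmi_device *sdev)
{
	u8 val;
	int ret;

	ret = spmi_register_read(sdev, 0x05, &val);	/* 0x05: made-up offset */
	if (ret)
		return ret;

	val |= 0x01;					/* made-up enable bit */
	return spmi_register_write(sdev, 0x05, val);
}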
+
+/**
+ * spmi_register_zero_write() - register zero write
+ * @sdev: SPMI device.
+ * @data: the data to be written to register 0 (7-bits).
+ *
+ * Writes data to register 0 of the Slave device.
+ */
+int spmi_register_zero_write(struct spmi_device *sdev, u8 data)
+{
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_ZERO_WRITE, sdev->usid, 0,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_zero_write);
+
+/**
+ * spmi_ext_register_write() - extended register write
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 16 bytes).
+ *
+ * Writes up to 16 bytes of data to the extended register space of a
+ * Slave device.
+ */
+int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, const u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITE, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_write);
+
+/**
+ * spmi_ext_register_writel() - extended register write long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 8 bytes).
+ *
+ * Writes up to 8 bytes of data to the extended register space of a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, const u8 *buf,
+ size_t len)
+{
+ /* 4-bit Slave Identifier, 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITEL, sdev->usid,
+ addr, buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_writel);
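
The 16-bit-addressed long variants follow the same pattern; a sketch like the one below (the 0x4046 offset and the buffer handling are illustrative only) shows the 8-byte transfer cap in practice:

/* Hypothetical read-modify-write through the 16-bit-address commands above. */
static int example_ext_access(struct spmi_device *sdev)
{
	u8 buf[8];		/* readl/writel transfers are capped at 8 bytes */
	int ret;

	ret = spmi_ext_register_readl(sdev, 0x4046, buf, sizeof(buf));
	if (ret)
		return ret;

	buf[0] ^= 0x80;		/* made-up bit flip */
	return spmi_ext_register_writel(sdev, 0x4046, buf, sizeof(buf));
}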
+
+/**
+ * spmi_command_reset() - sends RESET command to the specified slave
+ * @sdev: SPMI device.
+ *
+ * The Reset command initializes the Slave and forces all registers to
+ * their reset values. The Slave shall enter the STARTUP state after
+ * receiving a Reset command.
+ */
+int spmi_command_reset(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_RESET, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_reset);
+
+/**
+ * spmi_command_sleep() - sends SLEEP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Sleep command causes the Slave to enter the user defined SLEEP state.
+ */
+int spmi_command_sleep(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SLEEP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_sleep);
+
+/**
+ * spmi_command_wakeup() - sends WAKEUP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Wakeup command causes the Slave to move from the SLEEP state to
+ * the ACTIVE state.
+ */
+int spmi_command_wakeup(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_WAKEUP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_wakeup);
+
+/**
+ * spmi_command_shutdown() - sends SHUTDOWN command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Shutdown command causes the Slave to enter the SHUTDOWN state.
+ */
+int spmi_command_shutdown(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SHUTDOWN, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_shutdown);
+
+static int spmi_drv_probe(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+ struct spmi_device *sdev = to_spmi_device(dev);
+ int err;
+
+ /* Ensure the slave is in ACTIVE state */
+ err = spmi_command_wakeup(sdev);
+ if (err)
+ goto fail_wakeup;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ err = sdrv->probe(sdev);
+ if (err)
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+fail_wakeup:
+ return err;
+}
+
+static int spmi_drv_remove(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+
+ pm_runtime_get_sync(dev);
+ sdrv->remove(to_spmi_device(dev));
+ pm_runtime_put_noidle(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ return 0;
+}
+
+static struct bus_type spmi_bus_type = {
+ .name = "spmi",
+ .match = spmi_device_match,
+ .probe = spmi_drv_probe,
+ .remove = spmi_drv_remove,
+};
+
+/**
+ * spmi_device_alloc() - Allocate a new SPMI device
+ * @ctrl: associated controller
+ *
+ * Caller is responsible for either calling spmi_device_add() to add the
+ * newly allocated device, or calling spmi_device_put() to discard it.
+ */
+struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
+{
+ struct spmi_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ sdev->ctrl = ctrl;
+ device_initialize(&sdev->dev);
+ sdev->dev.parent = &ctrl->dev;
+ sdev->dev.bus = &spmi_bus_type;
+ sdev->dev.type = &spmi_dev_type;
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(spmi_device_alloc);
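
A controller driver that discovers slaves by some means other than DT could follow the alloc/add contract described above roughly as follows (spmi_device_put() is assumed to be the reference-dropping helper declared in linux/spmi.h, as used by of_spmi_register_devices() below):

/* Hypothetical sketch of the spmi_device_alloc()/spmi_device_add() contract. */
static int example_register_slave(struct spmi_controller *ctrl, u8 usid)
{
	struct spmi_device *sdev;
	int err;

	sdev = spmi_device_alloc(ctrl);
	if (!sdev)
		return -ENOMEM;

	sdev->usid = usid;

	err = spmi_device_add(sdev);
	if (err)
		spmi_device_put(sdev);	/* drops the only reference */

	return err;
}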
+
+/**
+ * spmi_controller_alloc() - Allocate a new SPMI controller
+ * @parent: parent device
+ * @size: size of private data
+ *
+ * Caller is responsible for either calling spmi_controller_add() to add the
+ * newly allocated controller, or calling spmi_controller_put() to discard it.
+ * The allocated private data region may be accessed via
+ * spmi_controller_get_drvdata()
+ */
+struct spmi_controller *spmi_controller_alloc(struct device *parent,
+ size_t size)
+{
+ struct spmi_controller *ctrl;
+ int id;
+
+ if (WARN_ON(!parent))
+ return NULL;
+
+ ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &spmi_ctrl_type;
+ ctrl->dev.bus = &spmi_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ spmi_controller_set_drvdata(ctrl, &ctrl[1]);
+
+ id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(parent,
+ "unable to allocate SPMI controller identifier.\n");
+ spmi_controller_put(ctrl);
+ return NULL;
+ }
+
+ ctrl->nr = id;
+ dev_set_name(&ctrl->dev, "spmi-%d", id);
+
+ dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(spmi_controller_alloc);
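
Putting the two controller calls together, a bus driver's registration path could look roughly like the sketch below; the callback prototypes are assumed from how spmi_read_cmd()/spmi_write_cmd() invoke them above, and all example_* names are made up:

/* Hypothetical bus-driver path: alloc controller, fill callbacks, add it. */
struct example_ctrl {
	void __iomem *base;			/* imaginary register window */
};

static int example_read_cmd(struct spmi_controller *ctrl, u8 opcode,
			    u8 sid, u16 addr, u8 *buf, size_t len)
{
	return -EOPNOTSUPP;			/* real register access goes here */
}

static int example_write_cmd(struct spmi_controller *ctrl, u8 opcode,
			     u8 sid, u16 addr, const u8 *buf, size_t len)
{
	return -EOPNOTSUPP;
}

static int example_register_controller(struct device *parent)
{
	struct spmi_controller *ctrl;
	struct example_ctrl *ec;
	int err;

	ctrl = spmi_controller_alloc(parent, sizeof(*ec));
	if (!ctrl)
		return -ENOMEM;

	ec = spmi_controller_get_drvdata(ctrl);	/* private area behind ctrl */
	ec->base = NULL;			/* would be ioremapped for real */

	ctrl->read_cmd = example_read_cmd;
	ctrl->write_cmd = example_write_cmd;

	err = spmi_controller_add(ctrl);
	if (err)
		spmi_controller_put(ctrl);
	return err;
}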
+
+static void of_spmi_register_devices(struct spmi_controller *ctrl)
+{
+ struct device_node *node;
+ int err;
+
+ if (!ctrl->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(ctrl->dev.of_node, node) {
+ struct spmi_device *sdev;
+ u32 reg[2];
+
+ dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
+
+ err = of_property_read_u32_array(node, "reg", reg, 2);
+ if (err) {
+ dev_err(&ctrl->dev,
+ "node %s err (%d) does not have 'reg' property\n",
+ node->full_name, err);
+ continue;
+ }
+
+ if (reg[1] != SPMI_USID) {
+ dev_err(&ctrl->dev,
+ "node %s contains unsupported 'reg' entry\n",
+ node->full_name);
+ continue;
+ }
+
+ if (reg[0] >= SPMI_MAX_SLAVE_ID) {
+ dev_err(&ctrl->dev,
+ "invalid usid on node %s\n",
+ node->full_name);
+ continue;
+ }
+
+ dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]);
+
+ sdev = spmi_device_alloc(ctrl);
+ if (!sdev)
+ continue;
+
+ sdev->dev.of_node = node;
+ sdev->usid = (u8) reg[0];
+
+ err = spmi_device_add(sdev);
+ if (err) {
+ dev_err(&sdev->dev,
+ "failure adding device. status %d\n", err);
+ spmi_device_put(sdev);
+ }
+ }
+}
+
+/**
+ * spmi_controller_add() - Add an SPMI controller
+ * @ctrl: controller to be registered.
+ *
+ * Register a controller previously allocated via spmi_controller_alloc() with
+ * the SPMI core.
+ */
+int spmi_controller_add(struct spmi_controller *ctrl)
+{
+ int ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!spmi_bus_type.p))
+ return -EAGAIN;
+
+ ret = device_add(&ctrl->dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_spmi_register_devices(ctrl);
+
+ dev_dbg(&ctrl->dev, "spmi-%d registered: dev:%p\n",
+ ctrl->nr, &ctrl->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spmi_controller_add);
+
+/* Remove a device associated with a controller */
+static int spmi_ctrl_remove_device(struct device *dev, void *data)
+{
+ struct spmi_device *spmidev = to_spmi_device(dev);
+ if (dev->type == &spmi_dev_type)
+ spmi_device_remove(spmidev);
+ return 0;
+}
+
+/**
+ * spmi_controller_remove(): remove an SPMI controller
+ * @ctrl: controller to remove
+ *
+ * Remove an SPMI controller. Caller is responsible for calling
+ * spmi_controller_put() to discard the allocated controller.
+ */
+void spmi_controller_remove(struct spmi_controller *ctrl)
+{
+ int dummy;
+
+ if (!ctrl)
+ return;
+
+ dummy = device_for_each_child(&ctrl->dev, NULL,
+ spmi_ctrl_remove_device);
+ device_del(&ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(spmi_controller_remove);
+
+/**
+ * spmi_driver_register() - Register client driver with SPMI core
+ * @sdrv: client driver to be associated with client-device.
+ *
+ * This API will register the client driver with the SPMI framework.
+ * It is typically called from the driver's module-init function.
+ */
+int spmi_driver_register(struct spmi_driver *sdrv)
+{
+ sdrv->driver.bus = &spmi_bus_type;
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(spmi_driver_register);
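
A minimal client-driver skeleton built on this registration call might look like the following (the driver name and probe body are placeholders, and the .probe field of struct spmi_driver is assumed from how spmi_drv_probe() invokes it above):

/* Hypothetical SPMI client driver registering itself with the core. */
static int example_pmic_probe(struct spmi_device *sdev)
{
	dev_info(&sdev->dev, "example function probed, usid %d\n", sdev->usid);
	return 0;
}

static struct spmi_driver example_pmic_driver = {
	.probe = example_pmic_probe,
	.driver = {
		.name = "example-pmic",
		.owner = THIS_MODULE,
	},
};

static int __init example_pmic_init(void)
{
	return spmi_driver_register(&example_pmic_driver);
}
module_init(example_pmic_init);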
+
+static void __exit spmi_exit(void)
+{
+ bus_unregister(&spmi_bus_type);
+}
+module_exit(spmi_exit);
+
+static int __init spmi_init(void)
+{
+ return bus_register(&spmi_bus_type);
+}
+postcore_initcall(spmi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPMI module");
+MODULE_ALIAS("platform:spmi");
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2cd9b0e44a41..75b3603906c1 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -168,6 +168,7 @@ config SSB_DRIVER_GIGE
config SSB_DRIVER_GPIO
bool "SSB GPIO driver"
depends on SSB && GPIOLIB
+ select IRQ_DOMAIN if SSB_EMBEDDED
help
Driver to provide access to the GPIO pins on the bus.
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index 50328de712fa..937fc31971a7 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -37,7 +37,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
- { 0 },
+ { NULL },
};
static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
@@ -55,7 +55,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
{ "SST25VF016", 0x41, 0x1000, 512, },
{ "SST25VF032", 0x4a, 0x1000, 1024, },
{ "SST25VF064", 0x4b, 0x1000, 2048, },
- { 0 },
+ { NULL },
};
static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
@@ -66,7 +66,7 @@ static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
{ "AT45DB161", 0x2c, 512, 4096, },
{ "AT45DB321", 0x34, 512, 8192, },
{ "AT45DB642", 0x3c, 1024, 8192, },
- { 0 },
+ { NULL },
};
static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index dc109de228c6..ba350d2035c0 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -9,16 +9,40 @@
*/
#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include "ssb_private.h"
+
+/**************************************************
+ * Shared
+ **************************************************/
+
static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
{
return container_of(chip, struct ssb_bus, gpio);
}
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return irq_find_mapping(bus->irq_domain, gpio);
+ else
+ return -EINVAL;
+}
+#endif
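
ssb_gpio_to_irq() is hooked up as the gpiolib to_irq callback further down, so a hypothetical consumer could resolve and claim one of these lines through the usual gpio_to_irq()/request_irq() path (handler and names are illustrative only):

/* Hypothetical consumer of the new SSB GPIO IRQ domain via gpiolib. */
static irqreturn_t example_button_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_button_irq(unsigned int gpio)
{
	int irq = gpio_to_irq(gpio);	/* resolves via ssb_gpio_to_irq() */

	if (irq < 0)
		return irq;

	return request_irq(irq, example_button_isr, 0, "example-button", NULL);
}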
+
+/**************************************************
+ * ChipCommon
+ **************************************************/
+
static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
@@ -74,19 +98,129 @@ static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
}
-static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_chipco_mask(struct irq_data *d)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
- if (bus->bustype == SSB_BUSTYPE_SSB)
- return ssb_mips_irq(bus->chipco.dev) + 2;
- else
- return -EINVAL;
+ ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), 0);
+}
+
+static void ssb_gpio_irq_chipco_unmask(struct irq_data *d)
+{
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = ssb_chipco_gpio_in(&bus->chipco, BIT(gpio));
+
+ ssb_chipco_gpio_polarity(&bus->chipco, BIT(gpio), val);
+ ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_chipco_chip = {
+ .name = "SSB-GPIO-CC",
+ .irq_mask = ssb_gpio_irq_chipco_mask,
+ .irq_unmask = ssb_gpio_irq_chipco_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id)
+{
+ struct ssb_bus *bus = dev_id;
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ u32 val = chipco_read32(chipco, SSB_CHIPCO_GPIOIN);
+ u32 mask = chipco_read32(chipco, SSB_CHIPCO_GPIOIRQ);
+ u32 pol = chipco_read32(chipco, SSB_CHIPCO_GPIOPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+ generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio, hwirq, err;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return 0;
+
+ bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, chipco);
+ if (!bus->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+ irq_set_chip_data(irq, bus);
+ irq_set_chip_and_handler(irq, &ssb_gpio_irq_chipco_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = ssb_mips_irq(bus->chipco.dev) + 2;
+ err = request_irq(hwirq, ssb_gpio_irq_chipco_handler, IRQF_SHARED,
+ "gpio", bus);
+ if (err)
+ goto err_req_irq;
+
+ ssb_chipco_gpio_intmask(&bus->chipco, ~0, 0);
+ chipco_set32(chipco, SSB_CHIPCO_IRQMASK, SSB_CHIPCO_IRQ_GPIO);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+ struct ssb_chipcommon *chipco = &bus->chipco;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return;
+
+ chipco_mask32(chipco, SSB_CHIPCO_IRQMASK, ~SSB_CHIPCO_IRQ_GPIO);
+ free_irq(ssb_mips_irq(bus->chipco.dev) + 2, bus);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+}
+#else
+static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
+{
+ return 0;
}
+static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
+
static int ssb_gpio_chipco_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
+ int err;
chip->label = "ssb_chipco_gpio";
chip->owner = THIS_MODULE;
@@ -96,7 +230,9 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->set = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
- chip->to_irq = ssb_gpio_chipco_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+ chip->to_irq = ssb_gpio_to_irq;
+#endif
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -106,9 +242,23 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = ssb_gpio_irq_chipco_domain_init(bus);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ ssb_gpio_irq_chipco_domain_exit(bus);
+ return err;
+ }
+
+ return 0;
}
+/**************************************************
+ * EXTIF
+ **************************************************/
+
#ifdef CONFIG_SSB_DRIVER_EXTIF
static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
@@ -145,19 +295,127 @@ static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
return 0;
}
-static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+static void ssb_gpio_irq_extif_mask(struct irq_data *d)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
- if (bus->bustype == SSB_BUSTYPE_SSB)
- return ssb_mips_irq(bus->extif.dev) + 2;
- else
- return -EINVAL;
+ ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), 0);
+}
+
+static void ssb_gpio_irq_extif_unmask(struct irq_data *d)
+{
+ struct ssb_bus *bus = irq_data_get_irq_chip_data(d);
+ int gpio = irqd_to_hwirq(d);
+ u32 val = ssb_extif_gpio_in(&bus->extif, BIT(gpio));
+
+ ssb_extif_gpio_polarity(&bus->extif, BIT(gpio), val);
+ ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), BIT(gpio));
+}
+
+static struct irq_chip ssb_gpio_irq_extif_chip = {
+ .name = "SSB-GPIO-EXTIF",
+ .irq_mask = ssb_gpio_irq_extif_mask,
+ .irq_unmask = ssb_gpio_irq_extif_unmask,
+};
+
+static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id)
+{
+ struct ssb_bus *bus = dev_id;
+ struct ssb_extif *extif = &bus->extif;
+ u32 val = ssb_read32(extif->dev, SSB_EXTIF_GPIO_IN);
+ u32 mask = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTMASK);
+ u32 pol = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTPOL);
+ unsigned long irqs = (val ^ pol) & mask;
+ int gpio;
+
+ if (!irqs)
+ return IRQ_NONE;
+
+ for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
+ generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
+ ssb_extif_gpio_polarity(extif, irqs, val & irqs);
+
+ return IRQ_HANDLED;
+}
+
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+ struct ssb_extif *extif = &bus->extif;
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio, hwirq, err;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return 0;
+
+ bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
+ &irq_domain_simple_ops, extif);
+ if (!bus->irq_domain) {
+ err = -ENODEV;
+ goto err_irq_domain;
+ }
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_create_mapping(bus->irq_domain, gpio);
+
+ irq_set_chip_data(irq, bus);
+ irq_set_chip_and_handler(irq, &ssb_gpio_irq_extif_chip,
+ handle_simple_irq);
+ }
+
+ hwirq = ssb_mips_irq(bus->extif.dev) + 2;
+ err = request_irq(hwirq, ssb_gpio_irq_extif_handler, IRQF_SHARED,
+ "gpio", bus);
+ if (err)
+ goto err_req_irq;
+
+ ssb_extif_gpio_intmask(&bus->extif, ~0, 0);
+
+ return 0;
+
+err_req_irq:
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
+err_irq_domain:
+ return err;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+ struct gpio_chip *chip = &bus->gpio;
+ int gpio;
+
+ if (bus->bustype != SSB_BUSTYPE_SSB)
+ return;
+
+ free_irq(ssb_mips_irq(bus->extif.dev) + 2, bus);
+ for (gpio = 0; gpio < chip->ngpio; gpio++) {
+ int irq = irq_find_mapping(bus->irq_domain, gpio);
+
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(bus->irq_domain);
}
+#else
+static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
+{
+ return 0;
+}
+
+static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus)
+{
+}
+#endif
static int ssb_gpio_extif_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
+ int err;
chip->label = "ssb_extif_gpio";
chip->owner = THIS_MODULE;
@@ -165,7 +423,9 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->set = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
- chip->to_irq = ssb_gpio_extif_to_irq;
+#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+ chip->to_irq = ssb_gpio_to_irq;
+#endif
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -175,7 +435,17 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
else
chip->base = -1;
- return gpiochip_add(chip);
+ err = ssb_gpio_irq_extif_domain_init(bus);
+ if (err)
+ return err;
+
+ err = gpiochip_add(chip);
+ if (err) {
+ ssb_gpio_irq_extif_domain_exit(bus);
+ return err;
+ }
+
+ return 0;
}
#else
@@ -185,6 +455,10 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
}
#endif
+/**************************************************
+ * Init
+ **************************************************/
+
int ssb_gpio_init(struct ssb_bus *bus)
{
if (ssb_chipco_available(&bus->chipco))
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 32a811d11c25..2fead3820849 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -593,6 +593,13 @@ static int ssb_attach_queued_buses(void)
ssb_pcicore_init(&bus->pcicore);
if (bus->bustype == SSB_BUSTYPE_SSB)
ssb_watchdog_register(bus);
+
+ err = ssb_gpio_init(bus);
+ if (err == -ENOTSUPP)
+ ssb_dbg("GPIO driver not activated\n");
+ else if (err)
+ ssb_dbg("Error registering GPIO driver: %i\n", err);
+
ssb_bus_may_powerdown(bus);
err = ssb_devices_register(bus);
@@ -830,11 +837,6 @@ static int ssb_bus_register(struct ssb_bus *bus,
ssb_chipcommon_init(&bus->chipco);
ssb_extif_init(&bus->extif);
ssb_mipscore_init(&bus->mipscore);
- err = ssb_gpio_init(bus);
- if (err == -ENOTSUPP)
- ssb_dbg("GPIO driver not activated\n");
- else if (err)
- ssb_dbg("Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4bb6b11166b3..47cf17543008 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -34,8 +34,6 @@ source "drivers/staging/winbond/Kconfig"
source "drivers/staging/wlan-ng/Kconfig"
-source "drivers/staging/echo/Kconfig"
-
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
@@ -52,6 +50,8 @@ source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
+source "drivers/staging/rtl8821ae/Kconfig"
+
source "drivers/staging/rts5139/Kconfig"
source "drivers/staging/rts5208/Kconfig"
@@ -76,16 +76,10 @@ source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
-source "drivers/staging/sm7xxfb/Kconfig"
-
source "drivers/staging/crystalhd/Kconfig"
source "drivers/staging/cxt1e1/Kconfig"
@@ -130,8 +124,6 @@ source "drivers/staging/imx-drm/Kconfig"
source "drivers/staging/dgrp/Kconfig"
-source "drivers/staging/sb105x/Kconfig"
-
source "drivers/staging/fwserial/Kconfig"
source "drivers/staging/goldfish/Kconfig"
@@ -148,4 +140,10 @@ source "drivers/staging/dgnc/Kconfig"
source "drivers/staging/dgap/Kconfig"
+source "drivers/staging/gs_fpgaboot/Kconfig"
+
+source "drivers/staging/nokia_h4p/Kconfig"
+
+source "drivers/staging/unisys/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9f07e5e16094..d12f6189db46 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
-obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_PANEL) += panel/
@@ -18,6 +17,7 @@ obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
+obj-$(CONFIG_R8821AE) += rtl8821ae/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_TRANZPORT) += frontier/
@@ -32,11 +32,8 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_FB_XGI) += xgifb/
@@ -58,7 +55,6 @@ obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
-obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_LUSTRE_FS) += lustre/
@@ -66,3 +62,6 @@ obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_DGNC) += dgnc/
obj-$(CONFIG_DGAP) += dgap/
obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
+obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
+obj-$(CONFIG_BT_NOKIA_H4P) += nokia_h4p/
+obj-$(CONFIG_UNISYSSPAR) += unisys/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index b91c758883bf..99e484f845f2 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -2,7 +2,6 @@ menu "Android"
config ANDROID
bool "Android Drivers"
- default N
---help---
Enable support for various drivers needed on the Android platform
@@ -20,6 +19,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_32BIT
+ bool
+ depends on !64BIT && ANDROID_BINDER_IPC
+ default y
+ ---help---
+ The Binder API has been changed to support both 32-bit and 64-bit
+ applications in a mixed environment.
+
+ Enable this to support an old 32-bit Android user-space (v4.4 and
+ earlier).
+
+ Note that enabling this will break newer Android user-space.
+
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
@@ -60,7 +72,6 @@ config ANDROID_TIMED_GPIO
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
- default N
---help---
Registers processes to be killed when memory is low
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index 4fd32f337f9c..495b20cf3bf6 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -16,50 +16,10 @@
#ifndef _LINUX_ANDROID_ALARM_H
#define _LINUX_ANDROID_ALARM_H
-#include <linux/ioctl.h>
-#include <linux/time.h>
#include <linux/compat.h>
+#include <linux/ioctl.h>
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
#ifdef CONFIG_COMPAT
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /* Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d3adf3..5abcfd7aa706 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
#include <linux/ioctl.h>
#include <linux/compat.h>
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index eaec1dab7fe4..cfe4bc8f05cb 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -228,8 +228,8 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
- void __user *ptr;
- void __user *cookie;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
@@ -242,7 +242,7 @@ struct binder_node {
struct binder_ref_death {
struct binder_work work;
- void __user *cookie;
+ binder_uintptr_t cookie;
};
struct binder_ref {
@@ -515,14 +515,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- void __user *user_ptr)
+ uintptr_t user_ptr)
{
struct rb_node *n = proc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
- kern_ptr = user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data);
+ kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -856,7 +856,7 @@ static void binder_free_buf(struct binder_proc *proc,
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
- void __user *ptr)
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
@@ -875,8 +875,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
- void __user *ptr,
- void __user *cookie)
+ binder_uintptr_t ptr,
+ binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
@@ -908,9 +908,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p created\n",
+ "%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
- node->ptr, node->cookie);
+ (u64)node->ptr, (u64)node->cookie);
return node;
}
@@ -1226,9 +1226,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- size_t *failed_at)
+ binder_size_t *failed_at)
{
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1239,7 +1239,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
@@ -1249,8 +1250,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %zd, size %zd\n",
- debug_id, *offp, buffer->data_size);
+ pr_err("transaction release %d bad offset %lld, size %zd\n",
+ debug_id, (u64)*offp, buffer->data_size);
continue;
}
fp = (struct flat_binder_object *)(buffer->data + *offp);
@@ -1259,13 +1260,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
- pr_err("transaction release %d bad node %p\n",
- debug_id, fp->binder);
+ pr_err("transaction release %d bad node %016llx\n",
+ debug_id, (u64)fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p\n",
- node->debug_id, node->ptr);
+ " node %d u%016llx\n",
+ node->debug_id, (u64)node->ptr);
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
} break;
case BINDER_TYPE_HANDLE:
@@ -1303,7 +1304,7 @@ static void binder_transaction(struct binder_proc *proc,
{
struct binder_transaction *t;
struct binder_work *tcomplete;
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1432,18 +1433,20 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1472,23 +1475,26 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
- if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+ tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
+ tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
- binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
- proc->pid, thread->pid, tr->offsets_size);
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+ binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+ proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
@@ -1498,8 +1504,8 @@ static void binder_transaction(struct binder_proc *proc,
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
- proc->pid, thread->pid, *offp);
+ binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
+ proc->pid, thread->pid, (u64)*offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
@@ -1519,10 +1525,10 @@ static void binder_transaction(struct binder_proc *proc,
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- fp->binder, node->debug_id,
- fp->cookie, node->cookie);
+ (u64)fp->binder, node->debug_id,
+ (u64)fp->cookie, (u64)node->cookie);
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
@@ -1540,9 +1546,9 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p -> ref %d desc %d\n",
- node->debug_id, node->ptr, ref->debug_id,
- ref->desc);
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -1564,9 +1570,9 @@ static void binder_transaction(struct binder_proc *proc,
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%p\n",
+ " ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
- ref->node->ptr);
+ (u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
@@ -1682,9 +1688,9 @@ err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %zd-%zd\n",
+ "%d:%d transaction failed %d, size %lld-%lld\n",
proc->pid, thread->pid, return_error,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
@@ -1702,9 +1708,11 @@ err_no_context_mgr_node:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, size_t size, size_t *consumed)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed)
{
uint32_t cmd;
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1773,33 +1781,33 @@ static int binder_thread_write(struct binder_proc *proc,
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
- void __user *node_ptr;
- void __user *cookie;
+ binder_uintptr_t node_ptr;
+ binder_uintptr_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (void * __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (get_user(cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
- binder_user_error("%d:%d %s u%p no match\n",
+ binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
- node_ptr);
+ (u64)node_ptr);
break;
}
if (cookie != node->cookie) {
- binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node_ptr, node->debug_id,
- cookie, node->cookie);
+ (u64)node_ptr, node->debug_id,
+ (u64)cookie, (u64)node->cookie);
break;
}
if (cmd == BC_ACQUIRE_DONE) {
@@ -1835,27 +1843,28 @@ static int binder_thread_write(struct binder_proc *proc,
return -EINVAL;
case BC_FREE_BUFFER: {
- void __user *data_ptr;
+ binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (void * __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
- proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, (u64)data_ptr,
+ buffer->debug_id,
buffer->transaction ? "active" : "finished");
if (buffer->transaction) {
@@ -1925,16 +1934,16 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
@@ -1947,12 +1956,12 @@ static int binder_thread_write(struct binder_proc *proc,
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- cookie, ref->debug_id, ref->desc,
+ (u64)cookie, ref->debug_id, ref->desc,
ref->strong, ref->weak, ref->node->debug_id);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
@@ -1990,9 +1999,10 @@ static int binder_thread_write(struct binder_proc *proc,
}
death = ref->death;
if (death->cookie != cookie) {
- binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- death->cookie, cookie);
+ (u64)death->cookie,
+ (u64)cookie);
break;
}
ref->death = NULL;
@@ -2012,9 +2022,9 @@ static int binder_thread_write(struct binder_proc *proc,
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
@@ -2026,11 +2036,12 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
- proc->pid, thread->pid, cookie, death);
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ proc->pid, thread->pid, (u64)cookie,
+ death);
if (death == NULL) {
- binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
- proc->pid, thread->pid, cookie);
+ binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
break;
}
@@ -2082,9 +2093,10 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, size_t size,
- size_t *consumed, int non_block)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed, int non_block)
{
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2229,32 +2241,40 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr, (void * __user *)ptr))
+ if (put_user(node->ptr,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (put_user(node->cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (put_user(node->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%p c%p\n",
- proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name,
+ node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p deleted\n",
- proc->pid, thread->pid, node->debug_id,
- node->ptr, node->cookie);
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node->debug_id,
+ (u64)node->ptr,
+ (u64)node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p state unchanged\n",
- proc->pid, thread->pid, node->debug_id, node->ptr,
- node->cookie);
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node->debug_id,
+ (u64)node->ptr,
+ (u64)node->cookie);
}
}
} break;
@@ -2272,17 +2292,18 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie, (void * __user *)ptr))
+ if (put_user(death->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p\n",
+ "%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- death->cookie);
+ (u64)death->cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
list_del(&w->entry);
@@ -2312,8 +2333,8 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = NULL;
- tr.cookie = NULL;
+ tr.target.ptr = 0;
+ tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
@@ -2330,8 +2351,9 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)t->buffer->data +
- proc->user_buffer_offset;
+ tr.data.ptr.buffer = (binder_uintptr_t)(
+ (uintptr_t)t->buffer->data +
+ proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
@@ -2346,14 +2368,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
+ "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- tr.data.ptr.buffer, tr.data.ptr.offsets);
+ (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
@@ -2423,8 +2445,8 @@ static void binder_release_work(struct list_head *list)
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered death notification, %p\n",
- death->cookie);
+ "undelivered death notification, %016llx\n",
+ (u64)death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
@@ -2580,12 +2602,16 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %zd at %016lx, read %zd at %016lx\n",
- proc->pid, thread->pid, bwr.write_size,
- bwr.write_buffer, bwr.read_size, bwr.read_buffer);
+ "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_size, (u64)bwr.write_buffer,
+ (u64)bwr.read_size, (u64)bwr.read_buffer);
if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ ret = binder_thread_write(proc, thread,
+ bwr.write_buffer,
+ bwr.write_size,
+ &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
@@ -2595,7 +2621,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ ret = binder_thread_read(proc, thread, bwr.read_buffer,
+ bwr.read_size,
+ &bwr.read_consumed,
+ filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
@@ -2606,9 +2635,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %zd of %zd, read return %zd of %zd\n",
- proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
- bwr.read_consumed, bwr.read_size);
+ "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_consumed, (u64)bwr.write_size,
+ (u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
@@ -2637,7 +2667,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
} else
binder_context_mgr_uid = current->cred->euid;
- binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
@@ -2904,7 +2934,7 @@ static int binder_node_release(struct binder_node *node, int refs)
refs++;
if (!ref->death)
- goto out;
+ continue;
death++;
@@ -2917,7 +2947,6 @@ static int binder_node_release(struct binder_node *node, int refs)
BUG();
}
-out:
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
@@ -3133,8 +3162,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%p c%p\n",
- prefix, node->debug_id, node->ptr, node->cookie);
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -3194,8 +3224,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
- node->debug_id, node->ptr, node->cookie,
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count);
@@ -3497,6 +3527,7 @@ static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
+ .compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
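The hunks above replace raw user pointers in the binder protocol with fixed-width binder_uintptr_t values, which is what makes the same binder_ioctl() safe to expose as .compat_ioctl for 32-bit callers. A rough caller-side sketch (assuming the binder_uintptr_t and struct binder_write_read definitions from the new uapi/binder.h further down; the include path and helper name are hypothetical, and error handling is trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include "binder.h"	/* hypothetical userspace copy of uapi/binder.h below */

static int binder_call(int fd, void *wbuf, size_t wsize, void *rbuf, size_t rsize)
{
	struct binder_write_read bwr = {
		.write_size   = wsize,
		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
		.read_size    = rsize,
		.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf,
	};

	/* the same fixed-width layout is used by 32-bit and 64-bit callers */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}
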
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index cbe345168067..eb0834656dfe 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -20,311 +20,11 @@
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
-#include <linux/ioctl.h>
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
-#define B_PACK_CHARS(c1, c2, c3, c4) \
- ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
- BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
- FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
- FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes. The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur. The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- __u32 type;
- __u32 flags;
-
- /* 8 bytes of data. */
- union {
- void __user *binder; /* local object */
- __u32 handle; /* remote object */
- };
-
- /* extra data associated with local object */
- void __user *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
- size_t write_size; /* bytes to write */
- size_t write_consumed; /* bytes consumed by driver */
- unsigned long write_buffer;
- size_t read_size; /* bytes to read */
- size_t read_consumed; /* bytes consumed by driver */
- unsigned long read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
- /* driver protocol version -- increment with incompatible change */
- __s32 protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interrupted. This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process. That is, the process is being destroyed.
- * You should handle this by exiting from your process. Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
- TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
- TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
- TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
- TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
- /* The first two are only used for bcTRANSACTION and brTRANSACTION,
- * identifying the target and contents of the transaction.
- */
- union {
- __u32 handle; /* target descriptor of command transaction */
- void *ptr; /* target descriptor of return transaction */
- } target;
- void *cookie; /* target object cookie */
- __u32 code; /* transaction command */
-
- /* General information about the transaction. */
- __u32 flags;
- pid_t sender_pid;
- uid_t sender_euid;
- size_t data_size; /* number of bytes of data */
- size_t offsets_size; /* number of bytes of offsets */
-
- /* If this transaction is inline, the data immediately
- * follows here; otherwise, it ends with a pointer to
- * the data buffer.
- */
- union {
- struct {
- /* transaction data */
- const void __user *buffer;
- /* offsets from buffer to flat_binder_object structs */
- const void __user *offsets;
- } ptr;
- __u8 buf[8];
- } data;
-};
-
-struct binder_ptr_cookie {
- void *ptr;
- void *cookie;
-};
-
-struct binder_pri_desc {
- __s32 priority;
- __u32 desc;
-};
-
-struct binder_pri_ptr_cookie {
- __s32 priority;
- void *ptr;
- void *cookie;
-};
-
-enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, __s32),
- /*
- * int: error code
- */
-
- BR_OK = _IO('r', 1),
- /* No parameters! */
-
- BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
- BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
- /*
- * binder_transaction_data: the received command.
- */
-
- BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
- /*
- * not currently supported
- * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
- * Else the remote object has acquired a primary reference.
- */
-
- BR_DEAD_REPLY = _IO('r', 5),
- /*
- * The target of the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
- */
-
- BR_TRANSACTION_COMPLETE = _IO('r', 6),
- /*
- * No parameters... always refers to the last transaction requested
- * (including replies). Note that this will be sent even for
- * asynchronous transactions.
- */
-
- BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
- BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
- BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
- BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
- /*
- * not currently supported
- * int: priority
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_NOOP = _IO('r', 12),
- /*
- * No parameters. Do nothing and examine the next command. It exists
- * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
- */
-
- BR_SPAWN_LOOPER = _IO('r', 13),
- /*
- * No parameters. The driver has determined that a process has no
- * threads waiting to service incoming transactions. When a process
- * receives this command, it must spawn a new service thread and
- * register it via bcENTER_LOOPER.
- */
-
- BR_FINISHED = _IO('r', 14),
- /*
- * not currently supported
- * stop threadpool thread
- */
-
- BR_DEAD_BINDER = _IOR('r', 15, void *),
- /*
- * void *: cookie
- */
- BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
- /*
- * void *: cookie
- */
-
- BR_FAILED_REPLY = _IO('r', 17),
- /*
- * The last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
- */
-};
-
-enum binder_driver_command_protocol {
- BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
- BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
- /*
- * binder_transaction_data: the sent command.
- */
-
- BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
- /*
- * not currently supported
- * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
- * Else you have acquired a primary reference on the object.
- */
-
- BC_FREE_BUFFER = _IOW('c', 3, void *),
- /*
- * void *: ptr to transaction data received on a read
- */
-
- BC_INCREFS = _IOW('c', 4, __u32),
- BC_ACQUIRE = _IOW('c', 5, __u32),
- BC_RELEASE = _IOW('c', 6, __u32),
- BC_DECREFS = _IOW('c', 7, __u32),
- /*
- * int: descriptor
- */
-
- BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
- BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
- /*
- * not currently supported
- * int: priority
- * int: descriptor
- */
-
- BC_REGISTER_LOOPER = _IO('c', 11),
- /*
- * No parameters.
- * Register a spawned looper thread with the device.
- */
-
- BC_ENTER_LOOPER = _IO('c', 12),
- BC_EXIT_LOOPER = _IO('c', 13),
- /*
- * No parameters.
- * These two commands are sent as an application-level thread
- * enters and exits the binder loop, respectively. They are
- * used so the binder can have an accurate count of the number
- * of looping threads it has available.
- */
-
- BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
- /*
- * void *: cookie
- */
-};
+#include "uapi/binder.h"
#endif /* _LINUX_BINDER_H */
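The error-code note removed above (and carried over into uapi/binder.h) implies a specific caller-side pattern; a minimal sketch of it, assuming fd and bwr are already set up and the usual errno/ioctl userspace headers are in scope:

int ret;

do {
	ret = ioctl(fd, BINDER_WRITE_READ, &bwr);	/* retry if merely interrupted */
} while (ret < 0 && errno == EINTR);

if (ret < 0 && errno == ECONNREFUSED) {
	/* driver has stopped accepting work from this process; shut down */
	exit(0);
}
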
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
index 82a567c2af67..7f20f3dc8369 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/staging/android/binder_trace.h
@@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
@@ -163,8 +163,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
- TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
- __entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
+ TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+ __entry->debug_id, __entry->node_debug_id,
+ (u64)__entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
@@ -177,7 +178,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
@@ -186,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
- TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
+ TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
- __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
+ __entry->ref_debug_id, __entry->ref_desc,
+ (u64)__entry->node_ptr)
);
TRACE_EVENT(binder_transaction_ref_to_ref,
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
return err;
}
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case COMPAT_ION_IOC_FREE:
{
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (data == NULL)
return -EFAULT;
- err = compat_get_ion_allocation_data(data32, data);
+ err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 574066ff73f8..3d5bf1472236 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -16,6 +16,7 @@
*/
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
@@ -55,10 +56,12 @@ struct ion_device {
struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
- long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
- unsigned long arg);
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
struct rb_root clients;
struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
};
/**
@@ -69,6 +72,8 @@ struct ion_device {
* @idr: an idr space for allocating handle ids
* @lock: lock protecting the tree of handles
* @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
* @task: used for debugging
*
* A client represents a list of buffers this client may access.
@@ -82,6 +87,8 @@ struct ion_client {
struct idr idr;
struct mutex lock;
const char *name;
+ char *display_name;
+ int display_serial;
struct task_struct *task;
pid_t pid;
struct dentry *debug_root;
@@ -208,7 +215,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
if (IS_ERR(table)) {
heap->ops->free(buffer);
kfree(buffer);
- return ERR_PTR(PTR_ERR(table));
+ return ERR_CAST(table);
}
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
@@ -429,7 +436,7 @@ static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
- return (idr_find(&client->idr, handle->id) == handle);
+ return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
@@ -501,7 +508,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
return ERR_PTR(-ENODEV);
if (IS_ERR(buffer))
- return ERR_PTR(PTR_ERR(buffer));
+ return ERR_CAST(buffer);
handle = ion_handle_create(client, buffer);
@@ -708,6 +715,21 @@ static const struct file_operations debug_client_fops = {
.release = single_release,
};
+static int ion_get_client_serial(const struct rb_root *root,
+ const unsigned char *name)
+{
+ int serial = -1;
+ struct rb_node *node;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ struct ion_client *client = rb_entry(node, struct ion_client,
+ node);
+ if (strcmp(client->name, name))
+ continue;
+ serial = max(serial, client->display_serial);
+ }
+ return serial + 1;
+}
+
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name)
{
@@ -716,9 +738,13 @@ struct ion_client *ion_client_create(struct ion_device *dev,
struct rb_node **p;
struct rb_node *parent = NULL;
struct ion_client *entry;
- char debug_name[64];
pid_t pid;
+ if (!name) {
+ pr_err("%s: Name cannot be null\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
@@ -733,21 +759,27 @@ struct ion_client *ion_client_create(struct ion_device *dev,
task_unlock(current->group_leader);
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
- if (!client) {
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
- }
+ if (!client)
+ goto err_put_task_struct;
client->dev = dev;
client->handles = RB_ROOT;
idr_init(&client->idr);
mutex_init(&client->lock);
- client->name = name;
client->task = task;
client->pid = pid;
+ client->name = kstrdup(name, GFP_KERNEL);
+ if (!client->name)
+ goto err_free_client;
down_write(&dev->lock);
+ client->display_serial = ion_get_client_serial(&dev->clients, name);
+ client->display_name = kasprintf(
+ GFP_KERNEL, "%s-%d", name, client->display_serial);
+ if (!client->display_name) {
+ up_write(&dev->lock);
+ goto err_free_client_name;
+ }
p = &dev->clients.rb_node;
while (*p) {
parent = *p;
@@ -761,13 +793,28 @@ struct ion_client *ion_client_create(struct ion_device *dev,
rb_link_node(&client->node, parent, p);
rb_insert_color(&client->node, &dev->clients);
- snprintf(debug_name, 64, "%u", client->pid);
- client->debug_root = debugfs_create_file(debug_name, 0664,
- dev->debug_root, client,
- &debug_client_fops);
+ client->debug_root = debugfs_create_file(client->display_name, 0664,
+ dev->clients_debug_root,
+ client, &debug_client_fops);
+ if (!client->debug_root) {
+ char buf[256], *path;
+ path = dentry_path(dev->clients_debug_root, buf, 256);
+ pr_err("Failed to create client debugfs at %s/%s\n",
+ path, client->display_name);
+ }
+
up_write(&dev->lock);
return client;
+
+err_free_client_name:
+ kfree(client->name);
+err_free_client:
+ kfree(client);
+err_put_task_struct:
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
@@ -792,6 +839,8 @@ void ion_client_destroy(struct ion_client *client)
debugfs_remove_recursive(client->debug_root);
up_write(&dev->lock);
+ kfree(client->display_name);
+ kfree(client->name);
kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
@@ -954,8 +1003,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
int ret = 0;
if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
+ pr_err("%s: this heap does not define a method for mapping to userspace\n",
+ __func__);
return -EINVAL;
}
@@ -1017,9 +1066,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
- return 0;
+ return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
@@ -1100,7 +1147,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
- return ERR_PTR(PTR_ERR(dmabuf));
+ return ERR_CAST(dmabuf);
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
@@ -1293,9 +1340,11 @@ static int ion_open(struct inode *inode, struct file *file)
struct miscdevice *miscdev = file->private_data;
struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
struct ion_client *client;
+ char debug_name[64];
pr_debug("%s: %d\n", __func__, __LINE__);
- client = ion_client_create(dev, "user");
+ snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+ client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
@@ -1338,7 +1387,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
size_t total_orphaned_size = 0;
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1357,9 +1406,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
client->pid, size);
}
}
- seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "orphaned allocations (info is from last known client):"
- "\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
@@ -1376,14 +1424,14 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
}
}
mutex_unlock(&dev->buffer_lock);
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
seq_printf(s, "%16.s %16zu\n", "total orphaned",
total_orphaned_size);
seq_printf(s, "%16.s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16.s %16zu\n", "deferred free",
heap->free_list_size);
- seq_printf(s, "----------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
heap->debug_show(heap, s, unused);
@@ -1443,6 +1491,8 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
+ struct dentry *debug_file;
+
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -1451,21 +1501,40 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_init_deferred_free(heap);
+ if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+ ion_heap_init_shrinker(heap);
+
heap->dev = dev;
down_write(&dev->lock);
/* use negative heap->id to reverse the priority -- when traversing
the list later attempt higher id numbers first */
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
- &debug_heap_fops);
+ debug_file = debugfs_create_file(heap->name, 0664,
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
+
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ path, heap->name);
+ }
+
#ifdef DEBUG_HEAP_SHRINKER
if (heap->shrinker.shrink) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
- debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
- &debug_shrink_fops);
+ debug_file = debugfs_create_file(
+ debug_name, 0644, dev->heaps_debug_root, heap,
+ &debug_shrink_fops);
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+ path, debug_name);
+ }
}
#endif
up_write(&dev->lock);
@@ -1494,8 +1563,21 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
}
idev->debug_root = debugfs_create_dir("ion", NULL);
- if (!idev->debug_root)
- pr_err("ion: failed to create debug files.\n");
+ if (!idev->debug_root) {
+ pr_err("ion: failed to create debugfs root directory.\n");
+ goto debugfs_done;
+ }
+ idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+ if (!idev->heaps_debug_root) {
+ pr_err("ion: failed to create debugfs heaps directory.\n");
+ goto debugfs_done;
+ }
+ idev->clients_debug_root = debugfs_create_dir("clients",
+ idev->debug_root);
+ if (!idev->clients_debug_root)
+ pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
@@ -1509,6 +1591,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
void ion_device_destroy(struct ion_device *dev)
{
misc_deregister(&dev->dev);
+ debugfs_remove_recursive(dev->debug_root);
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
@@ -1527,8 +1610,7 @@ void __init ion_reserve(struct ion_platform_data *data)
data->heaps[i].align,
MEMBLOCK_ALLOC_ANYWHERE);
if (!paddr) {
- pr_err("%s: error allocating memblock for "
- "heap %d\n",
+ pr_err("%s: error allocating memblock for heap %d\n",
__func__, i);
continue;
}
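Several hunks above swap open-coded error-pointer handling for the ERR_CAST() and PTR_ERR_OR_ZERO() helpers; a stand-alone sketch of the two idioms (the wrapper functions are hypothetical, only the helpers are existing kernel API):

#include <linux/err.h>

/* was: return ERR_PTR(PTR_ERR(table)); */
static struct sg_table *map_or_propagate(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	struct sg_table *table = heap->ops->map_dma(heap, buffer);

	if (IS_ERR(table))
		return ERR_CAST(table);	/* re-type the ERR_PTR without unwrapping it */
	return table;
}

/* was: if (IS_ERR(vaddr)) return PTR_ERR(vaddr); return 0; */
static int kmap_status(struct ion_buffer *buffer)
{
	void *vaddr = ion_buffer_kmap_get(buffer);

	return PTR_ERR_OR_ZERO(vaddr);	/* error code on failure, 0 on success */
}
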
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index f0f98897e4b9..ce68ecfed31f 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -135,7 +135,7 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
&info->handle);
*addr = info->handle;
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..3a45e79fe444 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,19 +17,21 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include "ion.h"
#include "ion_priv.h"
-struct ion_device *idev;
-struct ion_heap **heaps;
+static struct ion_device *idev;
+static struct ion_heap **heaps;
-void *carveout_ptr;
-void *chunk_ptr;
+static void *carveout_ptr;
+static void *chunk_ptr;
-struct ion_platform_heap dummy_heaps[] = {
+static struct ion_platform_heap dummy_heaps[] = {
{
.id = ION_HEAP_TYPE_SYSTEM,
.type = ION_HEAP_TYPE_SYSTEM,
@@ -56,8 +58,8 @@ struct ion_platform_heap dummy_heaps[] = {
},
};
-struct ion_platform_data dummy_ion_pdata = {
- .nr = 4,
+static struct ion_platform_data dummy_ion_pdata = {
+ .nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
GFP_KERNEL);
if (!heaps)
- return PTR_ERR(heaps);
+ return -ENOMEM;
/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
}
return err;
}
+device_initcall(ion_dummy_init);
static void __exit ion_dummy_exit(void)
{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
return;
}
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..bdc6a28ba8c9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -178,7 +178,8 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
return size;
}
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -197,6 +198,8 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
+ if (skip_pools)
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
total_drained += buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy(buffer);
@@ -207,6 +210,16 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
return total_drained;
}
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, true);
+}
+
static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
@@ -243,15 +256,65 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
- return PTR_RET(heap->task);
+ return PTR_ERR_OR_ZERO(heap->task);
}
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
+static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int total = 0;
+
+ total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+ if (heap->ops->shrink)
+ total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+ return total;
+}
+
+static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int freed = 0;
+ int to_scan = sc->nr_to_scan;
+
+ if (to_scan == 0)
+ return 0;
+
+ /*
+ * shrink the free list first, no point in zeroing the memory if we're
+ * just going to reclaim it. Also, skip any possible page pooling.
+ */
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ to_scan -= freed;
+ if (to_scan <= 0)
+ return freed;
+
+ if (heap->ops->shrink)
+ freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+ return freed;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+ heap->shrinker.count_objects = ion_heap_shrink_count;
+ heap->shrinker.scan_objects = ion_heap_shrink_scan;
+ heap->shrinker.seeks = DEFAULT_SEEKS;
+ heap->shrinker.batch = 0;
+ register_shrinker(&heap->shrinker);
+}
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
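With the generic count/scan shrinker added above, a heap no longer registers its own shrinker: it only needs ION_HEAP_FLAG_DEFER_FREE and/or a ->shrink op, and ion_device_add_heap() calls ion_heap_init_shrinker() for it. A rough sketch of the heap side (the my_heap_* names and pool helpers are hypothetical):

static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan)
{
	/* nr_to_scan == 0 is a query: report how many pages could be reclaimed */
	if (!nr_to_scan)
		return my_heap_pool_count(heap);

	/* otherwise release up to nr_to_scan pages back to the system */
	return my_heap_pool_free(heap, gfp_mask, nr_to_scan);
}

static struct ion_heap_ops my_heap_ops = {
	/* .allocate, .free, .map_dma, .unmap_dma, ... as before */
	.shrink = my_heap_shrink,
};
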
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fa693c23681a..ecb5fc34ec5c 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -130,8 +130,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan)
{
- int nr_freed = 0;
- int i;
+ int freed;
bool high;
high = !!(gfp_mask & __GFP_HIGHMEM);
@@ -139,7 +138,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
- for (i = 0; i < nr_to_scan; i++) {
+ for (freed = 0; freed < nr_to_scan; freed++) {
struct page *page;
mutex_lock(&pool->mutex);
@@ -153,10 +152,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
}
mutex_unlock(&pool->mutex);
ion_page_pool_free_pages(pool, page);
- nr_freed += (1 << pool->order);
}
- return nr_freed;
+ return freed;
}
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..1eba3f2076a9 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
@@ -37,6 +38,7 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
@@ -65,6 +67,7 @@ struct ion_buffer {
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
+ unsigned long private_flags;
size_t size;
union {
void *priv_virt;
@@ -97,22 +100,27 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
* @map_user map memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
*/
struct ion_heap_ops {
- int (*allocate) (struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free) (struct ion_buffer *buffer);
- int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table *(*map_dma) (struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
+ int (*allocate)(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free)(struct ion_buffer *buffer);
+ int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table * (*map_dma)(struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
/**
@@ -121,6 +129,17 @@ struct ion_heap_ops {
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
@@ -131,10 +150,7 @@ struct ion_heap_ops {
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
- * @shrinker: a shrinker for the heap, if the heap caches system
- * memory, it must define a shrinker to return it on low
- * memory conditions, this includes system memory cached
- * in the deferred free lists for heaps that support it
+ * @shrinker: a shrinker for the heap
* @free_list: free list head if deferred free is used
* @free_list_size size of the deferred free list in bytes
* @lock: protects the free list
@@ -218,6 +234,16 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to set up a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
* @heap: the heap
*
@@ -249,6 +275,29 @@ void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
+
+/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
*/
@@ -304,13 +353,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* @low_count: number of lowmem items in the pool
* @high_items: list of highmem items
* @low_items: list of lowmem items
- * @shrinker: a shrinker for the items
* @mutex: lock protecting this struct and especially the count
* item list
- * @alloc: function to be used to allocate pageory when the pool
- * is empty
- * @free: function to be used to free pageory back to the system
- * when the shrinker fires
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..c92363356ae1 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -90,7 +90,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
{
bool cached = ion_buffer_cached(buffer);
- if (!cached) {
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
ion_page_pool_free(pool, page);
} else {
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
info->page = page;
info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
return info;
}
kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
if (align > PAGE_SIZE)
return -EINVAL;
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining,
@@ -205,7 +209,7 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
/* uncached pages come from the page pools, zero them before returning
for security purposes (other allocations are zeroed at alloc time) */
- if (!cached)
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -227,75 +231,34 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
return;
}
-static struct ion_heap_ops system_heap_ops = {
- .allocate = ion_system_heap_allocate,
- .free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_heap_map_kernel,
- .unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_heap_map_user,
-};
-
-static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
{
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
+ struct ion_system_heap *sys_heap;
int nr_total = 0;
int i;
- /* total number of items is whatever the page pools are holding
- plus whatever's in the freelist */
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
- nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
- }
- nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
- return nr_total;
-
-}
-
-static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
-{
-
- struct ion_heap *heap = container_of(shrinker, struct ion_heap,
- shrinker);
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int nr_freed = 0;
- int i;
-
- if (sc->nr_to_scan == 0)
- goto end;
-
- /* shrink the free list first, no point in zeroing the memory if
- we're just going to reclaim it */
- nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
- PAGE_SIZE;
-
- if (nr_freed >= sc->nr_to_scan)
- goto end;
+ sys_heap = container_of(heap, struct ion_system_heap, heap);
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
- sc->nr_to_scan);
- if (nr_freed >= sc->nr_to_scan)
- break;
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
}
-end:
- return nr_freed;
-
+ return nr_total;
}
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .shrink = ion_system_heap_shrink,
+};
+
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
@@ -343,11 +306,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
heap->pools[i] = pool;
}
- heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
- heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
- heap->heap.shrinker.seeks = DEFAULT_SEEKS;
- heap->heap.shrinker.batch = 0;
- register_shrinker(&heap->heap.shrinker);
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
err_create_pool:
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 3474c65f87fa..11c7cceb3c7d 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -32,13 +32,13 @@ static int tegra_ion_probe(struct platform_device *pdev)
num_heaps = pdata->nr;
- heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_heap *) * pdata->nr,
+ GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev)) {
- kfree(heaps);
+ if (IS_ERR_OR_NULL(idev))
return PTR_ERR(idev);
- }
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {
@@ -58,7 +58,6 @@ err:
if (heaps[i])
ion_heap_destroy(heaps[i]);
}
- kfree(heaps);
return err;
}
@@ -70,7 +69,6 @@ static int tegra_ion_remove(struct platform_device *pdev)
ion_device_destroy(idev);
for (i = 0; i < num_heaps; i++)
ion_heap_destroy(heaps[i]);
- kfree(heaps);
return 0;
}
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 6f094b37f1f1..b545d3d1da3e 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -88,7 +88,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
int other_file = global_page_state(NR_FILE_PAGES) -
- global_page_state(NR_SHMEM);
+ global_page_state(NR_SHMEM) -
+ total_swapcache_pages();
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -159,8 +160,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
selected->pid, selected->comm,
selected_oom_score_adj, selected_tasksize);
lowmem_deathpending_timeout = jiffies + HZ;
- send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
+ send_sig(SIGKILL, selected, 0);
rem += selected_tasksize;
}
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..1a50669ec8a9 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,10 +18,9 @@
#define _LINUX_SW_SYNC_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
#include "sync.h"
+#include "uapi/sw_sync.h"
struct sw_sync_timeline {
struct sync_timeline obj;
@@ -35,24 +34,26 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 62e2255b1c1e..eaf57cccf626 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
#define _LINUX_SYNC_H
#include <linux/types.h>
-#ifdef __KERNEL__
-
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include "uapi/sync.h"
+
struct sync_timeline;
struct sync_pt;
struct sync_fence;
@@ -53,7 +53,7 @@ struct sync_timeline_ops {
const char *driver_name;
/* required */
- struct sync_pt *(*dup)(struct sync_pt *pt);
+ struct sync_pt * (*dup)(struct sync_pt *pt);
/* required */
int (*has_signaled)(struct sync_pt *pt);
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependent data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data returned to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
#endif /* _LINUX_SYNC_H */
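The ioctl documentation removed above (now carried in uapi/sync.h) describes walking the returned pt_info records by their len field; a trimmed userspace sketch of that iteration, assuming fence_fd is an open fence fd and the usual libc/ioctl headers:

char buf[4096];		/* arbitrary buffer size for the example */
struct sync_fence_info_data *info = (struct sync_fence_info_data *)buf;
struct sync_pt_info *pt;
__u32 off;

info->len = sizeof(buf);
if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0)
	return -1;

/* pt_info records are variable length; advance by each record's len */
for (off = sizeof(*info); off < info->len; off += pt->len) {
	pt = (struct sync_pt_info *)(buf + off);
	printf("%s/%s status %d\n", pt->obj_name, pt->driver_name, pt->status);
}
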
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index e81451425c01..0c7fdc83b336 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -90,8 +90,9 @@ static int timed_gpio_probe(struct platform_device *pdev)
if (!pdata)
return -EBUSY;
- gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
- GFP_KERNEL);
+ gpio_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
if (!gpio_data)
return -ENOMEM;
@@ -131,7 +132,6 @@ err_out:
timed_output_dev_unregister(&gpio_data[i].dev);
gpio_free(gpio_data[i].gpio);
}
- kfree(gpio_data);
return ret;
}
@@ -147,8 +147,6 @@ static int timed_gpio_remove(struct platform_device *pdev)
gpio_free(gpio_data[i].gpio);
}
- kfree(gpio_data);
-
return 0;
}
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
index 905c7cc9588e..13d2ca51cbe8 100644
--- a/drivers/staging/android/timed_output.h
+++ b/drivers/staging/android/timed_output.h
@@ -20,10 +20,10 @@ struct timed_output_dev {
const char *name;
/* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
/* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
+ int (*get_time)(struct timed_output_dev *sdev);
/* private data */
struct device *dev;
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 000000000000..aa013f6f5f3a
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
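
The header above only declares the alarm ioctl interface; the hypothetical userspace sketch below shows how those ioctls fit together: arm an alarm with ANDROID_ALARM_SET, then block in ANDROID_ALARM_WAIT. The /dev/alarm node name and the exact time base interpreted by the driver are assumptions, not part of this patch.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"

int main(void)
{
	/* Absolute expiry time for the chosen alarm type (assumed time base). */
	struct timespec expires = { .tv_sec = 30, .tv_nsec = 0 };
	int fd = open("/dev/alarm", O_RDWR);	/* assumed device node */
	int fired;

	if (fd < 0)
		return 1;

	if (ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP),
		  &expires) < 0)
		return 1;

	/* Blocks until an alarm fires; per the header, the return value
	 * carries the fired alarm types as bit numbers. */
	fired = ioctl(fd, ANDROID_ALARM_WAIT);
	close(fd);
	return fired < 0;
}
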
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 000000000000..ba4743c71d6b
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
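
A hypothetical usage sketch for the region-creation ioctls above: open the (assumed) /dev/ashmem node, set the name and size before the first mmap(), then map the region. This mirrors what a userspace helper would typically do and is not part of the patch.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h"

static void *ashmem_create_region(const char *name, size_t size, int *out_fd)
{
	char buf[ASHMEM_NAME_LEN];
	void *addr;
	int fd = open("/dev/ashmem", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return NULL;

	/* Name and size must be set before the region is first mapped. */
	strncpy(buf, name, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if (ioctl(fd, ASHMEM_SET_NAME, buf) < 0 ||
	    ioctl(fd, ASHMEM_SET_SIZE, size) < 0)
		goto err;

	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		goto err;

	*out_fd = fd;
	return addr;
err:
	close(fd);
	return NULL;
}
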
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644
index 000000000000..904adb7600cf
--- /dev/null
+++ b/drivers/staging/android/uapi/binder.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ __u32 type;
+ __u32 flags;
+
+ /* 8 bytes of data. */
+ union {
+ binder_uintptr_t binder; /* local object */
+ __u32 handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ binder_uintptr_t cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ binder_size_t write_size; /* bytes to write */
+ binder_size_t write_consumed; /* bytes consumed by driver */
+ binder_uintptr_t write_buffer;
+ binder_size_t read_size; /* bytes to read */
+ binder_size_t read_consumed; /* bytes consumed by driver */
+ binder_uintptr_t read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ __s32 protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ /* target descriptor of command transaction */
+ __u32 handle;
+ /* target descriptor of return transaction */
+ binder_uintptr_t ptr;
+ } target;
+ binder_uintptr_t cookie; /* target object cookie */
+ __u32 code; /* transaction command */
+
+ /* General information about the transaction. */
+ __u32 flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ binder_size_t data_size; /* number of bytes of data */
+ binder_size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ binder_uintptr_t buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ binder_uintptr_t offsets;
+ } ptr;
+ __u8 buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+ __u32 handle;
+ binder_uintptr_t cookie;
+} __attribute__((packed));
+
+struct binder_pri_desc {
+ __s32 priority;
+ __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+ __s32 priority;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, __s32),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
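
The NOTE above about EINTR and the binder_version handshake can be illustrated with a short, hypothetical userspace sketch: open the driver, retry the ioctl on EINTR as the comment advises, and verify the protocol version. The /dev/binder node name is an assumption about how the driver is exposed; the sketch is not part of the patch.

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "binder.h"

static int binder_open_checked(void)
{
	struct binder_version vers;
	int fd = open("/dev/binder", O_RDWR);	/* assumed device node */
	int ret;

	if (fd < 0)
		return -1;

	do {
		ret = ioctl(fd, BINDER_VERSION, &vers);
	} while (ret < 0 && errno == EINTR);	/* retry, per the NOTE above */

	if (ret < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
		close(fd);
		return -1;
	}
	return fd;
}
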
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 000000000000..9b5d4869505c
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
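
A hypothetical sketch of the sw_sync test interface declared above: create a fence that signals at timeline value 1, then increment the timeline to signal it. The /dev/sw_sync node name, and treating each open file descriptor as an independent timeline, are assumptions about the driver's file-operations model rather than something stated in this header.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sw_sync.h"

static int sw_sync_demo(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int timeline = open("/dev/sw_sync", O_RDWR);	/* assumed node */

	if (timeline < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "demo_fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return -1;

	/* data.fence now holds the new fence fd; signal it by stepping
	 * the timeline past the fence's value. */
	return ioctl(timeline, SW_SYNC_IOC_INC, &inc);
}
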
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 000000000000..e964c751f6b8
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it is passing in.
+ *		ioctl returns the length of sync_fence_info_data returned to userspace
+ * including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
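
A hypothetical userspace sketch tying the three ioctls above together: wait on a fence fd with a 1000 ms timeout, fetch its info, and walk the sync_pt_info records using the per-entry len field, as the SYNC_IOC_FENCE_INFO documentation describes. Buffer size and field usage are illustrative assumptions, not part of the patch.

#include <string.h>
#include <sys/ioctl.h>
#include "sync.h"

static int sync_dump(int fence_fd)
{
	char buf[4096];
	struct sync_fence_info_data *info = (struct sync_fence_info_data *)buf;
	struct sync_pt_info *pt;
	__s32 timeout = 1000;
	__u32 off;

	if (ioctl(fence_fd, SYNC_IOC_WAIT, &timeout) < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	info->len = sizeof(buf);	/* caller passes the buffer size in */
	if (ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info) < 0)
		return -1;

	/* pt_info entries are packed back to back; each entry's len field
	 * gives the stride to the next one. */
	for (off = sizeof(*info); off + sizeof(*pt) <= info->len; off += pt->len) {
		pt = (struct sync_pt_info *)(buf + off);
		if (!pt->len)
			break;
		/* pt->obj_name, pt->driver_name, pt->status are valid here */
	}
	return 0;
}
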
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index f0d6f0c38207..1b2d9f3bd55a 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -37,8 +37,10 @@ struct bcm_link_request {
union u_ip_address {
struct {
- ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH]; /* Source Ip Address Range */
- ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH]; /* Source Ip Mask Address Range */
+ /* Source Ip Address Range */
+ ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH];
+ /* Source Ip Mask Address Range */
+ ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH];
};
struct {
ULONG ulIpv6Addr[MAX_IP_RANGE_LENGTH * 4]; /* Source Ip Address Range */
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f1b6de0293c8..ae7490b4d70e 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -150,1903 +150,2392 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size,
return PktLen;
}
-static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
+static int bcm_char_ioctl_reg_read_private(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
{
- struct bcm_tarang_data *pTarang = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct bcm_mini_adapter *Adapter = pTarang->Adapter;
- INT Status = STATUS_FAILURE;
- int timeout = 0;
+ struct bcm_rdm_buffer sRdmBuffer = {0};
struct bcm_ioctl_buffer IoBuffer;
+ PCHAR temp_buff;
+ INT Status = STATUS_FAILURE;
+ UINT Bufflen;
+ u16 temp_value;
int bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
- cmd, arg);
-
- if (_IOC_TYPE(cmd) != BCM_IOCTL)
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_READ)
- Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) & _IOC_WRITE)
- Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
- Status = STATUS_SUCCESS;
- if (Status)
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- if (Adapter->device_removed)
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
return -EFAULT;
- if (false == Adapter->fw_download_done) {
- switch (cmd) {
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- case IOCTL_BCM_GPIO_SET_REQUEST:
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- return -EACCES;
- default:
- break;
- }
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
}
- Status = vendorextnIoctl(Adapter, cmd, arg);
- if (Status != CONTINUE_COMMON_PATH)
- return Status;
+ Bufflen = IoBuffer.OutputLength;
+ temp_value = 4 - (Bufflen % 4);
+ Bufflen += temp_value % 4;
- switch (cmd) {
- /* Rdms for Swin Idle... */
- case IOCTL_BCM_REGISTER_READ_PRIVATE: {
- struct bcm_rdm_buffer sRdmBuffer = {0};
- PCHAR temp_buff;
- UINT Bufflen;
- u16 temp_value;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ temp_buff = kmalloc(Bufflen, GFP_KERNEL);
+ if (!temp_buff)
+ return -ENOMEM;
- if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, Bufflen);
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
return -EFAULT;
-
- if (IoBuffer.OutputLength > USHRT_MAX ||
- IoBuffer.OutputLength == 0) {
- return -EINVAL;
}
+ } else {
+ Status = bytes;
+ }
- Bufflen = IoBuffer.OutputLength;
- temp_value = 4 - (Bufflen % 4);
- Bufflen += temp_value % 4;
+ kfree(temp_buff);
+ return Status;
+}
- temp_buff = kmalloc(Bufflen, GFP_KERNEL);
- if (!temp_buff)
- return -ENOMEM;
+static int bcm_char_ioctl_reg_write_private(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_wrm_buffer sWrmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status;
- bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, Bufflen);
- if (bytes > 0) {
- Status = STATUS_SUCCESS;
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- Status = bytes;
- }
+ /* Copy Ioctl Buffer structure */
- kfree(temp_buff);
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- case IOCTL_BCM_REGISTER_WRITE_PRIVATE: {
- struct bcm_wrm_buffer sWrmBuffer = {0};
- UINT uiTempVar = 0;
- /* Copy Ioctl Buffer structure */
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4))) {
- /* Get WrmBuffer structure */
- if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4))) {
+ Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data, sizeof(ULONG));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
+ }
+ return Status;
+}
- Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data, sizeof(ULONG));
+static int bcm_char_ioctl_eeprom_reg_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_rdm_buffer sRdmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PCHAR temp_buff = NULL;
+ UINT uiTempVar = 0;
+ INT Status;
+ int bytes;
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
- break;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Rdms\n");
+ return -EACCES;
}
- case IOCTL_BCM_REGISTER_READ:
- case IOCTL_BCM_EEPROM_REGISTER_READ: {
- struct bcm_rdm_buffer sRdmBuffer = {0};
- PCHAR temp_buff = NULL;
- UINT uiTempVar = 0;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Rdms\n");
- return -EACCES;
- }
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sRdmBuffer))
- return -EINVAL;
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
- if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
- if (IoBuffer.OutputLength > USHRT_MAX ||
- IoBuffer.OutputLength == 0) {
- return -EINVAL;
- }
+ if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sRdmBuffer.Register & 0x3)) {
- temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Done On invalid Address : %x Access Denied.\n",
+ (int)sRdmBuffer.Register);
- if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sRdmBuffer.Register & 0x3)) {
+ kfree(temp_buff);
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM Done On invalid Address : %x Access Denied.\n",
- (int)sRdmBuffer.Register);
+ uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
+ (PUINT)temp_buff, IoBuffer.OutputLength);
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
kfree(temp_buff);
- return -EINVAL;
+ return -EFAULT;
}
+ } else {
+ Status = bytes;
+ }
- uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
- (PUINT)temp_buff, IoBuffer.OutputLength);
+ kfree(temp_buff);
+ return Status;
+}
- if (bytes > 0) {
- Status = STATUS_SUCCESS;
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- Status = bytes;
- }
+static int bcm_char_ioctl_eeprom_reg_write(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_wrm_buffer sWrmBuffer = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status;
- kfree(temp_buff);
- break;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle Mode, Blocking Wrms\n");
+ return -EACCES;
}
- case IOCTL_BCM_REGISTER_WRITE:
- case IOCTL_BCM_EEPROM_REGISTER_WRITE: {
- struct bcm_wrm_buffer sWrmBuffer = {0};
- UINT uiTempVar = 0;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Wrms\n");
- return -EACCES;
- }
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Get WrmBuffer structure */
+ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (IoBuffer.InputLength > sizeof(sWrmBuffer))
- return -EINVAL;
+ if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sWrmBuffer.Register & 0x3)) {
- /* Get WrmBuffer structure */
- if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)sWrmBuffer.Register);
+ return -EINVAL;
+ }
- if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)sWrmBuffer.Register & 0x3)) {
+ uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4)) &&
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)sWrmBuffer.Register);
- return -EINVAL;
- }
-
- uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
- Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
- (PUINT)sWrmBuffer.Data,
- sWrmBuffer.Length);
+ Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
+ (PUINT)sWrmBuffer.Data,
+ sWrmBuffer.Length);
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- Status = -EFAULT;
- }
- break;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Done\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "WRM Failed\n");
+ Status = -EFAULT;
}
- case IOCTL_BCM_GPIO_SET_REQUEST: {
- UCHAR ucResetValue[4];
- UINT value = 0;
- UINT uiBit = 0;
- UINT uiOperation = 0;
- struct bcm_gpio_info gpio_info = {0};
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
+static int bcm_char_ioctl_gpio_set_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_info gpio_info = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ UINT value = 0;
+ UINT uiBit = 0;
+ UINT uiOperation = 0;
+ INT Status;
+ int bytes;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
+ return -EACCES;
+ }
- if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
- uiOperation = gpio_info.uiGpioValue;
- value = (1<<uiBit);
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
- if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
- value);
- Status = -EINVAL;
- break;
- }
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- /* Set - setting 1 */
- if (uiOperation) {
- /* Set the gpio output register */
- Status = wrmaltWithLock(Adapter,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)(&value), sizeof(UINT));
+ uiBit = gpio_info.uiGpioNumber;
+ uiOperation = gpio_info.uiGpioValue;
+ value = (1<<uiBit);
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to set the %dth GPIO\n",
- uiBit);
- break;
- }
- } else {
- /* Set the gpio output register */
- Status = wrmaltWithLock(Adapter,
- BCM_GPIO_OUTPUT_CLR_REG,
- (PUINT)(&value), sizeof(UINT));
+ if (IsReqGpioIsLedInNVM(Adapter, value) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
+ value);
+ return -EINVAL;
+ }
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to clear the %dth GPIO\n",
- uiBit);
- break;
- }
- }
+ /* Set - setting 1 */
+ if (uiOperation) {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)(&value), sizeof(UINT));
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
- (PUINT)ucResetValue, sizeof(UINT));
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "GPIO_MODE_REGISTER read failed");
- break;
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to set the %dth GPIO\n",
+ uiBit);
+ return Status;
}
-
- /* Set the gpio mode register to output */
- *(UINT *)ucResetValue |= (1<<uiBit);
- Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
- (PUINT)ucResetValue, sizeof(UINT));
+ } else {
+ /* Set the gpio output register */
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG,
+ (PUINT)(&value), sizeof(UINT));
if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Set the GPIO to output Mode\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Set the GPIO bit\n");
} else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Failed to put GPIO in Output Mode\n");
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Failed to clear the %dth GPIO\n",
+ uiBit);
+ return Status;
}
}
- break;
- case BCM_LED_THREAD_STATE_CHANGE_REQ: {
- struct bcm_user_thread_req threadReq = {0};
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "User made LED thread InActive");
+ "GPIO_MODE_REGISTER read failed");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ /* Set the gpio mode register to output */
+ *(UINT *)ucResetValue |= (1<<uiBit);
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- Status = -EACCES;
- break;
- }
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Set the GPIO to output Mode\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Failed to put GPIO in Output Mode\n");
+ }
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ return Status;
+}
- if (IoBuffer.InputLength > sizeof(threadReq))
- return -EINVAL;
+static int bcm_char_ioctl_led_thread_state_change_req(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_user_thread_req threadReq = {0};
+ struct bcm_ioctl_buffer IoBuffer;
- if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "User made LED thread InActive");
- /* if LED thread is running(Actively or Inactively) set it state to make inactive */
- if (Adapter->LEDInfo.led_thread_running) {
- if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Activating thread req");
- Adapter->DriverState = LED_THREAD_ACTIVE;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "DeActivating Thread req.....");
- Adapter->DriverState = LED_THREAD_INACTIVE;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- /* signal thread. */
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "GPIO Can't be set/clear in Low power Mode");
+ return -EACCES;
}
- break;
-
- case IOCTL_BCM_GPIO_STATUS_REQUEST: {
- ULONG uiBit = 0;
- UCHAR ucRead[4];
- struct bcm_gpio_info gpio_info = {0};
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EACCES;
-
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
- if (IoBuffer.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- uiBit = gpio_info.uiGpioNumber;
+ if (IoBuffer.InputLength > sizeof(threadReq))
+ return -EINVAL;
- /* Set the gpio output register */
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)ucRead, sizeof(UINT));
+ if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM Failed\n");
- return Status;
+ /* if LED thread is running(Actively or Inactively)
+ * set it state to make inactive
+ */
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "Activating thread req");
+ Adapter->DriverState = LED_THREAD_ACTIVE;
} else {
- Status = STATUS_SUCCESS;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DeActivating Thread req.....");
+ Adapter->DriverState = LED_THREAD_INACTIVE;
}
+
+ /* signal thread. */
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- break;
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_GPIO_MULTI_REQUEST: {
- UCHAR ucResetValue[4];
- struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
- struct bcm_gpio_multi_info *pgpio_multi_info = (struct bcm_gpio_multi_info *)gpio_multi_info;
+static int bcm_char_ioctl_gpio_status_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_info gpio_info = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ ULONG uiBit = 0;
+ UCHAR ucRead[4];
+ INT Status;
+ int bytes;
- memset(pgpio_multi_info, 0, MAX_IDX * sizeof(struct bcm_gpio_multi_info));
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EACCES;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
- if (IoBuffer.InputLength > sizeof(gpio_multi_info))
- return -EINVAL;
+ if (copy_from_user(&gpio_info, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ uiBit = gpio_info.uiGpioNumber;
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
- Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ /* Set the gpio output register */
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ (PUINT)ucRead, sizeof(UINT));
- /* Set the gpio output register */
- if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
- (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
- /* Set 1's in GPIO OUTPUT REGISTER */
- *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
+ if (bytes < 0) {
+ Status = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM Failed\n");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+ return Status;
+}
+
+static int bcm_char_ioctl_gpio_multi_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
+ struct bcm_gpio_multi_info *pgpio_multi_info =
+ (struct bcm_gpio_multi_info *)gpio_multi_info;
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ INT Status = STATUS_FAILURE;
+ int bytes;
- if (*(UINT *) ucResetValue)
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)ucResetValue, sizeof(ULONG));
+ memset(pgpio_multi_info, 0, MAX_IDX * sizeof(struct bcm_gpio_multi_info));
- if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
- return Status;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- /* Clear to 0's in GPIO OUTPUT REGISTER */
- *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (*(UINT *) ucResetValue)
- Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG));
+ if (IoBuffer.InputLength > sizeof(gpio_multi_info))
+ return -EINVAL;
+ if (IoBuffer.OutputLength > sizeof(gpio_multi_info))
+ IoBuffer.OutputLength = sizeof(gpio_multi_info);
- if (Status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
- return Status;
- }
- }
+ if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
- if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (IsReqGpioIsLedInNVM(Adapter,
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
+ return -EINVAL;
+ }
- if (bytes < 0) {
- Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "RDM to GPIO_PIN_STATE_REGISTER Failed.");
- return Status;
- } else {
- Status = STATUS_SUCCESS;
- }
+ /* Set the gpio output register */
+ if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
+ (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
+ /* Set 1's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
- }
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_SET_REG,
+ (PUINT)ucResetValue, sizeof(ULONG));
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength);
- if (Status) {
+ if (Status != STATUS_SUCCESS) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d", Status);
- return -EFAULT;
+ "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
+ return Status;
}
- }
- break;
-
- case IOCTL_BCM_GPIO_MODE_REQUEST: {
- UCHAR ucResetValue[4];
- struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
- struct bcm_gpio_multi_mode *pgpio_multi_mode = (struct bcm_gpio_multi_mode *)gpio_multi_mode;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Clear to 0's in GPIO OUTPUT REGISTER */
+ *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
+ (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
- if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
- return -EINVAL;
+ if (*(UINT *) ucResetValue)
+ Status = wrmaltWithLock(Adapter,
+ BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue,
+ sizeof(ULONG));
- if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (Status != STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
+ return Status;
+ }
+ }
- bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
} else {
Status = STATUS_SUCCESS;
}
- /* Validating the request */
- if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap);
- Status = -EINVAL;
- break;
- }
+ pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
+ pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
+ }
- if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
- /* write all OUT's (1's) */
- *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info,
+ IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d",
+ Status);
+ return -EFAULT;
+ }
+ return Status;
+}
- /* write all IN's (0's) */
- *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+static int bcm_char_ioctl_gpio_mode_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
+ struct bcm_gpio_multi_mode *pgpio_multi_mode =
+ (struct bcm_gpio_multi_mode *)gpio_multi_mode;
+ struct bcm_ioctl_buffer IoBuffer;
+ UCHAR ucResetValue[4];
+ INT Status;
+ int bytes;
- /* Currently implemented return the modes of all GPIO's
- * else needs to bit AND with mask
- */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE))
+ return -EINVAL;
- Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(ULONG));
- if (Status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "WRM to GPIO_MODE_REGISTER Done");
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "WRM to GPIO_MODE_REGISTER Failed");
- Status = -EFAULT;
- break;
- }
- } else {
-/* if uiGPIOMask is 0 then return mode register configuration */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength);
- if (Status) {
+ if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
+ return -EINVAL;
+ if (IoBuffer.OutputLength > sizeof(gpio_multi_mode))
+ IoBuffer.OutputLength = sizeof(gpio_multi_mode);
+
+ if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer,
+ IoBuffer.InputLength))
+ return -EFAULT;
+
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(UINT));
+
+ if (bytes < 0) {
+ Status = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Read of GPIO_MODE_REGISTER failed");
+ return Status;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+
+ /* Validating the request */
+ if (IsReqGpioIsLedInNVM(Adapter,
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask,
+ Adapter->gpioBitMap);
+ return -EINVAL;
+ }
+
+ if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
+ /* write all OUT's (1's) */
+ *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* write all IN's (0's) */
+ *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
+
+ /* Currently implemented return the modes of all GPIO's
+ * else needs to bit AND with mask
+ */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
+
+ Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER,
+ (PUINT)ucResetValue, sizeof(ULONG));
+ if (Status == STATUS_SUCCESS) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "WRM to GPIO_MODE_REGISTER Done");
+ } else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d", Status);
+ "WRM to GPIO_MODE_REGISTER Failed");
return -EFAULT;
}
+ } else {
+ /* if uiGPIOMask is 0 then return mode register configuration */
+ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue;
}
- break;
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ: {
- PVOID pvBuffer = NULL;
+ Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode,
+ IoBuffer.OutputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Failed while copying Content to IOBufer for user space err:%d",
+ Status);
+ return -EFAULT;
+ }
+ return Status;
+}
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+static int bcm_char_ioctl_misc_request(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ PVOID pvBuffer = NULL;
+ INT Status;
- if (IoBuffer.InputLength < sizeof(struct bcm_link_request))
- return -EINVAL;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
+ if (IoBuffer.InputLength < sizeof(struct bcm_link_request))
+ return -EINVAL;
- pvBuffer = memdup_user(IoBuffer.InputBuffer,
- IoBuffer.InputLength);
- if (IS_ERR(pvBuffer))
- return PTR_ERR(pvBuffer);
-
- down(&Adapter->LowPowerModeSync);
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
- !Adapter->bPreparingForLowPowerMode,
- (1 * HZ));
- if (Status == -ERESTARTSYS)
- goto cntrlEnd;
-
- if (Adapter->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Preparing Idle Mode is still True - Hence Rejecting control message\n");
- Status = STATUS_FAILURE;
- goto cntrlEnd;
- }
- Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
+ if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE)
+ return -EINVAL;
-cntrlEnd:
- up(&Adapter->LowPowerModeSync);
- kfree(pvBuffer);
- break;
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
+
+ down(&Adapter->LowPowerModeSync);
+ Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue,
+ !Adapter->bPreparingForLowPowerMode,
+ (1 * HZ));
+ if (Status == -ERESTARTSYS)
+ goto cntrlEnd;
+
+ if (Adapter->bPreparingForLowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Preparing Idle Mode is still True - Hence Rejecting control message\n");
+ Status = STATUS_FAILURE;
+ goto cntrlEnd;
}
+ Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
- case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
- if (down_trylock(&Adapter->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
+cntrlEnd:
+ up(&Adapter->LowPowerModeSync);
+ kfree(pvBuffer);
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n", current->pid);
-
- if (down_trylock(&Adapter->fw_download_sema))
- return -EBUSY;
-
- Adapter->bBinDownloaded = false;
- Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = false;
- Adapter->fw_download_done = false;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if (Status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- }
- mdelay(10);
+static int bcm_char_ioctl_buffer_download_start(
+ struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
+ }
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+
+ if (down_trylock(&Adapter->fw_download_sema))
+ return -EBUSY;
+
+ Adapter->bBinDownloaded = false;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = false;
+ Adapter->fw_download_done = false;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
return Status;
}
+ mdelay(10);
- case IOCTL_BCM_BUFFER_DOWNLOAD: {
- struct bcm_firmware_info *psFwInfo = NULL;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
- if (!down_trylock(&Adapter->fw_download_sema)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- up(&Adapter->fw_download_sema);
- Status = -EINVAL;
- return Status;
- }
+static int bcm_char_ioctl_buffer_download(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_firmware_info *psFwInfo = NULL;
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- up(&Adapter->fw_download_sema);
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Starting the firmware download PID =0x%x!!!!\n", current->pid);
+ if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
-
- if (IoBuffer.InputLength > sizeof(struct bcm_firmware_info)) {
- up(&Adapter->fw_download_sema);
- return -EINVAL;
- }
-
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if (!psFwInfo) {
- up(&Adapter->fw_download_sema);
- return -ENOMEM;
- }
-
- if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- up(&Adapter->fw_download_sema);
- kfree(psFwInfo);
- return -EFAULT;
- }
-
- if (!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0)) {
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- up(&Adapter->fw_download_sema);
- kfree(psFwInfo);
- Status = -EINVAL;
- return Status;
- }
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- if (Status != STATUS_SUCCESS) {
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+ if (IoBuffer.InputLength > sizeof(struct bcm_firmware_info)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- /* up(&Adapter->fw_download_sema); */
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo) {
+ up(&Adapter->fw_download_sema);
+ return -ENOMEM;
+ }
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = false;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer,
+ IoBuffer.InputLength)) {
+ up(&Adapter->fw_download_sema);
+ kfree(psFwInfo);
+ return -EFAULT;
+ }
- if (Status != STATUS_SUCCESS)
- up(&Adapter->fw_download_sema);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+			"Invalid firmware address or length %lu\n",
+ psFwInfo->u32FirmwareLength);
+ up(&Adapter->fw_download_sema);
kfree(psFwInfo);
+ Status = -EINVAL;
return Status;
}
- case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
- if (!down_trylock(&Adapter->fw_download_sema)) {
- up(&Adapter->fw_download_sema);
- return -EINVAL;
- }
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "FW download blocked as EEPROM Read/Write is in progress\n");
- up(&Adapter->fw_download_sema);
- return -EACCES;
- }
-
- Adapter->bBinDownloaded = TRUE;
- Adapter->bCfgDownloaded = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->downloadDDR = 0;
-
- /* setting the Mips to Run */
- Status = run_card_proc(Adapter);
+ "IOCTL: Configuration File Upload Failed\n");
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "IOCTL: Firmware File Upload Failed\n");
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
- }
+ /* up(&Adapter->fw_download_sema); */
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((struct bcm_interface_adapter *)Adapter->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = false;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid = INVALID_PID;
- Adapter->fw_download_done = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = false;
wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ }
+
+ if (Status != STATUS_SUCCESS)
+ up(&Adapter->fw_download_sema);
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL: Firmware File Uploaded\n");
+ kfree(psFwInfo);
+ return Status;
+}
- if (!timeout)
- Status = -ENODEV;
+static int bcm_char_ioctl_buffer_download_stop(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ int timeout = 0;
+ if (!down_trylock(&Adapter->fw_download_sema)) {
up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- return Status;
+ return -EINVAL;
}
- case IOCTL_BE_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
- Status = -EFAULT;
- break;
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "FW download blocked as EEPROM Read/Write is in progress\n");
+ up(&Adapter->fw_download_sema);
+ return -EACCES;
+ }
- case IOCTL_RTPS_BUCKET_SIZE:
- Status = 0;
- if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
- Status = -EFAULT;
- break;
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
- case IOCTL_CHIP_RESET: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if (NVMAccess) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
- down(&Adapter->RxAppControlQueuelock);
- Status = reset_card_proc(Adapter);
- flushAllAppQ();
- up(&Adapter->RxAppControlQueuelock);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
- ResetCounters(Adapter);
- break;
+ return Status;
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
}
- case IOCTL_QOS_THRESHOLD: {
- USHORT uiLoopIndex;
+ mdelay(10);
- Status = 0;
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
- if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
- (unsigned long __user *)arg)) {
- Status = -EFAULT;
- break;
- }
- }
- break;
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((struct bcm_interface_adapter *)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Unable to send interrupt...\n");
+
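+	/* Wait up to five seconds for the download-done notification. */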
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = false;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
- case IOCTL_DUMP_PACKET_INFO:
- DumpPackInfo(Adapter);
- DumpPhsRules(&Adapter->stBCMPhsContext);
- Status = STATUS_SUCCESS;
- break;
+ if (!timeout)
+ Status = -ENODEV;
- case IOCTL_GET_PACK_INFO:
- if (copy_to_user(argp, &Adapter->PackInfo, sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
- return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
- case IOCTL_BCM_SWITCH_TRANSFER_MODE: {
- UINT uiData = 0;
- if (copy_from_user(&uiData, argp, sizeof(UINT)))
- return -EFAULT;
+static int bcm_char_ioctl_chip_reset(struct bcm_mini_adapter *Adapter)
+{
+ INT Status;
+ INT NVMAccess;
- if (uiData) {
- /* Allow All Packets */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
- Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
- } else {
- /* Allow IP only Packets */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
- Adapter->TransferMode = IP_PACKET_ONLY_MODE;
- }
- Status = STATUS_SUCCESS;
- break;
+ NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (NVMAccess) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
+ return -EACCES;
}
- case IOCTL_BCM_GET_DRIVER_VERSION: {
- ulong len;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
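+	/* Hold the Rx application control queue lock across the reset and
+	 * flush any queued control packets before releasing it.
+	 */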
+ down(&Adapter->RxAppControlQueuelock);
+ Status = reset_card_proc(Adapter);
+ flushAllAppQ();
+ up(&Adapter->RxAppControlQueuelock);
+ up(&Adapter->NVMRdmWrmLock);
+ ResetCounters(Adapter);
+ return Status;
+}
- len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
+static int bcm_char_ioctl_qos_threshold(ULONG arg,
+ struct bcm_mini_adapter *Adapter)
+{
+ USHORT uiLoopIndex;
- if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
+ for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+ if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+ (unsigned long __user *)arg)) {
return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ }
}
+ return 0;
+}
- case IOCTL_BCM_GET_CURRENT_STATUS: {
- struct bcm_link_state link_state;
+static int bcm_char_ioctl_switch_transfer_mode(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ UINT uiData = 0;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- return -EFAULT;
- }
+ if (copy_from_user(&uiData, argp, sizeof(UINT)))
+ return -EFAULT;
- if (IoBuffer.OutputLength != sizeof(link_state)) {
- Status = -EINVAL;
- break;
- }
+ if (uiData) {
+ /* Allow All Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
+ Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE;
+ } else {
+ /* Allow IP only Packets */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
+ Adapter->TransferMode = IP_PACKET_ONLY_MODE;
+ }
+ return STATUS_SUCCESS;
+}
- memset(&link_state, 0, sizeof(link_state));
- link_state.bIdleMode = Adapter->IdleMode;
- link_state.bShutdownMode = Adapter->bShutStatus;
- link_state.ucLinkStatus = Adapter->LinkStatus;
+static int bcm_char_ioctl_get_driver_version(void __user *argp)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ ulong len;
- if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- return -EFAULT;
- }
- Status = STATUS_SUCCESS;
- break;
- }
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- case IOCTL_BCM_SET_MAC_TRACING: {
- UINT tracing_flag;
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
- /* copy ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
+ return -EFAULT;
- if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
- return -EFAULT;
+ return STATUS_SUCCESS;
+}
- if (tracing_flag)
- Adapter->pTarangs->MacTracingEnabled = TRUE;
- else
- Adapter->pTarangs->MacTracingEnabled = false;
- break;
- }
+static int bcm_char_ioctl_get_current_status(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_link_state link_state;
+ struct bcm_ioctl_buffer IoBuffer;
- case IOCTL_BCM_GET_DSX_INDICATION: {
- ULONG ulSFId = 0;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "copy_from_user failed..\n");
+ return -EFAULT;
+ }
- if (IoBuffer.OutputLength < sizeof(struct bcm_add_indication_alt)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Mismatch req: %lx needed is =0x%zx!!!",
- IoBuffer.OutputLength, sizeof(struct bcm_add_indication_alt));
- return -EINVAL;
- }
+ if (IoBuffer.OutputLength != sizeof(link_state))
+ return -EINVAL;
- if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
- return -EFAULT;
+ memset(&link_state, 0, sizeof(link_state));
+ link_state.bIdleMode = Adapter->IdleMode;
+ link_state.bShutdownMode = Adapter->bShutStatus;
+ link_state.ucLinkStatus = Adapter->LinkStatus;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId);
- get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
- Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t,
+ sizeof(link_state), IoBuffer.OutputLength))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy_to_user Failed..\n");
+ return -EFAULT;
}
- break;
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_GET_HOST_MIBS: {
- PVOID temp_buff;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+static int bcm_char_ioctl_set_mac_tracing(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT tracing_flag;
- if (IoBuffer.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length Check failed %lu %zd\n",
- IoBuffer.OutputLength, sizeof(struct bcm_host_stats_mibs));
- return -EINVAL;
- }
+ /* copy ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
- temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
+ if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
+ return -EFAULT;
- Status = ProcessGetHostMibs(Adapter, temp_buff);
- GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
+ if (tracing_flag)
+ Adapter->pTarangs->MacTracingEnabled = TRUE;
+ else
+ Adapter->pTarangs->MacTracingEnabled = false;
- if (Status != STATUS_FAILURE)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(struct bcm_host_stats_mibs))) {
- kfree(temp_buff);
- return -EFAULT;
- }
+ return STATUS_SUCCESS;
+}
- kfree(temp_buff);
- break;
+static int bcm_char_ioctl_get_dsx_indication(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ ULONG ulSFId = 0;
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(struct bcm_add_indication_alt)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Mismatch req: %lx needed is =0x%zx!!!",
+ IoBuffer.OutputLength,
+ sizeof(struct bcm_add_indication_alt));
+ return -EINVAL;
}
- case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) {
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- }
+ if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
+ return -EFAULT;
- Status = STATUS_SUCCESS;
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Get DSX Data SF ID is =%lx\n", ulSFId);
+ get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
+ return STATUS_SUCCESS;
+}
- case IOCTL_BCM_BULK_WRM: {
- struct bcm_bulk_wrm_buffer *pBulkBuffer;
- UINT uiTempVar = 0;
- PCHAR pvBuffer = NULL;
+static int bcm_char_ioctl_get_host_mibs(void __user *argp,
+ struct bcm_mini_adapter *Adapter, struct bcm_tarang_data *pTarang)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ PVOID temp_buff;
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n");
- Status = -EACCES;
- break;
- }
+ if (IoBuffer.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length Check failed %lu %zd\n", IoBuffer.OutputLength,
+ sizeof(struct bcm_host_stats_mibs));
+ return -EINVAL;
+ }
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
+ temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
+ if (!temp_buff)
+ return STATUS_FAILURE;
+
+ Status = ProcessGetHostMibs(Adapter, temp_buff);
+ GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
+
+ if (Status != STATUS_FAILURE) {
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff,
+ sizeof(struct bcm_host_stats_mibs))) {
+ kfree(temp_buff);
return -EFAULT;
+ }
+ }
- if (IoBuffer.InputLength < sizeof(ULONG) * 2)
- return -EINVAL;
+ kfree(temp_buff);
+ return Status;
+}
- pvBuffer = memdup_user(IoBuffer.InputBuffer,
- IoBuffer.InputLength);
- if (IS_ERR(pvBuffer))
- return PTR_ERR(pvBuffer);
+static int bcm_char_ioctl_bulk_wrm(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_bulk_wrm_buffer *pBulkBuffer;
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiTempVar = 0;
+ INT Status = STATUS_FAILURE;
+ PCHAR pvBuffer = NULL;
- pBulkBuffer = (struct bcm_bulk_wrm_buffer *)pvBuffer;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
- ((ULONG)pBulkBuffer->Register & 0x3)) {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)pBulkBuffer->Register);
- kfree(pvBuffer);
- Status = -EINVAL;
- break;
- }
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device in Idle/Shutdown Mode, Blocking Wrms\n");
+ return -EACCES;
+ }
- uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
- if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
- ((uiTempVar == EEPROM_REJECT_REG_1) ||
- (uiTempVar == EEPROM_REJECT_REG_2) ||
- (uiTempVar == EEPROM_REJECT_REG_3) ||
- (uiTempVar == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- kfree(pvBuffer);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
- Status = -EFAULT;
- break;
- }
+ if (IoBuffer.InputLength < sizeof(ULONG) * 2)
+ return -EINVAL;
- if (pBulkBuffer->SwapEndian == false)
- Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
- else
- Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG));
+ pvBuffer = memdup_user(IoBuffer.InputBuffer,
+ IoBuffer.InputLength);
+ if (IS_ERR(pvBuffer))
+ return PTR_ERR(pvBuffer);
- if (Status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
+ pBulkBuffer = (struct bcm_bulk_wrm_buffer *)pvBuffer;
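+
+	/* The target register must be 32-bit aligned and have the
+	 * 0x0F000000 address bits set; anything else is refused.
+	 */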
+ if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
+ ((ULONG)pBulkBuffer->Register & 0x3)) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "WRM Done On invalid Address : %x Access Denied.\n",
+ (int)pBulkBuffer->Register);
kfree(pvBuffer);
- break;
+ return -EINVAL;
}
- case IOCTL_BCM_GET_NVM_SIZE:
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK;
+ if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) &&
+ ((uiTempVar == EEPROM_REJECT_REG_1) ||
+ (uiTempVar == EEPROM_REJECT_REG_2) ||
+ (uiTempVar == EEPROM_REJECT_REG_3) ||
+ (uiTempVar == EEPROM_REJECT_REG_4)) &&
+ (cmd == IOCTL_BCM_REGISTER_WRITE)) {
+
+ kfree(pvBuffer);
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "EEPROM Access Denied, not in VSG Mode\n");
+ return -EFAULT;
+ }
+
+ if (pBulkBuffer->SwapEndian == false)
+ Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register,
+ (PCHAR)pBulkBuffer->Values,
+ IoBuffer.InputLength - 2*sizeof(ULONG));
+ else
+ Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register,
+ (PUINT)pBulkBuffer->Values,
+ IoBuffer.InputLength - 2*sizeof(ULONG));
+
+ if (Status != STATUS_SUCCESS)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
+
+ kfree(pvBuffer);
+ return Status;
+}
+
+static int bcm_char_ioctl_get_nvm_size(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize,
+ sizeof(UINT)))
return -EFAULT;
+ }
- if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
- return -EFAULT;
- }
+ return STATUS_SUCCESS;
+}
- Status = STATUS_SUCCESS;
- break;
+static int bcm_char_ioctl_cal_init(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ UINT uiSectorSize = 0;
+ INT Status = STATUS_FAILURE;
- case IOCTL_BCM_CAL_INIT: {
- UINT uiSectorSize = 0;
- if (Adapter->eNVMType == NVM_FLASH) {
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ if (Adapter->eNVMType == NVM_FLASH) {
+ if (copy_from_user(&IoBuffer, argp,
+ sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
- return -EFAULT;
+ if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer,
+ sizeof(UINT)))
+ return -EFAULT;
- if ((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
- sizeof(UINT)))
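+		/* An out-of-range sector size is not applied; the currently
+		 * configured size is reported back to the caller instead.
+		 */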
+ if ((uiSectorSize < MIN_SECTOR_SIZE) ||
+ (uiSectorSize > MAX_SECTOR_SIZE)) {
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ &Adapter->uiSectorSize, sizeof(UINT)))
+ return -EFAULT;
+ } else {
+ if (IsFlash2x(Adapter)) {
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ &Adapter->uiSectorSize, sizeof(UINT)))
return -EFAULT;
} else {
- if (IsFlash2x(Adapter)) {
- if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if ((TRUE == Adapter->bShutStatus) || (TRUE == Adapter->IdleMode)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is in Idle/Shutdown Mode\n");
- return -EACCES;
- }
-
- Adapter->uiSectorSize = uiSectorSize;
- BcmUpdateSectorSize(Adapter, Adapter->uiSectorSize);
+ if ((TRUE == Adapter->bShutStatus) ||
+ (TRUE == Adapter->IdleMode)) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Device is in Idle/Shutdown Mode\n");
+ return -EACCES;
}
+
+ Adapter->uiSectorSize = uiSectorSize;
+ BcmUpdateSectorSize(Adapter,
+ Adapter->uiSectorSize);
}
- Status = STATUS_SUCCESS;
- } else {
- Status = STATUS_FAILURE;
}
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = STATUS_FAILURE;
}
- break;
+ return Status;
+}
- case IOCTL_BCM_SET_DEBUG:
+static int bcm_char_ioctl_set_debug(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
#ifdef DEBUG
- {
- struct bcm_user_debug_state sUserDebugState;
+ struct bcm_ioctl_buffer IoBuffer;
+ struct bcm_user_debug_state sUserDebugState;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "In SET_DEBUG ioctl\n");
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(struct bcm_user_debug_state)))
- return -EFAULT;
+ if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer,
+ sizeof(struct bcm_user_debug_state)))
+ return -EFAULT;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
- sUserDebugState.OnOff, sUserDebugState.Type);
- /* sUserDebugState.Subtype <<= 1; */
- sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype);
-
- /* Update new 'DebugState' in the Adapter */
- Adapter->stDebugState.type |= sUserDebugState.Type;
- /* Subtype: A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array: 1,2,4,8
- * corresponding to valid Type values. Hence we can use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- if (sUserDebugState.OnOff)
- Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype;
- else
- Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype;
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
+ sUserDebugState.OnOff, sUserDebugState.Type);
+ /* sUserDebugState.Subtype <<= 1; */
+ sUserDebugState.Subtype = 1 << sUserDebugState.Subtype;
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "actual Subtype=0x%x\n", sUserDebugState.Subtype);
+
+ /* Update new 'DebugState' in the Adapter */
+ Adapter->stDebugState.type |= sUserDebugState.Type;
+ /* Subtype: A bitmap of 32 bits for Subtype per Type.
+ * Valid indexes in 'subtype' array: 1,2,4,8
+ * corresponding to valid Type values. Hence we can use the 'Type' field
+ * as the index value, ignoring the array entries 0,3,5,6,7 !
+ */
+ if (sUserDebugState.OnOff)
+ Adapter->stDebugState.subtype[sUserDebugState.Type] |=
+ sUserDebugState.Subtype;
+ else
+ Adapter->stDebugState.subtype[sUserDebugState.Type] &=
+ ~sUserDebugState.Subtype;
+
+ BCM_SHOW_DEBUG_BITMAP(Adapter);
+#endif
+ return STATUS_SUCCESS;
+}
+
+static int bcm_char_ioctl_nvm_rw(void __user *argp,
+ struct bcm_mini_adapter *Adapter, UINT cmd)
+{
+ struct bcm_nvm_readwrite stNVMReadWrite;
+ struct timeval tv0, tv1;
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pReadData = NULL;
+ ULONG ulDSDMagicNumInUsrBuff = 0;
+ INT Status = STATUS_FAILURE;
- BCM_SHOW_DEBUG_BITMAP(Adapter);
+ memset(&tv0, 0, sizeof(struct timeval));
+ memset(&tv1, 0, sizeof(struct timeval));
+ if ((Adapter->eNVMType == NVM_FLASH) &&
+ (Adapter->uiFlashLayoutMajorVersion == 0)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
+ return -EFAULT;
}
-#endif
- break;
- case IOCTL_BCM_NVM_READ:
- case IOCTL_BCM_NVM_WRITE: {
- struct bcm_nvm_readwrite stNVMReadWrite;
- PUCHAR pReadData = NULL;
- ULONG ulDSDMagicNumInUsrBuff = 0;
- struct timeval tv0, tv1;
- memset(&tv0, 0, sizeof(struct timeval));
- memset(&tv1, 0, sizeof(struct timeval));
- if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- return -EFAULT;
+ if (IsFlash2x(Adapter)) {
+ if ((Adapter->eActiveDSD != DSD0) &&
+ (Adapter->eActiveDSD != DSD1) &&
+ (Adapter->eActiveDSD != DSD2)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+				"No DSD is active, hence the NVM command is blocked");
+ return STATUS_FAILURE;
}
+ }
- if (IsFlash2x(Adapter)) {
- if ((Adapter->eActiveDSD != DSD0) &&
- (Adapter->eActiveDSD != DSD1) &&
- (Adapter->eActiveDSD != DSD2)) {
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE;
- }
- }
+ if (copy_from_user(&stNVMReadWrite,
+ (IOCTL_BCM_NVM_READ == cmd) ?
+ IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
+ sizeof(struct bcm_nvm_readwrite)))
+ return -EFAULT;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ /*
+ * Deny the access if the offset crosses the cal area limit.
+ */
+ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
+ return STATUS_FAILURE;
- if (copy_from_user(&stNVMReadWrite,
- (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
+ if (stNVMReadWrite.uiOffset >
+ Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes)
+ return STATUS_FAILURE;
- /*
- * Deny the access if the offset crosses the cal area limit.
- */
- if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize)
- return STATUS_FAILURE;
+ pReadData = memdup_user(stNVMReadWrite.pBuffer,
+ stNVMReadWrite.uiNumBytes);
+ if (IS_ERR(pReadData))
+ return PTR_ERR(pReadData);
- if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) {
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
- return STATUS_FAILURE;
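+	/* Time-stamp the transfer so the elapsed read/write time can be
+	 * reported once it completes.
+	 */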
+ do_gettimeofday(&tv0);
+ if (IOCTL_BCM_NVM_READ == cmd) {
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
}
- pReadData = memdup_user(stNVMReadWrite.pBuffer,
- stNVMReadWrite.uiNumBytes);
- if (IS_ERR(pReadData))
- return PTR_ERR(pReadData);
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadData,
+ stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
+ up(&Adapter->NVMRdmWrmLock);
- do_gettimeofday(&tv0);
- if (IOCTL_BCM_NVM_READ == cmd) {
- down(&Adapter->NVMRdmWrmLock);
+ if (Status != STATUS_SUCCESS) {
+ kfree(pReadData);
+ return Status;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (copy_to_user(stNVMReadWrite.pBuffer, pReadData,
+ stNVMReadWrite.uiNumBytes)) {
+ kfree(pReadData);
+ return -EFAULT;
+ }
+ } else {
+ down(&Adapter->NVMRdmWrmLock);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- Status = BeceemNVMRead(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes);
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return -EACCES;
+ }
- if (Status != STATUS_SUCCESS) {
- kfree(pReadData);
- return Status;
- }
-
- if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
- kfree(pReadData);
- return -EFAULT;
- }
- } else {
- down(&Adapter->NVMRdmWrmLock);
+ Adapter->bHeaderChangeAllowed = TRUE;
+ if (IsFlash2x(Adapter)) {
+ /*
+			 * New requirement:
+			 * updating the DSD section is allowed in two cases:
+			 * 1. the DSD sig is present in the DSD header, which
+			 * means the dongle is OK and the update is worthwhile
+			 * 2. if point 1 fails, the user buffer must itself
+			 * carry the DSD sig. This ensures that if the dongle
+			 * is corrupted, the user space program first rewrites
+			 * the DSD header with a valid DSD sig so that this as
+			 * well as further writes may be worthwhile.
+			 *
+			 * This restriction assumes that if the DSD sig is
+			 * corrupted, the DSD data won't be considered valid.
+ */
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ Status = BcmFlash2xCorruptSig(Adapter,
+ Adapter->eActiveDSD);
+ if (Status != STATUS_SUCCESS) {
+ if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) !=
+ Adapter->uiNVMDSDSize) ||
+ (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return -EACCES;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL,
+ "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return Status;
+ }
- Adapter->bHeaderChangeAllowed = TRUE;
- if (IsFlash2x(Adapter)) {
- /*
- * New Requirement:-
- * DSD section updation will be allowed in two case:-
- * 1. if DSD sig is present in DSD header means dongle is ok and updation is fruitfull
- * 2. if point 1 failes then user buff should have DSD sig. this point ensures that if dongle is
- * corrupted then user space program first modify the DSD header with valid DSD sig so
- * that this as well as further write may be worthwhile.
- *
- * This restriction has been put assuming that if DSD sig is corrupted, DSD
- * data won't be considered valid.
- */
-
- Status = BcmFlash2xCorruptSig(Adapter, Adapter->eActiveDSD);
- if (Status != STATUS_SUCCESS) {
- if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize) ||
- (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
-
- ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
- if (ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input..");
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadData);
- return Status;
- }
+ ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE));
+ if (ulDSDMagicNumInUsrBuff !=
+ DSD_IMAGE_MAGIC_NUMBER) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "DSD Sig is present neither in Flash nor User provided Input..");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadData);
+ return Status;
}
}
+ }
- Status = BeceemNVMWrite(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify);
- if (IsFlash2x(Adapter))
- BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
+ Status = BeceemNVMWrite(Adapter, (PUINT)pReadData,
+ stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes,
+ stNVMReadWrite.bVerify);
+ if (IsFlash2x(Adapter))
+ BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD);
- Adapter->bHeaderChangeAllowed = false;
+ Adapter->bHeaderChangeAllowed = false;
- up(&Adapter->NVMRdmWrmLock);
+ up(&Adapter->NVMRdmWrmLock);
- if (Status != STATUS_SUCCESS) {
- kfree(pReadData);
- return Status;
- }
+ if (Status != STATUS_SUCCESS) {
+ kfree(pReadData);
+ return Status;
}
+ }
+
+ do_gettimeofday(&tv1);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ " timetaken by Write/read :%ld msec\n",
+ (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
+
+ kfree(pReadData);
+ return STATUS_SUCCESS;
+}
- do_gettimeofday(&tv1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
+static int bcm_char_ioctl_flash2x_section_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_readwrite sFlash2xRead = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pReadBuff = NULL;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT ReadBytes = 0;
+ UINT ReadOffset = 0;
+ INT Status = STATUS_FAILURE;
+ void __user *OutPutBuff;
- kfree(pReadData);
- return STATUS_SUCCESS;
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- case IOCTL_BCM_FLASH2X_SECTION_READ: {
- struct bcm_flash2x_readwrite sFlash2xRead = {0};
- PUCHAR pReadBuff = NULL;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT ReadBytes = 0;
- UINT ReadOffset = 0;
- void __user *OutPutBuff;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_readwrite)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
+	/* This was internal to the driver for raw reads.
+	 * It has now been exposed to the user space app.
+	 */
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == false)
+ return STATUS_FAILURE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xRead.Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%x", sFlash2xRead.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify);
+ NOB = sFlash2xRead.numOfBytes;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB;
- /* This was internal to driver for raw read. now it has ben exposed to user space app. */
- if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == false)
- return STATUS_FAILURE;
+ ReadOffset = sFlash2xRead.offset;
+ OutPutBuff = IoBuffer.OutputBuffer;
+	pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
+
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory allocation failed for Flash 2.x Read Structure");
+ return -ENOMEM;
+ }
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EACCES;
+ }
- NOB = sFlash2xRead.numOfBytes;
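+	/* Read at most one sector per iteration and copy each chunk
+	 * straight to the user buffer.
+	 */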
+ while (NOB) {
if (NOB > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
+ ReadBytes = Adapter->uiSectorSize;
else
- BuffSize = NOB;
-
- ReadOffset = sFlash2xRead.offset;
- OutPutBuff = IoBuffer.OutputBuffer;
- pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
+ ReadBytes = NOB;
- if (pReadBuff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
+ /* Reading the data from Flash 2.x */
+ Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff,
+ sFlash2xRead.Section, ReadOffset, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Flash 2x read err with Status :%d",
+ Status);
+ break;
}
- down(&Adapter->NVMRdmWrmLock);
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, pReadBuff, ReadBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter,
+ DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+				"Copy to user failed with status: %d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pReadBuff);
- return -EACCES;
+ return -EFAULT;
}
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes;
+ }
+ }
- while (NOB) {
- if (NOB > Adapter->uiSectorSize)
- ReadBytes = Adapter->uiSectorSize;
- else
- ReadBytes = NOB;
-
- /* Reading the data from Flash 2.x */
- Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff, sFlash2xRead.Section, ReadOffset, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Flash 2x read err with Status :%d", Status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return Status;
+}
- Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- return -EFAULT;
- }
- NOB = NOB - ReadBytes;
- if (NOB) {
- ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes;
- }
- }
+static int bcm_char_ioctl_flash2x_section_write(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_readwrite sFlash2xWrite = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ PUCHAR pWriteBuff;
+ void __user *InputAddr;
+ UINT NOB = 0;
+ UINT BuffSize = 0;
+ UINT WriteOffset = 0;
+ UINT WriteBytes = 0;
+ INT Status = STATUS_FAILURE;
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_FLASH2X_SECTION_WRITE: {
- struct bcm_flash2x_readwrite sFlash2xWrite = {0};
- PUCHAR pWriteBuff;
- void __user *InputAddr;
- UINT NOB = 0;
- UINT BuffSize = 0;
- UINT WriteOffset = 0;
- UINT WriteBytes = 0;
+ /* First make this False so that we can enable the Sector
+ * Permission Check in BeceemFlashBulkWrite
+ */
+ Adapter->bAllDSDWriteAllow = false;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
- /* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */
- Adapter->bAllDSDWriteAllow = false;
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
+ /* Reading FLASH 2.x READ structure */
+ if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_readwrite)))
+ return -EFAULT;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.Section :%x", sFlash2xWrite.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.offset :%d", sFlash2xWrite.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.numOfBytes :%x", sFlash2xWrite.numOfBytes);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\nsFlash2xRead.bVerify :%x\n", sFlash2xWrite.bVerify);
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
+ if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1)
+ && (sFlash2xWrite.Section != VSA2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Only VSA write is allowed");
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xWrite.Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%d", sFlash2xWrite.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xWrite.numOfBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xWrite.bVerify);
+ if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == false)
+ return STATUS_FAILURE;
- if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) && (sFlash2xWrite.Section != VSA2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Only VSA write is allowed");
- return -EINVAL;
- }
+ InputAddr = sFlash2xWrite.pDataBuff;
+ WriteOffset = sFlash2xWrite.offset;
+ NOB = sFlash2xWrite.numOfBytes;
- if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == false)
- return STATUS_FAILURE;
+ if (NOB > Adapter->uiSectorSize)
+ BuffSize = Adapter->uiSectorSize;
+ else
+ BuffSize = NOB;
- InputAddr = sFlash2xWrite.pDataBuff;
- WriteOffset = sFlash2xWrite.offset;
- NOB = sFlash2xWrite.numOfBytes;
+ pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
- if (NOB > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = NOB;
+ if (pWriteBuff == NULL)
+ return -ENOMEM;
- pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
+	/* First chunk: write only up to the next sector boundary. */
+	WriteBytes = Adapter->uiSectorSize;
+	if (WriteOffset % Adapter->uiSectorSize)
+		WriteBytes = Adapter->uiSectorSize -
+			(WriteOffset % Adapter->uiSectorSize);
- if (pWriteBuff == NULL)
- return -ENOMEM;
+ if (NOB < WriteBytes)
+ WriteBytes = NOB;
- /* extracting the remainder of the given offset. */
- WriteBytes = Adapter->uiSectorSize;
- if (WriteOffset % Adapter->uiSectorSize)
- WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize);
+ down(&Adapter->NVMRdmWrmLock);
- if (NOB < WriteBytes)
- WriteBytes = NOB;
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- down(&Adapter->NVMRdmWrmLock);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EACCES;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
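+	/* Invalidate the section signature before writing; BcmFlash2xWriteSig()
+	 * restores it once the whole buffer has been written, so a partially
+	 * written section is never treated as valid.
+	 */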
+ BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
+ do {
+ Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+				"Copy from user failed with status: %d", Status);
up(&Adapter->NVMRdmWrmLock);
kfree(pWriteBuff);
- return -EACCES;
+ return -EFAULT;
}
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS,
+ OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
- BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section);
- do {
- Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
- return -EFAULT;
- }
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
+ /* Writing the data from Flash 2.x */
+ Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff,
+ sFlash2xWrite.Section, WriteOffset, WriteBytes,
+ sFlash2xWrite.bVerify);
- /* Writing the data from Flash 2.x */
- Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff, sFlash2xWrite.Section, WriteOffset, WriteBytes, sFlash2xWrite.bVerify);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+				"Flash 2x write err with Status: %d", Status);
+ break;
+ }
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
- break;
- }
+ NOB = NOB - WriteBytes;
+ if (NOB) {
+ WriteOffset = WriteOffset + WriteBytes;
+ InputAddr = InputAddr + WriteBytes;
+ if (NOB > Adapter->uiSectorSize)
+ WriteBytes = Adapter->uiSectorSize;
+ else
+ WriteBytes = NOB;
+ }
+ } while (NOB > 0);
- NOB = NOB - WriteBytes;
- if (NOB) {
- WriteOffset = WriteOffset + WriteBytes;
- InputAddr = InputAddr + WriteBytes;
- if (NOB > Adapter->uiSectorSize)
- WriteBytes = Adapter->uiSectorSize;
- else
- WriteBytes = NOB;
- }
- } while (NOB > 0);
+ BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return Status;
+}
+
+static int bcm_char_ioctl_flash2x_section_bitmap(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_bitmap *psFlash2xBitMap;
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+
+	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength != sizeof(struct bcm_flash2x_bitmap))
+ return -EINVAL;
+
+ psFlash2xBitMap = kzalloc(sizeof(struct bcm_flash2x_bitmap), GFP_KERNEL);
+ if (psFlash2xBitMap == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory is not available");
+ return -ENOMEM;
+ }
+
+	/* Reading the Flash Section Bitmap */
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- kfree(pWriteBuff);
+ kfree(psFlash2xBitMap);
+ return -EACCES;
}
- break;
- case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: {
- struct bcm_flash2x_bitmap *psFlash2xBitMap;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
+ BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
+ up(&Adapter->NVMRdmWrmLock);
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap,
+ sizeof(struct bcm_flash2x_bitmap))) {
+ kfree(psFlash2xBitMap);
+ return -EFAULT;
+ }
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ kfree(psFlash2xBitMap);
+ return Status;
+}
- if (IoBuffer.OutputLength != sizeof(struct bcm_flash2x_bitmap))
- return -EINVAL;
+static int bcm_char_ioctl_set_active_section(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ enum bcm_flash2x_section_val eFlash2xSectionVal = 0;
+ INT Status = STATUS_FAILURE;
+ struct bcm_ioctl_buffer IoBuffer;
- psFlash2xBitMap = kzalloc(sizeof(struct bcm_flash2x_bitmap), GFP_KERNEL);
- if (psFlash2xBitMap == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory is not available");
- return -ENOMEM;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SET_ACTIVE_SECTION Called");
- /* Reading the Flash Sectio Bit map */
- down(&Adapter->NVMRdmWrmLock);
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- kfree(psFlash2xBitMap);
- return -EACCES;
- }
+ Status = copy_from_user(&eFlash2xSectionVal,
+ IoBuffer.InputBuffer, sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of flash section val failed");
+ return -EFAULT;
+ }
+
+ down(&Adapter->NVMRdmWrmLock);
+
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(struct bcm_flash2x_bitmap))) {
- kfree(psFlash2xBitMap);
- return -EFAULT;
- }
+ return -EACCES;
+ }
- kfree(psFlash2xBitMap);
+ Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
+ if (Status)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+			"Failed to make its priority highest. Status %d",
+ Status);
+
+ up(&Adapter->NVMRdmWrmLock);
+
+ return Status;
+}
+
+static int bcm_char_ioctl_copy_section(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_flash2x_copy_section sCopySectStrut = {0};
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_SUCCESS;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_COPY_SECTION Called");
+
+ Adapter->bAllDSDWriteAllow = false;
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_SET_ACTIVE_SECTION: {
- enum bcm_flash2x_section_val eFlash2xSectionVal = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called");
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed Status :%d", Status);
+ return -EFAULT;
+ }
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer,
+ sizeof(struct bcm_flash2x_copy_section));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of Copy_Section_Struct failed with Status :%d",
+ Status);
+ return -EFAULT;
+ }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+		"Source Section :%x", sCopySectStrut.SrcSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+		"Destination Section :%x", sCopySectStrut.DstSection);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "offset :%x", sCopySectStrut.offset);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "NOB :%x", sCopySectStrut.numOfBytes);
- Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return -EFAULT;
- }
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Source Section<%x> does not exist in Flash ",
+ sCopySectStrut.SrcSection);
+ return -EINVAL;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+			"Destination Section<%x> does not exist in Flash ",
+ sCopySectStrut.DstSection);
+ return -EINVAL;
+ }
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Source and Destination section should be different");
+ return -EINVAL;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ down(&Adapter->NVMRdmWrmLock);
- Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal);
- if (Status)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed to make it's priority Highest. Status %d", Status);
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
}
- break;
- case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: {
- /* Right Now we are taking care of only DSD */
- Adapter->bAllDSDWriteAllow = false;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
- Status = STATUS_SUCCESS;
+ if (sCopySectStrut.SrcSection == ISO_IMAGE1 ||
+ sCopySectStrut.SrcSection == ISO_IMAGE2) {
+ if (IsNonCDLessDevice(Adapter)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Device is Non-CDLess hence won't have ISO !!");
+ Status = -EINVAL;
+ } else if (sCopySectStrut.numOfBytes == 0) {
+ Status = BcmCopyISO(Adapter, sCopySectStrut);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Partial Copy of ISO section is not Allowed..");
+ Status = STATUS_FAILURE;
+ }
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
}
- break;
- case IOCTL_BCM_COPY_SECTION: {
- struct bcm_flash2x_copy_section sCopySectStrut = {0};
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called");
+ Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
+ sCopySectStrut.DstSection,
+ sCopySectStrut.offset,
+ sCopySectStrut.numOfBytes);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
+}
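The helper above, like the others split out of bcm_char_ioctl(), uses a two-stage copy: first the fixed-size struct bcm_ioctl_buffer descriptor is pulled from user space, then the payload it points at. A minimal stand-alone sketch of that idiom; the demo_* names and the struct layout are illustrative only, not the driver's real definitions:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Illustrative descriptor: a user pointer plus the payload length. */
    struct demo_ioctl_buffer {
            void __user *InputBuffer;
            unsigned long InputLength;
    };

    static int demo_fetch_payload(void __user *argp, void *payload, size_t size)
    {
            struct demo_ioctl_buffer io;

            /* Stage 1: copy the descriptor itself. */
            if (copy_from_user(&io, argp, sizeof(io)))
                    return -EFAULT;

            /* Refuse undersized requests before touching the user pointer. */
            if (io.InputLength < size)
                    return -EINVAL;

            /* Stage 2: copy the payload the descriptor points at. */
            if (copy_from_user(payload, io.InputBuffer, size))
                    return -EFAULT;

            return 0;
    }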
- Adapter->bAllDSDWriteAllow = false;
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
+static int bcm_char_ioctl_get_flash_cs_info(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_SUCCESS;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ " IOCTL_BCM_GET_FLASH_CS_INFO Called");
+
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
+
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Connected device does not have flash");
+ return -EINVAL;
+ }
+
+ if (IsFlash2x(Adapter) == TRUE) {
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
return -EINVAL;
- }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ Adapter->psFlash2xCSInfo,
+ sizeof(struct bcm_flash2x_cs_info)))
return -EFAULT;
- }
+ } else {
+ if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
+ return -EINVAL;
- Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(struct bcm_flash2x_copy_section));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo,
+ sizeof(struct bcm_flash_cs_info)))
return -EFAULT;
- }
+ }
+ return Status;
+}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes);
+static int bcm_char_ioctl_select_dsd(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ UINT SectOfset = 0;
+ enum bcm_flash2x_section_val eFlash2xSectionVal;
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source Section<%x> does not exist in Flash ", sCopySectStrut.SrcSection);
- return -EINVAL;
- }
+ eFlash2xSectionVal = NO_SECTION_VAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_SELECT_DSD Called");
- if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exist in Flash ", sCopySectStrut.DstSection);
- return -EINVAL;
- }
+ if (IsFlash2x(Adapter) != TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash Does not have 2.x map");
+ return -EINVAL;
+ }
- if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source and Destination section should be different");
- return -EINVAL;
- }
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of IOCTL BUFFER failed");
+ return -EFAULT;
+ }
+ Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer,
+ sizeof(INT));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy of flash section val failed");
+ return -EFAULT;
+ }
- down(&Adapter->NVMRdmWrmLock);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Read Section :%d", eFlash2xSectionVal);
+ if ((eFlash2xSectionVal != DSD0) &&
+ (eFlash2xSectionVal != DSD1) &&
+ (eFlash2xSectionVal != DSD2)) {
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Passed section<%x> is not DSD section",
+ eFlash2xSectionVal);
+ return STATUS_FAILURE;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- up(&Adapter->NVMRdmWrmLock);
- return -EACCES;
- }
+ SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
+ if (SectOfset == INVALID_OFFSET) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Provided Section val <%d> does not exist in Flash 2.x",
+ eFlash2xSectionVal);
+ return -EINVAL;
+ }
- if (sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2) {
- if (IsNonCDLessDevice(Adapter)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is Non-CDLess hence won't have ISO !!");
- Status = -EINVAL;
- } else if (sCopySectStrut.numOfBytes == 0) {
- Status = BcmCopyISO(Adapter, sCopySectStrut);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Partial Copy of ISO section is not Allowed..");
- Status = STATUS_FAILURE;
- }
- up(&Adapter->NVMRdmWrmLock);
- return Status;
- }
+ Adapter->bAllDSDWriteAllow = TRUE;
+ Adapter->ulFlashCalStart = SectOfset;
+ Adapter->eActiveDSD = eFlash2xSectionVal;
- Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection,
- sCopySectStrut.DstSection, sCopySectStrut.offset, sCopySectStrut.numOfBytes);
- up(&Adapter->NVMRdmWrmLock);
+ return STATUS_SUCCESS;
+}
+
+static int bcm_char_ioctl_nvm_raw_read(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_nvm_readwrite stNVMRead;
+ struct bcm_ioctl_buffer IoBuffer;
+ unsigned int NOB;
+ INT BuffSize;
+ INT ReadOffset = 0;
+ UINT ReadBytes = 0;
+ PUCHAR pReadBuff;
+ void __user *OutPutBuff;
+ INT Status = STATUS_FAILURE;
+
+ if (Adapter->eNVMType != NVM_FLASH) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "NVM TYPE is not Flash");
+ return -EINVAL;
}
- break;
- case IOCTL_BCM_GET_FLASH_CS_INFO: {
- Status = STATUS_SUCCESS;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called");
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "copy_from_user 1 failed\n");
+ return -EFAULT;
+ }
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
+ if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer,
+ sizeof(struct bcm_nvm_readwrite)))
+ return -EFAULT;
- if (Adapter->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Connected device does not have flash");
- Status = -EINVAL;
- break;
- }
+ NOB = stNVMRead.uiNumBytes;
+ /* In Raw-Read max Buff size : 64MB */
- if (IsFlash2x(Adapter) == TRUE) {
- if (IoBuffer.OutputLength < sizeof(struct bcm_flash2x_cs_info))
- return -EINVAL;
+ if (NOB > DEFAULT_BUFF_SIZE)
+ BuffSize = DEFAULT_BUFF_SIZE;
+ else
+ BuffSize = NOB;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(struct bcm_flash2x_cs_info)))
- return -EFAULT;
- } else {
- if (IoBuffer.OutputLength < sizeof(struct bcm_flash_cs_info))
- return -EINVAL;
+ ReadOffset = stNVMRead.uiOffset;
+ OutPutBuff = stNVMRead.pBuffer;
- if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(struct bcm_flash_cs_info)))
- return -EFAULT;
- }
+ pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
+ if (pReadBuff == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Memory allocation failed for Flash 2.x Read Structure");
+ return -ENOMEM;
}
- break;
+ down(&Adapter->NVMRdmWrmLock);
- case IOCTL_BCM_SELECT_DSD: {
- UINT SectOfset = 0;
- enum bcm_flash2x_section_val eFlash2xSectionVal;
- eFlash2xSectionVal = NO_SECTION_VAL;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SELECT_DSD Called");
+ if ((Adapter->IdleMode == TRUE) ||
+ (Adapter->bShutStatus == TRUE) ||
+ (Adapter->bPreparingForLowPowerMode == TRUE)) {
- if (IsFlash2x(Adapter) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map");
- return -EINVAL;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
+ kfree(pReadBuff);
+ up(&Adapter->NVMRdmWrmLock);
+ return -EACCES;
+ }
+
+ Adapter->bFlashRawRead = TRUE;
+
+ while (NOB) {
+ if (NOB > DEFAULT_BUFF_SIZE)
+ ReadBytes = DEFAULT_BUFF_SIZE;
+ else
+ ReadBytes = NOB;
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ /* Reading the data from Flash 2.x */
+ Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff,
+ ReadOffset, ReadBytes);
if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Flash 2x read err with Status :%d", Status);
+ break;
}
- Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
+
+ BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, pReadBuff, ReadBytes);
+
+ Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Copy to user failed with status :%d", Status);
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
return -EFAULT;
}
+ NOB = NOB - ReadBytes;
+ if (NOB) {
+ ReadOffset = ReadOffset + ReadBytes;
+ OutPutBuff = OutPutBuff + ReadBytes;
+ }
+ }
+ Adapter->bFlashRawRead = false;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return Status;
+}
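The raw-read helper caps its kernel bounce buffer at DEFAULT_BUFF_SIZE and loops over the requested byte count, advancing the flash offset and the user pointer each pass. A condensed sketch of the same chunking pattern; demo_device_read() and DEMO_CHUNK_SIZE are placeholders for BeceemNVMRead() and the driver's buffer limit:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    #define DEMO_CHUNK_SIZE 4096        /* stand-in for DEFAULT_BUFF_SIZE */

    /* Stand-in for the real flash read: fill the buffer with a pattern. */
    static int demo_device_read(void *buf, unsigned int off, unsigned int len)
    {
            memset(buf, 0xA5, len);
            return 0;
    }

    static int demo_chunked_read(void __user *dst, unsigned int off,
                                 unsigned int nob)
    {
            unsigned int chunk;
            void *kbuf;
            int ret = 0;

            kbuf = kzalloc(min_t(unsigned int, nob, DEMO_CHUNK_SIZE), GFP_KERNEL);
            if (!kbuf)
                    return -ENOMEM;

            while (nob) {
                    chunk = min_t(unsigned int, nob, DEMO_CHUNK_SIZE);

                    ret = demo_device_read(kbuf, off, chunk);
                    if (ret)
                            break;

                    if (copy_to_user(dst, kbuf, chunk)) {
                            ret = -EFAULT;
                            break;
                    }

                    /* Advance both the device offset and the user pointer. */
                    nob -= chunk;
                    off += chunk;
                    dst += chunk;
            }

            kfree(kbuf);
            return ret;
    }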
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
- if ((eFlash2xSectionVal != DSD0) &&
- (eFlash2xSectionVal != DSD1) &&
- (eFlash2xSectionVal != DSD2)) {
+static int bcm_char_ioctl_cntrlmsg_mask(void __user *argp,
+ struct bcm_mini_adapter *Adapter, struct bcm_tarang_data *pTarang)
+{
+ struct bcm_ioctl_buffer IoBuffer;
+ INT Status = STATUS_FAILURE;
+ ULONG RxCntrlMsgBitMask = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Passed section<%x> is not DSD section", eFlash2xSectionVal);
- return STATUS_FAILURE;
- }
+ /* Copy Ioctl Buffer structure */
+ Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "copy of Ioctl buffer from user space failed");
+ return -EFAULT;
+ }
- SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
- if (SectOfset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exist in Flash 2.x", eFlash2xSectionVal);
- return -EINVAL;
- }
+ if (IoBuffer.InputLength != sizeof(unsigned long))
+ return -EINVAL;
- Adapter->bAllDSDWriteAllow = TRUE;
- Adapter->ulFlashCalStart = SectOfset;
- Adapter->eActiveDSD = eFlash2xSectionVal;
+ Status = copy_from_user(&RxCntrlMsgBitMask,
+ IoBuffer.InputBuffer, IoBuffer.InputLength);
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "copy of control bit mask failed from user space");
+ return -EFAULT;
}
- Status = STATUS_SUCCESS;
- break;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "\n Got user defined cntrl msg bit mask :%lx",
+ RxCntrlMsgBitMask);
+ pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
- case IOCTL_BCM_NVM_RAW_READ: {
- struct bcm_nvm_readwrite stNVMRead;
- INT NOB;
- INT BuffSize;
- INT ReadOffset = 0;
- UINT ReadBytes = 0;
- PUCHAR pReadBuff;
- void __user *OutPutBuff;
+ return Status;
+}
- if (Adapter->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NVM TYPE is not Flash");
- return -EINVAL;
- }
+static int bcm_char_ioctl_get_device_driver_info(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_driver_info DevInfo;
+ struct bcm_ioctl_buffer IoBuffer;
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- return -EFAULT;
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
- if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
+ memset(&DevInfo, 0, sizeof(DevInfo));
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+ DevInfo.u32NVMType = Adapter->eNVMType;
+ DevInfo.u32InterfaceType = BCM_USB;
- NOB = stNVMRead.uiNumBytes;
- /* In Raw-Read max Buff size : 64MB */
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
- if (NOB > DEFAULT_BUFF_SIZE)
- BuffSize = DEFAULT_BUFF_SIZE;
- else
- BuffSize = NOB;
+ if (IoBuffer.OutputLength < sizeof(DevInfo))
+ return -EINVAL;
- ReadOffset = stNVMRead.uiOffset;
- OutPutBuff = stNVMRead.pBuffer;
+ if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
+ return -EFAULT;
- pReadBuff = kzalloc(BuffSize , GFP_KERNEL);
- if (pReadBuff == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure");
- Status = -ENOMEM;
- break;
- }
- down(&Adapter->NVMRdmWrmLock);
+ return STATUS_SUCCESS;
+}
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
+static int bcm_char_ioctl_time_since_net_entry(void __user *argp,
+ struct bcm_mini_adapter *Adapter)
+{
+ struct bcm_time_elapsed stTimeElapsedSinceNetEntry = {0};
+ struct bcm_ioctl_buffer IoBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n");
- kfree(pReadBuff);
- up(&Adapter->NVMRdmWrmLock);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+
+ if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
+ return -EFAULT;
+
+ if (IoBuffer.OutputLength < sizeof(struct bcm_time_elapsed))
+ return -EINVAL;
+
+ stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry =
+ get_seconds() - Adapter->liTimeSinceLastNetEntry;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry,
+ sizeof(struct bcm_time_elapsed)))
+ return -EFAULT;
+
+ return STATUS_SUCCESS;
+}
+
+
+static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
+{
+ struct bcm_tarang_data *pTarang = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct bcm_mini_adapter *Adapter = pTarang->Adapter;
+ INT Status = STATUS_FAILURE;
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
+ cmd, arg);
+
+ if (_IOC_TYPE(cmd) != BCM_IOCTL)
+ return -EFAULT;
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
+ else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
+ Status = STATUS_SUCCESS;
+
+ if (Status)
+ return -EFAULT;
+
+ if (Adapter->device_removed)
+ return -EFAULT;
+
+ if (false == Adapter->fw_download_done) {
+ switch (cmd) {
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ:
+ case IOCTL_BCM_GPIO_SET_REQUEST:
+ case IOCTL_BCM_GPIO_STATUS_REQUEST:
return -EACCES;
+ default:
+ break;
}
+ }
- Adapter->bFlashRawRead = TRUE;
+ Status = vendorextnIoctl(Adapter, cmd, arg);
+ if (Status != CONTINUE_COMMON_PATH)
+ return Status;
- while (NOB) {
- if (NOB > DEFAULT_BUFF_SIZE)
- ReadBytes = DEFAULT_BUFF_SIZE;
- else
- ReadBytes = NOB;
+ switch (cmd) {
+ /* Rdms for Swin Idle... */
+ case IOCTL_BCM_REGISTER_READ_PRIVATE:
+ Status = bcm_char_ioctl_reg_read_private(argp, Adapter);
+ return Status;
- /* Reading the data from Flash 2.x */
- Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff, ReadOffset, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status);
- break;
- }
+ case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
+ Status = bcm_char_ioctl_reg_write_private(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_REGISTER_READ:
+ case IOCTL_BCM_EEPROM_REGISTER_READ:
+ Status = bcm_char_ioctl_eeprom_reg_read(argp, Adapter);
+ return Status;
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes);
+ case IOCTL_BCM_REGISTER_WRITE:
+ case IOCTL_BCM_EEPROM_REGISTER_WRITE:
+ Status = bcm_char_ioctl_eeprom_reg_write(argp, Adapter, cmd);
+ return Status;
- Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
- return -EFAULT;
- }
- NOB = NOB - ReadBytes;
- if (NOB) {
- ReadOffset = ReadOffset + ReadBytes;
- OutPutBuff = OutPutBuff + ReadBytes;
- }
- }
- Adapter->bFlashRawRead = false;
- up(&Adapter->NVMRdmWrmLock);
- kfree(pReadBuff);
+ case IOCTL_BCM_GPIO_SET_REQUEST:
+ Status = bcm_char_ioctl_gpio_set_request(argp, Adapter);
+ return Status;
+
+ case BCM_LED_THREAD_STATE_CHANGE_REQ:
+ Status = bcm_char_ioctl_led_thread_state_change_req(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_STATUS_REQUEST:
+ Status = bcm_char_ioctl_gpio_status_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_MULTI_REQUEST:
+ Status = bcm_char_ioctl_gpio_multi_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GPIO_MODE_REQUEST:
+ Status = bcm_char_ioctl_gpio_mode_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_MAC_ADDR_REQ:
+ case IOCTL_LINK_REQ:
+ case IOCTL_CM_REQUEST:
+ case IOCTL_SS_INFO_REQ:
+ case IOCTL_SEND_CONTROL_MESSAGE:
+ case IOCTL_IDLE_REQ:
+ Status = bcm_char_ioctl_misc_request(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_START:
+ Status = bcm_char_ioctl_buffer_download_start(Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD:
+ Status = bcm_char_ioctl_buffer_download(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
+ Status = bcm_char_ioctl_buffer_download_stop(argp, Adapter);
+ return Status;
+
+
+ case IOCTL_BE_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
+ break;
+
+ case IOCTL_RTPS_BUCKET_SIZE:
+ Status = 0;
+ if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+ Status = -EFAULT;
break;
- }
- case IOCTL_BCM_CNTRLMSG_MASK: {
- ULONG RxCntrlMsgBitMask = 0;
+ case IOCTL_CHIP_RESET:
+ Status = bcm_char_ioctl_chip_reset(Adapter);
+ return Status;
- /* Copy Ioctl Buffer structure */
- Status = copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
- return -EFAULT;
- }
+ case IOCTL_QOS_THRESHOLD:
+ Status = bcm_char_ioctl_qos_threshold(arg, Adapter);
+ return Status;
- if (IoBuffer.InputLength != sizeof(unsigned long)) {
- Status = -EINVAL;
- break;
- }
+ case IOCTL_DUMP_PACKET_INFO:
+ DumpPackInfo(Adapter);
+ DumpPhsRules(&Adapter->stBCMPhsContext);
+ Status = STATUS_SUCCESS;
+ break;
- Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
+ case IOCTL_GET_PACK_INFO:
+ if (copy_to_user(argp, &Adapter->PackInfo,
+ sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
return -EFAULT;
+ Status = STATUS_SUCCESS;
+ break;
+
+ case IOCTL_BCM_SWITCH_TRANSFER_MODE:
+ Status = bcm_char_ioctl_switch_transfer_mode(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_DRIVER_VERSION:
+ Status = bcm_char_ioctl_get_driver_version(argp);
+ return Status;
+
+ case IOCTL_BCM_GET_CURRENT_STATUS:
+ Status = bcm_char_ioctl_get_current_status(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_SET_MAC_TRACING:
+ Status = bcm_char_ioctl_set_mac_tracing(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_DSX_INDICATION:
+ Status = bcm_char_ioctl_get_dsx_indication(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_HOST_MIBS:
+ Status = bcm_char_ioctl_get_host_mibs(argp, Adapter, pTarang);
+ return Status;
+
+ case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
+ if ((false == Adapter->bTriedToWakeUpFromlowPowerMode) &&
+ (TRUE == Adapter->IdleMode)) {
+ Adapter->usIdleModePattern = ABORT_IDLE_MODE;
+ Adapter->bWakeUpDevice = TRUE;
+ wake_up(&Adapter->process_rx_cntrlpkt);
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
- pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
- }
- break;
- case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: {
- struct bcm_driver_info DevInfo;
+ Status = STATUS_SUCCESS;
+ break;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+ case IOCTL_BCM_BULK_WRM:
+ Status = bcm_char_ioctl_bulk_wrm(argp, Adapter, cmd);
+ return Status;
- memset(&DevInfo, 0, sizeof(DevInfo));
- DevInfo.MaxRDMBufferSize = BUFFER_4K;
- DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
- DevInfo.u32RxAlignmentCorrection = 0;
- DevInfo.u32NVMType = Adapter->eNVMType;
- DevInfo.u32InterfaceType = BCM_USB;
+ case IOCTL_BCM_GET_NVM_SIZE:
+ Status = bcm_char_ioctl_get_nvm_size(argp, Adapter);
+ return Status;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ case IOCTL_BCM_CAL_INIT:
+ Status = bcm_char_ioctl_cal_init(argp, Adapter);
+ return Status;
- if (IoBuffer.OutputLength < sizeof(DevInfo))
- return -EINVAL;
+ case IOCTL_BCM_SET_DEBUG:
+ Status = bcm_char_ioctl_set_debug(argp, Adapter);
+ return Status;
- if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
- return -EFAULT;
- }
- break;
+ case IOCTL_BCM_NVM_READ:
+ case IOCTL_BCM_NVM_WRITE:
+ Status = bcm_char_ioctl_nvm_rw(argp, Adapter, cmd);
+ return Status;
- case IOCTL_BCM_TIME_SINCE_NET_ENTRY: {
- struct bcm_time_elapsed stTimeElapsedSinceNetEntry = {0};
+ case IOCTL_BCM_FLASH2X_SECTION_READ:
+ Status = bcm_char_ioctl_flash2x_section_read(argp, Adapter);
+ return Status;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
+ case IOCTL_BCM_FLASH2X_SECTION_WRITE:
+ Status = bcm_char_ioctl_flash2x_section_write(argp, Adapter);
+ return Status;
- if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
+ case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP:
+ Status = bcm_char_ioctl_flash2x_section_bitmap(argp, Adapter);
+ return Status;
- if (IoBuffer.OutputLength < sizeof(struct bcm_time_elapsed))
- return -EINVAL;
+ case IOCTL_BCM_SET_ACTIVE_SECTION:
+ Status = bcm_char_ioctl_set_active_section(argp, Adapter);
+ return Status;
- stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
+ case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION:
+ /* Right Now we are taking care of only DSD */
+ Adapter->bAllDSDWriteAllow = false;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
+ Status = STATUS_SUCCESS;
+ break;
- if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(struct bcm_time_elapsed)))
- return -EFAULT;
- }
- break;
+ case IOCTL_BCM_COPY_SECTION:
+ Status = bcm_char_ioctl_copy_section(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_GET_FLASH_CS_INFO:
+ Status = bcm_char_ioctl_get_flash_cs_info(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_SELECT_DSD:
+ Status = bcm_char_ioctl_select_dsd(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_NVM_RAW_READ:
+ Status = bcm_char_ioctl_nvm_raw_read(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_CNTRLMSG_MASK:
+ Status = bcm_char_ioctl_cntrlmsg_mask(argp, Adapter, pTarang);
+ return Status;
+
+ case IOCTL_BCM_GET_DEVICE_DRIVER_INFO:
+ Status = bcm_char_ioctl_get_device_driver_info(argp, Adapter);
+ return Status;
+
+ case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
+ Status = bcm_char_ioctl_time_since_net_entry(argp, Adapter);
+ return Status;
case IOCTL_CLOSE_NOTIFICATION:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION");
break;
default:
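The rewritten dispatcher keeps the usual unlocked_ioctl preamble seen above: check the magic with _IOC_TYPE(), probe the user pointer with access_ok() according to _IOC_DIR(), then hand off to a per-command helper. A minimal sketch of that preamble for a hypothetical ioctl family; the 'D' magic and demo_* names are made up, and the three-argument access_ok() matches kernels of this vintage:

    #include <linux/fs.h>
    #include <linux/ioctl.h>
    #include <linux/uaccess.h>

    #define DEMO_IOCTL_MAGIC 'D'        /* hypothetical magic, not BCM_IOCTL */

    static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
            void __user *argp = (void __user *)arg;

            /* Reject commands that do not belong to this driver. */
            if (_IOC_TYPE(cmd) != DEMO_IOCTL_MAGIC)
                    return -ENOTTY;

            /* Probe the user buffer in the direction the command encodes. */
            if ((_IOC_DIR(cmd) & _IOC_READ) &&
                !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)))
                    return -EFAULT;
            if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
                !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)))
                    return -EFAULT;

            switch (cmd) {
            /* ...dispatch to per-command helpers here... */
            default:
                    return -ENOTTY;
            }
    }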
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
}
static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
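This Bcmnet.c hunk only tracks the ndo_select_queue() prototype change that added a select_queue_fallback_t argument. A hedged sketch of a queue-selection callback with the new signature; demo_classify() stands in for the driver's ClassifyPacket():

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Placeholder for the driver's own classifier (e.g. ClassifyPacket). */
    static u16 demo_classify(struct net_device *dev, struct sk_buff *skb)
    {
            return 0;        /* everything to queue 0 in this sketch */
    }

    static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                 void *accel_priv,
                                 select_queue_fallback_t fallback)
    {
            /*
             * fallback() provides the core's default queue pick; a driver
             * with its own classification can simply ignore it.
             */
            return demo_classify(dev, skb);
    }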
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index cc91b5e934aa..632f81a7c63f 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -975,8 +975,8 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: "
- "%pM", psfCSType->cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
+ psfCSType->cCPacketClassificationRule.
u8EthernetSourceMACAddress);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
@@ -1092,18 +1092,16 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: "
- "0x%*ph ", 4, psfCSType->
- cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%*ph ",
+ 4, psfCSType->cCPacketClassificationRule.
u8ProtocolSourcePortRange);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: "
- "0x%*ph ", 4, psfCSType->
- cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%*ph ",
+ 4, psfCSType->cCPacketClassificationRule.
u8ProtocolDestPortRange);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
@@ -1118,8 +1116,8 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: "
- "%pM", psfCSType->cCPacketClassificationRule.
+ DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
+ psfCSType->cCPacketClassificationRule.
u8EthernetSourceMACAddress);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
@@ -1637,6 +1635,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
struct bcm_add_indication_alt *pstAddIndication = NULL;
struct bcm_change_indication *pstChangeIndication = NULL;
struct bcm_leader *pLeader = NULL;
+ INT uiSearchRuleIndex = 0;
+ ULONG ulSFID;
/*
* Otherwise the message contains a target address from where we need to
@@ -1660,7 +1660,6 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
switch (pstAddIndication->u8Type) {
case DSA_REQ:
- {
pLeader->PLength = sizeof(struct bcm_add_indication_alt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
@@ -1671,22 +1670,16 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
- }
- break;
+ break;
case DSA_RSP:
- {
pLeader->PLength = sizeof(struct bcm_add_indication_alt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
*((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
= *pstAddIndication;
((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
-
- } /* no break here..we should go down. */
+ /* FALLTHROUGH */
case DSA_ACK:
- {
- UINT uiSearchRuleIndex = 0;
-
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
uiSearchRuleIndex = SearchFreeSfid(Adapter);
@@ -1694,7 +1687,7 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
uiSearchRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
- if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
pstAddIndication->u8Direction;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
@@ -1769,10 +1762,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
kfree(pstAddIndication);
return false;
}
- }
- break;
+ break;
case DSC_REQ:
- {
pLeader->PLength = sizeof(struct bcm_change_indication);
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
@@ -1782,26 +1773,21 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
- }
- break;
+ break;
case DSC_RSP:
- {
pLeader->PLength = sizeof(struct bcm_change_indication);
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
*((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- }
+ /* FALLTHROUGH */
case DSC_ACK:
- {
- UINT uiSearchRuleIndex = 0;
-
pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
if (uiSearchRuleIndex > NO_OF_QUEUES-1)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
- if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
+ if (uiSearchRuleIndex < NO_OF_QUEUES) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
if (pstChangeIndication->sfActiveSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
@@ -1849,13 +1835,8 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
kfree(pstAddIndication);
return false;
}
- }
- break;
+ break;
case DSD_REQ:
- {
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
pLeader->PLength = sizeof(struct bcm_del_indication);
*((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((struct bcm_del_indication *)pstAddIndication);
@@ -1872,12 +1853,10 @@ bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- }
+ /* FALLTHROUGH */
case DSD_RSP:
- {
/* Do nothing as SF has already got Deleted */
- }
- break;
+ break;
case DSD_ACK:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
break;
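The CmHost.c changes above hoist uiSearchRuleIndex and ulSFID to function scope so the per-case braces can go, and mark the intentional case fall-throughs explicitly instead of relying on a prose remark. A small stand-alone example of the annotated fall-through pattern (the case values are arbitrary):

    /*
     * Deliberate fall-through: the "response" case does its extra setup and
     * then shares the bookkeeping of the "acknowledge" case, much like the
     * DSA_RSP -> DSA_ACK path above.
     */
    static int demo_handle(int type)
    {
            int handled = 0;

            switch (type) {
            case 1:                 /* e.g. a _RSP message: build the reply first */
                    handled += 1;
                    /* FALLTHROUGH */
            case 2:                 /* e.g. the matching _ACK: common bookkeeping */
                    handled += 10;
                    break;
            default:
                    handled = -1;
                    break;
            }
            return handled;
    }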
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index ed285b2d892d..f1d7cb82fd7e 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -3,7 +3,7 @@
#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
-#define MIPS_CLOCK_REG 0x0f000820
+#define MIPS_CLOCK_REG 0x0f000820
/* DDR INIT-133Mhz */
#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
@@ -818,13 +818,13 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
if ((Adapter->chip_id != BCS220_2) &&
(Adapter->chip_id != BCS220_2BC) &&
(Adapter->chip_id != BCS220_3)) {
- retval = rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue |= 0x44;
- retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
+ retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -871,7 +871,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
case 0xbece0121:
case 0xbece0130:
case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
switch (Adapter->DDRSetting) {
case DDR_80_MHZ:
psDDRSetting = asT3_DDRSetting80MHz;
@@ -933,7 +933,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
value = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register Count is =%lu\n", RegCount);
while (RegCount && !retval) {
if (uiClockSetting && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
value = uiClockSetting;
@@ -990,12 +990,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
* we will change this when we will have internal PMU.
*/
if (Adapter->PmuMode == HYBRID_MODE_7C) {
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1006,12 +1006,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1024,12 +1024,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
} else if (Adapter->PmuMode == HYBRID_MODE_6) {
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1040,12 +1040,12 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
- retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
+ retval = rdmalt(Adapter, (UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if (retval < 0) {
BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
@@ -1094,8 +1094,8 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
break;
- default:
- return -EINVAL;
+ default:
+ return -EINVAL;
}
break;
@@ -1215,7 +1215,7 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
ul_ddr_setting_load_addr += sizeof(ULONG);
if (!retval) {
if (bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018)) {
- value = (psDDRSetting->ulRegValue |(1<<8));
+ value = (psDDRSetting->ulRegValue | (1<<8));
if (STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
&value, sizeof(value))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
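The DDRInit.c hunk is mostly checkpatch spacing, but the code it touches is built around a read-modify-write of chip registers through rdmalt()/wrmalt(). A toy sketch of that idiom; demo_regs[] and the demo_* accessors are stand-ins for the driver's USB register access, not its API:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Toy register file; the real rdmalt()/wrmalt() go over USB instead. */
    static u32 demo_regs[16];

    static int demo_rdm(unsigned int idx, u32 *val)
    {
            if (idx >= ARRAY_SIZE(demo_regs))
                    return -EINVAL;
            *val = demo_regs[idx];
            return 0;
    }

    static int demo_wrm(unsigned int idx, u32 val)
    {
            if (idx >= ARRAY_SIZE(demo_regs))
                    return -EINVAL;
            demo_regs[idx] = val;
            return 0;
    }

    /* Read-modify-write, the same shape as the uiResetValue |= 0x44 sequence. */
    static int demo_set_bits(unsigned int idx, u32 mask)
    {
            u32 val;
            int ret = demo_rdm(idx, &val);

            if (ret < 0)
                    return ret;
            return demo_wrm(idx, val | mask);
    }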
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 94f32728f7c8..7c04c73e3bc8 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -1,5 +1,5 @@
#include "headers.h"
-
+#include <linux/usb/ch9.h>
static struct usb_device_id InterfaceUsbtable[] = {
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
@@ -30,19 +30,22 @@ static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
int i = 0;
/* Wake up the wait_queue... */
- if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running &
+ BCM_LED_THREAD_RUNNING_ACTIVELY) {
psIntfAdapter->psAdapter->DriverState = DRIVER_HALT;
wake_up(&psIntfAdapter->psAdapter->LEDInfo.notify_led_event);
}
reset_card_proc(psIntfAdapter->psAdapter);
/*
- * worst case time taken by the RDM/WRM will be 5 sec. will check after every 100 ms
- * to accertain the device is not being accessed. After this No RDM/WRM should be made.
+	 * Worst-case time taken by the RDM/WRM will be 5 sec. Will check after
+	 * every 100 ms to ascertain that the device is not being accessed.
+	 * After this no RDM/WRM should be made.
*/
while (psIntfAdapter->psAdapter->DeviceAccess) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Device is being accessed.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Device is being accessed.\n");
msleep(100);
}
/* Free interrupt URB */
@@ -71,6 +74,7 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
{
u32 ulReg;
int bytes;
+ struct bcm_interface_adapter *interfaceAdapter;
/* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
@@ -80,7 +84,9 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
ulReg = ntohl(EP2_CFG_REG);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
- if (((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE) {
+ interfaceAdapter =
+ (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
+ if (interfaceAdapter->bHighSpeedDevice) {
ulReg = ntohl(EP2_CFG_INT);
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
} else {
@@ -98,8 +104,8 @@ static void ConfigureEndPointTypesThroughEEPROM(struct bcm_mini_adapter *Adapter
/* Program TX EP as interrupt(Alternate Setting) */
bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reading of Tx EP failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL, "reading of Tx EP failed\n");
return;
}
ulReg |= 0x6;
@@ -150,7 +156,8 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
struct net_device *ndev;
/* Reserve one extra queue for the bit-bucket */
- ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter), NO_OF_QUEUES+1);
+ ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter),
+ NO_OF_QUEUES + 1);
if (ndev == NULL) {
dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
return -ENOMEM;
@@ -169,13 +176,14 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
/*
* Technically, one can start using BCM_DEBUG_PRINT after this point.
- * However, realize that by default the Type/Subtype bitmaps are all zero now;
- * so no prints will actually appear until the TestApp turns on debug paths via
- * the ioctl(); so practically speaking, in early init, no logging happens.
+ * However, realize that by default the Type/Subtype bitmaps are all
+ * zero now; so no prints will actually appear until the TestApp turns
+ * on debug paths via the ioctl(); so practically speaking, in early
+ * init, no logging happens.
*
- * A solution (used below): we explicitly set the bitmaps to 1 for Type=DBG_TYPE_INITEXIT
- * and ALL subtype's of the same. Now all bcm debug statements get logged, enabling debug
- * during early init.
+ * A solution (used below): we explicitly set the bitmaps to 1 for
+ * Type=DBG_TYPE_INITEXIT and ALL subtype's of the same. Now all bcm
+ * debug statements get logged, enabling debug during early init.
* Further, we turn this OFF once init_module() completes.
*/
@@ -191,7 +199,7 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
/* Allocate interface adapter structure */
psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter),
- GFP_KERNEL);
+ GFP_KERNEL);
if (psIntfAdapter == NULL) {
AdapterFree(psAdapter);
return -ENOMEM;
@@ -205,7 +213,7 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
usb_set_intfdata(intf, psIntfAdapter);
BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "psIntfAdapter 0x%p\n", psIntfAdapter);
+ "psIntfAdapter 0x%p\n", psIntfAdapter);
retval = InterfaceAdapterInit(psIntfAdapter);
if (retval) {
/* If the Firmware/Cfg File is not present
@@ -213,12 +221,13 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
* download the files.
*/
if (-ENOENT == retval) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "File Not Found. Use app to download.\n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "File Not Found. Use app to download.\n");
return STATUS_SUCCESS;
}
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "InterfaceAdapterInit failed.\n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL, "InterfaceAdapterInit failed.\n");
usb_set_intfdata(intf, NULL);
udev = interface_to_usbdev(intf);
usb_put_dev(udev);
@@ -228,7 +237,10 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
if (psAdapter->chip_id > T3) {
uint32_t uiNackZeroLengthInt = 4;
- retval = wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt));
+ retval =
+ wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT,
+ &uiNackZeroLengthInt,
+ sizeof(uiNackZeroLengthInt));
if (retval)
return retval;
}
@@ -242,9 +254,11 @@ static int usbbcm_device_probe(struct usb_interface *intf, const struct usb_devi
intf->needs_remote_wakeup = 1;
usb_enable_autosuspend(udev);
device_init_wakeup(&intf->dev, 1);
- INIT_WORK(&psIntfAdapter->usbSuspendWork, putUsbSuspend);
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Enabling USB Auto-Suspend\n");
+ INIT_WORK(&psIntfAdapter->usbSuspendWork,
+ putUsbSuspend);
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Enabling USB Auto-Suspend\n");
#endif
} else {
intf->needs_remote_wakeup = 0;
@@ -271,7 +285,7 @@ static void usbbcm_disconnect(struct usb_interface *intf)
if (psAdapter->bDoSuspend)
intf->needs_remote_wakeup = 0;
- psAdapter->device_removed = TRUE ;
+ psAdapter->device_removed = TRUE;
usb_set_intfdata(intf, NULL);
InterfaceAdapterFree(psIntfAdapter);
usb_put_dev(udev);
@@ -285,8 +299,10 @@ static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (psIntfAdapter->asUsbTcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Tx urb for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Tx urb for index %d\n",
+ i);
return -ENOMEM;
}
}
@@ -295,19 +311,25 @@ static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (psIntfAdapter->asUsbRcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx urb for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx urb for index %d\n",
+ i);
return -ENOMEM;
}
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer = kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
+ psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
+ kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx buffer for index %d\n", i);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx buffer for index %d\n",
+ i);
return -ENOMEM;
}
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length = MAX_DATA_BUFFER_SIZE;
+ psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length =
+ MAX_DATA_BUFFER_SIZE;
}
return 0;
}
@@ -322,24 +344,29 @@ static int device_run(struct bcm_interface_adapter *psIntfAdapter)
pr_err(DRV_NAME "InitCardAndDownloadFirmware failed.\n");
return status;
}
- if (TRUE == psIntfAdapter->psAdapter->fw_download_done) {
+ if (psIntfAdapter->psAdapter->fw_download_done) {
if (StartInterruptUrb(psIntfAdapter)) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Cannot send interrupt in URB\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Cannot send interrupt in URB\n");
}
/*
- * now register the cntrl interface.
- * after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
+ * now register the cntrl interface. after downloading the f/w
+ * waiting for 5 sec to get the mailbox interrupt.
*/
psIntfAdapter->psAdapter->waiting_to_fw_download_done = false;
- value = wait_event_timeout(psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
- psIntfAdapter->psAdapter->waiting_to_fw_download_done, 5*HZ);
+ value = wait_event_timeout(
+ psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
+ psIntfAdapter->psAdapter->waiting_to_fw_download_done,
+ 5 * HZ);
if (value == 0)
pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
- if (register_control_device_interface(psIntfAdapter->psAdapter) < 0) {
+ if (register_control_device_interface(
+ psIntfAdapter->psAdapter) < 0) {
pr_err(DRV_NAME ": Register Control Device failed.\n");
return -EIO;
}
@@ -347,81 +374,6 @@ static int device_run(struct bcm_interface_adapter *psIntfAdapter)
return 0;
}
-
-static inline int bcm_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-}
-
-static inline int bcm_usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
-}
-
-static inline int bcm_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
-}
-
-static inline int bcm_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
-}
-
-static inline int bcm_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_BULK);
-}
-
-static inline int bcm_usb_endpoint_xfer_control(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_CONTROL);
-}
-
-static inline int bcm_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_INT);
-}
-
-static inline int bcm_usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_ISOC);
-}
-
-static inline int bcm_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_bulk(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
-static inline int bcm_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_int(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
-static inline int bcm_usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_in(epd);
-}
-
-static inline int bcm_usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
-{
- return bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd);
-}
-
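The deleted bcm_usb_endpoint_* wrappers duplicated helpers that already exist in <linux/usb/ch9.h> (now included above), so the probe code below calls usb_endpoint_is_int_out(), usb_endpoint_is_bulk_in() and friends directly. A short sketch of walking an altsetting with those core helpers; demo_find_bulk_in() is illustrative, not part of the driver:

    #include <linux/usb.h>

    /* Walk the current altsetting and return its first bulk-in endpoint. */
    static const struct usb_endpoint_descriptor *
    demo_find_bulk_in(struct usb_interface *intf)
    {
            struct usb_host_interface *alt = intf->cur_altsetting;
            unsigned int i;

            for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                    const struct usb_endpoint_descriptor *epd =
                            &alt->endpoint[i].desc;

                    if (usb_endpoint_is_bulk_in(epd))
                            return epd;
            }
            return NULL;
    }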
static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
{
struct usb_host_interface *iface_desc;
@@ -429,23 +381,27 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
size_t buffer_size;
unsigned long value;
int retval = 0;
- int usedIntOutForBulkTransfer = 0 ;
+ int usedIntOutForBulkTransfer = 0;
bool bBcm16 = false;
UINT uiData = 0;
int bytes;
/* Store the usb dev into interface adapter */
- psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
+ psIntfAdapter->udev =
+ usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
- psIntfAdapter->bHighSpeedDevice = (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
+ psIntfAdapter->bHighSpeedDevice =
+ (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
bytes = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
- (u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
+ (u32 *) &(psIntfAdapter->psAdapter->chip_id),
+ sizeof(u32));
if (bytes < 0) {
retval = bytes;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "CHIP ID Read Failed\n");
return retval;
}
@@ -453,81 +409,119 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->psAdapter->chip_id &= ~0xF0;
dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
- psIntfAdapter->psAdapter->chip_id);
+ psIntfAdapter->psAdapter->chip_id);
iface_desc = psIntfAdapter->interface->cur_altsetting;
if (psIntfAdapter->psAdapter->chip_id == T3B) {
- /* T3B device will have EEPROM, check if EEPROM is proper and BCM16 can be done or not. */
+ /* T3B device will have EEPROM, check if EEPROM is proper and
+ * BCM16 can be done or not. */
BeceemEEPROMBulkRead(psIntfAdapter->psAdapter, &uiData, 0x0, 4);
if (uiData == BECM)
bBcm16 = TRUE;
- dev_info(&psIntfAdapter->udev->dev, "number of alternate setting %d\n",
- psIntfAdapter->interface->num_altsetting);
+ dev_info(&psIntfAdapter->udev->dev,
+ "number of alternate setting %d\n",
+ psIntfAdapter->interface->num_altsetting);
if (bBcm16 == TRUE) {
- /* selecting alternate setting one as a default setting for High Speed modem. */
+ /* selecting alternate setting one as a default setting
+ * for High Speed modem. */
if (psIntfAdapter->bHighSpeedDevice)
- retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "BCM16 is applicable on this dongle\n");
- if (retval || (psIntfAdapter->bHighSpeedDevice == false)) {
- usedIntOutForBulkTransfer = EP2 ;
+ retval = usb_set_interface(psIntfAdapter->udev,
+ DEFAULT_SETTING_0,
+ ALTERNATE_SETTING_1);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "BCM16 is applicable on this dongle\n");
+ if (retval || !psIntfAdapter->bHighSpeedDevice) {
+ usedIntOutForBulkTransfer = EP2;
endpoint = &iface_desc->endpoint[EP2].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
/*
- * If Modem is high speed device EP2 should be INT OUT End point
- * If Mode is FS then EP2 should be bulk end point
+ * If Modem is high speed device EP2 should be
+ * INT OUT End point
+ *
+ * If Mode is FS then EP2 should be bulk end
+ * point
*/
- if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == false))
- || ((psIntfAdapter->bHighSpeedDevice == false) && (bcm_usb_endpoint_is_bulk_out(endpoint) == false))) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Configuring the EEPROM\n");
+ if ((psIntfAdapter->bHighSpeedDevice &&
+ !usb_endpoint_is_int_out(endpoint)) ||
+ (!psIntfAdapter->bHighSpeedDevice &&
+ !usb_endpoint_is_bulk_out(endpoint))) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Configuring the EEPROM\n");
/* change the EP2, EP4 to INT OUT end point */
- ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
+ ConfigureEndPointTypesThroughEEPROM(
+ psIntfAdapter->psAdapter);
/*
- * It resets the device and if any thing gets changed
- * in USB descriptor it will show fail and re-enumerate
- * the device
+ * It resets the device and if anything
+ * gets changed in the USB descriptor it
+ * will fail and re-enumerate the
+ * device
*/
- retval = usb_reset_device(psIntfAdapter->udev);
+ retval = usb_reset_device(
+ psIntfAdapter->udev);
if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval ;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY,
+ DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
+ return retval;
}
}
- if ((psIntfAdapter->bHighSpeedDevice == false) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+ if (!psIntfAdapter->bHighSpeedDevice &&
+ usb_endpoint_is_bulk_out(endpoint)) {
/* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
UINT _uiData = ntohl(EP2_CFG_INT);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Reverting Bulk to INT as it is in Full Speed mode.\n");
- BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter, (PUCHAR)&_uiData, 0x136, 4, TRUE);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Reverting Bulk to INT as it is in Full Speed mode.\n");
+ BeceemEEPROMBulkWrite(
+ psIntfAdapter->psAdapter,
+ (PUCHAR) &_uiData,
+ 0x136, 4, TRUE);
}
} else {
- usedIntOutForBulkTransfer = EP4 ;
+ usedIntOutForBulkTransfer = EP4;
endpoint = &iface_desc->endpoint[EP4].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Choosing AltSetting as a default setting.\n");
- if (bcm_usb_endpoint_is_int_out(endpoint) == false) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Dongle does not have BCM16 Fix.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Choosing AltSetting as a default setting.\n");
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY, DBG_LVL_ALL,
+ "Dongle does not have BCM16 Fix.\n");
/* change the EP2, EP4 to INT OUT end point and use EP4 in altsetting */
- ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
+ ConfigureEndPointTypesThroughEEPROM(
+ psIntfAdapter->psAdapter);
/*
- * It resets the device and if any thing gets changed in
- * USB descriptor it will show fail and re-enumerate the
+ * It resets the device and if anything
+ * gets changed in the USB descriptor it
+ * will fail and re-enumerate the
* device
*/
- retval = usb_reset_device(psIntfAdapter->udev);
+ retval = usb_reset_device(
+ psIntfAdapter->udev);
if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT,
+ DRV_ENTRY,
+ DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
return retval;
}
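A note on the endpoint expectation spelled out in the comments above: on a high-speed modem EP2 is expected to be an interrupt-OUT endpoint, on a full-speed one a bulk-OUT endpoint, and when neither holds the driver reprograms the endpoint types through the EEPROM and calls usb_reset_device() to force a re-enumeration. A minimal sketch of that check using only the generic <linux/usb.h> helpers; the function name is illustrative and not part of the driver:

#include <linux/usb.h>

/* Return true when EP2's type matches what the firmware is expected
 * to expose for the given bus speed (illustrative helper only). */
static bool ep2_type_matches_speed(const struct usb_endpoint_descriptor *epd,
				   bool high_speed)
{
	if (high_speed)
		return usb_endpoint_is_int_out(epd);	/* HS: INT OUT */

	return usb_endpoint_is_bulk_out(epd);		/* FS: bulk OUT */
}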
@@ -541,49 +535,69 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
endpoint = &iface_desc->endpoint[value].desc;
- if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint)) {
+ if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr &&
+ usb_endpoint_is_bulk_in(endpoint)) {
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkIn.bulk_in_pipe =
- usb_rcvbulkpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
+ psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkIn.bulk_in_pipe = usb_rcvbulkpipe(
+ psIntfAdapter->udev,
+ psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
}
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint)) {
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndbulkpipe(psIntfAdapter->udev,
+ if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
+ usb_endpoint_is_bulk_out(endpoint)) {
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndbulkpipe(
+ psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
}
- if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint)) {
+ if (!psIntfAdapter->sIntrIn.int_in_endpointAddr &&
+ usb_endpoint_is_int_in(endpoint)) {
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
psIntfAdapter->sIntrIn.int_in_size = buffer_size;
- psIntfAdapter->sIntrIn.int_in_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
+ psIntfAdapter->sIntrIn.int_in_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrIn.int_in_interval =
+ endpoint->bInterval;
psIntfAdapter->sIntrIn.int_in_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
+ kmalloc(buffer_size, GFP_KERNEL);
if (!psIntfAdapter->sIntrIn.int_in_buffer)
return -EINVAL;
}
- if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint)) {
+ if (!psIntfAdapter->sIntrOut.int_out_endpointAddr &&
+ usb_endpoint_is_int_out(endpoint)) {
if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- (psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer)) {
+ (psIntfAdapter->psAdapter->chip_id == T3B) &&
+ (value == usedIntOutForBulkTransfer)) {
/* use first intout end point as a bulk out end point */
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndintpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
+ buffer_size =
+ le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sBulkOut.bulk_out_size =
+ buffer_size;
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe =
+ usb_sndintpipe(psIntfAdapter->udev,
+ psIntfAdapter->sBulkOut
+ .bulk_out_endpointAddr);
+ psIntfAdapter->sBulkOut.int_out_interval =
+ endpoint->bInterval;
} else if (value == EP6) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrOut.int_out_size = buffer_size;
- psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
- psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+ buffer_size =
+ le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sIntrOut.int_out_size =
+ buffer_size;
+ psIntfAdapter->sIntrOut.int_out_endpointAddr =
+ endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrOut.int_out_interval =
+ endpoint->bInterval;
+ psIntfAdapter->sIntrOut.int_out_buffer =
+ kmalloc(buffer_size, GFP_KERNEL);
if (!psIntfAdapter->sIntrOut.int_out_buffer)
return -EINVAL;
}
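The loop above is the usual probe-time endpoint walk: iterate cur_altsetting->endpoint[], classify each descriptor with the usb_endpoint_is_*() helpers, and cache the address, wMaxPacketSize and a pre-built pipe for later transfers. A stripped-down sketch of the same pattern with hypothetical structure and field names, limited to the bulk endpoints:

#include <linux/usb.h>

struct example_eps {
	u8 bulk_in_addr, bulk_out_addr;
	unsigned int bulk_in_pipe, bulk_out_pipe;
	size_t bulk_in_size;
};

/* Remember the first bulk IN and bulk OUT endpoints of an altsetting. */
static void example_scan_endpoints(struct usb_device *udev,
				   struct usb_host_interface *alt,
				   struct example_eps *eps)
{
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *epd =
			&alt->endpoint[i].desc;

		if (!eps->bulk_in_addr && usb_endpoint_is_bulk_in(epd)) {
			eps->bulk_in_addr = epd->bEndpointAddress;
			eps->bulk_in_size =
				le16_to_cpu(epd->wMaxPacketSize);
			eps->bulk_in_pipe =
				usb_rcvbulkpipe(udev, eps->bulk_in_addr);
		} else if (!eps->bulk_out_addr &&
			   usb_endpoint_is_bulk_out(epd)) {
			eps->bulk_out_addr = epd->bEndpointAddress;
			eps->bulk_out_pipe =
				usb_sndbulkpipe(udev, eps->bulk_out_addr);
		}
	}
}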
@@ -594,14 +608,14 @@ static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
psIntfAdapter->psAdapter->bcm_file_download = InterfaceFileDownload;
psIntfAdapter->psAdapter->bcm_file_readback_from_chip =
- InterfaceFileReadbackFromChip;
+ InterfaceFileReadbackFromChip;
psIntfAdapter->psAdapter->interface_transmit = InterfaceTransmitPacket;
retval = CreateInterruptUrb(psIntfAdapter);
if (retval) {
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Cannot create interrupt urb\n");
+ "Cannot create interrupt urb\n");
return retval;
}
@@ -618,17 +632,21 @@ static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
psIntfAdapter->bSuspended = TRUE;
- if (TRUE == psIntfAdapter->bPreparingForBusSuspend) {
+ if (psIntfAdapter->bPreparingForBusSuspend) {
psIntfAdapter->bPreparingForBusSuspend = false;
if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
- psIntfAdapter->psAdapter->IdleMode = TRUE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Host Entered in PMU Idle Mode.\n");
+ psIntfAdapter->psAdapter->IdleMode = TRUE;
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Host Entered in PMU Idle Mode.\n");
} else {
psIntfAdapter->psAdapter->bShutStatus = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Host Entered in PMU Shutdown Mode.\n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_INITEXIT, DRV_ENTRY,
+ DBG_LVL_ALL,
+ "Host Entered in PMU Shutdown Mode.\n");
}
}
psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false;
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
index 7b39f4f5f1ab..b9f8a7aa24fe 100644
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ b/drivers/staging/bcm/InterfaceIsr.c
@@ -4,108 +4,126 @@
static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
int status = urb->status;
- struct bcm_interface_adapter *psIntfAdapter = (struct bcm_interface_adapter *)urb->context;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter ;
+ struct bcm_interface_adapter *psIntfAdapter =
+ (struct bcm_interface_adapter *)urb->context;
+ struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
if (netif_msg_intr(Adapter))
pr_info(PFX "%s: interrupt status %d\n",
- Adapter->dev->name, status);
+ Adapter->dev->name, status);
- if(Adapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Device has Got Removed.");
- return ;
+ if (Adapter->device_removed) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Device has Got Removed.");
+ return;
}
- if(((Adapter->bPreparingForLowPowerMode == TRUE) && (Adapter->bDoSuspend == TRUE)) ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt call back is called while suspending the device");
- return ;
+ if ((Adapter->bPreparingForLowPowerMode && Adapter->bDoSuspend) ||
+ psIntfAdapter->bSuspended ||
+ psIntfAdapter->bPreparingForBusSuspend) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Interrupt call back is called while suspending the device");
+ return;
}
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "interrupt urb status %d", status);
switch (status) {
- /* success */
- case STATUS_SUCCESS:
- if ( urb->actual_length )
- {
-
- if(psIntfAdapter->ulInterruptData[1] & 0xFF)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "Got USIM interrupt");
+ /* success */
+ case STATUS_SUCCESS:
+ if (urb->actual_length) {
+
+ if (psIntfAdapter->ulInterruptData[1] & 0xFF) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "Got USIM interrupt");
}
- if(psIntfAdapter->ulInterruptData[1] & 0xFF00)
- {
+ if (psIntfAdapter->ulInterruptData[1] & 0xFF00) {
atomic_set(&Adapter->CurrNumFreeTxDesc,
- (psIntfAdapter->ulInterruptData[1] & 0xFF00) >> 8);
- atomic_set (&Adapter->uiMBupdate, TRUE);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL, "TX mailbox contains %d",
+ (psIntfAdapter->ulInterruptData[1] &
+ 0xFF00) >> 8);
+ atomic_set(&Adapter->uiMBupdate, TRUE);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "TX mailbox contains %d",
atomic_read(&Adapter->CurrNumFreeTxDesc));
}
- if(psIntfAdapter->ulInterruptData[1] >> 16)
- {
- Adapter->CurrNumRecvDescs=
+ if (psIntfAdapter->ulInterruptData[1] >> 16) {
+ Adapter->CurrNumRecvDescs =
(psIntfAdapter->ulInterruptData[1] >> 16);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"RX mailbox contains %d",
- Adapter->CurrNumRecvDescs);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "RX mailbox contains %d",
+ Adapter->CurrNumRecvDescs);
InterfaceRx(psIntfAdapter);
}
- if(Adapter->fw_download_done &&
+ if (Adapter->fw_download_done &&
!Adapter->downloadDDR &&
- atomic_read(&Adapter->CurrNumFreeTxDesc))
- {
- psIntfAdapter->psAdapter->downloadDDR +=1;
+ atomic_read(&Adapter->CurrNumFreeTxDesc)) {
+
+ psIntfAdapter->psAdapter->downloadDDR += 1;
wake_up(&Adapter->tx_packet_wait_queue);
}
- if(false == Adapter->waiting_to_fw_download_done)
- {
+ if (!Adapter->waiting_to_fw_download_done) {
Adapter->waiting_to_fw_download_done = TRUE;
wake_up(&Adapter->ioctl_fw_dnld_wait_queue);
}
- if(!atomic_read(&Adapter->TxPktAvail))
- {
+ if (!atomic_read(&Adapter->TxPktAvail)) {
atomic_set(&Adapter->TxPktAvail, 1);
wake_up(&Adapter->tx_packet_wait_queue);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Firing interrupt in URB");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Firing interrupt in URB");
}
break;
- case -ENOENT :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"URB has got disconnected ....");
- return ;
- }
- case -EINPROGRESS:
- {
- //This situation may happened when URBunlink is used. for detail check usb_unlink_urb documentation.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Impossibe condition has occurred... something very bad is going on");
- break ;
- //return;
- }
- case -EPIPE:
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt IN endPoint has got halted/stalled...need to clear this");
- Adapter->bEndPointHalted = TRUE ;
- wake_up(&Adapter->tx_packet_wait_queue);
- urb->status = STATUS_SUCCESS ;
- return;
- }
- /* software-driven interface shutdown */
- case -ECONNRESET: //URB got unlinked.
- case -ESHUTDOWN: // hardware gone. this is the serious problem.
- //Occurs only when something happens with the host controller device
- case -ENODEV : //Device got removed
- case -EINVAL : //Some thing very bad happened with the URB. No description is available.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"interrupt urb error %d", status);
- urb->status = STATUS_SUCCESS ;
- break ;
- //return;
- default:
- //This is required to check what is the defaults conditions when it occurs..
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...", status);
+ case -ENOENT:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "URB has got disconnected....");
+ return;
+ case -EINPROGRESS:
+ /*
+ * This situation may happen when usb_unlink_urb is used; for
+ * details check the usb_unlink_urb documentation.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Impossibe condition has occurred... something very bad is going on");
+ break;
+ /* return; */
+ case -EPIPE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL,
+ "Interrupt IN endPoint has got halted/stalled...need to clear this");
+ Adapter->bEndPointHalted = TRUE;
+ wake_up(&Adapter->tx_packet_wait_queue);
+ urb->status = STATUS_SUCCESS;
+ return;
+ /* software-driven interface shutdown */
+ case -ECONNRESET: /* URB got unlinked */
+ case -ESHUTDOWN: /* hardware gone. this is the serious problem */
+ /*
+ * Occurs only when something happens with the
+ * host controller device
+ */
+ case -ENODEV: /* Device got removed */
+ case -EINVAL:
+ /*
+ * Some thing very bad happened with the URB. No
+ * description is available.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "interrupt urb error %d", status);
+ urb->status = STATUS_SUCCESS;
+ break;
+ /* return; */
+ default:
+ /*
+ * This is required to check what the default
+ * conditions are when this occurs.
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
+ "GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...",
+ status);
break;
}
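The switch above follows the standard completion-status convention for an interrupt URB: 0 means urb->actual_length bytes arrived, -ENOENT/-ECONNRESET/-ESHUTDOWN mean the URB was unlinked or the device is going away and must not be resubmitted, and -EPIPE marks a halted endpoint that needs a clear-halt before further traffic. A bare-bones sketch of that convention, with illustrative names rather than this driver's handler:

#include <linux/usb.h>

static void example_int_complete(struct urb *urb)
{
	switch (urb->status) {
	case 0:
		/* urb->actual_length bytes of interrupt data to process */
		break;
	case -ENOENT:		/* killed with usb_kill_urb() */
	case -ECONNRESET:	/* unlinked asynchronously */
	case -ESHUTDOWN:	/* device or host controller gone */
		return;		/* never resubmit in these cases */
	case -EPIPE:
		/* endpoint stalled; clear the halt before more traffic */
		return;
	default:
		break;		/* transient error, resubmit below */
	}

	/* Resubmission from this (atomic) context must use GFP_ATOMIC:
	 * usb_submit_urb(urb, GFP_ATOMIC);
	 */
}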
@@ -117,28 +135,30 @@ static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
{
psIntfAdapter->psInterruptUrb = usb_alloc_urb(0, GFP_KERNEL);
- if (!psIntfAdapter->psInterruptUrb)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Cannot allocate interrupt urb");
+ if (!psIntfAdapter->psInterruptUrb) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS,
+ INTF_INIT, DBG_LVL_ALL,
+ "Cannot allocate interrupt urb");
return -ENOMEM;
}
psIntfAdapter->psInterruptUrb->transfer_buffer =
- psIntfAdapter->ulInterruptData;
+ psIntfAdapter->ulInterruptData;
psIntfAdapter->psInterruptUrb->transfer_buffer_length =
- sizeof(psIntfAdapter->ulInterruptData);
+ sizeof(psIntfAdapter->ulInterruptData);
psIntfAdapter->sIntrIn.int_in_pipe = usb_rcvintpipe(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_endpointAddr);
+ psIntfAdapter->sIntrIn.int_in_endpointAddr);
usb_fill_int_urb(psIntfAdapter->psInterruptUrb, psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe,
- psIntfAdapter->psInterruptUrb->transfer_buffer,
- psIntfAdapter->psInterruptUrb->transfer_buffer_length,
- read_int_callback, psIntfAdapter,
- psIntfAdapter->sIntrIn.int_in_interval);
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt Interval: %d\n",
- psIntfAdapter->sIntrIn.int_in_interval);
+ psIntfAdapter->sIntrIn.int_in_pipe,
+ psIntfAdapter->psInterruptUrb->transfer_buffer,
+ psIntfAdapter->psInterruptUrb->transfer_buffer_length,
+ read_int_callback, psIntfAdapter,
+ psIntfAdapter->sIntrIn.int_in_interval);
+
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, INTF_INIT,
+ DBG_LVL_ALL, "Interrupt Interval: %d\n",
+ psIntfAdapter->sIntrIn.int_in_interval);
return 0;
}
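CreateInterruptUrb() above is an instance of the standard allocate-and-fill sequence for an interrupt URB: usb_alloc_urb(), usb_rcvintpipe() on the interrupt-IN endpoint address, then usb_fill_int_urb() with buffer, length, completion handler, context and interval. A minimal sketch of the same sequence with placeholder names (the completion handler is just a stub here):

#include <linux/usb.h>
#include <linux/slab.h>

static void example_urb_complete(struct urb *urb)
{
	/* inspect urb->status and urb->actual_length here */
}

/* Allocate and prepare (but do not submit) an interrupt-IN URB. */
static struct urb *example_make_int_urb(struct usb_device *udev,
					u8 ep_addr, void *buf, int len,
					int interval, void *context)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!urb)
		return NULL;

	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep_addr),
			 buf, len, example_urb_complete, context, interval);
	return urb;
}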
@@ -147,19 +167,20 @@ INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
{
INT status = 0;
- if( false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend &&
- false == psIntfAdapter->psAdapter->StopAllXaction)
- {
- status = usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
- if (status)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Cannot send int urb %d\n", status);
- if(status == -EPIPE)
- {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE ;
+ if (!(psIntfAdapter->psAdapter->device_removed ||
+ psIntfAdapter->psAdapter->bEndPointHalted ||
+ psIntfAdapter->bSuspended ||
+ psIntfAdapter->bPreparingForBusSuspend ||
+ psIntfAdapter->psAdapter->StopAllXaction)) {
+ status =
+ usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
+ if (status) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
+ DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,
+ "Cannot send inturb %d\n", status);
+ if (status == -EPIPE) {
+ psIntfAdapter->psAdapter->bEndPointHalted =
+ TRUE;
wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
}
}
diff --git a/drivers/staging/bcm/Kconfig b/drivers/staging/bcm/Kconfig
index 83c9752504d4..8acf4b24a7c9 100644
--- a/drivers/staging/bcm/Kconfig
+++ b/drivers/staging/bcm/Kconfig
@@ -1,7 +1,6 @@
config BCM_WIMAX
tristate "Beceem BCS200/BCS220-3 and BCSM250 wimax support"
depends on USB && NET
- default N
help
This is an experimental driver for the Beceem WIMAX chipset used
by Sprint 4G.
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 0727599bf5fa..4f315835ddfc 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -222,10 +222,7 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//Checking classifier validity
if (!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
- {
- bClassificationSucceed = false;
break;
- }
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
if (pstClassifierRule->bIpv6Protocol)
@@ -233,51 +230,47 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
//**************Checking IP header parameter**************************//
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
- if (false == (bClassificationSucceed =
- MatchSrcIpAddress(pstClassifierRule, iphd->saddr)))
+ if (!MatchSrcIpAddress(pstClassifierRule, iphd->saddr))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
- if (false == (bClassificationSucceed =
- MatchDestIpAddress(pstClassifierRule, iphd->daddr)))
+ if (!MatchDestIpAddress(pstClassifierRule, iphd->daddr))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
- if (false == (bClassificationSucceed =
- MatchTos(pstClassifierRule, iphd->tos)))
- {
+ if (!MatchTos(pstClassifierRule, iphd->tos)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
break;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
- if (false == (bClassificationSucceed =
- MatchProtocol(pstClassifierRule, iphd->protocol)))
+ if (!MatchProtocol(pstClassifierRule, iphd->protocol))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
//if protocol is not TCP or UDP then no need of comparing source port and destination port
- if (iphd->protocol != TCP && iphd->protocol != UDP)
+ if (iphd->protocol != TCP && iphd->protocol != UDP) {
+ bClassificationSucceed = TRUE;
break;
+ }
//******************Checking Transport Layer Header field if present *****************//
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
- if (false == (bClassificationSucceed =
- MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.source : xprt_hdr->thdr.source))))
+ if (!MatchSrcPort(pstClassifierRule,
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.source : xprt_hdr->thdr.source)))
break;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
(iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
xprt_hdr->thdr.dest);
- if (false == (bClassificationSucceed =
- MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))))
+ if (!MatchDestPort(pstClassifierRule,
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest)))
break;
+ bClassificationSucceed = TRUE;
} while (0);
if (TRUE == bClassificationSucceed)
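The reworked classifier relies on the common do { ... } while (0) early-exit idiom: every failed match breaks out immediately, the non-TCP/UDP case short-circuits to success before the port checks, and the success flag is assigned exactly once at the end instead of after every comparison. A self-contained userspace-style sketch of the idiom with invented values, not the driver's matching logic:

#include <stdbool.h>

#define PROTO_TCP 6
#define PROTO_UDP 17

/* Chain several checks, exit on the first failure, set the flag once. */
static bool example_classify(int proto, int sport, int dport,
			     int want_proto, int want_sport, int want_dport)
{
	bool matched = false;

	do {
		if (proto != want_proto)
			break;
		if (proto != PROTO_TCP && proto != PROTO_UDP) {
			matched = true;	/* no ports to compare */
			break;
		}
		if (sport != want_sport)
			break;
		if (dport != want_dport)
			break;
		matched = true;		/* every check passed */
	} while (0);

	return matched;
}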
diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h
index 832adcfd1e3a..90b3b25dd606 100644
--- a/drivers/staging/bcm/Typedefs.h
+++ b/drivers/staging/bcm/Typedefs.h
@@ -25,16 +25,16 @@ typedef unsigned int B_UINT32;
typedef unsigned long ULONG;
typedef unsigned long DWORD;
-typedef char* PCHAR;
-typedef short* PSHORT;
-typedef int* PINT;
-typedef long* PLONG;
-typedef void* PVOID;
-
-typedef unsigned char* PUCHAR;
-typedef unsigned short* PUSHORT;
-typedef unsigned int* PUINT;
-typedef unsigned long* PULONG;
+typedef char *PCHAR;
+typedef short *PSHORT;
+typedef int *PINT;
+typedef long *PLONG;
+typedef void *PVOID;
+
+typedef unsigned char *PUCHAR;
+typedef unsigned short *PUSHORT;
+typedef unsigned int *PUINT;
+typedef unsigned long *PULONG;
typedef unsigned long long ULONG64;
typedef unsigned long long LARGE_INTEGER;
typedef unsigned int UINT32;
@@ -43,5 +43,5 @@ typedef unsigned int UINT32;
#endif
-#endif //__TYPEDEFS_H__
+#endif /* __TYPEDEFS_H__ */
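The Typedefs.h change only moves the '*' next to the name, which is the kernel style because in an ordinary declaration the '*' binds to each declarator rather than to the type; for a typedef the meaning is identical either way. A short illustration with names that are not from the driver:

/* The '*' belongs to each declarator, not to the type: */
char *p, c;		/* p is char *, c is a plain char */
char* q, d;		/* same meaning; the left-leaning '*' misleads */

/* With a typedef the pointer-ness is baked into the new name: */
typedef char *EXAMPLE_PCHAR;
EXAMPLE_PCHAR a, b;	/* both a and b are char * */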
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 7fd21c6923cb..6f3270cc4176 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -34,7 +34,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/usb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/ip.h>
#include "Typedefs.h"
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index f55300db1d48..39ace5510c43 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -27,9 +27,9 @@ INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_m
/* Copy the classifier Table */
for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy((PVOID) & pstHostMibs->
+ memcpy((PVOID) &pstHostMibs->
astClassifierTable[nClassifierIndex],
- (PVOID) & Adapter->
+ (PVOID) &Adapter->
astClassifierTable[nClassifierIndex],
sizeof(struct bcm_mibs_classifier_rule));
}
@@ -37,8 +37,8 @@ INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_m
/* Copy the SF Table */
for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
if (Adapter->PackInfo[nSfIndex].bValid) {
- memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
- (PVOID) & Adapter->PackInfo[nSfIndex],
+ memcpy((PVOID) &pstHostMibs->astSFtable[nSfIndex],
+ (PVOID) &Adapter->PackInfo[nSfIndex],
sizeof(struct bcm_mibs_table));
} else {
/* If index in not valid,
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index fca164f51f4b..63be3be62ebd 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -122,7 +122,7 @@ static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter)
* OSAL_STATUS_CODE:
*/
-int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
+static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
DWORD dwAddress,
DWORD *pdwData,
DWORD dwNumWords)
@@ -2698,7 +2698,7 @@ int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash
* On Failure -returns STATUS_FAILURE
*/
-int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
+static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
{
int SectEndOffset = 0;
@@ -2980,7 +2980,7 @@ static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter)
*
*/
-B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
+static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
{
unsigned int uiSectorNum = 0;
unsigned int uiWordOfSectorPermission = 0;
@@ -3374,7 +3374,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
case DSD2:
if (ReadDSDSignature(Adapter, eFlash2xSectVal) == DSD_IMAGE_MAGIC_NUMBER) {
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given DSD<%x> already has highest priority", eFlash2xSectVal);
Status = STATUS_SUCCESS;
break;
@@ -3402,7 +3402,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Made the DSD: %x highest by reducing priority of other\n", eFlash2xSectVal);
Status = STATUS_SUCCESS;
break;
@@ -3421,7 +3421,7 @@ int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_secti
}
HighestPriDSD = getHighestPriDSD(Adapter);
- if ((HighestPriDSD == eFlash2xSectVal)) {
+ if (HighestPriDSD == eFlash2xSectVal) {
Status = STATUS_SUCCESS;
break;
}
@@ -4074,7 +4074,7 @@ int BcmCopySection(struct bcm_mini_adapter *Adapter,
* Faillure :- Return negative error code
*/
-int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
+static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
{
unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0;
bool bHasHeader = false;
@@ -4213,7 +4213,7 @@ static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, unsigned int offset
return STATUS_SUCCESS;
}
-int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
+static int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
{
unsigned int uiDSDsig = 0;
/* unsigned int sigoffsetInMap = 0;
@@ -4238,7 +4238,7 @@ int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
return uiDSDsig;
}
-int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
+static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
{
/* unsigned int priOffsetInMap = 0 ; */
unsigned int uiDSDPri = STATUS_FAILURE;
@@ -4261,7 +4261,7 @@ int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
return uiDSDPri;
}
-enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
+static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
{
int DSDHighestPri = STATUS_FAILURE;
int DsdPri = 0;
@@ -4293,7 +4293,7 @@ enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
return HighestPriDSD;
}
-int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
+static int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
{
unsigned int uiISOsig = 0;
/* unsigned int sigoffsetInMap = 0;
@@ -4316,7 +4316,7 @@ int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_
return uiISOsig;
}
-int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
+static int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
{
unsigned int ISOPri = STATUS_FAILURE;
if (IsSectionWritable(Adapter, iso)) {
@@ -4335,7 +4335,7 @@ int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_v
return ISOPri;
}
-enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
+static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
{
int ISOHighestPri = STATUS_FAILURE;
int ISOPri = 0;
@@ -4359,7 +4359,7 @@ enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
return HighestPriISO;
}
-int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
+static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
PUINT pBuff,
enum bcm_flash2x_section_val eFlash2xSectionVal,
unsigned int uiOffset,
@@ -4472,7 +4472,7 @@ bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_se
return SectionPresent;
}
-int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
+static int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
{
int offset = STATUS_FAILURE;
int Status = false;
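Most of the nvm.c hunks simply give file-local helpers internal linkage. Marking a function static keeps it out of the kernel's global symbol space, removes the need for an extern prototype in a header, and lets the compiler flag the helper once it becomes unused. A trivial before/after sketch with an invented helper:

/* before: linkable from every other object file in the kernel */
int example_twice(int x)
{
	return 2 * x;
}

/* after: callable only inside this translation unit */
static int example_twice_static(int x)
{
	return 2 * x;
}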
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index bf532b1bd345..ebbc5090f219 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -38,8 +38,8 @@
****************************************************************************/
static void FlushOutBuff(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
- pdx->sCurrentState);
+ dev_dbg(&pdx->interface->dev, "%s: currentState=%d\n",
+ __func__, pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
/* Kill off any pending I/O */
@@ -59,8 +59,8 @@ static void FlushOutBuff(DEVICE_EXTENSION *pdx)
****************************************************************************/
static void FlushInBuff(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
- pdx->sCurrentState);
+ dev_dbg(&pdx->interface->dev, "%s: currentState=%d\n",
+ __func__, pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
/* Kill off any pending I/O */
@@ -118,8 +118,8 @@ int SendString(DEVICE_EXTENSION *pdx, const char __user *pData,
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
if (n > 0) { /* do nothing if nowt to do! */
- dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__, n,
- buffer);
+ dev_dbg(&pdx->interface->dev, "%s: n=%d>%s<\n",
+ __func__, n, buffer);
iReturn = PutChars(pdx, buffer, n);
}
@@ -139,7 +139,7 @@ int SendChar(DEVICE_EXTENSION *pdx, char c)
int iReturn;
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
iReturn = PutChars(pdx, &c, 1);
- dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)", c, c);
+ dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)\n", c, c);
Allowi(pdx); /* Make sure char reads are running */
mutex_unlock(&pdx->io_mutex);
return iReturn;
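The ced_ioc.c hunks converge on one logging pattern: print the function name through "%s" and __func__ rather than hard-coding it, and terminate every dev_dbg()/dev_err() format with '\n' so messages do not run together in the log. A minimal sketch of the pattern; the device pointer and state value are placeholders:

#include <linux/device.h>

static void example_log_state(struct device *dev, int state)
{
	/* "%s" + __func__ stays correct if the function is renamed */
	dev_dbg(dev, "%s: currentState=%d\n", __func__, state);
}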
@@ -174,7 +174,7 @@ int SendChar(DEVICE_EXTENSION *pdx, char c)
int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
{
int nGot;
- dev_dbg(&pdx->interface->dev, "Get1401State() entry");
+ dev_dbg(&pdx->interface->dev, "%s: entry\n", __func__);
*state = 0xFFFFFFFF; /* Start off with invalid state */
nGot = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
@@ -182,15 +182,15 @@ int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
pdx->statBuf, sizeof(pdx->statBuf), HZ);
if (nGot != sizeof(pdx->statBuf)) {
dev_err(&pdx->interface->dev,
- "Get1401State() FAILED, return code %d", nGot);
+ "%s: FAILED, return code %d\n", __func__, nGot);
pdx->sCurrentState = U14ERR_TIME; /* Indicate that things are very wrong indeed */
*state = 0; /* Force status values to a known state */
*error = 0;
} else {
int nDevice;
dev_dbg(&pdx->interface->dev,
- "Get1401State() Success, state: 0x%x, 0x%x",
- pdx->statBuf[0], pdx->statBuf[1]);
+ "%s: Success, state: 0x%x, 0x%x\n",
+ __func__, pdx->statBuf[0], pdx->statBuf[1]);
*state = pdx->statBuf[0]; /* Return the state values to the calling code */
*error = pdx->statBuf[1];
@@ -220,8 +220,8 @@ int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
****************************************************************************/
int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "ReadWrite_Cancel entry %d",
- pdx->bStagedUrbPending);
+ dev_dbg(&pdx->interface->dev, "%s: entry %d\n",
+ __func__, pdx->bStagedUrbPending);
#ifdef NOT_WRITTEN_YET
int ntStatus = STATUS_SUCCESS;
bool bResult = false;
@@ -231,7 +231,7 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
if (pdx->bStagedUrbPending) { /* anything to be cancelled? May need more... */
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel about to cancel Urb");
+ "ReadWrite_Cancel about to cancel Urb\n");
/* Clear the staging done flag */
/* KeClearEvent(&pdx->StagingDoneEvent); */
USB_ASSERT(pdx->pStagedIrp != NULL);
@@ -244,14 +244,14 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
LARGE_INTEGER timeout;
timeout.QuadPart = -10000000; /* Use a timeout of 1 second */
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel about to wait till done");
+ "%s: about to wait till done\n", __func__);
ntStatus =
KeWaitForSingleObject(&pdx->StagingDoneEvent,
Executive, KernelMode, FALSE,
&timeout);
} else {
dev_info(&pdx->interface - dev,
- "ReadWrite_Cancel, cancellation failed");
+ "%s: cancellation failed\n", __func__);
ntStatus = U14ERR_FAIL;
}
USB_KdPrint(DBGLVL_DEFAULT,
@@ -260,7 +260,7 @@ int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
} else
spin_unlock_irq(&pdx->stagedLock);
- dev_info(&pdx->interface - dev, "ReadWrite_Cancel done");
+ dev_info(&pdx->interface - dev, "%s: done\n", __func__);
return ntStatus;
#else
return U14ERR_NOERROR;
@@ -304,7 +304,7 @@ static int InSelfTest(DEVICE_EXTENSION *pdx, unsigned int *pState)
bool Is1401(DEVICE_EXTENSION *pdx)
{
int iReturn;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
FlushInBuff(pdx); /* Clear out input buffer & pipe */
@@ -368,7 +368,7 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
(pdx->sCurrentState >= U14ERR_STD)); /* No 1401 errors stored */
dev_dbg(&pdx->interface->dev,
- "%s DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d",
+ "%s: DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d\n",
__func__, pdx->dwDMAFlag, pdx->sCurrentState, pdx->bForceReset,
bTestBuff, bShortTest);
@@ -376,13 +376,13 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
(pdx->dwNumInput || pdx->dwNumOutput)) { /* ...characters were in the buffer? */
bShortTest = false; /* Then do the full test */
dev_dbg(&pdx->interface->dev,
- "%s will reset as buffers not empty", __func__);
+ "%s: will reset as buffers not empty\n", __func__);
}
if (bShortTest || !bCanReset) { /* Still OK to try the short test? */
/* Always test if no reset - we want state update */
unsigned int state, error;
- dev_dbg(&pdx->interface->dev, "%s->Get1401State", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: Get1401State\n", __func__);
if (Get1401State(pdx, &state, &error) == U14ERR_NOERROR) { /* Check on the 1401 state */
if ((state & 0xFF) == 0) /* If call worked, check the status value */
bRet = true; /* If that was zero, all is OK, no reset needed */
@@ -390,7 +390,7 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
}
if (!bRet && bCanReset) { /* If all not OK, then */
- dev_info(&pdx->interface->dev, "%s->Is1401 %d %d %d %d",
+ dev_info(&pdx->interface->dev, "%s: Is1401 %d %d %d %d\n",
__func__, bShortTest, pdx->sCurrentState, bTestBuff,
pdx->bForceReset);
bRet = Is1401(pdx); /* do full test */
@@ -407,7 +407,8 @@ bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
int Reset1401(DEVICE_EXTENSION *pdx)
{
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
- dev_dbg(&pdx->interface->dev, "ABout to call QuickCheck");
+ dev_dbg(&pdx->interface->dev, "%s: About to call QuickCheck\n",
+ __func__);
QuickCheck(pdx, true, true); /* Check 1401, reset if not OK */
mutex_unlock(&pdx->io_mutex);
return U14ERR_NOERROR;
@@ -423,7 +424,7 @@ int GetChar(DEVICE_EXTENSION *pdx)
int iReturn = U14ERR_NOIN; /* assume we will get nothing */
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
- dev_dbg(&pdx->interface->dev, "GetChar");
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
Allowi(pdx); /* Make sure char reads are running */
SendChars(pdx); /* and send any buffered chars */
@@ -497,8 +498,8 @@ int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n)
pdx->dwNumInput -= nGot;
spin_unlock_irq(&pdx->charInLock);
- dev_dbg(&pdx->interface->dev,
- "GetString read %d characters >%s<", nGot, buffer);
+ dev_dbg(&pdx->interface->dev, "%s: read %d characters >%s<\n",
+ __func__, nGot, buffer);
if (copy_to_user(pUser, buffer, nCopyToUser))
iReturn = -EFAULT;
else
@@ -555,7 +556,7 @@ int LineCount(DEVICE_EXTENSION *pdx)
}
spin_unlock_irq(&pdx->charInLock);
- dev_dbg(&pdx->interface->dev, "LineCount returned %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: returned %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -571,7 +572,7 @@ int GetOutBufSpace(DEVICE_EXTENSION *pdx)
mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
SendChars(pdx); /* send any buffered chars */
iReturn = (int)(OUTBUF_SZ - pdx->dwNumOutput); /* no lock needed for single read */
- dev_dbg(&pdx->interface->dev, "OutBufSpace %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -589,7 +590,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
if ((nArea < 0) || (nArea >= MAX_TRANSAREAS)) {
iReturn = U14ERR_BADAREA;
- dev_err(&pdx->interface->dev, "%s Attempt to clear area %d",
+ dev_err(&pdx->interface->dev, "%s: Attempt to clear area %d\n",
__func__, nArea);
} else {
TRANSAREA *pTA = &pdx->rTransDef[nArea]; /* to save typing */
@@ -602,14 +603,14 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
int nPages = 0; /* and number of pages */
int np;
- dev_dbg(&pdx->interface->dev, "%s area %d", __func__,
- nArea);
+ dev_dbg(&pdx->interface->dev, "%s: area %d\n",
+ __func__, nArea);
spin_lock_irq(&pdx->stagedLock);
if ((pdx->StagedId == nArea)
&& (pdx->dwDMAFlag > MODE_CHAR)) {
iReturn = U14ERR_UNLOCKFAIL; /* cannot delete as in use */
dev_err(&pdx->interface->dev,
- "%s call on area %d while active",
+ "%s: call on area %d while active\n",
__func__, nArea);
} else {
pPages = pTA->pPages; /* save page address list */
@@ -633,7 +634,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
/* Now we must undo the pinning down of the pages. We will assume the worst and mark */
/* all the pages as dirty. Don't be tempted to move this up above as you must not be */
/* holding a spin lock to do this stuff as it is not atomic. */
- dev_dbg(&pdx->interface->dev, "%s nPages=%d",
+ dev_dbg(&pdx->interface->dev, "%s: nPages=%d\n",
__func__, nPages);
for (np = 0; np < nPages; ++np) {
@@ -645,7 +646,7 @@ int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
kfree(pPages);
dev_dbg(&pdx->interface->dev,
- "%s kfree(pPages) done", __func__);
+ "%s: kfree(pPages) done\n", __func__);
}
}
}
@@ -687,12 +688,12 @@ static int SetArea(DEVICE_EXTENSION *pdx, int nArea, char __user *puBuf,
iReturn = U14ERR_NOMEMORY;
goto error;
}
- dev_dbg(&pdx->interface->dev, "%s %p, length=%06x, circular %d",
+ dev_dbg(&pdx->interface->dev, "%s: %p, length=%06x, circular %d\n",
__func__, puBuf, dwLength, bCircular);
/* To pin down user pages we must first acquire the mapping semaphore. */
nPages = get_user_pages_fast(ulStart, len, 1, pPages);
- dev_dbg(&pdx->interface->dev, "%s nPages = %d", __func__, nPages);
+ dev_dbg(&pdx->interface->dev, "%s: nPages = %d\n", __func__, nPages);
if (nPages > 0) { /* if we succeeded */
/* If you are tempted to use page_address (form LDD3), forget it. You MUST use */
@@ -735,17 +736,17 @@ error:
** unset it. Unsetting will fail if the area is booked, and a transfer to that
** area is in progress. Otherwise, we will release the area and re-assign it.
****************************************************************************/
-int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
+int SetTransfer(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD)
{
int iReturn;
- TRANSFERDESC td;
+ struct transfer_area_desc td;
if (copy_from_user(&td, pTD, sizeof(td)))
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
- td.wAreaNum, td.dwLength);
+ dev_dbg(&pdx->interface->dev, "%s: area:%d, size:%08x\n",
+ __func__, td.wAreaNum, td.dwLength);
/* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
/* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
/* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
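The comment above describes the portability trick used here: the user-space buffer address travels in a 64-bit field (long long lpvBuff in the transfer_area_desc definition later in this patch), so 32-bit and 64-bit userland share one ioctl layout, and the kernel recovers the pointer with a double cast through unsigned long. A hedged sketch of that conversion; the structure name and the token copy_from_user() access are invented, the field names echo the real descriptor:

#include <linux/types.h>
#include <linux/uaccess.h>

struct example_xfer_desc {
	long long lpvBuff;	/* user VA, carried as 64 bits on every ABI */
	unsigned int dwLength;
};

static int example_touch_buffer(const struct example_xfer_desc *td)
{
	/* unsigned long is 32 bits on a 32-bit kernel and 64 bits on a
	 * 64-bit kernel, so this narrows (or keeps) the value to the
	 * native pointer width - the "strange cast" referred to above. */
	char __user *buf = (char __user *)(unsigned long)td->lpvBuff;
	char first;

	if (!td->dwLength)
		return 0;
	if (copy_from_user(&first, buf, 1))
		return -EFAULT;
	return 0;
}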
@@ -778,10 +779,10 @@ int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea)
** pretend that whatever the user asked for was achieved, so we return 1 if
** try to create one, and 0 if they ask to remove (assuming all else was OK).
****************************************************************************/
-int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE)
+int SetEvent(DEVICE_EXTENSION *pdx, struct transfer_event __user *pTE)
{
int iReturn = U14ERR_NOERROR;
- TRANSFEREVENT te;
+ struct transfer_event te;
/* get a local copy of the data */
if (copy_from_user(&te, pTE, sizeof(te)))
@@ -922,7 +923,7 @@ int GetTransfer(DEVICE_EXTENSION *pdx, TGET_TX_BLOCK __user *pTX)
****************************************************************************/
int KillIO1401(DEVICE_EXTENSION *pdx)
{
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
FlushOutBuff(pdx);
FlushInBuff(pdx);
@@ -938,7 +939,7 @@ int KillIO1401(DEVICE_EXTENSION *pdx)
int BlkTransState(DEVICE_EXTENSION *pdx)
{
int iReturn = pdx->dwDMAFlag != MODE_CHAR;
- dev_dbg(&pdx->interface->dev, "%s = %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
return iReturn;
}
@@ -956,7 +957,7 @@ int StateOf1401(DEVICE_EXTENSION *pdx)
iReturn = pdx->sCurrentState;
mutex_unlock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s = %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: %d\n", __func__, iReturn);
return iReturn;
}
@@ -971,7 +972,7 @@ int StartSelfTest(DEVICE_EXTENSION *pdx)
{
int nGot;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
FlushInBuff(pdx); /* Clear out input buffer & pipe */
@@ -987,7 +988,7 @@ int StartSelfTest(DEVICE_EXTENSION *pdx)
mutex_unlock(&pdx->io_mutex);
if (nGot < 0)
- dev_err(&pdx->interface->dev, "%s err=%d", __func__, nGot);
+ dev_err(&pdx->interface->dev, "%s: err=%d\n", __func__, nGot);
return nGot < 0 ? U14ERR_FAIL : U14ERR_NOERROR;
}
@@ -1005,7 +1006,7 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = Get1401State(pdx, &state, &error);
if (iReturn == U14ERR_NOERROR) /* Only accept zero if it happens twice */
iReturn = Get1401State(pdx, &state, &error);
@@ -1013,8 +1014,8 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
if (iReturn != U14ERR_NOERROR) { /* Self-test can cause comms errors */
/* so we assume still testing */
dev_err(&pdx->interface->dev,
- "%s Get1401State=%d, assuming still testing", __func__,
- iReturn);
+ "%s: Get1401State=%d, assuming still testing\n",
+ __func__, iReturn);
state = 0x80; /* Force still-testing, no error */
error = 0;
iReturn = U14ERR_NOERROR;
@@ -1022,7 +1023,7 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
if ((state == -1) && (error == -1)) { /* If Get1401State had problems */
dev_err(&pdx->interface->dev,
- "%s Get1401State failed, assuming still testing",
+ "%s: Get1401State failed, assuming still testing\n",
__func__);
state = 0x80; /* Force still-testing, no error */
error = 0;
@@ -1033,21 +1034,21 @@ int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
gst.code = (state & 0x00FF0000) >> 16; /* read the error code */
gst.x = error & 0x0000FFFF; /* Error data X */
gst.y = (error & 0xFFFF0000) >> 16; /* and data Y */
- dev_dbg(&pdx->interface->dev, "Self-test error code %d",
- gst.code);
+ dev_dbg(&pdx->interface->dev,
+ "Self-test error code %d\n", gst.code);
} else { /* No error, check for timeout */
unsigned long ulNow = jiffies; /* get current time */
if (time_after(ulNow, pdx->ulSelfTestTime)) {
gst.code = -2; /* Flag the timeout */
dev_dbg(&pdx->interface->dev,
- "Self-test timed-out");
+ "Self-test timed-out\n");
} else
dev_dbg(&pdx->interface->dev,
- "Self-test on-going");
+ "Self-test on-going\n");
}
} else {
gst.code = -1; /* Flag the test is done */
- dev_dbg(&pdx->interface->dev, "Self-test done");
+ dev_dbg(&pdx->interface->dev, "Self-test done\n");
}
if (gst.code < 0) { /* If we have a problem or finished */
@@ -1074,7 +1075,7 @@ int TypeOf1401(DEVICE_EXTENSION *pdx)
{
int iReturn = TYPEUNKNOWN;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
switch (pdx->s1401Type) {
case TYPE1401:
@@ -1092,7 +1093,7 @@ int TypeOf1401(DEVICE_EXTENSION *pdx)
else /* for up-coming 1401 designs */
iReturn = TYPEUNKNOWN; /* Don't know or not there */
}
- dev_dbg(&pdx->interface->dev, "%s %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s %d\n", __func__, iReturn);
mutex_unlock(&pdx->io_mutex);
return iReturn;
@@ -1107,7 +1108,7 @@ int TransferFlags(DEVICE_EXTENSION *pdx)
{
int iReturn = U14TF_MULTIA | U14TF_DIAG | /* we always have multiple DMA area */
U14TF_NOTIFY | U14TF_CIRCTH; /* diagnostics, notify and circular */
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
if (pdx->bIsUSB2) /* Set flag for USB2 if appropriate */
iReturn |= U14TF_USB2;
@@ -1125,15 +1126,15 @@ static int DbgCmd1401(DEVICE_EXTENSION *pdx, unsigned char cmd,
unsigned int data)
{
int iReturn;
- dev_dbg(&pdx->interface->dev, "%s entry", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: entry\n", __func__);
iReturn = usb_control_msg(pdx->udev, usb_sndctrlpipe(pdx->udev, 0), cmd,
(H_TO_D | VENDOR | DEVREQ),
(unsigned short)data,
(unsigned short)(data >> 16), NULL, 0, HZ);
/* allow 1 second timeout */
if (iReturn < 0)
- dev_err(&pdx->interface->dev, "%s fail code=%d", __func__,
- iReturn);
+ dev_err(&pdx->interface->dev, "%s: fail code=%d\n",
+ __func__, iReturn);
return iReturn;
}
@@ -1152,7 +1153,7 @@ int DbgPeek(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1181,7 +1182,7 @@ int DbgPoke(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1210,7 +1211,7 @@ int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s @ %08x", __func__, db.iAddr);
+ dev_dbg(&pdx->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
iReturn = DbgCmd1401(pdx, DB_SETADD, db.iAddr);
if (iReturn == U14ERR_NOERROR)
@@ -1242,7 +1243,7 @@ int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = DbgCmd1401(pdx, DB_SETDEF, db.iDefault);
if (iReturn == U14ERR_NOERROR)
@@ -1270,7 +1271,7 @@ int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
memset(&db, 0, sizeof(db)); /* fill returned block with 0s */
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
/* Read back the last peeked value from the 1401. */
iReturn = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
@@ -1282,8 +1283,8 @@ int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
else
iReturn = U14ERR_NOERROR;
} else
- dev_err(&pdx->interface->dev, "%s failed, code %d", __func__,
- iReturn);
+ dev_err(&pdx->interface->dev, "%s: failed, code %d\n",
+ __func__, iReturn);
mutex_unlock(&pdx->io_mutex);
@@ -1302,7 +1303,7 @@ int DbgStopLoop(DEVICE_EXTENSION *pdx)
unsigned int uState, uErr;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
iReturn = Get1401State(pdx, &uState, &uErr);
mutex_unlock(&pdx->io_mutex);
@@ -1317,18 +1318,18 @@ int DbgStopLoop(DEVICE_EXTENSION *pdx)
** booked and a transfer to that area is in progress. Otherwise, we will
** release the area and re-assign it.
****************************************************************************/
-int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
+int SetCircular(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD)
{
int iReturn;
bool bToHost;
- TRANSFERDESC td;
+ struct transfer_area_desc td;
if (copy_from_user(&td, pTD, sizeof(td)))
return -EFAULT;
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
- td.wAreaNum, td.dwLength);
+ dev_dbg(&pdx->interface->dev, "%s: area:%d, size:%08x\n",
+ __func__, td.wAreaNum, td.dwLength);
bToHost = td.eSize != 0; /* this is used as the tohost flag */
/* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
@@ -1353,7 +1354,7 @@ int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
unsigned int nArea;
TCIRCBLOCK cb;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (copy_from_user(&cb, pCB, sizeof(cb)))
return -EFAULT;
@@ -1374,7 +1375,7 @@ int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
cb.dwOffset = pArea->aBlocks[0].dwOffset;
cb.dwSize = pArea->aBlocks[0].dwSize;
dev_dbg(&pdx->interface->dev,
- "%s return block 0: %d bytes at %d",
+ "%s: return block 0: %d bytes at %d\n",
__func__, cb.dwSize, cb.dwOffset);
}
} else
@@ -1402,7 +1403,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
unsigned int nArea, uStart, uSize;
TCIRCBLOCK cb;
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (copy_from_user(&cb, pCB, sizeof(cb)))
return -EFAULT;
@@ -1437,7 +1438,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
}
dev_dbg(&pdx->interface->dev,
- "%s free %d bytes at %d, return %d bytes at %d, wait=%d",
+ "%s: free %d bytes at %d, return %d bytes at %d, wait=%d\n",
__func__, uSize, uStart,
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset,
@@ -1453,13 +1454,13 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
bWaiting = pdx->bXFerWaiting;
if (bWaiting && pdx->bStagedUrbPending) {
dev_err(&pdx->interface->dev,
- "%s ERROR: waiting xfer and staged Urb pending!",
+ "%s: ERROR: waiting xfer and staged Urb pending!\n",
__func__);
bWaiting = false;
}
} else {
dev_err(&pdx->interface->dev,
- "%s ERROR: freeing %d bytes at %d, block 0 is %d bytes at %d",
+ "%s: ERROR: freeing %d bytes at %d, block 0 is %d bytes at %d\n",
__func__, uSize, uStart,
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset);
@@ -1475,7 +1476,7 @@ int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
pdx->rDMAInfo.dwSize);
if (RWMStat != U14ERR_NOERROR)
dev_err(&pdx->interface->dev,
- "%s rw setup failed %d",
+ "%s: rw setup failed %d\n",
__func__, RWMStat);
}
} else
diff --git a/drivers/staging/ced1401/ced_ioctl.h b/drivers/staging/ced1401/ced_ioctl.h
index aa68878bd251..4b6c9dedb21e 100644
--- a/drivers/staging/ced1401/ced_ioctl.h
+++ b/drivers/staging/ced1401/ced_ioctl.h
@@ -26,24 +26,21 @@
** TypeDefs
*****************************************************************************/
-typedef unsigned short TBLOCKENTRY; /* index the blk transfer table 0-7 */
-
-typedef struct TransferDesc {
+struct transfer_area_desc {
long long lpvBuff; /* address of transfer area (for 64 or 32 bit) */
unsigned int dwLength; /* length of the area */
- TBLOCKENTRY wAreaNum; /* number of transfer area to set up */
+ unsigned short wAreaNum; /* number of transfer area to set up */
short eSize; /* element size - is tohost flag for circular */
-} TRANSFERDESC;
+};
-typedef TRANSFERDESC *LPTRANSFERDESC;
-typedef struct TransferEvent {
+struct transfer_event {
unsigned int dwStart; /* offset into the area */
unsigned int dwLength; /* length of the region */
unsigned short wAreaNum; /* the area number */
unsigned short wFlags; /* bit 0 set for toHost */
int iSetEvent; /* could be dummy in LINUX */
-} TRANSFEREVENT;
+};
#define MAX_TRANSFER_SIZE 0x4000 /* Maximum data bytes per IRP */
#define MAX_AREA_LENGTH 0x100000 /* Maximum size of transfer area */
@@ -85,12 +82,6 @@ typedef struct TCSBlock {
*/
#define CED_MAGIC_IOC 0xce
-/* NBNB: READ and WRITE are from the point of view of the device, not user. */
-typedef struct ced_ioc_string {
- int nChars;
- char buffer[256];
-} CED_IOC_STRING;
-
#define IOCTL_CED_SENDSTRING(n) _IOC(_IOC_WRITE, CED_MAGIC_IOC, 2, n)
#define IOCTL_CED_RESET1401 _IO(CED_MAGIC_IOC, 3)
@@ -100,9 +91,9 @@ typedef struct ced_ioc_string {
#define IOCTL_CED_LINECOUNT _IO(CED_MAGIC_IOC, 7)
#define IOCTL_CED_GETSTRING(nMax) _IOC(_IOC_READ, CED_MAGIC_IOC, 8, nMax)
-#define IOCTL_CED_SETTRANSFER _IOW(CED_MAGIC_IOC, 11, TRANSFERDESC)
+#define IOCTL_CED_SETTRANSFER _IOW(CED_MAGIC_IOC, 11, struct transfer_area_desc)
#define IOCTL_CED_UNSETTRANSFER _IO(CED_MAGIC_IOC, 12)
-#define IOCTL_CED_SETEVENT _IOW(CED_MAGIC_IOC, 13, TRANSFEREVENT)
+#define IOCTL_CED_SETEVENT _IOW(CED_MAGIC_IOC, 13, struct transfer_event)
#define IOCTL_CED_GETOUTBUFSPACE _IO(CED_MAGIC_IOC, 14)
#define IOCTL_CED_GETBASEADDRESS _IO(CED_MAGIC_IOC, 15)
#define IOCTL_CED_GETDRIVERREVISION _IO(CED_MAGIC_IOC, 16)
@@ -126,7 +117,7 @@ typedef struct ced_ioc_string {
#define IOCTL_CED_DBGGETDATA _IOR(CED_MAGIC_IOC, 39, TDBGBLOCK)
#define IOCTL_CED_DBGSTOPLOOP _IO(CED_MAGIC_IOC, 40)
#define IOCTL_CED_FULLRESET _IO(CED_MAGIC_IOC, 41)
-#define IOCTL_CED_SETCIRCULAR _IOW(CED_MAGIC_IOC, 42, TRANSFERDESC)
+#define IOCTL_CED_SETCIRCULAR _IOW(CED_MAGIC_IOC, 42, struct transfer_area_desc)
#define IOCTL_CED_GETCIRCBLOCK _IOWR(CED_MAGIC_IOC, 43, TCIRCBLOCK)
#define IOCTL_CED_FREECIRCBLOCK _IOWR(CED_MAGIC_IOC, 44, TCIRCBLOCK)
#define IOCTL_CED_WAITEVENT _IO(CED_MAGIC_IOC, 45)
@@ -198,7 +189,7 @@ inline int CED_GetDriverRevision(int fh)
return ioctl(fh, IOCTL_CED_GETDRIVERREVISION);
}
-inline int CED_SetTransfer(int fh, TRANSFERDESC *pTD)
+inline int CED_SetTransfer(int fh, struct transfer_area_desc *pTD)
{
return ioctl(fh, IOCTL_CED_SETTRANSFER, pTD);
}
@@ -208,7 +199,7 @@ inline int CED_UnsetTransfer(int fh, int nArea)
return ioctl(fh, IOCTL_CED_UNSETTRANSFER, nArea);
}
-inline int CED_SetEvent(int fh, TRANSFEREVENT *pTE)
+inline int CED_SetEvent(int fh, struct transfer_event *pTE)
{
return ioctl(fh, IOCTL_CED_SETEVENT, pTE);
}
@@ -299,7 +290,7 @@ inline int CED_FullReset(int fh)
return ioctl(fh, IOCTL_CED_FULLRESET);
}
-inline int CED_SetCircular(int fh, TRANSFERDESC *pTD)
+inline int CED_SetCircular(int fh, struct transfer_area_desc *pTD)
{
return ioctl(fh, IOCTL_CED_SETCIRCULAR, pTD);
}
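
The typedef removals above follow kernel style: the ioctl definitions and the inline wrappers now name the struct tags directly. A hypothetical user-space caller (field names as in ced_ioctl.h above; fh, buffer and length are assumed variables, not taken from the patch) would now look like:

	struct transfer_area_desc td;

	td.lpvBuff  = (long long)(unsigned long)buffer;	/* transfer area address */
	td.dwLength = length;				/* length of the area */
	td.wAreaNum = 0;				/* transfer area number */
	td.eSize    = 2;				/* element size */
	if (CED_SetTransfer(fh, &td) < 0)
		return -1;			/* request rejected by the driver */
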
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index efc310ca789e..284abc08922c 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -166,7 +166,7 @@ static int ced_open(struct inode *inode, struct file *file)
goto exit;
}
- dev_dbg(&interface->dev, "%s got pdx", __func__);
+ dev_dbg(&interface->dev, "%s: got pdx\n", __func__);
/* increment our usage count for the device */
kref_get(&pdx->kref);
@@ -184,7 +184,7 @@ static int ced_open(struct inode *inode, struct file *file)
goto exit;
}
} else { /* uncomment this block if you want exclusive open */
- dev_err(&interface->dev, "%s fail: already open", __func__);
+ dev_err(&interface->dev, "%s: fail: already open\n", __func__);
retval = -EBUSY;
pdx->open_count--;
mutex_unlock(&pdx->io_mutex);
@@ -207,7 +207,7 @@ static int ced_release(struct inode *inode, struct file *file)
if (pdx == NULL)
return -ENODEV;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
mutex_lock(&pdx->io_mutex);
if (!--pdx->open_count && pdx->interface) /* Allow autosuspend */
usb_autopm_put_interface(pdx->interface);
@@ -224,12 +224,12 @@ static int ced_flush(struct file *file, fl_owner_t id)
if (pdx == NULL)
return -ENODEV;
- dev_dbg(&pdx->interface->dev, "%s char in pend=%d", __func__,
- pdx->bReadCharsPending);
+ dev_dbg(&pdx->interface->dev, "%s: char in pend=%d\n",
+ __func__, pdx->bReadCharsPending);
/* wait for io to stop */
mutex_lock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s got io_mutex", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: got io_mutex\n", __func__);
ced_draw_down(pdx);
/* read out errors, leave subsequent opens a clean slate */
@@ -239,7 +239,7 @@ static int ced_flush(struct file *file, fl_owner_t id)
spin_unlock_irq(&pdx->err_lock);
mutex_unlock(&pdx->io_mutex);
- dev_dbg(&pdx->interface->dev, "%s exit reached", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: exit reached\n", __func__);
return res;
}
@@ -270,7 +270,7 @@ static void ced_writechar_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
}
@@ -287,10 +287,10 @@ static void ced_writechar_callback(struct urb *pUrb)
pdx->bSendCharsPending = false; /* Allow other threads again */
spin_unlock(&pdx->charOutLock); /* already at irq level */
dev_dbg(&pdx->interface->dev,
- "%s - char out done, 0 chars sent", __func__);
+ "%s: char out done, 0 chars sent\n", __func__);
} else {
dev_dbg(&pdx->interface->dev,
- "%s - char out done, %d chars sent", __func__, nGot);
+ "%s: char out done, %d chars sent\n", __func__, nGot);
spin_lock(&pdx->charOutLock); /* already at irq level */
pdx->dwNumOutput -= nGot; /* Now adjust the char send buffer */
pdx->dwOutBuffGet += nGot; /* to match what we did */
@@ -315,15 +315,15 @@ static void ced_writechar_callback(struct urb *pUrb)
URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); /* in case we need to kill it */
iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
- dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
- dwCount, pDat);
+ dev_dbg(&pdx->interface->dev, "%s: n=%d>%s<\n",
+ __func__, dwCount, pDat);
spin_lock(&pdx->charOutLock); /* grab lock for errors */
if (iReturn) {
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
pdx->bSendCharsPending = false; /* Allow other threads again */
usb_unanchor_urb(pdx->pUrbCharOut);
dev_err(&pdx->interface->dev,
- "%s usb_submit_urb() returned %d",
+ "%s: usb_submit_urb() returned %d\n",
__func__, iReturn);
}
} else
@@ -350,8 +350,8 @@ int SendChars(DEVICE_EXTENSION *pdx)
pdx->bSendCharsPending = true; /* Set flag to lock out other threads */
dev_dbg(&pdx->interface->dev,
- "Send %d chars to 1401, EP0 flag %d\n", dwCount,
- pdx->nPipes == 3);
+ "Send %d chars to 1401, EP0 flag %d\n",
+ dwCount, pdx->nPipes == 3);
/* If we have only 3 end points we must send the characters to the 1401 using EP0. */
if (pdx->nPipes == 3) {
/* For EP0 character transmissions to the 1401, we have to hang about until they */
@@ -375,11 +375,11 @@ int SendChars(DEVICE_EXTENSION *pdx)
if (nSent <= 0) {
iReturn = nSent ? nSent : -ETIMEDOUT; /* if 0 chars says we timed out */
dev_err(&pdx->interface->dev,
- "Send %d chars by EP0 failed: %d",
+ "Send %d chars by EP0 failed: %d\n",
n, iReturn);
} else {
dev_dbg(&pdx->interface->dev,
- "Sent %d chars by EP0", n);
+ "Sent %d chars by EP0\n", n);
count -= nSent;
index += nSent;
}
@@ -416,9 +416,9 @@ int SendChars(DEVICE_EXTENSION *pdx)
}
} else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
dev_dbg(&pdx->interface->dev,
- "SendChars bSendCharsPending:true");
+ "%s: bSendCharsPending:true\n", __func__);
- dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: exit code: %d\n", __func__, iReturn);
spin_unlock_irq(&pdx->charOutLock); /* Now let go of the spinlock */
return iReturn;
}
@@ -446,7 +446,7 @@ static void CopyUserSpace(DEVICE_EXTENSION *pdx, int n)
pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
char *pCoherBuf = pdx->pCoherStagedIO; /* coherent buffer */
if (!pArea->bUsed) {
- dev_err(&pdx->interface->dev, "%s area %d unused",
+ dev_err(&pdx->interface->dev, "%s: area %d unused\n",
__func__, nArea);
return;
}
@@ -474,21 +474,21 @@ static void CopyUserSpace(DEVICE_EXTENSION *pdx, int n)
n -= uiXfer;
} else {
dev_err(&pdx->interface->dev,
- "%s did not map page %d",
+ "%s: did not map page %d\n",
__func__, nPage);
return;
}
} else {
dev_err(&pdx->interface->dev,
- "%s exceeded pages %d", __func__,
- nPage);
+ "%s: exceeded pages %d\n",
+ __func__, nPage);
return;
}
}
} else
- dev_err(&pdx->interface->dev, "%s bad area %d", __func__,
- nArea);
+ dev_err(&pdx->interface->dev, "%s: bad area %d\n",
+ __func__, nArea);
}
/* Forward declarations for stuff used circularly */
@@ -513,11 +513,11 @@ static void staged_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
} else
dev_info(&pdx->interface->dev,
- "%s - staged xfer cancelled", __func__);
+ "%s: staged xfer cancelled\n", __func__);
spin_lock(&pdx->err_lock);
pdx->errors = pUrb->status;
@@ -525,26 +525,26 @@ static void staged_callback(struct urb *pUrb)
nGot = 0; /* and tidy up again if so */
bCancel = true;
} else {
- dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
- nGot);
+ dev_dbg(&pdx->interface->dev, "%s: %d chars xferred\n",
+ __func__, nGot);
if (pdx->StagedRead) /* if reading, save to user space */
CopyUserSpace(pdx, nGot); /* copy from buffer to user */
if (nGot == 0)
- dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: ZLP\n", __func__);
}
/* Update the transfer length based on the TransferBufferLength value in the URB */
pdx->StagedDone += nGot;
- dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
- pdx->StagedDone, pdx->StagedLength);
+ dev_dbg(&pdx->interface->dev, "%s: done %d bytes of %d\n",
+ __func__, pdx->StagedDone, pdx->StagedLength);
if ((pdx->StagedDone == pdx->StagedLength) || /* If no more to do */
(bCancel)) { /* or this IRP was cancelled */
TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; /* Transfer area info */
dev_dbg(&pdx->interface->dev,
- "%s transfer done, bytes %d, cancel %d", __func__,
- pdx->StagedDone, bCancel);
+ "%s: transfer done, bytes %d, cancel %d\n",
+ __func__, pdx->StagedDone, bCancel);
/* Here is where we sort out what to do with this transfer if using a circular buffer. We have */
/* a completed transfer that can be assumed to fit into the transfer area. We should be able to */
@@ -559,7 +559,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize +=
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 1 now %d bytes at %d",
+ "RWM_Complete, circ block 1 now %d bytes at %d\n",
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
} else {
@@ -569,7 +569,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize =
pdx->StagedLength;
dev_err(&pdx->interface->dev,
- "%s ERROR, circ block 1 re-started %d bytes at %d",
+ "%s: ERROR, circ block 1 re-started %d bytes at %d\n",
__func__,
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
@@ -582,7 +582,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[0].dwSize)) {
pArea->aBlocks[0].dwSize += pdx->StagedLength; /* Just add this transfer in */
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 0 now %d bytes at %d",
+ "RWM_Complete, circ block 0 now %d bytes at %d\n",
pArea->aBlocks[0].
dwSize,
pArea->aBlocks[0].
@@ -593,7 +593,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize =
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 1 started %d bytes at %d",
+ "RWM_Complete, circ block 1 started %d bytes at %d\n",
pArea->aBlocks[1].
dwSize,
pArea->aBlocks[1].
@@ -605,7 +605,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[0].dwSize =
pdx->StagedLength;
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, circ block 0 started %d bytes at %d",
+ "RWM_Complete, circ block 0 started %d bytes at %d\n",
pArea->aBlocks[0].dwSize,
pArea->aBlocks[0].dwOffset);
}
@@ -614,7 +614,7 @@ static void staged_callback(struct urb *pUrb)
if (!bCancel) { /* Don't generate an event if cancelled */
dev_dbg(&pdx->interface->dev,
- "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
+ "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d\n",
pArea->bCircular, pArea->bEventToHost,
pArea->dwEventSt, pArea->dwEventSz);
if ((pArea->dwEventSz) && /* Set a user-mode event... */
@@ -641,7 +641,7 @@ static void staged_callback(struct urb *pUrb)
if (iWakeUp) {
dev_dbg(&pdx->interface->dev,
- "About to set event to notify app");
+ "About to set event to notify app\n");
wake_up_interruptible(&pArea->wqEvent); /* wake up waiting processes */
++pArea->iWakeUp; /* increment wakeup count */
}
@@ -655,7 +655,7 @@ static void staged_callback(struct urb *pUrb)
if (pdx->bXFerWaiting) { /* Got a block xfer waiting? */
int iReturn;
dev_info(&pdx->interface->dev,
- "*** RWM_Complete *** pending transfer will now be set up!!!");
+ "*** RWM_Complete *** pending transfer will now be set up!!!\n");
iReturn =
ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
pdx->rDMAInfo.wIdent,
@@ -664,7 +664,7 @@ static void staged_callback(struct urb *pUrb)
if (iReturn)
dev_err(&pdx->interface->dev,
- "RWM_Complete rw setup failed %d",
+ "RWM_Complete rw setup failed %d\n",
iReturn);
}
}
@@ -685,7 +685,7 @@ static void staged_callback(struct urb *pUrb)
/* not be upset by char input during DMA... sigh. Needs sorting out. */
if (bRestartCharInput) /* may be out of date, but... */
Allowi(pdx); /* ...Allowi tests a lock too. */
- dev_dbg(&pdx->interface->dev, "%s done", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: done\n", __func__);
}
/****************************************************************************
@@ -707,7 +707,7 @@ static int StageChunk(DEVICE_EXTENSION *pdx)
return U14ERR_FAIL;
if (!CanAcceptIoRequests(pdx)) { /* got sudden remove? */
- dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
+ dev_info(&pdx->interface->dev, "%s: sudden remove, giving up\n",
__func__);
return U14ERR_FAIL; /* could do with a better error */
}
@@ -731,11 +731,11 @@ static int StageChunk(DEVICE_EXTENSION *pdx)
if (iReturn) {
usb_unanchor_urb(pdx->pStagedUrb); /* kill it */
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
- dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
+ dev_err(&pdx->interface->dev, "%s: submit urb failed, code %d\n",
__func__, iReturn);
} else
pdx->bStagedUrbPending = true; /* Set the flag for staged URB pending */
- dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
+ dev_dbg(&pdx->interface->dev, "%s: done so far:%d, this size:%d\n",
__func__, pdx->StagedDone, ChunkSize);
return iReturn;
@@ -764,28 +764,28 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
TRANSAREA *pArea = &pdx->rTransDef[wIdent]; /* Transfer area info */
if (!CanAcceptIoRequests(pdx)) { /* Are we in a state to accept new requests? */
- dev_err(&pdx->interface->dev, "%s can't accept requests",
+ dev_err(&pdx->interface->dev, "%s: can't accept requests\n",
__func__);
return U14ERR_FAIL;
}
dev_dbg(&pdx->interface->dev,
- "%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
- Read ? "host" : "1401", dwOffs, wIdent);
+ "%s: xfer %d bytes to %s, offset %d, area %d\n",
+ __func__, dwLen, Read ? "host" : "1401", dwOffs, wIdent);
/* Amazingly, we can get an escape sequence back before the current staged Urb is done, so we */
/* have to check for this situation and, if so, wait until all is OK. */
if (pdx->bStagedUrbPending) {
pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_info(&pdx->interface->dev,
- "%s xfer is waiting, as previous staged pending",
+ "%s: xfer is waiting, as previous staged pending\n",
__func__);
return U14ERR_NOERROR;
}
if (dwLen == 0) { /* allow 0-len read or write; just return success */
dev_dbg(&pdx->interface->dev,
- "%s OK; zero-len read/write request", __func__);
+ "%s: OK; zero-len read/write request\n", __func__);
return U14ERR_NOERROR;
}
@@ -795,7 +795,7 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
bool bWait = false; /* Flag for transfer having to wait */
dev_dbg(&pdx->interface->dev,
- "Circular buffers are %d at %d and %d at %d",
+ "Circular buffers are %d at %d and %d at %d\n",
pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
if (pArea->aBlocks[1].dwSize > 0) { /* Using the second block already? */
@@ -819,14 +819,14 @@ int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
if (bWait) { /* This transfer will have to wait? */
pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_dbg(&pdx->interface->dev,
- "%s xfer waiting for circular buffer space",
+ "%s: xfer waiting for circular buffer space\n",
__func__);
return U14ERR_NOERROR;
}
dev_dbg(&pdx->interface->dev,
- "%s circular xfer, %d bytes starting at %d", __func__,
- dwLen, dwOffs);
+ "%s: circular xfer, %d bytes starting at %d\n",
+ __func__, dwLen, dwOffs);
}
/* Save the parameters for the read\write transfer */
pdx->StagedRead = Read; /* Save the parameters for this read */
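
The circular-buffer bookkeeping touched above keeps two descriptors, aBlocks[0] and aBlocks[1], for the valid data in a to-host transfer area. A rough sketch of the completion-side accounting in staged_callback() (illustration only; 'done' is the size of the finished staged transfer, 'offset' its start, and the empty-area case is omitted):

	if (area->aBlocks[1].dwSize > 0) {
		/* second block already open: new data lands at its end */
		area->aBlocks[1].dwSize += done;
	} else if (offset == area->aBlocks[0].dwOffset + area->aBlocks[0].dwSize) {
		/* contiguous with block 0: just extend it */
		area->aBlocks[0].dwSize += done;
	} else {
		/* did not follow block 0: open block 1 at the transfer's offset */
		area->aBlocks[1].dwOffset = offset;
		area->aBlocks[1].dwSize = done;
	}
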
@@ -948,7 +948,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
unsigned char ucData;
unsigned int dDone = 0; /* We haven't parsed anything so far */
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
unsigned char ucTransCode = (ucData & 0x0F); /* get code for transfer type */
@@ -960,8 +960,8 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
pDmaDesc->dwSize = 0; /* initialise other bits */
pDmaDesc->dwOffset = 0;
- dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
- pDmaDesc->wTransType, pDmaDesc->wIdent);
+ dev_dbg(&pdx->interface->dev, "%s: type: %d ident: %d\n",
+ __func__, pDmaDesc->wTransType, pDmaDesc->wIdent);
pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); /* set transfer direction */
@@ -976,7 +976,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
&dDone, dwCount);
if (bResult) {
dev_dbg(&pdx->interface->dev,
- "%s xfer offset & size %d %d",
+ "%s: xfer offset & size %d %d\n",
__func__, pDmaDesc->dwOffset,
pDmaDesc->dwSize);
@@ -989,7 +989,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
dwLength))) {
bResult = false; /* bad parameter(s) */
dev_dbg(&pdx->interface->dev,
- "%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
+ "%s: bad param - id %d, bUsed %d, offset %d, size %d, area length %d\n",
__func__, wIdent,
pdx->rTransDef[wIdent].
bUsed,
@@ -1008,7 +1008,7 @@ static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
bResult = false;
if (!bResult) /* now check parameters for validity */
- dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
+ dev_err(&pdx->interface->dev, "%s: error reading Esc sequence\n",
__func__);
return bResult;
@@ -1045,15 +1045,15 @@ static int Handle1401Esc(DEVICE_EXTENSION *pdx, char *pCh,
unsigned short wTransType = pdx->rDMAInfo.wTransType; /* check transfer type */
dev_dbg(&pdx->interface->dev,
- "%s xfer to %s, offset %d, length %d", __func__,
+ "%s: xfer to %s, offset %d, length %d\n",
+ __func__,
pdx->rDMAInfo.bOutWard ? "1401" : "host",
pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
if (pdx->bXFerWaiting) { /* Check here for badly out of kilter... */
/* This can never happen, really */
dev_err(&pdx->interface->dev,
- "ERROR: DMA setup while transfer still waiting");
- spin_unlock(&pdx->stagedLock);
+ "ERROR: DMA setup while transfer still waiting\n");
} else {
if ((wTransType == TM_EXTTOHOST)
|| (wTransType == TM_EXTTO1401)) {
@@ -1066,21 +1066,21 @@ static int Handle1401Esc(DEVICE_EXTENSION *pdx, char *pCh,
pdx->rDMAInfo.dwSize);
if (iReturn != U14ERR_NOERROR)
dev_err(&pdx->interface->dev,
- "%s ReadWriteMem() failed %d",
+ "%s: ReadWriteMem() failed %d\n",
__func__, iReturn);
} else /* This covers non-linear transfer setup */
dev_err(&pdx->interface->dev,
- "%s Unknown block xfer type %d",
+ "%s: Unknown block xfer type %d\n",
__func__, wTransType);
}
} else /* Failed to read parameters */
- dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
+ dev_err(&pdx->interface->dev, "%s: ReadDMAInfo() fail\n",
__func__);
spin_unlock(&pdx->stagedLock); /* OK here */
}
- dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
+ dev_dbg(&pdx->interface->dev, "%s: returns %d\n", __func__, iReturn);
return iReturn;
}
@@ -1100,11 +1100,11 @@ static void ced_readchar_callback(struct urb *pUrb)
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
dev_err(&pdx->interface->dev,
- "%s - nonzero write bulk status received: %d",
+ "%s: nonzero write bulk status received: %d\n",
__func__, pUrb->status);
} else
dev_dbg(&pdx->interface->dev,
- "%s - 0 chars pUrb->status=%d (shutdown?)",
+ "%s: 0 chars pUrb->status=%d (shutdown?)\n",
__func__, pUrb->status);
spin_lock(&pdx->err_lock);
@@ -1125,7 +1125,7 @@ static void ced_readchar_callback(struct urb *pUrb)
if (nGot < INBUF_SZ) {
pdx->pCoherCharIn[nGot] = 0; /* tidy the string */
dev_dbg(&pdx->interface->dev,
- "%s got %d chars >%s<",
+ "%s: got %d chars >%s<\n",
__func__, nGot,
pdx->pCoherCharIn);
}
@@ -1140,7 +1140,7 @@ static void ced_readchar_callback(struct urb *pUrb)
if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
pdx->dwNumInput += nGot; /* Adjust the buffer count accordingly */
} else
- dev_dbg(&pdx->interface->dev, "%s read ZLP",
+ dev_dbg(&pdx->interface->dev, "%s: read ZLP\n",
__func__);
}
}
@@ -1178,7 +1178,7 @@ int Allowi(DEVICE_EXTENSION *pdx)
unsigned int nMax = INBUF_SZ - pdx->dwNumInput; /* max we could read */
int nPipe = pdx->nPipes == 4 ? 1 : 0; /* The pipe number to use */
- dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
+ dev_dbg(&pdx->interface->dev, "%s: %d chars in input buffer\n",
__func__, pdx->dwNumInput);
usb_fill_int_urb(pdx->pUrbCharIn, pdx->udev,
@@ -1192,7 +1192,8 @@ int Allowi(DEVICE_EXTENSION *pdx)
usb_unanchor_urb(pdx->pUrbCharIn); /* remove from list of active Urbs */
pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
dev_err(&pdx->interface->dev,
- "%s submit urb failed: %d", __func__, iReturn);
+ "%s: submit urb failed: %d\n",
+ __func__, iReturn);
} else
pdx->bReadCharsPending = true; /* Flag that we are active here */
}
@@ -1250,13 +1251,13 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
return GetString(pdx, (char __user *)ulArg, _IOC_SIZE(cmd));
case _IOC_NR(IOCTL_CED_SETTRANSFER):
- return SetTransfer(pdx, (TRANSFERDESC __user *) ulArg);
+ return SetTransfer(pdx, (struct transfer_area_desc __user *) ulArg);
case _IOC_NR(IOCTL_CED_UNSETTRANSFER):
return UnsetTransfer(pdx, (int)ulArg);
case _IOC_NR(IOCTL_CED_SETEVENT):
- return SetEvent(pdx, (TRANSFEREVENT __user *) ulArg);
+ return SetEvent(pdx, (struct transfer_event __user *) ulArg);
case _IOC_NR(IOCTL_CED_GETOUTBUFSPACE):
return GetOutBufSpace(pdx);
@@ -1315,7 +1316,7 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
break;
case _IOC_NR(IOCTL_CED_SETCIRCULAR):
- return SetCircular(pdx, (TRANSFERDESC __user *) ulArg);
+ return SetCircular(pdx, (struct transfer_area_desc __user *) ulArg);
case _IOC_NR(IOCTL_CED_GETCIRCBLOCK):
return GetCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
@@ -1397,7 +1398,7 @@ static int ced_probe(struct usb_interface *interface,
else if ((i >= 1) && (i <= 23))
pdx->s1401Type = i + 2;
else {
- dev_err(&interface->dev, "%s Unknown device. bcdDevice = %d",
+ dev_err(&interface->dev, "%s: Unknown device. bcdDevice = %d\n",
__func__, bcdDevice);
goto error;
}
@@ -1405,7 +1406,7 @@ static int ced_probe(struct usb_interface *interface,
/* we know that we are dealing with a 1401 device. */
iface_desc = interface->cur_altsetting;
pdx->nPipes = iface_desc->desc.bNumEndpoints;
- dev_info(&interface->dev, "1401Type=%d with %d End Points",
+ dev_info(&interface->dev, "1401Type=%d with %d End Points\n",
pdx->s1401Type, pdx->nPipes);
if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
goto error;
@@ -1415,7 +1416,7 @@ static int ced_probe(struct usb_interface *interface,
pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); /* character input URB */
pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); /* block transfer URB */
if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
- dev_err(&interface->dev, "%s URB alloc failed", __func__);
+ dev_err(&interface->dev, "%s: URB alloc failed\n", __func__);
goto error;
}
@@ -1429,7 +1430,7 @@ static int ced_probe(struct usb_interface *interface,
usb_alloc_coherent(pdx->udev, INBUF_SZ, GFP_KERNEL,
&pdx->pUrbCharIn->transfer_dma);
if (!pdx->pCoherCharOut || !pdx->pCoherCharIn || !pdx->pCoherStagedIO) {
- dev_err(&interface->dev, "%s Coherent buffer alloc failed",
+ dev_err(&interface->dev, "%s: Coherent buffer alloc failed\n",
__func__);
goto error;
}
@@ -1437,19 +1438,19 @@ static int ced_probe(struct usb_interface *interface,
for (i = 0; i < pdx->nPipes; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
pdx->epAddr[i] = endpoint->bEndpointAddress;
- dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
- pdx->epAddr[i]);
+ dev_info(&interface->dev, "Pipe %d, ep address %02x\n",
+ i, pdx->epAddr[i]);
if (((pdx->nPipes == 3) && (i == 0)) || /* if char input end point */
((pdx->nPipes == 4) && (i == 1))) {
pdx->bInterval = endpoint->bInterval; /* save the endpoint interrupt interval */
- dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
- pdx->bInterval);
+ dev_info(&interface->dev, "Pipe %d, bInterval = %d\n",
+ i, pdx->bInterval);
}
/* Detect USB2 by checking last ep size (64 if USB1) */
if (i == pdx->nPipes - 1) { /* if this is the last ep (bulk) */
pdx->bIsUSB2 =
le16_to_cpu(endpoint->wMaxPacketSize) > 64;
- dev_info(&pdx->interface->dev, "USB%d",
+ dev_info(&pdx->interface->dev, "USB%d\n",
pdx->bIsUSB2 + 1);
}
}
@@ -1462,14 +1463,14 @@ static int ced_probe(struct usb_interface *interface,
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev,
- "Not able to get a minor for this device.\n");
+ "Not able to get a minor for this device\n");
usb_set_intfdata(interface, NULL);
goto error;
}
/* let the user know what node this device is now attached to */
dev_info(&interface->dev,
- "USB CEDUSB device now attached to cedusb #%d",
+ "USB CEDUSB device now attached to cedusb #%d\n",
interface->minor);
return 0;
@@ -1493,7 +1494,7 @@ static void ced_disconnect(struct usb_interface *interface)
for (i = 0; i < MAX_TRANSAREAS; ++i) {
int iErr = ClearArea(pdx, i); /* ...release any used memory */
if (iErr == U14ERR_UNLOCKFAIL)
- dev_err(&pdx->interface->dev, "%s Area %d was in used",
+ dev_err(&pdx->interface->dev, "%s: Area %d was in used\n",
__func__, i);
}
pdx->interface = NULL; /* ...we kill off link to interface */
@@ -1503,7 +1504,7 @@ static void ced_disconnect(struct usb_interface *interface)
kref_put(&pdx->kref, ced_delete); /* decrement our usage count */
- dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
+ dev_info(&interface->dev, "USB cedusb #%d now disconnected\n", minor);
}
/* Wait for all the urbs we know of to be done with, then kill off any that */
@@ -1513,13 +1514,13 @@ static void ced_disconnect(struct usb_interface *interface)
void ced_draw_down(DEVICE_EXTENSION *pdx)
{
int time;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
pdx->bInDrawDown = true;
time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
if (!time) { /* if we timed out we kill the urbs */
usb_kill_anchored_urbs(&pdx->submitted);
- dev_err(&pdx->interface->dev, "%s timed out", __func__);
+ dev_err(&pdx->interface->dev, "%s: timed out\n", __func__);
}
pdx->bInDrawDown = false;
}
@@ -1531,7 +1532,7 @@ static int ced_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
ced_draw_down(pdx);
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
return 0;
}
@@ -1540,14 +1541,14 @@ static int ced_resume(struct usb_interface *intf)
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
if (!pdx)
return 0;
- dev_dbg(&pdx->interface->dev, "%s called", __func__);
+ dev_dbg(&pdx->interface->dev, "%s: called\n", __func__);
return 0;
}
static int ced_pre_reset(struct usb_interface *intf)
{
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
mutex_lock(&pdx->io_mutex);
ced_draw_down(pdx);
return 0;
@@ -1556,7 +1557,7 @@ static int ced_pre_reset(struct usb_interface *intf)
static int ced_post_reset(struct usb_interface *intf)
{
DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
- dev_dbg(&pdx->interface->dev, "%s", __func__);
+ dev_dbg(&pdx->interface->dev, "%s\n", __func__);
/* we are sure no URBs are active - no locking needed */
pdx->errors = -EPIPE;
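
usb1401.c relies on the USB core's anchoring helpers so that ced_draw_down() (above) can quiesce the device from flush, suspend and pre-reset. For illustration only, the pattern is:

	/* submit side: anchor first so tear-down can find the URB */
	usb_anchor_urb(urb, &pdx->submitted);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);	/* submit failed, drop it from the anchor */

	/* tear-down side (ced_draw_down): wait up to 3 s, then force-kill */
	if (!usb_wait_anchor_empty_timeout(&pdx->submitted, 3000))
		usb_kill_anchored_urbs(&pdx->submitted);
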
diff --git a/drivers/staging/ced1401/usb1401.h b/drivers/staging/ced1401/usb1401.h
index f031e3a2c7cf..ea0fe6398a01 100644
--- a/drivers/staging/ced1401/usb1401.h
+++ b/drivers/staging/ced1401/usb1401.h
@@ -218,9 +218,9 @@ extern bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset);
extern int Reset1401(DEVICE_EXTENSION *pdx);
extern int GetChar(DEVICE_EXTENSION *pdx);
extern int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n);
-extern int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
+extern int SetTransfer(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD);
extern int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea);
-extern int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE);
+extern int SetEvent(DEVICE_EXTENSION *pdx, struct transfer_event __user *pTE);
extern int Stat1401(DEVICE_EXTENSION *pdx);
extern int LineCount(DEVICE_EXTENSION *pdx);
extern int GetOutBufSpace(DEVICE_EXTENSION *pdx);
@@ -238,7 +238,7 @@ extern int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgStopLoop(DEVICE_EXTENSION *pdx);
-extern int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
+extern int SetCircular(DEVICE_EXTENSION *pdx, struct transfer_area_desc __user *pTD);
extern int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
extern int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
extern int WaitEvent(DEVICE_EXTENSION *pdx, int nArea, int msTimeOut);
diff --git a/drivers/staging/ced1401/use14_ioc.h b/drivers/staging/ced1401/use14_ioc.h
index 97d7913840dc..42d2e4e6e9a9 100644
--- a/drivers/staging/ced1401/use14_ioc.h
+++ b/drivers/staging/ced1401/use14_ioc.h
@@ -276,15 +276,14 @@ typedef struct paramBlk {
typedef PARAMBLK* PPARAMBLK;
-typedef struct TransferDesc /* Structure and type for SetTransArea */
+struct transfer_area_desc /* Structure and type for SetTransArea */
{
unsigned short wArea; /* number of transfer area to set up */
void FAR *lpvBuff; /* address of transfer area */
unsigned int dwLength; /* length of area to set up */
short eSize; /* size to move (for swapping on MAC) */
-} TRANSFERDESC;
+};
-typedef TRANSFERDESC FAR *LPTRANSFERDESC;
/* This is the structure used to set up a transfer area */
typedef struct VXTransferDesc /* use1401.c and use1432x.x use only */
diff --git a/drivers/staging/ced1401/userspace/use1401.c b/drivers/staging/ced1401/userspace/use1401.c
index c9bc2ebfef1a..7b8a2227fe5b 100644
--- a/drivers/staging/ced1401/userspace/use1401.c
+++ b/drivers/staging/ced1401/userspace/use1401.c
@@ -2231,7 +2231,7 @@ U14API(short) U14UnSetTransfer(short hand, unsigned short wArea)
U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
unsigned int dwLength, short eSz)
{
- TRANSFERDESC td;
+ struct transfer_area_desc td;
short sErr = CheckHandle(hand);
if (sErr != U14ERR_NOERROR)
return sErr;
@@ -2292,7 +2292,7 @@ U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
td.eSize = 0; // Dummy element size
if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETTRANSFER,
- &td,sizeof(TRANSFERDESC),
+ &td,sizeof(struct transfer_area_desc),
&rWork,sizeof(PARAMBLK),&dwBytes,NULL))
{
if (dwBytes >= sizeof(PARAMBLK)) // maybe error from driver?
@@ -2496,14 +2496,14 @@ U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost,
{
PARAMBLK rWork;
unsigned int dwBytes;
- TRANSFERDESC txDesc;
+ struct transfer_area_desc txDesc;
txDesc.wArea = wArea; /* Pure NT - put data into struct */
txDesc.lpvBuff = pvBuff;
txDesc.dwLength = dwLength;
txDesc.eSize = (short)bToHost; /* Use this for direction flag */
if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETCIRCULAR,
- &txDesc, sizeof(TRANSFERDESC),
+ &txDesc, sizeof(struct transfer_area_desc),
&rWork, sizeof(PARAMBLK),&dwBytes,NULL))
{
if (dwBytes >= sizeof(PARAMBLK)) /* error from driver? */
@@ -2528,7 +2528,7 @@ U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost,
#ifdef LINUX
else
{
- TRANSFERDESC td;
+ struct transfer_area_desc td;
td.lpvBuff = (long long)((unsigned long)pvBuff);
td.wAreaNum = wArea;
td.dwLength = dwLength;
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 89e25b4203ad..703c5d40ae5c 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -177,6 +177,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -188,6 +189,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -197,6 +199,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
depends on VIRT_TO_BUS && ISA_DMA_API
+ select COMEDI_FC
---help---
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -257,6 +260,14 @@ config COMEDI_RTI802
To compile this driver as a module, choose M here: the module will be
called rti802.
+config COMEDI_DAC02
+ tristate "Keithley Metrabyte DAC02 compatible ISA card support"
+ ---help---
+ Enable support for Keithley Metrabyte DAC02 compatible ISA cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dac02.
+
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
select COMEDI_8255
@@ -559,15 +570,6 @@ config COMEDI_MULTIQ3
To compile this driver as a module, choose M here: the module will be
called multiq3.
-config COMEDI_POC
- tristate "Generic driver for very simple devices"
- ---help---
- Enable generic support for very simple / POC (Piece of Crap) boards,
- Keithley Metrabyte DAC-02 (dac02).
-
- To compile this driver as a module, choose M here: the module will be
- called poc.
-
config COMEDI_S526
tristate "Sensoray s526 support"
---help---
@@ -753,6 +755,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x, PCI-1720 and PCI-1731 support"
+ select COMEDI_FC
---help---
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713, PCI-1720 and PCI-1731
@@ -856,6 +859,7 @@ config COMEDI_DAS08_PCI
config COMEDI_DT3000
tristate "Data Translation DT3000 series support"
+ select COMEDI_FC
---help---
Enable support for Data Translation DT3000 series
DT3001, DT3001-PGL, DT3002, DT3003, DT3003-PGL, DT3004, DT3005 and
@@ -1101,6 +1105,7 @@ config COMEDI_S626
config COMEDI_MITE
depends on HAS_DMA
+ select COMEDI_FC
tristate
config COMEDI_NI_TIOCMD
@@ -1179,6 +1184,7 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
+ select COMEDI_FC
---help---
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index c22c617b0da1..ea6dc36d753b 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -297,7 +297,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t max_read_buffer_kb_store(struct device *csdev,
@@ -353,7 +353,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t read_buffer_kb_store(struct device *csdev,
@@ -410,7 +410,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t max_write_buffer_kb_store(struct device *csdev,
@@ -466,7 +466,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%i\n", size);
+ return snprintf(buf, PAGE_SIZE, "%u\n", size);
}
static ssize_t write_buffer_kb_store(struct device *csdev,
@@ -1194,6 +1194,11 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
switch (insn->insn) {
case INSN_READ:
ret = s->insn_read(dev, s, insn, data);
+ if (ret == -ETIMEDOUT) {
+ dev_dbg(dev->class_dev,
+ "subdevice %d read instruction timed out\n",
+ s->index);
+ }
break;
case INSN_WRITE:
maxdata = s->maxdata_list
@@ -1207,8 +1212,14 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
break;
}
}
- if (ret == 0)
+ if (ret == 0) {
ret = s->insn_write(dev, s, insn, data);
+ if (ret == -ETIMEDOUT) {
+ dev_dbg(dev->class_dev,
+ "subdevice %d write instruction timed out\n",
+ s->index);
+ }
+ }
break;
case INSN_BITS:
if (insn->n != 2) {
@@ -1405,41 +1416,94 @@ error:
return ret;
}
-static int do_cmd_ioctl(struct comedi_device *dev,
- struct comedi_cmd __user *arg, void *file)
+static int __comedi_get_user_cmd(struct comedi_device *dev,
+ struct comedi_cmd __user *arg,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd cmd;
struct comedi_subdevice *s;
- struct comedi_async *async;
- int ret = 0;
- unsigned int __user *user_chanlist;
- if (copy_from_user(&cmd, arg, sizeof(cmd))) {
+ if (copy_from_user(cmd, arg, sizeof(*cmd))) {
dev_dbg(dev->class_dev, "bad cmd address\n");
return -EFAULT;
}
- /* save user's chanlist pointer so it can be restored later */
- user_chanlist = (unsigned int __user *)cmd.chanlist;
- if (cmd.subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
+ if (cmd->subdev >= dev->n_subdevices) {
+ dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev);
return -ENODEV;
}
- s = &dev->subdevices[cmd.subdev];
- async = s->async;
+ s = &dev->subdevices[cmd->subdev];
if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
+ dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd->subdev);
return -EIO;
}
if (!s->do_cmd || !s->do_cmdtest || !s->async) {
dev_dbg(dev->class_dev,
- "subdevice %i does not support commands\n", cmd.subdev);
+ "subdevice %d does not support commands\n", cmd->subdev);
return -EIO;
}
+ /* make sure channel/gain list isn't too long */
+ if (cmd->chanlist_len > s->len_chanlist) {
+ dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
+ cmd->chanlist_len, s->len_chanlist);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __comedi_get_user_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int __user *user_chanlist,
+ struct comedi_cmd *cmd)
+{
+ unsigned int *chanlist;
+ int ret;
+
+ /* user_chanlist could be NULL for do_cmdtest ioctls */
+ if (!user_chanlist)
+ return 0;
+
+ chanlist = memdup_user(user_chanlist,
+ cmd->chanlist_len * sizeof(unsigned int));
+ if (IS_ERR(chanlist))
+ return PTR_ERR(chanlist);
+
+ /* make sure each element in channel/gain list is valid */
+ ret = comedi_check_chanlist(s, cmd->chanlist_len, chanlist);
+ if (ret < 0) {
+ kfree(chanlist);
+ return ret;
+ }
+
+ cmd->chanlist = chanlist;
+
+ return 0;
+}
+
+static int do_cmd_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *arg, void *file)
+{
+ struct comedi_cmd cmd;
+ struct comedi_subdevice *s;
+ struct comedi_async *async;
+ unsigned int __user *user_chanlist;
+ int ret;
+
+ /* get the user's cmd and do some simple validation */
+ ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ if (ret)
+ return ret;
+
+ /* save user's chanlist pointer so it can be restored later */
+ user_chanlist = (unsigned int __user *)cmd.chanlist;
+
+ s = &dev->subdevices[cmd.subdev];
+ async = s->async;
+
/* are we locked? (ioctl lock) */
if (s->lock && s->lock != file) {
dev_dbg(dev->class_dev, "subdevice locked\n");
@@ -1452,13 +1516,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
return -EBUSY;
}
- /* make sure channel/gain list isn't too long */
- if (cmd.chanlist_len > s->len_chanlist) {
- dev_dbg(dev->class_dev, "channel/gain list too long %u > %d\n",
- cmd.chanlist_len, s->len_chanlist);
- return -EINVAL;
- }
-
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n",
@@ -1468,25 +1525,11 @@ static int do_cmd_ioctl(struct comedi_device *dev,
async->cmd = cmd;
async->cmd.data = NULL;
- /* load channel/gain list */
- async->cmd.chanlist = memdup_user(user_chanlist,
- async->cmd.chanlist_len * sizeof(int));
- if (IS_ERR(async->cmd.chanlist)) {
- ret = PTR_ERR(async->cmd.chanlist);
- async->cmd.chanlist = NULL;
- dev_dbg(dev->class_dev, "memdup_user failed with code %d\n",
- ret);
- goto cleanup;
- }
- /* make sure each element in channel/gain list is valid */
- ret = comedi_check_chanlist(s,
- async->cmd.chanlist_len,
- async->cmd.chanlist);
- if (ret < 0) {
- dev_dbg(dev->class_dev, "bad chanlist\n");
+ /* load channel/gain list */
+ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd);
+ if (ret)
goto cleanup;
- }
ret = s->do_cmdtest(dev, s, &async->cmd);
@@ -1554,63 +1597,24 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
{
struct comedi_cmd cmd;
struct comedi_subdevice *s;
- int ret = 0;
unsigned int *chanlist = NULL;
unsigned int __user *user_chanlist;
+ int ret;
+
+ /* get the user's cmd and do some simple validation */
+ ret = __comedi_get_user_cmd(dev, arg, &cmd);
+ if (ret)
+ return ret;
- if (copy_from_user(&cmd, arg, sizeof(cmd))) {
- dev_dbg(dev->class_dev, "bad cmd address\n");
- return -EFAULT;
- }
/* save user's chanlist pointer so it can be restored later */
user_chanlist = (unsigned int __user *)cmd.chanlist;
- if (cmd.subdev >= dev->n_subdevices) {
- dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd.subdev);
- return -ENODEV;
- }
-
s = &dev->subdevices[cmd.subdev];
- if (s->type == COMEDI_SUBD_UNUSED) {
- dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd.subdev);
- return -EIO;
- }
-
- if (!s->do_cmd || !s->do_cmdtest) {
- dev_dbg(dev->class_dev,
- "subdevice %i does not support commands\n", cmd.subdev);
- return -EIO;
- }
-
- /* make sure channel/gain list isn't too long */
- if (cmd.chanlist_len > s->len_chanlist) {
- dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n",
- cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
- }
/* load channel/gain list */
- if (cmd.chanlist) {
- chanlist = memdup_user(user_chanlist,
- cmd.chanlist_len * sizeof(int));
- if (IS_ERR(chanlist)) {
- ret = PTR_ERR(chanlist);
- chanlist = NULL;
- dev_dbg(dev->class_dev,
- "memdup_user exited with code %d", ret);
- goto cleanup;
- }
-
- /* make sure each element in channel/gain list is valid */
- ret = comedi_check_chanlist(s, cmd.chanlist_len, chanlist);
- if (ret < 0) {
- dev_dbg(dev->class_dev, "bad chanlist\n");
- goto cleanup;
- }
-
- cmd.chanlist = chanlist;
- }
+ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
+ if (ret)
+ return ret;
ret = s->do_cmdtest(dev, s, &cmd);
@@ -1620,9 +1624,8 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
if (copy_to_user(arg, &cmd, sizeof(cmd))) {
dev_dbg(dev->class_dev, "bad cmd address\n");
ret = -EFAULT;
- goto cleanup;
}
-cleanup:
+
kfree(chanlist);
return ret;
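
The comedi_fops.c changes factor the duplicated validation out of do_cmd_ioctl() and do_cmdtest_ioctl(): __comedi_get_user_cmd() copies the user's comedi_cmd and checks the subdevice and chanlist length, while __comedi_get_user_chanlist() memdup_user()s and validates the channel/gain list. A condensed sketch of the shared call sequence (illustration only):

	struct comedi_cmd cmd;
	unsigned int __user *user_chanlist;
	int ret;

	ret = __comedi_get_user_cmd(dev, arg, &cmd);	/* copy + basic checks */
	if (ret)
		return ret;

	/* keep the user pointer so it can be restored before copy_to_user() */
	user_chanlist = (unsigned int __user *)cmd.chanlist;

	ret = __comedi_get_user_chanlist(dev, &dev->subdevices[cmd.subdev],
					 user_chanlist, &cmd);
	if (ret)
		return ret;
	/* cmd.chanlist now points at a validated kernel copy */
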
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index f82bd4256d51..d46123a97582 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -61,31 +61,31 @@ struct comedi_subdevice {
unsigned int *chanlist; /* driver-owned chanlist (not used) */
- int (*insn_read) (struct comedi_device *, struct comedi_subdevice *,
+ int (*insn_read)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *);
+ int (*insn_write)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
- int (*insn_write) (struct comedi_device *, struct comedi_subdevice *,
+ int (*insn_bits)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *);
+ int (*insn_config)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
- int (*insn_bits) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insn_config) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- int (*do_cmd) (struct comedi_device *, struct comedi_subdevice *);
- int (*do_cmdtest) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*poll) (struct comedi_device *, struct comedi_subdevice *);
- int (*cancel) (struct comedi_device *, struct comedi_subdevice *);
+
+ int (*do_cmd)(struct comedi_device *, struct comedi_subdevice *);
+ int (*do_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_cmd *);
+ int (*poll)(struct comedi_device *, struct comedi_subdevice *);
+ int (*cancel)(struct comedi_device *, struct comedi_subdevice *);
/* int (*do_lock)(struct comedi_device *, struct comedi_subdevice *); */
/* int (*do_unlock)(struct comedi_device *, \
struct comedi_subdevice *); */
/* called when the buffer changes */
- int (*buf_change) (struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned long new_size);
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s, unsigned long new_size);
- void (*munge) (struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int start_chan_index);
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
enum dma_data_direction async_dma_dir;
unsigned int state;
@@ -146,8 +146,8 @@ struct comedi_async {
unsigned int cb_mask;
- int (*inttrig) (struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned int x);
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
};
struct comedi_driver {
@@ -155,9 +155,9 @@ struct comedi_driver {
const char *driver_name;
struct module *module;
- int (*attach) (struct comedi_device *, struct comedi_devconfig *);
- void (*detach) (struct comedi_device *);
- int (*auto_attach) (struct comedi_device *, unsigned long);
+ int (*attach)(struct comedi_device *, struct comedi_devconfig *);
+ void (*detach)(struct comedi_device *);
+ int (*auto_attach)(struct comedi_device *, unsigned long);
/* number of elements in board_name and board_id arrays */
unsigned int num_names;
@@ -202,8 +202,8 @@ struct comedi_device {
struct fasync_struct *async_queue;
- int (*open) (struct comedi_device *dev);
- void (*close) (struct comedi_device *dev);
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
};
static inline const void *comedi_board(const struct comedi_device *dev)
@@ -353,6 +353,14 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
/* drivers.c - general comedi driver functions */
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *,
+ int (*cb)(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned long context),
+ unsigned long context);
+
int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *data,
unsigned int mask);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..ab0e8ed47291 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -155,6 +155,36 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
}
/**
+ * comedi_timeout() - busy-wait for a driver condition to occur.
+ * @dev: comedi_device struct
+ * @s: comedi_subdevice struct
+ * @insn: comedi_insn struct
+ * @cb: callback to check for the condition
+ * @context: private context from the driver
+ */
+int comedi_timeout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context),
+ unsigned long context)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS);
+ int ret;
+
+ while (time_before(jiffies, timeout)) {
+ ret = cb(dev, s, insn, context);
+ if (ret != -EBUSY)
+ return ret; /* success (0) or non EBUSY errno */
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(comedi_timeout);
+
+/**
* comedi_dio_insn_config() - boilerplate (*insn_config) for DIO subdevices.
* @dev: comedi_device struct
* @s: comedi_subdevice struct
@@ -616,8 +646,6 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
if (ret < 0) {
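
comedi_timeout() gives drivers a common busy-wait: the callback returns 0 when the condition is met, -EBUSY while it is still pending, or another errno on error, and the helper gives up with -ETIMEDOUT after COMEDI_TIMEOUT_MS (1000 ms). A hedged example of a driver using it; HYPOTHETICAL_STATUS_REG and the 0x80 end-of-conversion bit are made up for illustration:

	static int my_ai_eoc(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn,
			     unsigned long context)
	{
		unsigned int status = inb(dev->iobase + HYPOTHETICAL_STATUS_REG);

		if (status & 0x80)	/* conversion finished? */
			return 0;
		return -EBUSY;		/* not yet, keep polling */
	}

	/* in the subdevice's (*insn_read) handler: */
	ret = comedi_timeout(dev, s, insn, my_ai_eoc, 0);
	if (ret)
		return ret;	/* -ETIMEDOUT (also logged by parse_insn above) or another error */
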
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 8a57c3c1ade0..46a385c29ba8 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config
#include "../comedidev.h"
#include "8255.h"
+#include "mite.h"
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
@@ -79,6 +80,7 @@ struct pci_8255_boardinfo {
const char *name;
int dio_badr;
int n_8255;
+ unsigned int has_mite:1;
};
static const struct pci_8255_boardinfo pci_8255_boards[] = {
@@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.name = "ni_pci-dio-96",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCIDIO96B] = {
.name = "ni_pci-dio-96b",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PXI6508] = {
.name = "ni_pxi-6508",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503] = {
.name = "ni_pci-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503B] = {
.name = "ni_pci-6503b",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503X] = {
.name = "ni_pci-6503x",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PXI_6503] = {
.name = "ni_pxi-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
};
@@ -163,6 +172,25 @@ struct pci_8255_private {
void __iomem *mmio_base;
};
+static int pci_8255_mite_init(struct pci_dev *pcidev)
+{
+ void __iomem *mite_base;
+ u32 main_phys_addr;
+
+ /* ioremap the MITE registers (BAR 0) temporarily */
+ mite_base = pci_ioremap_bar(pcidev, 0);
+ if (!mite_base)
+ return -ENOMEM;
+
+ /* set data window to main registers (BAR 1) */
+ main_phys_addr = pci_resource_start(pcidev, 1);
+ writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
+
+ /* finished with MITE registers */
+ iounmap(mite_base);
+ return 0;
+}
+
static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase)
{
void __iomem *mmio_base = (void __iomem *)iobase;
@@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ if (board->has_mite) {
+ ret = pci_8255_mite_init(pcidev);
+ if (ret)
+ return ret;
+ }
+
is_mmio = (pci_resource_flags(pcidev, board->dio_badr) &
IORESOURCE_MEM) != 0;
if (is_mmio) {
@@ -235,9 +269,6 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
return ret;
}
- dev_info(dev->class_dev, "%s attached (%d digital i/o channels)\n",
- dev->board_name, board->n_8255 * 24);
-
return 0;
}
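
The NI boards handled by 8255_pci.c sit behind a MITE interface chip, so their 8255 registers in BAR 1 are only reachable once the MITE I/O data window (BAR 0) has been pointed at BAR 1's physical address; that is what pci_8255_mite_init() does, and the new has_mite flag makes auto-attach run it only for the boards that need it. A hypothetical board entry, for illustration only:

	[BOARD_HYPOTHETICAL_NI_DIO] = {	/* made-up enum value and name */
		.name     = "ni_hypothetical-dio",
		.dio_badr = 1,		/* 8255 registers live in BAR 1 */
		.n_8255   = 2,
		.has_mite = 1,		/* enable the MITE data window in auto-attach */
	},
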
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 2706f583d8f0..0757a82ddcfa 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_COMEDI_PCL818) += pcl818.o
obj-$(CONFIG_COMEDI_PCM3724) += pcm3724.o
obj-$(CONFIG_COMEDI_RTI800) += rti800.o
obj-$(CONFIG_COMEDI_RTI802) += rti802.o
+obj-$(CONFIG_COMEDI_DAC02) += dac02.o
obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
obj-$(CONFIG_COMEDI_DAS08_ISA) += das08_isa.o
obj-$(CONFIG_COMEDI_DAS16) += das16.o
@@ -53,7 +54,6 @@ obj-$(CONFIG_COMEDI_PCMDA12) += pcmda12.o
obj-$(CONFIG_COMEDI_PCMMIO) += pcmmio.o
obj-$(CONFIG_COMEDI_PCMUIO) += pcmuio.o
obj-$(CONFIG_COMEDI_MULTIQ3) += multiq3.o
-obj-$(CONFIG_COMEDI_POC) += poc.o
obj-$(CONFIG_COMEDI_S526) += s526.o
# Comedi PCI drivers
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
index 1128c22e7517..28450f65a134 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
@@ -1,46 +1,24 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-035 | Compiler : GCC |
- | Module name : hwdrv_apci035.c | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-035 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +19(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ */
/* Card Specific information */
#define APCI035_ADDRESS_RANGE 255
@@ -106,99 +84,66 @@ static struct comedi_lrange range_apci035_ai = {
}
};
-static int i_WatchdogNbr = 0;
-static int i_Temp = 0;
+static int i_WatchdogNbr;
+static int i_Temp;
static int i_Flag = 1;
+
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ConfigTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Configure As Timer |
-| 1 Configure As Watchdog |
-| data[1] : Watchdog number
-| data[2] : Time base Unit |
-| data[3] : Reload Value |
-| data[4] : External Trigger |
-| 1:Enable
-| 0:Disable
-| data[5] :External Trigger Level
-| 00 Trigger Disabled
-| 01 Trigger Enabled (Low level)
-| 10 Trigger Enabled (High Level)
-| 11 Trigger Enabled (High/Low level)
-| data[6] : External Gate |
-| 1:Enable
-| 0:Disable
-| data[7] : External Gate level
-| 00 Gate Disabled
-| 01 Gate Enabled (Low level)
-| 10 Gate Enabled (High Level)
-| data[8] :Warning Relay
-| 1: ENABLE
-| 0: DISABLE
-| data[9] :Warning Delay available
-| data[10] :Warning Relay Time unit
-| data[11] :Warning Relay Time Reload value
-| data[12] :Reset Relay
-| 1 : ENABLE
-| 0 : DISABLE
-| data[13] :Interrupt
-| 1 : ENABLE
-| 0 : DISABLE
-|
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures The Timer, Counter or Watchdog
+ *
+ * data[0] 0 = Configure As Timer, 1 = Configure As Watchdog
+ * data[1] Watchdog number
+ * data[2] Time base Unit
+ * data[3] Reload Value
+ * data[4] External Trigger, 1 = Enable, 0 = Disable
+ * data[5] External Trigger Level
+ * 00 = Trigger Disabled
+ * 01 = Trigger Enabled (Low level)
+ * 10 = Trigger Enabled (High Level)
+ * 11 = Trigger Enabled (High/Low level)
+ * data[6] External Gate, 1 = Enable, 0 = Disable
+ * data[7] External Gate level
+ * 00 = Gate Disabled
+ * 01 = Gate Enabled (Low level)
+ * 10 = Gate Enabled (High Level)
+ * data[8] Warning Relay, 1 = Enable, 0 = Disable
+ * data[9] Warning Delay available
+ * data[10] Warning Relay Time unit
+ * data[11] Warning Relay Time Reload value
+ * data[12] Reset Relay, 1 = Enable, 0 = Disable
+ * data[13] Interrupt, 1 = Enable, 0 = Disable
+ */
+static int apci035_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Status = 0;
- unsigned int ui_Command = 0;
- unsigned int ui_Mode = 0;
+ unsigned int ui_Status;
+ unsigned int ui_Command;
+ unsigned int ui_Mode;
i_Temp = 0;
devpriv->tsk_Current = current;
devpriv->b_TimerSelectMode = data[0];
i_WatchdogNbr = data[1];
- if (data[0] == 0) {
+ if (data[0] == 0)
ui_Mode = 2;
- } else {
+ else
ui_Mode = 0;
- }
-/* ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12); */
+
ui_Command = 0;
-/* ui_Command = ui_Command & 0xFFFFF9FEUL; */
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/************************/
-/* Set the reload value */
-/************************/
+
+ /* Set the reload value */
outl(data[3], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 4);
-/*********************/
-/* Set the time unit */
-/*********************/
+
+ /* Set the time unit */
outl(data[2], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 8);
if (data[0] == ADDIDATA_TIMER) {
- /******************************/
/* Set the mode : */
/* - Disable the hardware */
/* - Disable the counter mode */
@@ -206,101 +151,82 @@ static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
/* - Disable the reset */
/* - Enable the timer mode */
/* - Set the timer mode */
- /******************************/
ui_Command =
(ui_Command & 0xFFF719E2UL) | ui_Mode << 13UL | 0x10UL;
- } /* if (data[0] == ADDIDATA_TIMER) */
- else {
- if (data[0] == ADDIDATA_WATCHDOG) {
+ } else if (data[0] == ADDIDATA_WATCHDOG) {
- /******************************/
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
- /******************************/
+ /* Set the mode : */
+ /* - Disable the hardware */
+ /* - Disable the counter mode */
+ /* - Disable the warning */
+ /* - Disable the reset */
+ /* - Disable the timer mode */
- ui_Command = ui_Command & 0xFFF819E2UL;
+ ui_Command = ui_Command & 0xFFF819E2UL;
- } else {
- printk("\n The parameter for Timer/watchdog selection is in error\n");
- return -EINVAL;
- }
+ } else {
+ dev_err(dev->class_dev, "The parameter for Timer/watchdog selection is in error\n");
+ return -EINVAL;
}
+
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/********************************/
-/* Disable the hardware trigger */
-/********************************/
+
+ /* Disable the hardware trigger */
ui_Command = ui_Command & 0xFFFFF89FUL;
if (data[4] == ADDIDATA_ENABLE) {
- /**********************************/
+
/* Set the hardware trigger level */
- /**********************************/
ui_Command = ui_Command | (data[5] << 5);
}
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*****************************/
-/* Disable the hardware gate */
-/*****************************/
+
+ /* Disable the hardware gate */
ui_Command = ui_Command & 0xFFFFF87FUL;
if (data[6] == ADDIDATA_ENABLE) {
-/*******************************/
-/* Set the hardware gate level */
-/*******************************/
+
+ /* Set the hardware gate level */
ui_Command = ui_Command | (data[7] << 7);
}
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- ui_Command = 0;
+
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*******************************/
-/* Disable the hardware output */
-/*******************************/
+
+ /* Disable the hardware output */
ui_Command = ui_Command & 0xFFFFF9FBUL;
-/*********************************/
-/* Set the hardware output level */
-/*********************************/
+
+ /* Set the hardware output level */
ui_Command = ui_Command | (data[8] << 2);
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
if (data[9] == ADDIDATA_ENABLE) {
- /************************/
+
/* Set the reload value */
- /************************/
outl(data[11],
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 24);
- /**********************/
+
/* Set the time unit */
- /**********************/
outl(data[10],
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 28);
}
- ui_Command = 0;
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /*******************************/
+
/* Disable the hardware output */
- /*******************************/
ui_Command = ui_Command & 0xFFFFF9F7UL;
- /*********************************/
+
/* Set the hardware output level */
- /*********************************/
ui_Command = ui_Command | (data[12] << 3);
outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /*************************************/
- /** Enable the watchdog interrupt **/
- /*************************************/
- ui_Command = 0;
+
+ /* Enable the watchdog interrupt */
ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-/*******************************/
-/* Set the interrupt selection */
-/*******************************/
+
+ /* Set the interrupt selection */
ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
ui_Command = (ui_Command & 0xFFFFF9FDUL) | (data[13] << 1);
@@ -310,82 +236,63 @@ static int i_APCI035_ConfigTimerWatchdog(struct comedi_device *dev,
}
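/*
 * Editorial sketch (not part of the patch): one hypothetical way a caller
 * could fill the insn data[] array consumed by apci035_timer_config(),
 * following the parameter layout documented above that function. The
 * ADDIDATA_* constants are the ones already used by the driver; all the
 * numeric values below are illustrative only.
 */
static void example_apci035_fill_watchdog_config(unsigned int *data)
{
	data[0] = ADDIDATA_WATCHDOG;	/* configure as watchdog, not timer */
	data[1] = 1;			/* watchdog number 1 */
	data[2] = 2;			/* time base unit (board specific) */
	data[3] = 1000;			/* reload value */
	data[4] = 0;			/* external trigger disabled */
	data[5] = 0;			/* trigger level: disabled */
	data[6] = 0;			/* external gate disabled */
	data[7] = 0;			/* gate level: disabled */
	data[8] = ADDIDATA_ENABLE;	/* warning relay enabled */
	data[9] = 0;			/* no warning delay */
	data[10] = 0;			/* warning relay time unit (unused here) */
	data[11] = 0;			/* warning relay reload value (unused here) */
	data[12] = 0;			/* reset relay disabled */
	data[13] = ADDIDATA_ENABLE;	/* watchdog interrupt enabled */
}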
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_StartStopWriteTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop The Selected Timer , or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 - Stop Selected Timer/Watchdog |
-| 1 - Start Selected Timer/Watchdog |
-| 2 - Trigger Selected Timer/Watchdog |
-| 3 - Stop All Timer/Watchdog |
-| 4 - Start All Timer/Watchdog |
-| 5 - Trigger All Timer/Watchdog |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start / Stop The Selected Timer or Watchdog
+ *
+ * data[0]
+ * 0 - Stop Selected Timer/Watchdog
+ * 1 - Start Selected Timer/Watchdog
+ * 2 - Trigger Selected Timer/Watchdog
+ * 3 - Stop All Timer/Watchdog
+ * 4 - Start All Timer/Watchdog
+ * 5 - Trigger All Timer/Watchdog
+ */
+static int apci035_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Command = 0;
- int i_Count = 0;
+ unsigned int ui_Command;
+ int i_Count;
if (data[0] == 1) {
ui_Command =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /**********************/
+
/* Start the hardware */
- /**********************/
ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x1UL;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- } /* if (data[0]==1) */
+ }
if (data[0] == 2) {
ui_Command =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- /***************************/
+
/* Set the trigger command */
- /***************************/
ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x200UL;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
}
- if (data[0] == 0) /* Stop The Watchdog */
- {
+ if (data[0] == 0) {
/* Stop The Watchdog */
ui_Command = 0;
-/*
-* ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
-* ui_Command = ui_Command & 0xFFFFF9FEUL;
-*/
+ /*
+ * ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
+ * ui_Command = ui_Command & 0xFFFFF9FEUL;
+ */
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- } /* if (data[1]==0) */
- if (data[0] == 3) /* stop all Watchdogs */
- {
+ }
+ if (data[0] == 3) {
+ /* stop all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x2UL;
- } else {
+ else
ui_Command = 0x10UL;
- }
+
i_WatchdogNbr = i_Count;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
@@ -393,30 +300,29 @@ static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
}
}
- if (data[0] == 4) /* start all Watchdogs */
- {
+ if (data[0] == 4) {
+ /* start all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x1UL;
- } else {
+ else
ui_Command = 0x8UL;
- }
+
i_WatchdogNbr = i_Count;
outl(ui_Command,
devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
0);
}
}
- if (data[0] == 5) /* trigger all Watchdogs */
- {
+ if (data[0] == 5) {
+ /* trigger all Watchdogs */
ui_Command = 0;
for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
ui_Command = 0x4UL;
- } else {
+ else
ui_Command = 0x20UL;
- }
i_WatchdogNbr = i_Count;
outl(ui_Command,
@@ -429,109 +335,61 @@ static int i_APCI035_StartStopWriteTimerWatchdog(struct comedi_device *dev,
}
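/*
 * Editorial sketch (not part of the patch): the action codes accepted in
 * data[0] by apci035_timer_write(), gathered into a hypothetical enum so a
 * caller does not have to pass bare numbers. The values mirror the comment
 * above the function.
 */
enum example_apci035_timer_action {
	EXAMPLE_APCI035_STOP_SELECTED		= 0,
	EXAMPLE_APCI035_START_SELECTED		= 1,
	EXAMPLE_APCI035_TRIGGER_SELECTED	= 2,
	EXAMPLE_APCI035_STOP_ALL		= 3,
	EXAMPLE_APCI035_START_ALL		= 4,
	EXAMPLE_APCI035_TRIGGER_ALL		= 5,
};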
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ReadTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : software trigger status
-| data[1] : hardware trigger status
-| data[2] : Software clear status
-| data[3] : Overflow status
-| data[4] : Timer actual value
-|
-
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ReadTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read The Selected Timer, Counter or Watchdog
+ *
+ * data[0] software trigger status
+ * data[1] hardware trigger status
+ * data[2] Software clear status
+ * data[3] Overflow status
+ * data[4] Timer actual value
+ */
+static int apci035_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_Status = 0; /* Status register */
+ unsigned int ui_Status; /* Status register */
i_WatchdogNbr = insn->unused[0];
- /******************/
/* Get the status */
- /******************/
-
ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
- /***********************************/
/* Get the software trigger status */
- /***********************************/
-
data[0] = ((ui_Status >> 1) & 1);
- /***********************************/
+
/* Get the hardware trigger status */
- /***********************************/
data[1] = ((ui_Status >> 2) & 1);
- /*********************************/
+
/* Get the software clear status */
- /*********************************/
data[2] = ((ui_Status >> 3) & 1);
- /***************************/
+
/* Get the overflow status */
- /***************************/
data[3] = ((ui_Status >> 0) & 1);
- if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
+ if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER)
data[4] = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
-
return insn->n;
}
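/*
 * Editorial sketch (not part of the patch): decoding the data[] returned by
 * apci035_timer_read() into a hypothetical status structure, following the
 * layout documented above the function. All names are illustrative only.
 */
struct example_apci035_timer_status {
	unsigned int sw_trigger;	/* data[0]: software trigger status */
	unsigned int hw_trigger;	/* data[1]: hardware trigger status */
	unsigned int sw_clear;		/* data[2]: software clear status */
	unsigned int overflow;		/* data[3]: overflow status */
	unsigned int value;		/* data[4]: current value (timer mode only) */
};

static void example_apci035_decode_status(const unsigned int *data,
					  struct example_apci035_timer_status *st)
{
	st->sw_trigger	= data[0];
	st->hw_trigger	= data[1];
	st->sw_clear	= data[2];
	st->overflow	= data[3];
	st->value	= data[4];
}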
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ConfigAnalogInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Analog Input Subdevice |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s : Subdevice Pointer |
-| struct comedi_insn *insn : Insn Structure Pointer |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| data[0] : Warning delay value
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures The Analog Input Subdevice
+ *
+ * data[0] Warning delay value
+ */
+static int apci035_ai_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
devpriv->tsk_Current = current;
outl(0x200 | 0, devpriv->iobase + 128 + 0x4);
outl(0, devpriv->iobase + 128 + 0);
-/********************************/
-/* Initialise the warning value */
-/********************************/
+
+ /* Initialise the warning value */
outl(0x300 | 0, devpriv->iobase + 128 + 0x4);
outl((data[0] << 8), devpriv->iobase + 128 + 0);
outl(0x200000UL, devpriv->iobase + 128 + 12);
@@ -540,145 +398,88 @@ static int i_APCI035_ConfigAnalogInput(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_ReadAnalogInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read value of the selected channel |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_NoOfChannels : No Of Channels To read |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-| data[0] : Digital Value Of Input |
-| |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_ReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read value of the selected channel
+ *
+ * data[0] Digital Value Of Input
+ */
+static int apci035_ai_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- unsigned int ui_CommandRegister = 0;
+ unsigned int ui_CommandRegister;
-/******************/
-/* Set the start */
-/******************/
+ /* Set the start */
ui_CommandRegister = 0x80000;
- /******************************/
+
/* Write the command register */
- /******************************/
outl(ui_CommandRegister, devpriv->iobase + 128 + 8);
-/***************************************/
-/* Read the digital value of the input */
-/***************************************/
+ /* Read the digital value of the input */
data[0] = inl(devpriv->iobase + 128 + 28);
return insn->n;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI035_Reset(struct comedi_device *dev) |
-| |
-+----------------------------------------------------------------------------+
-| Task :Resets the registers of the card |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI035_Reset(struct comedi_device *dev)
+static int apci035_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
- int i_Count = 0;
+ int i_Count;
for (i_Count = 1; i_Count <= 4; i_Count++) {
i_WatchdogNbr = i_Count;
- outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0); /* stop all timers */
+
+ /* stop all timers */
+ outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
}
outl(0x0, devpriv->iobase + 128 + 12); /* Disable the warning delay */
return 0;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI035_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt processing Routine |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI035_Interrupt(int irq, void *d)
+static void apci035_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister1 = 0;
- unsigned int ui_StatusRegister2 = 0;
- unsigned int ui_ReadCommand = 0;
- unsigned int ui_ChannelNumber = 0;
- unsigned int ui_DigitalTemperature = 0;
+ unsigned int ui_StatusRegister1;
+ unsigned int ui_StatusRegister2;
+ unsigned int ui_ReadCommand;
+ unsigned int ui_ChannelNumber;
+ unsigned int ui_DigitalTemperature;
if (i_Temp == 1) {
i_WatchdogNbr = i_Flag;
i_Flag = i_Flag + 1;
}
- /**************************************/
+
/* Read the interrupt status register of temperature Warning */
- /**************************************/
ui_StatusRegister1 = inl(devpriv->iobase + 128 + 16);
- /**************************************/
- /* Read the interrupt status register for Watchdog/timer */
- /**************************************/
+ /* Read the interrupt status register for Watchdog/timer */
ui_StatusRegister2 =
inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 20);
- if ((((ui_StatusRegister1) & 0x8) == 0x8)) /* Test if warning relay interrupt */
- {
- /**********************************/
+ /* Test if warning relay interrupt */
+ if ((((ui_StatusRegister1) & 0x8) == 0x8)) {
+
/* Disable the temperature warning */
- /**********************************/
ui_ReadCommand = inl(devpriv->iobase + 128 + 12);
ui_ReadCommand = ui_ReadCommand & 0xFFDF0000UL;
outl(ui_ReadCommand, devpriv->iobase + 128 + 12);
- /***************************/
+
/* Read the channel number */
- /***************************/
ui_ChannelNumber = inl(devpriv->iobase + 128 + 60);
- /**************************************/
+
/* Read the digital temperature value */
- /**************************************/
ui_DigitalTemperature = inl(devpriv->iobase + 128 + 60);
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if (((ui_StatusRegister1 & 0x8) == 0x8)) */
- else {
- if ((ui_StatusRegister2 & 0x1) == 0x1) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- }
- } /* else if (((ui_StatusRegister1 & 0x8) == 0x8)) */
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+
+ } else if ((ui_StatusRegister2 & 0x1) == 0x1) {
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+ }
return;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 054910511e9e..a633957890d7 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -1,48 +1,25 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1500 | Compiler : GCC |
- | Module name : hwdrv_apci1500.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1500 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/********* Definitions for APCI-1500 card *****/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ */
/* Card Specific information */
#define APCI1500_ADDRESS_RANGE 4
@@ -141,99 +118,45 @@ enum {
APCI1500_RW_PORT_B_PATTERN_MASK
};
-static int i_TimerCounter1Init = 0;
-static int i_TimerCounter2Init = 0;
-static int i_WatchdogCounter3Init = 0;
-static int i_Event1Status = 0, i_Event2Status = 0;
-static int i_TimerCounterWatchdogInterrupt = 0;
-static int i_Logic = 0, i_CounterLogic = 0;
-static int i_InterruptMask = 0;
-static int i_InputChannel = 0;
-static int i_TimerCounter1Enabled = 0, i_TimerCounter2Enabled = 0,
- i_WatchdogCounter3Enabled = 0;
+static int i_TimerCounter1Init;
+static int i_TimerCounter2Init;
+static int i_WatchdogCounter3Init;
+static int i_Event1Status, i_Event2Status;
+static int i_TimerCounterWatchdogInterrupt;
+static int i_Logic, i_CounterLogic;
+static int i_InterruptMask;
+static int i_InputChannel;
+static int i_TimerCounter1Enabled, i_TimerCounter2Enabled,
+ i_WatchdogCounter3Enabled;
/*
- +----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigDigitalInputEvent |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : An event can be generated for each port. |
-| The first event is related to the first 8 channels |
-| (port 1) and the second to the following 6 channels |
-| (port 2). An interrupt is generated when one or both |
-| events have occurred |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] :Number of the input port on |
-| which the event will take place |
-| (1 or 2)
-| data[1] : The event logic for port 1 has |
-| three possibilities |
-| :0 APCI1500_AND :This logic |
-| links |
-| the inputs |
-| with an AND |
-| logic. |
-| 1 APCI1500_OR :This logic |
-| links |
-| the inputs |
-| with a |
-| OR logic. |
-| 2 APCI1500_OR_PRIORITY |
-| :This logic |
-| links |
-| the inputs |
-| with a |
-| priority |
-| OR logic. |
-| Input 1 |
-| has the |
-| highest |
-| priority |
-| level and |
-| input 8 |
-| the smallest|
-| For the second port the user has|
-| 1 possibility: |
-| APCI1500_OR :This logic |
-| links |
-| the inputs |
-| with a |
-| polarity |
-| OR logic |
-| data[2] : These 8-character word for port1|
-| and 6-character word for port 2 |
-| give the mask of the event. |
-| Each place gives the state |
-| of the input channels and can |
-| have one of these six characters|
-| |
-| 0 : This input must be on 0 |
-| 1 : This input must be on 1 |
-| 2 : This input reacts to |
-| a falling edge |
-| 3 : This input reacts to a |
-| rising edge |
-| 4 : This input reacts to both edges |
-|
-| 5 : This input is not |
-| used for event |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * An event can be generated for each port. The first event is related to the
+ * first 8 channels (port 1) and the second to the following 6 channels (port 2)
+ * An interrupt is generated when one or both events have occurred.
+ *
+ * data[0] Number of the input port on which the event will take place (1 or 2)
+ * data[1] The event logic for port 1 has three possibilities:
+ * APCI1500_AND This logic links the inputs with an AND logic.
+ * APCI1500_OR This logic links the inputs with a OR logic.
+ * APCI1500_OR_PRIORITY This logic links the inputs with a priority OR
+ * logic. Input 1 has the highest priority level
+ * and input 8 the smallest.
+ * For the second port the user has 1 possibility:
+ * APCI1500_OR This logic links the inputs with a polarity OR logic
+ * data[2] An 8-character word for port 1 and a 6-character word for port 2
+ *         give the mask of the event. Each place gives the state of the input
+ * channels and can have one of these six characters
+ * 0 This input must be on 0
+ * 1 This input must be on 1
+ * 2 This input reacts to a falling edge
+ * 3 This input reacts to a rising edge
+ * 4 This input reacts to both edges
+ * 5 This input is not used for event
+ */
+static int apci1500_di_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
@@ -241,14 +164,10 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
int i_PatternTransitionCount = 0, i_RegValue;
int i;
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Disables the main interrupt on the board */
- /**********************************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if (data[0] == 1) {
@@ -259,7 +178,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
i_MaxChannel = 6;
} /* if(data[0]==2) */
else {
- printk("\nThe specified port event does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified port event does not exist\n");
return -EINVAL;
} /* else if(data[0]==2) */
} /* else if (data[0] == 1) */
@@ -274,7 +194,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
data[1] = APCI1500_OR_PRIORITY;
break;
default:
- printk("\nThe specified interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified interrupt logic does not exist\n");
return -EINVAL;
} /* switch(data[1]); */
@@ -318,37 +239,30 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
case 5:
break;
default:
- printk("\nThe option indicated in the event mask does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option indicated in the event mask does not exist\n");
return -EINVAL;
} /* switch(i_EventMask) */
} /* for (i_Count = i_MaxChannel; i_Count >0;i_Count --) */
if (data[0] == 1) {
- /****************************/
/* Test the interrupt logic */
- /****************************/
if (data[1] == APCI1500_AND ||
data[1] == APCI1500_OR ||
data[1] == APCI1500_OR_PRIORITY) {
- /**************************************/
/* Tests if a transition was declared */
/* for a OR PRIORITY logic */
- /**************************************/
if (data[1] == APCI1500_OR_PRIORITY
&& i_PatternTransition != 0) {
- /********************************************/
- /* Transition error on an OR PRIORITY logic */
- /********************************************/
- printk("\nTransition error on an OR PRIORITY logic\n");
+ dev_warn(dev->hw_dev,
+ "Transition error on an OR PRIORITY logic\n");
return -EINVAL;
} /* if (data[1]== APCI1500_OR_PRIORITY && i_PatternTransition != 0) */
- /*************************************/
/* Tests if more than one transition */
/* was declared for an AND logic */
- /*************************************/
if (data[1] == APCI1500_AND) {
for (i_Count = 0; i_Count < 8; i_Count++) {
@@ -360,29 +274,21 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
} /* for (i_Count = 0; i_Count < 8; i_Count++) */
if (i_PatternTransitionCount > 1) {
- /****************************************/
- /* Transition error on an AND logic */
- /****************************************/
- printk("\n Transition error on an AND logic\n");
+ dev_warn(dev->hw_dev,
+ "Transition error on an AND logic\n");
return -EINVAL;
} /* if (i_PatternTransitionCount > 1) */
} /* if (data[1]== APCI1500_AND) */
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the polarity register of port 1 */
- /**********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_POLARITY,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -390,20 +296,16 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the pattern mask register of */
/* port 1 */
- /*********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_MASK,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternMask,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************/
/* Selects the pattern transition register */
/* of port 1 */
- /********************************************/
outb(APCI1500_RW_PORT_A_PATTERN_TRANSITION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -411,10 +313,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 1 */
- /******************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -422,17 +322,13 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 1 */
- /******************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Port A new mode */
- /**********************/
i_RegValue = (i_RegValue & 0xF9) | data[1] | 0x9;
outb(i_RegValue,
@@ -441,53 +337,40 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
i_Event1Status = 1;
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[1]==APCI1500_AND||data[1]==APCI1500_OR||data[1]==APCI1500_OR_PRIORITY) */
else {
- printk("\nThe choice for interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice for interrupt logic does not exist\n");
return -EINVAL;
} /* else }// if(data[1]==APCI1500_AND||data[1]==APCI1500_OR||data[1]==APCI1500_OR_PRIORITY) */
} /* if (data[0]== 1) */
- /************************************/
/* Test if event setting for port 2 */
- /************************************/
if (data[0] == 2) {
- /************************/
/* Test the event logic */
- /************************/
if (data[1] == APCI1500_OR) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the mode specification mask */
/* register of port B */
- /****************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -495,10 +378,8 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port B */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -507,37 +388,29 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************/
/* Selects error channels 1 and 2 */
- /**********************************/
i_PatternMask = (i_PatternMask | 0xC0);
i_PatternPolarity = (i_PatternPolarity | 0xC0);
i_PatternTransition = (i_PatternTransition | 0xC0);
- /**********************************************/
/* Selects the polarity register of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternPolarity,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the pattern transition register */
/* of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
outb(i_PatternTransition,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Selects the pattern Mask register */
/* of port 2 */
- /**********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_MASK,
devpriv->iobase +
@@ -546,20 +419,16 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 2 */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************/
/* Selects the mode specification mask */
/* register of port 2 */
- /******************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -569,23 +438,20 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2Status = 1;
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if (data[1] == APCI1500_OR) */
else {
- printk("\nThe choice for interrupt logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice for interrupt logic does not exist\n");
return -EINVAL;
} /* elseif (data[1] == APCI1500_OR) */
} /* if(data[0]==2) */
@@ -594,31 +460,15 @@ static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
}
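/*
 * Editorial sketch (not part of the patch): a hypothetical data[] filling
 * for apci1500_di_config() that arms an OR event on port 1. The per-channel
 * digits packed into data[2] use the 0..5 codes documented above the
 * function; the mapping of digit position to input channel is assumed to
 * follow the driver's parsing loop and is illustrative only.
 */
static void example_apci1500_fill_event_config(unsigned int *data)
{
	data[0] = 1;			/* port 1 (the first 8 inputs) */
	data[1] = APCI1500_OR;		/* OR interrupt logic */
	data[2] = 55555533;		/* two inputs react to a rising edge,
					 * the other six are not used */
}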
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_StartStopInputEvent |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Allows or disallows a port event |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_Channel : Channel number to read |
-| unsigned int *data : Data Pointer to read status |
-| data[0] :0 Start input event
-| 1 Stop input event
-| data[1] :No of port (1 or 2)
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Allows or disallows a port event
+ *
+ * data[0] 0 = Start input event, 1 = Stop input event
+ * data[1] Number of port (1 or 2)
+ */
+static int apci1500_di_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
@@ -626,48 +476,30 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
switch (data[0]) {
case START:
- /*************************/
/* Tests the port number */
- /*************************/
if (data[1] == 1 || data[1] == 2) {
- /***************************/
/* Test if port 1 selected */
- /***************************/
if (data[1] == 1) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event1Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 1 */
- /***************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Allows the pattern interrupt */
- /*************************************/
outb(0xC0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -680,185 +512,142 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(i_Event1Status==1) */
else {
- printk("\nEvent 1 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 1 not initialised\n");
return -EINVAL;
} /* else if(i_Event1Status==1) */
} /* if (data[1]==1) */
if (data[1] == 2) {
if (i_Event2Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 2 */
- /***************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Allows the pattern interrupt */
- /*************************************/
outb(0xC0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2InterruptStatus = 1;
} /* if(i_Event2Status==1) */
else {
- printk("\nEvent 2 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 2 not initialised\n");
return -EINVAL;
} /* else if(i_Event2Status==1) */
} /* if(data[1]==2) */
} /* if (data[1] == 1 || data[0] == 2) */
else {
- printk("\nThe port parameter is in error\n");
+ dev_warn(dev->hw_dev,
+ "The port parameter is in error\n");
return -EINVAL;
} /* else if (data[1] == 1 || data[0] == 2) */
break;
case STOP:
- /*************************/
/* Tests the port number */
- /*************************/
if (data[1] == 1 || data[1] == 2) {
- /***************************/
/* Test if port 1 selected */
- /***************************/
if (data[1] == 1) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event1Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port A */
- /******************/
outb(0xF0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 1 */
- /***************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Inhibits the pattern interrupt */
- /*************************************/
outb(0xE0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port A */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event1InterruptStatus = 0;
} /* if(i_Event1Status==1) */
else {
- printk("\nEvent 1 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 1 not initialised\n");
return -EINVAL;
} /* else if(i_Event1Status==1) */
} /* if (data[1]==1) */
if (data[1] == 2) {
- /*****************************/
/* Test if event initialised */
- /*****************************/
if (i_Event2Status == 1) {
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************/
/* Disable Port B */
- /******************/
outb(0x74,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the command and status register of */
/* port 2 */
- /***************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Inhibits the pattern interrupt */
- /*************************************/
outb(0xE0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************************/
/* Selects the APCI1500_RW_MASTER_CONFIGURATION_CONTROL register */
- /*****************************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************/
/* Enable Port B */
- /*****************/
outb(0xF4,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_Event2InterruptStatus = 0;
} /* if(i_Event2Status==1) */
else {
- printk("\nEvent 2 not initialised\n");
+ dev_warn(dev->hw_dev,
+ "Event 2 not initialised\n");
return -EINVAL;
} /* else if(i_Event2Status==1) */
} /* if(data[1]==2) */
} /* if (data[1] == 1 || data[1] == 2) */
else {
- printk("\nThe port parameter is in error\n");
+ dev_warn(dev->hw_dev,
+ "The port parameter is in error\n");
return -EINVAL;
} /* else if (data[1] == 1 || data[1] == 2) */
break;
default:
- printk("\nThe option of START/STOP logic does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option of START/STOP logic does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
@@ -866,35 +655,17 @@ static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
}
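/*
 * Editorial sketch (not part of the patch): starting the previously
 * configured port 1 event via apci1500_di_write(), using the START/STOP
 * codes that the switch statement above expects. Illustrative only.
 */
static void example_apci1500_fill_event_start(unsigned int *data)
{
	data[0] = START;	/* allow the port event (STOP would inhibit it) */
	data[1] = 1;		/* port number 1 */
}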
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_Initialisation |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Return the status of the digital input |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_Channel : Channel number to read |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_Initialisation(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Return the status of the digital input
+ */
+static int apci1500_di_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_DummyRead = 0;
- /******************/
/* Software reset */
- /******************/
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -902,16 +673,12 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the master configuration control register */
- /*****************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -943,9 +710,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -975,9 +740,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the data path polarity register of port C */
- /*****************************************************/
outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* High level of port C means 1 */
@@ -992,9 +755,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes it */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 1 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1004,9 +765,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates the interrupt management of timer 1 */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 2 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1016,9 +775,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates Timer 2 interrupt management: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 3 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -1028,9 +785,7 @@ static int i_APCI1500_Initialisation(struct comedi_device *dev,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates interrupt management of timer 3: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes all interrupts */
@@ -1051,37 +806,15 @@ static int apci1500_di_insn_bits(struct comedi_device *dev,
}
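The initialisation sequence above programs the Zilog Z8536 CIO indirectly: the internal register number is written to APCI1500_Z8536_CONTROL_REGISTER and the data byte then moves through the same port. A minimal kernel-side sketch of that access pattern (illustrative only, not part of this patch; the helper names are made up and the driver's existing addi_private and register definitions are assumed):

/* Illustrative sketch, not part of this patch: the indirect Z8536 access
 * pattern used throughout this driver -- select an internal register via
 * the control port, then transfer the data byte through the same port.
 */
static void z8536_write(struct addi_private *devpriv,
			unsigned int reg, unsigned int val)
{
	outb(reg, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	outb(val, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
}

static unsigned int z8536_read(struct addi_private *devpriv, unsigned int reg)
{
	outb(reg, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	return inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
}

With helpers like these, the port A mode setup above would collapse to z8536_write(devpriv, APCI1500_RW_PORT_A_SPECIFICATION, 0x10);.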
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigDigitalOutputErrorInterrupt
-| (struct comedi_device *dev,struct comedi_subdevice *s struct comedi_insn
-| *insn,unsigned int *data) |
-| |
-+----------------------------------------------------------------------------+
-| Task : Configures the digital output memory and the digital
-| output error interrupt |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| data[0] :1:Memory on |
-| 0:Memory off |
-| data[1] :1 Enable the voltage error interrupt
-| :0 Disable the voltage error interrupt |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital output memory and the digital output error interrupt
+ *
+ * data[0] 1 = Memory on, 0 = Memory off
+ * data[1] 1 = Enable the voltage error interrupt
+ *         2 = Disable the voltage error interrupt
+ */
+static int apci1500_do_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
@@ -1090,31 +823,15 @@ static int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *de
}
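For context, apci1500_do_config() is normally reached from userspace through a comedi INSN_CONFIG instruction. A hedged comedilib sketch, assuming the digital output subdevice number and that data[0] still selects the output memory as documented in the removed comment:

/* Hypothetical userspace sketch (comedilib). The subdevice number and the
 * meaning of data[0] are assumptions, not taken from this patch.
 */
#include <string.h>
#include <comedilib.h>

static int apci1500_enable_do_error_irq(comedi_t *it, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[2];

	memset(&insn, 0, sizeof(insn));
	data[0] = 1;		/* assumed: digital output memory on */
	data[1] = 1;		/* 1 = enable the voltage error interrupt */
	insn.insn = INSN_CONFIG;
	insn.n = 2;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = 0;

	return comedi_do_insn(it, &insn);	/* negative on error */
}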
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_WriteDigitalOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Writes port value To the selected port |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int ui_NoOfChannels : No Of Channels To Write |
-| unsigned int *data : Data Pointer to read status |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Writes port value to the selected port
+ */
+static int apci1500_do_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
- static unsigned int ui_Temp = 0;
+ static unsigned int ui_Temp;
unsigned int ui_Temp1;
unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */
@@ -1165,7 +882,9 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
APCI1500_DIGITAL_OP);
} /* if(data[1]==1) */
else {
- printk("\nSpecified channel not supported\n");
+ dev_warn(dev->hw_dev,
+ "Specified channel not supported\n");
+ return -EINVAL;
} /* else if(data[1]==1) */
} /* elseif(data[1]==0) */
} /* if(data[3]==0) */
@@ -1242,12 +961,15 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
APCI1500_DIGITAL_OP);
} /* if(data[1]==1) */
else {
- printk("\nSpecified channel not supported\n");
+ dev_warn(dev->hw_dev,
+ "Specified channel not supported\n");
+ return -EINVAL;
} /* else if(data[1]==1) */
} /* elseif(data[1]==0) */
} /* if(data[3]==1); */
else {
- printk("\nSpecified functionality does not exist\n");
+ dev_warn(dev->hw_dev,
+ "Specified functionality does not exist\n");
return -EINVAL;
} /* if else data[3]==1) */
} /* if else data[3]==0) */
@@ -1256,57 +978,23 @@ static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigCounterTimerWatchdog(comedi_device
-| *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)|
-| |
-+----------------------------------------------------------------------------+
-| Task : Configures The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status data[0] : 2 APCI1500_1_8_KHZ
-| 1 APCI1500_3_6_KHZ |
-| 0 APCI1500_115_KHZ
-| data[1] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-| data[2] : 0 Counter
-| 1 Timer/Watchdog
-| data[3] : This parameter has |
-| two meanings. |
-| - If the counter/timer |
-| is used as a counter |
-| the limit value of |
-| the counter is given |
-| |
-| - If the counter/timer |
-| is used as a timer, |
-| the divider factor |
-| for the output is |
-| given.
-| data[4] : 0 APCI1500_CONTINUOUS
-| 1 APCI1500_SINGLE
-| data[5] : 0 Software Trigger
-| 1 Hardware Trigger
-|
-| data[6] :0 Software gate
-| 1 Hardware gate
-| data[7] :0 Interrupt Disable
-| 1 Interrupt Enable
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the timer, counter or watchdog
+ *
+ * data[0] 0 = APCI1500_115_KHZ, 1 = APCI1500_3_6_KHZ, 2 = APCI1500_1_8_KHZ
+ * data[1] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ * data[2] 0 = Counter, 1 = Timer/Watchdog
+ * data[3] This parameter has two meanings. If the counter/timer is used as
+ * a counter the limit value of the counter is given. If the counter/timer
+ * is used as a timer, the divider factor for the output is given.
+ * data[4] 0 = APCI1500_CONTINUOUS, 1 = APCI1500_SINGLE
+ * data[5] 0 = Software Trigger, 1 = Hardware Trigger
+ * data[6] 0 = Software gate, 1 = Hardware gate
+ * data[7] 0 = Interrupt Disable, 1 = Interrupt Enable
+ */
+static int apci1500_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_TimerCounterMode, i_MasterConfiguration;
@@ -1319,7 +1007,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
} /* if(data[0]==0||data[0]==1||data[0]==2) */
else {
if (data[0] != 3) {
- printk("\nThe option for input clock selection does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The option for input clock selection does not exist\n");
return -EINVAL;
} /* if(data[0]!=3) */
} /* elseif(data[0]==0||data[0]==1||data[0]==2) */
@@ -1335,7 +1024,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_TIMER;
break;
default:
- printk("\nThis choice is not a timer nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a timer nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1348,139 +1038,110 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
i_TimerCounterMode = data[2] | data[4] | 7;
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of timer/counter 1 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR1_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 1 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR1_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 1 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR1_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables timer/counter 1 and triggers timer/counter 1 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x40;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 1 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Disable timer/counter 1 */
- /***************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 1 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Trigger timer/counter 1 */
- /***************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1496,7 +1157,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_TIMER;
break;
default:
- printk("\nThis choice is not a timer nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a timer nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1509,7 +1171,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
@@ -1522,7 +1185,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[5] = APCI1500_HARDWARE_TRIGGER;
break;
default:
- printk("\nThis choice for software or hardware trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware trigger does not exist\n");
return -EINVAL;
} /* switch(data[5]) */
@@ -1535,140 +1199,111 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[6] = APCI1500_HARDWARE_GATE;
break;
default:
- printk("\nThis choice for software or hardware gate does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware gate does not exist\n");
return -EINVAL;
} /* switch(data[6]) */
i_TimerCounterMode = data[2] | data[4] | data[5] | data[6] | 7;
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of timer/counter 2 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR2_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 2 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR2_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of timer/counter 2 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR2_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables timer/counter 2 and triggers timer/counter 2 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x20;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 2 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Disable timer/counter 2 */
- /***************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************/
/* Selects the commands register of */
/* timer/counter 2 */
- /****************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Trigger timer/counter 1 */
- /***************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1684,7 +1319,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[2] = APCI1500_WATCHDOG;
break;
default:
- printk("\nThis choice is not a watchdog nor a counter\n");
+ dev_warn(dev->hw_dev,
+ "This choice is not a watchdog nor a counter\n");
return -EINVAL;
} /* switch(data[2]) */
@@ -1697,7 +1333,8 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[4] = APCI1500_SINGLE;
break;
default:
- printk("\nThis option for single/continuous mode does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This option for single/continuous mode does not exist\n");
return -EINVAL;
} /* switch(data[4]) */
@@ -1710,146 +1347,109 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
data[6] = APCI1500_HARDWARE_GATE;
break;
default:
- printk("\nThis choice for software or hardware gate does not exist\n");
+ dev_warn(dev->hw_dev,
+ "This choice for software or hardware gate does not exist\n");
return -EINVAL;
} /* switch(data[6]) */
- /*****************************/
/* Test if used for watchdog */
- /*****************************/
if (data[2] == APCI1500_WATCHDOG) {
- /*****************************/
/* - Enables the output line */
/* - Enables retrigger */
/* - Pulses output */
- /*****************************/
i_TimerCounterMode = data[2] | data[4] | 0x54;
} /* if (data[2] == APCI1500_WATCHDOG) */
else {
i_TimerCounterMode = data[2] | data[4] | data[6] | 7;
} /* elseif (data[2] == APCI1500_WATCHDOG) */
- /*************************/
/* Test the reload value */
- /*************************/
if ((data[3] >= 0) && (data[3] <= 65535)) {
if (data[7] == APCI1500_ENABLE
|| data[7] == APCI1500_DISABLE) {
- /************************************************/
/* Selects the mode register of watchdog/counter 3 */
- /************************************************/
outb(APCI1500_RW_CPT_TMR3_MODE_SPECIFICATION,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************/
/* Writes the new mode */
- /***********************/
outb(i_TimerCounterMode,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of watchdog/counter 3 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR3_TIME_CST_LOW,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************/
/* Writes the low value */
- /*************************/
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /****************************************************/
/* Selects the constant register of watchdog/counter 3 */
- /****************************************************/
outb(APCI1500_RW_CPT_TMR3_TIME_CST_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**************************/
/* Writes the high value */
- /**************************/
data[3] = data[3] >> 8;
outb(data[3],
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /**********************/
/* Reads the register */
- /**********************/
i_MasterConfiguration =
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************************************/
/* Enables watchdog/counter 3 and triggers watchdog/counter 3 */
- /********************************************************/
i_MasterConfiguration =
i_MasterConfiguration | 0x10;
- /*********************************************/
/* Selects the master configuration register */
- /*********************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************************/
/* Writes the new configuration */
- /********************************/
outb(i_MasterConfiguration,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /********************/
/* Test if COUNTER */
- /********************/
if (data[2] == APCI1500_COUNTER) {
- /*************************************/
/* Selects the command register of */
/* watchdog/counter 3 */
- /*************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Disable the watchdog/counter 3 and starts it */
- /*************************************************/
outb(0x0,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************/
/* Selects the command register of */
/* watchdog/counter 3 */
- /*************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Trigger the watchdog/counter 3 and starts it */
- /*************************************************/
outb(0x2,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1858,12 +1458,14 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
} /* if(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
else {
- printk("\nError in selection of interrupt enable or disable\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of interrupt enable or disable\n");
return -EINVAL;
} /* elseif(data[7]== APCI1500_ENABLE ||data[7]== APCI1500_DISABLE) */
} /* if ((data[3]>= 0) && (data[3] <= 65535)) */
else {
- printk("\nError in selection of reload value\n");
+ dev_warn(dev->hw_dev,
+ "Error in selection of reload value\n");
return -EINVAL;
} /* else if ((data[3]>= 0) && (data[3] <= 65535)) */
i_TimerCounterWatchdogInterrupt = data[7];
@@ -1871,44 +1473,25 @@ static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
break;
default:
- printk("\nThe specified counter\timer option does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified counter/timer option does not exist\n");
+ return -EINVAL;
} /* switch(data[1]) */
i_CounterLogic = data[2];
return insn->n;
}
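The data[] layout documented above maps directly onto an INSN_CONFIG instruction. A hedged comedilib sketch that configures counter/timer 1 as a continuous timer on the 1.8 kHz input clock with a reload value of 1000 (the subdevice number is an assumption):

/* Hypothetical userspace sketch (comedilib); the subdevice number is an
 * assumption. The data[] values follow the comment above.
 */
#include <string.h>
#include <comedilib.h>

static int apci1500_setup_timer1(comedi_t *it, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[8];

	memset(&insn, 0, sizeof(insn));
	data[0] = 2;		/* input clock: APCI1500_1_8_KHZ */
	data[1] = 0;		/* select Counter1/Timer1 */
	data[2] = 1;		/* use it as a timer */
	data[3] = 1000;		/* reload value / divider factor */
	data[4] = 0;		/* APCI1500_CONTINUOUS */
	data[5] = 0;		/* software trigger */
	data[6] = 0;		/* software gate */
	data[7] = 0;		/* interrupt disabled */
	insn.insn = INSN_CONFIG;
	insn.n = 8;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = 0;

	return comedi_do_insn(it, &insn);
}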
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_StartStopTriggerTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s,
-| struct comedi_insn *insn,unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop or trigger the timer counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-| data[0] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-| data[1] : 0 start
-| 1 stop
-| 2 Trigger
-| data[2] : 0 Counter
-| 1 Timer/Watchdog
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start, stop or trigger the timer, counter or watchdog
+ *
+ * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ * data[1] 0 = Start, 1 = Stop, 2 = Trigger
+ * data[2] 0 = Counter, 1 = Timer/Watchdog
+ */
+static int apci1500_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_CommandAndStatusValue;
@@ -1924,13 +1507,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts timer/counter 1 */
- /**************************/
i_TimerCounter1Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1939,20 +1518,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nCounter/Timer1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer1 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop timer/counter 1 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1965,23 +1541,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case TRIGGER:
if (i_TimerCounter1Init == 1) {
if (i_TimerCounter1Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_TimerCounter1Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_TimerCounter1Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -1990,13 +1560,15 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nCounter/Timer1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer1 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
@@ -2011,13 +1583,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts timer/counter 2 */
- /**************************/
i_TimerCounter2Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2026,20 +1594,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nCounter/Timer2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer2 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop timer/counter 2 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2051,23 +1616,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case TRIGGER:
if (i_TimerCounter2Init == 1) {
if (i_TimerCounter2Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_TimerCounter2Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_TimerCounter2Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2076,12 +1635,14 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nCounter/Timer2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter/Timer2 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
@@ -2096,13 +1657,9 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
else {
i_CommandAndStatusValue = 0xE4; /* disable the interrupt */
} /* elseif(i_TimerCounterWatchdogInterrupt==1) */
- /**************************/
/* Starts Watchdog/counter 3 */
- /**************************/
i_WatchdogCounter3Enabled = 1;
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2112,20 +1669,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
} /* if( i_WatchdogCounter3init==1) */
else {
- printk("\nWatchdog/Counter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Watchdog/Counter3 not configured\n");
return -EINVAL;
}
break;
case STOP:
- /**************************/
/* Stop Watchdog/counter 3 */
- /**************************/
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2140,23 +1694,17 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
case 0: /* triggering counter 3 */
if (i_WatchdogCounter3Init == 1) {
if (i_WatchdogCounter3Enabled == 1) {
- /************************/
/* Set Trigger and gate */
- /************************/
i_CommandAndStatusValue = 0x6;
} /* if( i_WatchdogCounter3Enabled==1) */
else {
- /***************/
/* Set Trigger */
- /***************/
i_CommandAndStatusValue = 0x2;
} /* elseif(i_WatchdogCounter3Enabled==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2165,7 +1713,8 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nCounter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Counter3 not configured\n");
return -EINVAL;
}
break;
@@ -2173,9 +1722,7 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
/* triggering Watchdog 3 */
if (i_WatchdogCounter3Init == 1) {
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2184,55 +1731,40 @@ static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nWatchdog 3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Watchdog 3 not configured\n");
return -EINVAL;
}
break;
default:
- printk("\nWrong choice of watchdog/counter3\n");
+ dev_warn(dev->hw_dev,
+ "Wrong choice of watchdog/counter3\n");
return -EINVAL;
} /* switch(data[2]) */
break;
default:
- printk("\nThe specified option for start/stop/trigger does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified option for start/stop/trigger does not exist\n");
return -EINVAL;
} /* switch(data[1]) */
break;
default:
- printk("\nThe specified choice for counter/watchdog/timer does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The specified choice for counter/watchdog/timer does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
return insn->n;
}
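Starting, stopping or triggering the configured channel is then a second instruction carrying the data[] values listed in the comment above. A hedged sketch (the subdevice number and the use of INSN_WRITE, suggested by the handler's name, are assumptions):

/* Hypothetical userspace sketch (comedilib); subdevice number and
 * instruction type are assumptions.
 */
#include <string.h>
#include <comedilib.h>

static int apci1500_start_timer1(comedi_t *it, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[3];

	memset(&insn, 0, sizeof(insn));
	data[0] = 0;		/* Counter1/Timer1 */
	data[1] = 0;		/* 0 = start */
	data[2] = 1;		/* it was configured as a timer */
	insn.insn = INSN_WRITE;
	insn.n = 3;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = 0;

	return comedi_do_insn(it, &insn);
}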
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ReadCounterTimerWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Read The Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-| data[0] : 0 Counter1/Timer1
-| 1 Counter2/Timer2
-| 2 Counter3/Watchdog
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the timer, counter or watchdog value
+ *
+ * data[0] 0 = Counter1/Timer1, 1 = Counter2/Timer2, 2 = Counter3/Watchdog
+ */
+static int apci1500_timer_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
int i_CommandAndStatusValue;
@@ -2242,23 +1774,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/timer1 */
if (i_TimerCounter1Init == 1) {
if (i_TimerCounter1Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter1Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_TimerCounter1Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2266,9 +1792,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR1_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2285,7 +1809,8 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter1Init==1) */
else {
- printk("\nTimer/Counter1 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Timer/Counter1 not configured\n");
return -EINVAL;
} /* elseif( i_TimerCounter1Init==1) */
break;
@@ -2293,23 +1818,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/timer2 */
if (i_TimerCounter2Init == 1) {
if (i_TimerCounter2Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter2Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_TimerCounter2Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2317,9 +1836,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR2_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2336,7 +1853,8 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_TimerCounter2Init==1) */
else {
- printk("\nTimer/Counter2 not configured\n");
+ dev_warn(dev->hw_dev,
+ "Timer/Counter2 not configured\n");
return -EINVAL;
} /* elseif( i_TimerCounter2Init==1) */
break;
@@ -2344,23 +1862,17 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
/* Read counter/watchdog2 */
if (i_WatchdogCounter3Init == 1) {
if (i_WatchdogCounter3Enabled == 1) {
- /************************/
/* Set RCC and gate */
- /************************/
i_CommandAndStatusValue = 0xC;
} /* if( i_TimerCounter2Init==1) */
else {
- /***************/
/* Set RCC */
- /***************/
i_CommandAndStatusValue = 0x8;
} /* elseif(i_WatchdogCounter3Init==1) */
- /********************************************/
/* Selects the commands and status register */
- /********************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2368,9 +1880,7 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************/
/* Selects the counter register (high) */
- /***************************************/
outb(APCI1500_R_CPT_TMR3_VALUE_HIGH,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2387,12 +1897,14 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
APCI1500_Z8536_CONTROL_REGISTER);
} /* if( i_WatchdogCounter3Init==1) */
else {
- printk("\nWatchdogCounter3 not configured\n");
+ dev_warn(dev->hw_dev,
+ "WatchdogCounter3 not configured\n");
return -EINVAL;
} /* elseif( i_WatchdogCounter3Init==1) */
break;
default:
- printk("\nThe choice of timer/counter/watchdog does not exist\n");
+ dev_warn(dev->hw_dev,
+ "The choice of timer/counter/watchdog does not exist\n");
return -EINVAL;
} /* switch(data[0]) */
@@ -2400,31 +1912,15 @@ static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
}
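The read-back path above follows the same Z8536 pattern: latch the count via the RCC bit in the command/status register, then fetch the high and low value bytes. A kernel-side sketch for counter/timer 1 (illustrative only, not part of this patch; the APCI1500_R_CPT_TMR1_VALUE_LOW name is assumed by analogy with the _VALUE_HIGH constant used above):

/* Illustrative sketch, not part of this patch. The _VALUE_LOW register
 * name is an assumption mirroring the _VALUE_HIGH constant above.
 */
static unsigned int apci1500_latch_timer1(struct addi_private *devpriv)
{
	unsigned int hi, lo;

	/* set RCC to latch the current count */
	outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
	     devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	outb(0x8, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);

	/* read the latched 16-bit value, high byte first */
	outb(APCI1500_R_CPT_TMR1_VALUE_HIGH,
	     devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	hi = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	outb(APCI1500_R_CPT_TMR1_VALUE_LOW,
	     devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	lo = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);

	return (hi << 8) | lo;
}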
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ReadInterruptMask |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Read the interrupt mask |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer to read status |
-
-
-+----------------------------------------------------------------------------+
-| Output Parameters : -- data[0]:The interrupt mask value data[1]:Channel no
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ReadInterruptMask(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the interrupt mask
+ *
+ * data[0] The interrupt mask value
+ * data[1] The channel number
+ */
+static int apci1500_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[0] = i_InterruptMask;
data[1] = i_InputChannel;
@@ -2433,31 +1929,12 @@ static int i_APCI1500_ReadInterruptMask(struct comedi_device *dev,
}
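The mask and channel returned here are simply whatever the interrupt routine last stored in i_InterruptMask and i_InputChannel, so a caller would typically issue this instruction after receiving the SIGIO that the interrupt handler sends. A hedged comedilib sketch (the subdevice number and the use of INSN_READ are assumptions):

/* Hypothetical userspace sketch (comedilib); subdevice number and
 * instruction type are assumptions based on the handler's name.
 */
#include <string.h>
#include <comedilib.h>

static int apci1500_read_irq_mask(comedi_t *it, unsigned int subdev,
				  lsampl_t *mask, lsampl_t *channel)
{
	comedi_insn insn;
	lsampl_t data[2];
	int ret;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_READ;
	insn.n = 2;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = 0;

	ret = comedi_do_insn(it, &insn);
	if (ret >= 0) {
		*mask = data[0];	/* data[0]: the interrupt mask value */
		*channel = data[1];	/* data[1]: the channel number */
	}
	return ret;
}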
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_ConfigureInterrupt |
-| (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,
-| unsigned int *data); |
-+----------------------------------------------------------------------------+
-| Task : Configures the interrupt registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| struct comedi_subdevice *s, :pointer to subdevice structure
-| struct comedi_insn *insn :pointer to insn structure |
-| unsigned int *data : Data Pointer |
-|
-
-+----------------------------------------------------------------------------+
-| Output Parameters : --
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the interrupt registers
+ */
+static int apci1500_do_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Status;
@@ -2474,140 +1951,101 @@ static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
i_Constant = 0x00;
} /* if{data[0]==0) */
else {
- printk("\nThe parameter passed to driver is in error for enabling the voltage interrupt\n");
+ dev_warn(dev->hw_dev,
+ "The parameter passed to driver is in error for enabling the voltage interrupt\n");
return -EINVAL;
} /* else if(data[0]==0) */
} /* elseif(data[0]==1) */
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*********************************************/
/* Writes the new configuration (APCI1500_OR) */
- /*********************************************/
i_RegValue = (i_RegValue & 0xF9) | APCI1500_OR;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************/
/* Authorises the interrupt on the board */
- /*****************************************/
outb(0xC0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the pattern polarity register of port B */
- /***************************************************/
outb(APCI1500_RW_PORT_B_PATTERN_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the pattern transition register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_PATTERN_TRANSITION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************************/
/* Selects the pattern mask register of port B */
- /***********************************************/
outb(APCI1500_RW_PORT_B_PATTERN_MASK,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(i_Constant, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port A */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port B */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 1 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 2 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 3 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /***************************/
/* Enables the PCI interrupt */
- /*****************************/
outl(0x3000, devpriv->i_IobaseAmcc + 0x38);
ui_Status = inl(devpriv->i_IobaseAmcc + 0x10);
ui_Status = inl(devpriv->i_IobaseAmcc + 0x38);
@@ -2616,24 +2054,7 @@ static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
return insn->n;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI1500_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt handler |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI1500_Interrupt(int irq, void *d)
+static void apci1500_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
@@ -2642,44 +2063,28 @@ static void v_APCI1500_Interrupt(int irq, void *d)
int i_RegValue = 0;
i_InterruptMask = 0;
- /***********************************/
/* Read the board interrupt status */
- /***********************************/
ui_InterruptStatus = inl(devpriv->i_IobaseAmcc + 0x38);
- /***************************************/
/* Test if board generated a interrupt */
- /***************************************/
if ((ui_InterruptStatus & 0x800000) == 0x800000) {
- /************************/
/* Disable all Interrupt */
- /************************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
/* outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,devpriv->iobase+APCI1500_Z8536_CONTROL_REGISTER); */
- /**********************************************/
/* Disables the main interrupt on the board */
- /**********************************************/
/* outb(0x00,devpriv->iobase+APCI1500_Z8536_CONTROL_REGISTER); */
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port A */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2693,9 +2098,7 @@ static void v_APCI1500_Interrupt(int irq, void *d)
inb(devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***************************************************/
/* Selects the interrupt vector register of port A */
- /***************************************************/
outb(APCI1500_RW_PORT_A_INTERRUPT_CONTROL,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
@@ -2711,45 +2114,32 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* elseif(i_Logic==APCI1500_OR_PRIORITY) */
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of port B */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- printk("\n\n\n");
- /****************/
/* Reads port B */
- /****************/
i_RegValue =
inb((unsigned int) devpriv->iobase +
APCI1500_Z8536_PORT_B);
i_RegValue = i_RegValue & 0xC0;
- /**************************************/
/* Tests if this is an external error */
- /**************************************/
if (i_RegValue) {
/* Disable the interrupt */
- /*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outl(0x0, devpriv->i_IobaseAmcc + 0x38);
if (i_RegValue & 0x80) {
@@ -2767,46 +2157,34 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* if (i_RegValue) */
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 1 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
i_InterruptMask = i_InterruptMask | 4;
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 2 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2814,23 +2192,17 @@ static void v_APCI1500_Interrupt(int irq, void *d)
i_InterruptMask = i_InterruptMask | 8;
} /* if ((i_RegValue & 0x60) == 0x60) */
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_RegValue =
inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
if ((i_RegValue & 0x60) == 0x60) {
- /*****************************************************/
/* Selects the command and status register of timer 3 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase +
APCI1500_Z8536_CONTROL_REGISTER);
- /***********************************/
/* Deletes the interrupt of timer 3 */
- /***********************************/
i_RegValue = (i_RegValue & 0x0F) | 0x20;
outb(i_RegValue,
devpriv->iobase +
@@ -2844,42 +2216,23 @@ static void v_APCI1500_Interrupt(int irq, void *d)
} /* if ((i_RegValue & 0x60) == 0x60) */
send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- /***********************/
/* Enable all Interrupts */
- /***********************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /**********************************************/
/* Authorizes the main interrupt on the board */
- /**********************************************/
outb(0xD0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
} /* if ((ui_InterruptStatus & 0x800000) == 0x800000) */
else {
- printk("\nInterrupt from unknown source\n");
+ dev_warn(dev->hw_dev,
+ "Interrupt from unknown source\n");
} /* else if ((ui_InterruptStatus & 0x800000) == 0x800000) */
return;
}
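
Every Z8536 register access in this handler follows the same two-step indexed pattern: the register number is written to the control port, then the value is read or written through that same port; clearing a pending interrupt writes the status back with the low nibble kept and 0x20 set, which the driver's comments describe as deleting the interrupt. A minimal sketch of that access pattern, assuming the devpriv->iobase field and APCI1500_Z8536_CONTROL_REGISTER offset used above; these helpers are illustrative only and are not part of the patch:

static unsigned char z8536_read(struct addi_private *devpriv, unsigned char reg)
{
	/* select the register, then read its value through the control port */
	outb(reg, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	return inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
}

static void z8536_write(struct addi_private *devpriv, unsigned char reg,
			unsigned char val)
{
	/* select the register, then write the value through the control port */
	outb(reg, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
	outb(val, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
}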
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1500_Reset(struct comedi_device *dev) | |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1500_Reset(struct comedi_device *dev)
+static int apci1500_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
int i_DummyRead = 0;
@@ -2898,9 +2251,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
i_TimerCounter2Enabled = 0;
i_WatchdogCounter3Enabled = 0;
- /******************/
/* Software reset */
- /******************/
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
i_DummyRead = inb(devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2908,16 +2259,12 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
outb(1, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the master configuration control register */
- /*****************************************************/
outb(APCI1500_RW_MASTER_CONFIGURATION_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0xF4, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2949,9 +2296,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the mode specification register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_SPECIFICATION,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
outb(0x10, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
@@ -2981,9 +2326,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
/* Deletes the register */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the data path polarity register of port C */
- /*****************************************************/
outb(APCI1500_RW_PORT_C_DATA_PCITCH_POLARITY,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* High level of port C means 1 */
@@ -2998,9 +2341,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes it */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 1 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3010,9 +2351,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates the interrupt management of timer 1 */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 2 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3022,9 +2361,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates Timer 2 interrupt management: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /******************************************************/
/* Selects the command and status register of timer 3 */
- /******************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes IP and IUS */
@@ -3034,71 +2371,43 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deactivates interrupt management of timer 3: */
outb(0xE0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* Deletes all interrupts */
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
/* reset all the digital outputs */
outw(0x0, devpriv->i_IobaseAddon + APCI1500_DIGITAL_OP);
-/*******************************/
/* Disable the board interrupt */
-/*******************************/
- /*************************************************/
/* Selects the master interrupt control register */
- /*************************************************/
outb(APCI1500_RW_MASTER_INTERRUPT_CONTROL,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
- /*****************************************************/
/* Selects the command and status register of port A */
- /*****************************************************/
outb(APCI1500_RW_PORT_A_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of port B */
- /*****************************************************/
outb(APCI1500_RW_PORT_B_COMMAND_AND_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 1 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR1_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 2 */
- /*****************************************************/
outb(APCI1500_RW_CPT_TMR2_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/*****************************************************/
/* Selects the command and status register of timer 3*/
-/*****************************************************/
outb(APCI1500_RW_CPT_TMR3_CMD_STATUS,
devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
-/****************************/
/* Deactivates all interrupts */
-/******************************/
outb(0x00, devpriv->iobase + APCI1500_Z8536_CONTROL_REGISTER);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index 84668544f52d..9c86b02eb6da 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -1,74 +1,32 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1564 | Compiler : GCC |
- | Module name : hwdrv_apci1564.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1564 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-/********* Definitions for APCI-1564 card *****/
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ */
#define APCI1564_ADDRESS_RANGE 128
-/* DIGITAL INPUT-OUTPUT DEFINE */
-/* Input defines */
-#define APCI1564_DIGITAL_IP 0x04
-#define APCI1564_DIGITAL_IP_INTERRUPT_MODE1 4
-#define APCI1564_DIGITAL_IP_INTERRUPT_MODE2 8
-#define APCI1564_DIGITAL_IP_IRQ 16
-
-/* Output defines */
-#define APCI1564_DIGITAL_OP 0x18
-#define APCI1564_DIGITAL_OP_RW 0
-#define APCI1564_DIGITAL_OP_INTERRUPT 4
-#define APCI1564_DIGITAL_OP_IRQ 12
-
/* Digital Input IRQ Function Selection */
#define ADDIDATA_OR 0
#define ADDIDATA_AND 1
-/* Digital Input Interrupt Status */
-#define APCI1564_DIGITAL_IP_INTERRUPT_STATUS 12
-
-/* Digital Output Interrupt Status */
-#define APCI1564_DIGITAL_OP_INTERRUPT_STATUS 8
-
/* Digital Input Interrupt Enable Disable. */
#define APCI1564_DIGITAL_IP_INTERRUPT_ENABLE 0x4
#define APCI1564_DIGITAL_IP_INTERRUPT_DISABLE 0xfffffffb
@@ -80,98 +38,91 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY
#define APCI1564_DIGITAL_OP_CC_INTERRUPT_DISABLE 0xfffffffd
/* TIMER COUNTER WATCHDOG DEFINES */
-
#define ADDIDATA_TIMER 0
#define ADDIDATA_COUNTER 1
#define ADDIDATA_WATCHDOG 2
-#define APCI1564_DIGITAL_OP_WATCHDOG 0x28
-#define APCI1564_TIMER 0x48
-#define APCI1564_COUNTER1 0x0
-#define APCI1564_COUNTER2 0x20
-#define APCI1564_COUNTER3 0x40
-#define APCI1564_COUNTER4 0x60
-#define APCI1564_TCW_SYNC_ENABLEDISABLE 0
-#define APCI1564_TCW_RELOAD_VALUE 4
-#define APCI1564_TCW_TIMEBASE 8
-#define APCI1564_TCW_PROG 12
-#define APCI1564_TCW_TRIG_STATUS 16
-#define APCI1564_TCW_IRQ 20
-#define APCI1564_TCW_WARN_TIMEVAL 24
-#define APCI1564_TCW_WARN_TIMEBASE 28
+#define APCI1564_COUNTER1 0
+#define APCI1564_COUNTER2 1
+#define APCI1564_COUNTER3 2
+#define APCI1564_COUNTER4 3
+
+/*
+ * devpriv->i_IobaseAmcc Register Map
+ */
+#define APCI1564_DI_REG 0x04
+#define APCI1564_DI_INT_MODE1_REG 0x08
+#define APCI1564_DI_INT_MODE2_REG 0x0c
+#define APCI1564_DI_INT_STATUS_REG 0x10
+#define APCI1564_DI_IRQ_REG 0x14
+#define APCI1564_DO_REG 0x18
+#define APCI1564_DO_INT_CTRL_REG 0x1c
+#define APCI1564_DO_INT_STATUS_REG 0x20
+#define APCI1564_DO_IRQ_REG 0x24
+#define APCI1564_WDOG_REG 0x28
+#define APCI1564_WDOG_RELOAD_REG 0x2c
+#define APCI1564_WDOG_TIMEBASE_REG 0x30
+#define APCI1564_WDOG_CTRL_REG 0x34
+#define APCI1564_WDOG_STATUS_REG 0x38
+#define APCI1564_WDOG_IRQ_REG 0x3c
+#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x40
+#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x44
+#define APCI1564_TIMER_REG 0x48
+#define APCI1564_TIMER_RELOAD_REG 0x4c
+#define APCI1564_TIMER_TIMEBASE_REG 0x50
+#define APCI1564_TIMER_CTRL_REG 0x54
+#define APCI1564_TIMER_STATUS_REG 0x58
+#define APCI1564_TIMER_IRQ_REG 0x5c
+#define APCI1564_TIMER_WARN_TIMEVAL_REG 0x60
+#define APCI1564_TIMER_WARN_TIMEBASE_REG 0x64
+
+/*
+ * devpriv->iobase Register Map
+ */
+#define APCI1564_TCW_REG(x) (0x00 + ((x) * 0x20))
+#define APCI1564_TCW_RELOAD_REG(x) (0x04 + ((x) * 0x20))
+#define APCI1564_TCW_TIMEBASE_REG(x) (0x08 + ((x) * 0x20))
+#define APCI1564_TCW_CTRL_REG(x) (0x0c + ((x) * 0x20))
+#define APCI1564_TCW_STATUS_REG(x) (0x10 + ((x) * 0x20))
+#define APCI1564_TCW_IRQ_REG(x) (0x14 + ((x) * 0x20))
+#define APCI1564_TCW_WARN_TIMEVAL_REG(x) (0x18 + ((x) * 0x20))
+#define APCI1564_TCW_WARN_TIMEBASE_REG(x) (0x1c + ((x) * 0x20))
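
The parameterized macros above replace the old scheme of a per-counter base address (0x00, 0x20, 0x40, 0x60) plus a fixed register offset; the counter defines become plain indices. A standalone sketch, not part of the patch, that checks the new control-register macro reproduces the old "counter base + APCI1564_TCW_PROG (12)" offsets; the old values are restated locally for the comparison:

#include <assert.h>

#define APCI1564_TCW_CTRL_REG(x)	(0x0c + ((x) * 0x20))

int main(void)
{
	/* old per-counter bases and the old APCI1564_TCW_PROG offset */
	const unsigned int old_base[] = { 0x00, 0x20, 0x40, 0x60 };
	const unsigned int old_tcw_prog = 12;
	int i;

	for (i = 0; i < 4; i++)
		assert(APCI1564_TCW_CTRL_REG(i) == old_base[i] + old_tcw_prog);

	return 0;
}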
/* Global variables */
-static unsigned int ui_InterruptStatus_1564 = 0;
+static unsigned int ui_InterruptStatus_1564;
static unsigned int ui_InterruptData, ui_Type;
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigDigitalInput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures the digital input Subdevice |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 1 Enable Digital Input Interrupt |
-| 0 Disable Digital Input Interrupt |
-| data[1] : 0 ADDIDATA Interrupt OR LOGIC |
-| : 1 ADDIDATA Interrupt AND LOGIC |
-| data[2] : Interrupt mask for the mode 1 |
-| data[3] : Interrupt mask for the mode 2 |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigDigitalInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital input subdevice
+ *
+ * data[0] 1 = Enable interrupt, 0 = Disable interrupt
+ * data[1] 0 = ADDIDATA Interrupt OR LOGIC, 1 = ADDIDATA Interrupt AND LOGIC
+ * data[2] Interrupt mask for mode 1
+ * data[3] Interrupt mask for mode 2
+ */
+static int apci1564_di_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
devpriv->tsk_Current = current;
- /*******************************/
+
/* Set the digital input logic */
- /*******************************/
if (data[0] == ADDIDATA_ENABLE) {
data[2] = data[2] << 4;
data[3] = data[3] << 4;
- outl(data[2],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE1);
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
- if (data[1] == ADDIDATA_OR) {
- outl(0x4,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* if (data[1] == ADDIDATA_OR) */
- else {
- outl(0x6,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* else if (data[1] == ADDIDATA_OR) */
- } /* if (data[0] == ADDIDATA_ENABLE) */
- else {
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE1);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- } /* else if (data[0] == ADDIDATA_ENABLE) */
+ outl(data[2], devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
+ if (data[1] == ADDIDATA_OR)
+ outl(0x4, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ else
+ outl(0x6, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ } else {
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ }
return insn->n;
}
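
For reference, an illustrative data[] layout that apci1564_di_config() would accept to enable the digital-input interrupt with OR logic; the mask values are example values only. data[2] and data[3] are shifted left by 4 before being written to the mode 1/mode 2 registers, and 0x4 (OR) or 0x6 (AND) goes to the IRQ register:

/* example values only; not taken from the driver */
unsigned int di_config_data[4] = {
	1,	/* data[0]: 1 = enable the digital-input interrupt */
	0,	/* data[1]: 0 = OR logic, 1 = AND logic */
	0x0f,	/* data[2]: interrupt mask for mode 1 (example) */
	0x00,	/* data[3]: interrupt mask for mode 2 (example) */
};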
@@ -183,40 +134,21 @@ static int apci1564_di_insn_bits(struct comedi_device *dev,
{
struct addi_private *devpriv = dev->private;
- data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP);
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_DI_REG);
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigDigitalOutput |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Digital Output Subdevice. |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[1] : 1 Enable VCC Interrupt |
-| 0 Disable VCC Interrupt |
-| data[2] : 1 Enable CC Interrupt |
-| 0 Disable CC Interrupt |
-| |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the digital output subdevice.
+ *
+ * data[1] 0 = Disable VCC Interrupt, 1 = Enable VCC Interrupt
+ * data[2] 0 = Disable CC Interrupt, 1 = Enable CC Interrupt
+ */
+static int apci1564_do_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command = 0;
@@ -225,31 +157,25 @@ static int i_APCI1564_ConfigDigitalOutput(struct comedi_device *dev,
comedi_error(dev,
"Not a valid Data !!! ,Data should be 1 or 0\n");
return -EINVAL;
- } /* if ((data[0]!=0) && (data[0]!=1)) */
- if (data[0]) {
+ }
+
+ if (data[0])
devpriv->b_OutputMemoryStatus = ADDIDATA_ENABLE;
- } /* if (data[0]) */
- else {
+ else
devpriv->b_OutputMemoryStatus = ADDIDATA_DISABLE;
- } /* else if (data[0]) */
- if (data[1] == ADDIDATA_ENABLE) {
+
+ if (data[1] == ADDIDATA_ENABLE)
ul_Command = ul_Command | 0x1;
- } /* if (data[1] == ADDIDATA_ENABLE) */
- else {
+ else
ul_Command = ul_Command & 0xFFFFFFFE;
- } /* else if (data[1] == ADDIDATA_ENABLE) */
- if (data[2] == ADDIDATA_ENABLE) {
+
+ if (data[2] == ADDIDATA_ENABLE)
ul_Command = ul_Command | 0x2;
- } /* if (data[2] == ADDIDATA_ENABLE) */
- else {
+ else
ul_Command = ul_Command & 0xFFFFFFFD;
- } /* else if (data[2] == ADDIDATA_ENABLE) */
- outl(ul_Command,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
- ui_InterruptData =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
+
+ outl(ul_Command, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
+ ui_InterruptData = inl(devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
devpriv->tsk_Current = current;
return insn->n;
}
@@ -261,12 +187,10 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
{
struct addi_private *devpriv = dev->private;
- s->state = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_RW);
+ s->state = inl(devpriv->i_IobaseAmcc + APCI1564_DO_REG);
if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_RW);
+ outl(s->state, devpriv->i_IobaseAmcc + APCI1564_DO_REG);
data[1] = s->state;
@@ -274,39 +198,20 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ConfigTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Configures The Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Configure As Timer |
-| 1 Configure As Counter |
-| 2 Configure As Watchdog |
-| data[1] : 1 Enable Interrupt |
-| 0 Disable Interrupt |
-| data[2] : Time Unit |
-| data[3] : Reload Value |
-| data[4] : Timer Mode |
-| data[5] : Timer Counter Watchdog Number|
- data[6] : Counter Direction
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Configures the timer, counter or watchdog
+ *
+ * data[0] Configure as: 0 = Timer, 1 = Counter, 2 = Watchdog
+ * data[1] 1 = Enable Interrupt, 0 = Disable Interrupt
+ * data[2] Time Unit
+ * data[3] Reload Value
+ * data[4] Timer Mode
+ * data[5] Timer Counter Watchdog Number
+ * data[6] Counter Direction
+ */
+static int apci1564_timer_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -316,89 +221,59 @@ static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
devpriv->b_TimerSelectMode = ADDIDATA_WATCHDOG;
/* Disable the watchdog */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
/* Loading the Reload value */
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_RELOAD_VALUE);
- } /* if (data[0]==ADDIDATA_WATCHDOG) */
- else if (data[0] == ADDIDATA_TIMER) {
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_WDOG_RELOAD_REG);
+ } else if (data[0] == ADDIDATA_TIMER) {
/* First Stop The Timer */
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* Stop The Timer */
+ /* Stop The Timer */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
if (data[1] == 1) {
- outl(0x02, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_IRQ);
+ /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
+ outl(0x02, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_IRQ_REG);
outl(0x0,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER1));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER2));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_IRQ);
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER3));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_IRQ);
- outl(0x0,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_IRQ);
- } /* if (data[1]==1) */
- else {
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* disable Timer interrupt */
- } /* else if (data[1]==1) */
+ devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER4));
+ } else {
+ /* disable Timer interrupt */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ }
/* Loading Timebase */
-
- outl(data[2],
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_TIMEBASE);
+ outl(data[2], devpriv->i_IobaseAmcc + APCI1564_TIMER_TIMEBASE_REG);
/* Loading the Reload value */
- outl(data[3],
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_RELOAD_VALUE);
+ outl(data[3], devpriv->i_IobaseAmcc + APCI1564_TIMER_RELOAD_REG);
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- ul_Command1 =
- (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG); /* mode 2 */
- } /* else if (data[0]==ADDIDATA_TIMER) */
- else if (data[0] == ADDIDATA_COUNTER) {
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ ul_Command1 = (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
+ /* mode 2 */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ } else if (data[0] == ADDIDATA_COUNTER) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
devpriv->b_ModeSelectRegister = data[5];
/* First Stop The Counter */
- ul_Command1 =
- inl(devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->iobase + ((data[5] - 1) * 0x20) + APCI1564_TCW_PROG); /* Stop The Timer */
+ /* Stop The Timer */
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
- /************************/
/* Set the reload value */
- /************************/
- outl(data[3],
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_RELOAD_VALUE);
+ outl(data[3], devpriv->iobase + APCI1564_TCW_RELOAD_REG(data[5] - 1));
- /******************************/
/* Set the mode : */
/* - Disable the hardware */
/* - Disable the counter mode */
@@ -406,65 +281,36 @@ static int i_APCI1564_ConfigTimerCounterWatchdog(struct comedi_device *dev,
/* - Disable the reset */
/* - Disable the timer mode */
/* - Enable the counter mode */
- /******************************/
+
ul_Command1 =
(ul_Command1 & 0xFFFC19E2UL) | 0x80000UL |
(unsigned int) ((unsigned int) data[4] << 16UL);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
/* Enable or Disable Interrupt */
ul_Command1 = (ul_Command1 & 0xFFFFF9FD) | (data[1] << 1);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
- /*****************************/
/* Set the Up/Down selection */
- /*****************************/
ul_Command1 = (ul_Command1 & 0xFFFBF9FFUL) | (data[6] << 18);
- outl(ul_Command1,
- devpriv->iobase + ((data[5] - 1) * 0x20) +
- APCI1564_TCW_PROG);
- } /* else if (data[0]==ADDIDATA_COUNTER) */
- else {
- printk(" Invalid subdevice.");
- } /* else if (data[0]==ADDIDATA_WATCHDOG) */
+ outl(ul_Command1, devpriv->iobase + APCI1564_TCW_CTRL_REG(data[5] - 1));
+ } else {
+ dev_err(dev->class_dev, "Invalid subdevice.\n");
+ }
return insn->n;
}
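
Similarly, an illustrative data[] layout that apci1564_timer_config() would treat as "configure counter 1 with its interrupt enabled"; the reload value, mode and direction fields below are example values only:

/* example values only; not taken from the driver */
unsigned int counter_config_data[7] = {
	ADDIDATA_COUNTER,	/* data[0]: 1 = configure as counter */
	1,			/* data[1]: 1 = enable the interrupt */
	0,			/* data[2]: time unit (used by the timer) */
	1000,			/* data[3]: reload value (example) */
	0,			/* data[4]: mode, shifted into the control word */
	1,			/* data[5]: counter number, 1..4 */
	0,			/* data[6]: count direction select */
};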
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_StartStopWriteTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Start / Stop The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-| data[0] : 0 Timer |
-| 1 Counter |
-| 2 Watchdog | | data[1] : 1 Start |
-| 0 Stop |
-| 2 Trigger |
-| Clear (Only Counter) |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_StartStopWriteTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Start / stop the selected timer, counter or watchdog
+ *
+ * data[0] Configure as: 0 = Timer, 1 = Counter, 2 = Watchdog
+ * data[1] 0 = Stop, 1 = Start, 2 = Trigger Clear (Only Counter)
+ */
+static int apci1564_timer_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -472,203 +318,122 @@ static int i_APCI1564_StartStopWriteTimerCounterWatchdog(struct comedi_device *d
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
switch (data[1]) {
case 0: /* stop the watchdog */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG + APCI1564_TCW_PROG); /* disable the watchdog */
+ /* disable the watchdog */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
case 1: /* start the watchdog */
- outl(0x0001,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0001, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
case 2: /* Software trigger */
- outl(0x0201,
- devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_PROG);
+ outl(0x0201, devpriv->i_IobaseAmcc + APCI1564_WDOG_CTRL_REG);
break;
default:
- printk("\nSpecified functionality does not exist\n");
+ dev_err(dev->class_dev, "Specified functionality does not exist.\n");
return -EINVAL;
- } /* switch (data[1]) */
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
+ }
+ }
if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
if (data[1] == 1) {
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
/* Enable the Timer */
- outl(ul_Command1,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- } /* if (data[1]==1) */
- else if (data[1] == 0) {
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ } else if (data[1] == 0) {
/* Stop The Timer */
- ul_Command1 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command1 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- } /* else if(data[1]==0) */
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
+ outl(ul_Command1, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ }
+ }
if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
ul_Command1 =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_PROG);
+ inl(devpriv->iobase +
+ APCI1564_TCW_CTRL_REG(devpriv->b_ModeSelectRegister - 1));
if (data[1] == 1) {
/* Start the Counter subdevice */
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- } /* if (data[1] == 1) */
- else if (data[1] == 0) {
+ } else if (data[1] == 0) {
/* Stops the Counter subdevice */
ul_Command1 = 0;
- } /* else if (data[1] == 0) */
- else if (data[1] == 2) {
+ } else if (data[1] == 2) {
/* Clears the Counter subdevice */
ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x400;
- } /* else if (data[1] == 3) */
+ }
outl(ul_Command1,
- devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_PROG);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_COUNTER) */
+ devpriv->iobase +
+ APCI1564_TCW_CTRL_REG(devpriv->b_ModeSelectRegister - 1));
+ }
return insn->n;
}
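
And the matching start/stop instruction: apci1564_timer_write() acts on the mode previously selected by the config call and only examines data[1], so a minimal data[] for starting the watchdog could look like this (illustrative only):

/* example values only; not taken from the driver */
unsigned int wdog_start_data[2] = {
	ADDIDATA_WATCHDOG,	/* data[0]: 2 = watchdog (selected at config time) */
	1,			/* data[1]: 1 = start, 0 = stop, 2 = software trigger */
};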
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ReadTimerCounterWatchdog |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read The Selected Timer , Counter or Watchdog |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev : Driver handle |
-| unsigned int *data : Data Pointer contains |
-| configuration parameters as below |
-| |
-
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1564_ReadTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Read the selected timer, counter or watchdog
+ */
+static int apci1564_timer_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
/* Stores the status of the Watchdog */
- data[0] =
- inl(devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_TRIG_STATUS) & 0x1;
- data[1] =
- inl(devpriv->i_IobaseAmcc +
- APCI1564_DIGITAL_OP_WATCHDOG);
- } /* if (devpriv->b_TimerSelectMode==ADDIDATA_WATCHDOG) */
- else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
+ data[0] = inl(devpriv->i_IobaseAmcc + APCI1564_WDOG_STATUS_REG) & 0x1;
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_WDOG_REG);
+ } else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
/* Stores the status of the Timer */
- data[0] =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_TRIG_STATUS) & 0x1;
+ data[0] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_STATUS_REG) & 0x1;
/* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER);
- } /* else if (devpriv->b_TimerSelectMode==ADDIDATA_TIMER) */
- else if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
+ data[1] = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_REG);
+ } else if (devpriv->b_TimerSelectMode == ADDIDATA_COUNTER) {
/* Read the Counter Actual Value. */
data[0] =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) +
- APCI1564_TCW_SYNC_ENABLEDISABLE);
+ inl(devpriv->iobase +
+ APCI1564_TCW_REG(devpriv->b_ModeSelectRegister - 1));
ul_Command1 =
- inl(devpriv->iobase + ((devpriv->b_ModeSelectRegister -
- 1) * 0x20) + APCI1564_TCW_TRIG_STATUS);
+ inl(devpriv->iobase +
+ APCI1564_TCW_STATUS_REG(devpriv->b_ModeSelectRegister - 1));
- /***********************************/
/* Get the software trigger status */
- /***********************************/
data[1] = (unsigned char) ((ul_Command1 >> 1) & 1);
- /***********************************/
/* Get the hardware trigger status */
- /***********************************/
data[2] = (unsigned char) ((ul_Command1 >> 2) & 1);
- /*********************************/
/* Get the software clear status */
- /*********************************/
data[3] = (unsigned char) ((ul_Command1 >> 3) & 1);
- /***************************/
/* Get the overflow status */
- /***************************/
data[4] = (unsigned char) ((ul_Command1 >> 0) & 1);
- } /* else if (devpriv->b_TimerSelectMode==ADDIDATA_COUNTER) */
- else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
+ } else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
&& (devpriv->b_TimerSelectMode != ADDIDATA_WATCHDOG)
&& (devpriv->b_TimerSelectMode != ADDIDATA_COUNTER)) {
- printk("\n Invalid Subdevice !!!\n");
- } /* else if ((devpriv->b_TimerSelectMode!=ADDIDATA_TIMER) && (devpriv->b_TimerSelectMode!=ADDIDATA_WATCHDOG)&& (devpriv->b_TimerSelectMode!=ADDIDATA_COUNTER)) */
+ dev_err(dev->class_dev, "Invalid Subdevice!\n");
+ }
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_ReadInterruptStatus |
-| (struct comedi_device *dev,struct comedi_subdevice *s, |
-| struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task :Reads the interrupt status register |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1564_ReadInterruptStatus(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ * Reads the interrupt status register
+ */
+static int apci1564_do_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
*data = ui_Type;
return insn->n;
}
/*
-+----------------------------------------------------------------------------+
-| Function Name : static void v_APCI1564_Interrupt |
-| (int irq , void *d) |
-+----------------------------------------------------------------------------+
-| Task : Interrupt handler for the interruptible digital inputs |
-+----------------------------------------------------------------------------+
-| Input Parameters : int irq : irq number |
-| void *d : void pointer |
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : TRUE : No error occur |
-| : FALSE : Error occur. Return the error |
-| |
-+----------------------------------------------------------------------------+
-*/
-static void v_APCI1564_Interrupt(int irq, void *d)
+ * Interrupt handler for the interruptible digital inputs
+ */
+static void apci1564_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -677,79 +442,63 @@ static void v_APCI1564_Interrupt(int irq, void *d)
unsigned int ui_C1, ui_C2, ui_C3, ui_C4;
unsigned int ul_Command2 = 0;
- ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ) & 0x01;
- ui_DO = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_IRQ) & 0x01;
- ui_Timer =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_IRQ) & 0x01;
- ui_C1 = inl(devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C2 = inl(devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C3 = inl(devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_IRQ) & 0x1;
- ui_C4 = inl(devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_IRQ) & 0x1;
+ ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG) & 0x01;
+ ui_DO = inl(devpriv->i_IobaseAmcc + APCI1564_DO_IRQ_REG) & 0x01;
+ ui_Timer = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_IRQ_REG) & 0x01;
+ ui_C1 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER1)) & 0x1;
+ ui_C2 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER2)) & 0x1;
+ ui_C3 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER3)) & 0x1;
+ ui_C4 =
+ inl(devpriv->iobase + APCI1564_TCW_IRQ_REG(APCI1564_COUNTER4)) & 0x1;
if (ui_DI == 0 && ui_DO == 0 && ui_Timer == 0 && ui_C1 == 0
&& ui_C2 == 0 && ui_C3 == 0 && ui_C4 == 0) {
- printk("\nInterrupt from unknown source\n");
- } /* if(ui_DI==0 && ui_DO==0 && ui_Timer==0 && ui_C1==0 && ui_C2==0 && ui_C3==0 && ui_C4==0) */
+ dev_err(dev->class_dev, "Interrupt from unknown source.\n");
+ }
if (ui_DI == 1) {
- ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_IRQ);
+ ui_DI = inl(devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
ui_InterruptStatus_1564 =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP +
- APCI1564_DIGITAL_IP_INTERRUPT_STATUS);
+ inl(devpriv->i_IobaseAmcc + APCI1564_DI_INT_STATUS_REG);
ui_InterruptStatus_1564 = ui_InterruptStatus_1564 & 0X000FFFF0;
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- outl(ui_DI, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP + APCI1564_DIGITAL_IP_IRQ); /* enable the interrupt */
+ /* send signal to the sample */
+ send_sig(SIGIO, devpriv->tsk_Current, 0);
+ /* enable the interrupt */
+ outl(ui_DI, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
return;
}
if (ui_DO == 1) {
- /* Check for Digital Output interrupt Type - 1: Vcc interrupt 2: CC interrupt. */
- ui_Type =
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT_STATUS) & 0x3;
+ /* Check for Digital Output interrupt Type */
+ /* 1: VCC interrupt */
+ /* 2: CC interrupt */
+ ui_Type = inl(devpriv->i_IobaseAmcc + APCI1564_DO_INT_STATUS_REG) & 0x3;
/* Disable the Interrupt */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP +
- APCI1564_DIGITAL_OP_INTERRUPT);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
/* Sends signal to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } /* if (ui_DO) */
+ }
if (ui_Timer == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
if (devpriv->b_TimerSelectMode) {
/* Disable Timer Interrupt */
- ul_Command2 =
- inl(devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ ul_Command2 = inl(devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Timer Interrupt */
- outl(ul_Command2,
- devpriv->i_IobaseAmcc + APCI1564_TIMER +
- APCI1564_TCW_PROG);
+ outl(ul_Command2, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
}
- }/* if (ui_Timer == 1) */
-
+ }
if (ui_C1 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -757,21 +506,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER1 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
}
- } /* if (ui_C1 == 1) */
+ }
if (ui_C2 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -779,21 +525,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER2 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
}
- } /* if ((ui_C2 == 1) */
+ }
if (ui_C3 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -801,21 +544,18 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER3 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
}
- } /* if ((ui_C3 == 1) */
+ }
if (ui_C4 == 1) {
devpriv->b_TimerSelectMode = ADDIDATA_COUNTER;
@@ -823,60 +563,45 @@ static void v_APCI1564_Interrupt(int irq, void *d)
/* Disable Counter Interrupt */
ul_Command2 =
- inl(devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ inl(devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
outl(0x0,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_Current, 0);
/* Enable Counter Interrupt */
outl(ul_Command2,
- devpriv->iobase + APCI1564_COUNTER4 +
- APCI1564_TCW_PROG);
+ devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
}
- } /* if (ui_C4 == 1) */
+ }
return;
}
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1564_Reset(struct comedi_device *dev) | |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev
-+----------------------------------------------------------------------------+
-| Output Parameters : -- |
-+----------------------------------------------------------------------------+
-| Return Value : |
-| |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1564_Reset(struct comedi_device *dev)
+static int apci1564_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_IRQ); /* disable the interrupts */
- inl(devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_STATUS); /* Reset the interrupt status register */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_MODE1); /* Disable the and/or interrupt */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_IP_INTERRUPT_MODE2);
+ /* disable the interrupts */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_IRQ_REG);
+ /* Reset the interrupt status register */
+ inl(devpriv->i_IobaseAmcc + APCI1564_DI_INT_STATUS_REG);
+ /* Disable the and/or interrupt */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DI_INT_MODE2_REG);
devpriv->b_DigitalOutputRegister = 0;
ui_Type = 0;
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP); /* Resets the output channels */
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_INTERRUPT); /* Disables the interrupt. */
- outl(0x0,
- devpriv->i_IobaseAmcc + APCI1564_DIGITAL_OP_WATCHDOG +
- APCI1564_TCW_RELOAD_VALUE);
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER);
- outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER + APCI1564_TCW_PROG);
-
- outl(0x0, devpriv->iobase + APCI1564_COUNTER1 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER2 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER3 + APCI1564_TCW_PROG);
- outl(0x0, devpriv->iobase + APCI1564_COUNTER4 + APCI1564_TCW_PROG);
+ /* Resets the output channels */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_REG);
+ /* Disables the interrupt. */
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_DO_INT_CTRL_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_WDOG_RELOAD_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_REG);
+ outl(0x0, devpriv->i_IobaseAmcc + APCI1564_TIMER_CTRL_REG);
+
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER1));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER2));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER3));
+ outl(0x0, devpriv->iobase + APCI1564_TCW_CTRL_REG(APCI1564_COUNTER4));
return 0;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index bd05857b82f2..70e8f426285c 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -248,10 +248,10 @@ static const struct comedi_lrange range_apci3120_ao = {
+----------------------------------------------------------------------------+
*/
-static int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ai_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -304,11 +304,11 @@ static int i_APCI3120_InsnConfigAnalogInput(struct comedi_device *dev,
* If the last argument of function "check"is 1 then it only checks
* the channel list is ok or not.
*/
-static int i_APCI3120_SetupChannelList(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan,
- unsigned int *chanlist,
- char check)
+static int apci3120_setup_chan_list(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int n_chan,
+ unsigned int *chanlist,
+ char check)
{
struct addi_private *devpriv = dev->private;
unsigned int i; /* , differencial=0, bipolar=0; */
@@ -358,10 +358,10 @@ static int i_APCI3120_SetupChannelList(struct comedi_device *dev,
* as per configured if no conversion time is set uses default
* conversion time 10 microsec.
*/
-static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -417,10 +417,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
inw(devpriv->iobase + APCI3120_RESET_FIFO);
/* Initialize the sequence array */
-
- /* if (!i_APCI3120_SetupChannelList(dev,s,1,chanlist,0)) return -EINVAL; */
-
- if (!i_APCI3120_SetupChannelList(dev, s, 1,
+ if (!apci3120_setup_chan_list(dev, s, 1,
&insn->chanspec, 0))
return -EINVAL;
@@ -512,7 +509,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
outw(devpriv->us_OutputRegister,
devpriv->iobase + APCI3120_WR_ADDRESS);
- if (!i_APCI3120_SetupChannelList(dev, s,
+ if (!apci3120_setup_chan_list(dev, s,
devpriv->ui_AiNbrofChannels,
devpriv->ui_AiChannelList, 0))
return -EINVAL;
@@ -606,7 +603,7 @@ static int i_APCI3120_InsnReadAnalogInput(struct comedi_device *dev,
}
-static int i_APCI3120_Reset(struct comedi_device *dev)
+static int apci3120_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
unsigned int i;
@@ -663,7 +660,7 @@ static int i_APCI3120_Reset(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_ExttrigEnable(struct comedi_device *dev)
+static int apci3120_exttrig_enable(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
@@ -672,7 +669,7 @@ static int i_APCI3120_ExttrigEnable(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_ExttrigDisable(struct comedi_device *dev)
+static int apci3120_exttrig_disable(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
@@ -681,8 +678,8 @@ static int i_APCI3120_ExttrigDisable(struct comedi_device *dev)
return 0;
}
-static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
@@ -705,7 +702,7 @@ static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
* devpriv->i_IobaseAmcc+AMCC_OP_REG_MCSR); stop DMA */
/* Disable ext trigger */
- i_APCI3120_ExttrigDisable(dev);
+ apci3120_exttrig_disable(dev);
devpriv->us_OutputRegister = 0;
/* stop counters */
@@ -729,13 +726,13 @@ static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
devpriv->b_InterruptMode = APCI3120_EOC_MODE;
devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
return 0;
}
-static int i_APCI3120_CommandTestAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int apci3120_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
const struct addi_board *this_board = comedi_board(dev);
int err = 0;
@@ -818,9 +815,9 @@ static int i_APCI3120_CommandTestAnalogInput(struct comedi_device *dev,
* If DMA is configured does DMA initialization otherwise does the
* acquisition with EOS interrupt.
*/
-static int i_APCI3120_CyclicAnalogInput(int mode,
- struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_cyclic_ai(int mode,
+ struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
const struct addi_board *this_board = comedi_board(dev);
struct addi_private *devpriv = dev->private;
@@ -904,7 +901,7 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
/**********************************/
/* Initializes the sequence array */
/**********************************/
- if (!i_APCI3120_SetupChannelList(dev, s, devpriv->ui_AiNbrofChannels,
+ if (!apci3120_setup_chan_list(dev, s, devpriv->ui_AiNbrofChannels,
devpriv->pui_AiChannelList, 0))
return -EINVAL;
@@ -957,7 +954,7 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
/*** EL241003 End ******************************************************************************/
if (devpriv->b_ExttrigEnable == APCI3120_ENABLE)
- i_APCI3120_ExttrigEnable(dev); /* activate EXT trigger */
+ apci3120_exttrig_enable(dev); /* activate EXT trigger */
switch (mode) {
case 1:
/* init timer0 in mode 2 */
@@ -1333,8 +1330,8 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
* Does asynchronous acquisition.
* Determines the mode 1 or 2.
*/
-static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3120_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -1371,7 +1368,7 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
devpriv->ui_AiTimer0 = cmd->convert_arg; /* timer constant in nano seconds */
/* return this_board->ai_cmd(1,dev,s); */
- return i_APCI3120_CyclicAnalogInput(1, dev, s);
+ return apci3120_cyclic_ai(1, dev, s);
}
}
@@ -1381,7 +1378,7 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
devpriv->ui_AiTimer1 = cmd->scan_begin_arg;
devpriv->ui_AiTimer0 = cmd->convert_arg; /* variable changed timer2 to timer0 */
/* return this_board->ai_cmd(2,dev,s); */
- return i_APCI3120_CyclicAnalogInput(2, dev, s);
+ return apci3120_cyclic_ai(2, dev, s);
}
return -1;
}
@@ -1410,7 +1407,7 @@ static void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev,
* For continuous DMA it reinitializes the DMA operation.
* For single mode DMA it stop the acquisition.
*/
-static void v_APCI3120_InterruptDma(int irq, void *d)
+static void apci3120_interrupt_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -1429,7 +1426,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
}
if (samplesinbuf & 1) {
comedi_error(dev, "Odd count of bytes in DMA ring!");
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
return;
@@ -1500,7 +1497,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
if (!devpriv->b_AiContinuous)
if (devpriv->ui_AiActualScan >= devpriv->ui_AiNbrofScans) {
/* all data sampled */
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
s->async->events |= COMEDI_CB_EOA;
comedi_event(dev, s);
@@ -1565,7 +1562,7 @@ static void v_APCI3120_InterruptDma(int irq, void *d)
* This function handles EOS interrupt.
* This function copies the acquired data(from FIFO) to Comedi buffer.
*/
-static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
+static int apci3120_interrupt_handle_eos(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
@@ -1574,8 +1571,6 @@ static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
n_chan = devpriv->ui_AiNbrofChannels;
- s->async->events = 0;
-
for (i = 0; i < n_chan; i++)
err &= comedi_buf_put(s->async, inw(dev->iobase + 0));
@@ -1589,7 +1584,7 @@ static int i_APCI3120_InterruptHandleEos(struct comedi_device *dev)
return 0;
}
-static void v_APCI3120_Interrupt(int irq, void *d)
+static void apci3120_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
@@ -1615,7 +1610,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) {
/* Disable ext trigger */
- i_APCI3120_ExttrigDisable(dev);
+ apci3120_exttrig_disable(dev);
devpriv->b_ExttrigEnable = APCI3120_DISABLE;
}
/* clear the timer 2 interrupt */
@@ -1655,7 +1650,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
if (devpriv->b_AiCyclicAcquisition == APCI3120_ENABLE) {
ui_Check = 0;
- i_APCI3120_InterruptHandleEos(dev);
+ apci3120_interrupt_handle_eos(dev);
devpriv->ui_AiActualScan++;
devpriv->b_ModeSelectRegister =
devpriv->
@@ -1711,7 +1706,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
dev->iobase + APCI3120_WR_ADDRESS);
/* stop timer 0 and timer 1 */
- i_APCI3120_StopCyclicAcquisition(dev, s);
+ apci3120_cancel(dev, s);
devpriv->b_AiCyclicAcquisition = APCI3120_DISABLE;
/* UPDATE-0.7.57->0.7.68comedi_done(dev,s); */
@@ -1765,7 +1760,8 @@ static void v_APCI3120_Interrupt(int irq, void *d)
/* Clears the timer status register */
/************************************/
inw(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- v_APCI3120_InterruptDma(irq, d); /* do some data transfer */
+ /* do some data transfer */
+ apci3120_interrupt_dma(irq, d);
} else {
/* Stops the Timer */
outw(devpriv->
@@ -1787,7 +1783,7 @@ static void v_APCI3120_Interrupt(int irq, void *d)
* data[1] = Timer constant
* data[2] = Timer2 interrupt (1)enable or(0) disable
*/
-static int i_APCI3120_InsnConfigTimer(struct comedi_device *dev,
+static int apci3120_config_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -1932,7 +1928,7 @@ static int i_APCI3120_InsnConfigTimer(struct comedi_device *dev,
* = 1 Timer
* = 2 Watch dog
*/
-static int i_APCI3120_InsnWriteTimer(struct comedi_device *dev,
+static int apci3120_write_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -2104,7 +2100,7 @@ static int i_APCI3120_InsnWriteTimer(struct comedi_device *dev,
* for watchdog: data[0] = 0 (still running)
* = 1 (run down)
*/
-static int i_APCI3120_InsnReadTimer(struct comedi_device *dev,
+static int apci3120_read_insn_timer(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
@@ -2189,10 +2185,10 @@ static int apci3120_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-static int i_APCI3120_InsnWriteAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3120_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Range, ui_Channel;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index 8c85a09d1c66..0536d8373861 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -1268,7 +1268,7 @@ static int i_APCI3200_ReadCJCCalGain(struct comedi_device *dev,
return 0;
}
-static int i_APCI3200_Reset(struct comedi_device *dev)
+static int apci3200_reset(struct comedi_device *dev)
{
struct addi_private *devpriv = dev->private;
int i_Temp;
@@ -1322,10 +1322,10 @@ static int i_APCI3200_Reset(struct comedi_device *dev)
* data[7] : Channel current source from eeprom
* data[8] : Channle gain factor from eeprom
*/
-static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int ui_DummyValue = 0;
int i_ConvertCJCCalibration;
@@ -1336,7 +1336,7 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
if (s_BoardInfos[dev->minor].i_Initialised == 0)
/* END JK 06.07.04: Management of sevrals boards */
{
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(i_Initialised==0); */
@@ -1586,7 +1586,7 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
break;
default:
printk("\nThe parameters passed are in error\n");
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* switch(insn->unused[0]) */
@@ -1626,10 +1626,10 @@ static int i_APCI3200_ReadAnalogInput(struct comedi_device *dev,
* = 2 RTD 3 wire connection
* = 3 RTD 4 wire connection
*/
-static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ul_Config = 0, ul_Temp = 0;
@@ -1970,7 +1970,7 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
} /* switch(data[11]) */
} /* elseif(data[12]==0 || data[12]==1) */
if (i_err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
}
/* if(i_ScanType!=1) */
@@ -2079,7 +2079,7 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
/* END JK 06.07.04: Management of sevrals boards */
insn->unused[0] = 0;
- i_APCI3200_ReadAnalogInput(dev, s, insn, &ui_Dummy);
+ apci3200_ai_read(dev, s, insn, &ui_Dummy);
}
return insn->n;
@@ -2095,10 +2095,10 @@ static int i_APCI3200_ConfigAnalogInput(struct comedi_device *dev,
* data[1] : calibration offset
* data[2] : calibration gain
*/
-static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_bits_test(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Configuration = 0;
@@ -2107,12 +2107,12 @@ static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
/* if(i_Initialised==0) */
if (s_BoardInfos[dev->minor].i_Initialised == 0) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(i_Initialised==0); */
if (data[0] != 0 && data[0] != 1) {
printk("\nError in selection of functionality\n");
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return -EINVAL;
} /* if(data[0]!=0 && data[0]!=1) */
@@ -2202,18 +2202,18 @@ static int i_APCI3200_InsnBits_AnalogInput_Test(struct comedi_device *dev,
return insn->n;
}
-static int i_APCI3200_InsnWriteReleaseAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3200_ai_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return insn->n;
}
-static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int apci3200_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
int err = 0;
@@ -2241,7 +2241,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
err |= -EINVAL;
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 1;
}
@@ -2267,7 +2267,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
}
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 2;
}
/* i_FirstChannel=cmd->chanlist[0]; */
@@ -2308,7 +2308,7 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
printk("\nThe Delay time value is in error\n");
}
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 3;
}
fpu_begin();
@@ -2366,15 +2366,15 @@ static int i_APCI3200_CommandTestAnalogInput(struct comedi_device *dev,
} /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
if (err) {
- i_APCI3200_Reset(dev);
+ apci3200_reset(dev);
return 4;
}
return 0;
}
-static int i_APCI3200_StopCyclicAcquisition(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3200_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
unsigned int ui_Configuration = 0;
@@ -2410,8 +2410,8 @@ static int i_APCI3200_StopCyclicAcquisition(struct comedi_device *dev,
* Does asynchronous acquisition
* Determines the mode 1 or 2.
*/
-static int i_APCI3200_CommandAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int apci3200_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct addi_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -2619,7 +2619,6 @@ static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
/* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/* This value is not used */
/* ui_ChannelNumber = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 24); */
- s->async->events = 0;
/* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
/*************************************/
@@ -2730,7 +2729,7 @@ static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
return 0;
}
-static void v_APCI3200_Interrupt(int irq, void *d)
+static void apci3200_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct addi_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index ebc1534a8df8..20e89b0bdc4d 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -16,10 +16,10 @@
* data[2] : Time Unit
* data[3] : Reload Value
*/
-static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_config_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -86,10 +86,10 @@ static int i_APCI3501_ConfigTimerCounterWatchdog(struct comedi_device *dev,
* 0 Stop
* 2 Trigger
*/
-static int i_APCI3501_StartStopWriteTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_write_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned int ul_Command1 = 0;
@@ -153,10 +153,10 @@ static int i_APCI3501_StartStopWriteTimerCounterWatchdog(struct comedi_device *d
* 2 Watchdog
* data[1] : Timer Counter Watchdog Number
*/
-static int i_APCI3501_ReadTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci3501_read_insn_timer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index ccd49211ea17..4da9db35b8e2 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -27,13 +27,13 @@ static const struct addi_board apci035_boardtypes[] = {
.i_Timer = 1,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI035_Interrupt,
- .reset = i_APCI035_Reset,
- .ai_config = i_APCI035_ConfigAnalogInput,
- .ai_read = i_APCI035_ReadAnalogInput,
- .timer_config = i_APCI035_ConfigTimerWatchdog,
- .timer_write = i_APCI035_StartStopWriteTimerWatchdog,
- .timer_read = i_APCI035_ReadTimerWatchdog,
+ .interrupt = apci035_interrupt,
+ .reset = apci035_reset,
+ .ai_config = apci035_ai_config,
+ .ai_read = apci035_ai_read,
+ .timer_config = apci035_timer_config,
+ .timer_write = apci035_timer_write,
+ .timer_read = apci035_timer_read,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 74f7ace8adbc..bd8e08ca14c0 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -20,19 +20,19 @@ static const struct addi_board apci1500_boardtypes[] = {
.i_NbrDoChannel = 16,
.i_DoMaxdata = 0xffff,
.i_Timer = 1,
- .interrupt = v_APCI1500_Interrupt,
- .reset = i_APCI1500_Reset,
- .di_config = i_APCI1500_ConfigDigitalInputEvent,
- .di_read = i_APCI1500_Initialisation,
- .di_write = i_APCI1500_StartStopInputEvent,
+ .interrupt = apci1500_interrupt,
+ .reset = apci1500_reset,
+ .di_config = apci1500_di_config,
+ .di_read = apci1500_di_read,
+ .di_write = apci1500_di_write,
.di_bits = apci1500_di_insn_bits,
- .do_config = i_APCI1500_ConfigDigitalOutputErrorInterrupt,
- .do_write = i_APCI1500_WriteDigitalOutput,
- .do_bits = i_APCI1500_ConfigureInterrupt,
- .timer_config = i_APCI1500_ConfigCounterTimerWatchdog,
- .timer_write = i_APCI1500_StartStopTriggerTimerCounterWatchdog,
- .timer_read = i_APCI1500_ReadInterruptMask,
- .timer_bits = i_APCI1500_ReadCounterTimerWatchdog,
+ .do_config = apci1500_do_config,
+ .do_write = apci1500_do_write,
+ .do_bits = apci1500_do_bits,
+ .timer_config = apci1500_timer_config,
+ .timer_write = apci1500_timer_write,
+ .timer_read = apci1500_timer_read,
+ .timer_bits = apci1500_timer_bits,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 6248284caaf5..27aa9ae1bdd9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -21,16 +21,16 @@ static const struct addi_board apci1564_boardtypes[] = {
.i_NbrDoChannel = 32,
.i_DoMaxdata = 0xffffffff,
.i_Timer = 1,
- .interrupt = v_APCI1564_Interrupt,
- .reset = i_APCI1564_Reset,
- .di_config = i_APCI1564_ConfigDigitalInput,
+ .interrupt = apci1564_interrupt,
+ .reset = apci1564_reset,
+ .di_config = apci1564_di_config,
.di_bits = apci1564_di_insn_bits,
- .do_config = i_APCI1564_ConfigDigitalOutput,
+ .do_config = apci1564_do_config,
.do_bits = apci1564_do_insn_bits,
- .do_read = i_APCI1564_ReadInterruptStatus,
- .timer_config = i_APCI1564_ConfigTimerCounterWatchdog,
- .timer_write = i_APCI1564_StartStopWriteTimerCounterWatchdog,
- .timer_read = i_APCI1564_ReadTimerCounterWatchdog,
+ .do_read = apci1564_do_read,
+ .timer_config = apci1564_timer_config,
+ .timer_write = apci1564_timer_write,
+ .timer_read = apci1564_timer_read,
},
};
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 1e6fa56c516e..57ee6e5c7635 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -26,7 +26,7 @@ static const struct addi_board apci3120_boardtypes[] = {
.i_NbrDiChannel = 4,
.i_NbrDoChannel = 4,
.i_DoMaxdata = 0x0f,
- .interrupt = v_APCI3120_Interrupt,
+ .interrupt = apci3120_interrupt,
},
[BOARD_APCI3001] = {
.pc_DriverName = "apci3001",
@@ -37,7 +37,7 @@ static const struct addi_board apci3120_boardtypes[] = {
.i_NbrDiChannel = 4,
.i_NbrDoChannel = 4,
.i_DoMaxdata = 0x0f,
- .interrupt = v_APCI3120_Interrupt,
+ .interrupt = apci3120_interrupt,
},
};
@@ -136,11 +136,11 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->len_chanlist = this_board->i_AiChannelList;
s->range_table = &range_apci3120_ai;
- s->insn_config = i_APCI3120_InsnConfigAnalogInput;
- s->insn_read = i_APCI3120_InsnReadAnalogInput;
- s->do_cmdtest = i_APCI3120_CommandTestAnalogInput;
- s->do_cmd = i_APCI3120_CommandAnalogInput;
- s->cancel = i_APCI3120_StopCyclicAcquisition;
+ s->insn_config = apci3120_ai_insn_config;
+ s->insn_read = apci3120_ai_insn_read;
+ s->do_cmdtest = apci3120_ai_cmdtest;
+ s->do_cmd = apci3120_ai_cmd;
+ s->cancel = apci3120_cancel;
/* Allocate and Initialise AO Subdevice Structures */
s = &dev->subdevices[1];
@@ -151,7 +151,7 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->maxdata = this_board->i_AoMaxdata;
s->len_chanlist = this_board->i_NbrAoChannel;
s->range_table = &range_apci3120_ao;
- s->insn_write = i_APCI3120_InsnWriteAnalogOutput;
+ s->insn_write = apci3120_ao_insn_write;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -186,11 +186,11 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_digital;
- s->insn_write = i_APCI3120_InsnWriteTimer;
- s->insn_read = i_APCI3120_InsnReadTimer;
- s->insn_config = i_APCI3120_InsnConfigTimer;
+ s->insn_write = apci3120_write_insn_timer;
+ s->insn_read = apci3120_read_insn_timer;
+ s->insn_config = apci3120_config_insn_timer;
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
return 0;
}
@@ -200,7 +200,7 @@ static void apci3120_detach(struct comedi_device *dev)
if (devpriv) {
if (dev->iobase)
- i_APCI3120_Reset(dev);
+ apci3120_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv->ul_DmaBufferVirtual[0]) {
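
The attach hunk above wires the renamed handlers into the subdevice (s->insn_read, s->do_cmd, s->cancel and so on); these are the hooks the comedi core invokes when user space drives the device. For orientation only, a rough user-space counterpart using comedilib — the device path, subdevice and channel numbers are arbitrary — whose comedi_data_read() call ends up in the driver's insn_read handler:

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");
		lsampl_t sample;

		if (!dev)
			return 1;

		/* subdevice 0, channel 0, range 0, ground reference */
		if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0) {
			comedi_perror("comedi_data_read");
			comedi_close(dev);
			return 1;
		}

		printf("raw sample: %u\n", sample);
		comedi_close(dev);
		return 0;
	}
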
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 9868a5369aff..f0f891a482a3 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -43,15 +43,15 @@ static const struct addi_board apci3200_boardtypes[] = {
.i_NbrDoChannel = 4,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI3200_Interrupt,
- .reset = i_APCI3200_Reset,
- .ai_config = i_APCI3200_ConfigAnalogInput,
- .ai_read = i_APCI3200_ReadAnalogInput,
- .ai_write = i_APCI3200_InsnWriteReleaseAnalogInput,
- .ai_bits = i_APCI3200_InsnBits_AnalogInput_Test,
- .ai_cmdtest = i_APCI3200_CommandTestAnalogInput,
- .ai_cmd = i_APCI3200_CommandAnalogInput,
- .ai_cancel = i_APCI3200_StopCyclicAcquisition,
+ .interrupt = apci3200_interrupt,
+ .reset = apci3200_reset,
+ .ai_config = apci3200_ai_config,
+ .ai_read = apci3200_ai_read,
+ .ai_write = apci3200_ai_write,
+ .ai_bits = apci3200_ai_bits_test,
+ .ai_cmdtest = apci3200_ai_cmdtest,
+ .ai_cmd = apci3200_ai_cmd,
+ .ai_cancel = apci3200_cancel,
.di_bits = apci3200_di_insn_bits,
.do_bits = apci3200_do_insn_bits,
},
@@ -68,15 +68,15 @@ static const struct addi_board apci3200_boardtypes[] = {
.i_NbrDoChannel = 4,
.ui_MinAcquisitiontimeNs = 10000,
.ui_MinDelaytimeNs = 100000,
- .interrupt = v_APCI3200_Interrupt,
- .reset = i_APCI3200_Reset,
- .ai_config = i_APCI3200_ConfigAnalogInput,
- .ai_read = i_APCI3200_ReadAnalogInput,
- .ai_write = i_APCI3200_InsnWriteReleaseAnalogInput,
- .ai_bits = i_APCI3200_InsnBits_AnalogInput_Test,
- .ai_cmdtest = i_APCI3200_CommandTestAnalogInput,
- .ai_cmd = i_APCI3200_CommandAnalogInput,
- .ai_cancel = i_APCI3200_StopCyclicAcquisition,
+ .interrupt = apci3200_interrupt,
+ .reset = apci3200_reset,
+ .ai_config = apci3200_ai_config,
+ .ai_read = apci3200_ai_read,
+ .ai_write = apci3200_ai_write,
+ .ai_bits = apci3200_ai_bits_test,
+ .ai_cmdtest = apci3200_ai_cmdtest,
+ .ai_cmd = apci3200_ai_cmd,
+ .ai_cancel = apci3200_cancel,
.di_bits = apci3200_di_insn_bits,
.do_bits = apci3200_do_insn_bits,
},
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 4cb69ec54c9b..49bf1fb840f6 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -390,9 +390,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
s->maxdata = 0;
s->len_chanlist = 1;
s->range_table = &range_digital;
- s->insn_write = i_APCI3501_StartStopWriteTimerCounterWatchdog;
- s->insn_read = i_APCI3501_ReadTimerCounterWatchdog;
- s->insn_config = i_APCI3501_ConfigTimerCounterWatchdog;
+ s->insn_write = apci3501_write_insn_timer;
+ s->insn_read = apci3501_read_insn_timer;
+ s->insn_config = apci3501_config_insn_timer;
/* Initialize the eeprom subdevice */
s = &dev->subdevices[4];
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index ceadf8eff47f..6dc11c407f57 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -434,13 +434,26 @@ static int apci3xxx_ai_setup(struct comedi_device *dev, unsigned int chanspec)
return 0;
}
+static int apci3xxx_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + 20);
+ if (status & 0x1)
+ return 0;
+ return -EBUSY;
+}
+
static int apci3xxx_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3xxx_private *devpriv = dev->private;
- unsigned int val;
int ret;
int i;
@@ -453,10 +466,9 @@ static int apci3xxx_ai_insn_read(struct comedi_device *dev,
writel(0x80000, devpriv->mmio + 8);
/* Wait the EOS */
- do {
- val = readl(devpriv->mmio + 20);
- val &= 0x1;
- } while (!val);
+ ret = comedi_timeout(dev, s, insn, apci3xxx_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Read the analog value */
data[i] = readl(devpriv->mmio + 28);
@@ -622,6 +634,20 @@ static int apci3xxx_ai_cancel(struct comedi_device *dev,
return 0;
}
+static int apci3xxx_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + 96);
+ if (status & 0x100)
+ return 0;
+ return -EBUSY;
+}
+
static int apci3xxx_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -630,7 +656,7 @@ static int apci3xxx_ao_insn_write(struct comedi_device *dev,
struct apci3xxx_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int status;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
@@ -641,9 +667,9 @@ static int apci3xxx_ao_insn_write(struct comedi_device *dev,
writel((data[i] << 8) | chan, devpriv->mmio + 100);
/* Wait the end of transfer */
- do {
- status = readl(devpriv->mmio + 96);
- } while ((status & 0x100) != 0x100);
+ ret = comedi_timeout(dev, s, insn, apci3xxx_ao_eoc, 0);
+ if (ret)
+ return ret;
}
return insn->n;
@@ -680,7 +706,7 @@ static int apci3xxx_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
+ unsigned int mask = 0;
int ret;
/*
@@ -688,12 +714,13 @@ static int apci3xxx_dio_insn_config(struct comedi_device *dev,
* Port 1 (channels 8-15) are always outputs
* Port 2 (channels 16-23) are programmable i/o
*/
- if (chan < 16) {
- if (data[0] != INSN_CONFIG_DIO_QUERY)
+ if (data[0] != INSN_CONFIG_DIO_QUERY) {
+ /* ignore all other instructions for ports 0 and 1 */
+ if (chan < 16)
return -EINVAL;
- } else {
- /* changing any channel in port 2 changes the entire port */
- mask = 0xff0000;
+ else
+ /* changing any channel in port 2 changes the entire port */
+ mask = 0xff0000;
}
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
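
The apci3xxx hunks above are the first in this section of a pattern repeated across the following drivers: an open-coded busy-wait on a status register is replaced by a small end-of-conversion callback plus comedi_timeout(). A minimal sketch of the pattern, with made-up register and bit names (MYDEV_*) standing in for any particular board's layout:

	static int mydev_ai_eoc(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned long context)
	{
		unsigned int status;

		status = inw(dev->iobase + MYDEV_STATUS_REG);
		if (status & MYDEV_STATUS_EOC)
			return 0;	/* conversion finished */
		return -EBUSY;		/* not ready yet, core keeps polling */
	}

	static int mydev_ai_insn_read(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
	{
		unsigned int chan = CR_CHAN(insn->chanspec);
		int ret;
		int i;

		for (i = 0; i < insn->n; i++) {
			/* start a single conversion on the selected channel */
			outw(chan, dev->iobase + MYDEV_START_REG);

			ret = comedi_timeout(dev, s, insn, mydev_ai_eoc, 0);
			if (ret)
				return ret;	/* typically -ETIMEDOUT */

			data[i] = inw(dev->iobase + MYDEV_DATA_REG) & s->maxdata;
		}

		return insn->n;
	}

comedi_timeout() simply re-invokes the callback until it returns something other than -EBUSY or a core-defined timeout expires, which is why the per-driver countdown loops and ad-hoc timeout constants can be dropped.
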
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 5c1413abc52d..921f6942dfce 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -73,19 +73,17 @@ struct pci6208_private {
unsigned int ao_readback[PCI6208_MAX_AO_CHANNELS];
};
-static int pci6208_ao_wait_for_data_send(struct comedi_device *dev,
- unsigned int timeout)
+static int pci6208_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned int status;
- while (timeout--) {
- status = inw(dev->iobase + PCI6208_AO_STATUS);
- if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
- return 0;
- udelay(1);
- }
-
- return -ETIME;
+ status = inw(dev->iobase + PCI6208_AO_STATUS);
+ if ((status & PCI6208_AO_STATUS_DATA_SEND) == 0)
+ return 0;
+ return -EBUSY;
}
static int pci6208_ao_insn_write(struct comedi_device *dev,
@@ -102,8 +100,8 @@ static int pci6208_ao_insn_write(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
val = data[i];
- /* D/A transfer rate is 2.2us, wait up to 10us */
- ret = pci6208_ao_wait_for_data_send(dev, 10);
+ /* D/A transfer rate is 2.2us */
+ ret = comedi_timeout(dev, s, insn, pci6208_ao_eoc, 0);
if (ret)
return ret;
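
The pci6208 callback above also shows that "ready" can be expressed either way round (here the DATA_SEND busy bit must be clear). A further detail of the helper: the last argument of comedi_timeout() is an opaque context value handed straight to the callback, so one callback can service several wait conditions. Every call in this series passes 0, but a hypothetical driver with separate AI/AO status bits could do something like the following (register and bit names are placeholders):

	enum mydev_wait {
		MYDEV_WAIT_AI_EOC,
		MYDEV_WAIT_AO_READY,
	};

	static int mydev_eoc(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn,
			     unsigned long context)
	{
		unsigned int status = inw(dev->iobase + MYDEV_STATUS_REG);

		switch (context) {
		case MYDEV_WAIT_AI_EOC:
			if (status & MYDEV_STATUS_AI_EOC)
				return 0;
			break;
		case MYDEV_WAIT_AO_READY:
			if ((status & MYDEV_STATUS_AO_BUSY) == 0)
				return 0;
			break;
		}
		return -EBUSY;
	}

	/* callers pass the condition they need, e.g.:
	 * ret = comedi_timeout(dev, s, insn, mydev_eoc, MYDEV_WAIT_AI_EOC);
	 */
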
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 6f622b4de698..5e3cc77a8a0c 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -239,9 +239,6 @@ static int adl_pci7x3x_auto_attach(struct comedi_device *dev,
}
}
- dev_info(dev->class_dev, "%s attached (%d inputs/%d outputs)\n",
- dev->board_name, board->di_nchan, board->do_nchan);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 363f2e42a27f..a29ceacb966d 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -84,7 +84,6 @@ TODO:
#define PCI9111_RANGE_SETTING_DELAY 10
#define PCI9111_AI_INSTANT_READ_UDELAY_US 2
-#define PCI9111_AI_INSTANT_READ_TIMEOUT 100
/*
* IO address map and bit defines
@@ -490,29 +489,18 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
dev->iobase + PCI9111_AI_RANGE_STAT_REG);
/* Set counter */
-
- switch (async_cmd->stop_src) {
- case TRIG_COUNT:
+ if (async_cmd->stop_src == TRIG_COUNT) {
dev_private->stop_counter =
async_cmd->stop_arg * async_cmd->chanlist_len;
dev_private->stop_is_none = 0;
- break;
-
- case TRIG_NONE:
+ } else { /* TRIG_NONE */
dev_private->stop_counter = 0;
dev_private->stop_is_none = 1;
- break;
-
- default:
- comedi_error(dev, "Invalid stop trigger");
- return -1;
}
/* Set timer pacer */
-
dev_private->scan_delay = 0;
- switch (async_cmd->convert_src) {
- case TRIG_TIMER:
+ if (async_cmd->convert_src == TRIG_TIMER) {
pci9111_trigger_source_set(dev, software);
pci9111_timer_set(dev);
pci9111_fifo_reset(dev);
@@ -528,11 +516,7 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
(async_cmd->convert_arg *
async_cmd->chanlist_len)) - 1;
}
-
- break;
-
- case TRIG_EXT:
-
+ } else { /* TRIG_EXT */
pci9111_trigger_source_set(dev, external);
pci9111_fifo_reset(dev);
pci9111_interrupt_source_set(dev, irq_on_fifo_half_full,
@@ -540,11 +524,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
plx9050_interrupt_control(dev_private->lcr_io_base, true, true,
false, true, true);
- break;
-
- default:
- comedi_error(dev, "Invalid convert trigger");
- return -1;
}
dev_private->stop_counter *= (1 + dev_private->scan_delay);
@@ -613,9 +592,8 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
comedi_error(dev, PCI9111_DRIVER_NAME " fifo overflow");
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
- pci9111_ai_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -693,20 +671,31 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
}
}
- if ((dev_private->stop_counter == 0) && (!dev_private->stop_is_none)) {
+ if (dev_private->stop_counter == 0 && !dev_private->stop_is_none)
async->events |= COMEDI_CB_EOA;
- pci9111_ai_cancel(dev, s);
- }
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
+static int pci9111_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
+ if (status & PCI9111_AI_STAT_FF_EF)
+ return 0;
+ return -EBUSY;
+}
+
static int pci9111_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -717,7 +706,7 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
unsigned int invert = (maxdata + 1) >> 1;
unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
unsigned int status;
- int timeout;
+ int ret;
int i;
outb(chan, dev->iobase + PCI9111_AI_CHANNEL_REG);
@@ -734,22 +723,12 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
/* Generate a software trigger */
outb(0, dev->iobase + PCI9111_SOFT_TRIG_REG);
- timeout = PCI9111_AI_INSTANT_READ_TIMEOUT;
-
- while (timeout--) {
- status = inb(dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- /* '1' means FIFO is not empty */
- if (status & PCI9111_AI_STAT_FF_EF)
- goto conversion_done;
+ ret = comedi_timeout(dev, s, insn, pci9111_ai_eoc, 0);
+ if (ret) {
+ pci9111_fifo_reset(dev);
+ return ret;
}
- comedi_error(dev, "A/D read timeout");
- data[i] = 0;
- pci9111_fifo_reset(dev);
- return -ETIME;
-
-conversion_done:
-
data[i] = inw(dev->iobase + PCI9111_AI_FIFO_REG);
data[i] = ((data[i] >> shift) & maxdata) ^ invert;
}
@@ -906,8 +885,6 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = pci9111_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
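
Besides the eoc conversion, the pci9111 interrupt paths above stop cancelling the command by hand and call cfc_handle_events() instead of comedi_event(). A rough sketch of what that comedi_fc helper does — reconstructed from memory of its behaviour, not a verbatim copy:

	unsigned int cfc_handle_events(struct comedi_device *dev,
				       struct comedi_subdevice *s)
	{
		unsigned int events = s->async->events;

		if (events == 0)
			return events;

		/* end-of-acquisition, error or overflow: tear the command down */
		if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
			s->cancel(dev, s);

		comedi_event(dev, s);

		return events;
	}

So setting COMEDI_CB_EOA (or an error flag) and making one cfc_handle_events() call is enough; the explicit pci9111_ai_cancel() calls removed above were duplicating what the helper already does.
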
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 4bdd9720e9eb..3cfa1756fa6a 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -571,12 +571,26 @@ static int setup_channel_list(struct comedi_device *dev,
return 1; /* we can serve this with scan logic */
}
+static int pci9118_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inl(dev->iobase + PCI9118_ADSTAT);
+ if (status & AdStatus_ADrdy)
+ return 0;
+ return -EBUSY;
+}
+
static int pci9118_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pci9118_private *devpriv = dev->private;
- int n, timeout;
+ int ret;
+ int n;
devpriv->AdControlReg = AdControl_Int & 0xff;
devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
@@ -597,19 +611,13 @@ static int pci9118_insn_read_ai(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + PCI9118_SOFTTRG); /* start conversion */
udelay(2);
- timeout = 100;
- while (timeout--) {
- if (inl(dev->iobase + PCI9118_ADSTAT) & AdStatus_ADrdy)
- goto conv_finish;
- udelay(1);
- }
- comedi_error(dev, "A/D insn timeout");
- data[n] = 0;
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, pci9118_ai_eoc, 0);
+ if (ret) {
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ return ret;
+ }
-conv_finish:
if (devpriv->ai16bits) {
data[n] =
(inl(dev->iobase +
@@ -911,8 +919,7 @@ static char pci9118_decode_error_status(struct comedi_device *dev,
}
if (m & devpriv->ai_maskharderr) {
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return 1;
}
@@ -948,8 +955,6 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
struct pci9118_private *devpriv = dev->private;
unsigned short sampl;
- s->async->events = 0;
-
if (int_adstat & devpriv->ai_maskerr)
if (pci9118_decode_error_status(dev, s, int_adstat))
return;
@@ -965,8 +970,7 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
sampl & 0x000f,
devpriv->chanlist[s->async->cur_chan]);
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
}
@@ -977,16 +981,14 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
/* one scan done */
s->async->cur_chan %= devpriv->ai_n_scanlen;
devpriv->ai_act_scan++;
- if (!(devpriv->ai_neverending))
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pci9118_ai_cancel(dev, s);
+ if (!devpriv->ai_neverending) {
+ /* all data sampled? */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans)
s->async->events |= COMEDI_CB_EOA;
- }
+ }
}
- if (s->async->events)
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
@@ -1001,16 +1003,14 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
if (int_amcc & MASTER_ABORT_INT) {
comedi_error(dev, "AMCC IRQ - MASTER DMA ABORT!");
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (int_amcc & TARGET_ABORT_INT) {
comedi_error(dev, "AMCC IRQ - TARGET DMA ABORT!");
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- pci9118_ai_cancel(dev, s);
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (int_adstat & devpriv->ai_maskerr)
@@ -1048,12 +1048,11 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
m = m - sampls; /* m= how many samples was transferred */
}
- if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pci9118_ai_cancel(dev, s);
+ if (!devpriv->ai_neverending) {
+ /* all data sampled? */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans)
s->async->events |= COMEDI_CB_EOA;
- }
+ }
if (devpriv->dma_doublebuf) { /* switch dma buffers */
devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
@@ -1066,7 +1065,7 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
interrupt_pci9118_ai_mode4_switch(dev);
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static irqreturn_t interrupt_pci9118(int irq, void *d)
@@ -1936,28 +1935,6 @@ static struct pci_dev *pci9118_find_pci(struct comedi_device *dev,
return NULL;
}
-static void pci9118_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci9118_private *devpriv = dev->private;
- char irqbuf[30];
- char muxbuf[30];
-
- if (irq)
- snprintf(irqbuf, sizeof(irqbuf), "irq %u%s", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- snprintf(irqbuf, sizeof(irqbuf), "irq DISABLED");
- if (devpriv->usemux)
- snprintf(muxbuf, sizeof(muxbuf), "ext mux %u chans",
- devpriv->usemux);
- else
- snprintf(muxbuf, sizeof(muxbuf), "no ext mux");
- dev_info(dev->class_dev, "%s (pci %s, %s, %sbus master, %s) attached\n",
- dev->board_name, pci_name(pcidev), irqbuf,
- (devpriv->master ? "" : "no "), muxbuf);
-}
-
static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
int master, int ext_mux, int softsshdelay,
int hw_err_mask)
@@ -2113,7 +2090,6 @@ static int pci9118_common_attach(struct comedi_device *dev, int disable_irq,
break;
}
- pci9118_report_attach(dev, dev->irq);
return 0;
}
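
The pci9118 changes follow the same shape in the interrupt handlers: accumulate event flags, make a single cfc_handle_events() call at the end, and drop the redundant "s->async->events = 0;" pre-clears (the core consumes and clears the mask once the events are processed). A condensed, hypothetical handler in that style — the register names and FIFO-drain helper are placeholders:

	static irqreturn_t mydev_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;
		unsigned int status;

		status = inl(dev->iobase + MYDEV_INT_STATUS_REG);

		if (status & MYDEV_INT_ERROR) {
			/* flag it; cfc_handle_events() cancels the command */
			s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
		} else {
			/* move samples from the FIFO into the comedi buffer */
			mydev_drain_fifo(dev, s);
			if (mydev_acquisition_done(dev))
				s->async->events |= COMEDI_CB_EOA;
		}

		cfc_handle_events(dev, s);

		return IRQ_HANDLED;
	}
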
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 3190ef7d285e..b4ea37704eaf 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -94,8 +94,6 @@ If you do not specify any options, they will default to
/* mask of the bit at STINR to check end of conversion */
#define ADQ12B_EOC 0x20
-#define TIMEOUT 20
-
/* available ranges through the PGA gains */
static const struct comedi_lrange range_adq12b_ai_bipolar = {
4, {
@@ -122,19 +120,28 @@ struct adq12b_private {
int last_range;
};
-/*
- * "instructions" read/write data in "one-shot" or "software-triggered"
- * mode.
- */
+static int adq12b_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + ADQ12B_STINR);
+ if (status & ADQ12B_EOC)
+ return 0;
+ return -EBUSY;
+}
static int adq12b_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct adq12b_private *devpriv = dev->private;
- int n, i;
+ int n;
int range, channel;
unsigned char hi, lo, status;
+ int ret;
/* change channel and range only if it is different from the previous */
range = CR_RANGE(insn->chanspec);
@@ -151,13 +158,9 @@ static int adq12b_ai_rinsn(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
/* wait for end of conversion */
- i = 0;
- do {
- /* udelay(1); */
- status = inb(dev->iobase + ADQ12B_STINR);
- status = status & ADQ12B_EOC;
- } while (status == 0 && ++i < TIMEOUT);
- /* } while (++i < 10); */
+ ret = comedi_timeout(dev, s, insn, adq12b_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
hi = inb(dev->iobase + ADQ12B_ADHIG);
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..28ec48548317 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -429,15 +429,26 @@ static void setup_channel_list(struct comedi_device *dev,
outw(devpriv->ai_et_MuxVal, dev->iobase + PCI171x_MUX);
}
-/*
-==============================================================================
-*/
+static int pci171x_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + PCI171x_STATUS);
+ if ((status & Status_FE) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int pci171x_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
- int n, timeout;
+ int ret;
+ int n;
#ifdef PCI171x_PARANOIDCHECK
const struct boardtype *this_board = comedi_board(dev);
unsigned int idata;
@@ -453,19 +464,14 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + PCI171x_SOFTTRG); /* start conversion */
- /* udelay(1); */
- timeout = 100;
- while (timeout--) {
- if (!(inw(dev->iobase + PCI171x_STATUS) & Status_FE))
- goto conv_finish;
+
+ ret = comedi_timeout(dev, s, insn, pci171x_ai_eoc, 0);
+ if (ret) {
+ outb(0, dev->iobase + PCI171x_CLRFIFO);
+ outb(0, dev->iobase + PCI171x_CLRINT);
+ return ret;
}
- comedi_error(dev, "A/D insn timeout");
- outb(0, dev->iobase + PCI171x_CLRFIFO);
- outb(0, dev->iobase + PCI171x_CLRINT);
- data[n] = 0;
- return -ETIME;
-conv_finish:
#ifdef PCI171x_PARANOIDCHECK
idata = inw(dev->iobase + PCI171x_AD_DATA);
if (this_board->cardtype != TYPE_PCI1713)
@@ -494,6 +500,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -509,11 +516,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -679,6 +689,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -688,13 +699,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
@@ -746,17 +759,15 @@ static void interrupt_pci1710_every_sample(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (m & Status_FE) {
dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
@@ -775,10 +786,9 @@ static void interrupt_pci1710_every_sample(void *d)
act_chanlist[s->
async->cur_chan] & 0xf000) >>
12);
- pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
comedi_buf_put(s->async, sampl & 0x0fff);
@@ -797,9 +807,8 @@ static void interrupt_pci1710_every_sample(void *d)
if ((!devpriv->neverending_ai) &&
(devpriv->ai_act_scan >= devpriv->ai_scans)) {
/* all data sampled */
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
}
@@ -807,7 +816,7 @@ static void interrupt_pci1710_every_sample(void *d)
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -835,10 +844,9 @@ static int move_block_from_fifo(struct comedi_device *dev,
(devpriv->act_chanlist[j] & 0xf000) >> 12,
i, j, devpriv->ai_act_scan, n, turn,
sampl);
- pci171x_ai_cancel(dev, s);
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return 1;
}
comedi_buf_put(s->async, sampl & 0x0fff);
@@ -870,17 +878,15 @@ static void interrupt_pci1710_half_fifo(void *d)
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
@@ -900,14 +906,13 @@ static void interrupt_pci1710_half_fifo(void *d)
if (!devpriv->neverending_ai)
if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data
sampled */
- pci171x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -1347,9 +1352,6 @@ static int pci1710_auto_attach(struct comedi_device *dev,
subdev++;
}
- dev_info(dev->class_dev, "%s attached, irq %sabled\n",
- dev->board_name, dev->irq ? "en" : "dis");
-
return 0;
}
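
Apart from the interrupt and eoc rework, the pci171x/pci1720 analog-output hunks above change how the readback value is cached: the last written value is tracked in a local "val" that is pre-loaded from the cached readback, so a zero-length write (insn->n == 0) no longer stores data[insn->n] from past the end of the request. The pattern, reduced to its essentials with placeholder names:

	static int mydev_ao_insn_write(struct comedi_device *dev,
				       struct comedi_subdevice *s,
				       struct comedi_insn *insn,
				       unsigned int *data)
	{
		struct mydev_private *devpriv = dev->private;
		unsigned int chan = CR_CHAN(insn->chanspec);
		/* start from the cached value so insn->n == 0 changes nothing */
		unsigned int val = devpriv->ao_readback[chan];
		int i;

		for (i = 0; i < insn->n; i++) {
			val = data[i];
			outw(val, dev->iobase + MYDEV_AO_REG(chan));
		}
		devpriv->ao_readback[chan] = val;	/* last value actually written */

		return insn->n;
	}
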
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 72394267ddfe..07b107d1ab33 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -280,8 +280,6 @@ static int pci1723_auto_attach(struct comedi_device *dev,
pci1723_reset(dev);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index d4bd61d84daf..2d966a87f2e8 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1130,10 +1130,12 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
for (i = 0; i < MAX_DIO_SUBDEVG; i++)
for (j = 0; j < this_board->sdio[i].regs; j++) {
s = &dev->subdevices[subdev];
- subdev_8255_init(dev, s, NULL,
- dev->iobase +
- this_board->sdio[i].addr +
- SIZE_8255 * j);
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase +
+ this_board->sdio[i].addr +
+ SIZE_8255 * j);
+ if (ret)
+ return ret;
subdev++;
}
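
The adv_pci_dio hunk is one of several in this series that stop ignoring the return value of subdev_8255_init() (it can fail, e.g. with -ENOMEM), and the later hunks also make attach routines return 0 rather than 1 on success, matching the comedi core's convention. A minimal attach sketch along those lines, with placeholder driver names:

	static int mydev_auto_attach(struct comedi_device *dev,
				     unsigned long context)
	{
		struct comedi_subdevice *s;
		int ret;

		ret = comedi_alloc_subdevices(dev, 1);
		if (ret)
			return ret;

		/* 8255 digital i/o subdevice */
		s = &dev->subdevices[0];
		ret = subdev_8255_init(dev, s, NULL, dev->iobase);
		if (ret)
			return ret;	/* propagate the error, core cleans up */

		return 0;	/* success is 0, not 1 */
	}
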
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 68c3fb3226ca..324746b14931 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -101,14 +101,27 @@ struct aio12_8_private {
unsigned int ao_readback[4];
};
+static int aio_aio12_8_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + AIO12_8_STATUS_REG);
+ if (status & AIO12_8_STATUS_ADC_EOC)
+ return 0;
+ return -EBUSY;
+}
+
static int aio_aio12_8_ai_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
- unsigned int val;
unsigned char control;
+ int ret;
int n;
/*
@@ -122,20 +135,13 @@ static int aio_aio12_8_ai_read(struct comedi_device *dev,
inb(dev->iobase + AIO12_8_STATUS_REG);
for (n = 0; n < insn->n; n++) {
- int timeout = 5;
-
/* Setup and start conversion */
outb(control, dev->iobase + AIO12_8_ADC_REG);
/* Wait for conversion to complete */
- do {
- val = inb(dev->iobase + AIO12_8_STATUS_REG);
- timeout--;
- if (timeout == 0) {
- dev_err(dev->class_dev, "ADC timeout\n");
- return -ETIMEDOUT;
- }
- } while (!(val & AIO12_8_STATUS_ADC_EOC));
+ ret = comedi_timeout(dev, s, insn, aio_aio12_8_ai_eoc, 0);
+ if (ret)
+ return ret;
data[n] = inw(dev->iobase + AIO12_8_ADC_REG) & s->maxdata;
}
@@ -247,9 +253,6 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
/* 8254 counter/timer subdevice */
s->type = COMEDI_SUBD_UNUSED;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 22b3dda135ff..781104aa533e 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -98,7 +98,7 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = aio_iiro_16_dio_insn_bits_read;
- return 1;
+ return 0;
}
static struct comedi_driver aio_iiro_16_driver = {
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 9c9559ffc643..818a0d7e3d58 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -1208,7 +1208,7 @@ int amplc_dio200_common_attach(struct comedi_device *dev, unsigned int irq,
"warning! irq %u unavailable!\n", irq);
}
}
- dev_info(dev->class_dev, "attached\n");
+
return 0;
}
EXPORT_SYMBOL_GPL(amplc_dio200_common_attach);
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 31734e1142fd..b21d7b48f1da 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -368,32 +368,6 @@ static irqreturn_t pc236_interrupt(int irq, void *d)
return IRQ_RETVAL(handled);
}
-static void pc236_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- const struct pc236_board *thisboard = comedi_board(dev);
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- char tmpbuf[60];
- int tmplen;
-
- if (is_isa_board(thisboard))
- tmplen = scnprintf(tmpbuf, sizeof(tmpbuf),
- "(base %#lx) ", dev->iobase);
- else if (is_pci_board(thisboard))
- tmplen = scnprintf(tmpbuf, sizeof(tmpbuf),
- "(pci %s) ", pci_name(pcidev));
- else
- tmplen = 0;
- if (irq)
- tmplen += scnprintf(&tmpbuf[tmplen], sizeof(tmpbuf) - tmplen,
- "(irq %u%s) ", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- tmplen += scnprintf(&tmpbuf[tmplen], sizeof(tmpbuf) - tmplen,
- "(no irq) ");
- dev_info(dev->class_dev, "%s %sattached\n",
- dev->board_name, tmpbuf);
-}
-
static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
unsigned int irq, unsigned long req_irq_flags)
{
@@ -411,10 +385,9 @@ static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
s = &dev->subdevices[0];
/* digital i/o subdevice (8255) */
ret = subdev_8255_init(dev, s, NULL, iobase);
- if (ret < 0) {
- dev_err(dev->class_dev, "error! out of memory!\n");
+ if (ret)
return ret;
- }
+
s = &dev->subdevices[1];
dev->read_subdev = s;
s->type = COMEDI_SUBD_UNUSED;
@@ -434,8 +407,8 @@ static int pc236_common_attach(struct comedi_device *dev, unsigned long iobase,
s->cancel = pc236_intr_cancel;
}
}
- pc236_report_attach(dev, irq);
- return 1;
+
+ return 0;
}
static int pc236_pci_common_attach(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 5b4b5ab34e2e..7c10d28d2784 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -94,8 +94,6 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* read initial relay state */
s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
- dev_info(dev->class_dev, "%s (base %#lx) attached\n", dev->board_name,
- dev->iobase);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index ae4048a624fa..29e01e280039 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -533,9 +533,8 @@ static void pci224_ao_start(struct comedi_device *dev,
set_bit(AO_CMD_STARTED, &devpriv->state);
if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) {
/* An empty acquisition! */
- pci224_ao_stop(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
} else {
/* Enable interrupts. */
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
@@ -585,9 +584,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
room = PCI224_FIFO_ROOM_EMPTY;
if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) {
/* FIFO empty at end of counted acquisition. */
- pci224_ao_stop(dev, s);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
break;
@@ -605,9 +603,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
/* FIFO is less than half-full. */
if (num_scans == 0) {
/* Nothing left to put in the FIFO. */
- pci224_ao_stop(dev, s);
- s->async->events |= COMEDI_CB_OVERFLOW;
dev_err(dev->class_dev, "AO buffer underrun\n");
+ s->async->events |= COMEDI_CB_OVERFLOW;
}
}
/* Determine how many new scans can be put in the FIFO. */
@@ -670,9 +667,8 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
PCI224_DACCON_TRIG_MASK);
outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
- if (s->async->events)
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
/*
@@ -1237,20 +1233,6 @@ static struct pci_dev *pci224_find_pci_dev(struct comedi_device *dev,
return NULL;
}
-static void pci224_report_attach(struct comedi_device *dev, unsigned int irq)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- char tmpbuf[30];
-
- if (irq)
- snprintf(tmpbuf, sizeof(tmpbuf), "irq %u%s", irq,
- (dev->irq ? "" : " UNAVAILABLE"));
- else
- snprintf(tmpbuf, sizeof(tmpbuf), "no irq");
- dev_info(dev->class_dev, "%s (pci %s) (%s) attached\n",
- dev->board_name, pci_name(pcidev), tmpbuf);
-}
-
/*
* Common part of attach and auto_attach.
*/
@@ -1399,8 +1381,7 @@ static int pci224_attach_common(struct comedi_device *dev,
}
}
- pci224_report_attach(dev, irq);
- return 1;
+ return 0;
}
static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
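
Note on the pattern above: the pci224 hunks replace direct comedi_event() calls with cfc_handle_events(), which (as the comedi_fc.c changes later in this patch show) invokes the subdevice's cancel handler itself whenever COMEDI_CB_EOA, COMEDI_CB_ERROR or COMEDI_CB_OVERFLOW is set, so the explicit pci224_ao_stop() calls become redundant. A minimal sketch of how an event source ends up using the helper; the function name is hypothetical and the driver is assumed to include "../comedidev.h" and "comedi_fc.h":

static void xxx_ao_report_eoa(struct comedi_device *dev,
                              struct comedi_subdevice *s)
{
    /*
     * Setting EOA is enough: cfc_handle_events() runs s->cancel(dev, s)
     * itself for EOA/ERROR/OVERFLOW before calling comedi_event(),
     * so no explicit xxx_ao_stop()/cancel call is needed here.
     */
    s->async->events |= COMEDI_CB_EOA;
    cfc_handle_events(dev, s);
}
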
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index c08dfbbe4062..99e60835dc4a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -799,19 +799,29 @@ static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct)
/* Counter ct, 8254 mode 1, initial count not written. */
}
-/*
- * COMEDI_SUBD_AI instruction;
- */
+static int pci230_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + PCI230_ADCCON);
+ if ((status & PCI230_ADC_FIFO_EMPTY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int pci230_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct pci230_private *devpriv = dev->private;
- unsigned int n, i;
+ unsigned int n;
unsigned int chan, range, aref;
unsigned int gainshift;
- unsigned int status;
unsigned short adccon, adcen;
+ int ret;
/* Unpack channel and range. */
chan = CR_CHAN(insn->chanspec);
@@ -883,18 +893,10 @@ static int pci230_ai_rinsn(struct comedi_device *dev,
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE1);
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inw(dev->iobase + PCI230_ADCCON);
- if (!(status & PCI230_ADC_FIFO_EMPTY))
- break;
- udelay(1);
- }
- if (i == TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, pci230_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = pci230_ai_read(dev);
@@ -2762,14 +2764,14 @@ static int pci230_attach_common(struct comedi_device *dev,
/* digital i/o subdevice */
if (thisboard->have_dio) {
rc = subdev_8255_init(dev, s, NULL,
- (devpriv->iobase1 + PCI230_PPI_X_BASE));
- if (rc < 0)
+ devpriv->iobase1 + PCI230_PPI_X_BASE);
+ if (rc)
return rc;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
- dev_info(dev->class_dev, "attached\n");
- return 1;
+
+ return 0;
}
static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
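
Many of the conversions in this patch replace open-coded udelay()/counter polling loops with comedi_timeout() and a small end-of-conversion callback; pci230_ai_eoc() above is one instance. A sketch of the general shape of that idiom (XXX_STATUS_REG and XXX_READY are placeholder names for a driver's status register and "conversion done" bit; comedi_timeout() returns 0 on success or a negative errno, typically -ETIMEDOUT, which the caller simply propagates):

static int xxx_ai_eoc(struct comedi_device *dev,
                      struct comedi_subdevice *s,
                      struct comedi_insn *insn,
                      unsigned long context)
{
    unsigned int status;

    status = inw(dev->iobase + XXX_STATUS_REG);
    if (status & XXX_READY)
        return 0;        /* conversion finished */
    return -EBUSY;       /* not ready yet; core keeps polling until timeout */
}

    /* ...inside the insn_read handler, replacing the old busy-wait loop: */
    ret = comedi_timeout(dev, s, insn, xxx_ai_eoc, 0);
    if (ret)
        return ret;
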
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index be28e6cbea20..93ed03ee416a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -84,8 +84,6 @@ static int pci263_auto_attach(struct comedi_device *dev,
/* read initial relay state */
s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
- dev_info(dev->class_dev, "%s (pci %s) attached\n", dev->board_name,
- pci_name(pci_dev));
return 0;
}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 5034f663eec9..e03dd6e71415 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -1,34 +1,33 @@
/*
- comedi/drivers/c6xdigio.c
-
- Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
- (http://robot0.ge.uiuc.edu/~spong/mecha/)
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Dan Block
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * c6xdigio.c
+ * Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
+ * http://web.archive.org/web/%2A/http://robot0.ge.uiuc.edu/~spong/mecha/
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Dan Block
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-/*
-Driver: c6xdigio
-Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
-Author: Dan Block
-Status: unknown
-Devices: [Mechatronic Systems Inc.] C6x_DIGIO DSP daughter card (c6xdigio)
-Updated: Sun Nov 20 20:18:34 EST 2005
-
-This driver will not work with a 2.4 kernel.
-http://robot0.ge.uiuc.edu/~spong/mecha/
-*/
+/*
+ * Driver: c6xdigio
+ * Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
+ * Author: Dan Block
+ * Status: unknown
+ * Devices: (Mechatronic Systems Inc.) C6x_DIGIO DSP daughter card [c6xdigio]
+ * Updated: Sun Nov 20 20:18:34 EST 2005
+ *
+ * Configuration Options:
+ * [0] - base address
+ */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -43,309 +42,194 @@ http://robot0.ge.uiuc.edu/~spong/mecha/
#include "../comedidev.h"
-static u8 ReadByteFromHwPort(unsigned long addr)
-{
- u8 result = inb(addr);
- return result;
-}
-
-static void WriteByteToHwPort(unsigned long addr, u8 val)
-{
- outb_p(val, addr);
-}
-
-#define C6XDIGIO_SIZE 3
-
/*
- * port offsets
+ * Register I/O map
*/
-#define C6XDIGIO_PARALLEL_DATA 0
-#define C6XDIGIO_PARALLEL_STATUS 1
-#define C6XDIGIO_PARALLEL_CONTROL 2
-struct pwmbitstype {
- unsigned sb0:2;
- unsigned sb1:2;
- unsigned sb2:2;
- unsigned sb3:2;
- unsigned sb4:2;
-};
-union pwmcmdtype {
- unsigned cmd; /* assuming here that int is 32bit */
- struct pwmbitstype bits;
-};
-struct encbitstype {
- unsigned sb0:3;
- unsigned sb1:3;
- unsigned sb2:3;
- unsigned sb3:3;
- unsigned sb4:3;
- unsigned sb5:3;
- unsigned sb6:3;
- unsigned sb7:3;
-};
-union encvaluetype {
- unsigned value;
- struct encbitstype bits;
-};
+#define C6XDIGIO_DATA_REG 0x00
+#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
+#define C6XDIGIO_DATA_PWM (1 << 5)
+#define C6XDIGIO_DATA_ENCODER (1 << 6)
+#define C6XDIGIO_STATUS_REG 0x01
+#define C6XDIGIO_CTRL_REG 0x02
#define C6XDIGIO_TIME_OUT 20
-static void C6X_pwmInit(unsigned long baseAddr)
+static int c6xdigio_chk_status(struct comedi_device *dev, unsigned long context)
{
+ unsigned int status;
int timeout = 0;
- WriteByteToHwPort(baseAddr, 0x70);
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
+ do {
+ status = inb(dev->iobase + C6XDIGIO_STATUS_REG);
+ if ((status & 0x80) != context)
+ return 0;
timeout++;
- }
+ } while (timeout < C6XDIGIO_TIME_OUT);
- WriteByteToHwPort(baseAddr, 0x74);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+ return -EBUSY;
+}
- WriteByteToHwPort(baseAddr, 0x70);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+static int c6xdigio_write_data(struct comedi_device *dev,
+ unsigned int val, unsigned int status)
+{
+ outb_p(val, dev->iobase + C6XDIGIO_DATA_REG);
+ return c6xdigio_chk_status(dev, status);
+}
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
+static int c6xdigio_get_encoder_bits(struct comedi_device *dev,
+ unsigned int *bits,
+ unsigned int cmd,
+ unsigned int status)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + C6XDIGIO_STATUS_REG);
+ val >>= 3;
+ val &= 0x07;
+
+ *bits = val;
+ return c6xdigio_write_data(dev, cmd, status);
}
-static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value)
+static void c6xdigio_pwm_write(struct comedi_device *dev,
+ unsigned int chan, unsigned int val)
{
- unsigned ppcmd;
- union pwmcmdtype pwm;
- int timeout = 0;
- unsigned tmp;
-
- pwm.cmd = value;
- if (pwm.cmd > 498)
- pwm.cmd = 498;
- if (pwm.cmd < 2)
- pwm.cmd = 2;
-
- if (channel == 0) {
- ppcmd = 0x28;
- } else { /* if channel == 1 */
- ppcmd = 0x30;
- } /* endif */
-
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb0);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ unsigned int cmd = C6XDIGIO_DATA_PWM | C6XDIGIO_DATA_CHAN(chan);
+ unsigned int bits;
+
+ if (val > 498)
+ val = 498;
+ if (val < 2)
+ val = 2;
+
+ bits = (val >> 0) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+ bits = (val >> 2) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
+ bits = (val >> 4) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+ bits = (val >> 6) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (1 << 2), 0x80);
+ bits = (val >> 8) & 0x03;
+ c6xdigio_write_data(dev, cmd | bits | (0 << 2), 0x00);
+
+ c6xdigio_write_data(dev, 0x00, 0x80);
+}
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb1 + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+static int c6xdigio_encoder_read(struct comedi_device *dev,
+ unsigned int chan)
+{
+ unsigned int cmd = C6XDIGIO_DATA_ENCODER | C6XDIGIO_DATA_CHAN(chan);
+ unsigned int val = 0;
+ unsigned int bits;
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb2);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_write_data(dev, cmd, 0x00);
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb3 + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 0);
- WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb4);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 3);
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 6);
-}
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 9);
-static int C6X_encInput(unsigned long baseAddr, unsigned channel)
-{
- unsigned ppcmd;
- union encvaluetype enc;
- int timeout = 0;
- int tmp;
-
- enc.value = 0;
- if (channel == 0)
- ppcmd = 0x48;
- else
- ppcmd = 0x50;
-
- WriteByteToHwPort(baseAddr, ppcmd);
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 12);
- enc.bits.sb0 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb1 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb2 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb3 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb4 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb5 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb6 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd + 0x4);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
- enc.bits.sb7 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
- WriteByteToHwPort(baseAddr, ppcmd);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 15);
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- tmp = ReadByteFromHwPort(baseAddr + 1);
- while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
- tmp = ReadByteFromHwPort(baseAddr + 1);
- timeout++;
- }
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (1 << 2), 0x80);
+ val |= (bits << 18);
+
+ c6xdigio_get_encoder_bits(dev, &bits, cmd | (0 << 2), 0x00);
+ val |= (bits << 21);
+
+ c6xdigio_write_data(dev, 0x00, 0x80);
- return enc.value ^ 0x800000;
+ return val;
}
-static void C6X_encResetAll(unsigned long baseAddr)
+static int c6xdigio_pwm_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned timeout = 0;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = (s->state >> (16 * chan)) & 0xffff;
+ int i;
- WriteByteToHwPort(baseAddr, 0x68);
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x6C);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x68);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
- }
- WriteByteToHwPort(baseAddr, 0x0);
- timeout = 0;
- while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
- && (timeout < C6XDIGIO_TIME_OUT)) {
- timeout++;
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+ c6xdigio_pwm_write(dev, chan, val);
}
+
+ /*
+ * There are only 2 PWM channels and they have a maxdata of 500.
+ * Instead of allocating private data to hold the values for
+ * readback, this driver just packs the values for the two channels
+ * into s->state.
+ */
+ s->state &= (0xffff << (16 * chan));
+ s->state |= (val << (16 * chan));
+
+ return insn->n;
}
-static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int c6xdigio_pwm_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
int i;
- int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
- C6X_pwmOutput(dev->iobase, chan, data[i]);
- /* devpriv->ao_readback[chan] = data[i]; */
- }
- return i;
+ val = (s->state >> (16 * chan)) & 0xffff;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = val;
+
+ return insn->n;
}
-static int c6xdigio_ei_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int c6xdigio_encoder_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
- for (n = 0; n < insn->n; n++)
- data[n] = (C6X_encInput(dev->iobase, chan) & 0xffffff);
+ for (i = 0; i < insn->n; i++) {
+ val = c6xdigio_encoder_read(dev, chan);
+
+ /* munge two's complement value to offset binary */
+ data[i] = comedi_offset_munge(s, val);
+ }
- return n;
+ return insn->n;
}
-static void board_init(struct comedi_device *dev)
+static void c6xdigio_init(struct comedi_device *dev)
{
- C6X_pwmInit(dev->iobase);
- C6X_encResetAll(dev->iobase);
+ /* Initialize the PWM */
+ c6xdigio_write_data(dev, 0x70, 0x00);
+ c6xdigio_write_data(dev, 0x74, 0x80);
+ c6xdigio_write_data(dev, 0x70, 0x00);
+ c6xdigio_write_data(dev, 0x00, 0x80);
+
+ /* Reset the encoders */
+ c6xdigio_write_data(dev, 0x68, 0x00);
+ c6xdigio_write_data(dev, 0x6c, 0x80);
+ c6xdigio_write_data(dev, 0x68, 0x00);
+ c6xdigio_write_data(dev, 0x00, 0x80);
}
static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
@@ -367,7 +251,7 @@ static int c6xdigio_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], C6XDIGIO_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x03);
if (ret)
return ret;
@@ -380,27 +264,26 @@ static int c6xdigio_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* pwm output subdevice */
- s->type = COMEDI_SUBD_AO; /* Not sure what to put here */
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 2;
- /* s->trig[0] = c6xdigio_pwmo; */
- s->insn_write = c6xdigio_pwmo_insn_write;
- s->maxdata = 500;
- s->range_table = &range_bipolar10; /* A suitable lie */
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = 500;
+ s->range_table = &range_unknown;
+ s->insn_write = c6xdigio_pwm_insn_write;
+ s->insn_read = c6xdigio_pwm_insn_read;
s = &dev->subdevices[1];
/* encoder (counter) subdevice */
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
- s->n_chan = 2;
- /* s->trig[0] = c6xdigio_ei; */
- s->insn_read = c6xdigio_ei_insn_read;
- s->maxdata = 0xffffff;
- s->range_table = &range_unknown;
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
+ s->n_chan = 2;
+ s->maxdata = 0xffffff;
+ s->range_table = &range_unknown;
+ s->insn_read = c6xdigio_encoder_insn_read;
/* I will call this init anyway but more than likely the DSP board */
/* will not be connected when device driver is loaded. */
- board_init(dev);
+ c6xdigio_init(dev);
return 0;
}
@@ -420,5 +303,5 @@ static struct comedi_driver c6xdigio_driver = {
module_comedi_driver(c6xdigio_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for the C6x_DIGIO DSP daughter card");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 64d5f291553f..645fcb0cf17d 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -95,10 +95,17 @@ static const struct comedi_lrange das16cs_ai_range = {
}
};
-static irqreturn_t das16cs_interrupt(int irq, void *d)
+static int das16cs_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- /* struct comedi_device *dev = d; */
- return IRQ_HANDLED;
+ unsigned int status;
+
+ status = inw(dev->iobase + DAS16CS_MISC1);
+ if (status & 0x0080)
+ return 0;
+ return -EBUSY;
}
static int das16cs_ai_rinsn(struct comedi_device *dev,
@@ -109,8 +116,8 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
int range = CR_RANGE(insn->chanspec);
int aref = CR_AREF(insn->chanspec);
+ int ret;
int i;
- int to;
outw(chan, dev->iobase + DAS16CS_DIO_MUX);
@@ -138,132 +145,16 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outw(0, dev->iobase + DAS16CS_ADC_DATA);
-#define TIMEOUT 1000
- for (to = 0; to < TIMEOUT; to++) {
- if (inw(dev->iobase + DAS16CS_MISC1) & 0x0080)
- break;
- }
- if (to == TIMEOUT) {
- dev_dbg(dev->class_dev, "cb_das16_cs: ai timeout\n");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das16cs_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[i] = inw(dev->iobase + DAS16CS_ADC_DATA);
}
return i;
}
-static int das16cs_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- return -EINVAL;
-}
-
-static int das16cs_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- int tmp;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->convert_src);
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-
-#define MAX_SPEED 10000 /* in nanoseconds */
-#define MIN_SPEED 1000000000 /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SPEED);
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- /* should specify multiple external triggers */
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- MAX_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
- MIN_SPEED);
- } else {
- /* external trigger */
- /* see above */
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9);
- }
-
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- unsigned int div1 = 0, div2 = 0;
-
- tmp = cmd->scan_begin_arg;
- i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
- &div1, &div2,
- &cmd->scan_begin_arg, cmd->flags);
- if (tmp != cmd->scan_begin_arg)
- err++;
- }
- if (cmd->convert_src == TRIG_TIMER) {
- unsigned int div1 = 0, div2 = 0;
-
- tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
- &div1, &div2,
- &cmd->scan_begin_arg, cmd->flags);
- if (tmp != cmd->convert_arg)
- err++;
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg <
- cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg =
- cmd->convert_arg * cmd->scan_end_arg;
- err++;
- }
- }
-
- if (err)
- return 4;
-
- return 0;
-}
-
static int das16cs_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -401,10 +292,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
dev->iobase = link->resource[0]->start;
link->priv = dev;
- ret = pcmcia_request_irq(link, das16cs_interrupt);
- if (ret)
- return ret;
- dev->irq = link->irq;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -415,7 +302,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
@@ -424,8 +310,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->range_table = &das16cs_ai_range;
s->len_chanlist = 16;
s->insn_read = das16cs_ai_rinsn;
- s->do_cmd = das16cs_ai_cmd;
- s->do_cmdtest = das16cs_ai_cmdtest;
s = &dev->subdevices[1];
/* analog output subdevice */
@@ -451,10 +335,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->insn_bits = das16cs_dio_insn_bits;
s->insn_config = das16cs_dio_insn_config;
- dev_info(dev->class_dev, "%s: %s, I/O base=0x%04lx, irq=%u\n",
- dev->driver->driver_name, dev->board_name,
- dev->iobase, dev->irq);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 9819be092f8d..83a265f3408c 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -376,6 +376,20 @@ static inline unsigned int cal_enable_bits(struct comedi_device *dev)
return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source);
}
+static int cb_pcidas_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct cb_pcidas_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = inw(devpriv->control_status + ADCMUX_CONT);
+ if (status & EOC)
+ return 0;
+ return -EBUSY;
+}
+
static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -385,7 +399,8 @@ static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
unsigned int bits;
- int n, i;
+ int ret;
+ int n;
/* enable calibration input if appropriate */
if (insn->chanspec & CR_ALT_SOURCE) {
@@ -415,13 +430,9 @@ static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
outw(0, devpriv->adc_fifo + ADCDATA);
/* wait for conversion to end */
- /* return -ETIMEDOUT if there is a timeout */
- for (i = 0; i < 10000; i++) {
- if (inw(devpriv->control_status + ADCMUX_CONT) & EOC)
- break;
- }
- if (i == 10000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, cb_pcidas_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = inw(devpriv->adc_fifo + ADCDATA);
@@ -1006,9 +1017,9 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
/* set start trigger and burst mode */
bits = 0;
- if (cmd->start_src == TRIG_NOW)
+ if (cmd->start_src == TRIG_NOW) {
bits |= SW_TRIGGER;
- else if (cmd->start_src == TRIG_EXT) {
+ } else { /* TRIG_EXT */
bits |= EXT_TRIGGER | TGEN | XTRCL;
if (thisboard->is_1602) {
if (cmd->start_arg & CR_INVERT)
@@ -1016,9 +1027,6 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
if (cmd->start_arg & CR_EDGE)
bits |= TGSEL;
}
- } else {
- comedi_error(dev, "bug!");
- return -1;
}
if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
bits |= BURSTE;
@@ -1269,8 +1277,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
unsigned int num_points;
unsigned long flags;
- async->events = 0;
-
if (status & DAEMI) {
/* clear dac empty interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -1282,7 +1288,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
(cmd->stop_src == TRIG_COUNT
&& devpriv->ao_count)) {
comedi_error(dev, "dac fifo underflow");
- cb_pcidas_ao_cancel(dev, s);
async->events |= COMEDI_CB_ERROR;
}
async->events |= COMEDI_CB_EOA;
@@ -1312,7 +1317,7 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
spin_unlock_irqrestore(&dev->spinlock, flags);
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
@@ -1332,7 +1337,6 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
return IRQ_NONE;
async = s->async;
- async->events = 0;
s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
@@ -1364,10 +1368,8 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
cfc_write_array_to_buffer(s, devpriv->ai_buffer,
num_samples * sizeof(short));
devpriv->count -= num_samples;
- if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0) {
+ if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0)
async->events |= COMEDI_CB_EOA;
- cb_pcidas_cancel(dev, s);
- }
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | INT,
@@ -1384,7 +1386,6 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
if (async->cmd.stop_src == TRIG_COUNT &&
--devpriv->count == 0) {
/* end of acquisition */
- cb_pcidas_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
break;
}
@@ -1411,11 +1412,10 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
outw(devpriv->adc_fifo_bits | LADFUL,
devpriv->control_status + INT_ADCFIFO);
spin_unlock_irqrestore(&dev->spinlock, flags);
- cb_pcidas_cancel(dev, s);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1576,9 +1576,6 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
devpriv->s5933_config + AMCC_OP_REG_INTCSR);
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 4fff1738e3f8..f9afcbe1da54 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1664,15 +1664,36 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
i2c_stop(dev);
}
+static int cb_pcidas64_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ const struct pcidas64_board *thisboard = comedi_board(dev);
+ struct pcidas64_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->main_iobase + HW_STATUS_REG);
+ if (thisboard->layout == LAYOUT_4020) {
+ status = readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG);
+ if (status)
+ return 0;
+ } else {
+ if (pipe_full_bits(status))
+ return 0;
+ }
+ return -EBUSY;
+}
+
static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct pcidas64_board *thisboard = comedi_board(dev);
struct pcidas64_private *devpriv = dev->private;
- unsigned int bits = 0, n, i;
+ unsigned int bits = 0, n;
unsigned int channel, range, aref;
unsigned long flags;
- static const int timeout = 100;
+ int ret;
channel = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
@@ -1770,23 +1791,10 @@ static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->main_iobase + ADC_CONVERT_REG);
/* wait for data */
- for (i = 0; i < timeout; i++) {
- bits = readw(devpriv->main_iobase + HW_STATUS_REG);
- if (thisboard->layout == LAYOUT_4020) {
- if (readw(devpriv->main_iobase +
- ADC_WRITE_PNTR_REG))
- break;
- } else {
- if (pipe_full_bits(bits))
- break;
- }
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, " analog input read insn timed out");
- dev_info(dev->class_dev, "status 0x%x\n", bits);
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, cb_pcidas64_ai_eoc, 0);
+ if (ret)
+ return ret;
+
if (thisboard->layout == LAYOUT_4020)
data[n] = readl(devpriv->dio_counter_iobase +
ADC_FIFO_REG) & 0xffff;
@@ -3816,16 +3824,19 @@ static int setup_subdevices(struct comedi_device *dev)
if (thisboard->has_8255) {
if (thisboard->layout == LAYOUT_4020) {
dio_8255_iobase = devpriv->main_iobase + I8255_4020_REG;
- subdev_8255_init(dev, s, dio_callback_4020,
- (unsigned long)dio_8255_iobase);
+ ret = subdev_8255_init(dev, s, dio_callback_4020,
+ (unsigned long)dio_8255_iobase);
} else {
dio_8255_iobase =
devpriv->dio_counter_iobase + DIO_8255_OFFSET;
- subdev_8255_init(dev, s, dio_callback,
- (unsigned long)dio_8255_iobase);
+ ret = subdev_8255_init(dev, s, dio_callback,
+ (unsigned long)dio_8255_iobase);
}
- } else
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* 8 channel dio for 60xx */
s = &dev->subdevices[5];
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 8cca0518cfda..901dc5d1bb72 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -388,8 +388,6 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
for (i = 0; i < thisboard->ao_chans; i++)
cb_pcidda_calibrate(dev, i, devpriv->ao_range[i]);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 57295d189ff6..d3141c865fe7 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -85,21 +85,31 @@ struct cb_pcimdas_private {
unsigned int ao_readback[2];
};
-/*
- * "instructions" read/write data in "one-shot" or "software-triggered"
- * mode.
- */
+static int cb_pcimdas_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct cb_pcimdas_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = inb(devpriv->BADR3 + 2);
+ if ((status & 0x80) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct cb_pcimdas_private *devpriv = dev->private;
- int n, i;
+ int n;
unsigned int d;
- unsigned int busy;
int chan = CR_CHAN(insn->chanspec);
unsigned short chanlims;
int maxchans;
+ int ret;
/* only support sw initiated reads from a single channel */
@@ -133,17 +143,10 @@ static int cb_pcimdas_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
outw(0, dev->iobase + 0);
-#define TIMEOUT 1000 /* typically takes 5 loops on a lightly loaded Pentium 100MHz, */
- /* this is likely to be 100 loops on a 2GHz machine, so set 1000 as the limit. */
-
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- busy = inb(devpriv->BADR3 + 2) & 0x80;
- if (!busy)
- break;
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, cb_pcimdas_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
data[n] = inw(dev->iobase + 0);
@@ -247,9 +250,9 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
/* digital i/o subdevice */
- subdev_8255_init(dev, s, NULL, iobase_8255);
-
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
+ ret = subdev_8255_init(dev, s, NULL, iobase_8255);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 43a86630a66e..4a2b200de01b 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -187,9 +187,7 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
- return 1;
+ return 0;
}
static struct comedi_driver cb_pcimdda_driver = {
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 51a59e5b8ec5..8450c99af8b0 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -211,7 +211,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
return -EINVAL;
}
- snprintf(file, sizeof(file), "/dev/comedi%u", minor);
+ snprintf(file, sizeof(file), "/dev/comedi%d", minor);
file[sizeof(file) - 1] = 0;
d = comedi_open(file);
@@ -254,6 +254,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devs) {
dev_err(dev->class_dev,
"Could not allocate memory. Out of memory?\n");
+ kfree(bdev);
return -ENOMEM;
}
devpriv->devs = devs;
@@ -263,7 +264,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
char buf[20];
int left =
MAX_BOARD_NAME - strlen(devpriv->name) - 1;
- snprintf(buf, sizeof(buf), "%d:%d ",
+ snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
buf[sizeof(buf) - 1] = 0;
strncat(devpriv->name, buf, left);
diff --git a/drivers/staging/comedi/drivers/comedi_fc.c b/drivers/staging/comedi/drivers/comedi_fc.c
index 26d9dbcf8bd4..9d9b1469e89a 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.c
+++ b/drivers/staging/comedi/drivers/comedi_fc.c
@@ -1,34 +1,53 @@
/*
- comedi/drivers/comedi_fc.c
-
- This is a place for code driver writers wish to share between
- two or more drivers. fc is short
- for frank-common.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2002 Frank Mori Hess
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi_fc.c
+ * This is a place for code that driver writers wish to share between
+ * two or more drivers. fc is short for frank-common.
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2002 Frank Mori Hess
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/module.h>
#include "../comedidev.h"
#include "comedi_fc.h"
-static void increment_scan_progress(struct comedi_subdevice *subd,
- unsigned int num_bytes)
+unsigned int cfc_bytes_per_scan(struct comedi_subdevice *s)
+{
+ unsigned int chanlist_len = s->async->cmd.chanlist_len;
+ unsigned int num_samples;
+ unsigned int bits_per_sample;
+
+ switch (s->type) {
+ case COMEDI_SUBD_DI:
+ case COMEDI_SUBD_DO:
+ case COMEDI_SUBD_DIO:
+ bits_per_sample = 8 * bytes_per_sample(s);
+ num_samples = (chanlist_len + bits_per_sample - 1) /
+ bits_per_sample;
+ break;
+ default:
+ num_samples = chanlist_len;
+ break;
+ }
+ return num_samples * bytes_per_sample(s);
+}
+EXPORT_SYMBOL_GPL(cfc_bytes_per_scan);
+
+void cfc_inc_scan_progress(struct comedi_subdevice *s, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
- unsigned int scan_length = cfc_bytes_per_scan(subd);
+ struct comedi_async *async = s->async;
+ unsigned int scan_length = cfc_bytes_per_scan(s);
async->scan_progress += num_bytes;
if (async->scan_progress >= scan_length) {
@@ -36,12 +55,13 @@ static void increment_scan_progress(struct comedi_subdevice *subd,
async->events |= COMEDI_CB_EOS;
}
}
+EXPORT_SYMBOL_GPL(cfc_inc_scan_progress);
/* Writes an array of data points to comedi's buffer */
-unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
+unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *s,
void *data, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
+ struct comedi_async *async = s->async;
unsigned int retval;
if (num_bytes == 0)
@@ -49,24 +69,24 @@ unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
retval = comedi_buf_write_alloc(async, num_bytes);
if (retval != num_bytes) {
- dev_warn(subd->device->class_dev, "comedi: buffer overrun\n");
+ dev_warn(s->device->class_dev, "buffer overrun\n");
async->events |= COMEDI_CB_OVERFLOW;
return 0;
}
comedi_buf_memcpy_to(async, 0, data, num_bytes);
comedi_buf_write_free(async, num_bytes);
- increment_scan_progress(subd, num_bytes);
+ cfc_inc_scan_progress(s, num_bytes);
async->events |= COMEDI_CB_BLOCK;
return num_bytes;
}
EXPORT_SYMBOL_GPL(cfc_write_array_to_buffer);
-unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
+unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *s,
void *data, unsigned int num_bytes)
{
- struct comedi_async *async = subd->async;
+ struct comedi_async *async = s->async;
if (num_bytes == 0)
return 0;
@@ -74,7 +94,7 @@ unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
num_bytes = comedi_buf_read_alloc(async, num_bytes);
comedi_buf_memcpy_from(async, 0, data, num_bytes);
comedi_buf_read_free(async, num_bytes);
- increment_scan_progress(subd, num_bytes);
+ cfc_inc_scan_progress(s, num_bytes);
async->events |= COMEDI_CB_BLOCK;
return num_bytes;
@@ -82,34 +102,33 @@ unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
EXPORT_SYMBOL_GPL(cfc_read_array_from_buffer);
unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *subd)
+ struct comedi_subdevice *s)
{
- unsigned int events = subd->async->events;
+ unsigned int events = s->async->events;
if (events == 0)
return events;
if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
- subd->cancel(dev, subd);
+ s->cancel(dev, s);
- comedi_event(dev, subd);
+ comedi_event(dev, s);
return events;
}
EXPORT_SYMBOL_GPL(cfc_handle_events);
-MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
-MODULE_DESCRIPTION("Shared functions for Comedi low-level drivers");
-MODULE_LICENSE("GPL");
-
static int __init comedi_fc_init_module(void)
{
return 0;
}
+module_init(comedi_fc_init_module);
static void __exit comedi_fc_cleanup_module(void)
{
}
-
-module_init(comedi_fc_init_module);
module_exit(comedi_fc_cleanup_module);
+
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_DESCRIPTION("Shared functions for Comedi low-level drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index 8558b07f8df3..541b9371d3da 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -1,72 +1,52 @@
/*
- comedi_fc.h
-
- This is a place for code driver writers wish to share between
- two or more drivers. These functions are meant to be used only
- by drivers, they are NOT part of the kcomedilib API!
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2002 Frank Mori Hess
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi_fc.h
+ * This is a place for code that driver writers wish to share between
+ * two or more drivers. These functions are meant to be used only
+ * by drivers; they are NOT part of the kcomedilib API!
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2002 Frank Mori Hess
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_FC_H
#define _COMEDI_FC_H
#include "../comedidev.h"
+unsigned int cfc_bytes_per_scan(struct comedi_subdevice *);
+void cfc_inc_scan_progress(struct comedi_subdevice *, unsigned int num_bytes);
+
/* Writes an array of data points to comedi's buffer */
-extern unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *subd,
- void *data,
- unsigned int num_bytes);
+unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *,
+ void *data, unsigned int num_bytes);
-static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *subd,
+static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *s,
unsigned short data)
{
- return cfc_write_array_to_buffer(subd, &data, sizeof(data));
+ return cfc_write_array_to_buffer(s, &data, sizeof(data));
};
-static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice
- *subd, unsigned int data)
+static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice *s,
+ unsigned int data)
{
- return cfc_write_array_to_buffer(subd, &data, sizeof(data));
+ return cfc_write_array_to_buffer(s, &data, sizeof(data));
};
-extern unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *subd,
- void *data,
- unsigned int num_bytes);
+unsigned int cfc_read_array_from_buffer(struct comedi_subdevice *,
+ void *data, unsigned int num_bytes);
-extern unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *subd);
-
-static inline unsigned int cfc_bytes_per_scan(struct comedi_subdevice *subd)
-{
- int num_samples;
- int bits_per_sample;
-
- switch (subd->type) {
- case COMEDI_SUBD_DI:
- case COMEDI_SUBD_DO:
- case COMEDI_SUBD_DIO:
- bits_per_sample = 8 * bytes_per_sample(subd);
- num_samples = (subd->async->cmd.chanlist_len +
- bits_per_sample - 1) / bits_per_sample;
- break;
- default:
- num_samples = subd->async->cmd.chanlist_len;
- break;
- }
- return num_samples * bytes_per_sample(subd);
-}
+unsigned int cfc_handle_events(struct comedi_device *,
+ struct comedi_subdevice *);
/**
* cfc_check_trigger_src() - trivially validate a comedi_cmd trigger source
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index d539eaf53b63..cd9562556d2c 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -185,7 +185,6 @@ static void waveform_ai_interrupt(unsigned long arg)
(devpriv->usec_remainder + elapsed_time) / devpriv->scan_period;
devpriv->usec_remainder =
(devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
- async->events = 0;
if (cmd->stop_src == TRIG_COUNT) {
unsigned int remaining = cmd->stop_arg - devpriv->ai_count;
@@ -318,12 +317,8 @@ static int waveform_ai_cmd(struct comedi_device *dev,
if (cmd->convert_src == TRIG_NOW)
devpriv->convert_period = 0;
- else if (cmd->convert_src == TRIG_TIMER)
+ else /* TRIG_TIMER */
devpriv->convert_period = cmd->convert_arg / nano_per_micro;
- else {
- comedi_error(dev, "bug setting conversion period");
- return -1;
- }
do_gettimeofday(&devpriv->last);
devpriv->usec_current = devpriv->last.tv_usec % devpriv->usec_period;
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 323a7f39cd97..0a9c32e9db4a 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -92,8 +92,6 @@ static int contec_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->insn_bits = contec_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
new file mode 100644
index 000000000000..df46e0a5bade
--- /dev/null
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -0,0 +1,172 @@
+/*
+ * dac02.c
+ * Comedi driver for DAC02 compatible boards
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on the poc driver
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2001 David A. Schleef <ds@schleef.org>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Driver: dac02
+ * Description: Comedi driver for DAC02 compatible boards
+ * Devices: (Keithley Metrabyte) DAC-02 [dac02]
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Tue, 11 Mar 2014 11:27:19 -0700
+ * Status: unknown
+ *
+ * Configuration options:
+ * [0] - I/O port base
+ */
+
+#include <linux/module.h>
+
+#include "../comedidev.h"
+
+/*
+ * The output range is selected by jumpering pins on the I/O connector.
+ *
+ * Range          Chan #  Jumper pins    Output
+ * -------------  ------  -------------  -----------------
+ * 0 to 5V          0      21 to 22       24
+ *                  1      15 to 16       18
+ * 0 to 10V         0      20 to 22       24
+ *                  1      14 to 16       18
+ * +/-5V            0      21 to 22       23
+ *                  1      15 to 16       17
+ * +/-10V           0      20 to 22       23
+ *                  1      14 to 16       17
+ * 4 to 20mA        0      21 to 22       25
+ *                  1      15 to 16       19
+ * AC reference     0      In on pin 22   24 (2-quadrant)
+ *                         In on pin 22   23 (4-quadrant)
+ *                  1      In on pin 16   18 (2-quadrant)
+ *                         In on pin 16   17 (4-quadrant)
+ */
+static const struct comedi_lrange das02_ao_ranges = {
+ 6, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ RANGE_mA(4, 20),
+ RANGE_ext(0, 1)
+ }
+};
+
+struct dac02_private {
+ unsigned int ao_readback[2];
+};
+
+/*
+ * Register I/O map
+ */
+#define DAC02_AO_LSB(x) (0x00 + ((x) * 2))
+#define DAC02_AO_MSB(x) (0x01 + ((x) * 2))
+
+static int dac02_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct dac02_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ /*
+ * Unipolar outputs are true binary encoding.
+ * Bipolar outputs are complementary offset binary
+ * (that is, 0 = +full scale, maxdata = -full scale).
+ */
+ if (comedi_range_is_bipolar(s, range))
+ val = s->maxdata - val;
+
+ /*
+ * DACs are double-buffered.
+ * Write LSB then MSB to latch output.
+ */
+ outb((val << 4) & 0xf0, dev->iobase + DAC02_AO_LSB(chan));
+ outb((val >> 4) & 0xff, dev->iobase + DAC02_AO_MSB(chan));
+ }
+
+ return insn->n;
+}
+
+static int dac02_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct dac02_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+{
+ struct dac02_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ ret = comedi_request_region(dev, it->options[0], 0x08);
+ if (ret)
+ return ret;
+
+ ret = comedi_alloc_subdevices(dev, 1);
+ if (ret)
+ return ret;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &das02_ao_ranges;
+ s->insn_write = dac02_ao_insn_write;
+ s->insn_read = dac02_ao_insn_read;
+
+ return 0;
+}
+
+static struct comedi_driver dac02_driver = {
+ .driver_name = "dac02",
+ .module = THIS_MODULE,
+ .attach = dac02_attach,
+ .detach = comedi_legacy_detach,
+};
+module_comedi_driver(dac02_driver);
+
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("Comedi driver for DAC02 compatible boards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index ce153fcb8b2a..a8f6036ad82b 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -333,14 +333,28 @@ static void setup_sampling(struct comedi_device *dev, int chan, int gain)
writeAcqScanListEntry(dev, word3);
}
+static int daqboard2000_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct daqboard2000_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->daq + acqControl);
+ if (status & context)
+ return 0;
+ return -EBUSY;
+}
+
static int daqboard2000_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct daqboard2000_private *devpriv = dev->private;
- unsigned int val;
- int gain, chan, timeout;
+ int gain, chan;
+ int ret;
int i;
writew(DAQBOARD2000_AcqResetScanListFifo |
@@ -367,25 +381,24 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
/* Enable reading from the scanlist FIFO */
writew(DAQBOARD2000_SeqStartScanList,
devpriv->daq + acqControl);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqConfigPipeFull)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqConfigPipeFull);
+ if (ret)
+ return ret;
+
writew(DAQBOARD2000_AdcPacerEnable, devpriv->daq + acqControl);
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqLogicScanning)
- break;
- /* udelay(2); */
- }
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + acqControl);
- if (val & DAQBOARD2000_AcqResultsFIFOHasValidData)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqLogicScanning);
+ if (ret)
+ return ret;
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DAQBOARD2000_AcqResultsFIFOHasValidData);
+ if (ret)
+ return ret;
+
data[i] = readw(devpriv->daq + acqResultsFIFO);
writew(DAQBOARD2000_AdcPacerDisable, devpriv->daq + acqControl);
writew(DAQBOARD2000_SeqStopScanList, devpriv->daq + acqControl);
@@ -409,6 +422,21 @@ static int daqboard2000_ao_insn_read(struct comedi_device *dev,
return i;
}
+static int daqboard2000_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct daqboard2000_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int status;
+
+ status = readw(devpriv->daq + dacControl);
+ if ((status & ((chan + 1) * 0x0010)) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int daqboard2000_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -416,8 +444,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
{
struct daqboard2000_private *devpriv = dev->private;
int chan = CR_CHAN(insn->chanspec);
- unsigned int val;
- int timeout;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
@@ -431,12 +458,11 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
udelay(1000);
#endif
writew(data[i], devpriv->daq + dacSetting(chan));
- for (timeout = 0; timeout < 20; timeout++) {
- val = readw(devpriv->daq + dacControl);
- if ((val & ((chan + 1) * 0x0010)) == 0)
- break;
- /* udelay(2); */
- }
+
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
+ if (ret)
+ return ret;
+
devpriv->ao_readback[chan] = data[i];
#if 0
/*
@@ -737,9 +763,6 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
if (result)
return result;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
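
Everything from here down replaces the same kind of open-coded polling loop with the comedi_timeout() helper, so the pattern is worth spelling out once. The sketch below is illustrative only: the MYDEV_* register names are invented, and the only assumption made about comedi_timeout() is the callback convention visible in the hunks above (return 0 when the condition is met, -EBUSY to keep polling, and the helper's own error, typically -ETIMEDOUT, when it gives up).

	#include "../comedidev.h"

	#define MYDEV_STATUS_REG	0x08		/* hypothetical status register */
	#define MYDEV_TRIG_REG		0x09		/* hypothetical trigger register */
	#define MYDEV_DATA_REG		0x00		/* hypothetical data register */
	#define MYDEV_STATUS_EOC	(1 << 7)	/* hypothetical end-of-conversion bit */

	static int mydev_ai_eoc(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned long context)
	{
		unsigned int status;

		status = inb(dev->iobase + MYDEV_STATUS_REG);
		if (status & MYDEV_STATUS_EOC)
			return 0;	/* done, stop polling */
		return -EBUSY;		/* not yet, comedi_timeout() keeps polling */
	}

	static int mydev_ai_insn_read(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
	{
		int ret;
		int i;

		for (i = 0; i < insn->n; i++) {
			outb(0, dev->iobase + MYDEV_TRIG_REG);	/* start a conversion */

			ret = comedi_timeout(dev, s, insn, mydev_ai_eoc, 0);
			if (ret)
				return ret;	/* helper's error code on timeout */

			data[i] = inw(dev->iobase + MYDEV_DATA_REG);
		}

		return insn->n;
	}

The final argument is the context value handed back to the callback unchanged; the later hunks use it to share one callback between several wait conditions.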
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index e5c0ee9a09c2..c5e352fb5555 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -201,17 +201,29 @@ static const int *const das08_gainlists[] = {
das08_pgm_gainlist,
};
-#define TIMEOUT 100000
+static int das08_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS08_STATUS);
+ if ((status & DAS08_EOC) == 0)
+ return 0;
+ return -EBUSY;
+}
static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct das08_board_struct *thisboard = comedi_board(dev);
struct das08_private_struct *devpriv = dev->private;
- int i, n;
+ int n;
int chan;
int range;
int lsb, msb;
+ int ret;
chan = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
@@ -244,14 +256,10 @@ static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
outb_p(0, dev->iobase + DAS08_TRIG_12BIT);
- for (i = 0; i < TIMEOUT; i++) {
- if (!(inb(dev->iobase + DAS08_STATUS) & DAS08_EOC))
- break;
- }
- if (i == TIMEOUT) {
- dev_err(dev->class_dev, "timeout\n");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das08_ai_eoc, 0);
+ if (ret)
+ return ret;
+
msb = inb(dev->iobase + DAS08_MSB);
lsb = inb(dev->iobase + DAS08_LSB);
if (thisboard->ai_encoding == das08_encode12) {
@@ -529,9 +537,10 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
s = &dev->subdevices[4];
/* 8255 */
if (thisboard->i8255_offset != 0) {
- subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase +
- thisboard->
- i8255_offset));
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase + thisboard->i8255_offset);
+ if (ret)
+ return ret;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
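
The same das08 hunk also starts checking the value returned by subdev_8255_init() instead of ignoring it. A hedged sketch of the usual shape of that setup, based only on the call form visible above (the io callback is NULL for the default port I/O handler; the helper's exact failure modes are an assumption):

	static int mydev_setup_dio(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   unsigned long i8255_offset)
	{
		int ret;

		if (i8255_offset) {
			/* the 8255 DIO chip lives at iobase + offset on this board */
			ret = subdev_8255_init(dev, s, NULL,
					       dev->iobase + i8255_offset);
			if (ret)
				return ret;	/* assumed to be -ENOMEM or similar */
		} else {
			/* board variant without the 8255 */
			s->type = COMEDI_SUBD_UNUSED;
		}

		return 0;
	}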
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index a8446ca04110..6a7d652ff564 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -856,18 +856,17 @@ static void das16_ai_munge(struct comedi_device *dev,
}
}
-static int das16_ai_wait_for_conv(struct comedi_device *dev,
- unsigned int timeout)
+static int das16_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned int status;
- int i;
- for (i = 0; i < timeout; i++) {
- status = inb(dev->iobase + DAS16_STATUS_REG);
- if (!(status & DAS16_STATUS_BUSY))
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+ if ((status & DAS16_STATUS_BUSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int das16_ai_insn_read(struct comedi_device *dev,
@@ -897,7 +896,7 @@ static int das16_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
outb_p(0, dev->iobase + DAS16_TRIG_REG);
- ret = das16_ai_wait_for_conv(dev, 1000);
+ ret = comedi_timeout(dev, s, insn, das16_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index fee5facff8dd..779225831dc0 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -331,14 +331,27 @@ static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
+static int das16m1_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS16M1_CS);
+ if (status & IRQDATA)
+ return 0;
+ return -EBUSY;
+}
+
static int das16m1_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct das16m1_private_struct *devpriv = dev->private;
- int i, n;
+ int ret;
+ int n;
int byte;
- const int timeout = 1000;
/* disable interrupts and internal pacer */
devpriv->control_state &= ~INTE & ~PACER_MASK;
@@ -356,14 +369,10 @@ static int das16m1_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
outb(0, dev->iobase);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS16M1_CS) & IRQDATA)
- break;
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[n] = munge_sample(inw(dev->iobase));
}
@@ -407,7 +416,6 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
s = dev->read_subdev;
async = s->async;
- async->events = 0;
cmd = &async->cmd;
/* figure out how many samples are in fifo */
@@ -440,8 +448,8 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
devpriv->adc_count += num_samples;
if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) { /* end of acquisition */
- das16m1_cancel(dev, s);
+ if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) {
+ /* end of acquisition */
async->events |= COMEDI_CB_EOA;
}
}
@@ -449,13 +457,11 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
/* this probably won't catch overruns since the card doesn't generate
* overrun interrupts, but we might as well try */
if (status & OVRUN) {
- das16m1_cancel(dev, s);
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_error(dev, "fifo overflow");
}
- comedi_event(dev, s);
-
+ cfc_handle_events(dev, s);
}
static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 320d95a5f47b..8e975d6b06db 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -646,7 +646,6 @@ static void das1800_ai_handler(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
- async->events = 0;
/* select adc for base address + 0 */
outb(ADC, dev->iobase + DAS1800_SELECT);
/* dma buffer full */
@@ -665,9 +664,8 @@ static void das1800_ai_handler(struct comedi_device *dev)
/* clear OVF interrupt bit */
outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
comedi_error(dev, "DAS1800 FIFO overflow");
- das1800_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return;
}
/* stop taking data if appropriate */
@@ -680,16 +678,12 @@ static void das1800_ai_handler(struct comedi_device *dev)
das1800_flush_dma(dev, s);
else
das1800_handle_fifo_not_empty(dev, s);
- das1800_cancel(dev, s); /* disable hardware conversions */
async->events |= COMEDI_CB_EOA;
} else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */
- das1800_cancel(dev, s); /* disable hardware conversions */
async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
-
- return;
+ cfc_handle_events(dev, s);
}
static int das1800_ai_poll(struct comedi_device *dev,
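
das16m1 and das1800 (and das800 further down) stop calling their cancel routine and comedi_event() by hand from the interrupt path and instead hand the accumulated event flags to cfc_handle_events() from comedi_fc. A rough sketch of the resulting handler shape, with an invented status register and bits; the only comedi_fc assumption is that the helper runs s->cancel() itself when COMEDI_CB_EOA or an error event is set, before waking the reader:

	#define MYDEV_STATUS_REG	0x08		/* hypothetical status register */
	#define MYDEV_STATUS_OVERRUN	(1 << 0)	/* hypothetical FIFO overrun bit */
	#define MYDEV_STATUS_EOA	(1 << 1)	/* hypothetical end-of-acquisition bit */

	static irqreturn_t mydev_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;
		struct comedi_async *async = s->async;
		unsigned int status;

		status = inb(dev->iobase + MYDEV_STATUS_REG);

		if (status & MYDEV_STATUS_OVERRUN)
			async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
		else if (status & MYDEV_STATUS_EOA)
			async->events |= COMEDI_CB_EOA;

		/* no explicit mydev_cancel() here: the helper does that on EOA/error */
		cfc_handle_events(dev, s);

		return IRQ_HANDLED;
	}

The hunks also drop the manual "async->events = 0;" at the top of each handler; clearing the flags once they have been consumed is assumed to be taken care of by the comedi core when cfc_handle_events() is used.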
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 43027ee500e3..e0cfb6cb547b 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -1,309 +1,532 @@
/*
- Some comments on the code..
-
- - it shouldn't be necessary to use outb_p().
-
- - ignoreirq creates a race condition. It needs to be fixed.
-
+ * das6402.c
+ * Comedi driver for DAS6402 compatible boards
+ * Copyright(c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Rewrite of an experimental driver by:
+ * Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
- comedi/drivers/das6402.c
- An experimental driver for Computerboards' DAS6402 I/O card
+ * Driver: das6402
+ * Description: Keithley Metrabyte DAS6402 (& compatibles)
+ * Devices: (Keithley Metrabyte) DAS6402-12 (das6402-12)
+ * (Keithley Metrabyte) DAS6402-16 (das6402-16)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Fri, 14 Mar 2014 10:18:43 -0700
+ * Status: unknown
+ *
+ * Configuration Options:
+ * [0] - I/O base address
+ * [1] - IRQ (optional, needed for async command support)
+ */
- Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>
+#include <linux/module.h>
+#include <linux/interrupt.h>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+#include "../comedidev.h"
+#include "8253.h"
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
/*
-Driver: das6402
-Description: Keithley Metrabyte DAS6402 (& compatibles)
-Author: Oystein Svendsen <svendsen@pvv.org>
-Status: bitrotten
-Devices: [Keithley Metrabyte] DAS6402 (das6402)
+ * Register I/O map
+ */
+#define DAS6402_AI_DATA_REG 0x00
+#define DAS6402_AI_MUX_REG 0x02
+#define DAS6402_AI_MUX_LO(x) (((x) & 0x3f) << 0)
+#define DAS6402_AI_MUX_HI(x) (((x) & 0x3f) << 8)
+#define DAS6402_DI_DO_REG 0x03
+#define DAS6402_AO_DATA_REG(x) (0x04 + ((x) * 2))
+#define DAS6402_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define DAS6402_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define DAS6402_STATUS_REG 0x08
+#define DAS6402_STATUS_FFNE (1 << 0)
+#define DAS6402_STATUS_FHALF (1 << 1)
+#define DAS6402_STATUS_FFULL (1 << 2)
+#define DAS6402_STATUS_XINT (1 << 3)
+#define DAS6402_STATUS_INT (1 << 4)
+#define DAS6402_STATUS_XTRIG (1 << 5)
+#define DAS6402_STATUS_INDGT (1 << 6)
+#define DAS6402_STATUS_10MHZ (1 << 7)
+#define DAS6402_STATUS_W_CLRINT (1 << 0)
+#define DAS6402_STATUS_W_CLRXTR (1 << 1)
+#define DAS6402_STATUS_W_CLRXIN (1 << 2)
+#define DAS6402_STATUS_W_EXTEND (1 << 4)
+#define DAS6402_STATUS_W_ARMED (1 << 5)
+#define DAS6402_STATUS_W_POSTMODE (1 << 6)
+#define DAS6402_STATUS_W_10MHZ (1 << 7)
+#define DAS6402_CTRL_REG 0x09
+#define DAS6402_CTRL_SOFT_TRIG (0 << 0)
+#define DAS6402_CTRL_EXT_FALL_TRIG (1 << 0)
+#define DAS6402_CTRL_EXT_RISE_TRIG (2 << 0)
+#define DAS6402_CTRL_PACER_TRIG (3 << 0)
+#define DAS6402_CTRL_BURSTEN (1 << 2)
+#define DAS6402_CTRL_XINTE (1 << 3)
+#define DAS6402_CTRL_IRQ(x) ((x) << 4)
+#define DAS6402_CTRL_INTE (1 << 7)
+#define DAS6402_TRIG_REG 0x0a
+#define DAS6402_TRIG_TGEN (1 << 0)
+#define DAS6402_TRIG_TGSEL (1 << 1)
+#define DAS6402_TRIG_TGPOL (1 << 2)
+#define DAS6402_TRIG_PRETRIG (1 << 3)
+#define DAS6402_AO_RANGE(_chan, _range) ((_range) << ((_chan) ? 6 : 4))
+#define DAS6402_AO_RANGE_MASK(_chan) (3 << ((_chan) ? 6 : 4))
+#define DAS6402_MODE_REG 0x0b
+#define DAS6402_MODE_RANGE(x) ((x) << 0)
+#define DAS6402_MODE_POLLED (0 << 2)
+#define DAS6402_MODE_FIFONEPTY (1 << 2)
+#define DAS6402_MODE_FIFOHFULL (2 << 2)
+#define DAS6402_MODE_EOB (3 << 2)
+#define DAS6402_MODE_ENHANCED (1 << 4)
+#define DAS6402_MODE_SE (1 << 5)
+#define DAS6402_MODE_UNI (1 << 6)
+#define DAS6402_MODE_DMA1 (0 << 7)
+#define DAS6402_MODE_DMA3 (1 << 7)
+#define DAS6402_TIMER_BASE 0x0c
+
+static const struct comedi_lrange das6402_ai_ranges = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
+};
-This driver has suffered bitrot.
-*/
+/*
+ * Analog output ranges are programmable on the DAS6402/12.
+ * For the DAS6402/16 the range bits have no function, the
+ * DAC ranges are selected by switches on the board.
+ */
+static const struct comedi_lrange das6402_ao_ranges = {
+ 4, {
+ BIP_RANGE(5),
+ BIP_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(10)
+ }
+};
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include "../comedidev.h"
+struct das6402_boardinfo {
+ const char *name;
+ unsigned int maxdata;
+};
-#define DAS6402_SIZE 16
-
-#define N_WORDS (3000*64)
-
-#define STOP 0
-#define START 1
-
-#define SCANL 0x3f00
-#define BYTE unsigned char
-#define WORD unsigned short
-
-/*----- register 8 ----*/
-#define CLRINT 0x01
-#define CLRXTR 0x02
-#define CLRXIN 0x04
-#define EXTEND 0x10
-#define ARMED 0x20 /* enable conting of post sample conv */
-#define POSTMODE 0x40
-#define MHZ 0x80 /* 10 MHz clock */
-/*---------------------*/
-
-/*----- register 9 ----*/
-#define IRQ (0x04 << 4) /* these two are */
-#define IRQV 10 /* dependent on each other */
-
-#define CONVSRC 0x03 /* trig src is Intarnal pacer */
-#define BURSTEN 0x04 /* enable burst */
-#define XINTE 0x08 /* use external int. trig */
-#define INTE 0x80 /* enable analog interrupts */
-/*---------------------*/
-
-/*----- register 10 ---*/
-#define TGEN 0x01 /* Use pin DI1 for externl trigging? */
-#define TGSEL 0x02 /* Use edge triggering */
-#define TGPOL 0x04 /* active edge is falling */
-#define PRETRIG 0x08 /* pretrig */
-/*---------------------*/
-
-/*----- register 11 ---*/
-#define EOB 0x0c
-#define FIFOHFULL 0x08
-#define GAIN 0x01
-#define FIFONEPTY 0x04
-#define MODE 0x10
-#define SEM 0x20
-#define BIP 0x40
-/*---------------------*/
-
-#define M0 0x00
-#define M2 0x04
-
-#define C0 0x00
-#define C1 0x40
-#define C2 0x80
-#define RWLH 0x30
+struct das6402_boardinfo das6402_boards[] = {
+ {
+ .name = "das6402-12",
+ .maxdata = 0x0fff,
+ }, {
+ .name = "das6402-16",
+ .maxdata = 0xffff,
+ },
+};
struct das6402_private {
- int ai_bytes_to_read;
+ unsigned int irq;
+
+ unsigned int count;
+ unsigned int divider1;
+ unsigned int divider2;
- int das6402_ignoreirq;
+ unsigned int ao_range;
+ unsigned int ao_readback[2];
};
-static void das6402_ai_fifo_dregs(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static void das6402_set_mode(struct comedi_device *dev,
+ unsigned int mode)
{
- while (1) {
- if (!(inb(dev->iobase + 8) & 0x01))
- return;
- comedi_buf_put(s->async, inw(dev->iobase));
- }
+ outb(DAS6402_MODE_ENHANCED | mode, dev->iobase + DAS6402_MODE_REG);
}
-static void das6402_setcounter(struct comedi_device *dev)
+static void das6402_set_extended(struct comedi_device *dev,
+ unsigned int val)
{
- BYTE p;
- unsigned short ctrlwrd;
-
- /* set up counter0 first, mode 0 */
- p = M0 | C0 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 2000;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 12);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 12);
-
- /* set up counter1, mode 2 */
- p = M2 | C1 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 10;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 13);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 13);
-
- /* set up counter1, mode 2 */
- p = M2 | C2 | RWLH;
- outb_p(p, dev->iobase + 15);
- ctrlwrd = 1000;
- p = (BYTE) (0xff & ctrlwrd);
- outb_p(p, dev->iobase + 14);
- p = (BYTE) (0xff & (ctrlwrd >> 8));
- outb_p(p, dev->iobase + 14);
+ outb(DAS6402_STATUS_W_EXTEND, dev->iobase + DAS6402_STATUS_REG);
+ outb(DAS6402_STATUS_W_EXTEND | val, dev->iobase + DAS6402_STATUS_REG);
+ outb(val, dev->iobase + DAS6402_STATUS_REG);
}
-static irqreturn_t intr_handler(int irq, void *d)
+static void das6402_clear_all_interrupts(struct comedi_device *dev)
+{
+ outb(DAS6402_STATUS_W_CLRINT |
+ DAS6402_STATUS_W_CLRXTR |
+ DAS6402_STATUS_W_CLRXIN, dev->iobase + DAS6402_STATUS_REG);
+}
+
+static void das6402_ai_clear_eoc(struct comedi_device *dev)
+{
+ outb(DAS6402_STATUS_W_CLRINT, dev->iobase + DAS6402_STATUS_REG);
+}
+
+static void das6402_enable_counter(struct comedi_device *dev, bool load)
{
- struct comedi_device *dev = d;
struct das6402_private *devpriv = dev->private;
- struct comedi_subdevice *s = &dev->subdevices[0];
+ unsigned long timer_iobase = dev->iobase + DAS6402_TIMER_BASE;
- if (!dev->attached || devpriv->das6402_ignoreirq) {
- dev_warn(dev->class_dev, "BUG: spurious interrupt\n");
- return IRQ_HANDLED;
- }
+ if (load) {
+ i8254_set_mode(timer_iobase, 0, 0, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 1, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 2, I8254_MODE2 | I8254_BINARY);
- das6402_ai_fifo_dregs(dev, s);
+ i8254_write(timer_iobase, 0, 0, devpriv->count);
+ i8254_write(timer_iobase, 0, 1, devpriv->divider1);
+ i8254_write(timer_iobase, 0, 2, devpriv->divider2);
- if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) {
- outw_p(SCANL, dev->iobase + 2); /* clears the fifo */
- outb(0x07, dev->iobase + 8); /* clears all flip-flops */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ } else {
+ i8254_set_mode(timer_iobase, 0, 0, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_iobase, 0, 2, I8254_MODE0 | I8254_BINARY);
}
+}
- outb(0x01, dev->iobase + 8); /* clear only the interrupt flip-flop */
+static irqreturn_t das6402_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+
+ das6402_clear_all_interrupts(dev);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
-#if 0
-static void das6402_ai_fifo_read(struct comedi_device *dev, short *data, int n)
+static int das6402_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- int i;
+ return -EINVAL;
+}
- for (i = 0; i < n; i++)
- data[i] = inw(dev->iobase);
+static int das6402_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ return -EINVAL;
}
-#endif
static int das6402_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ return 0;
+}
+
+static void das6402_ai_soft_trig(struct comedi_device *dev)
+{
+ outw(0, dev->iobase + DAS6402_AI_DATA_REG);
+}
+
+static int das6402_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS6402_STATUS_REG);
+ if (status & DAS6402_STATUS_FFNE)
+ return 0;
+ return -EBUSY;
+}
+
+static int das6402_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int aref = CR_AREF(insn->chanspec);
+ unsigned int val;
+ int ret;
+ int i;
+
+ val = DAS6402_MODE_RANGE(range) | DAS6402_MODE_POLLED;
+ if (aref == AREF_DIFF) {
+ if (chan > s->n_chan / 2)
+ return -EINVAL;
+ } else {
+ val |= DAS6402_MODE_SE;
+ }
+ if (comedi_range_is_unipolar(s, range))
+ val |= DAS6402_MODE_UNI;
+
+ /* enable software conversion trigger */
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ das6402_set_mode(dev, val);
+
+ /* load the mux for single channel conversion */
+ outw(DAS6402_AI_MUX_HI(chan) | DAS6402_AI_MUX_LO(chan),
+ dev->iobase + DAS6402_AI_MUX_REG);
+
+ for (i = 0; i < insn->n; i++) {
+ das6402_ai_clear_eoc(dev);
+ das6402_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, das6402_ai_eoc, 0);
+ if (ret)
+ break;
+
+ val = inw(dev->iobase + DAS6402_AI_DATA_REG);
+
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+
+ data[i] = val;
+ }
+
+ das6402_ai_clear_eoc(dev);
+
+ return insn->n;
+}
+
+static int das6402_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
struct das6402_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ /* set the range for this channel */
+ val = devpriv->ao_range;
+ val &= ~DAS6402_AO_RANGE_MASK(chan);
+ val |= DAS6402_AO_RANGE(chan, range);
+ if (val != devpriv->ao_range) {
+ devpriv->ao_range = val;
+ outb(val, dev->iobase + DAS6402_TRIG_REG);
+ }
/*
- * This function should reset the board from whatever condition it
- * is in (i.e., acquiring data), to a non-active state.
+ * The DAS6402/16 has a jumper to select either individual
+ * update (UPDATE) or simultaneous updating (XFER) of both
+ * DAC's. In UPDATE mode, when the MSB is written, that DAC
+ * is updated. In XFER mode, after both DAC's are loaded,
+ * a read cycle of any DAC register will update both DAC's
+ * simultaneously.
+ *
+ * If you have XFER mode enabled a (*insn_read) will need
+ * to be performed in order to update the DAC's with the
+ * last value written.
*/
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ if (s->maxdata == 0x0fff) {
+ /*
+ * DAS6402/12 has the two 8-bit DAC registers, left
+ * justified (the 4 LSB bits are don't care). Data
+ * can be written as one word.
+ */
+ val <<= 4;
+ outw(val, dev->iobase + DAS6402_AO_DATA_REG(chan));
+ } else {
+ /*
+ * DAS6402/16 uses both 8-bit DAC registers and needs
+ * to be written LSB then MSB.
+ */
+ outb(val & 0xff,
+ dev->iobase + DAS6402_AO_LSB_REG(chan));
+ outb((val >> 8) & 0xff,
+ dev->iobase + DAS6402_AO_MSB_REG(chan));
+ dev->iobase + DAS6402_AO_MSB_REG(chan));
+ }
+ }
- devpriv->das6402_ignoreirq = 1;
- dev_dbg(dev->class_dev, "Stopping acquisition\n");
- devpriv->das6402_ignoreirq = 1;
- outb_p(0x02, dev->iobase + 10); /* disable external trigging */
- outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
- outb_p(0, dev->iobase + 9); /* disables interrupts */
-
- outw_p(SCANL, dev->iobase + 2);
-
- return 0;
+ return insn->n;
}
-#ifdef unused
-static int das6402_ai_mode2(struct comedi_device *dev,
- struct comedi_subdevice *s, comedi_trig *it)
+static int das6402_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct das6402_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- devpriv->das6402_ignoreirq = 1;
- dev_dbg(dev->class_dev, "Starting acquisition\n");
- outb_p(0x03, dev->iobase + 10); /* enable external trigging */
- outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
- outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);
+ /*
+ * If XFER mode is enabled, reading any DAC register
+ * will update both DAC's simultaneously.
+ */
+ inw(dev->iobase + DAS6402_AO_LSB_REG(chan));
- devpriv->ai_bytes_to_read = it->n * sizeof(short);
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
- /* um... ignoreirq is a nasty race condition */
- devpriv->das6402_ignoreirq = 0;
+ return insn->n;
+}
- outw_p(SCANL, dev->iobase + 2);
+static int das6402_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + DAS6402_DI_DO_REG);
- return 0;
+ return insn->n;
}
-#endif
-static int board_init(struct comedi_device *dev)
+static int das6402_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das6402_private *devpriv = dev->private;
- BYTE b;
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + DAS6402_DI_DO_REG);
- devpriv->das6402_ignoreirq = 1;
+ data[1] = s->state;
- outb(0x07, dev->iobase + 8);
+ return insn->n;
+}
- /* register 11 */
- outb_p(MODE, dev->iobase + 11);
- b = BIP | SEM | MODE | GAIN | FIFOHFULL;
- outb_p(b, dev->iobase + 11);
+static void das6402_reset(struct comedi_device *dev)
+{
+ struct das6402_private *devpriv = dev->private;
- /* register 8 */
- outb_p(EXTEND, dev->iobase + 8);
- b = EXTEND | MHZ;
- outb_p(b, dev->iobase + 8);
- b = MHZ | CLRINT | CLRXTR | CLRXIN;
- outb_p(b, dev->iobase + 8);
+ /* enable "Enhanced" mode */
+ outb(DAS6402_MODE_ENHANCED, dev->iobase + DAS6402_MODE_REG);
- /* register 9 */
- b = IRQ | CONVSRC | BURSTEN | INTE;
- outb_p(b, dev->iobase + 9);
+ /* enable 10MHz pacer clock */
+ das6402_set_extended(dev, DAS6402_STATUS_W_10MHZ);
- /* register 10 */
- b = TGSEL | TGEN;
- outb_p(b, dev->iobase + 10);
+ /* enable software conversion trigger */
+ outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
- b = 0x07;
- outb_p(b, dev->iobase + 8);
+ /* default ADC to single-ended unipolar 10V inputs */
+ das6402_set_mode(dev, DAS6402_MODE_RANGE(0) |
+ DAS6402_MODE_POLLED |
+ DAS6402_MODE_SE |
+ DAS6402_MODE_UNI);
- das6402_setcounter(dev);
+ /* default mux for single channel conversion (channel 0) */
+ outw(DAS6402_AI_MUX_HI(0) | DAS6402_AI_MUX_LO(0),
+ dev->iobase + DAS6402_AI_MUX_REG);
- outw_p(SCANL, dev->iobase + 2); /* reset card fifo */
+ /* set both DAC's for unipolar 5V output range */
+ devpriv->ao_range = DAS6402_AO_RANGE(0, 2) | DAS6402_AO_RANGE(1, 2);
+ outb(devpriv->ao_range, dev->iobase + DAS6402_TRIG_REG);
- devpriv->das6402_ignoreirq = 0;
+ /* set both DAC's to 0V */
+ outw(0, dev->iobase + DAS6402_AO_DATA_REG(0));
+ outw(0, dev->iobase + DAS6402_AO_DATA_REG(1));
+ inw(dev->iobase + DAS6402_AO_LSB_REG(0));
- return 0;
+ das6402_enable_counter(dev, false);
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + DAS6402_DI_DO_REG);
+
+ das6402_clear_all_interrupts(dev);
}
static int das6402_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
+ const struct das6402_boardinfo *board = comedi_board(dev);
struct das6402_private *devpriv;
- unsigned int irq;
- int ret;
struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], DAS6402_SIZE);
- if (ret)
- return ret;
-
- irq = it->options[0];
- dev_dbg(dev->class_dev, "( irq = %u )\n", irq);
- ret = request_irq(irq, intr_handler, 0, "das6402", dev);
- if (ret < 0)
- return ret;
-
- dev->irq = irq;
+ int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- /* ai subdevice */
+ das6402_reset(dev);
+
+ /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
+ if ((1 << it->options[1]) & 0x8cec) {
+ ret = request_irq(it->options[1], das6402_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+ dev->irq = it->options[1];
+
+ switch (dev->irq) {
+ case 10:
+ devpriv->irq = 4;
+ break;
+ case 11:
+ devpriv->irq = 1;
+ break;
+ case 15:
+ devpriv->irq = 6;
+ break;
+ default:
+ devpriv->irq = dev->irq;
+ break;
+ }
+ }
+ }
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 8;
- /* s->trig[2]=das6402_ai_mode2; */
- s->cancel = das6402_ai_cancel;
- s->maxdata = (1 << 12) - 1;
- s->len_chanlist = 16; /* ? */
- s->range_table = &range_unknown;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 64;
+ s->maxdata = board->maxdata;
+ s->range_table = &das6402_ai_ranges;
+ s->insn_read = das6402_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = das6402_ai_cmdtest;
+ s->do_cmd = das6402_ai_cmd;
+ s->cancel = das6402_ai_cancel;
+ }
- board_init(dev);
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = board->maxdata;
+ s->range_table = &das6402_ao_ranges;
+ s->insn_write = das6402_ao_insn_write;
+ s->insn_read = das6402_ao_insn_read;
+
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das6402_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das6402_do_insn_bits;
return 0;
}
@@ -313,9 +536,12 @@ static struct comedi_driver das6402_driver = {
.module = THIS_MODULE,
.attach = das6402_attach,
.detach = comedi_legacy_detach,
+ .board_name = &das6402_boards[0].name,
+ .num_names = ARRAY_SIZE(das6402_boards),
+ .offset = sizeof(struct das6402_boardinfo),
};
module_comedi_driver(das6402_driver)
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("Comedi driver for DAS6402 compatible boards");
MODULE_LICENSE("GPL");
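
One detail in the new das6402_attach() worth double-checking is the IRQ gate, (1 << it->options[1]) & 0x8cec, which the comment says corresponds to IRQs 2,3,5,6,7,10,11,15 in enhanced mode. A throwaway user-space check (plain C, not driver code) confirms the constant:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0x8cec;	/* constant from das6402_attach() */
		int irq;

		for (irq = 0; irq < 16; irq++) {
			if ((1u << irq) & mask)
				printf("IRQ %d accepted\n", irq);
		}
		/* prints 2, 3, 5, 6, 7, 10, 11, 15 -- matching the comment above */
		return 0;
	}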
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 5af0a5764a8c..3e408370dcf3 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -420,17 +420,12 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
- switch (async->cmd.stop_src) {
- case TRIG_COUNT:
+ if (async->cmd.stop_src == TRIG_COUNT) {
devpriv->count = async->cmd.stop_arg * async->cmd.chanlist_len;
devpriv->forever = false;
- break;
- case TRIG_NONE:
+ } else { /* TRIG_NONE */
devpriv->forever = true;
devpriv->count = 0;
- break;
- default:
- break;
}
/* enable auto channel scan, send interrupts on end of conversion
@@ -440,26 +435,19 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
conv_bits |= EACS | IEOC;
if (async->cmd.start_src == TRIG_EXT)
conv_bits |= DTEN;
- switch (async->cmd.convert_src) {
- case TRIG_TIMER:
+ if (async->cmd.convert_src == TRIG_TIMER) {
conv_bits |= CASC | ITE;
/* set conversion frequency */
if (das800_set_frequency(dev) < 0) {
comedi_error(dev, "Error setting up counters");
return -1;
}
- break;
- case TRIG_EXT:
- break;
- default:
- break;
}
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, conv_bits, CONV_CONTROL);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- async->events = 0;
das800_enable(dev);
return 0;
}
@@ -532,10 +520,8 @@ static irqreturn_t das800_interrupt(int irq, void *d)
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- das800_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
- async->events = 0;
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -551,20 +537,21 @@ static irqreturn_t das800_interrupt(int irq, void *d)
das800_disable(dev);
async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
- async->events = 0;
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
-static int das800_wait_for_conv(struct comedi_device *dev, int timeout)
+static int das800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < timeout; i++) {
- if (!(inb(dev->iobase + DAS800_STATUS) & BUSY))
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + DAS800_STATUS);
+ if ((status & BUSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int das800_ai_insn_read(struct comedi_device *dev,
@@ -599,7 +586,7 @@ static int das800_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
outb_p(0, dev->iobase + DAS800_MSB);
- ret = das800_wait_for_conv(dev, 1000);
+ ret = comedi_timeout(dev, s, insn, das800_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 78a19629ff56..c8a36eb5f015 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -164,16 +164,29 @@ struct dmm32at_private {
};
+static int dmm32at_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + context);
+ if ((status & DMM32AT_STATUS) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int dmm32at_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i;
+ int n;
unsigned int d;
- unsigned char status;
unsigned short msb, lsb;
unsigned char chan;
int range;
+ int ret;
/* get the channel and range number */
@@ -190,26 +203,20 @@ static int dmm32at_ai_rinsn(struct comedi_device *dev,
outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
/* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AIRBACK);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AIRBACK);
+ if (ret)
+ return ret;
/* convert n samples */
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outb(0xff, dev->iobase + DMM32AT_CONV);
+
/* wait for conversion to end */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AISTAT);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
+ DMM32AT_AISTAT);
+ if (ret)
+ return ret;
/* read data */
lsb = inb(dev->iobase + DMM32AT_AILSB);
@@ -403,8 +410,9 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct dmm32at_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int i, range;
- unsigned char chanlo, chanhi, status;
+ int range;
+ unsigned char chanlo, chanhi;
+ int ret;
if (!cmd->chanlist)
return -EINVAL;
@@ -439,14 +447,13 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* isr */
}
- /* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_AIRBACK);
- if ((status & DMM32AT_STATUS) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ /*
+ * wait for circuit to settle
+ * we don't have the 'insn' here but it's not needed
+ */
+ ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status, DMM32AT_AIRBACK);
+ if (ret)
+ return ret;
if (devpriv->ai_scans_left > 1) {
/* start the clock and enable the interrupts */
@@ -525,6 +532,19 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
return IRQ_HANDLED;
}
+static int dmm32at_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + DMM32AT_DACSTAT);
+ if ((status & DMM32AT_DACBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int dmm32at_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -533,6 +553,7 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
unsigned char hi, lo, status;
+ int ret;
/* Writing a list of values to an AO channel is probably not
* very useful, but that's how the interface is defined. */
@@ -549,13 +570,9 @@ static int dmm32at_ao_winsn(struct comedi_device *dev,
outb(hi, dev->iobase + DMM32AT_DACMSB);
/* wait for circuit to settle */
- for (i = 0; i < 40000; i++) {
- status = inb(dev->iobase + DMM32AT_DACSTAT);
- if ((status & DMM32AT_DACBUSY) == 0)
- break;
- }
- if (i == 40000)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, dmm32at_ao_eoc, 0);
+ if (ret)
+ return ret;
/* dummy read to update trigger the output */
status = inb(dev->iobase + DMM32AT_DACMSB);
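
dmm32at shows a second use of the comedi_timeout() context argument: here it carries the register offset to poll, so one callback (dmm32at_ai_status) can watch either DMM32AT_AIRBACK or DMM32AT_AISTAT. The command-setup path also passes a NULL insn, which is fine as long as the callback never dereferences it (the in-tree comment above makes the same point). A sketch with invented register names:

	#define MYDEV_SETTLE_REG	0x04		/* hypothetical settle-status register */
	#define MYDEV_CONVERT_REG	0x05		/* hypothetical conversion-status register */
	#define MYDEV_BUSY		(1 << 7)	/* hypothetical busy bit, same in both */

	static int mydev_status(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned long context)
	{
		unsigned char status;

		/* 'context' selects which register this wait is about */
		status = inb(dev->iobase + context);
		if ((status & MYDEV_BUSY) == 0)
			return 0;
		return -EBUSY;
	}

	static int mydev_ai_single_read(struct comedi_device *dev,
					struct comedi_subdevice *s,
					struct comedi_insn *insn,
					unsigned int *data)
	{
		int ret;

		/* wait for the analog front end to settle */
		ret = comedi_timeout(dev, s, insn, mydev_status, MYDEV_SETTLE_REG);
		if (ret)
			return ret;

		/* ... trigger a conversion here ... */

		/* same callback, different register, chosen through 'context' */
		ret = comedi_timeout(dev, s, insn, mydev_status, MYDEV_CONVERT_REG);
		if (ret)
			return ret;

		data[0] = 0;	/* placeholder for the real data read-back */
		return insn->n;
	}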
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 4271903facd7..ba7c2ba618e6 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -224,23 +224,32 @@ static const struct comedi_lrange *dac_range_types[] = {
&range_unipolar5
};
-#define DT2811_TIMEOUT 5
+static int dt2811_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DT2811_ADCSR);
+ if ((status & DT2811_ADBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
- int timeout = DT2811_TIMEOUT;
+ int ret;
int i;
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + DT2811_ADGCR);
- while (timeout
- && inb(dev->iobase + DT2811_ADCSR) & DT2811_ADBUSY)
- timeout--;
- if (!timeout)
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
+ if (ret)
+ return ret;
data[i] = inb(dev->iobase + DT2811_ADDATLO);
data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index abad6e49c1c1..3794b7e52091 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -66,26 +66,35 @@ struct dt2814_private {
#define DT2814_TIMEOUT 10
#define DT2814_MAX_SPEED 100000 /* Arbitrary 10 khz limit */
+static int dt2814_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + DT2814_CSR);
+ if (status & DT2814_FINISH)
+ return 0;
+ return -EBUSY;
+}
+
static int dt2814_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i, hi, lo;
+ int n, hi, lo;
int chan;
- int status = 0;
+ int ret;
for (n = 0; n < insn->n; n++) {
chan = CR_CHAN(insn->chanspec);
outb(chan, dev->iobase + DT2814_CSR);
- for (i = 0; i < DT2814_TIMEOUT; i++) {
- status = inb(dev->iobase + DT2814_CSR);
- udelay(10);
- if (status & DT2814_FINISH)
- break;
- }
- if (i >= DT2814_TIMEOUT)
- return -ETIMEDOUT;
+
+ ret = comedi_timeout(dev, s, insn, dt2814_ai_eoc, 0);
+ if (ret)
+ return ret;
hi = inb(dev->iobase + DT2814_DATA);
lo = inb(dev->iobase + DT2814_DATA);
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index ee24717821e1..b9ac4ed8babb 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -67,15 +67,17 @@ struct dt2815_private {
unsigned int ao_readback[8];
};
-static int dt2815_wait_for_status(struct comedi_device *dev, int status)
+static int dt2815_ao_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < 100; i++) {
- if (inb(dev->iobase + DT2815_STATUS) == status)
- break;
- }
- return status;
+ status = inb(dev->iobase + DT2815_STATUS);
+ if (status == context)
+ return 0;
+ return -EBUSY;
}
static int dt2815_ao_insn_read(struct comedi_device *dev,
@@ -98,30 +100,23 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct dt2815_private *devpriv = dev->private;
int i;
int chan = CR_CHAN(insn->chanspec);
- unsigned int status;
unsigned int lo, hi;
+ int ret;
for (i = 0; i < insn->n; i++) {
lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
hi = (data[i] & 0xff0) >> 4;
- status = dt2815_wait_for_status(dev, 0x00);
- if (status != 0) {
- dev_dbg(dev->class_dev,
- "failed to write low byte on %d reason %x\n",
- chan, status);
- return -EBUSY;
- }
+ ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x00);
+ if (ret)
+ return ret;
outb(lo, dev->iobase + DT2815_DATA);
- status = dt2815_wait_for_status(dev, 0x10);
- if (status != 0x10) {
- dev_dbg(dev->class_dev,
- "failed to write high byte on %d reason %x\n",
- chan, status);
- return -EBUSY;
- }
+ ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x10);
+ if (ret)
+ return ret;
+
devpriv->ao_readback[chan] = data[i];
}
return i;
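
dt2815 is a third variation: the context is not a bit mask at all but the exact status value the handshake expects next (0x00 before the low byte, 0x10 before the high byte), and a timeout now comes back as a plain error code instead of the raw status that the old dt2815_wait_for_status() returned for the caller to interpret. The comparison-based callback, reduced to its essentials with an invented register name:

	#define MYDEV_STATUS_REG	0x01	/* hypothetical status register */

	static int mydev_ao_status(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned long context)
	{
		/* ready only when the status register equals the expected value */
		if (inb(dev->iobase + MYDEV_STATUS_REG) == context)
			return 0;
		return -EBUSY;
	}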
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 895f73a19023..16cc100531e5 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -63,7 +63,6 @@ Notes:
#include "comedi_fc.h"
-#define DT2821_TIMEOUT 100 /* 500 us */
#define DT2821_SIZE 0x10
/*
@@ -248,27 +247,6 @@ struct dt282x_private {
* Some useless abstractions
*/
#define chan_to_DAC(a) ((a)&1)
-#define mux_busy() (inw(dev->iobase+DT2821_ADCSR)&DT2821_MUXBUSY)
-#define ad_done() (inw(dev->iobase+DT2821_ADCSR)&DT2821_ADDONE)
-
-/*
- * danger! macro abuse... a is the expression to wait on, and b is
- * the statement(s) to execute if it doesn't happen.
- */
-#define wait_for(a, b) \
- do { \
- int _i; \
- for (_i = 0; _i < DT2821_TIMEOUT; _i++) { \
- if (a) { \
- _i = 0; \
- break; \
- } \
- udelay(5); \
- } \
- if (_i) { \
- b \
- } \
- } while (0)
static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
static int prep_ao_dma(struct comedi_device *dev, int chan, int size);
@@ -328,7 +306,6 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if (size == 0) {
dev_err(dev->class_dev, "AO underrun\n");
- dt282x_ao_cancel(dev, s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
}
@@ -363,7 +340,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
dt282x_munge(dev, ptr, size);
ret = cfc_write_array_to_buffer(s, ptr, size);
if (ret != size) {
- dt282x_ai_cancel(dev, s);
+ s->async->events |= COMEDI_CB_OVERFLOW;
return;
}
devpriv->nread -= size / 2;
@@ -373,7 +350,6 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
devpriv->nread = 0;
}
if (!devpriv->nread) {
- dt282x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
return;
}
@@ -471,15 +447,13 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
if (adcsr & DT2821_ADERR) {
if (devpriv->nread != 0) {
comedi_error(dev, "A/D error");
- dt282x_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_ERROR;
}
handled = 1;
}
if (dacsr & DT2821_DAERR) {
comedi_error(dev, "D/A error");
- dt282x_ao_cancel(dev, s_ao);
- s->async->events |= COMEDI_CB_ERROR;
+ s_ao->async->events |= COMEDI_CB_ERROR;
handled = 1;
}
#if 0
@@ -508,7 +482,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
#endif
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
+ cfc_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
@@ -530,6 +505,29 @@ static void dt282x_load_changain(struct comedi_device *dev, int n,
outw(n - 1, dev->iobase + DT2821_CHANCSR);
}
+static int dt282x_ai_timeout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + DT2821_ADCSR);
+ switch (context) {
+ case DT2821_MUXBUSY:
+ if ((status & DT2821_MUXBUSY) == 0)
+ return 0;
+ break;
+ case DT2821_ADDONE:
+ if (status & DT2821_ADDONE)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return -EBUSY;
+}
+
/*
* Performs a single A/D conversion.
* - Put channel/gain into channel-gain list
@@ -542,6 +540,7 @@ static int dt282x_ai_insn_read(struct comedi_device *dev,
{
const struct dt282x_board *board = comedi_board(dev);
struct dt282x_private *devpriv = dev->private;
+ int ret;
int i;
/* XXX should we really be enabling the ad clock here? */
@@ -551,13 +550,18 @@ static int dt282x_ai_insn_read(struct comedi_device *dev,
dt282x_load_changain(dev, 1, &insn->chanspec);
outw(devpriv->supcsr | DT2821_PRLD, dev->iobase + DT2821_SUPCSR);
- wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);
+ ret = comedi_timeout(dev, s, insn, dt282x_ai_timeout, DT2821_MUXBUSY);
+ if (ret)
+ return ret;
for (i = 0; i < insn->n; i++) {
outw(devpriv->supcsr | DT2821_STRIG,
dev->iobase + DT2821_SUPCSR);
- wait_for(ad_done(), comedi_error(dev, "timeout\n");
- return -ETIME;);
+
+ ret = comedi_timeout(dev, s, insn, dt282x_ai_timeout,
+ DT2821_ADDONE);
+ if (ret)
+ return ret;
data[i] =
inw(dev->iobase +
@@ -646,6 +650,7 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct dt282x_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
int timer;
+ int ret;
if (devpriv->usedma == 0) {
comedi_error(dev,
@@ -691,7 +696,9 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outw(devpriv->adcsr, dev->iobase + DT2821_ADCSR);
outw(devpriv->supcsr | DT2821_PRLD, dev->iobase + DT2821_SUPCSR);
- wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);
+ ret = comedi_timeout(dev, s, NULL, dt282x_ai_timeout, DT2821_MUXBUSY);
+ if (ret)
+ return ret;
if (cmd->scan_begin_src == TRIG_FOLLOW) {
outw(devpriv->supcsr | DT2821_STRIG,
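
dt282x retires the wait_for()/mux_busy()/ad_done() macros in favour of one callback that switches on the context to decide whether it is waiting for a bit to clear (DT2821_MUXBUSY) or to be set (DT2821_ADDONE); the command path again passes a NULL insn. The same idea in a compact sketch with invented names:

	#define MYDEV_CSR_REG	0x00		/* hypothetical control/status register */
	#define MYDEV_MUXBUSY	(1 << 8)	/* hypothetical: busy while set */
	#define MYDEV_DONE	(1 << 9)	/* hypothetical: conversion done when set */

	static int mydev_ai_wait(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned long context)
	{
		unsigned int status = inw(dev->iobase + MYDEV_CSR_REG);

		switch (context) {
		case MYDEV_MUXBUSY:
			if ((status & MYDEV_MUXBUSY) == 0)
				return 0;	/* wait for the busy bit to clear */
			break;
		case MYDEV_DONE:
			if (status & MYDEV_DONE)
				return 0;	/* wait for the done bit to be set */
			break;
		default:
			return -EINVAL;		/* unknown condition, abort the wait */
		}
		return -EBUSY;
	}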
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index f52a4476cb73..436e451cadf5 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -369,12 +369,10 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
debug_n_ints++;
- if (debug_n_ints >= 10) {
- dt3k_ai_cancel(dev, s);
+ if (debug_n_ints >= 10)
s->async->events |= COMEDI_CB_EOA;
- }
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -767,8 +765,6 @@ static int dt3000_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_PROC;
#endif
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index f224825830ba..e5593f8c7406 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -57,18 +57,27 @@ struct dyna_pci10xx_private {
unsigned long BADR3;
};
-/******************************************************************************/
-/************************** READ WRITE FUNCTIONS ******************************/
-/******************************************************************************/
+static int dyna_pci10xx_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw_p(dev->iobase);
+ if (status & (1 << 15))
+ return 0;
+ return -EBUSY;
+}
-/* analog input callback */
static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct dyna_pci10xx_private *devpriv = dev->private;
- int n, counter;
+ int n;
u16 d = 0;
+ int ret = 0;
unsigned int chan, range;
/* get the channel number and range */
@@ -82,18 +91,13 @@ static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
smp_mb();
outw_p(0x0000 + range + chan, dev->iobase + 2);
udelay(10);
+
+ ret = comedi_timeout(dev, s, insn, dyna_pci10xx_ai_eoc, 0);
+ if (ret)
+ break;
+
/* read data */
- for (counter = 0; counter < READ_TIMEOUT; counter++) {
- d = inw_p(dev->iobase);
-
- /* check if read is successful if the EOC bit is set */
- if (d & (1 << 15))
- goto conv_finish;
- }
- data[n] = 0;
- dev_dbg(dev->class_dev, "timeout reading analog input\n");
- continue;
-conv_finish:
+ d = inw_p(dev->iobase);
/* mask the first 4 bits - EOC bits */
d &= 0x0FFF;
data[n] = d;
@@ -101,7 +105,7 @@ conv_finish:
mutex_unlock(&devpriv->mutex);
/* return the number of samples read/written */
- return n;
+ return ret ? ret : n;
}
/* analog output callback */
@@ -232,8 +236,6 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
s->state = 0;
s->insn_bits = dyna_pci10xx_do_insn_bits;
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
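
dyna_pci10xx also tidies up the return convention for a partially completed read: on a timeout the sample loop breaks and the handler returns the negative error, otherwise the number of samples actually converted. A compressed sketch, assuming an end-of-conversion callback like the one sketched after the daqboard2000 hunk:

	static int mydev_ai_read_samples(struct comedi_device *dev,
					 struct comedi_subdevice *s,
					 struct comedi_insn *insn,
					 unsigned int *data)
	{
		int ret = 0;
		int n;

		for (n = 0; n < insn->n; n++) {
			ret = comedi_timeout(dev, s, insn, mydev_ai_eoc, 0);
			if (ret)
				break;		/* give up on the remaining samples */
			data[n] = inw(dev->iobase) & 0x0fff;	/* 12-bit data, mask assumed */
		}

		return ret ? ret : n;	/* negative error, or samples actually read */
	}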
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index a99ddf00ddc4..4e410f3b0e24 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -1,27 +1,49 @@
/*
- comedi/drivers/fl512.c
- Anders Gnistrup <ex18@kalman.iau.dtu.dk>
-*/
+ * fl512.c
+ * Anders Gnistrup <ex18@kalman.iau.dtu.dk>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: fl512
-Description: unknown
-Author: Anders Gnistrup <ex18@kalman.iau.dtu.dk>
-Devices: [unknown] FL512 (fl512)
-Status: unknown
-
-Digital I/O is not supported.
-
-Configuration options:
- [0] - I/O port base address
-*/
+ * Driver: fl512
+ * Description: unknown
+ * Author: Anders Gnistrup <ex18@kalman.iau.dtu.dk>
+ * Devices: [unknown] FL512 (fl512)
+ * Status: unknown
+ *
+ * Digital I/O is not supported.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ */
#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#define FL512_SIZE 16 /* the size of the used memory */
+/*
+ * Register I/O map
+ */
+#define FL512_AI_LSB_REG 0x02
+#define FL512_AI_MSB_REG 0x03
+#define FL512_AI_MUX_REG 0x02
+#define FL512_AI_START_CONV_REG 0x03
+#define FL512_AO_DATA_REG(x) (0x04 + ((x) * 2))
+#define FL512_AO_TRIG_REG(x) (0x04 + ((x) * 2))
+
struct fl512_private {
unsigned short ao_readback[2];
};
@@ -38,72 +60,69 @@ static const struct comedi_lrange range_fl512 = {
}
};
-/*
- * fl512_ai_insn : this is the analog input function
- */
-static int fl512_ai_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int fl512_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned int lo_byte, hi_byte;
- char chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = dev->iobase;
-
- for (n = 0; n < insn->n; n++) { /* sample n times on selected channel */
- /* XXX probably can move next step out of for() loop -- will
- * make AI a little bit faster. */
- outb(chan, iobase + 2); /* select chan */
- outb(0, iobase + 3); /* start conversion */
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ outb(chan, dev->iobase + FL512_AI_MUX_REG);
+
+ for (i = 0; i < insn->n; i++) {
+ outb(0, dev->iobase + FL512_AI_START_CONV_REG);
+
/* XXX should test "done" flag instead of delay */
- udelay(30); /* sleep 30 usec */
- lo_byte = inb(iobase + 2); /* low 8 byte */
- hi_byte = inb(iobase + 3) & 0xf; /* high 4 bit and mask */
- data[n] = lo_byte + (hi_byte << 8);
+ udelay(30);
+
+ val = inb(dev->iobase + FL512_AI_LSB_REG);
+ val |= (inb(dev->iobase + FL512_AI_MSB_REG) << 8);
+ val &= s->maxdata;
+
+ data[i] = val;
}
- return n;
+
+ return insn->n;
}
-/*
- * fl512_ao_insn : used to write to a DA port n times
- */
-static int fl512_ao_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int fl512_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct fl512_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec); /* get chan to write */
- unsigned long iobase = dev->iobase; /* get base address */
-
- for (n = 0; n < insn->n; n++) { /* write n data set */
- /* write low byte */
- outb(data[n] & 0x0ff, iobase + 4 + 2 * chan);
- /* write high byte */
- outb((data[n] & 0xf00) >> 8, iobase + 4 + 2 * chan);
- inb(iobase + 4 + 2 * chan); /* trig */
-
- devpriv->ao_readback[chan] = data[n];
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
+
+ /* write LSB, MSB then trigger conversion */
+ outb(val & 0x0ff, dev->iobase + FL512_AO_DATA_REG(chan));
+ outb((val >> 8) & 0xf, dev->iobase + FL512_AO_DATA_REG(chan));
+ inb(dev->iobase + FL512_AO_TRIG_REG(chan));
}
- return n;
+ devpriv->ao_readback[chan] = val;
+
+ return insn->n;
}
-/*
- * fl512_ao_insn_readback : used to read previous values written to
- * DA port
- */
-static int fl512_ao_insn_readback(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int fl512_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct fl512_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
- return n;
+ return insn->n;
}
static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -112,7 +131,7 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], FL512_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
@@ -124,42 +143,26 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- /*
- * this if the definitions of the supdevices, 2 have been defined
- */
- /* Analog indput */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* define subdevice as Analog In */
- s->type = COMEDI_SUBD_AI;
- /* you can read it from userspace */
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- /* Number of Analog input channels */
- s->n_chan = 16;
- /* accept only 12 bits of data */
- s->maxdata = 0x0fff;
- /* device use one of the ranges */
- s->range_table = &range_fl512;
- /* function to call when read AD */
- s->insn_read = fl512_ai_insn;
-
- /* Analog output */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 16;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_fl512;
+ s->insn_read = fl512_ai_insn_read;
+
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* define subdevice as Analog OUT */
- s->type = COMEDI_SUBD_AO;
- /* you can write it from userspace */
- s->subdev_flags = SDF_WRITABLE;
- /* Number of Analog output channels */
- s->n_chan = 2;
- /* accept only 12 bits of data */
- s->maxdata = 0x0fff;
- /* device use one of the ranges */
- s->range_table = &range_fl512;
- /* function to call when write DA */
- s->insn_write = fl512_ao_insn;
- /* function to call when reading DA */
- s->insn_read = fl512_ao_insn_readback;
-
- return 1;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_fl512;
+ s->insn_write = fl512_ao_insn_write;
+ s->insn_read = fl512_ao_insn_read;
+
+ return 0;
}
static struct comedi_driver fl512_driver = {
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index de60a2871d70..08d7655e24e7 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -1,24 +1,24 @@
/*
- comedi/drivers/gsc_hpdi.c
- This is a driver for the General Standards Corporation High
- Speed Parallel Digital Interface rs485 boards.
-
- Author: Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2003 Coherent Imaging Systems
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * gsc_hpdi.c
+ * Comedi driver for the General Standards Corporation
+ * High Speed Parallel Digital Interface rs485 boards.
+ *
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2003 Coherent Imaging Systems
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: gsc_hpdi
@@ -40,8 +40,6 @@
* support could be added to this driver.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -52,149 +50,105 @@
#include "plx9080.h"
#include "comedi_fc.h"
-static void abort_dma(struct comedi_device *dev, unsigned int channel);
-static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
-static irqreturn_t handle_interrupt(int irq, void *d);
-static int dio_config_block_size(struct comedi_device *dev, unsigned int *data);
-
-#define TIMER_BASE 50 /* 20MHz master clock */
-#define DMA_BUFFER_SIZE 0x10000
-#define NUM_DMA_BUFFERS 4
-#define NUM_DMA_DESCRIPTORS 256
-
-enum hpdi_registers {
- FIRMWARE_REV_REG = 0x0,
- BOARD_CONTROL_REG = 0x4,
- BOARD_STATUS_REG = 0x8,
- TX_PROG_ALMOST_REG = 0xc,
- RX_PROG_ALMOST_REG = 0x10,
- FEATURES_REG = 0x14,
- FIFO_REG = 0x18,
- TX_STATUS_COUNT_REG = 0x1c,
- TX_LINE_VALID_COUNT_REG = 0x20,
- TX_LINE_INVALID_COUNT_REG = 0x24,
- RX_STATUS_COUNT_REG = 0x28,
- RX_LINE_COUNT_REG = 0x2c,
- INTERRUPT_CONTROL_REG = 0x30,
- INTERRUPT_STATUS_REG = 0x34,
- TX_CLOCK_DIVIDER_REG = 0x38,
- TX_FIFO_SIZE_REG = 0x40,
- RX_FIFO_SIZE_REG = 0x44,
- TX_FIFO_WORDS_REG = 0x48,
- RX_FIFO_WORDS_REG = 0x4c,
- INTERRUPT_EDGE_LEVEL_REG = 0x50,
- INTERRUPT_POLARITY_REG = 0x54,
-};
-
-/* bit definitions */
-
-enum firmware_revision_bits {
- FEATURES_REG_PRESENT_BIT = 0x8000,
-};
-
-enum board_control_bits {
- BOARD_RESET_BIT = 0x1, /* wait 10usec before accessing fifos */
- TX_FIFO_RESET_BIT = 0x2,
- RX_FIFO_RESET_BIT = 0x4,
- TX_ENABLE_BIT = 0x10,
- RX_ENABLE_BIT = 0x20,
- DEMAND_DMA_DIRECTION_TX_BIT = 0x40,
- /* for ch 0, ch 1 can only transmit (when present) */
- LINE_VALID_ON_STATUS_VALID_BIT = 0x80,
- START_TX_BIT = 0x10,
- CABLE_THROTTLE_ENABLE_BIT = 0x20,
- TEST_MODE_ENABLE_BIT = 0x80000000,
-};
-
-enum board_status_bits {
- COMMAND_LINE_STATUS_MASK = 0x7f,
- TX_IN_PROGRESS_BIT = 0x80,
- TX_NOT_EMPTY_BIT = 0x100,
- TX_NOT_ALMOST_EMPTY_BIT = 0x200,
- TX_NOT_ALMOST_FULL_BIT = 0x400,
- TX_NOT_FULL_BIT = 0x800,
- RX_NOT_EMPTY_BIT = 0x1000,
- RX_NOT_ALMOST_EMPTY_BIT = 0x2000,
- RX_NOT_ALMOST_FULL_BIT = 0x4000,
- RX_NOT_FULL_BIT = 0x8000,
- BOARD_JUMPER0_INSTALLED_BIT = 0x10000,
- BOARD_JUMPER1_INSTALLED_BIT = 0x20000,
- TX_OVERRUN_BIT = 0x200000,
- RX_UNDERRUN_BIT = 0x400000,
- RX_OVERRUN_BIT = 0x800000,
-};
-
-static uint32_t almost_full_bits(unsigned int num_words)
-{
- /* XXX need to add or subtract one? */
- return (num_words << 16) & 0xff0000;
-}
-
-static uint32_t almost_empty_bits(unsigned int num_words)
-{
- return num_words & 0xffff;
-}
-
-enum features_bits {
- FIFO_SIZE_PRESENT_BIT = 0x1,
- FIFO_WORDS_PRESENT_BIT = 0x2,
- LEVEL_EDGE_INTERRUPTS_PRESENT_BIT = 0x4,
- GPIO_SUPPORTED_BIT = 0x8,
- PLX_DMA_CH1_SUPPORTED_BIT = 0x10,
- OVERRUN_UNDERRUN_SUPPORTED_BIT = 0x20,
-};
-
-enum interrupt_sources {
- FRAME_VALID_START_INTR = 0,
- FRAME_VALID_END_INTR = 1,
- TX_FIFO_EMPTY_INTR = 8,
- TX_FIFO_ALMOST_EMPTY_INTR = 9,
- TX_FIFO_ALMOST_FULL_INTR = 10,
- TX_FIFO_FULL_INTR = 11,
- RX_EMPTY_INTR = 12,
- RX_ALMOST_EMPTY_INTR = 13,
- RX_ALMOST_FULL_INTR = 14,
- RX_FULL_INTR = 15,
-};
-
-static uint32_t intr_bit(int interrupt_source)
-{
- return 0x1 << interrupt_source;
-}
-
-static unsigned int fifo_size(uint32_t fifo_size_bits)
-{
- return fifo_size_bits & 0xfffff;
-}
+/*
+ * PCI BAR2 Register map (devpriv->mmio)
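+ * (defines listed beneath a register are the bit fields of that register)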
+ */
+#define FIRMWARE_REV_REG 0x00
+#define FEATURES_REG_PRESENT_BIT (1 << 15)
+#define BOARD_CONTROL_REG 0x04
+#define BOARD_RESET_BIT (1 << 0)
+#define TX_FIFO_RESET_BIT (1 << 1)
+#define RX_FIFO_RESET_BIT (1 << 2)
+#define TX_ENABLE_BIT (1 << 4)
+#define RX_ENABLE_BIT (1 << 5)
+#define DEMAND_DMA_DIRECTION_TX_BIT (1 << 6) /* ch 0 only */
+#define LINE_VALID_ON_STATUS_VALID_BIT (1 << 7)
+#define START_TX_BIT (1 << 8)
+#define CABLE_THROTTLE_ENABLE_BIT (1 << 9)
+#define TEST_MODE_ENABLE_BIT (1 << 31)
+#define BOARD_STATUS_REG 0x08
+#define COMMAND_LINE_STATUS_MASK (0x7f << 0)
+#define TX_IN_PROGRESS_BIT (1 << 7)
+#define TX_NOT_EMPTY_BIT (1 << 8)
+#define TX_NOT_ALMOST_EMPTY_BIT (1 << 9)
+#define TX_NOT_ALMOST_FULL_BIT (1 << 10)
+#define TX_NOT_FULL_BIT (1 << 11)
+#define RX_NOT_EMPTY_BIT (1 << 12)
+#define RX_NOT_ALMOST_EMPTY_BIT (1 << 13)
+#define RX_NOT_ALMOST_FULL_BIT (1 << 14)
+#define RX_NOT_FULL_BIT (1 << 15)
+#define BOARD_JUMPER0_INSTALLED_BIT (1 << 16)
+#define BOARD_JUMPER1_INSTALLED_BIT (1 << 17)
+#define TX_OVERRUN_BIT (1 << 21)
+#define RX_UNDERRUN_BIT (1 << 22)
+#define RX_OVERRUN_BIT (1 << 23)
+#define TX_PROG_ALMOST_REG 0x0c
+#define RX_PROG_ALMOST_REG 0x10
+#define ALMOST_EMPTY_BITS(x) (((x) & 0xffff) << 0)
+#define ALMOST_FULL_BITS(x) (((x) & 0xff) << 16)
+#define FEATURES_REG 0x14
+#define FIFO_SIZE_PRESENT_BIT (1 << 0)
+#define FIFO_WORDS_PRESENT_BIT (1 << 1)
+#define LEVEL_EDGE_INTERRUPTS_PRESENT_BIT (1 << 2)
+#define GPIO_SUPPORTED_BIT (1 << 3)
+#define PLX_DMA_CH1_SUPPORTED_BIT (1 << 4)
+#define OVERRUN_UNDERRUN_SUPPORTED_BIT (1 << 5)
+#define FIFO_REG 0x18
+#define TX_STATUS_COUNT_REG 0x1c
+#define TX_LINE_VALID_COUNT_REG 0x20
+#define TX_LINE_INVALID_COUNT_REG 0x24
+#define RX_STATUS_COUNT_REG 0x28
+#define RX_LINE_COUNT_REG 0x2c
+#define INTERRUPT_CONTROL_REG 0x30
+#define FRAME_VALID_START_INTR (1 << 0)
+#define FRAME_VALID_END_INTR (1 << 1)
+#define TX_FIFO_EMPTY_INTR (1 << 8)
+#define TX_FIFO_ALMOST_EMPTY_INTR (1 << 9)
+#define TX_FIFO_ALMOST_FULL_INTR (1 << 10)
+#define TX_FIFO_FULL_INTR (1 << 11)
+#define RX_EMPTY_INTR (1 << 12)
+#define RX_ALMOST_EMPTY_INTR (1 << 13)
+#define RX_ALMOST_FULL_INTR (1 << 14)
+#define RX_FULL_INTR (1 << 15)
+#define INTERRUPT_STATUS_REG 0x34
+#define TX_CLOCK_DIVIDER_REG 0x38
+#define TX_FIFO_SIZE_REG 0x40
+#define RX_FIFO_SIZE_REG 0x44
+#define FIFO_SIZE_MASK (0xfffff << 0)
+#define TX_FIFO_WORDS_REG 0x48
+#define RX_FIFO_WORDS_REG 0x4c
+#define INTERRUPT_EDGE_LEVEL_REG 0x50
+#define INTERRUPT_POLARITY_REG 0x54
+
+#define TIMER_BASE 50 /* 20MHz master clock */
+#define DMA_BUFFER_SIZE 0x10000
+#define NUM_DMA_BUFFERS 4
+#define NUM_DMA_DESCRIPTORS 256
struct hpdi_board {
- const char *name; /* board name */
- int device_id; /* pci device id */
- int subdevice_id; /* pci subdevice id */
+ const char *name;
+ int device_id;
+ int subdevice_id;
};
static const struct hpdi_board hpdi_boards[] = {
{
- .name = "pci-hpdi32",
- .device_id = PCI_DEVICE_ID_PLX_9080,
- .subdevice_id = 0x2400,
+ .name = "pci-hpdi32",
+ .device_id = PCI_DEVICE_ID_PLX_9080,
+ .subdevice_id = 0x2400,
},
#if 0
{
- .name = "pxi-hpdi32",
- .device_id = 0x9656,
- .subdevice_id = 0x2705,
+ .name = "pxi-hpdi32",
+ .device_id = 0x9656,
+ .subdevice_id = 0x2705,
},
#endif
};
struct hpdi_private {
- /* base addresses (ioremapped) */
- void __iomem *plx9080_iobase;
- void __iomem *hpdi_iobase;
+ void __iomem *plx9080_mmio;
+ void __iomem *mmio;
uint32_t *dio_buffer[NUM_DMA_BUFFERS]; /* dma buffers */
/* physical addresses of dma buffers */
dma_addr_t dio_buffer_phys_addr[NUM_DMA_BUFFERS];
@@ -207,26 +161,334 @@ struct hpdi_private {
/* pointer to start of buffers indexed by descriptor */
uint32_t *desc_dio_buffer[NUM_DMA_DESCRIPTORS];
/* index of the dma descriptor that is currently being used */
- volatile unsigned int dma_desc_index;
+ unsigned int dma_desc_index;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
- volatile unsigned long dio_count;
- /* software copies of values written to hpdi registers */
- volatile uint32_t bits[24];
+ unsigned long dio_count;
/* number of bytes at which to generate COMEDI_CB_BLOCK events */
- volatile unsigned int block_size;
+ unsigned int block_size;
};
-static int dio_config_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
+{
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int idx;
+ unsigned int start;
+ unsigned int desc;
+ unsigned int size;
+ unsigned int next;
+
+ if (channel)
+ next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
+ else
+ next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+
+ idx = devpriv->dma_desc_index;
+ start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
+ /* loop until we have read all the full buffers */
+ for (desc = 0; (next < start || next >= start + devpriv->block_size) &&
+ desc < devpriv->num_dma_descriptors; desc++) {
+ /* transfer data from dma buffer to comedi buffer */
+ size = devpriv->block_size / sizeof(uint32_t);
+ if (cmd->stop_src == TRIG_COUNT) {
+ if (size > devpriv->dio_count)
+ size = devpriv->dio_count;
+ devpriv->dio_count -= size;
+ }
+ cfc_write_array_to_buffer(s, devpriv->desc_dio_buffer[idx],
+ size * sizeof(uint32_t));
+ idx++;
+ idx %= devpriv->num_dma_descriptors;
+ start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
+
+ devpriv->dma_desc_index = idx;
+ }
+ /* XXX check for buffer overrun somehow */
+}
+
+static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ uint32_t hpdi_intr_status, hpdi_board_status;
+ uint32_t plx_status;
+ uint32_t plx_bits;
+ uint8_t dma0_status, dma1_status;
+ unsigned long flags;
+
+ if (!dev->attached)
+ return IRQ_NONE;
+
+ plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
+ return IRQ_NONE;
+
+ hpdi_intr_status = readl(devpriv->mmio + INTERRUPT_STATUS_REG);
+ hpdi_board_status = readl(devpriv->mmio + BOARD_STATUS_REG);
+
+ if (hpdi_intr_status)
+ writel(hpdi_intr_status, devpriv->mmio + INTERRUPT_STATUS_REG);
+
+ /* spin lock makes sure no one else changes plx dma control reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
+ writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+
+ if (dma0_status & PLX_DMA_EN_BIT)
+ gsc_hpdi_drain_dma(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ /* spin lock makes sure no one else changes plx dma control reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ if (plx_status & ICS_DMA1_A) { /* XXX */ /* dma chan 1 interrupt */
+ writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ }
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ /* clear possible plx9080 interrupt sources */
+ if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
+ plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ }
+
+ if (hpdi_board_status & RX_OVERRUN_BIT) {
+ dev_err(dev->class_dev, "rx fifo overrun\n");
+ async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ }
+
+ if (hpdi_board_status & RX_UNDERRUN_BIT) {
+ dev_err(dev->class_dev, "rx fifo underrun\n");
+ async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ }
+
+ if (devpriv->dio_count == 0)
+ async->events |= COMEDI_CB_EOA;
+
+ cfc_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static void gsc_hpdi_abort_dma(struct comedi_device *dev, unsigned int channel)
+{
+ struct hpdi_private *devpriv = dev->private;
+ unsigned long flags;
+
+ /* spinlock for plx dma control/status reg */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ plx9080_abort_dma(devpriv->plx9080_mmio, channel);
+
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+}
+
+static int gsc_hpdi_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct hpdi_private *devpriv = dev->private;
+
+ writel(0, devpriv->mmio + BOARD_CONTROL_REG);
+ writel(0, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ gsc_hpdi_abort_dma(dev, 0);
+
+ return 0;
+}
+
+static int gsc_hpdi_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct hpdi_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned long flags;
+ uint32_t bits;
+
+ if (s->io_bits)
+ return -EINVAL;
+
+ writel(RX_FIFO_RESET_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+
+ gsc_hpdi_abort_dma(dev, 0);
+
+ devpriv->dma_desc_index = 0;
+
+ /*
+ * These registers are supposedly unused during chained dma,
+ * but I have found that leftover values from the last operation
+ * occasionally cause problems with transfer of first dma
+ * block. Initializing them to zero seems to fix the problem.
+ */
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
+
+ /* give location of first dma descriptor */
+ bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
+ PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
+ writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
+
+ /* enable dma transfer */
+ spin_lock_irqsave(&dev->spinlock, flags);
+ writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
+ devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ devpriv->dio_count = cmd->stop_arg;
+ else
+ devpriv->dio_count = 1;
+
+ /* clear over/under run status flags */
+ writel(RX_UNDERRUN_BIT | RX_OVERRUN_BIT,
+ devpriv->mmio + BOARD_STATUS_REG);
+
+ /* enable interrupts */
+ writel(RX_FULL_INTR, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ writel(RX_ENABLE_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+
+ return 0;
+}
+
+static int gsc_hpdi_cmd_test(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ int err = 0;
+ int i;
+
+ if (s->io_bits)
+ return -EINVAL;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ if (!cmd->chanlist_len || !cmd->chanlist) {
+ cmd->chanlist_len = 32;
+ err |= -EINVAL;
+ }
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ if (err)
+ return 4;
+
+ /* step 5: complain about special chanlist considerations */
+
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ if (CR_CHAN(cmd->chanlist[i]) != i) {
+ /* XXX could support 8 or 16 channels */
+ dev_err(dev->class_dev,
+ "chanlist must be ch 0 to 31 in order");
+ err |= -EINVAL;
+ break;
+ }
+ }
+
+ if (err)
+ return 5;
+
+ return 0;
+}
+
+/* setup dma descriptors so a link completes every 'len' bytes */
+static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
+ unsigned int len)
+{
+ struct hpdi_private *devpriv = dev->private;
+ dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
+ uint32_t next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
+ PLX_XFER_LOCAL_TO_PCI;
+ unsigned int offset = 0;
+ unsigned int idx = 0;
+ unsigned int i;
+
+ if (len > DMA_BUFFER_SIZE)
+ len = DMA_BUFFER_SIZE;
+ len -= len % sizeof(uint32_t);
+ if (len == 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DMA_DESCRIPTORS && idx < NUM_DMA_BUFFERS; i++) {
+ devpriv->dma_desc[i].pci_start_addr =
+ cpu_to_le32(devpriv->dio_buffer_phys_addr[idx] + offset);
+ devpriv->dma_desc[i].local_start_addr = cpu_to_le32(FIFO_REG);
+ devpriv->dma_desc[i].transfer_size = cpu_to_le32(len);
+ devpriv->dma_desc[i].next = cpu_to_le32((phys_addr +
+ (i + 1) * sizeof(devpriv->dma_desc[0])) | next_bits);
+
+ devpriv->desc_dio_buffer[i] = devpriv->dio_buffer[idx] +
+ (offset / sizeof(uint32_t));
+
+ offset += len;
+ if (len + offset > DMA_BUFFER_SIZE) {
+ offset = 0;
+ idx++;
+ }
+ }
+ devpriv->num_dma_descriptors = i;
+ /* fix last descriptor to point back to first */
+ devpriv->dma_desc[i - 1].next = cpu_to_le32(phys_addr | next_bits);
+
+ devpriv->block_size = len;
+
+ return len;
+}
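For reference, with the driver's default request of 0x1000 bytes (see the auto-attach path below), each 0x10000-byte DMA buffer is split into 16 blocks, so the 4 buffers yield 64 chained descriptors, well under the NUM_DMA_DESCRIPTORS limit of 256; the returned block_size then governs when COMEDI_CB_BLOCK events fire.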
+
+static int gsc_hpdi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int ret;
switch (data[0]) {
case INSN_CONFIG_BLOCK_SIZE:
- return dio_config_block_size(dev, data);
+ ret = gsc_hpdi_setup_dma_descriptors(dev, data[1]);
+ if (ret)
+ return ret;
+
+ data[1] = ret;
+ break;
default:
ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
if (ret)
@@ -237,31 +499,53 @@ static int dio_config_insn(struct comedi_device *dev,
return insn->n;
}
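For orientation, a hedged user-space sketch of how the INSN_CONFIG_BLOCK_SIZE case is normally reached through comedilib; the helper name is illustrative and the call assumes the generic comedi_do_insn() interface, with data[1] carrying the requested size in and the granted size back out:

#include <string.h>
#include <comedilib.h>

/* illustrative helper, not part of this driver */
static int hpdi_set_block_size(comedi_t *it, unsigned int subdev,
			       unsigned int bytes)
{
	comedi_insn insn;
	lsampl_t cfg[2] = { INSN_CONFIG_BLOCK_SIZE, bytes };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 2;
	insn.data = cfg;
	insn.subdev = subdev;

	if (comedi_do_insn(it, &insn) < 0)
		return -1;

	return (int)cfg[1];	/* block size actually granted */
}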
-static void disable_plx_interrupts(struct comedi_device *dev)
+static int gsc_hpdi_init(struct comedi_device *dev)
{
struct hpdi_private *devpriv = dev->private;
+ uint32_t plx_intcsr_bits;
+
+ /* wait 10usec after reset before accessing fifos */
+ writel(BOARD_RESET_BIT, devpriv->mmio + BOARD_CONTROL_REG);
+ udelay(10);
+
+ writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
+ devpriv->mmio + RX_PROG_ALMOST_REG);
+ writel(ALMOST_EMPTY_BITS(32) | ALMOST_FULL_BITS(32),
+ devpriv->mmio + TX_PROG_ALMOST_REG);
- writel(0, devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->tx_fifo_size = readl(devpriv->mmio + TX_FIFO_SIZE_REG) &
+ FIFO_SIZE_MASK;
+ devpriv->rx_fifo_size = readl(devpriv->mmio + RX_FIFO_SIZE_REG) &
+ FIFO_SIZE_MASK;
+
+ writel(0, devpriv->mmio + INTERRUPT_CONTROL_REG);
+
+ /* enable interrupts */
+ plx_intcsr_bits =
+ ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
+ ICS_DMA0_E;
+ writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+
+ return 0;
}
-/* initialize plx9080 chip */
-static void init_plx9080(struct comedi_device *dev)
+static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
{
struct hpdi_private *devpriv = dev->private;
uint32_t bits;
- void __iomem *plx_iobase = devpriv->plx9080_iobase;
+ void __iomem *plx_iobase = devpriv->plx9080_mmio;
#ifdef __BIG_ENDIAN
bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
- disable_plx_interrupts(dev);
+ writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
- abort_dma(dev, 0);
- abort_dma(dev, 1);
+ gsc_hpdi_abort_dma(dev, 0);
+ gsc_hpdi_abort_dma(dev, 1);
/* configure dma0 mode */
bits = 0;
@@ -285,117 +569,7 @@ static void init_plx9080(struct comedi_device *dev)
writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
}
-/* Allocate and initialize the subdevice structures.
- */
-static int setup_subdevices(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* analog input subdevice */
- dev->read_subdev = s;
-/* dev->write_subdev = s; */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 32;
- s->len_chanlist = 32;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_config = dio_config_insn;
- s->do_cmd = hpdi_cmd;
- s->do_cmdtest = hpdi_cmd_test;
- s->cancel = hpdi_cancel;
-
- return 0;
-}
-
-static int init_hpdi(struct comedi_device *dev)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t plx_intcsr_bits;
-
- writel(BOARD_RESET_BIT, devpriv->hpdi_iobase + BOARD_CONTROL_REG);
- udelay(10);
-
- writel(almost_empty_bits(32) | almost_full_bits(32),
- devpriv->hpdi_iobase + RX_PROG_ALMOST_REG);
- writel(almost_empty_bits(32) | almost_full_bits(32),
- devpriv->hpdi_iobase + TX_PROG_ALMOST_REG);
-
- devpriv->tx_fifo_size = fifo_size(readl(devpriv->hpdi_iobase +
- TX_FIFO_SIZE_REG));
- devpriv->rx_fifo_size = fifo_size(readl(devpriv->hpdi_iobase +
- RX_FIFO_SIZE_REG));
-
- writel(0, devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- /* enable interrupts */
- plx_intcsr_bits =
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E;
- writel(plx_intcsr_bits, devpriv->plx9080_iobase + PLX_INTRCS_REG);
-
- return 0;
-}
-
-/* setup dma descriptors so a link completes every 'transfer_size' bytes */
-static int setup_dma_descriptors(struct comedi_device *dev,
- unsigned int transfer_size)
-{
- struct hpdi_private *devpriv = dev->private;
- unsigned int buffer_index, buffer_offset;
- uint32_t next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI;
- unsigned int i;
-
- if (transfer_size > DMA_BUFFER_SIZE)
- transfer_size = DMA_BUFFER_SIZE;
- transfer_size -= transfer_size % sizeof(uint32_t);
- if (transfer_size == 0)
- return -1;
-
- buffer_offset = 0;
- buffer_index = 0;
- for (i = 0; i < NUM_DMA_DESCRIPTORS &&
- buffer_index < NUM_DMA_BUFFERS; i++) {
- devpriv->dma_desc[i].pci_start_addr =
- cpu_to_le32(devpriv->dio_buffer_phys_addr[buffer_index] +
- buffer_offset);
- devpriv->dma_desc[i].local_start_addr = cpu_to_le32(FIFO_REG);
- devpriv->dma_desc[i].transfer_size =
- cpu_to_le32(transfer_size);
- devpriv->dma_desc[i].next =
- cpu_to_le32((devpriv->dma_desc_phys_addr + (i +
- 1) *
- sizeof(devpriv->dma_desc[0])) | next_bits);
-
- devpriv->desc_dio_buffer[i] =
- devpriv->dio_buffer[buffer_index] +
- (buffer_offset / sizeof(uint32_t));
-
- buffer_offset += transfer_size;
- if (transfer_size + buffer_offset > DMA_BUFFER_SIZE) {
- buffer_offset = 0;
- buffer_index++;
- }
- }
- devpriv->num_dma_descriptors = i;
- /* fix last descriptor to point back to first */
- devpriv->dma_desc[i - 1].next =
- cpu_to_le32(devpriv->dma_desc_phys_addr | next_bits);
-
- devpriv->block_size = transfer_size;
-
- return transfer_size;
-}
-
-static const struct hpdi_board *hpdi_find_board(struct pci_dev *pcidev)
+static const struct hpdi_board *gsc_hpdi_find_board(struct pci_dev *pcidev)
{
unsigned int i;
@@ -406,16 +580,17 @@ static const struct hpdi_board *hpdi_find_board(struct pci_dev *pcidev)
return NULL;
}
-static int hpdi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int gsc_hpdi_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct hpdi_board *thisboard;
struct hpdi_private *devpriv;
+ struct comedi_subdevice *s;
int i;
int retval;
- thisboard = hpdi_find_board(pcidev);
+ thisboard = gsc_hpdi_find_board(pcidev);
if (!thisboard) {
dev_err(dev->class_dev, "gsc_hpdi: pci %s not supported\n",
pci_name(pcidev));
@@ -433,17 +608,17 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return retval;
pci_set_master(pcidev);
- devpriv->plx9080_iobase = pci_ioremap_bar(pcidev, 0);
- devpriv->hpdi_iobase = pci_ioremap_bar(pcidev, 2);
- if (!devpriv->plx9080_iobase || !devpriv->hpdi_iobase) {
+ devpriv->plx9080_mmio = pci_ioremap_bar(pcidev, 0);
+ devpriv->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!devpriv->plx9080_mmio || !devpriv->mmio) {
dev_warn(dev->class_dev, "failed to remap io memory\n");
return -ENOMEM;
}
- init_plx9080(dev);
+ gsc_hpdi_init_plx9080(dev);
/* get irq */
- if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
+ if (request_irq(pcidev->irq, gsc_hpdi_interrupt, IRQF_SHARED,
dev->board_name, dev)) {
dev_warn(dev->class_dev,
"unable to allocate irq %u\n", pcidev->irq);
@@ -470,18 +645,33 @@ static int hpdi_auto_attach(struct comedi_device *dev,
return -EIO;
}
- retval = setup_dma_descriptors(dev, 0x1000);
+ retval = gsc_hpdi_setup_dma_descriptors(dev, 0x1000);
if (retval < 0)
return retval;
- retval = setup_subdevices(dev);
- if (retval < 0)
+ retval = comedi_alloc_subdevices(dev, 1);
+ if (retval)
return retval;
- return init_hpdi(dev);
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL |
+ SDF_CMD_READ;
+ s->n_chan = 32;
+ s->len_chanlist = 32;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = gsc_hpdi_dio_insn_config;
+ s->do_cmd = gsc_hpdi_cmd;
+ s->do_cmdtest = gsc_hpdi_cmd_test;
+ s->cancel = gsc_hpdi_cancel;
+
+ return gsc_hpdi_init(dev);
}
-static void hpdi_detach(struct comedi_device *dev)
+static void gsc_hpdi_detach(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct hpdi_private *devpriv = dev->private;
@@ -490,12 +680,12 @@ static void hpdi_detach(struct comedi_device *dev)
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
- if (devpriv->plx9080_iobase) {
- disable_plx_interrupts(dev);
- iounmap(devpriv->plx9080_iobase);
+ if (devpriv->plx9080_mmio) {
+ writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ iounmap(devpriv->plx9080_mmio);
}
- if (devpriv->hpdi_iobase)
- iounmap(devpriv->hpdi_iobase);
+ if (devpriv->mmio)
+ iounmap(devpriv->mmio);
/* free pci dma buffers */
for (i = 0; i < NUM_DMA_BUFFERS; i++) {
if (devpriv->dio_buffer[i])
@@ -516,318 +706,11 @@ static void hpdi_detach(struct comedi_device *dev)
comedi_pci_disable(dev);
}
-static int dio_config_block_size(struct comedi_device *dev, unsigned int *data)
-{
- unsigned int requested_block_size;
- int retval;
-
- requested_block_size = data[1];
-
- retval = setup_dma_descriptors(dev, requested_block_size);
- if (retval < 0)
- return retval;
-
- data[1] = retval;
-
- return 2;
-}
-
-static int di_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
- int i;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- if (!cmd->chanlist_len) {
- cmd->chanlist_len = 32;
- err |= -EINVAL;
- }
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- switch (cmd->stop_src) {
- case TRIG_COUNT:
- err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- break;
- case TRIG_NONE:
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- break;
- default:
- break;
- }
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (err)
- return 4;
-
- if (!cmd->chanlist)
- return 0;
-
- for (i = 1; i < cmd->chanlist_len; i++) {
- if (CR_CHAN(cmd->chanlist[i]) != i) {
- /* XXX could support 8 or 16 channels */
- comedi_error(dev,
- "chanlist must be ch 0 to 31 in order");
- err++;
- break;
- }
- }
-
- if (err)
- return 5;
-
- return 0;
-}
-
-static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- if (s->io_bits)
- return -EINVAL;
- else
- return di_cmd_test(dev, s, cmd);
-}
-
-static inline void hpdi_writel(struct comedi_device *dev, uint32_t bits,
- unsigned int offset)
-{
- struct hpdi_private *devpriv = dev->private;
-
- writel(bits | devpriv->bits[offset / sizeof(uint32_t)],
- devpriv->hpdi_iobase + offset);
-}
-
-static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct hpdi_private *devpriv = dev->private;
- uint32_t bits;
- unsigned long flags;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
-
- hpdi_writel(dev, RX_FIFO_RESET_BIT, BOARD_CONTROL_REG);
-
- abort_dma(dev, 0);
-
- devpriv->dma_desc_index = 0;
-
- /* These register are supposedly unused during chained dma,
- * but I have found that left over values from last operation
- * occasionally cause problems with transfer of first dma
- * block. Initializing them to zero seems to fix the problem. */
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
- /* give location of first dma descriptor */
- bits =
- devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
- writel(bits, devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- /* enable dma transfer */
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->dio_count = cmd->stop_arg;
- else
- devpriv->dio_count = 1;
-
- /* clear over/under run status flags */
- writel(RX_UNDERRUN_BIT | RX_OVERRUN_BIT,
- devpriv->hpdi_iobase + BOARD_STATUS_REG);
- /* enable interrupts */
- writel(intr_bit(RX_FULL_INTR),
- devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- hpdi_writel(dev, RX_ENABLE_BIT, BOARD_CONTROL_REG);
-
- return 0;
-}
-
-static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- if (s->io_bits)
- return -EINVAL;
- else
- return di_cmd(dev, s);
-}
-
-static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
- uint32_t next_transfer_addr;
- int j;
- int num_samples = 0;
- void __iomem *pci_addr_reg;
-
- if (channel)
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
- else
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
-
- /* loop until we have read all the full buffers */
- j = 0;
- for (next_transfer_addr = readl(pci_addr_reg);
- (next_transfer_addr <
- le32_to_cpu(devpriv->dma_desc[devpriv->dma_desc_index].
- pci_start_addr)
- || next_transfer_addr >=
- le32_to_cpu(devpriv->dma_desc[devpriv->dma_desc_index].
- pci_start_addr) + devpriv->block_size)
- && j < devpriv->num_dma_descriptors; j++) {
- /* transfer data from dma buffer to comedi buffer */
- num_samples = devpriv->block_size / sizeof(uint32_t);
- if (async->cmd.stop_src == TRIG_COUNT) {
- if (num_samples > devpriv->dio_count)
- num_samples = devpriv->dio_count;
- devpriv->dio_count -= num_samples;
- }
- cfc_write_array_to_buffer(dev->read_subdev,
- devpriv->desc_dio_buffer[devpriv->
- dma_desc_index],
- num_samples * sizeof(uint32_t));
- devpriv->dma_desc_index++;
- devpriv->dma_desc_index %= devpriv->num_dma_descriptors;
- }
- /* XXX check for buffer overrun somehow */
-}
-
-static irqreturn_t handle_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct hpdi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- uint32_t hpdi_intr_status, hpdi_board_status;
- uint32_t plx_status;
- uint32_t plx_bits;
- uint8_t dma0_status, dma1_status;
- unsigned long flags;
-
- if (!dev->attached)
- return IRQ_NONE;
-
- plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
- if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
- return IRQ_NONE;
-
- hpdi_intr_status = readl(devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
- hpdi_board_status = readl(devpriv->hpdi_iobase + BOARD_STATUS_REG);
-
- async->events = 0;
-
- if (hpdi_intr_status) {
- writel(hpdi_intr_status,
- devpriv->hpdi_iobase + INTERRUPT_STATUS_REG);
- }
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
- writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
-
- if (dma0_status & PLX_DMA_EN_BIT)
- drain_dma_buffers(dev, 0);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* spin lock makes sure no one else changes plx dma control reg */
- spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) { /* XXX *//* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- }
- spin_unlock_irqrestore(&dev->spinlock, flags);
-
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- }
-
- if (hpdi_board_status & RX_OVERRUN_BIT) {
- comedi_error(dev, "rx fifo overrun");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- }
-
- if (hpdi_board_status & RX_UNDERRUN_BIT) {
- comedi_error(dev, "rx fifo underrun");
- async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- }
-
- if (devpriv->dio_count == 0)
- async->events |= COMEDI_CB_EOA;
-
- cfc_handle_events(dev, s);
-
- return IRQ_HANDLED;
-}
-
-static void abort_dma(struct comedi_device *dev, unsigned int channel)
-{
- struct hpdi_private *devpriv = dev->private;
- unsigned long flags;
-
- /* spinlock for plx dma control/status reg */
- spin_lock_irqsave(&dev->spinlock, flags);
-
- plx9080_abort_dma(devpriv->plx9080_iobase, channel);
-
- spin_unlock_irqrestore(&dev->spinlock, flags);
-}
-
-static int hpdi_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct hpdi_private *devpriv = dev->private;
-
- hpdi_writel(dev, 0, BOARD_CONTROL_REG);
-
- writel(0, devpriv->hpdi_iobase + INTERRUPT_CONTROL_REG);
-
- abort_dma(dev, 0);
-
- return 0;
-}
-
static struct comedi_driver gsc_hpdi_driver = {
.driver_name = "gsc_hpdi",
.module = THIS_MODULE,
- .auto_attach = hpdi_auto_attach,
- .detach = hpdi_detach,
+ .auto_attach = gsc_hpdi_auto_attach,
+ .detach = gsc_hpdi_detach,
};
static int gsc_hpdi_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 80539b2bea1a..0b8b2162b76b 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -171,12 +171,27 @@ static void setup_channel_list(struct comedi_device *dev,
}
}
+static int icp_multi_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct icp_multi_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->io_addr + ICP_MULTI_ADC_CSR);
+ if ((status & ADC_BSY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int icp_multi_insn_read_ai(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct icp_multi_private *devpriv = dev->private;
- int n, timeout;
+ int ret = 0;
+ int n;
/* Disable A/D conversion ready interrupt */
devpriv->IntEnable &= ~ADC_READY;
@@ -199,33 +214,10 @@ static int icp_multi_insn_read_ai(struct comedi_device *dev,
udelay(1);
/* Wait for conversion to complete, or get fed up waiting */
- timeout = 100;
- while (timeout--) {
- if (!(readw(devpriv->io_addr +
- ICP_MULTI_ADC_CSR) & ADC_BSY))
- goto conv_finish;
-
- udelay(1);
- }
-
- /* If we reach here, a timeout has occurred */
- comedi_error(dev, "A/D insn timeout");
-
- /* Disable interrupt */
- devpriv->IntEnable &= ~ADC_READY;
- writew(devpriv->IntEnable, devpriv->io_addr + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= ADC_READY;
- writew(devpriv->IntStatus,
- devpriv->io_addr + ICP_MULTI_INT_STAT);
+ ret = comedi_timeout(dev, s, insn, icp_multi_ai_eoc, 0);
+ if (ret)
+ break;
- /* Clear data received */
- data[n] = 0;
-
- return -ETIME;
-
-conv_finish:
data[n] =
(readw(devpriv->io_addr + ICP_MULTI_AI) >> 4) & 0x0fff;
}
@@ -238,7 +230,21 @@ conv_finish:
devpriv->IntStatus |= ADC_READY;
writew(devpriv->IntStatus, devpriv->io_addr + ICP_MULTI_INT_STAT);
- return n;
+ return ret ? ret : n;
+}
+
+static int icp_multi_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct icp_multi_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readw(devpriv->io_addr + ICP_MULTI_DAC_CSR);
+ if ((status & DAC_BSY) == 0)
+ return 0;
+ return -EBUSY;
}
static int icp_multi_insn_write_ao(struct comedi_device *dev,
@@ -246,7 +252,8 @@ static int icp_multi_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct icp_multi_private *devpriv = dev->private;
- int n, chan, range, timeout;
+ int n, chan, range;
+ int ret;
/* Disable D/A conversion ready interrupt */
devpriv->IntEnable &= ~DAC_READY;
@@ -274,33 +281,24 @@ static int icp_multi_insn_write_ao(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
/* Wait for analogue output data register to be
* ready for new data, or get fed up waiting */
- timeout = 100;
- while (timeout--) {
- if (!(readw(devpriv->io_addr +
- ICP_MULTI_DAC_CSR) & DAC_BSY))
- goto dac_ready;
-
- udelay(1);
+ ret = comedi_timeout(dev, s, insn, icp_multi_ao_eoc, 0);
+ if (ret) {
+ /* Disable interrupt */
+ devpriv->IntEnable &= ~DAC_READY;
+ writew(devpriv->IntEnable,
+ devpriv->io_addr + ICP_MULTI_INT_EN);
+
+ /* Clear interrupt status */
+ devpriv->IntStatus |= DAC_READY;
+ writew(devpriv->IntStatus,
+ devpriv->io_addr + ICP_MULTI_INT_STAT);
+
+ /* Clear data received */
+ devpriv->ao_data[chan] = 0;
+
+ return ret;
}
- /* If we reach here, a timeout has occurred */
- comedi_error(dev, "D/A insn timeout");
-
- /* Disable interrupt */
- devpriv->IntEnable &= ~DAC_READY;
- writew(devpriv->IntEnable, devpriv->io_addr + ICP_MULTI_INT_EN);
-
- /* Clear interrupt status */
- devpriv->IntStatus |= DAC_READY;
- writew(devpriv->IntStatus,
- devpriv->io_addr + ICP_MULTI_INT_STAT);
-
- /* Clear data received */
- devpriv->ao_data[chan] = 0;
-
- return -ETIME;
-
-dac_ready:
/* Write data to analogue output data register */
writew(data[n], devpriv->io_addr + ICP_MULTI_AO);
@@ -565,9 +563,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
devpriv->valid = 1;
- dev_info(dev->class_dev, "%s attached, irq %sabled\n",
- dev->board_name, dev->irq ? "en" : "dis");
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 8577778441fa..3558ab3b6e1f 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -190,20 +190,18 @@ static int ii20k_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int ii20k_ai_wait_eoc(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int timeout)
+static int ii20k_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
void __iomem *iobase = ii20k_module_iobase(dev, s);
unsigned char status;
- do {
- status = readb(iobase + II20K_AI_STATUS_REG);
- if ((status & II20K_AI_STATUS_INT) == 0)
- return 0;
- } while (timeout--);
-
- return -ETIME;
+ status = readb(iobase + II20K_AI_STATUS_REG);
+ if ((status & II20K_AI_STATUS_INT) == 0)
+ return 0;
+ return -EBUSY;
}
static void ii20k_ai_setup(struct comedi_device *dev,
@@ -263,7 +261,7 @@ static int ii20k_ai_insn_read(struct comedi_device *dev,
/* generate a software start convert signal */
readb(iobase + II20K_AI_PACER_RESET_REG);
- ret = ii20k_ai_wait_eoc(dev, s, 100);
+ ret = comedi_timeout(dev, s, insn, ii20k_ai_eoc, 0);
if (ret)
return ret;
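Both the icp_multi and ii_pci20kc conversions above rely on the same contract: the end-of-conversion callback returns 0 once the busy flag clears and -EBUSY to keep polling, and the core helper turns an expired wait into an error. A simplified model of that helper, a sketch of the contract rather than the comedi core implementation, is:

static int comedi_timeout_sketch(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 int (*cb)(struct comedi_device *,
					   struct comedi_subdevice *,
					   struct comedi_insn *,
					   unsigned long),
				 unsigned long context)
{
	/* the 1 s budget here is illustrative, not the core's value */
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = cb(dev, s, insn, context);
		if (ret != -EBUSY)
			return ret;	/* 0 when ready, or a hard error */
		cpu_relax();
	}

	return -ETIMEDOUT;
}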
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 6100c12c164f..a8db9d86aadc 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -51,23 +51,55 @@
#include "jr3_pci.h"
#define PCI_VENDOR_ID_JR3 0x1762
-#define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
-#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
-#define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
-#define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
-#define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
-struct jr3_pci_dev_private {
- struct jr3_t __iomem *iobase;
- int n_channels;
- struct timer_list timer;
+enum jr3_pci_boardid {
+ BOARD_JR3_1,
+ BOARD_JR3_2,
+ BOARD_JR3_3,
+ BOARD_JR3_4,
};
-struct poll_delay_t {
+struct jr3_pci_board {
+ const char *name;
+ int n_subdevs;
+};
+
+static const struct jr3_pci_board jr3_pci_boards[] = {
+ [BOARD_JR3_1] = {
+ .name = "jr3_pci_1",
+ .n_subdevs = 1,
+ },
+ [BOARD_JR3_2] = {
+ .name = "jr3_pci_2",
+ .n_subdevs = 2,
+ },
+ [BOARD_JR3_3] = {
+ .name = "jr3_pci_3",
+ .n_subdevs = 3,
+ },
+ [BOARD_JR3_4] = {
+ .name = "jr3_pci_4",
+ .n_subdevs = 4,
+ },
+};
+
+struct jr3_pci_transform {
+ struct {
+ u16 link_type;
+ s16 link_amount;
+ } link[8];
+};
+
+struct jr3_pci_poll_delay {
int min;
int max;
};
+struct jr3_pci_dev_private {
+ struct jr3_t __iomem *iobase;
+ struct timer_list timer;
+};
+
struct jr3_pci_subdev_private {
struct jr3_channel __iomem *channel;
unsigned long next_time_min;
@@ -79,7 +111,6 @@ struct jr3_pci_subdev_private {
state_jr3_init_use_offset_complete,
state_jr3_done
} state;
- int channel_no;
int serial_no;
int model_no;
struct {
@@ -92,9 +123,9 @@ struct jr3_pci_subdev_private {
int retries;
};
-static struct poll_delay_t poll_delay_min_max(int min, int max)
+static struct jr3_pci_poll_delay poll_delay_min_max(int min, int max)
{
- struct poll_delay_t result;
+ struct jr3_pci_poll_delay result;
result.min = min;
result.max = max;
@@ -106,15 +137,8 @@ static int is_complete(struct jr3_channel __iomem *channel)
return get_s16(&channel->command_word0) == 0;
}
-struct transform_t {
- struct {
- u16 link_type;
- s16 link_amount;
- } link[8];
-};
-
static void set_transforms(struct jr3_channel __iomem *channel,
- struct transform_t transf, short num)
+ struct jr3_pci_transform transf, short num)
{
int i;
@@ -194,110 +218,99 @@ static struct six_axis_t get_max_full_scales(struct jr3_channel __iomem
return result;
}
+static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan)
+{
+ struct jr3_pci_subdev_private *spriv = s->private;
+ unsigned int val = 0;
+
+ if (spriv->state != state_jr3_done)
+ return 0;
+
+ if (chan < 56) {
+ unsigned int axis = chan % 8;
+ unsigned int filter = chan / 8;
+
+ switch (axis) {
+ case 0:
+ val = get_s16(&spriv->channel->filter[filter].fx);
+ break;
+ case 1:
+ val = get_s16(&spriv->channel->filter[filter].fy);
+ break;
+ case 2:
+ val = get_s16(&spriv->channel->filter[filter].fz);
+ break;
+ case 3:
+ val = get_s16(&spriv->channel->filter[filter].mx);
+ break;
+ case 4:
+ val = get_s16(&spriv->channel->filter[filter].my);
+ break;
+ case 5:
+ val = get_s16(&spriv->channel->filter[filter].mz);
+ break;
+ case 6:
+ val = get_s16(&spriv->channel->filter[filter].v1);
+ break;
+ case 7:
+ val = get_s16(&spriv->channel->filter[filter].v2);
+ break;
+ }
+ val += 0x4000;
+ } else if (chan == 56) {
+ val = get_u16(&spriv->channel->model_no);
+ } else if (chan == 57) {
+ val = get_u16(&spriv->channel->serial_no);
+ }
+
+ return val;
+}
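As a worked example of the mapping above: channel 13 gives filter = 13 / 8 = 1 and axis = 13 % 8 = 5, so the sample returned is filter[1].mz plus the 0x4000 offset, while channels 56 and 57 return the sensor's model and serial numbers unshifted.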
+
static int jr3_pci_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int result;
- struct jr3_pci_subdev_private *p;
- int channel;
-
- p = s->private;
- channel = CR_CHAN(insn->chanspec);
- if (p == NULL || channel > 57) {
- result = -EINVAL;
- } else {
- int i;
-
- result = insn->n;
- if (p->state != state_jr3_done ||
- (get_u16(&p->channel->errors) & (watch_dog | watch_dog2 |
- sensor_change))) {
- /* No sensor or sensor changed */
- if (p->state == state_jr3_done) {
- /* Restart polling */
- p->state = state_jr3_poll;
- }
- result = -EAGAIN;
- }
- for (i = 0; i < insn->n; i++) {
- if (channel < 56) {
- int axis, filter;
-
- axis = channel % 8;
- filter = channel / 8;
- if (p->state != state_jr3_done) {
- data[i] = 0;
- } else {
- int F = 0;
- switch (axis) {
- case 0:
- F = get_s16(&p->channel->
- filter[filter].fx);
- break;
- case 1:
- F = get_s16(&p->channel->
- filter[filter].fy);
- break;
- case 2:
- F = get_s16(&p->channel->
- filter[filter].fz);
- break;
- case 3:
- F = get_s16(&p->channel->
- filter[filter].mx);
- break;
- case 4:
- F = get_s16(&p->channel->
- filter[filter].my);
- break;
- case 5:
- F = get_s16(&p->channel->
- filter[filter].mz);
- break;
- case 6:
- F = get_s16(&p->channel->
- filter[filter].v1);
- break;
- case 7:
- F = get_s16(&p->channel->
- filter[filter].v2);
- break;
- }
- data[i] = F + 0x4000;
- }
- } else if (channel == 56) {
- if (p->state != state_jr3_done)
- data[i] = 0;
- else
- data[i] =
- get_u16(&p->channel->model_no);
- } else if (channel == 57) {
- if (p->state != state_jr3_done)
- data[i] = 0;
- else
- data[i] =
- get_u16(&p->channel->serial_no);
- }
+ struct jr3_pci_subdev_private *spriv = s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ u16 errors;
+ int i;
+
+ if (!spriv)
+ return -EINVAL;
+
+ errors = get_u16(&spriv->channel->errors);
+ if (spriv->state != state_jr3_done ||
+ (errors & (watch_dog | watch_dog2 | sensor_change))) {
+ /* No sensor or sensor changed */
+ if (spriv->state == state_jr3_done) {
+ /* Restart polling */
+ spriv->state = state_jr3_poll;
}
+ return -EAGAIN;
}
- return result;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = jr3_pci_ai_read_chan(dev, s, chan);
+
+ return insn->n;
}
static int jr3_pci_open(struct comedi_device *dev)
{
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
int i;
- struct jr3_pci_dev_private *devpriv = dev->private;
dev_dbg(dev->class_dev, "jr3_pci_open\n");
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *p;
-
- p = dev->subdevices[i].private;
- if (p) {
- dev_dbg(dev->class_dev, "serial: %p %d (%d)\n", p,
- p->serial_no, p->channel_no);
- }
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
+ if (spriv)
+ dev_dbg(dev->class_dev, "serial: %p %d (%d)\n",
+ spriv, spriv->serial_no, s->index);
}
return 0;
}
@@ -326,271 +339,262 @@ static int read_idm_word(const u8 *data, size_t size, int *pos,
return result;
}
-static int jr3_download_firmware(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
+static int jr3_check_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size)
{
+ int more = 1;
+ int pos = 0;
+
/*
* IDM file format is:
* { count, address, data <count> } *
* ffff
*/
- int result, more, pos, OK;
-
- result = 0;
- more = 1;
- pos = 0;
- OK = 0;
while (more) {
- unsigned int count, addr;
+ unsigned int count = 0;
+ unsigned int addr = 0;
more = more && read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff) {
- OK = 1;
- break;
- }
+ if (more && count == 0xffff)
+ return 0;
+
more = more && read_idm_word(data, size, &pos, &addr);
while (more && count > 0) {
- unsigned int dummy;
+ unsigned int dummy = 0;
+
more = more && read_idm_word(data, size, &pos, &dummy);
count--;
}
}
- if (!OK) {
- result = -ENODATA;
- } else {
- int i;
- struct jr3_pci_dev_private *p = dev->private;
+ return -ENODATA;
+}
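To make the format concrete, a minimal (hypothetical) IDM stream the checker above would accept is the word sequence 0002 1000 00ab cdef ffff: a block with count = 2, load address 0x1000 and two data words, followed by the 0xffff terminator that makes jr3_check_firmware() return 0; a stream that runs out of words before the terminator yields -ENODATA.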
+
+static void jr3_write_firmware(struct comedi_device *dev,
+ int subdev, const u8 *data, size_t size)
+{
+ struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_t __iomem *iobase = devpriv->iobase;
+ u32 __iomem *lo;
+ u32 __iomem *hi;
+ int more = 1;
+ int pos = 0;
+
+ while (more) {
+ unsigned int count = 0;
+ unsigned int addr = 0;
+
+ more = more && read_idm_word(data, size, &pos, &count);
+ if (more && count == 0xffff)
+ return;
+
+ more = more && read_idm_word(data, size, &pos, &addr);
+
+ dev_dbg(dev->class_dev, "Loading#%d %4.4x bytes at %4.4x\n",
+ subdev, count, addr);
+
+ while (more && count > 0) {
+ if (addr & 0x4000) {
+ /* 16 bit data, never seen in real life!! */
+ unsigned int data1 = 0;
- for (i = 0; i < p->n_channels; i++) {
- struct jr3_pci_subdev_private *sp;
+ more = more &&
+ read_idm_word(data, size, &pos, &data1);
+ count--;
+ /* jr3[addr + 0x20000 * pnum] = data1; */
+ } else {
+ /* Download 24 bit program */
+ unsigned int data1 = 0;
+ unsigned int data2 = 0;
+
+ lo = &iobase->channel[subdev].program_lo[addr];
+ hi = &iobase->channel[subdev].program_hi[addr];
- sp = dev->subdevices[i].private;
- more = 1;
- pos = 0;
- while (more) {
- unsigned int count, addr;
more = more &&
- read_idm_word(data, size, &pos, &count);
- if (more && count == 0xffff)
- break;
+ read_idm_word(data, size, &pos, &data1);
more = more &&
- read_idm_word(data, size, &pos, &addr);
- dev_dbg(dev->class_dev,
- "Loading#%d %4.4x bytes at %4.4x\n",
- i, count, addr);
- while (more && count > 0) {
- if (addr & 0x4000) {
- /* 16 bit data, never seen
- * in real life!! */
- unsigned int data1;
-
- more = more &&
- read_idm_word(data,
- size, &pos,
- &data1);
- count--;
- /* jr3[addr + 0x20000 * pnum] =
- data1; */
- } else {
- /* Download 24 bit program */
- unsigned int data1, data2;
-
- more = more &&
- read_idm_word(data,
- size, &pos,
- &data1);
- more = more &&
- read_idm_word(data, size,
- &pos,
- &data2);
- count -= 2;
- if (more) {
- set_u16(&p->
- iobase->channel
- [i].program_low
- [addr], data1);
- udelay(1);
- set_u16(&p->
- iobase->channel
- [i].program_high
- [addr], data2);
- udelay(1);
- }
- }
- addr++;
+ read_idm_word(data, size, &pos, &data2);
+ count -= 2;
+ if (more) {
+ set_u16(lo, data1);
+ udelay(1);
+ set_u16(hi, data2);
+ udelay(1);
}
}
+ addr++;
}
}
- return result;
}
-static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
+static int jr3_download_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
- struct poll_delay_t result = poll_delay_min_max(1000, 2000);
- struct jr3_pci_subdev_private *p = s->private;
- int i;
+ int subdev;
+ int ret;
- if (p) {
- struct jr3_channel __iomem *channel = p->channel;
- int errors = get_u16(&channel->errors);
-
- if (errors != p->errors)
- p->errors = errors;
-
- if (errors & (watch_dog | watch_dog2 | sensor_change))
- /* Sensor communication lost, force poll mode */
- p->state = state_jr3_poll;
-
- switch (p->state) {
- case state_jr3_poll: {
- u16 model_no = get_u16(&channel->model_no);
- u16 serial_no = get_u16(&channel->serial_no);
- if ((errors & (watch_dog | watch_dog2)) ||
- model_no == 0 || serial_no == 0) {
- /*
- * Still no sensor, keep on polling.
- * Since it takes up to 10 seconds
- * for offsets to stabilize, polling
- * each second should suffice.
- */
- result = poll_delay_min_max(1000, 2000);
- } else {
- p->retries = 0;
- p->state =
- state_jr3_init_wait_for_offset;
- result = poll_delay_min_max(1000, 2000);
- }
- }
- break;
- case state_jr3_init_wait_for_offset:
- p->retries++;
- if (p->retries < 10) {
- /* Wait for offeset to stabilize
- * (< 10 s according to manual) */
- result = poll_delay_min_max(1000, 2000);
- } else {
- struct transform_t transf;
+ /* verify IDM file format */
+ ret = jr3_check_firmware(dev, data, size);
+ if (ret)
+ return ret;
- p->model_no = get_u16(&channel->model_no);
- p->serial_no = get_u16(&channel->serial_no);
+ /* write firmware to each subdevice */
+ for (subdev = 0; subdev < dev->n_subdevices; subdev++)
+ jr3_write_firmware(dev, subdev, data, size);
- /* Transformation all zeros */
- for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
- transf.link[i].link_type =
- (enum link_types)0;
- transf.link[i].link_amount = 0;
- }
+ return 0;
+}
- set_transforms(channel, transf, 0);
- use_transform(channel, 0);
- p->state = state_jr3_init_transform_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_transform_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- /* Set full scale */
- struct six_axis_t min_full_scale;
- struct six_axis_t max_full_scale;
-
- min_full_scale = get_min_full_scales(channel);
- max_full_scale = get_max_full_scales(channel);
- set_full_scales(channel, max_full_scale);
-
- p->state =
- state_jr3_init_set_full_scale_complete;
- /* Allow 20 ms for completion */
- result = poll_delay_min_max(20, 100);
- }
- break;
- case state_jr3_init_set_full_scale_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- struct force_array __iomem *full_scale;
-
- /* Use ranges in kN or we will
- * overflow around 2000N! */
- full_scale = &channel->full_scale;
- p->range[0].range.min =
- -get_s16(&full_scale->fx) * 1000;
- p->range[0].range.max =
- get_s16(&full_scale->fx) * 1000;
- p->range[1].range.min =
- -get_s16(&full_scale->fy) * 1000;
- p->range[1].range.max =
- get_s16(&full_scale->fy) * 1000;
- p->range[2].range.min =
- -get_s16(&full_scale->fz) * 1000;
- p->range[2].range.max =
- get_s16(&full_scale->fz) * 1000;
- p->range[3].range.min =
- -get_s16(&full_scale->mx) * 100;
- p->range[3].range.max =
- get_s16(&full_scale->mx) * 100;
- p->range[4].range.min =
- -get_s16(&full_scale->my) * 100;
- p->range[4].range.max =
- get_s16(&full_scale->my) * 100;
- p->range[5].range.min =
- -get_s16(&full_scale->mz) * 100;
- p->range[5].range.max =
- get_s16(&full_scale->mz) * 100; /* ?? */
- p->range[6].range.min =
- -get_s16(&full_scale->v1) * 100;/* ?? */
- p->range[6].range.max =
- get_s16(&full_scale->v1) * 100; /* ?? */
- p->range[7].range.min =
- -get_s16(&full_scale->v2) * 100;/* ?? */
- p->range[7].range.max =
- get_s16(&full_scale->v2) * 100; /* ?? */
- p->range[8].range.min = 0;
- p->range[8].range.max = 65535;
-
- use_offset(channel, 0);
- p->state = state_jr3_init_use_offset_complete;
- /* Allow 40 ms for completion */
- result = poll_delay_min_max(40, 100);
- }
- break;
- case state_jr3_init_use_offset_complete:
- if (!is_complete(channel)) {
- result = poll_delay_min_max(20, 100);
- } else {
- set_s16(&channel->offsets.fx, 0);
- set_s16(&channel->offsets.fy, 0);
- set_s16(&channel->offsets.fz, 0);
- set_s16(&channel->offsets.mx, 0);
- set_s16(&channel->offsets.my, 0);
- set_s16(&channel->offsets.mz, 0);
+static struct jr3_pci_poll_delay
+jr3_pci_poll_subdevice(struct comedi_subdevice *s)
+{
+ struct jr3_pci_subdev_private *spriv = s->private;
+ struct jr3_pci_poll_delay result = poll_delay_min_max(1000, 2000);
+ struct jr3_channel __iomem *channel;
+ u16 model_no;
+ u16 serial_no;
+ int errors;
+ int i;
- set_offset(channel);
+ if (!spriv)
+ return result;
- p->state = state_jr3_done;
+ channel = spriv->channel;
+ errors = get_u16(&channel->errors);
+
+ if (errors != spriv->errors)
+ spriv->errors = errors;
+
+ /* Sensor communication lost? force poll mode */
+ if (errors & (watch_dog | watch_dog2 | sensor_change))
+ spriv->state = state_jr3_poll;
+
+ switch (spriv->state) {
+ case state_jr3_poll:
+ model_no = get_u16(&channel->model_no);
+ serial_no = get_u16(&channel->serial_no);
+
+ if ((errors & (watch_dog | watch_dog2)) ||
+ model_no == 0 || serial_no == 0) {
+ /*
+ * Still no sensor, keep on polling.
+ * Since it takes up to 10 seconds for offsets to
+ * stabilize, polling each second should suffice.
+ */
+ } else {
+ spriv->retries = 0;
+ spriv->state = state_jr3_init_wait_for_offset;
+ }
+ break;
+ case state_jr3_init_wait_for_offset:
+ spriv->retries++;
+ if (spriv->retries < 10) {
+ /*
+ * Wait for offset to stabilize
+ * (< 10 s according to manual)
+ */
+ } else {
+ struct jr3_pci_transform transf;
+
+ spriv->model_no = get_u16(&channel->model_no);
+ spriv->serial_no = get_u16(&channel->serial_no);
+
+ /* Transformation all zeros */
+ for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
+ transf.link[i].link_type = (enum link_types)0;
+ transf.link[i].link_amount = 0;
}
- break;
- case state_jr3_done:
- poll_delay_min_max(10000, 20000);
- break;
- default:
- poll_delay_min_max(1000, 2000);
- break;
+
+ set_transforms(channel, transf, 0);
+ use_transform(channel, 0);
+ spriv->state = state_jr3_init_transform_complete;
+ /* Allow 20 ms for completion */
+ result = poll_delay_min_max(20, 100);
+ }
+ break;
+ case state_jr3_init_transform_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ /* Set full scale */
+ struct six_axis_t min_full_scale;
+ struct six_axis_t max_full_scale;
+
+ min_full_scale = get_min_full_scales(channel);
+ max_full_scale = get_max_full_scales(channel);
+ set_full_scales(channel, max_full_scale);
+
+ spriv->state = state_jr3_init_set_full_scale_complete;
+ /* Allow 20 ms for completion */
+ result = poll_delay_min_max(20, 100);
+ }
+ break;
+ case state_jr3_init_set_full_scale_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ struct force_array __iomem *fs = &channel->full_scale;
+
+ /* Use ranges in kN or we will overflow around 2000N! */
+ spriv->range[0].range.min = -get_s16(&fs->fx) * 1000;
+ spriv->range[0].range.max = get_s16(&fs->fx) * 1000;
+ spriv->range[1].range.min = -get_s16(&fs->fy) * 1000;
+ spriv->range[1].range.max = get_s16(&fs->fy) * 1000;
+ spriv->range[2].range.min = -get_s16(&fs->fz) * 1000;
+ spriv->range[2].range.max = get_s16(&fs->fz) * 1000;
+ spriv->range[3].range.min = -get_s16(&fs->mx) * 100;
+ spriv->range[3].range.max = get_s16(&fs->mx) * 100;
+ spriv->range[4].range.min = -get_s16(&fs->my) * 100;
+ spriv->range[4].range.max = get_s16(&fs->my) * 100;
+ spriv->range[5].range.min = -get_s16(&fs->mz) * 100;
+ /* the next five are questionable */
+ spriv->range[5].range.max = get_s16(&fs->mz) * 100;
+ spriv->range[6].range.min = -get_s16(&fs->v1) * 100;
+ spriv->range[6].range.max = get_s16(&fs->v1) * 100;
+ spriv->range[7].range.min = -get_s16(&fs->v2) * 100;
+ spriv->range[7].range.max = get_s16(&fs->v2) * 100;
+ spriv->range[8].range.min = 0;
+ spriv->range[8].range.max = 65535;
+
+ use_offset(channel, 0);
+ spriv->state = state_jr3_init_use_offset_complete;
+ /* Allow 40 ms for completion */
+ result = poll_delay_min_max(40, 100);
+ }
+ break;
+ case state_jr3_init_use_offset_complete:
+ if (!is_complete(channel)) {
+ result = poll_delay_min_max(20, 100);
+ } else {
+ set_s16(&channel->offsets.fx, 0);
+ set_s16(&channel->offsets.fy, 0);
+ set_s16(&channel->offsets.fz, 0);
+ set_s16(&channel->offsets.mx, 0);
+ set_s16(&channel->offsets.my, 0);
+ set_s16(&channel->offsets.mz, 0);
+
+ set_offset(channel);
+
+ spriv->state = state_jr3_done;
}
+ break;
+ case state_jr3_done:
+ result = poll_delay_min_max(10000, 20000);
+ break;
+ default:
+ break;
}
+
return result;
}
static void jr3_pci_poll_dev(unsigned long data)
{
- unsigned long flags;
struct comedi_device *dev = (struct comedi_device *)data;
struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
+ unsigned long flags;
unsigned long now;
int delay;
int i;
@@ -598,18 +602,22 @@ static void jr3_pci_poll_dev(unsigned long data)
spin_lock_irqsave(&dev->spinlock, flags);
delay = 1000;
now = jiffies;
- /* Poll all channels that are ready to be polled */
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *subdevpriv =
- dev->subdevices[i].private;
- if (now > subdevpriv->next_time_min) {
- struct poll_delay_t sub_delay;
-
- sub_delay = jr3_pci_poll_subdevice(&dev->subdevices[i]);
- subdevpriv->next_time_min =
- jiffies + msecs_to_jiffies(sub_delay.min);
- subdevpriv->next_time_max =
- jiffies + msecs_to_jiffies(sub_delay.max);
+
+ /* Poll all channels that are ready to be polled */
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
+
+ if (now > spriv->next_time_min) {
+ struct jr3_pci_poll_delay sub_delay;
+
+ sub_delay = jr3_pci_poll_subdevice(s);
+
+ spriv->next_time_min = jiffies +
+ msecs_to_jiffies(sub_delay.min);
+ spriv->next_time_max = jiffies +
+ msecs_to_jiffies(sub_delay.max);
+
if (sub_delay.max && sub_delay.max < delay)
/*
* Wake up as late as possible ->
@@ -624,13 +632,58 @@ static void jr3_pci_poll_dev(unsigned long data)
add_timer(&devpriv->timer);
}
+static struct jr3_pci_subdev_private *
+jr3_pci_alloc_spriv(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_subdev_private *spriv;
+ int j;
+ int k;
+
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
+ if (!spriv)
+ return NULL;
+
+ spriv->channel = &devpriv->iobase->channel[s->index].data;
+
+ for (j = 0; j < 8; j++) {
+ spriv->range[j].length = 1;
+ spriv->range[j].range.min = -1000000;
+ spriv->range[j].range.max = 1000000;
+
+ for (k = 0; k < 7; k++) {
+ spriv->range_table_list[j + k * 8] =
+ (struct comedi_lrange *)&spriv->range[j];
+ spriv->maxdata_list[j + k * 8] = 0x7fff;
+ }
+ }
+ spriv->range[8].length = 1;
+ spriv->range[8].range.min = 0;
+ spriv->range[8].range.max = 65536;
+
+ spriv->range_table_list[56] = (struct comedi_lrange *)&spriv->range[8];
+ spriv->range_table_list[57] = (struct comedi_lrange *)&spriv->range[8];
+ spriv->maxdata_list[56] = 0xffff;
+ spriv->maxdata_list[57] = 0xffff;
+
+ dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
+ spriv->channel, devpriv->iobase,
+ ((char __iomem *)spriv->channel -
+ (char __iomem *)devpriv->iobase));
+
+ return spriv;
+}
+
static int jr3_pci_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+ unsigned long context)
{
- int result;
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- int i;
+ static const struct jr3_pci_board *board = NULL;
struct jr3_pci_dev_private *devpriv;
+ struct jr3_pci_subdev_private *spriv;
+ struct comedi_subdevice *s;
+ int ret;
+ int i;
if (sizeof(struct jr3_channel) != 0xc00) {
dev_err(dev->class_dev,
@@ -639,106 +692,56 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
return -EINVAL;
}
+ if (context < ARRAY_SIZE(jr3_pci_boards))
+ board = &jr3_pci_boards[context];
+ if (!board)
+ return -ENODEV;
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
init_timer(&devpriv->timer);
- switch (pcidev->device) {
- case PCI_DEVICE_ID_JR3_1_CHANNEL:
- case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:
- devpriv->n_channels = 1;
- break;
- case PCI_DEVICE_ID_JR3_2_CHANNEL:
- devpriv->n_channels = 2;
- break;
- case PCI_DEVICE_ID_JR3_3_CHANNEL:
- devpriv->n_channels = 3;
- break;
- case PCI_DEVICE_ID_JR3_4_CHANNEL:
- devpriv->n_channels = 4;
- break;
- default:
- dev_err(dev->class_dev, "jr3_pci: pci %s not supported\n",
- pci_name(pcidev));
- return -EINVAL;
- break;
- }
- result = comedi_pci_enable(dev);
- if (result)
- return result;
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
devpriv->iobase = pci_ioremap_bar(pcidev, 0);
if (!devpriv->iobase)
return -ENOMEM;
- result = comedi_alloc_subdevices(dev, devpriv->n_channels);
- if (result)
- return result;
+ ret = comedi_alloc_subdevices(dev, board->n_subdevs);
+ if (ret)
+ return ret;
dev->open = jr3_pci_open;
- for (i = 0; i < devpriv->n_channels; i++) {
- dev->subdevices[i].type = COMEDI_SUBD_AI;
- dev->subdevices[i].subdev_flags = SDF_READABLE | SDF_GROUND;
- dev->subdevices[i].n_chan = 8 * 7 + 2;
- dev->subdevices[i].insn_read = jr3_pci_ai_insn_read;
- dev->subdevices[i].private =
- kzalloc(sizeof(struct jr3_pci_subdev_private),
- GFP_KERNEL);
- if (dev->subdevices[i].private) {
- struct jr3_pci_subdev_private *p;
- int j;
-
- p = dev->subdevices[i].private;
- p->channel = &devpriv->iobase->channel[i].data;
- dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
- p->channel, devpriv->iobase,
- ((char __iomem *)p->channel -
- (char __iomem *)devpriv->iobase));
- p->channel_no = i;
- for (j = 0; j < 8; j++) {
- int k;
-
- p->range[j].length = 1;
- p->range[j].range.min = -1000000;
- p->range[j].range.max = 1000000;
- for (k = 0; k < 7; k++) {
- p->range_table_list[j + k * 8] =
- (struct comedi_lrange *)&p->
- range[j];
- p->maxdata_list[j + k * 8] = 0x7fff;
- }
- }
- p->range[8].length = 1;
- p->range[8].range.min = 0;
- p->range[8].range.max = 65536;
-
- p->range_table_list[56] =
- (struct comedi_lrange *)&p->range[8];
- p->range_table_list[57] =
- (struct comedi_lrange *)&p->range[8];
- p->maxdata_list[56] = 0xffff;
- p->maxdata_list[57] = 0xffff;
- /* Channel specific range and maxdata */
- dev->subdevices[i].range_table = NULL;
- dev->subdevices[i].range_table_list =
- p->range_table_list;
- dev->subdevices[i].maxdata = 0;
- dev->subdevices[i].maxdata_list = p->maxdata_list;
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 8 * 7 + 2;
+ s->insn_read = jr3_pci_ai_insn_read;
+
+ spriv = jr3_pci_alloc_spriv(dev, s);
+ if (spriv) {
+ /* Channel specific range and maxdata */
+ s->range_table_list = spriv->range_table_list;
+ s->maxdata_list = spriv->maxdata_list;
}
}
/* Reset DSP card */
writel(0, &devpriv->iobase->channel[0].reset);
- result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
- "comedi/jr3pci.idm",
- jr3_download_firmware, 0);
- dev_dbg(dev->class_dev, "Firmare load %d\n", result);
-
- if (result < 0)
- return result;
+ ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ "comedi/jr3pci.idm",
+ jr3_download_firmware, 0);
+	dev_dbg(dev->class_dev, "Firmware load %d\n", ret);
+ if (ret < 0)
+ return ret;
/*
* TODO: use firmware to load preferred offset tables. Suggested
* format:
@@ -761,11 +764,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
}
/* Start card timer */
- for (i = 0; i < devpriv->n_channels; i++) {
- struct jr3_pci_subdev_private *p = dev->subdevices[i].private;
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ spriv = s->private;
- p->next_time_min = jiffies + msecs_to_jiffies(500);
- p->next_time_max = jiffies + msecs_to_jiffies(2000);
+ spriv->next_time_min = jiffies + msecs_to_jiffies(500);
+ spriv->next_time_max = jiffies + msecs_to_jiffies(2000);
}
devpriv->timer.data = (unsigned long)dev;
@@ -773,21 +777,16 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
devpriv->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&devpriv->timer);
- return result;
+ return 0;
}
static void jr3_pci_detach(struct comedi_device *dev)
{
- int i;
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv) {
del_timer_sync(&devpriv->timer);
- if (dev->subdevices) {
- for (i = 0; i < devpriv->n_channels; i++)
- kfree(dev->subdevices[i].private);
- }
if (devpriv->iobase)
iounmap(devpriv->iobase);
}
@@ -808,11 +807,11 @@ static int jr3_pci_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id jr3_pci_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL) },
- { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL) },
+ { PCI_VDEVICE(JR3, 0x1111), BOARD_JR3_1 },
+ { PCI_VDEVICE(JR3, 0x3111), BOARD_JR3_1 },
+ { PCI_VDEVICE(JR3, 0x3112), BOARD_JR3_2 },
+ { PCI_VDEVICE(JR3, 0x3113), BOARD_JR3_3 },
+ { PCI_VDEVICE(JR3, 0x3114), BOARD_JR3_4 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
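Editorial note: the full-scale handling in jr3_pci_poll_subdevice() above converts each sensor's reported full scale into comedi range limits, scaling forces by 1000 (so the ranges are effectively in kN and do not overflow near 2000 N) and moments by 100. The following is a minimal standalone sketch of just that arithmetic; the fs[] values are hypothetical stand-ins for what the driver reads with get_s16() from the channel's force_array, not real sensor data.

/* Sketch of the jr3_pci range scaling shown in the hunk above.
 * Compile with: gcc -o jr3_ranges jr3_ranges.c
 */
#include <stdio.h>

struct range_sketch { int min, max; };

int main(void)
{
	/* hypothetical full-scale readings: fx, fy, fz, mx, my, mz */
	short fs[6] = { 200, 200, 400, 12, 12, 12 };
	struct range_sketch r[6];
	int i;

	for (i = 0; i < 6; i++) {
		/* forces scaled by 1000, moments by 100, as in the driver */
		int scale = (i < 3) ? 1000 : 100;

		r[i].min = -fs[i] * scale;
		r[i].max = fs[i] * scale;
		printf("range[%d]: min=%d max=%d\n", i, r[i].min, r[i].max);
	}
	return 0;
}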
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index 3317f7a04c48..20478ae8fad6 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -671,11 +671,11 @@ struct jr3_channel {
struct jr3_t {
struct {
- u32 program_low[0x4000]; /* 0x00000 - 0x10000 */
+ u32 program_lo[0x4000]; /* 0x00000 - 0x10000 */
struct jr3_channel data; /* 0x10000 - 0x10c00 */
char pad2[0x30000 - 0x00c00]; /* 0x10c00 - 0x40000 */
- u32 program_high[0x8000]; /* 0x40000 - 0x60000 */
- u32 reset; /* 0x60000 - 0x60004 */
+ u32 program_hi[0x8000]; /* 0x40000 - 0x60000 */
+ u32 reset; /* 0x60000 - 0x60004 */
char pad3[0x20000 - 0x00004]; /* 0x60004 - 0x80000 */
} channel[4];
};
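Editorial note: the comments in jr3_t document the intended byte offset of every field, and the sizes work out so that each channel block occupies exactly 0x80000 bytes (which is also why jr3_pci_auto_attach() refuses to attach unless sizeof(struct jr3_channel) is 0xc00). A small standalone check of that arithmetic follows; the layout is re-declared with plain C types (no __iomem, jr3_channel replaced by an opaque 0xc00-byte pad), so it is only a verification sketch, not the kernel structure.

/* Verifies the per-field offsets given in the jr3_t comments above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct jr3_block_sketch {
	uint32_t program_lo[0x4000];	/* 0x00000 - 0x10000 */
	char data[0xc00];		/* 0x10000 - 0x10c00 */
	char pad2[0x30000 - 0x00c00];	/* 0x10c00 - 0x40000 */
	uint32_t program_hi[0x8000];	/* 0x40000 - 0x60000 */
	uint32_t reset;			/* 0x60000 - 0x60004 */
	char pad3[0x20000 - 0x00004];	/* 0x60004 - 0x80000 */
};

int main(void)
{
	assert(offsetof(struct jr3_block_sketch, data) == 0x10000);
	assert(offsetof(struct jr3_block_sketch, program_hi) == 0x40000);
	assert(offsetof(struct jr3_block_sketch, reset) == 0x60000);
	assert(sizeof(struct jr3_block_sketch) == 0x80000);
	printf("per-channel block size: 0x%zx bytes\n",
	       sizeof(struct jr3_block_sketch));
	return 0;
}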
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 6b9846fd8c48..ec43c38958de 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -1,92 +1,113 @@
/*
- comedi/drivers/ke_counter.c
- Comedi driver for Kolter-Electronic PCI Counter 1 Card
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ke_counter
-Description: Driver for Kolter Electronic Counter Card
-Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
-Author: Michael Hillmann
-Updated: Mon, 14 Apr 2008 15:42:42 +0100
-Status: tested
-
-Configuration Options: not applicable, uses PCI auto config
+ * ke_counter.c
+ * Comedi driver for Kolter-Electronic PCI Counter 1 Card
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-This driver is a simple driver to read the counter values from
-Kolter Electronic PCI Counter Card.
-*/
+/*
+ * Driver: ke_counter
+ * Description: Driver for Kolter Electronic Counter Card
+ * Devices: (Kolter Electronic) PCI Counter Card [ke_counter]
+ * Author: Michael Hillmann
+ * Updated: Mon, 14 Apr 2008 15:42:42 +0100
+ * Status: tested
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
-#define CNT_CARD_DEVICE_ID 0x0014
+/*
+ * PCI BAR 0 Register I/O map
+ */
+#define KE_RESET_REG(x) (0x00 + ((x) * 0x20))
+#define KE_LATCH_REG(x) (0x00 + ((x) * 0x20))
+#define KE_LSB_REG(x) (0x04 + ((x) * 0x20))
+#define KE_MID_REG(x) (0x08 + ((x) * 0x20))
+#define KE_MSB_REG(x) (0x0c + ((x) * 0x20))
+#define KE_SIGN_REG(x) (0x10 + ((x) * 0x20))
+#define KE_OSC_SEL_REG 0xf8
+#define KE_OSC_SEL_EXT (1 << 0)
+#define KE_OSC_SEL_4MHZ (2 << 0)
+#define KE_OSC_SEL_20MHZ (3 << 0)
+#define KE_DO_REG 0xfc
+
+static int ke_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
-/*-- counter write ----------------------------------------------------------*/
+ for (i = 0; i < insn->n; i++) {
+ val = data[0];
-/* This should be used only for resetting the counters; maybe it is better
- to make a special command 'reset'. */
-static int cnt_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- int chan = CR_CHAN(insn->chanspec);
-
- outb((unsigned char)((data[0] >> 24) & 0xff),
- dev->iobase + chan * 0x20 + 0x10);
- outb((unsigned char)((data[0] >> 16) & 0xff),
- dev->iobase + chan * 0x20 + 0x0c);
- outb((unsigned char)((data[0] >> 8) & 0xff),
- dev->iobase + chan * 0x20 + 0x08);
- outb((unsigned char)((data[0] >> 0) & 0xff),
- dev->iobase + chan * 0x20 + 0x04);
-
- /* return the number of samples written */
- return 1;
-}
+ /* Order matters */
+ outb((val >> 24) & 0xff, dev->iobase + KE_SIGN_REG(chan));
+ outb((val >> 16) & 0xff, dev->iobase + KE_MSB_REG(chan));
+ outb((val >> 8) & 0xff, dev->iobase + KE_MID_REG(chan));
+ outb((val >> 0) & 0xff, dev->iobase + KE_LSB_REG(chan));
+ }
-/*-- counter read -----------------------------------------------------------*/
+ return insn->n;
+}
-static int cnt_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
+static int ke_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned char a0, a1, a2, a3, a4;
- int chan = CR_CHAN(insn->chanspec);
- int result;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
- a0 = inb(dev->iobase + chan * 0x20);
- a1 = inb(dev->iobase + chan * 0x20 + 0x04);
- a2 = inb(dev->iobase + chan * 0x20 + 0x08);
- a3 = inb(dev->iobase + chan * 0x20 + 0x0c);
- a4 = inb(dev->iobase + chan * 0x20 + 0x10);
+ for (i = 0; i < insn->n; i++) {
+ /* Order matters */
+ inb(dev->iobase + KE_LATCH_REG(chan));
- result = (a1 + (a2 * 256) + (a3 * 65536));
- if (a4 > 0)
- result = result - s->maxdata;
+ val = inb(dev->iobase + KE_LSB_REG(chan));
+ val |= (inb(dev->iobase + KE_MID_REG(chan)) << 8);
+ val |= (inb(dev->iobase + KE_MSB_REG(chan)) << 16);
+ val |= (inb(dev->iobase + KE_SIGN_REG(chan)) << 24);
- *data = (unsigned int)result;
+ data[i] = val;
+ }
- /* return the number of samples read */
- return 1;
+ return insn->n;
}
-static int cnt_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int ke_counter_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data))
+ outb(s->state, dev->iobase + KE_DO_REG);
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int ke_counter_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
@@ -97,30 +118,32 @@ static int cnt_auto_attach(struct comedi_device *dev,
return ret;
dev->iobase = pci_resource_start(pcidev, 0);
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
s = &dev->subdevices[0];
- dev->read_subdev = s;
-
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE /* | SDF_COMMON */ ;
- s->n_chan = 3;
- s->maxdata = 0x00ffffff;
- s->insn_read = cnt_rinsn;
- s->insn_write = cnt_winsn;
-
- /* select 20MHz clock */
- outb(3, dev->iobase + 248);
-
- /* reset all counters */
- outb(0, dev->iobase);
- outb(0, dev->iobase + 0x20);
- outb(0, dev->iobase + 0x40);
-
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 3;
+ s->maxdata = 0x01ffffff;
+ s->range_table = &range_unknown;
+ s->insn_read = ke_counter_insn_read;
+ s->insn_write = ke_counter_insn_write;
+
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 3;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ke_counter_do_insn_bits;
+
+ outb(KE_OSC_SEL_20MHZ, dev->iobase + KE_OSC_SEL_REG);
+
+ outb(0, dev->iobase + KE_RESET_REG(0));
+ outb(0, dev->iobase + KE_RESET_REG(1));
+ outb(0, dev->iobase + KE_RESET_REG(2));
return 0;
}
@@ -128,7 +151,7 @@ static int cnt_auto_attach(struct comedi_device *dev,
static struct comedi_driver ke_counter_driver = {
.driver_name = "ke_counter",
.module = THIS_MODULE,
- .auto_attach = cnt_auto_attach,
+ .auto_attach = ke_counter_auto_attach,
.detach = comedi_pci_disable,
};
@@ -140,7 +163,7 @@ static int ke_counter_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id ke_counter_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, 0x0014) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ke_counter_pci_table);
@@ -154,5 +177,5 @@ static struct pci_driver ke_counter_pci_driver = {
module_comedi_pci_driver(ke_counter_driver, ke_counter_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Kolter Electronic Counter Card");
MODULE_LICENSE("GPL");
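Editorial note: the reworked ke_counter helpers above split and reassemble the counter value one byte register at a time (sign, MSB, MID, LSB), and the latch register must be touched first so the byte registers hold a consistent snapshot ("Order matters"). Below is a standalone sketch of just the pack/unpack arithmetic; the regs[] array is a stand-in for the port registers, and real hardware access still goes through inb()/outb() as in the driver.

/* Pack/unpack sketch for the ke_counter byte registers. */
#include <stdio.h>

static unsigned char regs[4];	/* [0]=LSB [1]=MID [2]=MSB [3]=SIGN */

static void counter_write(unsigned int val)
{
	/* on real hardware: sign, MSB, MID, LSB, in that order */
	regs[3] = (val >> 24) & 0xff;
	regs[2] = (val >> 16) & 0xff;
	regs[1] = (val >> 8) & 0xff;
	regs[0] = (val >> 0) & 0xff;
}

static unsigned int counter_read(void)
{
	unsigned int val;

	val = regs[0];
	val |= regs[1] << 8;
	val |= regs[2] << 16;
	val |= (unsigned int)regs[3] << 24;
	return val;
}

int main(void)
{
	counter_write(0x01234567);
	printf("read back: 0x%08x\n", counter_read());
	return 0;
}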
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index e739bcd66a04..f02b31b317ec 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1112,9 +1112,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
if (!dev->attached)
return IRQ_NONE;
- /* Reset all events */
- s->async->events = 0;
-
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_AI_HF) {
/* Read status register to find out what happened */
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 7f6687896401..0ff126b1fdfd 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -248,6 +248,20 @@ static int me_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
+static int me_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct me_private_data *dev_private = dev->private;
+ unsigned int status;
+
+ status = readw(dev_private->me_regbase + ME_STATUS);
+ if ((status & 0x0004) == 0)
+ return 0;
+ return -EBUSY;
+}
+
static int me_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -258,7 +272,7 @@ static int me_ai_insn_read(struct comedi_device *dev,
unsigned int rang = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
unsigned short val;
- int i;
+ int ret;
/* stop any running conversion */
dev_private->control_1 &= 0xFFFC;
@@ -290,19 +304,14 @@ static int me_ai_insn_read(struct comedi_device *dev,
readw(dev_private->me_regbase + ME_ADC_START);
/* wait for ADC fifo not empty flag */
- for (i = 100000; i > 0; i--)
- if (!(readw(dev_private->me_regbase + ME_STATUS) & 0x0004))
- break;
+ ret = comedi_timeout(dev, s, insn, me_ai_eoc, 0);
+ if (ret)
+ return ret;
/* get value from ADC fifo */
- if (i) {
- val = readw(dev_private->me_regbase + ME_READ_AD_FIFO);
- val = (val ^ 0x800) & 0x0fff;
- data[0] = val;
- } else {
- dev_err(dev->class_dev, "Cannot get single value\n");
- return -EIO;
- }
+ val = readw(dev_private->me_regbase + ME_READ_AD_FIFO);
+ val = (val ^ 0x800) & 0x0fff;
+ data[0] = val;
/* stop any running conversion */
dev_private->control_1 &= 0xFFFC;
@@ -542,9 +551,6 @@ static int me_auto_attach(struct comedi_device *dev,
s->insn_bits = me_dio_insn_bits;
s->insn_config = me_dio_insn_config;
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
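Editorial note: several conversions in this series replace open-coded busy-wait loops with comedi_timeout() plus a small end-of-conversion callback that returns 0 when the hardware is ready and -EBUSY otherwise (any other return value aborts the wait with that error). The sketch below only illustrates that callback shape in userspace; wait_ready(), the iteration budget, and fake_status are illustrative stand-ins and not the comedi core implementation.

/* Illustrative sketch of the "0 when ready, -EBUSY otherwise" style
 * used with comedi_timeout() in the hunks above.  check_ready()
 * stands in for a driver's *_ai_eoc() helper.
 */
#include <errno.h>
#include <stdio.h>

static unsigned int fake_status;	/* stand-in for a status register */

static int check_ready(unsigned long context)
{
	/* 'context' can carry the status bit to test, as multiq3 does */
	if (fake_status & context)
		return 0;
	return -EBUSY;
}

static int wait_ready(int (*cb)(unsigned long), unsigned long context)
{
	int i;

	for (i = 0; i < 100000; i++) {
		int ret = cb(context);

		if (ret != -EBUSY)
			return ret;	/* ready (0) or a hard error */
		fake_status = 0x10;	/* pretend the hardware finished */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_ready() = %d\n", wait_ready(check_ready, 0x10));
	return 0;
}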
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index 81b78e053f4e..a4f7d6f138df 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -133,21 +133,18 @@ static int mf6x4_do_insn_bits(struct comedi_device *dev,
return insn->n;
}
-static int mf6x4_ai_wait_for_eoc(struct comedi_device *dev,
- unsigned int timeout)
+static int mf6x4_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
struct mf6x4_private *devpriv = dev->private;
- unsigned int eolc;
+ unsigned int status;
- while (timeout--) {
- eolc = ioread32(devpriv->gpioc_R) & MF6X4_GPIOC_EOLC;
- if (eolc)
- return 0;
-
- udelay(1);
- }
-
- return -ETIME;
+ status = ioread32(devpriv->gpioc_R);
+ if (status & MF6X4_GPIOC_EOLC)
+ return 0;
+ return -EBUSY;
}
static int mf6x4_ai_insn_read(struct comedi_device *dev,
@@ -168,7 +165,7 @@ static int mf6x4_ai_insn_read(struct comedi_device *dev,
/* Trigger ADC conversion by reading ADSTART */
ioread16(devpriv->bar1_mem + MF6X4_ADSTART_R);
- ret = mf6x4_ai_wait_for_eoc(dev, 100);
+ ret = comedi_timeout(dev, s, insn, mf6x4_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 9c9a0ee432cf..1a572c83f996 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -527,9 +527,9 @@ EXPORT_SYMBOL_GPL(mite_dma_disarm);
int mite_sync_input_dma(struct mite_channel *mite_chan,
struct comedi_async *async)
{
+ struct comedi_subdevice *s = async->subdevice;
int count;
unsigned int nbytes, old_alloc_count;
- const unsigned bytes_per_scan = cfc_bytes_per_scan(async->subdevice);
old_alloc_count = async->buf_write_alloc_count;
/* write alloc as much as we can */
@@ -538,7 +538,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
nbytes = mite_bytes_written_to_memory_lb(mite_chan);
if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
old_alloc_count) > 0) {
- dev_warn(async->subdevice->device->class_dev,
+ dev_warn(s->device->class_dev,
"mite: DMA overwrite of free area\n");
async->events |= COMEDI_CB_OVERFLOW;
return -1;
@@ -551,12 +551,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
return 0;
comedi_buf_write_free(async, count);
-
- async->scan_progress += count;
- if (async->scan_progress >= bytes_per_scan) {
- async->scan_progress %= bytes_per_scan;
- async->events |= COMEDI_CB_EOS;
- }
+ cfc_inc_scan_progress(s, count);
async->events |= COMEDI_CB_BLOCK;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index bcf2f972376e..78f235747991 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -29,9 +29,9 @@
#define MAX_MITE_DMA_CHANNELS 8
struct mite_dma_descriptor {
- u32 count;
- u32 addr;
- u32 next;
+ __le32 count;
+ __le32 addr;
+ __le32 next;
u32 dar;
};
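Editorial note: the count/addr/next fields become __le32 because the DMA engine consumes the descriptor ring in little-endian byte order regardless of host endianness, so the driver side presumably fills them through cpu_to_le32(). The standalone sketch below only demonstrates the byte order that the annotation documents; to_le32() is a local stand-in, not the kernel helper.

/* Stores a 32-bit value least-significant byte first, as __le32 implies. */
#include <stdint.h>
#include <stdio.h>

static void to_le32(uint32_t val, unsigned char out[4])
{
	out[0] = val & 0xff;
	out[1] = (val >> 8) & 0xff;
	out[2] = (val >> 16) & 0xff;
	out[3] = (val >> 24) & 0xff;
}

int main(void)
{
	unsigned char bytes[4];

	to_le32(0x12345678, bytes);
	printf("le32 bytes: %02x %02x %02x %02x\n",
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}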
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index fe4621ea65c3..f770400a0e81 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -142,8 +142,18 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
}
};
-/* Timeout 200ms */
-#define TIMEOUT 200
+static int mpc624_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned char status;
+
+ status = inb(dev->iobase + MPC624_ADC);
+ if ((status & MPC624_ADBUSY) == 0)
+ return 0;
+ return -EBUSY;
+}
static int mpc624_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
@@ -152,7 +162,7 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
struct mpc624_private *devpriv = dev->private;
int n, i;
unsigned long int data_in, data_out;
- unsigned char ucPort;
+ int ret;
/*
* WARNING:
@@ -170,15 +180,9 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
udelay(1);
/* Wait for the conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- ucPort = inb(dev->iobase + MPC624_ADC);
- if (ucPort & MPC624_ADBUSY)
- udelay(1000);
- else
- break;
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, mpc624_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Start reading data */
data_in = 0;
@@ -341,7 +345,7 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->len_chanlist = 1;
s->insn_read = mpc624_ai_rinsn;
- return 1;
+ return 0;
}
static struct comedi_driver mpc624_driver = {
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 3ca755eca285..b74b9e9bfd4a 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -81,34 +81,44 @@ struct multiq3_private {
unsigned int ao_readback[2];
};
+static int multiq3_ai_status(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + MULTIQ3_STATUS);
+ if (status & context)
+ return 0;
+ return -EBUSY;
+}
+
static int multiq3_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int i, n;
+ int n;
int chan;
unsigned int hi, lo;
+ int ret;
chan = CR_CHAN(insn->chanspec);
outw(MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3),
dev->iobase + MULTIQ3_CONTROL);
- for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
- if (inw(dev->iobase + MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC)
- break;
- }
- if (i == MULTIQ3_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
+ MULTIQ3_STATUS_EOC);
+ if (ret)
+ return ret;
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + MULTIQ3_AD_CS);
- for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
- if (inw(dev->iobase +
- MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC_I)
- break;
- }
- if (i == MULTIQ3_TIMEOUT)
- return -ETIMEDOUT;
+
+ ret = comedi_timeout(dev, s, insn, multiq3_ai_status,
+ MULTIQ3_STATUS_EOC_I);
+ if (ret)
+ return ret;
hi = inb(dev->iobase + MULTIQ3_AD_CS);
lo = inb(dev->iobase + MULTIQ3_AD_CS);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index df42e3906171..0d4b9019f76a 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -40,6 +40,7 @@
#include "../comedidev.h"
+#include "comedi_fc.h"
#include "mite.h"
#include "ni_tio.h"
@@ -789,13 +790,7 @@ static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
ni_tio_handle_interrupt(counter, s);
- if (s->async->events) {
- if (s->async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- ni_660x_cancel(dev, s);
- }
- comedi_event(dev, s);
- }
+ cfc_handle_events(dev, s);
}
static irqreturn_t ni_660x_interrupt(int irq, void *d)
@@ -1187,7 +1182,7 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
ni_660x_write_register(dev, 0, global_interrupt_config_bits,
NI660X_GLOBAL_INT_CFG);
- dev_info(dev->class_dev, "ni_660x: %s attached\n", dev->board_name);
+
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 8550fdc4ccd3..1002ceacfdcc 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -246,9 +246,6 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
/* Config of ao registers */
writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
- dev_info(dev->class_dev, "%s: %s attached\n",
- dev->driver->driver_name, dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index f83eb9ebe278..4e39b1f63d81 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -184,7 +184,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
}
/* initialize async here to make sure s is not NULL */
async = s->async;
- async->events = 0;
cmd = &async->cmd;
status = inw(dev->iobase + STATUS_REG);
@@ -196,15 +195,14 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
if (status & OVFL_BIT) {
comedi_error(dev, "fifo overflow");
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ cfc_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
comedi_error(dev, "caught non-dma interrupt? Aborting.");
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -249,7 +247,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
cfc_write_to_buffer(s, dpnt);
if (cmd->stop_src == TRIG_COUNT) {
if (--devpriv->count == 0) { /* end of acquisition */
- a2150_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
break;
}
@@ -265,7 +262,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
/* clear interrupt */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
@@ -488,13 +485,25 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
+static int a2150_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + STATUS_REG);
+ if (status & FNE_BIT)
+ return 0;
+ return -EBUSY;
+}
+
static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct a2150_private *devpriv = dev->private;
- unsigned int i, n;
- static const int timeout = 100000;
- static const int filter_delay = 36;
+ unsigned int n;
+ int ret;
/* clear fifo and reset triggering circuitry */
outw(0, dev->iobase + FIFO_RESET_REG);
@@ -524,30 +533,20 @@ static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
* there is a 35.6 sample delay for data to get through the
* antialias filter
*/
- for (n = 0; n < filter_delay; n++) {
- for (i = 0; i < timeout; i++) {
- if (inw(dev->iobase + STATUS_REG) & FNE_BIT)
- break;
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ for (n = 0; n < 36; n++) {
+ ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
+ if (ret)
+ return ret;
+
inw(dev->iobase + FIFO_DATA_REG);
}
/* read data */
for (n = 0; n < insn->n; n++) {
- for (i = 0; i < timeout; i++) {
- if (inw(dev->iobase + STATUS_REG) & FNE_BIT)
- break;
- udelay(1);
- }
- if (i == timeout) {
- comedi_error(dev, "timeout");
- return -ETIME;
- }
+ ret = comedi_timeout(dev, s, insn, a2150_ai_eoc, 0);
+ if (ret)
+ return ret;
+
data[n] = inw(dev->iobase + FIFO_DATA_REG);
data[n] ^= 0x8000;
}
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index e8cd5ddb85c5..4262385e4f46 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -96,7 +96,6 @@ Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
#define CLOCK_100_HZ 0x8F25
/* Other miscellaneous defines */
#define ATMIO16D_SIZE 32 /* bus address range */
-#define ATMIO16D_TIMEOUT 10
struct atmio16_board_t {
@@ -448,16 +447,32 @@ static int atmio16d_ai_cancel(struct comedi_device *dev,
return 0;
}
-/* Mode 0 is used to get a single conversion on demand */
+static int atmio16d_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + STAT_REG);
+ if (status & STAT_AD_CONVAVAIL)
+ return 0;
+ if (status & STAT_AD_OVERFLOW) {
+ outw(0, dev->iobase + AD_CLEAR_REG);
+ return -EOVERFLOW;
+ }
+ return -EBUSY;
+}
+
static int atmio16d_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct atmio16d_private *devpriv = dev->private;
- int i, t;
+ int i;
int chan;
int gain;
- int status;
+ int ret;
chan = CR_CHAN(insn->chanspec);
gain = CR_RANGE(insn->chanspec);
@@ -473,26 +488,17 @@ static int atmio16d_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
/* start the conversion */
outw(0, dev->iobase + START_CONVERT_REG);
+
/* wait for it to finish */
- for (t = 0; t < ATMIO16D_TIMEOUT; t++) {
- /* check conversion status */
- status = inw(dev->iobase + STAT_REG);
- if (status & STAT_AD_CONVAVAIL) {
- /* read the data now */
- data[i] = inw(dev->iobase + AD_FIFO_REG);
- /* change to two's complement if need be */
- if (devpriv->adc_coding == adc_2comp)
- data[i] ^= 0x800;
- break;
- }
- if (status & STAT_AD_OVERFLOW) {
- outw(0, dev->iobase + AD_CLEAR_REG);
- return -ETIME;
- }
- }
- /* end waiting, now check if it timed out */
- if (t == ATMIO16D_TIMEOUT)
- return -ETIME;
+ ret = comedi_timeout(dev, s, insn, atmio16d_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ /* read the data now */
+ data[i] = inw(dev->iobase + AD_FIFO_REG);
+ /* change to two's complement if need be */
+ if (devpriv->adc_coding == adc_2comp)
+ data[i] ^= 0x800;
}
return i;
@@ -723,10 +729,13 @@ static int atmio16d_attach(struct comedi_device *dev,
/* 8255 subdevice */
s = &dev->subdevices[3];
- if (board->has_8255)
- subdev_8255_init(dev, s, NULL, dev->iobase);
- else
+ if (board->has_8255) {
+ ret = subdev_8255_init(dev, s, NULL, dev->iobase);
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* don't yet know how to deal with counter/timers */
#if 0
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index e4cdca349157..171a71d20c88 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -109,14 +109,31 @@ static int daq700_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
+static int daq700_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + STA_R2);
+ if ((status & 0x03))
+ return -EOVERFLOW;
+ status = inb(dev->iobase + STA_R1);
+ if ((status & 0x02))
+ return -ENODATA;
+ if ((status & 0x11) == 0x01)
+ return 0;
+ return -EBUSY;
+}
+
static int daq700_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int n, i, chan;
+ int n, chan;
int d;
- unsigned int status;
- enum { TIMEOUT = 100 };
+ int ret;
chan = CR_CHAN(insn->chanspec);
/* write channel to multiplexer */
@@ -130,30 +147,12 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
outb(0x30, dev->iobase + CMO_R); /* mode 0 out0 L, from H */
/* mode 1 out0 H, L to H, start conversion */
outb(0x32, dev->iobase + CMO_R);
+
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inb(dev->iobase + STA_R2);
- if ((status & 0x03) != 0) {
- dev_info(dev->class_dev,
- "Overflow/run Error\n");
- return -EOVERFLOW;
- }
- status = inb(dev->iobase + STA_R1);
- if ((status & 0x02) != 0) {
- dev_info(dev->class_dev, "Data Error\n");
- return -ENODATA;
- }
- if ((status & 0x11) == 0x01) {
- /* ADC conversion complete */
- break;
- }
- udelay(1);
- }
- if (i == TIMEOUT) {
- dev_info(dev->class_dev,
- "timeout during ADC conversion\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, daq700_ai_eoc, 0);
+ if (ret)
+ return ret;
+
/* read data */
d = inw(dev->iobase + ADFIFO_R);
/* mangle the data as necessary */
@@ -229,11 +228,6 @@ static int daq700_auto_attach(struct comedi_device *dev,
s->insn_read = daq700_ai_rinsn;
daq700_ai_config(dev, s);
- dev_info(dev->class_dev, "%s: %s, io 0x%lx\n",
- dev->driver->driver_name,
- dev->board_name,
- dev->iobase);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 335ea34fa57c..925e82c65b2d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -3,8 +3,8 @@
Driver for National Instruments PCMCIA DAQ-Card DIO-24
Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es>
- PCMCIA crap at end of file is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
+ PCMCIA crap at end of file is adapted from dummy_cs.c 1.31
+ 2001/08/24 12:13:13 from the pcmcia package.
The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 0512445df08e..f4216e825f03 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -73,7 +73,6 @@
#include "ni_labpc_isadma.h"
#define LABPC_SIZE 0x20 /* size of ISA io region */
-#define LABPC_ADC_TIMEOUT 1000
enum scan_mode {
MODE_SINGLE_CHAN,
@@ -308,19 +307,17 @@ static void labpc_clear_adc_fifo(struct comedi_device *dev)
labpc_read_adc_fifo(dev);
}
-static int labpc_ai_wait_for_data(struct comedi_device *dev,
- int timeout)
+static int labpc_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
struct labpc_private *devpriv = dev->private;
- int i;
- for (i = 0; i < timeout; i++) {
- devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
- if (devpriv->stat1 & STAT1_DAVAIL)
- return 0;
- udelay(1);
- }
- return -ETIME;
+ devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
+ if (devpriv->stat1 & STAT1_DAVAIL)
+ return 0;
+ return -EBUSY;
}
static int labpc_ai_insn_read(struct comedi_device *dev,
@@ -363,7 +360,7 @@ static int labpc_ai_insn_read(struct comedi_device *dev,
/* trigger conversion */
devpriv->write_byte(0x1, dev->iobase + ADC_START_CONVERT_REG);
- ret = labpc_ai_wait_for_data(dev, LABPC_ADC_TIMEOUT);
+ ret = comedi_timeout(dev, s, insn, labpc_ai_eoc, 0);
if (ret)
return ret;
@@ -950,7 +947,6 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
async = s->async;
cmd = &async->cmd;
- async->events = 0;
/* read board status */
devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
@@ -968,7 +964,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(0x1, dev->iobase + ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
comedi_error(dev, "overrun");
return IRQ_HANDLED;
}
@@ -988,7 +984,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(0x1, dev->iobase + ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
comedi_error(dev, "overflow");
return IRQ_HANDLED;
}
@@ -996,20 +992,17 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
if (cmd->stop_src == TRIG_EXT) {
if (devpriv->stat2 & STAT2_OUTA1) {
labpc_drain_dregs(dev);
- labpc_cancel(dev, s);
async->events |= COMEDI_CB_EOA;
}
}
/* TRIG_COUNT end of acquisition */
if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->count == 0) {
- labpc_cancel(dev, s);
+ if (devpriv->count == 0)
async->events |= COMEDI_CB_EOA;
- }
}
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
return IRQ_HANDLED;
}
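Editorial note: in the interrupt handlers above (and in ni_mio_common below, where the removed ni_event() spells the logic out), the explicit "cancel on error, then comedi_event()" sequence is replaced by cfc_handle_events(): if the async event mask contains an end-of-acquisition, error, or overflow event, the acquisition is cancelled before the events are reported. The sketch below only mirrors that dispatch decision with stand-in types and callbacks; it is not the comedi_fc API.

/* Sketch of the event-handling pattern behind cfc_handle_events(). */
#include <stdio.h>

#define CB_EOA		(1 << 0)
#define CB_ERROR	(1 << 1)
#define CB_OVERFLOW	(1 << 2)
#define CB_BLOCK	(1 << 3)

struct subdev_sketch {
	unsigned int events;
	void (*cancel)(struct subdev_sketch *s);
};

static void handle_events(struct subdev_sketch *s)
{
	/* cancel first if the acquisition is over or has failed */
	if (s->events & (CB_EOA | CB_ERROR | CB_OVERFLOW))
		s->cancel(s);
	printf("reporting events 0x%x\n", s->events);
	s->events = 0;
}

static void demo_cancel(struct subdev_sketch *s)
{
	(void)s;
	printf("acquisition cancelled\n");
}

int main(void)
{
	struct subdev_sketch s = { .events = CB_ERROR | CB_EOA,
				   .cancel = demo_cancel };

	handle_events(&s);
	return 0;
}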
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 457b88481db0..8a0e3b7236ad 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -256,7 +256,6 @@ static int ni_rtsi_insn_config(struct comedi_device *dev,
static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s);
static int ni_read_eeprom(struct comedi_device *dev, int addr);
-static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s);
#ifndef PCIDMA
static void ni_handle_fifo_half_full(struct comedi_device *dev);
static int ni_ao_fifo_half_empty(struct comedi_device *dev,
@@ -272,15 +271,12 @@ static void shutdown_ai_command(struct comedi_device *dev);
static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum);
-static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s);
-
static int ni_8255_callback(int dir, int port, int data, unsigned long arg);
#ifdef PCIDMA
static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
+static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
#endif
-static int ni_gpct_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index);
@@ -687,12 +683,22 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
{
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv = dev->private;
+ static const int timeout = 10000;
+ int i;
if (board->reg_type == ni_reg_6143) {
/* Flush the 6143 data FIFO */
ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
- while (ni_readl(AIFIFO_Status_6143) & 0x10) ; /* Wait for complete */
+ /* Wait for complete */
+ for (i = 0; i < timeout; i++) {
+ if (!(ni_readl(AIFIFO_Status_6143) & 0x10))
+ break;
+ udelay(1);
+ }
+ if (i == timeout) {
+ comedi_error(dev, "FIFO flush timeout.");
+ }
} else {
devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
if (board->reg_type == ni_reg_625x) {
@@ -937,32 +943,6 @@ static void shutdown_ai_command(struct comedi_device *dev)
s->async->events |= COMEDI_CB_EOA;
}
-static void ni_event(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- if (s->
- async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW |
- COMEDI_CB_EOA)) {
- switch (s->index) {
- case NI_AI_SUBDEV:
- ni_ai_reset(dev, s);
- break;
- case NI_AO_SUBDEV:
- ni_ao_reset(dev, s);
- break;
- case NI_GPCT0_SUBDEV:
- case NI_GPCT1_SUBDEV:
- ni_gpct_cancel(dev, s);
- break;
- case NI_DIO_SUBDEV:
- ni_cdio_cancel(dev, s);
- break;
- default:
- break;
- }
- }
- comedi_event(dev, s);
-}
-
static void handle_gpct_interrupt(struct comedi_device *dev,
unsigned short counter_index)
{
@@ -974,8 +954,7 @@ static void handle_gpct_interrupt(struct comedi_device *dev,
ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
s);
- if (s->async->events)
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
#endif
}
@@ -1033,7 +1012,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (comedi_is_subdevice_running(s)) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
return;
}
@@ -1048,8 +1027,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & (AI_Overrun_St | AI_Overflow_St))
s->async->events |= COMEDI_CB_OVERFLOW;
- ni_event(dev, s);
-
+ cfc_handle_events(dev, s);
return;
}
if (status & AI_SC_TC_St) {
@@ -1076,7 +1054,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1151,7 +1129,7 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
#endif
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
#ifndef PCIDMA
@@ -3662,7 +3640,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
M_Offset_CDIO_Command);
/* s->async->events |= COMEDI_CB_EOA; */
}
- ni_event(dev, s);
+ cfc_handle_events(dev, s);
}
static int ni_serial_insn_config(struct comedi_device *dev,
@@ -4282,10 +4260,14 @@ static int ni_E_init(struct comedi_device *dev)
/* 8255 device */
s = &dev->subdevices[NI_8255_DIO_SUBDEV];
- if (board->has_8255)
- subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev);
- else
+ if (board->has_8255) {
+ ret = subdev_8255_init(dev, s, ni_8255_callback,
+ (unsigned long)dev);
+ if (ret)
+ return ret;
+ } else {
s->type = COMEDI_SUBD_UNUSED;
+ }
/* formerly general purpose counter/timer device, but no longer used */
s = &dev->subdevices[NI_UNUSED_SUBDEV];
@@ -4393,6 +4375,9 @@ static int ni_E_init(struct comedi_device *dev)
&ni_gpct_read_register,
counter_variant,
NUM_GPCT);
+ if (!devpriv->counter_dev)
+ return -ENOMEM;
+
/* General purpose counters */
for (j = 0; j < NUM_GPCT; ++j) {
s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
@@ -4483,7 +4468,6 @@ static int ni_E_init(struct comedi_device *dev)
ni_writeb(0x0, M_Offset_AO_Calibration);
}
- printk("\n");
return 0;
}
@@ -4992,9 +4976,9 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
#endif
+#ifdef PCIDMA
static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
-#ifdef PCIDMA
struct ni_gpct *counter = s->private;
int retval;
@@ -5002,10 +4986,8 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
ni_release_gpct_mite_channel(dev, counter->counter_index);
return retval;
-#else
- return 0;
-#endif
}
+#endif
/*
*
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 30c46a3c1767..85ac2d964f5c 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -263,9 +263,6 @@ enum FPGA_Control_Bits {
#define IntEn (TransferReady|CountExpired|Waited|PrimaryTC|SecondaryTC)
#endif
-static int ni_pcidio_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-
enum nidio_boardid {
BOARD_PCIDIO_32HS,
BOARD_PXI6533,
@@ -353,17 +350,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-static void ni_pcidio_event(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- if (s->
- async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- ni_pcidio_cancel(dev, s);
- }
- comedi_event(dev, s);
-}
-
static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct nidio96_private *devpriv = dev->private;
@@ -501,7 +487,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
out:
- ni_pcidio_event(dev, s);
+ cfc_handle_events(dev, s);
#if 0
if (!tag) {
writeb(0x03,
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 0ed980455875..d40df072583c 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1551,7 +1551,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
dev->subdevices[NI_GPCT_SUBDEV(1)].buf_change = &pcimio_gpct1_change;
dev->subdevices[NI_DIO_SUBDEV].buf_change = &pcimio_dio_change;
- return ret;
+ return 0;
}
static int pcimio_ai_change(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 68378dab4e70..1056bf001e5e 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -115,10 +115,10 @@ struct ni_gpct {
struct ni_gpct_device {
struct comedi_device *dev;
- void (*write_register) (struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register) (struct ni_gpct *counter,
- enum ni_gpct_register reg);
+ void (*write_register)(struct ni_gpct *counter, unsigned bits,
+ enum ni_gpct_register reg);
+ unsigned (*read_register)(struct ni_gpct *counter,
+ enum ni_gpct_register reg);
enum ni_gpct_variant variant;
struct ni_gpct *counters;
unsigned num_counters;
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index f0fc123ef566..7c03a5d17b1b 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -253,18 +253,17 @@ static void pcl711_set_changain(struct comedi_device *dev,
outb(mux | PCL711_MUX_CHAN(chan), dev->iobase + PCL711_MUX_REG);
}
-static int pcl711_ai_wait_for_eoc(struct comedi_device *dev,
- unsigned int timeout)
+static int pcl711_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- unsigned int msb;
+ unsigned int status;
- while (timeout--) {
- msb = inb(dev->iobase + PCL711_AI_MSB_REG);
- if ((msb & PCL711_AI_MSB_DRDY) == 0)
- return 0;
- udelay(1);
- }
- return -ETIME;
+ status = inb(dev->iobase + PCL711_AI_MSB_REG);
+ if ((status & PCL711_AI_MSB_DRDY) == 0)
+ return 0;
+ return -EBUSY;
}
static int pcl711_ai_insn_read(struct comedi_device *dev,
@@ -282,7 +281,7 @@ static int pcl711_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(PCL711_SOFTTRIG, dev->iobase + PCL711_SOFTTRIG_REG);
- ret = pcl711_ai_wait_for_eoc(dev, 100);
+ ret = comedi_timeout(dev, s, insn, pcl711_ai_eoc, 0);
if (ret)
return ret;
@@ -336,11 +335,8 @@ static int pcl711_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
- if (cmd->stop_src == TRIG_NONE) {
+ if (cmd->stop_src == TRIG_NONE)
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- } else {
- /* ignore */
- }
if (err)
return 3;
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 53613b385f35..160eac8083db 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -131,34 +131,32 @@
#define boardACL8216 8 /* and ICP DAS A-826PG */
#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
-#define PCLx1x_IORANGE 16
-
-#define PCL812_CTR0 0
-#define PCL812_CTR1 1
-#define PCL812_CTR2 2
-#define PCL812_CTRCTL 3
-#define PCL812_AD_LO 4
-#define PCL812_DA1_LO 4
-#define PCL812_AD_HI 5
-#define PCL812_DA1_HI 5
-#define PCL812_DA2_LO 6
-#define PCL812_DI_LO 6
-#define PCL812_DA2_HI 7
-#define PCL812_DI_HI 7
-#define PCL812_CLRINT 8
-#define PCL812_GAIN 9
-#define PCL812_MUX 10
-#define PCL812_MODE 11
-#define PCL812_CNTENABLE 10
-#define PCL812_SOFTTRIG 12
-#define PCL812_DO_LO 13
-#define PCL812_DO_HI 14
-
-#define PCL812_DRDY 0x10 /* =0 data ready */
-
-#define ACL8216_STATUS 8 /* 5. bit signalize data ready */
-
-#define ACL8216_DRDY 0x20 /* =0 data ready */
+/*
+ * Register I/O map
+ */
+#define PCL812_TIMER_BASE 0x00
+#define PCL812_AI_LSB_REG 0x04
+#define PCL812_AI_MSB_REG 0x05
+#define PCL812_AI_MSB_DRDY (1 << 4)
+#define PCL812_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define PCL812_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define PCL812_DI_LSB_REG 0x06
+#define PCL812_DI_MSB_REG 0x07
+#define PCL812_STATUS_REG 0x08
+#define PCL812_STATUS_DRDY (1 << 5)
+#define PCL812_RANGE_REG 0x09
+#define PCL812_MUX_REG 0x0a
+#define PCL812_MUX_CHAN(x) ((x) << 0)
+#define PCL812_MUX_CS0 (1 << 4)
+#define PCL812_MUX_CS1 (1 << 5)
+#define PCL812_CTRL_REG 0x0b
+#define PCL812_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL812_CTRL_SOFT_TRIG (1 << 0)
+#define PCL812_CTRL_PACER_DMA_TRIG (2 << 0)
+#define PCL812_CTRL_PACER_EOC_TRIG (6 << 0)
+#define PCL812_SOFTTRIG_REG 0x0c
+#define PCL812_DO_LSB_REG 0x0d
+#define PCL812_DO_MSB_REG 0x0e
#define MAX_CHANLIST_LEN 256 /* length of scan list */
@@ -331,213 +329,391 @@ static const struct comedi_lrange range_a821pgh_ai = {
};
struct pcl812_board {
+ const char *name;
+ int board_type;
+ int n_aichan;
+ int n_aochan;
+ unsigned int ai_ns_min;
+ const struct comedi_lrange *rangelist_ai;
+ unsigned int IRQbits;
+ unsigned int has_dma:1;
+ unsigned int has_16bit_ai:1;
+ unsigned int has_mpc508_mux:1;
+ unsigned int has_dio:1;
+};
- const char *name; /* board name */
- int board_type; /* type of this board */
- int n_aichan; /* num of AI chans in S.E. */
- int n_aichan_diff; /* DIFF num of chans */
- int n_aochan; /* num of DA chans */
- int n_dichan; /* DI and DO chans */
- int n_dochan;
- int ai_maxdata; /* AI resolution */
- unsigned int ai_ns_min; /* max sample speed of card v ns */
- unsigned int i8254_osc_base; /* clock base */
- const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
- const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
- unsigned int IRQbits; /* allowed IRQ */
- unsigned char DMAbits; /* allowed DMA chans */
- unsigned char io_range; /* iorange for this board */
- unsigned char haveMPC508; /* 1=board use MPC508A multiplexor */
+static const struct pcl812_board boardtypes[] = {
+ {
+ .name = "pcl812",
+ .board_type = boardPCL812,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 33000,
+ .rangelist_ai = &range_bipolar10,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "pcl812pg",
+ .board_type = boardPCL812PG,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 33000,
+ .rangelist_ai = &range_pcl812pg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112pg",
+ .board_type = boardPCL812PG,
+ .n_aichan = 16,
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl812pg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112dg",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "acl8112hg",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a821pgl",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 1,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b_ai,
+ .IRQbits = 0x000c,
+ .has_dio = 1,
+ }, {
+ .name = "a821pglnda",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b_ai,
+ .IRQbits = 0x000c,
+ }, {
+ .name = "a821pgh",
+ .board_type = boardA821,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 1,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_a821pgh_ai,
+ .IRQbits = 0x000c,
+ .has_dio = 1,
+ }, {
+ .name = "a822pgl",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a822pgh",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a823pgl",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 8000,
+ .rangelist_ai = &range_acl8112dg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a823pgh",
+ .board_type = boardACL8112,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 8000,
+ .rangelist_ai = &range_acl8112hg_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_dio = 1,
+ }, {
+ .name = "pcl813",
+ .board_type = boardPCL813,
+ .n_aichan = 32,
+ .rangelist_ai = &range_pcl813b_ai,
+ }, {
+ .name = "pcl813b",
+ .board_type = boardPCL813B,
+ .n_aichan = 32,
+ .rangelist_ai = &range_pcl813b_ai,
+ }, {
+ .name = "acl8113",
+ .board_type = boardACL8113,
+ .n_aichan = 32,
+ .rangelist_ai = &range_acl8113_1_ai,
+ }, {
+ .name = "iso813",
+ .board_type = boardISO813,
+ .n_aichan = 32,
+ .rangelist_ai = &range_iso813_1_ai,
+ }, {
+ .name = "acl8216",
+ .board_type = boardACL8216,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b2_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_16bit_ai = 1,
+ .has_mpc508_mux = 1,
+ .has_dio = 1,
+ }, {
+ .name = "a826pg",
+ .board_type = boardACL8216,
+ .n_aichan = 16, /* 8 differential */
+ .n_aochan = 2,
+ .ai_ns_min = 10000,
+ .rangelist_ai = &range_pcl813b2_ai,
+ .IRQbits = 0xdcfc,
+ .has_dma = 1,
+ .has_16bit_ai = 1,
+ .has_dio = 1,
+ },
};
struct pcl812_private {
-
- unsigned char valid; /* =1 device is OK */
unsigned char dma; /* >0 use dma (used DMA channel) */
- unsigned char use_diff; /* =1 diff inputs */
- unsigned char use_MPC; /* 1=board uses MPC508A multiplexor */
- unsigned char use_ext_trg; /* 1=board uses external trigger */
unsigned char range_correction; /* =1 we must add 1 to range number */
- unsigned char old_chan_reg; /* lastly used chan/gain pair */
- unsigned char old_gain_reg;
+ unsigned int last_ai_chanspec;
unsigned char mode_reg_int; /* stored IRQ number for some cards */
- unsigned char ai_neverending; /* =1 we do unlimited AI */
- unsigned char ai_eos; /* 1=EOS wake up */
- unsigned char ai_dma; /* =1 we use DMA */
unsigned int ai_poll_ptr; /* how many samples transferred by poll */
- unsigned int ai_scans; /* len of scanlist */
unsigned int ai_act_scan; /* how many scans we finished */
- unsigned int ai_chanlist[MAX_CHANLIST_LEN]; /* our copy of channel/range list */
- unsigned int ai_n_chan; /* how many channels is measured */
- unsigned int ai_flags; /* flaglist */
- unsigned int ai_data_len; /* len of data buffer */
- unsigned int ai_is16b; /* =1 we have 16 bit card */
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* PTR to DMA buf */
- unsigned int dmapages[2]; /* how many pages we have allocated */
unsigned int hwdmaptr[2]; /* HW PTR to DMA buf */
- unsigned int hwdmasize[2]; /* DMA buf size in bytes */
unsigned int dmabytestomove[2]; /* how many bytes per DMA transfer */
int next_dma_buf; /* which buffer is next to use */
unsigned int dma_runs_to_end; /* how many times we must switch DMA buffers */
unsigned int last_dma_run; /* how many bytes to transfer on last DMA buffer */
unsigned int max_812_ai_mode0_rangewait; /* settling time for gain */
unsigned int ao_readback[2]; /* data for AO readback */
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int use_diff:1;
+ unsigned int use_mpc508:1;
+ unsigned int use_ext_trg:1;
+ unsigned int ai_dma:1;
+ unsigned int ai_eos:1;
};
-/*
-==============================================================================
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-static void setup_range_channel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int rangechan, char wait);
-static int pcl812_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-/*
-==============================================================================
-*/
-static int pcl812_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_start_pacer(struct comedi_device *dev, bool load_timers)
{
struct pcl812_private *devpriv = dev->private;
- int n;
- int timeout, hi;
-
- /* select software trigger */
- outb(devpriv->mode_reg_int | 1, dev->iobase + PCL812_MODE);
- /* select channel and renge */
- setup_range_channel(dev, s, insn->chanspec, 1);
- for (n = 0; n < insn->n; n++) {
- /* start conversion */
- outb(255, dev->iobase + PCL812_SOFTTRIG);
- udelay(5);
- timeout = 50; /* wait max 50us, it must finish under 33us */
- while (timeout--) {
- hi = inb(dev->iobase + PCL812_AD_HI);
- if (!(hi & PCL812_DRDY))
- goto conv_finish;
- udelay(1);
- }
- dev_dbg(dev->class_dev, "A/D insn read timeout\n");
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- return -ETIME;
+ unsigned long timer_base = dev->iobase + PCL812_TIMER_BASE;
+
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
-conv_finish:
- data[n] = ((hi & 0xf) << 8) | inb(dev->iobase + PCL812_AD_LO);
+ if (load_timers) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
}
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- return n;
}
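
The pacer cascades counters 1 and 2 of the on-board 8254, which is clocked at 2 MHz, so the resulting convert period is roughly divisor1 * divisor2 * 500 ns. A hedged sketch of that relationship, assuming I8254_OSC_BASE_2MHZ is the 500 ns base period and ignoring overflow for very large divisors:

/* Illustrative sketch (not part of the patch): approximate pacer
 * period produced by the cascaded divisors loaded above.
 */
static unsigned int pcl812_pacer_period_ns(unsigned int divisor1,
					   unsigned int divisor2)
{
	/* I8254_OSC_BASE_2MHZ is the 500 ns period of the 2 MHz clock */
	return I8254_OSC_BASE_2MHZ * divisor1 * divisor2;
}
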
-/*
-==============================================================================
-*/
-static int acl8216_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- int n;
- int timeout;
-
- /* select software trigger */
- outb(1, dev->iobase + PCL812_MODE);
- /* select channel and renge */
- setup_range_channel(dev, s, insn->chanspec, 1);
- for (n = 0; n < insn->n; n++) {
- /* start conversion */
- outb(255, dev->iobase + PCL812_SOFTTRIG);
- udelay(5);
- timeout = 50; /* wait max 50us, it must finish under 33us */
- while (timeout--) {
- if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY))
- goto conv_finish;
- udelay(1);
+ struct pcl812_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int dma_flags;
+ unsigned int bytes;
+
+ /* we use EOS, so adapt DMA buffer to one scan */
+ if (devpriv->ai_eos) {
+ devpriv->dmabytestomove[0] =
+ cmd->chanlist_len * sizeof(short);
+ devpriv->dmabytestomove[1] =
+ cmd->chanlist_len * sizeof(short);
+ devpriv->dma_runs_to_end = 1;
+ } else {
+ devpriv->dmabytestomove[0] = devpriv->hwdmasize;
+ devpriv->dmabytestomove[1] = devpriv->hwdmasize;
+ if (s->async->prealloc_bufsz < devpriv->hwdmasize) {
+ devpriv->dmabytestomove[0] =
+ s->async->prealloc_bufsz;
+ devpriv->dmabytestomove[1] =
+ s->async->prealloc_bufsz;
}
- dev_dbg(dev->class_dev, "A/D insn read timeout\n");
- outb(0, dev->iobase + PCL812_MODE);
- return -ETIME;
-
-conv_finish:
- data[n] =
- (inb(dev->iobase +
- PCL812_AD_HI) << 8) | inb(dev->iobase + PCL812_AD_LO);
+ if (cmd->stop_src == TRIG_NONE) {
+ devpriv->dma_runs_to_end = 1;
+ } else {
+ /* how many samples we must transfer? */
+ bytes = cmd->chanlist_len *
+ cmd->stop_arg * sizeof(short);
+
+ /* how many DMA pages we must fill */
+ devpriv->dma_runs_to_end =
+ bytes / devpriv->dmabytestomove[0];
+
+ /* bytes to move on the last DMA transfer */
+ devpriv->last_dma_run =
+ bytes % devpriv->dmabytestomove[0];
+ if (devpriv->dma_runs_to_end == 0)
+ devpriv->dmabytestomove[0] =
+ devpriv->last_dma_run;
+ devpriv->dma_runs_to_end--;
+ }
+ }
+ if (devpriv->dmabytestomove[0] > devpriv->hwdmasize) {
+ devpriv->dmabytestomove[0] = devpriv->hwdmasize;
+ devpriv->ai_eos = 0;
}
- outb(0, dev->iobase + PCL812_MODE);
- return n;
+ if (devpriv->dmabytestomove[1] > devpriv->hwdmasize) {
+ devpriv->dmabytestomove[1] = devpriv->hwdmasize;
+ devpriv->ai_eos = 0;
+ }
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
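
To make the full-buffer/remainder split above concrete: with 16 channels, stop_arg = 1000 and an 8 KiB DMA buffer, the driver must move 16 * 1000 * 2 = 32000 bytes, i.e. three full 8192-byte runs plus a final 7424-byte run. A small sketch of that arithmetic (illustrative only, not part of the patch):

/* Illustrative sketch (not part of the patch): the full-run/remainder
 * split computed above, shown with concrete numbers.
 */
static void pcl812_dma_split_example(void)
{
	unsigned int bytes = 16 * 1000 * sizeof(short);	/* 32000 */
	unsigned int bufsize = 8192;			/* 8 KiB buffer */
	unsigned int full_runs = bytes / bufsize;	/* 3 */
	unsigned int last_run = bytes % bufsize;	/* 7424 */

	pr_debug("full runs %u, last run %u bytes\n", full_runs, last_run);
}
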
-/*
-==============================================================================
-*/
-static int pcl812_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int i;
+ unsigned long dma_flags;
- for (i = 0; i < insn->n; i++) {
- outb((data[i] & 0xff),
- dev->iobase + (chan ? PCL812_DA2_LO : PCL812_DA1_LO));
- outb((data[i] >> 8) & 0x0f,
- dev->iobase + (chan ? PCL812_DA2_HI : PCL812_DA1_HI));
- devpriv->ao_readback[chan] = data[i];
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ disable_dma(devpriv->dma);
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->ai_eos) {
+ set_dma_count(devpriv->dma,
+ devpriv->dmabytestomove[devpriv->next_dma_buf]);
+ } else {
+ if (devpriv->dma_runs_to_end) {
+ set_dma_count(devpriv->dma,
+ devpriv->dmabytestomove[devpriv->
+ next_dma_buf]);
+ } else {
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ }
+ devpriv->dma_runs_to_end--;
}
-
- return i;
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
-/*
-==============================================================================
-*/
-static int pcl812_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chanspec, char wait)
{
struct pcl812_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int i;
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int mux = 0;
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_readback[chan];
+ if (chanspec == devpriv->last_ai_chanspec)
+ return;
+
+ devpriv->last_ai_chanspec = chanspec;
+
+ if (devpriv->use_mpc508) {
+ if (devpriv->use_diff) {
+ mux |= PCL812_MUX_CS0 | PCL812_MUX_CS1;
+ } else {
+ if (chan < 8)
+ mux |= PCL812_MUX_CS0;
+ else
+ mux |= PCL812_MUX_CS1;
+ }
+ }
+
+ outb(mux | PCL812_MUX_CHAN(chan), dev->iobase + PCL812_MUX_REG);
+ outb(range + devpriv->range_correction, dev->iobase + PCL812_RANGE_REG);
- return i;
+ if (wait)
+ /*
+ * XXX this depends on selected range and can be very long for
+ * some high gain ranges!
+ */
+ udelay(devpriv->max_812_ai_mode0_rangewait);
}
-/*
-==============================================================================
-*/
-static int pcl812_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl812_ai_clear_eoc(struct comedi_device *dev)
{
- data[1] = inb(dev->iobase + PCL812_DI_LO);
- data[1] |= inb(dev->iobase + PCL812_DI_HI) << 8;
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL812_STATUS_REG);
+}
- return insn->n;
+static void pcl812_ai_soft_trig(struct comedi_device *dev)
+{
+ /* writing any value triggers a software conversion */
+ outb(255, dev->iobase + PCL812_SOFTTRIG_REG);
}
-static int pcl812_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static unsigned int pcl812_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL812_DO_LO);
- outb((s->state >> 8), dev->iobase + PCL812_DO_HI);
- }
+ unsigned int val;
- data[1] = s->state;
+ val = inb(dev->iobase + PCL812_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL812_AI_LSB_REG);
- return insn->n;
+ return val & s->maxdata;
+}
+
+static int pcl812_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ if (s->maxdata > 0x0fff) {
+ status = inb(dev->iobase + PCL812_STATUS_REG);
+ if ((status & PCL812_STATUS_DRDY) == 0)
+ return 0;
+ } else {
+ status = inb(dev->iobase + PCL812_AI_MSB_REG);
+ if ((status & PCL812_AI_MSB_DRDY) == 0)
+ return 0;
+ }
+ return -EBUSY;
}
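
pcl812_ai_eoc() follows the comedi_timeout() callback convention: return 0 once the conversion has completed and -EBUSY while it is still in progress. Roughly, the caller keeps polling the callback until it succeeds or a timeout expires, as in the sketch below (illustrative only; the real helper lives in the comedi core and uses a proper timeout):

/* Illustrative sketch (not part of the patch): how an end-of-conversion
 * callback such as pcl812_ai_eoc() is typically polled.
 */
static int pcl812_eoc_poll_sketch(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn)
{
	unsigned int i;

	for (i = 0; i < 1000; i++) {	/* ~1 ms worth of 1 us polls */
		if (pcl812_ai_eoc(dev, s, insn, 0) == 0)
			return 0;
		udelay(1);
	}
	return -ETIME;
}
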
-/*
-==============================================================================
-*/
static int pcl812_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
@@ -545,7 +721,7 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
struct pcl812_private *devpriv = dev->private;
int err = 0;
unsigned int flags;
- int tmp, divisor1, divisor2;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -600,8 +776,9 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ai_ns_min)
cmd->convert_arg = board->ai_ns_min;
@@ -615,54 +792,21 @@ static int pcl812_ai_cmdtest(struct comedi_device *dev,
return 0;
}
-/*
-==============================================================================
-*/
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv = dev->private;
- unsigned int divisor1 = 0, divisor2 = 0, i, dma_flags, bytes;
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int ctrl = 0;
+ unsigned int i;
- if (cmd->start_src != TRIG_NOW)
- return -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW)
- return -EINVAL;
- if (devpriv->use_ext_trg) {
- if (cmd->convert_src != TRIG_EXT)
- return -EINVAL;
- } else {
- if (cmd->convert_src != TRIG_TIMER)
- return -EINVAL;
- }
- if (cmd->scan_end_src != TRIG_COUNT)
- return -EINVAL;
- if (cmd->scan_end_arg != cmd->chanlist_len)
- return -EINVAL;
- if (cmd->chanlist_len > MAX_CHANLIST_LEN)
- return -EINVAL;
-
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg, cmd->flags);
- }
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
+ pcl812_start_pacer(dev, false);
- devpriv->ai_n_chan = cmd->chanlist_len;
- memcpy(devpriv->ai_chanlist, cmd->chanlist,
- sizeof(unsigned int) * cmd->scan_end_arg);
- /* select first channel and range */
- setup_range_channel(dev, s, devpriv->ai_chanlist[0], 1);
+ pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);
if (devpriv->dma) { /* check if we can use DMA transfer */
devpriv->ai_dma = 1;
- for (i = 1; i < devpriv->ai_n_chan; i++)
- if (devpriv->ai_chanlist[0] != devpriv->ai_chanlist[i]) {
+ for (i = 1; i < cmd->chanlist_len; i++)
+ if (cmd->chanlist[0] != cmd->chanlist[i]) {
/* we can't use DMA :-( */
devpriv->ai_dma = 0;
break;
@@ -670,212 +814,105 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
} else
devpriv->ai_dma = 0;
- devpriv->ai_flags = cmd->flags;
- devpriv->ai_data_len = s->async->prealloc_bufsz;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_scans = cmd->stop_arg;
- devpriv->ai_neverending = 0;
- } else {
- devpriv->ai_scans = 0;
- devpriv->ai_neverending = 1;
- }
-
devpriv->ai_act_scan = 0;
devpriv->ai_poll_ptr = 0;
s->async->cur_chan = 0;
/* do we want to wake up on every scan? */
- if ((devpriv->ai_flags & TRIG_WAKE_EOS)) {
+ if (cmd->flags & TRIG_WAKE_EOS) {
devpriv->ai_eos = 1;
/* DMA is useless for this situation */
- if (devpriv->ai_n_chan == 1)
+ if (cmd->chanlist_len == 1)
devpriv->ai_dma = 0;
}
- if (devpriv->ai_dma) {
- /* we use EOS, so adapt DMA buffer to one scan */
- if (devpriv->ai_eos) {
- devpriv->dmabytestomove[0] =
- devpriv->ai_n_chan * sizeof(short);
- devpriv->dmabytestomove[1] =
- devpriv->ai_n_chan * sizeof(short);
- devpriv->dma_runs_to_end = 1;
- } else {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
- devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
- if (devpriv->ai_data_len < devpriv->hwdmasize[0])
- devpriv->dmabytestomove[0] =
- devpriv->ai_data_len;
- if (devpriv->ai_data_len < devpriv->hwdmasize[1])
- devpriv->dmabytestomove[1] =
- devpriv->ai_data_len;
- if (devpriv->ai_neverending) {
- devpriv->dma_runs_to_end = 1;
- } else {
- /* how many samples we must transfer? */
- bytes = devpriv->ai_n_chan *
- devpriv->ai_scans * sizeof(short);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end =
- bytes / devpriv->dmabytestomove[0];
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run =
- bytes % devpriv->dmabytestomove[0];
- if (devpriv->dma_runs_to_end == 0)
- devpriv->dmabytestomove[0] =
- devpriv->last_dma_run;
- devpriv->dma_runs_to_end--;
- }
- }
- if (devpriv->dmabytestomove[0] > devpriv->hwdmasize[0]) {
- devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
- devpriv->ai_eos = 0;
- }
- if (devpriv->dmabytestomove[1] > devpriv->hwdmasize[1]) {
- devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
- devpriv->ai_eos = 0;
- }
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
- }
+ if (devpriv->ai_dma)
+ pcl812_ai_setup_dma(dev, s);
switch (cmd->convert_src) {
case TRIG_TIMER:
- start_pacer(dev, 1, divisor1, divisor2);
+ pcl812_start_pacer(dev, true);
break;
}
- if (devpriv->ai_dma) /* let's go! */
- outb(devpriv->mode_reg_int | 2, dev->iobase + PCL812_MODE);
- else /* let's go! */
- outb(devpriv->mode_reg_int | 6, dev->iobase + PCL812_MODE);
+ if (devpriv->ai_dma)
+ ctrl |= PCL812_CTRL_PACER_DMA_TRIG;
+ else
+ ctrl |= PCL812_CTRL_PACER_EOC_TRIG;
+ outb(devpriv->mode_reg_int | ctrl, dev->iobase + PCL812_CTRL_REG);
return 0;
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
+static bool pcl812_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- char err = 1;
- unsigned int mask, timeout;
- struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int next_chan;
+ struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events = 0;
+ s->async->events |= COMEDI_CB_BLOCK;
- timeout = 50; /* wait max 50us, it must finish under 33us */
- if (devpriv->ai_is16b) {
- mask = 0xffff;
- while (timeout--) {
- if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY)) {
- err = 0;
- break;
- }
- udelay(1);
- }
- } else {
- mask = 0x0fff;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL812_AD_HI) & PCL812_DRDY)) {
- err = 0;
- break;
- }
- udelay(1);
- }
+ s->async->cur_chan++;
+ if (s->async->cur_chan >= cmd->chanlist_len) {
+ s->async->cur_chan = 0;
+ devpriv->ai_act_scan++;
+ s->async->events |= COMEDI_CB_EOS;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->ai_act_scan >= cmd->stop_arg) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
}
- if (err) {
+ return true;
+}
+
+static void pcl812_handle_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int next_chan;
+
+ if (pcl812_ai_eoc(dev, s, NULL, 0)) {
dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
- pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- comedi_buf_put(s->async,
- ((inb(dev->iobase + PCL812_AD_HI) << 8) |
- inb(dev->iobase + PCL812_AD_LO)) & mask);
+ comedi_buf_put(s->async, pcl812_ai_get_sample(dev, s));
/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
next_chan = s->async->cur_chan + 1;
- if (next_chan >= devpriv->ai_n_chan)
+ if (next_chan >= cmd->chanlist_len)
next_chan = 0;
- if (devpriv->ai_chanlist[s->async->cur_chan] !=
- devpriv->ai_chanlist[next_chan])
- setup_range_channel(dev, s, devpriv->ai_chanlist[next_chan], 0);
-
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
+ if (cmd->chanlist[s->async->cur_chan] != cmd->chanlist[next_chan])
+ pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);
- s->async->cur_chan = next_chan;
- if (next_chan == 0) { /* one scan done */
- devpriv->ai_act_scan++;
- if (!(devpriv->ai_neverending))
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl812_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
- }
-
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ pcl812_ai_next_chan(dev, s);
}
-/*
-==============================================================================
-*/
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
- struct pcl812_private *devpriv = dev->private;
unsigned int i;
- s->async->events = 0;
for (i = len; i; i--) {
- /* get one sample */
comedi_buf_put(s->async, ptr[bufptr++]);
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl812_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
- }
+ if (!pcl812_ai_next_chan(dev, s))
+ break;
}
-
- comedi_event(dev, s);
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
+static void pcl812_handle_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl812_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned long dma_flags;
int len, bufptr;
unsigned short *ptr;
@@ -883,58 +920,36 @@ static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
devpriv->ai_poll_ptr;
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- disable_dma(devpriv->dma);
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->ai_eos) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->next_dma_buf]);
- } else {
- if (devpriv->dma_runs_to_end) {
- set_dma_count(devpriv->dma,
- devpriv->dmabytestomove[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- devpriv->dma_runs_to_end--;
- }
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
-
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
+ pcl812_ai_setup_next_dma(dev, s);
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
-
- return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
-static irqreturn_t interrupt_pcl812(int irq, void *d)
+static irqreturn_t pcl812_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
struct pcl812_private *devpriv = dev->private;
if (!dev->attached) {
- comedi_error(dev, "spurious interrupt");
+ pcl812_ai_clear_eoc(dev);
return IRQ_HANDLED;
}
+
if (devpriv->ai_dma)
- return interrupt_pcl812_ai_dma(irq, d);
+ pcl812_handle_dma(dev, s);
else
- return interrupt_pcl812_ai_int(irq, d);
+ pcl812_handle_eoc(dev, s);
+
+ pcl812_ai_clear_eoc(dev);
+
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
@@ -979,204 +994,308 @@ static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
return s->async->buf_write_count - s->async->buf_read_count;
}
-/*
-==============================================================================
-*/
-static void setup_range_channel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int rangechan, char wait)
+static int pcl812_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
- unsigned char chan_reg = CR_CHAN(rangechan); /* normal board */
- /* gain index */
- unsigned char gain_reg = CR_RANGE(rangechan) +
- devpriv->range_correction;
- if ((chan_reg == devpriv->old_chan_reg)
- && (gain_reg == devpriv->old_gain_reg))
- return; /* we can return, no change */
+ if (devpriv->ai_dma)
+ disable_dma(devpriv->dma);
- devpriv->old_chan_reg = chan_reg;
- devpriv->old_gain_reg = gain_reg;
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_start_pacer(dev, false);
+ pcl812_ai_clear_eoc(dev);
+ return 0;
+}
- if (devpriv->use_MPC) {
- if (devpriv->use_diff) {
- chan_reg = chan_reg | 0x30; /* DIFF inputs */
- } else {
- if (chan_reg & 0x80)
- /* SE inputs 8-15 */
- chan_reg = chan_reg | 0x20;
- else
- /* SE inputs 0-7 */
- chan_reg = chan_reg | 0x10;
- }
- }
+static int pcl812_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl812_private *devpriv = dev->private;
+ int ret = 0;
+ int i;
- outb(chan_reg, dev->iobase + PCL812_MUX); /* select channel */
- outb(gain_reg, dev->iobase + PCL812_GAIN); /* select gain */
+ outb(devpriv->mode_reg_int | PCL812_CTRL_SOFT_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_set_chan_range(dev, insn->chanspec, 1);
- if (wait)
- /*
- * XXX this depends on selected range and can be very long for
- * some high gain ranges!
- */
- udelay(devpriv->max_812_ai_mode0_rangewait);
+ for (i = 0; i < insn->n; i++) {
+ pcl812_ai_clear_eoc(dev);
+ pcl812_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl812_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl812_ai_get_sample(dev, s);
+ }
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_clear_eoc(dev);
+
+ return ret ? ret : insn->n;
}
-/*
-==============================================================================
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2)
+static int pcl812_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- outb(0xb4, dev->iobase + PCL812_CTRCTL);
- outb(0x74, dev->iobase + PCL812_CTRCTL);
- udelay(1);
+ struct pcl812_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- if (mode == 1) {
- outb(divisor2 & 0xff, dev->iobase + PCL812_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL812_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL812_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL812_CTR1);
+ for (i = 0; i < insn->n; i++) {
+ outb((data[i] & 0xff),
+ dev->iobase + PCL812_AO_LSB_REG(chan));
+ outb((data[i] >> 8) & 0x0f,
+ dev->iobase + PCL812_AO_MSB_REG(chan));
+ devpriv->ao_readback[chan] = data[i];
}
+
+ return insn->n;
}
-/*
-==============================================================================
-*/
-static int pcl812_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int pcl812_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcl812_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- if (devpriv->ai_dma)
- disable_dma(devpriv->dma);
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
- /* Stop A/D */
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- start_pacer(dev, -1, 0, 0); /* stop 8254 */
- outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
- return 0;
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int pcl812_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL812_DI_LSB_REG) |
+ (inb(dev->iobase + PCL812_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl812_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL812_DO_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL812_DO_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
}
-/*
-==============================================================================
-*/
static void pcl812_reset(struct comedi_device *dev)
{
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv = dev->private;
+ unsigned int chan;
+
+ /* disable analog input trigger */
+ outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
+ dev->iobase + PCL812_CTRL_REG);
+ pcl812_ai_clear_eoc(dev);
+
+ /* stop pacer */
+ if (board->IRQbits)
+ pcl812_start_pacer(dev, false);
+
+ /*
+ * Invalidate last_ai_chanspec, then set the analog input to a
+ * known channel/range.
+ */
+ devpriv->last_ai_chanspec = CR_PACK(16, 0, 0);
+ pcl812_ai_set_chan_range(dev, CR_PACK(0, 0, 0), 0);
+
+ /* set analog output channels to 0V */
+ for (chan = 0; chan < board->n_aochan; chan++) {
+ outb(0, dev->iobase + PCL812_AO_LSB_REG(chan));
+ outb(0, dev->iobase + PCL812_AO_MSB_REG(chan));
+ }
- outb(0, dev->iobase + PCL812_MUX);
- outb(0 + devpriv->range_correction, dev->iobase + PCL812_GAIN);
- devpriv->old_chan_reg = -1; /* invalidate chain/gain memory */
- devpriv->old_gain_reg = -1;
+ /* set all digital outputs low */
+ if (board->has_dio) {
+ outb(0, dev->iobase + PCL812_DO_MSB_REG);
+ outb(0, dev->iobase + PCL812_DO_LSB_REG);
+ }
+}
+static void pcl812_set_ai_range_table(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it)
+{
+ const struct pcl812_board *board = comedi_board(dev);
+ struct pcl812_private *devpriv = dev->private;
+
+ /* default to the range table from the boardinfo */
+ s->range_table = board->rangelist_ai;
+
+ /* now check the user config option based on the boardtype */
switch (board->board_type) {
case boardPCL812PG:
+ if (it->options[4] == 1)
+ s->range_table = &range_pcl812pg2_ai;
+ break;
case boardPCL812:
- case boardACL8112:
- case boardACL8216:
- outb(0, dev->iobase + PCL812_DA2_LO);
- outb(0, dev->iobase + PCL812_DA2_HI);
- case boardA821:
- outb(0, dev->iobase + PCL812_DA1_LO);
- outb(0, dev->iobase + PCL812_DA1_HI);
- start_pacer(dev, -1, 0, 0); /* stop 8254 */
- outb(0, dev->iobase + PCL812_DO_HI);
- outb(0, dev->iobase + PCL812_DO_LO);
- outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
- outb(0, dev->iobase + PCL812_CLRINT);
+ switch (it->options[4]) {
+ case 0:
+ s->range_table = &range_bipolar10;
+ break;
+ case 1:
+ s->range_table = &range_bipolar5;
+ break;
+ case 2:
+ s->range_table = &range_bipolar2_5;
+ break;
+ case 3:
+ s->range_table = &range812_bipolar1_25;
+ break;
+ case 4:
+ s->range_table = &range812_bipolar0_625;
+ break;
+ case 5:
+ s->range_table = &range812_bipolar0_3125;
+ break;
+ default:
+ s->range_table = &range_bipolar10;
+ break;
+ }
break;
case boardPCL813B:
- case boardPCL813:
+ if (it->options[1] == 1)
+ s->range_table = &range_pcl813b2_ai;
+ break;
case boardISO813:
+ switch (it->options[1]) {
+ case 0:
+ s->range_table = &range_iso813_1_ai;
+ break;
+ case 1:
+ s->range_table = &range_iso813_1_2_ai;
+ break;
+ case 2:
+ s->range_table = &range_iso813_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ case 3:
+ s->range_table = &range_iso813_2_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ default:
+ s->range_table = &range_iso813_1_ai;
+ break;
+ }
+ break;
case boardACL8113:
- udelay(5);
+ switch (it->options[1]) {
+ case 0:
+ s->range_table = &range_acl8113_1_ai;
+ break;
+ case 1:
+ s->range_table = &range_acl8113_1_2_ai;
+ break;
+ case 2:
+ s->range_table = &range_acl8113_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ case 3:
+ s->range_table = &range_acl8113_2_2_ai;
+ devpriv->range_correction = 1;
+ break;
+ default:
+ s->range_table = &range_acl8113_1_ai;
+ break;
+ }
break;
}
- udelay(5);
}
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl812_board *board = comedi_board(dev);
struct pcl812_private *devpriv;
- int ret, subdev;
- unsigned int dma;
- unsigned long pages;
struct comedi_subdevice *s;
int n_subdevices;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
+ int subdev;
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl812, 0,
+ ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (((1 << dma) & board->DMAbits) == 0) {
- dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && board->has_dma &&
+ (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
if (ret) {
dev_err(dev->class_dev,
- "unable to allocate DMA %u, FAIL!\n", dma);
- return -EBUSY; /* DMA isn't free */
- }
- devpriv->dma = dma;
- pages = 1; /* we want 8KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- /*
- * maybe experiment with try_to_free_pages()
- * will help ....
- */
- return -EBUSY; /* no buffer :-( */
- }
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
+ "unable to request DMA channel %d\n",
+ it->options[2]);
return -EBUSY;
}
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = PAGE_SIZE * (1 << pages);
+ devpriv->dma = it->options[2];
+
+ devpriv->dmapages = 1; /* we want 8KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
+
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
+
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
+ }
}
-no_dma:
- n_subdevices = 0;
- if (board->n_aichan > 0)
- n_subdevices++;
+ /* differential analog inputs? */
+ switch (board->board_type) {
+ case boardA821:
+ if (it->options[2] == 1)
+ devpriv->use_diff = 1;
+ break;
+ case boardACL8112:
+ case boardACL8216:
+ if (it->options[4] == 1)
+ devpriv->use_diff = 1;
+ break;
+ }
+
+ n_subdevices = 1; /* all boardtypes have analog inputs */
if (board->n_aochan > 0)
n_subdevices++;
- if (board->n_dichan > 0)
- n_subdevices++;
- if (board->n_dochan > 0)
- n_subdevices++;
+ if (board->has_dio)
+ n_subdevices += 2;
ret = comedi_alloc_subdevices(dev, n_subdevices);
if (ret)
@@ -1184,146 +1303,47 @@ no_dma:
subdev = 0;
- /* analog input */
- if (board->n_aichan > 0) {
- s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- switch (board->board_type) {
- case boardA821:
- if (it->options[2] == 1) {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- devpriv->use_diff = 1;
- } else {
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- }
- break;
- case boardACL8112:
- case boardACL8216:
- if (it->options[4] == 1) {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- devpriv->use_diff = 1;
- } else {
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- }
- break;
- default:
- s->n_chan = board->n_aichan;
- s->subdev_flags |= SDF_GROUND;
- break;
- }
- s->maxdata = board->ai_maxdata;
- s->range_table = board->rangelist_ai;
- if (board->board_type == boardACL8216)
- s->insn_read = acl8216_ai_insn_read;
- else
- s->insn_read = pcl812_ai_insn_read;
-
- devpriv->use_MPC = board->haveMPC508;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = MAX_CHANLIST_LEN;
- s->do_cmdtest = pcl812_ai_cmdtest;
- s->do_cmd = pcl812_ai_cmd;
- s->poll = pcl812_ai_poll;
- s->cancel = pcl812_ai_cancel;
- }
- switch (board->board_type) {
- case boardPCL812PG:
- if (it->options[4] == 1)
- s->range_table = &range_pcl812pg2_ai;
- break;
- case boardPCL812:
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range812_bipolar1_25;
- break;
- case 4:
- s->range_table = &range812_bipolar0_625;
- break;
- case 5:
- s->range_table = &range812_bipolar0_3125;
- break;
- default:
- s->range_table = &range_bipolar10;
- break;
- }
- break;
- break;
- case boardPCL813B:
- if (it->options[1] == 1)
- s->range_table = &range_pcl813b2_ai;
- break;
- case boardISO813:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_iso813_1_ai;
- break;
- case 1:
- s->range_table = &range_iso813_1_2_ai;
- break;
- case 2:
- s->range_table = &range_iso813_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_iso813_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_iso813_1_ai;
- break;
- }
- break;
- case boardACL8113:
- switch (it->options[1]) {
- case 0:
- s->range_table = &range_acl8113_1_ai;
- break;
- case 1:
- s->range_table = &range_acl8113_1_2_ai;
- break;
- case 2:
- s->range_table = &range_acl8113_2_ai;
- devpriv->range_correction = 1;
- break;
- case 3:
- s->range_table = &range_acl8113_2_2_ai;
- devpriv->range_correction = 1;
- break;
- default:
- s->range_table = &range_acl8113_1_ai;
- break;
- }
- break;
- }
- subdev++;
+ /* Analog Input subdevice */
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (devpriv->use_diff) {
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = board->n_aichan / 2;
+ } else {
+ s->subdev_flags |= SDF_GROUND;
+ s->n_chan = board->n_aichan;
+ }
+ s->maxdata = board->has_16bit_ai ? 0xffff : 0x0fff;
+
+ pcl812_set_ai_range_table(dev, s, it);
+
+ s->insn_read = pcl812_ai_insn_read;
+
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = MAX_CHANLIST_LEN;
+ s->do_cmdtest = pcl812_ai_cmdtest;
+ s->do_cmd = pcl812_ai_cmd;
+ s->poll = pcl812_ai_poll;
+ s->cancel = pcl812_ai_cancel;
}
+ devpriv->use_mpc508 = board->has_mpc508_mux;
+
+ subdev++;
+
/* analog output */
if (board->n_aochan > 0) {
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = 0xfff;
- s->range_table = board->rangelist_ao;
- s->insn_read = pcl812_ao_insn_read;
- s->insn_write = pcl812_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = board->n_aochan;
+ s->maxdata = 0xfff;
+ s->range_table = &range_unipolar5;
+ s->insn_read = pcl812_ao_insn_read;
+ s->insn_write = pcl812_ao_insn_write;
switch (board->board_type) {
case boardA821:
if (it->options[3] == 1)
@@ -1342,33 +1362,30 @@ no_dma:
subdev++;
}
- /* digital input */
- if (board->n_dichan > 0) {
+ if (board->has_dio) {
+ /* Digital Input subdevice */
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_di_insn_bits;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl812_di_insn_bits;
subdev++;
- }
- /* digital output */
- if (board->n_dochan > 0) {
+ /* Digital Output subdevice */
s = &dev->subdevices[subdev];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl812_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl812_do_insn_bits;
subdev++;
}
switch (board->board_type) {
case boardACL8216:
- devpriv->ai_is16b = 1;
case boardPCL812PG:
case boardPCL812:
case boardACL8112:
@@ -1390,8 +1407,6 @@ no_dma:
break;
}
- devpriv->valid = 1;
-
pcl812_reset(dev);
return 0;
@@ -1403,72 +1418,15 @@ static void pcl812_detach(struct comedi_device *dev)
if (devpriv) {
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
if (devpriv->dma)
free_dma(devpriv->dma);
}
comedi_legacy_detach(dev);
}
-static const struct pcl812_board boardtypes[] = {
- {"pcl812", boardPCL812, 16, 0, 2, 16, 16, 0x0fff,
- 33000, I8254_OSC_BASE_2MHZ, &range_bipolar10, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"pcl812pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 33000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"acl8112pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl812pg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"acl8112dg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"acl8112hg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"a821pgl", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, &range_unipolar5,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a821pglnda", boardA821, 16, 8, 0, 0, 0, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b_ai, NULL,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a821pgh", boardA821, 16, 8, 1, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_a821pgh_ai, &range_unipolar5,
- 0x000c, 0x00, PCLx1x_IORANGE, 0},
- {"a822pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a822pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 10000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a823pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, I8254_OSC_BASE_2MHZ, &range_acl8112dg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"a823pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
- 8000, I8254_OSC_BASE_2MHZ, &range_acl8112hg_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
- {"pcl813", boardPCL813, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_pcl813b_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"pcl813b", boardPCL813B, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_pcl813b_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"acl8113", boardACL8113, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_acl8113_1_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"iso813", boardISO813, 32, 0, 0, 0, 0, 0x0fff,
- 0, 0, &range_iso813_1_ai, NULL,
- 0x0000, 0x00, PCLx1x_IORANGE, 0},
- {"acl8216", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
- {"a826pg", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
- 10000, I8254_OSC_BASE_2MHZ, &range_pcl813b2_ai, &range_unipolar5,
- 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
-};
-
static struct comedi_driver pcl812_driver = {
.driver_name = "pcl812",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index e9d470459933..6f276f23fabe 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -44,40 +44,38 @@ Configuration Options:
#include "comedi_fc.h"
#include "8253.h"
-/* boards constants */
-/* IO space len */
-#define PCLx1x_RANGE 16
-
-/* INTEL 8254 counters */
-#define PCL816_CTR0 4
-#define PCL816_CTR1 5
-#define PCL816_CTR2 6
-/* R: counter read-back register W: counter control */
-#define PCL816_CTRCTL 7
-
-/* R: A/D high byte W: A/D range control */
-#define PCL816_RANGE 9
-/* W: clear INT request */
-#define PCL816_CLRINT 10
-/* R: next mux scan channel W: mux scan channel & range control pointer */
-#define PCL816_MUX 11
-/* R/W: operation control register */
-#define PCL816_CONTROL 12
-
-/* R: return status byte W: set DMA/IRQ */
-#define PCL816_STATUS 13
-#define PCL816_STATUS_DRDY_MASK 0x80
-
-/* R: low byte of A/D W: soft A/D trigger */
-#define PCL816_AD_LO 8
-/* R: high byte of A/D W: A/D range control */
-#define PCL816_AD_HI 9
-
-/* type of interrupt handler */
-#define INT_TYPE_AI1_INT 1
-#define INT_TYPE_AI1_DMA 2
-#define INT_TYPE_AI3_INT 4
-#define INT_TYPE_AI3_DMA 5
+/*
+ * Register I/O map
+ */
+#define PCL816_DO_DI_LSB_REG 0x00
+#define PCL816_DO_DI_MSB_REG 0x01
+#define PCL816_TIMER_BASE 0x04
+#define PCL816_AI_LSB_REG 0x08
+#define PCL816_AI_MSB_REG 0x09
+#define PCL816_RANGE_REG 0x09
+#define PCL816_CLRINT_REG 0x0a
+#define PCL816_MUX_REG 0x0b
+#define PCL816_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
+#define PCL816_CTRL_REG 0x0c
+#define PCL816_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL816_CTRL_SOFT_TRIG (1 << 0)
+#define PCL816_CTRL_PACER_TRIG (1 << 1)
+#define PCL816_CTRL_EXT_TRIG (1 << 2)
+#define PCL816_CTRL_POE (1 << 3)
+#define PCL816_CTRL_DMAEN (1 << 4)
+#define PCL816_CTRL_INTEN (1 << 5)
+#define PCL816_CTRL_DMASRC_SLOT0 (0 << 6)
+#define PCL816_CTRL_DMASRC_SLOT1 (1 << 6)
+#define PCL816_CTRL_DMASRC_SLOT2 (2 << 6)
+#define PCL816_STATUS_REG 0x0d
+#define PCL816_STATUS_NEXT_CHAN_MASK (0xf << 0)
+#define PCL816_STATUS_INTSRC_MASK (3 << 4)
+#define PCL816_STATUS_INTSRC_SLOT0 (0 << 4)
+#define PCL816_STATUS_INTSRC_SLOT1 (1 << 4)
+#define PCL816_STATUS_INTSRC_SLOT2 (2 << 4)
+#define PCL816_STATUS_INTSRC_DMA (3 << 4)
+#define PCL816_STATUS_INTACT (1 << 6)
+#define PCL816_STATUS_DRDY (1 << 7)
#define MAGIC_DMA_WORD 0x5a5a
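
The PCL816 status register packs the next scan channel, the interrupt source, the interrupt-active flag and the data-ready flag (0 = ready) into one byte. A small decoding sketch using the definitions above (illustrative only, not part of the patch):

/* Illustrative sketch (not part of the patch): decode the PCL816
 * status byte using the bit definitions above.
 */
static void pcl816_decode_status(unsigned int status)
{
	unsigned int next_chan = status & PCL816_STATUS_NEXT_CHAN_MASK;
	unsigned int intsrc = status & PCL816_STATUS_INTSRC_MASK;
	int int_active = (status & PCL816_STATUS_INTACT) ? 1 : 0;
	int data_ready = (status & PCL816_STATUS_DRDY) ? 0 : 1;

	pr_debug("next chan %u, intsrc %#x, intact %d, drdy %d\n",
		 next_chan, intsrc, int_active, data_ready);
}
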
@@ -95,314 +93,284 @@ static const struct comedi_lrange range_pcl816 = {
};
struct pcl816_board {
+ const char *name;
+ int ai_maxdata;
+ int ao_maxdata;
+ int ai_chanlist;
+};
- const char *name; /* board name */
- int n_ranges; /* len of range list */
- int n_aichan; /* num of A/D chans in diferencial mode */
- unsigned int ai_ns_min; /* minimal allowed delay between samples (in ns) */
- int n_aochan; /* num of D/A chans */
- int n_dichan; /* num of DI chans */
- int n_dochan; /* num of DO chans */
- const struct comedi_lrange *ai_range_type; /* default A/D rangelist */
- const struct comedi_lrange *ao_range_type; /* default D/A rangelist */
- unsigned int io_range; /* len of IO space */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int DMAbits; /* allowed DMA chans */
- int ai_maxdata; /* maxdata for A/D */
- int ao_maxdata; /* maxdata for D/A */
- int ai_chanlist; /* allowed len of channel list A/D */
- int ao_chanlist; /* allowed len of channel list D/A */
- int i8254_osc_base; /* 1/frequency of on board oscilator in ns */
+static const struct pcl816_board boardtypes[] = {
+ {
+ .name = "pcl816",
+ .ai_maxdata = 0xffff,
+ .ao_maxdata = 0xffff,
+ .ai_chanlist = 1024,
+ }, {
+ .name = "pcl814b",
+ .ai_maxdata = 0x3fff,
+ .ao_maxdata = 0x3fff,
+ .ai_chanlist = 1024,
+ },
};
struct pcl816_private {
-
unsigned int dma; /* used DMA, 0=don't use DMA */
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* pointers to the start of the DMA buffers */
- unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */
unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */
- unsigned int dmasamplsize; /* size in samples hwdmasize[0]/2 */
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to the end of the record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
-
- unsigned int ai_scans; /* len of scanlist */
- unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */
- int irq_blocked; /* 1=IRQ now uses any subdev */
- int irq_was_now_closed; /* when IRQ finish, there's stored int816_mode for last interrupt */
- int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
- struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */
int ai_act_scan; /* how many scans we finished */
- unsigned int ai_act_chanlist[16]; /* MUX setting for actual AI operations */
- unsigned int ai_act_chanlist_len; /* how long is actual MUX list */
- unsigned int ai_act_chanlist_pos; /* actual position in MUX list */
- unsigned int ai_n_chan; /* how many channels per scan */
unsigned int ai_poll_ptr; /* how many samples transferred by poll */
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_cmd_canceled:1;
};
-/*
-==============================================================================
-*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int chanlen);
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int seglen);
-static int pcl816_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-static int pcl816_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
-
-/*
-==============================================================================
- ANALOG INPUT MODE0, 816 cards, slow version
-*/
-static int pcl816_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl816_start_pacer(struct comedi_device *dev, bool load_counters)
{
- int n;
- int timeout;
-
- /* software trigger, DMA and INT off */
- outb(0, dev->iobase + PCL816_CONTROL);
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
-
- /* Set the input channel */
- outb(CR_CHAN(insn->chanspec) & 0xf, dev->iobase + PCL816_MUX);
- /* select gain */
- outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE);
-
- for (n = 0; n < insn->n; n++) {
-
- outb(0, dev->iobase + PCL816_AD_LO); /* start conversion */
-
- timeout = 100;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL816_STATUS) &
- PCL816_STATUS_DRDY_MASK)) {
- /* return read value */
- data[n] =
- ((inb(dev->iobase +
- PCL816_AD_HI) << 8) |
- (inb(dev->iobase + PCL816_AD_LO)));
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
- break;
- }
- udelay(1);
- }
- /* Return timeout error */
- if (!timeout) {
- comedi_error(dev, "A/D insn timeout\n");
- data[0] = 0;
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL816_CLRINT);
- return -EIO;
- }
+ struct pcl816_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL816_TIMER_BASE;
+
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE1 | I8254_BINARY);
+ i8254_write(timer_base, 0, 0, 0x00ff);
+ udelay(1);
+
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
+ if (load_counters) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
}
- return n;
}
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818 cards
- one sample per interrupt version
-*/
-static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
+static void pcl816_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl816_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned char low, hi;
- int timeout = 50; /* wait max 50us */
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int dma_flags;
+ unsigned int bytes;
- while (timeout--) {
- if (!(inb(dev->iobase + PCL816_STATUS) &
- PCL816_STATUS_DRDY_MASK))
- break;
- udelay(1);
+ bytes = devpriv->hwdmasize;
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* how many samples we must transfer */
+ bytes = cmd->chanlist_len *
+ cmd->stop_arg *
+ sizeof(short);
+
+ /* how many DMA pages we must fill */
+ devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
+
+ /* bytes to move on the last DMA transfer */
+ devpriv->last_dma_run = bytes % devpriv->hwdmasize;
+ devpriv->dma_runs_to_end--;
+ if (devpriv->dma_runs_to_end >= 0)
+ bytes = devpriv->hwdmasize;
+ } else
+ devpriv->dma_runs_to_end = -1;
+
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, bytes);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
+}
+
+static void pcl816_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl816_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long dma_flags;
+
+ disable_dma(devpriv->dma);
+ if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
+ /* switch dma bufs */
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ dma_flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma,
+ devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->dma_runs_to_end)
+ set_dma_count(devpriv->dma, devpriv->hwdmasize);
+ else
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ release_dma_lock(dma_flags);
+ enable_dma(devpriv->dma);
}
- if (!timeout) { /* timeout, bail error */
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ devpriv->dma_runs_to_end--;
+}
+
+static void pcl816_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chan,
+ unsigned int range)
+{
+ outb(chan, dev->iobase + PCL816_MUX_REG);
+ outb(range, dev->iobase + PCL816_RANGE_REG);
+}
+
+static void pcl816_ai_set_chan_scan(struct comedi_device *dev,
+ unsigned int first_chan,
+ unsigned int last_chan)
+{
+ outb(PCL816_MUX_SCAN(first_chan, last_chan),
+ dev->iobase + PCL816_MUX_REG);
+}
+
+static void pcl816_ai_setup_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist,
+ unsigned int seglen)
+{
+ unsigned int first_chan = CR_CHAN(chanlist[0]);
+ unsigned int last_chan;
+ unsigned int range;
+ unsigned int i;
+
+ /* store range list to card */
+ for (i = 0; i < seglen; i++) {
+ last_chan = CR_CHAN(chanlist[i]);
+ range = CR_RANGE(chanlist[i]);
+
+ pcl816_ai_set_chan_range(dev, last_chan, range);
}
- /* get the sample */
- low = inb(dev->iobase + PCL816_AD_LO);
- hi = inb(dev->iobase + PCL816_AD_HI);
+ udelay(1);
+
+ pcl816_ai_set_chan_scan(dev, first_chan, last_chan);
+}
+
+static void pcl816_ai_clear_eoc(struct comedi_device *dev)
+{
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL816_CLRINT_REG);
+}
+
+static void pcl816_ai_soft_trig(struct comedi_device *dev)
+{
+ /* writing any value triggers a software conversion */
+ outb(0, dev->iobase + PCL816_AI_LSB_REG);
+}
- comedi_buf_put(s->async, (hi << 8) | low);
+static unsigned int pcl816_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
+ val = inb(dev->iobase + PCL816_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL816_AI_LSB_REG);
- if (++devpriv->ai_act_chanlist_pos >= devpriv->ai_act_chanlist_len)
- devpriv->ai_act_chanlist_pos = 0;
+ return val & s->maxdata;
+}
+
+static int pcl816_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inb(dev->iobase + PCL816_STATUS_REG);
+ if ((status & PCL816_STATUS_DRDY) == 0)
+ return 0;
+ return -EBUSY;
+}
+
+static bool pcl816_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl816_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ s->async->events |= COMEDI_CB_BLOCK;
s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
+ if (s->async->cur_chan >= cmd->chanlist_len) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
+ s->async->events |= COMEDI_CB_EOS;
}
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- /* all data sampled */
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->ai_act_scan >= cmd->stop_arg) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
+ }
+
+ return true;
}
-/*
-==============================================================================
- analog input dma mode 1 & 3, 816 cards
-*/
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
- struct pcl816_private *devpriv = dev->private;
int i;
- s->async->events = 0;
-
for (i = 0; i < len; i++) {
-
comedi_buf_put(s->async, ptr[bufptr++]);
- if (++devpriv->ai_act_chanlist_pos >=
- devpriv->ai_act_chanlist_len) {
- devpriv->ai_act_chanlist_pos = 0;
- }
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- }
-
- if (!devpriv->ai_neverending)
- /* all data sampled */
- if (devpriv->ai_act_scan >= devpriv->ai_scans) {
- pcl816_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_BLOCK;
- break;
- }
+ if (!pcl816_ai_next_chan(dev, s))
+ return;
}
-
- comedi_event(dev, s);
}
-static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
+static irqreturn_t pcl816_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct pcl816_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- int len, bufptr, this_dma_buf;
- unsigned long dma_flags;
+ struct pcl816_private *devpriv = dev->private;
unsigned short *ptr;
+ unsigned int bufptr;
+ unsigned int len;
- disable_dma(devpriv->dma);
- this_dma_buf = devpriv->next_dma_buf;
-
- /* switch dma bufs */
- if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) {
+ if (!dev->attached || !devpriv->ai_cmd_running) {
+ pcl816_ai_clear_eoc(dev);
+ return IRQ_HANDLED;
+ }
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
-/* clear_dma_ff (devpriv->dma); */
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end) {
- set_dma_count(devpriv->dma,
- devpriv->hwdmasize[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
+ if (devpriv->ai_cmd_canceled) {
+ devpriv->ai_cmd_canceled = 0;
+ pcl816_ai_clear_eoc(dev);
+ return IRQ_HANDLED;
}
- devpriv->dma_runs_to_end--;
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
+ ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
- ptr = (unsigned short *)devpriv->dmabuf[this_dma_buf];
+ pcl816_ai_setup_next_dma(dev, s);
- len = (devpriv->hwdmasize[0] >> 1) - devpriv->ai_poll_ptr;
+ len = (devpriv->hwdmasize >> 1) - devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
- return IRQ_HANDLED;
-}
-
-/*
-==============================================================================
- INT procedure
-*/
-static irqreturn_t interrupt_pcl816(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct pcl816_private *devpriv = dev->private;
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
- return IRQ_HANDLED;
- }
-
- switch (devpriv->int816_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- return interrupt_pcl816_ai_mode13_dma(irq, d);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- return interrupt_pcl816_ai_mode13_int(irq, d);
- }
+ pcl816_ai_clear_eoc(dev);
- outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
- if (!dev->irq || !devpriv->irq_blocked || !devpriv->int816_mode) {
- if (devpriv->irq_was_now_closed) {
- devpriv->irq_was_now_closed = 0;
- /* comedi_error(dev,"last IRQ.."); */
- return IRQ_HANDLED;
- }
- comedi_error(dev, "bad IRQ!");
- return IRQ_NONE;
- }
- comedi_error(dev, "IRQ from unknown source!");
- return IRQ_NONE;
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
-*/
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- const struct pcl816_board *board = comedi_board(dev);
+ struct pcl816_private *devpriv = dev->private;
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -432,8 +400,7 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
if (cmd->convert_src == TRIG_TIMER)
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- board->ai_ns_min);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
else /* TRIG_EXT */
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
@@ -451,11 +418,12 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
/* step 4: fix up any arguments */
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
+ if (cmd->convert_arg < 10000)
+ cmd->convert_arg = 10000;
if (tmp != cmd->convert_arg)
err++;
}
@@ -477,120 +445,40 @@ static int pcl816_ai_cmdtest(struct comedi_device *dev,
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv = dev->private;
- unsigned int divisor1 = 0, divisor2 = 0, dma_flags, bytes, dmairq;
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int ctrl;
unsigned int seglen;
- if (cmd->start_src != TRIG_NOW)
- return -EINVAL;
- if (cmd->scan_begin_src != TRIG_FOLLOW)
- return -EINVAL;
- if (cmd->scan_end_src != TRIG_COUNT)
- return -EINVAL;
- if (cmd->scan_end_arg != cmd->chanlist_len)
- return -EINVAL;
-/* if(cmd->chanlist_len>MAX_CHANLIST_LEN) return -EINVAL; */
- if (devpriv->irq_blocked)
+ if (devpriv->ai_cmd_running)
return -EBUSY;
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg < board->ai_ns_min)
- cmd->convert_arg = board->ai_ns_min;
-
- i8253_cascade_ns_to_timer(board->i8254_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg, cmd->flags);
-
- /* PCL816 crash if any divisor is set to 1 */
- if (divisor1 == 1) {
- divisor1 = 2;
- divisor2 /= 2;
- }
- if (divisor2 == 1) {
- divisor2 = 2;
- divisor1 /= 2;
- }
- }
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
+ pcl816_start_pacer(dev, false);
seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
if (seglen < 1)
return -EINVAL;
- setup_channel_list(dev, s, cmd->chanlist, seglen);
+ pcl816_ai_setup_chanlist(dev, cmd->chanlist, seglen);
udelay(1);
- devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_act_scan = 0;
s->async->cur_chan = 0;
- devpriv->irq_blocked = 1;
+ devpriv->ai_cmd_running = 1;
devpriv->ai_poll_ptr = 0;
- devpriv->irq_was_now_closed = 0;
+ devpriv->ai_cmd_canceled = 0;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_scans = cmd->stop_arg;
- devpriv->ai_neverending = 0;
- } else {
- devpriv->ai_scans = 0;
- devpriv->ai_neverending = 1;
- }
-
- if (devpriv->dma) {
- bytes = devpriv->hwdmasize[0];
- if (!devpriv->ai_neverending) {
- /* how many */
- bytes = s->async->cmd.chanlist_len *
- s->async->cmd.chanlist_len *
- sizeof(short);
-
- /* how many DMA pages we must fill */
- devpriv->dma_runs_to_end = bytes /
- devpriv->hwdmasize[0];
-
- /* on last dma transfer must be moved */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize[0];
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize[0];
- } else
- devpriv->dma_runs_to_end = -1;
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(dma_flags);
- enable_dma(devpriv->dma);
- }
-
- start_pacer(dev, 1, divisor1, divisor2);
- dmairq = ((devpriv->dma & 0x3) << 4) | (dev->irq & 0x7);
-
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- devpriv->int816_mode = INT_TYPE_AI1_DMA;
-
- /* Pacer+IRQ+DMA */
- outb(0x32, dev->iobase + PCL816_CONTROL);
+ pcl816_ai_setup_dma(dev, s);
- /* write irq and DMA to card */
- outb(dmairq, dev->iobase + PCL816_STATUS);
- break;
+ pcl816_start_pacer(dev, true);
- default:
- devpriv->int816_mode = INT_TYPE_AI3_DMA;
-
- /* Ext trig+IRQ+DMA */
- outb(0x34, dev->iobase + PCL816_CONTROL);
+ ctrl = PCL816_CTRL_INTEN | PCL816_CTRL_DMAEN | PCL816_CTRL_DMASRC_SLOT0;
+ if (cmd->convert_src == TRIG_TIMER)
+ ctrl |= PCL816_CTRL_PACER_TRIG;
+ else /* TRIG_EXT */
+ ctrl |= PCL816_CTRL_EXT_TRIG;
- /* write irq to card */
- outb(dmairq, dev->iobase + PCL816_STATUS);
- break;
- }
+ outb(ctrl, dev->iobase + PCL816_CTRL_REG);
+ outb((devpriv->dma << 4) | dev->irq, dev->iobase + PCL816_STATUS_REG);
return 0;
}
@@ -601,9 +489,6 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned long flags;
unsigned int top1, top2, i;
- if (!devpriv->dma)
- return 0; /* poll is valid only for DMA transfer */
-
spin_lock_irqsave(&dev->spinlock, flags);
for (i = 0; i < 20; i++) {
@@ -618,7 +503,7 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* where is the DMA pointer in the buffer now */
- top1 = devpriv->hwdmasize[0] - top1;
+ top1 = devpriv->hwdmasize - top1;
top1 >>= 1; /* sample position */
top2 = top1 - devpriv->ai_poll_ptr;
if (top2 < 1) { /* no new samples */
@@ -634,134 +519,34 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = top1; /* new buffer position */
spin_unlock_irqrestore(&dev->spinlock, flags);
+ cfc_handle_events(dev, s);
+
return s->async->buf_write_count - s->async->buf_read_count;
}
-/*
-==============================================================================
- cancel any mode 1-4 AI
-*/
static int pcl816_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl816_private *devpriv = dev->private;
- if (devpriv->irq_blocked > 0) {
- switch (devpriv->int816_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- disable_dma(devpriv->dma);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- outb(inb(dev->iobase + PCL816_CONTROL) & 0x73,
- dev->iobase + PCL816_CONTROL); /* Stop A/D */
- udelay(1);
- outb(0, dev->iobase + PCL816_CONTROL); /* Stop A/D */
-
- /* Stop pacer */
- outb(0xb0, dev->iobase + PCL816_CTRCTL);
- outb(0x70, dev->iobase + PCL816_CTRCTL);
- outb(0, dev->iobase + PCL816_AD_LO);
- inb(dev->iobase + PCL816_AD_LO);
- inb(dev->iobase + PCL816_AD_HI);
-
- /* clear INT request */
- outb(0, dev->iobase + PCL816_CLRINT);
-
- /* Stop A/D */
- outb(0, dev->iobase + PCL816_CONTROL);
- devpriv->irq_blocked = 0;
- devpriv->irq_was_now_closed = devpriv->int816_mode;
- devpriv->int816_mode = 0;
- devpriv->last_int_sub = s;
-/* s->busy = 0; */
- break;
- }
- }
- return 0;
-}
-
-/*
-==============================================================================
- chech for PCL816
-*/
-static int pcl816_check(unsigned long iobase)
-{
- outb(0x00, iobase + PCL816_MUX);
- udelay(1);
- if (inb(iobase + PCL816_MUX) != 0x00)
- return 1; /* there isn't card */
- outb(0x55, iobase + PCL816_MUX);
- udelay(1);
- if (inb(iobase + PCL816_MUX) != 0x55)
- return 1; /* there isn't card */
- outb(0x00, iobase + PCL816_MUX);
- udelay(1);
- outb(0x18, iobase + PCL816_CONTROL);
- udelay(1);
- if (inb(iobase + PCL816_CONTROL) != 0x18)
- return 1; /* there isn't card */
- return 0; /* ok, card exist */
-}
-
-/*
-==============================================================================
- reset whole PCL-816 cards
-*/
-static void pcl816_reset(struct comedi_device *dev)
-{
-/* outb (0, dev->iobase + PCL818_DA_LO); DAC=0V */
-/* outb (0, dev->iobase + PCL818_DA_HI); */
-/* udelay (1); */
-/* outb (0, dev->iobase + PCL818_DO_HI); DO=$0000 */
-/* outb (0, dev->iobase + PCL818_DO_LO); */
-/* udelay (1); */
- outb(0, dev->iobase + PCL816_CONTROL);
- outb(0, dev->iobase + PCL816_MUX);
- outb(0, dev->iobase + PCL816_CLRINT);
- outb(0xb0, dev->iobase + PCL816_CTRCTL); /* Stop pacer */
- outb(0x70, dev->iobase + PCL816_CTRCTL);
- outb(0x30, dev->iobase + PCL816_CTRCTL);
- outb(0, dev->iobase + PCL816_RANGE);
-}
+ if (!devpriv->ai_cmd_running)
+ return 0;
-/*
-==============================================================================
- Start/stop pacer onboard pacer
-*/
-static void
-start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
- unsigned int divisor2)
-{
- outb(0x32, dev->iobase + PCL816_CTRCTL);
- outb(0xff, dev->iobase + PCL816_CTR0);
- outb(0x00, dev->iobase + PCL816_CTR0);
- udelay(1);
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_clear_eoc(dev);
- /* set counter 2 as mode 3 */
- outb(0xb4, dev->iobase + PCL816_CTRCTL);
- /* set counter 1 as mode 3 */
- outb(0x74, dev->iobase + PCL816_CTRCTL);
- udelay(1);
+ /* Stop pacer */
+ i8254_set_mode(dev->iobase + PCL816_TIMER_BASE, 0,
+ 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(dev->iobase + PCL816_TIMER_BASE, 0,
+ 1, I8254_MODE0 | I8254_BINARY);
- if (mode == 1) {
- dev_dbg(dev->class_dev, "mode %d, divisor1 %d, divisor2 %d\n",
- mode, divisor1, divisor2);
- outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL816_CTR1);
- }
+ devpriv->ai_cmd_running = 0;
+ devpriv->ai_cmd_canceled = 1;
- /* clear pending interrupts (just in case) */
-/* outb(0, dev->iobase + PCL816_CLRINT); */
+ return 0;
}
-/*
-==============================================================================
- Check if channel list from user is built correctly
- If it's ok, then return non-zero length of repeated segment of channel list
-*/
static int
check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int *chanlist,
@@ -818,180 +603,181 @@ check_channel_list(struct comedi_device *dev,
return seglen; /* we can serve this with MUX logic */
}
-/*
-==============================================================================
- Program scan/gain logic with channel list.
-*/
-static void
-setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int *chanlist,
- unsigned int seglen)
+static int pcl816_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcl816_private *devpriv = dev->private;
- unsigned int i;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ int ret = 0;
+ int i;
- devpriv->ai_act_chanlist_len = seglen;
- devpriv->ai_act_chanlist_pos = 0;
+ outb(PCL816_CTRL_SOFT_TRIG, dev->iobase + PCL816_CTRL_REG);
- for (i = 0; i < seglen; i++) { /* store range list to card */
- devpriv->ai_act_chanlist[i] = CR_CHAN(chanlist[i]);
- outb(CR_CHAN(chanlist[0]) & 0xf, dev->iobase + PCL816_MUX);
- /* select gain */
- outb(CR_RANGE(chanlist[0]), dev->iobase + PCL816_RANGE);
+ pcl816_ai_set_chan_range(dev, chan, range);
+ pcl816_ai_set_chan_scan(dev, chan, chan);
+
+ for (i = 0; i < insn->n; i++) {
+ pcl816_ai_clear_eoc(dev);
+ pcl816_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl816_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl816_ai_get_sample(dev, s);
}
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_clear_eoc(dev);
- udelay(1);
- /* select channel interval to scan */
- outb(devpriv->ai_act_chanlist[0] |
- (devpriv->ai_act_chanlist[seglen - 1] << 4),
- dev->iobase + PCL816_MUX);
+ return ret ? ret : insn->n;
+}
+
+static int pcl816_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL816_DO_DI_LSB_REG) |
+ (inb(dev->iobase + PCL816_DO_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl816_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL816_DO_DI_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL816_DO_DI_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static void pcl816_reset(struct comedi_device *dev)
+{
+ unsigned long timer_base = dev->iobase + PCL816_TIMER_BASE;
+
+ outb(PCL816_CTRL_DISABLE_TRIG, dev->iobase + PCL816_CTRL_REG);
+ pcl816_ai_set_chan_range(dev, 0, 0);
+ pcl816_ai_clear_eoc(dev);
+
+ /* Stop pacer */
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE0 | I8254_BINARY);
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + PCL816_DO_DI_LSB_REG);
+ outb(0, dev->iobase + PCL816_DO_DI_MSB_REG);
}
static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl816_board *board = comedi_board(dev);
struct pcl816_private *devpriv;
- int ret;
- unsigned int dma;
- unsigned long pages;
- /* int i; */
struct comedi_subdevice *s;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
-
- if (pcl816_check(dev->iobase)) {
- dev_err(dev->class_dev, "I can't detect board. FAIL!\n");
- return -EIO;
- }
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl816, 0,
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
+ /* we can use IRQ 2-7 for async command support */
+ if (it->options[1] >= 2 && it->options[1] <= 7) {
+ ret = request_irq(it->options[1], pcl816_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
- devpriv->int816_mode = 0; /* mode of irq */
-
- /* grab our DMA */
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
-
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (dma < 1)
- goto no_dma; /* DMA disabled */
-
- if (((1 << dma) & board->DMAbits) == 0) {
- dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
if (ret) {
dev_err(dev->class_dev,
- "unable to allocate DMA %u, FAIL!\n", dma);
- return -EBUSY; /* DMA isn't free */
+ "unable to request DMA channel %d\n",
+ it->options[2]);
+ return -EBUSY;
}
+ devpriv->dma = it->options[2];
- devpriv->dma = dma;
- pages = 2; /* we need 16KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
+ devpriv->dmapages = 2; /* we need 16KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
- if (!devpriv->dmabuf[0]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- /*
- * maybe experiment with try_to_free_pages()
- * will help ....
- */
- return -EBUSY; /* no buffer :-( */
- }
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- dev_err(dev->class_dev,
- "unable to allocate DMA buffer, FAIL!\n");
- return -EBUSY;
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
}
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
}
-no_dma:
-
-/* if (board->n_aochan > 0)
- subdevs[1] = COMEDI_SUBD_AO;
- if (board->n_dichan > 0)
- subdevs[2] = COMEDI_SUBD_DI;
- if (board->n_dochan > 0)
- subdevs[3] = COMEDI_SUBD_DO;
-*/
-
- ret = comedi_alloc_subdevices(dev, 1);
+ ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- if (board->n_aichan > 0) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
- s->n_chan = board->n_aichan;
- s->maxdata = board->ai_maxdata;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl816_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = board->ai_chanlist;
- s->do_cmdtest = pcl816_ai_cmdtest;
- s->do_cmd = pcl816_ai_cmd;
- s->poll = pcl816_ai_poll;
- s->cancel = pcl816_ai_cancel;
- }
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_CMD_READ | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = board->ai_maxdata;
+ s->range_table = &range_pcl816;
+ s->insn_read = pcl816_ai_insn_read;
+ if (devpriv->dma) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = board->ai_chanlist;
+ s->do_cmdtest = pcl816_ai_cmdtest;
+ s->do_cmd = pcl816_ai_cmd;
+ s->poll = pcl816_ai_poll;
+ s->cancel = pcl816_ai_cancel;
}
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_UNUSED;
#if 0
-case COMEDI_SUBD_AO:
+ subdevs[1] = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
+ s->n_chan = 1;
s->maxdata = board->ao_maxdata;
- s->len_chanlist = board->ao_chanlist;
- s->range_table = board->ao_range_type;
- break;
-
-case COMEDI_SUBD_DI:
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->len_chanlist = board->n_dichan;
- s->range_table = &range_digital;
- break;
-
-case COMEDI_SUBD_DO:
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->len_chanlist = board->n_dochan;
- s->range_table = &range_digital;
- break;
+ s->range_table = &range_pcl816;
#endif
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl816_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl816_do_insn_bits;
+
pcl816_reset(dev);
return 0;
@@ -1007,34 +793,13 @@ static void pcl816_detach(struct comedi_device *dev)
if (devpriv->dma)
free_dma(devpriv->dma);
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
comedi_legacy_detach(dev);
}
-static const struct pcl816_board boardtypes[] = {
- {"pcl816", 8, 16, 10000, 1, 16, 16, &range_pcl816,
- &range_pcl816, PCLx1x_RANGE,
- 0x00fc, /* IRQ mask */
- 0x0a, /* DMA mask */
- 0xffff, /* 16-bit card */
- 0xffff, /* D/A maxdata */
- 1024,
- 1, /* ao chan list */
- I8254_OSC_BASE_10MHZ},
- {"pcl814b", 8, 16, 10000, 1, 16, 16, &range_pcl816,
- &range_pcl816, PCLx1x_RANGE,
- 0x00fc,
- 0x0a,
- 0x3fff, /* 14 bit card */
- 0x3fff,
- 1024,
- 1,
- I8254_OSC_BASE_10MHZ},
-};
-
static struct comedi_driver pcl816_driver = {
.driver_name = "pcl816",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index fa1758ad49d5..6463476ce45a 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -110,8 +110,6 @@ A word or two about DMA. Driver support DMA operations at two ways:
#include "comedi_fc.h"
#include "8253.h"
-/* #define PCL818_MODE13_AO 1 */
-
/* boards constants */
#define boardPCL818L 0
@@ -121,46 +119,38 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define boardPCL818 4
#define boardPCL718 5
-/* IO space len */
-#define PCLx1x_RANGE 16
-/* IO space len if we use FIFO */
-#define PCLx1xFIFO_RANGE 32
-
-/* W: clear INT request */
-#define PCL818_CLRINT 8
-/* R: return status byte */
-#define PCL818_STATUS 8
-/* R: A/D high byte W: A/D range control */
-#define PCL818_RANGE 1
-/* R: next mux scan channel W: mux scan channel & range control pointer */
-#define PCL818_MUX 2
-/* R/W: operation control register */
-#define PCL818_CONTROL 9
-/* W: counter enable */
-#define PCL818_CNTENABLE 10
-
-/* R: low byte of A/D W: soft A/D trigger */
-#define PCL818_AD_LO 0
-/* R: high byte of A/D W: A/D range control */
-#define PCL818_AD_HI 1
-/* W: D/A low&high byte */
-#define PCL818_DA_LO 4
-#define PCL818_DA_HI 5
-/* R: low&high byte of DI */
-#define PCL818_DI_LO 3
-#define PCL818_DI_HI 11
-/* W: low&high byte of DO */
-#define PCL818_DO_LO 3
-#define PCL818_DO_HI 11
-/* W: PCL718 second D/A */
-#define PCL718_DA2_LO 6
-#define PCL718_DA2_HI 7
-/* counters */
-#define PCL818_CTR0 12
-#define PCL818_CTR1 13
-#define PCL818_CTR2 14
-/* W: counter control */
-#define PCL818_CTRCTL 15
+/*
+ * Register I/O map
+ */
+#define PCL818_AI_LSB_REG 0x00
+#define PCL818_AI_MSB_REG 0x01
+#define PCL818_RANGE_REG 0x01
+#define PCL818_MUX_REG 0x02
+#define PCL818_MUX_SCAN(_first, _last) (((_last) << 4) | (_first))
+#define PCL818_DO_DI_LSB_REG 0x03
+#define PCL818_AO_LSB_REG(x) (0x04 + ((x) * 2))
+#define PCL818_AO_MSB_REG(x) (0x05 + ((x) * 2))
+#define PCL818_STATUS_REG 0x08
+#define PCL818_STATUS_NEXT_CHAN_MASK (0xf << 0)
+#define PCL818_STATUS_INT (1 << 4)
+#define PCL818_STATUS_MUX (1 << 5)
+#define PCL818_STATUS_UNI (1 << 6)
+#define PCL818_STATUS_EOC (1 << 7)
+#define PCL818_CTRL_REG 0x09
+#define PCL818_CTRL_DISABLE_TRIG (0 << 0)
+#define PCL818_CTRL_SOFT_TRIG (1 << 0)
+#define PCL818_CTRL_EXT_TRIG (2 << 0)
+#define PCL818_CTRL_PACER_TRIG (3 << 0)
+#define PCL818_CTRL_DMAE (1 << 2)
+#define PCL818_CTRL_IRQ(x) ((x) << 4)
+#define PCL818_CTRL_INTE (1 << 7)
+#define PCL818_CNTENABLE_REG 0x0a
+#define PCL818_CNTENABLE_PACER_ENA (0 << 0)
+#define PCL818_CNTENABLE_PACER_TRIG0 (1 << 0)
+#define PCL818_CNTENABLE_CNT0_EXT_CLK (0 << 1)
+#define PCL818_CNTENABLE_CNT0_INT_CLK (1 << 1)
+#define PCL818_DO_DI_MSB_REG 0x0b
+#define PCL818_TIMER_BASE 0x0c
/* W: fifo enable/disable */
#define PCL818_FI_ENABLE 6
@@ -172,19 +162,7 @@ A word or two about DMA. Driver support DMA operations at two ways:
#define PCL818_FI_STATUS 25
/* R: one record from FIFO */
#define PCL818_FI_DATALO 23
-#define PCL818_FI_DATAHI 23
-
-/* type of interrupt handler */
-#define INT_TYPE_AI1_INT 1
-#define INT_TYPE_AI1_DMA 2
-#define INT_TYPE_AI1_FIFO 3
-#define INT_TYPE_AI3_INT 4
-#define INT_TYPE_AI3_DMA 5
-#define INT_TYPE_AI3_FIFO 6
-#ifdef PCL818_MODE13_AO
-#define INT_TYPE_AO1_INT 7
-#define INT_TYPE_AO3_INT 8
-#endif
+#define PCL818_FI_DATAHI 24
#define MAGIC_DMA_WORD 0x5a5a
@@ -262,630 +240,439 @@ static const struct comedi_lrange range718_unipolar1 = {
};
struct pcl818_board {
+ const char *name;
+ unsigned int ns_min;
+ int n_aochan;
+ const struct comedi_lrange *ai_range_type;
+ unsigned int has_dma:1;
+ unsigned int has_fifo:1;
+ unsigned int is_818:1;
+};
- const char *name; /* driver name */
- int n_ranges; /* len of range list */
- int n_aichan_se; /* num of A/D chans in single ended mode */
- int n_aichan_diff; /* num of A/D chans in diferencial mode */
- unsigned int ns_min; /* minimal allowed delay between samples (in ns) */
- int n_aochan; /* num of D/A chans */
- int n_dichan; /* num of DI chans */
- int n_dochan; /* num of DO chans */
- const struct comedi_lrange *ai_range_type; /* default A/D rangelist */
- const struct comedi_lrange *ao_range_type; /* default D/A rangelist */
- unsigned int io_range; /* len of IO space */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int DMAbits; /* allowed DMA chans */
- int ai_maxdata; /* maxdata for A/D */
- int ao_maxdata; /* maxdata for D/A */
- unsigned char fifo; /* 1=board has FIFO */
- int is_818;
+static const struct pcl818_board boardtypes[] = {
+ {
+ .name = "pcl818l",
+ .ns_min = 25000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818l_l_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818h",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818hd",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .has_fifo = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818hg",
+ .ns_min = 10000,
+ .n_aochan = 1,
+ .ai_range_type = &range_pcl818hg_ai,
+ .has_dma = 1,
+ .has_fifo = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl818",
+ .ns_min = 10000,
+ .n_aochan = 2,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ }, {
+ .name = "pcl718",
+ .ns_min = 16000,
+ .n_aochan = 2,
+ .ai_range_type = &range_unipolar5,
+ .has_dma = 1,
+ }, {
+ .name = "pcm3718",
+ .ns_min = 10000,
+ .ai_range_type = &range_pcl818h_ai,
+ .has_dma = 1,
+ .is_818 = 1,
+ },
};
struct pcl818_private {
-
unsigned int dma; /* used DMA, 0=don't use DMA */
- unsigned int io_range;
+ unsigned int dmapages;
+ unsigned int hwdmasize;
unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
- unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */
unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
- unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to reach the end of the record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
- unsigned char neverending_ai; /* if=1, then we do neverending record (you must use cancel()) */
unsigned int ns_min; /* minimal allowed delay between samples (in ns) for this card */
int i8253_osc_base; /* 1/frequency of the on-board oscillator in ns */
- int irq_blocked; /* 1=IRQ now uses any subdev */
- int irq_was_now_closed; /* when IRQ finish, there's stored int818_mode for last interrupt */
- int ai_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
- struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */
int ai_act_scan; /* how many scans we have finished */
int ai_act_chan; /* current position in the current scan */
unsigned int act_chanlist[16]; /* MUX setting for the current AI operation */
unsigned int act_chanlist_len; /* how long the current MUX list is */
unsigned int act_chanlist_pos; /* current position in the MUX list */
- unsigned int ai_scans; /* len of scanlist */
- unsigned int ai_n_chan; /* how many channels is measured */
- unsigned int *ai_chanlist; /* actaul chanlist */
- unsigned int ai_flags; /* flaglist */
unsigned int ai_data_len; /* len of data buffer */
- unsigned int ai_timer1; /* timers */
- unsigned int ai_timer2;
- unsigned char usefifo; /* 1=use fifo */
unsigned int ao_readback[2];
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int usefifo:1;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_cmd_canceled:1;
};
-static const unsigned int muxonechan[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, /* used for gain list programming */
- 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
-};
-
-/*
-==============================================================================
-*/
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan,
- unsigned int seglen);
-static int check_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan);
-
-static int pcl818_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2);
-
-/*
-==============================================================================
- ANALOG INPUT MODE0, 818 cards, slow version
-*/
-static int pcl818_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_start_pacer(struct comedi_device *dev, bool load_counters)
{
- int n;
- int timeout;
+ struct pcl818_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL818_TIMER_BASE;
- /* software trigger, DMA and INT off */
- outb(0, dev->iobase + PCL818_CONTROL);
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE2 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE2 | I8254_BINARY);
+ udelay(1);
- /* select channel */
- outb(muxonechan[CR_CHAN(insn->chanspec)], dev->iobase + PCL818_MUX);
+ if (load_counters) {
+ i8254_write(timer_base, 0, 2, devpriv->divisor2);
+ i8254_write(timer_base, 0, 1, devpriv->divisor1);
+ }
+}
- /* select gain */
- outb(CR_RANGE(insn->chanspec), dev->iobase + PCL818_RANGE);
+static void pcl818_ai_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long flags;
+ unsigned int bytes;
- for (n = 0; n < insn->n; n++) {
+ disable_dma(devpriv->dma); /* disable dma */
+ bytes = devpriv->hwdmasize;
+ if (cmd->stop_src == TRIG_COUNT) {
+ bytes = cmd->chanlist_len * cmd->stop_arg * sizeof(short);
+ devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
+ devpriv->last_dma_run = bytes % devpriv->hwdmasize;
+ devpriv->dma_runs_to_end--;
+ if (devpriv->dma_runs_to_end >= 0)
+ bytes = devpriv->hwdmasize;
+ }
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL818_CLRINT);
+ devpriv->next_dma_buf = 0;
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma);
+ set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
+ set_dma_count(devpriv->dma, bytes);
+ release_dma_lock(flags);
+ enable_dma(devpriv->dma);
+}
- /* start conversion */
- outb(0, dev->iobase + PCL818_AD_LO);
+static void pcl818_ai_setup_next_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long flags;
- timeout = 100;
- while (timeout--) {
- if (inb(dev->iobase + PCL818_STATUS) & 0x10)
- goto conv_finish;
- udelay(1);
- }
- comedi_error(dev, "A/D insn timeout");
- /* clear INT (conversion end) flag */
- outb(0, dev->iobase + PCL818_CLRINT);
- return -EIO;
-
-conv_finish:
- data[n] = ((inb(dev->iobase + PCL818_AD_HI) << 4) |
- (inb(dev->iobase + PCL818_AD_LO) >> 4));
+ disable_dma(devpriv->dma);
+ devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
+ if (devpriv->dma_runs_to_end > -1 || cmd->stop_src == TRIG_NONE) {
+ /* switch dma bufs */
+ set_dma_mode(devpriv->dma, DMA_MODE_READ);
+ flags = claim_dma_lock();
+ set_dma_addr(devpriv->dma,
+ devpriv->hwdmaptr[devpriv->next_dma_buf]);
+ if (devpriv->dma_runs_to_end || cmd->stop_src == TRIG_NONE)
+ set_dma_count(devpriv->dma, devpriv->hwdmasize);
+ else
+ set_dma_count(devpriv->dma, devpriv->last_dma_run);
+ release_dma_lock(flags);
+ enable_dma(devpriv->dma);
}
- return n;
+ devpriv->dma_runs_to_end--;
}
-/*
-==============================================================================
- ANALOG OUTPUT MODE0, 818 cards
- only one sample per call is supported
-*/
-static int pcl818_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_set_chan_range(struct comedi_device *dev,
+ unsigned int chan,
+ unsigned int range)
{
- struct pcl818_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_readback[chan];
+ outb(chan, dev->iobase + PCL818_MUX_REG);
+ outb(range, dev->iobase + PCL818_RANGE_REG);
+}
- return n;
+static void pcl818_ai_set_chan_scan(struct comedi_device *dev,
+ unsigned int first_chan,
+ unsigned int last_chan)
+{
+ outb(PCL818_MUX_SCAN(first_chan, last_chan),
+ dev->iobase + PCL818_MUX_REG);
}
-static int pcl818_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_setup_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist,
+ unsigned int seglen)
{
struct pcl818_private *devpriv = dev->private;
- int n;
- int chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++) {
- devpriv->ao_readback[chan] = data[n];
- outb((data[n] & 0x000f) << 4, dev->iobase +
- (chan ? PCL718_DA2_LO : PCL818_DA_LO));
- outb((data[n] & 0x0ff0) >> 4, dev->iobase +
- (chan ? PCL718_DA2_HI : PCL818_DA_HI));
+ unsigned int first_chan = CR_CHAN(chanlist[0]);
+ unsigned int last_chan;
+ unsigned int range;
+ int i;
+
+ devpriv->act_chanlist_len = seglen;
+ devpriv->act_chanlist_pos = 0;
+
+ /* store range list to card */
+ for (i = 0; i < seglen; i++) {
+ last_chan = CR_CHAN(chanlist[i]);
+ range = CR_RANGE(chanlist[i]);
+
+ devpriv->act_chanlist[i] = last_chan;
+
+ pcl818_ai_set_chan_range(dev, last_chan, range);
}
- return n;
+ udelay(1);
+
+ pcl818_ai_set_chan_scan(dev, first_chan, last_chan);
}
-/*
-==============================================================================
- DIGITAL INPUT MODE0, 818 cards
+static void pcl818_ai_clear_eoc(struct comedi_device *dev)
+{
+ /* writing any value clears the interrupt request */
+ outb(0, dev->iobase + PCL818_STATUS_REG);
+}
- only one sample per call is supported
-*/
-static int pcl818_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void pcl818_ai_soft_trig(struct comedi_device *dev)
{
- data[1] = inb(dev->iobase + PCL818_DI_LO) |
- (inb(dev->iobase + PCL818_DI_HI) << 8);
+ /* writing any value triggers a software conversion */
+ outb(0, dev->iobase + PCL818_AI_LSB_REG);
+}
- return insn->n;
+static unsigned int pcl818_ai_get_fifo_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int *chan)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + PCL818_FI_DATALO);
+ val |= (inb(dev->iobase + PCL818_FI_DATAHI) << 8);
+
+ if (chan)
+ *chan = val & 0xf;
+
+ return (val >> 4) & s->maxdata;
}
-static int pcl818_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static unsigned int pcl818_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int *chan)
{
- if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase + PCL818_DO_LO);
- outb((s->state >> 8), dev->iobase + PCL818_DO_HI);
- }
+ unsigned int val;
- data[1] = s->state;
+ val = inb(dev->iobase + PCL818_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + PCL818_AI_LSB_REG);
- return insn->n;
+ if (chan)
+ *chan = val & 0xf;
+
+ return (val >> 4) & s->maxdata;
}
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818 cards
- one sample per interrupt version
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_int(int irq, void *d)
+static int pcl818_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct comedi_device *dev = d;
- struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned char low;
- int timeout = 50; /* wait max 50us */
+ unsigned int status;
- while (timeout--) {
- if (inb(dev->iobase + PCL818_STATUS) & 0x10)
- goto conv_finish;
- udelay(1);
- }
- outb(0, dev->iobase + PCL818_STATUS); /* clear INT request */
- comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ status = inb(dev->iobase + PCL818_STATUS_REG);
+ if (status & PCL818_STATUS_INT)
+ return 0;
+ return -EBUSY;
+}
-conv_finish:
- low = inb(dev->iobase + PCL818_AD_LO);
- comedi_buf_put(s->async, ((inb(dev->iobase + PCL818_AD_HI) << 4) | (low >> 4))); /* get one sample */
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
+static bool pcl818_ai_dropout(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int expected_chan;
- if ((low & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
+ expected_chan = devpriv->act_chanlist[devpriv->act_chanlist_pos];
+ if (chan != expected_chan) {
dev_dbg(dev->class_dev,
- "A/D mode1/3 IRQ - channel dropout %x!=%x !\n",
- (low & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
- pcl818_ai_cancel(dev, s);
+ "A/D mode1/3 %s - channel dropout %d!=%d !\n",
+ (devpriv->dma) ? "DMA" :
+ (devpriv->usefifo) ? "FIFO" : "IRQ",
+ chan, expected_chan);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return true;
}
+ return false;
+}
+
+static bool pcl818_ai_next_chan(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ s->async->events |= COMEDI_CB_BLOCK;
+
devpriv->act_chanlist_pos++;
if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
+ if (s->async->cur_chan >= cmd->chanlist_len) {
s->async->cur_chan = 0;
devpriv->ai_act_scan--;
+ s->async->events |= COMEDI_CB_EOS;
}
- if (!devpriv->neverending_ai) {
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- }
+ if (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan == 0) {
+ /* all data sampled */
+ s->async->events |= COMEDI_CB_EOA;
+ return false;
}
- comedi_event(dev, s);
- return IRQ_HANDLED;
+
+ return true;
}
-/*
-==============================================================================
- analog input dma mode 1 & 3, 818 cards
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
+static void pcl818_handle_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int chan;
+ unsigned int val;
+
+ if (pcl818_ai_eoc(dev, s, NULL, 0)) {
+ comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
+ s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
+ return;
+ }
+
+ val = pcl818_ai_get_sample(dev, s, &chan);
+
+ if (pcl818_ai_dropout(dev, s, chan))
+ return;
+
+ comedi_buf_put(s->async, val);
+
+ pcl818_ai_next_chan(dev, s);
+}
+
+static void pcl818_handle_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int i, len, bufptr;
- unsigned long flags;
unsigned short *ptr;
+ unsigned int chan;
+ unsigned int val;
+ int i, len, bufptr;
- disable_dma(devpriv->dma);
- devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
- if ((devpriv->dma_runs_to_end) > -1 || devpriv->neverending_ai) { /* switch dma bufs */
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- set_dma_addr(devpriv->dma,
- devpriv->hwdmaptr[devpriv->next_dma_buf]);
- if (devpriv->dma_runs_to_end || devpriv->neverending_ai) {
- set_dma_count(devpriv->dma,
- devpriv->hwdmasize[devpriv->
- next_dma_buf]);
- } else {
- set_dma_count(devpriv->dma, devpriv->last_dma_run);
- }
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
- }
+ pcl818_ai_setup_next_dma(dev, s);
- devpriv->dma_runs_to_end--;
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
ptr = (unsigned short *)devpriv->dmabuf[1 - devpriv->next_dma_buf];
- len = devpriv->hwdmasize[0] >> 1;
+ len = devpriv->hwdmasize >> 1;
bufptr = 0;
for (i = 0; i < len; i++) {
- if ((ptr[bufptr] & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- dev_dbg(dev->class_dev,
- "A/D mode1/3 DMA - channel dropout %d(card)!=%d(chanlist) at %d !\n",
- (ptr[bufptr] & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos],
- devpriv->act_chanlist_pos);
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ val = ptr[bufptr++];
+ chan = val & 0xf;
+ val = (val >> 4) & s->maxdata;
- comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */
+ if (pcl818_ai_dropout(dev, s, chan))
+ break;
- devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
- devpriv->act_chanlist_pos = 0;
+ comedi_buf_put(s->async, val);
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- }
-
- if (!devpriv->neverending_ai)
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ if (!pcl818_ai_next_chan(dev, s))
+ break;
}
-
- if (len > 0)
- comedi_event(dev, s);
- return IRQ_HANDLED;
}
-/*
-==============================================================================
- analog input interrupt mode 1 & 3, 818HD/HG cards
-*/
-static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
+static void pcl818_handle_fifo(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct comedi_device *dev = d;
- struct pcl818_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int status;
+ unsigned int chan;
+ unsigned int val;
int i, len;
- unsigned char lo;
- outb(0, dev->iobase + PCL818_FI_INTCLR); /* clear fifo int request */
+ status = inb(dev->iobase + PCL818_FI_STATUS);
- lo = inb(dev->iobase + PCL818_FI_STATUS);
-
- if (lo & 4) {
+ if (status & 4) {
comedi_error(dev, "A/D mode1/3 FIFO overflow!");
- pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- if (lo & 1) {
+ if (status & 1) {
comedi_error(dev, "A/D mode1/3 FIFO interrupt without data!");
- pcl818_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
+ return;
}
- if (lo & 2)
+ if (status & 2)
len = 512;
else
len = 0;
for (i = 0; i < len; i++) {
- lo = inb(dev->iobase + PCL818_FI_DATALO);
- if ((lo & 0xf) != devpriv->act_chanlist[devpriv->act_chanlist_pos]) { /* dropout! */
- dev_dbg(dev->class_dev,
- "A/D mode1/3 FIFO - channel dropout %d!=%d !\n",
- (lo & 0xf),
- devpriv->act_chanlist[devpriv->act_chanlist_pos]);
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ val = pcl818_ai_get_fifo_sample(dev, s, &chan);
- comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */
-
- devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
- devpriv->act_chanlist_pos = 0;
+ if (pcl818_ai_dropout(dev, s, chan))
+ break;
- s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- }
+ comedi_buf_put(s->async, val);
- if (!devpriv->neverending_ai)
- if (devpriv->ai_act_scan == 0) { /* all data sampled */
- pcl818_ai_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return IRQ_HANDLED;
- }
+ if (!pcl818_ai_next_chan(dev, s))
+ break;
}
-
- if (len > 0)
- comedi_event(dev, s);
- return IRQ_HANDLED;
}
-/*
-==============================================================================
- INT procedure
-*/
-static irqreturn_t interrupt_pcl818(int irq, void *d)
+static irqreturn_t pcl818_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
+ if (!dev->attached || !devpriv->ai_cmd_running) {
+ pcl818_ai_clear_eoc(dev);
return IRQ_HANDLED;
}
- if (devpriv->irq_blocked && devpriv->irq_was_now_closed) {
- if ((devpriv->neverending_ai || (!devpriv->neverending_ai &&
- devpriv->ai_act_scan > 0)) &&
- (devpriv->ai_mode == INT_TYPE_AI1_DMA ||
- devpriv->ai_mode == INT_TYPE_AI3_DMA)) {
- /* The cleanup from ai_cancel() has been delayed
- until now because the card doesn't seem to like
- being reprogrammed while a DMA transfer is in
- progress.
- */
- devpriv->ai_act_scan = 0;
- devpriv->neverending_ai = 0;
- pcl818_ai_cancel(dev, dev->read_subdev);
- }
-
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
-
+ if (devpriv->ai_cmd_canceled) {
+ /*
+ * The cleanup from ai_cancel() has been delayed
+ * until now because the card doesn't seem to like
+ * being reprogrammed while a DMA transfer is in
+ * progress.
+ */
+ devpriv->ai_act_scan = 0;
+ s->cancel(dev, s);
return IRQ_HANDLED;
}
- switch (devpriv->ai_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- return interrupt_pcl818_ai_mode13_dma(irq, d);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- return interrupt_pcl818_ai_mode13_int(irq, d);
- case INT_TYPE_AI1_FIFO:
- case INT_TYPE_AI3_FIFO:
- return interrupt_pcl818_ai_mode13_fifo(irq, d);
-#ifdef PCL818_MODE13_AO
- case INT_TYPE_AO1_INT:
- case INT_TYPE_AO3_INT:
- return interrupt_pcl818_ao_mode13_int(irq, d);
-#endif
- default:
- break;
- }
-
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
-
- if (!devpriv->irq_blocked || !devpriv->ai_mode) {
- comedi_error(dev, "bad IRQ!");
- return IRQ_NONE;
- }
-
- comedi_error(dev, "IRQ from unknown source!");
- return IRQ_NONE;
-}
-
-/*
-==============================================================================
- ANALOG INPUT MODE 1 or 3 DMA , 818 cards
-*/
-static void pcl818_ai_mode13dma_int(int mode, struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- unsigned int flags;
- unsigned int bytes;
-
- disable_dma(devpriv->dma); /* disable dma */
- bytes = devpriv->hwdmasize[0];
- if (!devpriv->neverending_ai) {
- bytes = devpriv->ai_n_chan * devpriv->ai_scans * sizeof(short); /* how many */
- devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize[0]; /* how many DMA pages we must fiil */
- devpriv->last_dma_run = bytes % devpriv->hwdmasize[0]; /* on last dma transfer must be moved */
- devpriv->dma_runs_to_end--;
- if (devpriv->dma_runs_to_end >= 0)
- bytes = devpriv->hwdmasize[0];
- }
-
- devpriv->next_dma_buf = 0;
- set_dma_mode(devpriv->dma, DMA_MODE_READ);
- flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma);
- set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
- set_dma_count(devpriv->dma, bytes);
- release_dma_lock(flags);
- enable_dma(devpriv->dma);
-
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_DMA;
- outb(0x87 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Pacer+IRQ+DMA */
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_DMA;
- outb(0x86 | (dev->irq << 4), dev->iobase + PCL818_CONTROL); /* Ext trig+IRQ+DMA */
- }
-}
-
-/*
-==============================================================================
- ANALOG INPUT MODE 1 or 3, 818 cards
-*/
-static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pcl818_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int divisor1 = 0, divisor2 = 0;
- unsigned int seglen;
-
- if (devpriv->irq_blocked)
- return -EBUSY;
-
- start_pacer(dev, -1, 0, 0); /* stop pacer */
-
- seglen = check_channel_list(dev, s, devpriv->ai_chanlist,
- devpriv->ai_n_chan);
- if (seglen < 1)
- return -EINVAL;
- setup_channel_list(dev, s, devpriv->ai_chanlist,
- devpriv->ai_n_chan, seglen);
-
- udelay(1);
-
- devpriv->ai_act_scan = devpriv->ai_scans;
- devpriv->ai_act_chan = 0;
- devpriv->irq_blocked = 1;
- devpriv->irq_was_now_closed = 0;
- devpriv->neverending_ai = 0;
- devpriv->act_chanlist_pos = 0;
- devpriv->dma_runs_to_end = 0;
-
- if ((devpriv->ai_scans == 0) || (devpriv->ai_scans == -1))
- devpriv->neverending_ai = 1; /* well, user want neverending */
-
- if (mode == 1) {
- i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
- &divisor1, &divisor2,
- &cmd->convert_arg,
- TRIG_ROUND_NEAREST);
- if (divisor1 == 1) { /* PCL718/818 crash if any divisor is set to 1 */
- divisor1 = 2;
- divisor2 /= 2;
- }
- if (divisor2 == 1) {
- divisor2 = 2;
- divisor1 /= 2;
- }
- }
-
- outb(0, dev->iobase + PCL818_CNTENABLE); /* enable pacer */
-
- switch (devpriv->dma) {
- case 1: /* DMA */
- case 3:
- pcl818_ai_mode13dma_int(mode, dev, s);
- break;
- case 0:
- if (!devpriv->usefifo) {
- /* IRQ */
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_INT;
- /* Pacer+IRQ */
- outb(0x83 | (dev->irq << 4),
- dev->iobase + PCL818_CONTROL);
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_INT;
- /* Ext trig+IRQ */
- outb(0x82 | (dev->irq << 4),
- dev->iobase + PCL818_CONTROL);
- }
- } else {
- /* FIFO */
- /* enable FIFO */
- outb(1, dev->iobase + PCL818_FI_ENABLE);
- if (mode == 1) {
- devpriv->ai_mode = INT_TYPE_AI1_FIFO;
- /* Pacer */
- outb(0x03, dev->iobase + PCL818_CONTROL);
- } else {
- devpriv->ai_mode = INT_TYPE_AI3_FIFO;
- outb(0x02, dev->iobase + PCL818_CONTROL);
- }
- }
- }
-
- start_pacer(dev, mode, divisor1, divisor2);
-
- return 0;
-}
+ if (devpriv->dma)
+ pcl818_handle_dma(dev, s);
+ else if (devpriv->usefifo)
+ pcl818_handle_fifo(dev, s);
+ else
+ pcl818_handle_eoc(dev, s);
-/*
-==============================================================================
- Start/stop pacer onboard pacer
-*/
-static void start_pacer(struct comedi_device *dev, int mode,
- unsigned int divisor1, unsigned int divisor2)
-{
- outb(0xb4, dev->iobase + PCL818_CTRCTL);
- outb(0x74, dev->iobase + PCL818_CTRCTL);
- udelay(1);
+ pcl818_ai_clear_eoc(dev);
- if (mode == 1) {
- outb(divisor2 & 0xff, dev->iobase + PCL818_CTR2);
- outb((divisor2 >> 8) & 0xff, dev->iobase + PCL818_CTR2);
- outb(divisor1 & 0xff, dev->iobase + PCL818_CTR1);
- outb((divisor1 >> 8) & 0xff, dev->iobase + PCL818_CTR1);
- }
+ cfc_handle_events(dev, s);
+ return IRQ_HANDLED;
}
-/*
-==============================================================================
- Check if channel list from user is built correctly
- If it's ok, then program scan/gain logic
-*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int n_chan)
@@ -941,52 +728,20 @@ static int check_channel_list(struct comedi_device *dev,
return seglen;
}
-static void setup_channel_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int *chanlist, unsigned int n_chan,
- unsigned int seglen)
-{
- struct pcl818_private *devpriv = dev->private;
- int i;
-
- devpriv->act_chanlist_len = seglen;
- devpriv->act_chanlist_pos = 0;
-
- for (i = 0; i < seglen; i++) { /* store range list to card */
- devpriv->act_chanlist[i] = CR_CHAN(chanlist[i]);
- outb(muxonechan[CR_CHAN(chanlist[i])], dev->iobase + PCL818_MUX); /* select channel */
- outb(CR_RANGE(chanlist[i]), dev->iobase + PCL818_RANGE); /* select gain */
- }
-
- udelay(1);
-
- /* select channel interval to scan */
- outb(devpriv->act_chanlist[0] | (devpriv->act_chanlist[seglen -
- 1] << 4),
- dev->iobase + PCL818_MUX);
-}
-
-/*
-==============================================================================
- Check if board is switched to SE (1) or DIFF(0) mode
-*/
static int check_single_ended(unsigned int port)
{
- if (inb(port + PCL818_STATUS) & 0x20)
+ if (inb(port + PCL818_STATUS_REG) & PCL818_STATUS_MUX)
return 1;
return 0;
}
-/*
-==============================================================================
-*/
static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv = dev->private;
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
/* Step 1 : check if triggers are trivially valid */
@@ -1035,7 +790,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
i8253_cascade_ns_to_timer(devpriv->i8253_osc_base,
- &divisor1, &divisor2,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
&cmd->convert_arg, cmd->flags);
if (cmd->convert_arg < board->ns_min)
cmd->convert_arg = board->ns_min;
@@ -1057,152 +813,272 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
-/*
-==============================================================================
-*/
-static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+static int pcl818_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int retval;
+ unsigned int ctrl = 0;
+ unsigned int seglen;
+
+ if (devpriv->ai_cmd_running)
+ return -EBUSY;
+
+ pcl818_start_pacer(dev, false);
+
+ seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
+ if (seglen < 1)
+ return -EINVAL;
+ pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
- devpriv->ai_n_chan = cmd->chanlist_len;
- devpriv->ai_chanlist = cmd->chanlist;
- devpriv->ai_flags = cmd->flags;
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_timer1 = 0;
- devpriv->ai_timer2 = 0;
+ devpriv->ai_act_scan = cmd->stop_arg;
+ devpriv->ai_act_chan = 0;
+ devpriv->ai_cmd_running = 1;
+ devpriv->ai_cmd_canceled = 0;
+ devpriv->act_chanlist_pos = 0;
+ devpriv->dma_runs_to_end = 0;
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_scans = cmd->stop_arg;
+ if (cmd->convert_src == TRIG_TIMER)
+ ctrl |= PCL818_CTRL_PACER_TRIG;
else
- devpriv->ai_scans = 0;
+ ctrl |= PCL818_CTRL_EXT_TRIG;
- if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */
- if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
- devpriv->ai_timer1 = cmd->convert_arg;
- retval = pcl818_ai_cmd_mode(1, dev, s);
- return retval;
- }
- if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
- return pcl818_ai_cmd_mode(3, dev, s);
- }
+ outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
+
+ if (devpriv->dma) {
+ pcl818_ai_setup_dma(dev, s);
+
+ ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq) |
+ PCL818_CTRL_DMAE;
+ } else if (devpriv->usefifo) {
+ /* enable FIFO */
+ outb(1, dev->iobase + PCL818_FI_ENABLE);
+ } else {
+ ctrl |= PCL818_CTRL_INTE | PCL818_CTRL_IRQ(dev->irq);
}
+ outb(ctrl, dev->iobase + PCL818_CTRL_REG);
+
+ if (cmd->convert_src == TRIG_TIMER)
+ pcl818_start_pacer(dev, true);
- return -1;
+ return 0;
}
-/*
-==============================================================================
- cancel any mode 1-4 AI
-*/
static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl818_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
- if (devpriv->irq_blocked > 0) {
- devpriv->irq_was_now_closed = 1;
-
- switch (devpriv->ai_mode) {
- case INT_TYPE_AI1_DMA:
- case INT_TYPE_AI3_DMA:
- if (devpriv->neverending_ai ||
- (!devpriv->neverending_ai &&
- devpriv->ai_act_scan > 0)) {
- /* wait for running dma transfer to end, do cleanup in interrupt */
- goto end;
- }
- disable_dma(devpriv->dma);
- case INT_TYPE_AI1_INT:
- case INT_TYPE_AI3_INT:
- case INT_TYPE_AI1_FIFO:
- case INT_TYPE_AI3_FIFO:
-#ifdef PCL818_MODE13_AO
- case INT_TYPE_AO1_INT:
- case INT_TYPE_AO3_INT:
-#endif
- outb(inb(dev->iobase + PCL818_CONTROL) & 0x73, dev->iobase + PCL818_CONTROL); /* Stop A/D */
- udelay(1);
- start_pacer(dev, -1, 0, 0);
- outb(0, dev->iobase + PCL818_AD_LO);
- inb(dev->iobase + PCL818_AD_LO);
- inb(dev->iobase + PCL818_AD_HI);
- outb(0, dev->iobase + PCL818_CLRINT); /* clear INT request */
- outb(0, dev->iobase + PCL818_CONTROL); /* Stop A/D */
- if (devpriv->usefifo) { /* FIFO shutdown */
- outb(0, dev->iobase + PCL818_FI_INTCLR);
- outb(0, dev->iobase + PCL818_FI_FLUSH);
- outb(0, dev->iobase + PCL818_FI_ENABLE);
+ if (!devpriv->ai_cmd_running)
+ return 0;
+
+ if (devpriv->dma) {
+ if (cmd->stop_src == TRIG_NONE ||
+ (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan > 0)) {
+ if (!devpriv->ai_cmd_canceled) {
+ /*
+ * Wait for running dma transfer to end,
+ * do cleanup in interrupt.
+ */
+ devpriv->ai_cmd_canceled = 1;
+ return 0;
}
- devpriv->irq_blocked = 0;
- devpriv->last_int_sub = s;
- devpriv->neverending_ai = 0;
- devpriv->ai_mode = 0;
- devpriv->irq_was_now_closed = 0;
- break;
}
+ disable_dma(devpriv->dma);
+ }
+
+ outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
+ pcl818_start_pacer(dev, false);
+ pcl818_ai_clear_eoc(dev);
+
+ if (devpriv->usefifo) { /* FIFO shutdown */
+ outb(0, dev->iobase + PCL818_FI_INTCLR);
+ outb(0, dev->iobase + PCL818_FI_FLUSH);
+ outb(0, dev->iobase + PCL818_FI_ENABLE);
}
+ devpriv->ai_cmd_running = 0;
+ devpriv->ai_cmd_canceled = 0;
-end:
return 0;
}
-/*
-==============================================================================
- check for PCL818
-*/
-static int pcl818_check(unsigned long iobase)
+static int pcl818_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- outb(0x00, iobase + PCL818_MUX);
- udelay(1);
- if (inb(iobase + PCL818_MUX) != 0x00)
- return 1; /* there is no card */
- outb(0x55, iobase + PCL818_MUX);
- udelay(1);
- if (inb(iobase + PCL818_MUX) != 0x55)
- return 1; /* there is no card */
- outb(0x00, iobase + PCL818_MUX);
- udelay(1);
- outb(0x18, iobase + PCL818_CONTROL);
- udelay(1);
- if (inb(iobase + PCL818_CONTROL) != 0x18)
- return 1; /* there is no card */
- return 0; /* ok, card exists */
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ int ret = 0;
+ int i;
+
+ outb(PCL818_CTRL_SOFT_TRIG, dev->iobase + PCL818_CTRL_REG);
+
+ pcl818_ai_set_chan_range(dev, chan, range);
+ pcl818_ai_set_chan_scan(dev, chan, chan);
+
+ for (i = 0; i < insn->n; i++) {
+ pcl818_ai_clear_eoc(dev);
+ pcl818_ai_soft_trig(dev);
+
+ ret = comedi_timeout(dev, s, insn, pcl818_ai_eoc, 0);
+ if (ret)
+ break;
+
+ data[i] = pcl818_ai_get_sample(dev, s, NULL);
+ }
+ pcl818_ai_clear_eoc(dev);
+
+ return ret ? ret : insn->n;
+}
+
+static int pcl818_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ devpriv->ao_readback[chan] = data[i];
+ outb((data[i] & 0x000f) << 4,
+ dev->iobase + PCL818_AO_LSB_REG(chan));
+ outb((data[i] & 0x0ff0) >> 4,
+ dev->iobase + PCL818_AO_MSB_REG(chan));
+ }
+
+ return insn->n;
+}
+
+static int pcl818_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct pcl818_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_readback[chan];
+
+ return insn->n;
+}
+
+static int pcl818_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inb(dev->iobase + PCL818_DO_DI_LSB_REG) |
+ (inb(dev->iobase + PCL818_DO_DI_MSB_REG) << 8);
+
+ return insn->n;
+}
+
+static int pcl818_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (comedi_dio_update_state(s, data)) {
+ outb(s->state & 0xff, dev->iobase + PCL818_DO_DI_LSB_REG);
+ outb((s->state >> 8), dev->iobase + PCL818_DO_DI_MSB_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
}
-/*
-==============================================================================
- reset the whole PCL-818 card
-*/
static void pcl818_reset(struct comedi_device *dev)
{
const struct pcl818_board *board = comedi_board(dev);
- struct pcl818_private *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + PCL818_TIMER_BASE;
+ unsigned int chan;
- if (devpriv->usefifo) { /* FIFO shutdown */
+ /* flush and disable the FIFO */
+ if (board->has_fifo) {
outb(0, dev->iobase + PCL818_FI_INTCLR);
outb(0, dev->iobase + PCL818_FI_FLUSH);
outb(0, dev->iobase + PCL818_FI_ENABLE);
}
- outb(0, dev->iobase + PCL818_DA_LO); /* DAC=0V */
- outb(0, dev->iobase + PCL818_DA_HI);
- udelay(1);
- outb(0, dev->iobase + PCL818_DO_HI); /* DO=$0000 */
- outb(0, dev->iobase + PCL818_DO_LO);
- udelay(1);
- outb(0, dev->iobase + PCL818_CONTROL);
- outb(0, dev->iobase + PCL818_CNTENABLE);
- outb(0, dev->iobase + PCL818_MUX);
- outb(0, dev->iobase + PCL818_CLRINT);
- outb(0xb0, dev->iobase + PCL818_CTRCTL); /* Stop pacer */
- outb(0x70, dev->iobase + PCL818_CTRCTL);
- outb(0x30, dev->iobase + PCL818_CTRCTL);
+
+ /* disable analog input trigger */
+ outb(PCL818_CTRL_DISABLE_TRIG, dev->iobase + PCL818_CTRL_REG);
+ pcl818_ai_clear_eoc(dev);
+
+ pcl818_ai_set_chan_range(dev, 0, 0);
+
+ /* stop pacer */
+ outb(PCL818_CNTENABLE_PACER_ENA, dev->iobase + PCL818_CNTENABLE_REG);
+ i8254_set_mode(timer_base, 0, 2, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 1, I8254_MODE0 | I8254_BINARY);
+ i8254_set_mode(timer_base, 0, 0, I8254_MODE0 | I8254_BINARY);
+
+ /* set analog output channels to 0V */
+ for (chan = 0; chan < board->n_aochan; chan++) {
+ outb(0, dev->iobase + PCL818_AO_LSB_REG(chan));
+ outb(0, dev->iobase + PCL818_AO_MSB_REG(chan));
+ }
+
+ /* set all digital outputs low */
+ outb(0, dev->iobase + PCL818_DO_DI_MSB_REG);
+ outb(0, dev->iobase + PCL818_DO_DI_LSB_REG);
+}
+
+static void pcl818_set_ai_range_table(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_devconfig *it)
+{
+ const struct pcl818_board *board = comedi_board(dev);
+
+ /* default to the range table from the boardinfo */
+ s->range_table = board->ai_range_type;
+
+ /* now check the user config option based on the boardtype */
if (board->is_818) {
- outb(0, dev->iobase + PCL818_RANGE);
+ if (it->options[4] == 1 || it->options[4] == 10) {
+ /* secondary range list jumper selectable */
+ s->range_table = &range_pcl818l_h_ai;
+ }
} else {
- outb(0, dev->iobase + PCL718_DA2_LO);
- outb(0, dev->iobase + PCL718_DA2_HI);
+ switch (it->options[4]) {
+ case 0:
+ s->range_table = &range_bipolar10;
+ break;
+ case 1:
+ s->range_table = &range_bipolar5;
+ break;
+ case 2:
+ s->range_table = &range_bipolar2_5;
+ break;
+ case 3:
+ s->range_table = &range718_bipolar1;
+ break;
+ case 4:
+ s->range_table = &range718_bipolar0_5;
+ break;
+ case 6:
+ s->range_table = &range_unipolar10;
+ break;
+ case 7:
+ s->range_table = &range_unipolar5;
+ break;
+ case 8:
+ s->range_table = &range718_unipolar2;
+ break;
+ case 9:
+ s->range_table = &range718_unipolar1;
+ break;
+ default:
+ s->range_table = &range_unknown;
+ break;
+ }
}
}
@@ -1210,154 +1086,96 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl818_board *board = comedi_board(dev);
struct pcl818_private *devpriv;
- int ret;
- int dma;
- unsigned long pages;
struct comedi_subdevice *s;
+ int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- devpriv->io_range = board->io_range;
- if ((board->fifo) && (it->options[2] == -1)) {
- /* we've board with FIFO and we want to use FIFO */
- devpriv->io_range = PCLx1xFIFO_RANGE;
- devpriv->usefifo = 1;
- }
- ret = comedi_request_region(dev, it->options[0], devpriv->io_range);
+ ret = comedi_request_region(dev, it->options[0],
+ board->has_fifo ? 0x20 : 0x10);
if (ret)
return ret;
- if (pcl818_check(dev->iobase)) {
- comedi_error(dev, "I can't detect board. FAIL!\n");
- return -EIO;
- }
-
- if ((1 << it->options[1]) & board->IRQbits) {
- ret = request_irq(it->options[1], interrupt_pcl818, 0,
+ /* we can use IRQ 2-7 for async command support */
+ if (it->options[1] >= 2 && it->options[1] <= 7) {
+ ret = request_irq(it->options[1], pcl818_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
dev->irq = it->options[1];
}
- devpriv->irq_blocked = 0; /* number of subdevices which use IRQ */
- devpriv->ai_mode = 0; /* mode of irq */
-
- /* grab our DMA */
- dma = 0;
- devpriv->dma = dma;
- if (!dev->irq)
- goto no_dma; /* if we haven't IRQ, we can't use DMA */
- if (board->DMAbits != 0) { /* board support DMA */
- dma = it->options[2];
- if (dma < 1)
- goto no_dma; /* DMA disabled */
- if (((1 << dma) & board->DMAbits) == 0) {
+ /* should we use the FIFO? */
+ if (dev->irq && board->has_fifo && it->options[2] == -1)
+ devpriv->usefifo = 1;
+
+ /* we need an IRQ to do DMA on channel 3 or 1 */
+ if (dev->irq && board->has_dma &&
+ (it->options[2] == 3 || it->options[2] == 1)) {
+ ret = request_dma(it->options[2], dev->board_name);
+ if (ret) {
dev_err(dev->class_dev,
- "DMA is out of allowed range, FAIL!\n");
- return -EINVAL; /* Bad DMA */
- }
- ret = request_dma(dma, dev->board_name);
- if (ret)
- return -EBUSY; /* DMA isn't free */
- devpriv->dma = dma;
- pages = 2; /* we need 16KB */
- devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0])
- /* maybe experiment with try_to_free_pages() will help .... */
- return -EBUSY; /* no buffer :-( */
- devpriv->dmapages[0] = pages;
- devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
- devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
- devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1])
+ "unable to request DMA channel %d\n",
+ it->options[2]);
return -EBUSY;
- devpriv->dmapages[1] = pages;
- devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
- devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
- }
+ }
+ devpriv->dma = it->options[2];
+
+ devpriv->dmapages = 2; /* we need 16KB */
+ devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;
-no_dma:
+ for (i = 0; i < 2; i++) {
+ unsigned long dmabuf;
+
+ dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ devpriv->dmabuf[i] = dmabuf;
+ devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
+ }
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
- if (!board->n_aichan_se) {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (check_single_ended(dev->iobase)) {
+ s->n_chan = 16;
+ s->subdev_flags |= SDF_COMMON | SDF_GROUND;
} else {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- if (check_single_ended(dev->iobase)) {
- s->n_chan = board->n_aichan_se;
- s->subdev_flags |= SDF_COMMON | SDF_GROUND;
- } else {
- s->n_chan = board->n_aichan_diff;
- s->subdev_flags |= SDF_DIFF;
- }
- s->maxdata = board->ai_maxdata;
- s->range_table = board->ai_range_type;
- s->insn_read = pcl818_ai_insn_read;
- if (dev->irq) {
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmdtest = ai_cmdtest;
- s->do_cmd = ai_cmd;
- s->cancel = pcl818_ai_cancel;
- }
- if (board->is_818) {
- if ((it->options[4] == 1) || (it->options[4] == 10))
- s->range_table = &range_pcl818l_h_ai; /* secondary range list jumper selectable */
- } else {
- switch (it->options[4]) {
- case 0:
- s->range_table = &range_bipolar10;
- break;
- case 1:
- s->range_table = &range_bipolar5;
- break;
- case 2:
- s->range_table = &range_bipolar2_5;
- break;
- case 3:
- s->range_table = &range718_bipolar1;
- break;
- case 4:
- s->range_table = &range718_bipolar0_5;
- break;
- case 6:
- s->range_table = &range_unipolar10;
- break;
- case 7:
- s->range_table = &range_unipolar5;
- break;
- case 8:
- s->range_table = &range718_unipolar2;
- break;
- case 9:
- s->range_table = &range718_unipolar1;
- break;
- default:
- s->range_table = &range_unknown;
- break;
- }
- }
+ s->n_chan = 8;
+ s->subdev_flags |= SDF_DIFF;
+ }
+ s->maxdata = 0x0fff;
+
+ pcl818_set_ai_range_table(dev, s, it);
+
+ s->insn_read = pcl818_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = ai_cmdtest;
+ s->do_cmd = pcl818_ai_cmd;
+ s->cancel = pcl818_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (!board->n_aochan) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = board->n_aochan;
- s->maxdata = board->ao_maxdata;
- s->range_table = board->ao_range_type;
- s->insn_read = pcl818_ao_insn_read;
- s->insn_write = pcl818_ao_insn_write;
+ if (board->n_aochan) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = board->n_aochan;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_unipolar5;
+ s->insn_read = pcl818_ao_insn_read;
+ s->insn_write = pcl818_ao_insn_write;
if (board->is_818) {
if ((it->options[4] == 1) || (it->options[4] == 10))
s->range_table = &range_unipolar10;
@@ -1369,31 +1187,27 @@ no_dma:
if (it->options[5] == 2)
s->range_table = &range_unknown;
}
- }
-
- s = &dev->subdevices[2];
- if (!board->n_dichan) {
- s->type = COMEDI_SUBD_UNUSED;
} else {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = board->n_dichan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_di_insn_bits;
+ s->type = COMEDI_SUBD_UNUSED;
}
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl818_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- if (!board->n_dochan) {
- s->type = COMEDI_SUBD_UNUSED;
- } else {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->n_dochan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pcl818_do_insn_bits;
- }
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl818_do_insn_bits;
	/* select 1/10MHz oscillator */
if ((it->options[3] == 0) || (it->options[3] == 10))
@@ -1424,38 +1238,13 @@ static void pcl818_detach(struct comedi_device *dev)
if (devpriv->dma)
free_dma(devpriv->dma);
if (devpriv->dmabuf[0])
- free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
+ free_pages(devpriv->dmabuf[0], devpriv->dmapages);
if (devpriv->dmabuf[1])
- free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
+ free_pages(devpriv->dmabuf[1], devpriv->dmapages);
}
comedi_legacy_detach(dev);
}
-static const struct pcl818_board boardtypes[] = {
- {"pcl818l", 4, 16, 8, 25000, 1, 16, 16, &range_pcl818l_l_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl818h", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl818hd", 9, 16, 8, 10000, 1, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 1, 1},
- {"pcl818hg", 12, 16, 8, 10000, 1, 16, 16, &range_pcl818hg_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 1, 1},
- {"pcl818", 9, 16, 8, 10000, 2, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1},
- {"pcl718", 1, 16, 8, 16000, 2, 16, 16, &range_unipolar5,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 0},
- /* pcm3718 */
- {"pcm3718", 9, 16, 8, 10000, 0, 16, 16, &range_pcl818h_ai,
- &range_unipolar5, PCLx1x_RANGE, 0x00fc,
- 0x0a, 0xfff, 0xfff, 0, 1 /* XXX ? */ },
-};
-
static struct comedi_driver pcl818_driver = {
.driver_name = "pcl818",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index f4a49bd649f0..53e73737a906 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -225,8 +225,10 @@ static int pcm3724_attach(struct comedi_device *dev,
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
- subdev_8255_init(dev, s, subdev_8255_cb,
- (unsigned long)(dev->iobase + SIZE_8255 * i));
+ ret = subdev_8255_init(dev, s, subdev_8255_cb,
+ dev->iobase + SIZE_8255 * i);
+ if (ret)
+ return ret;
s->insn_config = subdev_3724_insn_config;
}
return 0;
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index fe482fdd512e..87c61d9b11da 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -61,18 +61,17 @@ static const struct pcmad_board_struct pcmad_boards[] = {
},
};
-#define TIMEOUT 100
-
-static int pcmad_ai_wait_for_eoc(struct comedi_device *dev,
- int timeout)
+static int pcmad_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- int i;
+ unsigned int status;
- for (i = 0; i < timeout; i++) {
- if ((inb(dev->iobase + PCMAD_STATUS) & 0x3) == 0x3)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMAD_STATUS);
+ if ((status & 0x3) == 0x3)
+ return 0;
+ return -EBUSY;
}
static int pcmad_ai_insn_read(struct comedi_device *dev,
@@ -89,7 +88,7 @@ static int pcmad_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + PCMAD_CONVERT);
- ret = pcmad_ai_wait_for_eoc(dev, TIMEOUT);
+ ret = comedi_timeout(dev, s, insn, pcmad_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index c388f7f32227..e89bca845349 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -589,16 +589,17 @@ static int pcmmio_cmdtest(struct comedi_device *dev,
return 0;
}
-static int pcmmio_ai_wait_for_eoc(unsigned long iobase, unsigned int timeout)
+static int pcmmio_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- while (timeout--) {
- status = inb(iobase + PCMMIO_AI_STATUS_REG);
- if (status & PCMMIO_AI_STATUS_DATA_READY)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMMIO_AI_STATUS_REG);
+ if (status & PCMMIO_AI_STATUS_DATA_READY)
+ return 0;
+ return -EBUSY;
}
static int pcmmio_ai_insn_read(struct comedi_device *dev,
@@ -643,7 +644,8 @@ static int pcmmio_ai_insn_read(struct comedi_device *dev,
cmd |= PCMMIO_AI_CMD_RANGE(range);
outb(cmd, iobase + PCMMIO_AI_CMD_REG);
- ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
if (ret)
return ret;
@@ -652,7 +654,8 @@ static int pcmmio_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(cmd, iobase + PCMMIO_AI_CMD_REG);
- ret = pcmmio_ai_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ai_eoc, 0);
if (ret)
return ret;
@@ -684,16 +687,17 @@ static int pcmmio_ao_insn_read(struct comedi_device *dev,
return insn->n;
}
-static int pcmmio_ao_wait_for_eoc(unsigned long iobase, unsigned int timeout)
+static int pcmmio_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- while (timeout--) {
- status = inb(iobase + PCMMIO_AO_STATUS_REG);
- if (status & PCMMIO_AO_STATUS_DATA_READY)
- return 0;
- }
- return -ETIME;
+ status = inb(dev->iobase + PCMMIO_AO_STATUS_REG);
+ if (status & PCMMIO_AO_STATUS_DATA_READY)
+ return 0;
+ return -EBUSY;
}
static int pcmmio_ao_insn_write(struct comedi_device *dev,
@@ -726,7 +730,8 @@ static int pcmmio_ao_insn_write(struct comedi_device *dev,
outb(PCMMIO_AO_LSB_SPAN(range), iobase + PCMMIO_AO_LSB_REG);
outb(0, iobase + PCMMIO_AO_MSB_REG);
outb(cmd | PCMMIO_AO_CMD_WR_SPAN_UPDATE, iobase + PCMMIO_AO_CMD_REG);
- ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
if (ret)
return ret;
@@ -738,7 +743,8 @@ static int pcmmio_ao_insn_write(struct comedi_device *dev,
outb((val >> 8) & 0xff, iobase + PCMMIO_AO_MSB_REG);
outb(cmd | PCMMIO_AO_CMD_WR_CODE_UPDATE,
iobase + PCMMIO_AO_CMD_REG);
- ret = pcmmio_ao_wait_for_eoc(iobase, 100000);
+
+ ret = comedi_timeout(dev, s, insn, pcmmio_ao_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 55e3c2e2bc52..25706531b885 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -29,13 +29,13 @@
/* descriptor block used for chained dma transfers */
struct plx_dma_desc {
- volatile uint32_t pci_start_addr;
- volatile uint32_t local_start_addr;
+ __le32 pci_start_addr;
+ __le32 local_start_addr;
/* transfer_size is in bytes, only first 23 bits of register are used */
- volatile uint32_t transfer_size;
+ __le32 transfer_size;
/* address of next descriptor (quad word aligned), plus some
* additional bits (see PLX_DMA0_DESCRIPTOR_REG) */
- volatile uint32_t next;
+ __le32 next;
};
/**********************************************************************
diff --git a/drivers/staging/comedi/drivers/poc.c b/drivers/staging/comedi/drivers/poc.c
deleted file mode 100644
index 2ae4ee15704c..000000000000
--- a/drivers/staging/comedi/drivers/poc.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- comedi/drivers/poc.c
- Mini-drivers for POC (Piece of Crap) boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: poc
-Description: Generic driver for very simple devices
-Author: ds
-Devices: [Keithley Metrabyte] DAC-02 (dac02)
-Updated: Sat, 16 Mar 2002 17:34:48 -0800
-Status: unknown
-
-This driver is intended to support very simple ISA-based devices,
-including:
- dac02 - Keithley DAC-02 analog output board
-
-Configuration options:
- [0] - I/O port base
-*/
-
-#include <linux/module.h>
-#include "../comedidev.h"
-
-struct boarddef_struct {
- const char *name;
- unsigned int iosize;
- int (*setup) (struct comedi_device *);
- int type;
- int n_chan;
- int n_bits;
- int (*winsn) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*rinsn) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*insnbits) (struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- const struct comedi_lrange *range;
-};
-
-struct poc_private {
- unsigned int ao_readback[32];
-};
-
-static int readback_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct poc_private *devpriv = dev->private;
- int chan;
-
- chan = CR_CHAN(insn->chanspec);
- data[0] = devpriv->ao_readback[chan];
-
- return 1;
-}
-
-/* DAC-02 registers */
-#define DAC02_LSB(a) (2 * a)
-#define DAC02_MSB(a) (2 * a + 1)
-
-static int dac02_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct poc_private *devpriv = dev->private;
- int temp;
- int chan;
- int output;
-
- chan = CR_CHAN(insn->chanspec);
- devpriv->ao_readback[chan] = data[0];
- output = data[0];
-#ifdef wrong
- /* convert to complementary binary if range is bipolar */
- if ((CR_RANGE(insn->chanspec) & 0x2) == 0)
- output = ~output;
-#endif
- temp = (output << 4) & 0xf0;
- outb(temp, dev->iobase + DAC02_LSB(chan));
- temp = (output >> 4) & 0xff;
- outb(temp, dev->iobase + DAC02_MSB(chan));
-
- return 1;
-}
-
-static int poc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- const struct boarddef_struct *board = comedi_board(dev);
- struct poc_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], board->iosize);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 1);
- if (ret)
- return ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* analog output subdevice */
- s = &dev->subdevices[0];
- s->type = board->type;
- s->n_chan = board->n_chan;
- s->maxdata = (1 << board->n_bits) - 1;
- s->range_table = board->range;
- s->insn_write = board->winsn;
- s->insn_read = board->rinsn;
- s->insn_bits = board->insnbits;
- if (s->type == COMEDI_SUBD_AO || s->type == COMEDI_SUBD_DO)
- s->subdev_flags = SDF_WRITABLE;
-
- return 0;
-}
-
-static const struct boarddef_struct boards[] = {
- {
- .name = "dac02",
- .iosize = 8,
- /* .setup = dac02_setup, */
- .type = COMEDI_SUBD_AO,
- .n_chan = 2,
- .n_bits = 12,
- .winsn = dac02_ao_winsn,
- .rinsn = readback_insn,
- .range = &range_unknown,
- },
-};
-
-static struct comedi_driver poc_driver = {
- .driver_name = "poc",
- .module = THIS_MODULE,
- .attach = poc_attach,
- .detach = comedi_legacy_detach,
- .board_name = &boards[0].name,
- .num_names = ARRAY_SIZE(boards),
- .offset = sizeof(boards[0]),
-};
-module_comedi_driver(poc_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 96a46954b3c0..298dba03f902 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -214,7 +214,6 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
s->async->events |=
COMEDI_CB_EOA | COMEDI_CB_OVERFLOW;
dev_warn(dev->class_dev, "data lost\n");
- daqp_ai_cancel(dev, s);
break;
}
@@ -231,7 +230,6 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
if (devpriv->count > 0) {
devpriv->count--;
if (devpriv->count == 0) {
- daqp_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
break;
}
@@ -244,13 +242,12 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
if (loop_limit <= 0) {
dev_warn(dev->class_dev,
"loop_limit reached in daqp_interrupt()\n");
- daqp_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
+ cfc_handle_events(dev, s);
}
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 0f026afde9be..cd3fdf973bdd 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -237,20 +237,6 @@
/* The board support a channel list up to the FIFO length (1K or 8K) */
#define RTD_MAX_CHANLIST 128 /* max channel list that we allow */
-/* tuning for ai/ao instruction done polling */
-#ifdef FAST_SPIN
-#define WAIT_QUIETLY /* as nothing, spin on done bit */
-#define RTD_ADC_TIMEOUT 66000 /* 2 msec at 33mhz bus rate */
-#define RTD_DAC_TIMEOUT 66000
-#define RTD_DMA_TIMEOUT 33000 /* 1 msec */
-#else
-/* by delaying, power and electrical noise are reduced somewhat */
-#define WAIT_QUIETLY udelay(1)
-#define RTD_ADC_TIMEOUT 2000 /* in usec */
-#define RTD_DAC_TIMEOUT 2000 /* in usec */
-#define RTD_DMA_TIMEOUT 1000 /* in usec */
-#endif
-
/*======================================================================
Board specific stuff
======================================================================*/
@@ -562,21 +548,27 @@ static int rtd520_probe_fifo_depth(struct comedi_device *dev)
return fifo_size;
}
-/*
- "instructions" read/write data in "one-shot" or "software-triggered"
- mode (simplest case).
- This doesn't use interrupts.
+static int rtd_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct rtd_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->las0 + LAS0_ADC);
+ if (status & FS_ADC_NOT_EMPTY)
+ return 0;
+ return -EBUSY;
+}
- Note, we don't do any settling delays. Use an instruction list to
- select, delay, then read.
- */
static int rtd_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
- int n, ii;
- int stat;
+ int ret;
+ int n;
/* clear any old fifo data */
writel(0, devpriv->las0 + LAS0_ADC_FIFO_CLEAR);
@@ -593,14 +585,9 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* trigger conversion */
writew(0, devpriv->las0 + LAS0_ADC);
- for (ii = 0; ii < RTD_ADC_TIMEOUT; ++ii) {
- stat = readl(devpriv->las0 + LAS0_ADC);
- if (stat & FS_ADC_NOT_EMPTY) /* 1 -> not empty */
- break;
- WAIT_QUIETLY;
- }
- if (ii >= RTD_ADC_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, rtd_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
@@ -1116,9 +1103,22 @@ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/*
- Output one (or more) analog values to a single port as fast as possible.
-*/
+static int rtd_ao_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct rtd_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int bit = (chan == 0) ? FS_DAC1_NOT_EMPTY : FS_DAC2_NOT_EMPTY;
+ unsigned int status;
+
+ status = readl(devpriv->las0 + LAS0_ADC);
+ if (status & bit)
+ return 0;
+ return -EBUSY;
+}
+
static int rtd_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
@@ -1127,6 +1127,7 @@ static int rtd_ao_winsn(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
int range = CR_RANGE(insn->chanspec);
+ int ret;
/* Configure the output range (table index matches the range values) */
writew(range & 7, devpriv->las0 +
@@ -1136,8 +1137,6 @@ static int rtd_ao_winsn(struct comedi_device *dev,
* very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; ++i) {
int val = data[i] << 3;
- int stat = 0; /* initialize to avoid bogus warning */
- int ii;
/* VERIFY: comedi range and offset conversions */
@@ -1157,16 +1156,9 @@ static int rtd_ao_winsn(struct comedi_device *dev,
devpriv->ao_readback[chan] = data[i];
- for (ii = 0; ii < RTD_DAC_TIMEOUT; ++ii) {
- stat = readl(devpriv->las0 + LAS0_ADC);
- /* 1 -> not empty */
- if (stat & ((0 == chan) ? FS_DAC1_NOT_EMPTY :
- FS_DAC2_NOT_EMPTY))
- break;
- WAIT_QUIETLY;
- }
- if (ii >= RTD_DAC_TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, rtd_ao_eoc, 0);
+ if (ret)
+ return ret;
}
/* return the number of samples read/written */
@@ -1382,8 +1374,6 @@ static int rtd_auto_attach(struct comedi_device *dev,
if (dev->irq)
writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index e1f3671ac056..bd447b2add7b 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -83,8 +83,6 @@
#define RTI800_IOSIZE 0x10
-#define RTI800_AI_TIMEOUT 100
-
static const struct comedi_lrange range_rti800_ai_10_bipolar = {
4, {
BIP_RANGE(10),
@@ -145,23 +143,21 @@ struct rti800_private {
unsigned char muxgain_bits;
};
-static int rti800_ai_wait_for_conversion(struct comedi_device *dev,
- int timeout)
+static int rti800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
unsigned char status;
- int i;
- for (i = 0; i < timeout; i++) {
- status = inb(dev->iobase + RTI800_CSR);
- if (status & RTI800_CSR_OVERRUN) {
- outb(0, dev->iobase + RTI800_CLRFLAGS);
- return -EIO;
- }
- if (status & RTI800_CSR_DONE)
- return 0;
- udelay(1);
+ status = inb(dev->iobase + RTI800_CSR);
+ if (status & RTI800_CSR_OVERRUN) {
+ outb(0, dev->iobase + RTI800_CLRFLAGS);
+ return -EOVERFLOW;
}
- return -ETIME;
+ if (status & RTI800_CSR_DONE)
+ return 0;
+ return -EBUSY;
}
static int rti800_ai_insn_read(struct comedi_device *dev,
@@ -198,7 +194,8 @@ static int rti800_ai_insn_read(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
outb(0, dev->iobase + RTI800_CONVERT);
- ret = rti800_ai_wait_for_conversion(dev, RTI800_AI_TIMEOUT);
+
+ ret = comedi_timeout(dev, s, insn, rti800_ai_eoc, 0);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index a3fa2a4baef4..605a31d702e0 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -1,45 +1,44 @@
/*
- comedi/drivers/rti802.c
- Hardware driver for Analog Devices RTI-802 board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * rti802.c
+ * Comedi driver for Analog Devices RTI-802 board
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: rti802
-Description: Analog Devices RTI-802
-Author: Anders Blomdell <anders.blomdell@control.lth.se>
-Devices: [Analog Devices] RTI-802 (rti802)
-Status: works
-
-Configuration Options:
- [0] - i/o base
- [1] - unused
- [2] - dac#0 0=two's comp, 1=straight
- [3] - dac#0 0=bipolar, 1=unipolar
- [4] - dac#1 ...
- ...
- [17] - dac#7 ...
-*/
+ * Driver: rti802
+ * Description: Analog Devices RTI-802
+ * Author: Anders Blomdell <anders.blomdell@control.lth.se>
+ * Devices: (Analog Devices) RTI-802 [rti802]
+ * Status: works
+ *
+ * Configuration Options:
+ * [0] - i/o base
+ * [1] - unused
+ * [2,4,6,8,10,12,14,16] - dac#[0-7] 0=two's comp, 1=straight
+ * [3,5,7,9,11,13,15,17] - dac#[0-7] 0=bipolar, 1=unipolar
+ */
#include <linux/module.h>
#include "../comedidev.h"
-#define RTI802_SIZE 4
-
-#define RTI802_SELECT 0
-#define RTI802_DATALOW 1
-#define RTI802_DATAHIGH 2
+/*
+ * Register I/O map
+ */
+#define RTI802_SELECT 0x00
+#define RTI802_DATALOW 0x01
+#define RTI802_DATAHIGH 0x02
struct rti802_private {
enum {
@@ -51,34 +50,45 @@ struct rti802_private {
static int rti802_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct rti802_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_readback[CR_CHAN(insn->chanspec)];
+ data[i] = devpriv->ao_readback[chan];
- return i;
+ return insn->n;
}
static int rti802_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct rti802_private *devpriv = dev->private;
- int i, d;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
+ int i;
+
+ outb(chan, dev->iobase + RTI802_SELECT);
for (i = 0; i < insn->n; i++) {
- d = devpriv->ao_readback[chan] = data[i];
+ val = data[i];
+
+ devpriv->ao_readback[chan] = val;
+
+ /* munge offset binary to two's complement if needed */
if (devpriv->dac_coding[chan] == dac_2comp)
- d ^= 0x800;
- outb(chan, dev->iobase + RTI802_SELECT);
- outb(d & 0xff, dev->iobase + RTI802_DATALOW);
- outb(d >> 8, dev->iobase + RTI802_DATAHIGH);
+ val = comedi_offset_munge(s, val);
+
+ outb(val & 0xff, dev->iobase + RTI802_DATALOW);
+ outb((val >> 8) & 0xff, dev->iobase + RTI802_DATAHIGH);
}
- return i;
+
+ return insn->n;
}
static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -88,7 +98,7 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int i;
int ret;
- ret = comedi_request_region(dev, it->options[0], RTI802_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x04);
if (ret)
return ret;
@@ -100,22 +110,21 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Analog Output subdevice */
s = &dev->subdevices[0];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 0xfff;
- s->n_chan = 8;
- s->insn_read = rti802_ao_insn_read;
- s->insn_write = rti802_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->maxdata = 0xfff;
+ s->n_chan = 8;
+ s->insn_read = rti802_ao_insn_read;
+ s->insn_write = rti802_ao_insn_write;
s->range_table_list = devpriv->range_type_list;
for (i = 0; i < 8; i++) {
devpriv->dac_coding[i] = (it->options[3 + 2 * i])
- ? (dac_straight)
- : (dac_2comp);
+ ? (dac_straight) : (dac_2comp);
devpriv->range_type_list[i] = (it->options[2 + 2 * i])
- ? &range_unipolar10 : &range_bipolar10;
+ ? &range_unipolar10 : &range_bipolar10;
}
return 0;
@@ -130,5 +139,5 @@ static struct comedi_driver rti802_driver = {
module_comedi_driver(rti802_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Analog Devices RTI-802 board");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 9950f59b1192..85d2b7a3c125 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -420,15 +420,28 @@ static int s526_ai_insn_config(struct comedi_device *dev,
return result;
}
+static int s526_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + REG_ISR);
+ if (status & ISR_ADC_DONE)
+ return 0;
+ return -EBUSY;
+}
+
static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct s526_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- int n, i;
+ int n;
unsigned short value;
unsigned int d;
- unsigned int status;
+ int ret;
/* Set configured delay, enable channel for this channel only,
* select "ADC read" channel, set "ADC start" bit. */
@@ -440,17 +453,12 @@ static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
outw(value, dev->iobase + REG_ADC);
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = inw(dev->iobase + REG_ISR);
- if (status & ISR_ADC_DONE) {
- outw(ISR_ADC_DONE, dev->iobase + REG_ISR);
- break;
- }
- }
- if (i == TIMEOUT)
- return -ETIMEDOUT;
+ ret = comedi_timeout(dev, s, insn, s526_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ outw(ISR_ADC_DONE, dev->iobase + REG_ISR);
/* read data */
d = inw(dev->iobase + REG_ADD);
@@ -604,7 +612,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->insn_bits = s526_dio_insn_bits;
s->insn_config = s526_dio_insn_config;
- return 1;
+ return 0;
}
static struct comedi_driver s526_driver = {
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 19da1dbea494..95fadf343f27 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -106,16 +106,16 @@ struct s626_private {
struct s626_enc_info {
/* Pointers to functions that differ for A and B counters: */
/* Return clock enable. */
- uint16_t(*get_enable)(struct comedi_device *dev,
+ uint16_t (*get_enable)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return interrupt source. */
- uint16_t(*get_int_src)(struct comedi_device *dev,
+ uint16_t (*get_int_src)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return preload trigger source. */
- uint16_t(*get_load_trig)(struct comedi_device *dev,
+ uint16_t (*get_load_trig)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Return standardized operating mode. */
- uint16_t(*get_mode)(struct comedi_device *dev,
+ uint16_t (*get_mode)(struct comedi_device *dev,
const struct s626_enc_info *k);
/* Generate soft index strobe. */
void (*pulse_index)(struct comedi_device *dev,
@@ -209,6 +209,8 @@ static const struct comedi_lrange s626_range_table = {
static void s626_debi_transfer(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
+ static const int timeout = 10000;
+ int i;
/* Initiate upload of shadow RAM to DEBI control register */
s626_mc_enable(dev, S626_MC2_UPLD_DEBI, S626_P_MC2);
@@ -217,12 +219,23 @@ static void s626_debi_transfer(struct comedi_device *dev)
* Wait for completion of upload from shadow RAM to
* DEBI control register.
*/
- while (!s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
- ;
+ for (i = 0; i < timeout; i++) {
+ if (s626_mc_test(dev, S626_MC2_UPLD_DEBI, S626_P_MC2))
+ break;
+ udelay(1);
+ }
+ if (i == timeout)
+ comedi_error(dev,
+ "Timeout while uploading to DEBI control register.");
/* Wait until DEBI transfer is done */
- while (readl(devpriv->mmio + S626_P_PSR) & S626_PSR_DEBI_S)
- ;
+ for (i = 0; i < timeout; i++) {
+ if (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_DEBI_S))
+ break;
+ udelay(1);
+ }
+ if (i == timeout)
+ comedi_error(dev, "DEBI transfer timeout.");
}
/*
@@ -351,14 +364,57 @@ static const uint8_t s626_trimadrs[] = {
0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
};
+enum {
+ s626_send_dac_wait_not_mc1_a2out,
+ s626_send_dac_wait_ssr_af2_out,
+ s626_send_dac_wait_fb_buffer2_msb_00,
+ s626_send_dac_wait_fb_buffer2_msb_ff
+};
+
+static int s626_send_dac_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct s626_private *devpriv = dev->private;
+ unsigned int status;
+
+ switch (context) {
+ case s626_send_dac_wait_not_mc1_a2out:
+ status = readl(devpriv->mmio + S626_P_MC1);
+ if (!(status & S626_MC1_A2OUT))
+ return 0;
+ break;
+ case s626_send_dac_wait_ssr_af2_out:
+ status = readl(devpriv->mmio + S626_P_SSR);
+ if (status & S626_SSR_AF2_OUT)
+ return 0;
+ break;
+ case s626_send_dac_wait_fb_buffer2_msb_00:
+ status = readl(devpriv->mmio + S626_P_FB_BUFFER2);
+ if (!(status & 0xff000000))
+ return 0;
+ break;
+ case s626_send_dac_wait_fb_buffer2_msb_ff:
+ status = readl(devpriv->mmio + S626_P_FB_BUFFER2);
+ if (status & 0xff000000)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return -EBUSY;
+}
+
/*
* Private helper function: Transmit serial data to DAC via Audio
* channel 2. Assumes: (1) TSL2 slot records initialized, and (2)
* dacpol contains valid target image.
*/
-static void s626_send_dac(struct comedi_device *dev, uint32_t val)
+static int s626_send_dac(struct comedi_device *dev, uint32_t val)
{
struct s626_private *devpriv = dev->private;
+ int ret;
/* START THE SERIAL CLOCK RUNNING ------------- */
@@ -404,8 +460,12 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* Done by polling the DMAC enable flag; this flag is automatically
* cleared when the transfer has finished.
*/
- while (readl(devpriv->mmio + S626_P_MC1) & S626_MC1_A2OUT)
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_not_mc1_a2out);
+ if (ret) {
+ comedi_error(dev, "DMA transfer timeout.");
+ return ret;
+ }
/* START THE OUTPUT STREAM TO THE TARGET DAC -------------------- */
@@ -425,8 +485,12 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* finished transferring the DAC's data DWORD from the output FIFO
* to the output buffer register.
*/
- while (!(readl(devpriv->mmio + S626_P_SSR) & S626_SSR_AF2_OUT))
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_ssr_af2_out);
+ if (ret) {
+ comedi_error(dev, "TSL timeout waiting for slot 1 to execute.");
+ return ret;
+ }
/*
* Set up to trap execution at slot 0 when the TSL sequencer cycles
@@ -466,8 +530,13 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* from 0xFF to 0x00, which slot 0 causes to happen by shifting
* out/in on SD2 the 0x00 that is always referenced by slot 5.
*/
- while (readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000)
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_fb_buffer2_msb_00);
+ if (ret) {
+ comedi_error(dev,
+ "TSL timeout waiting for slot 0 to execute.");
+ return ret;
+ }
}
/*
* Either (1) we were too late setting the slot 0 trap; the TSL
@@ -486,14 +555,19 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val)
* the next DAC write. This is detected when FB_BUFFER2 MSB changes
* from 0x00 to 0xFF.
*/
- while (!(readl(devpriv->mmio + S626_P_FB_BUFFER2) & 0xff000000))
- ;
+ ret = comedi_timeout(dev, NULL, NULL, s626_send_dac_eoc,
+ s626_send_dac_wait_fb_buffer2_msb_ff);
+ if (ret) {
+ comedi_error(dev, "TSL timeout waiting for slot 0 to execute.");
+ return ret;
+ }
+ return 0;
}
/*
* Private helper function: Write setpoint to an application DAC channel.
*/
-static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
+static int s626_set_dac(struct comedi_device *dev, uint16_t chan,
int16_t dacdata)
{
struct s626_private *devpriv = dev->private;
@@ -556,10 +630,10 @@ static void s626_set_dac(struct comedi_device *dev, uint16_t chan,
val |= ((uint32_t)(chan & 1) << 15); /* Address the DAC channel
* within the device. */
val |= (uint32_t)dacdata; /* Include DAC setpoint data. */
- s626_send_dac(dev, val);
+ return s626_send_dac(dev, val);
}
-static void s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
+static int s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
uint8_t dac_data)
{
struct s626_private *devpriv = dev->private;
@@ -606,17 +680,22 @@ static void s626_write_trim_dac(struct comedi_device *dev, uint8_t logical_chan,
* Address the DAC channel within the trimdac device.
* Include DAC setpoint data.
*/
- s626_send_dac(dev, (chan << 8) | dac_data);
+ return s626_send_dac(dev, (chan << 8) | dac_data);
}
-static void s626_load_trim_dacs(struct comedi_device *dev)
+static int s626_load_trim_dacs(struct comedi_device *dev)
{
uint8_t i;
+ int ret;
/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
- for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++)
- s626_write_trim_dac(dev, i,
+ for (i = 0; i < ARRAY_SIZE(s626_trimchan); i++) {
+ ret = s626_write_trim_dac(dev, i,
s626_i2c_read(dev, s626_trimadrs[i]));
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/* ****** COUNTER FUNCTIONS ******* */
@@ -1846,6 +1925,20 @@ static int s626_ai_rinsn(struct comedi_device *dev,
}
#endif
+static int s626_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ struct s626_private *devpriv = dev->private;
+ unsigned int status;
+
+ status = readl(devpriv->mmio + S626_P_PSR);
+ if (status & S626_PSR_GPIO2)
+ return 0;
+ return -EBUSY;
+}
+
static int s626_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -1856,6 +1949,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
uint16_t adc_spec = 0;
uint32_t gpio_image;
uint32_t tmp;
+ int ret;
int n;
/*
@@ -1897,8 +1991,9 @@ static int s626_ai_insn_read(struct comedi_device *dev,
*/
/* Wait for ADC done */
- while (!(readl(devpriv->mmio + S626_P_PSR) & S626_PSR_GPIO2))
- ;
+ ret = comedi_timeout(dev, s, insn, s626_ai_eoc, 0);
+ if (ret)
+ return ret;
/* Fetch ADC data */
if (n != 0) {
@@ -2299,6 +2394,7 @@ static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
{
struct s626_private *devpriv = dev->private;
int i;
+ int ret;
uint16_t chan = CR_CHAN(insn->chanspec);
int16_t dacdata;
@@ -2307,7 +2403,9 @@ static int s626_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->ao_readback[CR_CHAN(insn->chanspec)] = data[i];
dacdata -= (0x1fff);
- s626_set_dac(dev, chan, dacdata);
+ ret = s626_set_dac(dev, chan, dacdata);
+ if (ret)
+ return ret;
}
return i;
@@ -2543,12 +2641,13 @@ static int s626_allocate_dma_buffers(struct comedi_device *dev)
return 0;
}
-static void s626_initialize(struct comedi_device *dev)
+static int s626_initialize(struct comedi_device *dev)
{
struct s626_private *devpriv = dev->private;
dma_addr_t phys_buf;
uint16_t chan;
int i;
+ int ret;
/* Enable DEBI and audio pins, enable I2C interface */
s626_mc_enable(dev, S626_MC1_DEBI | S626_MC1_AUDIO | S626_MC1_I2C,
@@ -2749,7 +2848,9 @@ static void s626_initialize(struct comedi_device *dev)
* sometimes causes the first few TrimDAC writes to malfunction.
*/
s626_load_trim_dacs(dev);
- s626_load_trim_dacs(dev);
+ ret = s626_load_trim_dacs(dev);
+ if (ret)
+ return ret;
/*
* Manually init all gate array hardware in case this is a soft
@@ -2763,8 +2864,11 @@ static void s626_initialize(struct comedi_device *dev)
* Init all DAC outputs to 0V and init all DAC setpoint and
* polarity images.
*/
- for (chan = 0; chan < S626_DAC_CHANNELS; chan++)
- s626_set_dac(dev, chan, 0);
+ for (chan = 0; chan < S626_DAC_CHANNELS; chan++) {
+ ret = s626_set_dac(dev, chan, 0);
+ if (ret)
+ return ret;
+ }
/* Init counters */
s626_counters_init(dev);
@@ -2780,6 +2884,8 @@ static void s626_initialize(struct comedi_device *dev)
/* Initialize the digital I/O subsystem */
s626_dio_init(dev);
+
+ return 0;
}
static int s626_auto_attach(struct comedi_device *dev,
@@ -2900,9 +3006,9 @@ static int s626_auto_attach(struct comedi_device *dev,
s->insn_read = s626_enc_insn_read;
s->insn_write = s626_enc_insn_write;
- s626_initialize(dev);
-
- dev_info(dev->class_dev, "%s attached\n", dev->board_name);
+ ret = s626_initialize(dev);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index 77e2059ff62e..39008cf30ecb 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -142,6 +142,29 @@ static int skel_ns_to_timer(unsigned int *ns, int round)
}
/*
+ * This function doesn't require a particular form, this is just
+ * what happens to be used in some of the drivers. The comedi_timeout()
+ * helper uses this callback to check for the end-of-conversion while
+ * waiting for up to 1 second. This function should return 0 when the
+ * conversion is finished and -EBUSY to keep waiting. Any other errno
+ * will terminate comedi_timeout() and return that errno to the caller.
+ * If the timeout occurs, comedi_timeout() will return -ETIMEDOUT.
+ */
+static int skel_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ /* status = inb(dev->iobase + SKEL_STATUS); */
+ status = 1;
+ if (status)
+ return 0;
+ return -EBUSY;
+}
+
+/*
* "instructions" read/write data in "one-shot" or "software-triggered"
* mode.
*/
@@ -149,9 +172,9 @@ static int skel_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct skel_board *thisboard = comedi_board(dev);
- int n, i;
+ int n;
unsigned int d;
- unsigned int status;
+ int ret;
/* a typical programming sequence */
@@ -165,18 +188,10 @@ static int skel_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* trigger conversion */
/* outw(0,dev->iobase + SKEL_CONVERT); */
-#define TIMEOUT 100
/* wait for conversion to end */
- for (i = 0; i < TIMEOUT; i++) {
- status = 1;
- /* status = inb(dev->iobase + SKEL_STATUS); */
- if (status)
- break;
- }
- if (i == TIMEOUT) {
- dev_warn(dev->class_dev, "ai timeout\n");
- return -ETIMEDOUT;
- }
+ ret = comedi_timeout(dev, s, insn, skel_ai_eoc, 0);
+ if (ret)
+ return ret;
/* read data */
/* d = inw(dev->iobase + SKEL_AI_DATA); */
@@ -456,8 +471,6 @@ static int skel_common_attach(struct comedi_device *dev)
s->type = COMEDI_SUBD_UNUSED;
}
- dev_info(dev->class_dev, "skel: attached\n");
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index df22a78d2b7e..848c30801580 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -161,8 +161,7 @@ static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(PCMR, CSCIR);
outb((inb(CSCDR) & 0xAA), CSCDR);
- dev_info(dev->class_dev, "%s: attached\n", dev->board_name);
- return 1;
+ return 0;
}
static void dnp_detach(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 5f85c55c1109..d6fae11ee4e0 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -372,7 +372,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_src(&cmd->start_src,
TRIG_NOW | TRIG_EXT | TRIG_INT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT);
+ TRIG_FOLLOW | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -424,9 +424,6 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp);
}
- if (cmd->scan_begin_src == TRIG_TIMER)
- err |= -EINVAL;
-
/* stop source */
switch (cmd->stop_src) {
case TRIG_COUNT:
@@ -514,36 +511,28 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
devpriv->ignore = PACKETS_TO_IGNORE;
- if (cmd->chanlist_len > 0) {
- gain = CR_RANGE(cmd->chanlist[0]);
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- if (chan != i) {
- dev_err(dev->class_dev,
- "channels are not consecutive\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
- if ((gain != CR_RANGE(cmd->chanlist[i]))
- && (cmd->chanlist_len > 3)) {
- dev_err(dev->class_dev,
- "gain must be the same for all channels\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
- if (i >= NUMCHANNELS) {
- dev_err(dev->class_dev, "chanlist too long\n");
- break;
- }
+ gain = CR_RANGE(cmd->chanlist[0]);
+ for (i = 0; i < cmd->chanlist_len; ++i) {
+ chan = CR_CHAN(cmd->chanlist[i]);
+ if (chan != i) {
+ dev_err(dev->class_dev,
+ "channels are not consecutive\n");
+ up(&devpriv->sem);
+ return -EINVAL;
+ }
+ if ((gain != CR_RANGE(cmd->chanlist[i]))
+ && (cmd->chanlist_len > 3)) {
+ dev_err(dev->class_dev,
+ "gain must be the same for all channels\n");
+ up(&devpriv->sem);
+ return -EINVAL;
+ }
+ if (i >= NUMCHANNELS) {
+ dev_err(dev->class_dev, "chanlist too long\n");
+ break;
}
}
steps = 0;
- if (cmd->scan_begin_src == TRIG_TIMER) {
- dev_err(dev->class_dev,
- "scan_begin_src==TRIG_TIMER not valid\n");
- up(&devpriv->sem);
- return -EINVAL;
- }
if (cmd->convert_src == TRIG_TIMER)
steps = (cmd->convert_arg * 30) / 1000;
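Dropping TRIG_TIMER from the scan_begin_src mask in step 1 is what makes the later explicit TRIG_TIMER rejections redundant: the comedi_fc trigger-source check masks the requested source against the allowed set and errors out if anything was removed, so an unsupported source never survives to the later steps. A rough, illustrative sketch of what that helper does (not the driver's own code):

/* Illustrative only: mask the requested trigger source against the
 * sources the driver supports; fail if the request was not a single
 * supported source. */
static int check_trigger_src_sketch(unsigned int *src, unsigned int allowed)
{
	unsigned int orig_src = *src;

	*src = orig_src & allowed;
	if (!*src || *src != orig_src)
		return -EINVAL;
	return 0;
}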
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
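The usbduxsigma change above replaces a direct 32-bit load from an odd offset in insn_buf with get_unaligned(), which is safe on architectures that fault or penalize misaligned accesses. A small illustrative helper showing the same idiom (the function and parameter names are placeholders):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative only: read a 32-bit big-endian value from an arbitrarily
 * aligned position in a byte buffer. */
static u32 read_be32_at(const u8 *buf, unsigned int offset)
{
	/* equivalent to be32_to_cpu(get_unaligned((__be32 *)(buf + offset))) */
	return get_unaligned_be32(buf + offset);
}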
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 7dc5a18e69d4..8777f958c041 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -41,7 +41,8 @@ struct comedi_device *comedi_open(const char *filename)
if (strncmp(filename, "/dev/comedi", 11) != 0)
return NULL;
- minor = simple_strtoul(filename + 11, NULL, 0);
+ if (kstrtouint(filename + 11, 0, &minor))
+ return NULL;
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
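Unlike simple_strtoul(), kstrtouint() reports failure for malformed or overflowing input instead of silently stopping at the first non-digit, which is why a bogus "/dev/comedi" suffix can now be rejected outright. A tiny illustrative fragment, not part of the driver:

static void kstrtouint_demo(void)
{
	unsigned int val;

	if (kstrtouint("42", 10, &val) == 0)
		pr_debug("parsed %u\n", val);	/* val == 42 */

	if (kstrtouint("42abc", 10, &val))
		pr_debug("rejected: trailing characters\n");
}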
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index da6bc5855ebc..91dea25b5724 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -1,27 +1,26 @@
/*
- module/proc.c
- /proc interface for comedi
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * /proc interface for comedi
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This is some serious bloatware.
-
- Taken from Dave A.'s PCL-711 driver, 'cuz I thought it
- was cool.
-*/
+ * This is some serious bloatware.
+ *
+ * Taken from Dave A.'s PCL-711 driver, 'cuz I thought it
+ * was cool.
+ */
#include "comedidev.h"
#include "comedi_internal.h"
@@ -34,11 +33,8 @@ static int comedi_read(struct seq_file *m, void *v)
int devices_q = 0;
struct comedi_driver *driv;
- seq_printf(m,
- "comedi version " COMEDI_RELEASE "\n"
- "format string: %s\n",
- "\"%2d: %-20s %-20s %4d\", i, "
- "driver_name, board_name, n_subdevices");
+ seq_printf(m, "comedi version " COMEDI_RELEASE "\nformat string: %s\n",
+ "\"%2d: %-20s %-20s %4d\", i, driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_get_from_minor(i);
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 46b3da686384..b6849545b810 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -143,28 +143,23 @@ int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int chanspec;
int chan, range_len, i;
- if (s->range_table || s->range_table_list) {
- for (i = 0; i < n; i++) {
- chanspec = chanlist[i];
- chan = CR_CHAN(chanspec);
- if (s->range_table)
- range_len = s->range_table->length;
- else if (s->range_table_list && chan < s->n_chan)
- range_len = s->range_table_list[chan]->length;
- else
- range_len = 0;
- if (chan >= s->n_chan ||
- CR_RANGE(chanspec) >= range_len ||
- aref_invalid(s, chanspec)) {
- dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
- i, chanspec, chan, range_len);
- return -EINVAL;
- }
+ for (i = 0; i < n; i++) {
+ chanspec = chanlist[i];
+ chan = CR_CHAN(chanspec);
+ if (s->range_table)
+ range_len = s->range_table->length;
+ else if (s->range_table_list && chan < s->n_chan)
+ range_len = s->range_table_list[chan]->length;
+ else
+ range_len = 0;
+ if (chan >= s->n_chan ||
+ CR_RANGE(chanspec) >= range_len ||
+ aref_invalid(s, chanspec)) {
+ dev_warn(dev->class_dev,
+ "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
+ i, chanspec, chan, range_len);
+ return -EINVAL;
}
- } else {
- dev_err(dev->class_dev, "(bug) no range type list!\n");
- return -EINVAL;
}
return 0;
}
diff --git a/drivers/staging/crystalhd/bcm_70012_regs.h b/drivers/staging/crystalhd/bcm_70012_regs.h
index f3ab3146cd90..da199ad8e27e 100644
--- a/drivers/staging/crystalhd/bcm_70012_regs.h
+++ b/drivers/staging/crystalhd/bcm_70012_regs.h
@@ -31,7 +31,8 @@
#define BRCM_SHIFT(c, r, f) c##_##r##_##f##_SHIFT
#define GET_FIELD(m, c, r, f) \
- ((((m) & BRCM_MASK(c, r, f)) >> BRCM_SHIFT(c, r, f)) << BRCM_ALIGN(c, r, f))
+ ((((m) & BRCM_MASK(c, r, f)) >> BRCM_SHIFT(c, r, f)) << \
+ BRCM_ALIGN(c, r, f))
#define SET_FIELD(m, c, r, f, d) \
((m) = (((m) & ~BRCM_MASK(c, r, f)) | ((((d) >> BRCM_ALIGN(c, r, f)) << \
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 8d0680d93684..4765d528279b 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -433,10 +433,12 @@ static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
}
#define crystalhd_hw_delete_ioq(adp, q) \
+do { \
if (q) { \
crystalhd_delete_dioq(adp, q); \
q = NULL; \
- }
+ } \
+} while (0)
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
@@ -1437,7 +1439,7 @@ static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
}
- return (tmp_lsts != hw->rx_list_sts[0]);
+ return tmp_lsts != hw->rx_list_sts[0];
}
static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
@@ -1507,7 +1509,7 @@ static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
}
- return (tmp_lsts != hw->rx_list_sts[1]);
+ return tmp_lsts != hw->rx_list_sts[1];
}
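Wrapping crystalhd_hw_delete_ioq() in do { ... } while (0) is the standard way to make a multi-statement macro expand to exactly one statement, so it composes safely with an unbraced if/else at the call site. A minimal illustration with a made-up macro name:

#define FREE_AND_CLEAR(p)		\
do {					\
	kfree(p);			\
	(p) = NULL;			\
} while (0)

/*
 * Safe even without braces around the if-body:
 *
 *	if (done)
 *		FREE_AND_CLEAR(buf);
 *	else
 *		resubmit(buf);
 */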
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 99eefd0291c3..20be9571990a 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -111,7 +111,7 @@ static void chd_dec_free_iodata(struct crystalhd_adp *adp,
spin_unlock_irqrestore(&adp->lock, flags);
}
-static inline int crystalhd_user_data(unsigned long ud, void *dr,
+static inline int crystalhd_user_data(void __user *ud, void *dr,
int size, int set)
{
int rc;
@@ -122,9 +122,9 @@ static inline int crystalhd_user_data(unsigned long ud, void *dr,
}
if (set)
- rc = copy_to_user((void *)ud, dr, size);
+ rc = copy_to_user(ud, dr, size);
else
- rc = copy_from_user(dr, (void *)ud, size);
+ rc = copy_from_user(dr, ud, size);
if (rc) {
BCMLOG_ERR("Invalid args for command\n");
@@ -153,7 +153,8 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp,
io->add_cdata_sz = m_sz;
ua_off = ua + sizeof(io->udata);
- rc = crystalhd_user_data(ua_off, io->add_cdata, io->add_cdata_sz, 0);
+ rc = crystalhd_user_data((void __user *)ua_off, io->add_cdata,
+ io->add_cdata_sz, 0);
if (rc) {
BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
io->add_cdata_sz, (unsigned int)ua_off);
@@ -178,7 +179,7 @@ static int chd_dec_release_cdata(struct crystalhd_adp *adp,
if (io->cmd != BCM_IOC_FW_DOWNLOAD) {
ua_off = ua + sizeof(io->udata);
- rc = crystalhd_user_data(ua_off, io->add_cdata,
+ rc = crystalhd_user_data((void __user *)ua_off, io->add_cdata,
io->add_cdata_sz, 1);
if (rc) {
BCMLOG_ERR(
@@ -208,7 +209,8 @@ static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
return -EINVAL;
}
- rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set);
+ rc = crystalhd_user_data((void __user *)ua, &io->udata,
+ sizeof(io->udata), set);
if (rc) {
BCMLOG_ERR("failed to %s iodata\n", (set ? "set" : "get"));
return rc;
@@ -546,9 +548,10 @@ static int chd_dec_pci_probe(struct pci_dev *pdev,
int rc;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
+ BCMLOG(BCMLOG_DBG,
+ "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_KERNEL);
if (!pinfo) {
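Typing the user-space pointer as void __user * lets sparse verify that it is only ever dereferenced through copy_to_user()/copy_from_user(), which is the point of the crystalhd_user_data() change above. A hedged sketch of the same idiom (names are placeholders, <linux/uaccess.h> assumed):

/* Illustrative only: copy a block between kernel and user space, with
 * "to_user" selecting the direction as in the driver above. */
static int copy_user_block(void __user *uptr, void *kptr, size_t size,
			   int to_user)
{
	unsigned long rc;

	rc = to_user ? copy_to_user(uptr, kptr, size)
		     : copy_from_user(kptr, uptr, size);

	return rc ? -EFAULT : 0;
}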
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index 816e1cd5db62..49e1ef3a19af 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -58,11 +58,11 @@ struct crystalhd_adp {
unsigned long pci_mem_start;
uint32_t pci_mem_len;
- void *addr;
+ void __iomem *addr;
unsigned long pci_i2o_start;
uint32_t pci_i2o_len;
- void *i2o_addr;
+ void __iomem *i2o_addr;
unsigned int drv_data;
unsigned int dmabits; /* 32 | 64 */
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index c3d024406337..3aabf75b7d97 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -740,7 +740,7 @@ enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
dio->fb_size = ubuff_sz & 0x03;
if (dio->fb_size) {
res = copy_from_user(dio->fb_va,
- (void *)(uaddr + count - dio->fb_size),
+ (void __user *)(uaddr + count - dio->fb_size),
dio->fb_size);
if (res) {
BCMLOG_ERR("failed %d to copy %u fill bytes from %p\n",
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 77ab72a2a061..0f63827acfb4 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -225,7 +225,7 @@ do { \
#define BCMLOG_ERR(fmt, args...) \
do { \
if (g_linklog_level & BCMLOG_ERROR) \
- printk(KERN_ERR "*ERR*:%s:%d: "fmt, \
+ pr_err("*ERR*:%s:%d: "fmt, \
__FILE__, __LINE__, ##args); \
} while (0)
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
index 2f217e9daac1..b879e7b553c2 100644
--- a/drivers/staging/cxt1e1/Makefile
+++ b/drivers/staging/cxt1e1/Makefile
@@ -4,7 +4,6 @@ ccflags-y := -DSBE_PMCC4_ENABLE
ccflags-y += -DSBE_ISR_TASKLET
cxt1e1-y := \
- ossiRelease.o \
musycc.o \
pmcc4_drv.o \
comet.o \
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index c4c8c0f9c959..7005ad022339 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -22,18 +22,20 @@
#include "comet.h"
#include "comet_tables.h"
-extern int cxt1e1_log_level;
#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
static void SetPwrLevel(struct s_comet_reg *comet);
-static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table);
-static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet,
+ u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, struct s_comet_reg *comet,
+ u_int8_t table[COMET_NUM_SAMPLES]
+ [COMET_NUM_UNITS]);
-void *TWV_table[12] = {
+static void *TWV_table[12] = {
TWVLongHaul0DB, TWVLongHaul7_5DB, TWVLongHaul15DB, TWVLongHaul22_5DB,
TWVShortHaul0, TWVShortHaul1, TWVShortHaul2, TWVShortHaul3,
TWVShortHaul4, TWVShortHaul5,
@@ -50,6 +52,7 @@ lbo_tbl_lkup(int t1, int lbo) {
if (t1)
/* default T1 waveform table */
lbo = CFG_LBO_LH0;
+
else
/* default E1 waveform table */
lbo = CFG_LBO_E120;
@@ -58,8 +61,8 @@ lbo_tbl_lkup(int t1, int lbo) {
return lbo - 1;
}
-void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int clockmaster,
- u_int8_t moreParams)
+void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode,
+ int clockmaster, u_int8_t moreParams)
{
u_int8_t isT1mode;
/* T1 default */
@@ -146,7 +149,9 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
/* t1RBOC enable(BOC:BitOriented Code) */
pci_write_32((u_int32_t *) &comet->t1_rboc_ena, 0x00);
if (isT1mode) {
- /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
+ /* IBCD cfg: aka Inband Code Detection ** loopback code length
+ * set to
+ */
/* 6 bit down, 5 bit up (assert) */
pci_write_32((u_int32_t *) &comet->ibcd_cfg, 0x04);
/* line loopback activate pattern */
@@ -286,7 +291,9 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
if (clockmaster) {
- /* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
+ /* CMODE == clockMode, 0=clock master
+ * (so all 3 others should be slave)
+ */
/* rate = 1.544 Mb/s */
if (isT1mode)
/* Comet 0 Master Mode(CMODE=0) */
@@ -398,7 +405,8 @@ void init_comet(void *ci, struct s_comet_reg *comet, u_int32_t port_mode, int cl
** Returns: Nothing
*/
static void
-WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+WrtXmtWaveform(ci_t *ci, struct s_comet_reg *comet, u_int32_t sample,
+ u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
@@ -447,7 +455,7 @@ static void
WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
{
u_int32_t ramaddr;
- volatile u_int32_t value;
+ u_int32_t value;
for (ramaddr = 0; ramaddr < 256; ramaddr++) {
/*** the following lines are per Errata 7, 2.5 ***/
@@ -515,7 +523,7 @@ WrtRcvEqualizerTbl(ci_t *ci, struct s_comet_reg *comet, u_int32_t *table)
static void
SetPwrLevel(struct s_comet_reg *comet)
{
- volatile u_int32_t temp;
+ u_int32_t temp;
/*
** Algorithm to Balance the Power Distribution of Ttip Tring
@@ -557,17 +565,21 @@ SetPwrLevel(struct s_comet_reg *comet)
static void
SetCometOps(struct s_comet_reg *comet)
{
- volatile u_int8_t rd_value;
+ u_int8_t rd_value;
if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2)) {
/* read the BRIF Configuration */
- rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *)
+ &comet->brif_cfg);
rd_value &= ~0x20;
- pci_write_32((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+ pci_write_32((u_int32_t *) &comet->brif_cfg,
+ (u_int32_t) rd_value);
/* read the BRIF Frame Pulse Configuration */
- rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_fpcfg);
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *)
+ &comet->brif_fpcfg);
rd_value &= ~0x20;
- pci_write_32((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg,
+ (u_int8_t) rd_value);
} else {
/* read the BRIF Configuration */
rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
index 84931117da35..e96665ea3662 100644
--- a/drivers/staging/cxt1e1/comet_tables.c
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -19,6 +19,7 @@
*/
#include <linux/types.h>
+#include "comet_tables.h"
/*****************************************************************************
*
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index 95218e283966..ee9d39bbd251 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -25,7 +25,8 @@
#include "pmcc4.h"
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
- defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+defined(CONFIG_SBE_HDLC_V7_MODULE) || \
+defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
#else
#define _v7_hdlc_ 0
@@ -33,9 +34,9 @@
#if _v7_hdlc_
#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7 (hdlc_device *);
-extern int unregister_hdlc_device_v7 (hdlc_device *);
+extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7(hdlc_device *);
+extern int unregister_hdlc_device_v7(hdlc_device *);
#else
#define V7(x) x
@@ -47,55 +48,54 @@ static int dummy = 0;
#endif
-extern int cxt1e1_log_level;
extern int drvr_state;
#if 1
u_int32_t
-pci_read_32 (u_int32_t *p)
+pci_read_32(u_int32_t *p)
{
#ifdef FLOW_DEBUG
- u_int32_t v;
+ u_int32_t v;
- FLUSH_PCI_READ ();
- v = le32_to_cpu (*p);
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
- return v;
+ FLUSH_PCI_READ();
+ v = le32_to_cpu(*p);
+ if (cxt1e1_log_level >= LOG_DEBUG)
+ pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
+ return v;
#else
- FLUSH_PCI_READ (); /* */
- return le32_to_cpu (*p);
+ FLUSH_PCI_READ(); /* */
+ return le32_to_cpu(*p);
#endif
}
void
-pci_write_32 (u_int32_t *p, u_int32_t v)
+pci_write_32(u_int32_t *p, u_int32_t v)
{
#ifdef FLOW_DEBUG
- if (cxt1e1_log_level >= LOG_DEBUG)
- pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
+ if (cxt1e1_log_level >= LOG_DEBUG)
+ pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
#endif
- *p = cpu_to_le32 (v);
- FLUSH_PCI_WRITE (); /* This routine is called from routines
- * which do multiple register writes
- * which themselves need flushing between
- * writes in order to guarantee write
- * ordering. It is less code-cumbersome
- * to flush here-in then to investigate
- * and code the many other register
- * writing routines. */
+ *p = cpu_to_le32 (v);
+ FLUSH_PCI_WRITE(); /* This routine is called from routines
+ * which do multiple register writes
+ * which themselves need flushing between
+ * writes in order to guarantee write
+ * ordering. It is less code-cumbersome
+ * to flush here-in then to investigate
+ * and code the many other register
+ * writing routines. */
}
#endif
void
-pci_flush_write (ci_t *ci)
+pci_flush_write(ci_t *ci)
{
- volatile u_int32_t v;
+ volatile u_int32_t v;
/* issue a PCI read to flush PCI write thru bridge */
- v = *(u_int32_t *) &ci->reg->glcd; /* any address would do */
+ v = *(u_int32_t *) &ci->reg->glcd; /* any address would do */
/*
* return nothing, this just reads PCI bridge interface to flush
@@ -105,55 +105,53 @@ pci_flush_write (ci_t *ci)
static void
-watchdog_func (unsigned long arg)
+watchdog_func(unsigned long arg)
{
- struct watchdog *wd = (void *) arg;
-
- if (drvr_state != SBE_DRVR_AVAILABLE)
- {
- if (cxt1e1_log_level >= LOG_MONITOR)
- pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
- return;
- }
- schedule_work (&wd->work);
- mod_timer (&wd->h, jiffies + wd->ticks);
+ struct watchdog *wd = (void *) arg;
+
+ if (drvr_state != SBE_DRVR_AVAILABLE) {
+ if (cxt1e1_log_level >= LOG_MONITOR)
+ pr_warning("%s: drvr not available (%x)\n",
+ __func__, drvr_state);
+ return;
+ }
+ schedule_work(&wd->work);
+ mod_timer(&wd->h, jiffies + wd->ticks);
}
-int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *), void *c, int usec)
+int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *),
+ void *c, int usec)
{
- wdp->func = f;
- wdp->softc = c;
- wdp->ticks = (HZ) * (usec / 1000) / 1000;
- INIT_WORK(&wdp->work, (void *)f);
- init_timer (&wdp->h);
- {
- ci_t *ci = (ci_t *) c;
-
- wdp->h.data = (unsigned long) &ci->wd;
- }
- wdp->h.function = watchdog_func;
- return 0;
+ wdp->func = f;
+ wdp->softc = c;
+ wdp->ticks = (HZ) * (usec / 1000) / 1000;
+ INIT_WORK(&wdp->work, (void *)f);
+ init_timer(&wdp->h);
+ {
+ ci_t *ci = (ci_t *) c;
+
+ wdp->h.data = (unsigned long) &ci->wd;
+ }
+ wdp->h.function = watchdog_func;
+ return 0;
}
void
-OS_uwait (int usec, char *description)
+OS_uwait(int usec, char *description)
{
- int tmp;
-
- if (usec >= 1000)
- {
- mdelay (usec / 1000);
- /* now delay residual */
- tmp = (usec / 1000) * 1000; /* round */
- tmp = usec - tmp; /* residual */
- if (tmp)
- { /* wait on residual */
- udelay (tmp);
- }
- } else
- {
- udelay (usec);
- }
+ int tmp;
+
+ if (usec >= 1000) {
+ mdelay(usec / 1000);
+ /* now delay residual */
+ tmp = (usec / 1000) * 1000; /* round */
+ tmp = usec - tmp; /* residual */
+ if (tmp) { /* wait on residual */
+ udelay(tmp);
+ }
+ } else {
+ udelay(usec);
+ }
}
/* dummy short delay routine called as a subroutine so that compiler
@@ -161,96 +159,95 @@ OS_uwait (int usec, char *description)
*/
void
-OS_uwait_dummy (void)
+OS_uwait_dummy(void)
{
#ifndef USE_MAX_INT_DELAY
- dummy++;
+ dummy++;
#else
- udelay (1);
+ udelay(1);
#endif
}
void
-OS_sem_init (void *sem, int state)
+OS_sem_init(void *sem, int state)
{
- switch (state)
- {
- case SEM_TAKEN:
- sema_init((struct semaphore *) sem, 0);
- break;
- case SEM_AVAILABLE:
+ switch (state) {
+ case SEM_TAKEN:
+ sema_init((struct semaphore *) sem, 0);
+ break;
+ case SEM_AVAILABLE:
sema_init((struct semaphore *) sem, 1);
- break;
- default: /* otherwise, set sem.count to state's
- * value */
- sema_init (sem, state);
- break;
- }
+ break;
+ default: /* otherwise, set sem.count to state's
+ * value */
+ sema_init(sem, state);
+ break;
+ }
}
int
-sd_line_is_ok (void *user)
+sd_line_is_ok(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- return netif_carrier_ok (ndev);
+ return netif_carrier_ok(ndev);
}
void
-sd_line_is_up (void *user)
+sd_line_is_up(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- netif_carrier_on (ndev);
- return;
+ netif_carrier_on(ndev);
+ return;
}
void
-sd_line_is_down (void *user)
+sd_line_is_down(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- netif_carrier_off (ndev);
- return;
+ netif_carrier_off(ndev);
+ return;
}
void
-sd_disable_xmit (void *user)
+sd_disable_xmit(void *user)
{
- struct net_device *dev = (struct net_device *) user;
+ struct net_device *dev = (struct net_device *) user;
- netif_stop_queue (dev);
- return;
+ netif_stop_queue(dev);
+ return;
}
void
-sd_enable_xmit (void *user)
+sd_enable_xmit(void *user)
{
- struct net_device *dev = (struct net_device *) user;
+ struct net_device *dev = (struct net_device *) user;
- netif_wake_queue (dev);
- return;
+ netif_wake_queue(dev);
+ return;
}
int
-sd_queue_stopped (void *user)
+sd_queue_stopped(void *user)
{
- struct net_device *ndev = (struct net_device *) user;
+ struct net_device *ndev = (struct net_device *) user;
- return netif_queue_stopped (ndev);
+ return netif_queue_stopped(ndev);
}
void sd_recv_consume(void *token, size_t len, void *user)
{
- struct net_device *ndev = user;
- struct sk_buff *skb = token;
+ struct net_device *ndev = user;
+ struct sk_buff *skb = token;
- skb->dev = ndev;
- skb_put (skb, len);
- skb->protocol = hdlc_type_trans(skb, ndev);
- netif_rx(skb);
+ skb->dev = ndev;
+ skb_put(skb, len);
+ skb->protocol = hdlc_type_trans(skb, ndev);
+ netif_rx(skb);
}
@@ -263,86 +260,76 @@ void sd_recv_consume(void *token, size_t len, void *user)
extern ci_t *CI; /* dummy pointer to board ZERO's data */
void
-VMETRO_TRACE (void *x)
+VMETRO_TRIGGER(ci_t *ci, int x)
{
- u_int32_t y = (u_int32_t) x;
-
- pci_write_32 ((u_int32_t *) &CI->cpldbase->leds, y);
-}
-
-
-void
-VMETRO_TRIGGER (ci_t *ci, int x)
-{
- struct s_comet_reg *comet;
- volatile u_int32_t data;
-
- comet = ci->port[0].cometbase; /* default to COMET # 0 */
-
- switch (x)
- {
- default:
- case 0:
- data = pci_read_32 ((u_int32_t *) &comet->__res24); /* 0x90 */
- break;
- case 1:
- data = pci_read_32 ((u_int32_t *) &comet->__res25); /* 0x94 */
- break;
- case 2:
- data = pci_read_32 ((u_int32_t *) &comet->__res26); /* 0x98 */
- break;
- case 3:
- data = pci_read_32 ((u_int32_t *) &comet->__res27); /* 0x9C */
- break;
- case 4:
- data = pci_read_32 ((u_int32_t *) &comet->__res88); /* 0x220 */
- break;
- case 5:
- data = pci_read_32 ((u_int32_t *) &comet->__res89); /* 0x224 */
- break;
- case 6:
- data = pci_read_32 ((u_int32_t *) &comet->__res8A); /* 0x228 */
- break;
- case 7:
- data = pci_read_32 ((u_int32_t *) &comet->__res8B); /* 0x22C */
- break;
- case 8:
- data = pci_read_32 ((u_int32_t *) &comet->__resA0); /* 0x280 */
- break;
- case 9:
- data = pci_read_32 ((u_int32_t *) &comet->__resA1); /* 0x284 */
- break;
- case 10:
- data = pci_read_32 ((u_int32_t *) &comet->__resA2); /* 0x288 */
- break;
- case 11:
- data = pci_read_32 ((u_int32_t *) &comet->__resA3); /* 0x28C */
- break;
- case 12:
- data = pci_read_32 ((u_int32_t *) &comet->__resA4); /* 0x290 */
- break;
- case 13:
- data = pci_read_32 ((u_int32_t *) &comet->__resA5); /* 0x294 */
- break;
- case 14:
- data = pci_read_32 ((u_int32_t *) &comet->__resA6); /* 0x298 */
- break;
- case 15:
- data = pci_read_32 ((u_int32_t *) &comet->__resA7); /* 0x29C */
- break;
- case 16:
- data = pci_read_32 ((u_int32_t *) &comet->__res74); /* 0x1D0 */
- break;
- case 17:
- data = pci_read_32 ((u_int32_t *) &comet->__res75); /* 0x1D4 */
- break;
- case 18:
- data = pci_read_32 ((u_int32_t *) &comet->__res76); /* 0x1D8 */
- break;
- case 19:
- data = pci_read_32 ((u_int32_t *) &comet->__res77); /* 0x1DC */
- break;
- }
+ struct s_comet_reg *comet;
+ volatile u_int32_t data;
+
+ comet = ci->port[0].cometbase; /* default to COMET # 0 */
+
+ switch (x) {
+ default:
+ case 0:
+ data = pci_read_32((u_int32_t *) &comet->__res24); /* 0x90 */
+ break;
+ case 1:
+ data = pci_read_32((u_int32_t *) &comet->__res25); /* 0x94 */
+ break;
+ case 2:
+ data = pci_read_32((u_int32_t *) &comet->__res26); /* 0x98 */
+ break;
+ case 3:
+ data = pci_read_32((u_int32_t *) &comet->__res27); /* 0x9C */
+ break;
+ case 4:
+ data = pci_read_32((u_int32_t *) &comet->__res88); /* 0x220 */
+ break;
+ case 5:
+ data = pci_read_32((u_int32_t *) &comet->__res89); /* 0x224 */
+ break;
+ case 6:
+ data = pci_read_32((u_int32_t *) &comet->__res8A); /* 0x228 */
+ break;
+ case 7:
+ data = pci_read_32((u_int32_t *) &comet->__res8B); /* 0x22C */
+ break;
+ case 8:
+ data = pci_read_32((u_int32_t *) &comet->__resA0); /* 0x280 */
+ break;
+ case 9:
+ data = pci_read_32((u_int32_t *) &comet->__resA1); /* 0x284 */
+ break;
+ case 10:
+ data = pci_read_32((u_int32_t *) &comet->__resA2); /* 0x288 */
+ break;
+ case 11:
+ data = pci_read_32((u_int32_t *) &comet->__resA3); /* 0x28C */
+ break;
+ case 12:
+ data = pci_read_32((u_int32_t *) &comet->__resA4); /* 0x290 */
+ break;
+ case 13:
+ data = pci_read_32((u_int32_t *) &comet->__resA5); /* 0x294 */
+ break;
+ case 14:
+ data = pci_read_32((u_int32_t *) &comet->__resA6); /* 0x298 */
+ break;
+ case 15:
+ data = pci_read_32((u_int32_t *) &comet->__resA7); /* 0x29C */
+ break;
+ case 16:
+ data = pci_read_32((u_int32_t *) &comet->__res74); /* 0x1D0 */
+ break;
+ case 17:
+ data = pci_read_32((u_int32_t *) &comet->__res75); /* 0x1D4 */
+ break;
+ case 18:
+ data = pci_read_32((u_int32_t *) &comet->__res76); /* 0x1D8 */
+ break;
+ case 19:
+ data = pci_read_32((u_int32_t *) &comet->__res77); /* 0x1DC */
+ break;
+ }
}
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 02b4f8f1aca5..9b4198b1e634 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -31,360 +31,352 @@
#include "sbeproc.h"
#endif
-extern int cxt1e1_log_level;
extern int error_flag;
extern int drvr_state;
/* forward references */
-void c4_stopwd (ci_t *);
-struct net_device * __init c4_add_dev (hdw_info_t *, int, unsigned long, unsigned long, int, int);
+void c4_stopwd(ci_t *);
+struct net_device * __init c4_add_dev(hdw_info_t *, int, unsigned long,
+ unsigned long, int, int);
struct s_hdw_info hdw_info[MAX_BOARDS];
-void __init
-show_two (hdw_info_t *hi, int brdno)
+void __init
+show_two(hdw_info_t *hi, int brdno)
{
- ci_t *ci;
- struct pci_dev *pdev;
- char *bid;
- char *bp, banner[80];
- char sn[6];
-
- bp = banner;
- memset (banner, 0, 80); /* clear print buffer */
-
- ci = (ci_t *)(netdev_priv(hi->ndev));
- bid = sbeid_get_bdname (ci);
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (sn, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (sn, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- break;
- default:
- memset (sn, 0, 6);
- break;
- }
-
- sprintf (banner, "%s: %s S/N %06X, MUSYCC Rev %02X",
- hi->devname, bid,
- ((sn[3] << 16) & 0xff0000) |
- ((sn[4] << 8) & 0x00ff00) |
- (sn[5] & 0x0000ff),
- (u_int8_t) hi->revid[0]);
-
- pr_info("%s\n", banner);
-
- pdev = hi->pdev[0];
- pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
- hi->devname, "MUSYCC",
- (unsigned long) hi->addr_mapped[0], hi->addr[0],
- hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
- (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
-
- pdev = hi->pdev[1];
- pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
- hi->devname, "EBUS ",
- (unsigned long) hi->addr_mapped[1], hi->addr[1],
- hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
- (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
+ ci_t *ci;
+ struct pci_dev *pdev;
+ char *bid;
+ char banner[80];
+ char sn[6] = {0,};
+
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ bid = sbeid_get_bdname(ci);
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(sn, hi->mfg_info.pft1.Serial, 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(sn, hi->mfg_info.pft2.Serial, 6);
+ break;
+ }
+
+ sprintf(banner, "%s: %s S/N %06X, MUSYCC Rev %02X",
+ hi->devname, bid,
+ ((sn[3] << 16) & 0xff0000) |
+ ((sn[4] << 8) & 0x00ff00) |
+ (sn[5] & 0x0000ff),
+ (u_int8_t) hi->revid[0]);
+
+ pr_info("%s\n", banner);
+
+ pdev = hi->pdev[0];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "MUSYCC",
+ (unsigned long) hi->addr_mapped[0], hi->addr[0],
+ hi->pci_busno, (u_int8_t) PCI_SLOT(pdev->devfn),
+ (u_int8_t) PCI_FUNC(pdev->devfn), pdev->irq);
+
+ pdev = hi->pdev[1];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "EBUS ",
+ (unsigned long) hi->addr_mapped[1], hi->addr[1],
+ hi->pci_busno, (u_int8_t) PCI_SLOT(pdev->devfn),
+ (u_int8_t) PCI_FUNC(pdev->devfn), pdev->irq);
}
-void __init
-hdw_sn_get (hdw_info_t *hi, int brdno)
+void __init
+hdw_sn_get(hdw_info_t *hi, int brdno)
{
- /* obtain hardware EEPROM information */
- long addr;
+ /* obtain hardware EEPROM information */
+ long addr;
- addr = (long) hi->addr_mapped[1] + EEPROM_OFFSET;
+ addr = (long) hi->addr_mapped[1] + EEPROM_OFFSET;
- /* read EEPROM with largest known format size... */
- pmc_eeprom_read_buffer (addr, 0, (char *) hi->mfg_info.data, sizeof (FLD_TYPE2));
+ /* read EEPROM with largest known format size... */
+ pmc_eeprom_read_buffer(addr, 0, (char *)hi->mfg_info.data,
+ sizeof(FLD_TYPE2));
#if 0
- {
- unsigned char *ucp = (unsigned char *) &hi->mfg_info.data;
-
- pr_info("eeprom[00]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 0), *(ucp + 1), *(ucp + 2), *(ucp + 3), *(ucp + 4), *(ucp + 5), *(ucp + 6), *(ucp + 7));
- pr_info("eeprom[08]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 8), *(ucp + 9), *(ucp + 10), *(ucp + 11), *(ucp + 12), *(ucp + 13), *(ucp + 14), *(ucp + 15));
- pr_info("eeprom[16]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 16), *(ucp + 17), *(ucp + 18), *(ucp + 19), *(ucp + 20), *(ucp + 21), *(ucp + 22), *(ucp + 23));
- pr_info("eeprom[24]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 24), *(ucp + 25), *(ucp + 26), *(ucp + 27), *(ucp + 28), *(ucp + 29), *(ucp + 30), *(ucp + 31));
- pr_info("eeprom[32]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 32), *(ucp + 33), *(ucp + 34), *(ucp + 35), *(ucp + 36), *(ucp + 37), *(ucp + 38), *(ucp + 39));
- pr_info("eeprom[40]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- *(ucp + 40), *(ucp + 41), *(ucp + 42), *(ucp + 43), *(ucp + 44), *(ucp + 45), *(ucp + 46), *(ucp + 47));
- }
+ {
+ unsigned char *ucp = (unsigned char *) &hi->mfg_info.data;
+
+ pr_info("eeprom[00]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 0), *(ucp + 1), *(ucp + 2), *(ucp + 3),
+ *(ucp + 4), *(ucp + 5), *(ucp + 6), *(ucp + 7));
+ pr_info("eeprom[08]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 8), *(ucp + 9), *(ucp + 10), *(ucp + 11),
+ *(ucp + 12), *(ucp + 13), *(ucp + 14), *(ucp + 15));
+ pr_info("eeprom[16]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 16), *(ucp + 17), *(ucp + 18), *(ucp + 19),
+ *(ucp + 20), *(ucp + 21), *(ucp + 22), *(ucp + 23));
+ pr_info("eeprom[24]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 24), *(ucp + 25), *(ucp + 26), *(ucp + 27),
+ *(ucp + 28), *(ucp + 29), *(ucp + 30), *(ucp + 31));
+ pr_info("eeprom[32]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 32), *(ucp + 33), *(ucp + 34), *(ucp + 35),
+ *(ucp + 36), *(ucp + 37), *(ucp + 38), *(ucp + 39));
+ pr_info("eeprom[40]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 40), *(ucp + 41), *(ucp + 42), *(ucp + 43),
+ *(ucp + 44), *(ucp + 45), *(ucp + 46), *(ucp + 47));
+ }
#endif
#if 0
- pr_info("sn: %x %x %x %x %x %x\n",
- hi->mfg_info.Serial[0],
- hi->mfg_info.Serial[1],
- hi->mfg_info.Serial[2],
- hi->mfg_info.Serial[3],
- hi->mfg_info.Serial[4],
- hi->mfg_info.Serial[5]);
+ pr_info("sn: %x %x %x %x %x %x\n",
+ hi->mfg_info.Serial[0],
+ hi->mfg_info.Serial[1],
+ hi->mfg_info.Serial[2],
+ hi->mfg_info.Serial[3],
+ hi->mfg_info.Serial[4],
+ hi->mfg_info.Serial[5]);
#endif
- if ((hi->promfmt = pmc_verify_cksum (&hi->mfg_info.data)) == PROM_FORMAT_Unk)
- {
- /* bad crc, data is suspect */
- if (cxt1e1_log_level >= LOG_WARN)
- pr_info("%s: EEPROM cksum error\n", hi->devname);
- hi->mfg_info_sts = EEPROM_CRCERR;
- } else
- hi->mfg_info_sts = EEPROM_OK;
+ hi->promfmt = pmc_verify_cksum(&hi->mfg_info.data);
+ if (hi->promfmt == PROM_FORMAT_Unk) {
+ /* bad crc, data is suspect */
+ if (cxt1e1_log_level >= LOG_WARN)
+ pr_info("%s: EEPROM cksum error\n", hi->devname);
+ hi->mfg_info_sts = EEPROM_CRCERR;
+ } else
+ hi->mfg_info_sts = EEPROM_OK;
}
-void __init
-prep_hdw_info (void)
+ void __init
+prep_hdw_info(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- hi->pci_busno = 0xff;
- hi->pci_slot = 0xff;
- hi->pci_pin[0] = 0;
- hi->pci_pin[1] = 0;
- hi->ndev = NULL;
- hi->addr[0] = 0L;
- hi->addr[1] = 0L;
- hi->addr_mapped[0] = 0L;
- hi->addr_mapped[1] = 0L;
- }
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ hi->pci_busno = 0xff;
+ hi->pci_slot = 0xff;
+ hi->pci_pin[0] = 0;
+ hi->pci_pin[1] = 0;
+ hi->ndev = NULL;
+ hi->addr[0] = 0L;
+ hi->addr[1] = 0L;
+ hi->addr_mapped[0] = 0L;
+ hi->addr_mapped[1] = 0L;
+ }
}
void
-cleanup_ioremap (void)
+cleanup_ioremap(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- if (hi->addr_mapped[0])
- {
- iounmap ((void *) (hi->addr_mapped[0]));
- release_mem_region ((long) hi->addr[0], hi->len[0]);
- hi->addr_mapped[0] = 0;
- }
- if (hi->addr_mapped[1])
- {
- iounmap ((void *) (hi->addr_mapped[1]));
- release_mem_region ((long) hi->addr[1], hi->len[1]);
- hi->addr_mapped[1] = 0;
- }
- }
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (hi->addr_mapped[0]) {
+ iounmap((void *)(hi->addr_mapped[0]));
+ release_mem_region((long) hi->addr[0], hi->len[0]);
+ hi->addr_mapped[0] = 0;
+ }
+ if (hi->addr_mapped[1]) {
+ iounmap((void *)(hi->addr_mapped[1]));
+ release_mem_region((long) hi->addr[1], hi->len[1]);
+ hi->addr_mapped[1] = 0;
+ }
+ }
}
void
-cleanup_devs (void)
+cleanup_devs(void)
{
- hdw_info_t *hi;
- int i;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff || !hi->ndev)
- break;
- c4_stopwd(netdev_priv(hi->ndev));
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff || !hi->ndev)
+ break;
+ c4_stopwd(netdev_priv(hi->ndev));
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_cleanup(netdev_priv(hi->ndev));
+ sbecom_proc_brd_cleanup(netdev_priv(hi->ndev));
#endif
- unregister_netdev (hi->ndev);
- free_irq (hi->pdev[0]->irq, hi->ndev);
+ unregister_netdev(hi->ndev);
+ free_irq(hi->pdev[0]->irq, hi->ndev);
#ifdef CONFIG_SBE_PMCC4_NCOMM
- free_irq (hi->pdev[1]->irq, hi->ndev);
+ free_irq(hi->pdev[1]->irq, hi->ndev);
#endif
- OS_kfree (hi->ndev);
- }
+ OS_kfree(hi->ndev);
+ }
}
static int __init
-c4_hdw_init (struct pci_dev *pdev, int found)
+c4_hdw_init(struct pci_dev *pdev, int found)
{
- hdw_info_t *hi;
- int i;
- int fun, slot;
- unsigned char busno = 0xff;
-
- /* our MUSYCC chip supports two functions, 0 & 1 */
- if ((fun = PCI_FUNC (pdev->devfn)) > 1)
- {
- pr_warning("unexpected devfun: 0x%x\n", pdev->devfn);
- return 0;
- }
- if (pdev->bus) /* obtain bus number */
- busno = pdev->bus->number;
- else
- busno = 0; /* default for system PCI inconsistency */
- slot = pdev->devfn & ~0x07;
-
- /*
- * Functions 0 & 1 for a given board (identified by same bus(busno) and
- * slot(slot)) are placed into the same 'hardware' structure. The first
- * part of the board's functionality will be placed into an unpopulated
- * element, identified by "slot==(0xff)". The second part of a board's
- * functionality will match the previously loaded slot/busno.
- */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- /*
- * match with board's first found interface, otherwise this is first
- * found
- */
- if ((hi->pci_slot == 0xff) || /* new board */
- ((hi->pci_slot == slot) && (hi->bus == pdev->bus)))
- break; /* found for-loop exit */
- }
- if (i == MAX_BOARDS) /* no match in above loop means MAX
- * exceeded */
- {
- pr_warning("exceeded number of allowed devices (>%d)?\n", MAX_BOARDS);
- return 0;
- }
- if (pdev->bus)
- hi->pci_busno = pdev->bus->number;
- else
- hi->pci_busno = 0; /* default for system PCI inconsistency */
- hi->pci_slot = slot;
- pci_read_config_byte (pdev, PCI_INTERRUPT_PIN, &hi->pci_pin[fun]);
- pci_read_config_byte (pdev, PCI_REVISION_ID, &hi->revid[fun]);
- hi->bus = pdev->bus;
- hi->addr[fun] = pci_resource_start (pdev, 0);
- hi->len[fun] = pci_resource_end (pdev, 0) - hi->addr[fun] + 1;
- hi->pdev[fun] = pdev;
-
- {
- /*
- * create device name from module name, plus add the appropriate
- * board number
- */
- char *cp = hi->devname;
-
- strcpy (cp, KBUILD_MODNAME);
- cp += strlen (cp); /* reposition */
- *cp++ = '-';
- *cp++ = '0' + (found / 2); /* there are two found interfaces per
- * board */
- *cp = 0; /* termination */
- }
-
- return 1;
+ hdw_info_t *hi;
+ int i;
+ int fun, slot;
+ unsigned char busno = 0xff;
+
+ /* our MUSYCC chip supports two functions, 0 & 1 */
+ fun = PCI_FUNC(pdev->devfn);
+ if (fun > 1) {
+ pr_warning("unexpected devfun: 0x%x\n", pdev->devfn);
+ return 0;
+ }
+
+ /* obtain bus number */
+ if (pdev->bus)
+ busno = pdev->bus->number;
+ else
+ busno = 0; /* default for system PCI inconsistency */
+ slot = pdev->devfn & ~0x07;
+
+ /*
+ * Functions 0 & 1 for a given board (identified by same bus(busno) and
+ * slot(slot)) are placed into the same 'hardware' structure. The first
+ * part of the board's functionality will be placed into an unpopulated
+ * element, identified by "slot==(0xff)". The second part of a board's
+ * functionality will match the previously loaded slot/busno.
+ */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ /*
+ * match with board's first found interface, otherwise this is
+ * first found
+ */
+ if ((hi->pci_slot == 0xff) || /* new board */
+ ((hi->pci_slot == slot) && (hi->bus == pdev->bus)))
+ break; /* found for-loop exit */
+ }
+
+ /* no match in above loop means MAX exceeded */
+ if (i == MAX_BOARDS) {
+ pr_warning("exceeded number of allowed devices (>%d)?\n",
+ MAX_BOARDS);
+ return 0;
+ }
+
+ if (pdev->bus)
+ hi->pci_busno = pdev->bus->number;
+ else
+ hi->pci_busno = 0; /* default for system PCI inconsistency */
+
+ hi->pci_slot = slot;
+ pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &hi->pci_pin[fun]);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hi->revid[fun]);
+ hi->bus = pdev->bus;
+ hi->addr[fun] = pci_resource_start(pdev, 0);
+ hi->len[fun] = pci_resource_end(pdev, 0) - hi->addr[fun] + 1;
+ hi->pdev[fun] = pdev;
+
+ {
+ /*
+ * create device name from module name, plus add the appropriate
+ * board number
+ */
+ char *cp = hi->devname;
+
+ strcpy(cp, KBUILD_MODNAME);
+ cp += strlen(cp); /* reposition */
+ *cp++ = '-';
+ *cp++ = '0' + (found / 2); /* there are two found interfaces per
+ * board */
+ *cp = 0; /* termination */
+ }
+
+ return 1;
}
-
-status_t __init
-c4hw_attach_all (void)
+status_t __init
+c4hw_attach_all(void)
{
- hdw_info_t *hi;
- struct pci_dev *pdev = NULL;
- int found = 0, i, j;
-
- error_flag = 0;
- prep_hdw_info ();
- /*** scan PCI bus for all possible boards */
- while ((pdev = pci_get_device (PCI_VENDOR_ID_CONEXANT,
- PCI_DEVICE_ID_CN8474,
- pdev)))
- {
- if (c4_hdw_init (pdev, found))
- found++;
- }
- if (!found)
- {
- pr_warning("No boards found\n");
- return -ENODEV;
- }
- /* sanity check for consistent hardware found */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot != 0xff && (!hi->addr[0] || !hi->addr[1]))
- {
- pr_warning("%s: something very wrong with pci_get_device\n",
- hi->devname);
- return -EIO;
- }
- }
- /* bring board's memory regions on/line */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- for (j = 0; j < 2; j++)
- {
- if (!request_mem_region (hi->addr[j], hi->len[j], hi->devname))
- {
- pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
- hi->devname, hi->addr[j], hi->len[j]);
- cleanup_ioremap ();
- return -ENOMEM;
- }
- hi->addr_mapped[j] = (unsigned long) ioremap (hi->addr[j], hi->len[j]);
- if (!hi->addr_mapped[j])
- {
- pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
- hi->devname, hi->addr[j], hi->len[j]);
- cleanup_ioremap ();
- return -ENOMEM;
- }
+ hdw_info_t *hi;
+ struct pci_dev *pdev = NULL;
+ int found = 0, i, j;
+
+ error_flag = 0;
+ prep_hdw_info();
+ /*** scan PCI bus for all possible boards */
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_CONEXANT,
+ PCI_DEVICE_ID_CN8474,
+ pdev))) {
+ if (c4_hdw_init(pdev, found))
+ found++;
+ }
+
+ if (!found) {
+ pr_warning("No boards found\n");
+ return -ENODEV;
+ }
+
+ /* sanity check for consistent hardware found */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot != 0xff && (!hi->addr[0] || !hi->addr[1])) {
+ pr_warning("%s: something very wrong with pci_get_device\n",
+ hi->devname);
+ return -EIO;
+ }
+ }
+ /* bring board's memory regions on/line */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ for (j = 0; j < 2; j++) {
+ if (!request_mem_region(hi->addr[j], hi->len[j], hi->devname)) {
+ pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap();
+ return -ENOMEM;
+ }
+
+ hi->addr_mapped[j] = (unsigned long)ioremap(hi->addr[j], hi->len[j]);
+ if (!hi->addr_mapped[j]) {
+ pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap();
+ return -ENOMEM;
+ }
#ifdef SBE_MAP_DEBUG
- pr_warning("%s: io remapped from phys %x to virt %x\n",
- hi->devname, (u_int32_t) hi->addr[j], (u_int32_t) hi->addr_mapped[j]);
+ pr_warning("%s: io remapped from phys %x to virt %x\n",
+ hi->devname, (u_int32_t) hi->addr[j],
+ (u_int32_t) hi->addr_mapped[j]);
#endif
- }
- }
-
- drvr_state = SBE_DRVR_AVAILABLE;
-
- /* Have now memory mapped all boards. Now allow board's access to system */
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->pci_slot == 0xff)
- break;
- if (pci_enable_device (hi->pdev[0]) ||
- pci_enable_device (hi->pdev[1]))
- {
- drvr_state = SBE_DRVR_DOWN;
- pr_warning("%s: failed to enable card %d slot %d\n",
- hi->devname, i, hi->pci_slot);
- cleanup_devs ();
- cleanup_ioremap ();
- return -EIO;
- }
- pci_set_master (hi->pdev[0]);
- pci_set_master (hi->pdev[1]);
- if (!(hi->ndev = c4_add_dev (hi, i, (long) hi->addr_mapped[0],
- (long) hi->addr_mapped[1],
- hi->pdev[0]->irq,
- hi->pdev[1]->irq)))
- {
- drvr_state = SBE_DRVR_DOWN;
- cleanup_ioremap ();
- /* NOTE: c4_add_dev() does its own device cleanup */
+ }
+ }
+
+ drvr_state = SBE_DRVR_AVAILABLE;
+
+ /* Have now memory mapped all boards. Now allow board's access to system */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (pci_enable_device(hi->pdev[0]) ||
+ pci_enable_device(hi->pdev[1])) {
+ drvr_state = SBE_DRVR_DOWN;
+ pr_warning("%s: failed to enable card %d slot %d\n",
+ hi->devname, i, hi->pci_slot);
+ cleanup_devs();
+ cleanup_ioremap();
+ return -EIO;
+ }
+ pci_set_master(hi->pdev[0]);
+ pci_set_master(hi->pdev[1]);
+ hi->ndev = c4_add_dev(hi, i, (long) hi->addr_mapped[0],
+ (long) hi->addr_mapped[1],
+ hi->pdev[0]->irq,
+ hi->pdev[1]->irq);
+ if (!hi->ndev) {
+ drvr_state = SBE_DRVR_DOWN;
+ cleanup_ioremap();
+ /* NOTE: c4_add_dev() does its own device cleanup */
#if 0
- cleanup_devs ();
+ cleanup_devs();
#endif
- return error_flag; /* error_flag set w/in add_dev() */
- }
- show_two (hi, i); /* displays found information */
- }
- return 0;
+ return error_flag; /* error_flag set w/in add_dev() */
+ }
+ show_two(hi, i); /* displays found information */
+ }
+ return 0;
}
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 4254c0426db9..bd2bfba604b3 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -514,36 +514,36 @@ struct sbecom_port_param
};
typedef struct wancfg wcfg_t;
- extern wcfg_t *wancfg_init (char *, char *);
- extern int wancfg_card_blink (wcfg_t *, int);
- extern int wancfg_ctl (wcfg_t *, int, void *, int, void *, int);
- extern int wancfg_del_card_stats (wcfg_t *);
- extern int wancfg_del_chan_stats (wcfg_t *, int);
- extern int wancfg_enable_ports (wcfg_t *, int);
- extern int wancfg_free (wcfg_t *);
- extern int wancfg_get_brdaddr (wcfg_t *, struct sbe_brd_addr *);
- extern int wancfg_get_brdinfo (wcfg_t *, struct sbe_brd_info *);
- extern int wancfg_get_card (wcfg_t *, struct sbecom_card_param *);
- extern int wancfg_get_card_chan_stats (wcfg_t *, struct sbecom_chan_stats *);
- extern int wancfg_get_card_sn (wcfg_t *);
- extern int wancfg_get_card_stats (wcfg_t *, struct temux_card_stats *);
- extern int wancfg_get_chan (wcfg_t *, int, struct sbecom_chan_param *);
- extern int wancfg_get_chan_stats (wcfg_t *, int, struct sbecom_chan_stats *);
- extern int wancfg_get_drvinfo (wcfg_t *, int, struct sbe_drv_info *);
- extern int wancfg_get_framer (wcfg_t *, int, struct sbecom_framer_param *);
- extern int wancfg_get_iid (wcfg_t *, int, struct sbe_iid_info *);
- extern int wancfg_get_sn (wcfg_t *, unsigned int *);
- extern int wancfg_read (wcfg_t *, int, struct sbecom_wrt_vec *);
- extern int wancfg_reset_device (wcfg_t *, int);
- extern int wancfg_set_card (wcfg_t *, struct sbecom_card_param *);
- extern int wancfg_set_chan (wcfg_t *, int, struct sbecom_chan_param *);
- extern int wancfg_set_framer (wcfg_t *, int, struct sbecom_framer_param *);
- extern int wancfg_set_loglevel (wcfg_t *, uint);
- extern int wancfg_write (wcfg_t *, int, struct sbecom_wrt_vec *);
+ extern wcfg_t *wancfg_init(char *, char *);
+ extern int wancfg_card_blink(wcfg_t *, int);
+ extern int wancfg_ctl(wcfg_t *, int, void *, int, void *, int);
+ extern int wancfg_del_card_stats(wcfg_t *);
+ extern int wancfg_del_chan_stats(wcfg_t *, int);
+ extern int wancfg_enable_ports(wcfg_t *, int);
+ extern int wancfg_free(wcfg_t *);
+ extern int wancfg_get_brdaddr(wcfg_t *, struct sbe_brd_addr *);
+ extern int wancfg_get_brdinfo(wcfg_t *, struct sbe_brd_info *);
+ extern int wancfg_get_card(wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_get_card_chan_stats(wcfg_t *, struct sbecom_chan_stats *);
+ extern int wancfg_get_card_sn(wcfg_t *);
+ extern int wancfg_get_card_stats(wcfg_t *, struct temux_card_stats *);
+ extern int wancfg_get_chan(wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_get_chan_stats(wcfg_t *, int, struct sbecom_chan_stats *);
+ extern int wancfg_get_drvinfo(wcfg_t *, int, struct sbe_drv_info *);
+ extern int wancfg_get_framer(wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_get_iid(wcfg_t *, int, struct sbe_iid_info *);
+ extern int wancfg_get_sn(wcfg_t *, unsigned int *);
+ extern int wancfg_read(wcfg_t *, int, struct sbecom_wrt_vec *);
+ extern int wancfg_reset_device(wcfg_t *, int);
+ extern int wancfg_set_card(wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_set_chan(wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_set_framer(wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_set_loglevel(wcfg_t *, uint);
+ extern int wancfg_write(wcfg_t *, int, struct sbecom_wrt_vec *);
#ifdef NOT_YET_COMMON
- extern int wancfg_get_tsioc (wcfg_t *, struct wanc1t3_ts_hdr *, struct wanc1t3_ts_param *);
- extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
+ extern int wancfg_get_tsioc(wcfg_t *, struct wanc1t3_ts_hdr *, struct wanc1t3_ts_param *);
+ extern int wancfg_set_tsioc(wcfg_t *, struct wanc1t3_ts_param *);
#endif
#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 9b483739881a..b02f5ade6661 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -31,7 +31,7 @@
#include "pmcc4_private.h"
#include "sbeproc.h"
-/*****************************************************************************************
+/*******************************************************************************
* Error out early if we have compiler trouble.
*
* (This section is included from the kernel's init/main.c as a friendly
@@ -50,43 +50,42 @@
#warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended.
#endif
-/*****************************************************************************************/
+/*******************************************************************************/
#define CHANNAME "hdlc"
/*******************************************************************/
/* forward references */
-status_t c4_chan_work_init (mpi_t *, mch_t *);
-void musycc_wq_chan_restart (void *);
-status_t __init c4_init (ci_t *, u_char *, u_char *);
-status_t __init c4_init2 (ci_t *);
-ci_t *__init c4_new (void *);
-int __init c4hw_attach_all (void);
-void __init hdw_sn_get (hdw_info_t *, int);
+status_t c4_chan_work_init(mpi_t *, mch_t *);
+void musycc_wq_chan_restart(void *);
+status_t __init c4_init(ci_t *, u_char *, u_char *);
+status_t __init c4_init2(ci_t *);
+ci_t *__init c4_new(void *);
+int __init c4hw_attach_all(void);
+void __init hdw_sn_get(hdw_info_t *, int);
#ifdef CONFIG_SBE_PMCC4_NCOMM
-irqreturn_t c4_ebus_intr_th_handler (void *);
+irqreturn_t c4_ebus_intr_th_handler(void *);
#endif
-int c4_frame_rw (ci_t *, struct sbecom_port_param *);
-status_t c4_get_port (ci_t *, int);
-int c4_loop_port (ci_t *, int, u_int8_t);
-int c4_musycc_rw (ci_t *, struct c4_musycc_param *);
-int c4_new_chan (ci_t *, int, int, void *);
-status_t c4_set_port (ci_t *, int);
-int c4_pld_rw (ci_t *, struct sbecom_port_param *);
-void cleanup_devs (void);
-void cleanup_ioremap (void);
-status_t musycc_chan_down (ci_t *, int);
-irqreturn_t musycc_intr_th_handler (void *);
-int musycc_start_xmit (ci_t *, int, void *);
-
-extern char pmcc4_OSSI_release[];
+int c4_frame_rw(ci_t *, struct sbecom_port_param *);
+status_t c4_get_port(ci_t *, int);
+int c4_loop_port(ci_t *, int, u_int8_t);
+int c4_musycc_rw(ci_t *, struct c4_musycc_param *);
+int c4_new_chan(ci_t *, int, int, void *);
+status_t c4_set_port(ci_t *, int);
+int c4_pld_rw(ci_t *, struct sbecom_port_param *);
+void cleanup_devs(void);
+void cleanup_ioremap(void);
+status_t musycc_chan_down(ci_t *, int);
+irqreturn_t musycc_intr_th_handler(void *);
+int musycc_start_xmit(ci_t *, int, void *);
+
extern ci_t *CI;
extern struct s_hdw_info hdw_info[];
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
- defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+ defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
#else
#define _v7_hdlc_ 0
@@ -94,9 +93,9 @@ extern struct s_hdw_info hdw_info[];
#if _v7_hdlc_
#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7 (hdlc_device *);
-extern int unregister_hdlc_device_v7 (hdlc_device *);
+extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7(hdlc_device *);
+extern int unregister_hdlc_device_v7(hdlc_device *);
#else
#define V7(x) x
@@ -104,11 +103,11 @@ extern int unregister_hdlc_device_v7 (hdlc_device *);
int error_flag; /* module load error reporting */
int cxt1e1_log_level = LOG_ERROR;
-int log_level_default = LOG_ERROR;
+static int log_level_default = LOG_ERROR;
module_param(cxt1e1_log_level, int, 0444);
int cxt1e1_max_mru = MUSYCC_MRU;
-int max_mru_default = MUSYCC_MRU;
+static int max_mru_default = MUSYCC_MRU;
module_param(cxt1e1_max_mru, int, 0444);
int cxt1e1_max_mtu = MUSYCC_MTU;
@@ -127,33 +126,23 @@ module_param(max_rxdesc_used, int, 0444);
/****************************************************************************/
/****************************************************************************/
-void *
-getuserbychan (int channum)
+void *
+getuserbychan(int channum)
{
- mch_t *ch;
+ mch_t *ch;
- ch = c4_find_chan (channum);
- return ch ? ch->user : NULL;
+ ch = c4_find_chan(channum);
+ return ch ? ch->user : NULL;
}
-char *
-get_hdlc_name (hdlc_device *hdlc)
+char *
+get_hdlc_name(hdlc_device *hdlc)
{
- struct c4_priv *priv = hdlc->priv;
- struct net_device *dev = getuserbychan (priv->channum);
-
- return dev->name;
-}
+ struct c4_priv *priv = hdlc->priv;
+ struct net_device *dev = getuserbychan(priv->channum);
-
-static status_t
-mkret (int bsd)
-{
- if (bsd > 0)
- return -bsd;
- else
- return bsd;
+ return dev->name;
}
/***************************************************************************/
@@ -179,957 +168,946 @@ mkret (int bsd)
* within a port's group.
*/
void
-c4_wk_chan_restart (mch_t *ch)
+c4_wk_chan_restart(mch_t *ch)
{
- mpi_t *pi = ch->up;
+ mpi_t *pi = ch->up;
#ifdef RLD_RESTART_DEBUG
- pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
- __func__, pi->portnum, ch->channum, ch);
+ pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
+ __func__, pi->portnum, ch->channum, ch);
#endif
- /* create new entry w/in workqueue for this channel and let'er rip */
+ /* create new entry w/in workqueue for this channel and let'er rip */
- /** queue_work (struct workqueue_struct *queue,
- ** struct work_struct *work);
- **/
- queue_work (pi->wq_port, &ch->ch_work);
+ /** queue_work(struct workqueue_struct *queue,
+ ** struct work_struct *work);
+ **/
+ queue_work(pi->wq_port, &ch->ch_work);
}
status_t
-c4_wk_chan_init (mpi_t *pi, mch_t *ch)
+c4_wk_chan_init(mpi_t *pi, mch_t *ch)
{
- /*
- * this will be used to restart a stopped channel
- */
-
- /** INIT_WORK (struct work_struct *work,
- ** void (*function)(void *),
- ** void *data);
- **/
- INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
- return 0; /* success */
+ /*
+ * this will be used to restart a stopped channel
+ */
+
+ /** INIT_WORK(struct work_struct *work,
+ ** void (*function)(void *),
+ ** void *data);
+ **/
+ INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
+ return 0; /* success */
}
status_t
-c4_wq_port_init (mpi_t *pi)
+c4_wq_port_init(mpi_t *pi)
{
- char name[16], *np; /* NOTE: name of the queue limited by system
- * to 10 characters */
-
- if (pi->wq_port)
- return 0; /* already initialized */
+ char name[16]; /* NOTE: name of the queue limited by system
+ * to 10 characters */
+ if (pi->wq_port)
+ return 0; /* already initialized */
- np = name;
- memset (name, 0, 16);
- sprintf (np, "%s%d", pi->up->devname, pi->portnum); /* IE pmcc4-01) */
+ /* IE pmcc4-01 */
+ snprintf(name, sizeof(name), "%s%d", pi->up->devname, pi->portnum);
#ifdef RLD_RESTART_DEBUG
- pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
- __func__, name, pi->portnum); /* RLD DEBUG */
+ pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
+ __func__, name, pi->portnum); /* RLD DEBUG */
#endif
- if (!(pi->wq_port = create_singlethread_workqueue (name)))
- return -ENOMEM;
- return 0; /* success */
+ pi->wq_port = create_singlethread_workqueue(name);
+ if (!pi->wq_port)
+ return -ENOMEM;
+ return 0; /* success */
}
void
-c4_wq_port_cleanup (mpi_t *pi)
+c4_wq_port_cleanup(mpi_t *pi)
{
- /*
- * PORT POINT: cannot call this if WQ is statically allocated w/in
- * structure since it calls kfree(wq);
- */
- if (pi->wq_port)
- {
- destroy_workqueue (pi->wq_port); /* this also calls
- * flush_workqueue() */
- pi->wq_port = NULL;
- }
+ /*
+ * PORT POINT: cannot call this if WQ is statically allocated w/in
+ * structure since it calls kfree(wq);
+ */
+ if (pi->wq_port) {
+ destroy_workqueue(pi->wq_port); /* this also calls
+ * flush_workqueue() */
+ pi->wq_port = NULL;
+ }
}
/***************************************************************************/
-irqreturn_t
-c4_linux_interrupt (int irq, void *dev_instance)
+static irqreturn_t
+c4_linux_interrupt(int irq, void *dev_instance)
{
- struct net_device *ndev = dev_instance;
+ struct net_device *ndev = dev_instance;
- return musycc_intr_th_handler(netdev_priv(ndev));
+ return musycc_intr_th_handler(netdev_priv(ndev));
}
#ifdef CONFIG_SBE_PMCC4_NCOMM
-irqreturn_t
-c4_ebus_interrupt (int irq, void *dev_instance)
+static irqreturn_t
+c4_ebus_interrupt(int irq, void *dev_instance)
{
- struct net_device *ndev = dev_instance;
+ struct net_device *ndev = dev_instance;
- return c4_ebus_intr_th_handler(netdev_priv(ndev));
+ return c4_ebus_intr_th_handler(netdev_priv(ndev));
}
#endif
static int
-void_open (struct net_device *ndev)
+void_open(struct net_device *ndev)
{
- pr_info("%s: trying to open master device !\n", ndev->name);
- return -1;
+ pr_info("%s: trying to open master device !\n", ndev->name);
+ return -1;
}
static int
-chan_open (struct net_device *ndev)
+chan_open(struct net_device *ndev)
{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
- const struct c4_priv *priv = hdlc->priv;
- int ret;
-
- if ((ret = hdlc_open (ndev)))
- {
- pr_info("hdlc_open failure, err %d.\n", ret);
- return ret;
- }
- if ((ret = c4_chan_up (priv->ci, priv->channum)))
- return -ret;
- try_module_get (THIS_MODULE);
- netif_start_queue (ndev);
- return 0; /* no error = success */
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ const struct c4_priv *priv = hdlc->priv;
+ int ret;
+
+ ret = hdlc_open(ndev);
+ if (ret) {
+ pr_info("hdlc_open failure, err %d.\n", ret);
+ return ret;
+ }
+
+ ret = c4_chan_up(priv->ci, priv->channum);
+ if (ret < 0)
+ return ret;
+ try_module_get(THIS_MODULE);
+ netif_start_queue(ndev);
+ return 0; /* no error = success */
}
static int
-chan_close (struct net_device *ndev)
+chan_close(struct net_device *ndev)
{
- hdlc_device *hdlc = dev_to_hdlc (ndev);
- const struct c4_priv *priv = hdlc->priv;
-
- netif_stop_queue (ndev);
- musycc_chan_down ((ci_t *) 0, priv->channum);
- hdlc_close (ndev);
- module_put (THIS_MODULE);
- return 0;
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ const struct c4_priv *priv = hdlc->priv;
+
+ netif_stop_queue(ndev);
+ musycc_chan_down((ci_t *) 0, priv->channum);
+ hdlc_close(ndev);
+ module_put(THIS_MODULE);
+ return 0;
}
static int
-chan_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
+chan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- return hdlc_ioctl (dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifr, cmd);
}
static int
-chan_attach_noop (struct net_device *ndev, unsigned short foo_1, unsigned short foo_2)
+chan_attach_noop(struct net_device *ndev, unsigned short foo_1,
+ unsigned short foo_2)
{
- return 0; /* our driver has nothing to do here, show's
- * over, go home */
+ /* our driver has nothing to do here, show's
+ * over, go home
+ */
+ return 0;
}
static struct net_device_stats *
-chan_get_stats (struct net_device *ndev)
+chan_get_stats(struct net_device *ndev)
{
- mch_t *ch;
- struct net_device_stats *nstats;
- struct sbecom_chan_stats *stats;
- int channum;
-
- {
- struct c4_priv *priv;
-
- priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
- channum = priv->channum;
- }
-
- ch = c4_find_chan (channum);
- if (ch == NULL)
- return NULL;
-
- nstats = &ndev->stats;
- stats = &ch->s;
-
- memset (nstats, 0, sizeof (struct net_device_stats));
- nstats->rx_packets = stats->rx_packets;
- nstats->tx_packets = stats->tx_packets;
- nstats->rx_bytes = stats->rx_bytes;
- nstats->tx_bytes = stats->tx_bytes;
- nstats->rx_errors = stats->rx_length_errors +
- stats->rx_over_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- stats->rx_fifo_errors +
- stats->rx_missed_errors;
- nstats->tx_errors = stats->tx_dropped +
- stats->tx_aborted_errors +
- stats->tx_fifo_errors;
- nstats->rx_dropped = stats->rx_dropped;
- nstats->tx_dropped = stats->tx_dropped;
-
- nstats->rx_length_errors = stats->rx_length_errors;
- nstats->rx_over_errors = stats->rx_over_errors;
- nstats->rx_crc_errors = stats->rx_crc_errors;
- nstats->rx_frame_errors = stats->rx_frame_errors;
- nstats->rx_fifo_errors = stats->rx_fifo_errors;
- nstats->rx_missed_errors = stats->rx_missed_errors;
-
- nstats->tx_aborted_errors = stats->tx_aborted_errors;
- nstats->tx_fifo_errors = stats->tx_fifo_errors;
-
- return nstats;
+ mch_t *ch;
+ struct net_device_stats *nstats;
+ struct sbecom_chan_stats *stats;
+ int channum;
+
+ {
+ struct c4_priv *priv;
+
+ priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
+ channum = priv->channum;
+ }
+
+ ch = c4_find_chan(channum);
+ if (ch == NULL)
+ return NULL;
+
+ nstats = &ndev->stats;
+ stats = &ch->s;
+
+ memset(nstats, 0, sizeof(struct net_device_stats));
+ nstats->rx_packets = stats->rx_packets;
+ nstats->tx_packets = stats->tx_packets;
+ nstats->rx_bytes = stats->rx_bytes;
+ nstats->tx_bytes = stats->tx_bytes;
+ nstats->rx_errors = stats->rx_length_errors +
+ stats->rx_over_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors +
+ stats->rx_fifo_errors +
+ stats->rx_missed_errors;
+ nstats->tx_errors = stats->tx_dropped +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors;
+ nstats->rx_dropped = stats->rx_dropped;
+ nstats->tx_dropped = stats->tx_dropped;
+
+ nstats->rx_length_errors = stats->rx_length_errors;
+ nstats->rx_over_errors = stats->rx_over_errors;
+ nstats->rx_crc_errors = stats->rx_crc_errors;
+ nstats->rx_frame_errors = stats->rx_frame_errors;
+ nstats->rx_fifo_errors = stats->rx_fifo_errors;
+ nstats->rx_missed_errors = stats->rx_missed_errors;
+
+ nstats->tx_aborted_errors = stats->tx_aborted_errors;
+ nstats->tx_fifo_errors = stats->tx_fifo_errors;
+
+ return nstats;
}
static ci_t *
-get_ci_by_dev (struct net_device *ndev)
+get_ci_by_dev(struct net_device *ndev)
{
- return (ci_t *)(netdev_priv(ndev));
+ return (ci_t *)(netdev_priv(ndev));
}
static int
-c4_linux_xmit (struct sk_buff *skb, struct net_device *ndev)
+c4_linux_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- const struct c4_priv *priv;
- int rval;
+ const struct c4_priv *priv;
+ int rval;
- hdlc_device *hdlc = dev_to_hdlc (ndev);
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
- priv = hdlc->priv;
+ priv = hdlc->priv;
- rval = musycc_start_xmit (priv->ci, priv->channum, skb);
- return rval;
+ rval = musycc_start_xmit(priv->ci, priv->channum, skb);
+ return rval;
}
static const struct net_device_ops chan_ops = {
- .ndo_open = chan_open,
- .ndo_stop = chan_close,
- .ndo_start_xmit = c4_linux_xmit,
- .ndo_do_ioctl = chan_dev_ioctl,
- .ndo_get_stats = chan_get_stats,
+ .ndo_open = chan_open,
+ .ndo_stop = chan_close,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = chan_dev_ioctl,
+ .ndo_get_stats = chan_get_stats,
};
static struct net_device *
-create_chan (struct net_device *ndev, ci_t *ci,
- struct sbecom_chan_param *cp)
+create_chan(struct net_device *ndev, ci_t *ci,
+ struct sbecom_chan_param *cp)
{
- hdlc_device *hdlc;
- struct net_device *dev;
- hdw_info_t *hi;
- int ret;
-
- if (c4_find_chan (cp->channum))
- return NULL; /* channel already exists */
-
- {
- struct c4_priv *priv;
-
- /* allocate then fill in private data structure */
- priv = OS_kmalloc (sizeof (struct c4_priv));
- if (!priv)
- {
- pr_warning("%s: no memory for net_device !\n", ci->devname);
- return NULL;
- }
- dev = alloc_hdlcdev (priv);
- if (!dev)
- {
- pr_warning("%s: no memory for hdlc_device !\n", ci->devname);
- OS_kfree (priv);
- return NULL;
- }
- priv->ci = ci;
- priv->channum = cp->channum;
- }
-
- hdlc = dev_to_hdlc (dev);
-
- dev->base_addr = 0; /* not I/O mapped */
- dev->irq = ndev->irq;
- dev->type = ARPHRD_RAWHDLC;
- *dev->name = 0; /* default ifconfig name = "hdlc" */
-
- hi = (hdw_info_t *) ci->hdw_info;
- if (hi->mfg_info_sts == EEPROM_OK)
- {
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (dev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (dev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- break;
- default:
- memset (dev->dev_addr, 0, 6);
- break;
- }
- } else
- {
- memset (dev->dev_addr, 0, 6);
- }
-
- hdlc->xmit = c4_linux_xmit;
-
- dev->netdev_ops = &chan_ops;
- /*
- * The native hdlc stack calls this 'attach' routine during
- * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
- * Since hdlc_raw_ioctl() stack does not interrogate whether an 'attach'
- * routine is actually registered or not, we supply a dummy routine which
- * does nothing (since encoding and parity are setup for our driver via a
- * special configuration application).
- */
-
- hdlc->attach = chan_attach_noop;
-
- rtnl_unlock (); /* needed due to Ioctl calling sequence */
- ret = register_hdlc_device (dev);
- /* NOTE: <stats> setting must occur AFTER registration in order to "take" */
- dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
-
- rtnl_lock (); /* needed due to Ioctl calling sequence */
- if (ret)
- {
- if (cxt1e1_log_level >= LOG_WARN)
- pr_info("%s: create_chan[%d] registration error = %d.\n",
- ci->devname, cp->channum, ret);
- free_netdev (dev); /* cleanup */
- return NULL; /* failed to register */
- }
- return dev;
+ hdlc_device *hdlc;
+ struct net_device *dev;
+ hdw_info_t *hi;
+ int ret;
+
+ if (c4_find_chan(cp->channum))
+ return NULL; /* channel already exists */
+
+ {
+ struct c4_priv *priv;
+
+ /* allocate then fill in private data structure */
+ priv = OS_kmalloc(sizeof(struct c4_priv));
+ if (!priv) {
+ pr_warning("%s: no memory for net_device !\n",
+ ci->devname);
+ return NULL;
+ }
+ dev = alloc_hdlcdev(priv);
+ if (!dev) {
+ pr_warning("%s: no memory for hdlc_device !\n",
+ ci->devname);
+ OS_kfree(priv);
+ return NULL;
+ }
+ priv->ci = ci;
+ priv->channum = cp->channum;
+ }
+
+ hdlc = dev_to_hdlc(dev);
+
+ dev->base_addr = 0; /* not I/O mapped */
+ dev->irq = ndev->irq;
+ dev->type = ARPHRD_RAWHDLC;
+ *dev->name = 0; /* default ifconfig name = "hdlc" */
+
+ hi = (hdw_info_t *)ci->hdw_info;
+ if (hi->mfg_info_sts == EEPROM_OK) {
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(dev->dev_addr,
+ (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(dev->dev_addr,
+ (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ break;
+ default:
+ memset(dev->dev_addr, 0, 6);
+ break;
+ }
+ } else
+ memset(dev->dev_addr, 0, 6);
+
+ hdlc->xmit = c4_linux_xmit;
+
+ dev->netdev_ops = &chan_ops;
+ /*
+ * The native hdlc stack calls this 'attach' routine during
+ * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
+ * Since hdlc_raw_ioctl() stack does not interrogate whether an 'attach'
+ * routine is actually registered or not, we supply a dummy routine which
+ * does nothing (since encoding and parity are setup for our driver via a
+ * special configuration application).
+ */
+
+ hdlc->attach = chan_attach_noop;
+
+ /* needed due to Ioctl calling sequence */
+ rtnl_unlock();
+ ret = register_hdlc_device(dev);
+ /* NOTE: <stats> setting must occur AFTER registration in order to "take" */
+ dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
+
+ /* needed due to Ioctl calling sequence */
+ rtnl_lock();
+ if (ret) {
+ if (cxt1e1_log_level >= LOG_WARN)
+ pr_info("%s: create_chan[%d] registration error = %d.\n",
+ ci->devname, cp->channum, ret);
+ /* cleanup */
+ free_netdev(dev);
+ /* failed to register */
+ return NULL;
+ }
+ return dev;
}
/* the idea here is to get port information and pass it back (using pointer) */
-static status_t
-do_get_port (struct net_device *ndev, void *data)
+static status_t
+do_get_port(struct net_device *ndev, void *data)
{
- int ret;
- ci_t *ci; /* ci stands for card information */
- struct sbecom_port_param pp;/* copy data to kernel land */
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- if (pp.portnum >= MUSYCC_NPORTS)
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL; /* get card info */
-
- ret = mkret (c4_get_port (ci, pp.portnum));
- if (ret)
- return ret;
- if (copy_to_user (data, &ci->port[pp.portnum].p,
- sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ int ret;
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ ret = c4_get_port(ci, pp.portnum);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &ci->port[pp.portnum].p,
+ sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
/* this function copies the user data and then calls the real action function */
-static status_t
-do_set_port (struct net_device *ndev, void *data)
+static status_t
+do_set_port(struct net_device *ndev, void *data)
{
- ci_t *ci; /* ci stands for card information */
- struct sbecom_port_param pp;/* copy data to kernel land */
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- if (pp.portnum >= MUSYCC_NPORTS)
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL; /* get card info */
-
- if (pp.portnum >= ci->max_port) /* sanity check */
- return -ENXIO;
-
- memcpy (&ci->port[pp.portnum].p, &pp, sizeof (struct sbecom_port_param));
- return mkret (c4_set_port (ci, pp.portnum));
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ if (pp.portnum >= ci->max_port) /* sanity check */
+ return -ENXIO;
+
+ memcpy(&ci->port[pp.portnum].p, &pp, sizeof(struct sbecom_port_param));
+ return c4_set_port(ci, pp.portnum);
}
/* work the port loopback mode as directed */
-static status_t
-do_port_loop (struct net_device *ndev, void *data)
+static status_t
+do_port_loop(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- return mkret (c4_loop_port (ci, pp.portnum, pp.port_mode));
+ struct sbecom_port_param pp;
+ ci_t *ci;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ return c4_loop_port(ci, pp.portnum, pp.port_mode);
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_framer_rw (struct net_device *ndev, void *data)
+static status_t
+do_framer_rw(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_frame_rw (ci, &pp));
- if (ret)
- return ret;
- if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = c4_frame_rw(ci, &pp);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_pld_rw (struct net_device *ndev, void *data)
+static status_t
+do_pld_rw(struct net_device *ndev, void *data)
{
- struct sbecom_port_param pp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_pld_rw (ci, &pp));
- if (ret)
- return ret;
- if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
- return -EFAULT;
- return 0;
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+
+ ret = c4_pld_rw(ci, &pp);
+ if (ret)
+ return ret;
+ if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
}
/* set the specified register with the given value / or just read it */
-static status_t
-do_musycc_rw (struct net_device *ndev, void *data)
+static status_t
+do_musycc_rw(struct net_device *ndev, void *data)
{
- struct c4_musycc_param mp;
- ci_t *ci;
- int ret;
-
- if (copy_from_user (&mp, data, sizeof (struct c4_musycc_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- ret = mkret (c4_musycc_rw (ci, &mp));
- if (ret)
- return ret;
- if (copy_to_user (data, &mp, sizeof (struct c4_musycc_param)))
- return -EFAULT;
- return 0;
+ struct c4_musycc_param mp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user(&mp, data, sizeof(struct c4_musycc_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = c4_musycc_rw(ci, &mp);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(data, &mp, sizeof(struct c4_musycc_param)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_get_chan (struct net_device *ndev, void *data)
+static status_t
+do_get_chan(struct net_device *ndev, void *data)
{
- struct sbecom_chan_param cp;
- int ret;
+ struct sbecom_chan_param cp;
+ int ret;
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
- if ((ret = mkret (c4_get_chan (cp.channum, &cp))))
- return ret;
+ ret = c4_get_chan(cp.channum, &cp);
+ if (ret < 0)
+ return ret;
- if (copy_to_user (data, &cp, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- return 0;
+ if (copy_to_user(data, &cp, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_set_chan (struct net_device *ndev, void *data)
+static status_t
+do_set_chan(struct net_device *ndev, void *data)
{
- struct sbecom_chan_param cp;
- int ret;
- ci_t *ci;
-
- if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- switch (ret = mkret (c4_set_chan (cp.channum, &cp)))
- {
- case 0:
- return 0;
- default:
- return ret;
- }
+ struct sbecom_chan_param cp;
+ ci_t *ci;
+
+ if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ return c4_set_chan(cp.channum, &cp);
}
-static status_t
-do_create_chan (struct net_device *ndev, void *data)
+static status_t
+do_create_chan(struct net_device *ndev, void *data)
{
- ci_t *ci;
- struct net_device *dev;
- struct sbecom_chan_param cp;
- int ret;
-
- if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- ci = get_ci_by_dev (ndev);
- if (!ci)
- return -EINVAL;
- dev = create_chan (ndev, ci, &cp);
- if (!dev)
- return -EBUSY;
- ret = mkret (c4_new_chan (ci, cp.port, cp.channum, dev));
- if (ret)
- {
- rtnl_unlock (); /* needed due to Ioctl calling sequence */
- unregister_hdlc_device (dev);
- rtnl_lock (); /* needed due to Ioctl calling sequence */
- free_netdev (dev);
- }
- return ret;
+ ci_t *ci;
+ struct net_device *dev;
+ struct sbecom_chan_param cp;
+ int ret;
+
+ if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ dev = create_chan(ndev, ci, &cp);
+ if (!dev)
+ return -EBUSY;
+ ret = c4_new_chan(ci, cp.port, cp.channum, dev);
+ if (ret < 0) {
+ /* needed due to Ioctl calling sequence */
+ rtnl_unlock();
+ unregister_hdlc_device(dev);
+ /* needed due to Ioctl calling sequence */
+ rtnl_lock();
+ free_netdev(dev);
+ }
+ return ret;
}
-static status_t
-do_get_chan_stats (struct net_device *ndev, void *data)
+static status_t
+do_get_chan_stats(struct net_device *ndev, void *data)
{
- struct c4_chan_stats_wrap ccs;
- int ret;
-
- if (copy_from_user (&ccs, data,
- sizeof (struct c4_chan_stats_wrap)))
- return -EFAULT;
- switch (ret = mkret (c4_get_chan_stats (ccs.channum, &ccs.stats)))
- {
- case 0:
- break;
- default:
- return ret;
- }
- if (copy_to_user (data, &ccs,
- sizeof (struct c4_chan_stats_wrap)))
- return -EFAULT;
- return 0;
+ struct c4_chan_stats_wrap ccs;
+ int ret;
+
+ if (copy_from_user(&ccs, data,
+ sizeof(struct c4_chan_stats_wrap)))
+ return -EFAULT;
+
+ ret = c4_get_chan_stats(ccs.channum, &ccs.stats);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(data, &ccs,
+ sizeof(struct c4_chan_stats_wrap)))
+ return -EFAULT;
+ return 0;
}
-static status_t
-do_set_loglevel (struct net_device *ndev, void *data)
+static status_t
+do_set_loglevel(struct net_device *ndev, void *data)
{
- unsigned int cxt1e1_log_level;
+ unsigned int cxt1e1_log_level;
- if (copy_from_user (&cxt1e1_log_level, data, sizeof (int)))
- return -EFAULT;
- sbecom_set_loglevel (cxt1e1_log_level);
- return 0;
+ if (copy_from_user(&cxt1e1_log_level, data, sizeof(int)))
+ return -EFAULT;
+ sbecom_set_loglevel(cxt1e1_log_level);
+ return 0;
}
-static status_t
-do_deluser (struct net_device *ndev, int lockit)
+static status_t
+do_deluser(struct net_device *ndev, int lockit)
{
- if (ndev->flags & IFF_UP)
- return -EBUSY;
-
- {
- ci_t *ci;
- mch_t *ch;
- const struct c4_priv *priv;
- int channum;
-
- priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
- ci = priv->ci;
- channum = priv->channum;
-
- ch = c4_find_chan (channum);
- if (ch == NULL)
- return -ENOENT;
- ch->user = NULL; /* will be freed, below */
- }
-
- if (lockit)
- rtnl_unlock (); /* needed if Ioctl calling sequence */
- unregister_hdlc_device (ndev);
- if (lockit)
- rtnl_lock (); /* needed if Ioctl calling sequence */
- free_netdev (ndev);
- return 0;
+ if (ndev->flags & IFF_UP)
+ return -EBUSY;
+
+ {
+ ci_t *ci;
+ mch_t *ch;
+ const struct c4_priv *priv;
+ int channum;
+
+ priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
+ ci = priv->ci;
+ channum = priv->channum;
+
+ ch = c4_find_chan(channum);
+ if (ch == NULL)
+ return -ENOENT;
+ ch->user = NULL; /* will be freed, below */
+ }
+
+ /* needed if Ioctl calling sequence */
+ if (lockit)
+ rtnl_unlock();
+ unregister_hdlc_device(ndev);
+ /* needed if Ioctl calling sequence */
+ if (lockit)
+ rtnl_lock();
+ free_netdev(ndev);
+ return 0;
}
int
-do_del_chan (struct net_device *musycc_dev, void *data)
+do_del_chan(struct net_device *musycc_dev, void *data)
{
- struct sbecom_chan_param cp;
- char buf[sizeof (CHANNAME) + 3];
- struct net_device *dev;
- int ret;
-
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- if (cp.channum > 999)
- return -EINVAL;
- snprintf (buf, sizeof(buf), CHANNAME "%d", cp.channum);
- if (!(dev = dev_get_by_name (&init_net, buf)))
- return -ENOENT;
- dev_put (dev);
- ret = do_deluser (dev, 1);
- if (ret)
- return ret;
- return c4_del_chan (cp.channum);
+ struct sbecom_chan_param cp;
+ char buf[sizeof(CHANNAME) + 3];
+ struct net_device *dev;
+ int ret;
+
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ if (cp.channum > 999)
+ return -EINVAL;
+ snprintf(buf, sizeof(buf), CHANNAME "%d", cp.channum);
+ dev = __dev_get_by_name(&init_net, buf);
+ if (!dev)
+ return -ENODEV;
+ ret = do_deluser(dev, 1);
+ if (ret)
+ return ret;
+ return c4_del_chan(cp.channum);
}
-int c4_reset_board (void *);
+int c4_reset_board(void *);
int
-do_reset (struct net_device *musycc_dev, void *data)
+do_reset(struct net_device *musycc_dev, void *data)
{
- const struct c4_priv *priv;
- int i;
-
- for (i = 0; i < 128; i++)
- {
- struct net_device *ndev;
- char buf[sizeof (CHANNAME) + 3];
-
- sprintf (buf, CHANNAME "%d", i);
- if (!(ndev = dev_get_by_name(&init_net, buf)))
- continue;
- priv = dev_to_hdlc (ndev)->priv;
-
- if ((unsigned long) (priv->ci) ==
- (unsigned long) (netdev_priv(musycc_dev)))
- {
- ndev->flags &= ~IFF_UP;
- dev_put (ndev);
- netif_stop_queue (ndev);
- do_deluser (ndev, 1);
- } else
- dev_put (ndev);
- }
- return 0;
+ const struct c4_priv *priv;
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ struct net_device *ndev;
+ char buf[sizeof(CHANNAME) + 3];
+
+ sprintf(buf, CHANNAME "%d", i);
+ ndev = __dev_get_by_name(&init_net, buf);
+ if (!ndev)
+ continue;
+ priv = dev_to_hdlc(ndev)->priv;
+
+ if ((unsigned long) (priv->ci) ==
+ (unsigned long) (netdev_priv(musycc_dev))) {
+ ndev->flags &= ~IFF_UP;
+ netif_stop_queue(ndev);
+ do_deluser(ndev, 1);
+ }
+ }
+ return 0;
}
int
-do_reset_chan_stats (struct net_device *musycc_dev, void *data)
+do_reset_chan_stats(struct net_device *musycc_dev, void *data)
{
- struct sbecom_chan_param cp;
+ struct sbecom_chan_param cp;
- if (copy_from_user (&cp, data,
- sizeof (struct sbecom_chan_param)))
- return -EFAULT;
- return mkret (c4_del_chan_stats (cp.channum));
+ if (copy_from_user(&cp, data,
+ sizeof(struct sbecom_chan_param)))
+ return -EFAULT;
+ return c4_del_chan_stats(cp.channum);
}
-static status_t
-c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
+static status_t
+c4_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
- ci_t *ci;
- void *data;
- int iocmd, iolen;
- status_t ret;
- static struct data
- {
- union
- {
- u_int8_t c;
- u_int32_t i;
- struct sbe_brd_info bip;
- struct sbe_drv_info dip;
- struct sbe_iid_info iip;
- struct sbe_brd_addr bap;
- struct sbecom_chan_stats stats;
- struct sbecom_chan_param param;
- struct temux_card_stats cards;
- struct sbecom_card_param cardp;
- struct sbecom_framer_param frp;
- } u;
- } arg;
-
-
- if (!capable (CAP_SYS_ADMIN))
- return -EPERM;
- if (cmd != SIOCDEVPRIVATE + 15)
- return -EINVAL;
- if (!(ci = get_ci_by_dev (ndev)))
- return -EINVAL;
- if (ci->state != C_RUNNING)
- return -ENODEV;
- if (copy_from_user (&iocmd, ifr->ifr_data, sizeof (iocmd)))
- return -EFAULT;
+ ci_t *ci;
+ void *data;
+ int iocmd, iolen;
+ status_t ret;
+ static struct data {
+ union {
+ u_int8_t c;
+ u_int32_t i;
+ struct sbe_brd_info bip;
+ struct sbe_drv_info dip;
+ struct sbe_iid_info iip;
+ struct sbe_brd_addr bap;
+ struct sbecom_chan_stats stats;
+ struct sbecom_chan_param param;
+ struct temux_card_stats cards;
+ struct sbecom_card_param cardp;
+ struct sbecom_framer_param frp;
+ } u;
+ } arg;
+
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (cmd != SIOCDEVPRIVATE + 15)
+ return -EINVAL;
+ ci = get_ci_by_dev(ndev);
+ if (!ci)
+ return -EINVAL;
+ if (ci->state != C_RUNNING)
+ return -ENODEV;
+ if (copy_from_user(&iocmd, ifr->ifr_data, sizeof(iocmd)))
+ return -EFAULT;
#if 0
- if (copy_from_user (&len, ifr->ifr_data + sizeof (iocmd), sizeof (len)))
- return -EFAULT;
+ if (copy_from_user(&len, ifr->ifr_data + sizeof(iocmd), sizeof(len)))
+ return -EFAULT;
#endif
#if 0
- pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
- _IOC_DIR (iocmd), _IOC_TYPE (iocmd), _IOC_NR (iocmd),
- _IOC_SIZE (iocmd));
+ pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
+ _IOC_DIR(iocmd), _IOC_TYPE(iocmd), _IOC_NR(iocmd),
+ _IOC_SIZE(iocmd));
#endif
- iolen = _IOC_SIZE (iocmd);
- data = ifr->ifr_data + sizeof (iocmd);
- if (copy_from_user (&arg, data, iolen))
- return -EFAULT;
-
- ret = 0;
- switch (iocmd)
- {
- case SBE_IOC_PORT_GET:
- //pr_info(">> SBE_IOC_PORT_GET Ioctl...\n");
- ret = do_get_port (ndev, data);
- break;
- case SBE_IOC_PORT_SET:
- //pr_info(">> SBE_IOC_PORT_SET Ioctl...\n");
- ret = do_set_port (ndev, data);
- break;
- case SBE_IOC_CHAN_GET:
- //pr_info(">> SBE_IOC_CHAN_GET Ioctl...\n");
- ret = do_get_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_SET:
- //pr_info(">> SBE_IOC_CHAN_SET Ioctl...\n");
- ret = do_set_chan (ndev, data);
- break;
- case C4_DEL_CHAN:
- //pr_info(">> C4_DEL_CHAN Ioctl...\n");
- ret = do_del_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_NEW:
- ret = do_create_chan (ndev, data);
- break;
- case SBE_IOC_CHAN_GET_STAT:
- ret = do_get_chan_stats (ndev, data);
- break;
- case SBE_IOC_LOGLEVEL:
- ret = do_set_loglevel (ndev, data);
- break;
- case SBE_IOC_RESET_DEV:
- ret = do_reset (ndev, data);
- break;
- case SBE_IOC_CHAN_DEL_STAT:
- ret = do_reset_chan_stats (ndev, data);
- break;
- case C4_LOOP_PORT:
- ret = do_port_loop (ndev, data);
- break;
- case C4_RW_FRMR:
- ret = do_framer_rw (ndev, data);
- break;
- case C4_RW_MSYC:
- ret = do_musycc_rw (ndev, data);
- break;
- case C4_RW_PLD:
- ret = do_pld_rw (ndev, data);
- break;
- case SBE_IOC_IID_GET:
- ret = (iolen == sizeof (struct sbe_iid_info)) ? c4_get_iidinfo (ci, &arg.u.iip) : -EFAULT;
- if (ret == 0) /* no error, copy data */
- if (copy_to_user (data, &arg, iolen))
- return -EFAULT;
- break;
- default:
- //pr_info(">> c4_ioctl: EINVAL - unknown iocmd <%x>\n", iocmd);
- ret = -EINVAL;
- break;
- }
- return mkret (ret);
+ iolen = _IOC_SIZE(iocmd);
+ if (iolen > sizeof(arg))
+ return -EFAULT;
+ data = ifr->ifr_data + sizeof(iocmd);
+ if (copy_from_user(&arg, data, iolen))
+ return -EFAULT;
+
+ ret = 0;
+ switch (iocmd) {
+ case SBE_IOC_PORT_GET:
+ ret = do_get_port(ndev, data);
+ break;
+ case SBE_IOC_PORT_SET:
+ ret = do_set_port(ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET:
+ ret = do_get_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_SET:
+ ret = do_set_chan(ndev, data);
+ break;
+ case C4_DEL_CHAN:
+ ret = do_del_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_NEW:
+ ret = do_create_chan(ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET_STAT:
+ ret = do_get_chan_stats(ndev, data);
+ break;
+ case SBE_IOC_LOGLEVEL:
+ ret = do_set_loglevel(ndev, data);
+ break;
+ case SBE_IOC_RESET_DEV:
+ ret = do_reset(ndev, data);
+ break;
+ case SBE_IOC_CHAN_DEL_STAT:
+ ret = do_reset_chan_stats(ndev, data);
+ break;
+ case C4_LOOP_PORT:
+ ret = do_port_loop(ndev, data);
+ break;
+ case C4_RW_FRMR:
+ ret = do_framer_rw(ndev, data);
+ break;
+ case C4_RW_MSYC:
+ ret = do_musycc_rw(ndev, data);
+ break;
+ case C4_RW_PLD:
+ ret = do_pld_rw(ndev, data);
+ break;
+ case SBE_IOC_IID_GET:
+ ret = (iolen == sizeof(struct sbe_iid_info)) ?
+ c4_get_iidinfo(ci, &arg.u.iip) : -EFAULT;
+ if (ret == 0) /* no error, copy data */
+ if (copy_to_user(data, &arg, iolen))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
static const struct net_device_ops c4_ops = {
- .ndo_open = void_open,
- .ndo_start_xmit = c4_linux_xmit,
- .ndo_do_ioctl = c4_ioctl,
+ .ndo_open = void_open,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = c4_ioctl,
};
static void c4_setup(struct net_device *dev)
{
- dev->type = ARPHRD_VOID;
- dev->netdev_ops = &c4_ops;
+ dev->type = ARPHRD_VOID;
+ dev->netdev_ops = &c4_ops;
}
struct net_device *__init
-c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
- int irq0, int irq1)
+c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
+ int irq0, int irq1)
{
- struct net_device *ndev;
- ci_t *ci;
-
- ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
- if (!ndev)
- {
- pr_warning("%s: no memory for struct net_device !\n", hi->devname);
- error_flag = ENOMEM;
- return NULL;
- }
- ci = (ci_t *)(netdev_priv(ndev));
- ndev->irq = irq0;
-
- ci->hdw_info = hi;
- ci->state = C_INIT; /* mark as hardware not available */
- ci->next = c4_list;
- c4_list = ci;
- ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
-
- if (!CI)
- CI = ci; /* DEBUG, only board 0 usage */
-
- strcpy (ci->devname, hi->devname);
- ci->release = &pmcc4_OSSI_release[0];
-
- /* tasklet */
+ struct net_device *ndev;
+ ci_t *ci;
+
+ ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
+ if (!ndev) {
+ pr_warning("%s: no memory for struct net_device !\n",
+ hi->devname);
+ error_flag = -ENOMEM;
+ return NULL;
+ }
+ ci = (ci_t *)(netdev_priv(ndev));
+ ndev->irq = irq0;
+
+ ci->hdw_info = hi;
+ ci->state = C_INIT; /* mark as hardware not available */
+ ci->next = c4_list;
+ c4_list = ci;
+ ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
+
+ if (!CI)
+ CI = ci; /* DEBUG, only board 0 usage */
+
+ strcpy(ci->devname, hi->devname);
+
+ /* tasklet */
#if defined(SBE_ISR_TASKLET)
- tasklet_init (&ci->ci_musycc_isr_tasklet,
- (void (*) (unsigned long)) musycc_intr_bh_tasklet,
- (unsigned long) ci);
+ tasklet_init(&ci->ci_musycc_isr_tasklet,
+ (void (*) (unsigned long)) musycc_intr_bh_tasklet,
+ (unsigned long) ci);
- if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
- tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
+ if (atomic_read(&ci->ci_musycc_isr_tasklet.count) == 0)
+ tasklet_disable_nosync(&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
- ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
- ci->ci_musycc_isr_tq.data = ci;
+ ci->ci_musycc_isr_tq.routine = (void *)(unsigned long)musycc_intr_bh_tasklet;
+ ci->ci_musycc_isr_tq.data = ci;
#endif
- if (register_netdev (ndev) ||
- (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
- {
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = ENODEV;
- return NULL;
- }
- /*************************************************************
- * int request_irq(unsigned int irq,
- * void (*handler)(int, void *, struct pt_regs *),
- * unsigned long flags, const char *dev_name, void *dev_id);
- * wherein:
- * irq -> The interrupt number that is being requested.
- * handler -> Pointer to handling function being installed.
- * flags -> A bit mask of options related to interrupt management.
- * dev_name -> String used in /proc/interrupts to show owner of interrupt.
- * dev_id -> Pointer (for shared interrupt lines) to point to its own
- * private data area (to identify which device is interrupting).
- *
- * extern void free_irq(unsigned int irq, void *dev_id);
- **************************************************************/
-
- if (request_irq (irq0, &c4_linux_interrupt,
- IRQF_SHARED,
- ndev->name, ndev))
- {
- pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
- unregister_netdev (ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = EIO;
- return NULL;
- }
+ if (register_netdev(ndev) ||
+ (c4_init(ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS)) {
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -ENODEV;
+ return NULL;
+ }
+ /*************************************************************
+ * int request_irq(unsigned int irq,
+ * void (*handler)(int, void *, struct pt_regs *),
+ * unsigned long flags, const char *dev_name, void *dev_id);
+ * wherein:
+ * irq -> The interrupt number that is being requested.
+ * handler -> Pointer to handling function being installed.
+ * flags -> A bit mask of options related to interrupt management.
+ * dev_name -> String used in /proc/interrupts to show owner of interrupt.
+ * dev_id -> Pointer (for shared interrupt lines) to point to its own
+ * private data area (to identify which device is interrupting).
+ *
+ * extern void free_irq(unsigned int irq, void *dev_id);
+ **************************************************************/
+
+ if (request_irq(irq0, &c4_linux_interrupt,
+ IRQF_SHARED,
+ ndev->name, ndev)) {
+ pr_warning("%s: MUSYCC could not get irq: %d\n",
+ ndev->name, irq0);
+ unregister_netdev(ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -EIO;
+ return NULL;
+ }
#ifdef CONFIG_SBE_PMCC4_NCOMM
- if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
- {
- pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
- unregister_netdev (ndev);
- free_irq (irq0, ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- error_flag = EIO;
- return NULL;
- }
+ if (request_irq(irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev)) {
+ pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
+ unregister_netdev(ndev);
+ free_irq(irq0, ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ error_flag = -EIO;
+ return NULL;
+ }
#endif
- /* setup board identification information */
-
- {
- u_int32_t tmp;
-
- hdw_sn_get (hi, brdno); /* also sets PROM format type (promfmt)
- * for later usage */
-
- switch (hi->promfmt)
- {
- case PROM_FORMAT_TYPE1:
- memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
- memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4); /* unaligned data
- * acquisition */
- ci->brd_id = cpu_to_be32 (tmp);
- break;
- case PROM_FORMAT_TYPE2:
- memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
- memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4); /* unaligned data
- * acquisition */
- ci->brd_id = cpu_to_be32 (tmp);
- break;
- default:
- ci->brd_id = 0;
- memset (ndev->dev_addr, 0, 6);
- break;
- }
+ /* setup board identification information */
+
+ {
+ u_int32_t tmp;
+
+ /* also sets PROM format type (promfmt) for later usage */
+ hdw_sn_get(hi, brdno);
+
+ switch (hi->promfmt) {
+ case PROM_FORMAT_TYPE1:
+ memcpy(ndev->dev_addr,
+ (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ /* unaligned data acquisition */
+ memcpy(&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);
+ ci->brd_id = cpu_to_be32(tmp);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy(ndev->dev_addr,
+ (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ /* unaligned data acquisition */
+ memcpy(&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);
+ ci->brd_id = cpu_to_be32(tmp);
+ break;
+ default:
+ ci->brd_id = 0;
+ memset(ndev->dev_addr, 0, 6);
+ break;
+ }
#if 1
- sbeid_set_hdwbid (ci); /* requires bid to be preset */
+ /* requires bid to be preset */
+ sbeid_set_hdwbid(ci);
#else
- sbeid_set_bdtype (ci); /* requires hdw_bid to be preset */
+ /* requires hdw_bid to be preset */
+ sbeid_set_bdtype(ci);
#endif
-
- }
+ }
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_init (ci);
+ sbecom_proc_brd_init(ci);
#endif
#if defined(SBE_ISR_TASKLET)
- tasklet_enable (&ci->ci_musycc_isr_tasklet);
+ tasklet_enable(&ci->ci_musycc_isr_tasklet);
#endif
-
- if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
- {
+ error_flag = c4_init2(ci);
+ if (error_flag != SBE_DRVR_SUCCESS) {
#ifdef CONFIG_PROC_FS
- sbecom_proc_brd_cleanup (ci);
+ sbecom_proc_brd_cleanup(ci);
#endif
- unregister_netdev (ndev);
- free_irq (irq1, ndev);
- free_irq (irq0, ndev);
- OS_kfree (netdev_priv(ndev));
- OS_kfree (ndev);
- return NULL; /* failure, error_flag is set */
- }
- return ndev;
+ unregister_netdev(ndev);
+ free_irq(irq1, ndev);
+ free_irq(irq0, ndev);
+ OS_kfree(netdev_priv(ndev));
+ OS_kfree(ndev);
+ /* failure, error_flag is set */
+ return NULL;
+ }
+ return ndev;
}
static int __init
-c4_mod_init (void)
+c4_mod_init(void)
{
- int rtn;
-
- pr_warning("%s\n", pmcc4_OSSI_release);
- if ((rtn = c4hw_attach_all ()))
- return -rtn; /* installation failure - see system log */
-
- /* housekeeping notifications */
- if (cxt1e1_log_level != log_level_default)
- pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
- log_level_default, cxt1e1_log_level);
- if (cxt1e1_max_mru != max_mru_default)
- pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
- max_mru_default, cxt1e1_max_mru);
- if (cxt1e1_max_mtu != max_mtu_default)
- pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n",
- max_mtu_default, cxt1e1_max_mtu);
- if (max_rxdesc_used != max_rxdesc_default)
- {
- if (max_rxdesc_used > 2000)
- max_rxdesc_used = 2000; /* out-of-bounds reset */
- pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
- max_rxdesc_default, max_rxdesc_used);
- }
- if (max_txdesc_used != max_txdesc_default)
- {
- if (max_txdesc_used > 1000)
- max_txdesc_used = 1000; /* out-of-bounds reset */
- pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
- max_txdesc_default, max_txdesc_used);
- }
- return 0; /* installation success */
+ int rtn;
+
+ rtn = c4hw_attach_all();
+ if (rtn)
+ return -rtn; /* installation failure - see system log */
+
+ /* housekeeping notifications */
+ if (cxt1e1_log_level != log_level_default)
+ pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
+ log_level_default, cxt1e1_log_level);
+ if (cxt1e1_max_mru != max_mru_default)
+ pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
+ max_mru_default, cxt1e1_max_mru);
+ if (cxt1e1_max_mtu != max_mtu_default)
+ pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n",
+ max_mtu_default, cxt1e1_max_mtu);
+ if (max_rxdesc_used != max_rxdesc_default) {
+ if (max_rxdesc_used > 2000)
+ max_rxdesc_used = 2000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
+ max_rxdesc_default, max_rxdesc_used);
+ }
+ if (max_txdesc_used != max_txdesc_default) {
+ if (max_txdesc_used > 1000)
+ max_txdesc_used = 1000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
+ max_txdesc_default, max_txdesc_used);
+ }
+ return 0; /* installation success */
}
@@ -1139,31 +1117,29 @@ c4_mod_init (void)
*/
static void __exit
-cleanup_hdlc (void)
+cleanup_hdlc(void)
{
- hdw_info_t *hi;
- ci_t *ci;
- struct net_device *ndev;
- int i, j, k;
-
- for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
- {
- if (hi->ndev) /* a board has been attached */
- {
- ci = (ci_t *)(netdev_priv(hi->ndev));
- for (j = 0; j < ci->max_port; j++)
- for (k = 0; k < MUSYCC_NCHANS; k++)
- if ((ndev = ci->port[j].chan[k]->user))
- {
- do_deluser (ndev, 0);
- }
- }
- }
+ hdw_info_t *hi;
+ ci_t *ci;
+ struct net_device *ndev;
+ int i, j, k;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
+ if (hi->ndev) { /* a board has been attached */
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ for (j = 0; j < ci->max_port; j++)
+ for (k = 0; k < MUSYCC_NCHANS; k++) {
+ ndev = ci->port[j].chan[k]->user;
+ if (ndev)
+ do_deluser(ndev, 0);
+ }
+ }
+ }
}
static void __exit
-c4_mod_remove (void)
+c4_mod_remove(void)
{
cleanup_hdlc(); /* delete any missed channels */
cleanup_devs();
@@ -1172,13 +1148,13 @@ c4_mod_remove (void)
pr_info("SBE - driver removed.\n");
}
-module_init (c4_mod_init);
-module_exit (c4_mod_remove);
+module_init(c4_mod_init);
+module_exit(c4_mod_remove);
-MODULE_AUTHOR ("SBE Technical Services <support@sbei.com>");
-MODULE_DESCRIPTION ("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
+MODULE_AUTHOR("SBE Technical Services <support@sbei.com>");
+MODULE_DESCRIPTION("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
#ifdef MODULE_LICENSE
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
#endif
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 7a3a30cd0f7f..7b4f6f2108e3 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -42,7 +42,6 @@ static unsigned int max_bh = 0;
/* global driver variables */
extern ci_t *c4_list;
extern int drvr_state;
-extern int cxt1e1_log_level;
extern int cxt1e1_max_mru;
extern int cxt1e1_max_mtu;
@@ -217,7 +216,8 @@ musycc_dump_ring(ci_t *ci, unsigned int chan)
max_intcnt = 0; /* reset counter */
}
- if (!(ch = sd_find_chan(dummy, chan))) {
+ ch = sd_find_chan(dummy, chan);
+ if (!ch) {
pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
return ENOENT;
}
@@ -1044,17 +1044,19 @@ musycc_bh_rx_eom(mpi_t *pi, int gchan)
#endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/
{
- if ((m2 = OS_mem_token_alloc(cxt1e1_max_mru))) {
- /* substitute the mbuf+cluster */
- md->mem_token = m2;
- md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2)));
-
- /* pass the received mbuf upward */
- sd_recv_consume(m, status & LENGTH_MASK, ch->user);
- ch->s.rx_packets++;
- ch->s.rx_bytes += status & LENGTH_MASK;
+ m2 = OS_mem_token_alloc(cxt1e1_max_mru);
+ if (m2) {
+ /* substitute the mbuf+cluster */
+ md->mem_token = m2;
+ md->data = cpu_to_le32(OS_vtophys(
+ OS_mem_token_data(m2)));
+
+ /* pass the received mbuf upward */
+ sd_recv_consume(m, status & LENGTH_MASK, ch->user);
+ ch->s.rx_packets++;
+ ch->s.rx_bytes += status & LENGTH_MASK;
} else
- ch->s.rx_dropped++;
+ ch->s.rx_dropped++;
}
} else if (error == ERR_FCS)
ch->s.rx_crc_errors++;
@@ -1545,8 +1547,9 @@ musycc_chan_down(ci_t *dummy, int channum)
mch_t *ch;
int i, gchan;
- if (!(ch = sd_find_chan(dummy, channum)))
- return EINVAL;
+ ch = sd_find_chan(dummy, channum);
+ if (!ch)
+ return -EINVAL;
pi = ch->up;
gchan = ch->gchan;
@@ -1589,6 +1592,8 @@ musycc_chan_down(ci_t *dummy, int channum)
#endif
+#if 0
+/* TODO: determine if these functions will not be needed and can be removed */
int
musycc_del_chan(ci_t *ci, int channum)
{
@@ -1596,7 +1601,8 @@ musycc_del_chan(ci_t *ci, int channum)
if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))) /* sanity chk param */
return ECHRNG;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return ENOENT;
if (ch->state == UP)
musycc_chan_down(ci, channum);
@@ -1612,12 +1618,14 @@ musycc_del_chan_stats(ci_t *ci, int channum)
if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
return ECHRNG;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return ENOENT;
memset(&ch->s, 0, sizeof(struct sbecom_chan_stats));
return 0;
}
+#endif
int
@@ -1632,7 +1640,8 @@ musycc_start_xmit(ci_t *ci, int channum, void *mem_token)
int txd_need_cnt;
u_int32_t len;
- if (!(ch = sd_find_chan(ci, channum)))
+ ch = sd_find_chan(ci, channum);
+ if (!ch)
return -ENOENT;
if (ci->state != C_RUNNING) /* full interrupt processing available */
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
deleted file mode 100644
index f17a902e95e3..000000000000
--- a/drivers/staging/cxt1e1/ossiRelease.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*-----------------------------------------------------------------------------
- * ossiRelease.c -
- *
- * This string will be embedded into the executable and will track the
- * release. The embedded string may be displayed using the following:
- *
- * strings <filename> | grep \$Rel
- *
- * Copyright (C) 2002-2008 One Stop Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * For further information, contact via email: support@onestopsystems.com
- * One Stop Systems, Inc. Escondido, California U.S.A.
- *
- *-----------------------------------------------------------------------------
- */
-
-char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
-
-/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
index 137b63cb5537..78cc1709ebdb 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.c
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -90,7 +90,7 @@ static int ByteReverseBuilt = FALSE;
*------------------------------------------------------------------------
*/
-short mfg_template[sizeof (FLD_TYPE2)] =
+static u8 mfg_template[sizeof(FLD_TYPE2)] =
{
PROM_FORMAT_TYPE2, /* type; */
0x00, 0x1A, /* length[2]; */
@@ -491,13 +491,11 @@ pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum)
PROMFORMAT buffer; /* Memory image of structure */
u_int32_t crc; /* CRC of structure */
time_t createTime;
- int i;
createTime = get_seconds ();
/* use template data */
- for (i = 0; i < sizeof (FLD_TYPE2); ++i)
- buffer.bytes[i] = mfg_template[i];
+ memcpy(&buffer.fldType2, mfg_template, sizeof(buffer.fldType2));
/* Update serial number field in buffer */
pmcSetBuffValue (&buffer.fldType2.Serial[3], serialNum, 3);
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index 003eb8690190..b4b5e5ad791b 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -96,7 +96,6 @@ void sbeid_set_bdtype (ci_t *ci);
void sbeid_set_hdwbid (ci_t *ci);
u_int32_t sbeCrc (u_int8_t *, u_int32_t, u_int32_t, u_int32_t *);
-void VMETRO_TRACE (void *); /* put data into 8 LEDs */
void VMETRO_TRIGGER (ci_t *, int); /* Note: int = 0(default)
* thru 15 */
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index a9d95753be20..621a72926475 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -112,12 +112,12 @@ c4_find_chan (int channum)
for (portnum = 0; portnum < ci->max_port; portnum++)
for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
{
- if ((ch = ci->port[portnum].chan[gchan]))
- {
- if ((ch->state != UNASSIGNED) &&
- (ch->channum == channum))
- return ch;
- }
+ ch = ci->port[portnum].chan[gchan];
+ if (ch) {
+ if ((ch->state != UNASSIGNED) &&
+ (ch->channum == channum))
+ return ch;
+ }
}
return NULL;
}
@@ -668,8 +668,9 @@ c4_init2 (ci_t *ci)
status_t ret;
/* PORT POINT: this routine generates first interrupt */
- if ((ret = musycc_init (ci)) != SBE_DRVR_SUCCESS)
- return ret;
+ ret = musycc_init(ci);
+ if (ret != SBE_DRVR_SUCCESS)
+ return ret;
#if 0
ci->p.framing_type = FRAMING_CBP;
@@ -756,7 +757,7 @@ c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
volatile u_int32_t data;
if (pp->portnum >= ci->max_port)/* sanity check */
- return ENXIO;
+ return -ENXIO;
comet = ci->port[pp->portnum].cometbase;
data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff;
@@ -845,7 +846,7 @@ c4_musycc_rw (ci_t *ci, struct c4_musycc_param *mcp)
*/
portnum = (mcp->offset % 0x6000) / 0x800;
if (portnum >= ci->max_port)
- return ENXIO;
+ return -ENXIO;
pi = &ci->port[portnum];
if (mcp->offset >= 0x6000)
offset += 0x6000; /* put back in MsgCfgDesc address offset */
@@ -894,7 +895,7 @@ status_t
c4_get_port (ci_t *ci, int portnum)
{
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
* board */
@@ -915,7 +916,7 @@ c4_set_port (ci_t *ci, int portnum)
int i;
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
pi = &ci->port[portnum];
pp = &ci->port[portnum].p;
@@ -927,15 +928,15 @@ c4_set_port (ci_t *ci, int portnum)
portnum, e1mode, pi->openchans);
}
if (pi->openchans)
- return EBUSY; /* group needs initialization only for
+ return -EBUSY; /* group needs initialization only for
* first channel of a group */
{
status_t ret;
- if ((ret = c4_wq_port_init (pi))) /* create/init
- * workqueue_struct */
- return ret;
+ ret = c4_wq_port_init(pi);
+ if (ret) /* create/init workqueue_struct */
+ return ret;
}
init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP);
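The other recurring cleanup in this patch splits "if ((ret = fn()))" into an assignment followed by a plain test, the form preferred by the kernel coding style and flagged by checkpatch. A tiny sketch (do_work() is a stand-in, not a driver function):

static int do_work(void)
{
	return 0;	/* stand-in for musycc_init(), c4_wq_port_init(), ... */
}

static int example(void)
{
	int ret;

	/* Before: if ((ret = do_work())) return ret; */
	ret = do_work();
	if (ret)
		return ret;
	return 0;
}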
@@ -1018,10 +1019,10 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
int gchan;
if (c4_find_chan (channum)) /* a new channel shouldn't already exist */
- return EEXIST;
+ return -EEXIST;
if (portnum >= ci->max_port) /* sanity check */
- return ENXIO;
+ return -ENXIO;
pi = &(ci->port[portnum]);
/* find any available channel within this port */
@@ -1032,7 +1033,7 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
break;
}
if (gchan == MUSYCC_NCHANS) /* exhausted table, all were assigned */
- return ENFILE;
+ return -ENFILE;
ch->up = pi;
@@ -1055,8 +1056,9 @@ c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
{
status_t ret;
- if ((ret = c4_wk_chan_init (pi, ch)))
- return ret;
+ ret = c4_wk_chan_init(pi, ch);
+ if (ret)
+ return ret;
}
/* save off interface assignments which bound a board */
@@ -1079,8 +1081,10 @@ c4_del_chan (int channum)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
if (ch->state == UP)
musycc_chan_down ((ci_t *) 0, channum);
ch->state = UNASSIGNED;
@@ -1095,8 +1099,9 @@ c4_del_chan_stats (int channum)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
return 0;
@@ -1109,19 +1114,20 @@ c4_set_chan (int channum, struct sbecom_chan_param *p)
mch_t *ch;
int i, x = 0;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
#if 1
if (ch->p.card != p->card ||
ch->p.port != p->port ||
ch->p.channum != p->channum)
- return EINVAL;
+ return -EINVAL;
#endif
if (!(ch->up->group_is_set))
{
- return EIO; /* out of order, SET_PORT command
+ return -EIO; /* out of order, SET_PORT command
* required prior to first group's
* SET_CHAN command */
}
@@ -1143,10 +1149,12 @@ c4_set_chan (int channum, struct sbecom_chan_param *p)
{
status_t ret;
- if ((ret = musycc_chan_down ((ci_t *) 0, channum)))
- return ret;
- if ((ret = c4_chan_up (ch->up->up, channum)))
- return ret;
+ ret = musycc_chan_down((ci_t *)0, channum);
+ if (ret)
+ return ret;
+ ret = c4_chan_up(ch->up->up, channum);
+ if (ret)
+ return ret;
sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
* channel */
}
@@ -1159,8 +1167,10 @@ c4_get_chan (int channum, struct sbecom_chan_param *p)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
*p = ch->p;
return 0;
}
@@ -1170,8 +1180,10 @@ c4_get_chan_stats (int channum, struct sbecom_chan_stats *p)
{
mch_t *ch;
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
*p = ch->s;
p->tx_pending = atomic_read (&ch->tx_pending);
return 0;
@@ -1240,8 +1252,10 @@ c4_chan_up (ci_t *ci, int channum)
u_int32_t tmp; /* for optimizing conversion across BE
* platform */
- if (!(ch = c4_find_chan (channum)))
- return ENOENT;
+ ch = c4_find_chan(channum);
+ if (!ch)
+ return -ENOENT;
+
if (ch->state == UP)
{
if (cxt1e1_log_level >= LOG_MONITOR)
@@ -1264,7 +1278,7 @@ c4_chan_up (ci_t *ci, int channum)
pr_info("+ ask4 %x, currently %x\n",
ch->p.bitmask[i], pi->tsm[i]);
}
- return EINVAL;
+ return -EINVAL;
}
for (j = 0; j < 8; j++)
if (ch->p.bitmask[i] & (1 << j))
@@ -1277,7 +1291,7 @@ c4_chan_up (ci_t *ci, int channum)
/* if( cxt1e1_log_level >= LOG_WARN) */
pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n",
ci->devname, channum);
- return ENOBUFS; /* this should not happen */
+ return -ENOBUFS; /* this should not happen */
}
addr = c4_fifo_alloc (pi, gchan, &nbuf);
ch->state = UP;
@@ -1372,12 +1386,13 @@ c4_chan_up (ci_t *ci, int channum)
}
md->next = cpu_to_le32 (OS_vtophys (md->snext));
- if (!(m = OS_mem_token_alloc (cxt1e1_max_mru)))
- {
- if (cxt1e1_log_level >= LOG_MONITOR)
- pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
- ci->devname, channum, cxt1e1_max_mru);
- goto errfree;
+ m = OS_mem_token_alloc(cxt1e1_max_mru);
+ if (!m) {
+ if (cxt1e1_log_level >= LOG_MONITOR)
+ pr_info(
+ "%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
+ ci->devname, channum, cxt1e1_max_mru);
+ goto errfree;
}
md->mem_token = m;
md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m)));
@@ -1454,7 +1469,7 @@ errfree:
ch->mdr = NULL;
ch->rxd_num = 0;
ch->state = DOWN;
- return ENOBUFS;
+ return -ENOBUFS;
}
/* stop the hardware from servicing & interrupting */
@@ -1533,8 +1548,9 @@ c4_get_iidinfo (ci_t *ci, struct sbe_iid_info *iip)
struct net_device *dev;
char *np;
- if (!(dev = getuserbychan (iip->channum)))
- return ENOENT;
+ dev = getuserbychan(iip->channum);
+ if (!dev)
+ return -ENOENT;
np = dev->name;
strncpy (iip->iname, np, CHNM_STRLEN - 1);
diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h
index 7edbd4e492e3..eb28f095ff8c 100644
--- a/drivers/staging/cxt1e1/pmcc4_private.h
+++ b/drivers/staging/cxt1e1/pmcc4_private.h
@@ -213,7 +213,6 @@ struct sbe_card_info
struct sbe_card_info *next;
u_int32_t *eeprombase; /* mapped address of board's EEPROM */
c4cpld_t *cpldbase; /* mapped address of board's CPLD hardware */
- char *release; /* SBE ID string w/in sbeRelease.c */
void *hdw_info;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir_dev;
diff --git a/drivers/staging/cxt1e1/pmcc4_sysdep.h b/drivers/staging/cxt1e1/pmcc4_sysdep.h
index 697f1943670f..2916c2cb13f9 100644
--- a/drivers/staging/cxt1e1/pmcc4_sysdep.h
+++ b/drivers/staging/cxt1e1/pmcc4_sysdep.h
@@ -60,3 +60,4 @@ void sd_line_is_down (void *user);
int sd_queue_stopped (void *user);
#endif /*** _INC_PMCC4_SYSDEP_H_ ***/
+extern int cxt1e1_log_level;
diff --git a/drivers/staging/cxt1e1/sbeproc.c b/drivers/staging/cxt1e1/sbeproc.c
index 353c001d3fbe..840c647fbf41 100644
--- a/drivers/staging/cxt1e1/sbeproc.c
+++ b/drivers/staging/cxt1e1/sbeproc.c
@@ -72,7 +72,8 @@ static int sbecom_proc_get_sbe_info(struct seq_file *m, void *v)
char *spd;
struct sbe_brd_info *bip;
- if (!(bip = OS_kmalloc(sizeof(struct sbe_brd_info))))
+ bip = OS_kmalloc(sizeof(struct sbe_brd_info));
+ if (!bip)
return -ENOMEM;
pr_devel(">> sbecom_proc_get_sbe_info: entered\n");
@@ -150,7 +151,6 @@ static int sbecom_proc_get_sbe_info(struct seq_file *m, void *v)
break;
}
seq_printf(m, "PCI Bus Speed: %s\n", spd);
- seq_printf(m, "Release: %s\n", ci->release);
#ifdef SBE_PMCC4_ENABLE
{
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
index 31f1d7533eec..3bbe9e122365 100644
--- a/drivers/staging/dgap/Kconfig
+++ b/drivers/staging/dgap/Kconfig
@@ -1,6 +1,6 @@
config DGAP
tristate "Digi EPCA PCI products"
default n
- depends on TTY
+ depends on TTY && HAS_IOMEM
---help---
Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
index 3abe8d2bb748..0063d044ca71 100644
--- a/drivers/staging/dgap/Makefile
+++ b/drivers/staging/dgap/Makefile
@@ -1,7 +1 @@
obj-$(CONFIG_DGAP) += dgap.o
-
-
-dgap-objs := dgap_driver.o dgap_fep5.o \
- dgap_parse.o dgap_trace.o \
- dgap_tty.o dgap_sysfs.o
-
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
new file mode 100644
index 000000000000..a5fc3c75ed4e
--- /dev/null
+++ b/drivers/staging/dgap/dgap.c
@@ -0,0 +1,7675 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/*
+ * In the original out-of-kernel Digi dgap driver, firmware
+ * loading was done via userland-to-driver handshaking.
+ *
+ * For cards that support a concentrator (port expander),
+ * I believe the concentrator itself told the card which
+ * concentrator was actually attached, and then that info
+ * was used to tell user land which concentrator firmware
+ * image was to be downloaded. I think even the BIOS or
+ * FEP images required could change with the connection
+ * of a particular concentrator.
+ *
+ * Since I have no access to any of these cards or
+ * concentrators, I cannot put the correct concentrator
+ * firmware file names into the firmware_info structure
+ * as is now done for the BIOS and FEP images.
+ *
+ * I think, but am not certain, that the cards supporting
+ * concentrators will function without them. So support
+ * of these cards has been left in this driver.
+ *
+ * In order to fully support those cards, they would
+ * either have to be acquired for dissection or maybe
+ * Digi International could provide some assistance.
+ */
+#undef DIGI_CONCENTRATORS_SUPPORTED
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/io.h> /* For read[bwl]/write[bwl] */
+
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/firmware.h>
+
+#include "dgap.h"
+
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+
+static int dgap_start(void);
+static void dgap_init_globals(void);
+static int dgap_found_board(struct pci_dev *pdev, int id);
+static void dgap_cleanup_board(struct board_t *brd);
+static void dgap_poll_handler(ulong dummy);
+static int dgap_init_pci(void);
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgap_remove_one(struct pci_dev *dev);
+static int dgap_probe1(struct pci_dev *pdev, int card_type);
+static int dgap_do_remap(struct board_t *brd);
+static irqreturn_t dgap_intr(int irq, void *voidbrd);
+
+/* Our function prototypes */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file);
+static void dgap_tty_close(struct tty_struct *tty, struct file *file);
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
+ struct channel_t *ch);
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+static int dgap_tty_digigeta(struct tty_struct *tty,
+ struct digi_t __user *retinfo);
+static int dgap_tty_digiseta(struct tty_struct *tty,
+ struct digi_t __user *new_info);
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_write_room(struct tty_struct *tty);
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty);
+static void dgap_tty_start(struct tty_struct *tty);
+static void dgap_tty_stop(struct tty_struct *tty);
+static void dgap_tty_throttle(struct tty_struct *tty);
+static void dgap_tty_unthrottle(struct tty_struct *tty);
+static void dgap_tty_flush_chars(struct tty_struct *tty);
+static void dgap_tty_flush_buffer(struct tty_struct *tty);
+static void dgap_tty_hangup(struct tty_struct *tty);
+static int dgap_wait_for_drain(struct tty_struct *tty);
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command,
+ unsigned int __user *value);
+static int dgap_get_modem_info(struct channel_t *ch,
+ unsigned int __user *value);
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty,
+ int __user *new_info);
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty,
+ int __user *retinfo);
+static int dgap_tty_tiocmget(struct tty_struct *tty);
+static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear);
+static int dgap_tty_send_break(struct tty_struct *tty, int msec);
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count);
+static void dgap_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios);
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
+static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
+
+static int dgap_tty_register(struct board_t *brd);
+static int dgap_tty_init(struct board_t *);
+static void dgap_tty_uninit(struct board_t *);
+static void dgap_carrier(struct channel_t *ch);
+static void dgap_input(struct channel_t *ch);
+
+/*
+ * Our function prototypes from dgap_fep5
+ */
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
+static int dgap_event(struct board_t *bd);
+
+static void dgap_poll_tasklet(unsigned long data);
+static void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1,
+ uchar byte2, uint ncmds);
+static void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
+static int dgap_param(struct tty_struct *tty);
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len);
+static uint dgap_get_custom_baud(struct channel_t *ch);
+static void dgap_firmware_reset_port(struct channel_t *ch);
+
+/*
+ * Function prototypes from dgap_parse.c.
+ */
+static int dgap_gettok(char **in, struct cnode *p);
+static char *dgap_getword(char **in);
+static char *dgap_savestring(char *s);
+static struct cnode *dgap_newnode(int t);
+static int dgap_checknode(struct cnode *p);
+static void dgap_err(char *s);
+
+/*
+ * Function prototypes from dgap_sysfs.h
+ */
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+static void dgap_create_ports_sysfiles(struct board_t *bd);
+static void dgap_remove_ports_sysfiles(struct board_t *bd);
+
+static int dgap_create_driver_sysfiles(struct pci_driver *);
+static void dgap_remove_driver_sysfiles(struct pci_driver *);
+
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
+static void dgap_remove_tty_sysfs(struct device *c);
+
+/*
+ * Function prototypes from dgap_parse.h
+ */
+static int dgap_parsefile(char **in, int Remove);
+static struct cnode *dgap_find_config(int type, int bus, int slot);
+static uint dgap_config_get_num_prts(struct board_t *bd);
+static char *dgap_create_config_string(struct board_t *bd, char *string);
+static uint dgap_config_get_useintr(struct board_t *bd);
+static uint dgap_config_get_altpin(struct board_t *bd);
+
+static int dgap_ms_sleep(ulong ms);
+static void dgap_do_bios_load(struct board_t *brd, const uchar *ubios, int len);
+static void dgap_do_fep_load(struct board_t *brd, const uchar *ufep, int len);
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+static void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
+#endif
+static int dgap_after_config_loaded(int board);
+static int dgap_finalize_board_init(struct board_t *brd);
+
+static void dgap_get_vpd(struct board_t *brd);
+static void dgap_do_reset_board(struct board_t *brd);
+static int dgap_do_wait_for_bios(struct board_t *brd);
+static int dgap_do_wait_for_fep(struct board_t *brd);
+static int dgap_tty_register_ports(struct board_t *brd);
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type);
+
+/* Driver unload function */
+static void dgap_cleanup_module(void);
+
+module_exit(dgap_cleanup_module);
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static const struct file_operations DgapBoardFops = {
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Globals
+ */
+static uint dgap_NumBoards;
+static struct board_t *dgap_Board[MAXBOARDS];
+static ulong dgap_poll_counter;
+static char *dgap_config_buf;
+static int dgap_driver_state = DRIVER_INITIALIZED;
+DEFINE_SPINLOCK(dgap_dl_lock);
+static wait_queue_head_t dgap_dl_wait;
+static int dgap_dl_action;
+static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static struct class *dgap_class;
+
+static struct board_t *dgap_BoardsByMajor[256];
+static uint dgap_count = 500;
+
+/*
+ * Poller stuff
+ */
+DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
+static ulong dgap_poll_time; /* Time of next poll */
+static uint dgap_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgap_poll_timer;
+
+/*
+ SUPPORTED PRODUCTS
+
+ Card Model Number of Ports Interface
+ ----------------------------------------------------------------
+ Acceleport Xem 4 - 64 (EIA232 & EIA422)
+ Acceleport Xr 4 & 8 (EIA232)
+ Acceleport Xr 920 4 & 8 (EIA232)
+ Acceleport C/X 8 - 128 (EIA232)
+ Acceleport EPC/X 8 - 224 (EIA232)
+ Acceleport Xr/422 4 & 8 (EIA422)
+ Acceleport 2r/920 2 (EIA232)
+ Acceleport 4r/920 4 (EIA232)
+ Acceleport 8r/920 8 (EIA232)
+
+ IBM 8-Port Asynchronous PCI Adapter (EIA232)
+ IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422)
+*/
+
+static struct pci_device_id dgap_pci_tbl[] = {
+ { DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
+
+/*
+ * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
+ */
+struct board_id {
+ uint config_type;
+ uchar *name;
+ uint maxports;
+ uint dpatype;
+};
+
+static struct board_id dgap_Ids[] = {
+ { PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
+ { PCX, PCI_DEV_CX_NAME, 128, (T_CX|T_PCIBUS) },
+ { PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX|T_PCIBUS) },
+ { PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC|T_PCIBUS) },
+ { APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
+ { PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
+ {0,} /* 0 terminated list. */
+};
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
+struct firmware_info {
+ uchar *conf_name; /* dgap.conf */
+ uchar *bios_name; /* BIOS filename */
+ uchar *fep_name; /* FEP filename */
+ uchar *con_name; /* Concentrator filename FIXME*/
+ int num; /* sequence number */
+};
+
+/*
+ * Firmware - BIOS, FEP, and CONC filenames
+ */
+static struct firmware_info fw_info[] = {
+ { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", 0, 0 },
+ { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", 0, 1 },
+ { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", 0, 2 },
+ { "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", 0, 3 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 4 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 5 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 6 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 7 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 8 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 9 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 10 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 11 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 12 },
+ { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", 0, 13 },
+ { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", 0, 14 },
+ {0,}
+};
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgap_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+
+static struct ktermios DgapDefaultTermios = {
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
+
+/*
+ * Our needed internal static variables from dgap_parse.c
+ */
+static struct cnode dgap_head;
+#define MAXCWORD 200
+static char dgap_cword[MAXCWORD];
+
+struct toklist {
+ int token;
+ char *string;
+};
+
+static struct toklist dgap_tlist[] = {
+ { BEGIN, "config_begin" },
+ { END, "config_end" },
+ { BOARD, "board" },
+ { PCX, "Digi_AccelePort_C/X_PCI" },
+ { PEPC, "Digi_AccelePort_EPC/X_PCI" },
+ { PPCM, "Digi_AccelePort_Xem_PCI" },
+ { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
+ { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
+ { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
+ { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
+ { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
+ { IO, "io" },
+ { PCIINFO, "pciinfo" },
+ { LINE, "line" },
+ { CONC, "conc" },
+ { CONC, "concentrator" },
+ { CX, "cx" },
+ { CX, "ccon" },
+ { EPC, "epccon" },
+ { EPC, "epc" },
+ { MOD, "module" },
+ { ID, "id" },
+ { STARTO, "start" },
+ { SPEED, "speed" },
+ { CABLE, "cable" },
+ { CONNECT, "connect" },
+ { METHOD, "method" },
+ { STATUS, "status" },
+ { CUSTOM, "Custom" },
+ { BASIC, "Basic" },
+ { MEM, "mem" },
+ { MEM, "memory" },
+ { PORTS, "ports" },
+ { MODEM, "modem" },
+ { NPORTS, "nports" },
+ { TTYN, "ttyname" },
+ { CU, "cuname" },
+ { PRINT, "prname" },
+ { CMAJOR, "major" },
+ { ALTPIN, "altpin" },
+ { USEINTR, "useintr" },
+ { TTSIZ, "ttysize" },
+ { CHSIZ, "chsize" },
+ { BSSIZ, "boardsize" },
+ { UNTSIZ, "schedsize" },
+ { F2SIZ, "f2200size" },
+ { VPSIZ, "vpixsize" },
+ { 0, NULL }
+};
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+static int dgap_init_module(void)
+{
+ int rc = 0;
+
+ pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
+
+ rc = dgap_start();
+ if (rc)
+ return rc;
+
+ rc = dgap_init_pci();
+ if (rc)
+ goto err_cleanup;
+
+ rc = dgap_create_driver_sysfiles(&dgap_driver);
+ if (rc)
+ goto err_cleanup;
+
+ dgap_driver_state = DRIVER_READY;
+
+ return 0;
+
+err_cleanup:
+
+ dgap_cleanup_module();
+
+ return rc;
+}
+module_init(dgap_init_module);
+
+/*
+ * Start of driver.
+ */
+static int dgap_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct device *device;
+
+ /*
+ * make sure that the globals are
+ * init'd before we do anything else
+ */
+ dgap_init_globals();
+
+ dgap_NumBoards = 0;
+
+ pr_info("For the tools package please visit http://www.digi.com\n");
+
+ /*
+ * Register our base character device into the kernel.
+ */
+
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
+ if (rc < 0)
+ return rc;
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ if (IS_ERR(dgap_class)) {
+ rc = PTR_ERR(dgap_class);
+ goto failed_class;
+ }
+
+ device = device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ return rc;
+
+failed_device:
+ class_destroy(dgap_class);
+failed_class:
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ return rc;
+}
+
+/*
+ * Register pci driver, and return how many boards we have.
+ */
+static int dgap_init_pci(void)
+{
+ return pci_register_driver(&dgap_driver);
+}
+
+/* returns 0 on success, or negative on error */
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgap_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgap_NumBoards++;
+ rc = dgap_firmware_load(pdev, ent->driver_data);
+ }
+ }
+ return rc;
+}
+
+static int dgap_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgap_found_board(pdev, card_type);
+}
+
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void dgap_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgap_poll_timer);
+
+ dgap_remove_driver_sysfiles(&dgap_driver);
+
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+
+ kfree(dgap_config_buf);
+
+ for (i = 0; i < dgap_NumBoards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_Board[i]);
+ dgap_tty_uninit(dgap_Board[i]);
+ dgap_cleanup_board(dgap_Board[i]);
+ }
+
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+}
+
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_port) {
+ release_mem_region(brd->membase + 0x200000, 0x200000);
+ iounmap(brd->re_map_port);
+ brd->re_map_port = NULL;
+ }
+
+ if (brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++)
+ kfree(brd->channels[i]);
+
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
+
+ dgap_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgap_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+
+ /* get the board structure and prep it */
+ brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd)
+ return -ENOMEM;
+
+ dgap_Board[dgap_NumBoards] = brd;
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = dgap_NumBoards;
+ brd->firstminor = 0;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_Ids[id].name;
+ brd->maxports = dgap_Ids[id].maxports;
+ brd->type = dgap_Ids[id].config_type;
+ brd->dpatype = dgap_Ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ spin_lock_init(&brd->bd_lock);
+
+ brd->runwait = 0;
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++)
+ brd->channels[i] = NULL;
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+ }
+
+ if (!brd->membase)
+ return -ENODEV;
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
+ (unsigned long) brd);
+
+ i = dgap_do_remap(brd);
+ if (i)
+ brd->state = BOARD_FAILED;
+
+ pr_info("dgap: board %d: %s (rev %d), irq %ld, %s\n",
+ dgap_NumBoards, brd->name, brd->rev, brd->irq,
+ brd->state ? "NOT READY\0" : "READY\0");
+
+ return 0;
+}
+
+
+static int dgap_finalize_board_init(struct board_t *brd)
+{
+ int rc;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
+
+ brd->use_interrupts = dgap_config_get_useintr(brd);
+
+ /*
+ * Set up our interrupt handler if we are set to do interrupts.
+ */
+ if (brd->use_interrupts && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
+ if (rc)
+ brd->intr_used = 0;
+ else
+ brd->intr_used = 1;
+ } else {
+ brd->intr_used = 0;
+ }
+
+ return 0;
+}
+
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type)
+{
+ struct board_t *brd = dgap_Board[dgap_NumBoards - 1];
+ const struct firmware *fw;
+ int ret;
+
+ dgap_get_vpd(brd);
+ dgap_do_reset_board(brd);
+
+ if (fw_info[card_type].conf_name) {
+ ret = request_firmware(&fw, fw_info[card_type].conf_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: config file %s not found\n",
+ fw_info[card_type].conf_name);
+ return ret;
+ }
+ if (!dgap_config_buf) {
+ dgap_config_buf = kmalloc(fw->size + 1, GFP_ATOMIC);
+ if (!dgap_config_buf) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ }
+
+ memcpy(dgap_config_buf, fw->data, fw->size);
+ release_firmware(fw);
+ dgap_config_buf[fw->size] = '\0';
+
+ if (dgap_parsefile(&dgap_config_buf, TRUE) != 0)
+ return -EINVAL;
+ }
+
+ ret = dgap_after_config_loaded(brd->boardnum);
+ if (ret)
+ return ret;
+ /*
+ * Match this board to a config the user created for us.
+ */
+ brd->bd_config =
+ dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
+
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+ * back, and this is a PAPORT8 board, retry with a
+ * PAPORT4 attempt as well.
+ */
+ if (brd->type == PAPORT8 && !brd->bd_config)
+ brd->bd_config =
+ dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
+
+ if (!brd->bd_config) {
+ pr_err("dgap: No valid configuration found\n");
+ return -EINVAL;
+ }
+
+ dgap_tty_register(brd);
+ dgap_finalize_board_init(brd);
+
+ if (fw_info[card_type].bios_name) {
+ ret = request_firmware(&fw, fw_info[card_type].bios_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: bios file %s not found\n",
+ fw_info[card_type].bios_name);
+ return ret;
+ }
+ dgap_do_bios_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for BIOS to test board... */
+ if (!dgap_do_wait_for_bios(brd))
+ return -ENXIO;
+ }
+
+ if (fw_info[card_type].fep_name) {
+ ret = request_firmware(&fw, fw_info[card_type].fep_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: fep file %s not found\n",
+ fw_info[card_type].fep_name);
+ return ret;
+ }
+ dgap_do_fep_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for FEP to load on board... */
+ if (!dgap_do_wait_for_fep(brd))
+ return -ENXIO;
+ }
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ vaddr = brd->re_map_membase;
+ chk_addr = (u16 *) (vaddr + DOWNREQ);
+ /* Nonzero if FEP is requesting concentrator image. */
+ check = readw(chk_addr);
+ }
+
+ if (fw_info[card_type].con_name && check && vaddr) {
+ ret = request_firmware(&fw, fw_info[card_type].con_name,
+ &pdev->dev);
+ if (ret) {
+ pr_err("dgap: conc file %s not found\n",
+ fw_info[card_type].con_name);
+ return ret;
+ }
+ /* Put concentrator firmware loading code here */
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ memcpy_toio(offset, fw->data, fw->size);
+
+ dgap_do_conc_load(brd, (char *)fw->data, fw->size);
+ release_firmware(fw);
+ }
+#endif
+ /*
+ * Do tty device initialization.
+ */
+ ret = dgap_tty_init(brd);
+ if (ret < 0) {
+ dgap_tty_uninit(brd);
+ return ret;
+ }
+
+ ret = dgap_tty_register_ports(brd);
+ if (ret)
+ return ret;
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ return 0;
+}
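dgap_firmware_load() above uses the standard request_firmware()/release_firmware() pairing for the config, BIOS and FEP images. A bare-bones sketch of that pairing, assuming a caller-supplied destination buffer and an illustrative firmware name:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>

static int sketch_load_blob(struct device *dev, void *dst, size_t dst_len)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "dgap/example.bin", dev);	/* name is illustrative */
	if (ret)
		return ret;

	if (fw->size <= dst_len)
		memcpy(dst, fw->data, fw->size);
	else
		ret = -EFBIG;

	release_firmware(fw);	/* fw->data is invalid after this point */
	return ret;
}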
+
+/*
+ * Remap PCI memory.
+ */
+static int dgap_do_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ if (!request_mem_region(brd->membase, 0x200000, "dgap"))
+ return -ENOMEM;
+
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
+ "dgap")) {
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
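dgap_do_remap() claims and maps the two 2 MB windows (shared memory at membase, I/O registers at membase + PCI_IO_OFFSET); the rest of the driver then touches them only through the readb/readw/writeb/writew accessors. A condensed sketch of that claim/map/access/release sequence, with an illustrative register offset:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/types.h>

#define SKETCH_REG	0x0	/* illustrative offset, not a real dgap register */

static u8 sketch_read_reg(unsigned long membase)
{
	void __iomem *regs;
	u8 val = 0xff;

	if (!request_mem_region(membase, 0x200000, "sketch"))
		return val;

	regs = ioremap(membase, 0x200000);
	if (regs) {
		val = readb(regs + SKETCH_REG);	/* always via the MMIO accessors */
		iounmap(regs);
	}
	release_mem_region(membase, 0x200000);
	return val;
}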
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+ /*
+ * Do not start the board state machine until
+ * driver tells us its up and running, and has
+ * everything it needs.
+ */
+ if (dgap_driver_state != DRIVER_READY)
+ goto schedule_poller;
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ((dgap_NumBoards == 1) || (num_online_cpus() <= 1)) {
+ for (i = 0; i < dgap_NumBoards; i++) {
+
+ brd = dgap_Board[i];
+
+ if (brd->state == BOARD_FAILED)
+ continue;
+ if (!brd->intr_running)
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
+ } else {
+ /*
+ * Go thru each board, kicking off a
+ * tasklet for each if needed
+ */
+ for (i = 0; i < dgap_NumBoards; i++) {
+ brd = dgap_Board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+ * If we can't get it, no big deal, the next poll
+ * will get it. Basically, I just really don't want
+ * to spin in here, because I want to kick off my
+ * tasklets as fast as I can, and then get out the
+ * poller.
+ */
+ if (!spin_trylock(&brd->bd_lock))
+ continue;
+
+ /*
+ * If board is in a failed state, don't bother
+ * scheduling a tasklet
+ */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running)
+ tasklet_schedule(&brd->helper_tasklet);
+
+ /*
+ * Can't do DGAP_UNLOCK here, as we don't have
+ * lock_flags because we did a trylock above.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
+ }
+
+schedule_poller:
+
+ /*
+ * Schedule ourself back at the nominal wakeup interval.
+ */
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time =
+ jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
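The tail of dgap_poll_handler() advances the next wakeup by one tick and, if the poller has fallen badly behind, resynchronises to "now plus one tick" instead of firing a burst of catch-up polls. A simplified sketch of that intent (it resyncs whenever the computed target has already passed, rather than reproducing the exact two-tick window above):

#include <linux/jiffies.h>

static unsigned long next_poll(unsigned long target, unsigned long tick_jiffies)
{
	target += tick_jiffies;		/* nominal next wakeup */
	if (time_after_eq(jiffies, target))
		target = jiffies + tick_jiffies;	/* resync, don't play catch-up */
	return target;
}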
+
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+
+ if (!brd)
+ return IRQ_NONE;
+
+ /*
+ * Check to make sure its for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC)
+ return IRQ_NONE;
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
+/*
+ * dgap_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgap_init_globals(void)
+{
+ int i = 0;
+
+ for (i = 0; i < MAXBOARDS; i++)
+ dgap_Board[i] = NULL;
+
+ init_timer(&dgap_poll_timer);
+
+ init_waitqueue_head(&dgap_dl_wait);
+ dgap_dl_action = 0;
+}
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_ms_sleep()
+ *
+ * Put the driver to sleep for x ms's
+ *
+ * Returns 0 if the sleep timed out, or non-zero if it was interrupted by a signal.
+ */
+static int dgap_ms_sleep(ulong ms)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout((ms * HZ) / 1000);
+ return signal_pending(current);
+}
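For reference, the stock kernel helper msleep_interruptible() covers the same ground as dgap_ms_sleep(): it returns 0 after a full sleep and the remaining milliseconds if a signal cut the sleep short. A roughly equivalent wrapper:

#include <linux/delay.h>
#include <linux/errno.h>

static int sketch_ms_sleep(unsigned int ms)
{
	/* A non-zero return means time was left over, i.e. a signal arrived. */
	return msleep_interruptible(ms) ? -EINTR : 0;
}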
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+static int dgap_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ brd->SerialDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
+ brd->SerialDriver->name = brd->SerialName;
+ brd->SerialDriver->name_base = 0;
+ brd->SerialDriver->major = 0;
+ brd->SerialDriver->minor_start = 0;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->init_termios = DgapDefaultTermios;
+ brd->SerialDriver->driver_name = DRVSTR;
+ brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->SerialDriver->ttys =
+ kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver->ttys)
+ return -ENOMEM;
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgap_tty_open() routine.
+ */
+ brd->PrintDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
+ brd->PrintDriver->name = brd->PrintName;
+ brd->PrintDriver->name_base = 0;
+ brd->PrintDriver->major = 0;
+ brd->PrintDriver->minor_start = 0;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver->init_termios = DgapDefaultTermios;
+ brd->PrintDriver->driver_name = DRVSTR;
+ brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->PrintDriver->ttys =
+ kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver->ttys)
+ return -ENOMEM;
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
+
+ if (!brd->dgap_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(brd->SerialDriver);
+ if (rc < 0)
+ return rc;
+ brd->dgap_Major_Serial_Registered = TRUE;
+ dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
+ brd->dgap_Serial_Major = brd->SerialDriver->major;
+ }
+
+ if (!brd->dgap_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->PrintDriver);
+ if (rc < 0)
+ return rc;
+ brd->dgap_Major_TransparentPrint_Registered = TRUE;
+ dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
+ brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
+ }
+
+ return rc;
+}
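dgap_tty_register() runs the usual alloc/configure/register sequence twice, once for the serial nodes and once for the transparent-print nodes. A minimal sketch of that sequence, with placeholder names and with the allocation check that the driver omits:

#include <linux/tty.h>

static struct tty_driver *sketch_register_tty(const struct tty_operations *ops)
{
	struct tty_driver *drv = alloc_tty_driver(4);	/* 4 ports, illustrative */

	if (!drv)
		return NULL;

	drv->driver_name = "sketch";
	drv->name = "ttySK";
	drv->type = TTY_DRIVER_TYPE_SERIAL;
	drv->subtype = SERIAL_TYPE_NORMAL;
	drv->init_termios = tty_std_termios;
	drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(drv, ops);

	if (tty_register_driver(drv)) {
		put_tty_driver(drv);
		return NULL;
	}
	return drv;
}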
+
+/*
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+static int dgap_tty_init(struct board_t *brd)
+{
+ int i;
+ int tlw;
+ uint true_count = 0;
+ uchar *vaddr;
+ uchar modem = 0;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct cm_t *cm;
+
+ if (!brd)
+ return -ENXIO;
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
+
+ brd->nasync = dgap_config_get_num_prts(brd);
+
+ if (!brd->nasync)
+ brd->nasync = brd->maxports;
+
+ if (brd->nasync > brd->maxports)
+ brd->nasync = brd->maxports;
+
+ if (true_count != brd->nasync) {
+ if ((brd->type == PPCM) && (true_count == 64)) {
+ pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
+ pr_warn("dgap: Please make SURE the EBI cable running from the card\n");
+ pr_warn("dgap: to each EM module is plugged into EBI IN!\n");
+ } else if ((brd->type == PPCM) && (true_count == 0)) {
+ pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
+ pr_warn("dgap: Please make SURE the EBI cable running from the card\n");
+ pr_warn("dgap: to each EM module is plugged into EBI IN!\n");
+ } else
+ pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return -ENXIO;
+ }
+ }
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+ brd->channels[i] =
+ kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ if (!brd->channels[i])
+ return -ENOMEM;
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
+
+ if (!brd->channels[i])
+ continue;
+
+ spin_lock_init(&ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ } else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
+ }
+
+ ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
+ ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
+ ch->ch_tsize / 2;
+ ch->ch_tlw = tlw;
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+static void dgap_tty_uninit(struct board_t *brd)
+{
+ struct device *dev;
+ int i = 0;
+
+ if (brd->dgap_Major_Serial_Registered) {
+ dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
+ brd->dgap_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->SerialPorts[i]);
+ dev = brd->channels[i]->ch_tun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->SerialDriver, i);
+ }
+ tty_unregister_driver(brd->SerialDriver);
+ kfree(brd->SerialDriver->ttys);
+ brd->SerialDriver->ttys = NULL;
+ put_tty_driver(brd->SerialDriver);
+ kfree(brd->SerialPorts);
+ brd->dgap_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgap_Major_TransparentPrint_Registered) {
+ dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
+ brd->dgap_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->PrinterPorts[i]);
+ dev = brd->channels[i]->ch_pun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->PrintDriver, i);
+ }
+ tty_unregister_driver(brd->PrintDriver);
+ kfree(brd->PrintDriver->ttys);
+ brd->PrintDriver->ttys = NULL;
+ put_tty_driver(brd->PrintDriver);
+ kfree(brd->PrinterPorts);
+ brd->dgap_Major_TransparentPrint_Registered = FALSE;
+ }
+}
+
+#define TMPBUFLEN (1024)
+/*
+ * dgap_sniff - Dump data out to the "sniff" buffer if the
+ * proc sniff file is opened...
+ */
+static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text,
+ uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) &
+ SNIFF_MASK;
+
+ /*
+ * If there is no space left to write to in our sniff
+ * buffer, we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because
+ * this function was probably called by the
+ * interrupt/timer routines!
+ */
+ if (n == 0)
+ return;
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf +
+ ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
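The sniff path above treats ch_sniff_buf as a power-of-two ring buffer: free space is "(out - in - 1) & mask", and one slot is deliberately wasted so that "in == out" can only mean empty. The index arithmetic in isolation, with illustrative sizes:

#define SKETCH_MAX	4096u			/* power of two, illustrative */
#define SKETCH_MASK	(SKETCH_MAX - 1)

static unsigned int sketch_free_space(unsigned int in, unsigned int out)
{
	/* Leaving one slot empty distinguishes "full" from "empty". */
	return (out - in - 1) & SKETCH_MASK;
}

static unsigned int sketch_advance(unsigned int idx, unsigned int n)
{
	return (idx + n) & SKETCH_MASK;		/* wrap without a branch */
}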
+
+/*=======================================================================
+ *
+ * dgap_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+
+static void dgap_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct bs_t *bs;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ uint head;
+ uint tail;
+ int data_len;
+ ulong lock_flags;
+ ulong lock_flags2;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ uchar *buf;
+ uchar tmpchar;
+ int s = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+
+ rmask = ch->ch_rsize - 1;
+
+ head = readw(&(bs->rx_head));
+ head &= rmask;
+ tail = readw(&(bs->rx_tail));
+ tail &= rmask;
+
+ data_len = (head - tail) & rmask;
+
+ if (data_len == 0) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If the device is not open, or CREAD is off, flush
+ * input data and return immediately.
+ */
+ if ((bd->state != BOARD_READY) || !tp ||
+ (tp->magic != TTY_MAGIC) ||
+ !(ch->ch_tun.un_flags & UN_ISOPEN) ||
+ !(tp->termios.c_cflag & CREAD) ||
+ (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ writew(head, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_RXBLOCK) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * Ignore oruns.
+ */
+ tmpchar = readb(&(bs->orun));
+ if (tmpchar) {
+ ch->ch_err_overrun++;
+ writeb(0, &(bs->orun));
+ }
+
+ /* Decide how much data we can send into the tty layer */
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ writew(head, &(bs->rx_tail));
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ writeb(1, &(bs->idata));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ buf = ch->ch_bd->flipbuf;
+ n = len;
+
+ /*
+ * n now contains the most amount of data we can copy,
+ * bounded either by our buffer size or the amount
+ * of data the card actually has pending...
+ */
+ while (n) {
+
+ s = ((head >= tail) ? head : ch->ch_rsize) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
+ dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
+
+ tail += s;
+ buf += s;
+
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ writew(tail, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ ch->ch_rxcount += len;
+
+	/*
+	 * If any of the input error reporting flags (PARMRK, BRKINT,
+	 * INPCK) are set, run the data through the parity scanner and
+	 * push it into the tty layer with per-character flags.
+	 *
+	 * Otherwise we are effectively raw, so take the shortest and
+	 * fastest route and push the data straight into the flip buffer.
+	 */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ dgap_parity_scan(ch, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, &len);
+
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, len);
+ } else {
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+	/* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+}
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+static void dgap_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* Make sure altpin is always set correctly */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ } else {
+ ch->ch_dsr = DM_DSR;
+ ch->ch_cd = DM_CD;
+ }
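+	/*
+	 * (With DIGI_ALTPIN the DSR and CD pins are swapped on the
+	 * connector, so the signal masks used below follow that swap.)
+	 */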
+
+ if (ch->ch_mistat & D_CD(ch))
+ phys_carrier = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
+ virt_carrier = 1;
+
+ if (ch->ch_c_cflag & CLOCAL)
+ virt_carrier = 1;
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) &&
+ ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0)) {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0)
+ tty_hangup(ch->ch_tun.un_tty);
+
+ if (ch->ch_pun.un_open_count > 0)
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255)
+ return -ENXIO;
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgap_BoardsByMajor[major];
+ if (!brd)
+ return -ENXIO;
+
+ /*
+ * If board is not yet up to a state of READY, go to
+ * sleep waiting for it to happen or they cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&brd->bd_lock, lock_flags);
+
+ /* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* If opened device is greater than our number of ports, bail. */
+ if (MINOR(tty_devnum(tty)) > brd->nasync) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Grab channel lock */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->dgap_Serial_Major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ } else if (major == brd->dgap_TransparentPrint_Major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ } else {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ /*
+ * Error if channel info pointer is NULL.
+ */
+ bs = ch->ch_bs;
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+	/*
+	 * Initialize the unit on its first open.
+	 */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+	/*
+	 * Initialize the channel if neither the terminal nor the
+	 * printer unit is open.
+	 */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(tty);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty)
+ return -ENODEV;
+
+ /* No going back now, increment our unit and channel counters */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return rc;
+}
+
+/*
+ * dgap_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
+ struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
+ ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep,
+ * bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+		/*
+		 * If either unit is in the middle of the fragile part of close,
+		 * we just cannot touch the channel safely.
+		 * Go back to sleep, knowing that when the channel can be
+		 * touched safely, the close routine will signal the flags
+		 * wait queue to wake us back up.
+		 */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
+ UN_CLOSING)) {
+
+			/*
+			 * Our conditions to leave cleanly and happily:
+			 * 1) NONBLOCKING on the tty is set.
+			 * 2) The tty is flagged with an I/O error.
+			 * 3) DCD (fake or real) is active.
+			 */
+
+ if (file->f_flags & O_NONBLOCK)
+ break;
+
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ break;
+
+ if (ch->ch_flags & CH_CD)
+ break;
+
+ if (ch->ch_flags & CH_FCAR)
+ break;
+ } else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
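+
+		/*
+		 * The snapshot is taken while still holding the channel lock
+		 * so that the wait conditions below can reliably detect any
+		 * change that happens after the lock is dropped.
+		 */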
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /*
+ * Wait for something in the flags to change
+ * from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags |
+ ch->ch_pun.un_flags)));
+ } else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgap_tty_hangup(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
+
+}
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+	/*
+	 * Determine whether this is the last close, and whether we and
+	 * the line discipline agree about which type of close it is.
+	 */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0)
+ un->un_open_count = 0;
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+	/* OK, it's the last close on the unit */
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) &&
+ !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ rc = dgap_wait_for_drain(tty);
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ if (ch->ch_close_delay) {
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+ dgap_ms_sleep(ch->ch_close_delay);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+	/*
+	 * Turn the printer off when closing the print device.
+	 */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+}
+
+/*
+ * dgap_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd = NULL;
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ uchar tbusy;
+ uint chars = 0;
+ u16 thead, ttail, tmask, chead, ctail;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+
+ if (tty == NULL)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ tmask = (ch->ch_tsize - 1);
+
+ /* Get Transmit queue pointers */
+ thead = readw(&(bs->tx_head)) & tmask;
+ ttail = readw(&(bs->tx_tail)) & tmask;
+
+ /* Get tbusy flag */
+ tbusy = readb(&(bs->tbusy));
+
+ /* Get Command queue pointers */
+ chead = readw(&(ch->ch_cm->cm_head));
+ ctail = readw(&(ch->ch_cm->cm_tail));
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ /*
+ * The only way we know for sure if there is no pending
+ * data left to be transferred, is if:
+ * 1) Transmit head and tail are equal (empty).
+ * 2) Command queue head and tail are equal (empty).
+ * 3) The "TBUSY" flag is 0. (Transmitter not busy).
+ */
+
+ if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
+ chars = 0;
+ } else {
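+		/* Occupancy of the circular transmit buffer, handling wrap. */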
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + ch->ch_tsize;
+		/*
+		 * Fudge factor here.
+		 * If chars is zero, we know that the command queue had
+		 * something in it or tbusy was set. Because we cannot
+		 * be sure if there is still some data to be transmitted,
+		 * let's lie and tell the ld we have 1 byte left.
+		 */
+ if (chars == 0) {
+ /*
+ * If TBUSY is still set, and our tx buffers are empty,
+ * force the firmware to send me another wakeup after
+ * TBUSY has been cleared.
+ */
+ if (tbusy != 0) {
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+ }
+ chars = 1;
+ }
+ }
+
+ return chars;
+}
+
+static int dgap_wait_for_drain(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ int ret = -EIO;
+ uint count = 1;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return ret;
+
+ ret = 0;
+
+ /* Loop until data is drained */
+ while (count != 0) {
+
+ count = dgap_tty_chars_in_buffer(tty);
+
+ if (count == 0)
+ break;
+
+ /* Set flag waiting for drain */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* Go to sleep till we get woken up */
+ ret = wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (ret)
+ break;
+ }
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ un->un_flags &= ~(UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return ret;
+}
+
+/*
+ * dgap_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (tty == NULL)
+ return bytes_available;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return bytes_available;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return bytes_available;
+
+	/*
+	 * If it's not the transparent print device, return
+	 * the full data amount.
+	 */
+ if (un->un_type != DGAP_PRINT)
+ return bytes_available;
+
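+	/*
+	 * ch_cpstime tracks (in jiffies) when the printer's internal
+	 * buffer is expected to have drained at digi_maxcps characters
+	 * per second; the gap between that time and "now" bounds how many
+	 * more characters may be queued without overrunning the printer.
+	 */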
+ if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) /
+ ch->ch_digi.digi_maxcps;
+
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ } else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) *
+ ch->ch_digi.digi_maxcps) / HZ;
+ } else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
+
+ return bytes_available;
+}
+
+static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
+{
+ struct channel_t *ch = NULL;
+ struct bs_t *bs = NULL;
+
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
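+	/*
+	 * UN_LOW asks the FEP for a "transmit buffer low water" event,
+	 * UN_EMPTY for a "transmit buffer empty" event; each request is
+	 * made only once until the corresponding wakeup clears the flag.
+	 */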
+ if ((event & UN_LOW) != 0) {
+ if ((un->un_flags & UN_LOW) == 0) {
+ un->un_flags |= UN_LOW;
+ writeb(1, &(bs->ilow));
+ }
+ }
+	if ((event & UN_EMPTY) != 0) {
+ if ((un->un_flags & UN_EMPTY) == 0) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ }
+}
+
+/*
+ * dgap_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgap_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ u16 head, tail, tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (!tty)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ ret = tail - head - 1;
+ if (ret < 0)
+ ret += ch->ch_tsize;
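+
+	/*
+	 * Classic circular-buffer free-space calculation: one slot is
+	 * left unused so that head == tail always means "empty".
+	 */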
+
+ /* Limit printer to maxcps */
+ ret = dgap_maxcps_room(tty, ret);
+
+ /*
+ * If we are printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGAP_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ } else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ /*
+ * Schedule FEP to wake us up if needed.
+ *
+ * TODO: This might be overkill...
+ * Do we really need to schedule callbacks from the FEP
+ * in every case? Can we get smarter based on ret?
+ */
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return ret;
+}
+
+/*
+ * dgap_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+/*
+ * dgap_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ char *vaddr = NULL;
+ u16 head, tail, tmask, remain;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+
+ if (!tty)
+ return 0;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return 0;
+
+ if (!count)
+ return 0;
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ bufcount = tail - head - 1;
+ if (bufcount < 0)
+ bufcount += ch->ch_tsize;
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgap_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return 0;
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ /*
+ * If there is nothing left to copy, or
+ * I can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return 0;
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (n >= remain) {
+ n -= remain;
+ vaddr = ch->ch_taddr + head;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf,
+ remain);
+
+ head = ch->ch_tstart;
+ buf += remain;
+ }
+
+ if (n > 0) {
+
+ /*
+ * Move rest of data.
+ */
+ vaddr = ch->ch_taddr + head;
+ remain = n;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *)buf,
+ remain);
+
+ head += remain;
+
+ }
+
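+	/*
+	 * Publish the new head pointer only after the data has been copied
+	 * into board memory; this is what tells the FEP it has work to do.
+	 */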
+ if (count) {
+ ch->ch_txcount += count;
+ head &= tmask;
+ writew(head, &(bs->tx_head));
+ }
+
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle. If the buffer is
+ * non-empty, wait until it goes empty.
+ * Otherwise turn it off right now.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if (tail != head) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ } else {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ /* Update printer buffer empty time. */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return count;
+}
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_tty_tiocmget(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return result;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ return result;
+}
+
+/*
+ * dgap_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgap_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 1;
+ break;
+ default:
+ msec /= 10;
+ break;
+ }
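+
+	/*
+	 * The FEP appears to take the break duration in 10 ms ticks, hence
+	 * the divide by ten; -1 maps to the longest break the firmware
+	 * supports.
+	 */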
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+#if 0
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+#endif
+ dgap_cmdw(ch, SBREAK, (u16) msec, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ dgap_wait_for_drain(tty);
+}
+
+/*
+ * dgap_send_xchar()
+ *
+ * send a high priority character, called by ld.
+ */
+static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+	/*
+	 * This is technically what we should do.
+	 * However, the NIST tests specifically want
+	 * to see each XON or XOFF character that we
+	 * send, so let's just send each character
+	 * by hand...
+	 */
+#if 0
+ if (c == STOP_CHAR(tty))
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+ else if (c == START_CHAR(tty))
+ dgap_cmdw(ch, RRESUME, 0, 0);
+ else
+ dgap_wmove(ch, &c, 1);
+#else
+ dgap_wmove(ch, &c, 1);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return;
+}
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result = 0;
+ uchar mstat = 0;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ rc = put_user(result, value);
+
+ return rc;
+}
+
+/*
+ * dgap_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command,
+ unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ ret = get_user(arg, value);
+ if (ret)
+ return ret;
+
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ case TIOCMSET:
+ ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
+
+ if (arg & TIOCM_RTS)
+ ch->ch_mval |= D_RTS(ch);
+ else
+ ch->ch_mval &= ~(D_RTS(ch));
+
+ if (arg & TIOCM_DTR)
+ ch->ch_mval |= (D_DTR(ch));
+ else
+ ch->ch_mval &= ~(D_DTR(ch));
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgap_tty_digigeta(struct tty_struct *tty,
+ struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgap_tty_digiseta(struct tty_struct *tty,
+ struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags = 0;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigetedelay()
+ *
+ * Ioctl to get the current edelay setting.
+ */
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ tmp = readw(&(ch->ch_bs->edelay));
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digisetedelay()
+ *
+ * Ioctl to set the EDELAY setting.
+ */
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int new_digi;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+ if (copy_from_user(&new_digi, new_info, sizeof(int)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ writew((u16) new_digi, &(ch->ch_bs->edelay));
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digigetcustombaud()
+ *
+ * Ioctl to get the current custom baud rate setting.
+ */
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty,
+ int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ tmp = dgap_get_custom_baud(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * dgap_tty_digisetcustombaud()
+ *
+ * Ioctl to set the custom baud rate setting
+ */
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty,
+ int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint new_rate;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -EFAULT;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -EFAULT;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -EFAULT;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EFAULT;
+
+
+ if (copy_from_user(&new_rate, new_info, sizeof(unsigned int)))
+ return -EFAULT;
+
+ if (bd->bd_flags & BD_FEP5PLUS) {
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_custom_speed = new_rate;
+
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_set_termios()
+ */
+static void dgap_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ dgap_carrier(ch);
+ dgap_param(tty);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+static void dgap_tty_throttle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags |= (CH_RXBLOCK);
+#if 1
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+static void dgap_tty_unthrottle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+#if 1
+ dgap_cmdw(ch, RRESUME, 0, 0);
+#endif
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+static void dgap_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+static void dgap_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, PAUSETX, 0, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+}
+
+/*
+ * dgap_tty_flush_chars()
+ *
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgap_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* TODO: Do something here */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*
+ * dgap_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgap_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ u16 head = 0;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -ENODEV;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENODEV;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENODEV;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (un->un_open_count <= 0) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+		/*
+		 * TCSBRK is the SVID version: a non-zero arg --> no break;
+		 * this behaviour is exploited by tcdrain().
+ *
+ * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds so we'll ask for something
+ * in the middle: 0.375 seconds.
+ */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TCSBRKP:
+		/*
+		 * Support for POSIX tcsendbreak().
+		 *
+		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+		 * between 0.25 and 0.5 seconds so we'll ask for something
+		 * in the middle: 0.375 seconds.
+		 */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCSBRK:
+ /*
+ * FEP5 doesn't support turning on a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ rc = tty_check_change(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (rc)
+ return rc;
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCCBRK:
+ /*
+ * FEP5 doesn't support turning off a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long __user *) arg);
+ return rc;
+
+ case TIOCSSOFTCAR:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
+ (arg ? CLOCAL : 0));
+ dgap_param(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+
+ case TIOCMGET:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_get_modem_info(ch, uarg);
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_set_modem_info(tty, cmd, uarg);
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return rc;
+ }
+
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGAP_PRINT)) {
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ writeb(0, &(ch->ch_bs->orun));
+ }
+ }
+
+ if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
+ /* pretend we didn't recognize this IOCTL */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return -ENOIOCTLCMD;
+ }
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+
+ /* Can't hold any locks when calling tty_wakeup! */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ tty_wakeup(tty);
+
+ /* pretend we didn't recognize this IOCTL */
+ return -ENOIOCTLCMD;
+
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ }
+
+ /* now wait for all the output to drain */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ /* pretend we didn't recognize this */
+ return -ENOIOCTLCMD;
+
+ case TCSETAW:
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+
+ /* pretend we didn't recognize this */
+ return -ENOIOCTLCMD;
+
+ case TCXONC:
+		/*
+		 * The Linux Line Discipline (LD) would do this for us if we
+		 * let it, but we have the special firmware options to do this
+		 * the "right way" regardless of hardware or software flow
+		 * control, so we'll do it ourselves instead of letting the LD
+		 * do it.
+		 */
+ rc = tty_check_change(tty);
+ if (rc) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return rc;
+ }
+
+ switch (arg) {
+
+ case TCOON:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_tty_start(tty);
+ return 0;
+ case TCOOFF:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_tty_stop(tty);
+ return 0;
+ case TCION:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return -ENOIOCTLCMD;
+ case TCIOFF:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return -ENOIOCTLCMD;
+ default:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -EINVAL;
+ }
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigeta(tty, uarg);
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc)
+ return -EINTR;
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ } else
+ tty_ldisc_flush(tty);
+ /* fall thru */
+
+ case DIGI_SETA:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digiseta(tty, uarg);
+
+ case DIGI_GEDELAY:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigetedelay(tty, uarg);
+
+ case DIGI_SEDELAY:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digisetedelay(tty, uarg);
+
+ case DIGI_GETCUSTOMBAUD:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digigetcustombaud(tty, uarg);
+
+ case DIGI_SETCUSTOMBAUD:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return dgap_tty_digisetcustombaud(tty, uarg);
+
+ case DIGI_RESET_PORT:
+ dgap_firmware_reset_port(ch);
+ dgap_param(tty);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return 0;
+
+ default:
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int dgap_after_config_loaded(int board)
+{
+ /*
+ * Initialize KME waitqueues...
+ */
+ init_waitqueue_head(&(dgap_Board[board]->kme_wait));
+
+ /*
+ * allocate flip buffer for board.
+ */
+ dgap_Board[board]->flipbuf = kmalloc(MYFLIPLEN, GFP_ATOMIC);
+ if (!dgap_Board[board]->flipbuf)
+ return -ENOMEM;
+
+ dgap_Board[board]->flipflagbuf = kmalloc(MYFLIPLEN, GFP_ATOMIC);
+ if (!dgap_Board[board]->flipflagbuf) {
+ kfree(dgap_Board[board]->flipbuf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Create printer and tty device entries
+ */
+static int dgap_tty_register_ports(struct board_t *brd)
+{
+ struct channel_t *ch;
+ int i;
+
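+	/*
+	 * Each channel is exposed twice: as a normal serial tty and as a
+	 * transparent print tty, so allocate a tty_port for each flavor.
+	 */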
+ brd->SerialPorts = kcalloc(brd->nasync, sizeof(*brd->SerialPorts),
+ GFP_KERNEL);
+ if (brd->SerialPorts == NULL)
+ return -ENOMEM;
+ for (i = 0; i < brd->nasync; i++)
+ tty_port_init(&brd->SerialPorts[i]);
+
+ brd->PrinterPorts = kcalloc(brd->nasync, sizeof(*brd->PrinterPorts),
+ GFP_KERNEL);
+ if (brd->PrinterPorts == NULL) {
+ kfree(brd->SerialPorts);
+ return -ENOMEM;
+ }
+ for (i = 0; i < brd->nasync; i++)
+ tty_port_init(&brd->PrinterPorts[i]);
+
+ ch = brd->channels[0];
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ struct device *classp;
+
+ classp = tty_port_register_device(&brd->SerialPorts[i],
+ brd->SerialDriver,
+ brd->firstminor + i, NULL);
+
+ dgap_create_tty_sysfs(&ch->ch_tun, classp);
+ ch->ch_tun.un_sysfs = classp;
+
+ classp = tty_port_register_device(&brd->PrinterPorts[i],
+ brd->PrintDriver,
+ brd->firstminor + i, NULL);
+
+ dgap_create_tty_sysfs(&ch->ch_pun, classp);
+ ch->ch_pun.un_sysfs = classp;
+ }
+ dgap_create_ports_sysfiles(brd);
+
+ return 0;
+}
+
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+static void dgap_do_bios_load(struct board_t *brd, const uchar *ubios, int len)
+{
+ uchar *addr;
+ uint offset;
+ int i;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
+
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ubios, len);
+
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
+}
+
+/*
+ * Checks to see if the BIOS completed running on the card.
+ */
+static int dgap_do_wait_for_bios(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
+ int ret = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return ret;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
+
+ /*
+ * It can take 5-6 seconds for a board to
+ * pass the bios self test and post results.
+ * Give it 10 seconds.
+ */
+ brd->wait_for_bios = 0;
+ while (brd->wait_for_bios < 1000) {
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD")
+ return 1;
+ msleep_interruptible(10);
+ brd->wait_for_bios++;
+ word = readw(addr + POSTAREA);
+ }
+
+ /* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ pr_warn("dgap: %s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOBIOS;
+
+ return ret;
+}
+
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+static void dgap_do_fep_load(struct board_t *brd, const uchar *ufep, int len)
+{
+ uchar *addr;
+ uint offset;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ufep, len);
+
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ uchar string[100];
+ uchar *config, *xconfig;
+ int i = 0;
+
+ xconfig = dgap_create_config_string(brd, string);
+
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
+
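+ /*
+ * Magic values carried over from the original Digi loader; presumably
+ * they point the on-board CPU at the freshly downloaded FEP image and
+ * start it running.
+ */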
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
+
+}
+
+/*
+ * Waits for the FEP to report that it's ready for us to use.
+ */
+static int dgap_do_wait_for_fep(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
+ int ret = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return ret;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + FEPSTAT);
+
+ /*
+ * It can take 2-3 seconds for the FEP to
+ * be up and running. Give it 5 secs.
+ */
+ brd->wait_for_fep = 0;
+ while (brd->wait_for_fep < 500) {
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A")
+ brd->bd_flags |= BD_FEP5PLUS;
+
+ return 1;
+ }
+ msleep_interruptible(10);
+ brd->wait_for_fep++;
+ word = readw(addr + FEPSTAT);
+ }
+
+ /* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ pr_warn("dgap: FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ return ret;
+}
+
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ uchar check;
+ u32 check1;
+ u32 check2;
+ int i = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
+ !brd->re_map_membase || !brd->re_map_port)
+ return;
+
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
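+ /* Poll up to ~10ms (1000 x 10us) for the FEP to acknowledge the reset */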
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
+ break;
+ udelay(10);
+
+ }
+ if (i > 1000) {
+ pr_warn("dgap: Board not resetting... Failing board.\n");
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
+
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ pr_warn("dgap: No memory at %p for board.\n",
+ brd->re_map_membase);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+}
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+static void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
+{
+ char *vaddr;
+ u16 offset = 0;
+ struct downld_t *to_dp;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ vaddr = brd->re_map_membase;
+
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+ memcpy_toio(to_dp, uaddr, len);
+
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
+
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+#endif
+
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
+
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ uchar byte1;
+ uchar byte2;
+
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
+
+ /*
+ * To get to the OTPROM memory, we have to send the board's base
+ * address or'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
+
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
+
+ base_offset = 0;
+
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
+
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data
+ * Structure.
+ *
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
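+ /*
+ * These offsets follow the standard PCI expansion ROM layout:
+ * offset 0x18 of the ROM header points to the PCI Data Structure,
+ * whose 0x10 field holds the image length in 512-byte units and
+ * whose 0x08 field holds the VPD pointer.
+ */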
+ rom_offset = readw(brd->re_map_membase +
+ base_offset + 0x18);
+ image_length = readw(brd->re_map_membase +
+ rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase +
+ rom_offset + 0x08);
+
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
+
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
+ break;
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++) {
+ brd->vpd[i] = readb(brd->re_map_membase +
+ vpd_offset + i);
+ }
+ }
+ }
+
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
+
+/*
+ * Our board poller function.
+ */
+static void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ char *vaddr;
+ u16 head, tail;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
+ return;
+
+ if (bd->inhibit_poller)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t *eaddr = NULL;
+
+ if (!bd->re_map_membase) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync)
+ goto out;
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * If there is an event pending. Go service it.
+ */
+ if (head != tail) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running)
+ readb(bd->re_map_port + 2);
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1,
+ uchar byte2, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
+
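+ /* Advance the head by one 4-byte command slot, wrapping within the circular buffer */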
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
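+ /* Bytes of commands still outstanding between the FEP's tail and our head */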
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
+
+ writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
+ writew((u16) word, (char *) (vaddr + CMDSTART));
+ else
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
+
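+ /* An extended command occupies two 4-byte slots, so advance the head by 8 */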
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char *taddr;
+ struct bs_t *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) ||
+ (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
+ return;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
+
+/*
+ * Retrieves the current custom baud rate from FEP memory
+ * and returns it to the caller.
+ * Returns 0 on error.
+ */
+static uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ uchar *vaddr;
+ ulong offset = 0;
+ uint value = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return 0;
+
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return 0;
+
+ /*
+ * Read from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
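+ /*
+ * The channel-settings area starts at the segment held in ECS_SEG
+ * (shifted into a byte offset); each port's block is 0x28 bytes.
+ */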
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ (ch->ch_portnum * 0x28) + LINE_SPEED));
+
+ value = readw(vaddr + offset);
+ return value;
+}
+
+/*
+ * Calls the firmware to reset this channel.
+ */
+static void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * struct tty_struct * - TTY for port.
+ *
+ *=======================================================================*/
+static int dgap_param(struct tty_struct *tty)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct un_t *un;
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ uchar mval;
+ uchar hflow;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return -ENXIO;
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return -ENXIO;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return -ENXIO;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return -ENXIO;
+
+ ts = &tty->termios;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
+ /*
+ * Tell the fep to do the command
+ */
+
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
+
+ /*
+ * Now read back from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ ch->ch_custom_speed = dgap_get_custom_baud(ch);
+ ch->ch_baud_info = ch->ch_custom_speed;
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /*
+ * Only use the TXPrint baud rate if the
+ * terminal unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGAP_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
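+ /* "baud" is still the CBAUD index here, not an actual rate; it selects a column in the bauds[][] table above */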
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
+ baud = bauds[iindex][jindex];
+ else
+ baud = 0;
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+ /*
+ * CBAUD has bit position 0x1000 set these days to
+ * indicate Linux baud rate remap.
+ * We use a different bit assignment for high speed.
+ * Clear this bit out while grabbing the parts of
+ * "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
+ CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
+ (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+ if ((ch->ch_c_cflag & CBAUDEX) &&
+ !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The below code is trying to guarantee that only
+ * baud rates 115200, 230400, 460800, 921600 are
+ * remapped. We use exclusive or because the various
+ * baud rates share common bit positions and therefore
+ * can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /*
+ * Map high speed requests to index
+ * into FEP's baud table
+ */
+ switch (tcflag) {
+ case B57600:
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800:
+ baudpart = 2;
+ break;
+#endif
+ case B115200:
+ baudpart = 3;
+ break;
+ case B230400:
+ baudpart = 9;
+ break;
+ case B460800:
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600:
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /*
+ * Okay to have channel and board
+ * locks held calling this
+ */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+ }
+
+ /*
+ * Get input flags.
+ */
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
+ INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
+
+ /*
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEV_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
+ */
+ hflow = 0;
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
+
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
+ }
+
+
+ /*
+ * Set RTS and/or DTR Toggle if needed,
+ * but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ hflow2 |= (D_RTS(ch));
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ hflow2 |= (D_DTR(ch));
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
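+ /* For any bit set in ch_mforce, take that signal's value from ch_mval instead of the computed mval */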
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc ||
+ ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc ||
+ ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * dgap_parity_scan()
+ *
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
+ */
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len)
+{
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
+
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
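+ /*
+ * The FEP escapes status in-band with 0xff: 0xff 0xff is a literal
+ * 0xff data byte, while 0xff 0x00 <c> flags <c> as received with a
+ * parity error (or as a break if <c> is also 0x00).
+ */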
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377')
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save value for examination in next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ } else {
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
+ }
+ *len = count;
+}
+
+static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
+ struct un_t *un, u32 mask,
+ unsigned long *irq_flags1,
+ unsigned long *irq_flags2)
+{
+ if (!(un->un_flags & mask))
+ return;
+
+ un->un_flags &= ~mask;
+
+ if (!(un->un_flags & UN_ISOPEN))
+ return;
+
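+ /*
+ * Drop both locks around the line-discipline callback; it may call
+ * back into this driver and try to take them again.
+ */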
+ if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ un->un_tty->ldisc->ops->write_wakeup) {
+ spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
+
+ (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
+
+ spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
+ spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
+ }
+ wake_up_interruptible(&un->un_tty->write_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+}
+
+/*=======================================================================
+ *
+ * dgap_event - FEP to host event processing routine.
+ *
+ * bd - Board of current event.
+ *
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
+{
+ struct channel_t *ch;
+ ulong lock_flags;
+ ulong lock_flags2;
+ struct bs_t *bs;
+ uchar *event;
+ uchar *vaddr = NULL;
+ struct ev_t *eaddr = NULL;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ if (!vaddr) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ /* Let go of board lock */
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /*
+ * Loop to process all the events in the buffer.
+ */
+ while (tail != head) {
+
+ /*
+ * Get interrupt information.
+ */
+
+ event = bd->re_map_membase + tail + EVSTART;
+
+ port = event[0];
+ reason = event[1];
+ modem = event[2];
+ b1 = event[3];
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if (port >= bd->nasync)
+ goto next;
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
+ goto next;
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ goto next;
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible
+ (&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room
+ (ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port,
+ 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+
+next:
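+ /* Each event is 4 bytes; advance the tail and wrap within the event buffer */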
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ }
+
+ writew(tail, &(eaddr->ev_tail));
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
+
+
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
+
+
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
+
+
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
+
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
+
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
+ return -EINVAL;
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
+ dgap_driver_pollrate_store);
+
+static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+
+ return rc;
+}
+
+static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+}
+
+static struct board_t *dgap_verify_board(struct device *p)
+{
+ struct board_t *bd;
+
+ if (!p)
+ return NULL;
+
+ bd = dev_get_drvdata(p);
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
+ return NULL;
+
+ return bd;
+}
+
+static ssize_t dgap_ports_state_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
+
+static ssize_t dgap_ports_baud_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_baud_info);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
+
+static ssize_t dgap_ports_msignals_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n",
+ bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat &
+ UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat &
+ UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat &
+ UART_MSR_RI) ? "RI" : "");
+ else
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
+
+static ssize_t dgap_ports_iflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_iflag);
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
+
+static ssize_t dgap_ports_cflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_cflag);
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
+
+static ssize_t dgap_ports_oflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_oflag);
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
+
+static ssize_t dgap_ports_lflag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_c_lflag);
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
+
+static ssize_t dgap_ports_digi_flag_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_digi.digi_flags);
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
+
+static ssize_t dgap_ports_rxcount_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_rxcount);
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
+
+static ssize_t dgap_ports_txcount_show(struct device *p,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ bd = dgap_verify_board(p);
+ if (!bd)
+ return 0;
+
+ for (i = 0; i < bd->nasync; i++)
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_txcount);
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
+
+/* This function creates the sysfs files that export each signal's status;
+ * each value is placed in a separate file.
+ */
+static void dgap_create_ports_sysfiles(struct board_t *bd)
+{
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+/* removes all the sys files created for that port */
+static void dgap_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+static ssize_t dgap_tty_state_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
+ "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
+
+static ssize_t dgap_tty_baud_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
+
+static ssize_t dgap_tty_msignals_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
+
+static ssize_t dgap_tty_iflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
+
+static ssize_t dgap_tty_cflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
+
+static ssize_t dgap_tty_oflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
+
+static ssize_t dgap_tty_lflag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
+
+static ssize_t dgap_tty_digi_flag_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
+
+static ssize_t dgap_tty_rxcount_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
+
+static ssize_t dgap_tty_txcount_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
+
+static ssize_t dgap_tty_name_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int cn;
+ int bn;
+ struct cnode *cptr = NULL;
+ int found = FALSE;
+ int ncount = 0;
+ int starto = 0;
+ int i = 0;
+
+ if (!d)
+ return 0;
+ un = dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return 0;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
+ if (bd->state != BOARD_READY)
+ return 0;
+
+ bn = bd->boardnum;
+ cn = ch->ch_portnum;
+
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) ||
+ (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) ||
+ (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ if (cptr->u.board.v_start)
+ starto = cptr->u.board.start;
+ else
+ starto = 1;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ } else
+ ptr1 = cptr->u.ttyname;
+
+ for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
+ if (cn != i)
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ ptr1, i + starto);
+ }
+ }
+
+ if (cptr->type == CNODE) {
+
+ for (i = 0; i < cptr->u.conc.nport; i++) {
+ if (cn != (i + ncount))
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ cptr->u.conc.id,
+ i + (cptr->u.conc.v_start ?
+ cptr->u.conc.start : 1));
+ }
+
+ ncount += cptr->u.conc.nport;
+ }
+
+ if (cptr->type == MNODE) {
+
+ for (i = 0; i < cptr->u.module.nport; i++) {
+ if (cn != (i + ncount))
+ continue;
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
+ (un->un_type == DGAP_PRINT) ?
+ "pr" : "tty",
+ cptr->u.module.id,
+ i + (cptr->u.module.v_start ?
+ cptr->u.module.start : 1));
+ }
+
+ ncount += cptr->u.module.nport;
+
+ }
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
+
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
+
+static struct attribute *dgap_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
+
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret)
+ return;
+
+ dev_set_drvdata(c, un);
+
+}
+
+static void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
+
+/*
+ * Parse a configuration file read into memory as a string.
+ */
+static int dgap_parsefile(char **in, int Remove)
+{
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s = NULL;
+ int linecnt = 0;
+
+ p = &dgap_head;
+ brd = line = conc = NULL;
+
+ /* perhaps we are adding to an existing list? */
+ while (p->next != NULL)
+ p = p->next;
+
+ /* file must start with a BEGIN */
+ while ((rc = dgap_gettok(in, p)) != BEGIN) {
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return -1;
+ }
+ }
+
+ for (; ;) {
+ rc = dgap_gettok(in, p);
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return -1;
+ }
+
+ switch (rc) {
+ case 0:
+ dgap_err("unexpected end of file");
+ return -1;
+
+ case BEGIN: /* should only be 1 begin */
+ dgap_err("unexpected config_begin\n");
+ return -1;
+
+ case END:
+ return 0;
+
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(BNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+
+ p->u.board.status = dgap_savestring("No");
+ line = conc = NULL;
+ brd = p;
+ linecnt = -1;
+ break;
+
+ case APORT2_920P: /* AccelePort_2 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_2r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT2_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case APORT4_920P: /* AccelePort_4 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT4_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case APORT8_920P: /* AccelePort_8 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r_920 string");
+ return -1;
+ }
+ p->u.board.type = APORT8_920P;
+ p->u.board.v_type = 1;
+ break;
+
+ case PAPORT4: /* AccelePort_4 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r(PCI) string");
+ return -1;
+ }
+ p->u.board.type = PAPORT4;
+ p->u.board.v_type = 1;
+ break;
+
+ case PAPORT8: /* AccelePort_8 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r string");
+ return -1;
+ }
+ p->u.board.type = PAPORT8;
+ p->u.board.v_type = 1;
+ break;
+
+ case PCX: /* PCI C/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_C/X_(PCI) string");
+ return -1;
+ }
+ p->u.board.type = PCX;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ break;
+
+ case PEPC: /* PCI EPC/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
+ return -1;
+ }
+ p->u.board.type = PEPC;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ break;
+
+ case PPCM: /* PCI/Xem */
+ if (p->type != BNODE) {
+ dgap_err("unexpected PCI/Xem string");
+ return -1;
+ }
+ p->u.board.type = PPCM;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ break;
+
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ dgap_err("IO port only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.portstr = dgap_savestring(s);
+ if (kstrtol(s, 0, &p->u.board.port)) {
+ dgap_err("bad number for IO port");
+ return -1;
+ }
+ p->u.board.v_port = 1;
+ break;
+
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ dgap_err("memory address only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.addrstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.addr)) {
+ dgap_err("bad number for memory address");
+ return -1;
+ }
+ p->u.board.v_addr = 1;
+ break;
+
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ dgap_err("pci info only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcibusstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.pcibus)) {
+ dgap_err("bad number for pci bus");
+ return -1;
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcislotstr = dgap_savestring(s);
+ if (kstrtoul(s, 0, &p->u.board.pcislot)) {
+ dgap_err("bad number for pci slot");
+ return -1;
+ }
+ p->u.board.v_pcislot = 1;
+ break;
+
+ case METHOD:
+ if (p->type != BNODE) {
+ dgap_err("install method only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.method = dgap_savestring(s);
+ p->u.board.v_method = 1;
+ break;
+
+ case STATUS:
+ if (p->type != BNODE) {
+ dgap_err("config status only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = dgap_savestring(s);
+ break;
+
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.nport)) {
+ dgap_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ dgap_err("nports only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+
+ if (p->type == CNODE) {
+ p->u.conc.id = dgap_savestring(s);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = dgap_savestring(s);
+ p->u.module.v_id = 1;
+ } else {
+ dgap_err("id only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.start)) {
+ dgap_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.module.v_start = 1;
+ } else {
+ dgap_err("start only valid for concentrators or modules");
+ return -1;
+ }
+ break;
+
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(TNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.ttyname = dgap_savestring(s);
+ if (!p->u.ttyname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(CUNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.cuname = dgap_savestring(s);
+ if (!p->u.cuname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return -1;
+ if (brd == NULL) {
+ dgap_err("must specify board before line info");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ dgap_err("line not vaild for PC/em");
+ return -1;
+ }
+ p->next = dgap_newnode(LNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ break;
+
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return -1;
+ if (line == NULL) {
+ dgap_err("must specify line info before concentrator");
+ return -1;
+ }
+ p->next = dgap_newnode(CNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ conc = p;
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
+
+ break;
+
+ case CX: /* c/x type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return -1;
+ }
+ p->u.conc.type = CX;
+ p->u.conc.v_type = 1;
+ break;
+
+ case EPC: /* epc type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return -1;
+ }
+ p->u.conc.type = EPC;
+ p->u.conc.v_type = 1;
+ break;
+
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return -1;
+ if (brd == NULL) {
+ dgap_err("must specify board info before EBI modules");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (conc == NULL) {
+ dgap_err("must specify concentrator info before EBI module");
+ return -1;
+ }
+ }
+ p->next = dgap_newnode(MNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
+
+ break;
+
+ case PORTS: /* ports type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("ports only valid for EBI modules");
+ return -1;
+ }
+ p->u.module.type = PORTS;
+ p->u.module.v_type = 1;
+ break;
+
+ case MODEM: /* modem type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("modem only valid for modem modules");
+ return -1;
+ }
+ p->u.module.type = MODEM;
+ p->u.module.v_type = 1;
+ break;
+
+ case CABLE:
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.line.cable = dgap_savestring(s);
+ p->u.line.v_cable = 1;
+ }
+ break;
+
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.line.speed)) {
+ dgap_err("bad number for line speed");
+ return -1;
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.speed)) {
+ dgap_err("bad number for line speed");
+ return -1;
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ dgap_err("speed valid only for lines or concentrators.");
+ return -1;
+ }
+ break;
+
+ case CONNECT:
+ if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ p->u.conc.connect = dgap_savestring(s);
+ p->u.conc.v_connect = 1;
+ }
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(PNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (!s) {
+ dgap_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.printname = dgap_savestring(s);
+ if (!p->u.printname) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ break;
+
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(JNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.majornumber)) {
+ dgap_err("bad number for major number");
+ return -1;
+ }
+ break;
+
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(ANODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.altpin)) {
+ dgap_err("bad number for altpin");
+ return -1;
+ }
+ break;
+
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(INTRNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.useintr)) {
+ dgap_err("bad number for useintr");
+ return -1;
+ }
+ break;
+
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(TSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.ttysize)) {
+ dgap_err("bad number for ttysize");
+ return -1;
+ }
+ break;
+
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(CSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.chsize)) {
+ dgap_err("bad number for chsize");
+ return -1;
+ }
+ break;
+
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(BSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.bssize)) {
+ dgap_err("bad number for bssize");
+ return -1;
+ }
+ break;
+
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(USNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.unsize)) {
+ dgap_err("bad number for schedsize");
+ return -1;
+ }
+ break;
+
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(FSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.f2size)) {
+ dgap_err("bad number for f2200size");
+ return -1;
+ }
+ break;
+
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return -1;
+ p->next = dgap_newnode(VSNODE);
+ if (!p->next) {
+ dgap_err("out of memory");
+ return -1;
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.vpixsize)) {
+ dgap_err("bad number for vpixsize");
+ return -1;
+ }
+ break;
+ }
+ }
+}
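+
+/*
+ * Illustration only: a successful parse leaves dgap_head heading a flat
+ * chain of cnodes in file order, e.g. for one PCI board with a tty prefix,
+ * a sync line and a concentrator, something like
+ *
+ *	BNODE -> TNODE -> CUNODE -> LNODE -> CNODE -> MNODE -> ...
+ *
+ * dgap_find_config() further below carves one board's sub-chain back out
+ * of this list.
+ */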
+
+/*
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group, and returns that position. If the first character is a ^, then
+ * this will match the first occurrence not in that group.
+ */
+static char *dgap_sindex(char *string, char *group)
+{
+ char *ptr;
+
+ if (!string || !group)
+ return (char *) NULL;
+
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ } else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
+
+ return (char *) NULL;
+}
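+
+/*
+ * Illustrative sketch (not used by the driver): how the two group forms of
+ * dgap_sindex() behave, following the loops above.
+ *
+ *	char buf[]  = "   io 0x200";
+ *	char *word  = dgap_sindex(buf, "^ \t\n");	returns &buf[3], the 'i'
+ *	char *space = dgap_sindex(word, " \t\n");	returns the ' ' after "io"
+ */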
+
+/*
+ * Get a token from the input file; return 0 if end of file is reached
+ */
+static int dgap_gettok(char **in, struct cnode *p)
+{
+ char *w;
+ struct toklist *t;
+
+ if (strstr(dgap_cword, "boar")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ dgap_err("board !!type not specified");
+ return 1;
+ } else {
+ while ((w = dgap_getword(in))) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ }
+ return 0;
+ }
+}
+
+/*
+ * Get a word from the input stream. Words are separated by whitespace.
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
+
+ char *ptr = dgap_sindex(*in, " \t\n");
+
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
+
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
+
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') ||
+ (**in == '\t') ||
+ (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
+
+ return ret_ptr;
+}
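+
+/*
+ * Minimal usage sketch (illustration only): dgap_getword() tokenizes the
+ * config image in place, NUL-terminating each word and advancing the
+ * caller's pointer past any trailing whitespace.
+ *
+ *	char cfg[] = "board io 0x200\n";
+ *	char *in = cfg;
+ *	char *w1 = dgap_getword(&in);	"board"; in now points at "io 0x200\n"
+ *	char *w2 = dgap_getword(&in);	"io";    in now points at "0x200\n"
+ */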
+
+/*
+ * print a parse error message.
+ */
+static void dgap_err(char *s)
+{
+ pr_err("dgap: parse: %s\n", s);
+}
+
+/*
+ * allocate a new configuration node of type t
+ */
+static struct cnode *dgap_newnode(int t)
+{
+ struct cnode *n;
+
+ n = kmalloc(sizeof(struct cnode), GFP_ATOMIC);
+ if (n != NULL) {
+ memset((char *)n, 0, sizeof(struct cnode));
+ n->type = t;
+ }
+ return n;
+}
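+
+/*
+ * Equivalent sketch (illustration only): the kmalloc()+memset() pair above
+ * could also be written with kzalloc(), which returns zeroed memory:
+ *
+ *	n = kzalloc(sizeof(struct cnode), GFP_ATOMIC);
+ *	if (n)
+ *		n->type = t;
+ */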
+
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case BNODE:
+ if (p->u.board.v_type == 0) {
+ dgap_err("board type !not specified");
+ return 1;
+ }
+
+ return 0;
+
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ dgap_err("line speed not specified");
+ return 1;
+ }
+ return 0;
+
+ case CNODE:
+ if (p->u.conc.v_type == 0) {
+ dgap_err("concentrator type not specified");
+ return 1;
+ }
+ if (p->u.conc.v_speed == 0) {
+ dgap_err("concentrator line speed not specified");
+ return 1;
+ }
+ if (p->u.conc.v_nport == 0) {
+ dgap_err("number of ports on concentrator not specified");
+ return 1;
+ }
+ if (p->u.conc.v_id == 0) {
+ dgap_err("concentrator id letter not specified");
+ return 1;
+ }
+ return 0;
+
+ case MNODE:
+ if (p->u.module.v_type == 0) {
+ dgap_err("EBI module type not specified");
+ return 1;
+ }
+ if (p->u.module.v_nport == 0) {
+ dgap_err("number of ports on EBI module not specified");
+ return 1;
+ }
+ if (p->u.module.v_id == 0) {
+ dgap_err("EBI module id letter not specified");
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * save a string somewhere
+ */
+static char *dgap_savestring(char *s)
+{
+ char *p;
+
+ p = kmalloc(strlen(s) + 1, GFP_ATOMIC);
+ if (p)
+ strcpy(p, s);
+ return p;
+}
+
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+static uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case INTRNODE:
+ /*
+ * Return the first useintr setting found.
+ */
+ return p->u.useintr;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on interrupts. */
+ return 0;
+}
+
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+static uint dgap_config_get_altpin(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case ANODE:
+ /*
+ * Return the first altpin setting found.
+ */
+ return p->u.altpin;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on altpin. */
+ return 0;
+}
+
+/*
+ * Given a specific type of board, if found, detaches that board's chain of
+ * config nodes from the list and returns the first occurrence.
+ */
+static struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
+
+ p = &dgap_head;
+
+ while (p->next != NULL) {
+ prev = p;
+ p = p->next;
+
+ if (p->type == BNODE) {
+
+ if (p->u.board.type == type) {
+
+ if (p->u.board.v_pcibus &&
+ p->u.board.pcibus != bus)
+ continue;
+ if (p->u.board.v_pcislot &&
+ p->u.board.pcislot != slot)
+ continue;
+
+ found = p;
+ /*
+ * Keep walking thru the list till we
+ * find the next board.
+ */
+ while (p->next != NULL) {
+ prev2 = p;
+ p = p->next;
+ if (p->type == BNODE) {
+
+ /*
+ * Mark the end of our 1 board
+ * chain of configs.
+ */
+ prev2->next = NULL;
+
+ /*
+ * Link the "next" board to the
+ * previous board, effectively
+ * "unlinking" our board from
+ * the main config.
+ */
+ prev->next = p;
+
+ return found;
+ }
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ }
+ }
+ return NULL;
+}
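+
+/*
+ * Illustration only: the list surgery above detaches one board's chain of
+ * config nodes.  Starting from a global list
+ *
+ *	dgap_head -> B1 -> n1 -> n2 -> B2 -> ...
+ *
+ * a successful lookup of B1 returns B1 with its private chain B1->n1->n2
+ * (n2->next cleared), and relinks the global list as
+ *
+ *	dgap_head -> B2 -> ...
+ */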
+
+/*
+ * Given a board pointer, walks the config link, counting up
+ * all the ports the user specified should be on the board.
+ * (This does NOT mean they are all actually present right now, though.)
+ */
+static uint dgap_config_get_num_prts(struct board_t *bd)
+{
+ int count = 0;
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return 0;
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
+ }
+ return count;
+}
+
+static char *dgap_create_config_string(struct board_t *bd, char *string)
+{
+ char *ptr = string;
+ struct cnode *p = NULL;
+ struct cnode *q = NULL;
+ int speed;
+
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the
+ * list and keep adding the number of ports on each EM
+ * to the config. UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if ((q != NULL) && (q->type == MNODE)) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while ((q->next != NULL) &&
+ (q->next->type) == MNODE) {
+
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
+
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
+
+ *ptr = 0xff;
+ return string;
+}
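+
+/*
+ * Illustration only: for a config of one sync line at speed 14 feeding one
+ * 16-port concentrator at speed 14 with no EM modules, the loop above emits
+ *
+ *	string[] = { 0x00, 14, 16, 14, 0xff };
+ *
+ * i.e. a (0, line speed) pair per LNODE, an nport byte (0x80 is or'ed in
+ * while EM modules still follow) plus a speed byte per CNODE, and a 0xff
+ * terminator.
+ */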
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
new file mode 100644
index 000000000000..6b8f5f858327
--- /dev/null
+++ b/drivers/staging/dgap/dgap.h
@@ -0,0 +1,1322 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_DRIVER_H
+#define __DGAP_DRIVER_H
+
+#include <linux/types.h> /* To pick up the various Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification
+ */
+#define DG_NAME "dgap-1.3-16"
+#define DG_PART "40002347_C"
+#define DRVSTR "dgap"
+
+/*
+ * defines from dgap_pci.h
+ */
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEV_EPC_DID 0x0002
+#define PCI_DEV_XEM_DID 0x0004
+#define PCI_DEV_XR_DID 0x0005
+#define PCI_DEV_CX_DID 0x0006
+#define PCI_DEV_XRJ_DID 0x0009 /* PLX-based Xr adapter */
+#define PCI_DEV_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
+#define PCI_DEV_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
+#define PCI_DEV_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
+#define PCI_DEV_XR_422_DID 0x0012 /* Xr-422 */
+#define PCI_DEV_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
+#define PCI_DEV_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
+#define PCI_DEV_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
+#define PCI_DEV_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
+#define PCI_DEV_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
+#define PCI_DEV_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
+#define PCI_DEV_XEM_HP_DID 0x0059 /* HP Xem PCI */
+
+#define PCI_DEV_XEM_NAME "AccelePort XEM"
+#define PCI_DEV_CX_NAME "AccelePort CX"
+#define PCI_DEV_XR_NAME "AccelePort Xr"
+#define PCI_DEV_XRJ_NAME "AccelePort Xr (PLX)"
+#define PCI_DEV_XR_SAIP_NAME "AccelePort Xr (SAIP)"
+#define PCI_DEV_920_2_NAME "AccelePort Xr920 2 port"
+#define PCI_DEV_920_4_NAME "AccelePort Xr920 4 port"
+#define PCI_DEV_920_8_NAME "AccelePort Xr920 8 port"
+#define PCI_DEV_XR_422_NAME "AccelePort Xr 422"
+#define PCI_DEV_EPCJ_NAME "AccelePort EPC (PLX)"
+#define PCI_DEV_XR_BULL_NAME "AccelePort Xr (BULL)"
+#define PCI_DEV_XR_IBM_NAME "AccelePort Xr (IBM)"
+#define PCI_DEV_CX_IBM_NAME "AccelePort CX (IBM)"
+#define PCI_DEV_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
+#define PCI_DEV_XEM_HP_NAME "AccelePort XEM (HP)"
+
+/*
+ * On the PCI boards, there is no IO space allocated.
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+
+/* Potential location of PCI BIOS from E0000 to FFFFF */
+#define PCI_BIOS_SIZE 0x00020000
+
+/* Size of Memory and I/O for PCI (4MB) */
+#define PCI_RAM_SIZE 0x00400000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x00200000
+
+/* Max PCI Window Size (2MB) */
+#define PCI_WIN_SIZE 0x00200000
+
+#define PCI_WIN_SHIFT 21 /* 21 bits max */
+
+/* Offset of I/O in Memory (2MB) */
+#define PCI_IO_OFFSET 0x00200000
+
+/* Size of IO (2MB) */
+#define PCI_IO_SIZE 0x00200000
+
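+/*
+ * Worked example (illustration only), following the layout comment above:
+ * the 4MB region (PCI_RAM_SIZE) holds board memory in its low 2MB and the
+ * I/O registers starting at the 2MB mark, so the host reaches the registers
+ * at
+ *
+ *	io_regs = membase + PCI_IO_OFFSET;	(membase + 0x00200000)
+ *
+ * while ordinary dual-port memory accesses stay below PCI_MEM_SIZE.
+ */
+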
+/* Number of boards we support at once. */
+#define MAXBOARDS 32
+#define MAXPORTS 224
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGAP_BOARD_MAGIC 0x5c6df104
+#define DGAP_CHANNEL_MAGIC 0x6c6df104
+#define DGAP_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGAP_SERIAL 0
+#define DGAP_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define SBREAK_TIME 0x25
+#define U2BSIZE 0x400
+
+#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
+
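+/*
+ * Example (illustration only): dgap_jiffies_from_ms(20) evaluates to
+ * (20 * HZ) / 1000, i.e. 20ms worth of jiffies whatever HZ is configured.
+ */
+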
+/*
+ * Our major for the mgmt devices.
+ *
+ * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
+ * 22 has become obsolete now that the "cu" devices have
+ * been removed from 2.6.
+ * Also, this *IS* the epca driver, just PCI only now.
+ */
+#ifndef DIGI_DGAP_MAJOR
+# define DIGI_DGAP_MAJOR 22
+#endif
+
+/*
+ * The parameters we use to define the periods of the moving averages.
+ */
+#define MA_PERIOD (HZ / 10)
+#define SMA_DUR (1 * HZ)
+#define EMA_DUR (1 * HZ)
+#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
+#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
+
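+/*
+ * Worked example (illustration only): with HZ = 1000, MA_PERIOD is 100
+ * jiffies (100ms), SMA_DUR and EMA_DUR are 1000 jiffies, so SMA_NPERIODS
+ * and EMA_NPERIODS both evaluate to 10 averaging periods per second.
+ */
+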
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overridden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
+
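+/*
+ * Example (illustration only): because SNIFF_MAX is a power of two, a sniff
+ * buffer index wraps with a mask instead of a modulo, e.g.
+ *
+ *	ch->ch_sniff_in = (ch->ch_sniff_in + 1) & SNIFF_MASK;
+ */
+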
+#define VPDSIZE (512)
+
+/************************************************************************
+ * FEP memory offsets
+ ************************************************************************/
+#define START 0x0004L /* Execution start address */
+
+#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
+#define CMDSTART 0x0400L /* Start of command buffer */
+#define CMDMAX 0x0800L /* End of command buffer */
+
+#define EVBUF 0x0d18L /* Event (ev_t) structure */
+#define EVSTART 0x0800L /* Start of event buffer */
+#define EVMAX 0x0c00L /* End of event buffer */
+#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' are here */
+#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
+#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
+ /* if the fep has extended capabilities */
+
+/* BIOS MAGIC SPOTS */
+#define ERROR 0x0C14L /* BIOS error code */
+#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
+#define POSTAREA 0x0C00L /* POST complete message area */
+
+/* FEP MAGIC SPOTS */
+#define FEPSTAT POSTAREA /* OS here when FEP comes up */
+#define NCHAN 0x0C02L /* number of ports FEP sees */
+#define PANIC 0x0C10L /* PANIC area for FEP */
+#define KMEMEM 0x0C30L /* Memory for KME use */
+#define CONFIG 0x0CD0L /* Concentrator configuration info */
+#define CONFIGSIZE 0x0030 /* configuration info size */
+#define DOWNREQ 0x0D00 /* Download request buffer pointer */
+
+#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
+#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
+
+#define XEMPORTS 0xC02 /*
+ * Offset in board memory where FEP5 stores
+ * how many ports it has detected.
+ * NOTE: FEP5 reports 64 ports when the user
+ * has the cable in EBI OUT instead of EBI IN.
+ */
+
+#define FEPCLR 0x00
+#define FEPMEM 0x02
+#define FEPRST 0x04
+#define FEPINT 0x08
+#define FEPMASK 0x0e
+#define FEPWIN 0x80
+
+#define LOWMEM 0x0100
+#define HIGHMEM 0x7f00
+
+#define FEPTIMEOUT 200000
+
+#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
+#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
+#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
+#define FEPPOLL 0x0c26 /* Fep event poll interval */
+
+#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
+
+/************************************************************************
+ * FEP supported functions
+ ************************************************************************/
+#define SRLOW 0xe0 /* Set receive low water */
+#define SRHIGH 0xe1 /* Set receive high water */
+#define FLUSHTX 0xe2 /* Flush transmit buffer */
+#define PAUSETX 0xe3 /* Pause data transmission */
+#define RESUMETX 0xe4 /* Resume data transmission */
+#define SMINT 0xe5 /* Set Modem Interrupt */
+#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
+#define SBREAK 0xe8 /* Send break */
+#define SMODEM 0xe9 /* Set 8530 modem control lines */
+#define SIFLAG 0xea /* Set UNIX iflags */
+#define SFLOWC 0xeb /* Set flow control characters */
+#define STLOW 0xec /* Set transmit low water mark */
+#define RPAUSE 0xee /* Pause receive */
+#define RRESUME 0xef /* Resume receive */
+#define CHRESET 0xf0 /* Reset Channel */
+#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
+#define SOFLAG 0xf3 /* Set UNIX oflags */
+#define SHFLOW 0xf4 /* Set hardware handshake */
+#define SCFLAG 0xf5 /* Set UNIX cflags */
+#define SVNEXT 0xf6 /* Set VNEXT character */
+#define SPINTFC 0xfc /* Reserved */
+#define SCOMMODE 0xfd /* Set RS232/422 mode */
+
+
+/************************************************************************
+ * Modes for SCOMMODE
+ ************************************************************************/
+#define MODE_232 0x00
+#define MODE_422 0x01
+
+
+/************************************************************************
+ * Event flags.
+ ************************************************************************/
+#define IFBREAK 0x01 /* Break received */
+#define IFTLW 0x02 /* Transmit low water */
+#define IFTEM 0x04 /* Transmitter empty */
+#define IFDATA 0x08 /* Receive data present */
+#define IFMODEM 0x20 /* Modem status change */
+
+/************************************************************************
+ * Modem flags
+ ************************************************************************/
+# define DM_RTS 0x02 /* Request to send */
+# define DM_CD 0x80 /* Carrier detect */
+# define DM_DSR 0x20 /* Data set ready */
+# define DM_CTS 0x10 /* Clear to send */
+# define DM_RI 0x40 /* Ring indicator */
+# define DM_DTR 0x01 /* Data terminal ready */
+
+/*
+ * defines from dgap_conf.h
+ */
+#define NULLNODE 0 /* header node, not used */
+#define BNODE 1 /* Board node */
+#define LNODE 2 /* Line node */
+#define CNODE 3 /* Concentrator node */
+#define MNODE 4 /* EBI Module node */
+#define TNODE 5 /* tty name prefix node */
+#define CUNODE 6 /* cu name prefix (non-SCO) */
+#define PNODE 7 /* trans. print prefix node */
+#define JNODE 8 /* maJor number node */
+#define ANODE 9 /* altpin */
+#define TSNODE 10 /* tty structure size */
+#define CSNODE 11 /* channel structure size */
+#define BSNODE 12 /* board structure size */
+#define USNODE 13 /* unit schedule structure size */
+#define FSNODE 14 /* f2200 structure size */
+#define VSNODE 15 /* size of VPIX structures */
+#define INTRNODE 16 /* enable interrupt */
+
+/* Enumeration of tokens */
+#define BEGIN 1
+#define END 2
+#define BOARD 10
+
+#define EPCFS 11 /* start of EPC family definitions */
+#define ICX 11
+#define MCX 13
+#define PCX 14
+#define IEPC 15
+#define EEPC 16
+#define MEPC 17
+#define IPCM 18
+#define EPCM 19
+#define MPCM 20
+#define PEPC 21
+#define PPCM 22
+#ifdef CP
+#define ICP 23
+#define ECP 24
+#define MCP 25
+#endif
+#define EPCFE 25 /* end of EPC family definitions */
+#define PC2E 26
+#define PC4E 27
+#define PC4E8K 28
+#define PC8E 29
+#define PC8E8K 30
+#define PC16E 31
+#define MC2E8K 34
+#define MC4E8K 35
+#define MC8E8K 36
+
+#define AVANFS 42 /* start of Avanstar family definitions */
+#define A8P 42
+#define A16P 43
+#define AVANFE 43 /* end of Avanstar family definitions */
+
+#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
+#define DA22 44 /* AccelePort 2002 */
+#define DA24 45 /* AccelePort 2004 */
+#define DA28 46 /* AccelePort 2008 */
+#define DA216 47 /* AccelePort 2016 */
+#define DAR4 48 /* AccelePort RAS 4 port */
+#define DAR8 49 /* AccelePort RAS 8 port */
+#define DDR24 50 /* DataFire RAS 24 port */
+#define DDR30 51 /* DataFire RAS 30 port */
+#define DDR48 52 /* DataFire RAS 48 port */
+#define DDR60 53 /* DataFire RAS 60 port */
+#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
+
+#define PCXRFS 106 /* start of PCXR family definitions */
+#define APORT4 106
+#define APORT8 107
+#define PAPORT4 108
+#define PAPORT8 109
+#define APORT4_920I 110
+#define APORT8_920I 111
+#define APORT4_920P 112
+#define APORT8_920P 113
+#define APORT2_920P 114
+#define PCXRFE 117 /* end of PCXR family definitions */
+
+#define LINE 82
+#ifdef T1
+#define T1M 83
+#define E1M 84
+#endif
+#define CONC 64
+#define CX 65
+#define EPC 66
+#define MOD 67
+#define PORTS 68
+#define METHOD 69
+#define CUSTOM 70
+#define BASIC 71
+#define STATUS 72
+#define MODEM 73
+/* The following tokens can appear in multiple places */
+#define SPEED 74
+#define NPORTS 75
+#define ID 76
+#define CABLE 77
+#define CONNECT 78
+#define IO 79
+#define MEM 80
+#define DPSZ 81
+
+#define TTYN 90
+#define CU 91
+#define PRINT 92
+#define XPRINT 93
+#define CMAJOR 94
+#define ALTPIN 95
+#define STARTO 96
+#define USEINTR 97
+#define PCIINFO 98
+
+#define TTSIZ 100
+#define CHSIZ 101
+#define BSSIZ 102
+#define UNTSIZ 103
+#define F2SIZ 104
+#define VPSIZ 105
+
+#define TOTAL_BOARD 2
+#define CURRENT_BRD 4
+#define BOARD_TYPE 6
+#define IO_ADDRESS 8
+#define MEM_ADDRESS 10
+
+#define FIELDS_PER_PAGE 18
+
+#define TB_FIELD 1
+#define CB_FIELD 3
+#define BT_FIELD 5
+#define IO_FIELD 7
+#define ID_FIELD 8
+#define ME_FIELD 9
+#define TTY_FIELD 11
+#define CU_FIELD 13
+#define PR_FIELD 15
+#define MPR_FIELD 17
+
+#define MAX_FIELD 512
+
+#define INIT 0
+#define NITEMS 128
+#define MAX_ITEM 512
+
+#define DSCRINST 1
+#define DSCRNUM 3
+#define ALTPINQ 5
+#define SSAVE 7
+
+#define DSCR "32"
+#define ONETONINE "123456789"
+#define ALL "1234567890"
+
+/*
+ * All the possible states the driver can be while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ BOARD_READY
+};
+
+/*
+ * All the possible states that a requested concentrator image can be in.
+ */
+enum {
+ NO_PENDING_CONCENTRATOR_REQUESTS = 0,
+ NEED_CONCENTRATOR,
+ REQUESTED_CONCENTRATOR
+};
+
+
+
+/*
+ * Modem line constants are defined as macros because DSR and
+ * DCD are swappable using the ditty altpin option.
+ */
+#define D_CD(ch) ch->ch_cd /* Carrier detect */
+#define D_DSR(ch) ch->ch_dsr /* Data set ready */
+#define D_RTS(ch) DM_RTS /* Request to send */
+#define D_CTS(ch) DM_CTS /* Clear to send */
+#define D_RI(ch) DM_RI /* Ring indicator */
+#define D_DTR(ch) DM_DTR /* Data terminal ready */
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+
+/*
+ * A structure to hold a statistics counter. We also
+ * compute moving averages for this counter.
+ */
+struct macounter {
+ u32 cnt; /* Total count */
+ ulong accum; /* Accumulator per period */
+ ulong sma; /* Simple moving average */
+ ulong ema; /* Exponential moving average */
+};
+
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
+#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
+
+/*
+ * Per-board information
+ */
+struct board_t {
+ int magic; /* Board Magic number. */
+ int boardnum; /* Board number: 0-3 */
+ int firstminor; /* First minor, e.g. 0, 30, 60 */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ u16 maxports; /* MAX ports this board can handle */
+ uchar vpd[VPDSIZE]; /* VPD of board, if found */
+ u32 bd_flags; /* Board flags */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ u32 state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ u32 wait_for_bios;
+ u32 wait_for_fep;
+
+ struct cnode *bd_config; /* Config of board */
+
+ u16 nasync; /* Number of ports on card */
+
+ u32 use_interrupts; /* Should we be interrupt driven? */
+ ulong irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ u32 intr_used; /* Non-zero if using interrupts */
+ u32 intr_running; /* Non-zero if FEP knows it's doing interrupts */
+
+ ulong port; /* Start of base io port of the card */
+ ulong port_end; /* End of base io port of the card */
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ uchar *re_map_port; /* Remapped io port of the card */
+ uchar *re_map_membase;/* Remapped memory of the card */
+
+ uchar runwait; /* # Processes waiting for FEP */
+ uchar inhibit_poller; /* Tells the poller to leave us alone */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver *SerialDriver;
+ struct tty_port *SerialPorts;
+ char SerialName[200];
+ struct tty_driver *PrintDriver;
+ struct tty_port *PrinterPorts;
+ char PrintName[200];
+
+ u32 dgap_Major_Serial_Registered;
+ u32 dgap_Major_TransparentPrint_Registered;
+
+ u32 dgap_Serial_Major;
+ u32 dgap_TransparentPrint_Major;
+
+ struct bs_t *bd_bs; /* Base structure pointer */
+
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+ char *flipflagbuf; /* Our flip flag buffer, alloced if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+ wait_queue_head_t kme_wait; /* Needed for DPA support */
+
+ u32 conc_dl_status; /* Status of any pending conc download */
+};
+
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work on this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting for ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ u32 un_time;
+ u32 un_type;
+ u32 un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ u32 un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ u32 un_dev; /* Minor device number */
+ tcflag_t un_oflag; /* oflags being done on board */
+ tcflag_t un_lflag; /* lflags being done on board */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_OUT 0x0002 /* Dial-out device open */
+#define CH_STOP 0x0004 /* Output is stopped */
+#define CH_STOPI 0x0008 /* Input is stopped */
+#define CH_CD 0x0010 /* Carrier is present */
+#define CH_FCAR 0x0020 /* Carrier forced on */
+
+#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
+#define CH_WLOW 0x0100 /* Term waiting low event */
+#define CH_WEMPTY 0x0200 /* Term waiting empty event */
+#define CH_RENABLE 0x0400 /* Buffer just emptied */
+#define CH_RACTIVE 0x0800 /* Process active in xxread() */
+#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
+#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
+#define CH_HANGUP 0x8000 /* Hangup received */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
+#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
+#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
+#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
+#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA (('e'<<8) | 94) /* Read params */
+
+#define DIGI_SETA (('e'<<8) | 95) /* Set params */
+#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
+#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
+
+#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */
+#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */
+#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
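+
+/*
+ * Minimal user-space sketch (illustration only, assuming fd is an open tty
+ * on one of these ports): the digi_t parameters are read and written with
+ * the DIGI_GETA/DIGI_SETAW ioctls defined above.
+ *
+ *	struct digi_t d;
+ *
+ *	if (ioctl(fd, DIGI_GETA, &d) == 0) {
+ *		d.digi_flags |= DIGI_ALTPIN;
+ *		ioctl(fd, DIGI_SETAW, &d);	(drain output, then apply)
+ *	}
+ */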
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned long rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
+
+/***********************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND 1 /* A Digi product at that port is not configured or installed */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned long shrink_buf_vaddr; /* Virtual address of board */
+ unsigned long shrink_buf_phys; /* Physical address of board */
+ unsigned long shrink_buf_bseg; /* Amount of board memory */
+ unsigned long shrink_buf_hseg; /* '186 Beginning of Dual-Port */
+
+ unsigned long shrink_buf_lseg; /* '186 Beginning of freed memory */
+ unsigned long shrink_buf_mseg; /* Linear address from start of
+ dual-port where freed memory
+ begins, host viewpoint. */
+
+ unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned long shrink_buf_reserva; /* Reserved */
+ unsigned long shrink_buf_reservb; /* Reserved */
+ unsigned long shrink_buf_reservc; /* Reserved */
+ unsigned long shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing
+ Zero is a good return */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned long dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_ioport; /* io port address */
+ unsigned long info_physaddr; /* memory address */
+ unsigned long info_physsize; /* Size of host mem window */
+ unsigned long info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned long info_cflag; /* cflag for channel */
+ unsigned long info_iflag; /* iflag for channel */
+ unsigned long info_oflag; /* oflag for channel */
+ unsigned long info_mstat; /* mstat for channel */
+ unsigned long info_tx_data; /* tx_data for channel */
+ unsigned long info_rx_data; /* rx_data for channel */
+ unsigned long info_hflow; /* hflow for channel */
+ unsigned long info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT (('d'<<8) | 244) /* get board info */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_channel; /* Channel index number */
+ unsigned long info_ch_cflag; /* Channel cflag */
+ unsigned long info_ch_iflag; /* Channel iflag */
+ unsigned long info_ch_oflag; /* Channel oflag */
+ unsigned long info_chsize; /* Channel structure size */
+ unsigned long info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ long reserved[8]; /* reserved for future use */
+};
+
+/*
+ * This structure is used with the DIGI_FEPCMD ioctl to
+ * tell the driver which port to send the command for.
+ */
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+/*
+ * info_sleep_stat defines
+ */
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH (('d'<<8) | 245) /* get board info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART internal loopback */
+#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+#define DIGI_RESET_PORT (('e'<<8) | 93) /* Reset port */
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct bs_t *ch_bs; /* Base structure pointer */
+ struct cm_t *ch_cm; /* Command queue pointer */
+ struct board_t *ch_bd; /* Board structure pointer */
+ unsigned char *ch_vaddr; /* FEP memory origin */
+ unsigned char *ch_taddr; /* Write buffer origin */
+ unsigned char *ch_raddr; /* Read buffer origin */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ u32 pscan_state;
+ uchar pscan_savechar;
+
+ u32 ch_portnum; /* Port number, 0 offset. */
+ u32 ch_open_count; /* open count */
+ u32 ch_flags; /* Channel flags */
+
+
+ u32 ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ u32 ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+
+ u16 ch_fepiflag; /* FEP tty iflags */
+ u16 ch_fepcflag; /* FEP tty cflags */
+ u16 ch_fepoflag; /* FEP tty oflags */
+ u16 ch_wopen; /* Waiting for open process cnt */
+ u16 ch_tstart; /* Transmit buffer start */
+ u16 ch_tsize; /* Transmit buffer size */
+ u16 ch_rstart; /* Receive buffer start */
+ u16 ch_rsize; /* Receive buffer size */
+ u16 ch_rdelay; /* Receive delay time */
+
+ u16 ch_tlw; /* Our currently set low water mark */
+
+ u16 ch_cook; /* Output character mask */
+
+ uchar ch_card; /* Card channel is on */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+ uchar ch_mforce; /* Modem values to be forced */
+ uchar ch_mval; /* Force values */
+ uchar ch_fepstopc; /* FEP stop character */
+ uchar ch_fepstartc; /* FEP start character */
+
+ uchar ch_astopc; /* Auxiliary Stop character */
+ uchar ch_astartc; /* Auxiliary Start character */
+ uchar ch_fepastopc; /* Auxiliary FEP stop char */
+ uchar ch_fepastartc; /* Auxiliary FEP start char */
+
+ uchar ch_hflow; /* FEP hardware handshake */
+ uchar ch_dsr; /* stores real dsr value */
+ uchar ch_cd; /* stores real cd value */
+ uchar ch_tx_win; /* channel tx buffer window */
+ uchar ch_rx_win; /* channel rx buffer window */
+ uint ch_custom_speed; /* Custom baud, if set */
+ uint ch_baud_info; /* Current baud info for /proc output */
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+/************************************************************************
+ * Command structure definition.
+ ************************************************************************/
+struct cm_t {
+ volatile unsigned short cm_head; /* Command buffer head offset */
+ volatile unsigned short cm_tail; /* Command buffer tail offset */
+ volatile unsigned short cm_start; /* start offset of buffer */
+ volatile unsigned short cm_max; /* last offset of buffer */
+};
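+
+/*
+ * Rough sketch (illustration only, not the driver's literal code): the host
+ * produces commands at cm_head and the FEP consumes them at cm_tail, both
+ * offsets wrapping within [cm_start, cm_max], so queueing one entry looks
+ * roughly like
+ *
+ *	write the command bytes at (command buffer base + cm->cm_head);
+ *	advance cm->cm_head, wrapping back to cm_start past cm_max;
+ *
+ * with the buffer treated as full when the advanced head would meet cm_tail.
+ */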
+
+/************************************************************************
+ * Event structure definition.
+ ************************************************************************/
+struct ev_t {
+ volatile unsigned short ev_head; /* Event buffer head offset */
+ volatile unsigned short ev_tail; /* Event buffer tail offset */
+ volatile unsigned short ev_start; /* start offset of buffer */
+ volatile unsigned short ev_max; /* last offset of buffer */
+};
+
+/************************************************************************
+ * Download buffer structure.
+ ************************************************************************/
+struct downld_t {
+ uchar dl_type; /* Header */
+ uchar dl_seq; /* Download sequence */
+ ushort dl_srev; /* Software revision number */
+ ushort dl_lrev; /* Low revision number */
+ ushort dl_hrev; /* High revision number */
+ ushort dl_seg; /* Start segment address */
+ ushort dl_size; /* Number of bytes to download */
+ uchar dl_data[1024]; /* Download data */
+};
+
+/************************************************************************
+ * Per channel buffer structure
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * C = changed by commands only *
+ * U = unknown (may be changed w/o notice) *
+ ************************************************************************/
+struct bs_t {
+ volatile unsigned short tp_jmp; /* Transmit poll jump */
+ volatile unsigned short tc_jmp; /* Cooked procedure jump */
+ volatile unsigned short ri_jmp; /* Not currently used */
+ volatile unsigned short rp_jmp; /* Receive poll jump */
+
+ volatile unsigned short tx_seg; /* W Tx segment */
+ volatile unsigned short tx_head; /* W Tx buffer head offset */
+ volatile unsigned short tx_tail; /* R Tx buffer tail offset */
+ volatile unsigned short tx_max; /* W Tx buffer size - 1 */
+
+ volatile unsigned short rx_seg; /* W Rx segment */
+ volatile unsigned short rx_head; /* W Rx buffer head offset */
+ volatile unsigned short rx_tail; /* R Rx buffer tail offset */
+ volatile unsigned short rx_max; /* W Rx buffer size - 1 */
+
+ volatile unsigned short tx_lw; /* W Tx buffer low water mark */
+ volatile unsigned short rx_lw; /* W Rx buffer low water mark */
+ volatile unsigned short rx_hw; /* W Rx buffer high water mark */
+ volatile unsigned short incr; /* W Increment to next channel */
+
+ volatile unsigned short fepdev; /* U SCC device base address */
+ volatile unsigned short edelay; /* W Exception delay */
+ volatile unsigned short blen; /* W Break length */
+ volatile unsigned short btime; /* U Break complete time */
+
+ volatile unsigned short iflag; /* C UNIX input flags */
+ volatile unsigned short oflag; /* C UNIX output flags */
+ volatile unsigned short cflag; /* C UNIX control flags */
+ volatile unsigned short wfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char num; /* U Channel number */
+ volatile unsigned char ract; /* U Receiver active counter */
+ volatile unsigned char bstat; /* U Break status bits */
+ volatile unsigned char tbusy; /* W Transmit busy */
+ volatile unsigned char iempty; /* W Transmit empty event enable */
+ volatile unsigned char ilow; /* W Transmit low-water event enable */
+ volatile unsigned char idata; /* W Receive data interrupt enable */
+ volatile unsigned char eflag; /* U Host event flags */
+
+ volatile unsigned char tflag; /* U Transmit flags */
+ volatile unsigned char rflag; /* U Receive flags */
+ volatile unsigned char xmask; /* U Transmit ready flags */
+ volatile unsigned char xval; /* U Transmit ready value */
+ volatile unsigned char m_stat; /* RC Modem status bits */
+ volatile unsigned char m_change; /* U Modem bits which changed */
+ volatile unsigned char m_int; /* W Modem interrupt enable bits */
+ volatile unsigned char m_last; /* U Last modem status */
+
+ volatile unsigned char mtran; /* C Unreported modem trans */
+ volatile unsigned char orun; /* C Buffer overrun occurred */
+ volatile unsigned char astartc; /* W Auxiliary Xon char */
+ volatile unsigned char astopc; /* W Auxiliary Xoff char */
+ volatile unsigned char startc; /* W Xon character */
+ volatile unsigned char stopc; /* W Xoff character */
+ volatile unsigned char vnextc; /* W Vnext character */
+ volatile unsigned char hflow; /* C Software flow control */
+
+ volatile unsigned char fillc; /* U Delay Fill character */
+ volatile unsigned char ochar; /* U Saved output character */
+ volatile unsigned char omask; /* U Output character mask */
+
+ volatile unsigned char bfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char scc[16]; /* U SCC registers */
+};
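The legend above this structure marks tx_head as host-writable (W), tx_tail as read-only to the host (R), and tx_max as "Tx buffer size - 1". A minimal host-side sketch, assuming tx_max + 1 is a power of two so offsets can be masked rather than taken modulo, of how those three fields yield ring occupancy; the helper names and the readw() accesses are illustrative, not part of this header:

	/* Illustrative only: bs is assumed to point into ioremap()ed board memory. */
	static unsigned int tx_bytes_queued(struct bs_t __iomem *bs)
	{
		u16 head = readw(&bs->tx_head);	/* W: next offset the host will fill */
		u16 tail = readw(&bs->tx_tail);	/* R: next offset the FEP will drain */
		u16 mask = readw(&bs->tx_max);	/* W: buffer size - 1 */

		return (head - tail) & mask;	/* bytes queued but not yet sent */
	}

	static unsigned int tx_bytes_free(struct bs_t __iomem *bs)
	{
		/* keep one slot unused so head == tail always means "empty" */
		return readw(&bs->tx_max) - tx_bytes_queued(bs);
	}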
+
+struct cnode {
+ struct cnode *next;
+ int type;
+ int numbrd;
+
+ union {
+ struct {
+ char type; /* Board Type */
+ long port; /* I/O Address */
+ char *portstr; /* I/O Address in string */
+ long addr; /* Memory Address */
+ char *addrstr; /* Memory Address in string */
+ long pcibus; /* PCI BUS */
+ char *pcibusstr; /* PCI BUS in string */
+ long pcislot; /* PCI SLOT */
+ char *pcislotstr; /* PCI SLOT in string */
+ long nport; /* Number of Ports */
+ char *id; /* tty id */
+ long start; /* start of tty counting */
+ char *method; /* Install method */
+ char v_type;
+ char v_port;
+ char v_addr;
+ char v_pcibus;
+ char v_pcislot;
+ char v_nport;
+ char v_id;
+ char v_start;
+ char v_method;
+ char line1;
+ char line2;
+ char conc1; /* total concs in line1 */
+ char conc2; /* total concs in line2 */
+ char module1; /* total modules for line1 */
+ char module2; /* total modules for line2 */
+ char *status; /* config status */
+ char *dimstatus; /* Y/N */
+ int status_index; /* field pointer */
+ } board;
+
+ struct {
+ char *cable;
+ char v_cable;
+ long speed;
+ char v_speed;
+ } line;
+
+ struct {
+ char type;
+ char *connect;
+ long speed;
+ long nport;
+ char *id;
+ char *idstr;
+ long start;
+ char v_type;
+ char v_connect;
+ char v_speed;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } conc;
+
+ struct {
+ char type;
+ long nport;
+ char *id;
+ char *idstr;
+ long start;
+ char v_type;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } module;
+
+ char *ttyname;
+
+ char *cuname;
+
+ char *printname;
+
+ long majornumber;
+
+ long altpin;
+
+ long ttysize;
+
+ long chsize;
+
+ long bssize;
+
+ long unsize;
+
+ long f2size;
+
+ long vpixsize;
+
+ long useintr;
+ } u;
+};
+
+#endif
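The cnode list above is what the config parser hands back: one node per configuration statement, with type selecting the live member of the union (the BNODE, LNODE, CNODE, MNODE, ... values appear in the dgap_conf.h hunk removed further down). A purely illustrative sketch of walking such a list; count_config_ports() is a hypothetical helper, not a driver function:

	/* Illustrative only: sum the port counts of board, concentrator and
	 * EBI module nodes in a parsed configuration list. */
	static long count_config_ports(struct cnode *head)
	{
		struct cnode *p;
		long nports = 0;

		for (p = head; p; p = p->next) {
			switch (p->type) {
			case BNODE:			/* board node */
				nports += p->u.board.nport;
				break;
			case CNODE:			/* concentrator node */
				nports += p->u.conc.nport;
				break;
			case MNODE:			/* EBI module node */
				nports += p->u.module.nport;
				break;
			default:			/* tty/cu prefixes, altpin, sizes, ... */
				break;
			}
		}
		return nports;
	}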
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
deleted file mode 100644
index 484ed726a4d6..000000000000
--- a/drivers/staging/dgap/dgap_conf.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************************
- *
- * dgap_conf.h - Header file for installations and parse files.
- *
- * $Id: dgap_conf.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef _DGAP_CONF_H
-#define _DGAP_CONF_H
-
-#define NULLNODE 0 /* header node, not used */
-#define BNODE 1 /* Board node */
-#define LNODE 2 /* Line node */
-#define CNODE 3 /* Concentrator node */
-#define MNODE 4 /* EBI Module node */
-#define TNODE 5 /* tty name prefix node */
-#define CUNODE 6 /* cu name prefix (non-SCO) */
-#define PNODE 7 /* trans. print prefix node */
-#define JNODE 8 /* maJor number node */
-#define ANODE 9 /* altpin */
-#define TSNODE 10 /* tty structure size */
-#define CSNODE 11 /* channel structure size */
-#define BSNODE 12 /* board structure size */
-#define USNODE 13 /* unit schedule structure size */
-#define FSNODE 14 /* f2200 structure size */
-#define VSNODE 15 /* size of VPIX structures */
-#define INTRNODE 16 /* enable interrupt */
-
-/* Enumeration of tokens */
-#define BEGIN 1
-#define END 2
-#define BOARD 10
-
-#define EPCFS 11 /* start of EPC family definitions */
-#define ICX 11
-#define MCX 13
-#define PCX 14
-#define IEPC 15
-#define EEPC 16
-#define MEPC 17
-#define IPCM 18
-#define EPCM 19
-#define MPCM 20
-#define PEPC 21
-#define PPCM 22
-#ifdef CP
-#define ICP 23
-#define ECP 24
-#define MCP 25
-#endif
-#define EPCFE 25 /* end of EPC family definitions */
-#define PC2E 26
-#define PC4E 27
-#define PC4E8K 28
-#define PC8E 29
-#define PC8E8K 30
-#define PC16E 31
-#define MC2E8K 34
-#define MC4E8K 35
-#define MC8E8K 36
-
-#define AVANFS 42 /* start of Avanstar family definitions */
-#define A8P 42
-#define A16P 43
-#define AVANFE 43 /* end of Avanstar family definitions */
-
-#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
-#define DA22 44 /* AccelePort 2002 */
-#define DA24 45 /* AccelePort 2004 */
-#define DA28 46 /* AccelePort 2008 */
-#define DA216 47 /* AccelePort 2016 */
-#define DAR4 48 /* AccelePort RAS 4 port */
-#define DAR8 49 /* AccelePort RAS 8 port */
-#define DDR24 50 /* DataFire RAS 24 port */
-#define DDR30 51 /* DataFire RAS 30 port */
-#define DDR48 52 /* DataFire RAS 48 port */
-#define DDR60 53 /* DataFire RAS 60 port */
-#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
-
-#define PCXRFS 106 /* start of PCXR family definitions */
-#define APORT4 106
-#define APORT8 107
-#define PAPORT4 108
-#define PAPORT8 109
-#define APORT4_920I 110
-#define APORT8_920I 111
-#define APORT4_920P 112
-#define APORT8_920P 113
-#define APORT2_920P 114
-#define PCXRFE 117 /* end of PCXR family definitions */
-
-#define LINE 82
-#ifdef T1
-#define T1M 83
-#define E1M 84
-#endif
-#define CONC 64
-#define CX 65
-#define EPC 66
-#define MOD 67
-#define PORTS 68
-#define METHOD 69
-#define CUSTOM 70
-#define BASIC 71
-#define STATUS 72
-#define MODEM 73
-/* The following tokens can appear in multiple places */
-#define SPEED 74
-#define NPORTS 75
-#define ID 76
-#define CABLE 77
-#define CONNECT 78
-#define IO 79
-#define MEM 80
-#define DPSZ 81
-
-#define TTYN 90
-#define CU 91
-#define PRINT 92
-#define XPRINT 93
-#define CMAJOR 94
-#define ALTPIN 95
-#define STARTO 96
-#define USEINTR 97
-#define PCIINFO 98
-
-#define TTSIZ 100
-#define CHSIZ 101
-#define BSSIZ 102
-#define UNTSIZ 103
-#define F2SIZ 104
-#define VPSIZ 105
-
-#define TOTAL_BOARD 2
-#define CURRENT_BRD 4
-#define BOARD_TYPE 6
-#define IO_ADDRESS 8
-#define MEM_ADDRESS 10
-
-#define FIELDS_PER_PAGE 18
-
-#define TB_FIELD 1
-#define CB_FIELD 3
-#define BT_FIELD 5
-#define IO_FIELD 7
-#define ID_FIELD 8
-#define ME_FIELD 9
-#define TTY_FIELD 11
-#define CU_FIELD 13
-#define PR_FIELD 15
-#define MPR_FIELD 17
-
-#define MAX_FIELD 512
-
-#define INIT 0
-#define NITEMS 128
-#define MAX_ITEM 512
-
-#define DSCRINST 1
-#define DSCRNUM 3
-#define ALTPINQ 5
-#define SSAVE 7
-
-#define DSCR "32"
-#define ONETONINE "123456789"
-#define ALL "1234567890"
-
-
-struct cnode {
- struct cnode *next;
- int type;
- int numbrd;
-
- union {
- struct {
- char type; /* Board Type */
- short port; /* I/O Address */
- char *portstr; /* I/O Address in string */
- long addr; /* Memory Address */
- char *addrstr; /* Memory Address in string */
- long pcibus; /* PCI BUS */
- char *pcibusstr; /* PCI BUS in string */
- long pcislot; /* PCI SLOT */
- char *pcislotstr; /* PCI SLOT in string */
- char nport; /* Number of Ports */
- char *id; /* tty id */
- int start; /* start of tty counting */
- char *method; /* Install method */
- char v_type;
- char v_port;
- char v_addr;
- char v_pcibus;
- char v_pcislot;
- char v_nport;
- char v_id;
- char v_start;
- char v_method;
- char line1;
- char line2;
- char conc1; /* total concs in line1 */
- char conc2; /* total concs in line2 */
- char module1; /* total modules for line1 */
- char module2; /* total modules for line2 */
- char *status; /* config status */
- char *dimstatus; /* Y/N */
- int status_index; /* field pointer */
- } board;
-
- struct {
- char *cable;
- char v_cable;
- char speed;
- char v_speed;
- } line;
-
- struct {
- char type;
- char *connect;
- char speed;
- char nport;
- char *id;
- char *idstr;
- int start;
- char v_type;
- char v_connect;
- char v_speed;
- char v_nport;
- char v_id;
- char v_start;
- } conc;
-
- struct {
- char type;
- char nport;
- char *id;
- char *idstr;
- int start;
- char v_type;
- char v_nport;
- char v_id;
- char v_start;
- } module;
-
- char *ttyname;
-
- char *cuname;
-
- char *printname;
-
- int majornumber;
-
- int altpin;
-
- int ttysize;
-
- int chsize;
-
- int bssize;
-
- int unsize;
-
- int f2size;
-
- int vpixsize;
-
- int useintr;
- } u;
-};
-
-#endif
diff --git a/drivers/staging/dgap/dgap_downld.h b/drivers/staging/dgap/dgap_downld.h
deleted file mode 100644
index 271ac19257f9..000000000000
--- a/drivers/staging/dgap/dgap_downld.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: dgap_downld.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- */
-
-/*
-** downld.h
-** - describes the interface between the user level download process
-** and the concentrator download driver.
-*/
-
-#ifndef _DGAP_DOWNLD_H_
-#define _DGAP_DOWNLD_H_
-
-
-struct fepimg {
- int type; /* board type */
- int len; /* length of image */
- char fepimage[1]; /* beginning of image */
-};
-
-struct downldio {
- unsigned int req_type; /* FEP or concentrator */
- unsigned int bdid; /* opaque board identifier */
- union {
- struct downld_t dl; /* download structure */
- struct fepimg fi; /* fep/bios image structure */
- } image;
-};
-
-#define DIGI_DLREQ_GET (('d'<<8) | 220)
-#define DIGI_DLREQ_SET (('d'<<8) | 221)
-
-#define DIGI_DL_NUKE (('d'<<8) | 222) /* Not really a dl request, but
-						dangerous enough not to put in
-						digi.h */
-/* Packed bits of intarg for DIGI_DL_NUKE */
-#define DIGI_NUKE_RESET_ALL (1 << 31)
-#define DIGI_NUKE_INHIBIT_POLLER (1 << 30)
-#define DIGI_NUKE_BRD_NUMB 0x0f
-
-
-
-#define DLREQ_BIOS 0
-#define DLREQ_FEP 1
-#define DLREQ_CONC 2
-#define DLREQ_CONFIG 3
-#define DLREQ_DEVCREATE 4
-
-#endif
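The request codes in the header above use the old "magic byte in the high half" encoding rather than the _IOR()/_IOW() macros, so DIGI_DLREQ_GET works out to ('d' << 8) | 220 = 0x64dc and DIGI_DLREQ_SET to 0x64dd. A small illustrative sketch of packing the DIGI_DL_NUKE argument exactly as the packed-bit defines describe; nuke_arg() is a hypothetical helper, not a driver function:

	/* Illustrative only: build the intarg value documented for DIGI_DL_NUKE. */
	static unsigned int nuke_arg(int board, int reset_all, int inhibit_poller)
	{
		unsigned int arg = board & DIGI_NUKE_BRD_NUMB;	/* board number, low 4 bits */

		if (reset_all)
			arg |= DIGI_NUKE_RESET_ALL;		/* bit 31 */
		if (inhibit_poller)
			arg |= DIGI_NUKE_INHIBIT_POLLER;	/* bit 30 */
		return arg;
	}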
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
deleted file mode 100644
index 089d017fc291..000000000000
--- a/drivers/staging/dgap/dgap_driver.c
+++ /dev/null
@@ -1,1030 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- * $Id: dgap_driver.c,v 1.3 2011/06/21 10:35:16 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <linux/slab.h>
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <linux/sched.h>
-
-#include "dgap_driver.h"
-#include "dgap_pci.h"
-#include "dgap_fep5.h"
-#include "dgap_tty.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-#include "dgap_trace.h"
-#include "dgap_sysfs.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
-
-/*
- * insmod command line overrideable parameters
- *
- * NOTE: we use a set of macros to create the variables, which allows
- * us to specify the variable type, name, initial value, and description.
- */
-PARM_INT(debug, 0x00, 0644, "Driver debugging level");
-PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
-PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
-
-
-/**************************************************************************
- *
- * protos for this file
- *
- */
-
-static int dgap_start(void);
-static void dgap_init_globals(void);
-static int dgap_found_board(struct pci_dev *pdev, int id);
-static void dgap_cleanup_board(struct board_t *brd);
-static void dgap_poll_handler(ulong dummy);
-static int dgap_init_pci(void);
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void dgap_remove_one(struct pci_dev *dev);
-static int dgap_probe1(struct pci_dev *pdev, int card_type);
-static void dgap_mbuf(struct board_t *brd, const char *fmt, ...);
-static int dgap_do_remap(struct board_t *brd);
-static irqreturn_t dgap_intr(int irq, void *voidbrd);
-
-/* Driver load/unload functions */
-int dgap_init_module(void);
-void dgap_cleanup_module(void);
-
-module_init(dgap_init_module);
-module_exit(dgap_cleanup_module);
-
-
-/*
- * File operations permitted on Control/Management major.
- */
-static struct file_operations DgapBoardFops =
-{
- .owner = THIS_MODULE,
-};
-
-
-/*
- * Globals
- */
-uint dgap_NumBoards;
-struct board_t *dgap_Board[MAXBOARDS];
-DEFINE_SPINLOCK(dgap_global_lock);
-ulong dgap_poll_counter;
-char *dgap_config_buf;
-int dgap_driver_state = DRIVER_INITIALIZED;
-DEFINE_SPINLOCK(dgap_dl_lock);
-wait_queue_head_t dgap_dl_wait;
-int dgap_dl_action;
-int dgap_poll_tick = 20; /* Poll interval - 20 ms */
-
-/*
- * Static vars.
- */
-static int dgap_Major_Control_Registered = FALSE;
-static uint dgap_driver_start = FALSE;
-
-static struct class * dgap_class;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
-static ulong dgap_poll_time; /* Time of next poll */
-static uint dgap_poll_stop; /* Used to tell poller to stop */
-static struct timer_list dgap_poll_timer;
-
-
-static struct pci_device_id dgap_pci_tbl[] = {
- { DIGI_VID, PCI_DEVICE_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { DIGI_VID, PCI_DEVICE_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { DIGI_VID, PCI_DEVICE_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { DIGI_VID, PCI_DEVICE_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { DIGI_VID, PCI_DEVICE_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- { DIGI_VID, PCI_DEVICE_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
- { DIGI_VID, PCI_DEVICE_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
- { DIGI_VID, PCI_DEVICE_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
- { DIGI_VID, PCI_DEVICE_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
- { DIGI_VID, PCI_DEVICE_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
- { DIGI_VID, PCI_DEVICE_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
- { DIGI_VID, PCI_DEVICE_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
- { DIGI_VID, PCI_DEVICE_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
- { DIGI_VID, PCI_DEVICE_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
- { DIGI_VID, PCI_DEVICE_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
- {0,} /* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
-
-
-/*
- * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
- */
-struct board_id {
- uint config_type;
- uchar *name;
- uint maxports;
- uint dpatype;
-};
-
-static struct board_id dgap_Ids[] =
-{
- { PPCM, PCI_DEVICE_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
- { PCX, PCI_DEVICE_CX_NAME, 128, (T_CX | T_PCIBUS) },
- { PCX, PCI_DEVICE_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) },
- { PEPC, PCI_DEVICE_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) },
- { APORT2_920P, PCI_DEVICE_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT4_920P, PCI_DEVICE_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT8_920P, PCI_DEVICE_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PAPORT8, PCI_DEVICE_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { APORT8_920P, PCI_DEVICE_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
- { PPCM, PCI_DEVICE_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
- {0,} /* 0 terminated list. */
-};
-
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
-
-char *dgap_state_text[] = {
- "Board Failed",
- "Configuration for board not found.\n\t\t\tRun mpi to configure board.",
- "Board Found",
- "Need Reset",
- "Finished Reset",
- "Need Config",
- "Finished Config",
- "Need Device Creation",
- "Requested Device Creation",
- "Finished Device Creation",
- "Need BIOS Load",
- "Requested BIOS",
- "Doing BIOS Load",
- "Finished BIOS Load",
- "Need FEP Load",
- "Requested FEP",
- "Doing FEP Load",
- "Finished FEP Load",
- "Requested PROC creation",
- "Finished PROC creation",
- "Board READY",
-};
-
-char *dgap_driver_state_text[] = {
- "Driver Initialized",
- "Driver needs configuration load.",
- "Driver requested configuration from download daemon.",
- "Driver Ready."
-};
-
-
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
-int dgap_init_module(void)
-{
- int rc = 0;
-
- APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
-
- /*
- * Initialize global stuff
- */
- rc = dgap_start();
-
- if (rc < 0) {
- return(rc);
- }
-
- /*
- * Find and configure all the cards
- */
- rc = dgap_init_pci();
-
- /*
- * If something went wrong in the scan, bail out of driver.
- */
- if (rc < 0) {
- /* Only unregister the pci driver if it was actually registered. */
- if (dgap_NumBoards)
- pci_unregister_driver(&dgap_driver);
- else
- printk("WARNING: dgap driver load failed. No DGAP boards found.\n");
-
- dgap_cleanup_module();
- }
- else {
- dgap_create_driver_sysfiles(&dgap_driver);
- }
-
- DPR_INIT(("Finished init_module. Returning %d\n", rc));
- return (rc);
-}
-
-
-/*
- * Start of driver.
- */
-static int dgap_start(void)
-{
- int rc = 0;
- unsigned long flags;
-
- if (dgap_driver_start == FALSE) {
-
- dgap_driver_start = TRUE;
-
- /* make sure that the globals are init'd before we do anything else */
- dgap_init_globals();
-
- dgap_NumBoards = 0;
-
- APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
-
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- */
- if (!dgap_Major_Control_Registered) {
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
- if (rc < 0) {
- APR(("Can't register dgap driver device (%d)\n", rc));
- return (rc);
- }
-
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 1),
- NULL, "dgap_downld");
- dgap_Major_Control_Registered = TRUE;
- }
-
- /*
- * Init any global tty stuff.
- */
- rc = dgap_tty_preinit();
-
- if (rc < 0) {
- APR(("tty preinit - not enough memory (%d)\n", rc));
- return(rc);
- }
-
- /* Start the poller */
- DGAP_LOCK(dgap_poll_lock, flags);
- init_timer(&dgap_poll_timer);
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- DGAP_UNLOCK(dgap_poll_lock, flags);
-
- add_timer(&dgap_poll_timer);
-
- dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
- }
-
- return (rc);
-}
-
-
-/*
- * Register pci driver, and return how many boards we have.
- */
-static int dgap_init_pci(void)
-{
- return pci_register_driver(&dgap_driver);
-}
-
-
-/* returns 0 on success, or negative on error */
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
-
- /* wake up and enable device */
- rc = pci_enable_device(pdev);
-
- if (rc < 0) {
- rc = -EIO;
- } else {
- rc = dgap_probe1(pdev, ent->driver_data);
- if (rc == 0) {
- dgap_NumBoards++;
- DPR_INIT(("Incrementing numboards to %d\n", dgap_NumBoards));
- }
- }
- return rc;
-}
-
-
-static int dgap_probe1(struct pci_dev *pdev, int card_type)
-{
- return dgap_found_board(pdev, card_type);
-}
-
-
-static void dgap_remove_one(struct pci_dev *dev)
-{
- /* Do Nothing */
-}
-
-
-/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-void dgap_cleanup_module(void)
-{
- int i;
- ulong lock_flags;
-
- DGAP_LOCK(dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- DGAP_UNLOCK(dgap_poll_lock, lock_flags);
-
- /* Turn off poller right away. */
- del_timer_sync( &dgap_poll_timer);
-
- dgap_remove_driver_sysfiles(&dgap_driver);
-
-
- if (dgap_Major_Control_Registered) {
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 1));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- }
-
- kfree(dgap_config_buf);
-
- for (i = 0; i < dgap_NumBoards; ++i) {
- dgap_remove_ports_sysfiles(dgap_Board[i]);
- dgap_tty_uninit(dgap_Board[i]);
- dgap_cleanup_board(dgap_Board[i]);
- }
-
- dgap_tty_post_uninit();
-
-#if defined(DGAP_TRACER)
- /* last thing, make sure we release the tracebuffer */
- dgap_tracer_free();
-#endif
- if (dgap_NumBoards)
- pci_unregister_driver(&dgap_driver);
-}
-
-
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
-{
- int i = 0;
-
- if(!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- if (brd->re_map_port) {
- release_mem_region(brd->membase + 0x200000, 0x200000);
- iounmap(brd->re_map_port);
- brd->re_map_port = NULL;
- }
-
- if (brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- iounmap(brd->re_map_membase);
- brd->re_map_membase = NULL;
- }
-
- if (brd->msgbuf_head) {
- unsigned long flags;
-
- DGAP_LOCK(dgap_global_lock, flags);
- brd->msgbuf = NULL;
- printk("%s", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- DGAP_UNLOCK(dgap_global_lock, flags);
- }
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++) {
- if (brd->channels[i]) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- }
-
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-
- dgap_Board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-
-/*
- * dgap_found_board()
- *
- * A board has been found, init it.
- */
-static int dgap_found_board(struct pci_dev *pdev, int id)
-{
- struct board_t *brd;
- unsigned int pci_irq;
- int i = 0;
- unsigned long flags;
-
- /* get the board structure and prep it */
- brd = dgap_Board[dgap_NumBoards] =
- (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd) {
- APR(("memory allocation for board structure failed\n"));
- return(-ENOMEM);
- }
-
- /* make a temporary message buffer for the boot messages */
- brd->msgbuf = brd->msgbuf_head =
- (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
- if(!brd->msgbuf) {
- kfree(brd);
- APR(("memory allocation for board msgbuf failed\n"));
- return(-ENOMEM);
- }
-
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = dgap_NumBoards;
- brd->firstminor = 0;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_Ids[id].name;
- brd->maxports = dgap_Ids[id].maxports;
- brd->type = dgap_Ids[id].config_type;
- brd->dpatype = dgap_Ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
-
- DGAP_SPINLOCK_INIT(brd->bd_lock);
-
- brd->state = BOARD_FOUND;
- brd->runwait = 0;
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
-
- for (i = 0; i < MAXPORTS; i++) {
- brd->channels[i] = NULL;
- }
-
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
-
- /* get the PCI Base Address Registers */
-
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEVICE_XRJ_DID || brd->device == PCI_DEVICE_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
-
- if (!brd->membase) {
- APR(("card has no PCI IO resources, failing board.\n"));
- return -ENODEV;
- }
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- /*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE;
-
-
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEVICE_XRJ_DID && brd->device != PCI_DEVICE_EPCJ_DID) {
- unsigned short cmd;
-
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
-
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
-
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet, (unsigned long) brd);
-
- /* Log the information about the board */
- dgap_mbuf(brd, DRVSTR": board %d: %s (rev %d), irq %d\n",
- dgap_NumBoards, brd->name, brd->rev, brd->irq);
-
- DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
- DGAP_LOCK(dgap_global_lock, flags);
- brd->msgbuf = NULL;
- printk("%s", brd->msgbuf_head);
- kfree(brd->msgbuf_head);
- brd->msgbuf_head = NULL;
- DGAP_UNLOCK(dgap_global_lock, flags);
-
- i = dgap_do_remap(brd);
- if (i)
- brd->state = BOARD_FAILED;
- else
- brd->state = NEED_RESET;
-
- return(0);
-}
-
-
-int dgap_finalize_board_init(struct board_t *brd) {
-
- int rc;
-
- DPR_INIT(("dgap_finalize_board_init() - start\n"));
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return(-ENODEV);
-
- DPR_INIT(("dgap_finalize_board_init() - start #2\n"));
-
- brd->use_interrupts = dgap_config_get_useintr(brd);
-
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (brd->use_interrupts && brd->irq) {
-
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
-
- if (rc) {
- dgap_mbuf(brd, DRVSTR": Failed to hook IRQ %d. Board will work in poll mode.\n",
- brd->irq);
- brd->intr_used = 0;
- }
- else
- brd->intr_used = 1;
- } else {
- brd->intr_used = 0;
- }
-
- return(0);
-}
-
-
-/*
- * Remap PCI memory.
- */
-static int dgap_do_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- if (!request_mem_region(brd->membase, 0x200000, "dgap")) {
- APR(("dgap: mem_region %lx already in use.\n", brd->membase));
- return -ENOMEM;
- }
-
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap")) {
- APR(("dgap: mem_region IO %lx already in use.\n",
- brd->membase + PCI_IO_OFFSET));
- release_mem_region(brd->membase, 0x200000);
- return -ENOMEM;
- }
-
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase) {
- APR(("dgap: ioremap mem %lx cannot be mapped.\n", brd->membase));
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- return -ENOMEM;
- }
-
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_membase);
- APR(("dgap: ioremap IO mem %lx cannot be mapped.\n",
- brd->membase + PCI_IO_OFFSET));
- return -ENOMEM;
- }
-
- DPR_INIT(("remapped io: 0x%p remapped mem: 0x%p\n",
- brd->re_map_port, brd->re_map_membase));
- return 0;
-}
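As the comment earlier in dgap_found_board() explains, these boards expose no PCI I/O space: the low 2 MB of the 4 MB region is board memory and the I/O registers sit at the start of the upper 2 MB (membase + PCI_IO_OFFSET), which is why dgap_do_remap() sets up two separate 2 MB mappings. A minimal illustrative sketch of touching each window once the remap has succeeded; dgap_touch_windows() is a hypothetical helper and the accesses are placeholders, not real FEP registers:

	/* Illustrative only: one read through each of the two remapped windows. */
	static void dgap_touch_windows(struct board_t *brd)
	{
		u8 io0, mem0;

		io0  = readb(brd->re_map_port);		/* I/O registers, upper 2 MB window */
		mem0 = readb(brd->re_map_membase);	/* FEP/board memory, lower 2 MB window */

		DPR_INIT(("first io byte %02x, first mem byte %02x\n", io0, mem0));
	}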
-
-
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
-
-static void dgap_poll_handler(ulong dummy)
-{
- int i;
- struct board_t *brd;
- unsigned long lock_flags;
- unsigned long lock_flags2;
- ulong new_time;
-
- dgap_poll_counter++;
-
-
- /*
- * If driver needs the config file still,
- * keep trying to wake up the downloader to
- * send us the file.
- */
- if (dgap_driver_state == DRIVER_NEED_CONFIG_LOAD) {
- /*
-		 * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- goto schedule_poller;
- }
- /*
- * Do not start the board state machine until
-	 * driver tells us it's up and running, and has
- * everything it needs.
- */
- else if (dgap_driver_state != DRIVER_READY) {
- goto schedule_poller;
- }
-
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ( (dgap_NumBoards == 1) || (num_online_cpus() <= 1) ) {
- for (i = 0; i < dgap_NumBoards; i++) {
-
- brd = dgap_Board[i];
-
- if (brd->state == BOARD_FAILED) {
- continue;
- }
- if (!brd->intr_running) {
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long) brd);
- }
- }
- }
- else {
- /* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgap_NumBoards; i++) {
- brd = dgap_Board[i];
-
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll will get it.
- * Basically, I just really don't want to spin in here, because I want
-			 * to kick off my tasklets as fast as I can, and then get out of the poller.
- */
- if (!spin_trylock(&brd->bd_lock)) {
- continue;
- }
-
- /* If board is in a failed state, don't bother scheduling a tasklet */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
- }
-
- /* Schedule a poll helper task */
- if (!brd->intr_running) {
- tasklet_schedule(&brd->helper_tasklet);
- }
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
-
-schedule_poller:
-
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- DGAP_LOCK(dgap_poll_lock, lock_flags );
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
-
- new_time = dgap_poll_time - jiffies;
-
- if ((ulong) new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
-
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- DGAP_UNLOCK(dgap_poll_lock, lock_flags );
-
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
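The rescheduling block at the end of the handler advances the deadline by a fixed step from the previous deadline rather than from "now", so an occasionally late timer does not accumulate drift; only when the deadline has fallen roughly two ticks behind does it resynchronize against jiffies. The same pattern in isolation, as a sketch with hypothetical names and all values in jiffies:

	/* Illustrative only: fixed-rate rescheduling with a catch-up clamp. */
	static unsigned long next_poll_deadline(unsigned long deadline,
						unsigned long now,
						unsigned long step)
	{
		deadline += step;			/* nominal next expiry */
		if (deadline - now >= 2 * step)		/* a past deadline wraps to a huge value */
			deadline = now + step;		/* resync from "now" */
		return deadline;
	}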
-
-
-
-
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = (struct board_t *) voidbrd;
-
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
- return IRQ_NONE;
- }
-
- /*
-	 * Check to make sure it's for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
- return IRQ_NONE;
- }
-
- brd->intr_count++;
-
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
-
-
-/*
- * dgap_init_globals()
- *
- * This is where we initialize the globals from the static insmod
- * configuration variables. These are declared near the head of
- * this file.
- */
-static void dgap_init_globals(void)
-{
- int i = 0;
-
- dgap_rawreadok = rawreadok;
- dgap_trcbuf_size = trcbuf_size;
- dgap_debug = debug;
-
- for (i = 0; i < MAXBOARDS; i++) {
- dgap_Board[i] = NULL;
- }
-
- init_timer( &dgap_poll_timer );
-
- init_waitqueue_head(&dgap_dl_wait);
- dgap_dl_action = 0;
-}
-
-
-/************************************************************************
- *
- * Utility functions
- *
- ************************************************************************/
-
-
-/*
- * dgap_mbuf()
- *
- * Used to print to the message buffer during board init.
- */
-static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
- va_list ap;
- char buf[1024];
- int i;
- unsigned long flags;
- size_t length;
-
- DGAP_LOCK(dgap_global_lock, flags);
-
- /* Format buf using fmt and arguments contained in ap. */
- va_start(ap, fmt);
- i = vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- DPR((buf));
-
- if (!brd || !brd->msgbuf) {
- printk("%s", buf);
- DGAP_UNLOCK(dgap_global_lock, flags);
- return;
- }
-
- length = strlen(buf) + 1;
- if (brd->msgbuf - brd->msgbuf_head < length)
- length = brd->msgbuf - brd->msgbuf_head;
- memcpy(brd->msgbuf, buf, length);
- brd->msgbuf += length;
-
- DGAP_UNLOCK(dgap_global_lock, flags);
-}
-
-
-/*
- * dgap_ms_sleep()
- *
- * Put the driver to sleep for x ms's
- *
- * Returns 0 if the full delay elapsed, non-zero if interrupted by a signal.
- */
-int dgap_ms_sleep(ulong ms)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout((ms * HZ) / 1000);
- return (signal_pending(current));
-}
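dgap_ms_sleep() follows the classic "set task state, then schedule_timeout()" pattern and reports only whether a signal cut the wait short. A minimal usage sketch; wait_for_board_ready() is a hypothetical caller, not a driver function:

	/* Illustrative only: poll a board state in 10 ms steps for about 2 seconds,
	 * bailing out early if the process is signalled. */
	static int wait_for_board_ready(struct board_t *brd)
	{
		int i;

		for (i = 0; i < 200; i++) {
			if (brd->state == BOARD_READY)
				return 0;
			if (dgap_ms_sleep(10))		/* non-zero: a signal is pending */
				return -EINTR;
		}
		return -ETIMEDOUT;
	}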
-
-
-
-/*
- * dgap_ioctl_name() : Returns a text version of each ioctl value.
- */
-char *dgap_ioctl_name(int cmd)
-{
- switch(cmd) {
-
- case TCGETA: return("TCGETA");
- case TCGETS: return("TCGETS");
- case TCSETA: return("TCSETA");
- case TCSETS: return("TCSETS");
- case TCSETAW: return("TCSETAW");
- case TCSETSW: return("TCSETSW");
- case TCSETAF: return("TCSETAF");
- case TCSETSF: return("TCSETSF");
- case TCSBRK: return("TCSBRK");
- case TCXONC: return("TCXONC");
- case TCFLSH: return("TCFLSH");
- case TIOCGSID: return("TIOCGSID");
-
- case TIOCGETD: return("TIOCGETD");
- case TIOCSETD: return("TIOCSETD");
- case TIOCGWINSZ: return("TIOCGWINSZ");
- case TIOCSWINSZ: return("TIOCSWINSZ");
-
- case TIOCMGET: return("TIOCMGET");
- case TIOCMSET: return("TIOCMSET");
- case TIOCMBIS: return("TIOCMBIS");
- case TIOCMBIC: return("TIOCMBIC");
-
- /* from digi.h */
- case DIGI_SETA: return("DIGI_SETA");
- case DIGI_SETAW: return("DIGI_SETAW");
- case DIGI_SETAF: return("DIGI_SETAF");
- case DIGI_SETFLOW: return("DIGI_SETFLOW");
- case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
- case DIGI_GETFLOW: return("DIGI_GETFLOW");
- case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
- case DIGI_GETA: return("DIGI_GETA");
- case DIGI_GEDELAY: return("DIGI_GEDELAY");
- case DIGI_SEDELAY: return("DIGI_SEDELAY");
- case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
- case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
- case TIOCMODG: return("TIOCMODG");
- case TIOCMODS: return("TIOCMODS");
- case TIOCSDTR: return("TIOCSDTR");
- case TIOCCDTR: return("TIOCCDTR");
-
- default: return("unknown");
- }
-}
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
deleted file mode 100644
index 2f7a55a7e40d..000000000000
--- a/drivers/staging/dgap/dgap_driver.h
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGAP_DRIVER_H
-#define __DGAP_DRIVER_H
-
-#include <linux/version.h> /* To get the current Linux version */
-#include <linux/types.h>	/* To pick up the various Linux types */
-#include <linux/tty.h> /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h> /* For irqreturn_t type */
-
-#include "dgap_types.h" /* Additional types needed by the Digi header files */
-#include "digi.h" /* Digi specific ioctl header */
-#include "dgap_kcompat.h" /* Kernel 2.4/2.6 compat includes */
-#include "dgap_sysfs.h" /* Support for SYSFS */
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/*
- * Driver identification, error and debugging statements
- *
- * In theory, you can change all occurrences of "digi" in the next
- * three lines, and the driver printk's will all automagically change.
- *
- * APR((fmt, args, ...)); Always prints message
- * DPR((fmt, args, ...)); Only prints if DGAP_TRACER is defined at
- * compile time and dgap_debug!=0
- */
-#define DG_NAME "dgap-1.3-16"
-#define DG_PART "40002347_C"
-
-#define PROCSTR "dgap" /* /proc entries */
-#define DEVSTR "/dev/dg/dgap" /* /dev entries */
-#define DRVSTR "dgap" /* Driver name string
- * displayed by APR */
-#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
- } while (0)
-#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
-
-#define TRC_TO_CONSOLE 1
-
-/*
- * Debugging levels can be set using debug insmod variable
- * They can also be compiled out completely.
- */
-
-#define DBG_INIT (dgap_debug & 0x01)
-#define DBG_BASIC (dgap_debug & 0x02)
-#define DBG_CORE (dgap_debug & 0x04)
-
-#define DBG_OPEN (dgap_debug & 0x08)
-#define DBG_CLOSE (dgap_debug & 0x10)
-#define DBG_READ (dgap_debug & 0x20)
-#define DBG_WRITE (dgap_debug & 0x40)
-
-#define DBG_IOCTL (dgap_debug & 0x80)
-
-#define DBG_PROC (dgap_debug & 0x100)
-#define DBG_PARAM (dgap_debug & 0x200)
-#define DBG_PSCAN (dgap_debug & 0x400)
-#define DBG_EVENT (dgap_debug & 0x800)
-
-#define DBG_DRAIN (dgap_debug & 0x1000)
-#define DBG_CARR (dgap_debug & 0x2000)
-
-#define DBG_MGMT (dgap_debug & 0x4000)
-
-
-#if defined(DGAP_TRACER)
-
-# if defined(TRC_TO_KMEM)
-/* Choose one: */
-# define TRC_ON_OVERFLOW_WRAP_AROUND
-# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
-# endif //TRC_TO_KMEM
-
-# define TRC_MAXMSG 1024
-# define TRC_OVERFLOW "(OVERFLOW)"
-# define TRC_DTRC "/usr/bin/dtrc"
-
-#if defined TRC_TO_CONSOLE
-#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
-#else //!defined TRACE_TO_CONSOLE
-#define PRINTF_TO_CONSOLE(args)
-#endif
-
-#if defined TRC_TO_KMEM
-#define PRINTF_TO_KMEM(args) dgap_tracef args
-#else //!defined TRC_TO_KMEM
-#define PRINTF_TO_KMEM(args)
-#endif
-
-#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
-
-# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
-# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
-# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
-# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
-# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
-# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
-# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
-# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
-# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
-# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
-# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
-# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
-# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
-# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
-# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
-
-# define DPR(ARGS) if (dgap_debug) TRC(ARGS)
-# define P(X) dgap_tracef(#X "=%p\n", X)
-# define X(X) dgap_tracef(#X "=%x\n", X)
-
-#else//!defined DGAP_TRACER
-
-#define PRINTF_TO_KMEM(args)
-# define TRC(ARGS)
-# define DPR_INIT(ARGS)
-# define DPR_BASIC(ARGS)
-# define DPR_CORE(ARGS)
-# define DPR_OPEN(ARGS)
-# define DPR_CLOSE(ARGS)
-# define DPR_READ(ARGS)
-# define DPR_WRITE(ARGS)
-# define DPR_IOCTL(ARGS)
-# define DPR_PROC(ARGS)
-# define DPR_PARAM(ARGS)
-# define DPR_PSCAN(ARGS)
-# define DPR_EVENT(ARGS)
-# define DPR_DRAIN(ARGS)
-# define DPR_CARR(ARGS)
-# define DPR_MGMT(ARGS)
-
-# define DPR(args)
-
-#endif//DGAP_TRACER
-
-/* Number of boards we support at once. */
-#define MAXBOARDS 32
-#define MAXPORTS 224
-#define MAXTTYNAMELEN 200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGAP_BOARD_MAGIC 0x5c6df104
-#define DGAP_CHANNEL_MAGIC 0x6c6df104
-#define DGAP_UNIT_MAGIC 0x7c6df104
-
-/* Serial port types */
-#define DGAP_SERIAL 0
-#define DGAP_PRINT 1
-
-#define SERIAL_TYPE_NORMAL 1
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN ((4096) + 4)
-#define MYFLIPLEN N_TTY_BUF_SIZE
-
-#define SBREAK_TIME 0x25
-#define U2BSIZE 0x400
-
-#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
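Because dgap_jiffies_from_ms() is plain integer arithmetic, the result truncates: with HZ = 1000 the default 20 ms poll tick maps to (20 * 1000) / 1000 = 20 jiffies, with HZ = 250 it maps to 5 jiffies, and with HZ = 100 a 15 ms request truncates to a single jiffy.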
-
-/*
- * Our major for the mgmt devices.
- *
- * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
- * 22 has become obsolete now that the "cu" devices have
- * been removed from 2.6.
- * Also, this *IS* the epca driver, just PCI only now.
- */
-#ifndef DIGI_DGAP_MAJOR
-# define DIGI_DGAP_MAJOR 22
-#endif
-
-/*
- * The parameters we use to define the periods of the moving averages.
- */
-#define MA_PERIOD (HZ / 10)
-#define SMA_DUR (1 * HZ)
-#define EMA_DUR (1 * HZ)
-#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
-#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define DEFAULT_IFLAGS (ICRNL | IXON)
-#define DEFAULT_OFLAGS (OPOST | ONLCR)
-#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
- ECHOCTL | ECHOKE | IEXTEN)
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE '\0'
-#endif
-
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
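SNIFF_MAX has to stay a power of two (the comment's "2^n") precisely so SNIFF_MASK can wrap indices with a single AND instead of a divide: with SNIFF_MAX = 65536, SNIFF_MASK = 0xffff, so an index of 65535 advances to (65535 + 1) & 0xffff = 0 and the sniff buffer indices can never run past the allocation.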
-
-#define VPDSIZE (512)
-
-/*
- * Lock function/defines.
- * Makes spotting lock/unlock locations easier.
- */
-# define DGAP_SPINLOCK_INIT(x) spin_lock_init(&(x))
-# define DGAP_LOCK(x,y) spin_lock_irqsave(&(x), y)
-# define DGAP_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
-# define DGAP_TRYLOCK(x,y) spin_trylock(&(x))
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
- DRIVER_INITIALIZED = 0,
- DRIVER_NEED_CONFIG_LOAD,
- DRIVER_REQUESTED_CONFIG,
- DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
- BOARD_FAILED = 0,
- CONFIG_NOT_FOUND,
- BOARD_FOUND,
- NEED_RESET,
- FINISHED_RESET,
- NEED_CONFIG,
- FINISHED_CONFIG,
- NEED_DEVICE_CREATION,
- REQUESTED_DEVICE_CREATION,
- FINISHED_DEVICE_CREATION,
- NEED_BIOS_LOAD,
- REQUESTED_BIOS,
- WAIT_BIOS_LOAD,
- FINISHED_BIOS_LOAD,
- NEED_FEP_LOAD,
- REQUESTED_FEP,
- WAIT_FEP_LOAD,
- FINISHED_FEP_LOAD,
- NEED_PROC_CREATION,
- FINISHED_PROC_CREATION,
- BOARD_READY
-};
-
-/*
- * All the possible states that a requested concentrator image can be in.
- */
-enum {
- NO_PENDING_CONCENTRATOR_REQUESTS = 0,
- NEED_CONCENTRATOR,
- REQUESTED_CONCENTRATOR
-};
-
-extern char *dgap_state_text[];
-extern char *dgap_driver_state_text[];
-
-
-/*
- * Modem line constants are defined as macros because DSR and
- * DCD are swappable using the ditty altpin option.
- */
-#define D_CD(ch) ch->ch_cd /* Carrier detect */
-#define D_DSR(ch) ch->ch_dsr /* Data set ready */
-#define D_RTS(ch) DM_RTS /* Request to send */
-#define D_CTS(ch) DM_CTS /* Clear to send */
-#define D_RI(ch) DM_RI /* Ring indicator */
-#define D_DTR(ch) DM_DTR /* Data terminal ready */
-
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-
-/*
- * A structure to hold a statistics counter. We also
- * compute moving averages for this counter.
- */
-struct macounter
-{
- u32 cnt; /* Total count */
-	ulong		accum;		/* Accumulator per period */
- ulong sma; /* Simple moving average */
- ulong ema; /* Exponential moving average */
-};
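The update code for these counters is not part of this hunk, so the following is only a generic sketch of how such a per-period counter is commonly folded into an exponential moving average using the EMA_NPERIODS value defined earlier in this header; it is not the dgap implementation, and a strict simple moving average would additionally need the last SMA_NPERIODS per-period samples, which this structure does not keep:

	/* Generic sketch, NOT the dgap code: close out one MA_PERIOD. */
	static void ma_fold_period(struct macounter *m)
	{
		long diff = (long)m->accum - (long)m->ema;

		m->cnt   += m->accum;			/* lifetime total */
		m->ema   += diff / EMA_NPERIODS;	/* exponential MA, smoothing 1/EMA_NPERIODS */
		m->accum  = 0;				/* start the next period */
	}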
-
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
-#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
-
-
-/*
- * Per-board information
- */
-struct board_t
-{
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-3 */
- int firstminor; /* First minor, e.g. 0, 30, 60 */
-
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- uchar rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- u16 maxports; /* MAX ports this board can handle */
- uchar vpd[VPDSIZE]; /* VPD of board, if found */
- u32 bd_flags; /* Board flags */
-
- spinlock_t bd_lock; /* Used to protect board */
-
- u32 state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
-
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
- u32 wait_for_bios;
- u32 wait_for_fep;
-
- struct cnode * bd_config; /* Config of board */
-
- u16 nasync; /* Number of ports on card */
-
- u32 use_interrupts; /* Should we be interrupt driven? */
- ulong irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
- u32 intr_used; /* Non-zero if using interrupts */
-	u32		intr_running;	/* Non-zero if FEP knows it's doing interrupts */
-
- ulong port; /* Start of base io port of the card */
- ulong port_end; /* End of base io port of the card */
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
-
- uchar *re_map_port; /* Remapped io port of the card */
- uchar *re_map_membase;/* Remapped memory of the card */
-
- uchar runwait; /* # Processes waiting for FEP */
- uchar inhibit_poller; /* Tells the poller to leave us alone */
-
- struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
-
- struct tty_driver *SerialDriver;
- char SerialName[200];
- struct tty_driver *PrintDriver;
- char PrintName[200];
-
- u32 dgap_Major_Serial_Registered;
- u32 dgap_Major_TransparentPrint_Registered;
-
- u32 dgap_Serial_Major;
- u32 dgap_TransparentPrint_Major;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- u32 TtyRefCnt;
-#endif
-
- struct bs_t *bd_bs; /* Base structure pointer */
-
- char *flipbuf; /* Our flip buffer, alloced if board is found */
- char *flipflagbuf; /* Our flip flag buffer, alloced if board is found */
-
- u16 dpatype; /* The board "type", as defined by DPA */
- u16 dpastatus; /* The board "status", as defined by DPA */
- wait_queue_head_t kme_wait; /* Needed for DPA support */
-
- u32 conc_dl_status; /* Status of any pending conc download */
- /*
- * Mgmt data.
- */
- char *msgbuf_head;
- char *msgbuf;
-};
-
-
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY		0x0008		/* Some work pending on this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
-#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
-#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL	0x0800		/* Device waiting for ioctl */
-#define UN_HANGUP 0x8000 /* Carrier lost */
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
- int magic; /* Unit Magic Number. */
- struct channel_t *un_ch;
- u32 un_time;
- u32 un_type;
- u32 un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- u32 un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- u32 un_dev; /* Minor device number */
- tcflag_t un_oflag; /* oflags being done on board */
- tcflag_t un_lflag; /* lflags being done on board */
- struct device *un_sysfs;
-};
-
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_OUT 0x0002 /* Dial-out device open */
-#define CH_STOP 0x0004 /* Output is stopped */
-#define CH_STOPI 0x0008 /* Input is stopped */
-#define CH_CD 0x0010 /* Carrier is present */
-#define CH_FCAR 0x0020 /* Carrier forced on */
-
-#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
-#define CH_WLOW 0x0100 /* Term waiting low event */
-#define CH_WEMPTY 0x0200 /* Term waiting empty event */
-#define CH_RENABLE 0x0400 /* Buffer just emptied */
-#define CH_RACTIVE 0x0800 /* Process active in xxread() */
-#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
-#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
-#define CH_HANGUP 0x8000 /* Hangup received */
-
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
- int magic; /* Channel Magic Number */
- struct bs_t *ch_bs; /* Base structure pointer */
- struct cm_t *ch_cm; /* Command queue pointer */
- struct board_t *ch_bd; /* Board structure pointer */
- unsigned char *ch_vaddr; /* FEP memory origin */
- unsigned char *ch_taddr; /* Write buffer origin */
- unsigned char *ch_raddr; /* Read buffer origin */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
-
- spinlock_t ch_lock; /* provide for serialization */
- wait_queue_head_t ch_flags_wait;
-
- u32 pscan_state;
- uchar pscan_savechar;
-
- u32 ch_portnum; /* Port number, 0 offset. */
- u32 ch_open_count; /* open count */
- u32 ch_flags; /* Channel flags */
-
-
- u32 ch_close_delay; /* How long we should drop RTS/DTR for */
-
- u32 ch_cpstime; /* Time for CPS calculations */
-
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
-
- u16 ch_fepiflag; /* FEP tty iflags */
- u16 ch_fepcflag; /* FEP tty cflags */
- u16 ch_fepoflag; /* FEP tty oflags */
- u16 ch_wopen; /* Waiting for open process cnt */
- u16 ch_tstart; /* Transmit buffer start */
- u16 ch_tsize; /* Transmit buffer size */
- u16 ch_rstart; /* Receive buffer start */
- u16 ch_rsize; /* Receive buffer size */
- u16 ch_rdelay; /* Receive delay time */
-
- u16 ch_tlw; /* Our currently set low water mark */
-
- u16 ch_cook; /* Output character mask */
-
- uchar ch_card; /* Card channel is on */
- uchar ch_stopc; /* Stop character */
- uchar ch_startc; /* Start character */
-
- uchar ch_mostat; /* FEP output modem status */
- uchar ch_mistat; /* FEP input modem status */
- uchar ch_mforce; /* Modem values to be forced */
- uchar ch_mval; /* Force values */
- uchar ch_fepstopc; /* FEP stop character */
- uchar ch_fepstartc; /* FEP start character */
-
- uchar ch_astopc; /* Auxiliary Stop character */
- uchar ch_astartc; /* Auxiliary Start character */
- uchar ch_fepastopc; /* Auxiliary FEP stop char */
- uchar ch_fepastartc; /* Auxiliary FEP start char */
-
- uchar ch_hflow; /* FEP hardware handshake */
- uchar ch_dsr; /* stores real dsr value */
- uchar ch_cd; /* stores real cd value */
- uchar ch_tx_win; /* channel tx buffer window */
- uchar ch_rx_win; /* channel rx buffer window */
- uint ch_custom_speed; /* Custom baud, if set */
- uint ch_baud_info; /* Current baud info for /proc output */
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
-
- uint ch_sniff_in;
- uint ch_sniff_out;
- char *ch_sniff_buf; /* Sniff buffer for proc */
- ulong ch_sniff_flags; /* Channel flags */
- wait_queue_head_t ch_sniff_wait;
-};
-
-
-/*************************************************************************
- *
- * Prototypes for non-static functions used in more than one module
- *
- *************************************************************************/
-
-extern int dgap_ms_sleep(ulong ms);
-extern char *dgap_ioctl_name(int cmd);
-extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
-extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
-extern void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
-extern void dgap_do_config_load(uchar __user *uaddr, int len);
-extern int dgap_after_config_loaded(void);
-extern int dgap_finalize_board_init(struct board_t *brd);
-
-/*
- * Our Global Variables.
- */
-extern int dgap_driver_state; /* The state of the driver */
-extern int dgap_debug; /* Debug variable */
-extern int dgap_rawreadok; /* Set if user wants rawreads */
-extern int dgap_poll_tick; /* Poll interval - 20 ms */
-extern spinlock_t dgap_global_lock; /* Driver global spinlock */
-extern uint dgap_NumBoards; /* Total number of boards */
-extern struct board_t *dgap_Board[MAXBOARDS]; /* Array of board structs */
-extern ulong dgap_poll_counter; /* Times the poller has run */
-extern char *dgap_config_buf; /* The config file buffer */
-extern spinlock_t dgap_dl_lock; /* Downloader spinlock */
-extern wait_queue_head_t dgap_dl_wait; /* Wait queue for downloader */
-extern int dgap_dl_action; /* Action flag for downloader */
-extern int dgap_registerttyswithsysfs; /* Should we register the */
- /* ttys with sysfs or not */
-
-/*
- * Global functions declared in dgap_fep5.c, but must be hidden from
- * user space programs.
- */
-extern void dgap_poll_tasklet(unsigned long data);
-extern void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds);
-extern void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
-extern void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
-extern int dgap_param(struct tty_struct *tty);
-extern void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len);
-extern uint dgap_get_custom_baud(struct channel_t *ch);
-extern void dgap_firmware_reset_port(struct channel_t *ch);
-
-#endif
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
deleted file mode 100644
index f75831a422e8..000000000000
--- a/drivers/staging/dgap/dgap_fep5.c
+++ /dev/null
@@ -1,1938 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- * $Id: dgap_fep5.c,v 1.2 2011/06/21 10:35:40 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <linux/tty.h>
-#include <linux/tty_flip.h> /* For tty_schedule_flip */
-#include <linux/slab.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-#include <linux/sched.h>
-#endif
-
-#include "dgap_driver.h"
-#include "dgap_pci.h"
-#include "dgap_fep5.h"
-#include "dgap_tty.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-#include "dgap_trace.h"
-
-/*
- * Our function prototypes
- */
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
-static int dgap_event(struct board_t *bd);
-
-/*
- * internal variables
- */
-static uint dgap_count = 500;
-
-
-/*
- * Loads the dgap.conf config file from the user.
- */
-void dgap_do_config_load(uchar __user *uaddr, int len)
-{
- int orig_len = len;
- char *to_addr;
- uchar __user *from_addr = uaddr;
- char buf[U2BSIZE];
- int n;
-
- to_addr = dgap_config_buf = kzalloc(len + 1, GFP_ATOMIC);
- if (!dgap_config_buf) {
- DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
- dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
- return;
- }
-
- n = U2BSIZE;
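- /*
-  * Copy the config image from user space in U2BSIZE-sized chunks,
-  * bouncing each chunk through the on-stack buffer.
-  */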
- while (len) {
-
- if (n > len)
- n = len;
-
- if (copy_from_user((char *) &buf, from_addr, n) == -1 )
- return;
-
- /* Copy data from buffer to kernel memory */
- memcpy(to_addr, buf, n);
-
- /* increment counts */
- len -= n;
- to_addr += n;
- from_addr += n;
- n = U2BSIZE;
- }
-
- dgap_config_buf[orig_len] = '\0';
-
- to_addr = dgap_config_buf;
- dgap_parsefile(&to_addr, TRUE);
-
- DPR_INIT(("dgap_config_load() finish\n"));
-
- return;
-}
-
-
-int dgap_after_config_loaded(void)
-{
- int i = 0;
- int rc = 0;
-
- /*
- * Register our ttys, now that we have the config loaded.
- */
- for (i = 0; i < dgap_NumBoards; ++i) {
-
- /*
- * Initialize KME waitqueues...
- */
- init_waitqueue_head(&(dgap_Board[i]->kme_wait));
-
- /*
- * allocate flip buffer for board.
- */
- dgap_Board[i]->flipbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
- dgap_Board[i]->flipflagbuf = kzalloc(MYFLIPLEN, GFP_ATOMIC);
- }
-
- return rc;
-}
-
-
-
-/*=======================================================================
- *
- * usertoboard - copy from user space to board space.
- *
- *=======================================================================*/
-static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *from_addr, int len)
-{
- char buf[U2BSIZE];
- int n = U2BSIZE;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EFAULT;
-
- while (len) {
- if (n > len)
- n = len;
-
- if (copy_from_user((char *) &buf, from_addr, n) == -1 ) {
- return -EFAULT;
- }
-
- /* Copy data from buffer to card memory */
- memcpy_toio(to_addr, buf, n);
-
- /* increment counts */
- len -= n;
- to_addr += n;
- from_addr += n;
- n = U2BSIZE;
- }
- return 0;
-}
-
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
-{
- uchar *addr;
- uint offset;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- DPR_INIT(("dgap_do_bios_load() start\n"));
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- if (dgap_usertoboard(brd, addr + offset, ubios, len) == -1 ) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
- brd->state = WAIT_BIOS_LOAD;
-}
-
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static void dgap_do_wait_for_bios(struct board_t *brd)
-{
- uchar *addr;
- u16 word;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *) "GD") {
- DPR_INIT(("GOT GD in memory, moving states.\n"));
- brd->state = FINISHED_BIOS_LOAD;
- return;
- }
-
- /* Give up on the board after waiting too long */
- if (brd->wait_for_bios++ > 5000) {
- u16 err1 = readw(addr + SEQUENCE);
- u16 err2 = readw(addr + ERROR);
- APR(("***WARNING*** %s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- }
-}
-
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len)
-{
- uchar *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- DPR_INIT(("dgap_do_fep_load() for board %s : start\n", brd->name));
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- if (dgap_usertoboard(brd, addr + offset, ufep, len) == -1 ) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- uchar string[100];
- uchar *config, *xconfig;
- int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
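- /* Copy bytes up to and including the 0xff terminator, capped at CONFIGSIZE bytes. */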
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-
- /* change states. */
- brd->state = WAIT_FEP_LOAD;
-
- DPR_INIT(("dgap_do_fep_load() for board %s : finish\n", brd->name));
-
-}
-
-
-/*
- * Waits for the FEP to report that it is ready for us to use.
- */
-static void dgap_do_wait_for_fep(struct board_t *brd)
-{
- uchar *addr;
- u16 word;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- DPR_INIT(("dgap_do_wait_for_fep() for board %s : start. addr: %p\n", brd->name, addr));
-
- word = readw(addr + FEPSTAT);
-
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *) "OS") {
- DPR_INIT(("GOT OS in memory for board %s, moving states.\n", brd->name));
- brd->state = FINISHED_FEP_LOAD;
-
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *) "5A") {
- DPR_INIT(("GOT 5A in memory for board %s, board supports extended FEP5 commands.\n", brd->name));
- brd->bd_flags |= BD_FEP5PLUS;
- }
-
- return;
- }
-
- /* Give up on the board after waiting too long */
- if (brd->wait_for_fep++ > 5000) {
- u16 err1 = readw(addr + SEQUENCE);
- u16 err2 = readw(addr + ERROR);
- APR(("***WARNING*** FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- }
-
- DPR_INIT(("dgap_do_wait_for_fep() for board %s : finish\n", brd->name));
-}
-
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- uchar check;
- u32 check1;
- u32 check2;
- int i = 0;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
- DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
- brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
- return;
- }
-
- DPR_INIT(("dgap_do_reset_board() start. io: %p\n", brd->re_map_port));
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
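- /* Poll the reset register every 10us, for up to roughly 10ms, until the FEP acknowledges the reset. */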
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
-
- }
- if (i > 1000) {
- APR(("*** WARNING *** Board not resetting... Failing board.\n"));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- goto failed;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- APR(("*** Warning *** No memory at %p for board.\n", brd->re_map_membase));
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- goto failed;
- }
-
- if (brd->state != BOARD_FAILED)
- brd->state = FINISHED_RESET;
-
-failed:
- DPR_INIT(("dgap_do_reset_board() finish\n"));
-}
-
-
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
-{
- char *vaddr;
- u16 offset = 0;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *) (vaddr + DOWNREQ));
- to_dp = (struct downld_t *) (vaddr + (int) offset);
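- /* DOWNREQ holds the offset where the FEP wants the concentrator image placed. */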
-
- /*
- * The image was already read into kernel space,
- * so we do NOT need a user-space read here.
- */
- memcpy_toio((char *) to_dp, uaddr, sizeof(struct downld_t));
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- uchar byte1;
- uchar byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the board's base
- * address OR'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
-
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
-
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data Structure.
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
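- /*
-  * Offset 0x18 of the expansion ROM header points to the PCI Data
-  * Structure; within it, offset 0x08 is the VPD pointer and offset
-  * 0x10 is the image length in units of 512 bytes.
-  */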
- rom_offset = readw(brd->re_map_membase + base_offset + 0x18);
- image_length = readw(brd->re_map_membase + rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase + rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++)
- brd->vpd[i] = readb(brd->re_map_membase + vpd_offset + i);
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
-
-
-/*
- * Our board poller function.
- */
-void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *) data;
- ulong lock_flags;
- ulong lock_flags2;
- char *vaddr;
- u16 head, tail;
- u16 *chk_addr;
- u16 check = 0;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC)) {
- APR(("dgap_poll_tasklet() - NULL or bad bd.\n"));
- return;
- }
-
- if (bd->inhibit_poller)
- return;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
-
- struct ev_t *eaddr = NULL;
-
- if (!bd->re_map_membase) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync) {
- goto out;
- }
-
- /*
- * If this is a CX or EPCX, we need to see if the firmware
- * is requesting a concentrator image from us.
- */
- if ((bd->type == PCX) || (bd->type == PEPC)) {
- chk_addr = (u16 *) (vaddr + DOWNREQ);
- check = readw(chk_addr);
- /* Nonzero if FEP is requesting concentrator image. */
- if (check) {
- if (bd->conc_dl_status == NO_PENDING_CONCENTRATOR_REQUESTS)
- bd->conc_dl_status = NEED_CONCENTRATOR;
- /*
- * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
-
- }
- }
-
- eaddr = (struct ev_t *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * If there is an event pending, go service it.
- */
- if (head != tail) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_event(bd);
- DGAP_LOCK(bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd && bd->intr_running) {
- readb(bd->re_map_port + 2);
- }
-
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- /* Our state machine to get the board up and running */
-
- /* Reset board */
- if (bd->state == NEED_RESET) {
-
- /* Get VPD info */
- dgap_get_vpd(bd);
-
- dgap_do_reset_board(bd);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_RESET) {
- bd->state = NEED_CONFIG;
- }
-
- if (bd->state == NEED_CONFIG) {
- /*
- * Match this board to a config the user created for us.
- */
- bd->bd_config = dgap_find_config(bd->type, bd->pci_bus, bd->pci_slot);
-
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (bd->type == PAPORT8 && !bd->bd_config) {
- bd->bd_config = dgap_find_config(PAPORT4, bd->pci_bus, bd->pci_slot);
- }
-
- /*
- * Register the ttys (if any) into the kernel.
- */
- if (bd->bd_config) {
- bd->state = FINISHED_CONFIG;
- }
- else {
- bd->state = CONFIG_NOT_FOUND;
- }
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_CONFIG) {
- bd->state = NEED_DEVICE_CREATION;
- }
-
- /* Move to next state */
- if (bd->state == NEED_DEVICE_CREATION) {
- /*
- * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_DEVICE_CREATION) {
- bd->state = NEED_BIOS_LOAD;
- }
-
- /* Move to next state */
- if (bd->state == NEED_BIOS_LOAD) {
- /*
- * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Wait for BIOS to test board... */
- if (bd->state == WAIT_BIOS_LOAD) {
- dgap_do_wait_for_bios(bd);
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_BIOS_LOAD) {
- bd->state = NEED_FEP_LOAD;
-
- /*
- * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
-
- /* Wait for FEP to load on board... */
- if (bd->state == WAIT_FEP_LOAD) {
- dgap_do_wait_for_fep(bd);
- }
-
-
- /* Move to next state */
- if (bd->state == FINISHED_FEP_LOAD) {
-
- /*
- * Do tty device initialization.
- */
- int rc = dgap_tty_init(bd);
-
- if (rc < 0) {
- dgap_tty_uninit(bd);
- APR(("Can't init tty devices (%d)\n", rc));
- bd->state = BOARD_FAILED;
- bd->dpastatus = BD_NOFEP;
- }
- else {
- bd->state = NEED_PROC_CREATION;
-
- /*
- * Signal downloader, it's got some work to do.
- */
- DGAP_LOCK(dgap_dl_lock, lock_flags2);
- if (dgap_dl_action != 1) {
- dgap_dl_action = 1;
- wake_up_interruptible(&dgap_dl_wait);
- }
- DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
- }
- }
-
- /* Move to next state */
- if (bd->state == FINISHED_PROC_CREATION) {
-
- bd->state = BOARD_READY;
- bd->dpastatus = BD_RUNNING;
-
- /*
- * If user requested the board to run in interrupt mode,
- * go and set it up on the board.
- */
- if (bd->intr_used) {
- writew(1, (bd->re_map_membase + ENABLE_INTR));
- /*
- * Tell the board to poll the UARTS as fast as possible.
- */
- writew(FEPPOLL_MIN, (bd->re_map_membase + FEPPOLL));
- bd->intr_running = 1;
- }
-
- /* Wake up anyone waiting for board state to change to ready */
- wake_up_interruptible(&bd->state_wait);
- }
-
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-}
-
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2 byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
- writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
- writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
- writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
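- /* Advance the head by one 4-byte command slot, wrapping within the (CMDMAX - CMDSTART)-byte circular command buffer. */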
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
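- /* Number of command bytes still queued to the FEP, computed modulo the circular buffer size. */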
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1 word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
- writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
- writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends an extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char *vaddr = NULL;
- struct cm_t *cm_addr = NULL;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
- return;
- }
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
-
- writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
- writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03))) {
- writew((u16) word, (char *) (vaddr + CMDSTART));
- } else {
- writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
- }
-
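- /* Extended commands occupy two 4-byte slots, so advance the head by 8. */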
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
- *
- * ch - Pointer to channel structure.
- * buf - Pointer to characters to be moved.
- * cnt - Number of characters to move.
- *
- *=======================================================================*/
-void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char *taddr;
- struct bs_t *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&(bs->tx_head));
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) || (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) {
- DPR_CORE(("%s:%d pointer out of range", __FILE__, __LINE__));
- return;
- }
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &(bs->tx_head));
-}
-
-/*
- * Retrieves the current custom baud rate from FEP memory
- * and returns it to the caller.
- * Returns 0 on error.
- */
-uint dgap_get_custom_baud(struct channel_t *ch)
-{
- uchar *vaddr;
- ulong offset = 0;
- uint value = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return 0;
- }
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) {
- return 0;
- }
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
- * Read from FEP memory what the FEP
- * believes the custom baud rate is.
- */
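- /*
-  * ECS_SEG holds a segment value; shifting it left by 4 gives the
-  * byte offset of the extended channel structures.  Each per-port
-  * entry is 0x28 bytes, and LINE_SPEED selects the speed word.
-  */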
- offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
- (ch->ch_portnum * 0x28) + LINE_SPEED));
-
- value = readw(vaddr + offset);
- return value;
-}
-
-
-/*
- * Calls the firmware to reset this channel.
- */
-void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * struct tty_struct * - TTY for port.
- *
- *=======================================================================*/
-int dgap_param(struct tty_struct *tty)
-{
- struct ktermios *ts;
- struct board_t *bd;
- struct channel_t *ch;
- struct bs_t *bs;
- struct un_t *un;
- u16 head;
- u16 cflag;
- u16 iflag;
- uchar mval;
- uchar hflow;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENXIO;
-
- un = (struct un_t *) tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -ENXIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -ENXIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- bs = ch->ch_bs;
- if (!bs)
- return -ENXIO;
-
- DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
- ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
-
- ts = &tty->termios;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-
- /* flush rx */
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
-
- /* flush tx */
- head = readw(&(ch->ch_bs->tx_head));
- writew(head, &(ch->ch_bs->tx_tail));
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- DPR_PARAM(("param: Want %d speed\n", ch->ch_custom_speed));
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now read back from FEP memory what the FEP
- * believes the custom baud rate is.
- */
- ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
-
- DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGAP_PRINT))
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
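- /*
-  * Pick the row of the baud table: CBAUDEX selects the extended
-  * table and DIGI_FAST the "fastbaud" variants; the low bits of
-  * C_BAUD select the column.
-  */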
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
- baud = bauds[iindex][jindex];
- } else {
- DPR_IOCTL(("baud indices were out of range (%d)(%d)",
- iindex, jindex));
- baud = 0;
- }
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
-
- /*
- * CBAUD has bit position 0x1000 set these days to indicate Linux
- * baud rate remap.
- * We use a different bit assignment for high speed. Clear this
- * bit out while grabbing the parts of "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) || (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
-
- if ((ch->ch_c_cflag & CBAUDEX) && !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only baud rates
- * 115200, 230400, 460800, 921600 are remapped. We use exclusive or
- * because the various baud rates share common bit positions
- * and therefore can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /* Map high speed requests to index into FEP's baud table */
- switch (tcflag) {
- case B57600 :
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800 :
- baudpart = 2;
- break;
-#endif
- case B115200 :
- baudpart = 3;
- break;
- case B230400 :
- baudpart = 9;
- break;
- case B460800 :
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600 :
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16) (cflag & 0xffff);
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEVICE_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN ;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS) {
- hflow |= (D_RTS(ch) | D_CTS(ch));
- }
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
-
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
- }
-
-
- /*
- * Set RTS and/or DTR Toggle if needed, but only if product is FEP5+ based.
- */
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
- hflow2 |= (D_RTS(ch));
- }
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
- hflow2 |= (D_DTR(ch));
- }
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
-
- /*
- * Set modem control lines.
- */
-
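- /* For any signal selected in ch_mforce, substitute the forced value from ch_mval; all other bits keep the value computed above. */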
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- DPR_PARAM(("dgap_param: mval: %x ch_mforce: %x ch_mval: %x ch_mostat: %x\n",
- mval, ch->ch_mforce, ch->ch_mval, ch->ch_mostat));
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- DPR_PARAM(("dgap_param: Sending SMODEM mval: %x\n", mval));
- dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
- }
-
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&(bs->m_stat));
- dgap_carrier(ch);
-
- /*
- * Set the start and stop characters.
- */
- if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
-
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
-
- DPR_PARAM(("param finish\n"));
-
- return 0;
-}
-
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- DPR_PSCAN(("dgap_parity_scan start\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
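- /*
-  * The FEP escapes exceptions in the data stream with 0xff sequences:
-  * ff ff is a literal 0xff, ff 00 00 marks a break, and ff 00 <c>
-  * marks character <c> received with a parity error.
-  */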
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char) '\377') {
- /* delete this character from stream */
- ch->pscan_state = 1;
- } else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char) '\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
- /* save value for examination in the next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
-
- if (c == 0x0) {
- DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting break.\n", c));
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- }
- else {
- DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting parity.\n", c));
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
- else {
- DPR_PSCAN(("%s:%d Logic Error.\n", __FILE__, __LINE__));
- }
-
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
- DPR_PSCAN(("dgap_parity_scan finish\n"));
-}
-
-
-
-
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
-{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t *bs;
- uchar *event;
- uchar *vaddr = NULL;
- struct ev_t *eaddr = NULL;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
- int b1;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENXIO;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- eaddr = (struct ev_t *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- DPR_EVENT(("should be calling xxfail %d\n", __LINE__));
- /* Let go of board lock */
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /*
- * Loop to process all the events in the buffer.
- */
- while (tail != head) {
-
- /*
- * Get interrupt information.
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = event[0];
- reason = event[1];
- modem = event[2];
- b1 = event[3];
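- /* Each event record is 4 bytes: port number, reason flags, current modem status, and a fourth byte this handler does not use. */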
-
- DPR_EVENT(("event: jiffies: %ld port: %d reason: %x modem: %x\n",
- jiffies, port, reason, modem));
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
- goto next;
- }
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- goto next;
- }
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
-
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &(bs->idata));
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
-
- DPR_EVENT(("got IFBREAK\n"));
-
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room(ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port, 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
-
- DPR_EVENT(("event: got low event\n"));
-
- if (ch->ch_tun.un_flags & UN_LOW) {
- ch->ch_tun.un_flags &= ~UN_LOW;
-
- if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
-#else
- (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
-
- DPR_EVENT(("event: Got low event. jiffies: %lu\n", jiffies));
- }
- }
-
- if (ch->ch_pun.un_flags & UN_LOW) {
- ch->ch_pun.un_flags &= ~UN_LOW;
- if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
-#else
- (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- }
-
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- DPR_EVENT(("event: got empty event\n"));
-
- if (ch->ch_tun.un_flags & UN_EMPTY) {
- ch->ch_tun.un_flags &= ~UN_EMPTY;
- if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
-#else
- (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- }
-
- if (ch->ch_pun.un_flags & UN_EMPTY) {
- ch->ch_pun.un_flags &= ~UN_EMPTY;
- if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags &
- (1 << TTY_DO_WRITE_WAKEUP)) &&
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
-#else
- ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
-#endif
- {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
- (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
-#else
- (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
-#endif
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- }
-
-
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
-
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
- }
-
- writew(tail, &(eaddr->ev_tail));
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- return 0;
-}
diff --git a/drivers/staging/dgap/dgap_fep5.h b/drivers/staging/dgap/dgap_fep5.h
deleted file mode 100644
index c9abc406a1e0..000000000000
--- a/drivers/staging/dgap/dgap_fep5.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- ************************************************************************
- *** FEP Version 5 dependent definitions
- ************************************************************************/
-
-#ifndef __DGAP_FEP5_H
-#define __DGAP_FEP5_H
-
-/************************************************************************
- * FEP memory offsets
- ************************************************************************/
-#define START 0x0004L /* Execution start address */
-
-#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
-#define CMDSTART 0x0400L /* Start of command buffer */
-#define CMDMAX 0x0800L /* End of command buffer */
-
-#define EVBUF 0x0d18L /* Event (ev_t) structure */
-#define EVSTART 0x0800L /* Start of event buffer */
-#define EVMAX 0x0c00L /* End of event buffer */
-#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' is here */
-#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
-#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
- /* if the fep has extended capabilities */
-
-/* BIOS MAGIC SPOTS */
-#define ERROR 0x0C14L /* BIOS error code */
-#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
-#define POSTAREA 0x0C00L /* POST complete message area */
-
-/* FEP MAGIC SPOTS */
-#define FEPSTAT POSTAREA /* OS here when FEP comes up */
-#define NCHAN 0x0C02L /* number of ports FEP sees */
-#define PANIC 0x0C10L /* PANIC area for FEP */
-#define KMEMEM 0x0C30L /* Memory for KME use */
-#define CONFIG 0x0CD0L /* Concentrator configuration info */
-#define CONFIGSIZE 0x0030 /* configuration info size */
-#define DOWNREQ 0x0D00 /* Download request buffer pointer */
-
-#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
-#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
-
-#define XEMPORTS 0xC02 /*
- * Offset in board memory where FEP5 stores
- * how many ports it has detected.
- * NOTE: FEP5 reports 64 ports when the user
- * has the cable in EBI OUT instead of EBI IN.
- */
-
-#define FEPCLR 0x00
-#define FEPMEM 0x02
-#define FEPRST 0x04
-#define FEPINT 0x08
-#define FEPMASK 0x0e
-#define FEPWIN 0x80
-
-#define LOWMEM 0x0100
-#define HIGHMEM 0x7f00
-
-#define FEPTIMEOUT 200000
-
-#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
-#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
-#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
-#define FEPPOLL 0x0c26 /* Fep event poll interval */
-
-#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
-
-/************************************************************************
- * Command structure definition.
- ************************************************************************/
-struct cm_t {
- volatile unsigned short cm_head; /* Command buffer head offset */
- volatile unsigned short cm_tail; /* Command buffer tail offset */
- volatile unsigned short cm_start; /* start offset of buffer */
- volatile unsigned short cm_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Event structure definition.
- ************************************************************************/
-struct ev_t {
- volatile unsigned short ev_head; /* Command buffer head offset */
- volatile unsigned short ev_tail; /* Command buffer tail offset */
- volatile unsigned short ev_start; /* start offset of buffer */
- volatile unsigned short ev_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Download buffer structure.
- ************************************************************************/
-struct downld_t {
- uchar dl_type; /* Header */
- uchar dl_seq; /* Download sequence */
- ushort dl_srev; /* Software revision number */
- ushort dl_lrev; /* Low revision number */
- ushort dl_hrev; /* High revision number */
- ushort dl_seg; /* Start segment address */
- ushort dl_size; /* Number of bytes to download */
- uchar dl_data[1024]; /* Download data */
-};
-
-/************************************************************************
- * Per channel buffer structure
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * C = changed by commands only *
- * U = unknown (may be changed w/o notice) *
- ************************************************************************/
-struct bs_t {
- volatile unsigned short tp_jmp; /* Transmit poll jump */
- volatile unsigned short tc_jmp; /* Cooked procedure jump */
- volatile unsigned short ri_jmp; /* Not currently used */
- volatile unsigned short rp_jmp; /* Receive poll jump */
-
- volatile unsigned short tx_seg; /* W Tx segment */
- volatile unsigned short tx_head; /* W Tx buffer head offset */
- volatile unsigned short tx_tail; /* R Tx buffer tail offset */
- volatile unsigned short tx_max; /* W Tx buffer size - 1 */
-
- volatile unsigned short rx_seg; /* W Rx segment */
- volatile unsigned short rx_head; /* W Rx buffer head offset */
- volatile unsigned short rx_tail; /* R Rx buffer tail offset */
- volatile unsigned short rx_max; /* W Rx buffer size - 1 */
-
- volatile unsigned short tx_lw; /* W Tx buffer low water mark */
- volatile unsigned short rx_lw; /* W Rx buffer low water mark */
- volatile unsigned short rx_hw; /* W Rx buffer high water mark */
- volatile unsigned short incr; /* W Increment to next channel */
-
- volatile unsigned short fepdev; /* U SCC device base address */
- volatile unsigned short edelay; /* W Exception delay */
- volatile unsigned short blen; /* W Break length */
- volatile unsigned short btime; /* U Break complete time */
-
- volatile unsigned short iflag; /* C UNIX input flags */
- volatile unsigned short oflag; /* C UNIX output flags */
- volatile unsigned short cflag; /* C UNIX control flags */
- volatile unsigned short wfill[13]; /* U Reserved for expansion */
-
- volatile unsigned char num; /* U Channel number */
- volatile unsigned char ract; /* U Receiver active counter */
- volatile unsigned char bstat; /* U Break status bits */
- volatile unsigned char tbusy; /* W Transmit busy */
- volatile unsigned char iempty; /* W Transmit empty event enable */
- volatile unsigned char ilow; /* W Transmit low-water event enable */
- volatile unsigned char idata; /* W Receive data interrupt enable */
- volatile unsigned char eflag; /* U Host event flags */
-
- volatile unsigned char tflag; /* U Transmit flags */
- volatile unsigned char rflag; /* U Receive flags */
- volatile unsigned char xmask; /* U Transmit ready flags */
- volatile unsigned char xval; /* U Transmit ready value */
- volatile unsigned char m_stat; /* RC Modem status bits */
- volatile unsigned char m_change; /* U Modem bits which changed */
- volatile unsigned char m_int; /* W Modem interrupt enable bits */
- volatile unsigned char m_last; /* U Last modem status */
-
- volatile unsigned char mtran; /* C Unreported modem trans */
- volatile unsigned char orun; /* C Buffer overrun occurred */
- volatile unsigned char astartc; /* W Auxiliary Xon char */
- volatile unsigned char astopc; /* W Auxiliary Xoff char */
- volatile unsigned char startc; /* W Xon character */
- volatile unsigned char stopc; /* W Xoff character */
- volatile unsigned char vnextc; /* W Vnext character */
- volatile unsigned char hflow; /* C Software flow control */
-
- volatile unsigned char fillc; /* U Delay Fill character */
- volatile unsigned char ochar; /* U Saved output character */
- volatile unsigned char omask; /* U Output character mask */
-
- volatile unsigned char bfill[13]; /* U Reserved for expansion */
-
- volatile unsigned char scc[16]; /* U SCC registers */
-};
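The tx_* trio in bs_t above is a conventional producer/consumer ring: the host writes at tx_head, the FEP drains from tx_tail, and tx_max is documented only as "Tx buffer size - 1". A minimal sketch of the usual free-space computation, on the assumption that tx_max doubles as a power-of-two wrap mask:

/* Hypothetical helper, not taken from the driver. */
static unsigned short example_tx_free(const struct bs_t *bs)
{
	unsigned short head  = bs->tx_head;	/* host write offset */
	unsigned short tail  = bs->tx_tail;	/* FEP read offset   */
	unsigned short tmask = bs->tx_max;	/* buffer size - 1   */

	/* Bytes the host may queue without overtaking unread data. */
	return (tail - head - 1) & tmask;
}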
-
-
-/************************************************************************
- * FEP supported functions
- ************************************************************************/
-#define SRLOW 0xe0 /* Set receive low water */
-#define SRHIGH 0xe1 /* Set receive high water */
-#define FLUSHTX 0xe2 /* Flush transmit buffer */
-#define PAUSETX 0xe3 /* Pause data transmission */
-#define RESUMETX 0xe4 /* Resume data transmission */
-#define SMINT 0xe5 /* Set Modem Interrupt */
-#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
-#define SBREAK 0xe8 /* Send break */
-#define SMODEM 0xe9 /* Set 8530 modem control lines */
-#define SIFLAG 0xea /* Set UNIX iflags */
-#define SFLOWC 0xeb /* Set flow control characters */
-#define STLOW 0xec /* Set transmit low water mark */
-#define RPAUSE 0xee /* Pause receive */
-#define RRESUME 0xef /* Resume receive */
-#define CHRESET 0xf0 /* Reset Channel */
-#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
-#define SOFLAG 0xf3 /* Set UNIX oflags */
-#define SHFLOW 0xf4 /* Set hardware handshake */
-#define SCFLAG 0xf5 /* Set UNIX cflags */
-#define SVNEXT 0xf6 /* Set VNEXT character */
-#define SPINTFC 0xfc /* Reserved */
-#define SCOMMODE 0xfd /* Set RS232/422 mode */
-
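Commands such as SMODEM or SBREAK above are handed to the firmware through the cm_t mailbox defined earlier: the host writes at cm_head and advances it, wrapping from cm_max back to cm_start, while the FEP drains from cm_tail. The sketch below is illustrative only; the four-byte command layout (opcode, port, 16-bit parameter) is an assumption, not something this header states.

#include <linux/io.h>

/* Hypothetical helper: post one command to the FEP command ring. */
static void example_post_command(unsigned char __iomem *board,
				 struct cm_t __iomem *cm,
				 unsigned char cmd, unsigned char port,
				 unsigned short param)
{
	unsigned short head = readw(&cm->cm_head);

	writeb(cmd,  board + head);		/* e.g. SMODEM, SBREAK, ... */
	writeb(port, board + head + 1);
	writew(param, board + head + 2);

	head += 4;				/* advance and wrap */
	if (head >= readw(&cm->cm_max))
		head = readw(&cm->cm_start);
	writew(head, &cm->cm_head);
}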
-
-/************************************************************************
- * Modes for SCOMMODE
- ************************************************************************/
-#define MODE_232 0x00
-#define MODE_422 0x01
-
-
-/************************************************************************
- * Event flags.
- ************************************************************************/
-#define IFBREAK 0x01 /* Break received */
-#define IFTLW 0x02 /* Transmit low water */
-#define IFTEM 0x04 /* Transmitter empty */
-#define IFDATA 0x08 /* Receive data present */
-#define IFMODEM 0x20 /* Modem status change */
-
-/************************************************************************
- * Modem flags
- ************************************************************************/
-# define DM_RTS 0x02 /* Request to send */
-# define DM_CD 0x80 /* Carrier detect */
-# define DM_DSR 0x20 /* Data set ready */
-# define DM_CTS 0x10 /* Clear to send */
-# define DM_RI 0x40 /* Ring indicator */
-# define DM_DTR 0x01 /* Data terminal ready */
-
-
-#endif
diff --git a/drivers/staging/dgap/dgap_kcompat.h b/drivers/staging/dgap/dgap_kcompat.h
deleted file mode 100644
index 0dc2404922ff..000000000000
--- a/drivers/staging/dgap/dgap_kcompat.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * This file is intended to contain all the kernel "differences" between the
- * various kernels that we support.
- *
- *************************************************************************/
-
-#ifndef __DGAP_KCOMPAT_H
-#define __DGAP_KCOMPAT_H
-
-#if !defined(TTY_FLIPBUF_SIZE)
-# define TTY_FLIPBUF_SIZE 512
-#endif
-
-
-/* Sparse stuff */
-# ifndef __user
-# define __user
-# define __kernel
-# define __safe
-# define __force
-# define __chk_user_ptr(x) (void)0
-# endif
-
-
-# define PARM_STR(VAR, INIT, PERM, DESC) \
- static char *VAR = INIT; \
- char *dgap_##VAR; \
- module_param(VAR, charp, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_INT(VAR, INIT, PERM, DESC) \
- static int VAR = INIT; \
- int dgap_##VAR; \
- module_param(VAR, int, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_ULONG(VAR, INIT, PERM, DESC) \
- static ulong VAR = INIT; \
- ulong dgap_##VAR; \
- module_param(VAR, long, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
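The PARM_* wrappers above only bundle the usual module-parameter boilerplate. As a rough sketch of what one invocation expands to (the parameter name also appears elsewhere in this driver, but the initial value and description text here are illustrative):

/* PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input") becomes: */
static int rawreadok = 1;
int dgap_rawreadok;
module_param(rawreadok, int, 0644);
MODULE_PARM_DESC(rawreadok, "Bypass flip buffers on input");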
-#endif /* ! __DGAP_KCOMPAT_H */
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
deleted file mode 100644
index 36fd93d3f5f6..000000000000
--- a/drivers/staging/dgap/dgap_parse.c
+++ /dev/null
@@ -1,1374 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- *
- *****************************************************************************
- *
- * dgap_parse.c - Parses the configuration information from the input file.
- *
- * $Id: dgap_parse.c,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- */
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-
-#include "dgap_types.h"
-#include "dgap_fep5.h"
-#include "dgap_driver.h"
-#include "dgap_parse.h"
-#include "dgap_conf.h"
-
-
-/*
- * Function prototypes.
- */
-static int dgap_gettok(char **in, struct cnode *p);
-static char *dgap_getword(char **in);
-static char *dgap_savestring(char *s);
-static struct cnode *dgap_newnode(int t);
-static int dgap_checknode(struct cnode *p);
-static void dgap_err(char *s);
-
-/*
- * Our needed internal static variables...
- */
-static struct cnode dgap_head;
-#define MAXCWORD 200
-static char dgap_cword[MAXCWORD];
-
-struct toklist {
- int token;
- char *string;
-};
-
-static struct toklist dgap_tlist[] = {
- { BEGIN, "config_begin" },
- { END, "config_end" },
- { BOARD, "board" },
- { PCX, "Digi_AccelePort_C/X_PCI" }, /* C/X_PCI */
- { PEPC, "Digi_AccelePort_EPC/X_PCI" }, /* EPC/X_PCI */
- { PPCM, "Digi_AccelePort_Xem_PCI" }, /* PCI/Xem */
- { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
- { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
- { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
- { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
- { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
- { IO, "io" },
- { PCIINFO, "pciinfo" },
- { LINE, "line" },
- { CONC, "conc" },
- { CONC, "concentrator" },
- { CX, "cx" },
- { CX, "ccon" },
- { EPC, "epccon" },
- { EPC, "epc" },
- { MOD, "module" },
- { ID, "id" },
- { STARTO, "start" },
- { SPEED, "speed" },
- { CABLE, "cable" },
- { CONNECT, "connect" },
- { METHOD, "method" },
- { STATUS, "status" },
- { CUSTOM, "Custom" },
- { BASIC, "Basic" },
- { MEM, "mem" },
- { MEM, "memory" },
- { PORTS, "ports" },
- { MODEM, "modem" },
- { NPORTS, "nports" },
- { TTYN, "ttyname" },
- { CU, "cuname" },
- { PRINT, "prname" },
- { CMAJOR, "major" },
- { ALTPIN, "altpin" },
- { USEINTR, "useintr" },
- { TTSIZ, "ttysize" },
- { CHSIZ, "chsize" },
- { BSSIZ, "boardsize" },
- { UNTSIZ, "schedsize" },
- { F2SIZ, "f2200size" },
- { VPSIZ, "vpixsize" },
- { 0, NULL }
-};
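For orientation, the token table above implies a whitespace-separated configuration text of roughly the following shape. The fragment and the calling code are invented for illustration; note that the parser tokenizes the buffer in place (dgap_getword() writes NULs into it), so it must be writable.

/* Hypothetical usage sketch for dgap_parsefile(). */
static char example_cfg[] =
	"config_begin\n"
	"board Digi_AccelePort_Xem_PCI\n"
	"pciinfo 0 1\n"
	"nports 16\n"
	"ttyname ttyG\n"
	"config_end\n";

static int example_parse(void)
{
	char *in = example_cfg;

	return dgap_parsefile(&in, 0);	/* Remove flag unused by the parser */
}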
-
-
-/*
- * Parse a configuration file read into memory as a string.
- */
-int dgap_parsefile(char **in, int Remove)
-{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s = NULL, *s2 = NULL;
- int linecnt = 0;
-
- p = &dgap_head;
- brd = line = conc = NULL;
-
- /* perhaps we are adding to an existing list? */
- while (p->next != NULL) {
- p = p->next;
- }
-
- /* file must start with a BEGIN */
- while ( (rc = dgap_gettok(in,p)) != BEGIN ) {
- if (rc == 0) {
- dgap_err("unexpected EOF");
- return(-1);
- }
- }
-
- for (; ; ) {
- rc = dgap_gettok(in,p);
- if (rc == 0) {
- dgap_err("unexpected EOF");
- return(-1);
- }
-
- switch (rc) {
- case 0:
- dgap_err("unexpected end of file");
- return(-1);
-
- case BEGIN: /* should only be 1 begin */
- dgap_err("unexpected config_begin\n");
- return(-1);
-
- case END:
- return(0);
-
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(BNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
-
- p->u.board.status = dgap_savestring("No");
- line = conc = NULL;
- brd = p;
- linecnt = -1;
- break;
-
-		case APORT2_920P:	/* AccelePort_2r_920 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_2r_920 string");
- return(-1);
- }
- p->u.board.type = APORT2_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_2r_920 PCI to config...\n"));
- break;
-
- case APORT4_920P: /* AccelePort_4 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_4r_920 string");
- return(-1);
- }
- p->u.board.type = APORT4_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_4r_920 PCI to config...\n"));
- break;
-
- case APORT8_920P: /* AccelePort_8 */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_8r_920 string");
- return(-1);
- }
- p->u.board.type = APORT8_920P;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_8r_920 PCI to config...\n"));
- break;
-
- case PAPORT4: /* AccelePort_4 PCI */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_4r(PCI) string");
- return(-1);
- }
- p->u.board.type = PAPORT4;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_4r PCI to config...\n"));
- break;
-
- case PAPORT8: /* AccelePort_8 PCI */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_8r string");
- return(-1);
- }
- p->u.board.type = PAPORT8;
- p->u.board.v_type = 1;
- DPR_INIT(("Adding Digi_8r PCI to config...\n"));
- break;
-
- case PCX: /* PCI C/X */
- if (p->type != BNODE) {
- dgap_err("unexpected Digi_C/X_(PCI) string");
- return(-1);
- }
- p->u.board.type = PCX;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- p->u.board.module1 = 0;
- p->u.board.module2 = 0;
- DPR_INIT(("Adding PCI C/X to config...\n"));
- break;
-
- case PEPC: /* PCI EPC/X */
- if (p->type != BNODE) {
- dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
- return(-1);
- }
- p->u.board.type = PEPC;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- p->u.board.module1 = 0;
- p->u.board.module2 = 0;
- DPR_INIT(("Adding PCI EPC/X to config...\n"));
- break;
-
- case PPCM: /* PCI/Xem */
- if (p->type != BNODE) {
- dgap_err("unexpected PCI/Xem string");
- return(-1);
- }
- p->u.board.type = PPCM;
- p->u.board.v_type = 1;
- p->u.board.conc1 = 0;
- p->u.board.conc2 = 0;
- DPR_INIT(("Adding PCI XEM to config...\n"));
- break;
-
- case IO: /* i/o port */
- if (p->type != BNODE) {
-				dgap_err("IO port only valid for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.portstr = dgap_savestring(s);
- p->u.board.port = (short)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for IO port");
- return(-1);
- }
- p->u.board.v_port = 1;
- DPR_INIT(("Adding IO (%s) to config...\n", s));
- break;
-
- case MEM: /* memory address */
- if (p->type != BNODE) {
-				dgap_err("memory address only valid for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.addrstr = dgap_savestring(s);
- p->u.board.addr = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for memory address");
- return(-1);
- }
- p->u.board.v_addr = 1;
- DPR_INIT(("Adding MEM (%s) to config...\n", s));
- break;
-
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
-				dgap_err("pciinfo only valid for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.pcibusstr = dgap_savestring(s);
- p->u.board.pcibus = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for pci bus");
- return(-1);
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.pcislotstr = dgap_savestring(s);
- p->u.board.pcislot = simple_strtoul(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for pci slot");
- return(-1);
- }
- p->u.board.v_pcislot = 1;
-
- DPR_INIT(("Adding PCIINFO (%s %s) to config...\n", p->u.board.pcibusstr,
- p->u.board.pcislotstr));
- break;
-
- case METHOD:
- if (p->type != BNODE) {
-				dgap_err("install method only valid for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.method = dgap_savestring(s);
- p->u.board.v_method = 1;
- DPR_INIT(("Adding METHOD (%s) to config...\n", s));
- break;
-
- case STATUS:
- if (p->type != BNODE) {
-				dgap_err("config status only valid for boards");
- return(-1);
- }
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.status = dgap_savestring(s);
- DPR_INIT(("Adding STATUS (%s) to config...\n", s));
- break;
-
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.module.nport = (char)simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for number of ports");
- return(-1);
- }
- p->u.module.v_nport = 1;
- } else {
-				dgap_err("nports only valid for boards, concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding NPORTS (%s) to config...\n", s));
- break;
-
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
-
- p->u.board.status = dgap_savestring(s);
-
- if (p->type == CNODE) {
- p->u.conc.id = dgap_savestring(s);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- p->u.module.id = dgap_savestring(s);
- p->u.module.v_id = 1;
- } else {
- dgap_err("id only valid for concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding ID (%s) to config...\n", s));
- break;
-
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.board.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.module.start = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for start of tty count");
- return(-1);
- }
- p->u.module.v_start = 1;
- } else {
-				dgap_err("start only valid for boards, concentrators or modules");
- return(-1);
- }
- DPR_INIT(("Adding START (%s) to config...\n", s));
- break;
-
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(TNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
-				dgap_err("unexpected end of file");
- return(-1);
- }
- if ( (p->u.ttyname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding TTY (%s) to config...\n", s));
- break;
-
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(CUNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
-				dgap_err("unexpected end of file");
- return(-1);
- }
- if ( (p->u.cuname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding CU (%s) to config...\n", s));
- break;
-
- case LINE: /* line information */
- if (dgap_checknode(p))
- return(-1);
- if (brd == NULL) {
- dgap_err("must specify board before line info");
- return(-1);
- }
- switch (brd->u.board.type) {
- case PPCM:
-				dgap_err("line not valid for PC/em");
- return(-1);
- }
- if ( (p->next = dgap_newnode(LNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- conc = NULL;
- line = p;
- linecnt++;
- DPR_INIT(("Adding LINE to config...\n"));
- break;
-
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return(-1);
- if (line == NULL) {
- dgap_err("must specify line info before concentrator");
- return(-1);
- }
- if ( (p->next = dgap_newnode(CNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- conc = p;
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
-
- DPR_INIT(("Adding CONC to config...\n"));
- break;
-
- case CX: /* c/x type concentrator */
- if (p->type != CNODE) {
- dgap_err("cx only valid for concentrators");
- return(-1);
- }
- p->u.conc.type = CX;
- p->u.conc.v_type = 1;
- DPR_INIT(("Adding CX to config...\n"));
- break;
-
- case EPC: /* epc type concentrator */
- if (p->type != CNODE) {
-				dgap_err("epc only valid for concentrators");
- return(-1);
- }
- p->u.conc.type = EPC;
- p->u.conc.v_type = 1;
- DPR_INIT(("Adding EPC to config...\n"));
- break;
-
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return(-1);
- if (brd == NULL) {
- dgap_err("must specify board info before EBI modules");
- return(-1);
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
- break;
- default:
- if (conc == NULL) {
- dgap_err("must specify concentrator info before EBI module");
- return(-1);
- }
- }
- if ( (p->next = dgap_newnode(MNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
-
- DPR_INIT(("Adding MOD to config...\n"));
- break;
-
- case PORTS: /* ports type EBI module */
- if (p->type != MNODE) {
- dgap_err("ports only valid for EBI modules");
- return(-1);
- }
- p->u.module.type = PORTS;
- p->u.module.v_type = 1;
- DPR_INIT(("Adding PORTS to config...\n"));
- break;
-
-		case MODEM:	/* modem type EBI module */
- if (p->type != MNODE) {
- dgap_err("modem only valid for modem modules");
- return(-1);
- }
- p->u.module.type = MODEM;
- p->u.module.v_type = 1;
- DPR_INIT(("Adding MODEM to config...\n"));
- break;
-
- case CABLE:
- if (p->type == LNODE) {
- if ((s = dgap_getword(in)) == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.line.cable = dgap_savestring(s);
- p->u.line.v_cable = 1;
- }
- DPR_INIT(("Adding CABLE (%s) to config...\n", s));
- break;
-
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.line.speed = (char)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for line speed");
- return(-1);
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.speed = (char)simple_strtol(s, &s2, 0);
- if ((short)strlen(s) > (short)(s2 - s)) {
- dgap_err("bad number for line speed");
- return(-1);
- }
- p->u.conc.v_speed = 1;
- } else {
- dgap_err("speed valid only for lines or concentrators.");
- return(-1);
- }
- DPR_INIT(("Adding SPEED (%s) to config...\n", s));
- break;
-
- case CONNECT:
- if (p->type == CNODE) {
- if ((s = dgap_getword(in)) == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.conc.connect = dgap_savestring(s);
- p->u.conc.v_connect = 1;
- }
- DPR_INIT(("Adding CONNECT (%s) to config...\n", s));
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(PNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- if ( (s = dgap_getword(in)) == NULL ) {
-				dgap_err("unexpected end of file");
- return(-1);
- }
- if ( (p->u.printname = dgap_savestring(s)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- DPR_INIT(("Adding PRINT (%s) to config...\n", s));
- break;
-
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(JNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.majornumber = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for major number");
- return(-1);
- }
- DPR_INIT(("Adding CMAJOR (%s) to config...\n", s));
- break;
-
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(ANODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.altpin = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for altpin");
- return(-1);
- }
- DPR_INIT(("Adding ALTPIN (%s) to config...\n", s));
- break;
-
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(INTRNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.useintr = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for useintr");
- return(-1);
- }
- DPR_INIT(("Adding USEINTR (%s) to config...\n", s));
- break;
-
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(TSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.ttysize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for ttysize");
- return(-1);
- }
- DPR_INIT(("Adding TTSIZ (%s) to config...\n", s));
- break;
-
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(CSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.chsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for chsize");
- return(-1);
- }
- DPR_INIT(("Adding CHSIZE (%s) to config...\n", s));
- break;
-
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(BSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.bssize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for bssize");
- return(-1);
- }
- DPR_INIT(("Adding BSSIZ (%s) to config...\n", s));
- break;
-
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(USNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.unsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for schedsize");
- return(-1);
- }
- DPR_INIT(("Adding UNTSIZ (%s) to config...\n", s));
- break;
-
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(FSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.f2size = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for f2200size");
- return(-1);
- }
- DPR_INIT(("Adding F2SIZ (%s) to config...\n", s));
- break;
-
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return(-1);
- if ( (p->next = dgap_newnode(VSNODE)) == NULL ) {
- dgap_err("out of memory");
- return(-1);
- }
- p = p->next;
- s = dgap_getword(in);
- if (s == NULL) {
- dgap_err("unexpected end of file");
- return(-1);
- }
- p->u.vpixsize = simple_strtol(s, &s2, 0);
- if ((int)strlen(s) > (int)(s2 - s)) {
- dgap_err("bad number for vpixsize");
- return(-1);
- }
- DPR_INIT(("Adding VPSIZ (%s) to config...\n", s));
- break;
- }
- }
-}
-
-
-/*
- * dgap_sindex: much like index(), but it looks for a match of any character in
- * the group, and returns that position. If the first character is a ^, then
- * this will match the first occurrence not in that group.
- */
-static char *dgap_sindex (char *string, char *group)
-{
- char *ptr;
-
- if (!string || !group)
- return (char *) NULL;
-
- if (*group == '^') {
- group++;
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- break;
- }
- if (*ptr == '\0')
- return string;
- }
- }
- else {
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- return string;
- }
- }
- }
-
- return (char *) NULL;
-}
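A quick demonstration of the helper above; in effect it behaves like strpbrk(), with a leading '^' in the group inverting the match (values invented):

/* Hypothetical usage sketch for dgap_sindex(). */
static void example_sindex_usage(void)
{
	char line[] = "speed 9600\n";

	char *sep    = dgap_sindex(line, " \t\n");  /* the space after "speed" */
	char *nonsep = dgap_sindex(line, "^ \t\n"); /* 's': first char not in group */

	(void)sep;
	(void)nonsep;
}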
-
-
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in, struct cnode *p)
-{
- char *w;
- struct toklist *t;
-
- if (strstr(dgap_cword, "boar")) {
- w = dgap_getword(in);
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if ( !strcmp(w, t->string)) {
- return(t->token);
- }
- }
-		dgap_err("board type not specified");
- return(1);
- }
- else {
- while ( (w = dgap_getword(in)) != NULL ) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if ( !strcmp(w, t->string) )
- return(t->token);
- }
- }
- return(0);
- }
-}
-
-
-/*
- * Get a word from the input stream; words are separated by whitespace.
- */
-static char *dgap_getword(char **in)
-{
- char *ret_ptr = *in;
-
- char *ptr = dgap_sindex(*in, " \t\n");
-
- /* If no word found, return null */
- if (!ptr)
- return NULL;
-
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
-
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') || (**in == '\t') || (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
-
- return ret_ptr;
-}
-
-
-/*
- * Print a parse error message.
- */
-static void dgap_err(char *s)
-{
- printk("DGAP: parse: %s\n", s);
-}
-
-
-/*
- * allocate a new configuration node of type t
- */
-static struct cnode *dgap_newnode(int t)
-{
- struct cnode *n;
-
- n = kmalloc(sizeof(struct cnode), GFP_ATOMIC);
- if (n != NULL) {
- memset((char *)n, 0, sizeof(struct cnode));
- n->type = t;
- }
- return(n);
-}
-
-
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case BNODE:
- if (p->u.board.v_type == 0) {
-			dgap_err("board type not specified");
- return(1);
- }
-
- return(0);
-
- case LNODE:
- if (p->u.line.v_speed == 0) {
- dgap_err("line speed not specified");
- return(1);
- }
- return(0);
-
- case CNODE:
- if (p->u.conc.v_type == 0) {
- dgap_err("concentrator type not specified");
- return(1);
- }
- if (p->u.conc.v_speed == 0) {
- dgap_err("concentrator line speed not specified");
- return(1);
- }
- if (p->u.conc.v_nport == 0) {
- dgap_err("number of ports on concentrator not specified");
- return(1);
- }
- if (p->u.conc.v_id == 0) {
- dgap_err("concentrator id letter not specified");
- return(1);
- }
- return(0);
-
- case MNODE:
- if (p->u.module.v_type == 0) {
- dgap_err("EBI module type not specified");
- return(1);
- }
- if (p->u.module.v_nport == 0) {
- dgap_err("number of ports on EBI module not specified");
- return(1);
- }
- if (p->u.module.v_id == 0) {
- dgap_err("EBI module id letter not specified");
- return(1);
- }
- return(0);
- }
- return(0);
-}
-
-/*
- * Duplicate a string into freshly allocated memory.
- */
-static char *dgap_savestring(char *s)
-{
- char *p;
- if ( (p = kmalloc(strlen(s) + 1, GFP_ATOMIC) ) != NULL) {
- strcpy(p, s);
- }
- return(p);
-}
-
-
-/*
- * Given a board pointer, returns whether we should use interrupts or not.
- */
-uint dgap_config_get_useintr(struct board_t *bd)
-{
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case INTRNODE:
- /*
- * check for pcxr types.
- */
- return p->u.useintr;
- default:
- break;
- }
- }
-
- /* If not found, then don't turn on interrupts. */
- return 0;
-}
-
-
-/*
- * Given a board pointer, returns whether we turn on altpin or not.
- */
-uint dgap_config_get_altpin(struct board_t *bd)
-{
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case ANODE:
- /*
- * check for pcxr types.
- */
- return p->u.altpin;
- default:
- break;
- }
- }
-
-	/* If not found, then don't turn on altpin. */
- return 0;
-}
-
-
-
-/*
- * Given a specific board type (and PCI bus/slot, when specified), find the
- * first matching board entry, detach its chain of config nodes from the
- * main list, and return it.
- */
-struct cnode *dgap_find_config(int type, int bus, int slot)
-{
- struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
-
- p = &dgap_head;
-
- while (p->next != NULL) {
- prev = p;
- p = p->next;
-
- if (p->type == BNODE) {
-
- if (p->u.board.type == type) {
-
- if (p->u.board.v_pcibus && p->u.board.pcibus != bus) {
- DPR(("Found matching board, but wrong bus position. System says bus %d, we want bus %ld\n",
- bus, p->u.board.pcibus));
- continue;
- }
- if (p->u.board.v_pcislot && p->u.board.pcislot != slot) {
- DPR_INIT(("Found matching board, but wrong slot position. System says slot %d, we want slot %ld\n",
- slot, p->u.board.pcislot));
- continue;
- }
-
- DPR_INIT(("Matched type in config file\n"));
-
- found = p;
- /*
- * Keep walking thru the list till we find the next board.
- */
- while (p->next != NULL) {
- prev2 = p;
- p = p->next;
- if (p->type == BNODE) {
-
- /*
- * Mark the end of our 1 board chain of configs.
- */
- prev2->next = NULL;
-
- /*
- * Link the "next" board to the previous board,
- * effectively "unlinking" our board from the main config.
- */
- prev->next = p;
-
- return found;
- }
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- }
- }
- return NULL;
-}
-
-/*
- * Given a board pointer, walks the config link, counting up
- * all ports the user specified should be on the board.
- * (This does NOT mean they are all actually present right now, though.)
- */
-uint dgap_config_get_number_of_ports(struct board_t *bd)
-{
- int count = 0;
- struct cnode *p = NULL;
-
- if (!bd)
- return(0);
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return (count);
-}
-
-char *dgap_create_config_string(struct board_t *bd, char *string)
-{
- char *ptr = string;
- struct cnode *p = NULL;
- struct cnode *q = NULL;
- int speed;
-
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
-
- for (p = bd->bd_config; p; p = p->next) {
-
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the list
- * and keep adding the number of ports on each EM to the config.
- * UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if ((q != NULL) && (q->type == MNODE) ) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while ((q->next != NULL) && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
-
- *ptr = speed;
- ptr++;
- break;
- }
- }
-
- *ptr = 0xff;
- return string;
-}
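The string built above is really a small byte vector: a 0x00 marker plus speed for each sync line, then for each concentrator its port count (OR'ed with 0x80 while EM modules follow), the module port counts, the concentrator speed, and finally a 0xff terminator. An invented example, for one line at speed 0x0e feeding a 16-port concentrator that carries a single 8-port EM module:

/*
 * Illustrative bytes only (values invented):
 *
 *	00 0e		line marker, line speed
 *	90 08		conc nports | 0x80, EM module nports
 *	0e		concentrator speed
 *	ff		terminator
 */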
-
-
-
-char *dgap_get_config_letters(struct board_t *bd, char *string)
-{
- int found = FALSE;
- char *ptr = string;
- struct cnode *cptr = NULL;
- int len = 0;
- int left = MAXTTYNAMELEN;
-
- if (!bd) {
- return "<NULL>";
- }
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
-
- found = TRUE;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
- if (strstr(cptr->u.ttyname, "tty")) {
- ptr1 = cptr->u.ttyname;
- ptr1 += 3;
- }
- else {
- ptr1 = cptr->u.ttyname;
- }
- if (ptr1) {
- len = snprintf(ptr, left, "%s", ptr1);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
-
- if (cptr->type == CNODE) {
- if (cptr->u.conc.id) {
- len = snprintf(ptr, left, "%s", cptr->u.conc.id);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
-
- if (cptr->type == MNODE) {
- if (cptr->u.module.id) {
- len = snprintf(ptr, left, "%s", cptr->u.module.id);
- left -= len;
- ptr += len;
- if (left <= 0)
- break;
- }
- }
- }
-
- return string;
-}
diff --git a/drivers/staging/dgap/dgap_parse.h b/drivers/staging/dgap/dgap_parse.h
deleted file mode 100644
index 8128c47343cb..000000000000
--- a/drivers/staging/dgap/dgap_parse.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef _DGAP_PARSE_H
-#define _DGAP_PARSE_H
-
-#include "dgap_driver.h"
-
-extern int dgap_parsefile(char **in, int Remove);
-extern struct cnode *dgap_find_config(int type, int bus, int slot);
-extern uint dgap_config_get_number_of_ports(struct board_t *bd);
-extern char *dgap_create_config_string(struct board_t *bd, char *string);
-extern char *dgap_get_config_letters(struct board_t *bd, char *string);
-extern uint dgap_config_get_useintr(struct board_t *bd);
-extern uint dgap_config_get_altpin(struct board_t *bd);
-
-#endif
diff --git a/drivers/staging/dgap/dgap_pci.h b/drivers/staging/dgap/dgap_pci.h
deleted file mode 100644
index 05ed374f08e1..000000000000
--- a/drivers/staging/dgap/dgap_pci.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-/* $Id: dgap_pci.h,v 1.1 2009/10/23 14:01:57 markh Exp $ */
-
-#ifndef __DGAP_PCI_H
-#define __DGAP_PCI_H
-
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEVICE_EPC_DID 0x0002
-#define PCI_DEVICE_XEM_DID 0x0004
-#define PCI_DEVICE_XR_DID 0x0005
-#define PCI_DEVICE_CX_DID 0x0006
-#define PCI_DEVICE_XRJ_DID 0x0009 /* PLX-based Xr adapter */
-#define PCI_DEVICE_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
-#define PCI_DEVICE_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
-#define PCI_DEVICE_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
-#define PCI_DEVICE_XR_422_DID 0x0012 /* Xr-422 */
-#define PCI_DEVICE_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
-#define PCI_DEVICE_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
-#define PCI_DEVICE_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
-#define PCI_DEVICE_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
-#define PCI_DEVICE_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
-#define PCI_DEVICE_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
-#define PCI_DEVICE_XEM_HP_DID 0x0059 /* HP Xem PCI */
-
-#define PCI_DEVICE_XEM_NAME "AccelePort XEM"
-#define PCI_DEVICE_CX_NAME "AccelePort CX"
-#define PCI_DEVICE_XR_NAME "AccelePort Xr"
-#define PCI_DEVICE_XRJ_NAME "AccelePort Xr (PLX)"
-#define PCI_DEVICE_XR_SAIP_NAME "AccelePort Xr (SAIP)"
-#define PCI_DEVICE_920_2_NAME "AccelePort Xr920 2 port"
-#define PCI_DEVICE_920_4_NAME "AccelePort Xr920 4 port"
-#define PCI_DEVICE_920_8_NAME "AccelePort Xr920 8 port"
-#define PCI_DEVICE_XR_422_NAME "AccelePort Xr 422"
-#define PCI_DEVICE_EPCJ_NAME "AccelePort EPC (PLX)"
-#define PCI_DEVICE_XR_BULL_NAME "AccelePort Xr (BULL)"
-#define PCI_DEVICE_XR_IBM_NAME "AccelePort Xr (IBM)"
-#define PCI_DEVICE_CX_IBM_NAME "AccelePort CX (IBM)"
-#define PCI_DEVICE_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
-#define PCI_DEVICE_XEM_HP_NAME "AccelePort XEM (HP)"
-
-
-/*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
-
-/* Potential location of PCI Bios from E0000 to FFFFF*/
-#define PCI_BIOS_SIZE 0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE 0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x00200000
-
-/* Max PCI Window Size (2MB) */
-#define PCI_WIN_SIZE 0x00200000
-
-#define PCI_WIN_SHIFT 21 /* 21 bits max */
-
-/* Offset of I/0 in Memory (2MB) */
-#define PCI_IO_OFFSET 0x00200000
-
-/* Size of IO (2MB) */
-#define PCI_IO_SIZE 0x00200000
-
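A minimal sketch of how a driver might apply the layout described above, assuming the 4 MB region is the board's first memory BAR (the BAR index and helper name are assumptions, not taken from this header):

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical helper: map the I/O register window that lives in the
 * upper 2 MB of the 4 MB PCI memory space. */
static void __iomem *example_map_io_window(struct pci_dev *pdev)
{
	unsigned long base = pci_resource_start(pdev, 0);

	return ioremap(base + PCI_IO_OFFSET, PCI_IO_SIZE);
}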
-#endif
diff --git a/drivers/staging/dgap/dgap_sysfs.c b/drivers/staging/dgap/dgap_sysfs.c
deleted file mode 100644
index 7f4ec9a18293..000000000000
--- a/drivers/staging/dgap/dgap_sysfs.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- *
- *
- * $Id: dgap_sysfs.c,v 1.1 2009/10/23 14:01:57 markh Exp $
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgap_driver.h"
-#include "dgap_conf.h"
-#include "dgap_parse.h"
-
-
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-
-static ssize_t dgap_driver_state_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", dgap_driver_state_text[dgap_driver_state]);
-}
-static DRIVER_ATTR(state, S_IRUSR, dgap_driver_state_show, NULL);
-
-
-static ssize_t dgap_driver_debug_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_debug);
-}
-
-static ssize_t dgap_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "0x%x\n", &dgap_debug);
- return count;
-}
-static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgap_driver_debug_show, dgap_driver_debug_store);
-
-
-static ssize_t dgap_driver_rawreadok_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_rawreadok);
-}
-
-static ssize_t dgap_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "0x%x\n", &dgap_rawreadok);
- return count;
-}
-static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgap_driver_rawreadok_show, dgap_driver_rawreadok_store);
-
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- sscanf(buf, "%d\n", &dgap_poll_tick);
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show, dgap_driver_pollrate_store);
-
-
-void dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_debug);
- rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
- rc |= driver_create_file(driverfs, &driver_attr_state);
- if (rc) {
- printk(KERN_ERR "DGAP: sysfs driver_create_file failed!\n");
- }
-}
-
-
-void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- struct device_driver *driverfs = &dgap_driver->driver;
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_debug);
- driver_remove_file(driverfs, &driver_attr_rawreadok);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
- driver_remove_file(driverfs, &driver_attr_state);
-}
-
-
-#define DGAP_VERIFY_BOARD(p, bd) \
- if (!p) \
- return (0); \
- \
- bd = dev_get_drvdata(p); \
- if (!bd || bd->magic != DGAP_BOARD_MAGIC) \
- return (0); \
- if (bd->state != BOARD_READY) \
- return (0); \
-
-
-static ssize_t dgap_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
-
-
-static ssize_t dgap_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_baud_info);
- }
- return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
-
-
-static ssize_t dgap_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
- } else {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
- }
- }
- return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
-
-
-static ssize_t dgap_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
-
-
-static ssize_t dgap_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
-
-
-static ssize_t dgap_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
-
-
-static ssize_t dgap_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
- }
- return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
-
-
-static ssize_t dgap_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
- }
- return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
-
-
-static ssize_t dgap_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
-
-
-static ssize_t dgap_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- int count = 0;
- int i = 0;
-
- DGAP_VERIFY_BOARD(p, bd);
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
- }
- return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-
-
-/* This function creates the sysfs files that export each port's signal
- * status; each value is put in a separate file.
- */
-void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- int rc = 0;
-
- dev_set_drvdata(&bd->pdev->dev, bd);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
- if (rc) {
- printk(KERN_ERR "DGAP: sysfs device_create_file failed!\n");
- }
-}
-
-
-/* removes all the sys files created for that port */
-void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
-
-static ssize_t dgap_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
-
-
-static ssize_t dgap_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
-
-
-static ssize_t dgap_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
-
-
-static ssize_t dgap_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
-
-
-static ssize_t dgap_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
-
-
-static ssize_t dgap_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
-
-
-static ssize_t dgap_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
-
-
-static ssize_t dgap_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
-
-
-static ssize_t dgap_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
-
-
-static ssize_t dgap_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
-
-
-static ssize_t dgap_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int cn;
- int bn;
- struct cnode *cptr = NULL;
- int found = FALSE;
- int ncount = 0;
- int starto = 0;
- int i = 0;
-
- if (!d)
- return (0);
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
- if (bd->state != BOARD_READY)
- return (0);
-
- bn = bd->boardnum;
- cn = ch->ch_portnum;
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
-
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
-
- found = TRUE;
- if (cptr->u.board.v_start)
- starto = cptr->u.board.start;
- else
- starto = 1;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
- if (strstr(cptr->u.ttyname, "tty")) {
- ptr1 = cptr->u.ttyname;
- ptr1 += 3;
- }
- else {
- ptr1 = cptr->u.ttyname;
- }
-
- for (i = 0; i < dgap_config_get_number_of_ports(bd); i++) {
- if (cn == i) {
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- ptr1, i + starto);
- }
- }
- }
-
- if (cptr->type == CNODE) {
-
- for (i = 0; i < cptr->u.conc.nport; i++) {
- if (cn == (i + ncount)) {
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- cptr->u.conc.id,
- i + (cptr->u.conc.v_start ? cptr->u.conc.start : 1));
- }
- }
-
- ncount += cptr->u.conc.nport;
- }
-
- if (cptr->type == MNODE) {
-
- for (i = 0; i < cptr->u.module.nport; i++) {
- if (cn == (i + ncount)) {
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty",
- cptr->u.module.id,
- i + (cptr->u.module.v_start ? cptr->u.module.start : 1));
- }
- }
-
- ncount += cptr->u.module.nport;
-
- }
- }
-
- return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
-
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
-
-
-static struct attribute *dgap_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
- NULL
-};
-
-
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-
-
-
-
-void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret) {
- printk(KERN_ERR "dgap: failed to create sysfs tty device attributes.\n");
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
- return;
- }
-
- dev_set_drvdata(c, un);
-
-}
-
-
-void dgap_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
-}
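For context, the file removed above follows the standard sysfs attribute-group pattern: per-attribute show callbacks declared with DEVICE_ATTR(), collected into an attribute_group, and registered against the device's kobject in one call. A minimal, hypothetical sketch of that pattern is below; the "example" attribute and the demo_* names are invented for illustration and are not dgap symbols.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical show callback; a real driver formats device state here. */
static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "ok\n");
}
static DEVICE_ATTR(example, S_IRUSR, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

/* Create/remove mirror dgap_create_tty_sysfs()/dgap_remove_tty_sysfs(). */
static int demo_create(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &demo_group);
}

static void demo_remove(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &demo_group);
}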
diff --git a/drivers/staging/dgap/dgap_sysfs.h b/drivers/staging/dgap/dgap_sysfs.h
deleted file mode 100644
index dde690eec5cf..000000000000
--- a/drivers/staging/dgap/dgap_sysfs.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_SYSFS_H
-#define __DGAP_SYSFS_H
-
-#include "dgap_driver.h"
-
-#include <linux/device.h>
-
-struct board_t;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-extern void dgap_create_ports_sysfiles(struct board_t *bd);
-extern void dgap_remove_ports_sysfiles(struct board_t *bd);
-
-extern void dgap_create_driver_sysfiles(struct pci_driver *);
-extern void dgap_remove_driver_sysfiles(struct pci_driver *);
-
-extern int dgap_tty_class_init(void);
-extern int dgap_tty_class_destroy(void);
-
-extern void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
-extern void dgap_remove_tty_sysfs(struct device *c);
-
-
-#endif
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
deleted file mode 100644
index a53db9e0a577..000000000000
--- a/drivers/staging/dgap/dgap_trace.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- *
- */
-
-/* $Id: dgap_trace.c,v 1.1 2009/10/23 14:01:57 markh Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/vmalloc.h>
-
-#include "dgap_driver.h"
-#include "dgap_trace.h"
-
-#define TRC_TO_CONSOLE 1
-
-/* file level globals */
-static char *dgap_trcbuf; /* the ringbuffer */
-
-#if defined(TRC_TO_KMEM)
-static int dgap_trcbufi = 0; /* index of the tilde at the end of the trace buffer */
-#endif
-
-extern int dgap_trcbuf_size; /* size of the ringbuffer */
-
-#if defined(TRC_TO_KMEM)
-static DEFINE_SPINLOCK(dgap_tracef_lock);
-#endif
-
-#if 0
-
-#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
-void dgap_tracef(const char *fmt, ...)
-{
- return;
-}
-
-#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
-
-void dgap_tracef(const char *fmt, ...)
-{
- va_list ap;
- char buf[TRC_MAXMSG+1];
- size_t lenbuf;
- int i;
- static int failed = FALSE;
-# if defined(TRC_TO_KMEM)
- unsigned long flags;
-#endif
-
- if(failed)
- return;
-# if defined(TRC_TO_KMEM)
- DGAP_LOCK(dgap_tracef_lock, flags);
-#endif
-
- /* Format buf using fmt and arguments contained in ap. */
- va_start(ap, fmt);
- i = vsprintf(buf, fmt, ap);
- va_end(ap);
- lenbuf = strlen(buf);
-
-# if defined(TRC_TO_KMEM)
- {
- static int initd=0;
-
- /*
- * Now, in addition to (or instead of) printing this stuff out
- * (which is a buffered operation), also tuck it away into a
- * corner of memory which can be examined post-crash in kdb.
- */
- if (!initd) {
- dgap_trcbuf = (char *) vmalloc(dgap_trcbuf_size);
- if(!dgap_trcbuf) {
- failed = TRUE;
- printk("dgap: tracing init failed!\n");
- return;
- }
-
- memset(dgap_trcbuf, '\0', dgap_trcbuf_size);
- dgap_trcbufi = 0;
- initd++;
-
- printk("dgap: tracing enabled - " TRC_DTRC
- " 0x%lx 0x%x\n",
- (unsigned long)dgap_trcbuf,
- dgap_trcbuf_size);
- }
-
-# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
- /*
- * This is the less CPU-intensive way to do things. We simply
- * wrap around before we fall off the end of the buffer. A
- * tilde (~) demarcates the current end of the trace.
- *
- * This method should be used if you are concerned about race
- * conditions as it is less likely to affect the timing of
- * things.
- */
-
- if (dgap_trcbufi + lenbuf >= dgap_trcbuf_size) {
- /* We are wrapping, so wipe out the last tilde. */
- dgap_trcbuf[dgap_trcbufi] = '\0';
- /* put the new string at the beginning of the buffer */
- dgap_trcbufi = 0;
- }
-
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
- dgap_trcbufi += lenbuf;
- dgap_trcbuf[dgap_trcbufi] = '~';
-
-# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
- /*
- * This is the more CPU-intensive way to do things. If we
- * venture into the last 1/8 of the buffer, we shift the
- * last 7/8 of the buffer forward, wiping out the first 1/8.
- * Advantage: No wrap-around, only truncation from the
- * beginning.
- *
- * This method should not be used if you are concerned about
-	 * timing changes affecting the behaviour of the driver (i.e.,
- * race conditions).
- */
- strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
- dgap_trcbufi += lenbuf;
- dgap_trcbuf[dgap_trcbufi] = '~';
- dgap_trcbuf[dgap_trcbufi+1] = '\0';
-
- /* If we're near the end of the trace buffer... */
- if (dgap_trcbufi > (dgap_trcbuf_size/8)*7) {
- /* Wipe out the first eighth to make some more room. */
- strcpy(dgap_trcbuf, &dgap_trcbuf[dgap_trcbuf_size/8]);
- dgap_trcbufi = strlen(dgap_trcbuf)-1;
- /* Plop overflow message at the top of the buffer. */
- bcopy(TRC_OVERFLOW, dgap_trcbuf, strlen(TRC_OVERFLOW));
- }
-# else
-# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
-# endif
- }
- DGAP_UNLOCK(dgap_tracef_lock, flags);
-
-# endif /* defined(TRC_TO_KMEM) */
-}
-
-#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
-
-#endif
-
-/*
- * dgap_tracer_free()
- *
- * Free the trace ring buffer, if one was ever allocated.
- */
-void dgap_tracer_free(void)
-{
- if(dgap_trcbuf)
- vfree(dgap_trcbuf);
-}
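For context, the #if 0'd tracer above keeps a flat character buffer in which a '~' marks the current end of the trace, and the write index simply wraps back to the start instead of running off the end (the TRC_ON_OVERFLOW_WRAP_AROUND case). A small, self-contained userspace model of that scheme, with an invented buffer size and names, might look like this:

#include <string.h>

#define TRC_SIZE 4096

static char trc_buf[TRC_SIZE];
static int trc_idx;

static void trc_append(const char *msg)
{
	size_t len = strlen(msg);

	if (len >= TRC_SIZE - 1)
		return;			/* message can never fit */

	if (trc_idx + len >= TRC_SIZE) {
		trc_buf[trc_idx] = '\0';	/* wipe the old end-of-trace tilde */
		trc_idx = 0;			/* wrap to the start of the buffer */
	}

	memcpy(&trc_buf[trc_idx], msg, len);
	trc_idx += len;
	trc_buf[trc_idx] = '~';		/* mark the new end of the trace */
}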
diff --git a/drivers/staging/dgap/dgap_trace.h b/drivers/staging/dgap/dgap_trace.h
deleted file mode 100644
index b21f46198e71..000000000000
--- a/drivers/staging/dgap/dgap_trace.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *****************************************************************************
- * Header file for dgap_trace.c
- *
- * $Id: dgap_trace.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- */
-
-#ifndef __DGAP_TRACE_H
-#define __DGAP_TRACE_H
-
-#include "dgap_driver.h"
-
-void dgap_tracef(const char *fmt, ...);
-void dgap_tracer_free(void);
-
-#endif
-
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
deleted file mode 100644
index 39fb4dfb8b7e..000000000000
--- a/drivers/staging/dgap/dgap_tty.c
+++ /dev/null
@@ -1,3580 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
- *
- * This is shared code between Digi's CVS archive and the
- * Linux Kernel sources.
- * Changing the source just for reformatting needlessly breaks
- * our CVS diff history.
- *
- * Send any bug fixes/changes to: Eng.Linux at digi dot com.
- * Thank you.
- */
-
-/************************************************************************
- *
- * This file implements the tty driver functionality for the
- * FEP5 based product lines.
- *
- ************************************************************************
- *
- * $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
- */
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/delay.h> /* For udelay */
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
-#include <asm/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/pci.h>
-
-#include "dgap_driver.h"
-#include "dgap_tty.h"
-#include "dgap_types.h"
-#include "dgap_fep5.h"
-#include "dgap_parse.h"
-#include "dgap_conf.h"
-#include "dgap_sysfs.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
-#define init_MUTEX(sem) sema_init(sem, 1)
-#define DECLARE_MUTEX(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
-#endif
-
-/*
- * internal variables
- */
-static struct board_t *dgap_BoardsByMajor[256];
-static uchar *dgap_TmpWriteBuf = NULL;
-static DECLARE_MUTEX(dgap_TmpWriteSem);
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgap_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
-};
-
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-
-static struct ktermios DgapDefaultTermios =
-{
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
- .c_cc = INIT_C_CC,
- .c_line = 0,
-};
-
-/* Our function prototypes */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file);
-static void dgap_tty_close(struct tty_struct *tty, struct file *file);
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
-static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
-static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
-static int dgap_tty_write_room(struct tty_struct* tty);
-static int dgap_tty_chars_in_buffer(struct tty_struct* tty);
-static void dgap_tty_start(struct tty_struct *tty);
-static void dgap_tty_stop(struct tty_struct *tty);
-static void dgap_tty_throttle(struct tty_struct *tty);
-static void dgap_tty_unthrottle(struct tty_struct *tty);
-static void dgap_tty_flush_chars(struct tty_struct *tty);
-static void dgap_tty_flush_buffer(struct tty_struct *tty);
-static void dgap_tty_hangup(struct tty_struct *tty);
-static int dgap_wait_for_drain(struct tty_struct *tty);
-static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value);
-static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info);
-static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmget(struct tty_struct *tty);
-static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-#else
-static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file);
-static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
-#endif
-static int dgap_tty_send_break(struct tty_struct *tty, int msec);
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
-static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
-
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
-
-
-
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgap_tty_preinit(void)
-{
- unsigned long flags;
-
- DGAP_LOCK(dgap_global_lock, flags);
-
- /*
-	 * Allocate a buffer for doing the copy from user space to
-	 * kernel space in the tty write path. We only use one buffer and
- * control access to it with a semaphore. If we are paging, we
- * are already in trouble so one buffer won't hurt much anyway.
- */
- dgap_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_ATOMIC);
-
- if (!dgap_TmpWriteBuf) {
- DGAP_UNLOCK(dgap_global_lock, flags);
- DPR_INIT(("unable to allocate tmp write buf"));
- return (-ENOMEM);
- }
-
- DGAP_UNLOCK(dgap_global_lock, flags);
- return(0);
-}
-
-
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-int dgap_tty_register(struct board_t *brd)
-{
- int rc = 0;
-
- DPR_INIT(("tty_register start"));
-
- brd->SerialDriver = alloc_tty_driver(MAXPORTS);
-
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
- brd->SerialDriver->name = brd->SerialName;
- brd->SerialDriver->name_base = 0;
- brd->SerialDriver->major = 0;
- brd->SerialDriver->minor_start = 0;
- brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver->init_termios = DgapDefaultTermios;
- brd->SerialDriver->driver_name = DRVSTR;
- brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->SerialDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->SerialDriver->ttys)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->SerialDriver->refcount = brd->TtyRefCnt;
-#endif
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
-
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->PrintDriver = alloc_tty_driver(MAXPORTS);
-
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
- brd->PrintDriver->name = brd->PrintName;
- brd->PrintDriver->name_base = 0;
- brd->PrintDriver->major = 0;
- brd->PrintDriver->minor_start = 0;
- brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver->init_termios = DgapDefaultTermios;
- brd->PrintDriver->driver_name = DRVSTR;
- brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->PrintDriver->ttys = kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->PrintDriver->ttys)
- return(-ENOMEM);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
- brd->PrintDriver->refcount = brd->TtyRefCnt;
-#endif
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
-
- if (!brd->dgap_Major_Serial_Registered) {
- /* Register tty devices */
- rc = tty_register_driver(brd->SerialDriver);
- if (rc < 0) {
- APR(("Can't register tty device (%d)\n", rc));
- return(rc);
- }
- brd->dgap_Major_Serial_Registered = TRUE;
- dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
- brd->dgap_Serial_Major = brd->SerialDriver->major;
- }
-
- if (!brd->dgap_Major_TransparentPrint_Registered) {
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->PrintDriver);
- if (rc < 0) {
- APR(("Can't register Transparent Print device (%d)\n", rc));
- return(rc);
- }
- brd->dgap_Major_TransparentPrint_Registered = TRUE;
- dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
- brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
- }
-
- DPR_INIT(("DGAP REGISTER TTY: MAJORS: %d %d\n", brd->SerialDriver->major,
- brd->PrintDriver->major));
-
- return (rc);
-}
-
-
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
- */
-int dgap_tty_init(struct board_t *brd)
-{
- int i;
- int tlw;
- uint true_count = 0;
- uchar *vaddr;
- uchar modem = 0;
- struct channel_t *ch;
- struct bs_t *bs;
- struct cm_t *cm;
-
- if (!brd)
- return (-ENXIO);
-
- DPR_INIT(("dgap_tty_init start\n"));
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_number_of_ports(brd);
-
- if (!brd->nasync) {
- brd->nasync = brd->maxports;
- }
-
- if (brd->nasync > brd->maxports) {
- brd->nasync = brd->maxports;
- }
-
- if (true_count != brd->nasync) {
- if ((brd->type == PPCM) && (true_count == 64)) {
- APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
- brd->name, brd->nasync, true_count));
- }
- else if ((brd->type == PPCM) && (true_count == 0)) {
- APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
- brd->name, brd->nasync, true_count));
- }
- else {
- APR(("***WARNING**** %s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count));
- }
-
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return(-ENXIO);
- }
- }
-
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- if (!brd->channels[i]) {
- brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_ATOMIC);
- if (!brd->channels[i]) {
- DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
- __FILE__, __LINE__));
- }
- }
- }
-
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
- cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-
- if (!brd->channels[i])
- continue;
-
- DGAP_SPINLOCK_INIT(ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
-
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
-
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- }
- else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
-
- ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
- ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
- ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
-
- /* .25 second delay */
- ch->ch_close_delay = 250;
-
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
-
- dgap_cmdw(ch, STLOW, tlw, 0);
-
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
-
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
-
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
-
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
- init_waitqueue_head(&ch->ch_sniff_wait);
-
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &(ch->ch_bs->m_int));
-
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &(ch->ch_bs->edelay));
- else
- writew(100, &(ch->ch_bs->edelay));
-
- writeb(1, &(ch->ch_bs->idata));
- }
-
-
- DPR_INIT(("dgap_tty_init finish\n"));
-
- return (0);
-}
-
-
-/*
- * dgap_tty_post_uninit()
- *
- * Uninitialize any global tty-related data.
- */
-void dgap_tty_post_uninit(void)
-{
- kfree(dgap_TmpWriteBuf);
- dgap_TmpWriteBuf = NULL;
-}
-
-
-/*
- * dgap_tty_uninit()
- *
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
- */
-void dgap_tty_uninit(struct board_t *brd)
-{
- int i = 0;
-
- if (brd->dgap_Major_Serial_Registered) {
- dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
- brd->dgap_Serial_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- dgap_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
- tty_unregister_device(brd->SerialDriver, i);
- }
- tty_unregister_driver(brd->SerialDriver);
- kfree(brd->SerialDriver->ttys);
- brd->SerialDriver->ttys = NULL;
- put_tty_driver(brd->SerialDriver);
- brd->dgap_Major_Serial_Registered = FALSE;
- }
-
- if (brd->dgap_Major_TransparentPrint_Registered) {
- dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
- brd->dgap_TransparentPrint_Major = 0;
- for (i = 0; i < brd->nasync; i++) {
- dgap_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
- tty_unregister_device(brd->PrintDriver, i);
- }
- tty_unregister_driver(brd->PrintDriver);
- kfree(brd->PrintDriver->ttys);
- brd->PrintDriver->ttys = NULL;
- put_tty_driver(brd->PrintDriver);
- brd->dgap_Major_TransparentPrint_Registered = FALSE;
- }
-}
-
-
-#define TMPBUFLEN (1024)
-
-/*
- * dgap_sniff - Dump data out to the "sniff" buffer if the
- * proc sniff file is opened...
- */
-static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
-{
- struct timeval tv;
- int n;
- int r;
- int nbuf;
- int i;
- int tmpbuflen;
- char tmpbuf[TMPBUFLEN];
- char *p = tmpbuf;
- int too_much_data;
-
- /* Leave if sniff not open */
- if (!(ch->ch_sniff_flags & SNIFF_OPEN))
- return;
-
- do_gettimeofday(&tv);
-
- /* Create our header for data dump */
- p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
- tmpbuflen = p - tmpbuf;
-
- do {
- too_much_data = 0;
-
- for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
- p += sprintf(p, "%02x ", *buf);
- buf++;
- tmpbuflen = p - tmpbuf;
- }
-
- if (tmpbuflen < (TMPBUFLEN - 4)) {
- if (i > 0)
- p += sprintf(p - 1, "%s\n", ">");
- else
- p += sprintf(p, "%s\n", ">");
- } else {
- too_much_data = 1;
- len -= i;
- }
-
- nbuf = strlen(tmpbuf);
- p = tmpbuf;
-
- /*
- * Loop while data remains.
- */
- while (nbuf > 0 && ch->ch_sniff_buf) {
- /*
- * Determine the amount of available space left in the
- * buffer. If there's none, wait until some appears.
- */
- n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
-
- /*
- * If there is no space left to write to in our sniff buffer,
- * we have no choice but to drop the data.
- * We *cannot* sleep here waiting for space, because this
- * function was probably called by the interrupt/timer routines!
- */
- if (n == 0) {
- return;
- }
-
- /*
- * Copy as much data as will fit.
- */
-
- if (n > nbuf)
- n = nbuf;
-
- r = SNIFF_MAX - ch->ch_sniff_in;
-
- if (r <= n) {
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
-
- n -= r;
- ch->ch_sniff_in = 0;
- p += r;
- nbuf -= r;
- }
-
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
-
- ch->ch_sniff_in += n;
- p += n;
- nbuf -= n;
-
- /*
- * Wakeup any thread waiting for data
- */
- if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
- ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
- wake_up_interruptible(&ch->ch_sniff_wait);
- }
- }
-
- /*
- * If the user sent us too much data to push into our tmpbuf,
- * we need to keep looping around on all the data.
- */
- if (too_much_data) {
- p = tmpbuf;
- tmpbuflen = 0;
- }
-
- } while (too_much_data);
-}
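For context, the sniff buffer used above is a classic power-of-two ring: the free-space test (ch_sniff_out - ch_sniff_in - 1) & SNIFF_MASK deliberately keeps one slot unused so that "full" and "empty" remain distinguishable. A self-contained sketch of that arithmetic, assuming a 4096-byte ring rather than the driver's real SNIFF_MAX, is:

#define RING_SIZE 4096			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* Bytes that can still be written before the ring is considered full. */
static unsigned int ring_free(unsigned int in, unsigned int out)
{
	return (out - in - 1) & RING_MASK;
}

/* Bytes currently stored in the ring. */
static unsigned int ring_used(unsigned int in, unsigned int out)
{
	return (in - out) & RING_MASK;
}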
-
-
-/*=======================================================================
- *
- * dgap_input - Process received data.
- *
- * ch - Pointer to channel structure.
- *
- *=======================================================================*/
-
-void dgap_input(struct channel_t *ch)
-{
- struct board_t *bd;
- struct bs_t *bs;
- struct tty_struct *tp;
- struct tty_ldisc *ld;
- uint rmask;
- uint head;
- uint tail;
- int data_len;
- ulong lock_flags;
- ulong lock_flags2;
- int flip_len;
- int len = 0;
- int n = 0;
- uchar *buf;
- uchar tmpchar;
- int s = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- tp = ch->ch_tun.un_tty;
-
- bs = ch->ch_bs;
- if (!bs) {
- return;
- }
-
- bd = ch->ch_bd;
- if(!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_READ(("dgap_input start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
-
- rmask = ch->ch_rsize - 1;
-
- head = readw(&(bs->rx_head));
- head &= rmask;
- tail = readw(&(bs->rx_tail));
- tail &= rmask;
-
- data_len = (head - tail) & rmask;
-
- if (data_len == 0) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("No data on port %d\n", ch->ch_portnum));
- return;
- }
-
- /*
- * If the device is not open, or CREAD is off, flush
- * input data and return immediately.
- */
- if ((bd->state != BOARD_READY) || !tp || (tp->magic != TTY_MAGIC) ||
- !(ch->ch_tun.un_flags & UN_ISOPEN) || !(tp->termios.c_cflag & CREAD) ||
- (ch->ch_tun.un_flags & UN_CLOSING)) {
-
- DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
- DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
- tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
- writew(head, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If we are throttled, simply don't read any data.
- */
- if (ch->ch_flags & CH_RXBLOCK) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
- ch->ch_portnum, head, tail));
- return;
- }
-
- /*
-	 * Ignore overruns: just count them and clear the flag.
- */
- tmpchar = readb(&(bs->orun));
- if (tmpchar) {
- ch->ch_err_overrun++;
- writeb(0, &(bs->orun));
- }
-
- DPR_READ(("dgap_input start 2\n"));
-
- /* Decide how much data we can send into the tty layer */
- flip_len = TTY_FLIPBUF_SIZE;
-
- /* Chop down the length, if needed */
- len = min(data_len, flip_len);
- len = min(len, (N_TTY_BUF_SIZE - 1));
-
- ld = tty_ldisc_ref(tp);
-
-#ifdef TTY_DONT_FLIP
- /*
- * If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
- */
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
- len = 0;
-#endif
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
- if (!ld) {
- len = 0;
- } else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
- if (!ld->ops->receive_buf) {
- writew(head, &(bs->rx_tail));
- len = 0;
- }
- }
-
- if (len <= 0) {
- writeb(1, &(bs->idata));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- DPR_READ(("dgap_input 1 - finish\n"));
- if (ld)
- tty_ldisc_deref(ld);
- return;
- }
-
- buf = ch->ch_bd->flipbuf;
- n = len;
-
- /*
-	 * n now contains the maximum amount of data we can copy,
- * bounded either by our buffer size or the amount
- * of data the card actually has pending...
- */
- while (n) {
-
- s = ((head >= tail) ? head : ch->ch_rsize) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
- dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
-
- tail += s;
- buf += s;
-
- n -= s;
- /* Flip queue if needed */
- tail &= rmask;
- }
-
- writew(tail, &(bs->rx_tail));
- writeb(1, &(bs->idata));
- ch->ch_rxcount += len;
-
- /*
- * If we are completely raw, we don't need to go through a lot
- * of the tty layers that exist.
- * In this case, we take the shortest and fastest route we
- * can to relay the data to the user.
- *
- * On the other hand, if we are not raw, we need to go through
-	 * the tty layer, whose API is better defined.
- */
- if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
- dgap_parity_scan(ch, ch->ch_bd->flipbuf, ch->ch_bd->flipflagbuf, &len);
-
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, len);
- }
- else {
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
-	/* Tell the tty layer it's okay to "eat" the data now */
- tty_flip_buffer_push(tp->port);
-
- if (ld)
- tty_ldisc_deref(ld);
-
- DPR_READ(("dgap_input - finish\n"));
-}
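For context, stripped of the FEP bookkeeping, the tail end of dgap_input() is the standard receive hand-off: ask the tty layer for room, insert the bytes, then push the flip buffer to the line discipline. A minimal sketch, assuming a kernel where these helpers take a struct tty_port (as the tp->port usage above already implies), with the demo_ name invented:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void demo_push_rx(struct tty_port *port, const unsigned char *buf,
			 int len)
{
	/* The tty layer may grant less room than requested. */
	len = tty_buffer_request_room(port, len);
	if (len <= 0)
		return;

	tty_insert_flip_string(port, buf, len);
	tty_flip_buffer_push(port);	/* hand the bytes to the line discipline */
}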
-
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-void dgap_carrier(struct channel_t *ch)
-{
- struct board_t *bd;
-
- int virt_carrier = 0;
- int phys_carrier = 0;
-
- DPR_CARR(("dgap_carrier called...\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* Make sure altpin is always set correctly */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- }
- else {
- ch->ch_dsr = DM_DSR;
- ch->ch_cd = DM_CD;
- }
-
- if (ch->ch_mistat & D_CD(ch)) {
- DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, D_CD(ch)));
- phys_carrier = 1;
- }
-
- if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
- virt_carrier = 1;
- }
-
- if (ch->ch_c_cflag & CLOCAL) {
- virt_carrier = 1;
- }
-
-
- DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
-
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- DPR_CARR(("carrier: virt DCD rose\n"));
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
-
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- DPR_CARR(("carrier: physical DCD rose\n"));
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL transition to low, so long as we aren't
- * currently ignoring physical transitions (which is what "virtual
- * carrier" indicates).
- *
- * The transition of the virtual carrier to low really doesn't
- * matter... it really only means "ignore carrier state", not
-	 * "pretend that carrier is there".
- */
- if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0))
- {
-
- /*
- * When carrier drops:
- *
- * Drop carrier on all open units.
- *
- * Flush queues, waking up any task waiting in the
- * line discipline.
- *
- * Send a hangup to the control terminal.
- *
- * Enable all select calls.
- */
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
-
- if (ch->ch_tun.un_open_count > 0) {
- DPR_CARR(("Sending tty hangup\n"));
- tty_hangup(ch->ch_tun.un_tty);
- }
-
- if (ch->ch_pun.un_open_count > 0) {
- DPR_CARR(("Sending pr hangup\n"));
- tty_hangup(ch->ch_pun.un_tty);
- }
- }
-
- /*
- * Make sure that our cached values reflect the current reality.
- */
- if (virt_carrier == 1)
- ch->ch_flags |= CH_FCAR;
- else
- ch->ch_flags &= ~CH_FCAR;
-
- if (phys_carrier == 1)
- ch->ch_flags |= CH_CD;
- else
- ch->ch_flags &= ~CH_CD;
-}
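For context, the comments above reduce to one rule: a hangup is delivered only when carrier is not being faked (neither CLOCAL nor DIGI_FORCEDCD is set), the cached channel state says carrier had been up, and the physical line has now dropped. Condensed into a hypothetical pure function (names invented, not dgap symbols):

/* Nonzero means the open units on this channel should be hung up. */
static int carrier_should_hangup(int virt_carrier, int phys_carrier,
				 int had_carrier)
{
	return virt_carrier == 0 && had_carrier && phys_carrier == 0;
}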
-
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_open()
- *
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct board_t *brd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t *bs;
- uint major = 0;
- uint minor = 0;
- int rc = 0;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- rc = 0;
-
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
-
- if (major > 255) {
- return -ENXIO;
- }
-
- /* Get board pointer from our array of majors we have allocated */
- brd = dgap_BoardsByMajor[major];
- if (!brd) {
- return -ENXIO;
- }
-
- /*
- * If board is not yet up to a state of READY, go to
- * sleep waiting for it to happen or they cancel the open.
- */
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
-
- if (rc) {
- return rc;
- }
-
- DGAP_LOCK(brd->bd_lock, lock_flags);
-
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /* If opened device is greater than our number of ports, bail. */
- if (MINOR(tty_devnum(tty)) > brd->nasync) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- ch = brd->channels[minor];
- if (!ch) {
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- return -ENXIO;
- }
-
- /* Grab channel lock */
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /* Figure out our type */
- if (major == brd->dgap_Serial_Major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- }
- else if (major == brd->dgap_TransparentPrint_Major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
- }
- else {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
- return -ENXIO;
- }
-
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
-
- DPR_OPEN(("Open called. MAJOR: %d MINOR:%d unit: %p NAME: %s\n",
- MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), un, brd->name));
-
- /*
- * Error if channel info pointer is NULL.
- */
- bs = ch->ch_bs;
- if (!bs) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
- DPR_OPEN(("%d BS is 0!\n", __LINE__));
- return -ENXIO;
- }
-
- DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
-
- /*
- * Initialize tty's
- */
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
-
- /* Maybe do something here to the TTY struct as well? */
- }
-
- /*
-	 * Initialize if neither terminal nor printer is open.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-
- DPR_OPEN(("dgap_open: initializing channel in open...\n"));
-
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
-
- /*
- * Flush input queue.
- */
- head = readw(&(bs->rx_head));
- writew(head, &(bs->rx_tail));
-
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- /* TODO: flush our TTY struct here? */
- }
-
- dgap_carrier(ch);
- /*
- * Run param in case we changed anything
- */
- dgap_param(tty);
-
- /*
- * follow protocol for opening port
- */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(brd->bd_lock, lock_flags);
-
- rc = dgap_block_til_ready(tty, file, ch);
-
- if (!un->un_tty) {
- return -ENODEV;
- }
-
- if (rc) {
- DPR_OPEN(("dgap_tty_open returning after dgap_block_til_ready "
- "with %d\n", rc));
- }
-
- /* No going back now, increment our unit and channel counters */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("dgap_tty_open finished\n"));
- return (rc);
-}
-
-
-/*
- * dgap_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
-{
- int retval = 0;
- struct un_t *un = NULL;
- ulong lock_flags;
- uint old_flags = 0;
- int sleep_on_un_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGAP_CHANNEL_MAGIC) {
- return (-ENXIO);
- }
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC) {
- return (-ENXIO);
- }
-
- DPR_OPEN(("dgap_block_til_ready - before block.\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- ch->ch_wopen++;
-
- /* Loop forever */
- while (1) {
-
- sleep_on_un_flags = 0;
-
- /*
- * If board has failed somehow during our sleep, bail with error.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -ENXIO;
- break;
- }
-
- /* If tty was hung up, break out of loop and set error. */
- if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
- break;
- }
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
- * ch_wait_flags to wake us back up.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
-
- /*
- * Our conditions to leave cleanly and happily:
- * 1) NONBLOCKING on the tty is set.
- * 2) CLOCAL is set.
- * 3) DCD (fake or real) is active.
- */
-
- if (file->f_flags & O_NONBLOCK) {
- break;
- }
-
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- break;
- }
-
- if (ch->ch_flags & CH_CD) {
- DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
- break;
- }
-
- if (ch->ch_flags & CH_FCAR) {
- DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
- break;
- }
- }
- else {
- sleep_on_un_flags = 1;
- }
-
- /*
- * If there is a signal pending, the user probably
- * interrupted (ctrl-c) us.
- * Leave loop with error set.
- */
- if (signal_pending(current)) {
- DPR_OPEN(("%d: signal pending...\n", __LINE__));
- retval = -ERESTARTSYS;
- break;
- }
-
- DPR_OPEN(("dgap_block_til_ready - blocking.\n"));
-
- /*
- * Store the flags before we let go of channel lock
- */
- if (sleep_on_un_flags)
- old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
- else
- old_flags = ch->ch_flags;
-
- /*
- * Let go of channel lock before calling schedule.
- * Our poller will get any FEP events and wake us up when DCD
- * eventually goes active.
- */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("Going to sleep on %s flags...\n",
- (sleep_on_un_flags ? "un" : "ch")));
-
- /*
- * Wait for something in the flags to change from the current value.
- */
- if (sleep_on_un_flags) {
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
- }
- else {
- retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
- }
-
- DPR_OPEN(("After sleep... retval: %x\n", retval));
-
- /*
- * We got woken up for some reason.
- * Before looping around, grab our channel lock.
- */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- }
-
- ch->ch_wopen--;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_OPEN(("dgap_block_til_ready - after blocking.\n"));
-
- if (retval) {
- DPR_OPEN(("dgap_block_til_ready - done. error. retval: %x\n", retval));
- return(retval);
- }
-
- DPR_OPEN(("dgap_block_til_ready - done no error. jiffies: %lu\n", jiffies));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_hangup()
- *
- * Hangup the port. Like a close, but don't wait for output to drain.
- */
-static void dgap_tty_hangup(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_CLOSE(("dgap_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
- ch->ch_open_count, un->un_open_count));
-
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
-
- DPR_CLOSE(("dgap_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
- ch->ch_open_count, un->un_open_count));
-}
-
-
-
-/*
- * dgap_tty_close()
- *
- */
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct ktermios *ts;
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- int rc = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- ts = &tty->termios;
-
- DPR_CLOSE(("Close called\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- /*
- * Determine if this is the last close or not - and if we agree about
- * which type of close it is with the Line Discipline
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0) {
- APR(("bad serial port open count of %d\n", un->un_open_count));
- un->un_open_count = 0;
- }
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- DPR_CLOSE(("dgap_tty_close: not last close ch: %d un:%d\n",
- ch->ch_open_count, un->un_open_count));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return;
- }
-
-	/* OK, it's the last close on the unit */
- DPR_CLOSE(("dgap_tty_close - last close on unit procedures\n"));
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- DPR_CLOSE(("Calling wait_for_drain\n"));
- rc = dgap_wait_for_drain(tty);
- DPR_CLOSE(("After calling wait_for_drain\n"));
-
- if (rc) {
- DPR_BASIC(("dgap_tty_close - bad return: %d ", rc));
- }
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL ) {
- DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
- ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
- dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- if (ch->ch_close_delay) {
- DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- dgap_ms_sleep(ch->ch_close_delay);
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
- }
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
-
- }
-
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON) ) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- DPR_CLOSE(("Close. Doing wakeups\n"));
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_BASIC(("dgap_tty_close - complete\n"));
-}
-
-
-/*
- * dgap_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct board_t *bd = NULL;
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- uchar tbusy;
- uint chars = 0;
- u16 thead, ttail, tmask, chead, ctail;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
-
- if (tty == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (0);
-
- bs = ch->ch_bs;
- if (!bs)
- return (0);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- tmask = (ch->ch_tsize - 1);
-
- /* Get Transmit queue pointers */
- thead = readw(&(bs->tx_head)) & tmask;
- ttail = readw(&(bs->tx_tail)) & tmask;
-
- /* Get tbusy flag */
- tbusy = readb(&(bs->tbusy));
-
- /* Get Command queue pointers */
- chead = readw(&(ch->ch_cm->cm_head));
- ctail = readw(&(ch->ch_cm->cm_tail));
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- /*
-	 * The only way we know for sure that there is no pending
-	 * data left to be transferred is if:
- * 1) Transmit head and tail are equal (empty).
- * 2) Command queue head and tail are equal (empty).
- * 3) The "TBUSY" flag is 0. (Transmitter not busy).
- */
-
- if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
- chars = 0;
- }
- else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + ch->ch_tsize;
- /*
- * Fudge factor here.
- * If chars is zero, we know that the command queue had
- * something in it or tbusy was set. Because we cannot
- * be sure if there is still some data to be transmitted,
-	 * let's lie and tell the ld we have 1 byte left.
- */
- if (chars == 0) {
- /*
- * If TBUSY is still set, and our tx buffers are empty,
- * force the firmware to send me another wakeup after
- * TBUSY has been cleared.
- */
- if (tbusy != 0) {
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- }
- chars = 1;
- }
- }
-
- DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
- ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
- return(chars);
-}
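
A minimal standalone sketch (not driver code) of the occupancy arithmetic used above: head and tail are masked to the buffer size, and a head that has wrapped below the tail is corrected by adding tsize back. The power-of-two buffer size and the sample indices are illustrative assumptions.

#include <stdio.h>

/* Transmit-queue occupancy, assuming tsize is a power of two. */
static unsigned int chars_pending(unsigned int head, unsigned int tail,
				  unsigned int tsize)
{
	unsigned int mask = tsize - 1;

	head &= mask;
	tail &= mask;

	if (head >= tail)
		return head - tail;
	return head - tail + tsize;	/* head wrapped past the end */
}

int main(void)
{
	/* Head wrapped past a 1024-byte buffer end: 34 characters pending. */
	printf("%u\n", chars_pending(10, 1000, 1024));
	return 0;
}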
-
-
-static int dgap_wait_for_drain(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t *bs;
- int ret = -EIO;
- uint count = 1;
- ulong lock_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bs = ch->ch_bs;
- if (!bs)
- return ret;
-
- ret = 0;
-
- DPR_DRAIN(("dgap_wait_for_drain start\n"));
-
- /* Loop until data is drained */
- while (count != 0) {
-
- count = dgap_tty_chars_in_buffer(tty);
-
- if (count == 0)
- break;
-
- /* Set flag waiting for drain */
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /* Go to sleep till we get woken up */
- ret = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
- /* If ret is non-zero, user ctrl-c'ed us */
- if (ret) {
- break;
- }
- }
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- un->un_flags &= ~(UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_DRAIN(("dgap_wait_for_drain finish\n"));
- return (ret);
-}
-
-
-/*
- * dgap_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available. This only affects printer
- * output.
- */
-static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (tty == NULL)
- return (bytes_available);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (bytes_available);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (bytes_available);
-
- /*
-	 * If it's not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGAP_PRINT)
- return (bytes_available);
-
- if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
- int cps_limit = 0;
- unsigned long current_time = jiffies;
- unsigned long buffer_time = current_time +
- (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
-
- if (ch->ch_cpstime < current_time) {
- /* buffer is empty */
- ch->ch_cpstime = current_time; /* reset ch_cpstime */
- cps_limit = ch->ch_digi.digi_bufsize;
- }
- else if (ch->ch_cpstime < buffer_time) {
- /* still room in the buffer */
- cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
- }
- else {
- /* no room in the buffer */
- cps_limit = 0;
- }
-
- bytes_available = min(cps_limit, bytes_available);
- }
-
- return (bytes_available);
-}
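
The cps throttle above can be illustrated with a standalone sketch (not driver code): cpstime plays the role of ch_cpstime, the jiffies value at which everything already queued for the printer will have drained at maxcps characters per second, and the caller charges that clock for whatever it actually writes. HZ, the rates, and the sample numbers are assumed example values.

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example */

static int maxcps_room(unsigned long now, unsigned long *cpstime,
		       int maxcps, int bufsize, int wanted)
{
	unsigned long buffer_time = now + (HZ * bufsize) / maxcps;
	int limit;

	if (*cpstime < now) {
		*cpstime = now;			/* printer buffer has drained */
		limit = bufsize;
	} else if (*cpstime < buffer_time) {
		limit = ((buffer_time - *cpstime) * maxcps) / HZ;
	} else {
		limit = 0;			/* printer buffer is full */
	}

	return wanted < limit ? wanted : limit;
}

int main(void)
{
	unsigned long cpstime = 0, now = 1000;
	int sent;

	/* First write: buffer empty, so up to bufsize (1000) is allowed. */
	sent = maxcps_room(now, &cpstime, 100, 1000, 400);
	cpstime += (HZ * sent) / 100;		/* caller charges what it wrote */
	printf("sent %d, cpstime %lu\n", sent, cpstime);	/* sent 400, cpstime 1400 */
	return 0;
}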
-
-
-static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
-{
- struct channel_t *ch = NULL;
- struct bs_t *bs = NULL;
-
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- if ((event & UN_LOW) != 0) {
- if ((un->un_flags & UN_LOW) == 0) {
- un->un_flags |= UN_LOW;
- writeb(1, &(bs->ilow));
- }
- }
-	if ((event & UN_EMPTY) != 0) {
- if ((un->un_flags & UN_EMPTY) == 0) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- }
- }
-}
-
-
-/*
- * dgap_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgap_tty_write_room(struct tty_struct *tty)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- u16 head, tail, tmask;
- int ret = 0;
- ulong lock_flags = 0;
-
- if (tty == NULL || dgap_TmpWriteBuf == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (0);
-
- bs = ch->ch_bs;
- if (!bs)
- return (0);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if ((ret = tail - head - 1) < 0)
- ret += ch->ch_tsize;
-
- /* Limit printer to maxcps */
- ret = dgap_maxcps_room(tty, ret);
-
- /*
- * If we are printer device, leave space for
- * possibly both the on and off strings.
- */
- if (un->un_type == DGAP_PRINT) {
- if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
- }
- else {
- if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
- }
-
- if (ret < 0)
- ret = 0;
-
- /*
- * Schedule FEP to wake us up if needed.
- *
- * TODO: This might be overkill...
- * Do we really need to schedule callbacks from the FEP
- * in every case? Can we get smarter based on ret?
- */
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
-
- return(ret);
-}
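
For reference, a standalone sketch (not driver code) of the free-space computation above: one slot is deliberately kept unused so a full buffer can be distinguished from an empty one, and a negative intermediate result means the tail has wrapped, so tsize is added back. The 1024-byte size is an arbitrary example.

#include <stdio.h>

static int write_room(unsigned int head, unsigned int tail, unsigned int tsize)
{
	int room = (int)tail - (int)head - 1;	/* keep one slot unused */

	if (room < 0)
		room += tsize;			/* tail wrapped behind head */
	return room;
}

int main(void)
{
	printf("%d\n", write_room(0, 0, 1024));		/* empty buffer: 1023 */
	printf("%d\n", write_room(1023, 0, 1024));	/* full buffer: 0 */
	return 0;
}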
-
-
-/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
-	 * Simply call dgap_tty_write().
- */
- DPR_WRITE(("dgap_tty_put_char called\n"));
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-
-/*
- * dgap_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * All of the Transparent Print magic lives in here as well.
- */
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
- struct bs_t *bs = NULL;
- char *vaddr = NULL;
- u16 head, tail, tmask, remain;
- int bufcount = 0, n = 0;
- int orig_count = 0;
- ulong lock_flags;
- int from_user = 0;
-
- if (tty == NULL || dgap_TmpWriteBuf == NULL)
- return(0);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (0);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return(0);
-
- bs = ch->ch_bs;
- if (!bs)
- return(0);
-
- if (!count)
- return(0);
-
- DPR_WRITE(("dgap_tty_write: Port: %x tty=%p user=%d len=%d\n",
- ch->ch_portnum, tty, from_user, count));
-
- /*
- * Store original amount of characters passed in.
- * This helps to figure out if we should ask the FEP
- * to send us an event when it has more space available.
- */
- orig_count = count;
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- /* Get our space available for the channel from the board */
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if ((bufcount = tail - head - 1) < 0)
- bufcount += ch->ch_tsize;
-
- DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
- __LINE__, bufcount, count, tail, head, tmask));
-
- /*
- * Limit printer output to maxcps overall, with bursts allowed
- * up to bufsize characters.
- */
- bufcount = dgap_maxcps_room(tty, bufcount);
-
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
- count = min(count, bufcount);
-
- /*
- * Bail if no space left.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
- }
-
- /*
- * Output the printer ON string, if we are in terminal mode, but
- * need to be in printer mode.
- */
- if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_onstr,
- (int) ch->ch_digi.digi_onlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags |= CH_PRON;
- }
-
- /*
- * On the other hand, output the printer OFF string, if we are
- * currently in printer mode, but need to output to the terminal.
- */
- if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
-
- /*
- * If there is nothing left to copy, or I can't handle any more data, leave.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- return(0);
- }
-
- if (from_user) {
-
- count = min(count, WRITEBUFLEN);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- /*
- * If data is coming from user space, copy it into a temporary
- * buffer so we don't get swapped out while doing the copy to
- * the board.
- */
- /* we're allowed to block if it's from_user */
- if (down_interruptible(&dgap_TmpWriteSem)) {
- return (-EINTR);
- }
-
- if (copy_from_user(dgap_TmpWriteBuf, (const uchar __user *) buf, count)) {
- up(&dgap_TmpWriteSem);
- printk("Write: Copy from user failed!\n");
- return -EFAULT;
- }
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- buf = dgap_TmpWriteBuf;
- }
-
- n = count;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = ch->ch_tstart + ch->ch_tsize - head;
-
- if (n >= remain) {
- n -= remain;
- vaddr = ch->ch_taddr + head;
-
- memcpy_toio(vaddr, (uchar *) buf, remain);
- dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
-
- head = ch->ch_tstart;
- buf += remain;
- }
-
- if (n > 0) {
-
- /*
- * Move rest of data.
- */
- vaddr = ch->ch_taddr + head;
- remain = n;
-
- memcpy_toio(vaddr, (uchar *) buf, remain);
- dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
-
- head += remain;
-
- }
-
- if (count) {
- ch->ch_txcount += count;
- head &= tmask;
- writew(head, &(bs->tx_head));
- }
-
-
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-
- /*
- * If this is the print device, and the
- * printer is still on, we need to turn it
- * off before going idle. If the buffer is
- * non-empty, wait until it goes empty.
- * Otherwise turn it off right now.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- tail = readw(&(bs->tx_tail)) & tmask;
-
- if (tail != head) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &(bs->iempty));
- }
- else {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- head = readw(&(bs->tx_head)) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
- }
-
- /* Update printer buffer empty time. */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
- && (ch->ch_digi.digi_bufsize > 0)) {
- ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
- }
-
- if (from_user) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- up(&dgap_TmpWriteSem);
- }
- else {
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
- }
-
- DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
-
- return (count);
-}
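
The split copy in the write path above (move data up to the wrap point, then restart at the bottom of the circular transmit buffer) can be shown with a standalone sketch. Plain memcpy() stands in for memcpy_toio(), the buffer is simplified to start at offset zero, and the 8-byte ring and sample data are assumptions.

#include <stdio.h>
#include <string.h>

static unsigned int wrap_write(char *buf, unsigned int tsize,
			       unsigned int head, const char *data,
			       unsigned int n)
{
	unsigned int remain = tsize - head;	/* space up to the wrap point */

	if (n >= remain) {
		memcpy(buf + head, data, remain);
		data += remain;
		n -= remain;
		head = 0;			/* restart at the bottom */
	}
	memcpy(buf + head, data, n);
	return (head + n) & (tsize - 1);	/* new head, masked */
}

int main(void)
{
	char ring[8];
	unsigned int head = wrap_write(ring, sizeof(ring), 6, "abcde", 5);

	/* "ab" lands at offsets 6..7, "cde" wraps to 0..2, new head is 3. */
	printf("new head %u, ring starts \"%.3s\"\n", head, ring);
	return 0;
}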
-
-
-
-/*
- * Return modem signals to ld.
- */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmget(struct tty_struct *tty)
-#else
-static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file)
-#endif
-{
- struct channel_t *ch;
- struct un_t *un;
- int result = -EIO;
- uchar mstat = 0;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return result;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return result;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return result;
-
- DPR_IOCTL(("dgap_tty_tiocmget start\n"));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- DPR_IOCTL(("dgap_tty_tiocmget finish\n"));
-
- return result;
-}
-
-
-/*
- * dgap_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
-static int dgap_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-#else
-static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-#endif
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- DPR_IOCTL(("dgap_tty_tiocmset start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (set & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (set & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- if (clear & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (clear & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_tiocmset finish\n"));
-
- return (0);
-}
-
-
-
-/*
- * dgap_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgap_tty_send_break(struct tty_struct *tty, int msec)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -EIO;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- switch (msec) {
- case -1:
- msec = 0xFFFF;
- break;
- case 0:
- msec = 1;
- break;
- default:
- msec /= 10;
- break;
- }
-
- DPR_IOCTL(("dgap_tty_send_break start 1. %lx\n", jiffies));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-#if 0
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-#endif
- dgap_cmdw(ch, SBREAK, (u16) msec, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_send_break finish\n"));
-
- return (0);
-}
-
-
-
-
-/*
- * dgap_tty_wait_until_sent()
- *
- * wait until data has been transmitted, called by ld.
- */
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- int rc;
- rc = dgap_wait_for_drain(tty);
- if (rc) {
-		DPR_IOCTL(("dgap_tty_wait_until_sent - bad return: %d ", rc));
- return;
- }
- return;
-}
-
-
-
-/*
- * dgap_tty_send_xchar()
- *
- * send a high priority character, called by ld.
- */
-static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_send_xchar start 1. %lx\n", jiffies));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /*
- * This is technically what we should do.
- * However, the NIST tests specifically want
- * to see each XON or XOFF character that it
-	 * sends, so let's just send each character
- * by hand...
- */
-#if 0
- if (c == STOP_CHAR(tty)) {
- dgap_cmdw(ch, RPAUSE, 0, 0);
- }
- else if (c == START_CHAR(tty)) {
- dgap_cmdw(ch, RRESUME, 0, 0);
- }
- else {
- dgap_wmove(ch, &c, 1);
- }
-#else
- dgap_wmove(ch, &c, 1);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_send_xchar finish\n"));
-
- return;
-}
-
-
-
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
- int result = 0;
- uchar mstat = 0;
- ulong lock_flags;
- int rc = 0;
-
- DPR_IOCTL(("dgap_get_modem_info start\n"));
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return(-ENXIO);
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
-
- mstat = readb(&(ch->ch_bs->m_stat));
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- rc = put_user(result, value);
-
- DPR_IOCTL(("dgap_get_modem_info finish\n"));
- return(rc);
-}
-
-
-/*
- * dgap_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int ret = -ENXIO;
- unsigned int arg = 0;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return ret;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return ret;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return ret;
-
- DPR_IOCTL(("dgap_set_modem_info() start\n"));
-
- ret = get_user(arg, value);
- if (ret) {
- DPR_IOCTL(("dgap_set_modem_info %d ret: %x. finished.\n", __LINE__, ret));
- return(ret);
- }
-
- DPR_IOCTL(("dgap_set_modem_info: command: %x arg: %x\n", command, arg));
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- case TIOCMSET:
- ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
-
- if (arg & TIOCM_RTS) {
- ch->ch_mval |= D_RTS(ch);
- }
- else {
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mval |= (D_DTR(ch));
- }
- else {
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- default:
- return(-EINVAL);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_set_modem_info finish\n"));
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- *
- */
-static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- *
- */
-static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- struct digi_t new_digi;
- ulong lock_flags = 0;
- unsigned long lock_flags2;
-
- DPR_IOCTL(("DIGI_SETA start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
- if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
- DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
-
- if (ch->ch_digi.digi_maxcps < 1)
- ch->ch_digi.digi_maxcps = 1;
-
- if (ch->ch_digi.digi_maxcps > 10000)
- ch->ch_digi.digi_maxcps = 10000;
-
- if (ch->ch_digi.digi_bufsize < 10)
- ch->ch_digi.digi_bufsize = 10;
-
- if (ch->ch_digi.digi_maxchar < 1)
- ch->ch_digi.digi_maxchar = 1;
-
- if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
- ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
- if (ch->ch_digi.digi_onlen > DIGI_PLEN)
- ch->ch_digi.digi_onlen = DIGI_PLEN;
-
- if (ch->ch_digi.digi_offlen > DIGI_PLEN)
- ch->ch_digi.digi_offlen = DIGI_PLEN;
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("DIGI_SETA finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_digigetedelay()
- *
- * Ioctl to get the current edelay setting.
- *
- */
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- tmp = readw(&(ch->ch_bs->edelay));
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digisetedelay()
- *
- * Ioctl to set the EDELAY setting
- *
- */
-static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int new_digi;
- ulong lock_flags;
- ulong lock_flags2;
-
-	DPR_IOCTL(("DIGI_SETEDELAY start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
- if (copy_from_user(&new_digi, new_info, sizeof(int))) {
- DPR_IOCTL(("DIGI_SETEDELAY failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- writew((u16) new_digi, &(ch->ch_bs->edelay));
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
-	DPR_IOCTL(("DIGI_SETEDELAY finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_digigetcustombaud()
- *
- * Ioctl to get the current custom baud rate setting.
- */
-static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return (-EFAULT);
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- memset(&tmp, 0, sizeof(tmp));
-
- DGAP_LOCK(ch->ch_lock, lock_flags);
- tmp = dgap_get_custom_baud(ch);
- DGAP_UNLOCK(ch->ch_lock, lock_flags);
-
- DPR_IOCTL(("DIGI_GETCUSTOMBAUD. Returning %d\n", tmp));
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return (-EFAULT);
-
- return (0);
-}
-
-
-/*
- * dgap_tty_digisetcustombaud()
- *
- * Ioctl to set the custom baud rate setting
- */
-static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- uint new_rate;
- ulong lock_flags;
- ulong lock_flags2;
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD start\n"));
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-EFAULT);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-EFAULT);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-EFAULT);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-EFAULT);
-
-
- if (copy_from_user(&new_rate, new_info, sizeof(unsigned int))) {
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD failed copy_from_user\n"));
- return(-EFAULT);
- }
-
- if (bd->bd_flags & BD_FEP5PLUS) {
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD. Setting %d\n", new_rate));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_custom_speed = new_rate;
-
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- }
-
- DPR_IOCTL(("DIGI_SETCUSTOMBAUD finish\n"));
-
- return(0);
-}
-
-
-/*
- * dgap_tty_set_termios()
- */
-static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long lock_flags;
- unsigned long lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- dgap_carrier(ch);
- dgap_param(tty);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-}
-
-
-static void dgap_tty_throttle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_throttle start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags |= (CH_RXBLOCK);
-#if 1
- dgap_cmdw(ch, RPAUSE, 0, 0);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_throttle finish\n"));
-}
-
-
-static void dgap_tty_unthrottle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_unthrottle start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
-#if 1
- dgap_cmdw(ch, RRESUME, 0, 0);
-#endif
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_unthrottle finish\n"));
-}
-
-
-static void dgap_tty_start(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_start start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, RESUMETX, 0, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_start finish\n"));
-}
-
-
-static void dgap_tty_stop(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_stop start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, PAUSETX, 0, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_stop finish\n"));
-}
-
-
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_flush_chars start\n"));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- /* TODO: Do something here */
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_flush_chars finish\n"));
-}
-
-
-
-/*
- * dgap_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- DPR_IOCTL(("dgap_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-
- DPR_IOCTL(("dgap_tty_flush_buffer finish\n"));
-}
-
-
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgap_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
- u16 head = 0;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
- void __user *uarg = (void __user *) arg;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return (-ENODEV);
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return (-ENODEV);
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return (-ENODEV);
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return (-ENODEV);
-
- DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if (un->un_open_count <= 0) {
- DPR_BASIC(("dgap_tty_ioctl - unit not open.\n"));
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(-EIO);
- }
-
- switch (cmd) {
-
- /* Here are all the standard ioctl's that we MUST implement */
-
- case TCSBRK:
- /*
- * TCSBRK is SVID version: non-zero arg --> no break
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
-
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- if(((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) {
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
- }
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(0);
-
-
- case TCSBRKP:
- /* support for POSIX tcsendbreak()
-		 *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(0);
-
- case TIOCSBRK:
- /*
- * FEP5 doesn't support turning on a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- rc = tty_check_change(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- if (rc) {
- return(rc);
- }
-
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return 0;
-
- case TIOCCBRK:
- /*
- * FEP5 doesn't support turning off a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
- return(rc);
-
- case TIOCSSOFTCAR:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- rc = get_user(arg, (unsigned long __user *) arg);
- if (rc)
- return(rc);
-
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
- dgap_param(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- return(0);
-
- case TIOCMGET:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_get_modem_info(ch, uarg));
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_set_modem_info(tty, cmd, uarg));
-
- /*
- * Here are any additional ioctl's that we want to implement
- */
-
- case TCFLSH:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- rc = tty_check_change(tty);
- if (rc) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(rc);
- }
-
- if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
- if (!(un->un_type == DGAP_PRINT)) {
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- writeb(0, &(ch->ch_bs->orun));
- }
- }
-
- if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0 );
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
-
- /* Can't hold any locks when calling tty_wakeup! */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- tty_wakeup(tty);
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
-
- /* pretend we didn't recognize this IOCTL */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
- __LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- return(-ENOIOCTLCMD);
-
- case TCSETSF:
- case TCSETSW:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- if (cmd == TCSETSF) {
- /* flush rx */
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
- }
-
- /* now wait for all the output to drain */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
- ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
-
- /* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
-
- case TCSETAW:
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
-
- /* pretend we didn't recognize this */
- return(-ENOIOCTLCMD);
-
- case TCXONC:
- /*
- * The Linux Line Discipline (LD) would do this for us if we
- * let it, but we have the special firmware options to do this
- * the "right way" regardless of hardware or software flow
-		 * control so we'll do it ourselves instead of letting the LD
- * do it.
- */
- rc = tty_check_change(tty);
- if (rc) {
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(rc);
- }
-
- DPR_IOCTL(("dgap_ioctl - in TCXONC - %d\n", cmd));
- switch (arg) {
-
- case TCOON:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_tty_start(tty);
- return(0);
- case TCOOFF:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- dgap_tty_stop(tty);
- return(0);
- case TCION:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return(-ENOIOCTLCMD);
- case TCIOFF:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return(-ENOIOCTLCMD);
- default:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(-EINVAL);
- }
-
- case DIGI_GETA:
- /* get information for ditty */
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigeta(tty, uarg));
-
- case DIGI_SETAW:
- case DIGI_SETAF:
-
- /* set information for ditty */
- if (cmd == (DIGI_SETAW)) {
-
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc) {
- DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
- return(-EINTR);
- }
- DGAP_LOCK(bd->bd_lock, lock_flags);
- DGAP_LOCK(ch->ch_lock, lock_flags2);
- }
- else {
- tty_ldisc_flush(tty);
- }
- /* fall thru */
-
- case DIGI_SETA:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digiseta(tty, uarg));
-
- case DIGI_GEDELAY:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigetedelay(tty, uarg));
-
- case DIGI_SEDELAY:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digisetedelay(tty, uarg));
-
- case DIGI_GETCUSTOMBAUD:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digigetcustombaud(tty, uarg));
-
- case DIGI_SETCUSTOMBAUD:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return(dgap_tty_digisetcustombaud(tty, uarg));
-
- case DIGI_RESET_PORT:
- dgap_firmware_reset_port(ch);
- dgap_param(tty);
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
- return 0;
-
- default:
- DGAP_UNLOCK(ch->ch_lock, lock_flags2);
- DGAP_UNLOCK(bd->bd_lock, lock_flags);
-
- DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
- DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
- dgap_ioctl_name(cmd), cmd, arg));
-
- return(-ENOIOCTLCMD);
- }
-}
diff --git a/drivers/staging/dgap/dgap_tty.h b/drivers/staging/dgap/dgap_tty.h
deleted file mode 100644
index 464a460b6be8..000000000000
--- a/drivers/staging/dgap/dgap_tty.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_TTY_H
-#define __DGAP_TTY_H
-
-#include "dgap_driver.h"
-
-int dgap_tty_register(struct board_t *brd);
-
-int dgap_tty_preinit(void);
-int dgap_tty_init(struct board_t *);
-
-void dgap_tty_post_uninit(void);
-void dgap_tty_uninit(struct board_t *);
-
-void dgap_carrier(struct channel_t *ch);
-void dgap_input(struct channel_t *ch);
-
-
-#endif
diff --git a/drivers/staging/dgap/dgap_types.h b/drivers/staging/dgap/dgap_types.h
deleted file mode 100644
index eca38c7f359d..000000000000
--- a/drivers/staging/dgap/dgap_types.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DGAP_TYPES_H
-#define __DGAP_TYPES_H
-
-#ifndef TRUE
-# define TRUE 1
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-/* Required for our shared headers! */
-typedef unsigned char uchar;
-
-#endif
diff --git a/drivers/staging/dgap/digi.h b/drivers/staging/dgap/digi.h
deleted file mode 100644
index bcea4f734a32..000000000000
--- a/drivers/staging/dgap/digi.h
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: digi.h,v 1.1 2009/10/23 14:01:57 markh Exp $
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- */
-
-#ifndef __DIGI_H
-#define __DIGI_H
-
-/************************************************************************
- *** Definitions for Digi ditty(1) command.
- ************************************************************************/
-
-
-/*
- * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
- */
-
-/************************************************************************
- * This module provides application access to special Digi
- * serial line enhancements which are not standard UNIX(tm) features.
- ************************************************************************/
-
-#if !defined(TIOCMODG)
-
-#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
-#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
-
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
-#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */
-#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */
-#endif
-
-
-#if !defined(TIOCSDTR)
-#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
-#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA ('e'<<8) | 94 /* Read params */
-
-#define DIGI_SETA ('e'<<8) | 95 /* Set params */
-#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
-#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
-
-#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
- /* Adapter Memory */
-
-#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
- /* control characters */
-#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
- /* control characters */
-#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
- /* flow control chars */
-#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
- /* flow control chars */
-
-#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
-#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
-
-struct digiflow_t {
- unsigned char startc; /* flow cntl start char */
- unsigned char stopc; /* flow cntl stop char */
-};
-
-
-#ifdef FLOW_2200
-#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
-#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
-#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
-#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
-#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
-#define F2200_XON 0xf8
-#define P2200_XON 0xf9
-#define F2200_XOFF 0xfa
-#define P2200_XOFF 0xfb
-
-#define FXOFF_MASK 0x03 /* 2200 flow status mask */
-#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
-#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
-#endif
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DSRPACE 0x0010 /* DSR output flow control */
-#define DCDPACE 0x0020 /* DCD output flow control */
-#define DTRPACE 0x0040 /* DTR input flow control */
-#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
-#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_422 0x4000 /* for 422/232 selectable panel */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-
-/************************************************************************
- * These options are not supported on the comxi.
- ************************************************************************/
-#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
-
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
-};
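
A hedged userspace sketch of fetching these per-port parameters with the DIGI_GETA ioctl defined above; "digi.h" stands for this header, the device node path is only an example, and error handling is minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "digi.h"		/* the DIGI_GETA and struct digi_t definitions above */

int main(void)
{
	struct digi_t d;
	int fd = open("/dev/ttya01", O_RDONLY | O_NOCTTY);	/* example node */

	if (fd < 0 || ioctl(fd, DIGI_GETA, &d) < 0) {
		perror("DIGI_GETA");
		return 1;
	}

	printf("flags %#x maxcps %d bufsize %d\n",
	       (unsigned int)d.digi_flags, d.digi_maxcps, d.digi_bufsize);
	close(fd);
	return 0;
}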
-
-/************************************************************************
- * KME definitions and structures.
- ************************************************************************/
-#define RW_IDLE 0 /* Operation complete */
-#define RW_READ 1 /* Read Concentrator Memory */
-#define RW_WRITE 2 /* Write Concentrator Memory */
-
-struct rw_t {
- unsigned char rw_req; /* Request type */
- unsigned char rw_board; /* Host Adapter board number */
- unsigned char rw_conc; /* Concentrator number */
- unsigned char rw_reserved; /* Reserved for expansion */
- unsigned long rw_addr; /* Address in concentrator */
- unsigned short rw_size; /* Read/write request length */
- unsigned char rw_data[128]; /* Data to read/write */
-};
-
-/***********************************************************************
- * Shrink Buffer and Board Information definitions and structures.
- ************************************************************************/
- /* Board type return codes */
-#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
-#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
-#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
-#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
-#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
-
- /* Non-Zero Result codes. */
-#define RESULT_NOBDFND 1 /* A Digi product at that port is not config installed */
-#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
-#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
-#define RESULT_TOOSML 4 /* Too small an area to shrink. */
-#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
-
-struct shrink_buf_struct {
- unsigned long shrink_buf_vaddr; /* Virtual address of board */
- unsigned long shrink_buf_phys; /* Physical address of board */
- unsigned long shrink_buf_bseg; /* Amount of board memory */
- unsigned long shrink_buf_hseg; /* '186 Beginning of Dual-Port */
-
- unsigned long shrink_buf_lseg; /* '186 Beginning of freed memory */
- unsigned long shrink_buf_mseg; /* Linear address from start of
-						   dual-port where freed memory
- begins, host viewpoint. */
-
- unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
- xxmemoff */
-
- unsigned long shrink_buf_reserva; /* Reserved */
- unsigned long shrink_buf_reservb; /* Reserved */
- unsigned long shrink_buf_reservc; /* Reserved */
- unsigned long shrink_buf_reservd; /* Reserved */
-
- unsigned char shrink_buf_result; /* Reason for call failing
- Zero is Good return */
- unsigned char shrink_buf_init; /* Non-Zero if it caused an
- xxinit call. */
-
- unsigned char shrink_buf_anports; /* Number of async ports */
- unsigned char shrink_buf_snports; /* Number of sync ports */
- unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
- 2 = PC/Xm,
- 3 = PC/Xe
- 4 = MC/Xi
- 5 = COMX/i */
- unsigned char shrink_buf_card; /* Card number */
-
-};
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
- unsigned long dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
-};
-
-#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_ioport; /* io port address */
- unsigned long info_physaddr; /* memory address */
- unsigned long info_physsize; /* Size of host mem window */
- unsigned long info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
-};
-
-#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
-
-struct digi_stat {
- unsigned int info_chan; /* Channel number (0 based) */
- unsigned int info_brd; /* Board number (0 based) */
- unsigned long info_cflag; /* cflag for channel */
- unsigned long info_iflag; /* iflag for channel */
- unsigned long info_oflag; /* oflag for channel */
- unsigned long info_mstat; /* mstat for channel */
- unsigned long info_tx_data; /* tx_data for channel */
- unsigned long info_rx_data; /* rx_data for channel */
- unsigned long info_hflow; /* hflow for channel */
- unsigned long info_reserved[8]; /* for future expansion */
-};
-
-#define DIGI_GETSTAT ('d'<<8) | 244	/* get channel status */
-/************************************************************************
- *
- * Structure used with ioctl commands for per-channel information
- *
- ************************************************************************/
-struct digi_ch {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_channel; /* Channel index number */
- unsigned long info_ch_cflag; /* Channel cflag */
- unsigned long info_ch_iflag; /* Channel iflag */
- unsigned long info_ch_oflag; /* Channel oflag */
- unsigned long info_chsize; /* Channel structure size */
- unsigned long info_sleep_stat; /* sleep status */
- dev_t info_dev; /* device number */
- unsigned char info_initstate; /* Channel init state */
- unsigned char info_running; /* Channel running state */
- long reserved[8]; /* reserved for future use */
-};
-
-/*
-* This structure is used with the DIGI_FEPCMD ioctl to
-* tell the driver which port to send the command for.
-*/
-struct digi_cmd {
- int cmd;
- int word;
- int ncmds;
- int chan; /* channel index (zero based) */
- int bdid; /* board index (zero based) */
-};
-
-/*
-* info_sleep_stat defines
-*/
-#define INFO_RUNWAIT 0x0001
-#define INFO_WOPEN 0x0002
-#define INFO_TTIOW 0x0004
-#define INFO_CH_RWAIT 0x0008
-#define INFO_CH_WEMPTY 0x0010
-#define INFO_CH_WLOW 0x0020
-#define INFO_XXBUF_BUSY 0x0040
-
-#define DIGI_GETCH ('d'<<8) | 245	/* get channel info */
-
-/* Board type definitions */
-
-#define SUBTYPE 0007
-#define T_PCXI 0000
-#define T_PCXM 0001
-#define T_PCXE 0002
-#define T_PCXR 0003
-#define T_SP 0004
-#define T_SP_PLUS 0005
-# define T_HERC 0000
-# define T_HOU 0001
-# define T_LON 0002
-# define T_CHA 0003
-#define FAMILY 0070
-#define T_COMXI 0000
-#define T_PCXX 0010
-#define T_CX 0020
-#define T_EPC 0030
-#define T_PCLITE 0040
-#define T_SPXX 0050
-#define T_AVXX 0060
-#define T_DXB 0070
-#define T_A2K_4_8 0070
-#define BUSTYPE 0700
-#define T_ISABUS 0000
-#define T_MCBUS 0100
-#define T_EISABUS 0200
-#define T_PCIBUS 0400
-
-/* Board State Definitions */
-
-#define BD_RUNNING 0x0
-#define BD_REASON 0x7f
-#define BD_NOTFOUND 0x1
-#define BD_NOIOPORT 0x2
-#define BD_NOMEM 0x3
-#define BD_NOBIOS 0x4
-#define BD_NOFEP 0x5
-#define BD_FAILED 0x6
-#define BD_ALLOCATED 0x7
-#define BD_TRIBOOT 0x8
-#define BD_BADKME 0x80
-
-#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
-#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_RESET_PORT ('e'<<8) | 93 /* Reset port */
-
-#endif /* DIGI_H */
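For context, the header deleted above was the userspace-visible management interface. A minimal sketch of how DIGI_GETDD would have been consumed: only struct digi_dinfo and DIGI_GETDD come from digi.h, while the device node "/dev/dgap_mgmt" and the error handling are illustrative assumptions.

/*
 * Hedged userspace sketch: query driver info through the (now removed)
 * digi.h ioctl.  "/dev/dgap_mgmt" is an assumed management node.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "digi.h"	/* struct digi_dinfo, DIGI_GETDD */

int main(void)
{
	struct digi_dinfo dd;
	int fd = open("/dev/dgap_mgmt", O_RDONLY);	/* assumed path */

	if (fd < 0)
		return 1;

	if (ioctl(fd, DIGI_GETDD, &dd) == 0)
		printf("%lu board(s), driver version %.16s\n",
		       dd.dinfo_nboards, dd.dinfo_version);

	close(fd);
	return 0;
}
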
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
deleted file mode 100644
index 1f4aa2eca437..000000000000
--- a/drivers/staging/dgap/downld.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * $Id: downld.c,v 1.6 2009/01/14 14:10:54 markh Exp $
- */
-
-/*
-** downld.c
-**
-** This is the daemon that sends the fep, bios, and concentrator images
-** from user space to the driver.
-** BUGS:
-** If the file changes in the middle of the download, you probably
-** will get what you deserve.
-**
-*/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/errno.h>
-
-#include "dgap_types.h"
-#include "digi.h"
-#include "dgap_fep5.h"
-
-#include "dgap_downld.h"
-
-#include <string.h>
-#include <malloc.h>
-#include <stddef.h>
-#include <unistd.h>
-
-char *pgm;
-void myperror();
-
-/*
-** This structure is used to keep track of the different images available
-** to give to the driver. It is arranged so that the things that are
-** constants or that have defaults are first in the structure to simplify
-** the table of initializers.
-*/
-struct image_info {
- short type; /* bios, fep, conc */
- short family; /* boards this applies to */
- short subtype; /* subtype */
- int len; /* size of image */
- char *image; /* ioctl struct + image */
- char *name;
- char *fname; /* filename of binary (i.e. "asfep.bin") */
- char *pathname; /* pathname to this binary ("/etc/dgap/xrfep.bin"); */
- time_t mtime; /* Last modification time */
-};
-
-#define IBIOS 0
-#define IFEP 1
-#define ICONC 2
-#define ICONFIG 3
-#define IBAD 4
-
-#define DEFAULT_LOC "/lib/firmware/dgap/"
-
-struct image_info *image_list;
-int nimages, count;
-
-struct image_info images[] = {
-{IBIOS, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxbios.bin", DEFAULT_LOC "fxbios.bin", 0 },
-{IFEP, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxfep.bin", DEFAULT_LOC "fxfep.bin", 0 },
-{ICONC, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxcon.bin", DEFAULT_LOC "fxcon.bin", 0 },
-
-{IBIOS, T_CX, SUBTYPE, 0, NULL, "C/X", "cxbios.bin", DEFAULT_LOC "cxbios.bin", 0 },
-{IFEP, T_CX, SUBTYPE, 0, NULL, "C/X", "cxhost.bin", DEFAULT_LOC "cxhost.bin", 0 },
-
-{IBIOS, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpbios.bin", DEFAULT_LOC "cxpbios.bin", 0 },
-{IFEP, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpfep.bin", DEFAULT_LOC "cxpfep.bin", 0 },
-
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "cxcon.bin", DEFAULT_LOC "cxcon.bin", 0 },
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmcxcon.bin", DEFAULT_LOC "ibmcxcon.bin", 0 },
-{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmencon.bin", DEFAULT_LOC "ibmencon.bin", 0 },
-
-{IBIOS, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrbios.bin", DEFAULT_LOC "xrbios.bin", 0 },
-{IFEP, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrfep.bin", DEFAULT_LOC "xrfep.bin", 0 },
-
-{IBIOS, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxbios.bin", DEFAULT_LOC "sxbios.bin", 0 },
-{IFEP, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxfep.bin", DEFAULT_LOC "sxfep.bin", 0 },
-
-{IBIOS, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcibios.bin", DEFAULT_LOC "pcibios.bin", 0 },
-{IFEP, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcifep.bin", DEFAULT_LOC "pcifep.bin", 0 },
-{ICONFIG, 0, 0, 0, NULL, NULL, "dgap.conf", "/etc/dgap.conf", 0 },
-
-/* IBAD/NULL entry indicating end-of-table */
-
-{IBAD, 0, 0, 0, NULL, NULL, NULL, NULL, 0 }
-
-} ;
-
-int errorprint = 1;
-int nodldprint = 1;
-int debugflag;
-int fd;
-
-struct downld_t *ip; /* Image pointer in current image */
-struct downld_t *dp; /* conc. download */
-
-
-/*
- * The same for either the FEP or the BIOS.
- * Append the downldio header, issue the ioctl, then free
- * the buffer. Not horribly CPU efficient, but quite RAM efficient.
- */
-
-void squirt(int req_type, int bdid, struct image_info *ii)
-{
- struct downldio *dliop;
- int size_buf;
- int sfd;
- struct stat sb;
-
- /*
- * If this binary comes from a file, stat it to see how
- * large it is. Yes, we intentionally do this each
-	 * time because the binary may change between loads.
- */
-
- if (ii->pathname) {
- sfd = open(ii->pathname, O_RDONLY);
-
- if (sfd < 0 ) {
- myperror(ii->pathname);
- goto squirt_end;
- }
-
- if (fstat(sfd, &sb) == -1 ) {
- myperror(ii->pathname);
- goto squirt_end;
- }
-
- ii->len = sb.st_size;
- }
-
- size_buf = ii->len + sizeof(struct downldio);
-
- /*
- * This buffer will be freed at the end of this function. It is
- * not resilient and should be around only long enough for the d/l
- * to happen.
- */
- dliop = (struct downldio *) malloc(size_buf);
-
- if (dliop == NULL) {
- fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
- pgm, size_buf);
- exit (1);
- }
-
- /* Now, stick the image in fepimage. This can come from either
- * the compiled-in image or from the filesystem.
- */
- if (ii->pathname)
- read(sfd, dliop->image.fi.fepimage, ii->len);
- else
- memcpy(dliop ->image.fi.fepimage, ii->image, ii->len);
-
- dliop->req_type = req_type;
- dliop->bdid = bdid;
-
- dliop->image.fi.len = ii->len;
-
- if (debugflag)
- printf("sending %d bytes of %s %s from %s\n",
- ii->len,
- (ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
- ii->name ? ii->name : "",
- (ii->pathname) ? ii->pathname : "internal image" );
-
- if (ioctl(fd, DIGI_DLREQ_SET, (char *) dliop) == -1) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",pgm);
- errorprint = 0;
- }
- sleep(2);
- }
-
-squirt_end:
-
- if (ii->pathname) {
- close(sfd);
- }
- free(dliop);
-}
-
-
-/*
- * See if we need to reload the download image in core
- *
- */
-void consider_file_rescan(struct image_info *ii)
-{
- int sfd;
- int len;
- struct stat sb;
-
- /* This operation only makes sense when we're working from a file */
-
- if (ii->pathname) {
-
- sfd = open (ii->pathname, O_RDONLY) ;
- if (sfd < 0 ) {
- myperror(ii->pathname);
- exit(1) ;
- }
-
- if( fstat(sfd,&sb) == -1 ) {
- myperror(ii->pathname);
- exit(1);
- }
-
- /* If the file hasn't changed since we last did this,
- * and we have not done a free() on the image, bail
- */
- if (ii->image && (sb.st_mtime == ii->mtime))
- goto end_rescan;
-
- ii->len = len = sb.st_size;
-
- /* Record the timestamp of the file */
- ii->mtime = sb.st_mtime;
-
- /* image should be NULL unless there is an image malloced
- * in already. Before we malloc again, make sure we don't
- * have a memory leak.
- */
- if ( ii->image ) {
- free( ii->image );
- /* ii->image = NULL; */ /* not necessary */
- }
-
- /* This image will be kept only long enough for the
- * download to happen. After sending the last block,
- * it will be freed
- */
- ii->image = malloc(len) ;
-
- if (ii->image == NULL) {
- fprintf(stderr,
- "%s: can't get %d bytes of memory; aborting\n",
- pgm, len);
- exit (1);
- }
-
- if (read(sfd, ii->image, len) < len) {
- fprintf(stderr,"%s: read error on %s; aborting\n",
- pgm, ii->pathname);
- exit (1);
- }
-
-end_rescan:
- close(sfd);
-
- }
-}
-
-/*
- * Scan for images to match the driver requests
- */
-
-struct image_info * find_conc_image()
-{
- int x;
- struct image_info *i = NULL;
-
- for ( x = 0; x < nimages; x++ ) {
- i=&image_list[x];
-
- if(i->type != ICONC)
- continue;
-
- consider_file_rescan(i) ;
-
- ip = (struct downld_t *) image_list[x].image;
- if (ip == NULL) continue;
-
- /*
- * When I removed Clusterport, I kept only the code that I
- * was SURE wasn't ClusterPort. We may not need the next two
- * lines of code.
- */
- if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
- return i;
- }
- return NULL;
-}
-
-
-int main(int argc, char **argv)
-{
- struct downldio dlio;
- int offset, bsize;
- int x;
- char *down, *image, *fname;
- struct image_info *ii;
-
- pgm = argv[0];
- dp = &dlio.image.dl; /* conc. download */
-
- while((argc > 2) && !strcmp(argv[1],"-d")) {
- debugflag++ ;
- argc-- ;
- argv++ ;
- }
-
- if(argc < 2) {
- fprintf(stderr,
- "usage: %s download-device [image-file] ...\n",
- pgm);
- exit(1);
- }
-
-
-
- /*
- * Daemonize, unless debugging is turned on.
- */
- if (debugflag == 0) {
- switch (fork())
- {
- case 0:
- break;
-
- case -1:
- return 1;
-
- default:
- return 0;
- }
-
- setsid();
-
- /*
- * The child no longer needs "stdin", "stdout", or "stderr",
- * and should not block processes waiting for them to close.
- */
- fclose(stdin);
- fclose(stdout);
- fclose(stderr);
-
- }
-
- while (1) {
- if( (fd = open(argv[1], O_RDWR)) == -1 ) {
- sleep(1);
- }
- else
- break;
- }
-
- /*
- ** create a list of images to search through when trying to match
- ** requests from the driver. Put images from the command line in
- ** the list before built in images so that the command line images
- ** can override the built in ones.
- */
-
- /* allocate space for the list */
-
- nimages = argc - 2;
-
- /* count the number of default list entries */
-
- for (count = 0; images[count].type != IBAD; ++count) ;
-
- nimages += count;
-
- /* Really should just remove the variable "image_list".... robertl */
- image_list = images;
-
- /* get the images from the command line */
- for(x = 2; x < argc; x++) {
- int xx;
-
- /*
- * strip off any leading path information for
- * determining file type
- */
- if( (fname = strrchr(argv[x],'/')) == NULL)
- fname = argv[x];
- else
- fname++; /* skip the slash */
-
- for (xx = 0; xx < count; xx++) {
- if (strcmp(fname, images[xx].fname) == 0 ) {
- images[xx].pathname = argv[x];
-
- /* image should be NULL until */
- /* space is malloced */
- images[xx].image = NULL;
- }
- }
- }
-
- sleep(3);
-
- /*
- ** Endless loop: get a request from the fep, and service that request.
- */
- for(;;) {
- /* get the request */
- if (debugflag)
- printf("b4 get ioctl...");
-
- if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint = 0;
- }
- sleep(2);
- } else {
- if (debugflag)
- printf("dlio.req_type is %d bd %d\n",
- dlio.req_type,dlio.bdid);
-
- switch(dlio.req_type) {
- case DLREQ_BIOS:
- /*
- ** find the bios image for this type
- */
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != IBIOS)
- continue;
-
- if ((dlio.image.fi.type & FAMILY) ==
- image_list[x].family) {
-
- if ( image_list[x].family == T_CX ) {
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- break;
- }
- }
- else if ( image_list[x].family == T_EPC ) {
- /* If subtype of image is T_PCIBUS, it is */
- /* a PCI EPC image, so the board must */
- /* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
- != T_PCIBUS )
- break;
- }
- }
- else
- break;
- }
- else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
- /* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
- break;
- }
- }
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct BIOS image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type = -1;
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint = 0;
- }
- sleep(2);
- }
- break;
- }
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break ;
-
- case DLREQ_FEP:
- /*
- ** find the fep image for this type
- */
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != IFEP)
- continue;
- if( (dlio.image.fi.type & FAMILY) ==
- image_list[x].family ) {
- if ( image_list[x].family == T_CX ) {
- /* C/X PCI board */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* Regular CX */
- break;
- }
- }
- else if ( image_list[x].family == T_EPC ) {
- /* If subtype of image is T_PCIBUS, it is */
- /* a PCI EPC image, so the board must */
- /* have bus type T_PCIBUS to match */
- if ((dlio.image.fi.type & BUSTYPE)
- == T_PCIBUS ) {
- if ( image_list[x].subtype
- == T_PCIBUS )
- break;
- }
- else {
- /* NON PCI EPC doesn't use PCI image */
- if ( image_list[x].subtype
- != T_PCIBUS )
- break;
- }
- }
- else
- break;
- }
- else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
- /* PCXR board will break out of the loop here */
- if ( image_list[x].subtype == T_PCXR ) {
- break;
- }
- }
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct FEP image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type=-1;
- if( ioctl(fd,DIGI_DLREQ_SET,&dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- }
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break;
-
- case DLREQ_DEVCREATE:
- {
- char string[1024];
-#if 0
- sprintf(string, "%s /proc/dgap/%d/mknod", DEFSHELL, dlio.bdid);
-#endif
- sprintf(string, "%s /usr/sbin/dgap_updatedevs %d", DEFSHELL, dlio.bdid);
- system(string);
-
- if (debugflag)
- printf("Created Devices.\n");
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr, "%s: warning - DEVCREATE ioctl failed\n",pgm);
- errorprint = 0;
- }
- sleep(2);
- }
- if (debugflag)
- printf("After ioctl set - Created Device.\n");
- }
-
- break;
-
- case DLREQ_CONFIG:
- for ( x = 0; x < nimages; x++ ) {
- if(image_list[x].type != ICONFIG)
- continue;
- else
- break;
- }
-
- if ( x >= nimages) {
- /*
- ** no valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct CONFIG image\n",
- pgm);
- nodldprint = 0;
- }
- dlio.image.fi.type=-1;
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if(errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- }
-
- squirt(dlio.req_type, dlio.bdid, &image_list[x]);
- break;
-
- case DLREQ_CONC:
- /*
- ** find the image needed for this download
- */
- if ( dp->dl_seq == 0 ) {
- /*
- ** find image for hardware rev range
- */
- for ( x = 0; x < nimages; x++ ) {
- ii=&image_list[x];
-
- if(image_list[x].type != ICONC)
- continue;
-
- consider_file_rescan(ii) ;
-
- ip = (struct downld_t *) image_list[x].image;
- if (ip == NULL) continue;
-
- /*
- * When I removed Clusterport, I kept only the
- * code that I was SURE wasn't ClusterPort.
- * We may not need the next four lines of code.
- */
-
- if ((dp->dl_type != 'P' ) &&
- (ip->dl_lrev <= dp->dl_lrev ) &&
- ( dp->dl_lrev <= ip->dl_hrev))
- break;
- }
-
- if ( x >= nimages ) {
- /*
- ** No valid images exist
- */
- if(nodldprint) {
- fprintf(stderr,
- "%s: cannot find correct download image %d\n",
- pgm, dp->dl_lrev);
- nodldprint=0;
- }
- continue;
- }
-
- } else {
- /*
- ** find image version required
- */
- if ((ii = find_conc_image()) == NULL ) {
- /*
- ** No valid images exist
- */
- fprintf(stderr,
- "%s: can't find rest of download image??\n",
- pgm);
- continue;
- }
- }
-
- /*
- ** download block of image
- */
-
- offset = 1024 * dp->dl_seq;
-
- /*
- ** test if block requested within image
- */
- if ( offset < ii->len ) {
-
- /*
- ** if it is, determine block size, set segment,
- ** set size, set pointers, and copy block
- */
- if (( bsize = ii->len - offset ) > 1024 )
- bsize = 1024;
-
- /*
- ** copy image version info to download area
- */
- dp->dl_srev = ip->dl_srev;
- dp->dl_lrev = ip->dl_lrev;
- dp->dl_hrev = ip->dl_hrev;
-
- dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
- dp->dl_size = bsize;
-
- down = (char *)&dp->dl_data[0];
- image = (char *)((char *)ip + offset);
-
- memcpy(down, image, bsize);
- }
- else {
- /*
- ** Image has been downloaded, set segment and
- ** size to indicate no more blocks
- */
- dp->dl_seg = ip->dl_seg;
- dp->dl_size = 0;
-
- /* Now, we can release the concentrator */
- /* image from memory if we're running */
- /* from filesystem images */
-
- if (ii->pathname)
- if (ii->image) {
- free(ii->image);
- ii->image = NULL;
- }
- }
-
- if (debugflag)
- printf(
- "sending conc dl section %d to %s from %s\n",
- dp->dl_seq, ii->name,
- ii->pathname ? ii->pathname : "Internal Image");
-
- if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
- if (errorprint) {
- fprintf(stderr,
- "%s: warning - download ioctl failed\n",
- pgm);
- errorprint=0;
- }
- sleep(2);
- }
- break;
- } /* switch */
- }
- if (debugflag > 1) {
- printf("pausing: "); fflush(stdout);
- fflush(stdin);
- while(getchar() != '\n');
- printf("continuing\n");
- }
- }
-}
-
-/*
-** myperror()
-**
-** Same as normal perror(), but places the program name at the beginning
-** of the message.
-*/
-void myperror(char *s)
-{
- fprintf(stderr,"%s: %s: %s.\n",pgm, s, strerror(errno));
-}
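The deleted daemon's DLREQ_CONC path hands the concentrator image to the driver in 1 KiB blocks keyed by dl_seq. A hedged restatement of that block bookkeeping, using a reduced stand-in for struct downld_t (field names assumed from the code above):

/*
 * Hedged restatement of the DLREQ_CONC block math above.  struct conc_block
 * is a reduced, assumed subset of the real struct downld_t from
 * dgap_downld.h, kept here only so the sketch is self-contained.
 */
#include <string.h>

struct conc_block {			/* assumed subset of struct downld_t */
	int  dl_seq;			/* block sequence number (input) */
	int  dl_srev, dl_lrev, dl_hrev;	/* image version info */
	int  dl_seg;			/* load segment for this block */
	int  dl_size;			/* bytes placed in dl_data */
	char dl_data[1024];
};

static void fill_conc_block(struct conc_block *dp, const struct conc_block *ip,
			    const char *image, int image_len)
{
	int offset = 1024 * dp->dl_seq;		/* 1 KiB per sequence number */
	int bsize  = image_len - offset;

	if (offset >= image_len) {		/* past the end: signal "done" */
		dp->dl_seg  = ip->dl_seg;
		dp->dl_size = 0;
		return;
	}
	if (bsize > 1024)
		bsize = 1024;

	dp->dl_srev = ip->dl_srev;		/* echo image revision info */
	dp->dl_lrev = ip->dl_lrev;
	dp->dl_hrev = ip->dl_hrev;
	dp->dl_seg  = 64 * dp->dl_seq + ip->dl_seg; /* 64 paragraphs = 1 KiB */
	dp->dl_size = bsize;
	memcpy(dp->dl_data, image + offset, bsize);
}
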
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 708adbbcedbd..60d9b62c6f1f 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -827,9 +827,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
* Check to make sure its for us.
*/
if (brd->magic != DGNC_BOARD_MAGIC) {
- APR((
- "Received interrupt (%d) with a board pointer "
- "that wasn't ours!\n", irq));
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n",
+ irq));
return IRQ_NONE;
}
@@ -846,8 +845,7 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
/* If 0, no interrupts pending */
if (!poll_reg) {
DPR_INTR((
- "Kernel interrupted to me, but no pending "
- "interrupts...\n"));
+ "Kernel interrupted to me, but no pending interrupts...\n"));
DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
return IRQ_NONE;
}
@@ -1388,8 +1386,7 @@ static void cls_send_break(struct channel_t *ch, int msecs)
writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
ch->ch_flags |= (CH_BREAK_SENDING);
DPR_IOCTL((
- "Port %d. Starting UART_LCR_SBC! start: %lx "
- "should end: %lx\n",
+ "Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
}
}
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index c204266cb69f..b1a39b25b419 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -88,7 +88,7 @@ module_exit(dgnc_cleanup_module);
/*
* File operations permitted on Control/Management major.
*/
-static struct file_operations dgnc_BoardFops =
+static const struct file_operations dgnc_BoardFops =
{
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -236,7 +236,7 @@ int dgnc_init_module(void)
if (dgnc_NumBoards)
pci_unregister_driver(&dgnc_driver);
else
- printk("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+ pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
dgnc_cleanup_module();
}
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 1c5ab3d007b0..c5b425bf6692 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -42,7 +42,7 @@
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/serial_reg.h>
#include <linux/termios.h>
-#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
#include "dgnc_driver.h"
#include "dgnc_pci.h"
@@ -77,8 +77,7 @@ int dgnc_mgmt_open(struct inode *inode, struct file *file)
return -EBUSY;
}
dgnc_mgmt_in_use[minor]++;
- }
- else {
+ } else {
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
return -ENXIO;
}
@@ -107,9 +106,8 @@ int dgnc_mgmt_close(struct inode *inode, struct file *file)
/* mgmt device */
if (minor < MAXMGMTDEVICES) {
- if (dgnc_mgmt_in_use[minor]) {
+ if (dgnc_mgmt_in_use[minor])
dgnc_mgmt_in_use[minor] = 0;
- }
}
DGNC_UNLOCK(dgnc_global_lock, lock_flags);
@@ -153,7 +151,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DPR_MGMT(("DIGI_GETDD returning numboards: %d version: %s\n",
ddi.dinfo_nboards, ddi.dinfo_version));
- if (copy_to_user(uarg, &ddi, sizeof (ddi)))
+ if (copy_to_user(uarg, &ddi, sizeof(ddi)))
return -EFAULT;
break;
@@ -165,13 +163,13 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct digi_info di;
- if (copy_from_user(&brd, uarg, sizeof(int))) {
+ if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
- }
DPR_MGMT(("DIGI_GETBD asking about board: %d\n", brd));
- if ((brd < 0) || (brd > dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ if ((brd < 0) || (brd > dgnc_NumBoards) ||
+ (dgnc_NumBoards == 0))
return -ENODEV;
memset(&di, 0, sizeof(di));
@@ -195,7 +193,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DPR_MGMT(("DIGI_GETBD returning type: %x state: %x ports: %x size: %x\n",
di.info_bdtype, di.info_bdstate, di.info_nports, di.info_physsize));
- if (copy_to_user(uarg, &di, sizeof (di)))
+ if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
break;
@@ -209,9 +207,8 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
uint board = 0;
uint channel = 0;
- if (copy_from_user(&ni, uarg, sizeof(ni))) {
+ if (copy_from_user(&ni, uarg, sizeof(ni)))
return -EFAULT;
- }
DPR_MGMT(("DIGI_GETBD asking about board: %d channel: %d\n",
ni.board, ni.channel));
@@ -268,12 +265,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ni.cflag = ch->ch_c_cflag;
ni.lflag = ch->ch_c_lflag;
- if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS)
+ if (ch->ch_digi.digi_flags & CTSPACE ||
+ ch->ch_c_cflag & CRTSCTS)
ni.hflow = 1;
else
ni.hflow = 0;
- if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
+ if ((ch->ch_flags & CH_STOPI) ||
+ (ch->ch_flags & CH_FORCED_STOPI))
ni.recv_stopped = 1;
else
ni.recv_stopped = 0;
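The dgnc_mgmt cleanups above are stylistic; the copy_from_user()/copy_to_user() handler shape they preserve looks roughly like this sketch (struct example_info and the filled-in value are hypothetical, only the calling convention mirrors dgnc_mgmt_ioctl()):

/*
 * Sketch of the ioctl copy pattern kept by the hunks above: read the
 * request struct from userspace, fill in the reply, copy it back.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_info {			/* hypothetical request/reply struct */
	int board;
	int nports;
};

static long example_ioctl(unsigned long arg)
{
	void __user *uarg = (void __user *)arg;
	struct example_info info;

	if (copy_from_user(&info, uarg, sizeof(info)))
		return -EFAULT;		/* bad user pointer */

	info.nports = 8;		/* fill in the reply (placeholder value) */

	if (copy_to_user(uarg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
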
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index dc5a138d8d4a..cf22c7b725f9 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1201,7 +1201,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_cached_lsr = 0;
/* Store how much space we have left in the queue */
- if ((qleft = tail - head - 1) < 0)
+ qleft = tail - head - 1;
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
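The dgnc_neo hunk only hoists the assignment out of the if; the ring-buffer free-space arithmetic it keeps can be restated as a small helper (QSIZE is an assumed power-of-two queue size, with QMASK standing in for RQUEUEMASK):

/*
 * Hedged helper for the queue-space computation above: for a power-of-two
 * ring whose head and tail are already masked to QSIZE - 1, the free space
 * is tail - head - 1, wrapped back into range when it goes negative.
 */
#define QSIZE 8192			/* assumed queue size (power of two) */
#define QMASK (QSIZE - 1)		/* plays the role of RQUEUEMASK */

static int queue_space_left(int head, int tail)
{
	int qleft = tail - head - 1;

	if (qleft < 0)			/* head is ahead of tail: wrap */
		qleft += QMASK + 1;
	return qleft;
}
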
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index a6c6aba82d72..f0b17c36edd8 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -964,8 +964,10 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
int deltahigh;
int deltalow;
- if (newrate < 0)
- newrate = 0;
+ if (newrate <= 0) {
+ ch->ch_custom_speed = 0;
+ return;
+ }
/*
* Since the divisor is stored in a 16-bit integer, we make sure
@@ -978,7 +980,7 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
if (newrate && newrate > ch->ch_bd->bd_dividend)
newrate = ch->ch_bd->bd_dividend;
- while (newrate > 0) {
+ if (newrate > 0) {
testdiv = ch->ch_bd->bd_dividend / newrate;
/*
@@ -995,28 +997,23 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
* If the rate for the requested divisor is correct, just
* use it and be done.
*/
- if (testrate_high == newrate )
- break;
-
- /*
- * Otherwise, pick the rate that is closer (i.e. whichever rate
- * has a smaller delta).
- */
- deltahigh = testrate_high - newrate;
- deltalow = newrate - testrate_low;
+ if (testrate_high != newrate) {
+ /*
+ * Otherwise, pick the rate that is closer (i.e. whichever rate
+ * has a smaller delta).
+ */
+ deltahigh = testrate_high - newrate;
+ deltalow = newrate - testrate_low;
- if (deltahigh < deltalow) {
- newrate = testrate_high;
- } else {
- newrate = testrate_low;
+ if (deltahigh < deltalow) {
+ newrate = testrate_high;
+ } else {
+ newrate = testrate_low;
+ }
}
-
- break;
}
ch->ch_custom_speed = newrate;
-
- return;
}
@@ -1025,7 +1022,8 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
int qleft = 0;
/* Store how much space we have left in the queue */
- if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
+ qleft = ch->ch_r_tail - ch->ch_r_head - 1;
+ if (qleft < 0)
qleft += RQUEUEMASK + 1;
/*
@@ -1119,7 +1117,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
/*
* If channel now has space, wake up anyone waiting on the condition.
*/
- if ((qlen = ch->ch_w_head - ch->ch_w_tail) < 0)
+ qlen = ch->ch_w_head - ch->ch_w_tail;
+ if (qlen < 0)
qlen += WQUEUESIZE;
if (qlen >= (WQUEUESIZE - 256)) {
@@ -1917,7 +1916,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
- if ((ret = tail - head - 1) < 0)
+ ret = tail - head - 1;
+ if (ret < 0)
ret += WQUEUESIZE;
/* Limit printer to maxcps */
@@ -2017,7 +2017,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
- if ((bufcount = tail - head - 1) < 0)
+ bufcount = tail - head - 1;
+ if (bufcount < 0)
bufcount += WQUEUESIZE;
DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
@@ -3316,10 +3317,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_SETCUSTOMBAUD:
{
- uint new_rate;
+ int new_rate;
/* Let go of locks when accessing user space, could sleep */
DGNC_UNLOCK(ch->ch_lock, lock_flags);
- rc = get_user(new_rate, (unsigned int __user *) arg);
+ rc = get_user(new_rate, (int __user *) arg);
if (rc)
return rc;
DGNC_LOCK(ch->ch_lock, lock_flags);
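The dgnc_tty changes above make dgnc_set_custom_speed() clear the custom rate for non-positive requests and drop the single-pass while loop. A hedged sketch of the divisor rounding it retains, where "dividend" stands in for ch->ch_bd->bd_dividend and exact field handling in the driver may differ:

/*
 * Hedged sketch: pick the achievable rate (dividend / divisor) closest to
 * the requested integer baud rate.
 */
static int pick_custom_rate(int dividend, int requested)
{
	int div, rate_high, rate_low;

	if (requested <= 0)
		return 0;		/* non-positive request clears the rate */
	if (requested > dividend)
		requested = dividend;	/* divisor must be at least 1 */

	div = dividend / requested;
	rate_high = dividend / div;		/* rate from rounding the divisor down */
	rate_low  = dividend / (div + 1);	/* rate from the next larger divisor */

	if (rate_high == requested)
		return rate_high;	/* exact match */

	/* otherwise return whichever achievable rate has the smaller delta */
	return (rate_high - requested) < (requested - rate_low) ?
		rate_high : rate_low;
}
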
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
return rtn;
}
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
- long dlen, long plen, int n1, u8 *dbuf)
-{
- char *error;
- long n;
- long remain;
- u8 *buf;
- u8 *b;
-
- remain = nd->nd_remain;
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- if (remain > 0 && b != buf)
- memcpy(buf, b, remain);
-
- nd->nd_remain = remain;
- return;
- }
-}
-
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
+ /*
+ * Common packet handling code.
+ */
+
+data:
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ goto prot_error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ goto prot_error;
+ }
+
+ /*
+ * If we received 3 or less characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+	 * assume it's not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+	 * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ goto done;
+ }
break;
/*
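In the relocated data path above, the receive-window check works on 16-bit sequence counters. A minimal restatement of that modular arithmetic, assuming the same 0xffff wrap the driver uses:

/*
 * ch_s_rwin and ch_s_rin are 16-bit counters, so the amount of window
 * still open is their difference taken modulo 2^16.
 */
#include <stdint.h>

static int data_fits_window(uint16_t rwin, uint16_t rin, long dlen)
{
	long open = (uint16_t)(rwin - rin);	/* wraps exactly like & 0xffff */

	return dlen <= open;			/* else: "Receive data overrun" */
}
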
diff --git a/drivers/staging/dgrp/dgrp_sysfs.c b/drivers/staging/dgrp/dgrp_sysfs.c
index 9a18a2c9e73b..2f9345ff0abb 100644
--- a/drivers/staging/dgrp/dgrp_sysfs.c
+++ b/drivers/staging/dgrp/dgrp_sysfs.c
@@ -65,7 +65,9 @@ static ssize_t dgrp_class_pollrate_store(struct device *c,
struct device_attribute *attr,
const char *buf, size_t count)
{
- sscanf(buf, "0x%x\n", &dgrp_poll_tick);
+ if (sscanf(buf, "0x%x\n", &dgrp_poll_tick) != 1)
+ return -EINVAL;
+
return count;
}
static DEVICE_ATTR(pollrate, 0600, dgrp_class_pollrate_show,
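The dgrp_sysfs hunk above rejects store input that sscanf cannot parse. An alternative shape for the same validation, using kstrtouint rather than the sscanf the patch keeps; the signature is simplified and the assignment is left commented out, so this is only a sketch:

/*
 * Sketch: parse a hex value from a sysfs store buffer and reject garbage.
 */
#include <linux/kernel.h>

static ssize_t pollrate_store_sketch(const char *buf, size_t count)
{
	unsigned int val;
	int rc = kstrtouint(buf, 16, &val);	/* base 16, accepts a 0x prefix */

	if (rc)
		return rc;			/* -EINVAL or -ERANGE from the helper */

	/* dgrp_poll_tick = val; */
	return count;
}
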
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 7a9694c1d9c4..30d26029b21e 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -1319,7 +1319,8 @@ static int dgrp_calculate_txprint_bounds(struct ch_struct *ch, int space,
if (ch->ch_tun.un_open_count != 0 &&
ch->ch_tun.un_tty->ops->chars_in_buffer &&
- ((ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) != 0)) {
+ ((ch->ch_tun.un_tty->ops->chars_in_buffer)
+ (ch->ch_tun.un_tty) != 0)) {
*un_flag = UN_PWAIT;
return 0;
}
@@ -1501,7 +1502,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
*/
if (ch->ch_tun.un_open_count != 0 &&
- ((ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) != 0)) {
+ ((ch->ch_tun.un_tty->ops->chars_in_buffer)
+ (ch->ch_tun.un_tty) != 0)) {
un->un_flag |= UN_PWAIT;
count = 0;
goto out;
@@ -1666,7 +1668,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
if (n >= t) {
memcpy(ch->ch_tbuf + ch->ch_tin, buf, t);
- if (nd->nd_dpa_debug && nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(un->un_tty))))
+ if (nd->nd_dpa_debug && nd->nd_dpa_port ==
+ PORT_NUM(MINOR(tty_devnum(un->un_tty))))
dgrp_dpa_data(nd, 0, (char *) buf, t);
buf += t;
n -= t;
@@ -1675,7 +1678,8 @@ static int dgrp_tty_write(struct tty_struct *tty,
}
memcpy(ch->ch_tbuf + ch->ch_tin, buf, n);
- if (nd->nd_dpa_debug && nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(un->un_tty))))
+ if (nd->nd_dpa_debug && nd->nd_dpa_port ==
+ PORT_NUM(MINOR(tty_devnum(un->un_tty))))
dgrp_dpa_data(nd, 0, (char *) buf, n);
buf += n;
ch->ch_tin += n;
@@ -2656,7 +2660,8 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
ch->ch_send |= RR_RX_FLUSH;
(ch->ch_nd)->nd_tx_work = 1;
(ch->ch_nd)->nd_tx_ready = 1;
- wake_up_interruptible(&(ch->ch_nd)->nd_tx_waitq);
+ wake_up_interruptible(
+ &(ch->ch_nd)->nd_tx_waitq);
}
if (arg == TCIFLUSH)
break;
@@ -2682,7 +2687,8 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
Linux HPUX Function
TCSETA TCSETA - set the termios
TCSETAF TCSETAF - wait for drain first, then set termios
- TCSETAW TCSETAW - wait for drain, flush the input queue, then set termios
+ TCSETAW TCSETAW - wait for drain,
+ flush the input queue, then set termios
- looking at the tty_ioctl code, these command all call our
tty_set_termios at the driver's end, when a TCSETA* is sent,
it is expecting the tty to have a termio structure,
@@ -2798,6 +2804,7 @@ static int dgrp_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
/* pretend we didn't recognize this */
+ /* fall-through */
case DIGI_SETA:
return dgrp_tty_digiseta(tty, (struct digi_struct *) arg);
@@ -3207,7 +3214,8 @@ dgrp_tty_init(struct nd_struct *nd)
int max_majors = 1U << (32 - MINORBITS);
for (i = 256; i < max_majors; i++) {
nd->nd_serial_ttdriver->major = i;
- rc = tty_register_driver(nd->nd_serial_ttdriver);
+ rc = tty_register_driver
+ (nd->nd_serial_ttdriver);
if (rc >= 0)
break;
}
diff --git a/drivers/staging/echo/TODO b/drivers/staging/echo/TODO
deleted file mode 100644
index 72a311a5a9cc..000000000000
--- a/drivers/staging/echo/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - send to lkml for review
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Steve
-Underwood <steveu@coppice.org> and David Rowe <david@rowetel.com>
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index e516bb69f3b4..d329cf314360 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -485,8 +485,6 @@ struct et131x_adapter {
u8 eeprom_data[2];
/* Spinlocks */
- spinlock_t lock;
-
spinlock_t tcb_send_qlock;
spinlock_t tcb_ready_qlock;
spinlock_t send_hw_lock;
@@ -1388,6 +1386,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
mii_indicator);
status = -EIO;
+ goto out;
}
/* If we hit here we were able to read the register and we need to
@@ -1395,6 +1394,7 @@ static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
*/
*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
+out:
/* Stop the read operation */
writel(0, &mac->mii_mgmt_cmd);
@@ -2124,7 +2124,11 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Alloc memory for the lookup table */
rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+ if (rx_ring->fbr[0] == NULL)
+ return -ENOMEM;
rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+ if (rx_ring->fbr[1] == NULL)
+ return -ENOMEM;
/* The first thing we will do is configure the sizes of the buffer
* rings. These will change based on jumbo packet support. Larger
@@ -2289,7 +2293,7 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
for (id = 0; id < NUM_FBRS; id++) {
fbr = rx_ring->fbr[id];
- if (!fbr->ring_virtaddr)
+ if (!fbr || !fbr->ring_virtaddr)
continue;
/* First the packet memory */
@@ -3523,7 +3527,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
goto err_out;
}
}
- memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
+ ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
return rc;
err_out:
@@ -3591,6 +3595,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev,
"et131x_tx_dma_memory_alloc FAILED\n");
+ et131x_tx_dma_memory_free(adapter);
return status;
}
/* Receive buffer memory allocation */
@@ -3598,7 +3603,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev,
"et131x_rx_dma_memory_alloc FAILED\n");
- et131x_tx_dma_memory_free(adapter);
+ et131x_adapter_memory_free(adapter);
return status;
}
@@ -3760,7 +3765,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->netdev = netdev;
/* Initialize spinlocks here */
- spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->tcb_send_qlock);
spin_lock_init(&adapter->tcb_ready_qlock);
spin_lock_init(&adapter->send_hw_lock);
@@ -3770,7 +3774,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
/* Set the MAC address to a default */
- memcpy(adapter->addr, default_mac, ETH_ALEN);
+ ether_addr_copy(adapter->addr, default_mac);
return adapter;
}
@@ -4292,12 +4296,9 @@ static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
int packet_filter;
- unsigned long flags;
struct netdev_hw_addr *ha;
int i;
- spin_lock_irqsave(&adapter->lock, flags);
-
/* Before we modify the platform-independent filter flags, store them
* locally. This allows us to determine if anything's changed and if
* we even need to bother the hardware
@@ -4349,8 +4350,6 @@ static void et131x_multicast(struct net_device *netdev)
*/
if (packet_filter != adapter->packet_filter)
et131x_set_packet_filter(adapter);
-
- spin_unlock_irqrestore(&adapter->lock, flags);
}
/* et131x_tx - The handler to tx a packet on the device */
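The et131x error-path hunks make the allocation sequence clean up after itself: a failed rx allocation now frees everything already set up, not just the tx side. A self-contained sketch of that shape, with hypothetical stub names standing in for the driver's helpers:

/*
 * Sketch of the alloc/cleanup shape the hunks above move to: the second
 * failure path tears down both halves through one free-all helper.
 */
struct adapter { int dummy; };				/* hypothetical */

static int tx_memory_alloc(struct adapter *ad)		{ (void)ad; return 0; }
static void tx_memory_free(struct adapter *ad)		{ (void)ad; }
static int rx_memory_alloc(struct adapter *ad)		{ (void)ad; return 0; }
static void adapter_memory_free(struct adapter *ad)	{ (void)ad; }

static int adapter_memory_alloc(struct adapter *ad)
{
	int status;

	status = tx_memory_alloc(ad);
	if (status) {
		tx_memory_free(ad);		/* undo any partial tx setup */
		return status;
	}

	status = rx_memory_alloc(ad);
	if (status) {
		adapter_memory_free(ad);	/* frees tx and partial rx state */
		return status;
	}

	return 0;
}
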
diff --git a/drivers/staging/frontier/Kconfig b/drivers/staging/frontier/Kconfig
index 7121853bd397..4da290b2f5bd 100644
--- a/drivers/staging/frontier/Kconfig
+++ b/drivers/staging/frontier/Kconfig
@@ -1,6 +1,5 @@
config TRANZPORT
tristate "Frontier Tranzport and Alphatrack support"
depends on USB
- default N
---help---
Enable support for the Frontier Tranzport and Alphatrack devices.
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index edd5cef300d0..226b23163109 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -208,7 +208,9 @@ static void usb_alphatrack_delete(struct usb_alphatrack *dev)
kfree(dev->ring_buffer);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
- kfree(dev); /* fixme oldi_buffer */
+ kfree(dev->oldi_buffer);
+ kfree(dev->write_buffer);
+ kfree(dev);
}
/** usb_alphatrack_interrupt_in_callback */
@@ -233,8 +235,8 @@ static void usb_alphatrack_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != INPUT_CMD_SIZE) {
dev_warn(&dev->intf->dev,
- "Urb length was %d bytes!!"
- "Do something intelligent\n", urb->actual_length);
+ "Urb length was %d bytes!! Do something intelligent\n",
+ urb->actual_length);
} else {
alphatrack_ocmd_info(&dev->intf->dev,
&(*dev->ring_buffer)[dev->ring_tail].cmd,
@@ -688,8 +690,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
}
if (dev->interrupt_out_endpoint == NULL)
dev_warn(&intf->dev,
- "Interrupt out endpoint not found"
- "(using control endpoint instead)\n");
+ "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size =
le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 0e499ce5f0d7..0571988c58fc 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -257,8 +257,7 @@ static void usb_tranzport_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != 8) {
dev_warn(&dev->intf->dev,
- "Urb length was %d bytes!!"
- "Do something intelligent\n",
+ "Urb length was %d bytes!! Do something intelligent\n",
urb->actual_length);
} else {
dbg_info(&dev->intf->dev,
@@ -542,8 +541,7 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
}
dbg_info(&dev->intf->dev,
- "%s: copying to userspace: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "%s: copying to userspace: %02x%02x%02x%02x%02x%02x%02x%02x\n",
__func__,
(*dev->ring_buffer)[dev->ring_tail].cmd[0],
(*dev->ring_buffer)[dev->ring_tail].cmd[1],
@@ -570,8 +568,7 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
* and we are the same sign, we can compress +- 7F
*/
dbg_info(&dev->intf->dev,
- "%s: trying to compress: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ "%s: trying to compress: %02x%02x%02x%02x%02x%02x%02x%02x\n",
__func__,
(*dev->ring_buffer)[dev->ring_tail].cmd[0],
(*dev->ring_buffer)[dev->ring_tail].cmd[1],
@@ -830,8 +827,7 @@ static int usb_tranzport_probe(struct usb_interface *intf,
}
if (dev->interrupt_out_endpoint == NULL)
dev_warn(&intf->dev,
- "Interrupt out endpoint not found"
- "(using control endpoint instead)\n");
+ "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size =
le16_to_cpu(dev->interrupt_in_endpoint->wMaxPacketSize);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index a433e33049b5..b6a77088cfe4 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1145,7 +1145,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
char *cmdbuffer = kmalloc(1600, GFP_KERNEL);
if (!cmdbuffer)
- return -1;
+ return -ENOMEM;
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index 24b8d77a132c..419e534874f9 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -30,7 +30,7 @@
typedef struct _IOCTL_GET_VER {
unsigned long drv_ver;
-} __attribute__ ((packed)) IOCTL_GET_VER, *PIOCTL_GET_VER;
+} __packed IOCTL_GET_VER, *PIOCTL_GET_VER;
/* Data structure for Dsp statistics */
typedef struct _IOCTL_GET_DSP_STAT {
@@ -67,19 +67,19 @@ typedef struct _IOCTL_GET_DSP_STAT {
unsigned long ConTm; /* Current session connection time in seconds */
unsigned char CalVer[CALVERSZ]; /* Proprietary Calibration Version */
unsigned char CalDate[CALDATESZ]; /* Proprietary Calibration Date */
-} __attribute__ ((packed)) IOCTL_GET_DSP_STAT, *PIOCTL_GET_DSP_STAT;
+} __packed IOCTL_GET_DSP_STAT, *PIOCTL_GET_DSP_STAT;
/* Data structure for Dual Ported RAM messaging between Host and Dsp */
typedef struct _IOCTL_DPRAM_BLK {
unsigned short total_len;
struct pseudo_hdr pseudohdr;
unsigned char buffer[1780];
-} __attribute__ ((packed)) IOCTL_DPRAM_BLK, *PIOCTL_DPRAM_BLK;
+} __packed IOCTL_DPRAM_BLK, *PIOCTL_DPRAM_BLK;
typedef struct _IOCTL_DPRAM_COMMAND {
unsigned short extra;
IOCTL_DPRAM_BLK dpram_blk;
-} __attribute__ ((packed)) IOCTL_DPRAM_COMMAND, *PIOCTL_DPRAM_COMMAND;
+} __packed IOCTL_DPRAM_COMMAND, *PIOCTL_DPRAM_COMMAND;
/*
* Custom IOCTL command codes
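__packed is the kernel's preferred spelling of __attribute__((packed)), which is all the ft1000_ioctl.h hunks change. A standalone illustration of why these on-the-wire structs are packed at all; the structs and printed sizes are illustrative only and depend on the ABI:

/*
 * Packing removes the padding the compiler would otherwise insert, so the
 * struct layout matches the byte stream exchanged with the device.
 */
#include <stdio.h>

struct loose { unsigned short len; unsigned long ver; };
struct tight { unsigned short len; unsigned long ver; } __attribute__((packed));

int main(void)
{
	/* On a typical LP64 ABI this prints 16 and 10. */
	printf("loose=%zu tight=%zu\n",
	       sizeof(struct loose), sizeof(struct tight));
	return 0;
}
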
diff --git a/drivers/staging/ft1000/ft1000.h b/drivers/staging/ft1000/ft1000.h
index 175abfa7682e..ccb821a1f370 100644
--- a/drivers/staging/ft1000/ft1000.h
+++ b/drivers/staging/ft1000/ft1000.h
@@ -21,34 +21,64 @@
#define FT1000_REG_SUP_CTRL 0x0020 /* HCTR - Host Control Register */
#define FT1000_REG_SUP_STAT 0x0022 /* HSTAT - Host Status Register */
#define FT1000_REG_RESET 0x0024 /* HCTR - Host Control Register */
-#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status Register */
+#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status
+ * Register
+ */
#define FT1000_REG_SUP_IMASK 0x0028 /* HIMASK - Host Interrupt Mask */
#define FT1000_REG_DOORBELL 0x002a /* DBELL - Door Bell Register */
-#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification Number */
+#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification
+ * Number
+ */
/* MEMORY MAP FOR ELECTRABUZZ ASIC */
#define FT1000_REG_UFIFO_STAT 0x0000 /* UFSR - Uplink FIFO status register */
-#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning register */
+#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning
+ * register
+ */
#define FT1000_REG_UFIFO_MID 0x0004 /* UFMR - Uplink FIFO middle register */
#define FT1000_REG_UFIFO_END 0x0006 /* UFER - Uplink FIFO end register */
-#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status register */
+#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status
+ * register
+ */
#define FT1000_REG_DFIFO 0x000A /* DFR - Downlink FIFO Register */
-#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect Data Register */
+#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect
+ * Data Register
+ */
#define FT1000_REG_WATERMARK 0x0010 /* WMARK - Watermark Register */
/* MEMORY MAP FOR MAGNEMITE */
-#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data Register (32-bits) */
-#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data Register low-word (16-bits) */
-#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data
+ * Register (32-bits)
+ */
+#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data
+ * Register low-word (16-bits)
+ */
+#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register
+ * high-word (16-bits)
+ */
#define FT1000_REG_MAG_UFER 0x0004 /* UFER - Uplink FIFO End Register */
#define FT1000_REG_MAG_UFSR 0x0006 /* UFSR - Uplink FIFO Status Register */
-#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register (32-bits) */
-#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register low-word (16-bits) */
-#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register high-word (16-bits) */
-#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status Register */
-#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect Data Register (32-bits) */
-#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect Data Register low-word (16-bits) */
-#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register
+ * (32-bits)
+ */
+#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register
+ * low-word (16-bits)
+ */
+#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register
+ * high-word (16-bits)
+ */
+#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status
+ * Register
+ */
+#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect
+ * Data Register (32-bits)
+ */
+#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect
+ * Data Register low-word (16-bits)
+ */
+#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data
+ * Register high-word (16-bits)
+ */
#define FT1000_REG_MAG_WATERMARK 0x002c /* WMARK - Watermark Register */
#define FT1000_REG_MAG_VERSION 0x0030 /* LLC Version */
@@ -57,7 +87,9 @@
#define FT1000_DPRAM_RX_BASE 0x0800 /* PC Card to Host Messaging Area */
#define FT1000_FIFO_LEN 0x07FC /* total length for DSP FIFO tracking */
#define FT1000_HI_HO 0x07FE /* heartbeat with HI/HO */
-#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request to reset dsp */
+#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request
+ * to reset dsp
+ */
#define FT1000_DSP_LED 0x0FFA /* dsp led status for PAD device */
#define FT1000_DSP_CON_STATE 0x0FF8 /* DSP Connection Status Info */
#define FT1000_DPRAM_FEFE 0x0002 /* location for dsp ready indicator */
@@ -67,26 +99,48 @@
#define FT1000_DSP_TIMER3 0x1FF6 /* Timer Field from Basestation */
/* Reserved Dual Port RAM offsets for Magnemite */
-#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card Messaging Area */
-#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host Messaging Area */
+#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card
+ * Messaging Area
+ */
+#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host
+ * Messaging Area
+ */
-#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP FIFO tracking */
+#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP
+ * FIFO tracking
+ */
#define FT1000_MAG_FIFO_LEN_INDX 0x1 /* low-word index */
#define FT1000_MAG_HI_HO 0x1FF /* heartbeat with HI/HO */
#define FT1000_MAG_HI_HO_INDX 0x0 /* high-word index */
-#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for PAD device */
-#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for PAD device */
+#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for
+ * PAD device
+ */
+#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for
+ * PAD device
+ */
#define FT1000_MAG_DSP_CON_STATE 0x3FE /* DSP Connection Status Info */
#define FT1000_MAG_DSP_CON_STATE_INDX 0x1 /* DSP Connection Status Info */
-#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready indicator */
-#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready indicator */
-#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready
+ * indicator
+ */
+#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready
+ * indicator
+ */
+#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER0_INDX 0x1
-#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER1_INDX 0x0
-#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER2_INDX 0x1
-#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from
+ * Basestation
+ */
#define FT1000_MAG_DSP_TIMER3_INDX 0x0
#define FT1000_MAG_TOTAL_LEN 0x200
#define FT1000_MAG_TOTAL_LEN_INDX 0x1
@@ -99,24 +153,38 @@
#define HOST_INTF_BE 0x1 /* Host interface big endian mode */
/* FT1000 to Host Doorbell assignments */
-#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP has data for host in DPRAM */
+#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP
+ * has data for host in DPRAM
+ */
#define FT1000_DB_DNLD_RX 0x0002 /* Downloader handshake doorbell */
-#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to reset the ASIC */
-#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that it will reset the ASIC */
+#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to
+ * reset the ASIC
+ */
+#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that
+ * it will reset the ASIC
+ */
#define FT1000_DB_COND_RESET 0x0010 /* DSP request for a card reset. */
/* Host to FT1000 Doorbell assignments */
-#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host has data for DSP in DPRAM. */
+#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host
+ * has data for DSP in DPRAM.
+ */
#define FT1000_DB_DNLD_TX 0x0200 /* Downloader handshake doorbell */
#define FT1000_ASIC_RESET_DSP 0x0400 /* Responds to FT1000_ASIC_RESET_REQ */
-#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a heartbeat message for DSP. */
+#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a
+ * heartbeat message for DSP.
+ */
#define hi 0x6869 /* PC Card heartbeat values */
#define ho 0x686f /* PC Card heartbeat values */
/* Magnemite specific defines */
-#define hi_mag 0x6968 /* Byte swap hi to avoid additional system call */
-#define ho_mag 0x6f68 /* Byte swap ho to avoid additional system call */
+#define hi_mag 0x6968 /* Byte swap hi to avoid
+ * additional system call
+ */
+#define ho_mag 0x6f68 /* Byte swap ho to avoid
+ * additional system call
+ */
/* Bit field definitions for Host Interrupt Status Register */
/* Indicate the cause of an interrupt. */
@@ -133,13 +201,19 @@
#define ISR_MASK_RCV 0x0004 /* Downlink Packet available mask */
#define ISR_MASK_WATERMARK 0x0008 /* Watermark interrupt mask */
#define ISR_MASK_ALL 0xffff /* Mask all interrupts */
-/* Default interrupt mask (Enable Doorbell pending and Packet available interrupts) */
+/* Default interrupt mask
+ * (Enable Doorbell pending and Packet available interrupts)
+ */
#define ISR_DEFAULT_MASK 0x7ff9
/* Bit field definition for Host Control Register */
-#define DSP_RESET_BIT 0x0001 /* Bit field to control dsp reset state */
+#define DSP_RESET_BIT 0x0001 /* Bit field to control
+ * dsp reset state
+ */
/* (0 = out of reset 1 = reset) */
-#define ASIC_RESET_BIT 0x0002 /* Bit field to control ASIC reset state */
+#define ASIC_RESET_BIT 0x0002 /* Bit field to control
+ * ASIC reset state
+ */
/* (0 = out of reset 1 = reset) */
#define DSP_UNENCRYPTED 0x0004
#define DSP_ENCRYPTED 0x0008
@@ -195,7 +269,9 @@ struct pseudo_hdr {
unsigned char source; /* hardware source id */
/* Host = 0x10 */
/* Dsp = 0x20 */
- unsigned char destination; /* hardware destination id (refer to source) */
+ unsigned char destination; /* hardware destination id
+ * (refer to source)
+ */
unsigned char portdest; /* software destination port id */
/* Host = 0x00 */
/* Application Broadcast = 0x10 */
@@ -204,7 +280,9 @@ struct pseudo_hdr {
/* Dsp Airlink = 0x90 */
/* Dsp Loader = 0xa0 */
/* Dsp MIP = 0xb0 */
- unsigned char portsrc; /* software source port id (refer to portdest) */
+ unsigned char portsrc; /* software source port id
+ * (refer to portdest)
+ */
unsigned short sh_str_id; /* not used */
unsigned char control; /* not used */
unsigned char rsvd1;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 8af136e9c9dc..b22142ee5262 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2036,6 +2036,13 @@ static void fwserial_auto_connect(struct work_struct *work)
schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
}
+static void fwserial_peer_workfn(struct work_struct *work)
+{
+ struct fwtty_peer *peer = to_peer(work, work);
+
+ peer->workfn(work);
+}
+
/**
* fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
* @serial: aggregate representing the specific fw_card to add the peer to
@@ -2100,7 +2107,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
peer->port = NULL;
init_timer(&peer->timer);
- INIT_WORK(&peer->work, NULL);
+ INIT_WORK(&peer->work, fwserial_peer_workfn);
INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
/* associate peer with specific fw_card */
@@ -2702,7 +2709,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
} else {
peer->work_params.plug_req = pkt->plug_req;
- PREPARE_WORK(&peer->work, fwserial_handle_plug_req);
+ peer->workfn = fwserial_handle_plug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
@@ -2731,7 +2738,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
fwtty_err(&peer->unit, "unplug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
- PREPARE_WORK(&peer->work, fwserial_handle_unplug_req);
+ peer->workfn = fwserial_handle_unplug_req;
queue_work(system_unbound_wq, &peer->work);
}
break;
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 54f7f9b9b212..98b853d4acbc 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -91,6 +91,7 @@ struct fwtty_peer {
struct rcu_head rcu;
spinlock_t lock;
+ work_func_t workfn;
struct work_struct work;
struct peer_work_params work_params;
struct timer_list timer;
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 74a03608b2dd..64c55b99fda4 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -131,7 +131,8 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
/* Get the pointer of the original request */
arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
- arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len + sizeof(struct arphdr));
+ arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len +
+ sizeof(struct arphdr));
/* Get the pointer of the outgoing response */
arp_out = (struct arphdr *)arp_temp;
@@ -160,9 +161,12 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out, sizeof(struct arphdr));
- memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out, sizeof(struct arpdata));
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
+ mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out,
+ sizeof(struct arphdr));
+ memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out,
+ sizeof(struct arpdata));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -198,7 +202,7 @@ static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
pseudo_header.ph.ph_nxt = ipv6->nexthdr;
w = (u16 *)&pseudo_header;
- for (i = 0; i < sizeof(pseudo_header.pa) / sizeof(pseudo_header.pa[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(pseudo_header.pa); i++)
sum += pseudo_header.pa[i];
w = ptr;
@@ -260,11 +264,14 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
return -1;
/* Check if this is NDP packet */
- icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr));
+ icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
+ sizeof(struct ipv6hdr));
if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
return -1;
- } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { /* Check NS */
- u8 icmp_na[sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement)];
+ } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Check NS */
+ u8 icmp_na[sizeof(struct icmp6hdr) +
+ sizeof(struct neighbour_advertisement)];
u8 zero_addr8[16] = {0,};
if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
@@ -276,7 +283,9 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
icmp6_out.icmp6_cksum = 0;
icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000); /* R=0, S=1, O=1 */
- ns = (struct neighbour_solicitation *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+ ns = (struct neighbour_solicitation *)
+ (skb_in->data + mac_header_len +
+ sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
memcpy(&na.target_address, ns->target_address, 16);
na.type = 0x02;
na.length = 1;
@@ -289,13 +298,17 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
- memcpy(ipv6_out.daddr.in6_u.u6_addr8, ipv6_in->saddr.in6_u.u6_addr8, 16);
- ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement));
+ memcpy(ipv6_out.daddr.in6_u.u6_addr8,
+ ipv6_in->saddr.in6_u.u6_addr8, 16);
+ ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) +
+ sizeof(struct neighbour_advertisement));
memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
- memcpy(icmp_na + sizeof(struct icmp6hdr), &na, sizeof(struct neighbour_advertisement));
+ memcpy(icmp_na + sizeof(struct icmp6hdr), &na,
+ sizeof(struct neighbour_advertisement));
- icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out, (u16 *)icmp_na, sizeof(icmp_na));
+ icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
+ (u16 *)icmp_na, sizeof(icmp_na));
} else {
return -1;
}
@@ -311,10 +324,14 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out, sizeof(struct ipv6hdr));
- memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out, sizeof(struct icmp6hdr));
- memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na, sizeof(struct neighbour_advertisement));
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
+ mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out,
+ sizeof(struct ipv6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out,
+ sizeof(struct icmp6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na,
+ sizeof(struct neighbour_advertisement));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -363,7 +380,8 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
/* Check DHCPv4 */
if (ip->protocol == IPPROTO_UDP) {
- struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct iphdr));
+ struct udphdr *udp = (struct udphdr *)
+ (network_data + sizeof(struct iphdr));
if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -373,12 +391,13 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
ipv6 = (struct ipv6hdr *)network_data;
if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
- struct icmp6hdr *icmp6 = (struct icmp6hdr *)(network_data + sizeof(struct ipv6hdr));
- if (/*icmp6->icmp6_type == NDISC_ROUTER_SOLICITATION || */
- icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ struct icmp6hdr *icmp6 = (struct icmp6hdr *)
+ (network_data + sizeof(struct ipv6hdr));
+ if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
nic_type |= NIC_TYPE_ICMPV6;
} else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
- struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct ipv6hdr));
+ struct udphdr *udp = (struct udphdr *)
+ (network_data + sizeof(struct ipv6hdr));
if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -420,11 +439,12 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
}
/*
- Need byte shift (that is, remove VLAN tag) if there is one
- For the case of ARP, this breaks the offset as vlan_ethhdr+4 is treated as ethhdr
- However, it shouldn't be a problem as the response starts from arp_hdr and ethhdr
- is created by this driver based on the NIC mac
- */
+ * Need byte shift (that is, remove VLAN tag) if there is one
+ * For the case of ARP, this breaks the offset as vlan_ethhdr+4
+ * is treated as ethhdr. However, it shouldn't be a problem as
+ * the response starts from arp_hdr and ethhdr is created by this
+ * driver based on the NIC mac
+ */
if (nic_type & NIC_TYPE_F_VLAN) {
struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
@@ -436,15 +456,23 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
data_len = skb->len;
}
- /* If it is a ICMPV6 packet, clear all the other bits : for backward compatibility with the firmware */
+ /* If it is an ICMPV6 packet, clear all the other bits:
+ * for backward compatibility with the firmware
+ */
if (nic_type & NIC_TYPE_ICMPV6)
nic_type = NIC_TYPE_ICMPV6;
- /* If it is not a dhcp packet, clear all the flag bits : original NIC, otherwise the special flag (IPVX | DHCP) */
+ /* If it is not a DHCP packet, clear all the flag bits:
+ * original NIC, otherwise the special flag (IPVX | DHCP)
+ */
if (!(nic_type & NIC_TYPE_F_DHCP))
nic_type &= NIC_TYPE_MASK;
- sscanf(dev->name, "lte%d", &idx);
+ ret = sscanf(dev->name, "lte%d", &idx);
+ if (ret != 1) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
ret = nic->phy_dev->send_sdu_func(nic->phy_dev->priv_dev,
data_buf, data_len,
@@ -485,8 +513,11 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
struct nic *nic = netdev_priv(dev);
struct hci_packet *hci = (struct hci_packet *)buf;
int idx;
+ int ret;
- sscanf(dev->name, "lte%d", &idx);
+ ret = sscanf(dev->name, "lte%d", &idx);
+ if (ret != 1)
+ return -EINVAL;
return netlink_send(lte_event.sock, idx, 0, buf,
gdm_dev16_to_cpu(
@@ -495,7 +526,8 @@ static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
+ HCI_HEADER_SIZE);
}
-static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
+static void gdm_lte_event_rcv(struct net_device *dev, u16 type,
+ void *msg, int len)
{
struct nic *nic = netdev_priv(dev);
@@ -536,7 +568,8 @@ static u8 find_dev_index(u32 nic_type)
return index;
}
-static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int flagged_nic_type)
+static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
+ int len, int flagged_nic_type)
{
u32 nic_type;
struct nic *nic;
@@ -551,28 +584,46 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int fla
nic = netdev_priv(dev);
if (flagged_nic_type & NIC_TYPE_F_DHCP) {
- /* Change the destination mac address with the one requested the IP */
+ /* Change the destination mac address
+ * to the one that requested the IP
+ */
if (flagged_nic_type & NIC_TYPE_F_IPV4) {
struct dhcp_packet {
u8 op; /* BOOTREQUEST or BOOTREPLY */
- u8 htype; /* hardware address type. 1 = 10mb ethernet */
+ u8 htype; /* hardware address type.
+ * 1 = 10mb ethernet
+ */
u8 hlen; /* hardware address length */
u8 hops; /* used by relay agents only */
u32 xid; /* unique id */
- u16 secs; /* elapsed since client began acquisition/renewal */
+ u16 secs; /* elapsed since client began
+ * acquisition/renewal
+ */
u16 flags; /* only one flag so far: */
- #define BROADCAST_FLAG 0x8000 /* "I need broadcast replies" */
- u32 ciaddr; /* client IP (if client is in BOUND, RENEW or REBINDING state) */
+ #define BROADCAST_FLAG 0x8000
+ /* "I need broadcast replies" */
+ u32 ciaddr; /* client IP (if client is in
+ * BOUND, RENEW or REBINDING state)
+ */
u32 yiaddr; /* 'your' (client) IP address */
- /* IP address of next server to use in bootstrap, returned in DHCPOFFER, DHCPACK by server */
+ /* IP address of next server to use in
+ * bootstrap, returned in DHCPOFFER,
+ * DHCPACK by server
+ */
u32 siaddr_nip;
u32 gateway_nip; /* relay agent IP address */
- u8 chaddr[16]; /* link-layer client hardware address (MAC) */
+ u8 chaddr[16]; /* link-layer client hardware
+ * address (MAC)
+ */
u8 sname[64]; /* server host name (ASCIZ) */
u8 file[128]; /* boot file name (ASCIZ) */
- u32 cookie; /* fixed first four option bytes (99,130,83,99 dec) */
+ u32 cookie; /* fixed first four option
+ * bytes (99,130,83,99 dec)
+ */
} __packed;
- void *addr = buf + sizeof(struct iphdr) + sizeof(struct udphdr) + offsetof(struct dhcp_packet, chaddr);
+ void *addr = buf + sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ offsetof(struct dhcp_packet, chaddr);
memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
}
}
@@ -593,7 +644,9 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int fla
vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);
if (nic_type == NIC_TYPE_ARP) {
- /* Should be response: Only happens because there was a request from the host */
+ /* Should be response: Only happens because
+ * there was a request from the host
+ */
eth.h_proto = htons(ETH_P_ARP);
vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
} else {
@@ -640,15 +693,20 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
u32 nic_type;
u8 index;
- hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->len);
- num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->num_packet);
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ multi_sdu->len);
+ num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ multi_sdu->num_packet);
for (i = 0; i < num_packet; i++) {
sdu = (struct sdu *)data;
- cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->cmd_evt);
- hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->len);
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->cmd_evt);
+ hci_len = gdm_dev16_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->len);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->nic_type);
if (cmd_evt != LTE_RX_SDU) {
pr_err("rx sdu wrong hci %04x\n", cmd_evt);
@@ -662,7 +720,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
index = find_dev_index(nic_type);
if (index < MAX_NIC_TYPE) {
dev = phy_dev->dev[index];
- gdm_lte_netif_rx(dev, (char *)sdu->data, (int)(hci_len-12), nic_type);
+ gdm_lte_netif_rx(dev, (char *)sdu->data,
+ (int)(hci_len-12), nic_type);
} else {
pr_err("rx sdu invalid nic_type :%x\n", nic_type);
}
@@ -709,7 +768,8 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
if (!len)
return ret;
- cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), hci->cmd_evt);
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev),
+ hci->cmd_evt);
dev = phy_dev->dev[0];
if (dev == NULL)
@@ -718,7 +778,8 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
switch (cmd_evt) {
case LTE_RX_SDU:
sdu = (struct sdu *)hci->data;
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev), sdu->nic_type);
index = find_dev_index(nic_type);
dev = phy_dev->dev[index];
gdm_lte_netif_rx(dev, hci->data, len, nic_type);
@@ -733,7 +794,9 @@ static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
break;
case LTE_PDN_TABLE_IND:
pdn_table = (struct hci_pdn_table_ind *)buf;
- nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), pdn_table->nic_type);
+ nic_type = gdm_dev32_to_cpu(phy_dev->
+ get_endian(phy_dev->priv_dev),
+ pdn_table->nic_type);
index = find_dev_index(nic_type);
dev = phy_dev->dev[index];
gdm_lte_pdn_table(dev, buf, len);
@@ -758,7 +821,8 @@ void start_rx_proc(struct phy_dev *phy_dev)
int i;
for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
- phy_dev->rcv_func(phy_dev->priv_dev, rx_complete, phy_dev, USB_COMPLETE);
+ phy_dev->rcv_func(phy_dev->priv_dev,
+ rx_complete, phy_dev, USB_COMPLETE);
}
static struct net_device_ops gdm_netdev_ops = {
@@ -771,7 +835,8 @@ static struct net_device_ops gdm_netdev_ops = {
static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};
-static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_address, u8 index)
+static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
+ u8 *mac_address, u8 index)
{
/* Form the dev_addr */
if (!mac_address)
@@ -779,10 +844,14 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_ad
else
memcpy(dev_addr, mac_address, ETH_ALEN);
- /* The last byte of the mac address should be less than or equal to 0xFC */
+ /* The last byte of the mac address
+ * should be less than or equal to 0xFC
+ */
dev_addr[ETH_ALEN-1] += index;
- /* Create random nic src and copy the first 3 bytes to be the same as dev_addr */
+ /* Create random nic src and copy the first
+ * 3 bytes to be the same as dev_addr
+ */
random_ether_addr(nic_src);
memcpy(nic_src, dev_addr, 3);
@@ -799,7 +868,8 @@ static void validate_mac_address(u8 *mac_address)
}
}
-int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_address)
+int register_lte_device(struct phy_dev *phy_dev,
+ struct device *dev, u8 *mac_address)
{
struct nic *nic;
struct net_device *net;
@@ -814,7 +884,8 @@ int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_add
sprintf(pdn_dev_name, "lte%%dpdn%d", index);
/* Allocate netdev */
- net = alloc_netdev(sizeof(struct nic), pdn_dev_name, ether_setup);
+ net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
+ ether_setup);
if (net == NULL) {
pr_err("alloc_netdev failed\n");
ret = -ENOMEM;
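The gdm_lte.c hunks above also start checking the return value of sscanf()
when the interface index is parsed out of the netdev name, instead of using
idx uninitialised on a parse failure. A minimal sketch of that check, using an
illustrative helper name rather than the driver's own code:

	/* Returns 0 and fills *idx on success, -EINVAL if the name does not
	 * match "lte%d" (sscanf() reports the number of converted fields).
	 */
	static int lte_dev_index(struct net_device *dev, int *idx)
	{
		if (sscanf(dev->name, "lte%d", idx) != 1)
			return -EINVAL;

		return 0;
	}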
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 9287d310d8e2..88414e5a70cc 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -56,7 +56,7 @@ struct phy_dev {
int (*cb)(void *cb_data, void *data, int len,
int context),
void *cb_data, int context);
- struct gdm_endian *(*get_endian)(void *priv_dev);
+ struct gdm_endian * (*get_endian)(void *priv_dev);
};
struct nic {
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 2fa3a5a6580f..10ce2c1805bb 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -165,7 +165,8 @@ static int up_to_host(struct mux_rx *r)
int len = r->len;
while (1) {
- mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
+ mux_header = (struct mux_pkt_header *)(r->buf +
+ packet_size_sum);
start_flag = __le32_to_cpu(mux_header->start_flag);
payload_size = __le32_to_cpu(mux_header->payload_size);
packet_type = __le16_to_cpu(mux_header->packet_type);
@@ -231,7 +232,8 @@ static void do_rx(struct work_struct *work)
spin_unlock_irqrestore(&rx->to_host_lock, flags);
break;
}
- r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
+ r = list_entry(rx->to_host_list.next, struct mux_rx,
+ to_host_list);
list_del(&r->to_host_list);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
@@ -249,7 +251,8 @@ static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
struct mux_rx *r_remove, *r_remove_next;
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
+ rx_submit_list) {
if (r == r_remove)
list_del(&r->rx_submit_list);
}
@@ -279,9 +282,8 @@ static void gdm_mux_rcv_complete(struct urb *urb)
}
}
-static int gdm_mux_recv(void *priv_dev,
- int (*cb)(void *data, int len, int tty_index, struct tty_dev *tty_dev, int complete)
- )
+static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
+ int tty_index, struct tty_dev *tty_dev, int complete))
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
@@ -416,7 +418,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
return ret;
}
-static int gdm_mux_send_control(void *priv_dev, int request, int value, void *buf, int len)
+static int gdm_mux_send_control(void *priv_dev, int request, int value,
+ void *buf, int len)
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
@@ -448,7 +451,8 @@ static void release_usb(struct mux_dev *mux_dev)
cancel_delayed_work(&mux_dev->work_rx);
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
+ rx_submit_list) {
spin_unlock_irqrestore(&rx->submit_list_lock, flags);
usb_kill_urb(r->urb);
spin_lock_irqsave(&rx->submit_list_lock, flags);
@@ -503,7 +507,8 @@ static int init_usb(struct mux_dev *mux_dev)
return ret;
}
-static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gdm_mux_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct mux_dev *mux_dev;
struct tty_dev *tty_dev;
@@ -610,7 +615,8 @@ static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
spin_lock_irqsave(&rx->submit_list_lock, flags);
- list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
+ rx_submit_list) {
spin_unlock_irqrestore(&rx->submit_list_lock, flags);
usb_kill_urb(r->urb);
spin_lock_irqsave(&rx->submit_list_lock, flags);
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 33458a583142..ee6e40facca7 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -30,14 +30,17 @@
#include "gdm_endian.h"
#define USB_DEVICE_CDC_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceClass = USB_CLASS_COMM,\
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
#define USB_DEVICE_MASS_DATA(vid, pid) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_INFO,\
.idVendor = vid,\
.idProduct = pid,\
.bInterfaceSubClass = USB_SC_SCSI, \
@@ -59,7 +62,8 @@ static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);
static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len, int context),
+ int (*cb)(void *cb_data,
+ void *data, int len, int context),
void *cb_data,
int context);
@@ -119,28 +123,15 @@ out:
static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
- struct usb_tx_sdu *t_sdu = NULL;
- int ret = 0;
-
+ struct usb_tx_sdu *t_sdu;
t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
- if (!t_sdu) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!t_sdu)
+ return NULL;
t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
if (!t_sdu->buf) {
- ret = -ENOMEM;
- goto out;
- }
-out:
-
- if (ret < 0) {
- if (t_sdu) {
- kfree(t_sdu->buf);
- kfree(t_sdu);
- }
+ kfree(t_sdu);
return NULL;
}
@@ -388,7 +379,8 @@ static int set_mac_address(u8 *data, void *arg)
if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
memcpy(mac_address, tlv->data, tlv->len);
- if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
+ if (register_lte_device(phy_dev,
+ &udev->intf->dev, mac_address) < 0)
pr_err("register lte device failed\n");
udev->request_mac_addr = 0;
@@ -401,7 +393,8 @@ static int set_mac_address(u8 *data, void *arg)
static void do_rx(struct work_struct *work)
{
- struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
+ struct lte_udev *udev =
+ container_of(work, struct lte_udev, work_rx.work);
struct rx_cxt *rx = &udev->rx;
struct usb_rx *r;
struct hci_packet *hci;
@@ -416,7 +409,8 @@ static void do_rx(struct work_struct *work)
spin_unlock_irqrestore(&rx->to_host_lock, flags);
break;
}
- r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
+ r = list_entry(rx->to_host_list.next,
+ struct usb_rx, to_host_list);
list_del(&r->to_host_list);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
@@ -463,7 +457,8 @@ static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
struct usb_rx *r_remove, *r_remove_next;
spin_lock_irqsave(&rx->submit_lock, flags);
- list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list)
+ list_for_each_entry_safe(r_remove,
+ r_remove_next, &rx->rx_submit_list, rx_submit_list)
{
if (r == r_remove) {
list_del(&r->rx_submit_list);
@@ -500,7 +495,8 @@ static void gdm_usb_rcv_complete(struct urb *urb)
}
static int gdm_usb_recv(void *priv_dev,
- int (*cb)(void *cb_data, void *data, int len, int context),
+ int (*cb)(void *cb_data,
+ void *data, int len, int context),
void *cb_data,
int context)
{
@@ -654,7 +650,8 @@ static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
static void do_tx(struct work_struct *work)
{
- struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
+ struct lte_udev *udev =
+ container_of(work, struct lte_udev, work_tx.work);
struct usb_device *usbdev = udev->usbdev;
struct tx_cxt *tx = &udev->tx;
struct usb_tx *t = NULL;
@@ -813,7 +810,8 @@ static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
return &udev->gdm_ed;
}
-static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gdm_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
int ret = 0;
struct phy_dev *phy_dev = NULL;
@@ -861,7 +859,9 @@ static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id
usb_enable_autosuspend(usbdev);
pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);
- /* List up hosts with big endians, otherwise, defaults to little endian */
+ /* List the hosts that use big endian; otherwise,
+ * default to little endian
+ */
if (idProduct == PID_GDM7243)
gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
else
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 77fc64e28428..5ddd36948a2f 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -32,7 +32,8 @@ static struct semaphore netlink_mutex;
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
-#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + \
+ ND_IFINDEX_LEN))
#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO
index 30ac01ab972f..5ab27fb29594 100644
--- a/drivers/staging/gdm72xx/TODO
+++ b/drivers/staging/gdm72xx/TODO
@@ -1,5 +1,3 @@
TODO:
- Replace kernel_thread with kthread in gdm_usb.c
-- Replace hard-coded firmware paths with request_firmware in
- sdio_boot.c and usb_boot.c
- Clean up coding style to meet kernel standard.
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
index 047a4d77f5ee..c24653739e13 100644
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ b/drivers/staging/gdm72xx/gdm_sdio.c
@@ -38,26 +38,9 @@
#define TX_HZ 2000
#define TX_INTERVAL (1000000/TX_HZ)
-/*#define DEBUG*/
-
static int init_sdio(struct sdiowm_dev *sdev);
static void release_sdio(struct sdiowm_dev *sdev);
-#ifdef DEBUG
-static void hexdump(char *title, u8 *data, int len)
-{
- int i;
-
- printk(KERN_DEBUG "%s: length = %d\n", title, len);
- for (i = 0; i < len; i++) {
- printk(KERN_DEBUG "%02x ", data[i]);
- if ((i & 0xf) == 0xf)
- printk(KERN_DEBUG "\n");
- }
- printk(KERN_DEBUG "\n");
-}
-#endif
-
static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
{
struct sdio_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
@@ -297,10 +280,9 @@ static void send_sdu(struct sdio_func *func, struct tx_cxt *tx)
spin_unlock_irqrestore(&tx->lock, flags);
-#ifdef DEBUG
- hexdump("sdio_send", tx->sdu_buf + TYPE_A_HEADER_SIZE,
- aggr_len - TYPE_A_HEADER_SIZE);
-#endif
+ print_hex_dump_debug("sdio_send: ", DUMP_PREFIX_NONE, 16, 1,
+ tx->sdu_buf + TYPE_A_HEADER_SIZE,
+ aggr_len - TYPE_A_HEADER_SIZE, false);
for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += TX_CHUNK_SIZE) {
len = aggr_len - pos;
@@ -335,10 +317,9 @@ static void send_hci(struct sdio_func *func, struct tx_cxt *tx,
{
unsigned long flags;
-#ifdef DEBUG
- hexdump("sdio_send", t->buf + TYPE_A_HEADER_SIZE,
- t->len - TYPE_A_HEADER_SIZE);
-#endif
+ print_hex_dump_debug("sdio_send: ", DUMP_PREFIX_NONE, 16, 1,
+ t->buf + TYPE_A_HEADER_SIZE,
+ t->len - TYPE_A_HEADER_SIZE, false);
send_sdio_pkt(func, t->buf, t->len);
spin_lock_irqsave(&tx->lock, flags);
@@ -474,14 +455,10 @@ static int control_sdu_tx_flow(struct sdiowm_dev *sdev, u8 *hci_data, int len)
goto out;
if (hci_data[4] == 0) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> STOP SDU TX\n");
-#endif
+ dev_dbg(&sdev->func->dev, "WIMAX ==> STOP SDU TX\n");
tx->stop_sdu_tx = 1;
} else if (hci_data[4] == 1) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> START SDU TX\n");
-#endif
+ dev_dbg(&sdev->func->dev, "WIMAX ==> START SDU TX\n");
tx->stop_sdu_tx = 0;
if (tx->can_send)
schedule_work(&sdev->ws);
@@ -532,18 +509,14 @@ static void gdm_sdio_irq(struct sdio_func *func)
}
if (hdr[3] == 1) { /* Ack */
-#ifdef DEBUG
u32 *ack_seq = (u32 *)&hdr[4];
-#endif
spin_lock_irqsave(&tx->lock, flags);
tx->can_send = 1;
if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list))
schedule_work(&sdev->ws);
spin_unlock_irqrestore(&tx->lock, flags);
-#ifdef DEBUG
- printk(KERN_DEBUG "Ack... %0x\n", ntohl(*ack_seq));
-#endif
+ dev_dbg(&func->dev, "Ack... %0x\n", ntohl(*ack_seq));
goto done;
}
@@ -579,9 +552,8 @@ static void gdm_sdio_irq(struct sdio_func *func)
}
end_io:
-#ifdef DEBUG
- hexdump("sdio_receive", rx->rx_buf, len);
-#endif
+ print_hex_dump_debug("sdio_receive: ", DUMP_PREFIX_NONE, 16, 1,
+ rx->rx_buf, len, false);
len = control_sdu_tx_flow(sdev, rx->rx_buf, len);
spin_lock_irqsave(&rx->lock, flags);
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..20539d809397 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -55,22 +55,6 @@ static int k_mode_stop;
static int init_usb(struct usbwm_dev *udev);
static void release_usb(struct usbwm_dev *udev);
-/*#define DEBUG */
-#ifdef DEBUG
-static void hexdump(char *title, u8 *data, int len)
-{
- int i;
-
- printk(KERN_DEBUG "%s: length = %d\n", title, len);
- for (i = 0; i < len; i++) {
- printk(KERN_DEBUG "%02x ", data[i]);
- if ((i & 0xf) == 0xf)
- printk(KERN_DEBUG "\n");
- }
- printk(KERN_DEBUG "\n");
-}
-#endif
-
static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx)
{
struct usb_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
@@ -368,9 +352,8 @@ static int gdm_usb_send(void *priv_dev, void *data, int len,
gdm_usb_send_complete,
t);
-#ifdef DEBUG
- hexdump("usb_send", t->buf, len + padding);
-#endif
+ print_hex_dump_debug("usb_send: ", DUMP_PREFIX_NONE, 16, 1,
+ t->buf, len + padding, false);
#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
if (usbdev->state & USB_STATE_SUSPENDED) {
list_add_tail(&t->p_list, &tx->pending_list);
@@ -438,10 +421,7 @@ static void gdm_usb_rcv_complete(struct urb *urb)
struct usb_tx *t;
u16 cmd_evt;
unsigned long flags, flags2;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
struct usb_device *dev = urb->dev;
-#endif
/* Completion by usb_unlink_urb */
if (urb->status == -ECONNRESET)
@@ -451,20 +431,15 @@ static void gdm_usb_rcv_complete(struct urb *urb)
if (!urb->status) {
cmd_evt = (r->buf[0] << 8) | (r->buf[1]);
-#ifdef DEBUG
- hexdump("usb_receive", r->buf, urb->actual_length);
-#endif
+ print_hex_dump_debug("usb_receive: ", DUMP_PREFIX_NONE, 16, 1,
+ r->buf, urb->actual_length, false);
if (cmd_evt == WIMAX_SDU_TX_FLOW) {
if (r->buf[4] == 0) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> STOP SDU TX\n");
-#endif
+ dev_dbg(&dev->dev, "WIMAX ==> STOP SDU TX\n");
list_for_each_entry(t, &tx->sdu_list, list)
usb_unlink_urb(t->urb);
} else if (r->buf[4] == 1) {
-#ifdef DEBUG
- printk(KERN_DEBUG "WIMAX ==> START SDU TX\n");
-#endif
+ dev_dbg(&dev->dev, "WIMAX ==> START SDU TX\n");
list_for_each_entry(t, &tx->sdu_list, list) {
usb_submit_urb(t->urb, GFP_ATOMIC);
}
@@ -635,11 +610,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index dd854975db7d..05ce2a22c220 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -62,21 +62,6 @@ static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30};
static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm);
static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up);
-#if defined(DEBUG_SDU)
-static void printk_hex(u8 *buf, u32 size)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- if (i && i % 16 == 0)
- printk(KERN_DEBUG "\n%02x ", *buf++);
- else
- printk(KERN_DEBUG "%02x ", *buf++);
- }
-
- printk(KERN_DEBUG "\n");
-}
-
static const char *get_protocol_name(u16 protocol)
{
static char buf[32];
@@ -140,7 +125,8 @@ static const char *get_port_name(u16 port)
return buf;
}
-static void dump_eth_packet(const char *title, u8 *data, int len)
+static void dump_eth_packet(struct net_device *dev, const char *title,
+ u8 *data, int len)
{
struct iphdr *ih = NULL;
struct udphdr *uh = NULL;
@@ -162,48 +148,21 @@ static void dump_eth_packet(const char *title, u8 *data, int len)
port = ntohs(uh->dest);
}
- printk(KERN_DEBUG "[%s] len=%d, %s, %s, %s\n",
+ netdev_dbg(dev, "[%s] len=%d, %s, %s, %s\n",
title, len,
get_protocol_name(protocol),
get_ip_protocol_name(ip_protocol),
get_port_name(port));
if (!(data[0] == 0xff && data[1] == 0xff)) {
- if (protocol == ETH_P_IP) {
- printk(KERN_DEBUG " src=%pI4\n", &ih->saddr);
- } else if (protocol == ETH_P_IPV6) {
- printk(KERN_DEBUG " src=%pI6\n", &ih->saddr);
- }
+ if (protocol == ETH_P_IP)
+ netdev_dbg(dev, " src=%pI4\n", &ih->saddr);
+ else if (protocol == ETH_P_IPV6)
+ netdev_dbg(dev, " src=%pI6\n", &ih->saddr);
}
- #if (DUMP_PACKET & DUMP_SDU_ALL)
- printk_hex(data, len);
- #else
- #if (DUMP_PACKET & DUMP_SDU_ARP)
- if (protocol == ETH_P_ARP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP)
- if (protocol == ETH_P_IP || protocol == ETH_P_IPV6)
- printk_hex(data, len);
- #else
- #if (DUMP_PACKET & DUMP_SDU_IP_TCP)
- if (ip_protocol == IPPROTO_TCP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP_UDP)
- if (ip_protocol == IPPROTO_UDP)
- printk_hex(data, len);
- #endif
- #if (DUMP_PACKET & DUMP_SDU_IP_ICMP)
- if (ip_protocol == IPPROTO_ICMP)
- printk_hex(data, len);
- #endif
- #endif
- #endif
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, data, len, false);
}
-#endif
-
static inline int gdm_wimax_header(struct sk_buff **pskb)
{
@@ -237,12 +196,10 @@ static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
{
struct nic *nic = netdev_priv(dev);
- #if defined(DEBUG_HCI)
u8 *buf = (u8 *) msg;
u16 hci_cmd = (buf[0]<<8) | buf[1];
u16 hci_len = (buf[2]<<8) | buf[3];
- printk(KERN_DEBUG "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
- #endif
+ netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
gdm_wimax_send(nic, msg, len);
}
@@ -351,11 +308,9 @@ static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size)
struct evt_entry *e;
unsigned long flags;
- #if defined(DEBUG_HCI)
u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1];
u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3];
- printk(KERN_DEBUG "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
- #endif
+ netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
spin_lock_irqsave(&wm_event.evt_lock, flags);
@@ -415,9 +370,7 @@ static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev)
struct nic *nic = netdev_priv(dev);
struct fsm_s *fsm = (struct fsm_s *) nic->sdk_data[SIOC_DATA_FSM].buf;
- #if defined(DEBUG_SDU)
- dump_eth_packet("TX", skb->data, skb->len);
- #endif
+ dump_eth_packet(dev, "TX", skb->data, skb->len);
ret = gdm_wimax_header(&skb);
if (ret < 0) {
@@ -540,7 +493,7 @@ static int gdm_wimax_ioctl_get_data(struct data_s *dst, struct data_s *src)
if (src->size) {
if (!dst->buf)
return -EINVAL;
- if (copy_to_user(dst->buf, src->buf, size))
+ if (copy_to_user((void __user *)dst->buf, src->buf, size))
return -EFAULT;
}
return 0;
@@ -563,7 +516,7 @@ static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct data_s *src)
return -ENOMEM;
}
- if (copy_from_user(dst->buf, src->buf, src->size)) {
+ if (copy_from_user(dst->buf, (void __user *)src->buf, src->size)) {
kdelete(&dst->buf);
return -EFAULT;
}
@@ -756,9 +709,7 @@ static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
struct sk_buff *skb;
int ret;
- #if defined(DEBUG_SDU)
- dump_eth_packet("RX", buf, len);
- #endif
+ dump_eth_packet(dev, "RX", buf, len);
skb = dev_alloc_skb(len + 2);
if (!skb) {
diff --git a/drivers/staging/gdm72xx/gdm_wimax.h b/drivers/staging/gdm72xx/gdm_wimax.h
index 6ec0ab43e9cc..1fcfc8555417 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.h
+++ b/drivers/staging/gdm72xx/gdm_wimax.h
@@ -62,26 +62,6 @@ struct nic {
};
-
-#if 0
-#define dprintk(fmt, args ...) printk(KERN_DEBUG " [GDM] " fmt, ## args)
-#else
-#define dprintk(...)
-#endif
-
-/*#define DEBUG_SDU */
-#if defined(DEBUG_SDU)
-#define DUMP_SDU_ALL (1<<0)
-#define DUMP_SDU_ARP (1<<1)
-#define DUMP_SDU_IP (1<<2)
-#define DUMP_SDU_IP_TCP (1<<8)
-#define DUMP_SDU_IP_UDP (1<<9)
-#define DUMP_SDU_IP_ICMP (1<<10)
-#define DUMP_PACKET (DUMP_SDU_ALL)
-#endif
-
-/*#define DEBUG_HCI */
-
/*#define LOOPBACK_TEST */
extern int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev);
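The gdm72xx hunks above drop the hand-rolled hexdump()/printk_hex() helpers and
the DEBUG/DEBUG_SDU/DEBUG_HCI conditionals in favour of the kernel's
print_hex_dump_debug(), dev_dbg() and netdev_dbg(), which are compiled out or
gated by dynamic debug. A short usage sketch of the hex-dump call (the helper
name, buffer and length are illustrative only):

	static void dump_rx_buf(const void *buf, size_t len)
	{
		/* 16 bytes per row, 1-byte groups, no offset prefix,
		 * no trailing ASCII column
		 */
		print_hex_dump_debug("sdio_receive: ", DUMP_PREFIX_NONE, 16, 1,
				     buf, len, false);
	}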
diff --git a/drivers/staging/gs_fpgaboot/Kconfig b/drivers/staging/gs_fpgaboot/Kconfig
new file mode 100644
index 000000000000..550645291fab
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/Kconfig
@@ -0,0 +1,8 @@
+#
+# "xilinx FPGA firmware download, fpgaboot"
+#
+config GS_FPGABOOT
+ tristate "Xilinx FPGA firmware download module"
+ default n
+ help
+ Xilinx FPGA firmware download module
diff --git a/drivers/staging/gs_fpgaboot/Makefile b/drivers/staging/gs_fpgaboot/Makefile
new file mode 100644
index 000000000000..34cb606e0e3d
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/Makefile
@@ -0,0 +1,4 @@
+gs_fpga-y += gs_fpgaboot.o io.o
+obj-$(CONFIG_GS_FPGABOOT) += gs_fpga.o
+
+ccflags-$(CONFIG_GS_FPGA_DEBUG) := -DDEBUG
diff --git a/drivers/staging/gs_fpgaboot/README b/drivers/staging/gs_fpgaboot/README
new file mode 100644
index 000000000000..cfa8624304e5
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/README
@@ -0,0 +1,71 @@
+==============================================================================
+Linux Driver Source for Xilinx FPGA firmware download
+==============================================================================
+
+
+TABLE OF CONTENTS.
+
+1. SUMMARY
+2. BACKGROUND
+3. DESIGN
+4. HOW TO USE
+5. USE CASE
+6. REFERENCE
+
+1. SUMMARY
+
+ - Download Xilinx FPGA firmware
+ - This module downloads Xilinx FPGA firmware using gpio pins.
+
+2. BACKGROUND
+
+ An FPGA (Field Programmable Gate Array) is programmable hardware that is
+ used in various applications. A hardware design needs to be programmed into
+ it through a dedicated device or in a CPU-assisted way (serial or parallel).
+ This driver provides a way to download FPGA firmware.
+
+3. DESIGN
+
+ - load a Xilinx FPGA bitstream-format [2] firmware image file using the
+ kernel firmware framework, request_firmware()
+ - program the Xilinx FPGA using SelectMAP (parallel) mode [1]
+ - FPGA programming is done by GPIO-based bit-banging, as an example
+ - platform independent file: gs_fpgaboot.c
+ - platform dependent file: io.c
+
+
+4. HOW TO USE
+
+ $ insmod gs_fpga.ko file="xlinx_fpga_top_bitstream.bit"
+ $ rmmod gs_fpga
+
+5. USE CASE (from a mailing list discussion with Greg)
+
+ a. As an FPGA development support tool:
+ During FPGA firmware development, you need to download a new FPGA
+ image frequently.
+ You would normally do that with a dedicated JTAG probe, which is usually
+ a limited resource in the lab.
+ However, if you use my driver, you don't need a dedicated JTAG probe.
+ This is a real gain :)
+
+ b. For an FPGA that runs without further configuration after the download
+ and doesn't talk to any Linux interface (such as PCIe).
+
+ We download the FPGA firmware, triggered by the user or in some other way,
+ and that's it. Since that FPGA runs on its own, it doesn't require a
+ Linux driver after the download.
+
+ c. For an FPGA that requires configuration after the download and talks
+ to a Linux interface (such as PCIe):
+
+ This type of FPGA config can be put into the device tree and have a
+ separate driver (PCIe or other); that driver then calls my driver to
+ download the FPGA firmware during Linux boot and takes over the device
+ through the interface.
+
+6. REFERENCE
+
+ 1. Xilinx APP NOTE XAPP583:
+ http://www.xilinx.com/support/documentation/application_notes/xapp583-fpga-configuration.pdf
+ 2. bitstream file info:
+ http://home.earthlink.net/~davesullins/software/bitinfo.html
diff --git a/drivers/staging/gs_fpgaboot/TODO b/drivers/staging/gs_fpgaboot/TODO
new file mode 100644
index 000000000000..2d9fb17d606d
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/TODO
@@ -0,0 +1,7 @@
+TODO:
+ - get bus width input instead of hardcoded bus width
+ - get it reviewed
+
+Please send any patches for this driver to Insop Song <insop.song@gainspeed.com>
+and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
+Please also CC the "Staging subsystem" mailing list <devel@driverdev.osuosl.org>.
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
new file mode 100644
index 000000000000..89bc84d833e6
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -0,0 +1,422 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+
+#include "gs_fpgaboot.h"
+#include "io.h"
+
+#define DEVICE_NAME "device"
+#define CLASS_NAME "fpgaboot"
+
+static uint8_t bits_magic[] = {
+ 0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0,
+ 0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1};
+
+/* fake device for request_firmware */
+static struct platform_device *firmware_pdev;
+
+static char *file = "xlinx_fpga_firmware.bit";
+module_param(file, charp, S_IRUGO);
+MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
+
+#ifdef DEBUG_FPGA
+static void datadump(char *msg, void *m, int n)
+{
+ int i;
+ unsigned char *c;
+
+ pr_info("=== %s ===\n", msg);
+
+ c = m;
+
+ for (i = 0; i < n; i++) {
+ if ((i&0xf) == 0)
+ pr_info("\n 0x%4x: ", i);
+
+ pr_info("%02X ", c[i]);
+ }
+
+ pr_info("\n");
+}
+#endif /* DEBUG_FPGA */
+
+static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
+{
+ memcpy(buf, bitdata + *offset, rdsize);
+ *offset += rdsize;
+}
+
+static void readinfo_bitstream(char *bitdata, char *buf, int *offset)
+{
+ char tbuf[64];
+ int32_t len;
+
+ /* read section char */
+ read_bitstream(bitdata, tbuf, offset, 1);
+
+ /* read length */
+ read_bitstream(bitdata, tbuf, offset, 2);
+
+ len = tbuf[0] << 8 | tbuf[1];
+
+ read_bitstream(bitdata, buf, offset, len);
+ buf[len] = '\0';
+}
+
+/*
+ * read bitdata length
+ */
+static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
+{
+ char tbuf[64];
+
+ /* read section char */
+ read_bitstream(bitdata, tbuf, offset, 1);
+
+ /* make sure it is section 'e' */
+ if (tbuf[0] != 'e') {
+ pr_err("error: length section is not 'e', but %c\n", tbuf[0]);
+ return -1;
+ }
+
+ /* read 4bytes length */
+ read_bitstream(bitdata, tbuf, offset, 4);
+
+ *lendata = tbuf[0] << 24 | tbuf[1] << 16 |
+ tbuf[2] << 8 | tbuf[3];
+
+ return 0;
+}
+
+
+/*
+ * read first 13 bytes to check bitstream magic number
+ */
+static int readmagic_bitstream(char *bitdata, int *offset)
+{
+ char buf[13];
+ int r;
+
+ read_bitstream(bitdata, buf, offset, 13);
+ r = memcmp(buf, bits_magic, 13);
+ if (r) {
+ pr_err("error: corrupted header");
+ return -1;
+ }
+ pr_info("bitstream file magic number Ok\n");
+
+ *offset = 13; /* magic length */
+
+ return 0;
+}
+
+/*
+ * NOTE: supports only bitstream format
+ */
+static enum fmt_image get_imageformat(struct fpgaimage *fimage)
+{
+ return f_bit;
+}
+
+static void gs_print_header(struct fpgaimage *fimage)
+{
+ pr_info("file: %s\n", fimage->filename);
+ pr_info("part: %s\n", fimage->part);
+ pr_info("date: %s\n", fimage->date);
+ pr_info("time: %s\n", fimage->time);
+ pr_info("lendata: %d\n", fimage->lendata);
+}
+
+static void gs_read_bitstream(struct fpgaimage *fimage)
+{
+ char *bitdata;
+ int size;
+ int offset;
+
+ offset = 0;
+ bitdata = (char *)fimage->fw_entry->data;
+ size = fimage->fw_entry->size;
+
+ readmagic_bitstream(bitdata, &offset);
+ readinfo_bitstream(bitdata, fimage->filename, &offset);
+ readinfo_bitstream(bitdata, fimage->part, &offset);
+ readinfo_bitstream(bitdata, fimage->date, &offset);
+ readinfo_bitstream(bitdata, fimage->time, &offset);
+ readlength_bitstream(bitdata, &fimage->lendata, &offset);
+
+ fimage->fpgadata = bitdata + offset;
+}
+
+static int gs_read_image(struct fpgaimage *fimage)
+{
+ int img_fmt;
+
+ img_fmt = get_imageformat(fimage);
+
+ switch (img_fmt) {
+ case f_bit:
+ pr_info("image is bitstream format\n");
+ gs_read_bitstream(fimage);
+ break;
+ default:
+ pr_err("unsupported fpga image format\n");
+ return -1;
+ }
+
+ gs_print_header(fimage);
+
+ return 0;
+}
+
+static int gs_load_image(struct fpgaimage *fimage, char *file)
+{
+ int err;
+
+ pr_info("load fpgaimage %s\n", file);
+
+ err = request_firmware(&fimage->fw_entry, file, &firmware_pdev->dev);
+ if (err != 0) {
+ pr_err("firmware %s is missing, cannot continue.\n", file);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
+{
+ char *bitdata;
+ int size, i, cnt;
+ cnt = 0;
+
+ bitdata = (char *)fimage->fpgadata;
+ size = fimage->lendata;
+
+#ifdef DEBUG_FPGA
+ datadump("bitfile sample", bitdata, 0x100);
+#endif /* DEBUG_FPGA */
+
+ if (!xl_supported_prog_bus_width(bus_bytes)) {
+ pr_err("unsupported program bus width %d\n",
+ bus_bytes);
+ return -1;
+ }
+
+ /* Bring csi_b, rdwr_b Low and program_b High */
+ xl_program_b(1);
+ xl_rdwr_b(0);
+ xl_csi_b(0);
+
+ /* Configuration reset */
+ xl_program_b(0);
+ msleep(20);
+ xl_program_b(1);
+
+ /* Wait for Device Initialization */
+ while (xl_get_init_b() == 0)
+ ;
+
+ pr_info("device init done\n");
+
+ for (i = 0; i < size; i += bus_bytes)
+ xl_shift_bytes_out(bus_bytes, bitdata+i);
+
+ pr_info("program done\n");
+
+ /* Check INIT_B */
+ if (xl_get_init_b() == 0) {
+ pr_err("init_b 0\n");
+ return -1;
+ }
+
+ while (xl_get_done_b() == 0) {
+ if (cnt++ > MAX_WAIT_DONE) {
+ pr_err("init_B %d\n", xl_get_init_b());
+ break;
+ }
+ }
+
+ if (cnt > MAX_WAIT_DONE) {
+ pr_err("fpga download fail\n");
+ return -1;
+ }
+
+ pr_info("download fpgaimage\n");
+
+ /* Compensate for Special Startup Conditions */
+ xl_shift_cclk(8);
+
+ return 0;
+}
+
+static int gs_release_image(struct fpgaimage *fimage)
+{
+ release_firmware(fimage->fw_entry);
+ pr_info("release fpgaimage\n");
+
+ return 0;
+}
+
+/*
+ * NOTE: supports systemmap parallel programming
+ */
+static int gs_set_download_method(struct fpgaimage *fimage)
+{
+ pr_info("set program method\n");
+
+ fimage->dmethod = m_systemmap;
+
+ pr_info("systemmap program method\n");
+
+ return 0;
+}
+
+static int init_driver(void)
+{
+ firmware_pdev = platform_device_register_simple("fpgaboot", -1,
+ NULL, 0);
+ return PTR_ERR_OR_ZERO(firmware_pdev);
+}
+
+static void finish_driver(void)
+{
+ platform_device_unregister(firmware_pdev);
+}
+
+static int gs_fpgaboot(void)
+{
+ int err;
+ struct fpgaimage *fimage;
+
+ fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
+ if (fimage == NULL) {
+ pr_err("No memory is available\n");
+ goto err_out;
+ }
+
+ err = gs_load_image(fimage, file);
+ if (err) {
+ pr_err("gs_load_image error\n");
+ goto err_out1;
+ }
+
+ err = gs_read_image(fimage);
+ if (err) {
+ pr_err("gs_read_image error\n");
+ goto err_out2;
+ }
+
+ err = gs_set_download_method(fimage);
+ if (err) {
+ pr_err("gs_set_download_method error\n");
+ goto err_out2;
+ }
+
+ err = gs_download_image(fimage, bus_2byte);
+ if (err) {
+ pr_err("gs_download_image error\n");
+ goto err_out2;
+ }
+
+ err = gs_release_image(fimage);
+ if (err) {
+ pr_err("gs_release_image error\n");
+ goto err_out1;
+ }
+
+ kfree(fimage);
+ return 0;
+
+err_out2:
+ err = gs_release_image(fimage);
+ if (err)
+ pr_err("gs_release_image error\n");
+err_out1:
+ kfree(fimage);
+
+err_out:
+ return -1;
+
+}
+
+static int __init gs_fpgaboot_init(void)
+{
+ int err, r;
+
+ r = -1;
+
+ pr_info("FPGA DOWNLOAD --->\n");
+ pr_info("built at %s UTC\n", __TIMESTAMP__);
+
+ pr_info("FPGA image file name: %s\n", file);
+
+ err = init_driver();
+ if (err != 0) {
+ pr_err("FPGA DRIVER INIT FAIL!!\n");
+ return r;
+ }
+
+ err = xl_init_io();
+ if (err) {
+ pr_err("GPIO INIT FAIL!!\n");
+ r = -1;
+ goto errout;
+ }
+
+ err = gs_fpgaboot();
+ if (err) {
+ pr_err("FPGA DOWNLOAD FAIL!!\n");
+ r = -1;
+ goto errout;
+ }
+
+ pr_info("FPGA DOWNLOAD DONE <---\n");
+
+ r = 0;
+ return r;
+
+errout:
+ finish_driver();
+
+ return r;
+}
+
+static void __exit gs_fpgaboot_exit(void)
+{
+ finish_driver();
+ pr_info("FPGA image download module removed\n");
+}
+
+module_init(gs_fpgaboot_init);
+module_exit(gs_fpgaboot_exit);
+
+MODULE_AUTHOR("Insop Song");
+MODULE_DESCRIPTION("Xilinx FPGA firmware download");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
new file mode 100644
index 000000000000..f41f4cc798cc
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -0,0 +1,56 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/firmware.h>
+
+#define MAX_STR 256
+
+enum fmt_image {
+ f_bit, /* only bitstream is supported */
+ f_rbt,
+ f_bin,
+ f_mcs,
+ f_hex,
+};
+
+enum mdownload {
+ m_systemmap, /* only system map is supported */
+ m_serial,
+ m_jtag,
+};
+
+/*
+ * xilinx fpgaimage information
+ * NOTE: use MAX_STR instead of dynamic alloc for simplicity
+ */
+struct fpgaimage {
+ enum fmt_image fmt_img;
+ enum mdownload dmethod;
+
+ const struct firmware *fw_entry;
+
+ /*
+ * the following fields can be read from the bitstream,
+ * but other image formats should provide them as well
+ */
+ char filename[MAX_STR];
+ char part[MAX_STR];
+ char date[MAX_STR];
+ char time[MAX_STR];
+ int32_t lendata;
+ char *fpgadata;
+};
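The filename/part/date/time/lendata fields above correspond to the tagged records of a Xilinx .bit file header; the sketch below follows commonly documented descriptions of that format and is an assumption, not something stated in this patch.

/*
 * Informal .bit header layout (assumed, for reference only):
 *   sync/length prologue
 *   'a' <len> design file name     -> filename[]
 *   'b' <len> part name            -> part[]
 *   'c' <len> build date           -> date[]
 *   'd' <len> build time           -> time[]
 *   'e' <u32 length> raw bitstream -> lendata, fpgadata
 */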
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
new file mode 100644
index 000000000000..b7be8e37b8d1
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+
+#include "io.h"
+
+#ifdef CONFIG_B4860G100
+static struct gpiobus gbus;
+#endif /* CONFIG_B4860G100 */
+
+static inline void byte0_out(unsigned char data);
+static inline void byte1_out(unsigned char data);
+static inline void xl_cclk_b(int32_t i);
+
+
+/* Assert and Deassert CCLK */
+void xl_shift_cclk(int count)
+{
+ int i;
+ for (i = 0; i < count; i++) {
+ xl_cclk_b(1);
+ xl_cclk_b(0);
+ }
+}
+
+int xl_supported_prog_bus_width(enum wbus bus_bytes)
+{
+ switch (bus_bytes) {
+ case bus_1byte:
+ break;
+ case bus_2byte:
+ break;
+ default:
+ pr_err("unsupported program bus width %d\n",
+ bus_bytes);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Serialize byte and clock each bit on target's DIN and CCLK pins */
+void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata)
+{
+ /*
+ * supports 1 and 2 bytes programming mode
+ */
+ if (likely(bus_byte == bus_2byte))
+ byte0_out(pdata[0]);
+
+ byte1_out(pdata[1]);
+ xl_shift_cclk(1);
+}
+
+/*
+ * generic bit swap for xilinx SYSTEMMAP FPGA programming
+ */
+static inline unsigned char bitswap(unsigned char s)
+{
+ unsigned char d;
+ d = (((s&0x80)>>7) | ((s&0x40)>>5) | ((s&0x20)>>3) | ((s&0x10)>>1) |
+ ((s&0x08)<<1) | ((s&0x04)<<3) | ((s&0x02)<<5) | ((s&0x01)<<7));
+ return d;
+}
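A quick worked example of the reversal, added for illustration only:

/*
 * Example (not part of the patch):
 *   bitswap(0xC1): 1100 0001 -> 1000 0011 = 0x83
 * i.e. bit 7 moves to bit 0, bit 6 to bit 1, and so on.
 */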
+
+#ifdef CONFIG_B4860G100
+/*
+ * ======================================================================
+ * board specific configuration
+ */
+
+static inline void mpc85xx_gpio_set_dir(
+ int32_t port,
+ uint32_t mask,
+ uint32_t dir)
+{
+ dir |= (in_be32(gbus.r[port]+GPDIR) & ~mask);
+ out_be32(gbus.r[port]+GPDIR, dir);
+}
+
+static inline void mpc85xx_gpio_set(int32_t port, uint32_t mask, uint32_t val)
+{
+ /* First mask off the unwanted parts of "dir" and "val" */
+ val &= mask;
+
+ /* Now read in the values we're supposed to preserve */
+ val |= (in_be32(gbus.r[port]+GPDAT) & ~mask);
+
+ out_be32(gbus.r[port]+GPDAT, val);
+}
+
+static inline uint32_t mpc85xx_gpio_get(int32_t port, uint32_t mask)
+{
+ /* Read the requested values */
+ return in_be32(gbus.r[port]+GPDAT) & mask;
+}
+
+static inline void mpc85xx_gpio_set_low(int32_t port, uint32_t gpios)
+{
+ mpc85xx_gpio_set(port, gpios, 0x00000000);
+}
+
+static inline void mpc85xx_gpio_set_high(int32_t port, uint32_t gpios)
+{
+ mpc85xx_gpio_set(port, gpios, 0xFFFFFFFF);
+}
+
+static inline void gpio_set_value(int32_t port, uint32_t gpio, uint32_t value)
+{
+ int32_t g;
+ g = 31 - gpio;
+ if (value)
+ mpc85xx_gpio_set_high(port, 1U << g);
+ else
+ mpc85xx_gpio_set_low(port, 1U << g);
+}
+
+static inline int gpio_get_value(int32_t port, uint32_t gpio)
+{
+ int32_t g;
+ g = 31 - gpio;
+ return !!mpc85xx_gpio_get(port, 1U << g);
+}
+
+static inline void xl_cclk_b(int32_t i)
+{
+ gpio_set_value(XL_CCLK_PORT, XL_CCLK_PIN, i);
+}
+
+void xl_program_b(int32_t i)
+{
+ gpio_set_value(XL_PROGN_PORT, XL_PROGN_PIN, i);
+}
+
+void xl_rdwr_b(int32_t i)
+{
+ gpio_set_value(XL_RDWRN_PORT, XL_RDWRN_PIN, i);
+}
+
+void xl_csi_b(int32_t i)
+{
+ gpio_set_value(XL_CSIN_PORT, XL_CSIN_PIN, i);
+}
+
+int xl_get_init_b(void)
+{
+ return gpio_get_value(XL_INITN_PORT, XL_INITN_PIN);
+}
+
+int xl_get_done_b(void)
+{
+ return gpio_get_value(XL_DONE_PORT, XL_DONE_PIN);
+}
+
+
+/* G100 specific bit swap and remap (to gpio pins) for byte 0 */
+static inline uint32_t bit_remap_byte0(uint32_t s)
+{
+ uint32_t d;
+ d = (((s&0x80)>>7) | ((s&0x40)>>5) | ((s&0x20)>>3) | ((s&0x10)>>1) |
+ ((s&0x08)<<1) | ((s&0x04)<<3) | ((s&0x02)<<6) | ((s&0x01)<<9));
+ return d;
+}
+
+/*
+ * G100 specific: byte0 is the MSB of the [byte0 | byte1] pair driven out
+ */
+static inline void byte0_out(unsigned char data)
+{
+ uint32_t swap32;
+ swap32 = bit_remap_byte0((uint32_t) data) << 8;
+
+ mpc85xx_gpio_set(0, 0x0002BF00, (uint32_t) swap32);
+}
+
+/*
+ * G100 specific: byte1 is the LSB of the [byte0 | byte1] pair driven out
+ */
+static inline void byte1_out(unsigned char data)
+{
+ mpc85xx_gpio_set(0, 0x000000FF, (uint32_t) bitswap(data));
+}
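As a cross-check (derived by hand from the code above, not taken from the patch), the remap and the GPDAT mask used in byte0_out() are consistent:

/*
 * After bit_remap_byte0() and the << 8 shift, the eight data bits land on
 * GPDAT bits 8, 9, 10, 11, 12, 13, 15 and 17, and
 *   BIT(8..13) | BIT(15) | BIT(17) == 0x0002BF00
 * which is exactly the mask passed to mpc85xx_gpio_set() in byte0_out(),
 * so the byte-1 data lines (mask 0x000000FF) are left untouched.
 */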
+
+/*
+ * configurable per device type for different I/O config
+ */
+int xl_init_io(void)
+{
+ struct device_node *np;
+ const u32 *p_reg;
+ int reg, cnt;
+
+ cnt = 0;
+ memset(&gbus, 0, sizeof(struct gpiobus));
+ for_each_compatible_node(np, NULL, "fsl,qoriq-gpio") {
+ p_reg = of_get_property(np, "reg", NULL);
+ if (p_reg == NULL)
+ break;
+ reg = (int) *p_reg;
+ gbus.r[cnt] = of_iomap(np, 0);
+
+ if (!gbus.r[cnt]) {
+ pr_err("cannot find gpio cell-index %d\n", cnt);
+ return -ENODEV;
+ }
+ cnt++;
+ }
+ mpc85xx_gpio_set_dir(0, 0x0002BFFF, 0x0002BFFF);
+ mpc85xx_gpio_set_dir(1, 0x00240060, 0x00240060);
+
+ gbus.ngpio = cnt;
+
+ return 0;
+}
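A short derivation of the two direction masks, assuming the same "31 - gpio" bit numbering used by gpio_set_value() above:

/*
 * port 0: 0x0002BFFF = byte0 data lines (0x0002BF00) | byte1 data lines (0xFF)
 * port 1: 0x00240060 = CCLK (pin 10 -> bit 21) | RDWR_B (pin 13 -> bit 18) |
 *                      PROG_B (pin 25 -> bit 6) | CSI_B (pin 26 -> bit 5)
 * INIT_B (pin 14) and DONE (pin 27) remain inputs, so they are not part of
 * the port 1 output mask.
 */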
+
+
+#else /* placeholder for boards with different config */
+
+void xl_program_b(int32_t i)
+{
+ return;
+}
+
+void xl_rdwr_b(int32_t i)
+{
+ return;
+}
+
+void xl_csi_b(int32_t i)
+{
+ return;
+}
+
+int xl_get_init_b(void)
+{
+ return -1;
+}
+
+int xl_get_done_b(void)
+{
+ return -1;
+}
+
+static inline void byte0_out(unsigned char data)
+{
+ return;
+}
+
+static inline void byte1_out(unsigned char data)
+{
+ return;
+}
+
+static inline void xl_cclk_b(int32_t i)
+{
+ return;
+}
+
+/*
+ * configurable per device type for different I/O config
+ */
+int xl_init_io(void)
+{
+ return -1;
+}
+
+#endif /* CONFIG_B4860G100 */
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
new file mode 100644
index 000000000000..7b46ac24b74e
--- /dev/null
+++ b/drivers/staging/gs_fpgaboot/io.h
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define GPDIR 0
+#define GPCFG 4 /* open drain or not */
+#define GPDAT 8
+
+/*
+ * gpio port and pin definitions
+ * NOTE: port number starts from 0
+ */
+#define XL_INITN_PORT 1
+#define XL_INITN_PIN 14
+#define XL_RDWRN_PORT 1
+#define XL_RDWRN_PIN 13
+#define XL_CCLK_PORT 1
+#define XL_CCLK_PIN 10
+#define XL_PROGN_PORT 1
+#define XL_PROGN_PIN 25
+#define XL_CSIN_PORT 1
+#define XL_CSIN_PIN 26
+#define XL_DONE_PORT 1
+#define XL_DONE_PIN 27
+
+/*
+ * gpio mapping
+ *
+ XL_config_D0 – gpio1_31
+ Xl_config_d1 – gpio1_30
+ Xl_config_d2 – gpio1_29
+ Xl_config_d3 – gpio1_28
+ Xl_config_d4 – gpio1_27
+ Xl_config_d5 – gpio1_26
+ Xl_config_d6 – gpio1_25
+ Xl_config_d7 – gpio1_24
+ Xl_config_d8 – gpio1_23
+ Xl_config_d9 – gpio1_22
+ Xl_config_d10 – gpio1_21
+ Xl_config_d11 – gpio1_20
+ Xl_config_d12 – gpio1_19
+ Xl_config_d13 – gpio1_18
+ Xl_config_d14 – gpio1_16
+ Xl_config_d15 – gpio1_14
+ *
+ */
+
+/*
+ * program bus width in bytes
+ */
+enum wbus {
+ bus_1byte = 1,
+ bus_2byte = 2,
+};
+
+
+#define MAX_WAIT_DONE 10000
+
+
+struct gpiobus {
+ int ngpio;
+ void __iomem *r[4];
+};
+
+int xl_supported_prog_bus_width(enum wbus bus_bytes);
+
+void xl_program_b(int32_t i);
+void xl_rdwr_b(int32_t i);
+void xl_csi_b(int32_t i);
+
+int xl_get_init_b(void);
+int xl_get_done_b(void);
+
+void xl_shift_cclk(int count);
+void xl_shift_bytes_out(enum wbus bus_byte, unsigned char *pdata);
+
+int xl_init_io(void);
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..2064839ef2cd 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
@@ -652,3 +652,25 @@ error_free:
free(temp);
return ret;
}
+
+int read_sysfs_string(const char *filename, const char *basedir, char *str)
+{
+ int ret = 0;
+ FILE *sysfsfp;
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL) {
+ printf("Memory allocation failed");
+ return -ENOMEM;
+ }
+ sprintf(temp, "%s/%s", basedir, filename);
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL) {
+ ret = -errno;
+ goto error_free;
+ }
+ fscanf(sysfsfp, "%s\n", str);
+ fclose(sysfsfp);
+error_free:
+ free(temp);
+ return ret;
+}
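A minimal usage sketch for the helper above; it mirrors the call sites added in lsiio.c below. The caller must supply a buffer of at least IIO_MAX_NAME_LENGTH bytes, dev_dir_name stands for any sysfs device directory, and fscanf("%s", ...) stops at the first whitespace.

	char name[IIO_MAX_NAME_LENGTH];

	if (read_sysfs_string("name", dev_dir_name, name) >= 0)
		printf("device name: %s\n", name);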
diff --git a/drivers/staging/iio/Documentation/lsiio.c b/drivers/staging/iio/Documentation/lsiio.c
new file mode 100644
index 000000000000..24ae9694eb41
--- /dev/null
+++ b/drivers/staging/iio/Documentation/lsiio.c
@@ -0,0 +1,157 @@
+/*
+ * Industrial I/O utilities - lsiio.c
+ *
+ * Copyright (c) 2010 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/dir.h>
+#include "iio_utils.h"
+
+
+static enum verbosity {
+ VERBLEVEL_DEFAULT, /* 0 gives lspci behaviour */
+ VERBLEVEL_SENSORS, /* 1 lists sensors */
+} verblevel = VERBLEVEL_DEFAULT;
+
+const char *type_device = "iio:device";
+const char *type_trigger = "trigger";
+
+
+static inline int check_prefix(const char *str, const char *prefix)
+{
+ return strlen(str) > strlen(prefix) &&
+ strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+static inline int check_postfix(const char *str, const char *postfix)
+{
+ return strlen(str) > strlen(postfix) &&
+ strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
+}
+
+static int dump_channels(const char *dev_dir_name)
+{
+ DIR *dp;
+ const struct dirent *ent;
+ dp = opendir(dev_dir_name);
+ if (dp == NULL)
+ return -errno;
+ while (ent = readdir(dp), ent != NULL)
+ if (check_prefix(ent->d_name, "in_") &&
+ check_postfix(ent->d_name, "_raw")) {
+ printf(" %-10s\n", ent->d_name);
+ }
+
+ return 0;
+}
+
+static int dump_one_device(const char *dev_dir_name)
+{
+ char name[IIO_MAX_NAME_LENGTH];
+ int dev_idx;
+
+ sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
+ "%i", &dev_idx);
+ read_sysfs_string("name", dev_dir_name, name);
+ printf("Device %03d: %s\n", dev_idx, name);
+
+ if (verblevel >= VERBLEVEL_SENSORS) {
+ int ret = dump_channels(dev_dir_name);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int dump_one_trigger(const char *dev_dir_name)
+{
+ char name[IIO_MAX_NAME_LENGTH];
+ int dev_idx;
+
+ sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+ "%i", &dev_idx);
+ read_sysfs_string("name", dev_dir_name, name);
+ printf("Trigger %03d: %s\n", dev_idx, name);
+ return 0;
+}
+
+static void dump_devices(void)
+{
+ const struct dirent *ent;
+ int number, numstrlen;
+
+ FILE *nameFile;
+ DIR *dp;
+ char thisname[IIO_MAX_NAME_LENGTH];
+ char *filename;
+
+ dp = opendir(iio_dir);
+ if (dp == NULL) {
+ printf("No industrial I/O devices available\n");
+ return;
+ }
+
+ while (ent = readdir(dp), ent != NULL) {
+ if (check_prefix(ent->d_name, type_device)) {
+ char *dev_dir_name;
+ asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
+ dump_one_device(dev_dir_name);
+ free(dev_dir_name);
+ if (verblevel >= VERBLEVEL_SENSORS)
+ printf("\n");
+ }
+ }
+ rewinddir(dp);
+ while (ent = readdir(dp), ent != NULL) {
+ if (check_prefix(ent->d_name, type_trigger)) {
+ char *dev_dir_name;
+ asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
+ dump_one_trigger(dev_dir_name);
+ free(dev_dir_name);
+ }
+ }
+ closedir(dp);
+}
+
+int main(int argc, char **argv)
+{
+ int c, err = 0;
+
+ while ((c = getopt(argc, argv, "d:D:v")) != EOF) {
+ switch (c) {
+ case 'v':
+ verblevel++;
+ break;
+
+ case '?':
+ default:
+ err++;
+ break;
+ }
+ }
+ if (err || argc > optind) {
+ fprintf(stderr, "Usage: lsiio [options]...\n"
+ "List industrial I/O devices\n"
+ " -v, --verbose\n"
+ " Increase verbosity (may be given multiple times)\n"
+ );
+ exit(1);
+ }
+
+ dump_devices();
+
+ return 0;
+}
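For reference, a hypothetical session showing the output format produced by the printf() calls above; the device and trigger names are invented.

/*
 *   $ lsiio -v
 *   Device 000: ad7991
 *      in_voltage0_raw
 *      in_voltage1_raw
 *
 *   Trigger 000: sysfstrig0
 */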
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index c1016c510dae..b284e5a6cac1 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -65,7 +65,8 @@
#define SCA3000_RING_BUF_ENABLE 0x80
#define SCA3000_RING_BUF_8BIT 0x40
-/* Free fall detection triggers an interrupt if the acceleration
+/*
+ * Free fall detection triggers an interrupt if the acceleration
* is below a threshold for equivalent of 25cm drop
*/
#define SCA3000_FREE_FALL_DETECT 0x10
@@ -73,8 +74,9 @@
#define SCA3000_MEAS_MODE_OP_1 0x01
#define SCA3000_MEAS_MODE_OP_2 0x02
-/* In motion detection mode the accelerations are band pass filtered
- * (aprox 1 - 25Hz) and then a programmable threshold used to trigger
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and then a programmable threshold used to trigger
* and interrupt.
*/
#define SCA3000_MEAS_MODE_MOT_DET 0x03
@@ -99,8 +101,10 @@
#define SCA3000_REG_CTRL_SEL_MD_Y_TH 0x03
#define SCA3000_REG_CTRL_SEL_MD_X_TH 0x04
#define SCA3000_REG_CTRL_SEL_MD_Z_TH 0x05
-/* BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- will not function */
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
#define SCA3000_REG_CTRL_SEL_OUT_CTRL 0x0B
#define SCA3000_OUT_CTRL_PROT_MASK 0xE0
#define SCA3000_OUT_CTRL_BUF_X_EN 0x10
@@ -109,8 +113,9 @@
#define SCA3000_OUT_CTRL_BUF_DIV_4 0x02
#define SCA3000_OUT_CTRL_BUF_DIV_2 0x01
-/* Control which motion detector interrupts are on.
- * For now only OR combinations are supported.x
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
*/
#define SCA3000_MD_CTRL_PROT_MASK 0xC0
#define SCA3000_MD_CTRL_OR_Y 0x01
@@ -121,7 +126,8 @@
#define SCA3000_MD_CTRL_AND_X 0x10
#define SAC3000_MD_CTRL_AND_Z 0x20
-/* Some control registers of complex access methods requiring this register to
+/*
+ * Some control registers of complex access methods requiring this register to
* be used to remove a lock.
*/
#define SCA3000_REG_ADDR_UNLOCK 0x1e
@@ -139,7 +145,8 @@
/* Values of multiplexed registers (write to ctrl_data after select) */
#define SCA3000_REG_ADDR_CTRL_DATA 0x22
-/* Measurement modes available on some sca3000 series chips. Code assumes others
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
* may become available in the future.
*
* Bypass - Bypass the low-pass filter in the signal channel so as to increase
@@ -160,7 +167,6 @@
* struct sca3000_state - device instance state information
* @us: the associated spi device
* @info: chip variant information
- * @indio_dev: device information used by the IIO core
* @interrupt_handler_ws: event interrupt handler for all events
* @last_timestamp: the timestamp of the last event
* @mo_det_use_count: reference counter for the motion detection unit
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 7f6ccdfaf168..ed30e32e60de 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -32,7 +32,8 @@ enum sca3000_variant {
e05,
};
-/* Note where option modes are not defined, the chip simply does not
+/*
+ * Note where option modes are not defined, the chip simply does not
* support any.
* Other chips in the sca3000 series use i2c and are not included here.
*
@@ -191,7 +192,6 @@ error_ret:
return ret;
}
-/* Crucial that lock is called before calling this */
/**
* sca3000_read_ctrl_reg() read from lock protected control register.
*
@@ -250,9 +250,8 @@ error_ret:
}
#endif /* SCA3000_DEBUG */
-
/**
- * sca3000_show_reg() - sysfs interface to read the chip revision number
+ * sca3000_show_rev() - sysfs interface to read the chip revision number
**/
static ssize_t sca3000_show_rev(struct device *dev,
struct device_attribute *attr,
@@ -312,7 +311,7 @@ sca3000_show_available_measurement_modes(struct device *dev,
}
/**
- * sca3000_show_measurmenet_mode() sysfs read of current mode
+ * sca3000_show_measurement_mode() sysfs read of current mode
**/
static ssize_t
sca3000_show_measurement_mode(struct device *dev,
@@ -403,7 +402,8 @@ error_ret:
}
-/* Not even vaguely standard attributes so defined here rather than
+/*
+ * Not even vaguely standard attributes so defined here rather than
* in the relevant IIO core headers
*/
static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
@@ -450,6 +450,18 @@ static const struct iio_chan_spec sca3000_channels[] = {
SCA3000_CHAN(2, IIO_MOD_Z),
};
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+ SCA3000_CHAN(0, IIO_MOD_X),
+ SCA3000_CHAN(1, IIO_MOD_Y),
+ SCA3000_CHAN(2, IIO_MOD_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+};
+
static u8 sca3000_addresses[3][3] = {
[0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
SCA3000_MD_CTRL_OR_X},
@@ -472,19 +484,30 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&st->lock);
- if (st->mo_det_use_count) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
- address = sca3000_addresses[chan->address][0];
- ret = sca3000_read_data_short(st, address, 2);
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
+ if (chan->type == IIO_ACCEL) {
+ if (st->mo_det_use_count) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+ address = sca3000_addresses[chan->address][0];
+ ret = sca3000_read_data_short(st, address, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
+ *val = ((*val) << (sizeof(*val)*8 - 13)) >>
+ (sizeof(*val)*8 - 13);
+ } else {
+ /* get the temperature when available */
+ ret = sca3000_read_data_short(st,
+ SCA3000_REG_ADDR_TEMP_MSB, 2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ *val = ((st->rx[0] & 0x3F) << 3) | ((st->rx[1] & 0xE0) >> 5);
}
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = ((*val) << (sizeof(*val)*8 - 13)) >>
- (sizeof(*val)*8 - 13);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -494,6 +517,10 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
else /* temperature */
*val2 = 555556;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -214;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -547,7 +574,7 @@ error_ret:
return ret;
}
/**
- * __sca3000_get_base_frequency() obtain mode specific base frequency
+ * __sca3000_get_base_freq() obtain mode specific base frequency
*
* lock must be held
**/
@@ -663,7 +690,8 @@ error_free_lock:
return ret ? ret : len;
}
-/* Should only really be registered if ring buffer support is compiled in.
+/*
+ * Should only really be registered if ring buffer support is compiled in.
* Does no harm however and doing it right would add a fair bit of complexity
*/
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
@@ -672,37 +700,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
sca3000_read_frequency,
sca3000_set_frequency);
-
-/**
- * sca3000_read_temp() sysfs interface to get the temperature when available
- *
-* The alignment of data in here is downright odd. See data sheet.
-* Converting this into a meaningful value is left to inline functions in
-* userspace part of header.
-**/
-static ssize_t sca3000_read_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct sca3000_state *st = iio_priv(indio_dev);
- int ret;
- int val;
- ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_TEMP_MSB, 2);
- if (ret < 0)
- goto error_ret;
- val = ((st->rx[0] & 0x3F) << 3) | ((st->rx[1] & 0xE0) >> 5);
-
- return sprintf(buf, "%d\n", val);
-
-error_ret:
- return ret;
-}
-static IIO_DEV_ATTR_TEMP_RAW(sca3000_read_temp);
-
-static IIO_CONST_ATTR_TEMP_SCALE("0.555556");
-static IIO_CONST_ATTR_TEMP_OFFSET("-214.6");
-
/**
* sca3000_read_thresh() - query of a threshold
**/
@@ -782,33 +779,16 @@ static struct attribute *sca3000_attributes[] = {
NULL,
};
-static struct attribute *sca3000_attributes_with_temp[] = {
- &iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_measurement_mode_available.dev_attr.attr,
- &iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- /* Only present if temp sensor is */
- &iio_dev_attr_in_temp_raw.dev_attr.attr,
- &iio_const_attr_in_temp_offset.dev_attr.attr,
- &iio_const_attr_in_temp_scale.dev_attr.attr,
- NULL,
-};
-
static const struct attribute_group sca3000_attribute_group = {
.attrs = sca3000_attributes,
};
-static const struct attribute_group sca3000_attribute_group_with_temp = {
- .attrs = sca3000_attributes_with_temp,
-};
-
-/* RING RELATED interrupt handler */
-/* depending on event, push to the ring buffer event chrdev or the event one */
-
/**
* sca3000_event_handler() - handling ring and non ring events
*
+ * Ring related interrupt handler. Depending on event, push to
+ * the ring buffer event chrdev or the event one.
+ *
* This function is complicated by the fact that the devices can signify ring
* and non ring events via the same interrupt line and they can only
* be distinguished via a read of the relevant status register.
@@ -820,7 +800,8 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
int ret, val;
s64 last_timestamp = iio_get_time_ns();
- /* Could lead if badly timed to an extra read of status reg,
+ /*
+ * Could lead if badly timed to an extra read of status reg,
* but ensures no interrupt is missed.
*/
mutex_lock(&st->lock);
@@ -935,7 +916,6 @@ static ssize_t sca3000_query_free_fall_mode(struct device *dev,
* the device falls more than 25cm. This has not been tested due
* to fragile wiring.
**/
-
static ssize_t sca3000_set_free_fall_mode(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -957,7 +937,7 @@ static ssize_t sca3000_set_free_fall_mode(struct device *dev,
if (ret)
goto error_ret;
- /*if off and should be on*/
+ /* if off and should be on */
if (val && !(st->rx[0] & protect_mask))
ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
(st->rx[0] | SCA3000_FREE_FALL_DETECT));
@@ -972,7 +952,7 @@ error_ret:
}
/**
- * sca3000_set_mo_det() simple on off control for motion detector
+ * sca3000_write_event_config() simple on off control for motion detector
*
* This is a per axis control, but enabling any will result in the
* motion detector unit being enabled.
@@ -992,13 +972,15 @@ static int sca3000_write_event_config(struct iio_dev *indio_dev,
int num = chan->channel2;
mutex_lock(&st->lock);
- /* First read the motion detector config to find out if
- * this axis is on*/
+ /*
+ * First read the motion detector config to find out if
+ * this axis is on
+ */
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
if (ret < 0)
goto exit_point;
ctrlval = ret;
- /* Off and should be on */
+ /* if off and should be on */
if (state && !(ctrlval & sca3000_addresses[num][2])) {
ret = sca3000_write_ctrl_reg(st,
SCA3000_REG_CTRL_SEL_MD_CTRL,
@@ -1021,7 +1003,7 @@ static int sca3000_write_event_config(struct iio_dev *indio_dev,
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto exit_point;
- /*if off and should be on*/
+ /* if off and should be on */
if ((st->mo_det_use_count)
&& ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
@@ -1067,7 +1049,7 @@ static struct attribute_group sca3000_event_attribute_group = {
* Devices use flash memory to store many of the register values
* and hence can come up in somewhat unpredictable states.
* Hence reset everything on driver load.
- **/
+ **/
static int sca3000_clean_setup(struct sca3000_state *st)
{
int ret;
@@ -1107,9 +1089,11 @@ static int sca3000_clean_setup(struct sca3000_state *st)
| SCA3000_INT_MASK_ACTIVE_LOW);
if (ret)
goto error_ret;
- /* Select normal measurement mode, free fall off, ring off */
- /* Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
- * as that occurs in one of the example on the datasheet */
+ /*
+ * Select normal measurement mode, free fall off, ring off
+ * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+ * as that occurs in one of the example on the datasheet
+ */
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto error_ret;
@@ -1133,16 +1117,6 @@ static const struct iio_info sca3000_info = {
.driver_module = THIS_MODULE,
};
-static const struct iio_info sca3000_info_with_temp = {
- .attrs = &sca3000_attribute_group_with_temp,
- .read_raw = &sca3000_read_raw,
- .read_event_value = &sca3000_read_thresh,
- .write_event_value = &sca3000_write_thresh,
- .read_event_config = &sca3000_read_event_config,
- .write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
-};
-
static int sca3000_probe(struct spi_device *spi)
{
int ret;
@@ -1162,10 +1136,12 @@ static int sca3000_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- if (st->info->temp_output)
- indio_dev->info = &sca3000_info_with_temp;
- else {
- indio_dev->info = &sca3000_info;
+ indio_dev->info = &sca3000_info;
+ if (st->info->temp_output) {
+ indio_dev->channels = sca3000_channels_with_temp;
+ indio_dev->num_channels =
+ ARRAY_SIZE(sca3000_channels_with_temp);
+ } else {
indio_dev->channels = sca3000_channels;
indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
}
@@ -1236,7 +1212,7 @@ static int sca3000_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct sca3000_state *st = iio_priv(indio_dev);
- /* Must ensure no interrupts can be generated after this!*/
+ /* Must ensure no interrupts can be generated after this! */
sca3000_stop_all_interrupts(st);
if (spi->irq)
free_irq(spi->irq, indio_dev);
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index ea0af6d81d2b..198710651e0e 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -309,7 +309,7 @@ int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
if (ret)
goto error_ret;
if (state) {
- printk(KERN_INFO "supposedly enabling ring buffer\n");
+ dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
ret = sca3000_write_reg(st,
SCA3000_REG_ADDR_MODE,
(st->rx[0] | SCA3000_RING_BUF_ENABLE));
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index e3d643001952..363329808a4f 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -103,6 +103,7 @@ config AD7280
config LPC32XX_ADC
tristate "NXP LPC32XX ADC"
depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for the integrated ADC inside the
LPC32XX SoC. Note that this feature uses the same hardware as the
@@ -128,6 +129,7 @@ config MXS_LRADC
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for the integrated ADC inside the
ST SPEAr SoC. Provides direct access via sysfs.
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index a591aa6feae1..fc8c85298feb 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -95,7 +95,7 @@ struct ad799x_state {
struct i2c_client *client;
const struct ad799x_chip_info *chip_info;
struct regulator *reg;
- u16 int_vref_mv;
+ struct regulator *vref;
unsigned id;
u16 config;
@@ -103,14 +103,6 @@ struct ad799x_state {
unsigned int transfer_size;
};
-/*
- * TODO: struct ad799x_platform_data needs to go into include/linux/iio
- */
-
-struct ad799x_platform_data {
- u16 vref_mv;
-};
-
#ifdef CONFIG_AD799X_RING_BUFFER
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..979ec77d6c2d 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -1,6 +1,6 @@
/*
* iio/adc/ad799x.c
- * Copyright (C) 2010-1011 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* based on iio/adc/max1363
* Copyright (C) 2008-2010 Jonathan Cameron
@@ -179,7 +179,10 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = st->int_vref_mv;
+ ret = regulator_get_voltage(st->vref);
+ if (ret < 0)
+ return ret;
+ *val = ret / 1000;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -393,7 +396,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +412,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -527,7 +536,6 @@ static int ad799x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
- struct ad799x_platform_data *pdata = client->dev.platform_data;
struct ad799x_state *st;
struct iio_dev *indio_dev;
@@ -545,17 +553,21 @@ static int ad799x_probe(struct i2c_client *client,
/* TODO: Add pdata options for filtering and bit delay */
- if (!pdata)
- return -EINVAL;
-
- st->int_vref_mv = pdata->vref_mv;
-
st->reg = devm_regulator_get(&client->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
- if (ret)
- return ret;
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ st->vref = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(st->vref)) {
+ ret = PTR_ERR(st->vref);
+ goto error_disable_reg;
}
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto error_disable_reg;
+
st->client = client;
indio_dev->dev.parent = &client->dev;
@@ -571,27 +583,28 @@ static int ad799x_probe(struct i2c_client *client,
goto error_disable_reg;
if (client->irq > 0) {
- ret = request_threaded_irq(client->irq,
- NULL,
- ad799x_event_handler,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- client->name,
- indio_dev);
+ ret = devm_request_threaded_irq(&client->dev,
+ client->irq,
+ NULL,
+ ad799x_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ client->name,
+ indio_dev);
if (ret)
goto error_cleanup_ring;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq;
+ goto error_cleanup_ring;
return 0;
-error_free_irq:
- free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
+ if (!IS_ERR(st->vref))
+ regulator_disable(st->vref);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -604,10 +617,10 @@ static int ad799x_remove(struct i2c_client *client)
struct ad799x_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (client->irq > 0)
- free_irq(client->irq, indio_dev);
ad799x_ring_cleanup(indio_dev);
+ if (!IS_ERR(st->vref))
+ regulator_disable(st->vref);
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
kfree(st->rx_buf);
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..11fb95201545 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -757,6 +757,7 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
}
/* if it is released, wait for the next touch via IRQ */
+ lradc->cur_plate = LRADC_TOUCH;
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
@@ -846,7 +847,8 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
/* Clean the slot's previous content, then set new one. */
- mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4);
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
+ LRADC_CTRL4);
mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);
mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));
@@ -897,10 +899,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
- /* Check for invalid channel */
- if (chan->channel > LRADC_MAX_TOTAL_CHANS)
- return -EINVAL;
-
switch (m) {
case IIO_CHAN_INFO_RAW:
if (chan->type == IIO_TEMP)
@@ -1035,8 +1033,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1049,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1176,7 +1170,8 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
complete(&lradc->completion);
- mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
+ LRADC_CTRL1);
return IRQ_HANDLED;
}
@@ -1267,7 +1262,8 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
uint32_t ctrl1_irq = 0;
const uint32_t chan_value = LRADC_CH_ACCUMULATE |
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
- const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
+ const int len = bitmap_weight(iio->active_scan_mask,
+ LRADC_MAX_TOTAL_CHANS);
if (!len)
return -EINVAL;
@@ -1566,7 +1562,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
for (i = 0; i < of_cfg->irq_count; i++) {
lradc->irq[i] = platform_get_irq(pdev, i);
if (lradc->irq[i] < 0)
- return -EINVAL;
+ return lradc->irq[i];
ret = devm_request_irq(dev, lradc->irq[i],
mxs_lradc_handle_irq, 0,
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
* of the array.
*/
scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (iio->channels[i].scan_type.realbits - s);
+ (LRADC_RESOLUTION - s);
lradc->scale_avail[i][s].nano =
do_div(scale_uv, 100000000) * 10;
lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 16a8201228ff..9f0ebb329008 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -859,11 +859,14 @@ static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
else {
switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
case ADT7316_DA_EN_MODE_SINGLE:
- return sprintf(buf, "0 - auto at any MSB DAC writing\n");
+ return sprintf(buf,
+ "0 - auto at any MSB DAC writing\n");
case ADT7316_DA_EN_MODE_AB_CD:
- return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n");
+ return sprintf(buf,
+ "1 - auto at MSB DAC AB and CD writing\n");
case ADT7316_DA_EN_MODE_ABCD:
- return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n");
+ return sprintf(buf,
+ "2 - auto at MSB DAC ABCD writing\n");
default: /* ADT7316_DA_EN_MODE_LDAC */
return sprintf(buf, "3 - manual\n");
}
@@ -1102,7 +1105,8 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config | ADT7316_DAC_IN_VREF;
}
- ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config);
+ ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG,
+ ldac_config);
if (ret)
return -EIO;
@@ -1224,7 +1228,8 @@ static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
}
-static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1,
+ NULL, 0);
static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
static ssize_t adt7316_show_AIN2(struct device *dev,
@@ -1319,7 +1324,8 @@ static ssize_t adt7316_store_in_temp_offset(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len);
+ return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf,
+ len);
}
static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
@@ -1344,7 +1350,8 @@ static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len);
+ return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf,
+ len);
}
static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index 2dbfb499528d..ec50bf34628d 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -18,10 +18,10 @@ struct adt7316_bus {
void *client;
int irq;
int irq_flags;
- int (*read) (void *client, u8 reg, u8 *data);
- int (*write) (void *client, u8 reg, u8 val);
- int (*multi_read) (void *client, u8 first_reg, u8 count, u8 *data);
- int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
+ int (*read)(void *client, u8 reg, u8 *data);
+ int (*write)(void *client, u8 reg, u8 val);
+ int (*multi_read)(void *client, u8 first_reg, u8 count, u8 *data);
+ int (*multi_write)(void *client, u8 first_reg, u8 count, u8 *data);
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index f8c659568c38..0a60def92735 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -433,7 +433,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
- return -1;
+ return ret;
}
/* Use the following shadow copy for our delay before enabling ADC.
@@ -445,7 +445,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
if (ret < 0) {
dev_err(&chip->client->dev,
"taos_chip_on failed on reg %d.\n", i);
- return -1;
+ return ret;
}
}
@@ -458,7 +458,7 @@ static int taos_chip_on(struct iio_dev *indio_dev)
utmp);
if (ret < 0) {
dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
- return -1;
+ return ret;
}
chip->taos_chip_status = TSL258X_CHIP_WORKING;
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 1e538086d48b..9e0f2a9c73ae 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -352,7 +352,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* device is not enabled */
dev_err(&chip->client->dev, "%s: device is not enabled\n",
__func__);
- ret = -EBUSY ;
+ ret = -EBUSY;
goto out_unlock;
}
@@ -1507,16 +1507,16 @@ static int tsl2x7x_device_id(unsigned char *id, int target)
case tsl2571:
case tsl2671:
case tsl2771:
- return ((*id & 0xf0) == TRITON_ID);
+ return (*id & 0xf0) == TRITON_ID;
case tmd2671:
case tmd2771:
- return ((*id & 0xf0) == HALIBUT_ID);
+ return (*id & 0xf0) == HALIBUT_ID;
case tsl2572:
case tsl2672:
case tmd2672:
case tsl2772:
case tmd2772:
- return ((*id & 0xf0) == SWORDFISH_ID);
+ return (*id & 0xf0) == SWORDFISH_ID;
}
return -EINVAL;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6966d5f76648..7fbaba41c872 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -312,7 +312,7 @@ static ssize_t ad2s1210_store_control(struct device *dev,
if (st->pdata->gpioin) {
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
+ pr_warn("ad2s1210: resolution settings not match\n");
} else
ad2s1210_set_resolution_pin(st);
@@ -372,7 +372,7 @@ static ssize_t ad2s1210_store_resolution(struct device *dev,
if (st->pdata->gpioin) {
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
+ pr_warn("ad2s1210: resolution settings not match\n");
} else
ad2s1210_set_resolution_pin(st);
ret = len;
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 78319ad176cd..c6e8ba7b3e4e 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -20,6 +20,7 @@ config DRM_IMX_FB_HELPER
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
+ select DRM_PANEL
depends on DRM_IMX
select VIDEOMODE_HELPERS
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 4677585b5ad5..129e3a3f59f1 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -1,12 +1,11 @@
-imxdrm-objs := imx-drm-core.o imx-fb.o
+imxdrm-objs := imx-drm-core.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
index 6a9da94c9573..29636fb13959 100644
--- a/drivers/staging/imx-drm/TODO
+++ b/drivers/staging/imx-drm/TODO
@@ -1,15 +1,10 @@
TODO:
- get DRM Maintainer review for this code
-- Wait for common display framework to hit mainline and update the IPU
- driver to use it. This will most probably make changes to the devicetree
- bindings necessary.
-- Factor out more code to common helper functions
- decide where to put the base driver. It is not specific to a subsystem
and would be used by DRM/KMS and media/V4L2
Missing features (not necessarily for moving out of staging):
-- Add i.MX6 HDMI support
- Add support for IC (Image converter)
- Add support for CSI (CMOS Sensor interface)
- Add support for VDIC (Video Deinterlacer)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..4144a75e5f71 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -13,14 +13,15 @@
* GNU General Public License for more details.
*
*/
-
+#include <linux/component.h>
#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
-#include <linux/module.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -28,45 +29,29 @@
#define MAX_CRTC 4
-struct crtc_cookie {
- void *cookie;
- int id;
+struct imx_drm_crtc;
+
+struct imx_drm_component {
+ struct device_node *of_node;
struct list_head list;
};
struct imx_drm_device {
struct drm_device *drm;
- struct device *dev;
- struct list_head crtc_list;
- struct list_head encoder_list;
- struct list_head connector_list;
- struct mutex mutex;
+ struct imx_drm_crtc *crtc[MAX_CRTC];
int pipes;
struct drm_fbdev_cma *fbhelper;
};
struct imx_drm_crtc {
struct drm_crtc *crtc;
- struct list_head list;
- struct imx_drm_device *imxdrm;
int pipe;
struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
- struct module *owner;
- struct crtc_cookie cookie;
+ struct device_node *port;
};
-struct imx_drm_encoder {
- struct drm_encoder *encoder;
- struct list_head list;
- struct module *owner;
- struct list_head possible_crtcs;
-};
-
-struct imx_drm_connector {
- struct drm_connector *connector;
- struct list_head list;
- struct module *owner;
-};
+static int legacyfb_depth = 16;
+module_param(legacyfb_depth, int, 0444);
int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
{
@@ -76,95 +61,96 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
struct imx_drm_device *imxdrm = drm->dev_private;
if (imxdrm->fbhelper)
drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+#endif
}
static int imx_drm_driver_unload(struct drm_device *drm)
{
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
struct imx_drm_device *imxdrm = drm->dev_private;
+#endif
- imx_drm_device_put();
+ drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
- drm_mode_config_cleanup(imxdrm->drm);
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ if (imxdrm->fbhelper)
+ drm_fbdev_cma_fini(imxdrm->fbhelper);
+#endif
+
+ component_unbind_all(drm->dev, drm);
+
+ drm_vblank_cleanup(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
-/*
- * We don't care at all for crtc numbers, but the core expects the
- * crtcs to be numbered
- */
-static struct imx_drm_crtc *imx_drm_crtc_by_num(struct imx_drm_device *imxdrm,
- int num)
+static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
{
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_device *imxdrm = crtc->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < MAX_CRTC; i++)
+ if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
+ return imxdrm->crtc[i];
- list_for_each_entry(imx_drm_crtc, &imxdrm->crtc_list, list)
- if (imx_drm_crtc->pipe == num)
- return imx_drm_crtc;
return NULL;
}
-int imx_drm_crtc_panel_format_pins(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format_pins(struct drm_encoder *encoder,
u32 interface_pix_fmt, int hsync_pin, int vsync_pin)
{
- struct imx_drm_device *imxdrm = crtc->dev->dev_private;
- struct imx_drm_crtc *imx_crtc;
struct imx_drm_crtc_helper_funcs *helper;
+ struct imx_drm_crtc *imx_crtc;
- list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list)
- if (imx_crtc->crtc == crtc)
- goto found;
+ imx_crtc = imx_drm_find_crtc(encoder->crtc);
+ if (!imx_crtc)
+ return -EINVAL;
- return -EINVAL;
-found:
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
- return helper->set_interface_pix_fmt(crtc,
- encoder_type, interface_pix_fmt,
+ return helper->set_interface_pix_fmt(encoder->crtc,
+ encoder->encoder_type, interface_pix_fmt,
hsync_pin, vsync_pin);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_panel_format_pins);
-int imx_drm_crtc_panel_format(struct drm_crtc *crtc, u32 encoder_type,
- u32 interface_pix_fmt)
+int imx_drm_panel_format(struct drm_encoder *encoder, u32 interface_pix_fmt)
{
- return imx_drm_crtc_panel_format_pins(crtc, encoder_type,
- interface_pix_fmt, 2, 3);
+ return imx_drm_panel_format_pins(encoder, interface_pix_fmt, 2, 3);
}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
+EXPORT_SYMBOL_GPL(imx_drm_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
{
struct imx_drm_device *imxdrm = drm->dev_private;
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
int ret;
- imx_drm_crtc = imx_drm_crtc_by_num(imxdrm, crtc);
if (!imx_drm_crtc)
return -EINVAL;
@@ -180,9 +166,8 @@ static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
static void imx_drm_disable_vblank(struct drm_device *drm, int crtc)
{
struct imx_drm_device *imxdrm = drm->dev_private;
- struct imx_drm_crtc *imx_drm_crtc;
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
- imx_drm_crtc = imx_drm_crtc_by_num(imxdrm, crtc);
if (!imx_drm_crtc)
return;
@@ -215,195 +200,54 @@ static const struct file_operations imx_drm_driver_fops = {
.llseek = noop_llseek,
};
-static struct imx_drm_device *imx_drm_device;
-
-static struct imx_drm_device *__imx_drm_device(void)
+int imx_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- return imx_drm_device;
+ return MODE_OK;
}
+EXPORT_SYMBOL(imx_drm_connector_mode_valid);
-struct drm_device *imx_drm_device_get(void)
+void imx_drm_connector_destroy(struct drm_connector *connector)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *enc;
- struct imx_drm_connector *con;
- struct imx_drm_crtc *crtc;
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list) {
- if (!try_module_get(enc->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(enc->owner));
- goto unwind_enc;
- }
- }
-
- list_for_each_entry(con, &imxdrm->connector_list, list) {
- if (!try_module_get(con->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(con->owner));
- goto unwind_con;
- }
- }
-
- list_for_each_entry(crtc, &imxdrm->crtc_list, list) {
- if (!try_module_get(crtc->owner)) {
- dev_err(imxdrm->dev, "could not get module %s\n",
- module_name(crtc->owner));
- goto unwind_crtc;
- }
- }
-
- return imxdrm->drm;
-
-unwind_crtc:
- list_for_each_entry_continue_reverse(crtc, &imxdrm->crtc_list, list)
- module_put(crtc->owner);
-unwind_con:
- list_for_each_entry_continue_reverse(con, &imxdrm->connector_list, list)
- module_put(con->owner);
-unwind_enc:
- list_for_each_entry_continue_reverse(enc, &imxdrm->encoder_list, list)
- module_put(enc->owner);
-
- mutex_unlock(&imxdrm->mutex);
-
- return NULL;
-
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
}
-EXPORT_SYMBOL_GPL(imx_drm_device_get);
+EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-void imx_drm_device_put(void)
+void imx_drm_encoder_destroy(struct drm_encoder *encoder)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *enc;
- struct imx_drm_connector *con;
- struct imx_drm_crtc *crtc;
-
- mutex_lock(&imxdrm->mutex);
-
- list_for_each_entry(crtc, &imxdrm->crtc_list, list)
- module_put(crtc->owner);
-
- list_for_each_entry(con, &imxdrm->connector_list, list)
- module_put(con->owner);
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list)
- module_put(enc->owner);
-
- mutex_unlock(&imxdrm->mutex);
+ drm_encoder_cleanup(encoder);
}
-EXPORT_SYMBOL_GPL(imx_drm_device_put);
+EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-static int drm_mode_group_reinit(struct drm_device *dev)
-{
- struct drm_mode_group *group = &dev->primary->mode_group;
- uint32_t *id_list = group->id_list;
- int ret;
-
- ret = drm_mode_group_init_legacy_group(dev, group);
- if (ret < 0)
- return ret;
-
- kfree(id_list);
- return 0;
-}
-
-/*
- * register an encoder to the drm core
- */
-static int imx_drm_encoder_register(struct imx_drm_encoder *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- INIT_LIST_HEAD(&imx_drm_encoder->possible_crtcs);
-
- drm_encoder_init(imxdrm->drm, imx_drm_encoder->encoder,
- imx_drm_encoder->encoder->funcs,
- imx_drm_encoder->encoder->encoder_type);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
- * unregister an encoder from the drm core
- */
-static void imx_drm_encoder_unregister(struct imx_drm_encoder
- *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_encoder_cleanup(imx_drm_encoder->encoder);
-
- drm_mode_group_reinit(imxdrm->drm);
-}
-
-/*
- * register a connector to the drm core
- */
-static int imx_drm_connector_register(
- struct imx_drm_connector *imx_drm_connector)
+static void imx_drm_output_poll_changed(struct drm_device *drm)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_connector_init(imxdrm->drm, imx_drm_connector->connector,
- imx_drm_connector->connector->funcs,
- imx_drm_connector->connector->connector_type);
- drm_mode_group_reinit(imxdrm->drm);
-
- return drm_sysfs_connector_add(imx_drm_connector->connector);
-}
-
-/*
- * unregister a connector from the drm core
- */
-static void imx_drm_connector_unregister(
- struct imx_drm_connector *imx_drm_connector)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- drm_sysfs_connector_remove(imx_drm_connector->connector);
- drm_connector_cleanup(imx_drm_connector->connector);
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ struct imx_drm_device *imxdrm = drm->dev_private;
- drm_mode_group_reinit(imxdrm->drm);
+ drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
+#endif
}
-/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
+static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = imx_drm_output_poll_changed,
+};
/*
- * Called by the CRTC driver when all CRTCs are registered. This
- * puts all the pieces together and initializes the driver.
- * Once this is called no more CRTCs can be registered since
- * the drm core has hardcoded the number of crtcs in several
- * places.
+ * Main DRM initialisation. This binds, initialises and registers
+ * the driver's subcomponents with DRM.
*/
static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
+ struct imx_drm_device *imxdrm;
+ struct drm_connector *connector;
int ret;
+ imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
+ if (!imxdrm)
+ return -ENOMEM;
+
imxdrm->drm = drm;
drm->dev_private = imxdrm;
@@ -419,139 +263,136 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
drm->irq_enabled = true;
- drm_mode_config_init(drm);
- imx_drm_mode_config_init(drm);
-
- mutex_lock(&imxdrm->mutex);
+ /*
+ * Set the default maximum width and height to 4096x4096. These
+ * values are used to check the framebuffer size limit in
+ * drm_mode_addfb().
+ */
+ drm->mode_config.min_width = 64;
+ drm->mode_config.min_height = 64;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &imx_drm_mode_config_funcs;
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_mode_config_init(drm);
- /* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ /*
+ * With vblank_disable_allowed = true, the vblank interrupt will be
+ * disabled by the DRM timer once the current process gives up
+ * ownership of the vblank event (i.e. after drm_vblank_put() is called).
+ */
+ drm->vblank_disable_allowed = true;
+
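
A minimal sketch of the get/put cycle this comment refers to, assuming the drm_vblank_get()/drm_vblank_put() API of this kernel generation; purely illustrative, not part of the patch:

#include <drm/drmP.h>

/* Hold the vblank reference only while the event is pending.  With
 * vblank_disable_allowed set, the DRM timer may switch the interrupt
 * off some time after the last drm_vblank_put(). */
static int wait_one_vblank_sketch(struct drm_device *dev, int pipe)
{
    int ret;

    ret = drm_vblank_get(dev, pipe);    /* enables the IRQ if needed */
    if (ret)
        return ret;

    /* ... queue or wait for the vblank event here ... */

    drm_vblank_put(dev, pipe);          /* permits deferred disable */
    return 0;
}
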
+ platform_set_drvdata(drm->platformdev, drm);
+
+ /* Now try and bind all our sub-components */
+ ret = component_bind_all(drm->dev, drm);
if (ret)
- goto err_kms;
+ goto err_vblank;
/*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
+ * All components are now added, so we can publish the connector sysfs
+ * entries to userspace. This will generate hotplug events and so
+ * userspace will expect to be able to access DRM at this point.
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ ret = drm_sysfs_connector_add(connector);
+ if (ret) {
+ dev_err(drm->dev,
+ "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector), ret);
+ goto err_unbind;
+ }
+ }
- if (!imx_drm_device_get()) {
- ret = -EINVAL;
- goto err_vblank;
+ /*
+ * All components are now initialised, so set up the fb helper.
+ * The fb helper takes copies of key hardware information, so the
+ * crtcs/connectors/encoders must not change after this point.
+ */
+#if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
+ if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+ dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
+ legacyfb_depth = 16;
}
+ imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+ drm->mode_config.num_crtc, MAX_CRTC);
+ if (IS_ERR(imxdrm->fbhelper)) {
+ ret = PTR_ERR(imxdrm->fbhelper);
+ imxdrm->fbhelper = NULL;
+ goto err_unbind;
+ }
+#endif
+
+ drm_kms_helper_poll_init(drm);
- platform_set_drvdata(drm->platformdev, drm);
- mutex_unlock(&imxdrm->mutex);
return 0;
+err_unbind:
+ component_unbind_all(drm->dev, drm);
err_vblank:
drm_vblank_cleanup(drm);
err_kms:
- drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
- mutex_unlock(&imxdrm->mutex);
return ret;
}
-static void imx_drm_update_possible_crtcs(void)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_crtc *imx_drm_crtc;
- struct imx_drm_encoder *enc;
- struct crtc_cookie *cookie;
-
- list_for_each_entry(enc, &imxdrm->encoder_list, list) {
- u32 possible_crtcs = 0;
-
- list_for_each_entry(cookie, &enc->possible_crtcs, list) {
- list_for_each_entry(imx_drm_crtc, &imxdrm->crtc_list, list) {
- if (imx_drm_crtc->cookie.cookie == cookie->cookie &&
- imx_drm_crtc->cookie.id == cookie->id) {
- possible_crtcs |= 1 << imx_drm_crtc->pipe;
- }
- }
- }
- enc->encoder->possible_crtcs = possible_crtcs;
- enc->encoder->possible_clones = possible_crtcs;
- }
-}
-
/*
* imx_drm_add_crtc - add a new crtc
- *
- * The return value if !NULL is a cookie for the caller to pass to
- * imx_drm_remove_crtc later.
*/
-int imx_drm_add_crtc(struct drm_crtc *crtc,
+int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
struct imx_drm_crtc **new_crtc,
const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
- struct module *owner, void *cookie, int id)
+ struct device_node *port)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
+ struct imx_drm_device *imxdrm = drm->dev_private;
struct imx_drm_crtc *imx_drm_crtc;
int ret;
- mutex_lock(&imxdrm->mutex);
-
/*
* The vblank arrays are dimensioned by MAX_CRTC - we can't
* pass IDs greater than this to those functions.
*/
- if (imxdrm->pipes >= MAX_CRTC) {
- ret = -EINVAL;
- goto err_busy;
- }
+ if (imxdrm->pipes >= MAX_CRTC)
+ return -EINVAL;
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
+ if (imxdrm->drm->open_count)
+ return -EBUSY;
imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
- if (!imx_drm_crtc) {
- ret = -ENOMEM;
- goto err_alloc;
- }
+ if (!imx_drm_crtc)
+ return -ENOMEM;
imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
imx_drm_crtc->pipe = imxdrm->pipes++;
- imx_drm_crtc->cookie.cookie = cookie;
- imx_drm_crtc->cookie.id = id;
-
+ imx_drm_crtc->port = port;
imx_drm_crtc->crtc = crtc;
- imx_drm_crtc->imxdrm = imxdrm;
-
- imx_drm_crtc->owner = owner;
- list_add_tail(&imx_drm_crtc->list, &imxdrm->crtc_list);
+ imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc;
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
- imx_drm_update_possible_crtcs();
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
- mutex_unlock(&imxdrm->mutex);
+ drm_crtc_init(drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
return 0;
err_register:
- list_del(&imx_drm_crtc->list);
+ imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
kfree(imx_drm_crtc);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
@@ -561,17 +402,11 @@ EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
*/
int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
{
- struct imx_drm_device *imxdrm = imx_drm_crtc->imxdrm;
-
- mutex_lock(&imxdrm->mutex);
+ struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private;
drm_crtc_cleanup(imx_drm_crtc->crtc);
- list_del(&imx_drm_crtc->list);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- mutex_unlock(&imxdrm->mutex);
+ imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
kfree(imx_drm_crtc);
@@ -580,220 +415,115 @@ int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
EXPORT_SYMBOL_GPL(imx_drm_remove_crtc);
/*
- * imx_drm_add_encoder - add a new encoder
+ * Find the DRM CRTC possible mask for the connected endpoint.
+ *
+ * An encoder's possible CRTC mask is determined by each CRTC's position
+ * in the mode_config crtc_list. This means that CRTCs must not be added
+ * or removed once the DRM device has been fully initialised.
*/
-int imx_drm_add_encoder(struct drm_encoder *encoder,
- struct imx_drm_encoder **newenc, struct module *owner)
+static uint32_t imx_drm_find_crtc_mask(struct imx_drm_device *imxdrm,
+ struct device_node *endpoint)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_encoder *imx_drm_encoder;
- int ret;
-
- mutex_lock(&imxdrm->mutex);
-
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
+ struct device_node *port;
+ unsigned i;
- imx_drm_encoder = kzalloc(sizeof(*imx_drm_encoder), GFP_KERNEL);
- if (!imx_drm_encoder) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- imx_drm_encoder->encoder = encoder;
- imx_drm_encoder->owner = owner;
+ port = of_graph_get_remote_port(endpoint);
+ if (!port)
+ return 0;
+ of_node_put(port);
- ret = imx_drm_encoder_register(imx_drm_encoder);
- if (ret) {
- ret = -ENOMEM;
- goto err_register;
+ for (i = 0; i < MAX_CRTC; i++) {
+ struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[i];
+ if (imx_drm_crtc && imx_drm_crtc->port == port)
+ return drm_crtc_mask(imx_drm_crtc->crtc);
}
- list_add_tail(&imx_drm_encoder->list, &imxdrm->encoder_list);
-
- *newenc = imx_drm_encoder;
-
- mutex_unlock(&imxdrm->mutex);
-
return 0;
+}
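
A minimal sketch of how such a possible_crtcs mask accumulates: one bit per CRTC in registration order, which is what drm_crtc_mask() returns for the matching port. The helper below is hypothetical:

#include <stdint.h>

/* Bit N corresponds to the Nth CRTC registered with the DRM device
 * (drm_crtc_mask() == 1 << drm_crtc_index(crtc)).  An encoder wired to
 * pipes 0 and 2 therefore ends up with possible_crtcs == 0x5. */
static uint32_t crtc_mask_for_pipes(const int *pipes, int n)
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < n; i++)
        mask |= 1u << pipes[i];

    return mask;
}
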
-err_register:
- kfree(imx_drm_encoder);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
-
- return ret;
+static struct device_node *imx_drm_of_get_next_endpoint(
+ const struct device_node *parent, struct device_node *prev)
+{
+ struct device_node *node = of_graph_get_next_endpoint(parent, prev);
+ of_node_put(prev);
+ return node;
}
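
A minimal sketch of a caller loop built on the wrapper above, which exists purely to manage reference counts during iteration: it puts the endpoint it was handed and returns the next one already referenced:

/* Count every endpoint below @parent; the loop exits with ep == NULL,
 * so there is nothing left to of_node_put().  Illustration only. */
static int count_endpoints_sketch(const struct device_node *parent)
{
    struct device_node *ep = NULL;
    int n = 0;

    while ((ep = imx_drm_of_get_next_endpoint(parent, ep)) != NULL)
        n++;

    return n;
}
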
-EXPORT_SYMBOL_GPL(imx_drm_add_encoder);
-int imx_drm_encoder_add_possible_crtcs(
- struct imx_drm_encoder *imx_drm_encoder,
- struct device_node *np)
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct of_phandle_args args;
- struct crtc_cookie *c;
- int ret = 0;
+ struct imx_drm_device *imxdrm = drm->dev_private;
+ struct device_node *ep = NULL;
+ uint32_t crtc_mask = 0;
int i;
- if (!list_empty(&imx_drm_encoder->possible_crtcs))
- return -EBUSY;
+ for (i = 0; ; i++) {
+ u32 mask;
- for (i = 0; !ret; i++) {
- ret = of_parse_phandle_with_args(np, "crtcs",
- "#crtc-cells", i, &args);
- if (ret < 0)
+ ep = imx_drm_of_get_next_endpoint(np, ep);
+ if (!ep)
break;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c) {
- of_node_put(args.np);
- return -ENOMEM;
- }
-
- c->cookie = args.np;
- c->id = args.args_count > 0 ? args.args[0] : 0;
-
- of_node_put(args.np);
-
- mutex_lock(&imxdrm->mutex);
-
- list_add_tail(&c->list, &imx_drm_encoder->possible_crtcs);
-
- mutex_unlock(&imxdrm->mutex);
- }
-
- imx_drm_update_possible_crtcs();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_add_possible_crtcs);
+ mask = imx_drm_find_crtc_mask(imxdrm, ep);
-int imx_drm_encoder_get_mux_id(struct imx_drm_encoder *imx_drm_encoder,
- struct drm_crtc *crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_crtc *imx_crtc;
- int i = 0;
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (mask == 0)
+ return -EPROBE_DEFER;
- list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list) {
- if (imx_crtc->crtc == crtc)
- goto found;
- i++;
+ crtc_mask |= mask;
}
- return -EINVAL;
-found:
- return i;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
-
-/*
- * imx_drm_remove_encoder - remove an encoder
- */
-int imx_drm_remove_encoder(struct imx_drm_encoder *imx_drm_encoder)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct crtc_cookie *c, *tmp;
-
- mutex_lock(&imxdrm->mutex);
-
- imx_drm_encoder_unregister(imx_drm_encoder);
-
- list_del(&imx_drm_encoder->list);
+ if (ep)
+ of_node_put(ep);
+ if (i == 0)
+ return -ENOENT;
- list_for_each_entry_safe(c, tmp, &imx_drm_encoder->possible_crtcs,
- list)
- kfree(c);
+ encoder->possible_crtcs = crtc_mask;
- mutex_unlock(&imxdrm->mutex);
-
- kfree(imx_drm_encoder);
+ /* FIXME: this is the mask of outputs which can clone this output. */
+ encoder->possible_clones = ~0;
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_remove_encoder);
+EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
/*
- * imx_drm_add_connector - add a connector
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
*/
-int imx_drm_add_connector(struct drm_connector *connector,
- struct imx_drm_connector **new_con,
- struct module *owner)
+int imx_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder)
{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- struct imx_drm_connector *imx_drm_connector;
+ struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
+ struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
+ struct device_node *port;
int ret;
- mutex_lock(&imxdrm->mutex);
-
- if (imxdrm->drm->open_count) {
- ret = -EBUSY;
- goto err_busy;
- }
-
- imx_drm_connector = kzalloc(sizeof(*imx_drm_connector), GFP_KERNEL);
- if (!imx_drm_connector) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- imx_drm_connector->connector = connector;
- imx_drm_connector->owner = owner;
-
- ret = imx_drm_connector_register(imx_drm_connector);
- if (ret)
- goto err_register;
-
- list_add_tail(&imx_drm_connector->list, &imxdrm->connector_list);
-
- *new_con = imx_drm_connector;
-
- mutex_unlock(&imxdrm->mutex);
-
- return 0;
-
-err_register:
- kfree(imx_drm_connector);
-err_alloc:
-err_busy:
- mutex_unlock(&imxdrm->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(imx_drm_add_connector);
-
-void imx_drm_fb_helper_set(struct drm_fbdev_cma *fbdev_helper)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- imxdrm->fbhelper = fbdev_helper;
-}
-EXPORT_SYMBOL_GPL(imx_drm_fb_helper_set);
-
-/*
- * imx_drm_remove_connector - remove a connector
- */
-int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
-
- mutex_lock(&imxdrm->mutex);
-
- imx_drm_connector_unregister(imx_drm_connector);
-
- list_del(&imx_drm_connector->list);
+ if (!node || !imx_crtc)
+ return -EINVAL;
- mutex_unlock(&imxdrm->mutex);
+ do {
+ ep = imx_drm_of_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
- kfree(imx_drm_connector);
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == imx_crtc->port) {
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ return ret ? ret : endpoint.id;
+ }
+ } while (ep);
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
+EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
@@ -834,80 +564,156 @@ static struct drm_driver imx_drm_driver = {
.patchlevel = 0,
};
-static int imx_drm_platform_probe(struct platform_device *pdev)
+static int compare_of(struct device *dev, void *data)
{
- int ret;
-
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
+ struct device_node *np = data;
- imx_drm_device->dev = &pdev->dev;
+ /* Special case for LDB, one device for two channels */
+ if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ np = of_get_parent(np);
+ of_node_put(np);
+ }
- return drm_platform_init(&imx_drm_driver, pdev);
+ return dev->of_node == np;
}
-static int imx_drm_platform_remove(struct platform_device *pdev)
+static LIST_HEAD(imx_drm_components);
+
+static int imx_drm_add_components(struct device *master, struct master *m)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ struct imx_drm_component *component;
+ int ret;
+ list_for_each_entry(component, &imx_drm_components, list) {
+ ret = component_master_add_child(m, compare_of,
+ component->of_node);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static struct platform_driver imx_drm_pdrv = {
- .probe = imx_drm_platform_probe,
- .remove = imx_drm_platform_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "imx-drm",
- },
+static int imx_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+}
+
+static void imx_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops imx_drm_ops = {
+ .add_components = imx_drm_add_components,
+ .bind = imx_drm_bind,
+ .unbind = imx_drm_unbind,
};
-static struct platform_device *imx_drm_pdev;
+static struct imx_drm_component *imx_drm_find_component(struct device *dev,
+ struct device_node *node)
+{
+ struct imx_drm_component *component;
+
+ list_for_each_entry(component, &imx_drm_components, list)
+ if (component->of_node == node)
+ return component;
+
+ return NULL;
+}
-static int __init imx_drm_init(void)
+static int imx_drm_add_component(struct device *dev, struct device_node *node)
{
- int ret;
+ struct imx_drm_component *component;
+
+ if (imx_drm_find_component(dev, node))
+ return 0;
- imx_drm_device = kzalloc(sizeof(*imx_drm_device), GFP_KERNEL);
- if (!imx_drm_device)
+ component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
+ if (!component)
return -ENOMEM;
- mutex_init(&imx_drm_device->mutex);
- INIT_LIST_HEAD(&imx_drm_device->crtc_list);
- INIT_LIST_HEAD(&imx_drm_device->connector_list);
- INIT_LIST_HEAD(&imx_drm_device->encoder_list);
+ component->of_node = node;
+ list_add_tail(&component->list, &imx_drm_components);
+
+ return 0;
+}
+
+static int imx_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *ep, *port, *remote;
+ int ret;
+ int i;
+
+ /*
+ * Bind the IPU display interface ports first, so that
+ * imx_drm_encoder_parse_of(), called from the encoder .bind callbacks,
+ * works as expected.
+ */
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(pdev->dev.of_node, "ports", i);
+ if (!port)
+ break;
- imx_drm_pdev = platform_device_register_simple("imx-drm", -1, NULL, 0);
- if (IS_ERR(imx_drm_pdev)) {
- ret = PTR_ERR(imx_drm_pdev);
- goto err_pdev;
+ ret = imx_drm_add_component(&pdev->dev, port);
+ if (ret < 0)
+ return ret;
}
- ret = platform_driver_register(&imx_drm_pdrv);
- if (ret)
- goto err_pdrv;
+ if (i == 0) {
+ dev_err(&pdev->dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
- return 0;
+ /* Then bind all encoders */
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(pdev->dev.of_node, "ports", i);
+ if (!port)
+ break;
-err_pdrv:
- platform_device_unregister(imx_drm_pdev);
-err_pdev:
- kfree(imx_drm_device);
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
- return ret;
+ ret = imx_drm_add_component(&pdev->dev, remote);
+ of_node_put(remote);
+ if (ret < 0)
+ return ret;
+ }
+ of_node_put(port);
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return component_master_add(&pdev->dev, &imx_drm_ops);
}
-static void __exit imx_drm_exit(void)
+static int imx_drm_platform_remove(struct platform_device *pdev)
{
- platform_device_unregister(imx_drm_pdev);
- platform_driver_unregister(&imx_drm_pdrv);
-
- kfree(imx_drm_device);
+ component_master_del(&pdev->dev, &imx_drm_ops);
+ return 0;
}
-module_init(imx_drm_init);
-module_exit(imx_drm_exit);
+static const struct of_device_id imx_drm_dt_ids[] = {
+ { .compatible = "fsl,imx-display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
+
+static struct platform_driver imx_drm_pdrv = {
+ .probe = imx_drm_platform_probe,
+ .remove = imx_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "imx-drm",
+ .of_match_table = imx_drm_dt_ids,
+ },
+};
+module_platform_driver(imx_drm_pdrv);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/staging/imx-drm/imx-drm.h
index ae90c9c15312..a322bac55414 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/staging/imx-drm/imx-drm.h
@@ -1,17 +1,15 @@
#ifndef _IMX_DRM_H_
#define _IMX_DRM_H_
-#include <linux/videodev2.h>
-
-#define IPU_PIX_FMT_GBR24 v4l2_fourcc('G', 'B', 'R', '3')
-
+struct device_node;
struct drm_crtc;
struct drm_connector;
struct drm_device;
+struct drm_display_mode;
struct drm_encoder;
-struct imx_drm_crtc;
struct drm_fbdev_cma;
struct drm_framebuffer;
+struct imx_drm_crtc;
struct platform_device;
int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
@@ -25,10 +23,10 @@ struct imx_drm_crtc_helper_funcs {
const struct drm_crtc_funcs *crtc_funcs;
};
-int imx_drm_add_crtc(struct drm_crtc *crtc,
+int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
struct imx_drm_crtc **new_crtc,
const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
- struct module *owner, void *cookie, int id);
+ struct device_node *port);
int imx_drm_remove_crtc(struct imx_drm_crtc *);
int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
@@ -38,35 +36,23 @@ int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc);
-struct imx_drm_encoder;
-int imx_drm_add_encoder(struct drm_encoder *encoder,
- struct imx_drm_encoder **new_enc,
- struct module *owner);
-int imx_drm_remove_encoder(struct imx_drm_encoder *);
-
-struct imx_drm_connector;
-int imx_drm_add_connector(struct drm_connector *connector,
- struct imx_drm_connector **new_con,
- struct module *owner);
-int imx_drm_remove_connector(struct imx_drm_connector *);
-
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-struct drm_device *imx_drm_device_get(void);
-void imx_drm_device_put(void);
-int imx_drm_crtc_panel_format_pins(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format_pins(struct drm_encoder *encoder,
u32 interface_pix_fmt, int hsync_pin, int vsync_pin);
-int imx_drm_crtc_panel_format(struct drm_crtc *crtc, u32 encoder_type,
+int imx_drm_panel_format(struct drm_encoder *encoder,
u32 interface_pix_fmt);
-void imx_drm_fb_helper_set(struct drm_fbdev_cma *fbdev_helper);
-struct device_node;
+int imx_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder);
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np);
-int imx_drm_encoder_get_mux_id(struct imx_drm_encoder *imx_drm_encoder,
- struct drm_crtc *crtc);
-int imx_drm_encoder_add_possible_crtcs(struct imx_drm_encoder *imx_drm_encoder,
- struct device_node *np);
+int imx_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void imx_drm_connector_destroy(struct drm_connector *connector);
+void imx_drm_encoder_destroy(struct drm_encoder *encoder);
#endif /* _IMX_DRM_H_ */
diff --git a/drivers/staging/imx-drm/imx-fb.c b/drivers/staging/imx-drm/imx-fb.c
deleted file mode 100644
index 03a7b4e14f67..000000000000
--- a/drivers/staging/imx-drm/imx-fb.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * i.MX drm driver
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "imx-drm.h"
-
-static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
-};
-
-void imx_drm_mode_config_init(struct drm_device *dev)
-{
- dev->mode_config.min_width = 64;
- dev->mode_config.min_height = 64;
-
- /*
- * set max width and height as default value(4096x4096).
- * this value would be used to check framebuffer size limitation
- * at drm_mode_addfb().
- */
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
-
- dev->mode_config.funcs = &imx_drm_mode_config_funcs;
-}
diff --git a/drivers/staging/imx-drm/imx-fbdev.c b/drivers/staging/imx-drm/imx-fbdev.c
deleted file mode 100644
index 8331739c3d08..000000000000
--- a/drivers/staging/imx-drm/imx-fbdev.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * i.MX drm driver
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "imx-drm.h"
-
-#define MAX_CONNECTOR 4
-#define PREFERRED_BPP 16
-
-static struct drm_fbdev_cma *fbdev_cma;
-
-static int legacyfb_depth = 16;
-
-module_param(legacyfb_depth, int, 0444);
-
-static int __init imx_fb_helper_init(void)
-{
- struct drm_device *drm = imx_drm_device_get();
-
- if (!drm)
- return -EINVAL;
-
- if (legacyfb_depth != 16 && legacyfb_depth != 32) {
- pr_warn("i.MX legacyfb: invalid legacyfb_depth setting. defaulting to 16bpp\n");
- legacyfb_depth = 16;
- }
-
- fbdev_cma = drm_fbdev_cma_init(drm, legacyfb_depth,
- drm->mode_config.num_crtc, MAX_CONNECTOR);
-
- if (IS_ERR(fbdev_cma)) {
- imx_drm_device_put();
- return PTR_ERR(fbdev_cma);
- }
-
- imx_drm_fb_helper_set(fbdev_cma);
-
- return 0;
-}
-
-static void __exit imx_fb_helper_exit(void)
-{
- imx_drm_fb_helper_set(NULL);
- drm_fbdev_cma_fini(fbdev_cma);
- imx_drm_device_put();
-}
-
-late_initcall(imx_fb_helper_init);
-module_exit(imx_fb_helper_exit);
-
-MODULE_DESCRIPTION("Freescale i.MX legacy fb driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..d47dedd2cdb4 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -12,10 +12,12 @@
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
+#include <linux/component.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/hdmi.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +54,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum hdmi_colorimetry {
- ITU601,
- ITU709,
-};
-
enum imx_hdmi_devtype {
IMX6Q_HDMI,
IMX6DL_HDMI,
@@ -116,15 +113,15 @@ struct hdmi_data_info {
struct imx_hdmi {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
enum imx_hdmi_devtype dev_type;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ enum drm_connector_status connector_status;
+
struct hdmi_data_info hdmi_data;
int vic;
@@ -138,7 +135,6 @@ struct imx_hdmi {
struct i2c_adapter *ddc;
void __iomem *regs;
- unsigned long pixel_clk_rate;
unsigned int sample_rate;
int ratio;
};
@@ -160,37 +156,34 @@ static inline u8 hdmi_readb(struct imx_hdmi *hdmi, int offset)
return readb(hdmi->regs + offset);
}
+static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
+{
+ u8 val = hdmi_readb(hdmi, reg) & ~mask;
+ val |= data & mask;
+ hdmi_writeb(hdmi, val, reg);
+}
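
A minimal standalone sketch of the read-modify-write idea behind hdmi_modb(), with the register access stubbed out (an array in place of ioremapped MMIO):

#include <stdint.h>

static uint8_t fake_regs[0x10000];    /* stand-in for the mapped registers */

static uint8_t reg_readb(unsigned int reg)                { return fake_regs[reg]; }
static void    reg_writeb(uint8_t val, unsigned int reg)  { fake_regs[reg] = val; }

/* Update only the bits selected by @mask, leaving all other bits untouched. */
static void reg_modb(uint8_t data, uint8_t mask, unsigned int reg)
{
    uint8_t val = reg_readb(reg) & ~mask;

    reg_writeb(val | (data & mask), reg);
}
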
+
static void hdmi_mask_writeb(struct imx_hdmi *hdmi, u8 data, unsigned int reg,
u8 shift, u8 mask)
{
- u8 value = hdmi_readb(hdmi, reg) & ~mask;
- value |= (data << shift) & mask;
- hdmi_writeb(hdmi, value, reg);
+ hdmi_modb(hdmi, data << shift, mask, reg);
}
static void hdmi_set_clock_regenerator_n(struct imx_hdmi *hdmi,
unsigned int value)
{
- u8 val;
-
hdmi_writeb(hdmi, value & 0xff, HDMI_AUD_N1);
hdmi_writeb(hdmi, (value >> 8) & 0xff, HDMI_AUD_N2);
hdmi_writeb(hdmi, (value >> 16) & 0x0f, HDMI_AUD_N3);
/* nshift factor = 0 */
- val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
- val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
- hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
}
static void hdmi_regenerate_cts(struct imx_hdmi *hdmi, unsigned int cts)
{
- u8 val;
-
/* Must be set/cleared first */
- val = hdmi_readb(hdmi, HDMI_AUD_CTS3);
- val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
- hdmi_writeb(hdmi, val, HDMI_AUD_CTS3);
+ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
@@ -335,34 +328,25 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
return (cts * ratio) / 100;
}
-static void hdmi_get_pixel_clk(struct imx_hdmi *hdmi)
-{
- unsigned long rate;
-
- rate = 65000000; /* FIXME */
-
- if (rate)
- hdmi->pixel_clk_rate = rate;
-}
-
-static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
+static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi,
+ unsigned long pixel_clk)
{
unsigned int clk_n, clk_cts;
- clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
hdmi->ratio);
- clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
+ clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
hdmi->ratio);
if (!clk_cts) {
dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, hdmi->pixel_clk_rate);
+ __func__, pixel_clk);
return;
}
dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
__func__, hdmi->sample_rate, hdmi->ratio,
- hdmi->pixel_clk_rate, clk_n, clk_cts);
+ pixel_clk, clk_n, clk_cts);
hdmi_set_clock_regenerator_n(hdmi, clk_n);
hdmi_regenerate_cts(hdmi, clk_cts);
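
For reference, the N/CTS pair follows the HDMI audio clock regeneration relation 128 * fs = f_pixel * N / CTS. A quick arithmetic check for the common 74.25 MHz / 48 kHz case, using the spec-recommended N = 6144, shown only as an illustration of the relationship the helpers above implement:

#include <stdio.h>

int main(void)
{
    unsigned long long pixel_clk = 74250000;    /* 74.25 MHz */
    unsigned long long fs = 48000, n = 6144;

    /* CTS = f_pixel * N / (128 * fs) = 74250 for these values */
    unsigned long long cts = pixel_clk * n / (128 * fs);

    printf("N=%llu CTS=%llu recovered 128*fs=%llu\n",
           n, cts, pixel_clk * n / cts);
    return 0;
}
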
@@ -370,32 +354,12 @@ static void hdmi_set_clk_regenerator(struct imx_hdmi *hdmi)
static void hdmi_init_clk_regenerator(struct imx_hdmi *hdmi)
{
- unsigned int clk_n, clk_cts;
-
- clk_n = hdmi_compute_n(hdmi->sample_rate, hdmi->pixel_clk_rate,
- hdmi->ratio);
- clk_cts = hdmi_compute_cts(hdmi->sample_rate, hdmi->pixel_clk_rate,
- hdmi->ratio);
-
- if (!clk_cts) {
- dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
- __func__, hdmi->pixel_clk_rate);
- return;
- }
-
- dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n",
- __func__, hdmi->sample_rate, hdmi->ratio,
- hdmi->pixel_clk_rate, clk_n, clk_cts);
-
- hdmi_set_clock_regenerator_n(hdmi, clk_n);
- hdmi_regenerate_cts(hdmi, clk_cts);
+ hdmi_set_clk_regenerator(hdmi, 74250000);
}
static void hdmi_clk_regenerator_update_pixel_clock(struct imx_hdmi *hdmi)
{
- /* Get pixel clock from ipu */
- hdmi_get_pixel_clk(hdmi);
- hdmi_set_clk_regenerator(hdmi);
+ hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
}
/*
@@ -463,38 +427,45 @@ static void hdmi_video_sample(struct imx_hdmi *hdmi)
static int is_color_space_conversion(struct imx_hdmi *hdmi)
{
- return (hdmi->hdmi_data.enc_in_format !=
- hdmi->hdmi_data.enc_out_format);
+ return hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format;
}
static int is_color_space_decimation(struct imx_hdmi *hdmi)
{
- return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
- (hdmi->hdmi_data.enc_in_format == RGB ||
- hdmi->hdmi_data.enc_in_format == YCBCR444));
+ if (hdmi->hdmi_data.enc_out_format != YCBCR422_8BITS)
+ return 0;
+ if (hdmi->hdmi_data.enc_in_format == RGB ||
+ hdmi->hdmi_data.enc_in_format == YCBCR444)
+ return 1;
+ return 0;
}
static int is_color_space_interpolation(struct imx_hdmi *hdmi)
{
- return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
- (hdmi->hdmi_data.enc_out_format == RGB ||
- hdmi->hdmi_data.enc_out_format == YCBCR444));
+ if (hdmi->hdmi_data.enc_in_format != YCBCR422_8BITS)
+ return 0;
+ if (hdmi->hdmi_data.enc_out_format == RGB ||
+ hdmi->hdmi_data.enc_out_format == YCBCR444)
+ return 1;
+ return 0;
}
static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ unsigned i;
u32 csc_scale = 1;
- u8 val;
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry ==
+ HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry ==
+ HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -502,37 +473,24 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
}
}
- hdmi_writeb(hdmi, ((*csc_coeff)[0][0] & 0xff), HDMI_CSC_COEF_A1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][0] >> 8), HDMI_CSC_COEF_A1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][1] & 0xff), HDMI_CSC_COEF_A2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][1] >> 8), HDMI_CSC_COEF_A2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][2] & 0xff), HDMI_CSC_COEF_A3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][2] >> 8), HDMI_CSC_COEF_A3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][3] & 0xff), HDMI_CSC_COEF_A4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[0][3] >> 8), HDMI_CSC_COEF_A4_MSB);
-
- hdmi_writeb(hdmi, ((*csc_coeff)[1][0] & 0xff), HDMI_CSC_COEF_B1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][0] >> 8), HDMI_CSC_COEF_B1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][1] & 0xff), HDMI_CSC_COEF_B2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][1] >> 8), HDMI_CSC_COEF_B2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][2] & 0xff), HDMI_CSC_COEF_B3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][2] >> 8), HDMI_CSC_COEF_B3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][3] & 0xff), HDMI_CSC_COEF_B4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[1][3] >> 8), HDMI_CSC_COEF_B4_MSB);
-
- hdmi_writeb(hdmi, ((*csc_coeff)[2][0] & 0xff), HDMI_CSC_COEF_C1_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][0] >> 8), HDMI_CSC_COEF_C1_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][1] & 0xff), HDMI_CSC_COEF_C2_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][1] >> 8), HDMI_CSC_COEF_C2_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][2] & 0xff), HDMI_CSC_COEF_C3_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][2] >> 8), HDMI_CSC_COEF_C3_MSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][3] & 0xff), HDMI_CSC_COEF_C4_LSB);
- hdmi_writeb(hdmi, ((*csc_coeff)[2][3] >> 8), HDMI_CSC_COEF_C4_MSB);
-
- val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
- val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
- val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
- hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+ /* The CSC registers are sequential, alternating MSB then LSB */
+ for (i = 0; i < ARRAY_SIZE(csc_coeff_default[0]); i++) {
+ u16 coeff_a = (*csc_coeff)[0][i];
+ u16 coeff_b = (*csc_coeff)[1][i];
+ u16 coeff_c = (*csc_coeff)[2][i];
+
+ hdmi_writeb(hdmi, coeff_a & 0xff,
+ HDMI_CSC_COEF_A1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_a >> 8, HDMI_CSC_COEF_A1_MSB + i * 2);
+ hdmi_writeb(hdmi, coeff_b & 0xff, HDMI_CSC_COEF_B1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_b >> 8, HDMI_CSC_COEF_B1_MSB + i * 2);
+ hdmi_writeb(hdmi, coeff_c & 0xff,
+ HDMI_CSC_COEF_C1_LSB + i * 2);
+ hdmi_writeb(hdmi, coeff_c >> 8, HDMI_CSC_COEF_C1_MSB + i * 2);
+ }
+
+ hdmi_modb(hdmi, csc_scale, HDMI_CSC_SCALE_CSCSCALE_MASK,
+ HDMI_CSC_SCALE);
}
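
A minimal sketch of the layout assumption behind that loop: coefficient i of a row sits a fixed two-byte stride from the row's first LSB/MSB pair, so index arithmetic replaces the long list of writes. The offsets below are hypothetical, not the real DesignWare register map:

#include <stdint.h>

#define COEF_A1_LSB    0x03    /* hypothetical offsets within the CSC block */
#define COEF_A1_MSB    0x02

static void write_row_a_sketch(const uint16_t coeff[4],
                               void (*wr)(uint8_t val, unsigned int reg))
{
    unsigned int i;

    for (i = 0; i < 4; i++) {
        wr(coeff[i] & 0xff, COEF_A1_LSB + i * 2);
        wr(coeff[i] >> 8,   COEF_A1_MSB + i * 2);
    }
}
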
static void hdmi_video_csc(struct imx_hdmi *hdmi)
@@ -540,7 +498,6 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
int color_depth = 0;
int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
int decimation = 0;
- u8 val;
/* YCC422 interpolation to 444 mode */
if (is_color_space_interpolation(hdmi))
@@ -561,10 +518,8 @@ static void hdmi_video_csc(struct imx_hdmi *hdmi)
/* Configure the CSC registers */
hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
- val = hdmi_readb(hdmi, HDMI_CSC_SCALE);
- val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
- val |= color_depth;
- hdmi_writeb(hdmi, val, HDMI_CSC_SCALE);
+ hdmi_modb(hdmi, color_depth, HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK,
+ HDMI_CSC_SCALE);
imx_hdmi_update_csc_coeffs(hdmi);
}
@@ -580,7 +535,7 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
- u8 val;
+ u8 val, vp_conf;
if (hdmi_data->enc_out_format == RGB
|| hdmi_data->enc_out_format == YCBCR444) {
@@ -619,107 +574,75 @@ static void hdmi_video_packetize(struct imx_hdmi *hdmi)
HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
- val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE,
+ HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF);
/* Data from pixel repeater block */
if (hdmi_data->pix_repet_factor > 1) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_PR_EN_MASK |
- HDMI_VP_CONF_BYPASS_SELECT_MASK);
- val |= HDMI_VP_CONF_PR_EN_ENABLE |
- HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_PR_EN_ENABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
} else { /* data from packetizer block */
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_PR_EN_MASK |
- HDMI_VP_CONF_BYPASS_SELECT_MASK);
- val |= HDMI_VP_CONF_PR_EN_DISABLE |
- HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_PR_EN_DISABLE |
+ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
}
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
- val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, vp_conf,
+ HDMI_VP_CONF_PR_EN_MASK |
+ HDMI_VP_CONF_BYPASS_SELECT_MASK, HDMI_VP_CONF);
+
+ hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET,
+ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF);
hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP);
if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
- HDMI_VP_CONF_PP_EN_ENABLE |
- HDMI_VP_CONF_YCC422_EN_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_ENABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
} else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
- HDMI_VP_CONF_PP_EN_DISABLE |
- HDMI_VP_CONF_YCC422_EN_ENABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_ENABLE;
} else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
- HDMI_VP_CONF_PP_EN_ENMASK |
- HDMI_VP_CONF_YCC422_EN_MASK);
- val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
- HDMI_VP_CONF_PP_EN_DISABLE |
- HDMI_VP_CONF_YCC422_EN_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ vp_conf = HDMI_VP_CONF_BYPASS_EN_ENABLE |
+ HDMI_VP_CONF_PP_EN_DISABLE |
+ HDMI_VP_CONF_YCC422_EN_DISABLE;
} else {
return;
}
- val = hdmi_readb(hdmi, HDMI_VP_STUFF);
- val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
- HDMI_VP_STUFF_YCC422_STUFFING_MASK);
- val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
- HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
- hdmi_writeb(hdmi, val, HDMI_VP_STUFF);
+ hdmi_modb(hdmi, vp_conf,
+ HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK |
+ HDMI_VP_CONF_YCC422_EN_MASK, HDMI_VP_CONF);
+
+ hdmi_modb(hdmi, HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
+ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE,
+ HDMI_VP_STUFF_PP_STUFFING_MASK |
+ HDMI_VP_STUFF_YCC422_STUFFING_MASK, HDMI_VP_STUFF);
- val = hdmi_readb(hdmi, HDMI_VP_CONF);
- val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
- val |= output_select;
- hdmi_writeb(hdmi, val, HDMI_VP_CONF);
+ hdmi_modb(hdmi, output_select, HDMI_VP_CONF_OUTPUT_SELECTOR_MASK,
+ HDMI_VP_CONF);
}
static inline void hdmi_phy_test_clear(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
- HDMI_PHY_TST0_TSTCLR_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLR_OFFSET,
+ HDMI_PHY_TST0_TSTCLR_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_enable(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTEN_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
- HDMI_PHY_TST0_TSTEN_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTEN_OFFSET,
+ HDMI_PHY_TST0_TSTEN_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_clock(struct imx_hdmi *hdmi,
unsigned char bit)
{
- u8 val = hdmi_readb(hdmi, HDMI_PHY_TST0);
- val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
- val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
- HDMI_PHY_TST0_TSTCLK_MASK;
- hdmi_writeb(hdmi, val, HDMI_PHY_TST0);
+ hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLK_OFFSET,
+ HDMI_PHY_TST0_TSTCLK_MASK, HDMI_PHY_TST0);
}
static inline void hdmi_phy_test_din(struct imx_hdmi *hdmi,
@@ -810,19 +733,94 @@ static void imx_hdmi_phy_sel_interface_control(struct imx_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
+enum {
+ RES_8,
+ RES_10,
+ RES_12,
+ RES_MAX,
+};
+
+struct mpll_config {
+ unsigned long mpixelclock;
+ struct {
+ u16 cpce;
+ u16 gmp;
+ } res[RES_MAX];
+};
+
+static const struct mpll_config mpll_config[] = {
+ {
+ 45250000, {
+ { 0x01e0, 0x0000 },
+ { 0x21e1, 0x0000 },
+ { 0x41e2, 0x0000 }
+ },
+ }, {
+ 92500000, {
+ { 0x0140, 0x0005 },
+ { 0x2141, 0x0005 },
+ { 0x4142, 0x0005 },
+ },
+ }, {
+ 148500000, {
+ { 0x00a0, 0x000a },
+ { 0x20a1, 0x000a },
+ { 0x40a2, 0x000a },
+ },
+ }, {
+ ~0UL, {
+ { 0x00a0, 0x000a },
+ { 0x2001, 0x000f },
+ { 0x4002, 0x000f },
+ },
+ }
+};
+
+struct curr_ctrl {
+ unsigned long mpixelclock;
+ u16 curr[RES_MAX];
+};
+
+static const struct curr_ctrl curr_ctrl[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ {
+ 54000000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 58400000, { 0x091c, 0x06dc, 0x06dc },
+ }, {
+ 72000000, { 0x06dc, 0x06dc, 0x091c },
+ }, {
+ 74250000, { 0x06dc, 0x0b5c, 0x091c },
+ }, {
+ 118800000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 216000000, { 0x06dc, 0x0b5c, 0x091c },
+ }
+};
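
A minimal sketch of the lookup rule both tables use: take the first entry whose mpixelclock is not below the target. The mpll table ends in a ~0UL catch-all so the search stops one entry short, while curr_ctrl has no sentinel and is range-checked after its loop. Values below are borrowed from the 8-bit column of mpll_config:

#include <stddef.h>

struct rate_entry {
    unsigned long max_rate;
    unsigned int  setting;
};

/* Ordered by ascending max_rate and terminated by a ~0UL catch-all. */
static const struct rate_entry sketch_table[] = {
    {  45250000, 0x01e0 },
    {  92500000, 0x0140 },
    { 148500000, 0x00a0 },
    { ~0UL,      0x00a0 },    /* sentinel: always matches */
};

static unsigned int sketch_lookup(unsigned long rate)
{
    size_t i;

    /* "always match on final entry": never index past the sentinel */
    for (i = 0; i < sizeof(sketch_table) / sizeof(sketch_table[0]) - 1; i++)
        if (rate <= sketch_table[i].max_rate)
            break;

    return sketch_table[i].setting;
}
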
+
static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
unsigned char res, int cscon)
{
+ unsigned res_idx, i;
u8 val, msec;
- /* color resolution 0 is 8 bit colour depth */
- if (!res)
- res = 8;
-
if (prep)
return -EINVAL;
- else if (res != 8 && res != 12)
+
+ switch (res) {
+ case 0: /* color resolution 0 is 8 bit colour depth */
+ case 8:
+ res_idx = RES_8;
+ break;
+ case 10:
+ res_idx = RES_10;
+ break;
+ case 12:
+ res_idx = RES_12;
+ break;
+ default:
return -EINVAL;
+ }
/* Enable csc path */
if (cscon)
@@ -849,165 +847,30 @@ static int hdmi_phy_configure(struct imx_hdmi *hdmi, unsigned char prep,
HDMI_PHY_I2CM_SLAVE_ADDR);
hdmi_phy_test_clear(hdmi, 0);
- if (hdmi->hdmi_data.video_mode.mpixelclock <= 45250000) {
- switch (res) {
- case 8:
- /* PLL/MPLL Cfg */
- hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 92500000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ /* PLL/MPLL Cfg - always match on final entry */
+ for (i = 0; i < ARRAY_SIZE(mpll_config) - 1; i++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ mpll_config[i].mpixelclock)
break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 148500000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- default:
- return -EINVAL;
- }
- } else {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
- hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
- default:
- return -EINVAL;
- }
- }
- if (hdmi->hdmi_data.video_mode.mpixelclock <= 54000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 58400000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 72000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 74250000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 118800000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- default:
- return -EINVAL;
- }
- } else if (hdmi->hdmi_data.video_mode.mpixelclock <= 216000000) {
- switch (res) {
- case 8:
- hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
- break;
- case 10:
- hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
- break;
- case 12:
- hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
+ hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
+ hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
+
+ for (i = 0; i < ARRAY_SIZE(curr_ctrl); i++)
+ if (hdmi->hdmi_data.video_mode.mpixelclock <=
+ curr_ctrl[i].mpixelclock)
break;
- default:
- return -EINVAL;
- }
- } else {
+
+ if (i >= ARRAY_SIZE(curr_ctrl)) {
dev_err(hdmi->dev,
"Pixel clock %d - unsupported by HDMI\n",
hdmi->hdmi_data.video_mode.mpixelclock);
return -EINVAL;
}
+ /* CURRCTRL */
+ hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+
hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
/* RESISTANCE TERM 133Ohm Cfg */
@@ -1076,7 +939,7 @@ static int imx_hdmi_phy_init(struct imx_hdmi *hdmi)
static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
{
- u8 de, val;
+ u8 de;
if (hdmi->hdmi_data.video_mode.mdataenablepolarity)
de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH;
@@ -1084,20 +947,13 @@ static void hdmi_tx_hdcp_config(struct imx_hdmi *hdmi)
de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW;
/* disable rx detect */
- val = hdmi_readb(hdmi, HDMI_A_HDCPCFG0);
- val &= HDMI_A_HDCPCFG0_RXDETECT_MASK;
- val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG0);
+ hdmi_modb(hdmi, HDMI_A_HDCPCFG0_RXDETECT_DISABLE,
+ HDMI_A_HDCPCFG0_RXDETECT_MASK, HDMI_A_HDCPCFG0);
- val = hdmi_readb(hdmi, HDMI_A_VIDPOLCFG);
- val &= HDMI_A_VIDPOLCFG_DATAENPOL_MASK;
- val |= de;
- hdmi_writeb(hdmi, val, HDMI_A_VIDPOLCFG);
+ hdmi_modb(hdmi, de, HDMI_A_VIDPOLCFG_DATAENPOL_MASK, HDMI_A_VIDPOLCFG);
- val = hdmi_readb(hdmi, HDMI_A_HDCPCFG1);
- val &= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK;
- val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE;
- hdmi_writeb(hdmi, val, HDMI_A_HDCPCFG1);
+ hdmi_modb(hdmi, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE,
+ HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
}
static void hdmi_config_AVI(struct imx_hdmi *hdmi)
@@ -1140,16 +996,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
} else { /* Carries no data */
@@ -1321,11 +1177,7 @@ static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
{
- u8 clkdis;
-
- clkdis = hdmi_readb(hdmi, HDMI_MC_CLKDIS);
- clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
}
/* Workaround to clear the overflow condition */
@@ -1379,9 +1231,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = ITU601;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
- hdmi->hdmi_data.colorimetry = ITU709;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
(hdmi->vic == 12) || (hdmi->vic == 13) ||
@@ -1460,9 +1312,6 @@ static int imx_hdmi_fb_registered(struct imx_hdmi *hdmi)
/* Clear Hotplug interrupts */
hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
- /* Unmute interrupts */
- hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
-
return 0;
}
@@ -1531,12 +1380,9 @@ static void imx_hdmi_poweroff(struct imx_hdmi *hdmi)
static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
*connector, bool force)
{
- /* FIXME */
- return connector_status_connected;
-}
-
-static void imx_hdmi_connector_destroy(struct drm_connector *connector)
-{
+ struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
+ connector);
+ return hdmi->connector_status;
}
static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -1564,13 +1410,6 @@ static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static int imx_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
-
- return MODE_OK;
-}
-
static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
*connector)
{
@@ -1618,28 +1457,21 @@ static void imx_hdmi_encoder_prepare(struct drm_encoder *encoder)
struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
imx_hdmi_poweroff(hdmi);
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
- V4L2_PIX_FMT_RGB24);
+ imx_drm_panel_format(encoder, V4L2_PIX_FMT_RGB24);
}
static void imx_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
- int mux = imx_drm_encoder_get_mux_id(hdmi->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
imx_hdmi_set_ipu_di_mux(hdmi, mux);
imx_hdmi_poweron(hdmi);
}
-static void imx_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- return;
-}
-
static struct drm_encoder_funcs imx_hdmi_encoder_funcs = {
- .destroy = imx_hdmi_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_hdmi_encoder_helper_funcs = {
@@ -1655,21 +1487,32 @@ static struct drm_connector_funcs imx_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_hdmi_connector_detect,
- .destroy = imx_hdmi_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
.get_modes = imx_hdmi_connector_get_modes,
- .mode_valid = imx_hdmi_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
.best_encoder = imx_hdmi_connector_best_encoder,
};
+static irqreturn_t imx_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct imx_hdmi *hdmi = dev_id;
+ u8 intr_stat;
+
+ intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
+ if (intr_stat)
+ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+
+ return intr_stat ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
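
A minimal sketch of the hard/threaded IRQ split introduced here: the hard half runs in interrupt context, decides whether the shared interrupt is ours, mutes further events and wakes the thread, which does the sleeping work. Names and register offsets below are hypothetical; the real registration call appears further down in this patch:

#include <linux/interrupt.h>
#include <linux/io.h>

struct sketch_dev {
    void __iomem *regs;
};

static irqreturn_t sketch_hardirq(int irq, void *dev_id)
{
    struct sketch_dev *sd = dev_id;
    u8 stat = readb(sd->regs + 0x04);    /* hypothetical status register */

    if (!stat)
        return IRQ_NONE;                 /* shared line, not ours */

    writeb(~0, sd->regs + 0x08);         /* hypothetical mute register */
    return IRQ_WAKE_THREAD;              /* run sketch_thread_fn() */
}

static irqreturn_t sketch_thread_fn(int irq, void *dev_id)
{
    /* sleeping work: power transitions, hotplug notification, ... */
    return IRQ_HANDLED;
}

/* Registered with:
 *    devm_request_threaded_irq(dev, irq, sketch_hardirq, sketch_thread_fn,
 *                              IRQF_SHARED, dev_name(dev), sd);
 */
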
+
static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
{
struct imx_hdmi *hdmi = dev_id;
u8 intr_stat;
u8 phy_int_pol;
- u8 val;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
@@ -1679,55 +1522,47 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
if (phy_int_pol & HDMI_PHY_HPD) {
dev_dbg(hdmi->dev, "EVENT=plugin\n");
- val = hdmi_readb(hdmi, HDMI_PHY_POL0);
- val &= ~HDMI_PHY_HPD;
- hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+ hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
+ hdmi->connector_status = connector_status_connected;
imx_hdmi_poweron(hdmi);
} else {
dev_dbg(hdmi->dev, "EVENT=plugout\n");
- val = hdmi_readb(hdmi, HDMI_PHY_POL0);
- val |= HDMI_PHY_HPD;
- hdmi_writeb(hdmi, val, HDMI_PHY_POL0);
+ hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
+ HDMI_PHY_POL0);
+ hdmi->connector_status = connector_status_disconnected;
imx_hdmi_poweroff(hdmi);
}
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
}
hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
+ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
return IRQ_HANDLED;
}
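The open-coded read/or/write sequences removed above are replaced with hdmi_modb(), a read-modify-write helper introduced elsewhere in this series. Its body is not shown in this hunk; a plausible shape, for reference only (an assumption, not quoted from the patch), would be:

static void hdmi_modb(struct imx_hdmi *hdmi, u8 data, u8 mask, unsigned int reg)
{
	u8 val = hdmi_readb(hdmi, reg) & ~mask;	/* clear the masked bits */

	hdmi_writeb(hdmi, val | (data & mask), reg);	/* merge in the new value */
}

With that shape, hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0) clears the HPD polarity bit and hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD, HDMI_PHY_POL0) sets it, matching the removed readb/andor/writeb code.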
-static int imx_hdmi_register(struct imx_hdmi *hdmi)
+static int imx_hdmi_register(struct drm_device *drm, struct imx_hdmi *hdmi)
{
int ret;
- hdmi->connector.funcs = &imx_hdmi_connector_funcs;
- hdmi->encoder.funcs = &imx_hdmi_encoder_funcs;
+ ret = imx_drm_encoder_parse_of(drm, &hdmi->encoder,
+ hdmi->dev->of_node);
+ if (ret)
+ return ret;
- hdmi->encoder.encoder_type = DRM_MODE_ENCODER_TMDS;
- hdmi->connector.connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&hdmi->encoder, &hdmi->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(hdmi->dev, "adding encoder failed: %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
drm_connector_helper_add(&hdmi->connector,
&imx_hdmi_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&hdmi->connector,
- &hdmi->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(hdmi->imx_drm_encoder);
- dev_err(hdmi->dev, "adding connector failed: %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
hdmi->connector.encoder = &hdmi->encoder;
@@ -1754,28 +1589,33 @@ static const struct of_device_id imx_hdmi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
-static int imx_hdmi_platform_probe(struct platform_device *pdev)
+static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+ struct platform_device *pdev = to_platform_device(dev);
const struct of_device_id *of_id =
- of_match_device(imx_hdmi_dt_ids, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ of_match_device(imx_hdmi_dt_ids, dev);
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_hdmi *hdmi;
struct resource *iores;
int ret, irq;
- hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
- hdmi->dev = &pdev->dev;
+ hdmi->dev = dev;
+ hdmi->connector_status = connector_status_disconnected;
+ hdmi->sample_rate = 48000;
+ hdmi->ratio = 100;
if (of_id) {
const struct platform_device_id *device_id = of_id->data;
hdmi->dev_type = device_id->driver_data;
}
- ddc_node = of_parse_phandle(np, "ddc", 0);
+ ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
if (!hdmi->ddc)
@@ -1790,13 +1630,14 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
if (irq < 0)
return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, imx_hdmi_irq, 0,
- dev_name(&pdev->dev), hdmi);
+ ret = devm_request_threaded_irq(dev, irq, imx_hdmi_hardirq,
+ imx_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
if (ret)
return ret;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(&pdev->dev, iores);
+ hdmi->regs = devm_ioremap_resource(dev, iores);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
@@ -1835,7 +1676,7 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
}
/* Product and revision IDs */
- dev_info(&pdev->dev,
+ dev_info(dev,
"Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
hdmi_readb(hdmi, HDMI_DESIGN_ID),
hdmi_readb(hdmi, HDMI_REVISION_ID),
@@ -1863,13 +1704,14 @@ static int imx_hdmi_platform_probe(struct platform_device *pdev)
if (ret)
goto err_iahb;
- ret = imx_hdmi_register(hdmi);
+ ret = imx_hdmi_register(drm, hdmi);
if (ret)
goto err_iahb;
- imx_drm_encoder_add_possible_crtcs(hdmi->imx_drm_encoder, np);
+ /* Unmute interrupts */
+ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
- platform_set_drvdata(pdev, hdmi);
+ dev_set_drvdata(dev, hdmi);
return 0;
@@ -1881,20 +1723,35 @@ err_isfr:
return ret;
}
-static int imx_hdmi_platform_remove(struct platform_device *pdev)
+static void imx_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
- struct drm_connector *connector = &hdmi->connector;
- struct drm_encoder *encoder = &hdmi->encoder;
+ struct imx_hdmi *hdmi = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
- imx_drm_remove_connector(hdmi->imx_drm_connector);
- imx_drm_remove_encoder(hdmi->imx_drm_encoder);
+ /* Disable all interrupts */
+ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
i2c_put_adapter(hdmi->ddc);
+}
+
+static const struct component_ops hdmi_ops = {
+ .bind = imx_hdmi_bind,
+ .unbind = imx_hdmi_unbind,
+};
+
+static int imx_hdmi_platform_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &hdmi_ops);
+}
+static int imx_hdmi_platform_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &hdmi_ops);
return 0;
}
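The interrupt rework above splits HPD handling into a hard handler that only quiesces the block and a threaded handler that does the possibly sleeping work. A minimal sketch of that split, with hypothetical helper names (foo_*) standing in for the register accesses in the driver:

static irqreturn_t hpd_hardirq(int irq, void *dev_id)
{
	struct foo *priv = dev_id;

	if (!foo_read_irq_status(priv))		/* shared IRQ: maybe not ours */
		return IRQ_NONE;

	foo_mute_all_irqs(priv);		/* silence the block ... */
	return IRQ_WAKE_THREAD;			/* ... and defer to the thread */
}

static irqreturn_t hpd_thread(int irq, void *dev_id)
{
	struct foo *priv = dev_id;

	foo_handle_hotplug(priv);		/* may sleep: clocks, DRM, EDID */
	foo_ack_and_unmute_hpd(priv);		/* re-arm only the HPD source */
	return IRQ_HANDLED;
}

/* registered as in the hunk above: */
ret = devm_request_threaded_irq(dev, irq, hpd_hardirq, hpd_thread,
				IRQF_SHARED, dev_name(dev), priv);

Note also that the unmute of the HPD interrupt moves from imx_hdmi_fb_registered() to the end of bind(), after imx_hdmi_register() has created the connector, so the first hotplug event cannot fire before the connector exists.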
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 7e593296ac47..fe4c1ef4e7a5 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
@@ -58,9 +59,8 @@ struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
+ struct device_node *child;
int chno;
void *edid;
int edid_len;
@@ -91,11 +91,6 @@ static enum drm_connector_status imx_ldb_connector_detect(
return connector_status_connected;
}
-static void imx_ldb_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -111,6 +106,8 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
drm_mode_copy(mode, &imx_ldb_ch->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
@@ -120,12 +117,6 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
-static int imx_ldb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return 0;
-}
-
static struct drm_encoder *imx_ldb_connector_best_encoder(
struct drm_connector *connector)
{
@@ -168,7 +159,9 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
/* set display clock mux to LDB input clock */
ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
if (ret)
- dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno);
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to ldb_di%d\n", mux,
+ chno);
}
static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
@@ -179,8 +172,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
u32 pixel_fmt;
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
/* dual channel LVDS mode */
@@ -189,7 +181,8 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
} else {
serial_clk = 7000UL * mode->clock;
- imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, di_clk);
+ imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+ di_clk);
}
switch (imx_ldb_ch->chno) {
@@ -207,8 +200,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
pixel_fmt = V4L2_PIX_FMT_RGB24;
}
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_LVDS,
- pixel_fmt);
+ imx_drm_panel_format(encoder, pixel_fmt);
}
static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
@@ -216,8 +208,7 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
- encoder->crtc);
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
if (dual) {
clk_prepare_enable(ldb->clk[0]);
@@ -316,26 +307,21 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
}
}
-static void imx_ldb_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_ldb_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
- .destroy = imx_ldb_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
.get_modes = imx_ldb_connector_get_modes,
.best_encoder = imx_ldb_connector_best_encoder,
- .mode_valid = imx_ldb_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
- .destroy = imx_ldb_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
@@ -351,56 +337,47 @@ static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
{
char clkname[16];
- sprintf(clkname, "di%d", chno);
+ snprintf(clkname, sizeof(clkname), "di%d", chno);
ldb->clk[chno] = devm_clk_get(ldb->dev, clkname);
if (IS_ERR(ldb->clk[chno]))
return PTR_ERR(ldb->clk[chno]);
- sprintf(clkname, "di%d_pll", chno);
+ snprintf(clkname, sizeof(clkname), "di%d_pll", chno);
ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname);
return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]);
}
-static int imx_ldb_register(struct imx_ldb_channel *imx_ldb_ch)
+static int imx_ldb_register(struct drm_device *drm,
+ struct imx_ldb_channel *imx_ldb_ch)
{
- int ret;
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int ret;
+
+ ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
+ imx_ldb_ch->child);
+ if (ret)
+ return ret;
ret = imx_ldb_get_clk(ldb, imx_ldb_ch->chno);
if (ret)
return ret;
+
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
- ret |= imx_ldb_get_clk(ldb, 1);
+ ret = imx_ldb_get_clk(ldb, 1);
if (ret)
return ret;
}
- imx_ldb_ch->connector.funcs = &imx_ldb_connector_funcs;
- imx_ldb_ch->encoder.funcs = &imx_ldb_encoder_funcs;
-
- imx_ldb_ch->encoder.encoder_type = DRM_MODE_ENCODER_LVDS;
- imx_ldb_ch->connector.connector_type = DRM_MODE_CONNECTOR_LVDS;
-
drm_encoder_helper_add(&imx_ldb_ch->encoder,
&imx_ldb_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&imx_ldb_ch->encoder,
- &imx_ldb_ch->imx_drm_encoder, THIS_MODULE);
- if (ret) {
- dev_err(ldb->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
drm_connector_helper_add(&imx_ldb_ch->connector,
&imx_ldb_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&imx_ldb_ch->connector,
- &imx_ldb_ch->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(imx_ldb_ch->imx_drm_encoder);
- dev_err(ldb->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &imx_ldb_ch->connector,
+ &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
&imx_ldb_ch->encoder);
@@ -459,11 +436,12 @@ static const struct of_device_id imx_ldb_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
-static int imx_ldb_probe(struct platform_device *pdev)
+static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
const struct of_device_id *of_id =
- of_match_device(imx_ldb_dt_ids, &pdev->dev);
+ of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
const u8 *edidp;
struct imx_ldb *imx_ldb;
@@ -473,17 +451,17 @@ static int imx_ldb_probe(struct platform_device *pdev)
int ret;
int i;
- imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
if (!imx_ldb)
return -ENOMEM;
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
- dev_err(&pdev->dev, "failed to get parent regmap\n");
+ dev_err(dev, "failed to get parent regmap\n");
return PTR_ERR(imx_ldb->regmap);
}
- imx_ldb->dev = &pdev->dev;
+ imx_ldb->dev = dev;
if (of_id)
imx_ldb->lvds_mux = of_id->data;
@@ -521,7 +499,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
return -EINVAL;
if (dual && i > 0) {
- dev_warn(&pdev->dev, "dual-channel mode, ignoring second output\n");
+ dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
@@ -531,6 +509,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
+ channel->child = child;
edidp = of_get_property(child, "edid", &channel->edid_len);
if (edidp) {
@@ -553,54 +532,67 @@ static int imx_ldb_probe(struct platform_device *pdev)
case LVDS_BIT_MAP_SPWG:
if (datawidth == 24) {
if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ imx_ldb->ldb_ctrl |=
+ LDB_DATA_WIDTH_CH0_24;
if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ imx_ldb->ldb_ctrl |=
+ LDB_DATA_WIDTH_CH1_24;
}
break;
case LVDS_BIT_MAP_JEIDA:
if (datawidth == 18) {
- dev_err(&pdev->dev, "JEIDA standard only supported in 24 bit\n");
+ dev_err(dev, "JEIDA standard only supported in 24 bit\n");
return -EINVAL;
}
if (i == 0 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | LDB_BIT_MAP_CH0_JEIDA;
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
if (i == 1 || dual)
- imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | LDB_BIT_MAP_CH1_JEIDA;
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
break;
default:
- dev_err(&pdev->dev, "data mapping not specified or invalid\n");
+ dev_err(dev, "data mapping not specified or invalid\n");
return -EINVAL;
}
- ret = imx_ldb_register(channel);
+ ret = imx_ldb_register(drm, channel);
if (ret)
return ret;
-
- imx_drm_encoder_add_possible_crtcs(channel->imx_drm_encoder, child);
}
- platform_set_drvdata(pdev, imx_ldb);
+ dev_set_drvdata(dev, imx_ldb);
return 0;
}
-static int imx_ldb_remove(struct platform_device *pdev)
+static void imx_ldb_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
int i;
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
- struct drm_connector *connector = &channel->connector;
- struct drm_encoder *encoder = &channel->encoder;
-
- drm_mode_connector_detach_encoder(connector, encoder);
- imx_drm_remove_connector(channel->imx_drm_connector);
- imx_drm_remove_encoder(channel->imx_drm_encoder);
+ channel->connector.funcs->destroy(&channel->connector);
+ channel->encoder.funcs->destroy(&channel->encoder);
}
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+ .unbind = imx_ldb_unbind,
+};
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_ldb_ops);
+}
+
+static int imx_ldb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_ldb_ops);
return 0;
}
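imx-hdmi, imx-ldb and the drivers changed below all move to the component framework in the same way: probe()/remove() only register the device as a component, while the real setup runs in bind() once the master DRM device exists (the matching master side is not shown in this section). Stripped of driver specifics, the shape is roughly:

/* needs <linux/component.h>, <linux/platform_device.h>; "foo" is a placeholder */
static int foo_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;	/* handed down by the DRM master */

	/* allocate state, map resources, init encoder/connector against drm */
	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	/* tear down exactly what bind() created */
}

static const struct component_ops foo_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &foo_ops);
}

static int foo_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &foo_ops);
	return 0;
}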
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 9abc7ca8b6cf..575533f4fd64 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/component.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -30,6 +31,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include "ipu-v3/imx-ipu-v3.h"
#include "imx-drm.h"
#define TVE_COM_CONF_REG 0x00
@@ -110,9 +112,7 @@ enum {
struct imx_tve {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
spinlock_t lock; /* register lock */
bool enabled;
@@ -225,11 +225,6 @@ static enum drm_connector_status imx_tve_connector_detect(
return connector_status_connected;
}
-static void imx_tve_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_tve_connector_get_modes(struct drm_connector *connector)
{
struct imx_tve *tve = con_to_tve(connector);
@@ -254,6 +249,11 @@ static int imx_tve_connector_mode_valid(struct drm_connector *connector,
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
+ int ret;
+
+ ret = imx_drm_connector_mode_valid(connector, mode);
+ if (ret != MODE_OK)
+ return ret;
/* pixel clock with 2x oversampling */
rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
@@ -305,13 +305,11 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_crtc_panel_format_pins(encoder->crtc,
- DRM_MODE_ENCODER_DAC, IPU_PIX_FMT_GBR24,
+ imx_drm_panel_format_pins(encoder, IPU_PIX_FMT_GBR24,
tve->hsync_pin, tve->vsync_pin);
break;
case TVE_MODE_TVOUT:
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_TVDAC,
- V4L2_PIX_FMT_YUV444);
+ imx_drm_panel_format(encoder, V4L2_PIX_FMT_YUV444);
break;
}
}
@@ -364,16 +362,11 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
tve_disable(tve);
}
-static void imx_tve_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_tve_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
- .destroy = imx_tve_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -383,7 +376,7 @@ static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
};
static struct drm_encoder_funcs imx_tve_encoder_funcs = {
- .destroy = imx_tve_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
@@ -503,34 +496,27 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
return 0;
}
-static int imx_tve_register(struct imx_tve *tve)
+static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
{
+ int encoder_type;
int ret;
- tve->connector.funcs = &imx_tve_connector_funcs;
- tve->encoder.funcs = &imx_tve_encoder_funcs;
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
- tve->encoder.encoder_type = DRM_MODE_ENCODER_NONE;
- tve->connector.connector_type = DRM_MODE_CONNECTOR_VGA;
+ ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
+ tve->dev->of_node);
+ if (ret)
+ return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&tve->encoder, &tve->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(tve->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
+ encoder_type);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
-
- ret = imx_drm_add_connector(&tve->connector,
- &tve->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(tve->imx_drm_encoder);
- dev_err(tve->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
@@ -576,9 +562,11 @@ static const int of_get_tve_mode(struct device_node *np)
return -EINVAL;
}
-static int imx_tve_probe(struct platform_device *pdev)
+static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_tve *tve;
struct resource *res;
@@ -587,14 +575,14 @@ static int imx_tve_probe(struct platform_device *pdev)
int irq;
int ret;
- tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
if (!tve)
return -ENOMEM;
- tve->dev = &pdev->dev;
+ tve->dev = dev;
spin_lock_init(&tve->lock);
- ddc_node = of_parse_phandle(np, "ddc", 0);
+ ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
if (ddc_node) {
tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
@@ -602,7 +590,7 @@ static int imx_tve_probe(struct platform_device *pdev)
tve->mode = of_get_tve_mode(np);
if (tve->mode != TVE_MODE_VGA) {
- dev_err(&pdev->dev, "only VGA mode supported, currently\n");
+ dev_err(dev, "only VGA mode supported, currently\n");
return -EINVAL;
}
@@ -611,7 +599,7 @@ static int imx_tve_probe(struct platform_device *pdev)
&tve->hsync_pin);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get vsync pin\n");
return ret;
}
@@ -619,40 +607,40 @@ static int imx_tve_probe(struct platform_device *pdev)
&tve->vsync_pin);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get vsync pin\n");
return ret;
}
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
tve_regmap_config.lock_arg = tve;
- tve->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "tve", base,
+ tve->regmap = devm_regmap_init_mmio_clk(dev, "tve", base,
&tve_regmap_config);
if (IS_ERR(tve->regmap)) {
- dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ dev_err(dev, "failed to init regmap: %ld\n",
PTR_ERR(tve->regmap));
return PTR_ERR(tve->regmap);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ dev_err(dev, "failed to get irq\n");
return irq;
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ret = devm_request_threaded_irq(dev, irq, NULL,
imx_tve_irq_handler, IRQF_ONESHOT,
"imx-tve", tve);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ dev_err(dev, "failed to request irq: %d\n", ret);
return ret;
}
- tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
+ tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
ret = regulator_enable(tve->dac_reg);
@@ -660,17 +648,17 @@ static int imx_tve_probe(struct platform_device *pdev)
return ret;
}
- tve->clk = devm_clk_get(&pdev->dev, "tve");
+ tve->clk = devm_clk_get(dev, "tve");
if (IS_ERR(tve->clk)) {
- dev_err(&pdev->dev, "failed to get high speed tve clock: %ld\n",
+ dev_err(dev, "failed to get high speed tve clock: %ld\n",
PTR_ERR(tve->clk));
return PTR_ERR(tve->clk);
}
/* this is the IPU DI clock input selector, can be parented to tve_di */
- tve->di_sel_clk = devm_clk_get(&pdev->dev, "di_sel");
+ tve->di_sel_clk = devm_clk_get(dev, "di_sel");
if (IS_ERR(tve->di_sel_clk)) {
- dev_err(&pdev->dev, "failed to get ipu di mux clock: %ld\n",
+ dev_err(dev, "failed to get ipu di mux clock: %ld\n",
PTR_ERR(tve->di_sel_clk));
return PTR_ERR(tve->di_sel_clk);
}
@@ -681,42 +669,51 @@ static int imx_tve_probe(struct platform_device *pdev)
ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to read configuration register: %d\n", ret);
+ dev_err(dev, "failed to read configuration register: %d\n", ret);
return ret;
}
if (val != 0x00100000) {
- dev_err(&pdev->dev, "configuration register default value indicates this is not a TVEv2\n");
+ dev_err(dev, "configuration register default value indicates this is not a TVEv2\n");
return -ENODEV;
}
/* disable cable detection for VGA mode */
ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
- ret = imx_tve_register(tve);
+ ret = imx_tve_register(drm, tve);
if (ret)
return ret;
- ret = imx_drm_encoder_add_possible_crtcs(tve->imx_drm_encoder, np);
-
- platform_set_drvdata(pdev, tve);
+ dev_set_drvdata(dev, tve);
return 0;
}
-static int imx_tve_remove(struct platform_device *pdev)
+static void imx_tve_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_tve *tve = platform_get_drvdata(pdev);
- struct drm_connector *connector = &tve->connector;
- struct drm_encoder *encoder = &tve->encoder;
+ struct imx_tve *tve = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
-
- imx_drm_remove_connector(tve->imx_drm_connector);
- imx_drm_remove_encoder(tve->imx_drm_encoder);
+ tve->connector.funcs->destroy(&tve->connector);
+ tve->encoder.funcs->destroy(&tve->encoder);
if (!IS_ERR(tve->dac_reg))
regulator_disable(tve->dac_reg);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+ .unbind = imx_tve_unbind,
+};
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_tve_ops);
+}
+
+static int imx_tve_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_tve_ops);
return 0;
}
diff --git a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
index 4826b5c0249d..c4d14ead5837 100644
--- a/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
+++ b/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
@@ -25,6 +25,8 @@ enum ipuv3_type {
IPUV3H,
};
+#define IPU_PIX_FMT_GBR24 v4l2_fourcc('G', 'B', 'R', '3')
+
/*
* Bitfield of Display Interface signal polarities.
*/
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
index d0e3bc3c53e7..d5de8bb5c803 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
@@ -262,7 +262,7 @@ void ipu_dc_disable_channel(struct ipu_dc *dc)
/* Wait for DC triple buffer to empty */
while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
- msleep(2);
+ usleep_range(2000, 20000);
timeout -= 2;
if (timeout <= 0)
break;
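The msleep(2) in the triple-buffer drain loop becomes usleep_range(2000, 20000): for waits of only a couple of milliseconds, msleep() can overshoot badly on low-HZ kernels because it rounds up to jiffies, while usleep_range() uses hrtimers and the wide upper bound lets wakeups be coalesced. The loop keeps the same poll-with-timeout shape, roughly:

	int timeout = 50;			/* milliseconds, illustrative */

	while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
		usleep_range(2000, 20000);	/* sleep at least 2 ms */
		timeout -= 2;
		if (timeout <= 0)
			break;
	}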
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
index 948a49b289ef..82a9ebad697c 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
@@ -19,9 +19,6 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include "imx-ipu-v3.h"
#include "ipu-prv.h"
@@ -33,10 +30,7 @@ struct ipu_di {
struct clk *clk_di; /* display input clock */
struct clk *clk_ipu; /* IPU bus clock */
struct clk *clk_di_pixel; /* resulting pixel clock */
- struct clk_hw clk_hw_out;
- char *clk_name;
bool inuse;
- unsigned long clkflags;
struct ipu_soc *ipu;
};
@@ -141,130 +135,6 @@ static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
writel(value, di->base + offset);
}
-static int ipu_di_clk_calc_div(unsigned long inrate, unsigned long outrate)
-{
- u64 tmp = inrate;
- int div;
-
- tmp *= 16;
-
- do_div(tmp, outrate);
-
- div = tmp;
-
- if (div < 0x10)
- div = 0x10;
-
-#ifdef WTF_IS_THIS
- /*
- * Freescale has this in their Kernel. It is neither clear what
- * it does nor why it does it
- */
- if (div & 0x10)
- div &= ~0x7;
- else {
- /* Round up divider if it gets us closer to desired pix clk */
- if ((div & 0xC) == 0xC) {
- div += 0x10;
- div &= ~0xF;
- }
- }
-#endif
- return div;
-}
-
-static unsigned long clk_di_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- unsigned long outrate;
- u32 div = ipu_di_read(di, DI_BS_CLKGEN0);
-
- if (div < 0x10)
- div = 0x10;
-
- outrate = (parent_rate / div) * 16;
-
- return outrate;
-}
-
-static long clk_di_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- unsigned long outrate;
- int div;
- u32 val;
-
- div = ipu_di_clk_calc_div(*prate, rate);
-
- outrate = (*prate / div) * 16;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- if (!(val & DI_GEN_DI_CLK_EXT) && outrate > *prate / 2)
- outrate = *prate / 2;
-
- dev_dbg(di->ipu->dev,
- "%s: inrate: %ld div: 0x%08x outrate: %ld wanted: %ld\n",
- __func__, *prate, div, outrate, rate);
-
- return outrate;
-}
-
-static int clk_di_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- int div;
- u32 clkgen0;
-
- clkgen0 = ipu_di_read(di, DI_BS_CLKGEN0) & ~0xfff;
-
- div = ipu_di_clk_calc_div(parent_rate, rate);
-
- ipu_di_write(di, clkgen0 | div, DI_BS_CLKGEN0);
-
- dev_dbg(di->ipu->dev, "%s: inrate: %ld desired: %ld div: 0x%08x\n",
- __func__, parent_rate, rate, div);
- return 0;
-}
-
-static u8 clk_di_get_parent(struct clk_hw *hw)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- u32 val;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- return val & DI_GEN_DI_CLK_EXT ? 1 : 0;
-}
-
-static int clk_di_set_parent(struct clk_hw *hw, u8 index)
-{
- struct ipu_di *di = container_of(hw, struct ipu_di, clk_hw_out);
- u32 val;
-
- val = ipu_di_read(di, DI_GENERAL);
-
- if (index)
- val |= DI_GEN_DI_CLK_EXT;
- else
- val &= ~DI_GEN_DI_CLK_EXT;
-
- ipu_di_write(di, val, DI_GENERAL);
-
- return 0;
-}
-
-static struct clk_ops clk_di_ops = {
- .round_rate = clk_di_round_rate,
- .set_rate = clk_di_set_rate,
- .recalc_rate = clk_di_recalc_rate,
- .set_parent = clk_di_set_parent,
- .get_parent = clk_di_get_parent,
-};
-
static void ipu_di_data_wave_config(struct ipu_di *di,
int wave_gen,
int access_size, int component_size)
@@ -528,15 +398,125 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
}
+static void ipu_di_config_clock(struct ipu_di *di,
+ const struct ipu_di_signal_cfg *sig)
+{
+ struct clk *clk;
+ unsigned clkgen0;
+ uint32_t val;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
+ /*
+ * CLKMODE_EXT means we must use the DI clock: this is
+ * needed for things like LVDS which needs to feed the
+ * DI and LDB with the same pixel clock.
+ */
+ clk = di->clk_di;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
+ /*
+ * CLKMODE_SYNC means that we want the DI to be
+ * clocked at the same rate as the parent clock.
+ * This is needed (eg) for LDB which needs to be
+ * fed with the same pixel clock. We assume that
+ * the LDB clock has already been set correctly.
+ */
+ clkgen0 = 1 << 4;
+ } else {
+ /*
+ * We can use the divider. We should really have
+ * a flag here indicating whether the bridge can
+ * cope with a fractional divider or not. For the
+ * time being, let's go for simplicity and
+ * reliability.
+ */
+ unsigned long in_rate;
+ unsigned div;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ } else {
+ /*
+ * For other interfaces, we can arbitrarily select between
+ * the DI specific clock and the internal IPU clock. See
+ * DI_GENERAL bit 20. We select the IPU clock if it can
+ * give us a clock rate within 1% of the requested frequency,
+ * otherwise we use the DI clock.
+ */
+ unsigned long rate, clkrate;
+ unsigned div, error;
+
+ clkrate = clk_get_rate(di->clk_ipu);
+ div = (clkrate + sig->pixelclock / 2) / sig->pixelclock;
+ rate = clkrate / div;
+
+ error = rate / (sig->pixelclock / 1000);
+
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
+ rate, div, (signed)(error - 1000) / 10, error % 10);
+
+ /* Allow a 1% error */
+ if (error < 1010 && error >= 990) {
+ clk = di->clk_ipu;
+
+ clkgen0 = div << 4;
+ } else {
+ unsigned long in_rate;
+ unsigned div;
+
+ clk = di->clk_di;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ }
+
+ di->clk_di_pixel = clk;
+
+ /* Set the divider */
+ ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
+
+ /*
+ * Set the high/low periods. Bits 24:16 give us the falling edge,
+ * and bits 8:0 give the rising edge. LSB is fraction, and is
+ * based on the divider above. We want a 50% duty cycle, so set
+ * the falling edge to be half the divider.
+ */
+ ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
+
+ /* Finally select the input clock */
+ val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
+ if (clk == di->clk_di)
+ val |= DI_GEN_DI_CLK_EXT;
+ ipu_di_write(di, val, DI_GENERAL);
+
+ dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
+ sig->pixelclock,
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ clk == di->clk_di ? "DI" : "IPU",
+ clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
+}
+
int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
{
u32 reg;
u32 di_gen, vsync_cnt;
u32 div;
u32 h_total, v_total;
- int ret;
- unsigned long round;
- struct clk *parent;
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->width, sig->height);
@@ -544,33 +524,20 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
if ((sig->v_sync_width == 0) || (sig->h_sync_width == 0))
return -EINVAL;
- if (sig->clkflags & IPU_DI_CLKMODE_EXT)
- parent = di->clk_di;
- else
- parent = di->clk_ipu;
-
- ret = clk_set_parent(di->clk_di_pixel, parent);
- if (ret) {
- dev_err(di->ipu->dev,
- "setting pixel clock to parent %s failed with %d\n",
- __clk_get_name(parent), ret);
- return ret;
- }
-
- if (sig->clkflags & IPU_DI_CLKMODE_SYNC)
- round = clk_get_rate(parent);
- else
- round = clk_round_rate(di->clk_di_pixel, sig->pixelclock);
-
- ret = clk_set_rate(di->clk_di_pixel, round);
-
h_total = sig->width + sig->h_sync_width + sig->h_start_width +
sig->h_end_width;
v_total = sig->height + sig->v_sync_width + sig->v_start_width +
sig->v_end_width;
+ dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ sig->pixelclock);
+
mutex_lock(&di_mutex);
+ ipu_di_config_clock(di, sig);
+
div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
div = div / 16; /* Now divider is integer portion */
@@ -654,7 +621,11 @@ EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
int ipu_di_enable(struct ipu_di *di)
{
- int ret = clk_prepare_enable(di->clk_di_pixel);
+ int ret;
+
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ret = clk_prepare_enable(di->clk_di_pixel);
if (ret)
return ret;
@@ -666,6 +637,8 @@ EXPORT_SYMBOL_GPL(ipu_di_enable);
int ipu_di_disable(struct ipu_di *di)
{
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
ipu_module_disable(di->ipu, di->module);
clk_disable_unprepare(di->clk_di_pixel);
@@ -721,13 +694,6 @@ int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
u32 module, struct clk *clk_ipu)
{
struct ipu_di *di;
- int ret;
- const char *di_parent[2];
- struct clk_init_data init = {
- .ops = &clk_di_ops,
- .num_parents = 2,
- .flags = 0,
- };
if (id > 1)
return -ENODEV;
@@ -749,45 +715,16 @@ int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
if (!di->base)
return -ENOMEM;
- di_parent[0] = __clk_get_name(di->clk_ipu);
- di_parent[1] = __clk_get_name(di->clk_di);
-
ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
- init.parent_names = (const char **)&di_parent;
- di->clk_name = kasprintf(GFP_KERNEL, "%s_di%d_pixel",
- dev_name(dev), id);
- if (!di->clk_name)
- return -ENOMEM;
-
- init.name = di->clk_name;
-
- di->clk_hw_out.init = &init;
- di->clk_di_pixel = clk_register(dev, &di->clk_hw_out);
-
- if (IS_ERR(di->clk_di_pixel)) {
- ret = PTR_ERR(di->clk_di_pixel);
- goto failed_clk_register;
- }
-
dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
id, base, di->base);
di->inuse = false;
di->ipu = ipu;
return 0;
-
-failed_clk_register:
-
- kfree(di->clk_name);
-
- return ret;
}
void ipu_di_exit(struct ipu_soc *ipu, int id)
{
- struct ipu_di *di = ipu->di_priv[id];
-
- clk_unregister(di->clk_di_pixel);
- kfree(di->clk_name);
}
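ipu_di_config_clock() above replaces the former composite-clock registration with a direct divider computation. Its 1% test works in tenths of a percent: error = rate / (pixelclock / 1000) is 1000 when the divided IPU clock hits the request exactly, and only values in [990, 1010] are accepted. A worked example with illustrative numbers:

	unsigned long clkrate = 264000000;	/* IPU HSP clock, illustrative */
	unsigned long pixelclock = 65000000;	/* requested pixel clock */
	unsigned long rate;
	unsigned int div, error;

	div   = (clkrate + pixelclock / 2) / pixelclock;	/* = 4 (rounded) */
	rate  = clkrate / div;					/* = 66 MHz */
	error = rate / (pixelclock / 1000);			/* = 1015 */

	/* 1015 lies outside [990, 1010], i.e. more than 1% off, so this mode
	 * would fall back to the DI-specific clock, which is then re-rated to
	 * the exact pixel clock instead. */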
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
index 98070dd8c920..45213017fa4b 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
@@ -161,9 +161,6 @@ static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
"dmfc: using %d slots starting from segment %d for IPU channel %d\n",
slots, segment, dmfc->data->ipu_channel);
- if (!dmfc)
- return -EINVAL;
-
switch (slots) {
case 1:
field = DMFC_FIFO_SIZE_64;
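The NULL test dropped from ipu_dmfc_setup_channel() could never trigger: dmfc had already been dereferenced for the dev_dbg() a few lines earlier, so the check was dead code of the classic use-before-check form:

	dev_dbg(..., "... IPU channel %d\n", dmfc->data->ipu_channel);	/* dereference */
	if (!dmfc)							/* dead check */
		return -EINVAL;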
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 22be104fbda9..a8d017854615 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
+#include <linux/component.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/device.h>
@@ -284,6 +285,7 @@ static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
ipu_crtc->di_clkflags = IPU_DI_CLKMODE_SYNC |
IPU_DI_CLKMODE_EXT;
break;
+ case DRM_MODE_ENCODER_TMDS:
case DRM_MODE_ENCODER_NONE:
ipu_crtc->di_clkflags = 0;
break;
@@ -334,7 +336,7 @@ err_out:
}
static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata)
+ struct ipu_client_platformdata *pdata, struct drm_device *drm)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int dp = -EINVAL;
@@ -348,10 +350,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
return ret;
}
- ret = imx_drm_add_crtc(&ipu_crtc->base,
- &ipu_crtc->imx_crtc,
- &ipu_crtc_helper_funcs, THIS_MODULE,
- ipu_crtc->dev->parent->of_node, pdata->di);
+ ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
+ &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
@@ -399,43 +399,96 @@ err_put_resources:
return ret;
}
-static int ipu_drm_probe(struct platform_device *pdev)
+static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
+ int port_id)
{
- struct ipu_client_platformdata *pdata = pdev->dev.platform_data;
- struct ipu_crtc *ipu_crtc;
- int ret;
+ struct device_node *port;
+ int id, ret;
+
+ port = of_get_child_by_name(parent, "port");
+ while (port) {
+ ret = of_property_read_u32(port, "reg", &id);
+ if (!ret && id == port_id)
+ return port;
+
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (of_node_cmp(port->name, "port"));
+ }
- if (!pdata)
- return -EINVAL;
+ return NULL;
+}
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
+{
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct drm_device *drm = data;
+ struct ipu_crtc *ipu_crtc;
+ int ret;
- ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
+ ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
return -ENOMEM;
- ipu_crtc->dev = &pdev->dev;
+ ipu_crtc->dev = dev;
- ret = ipu_crtc_init(ipu_crtc, pdata);
+ ret = ipu_crtc_init(ipu_crtc, pdata, drm);
if (ret)
return ret;
- platform_set_drvdata(pdev, ipu_crtc);
+ dev_set_drvdata(dev, ipu_crtc);
return 0;
}
-static int ipu_drm_remove(struct platform_device *pdev)
+static void ipu_drm_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct ipu_crtc *ipu_crtc = platform_get_drvdata(pdev);
+ struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
+}
+
+static const struct component_ops ipu_crtc_ops = {
+ .bind = ipu_drm_bind,
+ .unbind = ipu_drm_unbind,
+};
+static int ipu_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ int ret;
+
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ if (!dev->of_node) {
+ /* Associate crtc device with the corresponding DI port node */
+ dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
+ pdata->di + 2);
+ if (!dev->of_node) {
+ dev_err(dev, "missing port@%d node in %s\n",
+ pdata->di + 2, dev->parent->of_node->full_name);
+ return -ENODEV;
+ }
+ }
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return component_add(dev, &ipu_crtc_ops);
+}
+
+static int ipu_drm_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &ipu_crtc_ops);
return 0;
}
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c
index 34b642a12f8b..b0c9b6ce4854 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/staging/imx-drm/ipuv3-plane.c
@@ -72,8 +72,8 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
return -EFAULT;
}
- dev_dbg(ipu_plane->base.dev->dev, "phys = 0x%x, x = %d, y = %d",
- cma_obj->paddr, x, y);
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+ &cma_obj->paddr, x, y);
cpmem = ipu_get_cpmem(ipu_plane->ipu_ch);
ipu_cpmem_set_stride(cpmem, fb->pitches[0]);
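The dev_dbg() change above switches to the %pad printk specifier, which prints a dma_addr_t at its real width (with LPAE, paddr can be 64-bit, where %x would truncate and trigger a format warning). %pad takes a pointer to the value, hence the added '&':

	dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
		&cma_obj->paddr, x, y);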
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index 351d61dede00..c60b6c645f42 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -18,10 +18,12 @@
* MA 02110-1301, USA.
*/
+#include <linux/component.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <linux/videodev2.h>
#include <video/of_display_timing.h>
@@ -32,15 +34,14 @@
struct imx_parallel_display {
struct drm_connector connector;
- struct imx_drm_connector *imx_drm_connector;
struct drm_encoder encoder;
- struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
void *edid;
int edid_len;
u32 interface_pix_fmt;
int mode_valid;
struct drm_display_mode mode;
+ struct drm_panel *panel;
};
static enum drm_connector_status imx_pd_connector_detect(
@@ -49,17 +50,19 @@ static enum drm_connector_status imx_pd_connector_detect(
return connector_status_connected;
}
-static void imx_pd_connector_destroy(struct drm_connector *connector)
-{
- /* do not free here */
-}
-
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
struct device_node *np = imxpd->dev->of_node;
int num_modes = 0;
+ if (imxpd->panel && imxpd->panel->funcs &&
+ imxpd->panel->funcs->get_modes) {
+ num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+ if (num_modes > 0)
+ return num_modes;
+ }
+
if (imxpd->edid) {
drm_mode_connector_update_edid_property(connector, imxpd->edid);
num_modes = drm_add_edid_modes(connector, imxpd->edid);
@@ -67,6 +70,8 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->mode_valid) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
@@ -75,6 +80,8 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return -EINVAL;
of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
@@ -85,12 +92,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
-static int imx_pd_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return 0;
-}
-
static struct drm_encoder *imx_pd_connector_best_encoder(
struct drm_connector *connector)
{
@@ -101,6 +102,12 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ drm_panel_disable(imxpd->panel);
+ else
+ drm_panel_enable(imxpd->panel);
}
static bool imx_pd_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -114,8 +121,7 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_NONE,
- imxpd->interface_pix_fmt);
+ imx_drm_panel_format(encoder, imxpd->interface_pix_fmt);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -132,26 +138,21 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
}
-static void imx_pd_encoder_destroy(struct drm_encoder *encoder)
-{
- /* do not free here */
-}
-
static struct drm_connector_funcs imx_pd_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
- .destroy = imx_pd_connector_destroy,
+ .destroy = imx_drm_connector_destroy,
};
static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.get_modes = imx_pd_connector_get_modes,
.best_encoder = imx_pd_connector_best_encoder,
- .mode_valid = imx_pd_connector_mode_valid,
+ .mode_valid = imx_drm_connector_mode_valid,
};
static struct drm_encoder_funcs imx_pd_encoder_funcs = {
- .destroy = imx_pd_encoder_destroy,
+ .destroy = imx_drm_encoder_destroy,
};
static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
@@ -163,51 +164,46 @@ static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
.disable = imx_pd_encoder_disable,
};
-static int imx_pd_register(struct imx_parallel_display *imxpd)
+static int imx_pd_register(struct drm_device *drm,
+ struct imx_parallel_display *imxpd)
{
int ret;
- drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
-
- imxpd->connector.funcs = &imx_pd_connector_funcs;
- imxpd->encoder.funcs = &imx_pd_encoder_funcs;
-
- imxpd->encoder.encoder_type = DRM_MODE_ENCODER_NONE;
- imxpd->connector.connector_type = DRM_MODE_CONNECTOR_VGA;
+ ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
+ imxpd->dev->of_node);
+ if (ret)
+ return ret;
drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
- ret = imx_drm_add_encoder(&imxpd->encoder, &imxpd->imx_drm_encoder,
- THIS_MODULE);
- if (ret) {
- dev_err(imxpd->dev, "adding encoder failed with %d\n", ret);
- return ret;
- }
+ drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+ DRM_MODE_ENCODER_NONE);
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
- ret = imx_drm_add_connector(&imxpd->connector,
- &imxpd->imx_drm_connector, THIS_MODULE);
- if (ret) {
- imx_drm_remove_encoder(imxpd->imx_drm_encoder);
- dev_err(imxpd->dev, "adding connector failed with %d\n", ret);
- return ret;
- }
+ if (imxpd->panel)
+ drm_panel_attach(imxpd->panel, &imxpd->connector);
+
+ drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
imxpd->connector.encoder = &imxpd->encoder;
return 0;
}
-static int imx_pd_probe(struct platform_device *pdev)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
- struct device_node *np = pdev->dev.of_node;
+ struct drm_device *drm = data;
+ struct device_node *np = dev->of_node;
+ struct device_node *panel_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
const char *fmt;
- imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
+ imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
@@ -225,30 +221,43 @@ static int imx_pd_probe(struct platform_device *pdev)
imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
}
- imxpd->dev = &pdev->dev;
+ panel_node = of_parse_phandle(np, "fsl,panel", 0);
+ if (panel_node)
+ imxpd->panel = of_drm_find_panel(panel_node);
- ret = imx_pd_register(imxpd);
+ imxpd->dev = dev;
+
+ ret = imx_pd_register(drm, imxpd);
if (ret)
return ret;
- ret = imx_drm_encoder_add_possible_crtcs(imxpd->imx_drm_encoder, np);
-
- platform_set_drvdata(pdev, imxpd);
+ dev_set_drvdata(dev, imxpd);
return 0;
}
-static int imx_pd_remove(struct platform_device *pdev)
+static void imx_pd_unbind(struct device *dev, struct device *master,
+ void *data)
{
- struct imx_parallel_display *imxpd = platform_get_drvdata(pdev);
- struct drm_connector *connector = &imxpd->connector;
- struct drm_encoder *encoder = &imxpd->encoder;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
- drm_mode_connector_detach_encoder(connector, encoder);
+ imxpd->encoder.funcs->destroy(&imxpd->encoder);
+ imxpd->connector.funcs->destroy(&imxpd->connector);
+}
- imx_drm_remove_connector(imxpd->imx_drm_connector);
- imx_drm_remove_encoder(imxpd->imx_drm_encoder);
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+ .unbind = imx_pd_unbind,
+};
+static int imx_pd_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx_pd_ops);
+}
+
+static int imx_pd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_pd_ops);
return 0;
}
diff --git a/drivers/staging/keucr/common.h b/drivers/staging/keucr/common.h
index cf347ccd6a6e..f0b977616cd5 100644
--- a/drivers/staging/keucr/common.h
+++ b/drivers/staging/keucr/common.h
@@ -1,14 +1,6 @@
#ifndef COMMON_INCD
#define COMMON_INCD
-typedef u8 BOOLEAN;
-typedef u8 BYTE;
-typedef u8 *PBYTE;
-typedef u16 WORD;
-typedef u16 *PWORD;
-typedef u32 DWORD;
-typedef u32 *PDWORD;
-
#define BYTE_MASK 0xff
#endif
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index f5d41e0348ce..e61183906548 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -17,7 +17,7 @@
int ENE_InitMedia(struct us_data *us)
{
int result;
- BYTE MiscReg03 = 0;
+ u8 MiscReg03 = 0;
dev_info(&us->pusb_dev->dev, "--- Init Media ---\n");
result = ene_read_byte(us, REG_CARD_STATUS, &MiscReg03);
@@ -41,7 +41,7 @@ int ENE_InitMedia(struct us_data *us)
/*
* ene_read_byte() :
*/
-int ene_read_byte(struct us_data *us, WORD index, void *buf)
+int ene_read_byte(struct us_data *us, u16 index, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
@@ -51,8 +51,8 @@ int ene_read_byte(struct us_data *us, WORD index, void *buf)
bcb->DataTransferLength = 0x01;
bcb->Flags = 0x80;
bcb->CDB[0] = 0xED;
- bcb->CDB[2] = (BYTE)(index>>8);
- bcb->CDB[3] = (BYTE)index;
+ bcb->CDB[2] = (u8)(index>>8);
+ bcb->CDB[3] = (u8)index;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
return result;
@@ -65,7 +65,7 @@ int ENE_SMInit(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- BYTE buf[0x200];
+ u8 buf[0x200];
dev_dbg(&us->pusb_dev->dev, "transport --- ENE_SMInit\n");
@@ -122,12 +122,12 @@ int ENE_SMInit(struct us_data *us)
/*
* ENE_LoadBinCode()
*/
-int ENE_LoadBinCode(struct us_data *us, BYTE flag)
+int ENE_LoadBinCode(struct us_data *us, u8 flag)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
/* void *buf; */
- PBYTE buf;
+ u8 *buf;
/* dev_info(&us->pusb_dev->dev, "transport --- ENE_LoadBinCode\n"); */
if (us->BIN_FLAG == flag)
@@ -164,7 +164,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
/*
* ENE_SendScsiCmd():
*/
-int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
+int ENE_SendScsiCmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
diff --git a/drivers/staging/keucr/init.h b/drivers/staging/keucr/init.h
index c8b2cd604460..98d2e3ba8545 100644
--- a/drivers/staging/keucr/init.h
+++ b/drivers/staging/keucr/init.h
@@ -1,11 +1,11 @@
#include "common.h"
-extern DWORD MediaChange;
+extern u32 MediaChange;
extern int Check_D_MediaFmt(struct us_data *);
-static BYTE SM_Init[] = {
+static u8 SM_Init[] = {
0x7B, 0x09, 0x7C, 0xF0, 0x7D, 0x10, 0x7E, 0xE9,
0x7F, 0xCC, 0x12, 0x2F, 0x71, 0x90, 0xE9, 0xCC,
0xE0, 0xB4, 0x07, 0x12, 0x90, 0xFF, 0x09, 0xE0,
@@ -263,7 +263,7 @@ static BYTE SM_Init[] = {
0x58, 0x44, 0x2D, 0x49, 0x6E, 0x69, 0x74, 0x20,
0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x30, 0x31 };
-static BYTE SM_Rdwr[] = {
+static u8 SM_Rdwr[] = {
0x7B, 0x0C, 0x7C, 0xF0, 0x7D, 0x10, 0x7E, 0xE9,
0x7F, 0xCC, 0x12, 0x2F, 0x71, 0x90, 0xE9, 0xC3,
0xE0, 0xB4, 0x73, 0x04, 0x74, 0x40, 0x80, 0x09,
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index 9136e9447261..39951738d231 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -169,29 +169,29 @@ SmartMedia Model & Attribute
Struct Definition
***************************************************************************/
struct keucr_media_info {
- BYTE Model;
- BYTE Attribute;
- BYTE MaxZones;
- BYTE MaxSectors;
- WORD MaxBlocks;
- WORD MaxLogBlocks;
+ u8 Model;
+ u8 Attribute;
+ u8 MaxZones;
+ u8 MaxSectors;
+ u16 MaxBlocks;
+ u16 MaxLogBlocks;
};
struct keucr_media_address {
- BYTE Zone; /* Zone Number */
- BYTE Sector; /* Sector(512byte) Number on Block */
- WORD PhyBlock; /* Physical Block Number on Zone */
- WORD LogBlock; /* Logical Block Number of Zone */
+ u8 Zone; /* Zone Number */
+ u8 Sector; /* Sector(512byte) Number on Block */
+ u16 PhyBlock; /* Physical Block Number on Zone */
+ u16 LogBlock; /* Logical Block Number of Zone */
};
struct keucr_media_area {
- BYTE Sector; /* Sector(512byte) Number on Block */
- WORD PhyBlock; /* Physical Block Number on Zone 0 */
+ u8 Sector; /* Sector(512byte) Number on Block */
+ u16 PhyBlock; /* Physical Block Number on Zone 0 */
};
-extern WORD ReadBlock;
-extern WORD WriteBlock;
-extern DWORD MediaChange;
+extern u16 ReadBlock;
+extern u16 WriteBlock;
+extern u32 MediaChange;
extern struct keucr_media_info Ssfdc;
extern struct keucr_media_address Media;
@@ -204,24 +204,24 @@ extern struct keucr_media_area CisArea;
int Init_D_SmartMedia(void);
int Pwoff_D_SmartMedia(void);
int Check_D_SmartMedia(void);
-int Check_D_Parameter(struct us_data *, WORD *, BYTE *, BYTE *);
-int Media_D_ReadSector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_WriteSector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_CopySector(struct us_data *, DWORD, WORD, BYTE *);
-int Media_D_EraseBlock(struct us_data *, DWORD, WORD);
+int Check_D_Parameter(struct us_data *, u16 *, u8 *, u8 *);
+int Media_D_ReadSector(struct us_data *, u32, u16, u8 *);
+int Media_D_WriteSector(struct us_data *, u32, u16, u8 *);
+int Media_D_CopySector(struct us_data *, u32, u16, u8 *);
+int Media_D_EraseBlock(struct us_data *, u32, u16);
int Media_D_EraseAll(struct us_data *);
/******************************************/
-int Media_D_OneSectWriteStart(struct us_data *, DWORD, BYTE *);
-int Media_D_OneSectWriteNext(struct us_data *, BYTE *);
+int Media_D_OneSectWriteStart(struct us_data *, u32, u8 *);
+int Media_D_OneSectWriteNext(struct us_data *, u8 *);
int Media_D_OneSectWriteFlush(struct us_data *);
/******************************************/
extern int SM_FreeMem(void); /* ENE SM function */
-void SM_EnableLED(struct us_data *, BOOLEAN);
+void SM_EnableLED(struct us_data *, bool);
void Led_D_TernOn(void);
void Led_D_TernOff(void);
-int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock);
+int Media_D_EraseAllRedtData(u32 Index, bool CheckBlock);
/*DWORD Media_D_GetMediaInfo(struct us_data * fdoExt,
PIOCTL_MEDIA_INFO_IN pParamIn, PIOCTL_MEDIA_INFO_OUT pParamOut); */
@@ -229,31 +229,31 @@ int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock);
* SMILSub.c
*/
/******************************************/
-int Check_D_DataBlank(BYTE *);
-int Check_D_FailBlock(BYTE *);
-int Check_D_DataStatus(BYTE *);
-int Load_D_LogBlockAddr(BYTE *);
-void Clr_D_RedundantData(BYTE *);
-void Set_D_LogBlockAddr(BYTE *);
-void Set_D_FailBlock(BYTE *);
-void Set_D_DataStaus(BYTE *);
+int Check_D_DataBlank(u8 *);
+int Check_D_FailBlock(u8 *);
+int Check_D_DataStatus(u8 *);
+int Load_D_LogBlockAddr(u8 *);
+void Clr_D_RedundantData(u8 *);
+void Set_D_LogBlockAddr(u8 *);
+void Set_D_FailBlock(u8 *);
+void Set_D_DataStaus(u8 *);
/******************************************/
void Ssfdc_D_Reset(struct us_data *);
-int Ssfdc_D_ReadCisSect(struct us_data *, BYTE *, BYTE *);
+int Ssfdc_D_ReadCisSect(struct us_data *, u8 *, u8 *);
void Ssfdc_D_WriteRedtMode(void);
-void Ssfdc_D_ReadID(BYTE *, BYTE);
-int Ssfdc_D_ReadSect(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_ReadBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_CopyBlock(struct us_data *, WORD, BYTE *, BYTE *);
-int Ssfdc_D_WriteSectForCopy(struct us_data *, BYTE *, BYTE *);
+void Ssfdc_D_ReadID(u8 *, u8);
+int Ssfdc_D_ReadSect(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_ReadBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_WriteSect(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_CopyBlock(struct us_data *, u16, u8 *, u8 *);
+int Ssfdc_D_WriteSectForCopy(struct us_data *, u8 *, u8 *);
int Ssfdc_D_EraseBlock(struct us_data *);
-int Ssfdc_D_ReadRedtData(struct us_data *, BYTE *);
-int Ssfdc_D_WriteRedtData(struct us_data *, BYTE *);
+int Ssfdc_D_ReadRedtData(struct us_data *, u8 *);
+int Ssfdc_D_WriteRedtData(struct us_data *, u8 *);
int Ssfdc_D_CheckStatus(void);
-int Set_D_SsfdcModel(BYTE);
+int Set_D_SsfdcModel(u8);
void Cnt_D_Reset(void);
int Cnt_D_PowerOn(void);
void Cnt_D_PowerOff(void);
@@ -263,26 +263,26 @@ int Check_D_CntPower(void);
int Check_D_CardExist(void);
int Check_D_CardStsChg(void);
int Check_D_SsfdcWP(void);
-int SM_ReadBlock(struct us_data *, BYTE *, BYTE *);
+int SM_ReadBlock(struct us_data *, u8 *, u8 *);
-int Ssfdc_D_ReadSect_DMA(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_ReadSect_PIO(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect_DMA(struct us_data *, BYTE *, BYTE *);
-int Ssfdc_D_WriteSect_PIO(struct us_data *, BYTE *, BYTE *);
+int Ssfdc_D_ReadSect_DMA(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_ReadSect_PIO(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteSect_DMA(struct us_data *, u8 *, u8 *);
+int Ssfdc_D_WriteSect_PIO(struct us_data *, u8 *, u8 *);
/******************************************/
-int Check_D_ReadError(BYTE *);
-int Check_D_Correct(BYTE *, BYTE *);
-int Check_D_CISdata(BYTE *, BYTE *);
-void Set_D_RightECC(BYTE *);
+int Check_D_ReadError(u8 *);
+int Check_D_Correct(u8 *, u8 *);
+int Check_D_CISdata(u8 *, u8 *);
+void Set_D_RightECC(u8 *);
/*
* SMILECC.c
*/
-void calculate_ecc(BYTE *, BYTE *, BYTE *, BYTE *, BYTE *);
-BYTE correct_data(BYTE *, BYTE *, BYTE, BYTE, BYTE);
-int _Correct_D_SwECC(BYTE *, BYTE *, BYTE *);
-void _Calculate_D_SwECC(BYTE *, BYTE *);
+void calculate_ecc(u8 *, u8 *, u8 *, u8 *, u8 *);
+u8 correct_data(u8 *, u8 *, u8, u8, u8);
+int _Correct_D_SwECC(u8 *, u8 *, u8 *);
+void _Calculate_D_SwECC(u8 *, u8 *);
void SM_Init(void);
diff --git a/drivers/staging/keucr/smilecc.c b/drivers/staging/keucr/smilecc.c
index 6b8f7d7a7436..ffe6030f5e4d 100644
--- a/drivers/staging/keucr/smilecc.c
+++ b/drivers/staging/keucr/smilecc.c
@@ -13,7 +13,7 @@
/* #include "EMCRIOS.h" */
/* CP0-CP5 code table */
-static BYTE ecctable[256] = {
+static u8 ecctable[256] = {
0x00, 0x55, 0x56, 0x03, 0x59, 0x0C, 0x0F, 0x5A, 0x5A, 0x0F, 0x0C, 0x59, 0x03,
0x56, 0x55, 0x00, 0x65, 0x30, 0x33, 0x66, 0x3C, 0x69, 0x6A, 0x3F, 0x3F, 0x6A,
0x69, 0x3C, 0x66, 0x33, 0x30, 0x65, 0x66, 0x33, 0x30, 0x65, 0x3F, 0x6A, 0x69,
@@ -36,7 +36,7 @@ static BYTE ecctable[256] = {
0x5A, 0x5A, 0x0F, 0x0C, 0x59, 0x03, 0x56, 0x55, 0x00
};
-static void trans_result(BYTE, BYTE, BYTE *, BYTE *);
+static void trans_result(u8, u8, u8 *, u8 *);
#define BIT7 0x80
#define BIT6 0x40
@@ -57,11 +57,11 @@ static void trans_result(BYTE, BYTE, BYTE *, BYTE *);
* *ecc1; * LP15,LP14,LP13,...
* *ecc2; * LP07,LP06,LP05,...
*/
-static void trans_result(BYTE reg2, BYTE reg3, BYTE *ecc1, BYTE *ecc2)
+static void trans_result(u8 reg2, u8 reg3, u8 *ecc1, u8 *ecc2)
{
- BYTE a; /* Working for reg2,reg3 */
- BYTE b; /* Working for ecc1,ecc2 */
- BYTE i; /* For counting */
+ u8 a; /* Working for reg2,reg3 */
+ u8 b; /* Working for ecc1,ecc2 */
+ u8 i; /* For counting */
a = BIT7; b = BIT7; /* 80h=10000000b */
*ecc1 = *ecc2 = 0; /* Clear ecc1,ecc2 */
@@ -95,21 +95,21 @@ static void trans_result(BYTE reg2, BYTE reg3, BYTE *ecc1, BYTE *ecc2)
* *ecc2; * LP07,LP06,LP05,...
* *ecc3; * CP5,CP4,CP3,...,"1","1"
*/
-void calculate_ecc(BYTE *table, BYTE *data, BYTE *ecc1, BYTE *ecc2, BYTE *ecc3)
+void calculate_ecc(u8 *table, u8 *data, u8 *ecc1, u8 *ecc2, u8 *ecc3)
{
- DWORD i; /* For counting */
- BYTE a; /* Working for table */
- BYTE reg1; /* D-all,CP5,CP4,CP3,... */
- BYTE reg2; /* LP14,LP12,L10,... */
- BYTE reg3; /* LP15,LP13,L11,... */
+ u32 i; /* For counting */
+ u8 a; /* Working for table */
+ u8 reg1; /* D-all,CP5,CP4,CP3,... */
+ u8 reg2; /* LP14,LP12,L10,... */
+ u8 reg3; /* LP15,LP13,L11,... */
reg1 = reg2 = reg3 = 0; /* Clear parameter */
for (i = 0; i < 256; ++i) {
a = table[data[i]]; /* Get CP0-CP5 code from table */
reg1 ^= (a&MASK_CPS); /* XOR with a */
if ((a&BIT6) != 0) { /* If D_all(all bit XOR) = 1 */
- reg3 ^= (BYTE)i; /* XOR with counter */
- reg2 ^= ~((BYTE)i); /* XOR with inv. of counter */
+ reg3 ^= (u8)i; /* XOR with counter */
+ reg2 ^= ~((u8)i); /* XOR with inv. of counter */
}
}
@@ -127,22 +127,22 @@ void calculate_ecc(BYTE *table, BYTE *data, BYTE *ecc1, BYTE *ecc2, BYTE *ecc3)
* ecc2; * LP07,LP06,LP05,...
* ecc3; * CP5,CP4,CP3,...,"1","1"
*/
-BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
+u8 correct_data(u8 *data, u8 *eccdata, u8 ecc1, u8 ecc2, u8 ecc3)
{
- DWORD l; /* Working to check d */
- DWORD d; /* Result of comparison */
- DWORD i; /* For counting */
- BYTE d1, d2, d3; /* Result of comparison */
- BYTE a; /* Working for add */
- BYTE add; /* Byte address of cor. DATA */
- BYTE b; /* Working for bit */
- BYTE bit; /* Bit address of cor. DATA */
+ u32 l; /* Working to check d */
+ u32 d; /* Result of comparison */
+ u32 i; /* For counting */
+ u8 d1, d2, d3; /* Result of comparison */
+ u8 a; /* Working for add */
+ u8 add; /* Byte address of cor. DATA */
+ u8 b; /* Working for bit */
+ u8 bit; /* Bit address of cor. DATA */
d1 = ecc1^eccdata[1]; d2 = ecc2^eccdata[0]; /* Compare LP's */
d3 = ecc3^eccdata[2]; /* Compare CP's */
- d = ((DWORD)d1<<16) /* Result of comparison */
- +((DWORD)d2<<8)
- +(DWORD)d3;
+ d = ((u32)d1<<16) /* Result of comparison */
+ +((u32)d2<<8)
+ +(u32)d3;
if (d == 0)
return 0; /* If No error, return */
@@ -188,9 +188,9 @@ BYTE correct_data(BYTE *data, BYTE *eccdata, BYTE ecc1, BYTE ecc2, BYTE ecc3)
return 3; /* Uncorrectable error */
}
-int _Correct_D_SwECC(BYTE *buf, BYTE *redundant_ecc, BYTE *calculate_ecc)
+int _Correct_D_SwECC(u8 *buf, u8 *redundant_ecc, u8 *calculate_ecc)
{
- DWORD err;
+ u32 err;
err = correct_data(buf, redundant_ecc, *(calculate_ecc + 1),
*(calculate_ecc), *(calculate_ecc + 2));
@@ -203,7 +203,7 @@ int _Correct_D_SwECC(BYTE *buf, BYTE *redundant_ecc, BYTE *calculate_ecc)
return -1;
}
-void _Calculate_D_SwECC(BYTE *buf, BYTE *ecc)
+void _Calculate_D_SwECC(u8 *buf, u8 *ecc)
{
calculate_ecc(ecctable, buf, ecc+1, ecc+0, ecc+2);
}
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 09d07e05102f..fc7cbc6f8a45 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -4,11 +4,11 @@
#include "smcommon.h"
#include "smil.h"
-static int Conv_D_MediaAddr(struct us_data *, DWORD);
+static int Conv_D_MediaAddr(struct us_data *, u32);
static int Inc_D_MediaAddr(struct us_data *);
-static int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
+static int Media_D_ReadOneSect(struct us_data *, u16, u8 *);
-static int Copy_D_BlockAll(struct us_data *, DWORD);
+static int Copy_D_BlockAll(struct us_data *, u32);
static int Assign_D_WriteBlock(void);
static int Release_D_ReadBlock(struct us_data *);
@@ -16,7 +16,7 @@ static int Release_D_WriteBlock(struct us_data *);
static int Release_D_CopySector(struct us_data *);
static int Copy_D_PhyOneSect(struct us_data *);
-static int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+static int Read_D_PhyOneSect(struct us_data *, u16, u8 *);
static int Erase_D_PhyOneBlock(struct us_data *);
static int Set_D_PhyFmtValue(struct us_data *);
@@ -25,24 +25,24 @@ static int Make_D_LogTable(struct us_data *);
static int MarkFail_D_PhyOneBlock(struct us_data *);
-static DWORD ErrCode;
-static BYTE WorkBuf[SECTSIZE];
-static BYTE Redundant[REDTSIZE];
-static BYTE WorkRedund[REDTSIZE];
+static u32 ErrCode;
+static u8 WorkBuf[SECTSIZE];
+static u8 Redundant[REDTSIZE];
+static u8 WorkRedund[REDTSIZE];
/* 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK]; */
-static WORD *Log2Phy[MAX_ZONENUM];
-static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM / 8];
-static WORD AssignStart[MAX_ZONENUM];
-WORD ReadBlock;
-WORD WriteBlock;
-DWORD MediaChange;
-static DWORD SectCopyMode;
+static u16 *Log2Phy[MAX_ZONENUM];
+static u8 Assign[MAX_ZONENUM][MAX_BLOCKNUM / 8];
+static u16 AssignStart[MAX_ZONENUM];
+u16 ReadBlock;
+u16 WriteBlock;
+u32 MediaChange;
+static u32 SectCopyMode;
/* BIT Control Macro */
-static BYTE BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-#define Set_D_Bit(a, b) (a[(BYTE)((b) / 8)] |= BitData[(b) % 8])
-#define Clr_D_Bit(a, b) (a[(BYTE)((b) / 8)] &= ~BitData[(b) % 8])
-#define Chk_D_Bit(a, b) (a[(BYTE)((b) / 8)] & BitData[(b) % 8])
+static u8 BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+#define Set_D_Bit(a, b) (a[(u8)((b) / 8)] |= BitData[(b) % 8])
+#define Clr_D_Bit(a, b) (a[(u8)((b) / 8)] &= ~BitData[(b) % 8])
+#define Chk_D_Bit(a, b) (a[(u8)((b) / 8)] & BitData[(b) % 8])
/* ----- SM_FreeMem() ------------------------------------------------- */
int SM_FreeMem(void)
@@ -62,9 +62,9 @@ int SM_FreeMem(void)
/* SmartMedia Read/Write/Erase Function */
/* ----- Media_D_ReadSector() ------------------------------------------- */
-int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
+int Media_D_ReadSector(struct us_data *us, u32 start, u16 count, u8 *buf)
{
- WORD len, bn;
+ u16 len, bn;
if (Conv_D_MediaAddr(us, start))
return ErrCode;
@@ -97,9 +97,9 @@ int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
}
/* here */
/* ----- Media_D_CopySector() ------------------------------------------ */
-int Media_D_CopySector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
+int Media_D_CopySector(struct us_data *us, u32 start, u16 count, u8 *buf)
{
- WORD len, bn;
+ u16 len, bn;
/* pr_info("Media_D_CopySector !!!\n"); */
if (Conv_D_MediaAddr(us, start))
@@ -186,12 +186,12 @@ int Check_D_MediaFmt(struct us_data *us)
/* SmartMedia Physical Address Control Subroutine */
/* ----- Conv_D_MediaAddr() --------------------------------------------- */
-static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
+static int Conv_D_MediaAddr(struct us_data *us, u32 addr)
{
- DWORD temp;
+ u32 temp;
temp = addr / Ssfdc.MaxSectors;
- Media.Zone = (BYTE) (temp / Ssfdc.MaxLogBlocks);
+ Media.Zone = (u8) (temp / Ssfdc.MaxLogBlocks);
if (Log2Phy[Media.Zone] == NULL) {
if (Make_D_LogTable(us)) {
@@ -200,8 +200,8 @@ static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
}
}
- Media.Sector = (BYTE) (addr % Ssfdc.MaxSectors);
- Media.LogBlock = (WORD) (temp % Ssfdc.MaxLogBlocks);
+ Media.Sector = (u8) (addr % Ssfdc.MaxSectors);
+ Media.LogBlock = (u16) (temp % Ssfdc.MaxLogBlocks);
if (Media.Zone < Ssfdc.MaxZones) {
Clr_D_RedundantData(Redundant);
@@ -217,7 +217,7 @@ static int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
/* ----- Inc_D_MediaAddr() ---------------------------------------------- */
static int Inc_D_MediaAddr(struct us_data *us)
{
- WORD LogBlock = Media.LogBlock;
+ u16 LogBlock = Media.LogBlock;
if (++Media.Sector < Ssfdc.MaxSectors)
return SMSUCCESS;
@@ -265,9 +265,9 @@ static int Inc_D_MediaAddr(struct us_data *us)
/* SmartMedia Read/Write Subroutine with Retry */
/* ----- Media_D_ReadOneSect() ------------------------------------------ */
-static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Media_D_ReadOneSect(struct us_data *us, u16 count, u8 *buf)
{
- DWORD err, retry;
+ u32 err, retry;
if (!Read_D_PhyOneSect(us, count, buf))
return SMSUCCESS;
@@ -309,9 +309,9 @@ static int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
/* SmartMedia Physical Sector Data Copy Subroutine */
/* ----- Copy_D_BlockAll() ---------------------------------------------- */
-static int Copy_D_BlockAll(struct us_data *us, DWORD mode)
+static int Copy_D_BlockAll(struct us_data *us, u32 mode)
{
- BYTE sect;
+ u8 sect;
sect = Media.Sector;
@@ -381,7 +381,7 @@ static int Assign_D_WriteBlock(void)
/* ----- Release_D_ReadBlock() ------------------------------------------ */
static int Release_D_ReadBlock(struct us_data *us)
{
- DWORD mode;
+ u32 mode;
mode = SectCopyMode;
SectCopyMode = COMPLETED;
@@ -430,7 +430,7 @@ static int Release_D_WriteBlock(struct us_data *us)
static int Copy_D_PhyOneSect(struct us_data *us)
{
int i;
- DWORD err, retry;
+ u32 err, retry;
/* pr_info("Copy_D_PhyOneSect --- Sector = %x\n", Media.Sector); */
if (ReadBlock != NO_ASSIGN) {
@@ -504,10 +504,10 @@ static int Copy_D_PhyOneSect(struct us_data *us)
/* SmartMedia Physical Sector Read/Write/Erase Subroutine */
/* ----- Read_D_PhyOneSect() -------------------------------------------- */
-static int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
+static int Read_D_PhyOneSect(struct us_data *us, u16 count, u8 *buf)
{
int i;
- DWORD retry;
+ u32 retry;
if (Media.PhyBlock == NO_ASSIGN) {
for (i = 0; i < SECTSIZE; i++)
@@ -637,10 +637,10 @@ static int Search_D_CIS(struct us_data *us)
/* ----- Make_D_LogTable() ---------------------------------------------- */
static int Make_D_LogTable(struct us_data *us)
{
- WORD phyblock, logblock;
+ u16 phyblock, logblock;
if (Log2Phy[Media.Zone] == NULL) {
- Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK * sizeof(WORD),
+ Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK * sizeof(u16),
GFP_KERNEL);
/* pr_info("ExAllocatePool Zone = %x, Addr = %x\n",
Media.Zone, Log2Phy[Media.Zone]); */
@@ -693,7 +693,7 @@ static int Make_D_LogTable(struct us_data *us)
phyblock = Media.PhyBlock;
logblock = Media.LogBlock;
- Media.Sector = (BYTE)(Ssfdc.MaxSectors - 1);
+ Media.Sector = (u8)(Ssfdc.MaxSectors - 1);
if (Ssfdc_D_ReadRedtData(us, Redundant)) {
Ssfdc_D_Reset(us);
@@ -738,7 +738,7 @@ static int Make_D_LogTable(struct us_data *us)
/* ----- MarkFail_D_PhyOneBlock() --------------------------------------- */
static int MarkFail_D_PhyOneBlock(struct us_data *us)
{
- BYTE sect;
+ u8 sect;
sect = Media.Sector;
Set_D_FailBlock(WorkRedund);
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 16da9a9b4033..44ced8265039 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -6,16 +6,16 @@
#include "smcommon.h"
#include "smil.h"
-static BYTE _Check_D_DevCode(BYTE);
-static DWORD ErrXDCode;
-static BYTE IsSSFDCCompliance;
-static BYTE IsXDCompliance;
+static u8 _Check_D_DevCode(u8);
+static u32 ErrXDCode;
+static u8 IsSSFDCCompliance;
+static u8 IsXDCompliance;
struct keucr_media_info Ssfdc;
struct keucr_media_address Media;
struct keucr_media_area CisArea;
-static BYTE EccBuf[6];
+static u8 EccBuf[6];
#define EVEN 0 /* Even Page for 256byte/page */
#define ODD 1 /* Odd Page for 256byte/page */
@@ -24,7 +24,7 @@ static BYTE EccBuf[6];
/* SmartMedia Redundant buffer data Control Subroutine
*----- Check_D_DataBlank() --------------------------------------------
*/
-int Check_D_DataBlank(BYTE *redundant)
+int Check_D_DataBlank(u8 *redundant)
{
char i;
@@ -36,7 +36,7 @@ int Check_D_DataBlank(BYTE *redundant)
}
/* ----- Check_D_FailBlock() -------------------------------------------- */
-int Check_D_FailBlock(BYTE *redundant)
+int Check_D_FailBlock(u8 *redundant)
{
redundant += REDT_BLOCK;
@@ -51,7 +51,7 @@ int Check_D_FailBlock(BYTE *redundant)
}
/* ----- Check_D_DataStatus() ------------------------------------------- */
-int Check_D_DataStatus(BYTE *redundant)
+int Check_D_DataStatus(u8 *redundant)
{
redundant += REDT_DATA;
@@ -70,14 +70,14 @@ int Check_D_DataStatus(BYTE *redundant)
}
/* ----- Load_D_LogBlockAddr() ------------------------------------------ */
-int Load_D_LogBlockAddr(BYTE *redundant)
+int Load_D_LogBlockAddr(u8 *redundant)
{
- WORD addr1, addr2;
+ u16 addr1, addr2;
- addr1 = (WORD)*(redundant + REDT_ADDR1H)*0x0100 +
- (WORD)*(redundant + REDT_ADDR1L);
- addr2 = (WORD)*(redundant + REDT_ADDR2H)*0x0100 +
- (WORD)*(redundant + REDT_ADDR2L);
+ addr1 = (u16)*(redundant + REDT_ADDR1H)*0x0100 +
+ (u16)*(redundant + REDT_ADDR1L);
+ addr2 = (u16)*(redundant + REDT_ADDR2H)*0x0100 +
+ (u16)*(redundant + REDT_ADDR2L);
if (addr1 == addr2)
if ((addr1 & 0xF000) == 0x1000) {
@@ -85,7 +85,7 @@ int Load_D_LogBlockAddr(BYTE *redundant)
return SMSUCCESS;
}
- if (hweight16((WORD)(addr1^addr2)) != 0x01)
+ if (hweight16((u16)(addr1^addr2)) != 0x01)
return ERROR;
if ((addr1 & 0xF000) == 0x1000)
@@ -104,7 +104,7 @@ int Load_D_LogBlockAddr(BYTE *redundant)
}
/* ----- Clr_D_RedundantData() ------------------------------------------ */
-void Clr_D_RedundantData(BYTE *redundant)
+void Clr_D_RedundantData(u8 *redundant)
{
char i;
@@ -113,9 +113,9 @@ void Clr_D_RedundantData(BYTE *redundant)
}
/* ----- Set_D_LogBlockAddr() ------------------------------------------- */
-void Set_D_LogBlockAddr(BYTE *redundant)
+void Set_D_LogBlockAddr(u8 *redundant)
{
- WORD addr;
+ u16 addr;
*(redundant + REDT_BLOCK) = 0xFF;
*(redundant + REDT_DATA) = 0xFF;
@@ -125,20 +125,20 @@ void Set_D_LogBlockAddr(BYTE *redundant)
addr++;
*(redundant + REDT_ADDR1H) = *(redundant + REDT_ADDR2H) =
- (BYTE)(addr / 0x0100);
- *(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (BYTE)addr;
+ (u8)(addr / 0x0100);
+ *(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (u8)addr;
}
/*----- Set_D_FailBlock() ---------------------------------------------- */
-void Set_D_FailBlock(BYTE *redundant)
+void Set_D_FailBlock(u8 *redundant)
{
char i;
for (i = 0; i < REDTSIZE; i++)
- *redundant++ = (BYTE)((i == REDT_BLOCK) ? 0xF0 : 0xFF);
+ *redundant++ = (u8)((i == REDT_BLOCK) ? 0xF0 : 0xFF);
}
/* ----- Set_D_DataStaus() ---------------------------------------------- */
-void Set_D_DataStaus(BYTE *redundant)
+void Set_D_DataStaus(u8 *redundant)
{
redundant += REDT_DATA;
*redundant = 0x00;
@@ -154,10 +154,10 @@ void Ssfdc_D_Reset(struct us_data *us)
}
/* ----- Ssfdc_D_ReadCisSect() ------------------------------------------ */
-int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_ReadCisSect(struct us_data *us, u8 *buf, u8 *redundant)
{
- BYTE zone, sector;
- WORD block;
+ u8 zone, sector;
+ u16 block;
zone = Media.Zone; block = Media.PhyBlock; sector = Media.Sector;
Media.Zone = 0;
@@ -177,11 +177,11 @@ int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
/* 6250 CMD 1 */
/* ----- Ssfdc_D_ReadSect() --------------------------------------------- */
-int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_ReadSect(struct us_data *us, u8 *buf, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -190,8 +190,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -200,8 +200,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
@@ -215,8 +215,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -229,12 +229,12 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
}
/* ----- Ssfdc_D_ReadBlock() --------------------------------------------- */
-int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
- BYTE *redundant)
+int Ssfdc_D_ReadBlock(struct us_data *us, u16 count, u8 *buf,
+ u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -243,8 +243,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Read sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -253,8 +253,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
@@ -268,8 +268,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -283,12 +283,12 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
/* ----- Ssfdc_D_CopyBlock() -------------------------------------------- */
-int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
- BYTE *redundant)
+int Ssfdc_D_CopyBlock(struct us_data *us, u16 count, u8 *buf,
+ u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD ReadAddr, WriteAddr;
+ u16 ReadAddr, WriteAddr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -297,10 +297,10 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
return USB_STOR_TRANSPORT_ERROR;
}
- ReadAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + ReadBlock;
- ReadAddr = ReadAddr*(WORD)Ssfdc.MaxSectors;
- WriteAddr = (WORD)Media.Zone*Ssfdc.MaxBlocks + WriteBlock;
- WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
+ ReadAddr = (u16)Media.Zone*Ssfdc.MaxBlocks + ReadBlock;
+ ReadAddr = ReadAddr*(u16)Ssfdc.MaxSectors;
+ WriteAddr = (u16)Media.Zone*Ssfdc.MaxBlocks + WriteBlock;
+ WriteAddr = WriteAddr*(u16)Ssfdc.MaxSectors;
/* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -309,16 +309,16 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x08;
- bcb->CDB[7] = (BYTE)WriteAddr;
- bcb->CDB[6] = (BYTE)(WriteAddr / 0x0100);
+ bcb->CDB[7] = (u8)WriteAddr;
+ bcb->CDB[6] = (u8)(WriteAddr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
bcb->CDB[10] = Media.Sector;
if (ReadBlock != NO_ASSIGN) {
- bcb->CDB[4] = (BYTE)ReadAddr;
- bcb->CDB[3] = (BYTE)(ReadAddr / 0x0100);
+ bcb->CDB[4] = (u8)ReadAddr;
+ bcb->CDB[3] = (u8)(ReadAddr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
} else
bcb->CDB[11] = 1;
@@ -331,11 +331,11 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
}
/* ----- Ssfdc_D_WriteSectForCopy() ------------------------------------- */
-int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_WriteSectForCopy(struct us_data *us, u8 *buf, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -345,8 +345,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
/* Write sect data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -355,8 +355,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x04;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
@@ -374,7 +374,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -383,8 +383,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -392,8 +392,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x06;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
@@ -405,12 +405,12 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
/* 6250 CMD 2 */
/*----- Ssfdc_D_ReadRedtData() ----------------------------------------- */
-int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
+int Ssfdc_D_ReadRedtData(struct us_data *us, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
- BYTE *buf;
+ u16 addr;
+ u8 *buf;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -419,8 +419,8 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -428,8 +428,8 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
- bcb->CDB[4] = (BYTE)addr;
- bcb->CDB[3] = (BYTE)(addr / 0x0100);
+ bcb->CDB[4] = (u8)addr;
+ bcb->CDB[3] = (u8)(addr / 0x0100);
bcb->CDB[2] = Media.Zone / 2;
bcb->CDB[8] = 0;
bcb->CDB[9] = 1;
@@ -446,11 +446,11 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
/* 6250 CMD 4 */
/* ----- Ssfdc_D_WriteRedtData() ---------------------------------------- */
-int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
+int Ssfdc_D_WriteRedtData(struct us_data *us, u8 *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- WORD addr;
+ u16 addr;
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
@@ -459,8 +459,8 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
return USB_STOR_TRANSPORT_ERROR;
}
- addr = (WORD)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
- addr = addr*(WORD)Ssfdc.MaxSectors + Media.Sector;
+ addr = (u16)Media.Zone*Ssfdc.MaxBlocks + Media.PhyBlock;
+ addr = addr*(u16)Ssfdc.MaxSectors + Media.Sector;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -468,8 +468,8 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
bcb->Flags = 0x80;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x05;
- bcb->CDB[7] = (BYTE)addr;
- bcb->CDB[6] = (BYTE)(addr / 0x0100);
+ bcb->CDB[7] = (u8)addr;
+ bcb->CDB[6] = (u8)(addr / 0x0100);
bcb->CDB[5] = Media.Zone / 2;
bcb->CDB[8] = *(redundant + REDT_ADDR1H);
bcb->CDB[9] = *(redundant + REDT_ADDR1L);
@@ -492,7 +492,7 @@ int Ssfdc_D_CheckStatus(void)
/* SmartMedia ID Code Check & Mode Set Subroutine
* ----- Set_D_SsfdcModel() ---------------------------------------------
*/
-int Set_D_SsfdcModel(BYTE dcode)
+int Set_D_SsfdcModel(u8 dcode)
{
switch (_Check_D_DevCode(dcode)) {
case SSFDC1MB:
@@ -600,7 +600,7 @@ int Set_D_SsfdcModel(BYTE dcode)
}
/* ----- _Check_D_DevCode() --------------------------------------------- */
-BYTE _Check_D_DevCode(BYTE dcode)
+static u8 _Check_D_DevCode(u8 dcode)
{
switch (dcode) {
case 0x6E:
@@ -630,21 +630,21 @@ BYTE _Check_D_DevCode(BYTE dcode)
/* SmartMedia ECC Control Subroutine
* ----- Check_D_ReadError() ----------------------------------------------
*/
-int Check_D_ReadError(BYTE *redundant)
+int Check_D_ReadError(u8 *redundant)
{
return SMSUCCESS;
}
/* ----- Check_D_Correct() ---------------------------------------------- */
-int Check_D_Correct(BYTE *buf, BYTE *redundant)
+int Check_D_Correct(u8 *buf, u8 *redundant)
{
return SMSUCCESS;
}
/* ----- Check_D_CISdata() ---------------------------------------------- */
-int Check_D_CISdata(BYTE *buf, BYTE *redundant)
+int Check_D_CISdata(u8 *buf, u8 *redundant)
{
- BYTE cis[] = {0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02,
+ u8 cis[] = {0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02,
0xDF, 0x01, 0x20};
int cis_len = sizeof(cis);
@@ -669,7 +669,7 @@ int Check_D_CISdata(BYTE *buf, BYTE *redundant)
}
/* ----- Set_D_RightECC() ---------------------------------------------- */
-void Set_D_RightECC(BYTE *redundant)
+void Set_D_RightECC(u8 *redundant)
{
/* Driver ECC Check */
return;
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index 5c03eca4dba8..20858f6777c8 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -68,7 +68,7 @@ static int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Inquiry() --------------------------------------------- */
static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ u8 data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20,
0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65,
0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20,
@@ -82,9 +82,9 @@ static int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
/* ----- SM_SCSI_Mode_Sense() ------------------------------------------ */
static int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
+ u8 mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
- BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
+ u8 mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
if (us->SM_Status.WtP)
@@ -101,9 +101,9 @@ static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
struct scatterlist *sg = NULL;
- DWORD bl_num;
- WORD bl_len;
- BYTE buf[8];
+ u32 bl_num;
+ u16 bl_len;
+ u8 buf[8];
dev_dbg(&us->pusb_dev->dev, "SM_SCSI_Read_Capacity\n");
@@ -132,13 +132,13 @@ static int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ u8 *Cdb = srb->cmnd;
+ u32 bn = ((Cdb[2] << 24) & 0xff000000) |
((Cdb[3] << 16) & 0x00ff0000) |
((Cdb[4] << 8) & 0x0000ff00) |
((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
+ u16 blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
void *buf;
@@ -164,13 +164,13 @@ static int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
static int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ u8 *Cdb = srb->cmnd;
+ u32 bn = ((Cdb[2] << 24) & 0xff000000) |
((Cdb[3] << 16) & 0x00ff0000) |
((Cdb[4] << 8) & 0x0000ff00) |
((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
+ u16 blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
void *buf;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index aeb2186d62ca..ae9414755d2f 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -83,8 +83,8 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
*/
static void usb_stor_print_cmd(struct us_data *us, struct scsi_cmnd *srb)
{
- PBYTE Cdb = srb->cmnd;
- DWORD cmd = Cdb[0];
+ u8 *Cdb = srb->cmnd;
+ u32 cmd = Cdb[0];
switch (cmd) {
case TEST_UNIT_READY:
@@ -545,8 +545,8 @@ Handle_Errors:
*/
void BuildSenseBuffer(struct scsi_cmnd *srb, int SrbStatus)
{
- BYTE *buf = srb->sense_buffer;
- BYTE asc;
+ u8 *buf = srb->sense_buffer;
+ u8 asc;
pr_info("transport --- BuildSenseBuffer\n");
switch (SrbStatus) {
diff --git a/drivers/staging/keucr/transport.h b/drivers/staging/keucr/transport.h
index df34474ae568..abd8e5a3dd6e 100644
--- a/drivers/staging/keucr/transport.h
+++ b/drivers/staging/keucr/transport.h
@@ -58,9 +58,9 @@ extern void usb_stor_set_xfer_buf(struct us_data*, unsigned char *buffer,
extern void ENE_stor_invoke_transport(struct scsi_cmnd *, struct us_data *);
extern int ENE_InitMedia(struct us_data *);
extern int ENE_SMInit(struct us_data *);
-extern int ENE_SendScsiCmd(struct us_data*, BYTE, void*, int);
-extern int ENE_LoadBinCode(struct us_data*, BYTE);
-extern int ene_read_byte(struct us_data*, WORD index, void *buf);
+extern int ENE_SendScsiCmd(struct us_data*, u8, void*, int);
+extern int ENE_LoadBinCode(struct us_data*, u8);
+extern int ene_read_byte(struct us_data*, u16 index, void *buf);
extern int ENE_Read_Data(struct us_data*, void *buf, unsigned int length);
extern int ENE_Write_Data(struct us_data*, void *buf, unsigned int length);
extern void BuildSenseBuffer(struct scsi_cmnd *, int);
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 3e3ca6365fbc..12ebde7315cd 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -50,7 +50,7 @@ static int eucr_suspend(struct usb_interface *iface, pm_message_t message)
static int eucr_resume(struct usb_interface *iface)
{
- BYTE tmp = 0;
+ u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
pr_info("--- eucr_resume---\n");
@@ -71,7 +71,7 @@ static int eucr_resume(struct usb_interface *iface)
static int eucr_reset_resume(struct usb_interface *iface)
{
- BYTE tmp = 0;
+ u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
pr_info("--- eucr_reset_resume---\n");
@@ -528,7 +528,7 @@ static int eucr_probe(struct usb_interface *intf,
struct Scsi_Host *host;
struct us_data *us;
int result;
- BYTE MiscReg03 = 0;
+ u8 MiscReg03 = 0;
struct task_struct *th;
pr_info("usb --- eucr_probe\n");
diff --git a/drivers/staging/keucr/usb.h b/drivers/staging/keucr/usb.h
index d665af177b96..e894f840c708 100644
--- a/drivers/staging/keucr/usb.h
+++ b/drivers/staging/keucr/usb.h
@@ -52,34 +52,34 @@ struct us_unusual_dev {
#define FDIR_READ 1
struct keucr_sd_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMMC:1;
- BYTE HiCapacity:1;
- BYTE HiSpeed:1;
- BYTE WtP:1;
- BYTE Reserved:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 IsMMC:1;
+ u8 HiCapacity:1;
+ u8 HiSpeed:1;
+ u8 WtP:1;
+ u8 Reserved:1;
};
struct keucr_ms_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMSPro:1;
- BYTE IsMSPHG:1;
- BYTE Reserved1:1;
- BYTE WtP:1;
- BYTE Reserved2:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 IsMSPro:1;
+ u8 IsMSPHG:1;
+ u8 Reserved1:1;
+ u8 WtP:1;
+ u8 Reserved2:1;
};
struct keucr_sm_status {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE Reserved:3;
- BYTE WtP:1;
- BYTE IsMS:1;
+ u8 Insert:1;
+ u8 Ready:1;
+ u8 MediaChange:1;
+ u8 Reserved:3;
+ u8 WtP:1;
+ u8 IsMS:1;
};
/* SD Block Length */
@@ -184,38 +184,38 @@ struct us_data {
/* ----- SD Control Data ---------------- */
/* SD_REGISTER SD_Regs; */
- WORD SD_Block_Mult;
- BYTE SD_READ_BL_LEN;
- WORD SD_C_SIZE;
- BYTE SD_C_SIZE_MULT;
+ u16 SD_Block_Mult;
+ u8 SD_READ_BL_LEN;
+ u16 SD_C_SIZE;
+ u8 SD_C_SIZE_MULT;
/* SD/MMC New spec. */
- BYTE SD_SPEC_VER;
- BYTE SD_CSD_VER;
- BYTE SD20_HIGH_CAPACITY;
- DWORD HC_C_SIZE;
- BYTE MMC_SPEC_VER;
- BYTE MMC_BusWidth;
- BYTE MMC_HIGH_CAPACITY;
+ u8 SD_SPEC_VER;
+ u8 SD_CSD_VER;
+ u8 SD20_HIGH_CAPACITY;
+ u32 HC_C_SIZE;
+ u8 MMC_SPEC_VER;
+ u8 MMC_BusWidth;
+ u8 MMC_HIGH_CAPACITY;
/* ----- MS Control Data ---------------- */
- BOOLEAN MS_SWWP;
- DWORD MSP_TotalBlock;
+ bool MS_SWWP;
+ u32 MSP_TotalBlock;
/* MS_LibControl MS_Lib; */
- BOOLEAN MS_IsRWPage;
- WORD MS_Model;
+ bool MS_IsRWPage;
+ u16 MS_Model;
/* ----- SM Control Data ---------------- */
- BYTE SM_DeviceID;
- BYTE SM_CardID;
+ u8 SM_DeviceID;
+ u8 SM_CardID;
- PBYTE testbuf;
- BYTE BIN_FLAG;
- DWORD bl_num;
+ u8 *testbuf;
+ u8 BIN_FLAG;
+ u32 bl_num;
int SrbStatus;
/* ------Power Managerment --------------- */
- BOOLEAN Power_IsResum;
+ bool Power_IsResum;
};
/* Convert between us_data and the corresponding Scsi_Host */
diff --git a/drivers/staging/line6/audio.c b/drivers/staging/line6/audio.c
index a92e21f7d55b..171d80c1b020 100644
--- a/drivers/staging/line6/audio.c
+++ b/drivers/staging/line6/audio.c
@@ -24,8 +24,9 @@ int line6_init_audio(struct usb_line6 *line6)
struct snd_card *card;
int err;
- err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
- THIS_MODULE, 0, &card);
+ err = snd_card_new(line6->ifcdev,
+ SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index f8316b71f13d..0eda51d2e9a2 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -157,6 +157,7 @@ void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
copy two separate chunks.
*/
int len;
+
len = runtime->buffer_size - line6pcm->pos_in_done;
if (len > 0) {
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 7a6d85ebb29b..77f1b421e957 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -57,28 +57,32 @@ static const struct usb_device_id line6_id_table[] = {
MODULE_DEVICE_TABLE(usb, line6_id_table);
+#define L6PROP(dev_bit, dev_id, dev_name, dev_cap)\
+ {.device_bit = LINE6_BIT_##dev_bit, .id = dev_id,\
+ .name = dev_name, .capabilities = LINE6_BIT_##dev_cap}
+
/* *INDENT-OFF* */
-static struct line6_properties line6_properties_table[] = {
- { LINE6_BIT_BASSPODXT, "BassPODxt", "BassPODxt", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
- { LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
- { LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODHD400, "PODHD400", "POD HD400", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
- { LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
- { LINE6_BIT_PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PCM },
- { LINE6_BIT_PODX3, "PODX3", "POD X3", LINE6_BIT_PCM },
- { LINE6_BIT_PODX3LIVE, "PODX3Live", "POD X3 Live", LINE6_BIT_PCM },
- { LINE6_BIT_PODXT, "PODxt", "PODxt", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODXTLIVE, "PODxtLive", "PODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_PODXTPRO, "PODxtPro", "PODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
- { LINE6_BIT_TONEPORT_GX, "TonePortGX", "TonePort GX", LINE6_BIT_PCM },
- { LINE6_BIT_TONEPORT_UX1, "TonePortUX1", "TonePort UX1", LINE6_BIT_PCM },
- { LINE6_BIT_TONEPORT_UX2, "TonePortUX2", "TonePort UX2", LINE6_BIT_PCM },
- { LINE6_BIT_VARIAX, "Variax", "Variax Workbench", LINE6_BIT_CONTROL },
+static const struct line6_properties line6_properties_table[] = {
+ L6PROP(BASSPODXT, "BassPODxt", "BassPODxt", CTRL_PCM_HW),
+ L6PROP(BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", CTRL_PCM_HW),
+ L6PROP(BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", CTRL_PCM_HW),
+ L6PROP(GUITARPORT, "GuitarPort", "GuitarPort", PCM),
+ L6PROP(POCKETPOD, "PocketPOD", "Pocket POD", CONTROL),
+ L6PROP(PODHD300, "PODHD300", "POD HD300", CTRL_PCM_HW),
+ L6PROP(PODHD400, "PODHD400", "POD HD400", CTRL_PCM_HW),
+ L6PROP(PODHD500, "PODHD500", "POD HD500", CTRL_PCM_HW),
+ L6PROP(PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", PCM),
+ L6PROP(PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", PCM),
+ L6PROP(PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", PCM),
+ L6PROP(PODX3, "PODX3", "POD X3", PCM),
+ L6PROP(PODX3LIVE, "PODX3Live", "POD X3 Live", PCM),
+ L6PROP(PODXT, "PODxt", "PODxt", CTRL_PCM_HW),
+ L6PROP(PODXTLIVE, "PODxtLive", "PODxt Live", CTRL_PCM_HW),
+ L6PROP(PODXTPRO, "PODxtPro", "PODxt Pro", CTRL_PCM_HW),
+ L6PROP(TONEPORT_GX, "TonePortGX", "TonePort GX", PCM),
+ L6PROP(TONEPORT_UX1, "TonePortUX1", "TonePort UX1", PCM),
+ L6PROP(TONEPORT_UX2, "TonePortUX2", "TonePort UX2", PCM),
+ L6PROP(VARIAX, "Variax", "Variax Workbench", CONTROL),
};
/* *INDENT-ON* */
@@ -152,10 +156,10 @@ int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
int retval;
retval = usb_interrupt_msg(line6->usbdev,
- usb_sndintpipe(line6->usbdev,
- line6->ep_control_write),
- (char *)frag_buf, frag_size,
- &partial, LINE6_TIMEOUT * HZ);
+ usb_sndintpipe(line6->usbdev,
+ line6->ep_control_write),
+ (char *)frag_buf, frag_size,
+ &partial, LINE6_TIMEOUT * HZ);
if (retval) {
dev_err(line6->ifcdev,
@@ -217,7 +221,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
Setup and start timer.
*/
void line6_start_timer(struct timer_list *timer, unsigned int msecs,
- void (*function) (unsigned long), unsigned long data)
+ void (*function)(unsigned long), unsigned long data)
{
setup_timer(timer, function, data);
timer->expires = jiffies + msecs * HZ / 1000;
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index 34ae95e7e512..16e3fc2f1f15 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -204,7 +204,7 @@ extern int line6_send_sysex_message(struct usb_line6 *line6,
extern ssize_t line6_set_raw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
extern void line6_start_timer(struct timer_list *timer, unsigned int msecs,
- void (*function) (unsigned long),
+ void (*function)(unsigned long),
unsigned long data);
extern int line6_transmit_parameter(struct usb_line6 *line6, int param,
u8 value);
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index 3f6d78c585fb..02345fb06e3d 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -47,7 +47,7 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
struct snd_line6_midi *line6midi = line6->line6midi;
struct midi_buffer *mb = &line6midi->midibuf_out;
unsigned long flags;
- unsigned char chunk[line6->max_packet_size];
+ unsigned char chunk[LINE6_FALLBACK_MAXPACKETSIZE];
int req, done;
spin_lock_irqsave(&line6->line6midi->midi_transmit_lock, flags);
@@ -64,7 +64,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
}
for (;;) {
- done = line6_midibuf_read(mb, chunk, line6->max_packet_size);
+ done = line6_midibuf_read(mb, chunk,
+ LINE6_FALLBACK_MAXPACKETSIZE);
if (done == 0)
break;
@@ -307,8 +308,6 @@ int line6_init_midi(struct usb_line6 *line6)
if (err < 0)
return err;
- snd_card_set_dev(line6->card, line6->ifcdev);
-
err = snd_line6_new_midi(line6midi);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index df8331bce175..661080b3c39d 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -501,8 +501,6 @@ int line6_init_pcm(struct usb_line6 *line6,
if (err < 0)
return err;
- snd_card_set_dev(line6->card, line6->ifcdev);
-
err = snd_line6_new_pcm(line6pcm);
if (err < 0)
return err;
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index 90cadddec56e..2d1cc472bead 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -91,9 +91,9 @@ enum {
LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE |
LINE6_BIT_PODXTPRO,
LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
- LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
- LINE6_BIT_PODHD400 |
- LINE6_BIT_PODHD500,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 |
+ LINE6_BIT_PODHD400 |
+ LINE6_BIT_PODHD500,
LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT |
LINE6_BIT_BASSPODXTLIVE |
LINE6_BIT_BASSPODXTPRO
@@ -106,7 +106,7 @@ enum {
/* device support hardware monitoring */
#define LINE6_BIT_HWMON (1 << 2)
-#define LINE6_BIT_CONTROL_PCM_HWMON (LINE6_BIT_CONTROL | \
+#define LINE6_BIT_CTRL_PCM_HW (LINE6_BIT_CONTROL | \
LINE6_BIT_PCM | \
LINE6_BIT_HWMON)
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 507d16b9213c..8fd47c98fd33 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -44,13 +44,6 @@
#define __LIBCFS_CURPROC_H__
/*
- * Portable API to access common characteristics of "current" UNIX process.
- *
- * Implemented in portals/include/libcfs/<os>/
- */
-int cfs_curproc_groups_nr(void);
-
-/*
* Plus, platform-specific constant
*
* CFS_CURPROC_COMM_MAX,
@@ -91,8 +84,6 @@ void cfs_cap_raise(cfs_cap_t cap);
void cfs_cap_lower(cfs_cap_t cap);
int cfs_cap_raised(cfs_cap_t cap);
cfs_cap_t cfs_curproc_cap_pack(void);
-void cfs_curproc_cap_unpack(cfs_cap_t cap);
-int cfs_capable(cfs_cap_t cap);
/* __LIBCFS_CURPROC_H__ */
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 9d5ee1a69c0c..e5d5db255622 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -65,8 +65,6 @@
#include <linux/hash.h>
-#define cfs_hash_long(val, bits) hash_long(val, bits)
-
/** disable debug */
#define CFS_HASH_DEBUG_NONE 0
/** record hash depth and output to console when it's too deep,
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 74dda57b98a8..49ba62a4daa8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -211,10 +211,10 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
}
-extern int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-extern int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-extern int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-extern int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
+int libcfs_ioctl_popdata(void *arg, void *buf, int size);
#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index e6e417aeefd5..7cf34aa78f79 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -41,19 +41,10 @@
#define __LIBCFS_PRIM_H__
/*
- * Schedule
- */
-void cfs_pause(cfs_duration_t ticks);
-
-/*
* Timer
*/
typedef void (cfs_timer_func_t)(ulong_ptr_t);
-void schedule_timeout_and_set_state(long, int64_t);
-void init_waitqueue_entry_current(wait_queue_t *link);
-int64_t waitq_timedwait(wait_queue_t *, long, int64_t);
-void waitq_wait(wait_queue_t *, long);
void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
void cfs_init_timer(struct timer_list *t);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index a6bac9c36339..509dc1e5c3b1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -49,18 +49,6 @@ int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
/* Convert a text string to a bitmask */
int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask);
-
-/* Allocate space for and copy an existing string.
- * Must free with kfree().
- */
-char *cfs_strdup(const char *str, u_int32_t flags);
-
-/* safe vsnprintf */
-int cfs_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-
-/* safe snprintf */
-int cfs_snprintf(char *buf, size_t size, const char *fmt, ...);
-
/* trim leading and trailing space characters */
char *cfs_firststr(char *str, size_t size);
@@ -90,27 +78,10 @@ struct cfs_expr_list {
struct list_head el_exprs;
};
-static inline int
-cfs_iswhite(char c)
-{
- switch (c) {
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- return 1;
- default:
- break;
- }
- return 0;
-}
-
char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
int cfs_str2num_check(char *str, int nob, unsigned *num,
unsigned min, unsigned max);
-int cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
- int single_tok, struct cfs_range_expr **expr);
int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
int cfs_expr_list_values(struct cfs_expr_list *expr_list,
int max, __u32 **values);
@@ -124,7 +95,6 @@ cfs_expr_list_values_free(__u32 *values, int num)
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-void cfs_expr_list_print(struct cfs_expr_list *expr_list);
int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
struct cfs_expr_list **elpp);
void cfs_expr_list_free_list(struct list_head *list);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 3ac2bb5fd2db..856fcfaafafa 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -628,7 +628,7 @@ void lnet_ni_free(lnet_ni_t *ni);
static inline int
lnet_nid2peerhash(lnet_nid_t nid)
{
- return cfs_hash_long(nid, LNET_PEER_HASH_BITS);
+ return hash_long(nid, LNET_PEER_HASH_BITS);
}
static inline struct list_head *
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index dd8edcf1b5c0..1c13ef7df80e 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -282,16 +282,14 @@ typedef struct lnet_libmd {
#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
#ifdef LNET_USE_LIB_FREELIST
-typedef struct
-{
+typedef struct {
void *fl_objs; /* single contiguous array of objects */
int fl_nobjs; /* the number of them */
int fl_objsize; /* the size (including overhead) of each of them */
struct list_head fl_list; /* where they are enqueued */
} lnet_freelist_t;
-typedef struct
-{
+typedef struct {
struct list_head fo_list; /* enqueue on fl_list */
void *fo_contents; /* aligned contents */
} lnet_freeobj_t;
@@ -312,8 +310,7 @@ typedef struct {
struct lnet_ni; /* forward ref */
-typedef struct lnet_lnd
-{
+typedef struct lnet_lnd {
/* fields managed by portals */
struct list_head lnd_list; /* stash in the LND table */
int lnd_refcount; /* # active instances */
@@ -321,8 +318,8 @@ typedef struct lnet_lnd
/* fields initialised by the LND */
unsigned int lnd_type;
- int (*lnd_startup) (struct lnet_ni *ni);
- void (*lnd_shutdown) (struct lnet_ni *ni);
+ int (*lnd_startup)(struct lnet_ni *ni);
+ void (*lnd_shutdown)(struct lnet_ni *ni);
int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
/* In data movement APIs below, payload buffers are described as a set
@@ -668,8 +665,7 @@ struct lnet_msg_container {
#define LNET_RC_STATE_RUNNING 1 /* started up OK */
#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
-typedef struct
-{
+typedef struct {
/* CPU partition table of LNet */
struct cfs_cpt_table *ln_cpt_table;
/* number of CPTs in ln_cpt_table */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 644a0000130a..0061c8afa206 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -41,7 +41,7 @@
#include "o2iblnd.h"
#include <asm/div64.h>
-lnd_t the_o2iblnd = {
+static lnd_t the_o2iblnd = {
.lnd_type = O2IBLND,
.lnd_startup = kiblnd_startup,
.lnd_shutdown = kiblnd_shutdown,
@@ -53,8 +53,8 @@ lnd_t the_o2iblnd = {
kib_data_t kiblnd_data;
-__u32
-kiblnd_cksum (void *ptr, int nob)
+static __u32
+kiblnd_cksum(void *ptr, int nob)
{
char *c = ptr;
__u32 sum = 0;
@@ -429,8 +429,8 @@ kiblnd_unlink_peer_locked (kib_peer_t *peer)
kiblnd_peer_decref(peer);
}
-int
-kiblnd_get_peer_info (lnet_ni_t *ni, int index,
+static int
+kiblnd_get_peer_info(lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
kib_peer_t *peer;
@@ -468,8 +468,8 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
return -ENOENT;
}
-void
-kiblnd_del_peer_locked (kib_peer_t *peer)
+static void
+kiblnd_del_peer_locked(kib_peer_t *peer)
{
struct list_head *ctmp;
struct list_head *cnxt;
@@ -489,8 +489,8 @@ kiblnd_del_peer_locked (kib_peer_t *peer)
* last ref on it. */
}
-int
-kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
+static int
+kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
{
LIST_HEAD (zombies);
struct list_head *ptmp;
@@ -543,8 +543,8 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
return rc;
}
-kib_conn_t *
-kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
+static kib_conn_t *
+kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
kib_peer_t *peer;
struct list_head *ptmp;
@@ -584,74 +584,6 @@ kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
return NULL;
}
-void
-kiblnd_debug_rx (kib_rx_t *rx)
-{
- CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
- rx, rx->rx_status, rx->rx_msg->ibm_type,
- rx->rx_msg->ibm_credits);
-}
-
-void
-kiblnd_debug_tx (kib_tx_t *tx)
-{
- CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
- "cookie "LPX64" msg %s%s type %x cred %d\n",
- tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
- tx->tx_status, tx->tx_deadline, tx->tx_cookie,
- tx->tx_lntmsg[0] == NULL ? "-" : "!",
- tx->tx_lntmsg[1] == NULL ? "-" : "!",
- tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
-}
-
-void
-kiblnd_debug_conn (kib_conn_t *conn)
-{
- struct list_head *tmp;
- int i;
-
- spin_lock(&conn->ibc_lock);
-
- CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
- atomic_read(&conn->ibc_refcount), conn,
- conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
- conn->ibc_state, conn->ibc_noops_posted,
- conn->ibc_nsends_posted, conn->ibc_credits,
- conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
- CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
-
- CDEBUG(D_CONSOLE, " early_rxs:\n");
- list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
-
- CDEBUG(D_CONSOLE, " tx_noops:\n");
- list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
- list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " tx_queue:\n");
- list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " active_txs:\n");
- list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
- CDEBUG(D_CONSOLE, " rxs:\n");
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
- kiblnd_debug_rx(&conn->ibc_rxs[i]);
-
- spin_unlock(&conn->ibc_lock);
-}
-
int
kiblnd_translate_mtu(int value)
{
@@ -1039,8 +971,8 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer,
return count;
}
-int
-kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
+static int
+kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
kib_peer_t *peer;
struct list_head *ptmp;
@@ -1440,7 +1372,7 @@ kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
return mr;
}
-void
+static void
kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
LASSERT (pool->fpo_map_count == 0);
@@ -1454,7 +1386,7 @@ kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
}
-void
+static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
kib_fmr_pool_t *pool;
@@ -1480,7 +1412,7 @@ static int kiblnd_fmr_flush_trigger(int ncpts)
return max(IBLND_FMR_POOL_FLUSH, size);
}
-int
+static int
kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
{
/* FMR pool for RDMA */
@@ -1719,7 +1651,7 @@ kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
pool->po_size = size;
}
-void
+static void
kiblnd_destroy_pool_list(struct list_head *head)
{
kib_pool_t *pool;
@@ -2192,7 +2124,7 @@ kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
tx->tx_cookie = tps->tps_next_tx_cookie ++;
}
-void
+static void
kiblnd_net_fini_pools(kib_net_t *net)
{
int i;
@@ -2234,7 +2166,7 @@ kiblnd_net_fini_pools(kib_net_t *net)
}
}
-int
+static int
kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
{
unsigned long flags;
@@ -2408,7 +2340,7 @@ kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return -EINVAL;
}
-void
+static void
kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
int i;
@@ -2442,7 +2374,7 @@ kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-int
+static int
kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
struct ib_mr *mr;
@@ -2746,7 +2678,7 @@ kiblnd_destroy_dev (kib_dev_t *dev)
LIBCFS_FREE(dev, sizeof(*dev));
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_create_dev(char *ifname)
{
struct net_device *netdev;
@@ -2800,7 +2732,7 @@ kiblnd_create_dev(char *ifname)
return dev;
}
-void
+static void
kiblnd_base_shutdown(void)
{
struct kib_sched_info *sched;
@@ -2842,7 +2774,8 @@ kiblnd_base_shutdown(void)
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
/* fall through */
@@ -2903,7 +2836,8 @@ kiblnd_shutdown (lnet_ni_t *ni)
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
kiblnd_net_fini_pools(net);
@@ -2940,7 +2874,7 @@ out:
return;
}
-int
+static int
kiblnd_base_startup(void)
{
struct kib_sched_info *sched;
@@ -3030,7 +2964,7 @@ kiblnd_base_startup(void)
return -ENETDOWN;
}
-int
+static int
kiblnd_start_schedulers(struct kib_sched_info *sched)
{
int rc = 0;
@@ -3071,7 +3005,7 @@ kiblnd_start_schedulers(struct kib_sched_info *sched)
return rc;
}
-int
+static int
kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
{
int cpt;
@@ -3097,7 +3031,7 @@ kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
return 0;
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_dev_search(char *ifname)
{
kib_dev_t *alias = NULL;
@@ -3226,13 +3160,13 @@ failed:
return -ENETDOWN;
}
-void __exit
+static void __exit
kiblnd_module_fini (void)
{
lnet_unregister_lnd(&the_o2iblnd);
}
-int __init
+static int __init
kiblnd_module_init (void)
{
int rc;
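The cfs_pause(cfs_time_seconds(n)) calls replaced throughout this file become the stock kernel sleep sequence. A hedged sketch of that sequence as a helper; the name is illustrative, and on a current tree the one-liner schedule_timeout_uninterruptible(secs * HZ) does the same thing:

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Sleep roughly @secs seconds; TASK_UNINTERRUPTIBLE means signals do
 * not cut the nap short, matching the old cfs_pause() behaviour. */
static void demo_pause_seconds(unsigned int secs)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(secs * HZ);
}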
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6173e74d7492 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (is_vmalloc_addr(vaddr)) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
@@ -3127,7 +3127,7 @@ kiblnd_connd (void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
@@ -3208,7 +3208,7 @@ kiblnd_connd (void *arg)
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
@@ -3324,7 +3324,7 @@ kiblnd_scheduler(void *arg)
cfs_block_allsigs();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
@@ -3423,7 +3423,7 @@ kiblnd_scheduler(void *arg)
add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
@@ -3450,7 +3450,7 @@ kiblnd_failover_thread(void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
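The init_waitqueue_entry_current()/waitq_wait() wrappers removed above are replaced by the standard wait-queue sequence. A compact sketch of that sequence with a generic queue and condition (all names below are placeholders); the waking side would set demo_event and call wake_up(&demo_waitq):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_event;

/* Block until demo_event becomes non-zero, mirroring the open-coded
 * add_wait_queue/schedule/remove_wait_queue pattern in the patch. */
static void demo_wait_for_event(void)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(&demo_waitq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (!demo_event) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&demo_waitq, &wait);
}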
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 8f74d0be32f1..21d36ee5378a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2336,7 +2336,8 @@ ksocknal_base_shutdown(void)
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
read_unlock(&ksocknal_data.ksnd_global_lock);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
read_lock(&ksocknal_data.ksnd_global_lock);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2584,7 +2585,8 @@ ksocknal_shutdown (lnet_ni_t *ni)
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
net->ksnn_npeers);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..bdf95ea8a54d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -189,7 +189,8 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
int bufnob;
if (ksocknal_data.ksnd_stall_tx != 0) {
- cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
LASSERT (tx->tx_resid != 0);
@@ -345,7 +346,8 @@ ksocknal_receive (ksock_conn_t *conn)
int rc;
if (ksocknal_data.ksnd_stall_rx != 0) {
- cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
}
rc = ksocknal_connsock_addref(conn);
@@ -924,7 +926,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +995,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
@@ -2139,7 +2142,7 @@ ksocknal_connd (void *arg)
cfs_block_allsigs ();
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(connd_lock);
@@ -2228,7 +2231,7 @@ ksocknal_connd (void *arg)
spin_unlock_bh(connd_lock);
nloops = 0;
- waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+ schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
@@ -2531,7 +2534,7 @@ ksocknal_reaper (void *arg)
cfs_block_allsigs ();
INIT_LIST_HEAD(&enomem_conns);
- init_waitqueue_entry_current (&wait);
+ init_waitqueue_entry(&wait, current);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2638,8 +2641,7 @@ ksocknal_reaper (void *arg)
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
list_empty (&ksocknal_data.ksnd_zombie_conns))
- waitq_timedwait (&wait, TASK_INTERRUPTIBLE,
- timeout);
+ schedule_timeout(timeout);
set_current_state (TASK_RUNNING);
remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index 80141aa32c21..a54b506ba7ca 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -373,7 +373,8 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
- if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
+ addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
+ if (addr != NULL) {
nob = scratchiov[0].iov_len;
msg.msg_iovlen = 1;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 71205e2015ce..2d91571cbab2 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -165,7 +165,8 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
LASSERT (tx_ack == NULL ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
- if ((tx = conn->ksnc_tx_carrier) == NULL) {
+ tx = conn->ksnc_tx_carrier;
+ if (tx == NULL) {
if (tx_ack != NULL) {
list_add_tail(&tx_ack->tx_list,
&conn->ksnc_tx_queue);
@@ -392,7 +393,8 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
if (tx == NULL)
return -ENOMEM;
- if ((rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) == 0)
+ rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
+ if (rc == 0)
return 0;
ksocknal_free_tx(tx);
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index cb2ecd717714..09ea6cb1492c 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -371,7 +371,8 @@ lnet_acceptor(void *arg)
if (rc != 0) {
if (rc != -EAGAIN) {
CWARN("Accept error %d: pausing...\n", rc);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
continue;
}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index c562ff3e9283..45c23194081b 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -770,7 +770,7 @@ lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
if (number == 1)
return 0;
- val = cfs_hash_long(key, LNET_CPT_BITS);
+ val = hash_long(key, LNET_CPT_BITS);
/* NB: LNET_CP_NUMBER doesn't have to be PO2 */
if (val < number)
return val;
@@ -994,7 +994,8 @@ lnet_shutdown_lndnis (void)
"Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lnet_net_lock(LNET_LOCK_EX);
continue;
}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 6a07b0a65d12..d97464e95ddb 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -443,7 +443,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
/* Split 'str' into separate commands */
for (;;) {
/* skip leading whitespace */
- while (cfs_iswhite(*str))
+ while (isspace(*str))
str++;
/* scan for separator or comment */
@@ -460,7 +460,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
}
for (i = 0; i < nob; i++)
- if (cfs_iswhite(str[i]))
+ if (isspace(str[i]))
ltb->ltb_text[i] = ' ';
else
ltb->ltb_text[i] = str[i];
@@ -667,7 +667,7 @@ lnet_parse_route(char *str, int *im_a_router)
sep = str;
for (;;) {
/* scan for token start */
- while (cfs_iswhite(*sep))
+ while (isspace(*sep))
sep++;
if (*sep == 0) {
if (ntokens < (got_hops ? 3 : 2))
@@ -679,7 +679,7 @@ lnet_parse_route(char *str, int *im_a_router)
token = sep++;
/* scan for token end */
- while (*sep != 0 && !cfs_iswhite(*sep))
+ while (*sep != 0 && !isspace(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
@@ -858,7 +858,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
sep = tokens;
for (;;) {
/* scan for token start */
- while (cfs_iswhite(*sep))
+ while (isspace(*sep))
sep++;
if (*sep == 0)
break;
@@ -866,7 +866,7 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
token = sep++;
/* scan for token end */
- while (*sep != 0 && !cfs_iswhite(*sep))
+ while (*sep != 0 && !isspace(*sep))
sep++;
if (*sep != 0)
*sep++ = 0;
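The cfs_iswhite() → isspace() swaps above lean on the kernel's <linux/ctype.h>. A minimal sketch of the same token-scanning loop used by the route and network parsers; the callback is an assumption for illustration:

#include <linux/ctype.h>

/* Walk a writable string, NUL-terminating each whitespace-separated
 * token in place and handing it to @handle, as the parsers above do. */
static void demo_for_each_token(char *str, void (*handle)(char *tok))
{
	char *sep = str;
	char *token;

	for (;;) {
		while (isspace(*sep))	/* skip leading whitespace */
			sep++;
		if (*sep == '\0')
			break;
		token = sep++;
		while (*sep != '\0' && !isspace(*sep))
			sep++;		/* scan for token end */
		if (*sep != '\0')
			*sep++ = '\0';
		handle(token);
	}
}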
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 4ce68d3b0931..7ce07f61b2e1 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -334,21 +334,20 @@ lnet_eq_wait_locked(int *timeout_ms)
if (tms == 0)
return -1; /* don't want to wait and no new event */
- init_waitqueue_entry_current(&wl);
+ init_waitqueue_entry(&wl, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
- waitq_wait(&wl, TASK_INTERRUPTIBLE);
+ schedule();
} else {
struct timeval tv;
now = cfs_time_current();
- waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
- cfs_time_seconds(tms) / 1000);
+ schedule_timeout(cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
if (tms < 0) /* no more wait but may have new event */
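The timed branch above works because schedule_timeout() returns the unexpired part of the timeout, so the caller can account for how long it actually slept. A hedged sketch of that bookkeeping in milliseconds (helper name is illustrative):

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Sleep up to @ms milliseconds (interruptible) and report how much of
 * the budget was left if the sleep ended early. */
static unsigned int demo_wait_ms(unsigned int ms)
{
	long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(ms));
	return jiffies_to_msecs(remaining);
}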
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 6fffd5e96f9c..920df69960a5 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -366,7 +366,7 @@ lnet_mt_match_head(struct lnet_match_table *mtable,
unsigned long hash = mbits + id.nid + id.pid;
LASSERT(lnet_ptl_is_unique(ptl));
- hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
+ hash = hash_long(hash, LNET_MT_HASH_BITS);
return &mtable->mt_mhash[hash];
}
}
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 286977691393..72802b0404a4 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -145,7 +145,8 @@ lnet_peer_tables_cleanup(void)
"Waiting for %d peers on peer table\n",
ptable->pt_number);
}
- cfs_pause(cfs_time_seconds(1) / 2);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 2);
lnet_net_lock(i);
}
list_splice_init(&ptable->pt_deathrow, &deathrow);
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index d1ee44232eef..995f50976c42 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -779,7 +779,8 @@ lnet_wait_known_routerstate(void)
if (all_known)
return;
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
}
@@ -1147,7 +1148,8 @@ lnet_prune_rc_data(int wait_unlink)
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for rc buffers to unlink\n");
- cfs_pause(cfs_time_seconds(1) / 4);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 4);
lnet_net_lock(LNET_LOCK_EX);
}
@@ -1206,11 +1208,11 @@ rescan:
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call cfs_pause() here always adds 1 to load average
+ /* Call schedule_timeout() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
* + nr_uninterruptible. */
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
}
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
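The comment rewritten in the hunk above is why this particular sleep stays TASK_INTERRUPTIBLE: the load average counts nr_running plus nr_uninterruptible tasks, so a housekeeping thread napping in TASK_UNINTERRUPTIBLE permanently adds one to loadavg. A hedged sketch of the interruptible nap (equivalent to schedule_timeout_interruptible()):

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Periodic nap that is not counted into the load average; a pending
 * signal may end it early, which is harmless for a polling loop. */
static void demo_poll_nap(unsigned int secs)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(secs * HZ);
}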
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 20d53e08705e..0cbd9fc98e07 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -837,8 +837,8 @@ static int __proc_lnet_portal_rotor(void *data, int write,
rc = -EINVAL;
lnet_res_lock(0);
for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
- if (cfs_strncasecmp(portal_rotors[i].pr_name, tmp,
- strlen(portal_rotors[i].pr_name)) == 0) {
+ if (strncasecmp(portal_rotors[i].pr_name, tmp,
+ strlen(portal_rotors[i].pr_name)) == 0) {
portal_rotor = portal_rotors[i].pr_value;
rc = 0;
break;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 53d58924737b..8d1eea4cef6f 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -1346,7 +1346,8 @@ lstcon_rpc_cleanup_wait(void)
mutex_unlock(&console_session.ses_mutex);
CWARN("Session is shutting down, waiting for termination of transactions\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
}
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 2a8eddc7db52..352fc96398d2 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
lstcon_session_t console_session;
-void
+static void
lstcon_node_get(lstcon_node_t *nd)
{
LASSERT (nd->nd_ref >= 1);
@@ -114,7 +114,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
return 0;
}
-void
+static void
lstcon_node_put(lstcon_node_t *nd)
{
lstcon_ndlink_t *ndl;
@@ -344,7 +344,7 @@ lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
}
}
-int
+static int
lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
lstcon_group_t *grp = (lstcon_group_t *)arg;
@@ -373,7 +373,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
return 1;
}
-int
+static int
lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -830,7 +830,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
return 0;
}
-int
+static int
lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
{
lstcon_batch_t *bat;
@@ -998,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
return rc;
}
-int
+static int
lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
switch (transop) {
@@ -1141,7 +1141,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
}
-int
+static int
lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
{
lstcon_test_t *test;
@@ -1370,7 +1370,7 @@ out:
return rc;
}
-int
+static int
lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
{
lstcon_test_t *test;
@@ -1385,7 +1385,7 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
return -ENOENT;
}
-int
+static int
lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -1464,7 +1464,7 @@ lstcon_test_batch_query(char *name, int testidx, int client,
return rc;
}
-int
+static int
lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
lstcon_rpc_ent_t *ent_up)
{
@@ -1488,7 +1488,7 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
return 0;
}
-int
+static int
lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head *result_up)
{
@@ -1577,7 +1577,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
return rc;
}
-int
+static int
lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
int timeout, struct list_head *result_up)
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index d838985f51cb..9fc0429e8296 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -1585,7 +1585,8 @@ srpc_startup (void)
spin_lock_init(&srpc_data.rpc_glock);
/* 1 second pause to avoid timestamp reuse */
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 228927e0f962..f4806a6bc942 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -572,7 +572,11 @@ swi_state2str (int state)
#undef STATE2STR
}
-#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
+#define selftest_wait_events() \
+ do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(cfs_time_seconds(1) / 10); \
+ } while (0)
#define lst_wait_until(cond, lock, fmt, ...) \
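The rewritten selftest_wait_events() above wraps its two statements in do { ... } while (0) so the macro expands to a single statement. That guard matters whenever the macro is the body of an un-braced if/else; a small illustration with placeholder names:

#include <linux/sched.h>
#include <linux/jiffies.h>

static void demo_flush(void) { /* placeholder */ }

/* Without the do/while(0) wrapper, only the first statement would be
 * governed by the if below and the else would no longer pair up. */
#define DEMO_WAIT_EVENTS()					\
	do {							\
		set_current_state(TASK_UNINTERRUPTIBLE);	\
		schedule_timeout(HZ / 10);			\
	} while (0)

static void demo_maybe_wait(int busy)
{
	if (busy)
		DEMO_WAIT_EVENTS();
	else
		demo_flush();
}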
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 79fc2fe131a2..3401c9ad42ac 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -196,8 +196,8 @@ static int seq_client_alloc_seq(const struct lu_env *env,
if (range_is_exhausted(&seq->lcs_space)) {
rc = seq_client_alloc_meta(env, seq);
if (rc) {
- CERROR("%s: Can't allocate new meta-sequence,"
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
+ seq->lcs_name, rc);
return rc;
} else {
CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
@@ -225,7 +225,7 @@ static int seq_fid_alloc_prep(struct lu_client_seq *seq,
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, link);
@@ -256,7 +256,7 @@ int seq_client_get_seq(const struct lu_env *env,
LASSERT(seqnr != NULL);
mutex_lock(&seq->lcs_mutex);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
while (1) {
rc = seq_fid_alloc_prep(seq, &link);
@@ -266,15 +266,15 @@ int seq_client_get_seq(const struct lu_env *env,
rc = seq_client_alloc_seq(env, seq, seqnr);
if (rc) {
- CERROR("%s: Can't allocate new sequence, "
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new sequence, rc %d\n",
+ seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
return rc;
}
- CDEBUG(D_INFO, "%s: allocate sequence "
- "[0x%16.16"LPF64"x]\n", seq->lcs_name, *seqnr);
+ CDEBUG(D_INFO, "%s: allocate sequence [0x%16.16"LPF64"x]\n",
+ seq->lcs_name, *seqnr);
/* Since the caller require the whole seq,
* so marked this seq to be used */
@@ -306,7 +306,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
LASSERT(seq != NULL);
LASSERT(fid != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
@@ -329,15 +329,15 @@ int seq_client_alloc_fid(const struct lu_env *env,
rc = seq_client_alloc_seq(env, seq, &seqnr);
if (rc) {
- CERROR("%s: Can't allocate new sequence, "
- "rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't allocate new sequence, rc %d\n",
+ seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
return rc;
}
- CDEBUG(D_INFO, "%s: Switch to sequence "
- "[0x%16.16"LPF64"x]\n", seq->lcs_name, seqnr);
+ CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16"LPF64"x]\n",
+ seq->lcs_name, seqnr);
seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
seq->lcs_fid.f_seq = seqnr;
@@ -370,7 +370,7 @@ void seq_client_flush(struct lu_client_seq *seq)
wait_queue_t link;
LASSERT(seq != NULL);
- init_waitqueue_entry_current(&link);
+ init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
while (seq->lcs_update) {
@@ -378,7 +378,7 @@ void seq_client_flush(struct lu_client_seq *seq)
set_current_state(TASK_UNINTERRUPTIBLE);
mutex_unlock(&seq->lcs_mutex);
- waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+ schedule();
mutex_lock(&seq->lcs_mutex);
remove_wait_queue(&seq->lcs_waitq, &link);
@@ -428,8 +428,8 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
rc = lprocfs_add_vars(seq->lcs_proc_dir,
seq_client_proc_list, seq);
if (rc) {
- CERROR("%s: Can't init sequence manager "
- "proc, rc %d\n", seq->lcs_name, rc);
+ CERROR("%s: Can't init sequence manager proc, rc %d\n",
+ seq->lcs_name, rc);
GOTO(out_cleanup, rc);
}
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 6c379301df82..a06a642f549e 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -236,8 +236,8 @@ static int fld_cache_shrink(struct fld_cache *cache)
num++;
}
- CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
- "%d entries\n", cache->fci_name, num);
+ CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
+ cache->fci_name, num);
return 0;
}
@@ -355,7 +355,7 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
} else
CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
- PRANGE(range),PRANGE(&f_curr->fce_range));
+ PRANGE(range), PRANGE(&f_curr->fce_range));
}
struct fld_cache_entry
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 5f3935cc0fd7..8661a788f120 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -90,7 +90,7 @@ struct fld_cache {
int fci_threshold;
/**
- * Prefered number of cached entries */
+ * Preferred number of cached entries */
int fci_cache_size;
/**
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 896f9fe83ffd..1f8abba31428 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -138,9 +138,8 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
return target;
}
- CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
- "Targets (%d):\n", fld->lcf_name, hash, seq,
- fld->lcf_count);
+ CERROR("%s: Can't find target by hash %d (seq "LPX64"). Targets (%d):\n",
+ fld->lcf_name, hash, seq, fld->lcf_count);
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
const char *srv_name = target->ft_srv != NULL ?
@@ -209,9 +208,8 @@ int fld_client_add_target(struct lu_client_fld *fld,
LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
if (fld->lcf_flags != LUSTRE_FLD_INIT) {
- CERROR("%s: Attempt to add target %s (idx "LPU64") "
- "on fly - skip it\n", fld->lcf_name, name,
- tar->ft_idx);
+ CERROR("%s: Attempt to add target %s (idx "LPU64") on fly - skip it\n",
+ fld->lcf_name, name, tar->ft_idx);
return 0;
} else {
CDEBUG(D_INFO, "%s: Adding target %s (idx "
@@ -476,9 +474,8 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
target = fld_client_get_target(fld, seq);
LASSERT(target != NULL);
- CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on "
- "target %s (idx "LPU64")\n", fld->lcf_name, seq,
- fld_target_name(target), target->ft_idx);
+ CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on target %s (idx "LPU64")\n",
+ fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
res.lsr_start = seq;
fld_range_set_type(&res, flags);
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 4d692dcd96cf..c809239c0866 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1314,7 +1314,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* calls. To achieve this, every layer can implement ->clo_fits_into() method,
* that is called by lock matching code (cl_lock_lookup()), and that can be
* used to selectively disable matching of certain locks for certain IOs. For
- * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe
+ * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
* locks to be matched only for truncates and O_APPEND writes.
*
* Interaction with DLM
@@ -2385,14 +2385,18 @@ struct cl_io {
* Check if layout changed after the IO finishes. Mainly for HSM
* requirement. If IO occurs to openning files, it doesn't need to
* verify layout because HSM won't release openning files.
- * Right now, only two opertaions need to verify layout: glimpse
+ * Right now, only two operations need to verify layout: glimpse
* and setattr.
*/
ci_verify_layout:1,
/**
* file is released, restore has to to be triggered by vvp layer
*/
- ci_restore_needed:1;
+ ci_restore_needed:1,
+ /**
+ * O_NOATIME
+ */
+ ci_noatime:1;
/**
* Number of pages owned by this IO. For invariant checking.
*/
@@ -2552,7 +2556,7 @@ struct cl_req_obj {
*/
struct cl_req {
enum cl_req_type crq_type;
- /** A list of pages being transfered */
+ /** A list of pages being transferred */
struct list_head crq_pages;
/** Number of pages in cl_req::crq_pages */
unsigned crq_nrpages;
@@ -3224,7 +3228,7 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
*
* - call chains have no non-lustre portions inserted between lustre code.
*
- * On a client both these assumtpion fails, because every user thread can
+ * On a client both these assumption fails, because every user thread can
* potentially execute lustre code as part of a system call, and lustre calls
* into VFS or MM that call back into lustre.
*
diff --git a/drivers/staging/lustre/lustre/include/ioctl.h b/drivers/staging/lustre/lustre/include/ioctl.h
index 227c261b2ae9..b986920926bd 100644
--- a/drivers/staging/lustre/lustre/include/ioctl.h
+++ b/drivers/staging/lustre/lustre/include/ioctl.h
@@ -38,7 +38,7 @@
* and on newer kernels this header is shared as _ASM_GENERIC_IOCTL_H.
*
* We can avoid any problems with the kernel header being included again by
- * defining _ASM_I386_IOCTL_H here so that a later occurence of <asm/ioctl.h>
+ * defining _ASM_I386_IOCTL_H here so that a later occurrence of <asm/ioctl.h>
* does not include the kernel's ioctl.h after this one. b=14746 */
#define _ASM_I386_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 27316f7b7a70..827209ea6bd0 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -118,8 +118,8 @@ struct ccc_io {
};
/**
- * True, if \a io is a normal io, False for other (sendfile, splice*).
- * must be impementated in arch specific code.
+ * True, if \a io is a normal io, False for splice_{read,write}.
+ * must be implemented in arch specific code.
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
index 778b123ce31e..a91c5497d22c 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
@@ -44,7 +44,7 @@
#define _LUSTRE_LINUX_ACL_H
#ifndef _LUSTRE_ACL_H
-#error Shoud not include direectly. use #include <lustre_acl.h> instead
+#error Should not include directly. use #include <lustre_acl.h> instead
#endif
#include <linux/fs.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 01a50265239d..dc36f75eb635 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -96,7 +96,8 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
LCONSOLE_WARN("====== for current process =====\n");
dump_stack();
LCONSOLE_WARN("====== end =======\n");
- cfs_pause(1000 * HZ);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1000 * HZ);
}
cpu_relax();
}
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5da31c54924a..87905bbc68e1 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -91,8 +91,8 @@
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#if !defined(LASSERT) && !defined(LPU64)
-#include <linux/libcfs/libcfs.h> /* for LASSERT, LPUX64, etc */
+#if !defined(LPU64)
+#include <linux/libcfs/libcfs.h> /* for LPUX64, etc */
#endif
/* Defn's shared with user-space. */
@@ -232,7 +232,6 @@ static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
static inline void fld_range_set_type(struct lu_seq_range *range,
unsigned flags)
{
- LASSERT(!(flags & ~LU_SEQ_RANGE_MASK));
range->lsr_flags |= flags;
}
@@ -351,7 +350,7 @@ struct som_attrs {
/** Bitfield for supported data in this structure. For future use. */
__u32 som_compat;
- /** Incompat feature list. The supported feature mask is availabe in
+ /** Incompat feature list. The supported feature mask is available in
* SOM_INCOMPAT_SUPP */
__u32 som_incompat;
@@ -615,7 +614,6 @@ static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
- LASSERT(fid_is_idif(fid));
return (fid_seq(fid) >> 16) & 0xffff;
}
@@ -833,11 +831,6 @@ static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
*/
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = cpu_to_le64(fid_seq(src));
dst->f_oid = cpu_to_le32(fid_oid(src));
dst->f_ver = cpu_to_le32(fid_ver(src));
@@ -845,11 +838,6 @@ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = le64_to_cpu(fid_seq(src));
dst->f_oid = le32_to_cpu(fid_oid(src));
dst->f_ver = le32_to_cpu(fid_ver(src));
@@ -857,11 +845,6 @@ static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = cpu_to_be64(fid_seq(src));
dst->f_oid = cpu_to_be32(fid_oid(src));
dst->f_ver = cpu_to_be32(fid_ver(src));
@@ -869,11 +852,6 @@ static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof(*src) ==
- sizeof(fid_seq(src)) +
- sizeof(fid_oid(src)) +
- sizeof(fid_ver(src)));
dst->f_seq = be64_to_cpu(fid_seq(src));
dst->f_oid = be32_to_cpu(fid_oid(src));
dst->f_ver = be32_to_cpu(fid_ver(src));
@@ -897,11 +875,6 @@ extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
- /* Check that there is no alignment padding. */
- CLASSERT(sizeof(*f0) ==
- sizeof(f0->f_seq) +
- sizeof(f0->f_oid) +
- sizeof(f0->f_ver));
return memcmp(f0, f1, sizeof(*f0)) == 0;
}
@@ -960,7 +933,7 @@ enum lu_dirent_attrs {
LUDA_TYPE = 0x0002,
LUDA_64BITHASH = 0x0004,
- /* The following attrs are used for MDT interanl only,
+ /* The following attrs are used for MDT internal only,
* not visible to client */
/* Verify the dirent consistency */
@@ -1331,6 +1304,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
+#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
+#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
+
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
* flag value is not in use on some other branch. Please clear any such
@@ -1368,7 +1344,10 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
OBD_CONNECT_EINPROGRESS | \
OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE)
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
+ OBD_CONNECT_FLOCK_DEAD | \
+ OBD_CONNECT_DISP_STRIPE)
+
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
@@ -1771,7 +1750,6 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
-#define OBD_MD_FLXATTRLOCKED OBD_MD_FLGETATTRLOCK
#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
@@ -2134,19 +2112,32 @@ extern void lustre_swab_generic_32s (__u32 *val);
#define DISP_LOOKUP_POS 0x00000008
#define DISP_OPEN_CREATE 0x00000010
#define DISP_OPEN_OPEN 0x00000020
-#define DISP_ENQ_COMPLETE 0x00400000
+#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
#define DISP_ENQ_OPEN_REF 0x00800000
#define DISP_ENQ_CREATE_REF 0x01000000
#define DISP_OPEN_LOCK 0x02000000
#define DISP_OPEN_LEASE 0x04000000
+#define DISP_OPEN_STRIPE 0x08000000
/* INODE LOCK PARTS */
-#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
-#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
-#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
-#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
-#define MDS_INODELOCK_PERM 0x000010 /* for permission */
-#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
+#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
+ * was used to protect permission (mode,
+ * owner, group etc) before 2.4. */
+#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
+#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
+#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
+
+/* The PERM bit is added int 2.4, and it is used to protect permission(mode,
+ * owner, group, acl etc), so to separate the permission from LOOKUP lock.
+ * Because for remote directories(in DNE), these locks will be granted by
+ * different MDTs(different ldlm namespace).
+ *
+ * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together.
+ * For Remote directory, the master MDT, where the remote directory is, will
+ * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
+ * will grant LOOKUP_LOCK. */
+#define MDS_INODELOCK_PERM 0x000010
+#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
@@ -2595,7 +2586,7 @@ struct mdt_rec_setxattr {
* Do NOT change the size of various members, otherwise the value
* will be broken in lustre_swab_mdt_rec_reint().
*
- * If you add new members in other mdt_reint_xxx structres and need to use the
+ * If you add new members in other mdt_reint_xxx structures and need to use the
* rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
*/
struct mdt_rec_reint {
@@ -3328,9 +3319,10 @@ struct obdo {
#define o_grant_used o_data_version
static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *wobdo, struct obdo *lobdo)
+ struct obdo *wobdo,
+ const struct obdo *lobdo)
{
- memcpy(wobdo, lobdo, sizeof(*lobdo));
+ *wobdo = *lobdo;
wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
if (ocd == NULL)
return;
@@ -3345,16 +3337,15 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
}
static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
- struct obdo *lobdo, struct obdo *wobdo)
+ struct obdo *lobdo,
+ const struct obdo *wobdo)
{
obd_flag local_flags = 0;
if (lobdo->o_valid & OBD_MD_FLFLAGS)
local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
- LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
-
- memcpy(lobdo, wobdo, sizeof(*lobdo));
+ *lobdo = *wobdo;
if (local_flags != 0) {
lobdo->o_valid |= OBD_MD_FLFLAGS;
lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
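The memcpy() → struct assignment change above gives a type-checked copy: the compiler rejects mismatched operand types and still lowers the assignment to an efficient copy. A hedged sketch of the same wire/local copy-and-mask idea on a toy structure (fields and mask are illustrative):

#include <linux/types.h>

#define DEMO_FL_LOCAL_MASK 0x00ff0000u	/* host-only flag bits */

struct demo_obdo {
	__u64 o_valid;
	__u32 o_flags;
};

/* Copy a local descriptor into its wire form, stripping flags that
 * must never leave this host; the assignment copies every member. */
static inline void demo_set_wire(struct demo_obdo *wobdo,
				 const struct demo_obdo *lobdo)
{
	*wobdo = *lobdo;
	wobdo->o_flags &= ~DEMO_FL_LOCAL_MASK;
}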
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..f5f369e603de 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -265,13 +265,11 @@ struct ost_id {
#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
-/* Hopefully O_LOV_DELAY_CREATE does not conflict with standard O_xxx flags.
- * Previously it was defined as 0100000000 and conflicts with FMODE_NONOTIFY
- * which was added since kernel 2.6.36, so we redefine it as 020000000.
- * To be compatible with old version's statically linked binary, finally we
- * define it as (020000000 | 0100000000).
- * */
-#define O_LOV_DELAY_CREATE 0120000000
+/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
+ * files, but are unlikely to be used in practice and are not harmful if
+ * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character
+ * devices and are safe for use on new files (See LU-812, LU-4209). */
+#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)
#define LL_FILE_IGNORE_LOCK 0x00000001
#define LL_FILE_GROUP_LOCKED 0x00000002
@@ -300,7 +298,7 @@ struct ost_id {
#define LOV_MAX_STRIPE_COUNT_OLD 160
/* This calculation is crafted so that input of 4096 will result in 160
* which in turn is equal to old maximal stripe count.
- * XXX: In fact this is too simpified for now, what it also need is to get
+ * XXX: In fact this is too simplified for now, what it also need is to get
* ea_type argument to clearly know how much space each stripe consumes.
*
* The limit of 12 pages is somewhat arbitrary, but is a reasonably large
@@ -760,7 +758,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
@@ -929,7 +928,7 @@ struct hsm_state_set_ioc {
/*
* This structure describes the current in-progress action for a file.
- * it is retuned to user space and send over the wire
+ * it is returned to user space and send over the wire
*/
struct hsm_current_action {
/** The current undergoing action, if there is one */
@@ -1158,12 +1157,6 @@ struct hsm_progress {
__u32 padding;
};
-/**
- * Use by copytool during any hsm request they handled.
- * This structure is initialized by llapi_hsm_copy_start()
- * which is an helper over the ioctl() interface
- * Store Lustre, internal use only, data.
- */
struct hsm_copy {
__u64 hc_data_version;
__u16 hc_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index e14a5f674e85..3680668a8920 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -88,6 +88,8 @@ enum lcfg_command_type {
LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre
* cleanup cleanup */
+ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set
+ *a proc parameters */
};
struct lustre_cfg_bufs {
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 1de9a8bed497..ac08164793cb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -99,6 +99,8 @@
#define LDD_F_IR_CAPABLE 0x2000
/** the MGS refused to register the target. */
#define LDD_F_ERROR 0x4000
+/** process at lctl conf_param */
+#define LDD_F_PARAM2 0x8000
/* opc for target register */
#define LDD_F_OPC_REG 0x10000000
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index ec4bb5e3c13e..3e25f001c07d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -330,7 +330,7 @@ enum {
};
typedef enum {
- /** invalide type */
+ /** invalid type */
LDLM_NS_TYPE_UNKNOWN = 0,
/** mdc namespace */
LDLM_NS_TYPE_MDC,
@@ -1185,7 +1185,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
/**
* Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from reqest \a r
+ * data from request \a r
*/
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct ptlrpc_request *r, int increase)
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 75716f17f64b..16dcdbfae689 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -35,10 +35,10 @@
#ifndef LDLM_ALL_FLAGS_MASK
/** l_flags bits marked as "all_flags" bits */
-#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F132FULL
+#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
/** l_flags bits marked as "ast" bits */
-#define LDLM_FL_AST_MASK 0x0000000080000000ULL
+#define LDLM_FL_AST_MASK 0x0000000080008000ULL
/** l_flags bits marked as "blocked" bits */
#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
@@ -56,7 +56,7 @@
#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
+#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
@@ -114,6 +114,12 @@
#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
+/** flock deadlock detected */
+#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
+#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15)
+#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
+#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
+
/** discard (no writeback) on cancel */
#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
@@ -141,7 +147,7 @@
#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
/**
- * Immediatelly cancel such locks when they block some other locks. Send
+ * Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This
* is for clients (like liblustre) that cannot be expected to reliably
* response to blocking AST. */
@@ -242,7 +248,7 @@
/**
* A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancelation that performs write back on its
+ * has finished the part of its cancellation that performs write back on its
* dirty pages. It can remain on the granted list during this whole time.
* Threads racing to update the KMS after performing their writeback need
* to know to exclude each other's locks from the calculation as they walk
@@ -390,6 +396,7 @@ static int hf_lustre_ldlm_fl_ast_sent = -1;
static int hf_lustre_ldlm_fl_replay = -1;
static int hf_lustre_ldlm_fl_intent_only = -1;
static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_flock_deadlock = -1;
static int hf_lustre_ldlm_fl_discard_data = -1;
static int hf_lustre_ldlm_fl_no_timeout = -1;
static int hf_lustre_ldlm_fl_block_nowait = -1;
@@ -431,6 +438,7 @@ const value_string lustre_ldlm_flags_vals[] = {
{LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
{LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
{LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
{LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
{LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
{LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 2feb38b51af2..103f7a8bd83f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -192,9 +192,9 @@ struct obd_export {
struct obd_import *exp_imp_reverse;
struct nid_stat *exp_nid_stats;
struct lprocfs_stats *exp_md_stats;
- /** Active connetion */
+ /** Active connection */
struct ptlrpc_connection *exp_connection;
- /** Connection count value from last succesful reconnect rpc */
+ /** Connection count value from last successful reconnect rpc */
__u32 exp_conn_cnt;
/** Hash list of all ldlm locks granted on this export */
struct cfs_hash *exp_lock_hash;
@@ -380,6 +380,23 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp)
return false;
}
+static inline __u64 exp_connect_ibits(struct obd_export *exp)
+{
+ struct obd_connect_data *ocd;
+
+ ocd = &exp->exp_connect_data;
+ return ocd->ocd_ibits_known;
+}
+
+static inline bool imp_connect_disp_stripe(struct obd_import *imp)
+{
+ struct obd_connect_data *ocd;
+
+ LASSERT(imp != NULL);
+ ocd = &imp->imp_connect_data;
+ return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
+}
+
extern struct obd_export *class_conn2export(struct lustre_handle *conn);
extern struct obd_device *class_conn2obd(struct lustre_handle *conn);
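The new imp_connect_disp_stripe() helper above is the client-side gate for the OBD_CONNECT_DISP_STRIPE flag introduced earlier in this series; DISP_OPEN_STRIPE should only be requested when the server negotiated that capability. A hedged usage sketch (the function below is illustrative, not taken from the patch, and assumes the patched headers are included):

/* Request the create-stripe disposition only from servers that
 * advertised OBD_CONNECT_DISP_STRIPE at connect time. */
static __u64 demo_open_dispositions(struct obd_import *imp)
{
	__u64 disp = DISP_OPEN_OPEN;

	if (imp_connect_disp_stripe(imp))
		disp |= DISP_OPEN_STRIPE;
	return disp;
}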
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 84a897eed1df..5e7b3165a851 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -339,7 +339,7 @@ struct lu_client_seq {
struct mutex lcs_mutex;
/*
- * Range of allowed for allocation sequeces. When using lu_client_seq on
+ * Range of allowed for allocation sequences. When using lu_client_seq on
* clients, this contains meta-sequence range. And for servers this
* contains super-sequence range.
*/
@@ -398,7 +398,7 @@ struct lu_server_seq {
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
@@ -568,14 +568,14 @@ fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
* finally, when we replace ost_id with FID in data stack.
*
* Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just oposite with Metatdata
+ * res[1] = object_seq, is just opposite with Metatdata
* resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unifiy the resid identification, we will reverse the data
+ * To unify the resid identification, we will reverse the data
* resid to keep it same with Metadata resid, i.e.
*
* For resid from the old client,
* res[0] = objid, res[1] = 0, still keep the original order,
- * for compatiblity.
+ * for compatibility.
*
* For new resid
* res will be built from normal FID directly, i.e. res[0] = f_seq,
@@ -685,7 +685,7 @@ static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
/* all objects with same id and different versions will belong to same
* collisions list. */
- return cfs_hash_long(fid_flatten(f), bits);
+ return hash_long(fid_flatten(f), bits);
}
/**
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 67259eb43cde..01ed786b40b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -152,7 +152,7 @@ struct import_state_hist {
};
/**
- * Defintion of PortalRPC import structure.
+ * Definition of PortalRPC import structure.
* Imports are representing client-side view to remote target.
*/
struct obd_import {
@@ -180,6 +180,17 @@ struct obd_import {
struct list_head imp_delayed_list;
/** @} */
+ /**
+ * List of requests that are retained for committed open replay. Once
+ * open is committed, open replay request will be moved from the
+ * imp_replay_list into the imp_committed_list.
+ * The imp_replay_cursor is for accelerating searching during replay.
+ * @{
+ */
+ struct list_head imp_committed_list;
+ struct list_head *imp_replay_cursor;
+ /** @} */
+
/** obd device for this import */
struct obd_device *imp_obd;
@@ -219,7 +230,7 @@ struct obd_import {
* after a check to save on unnecessary replay list iterations
*/
int imp_last_generation_checked;
- /** Last tranno we replayed */
+ /** Last transno we replayed */
__u64 imp_last_replay_transno;
/** Last transno committed on remote side */
__u64 imp_peer_committed_transno;
@@ -237,7 +248,7 @@ struct obd_import {
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
cfs_time_t imp_next_ping;
- /** When we last succesfully connected. time in 64bit jiffies */
+ /** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
/** List of all possible connection for import. */
@@ -268,7 +279,7 @@ struct obd_import {
imp_no_lock_replay:1,
/* recovery by versions was failed */
imp_vbr_failed:1,
- /* force an immidiate ping */
+ /* force an immediate ping */
imp_force_verify:1,
/* force a scheduled ping */
imp_force_next_verify:1,
@@ -281,7 +292,7 @@ struct obd_import {
/* need IR MNE swab */
imp_need_mne_swab:1,
/* import must be reconnected instead of
- * chouse new connection */
+ * choosing a new connection */
imp_force_reconnect:1,
/* import has tried to connect with server */
imp_connect_tried:1;
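
The new imp_committed_list and imp_replay_cursor fields keep already-committed open requests off the ordinary replay list so that replay-list walks stay short. A hedged sketch of the bookkeeping the comment above describes; the field names follow the structures in this patch, but the helper itself is illustrative only, and the real logic lives in ptlrpc_request_committed(), declared later in lustre_net.h:

	/* Illustrative only: once the transaction of an open request has
	 * been committed, move it off the ordinary replay list onto the
	 * committed-open list and reset the replay cursor. */
	static void demo_commit_open_req(struct obd_import *imp,
					 struct ptlrpc_request *req)
	{
		spin_lock(&imp->imp_lock);
		list_move_tail(&req->rq_replay_list, &imp->imp_committed_list);
		imp->imp_replay_cursor = &imp->imp_committed_list;
		spin_unlock(&imp->imp_lock);
	}
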
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 609a090484a6..0368ca6ffcc1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -536,7 +536,7 @@ do { \
if (condition) \
break; \
\
- init_waitqueue_entry_current(&__wait); \
+ init_waitqueue_entry(&__wait, current); \
l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
@@ -558,15 +558,13 @@ do { \
break; \
\
if (__timeout == 0) { \
- waitq_wait(&__wait, __wstate); \
+ schedule(); \
} else { \
cfs_duration_t interval = info->lwi_interval? \
min_t(cfs_duration_t, \
info->lwi_interval,__timeout):\
__timeout; \
- cfs_duration_t remaining = waitq_timedwait(&__wait,\
- __wstate, \
- interval); \
+ cfs_duration_t remaining = schedule_timeout(interval);\
__timeout = cfs_time_sub(__timeout, \
cfs_time_sub(interval, remaining));\
if (__timeout == 0) { \
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
index 5790be913bf6..500ace30cfbf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_linkea.h
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -33,7 +33,7 @@ struct linkea_data {
*/
struct lu_buf *ld_buf;
/**
- * The matched header, entry and its lenght in the EA
+ * The matched header, entry and its length in the EA
*/
struct link_ea_header *ld_leh;
struct link_ea_entry *ld_lee;
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index c1e02702b931..468f36344a34 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -166,6 +166,17 @@ void it_clear_disposition(struct lookup_intent *it, int flag);
void it_set_disposition(struct lookup_intent *it, int flag);
int it_open_error(int phase, struct lookup_intent *it);
+static inline bool cl_is_lov_delay_create(unsigned int flags)
+{
+ return (flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE;
+}
+
+static inline void cl_lov_delay_create_clear(unsigned int *flags)
+{
+ if ((*flags & O_LOV_DELAY_CREATE) == O_LOV_DELAY_CREATE)
+ *flags &= ~O_LOV_DELAY_CREATE;
+}
+
/** @} mdc */
#endif
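
The two inline helpers added above centralize the O_LOV_DELAY_CREATE test and clear that callers previously open-coded on file->f_flags (the llite/file.c hunks later in this patch switch to them). A hedged usage sketch with a hypothetical caller name:

	/* Hypothetical caller: object creation is deferred while the flag
	 * is set, and the flag is dropped once striping is established. */
	static int demo_after_setstripe(struct file *file)
	{
		if (cl_is_lov_delay_create(file->f_flags))
			CDEBUG(D_INODE, "object creation was delayed\n");

		cl_lov_delay_create_clear(&file->f_flags);
		return 0;
	}
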
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index d8d088035428..745adbb74cfc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -445,7 +445,7 @@ struct ptlrpc_reply_state {
lnet_handle_md_t rs_md_h;
atomic_t rs_refcount;
- /** Context for the sevice thread */
+ /** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
/** Reply buffer (actually sent to the client), encoded if needed */
struct lustre_msg *rs_repbuf; /* wrapper */
@@ -454,9 +454,9 @@ struct ptlrpc_reply_state {
/** Size of the reply message */
int rs_repdata_len; /* wrapper msg length */
/**
- * Actual reply message. Its content is encrupted (if needed) to
+ * Actual reply message. Its content is encrypted (if needed) to
* produce reply buffer for actual sending. In simple case
- * of no network encryption we jus set \a rs_repbuf to \a rs_msg
+ * of no network encryption we just set \a rs_repbuf to \a rs_msg
*/
struct lustre_msg *rs_msg; /* reply message */
@@ -497,7 +497,7 @@ struct ptlrpc_request_pool {
spinlock_t prp_lock;
/** list of ptlrpc_request structs */
struct list_head prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
+ /** Maximum message size that would fit into a request from this pool */
int prp_rq_size;
/** Function to allocate more requests for this pool */
void (*prp_populate)(struct ptlrpc_request_pool *, int);
@@ -904,7 +904,7 @@ struct ptlrpc_nrs_pol_conf {
*/
struct module *nc_owner;
/**
- * Policy registration flags; a bitmast of \e nrs_policy_flags
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
*/
unsigned nc_flags;
};
@@ -1351,7 +1351,7 @@ struct nrs_orr_data {
*/
enum nrs_orr_supp od_supp;
/**
- * Round Robin quantum; the maxium number of RPCs that each request
+ * Round Robin quantum; the maximum number of RPCs that each request
* batch for each object or OST can have in a scheduling round.
*/
__u16 od_quantum;
@@ -1486,7 +1486,7 @@ struct ptlrpc_nrs_request {
*/
struct nrs_fifo_req fifo;
/**
- * CRR-N request defintion
+ * CRR-N request definition
*/
struct nrs_crrn_req crr;
/** ORR and TRR share the same request definition */
@@ -1550,7 +1550,7 @@ struct ptlrpc_request {
* requests in time
*/
struct list_head rq_timed_list;
- /** server-side history, used for debuging purposes. */
+ /** server-side history, used for debugging purposes. */
struct list_head rq_history_list;
/** server-side per-export list */
struct list_head rq_exp_list;
@@ -1611,7 +1611,7 @@ struct ptlrpc_request {
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ server-side refcount for multiple replies */
/** Portal to which this request would be sent */
short rq_request_portal; /* XXX FIXME bug 249 */
@@ -1637,7 +1637,7 @@ struct ptlrpc_request {
/** xid */
__u64 rq_xid;
/**
- * List item to for replay list. Not yet commited requests get linked
+ * List item for the replay list. Not yet committed requests get linked
* there.
* Also see \a rq_replay comment above.
*/
@@ -1952,7 +1952,7 @@ void _debug_req(struct ptlrpc_request *req,
__attribute__ ((format (printf, 3, 4)));
/**
- * Helper that decides if we need to print request accordig to current debug
+ * Helper that decides if we need to print request according to current debug
* level settings
*/
#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
@@ -1966,7 +1966,7 @@ do { \
} while(0)
/**
- * This is the debug print function you need to use to print request sturucture
+ * This is the debug print function you need to use to print request structure
* content into lustre debug log.
* for most callers (level is a constant) this is resolved at compile time */
#define DEBUG_REQ(level, req, fmt, args...) \
@@ -2621,6 +2621,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
* request queues, request management, etc.
* @{
*/
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
+
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
diff --git a/drivers/staging/lustre/lustre/include/lustre_quota.h b/drivers/staging/lustre/lustre/include/lustre_quota.h
index 71b5d97e0343..07cb7c310bcc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_quota.h
+++ b/drivers/staging/lustre/lustre/include/lustre_quota.h
@@ -140,7 +140,7 @@ struct qsd_instance;
* (i.e. when ->ldo_recovery_complete is called). This is used
* to notify the qsd layer that quota should now be enforced
* again via the qsd_op_begin/end functions. The last step of the
- * reintegration prodecure (namely usage reconciliation) will be
+ * reintegration procedure (namely usage reconciliation) will be
* completed during start.
*
* - qsd_fini(): is used to release a qsd_instance structure allocated with
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 885247d28b3a..bf3ee3915c28 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -572,7 +572,7 @@ struct ptlrpc_sec_cops {
/**
* Called then the reference of \a ctx dropped to 0. The policy module
* is supposed to destroy this context or whatever else according to
- * its cache maintainance mechamism.
+ * its cache maintenance mechanism.
*
* \param sync if zero, we shouldn't wait for the context being
* destroyed completely.
@@ -1002,7 +1002,7 @@ struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
void sptlrpc_sec_put(struct ptlrpc_sec *sec);
/*
- * internal apis which only used by policy impelentation
+ * internal apis which only used by policy implementation
*/
int sptlrpc_get_next_secid(void);
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
index 7b45b47b48f9..ef46b2c461a6 100644
--- a/drivers/staging/lustre/lustre/include/md_object.h
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -35,7 +35,7 @@
*
* lustre/include/md_object.h
*
- * Extention of lu_object.h for metadata objects
+ * Extension of lu_object.h for metadata objects
*/
#ifndef _LUSTRE_MD_OBJECT_H
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index c3470ce62cff..72cf3fe4b0c9 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -158,7 +158,7 @@ struct obd_info {
/* statfs data specific for every OSC, if needed at all. */
struct obd_statfs *oi_osfs;
/* An update callback which is called to update some data on upper
- * level. E.g. it is used for update lsm->lsm_oinfo at every recieved
+ * level. E.g. it is used for update lsm->lsm_oinfo at every received
* request in osc level for enqueue requests. It is also possible to
* update some caller data from LOV layer if needed. */
obd_enqueue_update_f oi_cb_up;
@@ -1042,8 +1042,8 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
}
struct md_op_data {
- struct lu_fid op_fid1; /* operation fid1 (usualy parent) */
- struct lu_fid op_fid2; /* operation fid2 (usualy child) */
+ struct lu_fid op_fid1; /* operation fid1 (usually parent) */
+ struct lu_fid op_fid2; /* operation fid2 (usually child) */
struct lu_fid op_fid3; /* 2 extra fids to find conflicting */
struct lu_fid op_fid4; /* to the operation locks. */
mdsno_t op_mds; /* what mds server open will go to */
@@ -1323,7 +1323,8 @@ struct md_open_data {
struct obd_client_handle *mod_och;
struct ptlrpc_request *mod_open_req;
struct ptlrpc_request *mod_close_req;
- atomic_t mod_refcount;
+ atomic_t mod_refcount;
+ bool mod_is_create;
};
struct lookup_intent;
@@ -1392,7 +1393,7 @@ struct md_ops {
int (*m_set_open_replay_data)(struct obd_export *,
struct obd_client_handle *,
- struct ptlrpc_request *);
+ struct lookup_intent *);
int (*m_clear_open_replay_data)(struct obd_export *,
struct obd_client_handle *);
int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 983718fe1e55..9d1f266e55e4 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -175,9 +175,13 @@ enum {
CONFIG_T_CONFIG = 0,
CONFIG_T_SPTLRPC = 1,
CONFIG_T_RECOVER = 2,
- CONFIG_T_MAX = 3
+ CONFIG_T_PARAMS = 3,
+ CONFIG_T_MAX = 4
};
+#define PARAMS_FILENAME "params"
+#define LCTL_UPCALL "lctl"
+
/* list of active configuration logs */
struct config_llog_data {
struct ldlm_res_id cld_resid;
@@ -185,7 +189,8 @@ struct config_llog_data {
struct list_head cld_list_chain;
atomic_t cld_refcount;
struct config_llog_data *cld_sptlrpc;/* depended sptlrpc log */
- struct config_llog_data *cld_recover; /* imperative recover log */
+ struct config_llog_data *cld_params; /* common parameters log */
+ struct config_llog_data *cld_recover;/* imperative recover log */
struct obd_export *cld_mgcexp;
struct mutex cld_lock;
int cld_type;
@@ -1626,7 +1631,7 @@ static inline int obd_health_check(const struct lu_env *env,
{
/* returns: 0 on healthy
* >0 on unhealthy + reason code/flag
- * however the only suppored reason == 1 right now
+ * however the only supported reason == 1 right now
* We'll need to define some better reasons
* or flags in the future.
* <0 on error
@@ -1996,11 +2001,11 @@ static inline int md_getxattr(struct obd_export *exp,
static inline int md_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
EXP_CHECK_MD_OP(exp, set_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
- return MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req);
+ return MDP(exp->exp_obd, set_open_replay_data)(exp, och, it);
}
static inline int md_clear_open_replay_data(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 977bc231df9d..5ec336968fc8 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -681,7 +681,7 @@ do { \
*
* Be very careful when changing this value, especially when decreasing it,
* since vmalloc in Linux doesn't perform well on multi-cores system, calling
- * vmalloc in critical path would hurt peformance badly. See LU-66.
+ * vmalloc in critical path would hurt performance badly. See LU-66.
*/
#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE)
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 94b164127e0c..6907a16dbbd1 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -1196,14 +1196,14 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&header->loh_ref) == 1)
break;
- waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index c9aae132f98a..986bf384bff7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -205,6 +205,26 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
return 0;
}
+static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
+ struct list_head *work_list)
+{
+ CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
+
+ if ((exp_connect_flags(lock->l_export) &
+ OBD_CONNECT_FLOCK_DEAD) == 0) {
+ CERROR(
+ "deadlock found, but client doesn't support flock canceliation\n");
+ } else {
+ LASSERT(lock->l_completion_ast);
+ LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+ lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
+ LDLM_FL_FLOCK_DEADLOCK;
+ ldlm_flock_blocking_unlink(lock);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_add_ast_work_item(lock, NULL, work_list);
+ }
+}
+
/**
* Process a granting attempt for flock lock.
* Must be called under ns lock held.
@@ -272,6 +292,7 @@ reprocess:
}
}
} else {
+ int reprocess_failed = 0;
lockmode_verify(mode);
/* This loop determines if there are existing locks
@@ -293,8 +314,15 @@ reprocess:
if (!ldlm_flocks_overlap(lock, req))
continue;
- if (!first_enq)
- return LDLM_ITER_CONTINUE;
+ if (!first_enq) {
+ reprocess_failed = 1;
+ if (ldlm_flock_deadlock(req, lock)) {
+ ldlm_flock_cancel_on_deadlock(req,
+ work_list);
+ return LDLM_ITER_CONTINUE;
+ }
+ continue;
+ }
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
ldlm_flock_destroy(req, mode, *flags);
@@ -330,6 +358,8 @@ reprocess:
*flags |= LDLM_FL_BLOCK_GRANTED;
return LDLM_ITER_STOP;
}
+ if (reprocess_failed)
+ return LDLM_ITER_CONTINUE;
}
if (*flags & LDLM_FL_TEST_LOCK) {
@@ -646,7 +676,10 @@ granted:
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
- if (flags & LDLM_FL_TEST_LOCK) {
+ if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ rc = -EDEADLK;
+ } else if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.*/
@@ -672,7 +705,7 @@ granted:
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
- return 0;
+ return rc;
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
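
With the changes above, a flock enqueue that would complete a lock cycle is cancelled (for clients advertising OBD_CONNECT_FLOCK_DEAD, per the CERROR above) and the completion AST now returns -EDEADLK instead of 0. From user space this surfaces as the usual fcntl() failure; a minimal, plain-POSIX sketch of what an application would observe:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* User-space sketch: a blocking byte-range lock that would close a
	 * lock cycle is refused with EDEADLK instead of hanging forever. */
	static int demo_lock_range(int fd, off_t start, off_t len)
	{
		struct flock fl = {
			.l_type   = F_WRLCK,
			.l_whence = SEEK_SET,
			.l_start  = start,
			.l_len    = len,
		};

		if (fcntl(fd, F_SETLKW, &fl) == -1) {
			if (errno == EDEADLK)
				fprintf(stderr, "deadlock detected, lock denied\n");
			return -1;
		}
		return 0;
	}
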
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 692623beee12..0548aca45e8e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -145,8 +145,6 @@ char *ldlm_it2str(int it)
return "getxattr";
case IT_LAYOUT:
return "layout";
- case IT_SETXATTR:
- return "setxattr";
default:
CERROR("Unknown intent %d\n", it);
return "UNKNOWN";
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 3ed020eb89c0..7e63cf355cd9 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -192,8 +192,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
while (to > 0) {
- schedule_timeout_and_set_state(
- TASK_INTERRUPTIBLE, to);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
lock->l_flags & LDLM_FL_DESTROYED)
break;
@@ -228,6 +228,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
lock_res_and_lock(lock);
LASSERT(lock->l_lvb_data == NULL);
+ lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lvb_data;
lock->l_lvb_len = lvb_len;
unlock_res_and_lock(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c0e54aead2ce..fcc7a99ce395 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(ldlm_completion_ast);
* A helper to build a blocking AST function
*
* Perform a common operation for blocking ASTs:
- * defferred lock cancellation.
+ * deferred lock cancellation.
*
* \param lock the lock blocking or canceling AST was called on
* \retval 0
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 5f89864cda14..2824d4a9a85b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -421,9 +421,9 @@ static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
} else {
val = fid_oid(&fid);
}
- hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_long(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);
+ hash -= hash_long((unsigned long)hs, val % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
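
Both this hunk and the earlier fid_hash() hunk make the same substitution: the libcfs wrapper cfs_hash_long() is dropped in favour of the kernel's own hash_long() from <linux/hash.h>, which has identical semantics. A minimal sketch of the pattern in isolation (demo_bucket is a hypothetical name):

	#include <linux/hash.h>

	/* Fold a value into one of 2^bits hash buckets with the stock
	 * kernel helper instead of the removed cfs_hash_long() wrapper. */
	static inline unsigned int demo_bucket(unsigned long key, unsigned int bits)
	{
		return hash_long(key, bits);
	}
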
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index f30c84f195aa..1e4c5ad26157 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -368,7 +368,7 @@ void libcfs_debug_dumplog(void)
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
* get to schedule() */
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
@@ -379,7 +379,7 @@ void libcfs_debug_dumplog(void)
printk(KERN_ERR "LustreError: cannot start log dump thread:"
" %ld\n", PTR_ERR(dumper));
else
- waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ schedule();
/* be sure to teardown if cfs_create_thread() failed */
remove_wait_queue(&debug_ctlwq, &wait);
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index c54448d69008..ba43ff7f7900 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -127,8 +127,8 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
if (ret) {
CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
id, ms);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(ms) / 1000);
set_current_state(TASK_RUNNING);
CERROR("cfs_fail_timeout id %x awake\n", id);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index 7b2c31599327..b6ddc998f750 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -44,106 +44,6 @@
#include <linux/libcfs/libcfs.h>
-#ifdef LUSTRE_UTILS
-/* This is the userspace side. */
-
-/** Start the userspace side of a KUC pipe.
- * @param link Private descriptor for pipe/socket.
- * @param groups KUC broadcast group to listen to
- * (can be null for unicast to this pid)
- */
-int libcfs_ukuc_start(lustre_kernelcomm *link, int group)
-{
- int pfd[2];
-
- if (pipe(pfd) < 0)
- return -errno;
-
- memset(link, 0, sizeof(*link));
- link->lk_rfd = pfd[0];
- link->lk_wfd = pfd[1];
- link->lk_group = group;
- link->lk_uid = getpid();
- return 0;
-}
-
-int libcfs_ukuc_stop(lustre_kernelcomm *link)
-{
- if (link->lk_wfd > 0)
- close(link->lk_wfd);
- return close(link->lk_rfd);
-}
-
-#define lhsz sizeof(*kuch)
-
-/** Read a message from the link.
- * Allocates memory, returns handle
- *
- * @param link Private descriptor for pipe/socket.
- * @param buf Buffer to read into, must include size for kuc_hdr
- * @param maxsize Maximum message size allowed
- * @param transport Only listen to messages on this transport
- * (and the generic transport)
- */
-int libcfs_ukuc_msg_get(lustre_kernelcomm *link, char *buf, int maxsize,
- int transport)
-{
- struct kuc_hdr *kuch;
- int rc = 0;
-
- memset(buf, 0, maxsize);
-
- CDEBUG(D_KUC, "Waiting for message from kernel on fd %d\n",
- link->lk_rfd);
-
- while (1) {
- /* Read header first to get message size */
- rc = read(link->lk_rfd, buf, lhsz);
- if (rc <= 0) {
- rc = -errno;
- break;
- }
- kuch = (struct kuc_hdr *)buf;
-
- CDEBUG(D_KUC, "Received message mg=%x t=%d m=%d l=%d\n",
- kuch->kuc_magic, kuch->kuc_transport, kuch->kuc_msgtype,
- kuch->kuc_msglen);
-
- if (kuch->kuc_magic != KUC_MAGIC) {
- CERROR("bad message magic %x != %x\n",
- kuch->kuc_magic, KUC_MAGIC);
- rc = -EPROTO;
- break;
- }
-
- if (kuch->kuc_msglen > maxsize) {
- rc = -EMSGSIZE;
- break;
- }
-
- /* Read payload */
- rc = read(link->lk_rfd, buf + lhsz, kuch->kuc_msglen - lhsz);
- if (rc < 0) {
- rc = -errno;
- break;
- }
- if (rc < (kuch->kuc_msglen - lhsz)) {
- CERROR("short read: got %d of %d bytes\n",
- rc, kuch->kuc_msglen);
- rc = -EPROTO;
- break;
- }
-
- if (kuch->kuc_transport == transport ||
- kuch->kuc_transport == KUC_TRANSPORT_GENERIC) {
- return 0;
- }
- /* Drop messages for other transports */
- }
- return rc;
-}
-
-#else /* LUSTRE_UTILS */
/* This is the kernel side (liblustre as well). */
/**
@@ -338,5 +238,3 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
-
-#endif /* LUSTRE_UTILS */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 922debd0a412..ed0a6b531058 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -42,26 +42,6 @@
#include <linux/libcfs/libcfs.h>
-/* non-0 = don't match */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n)
-{
- if (s1 == NULL || s2 == NULL)
- return 1;
-
- if (n == 0)
- return 0;
-
- while (n-- != 0 && tolower(*s1) == tolower(*s2)) {
- if (n == 0 || *s1 == '\0' || *s2 == '\0')
- break;
- s1++;
- s2++;
- }
-
- return tolower(*(unsigned char *)s1) - tolower(*(unsigned char *)s2);
-}
-EXPORT_SYMBOL(cfs_strncasecmp);
-
/* Convert a text string to a bitmask */
int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask)
@@ -101,7 +81,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
debugstr = bit2str(i);
if (debugstr != NULL &&
strlen(debugstr) == len &&
- cfs_strncasecmp(str, debugstr, len) == 0) {
+ strncasecmp(str, debugstr, len) == 0) {
if (op == '-')
newmask &= ~(1 << i);
else
@@ -111,7 +91,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
}
}
if (!found && len == 3 &&
- (cfs_strncasecmp(str, "ALL", len) == 0)) {
+ (strncasecmp(str, "ALL", len) == 0)) {
if (op == '-')
newmask = minmask;
else
@@ -129,7 +109,6 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
*oldmask = newmask;
return 0;
}
-EXPORT_SYMBOL(cfs_str2mask);
/* get the first string out of @str */
char *cfs_firststr(char *str, size_t size)
@@ -164,12 +143,12 @@ cfs_trimwhite(char *str)
{
char *end;
- while (cfs_iswhite(*str))
+ while (isspace(*str))
str++;
end = str + strlen(str);
while (end > str) {
- if (!cfs_iswhite(end[-1]))
+ if (!isspace(end[-1]))
break;
end--;
}
@@ -199,7 +178,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
/* skip leading white spaces */
while (next->ls_len) {
- if (!cfs_iswhite(*next->ls_str))
+ if (!isspace(*next->ls_str))
break;
next->ls_str++;
next->ls_len--;
@@ -226,14 +205,13 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
/* skip ending whitespaces */
while (--end != res->ls_str) {
- if (!cfs_iswhite(*end))
+ if (!isspace(*end))
break;
}
res->ls_len = end - res->ls_str + 1;
return 1;
}
-EXPORT_SYMBOL(cfs_gettok);
/**
* Converts string to integer.
@@ -256,13 +234,12 @@ cfs_str2num_check(char *str, int nob, unsigned *num,
return 0;
for (; endp < str + nob; endp++) {
- if (!cfs_iswhite(*endp))
+ if (!isspace(*endp))
return 0;
}
return (*num >= min && *num <= max);
}
-EXPORT_SYMBOL(cfs_str2num_check);
/**
* Parses \<range_expr\> token of the syntax. If \a bracketed is false,
@@ -277,7 +254,7 @@ EXPORT_SYMBOL(cfs_str2num_check);
* \retval 0 will be returned if it can be parsed, otherwise -EINVAL or
* -ENOMEM will be returned.
*/
-int
+static int
cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
int bracketed, struct cfs_range_expr **expr)
{
@@ -340,7 +317,6 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
LIBCFS_FREE(re, sizeof(*re));
return -EINVAL;
}
-EXPORT_SYMBOL(cfs_range_expr_parse);
/**
* Matches value (\a value) against ranges expression list \a expr_list.
@@ -361,7 +337,6 @@ cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
return 0;
}
-EXPORT_SYMBOL(cfs_expr_list_match);
/**
* Convert express list (\a expr_list) to an array of all matched values
@@ -432,18 +407,6 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list)
}
EXPORT_SYMBOL(cfs_expr_list_free);
-void
-cfs_expr_list_print(struct cfs_expr_list *expr_list)
-{
- struct cfs_range_expr *expr;
-
- list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
- CDEBUG(D_WARNING, "%d-%d/%d\n",
- expr->re_lo, expr->re_hi, expr->re_stride);
- }
-}
-EXPORT_SYMBOL(cfs_expr_list_print);
-
/**
* Parses \<cfs_expr_list\> token of the syntax.
*
@@ -526,7 +489,6 @@ cfs_expr_list_free_list(struct list_head *list)
cfs_expr_list_free(el);
}
}
-EXPORT_SYMBOL(cfs_expr_list_free_list);
int
cfs_ip_addr_parse(char *str, int len, struct list_head *list)
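
cfs_str2mask() itself is unchanged here apart from using the kernel's strncasecmp() and isspace(); it still walks whitespace-separated tokens with optional '+'/'-' prefixes and translates them through the caller's bit2str() table. A hedged sketch of a caller, with entirely hypothetical bit names (the real callers pass the libcfs debug and subsystem tables):

	/* Hypothetical bit-name table for illustration only. */
	static const char *demo_bit2str(int bit)
	{
		static const char *names[] = { "trace", "info", "warning" };

		return bit >= 0 && bit < 3 ? names[bit] : NULL;
	}

	static int demo_parse_mask(void)
	{
		int mask = 0;

		/* turn on "trace" and "warning" starting from an empty mask */
		return cfs_str2mask("+trace +warning", demo_bit2str, &mask, 0, 0x7);
	}
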
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 58bb256ee047..77b1ef64ecc0 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -952,6 +952,7 @@ static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ bool warn;
switch (action) {
case CPU_DEAD:
@@ -962,9 +963,21 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
cpt_data.cpt_version++;
spin_unlock(&cpt_data.cpt_lock);
default:
- CWARN("Lustre: can't support CPU hotplug well now, "
- "performance and stability could be impacted"
- "[CPU %u notify: %lx]\n", cpu, action);
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
+ CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
+ cpu, action);
+ break;
+ }
+
+ down(&cpt_data.cpt_mutex);
+ /* if all HTs in a core are offline, it may break affinity */
+ cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
+ warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
+ up(&cpt_data.cpt_mutex);
+ CDEBUG(warn ? D_WARNING : D_INFO,
+ "Lustre: can't support CPU plug-out well now, "
+ "performance and stability could be impacted "
+ "[CPU %u action: %lx]\n", cpu, action);
}
return NOTIFY_OK;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index a2ef64c3403d..e74c3e28a972 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -55,25 +55,13 @@
* for Linux kernel.
*/
-int cfs_curproc_groups_nr(void)
-{
- int nr;
-
- task_lock(current);
- nr = current_cred()->group_info->ngroups;
- task_unlock(current);
- return nr;
-}
-
-/* Currently all the CFS_CAP_* defines match CAP_* ones. */
-#define cfs_cap_pack(cap) (cap)
-#define cfs_cap_unpack(cap) (cap)
-
void cfs_cap_raise(cfs_cap_t cap)
{
struct cred *cred;
- if ((cred = prepare_creds())) {
- cap_raise(cred->cap_effective, cfs_cap_unpack(cap));
+
+ cred = prepare_creds();
+ if (cred) {
+ cap_raise(cred->cap_effective, cap);
commit_creds(cred);
}
}
@@ -81,42 +69,28 @@ void cfs_cap_raise(cfs_cap_t cap)
void cfs_cap_lower(cfs_cap_t cap)
{
struct cred *cred;
- if ((cred = prepare_creds())) {
- cap_lower(cred->cap_effective, cfs_cap_unpack(cap));
+
+ cred = prepare_creds();
+ if (cred) {
+ cap_lower(cred->cap_effective, cap);
commit_creds(cred);
}
}
int cfs_cap_raised(cfs_cap_t cap)
{
- return cap_raised(current_cap(), cfs_cap_unpack(cap));
+ return cap_raised(current_cap(), cap);
}
void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
{
-#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
- *cap = cfs_cap_pack(kcap);
-#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
- *cap = cfs_cap_pack(kcap[0]);
-#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
/* XXX lost high byte */
- *cap = cfs_cap_pack(kcap.cap[0]);
-#else
- #error "need correct _KERNEL_CAPABILITY_VERSION "
-#endif
+ *cap = kcap.cap[0];
}
void cfs_kernel_cap_unpack(kernel_cap_t *kcap, cfs_cap_t cap)
{
-#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
- *kcap = cfs_cap_unpack(cap);
-#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
- (*kcap)[0] = cfs_cap_unpack(cap);
-#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
- kcap->cap[0] = cfs_cap_unpack(cap);
-#else
- #error "need correct _KERNEL_CAPABILITY_VERSION "
-#endif
+ kcap->cap[0] = cap;
}
cfs_cap_t cfs_curproc_cap_pack(void)
@@ -126,20 +100,6 @@ cfs_cap_t cfs_curproc_cap_pack(void)
return cap;
}
-void cfs_curproc_cap_unpack(cfs_cap_t cap)
-{
- struct cred *cred;
- if ((cred = prepare_creds())) {
- cfs_kernel_cap_unpack(&cred->cap_effective, cap);
- commit_creds(cred);
- }
-}
-
-int cfs_capable(cfs_cap_t cap)
-{
- return capable(cfs_cap_unpack(cap));
-}
-
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
@@ -292,13 +252,10 @@ out:
}
EXPORT_SYMBOL(cfs_get_environ);
-EXPORT_SYMBOL(cfs_curproc_groups_nr);
EXPORT_SYMBOL(cfs_cap_raise);
EXPORT_SYMBOL(cfs_cap_lower);
EXPORT_SYMBOL(cfs_cap_raised);
EXPORT_SYMBOL(cfs_curproc_cap_pack);
-EXPORT_SYMBOL(cfs_curproc_cap_unpack);
-EXPORT_SYMBOL(cfs_capable);
/*
* Local variables:
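
With the version #ifdefs gone, cfs_cap_t is simply the low 32 bits of kernel_cap_t (the high word is still discarded, as the XXX comment notes), and the CFS_CAP_* values line up with CAP_*, which is why later hunks can call capable() on them directly. A minimal sketch of the round trip under those assumptions (demo_* names are hypothetical):

	#include <linux/capability.h>

	/* Only cap[0] survives the round trip; the high word is dropped. */
	static __u32 demo_pack(kernel_cap_t kcap)
	{
		return kcap.cap[0];
	}

	static kernel_cap_t demo_unpack(__u32 packed)
	{
		kernel_cap_t kcap = CAP_EMPTY_SET;

		kcap.cap[0] = packed;
		return kcap;
	}
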
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index 55296a3591d5..e6eae0666f0d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -150,12 +150,12 @@ static long libcfs_ioctl(struct file *file,
/* Handle platform-dependent IOC requests */
switch (cmd) {
case IOC_LIBCFS_PANIC:
- if (!cfs_capable(CFS_CAP_SYS_BOOT))
+ if (!capable(CFS_CAP_SYS_BOOT))
return (-EPERM);
panic("debugctl-invoked panic");
return (0);
case IOC_LIBCFS_MEMHOG:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
/* go thought */
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
index c7bc7fcccb8e..9a40d1415a65 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -46,13 +46,6 @@
#include <asm/kgdb.h>
#endif
-void
-init_waitqueue_entry_current(wait_queue_t *link)
-{
- init_waitqueue_entry(link, current);
-}
-EXPORT_SYMBOL(init_waitqueue_entry_current);
-
/**
* wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
* waiting threads, which is not always desirable because all threads will
@@ -77,37 +70,6 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
}
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-void
-waitq_wait(wait_queue_t *link, long state)
-{
- schedule();
-}
-EXPORT_SYMBOL(waitq_wait);
-
-int64_t
-waitq_timedwait(wait_queue_t *link, long state, int64_t timeout)
-{
- return schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(waitq_timedwait);
-
-void
-schedule_timeout_and_set_state(long state, int64_t timeout)
-{
- set_current_state(state);
- schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_and_set_state);
-
-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
void cfs_init_timer(struct timer_list *t)
{
init_timer(t);
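
The wrappers removed above (init_waitqueue_entry_current(), waitq_wait(), waitq_timedwait(), schedule_timeout_and_set_state(), cfs_pause()) were thin shims over the standard scheduler primitives, which is why callers throughout this patch can open-code them. A minimal sketch of the resulting pattern, assuming a caller-provided wait queue and completion flag:

	#include <linux/sched.h>
	#include <linux/wait.h>

	/* Wait for *done on wq for at most `timeout` jiffies, using only
	 * the stock primitives this patch switches to. */
	static long demo_wait_on(wait_queue_head_t *wq, bool *done, long timeout)
	{
		wait_queue_t wait;

		init_waitqueue_entry(&wait, current);
		add_wait_queue(wq, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (*done || timeout <= 0)
				break;
			timeout = schedule_timeout(timeout);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(wq, &wait);
		return timeout;
	}
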
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index 732ae5540bf4..cfb274fc9cbd 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -65,23 +65,20 @@ void libcfs_init_nidstrings(void)
spin_lock_init(&libcfs_nidstring_lock);
}
-# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
-
static char *
libcfs_next_nidstring(void)
{
char *str;
unsigned long flags;
- NIDSTR_LOCK(flags);
+ spin_lock_irqsave(&libcfs_nidstring_lock, flags);
str = libcfs_nidstrings[libcfs_nidstring_idx++];
if (libcfs_nidstring_idx ==
sizeof(libcfs_nidstrings)/sizeof(libcfs_nidstrings[0]))
libcfs_nidstring_idx = 0;
- NIDSTR_UNLOCK(flags);
+ spin_unlock_irqrestore(&libcfs_nidstring_lock, flags);
return str;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 54290ce6bb43..c8599eefeb76 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -1076,11 +1076,10 @@ end_loop:
break;
}
}
- init_waitqueue_entry_current(&__wait);
+ init_waitqueue_entry(&__wait, current);
add_wait_queue(&tctl->tctl_waitq, &__wait);
set_current_state(TASK_INTERRUPTIBLE);
- waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
+ schedule_timeout(cfs_time_seconds(1));
remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
diff --git a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
index 245b46f0dd96..8085e32e5e7a 100644
--- a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
+++ b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
@@ -218,13 +218,12 @@ find_again:
MAX_SCHEDULE_TIMEOUT;
long left;
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&entry->ue_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&cache->uc_lock);
- left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
- expiry);
+ left = schedule_timeout(expiry);
spin_lock(&cache->uc_lock);
remove_wait_queue(&entry->ue_waitq, &wait);
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 1a55c81892e0..ba16fd5db704 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -334,7 +334,8 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
sched->ws_nthreads, sched->ws_name);
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
@@ -389,11 +390,11 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
spin_unlock(&cfs_wi_data.wi_glock);
if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02d",
+ snprintf(name, sizeof(name), "%s_%02d_%02u",
sched->ws_name, sched->ws_cpt,
sched->ws_nthreads);
} else {
- snprintf(name, sizeof(name), "%s_%02d",
+ snprintf(name, sizeof(name), "%s_%02u",
sched->ws_name, sched->ws_nthreads);
}
@@ -459,7 +460,8 @@ cfs_wi_shutdown (void)
while (sched->ws_nthreads != 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
spin_unlock(&cfs_wi_data.wi_glock);
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index cbd663ed030c..8b5508086174 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -160,7 +160,7 @@ static int ll_ddelete(const struct dentry *de)
/* kernel >= 2.6.38 last refcount is decreased after this function. */
LASSERT(d_count(de) == 1);
- /* Disable this piece of code temproarily because this is called
+ /* Disable this piece of code temporarily because this is called
* inside dcache_lock so it's not appropriate to do lots of work
* here. ATTENTION: Before this piece of code enabling, LU-2487 must be
* resolved. */
@@ -176,7 +176,7 @@ static int ll_ddelete(const struct dentry *de)
return 0;
}
-static int ll_set_dd(struct dentry *de)
+int ll_d_init(struct dentry *de)
{
LASSERT(de != NULL);
@@ -190,40 +190,22 @@ static int ll_set_dd(struct dentry *de)
OBD_ALLOC_PTR(lld);
if (likely(lld != NULL)) {
spin_lock(&de->d_lock);
- if (likely(de->d_fsdata == NULL))
+ if (likely(de->d_fsdata == NULL)) {
de->d_fsdata = lld;
- else
+ __d_lustre_invalidate(de);
+ } else {
OBD_FREE_PTR(lld);
+ }
spin_unlock(&de->d_lock);
} else {
return -ENOMEM;
}
}
+ LASSERT(de->d_op == &ll_d_ops);
return 0;
}
-int ll_dops_init(struct dentry *de, int block, int init_sa)
-{
- struct ll_dentry_data *lld = ll_d2d(de);
- int rc = 0;
-
- if (lld == NULL && block != 0) {
- rc = ll_set_dd(de);
- if (rc)
- return rc;
-
- lld = ll_d2d(de);
- }
-
- if (lld != NULL && init_sa != 0)
- lld->lld_sa_generation = 0;
-
- /* kernel >= 2.6.38 d_op is set in d_alloc() */
- LASSERT(de->d_op == &ll_d_ops);
- return rc;
-}
-
void ll_intent_drop_lock(struct lookup_intent *it)
{
if (it->it_op && it->d.lustre.it_lock_mode) {
@@ -259,9 +241,6 @@ void ll_intent_release(struct lookup_intent *it)
ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
ptlrpc_req_finished(it->d.lustre.it_data);
- if (it_disposition(it, DISP_ENQ_COMPLETE)) /* saved req from revalidate
- * to lookup */
- ptlrpc_req_finished(it->d.lustre.it_data);
it->d.lustre.it_disposition = 0;
it->d.lustre.it_data = NULL;
@@ -346,268 +325,32 @@ void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
}
-int ll_revalidate_it(struct dentry *de, int lookup_flags,
- struct lookup_intent *it)
+static int ll_revalidate_dentry(struct dentry *dentry,
+ unsigned int lookup_flags)
{
- struct md_op_data *op_data;
- struct ptlrpc_request *req = NULL;
- struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
- struct obd_export *exp;
- struct inode *parent = de->d_parent->d_inode;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
- LL_IT2STR(it));
+ struct inode *dir = dentry->d_parent->d_inode;
- if (de->d_inode == NULL) {
- __u64 ibits;
-
- /* We can only use negative dentries if this is stat or lookup,
- for opens and stuff we do need to query server. */
- /* If there is IT_CREAT in intent op set, then we must throw
- away this negative dentry and actually do the request to
- kernel to create whatever needs to be created (if possible)*/
- if (it && (it->it_op & IT_CREAT))
- return 0;
-
- if (d_lustre_invalid(de))
- return 0;
-
- ibits = MDS_INODELOCK_UPDATE;
- rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
- GOTO(out_sa, rc);
- }
-
- /* Never execute intents for mount points.
- * Attributes will be fixed up in ll_inode_revalidate_it */
- if (d_mountpoint(de))
- GOTO(out_sa, rc = 1);
-
- /* need to get attributes in case root got changed from other client */
- if (de == de->d_sb->s_root) {
- rc = __ll_inode_revalidate_it(de, it, MDS_INODELOCK_LOOKUP);
- if (rc == 0)
- rc = 1;
- GOTO(out_sa, rc);
- }
-
- exp = ll_i2mdexp(de->d_inode);
-
- OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
- ll_frob_intent(&it, &lookup_it);
- LASSERT(it);
+ /*
+ * if open&create is set, talk to MDS to make sure file is created if
+ * necessary, because we can't do this in ->open() later since that's
+ * called on an inode. Return 0 here to let lookup handle this.
+ */
+ if ((lookup_flags & (LOOKUP_OPEN | LOOKUP_CREATE)) ==
+ (LOOKUP_OPEN | LOOKUP_CREATE))
+ return 0;
- if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
+ if (lookup_flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
return 1;
- if (it->it_op == IT_OPEN) {
- struct inode *inode = de->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle **och_p;
- __u64 ibits;
-
- /*
- * We used to check for MDS_INODELOCK_OPEN here, but in fact
- * just having LOOKUP lock is enough to justify inode is the
- * same. And if inode is the same and we have suitable
- * openhandle, then there is no point in doing another OPEN RPC
- * just to throw away newly received openhandle. There are no
- * security implications too, if file owner or access mode is
- * change, LOOKUP lock is revoked.
- */
-
-
- if (it->it_flags & FMODE_WRITE)
- och_p = &lli->lli_mds_write_och;
- else if (it->it_flags & FMODE_EXEC)
- och_p = &lli->lli_mds_exec_och;
- else
- och_p = &lli->lli_mds_read_och;
-
- /* Check for the proper lock. */
- ibits = MDS_INODELOCK_LOOKUP;
- if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
- goto do_lock;
- mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Everything is open already, do nothing */
- /* Originally it was idea to do not let them steal our
- * open handle from under us by (*och_usecount)++ here.
- * But in case we have the handle, but we cannot use it
- * due to later checks (e.g. O_CREAT|O_EXCL flags set),
- * nobody would decrement counter increased here. So we
- * just hope the lock won't be invalidated in between.
- * But if it would be, we'll reopen the open request to
- * MDS later during file open path.
- */
- mutex_unlock(&lli->lli_och_mutex);
- return 1;
- }
- mutex_unlock(&lli->lli_och_mutex);
- }
-
- if (it->it_op == IT_GETATTR) {
- rc = ll_statahead_enter(parent, &de, 0);
- if (rc == 1)
- goto mark;
- else if (rc != -EAGAIN && rc != 0)
- GOTO(out, rc = 0);
- }
-
-do_lock:
- op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
- de->d_name.name, de->d_name.len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
- it->it_create_mode &= ~current_umask();
- it->it_create_mode |= M_CHECK_STALE;
- rc = md_intent_lock(exp, op_data, NULL, 0, it,
- lookup_flags,
- &req, ll_md_blocking_ast, 0);
- it->it_create_mode &= ~M_CHECK_STALE;
- ll_finish_md_op_data(op_data);
-
- /* If req is NULL, then md_intent_lock only tried to do a lock match;
- * if all was well, it will return 1 if it found locks, 0 otherwise. */
- if (req == NULL && rc >= 0) {
- if (!rc)
- goto do_lookup;
- GOTO(out, rc);
- }
-
- if (rc < 0) {
- if (rc != -ESTALE) {
- CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
- "%d\n", rc, it->d.lustre.it_status);
- }
- GOTO(out, rc = 0);
- }
-
-revalidate_finish:
- rc = ll_revalidate_it_finish(req, it, de);
- if (rc != 0) {
- if (rc != -ESTALE && rc != -ENOENT)
- ll_intent_release(it);
- GOTO(out, rc = 0);
- }
-
- if ((it->it_op & IT_OPEN) && de->d_inode &&
- !S_ISREG(de->d_inode->i_mode) &&
- !S_ISDIR(de->d_inode->i_mode)) {
- ll_release_openhandle(de, it);
- }
- rc = 1;
-
-out:
- /* We do not free request as it may be reused during following lookup
- * (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
- * be freed in ll_lookup_it or in ll_intent_release. But if
- * request was not completed, we need to free it. (bug 5154, 9903) */
- if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
- ptlrpc_req_finished(req);
- if (rc == 0) {
- /* mdt may grant layout lock for the newly created file, so
- * release the lock to avoid leaking */
- ll_intent_drop_lock(it);
- ll_invalidate_aliases(de->d_inode);
- } else {
- __u64 bits = 0;
- __u64 matched_bits = 0;
-
- CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
- "inode %p refc %d\n", de->d_name.len,
- de->d_name.name, de, de->d_parent, de->d_inode,
- d_count(de));
-
- ll_set_lock_data(exp, de->d_inode, it, &bits);
-
- /* Note: We have to match both LOOKUP and PERM lock
- * here to make sure the dentry is valid and no one
- * changing the permission.
- * But if the client connects < 2.4 server, which will
- * only grant LOOKUP lock, so we can only Match LOOKUP
- * lock for old server */
- if (exp_connect_flags(ll_i2mdexp(de->d_inode)) &&
- OBD_CONNECT_LVB_TYPE)
- matched_bits =
- MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
- else
- matched_bits = MDS_INODELOCK_LOOKUP;
-
- if (((bits & matched_bits) == matched_bits) &&
- d_lustre_invalid(de))
- d_lustre_revalidate(de);
- ll_lookup_finish_locks(it, de);
- }
-
-mark:
- if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
- ll_statahead_mark(parent, de);
- return rc;
-
- /*
- * This part is here to combat evil-evil race in real_lookup on 2.6
- * kernels. The race details are: We enter do_lookup() looking for some
- * name, there is nothing in dcache for this name yet and d_lookup()
- * returns NULL. We proceed to real_lookup(), and while we do this,
- * another process does open on the same file we looking up (most simple
- * reproducer), open succeeds and the dentry is added. Now back to
- * us. In real_lookup() we do d_lookup() again and suddenly find the
- * dentry, so we call d_revalidate on it, but there is no lock, so
- * without this code we would return 0, but unpatched real_lookup just
- * returns -ENOENT in such a case instead of retrying the lookup. Once
- * this is dealt with in real_lookup(), all of this ugly mess can go and
- * we can just check locks in ->d_revalidate without doing any RPCs
- * ever.
- */
-do_lookup:
- if (it != &lookup_it) {
- /* MDS_INODELOCK_UPDATE needed for IT_GETATTR case. */
- if (it->it_op == IT_GETATTR)
- lookup_it.it_op = IT_GETATTR;
- ll_lookup_finish_locks(it, de);
- it = &lookup_it;
- }
+ if (d_need_statahead(dir, dentry) <= 0)
+ return 1;
- /* Do real lookup here. */
- op_data = ll_prep_md_op_data(NULL, parent, NULL, de->d_name.name,
- de->d_name.len, 0, (it->it_op & IT_CREAT ?
- LUSTRE_OPC_CREATE :
- LUSTRE_OPC_ANY), NULL);
- if (IS_ERR(op_data))
- return PTR_ERR(op_data);
-
- rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
- ll_md_blocking_ast, 0);
- if (rc >= 0) {
- struct mdt_body *mdt_body;
- struct lu_fid fid = {.f_seq = 0, .f_oid = 0, .f_ver = 0};
- mdt_body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-
- if (de->d_inode)
- fid = *ll_inode2fid(de->d_inode);
-
- /* see if we got same inode, if not - return error */
- if (lu_fid_eq(&fid, &mdt_body->fid1)) {
- ll_finish_md_op_data(op_data);
- op_data = NULL;
- goto revalidate_finish;
- }
- ll_intent_release(it);
- }
- ll_finish_md_op_data(op_data);
- GOTO(out, rc = 0);
+ if (lookup_flags & LOOKUP_RCU)
+ return -ECHILD;
-out_sa:
- /*
- * For rc == 1 case, should not return directly to prevent losing
- * statahead windows; for rc == 0 case, the "lookup" will be done later.
- */
- if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
- ll_statahead_enter(parent, &de, 1);
- goto mark;
+ do_statahead_enter(dir, &dentry, dentry->d_inode == NULL);
+ ll_statahead_mark(dir, dentry);
+ return 1;
}
/*
@@ -615,24 +358,13 @@ out_sa:
*/
int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
{
- struct inode *parent = dentry->d_parent->d_inode;
- int unplug = 0;
+ int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s, flags=%u\n",
dentry->d_name.name, flags);
- if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
- ll_need_statahead(parent, dentry) > 0) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- if (dentry->d_inode == NULL)
- unplug = 1;
- do_statahead_enter(parent, &dentry, unplug);
- ll_statahead_mark(parent, dentry);
- }
-
- return 1;
+ rc = ll_revalidate_dentry(dentry, flags);
+ return rc;
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..7fbc18e3e654 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -362,7 +362,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
struct ptlrpc_request *request;
struct md_op_data *op_data;
- op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return (void *)op_data;
@@ -1048,20 +1048,25 @@ progress:
}
-static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len)
+static int copy_and_ioctl(int cmd, struct obd_export *exp,
+ const void __user *data, size_t size)
{
- void *ptr;
+ void *copy;
int rc;
- OBD_ALLOC(ptr, len);
- if (ptr == NULL)
+ OBD_ALLOC(copy, size);
+ if (copy == NULL)
return -ENOMEM;
- if (copy_from_user(ptr, data, len)) {
- OBD_FREE(ptr, len);
- return -EFAULT;
+
+ if (copy_from_user(copy, data, size)) {
+ rc = -EFAULT;
+ goto out;
}
- rc = obd_iocontrol(cmd, exp, len, data, NULL);
- OBD_FREE(ptr, len);
+
+ rc = obd_iocontrol(cmd, exp, size, copy, NULL);
+out:
+ OBD_FREE(copy, size);
+
return rc;
}
@@ -1080,16 +1085,16 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT))
return -EPERM;
break;
@@ -1395,7 +1400,7 @@ lmv_out_free:
if (tmp == NULL)
GOTO(free_lmv, rc = -ENOMEM);
- memcpy(tmp, &lum, sizeof(lum));
+ *tmp = lum;
tmp->lum_type = LMV_STRIPE_TYPE;
tmp->lum_stripe_count = 1;
mdtindex = ll_get_mdt_idx(inode);
@@ -1597,7 +1602,7 @@ out_rmdir:
struct obd_quotactl *oqctl;
int error = 0;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
@@ -1621,7 +1626,7 @@ out_rmdir:
case OBD_IOC_POLL_QUOTACHECK: {
struct if_quotacheck *check;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ if (!capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
return -EPERM;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index c12821aedc2f..8e844a6371e0 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -205,7 +205,7 @@ out:
return rc;
}
-int ll_md_real_close(struct inode *inode, int flags)
+int ll_md_real_close(struct inode *inode, fmode_t fmode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle **och_p;
@@ -213,30 +213,33 @@ int ll_md_real_close(struct inode *inode, int flags)
__u64 *och_usecount;
int rc = 0;
- if (flags & FMODE_WRITE) {
+ if (fmode & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
och_usecount = &lli->lli_open_fd_write_count;
- } else if (flags & FMODE_EXEC) {
+ } else if (fmode & FMODE_EXEC) {
och_p = &lli->lli_mds_exec_och;
och_usecount = &lli->lli_open_fd_exec_count;
} else {
- LASSERT(flags & FMODE_READ);
+ LASSERT(fmode & FMODE_READ);
och_p = &lli->lli_mds_read_och;
och_usecount = &lli->lli_open_fd_read_count;
}
mutex_lock(&lli->lli_och_mutex);
- if (*och_usecount) { /* There are still users of this handle, so
- skip freeing it. */
+ if (*och_usecount > 0) {
+ /* There are still users of this handle, so skip
+ * freeing it. */
mutex_unlock(&lli->lli_och_mutex);
return 0;
}
+
och=*och_p;
*och_p = NULL;
mutex_unlock(&lli->lli_och_mutex);
- if (och) { /* There might be a race and somebody have freed this och
- already */
+ if (och != NULL) {
+ /* There might be a race and this handle may already
+ be closed. */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
inode, och, NULL);
}
@@ -443,8 +446,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
itp, NULL);
out:
- ptlrpc_req_finished(itp->d.lustre.it_data);
- it_clear_disposition(itp, DISP_ENQ_COMPLETE);
+ ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
return rc;
@@ -477,7 +479,7 @@ static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_flags = it->it_flags;
- return md_set_open_replay_data(md_exp, och, req);
+ return md_set_open_replay_data(md_exp, och, it);
}
int ll_local_open(struct file *file, struct lookup_intent *it,
@@ -671,14 +673,13 @@ restart:
ll_capa_open(inode);
- if (!lli->lli_has_smd) {
- if (file->f_flags & O_LOV_DELAY_CREATE ||
- !(file->f_mode & FMODE_WRITE)) {
- CDEBUG(D_INODE, "object creation was delayed\n");
- GOTO(out_och_free, rc);
- }
+ if (!lli->lli_has_smd &&
+ (cl_is_lov_delay_create(file->f_flags) ||
+ (file->f_mode & FMODE_WRITE) == 0)) {
+ CDEBUG(D_INODE, "object creation was delayed\n");
+ GOTO(out_och_free, rc);
}
- file->f_flags &= ~O_LOV_DELAY_CREATE;
+ cl_lov_delay_create_clear(&file->f_flags);
GOTO(out_och_free, rc);
out_och_free:
@@ -813,10 +814,7 @@ struct obd_client_handle *ll_lease_open(struct inode *inode, struct file *file,
* doesn't deal with openhandle, so normal openhandle will be leaked. */
LDLM_FL_NO_LRU | LDLM_FL_EXCL);
ll_finish_md_op_data(op_data);
- if (req != NULL) {
- ptlrpc_req_finished(req);
- it_clear_disposition(&it, DISP_ENQ_COMPLETE);
- }
+ ptlrpc_req_finished(req);
if (rc < 0)
GOTO(out_release_it, rc);
@@ -1033,6 +1031,33 @@ int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
return rc;
}
+static bool file_is_noatime(const struct file *file)
+{
+ const struct vfsmount *mnt = file->f_path.mnt;
+ const struct inode *inode = file->f_path.dentry->d_inode;
+
+ /* Adapted from file_accessed() and touch_atime().*/
+ if (file->f_flags & O_NOATIME)
+ return true;
+
+ if (inode->i_flags & S_NOATIME)
+ return true;
+
+ if (IS_NOATIME(inode))
+ return true;
+
+ if (mnt->mnt_flags & (MNT_NOATIME | MNT_READONLY))
+ return true;
+
+ if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
+ return true;
+
+ if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ return true;
+
+ return false;
+}
+
void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -1052,6 +1077,8 @@ void ll_io_init(struct cl_io *io, const struct file *file, int write)
} else if (file->f_flags & O_APPEND) {
io->ci_lockreq = CILR_MANDATORY;
}
+
+ io->ci_noatime = file_is_noatime(file);
}
static ssize_t
@@ -1092,16 +1119,12 @@ restart:
down_read(&lli->lli_trunc_sem);
}
break;
- case IO_SENDFILE:
- vio->u.sendfile.cui_actor = args->u.sendfile.via_actor;
- vio->u.sendfile.cui_target = args->u.sendfile.via_target;
- break;
case IO_SPLICE:
vio->u.splice.cui_pipe = args->u.splice.via_pipe;
vio->u.splice.cui_flags = args->u.splice.via_flags;
break;
default:
- CERROR("Unknow IO type - %u\n", vio->cui_io_subtype);
+ CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
LBUG();
}
result = cl_io_loop(env, io);
@@ -1340,7 +1363,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
struct ll_recreate_obj ucreat;
struct ost_id oi;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
@@ -1358,7 +1381,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
struct ost_id oi;
obd_count ost_idx;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
@@ -1381,23 +1404,25 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
ccc_inode_lsm_put(inode, lsm);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
- return -EEXIST;
+ GOTO(out, rc = -EEXIST);
}
ll_inode_size_lock(inode);
rc = ll_intent_file_open(file, lum, lum_size, &oit);
if (rc)
- GOTO(out, rc);
+ GOTO(out_unlock, rc);
rc = oit.d.lustre.it_status;
if (rc < 0)
GOTO(out_req_free, rc);
ll_release_openhandle(file->f_dentry, &oit);
- out:
+out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
ccc_inode_lsm_put(inode, lsm);
+out:
+ cl_lov_delay_create_clear(&file->f_flags);
return rc;
out_req_free:
ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
@@ -1497,7 +1522,7 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
sizeof(struct lov_user_ost_data);
int rc;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
OBD_ALLOC_LARGE(lump, lum_size);
@@ -1747,7 +1772,7 @@ int ll_fid2path(struct inode *inode, void *arg)
struct getinfo_fid2path *gfout, *gfin;
int outsize, rc;
- if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
return -EPERM;
@@ -2005,7 +2030,7 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
}
- /* ultimate check, before swaping the layouts we check if
+ /* ultimate check, before swapping the layouts we check if
* dataversion has changed (if requested) */
if (llss->check_dv1) {
rc = ll_data_version(llss->inode1, &dv, 0);
@@ -2093,7 +2118,7 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
/* Non-root users are forbidden to set or clear flags which are
* NOT defined in HSM_USER_MASK. */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !cfs_capable(CFS_CAP_SYS_ADMIN))
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
@@ -2670,7 +2695,7 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
/* flocks are whole-file locks */
flock.l_flock.end = OFFSET_MAX;
- /* For flocks owner is determined by the local file desctiptor*/
+ /* For flocks, the owner is determined by the local file descriptor */
flock.l_flock.owner = (unsigned long)file_lock->fl_file;
} else if (file_lock->fl_flags & FL_POSIX) {
flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
@@ -2891,7 +2916,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
oit.it_op = IT_LOOKUP;
/* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, dentry->d_parent->d_inode,
+ op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
dentry->d_inode, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
@@ -3175,7 +3200,7 @@ struct inode_operations ll_file_inode_operations = {
.get_acl = ll_get_acl,
};
-/* dynamic ioctl number support routins */
+/* dynamic ioctl number support routines */
static struct llioc_ctl_data {
struct rw_semaphore ioc_sem;
struct list_head ioc_head;
@@ -3299,7 +3324,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
if (result == 0) {
/* it can only be allowed to match after layout is
* applied to inode otherwise false layout would be
- * seen. Applying layout shoud happen before dropping
+ * seen. Applying layout should happen before dropping
* the intent lock. */
ldlm_lock_allow_match(lock);
}
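For orientation, a hedged sketch of how the new ci_noatime flag set in ll_io_init() is meant to be consumed further down the cl_io stack; the helper below is illustrative of the intent only and is not the actual consumer in this patch:

/* Illustrative only: a lower layer skips the atime update when
 * ll_io_init() marked the io as noatime. */
static void demo_io_done(const struct cl_io *io, struct file *file)
{
	if (!io->ci_noatime)
		file_accessed(file);	/* normal VFS atime handling */
}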
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index e2996c46b2c0..38c2d0e947db 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -348,7 +348,7 @@ static int ll_close_thread(void *arg)
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
+ CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
inode->i_ino, inode->i_generation);
ll_done_writing(inode);
iput(inode);
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 7ee5c02783f9..69aba0afca41 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -296,13 +296,6 @@ int ll_xattr_cache_get(struct inode *inode,
size_t size,
__u64 valid);
-int ll_xattr_cache_update(struct inode *inode,
- const char *name,
- const char *newval,
- size_t size,
- __u64 valid,
- int flags);
-
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
* consistency between file size and KMS.
@@ -532,7 +525,7 @@ struct ll_sb_info {
atomic_t ll_agl_total; /* AGL thread started count */
dev_t ll_sdev_orig; /* save s_dev before assign for
- * clustred nfs */
+ * clustered nfs */
struct rmtacl_ctl_table ll_rct;
struct eacl_table ll_et;
__kernel_fsid_t ll_fsid;
@@ -782,7 +775,7 @@ int ll_local_open(struct file *file,
int ll_release_openhandle(struct dentry *, struct lookup_intent *);
int ll_md_close(struct obd_export *md_exp, struct inode *inode,
struct file *file);
-int ll_md_real_close(struct inode *inode, int flags);
+int ll_md_real_close(struct inode *inode, fmode_t fmode);
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags);
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
@@ -828,7 +821,7 @@ int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
/* llite/dcache.c */
-int ll_dops_init(struct dentry *de, int block, int init_sa);
+int ll_d_init(struct dentry *de);
extern struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
@@ -915,12 +908,10 @@ struct ccc_object *cl_inode2ccc(struct inode *inode);
void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-/* specific achitecture can implement only part of this list */
+/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
/** normal IO */
IO_NORMAL,
- /** io called from .sendfile */
- IO_SENDFILE,
/** io started from splice_{read|write} */
IO_SPLICE
};
@@ -932,10 +923,6 @@ struct vvp_io {
union {
struct {
- read_actor_t cui_actor;
- void *cui_target;
- } sendfile;
- struct {
struct pipe_inode_info *cui_pipe;
unsigned int cui_flags;
} splice;
@@ -981,7 +968,7 @@ struct vvp_io {
* IO arguments for various VFS I/O interfaces.
*/
struct vvp_io_args {
- /** normal/sendfile/splice */
+ /** normal/splice */
enum vvp_io_subtype via_io_subtype;
union {
@@ -991,10 +978,6 @@ struct vvp_io_args {
unsigned long via_nrsegs;
} normal;
struct {
- read_actor_t via_actor;
- void *via_target;
- } sendfile;
- struct {
struct pipe_inode_info *via_pipe;
unsigned int via_flags;
} splice;
@@ -1320,12 +1303,13 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry)
if (lli->lli_opendir_pid != current_pid())
return;
- if (sai != NULL && ldd != NULL)
+ LASSERT(ldd != NULL);
+ if (sai != NULL)
ldd->lld_sa_generation = sai->sai_generation;
}
static inline int
-ll_need_statahead(struct inode *dir, struct dentry *dentryp)
+d_need_statahead(struct inode *dir, struct dentry *dentryp)
{
struct ll_inode_info *lli;
struct ll_dentry_data *ldd;
@@ -1370,14 +1354,14 @@ ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
{
int ret;
- ret = ll_need_statahead(dir, *dentryp);
+ ret = d_need_statahead(dir, *dentryp);
if (ret <= 0)
return ret;
return do_statahead_enter(dir, dentryp, only_unplug);
}
-/* llite ioctl register support rountine */
+/* llite ioctl register support routine */
enum llioc_iter {
LLIOC_CONT = 0,
LLIOC_STOP
@@ -1389,7 +1373,7 @@ enum llioc_iter {
* Rules to write a callback function:
*
* Parameters:
- * @magic: Dynamic ioctl call routine will feed this vaule with the pointer
+ * @magic: Dynamic ioctl call routine will feed this value with the pointer
* returned to ll_iocontrol_register. Callback functions should use this
* data to check the potential collision of ioctl cmd. If a collision is
* found, callback function should return LLIOC_CONT.
@@ -1414,7 +1398,7 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
* @cb: callback function, it will be called if an ioctl command is found to
* belong to the command list @cmd.
*
- * Return vaule:
+ * Return value:
* A magic pointer will be returned if success;
* otherwise, NULL will be returned.
* */
@@ -1524,7 +1508,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* separate locks in different namespaces, Master MDT,
* where the name entry is, will grant LOOKUP lock,
* remote MDT, where the object is, will grant
- * UPDATE|PERM lock. The inode will be attched to both
+ * UPDATE|PERM lock. The inode will be attached to both
* LOOKUP and PERM locks, so revoking either lock will
* cause the dcache to be cleared */
if (it->d.lustre.it_remote_lock_mode) {
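ll_dops_init() is replaced by ll_d_init(), which can fail, so every caller now has to propagate the error; a minimal sketch of the new pattern (the demo_* name is hypothetical):

static int demo_hash_dentry(struct dentry *de, struct inode *inode)
{
	int rc;

	rc = ll_d_init(de);		/* may return e.g. -ENOMEM */
	if (rc < 0)
		return rc;
	d_add(de, inode);
	return 0;
}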
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6cfdb9e4b74b..26003d3c1be7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -155,11 +155,6 @@ void ll_free_sbi(struct super_block *sb)
}
}
-static struct dentry_operations ll_d_root_ops = {
- .d_compare = ll_dcompare,
- .d_revalidate = ll_revalidate_nd,
-};
-
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
struct vfsmount *mnt)
{
@@ -211,7 +206,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK |
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE;
+ OBD_CONNECT_PINGLESS |
+ OBD_CONNECT_MAX_EASIZE |
+ OBD_CONNECT_FLOCK_DEAD |
+ OBD_CONNECT_DISP_STRIPE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
@@ -281,7 +279,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* For mount, we only need fs info from MDT0, and also in DNE, it
* can make sure the client can be mounted as long as MDT0 is
- * avaible */
+ * available */
err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
OBD_STATFS_FOR_MDT0);
@@ -579,10 +577,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
GOTO(out_root, err = -ENOMEM);
}
- /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
- d_set_d_op(sb->s_root, &ll_d_root_ops);
- sb->s_d_op = &ll_d_ops;
-
sbi->ll_sdev_orig = sb->s_dev;
/* We set sb->s_dev equal on all lustre clients in order to support
@@ -723,7 +717,7 @@ void ll_kill_super(struct super_block *sb)
return;
sbi = ll_s2sbi(sb);
- /* we need restore s_dev from changed for clustred NFS before put_super
+ /* we need to restore s_dev, changed for clustered NFS, before put_super
* because new kernels cache s_dev, and changing sb->s_dev in
* put_super does not affect removal of the real device */
if (sbi) {
@@ -740,7 +734,8 @@ char *ll_read_opt(const char *opt, char *data)
CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
if (strncmp(opt, data, strlen(opt)))
return NULL;
- if ((value = strchr(data, '=')) == NULL)
+ value = strchr(data, '=');
+ if (value == NULL)
return NULL;
value++;
@@ -1013,6 +1008,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
GOTO(out_free, err);
sb->s_bdi = &lsi->lsi_bdi;
+ /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
+ sb->s_d_op = &ll_d_ops;
/* Generate a string unique to this super, in case some joker tries
to mount the same fs at two mount points.
@@ -1067,7 +1064,7 @@ out_free:
void ll_put_super(struct super_block *sb)
{
- struct config_llog_instance cfg;
+ struct config_llog_instance cfg, params_cfg;
struct obd_device *obd;
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -1081,6 +1078,9 @@ void ll_put_super(struct super_block *sb)
cfg.cfg_instance = sb;
lustre_end_log(sb, profilenm, &cfg);
+ params_cfg.cfg_instance = sb;
+ lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
+
if (sbi->ll_md_exp) {
obd = class_exp2obd(sbi->ll_md_exp);
if (obd)
@@ -1405,7 +1405,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
- !cfs_capable(CFS_CAP_FOWNER))
+ !capable(CFS_CAP_FOWNER))
return -EPERM;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index caed6423e4ef..90b2c0d275f9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -285,7 +285,7 @@ static inline int to_fault_error(int result)
* Lustre implementation of a vm_operations_struct::fault() method, called by
* the VM to serve a page fault (both in kernel and user space).
*
- * \param vma - is virtiual area struct related to page fault
+ * \param vma - is virtual area struct related to page fault
* \param vmf - structure which describes the type and address of the fault
*
* \return allocated and filled _locked_ page for address
@@ -370,7 +370,7 @@ restart:
goto restart;
}
- result |= VM_FAULT_LOCKED;
+ result = VM_FAULT_LOCKED;
}
cfs_restore_sigs(set);
return result;
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 1767c741fb72..3580069789fc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -167,10 +167,10 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
}
result = d_obtain_alias(inode);
- if (IS_ERR(result))
+ if (IS_ERR(result)) {
+ iput(inode);
return result;
-
- ll_dops_init(result, 1, 0);
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 5338e8d4c50f..f78eda235c7a 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
- int i;
int rw;
obd_count page_count = 0;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
struct bio *bio;
ssize_t bytes;
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
for (bio = head; bio != NULL; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
- offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, i) {
- BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+ offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, iter) {
+ BUG_ON(bvec.bv_offset != 0);
+ BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
- pages[page_count] = bvec->bv_page;
+ pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
page_count++;
- offset += bvec->bv_len;
+ offset += bvec.bv_len;
}
LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
}
@@ -255,7 +255,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
* to store parity;
* 2. Reserve the # of (page_count * depth) cl_pages from the reserved
* pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we neeedn't allocate the cl_pages from
+ * pool, this guarantees we needn't allocate the cl_pages from
* generic cl_page slab cache.
* Of course, if there is NOT enough pages in the pool, we might
* be asked to write less pages once, this purely depends on
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
- (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ (unsigned long long)(*bio)->bi_iter.bi_sector,
+ (*bio)->bi_iter.bi_size,
page_count, (*bio)->bi_vcnt);
if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
break;
@@ -324,7 +325,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
bio = &(*bio)->bi_next;
}
if (*bio) {
- /* Some of bios can't be mergable. */
+ /* Some of bios can't be mergeable. */
lo->lo_bio = *bio;
*bio = NULL;
} else {
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto err;
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+ (unsigned long long)old_bio->bi_iter.bi_sector,
+ old_bio->bi_iter.bi_size);
spin_lock_irq(&lo->lo_lock);
inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
loop_add_bio(lo, old_bio);
return;
err:
- cfs_bio_io_error(old_bio, old_bio->bi_size);
+ cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
}
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
while (bio) {
struct bio *tmp = bio->bi_next;
bio->bi_next = NULL;
- cfs_bio_endio(bio, bio->bi_size, ret);
+ cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
bio = tmp;
}
}
@@ -656,7 +658,7 @@ static struct block_device_operations lo_fops = {
* ll_iocontrol_call.
*
* This is a llite regular file ioctl function. It takes the responsibility
- * of attaching or detaching a file by a lloop's device numner.
+ * of attaching or detaching a file by a lloop's device number.
*/
static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
unsigned int cmd, unsigned long arg,
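The lloop changes follow the block layer's move to immutable biovecs: bi_sector and bi_size now live in bio->bi_iter, and bio_for_each_segment() yields a struct bio_vec by value via a struct bvec_iter. A hedged sketch of the new iteration pattern (demo_handle_page() is a placeholder, not a real function):

static void demo_walk_bio(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	pgoff_t offset = (pgoff_t)(bio->bi_iter.bi_sector << 9);

	bio_for_each_segment(bvec, bio, iter) {
		/* bvec is a copy, so members are accessed with '.' */
		demo_handle_page(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
		offset += bvec.bv_len;
	}
}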
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index fc8d264f6c9a..25a6ea580f00 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -195,101 +195,107 @@ static void ll_invalidate_negative_children(struct inode *dir)
int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- int rc;
struct lustre_handle lockh;
+ int rc;
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
+ CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
return rc;
}
break;
case LDLM_CB_CANCELING: {
struct inode *inode = ll_inode_from_resource_lock(lock);
- struct ll_inode_info *lli;
__u64 bits = lock->l_policy_data.l_inodebits.bits;
- struct lu_fid *fid;
- ldlm_mode_t mode = lock->l_req_mode;
/* Inode is set to lock->l_resource->lr_lvb_inode
* for mdc - bug 24555 */
LASSERT(lock->l_ast_data == NULL);
- /* Invalidate all dentries associated with this inode */
if (inode == NULL)
break;
+ /* Invalidate all dentries associated with this inode */
LASSERT(lock->l_flags & LDLM_FL_CANCELING);
- if (bits & MDS_INODELOCK_XATTR)
+ if (!fid_res_name_eq(ll_inode2fid(inode),
+ &lock->l_resource->lr_name)) {
+ LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
+ PFID(ll_inode2fid(inode)), inode);
+ LBUG();
+ }
+
+ if (bits & MDS_INODELOCK_XATTR) {
ll_xattr_cache_destroy(inode);
+ bits &= ~MDS_INODELOCK_XATTR;
+ }
/* For OPEN locks we differentiate between lock modes
* LCK_CR, LCK_CW, LCK_PR - bug 22891 */
- if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
- ll_have_md_lock(inode, &bits, LCK_MINMODE);
-
if (bits & MDS_INODELOCK_OPEN)
- ll_have_md_lock(inode, &bits, mode);
-
- fid = ll_inode2fid(inode);
- if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
- LDLM_ERROR(lock, "data mismatch with object "
- DFID" (%p)", PFID(fid), inode);
+ ll_have_md_lock(inode, &bits, lock->l_req_mode);
if (bits & MDS_INODELOCK_OPEN) {
- int flags = 0;
+ fmode_t fmode;
+
switch (lock->l_req_mode) {
case LCK_CW:
- flags = FMODE_WRITE;
+ fmode = FMODE_WRITE;
break;
case LCK_PR:
- flags = FMODE_EXEC;
+ fmode = FMODE_EXEC;
break;
case LCK_CR:
- flags = FMODE_READ;
+ fmode = FMODE_READ;
break;
default:
- CERROR("Unexpected lock mode for OPEN lock "
- "%d, inode %ld\n", lock->l_req_mode,
- inode->i_ino);
+ LDLM_ERROR(lock, "bad lock mode for OPEN lock");
+ LBUG();
}
- ll_md_real_close(inode, flags);
+
+ ll_md_real_close(inode, fmode);
}
- lli = ll_i2info(inode);
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
+ ll_have_md_lock(inode, &bits, LCK_MINMODE);
+
if (bits & MDS_INODELOCK_LAYOUT) {
- struct cl_object_conf conf = { { 0 } };
+ struct cl_object_conf conf = {
+ .coc_opc = OBJECT_CONF_INVALIDATE,
+ .coc_inode = inode,
+ };
- conf.coc_opc = OBJECT_CONF_INVALIDATE;
- conf.coc_inode = inode;
rc = ll_layout_conf(inode, &conf);
- if (rc)
- CDEBUG(D_INODE, "invaliding layout %d.\n", rc);
+ if (rc < 0)
+ CDEBUG(D_INODE, "cannot invalidate layout of "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(inode)), rc);
}
if (bits & MDS_INODELOCK_UPDATE) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
spin_lock(&lli->lli_lock);
lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
spin_unlock(&lli->lli_lock);
}
- if (S_ISDIR(inode->i_mode) &&
- (bits & MDS_INODELOCK_UPDATE)) {
+ if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
CDEBUG(D_INODE, "invalidating inode %lu\n",
inode->i_ino);
truncate_inode_pages(inode->i_mapping, 0);
ll_invalidate_negative_children(inode);
}
- if (inode->i_sb->s_root &&
- inode != inode->i_sb->s_root->d_inode &&
- (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)))
+ if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
+ inode->i_sb->s_root != NULL &&
+ inode != inode->i_sb->s_root->d_inode)
ll_invalidate_aliases(inode);
+
iput(inode);
break;
}
@@ -400,11 +406,16 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
{
struct dentry *new;
+ int rc;
if (inode) {
new = ll_find_alias(inode, de);
if (new) {
- ll_dops_init(new, 1, 1);
+ rc = ll_d_init(new);
+ if (rc < 0) {
+ dput(new);
+ return ERR_PTR(rc);
+ }
d_move(new, de);
iput(inode);
CDEBUG(D_DENTRY,
@@ -413,8 +424,9 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return new;
}
}
- ll_dops_init(de, 1, 1);
- __d_lustre_invalidate(de);
+ rc = ll_d_init(de);
+ if (rc < 0)
+ return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
de, de->d_inode, d_count(de), de->d_flags);
@@ -453,10 +465,22 @@ int ll_lookup_it_finish(struct ptlrpc_request *request,
}
/* Only hash *de if it is unhashed (new dentry).
- * Atoimc_open may passin hashed dentries for open.
+ * Atomic_open may pass in hashed dentries for open.
*/
- if (d_unhashed(*de))
- *de = ll_splice_alias(inode, *de);
+ if (d_unhashed(*de)) {
+ struct dentry *alias;
+
+ alias = ll_splice_alias(inode, *de);
+ if (IS_ERR(alias))
+ return PTR_ERR(alias);
+ *de = alias;
+ } else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
+ !it_disposition(it, DISP_OPEN_CREATE)) {
+ /* With DISP_OPEN_CREATE the dentry will be
+ * instantiated in ll_create_it. */
+ LASSERT((*de)->d_inode == NULL);
+ d_instantiate(*de, inode);
+ }
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
/* we have a lookup lock - unhide dentry */
@@ -505,16 +529,6 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
ll_frob_intent(&it, &lookup_it);
- /* As do_lookup is called before follow_mount, root dentry may be left
- * not valid, revalidate it here. */
- if (parent->i_sb->s_root && (parent->i_sb->s_root->d_inode == parent) &&
- (it->it_op & (IT_OPEN | IT_CREAT))) {
- rc = ll_inode_revalidate_it(parent->i_sb->s_root, it,
- MDS_INODELOCK_LOOKUP);
- if (rc)
- return ERR_PTR(rc);
- }
-
if (it->it_op == IT_GETATTR) {
rc = ll_statahead_enter(parent, &dentry, 0);
if (rc == 1) {
@@ -584,12 +598,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
parent->i_generation, parent, flags);
/* Optimize away (CREATE && !OPEN). Let .create handle the race. */
- if ((flags & LOOKUP_CREATE ) && !(flags & LOOKUP_OPEN)) {
- ll_dops_init(dentry, 1, 1);
- __d_lustre_invalidate(dentry);
- d_add(dentry, NULL);
+ if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
return NULL;
- }
if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
itp = NULL;
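When an OPEN lock is cancelled, the blocking AST above now translates the LDLM request mode into an fmode_t before calling ll_md_real_close(); condensed into a sketch (the helper name is illustrative):

static fmode_t demo_lck_mode_to_fmode(ldlm_mode_t mode)
{
	switch (mode) {
	case LCK_CW:
		return FMODE_WRITE;
	case LCK_PR:
		return FMODE_EXEC;
	case LCK_CR:
		return FMODE_READ;
	default:
		LBUG();		/* no other mode is valid for an OPEN lock */
		return 0;	/* not reached */
	}
}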
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index e9ba38a553cf..416f7a094a6d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -135,7 +135,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
}
/*
- * Loop-back driver calls ->prepare_write() and ->sendfile()
+ * Loop-back driver calls ->prepare_write().
* methods directly, bypassing file system ->write() operation,
* so cl_io has to be created here.
*/
@@ -558,10 +558,10 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
* striped over, rather than having a constant value for all files here. */
/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
- * Temprarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
+ * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance siginificantly. See LU-2816 */
+ * up quickly which will affect read performance significantly. See LU-2816 */
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
static inline int stride_io_mode(struct ll_readahead_state *ras)
@@ -570,7 +570,7 @@ static inline int stride_io_mode(struct ll_readahead_state *ras)
}
/* The function calculates how much pages will be read in
* [off, off + length], in such stride IO area,
- * stride_offset = st_off, stride_lengh = st_len,
+ * stride_offset = st_off, stride_length = st_len,
* stride_pages = st_pgs
*
* |------------------|*****|------------------|*****|------------|*****|....
@@ -1090,7 +1090,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
ras_set_start(inode, ras, index);
if (stride_io_mode(ras))
- /* Since stride readahead is sentivite to the offset
+ /* Since stride readahead is sensitive to the offset
* of read-ahead, so we use original offset here,
* instead of ras_window_start, which is RPC aligned */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index f6b5f4b95f37..c8624b5a9875 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -577,7 +577,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
* Someone triggered glimpse within 1 sec before.
* 1) The former glimpse succeeded with glimpse lock granted by OST, and
* if the lock is still cached on client, AGL needs to do nothing. If
- * it is cancelled by other client, AGL maybe cannot obtaion new lock
+ * it is cancelled by other client, AGL maybe cannot obtain new lock
* for no glimpse callback triggered by AGL.
* 2) The former glimpse succeeded, but OST did not grant glimpse lock.
* Under such case, it is quite possible that the OST will not grant
@@ -877,9 +877,6 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
if (d_mountpoint(dentry))
return 1;
- if (unlikely(dentry == dentry->d_sb->s_root))
- return 1;
-
entry->se_inode = igrab(inode);
rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
if (rc == 1) {
@@ -1588,8 +1585,15 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
ll_inode2fid(inode), &bits);
if (rc == 1) {
if ((*dentryp)->d_inode == NULL) {
- *dentryp = ll_splice_alias(inode,
+ struct dentry *alias;
+
+ alias = ll_splice_alias(inode,
*dentryp);
+ if (IS_ERR(alias)) {
+ ll_sai_unplug(sai, entry);
+ return PTR_ERR(alias);
+ }
+ *dentryp = alias;
} else if ((*dentryp)->d_inode != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 93cbfbb7e7f7..c7d70091246e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -51,7 +51,7 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice);
/**
- * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ * True, if \a io is a normal io, False for splice_{read,write}
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
@@ -474,20 +474,6 @@ static void vvp_io_setattr_fini(const struct lu_env *env,
vvp_io_fini(env, ios);
}
-static ssize_t lustre_generic_file_read(struct file *file,
- struct ccc_io *vio, loff_t *ppos)
-{
- return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
- vio->cui_nrsegs, *ppos);
-}
-
-static ssize_t lustre_generic_file_write(struct file *file,
- struct ccc_io *vio, loff_t *ppos)
-{
- return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
- vio->cui_nrsegs, *ppos);
-}
-
static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
@@ -540,8 +526,11 @@ static int vvp_io_read_start(const struct lu_env *env,
file_accessed(file);
switch (vio->cui_io_subtype) {
case IO_NORMAL:
- result = lustre_generic_file_read(file, cio, &pos);
- break;
+ LASSERT(cio->cui_iocb->ki_pos == pos);
+ result = generic_file_aio_read(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ cio->cui_iocb->ki_pos);
+ break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
vio->u.splice.cui_pipe, cnt,
@@ -586,7 +575,6 @@ static int vvp_io_write_start(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = ccc_object_inode(obj);
- struct file *file = cio->cui_fd->fd_file;
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -601,6 +589,8 @@ static int vvp_io_write_start(const struct lu_env *env,
*/
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
cio->cui_iocb->ki_pos = pos;
+ } else {
+ LASSERT(cio->cui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
@@ -608,8 +598,9 @@ static int vvp_io_write_start(const struct lu_env *env,
if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = lustre_generic_file_write(file, cio, &pos);
-
+ result = generic_file_aio_write(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ cio->cui_iocb->ki_pos);
if (result > 0) {
if (result < cnt)
io->ci_continue = 0;
@@ -655,7 +646,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
if (cfio->fault.ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
+ CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
return -EINVAL;
}
@@ -1201,7 +1192,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
- * fetch will return -ENOENT, we'd ingore this error
+ * fetch will return -ENOENT, we'd ignore this error
* and continue with dirty flush. LU-3230. */
result = 0;
if (result < 0)
@@ -1216,7 +1207,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice)
{
- /* Caling just for assertion */
+ /* Calling just for assertion */
cl2ccc_io(env, slice);
return vvp_env_io(env);
}
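The vvp_io read and write paths now call generic_file_aio_read()/generic_file_aio_write() directly and assert that the iocb position matches the cl_io position; roughly, as a sketch rather than the actual helper:

static ssize_t demo_vvp_read(struct ccc_io *cio, loff_t pos)
{
	/* the caller is expected to keep ki_pos in sync with the cl_io */
	LASSERT(cio->cui_iocb->ki_pos == pos);
	return generic_file_aio_read(cio->cui_iocb, cio->cui_iov,
				     cio->cui_nrsegs, cio->cui_iocb->ki_pos);
}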
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 3a7d03c12dd9..b1ed4d9ea6be 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -95,7 +95,7 @@ int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
return -EOPNOTSUPP;
- if (xattr_type == XATTR_TRUSTED_T && !cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
if (xattr_type == XATTR_OTHER_T)
return -EOPNOTSUPP;
@@ -183,17 +183,11 @@ int ll_setxattr_common(struct inode *inode, const char *name,
valid |= rce_ops2valid(rce->rce_ops);
}
#endif
- if (sbi->ll_xattr_cache_enabled &&
- (rce == NULL || rce->rce_ops == RMT_LSETFACL)) {
- rc = ll_xattr_cache_update(inode, name, pv, size, valid, flags);
- } else {
oc = ll_mdscapa_get(inode);
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
valid, name, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
capa_put(oc);
- }
-
#ifdef CONFIG_FS_POSIX_ACL
if (new_value != NULL)
lustre_posix_acl_xattr_free(new_value, size);
@@ -292,6 +286,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
void *xdata;
struct obd_capa *oc;
struct rmtacl_ctl_entry *rce = NULL;
+ struct ll_inode_info *lli = ll_i2info(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
@@ -339,7 +334,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
- struct ll_inode_info *lli = ll_i2info(inode);
+
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -358,13 +353,27 @@ int ll_getxattr_common(struct inode *inode, const char *name,
#endif
do_getxattr:
- if (sbi->ll_xattr_cache_enabled && (rce == NULL ||
- rce->rce_ops == RMT_LGETFACL ||
- rce->rce_ops == RMT_LSETFACL)) {
+ if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
+ if (rc == -EAGAIN)
+ goto getxattr_nocache;
if (rc < 0)
GOTO(out_xattr, rc);
+
+ /* Add "system.posix_acl_access" to the list */
+ if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
+ if (size == 0) {
+ rc += sizeof(XATTR_NAME_ACL_ACCESS);
+ } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
+ memcpy(buffer + rc, XATTR_NAME_ACL_ACCESS,
+ sizeof(XATTR_NAME_ACL_ACCESS));
+ rc += sizeof(XATTR_NAME_ACL_ACCESS);
+ } else {
+ GOTO(out_xattr, rc = -ERANGE);
+ }
+ }
} else {
+getxattr_nocache:
oc = ll_mdscapa_get(inode);
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3e3be1f13502..4defa2fd83b3 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -98,13 +98,13 @@ static int ll_xattr_cache_find(struct list_head *cache,
}
/**
- * This adds or updates an xattr.
+ * This adds an xattr.
*
* Add @xattr_name attr with @xattr_val value and @xattr_val_len length.
- * if the attribute already exists, then update its value.
*
* \retval 0 success
* \retval -ENOMEM if no memory could be allocated for the cached attr
+ * \retval -EPROTO if duplicate xattr is being added
*/
static int ll_xattr_cache_add(struct list_head *cache,
const char *xattr_name,
@@ -116,27 +116,8 @@ static int ll_xattr_cache_add(struct list_head *cache,
if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
- /* Found a cached EA, update it */
-
- if (xattr_val_len != xattr->xe_vallen) {
- char *val;
- OBD_ALLOC(val, xattr_val_len);
- if (val == NULL) {
- CDEBUG(D_CACHE,
- "failed to allocate %u bytes for xattr %s update\n",
- xattr_val_len, xattr_name);
- return -ENOMEM;
- }
- OBD_FREE(xattr->xe_value, xattr->xe_vallen);
- xattr->xe_value = val;
- xattr->xe_vallen = xattr_val_len;
- }
- memcpy(xattr->xe_value, xattr_val, xattr_val_len);
-
- CDEBUG(D_CACHE, "update: [%s]=%.*s\n", xattr_name,
- xattr_val_len, xattr_val);
-
- return 0;
+ CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
+ return -EPROTO;
}
OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, __GFP_IO);
@@ -261,7 +242,7 @@ int ll_xattr_cache_valid(struct ll_inode_info *lli)
*
* Free all xattr memory. @lli is the inode info pointer.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
*/
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
@@ -292,14 +273,14 @@ int ll_xattr_cache_destroy(struct inode *inode)
}
/**
- * Match or enqueue a PR or PW LDLM lock.
+ * Match or enqueue a PR lock.
*
* Find or request an LDLM lock with xattr data.
* Since LDLM does not provide API for atomic match_or_enqueue,
* the function handles it with a separate enq lock.
* If successful, the function exits with the list lock held.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -ENOMEM not enough memory
*/
static int ll_xattr_find_get_lock(struct inode *inode,
@@ -322,9 +303,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
mutex_lock(&lli->lli_xattrs_enq_lock);
/* Try matching first. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
- oit->it_op == IT_SETXATTR ? LCK_PW :
- (LCK_PR | LCK_PW));
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, LCK_PR);
if (mode != 0) {
/* fake oit in mdc_revalidate_lock() manner */
oit->d.lustre.it_lock_handle = lockh.cookie;
@@ -340,13 +319,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
return PTR_ERR(op_data);
}
- op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS |
- OBD_MD_FLXATTRLOCKED;
-#ifdef CONFIG_FS_POSIX_ACL
- /* If working with ACLs, we would like to cache local ACLs */
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- op_data->op_valid |= OBD_MD_FLRMTLGETFACL;
-#endif
+ op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
ll_finish_md_op_data(op_data);
@@ -374,7 +347,7 @@ out:
* a read or a write xattr lock depending on operation in @oit.
* Intent is dropped on exit unless the operation is setxattr.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -EPROTO network protocol error
* \retval -ENOMEM not enough memory for the cache
*/
@@ -409,7 +382,11 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
if (oit->d.lustre.it_status < 0) {
CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
- GOTO(out_destroy, rc = oit->d.lustre.it_status);
+ rc = oit->d.lustre.it_status;
+ /* xattr data is so large that we don't want to cache it */
+ if (rc == -ERANGE)
+ rc = -EAGAIN;
+ GOTO(out_destroy, rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
@@ -447,6 +424,11 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
rc = -EPROTO;
} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
rc = -ENOMEM;
+ } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
+ /* Filter out ACL ACCESS since it's cached separately */
+ CDEBUG(D_CACHE, "not caching %s\n",
+ XATTR_NAME_ACL_ACCESS);
+ rc = 0;
} else {
rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
*xsizes);
@@ -467,8 +449,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
GOTO(out_maybe_drop, rc);
out_maybe_drop:
- /* drop lock on error or getxattr */
- if (rc != 0 || oit->it_op != IT_SETXATTR)
+
ll_intent_drop_lock(oit);
if (rc != 0)
@@ -496,7 +477,7 @@ out_destroy:
* The resulting value/list is stored in @buffer if the former
* is not larger than @size.
*
- * \retval 0 no error occured
+ * \retval 0 no error occurred
* \retval -EPROTO network protocol error
* \retval -ENOMEM not enough memory for the cache
* \retval -ERANGE the buffer is not large enough
@@ -553,65 +534,3 @@ out:
return rc;
}
-
-
-/**
- * Set/update an xattr value or remove xattr using the write-through cache.
- *
- * Set/update the xattr value (if @valid has OBD_MD_FLXATTR) of @name to @newval
- * or
- * remove the xattr @name (@valid has OBD_MD_FLXATTRRM set) from @inode.
- * @flags is either XATTR_CREATE or XATTR_REPLACE as defined by setxattr(2)
- *
- * \retval 0 no error occured
- * \retval -EPROTO network protocol error
- * \retval -ENOMEM not enough memory for the cache
- * \retval -ERANGE the buffer is not large enough
- * \retval -ENODATA no such attr (in the removal case)
- */
-int ll_xattr_cache_update(struct inode *inode,
- const char *name,
- const char *newval,
- size_t size,
- __u64 valid,
- int flags)
-{
- struct lookup_intent oit = { .it_op = IT_SETXATTR };
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_capa *oc;
- int rc;
-
-
-
- LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRRM));
-
- rc = ll_xattr_cache_refill(inode, &oit);
- if (rc)
- return rc;
-
- oc = ll_mdscapa_get(inode);
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
- valid | OBD_MD_FLXATTRLOCKED, name, newval,
- size, 0, flags, ll_i2suppgid(inode), &req);
- capa_put(oc);
-
- if (rc) {
- ll_intent_drop_lock(&oit);
- GOTO(out, rc);
- }
-
- if (valid & OBD_MD_FLXATTR)
- rc = ll_xattr_cache_add(&lli->lli_xattrs, name, newval, size);
- else if (valid & OBD_MD_FLXATTRRM)
- rc = ll_xattr_cache_del(&lli->lli_xattrs, name);
-
- ll_intent_drop_lock(&oit);
- GOTO(out, rc);
-out:
- up_write(&lli->lli_xattrs_list_rwsem);
- ptlrpc_req_finished(req);
-
- return rc;
-}
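With the write-through update path removed, the xattr cache is populated only on getxattr; oversized values make the cache refill return -EAGAIN so the caller falls back to an uncached MDS request. A hedged outline of the resulting flow (demo_mds_getxattr() stands in for the md_getxattr() branch in ll_getxattr_common()):

static int demo_getxattr(struct inode *inode, const char *name,
			 void *buf, size_t size, __u64 valid)
{
	int rc;

	rc = ll_xattr_cache_get(inode, name, buf, size, valid);
	if (rc == -EAGAIN)	/* value too large to cache */
		rc = demo_mds_getxattr(inode, name, buf, size, valid);
	return rc;
}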
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 56dedceaf0a0..9ba5a0a57390 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -119,7 +119,6 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
CDEBUG(D_INODE, "REMOTE_INTENT with fid="DFID" -> mds #%d\n",
PFID(&body->fid1), tgt->ltd_idx);
- it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
flags, &req, cb_blocking, extra_lock_flags);
if (rc)
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 1bddd8f62fbf..3ba0a0a1d945 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -1744,7 +1744,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
it->d.lustre.it_data = NULL;
fid1 = body->fid1;
- it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
ptlrpc_req_finished(req);
tgt = lmv_find_target(lmv, &fid1);
@@ -2593,7 +2592,7 @@ int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
int lmv_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2603,7 +2602,7 @@ int lmv_set_open_replay_data(struct obd_export *exp,
if (IS_ERR(tgt))
return PTR_ERR(tgt);
- return md_set_open_replay_data(tgt->ltd_exp, och, open_req);
+ return md_set_open_replay_data(tgt->ltd_exp, och, it);
}
int lmv_clear_open_replay_data(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index b355d01410e4..5d5c3081c467 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -87,8 +87,9 @@ static int lmv_placement_seq_show(struct seq_file *m, void *v)
#define MAX_POLICY_STRING_SIZE 64
-static ssize_t lmv_placement_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t lmv_placement_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
char dummy[MAX_POLICY_STRING_SIZE + 1];
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index e6c60151dc65..6f356e025543 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -98,7 +98,7 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
OBD_ALLOC_LARGE(lsm, *size);
if (!lsm)
- return NULL;;
+ return NULL;
for (i = 0; i < stripe_count; i++) {
OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 5a6ab70ed0a1..65133ea308b6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -194,6 +194,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
sub_io->ci_lockreq = io->ci_lockreq;
sub_io->ci_type = io->ci_type;
sub_io->ci_no_srvlock = io->ci_no_srvlock;
+ sub_io->ci_noatime = io->ci_noatime;
lov_sub_enter(sub);
result = cl_io_sub_init(sub->sub_env, sub_io,
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 50a77c5ef69a..02509d0cb106 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -339,7 +339,7 @@ static int lov_disconnect(struct obd_export *exp)
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) {
/* Disconnection is the last we know about an obd */
- lov_del_target(obd, i, 0, lov->lov_tgts[i]->ltd_gen);
+ lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen);
}
}
obd_putref(obd);
@@ -644,7 +644,7 @@ out:
if (rc) {
CERROR("add failed (%d), deleting %s\n", rc,
obd_uuid2str(&tgt->ltd_uuid));
- lov_del_target(obd, index, 0, 0);
+ lov_del_target(obd, index, NULL, 0);
}
obd_putref(obd);
return rc;
@@ -768,7 +768,7 @@ void lov_fix_desc(struct lov_desc *desc)
int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- struct lprocfs_static_vars lvars = { 0 };
+ struct lprocfs_static_vars lvars = { NULL };
struct lov_desc *desc;
struct lov_obd *lov = &obd->u.lov;
int rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index df8b5b5b7cf4..d6b2cb45b938 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -122,8 +122,8 @@ static struct cl_object *lov_sub_find(const struct lu_env *env,
}
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
- struct cl_object *stripe,
- struct lov_layout_raid0 *r0, int idx)
+ struct cl_object *stripe, struct lov_layout_raid0 *r0,
+ int idx)
{
struct cl_object_header *hdr;
struct cl_object_header *subhdr;
@@ -144,7 +144,6 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
hdr = cl_object_header(lov2cl(lov));
subhdr = cl_object_header(stripe);
- parent = subhdr->coh_parent;
oinfo = lov->lo_lsm->lsm_oinfo[idx];
CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
@@ -153,8 +152,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
oinfo->loi_ost_idx, oinfo->loi_ost_gen);
+ /* reuse ->coh_attr_guard to protect coh_parent change */
+ spin_lock(&subhdr->coh_attr_guard);
+ parent = subhdr->coh_parent;
if (parent == NULL) {
subhdr->coh_parent = hdr;
+ spin_unlock(&subhdr->coh_attr_guard);
subhdr->coh_nesting = hdr->coh_nesting + 1;
lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
r0->lo_sub[idx] = cl2lovsub(stripe);
@@ -166,6 +169,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
struct lov_object *old_lov;
unsigned int mask = D_INODE;
+ spin_unlock(&subhdr->coh_attr_guard);
old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
LASSERT(old_obj != NULL);
old_lov = cl2lov(lu2cl(old_obj));
@@ -306,7 +310,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
* ->lo_sub[] slot in lovsub_object_fini() */
if (r0->lo_sub[idx] == los) {
waiter = &lov_env_info(env)->lti_waiter;
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
while (1) {
@@ -316,7 +320,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
spin_unlock(&r0->lo_sub_lock);
- waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
} else {
spin_unlock(&r0->lo_sub_lock);
set_current_state(TASK_RUNNING);
@@ -508,7 +512,7 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
return result;
}
-const static struct lov_layout_operations lov_dispatch[] = {
+static const struct lov_layout_operations lov_dispatch[] = {
[LLT_EMPTY] = {
.llo_init = lov_init_empty,
.llo_delete = lov_delete_empty,
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 27ed27e6fa6a..74200cf1b331 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -339,7 +339,8 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
*lsmp = NULL;
LASSERT(atomic_read(&lsm->lsm_refc) > 0);
- if ((refc = atomic_dec_return(&lsm->lsm_refc)) == 0) {
+ refc = atomic_dec_return(&lsm->lsm_refc);
+ if (refc == 0) {
LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index ca81cac9041c..a5481d7eb5d6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -50,7 +50,7 @@ static void lov_init_set(struct lov_request_set *set)
atomic_set(&set->set_completes, 0);
atomic_set(&set->set_success, 0);
atomic_set(&set->set_finish_checked, 0);
- set->set_cookies = 0;
+ set->set_cookies = NULL;
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index 998ea1cbc7bb..926c35a25ceb 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -131,6 +131,10 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
struct lovsub_device *lsd = lu2lovsub_dev(d);
struct lu_device *next = cl2lu_dev(lsd->acid_next);
+ if (atomic_read(&d->ld_ref) && d->ld_site) {
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
+ lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
+ }
cl_device_fini(lu2cl_dev(d));
OBD_FREE_PTR(lsd);
return next;
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index 428ffd8c37b7..e44b7a532de7 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -55,7 +55,7 @@
struct lprocfs_stats *obd_memory = NULL;
EXPORT_SYMBOL(obd_memory);
-/* refine later and change to seqlock or simlar from libcfs */
+/* refine later and change to seqlock or similar from libcfs */
/* Debugging check only needed during development */
#ifdef OBD_CTXT_DEBUG
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 506982996c0e..c78bf003c2c5 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -101,7 +101,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lustre_handle *lockh, void *lmm, int lmmsize,
struct ptlrpc_request **req, __u64 extra_lock_flags);
-int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
struct list_head *cancels, ldlm_mode_t mode,
__u64 bits);
/* mdc/mdc_request.c */
@@ -122,7 +122,7 @@ int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md);
int mdc_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req);
+ struct lookup_intent *it);
int mdc_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 91f6876dac3f..5b9f37141512 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -197,7 +197,7 @@ static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
if (flags & FMODE_EXEC)
cr_flags |= MDS_FMODE_EXEC;
#endif
- if (flags & O_LOV_DELAY_CREATE)
+ if (cl_is_lov_delay_create(flags))
cr_flags |= MDS_OPEN_DELAY_CREATE;
if (flags & O_NONBLOCK)
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 8aa7c80c2002..53022ec390f0 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -37,15 +37,15 @@
#define DEBUG_SUBSYSTEM S_MDC
# include <linux/module.h>
-# include <linux/pagemap.h>
-# include <linux/miscdevice.h>
-#include <lustre_acl.h>
+#include <linux/lustre_intent.h>
+#include <obd.h>
#include <obd_class.h>
#include <lustre_dlm.h>
-/* fid_res_name_eq() */
-#include <lustre_fid.h>
-#include <lprocfs_status.h>
+#include <lustre_fid.h> /* fid_res_name_eq() */
+#include <lustre_mdc.h>
+#include <lustre_net.h>
+#include <lustre_req_layout.h>
#include "mdc_internal.h"
struct mdc_getattr_args {
@@ -121,7 +121,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
struct ldlm_lock *lock;
struct inode *new_inode = data;
- if(bits)
+ if (bits)
*bits = 0;
if (!*lockh)
@@ -160,6 +160,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
ldlm_mode_t rc;
fid_build_reg_res_name(fid, &res_id);
+ /* LU-4405: Clear bits not supported by server */
+ policy->l_inodebits.bits &= exp_connect_ibits(exp);
rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
&res_id, type, policy, mode, lockh, 0);
return rc;
@@ -194,7 +196,7 @@ int mdc_null_inode(struct obd_export *exp,
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if(res == NULL)
+ if (res == NULL)
return 0;
lock_res(res);
@@ -334,9 +336,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
max(lmmsize, obddev->u.cli.cl_default_mds_easize));
rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
- if (rc) {
+ if (rc < 0) {
ptlrpc_request_free(req);
- return NULL;
+ return ERR_PTR(rc);
}
spin_lock(&req->rq_lock);
@@ -378,13 +380,6 @@ mdc_intent_getxattr_pack(struct obd_export *exp,
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
- if (it->it_op == IT_SETXATTR)
- /* If we want to upgrade to LCK_PW, let's cancel LCK_PR
- * locks now. This avoids unnecessary ASTs. */
- count = mdc_resource_get_unused(exp, &op_data->op_fid1,
- &cancels, LCK_PW,
- MDS_INODELOCK_XATTR);
-
rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
@@ -646,7 +641,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* happens immediately after swabbing below, new reply
* is swabbed by that handler correctly.
*/
- mdc_set_open_replay_data(NULL, NULL, req);
+ mdc_set_open_replay_data(NULL, NULL, it);
}
if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
@@ -758,6 +753,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
/* install lvb_data */
lock_res_and_lock(lock);
if (lock->l_lvb_data == NULL) {
+ lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lmm;
lock->l_lvb_len = lvb_len;
lmm = NULL;
@@ -842,7 +838,7 @@ resend:
return -EOPNOTSUPP;
req = mdc_intent_layout_pack(exp, it, op_data);
lvb_type = LVB_T_LAYOUT;
- } else if (it->it_op & (IT_GETXATTR | IT_SETXATTR)) {
+ } else if (it->it_op & IT_GETXATTR) {
req = mdc_intent_getxattr_pack(exp, it, op_data);
} else {
LBUG();
@@ -880,7 +876,7 @@ resend:
rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL,
0, lvb_type, lockh, 0);
if (!it) {
- /* For flock requests we immediatelly return without further
+ /* For flock requests we immediately return without further
delay and let caller deal with the rest, since rest of
this function metadata processing makes no sense for flock
requests anyway. But in case of problem during comms with
@@ -973,7 +969,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
- it_set_disposition(it, DISP_ENQ_COMPLETE);
/* Also: did we find the same inode? */
/* server can return one of two fids:
@@ -1068,7 +1063,23 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
fid_build_reg_res_name(fid, &res_id);
switch (it->it_op) {
case IT_GETATTR:
- policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
+ /* File attributes are held under multiple bits:
+ * nlink is under lookup lock, size and times are
+ * under UPDATE lock and recently we've also got
+ * a separate permissions lock for owner/group/acl that
+ * were protected by lookup lock before.
+ * Getattr must provide all of that information,
+ * so we need to ensure we have all of those locks.
+ * Unfortunately, if the bits are split across multiple
+ * locks, there's no easy way to match all of them here,
+ * so an extra RPC would be performed to fetch all
+ * of those bits at once for now. */
+	 /* For new MDTs (> 2.4), UPDATE|PERM should be enough,
+	  * but for old MDTs (< 2.4), permission is covered
+	  * by LOOKUP lock, so it needs to match all bits here. */
+ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_PERM;
break;
case IT_LAYOUT:
policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
@@ -1077,10 +1088,11 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;
break;
}
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED, &res_id,
+
+ mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid,
LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW,
+ &lockh);
}
if (mode) {
@@ -1127,6 +1139,12 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
ldlm_blocking_callback cb_blocking,
__u64 extra_lock_flags)
{
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(it),
+ .ei_cb_bl = cb_blocking,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
struct lustre_handle lockh;
int rc = 0;
@@ -1152,42 +1170,19 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- /* lookup_it may be called only after revalidate_it has run, because
- * revalidate_it cannot return errors, only zero. Returning zero causes
- * this call to lookup, which *can* return an error.
- *
- * We only want to execute the request associated with the intent one
- * time, however, so don't send the request again. Instead, skip past
- * this and use the request from revalidate. In this case, revalidate
- * never dropped its reference, so the refcounts are all OK */
- if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = it_to_lock_mode(it),
- .ei_cb_bl = cb_blocking,
- .ei_cb_cp = ldlm_completion_ast,
- };
-
- /* For case if upper layer did not alloc fid, do it now. */
- if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
- if (rc < 0) {
- CERROR("Can't alloc new fid, rc %d\n", rc);
- return rc;
- }
- }
- rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
- lmm, lmmsize, NULL, extra_lock_flags);
- if (rc < 0)
+ /* For case if upper layer did not alloc fid, do it now. */
+ if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("Can't alloc new fid, rc %d\n", rc);
return rc;
- } else if (!fid_is_sane(&op_data->op_fid2) ||
- !(it->it_create_mode & M_CHECK_STALE)) {
- /* DISP_ENQ_COMPLETE set means there is extra reference on
- * request referenced from this intent, saved for subsequent
- * lookup. This path is executed when we proceed to this
- * lookup, so we clear DISP_ENQ_COMPLETE */
- it_clear_disposition(it, DISP_ENQ_COMPLETE);
+ }
}
+ rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh, lmm, lmmsize, NULL,
+ extra_lock_flags);
+ if (rc < 0)
+ return rc;
+
*reqp = it->d.lustre.it_data;
rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
return rc;
@@ -1269,8 +1264,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);
rc = mdc_enter_request(&obddev->u.cli);
if (rc != 0) {
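
The packing helpers in mdc_locks.c above now fail with ERR_PTR(rc) instead of NULL, and callers such as mdc_intent_getattr_async() switch to IS_ERR()/PTR_ERR() so the real errno is propagated. A minimal sketch of that kernel error-pointer idiom, not part of this patch (struct foo and foo_alloc() are illustrative only):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo {
		int len;
		char data[];
	};

	/* Allocator that encodes an errno in the returned pointer on failure. */
	static struct foo *foo_alloc(int len)
	{
		struct foo *f;

		if (len <= 0)
			return ERR_PTR(-EINVAL);

		f = kzalloc(sizeof(*f) + len, GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);

		f->len = len;
		return f;
	}

	/* Caller side, mirroring the IS_ERR()/PTR_ERR() checks added above. */
	static int foo_use(int len)
	{
		struct foo *f = foo_alloc(len);

		if (IS_ERR(f))
			return PTR_ERR(f);
		/* ... use f ... */
		kfree(f);
		return 0;
	}
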
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 9f3a345f34e0..d79aa1641fef 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -66,7 +66,7 @@ static int mdc_reint(struct ptlrpc_request *request,
/* Find and cancel locally locks matched by inode @bits & @mode in the resource
* found by @fid. Found locks are added into @cancel list. Returns the amount of
* locks added to @cancels list. */
-int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
struct list_head *cancels, ldlm_mode_t mode,
__u64 bits)
{
@@ -165,6 +165,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
req->rq_cb_data = *mod;
(*mod)->mod_open_req = req;
req->rq_commit_cb = mdc_commit_open;
+ (*mod)->mod_is_create = true;
/**
* Take an extra reference on \var mod, it protects \var
* mod from being freed on eviction (commit callback is
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..bde9f93c149b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -355,10 +355,32 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
input_size);
}
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
+ /* Flush local XATTR locks to get rid of a possible cancel RPC */
+ if (opcode == MDS_REINT && fid_is_sane(fid) &&
+ exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
+ LIST_HEAD(cancels);
+ int count;
+
+ /* Without this, packing would fail */
+ if (input_size == 0)
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
+ RCL_CLIENT, 0);
+
+ count = mdc_resource_get_unused(exp, fid,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_XATTR);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ } else {
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
}
if (opcode == MDS_REINT) {
@@ -700,11 +722,12 @@ void mdc_commit_open(struct ptlrpc_request *req)
int mdc_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
- struct ptlrpc_request *open_req)
+ struct lookup_intent *it)
{
struct md_open_data *mod;
struct mdt_rec_create *rec;
struct mdt_body *body;
+ struct ptlrpc_request *open_req = it->d.lustre.it_data;
struct obd_import *imp = open_req->rq_import;
if (!open_req->rq_replay)
@@ -738,6 +761,8 @@ int mdc_set_open_replay_data(struct obd_export *exp,
spin_lock(&open_req->rq_lock);
och->och_mod = mod;
mod->mod_och = och;
+ mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
+ it_disposition(it, DISP_OPEN_STRIPE);
mod->mod_open_req = open_req;
open_req->rq_cb_data = mod;
open_req->rq_commit_cb = mdc_commit_open;
@@ -758,6 +783,23 @@ int mdc_set_open_replay_data(struct obd_export *exp,
return 0;
}
+static void mdc_free_open(struct md_open_data *mod)
+{
+ int committed = 0;
+
+ if (mod->mod_is_create == 0 &&
+ imp_connect_disp_stripe(mod->mod_open_req->rq_import))
+ committed = 1;
+
+ LASSERT(mod->mod_open_req->rq_replay == 0);
+
+ DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
+
+ ptlrpc_request_committed(mod->mod_open_req, committed);
+ if (mod->mod_close_req)
+ ptlrpc_request_committed(mod->mod_close_req, committed);
+}
+
int mdc_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och)
{
@@ -771,6 +813,8 @@ int mdc_clear_open_replay_data(struct obd_export *exp,
return 0;
LASSERT(mod != LP_POISON);
+ LASSERT(mod->mod_open_req != NULL);
+ mdc_free_open(mod);
mod->mod_och = NULL;
och->och_mod = NULL;
@@ -969,6 +1013,9 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
if (mod) {
if (rc != 0)
mod->mod_close_req = NULL;
+ LASSERT(mod->mod_open_req != NULL);
+ mdc_free_open(mod);
+
/* Since now, mod is accessed through setattr req only,
* thus DW req does not keep a reference on mod anymore. */
obd_mod_put(mod);
@@ -1430,7 +1477,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1550,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1527,8 +1574,8 @@ static int mdc_changelog_send_thread(void *csdata)
rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
/* Send EOF no matter what our result */
- if ((kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch),
- cs->cs_flags))) {
+ kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
+ if (kuch) {
kuch->kuc_msgtype = CL_EOF;
libcfs_kkuc_msg_put(cs->cs_fp, kuch);
}
@@ -1540,7 +1587,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
@@ -1650,11 +1697,16 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
if (rc)
CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
- if (req->rq_repmsg &&
- (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
- *oqctl = *oqc;
+ if (req->rq_repmsg) {
+ oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ if (oqc) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+			CERROR("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
} else if (!rc) {
- CERROR ("Can't unpack obd_quotactl\n");
+ CERROR("Can't unpack obd_quotactl\n");
rc = -EPROTO;
}
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3bdbb94e020f..de9fb1433edd 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -56,7 +56,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
{
__u64 resname = 0;
- if (len > 8) {
+ if (len > sizeof(resname)) {
CERROR("name too long: %s\n", name);
return -EINVAL;
}
@@ -76,6 +76,7 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
resname = 0;
break;
case CONFIG_T_RECOVER:
+ case CONFIG_T_PARAMS:
resname = type;
break;
default:
@@ -101,10 +102,13 @@ int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
int len;
/* logname consists of "fsname-nodetype".
- * e.g. "lustre-MDT0001", "SUN-000-client" */
+ * e.g. "lustre-MDT0001", "SUN-000-client"
+ * there is an exception: llog "params" */
name_end = strrchr(logname, '-');
- LASSERT(name_end);
- len = name_end - logname;
+ if (!name_end)
+ len = strlen(logname);
+ else
+ len = name_end - logname;
return mgc_name2resid(logname, len, res_id, type);
}
@@ -140,6 +144,8 @@ static void config_log_put(struct config_llog_data *cld)
config_log_put(cld->cld_recover);
if (cld->cld_sptlrpc)
config_log_put(cld->cld_sptlrpc);
+ if (cld->cld_params)
+ config_log_put(cld->cld_params);
if (cld_is_sptlrpc(cld))
sptlrpc_conf_log_stop(cld->cld_logname);
@@ -271,6 +277,19 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
return cld;
}
+static struct config_llog_data *config_params_log_add(struct obd_device *obd,
+ struct config_llog_instance *cfg, struct super_block *sb)
+{
+ struct config_llog_instance lcfg = *cfg;
+ struct config_llog_data *cld;
+
+ lcfg.cfg_instance = sb;
+
+ cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
+ &lcfg, sb);
+
+ return cld;
+}
/** Add this log to the list of active logs watched by an MGC.
* Active means we're watching for updates.
@@ -284,8 +303,10 @@ static int config_log_add(struct obd_device *obd, char *logname,
struct lustre_sb_info *lsi = s2lsi(sb);
struct config_llog_data *cld;
struct config_llog_data *sptlrpc_cld;
- char seclogname[32];
- char *ptr;
+ struct config_llog_data *params_cld;
+ char seclogname[32];
+ char *ptr;
+ int rc;
CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
@@ -308,32 +329,49 @@ static int config_log_add(struct obd_device *obd, char *logname,
CONFIG_T_SPTLRPC, NULL, NULL);
if (IS_ERR(sptlrpc_cld)) {
CERROR("can't create sptlrpc log: %s\n", seclogname);
- return PTR_ERR(sptlrpc_cld);
+ GOTO(out_err, rc = PTR_ERR(sptlrpc_cld));
}
}
+ params_cld = config_params_log_add(obd, cfg, sb);
+ if (IS_ERR(params_cld)) {
+ rc = PTR_ERR(params_cld);
+ CERROR("%s: can't create params log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_err1, rc);
+ }
cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
if (IS_ERR(cld)) {
CERROR("can't create log: %s\n", logname);
- config_log_put(sptlrpc_cld);
- return PTR_ERR(cld);
+ GOTO(out_err2, rc = PTR_ERR(cld));
}
cld->cld_sptlrpc = sptlrpc_cld;
+ cld->cld_params = params_cld;
LASSERT(lsi->lsi_lmd);
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
struct config_llog_data *recover_cld;
*strrchr(seclogname, '-') = 0;
recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
- if (IS_ERR(recover_cld)) {
- config_log_put(cld);
- return PTR_ERR(recover_cld);
- }
+ if (IS_ERR(recover_cld))
+ GOTO(out_err3, rc = PTR_ERR(recover_cld));
cld->cld_recover = recover_cld;
}
return 0;
+
+out_err3:
+ config_log_put(cld);
+
+out_err2:
+ config_log_put(params_cld);
+
+out_err1:
+ config_log_put(sptlrpc_cld);
+
+out_err:
+ return rc;
}
DEFINE_MUTEX(llog_process_lock);
@@ -344,6 +382,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
{
struct config_llog_data *cld;
struct config_llog_data *cld_sptlrpc = NULL;
+ struct config_llog_data *cld_params = NULL;
struct config_llog_data *cld_recover = NULL;
int rc = 0;
@@ -382,11 +421,20 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
spin_lock(&config_list_lock);
cld_sptlrpc = cld->cld_sptlrpc;
cld->cld_sptlrpc = NULL;
+ cld_params = cld->cld_params;
+ cld->cld_params = NULL;
spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
+ if (cld_params) {
+ mutex_lock(&cld_params->cld_lock);
+ cld_params->cld_stopping = 1;
+ mutex_unlock(&cld_params->cld_lock);
+ config_log_put(cld_params);
+ }
+
/* drop the ref from the find */
config_log_put(cld);
/* drop the start ref */
@@ -1584,7 +1632,7 @@ static int mgc_llog_local_copy(const struct lu_env *env,
/*
* - copy it to backup using llog_backup()
* - copy remote llog to logname using llog_backup()
- * - if failed then move bakup to logname again
+ * - if failed then move backup to logname again
*/
OBD_ALLOC(temp_log, strlen(logname) + 1);
@@ -1664,7 +1712,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
LCONSOLE_ERROR_MSG(0x13a,
"Failed to get MGS log %s and no local copy.\n",
cld->cld_logname);
- GOTO(out_pop, rc = -ENOTCONN);
+ GOTO(out_pop, rc = -ENOENT);
}
CDEBUG(D_MGC,
"Failed to get MGS log %s, using local copy for now, will try to update later.\n",
@@ -1863,6 +1911,20 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
if (rc)
CERROR("Cannot process recover llog %d\n", rc);
}
+
+ if (rc == 0 && cld->cld_params != NULL) {
+ rc = mgc_process_log(obd, cld->cld_params);
+ if (rc == -ENOENT) {
+ CDEBUG(D_MGC,
+ "There is no params config file yet\n");
+ rc = 0;
+ }
+ /* params log is optional */
+ if (rc)
+ CERROR(
+ "%s: can't process params llog: rc = %d\n",
+ obd->obd_name, rc);
+ }
config_log_put(cld);
break;
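
config_log_add() above gains the usual kernel goto-unwind error path (out_err3 down to out_err), releasing whatever was set up so far in reverse order of acquisition. A generic sketch of the pattern, not taken from this patch; acquire_a/b/c() and release_a/b() are hypothetical helpers:

	static int setup_three(void)
	{
		int rc;

		rc = acquire_a();
		if (rc)
			goto out;

		rc = acquire_b();
		if (rc)
			goto out_a;

		rc = acquire_c();
		if (rc)
			goto out_b;

		return 0;		/* everything held; nothing to undo */

	out_b:
		release_b();		/* undo in reverse order of acquisition */
	out_a:
		release_a();
	out:
		return rc;
	}
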
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index e048500edd60..3bebc78e7673 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -942,7 +942,7 @@ int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
struct cl_page *page;
int result = 0;
- CERROR("Canceling ongoing page trasmission\n");
+ CERROR("Canceling ongoing page transmission\n");
cl_page_list_for_each(page, queue) {
int rc;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 749eb082f979..d795cef3f164 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -932,7 +932,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
* LU-305 */
blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&lock->cll_wq, &waiter);
set_current_state(TASK_INTERRUPTIBLE);
cl_lock_mutex_put(env, lock);
@@ -943,7 +943,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
* can be restarted if signals are pending here */
result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- waitq_wait(&waiter, TASK_INTERRUPTIBLE);
+ schedule();
if (!cfs_signal_pending())
result = 0;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 1a926036724b..0fc256f59e92 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -508,7 +508,7 @@ EXPORT_SYMBOL(cl_site_stats_print);
* about journal_info. Currently following fields in task_struct are identified
* can be used for this purpose:
* - cl_env: for liblustre.
- * - tux_info: ony on RedHat kernel.
+ * - tux_info: only on RedHat kernel.
* - ...
* \note As long as we use task_struct to store cl_env, we assume that once
* called into Lustre, we'll never call into the other part of the kernel
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index d9f750d42c80..f2bdea33041d 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -1010,6 +1010,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
INIT_LIST_HEAD(&imp->imp_replay_list);
INIT_LIST_HEAD(&imp->imp_sending_list);
INIT_LIST_HEAD(&imp->imp_delayed_list);
+ INIT_LIST_HEAD(&imp->imp_committed_list);
+ imp->imp_replay_cursor = &imp->imp_committed_list;
spin_lock_init(&imp->imp_lock);
imp->imp_last_success_conn = 0;
imp->imp_state = LUSTRE_IMP_NEW;
@@ -1532,8 +1534,8 @@ void obd_exports_barrier(struct obd_device *obd)
spin_lock(&obd->obd_dev_lock);
while (!list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(waited));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
"more than %d seconds. "
@@ -1625,7 +1627,7 @@ static int obd_zombie_impexp_check(void *arg)
}
/**
- * Add export to the obd_zombe thread and notify it.
+ * Add export to the obd_zombie thread and notify it.
*/
static void obd_zombie_export_add(struct obd_export *exp) {
spin_lock(&exp->exp_obd->obd_dev_lock);
@@ -1641,7 +1643,7 @@ static void obd_zombie_export_add(struct obd_export *exp) {
}
/**
- * Add import to the obd_zombe thread and notify it.
+ * Add import to the obd_zombie thread and notify it.
*/
static void obd_zombie_import_add(struct obd_import *imp) {
LASSERT(imp->imp_sec == NULL);
@@ -1661,7 +1663,7 @@ static void obd_zombie_import_add(struct obd_import *imp) {
static void obd_zombie_impexp_notify(void)
{
/*
- * Make sure obd_zomebie_impexp_thread get this notification.
+ * Make sure obd_zombie_impexp_thread get this notification.
* It is possible this signal only get by obd_zombie_barrier, and
* barrier gulps this notification and sleeps away and hangs ensues
*/
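
The hunks above replace the libcfs wrapper schedule_timeout_and_set_state() with the stock scheduler calls. A minimal sketch of that standard timed-sleep pattern (the one-second delay is only an example):

	#include <linux/sched.h>
	#include <linux/jiffies.h>

	/* Sleep uninterruptibly for roughly one second. */
	static void wait_one_second(void)
	{
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}
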
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 121a856d5052..ba20776ebfa1 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -184,7 +184,7 @@ static long obd_class_ioctl(struct file *filp, unsigned int cmd,
int err = 0;
/* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
- if (!cfs_capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
+ if (!capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
return err = -EACCES;
if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
return err = -ENOTTY;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index c0f3af725747..1d999310ec92 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -551,9 +551,8 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
if (rec->lrh_index < d->lpd_startcat)
/* Skip processing of the logs until startcat */
- return 0;
-
- if (d->lpd_startidx > 0) {
+ rc = 0;
+ else if (d->lpd_startidx > 0) {
struct llog_process_cat_data cd;
cd.lpcd_first_idx = d->lpd_startidx;
@@ -566,6 +565,7 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
NULL, false);
}
+
llog_handle_put(llh);
return rc;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
index 5385d8e658cf..d86bb8c60354 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
@@ -376,7 +376,7 @@ static void llog_skip_over(__u64 *off, int curr, int goal)
/* sets:
* - cur_offset to the furthest point read in the log file
- * - cur_idx to the log index preceeding cur_offset
+ * - cur_idx to the log index preceding cur_offset
* returns -EIO/-EINVAL on error
*/
static int llog_lvfs_next_block(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_osd.c b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
index 654c8e189653..682279de8bea 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_osd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
@@ -514,7 +514,7 @@ static void llog_skip_over(__u64 *off, int curr, int goal)
/* sets:
* - cur_offset to the furthest point read in the log file
- * - cur_idx to the log index preceeding cur_offset
+ * - cur_idx to the log index preceding cur_offset
* returns -EIO/-EINVAL on error
*/
static int llog_osd_next_block(const struct lu_env *env,
@@ -1073,7 +1073,7 @@ static int llog_osd_setup(const struct lu_env *env, struct obd_device *obd,
LASSERT(ctxt);
/* initialize data allowing to generate new fids,
- * literally we need a sequece */
+ * literally we need a sequence */
lgi->lgi_fid.f_seq = FID_SEQ_LLOG;
lgi->lgi_fid.f_oid = 1;
lgi->lgi_fid.f_ver = 0;
@@ -1280,7 +1280,7 @@ int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
lgi->lgi_buf.lb_len = size;
rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
if (rc)
- CDEBUG(D_INODE, "error writeing CATALOGS: rc = %d\n", rc);
+ CDEBUG(D_INODE, "error writing CATALOGS: rc = %d\n", rc);
out_trans:
dt_trans_stop(env, d, th);
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
index e79e4beb3628..e76f7d044231 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.c
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -737,7 +737,7 @@ int lastid_compat_check(const struct lu_env *env, struct dt_device *dev,
* All dynamic fids will be generated with the same sequence and incremented
* OIDs
*
- * Returned local_oid_storage is in-memory representaion of OID storage
+ * Returned local_oid_storage is in-memory representation of OID storage
*/
int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
const struct lu_fid *first_fid,
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index ec3b605dae6b..1432dd74fe95 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -98,6 +98,8 @@ static const char * const obd_connect_names[] = {
"lightweight_conn",
"short_io",
"pingless",
+ "flock_deadlock",
+ "disp_stripe",
"unknown",
NULL
};
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 9887d8fffb6e..92e8a15a5e5d 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -571,7 +571,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
* drained), and moreover, lookup has to wait until object is freed.
*/
- init_waitqueue_entry_current(waiter);
+ init_waitqueue_entry(waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, waiter);
set_current_state(TASK_UNINTERRUPTIBLE);
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -712,7 +712,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
* lu_object_find_try() already added waiter into the
* wait queue.
*/
- waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ schedule();
bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
}
@@ -890,10 +890,10 @@ static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
hash = fid_flatten32(fid);
hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_long(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+ hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
@@ -2100,7 +2100,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
EXPORT_SYMBOL(lu_object_assign_fid);
/**
- * allocates object with 0 (non-assiged) fid
+ * allocates object with 0 (non-assigned) fid
* XXX: temporary solution to be able to assign fid in ->do_create()
* till we have fully-functional OST fids
*/
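
cl_lock_state_wait(), htable_lookup() and lu_object_find_at() above switch from the libcfs helpers to the stock init_waitqueue_entry() + schedule() sequence. A hedged sketch of that open-coded wait loop; wq and cond() are placeholders, not Lustre symbols:

	#include <linux/sched.h>
	#include <linux/wait.h>
	#include <linux/types.h>

	/* Block the current task on @wq until @cond() becomes true. */
	static void wait_on_queue(wait_queue_head_t *wq, bool (*cond)(void))
	{
		wait_queue_t wait;

		init_waitqueue_entry(&wait, current);
		add_wait_queue(wq, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (cond())
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		remove_wait_queue(wq, &wait);
	}
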
diff --git a/drivers/staging/lustre/lustre/obdclass/md_attrs.c b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
index f7187829e276..f080cceb384c 100644
--- a/drivers/staging/lustre/lustre/obdclass/md_attrs.c
+++ b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(lustre_lma_init);
*/
void lustre_lma_swab(struct lustre_mdt_attrs *lma)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&lma->lma_compat);
__swab32s(&lma->lma_incompat);
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(lustre_lma_swab);
*/
void lustre_som_swab(struct som_attrs *attrs)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&attrs->som_compat);
__swab32s(&attrs->som_incompat);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(lustre_buf2som);
*/
void lustre_hsm_swab(struct hsm_attrs *attrs)
{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
+ /* Use LUSTRE_MSG_MAGIC to detect local endianness. */
if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
__swab32s(&attrs->hsm_compat);
__swab32s(&attrs->hsm_flags);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 362ae541b209..2d5777699f47 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -61,7 +61,8 @@ int class_find_param(char *buf, char *key, char **valp)
if (!buf)
return 1;
- if ((ptr = strstr(buf, key)) == NULL)
+ ptr = strstr(buf, key);
+ if (ptr == NULL)
return 1;
if (valp)
@@ -592,7 +593,7 @@ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
}
EXPORT_SYMBOL(class_detach);
-/** Start shutting down the obd. There may be in-progess ops when
+/** Start shutting down the obd. There may be in-progress ops when
* this is called. We tell them to start shutting down with a call
* to class_disconnect_exports().
*/
@@ -655,7 +656,7 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
/* The three references that should be remaining are the
* obd_self_export and the attach and setup references. */
if (atomic_read(&obd->obd_refcount) > 3) {
- /* refcounf - 3 might be the number of real exports
+ /* refcount - 3 might be the number of real exports
(excluding self export). But class_incref is called
by other things as well, so don't count on it. */
CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
@@ -1027,6 +1028,46 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
}
EXPORT_SYMBOL(lustre_cfg_rename);
+static int process_param2_config(struct lustre_cfg *lcfg)
+{
+ char *param = lustre_cfg_string(lcfg, 1);
+ char *upcall = lustre_cfg_string(lcfg, 2);
+ char *argv[] = {
+ [0] = "/usr/sbin/lctl",
+ [1] = "set_param",
+ [2] = param,
+ [3] = NULL
+ };
+ struct timeval start;
+ struct timeval end;
+ int rc;
+
+
+ /* Add upcall processing here. Now only lctl is supported */
+ if (strcmp(upcall, LCTL_UPCALL) != 0) {
+ CERROR("Unsupported upcall %s\n", upcall);
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&start);
+ rc = USERMODEHELPER(argv[0], argv, NULL);
+ do_gettimeofday(&end);
+
+ if (rc < 0) {
+ CERROR(
+ "lctl: error invoking upcall %s %s %s: rc = %d; time %ldus\n",
+ argv[0], argv[1], argv[2], rc,
+ cfs_timeval_sub(&end, &start, NULL));
+ } else {
+ CDEBUG(D_HA, "lctl: invoked upcall %s %s %s, time %ldus\n",
+ argv[0], argv[1], argv[2],
+ cfs_timeval_sub(&end, &start, NULL));
+ rc = 0;
+ }
+
+ return rc;
+}
+
void lustre_register_quota_process_config(int (*qpc)(struct lustre_cfg *lcfg))
{
quota_process_config = qpc;
@@ -1142,11 +1183,14 @@ int class_process_config(struct lustre_cfg *lcfg)
err = (*quota_process_config)(lcfg);
GOTO(out, err);
}
- /* Fall through */
+
break;
}
+ case LCFG_SET_PARAM: {
+ err = process_param2_config(lcfg);
+ GOTO(out, 0);
+ }
}
-
/* Commands that require a device */
obd = class_name2obd(lustre_cfg_string(lcfg, 0));
if (obd == NULL) {
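
process_param2_config() above shells out to lctl through the libcfs USERMODEHELPER() wrapper. The underlying kernel primitive is call_usermodehelper(); a hedged sketch of a direct call, with illustrative paths and environment:

	#include <linux/kmod.h>

	static int run_lctl_set_param(char *param)
	{
		char *argv[] = { "/usr/sbin/lctl", "set_param", param, NULL };
		char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

		/* UMH_WAIT_PROC: wait for the helper to exit and return its status */
		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	}
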
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index a69a630b596e..6f8ba54e9667 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -501,7 +501,7 @@ static int lustre_stop_mgc(struct super_block *sb)
}
/* The MGC has no recoverable data in any case.
- * force shotdown set in umount_begin */
+ * force shutdown set in umount_begin */
obd->obd_no_recov = 1;
if (obd->u.cli.cl_mgc_mgsexp) {
@@ -754,7 +754,7 @@ int server_name2index(const char *svname, __u32 *idx, const char **endptr)
}
EXPORT_SYMBOL(server_name2index);
-/*************** mount common betweeen server and client ***************/
+/*************** mount common between server and client ***************/
/* Common umount */
int lustre_common_put_super(struct super_block *sb)
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 70997648a4f3..3b1b28afd6b1 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -191,7 +191,7 @@ int obdo_cmp_md(struct obdo *dst, struct obdo *src, obd_flag compare)
}
if ( compare & OBD_MD_FLGENER )
res = (res || (dst->o_parent_oid != src->o_parent_oid));
- /* XXX Don't know if thses should be included here - wasn't previously
+ /* XXX Don't know if these should be included here - wasn't previously
if ( compare & OBD_MD_FLINLINE )
res = (res || memcmp(dst->o_inline, src->o_inline));
*/
@@ -233,7 +233,7 @@ void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
oa->o_mode = attr->ia_mode;
oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !cfs_capable(CFS_CAP_FSETID))
+ !capable(CFS_CAP_FSETID))
oa->o_mode &= ~S_ISGID;
}
if (ia_valid & ATTR_UID) {
@@ -282,7 +282,7 @@ void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid)
attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
- !cfs_capable(CFS_CAP_FSETID))
+ !capable(CFS_CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
if (valid & OBD_MD_FLUID) {
diff --git a/drivers/staging/lustre/lustre/obdecho/echo.c b/drivers/staging/lustre/lustre/obdecho/echo.c
index debb9cec4900..96a807f82ec1 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo.c
@@ -606,7 +606,8 @@ static int echo_cleanup(struct obd_device *obd)
/* XXX Bug 3413; wait for a bit to ensure the BL callback has
* happened before calling ldlm_namespace_free() */
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 9b2dea292363..754aa8e55665 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -997,8 +997,8 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_unlock(&ec->ec_lock);
CERROR("echo_client still has objects at cleanup time, "
"wait for 1 second\n");
- schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
- cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
spin_lock(&ec->ec_lock);
}
@@ -2764,7 +2764,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
@@ -2778,7 +2778,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
int dirlen;
__u64 id;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO(out, rc = -EPERM);
count = data->ioc_count;
@@ -2806,7 +2806,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
__u64 seq;
int max_count;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO(out, rc = -EPERM);
cl_env = cl_env_get(&refcheck);
@@ -2838,7 +2838,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
}
case OBD_IOC_DESTROY:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_get_object(&eco, ed, oa);
@@ -2863,7 +2863,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case OBD_IOC_SETATTR:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_get_object(&eco, ed, oa);
@@ -2878,7 +2878,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case OBD_IOC_BRW_WRITE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rw = OBD_BRW_WRITE;
@@ -2897,7 +2897,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case ECHO_IOC_SET_STRIPE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
if (data->ioc_pbuf1 == NULL) { /* unset */
@@ -2914,7 +2914,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO (out, rc);
case ECHO_IOC_ENQUEUE:
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CFS_CAP_SYS_ADMIN))
GOTO (out, rc = -EPERM);
rc = echo_client_enqueue(exp, oa,
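
The echo_client ioctl handlers above drop cfs_capable() in favor of the generic capable() check from <linux/capability.h>. A minimal sketch of that privilege gate; echo_privileged_op() is illustrative only:

	#include <linux/capability.h>
	#include <linux/errno.h>

	static int echo_privileged_op(void)
	{
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* ... privileged work ... */
		return 0;
	}
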
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index be4511e78c04..fe9989a49f81 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1020,7 +1020,7 @@ out:
/**
* This function is used to make the extent prepared for transfer.
- * A race with flusing page - ll_writepage() has to be handled cautiously.
+ * A race with flushing page - ll_writepage() has to be handled cautiously.
*/
static int osc_extent_make_ready(const struct lu_env *env,
struct osc_extent *ext)
@@ -2146,7 +2146,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_obj_off = offset;
LASSERT(!(offset & ~CFS_PAGE_MASK));
- if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
+ if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
INIT_LIST_HEAD(&oap->oap_pending_item);
@@ -2186,7 +2186,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
if (!client_is_remote(osc_export(osc)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
}
@@ -2394,6 +2394,12 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* really sending the RPC. */
case OES_TRUNC:
/* race with truncate, page will be redirtied */
+ case OES_ACTIVE:
+ /* The extent is active so we need to abort and let the caller
+ * re-dirty the page. If we continued on here, and we were the
+ * one making the extent active, we could deadlock waiting for
+ * the page writeback to clear but it won't because the extent
+ * is active and won't be written out. */
GOTO(out, rc = -EAGAIN);
default:
break;
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 681d60a7d5ef..5f3c545418d1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -297,7 +297,7 @@ static int osc_io_commit_write(const struct lu_env *env,
*/
osc_page_touch(env, cl2osc_page(slice), to);
if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE))
+ capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
if (oio->oi_lockless)
@@ -512,19 +512,15 @@ static int osc_io_read_start(const struct lu_env *env,
struct osc_io *oio = cl2osc_io(env, slice);
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- int result = 0;
+ int rc = 0;
- if (oio->oi_lockless == 0) {
+ if (oio->oi_lockless == 0 && !slice->cis_io->ci_noatime) {
cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- attr->cat_atime = LTIME_S(CURRENT_TIME);
- result = cl_object_attr_set(env, obj, attr,
- CAT_ATIME);
- }
+ attr->cat_atime = LTIME_S(CURRENT_TIME);
+ rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
cl_object_attr_unlock(obj);
}
- return result;
+ return rc;
}
static int osc_io_write_start(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 4909e486dc5c..96cb6e2b9c4e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -561,7 +561,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
if (!client_is_remote(osc_export(obj)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 6045a78a2baa..0235fabaaffe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -51,11 +51,8 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if (oqi) {
- obd_uid id = oqi->oqi_id;
-
- LASSERTF(id == qid[type],
- "The ids don't match %u != %u\n",
- id, qid[type]);
+ /* do not try to access oqi here, it could have been
+ * freed by osc_quota_setdq() */
/* the slot is busy, the user is about to run out of
* quota space on this OST */
@@ -268,11 +265,16 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
if (rc)
CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
- if (req->rq_repmsg &&
- (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
- *oqctl = *oqc;
+ if (req->rq_repmsg) {
+ oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ if (oqc) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+ CERROR("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
} else if (!rc) {
- CERROR ("Can't unpack obd_quotactl\n");
+ CERROR("Can't unpack obd_quotactl\n");
rc = -EPROTO;
}
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index d90efe408414..4c9e00695087 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -48,6 +48,7 @@
#include "ptlrpc_internal.h"
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+static int ptlrpcd_check_work(struct ptlrpc_request *req);
/**
* Initialize passed in client structure \a cl.
@@ -62,7 +63,7 @@ void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
EXPORT_SYMBOL(ptlrpc_init_client);
/**
- * Return PortalRPC connection for remore uud \a uuid
+ * Return PortalRPC connection for remote uuid \a uuid
*/
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
@@ -127,7 +128,7 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
* Prepare bulk descriptor for specified outgoing request \a req that
* can fit \a npages * pages. \a type is bulk type. \a portal is where
* the bulk to be sent. Used on client-side.
- * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
* error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
@@ -631,7 +632,7 @@ int ptlrpc_request_pack(struct ptlrpc_request *request,
/* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
* ptlrpc_body sent from server equal to local ptlrpc_body size, so we
- * have to send old ptlrpc_body to keep interoprability with these
+ * have to send old ptlrpc_body to keep interoperability with these
* clients.
*
* Only three kinds of server->client RPCs so far:
@@ -639,7 +640,7 @@ int ptlrpc_request_pack(struct ptlrpc_request *request,
* - LDLM_CP_CALLBACK
* - LDLM_GL_CALLBACK
*
- * XXX This should be removed whenever we drop the interoprability with
+ * XXX This should be removed whenever we drop the interoperability with
* the these old clients.
*/
if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
@@ -686,7 +687,7 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
/**
* Helper function for creating a request.
- * Calls __ptlrpc_request_alloc to allocate new request sturcture and inits
+ * Calls __ptlrpc_request_alloc to allocate new request structure and inits
* buffer structures according to capsule template \a format.
* Returns allocated request structure pointer or NULL on error.
*/
@@ -743,7 +744,7 @@ void ptlrpc_request_free(struct ptlrpc_request *request)
EXPORT_SYMBOL(ptlrpc_request_free);
/**
- * Allocate new request for operatione \a opcode and immediatelly pack it for
+ * Allocate new request for operation \a opcode and immediately pack it for
* network transfer.
* Only used for simple requests like OBD_PING where the only important
* part of the request is operation itself.
@@ -768,7 +769,7 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
/**
- * Prepare request (fetched from pool \a poolif not NULL) on import \a imp
+ * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
* for operation \a opcode. Request would contain \a count buffers.
* Sizes of buffers are described in array \a lengths and buffers themselves
* are provided by a pointer \a bufs.
@@ -1073,7 +1074,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
}
/**
- * Decide if the eror message regarding provided request \a req
+ * Decide if the error message regarding provided request \a req
* should be printed to the console or not.
* Makes it's decision on request status and other properties.
* Returns 1 to print error on the system console or 0 if not.
@@ -1159,7 +1160,7 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
/**
* Callback function called when client receives RPC reply for \a req.
* Returns 0 on success or error code.
- * The return alue would be assigned to req->rq_status by the caller
+ * The return value would be assigned to req->rq_status by the caller
* as request processing status.
* This function also decides if the request needs to be saved for later replay.
*/
@@ -1190,7 +1191,9 @@ static int after_reply(struct ptlrpc_request *req)
* will roundup it */
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
return 0;
}
@@ -1313,7 +1316,11 @@ static int after_reply(struct ptlrpc_request *req)
/** version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL) {
+ } else if (req->rq_commit_cb != NULL &&
+ list_empty(&req->rq_replay_list)) {
+ /* NB: don't call rq_commit_cb if it's already on
+ * rq_replay_list, ptlrpc_free_committed() will call
+ * it later, see LU-3618 for details */
spin_unlock(&imp->imp_lock);
req->rq_commit_cb(req);
spin_lock(&imp->imp_lock);
@@ -1408,7 +1415,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
req->rq_status = rc;
return 1;
} else {
+ spin_lock(&req->rq_lock);
req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
return 0;
}
}
@@ -1423,7 +1432,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
rc = ptl_send_rpc(req, 0);
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
+ spin_lock(&req->rq_lock);
req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
return rc;
}
return 0;
@@ -1688,6 +1699,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
spin_lock(&req->rq_lock);
req->rq_net_err = 1;
spin_unlock(&req->rq_lock);
+ continue;
}
/* need to reset the timeout */
force_timer_recalc = 1;
@@ -1773,6 +1785,10 @@ interpret:
ptlrpc_req_interpret(env, req, req->rq_status);
+ if (ptlrpcd_check_work(req)) {
+ atomic_dec(&set->set_remaining);
+ continue;
+ }
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
@@ -2031,7 +2047,7 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
/**
- * Send all unset request from the set and then wait untill all
+ * Send all unset request from the set and then wait until all
* requests in the set complete (either get a reply, timeout, get an
* error or otherwise be interrupted).
* Returns 0 on success or error code otherwise.
@@ -2156,7 +2172,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
EXPORT_SYMBOL(ptlrpc_set_wait);
/**
- * Helper fuction for request freeing.
+ * Helper function for request freeing.
* Called when request count reached zero and request needs to be freed.
* Removes request from all sorts of sending/replay lists it might be on,
* frees network buffers if any are present.
@@ -2223,7 +2239,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
/**
* Drop one request reference. Must be called with import imp_lock held.
- * When reference count drops to zero, reuqest is freed.
+ * When reference count drops to zero, request is freed.
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
@@ -2236,7 +2252,7 @@ EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
* Helper function
* Drops one reference count for request \a request.
* \a locked set indicates that caller holds import imp_lock.
- * Frees the request whe reference count reaches zero.
+ * Frees the request when reference count reaches zero.
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
@@ -2360,19 +2376,52 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
+static void ptlrpc_free_request(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ list_del_init(&req->rq_replay_list);
+
+ __ptlrpc_req_finished(req, 1);
+}
+
+/**
+ * the request is committed and dropped from the replay list of its import
+ */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
+{
+ struct obd_import *imp = req->rq_import;
+
+ spin_lock(&imp->imp_lock);
+ if (list_empty(&req->rq_replay_list)) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ ptlrpc_free_request(req);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_request_committed);
+
/**
* Iterates through replay_list on import and prunes
* all requests have transno smaller than last_committed for the
* import and don't have rq_replay set.
- * Since requests are sorted in transno order, stops when meetign first
+ * Since requests are sorted in transno order, stops when meeting first
* transno bigger than last_committed.
* caller must hold imp->imp_lock
*/
void ptlrpc_free_committed(struct obd_import *imp)
{
- struct list_head *tmp, *saved;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req, *saved;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+ bool skip_committed_list = true;
LASSERT(imp != NULL);
@@ -2388,13 +2437,15 @@ void ptlrpc_free_committed(struct obd_import *imp)
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
+
+ if (imp->imp_generation != imp->imp_last_generation_checked)
+ skip_committed_list = false;
+
imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
imp->imp_last_generation_checked = imp->imp_generation;
- list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
-
+ list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
+ rq_replay_list) {
/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
LASSERT(req != last_req);
last_req = req;
@@ -2408,27 +2459,34 @@ void ptlrpc_free_committed(struct obd_import *imp)
GOTO(free_req, 0);
}
- if (req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
- continue;
- }
-
/* not yet committed */
if (req->rq_transno > imp->imp_peer_committed_transno) {
DEBUG_REQ(D_RPCTRACE, req, "stopping search");
break;
}
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ list_move_tail(&req->rq_replay_list,
+ &imp->imp_committed_list);
+ continue;
+ }
+
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
- req->rq_commit_cb(req);
- list_del_init(&req->rq_replay_list);
- __ptlrpc_req_finished(req, 1);
+ ptlrpc_free_request(req);
+ }
+ if (skip_committed_list)
+ return;
+
+ list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
+ rq_replay_list) {
+ LASSERT(req->rq_transno != 0);
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
+ ptlrpc_free_request(req);
+ }
}
}
@@ -2585,7 +2643,7 @@ struct ptlrpc_replay_async_args {
/**
* Callback used for replayed requests reply processing.
- * In case of succesful reply calls registeresd request replay callback.
+ * In case of successful reply calls registered request replay callback.
* In case of error restart replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
@@ -2834,7 +2892,7 @@ void ptlrpc_init_xid(void)
ptlrpc_last_xid = (__u64)now << 20;
}
- /* Need to always be aligned to a power-of-two for mutli-bulk BRW */
+ /* Always need to be aligned to a power-of-two for multi-bulk BRW */
CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}
@@ -2904,22 +2962,50 @@ EXPORT_SYMBOL(ptlrpc_sample_next_xid);
* have delay before it really runs by ptlrpcd thread.
*/
struct ptlrpc_work_async_args {
- __u64 magic;
int (*cb)(const struct lu_env *, void *);
void *cbdata;
};
-#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
+{
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}
static int work_interpreter(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc)
{
struct ptlrpc_work_async_args *arg = data;
- LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
+ LASSERT(ptlrpcd_check_work(req));
LASSERT(arg->cb != NULL);
- return arg->cb(env, arg->cbdata);
+ rc = arg->cb(env, arg->cbdata);
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+
+ if (atomic_dec_return(&req->rq_refcount) > 1) {
+ atomic_set(&req->rq_refcount, 2);
+ ptlrpcd_add_work_req(req);
+ }
+ return rc;
+}
+
+static int worker_format;
+
+static int ptlrpcd_check_work(struct ptlrpc_request *req)
+{
+ return req->rq_pill.rc_fmt == (void *)&worker_format;
}
/**
@@ -2952,6 +3038,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
req->rq_receiving_reply = 0;
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_pill.rc_fmt = (void *)&worker_format;
spin_lock_init(&req->rq_lock);
INIT_LIST_HEAD(&req->rq_list);
@@ -2965,7 +3052,6 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
- args->magic = PTLRPC_WORK_MAGIC;
args->cb = cb;
args->cbdata = cbdata;
@@ -2995,25 +3081,8 @@ int ptlrpcd_queue_work(void *handler)
* req as opaque data. - Jinshan
*/
LASSERT(atomic_read(&req->rq_refcount) > 0);
- if (atomic_read(&req->rq_refcount) > 1)
- return -EBUSY;
-
- if (atomic_inc_return(&req->rq_refcount) > 2) { /* race */
- atomic_dec(&req->rq_refcount);
- return -EBUSY;
- }
-
- /* re-initialize the req */
- req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
- req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
- req->rq_phase = RQ_PHASE_INTERPRET;
- req->rq_next_phase = RQ_PHASE_COMPLETE;
- req->rq_xid = ptlrpc_next_xid();
- req->rq_import_generation = req->rq_import->imp_generation;
-
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ if (atomic_inc_return(&req->rq_refcount) == 2)
+ ptlrpcd_add_work_req(req);
return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
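
The ptlrpcd work-request rework above does two things: it tags work requests by pointing rq_pill.rc_fmt at a private static object instead of keeping a magic value in the async args, and it folds the re-arm path into ptlrpcd_add_work_req() so that work_interpreter() can re-queue a request whose refcount shows another ptlrpcd_queue_work() raced in. A minimal userspace sketch of the sentinel-pointer tagging idiom, with illustrative names rather than the Lustre ones:

#include <stdio.h>

struct fake_req {
	const void *fmt;            /* stands in for rq_pill.rc_fmt */
};

static int worker_tag;              /* its address, not its value, is the tag */

static int is_work_req(const struct fake_req *req)
{
	return req->fmt == (const void *)&worker_tag;
}

int main(void)
{
	struct fake_req work = { .fmt = &worker_tag };
	struct fake_req normal = { .fmt = NULL };

	printf("work: %d, normal: %d\n", is_work_req(&work), is_work_req(&normal));
	return 0;
}

Comparing addresses of a private static object cannot collide with any real format pointer, which a hand-picked magic constant could not guarantee.
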
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index f66cfea87acf..6ea0a491cfb3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -545,7 +545,7 @@ int ptlrpc_ni_init(void)
* different depending on... */
/* kernel LNet calls our master callback when there are new event,
* because we are guaranteed to get every event via callback,
- * so we just set EQ size to 0 to avoid overhread of serializing
+ * so we just set EQ size to 0 to avoid overhead of serializing
* enqueue/dequeue operations in LNet. */
rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
if (rc == 0)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
index 55247af3910e..c279edf5b2a5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -336,7 +336,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
if (rc) {
/* If any _real_ denial be made, we expect server return
* -EACCES reply or return success but indicate gss error
- * inside reply messsage. All other errors are treated as
+ * inside reply message. All other errors are treated as
* timeout, caller might try the negotiation repeatedly,
* leave recovery decisions to general ptlrpc layer.
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
index 56c28286c9c1..8dc5c724958d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
@@ -144,7 +144,8 @@ int der_read_length(unsigned char **buf, int *bufsize)
sf = *(*buf)++;
(*bufsize)--;
if (sf & 0x80) {
- if ((sf &= 0x7f) > ((*bufsize) - 1))
+ sf &= 0x7f;
+ if (((*bufsize) - 1) < sf)
return -1;
if (sf > SIZEOF_INT)
return -1;
@@ -199,27 +200,32 @@ __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
rawobj_t toid;
int ret = 0;
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x60)
return (G_BAD_TOK_HEADER);
- if ((seqsize = der_read_length(&buf, &toksize)) < 0)
+ seqsize = der_read_length(&buf, &toksize);
+ if (seqsize < 0)
return(G_BAD_TOK_HEADER);
if (seqsize != toksize)
return (G_BAD_TOK_HEADER);
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return (G_BAD_TOK_HEADER);
- if ((toksize -= 1) < 0)
+ toksize -= 1;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
toid.len = *buf++;
- if ((toksize -= toid.len) < 0)
+ toksize -= toid.len;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
toid.data = buf;
buf += toid.len;
@@ -231,7 +237,8 @@ __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
* important to return G_BAD_TOK_HEADER if the token header is
* in fact bad
*/
- if ((toksize -= 2) < 0)
+ toksize -= 2;
+ if (0 > toksize)
return (G_BAD_TOK_HEADER);
if (ret)
@@ -256,24 +263,29 @@ __u32 g_get_mech_oid(rawobj_t *mech, rawobj_t *in_buf)
int ret = 0;
int seqsize;
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x60)
return (G_BAD_TOK_HEADER);
- if ((seqsize = der_read_length(&buf, &len)) < 0)
+ seqsize = der_read_length(&buf, &len);
+ if (seqsize < 0)
return (G_BAD_TOK_HEADER);
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return (G_BAD_TOK_HEADER);
- if ((len -= 1) < 0)
+ len -= 1;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
mech->len = *buf++;
- if ((len -= mech->len) < 0)
+ len -= mech->len;
+ if (0 > len)
return (G_BAD_TOK_HEADER);
OBD_ALLOC_LARGE(mech->data, mech->len);
if (!mech->data)
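
The g_verify_token_header() and g_get_mech_oid() hunks above only lift assignments out of the if conditions; the underlying ASN.1/DER handling is unchanged. For reference, a self-contained sketch of the definite-length decoding that der_read_length() performs (short form: one byte is the length; long form: 0x80 | n followed by n big-endian length bytes) — illustrative code, not the Lustre helper:

#include <stdio.h>

static int der_length(const unsigned char **buf, int *remaining)
{
	int sf, ret = 0;

	if (*remaining < 1)
		return -1;
	sf = *(*buf)++;
	(*remaining)--;
	if (!(sf & 0x80))
		return sf;                      /* short form */
	sf &= 0x7f;                             /* long form: sf = number of length bytes */
	if (sf > *remaining || sf > (int)sizeof(int))
		return -1;
	while (sf--) {
		ret = (ret << 8) | *(*buf)++;   /* big-endian accumulation */
		(*remaining)--;
	}
	return ret;
}

int main(void)
{
	const unsigned char tok[] = { 0x82, 0x01, 0x00 };   /* long form: 256 */
	const unsigned char *p = tok;
	int left = sizeof(tok);

	printf("len = %d\n", der_length(&p, &left));        /* prints 256 */
	return 0;
}
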
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
index d43a13c69669..4642bbfb9273 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -1176,7 +1176,7 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
/*
* called with key semaphore write locked. it means we can operate
- * on the context without fear of loosing refcount.
+ * on the context without fear of losing refcount.
*/
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index b9fa3b4a40db..d03f6c114171 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -54,7 +54,6 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
-#include <linux/crypto.h>
#include <obd.h>
#include <obd_class.h>
@@ -679,7 +678,8 @@ __s32 krb5_make_checksum(__u32 enctype,
__u32 code = GSS_S_FAILURE;
int rc;
- if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+ tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0);
+ if (!tfm) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
index c624518c181a..7a1ff4fecbe3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
@@ -85,7 +85,7 @@ static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
}
/****************************************
- * internel context helpers *
+ * internal context helpers *
****************************************/
static
@@ -652,7 +652,7 @@ __u32 mech_name2idx(const char *name)
/* pipefs dentries for each mechanisms */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
-/* all upcall messgaes linked here */
+/* all upcall messages linked here */
static struct list_head upcall_lists[MECH_MAX];
/* and protected by this */
static spinlock_t upcall_locks[MECH_MAX];
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
index 5b5365b4629f..359c48ec2f5b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -102,7 +102,7 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
len++;
if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = cfs_hash_long(hash^l, BITS_PER_LONG);
+ hash = hash_long(hash^l, BITS_PER_LONG);
} while (len);
return hash >> (BITS_PER_LONG - bits);
@@ -586,7 +586,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out;
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech. */
+ * is invalid, here we retrieve it from mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
goto out;
@@ -880,7 +880,7 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
cache_get(&rsip->h); /* take an extra ref */
init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&rsip->waitq, &wait);
cache_check:
@@ -1067,7 +1067,7 @@ int __init gss_init_svc_upcall(void)
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
* drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
+ * here we wait at maximum 1.5 seconds. */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
index 8ce6271a5daa..383601cdd4e6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -382,7 +382,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
/* At this point this ctx might have been marked as dead by
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
- * destroying server side context when it be destroied. */
+ * destroying server side context when it is destroyed. */
set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
@@ -676,7 +676,7 @@ redo:
* lead to the sequence number fall behind the window on server and
* be dropped. also applies to gss_cli_ctx_seal().
*
- * Note: null mode dosen't check sequence number. */
+ * Note: null mode doesn't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
int behind = atomic_read(&gctx->gc_seq) - seq;
@@ -1215,7 +1215,7 @@ int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
/*
* remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
* this is to avoid potential problems of client side reverse svc ctx
- * be mis-destroyed in various recovery senarios. anyway client can
+ * be mis-destroyed in various recovery scenarios. anyway client can
* manage its reverse ctx well by associating it with its buddy ctx.
*/
if (sec_is_reverse(sec))
@@ -1882,7 +1882,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
- /* embedded lustre_msg might have been shrinked */
+ /* embedded lustre_msg might have been shrunk */
if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
@@ -2596,7 +2596,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
int msglen, rc;
/* get clear data length. note embedded lustre_msg might
- * have been shrinked */
+ * have been shrunk */
if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
else
@@ -2765,7 +2765,7 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
* replay.
*
* each reverse root ctx will record its latest sequence number on its
- * buddy svcctx before be destroied, so here we continue use it.
+ * buddy svcctx before being destroyed, so here we continue to use it.
*/
atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
@@ -2836,7 +2836,7 @@ int __init sptlrpc_gss_init(void)
if (rc)
goto out_svc_upcall;
- /* register policy after all other stuff be intialized, because it
+ /* register policy after all other stuff is initialized, because it
* might be in used immediately after the registration. */
rc = gss_init_keyring();
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index f465547eb95e..537aa6204a51 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -560,17 +560,30 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
struct ptlrpc_request *req;
struct list_head *tmp;
- if (list_empty(&imp->imp_replay_list))
- return 0;
- tmp = imp->imp_replay_list.next;
- req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
- *transno = req->rq_transno;
- if (req->rq_transno == 0) {
- DEBUG_REQ(D_ERROR, req, "zero transno in replay");
- LBUG();
+ /* The requests in committed_list always have smaller transnos than
+ * the requests in replay_list */
+ if (!list_empty(&imp->imp_committed_list)) {
+ tmp = imp->imp_committed_list.next;
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ *transno = req->rq_transno;
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_ERROR, req,
+ "zero transno in committed_list");
+ LBUG();
+ }
+ return 1;
}
-
- return 1;
+ if (!list_empty(&imp->imp_replay_list)) {
+ tmp = imp->imp_replay_list.next;
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ *transno = req->rq_transno;
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_ERROR, req, "zero transno in replay_list");
+ LBUG();
+ }
+ return 1;
+ }
+ return 0;
}
/**
@@ -1042,7 +1055,7 @@ finish:
if ((ocd->ocd_cksum_types &
cksum_types_supported_client()) == 0) {
LCONSOLE_WARN("The negotiation of the checksum "
- "alogrithm to use with server %s "
+ "algorithm to use with server %s "
"failed (%x/%x), disabling "
"checksums\n",
obd2cli_tgt(imp->imp_obd),
@@ -1260,7 +1273,7 @@ static int ptlrpc_invalidate_import_thread(void *data)
/**
* This is the state machine for client-side recovery on import.
*
- * Typicaly we have two possibly paths. If we came to server and it is not
+ * Typically we have two possible paths. If we came to a server and it is not
* in recovery, we just enter IMP_EVICTED state, invalidate our import
* state and reconnect from scratch.
* If we came to server that is in recovery, we enter IMP_REPLAY import state.
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index dfcb410fe485..41c12e00129f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -295,7 +295,8 @@ static const struct req_msg_field *mds_reint_setxattr_client[] = {
&RMF_REC_REINT,
&RMF_CAPA1,
&RMF_NAME,
- &RMF_EADATA
+ &RMF_EADATA,
+ &RMF_DLM_REQ
};
static const struct req_msg_field *mdt_swap_layouts[] = {
@@ -2154,7 +2155,7 @@ EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
* request (if the caller is executing on the server-side) or reply (if the
* caller is executing on the client-side).
*
- * This function convienient for use is code that could be executed on the
+ * This function is convenient for use in code that could be executed on the
* client and server alike.
*/
const void *req_capsule_other_get(struct req_capsule *pill,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 1be978609c59..58f1c8bf25b0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -616,7 +616,7 @@ out:
}
/**
- * The longest valid command string is the maxium policy name size, plus the
+ * The longest valid command string is the maximum policy name size, plus the
* length of the " reg" substring
*/
#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
@@ -1184,7 +1184,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
}
tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
/* Kludge code(deadlock situation): the lprocfs lock has been held
- * since the client is evicted by writting client's
+ * since the client is evicted by writing client's
* uuid/nid to procfs "evict_client" entry. However,
* obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
* the proc entries under the being destroyed export{}, so I have
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 3c6bf23415f9..a47a8d807d5b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -349,7 +349,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
/**
* Send request reply from request \a req reply buffer.
* \a flags defines reply types
- * Returns 0 on sucess or error code
+ * Returns 0 on success or error code
*/
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
@@ -389,7 +389,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
* ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
* reply buffer on client will be overflow.
*
- * XXX Remove this whenver we drop the interoprability with such client.
+ * XXX Remove this whenever we drop the interoperability with such client.
*/
req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
sizeof(struct ptlrpc_body_v2), 1);
@@ -511,7 +511,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
request->rq_import->imp_obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = -ENODEV;
return -ENODEV;
}
@@ -553,7 +555,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
if (rc) {
/* this prevents us from looping in
* ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = rc;
GOTO(cleanup_bulk, rc);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 0abcd6d82273..bcba1c8e8693 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1322,7 +1322,7 @@ EXPORT_SYMBOL(ptlrpc_nrs_policy_unregister);
* Setup NRS heads on all service partitions of service \a svc, and register
* all compatible policies on those NRS heads.
*
- * To be called from withing ptl
+ * To be called from within ptl
* \param[in] svc the service to setup
*
* \retval -ve error, the calling logic should eventually call
@@ -1736,7 +1736,7 @@ fail:
}
/**
- * Removes all policy desciptors from nrs_core::nrs_policies, and frees the
+ * Removes all policy descriptors from nrs_core::nrs_policies, and frees the
* policy descriptors.
*
* Since all PTLRPC services are stopped at this point, there are no more
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 464479c0f00b..45c0b84621e4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -123,7 +123,7 @@ int lustre_msg_early_size(void)
* with the old client (< 2.3) which doesn't have pb_jobid
* in the ptlrpc_body.
*
- * XXX Remove this whenever we dorp interoprability with such
+ * XXX Remove this whenever we drop interoperability with such
* client.
*/
__u32 pblen = sizeof(struct ptlrpc_body_v2);
@@ -244,15 +244,7 @@ int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
/* only use new format, we don't need to be compatible with 1.4 */
- magic = LUSTRE_MSG_MAGIC_V2;
-
- switch (magic) {
- case LUSTRE_MSG_MAGIC_V2:
- return lustre_pack_request_v2(req, count, lens, bufs);
- default:
- LASSERTF(0, "incorrect message magic: %08x\n", magic);
- return -EINVAL;
- }
+ return lustre_pack_request_v2(req, count, lens, bufs);
}
EXPORT_SYMBOL(lustre_pack_request);
@@ -1545,7 +1537,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
__u32 opc = lustre_msg_get_opc(msg);
struct ptlrpc_body *pb;
- /* Don't set jobid for ldlm ast RPCs, they've been shrinked.
+ /* Don't set jobid for ldlm ast RPCs, they've been shrunk.
* See the comment in ptlrpc_request_pack(). */
if (!opc || opc == LDLM_BL_CALLBACK ||
opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 2d26fd543d46..ca734ce079c1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -229,7 +229,7 @@ void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
spin_unlock(&req->rq_lock);
l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
} else if (req->rq_set) {
- /* If we have a vaid "rq_set", just reuse it to avoid double
+ /* If we have a valid "rq_set", just reuse it to avoid double
* linked. */
LASSERT(req->rq_phase == RQ_PHASE_NEW);
LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
@@ -471,7 +471,7 @@ static int ptlrpcd(void *arg)
* be better. But it breaks former data transfer policy.
*
* So we shouldn't be blind for avoiding the data transfer. We make some
- * compromise: divide the ptlrpcd threds pool into two parts. One part is
+ * compromise: divide the ptlrpcd threads pool into two parts. One part is
* for bound mode, each ptlrpcd thread in this part is bound to some CPU
* core. The other part is for free mode, all the ptlrpcd threads in the
* part can be scheduled on any CPU core. We specify some partnership
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 84c39e083ea4..48ae328ce24e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -105,24 +105,59 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
* imp_lock is being held by ptlrpc_replay, but it's not. it's
* just a little race...
*/
- list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+
+ /* Replay all the committed open requests on committed_list first */
+ if (!list_empty(&imp->imp_committed_list)) {
+ tmp = imp->imp_committed_list.prev;
req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
- /* If need to resend the last sent transno (because a
- reconnect has occurred), then stop on the matching
- req and send it again. If, however, the last sent
- transno has been committed then we continue replay
- from the next request. */
+ /* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
- if (imp->imp_resend_replay)
- lustre_msg_add_flags(req->rq_reqmsg,
- MSG_RESENT);
- break;
+ /* Since the imp_committed_list is immutable before
+ * all of its requests have been replayed, it's safe to
+ * use a cursor to accelerate the search */
+ imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+
+ while (imp->imp_replay_cursor !=
+ &imp->imp_committed_list) {
+ req = list_entry(imp->imp_replay_cursor,
+ struct ptlrpc_request,
+ rq_replay_list);
+ if (req->rq_transno > last_transno)
+ break;
+
+ req = NULL;
+ imp->imp_replay_cursor =
+ imp->imp_replay_cursor->next;
+ }
+ } else {
+ /* All requests on committed_list have been replayed */
+ imp->imp_replay_cursor = &imp->imp_committed_list;
+ req = NULL;
+ }
+ }
+
+ /* All the requests in committed list have been replayed, let's replay
+ * the imp_replay_list */
+ if (req == NULL) {
+ list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
+
+ if (req->rq_transno > last_transno)
+ break;
+ req = NULL;
}
- req = NULL;
}
+ /* If we need to resend the last sent transno (because a reconnect
+ * has occurred), then stop on the matching req and send it again.
+ * If, however, the last sent transno has been committed then we
+ * continue replay from the next request. */
+ if (req != NULL && imp->imp_resend_replay)
+ lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
+
spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
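
The rewritten ptlrpc_replay_next() above walks imp_committed_list with a persistent cursor (imp_replay_cursor) rather than restarting from the head on every call; this is safe because the committed list is not reordered while replay is in progress. A small self-contained sketch of the same resumable-scan idea, using illustrative types rather than ptlrpc_request:

#include <stdio.h>

struct node { long transno; struct node *next; };

static struct node *cursor;          /* persists across calls, like imp_replay_cursor */

static struct node *next_to_replay(struct node *head, long last_transno)
{
	struct node *n = cursor ? cursor->next : head;

	for (; n; n = n->next) {
		if (n->transno > last_transno) {
			cursor = n;          /* remember how far we got */
			return n;
		}
	}
	return NULL;                         /* everything up to last_transno is done */
}

int main(void)
{
	struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	long last = 0;
	struct node *n;

	while ((n = next_to_replay(&a, last)) != NULL) {
		printf("replay transno %ld\n", n->transno);
		last = n->transno;
	}
	return 0;
}

Each call resumes from the saved cursor, so a long committed list is traversed once overall instead of once per replayed request.
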
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 962b31d163de..d80418057f29 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -543,8 +543,8 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
- HZ);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
} else {
/*
* it's possible newctx == oldctx if we're switching
@@ -779,7 +779,7 @@ again:
* e.g. ptlrpc_abort_inflight();
*/
if (!cli_ctx_is_refreshed(ctx)) {
- /* timed out or interruptted */
+ /* timed out or interrupted */
req_off_ctx_list(req, ctx);
LASSERT(rc != 0);
@@ -805,7 +805,7 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
LASSERT(req->rq_cli_ctx->cc_sec);
LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
- /* special security flags accoding to opcode */
+ /* special security flags according to opcode */
switch (opcode) {
case OST_READ:
case MDS_READPAGE:
@@ -1218,7 +1218,7 @@ static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
LASSERT(policy->sp_cops->destroy_sec);
- CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
+ CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
policy->sp_cops->destroy_sec(sec);
sptlrpc_policy_put(policy);
@@ -1264,7 +1264,7 @@ void sptlrpc_sec_put(struct ptlrpc_sec *sec)
EXPORT_SYMBOL(sptlrpc_sec_put);
/*
- * policy module is responsible for taking refrence of import
+ * policy module is responsible for taking reference of import
*/
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
@@ -1419,7 +1419,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
sp = imp->imp_obd->u.cli.cl_sp_me;
} else {
- /* reverse import, determine flavor from incoming reqeust */
+ /* reverse import, determine flavor from incoming request */
sf = *flvr;
if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
@@ -2057,7 +2057,7 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
/*
* if it's not null flavor (which means embedded packing msg),
- * reset the swab mask for the comming inner msg unpacking.
+ * reset the swab mask for the coming inner msg unpacking.
*/
if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
req->rq_req_swab_mask = 0;
@@ -2108,7 +2108,7 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
/**
* Used by ptlrpc server, to perform transformation upon reply message.
*
- * \post req->rq_reply_off is set to approriate server-controlled reply offset.
+ * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
* \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
*/
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 316103ab7c3c..965668132747 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -113,7 +113,7 @@ static struct ptlrpc_enc_page_pool {
unsigned long epp_st_missings; /* # of cache missing */
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- cfs_time_t epp_st_max_wait; /* in jeffies */
+ cfs_time_t epp_st_max_wait; /* in jiffies */
/*
* pointers to pools
*/
@@ -545,11 +545,11 @@ again:
page_pools.epp_waitqlen;
set_current_state(TASK_UNINTERRUPTIBLE);
- init_waitqueue_entry_current(&waitlink);
+ init_waitqueue_entry(&waitlink, current);
add_wait_queue(&page_pools.epp_waitq, &waitlink);
spin_unlock(&page_pools.epp_lock);
- waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+ schedule();
remove_wait_queue(&page_pools.epp_waitq, &waitlink);
LASSERT(page_pools.epp_waitqlen > 0);
spin_lock(&page_pools.epp_lock);
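
The sec_bulk.c hunk above drops the libcfs wrappers (init_waitqueue_entry_current(), waitq_wait()) in favour of the stock kernel primitives. The usual open-coded pattern looks roughly like this — a kernel-style fragment for illustration only, not a buildable module:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);

static void example_wait(void)
{
	wait_queue_t waitlink;

	init_waitqueue_entry(&waitlink, current);
	add_wait_queue(&example_waitq, &waitlink);
	set_current_state(TASK_UNINTERRUPTIBLE);

	schedule();                     /* sleep until wake_up(&example_waitq) */

	remove_wait_queue(&example_waitq, &waitlink);
	__set_current_state(TASK_RUNNING);
}

The real code additionally drops page_pools.epp_lock before schedule() and reacquires it afterwards; the sketch leaves that locking out.
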
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 6cc3f23c27cc..bf56120abfaf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -255,7 +255,7 @@ void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
EXPORT_SYMBOL(sptlrpc_rule_set_free);
/*
- * return 0 if the rule set could accomodate one more rule.
+ * return 0 if the rule set could accommodate one more rule.
*/
int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
{
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 590fa8df8b7f..192adec5382a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -177,7 +177,7 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
/**
* Part of Rep-Ack logic.
- * Puts a lock and its mode into reply state assotiated to request reply.
+ * Puts a lock and its mode into reply state associated to request reply.
*/
void
ptlrpc_save_lock(struct ptlrpc_request *req,
@@ -252,7 +252,7 @@ struct rs_batch {
static struct ptlrpc_hr_service ptlrpc_hr;
/**
- * maximum mumber of replies scheduled in one batch
+ * maximum number of replies scheduled in one batch
*/
#define MAX_SCHEDULED 256
@@ -612,7 +612,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
- /* acitve requests and hp requests */
+ /* active requests and hp requests */
spin_lock_init(&svcpt->scp_req_lock);
/* reply states */
@@ -752,7 +752,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
spin_lock_init(&service->srv_lock);
service->srv_name = conf->psc_name;
service->srv_watchdog_factor = conf->psc_watchdog_factor;
- INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
+ INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
/* buffer configuration */
service->srv_nbuf_per_group = test_req_buffer_pressure ?
@@ -1982,7 +1982,7 @@ put_conn:
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
"%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
current_comm(),
(request->rq_export ?
@@ -2736,7 +2736,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
spin_lock(&svcpt->scp_lock);
--svcpt->scp_nthrs_starting;
if (thread_is_stopping(thread)) {
- /* this ptlrpc_thread is being hanled
+ /* this ptlrpc_thread is being handled
* by ptlrpc_svcpt_stop_threads now
*/
thread_add_flags(thread, SVC_STOPPED);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3aa445952024..3c8846006a7b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -1152,6 +1152,8 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_SHORTIO);
LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n",
OBD_CONNECT_PINGLESS);
+ LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
+ "found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
(unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 46f1e619cbd8..22b0c9d6f046 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,6 +21,8 @@ if STAGING_MEDIA
# Please keep them in alphabetic order
source "drivers/staging/media/as102/Kconfig"
+source "drivers/staging/media/bcm2048/Kconfig"
+
source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
@@ -31,8 +33,14 @@ source "drivers/staging/media/go7007/Kconfig"
source "drivers/staging/media/msi3101/Kconfig"
+source "drivers/staging/media/omap24xx/Kconfig"
+
+source "drivers/staging/media/sn9c102/Kconfig"
+
source "drivers/staging/media/solo6x10/Kconfig"
+source "drivers/staging/media/omap4iss/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index eb7f30b1ccd8..bedc62aaede6 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
@@ -6,3 +7,7 @@ obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_USB_MSI3101) += msi3101/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
+obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
+obj-$(CONFIG_USB_SN9C102) += sn9c102/
+obj-$(CONFIG_VIDEO_OMAP2) += omap24xx/
+obj-$(CONFIG_VIDEO_TCM825X) += omap24xx/
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index 8b7bb9547079..09d64cd67502 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -111,8 +111,6 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
struct as10x_bus_adapter_t *bus_adap = &dev->bus_adap;
int ret = -EFAULT;
- ENTER();
-
if (mutex_lock_interruptible(&dev->bus_adap.lock)) {
dprintk(debug, "mutex_lock_interruptible(lock) failed !\n");
return -EBUSY;
@@ -133,15 +131,14 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
filter.pid = pid;
ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
- dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
+ dprintk(debug,
+ "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
index, filter.idx, filter.pid, ret);
break;
}
}
mutex_unlock(&dev->bus_adap.lock);
-
- LEAVE();
return ret;
}
@@ -151,8 +148,6 @@ static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct as102_dev_t *as102_dev = demux->priv;
- ENTER();
-
if (mutex_lock_interruptible(&as102_dev->sem))
return -ERESTARTSYS;
@@ -164,7 +159,6 @@ static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
ret = as102_start_stream(as102_dev);
mutex_unlock(&as102_dev->sem);
- LEAVE();
return ret;
}
@@ -173,8 +167,6 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct as102_dev_t *as102_dev = demux->priv;
- ENTER();
-
if (mutex_lock_interruptible(&as102_dev->sem))
return -ERESTARTSYS;
@@ -186,7 +178,6 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
dvbdmxfeed->pid, 0);
mutex_unlock(&as102_dev->sem);
- LEAVE();
return 0;
}
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index b0e5a23bd532..a06837dcc05d 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -38,14 +38,6 @@ extern int elna_enable;
printk(args); \
} } while (0)
-#ifdef TRACE
-#define ENTER() pr_debug(">> enter %s\n", __func__)
-#define LEAVE() pr_debug("<< leave %s\n", __func__)
-#else
-#define ENTER()
-#define LEAVE()
-#endif
-
#define AS102_DEVICE_MAJOR 192
#define AS102_USB_BUF_SIZE 512
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
index 9ce8c9daa2e7..b686b7617cdc 100644
--- a/drivers/staging/media/as102/as102_fe.c
+++ b/drivers/staging/media/as102/as102_fe.c
@@ -34,8 +34,6 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe)
struct as102_dev_t *dev;
struct as10x_tune_args tune_args = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -52,7 +50,6 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return (ret < 0) ? -EINVAL : 0;
}
@@ -63,8 +60,6 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe)
struct as102_dev_t *dev;
struct as10x_tps tps = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -EINVAL;
@@ -80,13 +75,11 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return (ret < 0) ? -EINVAL : 0;
}
static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *settings) {
- ENTER();
#if 0
dprintk(debug, "step_size = %d\n", settings->step_size);
@@ -97,7 +90,6 @@ static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
settings->min_delay_ms = 1000;
- LEAVE();
return 0;
}
@@ -108,8 +100,6 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
struct as102_dev_t *dev;
struct as10x_tune_status tstate = { 0 };
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -151,8 +141,8 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (as10x_cmd_get_demod_stats(&dev->bus_adap,
(struct as10x_demod_stats *) &dev->demod_stats) < 0) {
memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
- dprintk(debug, "as10x_cmd_get_demod_stats failed "
- "(probably not tuned)\n");
+ dprintk(debug,
+ "as10x_cmd_get_demod_stats failed (probably not tuned)\n");
} else {
dprintk(debug,
"demod status: fc: 0x%08x, bad fc: 0x%08x, "
@@ -168,7 +158,6 @@ static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
out:
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return ret;
}
@@ -183,15 +172,12 @@ static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*snr = dev->demod_stats.mer;
- LEAVE();
return 0;
}
@@ -199,15 +185,12 @@ static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*ber = dev->ber;
- LEAVE();
return 0;
}
@@ -216,15 +199,12 @@ static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
*strength = (((0xffff * 400) * dev->signal_strength + 41000) * 2);
- LEAVE();
return 0;
}
@@ -232,8 +212,6 @@ static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct as102_dev_t *dev;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -243,7 +221,6 @@ static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
else
*ucblocks = 0;
- LEAVE();
return 0;
}
@@ -252,8 +229,6 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
struct as102_dev_t *dev;
int ret;
- ENTER();
-
dev = (struct as102_dev_t *) fe->tuner_priv;
if (dev == NULL)
return -ENODEV;
@@ -263,7 +238,8 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
if (acquire) {
if (elna_enable)
- as10x_cmd_set_context(&dev->bus_adap, CONTEXT_LNA, dev->elna_cfg);
+ as10x_cmd_set_context(&dev->bus_adap,
+ CONTEXT_LNA, dev->elna_cfg);
ret = as10x_cmd_turn_on(&dev->bus_adap);
} else {
@@ -272,7 +248,6 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
mutex_unlock(&dev->bus_adap.lock);
- LEAVE();
return ret;
}
@@ -581,8 +556,8 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
as102_fe_get_code_rate(params->code_rate_LP);
}
- dprintk(debug, "\thierarchy: 0x%02x "
- "selected: %s code_rate_%s: 0x%02x\n",
+ dprintk(debug,
+ "\thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n",
tune_args->hierarchy,
tune_args->hier_select == HIER_HIGH_PRIORITY ?
"HP" : "LP",
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index b9670ee41b4e..f33f752c0aad 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -26,10 +26,10 @@
#include "as102_drv.h"
#include "as102_fw.h"
-char as102_st_fw1[] = "as102_data1_st.hex";
-char as102_st_fw2[] = "as102_data2_st.hex";
-char as102_dt_fw1[] = "as102_data1_dt.hex";
-char as102_dt_fw2[] = "as102_data2_dt.hex";
+static const char as102_st_fw1[] = "as102_data1_st.hex";
+static const char as102_st_fw2[] = "as102_data2_st.hex";
+static const char as102_dt_fw1[] = "as102_data1_dt.hex";
+static const char as102_dt_fw2[] = "as102_data2_dt.hex";
static unsigned char atohx(unsigned char *dst, char *src)
{
@@ -109,8 +109,6 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
- ENTER();
-
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
@@ -158,7 +156,6 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
- LEAVE();
return (errno == 0) ? total_read_bytes : errno;
}
@@ -167,11 +164,9 @@ int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
int errno = -EFAULT;
const struct firmware *firmware = NULL;
unsigned char *cmd_buf = NULL;
- char *fw1, *fw2;
+ const char *fw1, *fw2;
struct usb_device *dev = bus_adap->usb_dev;
- ENTER();
-
/* select fw file to upload */
if (dual_tuner) {
fw1 = as102_dt_fw1;
@@ -233,6 +228,5 @@ error:
kfree(cmd_buf);
release_firmware(firmware);
- LEAVE();
return errno;
}
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index 9f275f020150..e4a69454ebeb 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -92,7 +92,6 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf, int recv_buf_len)
{
int ret = 0;
- ENTER();
if (send_buf != NULL) {
ret = usb_control_msg(bus_adap->usb_dev,
@@ -140,7 +139,6 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
#endif
}
- LEAVE();
return ret;
}
@@ -191,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
return ret ? ret : actual_len;
}
-struct as102_priv_ops_t as102_priv_ops = {
+static struct as102_priv_ops_t as102_priv_ops = {
.upload_fw_pkt = as102_send_ep1,
.xfer_cmd = as102_usb_xfer_cmd,
.as102_read_ep2 = as102_read_ep2,
@@ -240,8 +238,6 @@ static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
{
int i;
- ENTER();
-
for (i = 0; i < MAX_STREAM_URB; i++)
usb_free_urb(dev->stream_urb[i]);
@@ -249,15 +245,12 @@ static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
MAX_STREAM_URB * AS102_USB_BUF_SIZE,
dev->stream,
dev->dma_addr);
- LEAVE();
}
static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
{
int i, ret = 0;
- ENTER();
-
dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev,
MAX_STREAM_URB * AS102_USB_BUF_SIZE,
GFP_KERNEL,
@@ -287,7 +280,6 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
dev->stream_urb[i] = urb;
}
- LEAVE();
return ret;
}
@@ -318,23 +310,17 @@ static void as102_usb_release(struct kref *kref)
{
struct as102_dev_t *as102_dev;
- ENTER();
-
as102_dev = container_of(kref, struct as102_dev_t, kref);
if (as102_dev != NULL) {
usb_put_dev(as102_dev->bus_adap.usb_dev);
kfree(as102_dev);
}
-
- LEAVE();
}
static void as102_usb_disconnect(struct usb_interface *intf)
{
struct as102_dev_t *as102_dev;
- ENTER();
-
/* extract as102_dev_t from usb_device private data */
as102_dev = usb_get_intfdata(intf);
@@ -353,8 +339,6 @@ static void as102_usb_disconnect(struct usb_interface *intf)
kref_put(&as102_dev->kref, as102_usb_release);
pr_info("%s: device has been disconnected\n", DRIVER_NAME);
-
- LEAVE();
}
static int as102_usb_probe(struct usb_interface *intf,
@@ -364,8 +348,6 @@ static int as102_usb_probe(struct usb_interface *intf,
struct as102_dev_t *as102_dev;
int i;
- ENTER();
-
/* This should never actually happen */
if (ARRAY_SIZE(as102_usb_id_table) !=
(sizeof(as102_device_names) / sizeof(const char *))) {
@@ -419,15 +401,21 @@ static int as102_usb_probe(struct usb_interface *intf,
/* request buffer allocation for streaming */
ret = as102_alloc_usb_stream_buffer(as102_dev);
if (ret != 0)
- goto failed;
+ goto failed_stream;
/* register dvb layer */
ret = as102_dvb_register(as102_dev);
+ if (ret != 0)
+ goto failed_dvb;
- LEAVE();
return ret;
+failed_dvb:
+ as102_free_usb_stream_buffer(as102_dev);
+failed_stream:
+ usb_deregister_dev(intf, &as102_usb_class_driver);
failed:
+ usb_put_dev(as102_dev->bus_adap.usb_dev);
usb_set_intfdata(intf, NULL);
kfree(as102_dev);
return ret;
@@ -439,8 +427,6 @@ static int as102_open(struct inode *inode, struct file *file)
struct usb_interface *intf = NULL;
struct as102_dev_t *dev = NULL;
- ENTER();
-
/* read minor from inode */
minor = iminor(inode);
@@ -467,7 +453,6 @@ static int as102_open(struct inode *inode, struct file *file)
kref_get(&dev->kref);
exit:
- LEAVE();
return ret;
}
@@ -476,15 +461,12 @@ static int as102_release(struct inode *inode, struct file *file)
int ret = 0;
struct as102_dev_t *dev = NULL;
- ENTER();
-
dev = file->private_data;
if (dev != NULL) {
/* decrement the count on our device */
kref_put(&dev->kref, as102_usb_release);
}
- LEAVE();
return ret;
}
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index a73df10982d0..9e49f15a7c9f 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -34,8 +34,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -63,7 +61,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNON_RSP);
out:
- LEAVE();
return error;
}
@@ -78,8 +75,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -106,7 +101,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_TURNOFF_RSP);
out:
- LEAVE();
return error;
}
@@ -123,8 +117,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
- ENTER();
-
preq = adap->cmd;
prsp = adap->rsp;
@@ -164,7 +156,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
error = as10x_rsp_parse(prsp, CONTROL_PROC_SETTUNE_RSP);
out:
- LEAVE();
return error;
}
@@ -181,8 +172,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
- ENTER();
-
preq = adap->cmd;
prsp = adap->rsp;
@@ -220,7 +209,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
pstatus->BER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.BER);
out:
- LEAVE();
return error;
}
@@ -236,8 +224,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -281,7 +267,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
ptps->cell_ID = le16_to_cpu(prsp->body.get_tps.rsp.tps.cell_ID);
out:
- LEAVE();
return error;
}
@@ -298,8 +283,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -343,7 +326,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
prsp->body.get_demod_stats.rsp.stats.has_started;
out:
- LEAVE();
return error;
}
@@ -361,8 +343,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -397,7 +377,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
*is_ready = prsp->body.get_impulse_rsp.rsp.is_ready;
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index 4a2bbd766655..b1e300d88753 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -40,8 +40,6 @@ int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -81,7 +79,6 @@ int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
}
out:
- LEAVE();
return error;
}
@@ -99,8 +96,6 @@ int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -136,7 +131,6 @@ int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
error = as10x_context_rsp_parse(prsp, CONTROL_PROC_CONTEXT_RSP);
out:
- LEAVE();
return error;
}
@@ -156,8 +150,6 @@ int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -188,7 +180,6 @@ int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
error = as10x_rsp_parse(prsp, CONTROL_PROC_ELNA_CHANGE_MODE_RSP);
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/staging/media/as102/as10x_cmd_stream.c
index 6d000f60fb0e..1088ca1fe92f 100644
--- a/drivers/staging/media/as102/as10x_cmd_stream.c
+++ b/drivers/staging/media/as102/as10x_cmd_stream.c
@@ -34,8 +34,6 @@ int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -77,7 +75,6 @@ int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
}
out:
- LEAVE();
return error;
}
@@ -94,8 +91,6 @@ int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -126,7 +121,6 @@ int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
error = as10x_rsp_parse(prsp, CONTROL_PROC_REMOVEFILTER_RSP);
out:
- LEAVE();
return error;
}
@@ -141,8 +135,6 @@ int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
int error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -172,7 +164,6 @@ int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_START_STREAMING_RSP);
out:
- LEAVE();
return error;
}
@@ -187,8 +178,6 @@ int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
int8_t error;
struct as10x_cmd_t *pcmd, *prsp;
- ENTER();
-
pcmd = adap->cmd;
prsp = adap->rsp;
@@ -218,6 +207,5 @@ int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
error = as10x_rsp_parse(prsp, CONTROL_PROC_STOP_STREAMING_RSP);
out:
- LEAVE();
return error;
}
diff --git a/drivers/staging/media/as102/as10x_handle.h b/drivers/staging/media/as102/as10x_handle.h
index 62b9795ee424..5638b191b780 100644
--- a/drivers/staging/media/as102/as10x_handle.h
+++ b/drivers/staging/media/as102/as10x_handle.h
@@ -28,26 +28,26 @@ struct as102_dev_t;
#define REGMODE32 32
struct as102_priv_ops_t {
- int (*upload_fw_pkt) (struct as10x_bus_adapter_t *bus_adap,
+ int (*upload_fw_pkt)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen, int swap32);
- int (*send_cmd) (struct as10x_bus_adapter_t *bus_adap,
+ int (*send_cmd)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen);
- int (*xfer_cmd) (struct as10x_bus_adapter_t *bus_adap,
+ int (*xfer_cmd)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *send_buf, int send_buf_len,
unsigned char *recv_buf, int recv_buf_len);
- int (*start_stream) (struct as102_dev_t *dev);
- void (*stop_stream) (struct as102_dev_t *dev);
+ int (*start_stream)(struct as102_dev_t *dev);
+ void (*stop_stream)(struct as102_dev_t *dev);
- int (*reset_target) (struct as10x_bus_adapter_t *bus_adap);
+ int (*reset_target)(struct as10x_bus_adapter_t *bus_adap);
int (*read_write)(struct as10x_bus_adapter_t *bus_adap, uint8_t mode,
uint32_t rd_addr, uint16_t rd_len,
uint32_t wr_addr, uint16_t wr_len);
- int (*as102_read_ep2) (struct as10x_bus_adapter_t *bus_adap,
+ int (*as102_read_ep2)(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf,
int recv_buf_len);
};
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
new file mode 100644
index 000000000000..a9fc6e186494
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Kconfig
@@ -0,0 +1,13 @@
+#
+# Multimedia Video device configuration
+#
+
+config I2C_BCM2048
+ tristate "Broadcom BCM2048 FM Radio Receiver support"
+ depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
+ ---help---
+ Say Y here if you want support for the BCM2048 FM Radio Receiver.
+ This device driver supports only the I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-bcm2048.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
new file mode 100644
index 000000000000..b4f5663d1408
--- /dev/null
+++ b/drivers/staging/media/bcm2048/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO
new file mode 100644
index 000000000000..051f85dbe89e
--- /dev/null
+++ b/drivers/staging/media/bcm2048/TODO
@@ -0,0 +1,24 @@
+TODO:
+
+From the initial code review:
+
+The main thing you need to do is to implement all the controls using the
+control framework (see Documentation/video4linux/v4l2-controls.txt).
+Most drivers are by now converted to the control framework, so you will
+find many examples of how to do this in drivers/media/radio.
+
+The sysfs stuff should be replaced by controls as well. A lot of the RDS
+support is now available as controls (although there may well be some
+missing features, but that is easy enough to add). Since the RDS data is
+actually read() from the device I am not sure whether the RDS
+properties/controls should be there at all.
+
+Correct the coding style, as this driver also violates several style
+rules and does evil tricks, like returning from a function inside a
+macro.
+
+Finally this driver should probably be split up into two parts: one
+v4l2_subdev-based core driver and one platform driver. See e.g.
+radio-si4713/si4713-i2c.c as a good example. But I would wait with that
+until the rest of the driver is cleaned up. Then I have a better idea of
+whether this is necessary or not.
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
new file mode 100644
index 000000000000..b2cd3a85166d
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -0,0 +1,2744 @@
+/*
+ * drivers/staging/media/radio-bcm2048.c
+ *
+ * Driver for I2C Broadcom BCM2048 FM Radio Receiver:
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * Copyright (C) Nils Faerber <nils.faerber@kernelconcepts.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/*
+ * History:
+ * Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ * Version 0.0.1
+ * - Initial implementation
+ * 2010-02-21 Nils Faerber <nils.faerber@kernelconcepts.de>
+ * Version 0.0.2
+ * - Add support for interrupt driven rds data reading
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include "radio-bcm2048.h"
+
+/* driver definitions */
+#define BCM2048_DRIVER_AUTHOR "Eero Nurkkala <ext-eero.nurkkala@nokia.com>"
+#define BCM2048_DRIVER_NAME BCM2048_NAME
+#define BCM2048_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
+#define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver"
+#define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver"
+
+/* I2C Control Registers */
+#define BCM2048_I2C_FM_RDS_SYSTEM 0x00
+#define BCM2048_I2C_FM_CTRL 0x01
+#define BCM2048_I2C_RDS_CTRL0 0x02
+#define BCM2048_I2C_RDS_CTRL1 0x03
+#define BCM2048_I2C_FM_AUDIO_PAUSE 0x04
+#define BCM2048_I2C_FM_AUDIO_CTRL0 0x05
+#define BCM2048_I2C_FM_AUDIO_CTRL1 0x06
+#define BCM2048_I2C_FM_SEARCH_CTRL0 0x07
+#define BCM2048_I2C_FM_SEARCH_CTRL1 0x08
+#define BCM2048_I2C_FM_SEARCH_TUNE_MODE 0x09
+#define BCM2048_I2C_FM_FREQ0 0x0a
+#define BCM2048_I2C_FM_FREQ1 0x0b
+#define BCM2048_I2C_FM_AF_FREQ0 0x0c
+#define BCM2048_I2C_FM_AF_FREQ1 0x0d
+#define BCM2048_I2C_FM_CARRIER 0x0e
+#define BCM2048_I2C_FM_RSSI 0x0f
+#define BCM2048_I2C_FM_RDS_MASK0 0x10
+#define BCM2048_I2C_FM_RDS_MASK1 0x11
+#define BCM2048_I2C_FM_RDS_FLAG0 0x12
+#define BCM2048_I2C_FM_RDS_FLAG1 0x13
+#define BCM2048_I2C_RDS_WLINE 0x14
+#define BCM2048_I2C_RDS_BLKB_MATCH0 0x16
+#define BCM2048_I2C_RDS_BLKB_MATCH1 0x17
+#define BCM2048_I2C_RDS_BLKB_MASK0 0x18
+#define BCM2048_I2C_RDS_BLKB_MASK1 0x19
+#define BCM2048_I2C_RDS_PI_MATCH0 0x1a
+#define BCM2048_I2C_RDS_PI_MATCH1 0x1b
+#define BCM2048_I2C_RDS_PI_MASK0 0x1c
+#define BCM2048_I2C_RDS_PI_MASK1 0x1d
+#define BCM2048_I2C_SPARE1 0x20
+#define BCM2048_I2C_SPARE2 0x21
+#define BCM2048_I2C_FM_RDS_REV 0x28
+#define BCM2048_I2C_SLAVE_CONFIGURATION 0x29
+#define BCM2048_I2C_RDS_DATA 0x80
+#define BCM2048_I2C_FM_BEST_TUNE_MODE 0x90
+
+/* BCM2048_I2C_FM_RDS_SYSTEM */
+#define BCM2048_FM_ON 0x01
+#define BCM2048_RDS_ON 0x02
+
+/* BCM2048_I2C_FM_CTRL */
+#define BCM2048_BAND_SELECT 0x01
+#define BCM2048_STEREO_MONO_AUTO_SELECT 0x02
+#define BCM2048_STEREO_MONO_MANUAL_SELECT 0x04
+#define BCM2048_STEREO_MONO_BLEND_SWITCH 0x08
+#define BCM2048_HI_LO_INJECTION 0x10
+
+/* BCM2048_I2C_RDS_CTRL0 */
+#define BCM2048_RBDS_RDS_SELECT 0x01
+#define BCM2048_FLUSH_FIFO 0x02
+
+/* BCM2048_I2C_FM_AUDIO_PAUSE */
+#define BCM2048_AUDIO_PAUSE_RSSI_TRESH 0x0f
+#define BCM2048_AUDIO_PAUSE_DURATION 0xf0
+
+/* BCM2048_I2C_FM_AUDIO_CTRL0 */
+#define BCM2048_RF_MUTE 0x01
+#define BCM2048_MANUAL_MUTE 0x02
+#define BCM2048_DAC_OUTPUT_LEFT 0x04
+#define BCM2048_DAC_OUTPUT_RIGHT 0x08
+#define BCM2048_AUDIO_ROUTE_DAC 0x10
+#define BCM2048_AUDIO_ROUTE_I2S 0x20
+#define BCM2048_DE_EMPHASIS_SELECT 0x40
+#define BCM2048_AUDIO_BANDWIDTH_SELECT 0x80
+
+/* BCM2048_I2C_FM_SEARCH_CTRL0 */
+#define BCM2048_SEARCH_RSSI_THRESHOLD 0x7f
+#define BCM2048_SEARCH_DIRECTION 0x80
+
+/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */
+#define BCM2048_FM_AUTO_SEARCH 0x03
+
+/* BCM2048_I2C_FM_RSSI */
+#define BCM2048_RSSI_VALUE 0xff
+
+/* BCM2048_I2C_FM_RDS_MASK0 */
+/* BCM2048_I2C_FM_RDS_MASK1 */
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
+#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
+#define BCM2048_FM_FLAG_RSSI_LOW 0x04
+#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
+#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
+#define BCM2048_FLAG_STEREO_DETECTED 0x20
+#define BCM2048_FLAG_STEREO_ACTIVE 0x40
+
+/* BCM2048_I2C_RDS_DATA */
+#define BCM2048_SLAVE_ADDRESS 0x3f
+#define BCM2048_SLAVE_ENABLE 0x80
+
+/* BCM2048_I2C_FM_BEST_TUNE_MODE */
+#define BCM2048_BEST_TUNE_MODE 0x80
+
+#define BCM2048_RDS_FLAG_FIFO_WLINE 0x02
+#define BCM2048_RDS_FLAG_B_BLOCK_MATCH 0x08
+#define BCM2048_RDS_FLAG_SYNC_LOST 0x10
+#define BCM2048_RDS_FLAG_PI_MATCH 0x20
+
+#define BCM2048_RDS_MARK_END_BYTE0 0x7C
+#define BCM2048_RDS_MARK_END_BYTEN 0xFF
+
+#define BCM2048_FM_FLAGS_ALL	(BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED | \
+				 BCM2048_FM_FLAG_SEARCH_TUNE_FAIL | \
+				 BCM2048_FM_FLAG_RSSI_LOW | \
+				 BCM2048_FM_FLAG_CARRIER_ERROR_HIGH | \
+				 BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION | \
+				 BCM2048_FLAG_STEREO_DETECTED | \
+				 BCM2048_FLAG_STEREO_ACTIVE)
+
+#define BCM2048_RDS_FLAGS_ALL	(BCM2048_RDS_FLAG_FIFO_WLINE | \
+				 BCM2048_RDS_FLAG_B_BLOCK_MATCH | \
+				 BCM2048_RDS_FLAG_SYNC_LOST | \
+				 BCM2048_RDS_FLAG_PI_MATCH)
+
+#define BCM2048_DEFAULT_TIMEOUT 1500
+#define BCM2048_AUTO_SEARCH_TIMEOUT 3000
+
+
+#define BCM2048_FREQDEV_UNIT 10000
+#define BCM2048_FREQV4L2_MULTI 625
+#define dev_to_v4l2(f) ((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
+#define v4l2_to_dev(f) ((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
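+
+/*
+ * Example: dev_to_v4l2(f) reduces to f * 16, so a device frequency of
+ * 87500 (kHz) maps to 1400000 V4L2 units of 62.5 Hz (87.5 MHz);
+ * v4l2_to_dev() is the exact inverse.
+ */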
+
+#define msb(x)			((u8)((u16)(x) >> 8))
+#define lsb(x)			((u8)((u16)(x) & 0x00FF))
+#define compose_u16(msb, lsb)	(((u16)(msb) << 8) | (lsb))
+
+#define BCM2048_DEFAULT_POWERING_DELAY 20
+#define BCM2048_DEFAULT_REGION 0x02
+#define BCM2048_DEFAULT_MUTE 0x01
+#define BCM2048_DEFAULT_RSSI_THRESHOLD 0x64
+#define BCM2048_DEFAULT_RDS_WLINE 0x7E
+
+#define BCM2048_FM_SEARCH_INACTIVE 0x00
+#define BCM2048_FM_PRE_SET_MODE 0x01
+#define BCM2048_FM_AUTO_SEARCH_MODE 0x02
+#define BCM2048_FM_AF_JUMP_MODE 0x03
+
+#define BCM2048_FREQUENCY_BASE 64000
+
+#define BCM2048_POWER_ON 0x01
+#define BCM2048_POWER_OFF 0x00
+
+#define BCM2048_ITEM_ENABLED 0x01
+#define BCM2048_SEARCH_DIRECTION_UP 0x01
+
+#define BCM2048_DE_EMPHASIS_75us 75
+#define BCM2048_DE_EMPHASIS_50us 50
+
+#define BCM2048_SCAN_FAIL 0x00
+#define BCM2048_SCAN_OK 0x01
+
+#define BCM2048_FREQ_ERROR_FLOOR -20
+#define BCM2048_FREQ_ERROR_ROOF 20
+
+/* -60 dB is reported as full signal strength */
+#define BCM2048_RSSI_LEVEL_BASE -60
+#define BCM2048_RSSI_LEVEL_ROOF -100
+#define BCM2048_RSSI_LEVEL_ROOF_NEG 100
+#define BCM2048_SIGNAL_MULTIPLIER (0xFFFF / \
+ (BCM2048_RSSI_LEVEL_ROOF_NEG + \
+ BCM2048_RSSI_LEVEL_BASE))
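+
+/*
+ * Example: BCM2048_SIGNAL_MULTIPLIER is 0xFFFF / 40 = 1638, so an RSSI
+ * of -80 dB is reported as a signal of (-80 + 100) * 1638 = 32760,
+ * roughly half of the 16-bit scale (see bcm2048_vidioc_g_tuner()).
+ */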
+
+#define BCM2048_RDS_FIFO_DUPLE_SIZE 0x03
+#define BCM2048_RDS_CRC_MASK 0x0F
+#define BCM2048_RDS_CRC_NONE 0x00
+#define BCM2048_RDS_CRC_MAX_2BITS 0x04
+#define BCM2048_RDS_CRC_LEAST_2BITS 0x08
+#define BCM2048_RDS_CRC_UNRECOVARABLE 0x0C
+
+#define BCM2048_RDS_BLOCK_MASK 0xF0
+#define BCM2048_RDS_BLOCK_A 0x00
+#define BCM2048_RDS_BLOCK_B 0x10
+#define BCM2048_RDS_BLOCK_C 0x20
+#define BCM2048_RDS_BLOCK_D 0x30
+#define BCM2048_RDS_BLOCK_C_SCORED 0x40
+#define BCM2048_RDS_BLOCK_E 0x60
+
+#define BCM2048_RDS_RT 0x20
+#define BCM2048_RDS_PS 0x00
+
+#define BCM2048_RDS_GROUP_AB_MASK 0x08
+#define BCM2048_RDS_GROUP_A 0x00
+#define BCM2048_RDS_GROUP_B 0x08
+
+#define BCM2048_RDS_RT_AB_MASK 0x10
+#define BCM2048_RDS_RT_A 0x00
+#define BCM2048_RDS_RT_B 0x10
+#define BCM2048_RDS_RT_INDEX 0x0F
+
+#define BCM2048_RDS_PS_INDEX 0x03
+
+struct rds_info {
+ u16 rds_pi;
+#define BCM2048_MAX_RDS_RT (64 + 1)
+ u8 rds_rt[BCM2048_MAX_RDS_RT];
+ u8 rds_rt_group_b;
+ u8 rds_rt_ab;
+#define BCM2048_MAX_RDS_PS (8 + 1)
+ u8 rds_ps[BCM2048_MAX_RDS_PS];
+ u8 rds_ps_group;
+ u8 rds_ps_group_cnt;
+#define BCM2048_MAX_RDS_RADIO_TEXT 255
+ u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3];
+ u8 text_len;
+};
+
+struct region_info {
+ u32 bottom_frequency;
+ u32 top_frequency;
+ u8 deemphasis;
+ u8 channel_spacing;
+ u8 region;
+};
+
+struct bcm2048_device {
+ struct i2c_client *client;
+ struct video_device *videodev;
+ struct work_struct work;
+ struct completion compl;
+ struct mutex mutex;
+ struct bcm2048_platform_data *platform_data;
+ struct rds_info rds_info;
+ struct region_info region_info;
+ u16 frequency;
+ u8 cache_fm_rds_system;
+ u8 cache_fm_ctrl;
+ u8 cache_fm_audio_ctrl0;
+ u8 cache_fm_search_ctrl0;
+ u8 power_state;
+ u8 rds_state;
+ u8 fifo_size;
+ u8 scan_state;
+ u8 mute_state;
+
+ /* for rds data device read */
+ wait_queue_head_t read_queue;
+ unsigned int users;
+ unsigned char rds_data_available;
+ unsigned int rd_index;
+};
+
+static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr,
+ "Minor number for radio device (-1 ==> auto assign)");
+
+static struct region_info region_configs[] = {
+ /* USA */
+ {
+ .channel_spacing = 20,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 75,
+ .region = 0,
+ },
+ /* Australia */
+ {
+ .channel_spacing = 20,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 1,
+ },
+ /* Europe */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 87500,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 2,
+ },
+ /* Japan */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 76000,
+ .top_frequency = 90000,
+ .deemphasis = 50,
+ .region = 3,
+ },
+ /* Japan wide band */
+ {
+ .channel_spacing = 10,
+ .bottom_frequency = 76000,
+ .top_frequency = 108000,
+ .deemphasis = 50,
+ .region = 4,
+ },
+};
+
+/*
+ * I2C Interface read / write
+ */
+static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg,
+ unsigned int value)
+{
+ struct i2c_client *client = bdev->client;
+ u8 data[2];
+
+ if (!bdev->power_state) {
+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+ return -EIO;
+ }
+
+ data[0] = reg & 0xff;
+ data[1] = value & 0xff;
+
+ if (i2c_master_send(client, data, 2) == 2) {
+ return 0;
+ } else {
+ dev_err(&bdev->client->dev, "BCM I2C error!\n");
+ dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n");
+ return -EIO;
+ }
+}
+
+static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg,
+ u8 *value)
+{
+	struct i2c_client *client = bdev->client;
+	int ret;
+
+	if (!bdev->power_state) {
+		dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+		return -EIO;
+	}
+
+	ret = i2c_smbus_read_byte_data(client, reg & 0xff);
+	if (ret < 0) {
+		dev_err(&bdev->client->dev, "BCM I2C read error!\n");
+		return ret;
+	}
+
+	value[0] = ret;
+
+	return 0;
+}
+
+static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg,
+ u8 *value, u8 duples)
+{
+ struct i2c_client *client = bdev->client;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg[2];
+ u8 buf;
+
+ if (!bdev->power_state) {
+ dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
+ return -EIO;
+ }
+
+ buf = reg & 0xff;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags & I2C_M_TEN;
+ msg[0].len = 1;
+ msg[0].buf = &buf;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags & I2C_M_TEN;
+ msg[1].flags |= I2C_M_RD;
+ msg[1].len = duples;
+ msg[1].buf = value;
+
+ return i2c_transfer(adap, msg, 2);
+}
+
+/*
+ * BCM2048 - I2C register programming helpers
+ */
+static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power)
+{
+ int err = 0;
+
+ mutex_lock(&bdev->mutex);
+
+ if (power) {
+ bdev->power_state = BCM2048_POWER_ON;
+ bdev->cache_fm_rds_system |= BCM2048_FM_ON;
+ } else {
+ bdev->cache_fm_rds_system &= ~BCM2048_FM_ON;
+ }
+
+	/*
+	 * Warning! FM cannot be turned off, because doing so breaks
+	 * the I2C communication with the chip. Drop the "if (power)"
+	 * check once the chip handles power-off correctly.
+	 */
+ if (power)
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+ bdev->cache_fm_rds_system);
+ msleep(BCM2048_DEFAULT_POWERING_DELAY);
+
+ if (!power)
+ bdev->power_state = BCM2048_POWER_OFF;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_power_state(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_FM_ON))
+ return BCM2048_POWER_ON;
+
+ return err;
+}
+
+static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
+{
+ int err;
+ u8 flags;
+
+ bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON;
+
+ if (rds_on) {
+ bdev->cache_fm_rds_system |= BCM2048_RDS_ON;
+ bdev->rds_state = BCM2048_RDS_ON;
+ flags = BCM2048_RDS_FLAG_FIFO_WLINE;
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ } else {
+ flags = 0;
+ bdev->rds_state = 0;
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
+ }
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
+ bdev->cache_fm_rds_system);
+
+ return err;
+}
+
+static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
+
+ if (!err && (value & BCM2048_RDS_ON))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_set_rds_no_lock(bdev, rds_on);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_get_rds_no_lock(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi(struct bcm2048_device *bdev)
+{
+ return bdev->rds_info.rds_pi;
+}
+
+static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev,
+ u8 enabled)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT;
+
+ if (enabled)
+ bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+ bdev->cache_fm_ctrl);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev,
+ u8 hi_lo)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION;
+
+ if (hi_lo)
+ bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
+ bdev->cache_fm_ctrl);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_HI_LO_INJECTION))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
+{
+ int err;
+
+ if (frequency < bdev->region_info.bottom_frequency ||
+ frequency > bdev->region_info.top_frequency)
+ return -EDOM;
+
+ frequency -= BCM2048_FREQUENCY_BASE;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency));
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1,
+ msb(frequency));
+
+ if (!err)
+ bdev->frequency = frequency;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (err)
+ return err;
+
+ err = compose_u16(msb, lsb);
+ err += BCM2048_FREQUENCY_BASE;
+
+ return err;
+}
+
+static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
+ u32 frequency)
+{
+ int err;
+
+ if (frequency < bdev->region_info.bottom_frequency ||
+ frequency > bdev->region_info.top_frequency)
+ return -EDOM;
+
+ frequency -= BCM2048_FREQUENCY_BASE;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0,
+ lsb(frequency));
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1,
+ msb(frequency));
+ if (!err)
+ bdev->frequency = frequency;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (err)
+ return err;
+
+ err = compose_u16(msb, lsb);
+ err += BCM2048_FREQUENCY_BASE;
+
+ return err;
+}
+
+static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d)
+{
+ int err;
+ u8 deemphasis;
+
+ if (d == BCM2048_DE_EMPHASIS_75us)
+ deemphasis = BCM2048_DE_EMPHASIS_SELECT;
+ else
+ deemphasis = 0;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT;
+ bdev->cache_fm_audio_ctrl0 |= deemphasis;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ if (!err)
+ bdev->region_info.deemphasis = d;
+
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ if (value & BCM2048_DE_EMPHASIS_SELECT)
+ return BCM2048_DE_EMPHASIS_75us;
+ else
+ return BCM2048_DE_EMPHASIS_50us;
+ }
+
+ return err;
+}
+
+static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
+{
+ int err;
+ u32 new_frequency = 0;
+
+	if (region >= ARRAY_SIZE(region_configs))
+ return -EINVAL;
+
+ mutex_lock(&bdev->mutex);
+ bdev->region_info = region_configs[region];
+ mutex_unlock(&bdev->mutex);
+
+ if (bdev->frequency < region_configs[region].bottom_frequency ||
+ bdev->frequency > region_configs[region].top_frequency)
+ new_frequency = region_configs[region].bottom_frequency;
+
+ if (new_frequency > 0) {
+ err = bcm2048_set_fm_frequency(bdev, new_frequency);
+
+ if (err)
+ goto done;
+ }
+
+ err = bcm2048_set_fm_deemphasis(bdev,
+ region_configs[region].deemphasis);
+
+done:
+ return err;
+}
+
+static int bcm2048_get_region(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+ err = bdev->region_info.region;
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+
+ if (mute)
+ bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE |
+ BCM2048_MANUAL_MUTE);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ if (!err)
+ bdev->mute_state = mute;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_mute(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ if (bdev->power_state) {
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ &value);
+ if (!err)
+ err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
+ } else {
+ err = bdev->mute_state;
+ }
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S);
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC |
+ BCM2048_AUDIO_ROUTE_I2S);
+ bdev->cache_fm_audio_ctrl0 |= route;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_audio_route(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & (BCM2048_AUDIO_ROUTE_DAC |
+ BCM2048_AUDIO_ROUTE_I2S);
+
+ return err;
+}
+
+static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+ bdev->cache_fm_audio_ctrl0 |= channels;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
+ bdev->cache_fm_audio_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_dac_output(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & (BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev,
+ u8 threshold)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ threshold &= BCM2048_SEARCH_RSSI_THRESHOLD;
+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD;
+ bdev->cache_fm_search_ctrl0 |= threshold;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+ bdev->cache_fm_search_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & BCM2048_SEARCH_RSSI_THRESHOLD;
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev,
+ u8 direction)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION;
+
+ if (direction)
+ bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION;
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
+ bdev->cache_fm_search_ctrl0);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_SEARCH_DIRECTION))
+ return BCM2048_SEARCH_DIRECTION_UP;
+
+ return err;
+}
+
+static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
+ u8 mode)
+{
+ int err, timeout, restart_rds = 0;
+ u8 value, flags;
+
+ value = mode & BCM2048_FM_AUTO_SEARCH;
+
+ flags = BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL;
+
+ mutex_lock(&bdev->mutex);
+
+ /*
+	 * If RDS is enabled and the frequency is changed, RDS stops
+	 * working, so always restart RDS afterwards if it was enabled.
+	 * Moreover, RDS must not stay enabled while changing the
+	 * frequency, because the workqueue handler could race for the
+	 * mutex if an RDS IRQ arrives while waiting for the
+	 * frequency-changed IRQ.
+	 */
+ if (bcm2048_get_rds_no_lock(bdev)) {
+ err = bcm2048_set_rds_no_lock(bdev, 0);
+ if (err)
+ goto unlock;
+ restart_rds = 1;
+ }
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags);
+
+ if (err)
+ goto unlock;
+
+ bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value);
+
+ if (mode != BCM2048_FM_AUTO_SEARCH_MODE)
+ timeout = BCM2048_DEFAULT_TIMEOUT;
+ else
+ timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
+
+ if (!wait_for_completion_timeout(&bdev->compl,
+ msecs_to_jiffies(timeout)))
+ dev_err(&bdev->client->dev, "IRQ timeout.\n");
+
+ if (value)
+ if (!bdev->scan_state)
+ err = -EIO;
+
+unlock:
+ if (restart_rds)
+ err |= bcm2048_set_rds_no_lock(bdev, 1);
+
+ mutex_unlock(&bdev->mutex);
+
+ return err;
+}
+
+static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE,
+ &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value & BCM2048_FM_AUTO_SEARCH;
+
+ return err;
+}
+
+static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MASK1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
+ u16 match)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH0, lsb(match));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH1, msb(match));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_BLKB_MATCH1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MASK1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH0, lsb(match));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH1, msb(match));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 lsb, msb;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH0, &lsb);
+ err |= bcm2048_recv_command(bdev,
+ BCM2048_I2C_RDS_PI_MATCH1, &msb);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(msb, lsb);
+
+ return err;
+}
+
+static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev,
+ BCM2048_I2C_FM_RDS_MASK0, lsb(mask));
+ err |= bcm2048_send_command(bdev,
+ BCM2048_I2C_FM_RDS_MASK1, msb(mask));
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value0, value1;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(value1, value0);
+
+ return err;
+}
+
+static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value0, value1;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0);
+ err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return compose_u16(value1, value0);
+
+ return err;
+}
+
+static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev)
+{
+ return bdev->region_info.bottom_frequency;
+}
+
+static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
+{
+ return bdev->region_info.top_frequency;
+}
+
+static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ /* Perform read as the manual indicates */
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ &value);
+ value &= ~BCM2048_BEST_TUNE_MODE;
+
+ if (mode)
+ value |= BCM2048_BEST_TUNE_MODE;
+ err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ value);
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
+ &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err && (value & BCM2048_BEST_TUNE_MODE))
+ return BCM2048_ITEM_ENABLED;
+
+ return err;
+}
+
+static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev)
+{
+ int err = 0;
+ s8 value;
+
+ mutex_lock(&bdev->mutex);
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value);
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value;
+
+ return err;
+}
+
+static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev)
+{
+ int err;
+ s8 value;
+
+ mutex_lock(&bdev->mutex);
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value);
+ mutex_unlock(&bdev->mutex);
+
+ if (!err)
+ return value;
+
+ return err;
+}
+
+static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline);
+
+ if (!err)
+ bdev->fifo_size = wline;
+
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_wline(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 value;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ bdev->fifo_size = value;
+ return value;
+ }
+
+ return err;
+}
+
+static int bcm2048_checkrev(struct bcm2048_device *bdev)
+{
+ int err;
+ u8 version;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version);
+
+ mutex_unlock(&bdev->mutex);
+
+ if (!err) {
+ dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n",
+ version);
+ return version;
+ }
+
+ return err;
+}
+
+static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, j = 0, ce = 0, cr = 0;
+ char data_buffer[BCM2048_MAX_RDS_RT+1];
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+ if (bdev->rds_info.rds_rt[i]) {
+ ce = i;
+ /* Skip the carriage return */
+ if (bdev->rds_info.rds_rt[i] != 0x0d) {
+ data_buffer[j++] = bdev->rds_info.rds_rt[i];
+ } else {
+ cr = i;
+ break;
+ }
+ }
+ }
+
+ if (j <= BCM2048_MAX_RDS_RT)
+ data_buffer[j] = 0;
+
+ for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
+ if (!bdev->rds_info.rds_rt[i]) {
+ if (cr && (i < cr)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+ if (i < ce) {
+ if (cr && (i >= cr))
+ break;
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+ }
+
+ memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, j = 0;
+ char data_buffer[BCM2048_MAX_RDS_PS+1];
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (i = 0; i < BCM2048_MAX_RDS_PS; i++) {
+ if (bdev->rds_info.rds_ps[i]) {
+ data_buffer[j++] = bdev->rds_info.rds_ps[i];
+ } else {
+ if (i < (BCM2048_MAX_RDS_PS - 1)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+ }
+
+ if (j <= BCM2048_MAX_RDS_PS)
+ data_buffer[j] = 0;
+
+ memcpy(data, data_buffer, sizeof(data_buffer));
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev)
+{
+ int i, cnt = 0;
+ u16 pi;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ /* Block A match, only data without crc errors taken */
+ if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) {
+
+ pi = ((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]);
+
+ if (!bdev->rds_info.rds_pi) {
+ bdev->rds_info.rds_pi = pi;
+ return;
+ }
+ if (pi != bdev->rds_info.rds_pi) {
+ cnt++;
+ if (cnt > 3) {
+ bdev->rds_info.rds_pi = pi;
+ cnt = 0;
+ }
+ } else {
+ cnt = 0;
+ }
+ }
+ }
+}
+
+static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i)
+{
+ return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK;
+}
+
+static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i,
+ int index, int crc)
+{
+ /* Good data will overwrite poor data */
+ if (crc) {
+ if (!bdev->rds_info.rds_rt[index])
+ bdev->rds_info.rds_rt[index] =
+ bdev->rds_info.radio_text[i+1];
+ if (!bdev->rds_info.rds_rt[index+1])
+ bdev->rds_info.rds_rt[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ } else {
+ bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1];
+ bdev->rds_info.rds_rt[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ }
+}
+
+static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i)
+{
+ int crc, rt_id, rt_group_b, rt_ab, index = 0;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return -EIO;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_B) {
+
+ rt_id = (bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_BLOCK_MASK);
+ rt_group_b = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_GROUP_AB_MASK;
+ rt_ab = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_RT_AB_MASK;
+
+ if (rt_group_b != bdev->rds_info.rds_rt_group_b) {
+ memset(bdev->rds_info.rds_rt, 0,
+ sizeof(bdev->rds_info.rds_rt));
+ bdev->rds_info.rds_rt_group_b = rt_group_b;
+ }
+
+ if (rt_id == BCM2048_RDS_RT) {
+ /* A to B or (vice versa), means: clear screen */
+ if (rt_ab != bdev->rds_info.rds_rt_ab) {
+ memset(bdev->rds_info.rds_rt, 0,
+ sizeof(bdev->rds_info.rds_rt));
+ bdev->rds_info.rds_rt_ab = rt_ab;
+ }
+
+ index = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_RT_INDEX;
+
+ if (bdev->rds_info.rds_rt_group_b)
+ index <<= 1;
+ else
+ index <<= 2;
+
+ return index;
+ }
+ }
+
+ return -EIO;
+}
+
+static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return 0;
+
+ BUG_ON((index+2) >= BCM2048_MAX_RDS_RT);
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_C) {
+ if (bdev->rds_info.rds_rt_group_b)
+ return 1;
+ bcm2048_parse_rds_rt_block(bdev, i, index, crc);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return;
+
+ BUG_ON((index+4) >= BCM2048_MAX_RDS_RT);
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_D)
+ bcm2048_parse_rds_rt_block(bdev, i, index+2, crc);
+}
+
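+/*
+ * An RDS group arrives as four consecutive blocks A-D: block A carries
+ * the PI code, block B the group type (radio text vs. programme service)
+ * and the segment index, and blocks C and D carry the payload (radio
+ * text uses both, the programme service name uses block D). The parsers
+ * below walk the FIFO and only advance from one block to the next when
+ * the previous block matched and passed the CRC check.
+ */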
+static int bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
+{
+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ if (match_b) {
+ match_b = 0;
+ index = bcm2048_parse_rt_match_b(bdev, i);
+ if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5))
+ match_c = 1;
+ continue;
+ } else if (match_c) {
+ match_c = 0;
+ if (bcm2048_parse_rt_match_c(bdev, i, index))
+ match_d = 1;
+ continue;
+ } else if (match_d) {
+ match_d = 0;
+ bcm2048_parse_rt_match_d(bdev, i, index);
+ continue;
+ }
+
+ /* Skip erroneous blocks due to messed up A block altogether */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+ == BCM2048_RDS_BLOCK_A) {
+ crc = bcm2048_rds_block_crc(bdev, i);
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ continue;
+			/* Synchronize to a good RDS PI */
+ if (((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]) ==
+ bdev->rds_info.rds_pi)
+ match_b = 1;
+ }
+ }
+
+ return 0;
+}
+
+static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
+ int index, int crc)
+{
+ /* Good data will overwrite poor data */
+ if (crc) {
+ if (!bdev->rds_info.rds_ps[index])
+ bdev->rds_info.rds_ps[index] =
+ bdev->rds_info.radio_text[i+1];
+ if (!bdev->rds_info.rds_ps[index+1])
+ bdev->rds_info.rds_ps[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ } else {
+ bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1];
+ bdev->rds_info.rds_ps[index+1] =
+ bdev->rds_info.radio_text[i+2];
+ }
+}
+
+static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return 0;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_C)
+ return 1;
+
+ return 0;
+}
+
+static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i,
+ int index)
+{
+ int crc;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return;
+
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_D)
+ bcm2048_parse_rds_ps_block(bdev, i, index, crc);
+}
+
+static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i)
+{
+ int crc, index, ps_id, ps_group;
+
+ crc = bcm2048_rds_block_crc(bdev, i);
+
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ return -EIO;
+
+ /* Block B Radio PS match */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
+ BCM2048_RDS_BLOCK_B) {
+ ps_id = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_BLOCK_MASK;
+ ps_group = bdev->rds_info.radio_text[i+1] &
+ BCM2048_RDS_GROUP_AB_MASK;
+
+		/*
+		 * Poor RSSI will lead to RDS data corruption, so require
+		 * three identical sequential values before accepting a
+		 * group change
+		 */
+ if (ps_group != bdev->rds_info.rds_ps_group) {
+ if (crc == BCM2048_RDS_CRC_NONE) {
+ bdev->rds_info.rds_ps_group_cnt++;
+ if (bdev->rds_info.rds_ps_group_cnt > 2) {
+ bdev->rds_info.rds_ps_group = ps_group;
+ bdev->rds_info.rds_ps_group_cnt = 0;
+ dev_err(&bdev->client->dev,
+ "RDS PS Group change!\n");
+ } else {
+ return -EIO;
+ }
+ } else {
+ bdev->rds_info.rds_ps_group_cnt = 0;
+ }
+ }
+
+ if (ps_id == BCM2048_RDS_PS) {
+ index = bdev->rds_info.radio_text[i+2] &
+ BCM2048_RDS_PS_INDEX;
+ index <<= 1;
+ return index;
+ }
+ }
+
+ return -EIO;
+}
+
+static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev)
+{
+ int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
+
+ for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
+
+ if (match_b) {
+ match_b = 0;
+ index = bcm2048_parse_ps_match_b(bdev, i);
+ if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1))
+ match_c = 1;
+ continue;
+ } else if (match_c) {
+ match_c = 0;
+ if (bcm2048_parse_ps_match_c(bdev, i, index))
+ match_d = 1;
+ continue;
+ } else if (match_d) {
+ match_d = 0;
+ bcm2048_parse_ps_match_d(bdev, i, index);
+ continue;
+ }
+
+ /* Skip erroneous blocks due to messed up A block altogether */
+ if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK)
+ == BCM2048_RDS_BLOCK_A) {
+ crc = bcm2048_rds_block_crc(bdev, i);
+ if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
+ continue;
+			/* Synchronize to a good RDS PI */
+ if (((bdev->rds_info.radio_text[i+1] << 8) +
+ bdev->rds_info.radio_text[i+2]) ==
+ bdev->rds_info.rds_pi)
+ match_b = 1;
+ }
+ }
+}
+
+static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev)
+{
+ int err;
+
+ mutex_lock(&bdev->mutex);
+
+ err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA,
+ bdev->rds_info.radio_text, bdev->fifo_size);
+ if (err != 2) {
+ dev_err(&bdev->client->dev, "RDS Read problem\n");
+ mutex_unlock(&bdev->mutex);
+ return;
+ }
+
+ bdev->rds_info.text_len = bdev->fifo_size;
+
+ bcm2048_parse_rds_pi(bdev);
+ bcm2048_parse_rds_rt(bdev);
+ bcm2048_parse_rds_ps(bdev);
+
+ mutex_unlock(&bdev->mutex);
+
+ wake_up_interruptible(&bdev->read_queue);
+}
+
+static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data)
+{
+ int err = 0, i, p = 0;
+ char *data_buffer;
+
+ mutex_lock(&bdev->mutex);
+
+ if (!bdev->rds_info.text_len) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ data_buffer = kzalloc(BCM2048_MAX_RDS_RADIO_TEXT*5, GFP_KERNEL);
+ if (!data_buffer) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ for (i = 0; i < bdev->rds_info.text_len; i++) {
+ p += sprintf(data_buffer+p, "%x ",
+ bdev->rds_info.radio_text[i]);
+ }
+
+ memcpy(data, data_buffer, p);
+ kfree(data_buffer);
+
+unlock:
+ mutex_unlock(&bdev->mutex);
+ return err;
+}
+
+/*
+ * BCM2048 default initialization sequence
+ */
+static int bcm2048_init(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT |
+ BCM2048_DAC_OUTPUT_RIGHT);
+
+exit:
+ return err;
+}
+
+/*
+ * BCM2048 default deinitialization sequence
+ */
+static int bcm2048_deinit(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_audio_route(bdev, 0);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_dac_output(bdev, 0);
+ if (err < 0)
+ goto exit;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+ if (err < 0)
+ goto exit;
+
+exit:
+ return err;
+}
+
+/*
+ * BCM2048 probe sequence
+ */
+static int bcm2048_probe(struct bcm2048_device *bdev)
+{
+ int err;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_checkrev(bdev);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_fm_search_rssi_threshold(bdev,
+ BCM2048_DEFAULT_RSSI_THRESHOLD);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_get_rds_wline(bdev);
+ if (err < BCM2048_DEFAULT_RDS_WLINE)
+ err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE);
+ if (err < 0)
+ goto unlock;
+
+ err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+ init_waitqueue_head(&bdev->read_queue);
+ bdev->rds_data_available = 0;
+ bdev->rd_index = 0;
+ bdev->users = 0;
+
+unlock:
+ return err;
+}
+
+/*
+ * BCM2048 workqueue handler
+ */
+static void bcm2048_work(struct work_struct *work)
+{
+ struct bcm2048_device *bdev;
+ u8 flag_lsb, flag_msb, flags;
+
+ bdev = container_of(work, struct bcm2048_device, work);
+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
+ bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb);
+
+ if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
+ BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) {
+
+ if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)
+ bdev->scan_state = BCM2048_SCAN_FAIL;
+ else
+ bdev->scan_state = BCM2048_SCAN_OK;
+
+ complete(&bdev->compl);
+ }
+
+ if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) {
+ bcm2048_rds_fifo_receive(bdev);
+ if (bdev->rds_state) {
+ flags = BCM2048_RDS_FLAG_FIFO_WLINE;
+ bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
+ flags);
+ }
+ bdev->rds_data_available = 1;
+ bdev->rd_index = 0; /* new data, new start */
+ }
+}
+
+/*
+ * BCM2048 interrupt handler
+ */
+static irqreturn_t bcm2048_handler(int irq, void *dev)
+{
+ struct bcm2048_device *bdev = dev;
+
+ dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n");
+ if (bdev->power_state)
+ schedule_work(&bdev->work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * BCM2048 sysfs interface definitions
+ */
+#define property_write(prop, type, mask, check) \
+static ssize_t bcm2048_##prop##_write(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ type value; \
+ int err; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ sscanf(buf, mask, &value); \
+ \
+ if (check) \
+ return -EDOM; \
+ \
+ err = bcm2048_set_##prop(bdev, value); \
+ \
+ return err < 0 ? err : count; \
+}
+
+#define property_read(prop, size, mask) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ int value; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ value = bcm2048_get_##prop(bdev); \
+ \
+ if (value >= 0) \
+ value = sprintf(buf, mask "\n", value); \
+ \
+ return value; \
+}
+
+#define property_signed_read(prop, size, mask) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ size value; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ value = bcm2048_get_##prop(bdev); \
+ \
+ value = sprintf(buf, mask "\n", value); \
+ \
+ return value; \
+}
+
+#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
+property_write(prop, signal size, mask, check) \
+property_read(prop, size, mask)
+
+#define property_str_read(prop, size) \
+static ssize_t bcm2048_##prop##_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct bcm2048_device *bdev = dev_get_drvdata(dev); \
+ int count; \
+ u8 *out; \
+ \
+ if (!bdev) \
+ return -ENODEV; \
+ \
+ out = kzalloc(size + 1, GFP_KERNEL); \
+ if (!out) \
+ return -ENOMEM; \
+ \
+ bcm2048_get_##prop(bdev, out); \
+ count = sprintf(buf, "%s\n", out); \
+ \
+ kfree(out); \
+ \
+ return count; \
+}
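+
+/*
+ * Each DEFINE_SYSFS_PROPERTY(prop, ...) below expands to a matching
+ * bcm2048_##prop##_write() and bcm2048_##prop##_read() pair; e.g. the
+ * "mute" line generates bcm2048_mute_write() and bcm2048_mute_read(),
+ * which wrap bcm2048_set_mute() and bcm2048_get_mute() respectively.
+ */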
+
+DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
+
+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
+
+DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
+property_read(rds_pi, unsigned int, "%x")
+property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
+property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
+
+property_read(fm_rds_flags, unsigned int, "%u")
+property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT*5)
+
+property_read(region_bottom_frequency, unsigned int, "%u")
+property_read(region_top_frequency, unsigned int, "%u")
+property_signed_read(fm_carrier_error, int, "%d")
+property_signed_read(fm_rssi, int, "%d")
+DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
+
+static struct device_attribute attrs[] = {
+ __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+ bcm2048_power_state_write),
+ __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+ bcm2048_mute_write),
+ __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+ bcm2048_audio_route_write),
+ __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+ bcm2048_dac_output_write),
+ __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+ bcm2048_fm_hi_lo_injection_read,
+ bcm2048_fm_hi_lo_injection_write),
+ __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+ bcm2048_fm_frequency_write),
+ __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+ bcm2048_fm_af_frequency_read,
+ bcm2048_fm_af_frequency_write),
+ __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+ bcm2048_fm_deemphasis_write),
+ __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+ bcm2048_fm_rds_mask_write),
+ __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+ bcm2048_fm_best_tune_mode_read,
+ bcm2048_fm_best_tune_mode_write),
+ __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_rssi_threshold_read,
+ bcm2048_fm_search_rssi_threshold_write),
+ __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_mode_direction_read,
+ bcm2048_fm_search_mode_direction_write),
+ __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+ bcm2048_fm_search_tune_mode_read,
+ bcm2048_fm_search_tune_mode_write),
+ __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+ bcm2048_rds_write),
+ __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+ bcm2048_rds_b_block_mask_read,
+ bcm2048_rds_b_block_mask_write),
+ __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+ bcm2048_rds_b_block_match_read,
+ bcm2048_rds_b_block_match_write),
+ __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+ bcm2048_rds_pi_mask_write),
+ __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+ bcm2048_rds_pi_match_write),
+ __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+ bcm2048_rds_wline_write),
+ __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
+ __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
+ __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
+ __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
+ __ATTR(region_bottom_frequency, S_IRUGO,
+ bcm2048_region_bottom_frequency_read, NULL),
+ __ATTR(region_top_frequency, S_IRUGO,
+ bcm2048_region_top_frequency_read, NULL),
+ __ATTR(fm_carrier_error, S_IRUGO,
+ bcm2048_fm_carrier_error_read, NULL),
+ __ATTR(fm_rssi, S_IRUGO,
+ bcm2048_fm_rssi_read, NULL),
+ __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+ bcm2048_region_write),
+ __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+};
+
+static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
+ int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ device_remove_file(&bdev->client->dev, &attrs[i]);
+
+ return 0;
+}
+
+static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(attrs); i++) {
+ if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) {
+ dev_err(&bdev->client->dev,
+ "could not register sysfs entry\n");
+ err = -EBUSY;
+ bcm2048_sysfs_unregister_properties(bdev, i);
+ break;
+ }
+ }
+
+ return err;
+}
+
+
+static int bcm2048_fops_open(struct file *file)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+
+ bdev->users++;
+ bdev->rd_index = 0;
+ bdev->rds_data_available = 0;
+
+ return 0;
+}
+
+static int bcm2048_fops_release(struct file *file)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+
+ bdev->users--;
+
+ return 0;
+}
+
+static unsigned int bcm2048_fops_poll(struct file *file,
+ struct poll_table_struct *pts)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+ int retval = 0;
+
+ poll_wait(file, &bdev->read_queue, pts);
+
+ if (bdev->rds_data_available)
+ retval = POLLIN | POLLRDNORM;
+
+ return retval;
+}
+
+static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bcm2048_device *bdev = video_drvdata(file);
+ int i;
+ int retval = 0;
+
+ /* we return at least 3 bytes, one block */
+ count = (count / 3) * 3; /* only multiples of 3 */
+ if (count < 3)
+ return -ENOBUFS;
+
+ while (!bdev->rds_data_available) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EWOULDBLOCK;
+ goto done;
+ }
+ if (wait_event_interruptible(bdev->read_queue,
+ bdev->rds_data_available) < 0) {
+ retval = -EINTR;
+ goto done;
+ }
+ }
+
+ mutex_lock(&bdev->mutex);
+ /* copy data to userspace */
+ i = bdev->fifo_size - bdev->rd_index;
+ if (count > i)
+ count = (i / 3) * 3;
+
+ i = 0;
+ while (i < count) {
+		unsigned char tmpbuf[3];
+
+		tmpbuf[0] = bdev->rds_info.radio_text[bdev->rd_index+i+2];
+		tmpbuf[1] = bdev->rds_info.radio_text[bdev->rd_index+i+1];
+		tmpbuf[2] = ((bdev->rds_info.radio_text[bdev->rd_index+i]
+				& 0xf0) >> 4);
+		if ((bdev->rds_info.radio_text[bdev->rd_index+i] &
+		    BCM2048_RDS_CRC_MASK) == BCM2048_RDS_CRC_UNRECOVARABLE)
+			tmpbuf[2] |= 0x80;
+ if (copy_to_user(buf+i, tmpbuf, 3)) {
+ retval = -EFAULT;
+ break;
+ }
+ i += 3;
+ }
+
+ bdev->rd_index += i;
+ if (bdev->rd_index >= bdev->fifo_size)
+ bdev->rds_data_available = 0;
+
+ mutex_unlock(&bdev->mutex);
+ if (retval == 0)
+ retval = i;
+
+done:
+ return retval;
+}
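+
+/*
+ * Each 3-byte record returned by bcm2048_fops_read() holds the RDS block
+ * data LSB, then the MSB, then the block id in the low nibble of the
+ * third byte, with bit 7 set when the block was received with an
+ * uncorrectable error; this mirrors the per-block layout commonly used
+ * by V4L2 radio RDS readers.
+ */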
+
+/*
+ * bcm2048_fops - file operations interface
+ */
+static const struct v4l2_file_operations bcm2048_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ /* for RDS read support */
+ .open = bcm2048_fops_open,
+ .release = bcm2048_fops_release,
+ .read = bcm2048_fops_read,
+ .poll = bcm2048_fops_poll
+};
+
+/*
+ * Video4Linux Interface
+ */
+static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = {
+ {
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_BASS,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_TREBLE,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+ {
+ .id = V4L2_CID_AUDIO_MUTE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_AUDIO_LOUDNESS,
+ .flags = V4L2_CTRL_FLAG_DISABLED,
+ },
+};
+
+static int bcm2048_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *capability)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+ strlcpy(capability->driver, BCM2048_DRIVER_NAME,
+ sizeof(capability->driver));
+ strlcpy(capability->card, BCM2048_DRIVER_CARD,
+ sizeof(capability->card));
+ snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
+ capability->version = BCM2048_DRIVER_VERSION;
+ capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_input(struct file *filp, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_input(struct file *filp, void *priv,
+ unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) {
+ if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) {
+ *qc = bcm2048_v4l2_queryctrl[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+
+ if (!bdev)
+ return -ENODEV;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ err = bcm2048_get_mute(bdev);
+ if (err >= 0)
+ ctrl->value = err;
+ break;
+ }
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+
+ if (!bdev)
+ return -ENODEV;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ if (ctrl->value) {
+ if (bdev->power_state) {
+ err = bcm2048_set_mute(bdev, ctrl->value);
+ err |= bcm2048_deinit(bdev);
+ }
+ } else {
+ if (!bdev->power_state) {
+ err = bcm2048_init(bdev);
+ err |= bcm2048_set_mute(bdev, ctrl->value);
+ }
+ }
+ break;
+ }
+
+ return err;
+}
+
+static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
+ struct v4l2_audio *audio)
+{
+ if (audio->index > 1)
+ return -EINVAL;
+
+ strncpy(audio->name, "Radio", 32);
+ audio->capability = V4L2_AUDCAP_STEREO;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_audio(struct file *file, void *priv,
+ const struct v4l2_audio *audio)
+{
+ if (audio->index != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *tuner)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ s8 f_error;
+ s8 rssi;
+
+ if (!bdev)
+ return -ENODEV;
+
+ if (tuner->index > 0)
+ return -EINVAL;
+
+ strncpy(tuner->name, "FM Receiver", 32);
+ tuner->type = V4L2_TUNER_RADIO;
+ tuner->rangelow =
+ dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
+ tuner->rangehigh =
+ dev_to_v4l2(bcm2048_get_region_top_frequency(bdev));
+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
+ tuner->audmode = V4L2_TUNER_MODE_STEREO;
+ tuner->afc = 0;
+ if (bdev->power_state) {
+ /*
+ * Report frequencies with high carrier errors to have zero
+ * signal level
+ */
+ f_error = bcm2048_get_fm_carrier_error(bdev);
+ if (f_error < BCM2048_FREQ_ERROR_FLOOR ||
+ f_error > BCM2048_FREQ_ERROR_ROOF) {
+ tuner->signal = 0;
+ } else {
+ /*
+ * RSSI level -60 dB is defined to report full
+			 * signal strength
+ */
+ rssi = bcm2048_get_fm_rssi(bdev);
+ if (rssi >= BCM2048_RSSI_LEVEL_BASE) {
+ tuner->signal = 0xFFFF;
+ } else if (rssi > BCM2048_RSSI_LEVEL_ROOF) {
+ tuner->signal = (rssi +
+ BCM2048_RSSI_LEVEL_ROOF_NEG)
+ * BCM2048_SIGNAL_MULTIPLIER;
+ } else {
+ tuner->signal = 0;
+ }
+ }
+ } else {
+ tuner->signal = 0;
+ }
+
+ return 0;
+}
+
+static int bcm2048_vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *tuner)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+
+ if (!bdev)
+ return -ENODEV;
+
+ if (tuner->index > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm2048_vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *freq)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err = 0;
+ int f;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ freq->type = V4L2_TUNER_RADIO;
+ f = bcm2048_get_fm_frequency(bdev);
+
+ if (f < 0)
+ err = f;
+ else
+ freq->frequency = dev_to_v4l2(f);
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *freq)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err;
+
+ if (freq->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency));
+ err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE);
+
+ return err;
+}
+
+static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ const struct v4l2_hw_freq_seek *seek)
+{
+ struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
+ int err;
+
+ if (!bdev->power_state)
+ return -ENODEV;
+
+ if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO))
+ return -EINVAL;
+
+ err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward);
+ err |= bcm2048_set_fm_search_tune_mode(bdev,
+ BCM2048_FM_AUTO_SEARCH_MODE);
+
+ return err;
+}
+
+static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+ .vidioc_querycap = bcm2048_vidioc_querycap,
+ .vidioc_g_input = bcm2048_vidioc_g_input,
+ .vidioc_s_input = bcm2048_vidioc_s_input,
+ .vidioc_queryctrl = bcm2048_vidioc_queryctrl,
+ .vidioc_g_ctrl = bcm2048_vidioc_g_ctrl,
+ .vidioc_s_ctrl = bcm2048_vidioc_s_ctrl,
+ .vidioc_g_audio = bcm2048_vidioc_g_audio,
+ .vidioc_s_audio = bcm2048_vidioc_s_audio,
+ .vidioc_g_tuner = bcm2048_vidioc_g_tuner,
+ .vidioc_s_tuner = bcm2048_vidioc_s_tuner,
+ .vidioc_g_frequency = bcm2048_vidioc_g_frequency,
+ .vidioc_s_frequency = bcm2048_vidioc_s_frequency,
+ .vidioc_s_hw_freq_seek = bcm2048_vidioc_s_hw_freq_seek,
+};
+
+/*
+ * bcm2048_viddev_template - video device interface
+ */
+static struct video_device bcm2048_viddev_template = {
+ .fops = &bcm2048_fops,
+ .name = BCM2048_DRIVER_NAME,
+ .release = video_device_release,
+ .ioctl_ops = &bcm2048_ioctl_ops,
+};
+
+/*
+ * I2C driver interface
+ */
+static int bcm2048_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bcm2048_device *bdev;
+ int err, skip_release = 0;
+
+ bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+ if (!bdev) {
+ dev_dbg(&client->dev, "Failed to alloc video device.\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ bdev->videodev = video_device_alloc();
+ if (!bdev->videodev) {
+ dev_dbg(&client->dev, "Failed to alloc video device.\n");
+ err = -ENOMEM;
+ goto free_bdev;
+ }
+
+ bdev->client = client;
+ i2c_set_clientdata(client, bdev);
+ mutex_init(&bdev->mutex);
+ init_completion(&bdev->compl);
+ INIT_WORK(&bdev->work, bcm2048_work);
+
+ if (client->irq) {
+ err = request_irq(client->irq,
+ bcm2048_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ client->name, bdev);
+ if (err < 0) {
+ dev_err(&client->dev, "Could not request IRQ\n");
+ goto free_vdev;
+ }
+ dev_dbg(&client->dev, "IRQ requested.\n");
+ } else {
+ dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
+ }
+
+ *bdev->videodev = bcm2048_viddev_template;
+ video_set_drvdata(bdev->videodev, bdev);
+ if (video_register_device(bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
+ dev_dbg(&client->dev, "Could not register video device.\n");
+ err = -EIO;
+ goto free_irq;
+ }
+
+ err = bcm2048_sysfs_register_properties(bdev);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Could not register sysfs interface.\n");
+ goto free_registration;
+ }
+
+ err = bcm2048_probe(bdev);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Failed to probe device information.\n");
+ goto free_sysfs;
+ }
+
+ return 0;
+
+free_sysfs:
+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+free_registration:
+ video_unregister_device(bdev->videodev);
+ /* video_unregister_device frees bdev->videodev */
+ bdev->videodev = NULL;
+ skip_release = 1;
+free_irq:
+ if (client->irq)
+ free_irq(client->irq, bdev);
+free_vdev:
+ if (!skip_release)
+ video_device_release(bdev->videodev);
+ i2c_set_clientdata(client, NULL);
+free_bdev:
+ kfree(bdev);
+exit:
+ return err;
+}
+
+static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
+{
+ struct bcm2048_device *bdev = i2c_get_clientdata(client);
+ struct video_device *vd;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (bdev) {
+ vd = bdev->videodev;
+
+ bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
+
+ if (vd)
+ video_unregister_device(vd);
+
+ if (bdev->power_state)
+ bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
+
+ if (client->irq > 0)
+ free_irq(client->irq, bdev);
+
+ cancel_work_sync(&bdev->work);
+
+ kfree(bdev);
+ }
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/*
+ * bcm2048_i2c_driver - i2c driver interface
+ */
+static const struct i2c_device_id bcm2048_id[] = {
+ { "bcm2048" , 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bcm2048_id);
+
+static struct i2c_driver bcm2048_i2c_driver = {
+ .driver = {
+ .name = BCM2048_DRIVER_NAME,
+ },
+ .probe = bcm2048_i2c_driver_probe,
+ .remove = __exit_p(bcm2048_i2c_driver_remove),
+ .id_table = bcm2048_id,
+};
+
+/*
+ * Module Interface
+ */
+static int __init bcm2048_module_init(void)
+{
+ pr_info(BCM2048_DRIVER_DESC "\n");
+
+ return i2c_add_driver(&bcm2048_i2c_driver);
+}
+module_init(bcm2048_module_init);
+
+static void __exit bcm2048_module_exit(void)
+{
+ i2c_del_driver(&bcm2048_i2c_driver);
+}
+module_exit(bcm2048_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BCM2048_DRIVER_DESC);
+MODULE_VERSION("0.0.2");
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
new file mode 100644
index 000000000000..4c90a32db795
--- /dev/null
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/media/bcm2048/radio-bcm2048.h
+ *
+ * Property and command definitions for bcm2048 radio receiver chip.
+ *
+ * Copyright (C) Nokia Corporation
+ * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef BCM2048_H
+#define BCM2048_H
+
+#define BCM2048_NAME "bcm2048"
+#define BCM2048_I2C_ADDR 0x22
+
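+/*
+ * Example (hypothetical board-code sketch, not part of this driver): a
+ * platform would typically instantiate the chip with these constants
+ * through the standard I2C board-info mechanism, e.g.:
+ *
+ *	static struct i2c_board_info bcm2048_board_info __initdata = {
+ *		I2C_BOARD_INFO(BCM2048_NAME, BCM2048_I2C_ADDR),
+ *	};
+ *
+ * The variable name above is illustrative only.
+ */
+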
+#endif /* ifndef BCM2048_H */
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 6cb74dacc69d..a2a5182570c5 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -329,11 +329,14 @@ static int init(struct cxd *ci)
break;
#if 0
- status = write_reg(ci, 0x09, 0x4D); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ status = write_reg(ci, 0x09, 0x4D);
if (status < 0)
break;
#endif
- status = write_reg(ci, 0x0A, 0xA7); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
+ /* TOSTRT = 8, Mode B (gated clock), falling Edge,
+ * Serial, POL=HIGH, MSB */
+ status = write_reg(ci, 0x0A, 0xA7);
if (status < 0)
break;
@@ -589,7 +592,7 @@ static int campoll(struct cxd *ci)
read_reg(ci, 0x01, &slotstat);
if (!(2&slotstat)) {
if (!ci->slot_stat) {
- ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
+ ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
write_regm(ci, 0x03, 0x08, 0x08);
}
@@ -601,7 +604,8 @@ static int campoll(struct cxd *ci)
ci->ready = 0;
}
}
- if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
+ if (istat&8 &&
+ ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index ff48fce94fcb..b942bf73c43f 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -19,6 +19,7 @@
* Prabhakar Lad <prabhakar.lad@ti.com>
*/
+#include <linux/delay.h>
#include "dm365_isif.h"
#include "vpfe_mc_capture.h"
@@ -918,7 +919,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
(0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
- for (i = 0 ; i < vdfc->num_vdefects; i++) {
+ for (i = 0; i < vdfc->num_vdefects; i++) {
count = DFC_WRITE_WAIT_COUNT;
while (count &&
(isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 24d98a6866bb..1f3b0f9a8d10 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -346,7 +346,7 @@ static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
}
mutex_unlock(&mdev->graph_mutex);
- return (ret == 0) ? ret : -ETIMEDOUT ;
+ return ret ? -ETIMEDOUT : 0;
}
/*
@@ -1201,6 +1201,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long addr;
int ret;
+ if (count == 0)
+ return -ENOBUFS;
ret = mutex_lock_interruptible(&video->lock);
if (ret)
goto streamoff;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index df0aeec8b588..ca9a70245233 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -32,7 +32,7 @@ struct vpfe_device;
* if there was no buffer previously queued.
*/
struct vpfe_video_operations {
- int(*queue) (struct vpfe_device *vpfe_dev, unsigned long addr);
+ int (*queue)(struct vpfe_device *vpfe_dev, unsigned long addr);
};
enum vpfe_pipeline_stream_state {
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index e729e52639c5..97e7a9b48ac2 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -299,7 +299,7 @@ dt3155_buf_queue(struct vb2_buffer *vb)
* end driver-specific callbacks
*/
-const struct vb2_ops q_ops = {
+static const struct vb2_ops q_ops = {
.queue_setup = dt3155_queue_setup,
.wait_prepare = dt3155_wait_prepare,
.wait_finish = dt3155_wait_finish,
diff --git a/drivers/staging/media/go7007/Kconfig b/drivers/staging/media/go7007/Kconfig
index 04bd0fba7b1f..95a3af644a92 100644
--- a/drivers/staging/media/go7007/Kconfig
+++ b/drivers/staging/media/go7007/Kconfig
@@ -13,7 +13,6 @@ config VIDEO_GO7007
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
- default N
---help---
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip.
@@ -24,7 +23,6 @@ config VIDEO_GO7007
config VIDEO_GO7007_USB
tristate "WIS GO7007 USB support"
depends on VIDEO_GO7007 && USB
- default N
---help---
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip over USB.
@@ -46,7 +44,6 @@ config VIDEO_GO7007_LOADER
config VIDEO_GO7007_USB_S2250_BOARD
tristate "Sensoray 2250/2251 support"
depends on VIDEO_GO7007_USB && USB
- default N
---help---
This is a video4linux driver for the Sensoray 2250/2251 device.
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c2fb6d..491d0e697f5a 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -28,7 +28,7 @@ struct fw_config {
const char * const fw_name2;
};
-struct fw_config fw_configs[] = {
+static struct fw_config fw_configs[] = {
{ 0x1943, 0xa250, "go7007/s2250-1.fw", "go7007/s2250-2.fw" },
{ 0x093b, 0xa002, "go7007/px-m402u.fw", NULL },
{ 0x093b, 0xa004, "go7007/px-tv402u.fw", NULL },
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
if (usbdev->descriptor.bNumConfigurations != 1) {
dev_err(&interface->dev, "can't handle multiple config\n");
- return -ENODEV;
+ goto failed2;
}
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
return 0;
failed2:
+ usb_put_dev(usbdev);
dev_err(&interface->dev, "probe failed\n");
return -ENODEV;
}
@@ -115,6 +116,7 @@ failed2:
static void go7007_loader_disconnect(struct usb_interface *interface)
{
dev_info(&interface->dev, "disconnect\n");
+ usb_put_dev(interface_to_usbdev(interface));
usb_set_intfdata(interface, NULL);
}
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index edc52e2630a9..bdf414e19c8f 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -433,7 +433,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return set_capture_size(go, fmt, 0);
}
-static int go7007_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+static int go7007_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
@@ -737,7 +738,8 @@ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
if (a->index >= go->board_info->num_aud_inputs)
return -EINVAL;
- strlcpy(a->name, go->board_info->aud_inputs[a->index].name, sizeof(a->name));
+ strlcpy(a->name, go->board_info->aud_inputs[a->index].name,
+ sizeof(a->name));
a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
@@ -747,12 +749,14 @@ static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
struct go7007 *go = video_drvdata(file);
a->index = go->aud_input;
- strlcpy(a->name, go->board_info->aud_inputs[go->aud_input].name, sizeof(a->name));
+ strlcpy(a->name, go->board_info->aud_inputs[go->aud_input].name,
+ sizeof(a->name));
a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
-static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
+static int vidioc_s_audio(struct file *file, void *fh,
+ const struct v4l2_audio *a)
{
struct go7007 *go = video_drvdata(file);
@@ -760,7 +764,7 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *
return -EINVAL;
go->aud_input = a->index;
v4l2_subdev_call(go->sd_audio, audio, s_routing,
- go->board_info->aud_inputs[go->aud_input].audio_input, 0, 0);
+ go->board_info->aud_inputs[go->aud_input].audio_input, 0, 0);
return 0;
}
@@ -960,8 +964,10 @@ int go7007_v4l2_ctrl_init(struct go7007 *go)
V4L2_MPEG_VIDEO_ASPECT_1x1);
ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_JPEG_ACTIVE_MARKER, 0,
- V4L2_JPEG_ACTIVE_MARKER_DQT | V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
- V4L2_JPEG_ACTIVE_MARKER_DQT | V4L2_JPEG_ACTIVE_MARKER_DHT);
+ V4L2_JPEG_ACTIVE_MARKER_DQT |
+ V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DQT |
+ V4L2_JPEG_ACTIVE_MARKER_DHT);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (hdl->error) {
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index 16dd64920767..9eb2a20ae40a 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -245,8 +245,8 @@ int go7007_snd_init(struct go7007 *go)
spin_lock_init(&gosnd->lock);
gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0;
gosnd->capturing = 0;
- ret = snd_card_create(index[dev], id[dev], THIS_MODULE, 0,
- &gosnd->card);
+ ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
+ &gosnd->card);
if (ret < 0) {
kfree(gosnd);
return ret;
@@ -257,7 +257,6 @@ int go7007_snd_init(struct go7007 *go)
kfree(gosnd);
return ret;
}
- snd_card_set_dev(gosnd->card, go->dev);
ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
if (ret < 0) {
snd_card_free(gosnd->card);
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index f2dcc4a292da..f508a1372ad8 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -62,7 +62,7 @@
/* debugging support */
#ifdef CONFIG_USB_DEBUG
-static bool debug = 1;
+static bool debug = true;
#else
static bool debug;
#endif
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f2d396cc4a4c..a5b62eec5e21 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -943,13 +943,17 @@ alloc_status_switch:
usb_free_urb(tx_urb);
case 6:
usb_free_urb(rx_urb);
+ /* fall-through */
case 5:
if (rbuf)
lirc_buffer_free(rbuf);
+ /* fall-through */
case 4:
kfree(rbuf);
+ /* fall-through */
case 3:
kfree(driver);
+ /* fall-through */
case 2:
kfree(context);
context = NULL;
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 41d110f8bc02..62f5137b947b 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -68,29 +68,29 @@
static bool debug;
static bool check_pselecd;
-unsigned int irq = LIRC_IRQ;
-unsigned int io = LIRC_PORT;
+static unsigned int irq = LIRC_IRQ;
+static unsigned int io = LIRC_PORT;
#ifdef LIRC_TIMER
-unsigned int timer;
-unsigned int default_timer = LIRC_TIMER;
+static unsigned int timer;
+static unsigned int default_timer = LIRC_TIMER;
#endif
#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */
static int rbuf[RBUF_SIZE];
-DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(lirc_wait);
-unsigned int rptr;
-unsigned int wptr;
-unsigned int lost_irqs;
-int is_open;
+static unsigned int rptr;
+static unsigned int wptr;
+static unsigned int lost_irqs;
+static int is_open;
-struct parport *pport;
-struct pardevice *ppdevice;
-int is_claimed;
+static struct parport *pport;
+static struct pardevice *ppdevice;
+static int is_claimed;
-unsigned int tx_mask = 1;
+static unsigned int tx_mask = 1;
/*** Internal Functions ***/
@@ -220,7 +220,7 @@ static void rbuf_write(int signal)
wptr = nwptr;
}
-static void irq_handler(void *blah)
+static void lirc_lirc_irq_handler(void *blah)
{
struct timeval tv;
static struct timeval lasttv;
@@ -659,7 +659,7 @@ static int __init lirc_parallel_init(void)
goto exit_device_put;
}
ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
- pf, kf, irq_handler, 0, NULL);
+ pf, kf, lirc_lirc_irq_handler, 0, NULL);
parport_put_port(pport);
if (ppdevice == NULL) {
pr_notice("parport_register_device() failed\n");
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index d2445fdd9015..81f90e17e1e6 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -74,7 +74,7 @@ static void usb_tx_callback(struct urb *urb);
static int vfd_open(struct inode *inode, struct file *file);
static long vfd_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int vfd_close(struct inode *inode, struct file *file);
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
/* LIRC driver function prototypes */
@@ -120,7 +120,7 @@ struct sasem_context {
static const struct file_operations vfd_fops = {
.owner = THIS_MODULE,
.open = &vfd_open,
- .write = &vfd_write,
+ .write = vfd_write,
.unlocked_ioctl = &vfd_ioctl,
.release = &vfd_close,
.llseek = noop_llseek,
@@ -360,7 +360,7 @@ static int send_packet(struct sasem_context *context)
* and requires data in 9 consecutive USB interrupt packets,
* each packet carrying 8 bytes.
*/
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int i;
@@ -389,7 +389,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
goto exit;
}
- data_buf = memdup_user(buf, n_bytes);
+ data_buf = memdup_user((void const __user *)buf, n_bytes);
if (IS_ERR(data_buf)) {
retval = PTR_ERR(data_buf);
goto exit;
@@ -865,15 +865,20 @@ alloc_status_switch:
usb_free_urb(tx_urb);
case 6:
usb_free_urb(rx_urb);
+ /* fall-through */
case 5:
lirc_buffer_free(rbuf);
+ /* fall-through */
case 4:
kfree(rbuf);
+ /* fall-through */
case 3:
kfree(driver);
+ /* fall-through */
case 2:
kfree(context);
context = NULL;
+ /* fall-through */
case 1:
if (retval == 0)
retval = -ENOMEM;
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index abe0d5caa20b..10c685d5de7c 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -650,7 +650,7 @@ static void frbwrite(int l)
rbwrite(l);
}
-static irqreturn_t irq_handler(int i, void *blah)
+static irqreturn_t lirc_irq_handler(int i, void *blah)
{
struct timeval tv;
int counter, dcd;
@@ -852,7 +852,7 @@ static int lirc_serial_probe(struct platform_device *dev)
return result;
#endif
- result = request_irq(irq, irq_handler,
+ result = request_irq(irq, lirc_irq_handler,
(share_irq ? IRQF_SHARED : 0),
LIRC_DRIVER_NAME, (void *)&hardware);
if (result < 0) {
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 4c3bf776bb20..5a0400fdb98c 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -411,7 +411,7 @@ struct msi3101_state {
unsigned int vb_full; /* vb is full and packets dropped */
struct urb *urbs[MAX_ISO_BUFS];
- int (*convert_stream) (struct msi3101_state *s, u32 *dst, u8 *src,
+ int (*convert_stream)(struct msi3101_state *s, u32 *dst, u8 *src,
unsigned int src_len);
/* Controls */
diff --git a/drivers/staging/media/omap24xx/Kconfig b/drivers/staging/media/omap24xx/Kconfig
new file mode 100644
index 000000000000..82e569a21c46
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Kconfig
@@ -0,0 +1,35 @@
+config VIDEO_V4L2_INT_DEVICE
+ tristate
+
+config VIDEO_OMAP2
+ tristate "OMAP2 Camera Capture Interface driver (DEPRECATED)"
+ depends on VIDEO_DEV && ARCH_OMAP2
+ select VIDEOBUF_DMA_SG
+ select VIDEO_V4L2_INT_DEVICE
+ ---help---
+ This is a v4l2 driver for the TI OMAP2 camera capture interface.
+
+ It uses the deprecated int-device API. Since this driver is no
+ longer actively maintained and nobody is interested in converting
+ it to the subdev API, this driver will be removed soon.
+
+ If you do want to keep this driver in the kernel, and are willing
+ to convert it to the subdev API, then please contact the linux-media
+ mailing list.
+
+config VIDEO_TCM825X
+ tristate "TCM825x camera sensor support (DEPRECATED)"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ select VIDEO_V4L2_INT_DEVICE
+ ---help---
+ This is a driver for the Toshiba TCM825x VGA camera sensor.
+ It is used, for example, in the Nokia N800.
+
+ It uses the deprecated int-device API. Since this driver is no
+ longer actively maintained and nobody is interested in converting
+ it to the subdev API, this driver will be removed soon.
+
+ If you do want to keep this driver in the kernel, and are willing
+ to convert it to the subdev API, then please contact the linux-media
+ mailing list.
diff --git a/drivers/staging/media/omap24xx/Makefile b/drivers/staging/media/omap24xx/Makefile
new file mode 100644
index 000000000000..c2e7175599c2
--- /dev/null
+++ b/drivers/staging/media/omap24xx/Makefile
@@ -0,0 +1,5 @@
+omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
+
+obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
+obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
+obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
diff --git a/drivers/media/platform/omap24xxcam-dma.c b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
index 9c00776d6583..9c00776d6583 100644
--- a/drivers/media/platform/omap24xxcam-dma.c
+++ b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/staging/media/omap24xx/omap24xxcam.c
index d2b440c842b3..d2b440c842b3 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/staging/media/omap24xx/omap24xxcam.c
diff --git a/drivers/media/platform/omap24xxcam.h b/drivers/staging/media/omap24xx/omap24xxcam.h
index 7f6f79155537..233bb40cfec3 100644
--- a/drivers/media/platform/omap24xxcam.h
+++ b/drivers/staging/media/omap24xx/omap24xxcam.h
@@ -28,8 +28,8 @@
#define OMAP24XXCAM_H
#include <media/videobuf-dma-sg.h>
-#include <media/v4l2-int-device.h>
#include <media/v4l2-device.h>
+#include "v4l2-int-device.h"
/*
*
diff --git a/drivers/media/i2c/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
index 9252529fc5dd..f4dd32df2353 100644
--- a/drivers/media/i2c/tcm825x.c
+++ b/drivers/staging/media/omap24xx/tcm825x.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
#include "tcm825x.h"
@@ -249,11 +249,11 @@ static struct vcontrol {
};
-static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] =
-{ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
+static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] = {
+ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
-static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] =
-{ &yuv422, &rgb565 };
+static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] = {
+ &yuv422, &rgb565 };
/*
* Read a value from a register in an TCM825X sensor device. The value is
diff --git a/drivers/media/i2c/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
index 8ebab953963f..9970fb1c596a 100644
--- a/drivers/media/i2c/tcm825x.h
+++ b/drivers/staging/media/omap24xx/tcm825x.h
@@ -17,7 +17,7 @@
#include <linux/videodev2.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
#define TCM825X_NAME "tcm825x"
@@ -178,7 +178,7 @@ struct tcm825x_platform_data {
/* Set power state, zero is off, non-zero is on. */
int (*power_set)(int power);
/* Default registers written after power-on or reset. */
- const struct tcm825x_reg *(*default_regs)(void);
+ const struct tcm825x_reg * (*default_regs)(void);
int (*needs_reset)(struct v4l2_int_device *s, void *buf,
struct v4l2_pix_format *fmt);
int (*ifparm)(struct v4l2_ifparm *p);
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/staging/media/omap24xx/v4l2-int-device.c
index f4473494af7a..427a89033a1d 100644
--- a/drivers/media/v4l2-core/v4l2-int-device.c
+++ b/drivers/staging/media/omap24xx/v4l2-int-device.c
@@ -28,7 +28,7 @@
#include <linux/string.h>
#include <linux/module.h>
-#include <media/v4l2-int-device.h>
+#include "v4l2-int-device.h"
static DEFINE_MUTEX(mutex);
static LIST_HEAD(int_list);
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.h b/drivers/staging/media/omap24xx/v4l2-int-device.h
new file mode 100644
index 000000000000..0286c95814ff
--- /dev/null
+++ b/drivers/staging/media/omap24xx/v4l2-int-device.h
@@ -0,0 +1,305 @@
+/*
+ * drivers/staging/media/omap24xx/v4l2-int-device.h
+ *
+ * V4L2 internal ioctl interface.
+ *
+ * Copyright (C) 2007 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef V4L2_INT_DEVICE_H
+#define V4L2_INT_DEVICE_H
+
+#include <media/v4l2-common.h>
+
+#define V4L2NAMESIZE 32
+
+/*
+ *
+ * The internal V4L2 device interface core.
+ *
+ */
+
+enum v4l2_int_type {
+ v4l2_int_type_master = 1,
+ v4l2_int_type_slave
+};
+
+struct module;
+
+struct v4l2_int_device;
+
+struct v4l2_int_master {
+ int (*attach)(struct v4l2_int_device *slave);
+ void (*detach)(struct v4l2_int_device *slave);
+};
+
+typedef int (v4l2_int_ioctl_func)(struct v4l2_int_device *);
+typedef int (v4l2_int_ioctl_func_0)(struct v4l2_int_device *);
+typedef int (v4l2_int_ioctl_func_1)(struct v4l2_int_device *, void *);
+
+struct v4l2_int_ioctl_desc {
+ int num;
+ v4l2_int_ioctl_func *func;
+};
+
+struct v4l2_int_slave {
+ /* Don't touch master. */
+ struct v4l2_int_device *master;
+
+ char attach_to[V4L2NAMESIZE];
+
+ int num_ioctls;
+ struct v4l2_int_ioctl_desc *ioctls;
+};
+
+struct v4l2_int_device {
+ /* Don't touch head. */
+ struct list_head head;
+
+ struct module *module;
+
+ char name[V4L2NAMESIZE];
+
+ enum v4l2_int_type type;
+ union {
+ struct v4l2_int_master *master;
+ struct v4l2_int_slave *slave;
+ } u;
+
+ void *priv;
+};
+
+void v4l2_int_device_try_attach_all(void);
+
+int v4l2_int_device_register(struct v4l2_int_device *d);
+void v4l2_int_device_unregister(struct v4l2_int_device *d);
+
+int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd);
+int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg);
+
+/*
+ *
+ * Types and definitions for IOCTL commands.
+ *
+ */
+
+enum v4l2_power {
+ V4L2_POWER_OFF = 0,
+ V4L2_POWER_ON,
+ V4L2_POWER_STANDBY,
+};
+
+/* Slave interface type. */
+enum v4l2_if_type {
+ /*
+ * Parallel 8-, 10- or 12-bit interface, used, for example,
+ * by certain image sensors.
+ */
+ V4L2_IF_TYPE_BT656,
+};
+
+enum v4l2_if_type_bt656_mode {
+ /*
+ * Modes without Bt synchronisation codes. Separate
+ * synchronisation signal lines are used.
+ */
+ V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT,
+ V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT,
+ V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT,
+ /*
+ * Use Bt synchronisation codes. The vertical and horizontal
+ * synchronisation is done based on synchronisation codes.
+ */
+ V4L2_IF_TYPE_BT656_MODE_BT_8BIT,
+ V4L2_IF_TYPE_BT656_MODE_BT_10BIT,
+};
+
+struct v4l2_if_type_bt656 {
+ /*
+ * 0: Frame begins when vsync is high.
+ * 1: Frame begins when vsync changes from low to high.
+ */
+ unsigned frame_start_on_rising_vs:1;
+ /* Use Bt synchronisation codes for sync correction. */
+ unsigned bt_sync_correct:1;
+ /* Swap every two adjacent image data elements. */
+ unsigned swap:1;
+ /* Inverted latch clock polarity from slave. */
+ unsigned latch_clk_inv:1;
+ /* Hs polarity. 0 is active high, 1 active low. */
+ unsigned nobt_hs_inv:1;
+ /* Vs polarity. 0 is active high, 1 active low. */
+ unsigned nobt_vs_inv:1;
+ enum v4l2_if_type_bt656_mode mode;
+ /* Minimum accepted bus clock for slave (in Hz). */
+ u32 clock_min;
+ /* Maximum accepted bus clock for slave. */
+ u32 clock_max;
+ /*
+ * Current wish of the slave. May only change in response to
+ * ioctls that affect image capture.
+ */
+ u32 clock_curr;
+};
+
+struct v4l2_ifparm {
+ enum v4l2_if_type if_type;
+ union {
+ struct v4l2_if_type_bt656 bt656;
+ } u;
+};
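+
+/*
+ * Example (hypothetical slave sketch; the "my_sensor_*" name and the
+ * clock values are illustrative only): a sensor's g_ifparm callback
+ * would typically fill this structure as follows:
+ *
+ *	static int my_sensor_g_ifparm(struct v4l2_int_device *s,
+ *				      struct v4l2_ifparm *p)
+ *	{
+ *		memset(p, 0, sizeof(*p));
+ *		p->if_type = V4L2_IF_TYPE_BT656;
+ *		p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
+ *		p->u.bt656.clock_min = 10000000;
+ *		p->u.bt656.clock_max = 48000000;
+ *		return 0;
+ *	}
+ */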
+
+/* IOCTL command numbers. */
+enum v4l2_int_ioctl_num {
+ /*
+ *
+ * "Proper" V4L ioctls, as in struct video_device.
+ *
+ */
+ vidioc_int_enum_fmt_cap_num = 1,
+ vidioc_int_g_fmt_cap_num,
+ vidioc_int_s_fmt_cap_num,
+ vidioc_int_try_fmt_cap_num,
+ vidioc_int_queryctrl_num,
+ vidioc_int_g_ctrl_num,
+ vidioc_int_s_ctrl_num,
+ vidioc_int_cropcap_num,
+ vidioc_int_g_crop_num,
+ vidioc_int_s_crop_num,
+ vidioc_int_g_parm_num,
+ vidioc_int_s_parm_num,
+ vidioc_int_querystd_num,
+ vidioc_int_s_std_num,
+ vidioc_int_s_video_routing_num,
+
+ /*
+ *
+ * Strictly internal ioctls.
+ *
+ */
+ /* Initialise the device when slave attaches to the master. */
+ vidioc_int_dev_init_num = 1000,
+ /* Deinitialise the device at slave detach. */
+ vidioc_int_dev_exit_num,
+ /* Set device power state. */
+ vidioc_int_s_power_num,
+ /*
+ * Get slave private data, e.g. platform-specific slave
+ * configuration used by the master.
+ */
+ vidioc_int_g_priv_num,
+ /* Get slave interface parameters. */
+ vidioc_int_g_ifparm_num,
+ /* Does the slave need to be reset after VIDIOC_DQBUF? */
+ vidioc_int_g_needs_reset_num,
+ vidioc_int_enum_framesizes_num,
+ vidioc_int_enum_frameintervals_num,
+
+ /*
+ *
+ * VIDIOC_INT_* ioctls.
+ *
+ */
+ /* VIDIOC_INT_RESET */
+ vidioc_int_reset_num,
+ /* VIDIOC_INT_INIT */
+ vidioc_int_init_num,
+
+ /*
+ *
+ * Start of private ioctls.
+ *
+ */
+ vidioc_int_priv_start_num = 2000,
+};
+
+/*
+ *
+ * IOCTL wrapper functions for better type checking.
+ *
+ */
+
+#define V4L2_INT_WRAPPER_0(name) \
+ static inline int vidioc_int_##name(struct v4l2_int_device *d) \
+ { \
+ return v4l2_int_ioctl_0(d, vidioc_int_##name##_num); \
+ } \
+ \
+ static inline struct v4l2_int_ioctl_desc \
+ vidioc_int_##name##_cb(int (*func) \
+ (struct v4l2_int_device *)) \
+ { \
+ struct v4l2_int_ioctl_desc desc; \
+ \
+ desc.num = vidioc_int_##name##_num; \
+ desc.func = (v4l2_int_ioctl_func *)func; \
+ \
+ return desc; \
+ }
+
+#define V4L2_INT_WRAPPER_1(name, arg_type, asterisk) \
+ static inline int vidioc_int_##name(struct v4l2_int_device *d, \
+ arg_type asterisk arg) \
+ { \
+ return v4l2_int_ioctl_1(d, vidioc_int_##name##_num, \
+ (void *)(unsigned long)arg); \
+ } \
+ \
+ static inline struct v4l2_int_ioctl_desc \
+ vidioc_int_##name##_cb(int (*func) \
+ (struct v4l2_int_device *, \
+ arg_type asterisk)) \
+ { \
+ struct v4l2_int_ioctl_desc desc; \
+ \
+ desc.num = vidioc_int_##name##_num; \
+ desc.func = (v4l2_int_ioctl_func *)func; \
+ \
+ return desc; \
+ }
+
+V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *);
+V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
+V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
+V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
+V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
+V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
+V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
+V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
+V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
+V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
+V4L2_INT_WRAPPER_1(querystd, v4l2_std_id, *);
+V4L2_INT_WRAPPER_1(s_std, v4l2_std_id, *);
+V4L2_INT_WRAPPER_1(s_video_routing, struct v4l2_routing, *);
+
+V4L2_INT_WRAPPER_0(dev_init);
+V4L2_INT_WRAPPER_0(dev_exit);
+V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
+V4L2_INT_WRAPPER_1(g_priv, void, *);
+V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
+V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
+V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
+V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
+
+V4L2_INT_WRAPPER_0(reset);
+V4L2_INT_WRAPPER_0(init);
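+
+/*
+ * Example (hypothetical slave sketch; the "my_sensor_*" names are
+ * illustrative only): a sensor driver built on this interface would
+ * typically fill its ioctl table with the *_cb() helpers generated
+ * above and reference it from its struct v4l2_int_slave:
+ *
+ *	static struct v4l2_int_ioctl_desc my_sensor_ioctl_desc[] = {
+ *		vidioc_int_s_power_cb(my_sensor_s_power),
+ *		vidioc_int_g_ifparm_cb(my_sensor_g_ifparm),
+ *		vidioc_int_g_fmt_cap_cb(my_sensor_g_fmt_cap),
+ *	};
+ *
+ *	static struct v4l2_int_slave my_sensor_slave = {
+ *		.ioctls = my_sensor_ioctl_desc,
+ *		.num_ioctls = ARRAY_SIZE(my_sensor_ioctl_desc),
+ *	};
+ */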
+
+#endif
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
new file mode 100644
index 000000000000..b9fe753969bd
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_OMAP4
+ bool "OMAP 4 Camera support"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Driver for an OMAP 4 ISS controller.
+
+config VIDEO_OMAP4_DEBUG
+ bool "OMAP 4 Camera debug messages"
+ depends on VIDEO_OMAP4
+ ---help---
+ Enable debug messages on OMAP 4 ISS controller driver.
diff --git a/drivers/staging/media/omap4iss/Makefile b/drivers/staging/media/omap4iss/Makefile
new file mode 100644
index 000000000000..a716ce936cf6
--- /dev/null
+++ b/drivers/staging/media/omap4iss/Makefile
@@ -0,0 +1,6 @@
+# Makefile for OMAP4 ISS driver
+
+omap4-iss-objs += \
+ iss.o iss_csi2.o iss_csiphy.o iss_ipipeif.o iss_ipipe.o iss_resizer.o iss_video.o
+
+obj-$(CONFIG_VIDEO_OMAP4) += omap4-iss.o
diff --git a/drivers/staging/media/omap4iss/TODO b/drivers/staging/media/omap4iss/TODO
new file mode 100644
index 000000000000..fcde88860a2c
--- /dev/null
+++ b/drivers/staging/media/omap4iss/TODO
@@ -0,0 +1,4 @@
+* Make the driver compile as a module
+* Fix FIFO/buffer overflows and underflows
+* Replace dummy resizer code with a real implementation
+* Fix checkpatch errors and warnings
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
new file mode 100644
index 000000000000..61fbfcd13582
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -0,0 +1,1563 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012, Texas Instruments
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+
+#define ISS_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISS " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_##name))
+
+static void iss_print_status(struct iss_device *iss)
+{
+ dev_dbg(iss->dev, "-------------ISS HL Register dump-------------\n");
+
+ ISS_PRINT_REGISTER(iss, HL_REVISION);
+ ISS_PRINT_REGISTER(iss, HL_SYSCONFIG);
+ ISS_PRINT_REGISTER(iss, HL_IRQSTATUS(5));
+ ISS_PRINT_REGISTER(iss, HL_IRQENABLE_SET(5));
+ ISS_PRINT_REGISTER(iss, HL_IRQENABLE_CLR(5));
+ ISS_PRINT_REGISTER(iss, CTRL);
+ ISS_PRINT_REGISTER(iss, CLKCTRL);
+ ISS_PRINT_REGISTER(iss, CLKSTAT);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * omap4iss_flush - Post pending L3 bus writes by doing a register readback
+ * @iss: OMAP4 ISS device
+ *
+ * In order to force posting of pending writes, we need to write to and
+ * read back the same register, in this case the revision register.
+ *
+ * See this link for reference:
+ * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ */
+void omap4iss_flush(struct iss_device *iss)
+{
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
+ iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+}
+
+/*
+ * omap4iss_isp_enable_interrupts - Enable ISS ISP interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_enable_interrupts(struct iss_device *iss)
+{
+ static const u32 isp_irq = ISP5_IRQ_OCP_ERR |
+ ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF |
+ ISP5_IRQ_RSZ_INT_DMA |
+ ISP5_IRQ_ISIF_INT(0);
+
+ /* Enable ISP interrupts */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0), isp_irq);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_SET(0),
+ isp_irq);
+}
+
+/*
+ * omap4iss_isp_disable_interrupts - Disable ISS ISP interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void omap4iss_isp_disable_interrupts(struct iss_device *iss)
+{
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_CLR(0), ~0);
+}
+
+/*
+ * iss_enable_interrupts - Enable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_enable_interrupts(struct iss_device *iss)
+{
+ static const u32 hl_irq = ISS_HL_IRQ_CSIA | ISS_HL_IRQ_CSIB
+ | ISS_HL_IRQ_ISP(0);
+
+ /* Enable HL interrupts */
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), hl_irq);
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_SET(5), hl_irq);
+
+ if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+ omap4iss_isp_enable_interrupts(iss);
+}
+
+/*
+ * iss_disable_interrupts - Disable ISS interrupts.
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_interrupts(struct iss_device *iss)
+{
+ if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
+ omap4iss_isp_disable_interrupts(iss);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_CLR(5), ~0);
+}
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+ struct media_link *link)
+{
+ struct iss_device *iss =
+ container_of(pipe, struct iss_video, pipe)->iss;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ if (!pipe->external)
+ return 0;
+
+ if (pipe->external_rate)
+ return 0;
+
+ memset(&fmt, 0, sizeof(fmt));
+
+ fmt.pad = link->source->index;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
+ pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return -EPIPE;
+
+ pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;
+
+ ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
+ V4L2_CID_PIXEL_RATE);
+ if (ctrl == NULL) {
+ dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
+ pipe->external->name);
+ return -EPIPE;
+ }
+
+ pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return 0;
+}
+
+/*
+ * Configure the bridge. Valid inputs are
+ *
+ * IPIPEIF_INPUT_CSI2A: CSI2a receiver
+ * IPIPEIF_INPUT_CSI2B: CSI2b receiver
+ *
+ * The bridge and lane shifter are configured according to the selected input
+ * and the ISP platform data.
+ */
+void omap4iss_configure_bridge(struct iss_device *iss,
+ enum ipipeif_input_entity input)
+{
+ u32 issctrl_val;
+ u32 isp5ctrl_val;
+
+ issctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL);
+ issctrl_val &= ~ISS_CTRL_INPUT_SEL_MASK;
+ issctrl_val &= ~ISS_CTRL_CLK_DIV_MASK;
+
+ isp5ctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL);
+
+ switch (input) {
+ case IPIPEIF_INPUT_CSI2A:
+ issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2A;
+ break;
+
+ case IPIPEIF_INPUT_CSI2B:
+ issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2B;
+ break;
+
+ default:
+ return;
+ }
+
+ issctrl_val |= ISS_CTRL_SYNC_DETECT_VS_RAISING;
+
+ isp5ctrl_val |= ISP5_CTRL_VD_PULSE_EXT | ISP5_CTRL_PSYNC_CLK_SEL |
+ ISP5_CTRL_SYNC_ENABLE;
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL, issctrl_val);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, isp5ctrl_val);
+}
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+ static const char * const name[] = {
+ "ISP_0",
+ "ISP_1",
+ "ISP_2",
+ "ISP_3",
+ "CSIA",
+ "CSIB",
+ "CCP2_0",
+ "CCP2_1",
+ "CCP2_2",
+ "CCP2_3",
+ "CBUFF",
+ "BTE",
+ "SIMCOP_0",
+ "SIMCOP_1",
+ "SIMCOP_2",
+ "SIMCOP_3",
+ "CCP2_8",
+ "HS_VS",
+ "18",
+ "19",
+ "20",
+ "21",
+ "22",
+ "23",
+ "24",
+ "25",
+ "26",
+ "27",
+ "28",
+ "29",
+ "30",
+ "31",
+ };
+ unsigned int i;
+
+ dev_dbg(iss->dev, "ISS IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ pr_cont("%s ", name[i]);
+ }
+ pr_cont("\n");
+}
+
+static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
+{
+ static const char * const name[] = {
+ "ISIF_0",
+ "ISIF_1",
+ "ISIF_2",
+ "ISIF_3",
+ "IPIPEREQ",
+ "IPIPELAST_PIX",
+ "IPIPEDMA",
+ "IPIPEBSC",
+ "IPIPEHST",
+ "IPIPEIF",
+ "AEW",
+ "AF",
+ "H3A",
+ "RSZ_REG",
+ "RSZ_LAST_PIX",
+ "RSZ_DMA",
+ "RSZ_CYC_RZA",
+ "RSZ_CYC_RZB",
+ "RSZ_FIFO_OVF",
+ "RSZ_FIFO_IN_BLK_ERR",
+ "20",
+ "21",
+ "RSZ_EOF0",
+ "RSZ_EOF1",
+ "H3A_EOF",
+ "IPIPE_EOF",
+ "26",
+ "IPIPE_DPC_INI",
+ "IPIPE_DPC_RNEW0",
+ "IPIPE_DPC_RNEW1",
+ "30",
+ "OCP_ERR",
+ };
+ unsigned int i;
+
+ dev_dbg(iss->dev, "ISP IRQ: ");
+
+ for (i = 0; i < ARRAY_SIZE(name); i++) {
+ if ((1 << i) & irqstatus)
+ pr_cont("%s ", name[i]);
+ }
+ pr_cont("\n");
+}
+#endif
+
+/*
+ * iss_isr - Interrupt Service Routine for ISS module.
+ * @irq: Not used currently.
+ * @_iss: Pointer to the OMAP4 ISS device
+ *
+ * Handles the corresponding callback if plugged in.
+ *
+ * Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
+ * IRQ wasn't handled.
+ */
+static irqreturn_t iss_isr(int irq, void *_iss)
+{
+ static const u32 ipipeif_events = ISP5_IRQ_IPIPEIF_IRQ |
+ ISP5_IRQ_ISIF_INT(0);
+ static const u32 resizer_events = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF |
+ ISP5_IRQ_RSZ_INT_DMA;
+ struct iss_device *iss = _iss;
+ u32 irqstatus;
+
+ irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5));
+ iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), irqstatus);
+
+ if (irqstatus & ISS_HL_IRQ_CSIA)
+ omap4iss_csi2_isr(&iss->csi2a);
+
+ if (irqstatus & ISS_HL_IRQ_CSIB)
+ omap4iss_csi2_isr(&iss->csi2b);
+
+ if (irqstatus & ISS_HL_IRQ_ISP(0)) {
+ u32 isp_irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1,
+ ISP5_IRQSTATUS(0));
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0),
+ isp_irqstatus);
+
+ if (isp_irqstatus & ISP5_IRQ_OCP_ERR)
+ dev_dbg(iss->dev, "ISP5 OCP Error!\n");
+
+ if (isp_irqstatus & ipipeif_events) {
+ omap4iss_ipipeif_isr(&iss->ipipeif,
+ isp_irqstatus & ipipeif_events);
+ }
+
+ if (isp_irqstatus & resizer_events)
+ omap4iss_resizer_isr(&iss->resizer,
+ isp_irqstatus & resizer_events);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+ iss_isp_isr_dbg(iss, isp_irqstatus);
+#endif
+ }
+
+ omap4iss_flush(iss);
+
+#if defined(DEBUG) && defined(ISS_ISR_DEBUG)
+ iss_isr_dbg(iss, irqstatus);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field is used to track the number of
+ * users. For entities corresponding to video device nodes, the use_count
+ * field stores the number of users of the node. For subdev entities, the
+ * use_count field stores the total number of users of all video device nodes
+ * in the pipeline.
+ *
+ * The omap4iss_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
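+
+/*
+ * Usage sketch (hypothetical open() handler; the exact iss_video layout
+ * is an assumption based on how this file dereferences
+ * pipe->output->video.entity):
+ *
+ *	static int iss_video_open(struct file *file)
+ *	{
+ *		struct iss_video *video = video_drvdata(file);
+ *		int ret;
+ *
+ *		ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);
+ *		if (ret < 0)
+ *			return ret;
+ *		...
+ *		return 0;
+ *	}
+ *
+ * The matching release() handler calls
+ * omap4iss_pipeline_pm_use(&video->video.entity, 0).
+ */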
+
+/*
+ * iss_pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int iss_pipeline_pm_use_count(struct media_entity *entity)
+{
+ struct media_entity_graph graph;
+ int use = 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * iss_pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+
+ subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ int ret;
+
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * iss_pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int iss_pipeline_pm_power(struct media_entity *entity, int change)
+{
+ struct media_entity_graph graph;
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(&graph)))
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ ret = iss_pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return 0;
+
+ media_entity_graph_walk_start(&graph, first);
+
+ while ((first = media_entity_graph_walk_next(&graph))
+ && first != entity)
+ if (media_entity_type(first) != MEDIA_ENT_T_DEVNODE)
+ iss_pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+/*
+ * omap4iss_pipeline_pm_use - Update the use count of an entity
+ * @entity: The entity
+ * @use: Use (1) or stop using (0) the entity
+ *
+ * Update the use count of all entities in the pipeline and power entities on or
+ * off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. No failure can occur when the use parameter is
+ * set to 0.
+ */
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&entity->parent->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = iss_pipeline_pm_power(entity, change);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&entity->parent->graph_mutex);
+
+ return ret;
+}
+
+/*
+ * iss_pipeline_link_notify - Link management notification callback
+ * @link: The link
+ * @flags: New link flags that will be applied
+ *
+ * React to link management on powered pipelines by updating the use count of
+ * all entities in the source and sink sides of the link. Entities are powered
+ * on or off accordingly.
+ *
+ * Return 0 on success or a negative error code on failure. Powering entities
+ * off is assumed to never fail. This function will not fail for disconnection
+ * events.
+ */
+static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use = iss_pipeline_pm_use_count(source);
+ int sink_use = iss_pipeline_pm_use_count(sink);
+ int ret;
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ iss_pipeline_pm_power(source, -sink_use);
+ iss_pipeline_pm_power(sink, -source_use);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = iss_pipeline_pm_power(source, sink_use);
+ if (ret < 0)
+ return ret;
+
+ ret = iss_pipeline_pm_power(sink, source_use);
+ if (ret < 0)
+ iss_pipeline_pm_power(source, -sink_use);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline stream management
+ */
+
+/*
+ * iss_pipeline_enable - Enable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @mode: Stream mode (single shot or continuous)
+ *
+ * Walk the entities chain starting at the pipeline output video node and start
+ * all modules in the chain in the given mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int iss_pipeline_enable(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state mode)
+{
+ struct iss_device *iss = pipe->output->iss;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ int ret;
+
+ /* If one of the entities in the pipeline has crashed it will not work
+ * properly. Refuse to start streaming in that case. This check must be
+ * performed before the loop below to avoid starting entities if the
+ * pipeline won't start anyway (those entities would then likely fail to
+ * stop, making the problem worse).
+ */
+ if (pipe->entities & iss->crashed)
+ return -EIO;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ pipe->do_propagation = false;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, mode);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+ iss_print_status(pipe->output->iss);
+ return 0;
+}
+
+/*
+ * iss_pipeline_disable - Disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Walk the entities chain starting at the pipeline output video node and stop
+ * all modules in the chain. Wait synchronously for the modules to be stopped if
+ * necessary.
+ */
+static int iss_pipeline_disable(struct iss_pipeline *pipe)
+{
+ struct iss_device *iss = pipe->output->iss;
+ struct media_entity *entity;
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ int failure = 0;
+ int ret;
+
+ entity = &pipe->output->video.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0) {
+ dev_dbg(iss->dev, "%s: module stop timeout.\n",
+ subdev->name);
+ /* If the entity failed to stop, assume it has
+ * crashed. Mark it as such; the ISS will be reset when
+ * applications release it.
+ */
+ iss->crashed |= 1U << subdev->entity.id;
+ failure = -ETIMEDOUT;
+ }
+ }
+
+ return failure;
+}
+
+/*
+ * omap4iss_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: ISS pipeline
+ * @state: Stream state (stopped, single shot or continuous)
+ *
+ * Set the pipeline to the given stream state. Pipelines can be started in
+ * single-shot or continuous mode.
+ *
+ * Return 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. The pipeline state is not updated when the operation
+ * fails, except when stopping the pipeline.
+ */
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state state)
+{
+ int ret;
+
+ if (state == ISS_PIPELINE_STREAM_STOPPED)
+ ret = iss_pipeline_disable(pipe);
+ else
+ ret = iss_pipeline_enable(pipe, state);
+
+ if (ret == 0 || state == ISS_PIPELINE_STREAM_STOPPED)
+ pipe->stream_state = state;
+
+ return ret;
+}
+
+/*
+ * omap4iss_pipeline_cancel_stream - Cancel stream on a pipeline
+ * @pipe: ISS pipeline
+ *
+ * Cancelling a stream marks all buffers on all video nodes in the pipeline as
+ * erroneous and makes sure no new buffer can be queued. This function is called
+ * when a fatal error that prevents any further operation on the pipeline
+ * occurs.
+ */
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe)
+{
+ if (pipe->input)
+ omap4iss_video_cancel_stream(pipe->input);
+ if (pipe->output)
+ omap4iss_video_cancel_stream(pipe->output);
+}
+
+/*
+ * iss_pipeline_is_last - Verify if entity has an enabled link to the output
+ * video node
+ * @me: ISS module's media entity
+ *
+ * Returns 1 if the entity has an enabled link to the output video node or 0
+ * otherwise. This holds only as long as the pipeline has no more than one
+ * output node.
+ */
+static int iss_pipeline_is_last(struct media_entity *me)
+{
+ struct iss_pipeline *pipe;
+ struct media_pad *pad;
+
+ if (!me->pipe)
+ return 0;
+ pipe = to_iss_pipeline(me);
+ if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ pad = media_entity_remote_pad(&pipe->output->pad);
+ return pad->entity == me;
+}
+
+static int iss_reset(struct iss_device *iss)
+{
+ unsigned long timeout = 0;
+
+ iss_reg_set(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG,
+ ISS_HL_SYSCONFIG_SOFTRESET);
+
+ while (iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG) &
+ ISS_HL_SYSCONFIG_SOFTRESET) {
+ if (timeout++ > 100) {
+ dev_alert(iss->dev, "cannot reset ISS\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(10, 10);
+ }
+
+ iss->crashed = 0;
+ return 0;
+}
+
+static int iss_isp_reset(struct iss_device *iss)
+{
+ unsigned long timeout = 0;
+
+ /* First, ensure that the ISP is IDLE (no transactions happening) */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+ ISP5_SYSCONFIG_STANDBYMODE_MASK,
+ ISP5_SYSCONFIG_STANDBYMODE_SMART);
+
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, ISP5_CTRL_MSTANDBY);
+
+ for (;;) {
+ if (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL) &
+ ISP5_CTRL_MSTANDBY_WAIT)
+ break;
+ if (timeout++ > 1000) {
+ dev_alert(iss->dev, "cannot set ISP5 to standby\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 1500);
+ }
+
+ /* Now finally, do the reset */
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+ ISP5_SYSCONFIG_SOFTRESET);
+
+ timeout = 0;
+ while (iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG) &
+ ISP5_SYSCONFIG_SOFTRESET) {
+ if (timeout++ > 1000) {
+ dev_alert(iss->dev, "cannot reset ISP5\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(1000, 1500);
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_module_sync_idle - Helper to sync a module with its idle state
+ * @me: ISS submodule's media entity
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks whether the ISS submodule needs to wait for the next
+ * interrupt. If so, it makes the caller sleep while waiting for that event.
+ */
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(me);
+ struct iss_video *video = pipe->output;
+ unsigned long flags;
+
+ if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED ||
+ (pipe->stream_state == ISS_PIPELINE_STREAM_SINGLESHOT &&
+ !iss_pipeline_ready(pipe)))
+ return 0;
+
+ /*
+ * atomic_set() doesn't imply a memory barrier on ARM SMP systems, so add
+ * an explicit barrier here to avoid race conditions.
+ */
+ atomic_set(stopping, 1);
+ smp_wmb();
+
+ /*
+ * If the module is the last one in the pipeline, it's writing to memory. In
+ * that case, check whether the module is already paused due to a DMA queue
+ * underrun, or whether it has to wait for the next interrupt to become idle.
+ * If it isn't the last one, the function won't sleep, but *stopping is still
+ * set so that the next submodule's interrupt handler knows that this module
+ * wants to become idle.
+ */
+ if (!iss_pipeline_is_last(me))
+ return 0;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ atomic_set(stopping, 0);
+ smp_wmb();
+ return 0;
+ }
+ spin_unlock_irqrestore(&video->qlock, flags);
+ if (!wait_event_timeout(*wait, !atomic_read(stopping),
+ msecs_to_jiffies(1000))) {
+ atomic_set(stopping, 0);
+ smp_wmb();
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_module_sync_is_stopping - Helper to verify if a module was stopping
+ * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
+ * @stopping: flag indicating that the module wants to stop
+ *
+ * This function checks whether the ISS submodule was stopping. If so, it
+ * notifies the caller by setting *stopping to 0 and waking up the wait queue.
+ * Returns 1 if the module was stopping, or 0 otherwise.
+ */
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping)
+{
+ if (atomic_cmpxchg(stopping, 1, 0)) {
+ wake_up(wait);
+ return 1;
+ }
+
+ return 0;
+}
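+
+/*
+ * Usage sketch (illustrative): a submodule's stream-off path calls
+ * omap4iss_module_sync_idle(), while its interrupt handler calls
+ * omap4iss_module_sync_is_stopping() before doing any buffer handling, as
+ * the CSI2 receiver ISR does:
+ *
+ *    if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+ *            return;
+ */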
+
+/* --------------------------------------------------------------------------
+ * Clock management
+ */
+
+#define ISS_CLKCTRL_MASK (ISS_CLKCTRL_CSI2_A |\
+ ISS_CLKCTRL_CSI2_B |\
+ ISS_CLKCTRL_ISP)
+
+static int __iss_subclk_update(struct iss_device *iss)
+{
+ u32 clk = 0;
+ int ret = 0, timeout = 1000;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_A)
+ clk |= ISS_CLKCTRL_CSI2_A;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_B)
+ clk |= ISS_CLKCTRL_CSI2_B;
+
+ if (iss->subclk_resources & OMAP4_ISS_SUBCLK_ISP)
+ clk |= ISS_CLKCTRL_ISP;
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
+ ISS_CLKCTRL_MASK, clk);
+
+ /* Wait for HW assertion */
+ while (--timeout > 0) {
+ udelay(1);
+ if ((iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CLKSTAT) &
+ ISS_CLKCTRL_MASK) == clk)
+ break;
+ }
+
+ if (!timeout)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+int omap4iss_subclk_enable(struct iss_device *iss,
+ enum iss_subclk_resource res)
+{
+ iss->subclk_resources |= res;
+
+ return __iss_subclk_update(iss);
+}
+
+int omap4iss_subclk_disable(struct iss_device *iss,
+ enum iss_subclk_resource res)
+{
+ iss->subclk_resources &= ~res;
+
+ return __iss_subclk_update(iss);
+}
+
+#define ISS_ISP5_CLKCTRL_MASK (ISP5_CTRL_BL_CLK_ENABLE |\
+ ISP5_CTRL_ISIF_CLK_ENABLE |\
+ ISP5_CTRL_H3A_CLK_ENABLE |\
+ ISP5_CTRL_RSZ_CLK_ENABLE |\
+ ISP5_CTRL_IPIPE_CLK_ENABLE |\
+ ISP5_CTRL_IPIPEIF_CLK_ENABLE)
+
+static void __iss_isp_subclk_update(struct iss_device *iss)
+{
+ u32 clk = 0;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_ISIF)
+ clk |= ISP5_CTRL_ISIF_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_H3A)
+ clk |= ISP5_CTRL_H3A_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_RSZ)
+ clk |= ISP5_CTRL_RSZ_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPE)
+ clk |= ISP5_CTRL_IPIPE_CLK_ENABLE;
+
+ if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPEIF)
+ clk |= ISP5_CTRL_IPIPEIF_CLK_ENABLE;
+
+ if (clk)
+ clk |= ISP5_CTRL_BL_CLK_ENABLE;
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL,
+ ISS_ISP5_CLKCTRL_MASK, clk);
+}
+
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res)
+{
+ iss->isp_subclk_resources |= res;
+
+ __iss_isp_subclk_update(iss);
+}
+
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res)
+{
+ iss->isp_subclk_resources &= ~res;
+
+ __iss_isp_subclk_update(iss);
+}
+
+/*
+ * iss_enable_clocks - Enable ISS clocks
+ * @iss: OMAP4 ISS device
+ *
+ * Return 0 if successful, or the clk_enable() return value if any of them fails.
+ */
+static int iss_enable_clocks(struct iss_device *iss)
+{
+ int ret;
+
+ ret = clk_enable(iss->iss_fck);
+ if (ret) {
+ dev_err(iss->dev, "clk_enable iss_fck failed\n");
+ return ret;
+ }
+
+ ret = clk_enable(iss->iss_ctrlclk);
+ if (ret) {
+ dev_err(iss->dev, "clk_enable iss_ctrlclk failed\n");
+ clk_disable(iss->iss_fck);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * iss_disable_clocks - Disable ISS clocks
+ * @iss: OMAP4 ISS device
+ */
+static void iss_disable_clocks(struct iss_device *iss)
+{
+ clk_disable(iss->iss_ctrlclk);
+ clk_disable(iss->iss_fck);
+}
+
+static void iss_put_clocks(struct iss_device *iss)
+{
+ if (iss->iss_fck) {
+ clk_put(iss->iss_fck);
+ iss->iss_fck = NULL;
+ }
+
+ if (iss->iss_ctrlclk) {
+ clk_put(iss->iss_ctrlclk);
+ iss->iss_ctrlclk = NULL;
+ }
+}
+
+static int iss_get_clocks(struct iss_device *iss)
+{
+ iss->iss_fck = clk_get(iss->dev, "iss_fck");
+ if (IS_ERR(iss->iss_fck)) {
+ int ret = PTR_ERR(iss->iss_fck);
+
+ dev_err(iss->dev, "Unable to get iss_fck clock info\n");
+ /* Don't leave an ERR_PTR behind for later clock cleanup. */
+ iss->iss_fck = NULL;
+ iss_put_clocks(iss);
+ return ret;
+ }
+
+ iss->iss_ctrlclk = clk_get(iss->dev, "iss_ctrlclk");
+ if (IS_ERR(iss->iss_ctrlclk)) {
+ int ret = PTR_ERR(iss->iss_ctrlclk);
+
+ dev_err(iss->dev, "Unable to get iss_ctrlclk clock info\n");
+ iss->iss_ctrlclk = NULL;
+ iss_put_clocks(iss);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * omap4iss_get - Acquire the ISS resource.
+ *
+ * Initializes the clocks for the first acquire.
+ *
+ * Increment the reference count on the ISS. If the first reference is taken,
+ * enable clocks and power-up all submodules.
+ *
+ * Return a pointer to the ISS device structure, or NULL if an error occurred.
+ */
+struct iss_device *omap4iss_get(struct iss_device *iss)
+{
+ struct iss_device *__iss = iss;
+
+ if (iss == NULL)
+ return NULL;
+
+ mutex_lock(&iss->iss_mutex);
+ if (iss->ref_count > 0)
+ goto out;
+
+ if (iss_enable_clocks(iss) < 0) {
+ __iss = NULL;
+ goto out;
+ }
+
+ iss_enable_interrupts(iss);
+
+out:
+ if (__iss != NULL)
+ iss->ref_count++;
+ mutex_unlock(&iss->iss_mutex);
+
+ return __iss;
+}
+
+/*
+ * omap4iss_put - Release the ISS
+ *
+ * Decrement the reference count on the ISS. If the last reference is released,
+ * power-down all submodules, disable clocks and free temporary buffers.
+ */
+void omap4iss_put(struct iss_device *iss)
+{
+ if (iss == NULL)
+ return;
+
+ mutex_lock(&iss->iss_mutex);
+ BUG_ON(iss->ref_count == 0);
+ if (--iss->ref_count == 0) {
+ iss_disable_interrupts(iss);
+ /* Reset the ISS if an entity has failed to stop. This is the
+ * only way to recover from such conditions, although it would be
+ * worth investigating whether resetting only the ISP could fix the
+ * problem in some cases.
+ */
+ if (iss->crashed)
+ iss_reset(iss);
+ iss_disable_clocks(iss);
+ }
+ mutex_unlock(&iss->iss_mutex);
+}
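+
+/*
+ * Usage sketch (illustrative): callers bracket hardware access with a get/put
+ * pair, as iss_probe() does:
+ *
+ *    if (omap4iss_get(iss) == NULL)
+ *            goto error;
+ *    ...
+ *    omap4iss_put(iss);
+ */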
+
+static int iss_map_mem_resource(struct platform_device *pdev,
+ struct iss_device *iss,
+ enum iss_mem_resources res)
+{
+ struct resource *mem;
+
+ /* request the mem region for the camera registers */
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
+ if (!mem) {
+ dev_err(iss->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+ dev_err(iss->dev,
+ "cannot reserve camera register I/O region\n");
+ return -ENODEV;
+ }
+ iss->res[res] = mem;
+
+ /* map the region */
+ iss->regs[res] = ioremap_nocache(mem->start, resource_size(mem));
+ if (!iss->regs[res]) {
+ dev_err(iss->dev, "cannot map camera register I/O region\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void iss_unregister_entities(struct iss_device *iss)
+{
+ omap4iss_resizer_unregister_entities(&iss->resizer);
+ omap4iss_ipipe_unregister_entities(&iss->ipipe);
+ omap4iss_ipipeif_unregister_entities(&iss->ipipeif);
+ omap4iss_csi2_unregister_entities(&iss->csi2a);
+ omap4iss_csi2_unregister_entities(&iss->csi2b);
+
+ v4l2_device_unregister(&iss->v4l2_dev);
+ media_device_unregister(&iss->media_dev);
+}
+
+/*
+ * iss_register_subdev_group - Register a group of subdevices
+ * @iss: OMAP4 ISS device
+ * @board_info: I2C subdevs board information array
+ *
+ * Register all I2C subdevices in the board_info array. The array must be
+ * terminated by a NULL entry, and the first entry must be the sensor.
+ *
+ * Return a pointer to the sensor media entity if it has been successfully
+ * registered, or NULL otherwise.
+ */
+static struct v4l2_subdev *
+iss_register_subdev_group(struct iss_device *iss,
+ struct iss_subdev_i2c_board_info *board_info)
+{
+ struct v4l2_subdev *sensor = NULL;
+ unsigned int first;
+
+ if (board_info->board_info == NULL)
+ return NULL;
+
+ for (first = 1; board_info->board_info; ++board_info, first = 0) {
+ struct v4l2_subdev *subdev;
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(board_info->i2c_adapter_id);
+ if (adapter == NULL) {
+ dev_err(iss->dev,
+ "%s: Unable to get I2C adapter %d for device %s\n",
+ __func__, board_info->i2c_adapter_id,
+ board_info->board_info->type);
+ continue;
+ }
+
+ subdev = v4l2_i2c_new_subdev_board(&iss->v4l2_dev, adapter,
+ board_info->board_info, NULL);
+ if (subdev == NULL) {
+ dev_err(iss->dev, "%s: Unable to register subdev %s\n",
+ __func__, board_info->board_info->type);
+ continue;
+ }
+
+ if (first)
+ sensor = subdev;
+ }
+
+ return sensor;
+}
+
+static int iss_register_entities(struct iss_device *iss)
+{
+ struct iss_platform_data *pdata = iss->pdata;
+ struct iss_v4l2_subdevs_group *subdevs;
+ int ret;
+
+ iss->media_dev.dev = iss->dev;
+ strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
+ sizeof(iss->media_dev.model));
+ iss->media_dev.hw_revision = iss->revision;
+ iss->media_dev.link_notify = iss_pipeline_link_notify;
+ ret = media_device_register(&iss->media_dev);
+ if (ret < 0) {
+ dev_err(iss->dev, "%s: Media device registration failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ iss->v4l2_dev.mdev = &iss->media_dev;
+ ret = v4l2_device_register(iss->dev, &iss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(iss->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto done;
+ }
+
+ /* Register internal entities */
+ ret = omap4iss_csi2_register_entities(&iss->csi2a, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_csi2_register_entities(&iss->csi2b, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_ipipeif_register_entities(&iss->ipipeif, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_ipipe_register_entities(&iss->ipipe, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = omap4iss_resizer_register_entities(&iss->resizer, &iss->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ /* Register external entities */
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
+ struct v4l2_subdev *sensor;
+ struct media_entity *input;
+ unsigned int flags;
+ unsigned int pad;
+
+ sensor = iss_register_subdev_group(iss, subdevs->subdevs);
+ if (sensor == NULL)
+ continue;
+
+ sensor->host_priv = subdevs;
+
+ /* Connect the sensor to the correct interface module.
+ * CSI2a receiver through CSIPHY1, or
+ * CSI2b receiver through CSIPHY2
+ */
+ switch (subdevs->interface) {
+ case ISS_INTERFACE_CSI2A_PHY1:
+ input = &iss->csi2a.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ case ISS_INTERFACE_CSI2B_PHY2:
+ input = &iss->csi2b.subdev.entity;
+ pad = CSI2_PAD_SINK;
+ flags = MEDIA_LNK_FL_IMMUTABLE
+ | MEDIA_LNK_FL_ENABLED;
+ break;
+
+ default:
+ dev_err(iss->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+ flags);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&iss->v4l2_dev);
+
+done:
+ if (ret < 0)
+ iss_unregister_entities(iss);
+
+ return ret;
+}
+
+static void iss_cleanup_modules(struct iss_device *iss)
+{
+ omap4iss_csi2_cleanup(iss);
+ omap4iss_ipipeif_cleanup(iss);
+ omap4iss_ipipe_cleanup(iss);
+ omap4iss_resizer_cleanup(iss);
+}
+
+static int iss_initialize_modules(struct iss_device *iss)
+{
+ int ret;
+
+ ret = omap4iss_csiphy_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "CSI PHY initialization failed\n");
+ goto error_csiphy;
+ }
+
+ ret = omap4iss_csi2_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "CSI2 initialization failed\n");
+ goto error_csi2;
+ }
+
+ ret = omap4iss_ipipeif_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP IPIPEIF initialization failed\n");
+ goto error_ipipeif;
+ }
+
+ ret = omap4iss_ipipe_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP IPIPE initialization failed\n");
+ goto error_ipipe;
+ }
+
+ ret = omap4iss_resizer_init(iss);
+ if (ret < 0) {
+ dev_err(iss->dev, "ISP RESIZER initialization failed\n");
+ goto error_resizer;
+ }
+
+ /* Connect the submodules. */
+ ret = media_entity_create_link(
+ &iss->csi2a.subdev.entity, CSI2_PAD_SOURCE,
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->csi2b.subdev.entity, CSI2_PAD_SOURCE,
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+ &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
+ &iss->ipipe.subdev.entity, IPIPE_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ ret = media_entity_create_link(
+ &iss->ipipe.subdev.entity, IPIPE_PAD_SOURCE_VP,
+ &iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap4iss_resizer_cleanup(iss);
+error_resizer:
+ omap4iss_ipipe_cleanup(iss);
+error_ipipe:
+ omap4iss_ipipeif_cleanup(iss);
+error_ipipeif:
+ omap4iss_csi2_cleanup(iss);
+error_csi2:
+error_csiphy:
+ return ret;
+}
+
+static int iss_probe(struct platform_device *pdev)
+{
+ struct iss_platform_data *pdata = pdev->dev.platform_data;
+ struct iss_device *iss;
+ unsigned int i;
+ int ret;
+
+ if (pdata == NULL)
+ return -EINVAL;
+
+ iss = kzalloc(sizeof(*iss), GFP_KERNEL);
+ if (!iss) {
+ dev_err(&pdev->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&iss->iss_mutex);
+
+ iss->dev = &pdev->dev;
+ iss->pdata = pdata;
+
+ iss->raw_dmamask = DMA_BIT_MASK(32);
+ iss->dev->dma_mask = &iss->raw_dmamask;
+ iss->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ platform_set_drvdata(pdev, iss);
+
+ /* Clocks */
+ ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
+ if (ret < 0)
+ goto error;
+
+ ret = iss_get_clocks(iss);
+ if (ret < 0)
+ goto error;
+
+ if (omap4iss_get(iss) == NULL)
+ goto error;
+
+ ret = iss_reset(iss);
+ if (ret < 0)
+ goto error_iss;
+
+ iss->revision = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
+ dev_info(iss->dev, "Revision %08x found\n", iss->revision);
+
+ for (i = 1; i < OMAP4_ISS_MEM_LAST; i++) {
+ ret = iss_map_mem_resource(pdev, iss, i);
+ if (ret)
+ goto error_iss;
+ }
+
+ /* Configure BTE BW_LIMITER field to max recommended value (1 GB) */
+ iss_reg_update(iss, OMAP4_ISS_MEM_BTE, BTE_CTRL,
+ BTE_CTRL_BW_LIMITER_MASK,
+ 18 << BTE_CTRL_BW_LIMITER_SHIFT);
+
+ /* Perform ISP reset */
+ ret = omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_ISP);
+ if (ret < 0)
+ goto error_iss;
+
+ ret = iss_isp_reset(iss);
+ if (ret < 0)
+ goto error_iss;
+
+ dev_info(iss->dev, "ISP Revision %08x found\n",
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));
+
+ /* Interrupt */
+ iss->irq_num = platform_get_irq(pdev, 0);
+ if (iss->irq_num <= 0) {
+ dev_err(iss->dev, "No IRQ resource\n");
+ ret = -ENODEV;
+ goto error_iss;
+ }
+
+ if (request_irq(iss->irq_num, iss_isr, IRQF_SHARED, "OMAP4 ISS", iss)) {
+ dev_err(iss->dev, "Unable to request IRQ\n");
+ ret = -EINVAL;
+ goto error_iss;
+ }
+
+ /* Entities */
+ ret = iss_initialize_modules(iss);
+ if (ret < 0)
+ goto error_irq;
+
+ ret = iss_register_entities(iss);
+ if (ret < 0)
+ goto error_modules;
+
+ omap4iss_put(iss);
+
+ return 0;
+
+error_modules:
+ iss_cleanup_modules(iss);
+error_irq:
+ free_irq(iss->irq_num, iss);
+error_iss:
+ omap4iss_put(iss);
+error:
+ iss_put_clocks(iss);
+
+ for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+ if (iss->regs[i]) {
+ iounmap(iss->regs[i]);
+ iss->regs[i] = NULL;
+ }
+
+ if (iss->res[i]) {
+ release_mem_region(iss->res[i]->start,
+ resource_size(iss->res[i]));
+ iss->res[i] = NULL;
+ }
+ }
+ platform_set_drvdata(pdev, NULL);
+
+ mutex_destroy(&iss->iss_mutex);
+ kfree(iss);
+
+ return ret;
+}
+
+static int iss_remove(struct platform_device *pdev)
+{
+ struct iss_device *iss = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ iss_unregister_entities(iss);
+ iss_cleanup_modules(iss);
+
+ free_irq(iss->irq_num, iss);
+ iss_put_clocks(iss);
+
+ for (i = 0; i < OMAP4_ISS_MEM_LAST; i++) {
+ if (iss->regs[i]) {
+ iounmap(iss->regs[i]);
+ iss->regs[i] = NULL;
+ }
+
+ if (iss->res[i]) {
+ release_mem_region(iss->res[i]->start,
+ resource_size(iss->res[i]));
+ iss->res[i] = NULL;
+ }
+ }
+
+ kfree(iss);
+
+ return 0;
+}
+
+static struct platform_device_id omap4iss_id_table[] = {
+ { "omap4iss", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, omap4iss_id_table);
+
+static struct platform_driver iss_driver = {
+ .probe = iss_probe,
+ .remove = iss_remove,
+ .id_table = omap4iss_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "omap4iss",
+ },
+};
+
+module_platform_driver(iss_driver);
+
+MODULE_DESCRIPTION("TI OMAP4 ISS driver");
+MODULE_AUTHOR("Sergio Aguirre <sergio.a.aguirre@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ISS_VIDEO_DRIVER_VERSION);
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
new file mode 100644
index 000000000000..346db9233171
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -0,0 +1,236 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_H_
+#define _OMAP4_ISS_H_
+
+#include <media/v4l2-device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include <media/omap4iss.h>
+
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+#include "iss_csi2.h"
+#include "iss_ipipeif.h"
+#include "iss_ipipe.h"
+#include "iss_resizer.h"
+
+#define to_iss_device(ptr_module) \
+ container_of(ptr_module, struct iss_device, ptr_module)
+#define to_device(ptr_module) \
+ (to_iss_device(ptr_module)->dev)
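+
+/*
+ * Note: the argument to these macros is used both as the pointer and as the
+ * struct iss_device member name in container_of(), so it must match the name
+ * of the corresponding iss_device member.
+ */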
+
+enum iss_mem_resources {
+ OMAP4_ISS_MEM_TOP,
+ OMAP4_ISS_MEM_CSI2_A_REGS1,
+ OMAP4_ISS_MEM_CAMERARX_CORE1,
+ OMAP4_ISS_MEM_CSI2_B_REGS1,
+ OMAP4_ISS_MEM_CAMERARX_CORE2,
+ OMAP4_ISS_MEM_BTE,
+ OMAP4_ISS_MEM_ISP_SYS1,
+ OMAP4_ISS_MEM_ISP_RESIZER,
+ OMAP4_ISS_MEM_ISP_IPIPE,
+ OMAP4_ISS_MEM_ISP_ISIF,
+ OMAP4_ISS_MEM_ISP_IPIPEIF,
+ OMAP4_ISS_MEM_LAST,
+};
+
+enum iss_subclk_resource {
+ OMAP4_ISS_SUBCLK_SIMCOP = (1 << 0),
+ OMAP4_ISS_SUBCLK_ISP = (1 << 1),
+ OMAP4_ISS_SUBCLK_CSI2_A = (1 << 2),
+ OMAP4_ISS_SUBCLK_CSI2_B = (1 << 3),
+ OMAP4_ISS_SUBCLK_CCP2 = (1 << 4),
+};
+
+enum iss_isp_subclk_resource {
+ OMAP4_ISS_ISP_SUBCLK_BL = (1 << 0),
+ OMAP4_ISS_ISP_SUBCLK_ISIF = (1 << 1),
+ OMAP4_ISS_ISP_SUBCLK_H3A = (1 << 2),
+ OMAP4_ISS_ISP_SUBCLK_RSZ = (1 << 3),
+ OMAP4_ISS_ISP_SUBCLK_IPIPE = (1 << 4),
+ OMAP4_ISS_ISP_SUBCLK_IPIPEIF = (1 << 5),
+};
+
+/*
+ * struct iss_reg - Structure for ISS register values.
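+ * @mmio_range: Memory space in which the register is located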
+ * @reg: 32-bit Register address.
+ * @val: 32-bit Register value.
+ */
+struct iss_reg {
+ enum iss_mem_resources mmio_range;
+ u32 reg;
+ u32 val;
+};
+
+/*
+ * struct iss_device - ISS device structure.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
+ */
+struct iss_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct device *dev;
+ u32 revision;
+
+ /* platform HW resources */
+ struct iss_platform_data *pdata;
+ unsigned int irq_num;
+
+ struct resource *res[OMAP4_ISS_MEM_LAST];
+ void __iomem *regs[OMAP4_ISS_MEM_LAST];
+
+ u64 raw_dmamask;
+
+ struct mutex iss_mutex; /* For handling ref_count field */
+ u32 crashed;
+ int has_context;
+ int ref_count;
+
+ struct clk *iss_fck;
+ struct clk *iss_ctrlclk;
+
+ /* ISS modules */
+ struct iss_csi2_device csi2a;
+ struct iss_csi2_device csi2b;
+ struct iss_csiphy csiphy1;
+ struct iss_csiphy csiphy2;
+ struct iss_ipipeif_device ipipeif;
+ struct iss_ipipe_device ipipe;
+ struct iss_resizer_device resizer;
+
+ unsigned int subclk_resources;
+ unsigned int isp_subclk_resources;
+};
+
+#define v4l2_dev_to_iss_device(dev) \
+ container_of(dev, struct iss_device, v4l2_dev)
+
+int omap4iss_get_external_info(struct iss_pipeline *pipe,
+ struct media_link *link);
+
+int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
+ atomic_t *stopping);
+
+int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
+ enum iss_pipeline_stream_state state);
+void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe);
+
+void omap4iss_configure_bridge(struct iss_device *iss,
+ enum ipipeif_input_entity input);
+
+struct iss_device *omap4iss_get(struct iss_device *iss);
+void omap4iss_put(struct iss_device *iss);
+int omap4iss_subclk_enable(struct iss_device *iss,
+ enum iss_subclk_resource res);
+int omap4iss_subclk_disable(struct iss_device *iss,
+ enum iss_subclk_resource res);
+void omap4iss_isp_subclk_enable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res);
+void omap4iss_isp_subclk_disable(struct iss_device *iss,
+ enum iss_isp_subclk_resource res);
+
+int omap4iss_pipeline_pm_use(struct media_entity *entity, int use);
+
+int omap4iss_register_entities(struct platform_device *pdev,
+ struct v4l2_device *v4l2_dev);
+void omap4iss_unregister_entities(struct platform_device *pdev);
+
+/*
+ * iss_reg_read - Read the value of an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ *
+ * Return the register value.
+ */
+static inline
+u32 iss_reg_read(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset)
+{
+ return readl(iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_write - Write a value to an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @value: value to be written
+ */
+static inline
+void iss_reg_write(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 value)
+{
+ writel(value, iss->regs[res] + offset);
+}
+
+/*
+ * iss_reg_clr - Clear bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ */
+static inline
+void iss_reg_clr(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 clr)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, v & ~clr);
+}
+
+/*
+ * iss_reg_set - Set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @set: bit mask to be set
+ */
+static inline
+void iss_reg_set(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 set)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, v | set);
+}
+
+/*
+ * iss_reg_update - Clear and set bits in an OMAP4 ISS register
+ * @iss: the ISS device
+ * @res: memory resource in which the register is located
+ * @offset: register offset in the memory resource
+ * @clr: bit mask to be cleared
+ * @set: bit mask to be set
+ *
+ * Clear the clr mask first and then set the set mask.
+ */
+static inline
+void iss_reg_update(struct iss_device *iss, enum iss_mem_resources res,
+ u32 offset, u32 clr, u32 set)
+{
+ u32 v = iss_reg_read(iss, res, offset);
+
+ iss_reg_write(iss, res, offset, (v & ~clr) | set);
+}
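+
+/*
+ * Usage sketch (illustrative): read-modify-write the ISP5 standby mode field
+ * while leaving the other SYSCONFIG bits untouched, as iss_isp_reset() does:
+ *
+ *    iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
+ *                   ISP5_SYSCONFIG_STANDBYMODE_MASK,
+ *                   ISP5_SYSCONFIG_STANDBYMODE_SMART);
+ */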
+
+#endif /* _OMAP4_ISS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
new file mode 100644
index 000000000000..61fc350eb251
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -0,0 +1,1343 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/mm.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csi2.h"
+
+/*
+ * csi2_if_enable - Enable CSI2 Receiver interface.
+ * @enable: enable flag
+ *
+ */
+static void csi2_if_enable(struct iss_csi2_device *csi2, u8 enable)
+{
+ struct iss_csi2_ctrl_cfg *currctrl = &csi2->ctrl;
+
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTRL, CSI2_CTRL_IF_EN,
+ enable ? CSI2_CTRL_IF_EN : 0);
+
+ currctrl->if_enable = enable;
+}
+
+/*
+ * csi2_recv_config - CSI2 receiver module configuration.
+ * @currctrl: iss_csi2_ctrl_cfg structure
+ *
+ */
+static void csi2_recv_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctrl_cfg *currctrl)
+{
+ u32 reg = 0;
+
+ if (currctrl->frame_mode)
+ reg |= CSI2_CTRL_FRAME;
+ else
+ reg &= ~CSI2_CTRL_FRAME;
+
+ if (currctrl->vp_clk_enable)
+ reg |= CSI2_CTRL_VP_CLK_EN;
+ else
+ reg &= ~CSI2_CTRL_VP_CLK_EN;
+
+ if (currctrl->vp_only_enable)
+ reg |= CSI2_CTRL_VP_ONLY_EN;
+ else
+ reg &= ~CSI2_CTRL_VP_ONLY_EN;
+
+ reg &= ~CSI2_CTRL_VP_OUT_CTRL_MASK;
+ reg |= currctrl->vp_out_ctrl << CSI2_CTRL_VP_OUT_CTRL_SHIFT;
+
+ if (currctrl->ecc_enable)
+ reg |= CSI2_CTRL_ECC_EN;
+ else
+ reg &= ~CSI2_CTRL_ECC_EN;
+
+ /*
+ * Set MFlag assertion boundaries to:
+ * Low: 4/8 of FIFO size
+ * High: 6/8 of FIFO size
+ */
+ reg &= ~(CSI2_CTRL_MFLAG_LEVH_MASK | CSI2_CTRL_MFLAG_LEVL_MASK);
+ reg |= (2 << CSI2_CTRL_MFLAG_LEVH_SHIFT) |
+ (4 << CSI2_CTRL_MFLAG_LEVL_SHIFT);
+
+ /* Generation of 16x64-bit bursts (Recommended) */
+ reg |= CSI2_CTRL_BURST_SIZE_EXPAND;
+
+ /* Do Non-Posted writes (Recommended) */
+ reg |= CSI2_CTRL_NON_POSTED_WRITE;
+
+ /*
+ * Enforce Little endian for all formats, including:
+ * YUV4:2:2 8-bit and YUV4:2:0 Legacy
+ */
+ reg |= CSI2_CTRL_ENDIANNESS;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTRL, reg);
+}
+
+static const unsigned int csi2_input_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/* To set the format on the CSI2 requires a mapping function that takes
+ * the following inputs:
+ * - 4 different formats (at this time)
+ * - 2 destinations (mem, vp+mem) (vp only handled separately)
+ * - 2 decompression options (on, off)
+ * Output should be CSI2 frame format code
+ * Array indices as follows: [format][dest][decompr]
+ * Not all combinations are valid. 0 means invalid.
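+ * For example, __csi2_fmt_map[1][1][1] below is
+ * CSI2_USERDEF_8BIT_DATA1_DPCM10_VP: a RAW10 DPCM8 input, output to both
+ * memory and the video port, with DPCM decompression enabled.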
+ */
+static const u16 __csi2_fmt_map[][2][2] = {
+ /* RAW10 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW10_EXP16,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW10_EXP16_VP,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+ /* RAW10 DPCM8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1,
+ /* DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1_DPCM10,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8_VP,
+ /* DPCM decompression */
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP,
+ },
+ },
+ /* RAW8 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_RAW8_VP,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+ /* YUV422 formats */
+ {
+ /* Output to memory */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_YUV422_8BIT,
+ /* DPCM decompression */
+ 0,
+ },
+ /* Output to both */
+ {
+ /* No DPCM decompression */
+ CSI2_PIX_FMT_YUV422_8BIT_VP16,
+ /* DPCM decompression */
+ 0,
+ },
+ },
+};
+
+/*
+ * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
+ * @csi2: ISS CSI2 device
+ *
+ * Returns CSI2 physical format id
+ */
+static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
+{
+ const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
+ int fmtidx, destidx;
+
+ switch (fmt->code) {
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ fmtidx = 0;
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
+ case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ fmtidx = 1;
+ break;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
+ fmtidx = 2;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ fmtidx = 3;
+ break;
+ default:
+ WARN(1, "CSI2: pixel format %08x unsupported!\n",
+ fmt->code);
+ return 0;
+ }
+
+ if (!(csi2->output & CSI2_OUTPUT_IPIPEIF) &&
+ !(csi2->output & CSI2_OUTPUT_MEMORY)) {
+ /* Neither output enabled is a valid combination */
+ return CSI2_PIX_FMT_OTHERS;
+ }
+
+ /* If we need to skip frames at the beginning of the stream disable the
+ * video port to avoid sending the skipped frames to the IPIPEIF.
+ */
+ destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+ return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress];
+}
+
+/*
+ * csi2_set_outaddr - Set memory address to save output image
+ * @csi2: Pointer to ISS CSI2a device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ *
+ * The address must be aligned on a 32-byte boundary.
+ */
+static void csi2_set_outaddr(struct iss_csi2_device *csi2, u32 addr)
+{
+ struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[0];
+
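+ /* Program the same address in both the ping and pong registers so the
+ * context writes to the current buffer regardless of the ping/pong
+ * phase (assumption: the driver uses a single buffer per frame).
+ */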
+ ctx->ping_addr = addr;
+ ctx->pong_addr = addr;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+ ctx->ping_addr);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+ ctx->pong_addr);
+}
+
+/*
+ * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
+ * be enabled by CSI2.
+ * @format_id: mapped format id
+ *
+ */
+static inline int is_usr_def_mapping(u32 format_id)
+{
+ return (format_id & 0xf0) == 0x40 ? 1 : 0;
+}
+
+/*
+ * csi2_ctx_enable - Enable specified CSI2 context
+ * @ctxnum: Context number, valid range is 0 to 7.
+ * @enable: enable flag
+ *
+ */
+static void csi2_ctx_enable(struct iss_csi2_device *csi2, u8 ctxnum, u8 enable)
+{
+ struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
+ u32 reg;
+
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum));
+
+ if (enable) {
+ unsigned int skip = 0;
+
+ if (csi2->frame_skip)
+ skip = csi2->frame_skip;
+ else if (csi2->output & CSI2_OUTPUT_MEMORY)
+ skip = 1;
+
+ reg &= ~CSI2_CTX_CTRL1_COUNT_MASK;
+ reg |= CSI2_CTX_CTRL1_COUNT_UNLOCK
+ | (skip << CSI2_CTX_CTRL1_COUNT_SHIFT)
+ | CSI2_CTX_CTRL1_CTX_EN;
+ } else {
+ reg &= ~CSI2_CTX_CTRL1_CTX_EN;
+ }
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum), reg);
+ ctx->enabled = enable;
+}
+
+/*
+ * csi2_ctx_config - CSI2 context configuration.
+ * @ctx: context configuration
+ *
+ */
+static void csi2_ctx_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctx_cfg *ctx)
+{
+ u32 reg = 0;
+
+ /* Set up CSI2_CTx_CTRL1 */
+ if (ctx->eof_enabled)
+ reg = CSI2_CTX_CTRL1_EOF_EN;
+
+ if (ctx->eol_enabled)
+ reg |= CSI2_CTX_CTRL1_EOL_EN;
+
+ if (ctx->checksum_enabled)
+ reg |= CSI2_CTX_CTRL1_CS_EN;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctx->ctxnum), reg);
+
+ /* Set up CSI2_CTx_CTRL2 */
+ reg = ctx->virtual_id << CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;
+ reg |= ctx->format_id << CSI2_CTX_CTRL2_FORMAT_SHIFT;
+
+ if (ctx->dpcm_decompress && ctx->dpcm_predictor)
+ reg |= CSI2_CTX_CTRL2_DPCM_PRED;
+
+ if (is_usr_def_mapping(ctx->format_id))
+ reg |= 2 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL2(ctx->ctxnum), reg);
+
+ /* Set up CSI2_CTx_CTRL3 */
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL3(ctx->ctxnum),
+ ctx->alpha << CSI2_CTX_CTRL3_ALPHA_SHIFT);
+
+ /* Set up CSI2_CTx_DAT_OFST */
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTX_DAT_OFST(ctx->ctxnum),
+ CSI2_CTX_DAT_OFST_MASK, ctx->data_offset);
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
+ ctx->ping_addr);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
+ ctx->pong_addr);
+}
+
+/*
+ * csi2_timing_config - CSI2 timing configuration.
+ * @timing: csi2_timing_cfg structure
+ */
+static void csi2_timing_config(struct iss_csi2_device *csi2,
+ struct iss_csi2_timing_cfg *timing)
+{
+ u32 reg;
+
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_TIMING);
+
+ if (timing->force_rx_mode)
+ reg |= CSI2_TIMING_FORCE_RX_MODE_IO1;
+ else
+ reg &= ~CSI2_TIMING_FORCE_RX_MODE_IO1;
+
+ if (timing->stop_state_16x)
+ reg |= CSI2_TIMING_STOP_STATE_X16_IO1;
+ else
+ reg &= ~CSI2_TIMING_STOP_STATE_X16_IO1;
+
+ if (timing->stop_state_4x)
+ reg |= CSI2_TIMING_STOP_STATE_X4_IO1;
+ else
+ reg &= ~CSI2_TIMING_STOP_STATE_X4_IO1;
+
+ reg &= ~CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK;
+ reg |= timing->stop_state_counter <<
+ CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT;
+
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_TIMING, reg);
+}
+
+/*
+ * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
+ * @enable: Enable/disable CSI2 Context interrupts
+ */
+static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg = CSI2_CTX_IRQ_FE;
+ int i;
+
+ if (csi2->use_fs_irq)
+ reg |= CSI2_CTX_IRQ_FS;
+
+ for (i = 0; i < 8; i++) {
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(i),
+ reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1,
+ CSI2_CTX_IRQENABLE(i), reg);
+ else
+ iss_reg_clr(csi2->iss, csi2->regs1,
+ CSI2_CTX_IRQENABLE(i), reg);
+ }
+}
+
+/*
+ * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
+ * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
+ */
+static void csi2_irq_complexio1_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT |
+ CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER |
+ CSI2_COMPLEXIO_IRQ_STATEULPM5 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL5 |
+ CSI2_COMPLEXIO_IRQ_ERRESC5 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS5 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM4 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL4 |
+ CSI2_COMPLEXIO_IRQ_ERRESC4 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS4 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM3 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL3 |
+ CSI2_COMPLEXIO_IRQ_ERRESC3 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS3 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM2 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL2 |
+ CSI2_COMPLEXIO_IRQ_ERRESC2 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS2 |
+ CSI2_COMPLEXIO_IRQ_STATEULPM1 |
+ CSI2_COMPLEXIO_IRQ_ERRCONTROL1 |
+ CSI2_COMPLEXIO_IRQ_ERRESC1 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 |
+ CSI2_COMPLEXIO_IRQ_ERRSOTHS1;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS, reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+ reg);
+ else
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
+ 0);
+}
+
+/*
+ * csi2_irq_status_set - Enables CSI2 Status IRQs.
+ * @enable: Enable/disable CSI2 Status interrupts
+ */
+static void csi2_irq_status_set(struct iss_csi2_device *csi2, int enable)
+{
+ u32 reg;
+ reg = CSI2_IRQ_OCP_ERR |
+ CSI2_IRQ_SHORT_PACKET |
+ CSI2_IRQ_ECC_CORRECTION |
+ CSI2_IRQ_ECC_NO_CORRECTION |
+ CSI2_IRQ_COMPLEXIO_ERR |
+ CSI2_IRQ_FIFO_OVF |
+ CSI2_IRQ_CONTEXT0;
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, reg);
+ if (enable)
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_IRQENABLE, reg);
+ else
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQENABLE, 0);
+}
+
+/*
+ * omap4iss_csi2_reset - Resets the CSI2 module.
+ *
+ * Must be called with the phy lock held.
+ *
+ * Returns 0 if successful, -ENODEV if the module isn't available, or -EBUSY
+ * if the PHY is in use or the reset doesn't complete in time.
+ */
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2)
+{
+ u8 soft_reset_retries = 0;
+ u32 reg;
+ int i;
+
+ if (!csi2->available)
+ return -ENODEV;
+
+ if (csi2->phy->phy_in_use)
+ return -EBUSY;
+
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+ CSI2_SYSCONFIG_SOFT_RESET);
+
+ do {
+ reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_SYSSTATUS)
+ & CSI2_SYSSTATUS_RESET_DONE;
+ if (reg == CSI2_SYSSTATUS_RESET_DONE)
+ break;
+ soft_reset_retries++;
+ if (soft_reset_retries < 5)
+ usleep_range(100, 100);
+ } while (soft_reset_retries < 5);
+
+ if (soft_reset_retries == 5) {
+ dev_err(csi2->iss->dev,
+ "CSI2: Soft reset try count exceeded!\n");
+ return -EBUSY;
+ }
+
+ iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_CFG,
+ CSI2_COMPLEXIO_CFG_RESET_CTRL);
+
+ i = 100;
+ do {
+ reg = iss_reg_read(csi2->iss, csi2->phy->phy_regs, REGISTER1)
+ & REGISTER1_RESET_DONE_CTRLCLK;
+ if (reg == REGISTER1_RESET_DONE_CTRLCLK)
+ break;
+ usleep_range(100, 100);
+ } while (--i > 0);
+
+ if (i == 0) {
+ dev_err(csi2->iss->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ return -EBUSY;
+ }
+
+ iss_reg_update(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
+ CSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
+ CSI2_SYSCONFIG_AUTO_IDLE,
+ CSI2_SYSCONFIG_MSTANDBY_MODE_NO);
+
+ return 0;
+}
+
+static int csi2_configure(struct iss_csi2_device *csi2)
+{
+ const struct iss_v4l2_subdevs_group *pdata;
+ struct iss_csi2_timing_cfg *timing = &csi2->timing[0];
+ struct v4l2_subdev *sensor;
+ struct media_pad *pad;
+
+ /*
+ * CSI2 fields that can be updated while the context or the interface
+ * is enabled are currently not updated dynamically, so don't allow
+ * reconfiguration if either has already been enabled.
+ */
+ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
+ return -EBUSY;
+
+ pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
+ sensor = media_entity_to_v4l2_subdev(pad->entity);
+ pdata = sensor->host_priv;
+
+ csi2->frame_skip = 0;
+ v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
+
+ csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
+ csi2->ctrl.frame_mode = ISS_CSI2_FRAME_IMMEDIATE;
+ csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;
+
+ timing->force_rx_mode = 1;
+ timing->stop_state_16x = 1;
+ timing->stop_state_4x = 1;
+ timing->stop_state_counter = 0x1ff;
+
+ /*
+ * The CSI2 receiver can't do any format conversion except DPCM
+ * decompression, so every set_format call configures both pads
+ * and enables DPCM decompression as a special case:
+ */
+ if (csi2->formats[CSI2_PAD_SINK].code !=
+ csi2->formats[CSI2_PAD_SOURCE].code)
+ csi2->dpcm_decompress = true;
+ else
+ csi2->dpcm_decompress = false;
+
+ csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);
+
+ if (csi2->video_out.bpl_padding == 0)
+ csi2->contexts[0].data_offset = 0;
+ else
+ csi2->contexts[0].data_offset = csi2->video_out.bpl_value;
+
+ /*
+ * Enable end of frame and end of line signals generation for
+ * context 0. These signals are generated from CSI2 receiver to
+ * qualify the last pixel of a frame and the last pixel of a line.
+ * Without enabling the signals CSI2 receiver writes data to memory
+ * beyond buffer size and/or data line offset is not handled correctly.
+ */
+ csi2->contexts[0].eof_enabled = 1;
+ csi2->contexts[0].eol_enabled = 1;
+
+ csi2_irq_complexio1_set(csi2, 1);
+ csi2_irq_ctx_set(csi2, 1);
+ csi2_irq_status_set(csi2, 1);
+
+ /* Set configuration (timings, format and links) */
+ csi2_timing_config(csi2, timing);
+ csi2_recv_config(csi2, &csi2->ctrl);
+ csi2_ctx_config(csi2, &csi2->contexts[0]);
+
+ return 0;
+}
+
+/*
+ * csi2_print_status - Prints CSI2 debug information.
+ */
+#define CSI2_PRINT_REGISTER(iss, regs, name)\
+ dev_dbg(iss->dev, "###CSI2 " #name "=0x%08x\n", \
+ iss_reg_read(iss, regs, CSI2_##name))
+
+static void csi2_print_status(struct iss_csi2_device *csi2)
+{
+ struct iss_device *iss = csi2->iss;
+
+ if (!csi2->available)
+ return;
+
+ dev_dbg(iss->dev, "-------------CSI2 Register dump-------------\n");
+
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQENABLE);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTRL);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_H);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_CFG);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQSTATUS);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, SHORT_PACKET);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQENABLE);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_P);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, TIMING);
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL1(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL2(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_DAT_OFST(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PING_ADDR(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PONG_ADDR(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQENABLE(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQSTATUS(0));
+ CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL3(0));
+
+ dev_dbg(iss->dev, "--------------------------------------------\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+/*
+ * csi2_isr_buffer - Does buffer handling at end-of-frame
+ * when writing to memory.
+ */
+static void csi2_isr_buffer(struct iss_csi2_device *csi2)
+{
+ struct iss_buffer *buffer;
+
+ csi2_ctx_enable(csi2, 0, 0);
+
+ buffer = omap4iss_video_buffer_next(&csi2->video_out);
+
+ /*
+ * Let video queue operation restart engine if there is an underrun
+ * condition.
+ */
+ if (buffer == NULL)
+ return;
+
+ csi2_set_outaddr(csi2, buffer->iss_addr);
+ csi2_ctx_enable(csi2, 0, 1);
+}
+
+static void csi2_isr_ctx(struct iss_csi2_device *csi2,
+ struct iss_csi2_ctx_cfg *ctx)
+{
+ unsigned int n = ctx->ctxnum;
+ u32 status;
+
+ status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n));
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status);
+
+ /* Propagate frame number */
+ if (status & CSI2_CTX_IRQ_FS) {
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&csi2->subdev.entity);
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+ }
+
+ if (!(status & CSI2_CTX_IRQ_FE))
+ return;
+
+ /* Skip interrupts until we reach the frame skip count. The CSI2 will be
+ * automatically disabled, as the frame skip count has been programmed
+ * in the CSI2_CTx_CTRL1::COUNT field, so reenable it.
+ *
+ * It would have been nice to rely on the FRAME_NUMBER interrupt instead
+ * but it turned out that the interrupt is only generated when the CSI2
+ * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
+ * correctly and reaches 0 when data is forwarded to the video port only
+ * but no interrupt arrives). Maybe a CSI2 hardware bug.
+ */
+ if (csi2->frame_skip) {
+ csi2->frame_skip--;
+ if (csi2->frame_skip == 0) {
+ ctx->format_id = csi2_ctx_map_format(csi2);
+ csi2_ctx_config(csi2, ctx);
+ csi2_ctx_enable(csi2, n, 1);
+ }
+ return;
+ }
+
+ if (csi2->output & CSI2_OUTPUT_MEMORY)
+ csi2_isr_buffer(csi2);
+}
+
+/*
+ * omap4iss_csi2_isr - CSI2 interrupt handling.
+ */
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ u32 csi2_irqstatus, cpxio1_irqstatus;
+ struct iss_device *iss = csi2->iss;
+
+ if (!csi2->available)
+ return;
+
+ csi2_irqstatus = iss_reg_read(csi2->iss, csi2->regs1, CSI2_IRQSTATUS);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, csi2_irqstatus);
+
+ /* Failure Cases */
+ if (csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR) {
+ cpxio1_irqstatus = iss_reg_read(csi2->iss, csi2->regs1,
+ CSI2_COMPLEXIO_IRQSTATUS);
+ iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS,
+ cpxio1_irqstatus);
+ dev_dbg(iss->dev, "CSI2: ComplexIO Error IRQ %x\n",
+ cpxio1_irqstatus);
+ pipe->error = true;
+ }
+
+ if (csi2_irqstatus & (CSI2_IRQ_OCP_ERR |
+ CSI2_IRQ_SHORT_PACKET |
+ CSI2_IRQ_ECC_NO_CORRECTION |
+ CSI2_IRQ_COMPLEXIO_ERR |
+ CSI2_IRQ_FIFO_OVF)) {
+ dev_dbg(iss->dev,
+ "CSI2 Err: OCP:%d SHORT:%d ECC:%d CPXIO:%d OVF:%d\n",
+ csi2_irqstatus & CSI2_IRQ_OCP_ERR ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_SHORT_PACKET ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_ECC_NO_CORRECTION ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR ? 1 : 0,
+ csi2_irqstatus & CSI2_IRQ_FIFO_OVF ? 1 : 0);
+ pipe->error = true;
+ }
+
+ if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
+ return;
+
+ /* Successful cases */
+ if (csi2_irqstatus & CSI2_IRQ_CONTEXT0)
+ csi2_isr_ctx(csi2, &csi2->contexts[0]);
+
+ if (csi2_irqstatus & CSI2_IRQ_ECC_CORRECTION)
+ dev_dbg(iss->dev, "CSI2: ECC correction done\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+/*
+ * csi2_queue - Queues the first buffer when using memory output
+ * @video: The video node
+ * @buffer: buffer to queue
+ */
+static int csi2_queue(struct iss_video *video, struct iss_buffer *buffer)
+{
+ struct iss_csi2_device *csi2 = container_of(video,
+ struct iss_csi2_device, video_out);
+
+ csi2_set_outaddr(csi2, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before a buffer was queued, or if an
+ * underrun occurred in the ISR, the hardware was not enabled and the
+ * DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
+ * Enable the hardware now.
+ */
+ if (csi2->video_out.dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ /* Enable / disable context 0 and IRQs */
+ csi2_if_enable(csi2, 1);
+ csi2_ctx_enable(csi2, 0, 1);
+ iss_video_dmaqueue_flags_clr(&csi2->video_out);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations csi2_issvideo_ops = {
+ .queue = csi2_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+static void
+csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ const struct iss_format_info *info;
+ unsigned int i;
+
+ switch (pad) {
+ case CSI2_PAD_SINK:
+ /* Clamp the width and height to valid range (1-8191). */
+ for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
+ if (fmt->code == csi2_input_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(csi2_input_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ fmt->width = clamp_t(u32, fmt->width, 1, 8191);
+ fmt->height = clamp_t(u32, fmt->height, 1, 8191);
+ break;
+
+ case CSI2_PAD_SOURCE:
+ /* Source format same as sink format, except for DPCM
+ * compression.
+ */
+ pixelcode = fmt->code;
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /*
+ * Only allow DPCM decompression, and check that the
+ * pattern is preserved
+ */
+ info = omap4iss_video_format_info(fmt->code);
+ if (info->uncompressed == pixelcode)
+ fmt->code = pixelcode;
+ break;
+ }
+
+ /* RGB, non-interlaced */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct iss_format_info *info;
+
+ if (code->pad == CSI2_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(csi2_input_fmts))
+ return -EINVAL;
+
+ code->code = csi2_input_fmts[code->index];
+ } else {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+ switch (code->index) {
+ case 0:
+ /* Passthrough sink pad code */
+ code->code = format->code;
+ break;
+ case 1:
+ /* Uncompressed code */
+ info = omap4iss_video_format_info(format->code);
+ if (info->uncompressed == format->code)
+ return -EINVAL;
+
+ code->code = info->uncompressed;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int csi2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ csi2_try_format(csi2, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ csi2_try_format(csi2, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == CSI2_PAD_SINK) {
+ format = __csi2_get_format(csi2, fh, CSI2_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ csi2_try_format(csi2, fh, CSI2_PAD_SOURCE, format, fmt->which);
+ }
+
+ return 0;
+}
+
+static int csi2_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ int rval;
+
+ pipe->external = media_entity_to_v4l2_subdev(link->source->entity);
+ rval = omap4iss_get_external_info(pipe, link);
+ if (rval < 0)
+ return rval;
+
+ return v4l2_subdev_link_validate_default(sd, link, source_fmt,
+ sink_fmt);
+}
+
+/*
+ * csi2_init_formats - Initialize formats on all pads
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = CSI2_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ csi2_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISS CSI2 V4L2 subdevice
+ * @enable: ISS pipeline stream state
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = csi2->iss;
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
+ struct iss_video *video_out = &csi2->video_out;
+ int ret = 0;
+
+ if (csi2->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_subclk_enable(iss, csi2->subclk);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS: {
+ ret = omap4iss_csiphy_config(iss, sd);
+ if (ret < 0)
+ return ret;
+
+ if (omap4iss_csiphy_acquire(csi2->phy) < 0)
+ return -ENODEV;
+ csi2->use_fs_irq = pipe->do_propagation;
+ csi2_configure(csi2);
+ csi2_print_status(csi2);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (csi2->output & CSI2_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+ /* Enable context 0 and IRQs */
+ atomic_set(&csi2->stopping, 0);
+ csi2_ctx_enable(csi2, 0, 1);
+ csi2_if_enable(csi2, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (csi2->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &csi2->wait,
+ &csi2->stopping))
+ ret = -ETIMEDOUT;
+ csi2_ctx_enable(csi2, 0, 0);
+ csi2_if_enable(csi2, 0);
+ csi2_irq_ctx_set(csi2, 0);
+ omap4iss_csiphy_release(csi2->phy);
+ omap4iss_subclk_disable(iss, csi2->subclk);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ csi2->state = enable;
+ return ret;
+}
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .enum_frame_size = csi2_enum_frame_size,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+ .link_validate = csi2_link_validate,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/* subdev internal operations */
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .open = csi2_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity : Pointer to media entity structure
+ * @local : Pointer to the local (CSI2) pad
+ * @remote : Pointer to the remote pad
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct iss_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
+
+ /*
+ * The ISS core doesn't support pipelines with multiple video outputs.
+ * Revisit this when it is implemented; return -EBUSY for now.
+ */
+
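+ /*
+ * Pad indexes only use the low bits, while the MEDIA_ENT_T_* values
+ * returned by media_entity_type() only use the high bits, so ORing
+ * them yields a distinct case value per (pad, remote entity type) pair.
+ */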
+ switch (local->index | media_entity_type(remote->entity)) {
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_MEMORY)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_MEMORY;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_MEMORY;
+ }
+ break;
+
+ case CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_IPIPEIF)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_IPIPEIF;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_IPIPEIF;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+
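+ /*
+ * Restrict the CSI2 output to the video port when memory output is not
+ * selected, and clock the video port only while the IPIPEIF link is in
+ * use.
+ */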
+ ctrl->vp_only_enable = !(csi2->output & CSI2_OUTPUT_MEMORY);
+ ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_IPIPEIF);
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2)
+{
+ v4l2_device_unregister_subdev(&csi2->subdev);
+ omap4iss_video_unregister(&csi2->video_out);
+}
+
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&csi2->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS CSI2 initialisation and cleanup
+ */
+
+/*
+ * csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to csi2 structure.
+ * return -ENOMEM or zero on success
+ */
+static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+ char name[V4L2_SUBDEV_NAME_SIZE];
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ sd->internal_ops = &csi2_internal_ops;
+ sprintf(name, "CSI2%s", subname);
+ snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
+
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ me->ops = &csi2_media_ops;
+ ret = media_entity_init(me, CSI2_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ csi2_init_formats(sd, NULL);
+
+ /* Video device node */
+ csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ csi2->video_out.ops = &csi2_issvideo_ops;
+ csi2->video_out.bpl_alignment = 32;
+ csi2->video_out.bpl_zero_padding = 1;
+ csi2->video_out.bpl_max = 0x1ffe0;
+ csi2->video_out.iss = csi2->iss;
+ csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+
+ ret = omap4iss_video_init(&csi2->video_out, name);
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the CSI2 subdev to the video node. */
+ ret = media_entity_create_link(&csi2->subdev.entity, CSI2_PAD_SOURCE,
+ &csi2->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ goto error_link;
+
+ return 0;
+
+error_link:
+ omap4iss_video_cleanup(&csi2->video_out);
+error_video:
+ media_entity_cleanup(&csi2->subdev.entity);
+ return ret;
+}
+
+/*
+ * omap4iss_csi2_init - Routine for module driver init
+ */
+int omap4iss_csi2_init(struct iss_device *iss)
+{
+ struct iss_csi2_device *csi2a = &iss->csi2a;
+ struct iss_csi2_device *csi2b = &iss->csi2b;
+ int ret;
+
+ csi2a->iss = iss;
+ csi2a->available = 1;
+ csi2a->regs1 = OMAP4_ISS_MEM_CSI2_A_REGS1;
+ csi2a->phy = &iss->csiphy1;
+ csi2a->subclk = OMAP4_ISS_SUBCLK_CSI2_A;
+ csi2a->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2a->wait);
+
+ ret = csi2_init_entities(csi2a, "a");
+ if (ret < 0)
+ return ret;
+
+ csi2b->iss = iss;
+ csi2b->available = 1;
+ csi2b->regs1 = OMAP4_ISS_MEM_CSI2_B_REGS1;
+ csi2b->phy = &iss->csiphy2;
+ csi2b->subclk = OMAP4_ISS_SUBCLK_CSI2_B;
+ csi2b->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&csi2b->wait);
+
+ ret = csi2_init_entities(csi2b, "b");
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * omap4iss_csi2_cleanup - Routine for module driver cleanup
+ */
+void omap4iss_csi2_cleanup(struct iss_device *iss)
+{
+ struct iss_csi2_device *csi2a = &iss->csi2a;
+ struct iss_csi2_device *csi2b = &iss->csi2b;
+
+ omap4iss_video_cleanup(&csi2a->video_out);
+ media_entity_cleanup(&csi2a->subdev.entity);
+
+ omap4iss_video_cleanup(&csi2b->video_out);
+ media_entity_cleanup(&csi2b->subdev.entity);
+}
diff --git a/drivers/staging/media/omap4iss/iss_csi2.h b/drivers/staging/media/omap4iss/iss_csi2.h
new file mode 100644
index 000000000000..971aa7b08013
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csi2.h
@@ -0,0 +1,158 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI2 module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI2_H
+#define OMAP4_ISS_CSI2_H
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include "iss_video.h"
+
+struct iss_csiphy;
+
+/* This is not an exhaustive list */
+enum iss_csi2_pix_formats {
+ CSI2_PIX_FMT_OTHERS = 0,
+ CSI2_PIX_FMT_YUV422_8BIT = 0x1e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP = 0x9e,
+ CSI2_PIX_FMT_YUV422_8BIT_VP16 = 0xde,
+ CSI2_PIX_FMT_RAW10_EXP16 = 0xab,
+ CSI2_PIX_FMT_RAW10_EXP16_VP = 0x12f,
+ CSI2_PIX_FMT_RAW8 = 0x2a,
+ CSI2_PIX_FMT_RAW8_DPCM10_EXP16 = 0x2aa,
+ CSI2_PIX_FMT_RAW8_DPCM10_VP = 0x32a,
+ CSI2_PIX_FMT_RAW8_VP = 0x12a,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10_VP = 0x340,
+ CSI2_USERDEF_8BIT_DATA1_DPCM10 = 0x2c0,
+ CSI2_USERDEF_8BIT_DATA1 = 0x40,
+};
+
+enum iss_csi2_irqevents {
+ OCP_ERR_IRQ = 0x4000,
+ SHORT_PACKET_IRQ = 0x2000,
+ ECC_CORRECTION_IRQ = 0x1000,
+ ECC_NO_CORRECTION_IRQ = 0x800,
+ COMPLEXIO2_ERR_IRQ = 0x400,
+ COMPLEXIO1_ERR_IRQ = 0x200,
+ FIFO_OVF_IRQ = 0x100,
+ CONTEXT7 = 0x80,
+ CONTEXT6 = 0x40,
+ CONTEXT5 = 0x20,
+ CONTEXT4 = 0x10,
+ CONTEXT3 = 0x8,
+ CONTEXT2 = 0x4,
+ CONTEXT1 = 0x2,
+ CONTEXT0 = 0x1,
+};
+
+enum iss_csi2_ctx_irqevents {
+ CTX_ECC_CORRECTION = 0x100,
+ CTX_LINE_NUMBER = 0x80,
+ CTX_FRAME_NUMBER = 0x40,
+ CTX_CS = 0x20,
+ CTX_LE = 0x8,
+ CTX_LS = 0x4,
+ CTX_FE = 0x2,
+ CTX_FS = 0x1,
+};
+
+enum iss_csi2_frame_mode {
+ ISS_CSI2_FRAME_IMMEDIATE,
+ ISS_CSI2_FRAME_AFTERFEC,
+};
+
+#define ISS_CSI2_MAX_CTX_NUM 7
+
+struct iss_csi2_ctx_cfg {
+ u8 ctxnum; /* context number 0 - 7 */
+ u8 dpcm_decompress;
+
+ /* Fields in CSI2_CTx_CTRL2 - locked by CSI2_CTx_CTRL1.CTX_EN */
+ u8 virtual_id;
+ u16 format_id; /* as in CSI2_CTx_CTRL2[9:0] */
+ u8 dpcm_predictor; /* 1: simple, 0: advanced */
+
+ /* Fields in CSI2_CTx_CTRL1/3 - Shadowed */
+ u16 alpha;
+ u16 data_offset;
+ u32 ping_addr;
+ u32 pong_addr;
+ u8 eof_enabled;
+ u8 eol_enabled;
+ u8 checksum_enabled;
+ u8 enabled;
+};
+
+struct iss_csi2_timing_cfg {
+ u8 ionum; /* IO1 or IO2 as in CSI2_TIMING */
+ unsigned force_rx_mode:1;
+ unsigned stop_state_16x:1;
+ unsigned stop_state_4x:1;
+ u16 stop_state_counter;
+};
+
+struct iss_csi2_ctrl_cfg {
+ bool vp_clk_enable;
+ bool vp_only_enable;
+ u8 vp_out_ctrl;
+ enum iss_csi2_frame_mode frame_mode;
+ bool ecc_enable;
+ bool if_enable;
+};
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_IPIPEIF (1 << 0)
+#define CSI2_OUTPUT_MEMORY (1 << 1)
+
+struct iss_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct iss_video video_out;
+ struct iss_device *iss;
+
+ u8 available; /* Is the IP present on the silicon? */
+
+ /* memory resources, as defined in enum iss_mem_resources */
+ unsigned int regs1;
+ unsigned int regs2;
+ /* ISP subclock, as defined in enum iss_isp_subclk_resource */
+ unsigned int subclk;
+
+ u32 output; /* output to IPIPEIF, memory or both? */
+ bool dpcm_decompress;
+ unsigned int frame_skip;
+ bool use_fs_irq;
+
+ struct iss_csiphy *phy;
+ struct iss_csi2_ctx_cfg contexts[ISS_CSI2_MAX_CTX_NUM + 1];
+ struct iss_csi2_timing_cfg timing[2];
+ struct iss_csi2_ctrl_cfg ctrl;
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+void omap4iss_csi2_isr(struct iss_csi2_device *csi2);
+int omap4iss_csi2_reset(struct iss_csi2_device *csi2);
+int omap4iss_csi2_init(struct iss_device *iss);
+void omap4iss_csi2_cleanup(struct iss_device *iss);
+void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2);
+int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
+ struct v4l2_device *vdev);
+#endif /* OMAP4_ISS_CSI2_H */
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
new file mode 100644
index 000000000000..7c3d55d811ef
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -0,0 +1,279 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include "../../../../arch/arm/mach-omap2/control.h"
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_csiphy.h"
+
+/*
+ * csiphy_lanes_config - Configuration of CSIPHY lanes.
+ *
+ * Updates HW configuration.
+ * Called with phy->mutex taken.
+ */
+static void csiphy_lanes_config(struct iss_csiphy *phy)
+{
+ unsigned int i;
+ u32 reg;
+
+ reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG);
+
+ for (i = 0; i < phy->max_data_lanes; i++) {
+ reg &= ~(CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) |
+ CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (phy->lanes.data[i].pol ?
+ CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) : 0);
+ reg |= (phy->lanes.data[i].pos <<
+ CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(CSI2_COMPLEXIO_CFG_CLOCK_POL |
+ CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK);
+ reg |= phy->lanes.clk.pol ? CSI2_COMPLEXIO_CFG_CLOCK_POL : 0;
+ reg |= phy->lanes.clk.pos << CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT;
+
+ iss_reg_write(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG, reg);
+}
+
+/*
+ * csiphy_set_power
+ * @power: Power state to be set.
+ *
+ * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
+ */
+static int csiphy_set_power(struct iss_csiphy *phy, u32 power)
+{
+ u32 reg;
+ u8 retry_count;
+
+ iss_reg_update(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG,
+ CSI2_COMPLEXIO_CFG_PWD_CMD_MASK,
+ power | CSI2_COMPLEXIO_CFG_PWR_AUTO);
+
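+ /*
+ * The PWD_STATUS field sits two bits below the PWD_CMD field in
+ * CSI2_COMPLEXIO_CFG, hence the "power >> 2" comparison below.
+ */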
+ retry_count = 0;
+ do {
+ udelay(1);
+ reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG)
+ & CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK;
+
+ if (reg != power >> 2)
+ retry_count++;
+
+ } while ((reg != power >> 2) && (retry_count < 250));
+
+ if (retry_count == 250) {
+ dev_err(phy->iss->dev, "CSI2 CIO set power failed!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
+ *
+ * Called with phy->mutex taken.
+ */
+static void csiphy_dphy_config(struct iss_csiphy *phy)
+{
+ u32 reg;
+
+ /* Set up REGISTER0 */
+ reg = phy->dphy.ths_term << REGISTER0_THS_TERM_SHIFT;
+ reg |= phy->dphy.ths_settle << REGISTER0_THS_SETTLE_SHIFT;
+
+ iss_reg_write(phy->iss, phy->phy_regs, REGISTER0, reg);
+
+ /* Set up REGISTER1 */
+ reg = phy->dphy.tclk_term << REGISTER1_TCLK_TERM_SHIFT;
+ reg |= phy->dphy.tclk_miss << REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT;
+ reg |= phy->dphy.tclk_settle << REGISTER1_TCLK_SETTLE_SHIFT;
+ reg |= 0xb8 << REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT;
+
+ iss_reg_write(phy->iss, phy->phy_regs, REGISTER1, reg);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+ struct v4l2_subdev *csi2_subdev)
+{
+ struct iss_csi2_device *csi2 = v4l2_get_subdevdata(csi2_subdev);
+ struct iss_pipeline *pipe = to_iss_pipeline(&csi2_subdev->entity);
+ struct iss_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct iss_csiphy_dphy_cfg csi2phy;
+ int csi2_ddrclk_khz;
+ struct iss_csiphy_lanes_cfg *lanes;
+ unsigned int used_lanes = 0;
+ u32 cam_rx_ctrl;
+ unsigned int i;
+
+ lanes = &subdevs->bus.csi2.lanecfg;
+
+ /*
+ * SCM.CONTROL_CAMERA_RX
+ * - bit [31] : CSIPHY2 lane 2 enable (4460+ only)
+ * - bit [30:29] : CSIPHY2 per-lane enable (1 to 0)
+ * - bit [28:24] : CSIPHY1 per-lane enable (4 to 0)
+ * - bit [21] : CSIPHY2 CTRLCLK enable
+ * - bit [20:19] : CSIPHY2 config: 00 d-phy, 01/10 ccp2
+ * - bit [18] : CSIPHY1 CTRLCLK enable
+ * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
+ */
+ cam_rx_ctrl = omap4_ctrl_pad_readl(
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+ if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
+ cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
+ OMAP4_CAMERARX_CSI21_CAMMODE_MASK);
+ /* NOTE: Leave CSIPHY1 config to 0x0: D-PHY mode */
+ /* Enable all lanes for now */
+ cam_rx_ctrl |=
+ 0x1f << OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT;
+ /* Enable CTRLCLK */
+ cam_rx_ctrl |= OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK;
+ }
+
+ if (subdevs->interface == ISS_INTERFACE_CSI2B_PHY2) {
+ cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI22_LANEENABLE_MASK |
+ OMAP4_CAMERARX_CSI22_CAMMODE_MASK);
+ /* NOTE: Leave CSIPHY2 config to 0x0: D-PHY mode */
+ /* Enable all lanes for now */
+ cam_rx_ctrl |=
+ 0x3 << OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT;
+ /* Enable CTRLCLK */
+ cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
+ }
+
+ omap4_ctrl_pad_writel(cam_rx_ctrl,
+ OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+
+ /* Reset used lane count */
+ csi2->phy->used_data_lanes = 0;
+
+ /* Clock and data lanes verification */
+ for (i = 0; i < csi2->phy->max_data_lanes; i++) {
+ if (lanes->data[i].pos == 0)
+ continue;
+
+ if (lanes->data[i].pol > 1 ||
+ lanes->data[i].pos > (csi2->phy->max_data_lanes + 1))
+ return -EINVAL;
+
+ if (used_lanes & (1 << lanes->data[i].pos))
+ return -EINVAL;
+
+ used_lanes |= 1 << lanes->data[i].pos;
+ csi2->phy->used_data_lanes++;
+ }
+
+ if (lanes->clk.pol > 1 ||
+ lanes->clk.pos > (csi2->phy->max_data_lanes + 1))
+ return -EINVAL;
+
+ if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
+ return -EINVAL;
+
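+ /*
+ * The DDR clock rate is the total bit rate (pixel rate times bits per
+ * pixel) spread over the data lanes, with two bits transferred per
+ * lane on every DDR clock cycle.
+ */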
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * csi2->phy->used_data_lanes)
+ * pipe->external_bpp;
+
+ /*
+ * THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1.
+ * THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3.
+ */
+ csi2phy.ths_term = DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1;
+ csi2phy.ths_settle = DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3;
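+ /*
+ * For instance, a 400000 kHz DDR clock gives
+ * ths_term = DIV_ROUND_UP(25 * 400000, 2000000) - 1 = 4 and
+ * ths_settle = DIV_ROUND_UP(90 * 400000, 1000000) + 3 = 39.
+ */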
+ csi2phy.tclk_term = TCLK_TERM;
+ csi2phy.tclk_miss = TCLK_MISS;
+ csi2phy.tclk_settle = TCLK_SETTLE;
+
+ mutex_lock(&csi2->phy->mutex);
+ csi2->phy->dphy = csi2phy;
+ csi2->phy->lanes = *lanes;
+ mutex_unlock(&csi2->phy->mutex);
+
+ return 0;
+}
+
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy)
+{
+ int rval;
+
+ mutex_lock(&phy->mutex);
+
+ rval = omap4iss_csi2_reset(phy->csi2);
+ if (rval)
+ goto done;
+
+ csiphy_dphy_config(phy);
+ csiphy_lanes_config(phy);
+
+ rval = csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_ON);
+ if (rval)
+ goto done;
+
+ phy->phy_in_use = 1;
+
+done:
+ mutex_unlock(&phy->mutex);
+ return rval;
+}
+
+void omap4iss_csiphy_release(struct iss_csiphy *phy)
+{
+ mutex_lock(&phy->mutex);
+ if (phy->phy_in_use) {
+ csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_OFF);
+ phy->phy_in_use = 0;
+ }
+ mutex_unlock(&phy->mutex);
+}
+
+/*
+ * omap4iss_csiphy_init - Initialize the CSI PHY frontends
+ */
+int omap4iss_csiphy_init(struct iss_device *iss)
+{
+ struct iss_csiphy *phy1 = &iss->csiphy1;
+ struct iss_csiphy *phy2 = &iss->csiphy2;
+
+ phy1->iss = iss;
+ phy1->csi2 = &iss->csi2a;
+ phy1->max_data_lanes = ISS_CSIPHY1_NUM_DATA_LANES;
+ phy1->used_data_lanes = 0;
+ phy1->cfg_regs = OMAP4_ISS_MEM_CSI2_A_REGS1;
+ phy1->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE1;
+ mutex_init(&phy1->mutex);
+
+ phy2->iss = iss;
+ phy2->csi2 = &iss->csi2b;
+ phy2->max_data_lanes = ISS_CSIPHY2_NUM_DATA_LANES;
+ phy2->used_data_lanes = 0;
+ phy2->cfg_regs = OMAP4_ISS_MEM_CSI2_B_REGS1;
+ phy2->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE2;
+ mutex_init(&phy2->mutex);
+
+ return 0;
+}
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.h b/drivers/staging/media/omap4iss/iss_csiphy.h
new file mode 100644
index 000000000000..e9ca43955654
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_csiphy.h
@@ -0,0 +1,51 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - CSI PHY module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_CSI_PHY_H
+#define OMAP4_ISS_CSI_PHY_H
+
+#include <media/omap4iss.h>
+
+struct iss_csi2_device;
+
+struct iss_csiphy_dphy_cfg {
+ u8 ths_term;
+ u8 ths_settle;
+ u8 tclk_term;
+ unsigned tclk_miss:1;
+ u8 tclk_settle;
+};
+
+struct iss_csiphy {
+ struct iss_device *iss;
+ struct mutex mutex; /* serialize csiphy configuration */
+ u8 phy_in_use;
+ struct iss_csi2_device *csi2;
+
+ /* memory resources, as defined in enum iss_mem_resources */
+ unsigned int cfg_regs;
+ unsigned int phy_regs;
+
+ u8 max_data_lanes; /* number of CSI2 Data Lanes supported */
+ u8 used_data_lanes; /* number of CSI2 Data Lanes used */
+ struct iss_csiphy_lanes_cfg lanes;
+ struct iss_csiphy_dphy_cfg dphy;
+};
+
+int omap4iss_csiphy_config(struct iss_device *iss,
+ struct v4l2_subdev *csi2_subdev);
+int omap4iss_csiphy_acquire(struct iss_csiphy *phy);
+void omap4iss_csiphy_release(struct iss_csiphy *phy);
+int omap4iss_csiphy_init(struct iss_device *iss);
+
+#endif /* OMAP4_ISS_CSI_PHY_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
new file mode 100644
index 000000000000..6eaafc5e2eea
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -0,0 +1,570 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipe.h"
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which);
+
+static const unsigned int ipipe_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+};
+
+/*
+ * ipipe_print_status - Print current IPIPE Module register values.
+ * @ipipe: Pointer to ISS ISP IPIPE device.
+ *
+ * Also prints other debug information stored in the IPIPE module.
+ */
+#define IPIPE_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###IPIPE " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_##name))
+
+static void ipipe_print_status(struct iss_ipipe_device *ipipe)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ dev_dbg(iss->dev, "-------------IPIPE Register dump-------------\n");
+
+ IPIPE_PRINT_REGISTER(iss, SRC_EN);
+ IPIPE_PRINT_REGISTER(iss, SRC_MODE);
+ IPIPE_PRINT_REGISTER(iss, SRC_FMT);
+ IPIPE_PRINT_REGISTER(iss, SRC_COL);
+ IPIPE_PRINT_REGISTER(iss, SRC_VPS);
+ IPIPE_PRINT_REGISTER(iss, SRC_VSZ);
+ IPIPE_PRINT_REGISTER(iss, SRC_HPS);
+ IPIPE_PRINT_REGISTER(iss, SRC_HSZ);
+ IPIPE_PRINT_REGISTER(iss, GCK_MMR);
+ IPIPE_PRINT_REGISTER(iss, YUV_PHS);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * ipipe_enable - Enable/Disable IPIPE.
+ * @ipipe: Pointer to ISS ISP IPIPE device.
+ * @enable: enable flag
+ */
+static void ipipe_enable(struct iss_ipipe_device *ipipe, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_EN,
+ IPIPE_SRC_EN_EN, enable ? IPIPE_SRC_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+static void ipipe_configure(struct iss_ipipe_device *ipipe)
+{
+ struct iss_device *iss = to_iss_device(ipipe);
+ struct v4l2_mbus_framefmt *format;
+
+ /* IPIPE_PAD_SINK */
+ format = &ipipe->formats[IPIPE_PAD_SINK];
+
+ /* NOTE: Currently only the RAW Bayer IN, YUV422 OUT pipeline is supported */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_FMT,
+ IPIPE_SRC_FMT_RAW2YUV);
+
+ /* Enable YUV444 -> YUV422 conversion */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_YUV_PHS,
+ IPIPE_YUV_PHS_LPF);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VSZ,
+ (format->height - 2) & IPIPE_SRC_VSZ_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HSZ,
+ (format->width - 1) & IPIPE_SRC_HSZ_MASK);
+
+ /* Ignore ipipeif_wrt signal, and operate on-the-fly. */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_MODE,
+ IPIPE_SRC_MODE_WRT | IPIPE_SRC_MODE_OST);
+
+ /* HACK: Values tuned for Ducati SW (OV) */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_COL,
+ IPIPE_SRC_COL_EE_B | IPIPE_SRC_COL_EO_GB |
+ IPIPE_SRC_COL_OE_GR | IPIPE_SRC_COL_OO_R);
+
+ /* IPIPE_PAD_SOURCE_VP */
+ format = &ipipe->formats[IPIPE_PAD_SOURCE_VP];
+ /* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * ipipe_set_stream - Enable/Disable streaming on the IPIPE module
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipe);
+ int ret = 0;
+
+ if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+
+ /* Enable clk_arm_g0 */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_MMR,
+ IPIPE_GCK_MMR_REG);
+
+ /* Enable clk_pix_g[3:0] */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_PIX,
+ IPIPE_GCK_PIX_G3 | IPIPE_GCK_PIX_G2 |
+ IPIPE_GCK_PIX_G1 | IPIPE_GCK_PIX_G0);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ ipipe_configure(ipipe);
+ ipipe_print_status(ipipe);
+
+ atomic_set(&ipipe->stopping, 0);
+ ipipe_enable(ipipe, 1);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &ipipe->wait,
+ &ipipe->stopping))
+ ret = -ETIMEDOUT;
+
+ ipipe_enable(ipipe, 0);
+ omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
+ break;
+ }
+
+ ipipe->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ipipe->formats[pad];
+}
+
+/*
+ * ipipe_try_format - Try video format on a pad
+ * @ipipe: ISS IPIPE device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case IPIPE_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(ipipe_fmts); i++) {
+ if (fmt->code == ipipe_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ipipe_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+
+ case IPIPE_PAD_SOURCE_VP:
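+ /* The IPIPE converts RAW Bayer data to YUV, so the video port
+ * always outputs UYVY.
+ */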
+ format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipe_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->pad) {
+ case IPIPE_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipe_fmts))
+ return -EINVAL;
+
+ code->code = ipipe_fmts[code->index];
+ break;
+
+ case IPIPE_PAD_SOURCE_VP:
+ /* FIXME: Forced format conversion inside IPIPE ? */
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
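+ /*
+ * Let ipipe_try_format() clamp a 1x1 request to find the minimum
+ * supported size, then clamp a request of -1 (UINT_MAX) to find the
+ * maximum.
+ */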
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipe_try_format(ipipe, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipe_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ipipe_set_format - Set the video format on a pad
+ * @sd : ISP IPIPE V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipe_get_format(ipipe, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipe_try_format(ipipe, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == IPIPE_PAD_SINK) {
+ format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ipipe_try_format(ipipe, fh, IPIPE_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int ipipe_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ipipe_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPE V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPE_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ipipe_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
+ .s_stream = ipipe_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
+ .enum_mbus_code = ipipe_enum_mbus_code,
+ .enum_frame_size = ipipe_enum_frame_size,
+ .get_fmt = ipipe_get_format,
+ .set_fmt = ipipe_set_format,
+ .link_validate = ipipe_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
+ .video = &ipipe_v4l2_video_ops,
+ .pad = &ipipe_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
+ .open = ipipe_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipe_link_setup - Setup IPIPE connections
+ * @entity: IPIPE media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int ipipe_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipe);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPE_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from IPIPEIF. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipe->input = IPIPE_INPUT_NONE;
+ break;
+ }
+
+ if (ipipe->input != IPIPE_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->ipipeif.subdev.entity)
+ ipipe->input = IPIPE_INPUT_IPIPEIF;
+
+ break;
+
+ case IPIPE_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Send to RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipe->output & ~IPIPE_OUTPUT_VP)
+ return -EBUSY;
+ ipipe->output |= IPIPE_OUTPUT_VP;
+ } else {
+ ipipe->output &= ~IPIPE_OUTPUT_VP;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipe_media_ops = {
+ .link_setup = ipipe_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipe_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipe: ISS ISP IPIPE module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
+{
+ struct v4l2_subdev *sd = &ipipe->subdev;
+ struct media_pad *pads = ipipe->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ipipe->input = IPIPE_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ipipe_v4l2_ops);
+ sd->internal_ops = &ipipe_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, ipipe);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPE_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ipipe_media_ops;
+ ret = media_entity_init(me, IPIPE_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ipipe_init_formats(sd, NULL);
+
+ return 0;
+}
+
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
+{
+ media_entity_cleanup(&ipipe->subdev.entity);
+
+ v4l2_device_unregister_subdev(&ipipe->subdev);
+}
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_ipipe_unregister_entities(ipipe);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPE initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipe_init - IPIPE module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipe_init(struct iss_device *iss)
+{
+ struct iss_ipipe_device *ipipe = &iss->ipipe;
+
+ ipipe->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&ipipe->wait);
+
+ return ipipe_init_entities(ipipe);
+}
+
+/*
+ * omap4iss_ipipe_cleanup - IPIPE module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipe_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.h b/drivers/staging/media/omap4iss/iss_ipipe.h
new file mode 100644
index 000000000000..c22d9041f2a5
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipe.h
@@ -0,0 +1,67 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPE_H
+#define OMAP4_ISS_IPIPE_H
+
+#include "iss_video.h"
+
+enum ipipe_input_entity {
+ IPIPE_INPUT_NONE,
+ IPIPE_INPUT_IPIPEIF,
+};
+
+#define IPIPE_OUTPUT_VP (1 << 0)
+
+/* Sink and source IPIPE pads */
+#define IPIPE_PAD_SINK 0
+#define IPIPE_PAD_SOURCE_VP 1
+#define IPIPE_PADS_NUM 2
+
+/*
+ * struct iss_ipipe_device - Structure for the IPIPE module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipe_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPE_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
+
+ enum ipipe_input_entity input;
+ unsigned int output;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
+ struct v4l2_device *vdev);
+void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe);
+
+int omap4iss_ipipe_init(struct iss_device *iss);
+void omap4iss_ipipe_cleanup(struct iss_device *iss);
+
+#endif /* OMAP4_ISS_IPIPE_H */
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
new file mode 100644
index 000000000000..7bc145762499
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -0,0 +1,849 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_ipipeif.h"
+
+static const unsigned int ipipeif_fmts[] = {
+ V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * ipipeif_print_status - Print current IPIPEIF Module register values.
+ * @ipipeif: Pointer to ISS ISP IPIPEIF device.
+ *
+ * Also prints other debug information stored in the IPIPEIF module.
+ */
+#define IPIPEIF_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###IPIPEIF " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_##name))
+
+#define ISIF_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISIF " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_##name))
+
+#define ISP5_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###ISP5 " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_##name))
+
+static void ipipeif_print_status(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ dev_dbg(iss->dev, "-------------IPIPEIF Register dump-------------\n");
+
+ IPIPEIF_PRINT_REGISTER(iss, CFG1);
+ IPIPEIF_PRINT_REGISTER(iss, CFG2);
+
+ ISIF_PRINT_REGISTER(iss, SYNCEN);
+ ISIF_PRINT_REGISTER(iss, CADU);
+ ISIF_PRINT_REGISTER(iss, CADL);
+ ISIF_PRINT_REGISTER(iss, MODESET);
+ ISIF_PRINT_REGISTER(iss, CCOLP);
+ ISIF_PRINT_REGISTER(iss, SPH);
+ ISIF_PRINT_REGISTER(iss, LNH);
+ ISIF_PRINT_REGISTER(iss, LNV);
+ ISIF_PRINT_REGISTER(iss, VDINT(0));
+ ISIF_PRINT_REGISTER(iss, HSIZE);
+
+ ISP5_PRINT_REGISTER(iss, SYSCONFIG);
+ ISP5_PRINT_REGISTER(iss, CTRL);
+ ISP5_PRINT_REGISTER(iss, IRQSTATUS(0));
+ ISP5_PRINT_REGISTER(iss, IRQENABLE_SET(0));
+ ISP5_PRINT_REGISTER(iss, IRQENABLE_CLR(0));
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+static void ipipeif_write_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+ ISIF_SYNCEN_DWEN, enable ? ISIF_SYNCEN_DWEN : 0);
+}
+
+/*
+ * ipipeif_enable - Enable/Disable IPIPEIF.
+ * @ipipeif: Pointer to ISS ISP IPIPEIF device.
+ * @enable: enable flag
+ */
+static void ipipeif_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
+ ISIF_SYNCEN_SYEN, enable ? ISIF_SYNCEN_SYEN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * ipipeif_set_outaddr - Set memory address to save output image
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void ipipeif_set_outaddr(struct iss_ipipeif_device *ipipeif, u32 addr)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+
+ /* Save the address split into the Base Address H & L registers */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADU,
+ (addr >> (16 + 5)) & ISIF_CADU_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADL,
+ (addr >> 5) & ISIF_CADL_MASK);
+}
+
+static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_device *iss = to_iss_device(ipipeif);
+ const struct iss_format_info *info;
+ struct v4l2_mbus_framefmt *format;
+ u32 isif_ccolp = 0;
+
+ omap4iss_configure_bridge(iss, ipipeif->input);
+
+ /* IPIPEIF_PAD_SINK */
+ format = &ipipeif->formats[IPIPEIF_PAD_SINK];
+
+ /* IPIPEIF with YUV422 input from ISIF */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG1,
+ IPIPEIF_CFG1_INPSRC1_MASK | IPIPEIF_CFG1_INPSRC2_MASK);
+
+ /* Select ISIF/IPIPEIF input format */
+ switch (format->code) {
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+ ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+ ISIF_MODESET_CCDW_MASK,
+ ISIF_MODESET_INPMOD_YCBCR16);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+ IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
+
+ break;
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
+ ISIF_CCOLP_CP1_F0_R |
+ ISIF_CCOLP_CP2_F0_B |
+ ISIF_CCOLP_CP3_F0_GB;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_R |
+ ISIF_CCOLP_CP1_F0_GR |
+ ISIF_CCOLP_CP2_F0_GB |
+ ISIF_CCOLP_CP3_F0_B;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_B |
+ ISIF_CCOLP_CP1_F0_GB |
+ ISIF_CCOLP_CP2_F0_GR |
+ ISIF_CCOLP_CP3_F0_R;
+ goto cont_raw;
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
+ ISIF_CCOLP_CP1_F0_B |
+ ISIF_CCOLP_CP2_F0_R |
+ ISIF_CCOLP_CP3_F0_GR;
+cont_raw:
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
+ IPIPEIF_CFG2_YUV16);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
+ ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
+ ISIF_MODESET_CCDW_MASK, ISIF_MODESET_INPMOD_RAW |
+ ISIF_MODESET_CCDW_2BIT);
+
+ info = omap4iss_video_format_info(format->code);
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CGAMMAWD,
+ ISIF_CGAMMAWD_GWDI_MASK,
+ ISIF_CGAMMAWD_GWDI(info->bpp));
+
+ /* Set RAW Bayer pattern */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CCOLP,
+ isif_ccolp);
+ break;
+ }
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SPH, 0 & ISIF_SPH_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNH,
+ (format->width - 1) & ISIF_LNH_MASK);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNV,
+ (format->height - 1) & ISIF_LNV_MASK);
+
+ /* Generate ISIF0 on the last line of the image */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_VDINT(0),
+ format->height - 1);
+
+ /* IPIPEIF_PAD_SOURCE_ISIF_SF */
+ format = &ipipeif->formats[IPIPEIF_PAD_SOURCE_ISIF_SF];
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_HSIZE,
+ (ipipeif->video_out.bpl_value >> 5) &
+ ISIF_HSIZE_HSIZE_MASK);
+
+ /* IPIPEIF_PAD_SOURCE_VP */
+ /* Do nothing? */
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_buffer *buffer;
+
+ /* The ISIF generates VD0 interrupts even when writes are disabled, so
+ * we have to deal with them anyway. Disabling the ISIF when no buffer
+ * is available is thus not enough; we need to handle the situation
+ * explicitly.
+ */
+ if (list_empty(&ipipeif->video_out.dmaqueue))
+ return;
+
+ ipipeif_write_enable(ipipeif, 0);
+
+ buffer = omap4iss_video_buffer_next(&ipipeif->video_out);
+ if (buffer == NULL)
+ return;
+
+ ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+ ipipeif_write_enable(ipipeif, 1);
+}
+
+/*
+ * ipipeif_isif0_isr - Handle ISIF0 event
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ *
+ * Increments the pipeline frame number when propagation is enabled and
+ * completes the current buffer when writing to memory.
+ */
+static void ipipeif_isif0_isr(struct iss_ipipeif_device *ipipeif)
+{
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&ipipeif->subdev.entity);
+
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_isr_buffer(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
+ * @ipipeif: Pointer to ISP IPIPEIF device.
+ * @events: IPIPEIF events
+ */
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
+{
+ if (omap4iss_module_sync_is_stopping(&ipipeif->wait,
+ &ipipeif->stopping))
+ return;
+
+ if (events & ISP5_IRQ_ISIF_INT(0))
+ ipipeif_isif0_isr(ipipeif);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP video operations
+ */
+
+static int ipipeif_video_queue(struct iss_video *video,
+ struct iss_buffer *buffer)
+{
+ struct iss_ipipeif_device *ipipeif = container_of(video,
+ struct iss_ipipeif_device, video_out);
+
+ if (!(ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ ipipeif_set_outaddr(ipipeif, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before any buffer was queued, or if an
+ * underrun occurred in the ISR, the hardware was not enabled and the
+ * DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
+ * Enable the hardware now.
+ */
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 1);
+ ipipeif_enable(ipipeif, 1);
+ iss_video_dmaqueue_flags_clr(video);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations ipipeif_video_ops = {
+ .queue = ipipeif_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+#define IPIPEIF_DRV_SUBCLK_MASK (OMAP4_ISS_ISP_SUBCLK_IPIPEIF |\
+ OMAP4_ISS_ISP_SUBCLK_ISIF)
+/*
+ * ipipeif_set_stream - Enable/Disable streaming on the IPIPEIF module
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipeif);
+ struct iss_video *video_out = &ipipeif->video_out;
+ int ret = 0;
+
+ if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ ipipeif_configure(ipipeif);
+ ipipeif_print_status(ipipeif);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+
+ atomic_set(&ipipeif->stopping, 0);
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 1);
+ ipipeif_enable(ipipeif, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &ipipeif->wait,
+ &ipipeif->stopping))
+ ret = -ETIMEDOUT;
+
+ if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
+ ipipeif_write_enable(ipipeif, 0);
+ ipipeif_enable(ipipeif, 0);
+ omap4iss_isp_subclk_disable(iss, IPIPEIF_DRV_SUBCLK_MASK);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ ipipeif->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &ipipeif->formats[pad];
+}
+
+/*
+ * ipipeif_try_format - Try video format on a pad
+ * @ipipeif: ISS IPIPEIF device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case IPIPEIF_PAD_SINK:
+ /* TODO: If the IPIPEIF output formatter pad is connected
+ * directly to the resizer, only YUV formats can be used.
+ */
+ for (i = 0; i < ARRAY_SIZE(ipipeif_fmts); i++) {
+ if (fmt->code == ipipeif_fmts[i])
+ break;
+ }
+
+ /* If not found, use SGRBG10 as default */
+ if (i >= ARRAY_SIZE(ipipeif_fmts))
+ fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF:
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
+ fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+
+ case IPIPEIF_PAD_SOURCE_VP:
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
+ fmt->width = clamp_t(u32, width, 32, fmt->width);
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+ }
+
+ /* Data is written to memory unpacked; each 10-bit or 12-bit pixel is
+ * stored in 2 bytes.
+ */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * ipipeif_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case IPIPEIF_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(ipipeif_fmts))
+ return -EINVAL;
+
+ code->code = ipipeif_fmts[code->index];
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF:
+ case IPIPEIF_PAD_SOURCE_VP:
+ /* No format conversion inside IPIPEIF */
+ if (code->index != 0)
+ return -EINVAL;
+
+ format = __ipipeif_get_format(ipipeif, fh, IPIPEIF_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ code->code = format->code;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ ipipeif_try_format(ipipeif, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * ipipeif_get_format - Retrieve the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * ipipeif_set_format - Set the video format on a pad
+ * @sd : ISP IPIPEIF V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __ipipeif_get_format(ipipeif, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ ipipeif_try_format(ipipeif, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == IPIPEIF_PAD_SINK) {
+ format = __ipipeif_get_format(ipipeif, fh,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
+ fmt->which);
+ *format = fmt->format;
+ ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_ISIF_SF,
+ format, fmt->which);
+
+ format = __ipipeif_get_format(ipipeif, fh,
+ IPIPEIF_PAD_SOURCE_VP,
+ fmt->which);
+ *format = fmt->format;
+ ipipeif_try_format(ipipeif, fh, IPIPEIF_PAD_SOURCE_VP, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int ipipeif_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * ipipeif_init_formats - Initialize formats on all pads
+ * @sd: ISP IPIPEIF V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int ipipeif_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = IPIPEIF_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ ipipeif_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
+ .s_stream = ipipeif_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
+ .enum_mbus_code = ipipeif_enum_mbus_code,
+ .enum_frame_size = ipipeif_enum_frame_size,
+ .get_fmt = ipipeif_get_format,
+ .set_fmt = ipipeif_set_format,
+ .link_validate = ipipeif_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
+ .video = &ipipeif_v4l2_video_ops,
+ .pad = &ipipeif_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
+ .open = ipipeif_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * ipipeif_link_setup - Setup IPIPEIF connections
+ * @entity: IPIPEIF media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * Return 0 on success or a negative error code (-EINVAL or -EBUSY) on failure.
+ */
+static int ipipeif_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(ipipeif);
+
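+ /*
+ * The local pad index is OR'ed with the remote entity type so that a
+ * single switch statement can match both the pad and the type of
+ * entity at the other end of the link.
+ */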
+ switch (local->index | media_entity_type(remote->entity)) {
+ case IPIPEIF_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from the CSI2a or CSI2b receiver. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+ break;
+ }
+
+ if (ipipeif->input != IPIPEIF_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->csi2a.subdev.entity)
+ ipipeif->input = IPIPEIF_INPUT_CSI2A;
+ else if (remote->entity == &iss->csi2b.subdev.entity)
+ ipipeif->input = IPIPEIF_INPUT_CSI2B;
+
+ break;
+
+ case IPIPEIF_PAD_SOURCE_ISIF_SF | MEDIA_ENT_T_DEVNODE:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipeif->output & ~IPIPEIF_OUTPUT_MEMORY)
+ return -EBUSY;
+ ipipeif->output |= IPIPEIF_OUTPUT_MEMORY;
+ } else {
+ ipipeif->output &= ~IPIPEIF_OUTPUT_MEMORY;
+ }
+ break;
+
+ case IPIPEIF_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Send to IPIPE/RESIZER */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (ipipeif->output & ~IPIPEIF_OUTPUT_VP)
+ return -EBUSY;
+ ipipeif->output |= IPIPEIF_OUTPUT_VP;
+ } else {
+ ipipeif->output &= ~IPIPEIF_OUTPUT_VP;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations ipipeif_media_ops = {
+ .link_setup = ipipeif_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * ipipeif_init_entities - Initialize V4L2 subdev and media entity
+ * @ipipeif: ISS ISP IPIPEIF module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
+{
+ struct v4l2_subdev *sd = &ipipeif->subdev;
+ struct media_pad *pads = ipipeif->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ ipipeif->input = IPIPEIF_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
+ sd->internal_ops = &ipipeif_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, ipipeif);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[IPIPEIF_PAD_SOURCE_ISIF_SF].flags = MEDIA_PAD_FL_SOURCE;
+ pads[IPIPEIF_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &ipipeif_media_ops;
+ ret = media_entity_init(me, IPIPEIF_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ ipipeif_init_formats(sd, NULL);
+
+ ipipeif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ipipeif->video_out.ops = &ipipeif_video_ops;
+ ipipeif->video_out.iss = to_iss_device(ipipeif);
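+ /* Limit capture memory to three page-aligned buffers of 4096 * 4096 bytes. */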
+ ipipeif->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ ipipeif->video_out.bpl_alignment = 32;
+ ipipeif->video_out.bpl_zero_padding = 1;
+ ipipeif->video_out.bpl_max = 0x1ffe0;
+
+ ret = omap4iss_video_init(&ipipeif->video_out, "ISP IPIPEIF");
+ if (ret < 0)
+ return ret;
+
+ /* Connect the IPIPEIF subdev to the video node. */
+ ret = media_entity_create_link(&ipipeif->subdev.entity,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
+ &ipipeif->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
+{
+ media_entity_cleanup(&ipipeif->subdev.entity);
+
+ v4l2_device_unregister_subdev(&ipipeif->subdev);
+ omap4iss_video_unregister(&ipipeif->video_out);
+}
+
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&ipipeif->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_ipipeif_unregister_entities(ipipeif);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP IPIPEIF initialisation and cleanup
+ */
+
+/*
+ * omap4iss_ipipeif_init - IPIPEIF module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_ipipeif_init(struct iss_device *iss)
+{
+ struct iss_ipipeif_device *ipipeif = &iss->ipipeif;
+
+ ipipeif->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&ipipeif->wait);
+
+ return ipipeif_init_entities(ipipeif);
+}
+
+/*
+ * omap4iss_ipipeif_cleanup - IPIPEIF module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_ipipeif_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.h b/drivers/staging/media/omap4iss/iss_ipipeif.h
new file mode 100644
index 000000000000..cbdccb982eee
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.h
@@ -0,0 +1,92 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_IPIPEIF_H
+#define OMAP4_ISS_IPIPEIF_H
+
+#include "iss_video.h"
+
+enum ipipeif_input_entity {
+ IPIPEIF_INPUT_NONE,
+ IPIPEIF_INPUT_CSI2A,
+ IPIPEIF_INPUT_CSI2B
+};
+
+#define IPIPEIF_OUTPUT_MEMORY (1 << 0)
+#define IPIPEIF_OUTPUT_VP (1 << 1)
+
+/* Sink and source IPIPEIF pads */
+#define IPIPEIF_PAD_SINK 0
+#define IPIPEIF_PAD_SOURCE_ISIF_SF 1
+#define IPIPEIF_PAD_SOURCE_VP 2
+#define IPIPEIF_PADS_NUM 3
+
+/*
+ * struct iss_ipipeif_device - IPIPEIF module private data
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_ipipeif_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[IPIPEIF_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[IPIPEIF_PADS_NUM];
+
+ enum ipipeif_input_entity input;
+ unsigned int output;
+ struct iss_video video_out;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_ipipeif_init(struct iss_device *iss);
+void omap4iss_ipipeif_cleanup(struct iss_device *iss);
+int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
+ struct v4l2_device *vdev);
+void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif);
+
+int omap4iss_ipipeif_busy(struct iss_ipipeif_device *ipipeif);
+void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events);
+void omap4iss_ipipeif_restore_context(struct iss_device *iss);
+void omap4iss_ipipeif_max_rate(struct iss_ipipeif_device *ipipeif,
+ unsigned int *max_rate);
+
+#endif /* OMAP4_ISS_IPIPEIF_H */
diff --git a/drivers/staging/media/omap4iss/iss_regs.h b/drivers/staging/media/omap4iss/iss_regs.h
new file mode 100644
index 000000000000..efd0291a86f7
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_regs.h
@@ -0,0 +1,901 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Register defines
+ *
+ * Copyright (C) 2012 Texas Instruments.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _OMAP4_ISS_REGS_H_
+#define _OMAP4_ISS_REGS_H_
+
+/* ISS */
+#define ISS_HL_REVISION 0x0
+
+#define ISS_HL_SYSCONFIG 0x10
+#define ISS_HL_SYSCONFIG_IDLEMODE_SHIFT 2
+#define ISS_HL_SYSCONFIG_IDLEMODE_FORCEIDLE 0x0
+#define ISS_HL_SYSCONFIG_IDLEMODE_NOIDLE 0x1
+#define ISS_HL_SYSCONFIG_IDLEMODE_SMARTIDLE 0x2
+#define ISS_HL_SYSCONFIG_SOFTRESET (1 << 0)
+
+#define ISS_HL_IRQSTATUS_RAW(i) (0x20 + (0x10 * (i)))
+#define ISS_HL_IRQSTATUS(i) (0x24 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_SET(i) (0x28 + (0x10 * (i)))
+#define ISS_HL_IRQENABLE_CLR(i) (0x2c + (0x10 * (i)))
+
+#define ISS_HL_IRQ_HS_VS (1 << 17)
+#define ISS_HL_IRQ_SIMCOP(i) (1 << (12 + (i)))
+#define ISS_HL_IRQ_BTE (1 << 11)
+#define ISS_HL_IRQ_CBUFF (1 << 10)
+#define ISS_HL_IRQ_CCP2(i) (1 << ((i) > 3 ? 16 : 14 + (i)))
+#define ISS_HL_IRQ_CSIB (1 << 5)
+#define ISS_HL_IRQ_CSIA (1 << 4)
+#define ISS_HL_IRQ_ISP(i) (1 << (i))
+
+#define ISS_CTRL 0x80
+#define ISS_CTRL_CLK_DIV_MASK (3 << 4)
+#define ISS_CTRL_INPUT_SEL_MASK (3 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2A (0 << 2)
+#define ISS_CTRL_INPUT_SEL_CSI2B (1 << 2)
+#define ISS_CTRL_SYNC_DETECT_VS_RAISING (3 << 0)
+
+#define ISS_CLKCTRL 0x84
+#define ISS_CLKCTRL_VPORT2_CLK (1 << 30)
+#define ISS_CLKCTRL_VPORT1_CLK (1 << 29)
+#define ISS_CLKCTRL_VPORT0_CLK (1 << 28)
+#define ISS_CLKCTRL_CCP2 (1 << 4)
+#define ISS_CLKCTRL_CSI2_B (1 << 3)
+#define ISS_CLKCTRL_CSI2_A (1 << 2)
+#define ISS_CLKCTRL_ISP (1 << 1)
+#define ISS_CLKCTRL_SIMCOP (1 << 0)
+
+#define ISS_CLKSTAT 0x88
+#define ISS_CLKSTAT_VPORT2_CLK (1 << 30)
+#define ISS_CLKSTAT_VPORT1_CLK (1 << 29)
+#define ISS_CLKSTAT_VPORT0_CLK (1 << 28)
+#define ISS_CLKSTAT_CCP2 (1 << 4)
+#define ISS_CLKSTAT_CSI2_B (1 << 3)
+#define ISS_CLKSTAT_CSI2_A (1 << 2)
+#define ISS_CLKSTAT_ISP (1 << 1)
+#define ISS_CLKSTAT_SIMCOP (1 << 0)
+
+#define ISS_PM_STATUS 0x8c
+#define ISS_PM_STATUS_CBUFF_PM_MASK (3 << 12)
+#define ISS_PM_STATUS_BTE_PM_MASK (3 << 10)
+#define ISS_PM_STATUS_SIMCOP_PM_MASK (3 << 8)
+#define ISS_PM_STATUS_ISP_PM_MASK (3 << 6)
+#define ISS_PM_STATUS_CCP2_PM_MASK (3 << 4)
+#define ISS_PM_STATUS_CSI2_B_PM_MASK (3 << 2)
+#define ISS_PM_STATUS_CSI2_A_PM_MASK (3 << 0)
+
+#define REGISTER0 0x0
+#define REGISTER0_HSCLOCKCONFIG (1 << 24)
+#define REGISTER0_THS_TERM_MASK (0xff << 8)
+#define REGISTER0_THS_TERM_SHIFT 8
+#define REGISTER0_THS_SETTLE_MASK (0xff << 0)
+#define REGISTER0_THS_SETTLE_SHIFT 0
+
+#define REGISTER1 0x4
+#define REGISTER1_RESET_DONE_CTRLCLK (1 << 29)
+#define REGISTER1_CLOCK_MISS_DETECTOR_STATUS (1 << 25)
+#define REGISTER1_TCLK_TERM_MASK (0x3f << 18)
+#define REGISTER1_TCLK_TERM_SHIFT 18
+#define REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT 10
+#define REGISTER1_CTRLCLK_DIV_FACTOR_MASK (0x3 << 8)
+#define REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT 8
+#define REGISTER1_TCLK_SETTLE_MASK (0xff << 0)
+#define REGISTER1_TCLK_SETTLE_SHIFT 0
+
+#define REGISTER2 0x8
+
+#define CSI2_SYSCONFIG 0x10
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_MASK (3 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_FORCE (0 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_NO (1 << 12)
+#define CSI2_SYSCONFIG_MSTANDBY_MODE_SMART (2 << 12)
+#define CSI2_SYSCONFIG_SOFT_RESET (1 << 1)
+#define CSI2_SYSCONFIG_AUTO_IDLE (1 << 0)
+
+#define CSI2_SYSSTATUS 0x14
+#define CSI2_SYSSTATUS_RESET_DONE (1 << 0)
+
+#define CSI2_IRQSTATUS 0x18
+#define CSI2_IRQENABLE 0x1c
+
+/* Shared bits across CSI2_IRQENABLE and IRQSTATUS */
+
+#define CSI2_IRQ_OCP_ERR (1 << 14)
+#define CSI2_IRQ_SHORT_PACKET (1 << 13)
+#define CSI2_IRQ_ECC_CORRECTION (1 << 12)
+#define CSI2_IRQ_ECC_NO_CORRECTION (1 << 11)
+#define CSI2_IRQ_COMPLEXIO_ERR (1 << 9)
+#define CSI2_IRQ_FIFO_OVF (1 << 8)
+#define CSI2_IRQ_CONTEXT0 (1 << 0)
+
+#define CSI2_CTRL 0x40
+#define CSI2_CTRL_MFLAG_LEVH_MASK (7 << 20)
+#define CSI2_CTRL_MFLAG_LEVH_SHIFT 20
+#define CSI2_CTRL_MFLAG_LEVL_MASK (7 << 17)
+#define CSI2_CTRL_MFLAG_LEVL_SHIFT 17
+#define CSI2_CTRL_BURST_SIZE_EXPAND (1 << 16)
+#define CSI2_CTRL_VP_CLK_EN (1 << 15)
+#define CSI2_CTRL_NON_POSTED_WRITE (1 << 13)
+#define CSI2_CTRL_VP_ONLY_EN (1 << 11)
+#define CSI2_CTRL_VP_OUT_CTRL_MASK (3 << 8)
+#define CSI2_CTRL_VP_OUT_CTRL_SHIFT 8
+#define CSI2_CTRL_DBG_EN (1 << 7)
+#define CSI2_CTRL_BURST_SIZE_MASK (3 << 5)
+#define CSI2_CTRL_ENDIANNESS (1 << 4)
+#define CSI2_CTRL_FRAME (1 << 3)
+#define CSI2_CTRL_ECC_EN (1 << 2)
+#define CSI2_CTRL_IF_EN (1 << 0)
+
+#define CSI2_DBG_H 0x44
+
+#define CSI2_COMPLEXIO_CFG 0x50
+#define CSI2_COMPLEXIO_CFG_RESET_CTRL (1 << 30)
+#define CSI2_COMPLEXIO_CFG_RESET_DONE (1 << 29)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_MASK (3 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_OFF (0 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ON (1 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_CMD_ULP (2 << 27)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK (3 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_OFF (0 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ON (1 << 25)
+#define CSI2_COMPLEXIO_CFG_PWD_STATUS_ULP (2 << 25)
+#define CSI2_COMPLEXIO_CFG_PWR_AUTO (1 << 24)
+#define CSI2_COMPLEXIO_CFG_DATA_POL(i) (1 << (((i) * 4) + 3))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i) (7 << ((i) * 4))
+#define CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i) ((i) * 4)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POL (1 << 3)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK (7 << 0)
+#define CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT 0
+
+#define CSI2_COMPLEXIO_IRQSTATUS 0x54
+
+#define CSI2_SHORT_PACKET 0x5c
+
+#define CSI2_COMPLEXIO_IRQENABLE 0x60
+
+/* Shared bits across CSI2_COMPLEXIO_IRQENABLE and IRQSTATUS */
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT (1 << 26)
+#define CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER (1 << 25)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM5 (1 << 24)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM4 (1 << 23)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM3 (1 << 22)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM2 (1 << 21)
+#define CSI2_COMPLEXIO_IRQ_STATEULPM1 (1 << 20)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL5 (1 << 19)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL4 (1 << 18)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL3 (1 << 17)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL2 (1 << 16)
+#define CSI2_COMPLEXIO_IRQ_ERRCONTROL1 (1 << 15)
+#define CSI2_COMPLEXIO_IRQ_ERRESC5 (1 << 14)
+#define CSI2_COMPLEXIO_IRQ_ERRESC4 (1 << 13)
+#define CSI2_COMPLEXIO_IRQ_ERRESC3 (1 << 12)
+#define CSI2_COMPLEXIO_IRQ_ERRESC2 (1 << 11)
+#define CSI2_COMPLEXIO_IRQ_ERRESC1 (1 << 10)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 (1 << 9)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 (1 << 8)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 (1 << 7)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 (1 << 6)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 (1 << 5)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS5 (1 << 4)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS4 (1 << 3)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS3 (1 << 2)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS2 (1 << 1)
+#define CSI2_COMPLEXIO_IRQ_ERRSOTHS1 (1 << 0)
+
+#define CSI2_DBG_P 0x68
+
+#define CSI2_TIMING 0x6c
+#define CSI2_TIMING_FORCE_RX_MODE_IO1 (1 << 15)
+#define CSI2_TIMING_STOP_STATE_X16_IO1 (1 << 14)
+#define CSI2_TIMING_STOP_STATE_X4_IO1 (1 << 13)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK (0x1fff << 0)
+#define CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT 0
+
+#define CSI2_CTX_CTRL1(i) (0x70 + (0x20 * (i)))
+#define CSI2_CTX_CTRL1_GENERIC (1 << 30)
+#define CSI2_CTX_CTRL1_TRANSCODE (0xf << 24)
+#define CSI2_CTX_CTRL1_FEC_NUMBER_MASK (0xff << 16)
+#define CSI2_CTX_CTRL1_COUNT_MASK (0xff << 8)
+#define CSI2_CTX_CTRL1_COUNT_SHIFT 8
+#define CSI2_CTX_CTRL1_EOF_EN (1 << 7)
+#define CSI2_CTX_CTRL1_EOL_EN (1 << 6)
+#define CSI2_CTX_CTRL1_CS_EN (1 << 5)
+#define CSI2_CTX_CTRL1_COUNT_UNLOCK (1 << 4)
+#define CSI2_CTX_CTRL1_PING_PONG (1 << 3)
+#define CSI2_CTX_CTRL1_CTX_EN (1 << 0)
+
+#define CSI2_CTX_CTRL2(i) (0x74 + (0x20 * (i)))
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
+#define CSI2_CTX_CTRL2_USER_DEF_MAP_MASK \
+ (0x3 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_MASK (3 << 11)
+#define CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
+#define CSI2_CTX_CTRL2_DPCM_PRED (1 << 10)
+#define CSI2_CTX_CTRL2_FORMAT_MASK (0x3ff << 0)
+#define CSI2_CTX_CTRL2_FORMAT_SHIFT 0
+
+#define CSI2_CTX_DAT_OFST(i) (0x78 + (0x20 * (i)))
+#define CSI2_CTX_DAT_OFST_MASK (0xfff << 5)
+
+#define CSI2_CTX_PING_ADDR(i) (0x7c + (0x20 * (i)))
+#define CSI2_CTX_PING_ADDR_MASK 0xffffffe0
+
+#define CSI2_CTX_PONG_ADDR(i) (0x80 + (0x20 * (i)))
+#define CSI2_CTX_PONG_ADDR_MASK CSI2_CTX_PING_ADDR_MASK
+
+#define CSI2_CTX_IRQENABLE(i) (0x84 + (0x20 * (i)))
+#define CSI2_CTX_IRQSTATUS(i) (0x88 + (0x20 * (i)))
+
+#define CSI2_CTX_CTRL3(i) (0x8c + (0x20 * (i)))
+#define CSI2_CTX_CTRL3_ALPHA_SHIFT 5
+#define CSI2_CTX_CTRL3_ALPHA_MASK \
+ (0x3fff << CSI2_CTX_CTRL3_ALPHA_SHIFT)
+
+/* Shared bits across CSI2_CTX_IRQENABLE and IRQSTATUS */
+#define CSI2_CTX_IRQ_ECC_CORRECTION (1 << 8)
+#define CSI2_CTX_IRQ_LINE_NUMBER (1 << 7)
+#define CSI2_CTX_IRQ_FRAME_NUMBER (1 << 6)
+#define CSI2_CTX_IRQ_CS (1 << 5)
+#define CSI2_CTX_IRQ_LE (1 << 3)
+#define CSI2_CTX_IRQ_LS (1 << 2)
+#define CSI2_CTX_IRQ_FE (1 << 1)
+#define CSI2_CTX_IRQ_FS (1 << 0)
+
+/* ISS BTE */
+#define BTE_CTRL (0x0030)
+#define BTE_CTRL_BW_LIMITER_MASK (0x3ff << 22)
+#define BTE_CTRL_BW_LIMITER_SHIFT 22
+
+/* ISS ISP_SYS1 */
+#define ISP5_REVISION (0x0000)
+#define ISP5_SYSCONFIG (0x0010)
+#define ISP5_SYSCONFIG_STANDBYMODE_MASK (3 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_FORCE (0 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_NO (1 << 4)
+#define ISP5_SYSCONFIG_STANDBYMODE_SMART (2 << 4)
+#define ISP5_SYSCONFIG_SOFTRESET (1 << 1)
+
+#define ISP5_IRQSTATUS(i) (0x0028 + (0x10 * (i)))
+#define ISP5_IRQENABLE_SET(i) (0x002c + (0x10 * (i)))
+#define ISP5_IRQENABLE_CLR(i) (0x0030 + (0x10 * (i)))
+
+/* Bits shared for ISP5_IRQ* registers */
+#define ISP5_IRQ_OCP_ERR (1 << 31)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW1 (1 << 29)
+#define ISP5_IRQ_IPIPE_INT_DPC_RNEW0 (1 << 28)
+#define ISP5_IRQ_IPIPE_INT_DPC_INIT (1 << 27)
+#define ISP5_IRQ_IPIPE_INT_EOF (1 << 25)
+#define ISP5_IRQ_H3A_INT_EOF (1 << 24)
+#define ISP5_IRQ_RSZ_INT_EOF1 (1 << 23)
+#define ISP5_IRQ_RSZ_INT_EOF0 (1 << 22)
+#define ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR (1 << 19)
+#define ISP5_IRQ_RSZ_FIFO_OVF (1 << 18)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZB (1 << 17)
+#define ISP5_IRQ_RSZ_INT_CYC_RSZA (1 << 16)
+#define ISP5_IRQ_RSZ_INT_DMA (1 << 15)
+#define ISP5_IRQ_RSZ_INT_LAST_PIX (1 << 14)
+#define ISP5_IRQ_RSZ_INT_REG (1 << 13)
+#define ISP5_IRQ_H3A_INT (1 << 12)
+#define ISP5_IRQ_AF_INT (1 << 11)
+#define ISP5_IRQ_AEW_INT (1 << 10)
+#define ISP5_IRQ_IPIPEIF_IRQ (1 << 9)
+#define ISP5_IRQ_IPIPE_INT_HST (1 << 8)
+#define ISP5_IRQ_IPIPE_INT_BSC (1 << 7)
+#define ISP5_IRQ_IPIPE_INT_DMA (1 << 6)
+#define ISP5_IRQ_IPIPE_INT_LAST_PIX (1 << 5)
+#define ISP5_IRQ_IPIPE_INT_REG (1 << 4)
+#define ISP5_IRQ_ISIF_INT(i) (1 << (i))
+
+#define ISP5_CTRL (0x006c)
+#define ISP5_CTRL_MSTANDBY (1 << 24)
+#define ISP5_CTRL_VD_PULSE_EXT (1 << 23)
+#define ISP5_CTRL_MSTANDBY_WAIT (1 << 20)
+#define ISP5_CTRL_BL_CLK_ENABLE (1 << 15)
+#define ISP5_CTRL_ISIF_CLK_ENABLE (1 << 14)
+#define ISP5_CTRL_H3A_CLK_ENABLE (1 << 13)
+#define ISP5_CTRL_RSZ_CLK_ENABLE (1 << 12)
+#define ISP5_CTRL_IPIPE_CLK_ENABLE (1 << 11)
+#define ISP5_CTRL_IPIPEIF_CLK_ENABLE (1 << 10)
+#define ISP5_CTRL_SYNC_ENABLE (1 << 9)
+#define ISP5_CTRL_PSYNC_CLK_SEL (1 << 8)
+
+/* ISS ISP ISIF register offsets */
+#define ISIF_SYNCEN (0x0000)
+#define ISIF_SYNCEN_DWEN (1 << 1)
+#define ISIF_SYNCEN_SYEN (1 << 0)
+
+#define ISIF_MODESET (0x0004)
+#define ISIF_MODESET_INPMOD_MASK (3 << 12)
+#define ISIF_MODESET_INPMOD_RAW (0 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR16 (1 << 12)
+#define ISIF_MODESET_INPMOD_YCBCR8 (2 << 12)
+#define ISIF_MODESET_CCDW_MASK (7 << 8)
+#define ISIF_MODESET_CCDW_2BIT (2 << 8)
+#define ISIF_MODESET_CCDMD (1 << 7)
+#define ISIF_MODESET_SWEN (1 << 5)
+#define ISIF_MODESET_HDPOL (1 << 3)
+#define ISIF_MODESET_VDPOL (1 << 2)
+
+#define ISIF_SPH (0x0018)
+#define ISIF_SPH_MASK (0x7fff)
+
+#define ISIF_LNH (0x001c)
+#define ISIF_LNH_MASK (0x7fff)
+
+#define ISIF_LNV (0x0028)
+#define ISIF_LNV_MASK (0x7fff)
+
+#define ISIF_HSIZE (0x0034)
+#define ISIF_HSIZE_ADCR (1 << 12)
+#define ISIF_HSIZE_HSIZE_MASK (0xfff)
+
+#define ISIF_CADU (0x003c)
+#define ISIF_CADU_MASK (0x7ff)
+
+#define ISIF_CADL (0x0040)
+#define ISIF_CADL_MASK (0xffff)
+
+#define ISIF_CCOLP (0x004c)
+#define ISIF_CCOLP_CP0_F0_R (0 << 6)
+#define ISIF_CCOLP_CP0_F0_GR (1 << 6)
+#define ISIF_CCOLP_CP0_F0_B (3 << 6)
+#define ISIF_CCOLP_CP0_F0_GB (2 << 6)
+#define ISIF_CCOLP_CP1_F0_R (0 << 4)
+#define ISIF_CCOLP_CP1_F0_GR (1 << 4)
+#define ISIF_CCOLP_CP1_F0_B (3 << 4)
+#define ISIF_CCOLP_CP1_F0_GB (2 << 4)
+#define ISIF_CCOLP_CP2_F0_R (0 << 2)
+#define ISIF_CCOLP_CP2_F0_GR (1 << 2)
+#define ISIF_CCOLP_CP2_F0_B (3 << 2)
+#define ISIF_CCOLP_CP2_F0_GB (2 << 2)
+#define ISIF_CCOLP_CP3_F0_R (0 << 0)
+#define ISIF_CCOLP_CP3_F0_GR (1 << 0)
+#define ISIF_CCOLP_CP3_F0_B (3 << 0)
+#define ISIF_CCOLP_CP3_F0_GB (2 << 0)
+
+#define ISIF_VDINT(i) (0x0070 + (i) * 4)
+#define ISIF_VDINT_MASK (0x7fff)
+
+#define ISIF_CGAMMAWD (0x0080)
+#define ISIF_CGAMMAWD_GWDI_MASK (0xf << 1)
+#define ISIF_CGAMMAWD_GWDI(bpp) ((16 - (bpp)) << 1)
+
+#define ISIF_CCDCFG (0x0088)
+#define ISIF_CCDCFG_Y8POS (1 << 11)
+
+/* ISS ISP IPIPEIF register offsets */
+#define IPIPEIF_ENABLE (0x0000)
+
+#define IPIPEIF_CFG1 (0x0004)
+#define IPIPEIF_CFG1_INPSRC1_MASK (3 << 14)
+#define IPIPEIF_CFG1_INPSRC1_VPORT_RAW (0 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_RAW (1 << 14)
+#define IPIPEIF_CFG1_INPSRC1_ISIF_DARKFM (2 << 14)
+#define IPIPEIF_CFG1_INPSRC1_SDRAM_YUV (3 << 14)
+#define IPIPEIF_CFG1_INPSRC2_MASK (3 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF (0 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_RAW (1 << 2)
+#define IPIPEIF_CFG1_INPSRC2_ISIF_DARKFM (2 << 2)
+#define IPIPEIF_CFG1_INPSRC2_SDRAM_YUV (3 << 2)
+
+#define IPIPEIF_CFG2 (0x0030)
+#define IPIPEIF_CFG2_YUV8P (1 << 7)
+#define IPIPEIF_CFG2_YUV8 (1 << 6)
+#define IPIPEIF_CFG2_YUV16 (1 << 3)
+#define IPIPEIF_CFG2_VDPOL (1 << 2)
+#define IPIPEIF_CFG2_HDPOL (1 << 1)
+#define IPIPEIF_CFG2_INTSW (1 << 0)
+
+#define IPIPEIF_CLKDIV (0x0040)
+
+/* ISS ISP IPIPE register offsets */
+#define IPIPE_SRC_EN (0x0000)
+#define IPIPE_SRC_EN_EN (1 << 0)
+
+#define IPIPE_SRC_MODE (0x0004)
+#define IPIPE_SRC_MODE_WRT (1 << 1)
+#define IPIPE_SRC_MODE_OST (1 << 0)
+
+#define IPIPE_SRC_FMT (0x0008)
+#define IPIPE_SRC_FMT_RAW2YUV (0 << 0)
+#define IPIPE_SRC_FMT_RAW2RAW (1 << 0)
+#define IPIPE_SRC_FMT_RAW2STATS (2 << 0)
+#define IPIPE_SRC_FMT_YUV2YUV (3 << 0)
+
+#define IPIPE_SRC_COL (0x000c)
+#define IPIPE_SRC_COL_OO_R (0 << 6)
+#define IPIPE_SRC_COL_OO_GR (1 << 6)
+#define IPIPE_SRC_COL_OO_B (3 << 6)
+#define IPIPE_SRC_COL_OO_GB (2 << 6)
+#define IPIPE_SRC_COL_OE_R (0 << 4)
+#define IPIPE_SRC_COL_OE_GR (1 << 4)
+#define IPIPE_SRC_COL_OE_B (3 << 4)
+#define IPIPE_SRC_COL_OE_GB (2 << 4)
+#define IPIPE_SRC_COL_EO_R (0 << 2)
+#define IPIPE_SRC_COL_EO_GR (1 << 2)
+#define IPIPE_SRC_COL_EO_B (3 << 2)
+#define IPIPE_SRC_COL_EO_GB (2 << 2)
+#define IPIPE_SRC_COL_EE_R (0 << 0)
+#define IPIPE_SRC_COL_EE_GR (1 << 0)
+#define IPIPE_SRC_COL_EE_B (3 << 0)
+#define IPIPE_SRC_COL_EE_GB (2 << 0)
+
+#define IPIPE_SRC_VPS (0x0010)
+#define IPIPE_SRC_VPS_MASK (0xffff)
+
+#define IPIPE_SRC_VSZ (0x0014)
+#define IPIPE_SRC_VSZ_MASK (0x1fff)
+
+#define IPIPE_SRC_HPS (0x0018)
+#define IPIPE_SRC_HPS_MASK (0xffff)
+
+#define IPIPE_SRC_HSZ (0x001c)
+#define IPIPE_SRC_HSZ_MASK (0x1ffe)
+
+#define IPIPE_SEL_SBU (0x0020)
+
+#define IPIPE_SRC_STA (0x0024)
+
+#define IPIPE_GCK_MMR (0x0028)
+#define IPIPE_GCK_MMR_REG (1 << 0)
+
+#define IPIPE_GCK_PIX (0x002c)
+#define IPIPE_GCK_PIX_G3 (1 << 3)
+#define IPIPE_GCK_PIX_G2 (1 << 2)
+#define IPIPE_GCK_PIX_G1 (1 << 1)
+#define IPIPE_GCK_PIX_G0 (1 << 0)
+
+#define IPIPE_DPC_LUT_EN (0x0034)
+#define IPIPE_DPC_LUT_SEL (0x0038)
+#define IPIPE_DPC_LUT_ADR (0x003c)
+#define IPIPE_DPC_LUT_SIZ (0x0040)
+
+#define IPIPE_DPC_OTF_EN (0x0044)
+#define IPIPE_DPC_OTF_TYP (0x0048)
+#define IPIPE_DPC_OTF_2_D_THR_R (0x004c)
+#define IPIPE_DPC_OTF_2_D_THR_GR (0x0050)
+#define IPIPE_DPC_OTF_2_D_THR_GB (0x0054)
+#define IPIPE_DPC_OTF_2_D_THR_B (0x0058)
+#define IPIPE_DPC_OTF_2_C_THR_R (0x005c)
+#define IPIPE_DPC_OTF_2_C_THR_GR (0x0060)
+#define IPIPE_DPC_OTF_2_C_THR_GB (0x0064)
+#define IPIPE_DPC_OTF_2_C_THR_B (0x0068)
+#define IPIPE_DPC_OTF_3_SHF (0x006c)
+#define IPIPE_DPC_OTF_3_D_THR (0x0070)
+#define IPIPE_DPC_OTF_3_D_SPL (0x0074)
+#define IPIPE_DPC_OTF_3_D_MIN (0x0078)
+#define IPIPE_DPC_OTF_3_D_MAX (0x007c)
+#define IPIPE_DPC_OTF_3_C_THR (0x0080)
+#define IPIPE_DPC_OTF_3_C_SLP (0x0084)
+#define IPIPE_DPC_OTF_3_C_MIN (0x0088)
+#define IPIPE_DPC_OTF_3_C_MAX (0x008c)
+
+#define IPIPE_LSC_VOFT (0x0090)
+#define IPIPE_LSC_VA2 (0x0094)
+#define IPIPE_LSC_VA1 (0x0098)
+#define IPIPE_LSC_VS (0x009c)
+#define IPIPE_LSC_HOFT (0x00a0)
+#define IPIPE_LSC_HA2 (0x00a4)
+#define IPIPE_LSC_HA1 (0x00a8)
+#define IPIPE_LSC_HS (0x00ac)
+#define IPIPE_LSC_GAN_R (0x00b0)
+#define IPIPE_LSC_GAN_GR (0x00b4)
+#define IPIPE_LSC_GAN_GB (0x00b8)
+#define IPIPE_LSC_GAN_B (0x00bc)
+#define IPIPE_LSC_OFT_R (0x00c0)
+#define IPIPE_LSC_OFT_GR (0x00c4)
+#define IPIPE_LSC_OFT_GB (0x00c8)
+#define IPIPE_LSC_OFT_B (0x00cc)
+#define IPIPE_LSC_SHF (0x00d0)
+#define IPIPE_LSC_MAX (0x00d4)
+
+#define IPIPE_D2F_1ST_EN (0x00d8)
+#define IPIPE_D2F_1ST_TYP (0x00dc)
+#define IPIPE_D2F_1ST_THR_00 (0x00e0)
+#define IPIPE_D2F_1ST_THR_01 (0x00e4)
+#define IPIPE_D2F_1ST_THR_02 (0x00e8)
+#define IPIPE_D2F_1ST_THR_03 (0x00ec)
+#define IPIPE_D2F_1ST_THR_04 (0x00f0)
+#define IPIPE_D2F_1ST_THR_05 (0x00f4)
+#define IPIPE_D2F_1ST_THR_06 (0x00f8)
+#define IPIPE_D2F_1ST_THR_07 (0x00fc)
+#define IPIPE_D2F_1ST_STR_00 (0x0100)
+#define IPIPE_D2F_1ST_STR_01 (0x0104)
+#define IPIPE_D2F_1ST_STR_02 (0x0108)
+#define IPIPE_D2F_1ST_STR_03 (0x010c)
+#define IPIPE_D2F_1ST_STR_04 (0x0110)
+#define IPIPE_D2F_1ST_STR_05 (0x0114)
+#define IPIPE_D2F_1ST_STR_06 (0x0118)
+#define IPIPE_D2F_1ST_STR_07 (0x011c)
+#define IPIPE_D2F_1ST_SPR_00 (0x0120)
+#define IPIPE_D2F_1ST_SPR_01 (0x0124)
+#define IPIPE_D2F_1ST_SPR_02 (0x0128)
+#define IPIPE_D2F_1ST_SPR_03 (0x012c)
+#define IPIPE_D2F_1ST_SPR_04 (0x0130)
+#define IPIPE_D2F_1ST_SPR_05 (0x0134)
+#define IPIPE_D2F_1ST_SPR_06 (0x0138)
+#define IPIPE_D2F_1ST_SPR_07 (0x013c)
+#define IPIPE_D2F_1ST_EDG_MIN (0x0140)
+#define IPIPE_D2F_1ST_EDG_MAX (0x0144)
+#define IPIPE_D2F_2ND_EN (0x0148)
+#define IPIPE_D2F_2ND_TYP (0x014c)
+#define IPIPE_D2F_2ND_THR00 (0x0150)
+#define IPIPE_D2F_2ND_THR01 (0x0154)
+#define IPIPE_D2F_2ND_THR02 (0x0158)
+#define IPIPE_D2F_2ND_THR03 (0x015c)
+#define IPIPE_D2F_2ND_THR04 (0x0160)
+#define IPIPE_D2F_2ND_THR05 (0x0164)
+#define IPIPE_D2F_2ND_THR06 (0x0168)
+#define IPIPE_D2F_2ND_THR07 (0x016c)
+#define IPIPE_D2F_2ND_STR_00 (0x0170)
+#define IPIPE_D2F_2ND_STR_01 (0x0174)
+#define IPIPE_D2F_2ND_STR_02 (0x0178)
+#define IPIPE_D2F_2ND_STR_03 (0x017c)
+#define IPIPE_D2F_2ND_STR_04 (0x0180)
+#define IPIPE_D2F_2ND_STR_05 (0x0184)
+#define IPIPE_D2F_2ND_STR_06 (0x0188)
+#define IPIPE_D2F_2ND_STR_07 (0x018c)
+#define IPIPE_D2F_2ND_SPR_00 (0x0190)
+#define IPIPE_D2F_2ND_SPR_01 (0x0194)
+#define IPIPE_D2F_2ND_SPR_02 (0x0198)
+#define IPIPE_D2F_2ND_SPR_03 (0x019c)
+#define IPIPE_D2F_2ND_SPR_04 (0x01a0)
+#define IPIPE_D2F_2ND_SPR_05 (0x01a4)
+#define IPIPE_D2F_2ND_SPR_06 (0x01a8)
+#define IPIPE_D2F_2ND_SPR_07 (0x01ac)
+#define IPIPE_D2F_2ND_EDG_MIN (0x01b0)
+#define IPIPE_D2F_2ND_EDG_MAX (0x01b4)
+
+#define IPIPE_GIC_EN (0x01b8)
+#define IPIPE_GIC_TYP (0x01bc)
+#define IPIPE_GIC_GAN (0x01c0)
+#define IPIPE_GIC_NFGAIN (0x01c4)
+#define IPIPE_GIC_THR (0x01c8)
+#define IPIPE_GIC_SLP (0x01cc)
+
+#define IPIPE_WB2_OFT_R (0x01d0)
+#define IPIPE_WB2_OFT_GR (0x01d4)
+#define IPIPE_WB2_OFT_GB (0x01d8)
+#define IPIPE_WB2_OFT_B (0x01dc)
+
+#define IPIPE_WB2_WGN_R (0x01e0)
+#define IPIPE_WB2_WGN_GR (0x01e4)
+#define IPIPE_WB2_WGN_GB (0x01e8)
+#define IPIPE_WB2_WGN_B (0x01ec)
+
+#define IPIPE_CFA_MODE (0x01f0)
+#define IPIPE_CFA_2DIR_HPF_THR (0x01f4)
+#define IPIPE_CFA_2DIR_HPF_SLP (0x01f8)
+#define IPIPE_CFA_2DIR_MIX_THR (0x01fc)
+#define IPIPE_CFA_2DIR_MIX_SLP (0x0200)
+#define IPIPE_CFA_2DIR_DIR_TRH (0x0204)
+#define IPIPE_CFA_2DIR_DIR_SLP (0x0208)
+#define IPIPE_CFA_2DIR_NDWT (0x020c)
+#define IPIPE_CFA_MONO_HUE_FRA (0x0210)
+#define IPIPE_CFA_MONO_EDG_THR (0x0214)
+#define IPIPE_CFA_MONO_THR_MIN (0x0218)
+
+#define IPIPE_CFA_MONO_THR_SLP (0x021c)
+#define IPIPE_CFA_MONO_SLP_MIN (0x0220)
+#define IPIPE_CFA_MONO_SLP_SLP (0x0224)
+#define IPIPE_CFA_MONO_LPWT (0x0228)
+
+#define IPIPE_RGB1_MUL_RR (0x022c)
+#define IPIPE_RGB1_MUL_GR (0x0230)
+#define IPIPE_RGB1_MUL_BR (0x0234)
+#define IPIPE_RGB1_MUL_RG (0x0238)
+#define IPIPE_RGB1_MUL_GG (0x023c)
+#define IPIPE_RGB1_MUL_BG (0x0240)
+#define IPIPE_RGB1_MUL_RB (0x0244)
+#define IPIPE_RGB1_MUL_GB (0x0248)
+#define IPIPE_RGB1_MUL_BB (0x024c)
+#define IPIPE_RGB1_OFT_OR (0x0250)
+#define IPIPE_RGB1_OFT_OG (0x0254)
+#define IPIPE_RGB1_OFT_OB (0x0258)
+#define IPIPE_GMM_CFG (0x025c)
+#define IPIPE_RGB2_MUL_RR (0x0260)
+#define IPIPE_RGB2_MUL_GR (0x0264)
+#define IPIPE_RGB2_MUL_BR (0x0268)
+#define IPIPE_RGB2_MUL_RG (0x026c)
+#define IPIPE_RGB2_MUL_GG (0x0270)
+#define IPIPE_RGB2_MUL_BG (0x0274)
+#define IPIPE_RGB2_MUL_RB (0x0278)
+#define IPIPE_RGB2_MUL_GB (0x027c)
+#define IPIPE_RGB2_MUL_BB (0x0280)
+#define IPIPE_RGB2_OFT_OR (0x0284)
+#define IPIPE_RGB2_OFT_OG (0x0288)
+#define IPIPE_RGB2_OFT_OB (0x028c)
+
+#define IPIPE_YUV_ADJ (0x0294)
+#define IPIPE_YUV_MUL_RY (0x0298)
+#define IPIPE_YUV_MUL_GY (0x029c)
+#define IPIPE_YUV_MUL_BY (0x02a0)
+#define IPIPE_YUV_MUL_RCB (0x02a4)
+#define IPIPE_YUV_MUL_GCB (0x02a8)
+#define IPIPE_YUV_MUL_BCB (0x02ac)
+#define IPIPE_YUV_MUL_RCR (0x02b0)
+#define IPIPE_YUV_MUL_GCR (0x02b4)
+#define IPIPE_YUV_MUL_BCR (0x02b8)
+#define IPIPE_YUV_OFT_Y (0x02bc)
+#define IPIPE_YUV_OFT_CB (0x02c0)
+#define IPIPE_YUV_OFT_CR (0x02c4)
+
+#define IPIPE_YUV_PHS (0x02c8)
+#define IPIPE_YUV_PHS_LPF (1 << 1)
+#define IPIPE_YUV_PHS_POS (1 << 0)
+
+#define IPIPE_YEE_EN (0x02d4)
+#define IPIPE_YEE_TYP (0x02d8)
+#define IPIPE_YEE_SHF (0x02dc)
+#define IPIPE_YEE_MUL_00 (0x02e0)
+#define IPIPE_YEE_MUL_01 (0x02e4)
+#define IPIPE_YEE_MUL_02 (0x02e8)
+#define IPIPE_YEE_MUL_10 (0x02ec)
+#define IPIPE_YEE_MUL_11 (0x02f0)
+#define IPIPE_YEE_MUL_12 (0x02f4)
+#define IPIPE_YEE_MUL_20 (0x02f8)
+#define IPIPE_YEE_MUL_21 (0x02fc)
+#define IPIPE_YEE_MUL_22 (0x0300)
+#define IPIPE_YEE_THR (0x0304)
+#define IPIPE_YEE_E_GAN (0x0308)
+#define IPIPE_YEE_E_THR_1 (0x030c)
+#define IPIPE_YEE_E_THR_2 (0x0310)
+#define IPIPE_YEE_G_GAN (0x0314)
+#define IPIPE_YEE_G_OFT (0x0318)
+
+#define IPIPE_CAR_EN (0x031c)
+#define IPIPE_CAR_TYP (0x0320)
+#define IPIPE_CAR_SW (0x0324)
+#define IPIPE_CAR_HPF_TYP (0x0328)
+#define IPIPE_CAR_HPF_SHF (0x032c)
+#define IPIPE_CAR_HPF_THR (0x0330)
+#define IPIPE_CAR_GN1_GAN (0x0334)
+#define IPIPE_CAR_GN1_SHF (0x0338)
+#define IPIPE_CAR_GN1_MIN (0x033c)
+#define IPIPE_CAR_GN2_GAN (0x0340)
+#define IPIPE_CAR_GN2_SHF (0x0344)
+#define IPIPE_CAR_GN2_MIN (0x0348)
+#define IPIPE_CGS_EN (0x034c)
+#define IPIPE_CGS_GN1_L_THR (0x0350)
+#define IPIPE_CGS_GN1_L_GAIN (0x0354)
+#define IPIPE_CGS_GN1_L_SHF (0x0358)
+#define IPIPE_CGS_GN1_L_MIN (0x035c)
+#define IPIPE_CGS_GN1_H_THR (0x0360)
+#define IPIPE_CGS_GN1_H_GAIN (0x0364)
+#define IPIPE_CGS_GN1_H_SHF (0x0368)
+#define IPIPE_CGS_GN1_H_MIN (0x036c)
+#define IPIPE_CGS_GN2_L_THR (0x0370)
+#define IPIPE_CGS_GN2_L_GAIN (0x0374)
+#define IPIPE_CGS_GN2_L_SHF (0x0378)
+#define IPIPE_CGS_GN2_L_MIN (0x037c)
+
+#define IPIPE_BOX_EN (0x0380)
+#define IPIPE_BOX_MODE (0x0384)
+#define IPIPE_BOX_TYP (0x0388)
+#define IPIPE_BOX_SHF (0x038c)
+#define IPIPE_BOX_SDR_SAD_H (0x0390)
+#define IPIPE_BOX_SDR_SAD_L (0x0394)
+
+#define IPIPE_HST_EN (0x039c)
+#define IPIPE_HST_MODE (0x03a0)
+#define IPIPE_HST_SEL (0x03a4)
+#define IPIPE_HST_PARA (0x03a8)
+#define IPIPE_HST_0_VPS (0x03ac)
+#define IPIPE_HST_0_VSZ (0x03b0)
+#define IPIPE_HST_0_HPS (0x03b4)
+#define IPIPE_HST_0_HSZ (0x03b8)
+#define IPIPE_HST_1_VPS (0x03bc)
+#define IPIPE_HST_1_VSZ (0x03c0)
+#define IPIPE_HST_1_HPS (0x03c4)
+#define IPIPE_HST_1_HSZ (0x03c8)
+#define IPIPE_HST_2_VPS (0x03cc)
+#define IPIPE_HST_2_VSZ (0x03d0)
+#define IPIPE_HST_2_HPS (0x03d4)
+#define IPIPE_HST_2_HSZ (0x03d8)
+#define IPIPE_HST_3_VPS (0x03dc)
+#define IPIPE_HST_3_VSZ (0x03e0)
+#define IPIPE_HST_3_HPS (0x03e4)
+#define IPIPE_HST_3_HSZ (0x03e8)
+#define IPIPE_HST_TBL (0x03ec)
+#define IPIPE_HST_MUL_R (0x03f0)
+#define IPIPE_HST_MUL_GR (0x03f4)
+#define IPIPE_HST_MUL_GB (0x03f8)
+#define IPIPE_HST_MUL_B (0x03fc)
+
+#define IPIPE_BSC_EN (0x0400)
+#define IPIPE_BSC_MODE (0x0404)
+#define IPIPE_BSC_TYP (0x0408)
+#define IPIPE_BSC_ROW_VCT (0x040c)
+#define IPIPE_BSC_ROW_SHF (0x0410)
+#define IPIPE_BSC_ROW_VPO (0x0414)
+#define IPIPE_BSC_ROW_VNU (0x0418)
+#define IPIPE_BSC_ROW_VSKIP (0x041c)
+#define IPIPE_BSC_ROW_HPO (0x0420)
+#define IPIPE_BSC_ROW_HNU (0x0424)
+#define IPIPE_BSC_ROW_HSKIP (0x0428)
+#define IPIPE_BSC_COL_VCT (0x042c)
+#define IPIPE_BSC_COL_SHF (0x0430)
+#define IPIPE_BSC_COL_VPO (0x0434)
+#define IPIPE_BSC_COL_VNU (0x0438)
+#define IPIPE_BSC_COL_VSKIP (0x043c)
+#define IPIPE_BSC_COL_HPO (0x0440)
+#define IPIPE_BSC_COL_HNU (0x0444)
+#define IPIPE_BSC_COL_HSKIP (0x0448)
+
+/* ISS ISP Resizer register offsets */
+#define RSZ_REVISION (0x0000)
+#define RSZ_SYSCONFIG (0x0004)
+#define RSZ_SYSCONFIG_RSZB_CLK_EN (1 << 9)
+#define RSZ_SYSCONFIG_RSZA_CLK_EN (1 << 8)
+
+#define RSZ_IN_FIFO_CTRL (0x000c)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_MASK (0x1ff << 16)
+#define RSZ_IN_FIFO_CTRL_THRLD_LOW_SHIFT 16
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_MASK (0x1ff << 0)
+#define RSZ_IN_FIFO_CTRL_THRLD_HIGH_SHIFT 0
+
+#define RSZ_FRACDIV (0x0008)
+#define RSZ_FRACDIV_MASK (0xffff)
+
+#define RSZ_SRC_EN (0x0020)
+#define RSZ_SRC_EN_SRC_EN (1 << 0)
+
+#define RSZ_SRC_MODE (0x0024)
+#define RSZ_SRC_MODE_OST (1 << 0)
+#define RSZ_SRC_MODE_WRT (1 << 1)
+
+#define RSZ_SRC_FMT0 (0x0028)
+#define RSZ_SRC_FMT0_BYPASS (1 << 1)
+#define RSZ_SRC_FMT0_SEL (1 << 0)
+
+#define RSZ_SRC_FMT1 (0x002c)
+#define RSZ_SRC_FMT1_IN420 (1 << 1)
+
+#define RSZ_SRC_VPS (0x0030)
+#define RSZ_SRC_VSZ (0x0034)
+#define RSZ_SRC_HPS (0x0038)
+#define RSZ_SRC_HSZ (0x003c)
+#define RSZ_DMA_RZA (0x0040)
+#define RSZ_DMA_RZB (0x0044)
+#define RSZ_DMA_STA (0x0048)
+#define RSZ_GCK_MMR (0x004c)
+#define RSZ_GCK_MMR_MMR (1 << 0)
+
+#define RSZ_GCK_SDR (0x0054)
+#define RSZ_GCK_SDR_CORE (1 << 0)
+
+#define RSZ_IRQ_RZA (0x0058)
+#define RSZ_IRQ_RZA_MASK (0x1fff)
+
+#define RSZ_IRQ_RZB (0x005c)
+#define RSZ_IRQ_RZB_MASK (0x1fff)
+
+#define RSZ_YUV_Y_MIN (0x0060)
+#define RSZ_YUV_Y_MAX (0x0064)
+#define RSZ_YUV_C_MIN (0x0068)
+#define RSZ_YUV_C_MAX (0x006c)
+
+#define RSZ_SEQ (0x0074)
+#define RSZ_SEQ_HRVB (1 << 2)
+#define RSZ_SEQ_HRVA (1 << 0)
+
+#define RZA_EN (0x0078)
+#define RZA_MODE (0x007c)
+#define RZA_MODE_ONE_SHOT (1 << 0)
+
+#define RZA_420 (0x0080)
+#define RZA_I_VPS (0x0084)
+#define RZA_I_HPS (0x0088)
+#define RZA_O_VSZ (0x008c)
+#define RZA_O_HSZ (0x0090)
+#define RZA_V_PHS_Y (0x0094)
+#define RZA_V_PHS_C (0x0098)
+#define RZA_V_DIF (0x009c)
+#define RZA_V_TYP (0x00a0)
+#define RZA_V_LPF (0x00a4)
+#define RZA_H_PHS (0x00a8)
+#define RZA_H_DIF (0x00b0)
+#define RZA_H_TYP (0x00b4)
+#define RZA_H_LPF (0x00b8)
+#define RZA_DWN_EN (0x00bc)
+#define RZA_SDR_Y_BAD_H (0x00d0)
+#define RZA_SDR_Y_BAD_L (0x00d4)
+#define RZA_SDR_Y_SAD_H (0x00d8)
+#define RZA_SDR_Y_SAD_L (0x00dc)
+#define RZA_SDR_Y_OFT (0x00e0)
+#define RZA_SDR_Y_PTR_S (0x00e4)
+#define RZA_SDR_Y_PTR_E (0x00e8)
+#define RZA_SDR_C_BAD_H (0x00ec)
+#define RZA_SDR_C_BAD_L (0x00f0)
+#define RZA_SDR_C_SAD_H (0x00f4)
+#define RZA_SDR_C_SAD_L (0x00f8)
+#define RZA_SDR_C_OFT (0x00fc)
+#define RZA_SDR_C_PTR_S (0x0100)
+#define RZA_SDR_C_PTR_E (0x0104)
+
+#define RZB_EN (0x0108)
+#define RZB_MODE (0x010c)
+#define RZB_420 (0x0110)
+#define RZB_I_VPS (0x0114)
+#define RZB_I_HPS (0x0118)
+#define RZB_O_VSZ (0x011c)
+#define RZB_O_HSZ (0x0120)
+
+#define RZB_V_DIF (0x012c)
+#define RZB_V_TYP (0x0130)
+#define RZB_V_LPF (0x0134)
+
+#define RZB_H_DIF (0x0140)
+#define RZB_H_TYP (0x0144)
+#define RZB_H_LPF (0x0148)
+
+#define RZB_SDR_Y_BAD_H (0x0160)
+#define RZB_SDR_Y_BAD_L (0x0164)
+#define RZB_SDR_Y_SAD_H (0x0168)
+#define RZB_SDR_Y_SAD_L (0x016c)
+#define RZB_SDR_Y_OFT (0x0170)
+#define RZB_SDR_Y_PTR_S (0x0174)
+#define RZB_SDR_Y_PTR_E (0x0178)
+#define RZB_SDR_C_BAD_H (0x017c)
+#define RZB_SDR_C_BAD_L (0x0180)
+#define RZB_SDR_C_SAD_H (0x0184)
+#define RZB_SDR_C_SAD_L (0x0188)
+
+#define RZB_SDR_C_PTR_S (0x0190)
+#define RZB_SDR_C_PTR_E (0x0194)
+
+/* Shared Bitmasks between RZA & RZB */
+#define RSZ_EN_EN (1 << 0)
+
+#define RSZ_420_CEN (1 << 1)
+#define RSZ_420_YEN (1 << 0)
+
+#define RSZ_I_VPS_MASK (0x1fff)
+
+#define RSZ_I_HPS_MASK (0x1fff)
+
+#define RSZ_O_VSZ_MASK (0x1fff)
+
+#define RSZ_O_HSZ_MASK (0x1ffe)
+
+#define RSZ_V_PHS_Y_MASK (0x3fff)
+
+#define RSZ_V_PHS_C_MASK (0x3fff)
+
+#define RSZ_V_DIF_MASK (0x3fff)
+
+#define RSZ_V_TYP_C (1 << 1)
+#define RSZ_V_TYP_Y (1 << 0)
+
+#define RSZ_V_LPF_C_MASK (0x3f << 6)
+#define RSZ_V_LPF_C_SHIFT 6
+#define RSZ_V_LPF_Y_MASK (0x3f << 0)
+#define RSZ_V_LPF_Y_SHIFT 0
+
+#define RSZ_H_PHS_MASK (0x3fff)
+
+#define RSZ_H_DIF_MASK (0x3fff)
+
+#define RSZ_H_TYP_C (1 << 1)
+#define RSZ_H_TYP_Y (1 << 0)
+
+#define RSZ_H_LPF_C_MASK (0x3f << 6)
+#define RSZ_H_LPF_C_SHIFT 6
+#define RSZ_H_LPF_Y_MASK (0x3f << 0)
+#define RSZ_H_LPF_Y_SHIFT 0
+
+#define RSZ_DWN_EN_DWN_EN (1 << 0)
+
+#endif /* _OMAP4_ISS_REGS_H_ */
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
new file mode 100644
index 000000000000..ae831b8985c9
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -0,0 +1,893 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include "iss.h"
+#include "iss_regs.h"
+#include "iss_resizer.h"
+
+static const unsigned int resizer_fmts[] = {
+ V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16,
+};
+
+/*
+ * resizer_print_status - Print current RESIZER Module register values.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ *
+ * Also prints other debug information stored in the RESIZER module.
+ */
+#define RSZ_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###RSZ " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_##name))
+
+#define RZA_PRINT_REGISTER(iss, name)\
+ dev_dbg(iss->dev, "###RZA " #name "=0x%08x\n", \
+ iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_##name))
+
+static void resizer_print_status(struct iss_resizer_device *resizer)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+
+ dev_dbg(iss->dev, "-------------RESIZER Register dump-------------\n");
+
+ RSZ_PRINT_REGISTER(iss, SYSCONFIG);
+ RSZ_PRINT_REGISTER(iss, IN_FIFO_CTRL);
+ RSZ_PRINT_REGISTER(iss, FRACDIV);
+ RSZ_PRINT_REGISTER(iss, SRC_EN);
+ RSZ_PRINT_REGISTER(iss, SRC_MODE);
+ RSZ_PRINT_REGISTER(iss, SRC_FMT0);
+ RSZ_PRINT_REGISTER(iss, SRC_FMT1);
+ RSZ_PRINT_REGISTER(iss, SRC_VPS);
+ RSZ_PRINT_REGISTER(iss, SRC_VSZ);
+ RSZ_PRINT_REGISTER(iss, SRC_HPS);
+ RSZ_PRINT_REGISTER(iss, SRC_HSZ);
+ RSZ_PRINT_REGISTER(iss, DMA_RZA);
+ RSZ_PRINT_REGISTER(iss, DMA_RZB);
+ RSZ_PRINT_REGISTER(iss, DMA_STA);
+ RSZ_PRINT_REGISTER(iss, GCK_MMR);
+ RSZ_PRINT_REGISTER(iss, GCK_SDR);
+ RSZ_PRINT_REGISTER(iss, IRQ_RZA);
+ RSZ_PRINT_REGISTER(iss, IRQ_RZB);
+ RSZ_PRINT_REGISTER(iss, YUV_Y_MIN);
+ RSZ_PRINT_REGISTER(iss, YUV_Y_MAX);
+ RSZ_PRINT_REGISTER(iss, YUV_C_MIN);
+ RSZ_PRINT_REGISTER(iss, YUV_C_MAX);
+ RSZ_PRINT_REGISTER(iss, SEQ);
+
+ RZA_PRINT_REGISTER(iss, EN);
+ RZA_PRINT_REGISTER(iss, MODE);
+ RZA_PRINT_REGISTER(iss, 420);
+ RZA_PRINT_REGISTER(iss, I_VPS);
+ RZA_PRINT_REGISTER(iss, I_HPS);
+ RZA_PRINT_REGISTER(iss, O_VSZ);
+ RZA_PRINT_REGISTER(iss, O_HSZ);
+ RZA_PRINT_REGISTER(iss, V_PHS_Y);
+ RZA_PRINT_REGISTER(iss, V_PHS_C);
+ RZA_PRINT_REGISTER(iss, V_DIF);
+ RZA_PRINT_REGISTER(iss, V_TYP);
+ RZA_PRINT_REGISTER(iss, V_LPF);
+ RZA_PRINT_REGISTER(iss, H_PHS);
+ RZA_PRINT_REGISTER(iss, H_DIF);
+ RZA_PRINT_REGISTER(iss, H_TYP);
+ RZA_PRINT_REGISTER(iss, H_LPF);
+ RZA_PRINT_REGISTER(iss, DWN_EN);
+ RZA_PRINT_REGISTER(iss, SDR_Y_BAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_Y_BAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_Y_SAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_Y_SAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_Y_OFT);
+ RZA_PRINT_REGISTER(iss, SDR_Y_PTR_S);
+ RZA_PRINT_REGISTER(iss, SDR_Y_PTR_E);
+ RZA_PRINT_REGISTER(iss, SDR_C_BAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_C_BAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_C_SAD_H);
+ RZA_PRINT_REGISTER(iss, SDR_C_SAD_L);
+ RZA_PRINT_REGISTER(iss, SDR_C_OFT);
+ RZA_PRINT_REGISTER(iss, SDR_C_PTR_S);
+ RZA_PRINT_REGISTER(iss, SDR_C_PTR_E);
+
+ dev_dbg(iss->dev, "-----------------------------------------------\n");
+}
+
+/*
+ * resizer_enable - Enable/Disable RESIZER.
+ * @resizer: Pointer to ISS ISP RESIZER device.
+ * @enable: Enable flag
+ */
+static void resizer_enable(struct iss_resizer_device *resizer, u8 enable)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_EN,
+ RSZ_SRC_EN_SRC_EN, enable ? RSZ_SRC_EN_SRC_EN : 0);
+
+ /* TODO: Enable RSZB */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_EN, RSZ_EN_EN,
+ enable ? RSZ_EN_EN : 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * Format- and pipeline-related configuration helpers
+ */
+
+/*
+ * resizer_set_outaddr - Set memory address to save output image
+ * @resizer: Pointer to ISP RESIZER device.
+ * @addr: 32-bit memory address aligned on 32 byte boundary.
+ *
+ * Sets the memory address where the output will be saved.
+ */
+static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->formats[RESIZER_PAD_SINK];
+ outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+ /* Save the address split into Base Address H & L */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_H,
+ (addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_L,
+ addr & 0xffff);
+
+ /* SAD = BAD */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_H,
+ (addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_L,
+ addr & 0xffff);
+
+ /* Program the UV buffer address; the chroma plane is assumed to be
+ * contiguous with the luma plane in memory.
+ */
+ if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+ (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ u32 c_addr = addr + (resizer->video_out.bpl_value *
+ (outformat->height - 1));
+
+ /* Ensure Y_BAD_L[6:0] = C_BAD_L[6:0] */
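+ /*
+ * Round the chroma address up to the next 128-byte boundary, then
+ * copy the low 7 bits of the luma address into it.
+ */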
+ if ((c_addr ^ addr) & 0x7f) {
+ c_addr &= ~0x7f;
+ c_addr += 0x80;
+ c_addr |= addr & 0x7f;
+ }
+
+ /* Save the address split into Base Address H & L */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_H,
+ (c_addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_L,
+ c_addr & 0xffff);
+
+ /* SAD = BAD */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_H,
+ (c_addr >> 16) & 0xffff);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_L,
+ c_addr & 0xffff);
+ }
+}
+
+static void resizer_configure(struct iss_resizer_device *resizer)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct v4l2_mbus_framefmt *informat, *outformat;
+
+ informat = &resizer->formats[RESIZER_PAD_SINK];
+ outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];
+
+ /* Disable pass-through mode. Despite its name, the BYPASS bit controls
+ * pass-through mode, not bypass mode.
+ */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+ RSZ_SRC_FMT0_BYPASS);
+
+ /* Select RSZ input */
+ iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
+ RSZ_SRC_FMT0_SEL,
+ resizer->input == RESIZER_INPUT_IPIPEIF ?
+ RSZ_SRC_FMT0_SEL : 0);
+
+ /* RSZ ignores WEN signal from IPIPE/IPIPEIF */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+ RSZ_SRC_MODE_WRT);
+
+ /* Set Resizer in free-running mode */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
+ RSZ_SRC_MODE_OST);
+
+ /* Init Resizer A */
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_MODE,
+ RZA_MODE_ONE_SHOT);
+
+ /* Configure the input and output frame sizes. */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VSZ,
+ informat->height - 2);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HSZ,
+ informat->width - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_VPS, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_HPS, 0);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_VSZ,
+ outformat->height - 2);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_HSZ,
+ outformat->width - 1);
+
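+ /* A DIF value of 0x100 (256) corresponds to a 1:1 resize ratio (no scaling). */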
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_V_DIF, 0x100);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_H_DIF, 0x100);
+
+ /* Buffer output settings */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_S, 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_E,
+ outformat->height - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_OFT,
+ resizer->video_out.bpl_value);
+
+ /* UYVY -> NV12 conversion */
+ if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
+ (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
+ RSZ_420_CEN | RSZ_420_YEN);
+
+ /* UV Buffer output settings */
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_S,
+ 0);
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_E,
+ outformat->height - 1);
+
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_OFT,
+ resizer->video_out.bpl_value);
+ } else {
+ iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 0);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt handling
+ */
+
+static void resizer_isr_buffer(struct iss_resizer_device *resizer)
+{
+ struct iss_buffer *buffer;
+
+ /* The whole resizer needs to be stopped; disabling only RZA produces
+ * input FIFO overflows, most probably when the next frame is received.
+ */
+ resizer_enable(resizer, 0);
+
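+ /*
+ * If no buffer is available the resizer stays disabled; the buffer
+ * queue handler will re-enable it when a new buffer is queued.
+ */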
+ buffer = omap4iss_video_buffer_next(&resizer->video_out);
+ if (buffer == NULL)
+ return;
+
+ resizer_set_outaddr(resizer, buffer->iss_addr);
+
+ resizer_enable(resizer, 1);
+}
+
+/*
+ * resizer_int_dma_isr - Handle the RSZ_INT_DMA event
+ * @resizer: Pointer to ISP RESIZER device.
+ *
+ * Updates the pipeline frame number and completes the current buffer.
+ */
+static void resizer_int_dma_isr(struct iss_resizer_device *resizer)
+{
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&resizer->subdev.entity);
+ if (pipe->do_propagation)
+ atomic_inc(&pipe->frame_number);
+
+ resizer_isr_buffer(resizer);
+}
+
+/*
+ * omap4iss_resizer_isr - Handle RESIZER interrupt events.
+ * @resizer: Pointer to ISP RESIZER device.
+ * @events: RESIZER events
+ */
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
+{
+ struct iss_device *iss = to_iss_device(resizer);
+ struct iss_pipeline *pipe =
+ to_iss_pipeline(&resizer->subdev.entity);
+
+ if (events & (ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
+ ISP5_IRQ_RSZ_FIFO_OVF)) {
+ dev_dbg(iss->dev, "RSZ Err: FIFO_IN_BLK:%d, FIFO_OVF:%d\n",
+ events & ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR ? 1 : 0,
+ events & ISP5_IRQ_RSZ_FIFO_OVF ? 1 : 0);
+ omap4iss_pipeline_cancel_stream(pipe);
+ }
+
+ if (omap4iss_module_sync_is_stopping(&resizer->wait,
+ &resizer->stopping))
+ return;
+
+ if (events & ISP5_IRQ_RSZ_INT_DMA)
+ resizer_int_dma_isr(resizer);
+}
+
+/* -----------------------------------------------------------------------------
+ * ISS video operations
+ */
+
+static int resizer_video_queue(struct iss_video *video,
+ struct iss_buffer *buffer)
+{
+ struct iss_resizer_device *resizer = container_of(video,
+ struct iss_resizer_device, video_out);
+
+ if (!(resizer->output & RESIZER_OUTPUT_MEMORY))
+ return -ENODEV;
+
+ resizer_set_outaddr(resizer, buffer->iss_addr);
+
+ /*
+ * If streaming was enabled before any buffer was queued, or an
+ * underrun happened in the ISR, the hardware was not enabled and
+ * the ISS_VIDEO_DMAQUEUE_UNDERRUN flag is still set. Enable the
+ * hardware now.
+ */
+ if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
+ resizer_enable(resizer, 1);
+ iss_video_dmaqueue_flags_clr(video);
+ }
+
+ return 0;
+}
+
+static const struct iss_video_operations resizer_video_ops = {
+ .queue = resizer_video_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+/*
+ * resizer_set_stream - Enable/Disable streaming on the RESIZER module
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @enable: Enable/disable stream
+ */
+static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(resizer);
+ struct iss_video *video_out = &resizer->video_out;
+ int ret = 0;
+
+ if (resizer->state == ISS_PIPELINE_STREAM_STOPPED) {
+ if (enable == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+
+ omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+
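+ /* Ungate the resizer register (MMR) and core (SDR) clocks. */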
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+ RSZ_GCK_MMR_MMR);
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+ RSZ_GCK_SDR_CORE);
+
+ /* FIXME: Enable RSZB also */
+ iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+ RSZ_SYSCONFIG_RSZA_CLK_EN);
+ }
+
+ switch (enable) {
+ case ISS_PIPELINE_STREAM_CONTINUOUS:
+
+ resizer_configure(resizer);
+ resizer_print_status(resizer);
+
+ /*
+ * When outputting to memory with no buffer available, let the
+ * buffer queue handler start the hardware. A DMA queue flag
+ * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
+ * a buffer available.
+ */
+ if (resizer->output & RESIZER_OUTPUT_MEMORY &&
+ !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
+ break;
+
+ atomic_set(&resizer->stopping, 0);
+ resizer_enable(resizer, 1);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+
+ case ISS_PIPELINE_STREAM_STOPPED:
+ if (resizer->state == ISS_PIPELINE_STREAM_STOPPED)
+ return 0;
+ if (omap4iss_module_sync_idle(&sd->entity, &resizer->wait,
+ &resizer->stopping))
+ ret = -ETIMEDOUT;
+
+ resizer_enable(resizer, 0);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
+ RSZ_SYSCONFIG_RSZA_CLK_EN);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
+ RSZ_GCK_SDR_CORE);
+ iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
+ RSZ_GCK_MMR_MMR);
+ omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
+ iss_video_dmaqueue_flags_clr(video_out);
+ break;
+ }
+
+ resizer->state = enable;
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__resizer_get_format(struct iss_resizer_device *resizer,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &resizer->formats[pad];
+}
+
+/*
+ * resizer_try_format - Try video format on a pad
+ * @resizer: ISS RESIZER device
+ * @fh : V4L2 subdev file handle
+ * @pad: Pad number
+ * @fmt: Format
+ */
+static void
+resizer_try_format(struct iss_resizer_device *resizer,
+ struct v4l2_subdev_fh *fh, unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ enum v4l2_mbus_pixelcode pixelcode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ unsigned int i;
+
+ switch (pad) {
+ case RESIZER_PAD_SINK:
+ for (i = 0; i < ARRAY_SIZE(resizer_fmts); i++) {
+ if (fmt->code == resizer_fmts[i])
+ break;
+ }
+
+ /* If not found, use UYVY as default */
+ if (i >= ARRAY_SIZE(resizer_fmts))
+ fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+
+ /* Clamp the input size. */
+ fmt->width = clamp_t(u32, width, 1, 8192);
+ fmt->height = clamp_t(u32, height, 1, 8192);
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM:
+ pixelcode = fmt->code;
+ format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+ which);
+ memcpy(fmt, format, sizeof(*fmt));
+
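+ /*
+ * A UYVY sink format may be converted to the downsampled YUV 4:2:0
+ * format on the memory output; other conversions are not supported.
+ */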
+ if ((pixelcode == V4L2_MBUS_FMT_YUYV8_1_5X8) &&
+ (fmt->code == V4L2_MBUS_FMT_UYVY8_1X16))
+ fmt->code = pixelcode;
+
+ /* The data formatter truncates the number of horizontal output
+ * pixels to a multiple of 16. To avoid clipping data, allow
+ * callers to request an output size bigger than the input size
+ * up to the nearest multiple of 16.
+ */
+ fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
+ fmt->width &= ~15;
+ fmt->height = clamp_t(u32, height, 32, fmt->height);
+ break;
+
+ }
+
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+/*
+ * resizer_enum_mbus_code - Handle pixel format enumeration
+ * @sd : pointer to v4l2 subdev structure
+ * @fh : V4L2 subdev file handle
+ * @code : pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ switch (code->pad) {
+ case RESIZER_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(resizer_fmts))
+ return -EINVAL;
+
+ code->code = resizer_fmts[code->index];
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM:
+ format = __resizer_get_format(resizer, fh, RESIZER_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_TRY);
+
+ if (code->index == 0) {
+ code->code = format->code;
+ break;
+ }
+
+ switch (format->code) {
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ if (code->index == 1)
+ code->code = V4L2_MBUS_FMT_YUYV8_1_5X8;
+ else
+ return -EINVAL;
+ break;
+ default:
+ if (code->index != 0)
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int resizer_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt format;
+
+ if (fse->index != 0)
+ return -EINVAL;
+
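+ /*
+ * Probe the smallest and largest supported sizes by letting
+ * resizer_try_format() clamp extreme width and height values.
+ */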
+ format.code = fse->code;
+ format.width = 1;
+ format.height = 1;
+ resizer_try_format(resizer, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->min_width = format.width;
+ fse->min_height = format.height;
+
+ if (format.code != fse->code)
+ return -EINVAL;
+
+ format.code = fse->code;
+ format.width = -1;
+ format.height = -1;
+ resizer_try_format(resizer, fh, fse->pad, &format,
+ V4L2_SUBDEV_FORMAT_TRY);
+ fse->max_width = format.width;
+ fse->max_height = format.height;
+
+ return 0;
+}
+
+/*
+ * resizer_get_format - Retrieve the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * resizer_set_format - Set the video format on a pad
+ * @sd : ISP RESIZER V4L2 subdevice
+ * @fh : V4L2 subdev file handle
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __resizer_get_format(resizer, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ resizer_try_format(resizer, fh, fmt->pad, &fmt->format, fmt->which);
+ *format = fmt->format;
+
+ /* Propagate the format from sink to source */
+ if (fmt->pad == RESIZER_PAD_SINK) {
+ format = __resizer_get_format(resizer, fh,
+ RESIZER_PAD_SOURCE_MEM,
+ fmt->which);
+ *format = fmt->format;
+ resizer_try_format(resizer, fh, RESIZER_PAD_SOURCE_MEM, format,
+ fmt->which);
+ }
+
+ return 0;
+}
+
+static int resizer_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ if (source_fmt->format.code != sink_fmt->format.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
+ * resizer_init_formats - Initialize formats on all pads
+ * @sd: ISP RESIZER V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static int resizer_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+ format.pad = RESIZER_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = V4L2_MBUS_FMT_UYVY8_1X16;
+ format.format.width = 4096;
+ format.format.height = 4096;
+ resizer_set_format(sd, fh, &format);
+
+ return 0;
+}
+
+/* V4L2 subdev video operations */
+static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
+ .s_stream = resizer_set_stream,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
+ .enum_mbus_code = resizer_enum_mbus_code,
+ .enum_frame_size = resizer_enum_frame_size,
+ .get_fmt = resizer_get_format,
+ .set_fmt = resizer_set_format,
+ .link_validate = resizer_link_validate,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops resizer_v4l2_ops = {
+ .video = &resizer_v4l2_video_ops,
+ .pad = &resizer_v4l2_pad_ops,
+};
+
+/* V4L2 subdev internal operations */
+static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
+ .open = resizer_init_formats,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+/*
+ * resizer_link_setup - Setup RESIZER connections
+ * @entity: RESIZER media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int resizer_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
+ struct iss_device *iss = to_iss_device(resizer);
+
+ switch (local->index | media_entity_type(remote->entity)) {
+ case RESIZER_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
+ /* Read from IPIPE or IPIPEIF. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ resizer->input = RESIZER_INPUT_NONE;
+ break;
+ }
+
+ if (resizer->input != RESIZER_INPUT_NONE)
+ return -EBUSY;
+
+ if (remote->entity == &iss->ipipeif.subdev.entity)
+ resizer->input = RESIZER_INPUT_IPIPEIF;
+ else if (remote->entity == &iss->ipipe.subdev.entity)
+ resizer->input = RESIZER_INPUT_IPIPE;
+
+ break;
+
+ case RESIZER_PAD_SOURCE_MEM | MEDIA_ENT_T_DEVNODE:
+ /* Write to memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (resizer->output & ~RESIZER_OUTPUT_MEMORY)
+ return -EBUSY;
+ resizer->output |= RESIZER_OUTPUT_MEMORY;
+ } else {
+ resizer->output &= ~RESIZER_OUTPUT_MEMORY;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations resizer_media_ops = {
+ .link_setup = resizer_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * resizer_init_entities - Initialize V4L2 subdev and media entity
+ * @resizer: ISS ISP RESIZER module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int resizer_init_entities(struct iss_resizer_device *resizer)
+{
+ struct v4l2_subdev *sd = &resizer->subdev;
+ struct media_pad *pads = resizer->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ resizer->input = RESIZER_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &resizer_v4l2_ops);
+ sd->internal_ops = &resizer_v4l2_internal_ops;
+ strlcpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
+ sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ v4l2_set_subdevdata(sd, resizer);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[RESIZER_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
+
+ me->ops = &resizer_media_ops;
+ ret = media_entity_init(me, RESIZER_PADS_NUM, pads, 0);
+ if (ret < 0)
+ return ret;
+
+ resizer_init_formats(sd, NULL);
+
+ resizer->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ resizer->video_out.ops = &resizer_video_ops;
+ resizer->video_out.iss = to_iss_device(resizer);
+ resizer->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
+ resizer->video_out.bpl_alignment = 32;
+ resizer->video_out.bpl_zero_padding = 1;
+ resizer->video_out.bpl_max = 0x1ffe0;
+
+ ret = omap4iss_video_init(&resizer->video_out, "ISP resizer a");
+ if (ret < 0)
+ return ret;
+
+ /* Connect the RESIZER subdev to the video node. */
+ ret = media_entity_create_link(&resizer->subdev.entity,
+ RESIZER_PAD_SOURCE_MEM,
+ &resizer->video_out.video.entity, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
+{
+ media_entity_cleanup(&resizer->subdev.entity);
+
+ v4l2_device_unregister_subdev(&resizer->subdev);
+ omap4iss_video_unregister(&resizer->video_out);
+}
+
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &resizer->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = omap4iss_video_register(&resizer->video_out, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ omap4iss_resizer_unregister_entities(resizer);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP RESIZER initialisation and cleanup
+ */
+
+/*
+ * omap4iss_resizer_init - RESIZER module initialization.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int omap4iss_resizer_init(struct iss_device *iss)
+{
+ struct iss_resizer_device *resizer = &iss->resizer;
+
+ resizer->state = ISS_PIPELINE_STREAM_STOPPED;
+ init_waitqueue_head(&resizer->wait);
+
+ return resizer_init_entities(resizer);
+}
+
+/*
+ * omap4iss_resizer_cleanup - RESIZER module cleanup.
+ * @iss: Device pointer specific to the OMAP4 ISS.
+ */
+void omap4iss_resizer_cleanup(struct iss_device *iss)
+{
+ /* FIXME: are you sure there's nothing to do? */
+}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.h b/drivers/staging/media/omap4iss/iss_resizer.h
new file mode 100644
index 000000000000..3727498b06a3
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_resizer.h
@@ -0,0 +1,75 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_RESIZER_H
+#define OMAP4_ISS_RESIZER_H
+
+#include "iss_video.h"
+
+enum resizer_input_entity {
+ RESIZER_INPUT_NONE,
+ RESIZER_INPUT_IPIPE,
+ RESIZER_INPUT_IPIPEIF
+};
+
+#define RESIZER_OUTPUT_MEMORY (1 << 0)
+
+/* Sink and source RESIZER pads */
+#define RESIZER_PAD_SINK 0
+#define RESIZER_PAD_SOURCE_MEM 1
+#define RESIZER_PADS_NUM 2
+
+/*
+ * struct iss_resizer_device - Structure for the RESIZER module to store its own
+ * information
+ * @subdev: V4L2 subdevice
+ * @pads: Sink and source media entity pads
+ * @formats: Active video formats
+ * @input: Active input
+ * @output: Active outputs
+ * @video_out: Output video node
+ * @error: A hardware error occurred during capture
+ * @state: Streaming state
+ * @wait: Wait queue used to stop the module
+ * @stopping: Stopping state
+ */
+struct iss_resizer_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RESIZER_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
+
+ enum resizer_input_entity input;
+ unsigned int output;
+ struct iss_video video_out;
+ unsigned int error;
+
+ enum iss_pipeline_stream_state state;
+ wait_queue_head_t wait;
+ atomic_t stopping;
+};
+
+struct iss_device;
+
+int omap4iss_resizer_init(struct iss_device *iss);
+void omap4iss_resizer_cleanup(struct iss_device *iss);
+int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
+ struct v4l2_device *vdev);
+void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer);
+
+int omap4iss_resizer_busy(struct iss_resizer_device *resizer);
+void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events);
+void omap4iss_resizer_restore_context(struct iss_device *iss);
+void omap4iss_resizer_max_rate(struct iss_resizer_device *resizer,
+ unsigned int *max_rate);
+
+#endif /* OMAP4_ISS_RESIZER_H */
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
new file mode 100644
index 000000000000..8c7f35029cd5
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -0,0 +1,1226 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "iss_video.h"
+#include "iss.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct iss_format_info formats[] = {
+ { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
+ { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
+ { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
+ { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
+ { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+ V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
+ { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
+ { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
+ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+ V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
+ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
+ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
+ { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
+ V4L2_MBUS_FMT_UYVY8_1X16, 0,
+ V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
+ { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
+ V4L2_MBUS_FMT_YUYV8_1X16, 0,
+ V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
+ { V4L2_MBUS_FMT_YUYV8_1_5X8, V4L2_MBUS_FMT_YUYV8_1_5X8,
+ V4L2_MBUS_FMT_YUYV8_1_5X8, 0,
+ V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
+};
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * iss_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
+ * @video: ISS video instance
+ * @mbus: v4l2_mbus_framefmt format (input)
+ * @pix: v4l2_pix_format format (output)
+ *
+ * Fill the output pix structure with information from the input mbus format.
+ * The bytesperline and sizeimage fields are computed from the requested bytes
+ * per line value in the pix format and information from the video instance.
+ *
+ * Return the number of padding bytes at end of line.
+ */
+static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int bpl = pix->bytesperline;
+ unsigned int min_bpl;
+ unsigned int i;
+
+ memset(pix, 0, sizeof(*pix));
+ pix->width = mbus->width;
+ pix->height = mbus->height;
+
+ /* Skip the last format in the loop so that it will be selected if no
+ * match is found.
+ */
+ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
+ if (formats[i].code == mbus->code)
+ break;
+ }
+
+ min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
+
+ /* Clamp the requested bytes per line value. If the maximum bytes per
+ * line value is zero, the module doesn't support user configurable line
+ * sizes. Override the requested value with the minimum in that case.
+ */
+ if (video->bpl_max)
+ bpl = clamp(bpl, min_bpl, video->bpl_max);
+ else
+ bpl = min_bpl;
+
+ if (!video->bpl_zero_padding || bpl != min_bpl)
+ bpl = ALIGN(bpl, video->bpl_alignment);
+
+ pix->pixelformat = formats[i].pixelformat;
+ pix->bytesperline = bpl;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->colorspace = mbus->colorspace;
+ pix->field = mbus->field;
+
+ /* FIXME: Special case for NV12! We should make this nicer... */
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+ pix->sizeimage += (pix->bytesperline * pix->height) / 2;
+
+ return bpl - min_bpl;
+}
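
As a sketch of the bytesperline/sizeimage arithmetic above, assuming a hypothetical UYVY (16 bits per pixel) 1280x720 frame on a video node with bpl_alignment = 32 and bpl_max left at zero:

    min_bpl   = 1280 * ALIGN(16, 8) / 8 = 2560 bytes
    bpl       = min_bpl = 2560           (bpl_max == 0, so no user override)
    sizeimage = 2560 * 720 = 1843200 bytes

For V4L2_PIX_FMT_NV12 the function adds another bpl * height / 2 bytes for the chroma plane.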
+
+static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
+ struct v4l2_mbus_framefmt *mbus)
+{
+ unsigned int i;
+
+ memset(mbus, 0, sizeof(*mbus));
+ mbus->width = pix->width;
+ mbus->height = pix->height;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].pixelformat == pix->pixelformat)
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(formats)))
+ return;
+
+ mbus->code = formats[i].code;
+ mbus->colorspace = pix->colorspace;
+ mbus->field = pix->field;
+}
+
+static struct v4l2_subdev *
+iss_video_remote_subdev(struct iss_video *video, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(&video->pad);
+
+ if (remote == NULL ||
+ media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* Return a pointer to the ISS video instance at the far end of the pipeline. */
+static struct iss_video *
+iss_video_far_end(struct iss_video *video)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->parent;
+ struct iss_video *far_end = NULL;
+
+ mutex_lock(&mdev->graph_mutex);
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ if (entity == &video->video.entity)
+ continue;
+
+ if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
+ continue;
+
+ far_end = to_iss_video(media_entity_to_video_device(entity));
+ if (far_end->type != video->type)
+ break;
+
+ far_end = NULL;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+ return far_end;
+}
+
+static int
+__iss_video_get_format(struct iss_video *video,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ mutex_unlock(&video->mutex);
+
+ if (ret)
+ return ret;
+
+ *format = fmt.format;
+ return 0;
+}
+
+static int
+iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
+{
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_pix_format pixfmt;
+ int ret;
+
+ ret = __iss_video_get_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ pixfmt.bytesperline = 0;
+ ret = iss_video_mbus_to_pix(video, &format, &pixfmt);
+
+ if (vfh->format.fmt.pix.pixelformat != pixfmt.pixelformat ||
+ vfh->format.fmt.pix.height != pixfmt.height ||
+ vfh->format.fmt.pix.width != pixfmt.width ||
+ vfh->format.fmt.pix.bytesperline != pixfmt.bytesperline ||
+ vfh->format.fmt.pix.sizeimage != pixfmt.sizeimage)
+ return -EINVAL;
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int iss_video_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
+ struct iss_video *video = vfh->video;
+
+ /* Revisit multi-planar support for NV12 */
+ *num_planes = 1;
+
+ sizes[0] = vfh->format.fmt.pix.sizeimage;
+ if (sizes[0] == 0)
+ return -EINVAL;
+
+ alloc_ctxs[0] = video->alloc_ctx;
+
+ *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
+
+ return 0;
+}
+
+static void iss_video_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+
+ if (buffer->iss_addr)
+ buffer->iss_addr = 0;
+}
+
+static int iss_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_video *video = vfh->video;
+ unsigned long size = vfh->format.fmt.pix.sizeimage;
+ dma_addr_t addr;
+
+ if (vb2_plane_size(vb, 0) < size)
+ return -ENOBUFS;
+
+ /* Refuse to prepare the buffer if the video node has registered an
+ * error. We don't need to take any lock here as the operation is
+ * inherently racy. The authoritative check will be performed in the
+ * queue handler, which can't return an error; this check is just a
+ * best-effort attempt to notify userspace as early as possible.
+ */
+ if (unlikely(video->error))
+ return -EIO;
+
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED(addr, 32)) {
+ dev_dbg(video->iss->dev,
+ "Buffer address must be aligned to 32 bytes boundary.\n");
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ buffer->iss_addr = addr;
+ return 0;
+}
+
+static void iss_video_buf_queue(struct vb2_buffer *vb)
+{
+ struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
+ struct iss_video *video = vfh->video;
+ struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->qlock, flags);
+
+ if (unlikely(video->error)) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&video->qlock, flags);
+ return;
+ }
+
+ empty = list_empty(&video->dmaqueue);
+ list_add_tail(&buffer->list, &video->dmaqueue);
+
+ spin_unlock_irqrestore(&video->qlock, flags);
+
+ if (empty) {
+ enum iss_pipeline_state state;
+ unsigned int start;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISS_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state |= state;
+ video->ops->queue(video, buffer);
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_QUEUED;
+
+ start = iss_pipeline_ready(pipe);
+ if (start)
+ pipe->state |= ISS_PIPELINE_STREAM;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ if (start)
+ omap4iss_pipeline_set_stream(pipe,
+ ISS_PIPELINE_STREAM_SINGLESHOT);
+ }
+}
+
+static struct vb2_ops iss_video_vb2ops = {
+ .queue_setup = iss_video_queue_setup,
+ .buf_prepare = iss_video_buf_prepare,
+ .buf_queue = iss_video_buf_queue,
+ .buf_cleanup = iss_video_buf_cleanup,
+};
+
+/*
+ * omap4iss_video_buffer_next - Complete the current buffer and return the next
+ * @video: ISS video object
+ *
+ * Remove the current video buffer from the DMA queue and fill its timestamp,
+ * field count and state fields before waking up its completion handler.
+ *
+ * For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The DMA queue is expected to contain at least one buffer.
+ *
+ * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
+ * empty.
+ */
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
+{
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ enum iss_pipeline_state state;
+ struct iss_buffer *buf;
+ unsigned long flags;
+ struct timespec ts;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (WARN_ON(list_empty(&video->dmaqueue))) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ return NULL;
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&video->qlock, flags);
+
+ ktime_get_ts(&ts);
+ buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+ buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+ /* Do frame number propagation only if this is the output video node.
+ * Frame number either comes from the CSI receivers or it gets
+ * incremented here if H3A is not active.
+ * Note: There is no guarantee that the output buffer will finish
+ * first, so the input number might lag behind by 1 in some cases.
+ */
+ if (video == pipe->output && !pipe->do_propagation)
+ buf->vb.v4l2_buf.sequence =
+ atomic_inc_return(&pipe->frame_number);
+ else
+ buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+
+ vb2_buffer_done(&buf->vb, pipe->error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ pipe->error = false;
+
+ spin_lock_irqsave(&video->qlock, flags);
+ if (list_empty(&video->dmaqueue)) {
+ spin_unlock_irqrestore(&video->qlock, flags);
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_QUEUE_OUTPUT
+ | ISS_PIPELINE_STREAM;
+ else
+ state = ISS_PIPELINE_QUEUE_INPUT
+ | ISS_PIPELINE_STREAM;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+ return NULL;
+ }
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
+ spin_lock(&pipe->lock);
+ pipe->state &= ~ISS_PIPELINE_STREAM;
+ spin_unlock(&pipe->lock);
+ }
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ spin_unlock_irqrestore(&video->qlock, flags);
+ buf->vb.state = VB2_BUF_STATE_ACTIVE;
+ return buf;
+}
+
+/*
+ * omap4iss_video_cancel_stream - Cancel stream on a video node
+ * @video: ISS video object
+ *
+ * Cancelling a stream marks all buffers on the video node as erroneous and
+ * makes sure no new buffer can be queued.
+ */
+void omap4iss_video_cancel_stream(struct iss_video *video)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->qlock, flags);
+
+ while (!list_empty(&video->dmaqueue)) {
+ struct iss_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ video->error = true;
+
+ spin_unlock_irqrestore(&video->qlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct iss_video *video = video_drvdata(file);
+
+ strlcpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int
+iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_mbus_framefmt format;
+ unsigned int index = f->index;
+ unsigned int i;
+ int ret;
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ ret = __iss_video_get_format(video, &format);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ const struct iss_format_info *info = &formats[i];
+
+ if (format.code != info->code)
+ continue;
+
+ if (index == 0) {
+ f->pixelformat = info->pixelformat;
+ strlcpy(f->description, info->description,
+ sizeof(f->description));
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+
+static int
+iss_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ *format = vfh->format;
+ mutex_unlock(&video->mutex);
+
+ return 0;
+}
+
+static int
+iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_mbus_framefmt fmt;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+
+ /* Fill the bytesperline and sizeimage fields by converting to media bus
+ * format and back to pixel format.
+ */
+ iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
+ iss_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+
+ vfh->format = *format;
+
+ mutex_unlock(&video->mutex);
+ return 0;
+}
+
+static int
+iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ if (format->type != video->type)
+ return -EINVAL;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ iss_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
+
+ fmt.pad = pad;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ iss_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
+ return 0;
+}
+
+static int
+iss_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, NULL);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
+ mutex_unlock(&video->mutex);
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *subdev;
+ u32 pad;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ /* Try the get crop operation first and fall back to get format if not
+ * implemented.
+ */
+ ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ format.pad = pad;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+ crop->c.left = 0;
+ crop->c.top = 0;
+ crop->c.width = format.format.width;
+ crop->c.height = format.format.height;
+
+ return 0;
+}
+
+static int
+iss_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = iss_video_remote_subdev(video, NULL);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+ mutex_unlock(&video->mutex);
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
+iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ memset(a, 0, sizeof(*a));
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe = vfh->timeperframe;
+
+ return 0;
+}
+
+static int
+iss_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+
+ if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ video->type != a->type)
+ return -EINVAL;
+
+ if (a->parm.output.timeperframe.denominator == 0)
+ a->parm.output.timeperframe.denominator = 1;
+
+ vfh->timeperframe = a->parm.output.timeperframe;
+
+ return 0;
+}
+
+static int
+iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_reqbufs(&vfh->queue, rb);
+}
+
+static int
+iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_querybuf(&vfh->queue, b);
+}
+
+static int
+iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_qbuf(&vfh->queue, b);
+}
+
+static int
+iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+
+ return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * Stream management
+ *
+ * Every ISS pipeline has a single input and a single output. The input can be
+ * either a sensor or a video node. The output is always a video node.
+ *
+ * As every pipeline has an output video node, the ISS video objects at the
+ * pipeline output stores the pipeline state. It tracks the streaming state of
+ * both the input and output, as well as the availability of buffers.
+ *
+ * In sensor-to-memory mode, frames are always available at the pipeline input.
+ * Starting the sensor usually requires I2C transfers and must be done in
+ * interruptible context. The pipeline is started and stopped synchronously
+ * to the stream on/off commands. All modules in the pipeline will get their
+ * subdev set stream handler called. The module at the end of the pipeline must
+ * delay starting the hardware until buffers are available at its output.
+ *
+ * In memory-to-memory mode, starting/stopping the stream requires
+ * synchronization between the input and output. ISS modules can't be stopped
+ * in the middle of a frame, and at least some of the modules seem to become
+ * busy as soon as they're started, even if they don't receive a frame start
+ * event. For that reason frames need to be processed in single-shot mode. The
+ * driver needs to wait until a frame is completely processed and written to
+ * memory before restarting the pipeline for the next frame. Pipelined
+ * processing might be possible but requires more testing.
+ *
+ * Stream start must be delayed until buffers are available at both the input
+ * and output. The pipeline must be started in the videobuf queue callback with
+ * the buffer queue spinlock held. The modules' subdev set stream operation must
+ * not sleep.
+ */
+static int
+iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct media_entity_graph graph;
+ struct media_entity *entity;
+ enum iss_pipeline_state state;
+ struct iss_pipeline *pipe;
+ struct iss_video *far_end;
+ unsigned long flags;
+ int ret;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ /* Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ */
+ pipe = video->video.entity.pipe
+ ? to_iss_pipeline(&video->video.entity) : &video->pipe;
+ pipe->external = NULL;
+ pipe->external_rate = 0;
+ pipe->external_bpp = 0;
+ pipe->entities = 0;
+
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, true);
+
+ ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto err_media_entity_pipeline_start;
+
+ entity = &video->video.entity;
+ media_entity_graph_walk_start(&graph, entity);
+ while ((entity = media_entity_graph_walk_next(&graph)))
+ pipe->entities |= 1 << entity->id;
+
+ /* Verify that the currently configured format matches the output of
+ * the connected subdev.
+ */
+ ret = iss_video_check_format(video, vfh);
+ if (ret < 0)
+ goto err_iss_video_check_format;
+
+ video->bpl_padding = ret;
+ video->bpl_value = vfh->format.fmt.pix.bytesperline;
+
+ /* Find the ISS video node connected at the far end of the pipeline and
+ * update the pipeline.
+ */
+ far_end = iss_video_far_end(video);
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
+ pipe->input = far_end;
+ pipe->output = video;
+ } else {
+ if (far_end == NULL) {
+ ret = -EPIPE;
+ goto err_iss_video_check_format;
+ }
+
+ state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
+ pipe->input = video;
+ pipe->output = far_end;
+ }
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~ISS_PIPELINE_STREAM;
+ pipe->state |= state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Set the maximum time per frame as the value requested by userspace.
+ * This is a soft limit that can be overridden if the hardware doesn't
+ * support the requested limit.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pipe->max_timeperframe = vfh->timeperframe;
+
+ video->queue = &vfh->queue;
+ INIT_LIST_HEAD(&video->dmaqueue);
+ spin_lock_init(&video->qlock);
+ video->error = false;
+ atomic_set(&pipe->frame_number, -1);
+
+ ret = vb2_streamon(&vfh->queue, type);
+ if (ret < 0)
+ goto err_iss_video_check_format;
+
+ /* In sensor-to-memory mode, the stream can be started synchronously
+ * to the stream on command. In memory-to-memory mode, it will be
+ * started when buffers are queued on both the input and output.
+ */
+ if (pipe->input == NULL) {
+ unsigned long flags;
+ ret = omap4iss_pipeline_set_stream(pipe,
+ ISS_PIPELINE_STREAM_CONTINUOUS);
+ if (ret < 0)
+ goto err_omap4iss_set_stream;
+ spin_lock_irqsave(&video->qlock, flags);
+ if (list_empty(&video->dmaqueue))
+ video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&video->qlock, flags);
+ }
+
+ mutex_unlock(&video->stream_lock);
+ return 0;
+
+err_omap4iss_set_stream:
+ vb2_streamoff(&vfh->queue, type);
+err_iss_video_check_format:
+ media_entity_pipeline_stop(&video->video.entity);
+err_media_entity_pipeline_start:
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, false);
+ video->queue = NULL;
+
+ mutex_unlock(&video->stream_lock);
+ return ret;
+}
+
+static int
+iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(fh);
+ struct iss_video *video = video_drvdata(file);
+ struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
+ enum iss_pipeline_state state;
+ unsigned long flags;
+
+ if (type != video->type)
+ return -EINVAL;
+
+ mutex_lock(&video->stream_lock);
+
+ if (!vb2_is_streaming(&vfh->queue))
+ goto done;
+
+ /* Update the pipeline state. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ state = ISS_PIPELINE_STREAM_OUTPUT
+ | ISS_PIPELINE_QUEUE_OUTPUT;
+ else
+ state = ISS_PIPELINE_STREAM_INPUT
+ | ISS_PIPELINE_QUEUE_INPUT;
+
+ spin_lock_irqsave(&pipe->lock, flags);
+ pipe->state &= ~state;
+ spin_unlock_irqrestore(&pipe->lock, flags);
+
+ /* Stop the stream. */
+ omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
+ vb2_streamoff(&vfh->queue, type);
+ video->queue = NULL;
+
+ if (video->iss->pdata->set_constraints)
+ video->iss->pdata->set_constraints(video->iss, false);
+ media_entity_pipeline_stop(&video->video.entity);
+
+done:
+ mutex_unlock(&video->stream_lock);
+ return 0;
+}
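
To make the stream management flow described above more concrete, here is a minimal userspace capture sketch against the video node this file registers. The buffer count, the 1280x720 UYVY format and the omission of error handling and mmap() are assumptions made only for illustration; they are not part of the driver.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int capture_one_frame(const char *devnode)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers req;
	struct v4l2_format fmt;
	struct v4l2_buffer buf;
	int fd = open(devnode, O_RDWR);

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 1280;		/* hypothetical frame size */
	fmt.fmt.pix.height = 720;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	ioctl(fd, VIDIOC_S_FMT, &fmt);		/* iss_video_set_format() fills bpl/sizeimage */

	memset(&req, 0, sizeof(req));
	req.count = 4;				/* hypothetical buffer count */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;		/* the queue only allows VB2_MMAP */
	ioctl(fd, VIDIOC_REQBUFS, &req);	/* handled by iss_video_reqbufs() */

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	ioctl(fd, VIDIOC_QBUF, &buf);		/* lands on the DMA queue */

	ioctl(fd, VIDIOC_STREAMON, &type);	/* iss_video_streamon() */
	ioctl(fd, VIDIOC_DQBUF, &buf);		/* blocks until the frame completes */
	ioctl(fd, VIDIOC_STREAMOFF, &type);	/* iss_video_streamoff() */

	close(fd);
	return 0;
}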
+
+static int
+iss_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ strlcpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int
+iss_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int
+iss_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
+ .vidioc_querycap = iss_video_querycap,
+ .vidioc_enum_fmt_vid_cap = iss_video_enum_format,
+ .vidioc_g_fmt_vid_cap = iss_video_get_format,
+ .vidioc_s_fmt_vid_cap = iss_video_set_format,
+ .vidioc_try_fmt_vid_cap = iss_video_try_format,
+ .vidioc_g_fmt_vid_out = iss_video_get_format,
+ .vidioc_s_fmt_vid_out = iss_video_set_format,
+ .vidioc_try_fmt_vid_out = iss_video_try_format,
+ .vidioc_cropcap = iss_video_cropcap,
+ .vidioc_g_crop = iss_video_get_crop,
+ .vidioc_s_crop = iss_video_set_crop,
+ .vidioc_g_parm = iss_video_get_param,
+ .vidioc_s_parm = iss_video_set_param,
+ .vidioc_reqbufs = iss_video_reqbufs,
+ .vidioc_querybuf = iss_video_querybuf,
+ .vidioc_qbuf = iss_video_qbuf,
+ .vidioc_dqbuf = iss_video_dqbuf,
+ .vidioc_streamon = iss_video_streamon,
+ .vidioc_streamoff = iss_video_streamoff,
+ .vidioc_enum_input = iss_video_enum_input,
+ .vidioc_g_input = iss_video_g_input,
+ .vidioc_s_input = iss_video_s_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static int iss_video_open(struct file *file)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct iss_video_fh *handle;
+ struct vb2_queue *q;
+ int ret = 0;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(&handle->vfh, &video->video);
+ v4l2_fh_add(&handle->vfh);
+
+ /* If this is the first user, initialise the pipeline. */
+ if (omap4iss_get(video->iss) == NULL) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ ret = omap4iss_pipeline_pm_use(&video->video.entity, 1);
+ if (ret < 0) {
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ video->alloc_ctx = vb2_dma_contig_init_ctx(video->iss->dev);
+ if (IS_ERR(video->alloc_ctx)) {
+ ret = PTR_ERR(video->alloc_ctx);
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ q = &handle->queue;
+
+ q->type = video->type;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = handle;
+ q->ops = &iss_video_vb2ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct iss_buffer);
+ q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ omap4iss_put(video->iss);
+ goto done;
+ }
+
+ memset(&handle->format, 0, sizeof(handle->format));
+ handle->format.type = video->type;
+ handle->timeperframe.denominator = 1;
+
+ handle->video = video;
+ file->private_data = &handle->vfh;
+
+done:
+ if (ret < 0) {
+ v4l2_fh_del(&handle->vfh);
+ kfree(handle);
+ }
+
+ return ret;
+}
+
+static int iss_video_release(struct file *file)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+ struct iss_video_fh *handle = to_iss_video_fh(vfh);
+
+ /* Disable streaming and free the buffers queue resources. */
+ iss_video_streamoff(file, vfh, video->type);
+
+ omap4iss_pipeline_pm_use(&video->video.entity, 0);
+
+ /* Release the videobuf2 queue */
+ vb2_queue_release(&handle->queue);
+
+ /* Release the file handle. */
+ v4l2_fh_del(vfh);
+ kfree(handle);
+ file->private_data = NULL;
+
+ omap4iss_put(video->iss);
+
+ return 0;
+}
+
+static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+ return vb2_poll(&vfh->queue, file, wait);
+}
+
+static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
+
+ return vb2_mmap(&vfh->queue, vma);
+}
+
+static struct v4l2_file_operations iss_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = iss_video_open,
+ .release = iss_video_release,
+ .poll = iss_video_poll,
+ .mmap = iss_video_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * ISS video core
+ */
+
+static const struct iss_video_operations iss_video_dummy_ops = {
+};
+
+int omap4iss_video_init(struct iss_video *video, const char *name)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&video->mutex);
+ atomic_set(&video->active, 0);
+
+ spin_lock_init(&video->pipe.lock);
+ mutex_init(&video->stream_lock);
+
+ /* Initialize the video device. */
+ if (video->ops == NULL)
+ video->ops = &iss_video_dummy_ops;
+
+ video->video.fops = &iss_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name),
+ "OMAP4 ISS %s %s", name, direction);
+ video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &iss_video_ioctl_ops;
+ video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;
+
+ video_set_drvdata(&video->video, video);
+
+ return 0;
+}
+
+void omap4iss_video_cleanup(struct iss_video *video)
+{
+ media_entity_cleanup(&video->video.entity);
+ mutex_destroy(&video->stream_lock);
+ mutex_destroy(&video->mutex);
+}
+
+int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->video.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ dev_err(video->iss->dev,
+ "%s: could not register video device (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+void omap4iss_video_unregister(struct iss_video *video)
+{
+ video_unregister_device(&video->video);
+}
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
new file mode 100644
index 000000000000..878e4a3082e7
--- /dev/null
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -0,0 +1,204 @@
+/*
+ * TI OMAP4 ISS V4L2 Driver - Generic video node
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Author: Sergio Aguirre <sergio.a.aguirre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef OMAP4_ISS_VIDEO_H
+#define OMAP4_ISS_VIDEO_H
+
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define ISS_VIDEO_DRIVER_NAME "issvideo"
+#define ISS_VIDEO_DRIVER_VERSION "0.0.2"
+
+struct iss_device;
+struct iss_video;
+struct v4l2_mbus_framefmt;
+struct v4l2_pix_format;
+
+/*
+ * struct iss_format_info - ISS media bus format information
+ * @code: V4L2 media bus format code
+ * @truncated: V4L2 media bus format code for the same format truncated to 10
+ * bits. Identical to @code if the format is 10 bits wide or less.
+ * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
+ * format. Identical to @code if the format is not DPCM compressed.
+ * @flavor: V4L2 media bus format code for the same pixel layout but
+ * shifted to be 8 bits per pixel. =0 if format is not shiftable.
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @bpp: Bits per pixel
+ * @description: Human-readable format description
+ */
+struct iss_format_info {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_mbus_pixelcode truncated;
+ enum v4l2_mbus_pixelcode uncompressed;
+ enum v4l2_mbus_pixelcode flavor;
+ u32 pixelformat;
+ unsigned int bpp;
+ const char *description;
+};
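
For a concrete reading of these fields, the DPCM entry of the formats[] table in iss_video.c maps onto them as follows:

    code         = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8  (10-bit GRBG compressed to 8 bits)
    truncated    = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8  (already 10 bits wide or less, so unchanged)
    uncompressed = V4L2_MBUS_FMT_SGRBG10_1X10       (the matching uncompressed format)
    flavor       = 0                                (no 8-bit shifted variant)
    pixelformat  = V4L2_PIX_FMT_SGRBG10DPCM8, bpp = 8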
+
+enum iss_pipeline_stream_state {
+ ISS_PIPELINE_STREAM_STOPPED = 0,
+ ISS_PIPELINE_STREAM_CONTINUOUS = 1,
+ ISS_PIPELINE_STREAM_SINGLESHOT = 2,
+};
+
+enum iss_pipeline_state {
+ /* The stream has been started on the input video node. */
+ ISS_PIPELINE_STREAM_INPUT = 1,
+ /* The stream has been started on the output video node. */
+ ISS_PIPELINE_STREAM_OUTPUT = (1 << 1),
+ /* At least one buffer is queued on the input video node. */
+ ISS_PIPELINE_QUEUE_INPUT = (1 << 2),
+ /* At least one buffer is queued on the output video node. */
+ ISS_PIPELINE_QUEUE_OUTPUT = (1 << 3),
+ /* The input entity is idle, ready to be started. */
+ ISS_PIPELINE_IDLE_INPUT = (1 << 4),
+ /* The output entity is idle, ready to be started. */
+ ISS_PIPELINE_IDLE_OUTPUT = (1 << 5),
+ /* The pipeline is currently streaming. */
+ ISS_PIPELINE_STREAM = (1 << 6),
+};
+
+/*
+ * struct iss_pipeline - An OMAP4 ISS hardware pipeline
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
+ * @error: A hardware error occurred during capture
+ */
+struct iss_pipeline {
+ struct media_pipeline pipe;
+ spinlock_t lock; /* Pipeline state and queue flags */
+ unsigned int state;
+ enum iss_pipeline_stream_state stream_state;
+ struct iss_video *input;
+ struct iss_video *output;
+ unsigned int entities;
+ atomic_t frame_number;
+ bool do_propagation; /* of frame number */
+ bool error;
+ struct v4l2_fract max_timeperframe;
+ struct v4l2_subdev *external;
+ unsigned int external_rate;
+ int external_bpp;
+};
+
+#define to_iss_pipeline(__e) \
+ container_of((__e)->pipe, struct iss_pipeline, pipe)
+
+static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
+{
+ return pipe->state == (ISS_PIPELINE_STREAM_INPUT |
+ ISS_PIPELINE_STREAM_OUTPUT |
+ ISS_PIPELINE_QUEUE_INPUT |
+ ISS_PIPELINE_QUEUE_OUTPUT |
+ ISS_PIPELINE_IDLE_INPUT |
+ ISS_PIPELINE_IDLE_OUTPUT);
+}
+
+/*
+ * struct iss_buffer - ISS buffer
+ * @vb: videobuf2 buffer
+ * @list: Buffer queue list entry
+ * @iss_addr: Physical address of the buffer.
+ */
+struct iss_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t iss_addr;
+};
+
+#define to_iss_buffer(buf) container_of(buf, struct iss_buffer, vb)
+
+enum iss_video_dmaqueue_flags {
+ /* Set if DMA queue becomes empty when ISS_PIPELINE_STREAM_CONTINUOUS */
+ ISS_VIDEO_DMAQUEUE_UNDERRUN = (1 << 0),
+ /* Set when queuing buffer to an empty DMA queue */
+ ISS_VIDEO_DMAQUEUE_QUEUED = (1 << 1),
+};
+
+#define iss_video_dmaqueue_flags_clr(video) \
+ ({ (video)->dmaqueue_flags = 0; })
+
+/*
+ * struct iss_video_operations - ISS video operations
+ * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
+ * if there was no buffer previously queued.
+ */
+struct iss_video_operations {
+ int (*queue)(struct iss_video *video, struct iss_buffer *buffer);
+};
+
+struct iss_video {
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex mutex; /* format and crop settings */
+ atomic_t active;
+
+ struct iss_device *iss;
+
+ unsigned int capture_mem;
+ unsigned int bpl_alignment; /* alignment value */
+ unsigned int bpl_zero_padding; /* whether the alignment is optional */
+ unsigned int bpl_max; /* maximum bytes per line value */
+ unsigned int bpl_value; /* bytes per line value */
+ unsigned int bpl_padding; /* padding at end of line */
+
+ /* Pipeline state */
+ struct iss_pipeline pipe;
+ struct mutex stream_lock; /* pipeline and stream states */
+ bool error;
+
+ /* Video buffers queue */
+ struct vb2_queue *queue;
+ spinlock_t qlock; /* protects dmaqueue and error */
+ struct list_head dmaqueue;
+ enum iss_video_dmaqueue_flags dmaqueue_flags;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ const struct iss_video_operations *ops;
+};
+
+#define to_iss_video(vdev) container_of(vdev, struct iss_video, video)
+
+struct iss_video_fh {
+ struct v4l2_fh vfh;
+ struct iss_video *video;
+ struct vb2_queue queue;
+ struct v4l2_format format;
+ struct v4l2_fract timeperframe;
+};
+
+#define to_iss_video_fh(fh) container_of(fh, struct iss_video_fh, vfh)
+#define iss_video_queue_to_iss_video_fh(q) \
+ container_of(q, struct iss_video_fh, queue)
+
+int omap4iss_video_init(struct iss_video *video, const char *name);
+void omap4iss_video_cleanup(struct iss_video *video);
+int omap4iss_video_register(struct iss_video *video,
+ struct v4l2_device *vdev);
+void omap4iss_video_unregister(struct iss_video *video);
+struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video);
+void omap4iss_video_cancel_stream(struct iss_video *video);
+struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
+
+const struct iss_format_info *
+omap4iss_video_format_info(enum v4l2_mbus_pixelcode code);
+
+#endif /* OMAP4_ISS_VIDEO_H */
diff --git a/drivers/media/usb/sn9c102/Kconfig b/drivers/staging/media/sn9c102/Kconfig
index 6ebaf2940d06..c9aba59258d9 100644
--- a/drivers/media/usb/sn9c102/Kconfig
+++ b/drivers/staging/media/sn9c102/Kconfig
@@ -1,14 +1,17 @@
config USB_SN9C102
tristate "USB SN9C1xx PC Camera Controller support (DEPRECATED)"
- depends on VIDEO_V4L2
+ depends on VIDEO_V4L2 && MEDIA_USB_SUPPORT
---help---
- This driver is DEPRECATED please use the gspca sonixb and
+ This driver is DEPRECATED, please use the gspca sonixb and
sonixj modules instead.
Say Y here if you want support for cameras based on SONiX SN9C101,
SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers.
- See <file:Documentation/video4linux/sn9c102.txt> for more info.
+ See <file:drivers/staging/media/sn9c102/sn9c102.txt> for more info.
+
+ If you have webcams that are only supported by this driver and not by
+ the gspca driver, then contact the linux-media mailing list.
To compile this driver as a module, choose M here: the
module will be called sn9c102.
diff --git a/drivers/media/usb/sn9c102/Makefile b/drivers/staging/media/sn9c102/Makefile
index 7ecd5a90c7c9..7ecd5a90c7c9 100644
--- a/drivers/media/usb/sn9c102/Makefile
+++ b/drivers/staging/media/sn9c102/Makefile
diff --git a/drivers/media/usb/sn9c102/sn9c102.h b/drivers/staging/media/sn9c102/sn9c102.h
index 8a917f060503..8a917f060503 100644
--- a/drivers/media/usb/sn9c102/sn9c102.h
+++ b/drivers/staging/media/sn9c102/sn9c102.h
diff --git a/drivers/staging/media/sn9c102/sn9c102.txt b/drivers/staging/media/sn9c102/sn9c102.txt
new file mode 100644
index 000000000000..b4f67040403a
--- /dev/null
+++ b/drivers/staging/media/sn9c102/sn9c102.txt
@@ -0,0 +1,592 @@
+
+ SN9C1xx PC Camera Controllers
+ Driver for Linux
+ =============================
+
+ - Documentation -
+
+
+Index
+=====
+1. Copyright
+2. Disclaimer
+3. License
+4. Overview and features
+5. Module dependencies
+6. Module loading
+7. Module parameters
+8. Optional device control through "sysfs"
+9. Supported devices
+10. Notes for V4L2 application developers
+11. Video frame formats
+12. Contact information
+13. Credits
+
+
+1. Copyright
+============
+Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it>
+
+
+2. Disclaimer
+=============
+SONiX is a trademark of SONiX Technology Company Limited, inc.
+This software is not sponsored or developed by SONiX.
+
+
+3. License
+==========
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+4. Overview and features
+========================
+This driver attempts to support the video interface of the devices based on
+the SONiX SN9C101, SN9C102, SN9C103, SN9C105 and SN9C120 PC Camera Controllers
+("SN9C1xx" from now on).
+
+The driver relies on the Video4Linux2 and USB core modules. It has been
+designed to run properly on SMP systems as well.
+
+The latest version of the SN9C1xx driver can be found at the following URL:
+http://www.linux-projects.org/
+
+Some of the features of the driver are:
+
+- full compliance with the Video4Linux2 API (see also "Notes for V4L2
+ application developers" paragraph);
+- available mmap or read/poll methods for video streaming through isochronous
+ data transfers;
+- automatic detection of image sensor;
+- support for built-in microphone interface;
+- support for any window resolution and optional panning within the maximum
+  pixel area of the image sensor;
+- image downscaling with arbitrary scaling factors of 1, 2 and 4 in both
+  directions (see "Notes for V4L2 application developers" paragraph);
+- two different video formats for uncompressed or compressed data in low or
+ high compression quality (see also "Notes for V4L2 application developers"
+ and "Video frame formats" paragraphs);
+- full support for the capabilities of many of the possible image sensors that
+ can be connected to the SN9C1xx bridges, including, for instance, red, green,
+ blue and global gain adjustments and exposure (see "Supported devices"
+ paragraph for details);
+- use of default color settings for sunlight conditions;
+- dynamic I/O interface for both SN9C1xx and image sensor control and
+ monitoring (see "Optional device control through 'sysfs'" paragraph);
+- dynamic driver control thanks to various module parameters (see "Module
+ parameters" paragraph);
+- up to 64 cameras can be handled at the same time; they can be connected and
+ disconnected from the host many times without turning off the computer, if
+ the system supports hotplugging;
+- no known bugs.
+
+
+5. Module dependencies
+======================
+For it to work properly, the driver needs kernel support for Video4Linux and
+USB.
+
+The following options of the kernel configuration file must be enabled and
+corresponding modules must be compiled:
+
+ # Multimedia devices
+ #
+ CONFIG_VIDEO_DEV=m
+
+To enable advanced debugging functionality on the device through /sysfs:
+
+ # Multimedia devices
+ #
+ CONFIG_VIDEO_ADV_DEBUG=y
+
+ # USB support
+ #
+ CONFIG_USB=m
+
+In addition, depending on the hardware being used, the modules below are
+necessary:
+
+ # USB Host Controller Drivers
+ #
+ CONFIG_USB_EHCI_HCD=m
+ CONFIG_USB_UHCI_HCD=m
+ CONFIG_USB_OHCI_HCD=m
+
+The SN9C103, SN9C105 and SN9C120 controllers also provide a built-in microphone
+interface. It is supported by the USB Audio driver thanks to the ALSA API:
+
+ # Sound
+ #
+ CONFIG_SOUND=y
+
+ # Advanced Linux Sound Architecture
+ #
+ CONFIG_SND=m
+
+ # USB devices
+ #
+ CONFIG_SND_USB_AUDIO=m
+
+And finally:
+
+ # USB Multimedia devices
+ #
+ CONFIG_USB_SN9C102=m
+
+
+6. Module loading
+=================
+To use the driver, it is necessary to load the "sn9c102" module into memory
+after all the other required modules: "videodev", "v4l2_common", "compat_ioctl32",
+"usbcore" and, depending on the USB host controller you have, "ehci-hcd",
+"uhci-hcd" or "ohci-hcd".
+
+Loading can be done as shown below:
+
+ [root@localhost home]# modprobe sn9c102
+
+Note that the module is called "sn9c102" for historic reasons, although it
+does not just support the SN9C102.
+
+At this point all the devices supported by the driver and connected to the USB
+ports should be recognized. You can invoke "dmesg" to analyze kernel messages
+and verify that the loading process has gone well:
+
+ [user@localhost home]$ dmesg
+
+or, to isolate all the kernel messages generated by the driver:
+
+ [user@localhost home]$ dmesg | grep sn9c102
+
+
+7. Module parameters
+====================
+Module parameters are listed below:
+-------------------------------------------------------------------------------
+Name: video_nr
+Type: short array (min = 0, max = 64)
+Syntax: <-1|n[,...]>
+Description: Specify V4L2 minor mode number:
+ -1 = use next available
+ n = use minor number n
+ You can specify up to 64 cameras this way.
+ For example:
+ video_nr=-1,2,-1 would assign minor number 2 to the second
+ recognized camera and use auto for the first one and for every
+ other camera.
+Default: -1
+-------------------------------------------------------------------------------
+Name: force_munmap
+Type: bool array (min = 0, max = 64)
+Syntax: <0|1[,...]>
+Description: Force the application to unmap previously mapped buffer memory
+ before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
+ all the applications support this feature. This parameter is
+ specific for each detected camera.
+ 0 = do not force memory unmapping
+ 1 = force memory unmapping (save memory)
+Default: 0
+-------------------------------------------------------------------------------
+Name: frame_timeout
+Type: uint array (min = 0, max = 64)
+Syntax: <0|n[,...]>
+Description: Timeout for a video frame in seconds before returning an I/O
+ error; 0 for infinity. This parameter is specific for each
+ detected camera and can be changed at runtime thanks to the
+ /sys filesystem interface.
+Default: 2
+-------------------------------------------------------------------------------
+Name: debug
+Type: ushort
+Syntax: <n>
+Description: Debugging information level, from 0 to 3:
+ 0 = none (use carefully)
+ 1 = critical errors
+ 2 = significant information
+ 3 = more verbose messages
+ Level 3 is useful for testing only. It also shows some more
+ information about the hardware being detected.
+ This parameter can be changed at runtime thanks to the /sys
+ filesystem interface.
+Default: 2
+-------------------------------------------------------------------------------
+
+
+8. Optional device control through "sysfs" [1]
+==========================================
+If the kernel has been compiled with the CONFIG_VIDEO_ADV_DEBUG option enabled,
+it is possible to read and write both the SN9C1xx and the image sensor
+registers by using the "sysfs" filesystem interface.
+
+Every time a supported device is recognized, a write-only file named "green" is
+created in the /sys/class/video4linux/videoX directory. You can set the green
+channel's gain by writing the desired value to it. The value may range from 0
+to 15 for the SN9C101 or SN9C102 bridges, and from 0 to 127 for the SN9C103,
+SN9C105 and SN9C120 bridges.
+Similarly, only for the SN9C103, SN9C105 and SN9C120 controllers, blue and red
+gain control files are available in the same directory, for which accepted
+values may range from 0 to 127.
+
+There are four other entries in the directory above for each registered camera:
+"reg", "val", "i2c_reg" and "i2c_val". The first two files control the
+SN9C1xx bridge, while the other two control the sensor chip. "reg" and
+"i2c_reg" hold the index of the register that subsequent read and write
+operations through "val" and "i2c_val" are addressed to. Their use is not
+intended for end-users. Note that "i2c_reg" and "i2c_val" will not be created
+if the sensor does not actually support the standard I2C protocol or its
+registers are not 8 bits long. Also, remember that you must be logged in as
+root before writing to them.
+
+As an example, suppose we want to read the value contained in register
+number 1 of the sensor register table - which is usually the product
+identifier - of the camera registered as "/dev/video0":
+
+ [root@localhost #] cd /sys/class/video4linux/video0
+ [root@localhost #] echo 1 > i2c_reg
+ [root@localhost #] cat i2c_val
+
+Note that "cat" will fail if sensor registers cannot be read.
+
+Now let's set the green gain's register of the SN9C101 or SN9C102 chips to 2:
+
+ [root@localhost #] echo 0x11 > reg
+ [root@localhost #] echo 2 > val
+
+Note that the SN9C1xx always returns 0 when some of its registers are read.
+To avoid race conditions, all the I/O accesses to the above files are
+serialized.
+The sysfs interface also provides the "frame_header" entry, which exports the
+frame header of the most recently requested and captured video frame. The
+header is always 18 bytes long and is appended to every video frame by the
+SN9C1xx controllers. As an example, this additional information can be used
+by the user application for implementing auto-exposure features via software.
+
+The following table describes the frame header exported by the SN9C101 and
+SN9C102:
+
+Byte # Value or bits Description
+------ ------------- -----------
+0x00 0xFF Frame synchronisation pattern
+0x01 0xFF Frame synchronisation pattern
+0x02 0x00 Frame synchronisation pattern
+0x03 0xC4 Frame synchronisation pattern
+0x04 0xC4 Frame synchronisation pattern
+0x05 0x96 Frame synchronisation pattern
+0x06    [3:0]         Red channel gain control = (1+R_GAIN/8)
+ [7:4] Blue channel gain control = (1+B_GAIN/8)
+0x07 [ 0 ] Compression mode. 0=No compression, 1=Compression enabled
+ [2:1] Maximum scale factor for compression
+ [ 3 ] 1 = USB fifo(2K bytes) is full
+        [ 4 ]         1 = Digital gain is finished
+        [ 5 ]         1 = Exposure is finished
+ [7:6] Frame index
+0x08 [7:0] Y sum inside Auto-Exposure area (low-byte)
+0x09 [7:0] Y sum inside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 32
+0x0A [7:0] Y sum outside Auto-Exposure area (low-byte)
+0x0B [7:0] Y sum outside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 128
+0x0C 0xXX Not used
+0x0D 0xXX Not used
+0x0E 0xXX Not used
+0x0F 0xXX Not used
+0x10 0xXX Not used
+0x11 0xXX Not used
+
+The following table describes the frame header exported by the SN9C103:
+
+Byte # Value or bits Description
+------ ------------- -----------
+0x00 0xFF Frame synchronisation pattern
+0x01 0xFF Frame synchronisation pattern
+0x02 0x00 Frame synchronisation pattern
+0x03 0xC4 Frame synchronisation pattern
+0x04 0xC4 Frame synchronisation pattern
+0x05 0x96 Frame synchronisation pattern
+0x06    [6:0]         Red channel gain control = (1/2+R_GAIN/64)
+0x07 [6:0] Blue channel gain control = (1/2+B_GAIN/64)
+ [7:4]
+0x08 [ 0 ] Compression mode. 0=No compression, 1=Compression enabled
+ [2:1] Maximum scale factor for compression
+ [ 3 ] 1 = USB fifo(2K bytes) is full
+        [ 4 ]         1 = Digital gain is finished
+        [ 5 ]         1 = Exposure is finished
+ [7:6] Frame index
+0x09 [7:0] Y sum inside Auto-Exposure area (low-byte)
+0x0A [7:0] Y sum inside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 32
+0x0B [7:0] Y sum outside Auto-Exposure area (low-byte)
+0x0C [7:0] Y sum outside Auto-Exposure area (high-byte)
+ where Y sum = (R/4 + 5G/16 + B/8) / 128
+0x0D [1:0] Audio frame number
+ [ 2 ] 1 = Audio is recording
+0x0E [7:0] Audio summation (low-byte)
+0x0F [7:0] Audio summation (high-byte)
+0x10 [7:0] Audio sample count
+0x11 [7:0] Audio peak data in audio frame
+
+The AE area (sx, sy, ex, ey) in the active window can be set by programming the
+registers 0x1c, 0x1d, 0x1e and 0x1f of the SN9C1xx controllers, where one unit
+corresponds to 32 pixels.
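+
+As an illustration of how the header described in the tables above might be
+used, the sketch below (plain user-space C, not part of the driver) extracts
+the two Y sums from an 18-byte header laid out as on the SN9C101/SN9C102. It
+assumes that the "frame_header" sysfs entry exposes the raw header bytes,
+which should be verified on the kernel in use:
+
+  /* Hypothetical example: parse an SN9C101/SN9C102 18-byte frame header. */
+  #include <stdint.h>
+
+  struct sn9c102_ae_sums {
+          unsigned int y_inside;   /* Y sum inside the AE area  */
+          unsigned int y_outside;  /* Y sum outside the AE area */
+  };
+
+  static int parse_frame_header(const uint8_t h[18],
+                                struct sn9c102_ae_sums *sums)
+  {
+          /* Bytes 0x00-0x05 must hold the synchronisation pattern. */
+          if (h[0] != 0xFF || h[1] != 0xFF || h[2] != 0x00 ||
+              h[3] != 0xC4 || h[4] != 0xC4 || h[5] != 0x96)
+                  return -1;
+
+          /* Bytes 0x08/0x09 and 0x0A/0x0B are low-byte/high-byte pairs. */
+          sums->y_inside  = h[0x08] | (h[0x09] << 8);
+          sums->y_outside = h[0x0A] | (h[0x0B] << 8);
+          return 0;
+  }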
+
+[1] The frame headers exported by the SN9C105 and SN9C120 are not described.
+
+
+9. Supported devices
+====================
+Neither the names of the companies nor the names of their products will be
+mentioned here. They have never collaborated with the author, so no
+advertising.
+
+From the point of view of a driver, a device is unambiguously identified by
+its USB vendor and product identifiers. Below is a list of known identifiers
+of devices built around the SN9C1xx PC camera controllers:
+
+Vendor ID Product ID
+--------- ----------
+0x0458 0x7025
+0x045e 0x00f5
+0x045e 0x00f7
+0x0471 0x0327
+0x0471 0x0328
+0x0c45 0x6001
+0x0c45 0x6005
+0x0c45 0x6007
+0x0c45 0x6009
+0x0c45 0x600d
+0x0c45 0x6011
+0x0c45 0x6019
+0x0c45 0x6024
+0x0c45 0x6025
+0x0c45 0x6028
+0x0c45 0x6029
+0x0c45 0x602a
+0x0c45 0x602b
+0x0c45 0x602c
+0x0c45 0x602d
+0x0c45 0x602e
+0x0c45 0x6030
+0x0c45 0x603f
+0x0c45 0x6080
+0x0c45 0x6082
+0x0c45 0x6083
+0x0c45 0x6088
+0x0c45 0x608a
+0x0c45 0x608b
+0x0c45 0x608c
+0x0c45 0x608e
+0x0c45 0x608f
+0x0c45 0x60a0
+0x0c45 0x60a2
+0x0c45 0x60a3
+0x0c45 0x60a8
+0x0c45 0x60aa
+0x0c45 0x60ab
+0x0c45 0x60ac
+0x0c45 0x60ae
+0x0c45 0x60af
+0x0c45 0x60b0
+0x0c45 0x60b2
+0x0c45 0x60b3
+0x0c45 0x60b8
+0x0c45 0x60ba
+0x0c45 0x60bb
+0x0c45 0x60bc
+0x0c45 0x60be
+0x0c45 0x60c0
+0x0c45 0x60c2
+0x0c45 0x60c8
+0x0c45 0x60cc
+0x0c45 0x60ea
+0x0c45 0x60ec
+0x0c45 0x60ef
+0x0c45 0x60fa
+0x0c45 0x60fb
+0x0c45 0x60fc
+0x0c45 0x60fe
+0x0c45 0x6102
+0x0c45 0x6108
+0x0c45 0x610f
+0x0c45 0x6130
+0x0c45 0x6138
+0x0c45 0x613a
+0x0c45 0x613b
+0x0c45 0x613c
+0x0c45 0x613e
+
+The list above does not imply that all those devices work with this driver: up
+until now only the ones that combine the following pairs of SN9C1xx bridges
+and image sensors are supported; kernel messages will always tell you whether
+this is the case (see "Module loading" paragraph):
+
+Image sensor / SN9C1xx bridge | SN9C10[12] SN9C103 SN9C105 SN9C120
+-------------------------------------------------------------------------------
+HV7131D Hynix Semiconductor | Yes No No No
+HV7131R Hynix Semiconductor | No Yes Yes Yes
+MI-0343 Micron Technology | Yes No No No
+MI-0360 Micron Technology | No Yes Yes Yes
+OV7630 OmniVision Technologies | Yes Yes Yes Yes
+OV7660 OmniVision Technologies | No No Yes Yes
+PAS106B PixArt Imaging | Yes No No No
+PAS202B PixArt Imaging | Yes Yes No No
+TAS5110C1B Taiwan Advanced Sensor | Yes No No No
+TAS5110D Taiwan Advanced Sensor | Yes No No No
+TAS5130D1B Taiwan Advanced Sensor | Yes No No No
+
+"Yes" means that the pair is supported by the driver, while "No" means that the
+pair does not exist or is not supported by the driver.
+
+Only some of the available control settings of each image sensor are supported
+through the V4L2 interface.
+
+Donations of new models for further testing and support would be much
+appreciated. Hardware that is not available to the author of this driver will
+not be supported.
+
+
+10. Notes for V4L2 application developers
+=========================================
+This driver follows the V4L2 API specifications. In particular, it enforces two
+rules:
+
+- exactly one I/O method, either "mmap" or "read", is associated with each
+file descriptor. Once it is selected, the application must close and reopen the
+device to switch to the other I/O method;
+
+- although it is not mandatory, previously mapped buffer memory should always
+be unmapped before calling any "VIDIOC_S_CROP" or "VIDIOC_S_FMT" ioctl's.
+The same number of buffers as before will be allocated again to match the size
+of the new video frames, so you have to map the buffers again before any I/O
+attempts on them.
+
+Consistent with the hardware limits, this driver also supports image
+downscaling with arbitrary scaling factors of 1, 2 and 4 in both directions.
+However, the V4L2 API specifications don't correctly define how the scaling
+factor can be chosen arbitrarily by the "negotiation" of the "source" and
+"target" rectangles. To work around this flaw, we have added the convention
+that, during the negotiation, whenever the "VIDIOC_S_CROP" ioctl is issued, the
+scaling factor is restored to 1.
+
+This driver supports two different video formats: the first one is the "8-bit
+Sequential Bayer" format and can be used to obtain uncompressed video data
+from the device through the current I/O method, while the second one provides
+either "raw" compressed video data (without frame headers not related to the
+compressed data) or standard JPEG (with frame headers). The compression quality
+may vary from 0 to 1 and can be selected or queried thanks to the
+VIDIOC_S_JPEGCOMP and VIDIOC_G_JPEGCOMP V4L2 ioctl's. For maximum flexibility,
+both the default active video format and the default compression quality
+depend on how the image sensor being used is initialized.
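+
+As a short illustration of the ioctl's mentioned above, a user application
+could select the compressed video format and the high compression quality
+roughly as follows. This is only a sketch with minimal error handling; the
+pixel format identifier should be checked against the <linux/videodev2.h>
+shipped with the kernel in use:
+
+  #include <fcntl.h>
+  #include <string.h>
+  #include <sys/ioctl.h>
+  #include <linux/videodev2.h>
+
+  int main(void)
+  {
+          struct v4l2_format fmt;
+          struct v4l2_jpegcompression jc;
+          int fd = open("/dev/video0", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+
+          memset(&fmt, 0, sizeof(fmt));
+          fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+          fmt.fmt.pix.width = 352;
+          fmt.fmt.pix.height = 288;
+          fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SN9C10X; /* compressed */
+          if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
+                  return 1;
+
+          memset(&jc, 0, sizeof(jc));
+          jc.quality = 1;                 /* 0 or 1, as described above */
+          if (ioctl(fd, VIDIOC_S_JPEGCOMP, &jc) < 0)
+                  return 1;
+
+          /* Buffer negotiation and streaming would start from here. */
+          return 0;
+  }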
+
+
+11. Video frame formats [1]
+=======================
+The SN9C1xx PC Camera Controllers can send images in two possible video
+formats over the USB: either native "Sequential RGB Bayer" or compressed.
+The compression is used to achieve high frame rates. With regard to the
+SN9C101, SN9C102 and SN9C103, the compression is based on the Huffman encoding
+algorithm described below, while with regard to the SN9C105 and SN9C120 the
+compression is based on the JPEG standard.
+The current video format may be selected or queried from the user application
+by calling the VIDIOC_S_FMT or VIDIOC_G_FMT ioctl's, as described in the V4L2
+API specifications.
+
+The name "Sequential Bayer" indicates the organization of the red, green and
+blue pixels in one video frame. Each pixel is associated with an 8-bit value
+and is laid out in memory according to the pattern shown below:
+
+B[0]     G[1]     B[2]     G[3]     ...      B[m-2]   G[m-1]
+G[m]     R[m+1]   G[m+2]   R[m+3]   ...      G[2m-2]  R[2m-1]
+...
+... B[(n-1)(m-2)] G[(n-1)(m-1)]
+... G[n(m-2)] R[n(m-1)]
+
+The above matrix also represents the sequential or progressive read-out mode of
+the (n, m) Bayer color filter array used in many CCD or CMOS image sensors.
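+
+As a minimal illustration of the layout above, the colour component of the
+pixel stored at position (row, col) of a "Sequential Bayer" frame can be
+derived as in the following sketch (not part of the driver):
+
+  /* Component of pixel (row, col) in the BGGR pattern shown above. */
+  enum bayer_component { BAYER_B, BAYER_G, BAYER_R };
+
+  static enum bayer_component bayer_component(unsigned int row,
+                                              unsigned int col)
+  {
+          if (row % 2 == 0)                            /* B G B G ... */
+                  return (col % 2 == 0) ? BAYER_B : BAYER_G;
+          return (col % 2 == 0) ? BAYER_G : BAYER_R;   /* G R G R ... */
+  }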
+
+The Huffman compressed video frame consists of a bitstream that encodes for
+every R, G, or B pixel the difference between the value of the pixel itself and
+some reference pixel value. Pixels are organised in the Bayer pattern and the
+Bayer sub-pixels are tracked individually and alternatingly. For example, in
+the first line values for the B and G1 pixels are alternatingly encoded, while
+in the second line values for the G2 and R pixels are alternatingly encoded.
+
+The pixel reference value is calculated as follows:
+- the 4 top left pixels are encoded in raw uncompressed 8-bit format;
+- the value in the top two rows is the value of the pixel left of the current
+ pixel;
+- the value in the left column is the value of the pixel above the current
+ pixel;
+- for all other pixels, the reference value is the average of the value of the
+ pixel on the left and the value of the pixel above the current pixel;
+- there is one code in the bitstream that specifies the value of a pixel
+ directly (in 4-bit resolution);
+- pixel values need to be clamped inside the range [0..255] for proper
+ decoding.
+
+The algorithm purely describes the conversion from compressed Bayer code used
+in the SN9C101, SN9C102 and SN9C103 chips to uncompressed Bayer. Additional
+steps are required to convert this to a color image (i.e. a color interpolation
+algorithm).
+
+The following Huffman codes have been found:
+0: +0 (relative to reference pixel value)
+100: +4
+101: -4?
+1110xxxx: set absolute value to xxxx.0000
+1101: +11
+1111: -11
+11001: +20
+110000: -20
+110001: ??? - these codes are apparently not used
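+
+A minimal sketch of a decoder for the code table above is given below. It
+only covers reading one code from the bitstream and turning it into either a
+delta against the reference value or an absolute value; the choice of the
+reference pixel and the final clamping follow the rules listed earlier and
+are not repeated here. Since the codes marked with "?" are uncertain, this
+is an illustration rather than a verified decoder:
+
+  #include <stdint.h>
+
+  struct bitreader {
+          const uint8_t *buf;
+          unsigned long pos;      /* bit position in the stream, MSB first */
+  };
+
+  static unsigned int get_bit(struct bitreader *br)
+  {
+          unsigned int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
+
+          br->pos++;
+          return bit;
+  }
+
+  /*
+   * Decode one pixel. "ref" is the reference value computed according to
+   * the rules above; the caller still has to clamp the result to [0..255].
+   */
+  static int decode_pixel(struct bitreader *br, int ref)
+  {
+          unsigned int i, val;
+
+          if (!get_bit(br))                        /* 0        -> +0  */
+                  return ref;
+          if (!get_bit(br))                        /* 10x             */
+                  return get_bit(br) ? ref - 4     /* 101      -> -4  */
+                                     : ref + 4;    /* 100      -> +4  */
+          if (get_bit(br)) {                       /* 111...          */
+                  if (get_bit(br))                 /* 1111     -> -11 */
+                          return ref - 11;
+                  for (i = 0, val = 0; i < 4; i++) /* 1110xxxx        */
+                          val = (val << 1) | get_bit(br);
+                  return val << 4;                 /* absolute value  */
+          }
+          if (get_bit(br))                         /* 1101     -> +11 */
+                  return ref + 11;
+          if (get_bit(br))                         /* 11001    -> +20 */
+                  return ref + 20;
+          if (!get_bit(br))                        /* 110000   -> -20 */
+                  return ref - 20;
+          return ref;                              /* 110001: unused  */
+  }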
+
+[1] The Huffman compression algorithm has been reverse-engineered and
+ documented by Bertrik Sikken.
+
+
+12. Contact information
+=======================
+The author may be contacted by e-mail at <luca.risolia@studio.unibo.it>.
+
+GPG/PGP encrypted e-mail's are accepted. The GPG key ID of the author is
+'FCE635A4'; the public 1024-bit key should be available at any keyserver;
+the fingerprint is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'.
+
+
+13. Credits
+===========
+Many thanks to the following persons for their contributions (listed in
+alphabetical order):
+
+- David Anderson for the donation of a webcam;
+- Luca Capello for the donation of a webcam;
+- Philippe Coval for having helped testing the PAS202BCA image sensor;
+- Joao Rodrigo Fuzaro, Joao Limirio, Claudio Filho and Caio Begotti for the
+ donation of a webcam;
+- Dennis Heitmann for the donation of a webcam;
+- Jon Hollstrom for the donation of a webcam;
+- Nick McGill for the donation of a webcam;
+- Carlos Eduardo Medaglia Dyonisio, who added the support for the PAS202BCB
+ image sensor;
+- Stefano Mozzi, who donated 45 EUR;
+- Andrew Pearce for the donation of a webcam;
+- John Pullan for the donation of a webcam;
+- Bertrik Sikken, who reverse-engineered and documented the Huffman compression
+ algorithm used in the SN9C101, SN9C102 and SN9C103 controllers and
+ implemented the first decoder;
+- Ronny Standke for the donation of a webcam;
+- Mizuno Takafumi for the donation of a webcam;
+- an "anonymous" donor (who didn't want his name to be revealed) for the
+  donation of a webcam;
+- an anonymous donor for the donation of four webcams and two boards with ten
+  image sensors.
diff --git a/drivers/media/usb/sn9c102/sn9c102_config.h b/drivers/staging/media/sn9c102/sn9c102_config.h
index 0f4e0378b071..0f4e0378b071 100644
--- a/drivers/media/usb/sn9c102/sn9c102_config.h
+++ b/drivers/staging/media/sn9c102/sn9c102_config.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/staging/media/sn9c102/sn9c102_core.c
index 2cb44de2b92c..71f594f5aa71 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/staging/media/sn9c102/sn9c102_core.c
@@ -158,8 +158,8 @@ sn9c102_request_buffers(struct sn9c102_device* cam, u32 count,
cam->nbuffers = count;
while (cam->nbuffers > 0) {
- if ((buff = vmalloc_32_user(cam->nbuffers *
- PAGE_ALIGN(imagesize))))
+ buff = vmalloc_32_user(cam->nbuffers * PAGE_ALIGN(imagesize));
+ if (buff)
break;
cam->nbuffers--;
}
@@ -1121,7 +1121,8 @@ static ssize_t sn9c102_show_val(struct device* cd,
return -ENODEV;
}
- if ((val = sn9c102_read_reg(cam, cam->sysfs.reg)) < 0) {
+ val = sn9c102_read_reg(cam, cam->sysfs.reg);
+ if (val < 0) {
mutex_unlock(&sn9c102_sysfs_lock);
return -EIO;
}
@@ -1256,7 +1257,8 @@ static ssize_t sn9c102_show_i2c_val(struct device* cd,
return -ENOSYS;
}
- if ((val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) {
+ val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg);
+ if (val < 0) {
mutex_unlock(&sn9c102_sysfs_lock);
return -EIO;
}
@@ -1440,27 +1442,35 @@ static int sn9c102_create_sysfs(struct sn9c102_device* cam)
struct device *dev = &(cam->v4ldev->dev);
int err = 0;
- if ((err = device_create_file(dev, &dev_attr_reg)))
+ err = device_create_file(dev, &dev_attr_reg);
+ if (err)
goto err_out;
- if ((err = device_create_file(dev, &dev_attr_val)))
+ err = device_create_file(dev, &dev_attr_val);
+ if (err)
goto err_reg;
- if ((err = device_create_file(dev, &dev_attr_frame_header)))
+ err = device_create_file(dev, &dev_attr_frame_header);
+ if (err)
goto err_val;
if (cam->sensor.sysfs_ops) {
- if ((err = device_create_file(dev, &dev_attr_i2c_reg)))
+ err = device_create_file(dev, &dev_attr_i2c_reg);
+ if (err)
goto err_frame_header;
- if ((err = device_create_file(dev, &dev_attr_i2c_val)))
+ err = device_create_file(dev, &dev_attr_i2c_val);
+ if (err)
goto err_i2c_reg;
}
if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102) {
- if ((err = device_create_file(dev, &dev_attr_green)))
+ err = device_create_file(dev, &dev_attr_green);
+ if (err)
goto err_i2c_val;
} else {
- if ((err = device_create_file(dev, &dev_attr_blue)))
+ err = device_create_file(dev, &dev_attr_blue);
+ if (err)
goto err_i2c_val;
- if ((err = device_create_file(dev, &dev_attr_red)))
+ err = device_create_file(dev, &dev_attr_red);
+ if (err)
goto err_blue;
}
@@ -1684,11 +1694,13 @@ static int sn9c102_init(struct sn9c102_device* cam)
else
DBG(3, "Uncompressed video format is active");
- if (s->set_crop)
- if ((err = s->set_crop(cam, rect))) {
+ if (s->set_crop) {
+ err = s->set_crop(cam, rect);
+ if (err) {
DBG(3, "set_crop() failed");
return err;
}
+ }
if (s->set_ctrl) {
for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
@@ -1835,7 +1847,8 @@ static int sn9c102_open(struct file *filp)
cam->state &= ~DEV_MISCONFIGURED;
}
- if ((err = sn9c102_start_transfer(cam)))
+ err = sn9c102_start_transfer(cam);
+ if (err)
goto out;
filp->private_data = cam;
@@ -2308,7 +2321,8 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
}
if (i == ARRAY_SIZE(s->qctrl))
return -EINVAL;
- if ((err = s->set_ctrl(cam, &ctrl)))
+ err = s->set_ctrl(cam, &ctrl);
+ if (err)
return err;
s->_qctrl[i].default_value = ctrl.value;
@@ -2416,9 +2430,11 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
} else
scale = 1;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
if (copy_to_user(arg, &crop, sizeof(crop))) {
cam->stream = stream;
@@ -2672,9 +2688,11 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
return -EBUSY;
}
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
if (copy_to_user(arg, &format, sizeof(format))) {
cam->stream = stream;
@@ -2746,9 +2764,11 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
if (jc.quality != 0 && jc.quality != 1)
return -EINVAL;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
err += sn9c102_set_compression(cam, &jc);
if (err) { /* atomic, no rollback in ioctl() */
@@ -2794,9 +2814,11 @@ sn9c102_vidioc_reqbufs(struct sn9c102_device* cam, void __user * arg)
return -EBUSY;
}
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
sn9c102_empty_framequeues(cam);
@@ -2974,9 +2996,11 @@ sn9c102_vidioc_streamoff(struct sn9c102_device* cam, void __user * arg)
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
return -EINVAL;
- if (cam->stream == STREAM_ON)
- if ((err = sn9c102_stream_interrupt(cam)))
+ if (cam->stream == STREAM_ON) {
+ err = sn9c102_stream_interrupt(cam);
+ if (err)
return err;
+ }
sn9c102_empty_framequeues(cam);
@@ -3250,7 +3274,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
unsigned int i;
int err = 0, r;
- if (!(cam = kzalloc(sizeof(struct sn9c102_device), GFP_KERNEL)))
+ cam = kzalloc(sizeof(struct sn9c102_device), GFP_KERNEL);
+ if (!cam)
return -ENOMEM;
cam->usbdev = udev;
@@ -3262,13 +3287,15 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
+ cam->control_buffer = kzalloc(8, GFP_KERNEL);
+ if (!cam->control_buffer) {
DBG(1, "kzalloc() failed");
err = -ENOMEM;
goto fail;
}
- if (!(cam->v4ldev = video_device_alloc())) {
+ cam->v4ldev = video_device_alloc();
+ if (!cam->v4ldev) {
DBG(1, "video_device_alloc() failed");
err = -ENOMEM;
goto fail;
diff --git a/drivers/media/usb/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..b3d2cc729657 100644
--- a/drivers/media/usb/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131d.c b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
index 2dce5c908c8e..468072176527 100644
--- a/drivers/media/usb/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131d.c
@@ -53,27 +53,32 @@ static int hv7131d_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x31);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x33);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x32);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = 0x3f - (ctrl->value & 0x3f);
return 0;
case SN9C102_V4L2_CID_RESET_LEVEL:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x30);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
return 0;
case SN9C102_V4L2_CID_PIXEL_BIAS_VOLTAGE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x34)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x34);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x07;
return 0;
diff --git a/drivers/media/usb/sn9c102/sn9c102_hv7131r.c b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
index 4295887ff609..26a91115b831 100644
--- a/drivers/media/usb/sn9c102/sn9c102_hv7131r.c
+++ b/drivers/staging/media/sn9c102/sn9c102_hv7131r.c
@@ -142,26 +142,31 @@ static int hv7131r_get_ctrl(struct sn9c102_device* cam,
{
switch (ctrl->id) {
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x30);
+ if (ctrl->value < 0)
return -EIO;
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x31);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x33);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x32);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = ctrl->value & 0x3f;
return 0;
case V4L2_CID_BLACK_LEVEL:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x01)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x01);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x08) ? 1 : 0;
return 0;
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0343.c b/drivers/staging/media/sn9c102/sn9c102_mi0343.c
index 1f5b09bec89c..1f5b09bec89c 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mi0343.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mi0343.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mi0360.c b/drivers/staging/media/sn9c102/sn9c102_mi0360.c
index d973fc1973d9..d973fc1973d9 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mi0360.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mi0360.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_mt9v111.c b/drivers/staging/media/sn9c102/sn9c102_mt9v111.c
index 95986eb492e4..95986eb492e4 100644
--- a/drivers/media/usb/sn9c102/sn9c102_mt9v111.c
+++ b/drivers/staging/media/sn9c102/sn9c102_mt9v111.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7630.c b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
index 803712c29f02..d3a1bd8d5648 100644
--- a/drivers/media/usb/sn9c102/sn9c102_ov7630.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7630.c
@@ -260,7 +260,8 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
break;
case V4L2_CID_RED_BALANCE:
@@ -280,37 +281,44 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
break;
break;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x00);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_DO_WHITE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_WHITENESS:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0d)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0d);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x3f;
break;
case V4L2_CID_AUTOGAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x13);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x01;
break;
case V4L2_CID_VFLIP:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x75)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x75);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x80) ? 1 : 0;
break;
case SN9C102_V4L2_CID_GAMMA:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x14)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x14);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
case SN9C102_V4L2_CID_BAND_FILTER:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x2d)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x2d);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x02) ? 1 : 0;
break;
diff --git a/drivers/media/usb/sn9c102/sn9c102_ov7660.c b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
index 7977795d342b..530157a234e6 100644
--- a/drivers/media/usb/sn9c102/sn9c102_ov7660.c
+++ b/drivers/staging/media/sn9c102/sn9c102_ov7660.c
@@ -278,41 +278,49 @@ static int ov7660_get_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
break;
case V4L2_CID_DO_WHITE_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x02)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x02);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x04) ? 1 : 0;
break;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x05)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x05);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x06)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x06);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_read_reg(cam, 0x07)) < 0)
+ ctrl->value = sn9c102_read_reg(cam, 0x07);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x7f;
break;
case SN9C102_V4L2_CID_BAND_FILTER:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x3b)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x3b);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x08;
break;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x00);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
break;
case V4L2_CID_AUTOGAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x13);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x01;
break;
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas106b.c b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
index 81cd969c1b7b..47bd82de80f9 100644
--- a/drivers/media/usb/sn9c102/sn9c102_pas106b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas106b.c
@@ -62,32 +62,38 @@ static int pas106b_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x09)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x09);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0e)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0e);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case V4L2_CID_CONTRAST:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0f)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0f);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x07;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0a)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0a);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value = (ctrl->value & 0x1f) << 1;
return 0;
case SN9C102_V4L2_CID_DAC_MAGNITUDE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x08)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x08);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0xf8;
return 0;
diff --git a/drivers/media/usb/sn9c102/sn9c102_pas202bcb.c b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
index 2e86fdc86989..cbfacc2dad84 100644
--- a/drivers/media/usb/sn9c102/sn9c102_pas202bcb.c
+++ b/drivers/staging/media/sn9c102/sn9c102_pas202bcb.c
@@ -92,27 +92,32 @@ static int pas202bcb_get_ctrl(struct sn9c102_device* cam,
}
return 0;
case V4L2_CID_RED_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x09)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x09);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case V4L2_CID_BLUE_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x07)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x07);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case V4L2_CID_GAIN:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x10);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x1f;
return 0;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x08)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x08);
+ if (ctrl->value < 0)
return -EIO;
ctrl->value &= 0x0f;
return 0;
case SN9C102_V4L2_CID_DAC_MAGNITUDE:
- if ((ctrl->value = sn9c102_i2c_read(cam, 0x0c)) < 0)
+ ctrl->value = sn9c102_i2c_read(cam, 0x0c);
+ if (ctrl->value < 0)
return -EIO;
return 0;
default:
diff --git a/drivers/media/usb/sn9c102/sn9c102_sensor.h b/drivers/staging/media/sn9c102/sn9c102_sensor.h
index 3679970dba2c..3679970dba2c 100644
--- a/drivers/media/usb/sn9c102/sn9c102_sensor.h
+++ b/drivers/staging/media/sn9c102/sn9c102_sensor.h
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
index 04cdfdde8564..04cdfdde8564 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5110c1b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5110c1b.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5110d.c b/drivers/staging/media/sn9c102/sn9c102_tas5110d.c
index 9372e6f9fcff..9372e6f9fcff 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5110d.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5110d.c
diff --git a/drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c b/drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
index a30bbc4389f5..a30bbc4389f5 100644
--- a/drivers/media/usb/sn9c102/sn9c102_tas5130d1b.c
+++ b/drivers/staging/media/sn9c102/sn9c102_tas5130d1b.c
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 480b7c4064cc..f67046955ef6 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -40,7 +40,7 @@ MODULE_AUTHOR("Bluecherry <maintainers@bluecherrydvr.com>");
MODULE_VERSION(SOLO6X10_VERSION);
MODULE_LICENSE("GPL");
-unsigned video_nr = -1;
+static unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect (default)");
diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
index 1db18c7972a0..74f037b6166c 100644
--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
@@ -366,8 +366,9 @@ int solo_g723_init(struct solo_dev *solo_dev)
/* Allows for easier mapping between video and audio */
sprintf(name, "Softlogic%d", solo_dev->vfd->num);
- ret = snd_card_create(SNDRV_DEFAULT_IDX1, name, THIS_MODULE, 0,
- &solo_dev->snd_card);
+ ret = snd_card_new(&solo_dev->pdev->dev,
+ SNDRV_DEFAULT_IDX1, name, THIS_MODULE, 0,
+ &solo_dev->snd_card);
if (ret < 0)
return ret;
@@ -377,7 +378,6 @@ int solo_g723_init(struct solo_dev *solo_dev)
strcpy(card->shortname, "SOLO-6x10 Audio");
sprintf(card->longname, "%s on %s IRQ %d", card->shortname,
pci_name(solo_dev->pdev), solo_dev->pdev->irq);
- snd_card_set_dev(card, &solo_dev->pdev->dev);
ret = snd_device_new(card, SNDRV_DEV_LOWLEVEL, solo_dev, &ops);
if (ret < 0)
diff --git a/drivers/staging/media/solo6x10/solo6x10-tw28.c b/drivers/staging/media/solo6x10/solo6x10-tw28.c
index af65ea655f15..36daa1720b54 100644
--- a/drivers/staging/media/solo6x10/solo6x10-tw28.c
+++ b/drivers/staging/media/solo6x10/solo6x10-tw28.c
@@ -516,7 +516,7 @@ static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr)
static void saa712x_write_regs(struct solo_dev *dev, const uint8_t *vals,
int start, int n)
{
- for (;start < n; start++, vals++) {
+ for (; start < n; start++, vals++) {
/* Skip read-only registers */
switch (start) {
/* case 0x00 ... 0x25: */
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index d582c5b84c14..5aeb9c0c2781 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -399,8 +399,8 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
if (solo_enc->desc_count <= 1)
return 0;
- return solo_p2m_dma_desc(solo_dev, solo_enc->desc_items, solo_enc->desc_dma,
- solo_enc->desc_count - 1);
+ return solo_p2m_dma_desc(solo_dev, solo_enc->desc_items,
+ solo_enc->desc_dma, solo_enc->desc_count - 1);
}
/* Extract values from VOP header - VE_STATUSxx */
@@ -472,8 +472,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
if (vb2_plane_size(vb, 0) < vop_jpeg_size(vh) + solo_enc->jpeg_len)
return -EIO;
- frame_size = (vop_jpeg_size(vh) + solo_enc->jpeg_len + (DMA_ALIGN - 1))
- & ~(DMA_ALIGN - 1);
+ frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
/* may discard all previous data in vbuf->sgl */
@@ -506,21 +505,22 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
return -EIO;
/* If this is a key frame, add extra header */
- vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME);
+ vb->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME);
if (!vop_type(vh)) {
skip = solo_enc->vop_len;
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) + solo_enc->vop_len);
+ vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh) +
+ solo_enc->vop_len);
} else {
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
vb2_set_plane_payload(vb, 0, vop_mpeg_size(vh));
}
/* Now get the actual mpeg payload */
- frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) + sizeof(*vh))
- % SOLO_MP4E_EXT_SIZE(solo_dev);
- frame_size = (vop_mpeg_size(vh) + skip + (DMA_ALIGN - 1))
- & ~(DMA_ALIGN - 1);
+ frame_off = (vop_mpeg_offset(vh) - SOLO_MP4E_EXT_ADDR(solo_dev) +
+ sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
+ frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
/* may discard all previous data in vbuf->sgl */
dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
@@ -589,7 +589,8 @@ static void solo_enc_handle_one(struct solo_enc_dev *solo_enc,
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
goto unlock;
}
- vb = list_first_entry(&solo_enc->vidq_active, struct solo_vb2_buf, list);
+ vb = list_first_entry(&solo_enc->vidq_active, struct solo_vb2_buf,
+ list);
list_del(&vb->list);
spin_unlock_irqrestore(&solo_enc->av_lock, flags);
@@ -645,7 +646,8 @@ static void solo_handle_ring(struct solo_dev *solo_dev)
enc_buf.vh = solo_dev->vh_buf;
/* Sanity check */
- if (vop_mpeg_offset(enc_buf.vh) != SOLO_MP4E_EXT_ADDR(solo_dev) + off)
+ if (vop_mpeg_offset(enc_buf.vh) !=
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off)
continue;
if (solo_motion_detected(solo_enc))
@@ -680,9 +682,11 @@ static int solo_ring_thread(void *data)
return 0;
}
-static int solo_enc_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+static int solo_enc_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ void *alloc_ctxs[])
{
sizes[0] = FRAME_BUF_SIZE;
*num_planes = 1;
@@ -964,7 +968,7 @@ static int solo_enc_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
- return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_PAL);
+ return solo_set_video_type(solo_enc->solo_dev, std & V4L2_STD_625_50);
}
static int solo_enum_framesizes(struct file *file, void *priv,
@@ -1112,14 +1116,15 @@ static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
solo_enc->motion_thresh = ctrl->val;
if (!solo_enc->motion_global || !solo_enc->motion_enabled)
return 0;
- return solo_set_motion_threshold(solo_dev, solo_enc->ch, ctrl->val);
+ return solo_set_motion_threshold(solo_dev, solo_enc->ch,
+ ctrl->val);
case V4L2_CID_MOTION_MODE:
solo_enc->motion_global = ctrl->val == 1;
solo_enc->motion_enabled = ctrl->val > 0;
if (ctrl->val) {
if (solo_enc->motion_global)
- solo_set_motion_threshold(solo_dev, solo_enc->ch,
- solo_enc->motion_thresh);
+ solo_set_motion_threshold(solo_dev,
+ solo_enc->ch, solo_enc->motion_thresh);
else
solo_set_motion_block(solo_dev, solo_enc->ch,
&solo_enc->motion_thresholds);
@@ -1307,7 +1312,8 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->desc_nelts = 32;
solo_enc->desc_items = pci_alloc_consistent(solo_dev->pdev,
sizeof(struct solo_p2m_desc) *
- solo_enc->desc_nelts, &solo_enc->desc_dma);
+ solo_enc->desc_nelts,
+ &solo_enc->desc_dma);
ret = -ENOMEM;
if (solo_enc->desc_items == NULL)
goto hdl_free;
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2.c b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
index 7b26de3488da..47e72dac9b13 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2.c
@@ -527,7 +527,7 @@ static int solo_g_std(struct file *file, void *priv, v4l2_std_id *i)
return 0;
}
-int solo_set_video_type(struct solo_dev *solo_dev, bool type)
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz)
{
int i;
@@ -537,7 +537,8 @@ int solo_set_video_type(struct solo_dev *solo_dev, bool type)
for (i = 0; i < solo_dev->nr_chans; i++)
if (vb2_is_busy(&solo_dev->v4l2_enc[i]->vidq))
return -EBUSY;
- solo_dev->video_type = type;
+ solo_dev->video_type = is_50hz ? SOLO_VO_FMT_TYPE_PAL :
+ SOLO_VO_FMT_TYPE_NTSC;
/* Reconfigure for the new standard */
solo_disp_init(solo_dev);
solo_enc_init(solo_dev);
@@ -551,7 +552,7 @@ static int solo_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct solo_dev *solo_dev = video_drvdata(file);
- return solo_set_video_type(solo_dev, std & V4L2_STD_PAL);
+ return solo_set_video_type(solo_dev, std & V4L2_STD_625_50);
}
static int solo_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
index f1bbb8cb74e6..8964f8be158e 100644
--- a/drivers/staging/media/solo6x10/solo6x10.h
+++ b/drivers/staging/media/solo6x10/solo6x10.h
@@ -398,7 +398,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
int desc_cnt);
/* Global s_std ioctl */
-int solo_set_video_type(struct solo_dev *solo_dev, bool type);
+int solo_set_video_type(struct solo_dev *solo_dev, bool is_50hz);
void solo_update_mode(struct solo_enc_dev *solo_enc);
/* Set the threshold for motion detection */
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..c83e3375104b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
}
static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
return (u16)smp_processor_id();
}
@@ -614,8 +615,6 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv)
k = (k + 1) % j;
b2 = bkts[k];
k = (k + 1) % j;
- val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
- (c2 << 7) | (b2 << 1) | (use_bkt << 0));
val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
(c2 << 7) | (b2 << 1) | (use_bkt << 0));
@@ -892,6 +891,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/nokia_h4p/Kconfig b/drivers/staging/nokia_h4p/Kconfig
new file mode 100644
index 000000000000..4336c0ad065b
--- /dev/null
+++ b/drivers/staging/nokia_h4p/Kconfig
@@ -0,0 +1,9 @@
+config BT_NOKIA_H4P
+ tristate "HCI driver with H4 Nokia extensions"
+ depends on BT && ARCH_OMAP
+ help
+ Bluetooth HCI driver with H4 extensions. This driver provides
+ support for H4+ Bluetooth chip with vendor-specific H4 extensions.
+
+ Say Y here to compile support for h4 extended devices into the kernel
+ or say M to compile it as module (btnokia_h4p).
diff --git a/drivers/staging/nokia_h4p/Makefile b/drivers/staging/nokia_h4p/Makefile
new file mode 100644
index 000000000000..9625db4a9af3
--- /dev/null
+++ b/drivers/staging/nokia_h4p/Makefile
@@ -0,0 +1,6 @@
+
+obj-$(CONFIG_BT_NOKIA_H4P) += btnokia_h4p.o
+btnokia_h4p-objs := nokia_core.o nokia_fw.o nokia_uart.o nokia_fw-csr.o \
+ nokia_fw-bcm.o nokia_fw-ti1273.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/nokia_h4p/TODO b/drivers/staging/nokia_h4p/TODO
new file mode 100644
index 000000000000..0ec5823e0ca8
--- /dev/null
+++ b/drivers/staging/nokia_h4p/TODO
@@ -0,0 +1,132 @@
+A few attempts at submission have been made; the last review comments were received in
+
+Date: Wed, 15 Jan 2014 19:01:51 -0800
+From: Marcel Holtmann <marcel@holtmann.org>
+Subject: Re: [PATCH v6] Bluetooth: Add hci_h4p driver
+
+Some code refactoring is still needed.
+
+TODO:
+
+> +++ b/drivers/bluetooth/hci_h4p.h
+
+can we please get the naming straight. File names do not start with
+hci_ anymore. We moved away from it since that term is too generic.
+
+> +struct hci_h4p_info {
+
+Can we please get rid of the hci_ prefix for everything. Copying from
+drivers that are over 10 years old is not a good idea. Please look at
+recent ones.
+
+> + struct timer_list lazy_release;
+
+Timer? Not delayed work?
+
+> +void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val);
+> +u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset);
+> +void hci_h4p_set_rts(struct hci_h4p_info *info, int active);
+> +int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms);
+> +void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+> +void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+> +void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed);
+> +int hci_h4p_reset_uart(struct hci_h4p_info *info);
+> +void hci_h4p_init_uart(struct hci_h4p_info *info);
+> +void hci_h4p_enable_tx(struct hci_h4p_info *info);
+> +void hci_h4p_store_regs(struct hci_h4p_info *info);
+> +void hci_h4p_restore_regs(struct hci_h4p_info *info);
+> +void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable);
+
+These are a lot of public functions. Are they all really needed or can
+the code be done smart.
+
+> +static ssize_t hci_h4p_store_bdaddr(struct device *dev,
+> + struct device_attribute *attr,
+> + const char *buf, size_t count)
+> +{
+> + struct hci_h4p_info *info = dev_get_drvdata(dev);
+
+Since none of these devices can function without having a valid
+address, the way this should work is that we should not register the
+HCI device when probing the platform device.
+
+The HCI device should be registered once a valid address has been
+written into the sysfs file. I do not want to play the tricks with
+bringing up the device without a valid address.
+
+> + hdev->close = hci_h4p_hci_close;
+> + hdev->flush = hci_h4p_hci_flush;
+> + hdev->send = hci_h4p_hci_send_frame;
+
+It needs to use hdev->setup to load the firmware. I assume the
+firmware only needs to be loaded once. That is exactly what
+hdev->setup does. It gets executed once.
+
+> + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+Is this quirk really needed? Normally only Bluetooth 1.1 and early
+devices qualify for it.
+
+> +static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info, struct sk_buff *skb)
+> +{
+> + int i;
+> + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+> + int not_valid;
+
+Has this actually been confirmed that we can just randomly set an
+address out of the Nokia range. I do not think so. This is a pretty
+bad idea.
+
+I have no interest in merging a driver with such a hack.
+
+> + not_valid = 1;
+> + for (i = 0; i < 6; i++) {
+> + if (info->bd_addr[i] != 0x00) {
+> + not_valid = 0;
+> + break;
+> + }
+> + }
+
+Anybody every heard of memcmp or bacmp and BDADDR_ANY?
+
+> + if (not_valid) {
+> + dev_info(info->dev, "Valid bluetooth address not found,"
+> + " setting some random\n");
+> + /* When address is not valid, use some random */
+> + memcpy(info->bd_addr, nokia_oui, 3);
+> + get_random_bytes(info->bd_addr + 3, 3);
+> + }
+
+
+And why does every single chip firmware does this differently. Seriously, this is a mess.
+
+> +void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+> +{
+> + switch (info->man_id) {
+> + case H4P_ID_CSR:
+> + hci_h4p_bc4_parse_fw_event(info, skb);
+> + break;
+...
+> +}
+
+We have proper HCI sync command handling in recent kernels. I really
+do not know why this is hand coded these days. Check how the Intel
+firmware loading inside btusb.c does it.
+
+> +inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset)
+> +{
+> + return __raw_readb(info->uart_base + (offset << 2));
+> +}
+
+Inline in a *.c file for a non-static function. Makes no sense to me.
+
+> +/**
+> + * struct hci_h4p_platform data - hci_h4p Platform data structure
+> + */
+> +struct hci_h4p_platform_data {
+
+please have a proper name here. For example
+btnokia_h4p_platform_data.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Pavel Machek <pavel@ucw.cz>
diff --git a/drivers/staging/nokia_h4p/hci_h4p.h b/drivers/staging/nokia_h4p/hci_h4p.h
new file mode 100644
index 000000000000..99c4da61a56c
--- /dev/null
+++ b/drivers/staging/nokia_h4p/hci_h4p.h
@@ -0,0 +1,222 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DRIVERS_BLUETOOTH_HCI_H4P_H
+#define __DRIVERS_BLUETOOTH_HCI_H4P_H
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#define UART_SYSC_OMAP_RESET 0x03
+#define UART_SYSS_RESETDONE 0x01
+#define UART_OMAP_SCR_EMPTY_THR 0x08
+#define UART_OMAP_SCR_WAKEUP 0x10
+#define UART_OMAP_SSR_WAKEUP 0x02
+#define UART_OMAP_SSR_TXFULL 0x01
+
+#define UART_OMAP_SYSC_IDLEMODE 0x03
+#define UART_OMAP_SYSC_IDLEMASK (3 << UART_OMAP_SYSC_IDLEMODE)
+
+#define UART_OMAP_SYSC_FORCE_IDLE (0 << UART_OMAP_SYSC_IDLEMODE)
+#define UART_OMAP_SYSC_NO_IDLE (1 << UART_OMAP_SYSC_IDLEMODE)
+#define UART_OMAP_SYSC_SMART_IDLE (2 << UART_OMAP_SYSC_IDLEMODE)
+
+#define H4P_TRANSFER_MODE 1
+#define H4P_SCHED_TRANSFER_MODE 2
+#define H4P_ACTIVE_MODE 3
+
+struct hci_h4p_info {
+ struct timer_list lazy_release;
+ struct hci_dev *hdev;
+ spinlock_t lock;
+
+ void __iomem *uart_base;
+ unsigned long uart_phys_base;
+ int irq;
+ struct device *dev;
+ u8 chip_type;
+ u8 bt_wakeup_gpio;
+ u8 host_wakeup_gpio;
+ u8 reset_gpio;
+ u8 reset_gpio_shared;
+ u8 bt_sysclk;
+ u8 man_id;
+ u8 ver_id;
+
+ struct sk_buff_head fw_queue;
+ struct sk_buff *alive_cmd_skb;
+ struct completion init_completion;
+ struct completion fw_completion;
+ struct completion test_completion;
+ int fw_error;
+ int init_error;
+
+ struct sk_buff_head txq;
+
+ struct sk_buff *rx_skb;
+ long rx_count;
+ unsigned long rx_state;
+ unsigned long garbage_bytes;
+
+ u8 bd_addr[6];
+ struct sk_buff_head *fw_q;
+
+ int pm_enabled;
+ int tx_enabled;
+ int autorts;
+ int rx_enabled;
+ unsigned long pm_flags;
+
+ int tx_clocks_en;
+ int rx_clocks_en;
+ spinlock_t clocks_lock;
+ struct clk *uart_iclk;
+ struct clk *uart_fclk;
+ atomic_t clk_users;
+ u16 dll;
+ u16 dlh;
+ u16 ier;
+ u16 mdr1;
+ u16 efr;
+};
+
+struct hci_h4p_radio_hdr {
+ __u8 evt;
+ __u8 dlen;
+} __packed;
+
+struct hci_h4p_neg_hdr {
+ __u8 dlen;
+} __packed;
+#define H4P_NEG_HDR_SIZE 1
+
+#define H4P_NEG_REQ 0x00
+#define H4P_NEG_ACK 0x20
+#define H4P_NEG_NAK 0x40
+
+#define H4P_PROTO_PKT 0x44
+#define H4P_PROTO_BYTE 0x4c
+
+#define H4P_ID_CSR 0x02
+#define H4P_ID_BCM2048 0x04
+#define H4P_ID_TI1271 0x31
+
+struct hci_h4p_neg_cmd {
+ __u8 ack;
+ __u16 baud;
+ __u16 unused1;
+ __u8 proto;
+ __u16 sys_clk;
+ __u16 unused2;
+} __packed;
+
+struct hci_h4p_neg_evt {
+ __u8 ack;
+ __u16 baud;
+ __u16 unused1;
+ __u8 proto;
+ __u16 sys_clk;
+ __u16 unused2;
+ __u8 man_id;
+ __u8 ver_id;
+} __packed;
+
+#define H4P_ALIVE_REQ 0x55
+#define H4P_ALIVE_RESP 0xcc
+
+struct hci_h4p_alive_hdr {
+ __u8 dlen;
+} __packed;
+#define H4P_ALIVE_HDR_SIZE 1
+
+struct hci_h4p_alive_pkt {
+ __u8 mid;
+ __u8 unused;
+} __packed;
+
+#define MAX_BAUD_RATE 921600
+#define BC4_MAX_BAUD_RATE 3692300
+#define UART_CLOCK 48000000
+#define BT_INIT_DIVIDER 320
+#define BT_BAUDRATE_DIVIDER 384000000
+#define BT_SYSCLK_DIV 1000
+#define INIT_SPEED 120000
+
+#define H4_TYPE_SIZE 1
+#define H4_RADIO_HDR_SIZE 2
+
+/* H4+ packet types */
+#define H4_CMD_PKT 0x01
+#define H4_ACL_PKT 0x02
+#define H4_SCO_PKT 0x03
+#define H4_EVT_PKT 0x04
+#define H4_NEG_PKT 0x06
+#define H4_ALIVE_PKT 0x07
+#define H4_RADIO_PKT 0x08
+
+/* TX states */
+#define WAIT_FOR_PKT_TYPE 1
+#define WAIT_FOR_HEADER 2
+#define WAIT_FOR_DATA 3
+
+struct hci_fw_event {
+ struct hci_event_hdr hev;
+ struct hci_ev_cmd_complete cmd;
+ u8 status;
+} __packed;
+
+int hci_h4p_send_alive_packet(struct hci_h4p_info *info);
+
+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb);
+int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue);
+
+int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
+int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue);
+void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb);
+
+void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val);
+u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset);
+void hci_h4p_set_rts(struct hci_h4p_info *info, int active);
+int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active, int timeout_ms);
+void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which);
+void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed);
+int hci_h4p_reset_uart(struct hci_h4p_info *info);
+void hci_h4p_init_uart(struct hci_h4p_info *info);
+void hci_h4p_enable_tx(struct hci_h4p_info *info);
+void hci_h4p_store_regs(struct hci_h4p_info *info);
+void hci_h4p_restore_regs(struct hci_h4p_info *info);
+void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable);
+
+#endif /* __DRIVERS_BLUETOOTH_HCI_H4P_H */
diff --git a/drivers/staging/nokia_h4p/nokia_core.c b/drivers/staging/nokia_h4p/nokia_core.c
new file mode 100644
index 000000000000..5e19cd6ccda3
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_core.c
@@ -0,0 +1,1206 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * Thanks to all the Nokia people that helped with this driver,
+ * including Ville Tervo and Roger Quadros.
+ *
+ * Power saving functionality was removed from this driver to make
+ * merging easier.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/serial_reg.h>
+#include <linux/skbuff.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/sizes.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#include <linux/platform_data/bt-nokia-h4p.h>
+
+#include "hci_h4p.h"
+
+/* This should be used in functions that cannot release clocks */
+static void hci_h4p_set_clk(struct hci_h4p_info *info, int *clock, int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->clocks_lock, flags);
+ if (enable && !*clock) {
+ BT_DBG("Enabling %p", clock);
+ clk_prepare_enable(info->uart_fclk);
+ clk_prepare_enable(info->uart_iclk);
+ if (atomic_read(&info->clk_users) == 0)
+ hci_h4p_restore_regs(info);
+ atomic_inc(&info->clk_users);
+ }
+
+ if (!enable && *clock) {
+ BT_DBG("Disabling %p", clock);
+ if (atomic_dec_and_test(&info->clk_users))
+ hci_h4p_store_regs(info);
+ clk_disable_unprepare(info->uart_fclk);
+ clk_disable_unprepare(info->uart_iclk);
+ }
+
+ *clock = enable;
+ spin_unlock_irqrestore(&info->clocks_lock, flags);
+}
+
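+/* Timer callback armed by hci_h4p_disable_tx(): releases the TX clocks about
+ * 100 ms after the last transmission unless TX has been re-enabled. */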
+static void hci_h4p_lazy_clock_release(unsigned long data)
+{
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!info->tx_enabled)
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/* Power management functions */
+void hci_h4p_smart_idle(struct hci_h4p_info *info, bool enable)
+{
+ u8 v;
+
+ v = hci_h4p_inb(info, UART_OMAP_SYSC);
+ v &= ~(UART_OMAP_SYSC_IDLEMASK);
+
+ if (enable)
+ v |= UART_OMAP_SYSC_SMART_IDLE;
+ else
+ v |= UART_OMAP_SYSC_NO_IDLE;
+
+ hci_h4p_outb(info, UART_OMAP_SYSC, v);
+}
+
+static inline void h4p_schedule_pm(struct hci_h4p_info *info)
+{
+}
+
+static void hci_h4p_disable_tx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ /* Re-enable smart-idle */
+ hci_h4p_smart_idle(info, 1);
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ mod_timer(&info->lazy_release, jiffies + msecs_to_jiffies(100));
+ info->tx_enabled = 0;
+}
+
+void hci_h4p_enable_tx(struct hci_h4p_info *info)
+{
+ unsigned long flags;
+
+ if (!info->pm_enabled)
+ return;
+
+ h4p_schedule_pm(info);
+
+ spin_lock_irqsave(&info->lock, flags);
+ del_timer(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ info->tx_enabled = 1;
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ /*
+ * Disable smart-idle as UART TX interrupts
+ * are not wake-up capable
+ */
+ hci_h4p_smart_idle(info, 0);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void hci_h4p_disable_rx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ info->rx_enabled = 0;
+
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR)
+ return;
+
+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT))
+ return;
+
+ __hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ info->autorts = 0;
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+}
+
+static void hci_h4p_enable_rx(struct hci_h4p_info *info)
+{
+ if (!info->pm_enabled)
+ return;
+
+ h4p_schedule_pm(info);
+
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ info->rx_enabled = 1;
+
+ if (!(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT))
+ return;
+
+ __hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+ info->autorts = 1;
+}
+
+/* Negotiation functions */
+int hci_h4p_send_alive_packet(struct hci_h4p_info *info)
+{
+ struct hci_h4p_alive_hdr *hdr;
+ struct hci_h4p_alive_pkt *pkt;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int len;
+
+ BT_DBG("Sending alive packet");
+
+ len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt);
+ skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(skb->data, 0x00, len);
+ *skb_put(skb, 1) = H4_ALIVE_PKT;
+ hdr = (struct hci_h4p_alive_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr->dlen = sizeof(*pkt);
+ pkt = (struct hci_h4p_alive_pkt *)skb_put(skb, sizeof(*pkt));
+ pkt->mid = H4P_ALIVE_REQ;
+
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ BT_DBG("Alive packet sent");
+
+ return 0;
+}
+
+static void hci_h4p_alive_packet(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct hci_h4p_alive_hdr *hdr;
+ struct hci_h4p_alive_pkt *pkt;
+
+ BT_DBG("Received alive packet");
+ hdr = (struct hci_h4p_alive_hdr *)skb->data;
+ if (hdr->dlen != sizeof(*pkt)) {
+ dev_err(info->dev, "Corrupted alive message\n");
+ info->init_error = -EIO;
+ goto finish_alive;
+ }
+
+ pkt = (struct hci_h4p_alive_pkt *)skb_pull(skb, sizeof(*hdr));
+ if (pkt->mid != H4P_ALIVE_RESP) {
+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
+ info->init_error = -EINVAL;
+ }
+
+finish_alive:
+ complete(&info->init_completion);
+ kfree_skb(skb);
+}
+
+static int hci_h4p_send_negotiation(struct hci_h4p_info *info)
+{
+ struct hci_h4p_neg_cmd *neg_cmd;
+ struct hci_h4p_neg_hdr *neg_hdr;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int err, len;
+ u16 sysclk;
+
+ BT_DBG("Sending negotiation..");
+
+ switch (info->bt_sysclk) {
+ case 1:
+ sysclk = 12000;
+ break;
+ case 2:
+ sysclk = 38400;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ len = sizeof(*neg_cmd) + sizeof(*neg_hdr) + H4_TYPE_SIZE;
+ skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(skb->data, 0x00, len);
+ *skb_put(skb, 1) = H4_NEG_PKT;
+ neg_hdr = (struct hci_h4p_neg_hdr *)skb_put(skb, sizeof(*neg_hdr));
+ neg_cmd = (struct hci_h4p_neg_cmd *)skb_put(skb, sizeof(*neg_cmd));
+
+ neg_hdr->dlen = sizeof(*neg_cmd);
+ neg_cmd->ack = H4P_NEG_REQ;
+ neg_cmd->baud = cpu_to_le16(BT_BAUDRATE_DIVIDER/MAX_BAUD_RATE);
+ neg_cmd->proto = H4P_PROTO_BYTE;
+ neg_cmd->sys_clk = cpu_to_le16(sysclk);
+
+ hci_h4p_change_speed(info, INIT_SPEED);
+
+ hci_h4p_set_rts(info, 1);
+ info->init_error = 0;
+ init_completion(&info->init_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ if (info->init_error < 0)
+ return info->init_error;
+
+ /* Change to operational settings */
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, MAX_BAUD_RATE);
+
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err < 0)
+ return err;
+
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+ init_completion(&info->init_completion);
+ err = hci_h4p_send_alive_packet(info);
+
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_interruptible_timeout(&info->init_completion,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ if (info->init_error < 0)
+ return info->init_error;
+
+ BT_DBG("Negotiation successful");
+ return 0;
+}
+
+static void hci_h4p_negotiation_packet(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct hci_h4p_neg_hdr *hdr;
+ struct hci_h4p_neg_evt *evt;
+
+ hdr = (struct hci_h4p_neg_hdr *)skb->data;
+ if (hdr->dlen != sizeof(*evt)) {
+ info->init_error = -EIO;
+ goto finish_neg;
+ }
+
+ evt = (struct hci_h4p_neg_evt *)skb_pull(skb, sizeof(*hdr));
+
+ if (evt->ack != H4P_NEG_ACK) {
+ dev_err(info->dev, "Could not negotiate hci_h4p settings\n");
+ info->init_error = -EINVAL;
+ }
+
+ info->man_id = evt->man_id;
+ info->ver_id = evt->ver_id;
+
+finish_neg:
+
+ complete(&info->init_completion);
+ kfree_skb(skb);
+}
+
+/* H4 packet handling functions */
+static int hci_h4p_get_hdr_len(struct hci_h4p_info *info, u8 pkt_type)
+{
+ long retval;
+
+ switch (pkt_type) {
+ case H4_EVT_PKT:
+ retval = HCI_EVENT_HDR_SIZE;
+ break;
+ case H4_ACL_PKT:
+ retval = HCI_ACL_HDR_SIZE;
+ break;
+ case H4_SCO_PKT:
+ retval = HCI_SCO_HDR_SIZE;
+ break;
+ case H4_NEG_PKT:
+ retval = H4P_NEG_HDR_SIZE;
+ break;
+ case H4_ALIVE_PKT:
+ retval = H4P_ALIVE_HDR_SIZE;
+ break;
+ case H4_RADIO_PKT:
+ retval = H4_RADIO_HDR_SIZE;
+ break;
+ default:
+ dev_err(info->dev, "Unknown H4 packet type 0x%.2x\n", pkt_type);
+ retval = -1;
+ break;
+ }
+
+ return retval;
+}
+
+static unsigned int hci_h4p_get_data_len(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ long retval = -1;
+ struct hci_acl_hdr *acl_hdr;
+ struct hci_sco_hdr *sco_hdr;
+ struct hci_event_hdr *evt_hdr;
+ struct hci_h4p_neg_hdr *neg_hdr;
+ struct hci_h4p_alive_hdr *alive_hdr;
+ struct hci_h4p_radio_hdr *radio_hdr;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_EVT_PKT:
+ evt_hdr = (struct hci_event_hdr *)skb->data;
+ retval = evt_hdr->plen;
+ break;
+ case H4_ACL_PKT:
+ acl_hdr = (struct hci_acl_hdr *)skb->data;
+ retval = le16_to_cpu(acl_hdr->dlen);
+ break;
+ case H4_SCO_PKT:
+ sco_hdr = (struct hci_sco_hdr *)skb->data;
+ retval = sco_hdr->dlen;
+ break;
+ case H4_RADIO_PKT:
+ radio_hdr = (struct hci_h4p_radio_hdr *)skb->data;
+ retval = radio_hdr->dlen;
+ break;
+ case H4_NEG_PKT:
+ neg_hdr = (struct hci_h4p_neg_hdr *)skb->data;
+ retval = neg_hdr->dlen;
+ break;
+ case H4_ALIVE_PKT:
+ alive_hdr = (struct hci_h4p_alive_hdr *)skb->data;
+ retval = alive_hdr->dlen;
+ break;
+ }
+
+ return retval;
+}
+
+static inline void hci_h4p_recv_frame(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ if (unlikely(!test_bit(HCI_RUNNING, &info->hdev->flags))) {
+ switch (bt_cb(skb)->pkt_type) {
+ case H4_NEG_PKT:
+ hci_h4p_negotiation_packet(info, skb);
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ return;
+ case H4_ALIVE_PKT:
+ hci_h4p_alive_packet(info, skb);
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ return;
+ }
+
+ if (!test_bit(HCI_UP, &info->hdev->flags)) {
+ BT_DBG("fw_event");
+ hci_h4p_parse_fw_event(info, skb);
+ return;
+ }
+ }
+
+ hci_recv_frame(info->hdev, skb);
+ BT_DBG("Frame sent to upper layer");
+}
+
+static inline void hci_h4p_handle_byte(struct hci_h4p_info *info, u8 byte)
+{
+ switch (info->rx_state) {
+ case WAIT_FOR_PKT_TYPE:
+ bt_cb(info->rx_skb)->pkt_type = byte;
+ info->rx_count = hci_h4p_get_hdr_len(info, byte);
+ if (info->rx_count < 0) {
+ info->hdev->stat.err_rx++;
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ } else {
+ info->rx_state = WAIT_FOR_HEADER;
+ }
+ break;
+ case WAIT_FOR_HEADER:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ if (info->rx_count != 0)
+ break;
+ info->rx_count = hci_h4p_get_data_len(info, info->rx_skb);
+ if (info->rx_count > skb_tailroom(info->rx_skb)) {
+ dev_err(info->dev, "frame too long\n");
+ info->garbage_bytes = info->rx_count
+ - skb_tailroom(info->rx_skb);
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+ break;
+ }
+ info->rx_state = WAIT_FOR_DATA;
+ break;
+ case WAIT_FOR_DATA:
+ info->rx_count--;
+ *skb_put(info->rx_skb, 1) = byte;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ if (info->rx_count == 0) {
+ /* H4+ devices should always send word aligned packets */
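+		/* rx_skb does not hold the H4 type byte, so an even length here
+		 * means the on-wire length is odd and a padding byte follows. */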
+ if (!(info->rx_skb->len % 2))
+ info->garbage_bytes++;
+ hci_h4p_recv_frame(info, info->rx_skb);
+ info->rx_skb = NULL;
+ }
+}
+
+static void hci_h4p_rx_tasklet(unsigned long data)
+{
+ u8 byte;
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+
+ BT_DBG("tasklet woke up");
+ BT_DBG("rx_tasklet woke up");
+
+ while (hci_h4p_inb(info, UART_LSR) & UART_LSR_DR) {
+ byte = hci_h4p_inb(info, UART_RX);
+ if (info->garbage_bytes) {
+ info->garbage_bytes--;
+ continue;
+ }
+ if (info->rx_skb == NULL) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE,
+ GFP_ATOMIC | GFP_DMA);
+ if (!info->rx_skb) {
+ dev_err(info->dev,
+ "No memory for new packet\n");
+ goto finish_rx;
+ }
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_skb->dev = (void *)info->hdev;
+ }
+ info->hdev->stat.byte_rx++;
+ hci_h4p_handle_byte(info, byte);
+ }
+
+ if (!info->rx_enabled) {
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT &&
+ info->autorts) {
+			__hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ info->autorts = 0;
+ }
+ /* Flush posted write to avoid spurious interrupts */
+ hci_h4p_inb(info, UART_OMAP_SCR);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ }
+
+finish_rx:
+ BT_DBG("rx_ended");
+}
+
+static void hci_h4p_tx_tasklet(unsigned long data)
+{
+ unsigned int sent = 0;
+ struct sk_buff *skb;
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+
+ BT_DBG("tasklet woke up");
+ BT_DBG("tx_tasklet woke up");
+
+ if (info->autorts != info->rx_enabled) {
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) {
+ if (info->autorts && !info->rx_enabled) {
+ __hci_h4p_set_auto_ctsrts(info, 0,
+ UART_EFR_RTS);
+ info->autorts = 0;
+ }
+ if (!info->autorts && info->rx_enabled) {
+ __hci_h4p_set_auto_ctsrts(info, 1,
+ UART_EFR_RTS);
+ info->autorts = 1;
+ }
+ } else {
+ hci_h4p_outb(info, UART_OMAP_SCR,
+ hci_h4p_inb(info, UART_OMAP_SCR) |
+ UART_OMAP_SCR_EMPTY_THR);
+ goto finish_tx;
+ }
+ }
+
+ skb = skb_dequeue(&info->txq);
+ if (!skb) {
+ /* No data in buffer */
+		BT_DBG("tx queue empty");
+ if (hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT) {
+ hci_h4p_outb(info, UART_IER,
+ hci_h4p_inb(info, UART_IER) &
+ ~UART_IER_THRI);
+ hci_h4p_inb(info, UART_OMAP_SCR);
+ hci_h4p_disable_tx(info);
+ return;
+ }
+ hci_h4p_outb(info, UART_OMAP_SCR,
+ hci_h4p_inb(info, UART_OMAP_SCR) |
+ UART_OMAP_SCR_EMPTY_THR);
+ goto finish_tx;
+ }
+
+ /* Copy data to tx fifo */
+ while (!(hci_h4p_inb(info, UART_OMAP_SSR) & UART_OMAP_SSR_TXFULL) &&
+ (sent < skb->len)) {
+ hci_h4p_outb(info, UART_TX, skb->data[sent]);
+ sent++;
+ }
+
+ info->hdev->stat.byte_tx += sent;
+ if (skb->len == sent) {
+ kfree_skb(skb);
+ } else {
+ skb_pull(skb, sent);
+ skb_queue_head(&info->txq, skb);
+ }
+
+ hci_h4p_outb(info, UART_OMAP_SCR, hci_h4p_inb(info, UART_OMAP_SCR) &
+ ~UART_OMAP_SCR_EMPTY_THR);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+
+finish_tx:
+ /* Flush posted write to avoid spurious interrupts */
+ hci_h4p_inb(info, UART_OMAP_SCR);
+
+}
+
+static irqreturn_t hci_h4p_interrupt(int irq, void *data)
+{
+ struct hci_h4p_info *info = (struct hci_h4p_info *)data;
+ u8 iir, msr;
+ int ret;
+
+ ret = IRQ_NONE;
+
+ iir = hci_h4p_inb(info, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_HANDLED;
+
+ BT_DBG("In interrupt handler iir 0x%.2x", iir);
+
+ iir &= UART_IIR_ID;
+
+ if (iir == UART_IIR_MSI) {
+ msr = hci_h4p_inb(info, UART_MSR);
+ ret = IRQ_HANDLED;
+ }
+ if (iir == UART_IIR_RLSI) {
+ hci_h4p_inb(info, UART_RX);
+ hci_h4p_inb(info, UART_LSR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_RDI) {
+ hci_h4p_rx_tasklet((unsigned long)data);
+ ret = IRQ_HANDLED;
+ }
+
+ if (iir == UART_IIR_THRI) {
+ hci_h4p_tx_tasklet((unsigned long)data);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t hci_h4p_wakeup_interrupt(int irq, void *dev_inst)
+{
+ struct hci_h4p_info *info = dev_inst;
+ int should_wakeup;
+ struct hci_dev *hdev;
+
+ if (!info->hdev)
+ return IRQ_HANDLED;
+
+ should_wakeup = gpio_get_value(info->host_wakeup_gpio);
+ hdev = info->hdev;
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ if (should_wakeup == 1)
+ complete_all(&info->test_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ BT_DBG("gpio interrupt %d", should_wakeup);
+
+	/* Check if we have missed some interrupts */
+ if (info->rx_enabled == should_wakeup)
+ return IRQ_HANDLED;
+
+ if (should_wakeup)
+ hci_h4p_enable_rx(info);
+ else
+ hci_h4p_disable_rx(info);
+
+ return IRQ_HANDLED;
+}
+
+static inline void hci_h4p_set_pm_limits(struct hci_h4p_info *info, bool set)
+{
+ struct hci_h4p_platform_data *bt_plat_data = info->dev->platform_data;
+ const char *sset = set ? "set" : "clear";
+
+ if (unlikely(!bt_plat_data || !bt_plat_data->set_pm_limits))
+ return;
+
+ if (set != !!test_bit(H4P_ACTIVE_MODE, &info->pm_flags)) {
+ bt_plat_data->set_pm_limits(info->dev, set);
+ if (set)
+ set_bit(H4P_ACTIVE_MODE, &info->pm_flags);
+ else
+ clear_bit(H4P_ACTIVE_MODE, &info->pm_flags);
+ BT_DBG("Change pm constraints to: %s", sset);
+ return;
+ }
+
+ BT_DBG("pm constraints remains: %s", sset);
+}
+
+static int hci_h4p_reset(struct hci_h4p_info *info)
+{
+ int err;
+
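+	/* Reset the UART, hold the chip in reset with BT_WAKEUP asserted, then
+	 * release reset and wait for the host wakeup line and CTS to come up. */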
+ err = hci_h4p_reset_uart(info);
+ if (err < 0) {
+ dev_err(info->dev, "Uart reset failed\n");
+ return err;
+ }
+ hci_h4p_init_uart(info);
+ hci_h4p_set_rts(info, 0);
+
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ msleep(10);
+
+ if (gpio_get_value(info->host_wakeup_gpio) == 1) {
+ dev_err(info->dev, "host_wakeup_gpio not low\n");
+ return -EPROTO;
+ }
+
+ init_completion(&info->test_completion);
+ gpio_set_value(info->reset_gpio, 1);
+
+ if (!wait_for_completion_interruptible_timeout(&info->test_completion,
+ msecs_to_jiffies(100))) {
+ dev_err(info->dev, "wakeup test timed out\n");
+ complete_all(&info->test_completion);
+ return -EPROTO;
+ }
+
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err < 0) {
+ dev_err(info->dev, "No cts from bt chip\n");
+ return err;
+ }
+
+ hci_h4p_set_rts(info, 1);
+
+ return 0;
+}
+
+/* hci callback functions */
+static int hci_h4p_hci_flush(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info = hci_get_drvdata(hdev);
+ skb_queue_purge(&info->txq);
+
+ return 0;
+}
+
+static int hci_h4p_bt_wakeup_test(struct hci_h4p_info *info)
+{
+ /*
+ * Test Sequence:
+ * Host de-asserts the BT_WAKE_UP line.
+ * Host polls the UART_CTS line, waiting for it to be de-asserted.
+ * Host asserts the BT_WAKE_UP line.
+ * Host polls the UART_CTS line, waiting for it to be asserted.
+ * Host de-asserts the BT_WAKE_UP line (allow the Bluetooth device to
+ * sleep).
+ * Host polls the UART_CTS line, waiting for it to be de-asserted.
+ */
+ int err;
+ int ret = -ECOMM;
+
+ if (!info)
+ return -EINVAL;
+
+ /* Disable wakeup interrupts */
+ disable_irq(gpio_to_irq(info->host_wakeup_gpio));
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ err = hci_h4p_wait_for_cts(info, 0, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS low timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ gpio_set_value(info->bt_wakeup_gpio, 1);
+ err = hci_h4p_wait_for_cts(info, 1, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS high timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ err = hci_h4p_wait_for_cts(info, 0, 100);
+ if (err) {
+ dev_warn(info->dev,
+ "bt_wakeup_test: fail: CTS re-low timed out: %d\n",
+ err);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+
+ /* Re-enable wakeup interrupts */
+ enable_irq(gpio_to_irq(info->host_wakeup_gpio));
+
+ return ret;
+}
+
+static int hci_h4p_hci_open(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info;
+ int err, retries = 0;
+ struct sk_buff_head fw_queue;
+ unsigned long flags;
+
+ info = hci_get_drvdata(hdev);
+
+ if (test_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+	/* TI1271 has a HW bug and boot-up might fail. Retry up to three times. */
+again:
+
+ info->rx_enabled = 1;
+ info->rx_state = WAIT_FOR_PKT_TYPE;
+ info->rx_count = 0;
+ info->garbage_bytes = 0;
+ info->rx_skb = NULL;
+ info->pm_enabled = 0;
+ init_completion(&info->fw_completion);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ skb_queue_head_init(&fw_queue);
+
+ err = hci_h4p_reset(info);
+ if (err < 0)
+ goto err_clean;
+
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_CTS | UART_EFR_RTS);
+ info->autorts = 1;
+
+	err = hci_h4p_send_negotiation(info);
+	if (err < 0)
+		goto err_clean;
+
+ err = hci_h4p_read_fw(info, &fw_queue);
+ if (err < 0) {
+ dev_err(info->dev, "Cannot read firmware\n");
+ goto err_clean;
+ }
+
+ err = hci_h4p_send_fw(info, &fw_queue);
+ if (err < 0) {
+ dev_err(info->dev, "Sending firmware failed.\n");
+ goto err_clean;
+ }
+
+ info->pm_enabled = 1;
+
+ err = hci_h4p_bt_wakeup_test(info);
+ if (err < 0) {
+ dev_err(info->dev, "BT wakeup test failed.\n");
+ goto err_clean;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->rx_enabled = gpio_get_value(info->host_wakeup_gpio);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, info->rx_enabled);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+
+ kfree_skb(info->alive_cmd_skb);
+ info->alive_cmd_skb = NULL;
+ set_bit(HCI_RUNNING, &hdev->flags);
+
+ BT_DBG("hci up and running");
+ return 0;
+
+err_clean:
+ hci_h4p_hci_flush(hdev);
+ hci_h4p_reset_uart(info);
+ del_timer_sync(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ skb_queue_purge(&fw_queue);
+ kfree_skb(info->alive_cmd_skb);
+ info->alive_cmd_skb = NULL;
+ kfree_skb(info->rx_skb);
+ info->rx_skb = NULL;
+
+ if (retries++ < 3) {
+ dev_err(info->dev, "FW loading try %d fail. Retry.\n", retries);
+ goto again;
+ }
+
+ return err;
+}
+
+static int hci_h4p_hci_close(struct hci_dev *hdev)
+{
+ struct hci_h4p_info *info = hci_get_drvdata(hdev);
+
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ hci_h4p_hci_flush(hdev);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 1);
+ hci_h4p_reset_uart(info);
+ del_timer_sync(&info->lazy_release);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+ hci_h4p_set_clk(info, &info->rx_clocks_en, 0);
+ gpio_set_value(info->reset_gpio, 0);
+ gpio_set_value(info->bt_wakeup_gpio, 0);
+ kfree_skb(info->rx_skb);
+
+ return 0;
+}
+
+static int hci_h4p_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_h4p_info *info;
+ int err = 0;
+
+ BT_DBG("dev %p, skb %p", hdev, skb);
+
+ info = hci_get_drvdata(hdev);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ dev_warn(info->dev, "Frame for non-running device\n");
+ return -EIO;
+ }
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
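+	/* The OMAP UART uses a 13x divisor (MDR1 mode 3) above 460800 baud and
+	 * the standard 16x divisor (mode 0) otherwise. */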
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ /* Push frame type to skb */
+ *skb_push(skb, 1) = (bt_cb(skb)->pkt_type);
+	/* We should always send word-aligned data to H4+ devices */
+ if (skb->len % 2) {
+ err = skb_pad(skb, 1);
+ if (!err)
+ *skb_put(skb, 1) = 0x00;
+ }
+ if (err)
+ return err;
+
+ skb_queue_tail(&info->txq, skb);
+ hci_h4p_enable_tx(info);
+
+ return 0;
+}
+
+static ssize_t hci_h4p_store_bdaddr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hci_h4p_info *info = dev_get_drvdata(dev);
+ unsigned int bdaddr[6];
+ int ret, i;
+
+ ret = sscanf(buf, "%2x:%2x:%2x:%2x:%2x:%2x\n",
+ &bdaddr[0], &bdaddr[1], &bdaddr[2],
+ &bdaddr[3], &bdaddr[4], &bdaddr[5]);
+
+ if (ret != 6)
+ return -EINVAL;
+
+ for (i = 0; i < 6; i++) {
+ if (bdaddr[i] > 0xff)
+ return -EINVAL;
+ info->bd_addr[i] = bdaddr[i] & 0xff;
+ }
+
+ return count;
+}
+
+static ssize_t hci_h4p_show_bdaddr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hci_h4p_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%pMR\n", info->bd_addr);
+}
+
+static DEVICE_ATTR(bdaddr, S_IRUGO | S_IWUSR, hci_h4p_show_bdaddr,
+ hci_h4p_store_bdaddr);
+
+static int hci_h4p_sysfs_create_files(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_bdaddr);
+}
+
+static void hci_h4p_sysfs_remove_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_bdaddr);
+}
+
+static int hci_h4p_register_hdev(struct hci_h4p_info *info)
+{
+ struct hci_dev *hdev;
+
+ /* Initialize and register HCI device */
+
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(info->dev, "Can't allocate memory for device\n");
+ return -ENOMEM;
+ }
+ info->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, info);
+
+ hdev->open = hci_h4p_hci_open;
+ hdev->close = hci_h4p_hci_close;
+ hdev->flush = hci_h4p_hci_flush;
+ hdev->send = hci_h4p_hci_send_frame;
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+ SET_HCIDEV_DEV(hdev, info->dev);
+
+ if (hci_h4p_sysfs_create_files(info->dev) < 0) {
+ dev_err(info->dev, "failed to create sysfs files\n");
+ goto free;
+ }
+
+ if (hci_register_dev(hdev) >= 0)
+ return 0;
+
+ dev_err(info->dev, "hci_register failed %s.\n", hdev->name);
+ hci_h4p_sysfs_remove_files(info->dev);
+free:
+ hci_free_dev(info->hdev);
+ return -ENODEV;
+}
+
+static int hci_h4p_probe(struct platform_device *pdev)
+{
+ struct hci_h4p_platform_data *bt_plat_data;
+ struct hci_h4p_info *info;
+ int err;
+
+ dev_info(&pdev->dev, "Registering HCI H4P device\n");
+ info = devm_kzalloc(&pdev->dev, sizeof(struct hci_h4p_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ info->tx_enabled = 1;
+ info->rx_enabled = 1;
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->clocks_lock);
+ skb_queue_head_init(&info->txq);
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "Could not get Bluetooth config data\n");
+ return -ENODATA;
+ }
+
+ bt_plat_data = pdev->dev.platform_data;
+ info->chip_type = bt_plat_data->chip_type;
+ info->bt_wakeup_gpio = bt_plat_data->bt_wakeup_gpio;
+ info->host_wakeup_gpio = bt_plat_data->host_wakeup_gpio;
+ info->reset_gpio = bt_plat_data->reset_gpio;
+ info->reset_gpio_shared = bt_plat_data->reset_gpio_shared;
+ info->bt_sysclk = bt_plat_data->bt_sysclk;
+
+ BT_DBG("RESET gpio: %d", info->reset_gpio);
+ BT_DBG("BTWU gpio: %d", info->bt_wakeup_gpio);
+ BT_DBG("HOSTWU gpio: %d", info->host_wakeup_gpio);
+ BT_DBG("sysclk: %d", info->bt_sysclk);
+
+ init_completion(&info->test_completion);
+ complete_all(&info->test_completion);
+
+ if (!info->reset_gpio_shared) {
+ err = devm_gpio_request_one(&pdev->dev, info->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "bt_reset");
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot get GPIO line %d\n",
+ info->reset_gpio);
+ return err;
+ }
+ }
+
+ err = devm_gpio_request_one(&pdev->dev, info->bt_wakeup_gpio,
+ GPIOF_OUT_INIT_LOW, "bt_wakeup");
+
+ if (err < 0) {
+ dev_err(info->dev, "Cannot get GPIO line 0x%d",
+ info->bt_wakeup_gpio);
+ return err;
+ }
+
+ err = devm_gpio_request_one(&pdev->dev, info->host_wakeup_gpio,
+ GPIOF_DIR_IN, "host_wakeup");
+ if (err < 0) {
+ dev_err(info->dev, "Cannot get GPIO line %d",
+ info->host_wakeup_gpio);
+ return err;
+ }
+
+ info->irq = bt_plat_data->uart_irq;
+ info->uart_base = devm_ioremap(&pdev->dev, bt_plat_data->uart_base,
+ SZ_2K);
+ info->uart_iclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_iclk);
+ info->uart_fclk = devm_clk_get(&pdev->dev, bt_plat_data->uart_fclk);
+
+ err = devm_request_irq(&pdev->dev, info->irq, hci_h4p_interrupt,
+ IRQF_DISABLED, "hci_h4p", info);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n",
+ info->irq);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, gpio_to_irq(info->host_wakeup_gpio),
+ hci_h4p_wakeup_interrupt, IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING | IRQF_DISABLED,
+ "hci_h4p_wkup", info);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n",
+ gpio_to_irq(info->host_wakeup_gpio));
+ return err;
+ }
+
+ err = irq_set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1);
+ if (err < 0) {
+ dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n",
+ gpio_to_irq(info->host_wakeup_gpio));
+ return err;
+ }
+
+ init_timer_deferrable(&info->lazy_release);
+ info->lazy_release.function = hci_h4p_lazy_clock_release;
+ info->lazy_release.data = (unsigned long)info;
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
+ err = hci_h4p_reset_uart(info);
+ if (err < 0)
+ return err;
+ gpio_set_value(info->reset_gpio, 0);
+ hci_h4p_set_clk(info, &info->tx_clocks_en, 0);
+
+ platform_set_drvdata(pdev, info);
+
+ if (hci_h4p_register_hdev(info) < 0) {
+ dev_err(info->dev, "failed to register hci_h4p hci device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hci_h4p_remove(struct platform_device *pdev)
+{
+ struct hci_h4p_info *info;
+
+ info = platform_get_drvdata(pdev);
+
+ hci_h4p_sysfs_remove_files(info->dev);
+ hci_h4p_hci_close(info->hdev);
+ hci_unregister_dev(info->hdev);
+ hci_free_dev(info->hdev);
+
+ return 0;
+}
+
+static struct platform_driver hci_h4p_driver = {
+ .probe = hci_h4p_probe,
+ .remove = hci_h4p_remove,
+ .driver = {
+ .name = "hci_h4p",
+ },
+};
+
+module_platform_driver(hci_h4p_driver);
+
+MODULE_ALIAS("platform:hci_h4p");
+MODULE_DESCRIPTION("Bluetooth h4 driver with nokia extensions");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ville Tervo");
diff --git a/drivers/staging/nokia_h4p/nokia_fw-bcm.c b/drivers/staging/nokia_h4p/nokia_fw-bcm.c
new file mode 100644
index 000000000000..111ae94032d1
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-bcm.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+static int hci_h4p_bcm_set_bdaddr(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ int i;
+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+ int not_valid;
+
+ not_valid = 1;
+ for (i = 0; i < 6; i++) {
+ if (info->bd_addr[i] != 0x00) {
+ not_valid = 0;
+ break;
+ }
+ }
+
+ if (not_valid) {
+ dev_info(info->dev, "Valid bluetooth address not found, setting some random\n");
+		/* If the address is not valid, use a random one with the Nokia OUI */
+ memcpy(info->bd_addr, nokia_oui, 3);
+ get_random_bytes(info->bd_addr + 3, 3);
+ }
+
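+	/* The firmware command carries the address in reverse byte order at
+	 * packet offsets 4..9. */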
+ for (i = 0; i < 6; i++)
+ skb->data[9 - i] = info->bd_addr[i];
+
+ return 0;
+}
+
+void hci_h4p_bcm_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ struct sk_buff *fw_skb;
+ int err;
+ unsigned long flags;
+
+ if (skb->data[5] != 0x00) {
+ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
+ skb->data[5]);
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+
+ fw_skb = skb_dequeue(info->fw_q);
+ if (fw_skb == NULL || info->fw_error) {
+ complete(&info->fw_completion);
+ return;
+ }
+
+	if (fw_skb->data[1] == 0x01 && fw_skb->data[2] == 0xfc &&
+	    fw_skb->len >= 10) {
+ BT_DBG("Setting bluetooth address");
+ err = hci_h4p_bcm_set_bdaddr(info, fw_skb);
+ if (err < 0) {
+ kfree_skb(fw_skb);
+ info->fw_error = err;
+ complete(&info->fw_completion);
+ return;
+ }
+ }
+
+ skb_queue_tail(&info->txq, fw_skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+
+int hci_h4p_bcm_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ struct sk_buff *skb;
+ unsigned long flags, time;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+
+ time = jiffies;
+
+ info->fw_q = fw_queue;
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ return -ENODATA;
+
+ BT_DBG("Sending commands");
+
+ /*
+ * Disable smart-idle as UART TX interrupts
+ * are not wake-up capable
+ */
+ hci_h4p_smart_idle(info, 0);
+
+ /* Check if this is bd_address packet */
+ init_completion(&info->fw_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(2000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+
+ BT_DBG("Firmware sent in %d msecs",
+		jiffies_to_msecs(jiffies - time));
+
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw-csr.c b/drivers/staging/nokia_h4p/nokia_fw-csr.c
new file mode 100644
index 000000000000..fe6b704b3d97
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-csr.c
@@ -0,0 +1,149 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+void hci_h4p_bc4_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ /* Check if this is fw packet */
+ if (skb->data[0] != 0xff) {
+ hci_recv_frame(info->hdev, skb);
+ return;
+ }
+
+ if (skb->data[11] || skb->data[12]) {
+ dev_err(info->dev, "Firmware sending command failed\n");
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+ complete(&info->fw_completion);
+}
+
+int hci_h4p_bc4_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ static const u8 nokia_oui[3] = {0x00, 0x19, 0x4F};
+ struct sk_buff *skb;
+ unsigned int offset;
+ int retries, count, i, not_valid;
+ unsigned long flags;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+ skb = skb_dequeue(fw_queue);
+
+ if (!skb)
+ return -ENOMSG;
+
+ /* Check if this is bd_address packet */
+ if (skb->data[15] == 0x01 && skb->data[16] == 0x00) {
+ offset = 21;
+ skb->data[offset + 1] = 0x00;
+ skb->data[offset + 5] = 0x00;
+
+ not_valid = 1;
+ for (i = 0; i < 6; i++) {
+ if (info->bd_addr[i] != 0x00) {
+ not_valid = 0;
+ break;
+ }
+ }
+
+ if (not_valid) {
+ dev_info(info->dev, "Valid bluetooth address not found, setting some random\n");
+			/* If the address is not valid, use a random one */
+ memcpy(info->bd_addr, nokia_oui, 3);
+ get_random_bytes(info->bd_addr + 3, 3);
+ }
+
+ skb->data[offset + 7] = info->bd_addr[0];
+ skb->data[offset + 6] = info->bd_addr[1];
+ skb->data[offset + 4] = info->bd_addr[2];
+ skb->data[offset + 0] = info->bd_addr[3];
+ skb->data[offset + 3] = info->bd_addr[4];
+ skb->data[offset + 2] = info->bd_addr[5];
+ }
+
+ for (count = 1; ; count++) {
+ BT_DBG("Sending firmware command %d", count);
+ init_completion(&info->fw_completion);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ break;
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+	}
+
+ /* Wait for chip warm reset */
+ retries = 100;
+ while ((!skb_queue_empty(&info->txq) ||
+ !(hci_h4p_inb(info, UART_LSR) & UART_LSR_TEMT)) &&
+ retries--) {
+ msleep(10);
+ }
+ if (!retries) {
+ dev_err(info->dev, "Transmitter not empty\n");
+ return -ETIMEDOUT;
+ }
+
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+
+ if (hci_h4p_wait_for_cts(info, 1, 100)) {
+ dev_err(info->dev, "cts didn't deassert after final speed\n");
+ return -ETIMEDOUT;
+ }
+
+ retries = 100;
+ do {
+ init_completion(&info->init_completion);
+ hci_h4p_send_alive_packet(info);
+ retries--;
+ } while (!wait_for_completion_timeout(&info->init_completion, 100) &&
+ retries > 0);
+
+ if (!retries) {
+ dev_err(info->dev, "No alive reply after speed change\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw-ti1273.c b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c
new file mode 100644
index 000000000000..f5500f71c839
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw-ti1273.c
@@ -0,0 +1,110 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+
+#include "hci_h4p.h"
+
+static struct sk_buff_head *fw_q;
+
+void hci_h4p_ti1273_parse_fw_event(struct hci_h4p_info *info,
+ struct sk_buff *skb)
+{
+ struct sk_buff *fw_skb;
+ unsigned long flags;
+
+ if (skb->data[5] != 0x00) {
+ dev_err(info->dev, "Firmware sending command failed 0x%.2x\n",
+ skb->data[5]);
+ info->fw_error = -EPROTO;
+ }
+
+ kfree_skb(skb);
+
+ fw_skb = skb_dequeue(fw_q);
+ if (fw_skb == NULL || info->fw_error) {
+ complete(&info->fw_completion);
+ return;
+ }
+
+ skb_queue_tail(&info->txq, fw_skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+
+int hci_h4p_ti1273_send_fw(struct hci_h4p_info *info,
+ struct sk_buff_head *fw_queue)
+{
+ struct sk_buff *skb;
+ unsigned long flags, time;
+
+ info->fw_error = 0;
+
+ BT_DBG("Sending firmware");
+
+ time = jiffies;
+
+ fw_q = fw_queue;
+ skb = skb_dequeue(fw_queue);
+ if (!skb)
+ return -ENODATA;
+
+ BT_DBG("Sending commands");
+ /* Check if this is bd_address packet */
+ init_completion(&info->fw_completion);
+ hci_h4p_smart_idle(info, 0);
+ skb_queue_tail(&info->txq, skb);
+ spin_lock_irqsave(&info->lock, flags);
+ hci_h4p_outb(info, UART_IER, hci_h4p_inb(info, UART_IER) |
+ UART_IER_THRI);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (!wait_for_completion_timeout(&info->fw_completion,
+ msecs_to_jiffies(2000))) {
+ dev_err(info->dev, "No reply to fw command\n");
+ return -ETIMEDOUT;
+ }
+
+ if (info->fw_error) {
+ dev_err(info->dev, "FW error\n");
+ return -EPROTO;
+ }
+
+ BT_DBG("Firmware sent in %d msecs",
+		jiffies_to_msecs(jiffies - time));
+
+ hci_h4p_set_auto_ctsrts(info, 0, UART_EFR_RTS);
+ hci_h4p_set_rts(info, 0);
+ hci_h4p_change_speed(info, BC4_MAX_BAUD_RATE);
+ if (hci_h4p_wait_for_cts(info, 1, 100)) {
+ dev_err(info->dev,
+ "cts didn't go down after final speed change\n");
+ return -ETIMEDOUT;
+ }
+ hci_h4p_set_auto_ctsrts(info, 1, UART_EFR_RTS);
+
+ return 0;
+}
diff --git a/drivers/staging/nokia_h4p/nokia_fw.c b/drivers/staging/nokia_h4p/nokia_fw.c
new file mode 100644
index 000000000000..14ba2193efb6
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_fw.c
@@ -0,0 +1,208 @@
+/*
+ * This file is part of hci_h4p bluetooth driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation.
+ *
+ * Contact: Ville Tervo <ville.tervo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include <net/bluetooth/bluetooth.h>
+
+#include "hci_h4p.h"
+
+#define FW_NAME_TI1271_PRELE "ti1273_prele.bin"
+#define FW_NAME_TI1271_LE "ti1273_le.bin"
+#define FW_NAME_TI1271 "ti1273.bin"
+#define FW_NAME_BCM2048 "bcmfw.bin"
+#define FW_NAME_CSR "bc4fw.bin"
+
+static int fw_pos;
+
+/* Firmware handling */
+static int hci_h4p_open_firmware(struct hci_h4p_info *info,
+ const struct firmware **fw_entry)
+{
+ int err;
+
+ fw_pos = 0;
+ BT_DBG("Opening firmware man_id 0x%.2x ver_id 0x%.2x",
+ info->man_id, info->ver_id);
+ switch (info->man_id) {
+ case H4P_ID_TI1271:
+ switch (info->ver_id) {
+ case 0xe1:
+ err = request_firmware(fw_entry, FW_NAME_TI1271_PRELE,
+ info->dev);
+ break;
+ case 0xd1:
+ case 0xf1:
+ err = request_firmware(fw_entry, FW_NAME_TI1271_LE,
+ info->dev);
+ break;
+ default:
+ err = request_firmware(fw_entry, FW_NAME_TI1271,
+ info->dev);
+ }
+ break;
+ case H4P_ID_CSR:
+ err = request_firmware(fw_entry, FW_NAME_CSR, info->dev);
+ break;
+ case H4P_ID_BCM2048:
+ err = request_firmware(fw_entry, FW_NAME_BCM2048, info->dev);
+ break;
+ default:
+ dev_err(info->dev, "Invalid chip type\n");
+ *fw_entry = NULL;
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static void hci_h4p_close_firmware(const struct firmware *fw_entry)
+{
+ release_firmware(fw_entry);
+}
+
+/* Read one firmware command. Each command is prefixed with a 16-bit
+ * little-endian length. Returns the length of the command, 0 when there are
+ * no more commands in the firmware, or a negative value on error.
+ */
+static int hci_h4p_read_fw_cmd(struct hci_h4p_info *info, struct sk_buff **skb,
+ const struct firmware *fw_entry, gfp_t how)
+{
+ unsigned int cmd_len;
+
+ if (fw_pos >= fw_entry->size)
+ return 0;
+
+ if (fw_pos + 2 > fw_entry->size) {
+ dev_err(info->dev, "Corrupted firmware image 1\n");
+ return -EMSGSIZE;
+ }
+
+ cmd_len = fw_entry->data[fw_pos++];
+ cmd_len += fw_entry->data[fw_pos++] << 8;
+ if (cmd_len == 0)
+ return 0;
+
+ if (fw_pos + cmd_len > fw_entry->size) {
+ dev_err(info->dev, "Corrupted firmware image 2\n");
+ return -EMSGSIZE;
+ }
+
+ *skb = bt_skb_alloc(cmd_len, how);
+ if (!*skb) {
+ dev_err(info->dev, "Cannot reserve memory for buffer\n");
+ return -ENOMEM;
+ }
+ memcpy(skb_put(*skb, cmd_len), &fw_entry->data[fw_pos], cmd_len);
+
+ fw_pos += cmd_len;
+
+ return (*skb)->len;
+}
+
+int hci_h4p_read_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
+{
+ const struct firmware *fw_entry = NULL;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ err = hci_h4p_open_firmware(info, &fw_entry);
+ if (err < 0 || !fw_entry)
+ goto err_clean;
+
+ while ((err = hci_h4p_read_fw_cmd(info, &skb, fw_entry, GFP_KERNEL))) {
+ if (err < 0 || !skb)
+ goto err_clean;
+
+ skb_queue_tail(fw_queue, skb);
+ }
+
+	/* The chip detection code has already handled the negotiation and
+	 * alive packets, so discard the first two skbs. */
+ skb = skb_dequeue(fw_queue);
+ if (!skb) {
+ err = -EMSGSIZE;
+ goto err_clean;
+ }
+ kfree_skb(skb);
+ skb = skb_dequeue(fw_queue);
+ if (!skb) {
+ err = -EMSGSIZE;
+ goto err_clean;
+ }
+ kfree_skb(skb);
+
+err_clean:
+ hci_h4p_close_firmware(fw_entry);
+ return err;
+}
+
+int hci_h4p_send_fw(struct hci_h4p_info *info, struct sk_buff_head *fw_queue)
+{
+ int err;
+
+ switch (info->man_id) {
+ case H4P_ID_CSR:
+ err = hci_h4p_bc4_send_fw(info, fw_queue);
+ break;
+ case H4P_ID_TI1271:
+ err = hci_h4p_ti1273_send_fw(info, fw_queue);
+ break;
+ case H4P_ID_BCM2048:
+ err = hci_h4p_bcm_send_fw(info, fw_queue);
+ break;
+ default:
+ dev_err(info->dev, "Don't know how to send firmware\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void hci_h4p_parse_fw_event(struct hci_h4p_info *info, struct sk_buff *skb)
+{
+ switch (info->man_id) {
+ case H4P_ID_CSR:
+ hci_h4p_bc4_parse_fw_event(info, skb);
+ break;
+ case H4P_ID_TI1271:
+ hci_h4p_ti1273_parse_fw_event(info, skb);
+ break;
+ case H4P_ID_BCM2048:
+ hci_h4p_bcm_parse_fw_event(info, skb);
+ break;
+ default:
+ dev_err(info->dev, "Don't know how to parse fw event\n");
+ info->fw_error = -EINVAL;
+ }
+}
+
+MODULE_FIRMWARE(FW_NAME_TI1271_PRELE);
+MODULE_FIRMWARE(FW_NAME_TI1271_LE);
+MODULE_FIRMWARE(FW_NAME_TI1271);
+MODULE_FIRMWARE(FW_NAME_BCM2048);
+MODULE_FIRMWARE(FW_NAME_CSR);
diff --git a/drivers/staging/nokia_h4p/nokia_uart.c b/drivers/staging/nokia_h4p/nokia_uart.c
new file mode 100644
index 000000000000..0fb57de4b750
--- /dev/null
+++ b/drivers/staging/nokia_h4p/nokia_uart.c
@@ -0,0 +1,199 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <linux/io.h>
+
+#include "hci_h4p.h"
+
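+/* OMAP UART registers are spaced at 32-bit intervals, hence the offset << 2 */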
+inline void hci_h4p_outb(struct hci_h4p_info *info, unsigned int offset, u8 val)
+{
+ __raw_writeb(val, info->uart_base + (offset << 2));
+}
+
+inline u8 hci_h4p_inb(struct hci_h4p_info *info, unsigned int offset)
+{
+ return __raw_readb(info->uart_base + (offset << 2));
+}
+
+void hci_h4p_set_rts(struct hci_h4p_info *info, int active)
+{
+ u8 b;
+
+ b = hci_h4p_inb(info, UART_MCR);
+ if (active)
+ b |= UART_MCR_RTS;
+ else
+ b &= ~UART_MCR_RTS;
+ hci_h4p_outb(info, UART_MCR, b);
+}
+
+int hci_h4p_wait_for_cts(struct hci_h4p_info *info, int active,
+ int timeout_ms)
+{
+ unsigned long timeout;
+ int state;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ for (;;) {
+ state = hci_h4p_inb(info, UART_MSR) & UART_MSR_CTS;
+ if (active) {
+ if (state)
+ return 0;
+ } else {
+ if (!state)
+ return 0;
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(1);
+ }
+}
+
+void __hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
+{
+ u8 lcr, b;
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ b = hci_h4p_inb(info, UART_EFR);
+ if (on)
+ b |= which;
+ else
+ b &= ~which;
+ hci_h4p_outb(info, UART_EFR, b);
+ hci_h4p_outb(info, UART_LCR, lcr);
+}
+
+void hci_h4p_set_auto_ctsrts(struct hci_h4p_info *info, int on, u8 which)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ __hci_h4p_set_auto_ctsrts(info, on, which);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+void hci_h4p_change_speed(struct hci_h4p_info *info, unsigned long speed)
+{
+ unsigned int divisor;
+ u8 lcr, mdr1;
+
+ BT_DBG("Setting speed %lu", speed);
+
+ if (speed >= 460800) {
+ divisor = UART_CLOCK / 13 / speed;
+ mdr1 = 3;
+ } else {
+ divisor = UART_CLOCK / 16 / speed;
+ mdr1 = 0;
+ }
+
+ /* Make sure UART mode is disabled */
+ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB); /* Set DLAB */
+ hci_h4p_outb(info, UART_DLL, divisor & 0xff); /* Set speed */
+ hci_h4p_outb(info, UART_DLM, divisor >> 8);
+ hci_h4p_outb(info, UART_LCR, lcr);
+
+ /* Make sure UART mode is enabled */
+ hci_h4p_outb(info, UART_OMAP_MDR1, mdr1);
+}
+
+int hci_h4p_reset_uart(struct hci_h4p_info *info)
+{
+ int count = 0;
+
+ /* Reset the UART */
+ hci_h4p_outb(info, UART_OMAP_SYSC, UART_SYSC_OMAP_RESET);
+ while (!(hci_h4p_inb(info, UART_OMAP_SYSS) & UART_SYSS_RESETDONE)) {
+ if (count++ > 100) {
+ dev_err(info->dev, "hci_h4p: UART reset timeout\n");
+ return -ENODEV;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+void hci_h4p_store_regs(struct hci_h4p_info *info)
+{
+ u16 lcr = 0;
+
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xBF);
+ info->dll = hci_h4p_inb(info, UART_DLL);
+ info->dlh = hci_h4p_inb(info, UART_DLM);
+ info->efr = hci_h4p_inb(info, UART_EFR);
+ hci_h4p_outb(info, UART_LCR, lcr);
+ info->mdr1 = hci_h4p_inb(info, UART_OMAP_MDR1);
+ info->ier = hci_h4p_inb(info, UART_IER);
+}
+
+void hci_h4p_restore_regs(struct hci_h4p_info *info)
+{
+ u16 lcr = 0;
+
+ hci_h4p_init_uart(info);
+
+ hci_h4p_outb(info, UART_OMAP_MDR1, 7);
+ lcr = hci_h4p_inb(info, UART_LCR);
+ hci_h4p_outb(info, UART_LCR, 0xBF);
+ hci_h4p_outb(info, UART_DLL, info->dll); /* Set speed */
+ hci_h4p_outb(info, UART_DLM, info->dlh);
+ hci_h4p_outb(info, UART_EFR, info->efr);
+ hci_h4p_outb(info, UART_LCR, lcr);
+ hci_h4p_outb(info, UART_OMAP_MDR1, info->mdr1);
+ hci_h4p_outb(info, UART_IER, info->ier);
+}
+
+void hci_h4p_init_uart(struct hci_h4p_info *info)
+{
+ u8 mcr, efr;
+
+ /* Enable and setup FIFO */
+ hci_h4p_outb(info, UART_OMAP_MDR1, 0x00);
+
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ efr = hci_h4p_inb(info, UART_EFR);
+ hci_h4p_outb(info, UART_EFR, UART_EFR_ECB);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
+ mcr = hci_h4p_inb(info, UART_MCR);
+ hci_h4p_outb(info, UART_MCR, UART_MCR_TCRTLR);
+ hci_h4p_outb(info, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
+ (3 << 6) | (0 << 4));
+ hci_h4p_outb(info, UART_LCR, 0xbf);
+ hci_h4p_outb(info, UART_TI752_TLR, 0xed);
+ hci_h4p_outb(info, UART_TI752_TCR, 0xef);
+ hci_h4p_outb(info, UART_EFR, efr);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_DLAB);
+ hci_h4p_outb(info, UART_MCR, 0x00);
+ hci_h4p_outb(info, UART_LCR, UART_LCR_WLEN8);
+ hci_h4p_outb(info, UART_IER, UART_IER_RDI);
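+	/* SYSC: autoidle (bit 0), wakeup enable (bit 2) and smart-idle
+	 * (IDLEMODE = 2), per the OMAP UART SYSC register layout */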
+ hci_h4p_outb(info, UART_OMAP_SYSC, (1 << 0) | (1 << 2) | (2 << 3));
+}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index bb152201e93d..90f1c4d7fa89 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/clk/tegra.h>
#include "nvec.h"
@@ -679,8 +678,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
nvec->rx->data[nvec->rx->pos++] = received;
else
dev_err(nvec->dev,
- "RX buffer overflow on %p: "
- "Trying to write byte %u of %u\n",
+ "RX buffer overflow on %p: Trying to write byte %u of %u\n",
nvec->rx, nvec->rx ? nvec->rx->pos : 0,
NVEC_MSG_SIZE);
break;
@@ -734,9 +732,9 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
clk_prepare_enable(nvec->i2c_clk);
- tegra_periph_reset_assert(nvec->i2c_clk);
+ reset_control_assert(nvec->rst);
udelay(2);
- tegra_periph_reset_deassert(nvec->i2c_clk);
+ reset_control_deassert(nvec->rst);
val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
@@ -837,6 +835,12 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
+ nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
+ if (IS_ERR(nvec->rst)) {
+ dev_err(nvec->dev, "failed to get controller reset\n");
+ return PTR_ERR(nvec->rst);
+ }
+
nvec->base = base;
nvec->irq = res->start;
nvec->i2c_clk = i2c_clk;
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index e880518935fb..e271375053fa 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -109,7 +110,8 @@ struct nvec_msg {
* @irq: The IRQ of the I2C device
* @i2c_addr: The address of the I2C slave
* @base: The base of the memory mapped region of the I2C device
- * @clk: The clock of the I2C device
+ * @i2c_clk: The clock of the I2C device
+ * @rst: The reset of the I2C device
* @notifier_list: Notifiers to be called on received messages, see
* nvec_register_notifier()
* @rx_data: Received messages that have to be processed
@@ -139,6 +141,7 @@ struct nvec_chip {
int i2c_addr;
void __iomem *base;
struct clk *i2c_clk;
+ struct reset_control *rst;
struct atomic_notifier_head notifier_list;
struct list_head rx_data, tx_data;
struct notifier_block nvec_status_notifier;
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 06dbb02085a9..45b2f1308e01 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
struct serio *ser_dev;
char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
if (ser_dev == NULL)
return -ENOMEM;
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..8b8ce7293c52 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,17 +464,110 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
+/**
+ * struct octeon_temp_buffer - a bounce buffer for USB transfers
+ * @temp_buffer: the newly allocated temporary buffer (including meta-data)
+ * @orig_buffer: the original buffer passed by the USB stack
+ * @data: the newly allocated temporary buffer (excluding meta-data)
+ *
+ * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
+ * the buffer is too short, we need to allocate a temporary one, and this struct
+ * represents it.
+ */
+struct octeon_temp_buffer {
+ void *temp_buffer;
+ void *orig_buffer;
+ u8 data[0];
+};
+
+/**
+ * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
+ * (if needed)
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ *
+ * This function allocates a temporary bounce buffer whenever it's needed
+ * due to HW limitations.
+ */
+static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
+ struct octeon_temp_buffer *temp;
+
+ if (urb->num_sgs || urb->sg ||
+ (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
+ !(urb->transfer_buffer_length % sizeof(u32)))
+ return 0;
+
+ temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
+ sizeof(*temp), mem_flags);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->temp_buffer = temp;
+ temp->orig_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+/**
+ * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
+ * @urb: URB.
+ *
+ * Frees a buffer allocated by octeon_alloc_temp_buffer().
+ */
+static void octeon_free_temp_buffer(struct urb *urb)
+{
+ struct octeon_temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
+ data);
+ if (usb_urb_dir_in(urb))
+ memcpy(temp->orig_buffer, urb->transfer_buffer,
+ urb->actual_length);
+ urb->transfer_buffer = temp->orig_buffer;
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ kfree(temp->temp_buffer);
+}
+
+/**
+ * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ * @mem_flags: Memory allocation flags.
+ */
+static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = octeon_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ octeon_free_temp_buffer(urb);
+
+ return ret;
+}
+
+/**
+ * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
+ * @hcd: USB HCD structure.
+ * @urb: URB.
+ */
+static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ octeon_free_temp_buffer(urb);
}
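
As a concrete illustration of the alignment rule enforced by octeon_alloc_temp_buffer(): a 64-byte transfer is already a whole number of 32-bit words and is passed through untouched, while a 63-byte transfer is copied into a bounce buffer padded up to 64 bytes. A hedged sketch of just that decision, illustration only and not part of the driver:

/*
 * Illustration: the same word-alignment test the allocator above applies
 * when deciding whether an URB needs the temporary bounce buffer.
 */
static bool example_needs_bounce(const struct urb *urb)
{
	if (urb->num_sgs || urb->sg)				/* scatter-gather handled elsewhere */
		return false;
	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)	/* caller already mapped it */
		return false;
	/* only lengths that are not a multiple of 4 bytes need the copy */
	return urb->transfer_buffer_length % sizeof(u32) != 0;
}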
/**
@@ -582,37 +668,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +683,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -681,7 +711,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
* USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
*/
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.u64 =
+ __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hrst = 0;
usbn_clk_ctl.s.prst = 0;
@@ -767,7 +798,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* USBP control and status register:
* USBN_USBP_CTL_STATUS[ATE_RESET] = 1
*/
- usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_USBP_CTL_STATUS(usb->index));
usbn_usbp_ctl_status.s.ate_reset = 1;
__cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
usbn_usbp_ctl_status.u64);
@@ -834,7 +866,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
if (OCTEON_IS_MODEL(OCTEON_CN31XX))
usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
/* Only use one channel with non DMA */
usb->idle_hardware_channels = 0x1;
@@ -859,7 +892,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
*/
{
union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GUSBCFG(usb->index));
usbcx_gusbcfg.s.toutcal = 0;
usbcx_gusbcfg.s.ddrsel = 0;
usbcx_gusbcfg.s.usbtrdtim = 0x5;
@@ -877,7 +911,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
union cvmx_usbcx_gintmsk usbcx_gintmsk;
int channel;
- usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GINTMSK(usb->index));
usbcx_gintmsk.s.otgintmsk = 1;
usbcx_gintmsk.s.modemismsk = 1;
usbcx_gintmsk.s.hchintmsk = 1;
@@ -893,7 +928,8 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* later.
*/
for (channel = 0; channel < 8; channel++)
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
}
{
@@ -903,26 +939,30 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* 1. Program the host-port interrupt-mask field to unmask,
* USBC_GINTMSK[PRTINT] = 1
*/
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
- disconnintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, disconnintmsk, 1);
/*
* 2. Program the USBC_HCFG register to select full-speed host
* or high-speed host.
*/
{
union cvmx_usbcx_hcfg usbcx_hcfg;
- usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCFG(usb->index));
usbcx_hcfg.s.fslssupp = 0;
usbcx_hcfg.s.fslspclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCFG(usb->index),
+ usbcx_hcfg.u32);
}
/*
* 3. Program the port power bit to drive VBUS on the USB,
* USBC_HPRT[PRTPWR] = 1
*/
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
+ union cvmx_usbcx_hprt, prtpwr, 1);
/*
* Steps 4-15 from the manual are done later in the port enable
@@ -955,7 +995,8 @@ static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
return -EBUSY;
/* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_CLK_CTL(usb->index));
usbn_clk_ctl.s.enable = 1;
usbn_clk_ctl.s.por = 1;
usbn_clk_ctl.s.hclk_rst = 1;
@@ -979,7 +1020,8 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
{
union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
/*
* If the port is already enabled then just return. We don't need to do
@@ -989,12 +1031,12 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
return 0;
/* If there is nothing plugged into the port then fail immediately */
- if (!usb->usbcx_hprt.s.prtconnsts) {
+ if (!usb->usbcx_hprt.s.prtconnsts)
return -ETIMEDOUT;
- }
/* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtrst, 1);
/*
* Wait at least 50ms (high speed), or 10ms (full speed) for the reset
@@ -1003,26 +1045,30 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
mdelay(50);
/* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtrst, 0);
/* Wait for the USBC_HPRT[PRTENA]. */
- if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
- prtena, ==, 1, 100000))
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index),
+ union cvmx_usbcx_hprt, prtena, ==, 1, 100000))
return -ETIMEDOUT;
/*
* Read the port speed field to get the enumerated speed,
* USBC_HPRT[PRTSPD].
*/
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GHWCFG3(usb->index));
/*
* 13. Program the USBC_GRXFSIZ register to select the size of the
* receive FIFO (25%).
*/
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index),
+ union cvmx_usbcx_grxfsiz, rxfdep,
+ usbcx_ghwcfg3.s.dfifodepth / 4);
/*
* 14. Program the USBC_GNPTXFSIZ register to select the size and the
* start address of the non-periodic transmit FIFO for nonperiodic
@@ -1030,10 +1076,12 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
*/
{
union cvmx_usbcx_gnptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GNPTXFSIZ(usb->index));
siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index),
+ siz.u32);
}
/*
* 15. Program the USBC_HPTXFSIZ register to select the size and start
@@ -1042,18 +1090,25 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
*/
{
union cvmx_usbcx_hptxfsiz siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPTXFSIZ(usb->index));
siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index),
+ siz.u32);
}
/* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl,
txfflsh, ==, 0, 100);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
+ union cvmx_usbcx_grstctl,
rxfflsh, ==, 0, 100);
return 0;
@@ -1073,7 +1128,8 @@ static int cvmx_usb_enable(struct cvmx_usb_state *usb)
static int cvmx_usb_disable(struct cvmx_usb_state *usb)
{
/* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtena, 1);
return 0;
}
@@ -1089,20 +1145,23 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb)
*
* Returns: Port status information
*/
-static struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *usb)
+static struct cvmx_usb_port_status cvmx_usb_get_status(
+ struct cvmx_usb_state *usb)
{
union cvmx_usbcx_hprt usbc_hprt;
struct cvmx_usb_port_status result;
memset(&result, 0, sizeof(result));
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
result.port_enabled = usbc_hprt.s.prtena;
result.port_over_current = usbc_hprt.s.prtovrcurract;
result.port_powered = usbc_hprt.s.prtpwr;
result.port_speed = usbc_hprt.s.prtspd;
result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change = (result.connected != usb->port_status.connected);
+ result.connect_change =
+ (result.connected != usb->port_status.connected);
return result;
}
@@ -1197,7 +1256,8 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
(multi_count != 0)))
return NULL;
- if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ if (unlikely((hub_device_addr < 0) ||
+ (hub_device_addr > MAX_USB_ADDRESS)))
return NULL;
if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
return NULL;
@@ -1262,7 +1322,8 @@ static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
uint64_t address;
uint32_t *ptr;
- rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ rx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GRXSTSPH(usb->index));
/* Only read data if IN data is there */
if (rx_status.s.pktsts != 2)
return;
@@ -1312,7 +1373,8 @@ static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
while (available && (fifo->head != fifo->tail)) {
int i = fifo->tail;
const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
+ usb->index) ^ 4;
int words = available;
/* Limit the amount of data to what the SW fifo has */
@@ -1336,7 +1398,8 @@ static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ cvmx_read64_uint64(
+ CVMX_USBNX_DMA0_INB_CHN0(usb->index));
words -= 3;
}
cvmx_write64_uint32(csr_address, *ptr++);
@@ -1360,20 +1423,32 @@ static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
{
if (usb->periodic.head != usb->periodic.tail) {
union cvmx_usbcx_hptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ tx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic,
+ tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ ptxfempmsk, 1);
else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ ptxfempmsk, 0);
}
if (usb->nonperiodic.head != usb->nonperiodic.tail) {
union cvmx_usbcx_gnptxsts tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ tx_status.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
+ tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ nptxfempmsk, 1);
else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk,
+ nptxfempmsk, 0);
}
return;
@@ -1394,12 +1469,14 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
struct cvmx_usb_tx_fifo *fifo;
/* We only need to fill data on outbound channels */
- hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
return;
/* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index));
if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
return;
@@ -1407,7 +1484,8 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
* Find out how many bytes we need to fill and convert it into 32bit
* words.
*/
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
if (!usbc_hctsiz.s.xfersize)
return;
@@ -1447,11 +1525,13 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
node);
union cvmx_usb_control_header *header =
cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
int packets_to_transfer;
union cvmx_usbcx_hctsizx usbc_hctsiz;
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
@@ -1499,12 +1579,14 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ union cvmx_usbcx_hcspltx, compsplt, 1);
break;
case CVMX_USB_STAGE_STATUS:
usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
@@ -1512,11 +1594,13 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
((header->s.request_type & 0x80) ?
CVMX_USB_DIRECTION_OUT :
CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
+ union cvmx_usbcx_hcspltx, compsplt, 1);
break;
}
@@ -1534,10 +1618,12 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is zero
* we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) /
+ pipe->max_packet;
if (packets_to_transfer == 0)
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ else if ((packets_to_transfer > 1) &&
+ (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must be
* restarted between every packet for IN transactions, so there
@@ -1557,7 +1643,8 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
usbc_hctsiz.s.xfersize = bytes_to_transfer;
usbc_hctsiz.s.pktcnt = packets_to_transfer;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
+ usbc_hctsiz.u32);
return;
}
@@ -1595,8 +1682,11 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
union cvmx_usbcx_haintmsk usbc_haintmsk;
/* Clear all channel status bits */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index),
+ usbc_hcint.u32);
usbc_hcintmsk.u32 = 0;
usbc_hcintmsk.s.chhltdmsk = 1;
@@ -1643,14 +1733,17 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int bytes_to_transfer = transaction->buffer_length -
+ transaction->actual_bytes;
/*
* ISOCHRONOUS transactions store each individual transfer size
* in the packet structure, not the global buffer_length
*/
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+ bytes_to_transfer =
+ transaction->iso_packets[0].length -
+ transaction->actual_bytes;
/*
* We need to do split transactions when we are talking to non
@@ -1665,16 +1758,19 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*/
if ((transaction->stage&1) == 0) {
if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ pipe->split_sc_frame =
+ (usb->frame_number + 1) & 0x7f;
else
- pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ pipe->split_sc_frame =
+ (usb->frame_number + 2) & 0x7f;
} else
pipe->split_sc_frame = -1;
usbc_hcsplt.s.spltena = 1;
usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+ usbc_hcsplt.s.compsplt = (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
/*
* SPLIT transactions can only ever transmit one data
@@ -1690,8 +1786,10 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* begin/middle/end of the data or all
*/
if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_ISOCHRONOUS)) {
/*
* Clear the split complete frame number as
* there isn't going to be a split complete
@@ -1743,7 +1841,8 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* Round MAX_TRANSFER_BYTES to a multiple of out packet
* size
*/
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer = MAX_TRANSFER_BYTES /
+ pipe->max_packet;
bytes_to_transfer *= pipe->max_packet;
}
@@ -1751,10 +1850,14 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is
* zero we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ packets_to_transfer =
+ (bytes_to_transfer + pipe->max_packet - 1) /
+ pipe->max_packet;
if (packets_to_transfer == 0)
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ else if ((packets_to_transfer > 1) &&
+ (usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must
* be restarted between every packet for IN
@@ -1762,14 +1865,16 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* packets in a row
*/
packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
} else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
/*
* Limit the number of packet and data transferred to
* what the hardware can handle
*/
packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ bytes_to_transfer = packets_to_transfer *
+ pipe->max_packet;
}
usbc_hctsiz.s.xfersize = bytes_to_transfer;
@@ -1783,8 +1888,11 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
usbc_hctsiz.s.dopng = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCSPLTX(channel, usb->index),
+ usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel,
+ usb->index), usbc_hctsiz.u32);
}
/* Setup the Host Channel Characteristics Register */
@@ -1815,11 +1923,14 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
/* Set the rest of the endpoint specific settings */
usbc_hcchar.s.devaddr = pipe->device_addr;
usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.lspddev =
+ (pipe->device_speed == CVMX_USB_SPEED_LOW);
usbc_hcchar.s.epdir = pipe->transfer_dir;
usbc_hcchar.s.epnum = pipe->endpoint_num;
usbc_hcchar.s.mps = pipe->max_packet;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index),
+ usbc_hcchar.u32);
}
/* Do transaction type specific fixups as needed */
@@ -1838,22 +1949,33 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*/
if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ union cvmx_usbcx_hctsizx,
+ pid, 0);
else /* Need MDATA */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
+ USB_SET_FIELD32(
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index),
+ union cvmx_usbcx_hctsizx,
+ pid, 3);
}
}
break;
}
{
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 =
+ __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index))};
transaction->xfersize = usbc_hctsiz.s.xfersize;
transaction->pktcnt = usbc_hctsiz.s.pktcnt;
}
/* Remember when we start a split transaction */
if (__cvmx_usb_pipe_needs_split(usb, pipe))
usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, chena, 1);
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
__cvmx_usb_fill_tx_fifo(usb, channel);
return;
@@ -1869,16 +1991,22 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
*
* Returns: Pipe or NULL if none are ready
*/
-static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_state *usb, struct list_head *list, uint64_t current_frame)
+static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(
+ struct cvmx_usb_state *usb,
+ struct list_head *list,
+ uint64_t current_frame)
{
struct cvmx_usb_pipe *pipe;
list_for_each_entry(pipe, list, node) {
struct cvmx_usb_transaction *t =
- list_first_entry(&pipe->transactions, typeof(*t), node);
+ list_first_entry(&pipe->transactions, typeof(*t),
+ node);
if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
(pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ ((pipe->split_sc_frame == -1) ||
+ ((((int)current_frame - (int)pipe->split_sc_frame)
+ & 0x7f) < 0x40)) &&
(!usb->active_split || (usb->active_split == t))) {
CVMX_PREFETCH(pipe, 128);
CVMX_PREFETCH(t, 0);
@@ -1928,14 +2056,26 @@ static void __cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
* way we are sure that the periodic data is sent in the
* beginning of the frame
*/
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ usb->frame_number);
if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_INTERRUPT,
+ usb->frame_number);
}
if (likely(!pipe)) {
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_CONTROL,
+ usb->frame_number);
if (likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ pipe = __cvmx_usb_find_ready_pipe(usb,
+ usb->active_pipes +
+ CVMX_USB_TRANSFER_BULK,
+ usb->frame_number);
}
if (!pipe)
break;
@@ -1949,7 +2089,8 @@ done:
* future that might need to be scheduled
*/
need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ for (ttype = CVMX_USB_TRANSFER_CONTROL;
+ ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
if (pipe->next_tx_frame > usb->frame_number) {
need_sof = 1;
@@ -1957,7 +2098,8 @@ done:
}
}
}
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ union cvmx_usbcx_gintmsk, sofmsk, need_sof);
return;
}
@@ -2008,10 +2150,13 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
/* Recalculate the transfer size by adding up each packet */
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status == CVMX_USB_COMPLETE_SUCCESS) {
+ if (iso_packet[i].status ==
+ CVMX_USB_COMPLETE_SUCCESS) {
urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length = iso_packet[i].length;
- urb->actual_length += urb->iso_frame_desc[i].actual_length;
+ urb->iso_frame_desc[i].actual_length =
+ iso_packet[i].length;
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
} else {
dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
i, urb->number_of_packets,
@@ -2073,10 +2218,11 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
* @complete_code:
* Completion code
*/
-static void __cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_complete complete_code)
+static void __cvmx_usb_perform_complete(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_complete complete_code)
{
/* If this was a split then clear our split in progress marker */
if (usb->active_split == transaction)
@@ -2095,7 +2241,8 @@ static void __cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
* If there are more ISOs pending and we succeeded, schedule the
* next one
*/
- if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ if ((transaction->iso_number_packets > 1) &&
+ (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
/* No bytes transferred for this packet as of yet */
transaction->actual_bytes = 0;
/* One less ISO waiting to transfer */
@@ -2143,16 +2290,17 @@ done:
*
* Returns: Transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- enum cvmx_usb_transfer type,
- uint64_t buffer,
- int buffer_length,
- uint64_t control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- struct urb *urb)
+static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ enum cvmx_usb_transfer type,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ struct urb *urb)
{
struct cvmx_usb_transaction *transaction;
@@ -2204,9 +2352,10 @@ static struct cvmx_usb_transaction *__cvmx_usb_submit_transaction(struct cvmx_us
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
return __cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
urb->transfer_dma,
@@ -2228,9 +2377,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(struct cvmx_usb_state *
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
return __cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_INTERRUPT,
@@ -2253,9 +2403,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(struct cvmx_usb_st
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_control(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
int buffer_length = urb->transfer_buffer_length;
uint64_t control_header = urb->setup_dma;
@@ -2285,9 +2436,10 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control(struct cvmx_usb_stat
*
* Returns: A submitted transaction or NULL on failure.
*/
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
+static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
+ struct cvmx_usb_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct urb *urb)
{
struct cvmx_usb_iso_packet *packets;
@@ -2333,17 +2485,22 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
CVMX_SYNCW;
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
/*
* If the channel isn't enabled then the transaction already
* completed.
*/
if (usbc_hcchar.s.chena) {
usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(pipe->channel,
+ usb->index),
+ usbc_hcchar.u32);
}
}
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_CANCEL);
return 0;
}
@@ -2407,7 +2564,8 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
int frame_number;
union cvmx_usbcx_hfnum usbc_hfnum;
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HFNUM(usb->index));
frame_number = usbc_hfnum.s.frnum;
return frame_number;
@@ -2435,10 +2593,12 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
int buffer_space_left;
/* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCINTX(channel, usb->index));
if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
/*
@@ -2446,7 +2606,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* interrupt IN transfers to get stuck until we do a
* write of HCCHARX without changing things
*/
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
return 0;
}
@@ -2460,9 +2623,15 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
/* Disable all interrupts except CHHLTD */
hcintmsk.u32 = 0;
hcintmsk.s.chhltdmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel,
+ usb->index),
+ hcintmsk.u32);
usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
return 0;
} else if (usbc_hcint.s.xfercompl) {
/*
@@ -2470,7 +2639,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Channel halt isn't needed.
*/
} else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n",
+ usb->index, channel);
return 0;
}
}
@@ -2493,7 +2663,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
CVMX_PREFETCH(pipe, 128);
if (!pipe)
return 0;
- transaction = list_first_entry(&pipe->transactions, typeof(*transaction),
+ transaction = list_first_entry(&pipe->transactions,
+ typeof(*transaction),
node);
CVMX_PREFETCH(transaction, 0);
@@ -2508,8 +2679,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Read the channel config info so we can figure out how much data
* transferred
*/
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HCTSIZX(channel, usb->index));
/*
* Calculating the number of bytes successfully transferred is dependent
@@ -2523,7 +2696,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* the current value of xfersize from its starting value and we
* know how many bytes were written to the buffer
*/
- bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ bytes_this_transfer = transaction->xfersize -
+ usbc_hctsiz.s.xfersize;
} else {
/*
* OUT transactions don't decrement xfersize. Instead pktcnt is
@@ -2541,7 +2715,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
}
/* Figure out how many bytes were in the last packet of the transfer */
if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ bytes_in_last_packet = bytes_this_transfer -
+ (packets_processed - 1) * usbc_hcchar.s.mps;
else
bytes_in_last_packet = bytes_this_transfer;
@@ -2561,9 +2736,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
*/
transaction->actual_bytes += bytes_this_transfer;
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ buffer_space_left = transaction->iso_packets[0].length -
+ transaction->actual_bytes;
else
- buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+ buffer_space_left = transaction->buffer_length -
+ transaction->actual_bytes;
/*
* We need to remember the PID toggle state for the next transaction.
@@ -2589,7 +2766,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* the actual bytes transferred
*/
pipe->pid_toggle = 0;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_STALL);
} else if (usbc_hcint.s.xacterr) {
/*
* We know at least one packet worked if we get a ACK or NAK.
@@ -2604,7 +2782,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* something wrong with the transfer. For example, PID
* toggle errors cause these
*/
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_XACTERR);
} else {
/*
* If this was a split then clear our split in progress
@@ -2620,12 +2799,15 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
pipe->split_sc_frame = -1;
pipe->next_tx_frame += pipe->interval;
if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ pipe->next_tx_frame =
+ usb->frame_number + pipe->interval -
+ (usb->frame_number -
+ pipe->next_tx_frame) % pipe->interval;
}
} else if (usbc_hcint.s.bblerr) {
/* Babble Error (BblErr) */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_BABBLEERR);
} else if (usbc_hcint.s.datatglerr) {
/* We'll retry the exact same transaction again */
transaction->retries++;
@@ -2642,8 +2824,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* If there is more data to go then we need to try
* again. Otherwise this transaction is complete
*/
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
} else {
/*
* Split transactions retry the split complete 4 times
@@ -2681,12 +2866,14 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
/* This should be impossible */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction, CVMX_USB_COMPLETE_ERROR);
break;
case CVMX_USB_STAGE_SETUP:
pipe->pid_toggle = 1;
if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
else {
union cvmx_usb_control_header *header =
cvmx_phys_to_ptr(transaction->control_header);
@@ -2708,7 +2895,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
break;
case CVMX_USB_STAGE_DATA:
if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
/*
* For setup OUT data that are splits,
* the hardware doesn't appear to count
@@ -2717,31 +2905,45 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
*/
if (!usbc_hcchar.s.epdir) {
if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes += buffer_space_left;
+ transaction->actual_bytes +=
+ buffer_space_left;
else
- transaction->actual_bytes += pipe->max_packet;
+ transaction->actual_bytes +=
+ pipe->max_packet;
}
- } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ } else if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
} else {
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
}
break;
case CVMX_USB_STAGE_STATUS:
if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
else
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
break;
case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb, pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
break;
}
break;
@@ -2754,27 +2956,49 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* data is needed
*/
if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
else {
- if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ if (buffer_space_left &&
+ (bytes_in_last_packet ==
+ pipe->max_packet))
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL;
else {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if (transaction->type ==
+ CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
}
} else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ if ((pipe->device_speed ==
+ CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) &&
(usbc_hcint.s.nak))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ pipe->flags |=
+ __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
+ if (transaction->type ==
+ CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
}
break;
@@ -2795,28 +3019,45 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* complete. Otherwise start it again to
* send the next 188 bytes
*/
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ if (!buffer_space_left ||
+ (bytes_this_transfer < 188)) {
pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
} else {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
/*
* We are in the incoming data
* phase. Keep getting data
* until we run out of space or
* get a small packet
*/
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet <
+ pipe->max_packet)) {
+ pipe->next_tx_frame +=
+ pipe->interval;
+ __cvmx_usb_perform_complete(
+ usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
} else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
}
} else {
pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ __cvmx_usb_perform_complete(usb,
+ pipe,
+ transaction,
+ CVMX_USB_COMPLETE_SUCCESS);
}
break;
}
@@ -2836,8 +3077,10 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
transaction->stage &= ~1;
pipe->next_tx_frame += pipe->interval;
if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ pipe->next_tx_frame = usb->frame_number +
+ pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) %
+ pipe->interval;
} else {
struct cvmx_usb_port_status port;
port = cvmx_usb_get_status(usb);
@@ -2849,7 +3092,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* We get channel halted interrupts with no result bits
* sets when the cable is unplugged
*/
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ __cvmx_usb_perform_complete(usb, pipe, transaction,
+ CVMX_USB_COMPLETE_ERROR);
}
}
return 0;
@@ -2932,9 +3176,11 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb)
*/
octeon_usb_port_callback(usb);
/* Clear the port change bits */
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb,
+ CVMX_USBCX_HPRT(usb->index));
usbc_hprt.s.prtena = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
+ usbc_hprt.u32);
}
if (usbc_gintsts.s.hchint) {
/*
@@ -3078,13 +3324,15 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
}
pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), speed,
- le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff,
+ le16_to_cpu(ep->desc.wMaxPacketSize)
+ & 0x7ff,
transfer_type,
usb_pipein(urb->pipe) ?
CVMX_USB_DIRECTION_IN :
CVMX_USB_DIRECTION_OUT,
urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize) >> 11) & 0x3,
+ (le16_to_cpu(ep->desc.wMaxPacketSize)
+ >> 11) & 0x3,
split_device, split_port);
if (!pipe) {
spin_unlock_irqrestore(&priv->lock, flags);
@@ -3099,7 +3347,8 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
dev_dbg(dev, "Submit isochronous to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
/*
* Allocate a structure to use for our private list of
* isochronous packets.
@@ -3111,9 +3360,12 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
int i;
/* Fill the list with the data from the URB */
for (i = 0; i < urb->number_of_packets; i++) {
- iso_packet[i].offset = urb->iso_frame_desc[i].offset;
- iso_packet[i].length = urb->iso_frame_desc[i].length;
- iso_packet[i].status = CVMX_USB_COMPLETE_ERROR;
+ iso_packet[i].offset =
+ urb->iso_frame_desc[i].offset;
+ iso_packet[i].length =
+ urb->iso_frame_desc[i].length;
+ iso_packet[i].status =
+ CVMX_USB_COMPLETE_ERROR;
}
/*
* Store a pointer to the list in the URB setup_packet
@@ -3135,17 +3387,20 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
break;
case PIPE_INTERRUPT:
dev_dbg(dev, "Submit interrupt to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
break;
case PIPE_CONTROL:
dev_dbg(dev, "Submit control to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
break;
case PIPE_BULK:
dev_dbg(dev, "Submit bulk to %d.%d\n",
- usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe));
transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
break;
}
@@ -3176,7 +3431,9 @@ static void octeon_usb_urb_dequeue_work(unsigned long arg)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
+ struct urb *urb,
+ int status)
{
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3196,7 +3453,8 @@ static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int stat
return 0;
}
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
struct device *dev = hcd->self.controller;
@@ -3279,7 +3537,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_CONNECTION\n");
/* Clears driver's internal connect status change flag */
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_RESET:
@@ -3288,7 +3547,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Clears the driver's internal Port Reset Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_ENABLE:
@@ -3298,7 +3558,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Change flag.
*/
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case USB_PORT_FEAT_C_SUSPEND:
@@ -3313,7 +3574,8 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " C_OVER_CURRENT\n");
/* Clears the driver's overcurrent Change flag */
spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status = cvmx_usb_get_status(&priv->usb);
+ priv->usb.port_status =
+ cvmx_usb_get_status(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
break;
default:
@@ -3431,7 +3693,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3446,17 +3707,79 @@ static const struct hc_driver octeon_hc_driver = {
.get_frame_number = octeon_usb_get_frame_number,
.hub_status_data = octeon_usb_hub_status_data,
.hub_control = octeon_usb_hub_control,
+ .map_urb_for_dma = octeon_map_urb_for_dma,
+ .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illegal USBN \"refclk-frequency\" %u\n",
+ clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
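
Both selections above are plain bit arithmetic: the port index is taken from bit 44 of the register block's physical address, and the hard-coded fallback hwirqs work out to 56 for port 0 and (1 << 6) + 17 = 81 for port 1. A small sketch with a made-up resource address:

	/* Hypothetical resource start with bit 44 set, i.e. the second controller. */
	u64 res_start = 0x100000000000ULL;
	int usb_num = (res_start >> 44) & 1;			/* -> 1 */

	/* Fallback used when the device tree omits the interrupt. */
	irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;	/* 81 for port 1, 56 for port 0 */
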
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3788,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3475,10 +3818,11 @@ static int octeon_usb_driver_probe(struct device *dev)
spin_lock_init(&priv->lock);
- tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
+ tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work,
+ (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,7 +3836,7 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
@@ -3500,14 +3844,15 @@ static int octeon_usb_driver_probe(struct device *dev)
}
device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3525,85 +3870,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index bdaec8d2ca0c..2a98a2153e16 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -33,10 +33,6 @@
* driver will use this memory instead of kernel memory for pools. This
* allows 32bit userspace application to access the buffers, but also
* requires all received packets to be copied.
- * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
- * This kernel config option allows the user to control the number of
- * packet and work queue buffers allocated by the driver. If this is zero,
- * the driver uses the default from below.
* USE_SKBUFFS_IN_HW
* Tells the driver to populate the packet buffers with kernel skbuffs.
* This allows the driver to receive packets without copying them. It also
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 83b103091cf2..3f067f189b3d 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -127,23 +127,21 @@ static void cvm_oct_adjust_link(struct net_device *dev)
link_info.s.link_up = priv->last_link ? 1 : 0;
link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
link_info.s.speed = priv->phydev->speed;
- cvmx_helper_link_set( priv->port, link_info);
+ cvmx_helper_link_set(priv->port, link_info);
if (priv->last_link) {
netif_carrier_on(dev);
if (priv->queue != -1)
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, queue %2d\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port, priv->queue);
+ "port %2d, queue %2d\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port, priv->queue);
else
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, POW\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port);
+ "port %2d, POW\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port);
} else {
netif_carrier_off(dev);
printk_ratelimited("%s: Link down\n", dev->name);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 199059d64c9b..bf666b023190 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -30,6 +30,7 @@
#include <asm/octeon/octeon.h>
+#include "ethernet-mem.h"
#include "ethernet-defines.h"
#include <asm/octeon/cvmx-fpa.h>
@@ -79,10 +80,10 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
} while (memory);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many skbuffs (%d)\n",
+ pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Freeing of pool %u is missing %d skbuffs\n",
+ pr_warn("Freeing of pool %u is missing %d skbuffs\n",
pool, elements);
}
@@ -113,7 +114,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
if (unlikely(memory == NULL)) {
- pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
+ pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
}
@@ -146,10 +147,10 @@ static void cvm_oct_free_hw_memory(int pool, int size, int elements)
} while (fpa);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many buffers (%d)\n",
+ pr_warn("Freeing of pool %u had too many buffers (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
+ pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
pool, elements);
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index ea53af30dfa7..0ec0da328215 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -43,7 +43,7 @@
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
-DEFINE_SPINLOCK(global_register_lock);
+static DEFINE_SPINLOCK(global_register_lock);
static int number_rgmii_ports;
@@ -72,7 +72,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
* If the 10Mbps preamble workaround is supported and we're
* at 10Mbps we may need to do some special checking.
*/
- if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {
+ if (USE_10MBPS_PREAMBLE_WORKAROUND &&
+ (link_info.s.speed == 10)) {
/*
* Read the GMXX_RXX_INT_REG[PCTERR] bit and
@@ -166,9 +167,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
if (use_global_register_lock)
spin_unlock_irqrestore(&global_register_lock, flags);
- else {
+ else
mutex_unlock(&priv->phydev->bus->mdio_lock);
- }
if (priv->phydev == NULL) {
/* Tell core. */
@@ -232,8 +232,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -274,8 +276,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -327,7 +331,8 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
{
- struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
+ struct octeon_ethernet *priv =
+ container_of(work, struct octeon_ethernet, port_work);
cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 47541e1608f3..8ca55c4e9db2 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -95,7 +95,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
-void cvm_oct_free_tx_skbs(struct net_device *dev)
+static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
int32_t skb_to_free;
int qos, queues_per_port;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 089dc4b9efd4..ff7214aac9dd 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -55,17 +55,11 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
-#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
- && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
-int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
-#else
-int num_packet_buffers = 1024;
-#endif
+static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used unless\n"
- "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
+ "\tFPA. By default, 1024 packet buffers are used.\n");
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 9360e22e0739..4cf3884070fa 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -58,7 +58,7 @@ struct octeon_ethernet {
/* Last negotiated link state */
uint64_t link_info;
/* Called periodically to check link status */
- void (*poll) (struct net_device *dev);
+ void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
struct work_struct port_work; /* may be unused. */
struct device_node *of_node;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index e2663b189c66..aec98958f795 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -37,7 +37,7 @@
/* Load Delay Locked Loop (DLL) settings for clock delay */
#define MEM_DLL_CLOCK_DELAY (1<<0)
/* Memory controller power down function */
-#define MEM_POWER_DOWN (1<<8)
+#define MEM_POWER_DOWN (1<<8)
/* Memory controller software reset */
#define MEM_SOFT_RESET (1<<0)
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ce0af9977d8..10c0a96ce8bb 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -217,7 +217,7 @@ static int oz_set_active_pd(const u8 *addr)
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
- memcpy(g_cdev.active_addr, addr, ETH_ALEN);
+ ether_addr_copy(g_cdev.active_addr, addr);
old_pd = g_cdev.active_pd;
g_cdev.active_pd = pd;
spin_unlock_bh(&g_cdev.lock);
@@ -283,7 +283,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
- memcpy(addr, g_cdev.active_addr, ETH_ALEN);
+ ether_addr_copy(addr, g_cdev.active_addr);
spin_unlock_bh(&g_cdev.lock);
if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
return -EFAULT;
@@ -448,7 +448,7 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
}
spin_lock(&g_cdev.lock);
if ((g_cdev.active_pd == NULL) &&
- (memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
+ ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
oz_dbg(ON, "Active PD arrived\n");
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index efaf26f734c3..b3d401ae41b4 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -354,7 +354,8 @@ static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
* disabled.
* Context: softirq or process
*/
-static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
+ struct urb *urb)
{
struct oz_urb_link *urbl;
struct list_head *e;
@@ -1986,8 +1987,7 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)
- __constant_cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = (__force __u16)cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
@@ -2181,7 +2181,7 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
break;
case GetHubStatus:
oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
- put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
+ put_unaligned(cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 743695077346..10f1b3ac8832 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
@@ -173,7 +174,7 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
pd->last_rx_pkt_num = 0xffffffff;
oz_pd_set_state(pd, OZ_PD_S_IDLE);
pd->max_tx_size = OZ_MAX_TX_SIZE;
- memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(pd->mac_addr, mac_addr);
if (0 != oz_elt_buf_init(&pd->elt_buff)) {
kfree(pd);
pd = NULL;
@@ -284,11 +285,11 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
ai->app_id);
break;
}
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
pd->total_apps |= (1<<ai->app_id);
if (resume)
pd->paused_apps &= ~(1<<ai->app_id);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
}
}
return rc;
@@ -304,14 +305,14 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (pause) {
pd->paused_apps |= (1<<ai->app_id);
} else {
pd->total_apps &= ~(1<<ai->app_id);
pd->paused_apps &= ~(1<<ai->app_id);
}
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
ai->stop(pd, pause);
}
}
@@ -349,17 +350,17 @@ void oz_pd_stop(struct oz_pd *pd)
oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
stop_apps = pd->total_apps;
pd->total_apps = 0;
pd->paused_apps = 0;
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
oz_services_stop(pd, stop_apps, 0);
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
/* Remove from PD list.*/
list_del(&pd->link);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_pd_put(pd);
}
@@ -372,9 +373,9 @@ int oz_pd_sleep(struct oz_pd *pd)
int do_stop = 0;
u16 stop_apps;
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
return 0;
}
if (pd->keep_alive && pd->session_id)
@@ -383,7 +384,7 @@ int oz_pd_sleep(struct oz_pd *pd)
do_stop = 1;
stop_apps = pd->total_apps;
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
if (do_stop) {
oz_pd_stop(pd);
} else {
@@ -999,15 +1000,15 @@ void oz_pd_indicate_farewells(struct oz_pd *pd)
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
while (1) {
- oz_polling_lock_bh();
+ spin_lock_bh(&g_polling_lock);
if (list_empty(&pd->farewell_list)) {
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
break;
}
f = list_first_entry(&pd->farewell_list,
struct oz_farewell, link);
list_del(&f->link);
- oz_polling_unlock_bh();
+ spin_unlock_bh(&g_polling_lock);
if (ai->farewell)
ai->farewell(pd, f->ep_num, f->report, f->len);
kfree(f);
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index 12c712956888..ad5fe7a6e619 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -22,6 +22,11 @@
#define OZ_TIMER_HEARTBEAT 2
#define OZ_TIMER_STOP 3
+/*
+ * External spinlock variable
+ */
+extern spinlock_t g_polling_lock;
+
/* Data structure that holds information on a frame for transmission. This is
* built when the frame is first transmitted and is used to rebuild the frame
* if a re-transmission is required.
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 64d94f77f8a7..f09acd0bb013 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include "ozdbg.h"
@@ -37,9 +38,13 @@ struct oz_binding {
};
/*
+ * External variable
+ */
+
+DEFINE_SPINLOCK(g_polling_lock);
+/*
* Static external variables.
*/
-static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
@@ -179,7 +184,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
- if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
free_pd = pd;
pd = pd2;
break;
@@ -596,7 +601,7 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd = container_of(e, struct oz_pd, link);
- if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(pd->mac_addr, mac_addr)) {
atomic_inc(&pd->ref_count);
spin_unlock_bh(&g_polling_lock);
return pd;
@@ -663,31 +668,26 @@ void oz_binding_add(const char *net_dev)
{
struct oz_binding *binding;
- binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
- if (binding) {
- binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
- binding->ptype.func = oz_pkt_recv;
+ binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
+ if (!binding)
+ return;
+
+ binding->ptype.type = htons(OZ_ETHERTYPE);
+ binding->ptype.func = oz_pkt_recv;
+ if (net_dev && *net_dev) {
memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
- if (net_dev && *net_dev) {
- oz_dbg(ON, "Adding binding: %s\n", net_dev);
- binding->ptype.dev =
- dev_get_by_name(&init_net, net_dev);
- if (binding->ptype.dev == NULL) {
- oz_dbg(ON, "Netdev %s not found\n", net_dev);
- kfree(binding);
- binding = NULL;
- }
- } else {
- oz_dbg(ON, "Binding to all netcards\n");
- binding->ptype.dev = NULL;
- }
- if (binding) {
- dev_add_pack(&binding->ptype);
- spin_lock_bh(&g_binding_lock);
- list_add_tail(&binding->link, &g_binding);
- spin_unlock_bh(&g_binding_lock);
+ oz_dbg(ON, "Adding binding: %s\n", net_dev);
+ binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
+ if (binding->ptype.dev == NULL) {
+ oz_dbg(ON, "Netdev %s not found\n", net_dev);
+ kfree(binding);
+ return;
}
}
+ dev_add_pack(&binding->ptype);
+ spin_lock_bh(&g_binding_lock);
+ list_add_tail(&binding->link, &g_binding);
+ spin_unlock_bh(&g_binding_lock);
}
/*
@@ -798,12 +798,3 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
return count;
}
-void oz_polling_lock_bh(void)
-{
- spin_lock_bh(&g_polling_lock);
-}
-
-void oz_polling_unlock_bh(void)
-{
- spin_unlock_bh(&g_polling_lock);
-}
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 0c49c8a0e815..cb38e02c968e 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -59,8 +59,6 @@ void oz_binding_remove(const char *net_dev);
void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
void oz_timer_delete(struct oz_pd *pd, int type);
void oz_pd_request_heartbeat(struct oz_pd *pd);
-void oz_polling_lock_bh(void);
-void oz_polling_unlock_bh(void);
void oz_pd_heartbeat_handler(unsigned long data);
void oz_pd_timeout_handler(unsigned long data);
enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
index 17b09b9a5b08..9bbb182f2776 100644
--- a/drivers/staging/ozwpan/ozprotocol.h
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -192,7 +192,7 @@ struct oz_get_desc_req {
u16 size;
u8 req_type;
u8 desc_type;
- u16 w_index;
+ __le16 w_index;
u8 index;
} PACKED;
@@ -219,8 +219,8 @@ struct oz_get_desc_rsp {
u8 elt_seq_num;
u8 type;
u8 req_id;
- u16 offset;
- u16 total_size;
+ __le16 offset;
+ __le16 total_size;
u8 rcode;
u8 data[1];
} PACKED;
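
Switching these on-the-wire fields to __le16 lets sparse catch missing byte-order conversions; readers still need le16_to_cpu() (and writers cpu_to_le16()) at the point of use. A hedged sketch against the struct above (the accessor itself is hypothetical):

	static u16 get_desc_rsp_total_size(const struct oz_get_desc_rsp *rsp)
	{
		/* total_size is little-endian on the wire; convert before use. */
		return le16_to_cpu(rsp->total_size);
	}
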
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 8531438d7586..4249fa374012 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -23,7 +23,7 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num);
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
const u8 *data, int data_len);
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, u16 windex, int offset, int len);
+ u8 index, __le16 windex, int offset, int len);
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
void oz_usb_request_heartbeat(void *hpd);
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 617f51cdaea7..f32d01427f77 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -54,7 +54,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
- u8 index, u16 windex, int offset, int len)
+ u8 index, __le16 windex, int offset, int len)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index ec4b1fd14021..08f9a4896116 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -171,8 +171,8 @@ struct logical_input {
union {
struct { /* valid when type == INPUT_TYPE_STD */
- void (*press_fct) (int);
- void (*release_fct) (int);
+ void (*press_fct)(int);
+ void (*release_fct)(int);
int press_data;
int release_data;
} std;
@@ -417,9 +417,9 @@ static char lcd_must_clear;
static char lcd_left_shift;
static char init_in_progress;
-static void (*lcd_write_cmd) (int);
-static void (*lcd_write_data) (int);
-static void (*lcd_clear_fast) (void);
+static void (*lcd_write_cmd)(int);
+static void (*lcd_write_data)(int);
+static void (*lcd_clear_fast)(void);
static DEFINE_SPINLOCK(pprt_lock);
static struct timer_list scan_timer;
@@ -457,14 +457,12 @@ MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
static int lcd_type = -1;
module_param(lcd_type, int, 0000);
MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, "
- "3=hantronix //, 4=nexcom //, 5=compiled-in");
+ "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
static int lcd_proto = -1;
module_param(lcd_proto, int, 0000);
MODULE_PARM_DESC(lcd_proto,
- "LCD communication: 0=parallel (//), 1=serial,"
- "2=TI LCD Interface");
+ "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
static int lcd_charset = -1;
module_param(lcd_charset, int, 0000);
@@ -473,8 +471,7 @@ MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
static int keypad_type = -1;
module_param(keypad_type, int, 0000);
MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, "
- "3=nexcom 4 keys");
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
static int profile = DEFAULT_PROFILE;
module_param(profile, int, 0000);
@@ -494,38 +491,32 @@ MODULE_PARM_DESC(profile,
static int lcd_e_pin = PIN_NOT_SET;
module_param(lcd_e_pin, int, 0000);
MODULE_PARM_DESC(lcd_e_pin,
- "# of the // port pin connected to LCD 'E' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
static int lcd_rs_pin = PIN_NOT_SET;
module_param(lcd_rs_pin, int, 0000);
MODULE_PARM_DESC(lcd_rs_pin,
- "# of the // port pin connected to LCD 'RS' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
static int lcd_rw_pin = PIN_NOT_SET;
module_param(lcd_rw_pin, int, 0000);
MODULE_PARM_DESC(lcd_rw_pin,
- "# of the // port pin connected to LCD 'RW' signal, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
static int lcd_bl_pin = PIN_NOT_SET;
module_param(lcd_bl_pin, int, 0000);
MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, "
- "with polarity (-17..17)");
+ "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
static int lcd_da_pin = PIN_NOT_SET;
module_param(lcd_da_pin, int, 0000);
MODULE_PARM_DESC(lcd_da_pin,
- "# of the // port pin connected to serial LCD 'SDA' "
- "signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
static int lcd_cl_pin = PIN_NOT_SET;
module_param(lcd_cl_pin, int, 0000);
MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' "
- "signal, with polarity (-17..17)");
+ "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
static const unsigned char *lcd_char_conv;
@@ -2017,9 +2008,9 @@ static struct logical_input *panel_bind_key(const char *name, const char *press,
* be bound.
*/
static struct logical_input *panel_bind_callback(char *name,
- void (*press_fct) (int),
+ void (*press_fct)(int),
int press_data,
- void (*release_fct) (int),
+ void (*release_fct)(int),
int release_data)
{
struct logical_input *callback;
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index 9f6ebdb23740..a85c3d68c462 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/input.h>
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 3162aabbeb07..ff8d41ebca36 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -6,6 +6,5 @@ config R8187SE
select WEXT_PRIV
select EEPROM_93CX6
select CRYPTO
- default N
---help---
If built as a module, it will be called r8187se.ko.
diff --git a/drivers/staging/rtl8187se/Module.symvers b/drivers/staging/rtl8187se/Module.symvers
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/staging/rtl8187se/Module.symvers
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.h b/drivers/staging/rtl8187se/ieee80211/dot11d.h
index 63f4f3c72f10..f996691307bf 100644
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8187se/ieee80211/dot11d.h
@@ -11,13 +11,13 @@ typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
-}CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
+} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
typedef enum _DOT11D_STATE {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-}DOT11D_STATE;
+} DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
/* DECLARE_RT_OBJECT(RT_DOT12D_INFO); */
@@ -35,9 +35,10 @@ typedef struct _RT_DOT11D_INFO {
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
-}RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
+} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
+
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1:0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5])
#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 09ffd9bc8991..d1763b7b8f27 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -473,7 +473,7 @@ enum {
};
struct ieee80211_header_data {
- u16 frame_ctl;
+ __le16 frame_ctl;
u16 duration_id;
u8 addr1[6];
u8 addr2[6];
@@ -482,7 +482,7 @@ struct ieee80211_header_data {
};
struct ieee80211_hdr_4addr {
- u16 frame_ctl;
+ __le16 frame_ctl;
u16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
@@ -709,10 +709,10 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define MAX_IE_LEN 0xFF //+YJ,080625
-typedef struct _CHANNEL_LIST{
- u8 Channel[MAX_CHANNEL_NUMBER + 1];
- u8 Len;
-}CHANNEL_LIST, *PCHANNEL_LIST;
+struct rtl8187se_channel_list {
+ u8 channel[MAX_CHANNEL_NUMBER + 1];
+ u8 len;
+};
//by amy for ps
#define IEEE80211_WATCH_DOG_TIME 2000
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index c8013d373a77..4fe253818630 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -58,7 +58,7 @@ struct ieee80211_ccmp_data {
u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
};
-void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
+static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
const u8 pt[16], u8 ct[16])
{
crypto_cipher_encrypt_one((void *)tfm, ct, pt);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index c5907968e1a7..6c1acc5dfba7 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -187,8 +187,7 @@ static inline u16 Mk16_le(u16 *v)
}
-static const u16 Sbox[256] =
-{
+static const u16 Sbox[256] = {
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
@@ -307,7 +306,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
int len;
u8 *pos;
struct ieee80211_hdr_4addr *hdr;
- u8 rc4key[16],*icv;
+ u8 rc4key[16], *icv;
u32 crc;
struct scatterlist sg;
int ret;
@@ -348,7 +347,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len + 4);
- ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
tkey->tx_iv16++;
if (tkey->tx_iv16 == 0) {
@@ -537,9 +536,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
michael_mic_hdr(skb, tkey->tx_hdr);
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
+ if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
+
pos = skb_put(skb, 8);
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
@@ -583,9 +582,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
- if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
+ if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
- }
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index f114f9a33e17..f25367224941 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-//#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -28,8 +27,6 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");
-
-
struct prism2_wep_data {
u32 iv;
#define WEP_KEY_LEN 13
@@ -40,7 +37,6 @@ struct prism2_wep_data {
struct crypto_blkcipher *rx_tfm;
};
-
static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
@@ -79,7 +75,6 @@ fail:
return NULL;
}
-
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
@@ -94,7 +89,6 @@ static void prism2_wep_deinit(void *priv)
kfree(priv);
}
-
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
* for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
* so the payload length increases with 8 bytes.
@@ -157,7 +151,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
-
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). len includes both IV and ICV.
@@ -219,7 +212,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return 0;
}
-
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -233,7 +225,6 @@ static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
return 0;
}
-
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -246,7 +237,6 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
return wep->key_len;
}
-
static char *prism2_wep_print_stats(char *p, void *priv)
{
struct prism2_wep_data *wep = priv;
@@ -255,7 +245,6 @@ static char *prism2_wep_print_stats(char *p, void *priv)
return p;
}
-
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.name = "WEP",
.init = prism2_wep_init,
@@ -272,21 +261,17 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.owner = THIS_MODULE,
};
-
int ieee80211_crypto_wep_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}
-
void ieee80211_crypto_wep_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
-
void ieee80211_wep_null(void)
{
-// printk("============>%s()\n", __func__);
return;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c27392d8b640..03eb164798cd 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -13,7 +13,6 @@
* released under the GPL
*/
-
#include "ieee80211.h"
#include <linux/random.h>
@@ -24,14 +23,6 @@
#include <linux/etherdevice.h>
#include "dot11d.h"
-u8 rsn_authen_cipher_suite[16][4] = {
- {0x00, 0x0F, 0xAC, 0x00}, //Use group key, //Reserved
- {0x00, 0x0F, 0xAC, 0x01}, //WEP-40 //RSNA default
- {0x00, 0x0F, 0xAC, 0x02}, //TKIP //NONE //{used just as default}
- {0x00, 0x0F, 0xAC, 0x03}, //WRAP-historical
- {0x00, 0x0F, 0xAC, 0x04}, //CCMP
- {0x00, 0x0F, 0xAC, 0x05}, //WEP-104
-};
short ieee80211_is_54g(const struct ieee80211_network *net)
{
@@ -62,14 +53,13 @@ static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
}
/* place the MFIE rate, tag to the memory (double) poised.
- * Then it updates the pointer so that
- * it points after the new MFIE tag added.
+ * Then it updates the pointer so that it points after the new MFIE tag added.
*/
static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & IEEE80211_CCK_MODULATION){
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
*tag++ = MFIE_TYPE_RATES;
*tag++ = 4;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
@@ -86,8 +76,7 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & IEEE80211_OFDM_MODULATION){
-
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
*tag++ = MFIE_TYPE_RATES_EX;
*tag++ = 8;
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -100,22 +89,20 @@ static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
*tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
}
-
/* We may add an option for custom rates that specific HW might support */
*tag_p = tag;
}
-
static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- *tag++ = MFIE_TYPE_GENERIC; //0
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
*tag++ = 7;
*tag++ = 0x00;
*tag++ = 0x50;
*tag++ = 0xf2;
- *tag++ = 0x02;//5
+ *tag++ = 0x02; /* 5 */
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
@@ -148,32 +135,23 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
- nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
+ nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
-/*
- * if the queue is full but we have newer frames then
- * just overwrites the oldest.
- *
- * if (nh == ieee->mgmt_queue_tail)
- * return -1;
- */
ieee->mgmt_queue_head = nh;
ieee->mgmt_queue_ring[nh] = skb;
-
- //return 0;
}
static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
- if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
+ if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
return NULL;
ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
ieee->mgmt_queue_tail =
- (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM;
+ (ieee->mgmt_queue_tail + 1) % MGMT_QUEUE_NUM;
return ret;
}
@@ -183,7 +161,6 @@ static void init_mgmt_queue(struct ieee80211_device *ieee)
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
-
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
inline void softmac_mgmt_xmit(struct sk_buff *skb,
@@ -191,20 +168,18 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
{
unsigned long flags;
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
- struct ieee80211_hdr_3addr *header=
+ struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
-
spin_lock_irqsave(&ieee->lock, flags);
/* called with 2nd param 0, no mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
+ ieee80211_sta_wakeup(ieee, 0);
- if(single){
- if(ieee->queue_stop){
-
- enqueue_mgmt(ieee,skb);
- }else{
+ if (single) {
+ if (ieee->queue_stop) {
+ enqueue_mgmt(ieee, skb);
+ } else {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -214,11 +189,11 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
}
spin_unlock_irqrestore(&ieee->lock, flags);
- }else{
+ } else {
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
@@ -231,24 +206,20 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_hard_start_xmit(skb,ieee->dev);
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
}
}
-
inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
struct ieee80211_device *ieee)
{
-
short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
-
- if(single){
-
+ if (single) {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -258,10 +229,8 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
-
- }else{
-
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
+ } else {
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
@@ -271,12 +240,10 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
/* avoid watchdog triggers */
ieee->dev->trans_start = jiffies;
- ieee->softmac_hard_start_xmit(skb,ieee->dev);
-
+ ieee->softmac_hard_start_xmit(skb, ieee->dev);
}
-// dev_kfree_skb_any(skb);//edit by thomas
}
-//by amy for power save
+
inline struct sk_buff *
ieee80211_disassociate_skb(struct ieee80211_network *beacon,
struct ieee80211_device *ieee, u8 asRsn)
@@ -288,7 +255,7 @@ ieee80211_disassociate_skb(struct ieee80211_network *beacon,
if (!skb)
return NULL;
- disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
+ disass = (struct ieee80211_disassoc_frame *) skb_put(skb, sizeof(struct ieee80211_disassoc_frame));
disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
@@ -299,21 +266,19 @@ ieee80211_disassociate_skb(struct ieee80211_network *beacon,
disass->reasoncode = asRsn;
return skb;
}
+
void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn)
{
- struct ieee80211_network *beacon = &ieee->current_network;
- struct sk_buff *skb;
- skb = ieee80211_disassociate_skb(beacon,ieee,asRsn);
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- //dev_kfree_skb_any(skb);//edit by thomas
- }
+ struct ieee80211_network *beacon = &ieee->current_network;
+ struct sk_buff *skb;
+ skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
+ if (skb)
+ softmac_mgmt_xmit(skb, ieee);
}
-//by amy for power save
inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
{
- unsigned int len,rate_len;
+ unsigned int len, rate_len;
u8 *tag;
struct sk_buff *skb;
struct ieee80211_probe_request *req;
@@ -327,75 +292,45 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
if (!skb)
return NULL;
- req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
+ req = (struct ieee80211_probe_request *) skb_put(skb, sizeof(struct ieee80211_probe_request));
req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- req->header.duration_id = 0; //FIXME: is this OK ?
+ req->header.duration_id = 0; /* FIXME: is this OK ? */
memset(req->header.addr1, 0xff, ETH_ALEN);
memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memset(req->header.addr3, 0xff, ETH_ALEN);
- tag = (u8 *) skb_put(skb,len+2+rate_len);
+ tag = (u8 *) skb_put(skb, len + 2 + rate_len);
*tag++ = MFIE_TYPE_SSID;
*tag++ = len;
memcpy(tag, ieee->current_network.ssid, len);
tag += len;
- ieee80211_MFIE_Brate(ieee,&tag);
- ieee80211_MFIE_Grate(ieee,&tag);
+ ieee80211_MFIE_Brate(ieee, &tag);
+ ieee80211_MFIE_Grate(ieee, &tag);
return skb;
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-void ext_ieee80211_send_beacon_wq(struct ieee80211_device *ieee)
-{
- struct sk_buff *skb;
-
- //unsigned long flags;
-
- skb = ieee80211_get_beacon_(ieee);
-
- if (skb){
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_beacons++;
- dev_kfree_skb_any(skb);//edit by thomas
- }
-
-
- //printk(KERN_WARNING "[1] beacon sending!\n");
- ieee->beacon_timer.expires = jiffies +
- (MSECS( ieee->current_network.beacon_interval -5));
-
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
- if(ieee->beacon_txing)
- add_timer(&ieee->beacon_timer);
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
-}
-
static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- //unsigned long flags;
-
skb = ieee80211_get_beacon_(ieee);
- if (skb){
+ if (skb) {
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_beacons++;
- dev_kfree_skb_any(skb);//edit by thomas
+ dev_kfree_skb_any(skb);
}
- //printk(KERN_WARNING "[1] beacon sending!\n");
ieee->beacon_timer.expires = jiffies +
- (MSECS( ieee->current_network.beacon_interval -5));
+ (MSECS(ieee->current_network.beacon_interval - 5));
- //spin_lock_irqsave(&ieee->beacon_lock,flags);
- if(ieee->beacon_txing)
+ if (ieee->beacon_txing)
add_timer(&ieee->beacon_timer);
- //spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
@@ -415,16 +350,15 @@ static void ieee80211_send_probe(struct ieee80211_device *ieee)
struct sk_buff *skb;
skb = ieee80211_probe_req(ieee);
- if (skb){
+ if (skb) {
softmac_mgmt_xmit(skb, ieee);
ieee->softmac_stats.tx_probe_rq++;
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
- if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
+ if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)) {
ieee80211_send_probe(ieee);
ieee80211_send_probe(ieee);
}
@@ -439,17 +373,14 @@ static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
down(&ieee->scan_sem);
-// printk("==================> Sync scan\n");
- while(1)
- {
-
- do{
+ while (1) {
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
goto out; /* scan completed */
- }while(!channel_map[ch]);
+ } while (!channel_map[ch]);
/* this function can be called in two situations
* 1- We have switched to ad-hoc mode and we are
* performing a complete syncro scan before conclude
@@ -473,101 +404,74 @@ static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
goto out;
ieee->set_chan(ieee->dev, ch);
-// printk("=====>channel=%d ",ch);
- if(channel_map[ch] == 1)
- {
-// printk("====send probe request\n");
+ if (channel_map[ch] == 1)
ieee80211_send_probe_requests(ieee);
- }
+
/* this prevent excessive time wait when we
* need to wait for a syncro scan to end..
*/
if (ieee->sync_scan_hurryup)
goto out;
-
msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
-
}
out:
ieee->sync_scan_hurryup = 0;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
}
void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee)
{
int ch;
- unsigned int watch_dog = 0;
+ unsigned int watch_dog = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
- down(&ieee->scan_sem);
+ down(&ieee->scan_sem);
ch = ieee->current_network.channel;
-// if(ieee->sync_scan_hurryup)
-// {
-
-// printk("stop scan sync\n");
-// goto out;
-// }
-// printk("=======hh===============>ips scan\n");
- while(1)
- {
- /* this function can be called in two situations
- * 1- We have switched to ad-hoc mode and we are
- * performing a complete syncro scan before conclude
- * there are no interesting cell and to create a
- * new one. In this case the link state is
- * IEEE80211_NOLINK until we found an interesting cell.
- * If so the ieee8021_new_net, called by the RX path
- * will set the state to IEEE80211_LINKED, so we stop
- * scanning
- * 2- We are linked and the root uses run iwlist scan.
- * So we switch to IEEE80211_LINKED_SCANNING to remember
- * that we are still logically linked (not interested in
- * new network events, despite for updating the net list,
- * but we are temporarily 'unlinked' as the driver shall
- * not filter RX frames and the channel is changing.
- * So the only situation in witch are interested is to check
- * if the state become LINKED because of the #1 situation
- */
+
+ while (1) {
+ /* this function can be called in two situations
+ * 1- We have switched to ad-hoc mode and we are
+ * performing a complete syncro scan before concluding
+ * there are no interesting cells and to create a
+ * new one. In this case the link state is
+ * IEEE80211_NOLINK until we find an interesting cell.
+ * If so the ieee8021_new_net, called by the RX path
+ * will set the state to IEEE80211_LINKED, so we stop
+ * scanning
+ * 2- We are linked and the root user runs iwlist scan.
+ * So we switch to IEEE80211_LINKED_SCANNING to remember
+ * that we are still logically linked (not interested in
+ * new network events, except for updating the net list,
+ * but we are temporarily 'unlinked' as the driver shall
+ * not filter RX frames and the channel is changing.
+ * So the only situation in which we are interested is to check
+ * if the state becomes LINKED because of the #1 situation
+ */
if (ieee->state == IEEE80211_LINKED)
- {
goto out;
- }
- if(channel_map[ieee->current_network.channel] > 0)
- {
+
+ if (channel_map[ieee->current_network.channel] > 0)
ieee->set_chan(ieee->dev, ieee->current_network.channel);
-// printk("======>channel=%d ",ieee->current_network.channel);
- }
- if(channel_map[ieee->current_network.channel] == 1)
- {
-// printk("====send probe request\n");
+
+ if (channel_map[ieee->current_network.channel] == 1)
ieee80211_send_probe_requests(ieee);
- }
- /* this prevent excessive time wait when we
- * need to wait for a syncro scan to end..
- */
-// if (ieee->sync_scan_hurryup)
-// goto out;
msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
- do{
+ do {
if (watch_dog++ >= MAX_CHANNEL_NUMBER)
- // if (++watch_dog >= 15);//MAX_CHANNEL_NUMBER) //YJ,modified,080630
goto out; /* scan completed */
ieee->current_network.channel = (ieee->current_network.channel + 1)%MAX_CHANNEL_NUMBER;
- }while(!channel_map[ieee->current_network.channel]);
- }
+ } while (!channel_map[ieee->current_network.channel]);
+ }
out:
- //ieee->sync_scan_hurryup = 0;
- //ieee->set_chan(ieee->dev, ch);
- //ieee->current_network.channel = ch;
ieee->actscanning = false;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
}
@@ -575,29 +479,24 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
- static short watchdog = 0;
+ static short watchdog;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-// printk("ieee80211_softmac_scan_wq ENABLE_IPS\n");
-// printk("in %s\n",__func__);
down(&ieee->scan_sem);
- do{
+ do {
ieee->current_network.channel =
(ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
if (watchdog++ > MAX_CHANNEL_NUMBER)
goto out; /* no good chans */
+ } while (!channel_map[ieee->current_network.channel]);
- }while(!channel_map[ieee->current_network.channel]);
-
- //printk("current_network.channel:%d\n", ieee->current_network.channel);
- if (ieee->scanning == 0 )
- {
+ if (ieee->scanning == 0) {
printk("error out, scanning = 0\n");
goto out;
}
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- if(channel_map[ieee->current_network.channel] == 1)
+ if (channel_map[ieee->current_network.channel] == 1)
ieee80211_send_probe_requests(ieee);
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
@@ -609,7 +508,7 @@ out:
ieee->scanning = 0;
up(&ieee->scan_sem);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
return;
}
@@ -618,62 +517,51 @@ static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 1;
ieee80211_send_beacon(ieee);
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->beacon_lock,flags);
+ spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
-
- spin_unlock_irqrestore(&ieee->beacon_lock,flags);
+ del_timer_sync(&ieee->beacon_timer);
+ spin_unlock_irqrestore(&ieee->beacon_lock, flags);
}
-
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->stop_send_beacons)
+ if (ieee->stop_send_beacons)
ieee->stop_send_beacons(ieee->dev);
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_stop(ieee);
}
-
void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
{
- if(ieee->start_send_beacons)
+ if (ieee->start_send_beacons)
ieee->start_send_beacons(ieee->dev);
- if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
+ if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
ieee80211_beacons_start(ieee);
}
-
static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
-// unsigned long flags;
-
- //ieee->sync_scan_hurryup = 1;
-
down(&ieee->scan_sem);
-// spin_lock_irqsave(&ieee->lock, flags);
- if (ieee->scanning == 1){
+ if (ieee->scanning == 1) {
ieee->scanning = 0;
- //del_timer_sync(&ieee->scan_timer);
cancel_delayed_work(&ieee->softmac_scan_wq);
}
-// spin_unlock_irqrestore(&ieee->lock, flags);
up(&ieee->scan_sem);
}
@@ -688,38 +576,28 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
/* called with ieee->lock held */
void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
{
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
RESET_CIE_WATCHDOG(ieee);
- }
}
- if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
- if (ieee->scanning == 0)
- {
+
+ if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
+ if (ieee->scanning == 0) {
ieee->scanning = 1;
- //ieee80211_softmac_scan(ieee);
- // queue_work(ieee->wq, &ieee->softmac_scan_wq);
- //care this,1203,2007,by lawrence
#if 1
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq,0);
+ queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
#endif
}
}else
ieee->start_scan(ieee->dev);
-
}
/* called with wx_sem held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
- if(IS_DOT11D_ENABLE(ieee) )
- {
- if(IS_COUNTRY_IE_VALID(ieee))
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (IS_COUNTRY_IE_VALID(ieee))
RESET_CIE_WATCHDOG(ieee);
- }
}
ieee->sync_scan_hurryup = 0;
@@ -727,7 +605,6 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
ieee80211_softmac_scan_syncro(ieee);
else
ieee->scan_syncro(ieee->dev);
-
}
inline struct sk_buff *
@@ -739,15 +616,17 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
skb = dev_alloc_skb(sizeof(struct ieee80211_authentication) + challengelen);
- if (!skb) return NULL;
+ if (!skb)
+ return NULL;
auth = (struct ieee80211_authentication *)
skb_put(skb, sizeof(struct ieee80211_authentication));
auth->header.frame_ctl = IEEE80211_STYPE_AUTH;
- if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
+ if (challengelen)
+ auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
- auth->header.duration_id = 0x013a; //FIXME
+ auth->header.duration_id = 0x013a; /* FIXME */
memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -761,7 +640,6 @@ ieee80211_authentication_req(struct ieee80211_network *beacon,
auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
return skb;
-
}
static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
@@ -772,29 +650,30 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
struct ieee80211_probe_response *beacon_buf;
struct sk_buff *skb;
int encrypt;
- int atim_len,erp_len;
- struct ieee80211_crypt_data* crypt;
+ int atim_len, erp_len;
+ struct ieee80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
int rate_len = ieee->current_network.rates_len+2;
int rate_ex_len = ieee->current_network.rates_ex_len;
int wpa_ie_len = ieee->wpa_ie_len;
- if(rate_ex_len > 0) rate_ex_len+=2;
+ if (rate_ex_len > 0)
+ rate_ex_len += 2;
- if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
+ if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
atim_len = 4;
else
atim_len = 0;
- if(ieee80211_is_54g(&ieee->current_network))
+ if (ieee80211_is_54g(&ieee->current_network))
erp_len = 3;
else
erp_len = 0;
beacon_size = sizeof(struct ieee80211_probe_response)+
ssid_len
- +3 //channel
+ +3 /* channel */
+rate_len
+rate_ex_len
+atim_len
@@ -806,19 +685,19 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
if (!skb)
return NULL;
- beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, beacon_size);
+ beacon_buf = (struct ieee80211_probe_response *) skb_put(skb, beacon_size);
- memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
- memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
+ memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
+ memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
+ memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
- beacon_buf->header.duration_id = 0; //FIXME
+ beacon_buf->header.duration_id = 0; /* FIXME */
beacon_buf->beacon_interval =
cpu_to_le16(ieee->current_network.beacon_interval);
beacon_buf->capability =
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
- if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
+ if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
@@ -835,51 +714,52 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
beacon_buf->info_element.id = MFIE_TYPE_SSID;
beacon_buf->info_element.len = ssid_len;
- tag = (u8*) beacon_buf->info_element.data;
+ tag = (u8 *) beacon_buf->info_element.data;
memcpy(tag, ssid, ssid_len);
tag += ssid_len;
*(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len-2;
- memcpy(tag,ieee->current_network.rates,rate_len-2);
- tag+=rate_len-2;
+ *(tag++) = rate_len - 2;
+ memcpy(tag, ieee->current_network.rates, rate_len-2);
+ tag += rate_len - 2;
*(tag++) = MFIE_TYPE_DS_SET;
*(tag++) = 1;
*(tag++) = ieee->current_network.channel;
- if(atim_len){
+ if (atim_len) {
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- *((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
- tag+=2;
+ *((u16 *)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
+ tag += 2;
}
- if(erp_len){
+ if (erp_len) {
*(tag++) = MFIE_TYPE_ERP;
*(tag++) = 1;
*(tag++) = 0;
}
- if(rate_ex_len){
+ if (rate_ex_len) {
*(tag++) = MFIE_TYPE_RATES_EX;
*(tag++) = rate_ex_len-2;
- memcpy(tag,ieee->current_network.rates_ex,rate_ex_len-2);
- tag+=rate_ex_len-2;
+ memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
+ tag += rate_ex_len - 2;
}
- if (wpa_ie_len)
- {
- if (ieee->iw_mode == IW_MODE_ADHOC)
- {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07
+ if (wpa_ie_len) {
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+			/* Windows may set the pairwise key to the same value
+			 * as the group key, which is not allowed in Linux, so
+			 * adjust the IE here to work around this IOT issue.
+			 */
memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
}
memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
}
-
skb->dev = ieee->dev;
return skb;
}
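The probe response above is assembled by appending information elements as tag/length/value triples behind the fixed header. A minimal sketch of that append pattern follows; append_ie is a hypothetical helper, and the element IDs used are the standard SSID, Supported Rates and DS Parameter Set values.

#include <string.h>
#include <stdio.h>

/* Append one 802.11 information element (tag, length, value) at *pos
 * and advance the cursor, mirroring the "*(tag++) = ..." pattern used
 * by the probe-response builder.  Illustrative only.
 */
static unsigned char *append_ie(unsigned char *pos, unsigned char id,
				const void *data, unsigned char len)
{
	*pos++ = id;
	*pos++ = len;
	memcpy(pos, data, len);
	return pos + len;
}

int main(void)
{
	unsigned char frame[64];
	unsigned char *tag = frame;
	const char ssid[] = "testnet";
	const unsigned char rates[] = { 0x82, 0x84, 0x8b, 0x96 };
	const unsigned char channel = 11;

	tag = append_ie(tag, 0, ssid, sizeof(ssid) - 1);	/* SSID */
	tag = append_ie(tag, 1, rates, sizeof(rates));		/* supported rates */
	tag = append_ie(tag, 3, &channel, 1);			/* DS parameter set */

	printf("built %ld bytes of IEs\n", (long)(tag - frame));
	return 0;
}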
@@ -888,9 +768,9 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
u8 *dest)
{
struct sk_buff *skb;
- u8* tag;
+ u8 *tag;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
struct ieee80211_assoc_response_frame *assoc;
short encrypt;
@@ -903,34 +783,36 @@ static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
return NULL;
assoc = (struct ieee80211_assoc_response_frame *)
- skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
+ skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
- memcpy(assoc->header.addr1, dest,ETH_ALEN);
+ memcpy(assoc->header.addr1, dest, ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
-
- if(ieee->short_slot)
+ if (ieee->short_slot)
assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (ieee->host_encrypt)
crypt = ieee->crypt[ieee->tx_keyidx];
- else crypt = NULL;
+ else
+ crypt = NULL;
- encrypt = ( crypt && crypt->ops);
+ encrypt = (crypt && crypt->ops);
if (encrypt)
assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
assoc->status = 0;
assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
- else ieee->assoc_id++;
+ if (ieee->assoc_id == 0x2007)
+ ieee->assoc_id = 0;
+ else
+ ieee->assoc_id++;
- tag = (u8*) skb_put(skb, rate_len);
+ tag = (u8 *) skb_put(skb, rate_len);
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
@@ -962,21 +844,19 @@ static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
memcpy(auth->header.addr1, dest, ETH_ALEN);
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
return skb;
-
-
}
static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short pwr)
{
struct sk_buff *skb;
- struct ieee80211_hdr_3addr* hdr;
+ struct ieee80211_hdr_3addr *hdr;
skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));
if (!skb)
return NULL;
- hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));
+ hdr = (struct ieee80211_hdr_3addr *)skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -987,83 +867,64 @@ static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short
(pwr ? IEEE80211_FCTL_PM:0));
return skb;
-
-
}
-
static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
- if (buf){
+ if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
- if (buf){
+ if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
-
struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
if (buf) {
softmac_mgmt_xmit(buf, ieee);
- dev_kfree_skb_any(buf);//edit by thomas
+ dev_kfree_skb_any(buf);
}
}
-
inline struct sk_buff *
ieee80211_association_req(struct ieee80211_network *beacon,
struct ieee80211_device *ieee)
{
struct sk_buff *skb;
- //unsigned long flags;
struct ieee80211_assoc_request_frame *hdr;
u8 *tag;
- //short info_addr = 0;
- //int i;
- //u16 suite_count = 0;
- //u8 suit_select = 0;
unsigned int wpa_len = beacon->wpa_ie_len;
- //struct net_device *dev = ieee->dev;
- //union iwreq_data wrqu;
- //u8 *buff;
- //u8 *p;
#if 1
- // for testing purpose
+	/* for testing purposes */
unsigned int rsn_len = beacon->rsn_ie_len;
-#else
- unsigned int rsn_len = beacon->rsn_ie_len - 4;
#endif
unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
unsigned int wmm_info_len = beacon->QoS_Enable?9:0;
unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;
u8 encry_proto = ieee->wpax_type_notify & 0xff;
- //u8 pairwise_type = (ieee->wpax_type_notify >> 8) & 0xff;
- //u8 authen_type = (ieee->wpax_type_notify >> 16) & 0xff;
int len = 0;
- //[0] Notify type of encryption: WPA/WPA2
- //[1] pair wise type
- //[2] authen type
- if(ieee->wpax_type_set) {
+	/* [0] Notify type of encryption: WPA/WPA2
+	 * [1] pairwise type
+	 * [2] authentication type
+	 */
+ if (ieee->wpax_type_set) {
if (IEEE_PROTO_WPA == encry_proto) {
rsn_len = 0;
} else if (IEEE_PROTO_RSN == encry_proto) {
@@ -1071,8 +932,8 @@ ieee80211_association_req(struct ieee80211_network *beacon,
}
}
len = sizeof(struct ieee80211_assoc_request_frame)+
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
+ + beacon->ssid_len /* essid tagged val */
+ + rate_len /* rates tagged val */
+ wpa_len
+ rsn_len
+ wmm_info_len
@@ -1086,24 +947,23 @@ ieee80211_association_req(struct ieee80211_network *beacon,
hdr = (struct ieee80211_assoc_request_frame *)
skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
-
hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
- hdr->header.duration_id= 37; //FIXME
+ hdr->header.duration_id = 37; /* FIXME */
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
- memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John
+ memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN); /* for HW security */
hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
- if (beacon->capability & WLAN_CAPABILITY_PRIVACY )
+ if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
- if(ieee->short_slot)
+ if (ieee->short_slot)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- hdr->listen_interval = 0xa; //FIXME
+ hdr->listen_interval = 0xa; /* FIXME */
hdr->info_element.id = MFIE_TYPE_SSID;
@@ -1116,27 +976,26 @@ ieee80211_association_req(struct ieee80211_network *beacon,
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
- //add rsn==0 condition for ap's mix security mode(wpa+wpa2), john2007.8.9
- //choose AES encryption as default algorithm while using mixed mode
+	/* add rsn==0 condition for an AP's mixed security mode (WPA+WPA2);
+	 * choose AES encryption as the default algorithm when using mixed
+	 * mode.
+	 */
- tag = skb_put(skb,ieee->wpa_ie_len);
- memcpy(tag,ieee->wpa_ie,ieee->wpa_ie_len);
+ tag = skb_put(skb, ieee->wpa_ie_len);
+ memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- tag = skb_put(skb,wmm_info_len);
- if(wmm_info_len) {
+ tag = skb_put(skb, wmm_info_len);
+ if (wmm_info_len)
ieee80211_WMM_Info(ieee, &tag);
- }
- tag = skb_put(skb,turbo_info_len);
- if(turbo_info_len) {
- ieee80211_TURBO_Info(ieee, &tag);
- }
+
+ tag = skb_put(skb, turbo_info_len);
+ if (turbo_info_len)
+ ieee80211_TURBO_Info(ieee, &tag);
return skb;
}
void ieee80211_associate_abort(struct ieee80211_device *ieee)
{
-
unsigned long flags;
spin_lock_irqsave(&ieee->lock, flags);
@@ -1148,17 +1007,17 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
* Here we will check if there are good nets to associate
* with, so we retry or just get back to NO_LINK and scanning
*/
- if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){
+ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING) {
IEEE80211_DEBUG_MGMT("Authentication failed\n");
ieee->softmac_stats.no_auth_rs++;
- }else{
+ } else {
IEEE80211_DEBUG_MGMT("Association failed\n");
ieee->softmac_stats.no_ass_rs++;
}
ieee->state = IEEE80211_ASSOCIATING_RETRY;
- queue_delayed_work(ieee->wq, &ieee->associate_retry_wq,IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
+ queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -1168,7 +1027,6 @@ static void ieee80211_associate_abort_cb(unsigned long dev)
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-
static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1176,26 +1034,24 @@ static void ieee80211_associate_step1(struct ieee80211_device *ieee)
IEEE80211_DEBUG_MGMT("Stopping scan\n");
ieee->softmac_stats.tx_auth_rq++;
- skb=ieee80211_authentication_req(beacon, ieee, 0);
- if (!skb){
-
+ skb = ieee80211_authentication_req(beacon, ieee, 0);
+ if (!skb) {
ieee80211_associate_abort(ieee);
- }
- else{
- ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
+ } else {
+ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING;
IEEE80211_DEBUG_MGMT("Sending authentication request\n");
- //printk("---Sending authentication request\n");
softmac_mgmt_xmit(skb, ieee);
- //BUGON when you try to add_timer twice, using mod_timer may be better, john0709
- if(!timer_pending(&ieee->associate_timer)){
+		/* Calling add_timer twice would trigger a BUG_ON; using
+		 * mod_timer may be better.
+		 */
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- //If call dev_kfree_skb_any,a warning will ocur....
- //KERNEL: assertion (!atomic_read(&skb->users)) failed at net/core/dev.c (1708)
- //So ... 1204 by lawrence.
- //printk("\nIn %s,line %d call kfree skb.",__func__,__LINE__);
- //dev_kfree_skb_any(skb);//edit by thomas
+		/* If we called dev_kfree_skb_any here, a warning would occur:
+		 * KERNEL: assertion (!atomic_read(&skb->users)) failed at
+		 * net/core/dev.c (1708)
+		 */
}
}
@@ -1205,7 +1061,6 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
u8 *c;
struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
-// int hlen = sizeof(struct ieee80211_authentication);
del_timer_sync(&ieee->associate_timer);
ieee->associate_seq++;
ieee->softmac_stats.tx_auth_rq++;
@@ -1213,7 +1068,7 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
skb = ieee80211_authentication_req(beacon, ieee, chlen+2);
if (!skb)
ieee80211_associate_abort(ieee);
- else{
+ else {
c = skb_put(skb, chlen+2);
*(c++) = MFIE_TYPE_CHALLENGE;
*(c++) = chlen;
@@ -1221,38 +1076,36 @@ static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *chal
IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
- ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr ));
+ ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr));
softmac_mgmt_xmit(skb, ieee);
- if (!timer_pending(&ieee->associate_timer)){
- //printk("=========>add timer again, to crash\n");
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- dev_kfree_skb_any(skb);//edit by thomas
+ dev_kfree_skb_any(skb);
}
kfree(challenge);
}
static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
del_timer_sync(&ieee->associate_timer);
IEEE80211_DEBUG_MGMT("Sending association request\n");
ieee->softmac_stats.tx_ass_rq++;
- skb=ieee80211_association_req(beacon, ieee);
+ skb = ieee80211_association_req(beacon, ieee);
if (!skb)
ieee80211_associate_abort(ieee);
- else{
+ else {
softmac_mgmt_xmit(skb, ieee);
- if (!timer_pending(&ieee->associate_timer)){
+ if (!timer_pending(&ieee->associate_timer)) {
ieee->associate_timer.expires = jiffies + (HZ / 2);
add_timer(&ieee->associate_timer);
}
- //dev_kfree_skb_any(skb);//edit by thomas
}
}
@@ -1261,12 +1114,11 @@ static void ieee80211_associate_complete_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
-
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 540;
printk(KERN_INFO"Using G rates\n");
- }else{
+ } else {
ieee->rate = 110;
printk(KERN_INFO"Using B rates\n");
}
@@ -1279,12 +1131,8 @@ static void ieee80211_associate_complete_wq(struct work_struct *work)
static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
- int i;
del_timer_sync(&ieee->associate_timer);
- for(i = 0; i < 6; i++) {
- //ieee->seq_ctrl[i] = 0;
- }
ieee->state = IEEE80211_LINKED;
IEEE80211_DEBUG_MGMT("Successfully associated\n");
@@ -1316,7 +1164,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
int tmp_ssid_len = 0;
- short apset,ssidset,ssidbroad,apmatch,ssidmatch;
+ short apset, ssidset, ssidbroad, apmatch, ssidmatch;
/* we are interested in new new only if we are not associated
* and we are not associating / authenticating
@@ -1330,74 +1178,70 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
return;
-
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){
+ if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
/* if the user specified the AP MAC, we need also the essid
* This could be obtained by beacons or, if the network does not
* broadcast it, it can be put manually.
*/
- apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
- ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
- ssidbroad = !(net->ssid_len == 0 || net->ssid[0]== '\0');
- apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
+ apset = ieee->wap_set;
+ ssidset = ieee->ssid_set;
+ ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
+ apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN) == 0);
- if(ieee->current_network.ssid_len != net->ssid_len)
+ if (ieee->current_network.ssid_len != net->ssid_len)
ssidmatch = 0;
else
- ssidmatch = (0==strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
-
- //printk("cur: %s, %d, net:%s, %d\n", ieee->current_network.ssid, ieee->current_network.ssid_len, net->ssid, net->ssid_len);
- //printk("apset=%d apmatch=%d ssidset=%d ssidbroad=%d ssidmatch=%d\n",apset,apmatch,ssidset,ssidbroad,ssidmatch);
-
- if ( /* if the user set the AP check if match.
- * if the network does not broadcast essid we check the user supplied ANY essid
- * if the network does broadcast and the user does not set essid it is OK
- * if the network does broadcast and the user did set essid chech if essid match
- */
- ( apset && apmatch &&
- ((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) ||
- /* if the ap is not set, check that the user set the bssid
- * and the network does broadcast and that those two bssid matches
- */
- (!apset && ssidset && ssidbroad && ssidmatch)
- ){
-
-
+ ssidmatch = (0 == strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
+
+		/* If the user set the AP, check if it matches:
+		 *  - if the network does not broadcast the essid, we fall
+		 *    back to the user-supplied essid
+		 *  - if the network does broadcast and the user did not set
+		 *    an essid, it is OK
+		 *  - if the network does broadcast and the user did set an
+		 *    essid, check if the essid matches
+		 * (apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
+		 * (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+		 * If the AP is not set, check that the user set the essid,
+		 * that the network broadcasts it, and that the two match:
+		 * (!apset && ssidset && ssidbroad && ssidmatch)
+		 */
+ if ((apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
+ (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
+ (!apset && ssidset && ssidbroad && ssidmatch)) {
/* if the essid is hidden replace it with the
* essid provided by the user.
*/
- if (!ssidbroad){
+ if (!ssidbroad) {
strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
tmp_ssid_len = ieee->current_network.ssid_len;
}
memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
- if (!ssidbroad){
+ if (!ssidbroad) {
strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
ieee->current_network.ssid_len = tmp_ssid_len;
}
- printk(KERN_INFO"Linking with %s: channel is %d\n",ieee->current_network.ssid,ieee->current_network.channel);
+ printk(KERN_INFO"Linking with %s: channel is %d\n", ieee->current_network.ssid, ieee->current_network.channel);
- if (ieee->iw_mode == IW_MODE_INFRA){
+ if (ieee->iw_mode == IW_MODE_INFRA) {
ieee->state = IEEE80211_ASSOCIATING;
ieee->beinretry = false;
queue_work(ieee->wq, &ieee->associate_procedure_wq);
- }else{
- if(ieee80211_is_54g(&ieee->current_network) &&
- (ieee->modulation & IEEE80211_OFDM_MODULATION)){
+ } else {
+ if (ieee80211_is_54g(&ieee->current_network) &&
+ (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
ieee->rate = 540;
printk(KERN_INFO"Using G rates\n");
- }else{
+ } else {
ieee->rate = 110;
printk(KERN_INFO"Using B rates\n");
}
ieee->state = IEEE80211_LINKED;
ieee->beinretry = false;
}
-
}
}
-
}
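The join decision above reduces to a boolean predicate over the five flags computed just before it. A standalone sketch of that predicate, assuming the same flag names (should_join is a made-up helper, not part of the driver):

#include <stdbool.h>
#include <stdio.h>

/* Reproduces the join condition from ieee80211_softmac_new_net:
 * either the user pinned the AP and it matches (with a compatible
 * essid situation), or only the essid was set, is broadcast and
 * matches.  Illustrative helper only.
 */
static bool should_join(bool apset, bool apmatch, bool ssidset,
			bool ssidbroad, bool ssidmatch)
{
	return (apset && apmatch &&
		((ssidset && ssidbroad && ssidmatch) ||
		 (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
	       (!apset && ssidset && ssidbroad && ssidmatch);
}

int main(void)
{
	/* hidden essid: user supplied it, AP pinned and matching */
	printf("%d\n", should_join(true, true, true, false, false)); /* 1 */
	/* no AP pinned, broadcast essid matches the configured one */
	printf("%d\n", should_join(false, false, true, true, true)); /* 1 */
	/* nothing configured at all */
	printf("%d\n", should_join(false, false, false, true, false)); /* 0 */
	return 0;
}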
void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
@@ -1407,60 +1251,52 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(target, &ieee->network_list, list) {
-
/* if the state become different that NOLINK means
* we had found what we are searching for
*/
-
if (ieee->state != IEEE80211_NOLINK)
break;
if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
ieee80211_softmac_new_net(ieee, target);
}
-
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
-
static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len);
+ if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
+ IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
return 0xcafe;
}
*challenge = NULL;
- a = (struct ieee80211_authentication*) skb->data;
- if(skb->len > (sizeof(struct ieee80211_authentication) +3)){
+ a = (struct ieee80211_authentication *) skb->data;
+ if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
t = skb->data + sizeof(struct ieee80211_authentication);
- if(*(t++) == MFIE_TYPE_CHALLENGE){
+ if (*(t++) == MFIE_TYPE_CHALLENGE) {
*chlen = *(t++);
*challenge = kmemdup(t, *chlen, GFP_ATOMIC);
if (!*challenge)
return -ENOMEM;
}
}
-
return cpu_to_le16(a->status);
-
}
-
static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
- if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){
- IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len);
+ if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
+ IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
return -1;
}
- a = (struct ieee80211_authentication*) skb->data;
+ a = (struct ieee80211_authentication *) skb->data;
- memcpy(dest,a->header.addr2, ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
@@ -1473,23 +1309,23 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
{
u8 *tag;
u8 *skbend;
- u8 *ssid=NULL;
+ u8 *ssid = NULL;
u8 ssidlen = 0;
struct ieee80211_hdr_3addr *header =
(struct ieee80211_hdr_3addr *) skb->data;
- if (skb->len < sizeof (struct ieee80211_hdr_3addr ))
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr))
return -1; /* corrupted */
- memcpy(src,header->addr2, ETH_ALEN);
+ memcpy(src, header->addr2, ETH_ALEN);
- skbend = (u8*)skb->data + skb->len;
+ skbend = (u8 *)skb->data + skb->len;
- tag = skb->data + sizeof (struct ieee80211_hdr_3addr );
+ tag = skb->data + sizeof(struct ieee80211_hdr_3addr);
- while (tag+1 < skbend){
- if (*tag == 0){
+ while (tag+1 < skbend) {
+ if (*tag == 0) {
ssid = tag+2;
ssidlen = *(tag+1);
break;
@@ -1499,10 +1335,12 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
tag++; /* point to the next tag */
}
- //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src));
- if (ssidlen == 0) return 1;
+ if (ssidlen == 0)
+ return 1;
+
+ if (!ssid)
+ return 1; /* ssid not found in tagged param */
- if (!ssid) return 1; /* ssid not found in tagged param */
return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
}
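probe_rq_parse scans the tagged parameters after the 3-address header for the SSID element (tag 0). The element walk can be sketched in isolation as below; find_ssid is an illustrative helper, not driver code.

#include <stdio.h>
#include <string.h>

/* Walk the tagged parameters of a management frame body looking for
 * the SSID element (tag 0), the same scan probe_rq_parse performs.
 * Returns a pointer to the ssid bytes and fills *len, or NULL.
 */
static const unsigned char *find_ssid(const unsigned char *tags,
				      size_t tags_len, unsigned char *len)
{
	const unsigned char *tag = tags;
	const unsigned char *end = tags + tags_len;

	while (tag + 1 < end) {
		if (tag[0] == 0) {		/* SSID element */
			if (tag + 2 + tag[1] > end)
				return NULL;	/* truncated element */
			*len = tag[1];
			return tag + 2;
		}
		tag += 2 + tag[1];		/* skip to the next element */
	}
	return NULL;
}

int main(void)
{
	const unsigned char body[] = { 0x00, 0x04, 't', 'e', 's', 't',
				       0x01, 0x01, 0x82 };
	unsigned char len = 0;
	const unsigned char *ssid = find_ssid(body, sizeof(body), &len);

	if (ssid)
		printf("ssid: %.*s\n", len, ssid);
	return 0;
}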
@@ -1514,13 +1352,13 @@ static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
sizeof(struct ieee80211_info_element))) {
- IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len);
+ IEEE80211_DEBUG_MGMT("invalid len in auth request:%d\n", skb->len);
return -1;
}
- a = (struct ieee80211_assoc_request_frame*) skb->data;
+ a = (struct ieee80211_assoc_request_frame *) skb->data;
- memcpy(dest,a->header.addr2,ETH_ALEN);
+ memcpy(dest, a->header.addr2, ETH_ALEN);
return 0;
}
@@ -1528,12 +1366,12 @@ static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
static inline u16 assoc_parse(struct sk_buff *skb, int *aid)
{
struct ieee80211_assoc_response_frame *a;
- if (skb->len < sizeof(struct ieee80211_assoc_response_frame)){
+ if (skb->len < sizeof(struct ieee80211_assoc_response_frame)) {
IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
return 0xcafe;
}
- a = (struct ieee80211_assoc_response_frame*) skb->data;
+ a = (struct ieee80211_assoc_response_frame *) skb->data;
*aid = le16_to_cpu(a->aid) & 0x3fff;
return le16_to_cpu(a->status);
}
@@ -1543,11 +1381,8 @@ static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee,
{
u8 dest[ETH_ALEN];
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_probe_rq++;
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
- if (probe_rq_parse(ieee, skb, dest)){
- //IEEE80211DMESG("Was for me!");
+ if (probe_rq_parse(ieee, skb, dest)) {
ieee->softmac_stats.tx_probe_rs++;
ieee80211_resp_to_probe(ieee, dest);
}
@@ -1558,115 +1393,92 @@ inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee,
{
u8 dest[ETH_ALEN];
int status;
- //IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
status = auth_rq_parse(skb, dest);
- if (status != -1) {
+ if (status != -1)
ieee80211_resp_to_auth(ieee, status, dest);
- }
- //DMESG("Dest is "MACSTR, MAC2STR(dest));
-
}
- inline void
+inline void
ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
u8 dest[ETH_ALEN];
- //unsigned long flags;
ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(skb,dest) != -1){
+ if (assoc_rq_parse(skb, dest) != -1)
ieee80211_resp_to_assoc_rq(ieee, dest);
- }
+
printk(KERN_INFO"New client associated: %pM\n", dest);
}
-
-
void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
{
-
struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
-
}
-
static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
u32 *time_l)
{
- int timeout = 0;
+ int timeout = 0;
u8 dtim;
- /*if(ieee->ps == IEEE80211_PS_DISABLED ||
- ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)
-
- return 0;
- */
dtim = ieee->current_network.dtim_data;
- //printk("DTIM\n");
- if(!(dtim & IEEE80211_DTIM_VALID))
+ if (!(dtim & IEEE80211_DTIM_VALID))
return 0;
- else
- timeout = ieee->current_network.beacon_interval;
+ else
+ timeout = ieee->current_network.beacon_interval;
- //printk("VALID\n");
ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
- if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
+ if (dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST) & ieee->ps))
return 2;
- if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
+ if (!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
return 0;
- if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
+ if (!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
return 0;
- if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
+ if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
(ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
return 0;
- if(time_l){
+ if (time_l) {
*time_l = ieee->current_network.last_dtim_sta_time[0]
+ MSECS((ieee->current_network.beacon_interval));
- //* ieee->current_network.dtim_period));
- //printk("beacon_interval:%x, dtim_period:%x, totol to Msecs:%x, HZ:%x\n", ieee->current_network.beacon_interval, ieee->current_network.dtim_period, MSECS(((ieee->current_network.beacon_interval * ieee->current_network.dtim_period))), HZ);
}
- if(time_h){
+ if (time_h) {
*time_h = ieee->current_network.last_dtim_sta_time[1];
- if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
+ if (time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
*time_h += 1;
}
return 1;
-
-
}
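ieee80211_sta_ps_sleep returns a three-way result (0 = do nothing, 1 = may sleep, 2 = wake up). A rough user-space sketch of that decision, using illustrative flag values rather than the driver's constants:

#include <stdio.h>

#define DTIM_VALID  0x80
#define DTIM_UCAST  0x01
#define DTIM_MBCAST 0x02

/* Mirrors the three-way result of the sleep decision: 0 = do nothing,
 * 1 = may sleep until the next DTIM, 2 = traffic is buffered for us so
 * wake up.  Flag values are assumptions for the sketch only.
 */
static int ps_decision(unsigned char dtim, unsigned char ps_mask,
		       int queues_idle, int recently_active)
{
	if (!(dtim & DTIM_VALID))
		return 0;
	if (dtim & (DTIM_UCAST | DTIM_MBCAST) & ps_mask)
		return 2;	/* AP advertised buffered frames: wake */
	if (recently_active || !queues_idle)
		return 0;	/* still busy, stay awake for now */
	return 1;		/* quiet: safe to doze until next DTIM */
}

int main(void)
{
	printf("%d\n", ps_decision(DTIM_VALID | DTIM_UCAST,
				   DTIM_UCAST | DTIM_MBCAST, 1, 0)); /* 2 */
	printf("%d\n", ps_decision(DTIM_VALID, DTIM_UCAST, 1, 0));    /* 1 */
	printf("%d\n", ps_decision(0, DTIM_UCAST, 1, 0));             /* 0 */
	return 0;
}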
static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
- u32 th,tl;
+ u32 th, tl;
short sleep;
- unsigned long flags,flags2;
+ unsigned long flags, flags2;
spin_lock_irqsave(&ieee->lock, flags);
- if((ieee->ps == IEEE80211_PS_DISABLED ||
-
+ if ((ieee->ps == IEEE80211_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
- ieee->state != IEEE80211_LINKED)){
+ ieee->state != IEEE80211_LINKED)) {
- //#warning CHECK_LOCK_HERE
+ /* #warning CHECK_LOCK_HERE */
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_wakeup(ieee, 1);
@@ -1674,71 +1486,57 @@ static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
- sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
-// printk("===>%s,%d[2 wake, 1 sleep, 0 do nothing], ieee->sta_sleep = %d\n",__func__, sleep,ieee->sta_sleep);
+ sleep = ieee80211_sta_ps_sleep(ieee, &th, &tl);
/* 2 wake, 1 sleep, 0 do nothing */
- if(sleep == 0)
+ if (sleep == 0)
goto out;
- if(sleep == 1){
-
- if(ieee->sta_sleep == 1)
- ieee->enter_sleep_state(ieee->dev,th,tl);
+ if (sleep == 1) {
+ if (ieee->sta_sleep == 1)
+ ieee->enter_sleep_state(ieee->dev, th, tl);
- else if(ieee->sta_sleep == 0){
- // printk("send null 1\n");
+ else if (ieee->sta_sleep == 0) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
-
- if(ieee->ps_is_queue_empty(ieee->dev)){
-
-
+ if (ieee->ps_is_queue_empty(ieee->dev)) {
ieee->sta_sleep = 2;
ieee->ps_request_tx_ack(ieee->dev);
- ieee80211_sta_ps_send_null_frame(ieee,1);
+ ieee80211_sta_ps_send_null_frame(ieee, 1);
ieee->ps_th = th;
ieee->ps_tl = tl;
}
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
-
}
-
-
- }else if(sleep == 2){
-//#warning CHECK_LOCK_HERE
+ } else if (sleep == 2) {
+ /* #warning CHECK_LOCK_HERE */
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- // printk("send wakeup packet\n");
- ieee80211_sta_wakeup(ieee,1);
+ ieee80211_sta_wakeup(ieee, 1);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
-
out:
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
{
- if(ieee->sta_sleep == 0){
- if(nl){
- // printk("Warning: driver is probably failing to report TX ps error\n");
+ if (ieee->sta_sleep == 0) {
+ if (nl) {
ieee->ps_request_tx_ack(ieee->dev);
ieee80211_sta_ps_send_null_frame(ieee, 0);
}
return;
-
}
- if(ieee->sta_sleep == 1)
+ if (ieee->sta_sleep == 1)
ieee->sta_wake_up(ieee->dev);
ieee->sta_sleep = 0;
- if(nl){
+ if (nl) {
ieee->ps_request_tx_ack(ieee->dev);
ieee80211_sta_ps_send_null_frame(ieee, 0);
}
@@ -1746,25 +1544,20 @@ void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
{
- unsigned long flags,flags2;
+ unsigned long flags, flags2;
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->sta_sleep == 2){
+ if (ieee->sta_sleep == 2) {
/* Null frame with PS bit set */
- if(success){
-
- // printk("==================> %s::enter sleep state\n",__func__);
+ if (success) {
ieee->sta_sleep = 1;
- ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl);
+ ieee->enter_sleep_state(ieee->dev, ieee->ps_th, ieee->ps_tl);
}
/* if the card report not success we can't be sure the AP
* has not RXed so we can't assume the AP believe us awake
*/
- }
- /* 21112005 - tx again null without PS bit if lost */
- else {
-
- if((ieee->sta_sleep == 0) && !success){
+ } else {
+ if ((ieee->sta_sleep == 0) && !success) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
ieee80211_sta_ps_send_null_frame(ieee, 0);
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
@@ -1780,16 +1573,16 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
{
struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
u16 errcode;
- u8* challenge=NULL;
- int chlen=0;
- int aid=0;
+ u8 *challenge = NULL;
+ int chlen = 0;
+ int aid = 0;
struct ieee80211_assoc_response_frame *assoc_resp;
struct ieee80211_info_element *info_element;
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
return 0;
- if(ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
+ if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
ieee->iw_mode == IW_MODE_INFRA &&
ieee->state == IEEE80211_LINKED))
@@ -1800,30 +1593,27 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
ieee->last_rx_ps_time = jiffies;
switch (WLAN_FC_GET_STYPE(header->frame_control)) {
-
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
-
IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(header->frame_ctl));
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
- ieee->iw_mode == IW_MODE_INFRA){
- if (0 == (errcode=assoc_parse(skb, &aid))){
+ ieee->iw_mode == IW_MODE_INFRA) {
+ errcode = assoc_parse(skb, &aid);
+ if (0 == errcode) {
u16 left;
- ieee->state=IEEE80211_LINKED;
+ ieee->state = IEEE80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
-
- //printk(KERN_WARNING "nic_type = %s", (rx_stats->nic_type == 1)?"rtl8187":"rtl8187B");
- if(1 == rx_stats->nic_type) //card type is 8187
- {
+ /* card type is 8187 */
+ if (1 == rx_stats->nic_type)
goto associate_complete;
- }
- assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
- info_element = &assoc_resp->info_element;
- left = skb->len - ((void*)info_element - (void*)assoc_resp);
+
+ assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
+ info_element = &assoc_resp->info_element;
+ left = skb->len - ((void *)info_element - (void *)assoc_resp);
while (left >= sizeof(struct ieee80211_info_element_hdr)) {
if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
@@ -1832,32 +1622,33 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
}
switch (info_element->id) {
case MFIE_TYPE_GENERIC:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
+ IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
if (info_element->len >= 8 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x50 &&
info_element->data[2] == 0xf2 &&
info_element->data[3] == 0x02 &&
info_element->data[4] == 0x01) {
- // Not care about version at present.
- //WMM Parameter Element
- memcpy(ieee->current_network.wmm_param,(u8*)(info_element->data\
- + 8),(info_element->len - 8));
-
- if (((ieee->current_network.wmm_info^info_element->data[6])& \
- 0x0f)||(!ieee->init_wmmparam_flag)) {
- // refresh parameter element for current network
- // update the register parameter for hardware
- ieee->init_wmmparam_flag = 1;
- queue_work(ieee->wq, &ieee->wmm_param_update_wq);
-
- }
- //update info_element for current network
- ieee->current_network.wmm_info = info_element->data[6];
+					/* Don't care about the version at present.
+					 * WMM Parameter Element.
+					 */
+ memcpy(ieee->current_network.wmm_param, (u8 *)(info_element->data\
+ + 8), (info_element->len - 8));
+
+ if (((ieee->current_network.wmm_info^info_element->data[6])& \
+ 0x0f) || (!ieee->init_wmmparam_flag)) {
+ /* refresh parameter element for current network
+ * update the register parameter for hardware.
+ */
+ ieee->init_wmmparam_flag = 1;
+ queue_work(ieee->wq, &ieee->wmm_param_update_wq);
+ }
+ /* update info_element for current network */
+ ieee->current_network.wmm_info = info_element->data[6];
}
break;
default:
- //nothing to do at present!!!
+ /* nothing to do at present!!! */
break;
}
@@ -1866,14 +1657,14 @@ inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
info_element = (struct ieee80211_info_element *)
&info_element->data[info_element->len];
}
- if(!ieee->init_wmmparam_flag) //legacy AP, reset the AC_xx_param register
- {
- queue_work(ieee->wq,&ieee->wmm_param_update_wq);
- ieee->init_wmmparam_flag = 1;//indicate AC_xx_param upated since last associate
+ /* legacy AP, reset the AC_xx_param register */
+ if (!ieee->init_wmmparam_flag) {
+ queue_work(ieee->wq, &ieee->wmm_param_update_wq);
+			ieee->init_wmmparam_flag = 1; /* indicate AC_xx_param updated since last associate */
}
associate_complete:
ieee80211_associate_complete(ieee);
- }else{
+ } else {
ieee->softmac_stats.rx_ass_err++;
IEEE80211_DEBUG_MGMT(
"Association response status code 0x%x\n",
@@ -1882,47 +1673,41 @@ associate_complete:
}
}
break;
-
case IEEE80211_STYPE_ASSOC_REQ:
case IEEE80211_STYPE_REASSOC_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->iw_mode == IW_MODE_MASTER)
ieee80211_rx_assoc_rq(ieee, skb);
break;
-
case IEEE80211_STYPE_AUTH:
-
- if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
ieee->iw_mode == IW_MODE_INFRA){
-
IEEE80211_DEBUG_MGMT("Received authentication response");
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
- if(ieee->open_wep || !challenge){
+ errcode = auth_parse(skb, &challenge, &chlen);
+ if (0 == errcode) {
+ if (ieee->open_wep || !challenge) {
ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
ieee80211_associate_step2(ieee);
- }else{
+ } else {
ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
}
- }else{
+ } else {
ieee->softmac_stats.rx_auth_rs_err++;
- IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x",errcode);
+ IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x", errcode);
ieee80211_associate_abort(ieee);
}
- }else if (ieee->iw_mode == IW_MODE_MASTER){
+ } else if (ieee->iw_mode == IW_MODE_MASTER) {
ieee80211_rx_auth_rq(ieee, skb);
}
}
break;
-
case IEEE80211_STYPE_PROBE_REQ:
-
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
((ieee->iw_mode == IW_MODE_ADHOC ||
ieee->iw_mode == IW_MODE_MASTER) &&
@@ -1930,36 +1715,28 @@ associate_complete:
ieee80211_rx_probe_rq(ieee, skb);
break;
-
case IEEE80211_STYPE_DISASSOC:
case IEEE80211_STYPE_DEAUTH:
/* FIXME for now repeat all the association procedure
- * both for disassociation and deauthentication
- */
+ * both for disassociation and deauthentication
+ */
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
(ieee->state == IEEE80211_LINKED) &&
(ieee->iw_mode == IW_MODE_INFRA) &&
- (!memcmp(header->addr2,ieee->current_network.bssid,ETH_ALEN))){
+ (!memcmp(header->addr2, ieee->current_network.bssid, ETH_ALEN))) {
ieee->state = IEEE80211_ASSOCIATING;
ieee->softmac_stats.reassoc++;
- //notify_wx_assoc_event(ieee); //YJ,del,080828, do not notify os here
queue_work(ieee->wq, &ieee->associate_procedure_wq);
}
-
break;
-
default:
return -1;
break;
}
-
- //dev_kfree_skb_any(skb);
return 0;
}
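The association-response handler above recognises the WMM Parameter element inside a generic (vendor-specific, tag 221) IE by the Microsoft OUI 00:50:f2, OUI type 2 and subtype 1. A minimal sketch of that byte check; is_wmm_param_ie is a made-up helper.

#include <stdbool.h>
#include <stdio.h>

/* Same byte tests the handler performs on info_element->data to
 * identify the WMM Parameter element.  Illustrative only.
 */
static bool is_wmm_param_ie(const unsigned char *data, unsigned char len)
{
	return len >= 8 &&
	       data[0] == 0x00 && data[1] == 0x50 && data[2] == 0xf2 &&
	       data[3] == 0x02 &&	/* OUI type: WMM */
	       data[4] == 0x01;		/* OUI subtype: parameter element */
}

int main(void)
{
	const unsigned char wmm[] = { 0x00, 0x50, 0xf2, 0x02, 0x01, 0x01,
				      0x80, 0x00 };
	const unsigned char other[] = { 0x00, 0x50, 0xf2, 0x01, 0x01 };

	printf("%d %d\n", is_wmm_param_ie(wmm, sizeof(wmm)),
	       is_wmm_param_ie(other, sizeof(other)));
	return 0;
}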
-
-
/* following are for a simpler TX queue management.
* Instead of using netif_[stop/wake]_queue the driver
* will uses these two function (plus a reset one), that
@@ -1982,27 +1759,23 @@ associate_complete:
void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
struct ieee80211_device *ieee)
{
-
-
unsigned long flags;
int i;
- spin_lock_irqsave(&ieee->lock,flags);
+ spin_lock_irqsave(&ieee->lock, flags);
/* called with 2nd parm 0, no tx mgmt lock required */
- ieee80211_sta_wakeup(ieee,0);
-
- for(i = 0; i < txb->nr_frags; i++) {
+ ieee80211_sta_wakeup(ieee, 0);
- if (ieee->queue_stop){
+ for (i = 0; i < txb->nr_frags; i++) {
+ if (ieee->queue_stop) {
ieee->tx_pending.txb = txb;
ieee->tx_pending.frag = i;
goto exit;
- }else{
+ } else {
ieee->softmac_data_hard_start_xmit(
txb->fragments[i],
- ieee->dev,ieee->rate);
- //(i+1)<txb->nr_frags);
+ ieee->dev, ieee->rate);
ieee->stats.tx_packets++;
ieee->stats.tx_bytes += txb->fragments[i]->len;
ieee->dev->trans_start = jiffies;
@@ -2012,66 +1785,59 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
ieee80211_txb_free(txb);
exit:
- spin_unlock_irqrestore(&ieee->lock,flags);
-
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
/* called with ieee->lock acquired */
static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
- for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
+ for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
- if (ieee->queue_stop){
+ if (ieee->queue_stop) {
ieee->tx_pending.frag = i;
return;
- }else{
-
+ } else {
ieee->softmac_data_hard_start_xmit(
ieee->tx_pending.txb->fragments[i],
- ieee->dev,ieee->rate);
- //(i+1)<ieee->tx_pending.txb->nr_frags);
+ ieee->dev, ieee->rate);
ieee->stats.tx_packets++;
ieee->dev->trans_start = jiffies;
}
}
-
ieee80211_txb_free(ieee->tx_pending.txb);
ieee->tx_pending.txb = NULL;
}
-
void ieee80211_reset_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
- spin_lock_irqsave(&ieee->lock,flags);
+ spin_lock_irqsave(&ieee->lock, flags);
init_mgmt_queue(ieee);
- if (ieee->tx_pending.txb){
+ if (ieee->tx_pending.txb) {
ieee80211_txb_free(ieee->tx_pending.txb);
ieee->tx_pending.txb = NULL;
}
ieee->queue_stop = 0;
- spin_unlock_irqrestore(&ieee->lock,flags);
-
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
{
-
unsigned long flags;
struct sk_buff *skb;
struct ieee80211_hdr_3addr *header;
- spin_lock_irqsave(&ieee->lock,flags);
- if (! ieee->queue_stop) goto exit;
+ spin_lock_irqsave(&ieee->lock, flags);
+ if (!ieee->queue_stop)
+ goto exit;
ieee->queue_stop = 0;
- if(ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE){
- while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))){
-
+ if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
+ while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
header = (struct ieee80211_hdr_3addr *) skb->data;
header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
@@ -2081,42 +1847,32 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
else
ieee->seq_ctrl[0]++;
- //printk(KERN_ALERT "ieee80211_wake_queue \n");
- ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
- dev_kfree_skb_any(skb);//edit by thomas
+ ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
+ dev_kfree_skb_any(skb);
}
}
if (!ieee->queue_stop && ieee->tx_pending.txb)
ieee80211_resume_tx(ieee);
- if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)){
+ if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
ieee->softmac_stats.swtxawake++;
netif_wake_queue(ieee->dev);
}
-
-exit :
- spin_unlock_irqrestore(&ieee->lock,flags);
+exit:
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
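The wake-queue path above builds the Sequence Control field from a 12-bit counter shifted left by four (fragment number zero) and wrapped at 0xFFF. A self-contained sketch of that bookkeeping; next_seq_ctrl is a hypothetical helper.

#include <stdio.h>
#include <stdint.h>

/* Pack the 12-bit sequence number into the upper bits of the 16-bit
 * Sequence Control field and wrap it at 0xFFF, as the wake-queue path
 * does for queued management frames.  Illustrative only.
 */
static uint16_t next_seq_ctrl(uint16_t *seq)
{
	uint16_t field = (uint16_t)(*seq << 4);	/* fragment number 0 */

	if (*seq == 0xFFF)
		*seq = 0;
	else
		(*seq)++;
	return field;
}

int main(void)
{
	uint16_t seq = 0xFFE;
	int i;

	for (i = 0; i < 4; i++)
		printf("seq_ctrl = 0x%04x\n", next_seq_ctrl(&seq));
	/* prints 0xffe0, 0xfff0, then wraps back to 0x0000, 0x0010 */
	return 0;
}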
-
void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
- //unsigned long flags;
- //spin_lock_irqsave(&ieee->lock,flags);
-
- if (! netif_queue_stopped(ieee->dev)){
+ if (!netif_queue_stopped(ieee->dev)) {
netif_stop_queue(ieee->dev);
ieee->softmac_stats.swtxstop++;
}
ieee->queue_stop = 1;
- //spin_unlock_irqrestore(&ieee->lock,flags);
-
}
-
inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
{
-
random_ether_addr(ieee->current_network.bssid);
}
@@ -2125,7 +1881,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
{
ieee->assoc_id = 1;
- if (ieee->current_network.ssid_len == 0){
+ if (ieee->current_network.ssid_len == 0) {
strncpy(ieee->current_network.ssid,
IEEE80211_DEFAULT_TX_ESSID,
IW_ESSID_MAX_SIZE);
@@ -2149,7 +1905,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
- if(ieee->raw_tx){
+ if (ieee->raw_tx) {
if (ieee->data_hard_resume)
ieee->data_hard_resume(ieee->dev);
@@ -2173,9 +1929,8 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
down(&ieee->wx_sem);
-
- if (ieee->current_network.ssid_len == 0){
- strcpy(ieee->current_network.ssid,IEEE80211_DEFAULT_TX_ESSID);
+ if (ieee->current_network.ssid_len == 0) {
+ strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
ieee->ssid_set = 1;
}
@@ -2183,7 +1938,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
/* check if we have this cell in our network list */
ieee80211_softmac_check_all_nets(ieee);
- if(ieee->state == IEEE80211_NOLINK)
+ if (ieee->state == IEEE80211_NOLINK)
ieee->current_network.channel = 10;
/* if not then the state is not linked. Maybe the user switched to
* ad-hoc mode just after being in monitor mode, or just after
@@ -2203,13 +1958,12 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee80211_start_scan_syncro(ieee);
/* the network definitively is not here.. create a new cell */
- if (ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
printk("creating new IBSS cell\n");
- if(!ieee->wap_set)
+ if (!ieee->wap_set)
ieee80211_randomize_cell(ieee);
- if(ieee->modulation & IEEE80211_CCK_MODULATION){
-
+ if (ieee->modulation & IEEE80211_CCK_MODULATION) {
ieee->current_network.rates_len = 4;
ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
@@ -2217,10 +1971,10 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- }else
+ } else
ieee->current_network.rates_len = 0;
- if(ieee->modulation & IEEE80211_OFDM_MODULATION){
+ if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
ieee->current_network.rates_ex_len = 8;
ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
@@ -2233,19 +1987,18 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
ieee->rate = 540;
- }else{
+ } else {
ieee->current_network.rates_ex_len = 0;
ieee->rate = 110;
}
- // By default, WMM function will be disabled in IBSS mode
+ /* By default, WMM function will be disabled in IBSS mode */
ieee->current_network.QoS_Enable = 0;
ieee->current_network.atim_window = 0;
ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
- if(ieee->short_slot)
+ if (ieee->short_slot)
ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
-
}
ieee->state = IEEE80211_LINKED;
@@ -2264,6 +2017,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
up(&ieee->wx_sem);
}
+
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
{
queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 100);
@@ -2273,19 +2027,15 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
void ieee80211_start_bss(struct ieee80211_device *ieee)
{
unsigned long flags;
- //
- // Ref: 802.11d 11.1.3.3
- // STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
- //
- if(IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee))
- {
- if(! ieee->bGlobalDomain)
- {
+ /* Ref: 802.11d 11.1.3.3
+	 * A STA shall not start a BSS unless it can transmit a properly
+	 * formed Beacon frame including a Country IE.
+ */
+ if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
+ if (!ieee->bGlobalDomain)
return;
- }
}
- /* check if we have already found the net we
- * are interested in (if any).
+ /* check if we have already found the net we are interested in (if any).
* if not (we are disassociated and we are not
* in associating / authenticating phase) start the background scanning.
*/
@@ -2300,14 +2050,10 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
*/
spin_lock_irqsave(&ieee->lock, flags);
-//#ifdef ENABLE_IPS
-// printk("start bss ENABLE_IPS\n");
-//#else
- if (ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
ieee->actscanning = true;
ieee80211_rtl_start_scan(ieee);
}
-//#endif
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -2322,7 +2068,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
- if(IS_DOT11D_ENABLE(ieee))
+ if (IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
ieee->link_change(ieee->dev);
@@ -2337,9 +2083,9 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
down(&ieee->wx_sem);
- if(!ieee->proto_started)
+ if (!ieee->proto_started)
goto exit;
- if(ieee->state != IEEE80211_ASSOCIATING_RETRY)
+ if (ieee->state != IEEE80211_ASSOCIATING_RETRY)
goto exit;
/* until we do not set the state to IEEE80211_NOLINK
* there are no possibility to have someone else trying
@@ -2360,17 +2106,13 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
spin_lock_irqsave(&ieee->lock, flags);
- if(ieee->state == IEEE80211_NOLINK){
+ if (ieee->state == IEEE80211_NOLINK) {
ieee->beinretry = false;
ieee->actscanning = true;
ieee80211_rtl_start_scan(ieee);
}
- //YJ,add,080828, notify os here
- if(ieee->state == IEEE80211_NOLINK)
- {
+ if (ieee->state == IEEE80211_NOLINK)
notify_wx_assoc_event(ieee);
- }
- //YJ,add,080828,end
spin_unlock_irqrestore(&ieee->lock, flags);
exit:
@@ -2379,7 +2121,7 @@ exit:
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
{
- u8 broadcast_addr[] = {0xff,0xff,0xff,0xff,0xff,0xff};
+ u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sk_buff *skb = NULL;
struct ieee80211_probe_response *b;
@@ -2392,7 +2134,6 @@ struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
return skb;
-
}
struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
@@ -2401,7 +2142,7 @@ struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
struct ieee80211_probe_response *b;
skb = ieee80211_get_beacon_(ieee);
- if(!skb)
+ if (!skb)
return NULL;
b = (struct ieee80211_probe_response *) skb->data;
@@ -2423,7 +2164,6 @@ void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
up(&ieee->wx_sem);
}
-
void ieee80211_stop_protocol(struct ieee80211_device *ieee)
{
if (!ieee->proto_started)
@@ -2432,9 +2172,9 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee)
ieee->proto_started = 0;
ieee80211_stop_send_beacons(ieee);
- if((ieee->iw_mode == IW_MODE_INFRA)&&(ieee->state == IEEE80211_LINKED)) {
- SendDisassociation(ieee,NULL,WLAN_REASON_DISASSOC_STA_HAS_LEFT);
- }
+ if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == IEEE80211_LINKED))
+ SendDisassociation(ieee, NULL, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
+
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
cancel_delayed_work(&ieee->start_ibss_wq);
@@ -2454,36 +2194,35 @@ void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
void ieee80211_start_protocol(struct ieee80211_device *ieee)
{
short ch = 0;
- int i = 0;
+ int i = 0;
if (ieee->proto_started)
return;
ieee->proto_started = 1;
- if (ieee->current_network.channel == 0){
- do{
+ if (ieee->current_network.channel == 0) {
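+		/* no channel configured yet: pick the lowest channel enabled
+		 * in the current 802.11d channel map, or bail out if none is
+		 * allowed.
+		 */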
+ do {
ch++;
if (ch > MAX_CHANNEL_NUMBER)
return; /* no channel found */
- }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
+ } while (!GET_DOT11D_INFO(ieee)->channel_map[ch]);
ieee->current_network.channel = ch;
}
if (ieee->current_network.beacon_interval == 0)
ieee->current_network.beacon_interval = 100;
- ieee->set_chan(ieee->dev,ieee->current_network.channel);
+ ieee->set_chan(ieee->dev, ieee->current_network.channel);
- for(i = 0; i < 17; i++) {
- ieee->last_rxseq_num[i] = -1;
- ieee->last_rxfrag_num[i] = -1;
- ieee->last_packet_time[i] = 0;
+ for (i = 0; i < 17; i++) {
+ ieee->last_rxseq_num[i] = -1;
+ ieee->last_rxfrag_num[i] = -1;
+ ieee->last_packet_time[i] = 0;
}
- ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers.
-
+ ieee->init_wmmparam_flag = 0; /* reinitialize AC_xx_PARAM registers. */
/* if the user set the MAC of the ad-hoc cell and then
* switch to managed mode, shall we make sure that association
@@ -2493,7 +2232,7 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
switch (ieee->iw_mode) {
case IW_MODE_AUTO:
ieee->iw_mode = IW_MODE_INFRA;
- //not set break here intentionly
+		/* no break here: intentional fall-through to IW_MODE_INFRA */
case IW_MODE_INFRA:
ieee80211_start_bss(ieee);
break;
@@ -2517,7 +2256,6 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
}
}
-
#define DRV_NAME "Ieee80211"
void ieee80211_softmac_init(struct ieee80211_device *ieee)
{
@@ -2526,36 +2264,29 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->state = IEEE80211_NOLINK;
ieee->sync_scan_hurryup = 0;
- for(i = 0; i < 5; i++) {
- ieee->seq_ctrl[i] = 0;
- }
+ for (i = 0; i < 5; i++)
+ ieee->seq_ctrl[i] = 0;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
ieee->scanning = 0;
- ieee->softmac_features = 0; //so IEEE2100-like driver are happy
+	ieee->softmac_features = 0; /* so IEEE2100-like drivers are happy */
ieee->wap_set = 0;
ieee->ssid_set = 0;
ieee->proto_started = 0;
ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE;
ieee->rate = 3;
-//#ifdef ENABLE_LPS
ieee->ps = IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST;
-//#else
-// ieee->ps = IEEE80211_PS_DISABLED;
-//#endif
ieee->sta_sleep = 0;
-//by amy
ieee->bInactivePs = false;
ieee->actscanning = false;
ieee->ListenInterval = 2;
- ieee->NumRxDataInPeriod = 0; //YJ,add,080828
- ieee->NumRxBcnInPeriod = 0; //YJ,add,080828
- ieee->NumRxOkTotal = 0;//+by amy 080312
- ieee->NumRxUnicast = 0;//YJ,add,080828,for keep alive
+ ieee->NumRxDataInPeriod = 0;
+ ieee->NumRxBcnInPeriod = 0;
+ ieee->NumRxOkTotal = 0;
+ ieee->NumRxUnicast = 0; /* for keep alive */
ieee->beinretry = false;
ieee->bHwRadioOff = false;
-//by amy
init_mgmt_queue(ieee);
@@ -2571,13 +2302,12 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->wq = create_workqueue(DRV_NAME);
- INIT_DELAYED_WORK(&ieee->start_ibss_wq,(void*) ieee80211_start_ibss_wq);
- INIT_WORK(&ieee->associate_complete_wq,(void*) ieee80211_associate_complete_wq);
- INIT_WORK(&ieee->associate_procedure_wq,(void*) ieee80211_associate_procedure_wq);
- INIT_DELAYED_WORK(&ieee->softmac_scan_wq,(void*) ieee80211_softmac_scan_wq);
- INIT_DELAYED_WORK(&ieee->associate_retry_wq,(void*) ieee80211_associate_retry_wq);
- INIT_WORK(&ieee->wx_sync_scan_wq,(void*) ieee80211_wx_sync_scan_wq);
-// INIT_WORK(&ieee->watch_dog_wq,(void*) ieee80211_watch_dog_wq);
+ INIT_DELAYED_WORK(&ieee->start_ibss_wq, (void *) ieee80211_start_ibss_wq);
+ INIT_WORK(&ieee->associate_complete_wq, (void *) ieee80211_associate_complete_wq);
+ INIT_WORK(&ieee->associate_procedure_wq, (void *) ieee80211_associate_procedure_wq);
+ INIT_DELAYED_WORK(&ieee->softmac_scan_wq, (void *) ieee80211_softmac_scan_wq);
+ INIT_DELAYED_WORK(&ieee->associate_retry_wq, (void *) ieee80211_associate_retry_wq);
+ INIT_WORK(&ieee->wx_sync_scan_wq, (void *) ieee80211_wx_sync_scan_wq);
sema_init(&ieee->wx_sem, 1);
sema_init(&ieee->scan_sem, 1);
@@ -2598,8 +2328,7 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
-
- //add for RF power on power of by lizhaoming 080512
+	/* added for RF power on/off handling */
cancel_delayed_work(&ieee->GPIOChangeRFWorkItem);
destroy_workqueue(ieee->wq);
@@ -2607,22 +2336,16 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
up(&ieee->wx_sem);
}
-/********************************************************
- * Start of WPA code. *
- * this is stolen from the ipw2200 driver *
- ********************************************************/
-
-
+/* Start of WPA code. This is stolen from the ipw2200 driver */
static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
* interface. */
- printk("%s WPA\n",value ? "enabling" : "disabling");
+ printk("%s WPA\n", value ? "enabling" : "disabling");
ieee->wpa_enabled = value;
return 0;
}
-
static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie,
int wpa_ie_len)
{
@@ -2632,16 +2355,14 @@ static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_i
ieee80211_disassociate(ieee);
}
-
static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
int reason)
{
-
int ret = 0;
switch (command) {
case IEEE_MLME_STA_DEAUTH:
- // silently ignore
+ /* silently ignore */
break;
case IEEE_MLME_STA_DISASSOC:
@@ -2656,7 +2377,6 @@ static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
return ret;
}
-
static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
struct ieee_param *param, int plen)
{
@@ -2690,7 +2410,6 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
{
-
struct ieee80211_security sec = {
.flags = SEC_AUTH_MODE,
};
@@ -2715,7 +2434,7 @@ static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
u32 value)
{
- int ret=0;
+ int ret = 0;
unsigned long flags;
switch (name) {
@@ -2724,7 +2443,7 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
break;
case IEEE_PARAM_TKIP_COUNTERMEASURES:
- ieee->tkip_countermeasures=value;
+ ieee->tkip_countermeasures = value;
break;
case IEEE_PARAM_DROP_UNENCRYPTED: {
@@ -2743,15 +2462,14 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
.flags = SEC_ENABLED,
.enabled = value,
};
- ieee->drop_unencrypted = value;
+ ieee->drop_unencrypted = value;
/* We only change SEC_LEVEL for open mode. Others
* are set by ipw_wpa_set_encryption.
*/
if (!value) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_0;
- }
- else {
+ } else {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
}
@@ -2761,27 +2479,22 @@ static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
}
case IEEE_PARAM_PRIVACY_INVOKED:
- ieee->privacy_invoked=value;
+ ieee->privacy_invoked = value;
break;
-
case IEEE_PARAM_AUTH_ALGS:
ret = ieee80211_wpa_set_auth_algs(ieee, value);
break;
-
case IEEE_PARAM_IEEE_802_1X:
- ieee->ieee802_1x=value;
+ ieee->ieee802_1x = value;
break;
case IEEE_PARAM_WPAX_SELECT:
- // added for WPA2 mixed mode
- //printk(KERN_WARNING "------------------------>wpax value = %x\n", value);
- spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags);
+ spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
ieee->wpax_type_set = 1;
ieee->wpax_type_notify = value;
- spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags);
+ spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
break;
-
default:
- printk("Unknown WPA param: %d\n",name);
+ printk("Unknown WPA param: %d\n", name);
ret = -EOPNOTSUPP;
}
@@ -2823,8 +2536,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
if (strcmp(param->u.crypt.alg, "none") == 0) {
if (crypt) {
sec.enabled = 0;
- // FIXME FIXME
- //sec.encrypt = 0;
+ /* FIXME FIXME */
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
ieee80211_crypt_delayed_deinit(ieee, crypt);
@@ -2832,8 +2544,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
goto done;
}
sec.enabled = 1;
-// FIXME FIXME
-// sec.encrypt = 1;
+ /* FIXME FIXME */
sec.flags |= SEC_ENABLED;
/* IPW HW cannot build TKIP MIC, host decryption still needed. */
@@ -2942,12 +2653,11 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
struct iw_point *p)
{
struct ieee_param *param;
- int ret=0;
+ int ret = 0;
down(&ieee->wx_sem);
- //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
- if (p->length < sizeof(struct ieee_param) || !p->pointer){
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
goto out;
}
@@ -2959,27 +2669,22 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
}
switch (param->cmd) {
-
case IEEE_CMD_SET_WPA_PARAM:
ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
param->u.wpa_param.value);
break;
-
case IEEE_CMD_SET_WPA_IE:
ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
break;
-
case IEEE_CMD_SET_ENCRYPTION:
ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
break;
-
case IEEE_CMD_MLME:
ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command,
param->u.mlme.reason_code);
break;
-
default:
- printk("Unknown WPA supplicant request: %d\n",param->cmd);
+ printk("Unknown WPA supplicant request: %d\n", param->cmd);
ret = -EOPNOTSUPP;
break;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index 3b7955f0ff98..07c3f715a6f5 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -1,38 +1,38 @@
-/******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
- Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+/*
+ * Copyright(c) 2004 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are based on the WEP enablement code provided by the
+ * Host AP project hostap-drivers v0.1.3
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
-******************************************************************************/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "ieee80211.h"
static const char *ieee80211_modes[] = {
@@ -54,7 +54,7 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
+ ether_addr_copy(iwe.u.ap_addr.sa_data, network->bssid);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Remaining entries will be displayed in the order we provide them */
@@ -62,17 +62,13 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- //YJ,modified,080903,for hidden ap
- //if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
- //YJ,modified,080903,end
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
} else {
- iwe.u.data.length = min(network->ssid_len, (u8)32);
+ iwe.u.data.length = min_t(u8, network->ssid_len, 32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
}
- //printk("ESSID: %s\n",network->ssid);
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
@@ -92,8 +88,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
-/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
- iwe.u.freq.e = 3; */
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
@@ -145,10 +139,9 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
/* Add quality statistics */
/* TODO: Fix these values... */
if (network->stats.signal == 0 || network->stats.rssi == 0)
- printk("========>signal:%d, rssi:%d\n", network->stats.signal,
- network->stats.rssi);
+ netdev_info(ieee->dev, "========>signal:%d, rssi:%d\n",
+ network->stats.signal, network->stats.rssi);
iwe.cmd = IWEVQUAL;
-// printk("SIGNAL: %d,RSSI: %d,NOISE: %d\n",network->stats.signal,network->stats.rssi,network->stats.noise);
iwe.u.qual.qual = network->stats.signalstrength;
iwe.u.qual.level = network->stats.signal;
iwe.u.qual.noise = network->stats.noise;
@@ -171,7 +164,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
if (network->wpa_ie_len) {
- // printk("wpa_ie_len:%d\n", network->wpa_ie_len);
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
@@ -181,7 +173,6 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len) {
- // printk("=====>rsn_ie_len:\n", network->rsn_ie_len);
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
@@ -190,7 +181,8 @@ static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
}
/* Add EXTRA: Age to display seconds since last beacon/probe response
- * for given network. */
+ * for given network.
+ */
iwe.cmd = IWEVCUSTOM;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
@@ -210,8 +202,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
unsigned long flags;
int err = 0;
char *ev = extra;
- char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
- //char *stop = ev + IW_SCAN_MAX_DATA;
+ char *stop = ev + wrqu->data.length;
int i = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
@@ -287,7 +278,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
- * and if no key index was provided, de-init them all */
+ * and if no key index was provided, de-init them all.
+ */
for (i = 0; i < WEP_KEYS; i++) {
if (ieee->crypt[i] != NULL) {
if (key_provided)
@@ -306,15 +298,14 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
goto done;
}
-
-
sec.enabled = 1;
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
- * on this key */
+ * on this key.
+ */
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
@@ -359,10 +350,11 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
(*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
- * explicitly set */
+ * explicitly set.
+ */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;//by wb 080312
+ ieee->tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
NULL, (*crypt)->priv);
@@ -395,7 +387,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
"OPEN" : "SHARED KEY");
/* For now we just support WEP, so only set that security level...
- * TODO: When WPA is added this is one place that needs to change */
+ * TODO: When WPA is added this is one place that needs to change
+ */
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
@@ -406,7 +399,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
+ * the callbacks structures used to initialize the 802.11 stack.
+ */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
@@ -448,7 +442,8 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
if (strcmp(crypt->ops->name, "WEP") != 0) {
/* only WEP is supported with wireless extensions, so just
- * report that encryption is used */
+ * report that encryption is used.
+ */
erq->length = 0;
erq->flags |= IW_ENCODE_ENABLED;
return 0;
@@ -483,7 +478,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct ieee80211_security sec = {
.flags = 0,
};
- //printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
@@ -497,7 +491,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
- //printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
@@ -506,7 +499,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
return -EINVAL;
}
- sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
+ sec.flags |= SEC_ENABLED;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
@@ -518,16 +511,13 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
if (i == WEP_KEYS) {
sec.enabled = 0;
- // sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
}
- //printk("disabled: flag:%x\n", encoding->flags);
goto done;
}
sec.enabled = 1;
- // sec.encrypt = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
@@ -545,7 +535,6 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ret = -EINVAL;
goto done;
}
-// printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);
ops = ieee80211_get_crypto_ops(alg);
if (ops == NULL)
@@ -553,7 +542,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
if (ops == NULL) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
- printk("========>unknown crypto alg %d\n", ext->alg);
+ netdev_err(ieee->dev, "========>unknown crypto alg %d\n",
+ ext->alg);
ret = -EINVAL;
goto done;
}
@@ -584,13 +574,11 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
(*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
(*crypt)->priv) < 0) {
IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
- printk("key setting failed\n");
+ netdev_err(ieee->dev, "key setting failed\n");
ret = -EINVAL;
goto done;
}
#if 1
- //skip_host_crypt:
- //printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
ieee->tx_keyidx = idx;
sec.active_key = idx;
@@ -602,15 +590,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
sec.key_sizes[idx] = ext->key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
- // sec.encode_alg[idx] = SEC_ALG_WEP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
- // sec.encode_alg[idx] = SEC_ALG_TKIP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
- // sec.encode_alg[idx] = SEC_ALG_CCMP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
@@ -632,20 +617,19 @@ done:
return ret;
}
+
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
-// printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
#if 1
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
- // printk("disassoc now\n");
ieee80211_disassociate(ieee);
break;
- default:
+ default:
return -EOPNOTSUPP;
}
#endif
@@ -656,24 +640,16 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
-/*
- struct ieee80211_security sec = {
- .flags = SEC_AUTH_MODE,
- }
-*/
- //printk("set auth:flag:%x, data value:%x\n", data->flags, data->value);
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
- /*need to support wpa2 here*/
- //printk("wpa version:%x\n", data->value);
+ /* need to support wpa2 here */
break;
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
- /*
- * * Host AP driver does not use these parameters and allows
- * * wpa_supplicant to control them internally.
- * */
+ /* Host AP driver does not use these parameters and allows
+ * wpa_supplicant to control them internally.
+ */
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
ieee->tkip_countermeasures = data->value;
@@ -684,13 +660,11 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
case IW_AUTH_80211_AUTH_ALG:
ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM) ? 1 : 0;
- //printk("open_wep:%d\n", ieee->open_wep);
break;
#if 1
case IW_AUTH_WPA_ENABLED:
ieee->wpa_enabled = (data->value) ? 1 : 0;
- //printk("enable wpa:%d\n", ieee->wpa_enabled);
break;
#endif
@@ -712,13 +686,13 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
u8 *buf = NULL;
if (len > MAX_WPA_IE_LEN || (len && ie == NULL)) {
- printk("return error out, len:%zu\n", len);
+ netdev_err(ieee->dev, "return error out, len:%zu\n", len);
return -EINVAL;
}
if (len) {
if (len != ie[1]+2) {
- printk("len:%zu, ie:%d\n", len, ie[1]);
+ netdev_err(ieee->dev, "len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmemdup(ie, len, GFP_KERNEL);
@@ -732,7 +706,6 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
-// printk("<=====out %s()\n", __func__);
return 0;
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index 8999ec62450d..9f931dba1d82 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -1,19 +1,18 @@
/*
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to thanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
+ * This is part of rtl8180 OpenSource driver.
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public Licence)
+ *
+ * Parts of this driver are based on the GPL part of the official realtek driver
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton from Patric
+ * Schenke & Andres Salomon
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
+ *
+ * We want to thank the Authors of those projects and the Ndiswrapper project
+ * Authors.
+ */
#ifndef R8180H
#define R8180H
@@ -21,13 +20,12 @@
#include <linux/interrupt.h>
#define RTL8180_MODULE_NAME "r8180"
-#define DMESG(x,a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
-#define DMESGW(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a)
-#define DMESGE(x,a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a)
+#define DMESG(x, a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
+#define DMESGW(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a)
+#define DMESGE(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a)
#include <linux/module.h>
#include <linux/kernel.h>
-//#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -36,22 +34,21 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
-#include <linux/rtnetlink.h> //for rtnl_lock()
+#include <linux/rtnetlink.h> /* for rtnl_lock() */
#include <linux/wireless.h>
#include <linux/timer.h>
-#include <linux/proc_fs.h> // Necessary because we use the proc fs
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs. */
#include <linux/if_arp.h>
#include "ieee80211/ieee80211.h"
#include <asm/io.h>
-//#include <asm/semaphore.h>
#define EPROM_93c46 0
#define EPROM_93c56 1
-#define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
#define DEFAULT_FRAG_THRESHOLD 2342U
-#define MIN_FRAG_THRESHOLD 256U
+#define MIN_FRAG_THRESHOLD 256U
#define DEFAULT_RTS_THRESHOLD 2342U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2342U
@@ -60,132 +57,99 @@
#define DEFAULT_RETRY_RTS 7
#define DEFAULT_RETRY_DATA 7
-#define BEACON_QUEUE 6
+#define BEACON_QUEUE 6
-#define aSifsTime 10
+#define aSifsTime 10
-#define sCrcLng 4
-#define sAckCtsLng 112 // bits in ACK and CTS frames
-//+by amy 080312
-#define RATE_ADAPTIVE_TIMER_PERIOD 300
+#define sCrcLng 4
+#define sAckCtsLng 112 /* bits in ACK and CTS frames. */
+/* +by amy 080312. */
+#define RATE_ADAPTIVE_TIMER_PERIOD 300
-typedef enum _WIRELESS_MODE {
+enum wireless_mode {
WIRELESS_MODE_UNKNOWN = 0x00,
WIRELESS_MODE_A = 0x01,
WIRELESS_MODE_B = 0x02,
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
-} WIRELESS_MODE;
-
-typedef struct ChnlAccessSetting {
- u16 SIFS_Timer;
- u16 DIFS_Timer;
- u16 SlotTimeTimer;
- u16 EIFS_Timer;
- u16 CWminIndex;
- u16 CWmaxIndex;
-}*PCHANNEL_ACCESS_SETTING,CHANNEL_ACCESS_SETTING;
-
-typedef enum{
- NIC_8185 = 1,
- NIC_8185B
- } nic_t;
+};
+
+struct chnl_access_setting {
+ u16 sifs_timer;
+ u16 difs_timer;
+ u16 slot_time_timer;
+ u16 eifs_timer;
+ u16 cwmin_index;
+ u16 cwmax_index;
+};
+
+enum nic_t {
+ NIC_8185 = 1,
+ NIC_8185B
+};
typedef u32 AC_CODING;
-#define AC0_BE 0 // ACI: 0x00 // Best Effort
-#define AC1_BK 1 // ACI: 0x01 // Background
-#define AC2_VI 2 // ACI: 0x10 // Video
-#define AC3_VO 3 // ACI: 0x11 // Voice
-#define AC_MAX 4 // Max: define total number; Should not to be used as a real enum.
-
-//
-// ECWmin/ECWmax field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
-//
-typedef union _ECW{
- u8 charData;
- struct
- {
- u8 ECWmin:4;
- u8 ECWmax:4;
- }f; // Field
-}ECW, *PECW;
-
-//
-// ACI/AIFSN Field.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _ACI_AIFSN{
- u8 charData;
-
- struct
- {
- u8 AIFSN:4;
- u8 ACM:1;
- u8 ACI:2;
- u8 Reserved:1;
- }f; // Field
-}ACI_AIFSN, *PACI_AIFSN;
-
-//
-// AC Parameters Record Format.
-// Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
-//
-typedef union _AC_PARAM{
- u32 longData;
- u8 charData[4];
-
- struct
- {
- ACI_AIFSN AciAifsn;
- ECW Ecw;
- u16 TXOPLimit;
- }f; // Field
-}AC_PARAM, *PAC_PARAM;
-
-/* it is a wrong definition. -xiong-2006-11-17
-typedef struct ThreeWireReg {
- u16 longData;
+#define AC0_BE 0 /* ACI: 0x00 */ /* Best Effort. */
+#define AC1_BK 1 /* ACI: 0x01 */ /* Background. */
+#define AC2_VI 2 /* ACI: 0x10 */ /* Video. */
+#define AC3_VO 3 /* ACI: 0x11 */ /* Voice. */
+#define AC_MAX		4 /* Max: defines the total number; should not be used
+			   * as a real enum value.
+			   */
+
+/*
+ * ECWmin/ECWmax field.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
+ */
+typedef union _ECW {
+ u8 charData;
+ struct {
+ u8 ECWmin:4;
+ u8 ECWmax:4;
+ } f; /* Field */
+} ECW, *PECW;
+
+/*
+ * ACI/AIFSN Field. Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ */
+typedef union _ACI_AIFSN {
+ u8 charData;
+
struct {
- u8 enableB;
- u8 data;
- u8 clk;
- u8 read_write;
- } struc;
-} ThreeWireReg;
-*/
-
-typedef union _ThreeWire{
- struct _ThreeWireStruc{
- u16 data:1;
- u16 clk:1;
- u16 enableB:1;
- u16 read_write:1;
- u16 resv1:12;
-// u2Byte resv2:14;
-// u2Byte ThreeWireEnable:1;
-// u2Byte resv3:1;
- }struc;
- u16 longData;
-}ThreeWireReg;
-
-
-typedef struct buffer
-{
+ u8 AIFSN:4;
+ u8 ACM:1;
+ u8 ACI:2;
+ u8 Reserved:1;
+ } f; /* Field */
+} ACI_AIFSN, *PACI_AIFSN;
+
+/*
+ * AC Parameters Record Format.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ */
+typedef union _AC_PARAM {
+ u32 longData;
+ u8 charData[4];
+
+ struct {
+ ACI_AIFSN AciAifsn;
+ ECW Ecw;
+ u16 TXOPLimit;
+ } f; /* Field */
+} AC_PARAM, *PAC_PARAM;
+
+struct buffer {
struct buffer *next;
u32 *buf;
dma_addr_t dma;
-} buffer;
+};
-//YJ,modified,080828
-typedef struct Stats
-{
+/* YJ,modified,080828. */
+struct stats {
unsigned long txrdu;
unsigned long rxrdu;
unsigned long rxnolast;
unsigned long rxnodata;
-// unsigned long rxreset;
-// unsigned long rxwrkaround;
unsigned long rxnopointer;
unsigned long txnperr;
unsigned long txresumed;
@@ -207,126 +171,123 @@ typedef struct Stats
unsigned long txbeaconerr;
unsigned long txlpokint;
unsigned long txlperr;
- unsigned long txretry;//retry number tony 20060601
- unsigned long rxcrcerrmin;//crc error (0-500)
- unsigned long rxcrcerrmid;//crc error (500-1000)
- unsigned long rxcrcerrmax;//crc error (>1000)
- unsigned long rxicverr;//ICV error
-} Stats;
+ unsigned long txretry; /* retry number tony 20060601 */
+ unsigned long rxcrcerrmin; /* crc error (0-500) */
+ unsigned long rxcrcerrmid; /* crc error (500-1000) */
+ unsigned long rxcrcerrmax; /* crc error (>1000) */
+ unsigned long rxicverr; /* ICV error */
+};
#define MAX_LD_SLOT_NUM 10
-#define KEEP_ALIVE_INTERVAL 20 // in seconds.
-#define CHECK_FOR_HANG_PERIOD 2 //be equal to watchdog check time
-#define DEFAULT_KEEP_ALIVE_LEVEL 1
-#define DEFAULT_SLOT_NUM 2
-#define POWER_PROFILE_AC 0
-#define POWER_PROFILE_BATTERY 1
-
-typedef struct _link_detect_t
-{
- u32 RxFrameNum[MAX_LD_SLOT_NUM]; // number of Rx Frame / CheckForHang_period to determine link status
- u16 SlotNum; // number of CheckForHang period to determine link status, default is 2
- u16 SlotIndex;
-
- u32 NumTxOkInPeriod; //number of packet transmitted during CheckForHang
- u32 NumRxOkInPeriod; //number of packet received during CheckForHang
-
- u8 IdleCount; // (KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)
- u32 LastNumTxUnicast;
- u32 LastNumRxUnicast;
-
- bool bBusyTraffic; //when it is set to 1, UI cann't scan at will.
-}link_detect_t, *plink_detect_t;
-
-//YJ,modified,080828,end
-
-//by amy for led
-//================================================================================
-// LED customization.
-//================================================================================
-
-typedef enum _LED_STRATEGY_8185{
- SW_LED_MODE0, //
- SW_LED_MODE1, //
- HW_LED, // HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes)
-}LED_STRATEGY_8185, *PLED_STRATEGY_8185;
-//by amy for led
-//by amy for power save
-typedef enum _LED_CTL_MODE{
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7
-}LED_CTL_MODE;
-
-typedef enum _RT_RF_POWER_STATE
-{
- eRfOn,
- eRfSleep,
- eRfOff
-}RT_RF_POWER_STATE;
-
-enum _ReasonCode{
- unspec_reason = 0x1,
- auth_not_valid = 0x2,
- deauth_lv_ss = 0x3,
- inactivity = 0x4,
- ap_overload = 0x5,
- class2_err = 0x6,
- class3_err = 0x7,
- disas_lv_ss = 0x8,
- asoc_not_auth = 0x9,
-
- //----MIC_CHECK
- mic_failure = 0xe,
- //----END MIC_CHECK
-
- // Reason code defined in 802.11i D10.0 p.28.
- invalid_IE = 0x0d,
- four_way_tmout = 0x0f,
- two_way_tmout = 0x10,
- IE_dismatch = 0x11,
+#define KEEP_ALIVE_INTERVAL 20 /* in seconds. */
+#define CHECK_FOR_HANG_PERIOD 2 /* be equal to watchdog check time. */
+#define DEFAULT_KEEP_ALIVE_LEVEL 1
+#define DEFAULT_SLOT_NUM 2
+#define POWER_PROFILE_AC 0
+#define POWER_PROFILE_BATTERY 1
+
+struct link_detect_t {
+	u32 rx_frame_num[MAX_LD_SLOT_NUM]; /* number of Rx Frames per
+					    * CheckForHang period, used to
+					    * determine link status.
+					    */
+	u16 slot_num; /* number of CheckForHang periods used to determine link
+		       * status, default is 2.
+		       */
+ u16 slot_index;
+ u32 num_tx_ok_in_period; /* number of packet transmitted during
+ * CheckForHang.
+ */
+ u32 num_rx_ok_in_period; /* number of packet received during
+ * CheckForHang.
+ */
+ u8 idle_count; /* (KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD) */
+ u32 last_num_tx_unicast;
+ u32 last_num_rx_unicast;
+
+	bool b_busy_traffic; /* when it is set to 1, the UI can't scan at will. */
+};
+
+/* YJ,modified,080828,end */
+
+/* by amy for led
+ * ==========================================================================
+ * LED customization.
+ * ==========================================================================
+ */
+enum led_strategy_8185 {
+ SW_LED_MODE0,
+ SW_LED_MODE1,
+	HW_LED, /* HW controls 2 LEDs, LED0 and LED1 (there are 4 different
+		 * control modes).
+		 */
+};
+
+enum rt_rf_power_state {
+ RF_ON,
+ RF_SLEEP,
+ RF_OFF
+};
+
+enum _ReasonCode {
+ unspec_reason = 0x1,
+ auth_not_valid = 0x2,
+ deauth_lv_ss = 0x3,
+ inactivity = 0x4,
+ ap_overload = 0x5,
+ class2_err = 0x6,
+ class3_err = 0x7,
+ disas_lv_ss = 0x8,
+ asoc_not_auth = 0x9,
+
+ /* ----MIC_CHECK */
+ mic_failure = 0xe,
+ /* ----END MIC_CHECK */
+
+ /* Reason code defined in 802.11i D10.0 p.28. */
+ invalid_IE = 0x0d,
+ four_way_tmout = 0x0f,
+ two_way_tmout = 0x10,
+ IE_dismatch = 0x11,
invalid_Gcipher = 0x12,
invalid_Pcipher = 0x13,
- invalid_AKMP = 0x14,
+ invalid_AKMP = 0x14,
unsup_RSNIEver = 0x15,
- invalid_RSNIE = 0x16,
- auth_802_1x_fail= 0x17,
- ciper_reject = 0x18,
-
- // Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie, 2005-11-15.
- QoS_unspec = 0x20, // 32
- QAP_bandwidth = 0x21, // 33
- poor_condition = 0x22, // 34
- no_facility = 0x23, // 35
- // Where is 36???
- req_declined = 0x25, // 37
- invalid_param = 0x26, // 38
- req_not_honored= 0x27, // 39
- TS_not_created = 0x2F, // 47
- DL_not_allowed = 0x30, // 48
- dest_not_exist = 0x31, // 49
- dest_not_QSTA = 0x32, // 50
+ invalid_RSNIE = 0x16,
+ auth_802_1x_fail = 0x17,
+ ciper_reject = 0x18,
+
+ /* Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie,
+ * 2005-11-15.
+ */
+ QoS_unspec = 0x20, /* 32 */
+ QAP_bandwidth = 0x21, /* 33 */
+ poor_condition = 0x22, /* 34 */
+ no_facility = 0x23, /* 35 */
+ /* Where is 36??? */
+ req_declined = 0x25, /* 37 */
+ invalid_param = 0x26, /* 38 */
+ req_not_honored = 0x27, /* 39 */
+ TS_not_created = 0x2F, /* 47 */
+ DL_not_allowed = 0x30, /* 48 */
+ dest_not_exist = 0x31, /* 49 */
+ dest_not_QSTA = 0x32, /* 50 */
+};
+
+enum rt_ps_mode {
+ ACTIVE, /* Active/Continuous access. */
+ MAX_PS, /* Max power save mode. */
+ FAST_PS /* Fast power save mode. */
};
-typedef enum _RT_PS_MODE
-{
- eActive, // Active/Continuous access.
- eMaxPs, // Max power save mode.
- eFastPs // Fast power save mode.
-}RT_PS_MODE;
-//by amy for power save
-typedef struct r8180_priv
-{
+
+/* by amy for power save. */
+struct r8180_priv {
struct pci_dev *pdev;
short epromtype;
int irq;
struct ieee80211_device *ieee80211;
- short plcp_preamble_mode; // 0:auto 1:short 2:long
+ short plcp_preamble_mode; /* 0:auto 1:short 2:long */
spinlock_t irq_th_lock;
spinlock_t tx_lock;
@@ -339,19 +300,15 @@ typedef struct r8180_priv
short chan;
short sens;
short max_sens;
- u8 chtxpwr[15]; //channels from 1 to 14, 0 not used
- u8 chtxpwr_ofdm[15]; //channels from 1 to 14, 0 not used
- //u8 challow[15]; //channels from 1 to 14, 0 not used
- u8 channel_plan; // it's the channel plan index
+ u8 chtxpwr[15]; /* channels from 1 to 14, 0 not used. */
+ u8 chtxpwr_ofdm[15]; /* channels from 1 to 14, 0 not used. */
+ u8 channel_plan; /* it's the channel plan index. */
short up;
- short crcmon; //if 1 allow bad crc frame reception in monitor mode
+ short crcmon; /* if 1 allow bad crc frame reception in monitor mode. */
struct timer_list scan_timer;
- /*short scanpending;
- short stopscan;*/
spinlock_t scan_lock;
u8 active_probe;
- //u8 active_scan_num;
struct semaphore wx_sem;
short hw_wep;
@@ -359,20 +316,20 @@ typedef struct r8180_priv
short antb;
short diversity;
u32 key0[4];
- short (*rf_set_sens)(struct net_device *dev,short sens);
- void (*rf_set_chan)(struct net_device *dev,short ch);
+ short (*rf_set_sens)(struct net_device *dev, short sens);
+ void (*rf_set_chan)(struct net_device *dev, short ch);
void (*rf_close)(struct net_device *dev);
void (*rf_init)(struct net_device *dev);
void (*rf_sleep)(struct net_device *dev);
void (*rf_wakeup)(struct net_device *dev);
- //short rate;
+ /* short rate; */
short promisc;
- /*stats*/
- struct Stats stats;
- struct _link_detect_t link_detect; //YJ,add,080828
+ /* stats */
+ struct stats stats;
+ struct link_detect_t link_detect; /* YJ,add,080828 */
struct iw_statistics wstats;
- /*RX stuff*/
+ /* RX stuff. */
u32 *rxring;
u32 *rxringtail;
dma_addr_t rxringdma;
@@ -387,27 +344,6 @@ typedef struct r8180_priv
u32 rx_prevlen;
- /*TX stuff*/
-/*
- u32 *txlpring;
- u32 *txhpring;
- u32 *txnpring;
- dma_addr_t txlpringdma;
- dma_addr_t txhpringdma;
- dma_addr_t txnpringdma;
- u32 *txlpringtail;
- u32 *txhpringtail;
- u32 *txnpringtail;
- u32 *txlpringhead;
- u32 *txhpringhead;
- u32 *txnpringhead;
- struct buffer *txlpbufs;
- struct buffer *txhpbufs;
- struct buffer *txnpbufs;
- struct buffer *txlpbufstail;
- struct buffer *txhpbufstail;
- struct buffer *txnpbufstail;
-*/
u32 *txmapring;
u32 *txbkpring;
u32 *txbepring;
@@ -447,54 +383,47 @@ typedef struct r8180_priv
int txringcount;
int txbuffsize;
- //struct tx_pendingbuf txnp_pending;
- //struct tasklet_struct irq_tx_tasklet;
struct tasklet_struct irq_rx_tasklet;
u8 dma_poll_mask;
- //short tx_suspend;
- /* adhoc/master mode stuff */
+ /* adhoc/master mode stuff. */
u32 *txbeaconringtail;
dma_addr_t txbeaconringdma;
u32 *txbeaconring;
int txbeaconcount;
struct buffer *txbeaconbufs;
struct buffer *txbeaconbufstail;
- //char *master_essid;
- //u16 master_beaconinterval;
- //u32 master_beaconsize;
- //u16 beacon_interval;
u8 retry_data;
u8 retry_rts;
u16 rts;
-//by amy for led
- LED_STRATEGY_8185 LedStrategy;
-//by amy for led
+ /* by amy for led. */
+ enum led_strategy_8185 led_strategy;
+ /* by amy for led. */
-//by amy for power save
+ /* by amy for power save. */
struct timer_list watch_dog_timer;
bool bInactivePs;
bool bSwRfProcessing;
- RT_RF_POWER_STATE eInactivePowerState;
- RT_RF_POWER_STATE eRFPowerState;
+ enum rt_rf_power_state eInactivePowerState;
+ enum rt_rf_power_state eRFPowerState;
u32 RfOffReason;
bool RFChangeInProgress;
bool SetRFPowerStateInProgress;
- u8 RFProgType;
+ u8 RFProgType;
bool bLeisurePs;
- RT_PS_MODE dot11PowerSaveMode;
- //u32 NumRxOkInPeriod; //YJ,del,080828
- //u32 NumTxOkInPeriod; //YJ,del,080828
- u8 TxPollingTimes;
+ enum rt_ps_mode dot11PowerSaveMode;
+ u8 TxPollingTimes;
- bool bApBufOurFrame;// TRUE if AP buffer our unicast data , we will keep eAwake until receive data or timeout.
- u8 WaitBufDataBcnCount;
- u8 WaitBufDataTimeOut;
+	bool bApBufOurFrame; /* TRUE if the AP buffers our unicast data; we
+			      * will keep eAwake until we receive data or
+			      * time out.
+			      */
+ u8 WaitBufDataBcnCount;
+ u8 WaitBufDataTimeOut;
-//by amy for power save
-//by amy for antenna
+ /* by amy for power save. */
+ /* by amy for antenna. */
u8 EEPROMSwAntennaDiversity;
bool EEPROMDefaultAntenna1;
u8 RegSwAntennaDiversityMechanism;
@@ -503,115 +432,128 @@ typedef struct r8180_priv
bool bDefaultAntenna1;
u8 SignalStrength;
long Stats_SignalStrength;
- long LastSignalStrengthInPercent; // In percentage, used for smoothing, e.g. Moving Average.
- u8 SignalQuality; // in 0-100 index.
+ long LastSignalStrengthInPercent; /* In percentage, used for smoothing,
+ * e.g. Moving Average.
+ */
+ u8 SignalQuality; /* in 0-100 index. */
long Stats_SignalQuality;
- long RecvSignalPower; // in dBm.
+ long RecvSignalPower; /* in dBm. */
long Stats_RecvSignalPower;
- u8 LastRxPktAntenna; // +by amy 080312 Antenna which received the lasted packet. 0: Aux, 1:Main. Added by Roger, 2008.01.25.
+ u8 LastRxPktAntenna; /* +by amy 080312 Antenna which received the lasted
+ * packet. 0: Aux, 1:Main. Added by Roger,
+ * 2008.01.25.
+ */
u32 AdRxOkCnt;
long AdRxSignalStrength;
- u8 CurrAntennaIndex; // Index to current Antenna (both Tx and Rx).
- u8 AdTickCount; // Times of SwAntennaDiversityTimer happened.
- u8 AdCheckPeriod; // # of period SwAntennaDiversityTimer to check Rx signal strength for SW Antenna Diversity.
- u8 AdMinCheckPeriod; // Min value of AdCheckPeriod.
- u8 AdMaxCheckPeriod; // Max value of AdCheckPeriod.
- long AdRxSsThreshold; // Signal strength threshold to switch antenna.
- long AdMaxRxSsThreshold; // Max value of AdRxSsThreshold.
- bool bAdSwitchedChecking; // TRUE if we shall shall check Rx signal strength for last time switching antenna.
- long AdRxSsBeforeSwitched; // Rx signal strength before we switched antenna.
+ u8 CurrAntennaIndex; /* Index to current Antenna (both Tx and Rx). */
+ u8 AdTickCount; /* Times of SwAntennaDiversityTimer happened. */
+ u8 AdCheckPeriod; /* # of period SwAntennaDiversityTimer to check Rx
+ * signal strength for SW Antenna Diversity.
+ */
+ u8 AdMinCheckPeriod; /* Min value of AdCheckPeriod. */
+ u8 AdMaxCheckPeriod; /* Max value of AdCheckPeriod. */
+ long AdRxSsThreshold; /* Signal strength threshold to switch antenna. */
+ long AdMaxRxSsThreshold; /* Max value of AdRxSsThreshold. */
+ bool bAdSwitchedChecking; /* TRUE if we shall shall check Rx signal
+ * strength for last time switching antenna.
+ */
+ long AdRxSsBeforeSwitched; /* Rx signal strength before we switched
+ * antenna.
+ */
struct timer_list SwAntennaDiversityTimer;
-//by amy for antenna
-//{by amy 080312
-//
- // Crystal calibration.
- // Added by Roger, 2007.12.11.
- //
- bool bXtalCalibration; // Crystal calibration.
- u8 XtalCal_Xin; // Crystal calibration for Xin. 0~7.5pF
- u8 XtalCal_Xout; // Crystal calibration for Xout. 0~7.5pF
- //
- // Tx power tracking with thermal meter indication.
- // Added by Roger, 2007.12.11.
- //
- bool bTxPowerTrack; // Tx Power tracking.
- u8 ThermalMeter; // Thermal meter reference indication.
- //
- // Dynamic Initial Gain Adjustment Mechanism. Added by Bruce, 2007-02-14.
- //
- bool bDigMechanism; // TRUE if DIG is enabled, FALSE ow.
- bool bRegHighPowerMechanism; // For High Power Mechanism. 061010, by rcnjko.
- u32 FalseAlarmRegValue;
- u8 RegDigOfdmFaUpTh; // Upper threshold of OFDM false alarm, which is used in DIG.
- u8 DIG_NumberFallbackVote;
- u8 DIG_NumberUpgradeVote;
- // For HW antenna diversity, added by Roger, 2008.01.30.
- u32 AdMainAntennaRxOkCnt; // Main antenna Rx OK count.
- u32 AdAuxAntennaRxOkCnt; // Aux antenna Rx OK count.
- bool bHWAdSwitched; // TRUE if we has switched default antenna by HW evaluation.
- // RF High Power upper/lower threshold.
- u8 RegHiPwrUpperTh;
- u8 RegHiPwrLowerTh;
- // RF RSSI High Power upper/lower Threshold.
- u8 RegRSSIHiPwrUpperTh;
- u8 RegRSSIHiPwrLowerTh;
- // Current CCK RSSI value to determine CCK high power, asked by SD3 DZ, by Bruce, 2007-04-12.
- u8 CurCCKRSSI;
- bool bCurCCKPkt;
- //
- // High Power Mechanism. Added by amy, 080312.
- //
- bool bToUpdateTxPwr;
- long UndecoratedSmoothedSS;
- long UndercorateSmoothedRxPower;
- u8 RSSI;
- char RxPower;
- u8 InitialGain;
- //For adjust Dig Threshold during Legacy/Leisure Power Save Mode
- u32 DozePeriodInPast2Sec;
- // Don't access BB/RF under disable PLL situation.
- u8 InitialGainBackUp;
- u8 RegBModeGainStage;
-//by amy for rate adaptive
- struct timer_list rateadapter_timer;
- u32 RateAdaptivePeriod;
- bool bEnhanceTxPwr;
- bool bUpdateARFR;
- int ForcedDataRate; // Force Data Rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M.)
- u32 NumTxUnicast; //YJ,add,080828,for keep alive
- u8 keepAliveLevel; //YJ,add,080828,for KeepAlive
- unsigned long NumTxOkTotal;
- u16 LastRetryCnt;
- u16 LastRetryRate;
- unsigned long LastTxokCnt;
- unsigned long LastRxokCnt;
- u16 CurrRetryCnt;
- unsigned long LastTxOKBytes;
- unsigned long NumTxOkBytesTotal;
- u8 LastFailTxRate;
- long LastFailTxRateSS;
- u8 FailTxRateCount;
- u32 LastTxThroughput;
- //for up rate
- unsigned short bTryuping;
- u8 CurrTxRate; //the rate before up
- u16 CurrRetryRate;
- u16 TryupingCount;
- u8 TryDownCountLowData;
- u8 TryupingCountNoData;
-
- u8 CurrentOperaRate;
-//by amy for rate adaptive
-//by amy 080312}
-// short wq_hurryup;
-// struct workqueue_struct *workqueue;
+ /* by amy for antenna {by amy 080312 */
+
+ /* Crystal calibration. Added by Roger, 2007.12.11. */
+
+	bool bXtalCalibration; /* Crystal calibration. */
+ u8 XtalCal_Xin; /* Crystal calibration for Xin. 0~7.5pF */
+ u8 XtalCal_Xout; /* Crystal calibration for Xout. 0~7.5pF */
+
+ /* Tx power tracking with thermal meter indication.
+ * Added by Roger, 2007.12.11.
+ */
+
+ bool bTxPowerTrack; /* Tx Power tracking. */
+ u8 ThermalMeter; /* Thermal meter reference indication. */
+
+ /* Dynamic Initial Gain Adjustment Mechanism. Added by Bruce,
+ * 2007-02-14.
+ */
+ bool bDigMechanism; /* TRUE if DIG is enabled, FALSE ow. */
+ bool bRegHighPowerMechanism; /* For High Power Mechanism. 061010,
+ * by rcnjko.
+ */
+ u32 FalseAlarmRegValue;
+ u8 RegDigOfdmFaUpTh; /* Upper threshold of OFDM false alarm, which is
+ * used in DIG.
+ */
+ u8 DIG_NumberFallbackVote;
+ u8 DIG_NumberUpgradeVote;
+ /* For HW antenna diversity, added by Roger, 2008.01.30. */
+ u32 AdMainAntennaRxOkCnt; /* Main antenna Rx OK count. */
+ u32 AdAuxAntennaRxOkCnt; /* Aux antenna Rx OK count. */
+ bool bHWAdSwitched; /* TRUE if we has switched default antenna by HW
+ * evaluation.
+ */
+ /* RF High Power upper/lower threshold. */
+ u8 RegHiPwrUpperTh;
+ u8 RegHiPwrLowerTh;
+ /* RF RSSI High Power upper/lower Threshold. */
+ u8 RegRSSIHiPwrUpperTh;
+ u8 RegRSSIHiPwrLowerTh;
+ /* Current CCK RSSI value to determine CCK high power, asked by SD3 DZ,
+ * by Bruce, 2007-04-12.
+ */
+ u8 CurCCKRSSI;
+ bool bCurCCKPkt;
+ /* High Power Mechanism. Added by amy, 080312. */
+ bool bToUpdateTxPwr;
+ long UndecoratedSmoothedSS;
+ long UndecoratedSmoothedRxPower;
+ u8 RSSI;
+ char RxPower;
+ u8 InitialGain;
+ /* For adjust Dig Threshold during Legacy/Leisure Power Save Mode. */
+ u32 DozePeriodInPast2Sec;
+ /* Don't access BB/RF under disable PLL situation. */
+ u8 InitialGainBackUp;
+ u8 RegBModeGainStage;
+ /* by amy for rate adaptive */
+ struct timer_list rateadapter_timer;
+ u32 RateAdaptivePeriod;
+ bool bEnhanceTxPwr;
+ bool bUpdateARFR;
+	int ForcedDataRate; /* Force Data Rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M. */
+ u32 NumTxUnicast; /* YJ,add,080828,for keep alive. */
+	u8 keepAliveLevel; /* YJ,add,080828,for KeepAlive. */
+ unsigned long NumTxOkTotal;
+ u16 LastRetryCnt;
+ u16 LastRetryRate;
+ unsigned long LastTxokCnt;
+ unsigned long LastRxokCnt;
+ u16 CurrRetryCnt;
+ unsigned long LastTxOKBytes;
+ unsigned long NumTxOkBytesTotal;
+ u8 LastFailTxRate;
+ long LastFailTxRateSS;
+ u8 FailTxRateCount;
+ u32 LastTxThroughput;
+ /* for up rate. */
+ unsigned short bTryuping;
+ u8 CurrTxRate; /* the rate before up. */
+ u16 CurrRetryRate;
+ u16 TryupingCount;
+ u8 TryDownCountLowData;
+ u8 TryupingCountNoData;
+
+ u8 CurrentOperaRate;
struct work_struct reset_wq;
struct work_struct watch_dog_wq;
short ack_tx_to_ieee;
u8 dma_poll_stop_mask;
- //u8 RegThreeWireMode;
u16 ShortRetryLimit;
u16 LongRetryLimit;
u16 EarlyRxThreshold;
@@ -619,8 +561,8 @@ typedef struct r8180_priv
u32 ReceiveConfig;
u32 IntrMask;
- struct ChnlAccessSetting ChannelAccessSetting;
-}r8180_priv;
+ struct chnl_access_setting ChannelAccessSetting;
+};
#define MANAGE_PRIORITY 0
#define BK_PRIORITY 1
@@ -632,14 +574,14 @@ typedef struct r8180_priv
#define LOW_PRIORITY VI_PRIORITY
#define NORM_PRIORITY VO_PRIORITY
-//AC2Queue mapping
+/* AC2Queue mapping. */
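+/* A WMM access category (VO/VI/BK/BE) maps to the matching TX queue priority
+ * defined above; any other value falls back to BE_PRIORITY.
+ */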
#define AC2Q(_ac) (((_ac) == WME_AC_VO) ? VO_PRIORITY : \
((_ac) == WME_AC_VI) ? VI_PRIORITY : \
((_ac) == WME_AC_BK) ? BK_PRIORITY : \
BE_PRIORITY)
short rtl8180_tx(struct net_device *dev, u8 *skbuf, int len, int priority,
- short morefrag, short fragdesc, int rate);
+ bool morefrag, short fragdesc, int rate);
u8 read_nic_byte(struct net_device *dev, int x);
u32 read_nic_dword(struct net_device *dev, int x);
@@ -673,7 +615,6 @@ void UpdateInitialGain(struct net_device *dev);
bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt,
bool bAntDiversity);
-//#ifdef CONFIG_RTL8185B
void rtl8185b_adapter_start(struct net_device *dev);
void rtl8185b_rx_enable(struct net_device *dev);
void rtl8185b_tx_enable(struct net_device *dev);
@@ -682,9 +623,8 @@ void rtl8185b_irq_enable(struct net_device *dev);
void fix_rx_fifo(struct net_device *dev);
void fix_tx_fifo(struct net_device *dev);
void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch);
-void rtl8180_rate_adapter(struct work_struct * work);
-//#endif
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+void rtl8180_rate_adapter(struct work_struct *work);
+bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
u32 ChangeSource);
#endif
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 6cafee22bec4..a6022d4e7573 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1,31 +1,31 @@
/*
- This is part of rtl818x pci OpenSource driver - v 0.1
- Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public License)
-
- Parts of this driver are based on the GPL part of the official
- Realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- Parts of BB/RF code are derived from David Young rtl8180 netbsd driver.
-
- RSSI calc function from 'The Deuce'
-
- Some ideas borrowed from the 8139too.c driver included in linux kernel.
-
- We (I?) want to thanks the Authors of those projecs and also the
- Ndiswrapper's project Authors.
-
- A big big thanks goes also to Realtek corp. for their help in my attempt to
- add RTL8185 and RTL8225 support, and to David Young also.
-
- Power management interface routines.
- Written by Mariusz Matuszek.
-*/
+ * This is part of rtl818x pci OpenSource driver - v 0.1
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public License)
+ *
+ * Parts of this driver are based on the GPL part of the official
+ * Realtek driver.
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton
+ * from Patric Schenke & Andres Salomon.
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
+ *
+ * Parts of BB/RF code are derived from David Young rtl8180 netbsd driver.
+ *
+ * RSSI calc function from 'The Deuce'
+ *
+ * Some ideas borrowed from the 8139too.c driver included in linux kernel.
+ *
+ * We (I?) want to thank the Authors of those projects and also the
+ * Ndiswrapper's project Authors.
+ *
+ * A big big thanks goes also to Realtek corp. for their help in my attempt to
+ * add RTL8185 and RTL8225 support, and to David Young also.
+ *
+ * Power management interface routines.
+ * Written by Mariusz Matuszek.
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -258,7 +258,9 @@ static int proc_get_stats_tx(struct seq_file *m, void *v)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long totalOK;
- totalOK = priv->stats.txnpokint+priv->stats.txhpokint+priv->stats.txlpokint;
+ totalOK = priv->stats.txnpokint + priv->stats.txhpokint +
+ priv->stats.txlpokint;
+
seq_printf(m,
"TX OK: %lu\n"
"TX Error: %lu\n"
@@ -347,9 +349,9 @@ static void rtl8180_proc_init_one(struct net_device *dev)
}
/*
- FIXME: check if we can use some standard already-existent
- data type+functions in kernel
-*/
+ * FIXME: check if we can use some standard already-existent
+ * data type+functions in kernel.
+ */
static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
struct buffer **bufferhead)
@@ -468,9 +470,11 @@ static short check_nic_enought_desc(struct net_device *dev, int priority)
{
struct r8180_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = netdev_priv(dev);
- int requiredbyte, required;
+ int requiredbyte;
+ int required;
- requiredbyte = priv->ieee80211->fts + sizeof(struct ieee80211_header_data);
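+	/* worst-case bytes per fragment: the fragmentation threshold (fts,
+	 * presumably) plus the 802.11 header; QoS adds 2 more bytes below.
+	 */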
+ requiredbyte = priv->ieee80211->fts +
+ sizeof(struct ieee80211_header_data);
if (ieee->current_network.QoS_Enable)
requiredbyte += 2;
@@ -484,7 +488,7 @@ static short check_nic_enought_desc(struct net_device *dev, int priority)
* between the tail and the head
*/
- return (required+2 < get_curr_tx_free_desc(dev, priority));
+ return required + 2 < get_curr_tx_free_desc(dev, priority);
}
void fix_tx_fifo(struct net_device *dev)
@@ -649,7 +653,7 @@ void rtl8180_set_chan(struct net_device *dev, short ch)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
if ((ch > 14) || (ch < 1)) {
- printk("In %s: Invalid chnanel %d\n", __func__, ch);
+ netdev_err(dev, "In %s: Invalid channel %d\n", __func__, ch);
return;
}
@@ -742,43 +746,50 @@ static short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
switch (addr) {
case TX_MANAGEPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txmapbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txmapbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_BKPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbkpbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbkpbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_BEPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbepbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbepbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_VIPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txvipbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txvipbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_VOPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txvopbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txvopbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_HIGHPRIORITY_RING_ADDR:
- if (-1 == buffer_add(&(priv->txhpbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txhpbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer HP");
return -ENOMEM;
}
break;
case TX_BEACON_RING_ADDR:
- if (-1 == buffer_add(&(priv->txbeaconbufs), buf, dma_tmp, NULL)) {
+ if (-1 == buffer_add(&priv->txbeaconbufs,
+ buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer BP");
return -ENOMEM;
}
@@ -897,8 +908,8 @@ static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
return -1;
}
- desc = (u32 *)pci_alloc_consistent(pdev, sizeof(u32)*rx_desc_size*count+256,
- &dma_desc);
+ desc = (u32 *)pci_alloc_consistent(pdev,
+ sizeof(u32) * rx_desc_size * count + 256, &dma_desc);
if (dma_desc & 0xff)
/*
@@ -935,7 +946,8 @@ static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
tmp = tmp+rx_desc_size;
}
- *(tmp-rx_desc_size) = *(tmp-rx_desc_size) | (1<<30); /* this is the last descriptor */
+ /* this is the last descriptor */
+ *(tmp - rx_desc_size) = *(tmp - rx_desc_size) | (1 << 30);
return 0;
}
@@ -1009,7 +1021,8 @@ inline u16 ieeerate2rtlrate(int rate)
}
}
-static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540, 720};
+static u16 rtl_rate[] = {10, 20, 55, 110, 60,
+ 90, 120, 180, 240, 360, 480, 540, 720};
inline u16 rtl8180_rate2rate(short rate)
{
@@ -1143,23 +1156,30 @@ static long TranslateToDbm8185(u8 SignalStrengthIndex)
/*
* Perform signal smoothing for dynamic mechanism.
* This is different with PerformSignalSmoothing8185 in smoothing formula.
- * No dramatic adjustion is apply because dynamic mechanism need some degree
- * of correctness. Ported from 8187B.
+ * No dramatic adjustment is applied because the dynamic mechanism needs
+ * some degree of correctness. Ported from 8187B.
*/
static void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
bool bCckRate)
{
- /* Determin the current packet is CCK rate. */
+ long smoothedSS;
+ long smoothedRx;
+
+ /* Determine whether the current packet uses a CCK rate. */
priv->bCurCCKPkt = bCckRate;
+ smoothedSS = priv->SignalStrength * 10;
+
if (priv->UndecoratedSmoothedSS >= 0)
- priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
- (priv->SignalStrength * 10)) / 6;
- else
- priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
+ smoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
+ smoothedSS) / 6;
- priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) +
- (priv->RxPower * 11)) / 60;
+ priv->UndecoratedSmoothedSS = smoothedSS;
+
+ smoothedRx = ((priv->UndecoratedSmoothedRxPower * 50) +
+ (priv->RxPower * 11)) / 60;
+
+ priv->UndecoratedSmoothedRxPower = smoothedRx;
if (bCckRate)
priv->CurCCKRSSI = priv->RSSI;
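
The smoothing rewritten above is a weighted moving average: the signal-strength history is weighted 5:1 against the new sample (pre-scaled by 10), and the RX power history is blended as (old * 50 + new * 11) / 60. A minimal sketch of an equivalent helper, purely illustrative and not part of the patch (the 5:1 weighting and the "negative means no history yet" convention are taken from the hunk above):

static long smooth_signal_strength(long old_scaled, u8 new_ss)
{
	long scaled = (long)new_ss * 10;

	if (old_scaled < 0)	/* no history recorded yet */
		return scaled;

	/* weight the history 5:1 against the new, pre-scaled sample */
	return (old_scaled * 5 + scaled) / 6;
}

For example, an existing UndecoratedSmoothedSS of 300 and a new SignalStrength of 36 yield (300 * 5 + 36 * 10) / 6 = 310.
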
@@ -1206,8 +1226,9 @@ static void rtl8180_rx(struct net_device *dev)
rx_desc_size = 8;
if ((*(priv->rxringtail)) & (1<<31)) {
- /* we have got an RX int, but the descriptor
- * we are pointing is empty */
+ /* we have got an RX int, but the descriptor we are pointing
+ * to is empty.
+ */
priv->stats.rxnodata++;
priv->ieee80211->stats.rx_errors++;
@@ -1216,7 +1237,8 @@ static void rtl8180_rx(struct net_device *dev)
tmp = priv->rxringtail;
do {
if (tmp == priv->rxring)
- tmp = priv->rxring + (priv->rxringcount - 1)*rx_desc_size;
+ tmp = priv->rxring + (priv->rxringcount - 1) *
+ rx_desc_size;
else
tmp -= rx_desc_size;
@@ -1237,7 +1259,6 @@ static void rtl8180_rx(struct net_device *dev)
if (*(priv->rxringtail) & (1<<27)) {
priv->stats.rxdmafail++;
- /* DMESG("EE: RX DMA FAILED at buffer pointed by descriptor %x",(u32)priv->rxringtail); */
goto drop;
}
@@ -1254,10 +1275,9 @@ static void rtl8180_rx(struct net_device *dev)
if (last) {
lastlen = ((*priv->rxringtail) & 0xfff);
- /* if the last descriptor (that should
- * tell us the total packet len) tell
- * us something less than the descriptors
- * len we had until now, then there is some
+ /* if the last descriptor (that should tell us the total
+ * packet len) tells us something less than the
+ * descriptors' len we had until now, then there is some
+ * problem.
+ * Workaround to prevent a kernel panic.
+ */
@@ -1293,31 +1313,36 @@ static void rtl8180_rx(struct net_device *dev)
priv->rx_prevlen += len;
if (priv->rx_prevlen > MAX_FRAG_THRESHOLD + 100) {
- /* HW is probably passing several buggy frames
- * without FD or LD flag set.
- * Throw this garbage away to prevent skb
- * memory exhausting
- */
+ /* HW is probably passing several buggy frames without
+ * FD or LD flag set.
+ * Throw this garbage away to prevent skb memory
+ * exhaustion.
+ */
if (!priv->rx_skb_complete)
dev_kfree_skb_any(priv->rx_skb);
priv->rx_skb_complete = 1;
}
- signal = (unsigned char)(((*(priv->rxringtail+3)) & (0x00ff0000))>>16);
+ signal = (unsigned char)((*(priv->rxringtail + 3) &
+ 0x00ff0000) >> 16);
signal = (signal & 0xfe) >> 1;
quality = (unsigned char)((*(priv->rxringtail+3)) & (0xff));
stats.mac_time[0] = *(priv->rxringtail+1);
stats.mac_time[1] = *(priv->rxringtail+2);
- rxpower = ((char)(((*(priv->rxringtail+4)) & (0x00ff0000))>>16))/2 - 42;
- RSSI = ((u8)(((*(priv->rxringtail+3)) & (0x0000ff00))>>8)) & (0x7f);
+
+ rxpower = ((char)((*(priv->rxringtail + 4) &
+ 0x00ff0000) >> 16)) / 2 - 42;
+
+ RSSI = ((u8)((*(priv->rxringtail + 3) &
+ 0x0000ff00) >> 8)) & 0x7f;
rate = ((*(priv->rxringtail)) &
((1<<23)|(1<<22)|(1<<21)|(1<<20)))>>20;
stats.rate = rtl8180_rate2rate(rate);
- Antenna = (((*(priv->rxringtail+3)) & (0x00008000)) == 0) ? 0 : 1;
+ Antenna = (*(priv->rxringtail + 3) & 0x00008000) == 0 ? 0 : 1;
if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
RxAGC_dBm = rxpower+1; /* bias */
} else { /* CCK rate. */
@@ -1326,7 +1351,8 @@ static void rtl8180_rx(struct net_device *dev)
LNA = (u8) (RxAGC_dBm & 0x60) >> 5; /* bit 6~ bit 5 */
BB = (u8) (RxAGC_dBm & 0x1F); /* bit 4 ~ bit 0 */
- RxAGC_dBm = -(LNA_gain[LNA] + (BB*2)); /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
+ /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
+ RxAGC_dBm = -(LNA_gain[LNA] + (BB * 2));
RxAGC_dBm += 4; /* bias */
}
@@ -1354,21 +1380,23 @@ static void rtl8180_rx(struct net_device *dev)
priv->RSSI = RSSI;
/* SQ translation formula is provided by SD3 DZ. 2006.06.27 */
if (quality >= 127)
- quality = 1; /*0; */ /* 0 will cause epc to show signal zero , walk around now; */
+ /* 0 causes epc to show signal zero; work around it for now */
+ quality = 1;
else if (quality < 27)
quality = 100;
else
quality = 127 - quality;
priv->SignalQuality = quality;
- stats.signal = (u8)quality; /*priv->wstats.qual.level = priv->SignalStrength; */
+ stats.signal = (u8) quality;
+
stats.signalstrength = RXAGC;
if (stats.signalstrength > 100)
stats.signalstrength = 100;
- stats.signalstrength = (stats.signalstrength * 70)/100 + 30;
- /* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
+ stats.signalstrength = (stats.signalstrength * 70) / 100 + 30;
stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
- stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
+ stats.noise = priv->wstats.qual.noise =
+ 100 - priv->wstats.qual.qual;
bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
(((*(priv->rxringtail)) & (0x04000000)) != 0) |
(((*(priv->rxringtail)) & (0x08000000)) != 0) |
@@ -1397,27 +1425,40 @@ static void rtl8180_rx(struct net_device *dev)
/* For good-looking singal strength. */
SignalStrengthIndex = NetgearSignalStrengthTranslate(
- priv->LastSignalStrengthInPercent,
- priv->SignalStrength);
+ priv->LastSignalStrengthInPercent,
+ priv->SignalStrength);
priv->LastSignalStrengthInPercent = SignalStrengthIndex;
- priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
- /*
- * We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
- * so we record the correct power here.
- */
- priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
- priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
+ priv->Stats_SignalStrength =
+ TranslateToDbm8185((u8)SignalStrengthIndex);
+
+ /*
+ * We need a more accurate power reading for received packets,
+ * and the "SignalStrength" in RxStats is beautified, so we
+ * record the accurate power here.
+ */
- /* Figure out which antenna that received the last packet. */
- priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
+ priv->Stats_SignalQuality = (long)(
+ priv->Stats_SignalQuality * 5 +
+ (long)priv->SignalQuality + 5) / 6;
+
+ priv->Stats_RecvSignalPower = (long)(
+ priv->Stats_RecvSignalPower * 5 +
+ priv->RecvSignalPower - 1) / 6;
+
+ /*
+ * Figure out which antenna received the last packet.
+ * 0: aux, 1: main
+ */
+ priv->LastRxPktAntenna = Antenna ? 1 : 0;
SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
}
if (first) {
if (!priv->rx_skb_complete) {
/* seems that HW sometimes fails to receive and
- doesn't provide the last descriptor */
+ * doesn't provide the last descriptor.
+ */
dev_kfree_skb_any(priv->rx_skb);
priv->stats.rxnolast++;
}
@@ -1428,15 +1469,16 @@ static void rtl8180_rx(struct net_device *dev)
priv->rx_skb_complete = 0;
priv->rx_skb->dev = dev;
} else {
- /* if we are here we should have already RXed
- * the first frame.
- * If we get here and the skb is not allocated then
- * we have just throw out garbage (skb not allocated)
- * and we are still rxing garbage....
- */
+ /* if we are here we should have already RXed the first
+ * frame.
+ * If we get here and the skb is not allocated then
+ * we have just thrown out garbage (skb not allocated)
+ * and we are still rxing garbage....
+ */
if (!priv->rx_skb_complete) {
- tmp_skb = dev_alloc_skb(priv->rx_skb->len+len+2);
+ tmp_skb = dev_alloc_skb(
+ priv->rx_skb->len + len + 2);
if (!tmp_skb)
goto drop;
@@ -1454,13 +1496,8 @@ static void rtl8180_rx(struct net_device *dev)
}
if (!priv->rx_skb_complete) {
- if (padding) {
- memcpy(skb_put(priv->rx_skb, len),
- (((unsigned char *)priv->rxbuffer->buf) + 2), len);
- } else {
- memcpy(skb_put(priv->rx_skb, len),
- priv->rxbuffer->buf, len);
- }
+ memcpy(skb_put(priv->rx_skb, len), ((unsigned char *)
+ priv->rxbuffer->buf) + (padding ? 2 : 0), len);
}
if (last && !priv->rx_skb_complete) {
@@ -1538,8 +1575,8 @@ static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
- struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
- short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
+ struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *)skb->data;
+ bool morefrag = le16_to_cpu(h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
unsigned long flags;
int priority;
@@ -1547,11 +1584,10 @@ static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
rate = ieeerate2rtlrate(rate);
/*
- * This function doesn't require lock because we make
- * sure it's called with the tx_lock already acquired.
- * this come from the kernel's hard_xmit callback (through
- * the ieee stack, or from the try_wake_queue (again through
- * the ieee stack.
+ * This function doesn't require a lock because we make sure it's called
+ * with the tx_lock already acquired.
+ * This comes from the kernel's hard_xmit callback (through the ieee
+ * stack), or from try_wake_queue (again through the ieee stack).
*/
priority = AC2Q(skb->priority);
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1613,55 +1649,6 @@ static int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/* longpre 144+48 shortpre 72+24 */
-u16 rtl8180_len2duration(u32 len, short rate, short *ext)
-{
- u16 duration;
- u16 drift;
- *ext = 0;
-
- switch (rate) {
- case 0: /* 1mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x2;
- drift = ((len+4)<<4) % 0x2;
- if (drift == 0)
- break;
- duration++;
- break;
- case 1: /* 2mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x4;
- drift = ((len+4)<<4) % 0x4;
- if (drift == 0)
- break;
- duration++;
- break;
- case 2: /* 5.5mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0xb;
- drift = ((len+4)<<4) % 0xb;
- if (drift == 0)
- break;
- duration++;
- break;
- default:
- case 3: /* 11mbps */
- *ext = 0;
- duration = ((len+4)<<4) / 0x16;
- drift = ((len+4)<<4) % 0x16;
- if (drift == 0)
- break;
- duration++;
- if (drift > 6)
- break;
- *ext = 1;
- break;
- }
-
- return duration;
-}
-
static void rtl8180_prepare_beacon(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -1669,7 +1656,10 @@ static void rtl8180_prepare_beacon(struct net_device *dev)
u16 word = read_nic_word(dev, BcnItv);
word &= ~BcnItv_BcnItv; /* clear Bcn_Itv */
- word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval); /* 0x64; */
+
+ /* word |= 0x64; */
+ word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval);
+
write_nic_word(dev, BcnItv, word);
skb = ieee80211_get_beacon(priv->ieee80211);
@@ -1681,12 +1671,12 @@ static void rtl8180_prepare_beacon(struct net_device *dev)
}
/*
- * This function do the real dirty work: it enqueues a TX command
- * descriptor in the ring buffer, copyes the frame in a TX buffer
- * and kicks the NIC to ensure it does the DMA transfer.
+ * This function does the real dirty work: it enqueues a TX command descriptor
+ * in the ring buffer, copies the frame into a TX buffer and kicks the NIC to
+ * ensure it does the DMA transfer.
*/
short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
- short morefrag, short descfrag, int rate)
+ bool morefrag, short descfrag, int rate)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u32 *tail, *temp_tail;
@@ -1697,16 +1687,17 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
int buflen;
int count;
struct buffer *buflist;
- struct ieee80211_hdr_3addr *frag_hdr = (struct ieee80211_hdr_3addr *)txbuf;
+ struct ieee80211_hdr_3addr *frag_hdr =
+ (struct ieee80211_hdr_3addr *)txbuf;
u8 dest[ETH_ALEN];
- u8 bUseShortPreamble = 0;
- u8 bCTSEnable = 0;
- u8 bRTSEnable = 0;
- u16 Duration = 0;
- u16 RtsDur = 0;
- u16 ThisFrameTime = 0;
- u16 TxDescDuration = 0;
- bool ownbit_flag = false;
+ u8 bUseShortPreamble = 0;
+ u8 bCTSEnable = 0;
+ u8 bRTSEnable = 0;
+ u16 Duration = 0;
+ u16 RtsDur = 0;
+ u16 ThisFrameTime = 0;
+ u16 TxDescDuration = 0;
+ bool ownbit_flag = false;
switch (priority) {
case MANAGE_PRIORITY:
@@ -1756,74 +1747,79 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
break;
}
- memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
- if (is_multicast_ether_addr(dest)) {
- Duration = 0;
- RtsDur = 0;
- bRTSEnable = 0;
+ memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
+ if (is_multicast_ether_addr(dest)) {
+ Duration = 0;
+ RtsDur = 0;
+ bRTSEnable = 0;
+ bCTSEnable = 0;
+
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ TxDescDuration = ThisFrameTime;
+ } else { /* Unicast packet */
+ u16 AckTime;
+
+ /* for Keep alive */
+ priv->NumTxUnicast++;
+
+ /* Figure out ACK rate according to BSS basic rate
+ * and Tx rate.
+ * AckCTSLng = 14, sent at 1Mbps
+ */
+ AckTime = ComputeTxTime(14, 10, 0, 0);
+
+ if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
+ u16 RtsTime, CtsTime;
+ bRTSEnable = 1;
bCTSEnable = 0;
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
- 0, bUseShortPreamble);
- TxDescDuration = ThisFrameTime;
- } else { /* Unicast packet */
- u16 AckTime;
-
- /* YJ,add,080828,for Keep alive */
- priv->NumTxUnicast++;
-
- /* Figure out ACK rate according to BSS basic rate
- * and Tx rate. */
- AckTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14 use 1M bps send */
-
- if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
- u16 RtsTime, CtsTime;
- /* u16 CtsRate; */
- bRTSEnable = 1;
- bCTSEnable = 0;
-
- /* Rate and time required for RTS. */
- RtsTime = ComputeTxTime(sAckCtsLng/8, priv->ieee80211->basic_rate, 0, 0);
- /* Rate and time required for CTS. */
- CtsTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14 use 1M bps send */
-
- /* Figure out time required to transmit this frame. */
- ThisFrameTime = ComputeTxTime(len + sCrcLng,
- rtl8180_rate2rate(rate),
- 0,
- bUseShortPreamble);
-
- /* RTS-CTS-ThisFrame-ACK. */
- RtsDur = CtsTime + ThisFrameTime + AckTime + 3*aSifsTime;
-
- TxDescDuration = RtsTime + RtsDur;
- } else { /* Normal case. */
- bCTSEnable = 0;
- bRTSEnable = 0;
- RtsDur = 0;
-
- ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate),
- 0, bUseShortPreamble);
- TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
- }
+ /* Rate and time required for RTS. */
+ RtsTime = ComputeTxTime(sAckCtsLng / 8,
+ priv->ieee80211->basic_rate, 0, 0);
- if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
- /* ThisFrame-ACK. */
- Duration = aSifsTime + AckTime;
- } else { /* One or more fragments remained. */
- u16 NextFragTime;
- NextFragTime = ComputeTxTime(len + sCrcLng, /* pretend following packet length equal current packet */
- rtl8180_rate2rate(rate),
- 0,
- bUseShortPreamble);
-
- /* ThisFrag-ACk-NextFrag-ACK. */
- Duration = NextFragTime + 3*aSifsTime + 2*AckTime;
- }
+ /* Rate and time required for CTS.
+ * AckCTSLng = 14, sent at 1Mbps
+ */
+ CtsTime = ComputeTxTime(14, 10, 0, 0);
+
+ /* Figure out time required to transmit this frame. */
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0,
+ bUseShortPreamble);
+
+ /* RTS-CTS-ThisFrame-ACK. */
+ RtsDur = CtsTime + ThisFrameTime +
+ AckTime + 3 * aSifsTime;
+
+ TxDescDuration = RtsTime + RtsDur;
+ } else { /* Normal case. */
+ bCTSEnable = 0;
+ bRTSEnable = 0;
+ RtsDur = 0;
- } /* End of Unicast packet */
+ ThisFrameTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+ TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
+ }
+
+ if (!(le16_to_cpu(frag_hdr->frame_control) & IEEE80211_FCTL_MOREFRAGS)) {
+ /* ThisFrame-ACK. */
+ Duration = aSifsTime + AckTime;
+ } else { /* One or more fragments remained. */
+ u16 NextFragTime;
+
+ /* pretend the following packet's length equals the current one's */
+ NextFragTime = ComputeTxTime(len + sCrcLng,
+ rtl8180_rate2rate(rate), 0, bUseShortPreamble);
+
+ /* ThisFrag-ACk-NextFrag-ACK. */
+ Duration = NextFragTime + 3 * aSifsTime + 2 * AckTime;
+ }
+
+ } /* End of Unicast packet */
- frag_hdr->duration_id = Duration;
+ frag_hdr->duration_id = Duration;
buflen = priv->txbuffsize;
remain = len;
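
As a quick reference for the duration bookkeeping in the reindented hunk above: a multicast frame's descriptor duration is just its own airtime, ThisFrameTime. A unicast frame protected by RTS/CTS reserves RtsDur = CtsTime + ThisFrameTime + AckTime + 3 * aSifsTime and its descriptor duration adds RtsTime on top, while the unprotected unicast case uses ThisFrameTime + aSifsTime + AckTime. The 802.11 header's duration_id is aSifsTime + AckTime for the last fragment, or NextFragTime + 3 * aSifsTime + 2 * AckTime when more fragments follow.
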
@@ -1832,7 +1828,8 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
while (remain != 0) {
mb();
if (!buflist) {
- DMESGE("TX buffer error, cannot TX frames. pri %d.", priority);
+ DMESGE("TX buffer error, cannot TX frames. pri %d.",
+ priority);
return -1;
}
buf = buflist->buf;
@@ -1851,43 +1848,43 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
*(tail+6) = 0;
*(tail+7) = 0;
- /* FIXME: this should be triggered by HW encryption parameters.*/
+ /* FIXME: should be triggered by HW encryption parameters. */
*tail |= (1<<15); /* no encrypt */
if (remain == len && !descfrag) {
ownbit_flag = false;
- *tail = *tail | (1<<29); /* fist segment of the packet */
+ *tail = *tail | (1 << 29); /* first segment of packet */
*tail = *tail | (len);
} else {
ownbit_flag = true;
}
for (i = 0; i < buflen && remain > 0; i++, remain--) {
- ((u8 *)buf)[i] = txbuf[i]; /* copy data into descriptor pointed DMAble buffer */
+ /* copy data into the descriptor's DMA-able buffer */
+ ((u8 *)buf)[i] = txbuf[i];
+
if (remain == 4 && i+4 >= buflen)
break;
/* ensure the last desc has at least 4 bytes payload */
-
}
txbuf = txbuf + i;
*(tail+3) = *(tail+3) & ~0xfff;
*(tail+3) = *(tail+3) | i; /* buffer length */
- /* Use short preamble or not */
- if (priv->ieee80211->current_network.capability&WLAN_CAPABILITY_SHORT_PREAMBLE)
- if (priv->plcp_preamble_mode == 1 && rate != 0) /* short mode now, not long! */
- ; /* *tail |= (1<<16); */ /* enable short preamble mode. */
if (bCTSEnable)
*tail |= (1<<18);
if (bRTSEnable) { /* rts enable */
- *tail |= ((ieeerate2rtlrate(priv->ieee80211->basic_rate))<<19); /* RTS RATE */
+ /* RTS RATE */
+ *tail |= (ieeerate2rtlrate(
+ priv->ieee80211->basic_rate) << 19);
+
*tail |= (1<<23); /* rts enable */
*(tail+1) |= (RtsDur&0xffff); /* RTS Duration */
}
*(tail+3) |= ((TxDescDuration&0xffff)<<16); /* DURATION */
- /* *(tail+3) |= (0xe6<<16); */
- *(tail+5) |= (11<<8); /* (priv->retry_data<<8); */ /* retry lim; */
+
+ *(tail + 5) |= (11 << 8); /* retry lim; */
*tail = *tail | ((rate&0xf) << 24);
@@ -1901,7 +1898,8 @@ short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
wmb();
if (ownbit_flag)
- *tail = *tail | (1<<31); /* descriptor ready to be txed */
+ /* descriptor ready to be txed */
+ *tail |= (1 << 31);
if ((tail - begin)/8 == count-1)
tail = begin;
@@ -1983,7 +1981,8 @@ static void rtl8180_rq_tx_ack(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
- write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
+ write_nic_byte(dev, CONFIG4,
+ read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
priv->ack_tx_to_ieee = 1;
}
@@ -2031,7 +2030,8 @@ static void rtl8180_hw_wakeup(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
spin_lock_irqsave(&priv->ps_lock, flags);
- write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
+ write_nic_byte(dev, CONFIG4,
+ read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
if (priv->rf_wakeup)
priv->rf_wakeup(dev);
spin_unlock_irqrestore(&priv->ps_lock, flags);
@@ -2063,13 +2063,13 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
tl -= MSECS(4+16+7);
/*
- * If the interval in witch we are requested to sleep is too
+ * If the interval in which we are requested to sleep is too
* short then give up and remain awake
*/
if (((tl >= rb) && (tl-rb) <= MSECS(MIN_SLEEP_TIME))
|| ((rb > tl) && (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
spin_unlock_irqrestore(&priv->ps_lock, flags);
- printk("too short to sleep\n");
+ netdev_warn(dev, "too short to sleep\n");
return;
}
@@ -2078,7 +2078,8 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
/* as tl may be less than rb */
- queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp);
+ queue_delayed_work(priv->ieee80211->wq,
+ &priv->ieee80211->hw_wakeup_wq, tmp);
}
/*
* If we suspect the TimerInt is gone beyond tl
@@ -2095,16 +2096,49 @@ static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
+static void rtl8180_wmm_single_param_update(struct net_device *dev,
+ u8 mode, AC_CODING eACI, PAC_PARAM param)
+{
+ u8 u1bAIFS;
+ u32 u4bAcParam;
+
+ /* Retrieve parameters to update. */
+ /* Mode G/A: slotTimeTimer = 9; Mode B: 20 */
+ u1bAIFS = param->f.AciAifsn.f.AIFSN * ((mode & IEEE_G) == IEEE_G ?
+ 9 : 20) + aSifsTime;
+ u4bAcParam = (((u32)param->f.TXOPLimit << AC_PARAM_TXOP_LIMIT_OFFSET) |
+ ((u32)param->f.Ecw.f.ECWmax << AC_PARAM_ECW_MAX_OFFSET) |
+ ((u32)param->f.Ecw.f.ECWmin << AC_PARAM_ECW_MIN_OFFSET) |
+ ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
+
+ switch (eACI) {
+ case AC1_BK:
+ write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
+ return;
+ case AC0_BE:
+ write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
+ return;
+ case AC2_VI:
+ write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
+ return;
+ case AC3_VO:
+ write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
+ return;
+ default:
+ pr_warn("SetHwReg8185(): invalid ACI: %d!\n", eACI);
+ return;
+ }
+}
+
static void rtl8180_wmm_param_update(struct work_struct *work)
{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wmm_param_update_wq);
+ struct ieee80211_device *ieee = container_of(work,
+ struct ieee80211_device, wmm_param_update_wq);
struct net_device *dev = ieee->dev;
u8 *ac_param = (u8 *)(ieee->current_network.wmm_param);
u8 mode = ieee->current_network.mode;
- AC_CODING eACI;
- AC_PARAM AcParam;
- PAC_PARAM pAcParam;
- u8 i;
+ AC_CODING eACI;
+ AC_PARAM AcParam;
if (!ieee->current_network.QoS_Enable) {
/* legacy ac_xx_param update */
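
For reference, the rtl8180_wmm_single_param_update() helper added above folds the per-AC EDCA parameters into one 32-bit register value and writes it with a single write_nic_dword(). A rough worked example, treating the concrete numbers as assumptions since aSifsTime and the AC_PARAM_*_OFFSET values live in the driver headers: with AIFSN = 2 in 802.11g mode (9 us slots) and aSifsTime = 10, u1bAIFS = 2 * 9 + 10 = 28; u4bAcParam then ORs TXOPLimit, ECWmax, ECWmin and that AIFS value into their respective bit fields.
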
@@ -2114,83 +2148,26 @@ static void rtl8180_wmm_param_update(struct work_struct *work)
AcParam.f.Ecw.f.ECWmin = 3; /* Follow 802.11 CWmin. */
AcParam.f.Ecw.f.ECWmax = 7; /* Follow 802.11 CWmax. */
AcParam.f.TXOPLimit = 0;
+
for (eACI = 0; eACI < AC_MAX; eACI++) {
AcParam.f.AciAifsn.f.ACI = (u8)eACI;
- {
- u8 u1bAIFS;
- u32 u4bAcParam;
- pAcParam = (PAC_PARAM)(&AcParam);
- /* Retrieve parameters to update. */
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit))<<AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(pAcParam->f.Ecw.f.ECWmax))<<AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(pAcParam->f.Ecw.f.ECWmin))<<AC_PARAM_ECW_MIN_OFFSET)|
- (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
- switch (eACI) {
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- pr_warn("SetHwReg8185():invalid ACI: %d!\n",
- eACI);
- break;
- }
- }
+
+ rtl8180_wmm_single_param_update(dev, mode, eACI,
+ (PAC_PARAM)&AcParam);
}
return;
}
- for (i = 0; i < AC_MAX; i++) {
- /* AcParam.longData = 0; */
- pAcParam = (AC_PARAM *)ac_param;
- {
- AC_CODING eACI;
- u8 u1bAIFS;
- u32 u4bAcParam;
-
- /* Retrieve parameters to update. */
- eACI = pAcParam->f.AciAifsn.f.ACI;
- /* Mode G/A: slotTimeTimer = 9; Mode B: 20 */
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmax)) << AC_PARAM_ECW_MAX_OFFSET) |
- (((u32)(pAcParam->f.Ecw.f.ECWmin)) << AC_PARAM_ECW_MIN_OFFSET) |
- (((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
-
- switch (eACI) {
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- pr_warn("SetHwReg8185(): invalid ACI: %d !\n",
- eACI);
- break;
- }
- }
- ac_param += (sizeof(AC_PARAM));
+ for (eACI = 0; eACI < AC_MAX; eACI++) {
+ rtl8180_wmm_single_param_update(dev, mode,
+ ((PAC_PARAM)ac_param)->f.AciAifsn.f.ACI,
+ (PAC_PARAM)ac_param);
+
+ ac_param += sizeof(AC_PARAM);
}
}
void rtl8180_restart_wq(struct work_struct *work);
-/* void rtl8180_rq_tx_ack(struct work_struct *work); */
void rtl8180_watch_dog_wq(struct work_struct *work);
void rtl8180_hw_wakeup_wq(struct work_struct *work);
void rtl8180_hw_sleep_wq(struct work_struct *work);
@@ -2208,7 +2185,8 @@ static void watch_dog_adaptive(unsigned long data)
/* Tx High Power Mechanism. */
if (CheckHighPower((struct net_device *)data))
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->tx_pw_wq);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->tx_pw_wq);
/* Tx Power Tracking on 87SE. */
if (CheckTxPwrTracking((struct net_device *)data))
@@ -2216,27 +2194,59 @@ static void watch_dog_adaptive(unsigned long data)
/* Perform DIG immediately. */
if (CheckDig((struct net_device *)data))
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_dig_wq);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->hw_dig_wq);
+
rtl8180_watch_dog((struct net_device *)data);
- queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
+ queue_work(priv->ieee80211->wq,
+ (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
+
+ priv->watch_dog_timer.expires = jiffies +
+ MSECS(IEEE80211_WATCH_DOG_TIME);
- priv->watch_dog_timer.expires = jiffies + MSECS(IEEE80211_WATCH_DOG_TIME);
add_timer(&priv->watch_dog_timer);
}
-static CHANNEL_LIST ChannelPlan[] = {
- {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64},19}, /* FCC */
- {{1,2,3,4,5,6,7,8,9,10,11},11}, /* IC */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* ETSI */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Spain. Change to ETSI. */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* France. Change to ETSI. */
- {{14,36,40,44,48,52,56,60,64},9}, /* MKK */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14, 36,40,44,48,52,56,60,64},22},/* MKK1 */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Israel. */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,34,38,42,46},17}, /* For 11a , TELEC */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, /* For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626 */
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13} /* world wide 13: ch1~ch11 active scan, ch12~13 passive //lzm add 080826 */
+static struct rtl8187se_channel_list channel_plan_list[] = {
+ /* FCC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 19},
+
+ /* IC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
+
+ /* ETSI */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* Spain. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* France. Change to ETSI. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* MKK */
+ {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9},
+
+ /* MKK1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36,
+ 40, 44, 48, 52, 56, 60, 64}, 22},
+
+ /* Israel. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
+ 44, 48, 52, 56, 60, 64}, 21},
+
+ /* For 11a , TELEC */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17},
+
+ /* For Global Domain. 1-11 active, 12-14 passive. */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
+
+ /* world wide 13: ch1~ch11 active, ch12~13 passive */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}
};
static void rtl8180_set_channel_map(u8 channel_plan,
@@ -2244,7 +2254,6 @@ static void rtl8180_set_channel_map(u8 channel_plan,
{
int i;
- /* lzm add 080826 */
ieee->MinPassiveChnlNum = MAX_CHANNEL_NUMBER+1;
ieee->IbssStartChnl = 0;
@@ -2261,13 +2270,13 @@ static void rtl8180_set_channel_map(u8 channel_plan,
{
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
- if (ChannelPlan[channel_plan].Len != 0) {
+ if (channel_plan_list[channel_plan].len != 0) {
/* Clear old channel map */
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
/* Set new channel map */
- for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
- if (ChannelPlan[channel_plan].Channel[i] <= 14)
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
+ for (i = 0; i < channel_plan_list[channel_plan].len; i++) {
+ if (channel_plan_list[channel_plan].channel[i] <= 14)
+ GET_DOT11D_INFO(ieee)->channel_map[channel_plan_list[channel_plan].channel[i]] = 1;
}
}
break;
@@ -2279,7 +2288,7 @@ static void rtl8180_set_channel_map(u8 channel_plan,
ieee->bGlobalDomain = true;
break;
}
- case COUNTRY_CODE_WORLD_WIDE_13_INDEX:/* lzm add 080826 */
+ case COUNTRY_CODE_WORLD_WIDE_13_INDEX:
{
ieee->MinPassiveChnlNum = 12;
ieee->IbssStartChnl = 10;
@@ -2299,19 +2308,17 @@ static void rtl8180_set_channel_map(u8 channel_plan,
void GPIOChangeRFWorkItemCallBack(struct work_struct *work);
-/* YJ,add,080828 */
-static void rtl8180_statistics_init(struct Stats *pstats)
+static void rtl8180_statistics_init(struct stats *pstats)
{
- memset(pstats, 0, sizeof(struct Stats));
+ memset(pstats, 0, sizeof(struct stats));
}
-static void rtl8180_link_detect_init(plink_detect_t plink_detect)
+static void rtl8180_link_detect_init(struct link_detect_t *plink_detect)
{
- memset(plink_detect, 0, sizeof(link_detect_t));
- plink_detect->SlotNum = DEFAULT_SLOT_NUM;
+ memset(plink_detect, 0, sizeof(struct link_detect_t));
+ plink_detect->slot_num = DEFAULT_SLOT_NUM;
}
-/* YJ,add,080828,end */
static void rtl8187se_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
struct net_device *dev = eeprom->data;
@@ -2360,7 +2367,7 @@ static short rtl8180_init(struct net_device *dev)
eeprom_93cx6_read(&eeprom, EEPROM_COUNTRY_CODE>>1, &eeprom_val);
priv->channel_plan = eeprom_val & 0xFF;
if (priv->channel_plan > COUNTRY_CODE_GLOBAL_DOMAIN) {
- printk("rtl8180_init:Error channel plan! Set to default.\n");
+ netdev_err(dev, "rtl8180_init: Invalid channel plan! Set to default.\n");
priv->channel_plan = 0;
}
@@ -2385,7 +2392,8 @@ static short rtl8180_init(struct net_device *dev)
rtl8180_link_detect_init(&priv->link_detect);
priv->ack_tx_to_ieee = 0;
- priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
+ priv->ieee80211->current_network.beacon_interval =
+ DEFAULT_BEACONINTERVAL;
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
@@ -2410,12 +2418,12 @@ static short rtl8180_init(struct net_device *dev)
priv->bInactivePs = true; /* false; */
priv->ieee80211->bInactivePs = priv->bInactivePs;
priv->bSwRfProcessing = false;
- priv->eRFPowerState = eRfOff;
+ priv->eRFPowerState = RF_OFF;
priv->RfOffReason = 0;
- priv->LedStrategy = SW_LED_MODE0;
- priv->TxPollingTimes = 0; /* lzm add 080826 */
+ priv->led_strategy = SW_LED_MODE0;
+ priv->TxPollingTimes = 0;
priv->bLeisurePs = true;
- priv->dot11PowerSaveMode = eActive;
+ priv->dot11PowerSaveMode = ACTIVE;
priv->AdMinCheckPeriod = 5;
priv->AdMaxCheckPeriod = 10;
priv->AdMaxRxSsThreshold = 30; /* 60->30 */
@@ -2431,7 +2439,8 @@ static short rtl8180_init(struct net_device *dev)
priv->AdRxSsBeforeSwitched = 0;
init_timer(&priv->SwAntennaDiversityTimer);
priv->SwAntennaDiversityTimer.data = (unsigned long)dev;
- priv->SwAntennaDiversityTimer.function = (void *)SwAntennaDiversityTimerCallback;
+ priv->SwAntennaDiversityTimer.function =
+ (void *)SwAntennaDiversityTimerCallback;
priv->bDigMechanism = true;
priv->InitialGain = 6;
priv->bXtalCalibration = false;
@@ -2440,7 +2449,8 @@ static short rtl8180_init(struct net_device *dev)
priv->bTxPowerTrack = false;
priv->ThermalMeter = 0;
priv->FalseAlarmRegValue = 0;
- priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm, which is used in DIG. */
+ /* Upper threshold of OFDM false alarm, which is used in DIG. */
+ priv->RegDigOfdmFaUpTh = 0xc;
priv->DIG_NumberFallbackVote = 0;
priv->DIG_NumberUpgradeVote = 0;
priv->LastSignalStrengthInPercent = 0;
@@ -2585,7 +2595,8 @@ static short rtl8180_init(struct net_device *dev)
priv->bSwAntennaDiverity = priv->EEPROMSwAntennaDiversity;
else
/* 1:disable antenna diversity, 2: enable antenna diversity. */
- priv->bSwAntennaDiverity = priv->RegSwAntennaDiversityMechanism == 2;
+ priv->bSwAntennaDiverity =
+ priv->RegSwAntennaDiversityMechanism == 2;
if (priv->RegDefaultAntenna == 0)
/* 0: default from EEPROM. */
@@ -2669,7 +2680,8 @@ static short rtl8180_init(struct net_device *dev)
TX_BEACON_RING_ADDR))
return -ENOMEM;
- if (request_irq(dev->irq, rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, rtl8180_interrupt,
+ IRQF_SHARED, dev->name, dev)) {
DMESGE("Error allocating IRQ %d", dev->irq);
return -1;
} else {
@@ -2718,8 +2730,6 @@ void rtl8180_set_hw_wep(struct net_device *dev)
void rtl8185_rf_pins_enable(struct net_device *dev)
{
- /* u16 tmp; */
- /* tmp = read_nic_word(dev, RFPinsEnable); */
write_nic_word(dev, RFPinsEnable, 0x1fff); /* | tmp); */
}
@@ -2768,16 +2778,11 @@ static void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
phyw = ((data<<8) | adr);
- /* Note that, we must write 0xff7c after 0x7d-0x7f to write BB register. */
+ /* Note: we must write 0xff7c after 0x7d-0x7f to write BB register. */
write_nic_byte(dev, 0x7f, ((phyw & 0xff000000) >> 24));
write_nic_byte(dev, 0x7e, ((phyw & 0x00ff0000) >> 16));
write_nic_byte(dev, 0x7d, ((phyw & 0x0000ff00) >> 8));
write_nic_byte(dev, 0x7c, ((phyw & 0x000000ff)));
-
- /* this is ok to fail when we write AGC table. check for AGC table might be
- * done by masking with 0x7f instead of 0xff
- */
- /* if (phyr != (data&0xff)) DMESGW("Phy write timeout %x %x %x", phyr, data, adr); */
}
inline void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data)
@@ -2812,9 +2817,9 @@ void rtl8180_start_tx_beacon(struct net_device *dev)
word = read_nic_word(dev, BintrItv);
word &= ~BintrItv_BintrItv;
word |= 1000; /* priv->ieee80211->current_network.beacon_interval *
- ((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
- // FIXME: check if correct ^^ worked with 0x3e8;
- */
+ * ((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
+ * FIXME: check if correct ^^ worked with 0x3e8;
+ */
write_nic_word(dev, BintrItv, word);
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
@@ -2833,7 +2838,7 @@ static struct net_device_stats *rtl8180_stats(struct net_device *dev)
* Change current and default preamble mode.
*/
static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
- RT_PS_MODE rtPsMode)
+ enum rt_ps_mode rtPsMode)
{
/* Currently, we do not change power save mode on IBSS mode. */
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -2846,25 +2851,26 @@ static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
static void LeisurePSEnter(struct r8180_priv *priv)
{
- if (priv->bLeisurePs) {
+ if (priv->bLeisurePs)
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
/* IEEE80211_PS_ENABLE */
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);
- }
+ MgntActSet_802_11_PowerSaveMode(priv,
+ IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST);
}
static void LeisurePSLeave(struct r8180_priv *priv)
{
- if (priv->bLeisurePs) {
+ if (priv->bLeisurePs)
if (priv->ieee80211->ps != IEEE80211_PS_DISABLED)
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_DISABLED);
- }
+ MgntActSet_802_11_PowerSaveMode(
+ priv, IEEE80211_PS_DISABLED);
}
void rtl8180_hw_wakeup_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_wakeup_wq);
+ struct ieee80211_device *ieee = container_of(
+ dwork, struct ieee80211_device, hw_wakeup_wq);
struct net_device *dev = ieee->dev;
rtl8180_hw_wakeup(dev);
@@ -2873,7 +2879,8 @@ void rtl8180_hw_wakeup_wq(struct work_struct *work)
void rtl8180_hw_sleep_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_sleep_wq);
+ struct ieee80211_device *ieee = container_of(
+ dwork, struct ieee80211_device, hw_sleep_wq);
struct net_device *dev = ieee->dev;
rtl8180_hw_sleep_down(dev);
@@ -2890,23 +2897,30 @@ static void MgntLinkKeepAlive(struct r8180_priv *priv)
*/
if ((priv->keepAliveLevel == 2) ||
- (priv->link_detect.LastNumTxUnicast == priv->NumTxUnicast &&
- priv->link_detect.LastNumRxUnicast == priv->ieee80211->NumRxUnicast)
+ (priv->link_detect.last_num_tx_unicast ==
+ priv->NumTxUnicast &&
+ priv->link_detect.last_num_rx_unicast ==
+ priv->ieee80211->NumRxUnicast)
) {
- priv->link_detect.IdleCount++;
+ priv->link_detect.idle_count++;
/*
- * Send a Keep-Alive packet packet to AP if we had been idle for a while.
+ * Send a Keep-Alive packet to the AP if we had
+ * been idle for a while.
*/
- if (priv->link_detect.IdleCount >= ((KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)-1)) {
- priv->link_detect.IdleCount = 0;
- ieee80211_sta_ps_send_null_frame(priv->ieee80211, false);
+ if (priv->link_detect.idle_count >=
+ KEEP_ALIVE_INTERVAL /
+ CHECK_FOR_HANG_PERIOD - 1) {
+ priv->link_detect.idle_count = 0;
+ ieee80211_sta_ps_send_null_frame(
+ priv->ieee80211, false);
}
} else {
- priv->link_detect.IdleCount = 0;
+ priv->link_detect.idle_count = 0;
}
- priv->link_detect.LastNumTxUnicast = priv->NumTxUnicast;
- priv->link_detect.LastNumRxUnicast = priv->ieee80211->NumRxUnicast;
+ priv->link_detect.last_num_tx_unicast = priv->NumTxUnicast;
+ priv->link_detect.last_num_rx_unicast =
+ priv->ieee80211->NumRxUnicast;
}
}
@@ -2922,36 +2936,42 @@ void rtl8180_watch_dog(struct net_device *dev)
if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
(priv->ieee80211->state == IEEE80211_NOLINK) &&
(priv->ieee80211->beinretry == false) &&
- (priv->eRFPowerState == eRfOn))
+ (priv->eRFPowerState == RF_ON))
IPSEnter(dev);
}
- /* YJ,add,080828,for link state check */
- if ((priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
- SlotIndex = (priv->link_detect.SlotIndex++) % priv->link_detect.SlotNum;
- priv->link_detect.RxFrameNum[SlotIndex] = priv->ieee80211->NumRxDataInPeriod + priv->ieee80211->NumRxBcnInPeriod;
- for (i = 0; i < priv->link_detect.SlotNum; i++)
- TotalRxNum += priv->link_detect.RxFrameNum[i];
+ if ((priv->ieee80211->state == IEEE80211_LINKED) &&
+ (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
+ SlotIndex = (priv->link_detect.slot_index++) %
+ priv->link_detect.slot_num;
+
+ priv->link_detect.rx_frame_num[SlotIndex] =
+ priv->ieee80211->NumRxDataInPeriod +
+ priv->ieee80211->NumRxBcnInPeriod;
+
+ for (i = 0; i < priv->link_detect.slot_num; i++)
+ TotalRxNum += priv->link_detect.rx_frame_num[i];
if (TotalRxNum == 0) {
priv->ieee80211->state = IEEE80211_ASSOCIATING;
- queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
+ queue_work(priv->ieee80211->wq,
+ &priv->ieee80211->associate_procedure_wq);
}
}
- /* YJ,add,080828,for KeepAlive */
MgntLinkKeepAlive(priv);
- /* YJ,add,080828,for LPS */
LeisurePSLeave(priv);
if (priv->ieee80211->state == IEEE80211_LINKED) {
- priv->link_detect.NumRxOkInPeriod = priv->ieee80211->NumRxDataInPeriod;
- if (priv->link_detect.NumRxOkInPeriod > 666 ||
- priv->link_detect.NumTxOkInPeriod > 666) {
+ priv->link_detect.num_rx_ok_in_period =
+ priv->ieee80211->NumRxDataInPeriod;
+ if (priv->link_detect.num_rx_ok_in_period > 666 ||
+ priv->link_detect.num_tx_ok_in_period > 666) {
bBusyTraffic = true;
}
- if (((priv->link_detect.NumRxOkInPeriod + priv->link_detect.NumTxOkInPeriod) > 8)
- || (priv->link_detect.NumRxOkInPeriod > 2)) {
+ if ((priv->link_detect.num_rx_ok_in_period +
+ priv->link_detect.num_tx_ok_in_period > 8)
+ || (priv->link_detect.num_rx_ok_in_period > 2)) {
bEnterPS = false;
} else
bEnterPS = true;
@@ -2962,9 +2982,9 @@ void rtl8180_watch_dog(struct net_device *dev)
LeisurePSLeave(priv);
} else
LeisurePSLeave(priv);
- priv->link_detect.bBusyTraffic = bBusyTraffic;
- priv->link_detect.NumRxOkInPeriod = 0;
- priv->link_detect.NumTxOkInPeriod = 0;
+ priv->link_detect.b_busy_traffic = bBusyTraffic;
+ priv->link_detect.num_rx_ok_in_period = 0;
+ priv->link_detect.num_tx_ok_in_period = 0;
priv->ieee80211->NumRxDataInPeriod = 0;
priv->ieee80211->NumRxBcnInPeriod = 0;
}
@@ -3047,15 +3067,17 @@ int rtl8180_down(struct net_device *dev)
cancel_delayed_work(&priv->ieee80211->hw_dig_wq);
cancel_delayed_work(&priv->ieee80211->tx_pw_wq);
del_timer_sync(&priv->SwAntennaDiversityTimer);
- SetZebraRFPowerState8185(dev, eRfOff);
- memset(&(priv->ieee80211->current_network), 0, sizeof(struct ieee80211_network));
+ SetZebraRFPowerState8185(dev, RF_OFF);
+ memset(&priv->ieee80211->current_network,
+ 0, sizeof(struct ieee80211_network));
priv->ieee80211->state = IEEE80211_NOLINK;
return 0;
}
void rtl8180_restart_wq(struct work_struct *work)
{
- struct r8180_priv *priv = container_of(work, struct r8180_priv, reset_wq);
+ struct r8180_priv *priv = container_of(
+ work, struct r8180_priv, reset_wq);
struct net_device *dev = priv->dev;
down(&priv->wx_sem);
@@ -3116,7 +3138,8 @@ static int r8180_set_mac_adr(struct net_device *dev, void *mac)
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
- memcpy(priv->ieee80211->current_network.bssid, dev->dev_addr, ETH_ALEN);
+ memcpy(priv->ieee80211->current_network.bssid,
+ dev->dev_addr, ETH_ALEN);
if (priv->up) {
rtl8180_down(dev);
@@ -3137,7 +3160,8 @@ static int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
- ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
+ ret = ieee80211_wpa_supplicant_ioctl(
+ priv->ieee80211, &wrq->u.data);
return ret;
default:
return -EOPNOTSUPP;
@@ -3387,7 +3411,7 @@ static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
int j, i;
int hd;
if (error)
- priv->stats.txretry++; /* tony 20060601 */
+ priv->stats.txretry++;
spin_lock_irqsave(&priv->tx_lock, flag);
switch (pri) {
case MANAGE_PRIORITY:
@@ -3540,7 +3564,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
spin_lock_irqsave(&priv->irq_th_lock, flags);
/* ISR: 4bytes */
- inta = read_nic_dword(dev, ISR); /* & priv->IntrMask; */
+ inta = read_nic_dword(dev, ISR);
write_nic_dword(dev, ISR, inta); /* reset int situation */
priv->stats.shints++;
@@ -3586,7 +3610,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
}
if (inta & ISR_THPDOK) { /* High priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txhpokint++;
rtl8180_tx_isr(dev, HI_PRIORITY, 0);
}
@@ -3649,14 +3673,14 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
priv->stats.txoverflow++;
if (inta & ISR_TNPDOK) { /* Normal priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txnpokint++;
rtl8180_tx_isr(dev, NORM_PRIORITY, 0);
rtl8180_try_wake_queue(dev, NORM_PRIORITY);
}
if (inta & ISR_TLPDOK) { /* Low priority tx ok */
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
priv->stats.txlpokint++;
rtl8180_tx_isr(dev, LOW_PRIORITY, 0);
rtl8180_try_wake_queue(dev, LOW_PRIORITY);
@@ -3664,14 +3688,14 @@ static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
if (inta & ISR_TBKDOK) { /* corresponding to BK_PRIORITY */
priv->stats.txbkpokint++;
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
rtl8180_tx_isr(dev, BK_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
if (inta & ISR_TBEDOK) { /* corresponding to BE_PRIORITY */
priv->stats.txbeperr++;
- priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ priv->link_detect.num_tx_ok_in_period++;
rtl8180_tx_isr(dev, BE_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
@@ -3688,17 +3712,19 @@ void rtl8180_irq_rx_tasklet(struct r8180_priv *priv)
void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
{
- struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
+ struct ieee80211_device *ieee = container_of(
+ work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
struct net_device *dev = ieee->dev;
struct r8180_priv *priv = ieee80211_priv(dev);
u8 btPSR;
u8 btConfig0;
- RT_RF_POWER_STATE eRfPowerStateToSet;
+ enum rt_rf_power_state eRfPowerStateToSet;
bool bActuallySet = false;
char *argv[3];
static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};
+ static char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/usr/bin:/bin", NULL};
static int readf_count;
readf_count = (readf_count+1)%0xffff;
@@ -3708,24 +3734,24 @@ void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
btPSR = read_nic_byte(dev, PSR);
write_nic_byte(dev, PSR, (btPSR & ~BIT3));
- /* It need to delay 4us suggested by Jong, 2008-01-16 */
+ /* We need to delay 4 us here, as suggested */
udelay(4);
/* HW radio On/Off according to the value of FF51[4](config0) */
btConfig0 = btPSR = read_nic_byte(dev, CONFIG0);
- eRfPowerStateToSet = (btConfig0 & BIT4) ? eRfOn : eRfOff;
+ eRfPowerStateToSet = (btConfig0 & BIT4) ? RF_ON : RF_OFF;
/* Turn LED back on when radio enabled */
- if (eRfPowerStateToSet == eRfOn)
+ if (eRfPowerStateToSet == RF_ON)
write_nic_byte(dev, PSR, btPSR | BIT3);
if ((priv->ieee80211->bHwRadioOff == true) &&
- (eRfPowerStateToSet == eRfOn)) {
+ (eRfPowerStateToSet == RF_ON)) {
priv->ieee80211->bHwRadioOff = false;
bActuallySet = true;
} else if ((priv->ieee80211->bHwRadioOff == false) &&
- (eRfPowerStateToSet == eRfOff)) {
+ (eRfPowerStateToSet == RF_OFF)) {
priv->ieee80211->bHwRadioOff = true;
bActuallySet = true;
}
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
index 2ccd2cb70fac..8c020e064869 100644
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ b/drivers/staging/rtl8187se/r8180_dm.c
@@ -1116,14 +1116,14 @@ bool CheckTxPwrTracking(struct net_device *dev)
void SwAntennaDiversityTimerCallback(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
/* We do NOT need to switch antenna while RF is off. */
rtState = priv->eRFPowerState;
do {
- if (rtState == eRfOff) {
+ if (rtState == RF_OFF) {
break;
- } else if (rtState == eRfSleep) {
+ } else if (rtState == RF_SLEEP) {
/* Don't access BB/RF under Disable PLL situation. */
break;
}
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index de084f07a071..7df73927b3cc 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -1,14 +1,13 @@
/*
- This is part of the rtl8180-sa2400 driver
- released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
-
- This files contains programming code for the rtl8225
- radio frontend.
-
- *Many* thanks to Realtek Corp. for their great support!
-
-*/
+ * This is part of the rtl8180-sa2400 driver released under the GPL (See file
+ * COPYING for details).
+ *
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This file contains programming code for the rtl8225 radio frontend.
+ *
+ * *Many* thanks to Realtek Corp. for their great support!
+ */
#include "r8180.h"
@@ -29,7 +28,7 @@ u16 RF_ReadReg(struct net_device *dev, u8 offset);
void rtl8180_set_mode(struct net_device *dev, int mode);
void rtl8180_set_mode(struct net_device *dev, int mode);
bool SetZebraRFPowerState8185(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState);
+ enum rt_rf_power_state eRFPowerState);
void rtl8225z4_rf_sleep(struct net_device *dev);
void rtl8225z4_rf_wakeup(struct net_device *dev);
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 7c9a8bfe6d88..47104fa05c55 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -279,8 +279,8 @@ void rtl8225z2_rf_close(struct net_device *dev)
* Map dBm into Tx power index according to current HW model, for example,
* RF and PA, and current wireless mode.
*/
-static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
- s32 PowerInDbm)
+static s8 DbmToTxPwrIdx(struct r8180_priv *priv,
+ enum wireless_mode mode, s32 PowerInDbm)
{
bool bUseDefault = true;
s8 TxPwrIdx = 0;
@@ -291,7 +291,7 @@ static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
*/
s32 tmp = 0;
- if (WirelessMode == WIRELESS_MODE_G) {
+ if (mode == WIRELESS_MODE_G) {
bUseDefault = false;
tmp = (2 * PowerInDbm);
@@ -301,7 +301,7 @@ static s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
TxPwrIdx = 40;
else
TxPwrIdx = (s8)tmp;
- } else if (WirelessMode == WIRELESS_MODE_B) {
+ } else if (mode == WIRELESS_MODE_B) {
bUseDefault = false;
tmp = (4 * PowerInDbm) - 52;
@@ -606,51 +606,12 @@ void rtl8225z2_rf_init(struct net_device *dev)
rtl8225z2_rf_set_chan(dev, priv->chan);
}
-void rtl8225z2_rf_set_mode(struct net_device *dev)
-{
- struct r8180_priv *priv = ieee80211_priv(dev);
-
- if (priv->ieee80211->mode == IEEE_A) {
- write_rtl8225(dev, 0x5, 0x1865);
- write_nic_dword(dev, RF_PARA, 0x10084);
- write_nic_dword(dev, RF_TIMING, 0xa8008);
- write_phy_ofdm(dev, 0x0, 0x0);
- write_phy_ofdm(dev, 0xa, 0x6);
- write_phy_ofdm(dev, 0xb, 0x99);
- write_phy_ofdm(dev, 0xf, 0x20);
- write_phy_ofdm(dev, 0x11, 0x7);
-
- rtl8225z2_set_gain(dev, 4);
-
- write_phy_ofdm(dev, 0x15, 0x40);
- write_phy_ofdm(dev, 0x17, 0x40);
-
- write_nic_dword(dev, 0x94, 0x10000000);
- } else {
- write_rtl8225(dev, 0x5, 0x1864);
- write_nic_dword(dev, RF_PARA, 0x10044);
- write_nic_dword(dev, RF_TIMING, 0xa8008);
- write_phy_ofdm(dev, 0x0, 0x1);
- write_phy_ofdm(dev, 0xa, 0x6);
- write_phy_ofdm(dev, 0xb, 0x99);
- write_phy_ofdm(dev, 0xf, 0x20);
- write_phy_ofdm(dev, 0x11, 0x7);
-
- rtl8225z2_set_gain(dev, 4);
-
- write_phy_ofdm(dev, 0x15, 0x40);
- write_phy_ofdm(dev, 0x17, 0x40);
-
- write_nic_dword(dev, 0x94, 0x04000002);
- }
-}
-
#define MAX_DOZE_WAITING_TIMES_85B 20
#define MAX_POLLING_24F_TIMES_87SE 10
#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5
bool SetZebraRFPowerState8185(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ enum rt_rf_power_state eRFPowerState)
{
struct r8180_priv *priv = ieee80211_priv(dev);
u8 btCR9346, btConfig3;
@@ -672,7 +633,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En));
switch (eRFPowerState) {
- case eRfOn:
+ case RF_ON:
write_nic_word(dev, 0x37C, 0x00EC);
/* turn on AFE */
@@ -697,7 +658,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
u1bTmp = read_nic_byte(dev, 0x24E);
write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6))));
break;
- case eRfSleep:
+ case RF_SLEEP:
for (QueueID = 0, i = 0; QueueID < 6;) {
if (get_curr_tx_free_desc(dev, QueueID) ==
priv->txringcount) {
@@ -764,7 +725,7 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
}
}
break;
- case eRfOff:
+ case RF_OFF:
for (QueueID = 0, i = 0; QueueID < 6;) {
if (get_curr_tx_free_desc(dev, QueueID) ==
priv->txringcount) {
@@ -841,10 +802,10 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
void rtl8225z4_rf_sleep(struct net_device *dev)
{
- MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS);
+ MgntActSet_RF_State(dev, RF_SLEEP, RF_CHANGE_BY_PS);
}
void rtl8225z4_rf_wakeup(struct net_device *dev)
{
- MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS);
+ MgntActSet_RF_State(dev, RF_ON, RF_CHANGE_BY_PS);
}
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 9b676e027cad..b55249170f18 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -29,7 +29,7 @@ static u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
-static CHANNEL_LIST DefaultChannelPlan[] = {
+static struct rtl8187se_channel_list default_channel_plan[] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /* FCC */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* IC */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* ETSI */
@@ -337,7 +337,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
} else {
/* prevent scan in BusyTraffic */
/* FIXME: Need to consider last scan time */
- if ((priv->link_detect.bBusyTraffic) && (true)) {
+ if ((priv->link_detect.b_busy_traffic) && (true)) {
ret = 0;
printk("Now traffic is busy, please try later!\n");
} else
@@ -1030,15 +1030,15 @@ static int r8180_wx_set_channelplan(struct net_device *dev,
/* unsigned long flags; */
down(&priv->wx_sem);
- if (DefaultChannelPlan[*val].Len != 0) {
+ if (default_channel_plan[*val].len != 0) {
priv->channel_plan = *val;
/* Clear old channel map 8 */
for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
GET_DOT11D_INFO(priv->ieee80211)->channel_map[i] = 0;
/* Set new channel map */
- for (i = 1; i <= DefaultChannelPlan[*val].Len; i++)
- GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1;
+ for (i = 1; i <= default_channel_plan[*val].len; i++)
+ GET_DOT11D_INFO(priv->ieee80211)->channel_map[default_channel_plan[*val].channel[i-1]] = 1;
}
up(&priv->wx_sem);
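
For reference, the renamed channel-plan table pairs an explicit channel list with its length. A sketch of the assumed entry layout (array size and field names are illustrative; the real struct sits in the driver headers, not in this hunk):

	struct rtl8187se_channel_list {
		u8 channel[32];		/* channel numbers, e.g. 1..11 for the IC plan */
		u8 len;			/* number of valid entries in channel[] */
	};
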
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index c8b9baff1dbc..cc6f100814f3 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -619,10 +619,10 @@ void UpdateInitialGain(struct net_device *dev)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
/* lzm add 080826 */
- if (priv->eRFPowerState != eRfOn) {
+ if (priv->eRFPowerState != RF_ON) {
/* Don't access BB/RF under disable PLL situation.
* RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain -
- * pHalData->eRFPowerState!=eRfOn\n"));
+ * pHalData->eRFPowerState!=RF_ON\n"));
* Back to the original state
*/
priv->InitialGain = priv->InitialGainBackUp;
@@ -872,8 +872,8 @@ static u8 GetSupportedWirelessMode8185(struct net_device *dev)
static void
ActUpdateChannelAccessSetting(struct net_device *dev,
- WIRELESS_MODE WirelessMode,
- PCHANNEL_ACCESS_SETTING ChnlAccessSetting)
+ enum wireless_mode mode,
+ struct chnl_access_setting *chnl_access_setting)
{
AC_CODING eACI;
@@ -890,25 +890,25 @@ ActUpdateChannelAccessSetting(struct net_device *dev,
*/
/* Suggested by Jong, 2005.12.08. */
- ChnlAccessSetting->SIFS_Timer = 0x22;
- ChnlAccessSetting->DIFS_Timer = 0x1C; /* 2006.06.02, by rcnjko. */
- ChnlAccessSetting->SlotTimeTimer = 9; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->sifs_timer = 0x22;
+ chnl_access_setting->difs_timer = 0x1C; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->slot_time_timer = 9; /* 2006.06.02, by rcnjko. */
/*
* Suggested by wcchu, it is the default value of EIFS register,
* 2005.12.08.
*/
- ChnlAccessSetting->EIFS_Timer = 0x5B;
- ChnlAccessSetting->CWminIndex = 3; /* 2006.06.02, by rcnjko. */
- ChnlAccessSetting->CWmaxIndex = 7; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->eifs_timer = 0x5B;
+ chnl_access_setting->cwmin_index = 3; /* 2006.06.02, by rcnjko. */
+ chnl_access_setting->cwmax_index = 7; /* 2006.06.02, by rcnjko. */
- write_nic_byte(dev, SIFS, ChnlAccessSetting->SIFS_Timer);
+ write_nic_byte(dev, SIFS, chnl_access_setting->sifs_timer);
/*
* Rewrited from directly use PlatformEFIOWrite1Byte(),
* by Annie, 2006-03-29.
*/
- write_nic_byte(dev, SLOT, ChnlAccessSetting->SlotTimeTimer);
+ write_nic_byte(dev, SLOT, chnl_access_setting->slot_time_timer);
- write_nic_byte(dev, EIFS, ChnlAccessSetting->EIFS_Timer);
+ write_nic_byte(dev, EIFS, chnl_access_setting->eifs_timer);
/*
* <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS
@@ -959,7 +959,7 @@ static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
* wireless mode if we switch to specified band successfully.
*/
- ieee->mode = (WIRELESS_MODE)btWirelessMode;
+ ieee->mode = (enum wireless_mode)btWirelessMode;
/* 3. Change related setting. */
if (ieee->mode == WIRELESS_MODE_A)
@@ -1085,7 +1085,7 @@ static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
* PASSIVE LEVEL.
*/
static bool SetRFPowerState(struct net_device *dev,
- RT_RF_POWER_STATE eRFPowerState)
+ enum rt_rf_power_state eRFPowerState)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bResult = false;
@@ -1098,13 +1098,13 @@ static bool SetRFPowerState(struct net_device *dev,
return bResult;
}
-bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
+bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
u32 ChangeSource)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
bool bActionAllowed = false;
bool bConnectBySSID = false;
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
u16 RFWaitCounter = 0;
unsigned long flag;
/*
@@ -1140,7 +1140,7 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
rtState = priv->eRFPowerState;
switch (StateToSet) {
- case eRfOn:
+ case RF_ON:
/*
* Turn On RF no matter the IPS setting because we need to
* update the RF state to Ndis under Vista, or the Windows
@@ -1153,13 +1153,13 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
priv->RfOffReason = 0;
bActionAllowed = true;
- if (rtState == eRfOff &&
+ if (rtState == RF_OFF &&
ChangeSource >= RF_CHANGE_BY_HW)
bConnectBySSID = true;
}
break;
- case eRfOff:
+ case RF_OFF:
/* 070125, rcnjko: we always keep connected in AP mode. */
if (priv->RfOffReason > RF_CHANGE_BY_IPS) {
@@ -1182,7 +1182,7 @@ bool MgntActSet_RF_State(struct net_device *dev, RT_RF_POWER_STATE StateToSet,
priv->RfOffReason |= ChangeSource;
bActionAllowed = true;
break;
- case eRfSleep:
+ case RF_SLEEP:
priv->RfOffReason |= ChangeSource;
bActionAllowed = true;
break;
@@ -1233,7 +1233,7 @@ static void InactivePowerSave(struct net_device *dev)
void IPSEnter(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
if (priv->bInactivePs) {
rtState = priv->eRFPowerState;
@@ -1245,9 +1245,9 @@ void IPSEnter(struct net_device *dev)
* trigger IPS)(4) IBSS (send Beacon)
* (5) AP mode (send Beacon)
*/
- if (rtState == eRfOn && !priv->bSwRfProcessing
+ if (rtState == RF_ON && !priv->bSwRfProcessing
&& (priv->ieee80211->state != IEEE80211_LINKED)) {
- priv->eInactivePowerState = eRfOff;
+ priv->eInactivePowerState = RF_OFF;
InactivePowerSave(dev);
}
}
@@ -1255,13 +1255,13 @@ void IPSEnter(struct net_device *dev)
void IPSLeave(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- RT_RF_POWER_STATE rtState;
+ enum rt_rf_power_state rtState;
if (priv->bInactivePs) {
rtState = priv->eRFPowerState;
- if ((rtState == eRfOff || rtState == eRfSleep) &&
+ if ((rtState == RF_OFF || rtState == RF_SLEEP) &&
!priv->bSwRfProcessing
&& priv->RfOffReason <= RF_CHANGE_BY_IPS) {
- priv->eInactivePowerState = eRfOn;
+ priv->eInactivePowerState = RF_ON;
InactivePowerSave(dev);
}
}
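
The two IPS hunks above reduce to a pair of guarded transitions on the renamed enum. A condensed sketch, using the same fields and helpers that appear in this file:

	/* IPSEnter: RF on, no SW RF operation in flight, not associated -> power down */
	if (rtState == RF_ON && !priv->bSwRfProcessing &&
	    priv->ieee80211->state != IEEE80211_LINKED) {
		priv->eInactivePowerState = RF_OFF;
		InactivePowerSave(dev);
	}

	/* IPSLeave: RF was turned off/asleep by IPS only -> bring it back */
	if ((rtState == RF_OFF || rtState == RF_SLEEP) &&
	    !priv->bSwRfProcessing && priv->RfOffReason <= RF_CHANGE_BY_IPS) {
		priv->eInactivePowerState = RF_ON;
		InactivePowerSave(dev);
	}
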
@@ -1385,23 +1385,23 @@ void rtl8185b_adapter_start(struct net_device *dev)
/* Initialize RegWirelessMode if it is not a valid one. */
if (bInvalidWirelessMode)
- ieee->mode = (WIRELESS_MODE)InitWirelessMode;
+ ieee->mode = (enum wireless_mode)InitWirelessMode;
} else {
/* One of B, G, A. */
InitWirelessMode = ieee->mode;
}
- priv->eRFPowerState = eRfOff;
+ priv->eRFPowerState = RF_OFF;
priv->RfOffReason = 0;
{
- MgntActSet_RF_State(dev, eRfOn, 0);
+ MgntActSet_RF_State(dev, RF_ON, 0);
}
/*
* If inactive power mode is enabled, disable rf while in
* disconnected state.
*/
if (priv->bInactivePs)
- MgntActSet_RF_State(dev , eRfOff, RF_CHANGE_BY_IPS);
+ MgntActSet_RF_State(dev , RF_OFF, RF_CHANGE_BY_IPS);
ActSetWirelessMode8185(dev, (u8)(InitWirelessMode));
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index c9c548f17494..e45c106c2162 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -3,7 +3,6 @@ config R8188EU
depends on WLAN && USB
select WIRELESS_EXT
select WEXT_PRIV
- default N
---help---
This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
If built as a module, it will be called r8188eu.
@@ -12,7 +11,7 @@ if R8188EU
config 88EU_AP_MODE
bool "Realtek RTL8188EU AP mode"
- default Y
+ default y
---help---
This option enables Access Point mode. Unless you know that your system
will never be used as an AP, or the target system has limited memory,
@@ -20,7 +19,7 @@ config 88EU_AP_MODE
config 88EU_P2P
bool "Realtek RTL8188EU Peer-to-peer mode"
- default Y
+ default y
---help---
This option enables peer-to-peer mode for the r8188eu driver. Unless you
know that peer-to-peer (P2P) mode will never be used, or the target system has
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 0a617b42cc99..6a138ff699e9 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -34,7 +34,6 @@ r8188eu-y := \
hal/hal_com.o \
hal/odm.o \
hal/odm_debug.o \
- hal/odm_interface.o \
hal/odm_HWConfig.o \
hal/odm_RegConfig8188E.o\
hal/odm_RTL8188E.o \
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index f7f389c40e71..b574b235b340 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -9,6 +9,11 @@ TODO:
- merge Realtek's bugfixes and new features into the driver
- switch to use LIB80211
- switch to use MAC80211
+- figure out what to do with this code in rtw_recv_indicatepkt():
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+ Perhaps delete it, perhaps assign to some local variable.
Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
and Larry Finger <Larry.Finger@lwfinger.net>.
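
One way the TODO item above could be resolved, sketched under the assumption that the looked-up handler data is actually wanted (otherwise the three lines can simply be deleted); the local variable name is illustrative:

	void *rx_handler_data;

	rcu_read_lock();
	rx_handler_data = rcu_dereference(padapter->pnetdev->rx_handler_data);
	if (rx_handler_data) {
		/* ... use the bridge/rx-handler data while still under RCU ... */
	}
	rcu_read_unlock();
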
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 8ebe6bc40022..ff74d0d7efa3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -94,7 +94,7 @@ static void update_BCNTIM(struct adapter *padapter)
} else {
tim_ielen = 0;
- /* calucate head_len */
+ /* calculate head_len */
offset = _FIXED_IE_LENGTH_;
offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
@@ -129,7 +129,7 @@ static void update_BCNTIM(struct adapter *padapter)
*dst_ie++ = tim_ielen;
*dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM peroid */
+ *dst_ie++ = 1;/* DTIM period */
if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
*dst_ie++ = BIT(0);/* bitmap ctrl */
@@ -285,12 +285,12 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
phead = &pstapriv->auth_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check auth_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, auth_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, auth_list);
+ plist = plist->next;
if (psta->expire_to > 0) {
psta->expire_to--;
@@ -319,12 +319,12 @@ void expire_timeout_chk(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, asoc_list);
+ plist = plist->next;
if (chk_sta_is_alive(psta) || !psta->expire_to) {
psta->expire_to = pstapriv->expire_to;
@@ -821,7 +821,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* update cur_wireless_mode */
update_wireless_mode(padapter);
- /* udpate capability after cur_wireless_mode updated */
+ /* update capability after cur_wireless_mode updated */
update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
/* let pnetwork_mlmeext == pnetwork_mlme. */
@@ -980,7 +980,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && (_rtw_memcmp(p+2, OUI1, 4))) {
+ if ((p) && (!memcmp(p+2, OUI1, 4))) {
if (rtw_parse_wpa_ie(p, ie_len+2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -1005,7 +1005,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && _rtw_memcmp(p+2, WMM_PARA_IE, 6)) {
+ if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
*(p+8) |= BIT(7);/* QoS Info, support U-APSD */
@@ -1144,13 +1144,13 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
added = true;
DBG_88E("%s, sta has been added\n", __func__);
@@ -1205,13 +1205,13 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
if (paclnode->valid) {
paclnode->valid = false;
@@ -1339,8 +1339,7 @@ static void update_bcn_wps_ie(struct adapter *padapter)
pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
}
- if (pbackup_remainder_ie)
- kfree(pbackup_remainder_ie);
+ kfree(pbackup_remainder_ie);
}
static void update_bcn_p2p_ie(struct adapter *padapter)
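
The dropped NULL check above relies on a documented kernel guarantee: kfree(NULL) is a no-op, so the unconditional call is equivalent:

	kfree(pbackup_remainder_ie);	/* safe even if the buffer was never allocated */
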
@@ -1351,13 +1350,13 @@ static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
{
DBG_88E("%s\n", __func__);
- if (_rtw_memcmp(RTW_WPA_OUI, oui, 4))
+ if (!memcmp(RTW_WPA_OUI, oui, 4))
update_bcn_wpa_ie(padapter);
- else if (_rtw_memcmp(WMM_OUI, oui, 4))
+ else if (!memcmp(WMM_OUI, oui, 4))
update_bcn_wmm_ie(padapter);
- else if (_rtw_memcmp(WPS_OUI, oui, 4))
+ else if (!memcmp(WPS_OUI, oui, 4))
update_bcn_wps_ie(padapter);
- else if (_rtw_memcmp(P2P_OUI, oui, 4))
+ else if (!memcmp(P2P_OUI, oui, 4))
update_bcn_p2p_ie(padapter);
else
DBG_88E("unknown OUI type!\n");
@@ -1415,7 +1414,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
/*
op_mode
-Set to 0 (HT pure) under the followign conditions
+Set to 0 (HT pure) under the following conditions
- all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- all STAs in the BSS are 20 MHz HT in 20 MHz BSS
Set to 1 (HT non-member protection) if there may be non-HT STAs
@@ -1494,7 +1493,7 @@ static int rtw_ht_operation_update(struct adapter *padapter)
void associated_clients_update(struct adapter *padapter, u8 updated)
{
- /* update associcated stations cap. */
+ /* update associated stations cap. */
if (updated) {
struct list_head *phead, *plist;
struct sta_info *psta = NULL;
@@ -1503,13 +1502,13 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* check asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
VCS_update(padapter, psta);
}
@@ -1647,7 +1646,7 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
}
- /* update associcated stations cap. */
+ /* update associated stations cap. */
associated_clients_update(padapter, beacon_updated);
DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
@@ -1711,7 +1710,7 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
}
- /* update associcated stations cap. */
+ /* update associated stations cap. */
DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
@@ -1777,12 +1776,12 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* for each sta in asoc_queue */
while (!rtw_end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info, asoc_list);
+ plist = plist->next;
issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
@@ -1811,13 +1810,13 @@ int rtw_sta_flush(struct adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* free sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
rtw_list_delete(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -1942,10 +1941,10 @@ void stop_ap_mode(struct adapter *padapter)
/* for ACL */
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
if (paclnode->valid) {
paclnode->valid = false;
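
All of the list rewrites in this file follow the same pattern: the driver-private get_next()/LIST_CONTAINOR wrappers are replaced with the plain list_head next pointer and the standard container_of(). A condensed sketch of the resulting traversal:

	struct list_head *phead = &pstapriv->asoc_list;
	struct list_head *plist = phead->next;
	struct sta_info *psta;

	while (!rtw_end_of_queue_search(phead, plist)) {
		psta = container_of(plist, struct sta_info, asoc_list);
		plist = plist->next;	/* advance before psta may be unlinked */
		/* ... operate on psta ... */
	}
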
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
index 75e38d4ff4c3..e843c6bd4525 100644
--- a/drivers/staging/rtl8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -409,7 +409,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
db = priv->nethash[hash];
while (db != NULL) {
if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
- memcpy(db->macAddr, macAddr, ETH_ALEN);
+ ether_addr_copy(db->macAddr, macAddr);
db->ageing_timer = jiffies;
spin_unlock_bh(&priv->br_ext_lock);
return;
@@ -422,7 +422,7 @@ static void __nat25_db_network_insert(struct adapter *priv,
return;
}
memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
- memcpy(db->macAddr, macAddr, ETH_ALEN);
+ ether_addr_copy(db->macAddr, macAddr);
atomic_set(&db->use_count, 1);
db->ageing_timer = jiffies;
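
The memcpy()-to-ether_addr_copy() conversions assume both MAC buffers are at least 2-byte aligned, which is what <linux/etherdevice.h> requires; functionally the call is a 6-byte copy:

	/* equivalent to memcpy(db->macAddr, macAddr, ETH_ALEN) for aligned buffers */
	ether_addr_copy(db->macAddr, macAddr);
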
@@ -543,13 +543,14 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
if (!__nat25_db_network_lookup_and_replace(priv, skb, networkAddr)) {
if (*((unsigned char *)&iph->daddr + 3) == 0xff) {
/* L2 is unicast but L3 is broadcast, make L2 bacome broadcast */
- DEBUG_INFO("NAT25: Set DA as boardcast\n");
+ DEBUG_INFO("NAT25: Set DA as broadcast\n");
memset(skb->data, 0xff, ETH_ALEN);
} else {
- /* forward unknow IP packet to upper TCP/IP */
+ /* forward unknown IP packet to upper TCP/IP */
DEBUG_INFO("NAT25: Replace DA with BR's MAC\n");
if ((*(u32 *)priv->br_mac) == 0 && (*(u16 *)(priv->br_mac+4)) == 0) {
- printk("Re-init netdev_br_init() due to br_mac == 0!\n");
+ netdev_info(skb->dev,
+ "Re-init netdev_br_init() due to br_mac == 0!\n");
netdev_br_init(priv->pnetdev);
}
memcpy(skb->data, priv->br_mac, ETH_ALEN);
@@ -932,7 +933,7 @@ int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
(ph->code == PADO_CODE ? "PADO" : "PADS"), skb->dev->name);
} else { /* not add relay tag */
if (!priv->pppoe_connection_in_progress) {
- DEBUG_ERR("Discard PPPoE packet due to no connection in progresss!\n");
+ DEBUG_ERR("Discard PPPoE packet due to no connection in progress!\n");
return -1;
}
memcpy(skb->data, priv->pppoe_addr, ETH_ALEN);
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 82fe8c47a1de..c2fb050337d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -32,11 +32,10 @@ Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
No irqsave is necessary.
*/
-int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
+int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
int res = _SUCCESS;
-_func_enter_;
sema_init(&(pcmdpriv->cmd_queue_sema), 0);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
@@ -71,7 +70,6 @@ _func_enter_;
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
exit:
-_func_exit_;
return res;
}
@@ -81,24 +79,21 @@ int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
int res = _SUCCESS;
-_func_enter_;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
- _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
-_func_exit_;
return res;
}
-void rtw_free_evt_priv(struct evt_priv *pevtpriv)
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+rtw_free_evt_priv\n"));
@@ -113,21 +108,15 @@ _func_enter_;
}
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("-rtw_free_evt_priv\n"));
-_func_exit_;
}
-void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
+void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
if (pcmdpriv) {
- if (pcmdpriv->cmd_allocated_buf)
- kfree(pcmdpriv->cmd_allocated_buf);
-
- if (pcmdpriv->rsp_allocated_buf)
- kfree(pcmdpriv->rsp_allocated_buf);
+ kfree(pcmdpriv->cmd_allocated_buf);
+ kfree(pcmdpriv->rsp_allocated_buf);
}
-_func_exit_;
}
/*
@@ -140,11 +129,10 @@ ISR/Call-Back functions can't call this sub-function.
*/
-int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
unsigned long irqL;
-_func_enter_;
if (obj == NULL)
goto exit;
@@ -157,7 +145,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -167,47 +154,39 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
unsigned long irqL;
struct cmd_obj *obj;
-_func_enter_;
spin_lock_irqsave(&queue->lock, irqL);
if (rtw_is_list_empty(&(queue->queue))) {
obj = NULL;
} else {
- obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
+ obj = container_of((&queue->queue)->next, struct cmd_obj, list);
rtw_list_delete(&obj->list);
}
spin_unlock_irqrestore(&queue->lock, irqL);
-_func_exit_;
return obj;
}
-u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
u32 res;
-_func_enter_;
- res = _rtw_init_cmd_priv (pcmdpriv);
-_func_exit_;
+ res = _rtw_init_cmd_priv(pcmdpriv);
return res;
}
-u32 rtw_init_evt_priv (struct evt_priv *pevtpriv)
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
{
- int res;
-_func_enter_;
+ int res;
res = _rtw_init_evt_priv(pevtpriv);
-_func_exit_;
return res;
}
-void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_cmd_priv\n"));
_rtw_free_cmd_priv(pcmdpriv);
-_func_exit_;
}
static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
@@ -238,7 +217,6 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
int res = _FAIL;
struct adapter *padapter = pcmdpriv->padapter;
-_func_enter_;
if (cmd_obj == NULL)
goto exit;
@@ -258,34 +236,28 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
-struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
{
struct cmd_obj *cmd_obj;
-_func_enter_;
cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
-_func_exit_;
return cmd_obj;
}
-void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
{
-_func_enter_;
pcmdpriv->cmd_done_cnt++;
/* up(&(pcmdpriv->cmd_done_sema)); */
-_func_exit_;
}
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
{
-_func_enter_;
if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
/* free parmbuf in cmd_obj */
@@ -302,7 +274,6 @@ _func_enter_;
/* free cmd_obj */
kfree(pcmd);
-_func_exit_;
}
int rtw_cmd_thread(void *context)
@@ -315,7 +286,6 @@ int rtw_cmd_thread(void *context)
struct adapter *padapter = (struct adapter *)context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
-_func_enter_;
thread_enter("RTW_CMD_THREAD");
@@ -410,7 +380,6 @@ post_process:
up(&pcmdpriv->terminate_cmdthread_sema);
-_func_exit_;
complete_and_exit(NULL, 0);
}
@@ -423,15 +392,14 @@ u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
u8 ret = _SUCCESS;
-_func_enter_;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
if (ph2c == NULL) {
ret = _FAIL;
goto exit;
}
- psetusbsuspend = (struct usb_suspend_parm *)rtw_zmalloc(sizeof(struct usb_suspend_parm));
+ psetusbsuspend = kzalloc(sizeof(struct usb_suspend_parm), GFP_KERNEL);
if (psetusbsuspend == NULL) {
kfree(ph2c);
ret = _FAIL;
@@ -446,7 +414,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -465,14 +432,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
- }
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
- }
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL)
@@ -500,9 +464,6 @@ _func_enter_;
if (ssid[i].SsidLength) {
memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
psurveyPara->ssid_num++;
- if (0)
- DBG_88E(FUNC_ADPT_FMT" ssid:(%s, %d)\n", FUNC_ADPT_ARG(padapter),
- psurveyPara->ssid[i].Ssid, psurveyPara->ssid[i].SsidLength);
}
}
}
@@ -514,9 +475,6 @@ _func_enter_;
if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
psurveyPara->ch_num++;
- if (0)
- DBG_88E(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter),
- psurveyPara->ch[i].hw_value);
}
}
}
@@ -537,7 +495,6 @@ _func_enter_;
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
}
-_func_exit_;
return res;
}
@@ -549,7 +506,6 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -570,7 +526,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -582,7 +537,6 @@ u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -604,7 +558,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -624,7 +577,6 @@ u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -648,7 +600,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -659,7 +610,6 @@ u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -680,7 +630,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -691,7 +640,6 @@ u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -715,7 +663,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -725,7 +672,6 @@ u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val)
struct writeRF_parm *pwriterfparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -746,7 +692,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -757,7 +702,6 @@ u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -785,33 +729,28 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- _func_enter_;
kfree(pcmd->parmbuf);
kfree(pcmd);
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.workparam.bcompleted = true;
-_func_exit_;
}
void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- _func_enter_;
kfree(pcmd->parmbuf);
kfree(pcmd);
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.workparam.bcompleted = true;
-_func_exit_;
}
u8 rtw_createbss_cmd(struct adapter *padapter)
@@ -822,7 +761,6 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
u8 res = _SUCCESS;
-_func_enter_;
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
@@ -847,7 +785,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -858,7 +795,6 @@ u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
@@ -877,7 +813,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -898,15 +833,13 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-_func_enter_;
rtw_led_control(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
- }
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
@@ -952,11 +885,10 @@ _func_enter_;
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
- if ((psecnetwork->IELength-12) < (256-1)) {
+ if ((psecnetwork->IELength-12) < (256-1))
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
- } else {
+ else
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
- }
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
@@ -987,9 +919,12 @@ _func_enter_;
phtpriv->ht_option = false;
if (pregistrypriv->ht_enable) {
- /* Added by Albert 2010/06/23 */
- /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
- /* Especially for Realtek 8192u SoftAP. */
+ /*
+ * Added by Albert 2010/06/23
+ * For the WEP mode, we will use the bg mode to do
+ * the connection to avoid some IOT issue.
+ * Especially for Realtek 8192u SoftAP.
+ */
if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
@@ -1020,7 +955,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1032,7 +966,6 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
struct cmd_priv *cmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
@@ -1063,7 +996,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1076,7 +1008,6 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra n
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1098,7 +1029,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1115,7 +1045,6 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1142,7 +1071,7 @@ _func_enter_;
ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
@@ -1161,7 +1090,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1175,7 +1103,6 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
struct sta_info *sta = (struct sta_info *)psta;
u8 res = _SUCCESS;
-_func_enter_;
if (!enqueue) {
clear_cam_entry(padapter, entry);
@@ -1205,7 +1132,7 @@ _func_enter_;
ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
- memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+ ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
psetstakey_para->algorithm = _NO_PRIVACY_;
@@ -1215,7 +1142,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1226,7 +1152,6 @@ u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_tab
struct setratable_parm *psetrttblparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1247,7 +1172,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1257,7 +1181,6 @@ u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval)
struct getratable_parm *pgetrttblparm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1272,8 +1195,6 @@ _func_enter_;
goto exit;
}
-/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */
-
_rtw_init_listhead(&ph2c->list);
ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable);
ph2c->parmbuf = (unsigned char *)pgetrttblparm;
@@ -1285,7 +1206,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1298,7 +1218,6 @@ u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr)
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1324,16 +1243,15 @@ _func_enter_;
ph2c->rsp = (u8 *)psetassocsta_rsp;
ph2c->rspsz = sizeof(struct set_assocsta_rsp);
- memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(psetassocsta_para->addr, mac_addr);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
- }
+}
u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
{
@@ -1342,7 +1260,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
struct addBaReq_parm *paddbareq_parm;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1369,7 +1286,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1381,7 +1297,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1406,7 +1321,6 @@ _func_enter_;
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1418,7 +1332,6 @@ u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enque
u8 res = _SUCCESS;
-_func_enter_;
DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
@@ -1460,7 +1373,6 @@ exit:
DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res);
-_func_exit_;
return res;
}
@@ -1473,7 +1385,6 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
@@ -1516,7 +1427,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1529,7 +1439,6 @@ u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_led_blink_cmd\n"));
@@ -1553,7 +1462,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1566,7 +1474,6 @@ u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no)
u8 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_csa_cmd\n"));
@@ -1590,7 +1497,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1685,7 +1591,6 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
u8 mstatus;
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
@@ -1724,7 +1629,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
@@ -1735,11 +1639,6 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
/* struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; */
u8 res = _SUCCESS;
-_func_enter_;
-
- /* if (!pwrctrlpriv->bLeisurePs) */
- /* return res; */
-
if (enqueue) {
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -1767,7 +1666,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1785,7 +1683,6 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
@@ -1806,7 +1703,6 @@ _func_enter_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-_func_exit_;
return res;
}
@@ -1824,7 +1720,6 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
u8 support_ant_div;
u8 res = _SUCCESS;
-_func_enter_;
rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
if (!support_ant_div)
return res;
@@ -1854,7 +1749,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1873,7 +1767,6 @@ u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
@@ -1892,8 +1785,8 @@ _func_enter_;
}
pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
- pdrvextra_cmd_parm->type_size = intCmdType; /* As the command tppe. */
- pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+ pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
@@ -1901,7 +1794,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1914,7 +1806,6 @@ u8 rtw_ps_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ppscmd == NULL) {
@@ -1937,7 +1828,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -2084,12 +1974,16 @@ static void c2h_wk_callback(struct work_struct *work)
evtpriv->c2h_wk_alive = true;
while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
- if ((c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue)) != NULL) {
+ c2h_evt = (struct c2h_evt_hdr *)
+ rtw_cbuf_pop(evtpriv->c2h_queue);
+ if (c2h_evt != NULL)
/* This C2H event is read, clear it */
c2h_evt_clear(adapter);
- } else if ((c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16)) != NULL) {
+ else {
+ c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16);
/* This C2H event is not read, read & clear now */
- if (c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
+ if (c2h_evt != NULL &&
+ c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
continue;
}
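
The restructured loop above separates the pop/allocate steps from their checks and adds a NULL test before c2h_evt_read(). The resulting control flow, condensed:

	while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
		c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue);
		if (c2h_evt != NULL) {
			c2h_evt_clear(adapter);		/* already read, just clear it */
		} else {
			c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16);
			if (c2h_evt == NULL ||
			    c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
				continue;		/* nothing usable, try the next entry */
		}
		/* ... filter and handle c2h_evt ... */
	}
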
@@ -2147,8 +2041,10 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
case P2P_PROTO_WK_CID:
- /* Commented by Albert 2011/07/01 */
- /* I used the type_size as the type command */
+ /*
+ * Commented by Albert 2011/07/01
+ * I used the type_size as the type command
+ */
p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
#endif
@@ -2174,13 +2070,12 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res == H2C_DROPPED) {
/* TODO: cancel timer and do timeout handler directly... */
/* need to make timeout handlerOS independent */
_set_timer(&pmlmepriv->scan_to_timer, 1);
- } else if (pcmd->res != H2C_SUCCESS) {
+ } else if (pcmd->res != H2C_SUCCESS) {
_set_timer(&pmlmepriv->scan_to_timer, 1);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
}
@@ -2188,13 +2083,11 @@ _func_enter_;
/* free cmd */
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res != H2C_SUCCESS) {
spin_lock_bh(&pmlmepriv->lock);
@@ -2202,24 +2095,18 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
-
- goto exit;
+ return;
} else /* clear bridge database */
nat25_db_cleanup(padapter);
/* free cmd */
rtw_free_cmd_obj(pcmd);
-
-exit:
-
-_func_exit_;
}
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
if (pcmd->res == H2C_DROPPED) {
/* TODO: cancel timer and do timeout handler directly... */
@@ -2232,7 +2119,6 @@ _func_enter_;
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2244,9 +2130,8 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
-_func_enter_;
- if ((pcmd->res != H2C_SUCCESS)) {
+ if (pcmd->res != H2C_SUCCESS) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback Fail ************\n\n."));
_set_timer(&pmlmepriv->assoc_timer, 1);
}
@@ -2261,7 +2146,7 @@ _func_enter_;
psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
- goto createbss_cmd_fail ;
+ goto createbss_cmd_fail;
}
}
@@ -2298,7 +2183,6 @@ createbss_cmd_fail:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2307,7 +2191,6 @@ void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pc
struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
-_func_enter_;
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
@@ -2315,7 +2198,6 @@ _func_enter_;
}
exit:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -2326,7 +2208,6 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
-_func_enter_;
if (psta == NULL) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callbac => can't get sta_info\n\n"));
@@ -2347,5 +2228,4 @@ _func_enter_;
exit:
rtw_free_cmd_obj(pcmd);
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index af32041a1e97..2beb2695e0f2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -233,7 +233,7 @@ int proc_get_rf_info(char *page, char **start,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int len = 0;
- len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offet=%d\n",
+ len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offset=%d\n",
pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
*eof = 1;
return len;
@@ -783,7 +783,7 @@ int proc_set_rx_stbc(struct file *file, const char __user *buffer,
if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
if (pregpriv) {
pregpriv->rx_stbc = mode;
- printk("rx_stbc=%d\n", mode);
+ netdev_info(dev, "rx_stbc=%d\n", mode);
}
}
return count;
@@ -820,7 +820,7 @@ int proc_set_rssi_disp(struct file *file, const char __user *buffer,
if (enable) {
DBG_88E("Turn On Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = enable ;
+ padapter->bRxRSSIDisplay = enable;
} else {
DBG_88E("Turn Off Rx RSSI Display Function\n");
padapter->bRxRSSIDisplay = 0;
@@ -851,12 +851,12 @@ int proc_get_all_sta_info(char *page, char **start,
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- plist = get_next(plist);
+ plist = plist->next;
len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 6149e3aaa011..40afe48a12ef 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -76,11 +76,10 @@ Efuse_Write1ByteToFakeContent(
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
- if (fakeEfuseBank == 0) {
+ if (fakeEfuseBank == 0)
fakeEfuseContent[Offset] = Value;
- } else {
+ else
fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
- }
return true;
}
@@ -156,17 +155,15 @@ Efuse_CalculateWordCnts(u8 word_en)
return word_cnts;
}
-/* */
-/* Description: */
-/* Execute E-Fuse read byte operation. */
-/* Referred from SD1 Richard. */
-/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
-/* */
-/* Created by Roger, 2008.10.21. */
-/* */
+/*
+ * Description:
+ * Execute E-Fuse read byte operation.
+ * Referred from SD1 Richard.
+ * Assumption:
+ * 1. Boot from E-Fuse and successfully auto-load.
+ * 2. PASSIVE_LEVEL (USB interface)
+ * Created by Roger, 2008.10.21.
+ */
void
ReadEFuseByte(
struct adapter *Adapter,
@@ -210,23 +207,21 @@ ReadEFuseByte(
*pbuf = (u8)(value32 & 0xff);
}
-/* */
-/* Description: */
-/* 1. Execute E-Fuse read byte operation according as map offset and */
-/* save to E-Fuse table. */
-/* 2. Referred from SD1 Richard. */
-/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
-/* */
-/* Created by Roger, 2008.10.21. */
-/* */
-/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
-/* 2. Add efuse utilization collect. */
-/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
-/* write addr must be after sec5. */
-/* */
+/* Description:
+ * 1. Execute E-Fuse read byte operation according as map offset and
+ * save to E-Fuse table.
+ * 2. Referred from SD1 Richard.
+ * Assumption:
+ * 1. Boot from E-Fuse and successfully auto-load.
+ * 2. PASSIVE_LEVEL (USB interface)
+ * Created by Roger, 2008.10.21.
+ * 2008/12/12 MH
+ * 1. Reorganize code flow and reserve bytes. and add description.
+ * 2. Add efuse utilization collect.
+ * 2008/12/22 MH
+ * Read Efuse must check if we write section 1 data again!!!
+ * Sec1 write addr must be after sec5.
+ */
static void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool pseudo)
{
@@ -450,7 +445,7 @@ u8 rtw_efuse_access(struct adapter *padapter, u8 write, u16 start_addr, u16 cnts
{
int i = 0;
u16 real_content_len = 0, max_available_size = 0;
- u8 res = _FAIL ;
+ u8 res = _FAIL;
u8 (*rw8)(struct adapter *, u16, u8*);
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&real_content_len, false);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index e6f98fb8f113..0552019d1cf7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -147,7 +147,6 @@ u8 *rtw_set_ie
uint *frlen /* frame length */
)
{
-_func_enter_;
*pbuf = (u8)index;
*(pbuf + 1) = (u8)len;
@@ -157,7 +156,6 @@ _func_enter_;
*frlen = *frlen + (len + 2);
-_func_exit_;
return pbuf + len + 2;
}
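
rtw_set_ie() emits a standard information element, one byte of element ID, one byte of length, then the payload, bumps *frlen by len + 2 and returns the next write position. An illustrative call; the _SSID_IE_ macro (element ID 0) is assumed from the driver's wifi.h:

	u8 buf[32], *p = buf;
	u8 ssid[] = "example";
	uint frlen = 0;

	p = rtw_set_ie(p, _SSID_IE_, sizeof(ssid) - 1, ssid, &frlen);
	/* buf: 0x00 0x07 'e' 'x' 'a' 'm' 'p' 'l' 'e'; frlen == 9; p == buf + 9 */
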
@@ -221,11 +219,8 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
{
int tmp, i;
u8 *p;
-_func_enter_;
- if (limit < 1) {
- _func_exit_;
+ if (limit < 1)
return NULL;
- }
p = pbuf;
i = 0;
@@ -242,7 +237,6 @@ _func_enter_;
if (i >= limit)
break;
}
-_func_exit_;
return NULL;
}
@@ -273,7 +267,7 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
cnt = 0;
while (cnt < in_len) {
- if (eid == in_ie[cnt] && (!oui || _rtw_memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
target_ie = &in_ie[cnt];
if (ie)
@@ -339,7 +333,6 @@ exit:
void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
{
-_func_enter_;
_rtw_memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
@@ -361,13 +354,11 @@ _func_enter_;
memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
break;
}
-_func_exit_;
}
uint rtw_get_rateset_len(u8 *rateset)
{
uint i = 0;
-_func_enter_;
while (1) {
if ((rateset[i]) == 0)
break;
@@ -375,7 +366,6 @@ _func_enter_;
break;
i++;
}
-_func_exit_;
return i;
}
@@ -386,7 +376,6 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->IEs;
-_func_enter_;
/* timestamp will be inserted by hardware */
sz += 8;
@@ -444,7 +433,6 @@ _func_enter_;
if (rateLen > 8)
ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
-_func_exit_;
return sz;
}
@@ -463,7 +451,7 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
if (pbuf) {
/* check if oui matches... */
- if (_rtw_memcmp((pbuf + 2), wpa_oui_type, sizeof (wpa_oui_type)) == false)
+ if (!memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type)) == false)
goto check_next_ie;
/* check version... */
@@ -497,15 +485,15 @@ unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
int rtw_get_wpa_cipher_suite(u8 *s)
{
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN))
return WPA_CIPHER_NONE;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN))
return WPA_CIPHER_WEP40;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN))
return WPA_CIPHER_TKIP;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN))
return WPA_CIPHER_CCMP;
- if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN) == true)
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN))
return WPA_CIPHER_WEP104;
return 0;
@@ -513,15 +501,15 @@ int rtw_get_wpa_cipher_suite(u8 *s)
int rtw_get_wpa2_cipher_suite(u8 *s)
{
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN))
return WPA_CIPHER_NONE;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN))
return WPA_CIPHER_WEP40;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN))
return WPA_CIPHER_TKIP;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN))
return WPA_CIPHER_CCMP;
- if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN) == true)
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN))
return WPA_CIPHER_WEP104;
return 0;
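
These conversions rest on the identity that memcmp() returns 0 on equality, so the driver's boolean-true-on-match helper maps onto a negation. A worked equivalence (illustrative):

	/* _rtw_memcmp(a, b, n) == true   <=>  memcmp(a, b, n) == 0 */
	/* _rtw_memcmp(a, b, n) == false  <=>  memcmp(a, b, n) != 0 */
	if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN))
		return WPA_CIPHER_TKIP;	/* the four selector bytes matched exactly */
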
@@ -542,7 +530,7 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
- (_rtw_memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN) != true))
+ (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
return _FAIL;
pos = wpa_ie;
@@ -587,7 +575,7 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ if (!memcmp(pos, SUITE_1X, 4)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__));
*is_8021x = 1;
}
@@ -657,7 +645,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
if (is_8021x) {
if (left >= 6) {
pos += 2;
- if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ if (!memcmp(pos, SUITE_1X, 4)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__));
*is_8021x = 1;
}
@@ -672,7 +660,6 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint cnt;
-_func_enter_;
/* Search required WPA or WPA2 IE and copy to sec_ie[] */
@@ -683,7 +670,7 @@ _func_enter_;
while (cnt < in_len) {
authmode = in_ie[cnt];
- if ((authmode == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
sec_idx, in_ie[cnt+1]+2));
@@ -726,7 +713,6 @@ _func_enter_;
}
}
-_func_exit_;
return *rsn_len + *wpa_len;
}
@@ -741,7 +727,7 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
eid = ie_ptr[0];
- if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&ie_ptr[2], wps_oui, 4))) {
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
*wps_ielen = ie_ptr[1]+2;
match = true;
}
@@ -774,7 +760,7 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
while (cnt < in_len) {
eid = in_ie[cnt];
- if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) {
wpsie_ptr = &in_ie[cnt];
if (wps_ie)
@@ -813,7 +799,7 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_at
*len_attr = 0;
if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- (_rtw_memcmp(wps_ie + 2, wps_oui , 4) != true))
+ (memcmp(wps_ie + 2, wps_oui , 4)))
return attr_ptr;
/* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
@@ -1223,7 +1209,7 @@ u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
dump_stack();
return NULL;
}
- if ((eid == _VENDOR_SPECIFIC_IE_) && (_rtw_memcmp(&in_ie[cnt+2], p2p_oui, 4) == true)) {
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&in_ie[cnt+2], p2p_oui, 4))) {
p2p_ie_ptr = in_ie + cnt;
if (p2p_ie != NULL)
@@ -1258,7 +1244,7 @@ u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id , u8 *buf_att
*len_attr = 0;
if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
- (_rtw_memcmp(p2p_ie + 2, p2p_oui , 4) != true))
+ (memcmp(p2p_ie + 2, p2p_oui , 4)))
return attr_ptr;
/* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
index ff0398fca52b..7530532b3ff0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_io.c
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -59,10 +59,8 @@ u8 _rtw_read8(struct adapter *adapter, u32 addr)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
- _func_enter_;
_read8 = pintfhdl->io_ops._read8;
r_val = _read8(pintfhdl, addr);
- _func_exit_;
return r_val;
}
@@ -72,11 +70,9 @@ u16 _rtw_read16(struct adapter *adapter, u32 addr)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
-_func_enter_;
_read16 = pintfhdl->io_ops._read16;
r_val = _read16(pintfhdl, addr);
-_func_exit_;
return r_val;
}
@@ -86,11 +82,9 @@ u32 _rtw_read32(struct adapter *adapter, u32 addr)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
-_func_enter_;
_read32 = pintfhdl->io_ops._read32;
r_val = _read32(pintfhdl, addr);
-_func_exit_;
return r_val;
}
@@ -100,11 +94,9 @@ int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
int ret;
- _func_enter_;
_write8 = pintfhdl->io_ops._write8;
ret = _write8(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -115,11 +107,9 @@ int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
int ret;
- _func_enter_;
_write16 = pintfhdl->io_ops._write16;
ret = _write16(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -129,11 +119,9 @@ int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
int ret;
- _func_enter_;
_write32 = pintfhdl->io_ops._write32;
ret = _write32(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -144,11 +132,9 @@ int _rtw_writeN(struct adapter *adapter, u32 addr , u32 length , u8 *pdata)
struct intf_hdl *pintfhdl = (struct intf_hdl *)(&(pio_priv->intf));
int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
int ret;
- _func_enter_;
_writeN = pintfhdl->io_ops._writeN;
ret = _writeN(pintfhdl, addr, length, pdata);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -158,11 +144,9 @@ int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
int ret;
- _func_enter_;
_write8_async = pintfhdl->io_ops._write8_async;
ret = _write8_async(pintfhdl, addr, val);
- _func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -174,10 +158,8 @@ int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val)
int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
int ret;
-_func_enter_;
_write16_async = pintfhdl->io_ops._write16_async;
ret = _write16_async(pintfhdl, addr, val);
-_func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -189,10 +171,8 @@ int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val)
int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
int ret;
-_func_enter_;
_write32_async = pintfhdl->io_ops._write32_async;
ret = _write32_async(pintfhdl, addr, val);
-_func_exit_;
return RTW_STATUS_CODE(ret);
}
@@ -203,7 +183,6 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
@@ -212,7 +191,6 @@ void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
}
_read_mem = pintfhdl->io_ops._read_mem;
_read_mem(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
@@ -221,13 +199,11 @@ void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
_write_mem = pintfhdl->io_ops._write_mem;
_write_mem(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
@@ -236,7 +212,6 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- _func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
@@ -249,7 +224,6 @@ void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
_read_port(pintfhdl, addr, cnt, pmem);
- _func_exit_;
}
void _rtw_read_port_cancel(struct adapter *adapter)
@@ -271,13 +245,11 @@ u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
struct intf_hdl *pintfhdl = &(pio_priv->intf);
u32 ret = _SUCCESS;
- _func_enter_;
_write_port = pintfhdl->io_ops._write_port;
ret = _write_port(pintfhdl, addr, cnt, pmem);
- _func_exit_;
return ret;
}
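All of the rtw_io.c wrappers above simply look up a handler in the interface's io_ops table and forward the call; the only change in these hunks is dropping the _func_enter_/_func_exit_ trace macros around that dispatch. A stripped-down sketch of the pattern, using illustrative type names rather than the driver's real intf_hdl/io_ops definitions:

	#include <linux/types.h>

	/* Illustrative ops table: one callback per access width. */
	struct example_io_ops {
		u8 (*read8)(void *hdl, u32 addr);
		int (*write8)(void *hdl, u32 addr, u8 val);
	};

	struct example_intf_hdl {
		void *priv;
		struct example_io_ops ops;
	};

	/* The wrapper does nothing beyond fetching the callback and calling it. */
	static u8 example_read8(struct example_intf_hdl *intf, u32 addr)
	{
		return intf->ops.read8(intf->priv, addr);
	}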
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index e25b39b97d9e..f1398ab01d7b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -42,7 +42,6 @@ u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
u8 i;
u8 ret = true;
-_func_enter_;
if (ssid->SsidLength > 32) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n"));
@@ -61,8 +60,6 @@ _func_enter_;
exit:
-_func_exit_;
-
return ret;
}
@@ -74,11 +71,10 @@ u8 rtw_do_join(struct adapter *padapter)
struct __queue *queue = &(pmlmepriv->scanned_queue);
u8 ret = _SUCCESS;
-_func_enter_;
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist));
@@ -170,7 +166,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -181,7 +176,6 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
@@ -205,7 +199,7 @@ _func_enter_;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
- if (_rtw_memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
+ if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)
goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
} else {
@@ -257,7 +251,6 @@ exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("rtw_set_802_11_bssid: status=%d\n", status));
-_func_exit_;
return status;
}
@@ -270,7 +263,6 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
-_func_enter_;
DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
ssid->Ssid, get_fwstate(pmlmepriv));
@@ -285,18 +277,17 @@ _func_enter_;
spin_lock_bh(&pmlmepriv->lock);
DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
goto handle_tkip_countermeasure;
- } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
goto release_mlme_lock;
- }
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
- (_rtw_memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
+ (!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("Set SSID is the same ssid, fw_state = 0x%08x\n",
@@ -357,11 +348,10 @@ handle_tkip_countermeasure:
memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
pmlmepriv->to_join = true;
- } else {
+ else
status = rtw_do_join(padapter);
- }
release_mlme_lock:
spin_unlock_bh(&pmlmepriv->lock);
@@ -369,7 +359,6 @@ release_mlme_lock:
exit:
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("-rtw_set_802_11_ssid: status =%d\n", status));
-_func_exit_;
return status;
}
@@ -380,7 +369,6 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
struct wlan_network *cur_network = &pmlmepriv->cur_network;
enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
@@ -411,7 +399,7 @@ _func_enter_;
if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
- rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */
+ rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have checked whether to issue dis-assoc_cmd or not */
}
*pold_state = networktype;
@@ -438,7 +426,6 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
}
-_func_exit_;
return true;
}
@@ -448,7 +435,6 @@ u8 rtw_set_802_11_disassociate(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
spin_lock_bh(&pmlmepriv->lock);
@@ -464,7 +450,6 @@ _func_enter_;
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
return true;
}
@@ -474,7 +459,6 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv)));
@@ -494,11 +478,12 @@ _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
res = true;
- if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) {
+ if (check_fwstate(pmlmepriv,
+ (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n"));
- }
+
} else {
if (rtw_is_scan_deny(padapter)) {
DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
@@ -514,7 +499,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -525,7 +509,6 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11
int res;
u8 ret;
-_func_enter_;
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
@@ -545,7 +528,6 @@ _func_enter_;
else
ret = false;
-_func_exit_;
return ret;
}
@@ -556,7 +538,6 @@ u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
struct security_priv *psecuritypriv = &(padapter->securitypriv);
u8 ret = _SUCCESS;
-_func_enter_;
keyid = wep->KeyIndex & 0x3fffffff;
@@ -581,7 +562,7 @@ _func_enter_;
break;
}
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
- ("rtw_set_802_11_add_wep:befor memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
+ ("rtw_set_802_11_add_wep:before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
wep->KeyLength, wep->KeyIndex, keyid));
memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength);
@@ -611,7 +592,6 @@ _func_enter_;
if (res == _FAIL)
ret = false;
exit:
-_func_exit_;
return ret;
}
@@ -619,7 +599,6 @@ u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex)
{
u8 ret = _SUCCESS;
-_func_enter_;
if (keyindex >= 0x80000000 || padapter == NULL) {
ret = false;
goto exit;
@@ -638,7 +617,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
@@ -651,7 +629,6 @@ u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key)
u8 bgrouptkey = false;/* can be removed later */
u8 ret = _SUCCESS;
-_func_enter_;
if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) {
/* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */
@@ -992,7 +969,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
@@ -1004,7 +980,6 @@ u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove
u8 keyIndex = (u8)key->KeyIndex & 0x03;
u8 ret = _SUCCESS;
-_func_enter_;
if ((key->KeyIndex & 0xbffffffc) > 0) {
ret = _FAIL;
@@ -1032,7 +1007,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index afac53709843..42b41ab1bce1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -34,7 +34,7 @@ void BlinkTimerCallback(void *data)
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
return;
- _set_workitem(&(pLed->BlinkWorkItem));
+ schedule_work(&(pLed->BlinkWorkItem));
}
/* */
@@ -80,7 +80,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_8
_init_timer(&(pLed->BlinkTimer), padapter->pnetdev, BlinkTimerCallback, pLed);
- _init_workitem(&(pLed->BlinkWorkItem), BlinkWorkItemCallback, pLed);
+ INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback);
}
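The rtw_led.c hunks above move the LED blink handling from the driver's _init_workitem()/_set_workitem() wrappers to the kernel's INIT_WORK()/schedule_work(). One point worth noting is that a schedule_work() handler receives the work_struct pointer rather than a private-data argument, so the owning object is normally recovered with container_of(); the callback conversion itself is not visible in this excerpt, and the structure below only illustrates that standard pattern:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct example_led {
		struct work_struct blink_work;
		int state;
	};

	/* Work handlers get the work_struct, so recover the LED with container_of(). */
	static void example_blink_work(struct work_struct *work)
	{
		struct example_led *led = container_of(work, struct example_led, blink_work);

		led->state ^= 1;
	}

	static void example_led_init(struct example_led *led)
	{
		INIT_WORK(&led->blink_work, example_blink_work);
	}

	static void example_led_blink(struct example_led *led)
	{
		schedule_work(&led->blink_work);	/* queue on the system workqueue */
	}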
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index c7382303088f..769d4ddc6754 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -31,11 +31,12 @@
#include <wlan_bssdef.h>
#include <rtw_ioctl_set.h>
#include <usb_osintf.h>
+#include <linux/vmalloc.h>
extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
-int _rtw_init_mlme_priv (struct adapter *padapter)
+int _rtw_init_mlme_priv(struct adapter *padapter)
{
int i;
u8 *pbuf;
@@ -43,9 +44,7 @@ int _rtw_init_mlme_priv (struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int res = _SUCCESS;
-_func_enter_;
-
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -62,7 +61,7 @@ _func_enter_;
_rtw_memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
- pbuf = rtw_zvmalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+ pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
if (pbuf == NULL) {
res = _FAIL;
@@ -87,13 +86,10 @@ _func_enter_;
rtw_init_mlme_timer(padapter);
exit:
-
-_func_exit_;
-
return res;
}
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
kfree(*ppie);
@@ -122,24 +118,18 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
}
#endif
-void _rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
-_func_enter_;
-
rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv) {
- if (pmlmepriv->free_bss_buf) {
- rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
- }
+ if (pmlmepriv->free_bss_buf)
+ vfree(pmlmepriv->free_bss_buf);
}
-_func_exit_;
}
int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
{
-_func_enter_;
-
if (pnetwork == NULL)
goto exit;
@@ -150,9 +140,6 @@ _func_enter_;
spin_unlock_bh(&queue->lock);
exit:
-
-_func_exit_;
-
return _SUCCESS;
}
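The _rtw_init_mlme_priv()/_rtw_free_mlme_priv() hunks above swap the driver's rtw_zvmalloc()/rtw_vmfree() wrappers for vzalloc()/vfree(), which is why linux/vmalloc.h is now included. A minimal sketch of that pairing; the pool structure and size below are illustrative, not the driver's:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/vmalloc.h>

	#define EXAMPLE_BSS_CNT 128

	struct example_pool {
		u8 *buf;
	};

	static int example_pool_init(struct example_pool *p, size_t entry_sz)
	{
		/* vzalloc() returns zeroed, virtually contiguous memory. */
		p->buf = vzalloc(EXAMPLE_BSS_CNT * entry_sz);
		return p->buf ? 0 : -ENOMEM;
	}

	static void example_pool_free(struct example_pool *p)
	{
		vfree(p->buf);	/* vfree(NULL) is a no-op, so no NULL check is needed */
		p->buf = NULL;
	}

Unlike the old rtw_vmfree(buf, size), vfree() does not take the allocation size, which is why the size argument disappears in the converted call.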
@@ -160,22 +147,18 @@ struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
{
struct wlan_network *pnetwork;
-_func_enter_;
-
spin_lock_bh(&queue->lock);
if (_rtw_queue_empty(queue)) {
pnetwork = NULL;
} else {
- pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list);
+ pnetwork = container_of((&queue->queue)->next, struct wlan_network, list);
rtw_list_delete(&(pnetwork->list));
}
spin_unlock_bh(&queue->lock);
-_func_exit_;
-
return pnetwork;
}
@@ -185,17 +168,15 @@ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *f
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
struct list_head *plist = NULL;
-_func_enter_;
-
spin_lock_bh(&free_queue->lock);
if (_rtw_queue_empty(free_queue) == true) {
pnetwork = NULL;
goto exit;
}
- plist = get_next(&(free_queue->queue));
+ plist = free_queue->queue.next;
- pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
rtw_list_delete(&pnetwork->list);
@@ -211,8 +192,6 @@ _func_enter_;
exit:
spin_unlock_bh(&free_queue->lock);
-_func_exit_;
-
return pnetwork;
}
@@ -222,13 +201,11 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwo
u32 lifetime = SCANQUEUE_LIFETIME;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
-_func_enter_;
-
if (pnetwork == NULL)
- goto exit;
+ return;
if (pnetwork->fixed)
- goto exit;
+ return;
curr_time = jiffies;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
@@ -236,33 +213,26 @@ _func_enter_;
if (!isfreeall) {
delta_time = (curr_time - pnetwork->last_scanned)/HZ;
if (delta_time < lifetime)/* unit:sec */
- goto exit;
+ return;
}
spin_lock_bh(&free_queue->lock);
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
pmlmepriv->num_of_scanned--;
spin_unlock_bh(&free_queue->lock);
-
-exit:
-_func_exit_;
}
void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
{
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
-_func_enter_;
if (pnetwork == NULL)
- goto exit;
+ return;
if (pnetwork->fixed)
- goto exit;
+ return;
rtw_list_delete(&(pnetwork->list));
rtw_list_insert_tail(&(pnetwork->list), get_list_head(free_queue));
pmlmepriv->num_of_scanned--;
-exit:
-
-_func_exit_;
}
/*
@@ -276,24 +246,22 @@ struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
struct wlan_network *pnetwork = NULL;
u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-_func_enter_;
- if (_rtw_memcmp(zero_addr, addr, ETH_ALEN)) {
+ if (!memcmp(zero_addr, addr, ETH_ALEN)) {
pnetwork = NULL;
goto exit;
}
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (plist != phead) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network , list);
- if (_rtw_memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN) == true)
+ pnetwork = container_of(plist, struct wlan_network, list);
+ if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
- plist = get_next(plist);
+ plist = plist->next;
}
if (plist == phead)
pnetwork = NULL;
exit:
-_func_exit_;
return pnetwork;
}
@@ -305,29 +273,24 @@ void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
-_func_enter_;
-
-
spin_lock_bh(&scanned_queue->lock);
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
- plist = get_next(plist);
+ plist = plist->next;
_rtw_free_network(pmlmepriv, pnetwork, isfreeall);
}
spin_unlock_bh(&scanned_queue->lock);
-_func_exit_;
}
int rtw_if_up(struct adapter *padapter)
{
int res;
-_func_enter_;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
(check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
@@ -338,8 +301,6 @@ _func_enter_;
} else {
res = true;
}
-
-_func_exit_;
return res;
}
@@ -347,14 +308,12 @@ void rtw_generate_random_ibss(u8 *pibss)
{
u32 curtime = jiffies;
-_func_enter_;
pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */
pibss[1] = 0x11;
pibss[2] = 0x87;
pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
-_func_exit_;
return;
}
@@ -367,11 +326,9 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
-_func_enter_;
memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
-_func_exit_;
return le16_to_cpu(val);
}
@@ -385,46 +342,34 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
return ie + 8;
}
-int rtw_init_mlme_priv (struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+int rtw_init_mlme_priv(struct adapter *padapter)
{
int res;
-_func_enter_;
res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
-_func_exit_;
return res;
}
-void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n"));
- _rtw_free_mlme_priv (pmlmepriv);
-_func_exit_;
+ _rtw_free_mlme_priv(pmlmepriv);
}
static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
{
- struct wlan_network *pnetwork;
-_func_enter_;
- pnetwork = _rtw_alloc_network(pmlmepriv);
-_func_exit_;
- return pnetwork;
+ return _rtw_alloc_network(pmlmepriv);
}
static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
struct wlan_network *pnetwork)
{
-_func_enter_;
_rtw_free_network_nolock(pmlmepriv, pnetwork);
-_func_exit_;
}
void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
{
-_func_enter_;
_rtw_free_network_queue(dev, isfreeall);
-_func_exit_;
}
/*
@@ -458,7 +403,7 @@ int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
{
return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
- _rtw_memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+ !memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
}
int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
@@ -466,7 +411,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
u16 s_cap, d_cap;
__le16 le_scap, le_dcap;
-_func_enter_;
memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
@@ -474,11 +418,9 @@ _func_enter_;
s_cap = le16_to_cpu(le_scap);
d_cap = le16_to_cpu(le_dcap);
-_func_exit_;
-
return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
- ((_rtw_memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) &&
- ((_rtw_memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
+ (!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) &&
+ (!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
(d_cap & WLAN_CAPABILITY_IBSS)) &&
((s_cap & WLAN_CAPABILITY_BSS) ==
@@ -491,25 +433,23 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
struct wlan_network *pwlan = NULL;
struct wlan_network *oldest = NULL;
-_func_enter_;
phead = get_list_head(scanned_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pwlan = container_of(plist, struct wlan_network, list);
if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
- plist = get_next(plist);
+ plist = plist->next;
}
-_func_exit_;
return oldest;
}
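The mlme hunks above also replace the driver's get_next()/LIST_CONTAINOR() list helpers with the plain list_head ->next pointer and container_of(). A minimal sketch of the same oldest-entry walk over a kernel list, with an illustrative node type; list_for_each_entry() would be the more idiomatic form, but the open-coded loop mirrors what the converted code does:

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/list.h>

	struct example_network {
		struct list_head list;
		unsigned long last_scanned;
	};

	static struct example_network *example_oldest(struct list_head *head)
	{
		struct list_head *pos;
		struct example_network *net, *oldest = NULL;

		/* Walk from head->next until the cursor wraps back to the head. */
		for (pos = head->next; pos != head; pos = pos->next) {
			net = container_of(pos, struct example_network, list);
			if (!oldest || time_after(oldest->last_scanned, net->last_scanned))
				oldest = net;
		}
		return oldest;
	}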
@@ -522,7 +462,6 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
u8 sq_final;
long rssi_final;
-_func_enter_;
rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi, need consider again */
/* The rule below is 1/5 for sample value, 4/5 for history value */
@@ -553,22 +492,18 @@ _func_enter_;
dst->PhyInfo.SignalQuality = sq_final;
dst->Rssi = rssi_final;
-_func_exit_;
}
static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
-_func_enter_;
-
if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) &&
(is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) {
update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
pmlmepriv->cur_network.network.IELength);
}
-_func_exit_;
}
/*
@@ -583,24 +518,22 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
struct wlan_network *pnetwork = NULL;
struct wlan_network *oldest = NULL;
-_func_enter_;
-
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (is_same_network(&(pnetwork->network), target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
time_after(oldest->last_scanned, pnetwork->last_scanned))
oldest = pnetwork;
- plist = get_next(plist);
+ plist = plist->next;
}
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
@@ -663,27 +596,26 @@ _func_enter_;
exit:
spin_unlock_bh(&queue->lock);
-_func_exit_;
}
static void rtw_add_network(struct adapter *adapter,
struct wlan_bssid_ex *pnetwork)
{
-_func_enter_;
#if defined(CONFIG_88EU_P2P)
rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
#endif
update_current_network(adapter, pnetwork);
rtw_update_scanned_network(adapter, pnetwork);
-_func_exit_;
}
-/* select the desired network based on the capability of the (i)bss. */
-/* check items: (1) security */
-/* (2) network_type */
-/* (3) WMM */
-/* (4) HT */
-/* (5) others */
+/*
+ * select the desired network based on the capability of the (i)bss.
+ * check items: (1) security
+ * (2) network_type
+ * (3) WMM
+ * (4) HT
+ * (5) others
+ */
static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
{
struct security_priv *psecuritypriv = &adapter->securitypriv;
@@ -728,9 +660,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
/* TODO: Perry: For Power Management */
void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
-_func_exit_;
return;
}
@@ -741,8 +671,6 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
struct wlan_bssid_ex *pnetwork;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
-_func_enter_;
-
pnetwork = (struct wlan_bssid_ex *)pbuf;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid=%s\n", pnetwork->Ssid.Ssid));
@@ -756,7 +684,7 @@ _func_enter_;
/* update IBSS_network 's timestamp */
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
- if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
+ if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
struct wlan_network *ibss_wlan = NULL;
memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
@@ -781,20 +709,14 @@ _func_enter_;
exit:
spin_unlock_bh(&pmlmepriv->lock);
-
-_func_exit_;
-
return;
}
-
-
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
struct mlme_ext_priv *pmlmeext;
-_func_enter_;
spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->wps_probe_req_ie) {
@@ -884,7 +806,6 @@ _func_enter_;
pmlmeext = &adapter->mlmeextpriv;
if (pmlmeext->sitesurvey_res.bss_cnt == 0)
rtw_hal_sreset_reset(adapter);
-_func_exit_;
}
void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf)
@@ -901,17 +822,15 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
struct __queue *scan_queue = &pmlmepriv->scanned_queue;
struct list_head *plist, *phead, *ptemp;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
spin_lock_bh(&scan_queue->lock);
spin_lock_bh(&free_queue->lock);
phead = get_list_head(scan_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (plist != phead) {
- ptemp = get_next(plist);
+ ptemp = plist->next;
rtw_list_delete(plist);
rtw_list_insert_tail(plist, &free_queue->queue);
plist = ptemp;
@@ -920,8 +839,6 @@ _func_enter_;
spin_unlock_bh(&free_queue->lock);
spin_unlock_bh(&scan_queue->lock);
-
-_func_exit_;
}
/*
@@ -934,8 +851,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
struct sta_priv *pstapriv = &adapter->stapriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("tgt_network->network.MacAddress=%pM ssid=%s\n",
@@ -979,7 +894,6 @@ _func_enter_;
if (lock_scanned_queue)
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
pmlmepriv->key_mask = 0;
-_func_exit_;
}
/*
@@ -989,8 +903,6 @@ void rtw_indicate_connect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n"));
pmlmepriv->to_join = false;
@@ -1008,7 +920,6 @@ _func_enter_;
rtw_set_scan_deny(padapter, 3000);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state=0x%08x\n", get_fwstate(pmlmepriv)));
-_func_exit_;
}
/*
@@ -1018,7 +929,6 @@ void rtw_indicate_disconnect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n"));
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
@@ -1038,8 +948,6 @@ _func_enter_;
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
-
-_func_exit_;
}
inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
@@ -1100,9 +1008,12 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
_rtw_memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
_rtw_memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
}
- /* Commented by Albert 2012/07/21 */
- /* When doing the WPS, the wps_ie_len won't equal to 0 */
- /* And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+ /*
+ * Commented by Albert 2012/07/21
+ * When doing the WPS, the wps_ie_len won't equal 0
+ * And the Wi-Fi driver shouldn't allow the data
+ * packet to be transmitted.
+ */
if (padapter->securitypriv.wps_ie_len != 0) {
psta->ieee8021x_blocked = true;
padapter->securitypriv.wps_ie_len = 0;
@@ -1206,8 +1117,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
unsigned int the_same_macaddr = false;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res));
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
@@ -1218,12 +1127,12 @@ _func_enter_;
else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- the_same_macaddr = _rtw_memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
+ the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
- goto ignore_nolock;
+ return;
}
spin_lock_bh(&pmlmepriv->lock);
@@ -1319,27 +1228,21 @@ _func_enter_;
ignore_joinbss_callback:
spin_unlock_bh(&pmlmepriv->lock);
-ignore_nolock:
-_func_exit_;
}
void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
-_func_enter_;
-
mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
rtw_os_xmit_schedule(adapter);
-
-_func_exit_;
}
static u8 search_max_mac_id(struct adapter *padapter)
{
u8 mac_id;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
u8 aid;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1347,7 +1250,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
if (pstapriv->sta_aid[aid-1] != NULL)
@@ -1388,19 +1291,17 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
struct wlan_network *cur_network = &(pmlmepriv->cur_network);
struct wlan_network *ptarget_wlan = NULL;
-_func_enter_;
-
if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false)
return;
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta) {
ap_sta_info_defer_update(adapter, psta);
rtw_stassoc_hw_rpt(adapter, psta);
}
- goto exit;
+ return;
}
#endif
/* for AD-HOC mode */
@@ -1408,12 +1309,12 @@ _func_enter_;
if (psta != NULL) {
/* the sta have been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
- goto exit; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */
+ return; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
- goto exit;
+ return;
}
/* to do: init sta_info variable */
psta->qos_option = 0;
@@ -1440,8 +1341,6 @@ _func_enter_;
}
spin_unlock_bh(&pmlmepriv->lock);
mlmeext_sta_add_event_callback(adapter, psta);
-exit:
-_func_exit_;
}
void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
@@ -1456,8 +1355,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
struct sta_priv *pstapriv = &adapter->stapriv;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
-_func_enter_;
-
psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
if (psta)
mac_id = psta->mac_id;
@@ -1541,14 +1438,11 @@ _func_enter_;
}
}
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
{
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n"));
-_func_exit_;
}
/*
@@ -1560,8 +1454,6 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
-_func_enter_;
-
DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
@@ -1592,7 +1484,6 @@ _func_enter_;
free_scanqueue(pmlmepriv);/* */
}
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
/*
@@ -1658,14 +1549,12 @@ void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
/* expire NAT2.5 entry */
nat25_db_expire(adapter);
- if (adapter->pppoe_connection_in_progress > 0) {
+ if (adapter->pppoe_connection_in_progress > 0)
adapter->pppoe_connection_in_progress--;
- }
/* due to rtw_dynamic_check_timer_handlder() is called every 2 seconds */
- if (adapter->pppoe_connection_in_progress > 0) {
+ if (adapter->pppoe_connection_in_progress > 0)
adapter->pppoe_connection_in_progress--;
- }
}
rcu_read_unlock();
@@ -1687,14 +1576,14 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
/* check bssid, if needed */
if (pmlmepriv->assoc_by_bssid) {
- if (!_rtw_memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
+ if (memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
goto exit;
}
/* check ssid, if needed */
- if (pmlmepriv->assoc_ssid.Ssid && pmlmepriv->assoc_ssid.SsidLength) {
+ if (pmlmepriv->assoc_ssid.SsidLength) {
if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
- _rtw_memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength) == false)
+ memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength))
goto exit;
}
@@ -1742,20 +1631,18 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
struct wlan_network *candidate = NULL;
u8 supp_ant_div = false;
-_func_enter_;
-
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
adapter = (struct adapter *)pmlmepriv->nic_hdl;
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (!rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) {
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
if (pnetwork == NULL) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__));
ret = _FAIL;
goto exit;
}
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
if (candidate == NULL) {
@@ -1792,9 +1679,6 @@ _func_enter_;
exit:
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-
-_func_exit_;
-
return ret;
}
@@ -1805,8 +1689,6 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
int res = _SUCCESS;
-_func_enter_;
-
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
@@ -1832,7 +1714,6 @@ _func_enter_;
psecuritypriv->dot11AuthAlgrthm));
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -1845,7 +1726,6 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
int res = _SUCCESS;
-_func_enter_;
pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
@@ -1914,7 +1794,6 @@ _func_enter_;
_rtw_init_listhead(&pcmd->list);
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
exit:
-_func_exit_;
return res;
}
@@ -1946,17 +1825,15 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
return ielength;
}
-/* */
-/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
-/* Added by Annie, 2006-05-07. */
-/* */
-/* Search by BSSID, */
-/* Return Value: */
-/* -1 :if there is no pre-auth key in the table */
-/* >= 0 :if there is pre-auth key, and return the entry id */
-/* */
-/* */
-
+/*
+ * Ported from 8185: IsInPreAuthKeyList().
+ * (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.)
+ * Added by Annie, 2006-05-07.
+ * Search by BSSID,
+ * Return Value:
+ * -1 :if there is no pre-auth key in the table
+ * >= 0 :if there is pre-auth key, and return the entry id
+ */
static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
{
struct security_priv *psecuritypriv = &Adapter->securitypriv;
@@ -1964,7 +1841,7 @@ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
do {
if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (_rtw_memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN) == true)) {
+ (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
break;
} else {
i++;
@@ -1973,11 +1850,9 @@ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
} while (i < NUM_PMKID_CACHE);
- if (i == NUM_PMKID_CACHE) {
+ if (i == NUM_PMKID_CACHE)
i = -1;/* Could not find. */
- } else {
- /* There is one Pre-Authentication Key for the specific BSSID. */
- }
+
return i;
}
@@ -2018,8 +1893,6 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
uint ndisauthmode = psecuritypriv->ndisauthtype;
uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
-_func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
("+rtw_restruct_sec_ie: ndisauthmode=%d ndissecuritytype=%d\n",
ndisauthmode, ndissecuritytype));
@@ -2052,9 +1925,6 @@ _func_enter_;
if (authmode == _WPA2_IE_ID_)
ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
}
-
-_func_exit_;
-
return ielength;
}
@@ -2065,8 +1935,6 @@ void rtw_init_registrypriv_dev_network(struct adapter *adapter)
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *myhwaddr = myid(peepriv);
-_func_enter_;
-
memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
@@ -2077,8 +1945,6 @@ _func_enter_;
pdev_network->Configuration.FHConfig.HopPattern = 0;
pdev_network->Configuration.FHConfig.HopSet = 0;
pdev_network->Configuration.FHConfig.DwellTime = 0;
-
-_func_exit_;
}
void rtw_update_registrypriv_dev_network(struct adapter *adapter)
@@ -2089,8 +1955,6 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
-_func_enter_;
-
pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
pdev_network->Rssi = 0;
@@ -2140,13 +2004,10 @@ _func_enter_;
/* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */
/* pdev_network->IELength = cpu_to_le32(sz); */
-_func_exit_;
}
void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
{
-_func_enter_;
-_func_exit_;
}
/* the function is at passive_level */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 6f7e415ecb6c..3ed5941bedc3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -171,7 +171,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
};
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the conbination for max channel numbers */
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
/*
* Search the @param channel_num in given @param channel_set
@@ -414,21 +414,21 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
+static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
{
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
if (ptable->func) {
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
return;
ptable->func(padapter, precv_frame);
}
}
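The rtw_mlme_ext.c hunks change the management-frame handlers from union recv_frame to struct recv_frame, so the frame data is now reached directly as precv_frame->rx_data and precv_frame->len instead of through the old u.hdr indirection. The sketch below shows only that access pattern; the structure is illustrative and lists just the fields used in these hunks, not the real struct recv_frame layout:

	#include <linux/types.h>

	struct example_recv_frame {
		u8 *rx_data;	/* start of the received 802.11 frame */
		uint len;	/* length of rx_data in bytes */
	};

	static u8 *example_frame_data(struct example_recv_frame *rf, uint *len)
	{
		*len = rf->len;
		return rf->rx_data;	/* was rf->u.hdr.rx_data before the conversion */
	}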
-void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
+void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
{
int index;
struct mlme_handler *ptable;
@@ -436,7 +436,7 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#endif /* CONFIG_88EU_AP_MODE */
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
@@ -449,8 +449,8 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
}
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
return;
ptable = mlme_sta_tbl;
@@ -465,13 +465,15 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
if (psta != NULL) {
if (GetRetry(pframe)) {
- if (precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
+ if (precv_frame->attrib.seq_num ==
+ psta->RxMgmtFrameSeqNum) {
/* drop the duplicate management frame */
- DBG_88E("Drop duplicate management frame with seq_num=%d.\n", precv_frame->u.hdr.attrib.seq_num);
+ DBG_88E("Drop duplicate management frame with seq_num=%d.\n",
+ precv_frame->attrib.seq_num);
return;
}
}
- psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
+ psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
#ifdef CONFIG_88EU_AP_MODE
@@ -532,7 +534,7 @@ Following are the callback functions for each subtype of the management frames
*****************************************************************************/
-unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ielen;
unsigned char *p;
@@ -540,8 +542,8 @@ unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
u8 is_valid_p2p_probereq = false;
#ifdef CONFIG_88EU_P2P
@@ -596,7 +598,7 @@ _continue:
if (is_valid_p2p_probereq)
goto _issue_probersp;
- if ((ielen != 0 && !_rtw_memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
+ if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
(ielen == 0 && pmlmeinfo->hidden_ssid_mode))
return _SUCCESS;
@@ -609,18 +611,18 @@ _issue_probersp:
return _SUCCESS;
}
-unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#endif
#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
if (pwdinfo->tx_prov_disc_info.benable) {
- if (_rtw_memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
pwdinfo->tx_prov_disc_info.benable = false;
issue_p2p_provision_request(padapter,
@@ -638,7 +640,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
} else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
if (pwdinfo->nego_req_info.benable) {
DBG_88E("[%s] P2P State is GONEGO ING!\n", __func__);
- if (_rtw_memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
pwdinfo->nego_req_info.benable = false;
issue_p2p_GO_request(padapter, pwdinfo->nego_req_info.peerDevAddr);
}
@@ -646,7 +648,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
} else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
if (pwdinfo->invitereq_info.benable) {
DBG_88E("[%s] P2P_STATE_TX_INVITE_REQ!\n", __func__);
- if (_rtw_memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
pwdinfo->invitereq_info.benable = false;
issue_p2p_invitation_request(padapter, pwdinfo->invitereq_info.peer_macaddr);
}
@@ -663,7 +665,7 @@ unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
{
int cam_idx;
struct sta_info *psta;
@@ -671,8 +673,8 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
struct wlan_bssid_ex *pbss;
int ret = _SUCCESS;
@@ -681,7 +683,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
- if (_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
/* we should update current network before auth, or some IE is wrong */
pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
@@ -753,7 +755,7 @@ _END_ONBEACON_:
return _SUCCESS;
}
-unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
unsigned int auth_mode, ie_len;
@@ -767,8 +769,8 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return _FAIL;
@@ -878,7 +880,7 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
goto auth_fail;
}
- if (_rtw_memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
pstat->state &= (~WIFI_FW_AUTH_STATE);
pstat->state |= WIFI_FW_AUTH_SUCCESS;
/* challenging txt is correct... */
@@ -926,20 +928,20 @@ auth_fail:
return _FAIL;
}
-unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int seq, len, status, offset;
unsigned char *p;
unsigned int go2asoc = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
DBG_88E("%s\n", __func__);
/* check A1 matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
@@ -1001,7 +1003,7 @@ authclnt_fail:
return _FAIL;
}
-unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
u16 capab_info;
@@ -1020,8 +1022,8 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
u8 p2p_status_code = P2P_STATUS_SUCCESS;
@@ -1097,7 +1099,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
status = _STATS_FAILURE_;
} else {
/* check if ssid match */
- if (!_rtw_memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ if (memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
status = _STATS_FAILURE_;
if (ie_len != cur->Ssid.SsidLength)
@@ -1270,7 +1272,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
for (;;) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
if (p != NULL) {
- if (_rtw_memcmp(p+2, WMM_IE, 6)) {
+ if (!memcmp(p+2, WMM_IE, 6)) {
pstat->flags |= WLAN_STA_WME;
pstat->qos_option = 1;
@@ -1470,7 +1472,7 @@ OnAssocReqFail:
return _FAIL;
}
-unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
uint i;
int res;
@@ -1480,13 +1482,13 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
/* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint pkt_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint pkt_len = precv_frame->len;
DBG_88E("%s\n", __func__);
/* check A1 matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
@@ -1524,7 +1526,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
+ if (!memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
WMM_param_handler(padapter, pIE);
break;
case _HT_CAPABILITY_IE_: /* HT caps */
@@ -1560,19 +1562,20 @@ report_assoc_result:
return _SUCCESS;
}
-unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned short reason;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
/* check A3 */
- if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
+ ETH_ALEN))
return _SUCCESS;
#ifdef CONFIG_88EU_P2P
@@ -1623,19 +1626,20 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame)
{
u16 reason;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
/* check A3 */
- if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network),
+ ETH_ALEN))
return _SUCCESS;
#ifdef CONFIG_88EU_P2P
@@ -1685,18 +1689,18 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
{
DBG_88E("%s\n", __func__);
return _SUCCESS;
}
-unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
u8 category;
u8 action;
@@ -1729,17 +1733,17 @@ exit:
return ret;
}
-unsigned int OnAction_qos(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_dls(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_dls(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_frame)
{
u8 *addr;
struct sta_info *psta = NULL;
@@ -1749,10 +1753,11 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
unsigned short tid, status, reason_code = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
struct sta_priv *pstapriv = &padapter->stapriv;
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe),
+ ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -1937,7 +1942,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negoitation request frame should contain 9 P2P attributes */
+ /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
/* 1. P2P Capability */
/* 2. Group Owner Intent */
/* 3. Configuration Timeout */
@@ -2280,7 +2285,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
/* Commented by Kurt 20120113 */
/* If some device wants to do p2p handshake without sending prov_disc_req */
/* We have to get peer_req_cm from here. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
@@ -2302,7 +2307,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20100908 */
- /* According to the P2P Specification, the group negoitation response frame should contain 9 P2P attributes */
+ /* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */
/* 1. Status */
/* 2. P2P Capability */
/* 3. Group Owner Intent */
@@ -2604,7 +2609,7 @@ static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
/* Commented by Albert 20110306 */
- /* According to the P2P Specification, the group negoitation request frame should contain 5 P2P attributes */
+ /* According to the P2P Specification, the group negotiation request frame should contain 5 P2P attributes */
/* 1. Status */
/* 2. P2P Capability */
/* 3. Operating Channel */
@@ -2825,7 +2830,8 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
/* Channel Number */
p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch; /* operating channel number */
- if (_rtw_memcmp(myid(&padapter->eeprompriv), pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
+ if (!memcmp(myid(&padapter->eeprompriv),
+ pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
/* P2P Group BSSID */
/* Type: */
p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
@@ -3260,7 +3266,7 @@ static u8 is_matched_in_profilelist(u8 *peermacaddr, struct profile_info *profil
for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
DBG_88E("[%s] profileinfo_mac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
profileinfo->peermac[0], profileinfo->peermac[1], profileinfo->peermac[2], profileinfo->peermac[3], profileinfo->peermac[4], profileinfo->peermac[5]);
- if (_rtw_memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
+ if (!memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
match_result = 1;
DBG_88E("[%s] Match!\n", __func__);
break;
@@ -3853,13 +3859,13 @@ exit:
#endif /* CONFIG_88EU_P2P */
-static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
+static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token)
{
- struct adapter *adapter = recv_frame->u.hdr.adapter;
+ struct adapter *adapter = recv_frame->adapter;
struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
- u8 *frame = recv_frame->u.hdr.rx_data;
- u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
- (recv_frame->u.hdr.attrib.frag_num & 0xf);
+ u8 *frame = recv_frame->rx_data;
+ u16 seq_ctrl = ((recv_frame->attrib.seq_num&0xffff) << 4) |
+ (recv_frame->attrib.frag_num & 0xf);
if (GetRetry(frame)) {
if (token >= 0) {
@@ -3885,14 +3891,14 @@ static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
return _SUCCESS;
}
-static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
+static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
{
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body;
u8 dialogToken = 0;
#ifdef CONFIG_88EU_P2P
- struct adapter *padapter = precv_frame->u.hdr.adapter;
- uint len = precv_frame->u.hdr.len;
+ struct adapter *padapter = precv_frame->adapter;
+ uint len = precv_frame->len;
u8 *p2p_ie;
u32 p2p_ielen;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
@@ -3939,7 +3945,8 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
/* Commented by Kurt 20120113 */
/* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr, ETH_ALEN))
+ if (!memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr,
+ ETH_ALEN))
memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
result = process_p2p_group_negotation_req(pwdinfo, frame_body, len);
@@ -4021,7 +4028,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
_rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
if (attr_contentlen) {
- if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
/* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
@@ -4069,7 +4076,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
_rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
if (attr_contentlen) {
- if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
/* In this case, the GO can't be myself. */
rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
@@ -4116,7 +4123,7 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
pwdinfo->invitereq_info.benable = false;
if (attr_content == P2P_STATUS_SUCCESS) {
- if (_rtw_memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (!memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
} else {
rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
@@ -4175,23 +4182,22 @@ static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
return _SUCCESS;
}
-static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
+static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
- if (_rtw_memcmp(frame_body + 2, P2P_OUI, 4) == true) {
+ if (!memcmp(frame_body + 2, P2P_OUI, 4))
ret = on_action_public_p2p(precv_frame);
- }
return ret;
}
-static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
+static unsigned int on_action_public_default(struct recv_frame *precv_frame, u8 action)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
u8 token;
@@ -4206,15 +4212,15 @@ exit:
return ret;
}
-unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int on_action_public(struct adapter *padapter, struct recv_frame *precv_frame)
{
unsigned int ret = _FAIL;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
u8 category, action;
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
goto exit;
category = frame_body[0];
@@ -4235,30 +4241,30 @@ exit:
return ret;
}
-unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_ht(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_wmm(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
-unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_P2P
u8 *frame_body;
u8 category, OUI_Subtype;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- uint len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ uint len = precv_frame->len;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
DBG_88E("%s\n", __func__);
/* check RA matches or not */
- if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
return _SUCCESS;
frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
@@ -4290,13 +4296,13 @@ unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_fram
return _SUCCESS;
}
-unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int OnAction(struct adapter *padapter, struct recv_frame *precv_frame)
{
int i;
unsigned char category;
struct action_handler *ptable;
unsigned char *frame_body;
- u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *pframe = precv_frame->rx_data;
frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
@@ -4310,7 +4316,7 @@ unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
return _SUCCESS;
}
-unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
+unsigned int DoReserved(struct adapter *padapter, struct recv_frame *precv_frame)
{
return _SUCCESS;
}
@@ -4341,7 +4347,7 @@ struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
/****************************************************************************
-Following are some TX fuctions for WiFi MLME
+Following are some TX functions for WiFi MLME
*****************************************************************************/
@@ -4432,7 +4438,7 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg
}
pxmitpriv->ack_tx = false;
- _exit_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ mutex_unlock(&pxmitpriv->ack_tx_mutex);
return ret;
}
@@ -4995,7 +5001,7 @@ exit:
return ret;
}
-/* if psta == NULL, indiate we are station(client) now... */
+/* if psta == NULL, indicate we are station(client) now... */
void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
{
struct xmit_frame *pmgntframe;
@@ -5234,7 +5240,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if (pbuf && _rtw_memcmp(pbuf+2, WMM_PARA_IE, 6)) {
+ if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
memcpy(pframe, pbuf, ie_len+2);
pframe += (ie_len+2);
pattrib->pktlen += (ie_len+2);
@@ -5439,14 +5445,14 @@ void issue_assocreq(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
- (_rtw_memcmp(pIE->data, WMM_OUI, 4)) ||
- (_rtw_memcmp(pIE->data, WPS_OUI, 4))) {
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
+ (!memcmp(pIE->data, WMM_OUI, 4)) ||
+ (!memcmp(pIE->data, WPS_OUI, 4))) {
if (!padapter->registrypriv.wifi_spec) {
/* Commented by Kurt 20110629 */
/* In some older APs, WPS handshake */
/* would be fail if we append vender extensions informations to AP */
- if (_rtw_memcmp(pIE->data, WPS_OUI, 4))
+ if (!memcmp(pIE->data, WPS_OUI, 4))
pIE->Length = 14;
}
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, pIE->Length, pIE->data, &(pattrib->pktlen));
@@ -5606,7 +5612,7 @@ exit:
return;
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
{
int ret = _FAIL;
@@ -5676,7 +5682,7 @@ exit:
}
-/* when wait_ms > 0 , this function shoule be called at process context */
+/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
{
@@ -5686,7 +5692,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -5721,7 +5727,7 @@ exit:
return ret;
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
{
int ret = _FAIL;
@@ -5799,7 +5805,7 @@ exit:
return ret;
}
-/* when wait_ms > 0 , this function shoule be called at process context */
+/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
{
@@ -5809,7 +5815,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (da == NULL)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -6103,17 +6109,26 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
case 1: /* ADDBA rsp */
pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));
+
+ BA_para_set = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
- if (MAX_AMPDU_FACTOR_64K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
- else if (MAX_AMPDU_FACTOR_32K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
- else if (MAX_AMPDU_FACTOR_16K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
- else if (MAX_AMPDU_FACTOR_8K == max_rx_ampdu_factor)
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
- else
- BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ switch (max_rx_ampdu_factor) {
+ case MAX_AMPDU_FACTOR_64K:
+ BA_para_set |= 0x1000; /* 64 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_32K:
+ BA_para_set |= 0x0800; /* 32 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_16K:
+ BA_para_set |= 0x0400; /* 16 buffer size */
+ break;
+ case MAX_AMPDU_FACTOR_8K:
+ BA_para_set |= 0x0200; /* 8 buffer size */
+ break;
+ default:
+ BA_para_set |= 0x1000; /* 64 buffer size */
+ break;
+ }
if (pregpriv->ampdu_amsdu == 0)/* disabled */
BA_para_set = BA_para_set & ~BIT(0);
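Aside on the ADDBA response hunk above: besides turning the if/else chain into a switch, the patch starts honouring byte order by running ADDBA_req.BA_para_set through le16_to_cpu() before OR-ing in the buffer-size bits. In the 802.11 block-ack parameter-set word, bits 0-5 carry A-MSDU/policy/TID (hence the 0x3f mask) and bits 6-15 carry the buffer size, which is why 64, 32, 16 and 8 buffers become 0x1000, 0x0800, 0x0400 and 0x0200. A hedged sketch of the same computation; build_ba_para_set() is an illustrative helper, not a driver function:

	static u16 build_ba_para_set(__le16 req_para_set, u16 buffers)
	{
		u16 ba = le16_to_cpu(req_para_set) & 0x3f; /* keep A-MSDU/policy/TID */

		ba |= (buffers & 0x3ff) << 6;              /* 10-bit buffer-size field */
		return ba;
	}

	/* build_ba_para_set(req, 64) == (le16_to_cpu(req) & 0x3f) | 0x1000 */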
@@ -6222,7 +6237,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
int len;
@@ -6232,9 +6247,9 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
if (rtw_end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
- plist = get_next(plist);
+ plist = plist->next;
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
@@ -6355,7 +6370,7 @@ unsigned int send_beacon(struct adapter *padapter)
/****************************************************************************
-Following are some utitity fuctions for WiFi MLME
+Following are some utility functions for WiFi MLME
*****************************************************************************/
@@ -6468,7 +6483,7 @@ void site_survey(struct adapter *padapter)
{
/* 20100721:Interrupt scan operation here. */
/* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */
- /* It compares the scan result and select beter one to do connection. */
+ /* It compares the scan result and select better one to do connection. */
if (rtw_hal_antdiv_before_linked(padapter)) {
pmlmeext->sitesurvey_res.bss_cnt = 0;
pmlmeext->sitesurvey_res.channel_idx = -1;
@@ -6526,14 +6541,14 @@ void site_survey(struct adapter *padapter)
}
/* collect bss info from Beacon and Probe request/response frames. */
-u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
{
int i;
u32 len;
u8 *p;
u16 val16, subtype;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- u32 packet_len = precv_frame->u.hdr.len;
+ u8 *pframe = precv_frame->rx_data;
+ u32 packet_len = precv_frame->len;
u8 ie_offset;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -6572,10 +6587,10 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
bssid->IELength = len;
memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
- /* get the signal strength */
- bssid->Rssi = precv_frame->u.hdr.attrib.phy_info.recvpower; /* in dBM.raw data */
- bssid->PhyInfo.SignalQuality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
- bssid->PhyInfo.SignalStrength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
+ /* get the signal strength in dBM.raw data */
+ bssid->Rssi = precv_frame->attrib.phy_info.recvpower;
+ bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
/* checking SSID */
@@ -6707,7 +6722,7 @@ void start_create_ibss(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_IBSS) {/* adhoc master */
@@ -6759,7 +6774,7 @@ void start_clnt_join(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_ESS) {
@@ -6851,7 +6866,7 @@ unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
/* check A3 */
- if (!(_rtw_memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ if (memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
return _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -7028,7 +7043,8 @@ Following are the functions to report events
*****************************************************************************/
-void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
+void report_survey_event(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -7037,8 +7053,6 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
struct C2HEvent_Header *pc2h_evt_hdr;
struct mlme_ext_priv *pmlmeext;
struct cmd_priv *pcmdpriv;
- /* u8 *pframe = precv_frame->u.hdr.rx_data; */
- /* uint len = precv_frame->u.hdr.len; */
if (!padapter)
return;
@@ -7373,7 +7387,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* turn on dynamic functions */
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
- /* update IOT-releated issue */
+ /* update IOT-related issue */
update_IOT_info(padapter);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
@@ -7381,7 +7395,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* BCN interval */
rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
- /* udpate capability */
+ /* update capability */
update_capinfo(padapter, pmlmeinfo->capability);
/* WMM, Update EDCA param */
@@ -7902,7 +7916,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
- if (_rtw_memcmp(pIE->data, WMM_OUI, 4))
+ if (!memcmp(pIE->data, WMM_OUI, 4))
pmlmeinfo->WMM_enable = 1;
break;
case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
@@ -8016,7 +8030,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED) &&
set_idx >= 0) {
- memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+ out[j] = in[i];
if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
out[j].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
@@ -8261,7 +8275,6 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
-_func_enter_;
ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
@@ -8290,7 +8303,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -8368,12 +8380,12 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
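The tx_beacon_hdl() hunk above is one of many places this series rewrites the open-coded queue walk (get_next()/LIST_CONTAINOR()) in terms of ->next and container_of(). The idiomatic end point of that conversion is list_for_each_entry_safe() from <linux/list.h>, which hides the container_of() arithmetic and tolerates deleting the current entry. A sketch only, assuming the driver's struct __queue exposes its list head as .queue, as it does elsewhere in this file:

	struct xmit_frame *pxmitframe, *tmp;

	list_for_each_entry_safe(pxmitframe, tmp,
				 &psta_bmc->sleep_q.queue, list) {
		list_del_init(&pxmitframe->list);
		/* ... dequeue bookkeeping and hand the frame to the xmit path ... */
	}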
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index 6451efdfb132..705f666bca6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -23,6 +23,7 @@
#include "odm_precomp.h"
#include "rtl8188e_hal.h"
+#include <linux/vmalloc.h>
u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz)
{
@@ -406,7 +407,7 @@ s32 mp_start_test(struct adapter *padapter)
goto end_of_mp_start_test;
}
- /* 3 3. join psudo AdHoc */
+ /* 3 3. join pseudo AdHoc */
tgt_network->join_res = 1;
tgt_network->aid = 1;
psta->aid = 1;
@@ -442,7 +443,7 @@ void mp_stop_test(struct adapter *padapter)
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
goto end_of_mp_stop_test;
- /* 3 1. disconnect psudo AdHoc */
+ /* 3 1. disconnect pseudo AdHoc */
rtw_indicate_disconnect(padapter);
/* 3 2. clear psta used in mp test mode. */
@@ -882,7 +883,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
{
u32 i, psd_pts = 0, psd_start = 0, psd_stop = 0;
u32 psd_data = 0;
-
+ int ret;
if (!netif_running(pAdapter->pnetdev)) {
RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! interface not opened!\n"));
@@ -899,7 +900,10 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
psd_start = 64;
psd_stop = 128;
} else {
- sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
+ ret = sscanf(data, "pts =%d, start =%d, stop =%d",
+ &psd_pts, &psd_start, &psd_stop);
+ if (ret != 3)
+ return 0;
}
_rtw_memset(data, '\0', sizeof(*data));
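Note on the hunk above: sscanf() returns the number of successful conversions, so anything other than 3 means the "pts/start/stop" triple was incomplete and the PSD query now bails out instead of using uninitialised values. A stripped-down illustration (not driver code):

	int pts, start, stop;

	if (sscanf(data, "pts =%d, start =%d, stop =%d",
		   &pts, &start, &stop) != 3)
		return 0;	/* report zero PSD bytes for a malformed request */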
@@ -943,7 +947,7 @@ void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_xmit_extbuf)
- rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
if (padapter->registrypriv.mp_mode == 0) {
max_xmit_extbuf_size = 20000;
@@ -956,7 +960,7 @@ void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
/* Init xmit extension buff */
_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
- pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
index edcd8a5042be..e783968b29ea 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -33,7 +33,6 @@ int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->information_buf_len < sizeof(u8))
return NDIS_STATUS_INVALID_LENGTH;
@@ -48,7 +47,6 @@ _func_enter_;
status = NDIS_STATUS_NOT_ACCEPTED;
}
-_func_exit_;
return status;
}
@@ -61,7 +59,6 @@ int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_bb_reg_hdl\n"));
@@ -87,7 +84,6 @@ _func_enter_;
write_bbreg(Adapter, offset, 0xFFFFFFFF, value);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -100,7 +96,6 @@ int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_bb_reg_hdl\n"));
@@ -126,7 +121,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_,
("-rtl8188eu_oid_rt_pro_read_bb_reg_hdl: offset=0x%03X value:0x%08X\n",
offset, value));
-_func_exit_;
return status;
}
@@ -140,7 +134,6 @@ int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_rf_reg_hdl\n"));
@@ -171,7 +164,6 @@ _func_enter_;
write_rfreg(Adapter, path, offset, value);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -185,7 +177,6 @@ int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_rf_reg_hdl\n"));
@@ -217,7 +208,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_pro_read_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
path, offset, value));
-_func_exit_;
return status;
}
@@ -232,7 +222,6 @@ int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_,
("+rtl8188eu_oid_rt_pro_set_data_rate_hdl\n"));
@@ -255,7 +244,6 @@ _func_enter_;
SetDataRate(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -266,7 +254,6 @@ int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_start_test_hdl\n"));
@@ -293,7 +280,6 @@ exit:
RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_pro_start_test_hdl: mp_mode=%d\n", Adapter->mppriv.mode));
-_func_exit_;
return status;
}
@@ -303,7 +289,6 @@ int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_STOP_TEST\n"));
@@ -316,7 +301,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_STOP_TEST\n"));
-_func_exit_;
return status;
}
@@ -327,7 +311,6 @@ int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_p
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl\n"));
@@ -352,7 +335,6 @@ _func_enter_;
SetChannel(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -364,7 +346,6 @@ int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_set_bandwidth_hdl\n"));
@@ -391,7 +372,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_set_bandwidth_hdl: bandwidth=%d channel_offset=%d\n",
bandwidth, channel_offset));
-_func_exit_;
return status;
}
@@ -402,7 +382,6 @@ int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_antenna_bb_hdl\n"));
@@ -426,7 +405,6 @@ _func_enter_;
*(u32 *)poid_par_priv->information_buf = antenna;
}
-_func_exit_;
return status;
}
@@ -437,7 +415,6 @@ int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_set_tx_power_control_hdl\n"));
@@ -461,7 +438,6 @@ _func_enter_;
SetTxPower(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -474,7 +450,6 @@ int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -488,7 +463,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -498,7 +472,6 @@ int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -513,7 +486,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -523,7 +495,6 @@ int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *po
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -538,7 +509,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -549,7 +519,6 @@ int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -559,7 +528,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl.\n"));
Adapter->mppriv.tx_pktcount = 0;
-_func_exit_;
return status;
}
@@ -569,7 +537,6 @@ int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -583,7 +550,6 @@ _func_enter_;
status = NDIS_STATUS_INVALID_LENGTH;
}
-_func_exit_;
return status;
}
@@ -593,7 +559,6 @@ int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -604,7 +569,6 @@ _func_enter_;
ResetPhyRxPktCount(Adapter);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -614,7 +578,6 @@ int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_pa
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl\n"));
@@ -632,7 +595,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl: recv_ok=%d\n", *(u32 *)poid_par_priv->information_buf));
-_func_exit_;
return status;
}
@@ -642,7 +604,6 @@ int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl\n"));
@@ -663,7 +624,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl: recv_err =%d\n",
*(u32 *)poid_par_priv->information_buf));
-_func_exit_;
return status;
}
@@ -674,7 +634,6 @@ int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_pri
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_continuous_tx_hdl\n"));
@@ -698,7 +657,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -709,7 +667,6 @@ int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl\n"));
@@ -733,7 +690,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -744,7 +700,6 @@ int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poi
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl\n"));
@@ -768,7 +723,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -779,7 +733,6 @@ int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_pr
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl\n"));
@@ -792,7 +745,6 @@ _func_enter_;
SetSingleToneTx(Adapter, (u8)bStartTest);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -806,7 +758,6 @@ int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv)
{
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -815,7 +766,6 @@ _func_enter_;
rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, NULL);
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -833,7 +783,6 @@ int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_pro_read_register_hdl\n"));
@@ -870,7 +819,6 @@ _func_enter_;
*poid_par_priv->bytes_rw = width;
-_func_exit_;
return status;
}
@@ -882,7 +830,6 @@ int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("+rtl8188eu_oid_rt_pro_write_register_hdl\n"));
@@ -929,7 +876,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_pro_write_register_hdl: offset=0x%08X width=%d value=0x%X\n",
offset, width, value));
-_func_exit_;
return status;
}
@@ -1002,7 +948,6 @@ int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_DATA_RATE_EX\n"));
@@ -1016,7 +961,6 @@ _func_enter_;
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1027,7 +971,6 @@ int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
u8 thermal = 0;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_get_thermal_meter_hdl\n"));
@@ -1044,7 +987,6 @@ _func_enter_;
*(u32 *)poid_par_priv->information_buf = (u32)thermal;
*poid_par_priv->bytes_rw = sizeof(u32);
-_func_exit_;
return status;
}
@@ -1060,7 +1002,6 @@ int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_pr
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->information_buf_len < sizeof(u8))
return NDIS_STATUS_INVALID_LENGTH;
@@ -1079,7 +1020,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1143,7 +1083,6 @@ int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1176,7 +1115,6 @@ _func_enter_;
}
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1190,7 +1128,6 @@ int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1216,7 +1153,6 @@ _func_enter_;
status = NDIS_STATUS_FAILURE;
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
@@ -1227,7 +1163,6 @@ int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
*poid_par_priv->bytes_rw = 0;
@@ -1267,7 +1202,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("-rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1279,7 +1213,6 @@ int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_pr
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1296,7 +1229,6 @@ _func_enter_;
} else {
status = NDIS_STATUS_FAILURE;
}
-_func_exit_;
return status;
}
@@ -1306,7 +1238,6 @@ int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
int status = NDIS_STATUS_SUCCESS;
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-_func_enter_;
if (poid_par_priv->type_of_oid != QUERY_OID)
return NDIS_STATUS_NOT_ACCEPTED;
@@ -1321,7 +1252,6 @@ _func_enter_;
("-rtl8188eu_oid_rt_get_efuse_max_size_hdl: size=%d status=0x%08X\n",
*(int *)poid_par_priv->information_buf, status));
-_func_exit_;
return status;
}
@@ -1330,7 +1260,6 @@ int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
{
int status;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_efuse_hdl\n"));
@@ -1341,7 +1270,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_, ("-rtl8188eu_oid_rt_pro_efuse_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1353,7 +1281,6 @@ int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
u16 maplen = 0;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_efuse_map_hdl\n"));
@@ -1398,7 +1325,6 @@ _func_enter_;
RT_TRACE(_module_mp_, _drv_info_,
("-rtl8188eu_oid_rt_pro_efuse_map_hdl: status=0x%08X\n", status));
-_func_exit_;
return status;
}
@@ -1414,7 +1340,6 @@ int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
u8 rx_pkt_type;
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_set_rx_packet_type_hdl\n"));
@@ -1427,7 +1352,6 @@ _func_enter_;
rx_pkt_type = *((u8 *)poid_par_priv->information_buf);/* 4 */
RT_TRACE(_module_mp_, _drv_info_, ("rx_pkt_type: %x\n", rx_pkt_type));
-_func_exit_;
return status;
}
@@ -1482,7 +1406,6 @@ int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
{
int status = NDIS_STATUS_SUCCESS;
-_func_enter_;
if (poid_par_priv->type_of_oid != SET_OID) {
status = NDIS_STATUS_NOT_ACCEPTED;
@@ -1497,7 +1420,6 @@ _func_enter_;
/* CALL the power_down function */
_irqlevel_changed_(&oldirql, RAISE);
-_func_exit_;
return status;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
index 6e8c06e840b3..9425c4991ccd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_p2p.c
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -57,13 +57,13 @@ static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* look up sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
if (psta->is_p2p_device) {
@@ -824,7 +824,7 @@ u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint l
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_ , len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_ , NULL, &p2pielen);
if (p2pie) {
- if ((p != NULL) && _rtw_memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid , 7)) {
+ if ((p != NULL) && !memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid , 7)) {
/* todo: */
/* Check Requested Device Type attributes in WSC IE. */
/* Check Device ID attribute in P2P IE */
@@ -972,24 +972,24 @@ u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint le
u32 attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
- if (_rtw_memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
- _rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
+ if (!memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
+ !memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
attr_contentlen = 0;
if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
struct list_head *phead, *plist;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* look up sta asoc_queue */
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
if (psta->is_p2p_device && (psta->dev_cap&P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
- _rtw_memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
+ !memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
/* issue GO Discoverability Request */
issue_group_disc_req(pwdinfo, psta->hwaddr);
status = P2P_STATUS_SUCCESS;
@@ -1118,7 +1118,7 @@ u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo, u8 *pframe,
/* Commented by Kurt 20120113 */
/* If some device wants to do p2p handshake without sending prov_disc_req */
/* We have to get peer_req_cm from here. */
- if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (!memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
wps_devicepassword_id = be16_to_cpu(be_tmp);
@@ -1498,7 +1498,6 @@ static void find_phase_handler(struct adapter *padapter)
struct ndis_802_11_ssid ssid;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
_rtw_memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
@@ -1509,7 +1508,6 @@ _func_enter_;
spin_lock_bh(&pmlmepriv->lock);
rtw_sitesurvey_cmd(padapter, &ssid, 1, NULL, 0);
spin_unlock_bh(&pmlmepriv->lock);
-_func_exit_;
}
void p2p_concurrent_handler(struct adapter *padapter);
@@ -1518,7 +1516,6 @@ static void restore_p2p_state_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
@@ -1528,54 +1525,46 @@ _func_enter_;
/* because this P2P client should stay at the operating channel of P2P GO. */
set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
}
-_func_exit_;
}
static void pre_tx_invitereq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
static void pre_tx_provdisc_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
static void pre_tx_negoreq_handler(struct adapter *padapter)
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 val8 = 1;
-_func_enter_;
set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
-_func_exit_;
}
void p2p_protocol_wk_hdl(struct adapter *padapter, int intCmdType)
{
-_func_enter_;
switch (intCmdType) {
case P2P_FIND_PHASE_WK:
find_phase_handler(padapter);
@@ -1594,7 +1583,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
@@ -1610,7 +1598,6 @@ void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
u8 find_p2p = false, find_p2p_ps = false;
u8 noa_offset, noa_num, noa_index;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
@@ -1683,7 +1670,6 @@ _func_enter_;
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
}
-_func_exit_;
}
void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
@@ -1691,7 +1677,6 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
-_func_enter_;
/* Pre action for p2p state */
switch (p2p_ps_state) {
@@ -1738,7 +1723,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
@@ -1749,7 +1733,6 @@ u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-_func_enter_;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return res;
@@ -1781,7 +1764,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -2024,11 +2006,11 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
/* Disable P2P function */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
- _cancel_timer_ex(&pwdinfo->find_phase_timer);
- _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
- _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
- _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey2);
+ del_timer_sync(&pwdinfo->find_phase_timer);
+ del_timer_sync(&pwdinfo->restore_p2p_state_timer);
+ del_timer_sync(&pwdinfo->pre_tx_scan_timer);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey);
+ del_timer_sync(&pwdinfo->reset_ch_sitesurvey2);
reset_ch_sitesurvey_timer_process(padapter);
reset_ch_sitesurvey_timer_process2(padapter);
rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
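The hunk above swaps the driver's _cancel_timer_ex() wrapper for del_timer_sync() from <linux/timer.h>. Unlike plain del_timer(), the _sync variant also waits for a callback already running on another CPU, so it must not be called while holding a lock the callback itself takes. A minimal teardown sketch under that assumption, using the timers named in the patch:

	/* Stop the P2P timers before tearing down the state they touch;
	 * once del_timer_sync() returns, no callback is still executing.
	 */
	del_timer_sync(&pwdinfo->find_phase_timer);
	del_timer_sync(&pwdinfo->pre_tx_scan_timer);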
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index b5db22cc81ed..f6583734aefa 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -226,11 +226,8 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
u8 rpwm;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
pslv = PS_STATE(pslv);
-
if (pwrpriv->btcoex_rfon) {
if (pslv < PS_STATE_S4)
pslv = PS_STATE_S3;
@@ -274,8 +271,6 @@ _func_enter_;
pwrpriv->tog += 0x80;
pwrpriv->cpwm = pslv;
-
-_func_exit_;
}
static u8 PS_RDY_CHECK(struct adapter *padapter)
@@ -313,8 +308,6 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif /* CONFIG_88EU_P2P */
-_func_enter_;
-
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("%s: PowerMode=%d Smart_PS=%d\n",
__func__, ps_mode, smart_ps));
@@ -362,8 +355,6 @@ _func_enter_;
rtw_set_rpwm(padapter, PS_STATE_S2);
}
}
-
-_func_exit_;
}
/*
@@ -410,8 +401,6 @@ void LPS_Enter(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
if (PS_RDY_CHECK(padapter) == false)
return;
@@ -428,8 +417,6 @@ _func_enter_;
pwrpriv->LpsIdleCount++;
}
}
-
-_func_exit_;
}
#define LPS_LEAVE_TIMEOUT_MS 100
@@ -440,8 +427,6 @@ void LPS_Leave(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
if (pwrpriv->bLeisurePs) {
if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0);
@@ -452,8 +437,6 @@ _func_enter_;
}
pwrpriv->bpower_saving = false;
-
-_func_exit_;
}
/* */
@@ -465,23 +448,17 @@ void LeaveAllPowerSaveMode(struct adapter *Adapter)
struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
u8 enqueue = 0;
-_func_enter_;
-
if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
p2p_ps_wk_cmd(Adapter, P2P_PS_DISABLE, enqueue);
rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
}
-
-_func_exit_;
}
void rtw_init_pwrctrl_priv(struct adapter *padapter)
{
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
-_func_enter_;
-
_init_pwrlock(&pwrctrlpriv->lock);
pwrctrlpriv->rf_pwrstate = rf_on;
pwrctrlpriv->ips_enter_cnts = 0;
@@ -518,8 +495,6 @@ _func_enter_;
pwrctrlpriv->btcoex_rfon = false;
_init_timer(&(pwrctrlpriv->pwr_state_check_timer), padapter->pnetdev, pwr_state_check_handler, (u8 *)padapter);
-
-_func_exit_;
}
u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
@@ -540,7 +515,7 @@ inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
/*
* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms wiil prevent from falling into IPS after wakeup
+* @ips_deffer_ms: the ms will prevent from falling into IPS after wakeup
* Return _SUCCESS or _FAIL
*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index c9c180649c12..636ec553ae83 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,11 +23,12 @@
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
-#include <ip.h>
-#include <if_ether.h>
-#include <ethernet.h>
#include <usb_ops.h>
#include <wifi.h>
+#include <linux/vmalloc.h>
+
+#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
@@ -45,7 +46,6 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
-_func_enter_;
_rtw_memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
@@ -53,18 +53,16 @@ _func_enter_;
_rtw_init_queue(&psta_recvpriv->defrag_q);
-_func_exit_;
}
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
int i;
- union recv_frame *precvframe;
+ struct recv_frame *precvframe;
int res = _SUCCESS;
-_func_enter_;
spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
@@ -77,7 +75,7 @@ _func_enter_;
rtw_os_recv_resource_init(precvpriv, padapter);
- precvpriv->pallocated_frame_buf = rtw_zvmalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
if (precvpriv->pallocated_frame_buf == NULL) {
res = _FAIL;
@@ -86,18 +84,19 @@ _func_enter_;
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
- precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
+ precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
for (i = 0; i < NR_RECVFRAME; i++) {
- _rtw_init_listhead(&(precvframe->u.list));
+ _rtw_init_listhead(&(precvframe->list));
- rtw_list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
+ rtw_list_insert_tail(&(precvframe->list),
+ &(precvpriv->free_recv_queue.queue));
res = rtw_os_recv_resource_alloc(padapter, precvframe);
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
- precvframe->u.hdr.adapter = padapter;
+ precvframe->adapter = padapter;
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
@@ -113,7 +112,6 @@ _func_enter_;
rtw_set_signal_stat_timer(precvpriv);
exit:
-_func_exit_;
return res;
}
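
The hunk above swaps the driver's rtw_zvmalloc() wrapper for plain vzalloc() (hence the new <linux/vmalloc.h> include), and the matching hunk below drops rtw_vmfree() in favour of vfree(). A minimal sketch of the resulting pattern; the helper names are illustrative, only the vmalloc API is real:

    #include <linux/types.h>
    #include <linux/vmalloc.h>

    static void *alloc_recvframe_pool(unsigned int nframes, size_t framesz, size_t align)
    {
            /* zeroed, virtually contiguous memory, or NULL on failure */
            return vzalloc(nframes * framesz + align);
    }

    static void free_recvframe_pool(void *buf)
    {
            /* no length argument, unlike rtw_vmfree(); vfree(NULL) is a no-op */
            vfree(buf);
    }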
@@ -122,40 +120,37 @@ void _rtw_free_recv_priv (struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
-_func_enter_;
rtw_free_uc_swdec_pending_queue(padapter);
rtw_os_recv_resource_free(precvpriv);
if (precvpriv->pallocated_frame_buf) {
- rtw_vmfree(precvpriv->pallocated_frame_buf, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ vfree(precvpriv->pallocated_frame_buf);
}
rtw_hal_free_recv_priv(padapter);
-_func_exit_;
}
-union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *hdr;
struct list_head *plist, *phead;
struct adapter *padapter;
struct recv_priv *precvpriv;
-_func_enter_;
if (_rtw_queue_empty(pfree_recv_queue)) {
- precvframe = NULL;
+ hdr = NULL;
} else {
phead = get_list_head(pfree_recv_queue);
- plist = get_next(phead);
+ plist = phead->next;
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ hdr = container_of(plist, struct recv_frame, list);
- rtw_list_delete(&precvframe->u.hdr.list);
- padapter = precvframe->u.hdr.adapter;
+ rtw_list_delete(&hdr->list);
+ padapter = hdr->adapter;
if (padapter != NULL) {
precvpriv = &padapter->recvpriv;
if (pfree_recv_queue == &precvpriv->free_recv_queue)
@@ -163,14 +158,13 @@ _func_enter_;
}
}
-_func_exit_;
- return precvframe;
+ return (struct recv_frame *)hdr;
}
-union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *precvframe;
spin_lock_bh(&pfree_recv_queue->lock);
@@ -181,36 +175,36 @@ union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
return precvframe;
}
-void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpriv)
+void rtw_init_recvframe(struct recv_frame *precvframe, struct recv_priv *precvpriv)
{
/* Perry: This can be removed */
- _rtw_init_listhead(&precvframe->u.hdr.list);
+ _rtw_init_listhead(&precvframe->list);
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
}
-int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
+int rtw_free_recvframe(struct recv_frame *precvframe,
+ struct __queue *pfree_recv_queue)
{
struct adapter *padapter;
struct recv_priv *precvpriv;
-_func_enter_;
if (!precvframe)
return _FAIL;
- padapter = precvframe->u.hdr.adapter;
+ padapter = precvframe->adapter;
precvpriv = &padapter->recvpriv;
- if (precvframe->u.hdr.pkt) {
- dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
- precvframe->u.hdr.pkt = NULL;
+ if (precvframe->pkt) {
+ dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
+ precvframe->pkt = NULL;
}
spin_lock_bh(&pfree_recv_queue->lock);
- rtw_list_delete(&(precvframe->u.hdr.list));
+ rtw_list_delete(&(precvframe->list));
- precvframe->u.hdr.len = 0;
+ precvframe->len = 0;
- rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
+ rtw_list_insert_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
if (padapter != NULL) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
@@ -219,32 +213,29 @@ _func_enter_;
spin_unlock_bh(&pfree_recv_queue->lock);
-_func_exit_;
return _SUCCESS;
}
-int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
- struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct adapter *padapter = precvframe->adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
-_func_enter_;
- rtw_list_delete(&(precvframe->u.hdr.list));
- rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));
+ rtw_list_delete(&(precvframe->list));
+ rtw_list_insert_tail(&(precvframe->list), get_list_head(queue));
if (padapter != NULL) {
if (queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
}
-_func_exit_;
return _SUCCESS;
}
-int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
int ret;
@@ -265,32 +256,30 @@ using spinlock to protect
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
- union recv_frame *precvframe;
+ struct recv_frame *hdr;
struct list_head *plist, *phead;
-_func_enter_;
spin_lock(&pframequeue->lock);
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ hdr = container_of(plist, struct recv_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
- rtw_free_recvframe(precvframe, pfree_recv_queue);
+ rtw_free_recvframe((struct recv_frame *)hdr, pfree_recv_queue);
}
spin_unlock(&pframequeue->lock);
-_func_exit_;
}
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
- union recv_frame *pending_frame;
+ struct recv_frame *pending_frame;
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
@@ -337,9 +326,9 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
} else {
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
- precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
+ precvbuf = container_of(plist, struct recv_buf, list);
rtw_list_delete(&precvbuf->list);
}
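
Throughout this file the LIST_CONTAINOR()/get_next() wrappers give way to the kernel's container_of() and a plain ->next walk. A minimal sketch of the pattern, with the struct layout abbreviated (the real struct recv_frame also carries attrib, rx_data, pkt and so on):

    #include <linux/kernel.h>
    #include <linux/list.h>

    struct recv_frame {
            struct list_head list;
            /* ... */
    };

    static struct recv_frame *first_recv_frame(struct list_head *head)
    {
            struct list_head *plist = head->next;

            if (plist == head)      /* empty list */
                    return NULL;

            /* container_of() maps the embedded list_head back to its container,
             * exactly what the driver's LIST_CONTAINOR() macro used to do */
            return container_of(plist, struct recv_frame, list);
    }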
@@ -349,7 +338,8 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
return precvbuf;
}
-static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
+static int recvframe_chkmic(struct adapter *adapter,
+ struct recv_frame *precvframe)
{
int i, res = _SUCCESS;
u32 datalen;
@@ -358,12 +348,11 @@ static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvfra
u8 *pframe, *payload, *pframemic;
u8 *mickey;
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-_func_enter_;
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
@@ -375,23 +364,24 @@ _func_enter_;
/* calculate mic code */
if (stainfo != NULL) {
if (IS_MCAST(prxattrib->ra)) {
- mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
-
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
-
if (!psecuritypriv) {
res = _FAIL;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
goto exit;
}
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
} else {
mickey = &stainfo->dot11tkiprxmickey.skey[0];
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
}
- datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len included the mic code */
- pframe = precvframe->u.hdr.rx_data;
+ /* icv_len included the mic code */
+ datalen = precvframe->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
+ pframe = precvframe->rx_data;
payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
@@ -424,16 +414,30 @@ _func_enter_;
*(pframemic-10), *(pframemic-9)));
{
uint i;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======demp packet (len=%d)======\n", precvframe->u.hdr.len));
- for (i = 0; i < precvframe->u.hdr.len; i = i+8) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
- *(precvframe->u.hdr.rx_data+i), *(precvframe->u.hdr.rx_data+i+1),
- *(precvframe->u.hdr.rx_data+i+2), *(precvframe->u.hdr.rx_data+i+3),
- *(precvframe->u.hdr.rx_data+i+4), *(precvframe->u.hdr.rx_data+i+5),
- *(precvframe->u.hdr.rx_data+i+6), *(precvframe->u.hdr.rx_data+i+7)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ======demp packet (len=%d)======\n",
+ precvframe->len));
+ for (i = 0; i < precvframe->len; i += 8) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->rx_data+i),
+ *(precvframe->rx_data+i+1),
+ *(precvframe->rx_data+i+2),
+ *(precvframe->rx_data+i+3),
+ *(precvframe->rx_data+i+4),
+ *(precvframe->rx_data+i+5),
+ *(precvframe->rx_data+i+6),
+ *(precvframe->rx_data+i+7)));
}
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ====== demp packet end [len=%d]======\n", precvframe->u.hdr.len));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n hrdlen=%d,\n", prxattrib->hdrlen));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n ====== demp packet end [len=%d]======\n",
+ precvframe->len));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n hrdlen=%d,\n",
+ prxattrib->hdrlen));
}
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -471,24 +475,23 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
/* decrypt and set the ivlen, icvlen of the recv_frame */
-static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
+static struct recv_frame *decryptor(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
- struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- union recv_frame *return_packet = precv_frame;
+ struct recv_frame *return_packet = precv_frame;
u32 res = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
if (prxattrib->encrypt > 0) {
- u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+ u8 *iv = precv_frame->rx_data+prxattrib->hdrlen;
prxattrib->key_index = (((iv[3])>>6)&0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -534,34 +537,33 @@ _func_enter_;
return_packet = NULL;
}
-_func_exit_;
return return_packet;
}
/* set the security information in the recv_frame */
-static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+static struct recv_frame *portctrl(struct adapter *adapter,
+ struct recv_frame *precv_frame)
{
u8 *psta_addr = NULL, *ptr;
uint auth_alg;
- struct recv_frame_hdr *pfhdr;
+ struct recv_frame *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv;
- union recv_frame *prtnframe;
+ struct recv_frame *prtnframe;
u16 ether_type = 0;
u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
struct rx_pkt_attrib *pattrib;
__be16 be_tmp;
-_func_enter_;
pstapriv = &adapter->stapriv;
psta = rtw_get_stainfo(pstapriv, psta_addr);
auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
- ptr = get_recvframe_data(precv_frame);
- pfhdr = &precv_frame->u.hdr;
+ ptr = precv_frame->rx_data;
+ pfhdr = precv_frame;
pattrib = &pfhdr->attrib;
psta_addr = pattrib->ta;
@@ -593,7 +595,9 @@ _func_enter_;
/* allowed */
/* check decryption status, and decrypt the frame if needed */
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==0\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:precv_frame->hdr.attrib.privacy=%x\n", precv_frame->u.hdr.attrib.privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("portctrl:precv_frame->hdr.attrib.privacy=%x\n",
+ precv_frame->attrib.privacy));
if (pattrib->bdecrypted == 0)
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:prxstat->decrypted=%x\n", pattrib->bdecrypted));
@@ -613,19 +617,18 @@ _func_enter_;
prtnframe = precv_frame;
}
-_func_exit_;
return prtnframe;
}
-static int recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
+static int recv_decache(struct recv_frame *precv_frame, u8 bretry,
+ struct stainfo_rxcache *prxcache)
{
- int tid = precv_frame->u.hdr.attrib.priority;
+ int tid = precv_frame->attrib.priority;
- u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
- (precv_frame->u.hdr.attrib.frag_num & 0xf);
+ u16 seq_ctrl = ((precv_frame->attrib.seq_num&0xffff) << 4) |
+ (precv_frame->attrib.frag_num & 0xf);
-_func_enter_;
if (tid > 15) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
@@ -643,18 +646,17 @@ _func_enter_;
prxcache->tid_rxseq[tid] = seq_ctrl;
-_func_exit_;
return _SUCCESS;
}
-void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame);
-void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
+void process_pwrbit_data(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
unsigned char pwrbit;
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta = NULL;
@@ -675,10 +677,11 @@ void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame
#endif
}
-static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
+static void process_wmmps_data(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta = NULL;
@@ -730,15 +733,17 @@ static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv
#endif
}
-static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
+static void count_rx_stats(struct adapter *padapter,
+ struct recv_frame *prframe,
+ struct sta_info *sta)
{
int sz;
struct sta_info *psta = NULL;
struct stainfo_stats *pstats = NULL;
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct recv_priv *precvpriv = &padapter->recvpriv;
- sz = get_recvframe_len(prframe);
+ sz = prframe->len;
precvpriv->rx_bytes += sz;
padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
@@ -749,7 +754,7 @@ static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe,
if (sta)
psta = sta;
else
- psta = prframe->u.hdr.psta;
+ psta = prframe->psta;
if (psta) {
pstats = &psta->sta_stats;
@@ -761,15 +766,16 @@ static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe,
int sta2sta_data_frame(
struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta
);
-int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta)
+int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
+ struct sta_info **psta)
{
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 *ptr = precv_frame->rx_data;
int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *mybssid = get_bssid(pmlmepriv);
@@ -777,25 +783,24 @@ int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, s
u8 *sta_addr = NULL;
int bmcast = IS_MCAST(pattrib->dst);
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
/* filter packets that SA is myself or multicast or broadcast */
- if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
- if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
ret = _FAIL;
goto exit;
}
- if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
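
The _rtw_memcmp() conversions above follow one rule: the wrapper returned true on equality, while memcmp() returns 0, so _rtw_memcmp(a, b, n) becomes !memcmp(a, b, n) and !_rtw_memcmp(a, b, n) becomes memcmp(a, b, n). A minimal sketch of the idiom for address comparison:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/if_ether.h>

    static bool is_same_ether_addr(const u8 *a, const u8 *b)
    {
            /* memcmp() == 0 means equal, so negate for a boolean "same address" */
            return !memcmp(a, b, ETH_ALEN);
    }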
@@ -803,7 +808,7 @@ _func_enter_;
sta_addr = pattrib->src;
} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
/* For Station mode, sa and bssid should always be BSSID, and DA is my mac-address */
- if (!_rtw_memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid!=TA under STATION_MODE; drop pkt\n"));
ret = _FAIL;
goto exit;
@@ -818,7 +823,7 @@ _func_enter_;
}
} else { /* not mc-frame */
/* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */
- if (!_rtw_memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
@@ -853,17 +858,16 @@ _func_enter_;
}
exit:
-_func_exit_;
return ret;
}
static int ap2sta_data_frame (
struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta)
{
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int ret = _SUCCESS;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -871,20 +875,19 @@ static int ap2sta_data_frame (
u8 *myhwaddr = myid(&adapter->eeprompriv);
int bmcast = IS_MCAST(pattrib->dst);
-_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
(check_fwstate(pmlmepriv, _FW_LINKED) == true ||
check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
/* filter packets that SA is myself or multicast or broadcast */
- if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
ret = _FAIL;
goto exit;
}
/* da should be for me */
- if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
(" ap2sta_data_frame: compare DA fail; DA=%pM\n", (pattrib->dst)));
ret = _FAIL;
@@ -892,9 +895,9 @@ _func_enter_;
}
/* check BSSID */
- if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
(" ap2sta_data_frame: compare BSSID fail ; BSSID=%pM\n", (pattrib->bssid)));
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid=%pM\n", (mybssid)));
@@ -950,7 +953,7 @@ _func_enter_;
ret = RTW_RX_HANDLED;
goto exit;
} else {
- if (_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
+ if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
if (*psta == NULL) {
DBG_88E("issue_deauth to the ap =%pM for the reason(7)\n", (pattrib->bssid));
@@ -964,27 +967,25 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
static int sta2ap_data_frame(struct adapter *adapter,
- union recv_frame *precv_frame,
+ struct recv_frame *precv_frame,
struct sta_info **psta)
{
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 *ptr = precv_frame->rx_data;
unsigned char *mybssid = get_bssid(pmlmepriv);
int ret = _SUCCESS;
-_func_enter_;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
- if (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
ret = _FAIL;
goto exit;
}
@@ -1014,7 +1015,7 @@ _func_enter_;
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
- if (!_rtw_memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
+ if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
}
@@ -1026,25 +1027,23 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
static int validate_recv_ctrl_frame(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
#ifdef CONFIG_88EU_AP_MODE
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
- u8 *pframe = precv_frame->u.hdr.rx_data;
- /* uint len = precv_frame->u.hdr.len; */
+ u8 *pframe = precv_frame->rx_data;
if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
return _FAIL;
/* receive the frames that ra(a1) is my address */
- if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
return _FAIL;
/* only handle ps-poll */
@@ -1098,12 +1097,12 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
if ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == false) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -1124,7 +1123,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
@@ -1142,7 +1141,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
/* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
@@ -1157,10 +1156,11 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
return _FAIL;
}
-union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame);
+struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
+ struct recv_frame *precv_frame);
static int validate_recv_mgnt_frame(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
struct sta_info *psta;
@@ -1173,18 +1173,20 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
/* for rx pkt statistics */
- psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+ psta = rtw_get_stainfo(&padapter->stapriv,
+ GetAddr2Ptr(precv_frame->rx_data));
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
- if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON) {
+ if (GetFrameSubType(precv_frame->rx_data) == WIFI_BEACON) {
psta->sta_stats.rx_beacon_pkts++;
- } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ) {
+ } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBEREQ) {
psta->sta_stats.rx_probereq_pkts++;
- } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
- if (_rtw_memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN) == true)
+ } else if (GetFrameSubType(precv_frame->rx_data) == WIFI_PROBERSP) {
+ if (!memcmp(padapter->eeprompriv.mac_addr,
+ GetAddr1Ptr(precv_frame->rx_data), ETH_ALEN))
psta->sta_stats.rx_probersp_pkts++;
- else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)) ||
- is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
+ else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)) ||
+ is_multicast_mac_addr(GetAddr1Ptr(precv_frame->rx_data)))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
@@ -1197,17 +1199,16 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
}
static int validate_recv_data_frame(struct adapter *adapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
u8 bretry;
u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
- u8 *ptr = precv_frame->u.hdr.rx_data;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
int ret = _SUCCESS;
-_func_enter_;
bretry = GetRetry(ptr);
pda = get_da(ptr);
@@ -1265,7 +1266,7 @@ _func_enter_;
/* psta->rssi = prxcmd->rssi; */
/* psta->signal_quality = prxcmd->sq; */
- precv_frame->u.hdr.psta = psta;
+ precv_frame->psta = psta;
pattrib->amsdu = 0;
pattrib->ack_policy = 0;
@@ -1286,7 +1287,7 @@ _func_enter_;
if (pattrib->order)/* HT-CTRL 11n */
pattrib->hdrlen += 4;
- precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+ precv_frame->preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
/* decache, drop duplicate recv packets */
if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
@@ -1312,12 +1313,12 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
-static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
+static int validate_recv_frame(struct adapter *adapter,
+ struct recv_frame *precv_frame)
{
/* shall check frame subtype, to / from ds, da, bssid */
@@ -1327,12 +1328,11 @@ static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_
u8 subtype;
int retval = _SUCCESS;
u8 bDumpRxPkt;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
- u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ u8 *ptr = precv_frame->rx_data;
u8 ver = (unsigned char) (*ptr)&0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-_func_enter_;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
@@ -1422,14 +1422,13 @@ _func_enter_;
exit:
-_func_exit_;
return retval;
}
/* remove the wlanhdr and add the eth_hdr */
-static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
+static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
{
int rmv_len;
u16 eth_type, len;
@@ -1439,13 +1438,10 @@ static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
struct ieee80211_snap_hdr *psnap;
int ret = _SUCCESS;
- struct adapter *adapter = precvframe->u.hdr.adapter;
+ struct adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-
- u8 *ptr = get_recvframe_data(precvframe); /* point to frame_ctrl field */
- struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
-
-_func_enter_;
+ u8 *ptr = precvframe->rx_data;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
if (pattrib->encrypt)
recvframe_pull_tail(precvframe, pattrib->icv_len);
@@ -1453,10 +1449,10 @@ _func_enter_;
psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
- if ((_rtw_memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
- (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
- (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2) == false)) ||
- _rtw_memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
+ (!memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2) == false)) ||
+ !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = true;
} else {
@@ -1465,7 +1461,7 @@ _func_enter_;
}
rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
- len = precvframe->u.hdr.len - rmv_len;
+ len = precvframe->len - rmv_len;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
@@ -1496,30 +1492,29 @@ _func_enter_;
memcpy(ptr+12, &be_tmp, 2);
}
-_func_exit_;
return ret;
}
/* perform defrag */
-static union recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q)
+static struct recv_frame *recvframe_defrag(struct adapter *adapter,
+ struct __queue *defrag_q)
{
struct list_head *plist, *phead;
u8 wlanhdr_offset;
u8 curfragnum;
- struct recv_frame_hdr *pfhdr, *pnfhdr;
- union recv_frame *prframe, *pnextrframe;
+ struct recv_frame *pfhdr, *pnfhdr;
+ struct recv_frame *prframe, *pnextrframe;
struct __queue *pfree_recv_queue;
-_func_enter_;
curfragnum = 0;
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = get_list_head(defrag_q);
- plist = get_next(phead);
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pfhdr = &prframe->u.hdr;
- rtw_list_delete(&(prframe->u.list));
+ plist = phead->next;
+ pfhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)pfhdr;
+ rtw_list_delete(&(prframe->list));
if (curfragnum != pfhdr->attrib.frag_num) {
/* the first fragment number must be 0 */
@@ -1534,11 +1529,11 @@ _func_enter_;
plist = get_list_head(defrag_q);
- plist = get_next(plist);
+ plist = plist->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame , u);
- pnfhdr = &pnextrframe->u.hdr;
+ pnfhdr = container_of(plist, struct recv_frame, list);
+ pnextrframe = (struct recv_frame *)pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1568,7 +1563,7 @@ _func_enter_;
recvframe_put(prframe, pnfhdr->len);
pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
- plist = get_next(plist);
+ plist = plist->next;
}
/* free the defrag_q queue and return the prframe */
@@ -1576,29 +1571,28 @@ _func_enter_;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
-_func_exit_;
return prframe;
}
/* check if need to defrag, if needed queue the frame to defrag_q */
-union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
+struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
+ struct recv_frame *precv_frame)
{
u8 ismfrag;
u8 fragnum;
u8 *psta_addr;
- struct recv_frame_hdr *pfhdr;
+ struct recv_frame *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv;
struct list_head *phead;
- union recv_frame *prtnframe = NULL;
+ struct recv_frame *prtnframe = NULL;
struct __queue *pfree_recv_queue, *pdefrag_q;
-_func_enter_;
pstapriv = &padapter->stapriv;
- pfhdr = &precv_frame->u.hdr;
+ pfhdr = precv_frame;
pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -1670,7 +1664,7 @@ _func_enter_;
}
}
- if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ if ((prtnframe != NULL) && (prtnframe->attrib.privacy)) {
/* after defrag we must check tkip mic code */
if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
@@ -1679,12 +1673,11 @@ _func_enter_;
}
}
-_func_exit_;
return prtnframe;
}
-static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
+static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
{
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
@@ -1698,16 +1691,16 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
int ret = _SUCCESS;
nr_subframes = 0;
- pattrib = &prframe->u.hdr.attrib;
+ pattrib = &prframe->attrib;
- recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
+ recvframe_pull(prframe, prframe->attrib.hdrlen);
- if (prframe->u.hdr.attrib.iv_len > 0)
- recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
+ if (prframe->attrib.iv_len > 0)
+ recvframe_pull(prframe, prframe->attrib.iv_len);
- a_len = prframe->u.hdr.len;
+ a_len = prframe->len;
- pdata = prframe->u.hdr.rx_data;
+ pdata = prframe->rx_data;
while (a_len > ETH_HLEN) {
/* Offset 12 denote 2 mac address */
@@ -1729,7 +1722,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
memcpy(data_ptr, pdata, nSubframe_Length);
} else {
- sub_skb = skb_clone(prframe->u.hdr.pkt, GFP_ATOMIC);
+ sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
sub_skb->data = pdata;
sub_skb->len = nSubframe_Length;
@@ -1768,9 +1761,9 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
/* convert hdr + possible LLC headers into Ethernet header */
eth_type = RTW_GET_BE16(&sub_skb->data[6]);
if (sub_skb->len >= 8 &&
- ((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- _rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
@@ -1796,7 +1789,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
exit:
- prframe->u.hdr.len = 0;
+ prframe->len = 0;
rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
return ret;
@@ -1832,70 +1825,72 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
return true;
}
-int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
-int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
+ struct recv_frame *prframe)
{
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
struct list_head *phead, *plist;
- union recv_frame *pnextrframe;
+ struct recv_frame *hdr;
struct rx_pkt_attrib *pnextattrib;
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (rtw_end_of_queue_search(phead, plist) == false) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pnextattrib = &pnextrframe->u.hdr.attrib;
+ hdr = container_of(plist, struct recv_frame, list);
+ pnextattrib = &hdr->attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
- plist = get_next(plist);
+ plist = plist->next;
else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
return false;
else
break;
}
- rtw_list_delete(&(prframe->u.hdr.list));
+ rtw_list_delete(&(prframe->list));
- rtw_list_insert_tail(&(prframe->u.hdr.list), plist);
+ rtw_list_insert_tail(&(prframe->list), plist);
return true;
}
static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
struct list_head *phead, *plist;
- union recv_frame *prframe;
+ struct recv_frame *prframe;
+ struct recv_frame *prhdr;
struct rx_pkt_attrib *pattrib;
int bPktInBuf = false;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
/* Handling some condition for forced indicate case. */
if (bforced) {
if (rtw_is_list_empty(phead))
return true;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
+ prhdr = container_of(plist, struct recv_frame, list);
+ pattrib = &prhdr->attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication. */
/* Check if there is any packet need indicate. */
while (!rtw_is_list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
+ prhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)prhdr;
+ pattrib = &prframe->attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkts_in_order: indicate=%d seq=%d amsdu=%d\n",
preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
- plist = get_next(plist);
- rtw_list_delete(&(prframe->u.hdr.list));
+ plist = plist->next;
+ rtw_list_delete(&(prframe->list));
if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
@@ -1924,11 +1919,12 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
return bPktInBuf;
}
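
The reordering code above compares 12-bit 802.11 sequence numbers modulo 4096 and advances indicate_seq with & 0xFFF. Assumed definitions of the helpers it relies on (the real ones live in the driver's headers, not in this hunk):

    /* "a < b" modulo 4096, decided by the sign bit of the 12-bit difference */
    #define SN_LESS(a, b)           (((a) - (b)) & 0x800)
    #define SN_EQUAL(a, b)          ((a) == (b))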
-static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
+static int recv_indicatepkt_reorder(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int retval = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
- struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
+ struct recv_reorder_ctrl *preorder_ctrl = prframe->preorder_ctrl;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
if (!pattrib->amsdu) {
@@ -2001,7 +1997,7 @@ static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *
spin_unlock_bh(&ppending_recvframe_queue->lock);
} else {
spin_unlock_bh(&ppending_recvframe_queue->lock);
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
_success_exit:
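
_cancel_timer_ex() was a thin wrapper around the kernel timer API; the hunk above calls del_timer_sync() directly. A minimal sketch, assuming only that the reorder control embeds a struct timer_list; del_timer_sync() also waits for a handler running on another CPU, which is why the call sits after the pending-queue lock has been dropped:

    #include <linux/timer.h>

    struct reorder_ctrl {                           /* abbreviated stand-in */
            struct timer_list reordering_ctrl_timer;
    };

    static void stop_reorder_timer(struct reorder_ctrl *ctrl)
    {
            /* deactivates the timer and waits for a running handler to finish;
             * must not be called while holding a lock the handler also takes */
            del_timer_sync(&ctrl->reordering_ctrl_timer);
    }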
@@ -2032,17 +2028,14 @@ void rtw_reordering_ctrl_timeout_handler(void *pcontext)
spin_unlock_bh(&ppending_recvframe_queue->lock);
}
-static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
+static int process_recv_indicatepkts(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int retval = _SUCCESS;
- /* struct recv_priv *precvpriv = &padapter->recvpriv; */
- /* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
if (phtpriv->ht_option) { /* B/G/N Mode */
- /* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
-
if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
/* including perform A-MPDU Rx Ordering Buffer Control */
if ((!padapter->bDriverStopped) &&
@@ -2075,10 +2068,11 @@ static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame
return retval;
}
-static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
+static int recv_func_prehandle(struct adapter *padapter,
+ struct recv_frame *rframe)
{
int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &rframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &rframe->attrib;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2110,10 +2104,11 @@ exit:
return ret;
}
-static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
+static int recv_func_posthandle(struct adapter *padapter,
+ struct recv_frame *prframe)
{
int ret = _SUCCESS;
- union recv_frame *orig_prframe = prframe;
+ struct recv_frame *orig_prframe = prframe;
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -2155,16 +2150,16 @@ _recv_data_drop:
return ret;
}
-static int recv_func(struct adapter *padapter, union recv_frame *rframe)
+static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
{
int ret;
- struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &rframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
- union recv_frame *pending_frame;
+ struct recv_frame *pending_frame;
while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
@@ -2193,15 +2188,14 @@ exit:
return ret;
}
-s32 rtw_recv_entry(union recv_frame *precvframe)
+s32 rtw_recv_entry(struct recv_frame *precvframe)
{
struct adapter *padapter;
struct recv_priv *precvpriv;
s32 ret = _SUCCESS;
-_func_enter_;
- padapter = precvframe->u.hdr.adapter;
+ padapter = precvframe->adapter;
precvpriv = &padapter->recvpriv;
@@ -2213,7 +2207,6 @@ _func_enter_;
precvpriv->rx_pkts++;
-_func_exit_;
return ret;
@@ -2222,7 +2215,6 @@ _recv_entry_drop:
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
-_func_exit_;
return ret;
}
@@ -2244,13 +2236,13 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
} else {
if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_strength = recvpriv->signal_strength_data.avg_val;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_strength_data.update_req = 1;
}
if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- /* after avg_vals are accquired, we can re-stat the signal values */
+ /* after avg_vals are acquired, we can re-stat the signal values */
recvpriv->signal_qual_data.update_req = 1;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index e08845729772..c4b16ea6348a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -41,7 +41,6 @@ static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
u32 stateindex;
u8 *state;
u32 counter;
-_func_enter_;
state = parc4ctx->state;
parc4ctx->x = 0;
parc4ctx->y = 0;
@@ -58,7 +57,6 @@ _func_enter_;
if (++keyindex >= key_len)
keyindex = 0;
}
-_func_exit_;
}
static u32 arcfour_byte(struct arc4context *parc4ctx)
@@ -67,7 +65,6 @@ static u32 arcfour_byte(struct arc4context *parc4ctx)
u32 y;
u32 sx, sy;
u8 *state;
-_func_enter_;
state = parc4ctx->state;
x = (parc4ctx->x + 1) & 0xff;
sx = state[x];
@@ -77,17 +74,14 @@ _func_enter_;
parc4ctx->y = y;
state[y] = (u8)sx;
state[x] = (u8)sy;
-_func_exit_;
return state[(sx + sy) & 0xff];
}
static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
{
u32 i;
-_func_enter_;
for (i = 0; i < len; i++)
dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
-_func_exit_;
}
static int bcrc32initialized;
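
arcfour_init()/arcfour_encrypt() above are the driver's RC4 implementation used for the WEP and TKIP stream cipher. A usage sketch based on the signatures in this file and the key layout of rtw_wep_decrypt() further down (3-byte IV prepended to the configured default key); the wrapper and parameter names are illustrative:

    static void wep_rc4_crypt(u8 *iv, u8 *defkey, u32 keylength,
                              u8 *payload, u32 length)
    {
            struct arc4context ctx;
            u8 wepkey[16];

            memcpy(wepkey, iv, 3);                  /* per-frame IV */
            memcpy(wepkey + 3, defkey, keylength);  /* configured default key */
            arcfour_init(&ctx, wepkey, 3 + keylength);
            arcfour_encrypt(&ctx, payload, payload, length);  /* in place is fine */
    }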
@@ -102,9 +96,8 @@ static u8 crc32_reverseBit(u8 data)
static void crc32_init(void)
{
-_func_enter_;
if (bcrc32initialized == 1) {
- goto exit;
+ return;
} else {
int i, j;
u32 c;
@@ -126,15 +119,12 @@ _func_enter_;
}
bcrc32initialized = 1;
}
-exit:
-_func_exit_;
}
static __le32 getcrc32(u8 *buf, int len)
{
u8 *p;
u32 crc;
-_func_enter_;
if (bcrc32initialized == 0)
crc32_init();
@@ -142,7 +132,6 @@ _func_enter_;
for (p = buf; len > 0; ++p, --len)
crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
-_func_exit_;
return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
}
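
crc32_init()/getcrc32() above compute the WEP/TKIP ICV. An equivalent reflected-table CRC-32 for reference, a sketch rather than the driver's exact code (the driver builds the same table by bit-reversing CRC32_POLY via crc32_reverseBit()); 0xEDB88320 is the bit-reversed IEEE 802.3 polynomial:

    #include <linux/types.h>

    static u32 crc32_table[256];

    static void crc32_build_table(void)
    {
            u32 c;
            int i, j;

            for (i = 0; i < 256; i++) {
                    c = i;
                    for (j = 0; j < 8; j++)
                            c = (c & 1) ? (c >> 1) ^ 0xEDB88320 : c >> 1;
                    crc32_table[i] = c;
            }
    }

    static u32 crc32_compute(const u8 *buf, int len)
    {
            u32 crc = 0xffffffff;

            while (len-- > 0)
                    crc = crc32_table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);

            return ~crc;    /* transmit the complement, per the CRC-32 spec */
    }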
@@ -165,7 +154,6 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return;
@@ -206,7 +194,6 @@ _func_enter_;
}
}
-_func_exit_;
}
void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
@@ -218,12 +205,11 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
u32 keylength;
u8 *pframe, *payload, *iv, wepkey[16];
u8 keyindex;
- struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
+ struct rx_pkt_attrib *prxattrib = &(((struct recv_frame *)precvframe)->attrib);
struct security_priv *psecuritypriv = &padapter->securitypriv;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* start to decrypt recvframe */
if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
@@ -232,7 +218,7 @@ _func_enter_;
keylength = psecuritypriv->dot11DefKeylen[keyindex];
memcpy(&wepkey[0], iv, 3);
memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
@@ -252,7 +238,6 @@ _func_enter_;
&crc, &payload[length-4]));
}
}
-_func_exit_;
return;
}
@@ -263,10 +248,8 @@ static u32 secmicgetuint32(u8 *p)
{
s32 i;
u32 res = 0;
-_func_enter_;
for (i = 0; i < 4; i++)
res |= ((u32)(*p++)) << (8*i);
-_func_exit_;
return res;
}
@@ -274,39 +257,32 @@ static void secmicputuint32(u8 *p, u32 val)
/* Convert from Us3232 to Byte[] in a portable way */
{
long i;
-_func_enter_;
for (i = 0; i < 4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
-_func_exit_;
}
static void secmicclear(struct mic_data *pmicdata)
{
/* Reset the state to the empty message. */
-_func_enter_;
pmicdata->L = pmicdata->K0;
pmicdata->R = pmicdata->K1;
pmicdata->nBytesInM = 0;
pmicdata->M = 0;
-_func_exit_;
}
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
{
/* Set the key */
-_func_enter_;
pmicdata->K0 = secmicgetuint32(key);
pmicdata->K1 = secmicgetuint32(key + 4);
/* and reset the message */
secmicclear(pmicdata);
-_func_exit_;
}
void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
{
-_func_enter_;
/* Append the byte to our word-sized buffer */
pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
pmicdata->nBytesInM++;
@@ -325,23 +301,19 @@ _func_enter_;
pmicdata->M = 0;
pmicdata->nBytesInM = 0;
}
-_func_exit_;
}
void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
{
-_func_enter_;
/* This is simple */
while (nbytes > 0) {
rtw_secmicappendbyte(pmicdata, *src++);
nbytes--;
}
-_func_exit_;
}
void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
{
-_func_enter_;
/* Append the minimum padding */
rtw_secmicappendbyte(pmicdata, 0x5a);
rtw_secmicappendbyte(pmicdata, 0);
@@ -356,14 +328,12 @@ _func_enter_;
secmicputuint32(dst+4, pmicdata->R);
/* Reset to the empty message. */
secmicclear(pmicdata);
-_func_exit_;
}
void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
{
struct mic_data micdata;
u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
-_func_enter_;
rtw_secmicsetkey(&micdata, key);
priority[0] = pri;
@@ -386,7 +356,6 @@ _func_enter_;
rtw_secmicappend(&micdata, data, data_len);
rtw_secgetmic(&micdata, mic_code);
-_func_exit_;
}
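
rtw_seccalctkipmic() above drives the Michael MIC helpers. A usage sketch built only from the signatures visible in these hunks; the DA/SA/priority ordering of the MIC header is the standard TKIP layout and is assumed here rather than copied from the elided body of the function:

    static void calc_tkip_mic(u8 *mic_key, u8 *da, u8 *sa, u8 pri,
                              u8 *payload, u32 payload_len, u8 *mic_out)
    {
            struct mic_data micdata;
            u8 priority[4] = { pri, 0, 0, 0 };

            rtw_secmicsetkey(&micdata, mic_key);            /* 8-byte TKIP MIC key */
            rtw_secmicappend(&micdata, da, ETH_ALEN);       /* MIC header: DA, SA, priority */
            rtw_secmicappend(&micdata, sa, ETH_ALEN);
            rtw_secmicappend(&micdata, priority, 4);
            rtw_secmicappend(&micdata, payload, payload_len);
            rtw_secgetmic(&micdata, mic_out);               /* 8-byte MIC result */
    }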
@@ -505,7 +474,6 @@ static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM)
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
int i;
-_func_enter_;
/* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
p1k[0] = Lo16(iv32);
p1k[1] = Hi16(iv32);
@@ -523,7 +491,6 @@ _func_enter_;
p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
}
-_func_exit_;
}
/*
@@ -553,7 +520,6 @@ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
int i;
u16 PPK[6]; /* temporary key for mixing */
-_func_enter_;
/* Note: all adds in the PPK[] equations below are mod 2**16 */
for (i = 0; i < 5; i++)
PPK[i] = p1k[i]; /* first, copy P1K to PPK */
@@ -590,7 +556,6 @@ _func_enter_;
rc4key[4+2*i] = Lo8(PPK[i]);
rc4key[5+2*i] = Hi8(PPK[i]);
}
-_func_exit_;
}
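
phase1()/phase2() above are the TKIP per-packet key mixing. A usage sketch matching the signatures shown; the callers in rtw_tkip_encrypt()/rtw_tkip_decrypt() are not fully visible here, so everything except the two phase calls is an assumption:

    static void tkip_mix_key(const u8 *tk, const u8 *ta, u32 iv32, u16 iv16,
                             u8 *rc4key)
    {
            u16 p1k[5];

            /* phase 1 depends only on the TA and the high 32 bits of the TSC,
             * so it needs recomputing only when iv32 changes */
            phase1(p1k, tk, ta, iv32);

            /* phase 2 folds in the low 16 bits and yields the 16-byte RC4 key;
             * its first 3 bytes double as the WEP IV on the air */
            phase2(rc4key, tk, p1k, iv16);
    }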
/* The hlen isn't include the IV */
@@ -612,7 +577,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u32 res = _SUCCESS;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return _FAIL;
@@ -672,7 +636,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
return res;
}
@@ -690,13 +653,12 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
@@ -716,7 +678,7 @@ _func_enter_;
iv = pframe+prxattrib->hdrlen;
payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
GET_TKIP_PN(iv, dot11txpn);
@@ -747,7 +709,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
exit:
return res;
}
@@ -821,19 +782,15 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
static void xor_128(u8 *a, u8 *b, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = a[i] ^ b[i];
-_func_exit_;
}
static void xor_32(u8 *a, u8 *b, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 4; i++)
out[i] = a[i] ^ b[i];
-_func_exit_;
}
static u8 sbox(u8 a)
@@ -849,7 +806,6 @@ static void next_key(u8 *key, int round)
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
0x1b, 0x36, 0x36, 0x36
};
-_func_enter_;
sbox_key[0] = sbox(key[13]);
sbox_key[1] = sbox(key[14]);
sbox_key[2] = sbox(key[15]);
@@ -863,21 +819,17 @@ _func_enter_;
xor_32(&key[4], &key[0], &key[4]);
xor_32(&key[8], &key[4], &key[8]);
xor_32(&key[12], &key[8], &key[12]);
-_func_exit_;
}
static void byte_sub(u8 *in, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = sbox(in[i]);
-_func_exit_;
}
static void shift_row(u8 *in, u8 *out)
{
-_func_enter_;
out[0] = in[0];
out[1] = in[5];
out[2] = in[10];
@@ -894,7 +846,6 @@ _func_enter_;
out[13] = in[1];
out[14] = in[6];
out[15] = in[11];
-_func_exit_;
}
static void mix_column(u8 *in, u8 *out)
@@ -908,7 +859,6 @@ static void mix_column(u8 *in, u8 *out)
u8 rotr[4];
u8 temp[4];
u8 tempb[4];
-_func_enter_;
for (i = 0 ; i < 4; i++) {
if ((in[i] & 0x80) == 0x80)
add1b[i] = 0x1b;
@@ -952,7 +902,6 @@ _func_enter_;
xor_32(add1bf7, rotr, temp);
xor_32(swap_halfs, rotl, tempb);
xor_32(temp, tempb, out);
-_func_exit_;
}
static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
@@ -962,7 +911,6 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
u8 intermediatea[16];
u8 intermediateb[16];
u8 round_key[16];
-_func_enter_;
for (i = 0; i < 16; i++)
round_key[i] = key[i];
for (round = 0; round < 11; round++) {
@@ -984,7 +932,6 @@ _func_enter_;
next_key(round_key, round);
}
}
-_func_exit_;
}
/************************************************/
@@ -995,7 +942,6 @@ static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
uint payload_length, u8 *pn_vector)
{
int i;
-_func_enter_;
mic_iv[0] = 0x59;
if (qc_exists && a4_exists)
mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
@@ -1009,7 +955,6 @@ _func_enter_;
mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
mic_iv[14] = (unsigned char) (payload_length / 256);
mic_iv[15] = (unsigned char) (payload_length % 256);
-_func_exit_;
}
/************************************************/
@@ -1019,7 +964,6 @@ _func_exit_;
/************************************************/
static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
{
-_func_enter_;
mic_header1[0] = (u8)((header_length - 2) / 256);
mic_header1[1] = (u8)((header_length - 2) % 256);
mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
@@ -1036,7 +980,6 @@ _func_enter_;
mic_header1[13] = mpdu[13];
mic_header1[14] = mpdu[14];
mic_header1[15] = mpdu[15];
-_func_exit_;
}
/************************************************/
@@ -1047,7 +990,6 @@ _func_exit_;
static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
mic_header2[i] = 0x00;
@@ -1079,7 +1021,6 @@ _func_enter_;
mic_header2[15] = mpdu[31] & 0x00;
}
-_func_exit_;
}
/************************************************/
@@ -1090,7 +1031,6 @@ _func_exit_;
static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
ctr_preload[i] = 0x00;
i = 0;
@@ -1107,7 +1047,6 @@ _func_enter_;
ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
ctr_preload[15] = (unsigned char) (c % 256);
-_func_exit_;
}
/************************************/
@@ -1117,10 +1056,8 @@ _func_exit_;
static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
{
int i;
-_func_enter_;
for (i = 0; i < 16; i++)
out[i] = ina[i] ^ inb[i];
-_func_exit_;
}
static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
@@ -1142,7 +1079,6 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
-_func_enter_;
frsubtype = frsubtype>>4;
_rtw_memset((void *)mic_iv, 0, 16);
@@ -1253,7 +1189,6 @@ _func_enter_;
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
pframe[payload_index++] = chain_buffer[j];
-_func_exit_;
return _SUCCESS;
}
@@ -1274,7 +1209,6 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* uint offset = 0; */
u32 res = _SUCCESS;
-_func_enter_;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
return _FAIL;
@@ -1318,7 +1252,6 @@ _func_enter_;
}
-_func_exit_;
return res;
}
@@ -1344,7 +1277,6 @@ static int aes_decipher(u8 *key, uint hdrlen,
/* uint offset = 0; */
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
-_func_enter_;
frsubtype = frsubtype>>4;
_rtw_memset((void *)mic_iv, 0, 16);
@@ -1514,7 +1446,6 @@ _func_enter_;
res = _FAIL;
}
}
-_func_exit_;
return res;
}
@@ -1524,11 +1455,10 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
int length;
u8 *pframe, *prwskey; /* *payload,*iv */
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
-_func_enter_;
- pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to encrypt each fragment */
if ((prxattrib->encrypt == _AES_)) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
@@ -1552,14 +1482,13 @@ _func_enter_;
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
- length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ length = ((struct recv_frame *)precvframe)->len-prxattrib->hdrlen-prxattrib->iv_len;
res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
} else {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo==NULL!!!\n"));
res = _FAIL;
}
}
-_func_exit_;
exit:
return res;
}
@@ -1767,7 +1696,6 @@ void rtw_use_tkipkey_handler(void *FunctionContext)
{
struct adapter *padapter = (struct adapter *)FunctionContext;
-_func_enter_;
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler ^^^\n"));
@@ -1775,5 +1703,4 @@ _func_enter_;
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler padapter->securitypriv.busetkipkey=%d^^^\n", padapter->securitypriv.busetkipkey));
-_func_exit_;
}
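The rtw_aes_decrypt() hunks above also show the recv_frame conversion that runs through this series: the old union recv_frame, whose fields sat behind a u.hdr member, becomes a flat struct recv_frame, so every accessor drops the u.hdr. indirection. A minimal sketch of the before/after access pattern, using only the field names visible in the hunks above (the full structure layout is assumed, not shown here):

    /* before: union recv_frame, payload reached through u.hdr */
    struct rx_pkt_attrib *old_attrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
    u8 *old_data = ((union recv_frame *)precvframe)->u.hdr.rx_data;

    /* after: flat struct recv_frame, fields are direct members */
    struct rx_pkt_attrib *new_attrib = &((struct recv_frame *)precvframe)->attrib;
    u8 *new_data = ((struct recv_frame *)precvframe)->rx_data;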
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 02e1e1f8b3ea..2d0b60686a01 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -25,10 +25,10 @@
#include <xmit_osdep.h>
#include <mlme_osdep.h>
#include <sta_info.h>
+#include <linux/vmalloc.h>
static void _rtw_init_stainfo(struct sta_info *psta)
{
-_func_enter_;
_rtw_memset((u8 *)psta, 0, sizeof (struct sta_info));
spin_lock_init(&psta->lock);
@@ -69,7 +69,6 @@ _func_enter_;
#endif /* CONFIG_88EU_AP_MODE */
-_func_exit_;
}
u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
@@ -77,9 +76,8 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
struct sta_info *psta;
s32 i;
-_func_enter_;
- pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4);
+ pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA + 4);
if (!pstapriv->pallocated_stainfo_buf)
return _FAIL;
@@ -125,7 +123,6 @@ _func_enter_;
pstapriv->max_num_sta = NUM_STA;
#endif
-_func_exit_;
return _SUCCESS;
}
@@ -154,21 +151,19 @@ static void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
struct list_head *plist, *phead;
struct sta_info *psta = NULL;
-_func_enter_;
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = get_list_head(&pstapriv->free_sta_queue);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info , list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info , list);
+ plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
-_func_exit_;
}
static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
@@ -183,22 +178,21 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
struct recv_reorder_ctrl *preorder_ctrl;
int index;
-_func_enter_;
if (pstapriv) {
/* delete all reordering_ctrl_timer */
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
int i;
- psta = LIST_CONTAINOR(plist, struct sta_info , hash_list);
- plist = get_next(plist);
+ psta = container_of(plist, struct sta_info , hash_list);
+ plist = plist->next;
for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
}
}
@@ -208,10 +202,9 @@ _func_enter_;
rtw_mfree_sta_priv_lock(pstapriv);
if (pstapriv->pallocated_stainfo_buf)
- rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4);
+ vfree(pstapriv->pallocated_stainfo_buf);
}
-_func_exit_;
return _SUCCESS;
}
@@ -225,7 +218,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
int i = 0;
u16 wRxSeqInitialValue = 0xffff;
-_func_enter_;
pfree_sta_queue = &pstapriv->free_sta_queue;
@@ -235,7 +227,7 @@ _func_enter_;
spin_unlock_bh(&pfree_sta_queue->lock);
psta = NULL;
} else {
- psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
+ psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
rtw_list_delete(&(psta->list));
spin_unlock_bh(&pfree_sta_queue->lock);
_rtw_init_stainfo(psta);
@@ -297,9 +289,6 @@ _func_enter_;
}
exit:
-
-_func_exit_;
-
return psta;
}
@@ -313,7 +302,6 @@ u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
-_func_enter_;
if (psta == NULL)
goto exit;
@@ -353,32 +341,34 @@ _func_enter_;
_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
_rtw_init_sta_recv_priv(&psta->sta_recvpriv);
- _cancel_timer_ex(&psta->addba_retry_timer);
+ del_timer_sync(&psta->addba_retry_timer);
/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
- union recv_frame *prframe;
+ struct recv_frame *prhdr;
+ struct recv_frame *prframe;
struct __queue *ppending_recvframe_queue;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
preorder_ctrl = &psta->recvreorder_ctrl[i];
- _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
spin_lock_bh(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_is_list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prhdr = container_of(plist, struct recv_frame, list);
+ prframe = (struct recv_frame *)prhdr;
- plist = get_next(plist);
+ plist = plist->next;
- rtw_list_delete(&(prframe->u.hdr.list));
+ rtw_list_delete(&(prframe->list));
rtw_free_recvframe(prframe, pfree_recv_queue);
}
@@ -428,7 +418,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -442,32 +431,26 @@ void rtw_free_all_stainfo(struct adapter *padapter)
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
-_func_enter_;
if (pstapriv->asoc_sta_count == 1)
- goto exit;
+ return;
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- psta = LIST_CONTAINOR(plist, struct sta_info , hash_list);
+ psta = container_of(plist, struct sta_info , hash_list);
- plist = get_next(plist);
+ plist = plist->next;
if (pbcmc_stainfo != psta)
rtw_free_stainfo(padapter , psta);
}
}
-
spin_unlock_bh(&pstapriv->sta_hash_lock);
-
-exit:
-
-_func_exit_;
}
/* any station allocated can be searched by hash list */
@@ -479,7 +462,6 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u8 *addr;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-_func_enter_;
if (hwaddr == NULL)
return NULL;
@@ -494,21 +476,20 @@ _func_enter_;
spin_lock_bh(&pstapriv->sta_hash_lock);
phead = &(pstapriv->sta_hash[index]);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- if ((_rtw_memcmp(psta->hwaddr, addr, ETH_ALEN)) == true) {
+ if ((!memcmp(psta->hwaddr, addr, ETH_ALEN)) == true) {
/* if found the matched address */
break;
}
psta = NULL;
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
-_func_exit_;
return psta;
}
@@ -519,7 +500,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
-_func_enter_;
psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
@@ -533,7 +513,6 @@ _func_enter_;
psta->mac_id = 1;
exit:
-_func_exit_;
return res;
}
@@ -542,9 +521,7 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-_func_enter_;
psta = rtw_get_stainfo(pstapriv, bc_addr);
-_func_exit_;
return psta;
}
@@ -561,12 +538,12 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_lock_bh(&(pacl_node_q->lock));
phead = get_list_head(pacl_node_q);
- plist = get_next(phead);
+ plist = phead->next;
while ((!rtw_end_of_queue_search(phead, plist))) {
- paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
- plist = get_next(plist);
+ paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
+ plist = plist->next;
- if (_rtw_memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
+ if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
if (paclnode->valid) {
match = true;
break;
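The rtw_sta_mgt.c hunks above are dominated by one idiom swap: the driver-private list helpers give way to the plain struct list_head operations they wrapped, so get_next(p) becomes p->next and LIST_CONTAINOR() becomes the kernel's container_of(); the private timer helper _cancel_timer_ex() likewise becomes del_timer_sync(). A sketch of the resulting hash-bucket walk, reusing the names from those hunks (rtw_end_of_queue_search() is kept, as in the patch):

    struct list_head *phead = &pstapriv->sta_hash[index];
    struct list_head *plist = phead->next;                      /* was get_next(phead) */
    struct sta_info *psta;

    while (!rtw_end_of_queue_search(phead, plist)) {
        psta = container_of(plist, struct sta_info, hash_list); /* was LIST_CONTAINOR() */
        plist = plist->next;                                    /* was get_next(plist) */
        /* ... operate on psta ... */
    }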
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..3dd90599fd4b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -929,7 +929,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
return _FAIL;
}
- if (_rtw_memcmp(cur_network->network.MacAddress, pbssid, 6) == false) {
+ if (!memcmp(cur_network->network.MacAddress, pbssid, 6) == false) {
DBG_88E("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n%pM %pM\n",
(pbssid), (cur_network->network.MacAddress));
return true;
@@ -999,28 +999,22 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
cur_network->network.Ssid.SsidLength));
- if (!_rtw_memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
+ if (memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
DBG_88E("%s(), SSID is not match return FAIL\n", __func__);
@@ -1056,7 +1050,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
- DBG_88E("%s(): enctyp is not match , return FAIL\n", __func__);
+ DBG_88E("%s(): encryption protocol is not match , return FAIL\n", __func__);
goto _mismatch;
}
@@ -1096,12 +1090,10 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
kfree(bssid);
- _func_exit_;
return _SUCCESS;
_mismatch:
kfree(bssid);
- _func_exit_;
return _FAIL;
}
@@ -1147,11 +1139,11 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) && (_rtw_memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
return true;
break;
case _RSN_IE_2_:
- if (_rtw_memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
+ if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
return true;
default:
break;
@@ -1178,14 +1170,14 @@ unsigned int should_forbid_n_rate(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4) &&
- ((_rtw_memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
- (_rtw_memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
+ if (!memcmp(pIE->data, RTW_WPA_OUI, 4) &&
+ ((!memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
+ (!memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
return false;
break;
case _RSN_IE_2_:
- if ((_rtw_memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
- (_rtw_memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
+ if ((!memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
+ (!memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
return false;
default:
break;
@@ -1214,7 +1206,7 @@ unsigned int is_ap_in_wep(struct adapter *padapter)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4))
+ if (!memcmp(pIE->data, RTW_WPA_OUI, 4))
return false;
break;
case _RSN_IE_2_:
@@ -1406,35 +1398,35 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
switch (pIE->ElementID) {
case _VENDOR_SPECIFIC_IE_:
- if ((_rtw_memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
- (_rtw_memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+ if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
+ (!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
- } else if ((_rtw_memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
- (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3))) {
+ } else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
+ (!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
- } else if (_rtw_memcmp(pIE->data, MARVELL_OUI, 3)) {
+ } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
DBG_88E("link to Marvell AP\n");
return HT_IOT_PEER_MARVELL;
- } else if (_rtw_memcmp(pIE->data, RALINK_OUI, 3)) {
+ } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
if (!ralink_vendor_flag) {
ralink_vendor_flag = 1;
} else {
DBG_88E("link to Ralink AP\n");
return HT_IOT_PEER_RALINK;
}
- } else if (_rtw_memcmp(pIE->data, CISCO_OUI, 3)) {
+ } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
DBG_88E("link to Cisco AP\n");
return HT_IOT_PEER_CISCO;
- } else if (_rtw_memcmp(pIE->data, REALTEK_OUI, 3)) {
+ } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
DBG_88E("link to Realtek 96B\n");
return HT_IOT_PEER_REALTEK;
- } else if (_rtw_memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
DBG_88E("link to Airgo Cap\n");
return HT_IOT_PEER_AIRGO;
- } else if (_rtw_memcmp(pIE->data, EPIGRAM_OUI, 3)) {
+ } else if (!memcmp(pIE->data, EPIGRAM_OUI, 3)) {
epigram_vendor_flag = 1;
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 24182fbc6a71..8d4265fb486d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -23,25 +23,22 @@
#include <drv_types.h>
#include <wifi.h>
#include <osdep_intf.h>
-#include <ip.h>
#include <usb_ops.h>
#include <usb_osintf.h>
+#include <linux/vmalloc.h>
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static void _init_txservq(struct tx_servq *ptxservq)
{
-_func_enter_;
_rtw_init_listhead(&ptxservq->tx_pending);
_rtw_init_queue(&ptxservq->sta_pending);
ptxservq->qcnt = 0;
-_func_exit_;
}
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
-_func_enter_;
_rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
@@ -51,7 +48,6 @@ _func_enter_;
_rtw_init_listhead(&psta_xmitpriv->legacy_dz);
_rtw_init_listhead(&psta_xmitpriv->apsd);
-_func_exit_;
}
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
@@ -63,9 +59,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-_func_enter_;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
sema_init(&pxmitpriv->xmit_sema, 0);
@@ -91,7 +86,7 @@ _func_enter_;
Please also apply free_txobj to link_up all the xmit_frames...
*/
- pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+ pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
if (pxmitpriv->pallocated_frame_buf == NULL) {
pxmitpriv->pxmit_frame_buf = NULL;
@@ -129,7 +124,7 @@ _func_enter_;
_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
- pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmitbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
@@ -171,7 +166,7 @@ _func_enter_;
/* Init xmit extension buff */
_rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
- pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
@@ -226,7 +221,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -240,12 +234,11 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
- _func_enter_;
rtw_hal_free_xmit_priv(padapter);
if (pxmitpriv->pxmit_frame_buf == NULL)
- goto out;
+ return;
for (i = 0; i < NR_XMITFRAME; i++) {
rtw_os_xmit_complete(padapter, pxmitframe);
@@ -259,10 +252,10 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_frame_buf)
- rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+ vfree(pxmitpriv->pallocated_frame_buf);
if (pxmitpriv->pallocated_xmitbuf)
- rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmitbuf);
/* free xmit extension buff */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -272,16 +265,12 @@ void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
}
if (pxmitpriv->pallocated_xmit_extbuf) {
- rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
}
rtw_free_hwxmits(padapter);
mutex_destroy(&pxmitpriv->ack_tx_mutex);
-
-out:
-
-_func_exit_;
}
static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -455,7 +444,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
int res = _SUCCESS;
- _func_enter_;
_rtw_open_pktfile(pkt, &pktfile);
_rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
@@ -639,7 +627,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
exit:
-_func_exit_;
return res;
}
@@ -662,7 +649,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
else
stainfo = rtw_get_stainfo(&padapter->stapriv , &pattrib->ra[0]);
-_func_enter_;
hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
@@ -676,12 +662,12 @@ _func_enter_;
pframe = pxmitframe->buf_addr + hw_hdr_offset;
if (bmcst) {
- if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
return _FAIL;
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
} else {
- if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
+ if (!memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16)) {
/* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
/* msleep(10); */
return _FAIL;
@@ -760,7 +746,6 @@ _func_enter_;
}
}
-_func_exit_;
return _SUCCESS;
}
@@ -769,7 +754,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
{
struct pkt_attrib *pattrib = &pxmitframe->attrib;
-_func_enter_;
if (pattrib->bswenc) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
@@ -791,7 +775,6 @@ _func_enter_;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
}
-_func_exit_;
return _SUCCESS;
}
@@ -812,7 +795,6 @@ s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pat
int bmcst = IS_MCAST(pattrib->ra);
-_func_enter_;
if (pattrib->psta) {
psta = pattrib->psta;
@@ -918,7 +900,6 @@ _func_enter_;
}
exit:
-_func_exit_;
return res;
}
@@ -1007,7 +988,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
s32 bmcst = IS_MCAST(pattrib->ra);
s32 res = _SUCCESS;
-_func_enter_;
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
@@ -1145,7 +1125,6 @@ _func_enter_;
exit:
-_func_exit_;
return res;
}
@@ -1162,7 +1141,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
struct ieee80211_snap_hdr *snap;
u8 *oui;
-_func_enter_;
snap = (struct ieee80211_snap_hdr *)data;
snap->dsap = 0xaa;
@@ -1180,7 +1158,6 @@ _func_enter_;
*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
-_func_exit_;
return SNAP_SIZE + sizeof(u16);
}
@@ -1193,7 +1170,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
-_func_enter_;
switch (pxmitpriv->vcs_setting) {
case DISABLE_VCS:
@@ -1220,7 +1196,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
@@ -1250,7 +1225,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-_func_enter_;
spin_lock_irqsave(&pfree_queue->lock, irql);
@@ -1259,9 +1233,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ pxmitbuf = container_of(plist, struct xmit_buf, list);
rtw_list_delete(&(pxmitbuf->list));
}
@@ -1280,7 +1254,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-_func_exit_;
return pxmitbuf;
}
@@ -1290,7 +1263,6 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irql;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-_func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
@@ -1304,7 +1276,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-_func_exit_;
return _SUCCESS;
}
@@ -1316,7 +1287,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-_func_enter_;
/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
@@ -1327,9 +1297,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_xmitbuf_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ pxmitbuf = container_of(plist, struct xmit_buf, list);
rtw_list_delete(&(pxmitbuf->list));
}
@@ -1344,7 +1314,6 @@ _func_enter_;
}
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
-_func_exit_;
return pxmitbuf;
}
@@ -1354,7 +1323,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irql;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-_func_enter_;
if (pxmitbuf == NULL)
return _FAIL;
@@ -1376,7 +1344,6 @@ _func_enter_;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
}
-_func_exit_;
return _SUCCESS;
}
@@ -1405,7 +1372,6 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
struct list_head *plist, *phead;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
-_func_enter_;
spin_lock_bh(&pfree_xmit_queue->lock);
@@ -1415,9 +1381,9 @@ _func_enter_;
} else {
phead = get_list_head(pfree_xmit_queue);
- plist = get_next(phead);
+ plist = phead->next;
- pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxframe = container_of(plist, struct xmit_frame, list);
rtw_list_delete(&(pxframe->list));
}
@@ -1444,7 +1410,6 @@ _func_enter_;
spin_unlock_bh(&pfree_xmit_queue->lock);
-_func_exit_;
return pxframe;
}
@@ -1455,7 +1420,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
struct adapter *padapter = pxmitpriv->adapter;
struct sk_buff *pndis_pkt = NULL;
-_func_enter_;
if (pxmitframe == NULL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n"));
@@ -1483,7 +1447,6 @@ _func_enter_;
exit:
-_func_exit_;
return _SUCCESS;
}
@@ -1493,23 +1456,21 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
-_func_enter_;
spin_lock_bh(&(pframequeue->lock));
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
spin_unlock_bh(&(pframequeue->lock));
-_func_exit_;
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1530,12 +1491,12 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, str
struct xmit_frame *pxmitframe = NULL;
xmitframe_phead = get_list_head(pframe_queue);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
if (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -1555,7 +1516,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmi
struct registry_priv *pregpriv = &padapter->registrypriv;
int i, inx[4];
-_func_enter_;
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
@@ -1572,10 +1532,10 @@ _func_enter_;
phwxmit = phwxmit_i + inx[i];
sta_phead = get_list_head(phwxmit->sta_queue);
- sta_plist = get_next(sta_phead);
+ sta_plist = sta_phead->next;
while (!rtw_end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
pframe_queue = &ptxservq->sta_pending;
@@ -1590,12 +1550,11 @@ _func_enter_;
goto exit;
}
- sta_plist = get_next(sta_plist);
+ sta_plist = sta_plist->next;
}
}
exit:
spin_unlock_bh(&pxmitpriv->lock);
-_func_exit_;
return pxmitframe;
}
@@ -1603,7 +1562,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
{
struct tx_servq *ptxservq;
-_func_enter_;
switch (up) {
case 1:
case 2:
@@ -1632,7 +1590,6 @@ _func_enter_;
break;
}
-_func_exit_;
return ptxservq;
}
@@ -1651,7 +1608,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
int res = _SUCCESS;
-_func_enter_;
if (pattrib->psta) {
psta = pattrib->psta;
@@ -1676,7 +1632,6 @@ _func_enter_;
phwxmits[ac_index].accnt++;
exit:
-_func_exit_;
return res;
}
@@ -1719,10 +1674,8 @@ void rtw_free_hwxmits(struct adapter *padapter)
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
{
int i;
-_func_enter_;
for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
-_func_exit_;
}
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
@@ -1997,7 +1950,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(0);/* */
pstapriv->sta_dz_bitmap |= BIT(0);
- update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after upate bcn */
+ update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after update bcn */
ret = true;
}
@@ -2047,7 +2000,7 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
pstapriv->tim_bitmap |= BIT(psta->aid);
if (psta->sleepq_len == 1) {
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
@@ -2070,12 +2023,12 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
phead = get_list_head(pframequeue);
- plist = get_next(phead);
+ plist = phead->next;
while (!rtw_end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
- plist = get_next(plist);
+ plist = plist->next;
xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
@@ -2137,12 +2090,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -2218,12 +2171,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
rtw_list_delete(&pxmitframe->list);
@@ -2265,12 +2218,12 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ xmitframe_plist = xmitframe_plist->next;
switch (pxmitframe->attrib.priority) {
case 1:
@@ -2316,7 +2269,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* upate BCN for TIM IE */
+ /* update BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, false);
}
}
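rtw_xmit.c also moves its large frame and buffer pools from the driver wrappers rtw_zvmalloc()/rtw_vmfree() to the kernel's vzalloc()/vfree(), which is why <linux/vmalloc.h> is now included. Since vfree() takes no size argument, the lengths that were passed to rtw_vmfree() simply disappear at the call sites. A sketch of the pattern, abbreviated from the xmit-frame pool in the hunks above:

    /* allocation: zeroed, virtually contiguous buffer */
    pxmitpriv->pallocated_frame_buf =
        vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
    if (!pxmitpriv->pallocated_frame_buf)
        return _FAIL;

    /* teardown: vfree() needs only the pointer, no length */
    if (pxmitpriv->pallocated_frame_buf)
        vfree(pxmitpriv->pallocated_frame_buf);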
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 3df33bc7197a..dea220b507ad 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -331,6 +331,7 @@ static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
{ /* Wilson 2011/10/26 */
+ struct adapter *adapt = dm_odm->Adapter;
u32 MaskFromReg;
s8 i;
@@ -357,19 +358,19 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0000000d;
break;
case 12:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR0);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 13:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR1);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 14:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR2);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
case 15:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ MaskFromReg = rtw_read32(adapt, REG_ARFR3);
pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
break;
default:
@@ -667,7 +668,9 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
{
- ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+ struct adapter *adapt = dm_odm->Adapter;
+
+ rtw_write16(adapt, REG_TX_RPT_TIME, minRptTime);
}
void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
index 15e8e3f62198..056052ddd82e 100644
--- a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -217,7 +217,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ if (memcmp(&TempCCk, &CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -229,7 +229,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
("RegA24: 0x%X, CCKSwingTable_Ch1_Ch13[%d][2]: CCKSwingTable_Ch1_Ch13[i][2]: 0x%X\n",
TempCCk, i, CCKSwingTable_Ch1_Ch13[i][2]));
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ if (memcmp(&TempCCk, &CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -839,9 +839,9 @@ static void _PHY_SaveMACRegisters(
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
+ MACBackup[i] = rtw_read8(adapt, MACReg[i]);
}
- MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+ MACBackup[i] = rtw_read32(adapt, MACReg[i]);
}
static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
@@ -868,9 +868,9 @@ _PHY_ReloadMACRegisters(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload MAC parameters !\n"));
for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
+ rtw_write8(adapt, MACReg[i], (u8)MACBackup[i]);
}
- ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+ rtw_write32(adapt, MACReg[i], MACBackup[i]);
}
void
@@ -912,12 +912,12 @@ _PHY_MACSettingCalibration(
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
- ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
+ rtw_write8(adapt, MACReg[i], 0x3F);
for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
}
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
}
void
@@ -1223,16 +1223,14 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
{
u8 tmpreg;
u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* Check continuous TX and Packet TX */
- tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+ tmpreg = rtw_read8(adapt, 0xd03);
if ((tmpreg&0x70) != 0) /* Deal with contisuous TX case */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
+ rtw_write8(adapt, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
else /* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+ rtw_write8(adapt, REG_TXPAUSE, 0xFF); /* block all queues */
if ((tmpreg&0x70) != 0) {
/* 1. Read original RF mode */
@@ -1264,7 +1262,7 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
if ((tmpreg&0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
+ rtw_write8(adapt, 0xd03, tmpreg);
PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
@@ -1272,7 +1270,7 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ rtw_write8(adapt, REG_TXPAUSE, 0x00);
}
}
@@ -1468,13 +1466,10 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
if (!adapt->hw_init_completed) {
u8 u1btmp;
- u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
- ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
+ u1btmp = rtw_read8(adapt, REG_LEDCFG2) | BIT7;
+ rtw_write8(adapt, REG_LEDCFG2, u1btmp);
PHY_SetBBReg(adapt, rFPGA0_XAB_RFParameter, BIT13, 0x01);
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 598140464f05..d75ca7a27c86 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -116,8 +116,6 @@ uint rtw_hal_deinit(struct adapter *adapt)
{
uint status = _SUCCESS;
-_func_enter_;
-
status = adapt->HalFunc.hal_deinit(adapt);
if (status == _SUCCESS)
@@ -125,8 +123,6 @@ _func_enter_;
else
DBG_88E("\n rtw_hal_deinit: hal_init fail\n");
-_func_exit_;
-
return status;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 3555ffaa4e06..89a26e3e217f 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -887,9 +887,10 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapt = pDM_Odm->Adapter;
if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
- ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ rtw_write8(adapt, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
}
@@ -1301,8 +1302,8 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
psta = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(psta) &&
(psta->state & WIFI_ASOC_STATE) &&
- !_rtw_memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
- !_rtw_memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
+ memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
@@ -1433,10 +1434,10 @@ void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VO PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VO_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VI PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VI_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BE PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BE_PARAM)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BK PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BK_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VO PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_VO_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial VI PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_VI_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BE PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_BE_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Orginial BK PARAM: 0x%x\n", rtw_read32(Adapter, ODM_EDCA_BK_PARAM)));
} /* ODM_InitEdcaTurbo */
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
index 6193d9fafb98..a9886122b459 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -66,7 +66,9 @@ void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
{
- ODM_Write1Byte(pDM_Odm, Addr, Data);
+ struct adapter *adapt = pDM_Odm->Adapter;
+
+ rtw_write8(adapt, Addr, Data);
ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n", Addr, Data));
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
deleted file mode 100644
index 3cd68212afd1..000000000000
--- a/drivers/staging/rtl8188eu/hal/odm_interface.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-/* ODM IO Relative API. */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read8(Adapter, RegAddr);
-}
-
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read16(Adapter, RegAddr);
-}
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read32(Adapter, RegAddr);
-}
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write8(Adapter, RegAddr, Data);
-}
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write16(Adapter, RegAddr, Data);
-}
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write32(Adapter, RegAddr, Data);
-}
-
-/* ODM Memory relative API. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
-{
- *pPtr = rtw_zvmalloc(length);
-}
-
-/* length could be ignored, used to detect memory leakage. */
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length)
-{
- rtw_vmfree(pPtr, length);
-}
-
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
-{
- return _rtw_memcmp(pBuf1, pBuf2, length);
-}
-
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
-{
- _set_timer(pTimer, msDelay); /* ms */
-}
-
-void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
- void *CallBackFunc, void *pContext,
- const char *szID)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- _init_timer(pTimer, Adapter->pnetdev, CallBackFunc, pDM_Odm);
-}
-
-void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
- _cancel_timer_ex(pTimer);
-}
-
-/* ODM FW relative API. */
-u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
- u32 *pElementID, u32 *pCmdLen,
- u8 **pCmbBuffer, u8 *CmdStartSeq)
-{
- return true;
-}
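The deleted odm_interface.c above also explains the ODM_Read*Byte()/ODM_Write*Byte() conversions scattered through the HAL files: each wrapper only fetched pDM_Odm->Adapter and forwarded to the matching rtw_read*/rtw_write* accessor. Call sites now do that lookup themselves, as in this sketch of the pattern used above:

    struct adapter *adapt = dm_odm->Adapter;

    /* was: MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0); */
    MaskFromReg = rtw_read32(adapt, REG_ARFR0);

    /* was: ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime); */
    rtw_write16(adapt, REG_TX_RPT_TIME, minRptTime);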
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index ca0a7085445f..021e5879abcf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -72,7 +72,6 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u32 h2c_cmd_ex = 0;
s32 ret = _FAIL;
-_func_enter_;
if (!adapt->bFWReady) {
DBG_88E("FillH2CCmd_88E(): return H2C cmd because fw is not ready\n");
@@ -125,7 +124,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -134,7 +132,6 @@ u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
{
u8 res = _SUCCESS;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-_func_enter_;
if (haldata->fw_ractrl) {
;
@@ -143,7 +140,6 @@ _func_enter_;
res = _FAIL;
}
-_func_exit_;
return res;
}
@@ -154,7 +150,6 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
u8 res = _SUCCESS;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-_func_enter_;
if (haldata->fw_ractrl) {
__le32 lmask;
@@ -168,7 +163,6 @@ _func_enter_;
res = _FAIL;
}
-_func_exit_;
return res;
}
@@ -215,7 +209,6 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
struct setpwrmode_parm H2CSetPwrMode;
struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
-_func_enter_;
DBG_88E("%s: Mode=%d SmartPS=%d UAPSD=%d\n", __func__,
Mode, pwrpriv->smart_ps, adapt->registrypriv.uapsd_enable);
@@ -256,7 +249,6 @@ _func_enter_;
FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
-_func_exit_;
}
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
@@ -617,7 +609,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
u8 DLBcnCount = 0;
u32 poll = 0;
-_func_enter_;
DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
@@ -701,7 +692,6 @@ _func_enter_;
haldata->RegCR_1 &= (~BIT0);
rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
}
-_func_exit_;
}
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
@@ -712,7 +702,6 @@ void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
u8 i;
-_func_enter_;
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
@@ -775,5 +764,4 @@ _func_enter_;
FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
#endif
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 4c934e2e0911..cf88bf247c85 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -160,7 +160,6 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
u8 hw_init_completed = false;
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- _func_enter_;
hw_init_completed = Adapter->hw_init_completed;
if (!hw_init_completed)
@@ -178,7 +177,6 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
/* Calculate Tx/Rx statistics. */
dm_CheckStatistics(Adapter);
- _func_exit_;
}
/* ODM */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 5921db86547f..f9d5558431e0 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -20,6 +20,7 @@
#define _HAL_INIT_C_
#include <linux/firmware.h>
+#include <linux/vmalloc.h>
#include <drv_types.h>
#include <rtw_efuse.h>
@@ -365,7 +366,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
u32 fifo_data, reg_140;
u32 addr, rstatus, loop = 0;
u16 data_cnts = (data_len/8)+1;
- u8 *pbuf = rtw_zvmalloc(data_len+10);
+ u8 *pbuf = vzalloc(data_len+10);
DBG_88E("###### %s ######\n", __func__);
rtw_write8(Adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
@@ -387,7 +388,7 @@ void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
} while (!rstatus && (loop++ < 10));
}
rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
- rtw_vmfree(pbuf, data_len+10);
+ vfree(pbuf);
}
DBG_88E("###### %s ######\n", __func__);
}
@@ -583,59 +584,70 @@ static s32 _FWFreeToGo(struct adapter *padapter)
#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
-s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
{
- s32 rtStatus = _SUCCESS;
- u8 writeFW_retry = 0;
- u32 fwdl_start_time;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- struct device *device = dvobj_to_dev(dvobj);
- struct rt_firmware *pFirmware = NULL;
+ int rtstatus = _SUCCESS;
const struct firmware *fw;
- struct rt_firmware_hdr *pFwHdr = NULL;
- u8 *pFirmwareBuf;
- u32 FirmwareLen;
- char fw_name[] = "rtlwifi/rtl8188eufw.bin";
- static int log_version;
-
- RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
- pFirmware = (struct rt_firmware *)rtw_zmalloc(sizeof(struct rt_firmware));
- if (!pFirmware) {
- rtStatus = _FAIL;
- goto Exit;
- }
+ const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
if (request_firmware(&fw, fw_name, device)) {
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
if (!fw) {
pr_err("Firmware %s not available\n", fw_name);
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
if (fw->size > FW_8188E_SIZE) {
- rtStatus = _FAIL;
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE));
- goto Exit;
+ rtstatus = _FAIL;
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("Firmware size exceed 0x%X. Check it.\n",
+ FW_8188E_SIZE));
+ goto exit;
}
pFirmware->szFwBuffer = kzalloc(FW_8188E_SIZE, GFP_KERNEL);
if (!pFirmware->szFwBuffer) {
- rtStatus = _FAIL;
- goto Exit;
+ rtstatus = _FAIL;
+ goto exit;
}
memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
pFirmware->ulFwLength = fw->size;
- pFirmwareBuf = pFirmware->szFwBuffer;
- FirmwareLen = pFirmware->ulFwLength;
release_firmware(fw);
- DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, FirmwareLen);
+ DBG_88E_LEVEL(_drv_info_,
+ "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__,
+ pFirmware->ulFwLength);
+exit:
+ return rtstatus;
+}
+
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 writeFW_retry = 0;
+ u32 fwdl_start_time;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct device *device = dvobj_to_dev(dvobj);
+ struct rt_firmware_hdr *pFwHdr = NULL;
+ u8 *pFirmwareBuf;
+ u32 FirmwareLen;
+ static int log_version;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+ if (!dvobj->firmware.szFwBuffer)
+ rtStatus = load_firmware(&dvobj->firmware, device);
+ if (rtStatus == _FAIL) {
+ dvobj->firmware.szFwBuffer = NULL;
+ goto Exit;
+ }
+ pFirmwareBuf = dvobj->firmware.szFwBuffer;
+ FirmwareLen = dvobj->firmware.ulFwLength;
/* To Check Fw header. Added by tynli. 2009.12.04. */
- pFwHdr = (struct rt_firmware_hdr *)pFirmware->szFwBuffer;
+ pFwHdr = (struct rt_firmware_hdr *)dvobj->firmware.szFwBuffer;
pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
pHalData->FirmwareSubVersion = pFwHdr->Subversion;
@@ -687,10 +699,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
goto Exit;
}
RT_TRACE(_module_hal_init_c_, _drv_info_, ("Firmware is ready to run!\n"));
- kfree(pFirmware->szFwBuffer);
Exit:
-
- kfree(pFirmware);
return rtStatus;
}
@@ -707,10 +716,8 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
static void rtl8188e_free_hal_data(struct adapter *padapter)
{
-_func_enter_;
kfree(padapter->HalData);
padapter->HalData = NULL;
-_func_exit_;
}
/* */
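The rtl8188e_hal_init.c rework above splits the request_firmware() handling into a load_firmware() helper and keeps the image in dvobj->firmware rather than in a temporary rt_firmware that was freed at the end of every download, so the blob is fetched from userspace only once per device. A sketch of the resulting call path in rtl8188e_FirmwareDownload(), abbreviated from the hunks above (error handling shortened):

    /* load the image only if it is not already cached for this device */
    if (!dvobj->firmware.szFwBuffer)
        rtStatus = load_firmware(&dvobj->firmware, device);
    if (rtStatus == _FAIL) {
        dvobj->firmware.szFwBuffer = NULL;
        goto Exit;
    }

    /* subsequent downloads reuse the cached buffer */
    pFirmwareBuf = dvobj->firmware.szFwBuffer;
    FirmwareLen = dvobj->firmware.ulFwLength;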
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
index 3d0e6c9e0310..a4d057cf7db2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -115,14 +115,12 @@ void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
/* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4]<<16)+
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5]<<24);
/* Write 0xa28 0xa29 */
- TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
(CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7]<<8);
} else {
@@ -139,14 +137,12 @@ void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
(CCKSwingTable_Ch14[CCKSwingIndex][1]<<8);
/* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
(CCKSwingTable_Ch14[CCKSwingIndex][3]<<8) +
(CCKSwingTable_Ch14[CCKSwingIndex][4]<<16)+
(CCKSwingTable_Ch14[CCKSwingIndex][5]<<24);
/* Write 0xa28 0xa29 */
- TempVal3 = 0;
TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
(CCKSwingTable_Ch14[CCKSwingIndex][7]<<8);
}
@@ -184,12 +180,12 @@ void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
+ if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
break;
}
} else {
- if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
+ if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
break;
}
@@ -201,6 +197,8 @@ void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
else
CCK_index = CCK_index_old + 1;
+ if (CCK_index > 32)
+ CCK_index = 32;
/* Adjust CCK according to gain index */
if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 511f61cbb9e0..43eb960e4e0b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -23,9 +23,9 @@
#include <drv_types.h>
#include <rtl8188e_hal.h>
-static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
+static void process_rssi(struct adapter *padapter, struct recv_frame *prframe)
{
- struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &prframe->attrib;
struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
if (signal_stat->update_req) {
@@ -39,7 +39,8 @@ static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
} /* Process_UI_RSSI_8192C */
-static void process_link_qual(struct adapter *padapter, union recv_frame *prframe)
+static void process_link_qual(struct adapter *padapter,
+ struct recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
@@ -47,7 +48,7 @@ static void process_link_qual(struct adapter *padapter, union recv_frame *prfram
if (prframe == NULL || padapter == NULL)
return;
- pattrib = &prframe->u.hdr.attrib;
+ pattrib = &prframe->attrib;
signal_stat = &padapter->recvpriv.signal_qual_data;
if (signal_stat->update_req) {
@@ -63,7 +64,7 @@ static void process_link_qual(struct adapter *padapter, union recv_frame *prfram
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
- union recv_frame *precvframe = (union recv_frame *)prframe;
+ struct recv_frame *precvframe = (struct recv_frame *)prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
@@ -71,7 +72,8 @@ void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
process_link_qual(padapter, precvframe);
}
-void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat *prxstat)
+void update_recvframe_attrib_88e(struct recv_frame *precvframe,
+ struct recv_stat *prxstat)
{
struct rx_pkt_attrib *pattrib;
struct recv_stat report;
@@ -83,7 +85,7 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
report.rxdw4 = prxstat->rxdw4;
report.rxdw5 = prxstat->rxdw5;
- pattrib = &precvframe->u.hdr.attrib;
+ pattrib = &precvframe->attrib;
_rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
@@ -136,12 +138,13 @@ void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat
/*
* Notice:
* Before calling this function,
- * precvframe->u.hdr.rx_data should be ready!
+ * precvframe->rx_data should be ready!
*/
-void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat *pphy_status)
+void update_recvframe_phyinfo_88e(struct recv_frame *precvframe,
+ struct phy_stat *pphy_status)
{
- struct adapter *padapter = precvframe->u.hdr.adapter;
- struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ struct adapter *padapter = precvframe->adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->attrib;
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
u8 *wlanhdr;
@@ -154,15 +157,15 @@ void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat
pkt_info.bPacketToSelf = false;
pkt_info.bPacketBeacon = false;
- wlanhdr = get_recvframe_data(precvframe);
+ wlanhdr = precvframe->rx_data;
pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
!pattrib->icv_err && !pattrib->crc_err &&
- _rtw_memcmp(get_hdr_bssid(wlanhdr),
+ !memcmp(get_hdr_bssid(wlanhdr),
get_bssid(&padapter->mlmepriv), ETH_ALEN));
pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
- (_rtw_memcmp(get_da(wlanhdr),
+ (!memcmp(get_da(wlanhdr),
myid(&padapter->eeprompriv), ETH_ALEN));
pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
@@ -185,17 +188,17 @@ void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat
ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
- precvframe->u.hdr.psta = NULL;
+ precvframe->psta = NULL;
if (pkt_info.bPacketMatchBSSID &&
(check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
if (psta) {
- precvframe->u.hdr.psta = psta;
+ precvframe->psta = psta;
rtl8188e_process_phy_info(padapter, precvframe);
}
} else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
if (psta)
- precvframe->u.hdr.psta = psta;
+ precvframe->psta = psta;
}
rtl8188e_process_phy_info(padapter, precvframe);
}
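
This file is part of the wider union recv_frame to struct recv_frame flattening in this patch (the structure itself is reworked in rtw_recv.h below), so every ->u.hdr indirection becomes a direct member access and the get_recvframe_data() accessor is no longer needed here. Had the accessor been kept, it would reduce to the trivial wrapper sketched below:

/* Sketch: get_recvframe_data() on the flattened struct. */
static inline u8 *get_recvframe_data(struct recv_frame *precvframe)
{
	if (precvframe == NULL)
		return NULL;
	return precvframe->rx_data;	/* always return rx_data */
}
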
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 17c94f4cc477..b1b1584af1cd 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,9 +22,6 @@
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
-#include <ip.h>
-#include <if_ether.h>
-#include <ethernet.h>
#include <usb_ops.h>
#include <wifi.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 6fb6a46f04fe..3476f8898330 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -537,11 +537,11 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&ptxservq->sta_pending);
- xmitframe_plist = get_next(xmitframe_phead);
+ xmitframe_plist = xmitframe_phead->next;
while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
- xmitframe_plist = get_next(xmitframe_plist);
+ pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = xmitframe_plist->next;
pxmitframe->agg_num = 0; /* not first frame of aggregation */
pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
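
Here the driver-private get_next() and LIST_CONTAINOR() wrappers (deleted from osdep_service.h later in the patch) are replaced by plain ->next and the standard container_of(). For reference, the same queue walk could also be expressed with the generic list iterator; this sketch uses the _safe variant on the assumption that frames may be unlinked from sta_pending inside the loop body.

/* Sketch: traversal of ptxservq->sta_pending with the standard
 * iterator; assumes entries may be removed while walking. */
struct xmit_frame *pxmitframe, *tmp;

list_for_each_entry_safe(pxmitframe, tmp,
			 &ptxservq->sta_pending.queue, list) {
	pxmitframe->agg_num = 0;	/* not first frame of aggregation */
	pxmitframe->pkt_offset = 0;	/* no offset to reserve */
	/* ... per-frame handling continues as in the original loop */
}
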
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index b24ad495062c..c92067f0ef15 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -709,7 +709,6 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-_func_enter_;
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
@@ -967,7 +966,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
-_func_exit_;
return status;
}
@@ -1084,7 +1082,6 @@ static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
struct recv_priv *precvpriv = &(Adapter->recvpriv);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
-_func_enter_;
_read_port = pintfhdl->io_ops._read_port;
@@ -1112,7 +1109,6 @@ exit:
RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
-_func_exit_;
return status;
}
@@ -1402,7 +1398,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &haldata->dmpriv;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-_func_enter_;
switch (variable) {
case HW_VAR_MEDIA_STATUS:
@@ -1921,14 +1916,12 @@ _func_enter_;
default:
break;
}
-_func_exit_;
}
static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-_func_enter_;
switch (variable) {
case HW_VAR_BASIC_RATE:
@@ -1980,7 +1973,6 @@ _func_enter_;
break;
}
-_func_exit_;
}
/* */
@@ -2302,7 +2294,6 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
{
struct hal_ops *halfunc = &adapt->HalFunc;
-_func_enter_;
adapt->HalData = rtw_zmalloc(sizeof(struct hal_data_8188e));
if (adapt->HalData == NULL)
@@ -2342,5 +2333,4 @@ _func_enter_;
halfunc->interface_ps_func = &rtl8188eu_ps_func;
rtl8188e_set_hal_ops(halfunc);
-_func_exit_;
}
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
index 31ae21a54c92..1fa5370f1da6 100644
--- a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -111,7 +111,7 @@ static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u8 request, u16 value, u
break;
}
release_mutex:
- _exit_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+ mutex_unlock(&dvobjpriv->usb_vendor_req_mutex);
exit:
return status;
}
@@ -125,7 +125,6 @@ static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
u8 data = 0;
- _func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
@@ -136,7 +135,6 @@ static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return data;
@@ -151,14 +149,12 @@ static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
__le32 data;
-_func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
index = 0;/* n/a */
wvalue = (u16)(addr&0x0000ffff);
len = 2;
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
-_func_exit_;
return (u16)(le32_to_cpu(data)&0xffff);
}
@@ -172,7 +168,6 @@ static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
u16 len;
__le32 data;
-_func_enter_;
request = 0x05;
requesttype = 0x01;/* read_in */
@@ -183,7 +178,6 @@ _func_enter_;
usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
-_func_exit_;
return le32_to_cpu(data);
}
@@ -198,7 +192,6 @@ static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
u8 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
index = 0;/* n/a */
@@ -206,7 +199,6 @@ static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
len = 1;
data = val;
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -220,7 +212,6 @@ static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
__le32 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -233,7 +224,6 @@ static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -248,7 +238,6 @@ static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
__le32 data;
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -260,7 +249,6 @@ static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
- _func_exit_;
return ret;
}
@@ -275,7 +263,6 @@ static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata
u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
int ret;
- _func_enter_;
request = 0x05;
requesttype = 0x00;/* write_out */
@@ -287,7 +274,6 @@ static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata
ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, buf, len, requesttype);
- _func_exit_;
return ret;
}
@@ -320,7 +306,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
struct recv_stat *prxstat;
struct phy_stat *pphy_status = NULL;
struct sk_buff *pkt_copy = NULL;
- union recv_frame *precvframe = NULL;
+ struct recv_frame *precvframe = NULL;
struct rx_pkt_attrib *pattrib = NULL;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct recv_priv *precvpriv = &adapt->recvpriv;
@@ -346,13 +332,13 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
goto _exit_recvbuf2recvframe;
}
- _rtw_init_listhead(&precvframe->u.hdr.list);
- precvframe->u.hdr.precvbuf = NULL; /* can't access the precvbuf for new arch. */
- precvframe->u.hdr.len = 0;
+ _rtw_init_listhead(&precvframe->list);
+ precvframe->precvbuf = NULL; /* can't access the precvbuf for new arch. */
+ precvframe->len = 0;
update_recvframe_attrib_88e(precvframe, prxstat);
- pattrib = &precvframe->u.hdr.attrib;
+ pattrib = &precvframe->attrib;
if ((pattrib->crc_err) || (pattrib->icv_err)) {
DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
@@ -399,26 +385,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
if (pkt_copy) {
pkt_copy->dev = adapt->pnetdev;
- precvframe->u.hdr.pkt = pkt_copy;
- precvframe->u.hdr.rx_head = pkt_copy->data;
- precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
+ precvframe->pkt = pkt_copy;
+ precvframe->rx_head = pkt_copy->data;
+ precvframe->rx_end = pkt_copy->data + alloc_sz;
skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
- precvframe->u.hdr.rx_tail = pkt_copy->data;
- precvframe->u.hdr.rx_data = pkt_copy->data;
+ precvframe->rx_tail = pkt_copy->data;
+ precvframe->rx_data = pkt_copy->data;
} else {
if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
DBG_88E("recvbuf2recvframe: alloc_skb fail , drop frag frame\n");
rtw_free_recvframe(precvframe, pfree_recv_queue);
goto _exit_recvbuf2recvframe;
}
- precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
- if (precvframe->u.hdr.pkt) {
- precvframe->u.hdr.rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
- precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_tail;
- precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail;
- precvframe->u.hdr.rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
+ precvframe->pkt = skb_clone(pskb, GFP_ATOMIC);
+ if (precvframe->pkt) {
+ precvframe->rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
+ precvframe->rx_head = precvframe->rx_tail;
+ precvframe->rx_data = precvframe->rx_tail;
+ precvframe->rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
} else {
DBG_88E("recvbuf2recvframe: skb_clone fail\n");
rtw_free_recvframe(precvframe, pfree_recv_queue);
@@ -451,17 +437,17 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
/* enqueue recvframe to txrtp queue */
if (pattrib->pkt_rpt_type == TX_REPORT1) {
/* CCX-TXRPT ack for xmit mgmt frames. */
- handle_txrpt_ccx_88e(adapt, precvframe->u.hdr.rx_data);
+ handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
} else if (pattrib->pkt_rpt_type == TX_REPORT2) {
ODM_RA_TxRPT2Handle_8188E(
&haldata->odmpriv,
- precvframe->u.hdr.rx_data,
+ precvframe->rx_data,
pattrib->pkt_len,
pattrib->MacIDValidEntry[0],
pattrib->MacIDValidEntry[1]
);
} else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->u.hdr.rx_data);
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
}
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
@@ -519,7 +505,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
DBG_88E("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
__func__, adapt->bDriverStopped,
adapt->bSurpriseRemoved, adapt->bReadPortCancel);
- goto exit;
+ return;
}
if (purb->status == 0) { /* SUCCESS */
@@ -579,9 +565,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
break;
}
}
-
-exit:
-_func_exit_;
}
static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
@@ -598,7 +581,6 @@ static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
size_t alignment = 0;
u32 ret = _SUCCESS;
-_func_enter_;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx) {
@@ -672,7 +654,6 @@ _func_enter_;
ret = _FAIL;
}
-_func_exit_;
return ret;
}
@@ -702,7 +683,6 @@ void rtl8188eu_xmit_tasklet(void *priv)
void rtl8188eu_set_intf_ops(struct _io_ops *pops)
{
- _func_enter_;
_rtw_memset((u8 *)pops, 0, sizeof(struct _io_ops));
pops->_read8 = &usb_read8;
pops->_read16 = &usb_read16;
@@ -717,7 +697,6 @@ void rtl8188eu_set_intf_ops(struct _io_ops *pops)
pops->_write_port = &usb_write_port;
pops->_read_port_cancel = &usb_read_port_cancel;
pops->_write_port_cancel = &usb_write_port_cancel;
- _func_exit_;
}
void rtl8188eu_set_hw_type(struct adapter *adapt)
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index a492a1c547ae..936c196699af 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -159,9 +159,15 @@ struct registry_priv {
#define MAX_CONTINUAL_URB_ERR 4
+struct rt_firmware {
+ u8 *szFwBuffer;
+ u32 ulFwLength;
+};
+
struct dvobj_priv {
struct adapter *if1;
struct adapter *if2;
+ struct rt_firmware firmware;
/* For 92D, DMDP have 2 interface. */
u8 InterfaceNumber;
diff --git a/drivers/staging/rtl8188eu/include/ethernet.h b/drivers/staging/rtl8188eu/include/ethernet.h
deleted file mode 100644
index a59f9120cd7e..000000000000
--- a/drivers/staging/rtl8188eu/include/ethernet.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-/*! \file */
-#ifndef __INC_ETHERNET_H
-#define __INC_ETHERNET_H
-
-#define ETHERNET_ADDRESS_LENGTH 6 /* Ethernet Address Length */
-#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
-#define LLC_HEADER_SIZE 6 /* LLC Header Length */
-#define TYPE_LENGTH_FIELD_SIZE 2 /* Type/Length Size */
-#define MINIMUM_ETHERNET_PACKET_SIZE 60 /* Min Ethernet Packet Size */
-#define MAXIMUM_ETHERNET_PACKET_SIZE 1514 /* Max Ethernet Packet Size */
-
-/* Is Multicast Address? */
-#define RT_ETH_IS_MULTICAST(_addr) ((((u8 *)(_addr))[0]&0x01) != 0)
-#define RT_ETH_IS_BROADCAST(_addr) ( \
- ((u8 *)(_addr))[0] == 0xff && \
- ((u8 *)(_addr))[1] == 0xff && \
- ((u8 *)(_addr))[2] == 0xff && \
- ((u8 *)(_addr))[3] == 0xff && \
- ((u8 *)(_addr))[4] == 0xff && \
- ((u8 *)(_addr))[5] == 0xff) /* Is Broadcast Address? */
-
-
-#endif /* #ifndef __INC_ETHERNET_H */
diff --git a/drivers/staging/rtl8188eu/include/if_ether.h b/drivers/staging/rtl8188eu/include/if_ether.h
deleted file mode 100644
index db157712a203..000000000000
--- a/drivers/staging/rtl8188eu/include/if_ether.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-#ifndef _LINUX_IF_ETHER_H
-#define _LINUX_IF_ETHER_H
-
-/*
- * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
- * and FCS/CRC (frame check sequence).
- */
-
-#define ETH_ALEN 6 /* Octets in one ethernet addr */
-#define ETH_HLEN 14 /* Total octets in header. */
-#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
-#define ETH_DATA_LEN 1500 /* Max. octets in payload */
-#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
-
-/*
- * These are the defined Ethernet Protocol ID's.
- */
-
-#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
-#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
-#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
-#define ETH_P_IP 0x0800 /* Internet Protocol packet */
-#define ETH_P_X25 0x0805 /* CCITT X.25 */
-#define ETH_P_ARP 0x0806 /* Address Resolution packet */
-#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet */
-#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */
-#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP */
-#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
-#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
-#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
-#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
-#define ETH_P_LAT 0x6004 /* DEC LAT */
-#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
-#define ETH_P_CUST 0x6006 /* DEC Customer use */
-#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
-#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
-#define ETH_P_ATALK 0x809B /* Appletalk DDP */
-#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
-#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
-#define ETH_P_IPX 0x8137 /* IPX over DIX */
-#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
-#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
-#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
-#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */
-#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
- * over Ethernet
- */
-
-/*
- * Non DIX types. Won't clash for 1500 types.
- */
-
-#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
-#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
-#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
-#define ETH_P_802_2 0x0004 /* 802.2 frames */
-#define ETH_P_SNAP 0x0005 /* Internal only */
-#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
-#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
-#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
-#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
-#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
-#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
-#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
-#define ETH_P_CONTROL 0x0016 /* Card specific control frames */
-#define ETH_P_IRDA 0x0017 /* Linux-IrDA */
-#define ETH_P_ECONET 0x0018 /* Acorn Econet */
-
-/*
- * This is an Ethernet frame header.
- */
-
-struct ethhdr {
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
- unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned short h_proto; /* packet type ID field */
-};
-
-struct _vlan {
- unsigned short h_vlan_TCI; /* Encap prio and VLAN ID */
- unsigned short h_vlan_encapsulated_proto;
-};
-
-#define get_vlan_id(pvlan) \
- ((ntohs((unsigned short)pvlan->h_vlan_TCI)) & 0xfff)
-#define get_vlan_priority(pvlan) \
- ((ntohs((unsigned short)pvlan->h_vlan_TCI))>>13)
-#define get_vlan_encap_proto(pvlan) \
- (ntohs((unsigned short)pvlan->h_vlan_encapsulated_proto))
-
-#endif /* _LINUX_IF_ETHER_H */
diff --git a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
deleted file mode 100644
index 037e9a5e5af9..000000000000
--- a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-#ifndef __IOCTL_CFG80211_H__
-#define __IOCTL_CFG80211_H__
-
-struct rtw_wdev_invit_info {
- u8 token;
- u8 flags;
- u8 status;
- u8 req_op_ch;
- u8 rsp_op_ch;
-};
-
-#define rtw_wdev_invit_info_init(invit_info) \
- do { \
- (invit_info)->token = 0; \
- (invit_info)->flags = 0x00; \
- (invit_info)->status = 0xff; \
- (invit_info)->req_op_ch = 0; \
- (invit_info)->rsp_op_ch = 0; \
- } while (0)
-
-struct rtw_wdev_priv {
- struct wireless_dev *rtw_wdev;
-
- struct adapter *padapter;
-
- struct cfg80211_scan_request *scan_request;
- spinlock_t scan_req_lock;
-
- struct net_device *pmon_ndev;/* for monitor interface */
- char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
-
- u8 p2p_enabled;
-
- u8 provdisc_req_issued;
-
- struct rtw_wdev_invit_info invit_info;
-
- u8 bandroid_scan;
- bool block;
- bool power_mgmt;
-};
-
-#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
-
-#define wiphy_to_wdev(x) \
-((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
-
-int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
-void rtw_wdev_free(struct wireless_dev *wdev);
-void rtw_wdev_unregister(struct wireless_dev *wdev);
-
-void rtw_cfg80211_init_wiphy(struct adapter *padapter);
-
-void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
-
-void rtw_cfg80211_indicate_connect(struct adapter *padapter);
-void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted);
-
-#ifdef CONFIG_88EU_AP_MODE
-void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
- unsigned char *da,
- unsigned short reason);
-#endif /* CONFIG_88EU_AP_MODE */
-
-void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
- const u8 *buf, size_t len);
-void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
- uint frame_len);
-void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
- uint frame_len, const char *msg);
-
-int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
- char *buf, int len, int type);
-
-bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
-
-#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
- cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
-#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
- cfg80211_send_rx_assoc(dev, bss, buf, len)
-
-#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8188eu/include/ip.h b/drivers/staging/rtl8188eu/include/ip.h
deleted file mode 100644
index 9fdac6d42d17..000000000000
--- a/drivers/staging/rtl8188eu/include/ip.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-#ifndef _LINUX_IP_H
-#define _LINUX_IP_H
-
-/* SOL_IP socket options */
-
-#define IPTOS_TOS_MASK 0x1E
-#define IPTOS_TOS(tos) ((tos)&IPTOS_TOS_MASK)
-#define IPTOS_LOWDELAY 0x10
-#define IPTOS_THROUGHPUT 0x08
-#define IPTOS_RELIABILITY 0x04
-#define IPTOS_MINCOST 0x02
-
-#define IPTOS_PREC_MASK 0xE0
-#define IPTOS_PREC(tos) ((tos)&IPTOS_PREC_MASK)
-#define IPTOS_PREC_NETCONTROL 0xe0
-#define IPTOS_PREC_INTERNETCONTROL 0xc0
-#define IPTOS_PREC_CRITIC_ECP 0xa0
-#define IPTOS_PREC_FLASHOVERRIDE 0x80
-#define IPTOS_PREC_FLASH 0x60
-#define IPTOS_PREC_IMMEDIATE 0x40
-#define IPTOS_PREC_PRIORITY 0x20
-#define IPTOS_PREC_ROUTINE 0x00
-
-
-/* IP options */
-#define IPOPT_COPY 0x80
-#define IPOPT_CLASS_MASK 0x60
-#define IPOPT_NUMBER_MASK 0x1f
-
-#define IPOPT_COPIED(o) ((o)&IPOPT_COPY)
-#define IPOPT_CLASS(o) ((o)&IPOPT_CLASS_MASK)
-#define IPOPT_NUMBER(o) ((o)&IPOPT_NUMBER_MASK)
-
-#define IPOPT_CONTROL 0x00
-#define IPOPT_RESERVED1 0x20
-#define IPOPT_MEASUREMENT 0x40
-#define IPOPT_RESERVED2 0x60
-
-#define IPOPT_END (0 | IPOPT_CONTROL)
-#define IPOPT_NOOP (1 | IPOPT_CONTROL)
-#define IPOPT_SEC (2 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_LSRR (3 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_TIMESTAMP (4 | IPOPT_MEASUREMENT)
-#define IPOPT_RR (7 | IPOPT_CONTROL)
-#define IPOPT_SID (8 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_SSRR (9 | IPOPT_CONTROL | IPOPT_COPY)
-#define IPOPT_RA (20 | IPOPT_CONTROL | IPOPT_COPY)
-
-#define IPVERSION 4
-#define MAXTTL 255
-#define IPDEFTTL 64
-#define IPOPT_OPTVAL 0
-#define IPOPT_OLEN 1
-#define IPOPT_OFFSET 2
-#define IPOPT_MINOFF 4
-#define MAX_IPOPTLEN 40
-#define IPOPT_NOP IPOPT_NOOP
-#define IPOPT_EOL IPOPT_END
-#define IPOPT_TS IPOPT_TIMESTAMP
-
-#define IPOPT_TS_TSONLY 0 /* timestamps only */
-#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
-#define IPOPT_TS_PRESPEC 3 /* specified modules only */
-
-struct ip_options {
- __u32 faddr; /* Saved first hop address */
- unsigned char optlen;
- unsigned char srr;
- unsigned char rr;
- unsigned char ts;
- unsigned char is_setbyuser:1, /* Set by setsockopt? */
- is_data:1, /* Options in __data, rather than skb*/
- is_strictroute:1,/* Strict source route */
- srr_is_hit:1, /* Packet destn addr was ours */
- is_changed:1, /* IP checksum more not valid */
- rr_needaddr:1, /* Need to record addr of out dev*/
- ts_needtime:1, /* Need to record timestamp */
- ts_needaddr:1; /* Need to record addr of out dev */
- unsigned char router_alert;
- unsigned char __pad1;
- unsigned char __pad2;
- unsigned char __data[0];
-};
-
-#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
-
-struct iphdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 ihl:4,
- version:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u8 version:4,
- ihl:4;
-#endif
- __u8 tos;
- __u16 tot_len;
- __u16 id;
- __u16 frag_off;
- __u8 ttl;
- __u8 protocol;
- __u16 check;
- __u32 saddr;
- __u32 daddr;
- /*The options start here. */
-};
-
-#endif /* _LINUX_IP_H */
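
The headers removed here and above (ethernet.h, if_ether.h, ip.h) were local copies of definitions the kernel already provides, which is why the corresponding #include lines were dropped from rtl8188eu_recv.c earlier in this patch. Call sites can rely on the standard headers instead; a sketch of the usual replacements, with the etherdevice helpers offered as suggested equivalents rather than a one-for-one mapping:

/* Sketch: standard headers covering the removed private definitions. */
#include <linux/if_ether.h>	/* ETH_ALEN, ETH_P_IP, struct ethhdr */
#include <linux/ip.h>		/* struct iphdr, IPTOS_*, IPOPT_* */
#include <linux/etherdevice.h>	/* is_multicast_ether_addr(), is_broadcast_ether_addr() */
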
diff --git a/drivers/staging/rtl8188eu/include/nic_spec.h b/drivers/staging/rtl8188eu/include/nic_spec.h
deleted file mode 100644
index d42244788ca9..000000000000
--- a/drivers/staging/rtl8188eu/include/nic_spec.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-
-
-#ifndef __NIC_SPEC_H__
-#define __NIC_SPEC_H__
-
-#define RTL8711_MCTRL_ (0x20000)
-#define RTL8711_UART_ (0x30000)
-#define RTL8711_TIMER_ (0x40000)
-#define RTL8711_FINT_ (0x50000)
-#define RTL8711_HINT_ (0x50000)
-#define RTL8711_GPIO_ (0x60000)
-#define RTL8711_WLANCTRL_ (0x200000)
-#define RTL8711_WLANFF_ (0xe00000)
-#define RTL8711_HCICTRL_ (0x600000)
-#define RTL8711_SYSCFG_ (0x620000)
-#define RTL8711_SYSCTRL_ (0x620000)
-#define RTL8711_MCCTRL_ (0x020000)
-
-
-#include <rtl8711_regdef.h>
-
-#include <rtl8711_bitdef.h>
-
-
-#endif /* __RTL8711_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
index a50eae3ec68e..548a309db199 100644
--- a/drivers/staging/rtl8188eu/include/odm_interface.h
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -77,32 +77,9 @@ typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
/* =========== EXtern Function Prototype */
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-
/* Memory Relative Function. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
-
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
- u32 length);
/* ODM Timer relative API. */
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
- u32 msDelay);
-
-void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
- struct timer_list *pTimer, void *CallBackFunc,
- void *pContext, const char *szID);
void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 7956f0cdb96b..5889f58ed7d3 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -62,20 +62,11 @@ struct __queue {
spinlock_t lock;
};
-static inline struct list_head *get_next(struct list_head *list)
-{
- return list->next;
-}
-
static inline struct list_head *get_list_head(struct __queue *queue)
{
return &(queue->queue);
}
-
-#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
-
static inline int _enter_critical_mutex(struct mutex *pmutex,
unsigned long *pirqL)
{
@@ -85,13 +76,6 @@ static inline int _enter_critical_mutex(struct mutex *pmutex,
return ret;
}
-
-static inline void _exit_critical_mutex(struct mutex *pmutex,
- unsigned long *pirqL)
-{
- mutex_unlock(pmutex);
-}
-
static inline void rtw_list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -122,17 +106,6 @@ static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
#define RTW_DECLARE_TIMER_HDL(name) \
void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc,
- void *cntx)
-{
- INIT_WORK(pwork, pfunc);
-}
-
-static inline void _set_workitem(struct work_struct *pwork)
-{
- schedule_work(pwork);
-}
-
static inline void _cancel_workitem_sync(struct work_struct *pwork)
{
cancel_work_sync(pwork);
@@ -230,15 +203,9 @@ extern unsigned char WPA_TKIP_CIPHER[4];
extern unsigned char RSN_TKIP_CIPHER[4];
#define rtw_update_mem_stat(flag, sz) do {} while (0)
-u8 *_rtw_vmalloc(u32 sz);
-u8 *_rtw_zvmalloc(u32 sz);
-void _rtw_vmfree(u8 *pbuf, u32 sz);
u8 *_rtw_zmalloc(u32 sz);
u8 *_rtw_malloc(u32 sz);
void _rtw_mfree(u8 *pbuf, u32 sz);
-#define rtw_vmalloc(sz) _rtw_vmalloc((sz))
-#define rtw_zvmalloc(sz) _rtw_zvmalloc((sz))
-#define rtw_vmfree(pbuf, sz) _rtw_vmfree((pbuf), (sz))
#define rtw_malloc(sz) _rtw_malloc((sz))
#define rtw_zmalloc(sz) _rtw_zmalloc((sz))
#define rtw_mfree(pbuf, sz) _rtw_mfree((pbuf), (sz))
@@ -247,7 +214,6 @@ void *rtw_malloc2d(int h, int w, int size);
void rtw_mfree2d(void *pbuf, int h, int w, int size);
void _rtw_memcpy(void *dec, void *sour, u32 sz);
-int _rtw_memcmp(void *dst, void *src, u32 sz);
void _rtw_memset(void *pbuf, int c, u32 sz);
void _rtw_init_listhead(struct list_head *list);
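
Alongside the vmalloc wrappers, the workitem helpers and _rtw_memcmp() go away; callers use the generic kernel APIs directly (for example, ioctl_linux.c gains an explicit #include <linux/vmalloc.h> later in this patch). A small hedged sketch of the typical substitutions:

/* Sketch: generic calls replacing the removed wrappers. */
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

static u8 *example_alloc(u32 sz)
{
	return vzalloc(sz);		/* was rtw_zvmalloc(sz) */
}

static void example_free(u8 *pbuf)
{
	vfree(pbuf);			/* was rtw_vmfree(pbuf, sz) */
}

static void example_start(struct work_struct *work, work_func_t fn)
{
	INIT_WORK(work, fn);		/* was _init_workitem(work, fn, NULL) */
	schedule_work(work);		/* was _set_workitem(work) */
}
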
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 691238078075..d76cc2db5028 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -28,18 +28,20 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
void _rtw_free_recv_priv(struct recv_priv *precvpriv);
-s32 rtw_recv_entry(union recv_frame *precv_frame);
-int rtw_recv_indicatepkt(struct adapter *adapter, union recv_frame *recv_frame);
+s32 rtw_recv_entry(struct recv_frame *precv_frame);
+int rtw_recv_indicatepkt(struct adapter *adapter,
+ struct recv_frame *recv_frame);
void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
-void rtw_hostapd_mlme_rx(struct adapter *padapter, union recv_frame *recv_fr);
+void rtw_hostapd_mlme_rx(struct adapter *padapter, struct recv_frame *recv_fr);
void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
void rtw_free_recv_priv(struct recv_priv *precvpriv);
int rtw_os_recv_resource_init(struct recv_priv *recvpr, struct adapter *adapt);
-int rtw_os_recv_resource_alloc(struct adapter *adapt, union recv_frame *recvfr);
+int rtw_os_recv_resource_alloc(struct adapter *adapt,
+ struct recv_frame *recvfr);
void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 161f1e5af9e6..75e41c4aeb27 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -76,17 +76,6 @@
(le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300 || \
(le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88E0)
-enum firmware_source {
- FW_SOURCE_IMG_FILE = 0,
- FW_SOURCE_HEADER_FILE = 1, /* from header file */
-};
-
-struct rt_firmware {
- enum firmware_source eFWSource;
- u8 *szFwBuffer;
- u32 ulFwLength;
-};
-
/* This structure must be careful with byte-ordering */
struct rt_firmware_hdr {
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index a8facf00eac0..07e5f5227336 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -61,9 +61,10 @@ s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
void rtl8188eu_free_recv_priv(struct adapter *padapter);
void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
void rtl8188eu_recv_tasklet(void *priv);
-void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
+void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
-void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
-void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
+void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
+void update_recvframe_attrib_88e(struct recv_frame *fra,
+ struct recv_stat *stat);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index c6b193a2e795..ae05141f5ddf 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -99,20 +99,6 @@ extern u32 GlobalDebugLevel;
} \
} while (0)
-#define _func_enter_ \
- do { \
- if (GlobalDebugLevel >= _drv_debug_) \
- pr_info("%s : %s enters at %d\n", \
- DRIVER_PREFIX, __func__, __LINE__); \
- } while (0)
-
-#define _func_exit_ \
- do { \
- if (GlobalDebugLevel >= _drv_debug_) \
- pr_info("%s : %s exits at %d\n", \
- DRIVER_PREFIX, __func__, __LINE__); \
- } while (0)
-
#define RT_PRINT_DATA(_comp, _level, _titlestring, _hexdata, _hexdatalen)\
do { \
if (_level <= GlobalDebugLevel) { \
@@ -277,14 +263,4 @@ int proc_get_rssi_disp(char *page, char **start,
int proc_set_rssi_disp(struct file *file, const char __user *buffer,
unsigned long count, void *data);
-#ifdef CONFIG_BT_COEXIST
-int proc_get_btcoex_dbg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_btcoex_dbg(struct file *file, const char *buffer,
- signed long count, void *data);
-
-#endif /* CONFIG_BT_COEXIST */
-
#endif /* __RTW_DEBUG_H__ */
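
With _func_enter_ and _func_exit_ gone from rtw_debug.h, every call site stripped throughout this patch loses its hand-rolled entry/exit logging. Where such tracing is still wanted during debugging, the function tracer (ftrace) or a plain dynamic-debug print covers the same ground; a minimal sketch of the per-site replacement:

/* Sketch: dynamic-debug prints standing in for the removed macros. */
#include <linux/printk.h>

static int example_op(void)
{
	pr_debug("%s: enter\n", __func__);	/* was _func_enter_ */

	/* function body */

	pr_debug("%s: exit\n", __func__);	/* was _func_exit_ */
	return 0;
}
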
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
index 3d1dfcc1b603..e8790f8f913e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_io.h
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -99,7 +99,6 @@
struct intf_priv;
struct intf_hdl;
-struct io_queue;
struct _io_ops {
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
@@ -117,7 +116,6 @@ struct _io_ops {
u8 *pmem);
void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
- void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
u8 *pmem);
@@ -237,34 +235,11 @@ struct reg_protocol_wt {
Below is the data structure used by _io_handler
*/
-struct io_queue {
- spinlock_t lock;
- struct list_head free_ioreqs;
- struct list_head pending; /* The io_req list that will be served
- * in the single protocol read/write.*/
- struct list_head processing;
- u8 *free_ioreqs_buf; /* 4-byte aligned */
- u8 *pallocated_free_ioreqs_buf;
- struct intf_hdl intf;
-};
-
struct io_priv {
struct adapter *padapter;
struct intf_hdl intf;
};
-uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
-uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
-struct io_req *alloc_ioreq(struct io_queue *pio_q);
-
-uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
-void unregister_intf_hdl(struct intf_hdl *pintfhdl);
-
-void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
u8 _rtw_read8(struct adapter *adapter, u32 addr);
u16 _rtw_read16(struct adapter *adapter, u32 addr);
u32 _rtw_read32(struct adapter *adapter, u32 addr);
@@ -363,25 +338,6 @@ void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
int rtw_init_io_priv(struct adapter *padapter,
void (*set_intf_ops)(struct _io_ops *pops));
-uint alloc_io_queue(struct adapter *adapter);
-void free_io_queue(struct adapter *adapter);
-void async_bus_io(struct io_queue *pio_q);
-void bus_sync_io(struct io_queue *pio_q);
-u32 _ioreq2rwmem(struct io_queue *pio_q);
void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-#define PlatformEFIOWrite1Byte(_a, _b, _c) \
- rtw_write8(_a, _b, _c)
-#define PlatformEFIOWrite2Byte(_a, _b, _c) \
- rtw_write16(_a, _b, _c)
-#define PlatformEFIOWrite4Byte(_a, _b, _c) \
- rtw_write32(_a, _b, _c)
-
-#define PlatformEFIORead1Byte(_a, _b) \
- rtw_read8(_a, _b)
-#define PlatformEFIORead2Byte(_a, _b) \
- rtw_read16(_a, _b)
-#define PlatformEFIORead4Byte(_a, _b) \
- rtw_read32(_a, _b)
-
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index 8772d1d178c6..f3aa924f2029 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -102,8 +102,6 @@ struct oid_obj_priv {
#if defined(_RTW_MP_IOCTL_C_)
static int oid_null_function(struct oid_par_priv *poid_par_priv) {
- _func_enter_;
- _func_exit_;
return NDIS_STATUS_SUCCESS;
}
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 6cd988f867da..45c22efe93fe 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -106,13 +106,6 @@ SHALL not lock up more than one lock at a time!
#define traffic_threshold 10
#define traffic_scan_period 500
-struct sitesurvey_ctrl {
- u64 last_tx_pkts;
- uint last_rx_pkts;
- int traffic_busy;
- struct timer_list sitesurvey_ctrl_timer;
-};
-
struct rt_link_detect {
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
@@ -304,31 +297,6 @@ struct wifidirect_info {
u32 noa_start_time[P2P_MAX_NOA_NUM];
};
-struct tdls_ss_record { /* signal strength record */
- u8 macaddr[ETH_ALEN];
- u8 RxPWDBAll;
- u8 is_tdls_sta; /* true: direct link sta, false: else */
-};
-
-struct tdls_info {
- u8 ap_prohibited;
- uint setup_state;
- u8 sta_cnt;
- u8 sta_maximum; /* 1:tdls sta is equal (NUM_STA-1), reach max direct link number; 0: else; */
- struct tdls_ss_record ss_record;
- u8 macid_index; /* macid entry that is ready to write */
- u8 clear_cam; /* cam entry that is trying to clear, using it in direct link teardown */
- u8 ch_sensing;
- u8 cur_channel;
- u8 candidate_ch;
- u8 collect_pkt_num[MAX_CHANNEL_NUM];
- spinlock_t cmd_lock;
- spinlock_t hdl_lock;
- u8 watchdog_count;
- u8 dev_discovered; /* WFD_TDLS: for sigma test */
- u8 enable;
-};
-
struct mlme_priv {
spinlock_t lock;
int fw_state; /* shall we protect this variable? maybe not necessarily... */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index f0c982d6d5f2..09e2a3980ea7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -236,13 +236,13 @@ enum SCAN_STATE {
struct mlme_handler {
unsigned int num;
char *str;
- unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+ unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
};
struct action_handler {
unsigned int num;
char *str;
- unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+ unsigned int (*func)(struct adapter *adapt, struct recv_frame *frame);
};
struct ss_res {
@@ -490,7 +490,7 @@ int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
void site_survey(struct adapter *padapter);
-u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame,
+u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame,
struct wlan_bssid_ex *bssid);
void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct adapter *adapter, bool update_ie);
@@ -544,7 +544,8 @@ unsigned int is_ap_in_wep(struct adapter *padapter);
unsigned int should_forbid_n_rate(struct adapter *padapter);
void report_join_res(struct adapter *padapter, int res);
-void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame);
+void report_survey_event(struct adapter *padapter,
+ struct recv_frame *precv_frame);
void report_surveydone_event(struct adapter *padapter);
void report_del_sta_event(struct adapter *padapter,
unsigned char *addr, unsigned short reason);
@@ -609,46 +610,46 @@ void start_clnt_join(struct adapter *padapter);
void start_create_ibss(struct adapter *padapter);
unsigned int OnAssocReq(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAssocRsp(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnProbeReq(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnProbeRsp(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int DoReserved(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnBeacon(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAtim(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnDisassoc(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAuth(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAuthClient(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnDeAuth(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int on_action_spct(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_qos(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_dls(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_back(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int on_action_public(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_ht(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_wmm(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
unsigned int OnAction_p2p(struct adapter *padapter,
- union recv_frame *precv_frame);
+ struct recv_frame *precv_frame);
void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
void mlmeext_sta_del_event_callback(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index 4a0e9ff3d479..9a42859d612b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -206,10 +206,6 @@ struct pwrctrl_priv {
u8 bInternalAutoSuspend;
u8 bInSuspend;
-#ifdef CONFIG_BT_COEXIST
- u8 bAutoResume;
- u8 autopm_cnt;
-#endif
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
int pwr_state_check_interval;
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index be9c30c57419..bcbce46cec85 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -270,7 +270,7 @@ struct recv_buf {
len = (unsigned int )(tail - data);
*/
-struct recv_frame_hdr {
+struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
struct sk_buff *pkt_newalloc;
@@ -289,23 +289,16 @@ struct recv_frame_hdr {
struct recv_reorder_ctrl *preorder_ctrl;
};
-union recv_frame {
- union {
- struct list_head list;
- struct recv_frame_hdr hdr;
- uint mem[RECVFRAME_HDR_ALIGN>>2];
- } u;
-};
-
-union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-void rtw_init_recvframe(union recv_frame *precvframe,
+struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+void rtw_init_recvframe(struct recv_frame *precvframe,
struct recv_priv *precvpriv);
-int rtw_free_recvframe(union recv_frame *precvframe,
+int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue);
#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
-int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
-int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+int _rtw_enqueue_recvframe(struct recv_frame *precvframe,
+ struct __queue *queue);
+int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
void rtw_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
@@ -315,29 +308,20 @@ struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(void *pcontext);
-static inline u8 *get_rxmem(union recv_frame *precvframe)
+static inline u8 *get_rxmem(struct recv_frame *precvframe)
{
/* always return rx_head... */
if (precvframe == NULL)
return NULL;
- return precvframe->u.hdr.rx_head;
+ return precvframe->rx_head;
}
-static inline u8 *get_rx_status(union recv_frame *precvframe)
+static inline u8 *get_rx_status(struct recv_frame *precvframe)
{
return get_rxmem(precvframe);
}
-static inline u8 *get_recvframe_data(union recv_frame *precvframe)
-{
- /* always return rx_data */
- if (precvframe == NULL)
- return NULL;
-
- return precvframe->u.hdr.rx_data;
-}
-
-static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_push(struct recv_frame *precvframe, int sz)
{
/* append data before rx_data */
@@ -348,16 +332,16 @@ static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
*/
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_data -= sz ;
- if (precvframe->u.hdr.rx_data < precvframe->u.hdr.rx_head) {
- precvframe->u.hdr.rx_data += sz;
+ precvframe->rx_data -= sz;
+ if (precvframe->rx_data < precvframe->rx_head) {
+ precvframe->rx_data += sz;
return NULL;
}
- precvframe->u.hdr.len += sz;
- return precvframe->u.hdr.rx_data;
+ precvframe->len += sz;
+ return precvframe->rx_data;
}
-static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_pull(struct recv_frame *precvframe, int sz)
{
/* rx_data += sz; move rx_data sz bytes hereafter */
@@ -366,16 +350,16 @@ static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_data += sz;
- if (precvframe->u.hdr.rx_data > precvframe->u.hdr.rx_tail) {
- precvframe->u.hdr.rx_data -= sz;
+ precvframe->rx_data += sz;
+ if (precvframe->rx_data > precvframe->rx_tail) {
+ precvframe->rx_data -= sz;
return NULL;
}
- precvframe->u.hdr.len -= sz;
- return precvframe->u.hdr.rx_data;
+ precvframe->len -= sz;
+ return precvframe->rx_data;
}
-static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_put(struct recv_frame *precvframe, int sz)
{
/* used for append sz bytes from ptr to rx_tail, update rx_tail
* and return the updated rx_tail to the caller */
@@ -384,17 +368,17 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_tail += sz;
+ precvframe->rx_tail += sz;
- if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
- precvframe->u.hdr.rx_tail -= sz;
+ if (precvframe->rx_tail > precvframe->rx_end) {
+ precvframe->rx_tail -= sz;
return NULL;
}
- precvframe->u.hdr.len += sz;
- return precvframe->u.hdr.rx_tail;
+ precvframe->len += sz;
+ return precvframe->rx_tail;
}
-static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
+static inline u8 *recvframe_pull_tail(struct recv_frame *precvframe, int sz)
{
/* rmv data from rx_tail (by yitsen) */
@@ -404,64 +388,13 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
if (precvframe == NULL)
return NULL;
- precvframe->u.hdr.rx_tail -= sz;
- if (precvframe->u.hdr.rx_tail < precvframe->u.hdr.rx_data) {
- precvframe->u.hdr.rx_tail += sz;
+ precvframe->rx_tail -= sz;
+ if (precvframe->rx_tail < precvframe->rx_data) {
+ precvframe->rx_tail += sz;
return NULL;
}
- precvframe->u.hdr.len -= sz;
- return precvframe->u.hdr.rx_tail;
-}
-
-static inline unsigned char *get_rxbuf_desc(union recv_frame *precvframe)
-{
- unsigned char *buf_desc;
-
- if (precvframe == NULL)
- return NULL;
- return buf_desc;
-}
-
-static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
-{
- /* due to the design of 2048 bytes alignment of recv_frame,
- * we can reference the union recv_frame */
- /* from any given member of recv_frame. */
- /* rxmem indicates the any member/address in recv_frame */
-
- return (union recv_frame *)(((size_t)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
-}
-
-static inline union recv_frame *pkt_to_recvframe(struct sk_buff *pkt)
-{
- u8 *buf_star;
- union recv_frame *precv_frame;
- precv_frame = rxmem_to_recvframe((unsigned char *)buf_star);
-
- return precv_frame;
-}
-
-static inline u8 *pkt_to_recvmem(struct sk_buff *pkt)
-{
- /* return the rx_head */
-
- union recv_frame *precv_frame = pkt_to_recvframe(pkt);
-
- return precv_frame->u.hdr.rx_head;
-}
-
-static inline u8 *pkt_to_recvdata(struct sk_buff *pkt)
-{
- /* return the rx_data */
-
- union recv_frame *precv_frame = pkt_to_recvframe(pkt);
-
- return precv_frame->u.hdr.rx_data;
-}
-
-static inline int get_recvframe_len(union recv_frame *precvframe)
-{
- return precvframe->u.hdr.len;
+ precvframe->len -= sz;
+ return precvframe->rx_tail;
}
static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
@@ -480,6 +413,6 @@ struct sta_info;
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
-void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame);
+void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 1ac1dd31db68..62f5db169523 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -105,11 +105,6 @@ struct tx_desc {
__le32 txdw7;
};
-union txdesc {
- struct tx_desc txdesc;
- unsigned int value[TXDESC_SIZE>>2];
-};
-
struct hw_xmit {
struct __queue *sta_queue;
int accnt;
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 2e7307f259b6..a88ebf41bba1 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -694,7 +694,7 @@ struct WMM_para_element {
struct ADDBA_request {
unsigned char dialog_token;
- unsigned short BA_para_set;
+ __le16 BA_para_set;
unsigned short BA_timeout_value;
unsigned short BA_starting_seqctrl;
} __packed;
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..2636e7f3dbb8 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -35,6 +35,7 @@
#include <rtw_mp.h>
#include <rtw_iol.h>
+#include <linux/vmalloc.h>
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
@@ -472,8 +473,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
#endif /* CONFIG_88EU_P2P */
-_func_enter_;
-
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -614,9 +613,6 @@ _func_enter_;
exit:
kfree(pwep);
-
-_func_exit_;
-
return ret;
}
@@ -770,8 +766,6 @@ static int rtw_wx_get_name(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
- _func_enter_;
-
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
@@ -806,9 +800,6 @@ static int rtw_wx_get_name(struct net_device *dev,
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
}
-
- _func_exit_;
-
return 0;
}
@@ -816,12 +807,7 @@ static int rtw_wx_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- _func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
-
- _func_exit_;
-
return 0;
}
@@ -854,8 +840,6 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
enum ndis_802_11_network_infra networkType;
int ret = 0;
- _func_enter_;
-
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = -EPERM;
goto exit;
@@ -894,7 +878,6 @@ static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
}
rtw_setopmode_cmd(padapter, networkType);
exit:
- _func_exit_;
return ret;
}
@@ -906,8 +889,6 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
- _func_enter_;
-
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
wrqu->mode = IW_MODE_INFRA;
else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
@@ -918,8 +899,6 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
else
wrqu->mode = IW_MODE_AUTO;
- _func_exit_;
-
return 0;
}
@@ -1011,8 +990,6 @@ static int rtw_wx_get_range(struct net_device *dev,
u16 val;
int i;
- _func_enter_;
-
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
wrqu->data.length = sizeof(*range);
@@ -1093,8 +1070,6 @@ static int rtw_wx_get_range(struct net_device *dev,
range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |
IW_SCAN_CAPA_BSSID | IW_SCAN_CAPA_CHANNEL |
IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
- _func_exit_;
-
return 0;
}
@@ -1118,8 +1093,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
- _func_enter_;
-
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
@@ -1138,15 +1111,15 @@ static int rtw_wx_set_wap(struct net_device *dev,
authmode = padapter->securitypriv.ndisauthtype;
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (1) {
if ((rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) == true)
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_bssid = pnetwork->network.MacAddress;
@@ -1173,8 +1146,6 @@ static int rtw_wx_set_wap(struct net_device *dev,
exit:
- _func_exit_;
-
return ret;
}
@@ -1192,17 +1163,12 @@ static int rtw_wx_get_wap(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
- _func_enter_;
-
if (((check_fwstate(pmlmepriv, _FW_LINKED)) == true) ||
((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) ||
((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == true))
memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
else
_rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
-
- _func_exit_;
-
return 0;
}
@@ -1252,7 +1218,6 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
#endif /* CONFIG_88EU_P2P */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
-_func_enter_;
if (padapter->registrypriv.mp_mode == 1) {
if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
ret = -1;
@@ -1383,7 +1348,6 @@ _func_enter_;
exit:
-_func_exit_;
return ret;
}
@@ -1407,8 +1371,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
- _func_enter_;
-
if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
ret = -EINVAL;
goto exit;
@@ -1440,7 +1402,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist))
@@ -1451,13 +1413,13 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* report network only if the current channel set contains the channel to which this network belongs */
if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
ev = translate_scan(padapter, a, pnetwork, ev, stop);
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -1466,7 +1428,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
wrqu->data.flags = 0;
exit:
- _func_exit_;
return ret;
}
@@ -1490,7 +1451,6 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
- _func_enter_;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
@@ -1530,7 +1490,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
- pmlmepriv->pscanned = get_next(phead);
+ pmlmepriv->pscanned = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
@@ -1540,9 +1500,9 @@ static int rtw_wx_set_essid(struct net_device *dev,
break;
}
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_ssid = pnetwork->network.Ssid.Ssid;
@@ -1583,7 +1543,6 @@ exit:
DBG_88E("<=%s, ret %d\n", __func__, ret);
- _func_exit_;
return ret;
}
@@ -1599,7 +1558,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
- _func_enter_;
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
@@ -1617,7 +1575,6 @@ static int rtw_wx_get_essid(struct net_device *dev,
exit:
- _func_exit_;
return ret;
}
@@ -1634,7 +1591,6 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 ratevalue = 0;
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
@@ -1706,7 +1662,6 @@ set_rate:
ret = -1;
}
-_func_exit_;
return ret;
}
@@ -1734,7 +1689,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
if (wrqu->rts.disabled) {
padapter->registrypriv.rts_thresh = 2347;
@@ -1748,7 +1702,6 @@ static int rtw_wx_set_rts(struct net_device *dev,
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
- _func_exit_;
return 0;
}
@@ -1759,7 +1712,6 @@ static int rtw_wx_get_rts(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
@@ -1767,7 +1719,6 @@ static int rtw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
/* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
- _func_exit_;
return 0;
}
@@ -1778,7 +1729,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
if (wrqu->frag.disabled) {
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
@@ -1792,7 +1742,6 @@ static int rtw_wx_set_frag(struct net_device *dev,
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
- _func_exit_;
return 0;
}
@@ -1803,14 +1752,12 @@ static int rtw_wx_get_frag(struct net_device *dev,
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- _func_enter_;
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
- _func_exit_;
return 0;
}
@@ -1844,7 +1791,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
key = erq->flags & IW_ENCODE_INDEX;
- _func_enter_;
if (erq->flags & IW_ENCODE_DISABLED) {
DBG_88E("EncryptionDisabled\n");
@@ -1939,7 +1885,6 @@ static int rtw_wx_set_enc(struct net_device *dev,
exit:
- _func_exit_;
return ret;
}
@@ -1953,7 +1898,6 @@ static int rtw_wx_get_enc(struct net_device *dev,
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- _func_enter_;
if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -2007,7 +1951,6 @@ static int rtw_wx_get_enc(struct net_device *dev,
erq->flags |= IW_ENCODE_DISABLED;
break;
}
- _func_exit_;
return ret;
}
@@ -2210,6 +2153,7 @@ static int rtw_wx_read32(struct net_device *dev,
u32 data32;
u32 bytes;
u8 *ptmp;
+ int rv;
padapter = (struct adapter *)rtw_netdev_priv(dev);
p = &wrqu->data;
@@ -2225,7 +2169,11 @@ static int rtw_wx_read32(struct net_device *dev,
bytes = 0;
addr = 0;
- sscanf(ptmp, "%d,%x", &bytes, &addr);
+ rv = sscanf(ptmp, "%d,%x", &bytes, &addr);
+ if (rv != 2) {
+ kfree(ptmp);
+ return -EINVAL;
+ }
switch (bytes) {
case 1:
@@ -2255,6 +2203,7 @@ static int rtw_wx_write32(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int rv;
u32 addr;
u32 data32;
@@ -2263,7 +2212,9 @@ static int rtw_wx_write32(struct net_device *dev,
bytes = 0;
addr = 0;
data32 = 0;
- sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+ rv = sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+ if (rv != 3)
+ return -EINVAL;
switch (bytes) {
case 1:
@@ -2500,7 +2451,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -2607,13 +2558,13 @@ static int rtw_get_ap_info(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (hwaddr_aton_i(data, bssid)) {
DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
@@ -2638,7 +2589,7 @@ static int rtw_get_ap_info(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -2690,13 +2641,13 @@ static int rtw_wps_start(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
- if (ret) {
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
ret = -EINVAL;
goto exit;
}
- if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (ret) {
ret = -EINVAL;
goto exit;
}
@@ -3110,13 +3061,13 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3134,7 +3085,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
}
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3164,9 +3115,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3182,13 +3131,13 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3217,18 +3166,18 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
@@ -3266,13 +3215,13 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3297,7 +3246,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3345,13 +3294,13 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
u8 *wpsie;
uint wpsie_len = 0;
@@ -3368,7 +3317,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3416,13 +3365,13 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
/* Commented by Albert 20121226 */
/* Match the device address located in the P2P IE */
@@ -3442,7 +3391,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
}
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3498,19 +3447,19 @@ static int rtw_p2p_connect(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
uintPeerChannel = pnetwork->network.Configuration.DSConfig;
break;
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3593,13 +3542,13 @@ static int rtw_p2p_invite_req(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3624,7 +3573,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
}
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3743,7 +3692,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
phead = get_list_head(queue);
- plist = get_next(phead);
+ plist = phead->next;
while (1) {
if (rtw_end_of_queue_search(phead, plist) == true)
@@ -3752,7 +3701,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
if (uintPeerChannel != 0)
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
/* Commented by Albert 2011/05/18 */
/* Match the device address located in the P2P IE */
@@ -3783,7 +3732,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
}
}
- plist = get_next(plist);
+ plist = plist->next;
}
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
@@ -3916,24 +3865,33 @@ static int rtw_p2p_get(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
if (padapter->bShowGetP2PState)
- DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
- if (!memcmp(wrqu->data.pointer, "status", 6)) {
+ DBG_88E("[%s] extra = %s\n", __func__,
+ (char __user *)wrqu->data.pointer);
+ if (!memcmp((__force const char *)wrqu->data.pointer,
+ "status", 6)) {
rtw_p2p_get_status(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "role", 4)) {
rtw_p2p_get_role(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_ifa", 8)) {
rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "req_cm", 6)) {
rtw_p2p_get_req_cm(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_deva", 9)) {
/* Get the P2P device address when receiving the provision discovery request frame. */
rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "group_id", 8)) {
rtw_p2p_get_groupid(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "peer_deva_inv", 9)) {
/* Get the P2P device address when receiving the P2P Invitation request frame. */
rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
- } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
+ } else if (!memcmp((__force const char *)wrqu->data.pointer,
+ "op_ch", 5)) {
rtw_p2p_get_op_ch(dev, info, wrqu, extra);
}
#endif /* CONFIG_88EU_P2P */
@@ -3947,7 +3905,8 @@ static int rtw_p2p_get2(struct net_device *dev,
int ret = 0;
#ifdef CONFIG_88EU_P2P
- DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ DBG_88E("[%s] extra = %s\n", __func__,
+ (char __user *)wrqu->data.pointer);
if (!memcmp(extra, "wpsCM =", 6)) {
wrqu->data.length -= 6;
rtw_p2p_get_wps_configmethod(dev, info, wrqu, &extra[6]);
@@ -4438,12 +4397,12 @@ static int rtw_dbg_port(struct net_device *dev,
for (i = 0; i < NUM_STA; i++) {
phead = &(pstapriv->sta_hash[i]);
- plist = get_next(phead);
+ plist = phead->next;
while ((rtw_end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
- plist = get_next(plist);
+ plist = plist->next;
if (extra_arg == psta->aid) {
DBG_88E("sta's macaddr:%pM\n", (psta->hwaddr));
@@ -4509,11 +4468,9 @@ static int rtw_dbg_port(struct net_device *dev,
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
/* default is set to enable 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */
- if (pregpriv &&
- (extra_arg == 0 ||
- extra_arg == 1 ||
- extra_arg == 2 ||
- extra_arg == 3)) {
+ if (!pregpriv)
+ break;
+ if (extra_arg >= 0 && extra_arg <= 3) {
pregpriv->rx_stbc = extra_arg;
DBG_88E("set rx_stbc =%d\n", pregpriv->rx_stbc);
} else {
@@ -4525,7 +4482,9 @@ static int rtw_dbg_port(struct net_device *dev,
{
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
- if (pregpriv && extra_arg >= 0 && extra_arg < 3) {
+ if (!pregpriv)
+ break;
+ if (extra_arg >= 0 && extra_arg < 3) {
pregpriv->ampdu_enable = extra_arg;
DBG_88E("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
} else {
@@ -5650,12 +5609,12 @@ static int rtw_wx_set_priv(struct net_device *dev,
return -EFAULT;
len = dwrq->length;
- ext = rtw_vmalloc(len);
+ ext = vmalloc(len);
if (!ext)
return -ENOMEM;
if (copy_from_user(ext, dwrq->pointer, len)) {
- rtw_vmfree(ext, len);
+ vfree(ext);
return -EFAULT;
}
@@ -5695,7 +5654,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
FREE_EXT:
- rtw_vmfree(ext, len);
+ vfree(ext);
return ret;
}
@@ -5711,10 +5670,14 @@ static int rtw_pm_set(struct net_device *dev,
DBG_88E("[%s] extra = %s\n", __func__, extra);
if (!memcmp(extra, "lps =", 4)) {
- sscanf(extra+4, "%u", &mode);
+ ret = sscanf(extra+4, "%u", &mode);
+ if (ret != 1)
+ return -EINVAL;
ret = rtw_pm_set_lps(padapter, mode);
} else if (!memcmp(extra, "ips =", 4)) {
- sscanf(extra+4, "%u", &mode);
+ ret = sscanf(extra+4, "%u", &mode);
+ if (ret != 1)
+ return -EINVAL;
ret = rtw_pm_set_ips(padapter, mode);
} else {
ret = -EINVAL;
@@ -6814,8 +6777,11 @@ static int rtw_mp_bandwidth(struct net_device *dev,
{
u32 bandwidth = 0, sg = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
+ int rv;
- sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+ rv = sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+ if (rv != 2)
+ return -EINVAL;
if (bandwidth != HT_CHANNEL_WIDTH_40)
bandwidth = HT_CHANNEL_WIDTH_20;
@@ -6835,6 +6801,7 @@ static int rtw_mp_txpower(struct net_device *dev,
u32 idx_a = 0, idx_b = 0;
char *input = kmalloc(wrqu->length, GFP_KERNEL);
struct adapter *padapter = rtw_netdev_priv(dev);
+ int rv;
if (!input)
return -ENOMEM;
@@ -6842,7 +6809,11 @@ static int rtw_mp_txpower(struct net_device *dev,
kfree(input);
return -EFAULT;
}
- sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+ rv = sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+ if (rv != 2) {
+ kfree(input);
+ return -EINVAL;
+ }
sprintf(extra, "Set power level path_A:%d path_B:%d", idx_a, idx_b);
padapter->mppriv.txpoweridx = (u8)idx_a;
@@ -6936,6 +6907,7 @@ static int rtw_mp_ctx(struct net_device *dev,
u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1;
u32 bStartTest = 1;
u32 count = 0;
+ int rv;
struct mp_priv *pmp_priv;
struct pkt_attrib *pattrib;
@@ -6955,7 +6927,9 @@ static int rtw_mp_ctx(struct net_device *dev,
sgleTx = strncmp(extra, "background, stone", 20);
pkTx = strncmp(extra, "background, pkt", 20);
stop = strncmp(extra, "stop", 4);
- sscanf(extra, "count =%d, pkt", &count);
+ rv = sscanf(extra, "count =%d, pkt", &count);
+	if (rv != 1)
+ return -EINVAL;
_rtw_memset(extra, '\0', sizeof(*extra));
@@ -7314,6 +7288,7 @@ static int rtw_mp_phypara(struct net_device *dev,
{
char *input = kmalloc(wrqu->length, GFP_KERNEL);
u32 valxcap;
+ int rv;
if (!input)
return -ENOMEM;
@@ -7324,7 +7299,11 @@ static int rtw_mp_phypara(struct net_device *dev,
DBG_88E("%s:iwpriv in =%s\n", __func__, input);
- sscanf(input, "xcap =%d", &valxcap);
+ rv = sscanf(input, "xcap =%d", &valxcap);
+ if (rv != 1) {
+ kfree(input);
+ return -EINVAL;
+ }
kfree(input);
return 0;
@@ -7890,6 +7869,7 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
s32 len;
u8 *extra = NULL;
u32 extra_size = 0;
+ int rv;
s32 k;
const iw_handler *priv; /* Private ioctl */
@@ -7915,7 +7895,11 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
ptr = input;
len = input_len;
- sscanf(ptr, "%16s", cmdname);
+ rv = sscanf(ptr, "%16s", cmdname);
+ if (rv != 1) {
+ err = -EINVAL;
+ goto exit;
+ }
cmdlen = strlen(cmdname);
DBG_88E("%s: cmd =%s\n", __func__, cmdname);
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 57d1ff750d52..0624378efd6f 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -61,12 +61,10 @@ void rtw_init_mlme_timer(struct adapter *padapter)
void rtw_os_indicate_connect(struct adapter *adapter)
{
-_func_enter_;
rtw_indicate_wx_assoc_event(adapter);
netif_carrier_on(adapter->pnetdev);
if (adapter->pid[2] != 0)
rtw_signal_process(adapter->pid[2], SIGALRM);
-_func_exit_;
}
void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
@@ -119,11 +117,9 @@ void rtw_reset_securitypriv(struct adapter *adapter)
void rtw_os_indicate_disconnect(struct adapter *adapter)
{
-_func_enter_;
netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
rtw_indicate_wx_disassoc_event(adapter);
rtw_reset_securitypriv(adapter);
-_func_exit_;
}
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
@@ -132,7 +128,6 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
u8 *buff, *p, i;
union iwreq_data wrqu;
-_func_enter_;
RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
("+rtw_report_sec_ie, authmode=%d\n", authmode));
buff = NULL;
@@ -141,7 +136,7 @@ _func_enter_;
("rtw_report_sec_ie, authmode=%d\n", authmode));
buff = rtw_malloc(IW_CUSTOM_MAX);
if (!buff)
- goto exit;
+ return;
_rtw_memset(buff, 0, IW_CUSTOM_MAX);
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs =");
@@ -157,8 +152,6 @@ _func_enter_;
wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
kfree(buff);
}
-exit:
-_func_exit_;
}
static void _survey_timer_hdl(void *FunctionContext)
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..b225d1c07210 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -520,7 +520,6 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
uint status = _SUCCESS;
struct registry_priv *registry_par = &padapter->registrypriv;
-_func_enter_;
GlobalDebugLevel = rtw_debug;
registry_par->chip_version = (u8)rtw_chip_version;
@@ -588,7 +587,6 @@ _func_enter_;
snprintf(registry_par->ifname, 16, "%s", ifname);
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
-_func_exit_;
return status;
}
@@ -653,7 +651,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -855,7 +853,6 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
{
u8 ret8 = _SUCCESS;
-_func_enter_;
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
@@ -930,7 +927,6 @@ _func_enter_;
exit:
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
- _func_exit_;
return ret8;
}
@@ -1115,7 +1111,7 @@ int netdev_open(struct net_device *pnetdev)
_enter_critical_mutex(padapter->hw_init_mutex, NULL);
ret = _netdev_open(pnetdev);
- _exit_critical_mutex(padapter->hw_init_mutex, NULL);
+ mutex_unlock(padapter->hw_init_mutex);
return ret;
}
@@ -1208,6 +1204,7 @@ int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
@@ -1246,6 +1243,9 @@ int netdev_close(struct net_device *pnetdev)
rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
#endif /* CONFIG_88EU_P2P */
+ kfree(dvobj->firmware.szFwBuffer);
+ dvobj->firmware.szFwBuffer = NULL;
+
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 8c3b077448af..2579a404a766 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -55,27 +55,6 @@ u32 rtw_atoi(u8 *s)
return num;
}
-inline u8 *_rtw_vmalloc(u32 sz)
-{
- u8 *pbuf;
- pbuf = vmalloc(sz);
- return pbuf;
-}
-
-inline u8 *_rtw_zvmalloc(u32 sz)
-{
- u8 *pbuf;
- pbuf = _rtw_vmalloc(sz);
- if (pbuf != NULL)
- memset(pbuf, 0, sz);
- return pbuf;
-}
-
-inline void _rtw_vmfree(u8 *pbuf, u32 sz)
-{
- vfree(pbuf);
-}
-
u8 *_rtw_malloc(u32 sz)
{
u8 *pbuf = NULL;
@@ -114,16 +93,6 @@ void rtw_mfree2d(void *pbuf, int h, int w, int size)
kfree(pbuf);
}
-int _rtw_memcmp(void *dst, void *src, u32 sz)
-{
-/* under Linux/GNU/GLibc, the return value of memcmp for two same
- * mem. chunk is 0 */
- if (!(memcmp(dst, src, sz)))
- return true;
- else
- return false;
-}
-
void _rtw_memset(void *pbuf, int c, u32 sz)
{
memset(pbuf, c, sz);
@@ -252,7 +221,7 @@ struct net_device *rtw_alloc_etherdev(int sizeof_priv)
pnpi = netdev_priv(pnetdev);
- pnpi->priv = rtw_zvmalloc(sizeof_priv);
+ pnpi->priv = vzalloc(sizeof_priv);
if (!pnpi->priv) {
free_netdev(pnetdev);
pnetdev = NULL;
@@ -276,7 +245,7 @@ void rtw_free_netdev(struct net_device *netdev)
if (!pnpi->priv)
goto RETURN;
- rtw_vmfree(pnpi->priv, pnpi->sizeof_priv);
+ vfree(pnpi->priv);
free_netdev(netdev);
RETURN:
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 2a18b3208a00..da397e4c6773 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -26,7 +26,6 @@
#include <recv_osdep.h>
#include <osdep_intf.h>
-#include <ethernet.h>
#include <usb_ops.h>
/* init os related resource in struct recv_priv */
@@ -36,16 +35,16 @@ int rtw_os_recv_resource_init(struct recv_priv *precvpriv,
return _SUCCESS;
}
-/* alloc os related resource in union recv_frame */
+/* alloc os related resource in struct recv_frame */
int rtw_os_recv_resource_alloc(struct adapter *padapter,
- union recv_frame *precvframe)
+ struct recv_frame *precvframe)
{
- precvframe->u.hdr.pkt_newalloc = NULL;
- precvframe->u.hdr.pkt = NULL;
+ precvframe->pkt_newalloc = NULL;
+ precvframe->pkt = NULL;
return _SUCCESS;
}
-/* free os related resource in union recv_frame */
+/* free os related resource in struct recv_frame */
void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
{
}
@@ -118,24 +117,23 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
}
void rtw_hostapd_mlme_rx(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
}
int rtw_recv_indicatepkt(struct adapter *padapter,
- union recv_frame *precv_frame)
+ struct recv_frame *precv_frame)
{
struct recv_priv *precvpriv;
struct __queue *pfree_recv_queue;
struct sk_buff *skb;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-_func_enter_;
precvpriv = &(padapter->recvpriv);
pfree_recv_queue = &(precvpriv->free_recv_queue);
- skb = precv_frame->u.hdr.pkt;
+ skb = precv_frame->pkt;
if (skb == NULL) {
RT_TRACE(_module_recv_osdep_c_, _drv_err_,
("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
@@ -145,18 +143,18 @@ _func_enter_;
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("rtw_recv_indicatepkt():skb != NULL !!!\n"));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- ("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head =%p precv_frame->hdr.rx_data =%p\n",
- precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
+ ("rtw_recv_indicatepkt():precv_frame->rx_head =%p precv_frame->hdr.rx_data =%p\n",
+ precv_frame->rx_head, precv_frame->rx_data));
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
- ("precv_frame->hdr.rx_tail =%p precv_frame->u.hdr.rx_end =%p precv_frame->hdr.len =%d\n",
- precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end,
- precv_frame->u.hdr.len));
+ ("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n",
+ precv_frame->rx_tail, precv_frame->rx_end,
+ precv_frame->len));
- skb->data = precv_frame->u.hdr.rx_data;
+ skb->data = precv_frame->rx_data;
- skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
+ skb_set_tail_pointer(skb, precv_frame->len);
- skb->len = precv_frame->u.hdr.len;
+ skb->len = precv_frame->len;
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
@@ -167,11 +165,11 @@ _func_enter_;
struct sk_buff *pskb2 = NULL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
int bmcast = IS_MCAST(pattrib->dst);
- if (!_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv),
- ETH_ALEN)) {
+ if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
+ ETH_ALEN)) {
if (bmcast) {
psta = rtw_get_bcmc_stainfo(padapter);
pskb2 = skb_clone(skb, GFP_ATOMIC);
@@ -209,14 +207,13 @@ _func_enter_;
_recv_indicatepkt_end:
/* pointers to NULL before rtw_free_recvframe() */
- precv_frame->u.hdr.pkt = NULL;
+ precv_frame->pkt = NULL;
rtw_free_recvframe(precv_frame, pfree_recv_queue);
RT_TRACE(_module_recv_osdep_c_, _drv_info_,
("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
-_func_exit_;
return _SUCCESS;
@@ -225,7 +222,6 @@ _recv_indicatepkt_drop:
/* enqueue back to free_recv_queue */
rtw_free_recvframe(precv_frame, pfree_recv_queue);
-_func_exit_;
return _FAIL;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index a3c2bc5922e4..ca2736d60b62 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -24,7 +24,6 @@
#include <rtw_android.h>
#include <osdep_service.h>
#include <rtw_debug.h>
-#include <ioctl_cfg80211.h>
#include <rtw_ioctl_set.h>
static const char *android_wifi_cmd_str[ANDROID_WIFI_CMD_MAX] = {
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..2e49cd583212 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -26,6 +26,7 @@
#include <hal_intf.h>
#include <rtw_version.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <osdep_intf.h>
#include <usb_vendor_req.h>
@@ -53,8 +54,9 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{} /* Terminating entry */
};
@@ -161,7 +163,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_endpoint_descriptor *pendp_desc;
struct usb_device *pusbd;
-_func_enter_;
pdvobjpriv = (struct dvobj_priv *)rtw_zmalloc(sizeof(*pdvobjpriv));
if (pdvobjpriv == NULL)
@@ -254,7 +255,6 @@ free_dvobj:
pdvobjpriv = NULL;
}
exit:
-_func_exit_;
return pdvobjpriv;
}
@@ -262,7 +262,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
{
struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
-_func_enter_;
usb_set_intfdata(usb_intf, NULL);
if (dvobj) {
@@ -287,7 +286,6 @@ _func_enter_;
usb_put_dev(interface_to_usbdev(usb_intf));
-_func_exit_;
}
static void chip_by_usb_id(struct adapter *padapter,
@@ -389,7 +387,6 @@ int rtw_hw_suspend(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
- _func_enter_;
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved)) {
@@ -442,7 +439,6 @@ int rtw_hw_suspend(struct adapter *padapter)
} else {
goto error_exit;
}
- _func_exit_;
return 0;
error_exit:
@@ -455,7 +451,6 @@ int rtw_hw_resume(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct net_device *pnetdev = padapter->pnetdev;
- _func_enter_;
if (padapter) { /* system resume */
DBG_88E("==> rtw_hw_resume\n");
@@ -487,7 +482,6 @@ int rtw_hw_resume(struct adapter *padapter)
goto error_exit;
}
- _func_exit_;
return 0;
error_exit:
@@ -506,7 +500,6 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
int ret = 0;
u32 start_time = jiffies;
- _func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -563,7 +556,6 @@ exit:
DBG_88E("<=== %s return %d.............. in %dms\n", __func__
, ret, rtw_get_passing_time_ms(start_time));
- _func_exit_;
return ret;
}
@@ -587,7 +579,6 @@ int rtw_resume_process(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = NULL;
int ret = -1;
u32 start_time = jiffies;
- _func_enter_;
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
@@ -626,7 +617,6 @@ exit:
DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
ret, rtw_get_passing_time_ms(start_time));
- _func_exit_;
return ret;
}
@@ -646,7 +636,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
struct net_device *pnetdev = NULL;
int status = _FAIL;
- padapter = (struct adapter *)rtw_zvmalloc(sizeof(*padapter));
+ padapter = (struct adapter *)vzalloc(sizeof(*padapter));
if (padapter == NULL)
goto exit;
padapter->dvobj = dvobj;
@@ -746,7 +736,7 @@ free_adapter:
if (pnetdev)
rtw_free_netdev(pnetdev);
else if (padapter)
- rtw_vmfree((u8 *)padapter, sizeof(*padapter));
+ vfree(padapter);
padapter = NULL;
}
exit:
@@ -835,7 +825,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
struct adapter *padapter = dvobj->if1;
-_func_enter_;
DBG_88E("+rtw_dev_remove\n");
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
@@ -854,7 +843,6 @@ _func_enter_;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
DBG_88E("-r871xu_dev_remove, done\n");
-_func_exit_;
return;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 7e3f2fadd5bf..fb0bba85373b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -80,7 +80,6 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct hal_data_8188e *haldata;
-_func_enter_;
switch (pxmitbuf->flags) {
case VO_QUEUE_INX:
@@ -156,8 +155,6 @@ check_completion:
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
-_func_exit_;
}
u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
@@ -174,7 +171,6 @@ u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
struct usb_device *pusbd = pdvobj->pusbdev;
-_func_enter_;
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n"));
@@ -255,7 +251,6 @@ _func_enter_;
exit:
if (ret != _SUCCESS)
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
-_func_exit_;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 9005971084b7..2c8e3f72dec7 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -23,8 +23,6 @@
#include <osdep_service.h>
#include <drv_types.h>
-#include <if_ether.h>
-#include <ip.h>
#include <wifi.h>
#include <mlme_osdep.h>
#include <xmit_osdep.h>
@@ -39,7 +37,6 @@ uint rtw_remainder_len(struct pkt_file *pfile)
void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
{
-_func_enter_;
pfile->pkt = pktptr;
pfile->cur_addr = pktptr->data;
@@ -49,14 +46,12 @@ _func_enter_;
pfile->cur_buffer = pfile->buf_start;
-_func_exit_;
}
uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
-_func_enter_;
len = rtw_remainder_len(pfile);
len = (rlen > len) ? len : rlen;
@@ -67,21 +62,17 @@ _func_enter_;
pfile->cur_addr += len;
pfile->pkt_len -= len;
-_func_exit_;
return len;
}
int rtw_endofpktfile(struct pkt_file *pfile)
{
-_func_enter_;
if (pfile->pkt_len == 0) {
- _func_exit_;
return true;
}
-_func_exit_;
return false;
}
@@ -200,13 +191,13 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
- plist = get_next(phead);
+ plist = phead->next;
/* free sta asoc_queue */
while (!rtw_end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ psta = container_of(plist, struct sta_info, asoc_list);
- plist = get_next(plist);
+ plist = plist->next;
/* avoid come from STA1 and send back STA1 */
if (!memcmp(psta->hwaddr, &skb->data[6], 6))
@@ -246,7 +237,6 @@ int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
-_func_enter_;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
@@ -282,7 +272,6 @@ drop_packet:
exit:
-_func_exit_;
return 0;
}
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index eb33c517fcc8..53da61072992 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -133,14 +133,12 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid"
- " country IE, skip it........1\n");
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
return;
}
if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
pTriple->NumChnls)) {
- printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid "
- "country IE, skip it........2\n");
+ printk(KERN_INFO "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
return;
}
@@ -167,8 +165,7 @@ u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel)
u8 MaxTxPwrInDbm = 255;
if (MAX_CHANNEL_NUMBER < Channel) {
- printk(KERN_INFO "DOT11D_GetMaxTxPwrInDbm(): Invalid "
- "Channel\n");
+ printk(KERN_INFO "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
if (pDot11dInfo->channel_map[Channel])
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
index 50e0d91a409a..ad82bc348a75 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -5,5 +5,4 @@ config RTL8192E
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
- default N
---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 356aec437963..4b94653c50d9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -28,31 +28,6 @@
#include <linux/types.h>
#include <linux/pci.h>
-static inline void NdisRawWritePortUlong(u32 port, u32 val)
-{
- outl(val, port);
-}
-
-static inline void NdisRawWritePortUchar(u32 port, u8 val)
-{
- outb(val, port);
-}
-
-static inline void NdisRawReadPortUchar(u32 port, u8 *pval)
-{
- *pval = inb(port);
-}
-
-static inline void NdisRawReadPortUshort(u32 port, u16 *pval)
-{
- *pval = inw(port);
-}
-
-static inline void NdisRawReadPortUlong(u32 port, u32 *pval)
-{
- *pval = inl(port);
-}
-
struct mp_adapter {
u8 LinkCtrlReg;
@@ -70,33 +45,6 @@ struct mp_adapter {
u8 PciBridgeLinkCtrlReg;
};
-struct rt_pci_capab_header {
- unsigned char CapabilityID;
- unsigned char Next;
-};
-
-#define PCI_MAX_BRIDGE_NUMBER 255
-#define PCI_MAX_DEVICES 32
-#define PCI_MAX_FUNCTION 8
-
-#define PCI_CONF_ADDRESS 0x0CF8
-#define PCI_CONF_DATA 0x0CFC
-
-#define PCI_CLASS_BRIDGE_DEV 0x06
-#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
-
-#define U1DONTCARE 0xFF
-#define U2DONTCARE 0xFFFF
-#define U4DONTCARE 0xFFFFFFFF
-
-#define INTEL_VENDOR_ID 0x8086
-#define SIS_VENDOR_ID 0x1039
-#define ATI_VENDOR_ID 0x1002
-#define ATI_DEVICE_ID 0x7914
-#define AMD_VENDOR_ID 0x1022
-
-#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
-
struct net_device;
bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 6202358c2984..498995d833e7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -609,7 +609,7 @@ static int r8192_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
down(&priv->wx_sem);
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
up(&priv->wx_sem);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 77964885b3f2..11d0a9d8ee59 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -939,12 +939,12 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (txb) {
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
dev->stats.tx_packets++;
- dev->stats.tx_bytes += txb->payload_size;
+ dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
rtllib_softmac_xmit(txb, ieee);
} else {
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
- stats->tx_bytes += txb->payload_size;
+ stats->tx_bytes += le16_to_cpu(txb->payload_size);
return 0;
}
rtllib_txb_free(txb);
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 3f055091b35f..3ee9d0d00fb6 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -5,5 +5,4 @@ config RTL8192U
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
- default N
---help---
diff --git a/drivers/staging/rtl8192u/ieee80211/EndianFree.h b/drivers/staging/rtl8192u/ieee80211/EndianFree.h
deleted file mode 100644
index dc85fb913dc2..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/EndianFree.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#ifndef __INC_ENDIANFREE_H
-#define __INC_ENDIANFREE_H
-
-/*
- * Call endian free function when
- * 1. Read/write packet content.
- * 2. Before write integer to IO.
- * 3. After read integer from IO.
- */
-
-#define __MACHINE_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
-#define __MACHINE_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net, ppc */
-
-#define BYTE_ORDER __MACHINE_LITTLE_ENDIAN
-
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-// Convert data
-#define EF1Byte(_val) ((u8)(_val))
-#define EF2Byte(_val) ((u16)(_val))
-#define EF4Byte(_val) ((u32)(_val))
-
-#else
-// Convert data
-#define EF1Byte(_val) ((u8)(_val))
-#define EF2Byte(_val) (((((u16)(_val))&0x00ff)<<8)|((((u16)(_val))&0xff00)>>8))
-#define EF4Byte(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8)|\
- ((((u32)(_val))&0x00ff0000)>>8)|\
- ((((u32)(_val))&0xff000000)>>24))
-#endif
-
-// Read data from memory
-#define ReadEF1Byte(_ptr) EF1Byte(*((u8 *)(_ptr)))
-#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
-#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
-
-// Write data to memory
-#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr)))=EF1Byte(_val)
-#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr)))=EF2Byte(_val)
-#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr)))=EF4Byte(_val)
-// Convert Host system specific byte ording (litten or big endia) to Network byte ording (big endian).
-// 2006.05.07, by rcnjko.
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-#define H2N1BYTE(_val) ((u8)(_val))
-#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
- ((((u16)(_val))&0xff00)>>8))
-#define H2N4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8) |\
- ((((u32)(_val))&0x00ff0000)>>8) |\
- ((((u32)(_val))&0xff000000)>>24))
-#else
-#define H2N1BYTE(_val) ((u8)(_val))
-#define H2N2BYTE(_val) ((u16)(_val))
-#define H2N4BYTE(_val) ((u32)(_val))
-#endif
-
-// Convert from Network byte ording (big endian) to Host system specific byte ording (litten or big endia).
-// 2006.05.07, by rcnjko.
-#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
-#define N2H1BYTE(_val) ((u8)(_val))
-#define N2H2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
- ((((u16)(_val))&0xff00)>>8))
-#define N2H4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
- ((((u32)(_val))&0x0000ff00)<<8) |\
- ((((u32)(_val))&0x00ff0000)>>8) |\
- ((((u32)(_val))&0xff000000)>>24))
-#else
-#define N2H1BYTE(_val) ((u8)(_val))
-#define N2H2BYTE(_val) ((u16)(_val))
-#define N2H4BYTE(_val) ((u32)(_val))
-#endif
-
-//
-// Example:
-// BIT_LEN_MASK_32(0) => 0x00000000
-// BIT_LEN_MASK_32(1) => 0x00000001
-// BIT_LEN_MASK_32(2) => 0x00000003
-// BIT_LEN_MASK_32(32) => 0xFFFFFFFF
-//
-#define BIT_LEN_MASK_32(__BitLen) (0xFFFFFFFF >> (32 - (__BitLen)))
-//
-// Example:
-// BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
-// BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
-//
-#define BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) (BIT_LEN_MASK_32(__BitLen) << (__BitOffset))
-
-//
-// Description:
-// Return 4-byte value in host byte ordering from
-// 4-byte pointer in litten-endian system.
-//
-#define LE_P4BYTE_TO_HOST_4BYTE(__pStart) (EF4Byte(*((u32 *)(__pStart))))
-
-//
-// Description:
-// Translate subfield (continuous bits in little-endian) of 4-byte value in litten byte to
-// 4-byte value in host byte ordering.
-//
-#define LE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_32(__BitLen) \
- )
-
-//
-// Description:
-// Mask subfield (continuous bits in little-endian) of 4-byte value in litten byte oredering
-// and return the result in 4-byte value in host byte ordering.
-//
-#define LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) ) \
- )
-
-//
-// Description:
-// Set subfield of little-endian 4-byte value to specified value.
-//
-#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u32 *)(__pStart)) = \
- EF4Byte( \
- LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset) ) \
- );
-
-
-#define BIT_LEN_MASK_16(__BitLen) \
- (0xFFFF >> (16 - (__BitLen)))
-
-#define BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) \
- (BIT_LEN_MASK_16(__BitLen) << (__BitOffset))
-
-#define LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- (EF2Byte(*((u16 *)(__pStart))))
-
-#define LE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_16(__BitLen) \
- )
-
-#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) ) \
- )
-
-#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u16 *)(__pStart)) = \
- EF2Byte( \
- LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << (__BitOffset) ) \
- );
-
-#define BIT_LEN_MASK_8(__BitLen) \
- (0xFF >> (8 - (__BitLen)))
-
-#define BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) \
- (BIT_LEN_MASK_8(__BitLen) << (__BitOffset))
-
-#define LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- (EF1Byte(*((u8 *)(__pStart))))
-
-#define LE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- ( LE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset) ) \
- & \
- BIT_LEN_MASK_8(__BitLen) \
- )
-
-#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
- & \
- ( ~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) ) \
- )
-
-#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
- *((u8 *)(__pStart)) = \
- EF1Byte( \
- LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
- | \
- ( (((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << (__BitOffset) ) \
- );
-
-#endif // #ifndef __INC_ENDIANFREE_H
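
For reference, the bit-field helpers deleted above reduce to plain shift-and-mask arithmetic on a little-endian buffer. The following standalone sketch (hypothetical names; a little-endian host is assumed so the EF4Byte() conversion is a no-op) shows the extraction that BIT_LEN_MASK_32() and LE_BITS_TO_4BYTE() perform:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone equivalent of BIT_LEN_MASK_32()/LE_BITS_TO_4BYTE(),
     * assuming a little-endian host so the EF4Byte() conversion is a no-op. */
    static uint32_t bit_len_mask_32(unsigned int bit_len)
    {
            return bit_len >= 32 ? 0xFFFFFFFFu : (1u << bit_len) - 1u;
    }

    static uint32_t le_bits_to_4byte(const uint8_t *p, unsigned int off,
                                     unsigned int len)
    {
            uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

            return (v >> off) & bit_len_mask_32(len);
    }

    int main(void)
    {
            uint8_t desc[4] = { 0x34, 0x12, 0x00, 0x80 };

            /* Bits 0..15 hold 0x1234; bit 31 is set. */
            printf("0x%x 0x%x\n",
                   (unsigned)le_bits_to_4byte(desc, 0, 16),
                   (unsigned)le_bits_to_4byte(desc, 31, 1));
            return 0;
    }

SET_BITS_TO_LE_4BYTE() is the inverse: clear the field with the complement of the shifted mask, OR in the new (masked and shifted) value, and write the word back.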
diff --git a/drivers/staging/rtl8192u/ieee80211/aes.c b/drivers/staging/rtl8192u/ieee80211/aes.c
deleted file mode 100644
index abc1023cef65..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/aes.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Cryptographic API.
- *
- * AES Cipher Algorithm.
- *
- * Based on Brian Gladman's code.
- *
- * Linux developers:
- * Alexander Kjeldaas <astor@fast.no>
- * Herbert Valerio Riedel <hvr@hvrlab.org>
- * Kyle McMartin <kyle@debian.org>
- * Adam J. Richter <adam@yggdrasil.com> (conversion to 2.5 API).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * ---------------------------------------------------------------------------
- * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
- * All rights reserved.
- *
- * LICENSE TERMS
- *
- * The free distribution and use of this software in both source and binary
- * form is allowed (with or without changes) provided that:
- *
- * 1. distributions of this source code include the above copyright
- * notice, this list of conditions and the following disclaimer;
- *
- * 2. distributions in binary form include the above copyright
- * notice, this list of conditions and the following disclaimer
- * in the documentation and/or other associated materials;
- *
- * 3. the copyright holder's name is not used to endorse products
- * built using this software without specific written permission.
- *
- * ALTERNATIVELY, provided that this notice is retained in full, this product
- * may be distributed under the terms of the GNU General Public License (GPL),
- * in which case the provisions of the GPL apply INSTEAD OF those given above.
- *
- * DISCLAIMER
- *
- * This software is provided 'as is' with no explicit or implied warranties
- * in respect of its properties, including, but not limited to, correctness
- * and/or fitness for purpose.
- * ---------------------------------------------------------------------------
- */
-
-/* Some changes from the Gladman version:
- s/RIJNDAEL(e_key)/E_KEY/g
- s/RIJNDAEL(d_key)/D_KEY/g
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <asm/byteorder.h>
-
-#define AES_MIN_KEY_SIZE 16
-#define AES_MAX_KEY_SIZE 32
-
-#define AES_BLOCK_SIZE 16
-
-static inline
-u32 generic_rotr32 (const u32 x, const unsigned bits)
-{
- const unsigned n = bits % 32;
- return (x >> n) | (x << (32 - n));
-}
-
-static inline
-u32 generic_rotl32 (const u32 x, const unsigned bits)
-{
- const unsigned n = bits % 32;
- return (x << n) | (x >> (32 - n));
-}
-
-#define rotl generic_rotl32
-#define rotr generic_rotr32
-
-/*
- * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
- */
-inline static u8
-byte(const u32 x, const unsigned n)
-{
- return x >> (n << 3);
-}
-
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
-
-struct aes_ctx {
- int key_length;
- u32 E[60];
- u32 D[60];
-};
-
-#define E_KEY ctx->E
-#define D_KEY ctx->D
-
-static u8 pow_tab[256] __initdata;
-static u8 log_tab[256] __initdata;
-static u8 sbx_tab[256] __initdata;
-static u8 isb_tab[256] __initdata;
-static u32 rco_tab[10];
-static u32 ft_tab[4][256];
-static u32 it_tab[4][256];
-
-static u32 fl_tab[4][256];
-static u32 il_tab[4][256];
-
-static inline u8 __init
-f_mult (u8 a, u8 b)
-{
- u8 aa = log_tab[a], cc = aa + log_tab[b];
-
- return pow_tab[cc + (cc < aa ? 1 : 0)];
-}
-
-#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)
-
-#define f_rn(bo, bi, n, k) \
- bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
- ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
- ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
-
-#define i_rn(bo, bi, n, k) \
- bo[n] = it_tab[0][byte(bi[n],0)] ^ \
- it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
- it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
-
-#define ls_box(x) \
- ( fl_tab[0][byte(x, 0)] ^ \
- fl_tab[1][byte(x, 1)] ^ \
- fl_tab[2][byte(x, 2)] ^ \
- fl_tab[3][byte(x, 3)] )
-
-#define f_rl(bo, bi, n, k) \
- bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
- fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
- fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
-
-#define i_rl(bo, bi, n, k) \
- bo[n] = il_tab[0][byte(bi[n],0)] ^ \
- il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
- il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
- il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
-
-static void __init
-gen_tabs (void)
-{
- u32 i, t;
- u8 p, q;
-
- /* log and power tables for GF(2**8) finite field with
- 0x011b as modular polynomial - the simplest primitive
- root is 0x03, used here to generate the tables */
-
- for (i = 0, p = 1; i < 256; ++i) {
- pow_tab[i] = (u8) p;
- log_tab[p] = (u8) i;
-
- p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
- }
-
- log_tab[1] = 0;
-
- for (i = 0, p = 1; i < 10; ++i) {
- rco_tab[i] = p;
-
- p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
- }
-
- for (i = 0; i < 256; ++i) {
- p = (i ? pow_tab[255 - log_tab[i]] : 0);
- q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
- p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
- sbx_tab[i] = p;
- isb_tab[p] = (u8) i;
- }
-
- for (i = 0; i < 256; ++i) {
- p = sbx_tab[i];
-
- t = p;
- fl_tab[0][i] = t;
- fl_tab[1][i] = rotl (t, 8);
- fl_tab[2][i] = rotl (t, 16);
- fl_tab[3][i] = rotl (t, 24);
-
- t = ((u32) ff_mult (2, p)) |
- ((u32) p << 8) |
- ((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
-
- ft_tab[0][i] = t;
- ft_tab[1][i] = rotl (t, 8);
- ft_tab[2][i] = rotl (t, 16);
- ft_tab[3][i] = rotl (t, 24);
-
- p = isb_tab[i];
-
- t = p;
- il_tab[0][i] = t;
- il_tab[1][i] = rotl (t, 8);
- il_tab[2][i] = rotl (t, 16);
- il_tab[3][i] = rotl (t, 24);
-
- t = ((u32) ff_mult (14, p)) |
- ((u32) ff_mult (9, p) << 8) |
- ((u32) ff_mult (13, p) << 16) |
- ((u32) ff_mult (11, p) << 24);
-
- it_tab[0][i] = t;
- it_tab[1][i] = rotl (t, 8);
- it_tab[2][i] = rotl (t, 16);
- it_tab[3][i] = rotl (t, 24);
- }
-}
-
-#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
-
-#define imix_col(y,x) \
- u = star_x(x); \
- v = star_x(u); \
- w = star_x(v); \
- t = w ^ (x); \
- (y) = u ^ v ^ w; \
- (y) ^= rotr(u ^ t, 8) ^ \
- rotr(v ^ t, 16) ^ \
- rotr(t,24)
-
-/* initialise the key schedule from the user supplied key */
-
-#define loop4(i) \
-{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
- t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
- t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
- t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
-}
-
-#define loop6(i) \
-{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
- t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
- t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
- t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
- t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
- t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
-}
-
-#define loop8(i) \
-{ t = rotr(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
- t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
- t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
- t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
- t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
- t = E_KEY[8 * i + 4] ^ ls_box(t); \
- E_KEY[8 * i + 12] = t; \
- t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
- t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
- t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
-}
-
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- struct aes_ctx *ctx = ctx_arg;
- u32 i, t, u, v, w;
-
- if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- ctx->key_length = key_len;
-
- E_KEY[0] = u32_in (in_key);
- E_KEY[1] = u32_in (in_key + 4);
- E_KEY[2] = u32_in (in_key + 8);
- E_KEY[3] = u32_in (in_key + 12);
-
- switch (key_len) {
- case 16:
- t = E_KEY[3];
- for (i = 0; i < 10; ++i)
- loop4 (i);
- break;
-
- case 24:
- E_KEY[4] = u32_in (in_key + 16);
- t = E_KEY[5] = u32_in (in_key + 20);
- for (i = 0; i < 8; ++i)
- loop6 (i);
- break;
-
- case 32:
- E_KEY[4] = u32_in (in_key + 16);
- E_KEY[5] = u32_in (in_key + 20);
- E_KEY[6] = u32_in (in_key + 24);
- t = E_KEY[7] = u32_in (in_key + 28);
- for (i = 0; i < 7; ++i)
- loop8 (i);
- break;
- }
-
- D_KEY[0] = E_KEY[0];
- D_KEY[1] = E_KEY[1];
- D_KEY[2] = E_KEY[2];
- D_KEY[3] = E_KEY[3];
-
- for (i = 4; i < key_len + 24; ++i) {
- imix_col (D_KEY[i], E_KEY[i]);
- }
-
- return 0;
-}
-
-/* encrypt a block of text */
-
-#define f_nround(bo, bi, k) \
- f_rn(bo, bi, 0, k); \
- f_rn(bo, bi, 1, k); \
- f_rn(bo, bi, 2, k); \
- f_rn(bo, bi, 3, k); \
- k += 4
-
-#define f_lround(bo, bi, k) \
- f_rl(bo, bi, 0, k); \
- f_rl(bo, bi, 1, k); \
- f_rl(bo, bi, 2, k); \
- f_rl(bo, bi, 3, k)
-
-static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- const struct aes_ctx *ctx = ctx_arg;
- u32 b0[4], b1[4];
- const u32 *kp = E_KEY + 4;
-
- b0[0] = u32_in (in) ^ E_KEY[0];
- b0[1] = u32_in (in + 4) ^ E_KEY[1];
- b0[2] = u32_in (in + 8) ^ E_KEY[2];
- b0[3] = u32_in (in + 12) ^ E_KEY[3];
-
- if (ctx->key_length > 24) {
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- }
-
- if (ctx->key_length > 16) {
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- }
-
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_nround (b0, b1, kp);
- f_nround (b1, b0, kp);
- f_lround (b0, b1, kp);
-
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
-}
-
-/* decrypt a block of text */
-
-#define i_nround(bo, bi, k) \
- i_rn(bo, bi, 0, k); \
- i_rn(bo, bi, 1, k); \
- i_rn(bo, bi, 2, k); \
- i_rn(bo, bi, 3, k); \
- k -= 4
-
-#define i_lround(bo, bi, k) \
- i_rl(bo, bi, 0, k); \
- i_rl(bo, bi, 1, k); \
- i_rl(bo, bi, 2, k); \
- i_rl(bo, bi, 3, k)
-
-static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- const struct aes_ctx *ctx = ctx_arg;
- u32 b0[4], b1[4];
- const int key_len = ctx->key_length;
- const u32 *kp = D_KEY + key_len + 20;
-
- b0[0] = u32_in (in) ^ E_KEY[key_len + 24];
- b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25];
- b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26];
- b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27];
-
- if (key_len > 24) {
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- }
-
- if (key_len > 16) {
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- }
-
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_nround (b0, b1, kp);
- i_nround (b1, b0, kp);
- i_lround (b0, b1, kp);
-
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
-}
-
-
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt
- }
- }
-};
-
-static int __init aes_init(void)
-{
- gen_tabs();
- return crypto_register_alg(&aes_alg);
-}
-
-static void __exit aes_fini(void)
-{
- crypto_unregister_alg(&aes_alg);
-}
-
-module_init(aes_init);
-module_exit(aes_fini);
-
-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/rtl8192u/ieee80211/arc4.c b/drivers/staging/rtl8192u/ieee80211/arc4.c
deleted file mode 100644
index b790e9ad104c..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/arc4.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Cryptographic API
- *
- * ARC4 Cipher Algorithm
- *
- * Jon Oberheide <jon@oberheide.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include "rtl_crypto.h"
-
-#define ARC4_MIN_KEY_SIZE 1
-#define ARC4_MAX_KEY_SIZE 256
-#define ARC4_BLOCK_SIZE 1
-
-struct arc4_ctx {
- u8 S[256];
- u8 x, y;
-};
-
-static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- struct arc4_ctx *ctx = ctx_arg;
- int i, j = 0, k = 0;
-
- ctx->x = 1;
- ctx->y = 0;
-
- for(i = 0; i < 256; i++)
- ctx->S[i] = i;
-
- for(i = 0; i < 256; i++)
- {
- u8 a = ctx->S[i];
- j = (j + in_key[k] + a) & 0xff;
- ctx->S[i] = ctx->S[j];
- ctx->S[j] = a;
- if((unsigned int)++k >= key_len)
- k = 0;
- }
-
- return 0;
-}
-
-static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in)
-{
- struct arc4_ctx *ctx = ctx_arg;
-
- u8 *const S = ctx->S;
- u8 x = ctx->x;
- u8 y = ctx->y;
- u8 a, b;
-
- a = S[x];
- y = (y + a) & 0xff;
- b = S[y];
- S[x] = b;
- S[y] = a;
- x = (x + 1) & 0xff;
- *out++ = *in ^ S[(a + b) & 0xff];
-
- ctx->x = x;
- ctx->y = y;
-}
-
-static struct crypto_alg arc4_alg = {
- .cra_name = "arc4",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct arc4_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list),
- .cra_u = { .cipher = {
- .cia_min_keysize = ARC4_MIN_KEY_SIZE,
- .cia_max_keysize = ARC4_MAX_KEY_SIZE,
- .cia_setkey = arc4_set_key,
- .cia_encrypt = arc4_crypt,
- .cia_decrypt = arc4_crypt } }
-};
-
-static int __init arc4_init(void)
-{
- return crypto_register_alg(&arc4_alg);
-}
-
-
-static void __exit arc4_exit(void)
-{
- crypto_unregister_alg(&arc4_alg);
-}
-
-module_init(arc4_init);
-module_exit(arc4_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
-MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
diff --git a/drivers/staging/rtl8192u/ieee80211/autoload.c b/drivers/staging/rtl8192u/ieee80211/autoload.c
deleted file mode 100644
index c97756f3b2ea..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/autoload.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Algorithm autoloader.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include "kmap_types.h"
-
-#include <linux/kernel.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/string.h>
-#include <linux/kmod.h>
-#include "internal.h"
-
-/*
- * A far more intelligent version of this is planned. For now, just
- * try an exact match on the name of the algorithm.
- */
-void crypto_alg_autoload(const char *name)
-{
- request_module(name);
-}
-
-struct crypto_alg *crypto_alg_mod_lookup(const char *name)
-{
- struct crypto_alg *alg = crypto_alg_lookup(name);
- if (alg == NULL) {
- crypto_alg_autoload(name);
- alg = crypto_alg_lookup(name);
- }
- return alg;
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
deleted file mode 100644
index d47345c4adcf..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Cipher operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/kernel.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-#include "scatterwalk.h"
-
-typedef void (cryptfn_t)(void *, u8 *, const u8 *);
-typedef void (procfn_t)(struct crypto_tfm *, u8 *,
- u8*, cryptfn_t, int enc, void *, int);
-
-static inline void xor_64(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
-}
-
-static inline void xor_128(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-
-/*
- * Generic encrypt/decrypt wrapper for ciphers, handles operations across
- * multiple page boundaries by using temporary blocks. In user context,
- * the kernel is given a chance to schedule us once per block.
- */
-static int crypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, cryptfn_t crfn,
- procfn_t prfn, int enc, void *info)
-{
- struct scatter_walk walk_in, walk_out;
- const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp_src[bsize];
- u8 tmp_dst[bsize];
-
- if (!nbytes)
- return 0;
-
- if (nbytes % bsize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return -EINVAL;
- }
-
- scatterwalk_start(&walk_in, src);
- scatterwalk_start(&walk_out, dst);
-
- for(;;) {
- u8 *src_p, *dst_p;
- int in_place;
-
- scatterwalk_map(&walk_in);
- scatterwalk_map(&walk_out);
- src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
- dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
- in_place = scatterwalk_samebuf(&walk_in, &walk_out,
- src_p, dst_p);
-
- nbytes -= bsize;
-
- scatterwalk_copychunks(src_p, &walk_in, bsize, 0);
-
- prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
-
- scatterwalk_done(&walk_in, nbytes);
-
- scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
- scatterwalk_done(&walk_out, nbytes);
-
- if (!nbytes)
- return 0;
-
- crypto_yield(tfm);
- }
-}
-
-static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
- cryptfn_t fn, int enc, void *info, int in_place)
-{
- u8 *iv = info;
-
- /* Null encryption */
- if (!iv)
- return;
-
- if (enc) {
- tfm->crt_u.cipher.cit_xor_block(iv, src);
- fn(crypto_tfm_ctx(tfm), dst, iv);
- memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
- } else {
- u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
- u8 *buf = in_place ? stack : dst;
-
- fn(crypto_tfm_ctx(tfm), buf, src);
- tfm->crt_u.cipher.cit_xor_block(buf, iv);
- memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
- if (buf != dst)
- memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
- }
-}
-
-static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
- cryptfn_t fn, int enc, void *info, int in_place)
-{
- fn(crypto_tfm_ctx(tfm), dst, src);
-}
-
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
-{
- struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
-
- if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- } else
- return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
- &tfm->crt_flags);
-}
-
-static int ecb_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- ecb_process, 1, NULL);
-}
-
-static int ecb_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- ecb_process, 1, NULL);
-}
-
-static int cbc_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- cbc_process, 1, tfm->crt_cipher.cit_iv);
-}
-
-static int cbc_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_encrypt,
- cbc_process, 1, iv);
-}
-
-static int cbc_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- cbc_process, 0, tfm->crt_cipher.cit_iv);
-}
-
-static int cbc_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return crypt(tfm, dst, src, nbytes,
- tfm->__crt_alg->cra_cipher.cia_decrypt,
- cbc_process, 0, iv);
-}
-
-static int nocrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return -ENOSYS;
-}
-
-static int nocrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return -ENOSYS;
-}
-
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
-{
- u32 mode = flags & CRYPTO_TFM_MODE_MASK;
-
- tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
- if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
- tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
-
- return 0;
-}
-
-int crypto_init_cipher_ops(struct crypto_tfm *tfm)
-{
- int ret = 0;
- struct cipher_tfm *ops = &tfm->crt_cipher;
-
- ops->cit_setkey = setkey;
-
- switch (tfm->crt_cipher.cit_mode) {
- case CRYPTO_TFM_MODE_ECB:
- ops->cit_encrypt = ecb_encrypt;
- ops->cit_decrypt = ecb_decrypt;
- break;
-
- case CRYPTO_TFM_MODE_CBC:
- ops->cit_encrypt = cbc_encrypt;
- ops->cit_decrypt = cbc_decrypt;
- ops->cit_encrypt_iv = cbc_encrypt_iv;
- ops->cit_decrypt_iv = cbc_decrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CFB:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CTR:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- default:
- BUG();
- }
-
- if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
-
- switch (crypto_tfm_alg_blocksize(tfm)) {
- case 8:
- ops->cit_xor_block = xor_64;
- break;
-
- case 16:
- ops->cit_xor_block = xor_128;
- break;
-
- default:
- printk(KERN_WARNING "%s: block size %u not supported\n",
- crypto_tfm_alg_name(tfm),
- crypto_tfm_alg_blocksize(tfm));
- ret = -EINVAL;
- goto out;
- }
-
- ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
- ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
- if (ops->cit_iv == NULL)
- ret = -ENOMEM;
- }
-
-out:
- return ret;
-}
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
- kfree(tfm->crt_cipher.cit_iv);
-}
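
The cbc_process() helper above chains blocks by XOR-ing the IV into the plaintext, encrypting, and carrying the ciphertext forward as the next IV. A toy standalone sketch of that chaining (the block transform here is a stand-in, not a real cipher):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define BLK 8

    /* Stand-in block transform; a real driver would call the cipher here. */
    static void toy_encrypt(uint8_t *dst, const uint8_t *src)
    {
            int i;

            for (i = 0; i < BLK; i++)
                    dst[i] = (uint8_t)((src[i] ^ 0xA5) + i);
    }

    /* Same chaining as cbc_process(..., enc=1): iv ^= src, encrypt, iv = dst. */
    static void cbc_encrypt_blocks(uint8_t *iv, uint8_t *buf, size_t nblocks)
    {
            size_t n;
            int i;

            for (n = 0; n < nblocks; n++) {
                    uint8_t *blk = buf + n * BLK;

                    for (i = 0; i < BLK; i++)
                            iv[i] ^= blk[i];
                    toy_encrypt(blk, iv);
                    memcpy(iv, blk, BLK);
            }
    }

    int main(void)
    {
            uint8_t iv[BLK] = { 0 };
            uint8_t msg[2 * BLK] = "16 bytes of data";
            size_t n;

            cbc_encrypt_blocks(iv, msg, 2);
            for (n = 0; n < sizeof(msg); n++)
                    printf("%02x", msg[n]);
            printf("\n");
            return 0;
    }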
diff --git a/drivers/staging/rtl8192u/ieee80211/compress.c b/drivers/staging/rtl8192u/ieee80211/compress.c
deleted file mode 100644
index 5416ab63a738..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/compress.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/types.h>
-/*#include <linux/crypto.h>*/
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include "internal.h"
-
-static int crypto_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm),
- src, slen, dst,
- dlen);
-}
-
-static int crypto_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm),
- src, slen, dst,
- dlen);
-}
-
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
-int crypto_init_compress_ops(struct crypto_tfm *tfm)
-{
- int ret = 0;
- struct compress_tfm *ops = &tfm->crt_compress;
-
- ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm));
- if (ret)
- goto out;
-
- ops->cot_compress = crypto_compress;
- ops->cot_decompress = crypto_decompress;
-
-out:
- return ret;
-}
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
- tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm));
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h b/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
deleted file mode 100644
index 2ba374a64178..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/crypto_compat.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Header file to maintain compatibility among different kernel versions.
- *
- * Copyright (c) 2004-2006 <lawrence_wang@realsil.com.cn>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
- */
-
-#include <linux/crypto.h>
-
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
- struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- int err;
- printk("call crypto_alloc_tfm!!!\n");
- do {
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- continue;
-
- tfm = __crypto_alloc_tfm(alg, flags);
- err = 0;
- if (IS_ERR(tfm)) {
- crypto_mod_put(alg);
- err = PTR_ERR(tfm);
- tfm = NULL;
- }
- } while (err == -EAGAIN && !signal_pending(current));
-
- return tfm;
-}
-//EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-//EXPORT_SYMBOL_GPL(crypto_free_tfm);
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
deleted file mode 100644
index 05e7497fd106..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-
-static void init(struct crypto_tfm *tfm)
-{
- tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
-}
-
-static void update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
-{
- unsigned int i;
-
- for (i = 0; i < nsg; i++) {
-
- struct page *pg = sg[i].page;
- unsigned int offset = sg[i].offset;
- unsigned int l = sg[i].length;
-
- do {
- unsigned int bytes_from_page = min(l, ((unsigned int)
- (PAGE_SIZE)) -
- offset);
- char *p = kmap_atomic(pg) + offset;
-
- tfm->__crt_alg->cra_digest.dia_update
- (crypto_tfm_ctx(tfm), p,
- bytes_from_page);
- kunmap_atomic(p);
- crypto_yield(tfm);
- offset = 0;
- pg++;
- l -= bytes_from_page;
- } while (l > 0);
- }
-}
-
-static void final(struct crypto_tfm *tfm, u8 *out)
-{
- tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out);
-}
-
-static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
-{
- u32 flags;
- if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
- return -ENOSYS;
- return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm),
- key, keylen, &flags);
-}
-
-static void digest(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg, u8 *out)
-{
- unsigned int i;
-
- tfm->crt_digest.dit_init(tfm);
-
- for (i = 0; i < nsg; i++) {
- char *p = kmap_atomic(sg[i].page) + sg[i].offset;
- tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
- p, sg[i].length);
- kunmap_atomic(p);
- crypto_yield(tfm);
- }
- crypto_digest_final(tfm, out);
-}
-
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
- struct digest_tfm *ops = &tfm->crt_digest;
-
- ops->dit_init = init;
- ops->dit_update = update;
- ops->dit_final = final;
- ops->dit_digest = digest;
- ops->dit_setkey = setkey;
-
- return crypto_alloc_hmac_block(tfm);
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
- crypto_free_hmac_block(tfm);
-}
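
The update() walker above splits each scatterlist entry so that no single dia_update() call crosses a page boundary. The chunking arithmetic in isolation (PAGE_SIZE fixed at 4096 here purely for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Same chunking as update() above: consume an entry that may span
     * several pages, never crossing a page boundary in one call. */
    static void walk_pages(unsigned int offset, unsigned int len)
    {
            unsigned int page = 0;

            while (len > 0) {
                    unsigned int chunk = len < PAGE_SIZE - offset ?
                                         len : PAGE_SIZE - offset;

                    printf("page %u: offset %u, %u bytes\n", page, offset, chunk);
                    offset = 0;
                    page++;
                    len -= chunk;
            }
    }

    int main(void)
    {
            walk_pages(4000, 5000);  /* 96 bytes, then 4096, then 808 */
            return 0;
    }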
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 34edcfab96be..90ace791f2d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -1,16 +1,8 @@
-//-----------------------------------------------------------------------------
-// File:
-// Dot11d.c
-//
-// Description:
-// Implement 802.11d.
-//
-//-----------------------------------------------------------------------------
+/* Implement 802.11d. */
#include "dot11d.h"
-void
-Dot11d_Init(struct ieee80211_device *ieee)
+void Dot11d_Init(struct ieee80211_device *ieee)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
@@ -22,55 +14,42 @@ Dot11d_Init(struct ieee80211_device *ieee)
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
- printk("Dot11d_Init()\n");
+ netdev_info(ieee->dev, "Dot11d_Init()\n");
}
+EXPORT_SYMBOL(Dot11d_Init);
-//
-// Description:
-// Reset to the state as we are just entering a regulatory domain.
-//
-void
-Dot11d_Reset(struct ieee80211_device *ieee)
+/* Reset to the state as we are just entering a regulatory domain. */
+void Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
- // Clear old channel map
+ /* Clear old channel map */
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
- // Set new channel map
- for (i=1; i<=11; i++) {
+ /* Set new channel map */
+ for (i = 1; i <= 11; i++)
(pDot11dInfo->channel_map)[i] = 1;
- }
- for (i=12; i<=14; i++) {
+
+ for (i = 12; i <= 14; i++)
(pDot11dInfo->channel_map)[i] = 2;
- }
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
-
- //printk("Dot11d_Reset()\n");
}
+EXPORT_SYMBOL(Dot11d_Reset);
-//
-// Description:
-// Update country IE from Beacon or Probe Resopnse
-// and configure PHY for operation in the regulatory domain.
-//
-// TODO:
-// Configure Tx power.
-//
-// Assumption:
-// 1. IS_DOT11D_ENABLE() is TRUE.
-// 2. Input IE is an valid one.
-//
-void
-Dot11d_UpdateCountryIe(
- struct ieee80211_device *dev,
- u8 *pTaddr,
- u16 CoutryIeLen,
- u8 *pCoutryIe
- )
+/*
+ * Update country IE from Beacon or Probe Response and configure PHY for
+ * operation in the regulatory domain.
+ *
+ * TODO: Configure Tx power.
+ * Assumption:
+ * 1. IS_DOT11D_ENABLE() is TRUE.
+ * 2. Input IE is a valid one.
+ */
+void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
+ u16 CoutryIeLen, u8 *pCoutryIe)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
@@ -79,23 +58,25 @@ Dot11d_UpdateCountryIe(
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3; // skip 3-byte country string.
+ NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
- for(i = 0; i < NumTriples; i++)
- {
- if(MaxChnlNum >= pTriple->FirstChnl)
- { // It is not in a monotonically increasing order, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ for (i = 0; i < NumTriples; i++) {
+ if (MaxChnlNum >= pTriple->FirstChnl) {
+ /* It is not in a monotonically increasing order, so
+ * stop processing.
+ */
+ netdev_err(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
return;
}
- if(MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls))
- { // It is not a valid set of channel id, so stop processing.
- printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls)) {
+ /* It is not a valid set of channel id, so stop
+ * processing.
+ */
+ netdev_err(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
return;
}
- for(j = 0 ; j < pTriple->NumChnls; j++)
- {
+ for (j = 0; j < pTriple->NumChnls; j++) {
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
@@ -103,60 +84,48 @@ Dot11d_UpdateCountryIe(
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
- //printk("Dot11d_UpdateCountryIe(): Channel List:\n");
- printk("Channel List:");
- for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
- if(pDot11dInfo->channel_map[i] > 0)
- printk(" %d", i);
- printk("\n");
+ netdev_info(dev->dev, "Channel List:");
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
+ if (pDot11dInfo->channel_map[i] > 0)
+ netdev_info(dev->dev, " %d", i);
+ netdev_info(dev->dev, "\n");
UPDATE_CIE_SRC(dev, pTaddr);
pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe,CoutryIeLen);
+ memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
+EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
-
-u8
-DOT11D_GetMaxTxPwrInDbm(
- struct ieee80211_device *dev,
- u8 Channel
- )
+u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
- if(MAX_CHANNEL_NUMBER < Channel)
- {
- printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < Channel) {
+ netdev_err(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
- if(pDot11dInfo->channel_map[Channel])
- {
+ if (pDot11dInfo->channel_map[Channel])
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
- }
return MaxTxPwrInDbm;
}
+EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
-
-void
-DOT11D_ScanComplete(
- struct ieee80211_device *dev
- )
+void DOT11D_ScanComplete(struct ieee80211_device *dev)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- switch (pDot11dInfo->State)
- {
+ switch (pDot11dInfo->State) {
case DOT11D_STATE_LEARNED:
pDot11dInfo->State = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
- if( GET_CIE_WATCHDOG(dev) == 0 )
- { // Reset country IE if previous one is gone.
+ if (GET_CIE_WATCHDOG(dev) == 0) {
+ /* Reset country IE if previous one is gone. */
Dot11d_Reset(dev);
}
break;
@@ -164,57 +133,43 @@ DOT11D_ScanComplete(
break;
}
}
+EXPORT_SYMBOL(DOT11D_ScanComplete);
-int IsLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < channel) {
+ netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return 0;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return 1;
return 0;
}
+EXPORT_SYMBOL(IsLegalChannel);
-int ToLegalChannel(
- struct ieee80211_device *dev,
- u8 channel
-)
+int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
u32 i = 0;
- for (i=1; i<= MAX_CHANNEL_NUMBER; i++)
- {
- if(pDot11dInfo->channel_map[i] > 0)
- {
+ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
+ if (pDot11dInfo->channel_map[i] > 0) {
default_chn = i;
break;
}
}
- if(MAX_CHANNEL_NUMBER < channel)
- {
- printk("IsLegalChannel(): Invalid Channel\n");
+ if (MAX_CHANNEL_NUMBER < channel) {
+ netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
- if(pDot11dInfo->channel_map[channel] > 0)
+ if (pDot11dInfo->channel_map[channel] > 0)
return channel;
return default_chn;
}
-EXPORT_SYMBOL(Dot11d_Init);
-EXPORT_SYMBOL(Dot11d_Reset);
-EXPORT_SYMBOL(Dot11d_UpdateCountryIe);
-EXPORT_SYMBOL(DOT11D_GetMaxTxPwrInDbm);
-EXPORT_SYMBOL(DOT11D_ScanComplete);
-EXPORT_SYMBOL(IsLegalChannel);
EXPORT_SYMBOL(ToLegalChannel);
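
Dot11d_UpdateCountryIe() above treats the country IE as a 3-byte country string followed by (first channel, number of channels, max TX power) triples, and it rejects non-monotonic or out-of-range entries. A minimal standalone parser with the same shape (hypothetical names; the channel limit is illustrative, not the driver's own value):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define MAX_CHANNEL_NUMBER 14   /* illustrative; the driver's limit differs */

    struct chnl_txpower_triple {
            uint8_t first_chnl;
            uint8_t num_chnls;
            uint8_t max_tx_power_dbm;
    };

    /* Returns 0 on success and fills channel_map[1..MAX_CHANNEL_NUMBER]. */
    static int parse_country_ie(const uint8_t *ie, size_t len,
                                uint8_t channel_map[MAX_CHANNEL_NUMBER + 1])
    {
            size_t ntriples, i;
            unsigned int max_seen = 0, ch;

            if (len < 3 || (len - 3) % 3)
                    return -1;
            ntriples = (len - 3) / 3;       /* skip the 3-byte country string */

            memset(channel_map, 0, MAX_CHANNEL_NUMBER + 1);
            for (i = 0; i < ntriples; i++) {
                    struct chnl_txpower_triple t = {
                            ie[3 + 3 * i], ie[4 + 3 * i], ie[5 + 3 * i]
                    };

                    if (t.first_chnl <= max_seen)   /* must increase */
                            return -1;
                    if (t.first_chnl + t.num_chnls > MAX_CHANNEL_NUMBER + 1)
                            return -1;
                    for (ch = t.first_chnl; ch < t.first_chnl + t.num_chnls; ch++) {
                            channel_map[ch] = 1;
                            max_seen = ch;
                    }
            }
            return 0;
    }

    int main(void)
    {
            /* "US" + pad, channels 1..11 at 30 dBm */
            const uint8_t ie[] = { 'U', 'S', ' ', 1, 11, 30 };
            uint8_t map[MAX_CHANNEL_NUMBER + 1];
            int ch;

            if (!parse_country_ie(ie, sizeof(ie), map))
                    for (ch = 1; ch <= MAX_CHANNEL_NUMBER; ch++)
                            if (map[ch])
                                    printf(" %d", ch);
            printf("\n");
            return 0;
    }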
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index bc64f05a7e6a..cac2056b7f82 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1152,7 +1152,7 @@ struct ieee80211_probe_request {
struct ieee80211_probe_response {
struct ieee80211_hdr_3addr header;
- u32 time_stamp[2];
+ __le32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
/* SSID, supported rates, FH params, DS params,
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index e730ed64c0fe..a98414a72bf5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -517,11 +517,8 @@ drop:
return 1;
}
-bool
-AddReorderEntry(
- PRX_TS_RECORD pTS,
- PRX_REORDER_ENTRY pReorderEntry
- )
+
+static bool AddReorderEntry(PRX_TS_RECORD pTS, PRX_REORDER_ENTRY pReorderEntry)
{
struct list_head *pList = &pTS->RxPendingPktList;
while(pList->next != &pTS->RxPendingPktList)
@@ -601,10 +598,9 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
}
-void RxReorderIndicatePacket( struct ieee80211_device *ieee,
- struct ieee80211_rxb *prxb,
- PRX_TS_RECORD pTS,
- u16 SeqNum)
+static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
+ struct ieee80211_rxb *prxb,
+ PRX_TS_RECORD pTS, u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
@@ -771,9 +767,9 @@ void RxReorderIndicatePacket( struct ieee80211_device *ieee,
}
}
-u8 parse_subframe(struct sk_buff *skb,
- struct ieee80211_rx_stats *rx_stats,
- struct ieee80211_rxb *rxb,u8 *src,u8 *dst)
+static u8 parse_subframe(struct sk_buff *skb,
+ struct ieee80211_rx_stats *rx_stats,
+ struct ieee80211_rxb *rxb,u8 *src,u8 *dst)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
@@ -2200,7 +2196,7 @@ static inline int ieee80211_network_init(
network->last_scanned = jiffies;
network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
- network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
+ network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
/* Where to pull this? beacon->listen_interval;*/
network->listen_interval = 0x0A;
network->rates_len = network->rates_ex_len = 0;
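
The header and rx hunks above belong together: once the probe-response fields are annotated with their on-air endianness, each accessor has to match the declared width, which is why beacon_interval (a __le16) is now read with le16_to_cpu() rather than le32_to_cpu(). A reduced sketch of the corrected pattern (struct trimmed to the relevant fields):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct probe_response_fields {
            __le32 time_stamp[2];
            __le16 beacon_interval;
            __le16 capability;
    } __attribute__((packed));

    /* Each conversion matches the declared width of the wire field. */
    static void example_parse(const struct probe_response_fields *resp,
                              u32 ts[2], u16 *interval)
    {
            ts[0] = le32_to_cpu(resp->time_stamp[0]);
            ts[1] = le32_to_cpu(resp->time_stamp[1]);
            *interval = le16_to_cpu(resp->beacon_interval);
    }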
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 662c7e41cd5c..2131912113d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -24,15 +24,6 @@
#include "dot11d.h"
-u8 rsn_authen_cipher_suite[16][4] = {
- {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
- {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default
- {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default}
- {0x00,0x0F,0xAC,0x03}, //WRAP-historical
- {0x00,0x0F,0xAC,0x04}, //CCMP
- {0x00,0x0F,0xAC,0x05}, //WEP-104
-};
-
short ieee80211_is_54g(const struct ieee80211_network *net)
{
return (net->rates_ex_len > 0) || (net->rates_len > 4);
@@ -47,7 +38,7 @@ short ieee80211_is_shortslot(const struct ieee80211_network *net)
* tag and the EXTENDED RATE MFIE tag if needed.
 * It includes two bytes per tag for the tag itself and its len
*/
-unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
+static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
{
unsigned int rate_len = 0;
@@ -65,7 +56,7 @@ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
-void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -82,7 +73,7 @@ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
+static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -106,7 +97,8 @@ void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
}
-void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) {
+static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
+{
u8 *tag = *tag_p;
*tag++ = MFIE_TYPE_GENERIC; //0
@@ -148,7 +140,7 @@ void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) {
}
#endif
-void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
{
int nh;
nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM;
@@ -166,7 +158,7 @@ void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
//return 0;
}
-struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
+static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
{
struct sk_buff *ret;
@@ -181,12 +173,12 @@ struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
return ret;
}
-void init_mgmt_queue(struct ieee80211_device *ieee)
+static void init_mgmt_queue(struct ieee80211_device *ieee)
{
ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
}
-u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
+static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8 rate;
@@ -365,7 +357,8 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
-void ieee80211_send_beacon(struct ieee80211_device *ieee)
+
+static void ieee80211_send_beacon(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
if(!ieee->ieee_up)
@@ -391,7 +384,7 @@ void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(unsigned long _ieee)
{
struct ieee80211_device *ieee =
(struct ieee80211_device *) _ieee;
@@ -403,7 +396,7 @@ void ieee80211_send_beacon_cb(unsigned long _ieee)
}
-void ieee80211_send_probe(struct ieee80211_device *ieee)
+static void ieee80211_send_probe(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
@@ -494,7 +487,7 @@ out:
}
-void ieee80211_softmac_scan_wq(struct work_struct *work)
+static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
@@ -538,7 +531,7 @@ out:
-void ieee80211_beacons_start(struct ieee80211_device *ieee)
+static void ieee80211_beacons_start(struct ieee80211_device *ieee)
{
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock,flags);
@@ -549,7 +542,7 @@ void ieee80211_beacons_start(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
-void ieee80211_beacons_stop(struct ieee80211_device *ieee)
+static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -581,7 +574,7 @@ void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
}
-void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
+static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
{
// unsigned long flags;
@@ -609,7 +602,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
}
/* called with ieee->lock held */
-void ieee80211_start_scan(struct ieee80211_device *ieee)
+static void ieee80211_start_scan(struct ieee80211_device *ieee)
{
if(IS_DOT11D_ENABLE(ieee) )
{
@@ -841,7 +834,8 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
}
-struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
+ u8 *dest)
{
struct sk_buff *skb;
u8 *tag;
@@ -896,7 +890,8 @@ struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
return skb;
}
-struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
+static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
+ int status, u8 *dest)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -924,7 +919,8 @@ struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8
}
-struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
+static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,
+ short pwr)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *hdr;
@@ -950,7 +946,7 @@ struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
}
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -959,7 +955,8 @@ void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
}
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
+static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s,
+ u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
@@ -968,7 +965,7 @@ void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
}
-void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
+static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
@@ -1250,13 +1247,13 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(unsigned long dev)
{
ieee80211_associate_abort((struct ieee80211_device *) dev);
}
-void ieee80211_associate_step1(struct ieee80211_device *ieee)
+static void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
struct ieee80211_network *beacon = &ieee->current_network;
struct sk_buff *skb;
@@ -1282,7 +1279,9 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+static void ieee80211_auth_challenge(struct ieee80211_device *ieee,
+ u8 *challenge,
+ int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -1312,7 +1311,7 @@ void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int
kfree(challenge);
}
-void ieee80211_associate_step2(struct ieee80211_device *ieee)
+static void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
@@ -1331,7 +1330,7 @@ void ieee80211_associate_step2(struct ieee80211_device *ieee)
//dev_kfree_skb_any(skb);//edit by thomas
}
}
-void ieee80211_associate_complete_wq(struct work_struct *work)
+static void ieee80211_associate_complete_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
printk(KERN_INFO "Associated successfully\n");
@@ -1378,7 +1377,7 @@ void ieee80211_associate_complete_wq(struct work_struct *work)
netif_carrier_on(ieee->dev);
}
-void ieee80211_associate_complete(struct ieee80211_device *ieee)
+static void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
// int i;
// struct net_device* dev = ieee->dev;
@@ -1389,7 +1388,7 @@ void ieee80211_associate_complete(struct ieee80211_device *ieee)
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
-void ieee80211_associate_procedure_wq(struct work_struct *work)
+static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
ieee->sync_scan_hurryup = 1;
@@ -1562,7 +1561,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
}
-int auth_rq_parse(struct sk_buff *skb,u8 *dest)
+static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_authentication *a;
@@ -1618,7 +1617,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
-int assoc_rq_parse(struct sk_buff *skb,u8 *dest)
+static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
{
struct ieee80211_assoc_request_frame *a;
@@ -1712,7 +1711,8 @@ ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
-void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
+static void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
+ short pwr)
{
struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
@@ -1723,7 +1723,8 @@ void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
}
-short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
+static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
+ u32 *time_l)
{
int timeout = ieee->ps_timeout;
u8 dtim;
@@ -1771,7 +1772,7 @@ short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *ti
}
-inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
u32 th,tl;
@@ -1888,7 +1889,8 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_process_action(struct ieee80211_device *ieee, struct sk_buff *skb)
+static void ieee80211_process_action(struct ieee80211_device *ieee,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *header = (struct ieee80211_hdr *)skb->data;
u8 *act = ieee80211_get_payload(header);
@@ -1959,7 +1961,8 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_network network_resp;
struct ieee80211_network *network = &network_resp;
- if (0 == (errcode=assoc_parse(ieee,skb, &aid))){
+ errcode = assoc_parse(ieee, skb, &aid);
+ if (!errcode) {
ieee->state=IEEE80211_LINKED;
ieee->assoc_id = aid;
ieee->softmac_stats.rx_ass_ok++;
@@ -2017,7 +2020,8 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
IEEE80211_DEBUG_MGMT("Received authentication response");
- if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){
+ errcode = auth_parse(skb, &challenge, &chlen);
+ if (!errcode) {
if(ieee->open_wep || !challenge){
ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
@@ -2189,7 +2193,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
}
/* called with ieee->lock acquired */
-void ieee80211_resume_tx(struct ieee80211_device *ieee)
+static void ieee80211_resume_tx(struct ieee80211_device *ieee)
{
int i;
for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
@@ -2318,7 +2322,7 @@ void ieee80211_start_master_bss(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
-void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
+static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
{
if(ieee->raw_tx){
@@ -2328,7 +2332,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
netif_carrier_on(ieee->dev);
}
}
-void ieee80211_start_ibss_wq(struct work_struct *work)
+static void ieee80211_start_ibss_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
@@ -2501,7 +2505,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
notify_wx_assoc_event(ieee);
}
-void ieee80211_associate_retry_wq(struct work_struct *work)
+static void ieee80211_associate_retry_wq(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
@@ -2772,7 +2776,8 @@ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
}
-void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len)
+static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee,
+ char *wpa_ie, int wpa_ie_len)
{
/* make sure WPA is enabled */
ieee80211_wpa_enable(ieee, 1);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 157b2d7466e5..6779bdd561d7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -378,7 +378,8 @@ FORCED_AGG_SETTING:
return;
}
-extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
+static void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee,
+ cb_desc *tcb_desc)
{
tcb_desc->bUseShortPreamble = false;
if (tcb_desc->data_rate == 2)
@@ -391,7 +392,7 @@ extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_
}
return;
}
-extern void
+static void
ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index e1fe54acb4b8..bdf67ec5df21 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -39,7 +39,7 @@ struct modes_unit {
char *mode_string;
int mode_size;
};
-struct modes_unit ieee80211_modes[] = {
+static struct modes_unit ieee80211_modes[] = {
{"a",1},
{"b",1},
{"g",1},
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
deleted file mode 100644
index 6f54cfe8a467..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _CRYPTO_INTERNAL_H
-#define _CRYPTO_INTERNAL_H
-
-
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <asm/hardirq.h>
-#include <asm/softirq.h>
-#include <asm/kmap_types.h>
-
-
-static inline void crypto_yield(struct crypto_tfm *tfm)
-{
- if (!in_softirq())
- cond_resched();
-}
-
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return (void *)&tfm[1];
-}
-
-struct crypto_alg *crypto_alg_lookup(const char *name);
-
-#ifdef CONFIG_KMOD
-void crypto_alg_autoload(const char *name);
-struct crypto_alg *crypto_alg_mod_lookup(const char *name);
-#else
-static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
-{
- return crypto_alg_lookup(name);
-}
-#endif
-
-#ifdef CONFIG_CRYPTO_HMAC
-int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
-void crypto_free_hmac_block(struct crypto_tfm *tfm);
-#else
-static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
-static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
-{ }
-#endif
-
-#ifdef CONFIG_PROC_FS
-void __init crypto_init_proc(void);
-#else
-static inline void crypto_init_proc(void)
-{ }
-#endif
-
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm);
-int crypto_init_cipher_ops(struct crypto_tfm *tfm);
-int crypto_init_compress_ops(struct crypto_tfm *tfm);
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm);
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
-void crypto_exit_compress_ops(struct crypto_tfm *tfm);
-
-#endif /* _CRYPTO_INTERNAL_H */
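The crypto_tfm_ctx() helper in the header removed above returns the address just past struct crypto_tfm, i.e. it assumes the per-transform context was allocated in the same block right behind the header (presumably sized from cra_ctxsize at allocation time). Below is a minimal user-space sketch of that layout convention, for illustration only; the struct and variable names are made up for the example.

/*
 * Illustration only: the "context lives right after the header struct"
 * convention behind crypto_tfm_ctx() above, with invented names.
 */
#include <stdio.h>
#include <stdlib.h>

struct tfm_hdr { unsigned int flags; };
struct mic_ctx { unsigned int count; };

static void *tfm_ctx(struct tfm_hdr *tfm)
{
	return (void *)&tfm[1];		/* same trick as crypto_tfm_ctx() */
}

int main(void)
{
	/* one allocation carries the header plus its algorithm context */
	struct tfm_hdr *tfm = calloc(1, sizeof(*tfm) + sizeof(struct mic_ctx));
	struct mic_ctx *ctx;

	if (!tfm)
		return 1;
	ctx = tfm_ctx(tfm);
	ctx->count = 3;
	printf("context starts %zu bytes after the header, count=%u\n",
	       sizeof(*tfm), ctx->count);
	free(tfm);
	return 0;
}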
diff --git a/drivers/staging/rtl8192u/ieee80211/michael_mic.c b/drivers/staging/rtl8192u/ieee80211/michael_mic.c
deleted file mode 100644
index df256e487c20..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/michael_mic.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Cryptographic API
- *
- * Michael MIC (IEEE 802.11i/TKIP) keyed digest
- *
- * Copyright (c) 2004 Jouni Malinen <jkmaline@cc.hut.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-
-
-struct michael_mic_ctx {
- u8 pending[4];
- size_t pending_len;
-
- u32 l, r;
-};
-
-
-static inline u32 rotl(u32 val, int bits)
-{
- return (val << bits) | (val >> (32 - bits));
-}
-
-
-static inline u32 rotr(u32 val, int bits)
-{
- return (val >> bits) | (val << (32 - bits));
-}
-
-
-static inline u32 xswap(u32 val)
-{
- return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
-}
-
-
-#define michael_block(l, r) \
-do { \
- r ^= rotl(l, 17); \
- l += r; \
- r ^= xswap(l); \
- l += r; \
- r ^= rotl(l, 3); \
- l += r; \
- r ^= rotr(l, 2); \
- l += r; \
-} while (0)
-
-
-static inline u32 get_le32(const u8 *p)
-{
- return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
-}
-
-
-static inline void put_le32(u8 *p, u32 v)
-{
- p[0] = v;
- p[1] = v >> 8;
- p[2] = v >> 16;
- p[3] = v >> 24;
-}
-
-
-static void michael_init(void *ctx)
-{
- struct michael_mic_ctx *mctx = ctx;
- mctx->pending_len = 0;
-}
-
-
-static void michael_update(void *ctx, const u8 *data, unsigned int len)
-{
- struct michael_mic_ctx *mctx = ctx;
-
- if (mctx->pending_len) {
- int flen = 4 - mctx->pending_len;
- if (flen > len)
- flen = len;
- memcpy(&mctx->pending[mctx->pending_len], data, flen);
- mctx->pending_len += flen;
- data += flen;
- len -= flen;
-
- if (mctx->pending_len < 4)
- return;
-
- mctx->l ^= get_le32(mctx->pending);
- michael_block(mctx->l, mctx->r);
- mctx->pending_len = 0;
- }
-
- while (len >= 4) {
- mctx->l ^= get_le32(data);
- michael_block(mctx->l, mctx->r);
- data += 4;
- len -= 4;
- }
-
- if (len > 0) {
- mctx->pending_len = len;
- memcpy(mctx->pending, data, len);
- }
-}
-
-
-static void michael_final(void *ctx, u8 *out)
-{
- struct michael_mic_ctx *mctx = ctx;
- u8 *data = mctx->pending;
-
- /* Last block and padding (0x5a, 4..7 x 0) */
- switch (mctx->pending_len) {
- case 0:
- mctx->l ^= 0x5a;
- break;
- case 1:
- mctx->l ^= data[0] | 0x5a00;
- break;
- case 2:
- mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
- break;
- case 3:
- mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
- 0x5a000000;
- break;
- }
- michael_block(mctx->l, mctx->r);
- /* l ^= 0; */
- michael_block(mctx->l, mctx->r);
-
- put_le32(out, mctx->l);
- put_le32(out + 4, mctx->r);
-}
-
-
-static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
- u32 *flags)
-{
- struct michael_mic_ctx *mctx = ctx;
- if (keylen != 8) {
- if (flags)
- *flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
- mctx->l = get_le32(key);
- mctx->r = get_le32(key + 4);
- return 0;
-}
-
-
-static struct crypto_alg michael_mic_alg = {
- .cra_name = "michael_mic",
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
- .cra_blocksize = 8,
- .cra_ctxsize = sizeof(struct michael_mic_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
- .cra_u = { .digest = {
- .dia_digestsize = 8,
- .dia_init = michael_init,
- .dia_update = michael_update,
- .dia_final = michael_final,
- .dia_setkey = michael_setkey } }
-};
-
-
-static int __init michael_mic_init(void)
-{
- return crypto_register_alg(&michael_mic_alg);
-}
-
-
-static void __exit michael_mic_exit(void)
-{
- crypto_unregister_alg(&michael_mic_alg);
-}
-
-
-module_init(michael_mic_init);
-module_exit(michael_mic_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Michael MIC");
-MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
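The file removed above implemented the Michael MIC keyed digest; its core is the four-step michael_block() mixing function, the 0x5a-then-zeros padding and the two finishing block runs seen in michael_final(). Below is a one-shot, user-space sketch of that same computation for illustration only; the key and message are arbitrary example values, not test vectors, and the streaming/pending-buffer handling of michael_update() is intentionally omitted.

/*
 * Illustration only: one-shot Michael MIC over a flat buffer, mirroring the
 * block function and padding of the deleted michael_mic.c.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t rotl(uint32_t v, int b) { return (v << b) | (v >> (32 - b)); }
static uint32_t rotr(uint32_t v, int b) { return (v >> b) | (v << (32 - b)); }

static uint32_t xswap(uint32_t v)
{
	return ((v & 0x00ff00ff) << 8) | ((v & 0xff00ff00) >> 8);
}

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void michael_block(uint32_t *l, uint32_t *r)
{
	*r ^= rotl(*l, 17); *l += *r;
	*r ^= xswap(*l);    *l += *r;
	*r ^= rotl(*l, 3);  *l += *r;
	*r ^= rotr(*l, 2);  *l += *r;
}

int main(void)
{
	const uint8_t key[8] = { 0 };			/* example 64-bit key */
	const uint8_t msg[] = "example frame body";	/* example message */
	size_t i, len = sizeof(msg) - 1;
	uint32_t l = get_le32(key), r = get_le32(key + 4);
	uint8_t pad[4] = { 0 };

	for (i = 0; i + 4 <= len; i += 4) {		/* whole 32-bit words */
		l ^= get_le32(msg + i);
		michael_block(&l, &r);
	}
	memcpy(pad, msg + i, len - i);			/* 0..3 leftover bytes */
	pad[len - i] = 0x5a;				/* then 0x5a and zeros */
	l ^= get_le32(pad);
	michael_block(&l, &r);
	michael_block(&l, &r);				/* final all-zero word */

	printf("MIC words: l=%08x r=%08x (emitted as two little-endian words)\n",
	       l, r);
	return 0;
}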
diff --git a/drivers/staging/rtl8192u/ieee80211/proc.c b/drivers/staging/rtl8192u/ieee80211/proc.c
deleted file mode 100644
index c426dfdd9fdd..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/proc.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Procfs information.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/init.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/rwsem.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-
-extern struct list_head crypto_alg_list;
-extern struct rw_semaphore crypto_alg_sem;
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- struct list_head *v;
- loff_t n = *pos;
-
- down_read(&crypto_alg_sem);
- list_for_each(v, &crypto_alg_list)
- if (!n--)
- return list_entry(v, struct crypto_alg, cra_list);
- return NULL;
-}
-
-static void *c_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct list_head *v = p;
-
- (*pos)++;
- v = v->next;
- return (v == &crypto_alg_list) ?
- NULL : list_entry(v, struct crypto_alg, cra_list);
-}
-
-static void c_stop(struct seq_file *m, void *p)
-{
- up_read(&crypto_alg_sem);
-}
-
-static int c_show(struct seq_file *m, void *p)
-{
- struct crypto_alg *alg = (struct crypto_alg *)p;
-
- seq_printf(m, "name : %s\n", alg->cra_name);
- seq_printf(m, "module : %s\n",
- (alg->cra_module ?
- alg->cra_module->name :
- "kernel"));
-
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_CIPHER:
- seq_printf(m, "type : cipher\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "min keysize : %u\n",
- alg->cra_cipher.cia_min_keysize);
- seq_printf(m, "max keysize : %u\n",
- alg->cra_cipher.cia_max_keysize);
- break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- seq_printf(m, "type : digest\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n",
- alg->cra_digest.dia_digestsize);
- break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- seq_printf(m, "type : compression\n");
- break;
- default:
- seq_printf(m, "type : unknown\n");
- break;
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-static struct seq_operations crypto_seq_ops = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = c_show
-};
-
-static int crypto_info_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &crypto_seq_ops);
-}
-
-static const struct file_operations proc_crypto_ops = {
- .open = crypto_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
-
-void __init crypto_init_proc(void)
-{
- proc_create("crypto", 0, NULL, &proc_crypto_ops);
-}
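The removed proc.c exposed the algorithm list through the seq_file start/next/stop/show cursor protocol: c_start() takes the semaphore and positions at *pos, c_next() advances, c_show() prints one record, c_stop() drops the lock. A small user-space sketch of that cursor protocol over a plain array follows, for illustration only; nothing kernel-specific is used and the algorithm names are just examples.

/*
 * Illustration only: the start/next/stop/show cursor protocol the deleted
 * proc.c implemented with seq_file, reduced to a plain array iterator.
 */
#include <stdio.h>

static const char *algs[] = { "michael_mic", "aes", "arc4" };
#define NALGS (sizeof(algs) / sizeof(algs[0]))

static const void *c_start(long *pos)
{
	/* the kernel version takes crypto_alg_sem here */
	return (*pos < (long)NALGS) ? &algs[*pos] : NULL;
}

static const void *c_next(const void *p, long *pos)
{
	++*pos;
	return (*pos < (long)NALGS) ? &algs[*pos] : NULL;
}

static void c_stop(const void *p)
{
	/* the kernel version releases crypto_alg_sem here */
}

static int c_show(const void *p)
{
	printf("name : %s\n\n", *(const char * const *)p);
	return 0;
}

int main(void)
{
	long pos = 0;
	const void *p;

	for (p = c_start(&pos); p; p = c_next(p, &pos))
		c_show(p);
	c_stop(p);
	return 0;
}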
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 3684da340bd4..7bf55e393554 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -13,7 +13,7 @@
* u16 Time //indicate time delay.
* output: none
********************************************************************************************************************/
-void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
+static void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
{
pBA->bValid = true;
if(Time != 0)
@@ -25,7 +25,7 @@ void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
* input: PBA_RECORD pBA //BA entry to be disabled
* output: none
********************************************************************************************************************/
-void DeActivateBAEntry( struct ieee80211_device *ieee, PBA_RECORD pBA)
+static void DeActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA)
{
pBA->bValid = false;
del_timer_sync(&pBA->Timer);
@@ -37,7 +37,7 @@ void DeActivateBAEntry( struct ieee80211_device *ieee, PBA_RECORD pBA)
* output: none
* notice: As PTX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME
********************************************************************************************************************/
-u8 TxTsDeleteBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
+static u8 TxTsDeleteBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
{
PBA_RECORD pAdmittedBa = &pTxTs->TxAdmittedBARecord; //These two BA entries must exist in TS structure
PBA_RECORD pPendingBa = &pTxTs->TxPendingBARecord;
@@ -67,7 +67,7 @@ u8 TxTsDeleteBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
* output: none
* notice: As PRX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME, same with above
********************************************************************************************************************/
-u8 RxTsDeleteBA( struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
+static u8 RxTsDeleteBA(struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
{
PBA_RECORD pBa = &pRxTs->RxAdmittedBARecord;
u8 bSendDELBA = false;
@@ -307,7 +307,9 @@ static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode)
+static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
+ PBA_RECORD pBA, TR_SELECT TxRxSelect,
+ u16 ReasonCode)
{
struct sk_buff *skb = NULL;
skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index e956da5a2d76..53ec2d435ffe 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -219,7 +219,7 @@ void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
/*
* Return: true if station in half n mode and AP supports 40 bw
*/
-bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
+static bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -238,7 +238,7 @@ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
return retValue;
}
-bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
+static bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -376,7 +376,7 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
* return:
* notice:
* *****************************************************************************************************************/
-void HTIOTPeerDetermine(struct ieee80211_device *ieee)
+static void HTIOTPeerDetermine(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
struct ieee80211_network *net = &ieee->current_network;
@@ -413,7 +413,7 @@ void HTIOTPeerDetermine(struct ieee80211_device *ieee)
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
u8 ret = 0;
return ret;
@@ -432,7 +432,7 @@ u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
* Return: true if driver should disable MCS15
* 2008.04.15 Emily
*/
-bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
+static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
{
bool retValue = false;
@@ -469,7 +469,8 @@ bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
* Return: true if driver should disable all two spatial stream packet
* 2008.04.21 Emily
*/
-bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee,
+ u8 *PeerMacAddr)
{
bool retValue = false;
@@ -486,7 +487,8 @@ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee, u8 *Pee
* output: none
* return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee, u8 *PeerMacAddr)
+static u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee,
+ u8 *PeerMacAddr)
{
u8 retValue = false; // default enable EDCA Turbo mode.
// Set specific EDCA parameter for different AP in DM handler.
@@ -500,7 +502,7 @@ u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee, u8 *PeerMacAddr)
* output: none
* return: return 1 if true
* *****************************************************************************************************************/
-u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
+static u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
{
u8 retValue = 0;
@@ -515,7 +517,7 @@ u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
return retValue;
}
-u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
+static u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
{
u8 retValue = 0;
if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
@@ -792,7 +794,7 @@ void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
* return: always we return true
* notice:
* *****************************************************************************************************************/
-u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
+static u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
{
u8 i;
if (pOperateMCS == NULL)
@@ -907,7 +909,8 @@ u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSF
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
-u8 HTFilterMCSRate( struct ieee80211_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
+static u8 HTFilterMCSRate(struct ieee80211_device *ieee, u8 *pSupportMCS,
+ u8 *pOperateMCS)
{
u8 i=0;
@@ -1317,51 +1320,6 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_
}
}
-void HTUseDefaultSetting(struct ieee80211_device *ieee)
-{
- PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
-// u8 regBwOpMode;
-
- if(pHTInfo->bEnableHT)
- {
- pHTInfo->bCurrentHTSupport = true;
-
- pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;
-
- pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
-
- pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz;
-
- pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz;
-
- pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
-
- pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
-
- pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
-
- pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
-
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
-
- // Set BWOpMode register
-
- //update RATR index0
- HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet);
- //function below is not implemented at all. WB
-#ifdef TODO
- Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet);
-#endif
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL);
- ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
-
- }
- else
- {
- pHTInfo->bCurrentHTSupport = false;
- }
- return;
-}
/********************************************************************************************************************
*function: check whether HT control field exists
* input: struct ieee80211_device *ieee
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 3058120a3243..426f223e6a21 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -3,13 +3,13 @@
#include <linux/slab.h>
#include "rtl819x_TS.h"
-void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(unsigned long data)
{
// Not implement yet
// This is used for WMMSA and ACM , that would send ADDTSReq frame.
}
-void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(unsigned long data)
{
// Not implement yet
// This is used for WMMSA and ACM.
@@ -22,7 +22,7 @@ void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(unsigned long data)
{
PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
@@ -99,7 +99,7 @@ void RxPktPendingTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(unsigned long data)
{
PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
u8 num = pTxTs->num;
@@ -110,7 +110,7 @@ void TsAddBaProcess(unsigned long data)
}
-void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
+static void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
{
memset(pTsCommonInfo->Addr, 0, 6);
memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
@@ -119,7 +119,7 @@ void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
pTsCommonInfo->TClasNum = 0;
}
-void ResetTxTsEntry(PTX_TS_RECORD pTS)
+static void ResetTxTsEntry(PTX_TS_RECORD pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->TxCurSeq = 0;
@@ -130,7 +130,7 @@ void ResetTxTsEntry(PTX_TS_RECORD pTS)
ResetBaEntry(&pTS->TxPendingBARecord);
}
-void ResetRxTsEntry(PRX_TS_RECORD pTS)
+static void ResetRxTsEntry(PRX_TS_RECORD pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->RxIndicateSeq = 0xffff; // This indicate the RxIndicateSeq is not used now!!
@@ -224,7 +224,8 @@ void TSInitialize(struct ieee80211_device *ieee)
}
-void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
+static void AdmitTS(struct ieee80211_device *ieee,
+ PTS_COMMON_INFO pTsCommonInfo, u32 InactTime)
{
del_timer_sync(&pTsCommonInfo->SetupTimer);
del_timer_sync(&pTsCommonInfo->InactTimer);
@@ -234,7 +235,9 @@ void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 I
}
-PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8 *Addr, u8 TID, TR_SELECT TxRxSelect)
+static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
+ u8 *Addr, u8 TID,
+ TR_SELECT TxRxSelect)
{
//DIRECTION_VALUE dir;
u8 dir;
@@ -309,14 +312,9 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8 *Addr, u8
return NULL;
}
-void MakeTSEntry(
- PTS_COMMON_INFO pTsCommonInfo,
- u8 *Addr,
- PTSPEC_BODY pTSPEC,
- PQOS_TCLAS pTCLAS,
- u8 TCLAS_Num,
- u8 TCLAS_Proc
- )
+static void MakeTSEntry(PTS_COMMON_INFO pTsCommonInfo, u8 *Addr,
+ PTSPEC_BODY pTSPEC, PQOS_TCLAS pTCLAS, u8 TCLAS_Num,
+ u8 TCLAS_Proc)
{
u8 count;
@@ -472,11 +470,8 @@ bool GetTs(
}
}
-void RemoveTsEntry(
- struct ieee80211_device *ieee,
- PTS_COMMON_INFO pTs,
- TR_SELECT TxRxSelect
- )
+static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
+ TR_SELECT TxRxSelect)
{
//u32 flags = 0;
unsigned long flags = 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h b/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
deleted file mode 100644
index c3c87108ee9e..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/rtl_crypto.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _LINUX_CRYPTO_H
-#define _LINUX_CRYPTO_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <asm/page.h>
-#include <asm/errno.h>
-
-#define crypto_register_alg crypto_register_alg_rsl
-#define crypto_unregister_alg crypto_unregister_alg_rsl
-#define crypto_alloc_tfm crypto_alloc_tfm_rsl
-#define crypto_free_tfm crypto_free_tfm_rsl
-#define crypto_alg_available crypto_alg_available_rsl
-
-/*
- * Algorithm masks and types.
- */
-#define CRYPTO_ALG_TYPE_MASK 0x000000ff
-#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004
-
-/*
- * Transform masks and values (for crt_flags).
- */
-#define CRYPTO_TFM_MODE_MASK 0x000000ff
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
-#define CRYPTO_TFM_MODE_ECB 0x00000001
-#define CRYPTO_TFM_MODE_CBC 0x00000002
-#define CRYPTO_TFM_MODE_CFB 0x00000004
-#define CRYPTO_TFM_MODE_CTR 0x00000008
-
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
-
-/*
- * Miscellaneous stuff.
- */
-#define CRYPTO_UNSPEC 0
-#define CRYPTO_MAX_ALG_NAME 64
-
-struct scatterlist;
-
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
- */
-struct cipher_alg {
- unsigned int cia_min_keysize;
- unsigned int cia_max_keysize;
- int (*cia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
- void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src);
-};
-
-struct digest_alg {
- unsigned int dia_digestsize;
- void (*dia_init)(void *ctx);
- void (*dia_update)(void *ctx, const u8 *data, unsigned int len);
- void (*dia_final)(void *ctx, u8 *out);
- int (*dia_setkey)(void *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
-};
-
-struct compress_alg {
- int (*coa_init)(void *ctx);
- void (*coa_exit)(void *ctx);
- int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define cra_cipher cra_u.cipher
-#define cra_digest cra_u.digest
-#define cra_compress cra_u.compress
-
-struct crypto_alg {
- struct list_head cra_list;
- u32 cra_flags;
- unsigned int cra_blocksize;
- unsigned int cra_ctxsize;
- const char cra_name[CRYPTO_MAX_ALG_NAME];
-
- union {
- struct cipher_alg cipher;
- struct digest_alg digest;
- struct compress_alg compress;
- } cra_u;
-
- struct module *cra_module;
-};
-
-/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-
-/*
- * Algorithm query interface.
- */
-int crypto_alg_available(const char *name, u32 flags);
-
-/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_tfm() and
- * crypto_free_tfm(), as well as the various helpers below.
- */
-struct crypto_tfm;
-
-struct cipher_tfm {
- void *cit_iv;
- unsigned int cit_ivsize;
- u32 cit_mode;
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- int (*cit_encrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_encrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- int (*cit_decrypt)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
- int (*cit_decrypt_iv)(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv);
- void (*cit_xor_block)(u8 *dst, const u8 *src);
-};
-
-struct digest_tfm {
- void (*dit_init)(struct crypto_tfm *tfm);
- void (*dit_update)(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
- void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
- void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
- unsigned int nsg, u8 *out);
- int (*dit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
-#ifdef CONFIG_CRYPTO_HMAC
- void *dit_hmac_block;
-#endif
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_cipher crt_u.cipher
-#define crt_digest crt_u.digest
-#define crt_compress crt_u.compress
-
-struct crypto_tfm {
-
- u32 crt_flags;
-
- union {
- struct cipher_tfm cipher;
- struct digest_tfm digest;
- struct compress_tfm compress;
- } crt_u;
-
- struct crypto_alg *__crt_alg;
-};
-
-/*
- * Transform user interface.
- */
-
-/*
- * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
- * If that fails and the kernel supports dynamically loadable modules, it
- * will then attempt to load a module of the same name or alias. A refcount
- * is grabbed on the algorithm which is then associated with the new transform.
- *
- * crypto_free_tfm() frees up the transform and any associated resources,
- * then drops the refcount on the associated algorithm.
- */
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
-void crypto_free_tfm(struct crypto_tfm *tfm);
-
-/*
- * Transform helpers which query the underlying algorithm.
- */
-static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_name;
-}
-
-static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
-
- if (alg->cra_module)
- return alg->cra_module->name;
- else
- return NULL;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
-static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_min_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_max_keysize;
-}
-
-static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_ivsize;
-}
-
-static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_blocksize;
-}
-
-static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- return tfm->__crt_alg->cra_digest.dia_digestsize;
-}
-
-/*
- * API wrappers.
- */
-static inline void crypto_digest_init(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_init(tfm);
-}
-
-static inline void crypto_digest_update(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_update(tfm, sg, nsg);
-}
-
-static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_final(tfm, out);
-}
-
-static inline void crypto_digest_digest(struct crypto_tfm *tfm,
- struct scatterlist *sg,
- unsigned int nsg, u8 *out)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
-}
-
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- if (tfm->crt_digest.dit_setkey == NULL)
- return -ENOSYS;
- return tfm->crt_digest.dit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
-}
-
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
-static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
- return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(tfm->crt_cipher.cit_iv, src, len);
-}
-
-static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(dst, tfm->crt_cipher.cit_iv, len);
-}
-
-static inline int crypto_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
- return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
-}
-
-/*
- * HMAC support.
- */
-#ifdef CONFIG_CRYPTO_HMAC
-void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
-void crypto_hmac_update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg);
-void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
- unsigned int *keylen, u8 *out);
-void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
- struct scatterlist *sg, unsigned int nsg, u8 *out);
-#endif /* CONFIG_CRYPTO_HMAC */
-
-#endif /* _LINUX_CRYPTO_H */
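The removed header registers algorithms through one struct (struct crypto_alg) whose cra_u union holds per-type operation tables, selected at run time by masking cra_flags with CRYPTO_ALG_TYPE_MASK; the deleted proc.c used the same dispatch in c_show(). A self-contained user-space sketch of that tagged-union pattern follows, for illustration only; the type names and field values are invented for the example.

/*
 * Illustration only: tagged-union dispatch in the style of struct crypto_alg
 * from the deleted header, with invented names and values.
 */
#include <stdio.h>

#define ALG_TYPE_MASK	0x0f
#define ALG_TYPE_CIPHER	0x01
#define ALG_TYPE_DIGEST	0x02

struct cipher_ops { unsigned int min_keysize, max_keysize; };
struct digest_ops { unsigned int digestsize; };

struct alg {
	const char *name;
	unsigned int flags;
	union {
		struct cipher_ops cipher;
		struct digest_ops digest;
	} u;
};

static void show(const struct alg *a)
{
	printf("name : %s\n", a->name);
	switch (a->flags & ALG_TYPE_MASK) {
	case ALG_TYPE_DIGEST:
		printf("type : digest\ndigestsize : %u\n",
		       a->u.digest.digestsize);
		break;
	case ALG_TYPE_CIPHER:
		printf("type : cipher\nmin keysize : %u\nmax keysize : %u\n",
		       a->u.cipher.min_keysize, a->u.cipher.max_keysize);
		break;
	default:
		printf("type : unknown\n");
		break;
	}
}

int main(void)
{
	const struct alg mic = {
		.name = "michael_mic",
		.flags = ALG_TYPE_DIGEST,
		.u.digest.digestsize = 8,
	};

	show(&mic);
	return 0;
}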
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
deleted file mode 100644
index 8b73f6cefcf9..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Cipher operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * 2002 Adam J. Richter <adam@yggdrasil.com>
- * 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <asm/scatterlist.h>
-#include "internal.h"
-#include "scatterwalk.h"
-
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
-{
- if (nbytes <= walk->len_this_page &&
- (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
- PAGE_CACHE_SIZE)
- return walk->data;
- else
- return scratch;
-}
-
-static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
-{
- if (out)
- memcpy(sgdata, buf, nbytes);
- else
- memcpy(buf, sgdata, nbytes);
-}
-
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
-{
- unsigned int rest_of_page;
-
- walk->sg = sg;
-
- walk->page = sg->page;
- walk->len_this_segment = sg->length;
-
- rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
- walk->len_this_page = min(sg->length, rest_of_page);
- walk->offset = sg->offset;
-}
-
-void scatterwalk_map(struct scatter_walk *walk)
-{
- walk->data = kmap_atomic(walk->page) + walk->offset;
-}
-
-static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
-{
- /* walk->data may be pointing the first byte of the next page;
- however, we know we transferred at least one byte. So,
- walk->data - 1 will be a virtual address in the mapped page. */
-
- if (out)
- flush_dcache_page(walk->page);
-
- if (more) {
- walk->len_this_segment -= walk->len_this_page;
-
- if (walk->len_this_segment) {
- walk->page++;
- walk->len_this_page = min(walk->len_this_segment,
- (unsigned)PAGE_CACHE_SIZE);
- walk->offset = 0;
- }
- else
- scatterwalk_start(walk, sg_next(walk->sg));
- }
-}
-
-void scatterwalk_done(struct scatter_walk *walk, int out, int more)
-{
- crypto_kunmap(walk->data, out);
- if (walk->len_this_page == 0 || !more)
- scatterwalk_pagedone(walk, out, more);
-}
-
-/*
- * Do not call this unless the total length of all of the fragments
- * has been verified as multiple of the block size.
- */
-int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes)
-{
- if (buf != walk->data) {
- while (nbytes > walk->len_this_page) {
- memcpy_dir(buf, walk->data, walk->len_this_page, out);
- buf += walk->len_this_page;
- nbytes -= walk->len_this_page;
-
- kunmap_atomic(walk->data);
- scatterwalk_pagedone(walk, out, 1);
- scatterwalk_map(walk);
- }
-
- memcpy_dir(buf, walk->data, nbytes, out);
- }
-
- walk->offset += nbytes;
- walk->len_this_page -= nbytes;
- walk->len_this_segment -= nbytes;
- return 0;
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.h b/drivers/staging/rtl8192u/ieee80211/scatterwalk.h
deleted file mode 100644
index b16446519017..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
- * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_SCATTERWALK_H
-#define _CRYPTO_SCATTERWALK_H
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-
-struct scatter_walk {
- struct scatterlist *sg;
- struct page *page;
- void *data;
- unsigned int len_this_page;
- unsigned int len_this_segment;
- unsigned int offset;
-};
-
-/* Define sg_next is an inline routine now in case we want to change
- scatterlist to a linked list later. */
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
-{
- return sg + 1;
-}
-
-static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out,
- void *src_p, void *dst_p)
-{
- return walk_in->page == walk_out->page &&
- walk_in->offset == walk_out->offset &&
- walk_in->data == src_p && walk_out->data == dst_p;
-}
-
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch);
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
-int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
-void scatterwalk_map(struct scatter_walk *walk, int out);
-void scatterwalk_done(struct scatter_walk *walk, int out, int more);
-
-#endif /* _CRYPTO_SCATTERWALK_H */
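The removed scatterwalk.c/.h walked a scatterlist page by page so that fixed-size cipher blocks could be copied even when they straddle a page or segment boundary (scatterwalk_copychunks() together with the pagedone/advance logic). Below is a user-space sketch of that idea, pulling fixed-size chunks out of a segment list into a flat buffer, for illustration only; the segment contents are arbitrary example data.

/*
 * Illustration only: copy fixed-size chunks out of a list of segments,
 * crossing segment boundaries as needed, in the spirit of the deleted
 * scatterwalk helpers.
 */
#include <stdio.h>
#include <string.h>

struct seg { const unsigned char *data; size_t len; };
struct walk { const struct seg *sg; size_t nseg, idx, off; };

static void walk_start(struct walk *w, const struct seg *sg, size_t nseg)
{
	w->sg = sg;
	w->nseg = nseg;
	w->idx = 0;
	w->off = 0;
}

/* returns 0 on success, -1 once the segment list runs out of data */
static int walk_copy(struct walk *w, unsigned char *buf, size_t nbytes)
{
	while (nbytes) {
		size_t avail, n;

		if (w->idx == w->nseg)
			return -1;
		avail = w->sg[w->idx].len - w->off;
		if (!avail) {			/* advance to the next segment */
			w->idx++;
			w->off = 0;
			continue;
		}
		n = nbytes < avail ? nbytes : avail;
		memcpy(buf, w->sg[w->idx].data + w->off, n);
		buf += n;
		w->off += n;
		nbytes -= n;
	}
	return 0;
}

int main(void)
{
	const struct seg sg[] = {
		{ (const unsigned char *)"abcde", 5 },
		{ (const unsigned char *)"fgh", 3 },
	};
	unsigned char block[4];
	struct walk w;

	walk_start(&w, sg, 2);
	while (!walk_copy(&w, block, sizeof(block)))	/* blocks: abcd, efgh */
		printf("block: %.4s\n", block);
	return 0;
}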
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index c61729b727ef..cd06054fbedb 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -20,7 +20,7 @@
#include "r8180_93cx6.h"
-void eprom_cs(struct net_device *dev, short bit)
+static void eprom_cs(struct net_device *dev, short bit)
{
u8 cmdreg;
@@ -37,7 +37,7 @@ void eprom_cs(struct net_device *dev, short bit)
}
-void eprom_ck_cycle(struct net_device *dev)
+static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
@@ -53,7 +53,7 @@ void eprom_ck_cycle(struct net_device *dev)
}
-void eprom_w(struct net_device *dev,short bit)
+static void eprom_w(struct net_device *dev,short bit)
{
u8 cmdreg;
@@ -68,7 +68,7 @@ void eprom_w(struct net_device *dev,short bit)
}
-short eprom_r(struct net_device *dev)
+static short eprom_r(struct net_device *dev)
{
u8 bit;
@@ -82,7 +82,7 @@ short eprom_r(struct net_device *dev)
}
-void eprom_send_bits_string(struct net_device *dev, short b[], int len)
+static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
int i;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index c2bcbe230ed3..1bb6143cdf0f 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -238,7 +238,7 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
-void CamResetAllEntry(struct net_device *dev)
+static void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
//2004/02/11 In static WEP, OID_ADD_KEY or OID_ADD_WEP are set before STA associate to AP.
@@ -591,12 +591,6 @@ static void rtl8192_proc_module_init(void)
rtl8192_proc = proc_mkdir(RTL819xU_MODULE_NAME, init_net.proc_net);
}
-
-void rtl8192_proc_module_remove(void)
-{
- remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
-}
-
/*
* seq_file wrappers for procfile show routines.
*/
@@ -673,7 +667,7 @@ short check_nic_enough_desc(struct net_device *dev, int queue_index)
return (used < MAX_TX_URB);
}
-void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -689,24 +683,6 @@ void dump_eprom(struct net_device *dev)
RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev, i));
}
-
-/****************************************************************************
- ------------------------------HW STUFF---------------------------
-*****************************************************************************/
-
-
-void rtl8192_set_mode(struct net_device *dev, int mode)
-{
- u8 ecmd;
- read_nic_byte(dev, EPROM_CMD, &ecmd);
- ecmd = ecmd & ~EPROM_CMD_OPERATING_MODE_MASK;
- ecmd = ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
- ecmd = ecmd & ~EPROM_CS_BIT;
- ecmd = ecmd & ~EPROM_CK_BIT;
- write_nic_byte(dev, EPROM_CMD, ecmd);
-}
-
-
void rtl8192_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -903,12 +879,6 @@ void rtl8192_rtx_disable(struct net_device *dev)
return;
}
-
-int alloc_tx_beacon_desc_ring(struct net_device *dev, int count)
-{
- return 0;
-}
-
inline u16 ieeerate2rtlrate(int rate)
{
switch (rate) {
@@ -1011,13 +981,13 @@ static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
}
-void rtl8192_data_hard_stop(struct net_device *dev)
+static void rtl8192_data_hard_stop(struct net_device *dev)
{
//FIXME !!
}
-void rtl8192_data_hard_resume(struct net_device *dev)
+static void rtl8192_data_hard_resume(struct net_device *dev)
{
// FIXME !!
}
@@ -1025,7 +995,7 @@ void rtl8192_data_hard_resume(struct net_device *dev)
/* this function TX data frames when the ieee80211 stack requires this.
* It checks also if we need to stop the ieee tx queue, eventually do it
*/
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate)
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
@@ -1053,7 +1023,7 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rat
* If the ring is full packet are dropped (for data frame the queue
* is stopped before this can happen).
*/
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
@@ -1318,7 +1288,8 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
/* Don't send data frame during scanning.*/
if ((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0) &&
(!(priv->ieee80211->queue_stop))) {
- if (NULL != (skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]))))
+ skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]));
+ if (skb)
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
return; //modified by david to avoid further processing AMSDU
@@ -1358,25 +1329,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
}
-void rtl8192_beacon_stop(struct net_device *dev)
-{
- u8 msr, msrm, msr2;
- struct r8192_priv *priv = ieee80211_priv(dev);
-
- read_nic_byte(dev, MSR, &msr);
- msrm = msr & MSR_LINK_MASK;
- msr2 = msr & ~MSR_LINK_MASK;
-
- if (NIC_8192U == priv->card_8192)
- usb_kill_urb(priv->rx_urb[MAX_RX_URB]);
- if ((msrm == (MSR_LINK_ADHOC<<MSR_LINK_SHIFT) ||
- (msrm == (MSR_LINK_MASTER<<MSR_LINK_SHIFT)))) {
- write_nic_byte(dev, MSR, msr2 | MSR_LINK_NONE);
- write_nic_byte(dev, MSR, msr);
- }
-}
-
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
@@ -1423,7 +1376,7 @@ void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-void rtl8192_update_cap(struct net_device *dev, u16 cap)
+static void rtl8192_update_cap(struct net_device *dev, u16 cap)
{
u32 tmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1445,7 +1398,7 @@ void rtl8192_update_cap(struct net_device *dev, u16 cap)
}
}
-void rtl8192_net_update(struct net_device *dev)
+static void rtl8192_net_update(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1560,11 +1513,6 @@ u16 N_DBPSOfRate(u16 DataRate)
return N_DBPS;
}
-void rtl819xU_cmd_isr(struct urb *tx_cmd_urb, struct pt_regs *regs)
-{
- usb_free_urb(tx_cmd_urb);
-}
-
unsigned int txqueue2outpipe(struct r8192_priv *priv, unsigned int tx_queue)
{
if (tx_queue >= 9) {
@@ -1673,7 +1621,7 @@ static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
return QueueSelect;
}
-u8 MRateToHwRate8190Pci(u8 rate)
+static u8 MRateToHwRate8190Pci(u8 rate)
{
u8 ret = DESC90_RATE1M;
@@ -2039,7 +1987,7 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
#endif
extern void rtl8192_update_ratr_table(struct net_device *dev);
-void rtl8192_link_change(struct net_device *dev)
+static void rtl8192_link_change(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
@@ -2071,7 +2019,7 @@ static struct ieee80211_qos_parameters def_qos_parameters = {
};
-void rtl8192_update_beacon(struct work_struct *work)
+static void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
struct net_device *dev = priv->ieee80211->dev;
@@ -2086,8 +2034,8 @@ void rtl8192_update_beacon(struct work_struct *work)
/*
* background support to run QoS activate functionality
*/
-int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
-void rtl8192_qos_activate(struct work_struct *work)
+static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
+static void rtl8192_qos_activate(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
struct net_device *dev = priv->ieee80211->dev;
@@ -2315,7 +2263,7 @@ static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
return true;
}
-bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
+static bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
{
bool Reval;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2329,7 +2277,7 @@ bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
return Reval;
}
-void rtl8192_refresh_supportrate(struct r8192_priv *priv)
+static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
{
struct ieee80211_device *ieee = priv->ieee80211;
//we do not consider set support rate for ABG mode, only HT MCS rate is set here.
@@ -2340,7 +2288,7 @@ void rtl8192_refresh_supportrate(struct r8192_priv *priv)
return;
}
-u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
+static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 ret = 0;
@@ -2359,7 +2307,7 @@ u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
}
return ret;
}
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
+static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
@@ -2779,7 +2727,7 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
return;
}
-short rtl8192_get_channel_map(struct net_device *dev)
+static short rtl8192_get_channel_map(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN) {
@@ -2792,7 +2740,7 @@ short rtl8192_get_channel_map(struct net_device *dev)
return 0;
}
-short rtl8192_init(struct net_device *dev)
+static short rtl8192_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2840,7 +2788,7 @@ short rtl8192_init(struct net_device *dev)
* return: none
* notice: This part need to modified according to the rate set we filtered
* ****************************************************************************/
-void rtl8192_hwconfig(struct net_device *dev)
+static void rtl8192_hwconfig(struct net_device *dev)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
@@ -2923,7 +2871,7 @@ void rtl8192_hwconfig(struct net_device *dev)
//InitializeAdapter and PhyCfg
-bool rtl8192_adapter_start(struct net_device *dev)
+static bool rtl8192_adapter_start(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 dwRegRead = 0;
@@ -3179,7 +3127,7 @@ static bool HalTxCheckStuck819xUsb(struct net_device *dev)
* <Assumption: RT_TX_SPINLOCK is acquired.>
* First added: 2006.11.19 by emily
*/
-RESET_TYPE TxCheckStuck(struct net_device *dev)
+static RESET_TYPE TxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 QueueID;
@@ -3282,7 +3230,7 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
*
* 8185 and 8185b does not implement this function. This is added by Emily at 2006.11.24
*/
-RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
+static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RESET_TYPE TxResetType = RESET_TYPE_NORESET;
@@ -3321,7 +3269,7 @@ int rtl8192_close(struct net_device *dev);
-void CamRestoreAllEntry(struct net_device *dev)
+static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3397,7 +3345,7 @@ void CamRestoreAllEntry(struct net_device *dev)
// The method checking Tx/Rx stuck of this function is supported by FW,
// which reports Tx and Rx counter to register 0x128 and 0x130.
//////////////////////////////////////////////////////////////
-void rtl819x_ifsilentreset(struct net_device *dev)
+static void rtl819x_ifsilentreset(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reset_times = 0;
@@ -3517,7 +3465,7 @@ void CAM_read_entry(struct net_device *dev, u32 iIndex)
printk("\n");
}
-void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
u32 *TotalRxDataNum)
{
u16 SlotIndex;
@@ -3536,7 +3484,7 @@ void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
-extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
+void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct r8192_priv *priv = container_of(dwork, struct r8192_priv, watch_dog_wq);
@@ -3634,7 +3582,7 @@ int _rtl8192_up(struct net_device *dev)
}
-int rtl8192_open(struct net_device *dev)
+static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
@@ -3754,7 +3702,7 @@ static void r8192_set_multicast(struct net_device *dev)
}
-int r8192_set_mac_adr(struct net_device *dev, void *mac)
+static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
@@ -3770,7 +3718,7 @@ int r8192_set_mac_adr(struct net_device *dev, void *mac)
}
/* based on ipw2200 driver */
-int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
@@ -3861,7 +3809,7 @@ out:
return ret;
}
-u8 HwRateToMRate90(bool bIsHT, u8 rate)
+static u8 HwRateToMRate90(bool bIsHT, u8 rate)
{
u8 ret_rate = 0xff;
@@ -3947,7 +3895,7 @@ static void UpdateRxPktTimeStamp8190(struct net_device *dev,
//by amy 080606
-long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
+static long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
{
long signal_power; // in dBm.
@@ -3963,7 +3911,9 @@ long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
be a local static. Otherwise, it may increase when we return from S3/S4. The
value will be kept in memory or disk. Declare the value in the adaptor
and it will be reinitialized when returned from S3/S4. */
-void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer, struct ieee80211_rx_stats *pprevious_stats, struct ieee80211_rx_stats *pcurrent_stats)
+static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
+ struct ieee80211_rx_stats *pprevious_stats,
+ struct ieee80211_rx_stats *pcurrent_stats)
{
bool bcheck = false;
u8 rfpath;
@@ -4449,8 +4399,8 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
} /* QueryRxPhyStatus8190Pci */
-void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
+static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
@@ -4721,7 +4671,7 @@ u32 GetRxPacketShiftBytes819xUsb(struct ieee80211_rx_stats *Status, bool bIsRxA
+ Status->RxBufShift);
}
-void rtl8192_rx_nomal(struct sk_buff *skb)
+static void rtl8192_rx_nomal(struct sk_buff *skb)
{
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
@@ -4871,8 +4821,8 @@ void rtl8192_rx_nomal(struct sk_buff *skb)
}
-void rtl819xusb_process_received_packet(struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
u8 *frame;
u16 frame_len = 0;
@@ -4932,7 +4882,7 @@ static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
}
-void rtl8192_rx_cmd(struct sk_buff *skb)
+static void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 41fb67b7337d..d97ad7b909bc 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -50,34 +50,7 @@ DRxPathSel DM_RxPathSelTable;
/*--------------------Define export function prototype-----------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-
-extern void hal_dm_watchdog(struct net_device *dev);
-
-
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-
-extern void dm_cck_txpower_adjust(struct net_device *dev,bool binch14);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type,
- u32 dm_value);
-extern void DM_ChangeFsyncSetting(struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type,
- u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
extern void dm_check_fsync(struct net_device *dev);
-extern void dm_shadow_init(struct net_device *dev);
-
/*--------------------Define export function prototype-----------------------*/
@@ -155,8 +128,7 @@ static void dm_ctstoself(struct net_device *dev);
// This function is only invoked at driver intialization once.
//
//
-extern void
-init_hal_dm(struct net_device *dev)
+void init_hal_dm(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -176,7 +148,7 @@ init_hal_dm(struct net_device *dev)
} // InitHalDm
-extern void deinit_hal_dm(struct net_device *dev)
+void deinit_hal_dm(struct net_device *dev)
{
dm_deInit_fsync(dev);
@@ -242,7 +214,7 @@ void dm_CheckRxAggregation(struct net_device *dev) {
-extern void hal_dm_watchdog(struct net_device *dev)
+void hal_dm_watchdog(struct net_device *dev)
{
//struct r8192_priv *priv = ieee80211_priv(dev);
@@ -275,7 +247,7 @@ extern void hal_dm_watchdog(struct net_device *dev)
* 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
* the function after making sure RF_Type.
*/
-extern void init_rate_adaptive(struct net_device *dev)
+void init_rate_adaptive(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -875,7 +847,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
priv->txpower_count = 0;
}
-extern void dm_txpower_trackingcallback(struct work_struct *work)
+void dm_txpower_trackingcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
@@ -1606,10 +1578,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
-extern void dm_cck_txpower_adjust(
- struct net_device *dev,
- bool binch14
-)
+void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{ // dm_CCKTxPowerAdjust
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1642,7 +1611,7 @@ static void dm_txpower_reset_recovery(
} // dm_TXPowerResetRecovery
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
+void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 reg_ratr = priv->rate_adaptive.last_ratr;
@@ -1718,7 +1687,7 @@ static void dm_bb_initialgain_restore(struct net_device *dev)
} // dm_BBInitialGainRestore
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev)
+void dm_backup_dynamic_mechanism_state(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1773,9 +1742,9 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type,
- u32 dm_value)
+
+void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
+ u32 dm_value)
{
if (dm_type == DIG_TYPE_THRESH_HIGH)
{
@@ -1842,24 +1811,8 @@ extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
dm_digtable.rx_gain_range_max = (u8)dm_value;
}
} /* DM_ChangeDynamicInitGainThresh */
-extern void
-dm_change_fsync_setting(
- struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- if (DM_Type == 0) // monitor 0xc38 register
- {
- if(DM_Value > 1)
- DM_Value = 1;
- priv->framesyncMonitor = (u8)DM_Value;
- //DbgPrint("pHalData->framesyncMonitor = %d", pHalData->framesyncMonitor);
- }
-}
-
-extern void
+void
dm_change_rxpath_selection_setting(
struct net_device *dev,
s32 DM_Type,
@@ -2540,7 +2493,7 @@ static void dm_cs_ratio(
}
}
-extern void dm_init_edca_turbo(struct net_device *dev)
+void dm_init_edca_turbo(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2660,26 +2613,6 @@ dm_CheckEdcaTurbo_EXIT:
lastRxOkCnt = priv->stats.rxbytesunicast;
} // dm_CheckEdcaTurbo
-extern void DM_CTSToSelfSetting(struct net_device *dev,u32 DM_Type, u32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
-
- if (DM_Type == 0) // CTS to self disable/enable
- {
- if(DM_Value > 1)
- DM_Value = 1;
- priv->ieee80211->bCTSToSelfEnable = (bool)DM_Value;
- //DbgPrint("pMgntInfo->bCTSToSelfEnable = %d\n", pMgntInfo->bCTSToSelfEnable);
- }
- else if(DM_Type == 1) //CTS to self Th
- {
- if(DM_Value >= 50)
- DM_Value = 50;
- priv->ieee80211->CTSToSelfTH = (u8)DM_Value;
- //DbgPrint("pMgntInfo->CTSToSelfTH = %d\n", pMgntInfo->CTSToSelfTH);
- }
-}
-
static void dm_init_ctstoself(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
@@ -2779,7 +2712,7 @@ static void dm_check_pbc_gpio(struct net_device *dev)
* 01/30/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
+void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq);
@@ -3139,7 +3072,7 @@ static void dm_deInit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-extern void dm_fsync_timer_callback(unsigned long data)
+void dm_fsync_timer_callback(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
@@ -3478,7 +3411,7 @@ void dm_check_fsync(struct net_device *dev)
* 05/29/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern void dm_shadow_init(struct net_device *dev)
+void dm_shadow_init(struct net_device *dev)
{
u8 page;
u16 offset;
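
dm_fsync_timer_callback() above keeps the old kernel timer convention (pre timer_setup()) in which the callback receives an opaque unsigned long cookie and the timer is torn down with del_timer_sync(), both visible in the hunks for this file. A hedged sketch of that pattern; everything except the callback signature and del_timer_sync() is an illustrative stand-in:

	#include <linux/jiffies.h>
	#include <linux/netdevice.h>
	#include <linux/timer.h>

	static struct timer_list fsync_timer;		/* illustrative placement */

	static void fsync_timer_callback(unsigned long data)
	{
		struct net_device *dev = (struct net_device *)data;

		netdev_dbg(dev, "fsync tick\n");
		/* rearm while the state machine still needs it */
		mod_timer(&fsync_timer, jiffies + msecs_to_jiffies(500));
	}

	/* setup:    setup_timer(&fsync_timer, fsync_timer_callback, (unsigned long)dev);
	 * teardown: del_timer_sync(&fsync_timer);
	 */
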
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index ae550520311b..3008f91ad4cf 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -200,9 +200,9 @@ typedef struct tag_Tx_Config_Cmd_Format {
/*------------------------Export global variable----------------------------*/
-extern dig_t dm_digtable;
-extern u8 dm_shadow[16][256];
-extern DRxPathSel DM_RxPathSelTable;
+extern dig_t dm_digtable;
+extern u8 dm_shadow[16][256];
+extern DRxPathSel DM_RxPathSelTable;
/*------------------------Export global variable----------------------------*/
@@ -212,25 +212,23 @@ extern DRxPathSel DM_RxPathSelTable;
/*--------------------------Exported Function prototype---------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-
+extern void init_hal_dm(struct net_device *dev);
+extern void deinit_hal_dm(struct net_device *dev);
extern void hal_dm_watchdog(struct net_device *dev);
-
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
- u32 dm_type, u32 dm_value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
- u32 force_type, u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
-extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
-extern void dm_shadow_init(struct net_device *dev);
+extern void init_rate_adaptive(struct net_device *dev);
+extern void dm_txpower_trackingcallback(struct work_struct *work);
+extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+ u32 dm_type, u32 dm_value);
+extern void dm_force_tx_fw_info(struct net_device *dev,
+ u32 force_type, u32 force_value);
+extern void dm_init_edca_turbo(struct net_device *dev);
+extern void dm_rf_operation_test_callback(unsigned long data);
+extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
+extern void dm_fsync_timer_callback(unsigned long data);
+extern void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+extern void dm_shadow_init(struct net_device *dev);
extern void dm_initialize_txpower_tracking(struct net_device *dev);
/*--------------------------Exported Function prototype---------------------*/
diff --git a/drivers/staging/rtl8192u/r819xU_HTGen.h b/drivers/staging/rtl8192u/r819xU_HTGen.h
deleted file mode 100644
index 6a4678f7da5f..000000000000
--- a/drivers/staging/rtl8192u/r819xU_HTGen.h
+++ /dev/null
@@ -1,12 +0,0 @@
-//
-// IOT Action for different AP
-//
-typedef enum _HT_IOT_ACTION{
- HT_IOT_ACT_TX_USE_AMSDU_4K = 0x00000001,
- HT_IOT_ACT_TX_USE_AMSDU_8K = 0x00000002,
- HT_IOT_ACT_DECLARE_MCS13 = 0x00000004,
- HT_IOT_ACT_DISABLE_EDCA_TURBO = 0x00000008,
- HT_IOT_ACT_MGNT_USE_CCK_6M = 0x00000010,
- HT_IOT_ACT_CDD_FSYNC = 0x00000020,
- HT_IOT_ACT_PURE_N_MODE = 0x00000040,
-}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
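
The deleted r819xU_HTGen.h defined IOT (interoperability) work-around bits that get OR-ed into a single 32-bit IOTAction word; the field itself appears in the RT_HIGH_THROUGHPUT structure removed further below. A minimal sketch of how such flags are typically set and tested; the helper is illustrative and assumes the enum values above:

	#include <linux/types.h>

	/* mark the work-around once a problematic AP is recognised:
	 *	iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
	 */
	static bool edca_turbo_allowed(u32 iot_action)
	{
		/* a set bit disables the corresponding behaviour for this peer */
		return !(iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO);
	}
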
diff --git a/drivers/staging/rtl8192u/r819xU_HTType.h b/drivers/staging/rtl8192u/r819xU_HTType.h
deleted file mode 100644
index 2cbb8e6584f8..000000000000
--- a/drivers/staging/rtl8192u/r819xU_HTType.h
+++ /dev/null
@@ -1,379 +0,0 @@
-#ifndef _R819XU_HTTYPE_H_
-#define _R819XU_HTTYPE_H_
-
-
-/*----------------------------------------------------------------------
- * The HT Capability element is present in beacons, association request,
- * reassociation request and probe response frames
- *----------------------------------------------------------------------*/
-
-/* Operation mode value */
-#define HT_OPMODE_NO_PROTECT 0
-#define HT_OPMODE_OPTIONAL 1
-#define HT_OPMODE_40MHZ_PROTECT 2
-#define HT_OPMODE_MIXED 3
-
-/* MIMO Power Save Settings */
-#define MIMO_PS_STATIC 0
-#define MIMO_PS_DYNAMIC 1
-#define MIMO_PS_NOLIMIT 3
-
-
-/* There should be 128 bits to cover all of the MCS rates. However, since
- * 8190 does not support too much rates, one integer is quite enough. */
-
-#define sHTCLng 4
-
-
-#define HT_SUPPORTED_MCS_1SS_BITMAP 0x000000ff
-#define HT_SUPPORTED_MCS_2SS_BITMAP 0x0000ff00
-#define HT_SUPPORTED_MCS_1SS_2SS_BITMAP \
- (HT_MCS_1SS_BITMAP | HT_MCS_1SS_2SS_BITMAP)
-
-
-typedef enum _HT_MCS_RATE {
- HT_MCS0 = 0x00000001,
- HT_MCS1 = 0x00000002,
- HT_MCS2 = 0x00000004,
- HT_MCS3 = 0x00000008,
- HT_MCS4 = 0x00000010,
- HT_MCS5 = 0x00000020,
- HT_MCS6 = 0x00000040,
- HT_MCS7 = 0x00000080,
- HT_MCS8 = 0x00000100,
- HT_MCS9 = 0x00000200,
- HT_MCS10 = 0x00000400,
- HT_MCS11 = 0x00000800,
- HT_MCS12 = 0x00001000,
- HT_MCS13 = 0x00002000,
- HT_MCS14 = 0x00004000,
- HT_MCS15 = 0x00008000,
- /* Do not define MCS32 here although 8190 support MCS32 */
-} HT_MCS_RATE, *PHT_MCS_RATE;
-
-/* Represent Channel Width in HT Capabilities */
-typedef enum _HT_CHANNEL_WIDTH {
- HT_CHANNEL_WIDTH_20 = 0,
- HT_CHANNEL_WIDTH_20_40 = 1,
-} HT_CHANNEL_WIDTH, *PHT_CHANNEL_WIDTH;
-
-/* Represent Extension Channel Offset in HT Capabilities
- * This is available only in 40Mhz mode. */
-typedef enum _HT_EXTCHNL_OFFSET {
- HT_EXTCHNL_OFFSET_NO_EXT = 0,
- HT_EXTCHNL_OFFSET_UPPER = 1,
- HT_EXTCHNL_OFFSET_NO_DEF = 2,
- HT_EXTCHNL_OFFSET_LOWER = 3,
-} HT_EXTCHNL_OFFSET, *PHT_EXTCHNL_OFFSET;
-
-typedef enum _CHNLOP {
- CHNLOP_NONE = 0, /* No Action now */
- CHNLOP_SCAN = 1, /* Scan in progress */
- CHNLOP_SWBW = 2, /* Bandwidth switching in progress */
- CHNLOP_SWCHNL = 3, /* Software Channel switching in progress */
-} CHNLOP, *PCHNLOP;
-
-/* Determine if the Channel Operation is in progress */
-#define CHHLOP_IN_PROGRESS(_pHTInfo) \
- (((_pHTInfo)->ChnlOp > CHNLOP_NONE) ? TRUE : FALSE)
-
-
-typedef enum _HT_ACTION {
- ACT_RECOMMAND_WIDTH = 0,
- ACT_MIMO_PWR_SAVE = 1,
- ACT_PSMP = 2,
- ACT_SET_PCO_PHASE = 3,
- ACT_MIMO_CHL_MEASURE = 4,
- ACT_RECIPROCITY_CORRECT = 5,
- ACT_MIMO_CSI_MATRICS = 6,
- ACT_MIMO_NOCOMPR_STEER = 7,
- ACT_MIMO_COMPR_STEER = 8,
- ACT_ANTENNA_SELECT = 9,
-} HT_ACTION, *PHT_ACTION;
-
-
-/* Define sub-carrier mode for 40MHZ. */
-typedef enum _HT_Bandwidth_40MHZ_Sub_Carrier {
- SC_MODE_DUPLICATE = 0,
- SC_MODE_LOWER = 1,
- SC_MODE_UPPER = 2,
- SC_MODE_FULL40MHZ = 3,
-} HT_BW40_SC_E;
-
-typedef struct _HT_CAPABILITY_ELE {
-
- /* HT capability info */
- u8 AdvCoding:1;
- u8 ChlWidth:1;
- u8 MimoPwrSave:2;
- u8 GreenField:1;
- u8 ShortGI20Mhz:1;
- u8 ShortGI40Mhz:1;
- u8 TxSTBC:1;
- u8 RxSTBC:2;
- u8 DelayBA:1;
- u8 MaxAMSDUSize:1;
- u8 DssCCk:1;
- u8 PSMP:1;
- u8 Rsvd1:1;
- u8 LSigTxopProtect:1;
-
- /* MAC HT parameters info */
- u8 MaxRxAMPDUFactor:2;
- u8 MPDUDensity:3;
- u8 Rsvd2:3;
-
- /* Supported MCS set */
- u8 MCS[16];
-
-
- /* Extended HT Capability Info */
- u16 ExtHTCapInfo;
-
- /* TXBF Capabilities */
- u8 TxBFCap[4];
-
- /* Antenna Selection Capabilities */
- u8 ASCap;
-
-} __packed HT_CAPABILITY_ELE, *PHT_CAPABILITY_ELE;
-
-/*------------------------------------------------------------
- * The HT Information element is present in beacons
- * Only AP is required to include this element
- *------------------------------------------------------------*/
-
-typedef struct _HT_INFORMATION_ELE {
- u8 ControlChl;
-
- u8 ExtChlOffset:2;
- u8 RecommemdedTxWidth:1;
- u8 RIFS:1;
- u8 PSMPAccessOnly:1;
- u8 SrvIntGranularity:3;
-
- u8 OptMode:2;
- u8 NonGFDevPresent:1;
- u8 Revd1:5;
- u8 Revd2:8;
-
- u8 Rsvd3:6;
- u8 DualBeacon:1;
- u8 DualCTSProtect:1;
-
- u8 SecondaryBeacon:1;
- u8 LSigTxopProtectFull:1;
- u8 PcoActive:1;
- u8 PcoPhase:1;
- u8 Rsvd4:4;
-
- u8 BasicMSC[16];
-} __packed HT_INFORMATION_ELE, *PHT_INFORMATION_ELE;
-
-/* MIMO Power Save control field.
- * This is appear in MIMO Power Save Action Frame */
-typedef struct _MIMOPS_CTRL {
- u8 MimoPsEnable:1;
- u8 MimoPsMode:1;
- u8 Reserved:6;
-} MIMOPS_CTRL, *PMIMOPS_CTRL;
-
-typedef enum _HT_SPEC_VER {
- HT_SPEC_VER_IEEE = 0,
- HT_SPEC_VER_EWC = 1,
-} HT_SPEC_VER, *PHT_SPEC_VER;
-
-typedef enum _HT_AGGRE_MODE_E {
- HT_AGG_AUTO = 0,
- HT_AGG_FORCE_ENABLE = 1,
- HT_AGG_FORCE_DISABLE = 2,
-} HT_AGGRE_MODE_E, *PHT_AGGRE_MODE_E;
-
-/*----------------------------------------------------------------------------
- * The Data structure is used to keep HT related variables when card is
- * configured as non-AP STA mode.
- * **Note** Current_xxx should be set to default value in HTInitializeHTInfo()
- *----------------------------------------------------------------------------*/
-
-typedef struct _RT_HIGH_THROUGHPUT {
- u8 bEnableHT;
- u8 bCurrentHTSupport;
- /* Tx 40MHz channel capability */
- u8 bRegBW40MHz;
- u8 bCurBW40MHz;
- /* Tx Short GI for 40Mhz */
- u8 bRegShortGI40MHz;
- u8 bCurShortGI40MHz;
- /* Tx Short GI for 20MHz */
- u8 bRegShortGI20MHz;
- u8 bCurShortGI20MHz;
- /* Tx CCK rate capability */
- u8 bRegSuppCCK;
- u8 bCurSuppCCK;
-
- /* 802.11n spec version for "peer" */
- HT_SPEC_VER ePeerHTSpecVer;
-
-
- /* HT related information for "Self" */
- /* This is HT cap element sent to peer STA, which also indicate
- * HT Rx capabilities. */
- HT_CAPABILITY_ELE SelfHTCap;
- HT_INFORMATION_ELE SelfHTInfo;
-
- /* HT related information for "Peer" */
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
-
-
- /* A-MSDU related */
- /* This indicates Tx A-MSDU capability */
- u8 bAMSDU_Support;
- u16 nAMSDU_MaxSize;
- u8 bCurrent_AMSDU_Support;
- u16 nCurrent_AMSDU_MaxSize;
-
-
- /* A-MPDU related */
- /* This indicate Tx A-MPDU capability */
- u8 bAMPDUEnable;
- u8 bCurrentAMPDUEnable;
- u8 AMPDU_Factor;
- u8 CurrentAMPDUFactor;
- u8 MPDU_Density;
- u8 CurrentMPDUDensity;
-
- /* Forced A-MPDU enable */
- HT_AGGRE_MODE_E ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
-
- /* Forced A-MSDU enable */
- HT_AGGRE_MODE_E ForcedAMSDUMode;
- u16 ForcedAMSDUMaxSize;
-
- u8 bForcedShortGI;
-
- u8 CurrentOpMode;
-
- /* MIMO PS related */
- u8 SelfMimoPs;
- u8 PeerMimoPs;
-
- /* 40MHz Channel Offset settings. */
- HT_EXTCHNL_OFFSET CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz; /* If we use 40 MHz to Tx */
- u8 PeerBandwidth;
-
- /* For Bandwidth Switching */
- u8 bSwBwInProgress;
- CHNLOP ChnlOp; /* sw switching channel in progress. */
- u8 SwBwStep;
- struct timer_list SwBwTimer;
-
- /* For Realtek proprietary A-MPDU factor for aggregation */
- u8 bRegRT2RTAggregation;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- /* Rx Reorder control */
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
-#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- u8 UsbTxAggrNum;
-#endif
-#ifdef USB_RX_AGGREGATION_SUPPORT
- u8 UsbRxFwAggrEn;
- u8 UsbRxFwAggrPageNum;
- u8 UsbRxFwAggrPacketNum;
- u8 UsbRxFwAggrTimeout;
-#endif
-
- /* Add for Broadcom(Linksys) IOT. */
- u8 bIsPeerBcm;
-
- /* For IOT issue. */
- u32 IOTAction;
-} RT_HIGH_THROUGHPUT, *PRT_HIGH_THROUGHPUT;
-
-
-/*----------------------------------------------------------------------
- * The Data structure is used to keep HT related variable for "each Sta"
- * when card is configured as "AP mode"
- *----------------------------------------------------------------------*/
-
-typedef struct _RT_HTINFO_STA_ENTRY {
- u8 bEnableHT;
-
- u8 bSupportCck;
-
- u16 AMSDU_MaxSize;
-
- u8 AMPDU_Factor;
- u8 MPDU_Density;
-
- u8 HTHighestOperaRate;
-
- u8 bBw40MHz;
-
- u8 MimoPs;
-
- u8 McsRateSet[16];
-
-
-} RT_HTINFO_STA_ENTRY, *PRT_HTINFO_STA_ENTRY;
-
-
-
-
-
-/*---------------------------------------------------------------------
- * The Data structure is used to keep HT related variable for "each AP"
- * when card is configured as "STA mode"
- *---------------------------------------------------------------------*/
-
-typedef struct _BSS_HT {
-
- u8 bdSupportHT;
-
- /* HT related elements */
- u8 bdHTCapBuf[32];
- u16 bdHTCapLen;
- u8 bdHTInfoBuf[32];
- u16 bdHTInfoLen;
-
- HT_SPEC_VER bdHTSpecVer;
-
- u8 bdRT2RTAggregation;
- u8 bdRT2RTLongSlotTime;
-} BSS_HT, *PBSS_HT;
-
-typedef struct _MIMO_RSSI {
- u32 EnableAntenna;
- u32 AntennaA;
- u32 AntennaB;
- u32 AntennaC;
- u32 AntennaD;
- u32 Average;
-} MIMO_RSSI, *PMIMO_RSSI;
-
-typedef struct _MIMO_EVM {
- u32 EVM1;
- u32 EVM2;
-} MIMO_EVM, *PMIMO_EVM;
-
-typedef struct _FALSE_ALARM_STATISTICS {
- u32 Cnt_Parity_Fail;
- u32 Cnt_Rate_Illegal;
- u32 Cnt_Crc8_fail;
- u32 Cnt_all;
-} FALSE_ALARM_STATISTICS, *PFALSE_ALARM_STATISTICS;
-
-
-
-#endif
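
The deleted r819xU_HTType.h mapped the over-the-air HT Capabilities element straight onto a __packed bit-field structure. A hedged sketch of how such an element is typically lifted out of a received information-element buffer, assuming the 26-byte HT_CAPABILITY_ELE layout above; bit-field order is compiler and endian dependent, which is one reason mainline code usually parses these bytes with explicit masks instead:

	#include <linux/string.h>
	#include <linux/types.h>

	static bool peer_has_sgi40(const u8 *ie_payload, size_t len)
	{
		HT_CAPABILITY_ELE cap;

		if (len < sizeof(cap))
			return false;
		memcpy(&cap, ie_payload, sizeof(cap));	/* avoid unaligned access */

		return cap.ShortGI40Mhz;		/* short GI supported in 40 MHz */
	}
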
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 7bdcbd39a3b2..723c8630e9e2 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -182,7 +182,7 @@ static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
}
-void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
+static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index d6a6de3a64f5..ecfb66538eb3 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -17,7 +17,8 @@
#include "r819xU_firmware_img.h"
#include "r819xU_firmware.h"
#include <linux/firmware.h>
-void firmware_init_param(struct net_device *dev)
+
+static void firmware_init_param(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
rt_firmware *pfirmware = priv->pFirmware;
@@ -29,7 +30,8 @@ void firmware_init_param(struct net_device *dev)
* segment the img and use the ptr and length to remember info on each segment
*
*/
-bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len)
+static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
+ u32 buffer_len)
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool rt_status = true;
@@ -103,46 +105,6 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
}
-bool
-fwSendNullPacket(
- struct net_device *dev,
- u32 Length
-)
-{
- bool rtStatus = true;
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct sk_buff *skb;
- cb_desc *tcb_desc;
- unsigned char *ptr_buf;
- bool bLastInitPacket = false;
-
- //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
-
- //Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
- skb = dev_alloc_skb(Length+ 4);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
- tcb_desc->bLastIniPkt = bLastInitPacket;
- ptr_buf = skb_put(skb, Length);
- memset(ptr_buf,0,Length);
- tcb_desc->txbuf_size= (u16)Length;
-
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
- }
-
- //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
- return rtStatus;
-}
-
-
//-----------------------------------------------------------------------------
// Procedure: Check whether main code is download OK. If OK, turn on CPU
//
@@ -156,7 +118,7 @@ fwSendNullPacket(
// NDIS_STATUS_FAILURE - the following initialization process should be terminated
// NDIS_STATUS_SUCCESS - if firmware initialization process success
//-----------------------------------------------------------------------------
-bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
+static bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
{
bool rt_status = true;
int check_putcodeOK_time = 200000, check_bootOk_time = 200000;
@@ -205,7 +167,7 @@ CPUCheckMainCodeOKAndTurnOnCPU_Fail:
return rt_status;
}
-bool CPUcheck_firmware_ready(struct net_device *dev)
+static bool CPUcheck_firmware_ready(struct net_device *dev)
{
bool rt_status = true;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.c b/drivers/staging/rtl8192u/r819xU_firmware_img.c
index df0f9d1648ec..0785de72a5ea 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware_img.c
@@ -1,5 +1,6 @@
/*Created on 2008/ 7/16, 5:31*/
#include <linux/types.h>
+#include "r819xU_firmware_img.h"
u32 Rtl8192UsbPHY_REGArray[] = {
0x0, };
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 39cd426bc5e5..b9f35313c7ab 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -44,7 +44,7 @@ static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
* output: none
* return: u32 return the shift bit position of the mask
******************************************************************************/
-u32 rtl8192_CalculateBitShift(u32 bitmask)
+static u32 rtl8192_CalculateBitShift(u32 bitmask)
{
u32 i;
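
rtl8192_CalculateBitShift() scans for the lowest set bit of a register mask so that one mask constant can drive both the extract and the insert half of a read-modify-write. A short sketch of that use; the helper below is illustrative and assumes the driver's u32 typedefs:

	/* e.g. mask 0x0000ff00 -> shift 8 */
	static u32 set_masked_field(u32 reg_value, u32 mask, u32 new_value)
	{
		u32 shift = rtl8192_CalculateBitShift(mask);

		return (reg_value & ~mask) | ((new_value << shift) & mask);
	}
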
@@ -144,8 +144,8 @@ static void phy_FwRFSerialWrite(struct net_device *dev,
* Driver here need to implement (1) and (2)
* ---need more spec for this information.
******************************************************************************/
-u32 rtl8192_phy_RFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
- u32 offset)
+static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 ret = 0;
@@ -229,8 +229,9 @@ u32 rtl8192_phy_RFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
* Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
* ---------------------------------------------------------------------------
*****************************************************************************/
-void rtl8192_phy_RFSerialWrite(struct net_device *dev,
- RF90_RADIO_PATH_E eRFPath, u32 offset, u32 data)
+static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset,
+ u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 DataAndAddr = 0, new_offset = 0;
@@ -571,7 +572,7 @@ void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
* notice: Initialization value here is constant and it should never
* be changed
*****************************************************************************/
-void rtl8192_InitBBRFRegDef(struct net_device *dev)
+static void rtl8192_InitBBRFRegDef(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -780,7 +781,7 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, HW90_BLOCK_E CheckBlock,
* notice: Initialization value may change all the time, so please make
* sure it has been synced with the newest.
******************************************************************************/
-void rtl8192_BB_Config_ParaFile(struct net_device *dev)
+static void rtl8192_BB_Config_ParaFile(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reg_u8 = 0, eCheckItem = 0, status = 0;
@@ -1070,7 +1071,7 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
* return: none
* notice:
******************************************************************************/
-void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
+static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
@@ -1239,9 +1240,9 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
* return: true if finished, false otherwise
* notice:
******************************************************************************/
-u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
- u32 CmdTableSz, SwChnlCmdID CmdID, u32 Para1,
- u32 Para2, u32 msDelay)
+static u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
+ u32 CmdTableSz, SwChnlCmdID CmdID,
+ u32 Para1, u32 Para2, u32 msDelay)
{
SwChnlCmd *pCmd;
@@ -1276,8 +1277,8 @@ u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
* return: true if finished, false otherwise
* notice: Wait for simpler function to replace it
*****************************************************************************/
-u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage,
- u8 *step, u32 *delay)
+static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
+ u8 *stage, u8 *step, u32 *delay)
{
struct r8192_priv *priv = ieee80211_priv(dev);
SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
@@ -1433,7 +1434,7 @@ u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage,
* return: none
* notice: We should not call this function directly
*****************************************************************************/
-void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
+static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 delay = 0;
diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
index 6a43312380e0..f160eee52f09 100644
--- a/drivers/staging/rtl8712/Kconfig
+++ b/drivers/staging/rtl8712/Kconfig
@@ -4,7 +4,6 @@ config R8712U
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
- default N
---help---
This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
If built as a module, it will be called r8712u.
@@ -12,7 +11,6 @@ config R8712U
config R8712_TX_AGGR
bool "Realtek RTL8712U Transmit Aggregation code"
depends on R8712U && BROKEN
- default N
---help---
This option provides transmit aggregation for the Realtek RTL8712 USB device.
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index a074fe810169..3362e5e32bcc 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -168,7 +168,7 @@ struct _adapter {
struct task_struct *xmitThread;
pid_t recvThread;
uint(*dvobj_init)(struct _adapter *adapter);
- void (*dvobj_deinit)(struct _adapter *adapter);
+ void (*dvobj_deinit)(struct _adapter *adapter);
struct net_device *pnetdev;
int bup;
struct net_device_stats stats;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index cc68d9748edb..57fef70ad984 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -78,9 +78,9 @@ uint r8712_is_cckrates_included(u8 *rate)
if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
(((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
return true;
- i++;
- }
- return false;
+ i++;
+ }
+ return false;
}
uint r8712_is_cckratesonly_included(u8 *rate)
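
r8712_is_cckrates_included() above leans on the 802.11 supported-rates encoding: each byte holds the rate in 0.5 Mbit/s units with bit 0x80 flagging a basic rate, so masking with 0x7f and comparing against 2, 4, 11 and 22 matches the CCK rates 1, 2, 5.5 and 11 Mbit/s. The same test as a small standalone helper (illustrative, not part of the patch):

	#include <linux/types.h>

	static bool is_cck_rate(u8 rate_byte)
	{
		u8 r = rate_byte & 0x7f;	/* strip the basic-rate flag */

		return r == 2 || r == 4 || r == 11 || r == 22;	/* 1, 2, 5.5, 11 Mbit/s */
	}
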
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 37fe33005c02..6bd08213cb70 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -239,7 +239,7 @@ static u32 start_drv_threads(struct _adapter *padapter)
{
padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, "%s",
padapter->pnetdev->name);
- if (IS_ERR(padapter->cmdThread) < 0)
+ if (IS_ERR(padapter->cmdThread))
return _FAIL;
return _SUCCESS;
}
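
The os_intfs.c hunk fixes the error check after kthread_run(): the macro returns either a valid task_struct pointer or an ERR_PTR()-encoded error, and IS_ERR() already yields a boolean, so comparing its result with < 0 could never be true. A hedged sketch of the usual pattern; struct my_priv and cmd_thread_fn() are illustrative stand-ins:

	#include <linux/err.h>
	#include <linux/kthread.h>

	struct my_priv {
		struct task_struct *cmd_task;
	};

	static int cmd_thread_fn(void *data)
	{
		return 0;	/* illustrative thread body */
	}

	static int start_cmd_thread(struct my_priv *priv, const char *name)
	{
		priv->cmd_task = kthread_run(cmd_thread_fn, priv, "%s", name);
		if (IS_ERR(priv->cmd_task))
			return PTR_ERR(priv->cmd_task);	/* e.g. -ENOMEM */
		return 0;
	}
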
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index f1ccc7ebbda7..566235a14a80 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -147,7 +147,8 @@ static inline u32 _queue_empty(struct __queue *pqueue)
return is_list_empty(&(pqueue->queue));
}
-static inline u32 end_of_queue_search(struct list_head *head, struct list_head *plist)
+static inline u32 end_of_queue_search(struct list_head *head,
+ struct list_head *plist)
{
if (head == plist)
return true;
@@ -164,7 +165,7 @@ static inline void sleep_schedulable(int ms)
delta = 1;/* 1 ms */
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(delta) != 0)
- return ;
+ return;
}
static inline u8 *_malloc(u32 sz)
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index ea965370d1ac..0723b2f73aad 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -90,7 +90,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ +
RECVBUFF_ALIGN_SZ);
if (pskb) {
- pskb->dev = padapter->pnetdev;
tmpaddr = (addr_t)pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
@@ -1083,7 +1082,6 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
alloc_sz += 6;
pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
if (pkt_copy) {
- pkt_copy->dev = padapter->pnetdev;
precvframe->u.hdr.pkt = pkt_copy;
skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data)
% 4));
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index c71c7e55bb36..a67185db392b 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -468,10 +468,9 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
/* notes: translate IELength & Length after assign to cmdsz; */
- pdev_network->Length = cpu_to_le32(pcmd->cmdsz);
- pdev_network->IELength = cpu_to_le32(pdev_network->IELength);
- pdev_network->Ssid.SsidLength = cpu_to_le32(
- pdev_network->Ssid.SsidLength);
+ pdev_network->Length = pcmd->cmdsz;
+ pdev_network->IELength = pdev_network->IELength;
+ pdev_network->Ssid.SsidLength = pdev_network->Ssid.SsidLength;
r8712_enqueue_cmd(pcmdpriv, pcmd);
return _SUCCESS;
}
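
The r8712_createbss_cmd() hunk drops cpu_to_le32() calls that converted values in place (the replacement even leaves one field assigned to itself) while the rest of the driver reads those fields as CPU-order integers. When a field genuinely is little-endian on the wire, the usual kernel approach is to type it __le32 and convert exactly once at the boundary so sparse can flag mixed-up assignments; a minimal sketch under that assumption, with an illustrative structure:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct bss_cmd_hdr {			/* illustrative wire layout */
		__le32 length;
		__le32 ie_length;
	};

	static void fill_bss_cmd_hdr(struct bss_cmd_hdr *hdr, u32 len, u32 ie_len)
	{
		hdr->length = cpu_to_le32(len);		/* convert once, here */
		hdr->ie_length = cpu_to_le32(ie_len);
	}
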
@@ -911,7 +910,7 @@ void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((pcmd->res != H2C_SUCCESS))
+ if (pcmd->res != H2C_SUCCESS)
_set_timer(&pmlmepriv->assoc_timer, 1);
r8712_free_cmd_obj(pcmd);
}
@@ -928,7 +927,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
pcmd->parmbuf;
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
- if ((pcmd->res != H2C_SUCCESS))
+ if (pcmd->res != H2C_SUCCESS)
_set_timer(&pmlmepriv->assoc_timer, 1);
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
#ifdef __BIG_ENDIAN
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index aae5125a2e7e..5ffc489e9501 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -1244,17 +1244,18 @@ static sint aes_decipher(u8 *key, uint hdrlen,
(frtype == WIFI_DATA_CFPOLL) ||
(frtype == WIFI_DATA_CFACKPOLL)) {
qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
hdrlen += 2;
- } else if ((frsubtype == 0x08) ||
+ } else if ((frsubtype == 0x08) ||
(frsubtype == 0x09) ||
(frsubtype == 0x0a) ||
(frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
- } else
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
qc_exists = 0;
+ }
/* now, decrypt pframe with hdrlen offset and plen long */
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
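
The aes_decipher() hunk above only reflows existing logic: for QoS data subtypes the 802.11 header carries a 2-octet QoS Control field, so hdrlen is bumped up to WLAN_HDR_A3_QOS_LEN before the payload offset is computed, and the extra 8 octets skipped afterwards are the CCMP header ("8 is for extiv" in the code). The same arithmetic as a small helper; WLAN_HDR_A3_QOS_LEN is the driver's constant, the helper itself is illustrative:

	static uint ccmp_payload_offset(uint hdrlen, int qc_exists)
	{
		if (qc_exists && hdrlen != WLAN_HDR_A3_QOS_LEN)
			hdrlen += 2;		/* 2-octet QoS Control field */

		return hdrlen + 8;		/* 8-octet CCMP header (ext IV) */
	}
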
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
new file mode 100644
index 000000000000..abccc9dabd65
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -0,0 +1,11 @@
+config R8821AE
+ tristate "RealTek RTL8821AE Wireless LAN NIC driver"
+ depends on PCI && WLAN && MAC80211
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select EEPROM_93CX6
+ select CRYPTO
+ default N
+ ---help---
+ If built as a module, it will be called r8821ae.ko.
diff --git a/drivers/staging/rtl8821ae/Makefile b/drivers/staging/rtl8821ae/Makefile
new file mode 100644
index 000000000000..8a23bd7e8842
--- /dev/null
+++ b/drivers/staging/rtl8821ae/Makefile
@@ -0,0 +1,35 @@
+PCI_MAIN_OBJS := base.o \
+ rc.o \
+ debug.o \
+ regd.o \
+ efuse.o \
+ cam.o \
+ ps.o \
+ core.o \
+ stats.o \
+ pci.o \
+
+BT_COEXIST_OBJS:= btcoexist/halbtc8192e2ant.o\
+ btcoexist/halbtc8723b1ant.o\
+ btcoexist/halbtc8723b2ant.o\
+ btcoexist/halbtcoutsrc.o\
+ btcoexist/rtl_btc.o \
+
+PCI_8821AE_HAL_OBJS:= \
+ rtl8821ae/hw.o \
+ rtl8821ae/table.o \
+ rtl8821ae/sw.o \
+ rtl8821ae/trx.o \
+ rtl8821ae/led.o \
+ rtl8821ae/fw.o \
+ rtl8821ae/phy.o \
+ rtl8821ae/rf.o \
+ rtl8821ae/dm.o \
+ rtl8821ae/pwrseq.o \
+ rtl8821ae/pwrseqcmd.o \
+ rtl8821ae/hal_btc.o \
+ rtl8821ae/hal_bt_coexist.o \
+
+rtl8821ae-objs += $(BT_COEXIST_OBJS) $(PCI_MAIN_OBJS) $(PCI_8821AE_HAL_OBJS)
+
+obj-$(CONFIG_R8821AE) += rtl8821ae.o
diff --git a/drivers/staging/rtl8821ae/TODO b/drivers/staging/rtl8821ae/TODO
new file mode 100644
index 000000000000..3ee7529d4ed5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/TODO
@@ -0,0 +1,10 @@
+Realtek 8821AE PCI wifi driver TODO:
+ - remove built-in btcoexist module when the "real" one gets upstream
+ - remove built-in rtlwifi code by porting driver to use the "real" one
+ in the drivers/net/ directory.
+ - fix up coding style issues
+
+Please send any patches for this driver to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+and the <devel@driverdev.osuosl.org> mailing list.
+
diff --git a/drivers/staging/rtl8821ae/base.c b/drivers/staging/rtl8821ae/base.c
new file mode 100644
index 000000000000..e5073fe24770
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.c
@@ -0,0 +1,1868 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/ip.h>
+#include <linux/module.h>
+#include "wifi.h"
+#include "rc.h"
+#include "base.h"
+#include "efuse.h"
+#include "cam.h"
+#include "ps.h"
+#include "regd.h"
+#include "pci.h"
+
+/*
+ *NOTICE!!!: This file will be very big, we should
+ *keep it clear under following roles:
+ *
+ *This file include following part, so, if you add new
+ *functions into this file, please check which part it
+ *should includes. or check if you should add new part
+ *for this file:
+ *
+ *1) mac80211 init functions
+ *2) tx information functions
+ *3) functions called by core.c
+ *4) wq & timer callback functions
+ *5) frame process functions
+ *6) IOT functions
+ *7) sysfs functions
+ *8) vif functions
+ *9) ...
+ */
+
+/*********************************************************
+ *
+ * mac80211 init functions
+ *
+ *********************************************************/
+static struct ieee80211_channel rtl_channeltable_2g[] = {
+ {.center_freq = 2412, .hw_value = 1,},
+ {.center_freq = 2417, .hw_value = 2,},
+ {.center_freq = 2422, .hw_value = 3,},
+ {.center_freq = 2427, .hw_value = 4,},
+ {.center_freq = 2432, .hw_value = 5,},
+ {.center_freq = 2437, .hw_value = 6,},
+ {.center_freq = 2442, .hw_value = 7,},
+ {.center_freq = 2447, .hw_value = 8,},
+ {.center_freq = 2452, .hw_value = 9,},
+ {.center_freq = 2457, .hw_value = 10,},
+ {.center_freq = 2462, .hw_value = 11,},
+ {.center_freq = 2467, .hw_value = 12,},
+ {.center_freq = 2472, .hw_value = 13,},
+ {.center_freq = 2484, .hw_value = 14,},
+};
+
+static struct ieee80211_channel rtl_channeltable_5g[] = {
+ {.center_freq = 5180, .hw_value = 36,},
+ {.center_freq = 5200, .hw_value = 40,},
+ {.center_freq = 5220, .hw_value = 44,},
+ {.center_freq = 5240, .hw_value = 48,},
+ {.center_freq = 5260, .hw_value = 52,},
+ {.center_freq = 5280, .hw_value = 56,},
+ {.center_freq = 5300, .hw_value = 60,},
+ {.center_freq = 5320, .hw_value = 64,},
+ {.center_freq = 5500, .hw_value = 100,},
+ {.center_freq = 5520, .hw_value = 104,},
+ {.center_freq = 5540, .hw_value = 108,},
+ {.center_freq = 5560, .hw_value = 112,},
+ {.center_freq = 5580, .hw_value = 116,},
+ {.center_freq = 5600, .hw_value = 120,},
+ {.center_freq = 5620, .hw_value = 124,},
+ {.center_freq = 5640, .hw_value = 128,},
+ {.center_freq = 5660, .hw_value = 132,},
+ {.center_freq = 5680, .hw_value = 136,},
+ {.center_freq = 5700, .hw_value = 140,},
+ {.center_freq = 5745, .hw_value = 149,},
+ {.center_freq = 5765, .hw_value = 153,},
+ {.center_freq = 5785, .hw_value = 157,},
+ {.center_freq = 5805, .hw_value = 161,},
+ {.center_freq = 5825, .hw_value = 165,},
+};
+
+static struct ieee80211_rate rtl_ratetable_2g[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static struct ieee80211_rate rtl_ratetable_5g[] = {
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static const struct ieee80211_supported_band rtl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+
+ .channels = rtl_channeltable_2g,
+ .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
+
+ .bitrates = rtl_ratetable_2g,
+ .n_bitrates = ARRAY_SIZE(rtl_ratetable_2g),
+
+ .ht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtl_band_5ghz = {
+ .band = IEEE80211_BAND_5GHZ,
+
+ .channels = rtl_channeltable_5g,
+ .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
+
+ .bitrates = rtl_ratetable_5g,
+ .n_bitrates = ARRAY_SIZE(rtl_ratetable_5g),
+
+ .ht_cap = {0},
+};
+
+static const u8 tid_to_ac[] = {
+ 2, /* IEEE80211_AC_BE */
+ 3, /* IEEE80211_AC_BK */
+ 3, /* IEEE80211_AC_BK */
+ 2, /* IEEE80211_AC_BE */
+ 1, /* IEEE80211_AC_VI */
+ 1, /* IEEE80211_AC_VI */
+ 0, /* IEEE80211_AC_VO */
+ 0, /* IEEE80211_AC_VO */
+};
+
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
+{
+ return tid_to_ac[tid];
+}
+
+static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+ if (rtlpriv->rtlhal.disable_amsdu_8k)
+ ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
+
+ /*
+ *Maximum length of AMPDU that the STA can receive.
+ *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
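+	/* e.g. IEEE80211_HT_MAX_AMPDU_64K is exponent 3: 2^(13 + 3) - 1 = 65535 octets */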
+
+	/* Minimum MPDU start spacing */
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /*
+ *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ *base on ant_num
+ *rx_mask: RX mask
+ *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7
+ *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15
+ *if rx_ant >=3 rx_mask[2]=0xff;
+ *if BW_40 rx_mask[4]=0x01;
+ *highest supported RX rate
+ */
+ if (rtlpriv->dm.supp_phymode_switch) {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Support phy mode switch\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ } else {
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_2T2R) {
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+ } else if (get_rf_type(rtlphy) == RF_1T1R) {
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("1T1R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+ }
+ }
+}
+
+static void _rtl_init_mac80211(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_supported_band *sband;
+
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+ rtlhal->bandset == BAND_ON_BOTH) {
+ /* 1: 2.4 G bands */
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+ /* 2: 5 G bands */
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+ &rtl_band_2ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+
+ /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ * to default value(1T1R) */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+ &rtl_band_5ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Err BAND %d\n",
+ rtlhal->current_bandtype));
+ }
+ }
+ /* <5> set hw caps */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+ IEEE80211_HW_BEACON_FILTER |
+#endif
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
+ IEEE80211_HW_MFP_CAPABLE | 0;
+
+ /* swlps or hwlps has been set in diff chip in init_sw_vars */
+ if (rtlpriv->psc.b_swctrl_lps)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
+ 0;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+#else
+/*<delete in kernel end>*/
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) ;
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+#endif
+
+ hw->wiphy->rts_threshold = 2347;
+
+ hw->queues = AC_MAX;
+ hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
+
+ /* TODO: Correct this value for our hw */
+ /* TODO: define these hard code value */
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 4;
+ /* hw->max_rates = 1; */
+ hw->sta_data_size = sizeof(struct rtl_sta_info);
+#ifdef VIF_TODO
+ hw->vif_data_size = sizeof(struct rtl_vif_info);
+#endif
+
+ /* <6> mac address */
+ if (is_valid_ether_addr(rtlefuse->dev_addr)) {
+ SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
+ } else {
+ u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ }
+
+}
+
+static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* <1> timer */
+ init_timer(&rtlpriv->works.watchdog_timer);
+ setup_timer(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, (unsigned long)hw);
+ init_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer);
+ setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+ rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+ /* <2> work queue */
+ rtlpriv->works.hw = hw;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+/*<delete in kernel end>*/
+ rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+/*<delete in kernel start>*/
+#else
+ rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name);
+#endif
+/*<delete in kernel end>*/
+ INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+ (void *)rtl_watchdog_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+ (void *)rtl_ips_nic_off_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
+ (void *)rtl_swlps_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
+ (void *)rtl_swlps_rfon_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
+ (void *)rtl_fwevt_wq_callback);
+
+}
+
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ del_timer_sync(&rtlpriv->works.watchdog_timer);
+
+ cancel_delayed_work(&rtlpriv->works.watchdog_wq);
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work(&rtlpriv->works.ps_work);
+ cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+ cancel_delayed_work(&rtlpriv->works.fwevt_wq);
+}
+
+void rtl_init_rfkill(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ /*set init state to on */
+ rtlpriv->rfkill.rfkill_state = 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, 0);
+
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ if (valid) {
+ printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
+ rtlpriv->rfkill.rfkill_state ? "on" : "off");
+
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+
+ wiphy_rfkill_start_polling(hw->wiphy);
+}
+
+void rtl_deinit_rfkill(struct ieee80211_hw *hw)
+{
+ wiphy_rfkill_stop_polling(hw->wiphy);
+}
+
+#ifdef VIF_TODO
+static void rtl_init_vif(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ INIT_LIST_HEAD(&rtlpriv->vif_priv.vif_list);
+
+ rtlpriv->vif_priv.vifs = 0;
+}
+#endif
+
+int rtl_init_core(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+
+ /* <1> init mac80211 */
+ _rtl_init_mac80211(hw);
+ rtlmac->hw = hw;
+ rtlmac->link_state = MAC80211_NOLINK;
+
+ /* <2> rate control register */
+ hw->rate_control_algorithm = "rtl_rc";
+
+ /*
+ * <3> init CRDA must come after init
+ * mac80211 hw in _rtl_init_mac80211.
+ */
+ if (rtl_regd_init(hw, rtl_reg_notifier)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
+ return 1;
+ }
+
+ /* <4> locks */
+ mutex_init(&rtlpriv->locks.conf_mutex);
+ spin_lock_init(&rtlpriv->locks.ips_lock);
+ spin_lock_init(&rtlpriv->locks.irq_th_lock);
+ spin_lock_init(&rtlpriv->locks.h2c_lock);
+ spin_lock_init(&rtlpriv->locks.rf_ps_lock);
+ spin_lock_init(&rtlpriv->locks.rf_lock);
+ spin_lock_init(&rtlpriv->locks.lps_lock);
+ spin_lock_init(&rtlpriv->locks.waitq_lock);
+ spin_lock_init(&rtlpriv->locks.entry_list_lock);
+ spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
+ spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
+ spin_lock_init(&rtlpriv->locks.fw_ps_lock);
+ spin_lock_init(&rtlpriv->locks.iqk_lock);
+ /* <5> init list */
+ INIT_LIST_HEAD(&rtlpriv->entry_list);
+
+ /* <6> init deferred work */
+ _rtl_init_deferred_work(hw);
+
+ /* <7> */
+#ifdef VIF_TODO
+ rtl_init_vif(hw);
+#endif
+
+ return 0;
+}
+
+void rtl_deinit_core(struct ieee80211_hw *hw)
+{
+}
+
+void rtl_init_rx_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
+}
+
+/*********************************************************
+ *
+ * tx information functions
+ *
+ *********************************************************/
+static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ tcb_desc->use_shortpreamble = false;
+
+ /* 1M can only use Long Preamble. 11B spec */
+ if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
+ return;
+ else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ tcb_desc->use_shortpreamble = true;
+
+ return;
+}
+
+static void _rtl_query_shortgi(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 rate_flag = info->control.rates[0].flags;
+ u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
+ tcb_desc->use_shortgi = false;
+
+ if (sta == NULL)
+ return;
+
+ sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+
+ if (!(sta->ht_cap.ht_supported))
+ return;
+
+ if (!sgi_40 && !sgi_20)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ bw_40 = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ if ((bw_40 == true) && sgi_40)
+ tcb_desc->use_shortgi = true;
+ else if ((bw_40 == false) && sgi_20)
+ tcb_desc->use_shortgi = true;
+
+ if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
+ tcb_desc->use_shortgi = false;
+}
+
+static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ /* Common Settings */
+ tcb_desc->b_rts_stbc = false;
+ tcb_desc->b_cts_enable = false;
+ tcb_desc->rts_sc = 0;
+ tcb_desc->b_rts_bw = false;
+ tcb_desc->b_rts_use_shortpreamble = false;
+ tcb_desc->b_rts_use_shortgi = false;
+
+ if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ /* Use CTS-to-SELF in protection mode. */
+ tcb_desc->b_rts_enable = true;
+ tcb_desc->b_cts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /* Use RTS-CTS in protection mode. */
+ tcb_desc->b_rts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ }
+}
+
+static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 ratr_index = 7;
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ ratr_index = sta_entry->ratr_index;
+ }
+ if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ tcb_desc->ratr_index = 0;
+ } else if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
+ tcb_desc->use_driver_rate = 1;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ } else {
+ tcb_desc->ratr_index = ratr_index;
+ }
+ } else if (mac->opmode == NL80211_IFTYPE_AP) {
+ tcb_desc->ratr_index = ratr_index;
+ }
+ }
+
+ if (rtlpriv->dm.b_useramask) {
+ tcb_desc->ratr_index = ratr_index;
+ /* TODO we will differentiate adhoc and station future */
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ tcb_desc->mac_id = 0;
+ if (mac->mode == WIRELESS_MODE_N_24G)
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+ else if (mac->mode == WIRELESS_MODE_N_5G)
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+ else if (mac->mode & WIRELESS_MODE_G)
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+ else if (mac->mode & WIRELESS_MODE_B)
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+ else if (mac->mode & WIRELESS_MODE_A)
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (NULL != sta) {
+ if (sta->aid > 0)
+ tcb_desc->mac_id = sta->aid + 1;
+ else
+ tcb_desc->mac_id = 1;
+ } else {
+ tcb_desc->mac_id = 0;
+ }
+ }
+ }
+}
+
+static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ tcb_desc->b_packet_bw = false;
+ if (!sta)
+ return;
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (!(sta->ht_cap.ht_supported) ||
+ !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ return;
+ } else if (mac->opmode == NL80211_IFTYPE_STATION) {
+ if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
+ return;
+ }
+ if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+ return;
+
+	/* use legacy rate, shall use 20 MHz */
+ if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
+ return;
+
+ tcb_desc->b_packet_bw = true;
+}
+
+static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 hw_rate;
+
+ if ((get_rf_type(rtlphy) == RF_2T2R) && (sta->ht_cap.mcs.rx_mask[1] != 0))
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
+ else
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
+
+ return hw_rate;
+}
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct ieee80211_rate *txrate;
+ u16 fc = rtl_get_fc(skb);
+
+ txrate = ieee80211_get_tx_rate(hw, info);
+ if (txrate != NULL)
+ tcb_desc->hw_rate = txrate->hw_value;
+
+ if (ieee80211_is_data(fc)) {
+ /*
+ *rtl_rc.c sets the rate index to 0 for special data
+ *and mgmt frames that need a low data rate, so
+ *tcb_desc->hw_rate is only used for those frames.
+ */
+ if (info->control.rates[0].idx == 0 ||
+ ieee80211_is_nullfunc(fc)) {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+
+ tcb_desc->disable_ratefallback = 1;
+ } else {
+ /*
+ *hw never uses hw_rate when
+ *tcb_desc->use_driver_rate = false, so the
+ *highest N rate is not forced here; the N rates
+ *are then all controlled by the FW.
+ */
+ if (sta && (sta->ht_cap.ht_supported)) {
+ tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta);
+ } else {
+ if (rtlmac->mode == WIRELESS_MODE_B) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
+ } else {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
+ }
+ }
+ }
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->b_multicast = 1;
+ else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->b_broadcast = 1;
+
+ _rtl_txrate_selectmode(hw, sta, tcb_desc);
+ _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
+ _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
+ _rtl_query_shortgi(hw, sta, tcb_desc, info);
+ _rtl_query_protection_mode(hw, tcb_desc, info);
+ } else {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->disable_ratefallback = 1;
+ tcb_desc->mac_id = 0;
+ tcb_desc->b_packet_bw = false;
+ }
+}
+/* EXPORT_SYMBOL(rtl_get_tcb_desc); */
+
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 fc = rtl_get_fc(skb);
+
+ if (rtlpriv->dm.supp_phymode_switch &&
+ mac->link_state < MAC80211_LINKED &&
+ (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+ if (ieee80211_is_auth(fc)) {
+ RT_TRACE(COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+ rtl_ips_nic_on(hw);
+
+ mac->link_state = MAC80211_LINKING;
+ /* Dual MAC */
+ rtlpriv->phy.b_need_iqk = true;
+
+ }
+
+ return true;
+}
+
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa,
+ u8 *bssid, u16 tid);
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 fc = rtl_get_fc(skb);
+ u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 category;
+
+ if (!ieee80211_is_action(fc))
+ return true;
+
+ category = *act;
+ act++;
+ switch (category) {
+ case ACT_CAT_BA:
+ switch (*act) {
+ case ACT_ADDBAREQ:
+ if (mac->act_scanning)
+ return false;
+
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBAREQ From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req\n"),
+ skb->data, skb->len);
+ if (!is_tx) {
+ struct ieee80211_sta *sta = NULL;
+ struct rtl_sta_info *sta_entry = NULL;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ u16 capab = 0, tid = 0;
+ struct rtl_tid_data *tid_data;
+ struct sk_buff *skb_delba = NULL;
+ struct ieee80211_rx_status rx_status = { 0 };
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, hdr->addr3);
+ if (sta == NULL) {
+ RT_TRACE((COMP_SEND | COMP_RECV),
+ DBG_EMERG, ("sta is NULL\n"));
+ rcu_read_unlock();
+ return true;
+ }
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry) {
+ rcu_read_unlock();
+ return true;
+ }
+ capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+ tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ tid_data = &sta_entry->tids[tid];
+ if (tid_data->agg.rx_agg_state ==
+ RTL_RX_AGG_START) {
+ skb_delba = rtl_make_del_ba(hw,
+ hdr->addr2,
+ hdr->addr3,
+ tid);
+ if (skb_delba) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+ rx_status.freq = hw->conf.chandef.chan->center_freq;
+ rx_status.band = hw->conf.chandef.chan->band;
+#else
+ rx_status.freq = hw->conf.channel->center_freq;
+ rx_status.band = hw->conf.channel->band;
+#endif
+ rx_status.flag |= RX_FLAG_DECRYPTED;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
+ rx_status.rate_idx = 0;
+ rx_status.signal = 50 + 10;
+ memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status,
+ sizeof(rx_status));
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("fake del\n"), skb_delba->data,
+ skb_delba->len);
+ ieee80211_rx_irqsafe(hw, skb_delba);
+ }
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case ACT_ADDBARSP:
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
+ break;
+ case ACT_DELBA:
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("ACT_ADDBADEL From :%pM\n", hdr->addr2));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+/* should be called before software encryption */
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u16 fc = rtl_get_fc(skb);
+ u16 ether_type;
+ u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ const struct iphdr *ip;
+
+ if (!ieee80211_is_data(fc))
+ goto end;
+
+
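+ /* the IP header and EtherType follow the 802.11 header
+ * and the LLC/SNAP header */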
+ ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+ SNAP_SIZE + PROTOC_TYPE_SIZE);
+ ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+ ether_type = ntohs(ether_type);
+
+ if (ETH_P_IP == ether_type) {
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+ (ip->ihl << 2));
+ if (((((u8 *) udp)[1] == 68) &&
+ (((u8 *) udp)[3] == 67)) ||
+ ((((u8 *) udp)[1] == 67) &&
+ (((u8 *) udp)[3] == 68))) {
+ /*
+ * 68 : UDP BOOTP client
+ * 67 : UDP BOOTP server
+ */
+ RT_TRACE((COMP_SEND | COMP_RECV),
+ DBG_DMESG, ("dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies =
+ jiffies;
+ }
+
+ return true;
+ }
+ }
+ } else if (ETH_P_ARP == ether_type) {
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (ETH_P_PAE == ether_type) {
+ RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtlpriv->ra.is_special_data = true;
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (0x86DD == ether_type) { /* ETH_P_IPV6 */
+ return true;
+ }
+
+end:
+ rtlpriv->ra.is_special_data = false;
+ return false;
+}
+
+/*********************************************************
+ *
+ * functions called by core.c
+ *
+ *********************************************************/
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry)
+ return -ENXIO;
+ tid_data = &sta_entry->tids[tid];
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ tid_data->seq_number));
+
+ *ssn = tid_data->seq_number;
+ tid_data->agg.agg_state = RTL_AGG_START;
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+}
+
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+}
+
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (!sta_entry)
+ return -ENXIO;
+ tid_data = &sta_entry->tids[tid];
+
+ RT_TRACE(COMP_RECV, DBG_DMESG,
+ ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ tid_data->seq_number));
+
+ tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
+ return 0;
+}
+
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP;
+
+ return 0;
+}
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_sta_info *sta_entry = NULL;
+
+ if (sta == NULL)
+ return -EINVAL;
+
+ if (!sta->addr) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ RT_TRACE(COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", sta->addr, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid_data = &sta_entry->tids[tid];
+ sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
+
+ return 0;
+}
+
+/*********************************************************
+ *
+ * wq & timer callback functions
+ *
+ *********************************************************/
+/* this function is used for roaming */
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control) &&
+ !ieee80211_is_probe_resp(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (skb->len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ rtlpriv->link_info.bcn_rx_inperiod++;
+}
+
+void rtl_watchdog_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+ struct rtl_works,
+ watchdog_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ bool b_busytraffic = false;
+ bool b_tx_busy_traffic = false;
+ bool b_rx_busy_traffic = false;
+ bool b_higher_busytraffic = false;
+ bool b_higher_busyrxtraffic = false;
+ u8 idx, tid;
+ u32 rx_cnt_inp4eriod = 0;
+ u32 tx_cnt_inp4eriod = 0;
+ u32 aver_rx_cnt_inperiod = 0;
+ u32 aver_tx_cnt_inperiod = 0;
+ u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
+ u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
+ bool benter_ps = false;
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ /* <1> Determine if action frame is allowed */
+ if (mac->link_state > MAC80211_NOLINK) {
+ if (mac->cnt_after_linked < 20)
+ mac->cnt_after_linked++;
+ } else {
+ mac->cnt_after_linked = 0;
+ }
+
+ /* <2> check whether traffic is busy; when it is
+ * busy we do not change channel */
+ if (mac->link_state >= MAC80211_LINKED) {
+
+ /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
+ for (idx = 0; idx <= 2; idx++) {
+ rtlpriv->link_info.num_rx_in4period[idx] =
+ rtlpriv->link_info.num_rx_in4period[idx + 1];
+ rtlpriv->link_info.num_tx_in4period[idx] =
+ rtlpriv->link_info.num_tx_in4period[idx + 1];
+ }
+ rtlpriv->link_info.num_rx_in4period[3] =
+ rtlpriv->link_info.num_rx_inperiod;
+ rtlpriv->link_info.num_tx_in4period[3] =
+ rtlpriv->link_info.num_tx_inperiod;
+ for (idx = 0; idx <= 3; idx++) {
+ rx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_rx_in4period[idx];
+ tx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_tx_in4period[idx];
+ }
+ aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
+ aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
+
+ /* (2) check traffic busy */
+ if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
+ b_busytraffic = true;
+ if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
+ b_rx_busy_traffic = true;
+ else
+ b_tx_busy_traffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (aver_rx_cnt_inperiod > 4000 ||
+ aver_tx_cnt_inperiod > 4000) {
+ b_higher_busytraffic = true;
+
+ /* Extremely high Rx data. */
+ if (aver_rx_cnt_inperiod > 5000)
+ b_higher_busyrxtraffic = true;
+ }
+
+ /* check every tid's tx traffic */
+ for (tid = 0; tid <= 7; tid++) {
+ for (idx = 0; idx <= 2; idx++)
+ rtlpriv->link_info.tidtx_in4period[tid][idx] =
+ rtlpriv->link_info.tidtx_in4period[tid]
+ [idx + 1];
+ rtlpriv->link_info.tidtx_in4period[tid][3] =
+ rtlpriv->link_info.tidtx_inperiod[tid];
+
+ for (idx = 0; idx <= 3; idx++)
+ tidtx_inp4eriod[tid] +=
+ rtlpriv->link_info.tidtx_in4period[tid][idx];
+ aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4;
+ if (aver_tidtx_inperiod[tid] > 5000)
+ rtlpriv->link_info.higher_busytxtraffic[tid] =
+ true;
+ else
+ rtlpriv->link_info.higher_busytxtraffic[tid] =
+ false;
+ }
+
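+ /* enter LPS only when the period saw little traffic:
+ * no more than 8 tx+rx frames and no more than 2 rx frames */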
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2))
+ benter_ps = false;
+ else
+ benter_ps = true;
+
+ /* LeisurePS only works in infra mode. */
+ if (benter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+ }
+
+ rtlpriv->link_info.num_rx_inperiod = 0;
+ rtlpriv->link_info.num_tx_inperiod = 0;
+ for (tid = 0; tid <= 7; tid++)
+ rtlpriv->link_info.tidtx_inperiod[tid] = 0;
+
+ rtlpriv->link_info.b_busytraffic = b_busytraffic;
+ rtlpriv->link_info.b_rx_busy_traffic = b_rx_busy_traffic;
+ rtlpriv->link_info.b_tx_busy_traffic = b_tx_busy_traffic;
+ rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
+ rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+
+ /* <3> DM */
+ rtlpriv->cfg->ops->dm_watchdog(hw);
+
+ /* <4> roaming */
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+ if ((rtlpriv->link_info.bcn_rx_inperiod +
+ rtlpriv->link_info.num_rx_inperiod) == 0) {
+ rtlpriv->link_info.roam_times++;
+ RT_TRACE(COMP_ERR, DBG_DMESG, ("AP off for %d s\n",
+ (rtlpriv->link_info.roam_times * 2)));
+
+ /* if no beacon has been received for 10s,
+ * we should reconnect to this AP */
+ if (rtlpriv->link_info.roam_times >= 5) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("AP off, try to reconnect now\n"));
+ rtlpriv->link_info.roam_times = 0;
+ ieee80211_connection_loss(rtlpriv->mac80211.vif);
+ }
+ } else {
+ rtlpriv->link_info.roam_times = 0;
+ }
+ }
+ rtlpriv->link_info.bcn_rx_inperiod = 0;
+}
+
+void rtl_watch_dog_timer_callback(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.watchdog_wq, 0);
+
+ mod_timer(&rtlpriv->works.watchdog_timer,
+ jiffies + MSECS(RTL_WATCH_DOG_TIME));
+}
+void rtl_fwevt_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->c2h_command_handle(hw);
+}
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
+
+ if (buddy_priv == NULL)
+ return;
+
+ rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
+}
+/*********************************************************
+ *
+ * frame process functions
+ *
+ *********************************************************/
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie)
+{
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ u8 *pos, *end;
+
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
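+ /* walk the (id, len, data) information elements that
+ * follow the fixed beacon fields */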
+ while (pos < end) {
+ if (pos + 2 + pos[1] > end)
+ return NULL;
+
+ if (pos[0] == ie)
+ return pos;
+
+ pos += 2 + pos[1];
+ }
+ return NULL;
+}
+
+/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */
+/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */
+struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
+ enum ieee80211_smps_mode smps,
+ u8 *da, u8 *bssid)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct sk_buff *skb;
+ struct ieee80211_mgmt_compat *action_frame;
+
+ /* 27 = header + category + action + smps mode */
+ skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 27);
+ memset(action_frame, 0, 27);
+ memcpy(action_frame->da, da, ETH_ALEN);
+ memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_HT;
+ action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:/* 0 */
+ case IEEE80211_SMPS_NUM_MODES:/* 4 */
+ WARN_ON(1);
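+ /* fall through: treat unexpected modes as SMPS off */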
+ case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
+ break;
+ case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */
+ break;
+ case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */
+ break;
+ }
+
+ return skb;
+}
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ enum ieee80211_smps_mode smps)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+ struct rtl_tcb_desc tcb_desc;
+ u8 bssid[ETH_ALEN] = {0};
+
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (rtlpriv->mac80211.act_scanning)
+ goto err_free;
+
+ if (!sta)
+ goto err_free;
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP)
+ memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN);
+ else
+ memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN);
+
+ skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
+ /* this is a management frame of the action subtype */
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_sta_info *sta_entry =
+ (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->mimo_ps = smps;
+ /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */
+
+ info->control.rates[0].idx = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+ info->band = hw->conf.chandef.chan->band;
+#else
+ info->band = hw->conf.channel->band;
+#endif
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ info->control.sta = sta;
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ }
+ return 1;
+
+err_free:
+ return 0;
+}
+/* EXPORT_SYMBOL(rtl_send_smps_action); */
+
+/* mac80211 has issues when it receives a del ba, so when an ADDBA
+ * request arrives while rx aggregation is already open we build a
+ * fake del_ba here to let mac80211 release its BA related
+ * resources; this del_ba is used for tx */
+struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
+ u8 *sa, u8 *bssid, u16 tid)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *action_frame;
+ u16 params;
+
+ /* room for the management header plus the delba action fields */
+ skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hw->extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 34);
+ memset(action_frame, 0, 34);
+ memcpy(action_frame->sa, sa, ETH_ALEN);
+ memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_BACK;
+ action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
+ params = (u16)(1 << 11); /* bit 11 initiator */
+ params |= (u16)(tid << 12); /* bit 15:12 TID number */
+
+ action_frame->u.action.u.delba.params = cpu_to_le16(params);
+ action_frame->u.action.u.delba.reason_code =
+ cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
+
+ return skb;
+}
+
+/*********************************************************
+ *
+ * IOT functions
+ *
+ *********************************************************/
+static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw,
+ struct octet_string vendor_ie)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool matched = false;
+ static u8 athcap_1[] = { 0x00, 0x03, 0x7F };
+ static u8 athcap_2[] = { 0x00, 0x13, 0x74 };
+ static u8 broadcap_1[] = { 0x00, 0x10, 0x18 };
+ static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 };
+ static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 };
+ static u8 racap[] = { 0x00, 0x0c, 0x43 };
+ static u8 ciscocap[] = { 0x00, 0x40, 0x96 };
+ static u8 marvcap[] = { 0x00, 0x50, 0x43 };
+
+ if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 ||
+ memcmp(vendor_ie.octet, athcap_2, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_ATH;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 ||
+ memcmp(vendor_ie.octet, broadcap_2, 3) == 0 ||
+ memcmp(vendor_ie.octet, broadcap_3, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_BROAD;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, racap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_RAL;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_CISCO;
+ matched = true;
+ } else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) {
+ rtlpriv->mac80211.vendor = PEER_MARV;
+ matched = true;
+ }
+
+ return matched;
+}
+
+bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data,
+ unsigned int len)
+{
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct octet_string vendor_ie;
+ u8 *pos, *end;
+
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
+ while (pos < end) {
+ if (pos[0] == 221) { /* vendor-specific IE */
+ vendor_ie.length = pos[1];
+ vendor_ie.octet = &pos[2];
+ if (rtl_chk_vendor_ouisub(hw, vendor_ie))
+ return true;
+ }
+
+ if (pos + 2 + pos[1] > end)
+ return false;
+
+ pos += 2 + pos[1];
+ }
+ return false;
+}
+
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *)data;
+ u32 vendor = PEER_UNKNOWN;
+
+ static u8 ap3_1[3] = { 0x00, 0x14, 0xbf };
+ static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 };
+ static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e };
+ static u8 ap4_1[3] = { 0x00, 0x90, 0xcc };
+ static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e };
+ static u8 ap4_3[3] = { 0x00, 0x18, 0x02 };
+ static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
+ static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
+ static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
+ static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
+ static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
+ static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
+ static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
+ static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
+ static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
+ static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
+
+ if (mac->opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mac->link_state == MAC80211_NOLINK) {
+ mac->vendor = PEER_UNKNOWN;
+ return;
+ }
+
+ if (mac->cnt_after_linked > 2)
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+ /* and only beacons from the associated BSSID, please */
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+ return;
+
+ if (rtl_find_221_ie(hw, data, len))
+ vendor = mac->vendor;
+
+ if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_3, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_4, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_5, 3) == 0) ||
+ (memcmp(mac->bssid, ap5_6, 3) == 0) ||
+ vendor == PEER_ATH) {
+ vendor = PEER_ATH;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
+ } else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_5, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap4_3, 3) == 0) ||
+ vendor == PEER_RAL) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
+ vendor = PEER_RAL;
+ } else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
+ vendor == PEER_CISCO) {
+ vendor = PEER_CISCO;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
+ } else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
+ (memcmp(mac->bssid, ap3_2, 3) == 0) ||
+ (memcmp(mac->bssid, ap3_3, 3) == 0) ||
+ vendor == PEER_BROAD) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
+ vendor = PEER_BROAD;
+ } else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
+ vendor == PEER_MARV) {
+ vendor = PEER_MARV;
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
+ }
+
+ mac->vendor = vendor;
+}
+
+/*********************************************************
+ *
+ * sysfs functions
+ *
+ *********************************************************/
+static ssize_t rtl_show_debug_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
+}
+
+static ssize_t rtl_store_debug_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret) {
+ printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
+ } else {
+ rtlpriv->dbg.global_debuglevel = val;
+ printk(KERN_DEBUG "debuglevel:%x\n",
+ rtlpriv->dbg.global_debuglevel);
+ }
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ rtl_show_debug_level, rtl_store_debug_level);
+
+static struct attribute *rtl_sysfs_entries[] = {
+
+ &dev_attr_debug_level.attr,
+
+ NULL
+};
+
+/*
+ * "name" is the folder name which will be
+ * put in the device directory like:
+ * sys/devices/pci0000:00/0000:00:1c.4/
+ * 0000:06:00.0/rtlsysfs
+ */
+struct attribute_group rtl_attribute_group = {
+ .name = "rtlsysfs",
+ .attrs = rtl_sysfs_entries,
+};
+
+#ifdef VIF_TODO
+/*********************************************************
+ *
+ * vif functions
+ *
+ *********************************************************/
+static inline struct ieee80211_vif *
+rtl_get_vif(struct rtl_vif_info *vif_priv)
+{
+ return container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+/* Protected by ar->mutex or RCU */
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_vif_info *cvif;
+
+ list_for_each_entry_rcu(cvif, &rtlpriv->vif_priv.vif_list, list) {
+ if (cvif->active)
+ return rtl_get_vif(cvif);
+ }
+
+ return NULL;
+}
+
+static inline bool is_main_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = (rtl_get_main_vif(hw) == vif);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtl_vif_info *vif_info = (void *) vif->drv_priv;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int vif_id = -1;
+
+ if (rtlpriv->vif_priv.vifs >= MAX_VIRTUAL_MAC) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("vif number can not bigger than %d, now vifs is:%d\n",
+ MAX_VIRTUAL_MAC, rtlpriv->vif_priv.vifs));
+ return false;
+ }
+
+ rcu_read_lock();
+ vif_id = bitmap_find_free_region(&rtlpriv->vif_priv.vif_bitmap,
+ MAX_VIRTUAL_MAC, 0);
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("%s vid_id:%d\n", __func__, vif_id));
+
+ if (vif_id < 0) {
+ rcu_read_unlock();
+ return false;
+ }
+
+ BUG_ON(rtlpriv->vif_priv.vif[vif_id].id != vif_id);
+ vif_info->active = true;
+ vif_info->id = vif_id;
+ vif_info->enable_beacon = false;
+ rtlpriv->vif_priv.vifs++;
+ if (rtlpriv->vif_priv.vifs > 1) {
+ rtlpriv->psc.b_inactiveps = false;
+ rtlpriv->psc.b_swctrl_lps = false;
+ rtlpriv->psc.b_fwctrl_lps = false;
+ }
+
+ list_add_tail_rcu(&vif_info->list, &rtlpriv->vif_priv.vif_list);
+ rcu_assign_pointer(rtlpriv->vif_priv.vif[vif_id].vif, vif);
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG, ("vifaddress:%p %p %p\n",
+ rtlpriv->vif_priv.vif[vif_id].vif, vif, rtl_get_main_vif(hw)));
+
+ rcu_read_unlock();
+
+ return true;
+}
+#endif
+
+
+#if 0
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+#endif
+struct rtl_global_var global_var = {};
+
+int rtl_core_module_init(void)
+{
+ if (rtl_rate_control_register())
+ printk(KERN_DEBUG "rtl: Unable to register rtl_rc, use default RC !!\n");
+
+ /* add proc for debug */
+ rtl_proc_add_topdir();
+
+ /* init some global vars */
+ INIT_LIST_HEAD(&global_var.glb_priv_list);
+ spin_lock_init(&global_var.glb_list_lock);
+
+ return 0;
+}
+
+void rtl_core_module_exit(void)
+{
+ /*RC*/
+ rtl_rate_control_unregister();
+
+ /* remove proc dir for debug */
+ rtl_proc_remove_topdir();
+}
+
+#if 0
+module_init(rtl_core_module_init);
+module_exit(rtl_core_module_exit);
+#endif
diff --git a/drivers/staging/rtl8821ae/base.h b/drivers/staging/rtl8821ae/base.h
new file mode 100644
index 000000000000..629d14f42f0b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/base.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BASE_H__
+#define __RTL_BASE_H__
+
+#include "compat.h"
+
+enum ap_peer {
+ PEER_UNKNOWN = 0,
+ PEER_RTL = 1,
+ PEER_RTL_92SE = 2,
+ PEER_BROAD = 3,
+ PEER_RAL = 4,
+ PEER_ATH = 5,
+ PEER_CISCO = 6,
+ PEER_MARV = 7,
+ PEER_AIRGO = 9,
+ PEER_MAX = 10,
+};
+
+#define RTL_DUMMY_OFFSET 0
+#define RTL_DUMMY_UNIT 8
+#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
+#define RTL_TX_DESC_SIZE 32
+#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
+
+#define HT_AMSDU_SIZE_4K 3839
+#define HT_AMSDU_SIZE_8K 7935
+
+#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
+
+#define RTL_RATE_COUNT_LEGACY 12
+#define RTL_CHANNEL_COUNT 14
+
+#define FRAME_OFFSET_FRAME_CONTROL 0
+#define FRAME_OFFSET_DURATION 2
+#define FRAME_OFFSET_ADDRESS1 4
+#define FRAME_OFFSET_ADDRESS2 10
+#define FRAME_OFFSET_ADDRESS3 16
+#define FRAME_OFFSET_SEQUENCE 22
+#define FRAME_OFFSET_ADDRESS4 24
+
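+/* The setters below write individual fields directly into a raw
+ * 802.11 header buffer, using the byte offsets defined above. */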
+#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
+ WRITEEF2BYTE(_hdr, _val)
+#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
+ WRITEEF1BYTE(_hdr, _val)
+#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
+ SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
+#define SET_80211_HDR_TO_DS(_hdr, _val) \
+ SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
+
+#define SET_80211_PS_POLL_AID(_hdr, _val) \
+ WRITEEF2BYTE(((u8*)(_hdr))+2, _val)
+#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
+ CP_MACADDR(((u8*)(_hdr))+4, (u8*)(_val))
+#define SET_80211_PS_POLL_TA(_hdr, _val) \
+ CP_MACADDR(((u8*)(_hdr))+10, (u8*)(_val))
+
+#define SET_80211_HDR_DURATION(_hdr, _val) \
+ WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_DURATION, _val)
+#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8*)(_val))
+#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
+ CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8*)(_val))
+#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
+ WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
+
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
+ WRITEEF4BYTE(((u8*)(__phdr)) + 24, __val)
+#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
+ WRITEEF4BYTE(((u8*)(__phdr)) + 28, __val)
+#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
+ WRITEEF2BYTE(((u8*)(__phdr)) + 32, __val)
+#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
+ READEF2BYTE(((u8*)(__phdr)) + 34)
+#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+ WRITEEF2BYTE(((u8*)(__phdr)) + 34, __val)
+#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
+ SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
+ (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+
+int rtl_init_core(struct ieee80211_hw *hw);
+void rtl_deinit_core(struct ieee80211_hw *hw);
+void rtl_init_rx_config(struct ieee80211_hw *hw);
+void rtl_init_rfkill(struct ieee80211_hw *hw);
+void rtl_deinit_rfkill(struct ieee80211_hw *hw);
+
+void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_tx_agg_oper(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_start(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+int rtl_rx_agg_stop(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tid);
+void rtl_watchdog_wq_callback(void *data);
+void rtl_fwevt_wq_callback(void *data);
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
+
+int rtl_send_smps_action(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ enum ieee80211_smps_mode smps);
+u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
+void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
+u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
+extern struct attribute_group rtl_attribute_group;
+void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+extern struct rtl_global_var global_var;
+
+#ifdef VIF_TODO
+struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw);
+bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+#endif
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
new file mode 100644
index 000000000000..5a54bb10698c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c
@@ -0,0 +1,3976 @@
+//============================================================
+// Description:
+//
+// This file is for the 8812a 1-antenna (1ant) BT co-existence mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "halbt_precomp.h"
+#if 1
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8812A_1ANT GLCoexDm8812a1Ant;
+static PCOEX_DM_8812A_1ANT coex_dm=&GLCoexDm8812a1Ant;
+static COEX_STA_8812A_1ANT GLCoexSta8812a1Ant;
+static PCOEX_STA_8812A_1ANT coex_sta=&GLCoexSta8812a1Ant;
+
+const char *const GLBtInfoSrc8812a1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8812a1ant_
+//============================================================
+#if 0
+void
+halbtc8812a1ant_Reg0x550Bit3(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN bSet
+ )
+{
+ u1Byte u1tmp=0;
+
+ u1tmp = btcoexist->btc_read_1byte(btcoexist, 0x550);
+ if(bSet)
+ {
+ u1tmp |= BIT3;
+ }
+ else
+ {
+ u1tmp &= ~BIT3;
+ }
+ btcoexist->btc_write_1byte(btcoexist, 0x550, u1tmp);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+#endif
+u1Byte
+halbtc8812a1ant_BtRssiState(
+ u1Byte level_num,
+ u1Byte rssi_thresh,
+ u1Byte rssi_thresh1
+ )
+{
+ s4Byte bt_rssi=0;
+ u1Byte bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+ if(level_num == 2)
+ {
+ if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(bt_rssi < rssi_thresh)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(level_num == 3)
+ {
+ if(rssi_thresh > rssi_thresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(bt_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(bt_rssi < rssi_thresh)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(bt_rssi < rssi_thresh1)
+ {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u1Byte
+halbtc8812a1ant_WifiRssiState(
+ PBTC_COEXIST btcoexist,
+ u1Byte index,
+ u1Byte level_num,
+ u1Byte rssi_thresh,
+ u1Byte rssi_thresh1
+ )
+{
+ s4Byte wifi_rssi=0;
+ u1Byte wifi_rssi_state;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if(level_num == 2)
+ {
+ if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifi_rssi < rssi_thresh)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(level_num == 3)
+ {
+ if(rssi_thresh > rssi_thresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifi_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT))
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifi_rssi < rssi_thresh)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifi_rssi < rssi_thresh1)
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+void
+halbtc8812a1ant_MonitorBtEnableDisable(
+ PBTC_COEXIST btcoexist
+ )
+{
+ static BOOLEAN pre_bt_disabled=false;
+ static u4Byte bt_disable_cnt=0;
+ BOOLEAN bt_active=true, bt_disable_by68=false, bt_disabled=false;
+ u4Byte u4_tmp=0;
+
+ // This function checks whether BT is disabled
+
+ if( coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ {
+ bt_active = false;
+ }
+ if( coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ {
+ bt_active = false;
+ }
+ if(bt_active)
+ {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt));
+ if(bt_disable_cnt >= 2 || bt_disable_by68)
+ {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(pre_bt_disabled != bt_disabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled":"enabled"),
+ (bt_disabled ? "disabled":"enabled")));
+ pre_bt_disabled = bt_disabled;
+ if(!bt_disabled)
+ {
+ }
+ else
+ {
+ }
+ }
+}
+
+void
+halbtc8812a1ant_MonitorBtCtr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u4Byte reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
+ u4Byte reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+ u1Byte u1_tmp;
+
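+ /* 0x770/0x774: BT high/low priority Tx/Rx counters;
+ * low word = Tx count, high word = Rx count */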
+ reg_hp_tx_rx = 0x770;
+ reg_lp_tx_rx = 0x774;
+
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx);
+ reg_hp_tx = u4_tmp & bMaskLWord;
+ reg_hp_rx = (u4_tmp & bMaskHWord)>>16;
+
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx);
+ reg_lp_tx = u4_tmp & bMaskLWord;
+ reg_lp_rx = (u4_tmp & bMaskHWord)>>16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+
+ // reset counter
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void
+halbtc8812a1ant_QueryBtInfo(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+ static u4Byte btInfoCnt=0;
+
+ if(!btInfoCnt ||
+ (coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP]-btInfoCnt)>2)
+ {
+ buf[0] = dataLen;
+ buf[1] = 0x1; // polling enable, 1=enable, 0=disable
+ buf[2] = 0x2; // polling time in seconds
+ buf[3] = 0x1; // auto report enable, 1=enable, 0=disable
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_INFO, (PVOID)&buf[0]);
+ }
+ btInfoCnt = coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP];
+}
+u1Byte
+halbtc8812a1ant_ActionAlgorithm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN bt_hs_on=false;
+ u1Byte algorithm=BT_8812A_1ANT_COEX_ALGO_UNDEFINED;
+ u1Byte num_of_diff_profile=0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if(!stack_info->bt_link_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+ return algorithm;
+ }
+
+ if(stack_info->sco_exist)
+ num_of_diff_profile++;
+ if(stack_info->hid_exist)
+ num_of_diff_profile++;
+ if(stack_info->pan_exist)
+ num_of_diff_profile++;
+ if(stack_info->a2dp_exist)
+ num_of_diff_profile++;
+
+ if(num_of_diff_profile == 1)
+ {
+ if(stack_info->sco_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(stack_info->hid_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP;
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile == 2)
+ {
+ if(stack_info->sco_exist)
+ {
+ if(stack_info->hid_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( stack_info->hid_exist &&
+ stack_info->a2dp_exist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( stack_info->hid_exist &&
+ stack_info->pan_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile == 3)
+ {
+ if(stack_info->sco_exist)
+ {
+ if( stack_info->hid_exist &&
+ stack_info->a2dp_exist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID;
+ }
+ else if( stack_info->hid_exist &&
+ stack_info->pan_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( stack_info->hid_exist &&
+ stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+ else if(num_of_diff_profile >= 3)
+ {
+ if(stack_info->sco_exist)
+ {
+ if( stack_info->hid_exist &&
+ stack_info->pan_exist &&
+ stack_info->a2dp_exist )
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+BOOLEAN
+halbtc8812a1ant_NeedToDecBtPwr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN ret=false;
+ BOOLEAN bt_hs_on=false, wifi_connected=false;
+ s4Byte bt_hs_rssi=0;
+
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+ return false;
+ if(!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ if(wifi_connected)
+ {
+ if(bt_hs_on)
+ {
+ if(bt_hs_rssi > 37)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+ ret = true;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power because wifi is connected!!\n"));
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void
+halbtc8812a1ant_SetFwDacSwingLevel(
+ PBTC_COEXIST btcoexist,
+ u1Byte dac_swing_lvl
+ )
+{
+ u1Byte h2c_parameter[1] ={0};
+
+ // There are several types of DAC swing:
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetFwDecBtPwr(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN dec_bt_pwr
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s\n",
+ (dec_bt_pwr? "Yes!!":"No!!")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x3; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(dec_bt_pwr)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_DecBtPwr(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN dec_bt_pwr
+ )
+{
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",
+ (force_exec? "force to":""), ((dec_bt_pwr)? "ON":"OFF")));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_dec_bt_pwr=%d, cur_dec_bt_pwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr));
+
+ if(coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8812a1ant_SetFwDecBtPwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void
+halbtc8812a1ant_SetFwBtLnaConstrain(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN bt_lna_cons_on
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s\n",
+ (bt_lna_cons_on? "ON!!":"OFF!!")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x2; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(bt_lna_cons_on)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_SetBtLnaConstrain(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN bt_lna_cons_on
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",
+ (force_exec? "force":""), ((bt_lna_cons_on)? "ON":"OFF")));
+ coex_dm->bCurBtLnaConstrain = bt_lna_cons_on;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n",
+ coex_dm->bPreBtLnaConstrain, coex_dm->bCurBtLnaConstrain));
+
+ if(coex_dm->bPreBtLnaConstrain == coex_dm->bCurBtLnaConstrain)
+ return;
+ }
+ halbtc8812a1ant_SetFwBtLnaConstrain(btcoexist, coex_dm->bCurBtLnaConstrain);
+
+ coex_dm->bPreBtLnaConstrain = coex_dm->bCurBtLnaConstrain;
+}
+
+void
+halbtc8812a1ant_SetFwBtPsdMode(
+ PBTC_COEXIST btcoexist,
+ u1Byte bt_psd_mode
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x\n",
+ bt_psd_mode));
+
+ buf[0] = dataLen;
+ buf[1] = 0x4; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ buf[3] = bt_psd_mode; // OP_Code_Content
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+
+void
+halbtc8812a1ant_SetBtPsdMode(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte bt_psd_mode
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",
+ (force_exec? "force":""), bt_psd_mode));
+ coex_dm->bCurBtPsdMode = bt_psd_mode;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n",
+ coex_dm->bPreBtPsdMode, coex_dm->bCurBtPsdMode));
+
+ if(coex_dm->bPreBtPsdMode == coex_dm->bCurBtPsdMode)
+ return;
+ }
+ halbtc8812a1ant_SetFwBtPsdMode(btcoexist, coex_dm->bCurBtPsdMode);
+
+ coex_dm->bPreBtPsdMode = coex_dm->bCurBtPsdMode;
+}
+
+
+void
+halbtc8812a1ant_SetBtAutoReport(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable_auto_report
+ )
+{
+#if 0
+ u1Byte h2c_parameter[1] ={0};
+
+ h2c_parameter[0] = 0;
+
+ if(enable_auto_report)
+ {
+ h2c_parameter[0] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (enable_auto_report? "Enabled!!":"Disabled!!"), h2c_parameter[0]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+#else
+
+#endif
+}
+
+void
+halbtc8812a1ant_BtAutoReport(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN enable_auto_report
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",
+ (force_exec? "force to":""), ((enable_auto_report)? "Enabled":"Disabled")));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_bt_auto_report=%d, cur_bt_auto_report=%d\n",
+ coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report));
+
+ if(coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8812a1ant_SetBtAutoReport(btcoexist, coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void
+halbtc8812a1ant_FwDacSwingLvl(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte fw_dac_swing_lvl
+ )
+{
+ return; // NOTE: early return, the FW DAC swing adjustment below is currently disabled
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec? "force to":""), fw_dac_swing_lvl));
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_fw_dac_swing_lvl=%d, cur_fw_dac_swing_lvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl, coex_dm->cur_fw_dac_swing_lvl));
+
+ if(coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8812a1ant_SetFwDacSwingLevel(btcoexist, coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetSwRfRxLpfCorner(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN rx_rf_shrink_on
+ )
+{
+ if(rx_rf_shrink_on)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+ // After initialization, we can use coex_dm->bt_rf0x1e_backup
+ if(btcoexist->bInitilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_RfShrink(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN rx_rf_shrink_on
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF")));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_rf_rx_lpf_shrink=%d, cur_rf_rx_lpf_shrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink, coex_dm->cur_rf_rx_lpf_shrink));
+
+ if(coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8812a1ant_SetSwRfRxLpfCorner(btcoexist, coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void
+halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN low_penalty_ra
+ )
+{
+ u1Byte u1_tmp;
+
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4fd);
+ u1_tmp |= BIT0;
+ if(low_penalty_ra)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ u1_tmp &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ u1_tmp |= BIT2;
+ }
+
+ btcoexist->btc_write_1byte(btcoexist, 0x4fd, u1_tmp);
+}
+
+void
+halbtc8812a1ant_LowPenaltyRa(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN low_penalty_ra
+ )
+{
+ return; // NOTE: early return, the low-penalty RA mechanism below is currently disabled
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF")));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_low_penalty_ra=%d, cur_low_penalty_ra=%d\n",
+ coex_dm->pre_low_penalty_ra, coex_dm->cur_low_penalty_ra));
+
+ if(coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(btcoexist, coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void
+halbtc8812a1ant_SetDacSwingReg(
+ PBTC_COEXIST btcoexist,
+ u4Byte level
+ )
+{
+ u1Byte val=(u1Byte)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
+}
+
+void
+halbtc8812a1ant_SetSwFullTimeDacSwing(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN sw_dac_swing_on,
+ u4Byte sw_dac_swing_lvl
+ )
+{
+ if(sw_dac_swing_on)
+ {
+ halbtc8812a1ant_SetDacSwingReg(btcoexist, sw_dac_swing_lvl);
+ }
+ else
+ {
+ halbtc8812a1ant_SetDacSwingReg(btcoexist, 0x18);
+ }
+}
+
+
+void
+halbtc8812a1ant_DacSwing(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN dac_swing_on,
+ u4Byte dac_swing_lvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec? "force to":""), ((dac_swing_on)? "ON":"OFF"), dac_swing_lvl));
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_dac_swing_on=%d, pre_dac_swing_lvl=0x%x, cur_dac_swing_on=%d, cur_dac_swing_lvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl));
+
+ if( (coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl) )
+ return;
+ }
+ delay_ms(30);
+ halbtc8812a1ant_SetSwFullTimeDacSwing(btcoexist, dac_swing_on, dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void
+halbtc8812a1ant_SetAdcBackOff(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN adc_back_off
+ )
+{
+ if(adc_back_off)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+void
+halbtc8812a1ant_AdcBackOff(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN adc_back_off
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec? "force to":""), ((adc_back_off)? "ON":"OFF")));
+ coex_dm->cur_adc_back_off = adc_back_off;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_adc_back_off=%d, cur_adc_back_off=%d\n",
+ coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off));
+
+ if(coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8812a1ant_SetAdcBackOff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void
+halbtc8812a1ant_SetAgcTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN agc_table_en
+ )
+{
+ u1Byte rssi_adjust_val=0;
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if(agc_table_en)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+ rssi_adjust_val = 8;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ // set rssi_adjust_val for wifi module.
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssi_adjust_val);
+}
+
+
+void
+halbtc8812a1ant_AgcTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN agc_table_en
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (force_exec? "force to":""), ((agc_table_en)? "Enable":"Disable")));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_agc_table_en=%d, cur_agc_table_en=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en));
+
+ if(coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8812a1ant_SetAgcTable(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void
+halbtc8812a1ant_SetCoexTable(
+ PBTC_COEXIST btcoexist,
+ u4Byte val0x6c0,
+ u4Byte val0x6c4,
+ u4Byte val0x6c8,
+ u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void
+halbtc8812a1ant_CoexTable(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u4Byte val0x6c0,
+ u4Byte val0x6c4,
+ u4Byte val0x6c8,
+ u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (force_exec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_val0x6c0=0x%x, pre_val0x6c4=0x%x, pre_val0x6c8=0x%x, pre_val0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], cur_val0x6c0=0x%x, cur_val0x6c4=0x%x, cur_val0x6c8=0x%x, cur_val0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc));
+
+ if( (coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc) )
+ return;
+ }
+ halbtc8812a1ant_SetCoexTable(btcoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void
+halbtc8812a1ant_SetFwIgnoreWlanAct(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable
+ )
+{
+ u1Byte dataLen=3;
+ u1Byte buf[5] = {0};
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], %s BT Ignore Wlan_Act\n",
+ (enable? "Enable":"Disable")));
+
+ buf[0] = dataLen;
+ buf[1] = 0x1; // OP_Code
+ buf[2] = 0x1; // OP_Code_Length
+ if(enable)
+ buf[3] = 0x1; // OP_Code_Content
+ else
+ buf[3] = 0x0;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+halbtc8812a1ant_IgnoreWlanAct(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN enable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec? "force to":""), (enable? "ON":"OFF")));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act, coex_dm->cur_ignore_wlan_act));
+
+ if(coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8812a1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void
+halbtc8812a1ant_SetFwPstdma(
+ PBTC_COEXIST btcoexist,
+ u1Byte byte1,
+ u1Byte byte2,
+ u1Byte byte3,
+ u1Byte byte4,
+ u1Byte byte5
+ )
+{
+ u1Byte h2c_parameter[5] ={0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
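+ // Keep a local copy of the 5 TDMA parameter bytes (presumably so the current setting can be shown in the coex debug output).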
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1]<<24|h2c_parameter[2]<<16|h2c_parameter[3]<<8|h2c_parameter[4]));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void
+halbtc8812a1ant_SetLpsRpwm(
+ PBTC_COEXIST btcoexist,
+ u1Byte lps_val,
+ u1Byte rpwm_val
+ )
+{
+ u1Byte lps=lps_val;
+ u1Byte rpwm=rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, NULL);
+}
+
+void
+halbtc8812a1ant_LpsRpwm(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ u1Byte lps_val,
+ u1Byte rpwm_val
+ )
+{
+ BOOLEAN bForceExecPwrCmd=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n",
+ (force_exec? "force to":""), lps_val, rpwm_val));
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_lps/cur_lps=0x%x/0x%x, pre_rpwm/cur_rpwm=0x%x/0x%x!!\n",
+ coex_dm->pre_lps, coex_dm->cur_lps, coex_dm->pre_rpwm, coex_dm->cur_rpwm));
+
+ if( (coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm) )
+ {
+ return;
+ }
+ }
+ halbtc8812a1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void
+halbtc8812a1ant_SwMechanism1(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN shrink_rx_lpf,
+ BOOLEAN low_penalty_ra,
+ BOOLEAN limited_dig,
+ BOOLEAN bt_lna_constrain
+ )
+{
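+ // All SW mechanism calls below are commented out, so this helper is currently a no-op placeholder.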
+ //halbtc8812a1ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ //halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, low_penalty_ra);
+
+ //no limited DIG
+ //halbtc8812a1ant_SetBtLnaConstrain(btcoexist, NORMAL_EXEC, bt_lna_constrain);
+}
+
+void
+halbtc8812a1ant_SwMechanism2(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN agc_table_shift,
+ BOOLEAN adc_back_off,
+ BOOLEAN sw_dac_swing,
+ u4Byte dac_swing_lvl
+ )
+{
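+ // As with SwMechanism1 above, every call here is commented out, so this helper is currently a no-op.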
+ //halbtc8812a1ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+ //halbtc8812a1ant_AdcBackOff(btcoexist, NORMAL_EXEC, adc_back_off);
+ //halbtc8812a1ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, dac_swing_lvl);
+}
+
+void
+halbtc8812a1ant_PsTdma(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN force_exec,
+ BOOLEAN turn_on,
+ u1Byte type
+ )
+{
+ BOOLEAN bTurnOnByCnt=false;
+ u1Byte psTdmaTypeByCnt=0, rssi_adjust_val=0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec? "force to":""), (turn_on? "ON":"OFF"), type));
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if(!force_exec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma));
+
+ if( (coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma) )
+ return;
+ }
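+ // Each TDMA "type" below selects a fixed 5-byte parameter set for FW H2C command 0x60;
+ // a few cases also assign rssi_adjust_val, although it is reset to 0 before being reported (see below).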
+ if(turn_on)
+ {
+ switch(type)
+ {
+ default:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x58);
+ break;
+ case 1:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x48);
+ rssi_adjust_val = 11;
+ break;
+ case 2:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x12, 0x0, 0x48);
+ rssi_adjust_val = 14;
+ break;
+ case 3:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ rssi_adjust_val = 17;
+ break;
+ case 5:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+ break;
+ case 6:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+ break;
+ case 7:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0xa, 0xa, 0x0, 0x48);
+ rssi_adjust_val = 18;
+ break;
+ case 10:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x5, 0x5, 0x0, 0x48);
+ rssi_adjust_val = 20;
+ break;
+ case 12:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+ break;
+
+ case 15:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+ rssi_adjust_val = 18;
+ break;
+
+ case 18:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ rssi_adjust_val = 14;
+ break;
+
+ case 20:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+ break;
+ case 23:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 24:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 25:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 26:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 27:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+ rssi_adjust_val = 22;
+ break;
+ case 28:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0x1a, 0x1a, 0x1, 0x8);
+ break;
+ case 30:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ break;
+ case 31:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0xa, 0x3, 0x31, 0x88);
+ break;
+ case 33:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xa3, 0x25, 0x3, 0x30, 0x88);
+ break;
+ case 34:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x8);
+ break;
+ case 35:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x8);
+ break;
+ case 36:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x3, 0x14, 0x58);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(type)
+ {
+ case 8:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 0:
+ default:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ delay_ms(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ case 9:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 10:
+ halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ delay_ms(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+ rssi_adjust_val = 0; // note: this reset overrides any value assigned in the switch cases above
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val);
+
+ // update pre state
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void
+halbtc8812a1ant_CoexAllOff(
+ PBTC_COEXIST btcoexist
+ )
+{
+ // fw all off
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ // sw all off
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+
+ // hw all off
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+void
+halbtc8812a1ant_WifiParaAdjust(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN enable
+ )
+{
+ if(enable)
+ {
+ halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, false);
+ }
+}
+
+BOOLEAN
+halbtc8812a1ant_IsCommonAction(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN common=false, wifi_connected=false, wifi_busy=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ //halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+
+ if(!wifi_connected &&
+ BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(!wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else if(!wifi_connected &&
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ common = true;
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true);
+
+ common = false;
+ }
+
+ return common;
+}
+
+
+void
+halbtc8812a1ant_TdmaDurationAdjustForAcl(
+ PBTC_COEXIST btcoexist
+ )
+{
+ static s4Byte up,dn,m,n,wait_count;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retry_count=0, bt_info_ext;
+
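+ // Hysteresis logic: "up" counts consecutive 2-second windows with zero BT retries and widens
+ // the WiFi slot once it reaches n; retries narrow the slot and grow the recheck period
+ // (n = 3*m, with m capped at 20, i.e. roughly 120 seconds).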
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], halbtc8812a1ant_TdmaDurationAdjustForAcl()\n"));
+ if(coex_dm->reset_tdma_adjust)
+ {
+ coex_dm->reset_tdma_adjust = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ wait_count = 0;
+ }
+ else
+ {
+ //acquire the BT TRx retry count from BT_Info byte2
+ retry_count = coex_sta->bt_retry_cnt;
+ bt_info_ext = coex_sta->bt_info_ext;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retry_count = %d\n", retry_count));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count));
+ result = 0;
+ wait_count++;
+
+ if(retry_count == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if(up >= n) // if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+ {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retry_count <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) // if retry count < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+ {
+ if (wait_count <= 2)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+ else // retry count > 3: a single 2-second period with more than 3 retries narrows the WiFi duration
+ {
+ if (wait_count == 1)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ if(result == -1)
+ {
+ if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 1)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ }
+ else if(coex_dm->cur_ps_tdma == 2)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 9)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ else if(result == 1)
+ {
+ if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 11)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ }
+ else if(coex_dm->cur_ps_tdma == 9)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ }
+ else if(coex_dm->cur_ps_tdma == 2)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ }
+ }
+
+ if( coex_dm->cur_ps_tdma != 1 &&
+ coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 9 &&
+ coex_dm->cur_ps_tdma != 11 )
+ {
+ // recover to previous adjust type
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, coex_dm->ps_tdma_du_adj_type);
+ }
+ }
+}
+
+u1Byte
+halbtc8812a1ant_PsTdmaTypeByWifiRssi(
+ s4Byte wifi_rssi,
+ s4Byte pre_wifi_rssi,
+ u1Byte wifi_rssi_thresh
+ )
+{
+ u1Byte ps_tdma_type=0;
+
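+ // Simple hysteresis: when the RSSI is rising, require an extra 5 dB above the threshold before
+ // picking type 26, presumably to avoid toggling around the threshold.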
+ if(wifi_rssi > pre_wifi_rssi)
+ {
+ if(wifi_rssi > (wifi_rssi_thresh+5))
+ {
+ ps_tdma_type = 26;
+ }
+ else
+ {
+ ps_tdma_type = 25;
+ }
+ }
+ else
+ {
+ if(wifi_rssi > wifi_rssi_thresh)
+ {
+ ps_tdma_type = 26;
+ }
+ else
+ {
+ ps_tdma_type = 25;
+ }
+ }
+
+ return ps_tdma_type;
+}
+
+void
+halbtc8812a1ant_PsTdmaCheckForPowerSaveState(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN new_ps_state
+ )
+{
+ u1Byte lps_mode=0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if(lps_mode) // already under LPS state
+ {
+ if(new_ps_state)
+ {
+ // keep state under LPS, do nothing.
+ }
+ else
+ {
+ // will leave LPS state, turn off psTdma first
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ }
+ }
+ else // NO PS state
+ {
+ if(new_ps_state)
+ {
+ // will enter LPS state, turn off psTdma first
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ }
+ else
+ {
+ // keep state under NO PS state, do nothing.
+ }
+ }
+}
+
+// SCO only or SCO+PAN(HS)
+void
+halbtc8812a1ant_ActionSco(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 4);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+
+void
+halbtc8812a1ant_ActionHid(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,false,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+void
+halbtc8812a1ant_ActionA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionA2dpPanHs(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionPanEdr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+
+//PAN(HS) only
+void
+halbtc8812a1ant_ActionPanHs(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // fw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+ }
+
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // fw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+ }
+
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+void
+halbtc8812a1ant_ActionPanEdrA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionPanEdrHid(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state;
+ u4Byte wifi_bw;
+
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+void
+halbtc8812a1ant_ActionHidA2dpPanEdr(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionHidA2dp(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u4Byte wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0);
+ bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0);
+
+ if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist))
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18);
+ }
+ else
+ {
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionHs(
+ PBTC_COEXIST btcoexist,
+ BOOLEAN hs_connecting
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS, hs_connecting=%d!!!\n", hs_connecting));
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+
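+ // Pick the coex table from the amount of BT traffic seen in the last monitoring window
+ // (sum of high/low priority TX+RX counters; 1200 appears to be an empirical threshold).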
+ if(hs_connecting)
+ {
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+ }
+ else
+ {
+ if((coex_sta->high_priority_tx+coex_sta->high_priority_rx+
+ coex_sta->low_priority_tx+coex_sta->low_priority_rx)<=1200)
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+ else
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xffffffff, 0xffffffff, 0xffff, 0x3);
+ }
+}
+
+
+void
+halbtc8812a1ant_ActionWifiNotConnected(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN hs_connecting=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ if(hs_connecting)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is connecting!!!\n"));
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN hs_connecting=false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
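+ // Choose PS-TDMA and coex-table settings from the current BT status while WiFi is associating/authenticating/scanning.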
+ if(hs_connecting)
+ {
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+ else
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here, record error number for debugging.
+ coex_dm->error_condition = 1;
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedScan(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()===>\n"));
+
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ // psTdma
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan(), bt is under inquiry/page scan\n"));
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 34);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 4);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 33);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 2;
+ }
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()<===\n"));
+}
+
+void
+halbtc8812a1ant_ActionWifiConnectedSpecialPacket(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist)
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35);
+ else
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 3;
+ }
+ }
+}
+
+void
+halbtc8812a1ant_ActionWifiConnected(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false, wifi_busy=false, bt_hs_on=false;
+ BOOLEAN scan=false, link=false, roam=false;
+ BOOLEAN hs_connecting=false, under4way=false;
+ u4Byte wifi_bw;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
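+ // Dispatch order: 4-way handshake traffic first, then scan/link/roam, then the idle/busy split below.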
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(!wifi_connected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under4way);
+ if(under4way)
+ {
+ halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ if(scan || link || roam)
+ {
+ halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if(!wifi_busy)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi associated-idle!!!\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is under inquiry/page scan!!!\n"));
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist && stack_info->numOfLink==1)
+ {
+ // hid only
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+ // power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ if(stack_info->hid_exist)
+ {
+ if(stack_info->a2dp_exist)
+ {
+ // hid+a2dp
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // hid+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // hid+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ coex_dm->error_condition = 4;
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // a2dp+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // a2dp+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // a2dp only
+ halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ }
+ else if(stack_info->pan_exist)
+ {
+ // pan only
+ if(bt_hs_on)
+ {
+ coex_dm->error_condition = 5;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // temp state, do nothing!!!
+ //DbgPrint("error 6, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+ //DbgPrint("error 6, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ //coex_dm->error_condition = 6;
+ }
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+			// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //error condition, should not reach here
+ coex_dm->error_condition = 7;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi busy!!!\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else
+ {
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+ //DbgPrint("coex_dm->bt_status = 0x%x\n", coex_dm->bt_status);
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status)
+ {
+ if(stack_info->sco_exist)
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ if(bt_hs_on)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n"));
+ halbtc8812a1ant_ActionHs(btcoexist, hs_connecting);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ }
+ }
+ else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status)
+ {
+ if(stack_info->hid_exist && stack_info->numOfLink==1)
+ {
+ // hid only
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+					// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+
+ if(stack_info->hid_exist)
+ {
+ if(stack_info->a2dp_exist)
+ {
+ // hid+a2dp
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // hid+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // hid+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ else
+ {
+ coex_dm->error_condition = 8;
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else if(stack_info->a2dp_exist)
+ {
+ if(stack_info->pan_exist)
+ {
+ if(bt_hs_on)
+ {
+ // a2dp+hs
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ // a2dp+pan
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ // a2dp only
+ halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ }
+ else if(stack_info->pan_exist)
+ {
+ // pan only
+ if(bt_hs_on)
+ {
+ coex_dm->error_condition = 9;
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ }
+ coex_dm->reset_tdma_adjust = true;
+ }
+ else
+ {
+ //DbgPrint("error 10, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ coex_dm->error_condition = 10;
+ }
+ }
+ }
+ else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+				// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ }
+ else
+ {
+ //DbgPrint("error 11, coex_dm->bt_status=%d\n", coex_dm->bt_status);
+ //DbgPrint("error 11, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n",
+ //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist);
+ //error condition, should not reach here
+ coex_dm->error_condition = 11;
+ }
+ }
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()<===\n"));
+}
+
+void
+halbtc8812a1ant_RunSwCoexistMechanism(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+ u1Byte bt_info_original=0, bt_retry_cnt=0;
+ u1Byte algorithm=0;
+
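+	// The unconditional return below keeps the SW coexist mechanism disabled for now; the algorithm dispatch that follows is unreachable.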
+ return;
+ if(stack_info->bProfileNotified)
+ {
+ algorithm = halbtc8812a1ant_ActionAlgorithm(btcoexist);
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm));
+
+ if(halbtc8812a1ant_IsCommonAction(btcoexist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action common.\n"));
+ }
+ else
+ {
+ switch(coex_dm->cur_algorithm)
+ {
+ case BT_8812A_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+ halbtc8812a1ant_ActionSco(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+ halbtc8812a1ant_ActionHid(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+ halbtc8812a1ant_ActionA2dp(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+ halbtc8812a1ant_ActionA2dpPanHs(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+ halbtc8812a1ant_ActionPanEdr(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+ halbtc8812a1ant_ActionPanHs(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+ halbtc8812a1ant_ActionPanEdrA2dp(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+ halbtc8812a1ant_ActionPanEdrHid(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+ halbtc8812a1ant_ActionHidA2dpPanEdr(btcoexist);
+ break;
+ case BT_8812A_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+ halbtc8812a1ant_ActionHidA2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+ }
+}
+
+void
+halbtc8812a1ant_RunCoexistMechanism(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+ if(wifi_under5g)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for 5G <===\n"));
+ return;
+ }
+
+ if(btcoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if(btcoexist->stop_coex_dm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ return;
+ }
+
+ halbtc8812a1ant_RunSwCoexistMechanism(btcoexist);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
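+	// Decision order: BT disabled, wifi under IPS, wifi not connected (scan/link/roam vs. idle), then the wifi-connected case.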
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is disabled!!!\n"));
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if(wifi_busy)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4);
+			// power save must be executed before psTdma.
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ }
+ else if(coex_sta->under_ips)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+ }
+ else if(!wifi_connected)
+ {
+ BOOLEAN scan=false, link=false, roam=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if(scan || link || roam)
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ else
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else // wifi LPS/Busy
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is NOT under IPS!!!\n"));
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, true);
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()<===\n"));
+}
+
+void
+halbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN wifi_connected=false;
+ // force to reset coex mechanism
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+	if(!wifi_connected)	// wifi not connected
+ {
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+
+ halbtc8812a1ant_FwDacSwingLvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8812a1ant_DecBtPwr(btcoexist, FORCE_EXEC, false);
+
+ // sw all off
+ halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false);
+ halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18);
+
+ halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3);
+}
+
+//============================================================
+// work around function start with wa_halbtc8812a1ant_
+//============================================================
+//============================================================
+// extern function start with EXhalbtc8812a1ant_
+//============================================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+ PBTC_COEXIST btcoexist
+ )
+{
+ u4Byte u4_tmp=0;
+ u2Byte u2Tmp=0;
+ u1Byte u1_tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ //ant sw control to BT
+ btcoexist->btc_write_4byte(btcoexist, 0x900, 0x00000400);
+ btcoexist->btc_write_1byte(btcoexist, 0x76d, 0x1);
+ btcoexist->btc_write_1byte(btcoexist, 0xcb3, 0x77);
+ btcoexist->btc_write_1byte(btcoexist, 0xcb7, 0x40);
+
+ // 0x790[5:0]=0x5
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u1_tmp &= 0xc0;
+ u1_tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u1_tmp);
+
+ // PTA parameter
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, 0xffff);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, 0x55555555);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, 0x55555555);
+
+ // coex parameters
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+
+ // enable counter statistics
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ // enable PTA
+ btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+
+ // bt clock related
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4);
+ u1_tmp |= BIT7;
+ btcoexist->btc_write_1byte(btcoexist, 0x4, u1_tmp);
+
+ // bt clock related
+ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+ u1_tmp |= BIT1;
+ btcoexist->btc_write_1byte(btcoexist, 0x7, u1_tmp);
+}
+
+void
+EXhalbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ btcoexist->stop_coex_dm = false;
+
+ halbtc8812a1ant_InitCoexDm(btcoexist);
+}
+
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+ PBTC_COEXIST btcoexist
+ )
+{
+ PBTC_BOARD_INFO board_info=&btcoexist->boardInfo;
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ pu1Byte cli_buf=btcoexist->cli_buf;
+ u1Byte u1_tmp[4], i, bt_info_ext, psTdmaCase=0;
+ u4Byte u4_tmp[4];
+ BOOLEAN roam=false, scan=false, link=false, wifi_under5g=false;
+ BOOLEAN bt_hs_on=false, wifi_busy=false;
+ s4Byte wifi_rssi=0, bt_hs_rssi=0;
+ u4Byte wifi_bw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if(btcoexist->manual_control)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+ if(btcoexist->stop_coex_dm)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if(!board_info->bBtExist)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ board_info->pgAntNum, board_info->btdmAntNum);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((stack_info->bProfileNotified)? "Yes":"No"), stack_info->hciVersion);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_FW_VER);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+ coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", \
+ link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (wifi_under5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy": (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"):((BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)? "non-connected idle":
+ ( (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)? "connected-idle":"busy"))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ if(stack_info->bProfileNotified)
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (bt_info_ext&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for(i=0; i<BT_INFO_SRC_8812A_1ANT_MAX; i++)
+ {
+ if(coex_sta->bt_info_c2h_cnt[i])
+ {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8812a1Ant[i], \
+ coex_sta->bt_info_c2h[i][0], coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2], coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4], coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6], coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ ((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+ ((coex_sta->under_lps? "LPS ON":"LPS OFF")),
+ btcoexist->bt_info.lps1Ant,
+ btcoexist->bt_info.rpwm1Ant);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ if(!btcoexist->manual_control)
+ {
+ // Sw mechanism
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+ coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, coex_dm->limited_dig, coex_dm->bCurBtLnaConstrain);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ // Fw mechanism
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ psTdmaCase = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1],
+ coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3],
+ coex_dm->ps_tdma_para[4], psTdmaCase);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+ coex_dm->error_condition);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+ u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+ (u1_tmp[0]), u4_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+ u1_tmp[0], u1_tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4_tmp[0], u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+#if 0
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
+ u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xf4c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0xf48/ 0xf4c (FA cnt)", \
+ u4_tmp[0], u4_tmp[1]);
+ CL_PRINTF(cli_buf);
+#endif
+
+ u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void
+EXhalbtc8812a1ant_IpsNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ u4Byte u4_tmp=0;
+
+ if(btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ coex_sta->under_ips = true;
+
+ // 0x4c[23]=1
+ u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u4_tmp |= BIT23;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp);
+
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ coex_sta->under_ips = false;
+ //halbtc8812a1ant_InitCoexDm(btcoexist);
+ }
+}
+
+void
+EXhalbtc8812a1ant_LpsNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ if(btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ coex_sta->under_lps = true;
+ }
+	else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ coex_sta->under_lps = false;
+ }
+}
+
+void
+EXhalbtc8812a1ant_ScanNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!wifi_connected) // non-connected scan
+ {
+ //set 0x550[3]=1 before PsTdma
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, true);
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8812a1ant_ActionWifiConnectedScan(btcoexist);
+ }
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!wifi_connected) // non-connected scan
+ {
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_ConnectNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ PBTC_STACK_INFO stack_info=&btcoexist->stack_info;
+ BOOLEAN wifi_connected=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist);
+ }
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+		if(!wifi_connected)	// wifi not connected
+ {
+ //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false);
+ halbtc8812a1ant_ActionWifiNotConnected(btcoexist);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnected(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ u1Byte dataLen=5;
+ u1Byte buf[6] = {0};
+ u1Byte h2c_parameter[3] ={0};
+ BOOLEAN wifi_under5g=false;
+ u4Byte wifi_bw;
+ u1Byte wifi_central_chnl;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+	// only for 2.4G do we need to inform bt of the chnl mask
+ if(!wifi_under5g)
+ {
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ h2c_parameter[0] = 0x1;
+ }
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ h2c_parameter[1] = wifi_central_chnl;
+ if(BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
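+	// Cache what was just reported so DisplayCoexInfo can show the last channel info sent to BT.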
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ buf[0] = dataLen;
+ buf[1] = 0x5; // OP_Code
+ buf[2] = 0x3; // OP_Code_Length
+ buf[3] = h2c_parameter[0]; // OP_Code_Content
+ buf[4] = h2c_parameter[1];
+ buf[5] = h2c_parameter[2];
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]);
+}
+
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ BOOLEAN bSecurityLink=false;
+
+ if(btcoexist->manual_control ||btcoexist->stop_coex_dm)
+ return;
+
+ //if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+ if(btcoexist->bt_info.bt_disabled)
+ {
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else
+ {
+ halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist);
+ }
+ }
+}
+
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+ PBTC_COEXIST btcoexist,
+ pu1Byte tmp_buf,
+ u1Byte length
+ )
+{
+ u1Byte bt_info=0;
+ u1Byte i, rsp_source=0;
+ static u4Byte set_bt_lna_cnt=0, set_bt_psd_mode=0;
+ BOOLEAN bt_busy=false, limited_dig=false;
+ BOOLEAN wifi_connected=false;
+ BOOLEAN bRejApAggPkt=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()===>\n"));
+
+
+ rsp_source = tmp_buf[0]&0xf;
+ if(rsp_source >= BT_INFO_SRC_8812A_1ANT_MAX)
+ rsp_source = BT_INFO_SRC_8812A_1ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rsp_source, length));
+ for(i=0; i<length; i++)
+ {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+ if(i == 1)
+ bt_info = tmp_buf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x]\n", tmp_buf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x, ", tmp_buf[i]));
+ }
+ }
+
+ if(btcoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+ return;
+ }
+ if(btcoexist->stop_coex_dm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+ return;
+ }
+
+ if(BT_INFO_SRC_8812A_1ANT_WIFI_FW != rsp_source)
+ {
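+		// C2H payload: byte 2 = BT retry count, byte 3 = raw BT RSSI (scaled below), byte 4 = extended info bits.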
+ coex_sta->bt_retry_cnt =
+ coex_sta->bt_info_c2h[rsp_source][2];
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3]*2+10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+ // Here we need to resend some wifi info to BT
+		// because bt was reset and lost the info.
+ if( (coex_sta->bt_info_ext & BIT1) )
+ {
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ if(wifi_connected)
+ {
+ EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ set_bt_psd_mode = 0;
+ }
+
+		// test-chip bt patch doesn't support this, so it is temporarily removed.
+		// need to add it back for the mp-chip. 12/20/2012
+#if 0
+ if(set_bt_psd_mode <= 3)
+ {
+ halbtc8812a1ant_SetBtPsdMode(btcoexist, FORCE_EXEC, 0xd);
+ set_bt_psd_mode++;
+ }
+
+ if(coex_dm->bCurBtLnaConstrain)
+ {
+ if( (coex_sta->bt_info_ext & BIT2) )
+ {
+ }
+ else
+ {
+ if(set_bt_lna_cnt <= 3)
+ {
+ halbtc8812a1ant_SetBtLnaConstrain(btcoexist, FORCE_EXEC, true);
+ set_bt_lna_cnt++;
+ }
+ }
+ }
+ else
+ {
+ set_bt_lna_cnt = 0;
+ }
+#endif
+		// test-chip bt patch only responds with status for BT_RSP,
+		// so for now we consider the following only under BT_RSP
+ if(BT_INFO_SRC_8812A_1ANT_BT_RSP == rsp_source)
+ {
+ if( (coex_sta->bt_info_ext & BIT3) )
+ {
+				#if 0 // temporarily disabled because the bt patch reports the wrong value.
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false);
+ #endif
+ }
+ else
+ {
+				// BT already does NOT ignore Wlan activity; do nothing here.
+ }
+
+ if( (coex_sta->bt_info_ext & BIT4) )
+ {
+ // BT auto report already enabled, do nothing
+ }
+ else
+ {
+ halbtc8812a1ant_BtAutoReport(btcoexist, FORCE_EXEC, true);
+ }
+ }
+ }
+
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(bt_info & BT_INFO_8812A_1ANT_B_INQ_PAGE)
+ {
+ coex_sta->c2h_bt_inquiry_page = true;
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_INQ_PAGE;
+ }
+ else
+ {
+ coex_sta->c2h_bt_inquiry_page = false;
+ if(!(bt_info&BT_INFO_8812A_1ANT_B_CONNECTION))
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+ }
+		else if(bt_info == BT_INFO_8812A_1ANT_B_CONNECTION)	// connection exists but not busy
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+ }
+ else if((bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO) ||
+ (bt_info&BT_INFO_8812A_1ANT_B_SCO_BUSY))
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_SCO_BUSY;
+ bRejApAggPkt = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+ }
+ else if(bt_info&BT_INFO_8812A_1ANT_B_ACL_BUSY)
+ {
+ if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+ coex_dm->reset_tdma_adjust = true;
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+ }
+#if 0
+ else if(bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO)
+ {
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl/sco busy!!!\n"));
+ }
+#endif
+ else
+ {
+ //DbgPrint("error, undefined bt_info=0x%x\n", bt_info);
+ coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+ }
+
+ // send delete BA to disable aggregation
+ //btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+ }
+
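+	// Translate the derived BT status into the busy/limited-dig flags consumed elsewhere in the driver.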
+ if( (BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) )
+ {
+ bt_busy = true;
+ }
+ else
+ {
+ bt_busy = false;
+ }
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ if(bt_busy)
+ {
+ limited_dig = true;
+ }
+ else
+ {
+ limited_dig = false;
+ }
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8812a1ant_RunCoexistMechanism(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+void
+EXhalbtc8812a1ant_HaltNotify(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
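+	// On driver halt: tell BT to ignore WLAN activity and turn PS-TDMA off.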
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ halbtc8812a1ant_PsTdma(btcoexist, FORCE_EXEC, false, 0);
+ btcoexist->btc_write_1byte(btcoexist, 0x4f, 0xf);
+ halbtc8812a1ant_WifiParaAdjust(btcoexist, false);
+}
+
+void
+EXhalbtc8812a1ant_PnpNotify(
+ PBTC_COEXIST btcoexist,
+ u1Byte pnpState
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if(BTC_WIFI_PNP_SLEEP == pnpState)
+ {
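+		// Entering sleep: stop the coex state machine, let BT ignore WLAN activity, and leave LPS with TDMA disabled.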
+ btcoexist->stop_coex_dm = true;
+ halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ }
+ else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+ {
+
+ }
+}
+
+void
+EXhalbtc8812a1ant_Periodical(
+ PBTC_COEXIST btcoexist
+ )
+{
+ BOOLEAN wifi_under5g=false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()===>\n"));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g);
+
+ if(wifi_under5g)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical(), return for 5G<===\n"));
+ halbtc8812a1ant_CoexAllOff(btcoexist);
+ return;
+ }
+
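+	// Periodic poll: refresh BT info, the traffic counters, and the BT enable/disable heuristic.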
+ halbtc8812a1ant_QueryBtInfo(btcoexist);
+ halbtc8812a1ant_MonitorBtCtr(btcoexist);
+ halbtc8812a1ant_MonitorBtEnableDisable(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()<===\n"));
+}
+
+void
+EXhalbtc8812a1ant_DbgControl(
+ PBTC_COEXIST btcoexist,
+ u1Byte opCode,
+ u1Byte opLen,
+ pu1Byte pData
+ )
+{
+ switch(opCode)
+ {
+ case BTC_DBG_SET_COEX_NORMAL:
+ btcoexist->manual_control = false;
+ halbtc8812a1ant_InitCoexDm(btcoexist);
+ break;
+ case BTC_DBG_SET_COEX_WIFI_ONLY:
+ btcoexist->manual_control = true;
+ halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9);
+ break;
+ case BTC_DBG_SET_COEX_BT_ONLY:
+ // todo
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
new file mode 100644
index 000000000000..37bdab5ae9f1
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h
@@ -0,0 +1,205 @@
+//===========================================
+// The following is for 8812A_1ANT BT Co-exist definition
+//===========================================
+#define BT_INFO_8812A_1ANT_B_FTP BIT7
+#define BT_INFO_8812A_1ANT_B_A2DP BIT6
+#define BT_INFO_8812A_1ANT_B_HID BIT5
+#define BT_INFO_8812A_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8812A_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8812A_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8812A_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8812A_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_&BIT0))? true:false)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT 2
+
+#define IN
+#define OUT
+
+typedef enum _BT_INFO_SRC_8812A_1ANT{
+ BT_INFO_SRC_8812A_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8812A_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8812A_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8812A_1ANT_MAX
+}BT_INFO_SRC_8812A_1ANT,*PBT_INFO_SRC_8812A_1ANT;
+
+typedef enum _BT_8812A_1ANT_BT_STATUS{
+ BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8812A_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8812A_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8812A_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8812A_1ANT_BT_STATUS_MAX
+}BT_8812A_1ANT_BT_STATUS,*PBT_8812A_1ANT_BT_STATUS;
+
+typedef enum _BT_8812A_1ANT_COEX_ALGO{
+ BT_8812A_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8812A_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8812A_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8812A_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8812A_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8812A_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8812A_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8812A_1ANT_COEX_ALGO_MAX = 0xb,
+}BT_8812A_1ANT_COEX_ALGO,*PBT_8812A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8812A_1ANT{
+ // fw mechanism
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ bool bPreBtLnaConstrain;
+ bool bCurBtLnaConstrain;
+ u8 bPreBtPsdMode;
+ u8 bCurBtPsdMode;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
+
+ // sw mechanism
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ // algorithm related
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u8 error_condition;
+} COEX_DM_8812A_1ANT, *PCOEX_DM_8812A_1ANT;
+
+typedef struct _COEX_STA_8812A_1ANT{
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8812A_1ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+}COEX_STA_8812A_1ANT, *PCOEX_STA_8812A_1ANT;
+
+//===========================================
+// The following is interface which will notify coex module.
+//===========================================
+void
+EXhalbtc8812a1ant_InitHwConfig(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_InitCoexDm(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_IpsNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_LpsNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_ScanNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_ConnectNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_MediaStatusNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_SpecialPacketNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_BtInfoNotify(
+ PBTC_COEXIST btcoexist,
+ u8 *tmp_buf,
+ u8 length
+ );
+void
+EXhalbtc8812a1ant_StackOperationNotify(
+ PBTC_COEXIST btcoexist,
+ u8 type
+ );
+void
+EXhalbtc8812a1ant_HaltNotify(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_PnpNotify(
+ PBTC_COEXIST btcoexist,
+ u8 pnpState
+ );
+void
+EXhalbtc8812a1ant_Periodical(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_DisplayCoexInfo(
+ PBTC_COEXIST btcoexist
+ );
+void
+EXhalbtc8812a1ant_DbgControl(
+ PBTC_COEXIST btcoexist,
+ u8 opCode,
+ u8 opLen,
+ u8 *pData
+ );
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
new file mode 100644
index 000000000000..8e4293a35872
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c
@@ -0,0 +1,1614 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 1Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_1ANT GLCoexDm8723a1Ant;
+static PCOEX_DM_8723A_1ANT pCoexDm=&GLCoexDm8723a1Ant;
+static COEX_STA_8723A_1ANT GLCoexSta8723a1Ant;
+static PCOEX_STA_8723A_1ANT pCoexSta=&GLCoexSta8723a1Ant;
+
+const char *const GLBtInfoSrc8723a1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a1ant_
+//============================================================
+VOID
+halbtc8723a1ant_Reg0x550Bit3(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSet
+ )
+{
+ u1Byte u1tmp=0;
+
+ u1tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x550);
+ if(bSet)
+ {
+ u1tmp |= BIT3;
+ }
+ else
+ {
+ u1tmp &= ~BIT3;
+ }
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x550, u1tmp);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0)));
+}
+
+VOID
+halbtc8723a1ant_NotifyFwScan(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte scanType
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(BTC_SCAN_START == scanType)
+ H2C_Parameter[0] = 0x1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Notify FW for wifi scan, write 0x3b=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3b, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+		// After initialization, we can use pCoexDm->btRf0x1eBackup
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8723a1ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8723a1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a1ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
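+	// The unconditional return below keeps the low-penalty RA adjustment disabled for now.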
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a1ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8723a1ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8723a1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a1ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723a1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
+VOID
+halbtc8723a1ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+ u1Byte realByte1=byte1, realByte5=byte5;
+ BOOLEAN bApEnable=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+
+	// byte1[1:0] != 0 means enable pstdma;
+	// for 2Ant bt coexist, byte1 != 0 means enable pstdma
+ if(byte1)
+ {
+ if(bApEnable)
+ {
+ if(type != 5 && type != 12)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], FW for 1Ant AP mode\n"));
+ realByte1 &= ~BIT4;
+ realByte1 |= BIT5;
+
+ realByte5 |= BIT5;
+ realByte5 &= ~BIT6;
+ }
+ }
+ }
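+	// H2C 0x3a carries the five TDMA parameter bytes; byte1/byte5 may have been adjusted above for 1Ant AP mode.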
+ H2C_Parameter[0] = realByte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = realByte5;
+
+ pCoexDm->psTdmaPara[0] = realByte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = realByte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8723a1ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
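+	// Each case below selects a predefined FW PS-TDMA parameter set; the byte meanings are presumably defined by the BT-coex firmware interface.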
+ if(pCoexDm->bCurPsTdmaOn)
+ {
+ switch(pCoexDm->curPsTdma)
+ {
+ case 1:
+ default:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x0, 0x40);
+ break;
+ case 2:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x0, 0x40);
+ break;
+ case 3:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x3f, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x15, 0x3, 0x10, 0x0);
+ break;
+ case 5:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0x15, 0x3, 0x35, 0xc0);
+ break;
+
+ case 8:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 10:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x0, 0x40);
+ break;
+ case 12:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0xa, 0x3, 0x15, 0xc0);
+ break;
+
+ case 18:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+
+ case 20:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x2a, 0x2a, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x2, 0x40);
+ break;
+ case 23:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x2, 0x40);
+ break;
+ case 24:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x2, 0x40);
+ break;
+ case 25:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+ break;
+ case 26:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 27:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40);
+ break;
+ case 28:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x3, 0x2f, 0x2f, 0x0, 0x0);
+ break;
+
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(pCoexDm->curPsTdma)
+ {
+ case 8:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x8, 0x0, 0x0, 0x0, 0x0);
+ break;
+ case 0:
+ default:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210);
+ break;
+ case 9:
+ halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x110);
+ break;
+
+ }
+ }
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a1ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // fw all off
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ // sw all off
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ // hw all off
+ halbtc8723a1ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a1ant_BtEnableAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a1ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
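+ // 0x770/0x774 hold the BT high-/low-priority traffic counters:
+ // Tx in the low word, Rx in the high word. They are read here and
+ // reset again by the 0x76e write at the end of this function.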
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8723a1ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+ // This function checks whether BT is disabled
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
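+ // All-zero counters mean no BT traffic was seen; all-0xffff counters are
+ // treated as a stuck/invalid reading. Either case counts as "BT not
+ // active" and feeds the disable decision below.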
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+ if(!bBtDisabled)
+ {
+ halbtc8723a1ant_BtEnableAction(pBtCoexist);
+ }
+ else
+ {
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ }
+ }
+}
+
+VOID
+halbtc8723a1ant_TdmaDurationAdjust(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0;
+ u1Byte btState;
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+ u4Byte wifiBw;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ btState = pCoexDm->btStatus;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], TdmaDurationAdjust()\n"));
+ if(pCoexDm->psTdmaGlobalCnt != pCoexDm->psTdmaMonitorCnt)
+ {
+ pCoexDm->psTdmaMonitorCnt = 0;
+ pCoexDm->psTdmaGlobalCnt = 0;
+ }
+ if(pCoexDm->psTdmaMonitorCnt == 0)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], first run BT A2DP + WiFi busy state!!\n"));
+ if(btState == BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ pCoexDm->psTdmaDuAdjType = 22;
+ }
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+ //acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], retryCount = %d\n", retryCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if(up >= n) // retry count has been 0 for n consecutive 2-second periods: widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) // retry count < 3 for 2 consecutive 2-second periods: narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20: recheck whether to adjust the WiFi duration after at most 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+ else // retry count > 3: a single 2-second period with retry count > 3 narrows the WiFi duration
+ {
+ if (WaitCount == 1)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20: recheck whether to adjust the WiFi duration after at most 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT TxRx counter H+L <= 1200\n"));
+ if(btState != BT_STATE_8723A_1ANT_ACL_ONLY_BUSY)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], NOT ACL only busy!\n"));
+ if(BTC_WIFI_BW_HT40 != wifiBw)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 20MHz\n"));
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 22)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ else if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ else if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ pCoexDm->psTdmaDuAdjType = 22;
+ }
+ }
+ // Error handling: if psTdmaDuAdjType is not one of the expected cases,
+ // set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 22) &&
+ (pCoexDm->psTdmaDuAdjType != 23) &&
+ (pCoexDm->psTdmaDuAdjType != 24) &&
+ (pCoexDm->psTdmaDuAdjType != 25) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 40MHz\n"));
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 23)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ else if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 27);
+ pCoexDm->psTdmaDuAdjType = 27;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 27)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25);
+ pCoexDm->psTdmaDuAdjType = 25;
+ }
+ else if(pCoexDm->curPsTdma == 25)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ else if(pCoexDm->curPsTdma == 24)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23);
+ pCoexDm->psTdmaDuAdjType = 23;
+ }
+ }
+ // Error handling: if psTdmaDuAdjType is not one of the expected cases,
+ // set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 23) &&
+ (pCoexDm->psTdmaDuAdjType != 24) &&
+ (pCoexDm->psTdmaDuAdjType != 25) &&
+ (pCoexDm->psTdmaDuAdjType != 27) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24);
+ pCoexDm->psTdmaDuAdjType = 24;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ACL only busy\n"));
+ if (result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ }
+
+ // Error handling: if psTdmaDuAdjType is not one of the expected cases,
+ // set psTdma again.
+ if( (pCoexDm->psTdmaDuAdjType != 1) &&
+ (pCoexDm->psTdmaDuAdjType != 2) &&
+ (pCoexDm->psTdmaDuAdjType != 9) &&
+ (pCoexDm->psTdmaDuAdjType != 11) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ }
+ }
+ }
+
+ // If the current PsTdma does not match the recorded one (e.g. during scan or DHCP),
+ // adjust it back to the previously recorded setting.
+ if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if( !bScan && !bLink && !bRoam)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+ }
+ }
+ pCoexDm->psTdmaMonitorCnt++;
+}
+
+
+VOID
+halbtc8723a1ant_CoexForWifiConnect(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE;
+ u1Byte btState, btInfoOriginal=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ btState = pCoexDm->btStatus;
+ btInfoOriginal = pCoexSta->btInfoC2h[BT_INFO_SRC_8723A_1ANT_BT_RSP][0];
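+ // btStatus was decoded from the last BT_Info C2H in BtInfoNotify();
+ // btInfoOriginal keeps the raw first byte so the ACL-busy case below can
+ // test the A2DP/FTP profile bits directly.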
+
+ if(bWifiConnected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi connected!!\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if( !bWifiBusy &&
+ ((BT_STATE_8723A_1ANT_NO_CONNECTION == btState) ||
+ (BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], [Wifi is idle] or [Bt is non connected idle or Bt is connected idle]!!\n"));
+
+ if(BT_STATE_8723A_1ANT_NO_CONNECTION == btState)
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ else if(BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+ else
+ {
+ if( (BT_STATE_8723A_1ANT_SCO_ONLY_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_ACL_SCO_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_HID_BUSY == btState) ||
+ (BT_STATE_8723A_1ANT_HID_SCO_BUSY == btState) )
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0x60);
+ }
+ else
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+ switch(btState)
+ {
+ case BT_STATE_8723A_1ANT_NO_CONNECTION:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ break;
+ case BT_STATE_8723A_1ANT_CONNECT_IDLE:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ break;
+ case BT_STATE_8723A_1ANT_INQ_OR_PAG:
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ break;
+ case BT_STATE_8723A_1ANT_SCO_ONLY_BUSY:
+ case BT_STATE_8723A_1ANT_ACL_SCO_BUSY:
+ case BT_STATE_8723A_1ANT_HID_BUSY:
+ case BT_STATE_8723A_1ANT_HID_SCO_BUSY:
+ halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+ break;
+ case BT_STATE_8723A_1ANT_ACL_ONLY_BUSY:
+ if (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP)
+ {
+ halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist);
+ }
+ else if(btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ }
+ else if( (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP) &&
+ (btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP) )
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ }
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], error!!!, undefined case in halbtc8723a1ant_CoexForWifiConnect()!!\n"));
+ break;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is disconnected!!\n"));
+ }
+
+ pCoexDm->psTdmaGlobalCnt++;
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8723a1ant_
+//============================================================
+VOID
+wa_halbtc8723a1ant_MonitorC2h(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte tmp1b=0x0;
+ u4Byte curC2hTotalCnt=0x0;
+ static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
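+ // Workaround: if the BT_Info C2H count has not increased for two
+ // consecutive polls and 0x1af reads nonzero, assume the C2H path is
+ // hung, clear 0x1af and count the event in c2hHangDetectCnt.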
+ curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_BT_RSP];
+
+ if(curC2hTotalCnt == preC2hTotalCnt)
+ {
+ sameCntPollingTime++;
+ }
+ else
+ {
+ preC2hTotalCnt = curC2hTotalCnt;
+ sameCntPollingTime = 0;
+ }
+
+ if(sameCntPollingTime >= 2)
+ {
+ tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+ if(tmp1b != 0x0)
+ {
+ pCoexSta->c2hHangDetectCnt++;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+ }
+ }
+}
+
+//============================================================
+// extern functions start with EXhalbtc8723a1ant_
+//============================================================
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+
+ // enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+
+ // coex table
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, 0x0); // 1-Ant coex
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, 0xffff); // wifi break table
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, 0x55555555); //coex table
+
+ // antenna switch control parameter
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0xaaaaaaaa);
+
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210); //set antenna at wifi side if ANTSW is software control
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x870, 0x300); //SPDT(connected with TRSW) control by hardware PTA
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x874, 0x22804000); //ANTSW keep by GNT_BT
+
+ // coexistence parameters
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1); // enable RTK mode PTA
+}
+
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ halbtc8723a1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBoardInfo->bt_exist)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_1ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ if(pStackInfo->bProfileNotified)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8723A_1ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a1Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+ pCoexSta->c2hHangDetectCnt);
+ CL_PRINTF(cliBuf);
+
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "IgnWlanAct", \
+ pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+ u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+ u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+
+ // Check whether the Tx mgmt queue hangs: 0x41b should read 0xf (e.g. 0xd ==> hang)
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ halbtc8723a1ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ //halbtc8723a1ant_InitCoexDm(pBtCoexist);
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+ halbtc8723a1ant_NotifyFwScan(pBtCoexist, type);
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ //set 0x550[3]=1 before PsTdma
+ halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+ }
+
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ //set 0x550[3]=1 before PsTdma
+ halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true);
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); // extend wifi slot
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 18);
+ }
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ BOOLEAN bBtHsOn=FALSE, bBtBusy=FALSE, bForceLps=FALSE;
+
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = BT_INFO_SRC_8723A_1ANT_BT_RSP;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 0)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(BT_INFO_SRC_8723A_1ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt =
+ pCoexSta->btInfoC2h[rspSource][1];
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][3];
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8723A_1ANT_B_INQ_PAGE)
+ {
+ pCoexSta->bC2hBtInquiryPage = true;
+ }
+ else
+ {
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+ }
+ btInfo &= ~BIT2;
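+ // The literal values below are combinations of the BT_INFO_8723A_1ANT_B_*
+ // bits defined in the header, e.g. 0x9 = CONNECTION | ACL_BUSY and
+ // 0x13 = CONNECTION | SCO_ESCO | SCO_BUSY.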
+ if(!(btInfo & BIT0))
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_NO_CONNECTION;
+ bForceLps = FALSE;
+ }
+ else
+ {
+ bForceLps = true;
+ if(btInfo == 0x1)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_CONNECT_IDLE;
+ }
+ else if(btInfo == 0x9)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_ONLY_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x13)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_SCO_ONLY_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x1b)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_SCO_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x29)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_BUSY;
+ bBtBusy = true;
+ }
+ else if(btInfo == 0x3b)
+ {
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_SCO_BUSY;
+ bBtBusy = true;
+ }
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &bBtBusy);
+ if(bForceLps)
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ else
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+
+ if( (BT_STATE_8723A_1ANT_NO_CONNECTION == pCoexDm->btStatus) ||
+ (BT_STATE_8723A_1ANT_CONNECT_IDLE == pCoexDm->btStatus) )
+ {
+ if(pCoexSta->bC2hBtInquiryPage)
+ pCoexDm->btStatus = BT_STATE_8723A_1ANT_INQ_OR_PAG;
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8723a1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+
+ halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ EXhalbtc8723a1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8723a1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE, bWifiConnected=FALSE;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ // workaround for C2H hang
+ wa_halbtc8723a1ant_MonitorC2h(pBtCoexist);
+
+ halbtc8723a1ant_QueryBtInfo(pBtCoexist);
+ halbtc8723a1ant_MonitorBtCtr(pBtCoexist);
+ halbtc8723a1ant_MonitorBtEnableDisable(pBtCoexist);
+
+
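+ // Defer coex tuning while a scan or association is in progress; the
+ // Scan/Connect notify handlers already programmed a suitable PS-TDMA.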
+ if(bScan)
+ return;
+ if(bLink)
+ return;
+
+ if(bWifiConnected)
+ {
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+ else
+ {
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+ }
+ }
+ else
+ {
+ halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
new file mode 100644
index 000000000000..60992f59eb37
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h
@@ -0,0 +1,176 @@
+//===========================================
+// The following is for 8723A 1Ant BT Co-exist definition
+//===========================================
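+// These bits decode byte 0 of the BT_Info C2H event handled in
+// EXhalbtc8723a1ant_BtInfoNotify().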
+#define BT_INFO_8723A_1ANT_B_FTP BIT7
+#define BT_INFO_8723A_1ANT_B_A2DP BIT6
+#define BT_INFO_8723A_1ANT_B_HID BIT5
+#define BT_INFO_8723A_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723A_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723A_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723A_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723A_1ANT_B_CONNECTION BIT0
+
+typedef enum _BT_STATE_8723A_1ANT{
+ BT_STATE_8723A_1ANT_DISABLED = 0,
+ BT_STATE_8723A_1ANT_NO_CONNECTION = 1,
+ BT_STATE_8723A_1ANT_CONNECT_IDLE = 2,
+ BT_STATE_8723A_1ANT_INQ_OR_PAG = 3,
+ BT_STATE_8723A_1ANT_ACL_ONLY_BUSY = 4,
+ BT_STATE_8723A_1ANT_SCO_ONLY_BUSY = 5,
+ BT_STATE_8723A_1ANT_ACL_SCO_BUSY = 6,
+ BT_STATE_8723A_1ANT_HID_BUSY = 7,
+ BT_STATE_8723A_1ANT_HID_SCO_BUSY = 8,
+ BT_STATE_8723A_1ANT_MAX
+}BT_STATE_8723A_1ANT, *PBT_STATE_8723A_1ANT;
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723A_1ANT 2
+
+typedef enum _BT_INFO_SRC_8723A_1ANT{
+ BT_INFO_SRC_8723A_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723A_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723A_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723A_1ANT_MAX
+}BT_INFO_SRC_8723A_1ANT,*PBT_INFO_SRC_8723A_1ANT;
+
+typedef enum _BT_8723A_1ANT_BT_STATUS{
+ BT_8723A_1ANT_BT_STATUS_IDLE = 0x0,
+ BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723A_1ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_8723A_1ANT_BT_STATUS_MAX
+}BT_8723A_1ANT_BT_STATUS,*PBT_8723A_1ANT_BT_STATUS;
+
+typedef enum _BT_8723A_1ANT_COEX_ALGO{
+ BT_8723A_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723A_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723A_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8723A_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR = 0x4,
+ BT_8723A_1ANT_COEX_ALGO_PANHS = 0x5,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
+ BT_8723A_1ANT_COEX_ALGO_PANEDR_HID = 0x7,
+ BT_8723A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
+ BT_8723A_1ANT_COEX_ALGO_HID_A2DP = 0x9,
+ BT_8723A_1ANT_COEX_ALGO_MAX
+}BT_8723A_1ANT_COEX_ALGO,*PBT_8723A_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723A_1ANT{
+ // fw mechanism
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ u4Byte psTdmaMonitorCnt;
+ u4Byte psTdmaGlobalCnt;
+ BOOLEAN bResetTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+} COEX_DM_8723A_1ANT, *PCOEX_DM_8723A_1ANT;
+
+typedef struct _COEX_STA_8723A_1ANT{
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preBtRssiState1;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8723A_1ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+ //BOOLEAN bHoldForStackOperation;
+ //u1Byte bHoldPeriodCnt;
+ // this is for c2h hang work-around
+ u4Byte c2hHangDetectCnt;
+}COEX_STA_8723A_1ANT, *PCOEX_STA_8723A_1ANT;
+
+//===========================================
+// The following are the interfaces that notify the coex module.
+//===========================================
+VOID
+EXhalbtc8723a1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8723a1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
new file mode 100644
index 000000000000..d538ba3d0a2e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "../rtl8821ae/reg.h"
+#include "../rtl8821ae/def.h"
+#include "../rtl8821ae/phy.h"
+#include "../rtl8821ae/dm.h"
+#include "../rtl8821ae/fw.h"
+#include "../rtl8821ae/led.h"
+#include "../rtl8821ae/hw.h"
+#include "../rtl8821ae/pwrseqcmd.h"
+#include "../rtl8821ae/pwrseq.h"
+
+#include "halbtcoutsrc.h"
+
+
+#include "halbtc8192e2ant.h"
+#include "halbtc8723b1ant.h"
+#include "halbtc8723b2ant.h"
+
+
+
+#define GetDefaultAdapter(padapter) padapter
+
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#endif /* __HALBT_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
new file mode 100644
index 000000000000..1b04530d46bc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c
@@ -0,0 +1,3891 @@
+//============================================================
+// Description:
+//
+// This file is for 8192e1ant Co-exist mechanism
+//
+// History
+// 2012/11/15 Cosa first check in.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8192E_1ANT GLCoexDm8192e1Ant;
+static PCOEX_DM_8192E_1ANT pCoexDm=&GLCoexDm8192e1Ant;
+static COEX_STA_8192E_1ANT GLCoexSta8192e1Ant;
+static PCOEX_STA_8192E_1ANT pCoexSta=&GLCoexSta8192e1Ant;
+
+const char *const GLBtInfoSrc8192e1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u4Byte GLCoexVerDate8192e1Ant=20130729;
+u4Byte GLCoexVer8192e1Ant=0x10;
+
+//============================================================
+// local function prototypes, if needed
+//============================================================
+//============================================================
+// local functions start with halbtc8192e1ant_
+//============================================================
+u1Byte
+halbtc8192e1ant_BtRssiState(
+ u1Byte levelNum,
+ u1Byte rssiThresh,
+ u1Byte rssiThresh1
+ )
+{
+ s4Byte btRssi=0;
+ u1Byte btRssiState=pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
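+ // The STAY_* states give the thresholds hysteresis: moving up requires
+ // the RSSI to exceed the threshold by BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT,
+ // while moving down only requires dropping below the bare threshold.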
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return pCoexSta->preBtRssiState;
+ }
+
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh1)
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+u1Byte
+halbtc8192e1ant_WifiRssiState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte index,
+ IN u1Byte levelNum,
+ IN u1Byte rssiThresh,
+ IN u1Byte rssiThresh1
+ )
+{
+ s4Byte wifiRssi=0;
+ u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return pCoexSta->preWifiRssiState[index];
+ }
+
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh1)
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+ return wifiRssiState;
+}
+
+VOID
+halbtc8192e1ant_Updatera_mask(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte type,
+ IN u4Byte rateMask
+ )
+{
+ if(BTC_RATE_DISABLE == type)
+ {
+ pCoexDm->curra_mask |= rateMask; // disable rate
+ }
+ else if(BTC_RATE_ENABLE == type)
+ {
+ pCoexDm->curra_mask &= ~rateMask; // enable rate
+ }
+
+ if( bForceExec || (pCoexDm->prera_mask != pCoexDm->curra_mask))
+ {
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_UPDATE_ra_mask, &pCoexDm->curra_mask);
+ }
+ pCoexDm->prera_mask = pCoexDm->curra_mask;
+}
+
+VOID
+halbtc8192e1ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8192e1ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x61, 1, H2C_Parameter);
+}
+
+BOOLEAN
+halbtc8192e1ant_IsWifiStatusChanged(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreWifiBusy=FALSE, bPreUnder4way=FALSE, bPreBtHsOn=FALSE;
+ BOOLEAN bWifiBusy=FALSE, bUnder4way=FALSE, bBtHsOn=FALSE;
+ BOOLEAN bWifiConnected=FALSE;
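+ // Returns true when the busy/4-way-handshake/HS-operation state changes
+ // while WiFi is connected; the static locals remember the previous values.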
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+
+ if(bWifiConnected)
+ {
+ if(bWifiBusy != bPreWifiBusy)
+ {
+ bPreWifiBusy = bWifiBusy;
+ return true;
+ }
+ if(bUnder4way != bPreUnder4way)
+ {
+ bPreUnder4way = bUnder4way;
+ return true;
+ }
+ if(bBtHsOn != bPreBtHsOn)
+ {
+ bPreBtHsOn = bBtHsOn;
+ return true;
+ }
+ }
+
+ return FALSE;
+}
+
+VOID
+halbtc8192e1ant_UpdateBtLinkInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist;
+ pBtLinkInfo->bScoExist = pCoexSta->bScoExist;
+ pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist;
+ pBtLinkInfo->bPanExist = pCoexSta->bPanExist;
+ pBtLinkInfo->bHidExist = pCoexSta->bHidExist;
+
+ // check if Sco only
+ if( pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bScoOnly = true;
+ else
+ pBtLinkInfo->bScoOnly = FALSE;
+
+ // check if A2dp only
+ if( !pBtLinkInfo->bScoExist &&
+ pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bA2dpOnly = true;
+ else
+ pBtLinkInfo->bA2dpOnly = FALSE;
+
+ // check if Pan only
+ if( !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bPanOnly = true;
+ else
+ pBtLinkInfo->bPanOnly = FALSE;
+
+ // check if Hid only
+ if( !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bHidExist )
+ pBtLinkInfo->bHidOnly = true;
+ else
+ pBtLinkInfo->bHidOnly = FALSE;
+}
+
+u1Byte
+halbtc8192e1ant_ActionAlgorithm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bBtHsOn=FALSE;
+ u1Byte algorithm=BT_8192E_1ANT_COEX_ALGO_UNDEFINED;
+ u1Byte numOfDiffProfile=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ if(!pBtLinkInfo->bBtLinkExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n"));
+ return algorithm;
+ }
+
+ if(pBtLinkInfo->bScoExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bHidExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bPanExist)
+ numOfDiffProfile++;
+ if(pBtLinkInfo->bA2dpExist)
+ numOfDiffProfile++;
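+ // numOfDiffProfile counts how many distinct BT profiles (SCO/HID/PAN/A2DP)
+ // are active; the branches below map each combination to a coex algorithm.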
+
+ if(numOfDiffProfile == 1)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(pBtLinkInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if(pBtLinkInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP;
+ }
+ else if(pBtLinkInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 2)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if(pBtLinkInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if(pBtLinkInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else if(pBtLinkInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 3)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID;
+ }
+ else if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+	else if(numOfDiffProfile >= 4)
+ {
+ if(pBtLinkInfo->bScoExist)
+ {
+ if( pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+VOID
+halbtc8192e1ant_SetFwDacSwingLevel(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte dacSwingLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+	// There are several types of DAC swing:
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ H2C_Parameter[0] = dacSwingLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x64, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetFwDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte decBtPwrLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = decBtPwrLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power level = %d, FW write 0x62=0x%x\n",
+ decBtPwrLvl, H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x62, 1, H2C_Parameter);
+}
+
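+// The wrapper functions below share one pattern: the requested value is
+// cached as pCoexDm->cur*, and the underlying SetFw*/Set* helper is only
+// invoked when the value differs from the previously written one or when
+// bForceExec (FORCE_EXEC) is set, so redundant H2C and register writes
+// are skipped on NORMAL_EXEC calls.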
+VOID
+halbtc8192e1ant_DecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte decBtPwrLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power level = %d\n",
+ (bForceExec? "force to":""), decBtPwrLvl));
+ pCoexDm->curBtDecPwrLvl = decBtPwrLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], BtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ pCoexDm->preBtDecPwrLvl, pCoexDm->curBtDecPwrLvl));
+
+ if(pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl)
+ return;
+ }
+ halbtc8192e1ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->curBtDecPwrLvl);
+
+ pCoexDm->preBtDecPwrLvl = pCoexDm->curBtDecPwrLvl;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtLnaConstrain(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bBtLnaConsOn
+ )
+{
+ u1Byte H2C_Parameter[2] ={0};
+
+ H2C_Parameter[0] = 0x3; // opCode, 0x3=BT_SET_LNA_CONSTRAIN
+
+ if(bBtLnaConsOn)
+ {
+ H2C_Parameter[1] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s, FW write 0x69=0x%x\n",
+ (bBtLnaConsOn? "ON!!":"OFF!!"),
+ H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetBtLnaConstrain(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bBtLnaConsOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n",
+ (bForceExec? "force":""), ((bBtLnaConsOn)? "ON":"OFF")));
+ pCoexDm->bCurBtLnaConstrain = bBtLnaConsOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n",
+ pCoexDm->bPreBtLnaConstrain, pCoexDm->bCurBtLnaConstrain));
+
+ if(pCoexDm->bPreBtLnaConstrain == pCoexDm->bCurBtLnaConstrain)
+ return;
+ }
+ halbtc8192e1ant_SetFwBtLnaConstrain(pBtCoexist, pCoexDm->bCurBtLnaConstrain);
+
+ pCoexDm->bPreBtLnaConstrain = pCoexDm->bCurBtLnaConstrain;
+}
+
+VOID
+halbtc8192e1ant_SetFwBtPsdMode(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte btPsdMode
+ )
+{
+ u1Byte H2C_Parameter[2] ={0};
+
+ H2C_Parameter[0] = 0x2; // opCode, 0x2=BT_SET_PSD_MODE
+
+ H2C_Parameter[1] = btPsdMode;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x, FW write 0x69=0x%x\n",
+ H2C_Parameter[1],
+ H2C_Parameter[0]<<8|H2C_Parameter[1]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter);
+}
+
+
+VOID
+halbtc8192e1ant_SetBtPsdMode(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte btPsdMode
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n",
+ (bForceExec? "force":""), btPsdMode));
+ pCoexDm->bCurBtPsdMode = btPsdMode;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n",
+ pCoexDm->bPreBtPsdMode, pCoexDm->bCurBtPsdMode));
+
+ if(pCoexDm->bPreBtPsdMode == pCoexDm->bCurBtPsdMode)
+ return;
+ }
+ halbtc8192e1ant_SetFwBtPsdMode(pBtCoexist, pCoexDm->bCurBtPsdMode);
+
+ pCoexDm->bPreBtPsdMode = pCoexDm->bCurBtPsdMode;
+}
+
+
+VOID
+halbtc8192e1ant_SetBtAutoReport(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnableAutoReport
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = 0;
+
+ if(bEnableAutoReport)
+ {
+ H2C_Parameter[0] |= BIT0;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (bEnableAutoReport? "Enabled!!":"Disabled!!"), H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x68, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_BtAutoReport(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnableAutoReport
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n",
+ (bForceExec? "force to":""), ((bEnableAutoReport)? "Enabled":"Disabled")));
+ pCoexDm->bCurBtAutoReport = bEnableAutoReport;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ pCoexDm->bPreBtAutoReport, pCoexDm->bCurBtAutoReport));
+
+ if(pCoexDm->bPreBtAutoReport == pCoexDm->bCurBtAutoReport)
+ return;
+ }
+ halbtc8192e1ant_SetBtAutoReport(pBtCoexist, pCoexDm->bCurBtAutoReport);
+
+ pCoexDm->bPreBtAutoReport = pCoexDm->bCurBtAutoReport;
+}
+
+VOID
+halbtc8192e1ant_FwDacSwingLvl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte fwDacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (bForceExec? "force to":""), fwDacSwingLvl));
+ pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+ if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
+ return;
+ }
+
+ halbtc8192e1ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+ pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+		// After initialization, pCoexDm->btRf0x1eBackup can be used
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8192e1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8192e1ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+	return;	// low-penalty RA handling is currently disabled; the code below is not reached
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8192e1ant_SetDacSwingReg(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte level
+ )
+{
+ u1Byte val=(u1Byte)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x883, 0x3e, val);
+}
+
+VOID
+halbtc8192e1ant_SetSwFullTimeDacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSwDacSwingOn,
+ IN u4Byte swDacSwingLvl
+ )
+{
+ if(bSwDacSwingOn)
+ {
+ halbtc8192e1ant_SetDacSwingReg(pBtCoexist, swDacSwingLvl);
+ }
+ else
+ {
+ halbtc8192e1ant_SetDacSwingReg(pBtCoexist, 0x18);
+ }
+}
+
+
+VOID
+halbtc8192e1ant_DacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDacSwingOn,
+ IN u4Byte dacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",
+ (bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+ pCoexDm->bCurDacSwingOn = bDacSwingOn;
+ pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+ pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+ if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+ (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+ return;
+ }
+ mdelay(30);
+ halbtc8192e1ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+ pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+ pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8192e1ant_SetAdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ if(bAdcBackOff)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x3);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+VOID
+halbtc8192e1ant_AdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+ pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+ if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff)
+ return;
+ }
+ halbtc8192e1ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+ pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8192e1ant_SetAgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ u1Byte rssiAdjustVal=0;
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if(bAgcTableEn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58);
+ rssiAdjustVal = 8;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258);
+ }
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ // set rssiAdjustVal for wifi module.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8192e1ant_AgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+ pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+ if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
+ return;
+ }
+ halbtc8192e1ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+ pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8192e1ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c4,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8192e1ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c4,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c4 = val0x6c4;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c4, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c4, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8192e1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
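+// Program the coex priority table (registers 0x6c0/0x6c4/0x6c8/0x6cc)
+// with one of several preset value sets selected by "type"; unknown
+// types are ignored.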
+VOID
+halbtc8192e1ant_CoexTableWithType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte type
+ )
+{
+ switch(type)
+ {
+ case 0:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xffffffff, 0xffffffff, 0xffffff, 0x3);
+ break;
+ case 5:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5fff5fff, 0x5fff5fff, 0xffffff, 0x3);
+ break;
+ case 6:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 7:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xddffddff, 0xddffddff, 0xffffff, 0x3);
+ break;
+ case 8:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5afa5afa, 0xffffff, 0x3);
+ break;
+ case 9:
+ halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5f5f5f5f, 0x5f5f5f5f, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+VOID
+halbtc8192e1ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x63, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8192e1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
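+// Send PS-TDMA parameters to firmware: the five raw bytes are cached in
+// pCoexDm->psTdmaPara[] and written verbatim to H2C command 0x60. The
+// per-byte meaning is defined by the firmware interface; the parameter
+// sets themselves are chosen per TDMA "type" in halbtc8192e1ant_PsTdma().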
+VOID
+halbtc8192e1ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pCoexDm->psTdmaPara[0] = byte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x60, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8192e1ant_SetLpsRpwm(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ u1Byte lps=lpsVal;
+ u1Byte rpwm=rpwmVal;
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+VOID
+halbtc8192e1ant_LpsRpwm(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ BOOLEAN bForceExecPwrCmd=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n",
+ (bForceExec? "force to":""), lpsVal, rpwmVal));
+ pCoexDm->curLps = lpsVal;
+ pCoexDm->curRpwm = rpwmVal;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preLps/curLps=0x%x/0x%x, preRpwm/curRpwm=0x%x/0x%x!!\n",
+ pCoexDm->preLps, pCoexDm->curLps, pCoexDm->preRpwm, pCoexDm->curRpwm));
+
+ if( (pCoexDm->preLps == pCoexDm->curLps) &&
+ (pCoexDm->preRpwm == pCoexDm->curRpwm) )
+ {
+ return;
+ }
+ }
+ halbtc8192e1ant_SetLpsRpwm(pBtCoexist, lpsVal, rpwmVal);
+
+ pCoexDm->preLps = pCoexDm->curLps;
+ pCoexDm->preRpwm = pCoexDm->curRpwm;
+}
+
+VOID
+halbtc8192e1ant_SwMechanism1(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bShrinkRxLPF,
+ IN BOOLEAN bLowPenaltyRA,
+ IN BOOLEAN limited_dig,
+ IN BOOLEAN bBTLNAConstrain
+ )
+{
+ //halbtc8192e1ant_RfShrink(pBtCoexist, NORMAL_EXEC, bShrinkRxLPF);
+ //halbtc8192e1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
+
+ //no limited DIG
+ //halbtc8192e1ant_SetBtLnaConstrain(pBtCoexist, NORMAL_EXEC, bBTLNAConstrain);
+}
+
+VOID
+halbtc8192e1ant_SwMechanism2(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAGCTableShift,
+ IN BOOLEAN bADCBackOff,
+ IN BOOLEAN bSWDACSwing,
+ IN u4Byte dacSwingLvl
+ )
+{
+ //halbtc8192e1ant_AgcTable(pBtCoexist, NORMAL_EXEC, bAGCTableShift);
+ //halbtc8192e1ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, bADCBackOff);
+ //halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, bSWDACSwing, dacSwingLvl);
+}
+
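+// Turn firmware PS-TDMA on or off. When turning on, "type" selects one of
+// the predefined 5-byte parameter sets, and several types additionally
+// report an RSSI adjustment value via
+// BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE. When turning off, "type"
+// selects how the antenna is parked through register 0x92c (ant2BT,
+// ant2WIFI or ant2PTA, per the comments below).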
+VOID
+halbtc8192e1ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bTurnOnByCnt=FALSE;
+ u1Byte psTdmaTypeByCnt=0, rssiAdjustVal=0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
+ if(bTurnOn)
+ {
+ switch(type)
+ {
+ default:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+ break;
+ case 1:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 11;
+ break;
+ case 2:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 14;
+ break;
+ case 3:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x40);
+ break;
+ case 4:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ rssiAdjustVal = 17;
+ break;
+ case 5:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x61, 0x15, 0x3, 0x31, 0x0);
+ break;
+ case 6:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x0, 0x0);
+ break;
+ case 7:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ break;
+ case 9:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x1e, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 18;
+ break;
+ case 10:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x12, 0x03, 0x10, 0x50);
+ rssiAdjustVal = 20;
+ break;
+ case 12:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xeb, 0xa, 0x3, 0x31, 0x18);
+ break;
+
+ case 15:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x10, 0x0);
+ rssiAdjustVal = 18;
+ break;
+
+ case 18:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0);
+ rssiAdjustVal = 14;
+ break;
+
+ case 20:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x25, 0x25, 0x0, 0x0);
+ break;
+ case 21:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x20, 0x3, 0x10, 0x40);
+ break;
+ case 22:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x8, 0x8, 0x0, 0x40);
+ break;
+ case 23:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 24:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x15, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 25:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 26:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18);
+ rssiAdjustVal = 22;
+ break;
+ case 27:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x98);
+ rssiAdjustVal = 22;
+ break;
+ case 28:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x69, 0x25, 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0x1a, 0x1a, 0x1, 0x10);
+ break;
+ case 30:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0);
+ break;
+ case 31:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0xa, 0x3, 0x31, 0x90);
+ break;
+ case 33:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xa3, 0x25, 0x3, 0x30, 0x90);
+ break;
+ case 34:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x10);
+ break;
+ case 35:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x10);
+ break;
+ case 36:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x12, 0x3, 0x14, 0x50);
+ break;
+ case 37:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x3, 0x10, 0x50);
+ break;
+ case 38:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(type)
+ {
+ case 8: //0x778 = 1, ant2PTA
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+ break;
+ case 0: //0x778 = 1, ant2BT
+ default:
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ mdelay(5);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ break;
+ case 9: //0x778 = 1, ant2WIFI
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4);
+ break;
+ case 10: //0x778 = 3, ant2BT
+ halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ mdelay(5);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+ rssiAdjustVal =0;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssiAdjustVal);
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
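+// Switch the WiFi spatial-stream setting. ssType 1 falls back to 1SS
+// (2SS rates masked out, OFDM/CCK switched to a single path, static MIMO
+// power save); ssType 2 restores 2SS operation. The resulting MIMO PS
+// mode is announced through BTC_SET_ACT_SEND_MIMO_PS.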
+VOID
+halbtc8192e1ant_SetSwitchSsType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte ssType
+ )
+{
+ u1Byte mimoPs=BTC_MIMO_PS_DYNAMIC;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], REAL set SS Type = %d\n", ssType));
+
+ if(ssType == 1)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_DISABLE, 0xfff00000); // disable 2ss
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ // switch ofdm path
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x11);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x1);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81111111);
+		// switch cck path
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x1);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x81);
+ mimoPs=BTC_MIMO_PS_STATIC;
+ }
+ else if(ssType == 2)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_ENABLE, 0xfff00000); // enable 2ss
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x33);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x3);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81121313);
+ pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x41);
+ mimoPs=BTC_MIMO_PS_DYNAMIC;
+ }
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimoPs); // set rx 1ss or 2ss
+}
+
+VOID
+halbtc8192e1ant_SwitchSsType(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte newSsType
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], %s Switch SS Type = %d\n",
+ (bForceExec? "force to":""), newSsType));
+ pCoexDm->curSsType = newSsType;
+
+ if(!bForceExec)
+ {
+ if(pCoexDm->preSsType == pCoexDm->curSsType)
+ return;
+ }
+ halbtc8192e1ant_SetSwitchSsType(pBtCoexist, pCoexDm->curSsType);
+
+ pCoexDm->preSsType = pCoexDm->curSsType;
+}
+
+VOID
+halbtc8192e1ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ // sw all off
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+
+ // hw all off
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
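+// Handle the "common" WiFi/BT state combinations in which at least one
+// side is idle or not connected. Returns true when such a case was
+// handled here, FALSE when both sides are busy and the caller must pick a
+// profile-specific action.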
+BOOLEAN
+halbtc8192e1ant_IsCommonAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bCommon=FALSE, bWifiConnected=FALSE, bWifiBusy=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if(!bWifiConnected &&
+ BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(!bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else if(!bWifiConnected &&
+ (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ bCommon = true;
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+
+ bCommon = FALSE;
+ }
+
+ return bCommon;
+}
+
+
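+// Adaptive TDMA duration tuning for BT ACL traffic, evaluated from the BT
+// retry counter of the last 2-second period: zero retries over n
+// consecutive periods widens the WiFi slot, repeated retries narrow it,
+// stepping among PS-TDMA types 1/2/9/11. The static up/dn/m/n/WaitCount
+// variables add hysteresis so the duration does not oscillate between two
+// neighbouring settings.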
+VOID
+halbtc8192e1ant_TdmaDurationAdjustForAcl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0, btInfoExt;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjustForAcl()\n"));
+
+ if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+ {
+ if( pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 3 &&
+ pCoexDm->curPsTdma != 9 )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ return;
+ }
+
+ if(!pCoexDm->bAutoTdmaAdjust)
+ {
+ pCoexDm->bAutoTdmaAdjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n= 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+ //acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ btInfoExt = pCoexSta->btInfoExt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
+ up, dn, m, n, WaitCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+			if(up >= n)	// if the retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+			if (dn == 2)	// if the retry count is < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+					m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+		else	// retry count > 3: a single 2-second period with retry count > 3 narrows the WiFi duration
+ {
+ if (WaitCount == 1)
+				m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck the WiFi duration at most every 120 seconds
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ if(result == -1)
+ {
+ if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+ ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ else if(result == 1)
+ {
+ if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) &&
+ ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ }
+
+ if( pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 9 &&
+ pCoexDm->curPsTdma != 11 )
+ {
+ // recover to previous adjust type
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ }
+}
+
+u1Byte
+halbtc8192e1ant_PsTdmaTypeByWifiRssi(
+ IN s4Byte wifiRssi,
+ IN s4Byte preWifiRssi,
+ IN u1Byte wifiRssiThresh
+ )
+{
+ u1Byte psTdmaType=0;
+
+ if(wifiRssi > preWifiRssi)
+ {
+ if(wifiRssi > (wifiRssiThresh+5))
+ {
+ psTdmaType = 26;
+ }
+ else
+ {
+ psTdmaType = 25;
+ }
+ }
+ else
+ {
+ if(wifiRssi > wifiRssiThresh)
+ {
+ psTdmaType = 26;
+ }
+ else
+ {
+ psTdmaType = 25;
+ }
+ }
+
+ return psTdmaType;
+}
+
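+// Keep PS-TDMA consistent with LPS transitions: whenever the power-save
+// state is about to change (enter or leave LPS), PS-TDMA is switched off
+// first, as required before the transition.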
+VOID
+halbtc8192e1ant_PsTdmaCheckForPowerSaveState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bNewPsState
+ )
+{
+ u1Byte lpsMode=0x0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_LPS_MODE, &lpsMode);
+
+ if(lpsMode) // already under LPS state
+ {
+ if(bNewPsState)
+ {
+ // keep state under LPS, do nothing.
+ }
+ else
+ {
+ // will leave LPS state, turn off psTdma first
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ }
+ else // NO PS state
+ {
+ if(bNewPsState)
+ {
+ // will enter LPS state, turn off psTdma first
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ // keep state under NO PS state, do nothing.
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_PowerSaveState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte psType,
+ IN u1Byte lpsVal,
+ IN u1Byte rpwmVal
+ )
+{
+ BOOLEAN bLowPwrDisable=FALSE;
+
+ switch(psType)
+ {
+ case BTC_PS_WIFI_NATIVE:
+ // recover to original 32k low power setting
+ bLowPwrDisable = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ break;
+ case BTC_PS_LPS_ON:
+ halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, true);
+ halbtc8192e1ant_LpsRpwm(pBtCoexist, NORMAL_EXEC, lpsVal, rpwmVal);
+			// when coex forces LPS entry, do not enter 32k low power.
+ bLowPwrDisable = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+			// power save must be executed before psTdma.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ break;
+ case BTC_PS_LPS_OFF:
+ halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, FALSE);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiOnly(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+}
+
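+// Detect whether BT has been switched off. BT is treated as inactive when
+// all high/low priority TX/RX counters are zero, or when they all read
+// back as 0xffff (presumably an invalid counter read); after two
+// consecutive inactive checks BT is reported as disabled and the
+// WiFi-only action is applied.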
+VOID
+halbtc8192e1ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+	// This function checks whether BT is disabled
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ halbtc8192e1ant_ActionWifiOnly(pBtCoexist);
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+		if(bBtDisabled)
+		{
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+			pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+		}
+ }
+}
+
+//=============================================
+//
+// Software Coex Mechanism start
+//
+//=============================================
+
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8192e1ant_ActionSco(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,FALSE,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+VOID
+halbtc8192e1ant_ActionA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionA2dpPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8192e1ant_ActionPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8192e1ant_ActionPanEdrA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionPanEdrHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8192e1ant_ActionHidA2dpPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionHidA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+ wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18);
+ }
+ }
+ else
+ {
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18);
+ }
+ else
+ {
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+ }
+ }
+}
+
+//=============================================
+//
+// Non-Software Coex Mechanism start
+//
+//=============================================
+VOID
+halbtc8192e1ant_ActionBtInquiry(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ // Note:
+ // Do not do DacSwing here, use original setting.
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(bBtHsOn)
+ return;
+
+ if(!bWifiConnected)
+ {
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if( (pBtLinkInfo->bScoExist) ||
+ (pBtLinkInfo->bHidOnly) )
+ {
+ // SCO/HID-only busy
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else
+ {
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 30);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionBtScoHidOnlyBusy(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ u1Byte btRssiState=BTC_RSSI_STATE_HIGH;
+
+ if(BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else
+ {
+ if(pBtLinkInfo->bHidOnly)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else
+ {
+			// decrease BT power according to the BT RSSI level
+ btRssiState = halbtc8192e1ant_BtRssiState(3, 34, 42);
+ if( (btRssiState == BTC_RSSI_STATE_LOW) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_LOW) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if( (btRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_MEDIUM) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if( (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 6);
+ }
+
+ // sw dacSwing
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 0xc);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS!!!\n"));
+
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ // error, should not be here
+ pCoexDm->errorCondition = 1;
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus &&
+ !pBtCoexist->bt_link_info.bHidOnly)
+ {
+ if(pCoexDm->curSsType == 1)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10);
+ //halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 38);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ }
+ }
+ else
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedBtAclBusy(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte wifiStatus
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+
+ if(pBtLinkInfo->bHidOnly)
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, wifiStatus);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ return;
+ }
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ if( (pBtLinkInfo->bA2dpOnly) ||
+ (pBtLinkInfo->bHidExist&&pBtLinkInfo->bA2dpExist) )
+ {
+ halbtc8192e1ant_TdmaDurationAdjustForAcl(pBtCoexist, wifiStatus);
+ }
+ else if( (pBtLinkInfo->bPanOnly) ||
+ (pBtLinkInfo->bHidExist&&pBtLinkInfo->bPanExist) )
+ {
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ }
+ else
+ {
+ if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+ (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ else
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ }
+
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1);
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnected(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+VOID
+halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedScan(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+
+VOID
+halbtc8192e1ant_ActionWifiConnectedSpecialPacket(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+VOID
+halbtc8192e1ant_ActionWifiConnected(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE;
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+ BOOLEAN bUnder4way=FALSE;
+ u4Byte wifiBw;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n"));
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+ if(bUnder4way)
+ {
+ halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"));
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ if(bScan || bLink || bRoam)
+ {
+ halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"));
+ return;
+ }
+
+ // power save state
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly)
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0);
+ else
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ if(!bWifiBusy)
+ {
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ }
+ else
+ {
+ if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus)
+ {
+ halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+ else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ }
+ else
+ {
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+ }
+}
+
+VOID
+halbtc8192e1ant_RunSwCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiUnder5G=FALSE, bWifiBusy=FALSE, bWifiConnected=FALSE;
+ u1Byte btInfoOriginal=0, btRetryCnt=0;
+ u1Byte algorithm=0;
+
+ return;
+
+ algorithm = halbtc8192e1ant_ActionAlgorithm(pBtCoexist);
+ pCoexDm->curAlgorithm = algorithm;
+
+ if(halbtc8192e1ant_IsCommonAction(pBtCoexist))
+ {
+ }
+ else
+ {
+ switch(pCoexDm->curAlgorithm)
+ {
+ case BT_8192E_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+ halbtc8192e1ant_ActionSco(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+ halbtc8192e1ant_ActionHid(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+ halbtc8192e1ant_ActionA2dp(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+ halbtc8192e1ant_ActionA2dpPanHs(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+ halbtc8192e1ant_ActionPanEdr(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+ halbtc8192e1ant_ActionPanHs(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+ halbtc8192e1ant_ActionPanEdrA2dp(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+ halbtc8192e1ant_ActionPanEdrHid(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+ halbtc8192e1ant_ActionHidA2dpPanEdr(pBtCoexist);
+ break;
+ case BT_8192E_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+ halbtc8192e1ant_ActionHidA2dp(pBtCoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+ halbtc8192e1ant_CoexAllOff(pBtCoexist);
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+}
+
+VOID
+halbtc8192e1ant_RunCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n"));
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if(pBtCoexist->bStopCoexDm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ return;
+ }
+
+ if(pCoexSta->bUnderIps)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ return;
+ }
+
+ halbtc8192e1ant_RunSwCoexistMechanism(pBtCoexist);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+
+ // 1ss or 2ss
+ if(pBtLinkInfo->bScoExist)
+ {
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else if(bBtHsOn)
+ {
+ if(pBtLinkInfo->bHidOnly)
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1);
+ }
+ else
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2);
+
+ if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(!bWifiConnected)
+ {
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if(bScan || bLink || bRoam)
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ else
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+}
+
+VOID
+halbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8192e1ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6);
+ halbtc8192e1ant_DecBtPwr(pBtCoexist, FORCE_EXEC, 0);
+
+ // sw all off
+ halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+ halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+ halbtc8192e1ant_SwitchSsType(pBtCoexist, FORCE_EXEC, 2);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8);
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+}
+
+//============================================================
+// workaround functions start with wa_halbtc8192e1ant_
+//============================================================
+//============================================================
+// extern functions start with EXhalbtc8192e1ant_
+//============================================================
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte u4Tmp=0;
+ u16 u2Tmp=0;
+ u1Byte u1Tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ // antenna sw ctrl to bt
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0x6);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x944, 0x24);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x930, 0x700700);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20);
+ if(pBtCoexist->chipInterface == BTC_INTF_USB)
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30430004);
+ else
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30030004);
+
+ halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+ // antenna switch control parameter
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0x55555555);
+
+ // coex parameters
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1);
+ // 0x790[5:0]=0x5
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x790);
+ u1Tmp &= 0xc0;
+ u1Tmp |= 0x5;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x790, u1Tmp);
+
+ // enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+
+ // enable PTA
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+ // enable mailbox interface
+ u2Tmp = pBtCoexist->btc_read_2byte(pBtCoexist, 0x40);
+ u2Tmp |= BIT9;
+ pBtCoexist->btc_write_2byte(pBtCoexist, 0x40, u2Tmp);
+
+ // enable PTA I2C mailbox
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x101);
+ u1Tmp |= BIT4;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x101, u1Tmp);
+
+ // enable bt clock when wifi is disabled.
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x93);
+ u1Tmp |= BIT0;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x93, u1Tmp);
+	// enable bt clock when suspended.
+ u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x7);
+ u1Tmp |= BIT0;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x7, u1Tmp);
+}
+
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ pBtCoexist->bStopCoexDm = FALSE;
+
+ halbtc8192e1ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+ u4Byte fwVer=0, btPatchVer=0;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cliBuf);
+ }
+ if(pBtCoexist->bStopCoexDm)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cliBuf);
+ }
+
+ if(!pBoardInfo->bt_exist)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+ GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pBtCoexist->btInfo.bBtDisabled)? ("disabled"): ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)? "non-connected idle":
+ ( (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy")))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pBtLinkInfo->bScoExist, pBtLinkInfo->bHidExist, pBtLinkInfo->bPanExist, pBtLinkInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8192E_1ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8192e1Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ ((pCoexSta->bUnderIps? "IPS ON":"IPS OFF")),
+ ((pCoexSta->bUnderLps? "LPS ON":"LPS OFF")),
+ pBtCoexist->btInfo.lps1Ant,
+ pBtCoexist->btInfo.rpwm_1ant);
+ CL_PRINTF(cliBuf);
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type", \
+ pCoexDm->curSsType);
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig, pCoexDm->bCurBtLnaConstrain);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \
+ (pBtCoexist->btInfo.reject_agg_pkt? "Yes":"No"), (pBtCoexist->btInfo.b_bt_ctrl_agg_buf_size? "Yes":"No"),
+ pBtCoexist->btInfo.aggBufSize);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask", \
+ pBtCoexist->btInfo.ra_mask);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+ pCoexDm->errorCondition);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwrLvl/ IgnWlanAct", \
+ pCoexDm->curBtDecPwrLvl, pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc04);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xd04);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x90c);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0xc04/ 0xd04/ 0x90c", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x92c);
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x930);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \
+ (u1Tmp[0]), u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4f);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \
+ u1Tmp[0], u1Tmp[1]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 1)
+ halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+#endif
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u4Byte u4Tmp=0;
+
+ if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm)
+ return;
+
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ pCoexSta->bUnderIps = true;
+ halbtc8192e1ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ pCoexSta->bUnderIps = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm)
+ return;
+
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ pCoexSta->bUnderLps = true;
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ pCoexSta->bUnderLps = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ }
+ else // wifi is connected
+ {
+ halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist);
+ }
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ if(!bWifiConnected) // non-connected scan
+ {
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist);
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+		if(!bWifiConnected)	// wifi is not connected
+ {
+ halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist);
+ }
+ else
+ {
+ halbtc8192e1ant_ActionWifiConnected(pBtCoexist);
+ }
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u1Byte H2C_Parameter[3] ={0};
+ u4Byte wifiBw;
+ u1Byte wifiCentralChnl;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+
+	// only for 2.4G do we need to inform bt of the chnl mask
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if( (BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14) )
+ {
+ H2C_Parameter[0] = 0x1;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x66=0x%x\n",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x66, 3, H2C_Parameter);
+}
+
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ BOOLEAN bBtHsOn=FALSE;
+
+ if(pBtCoexist->manual_control ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled )
+ return;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if(pCoexSta->bC2hBtInquiryPage)
+ {
+ halbtc8192e1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ }
+ else if(bBtHsOn)
+ {
+ halbtc8192e1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if( BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type )
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type));
+ halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info;
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ static u4Byte setBtPsdMode=0;
+ BOOLEAN bBtBusy=FALSE, limited_dig=FALSE;
+ BOOLEAN bWifiConnected=FALSE;
+ BOOLEAN b_bt_ctrl_agg_buf_size=FALSE;
+
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = tmpBuf[0]&0xf;
+ if(rspSource >= BT_INFO_SRC_8192E_1ANT_MAX)
+ rspSource = BT_INFO_SRC_8192E_1ANT_WIFI_FW;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 1)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(pBtCoexist->btInfo.bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for BT is disabled <===\n"));
+ return;
+ }
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+ return;
+ }
+ if(pBtCoexist->bStopCoexDm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n"));
+ return;
+ }
+
+ if(BT_INFO_SRC_8192E_1ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt = // [3:0]
+ pCoexSta->btInfoC2h[rspSource][2]&0xf;
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][3]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][4];
+
+ // Here we need to resend some wifi info to BT
+			// because bt has been reset and lost the info.
+ if( (pCoexSta->btInfoExt & BIT1) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(bWifiConnected)
+ {
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ setBtPsdMode = 0;
+ }
+
+		// the test-chip bt patch only responds with the status for BT_RSP,
+		// so for now we consider the following only under BT_RSP
+ if(BT_INFO_SRC_8192E_1ANT_BT_RSP == rspSource)
+ {
+ if( (pCoexSta->btInfoExt & BIT3) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"));
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+ }
+ else
+ {
+				// BT already does NOT ignore Wlan active, so do nothing here.
+ }
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+ if( (pCoexSta->btInfoExt & BIT4) )
+ {
+ // BT auto report already enabled, do nothing
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"));
+ halbtc8192e1ant_BtAutoReport(pBtCoexist, FORCE_EXEC, true);
+ }
+#endif
+ }
+ }
+
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8192E_1ANT_B_INQ_PAGE)
+ pCoexSta->bC2hBtInquiryPage = true;
+ else
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+
+ // set link exist status
+ if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+ {
+ pCoexSta->bBtLinkExist = FALSE;
+ pCoexSta->bPanExist = FALSE;
+ pCoexSta->bA2dpExist = FALSE;
+ pCoexSta->bHidExist = FALSE;
+ pCoexSta->bScoExist = FALSE;
+ }
+ else // connection exists
+ {
+ pCoexSta->bBtLinkExist = true;
+ if(btInfo & BT_INFO_8192E_1ANT_B_FTP)
+ pCoexSta->bPanExist = true;
+ else
+ pCoexSta->bPanExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_A2DP)
+ pCoexSta->bA2dpExist = true;
+ else
+ pCoexSta->bA2dpExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_HID)
+ pCoexSta->bHidExist = true;
+ else
+ pCoexSta->bHidExist = FALSE;
+ if(btInfo & BT_INFO_8192E_1ANT_B_SCO_ESCO)
+ pCoexSta->bScoExist = true;
+ else
+ pCoexSta->bScoExist = FALSE;
+ }
+
+ halbtc8192e1ant_UpdateBtLinkInfo(pBtCoexist);
+
+ if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION))
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n"));
+ }
+	else if(btInfo == BT_INFO_8192E_1ANT_B_CONNECTION)	// connection exists but not busy
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n"));
+ }
+ else if((btInfo&BT_INFO_8192E_1ANT_B_SCO_ESCO) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_SCO_BUSY))
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n"));
+ }
+ else if( (btInfo&BT_INFO_8192E_1ANT_B_ACL_BUSY) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_A2DP) ||
+ (btInfo&BT_INFO_8192E_1ANT_B_FTP) )
+ {
+ if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus)
+ pCoexDm->bAutoTdmaAdjust = FALSE;
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n"));
+ }
+ else
+ {
+ pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n"));
+ }
+
+ // ra mask check
+ if(pBtLinkInfo->bScoExist || pBtLinkInfo->bHidExist)
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_DISABLE, 0x00000003); // disable tx cck 1M/2M
+ }
+ else
+ {
+ halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_ENABLE, 0x00000003); // enable tx cck 1M/2M
+ }
+
+ if( (BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) )
+ {
+ bBtBusy = true;
+ limited_dig = true;
+ if(pBtLinkInfo->bHidExist)
+ b_bt_ctrl_agg_buf_size = true;
+ }
+ else
+ {
+ bBtBusy = FALSE;
+ limited_dig = FALSE;
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+ //============================================
+ // Aggregation related setting
+ //============================================
+ // if sco, reject AddBA
+ //pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt);
+
+	// decide whether BT controls aggregation buf size
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &b_bt_ctrl_agg_buf_size);
+	// actually update the aggregation setting
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+ //============================================
+
+ pCoexDm->limited_dig = limited_dig;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+}
+
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ pBtCoexist->bStopCoexDm = true;
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0xf);
+
+ EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte pnpState
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if(BTC_WIFI_PNP_SLEEP == pnpState)
+ {
+ pBtCoexist->bStopCoexDm = true;
+ halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ }
+ else if(BTC_WIFI_PNP_WAKE_UP == pnpState)
+ {
+
+ }
+}
+
+VOID
+EXhalbtc8192e1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static u1Byte disVerInfoCnt=0;
+ u4Byte fwVer=0, btPatchVer=0;
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical===========================\n"));
+
+ if(disVerInfoCnt <= 5)
+ {
+ disVerInfoCnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num, pBoardInfo->btdm_ant_pos));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], BT stack/ hci ext ver = %s / %d\n", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion));
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
+ GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ }
+#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0)
+ halbtc8192e1ant_QueryBtInfo(pBtCoexist);
+ halbtc8192e1ant_MonitorBtCtr(pBtCoexist);
+ halbtc8192e1ant_MonitorBtEnableDisable(pBtCoexist);
+#else
+ if( halbtc8192e1ant_IsWifiStatusChanged(pBtCoexist) ||
+ pCoexDm->bAutoTdmaAdjust)
+ {
+ halbtc8192e1ant_RunCoexistMechanism(pBtCoexist);
+ }
+#endif
+}
+
+VOID
+EXhalbtc8192e1ant_DbgControl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte opCode,
+ IN u1Byte opLen,
+ IN pu1Byte pData
+ )
+{
+ switch(opCode)
+ {
+ case BTC_DBG_SET_COEX_NORMAL:
+ pBtCoexist->manual_control = FALSE;
+ halbtc8192e1ant_InitCoexDm(pBtCoexist);
+ break;
+ case BTC_DBG_SET_COEX_WIFI_ONLY:
+ pBtCoexist->manual_control = true;
+ halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+ break;
+ case BTC_DBG_SET_COEX_BT_ONLY:
+ // todo
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
new file mode 100644
index 000000000000..a759b758faef
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h
@@ -0,0 +1,226 @@
+//===========================================
+// The following are the 8192E_1ANT BT Co-exist definitions
+//===========================================
+#define BT_AUTO_REPORT_ONLY_8192E_1ANT 0
+
+#define BT_INFO_8192E_1ANT_B_FTP BIT7
+#define BT_INFO_8192E_1ANT_B_A2DP BIT6
+#define BT_INFO_8192E_1ANT_B_HID BIT5
+#define BT_INFO_8192E_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8192E_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8192E_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8192E_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8192E_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_&BIT0))? true:FALSE)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT 2
+
+typedef enum _BT_INFO_SRC_8192E_1ANT{
+ BT_INFO_SRC_8192E_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8192E_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8192E_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8192E_1ANT_MAX
+}BT_INFO_SRC_8192E_1ANT,*PBT_INFO_SRC_8192E_1ANT;
+
+typedef enum _BT_8192E_1ANT_BT_STATUS{
+ BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8192E_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8192E_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8192E_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8192E_1ANT_BT_STATUS_MAX
+}BT_8192E_1ANT_BT_STATUS,*PBT_8192E_1ANT_BT_STATUS;
+
+typedef enum _BT_8192E_1ANT_WIFI_STATUS{
+ BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
+ BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
+ BT_8192E_1ANT_WIFI_STATUS_MAX
+}BT_8192E_1ANT_WIFI_STATUS,*PBT_8192E_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8192E_1ANT_COEX_ALGO{
+ BT_8192E_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8192E_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8192E_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8192E_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8192E_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8192E_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8192E_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8192E_1ANT_COEX_ALGO_MAX = 0xb,
+}BT_8192E_1ANT_COEX_ALGO,*PBT_8192E_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8192E_1ANT{
+ // fw mechanism
+ u1Byte preBtDecPwrLvl;
+ u1Byte curBtDecPwrLvl;
+ BOOLEAN bPreBtLnaConstrain;
+ BOOLEAN bCurBtLnaConstrain;
+ u1Byte bPreBtPsdMode;
+ u1Byte bCurBtPsdMode;
+ u1Byte preFwDacSwingLvl;
+ u1Byte curFwDacSwingLvl;
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ BOOLEAN bAutoTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+ BOOLEAN bPreBtAutoReport;
+ BOOLEAN bCurBtAutoReport;
+ u1Byte preLps;
+ u1Byte curLps;
+ u1Byte preRpwm;
+ u1Byte curRpwm;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ BOOLEAN bPreDacSwingOn;
+ u4Byte preDacSwingLvl;
+ BOOLEAN bCurDacSwingOn;
+ u4Byte curDacSwingLvl;
+ BOOLEAN bPreAdcBackOff;
+ BOOLEAN bCurAdcBackOff;
+ BOOLEAN bPreAgcTableEn;
+ BOOLEAN bCurAgcTableEn;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c4;
+ u4Byte curVal0x6c4;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+
+ u1Byte preSsType;
+ u1Byte curSsType;
+
+ u4Byte prera_mask;
+ u4Byte curra_mask;
+
+ u1Byte errorCondition;
+} COEX_DM_8192E_1ANT, *PCOEX_DM_8192E_1ANT;
+
+typedef struct _COEX_STA_8192E_1ANT{
+ BOOLEAN bBtLinkExist;
+ BOOLEAN bScoExist;
+ BOOLEAN bA2dpExist;
+ BOOLEAN bHidExist;
+ BOOLEAN bPanExist;
+
+ BOOLEAN bUnderLps;
+ BOOLEAN bUnderIps;
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8192E_1ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8192E_1ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+}COEX_STA_8192E_1ANT, *PCOEX_STA_8192E_1ANT;
+
+//===========================================
+// The following are the interfaces that notify the coex module.
+//===========================================
+VOID
+EXhalbtc8192e1ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8192e1ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8192e1ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_PnpNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte pnpState
+ );
+VOID
+EXhalbtc8192e1ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8192e1ant_DbgControl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte opCode,
+ IN u1Byte opLen,
+ IN pu1Byte pData
+ );
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
new file mode 100644
index 000000000000..115908928ae4
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c
@@ -0,0 +1,4242 @@
+/**************************************************************
+ * Description:
+ *
+ * This file is for RTL8192E Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ * Global variables; these are static variables
+ **************************************************************/
+static struct coex_dm_8192e_2ant glcoex_dm_8192e_2ant;
+static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant;
+static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant;
+static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant;
+
+const char *const GLBtInfoSrc8192e2Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8192e_2ant = 20130902;
+u32 glcoex_ver_8192e_2ant = 0x34;
+
+/**************************************************************
+ * local function prototypes, if needed
+ **************************************************************/
+/**************************************************************
+ * local functions start with halbtc8192e2ant_
+ **************************************************************/
+u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ int btrssi=0;
+ u8 btrssi_state = coex_sta->pre_bt_rssi_state;
+
+ btrssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=LOW\n");
+ if (btrssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=HIGH\n");
+ if (btrssi < rssi_thresh) {
+ btrssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=LOW\n");
+ if(btrssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi pre state=MEDIUM\n");
+ if (btrssi >= (rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ btrssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
+ } else if (btrssi < rssi_thresh) {
+ btrssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Medium\n");
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state=HIGH\n");
+ if (btrssi < rssi_thresh1) {
+ btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
+ } else {
+ btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = btrssi_state;
+
+ return btrssi_state;
+}
+
+u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist * btcoexist, u8 index,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ int wifirssi = 0;
+ u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifirssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
+ }
+ } else {
+ if (wifirssi < rssi_thresh) {
+ wifirssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifirssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifirssi >= (rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+ wifirssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
+ } else if (wifirssi < rssi_thresh) {
+ wifirssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Medium\n");
+ }
+ } else {
+ if (wifirssi < rssi_thresh1) {
+ wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
+ } else {
+ wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifirssi_state;
+
+ return wifirssi_state;
+}
+
+void halbtc8192e2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+ static bool pre_bt_disabled = false;
+ static u32 bt_disable_cnt = 0;
+ bool bt_active = true, bt_disabled = false;
+
+	/* This function checks whether bt is disabled */
+
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = false;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ }
+ }
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled":"enabled"),
+ (bt_disabled ? "disabled":"enabled"));
+ pre_bt_disabled = bt_disabled;
+ }
+}
+
+u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist,
+ u8 sstype, u32 ra_masktype)
+{
+ u32 disra_mask = 0x0;
+
+ switch (ra_masktype) {
+ case 0: /* normal mode */
+ if (sstype == 2)
+ disra_mask = 0x0; /* enable 2ss */
+ else
+ disra_mask = 0xfff00000;/* disable 2ss */
+ break;
+ case 1: /* disable cck 1/2 */
+ if(sstype == 2)
+ disra_mask = 0x00000003;/* enable 2ss */
+ else
+ disra_mask = 0xfff00003;/* disable 2ss */
+ break;
+ case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+ if(sstype == 2)
+ disra_mask = 0x0001f1f7;/* enable 2ss */
+ else
+ disra_mask = 0xfff1f1f7;/* disable 2ss */
+ break;
+ default:
+ break;
+ }
+
+ return disra_mask;
+}
+
+void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_ratemask)
+{
+ coex_dm->curra_mask = dis_ratemask;
+
+ if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ &coex_dm->curra_mask);
+ coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
+void halbtc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ bool wifi_under_bmode = false;
+
+ coex_dm->cur_arfrtype = type;
+
+ if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) {
+ switch (coex_dm->cur_arfrtype) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ coex_dm->backup_arfr_cnt1);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ coex_dm->backup_arfr_cnt2);
+ break;
+ case 1:
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_bmode);
+ if (wifi_under_bmode) {
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ 0x01010101);
+ } else {
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ 0x04030201);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_arfrtype = coex_dm->cur_arfrtype;
+}
+
+void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_retrylimit_type = type;
+
+ if (force_exec || (coex_dm->pre_retrylimit_type !=
+ coex_dm->cur_retrylimit_type)) {
+ switch (coex_dm->cur_retrylimit_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ coex_dm->backup_retrylimit);
+ break;
+ case 1: /* retry limit=8 */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ 0x0808);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type;
+}
+
+void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_ampdutime_type = type;
+
+ if (force_exec || (coex_dm->pre_ampdutime_type !=
+ coex_dm->cur_ampdutime_type)) {
+ switch (coex_dm->cur_ampdutime_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_1byte(btcoexist, 0x456,
+ coex_dm->backup_ampdu_maxtime);
+ break;
+ case 1: /* AMPDU time = 0x38 * 32us */
+ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type;
+}
+
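+/* Apply all Tx-side limits in one call: rate mask, auto rate fallback
+ * retry, retry limit and AMPDU max time. */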
+void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_masktype, u8 arfr_type,
+ u8 retrylimit_type, u8 ampdutime_type)
+{
+ u32 disra_mask = 0x0;
+
+ coex_dm->curra_masktype = ra_masktype;
+ disra_mask = halbtc8192e2ant_decidera_mask(btcoexist,
+ coex_dm->cur_sstype,
+ ra_masktype);
+ halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask);
+
+ halbtc8192e2ant_autorate_fallback_retry(btcoexist, force_exec,
+ arfr_type);
+ halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type);
+ halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type);
+}
+
+void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool b_bt_ctrl_agg_buf_size,
+ u8 agg_buf_size)
+{
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+ u8 rx_agg_size = agg_buf_size;
+
+ /*********************************************
+ * Rx Aggregation related setting
+ *********************************************/
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide whether BT controls the aggregation buffer size */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+ /* aggregation buffer size, only takes effect
+ * when BT controls the Rx aggregation size. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
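+/* Read the BT high/low priority Tx/Rx counters from 0x770/0x774 into
+ * coex_sta and then reset the hardware counters via 0x76e. */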
+void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD)>>16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD)>>16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
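+/* Request a BT status report from the firmware via H2C command 0x61. */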
+void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
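+/* Return true if the wifi busy, 4-way handshake or HS operation state
+ * changed since the last call (checked only while wifi is connected). */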
+bool halbtc8192e2ant_iswifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false;
+ static bool pre_under_4way = false, pre_bt_hson = false;
+ bool wifi_busy = false, under_4way = false, bt_hson = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hson != pre_bt_hson) {
+ pre_bt_hson = bt_hson;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hson = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* workaround for HS mode */
+ if (bt_hson) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+
+ /* check if Sco only */
+ if (bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist &&
+ bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist &&
+ !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist &&
+ !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist &&
+ bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
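+/* Map the current combination of BT profiles (SCO/HID/A2DP/PAN) to one
+ * of the BT_8192E_2ANT_COEX_ALGO_xxx values. */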
+u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ bool bt_hson = false;
+ u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->hid_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->pan_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->a2dp_exist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP only\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "PAN(HS) only\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "PAN(EDR) only\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP ==> SCO\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ if (stack_info->num_of_hid >= 2) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID*2 + A2DP\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + A2DP ==> HID\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + HID + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP + PAN(HS)\n");
+ algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "HID + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hson) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
+
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "SCO+HID+A2DP+PAN(EDR)\n");
+ algorithm =
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
+ u8 dac_swinglvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+ /* There are several types of dac swing:
+ * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swinglvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swinglvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
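+/* Tell the BT side to decrease its Tx power level via H2C command 0x62. */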
+void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
+ u8 dec_btpwr_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = dec_btpwr_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62=0x%x\n",
+ dec_btpwr_lvl, h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
+ bool force_exec, u8 dec_btpwr_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ (force_exec? "force to":""), dec_btpwr_lvl);
+ coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ }
+ halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
+ bool enable_autoreport)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_autoreport)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_autoreport)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec? "force to":""),
+ ((enable_autoreport)? "Enabled":"Disabled"));
+ coex_dm->cur_bt_auto_report = enable_autoreport;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8192e2ant_set_bt_autoreport(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swinglvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec? "force to":""), fw_dac_swinglvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8192e2ant_setfw_dac_swinglevel(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner.
+ * After initialization, coex_dm->bt_rf0x1e_backup can be used. */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8192e2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
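+/* Program the firmware retry-penalty table via H2C 0x69 (opCode 0x6);
+ * with low_penalty_ra set, MCS7/6/5 (or OFDM54/48/36) get the reduced
+ * penalty values below. */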
+void halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra? "ON!!":"OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8192e2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex] bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra ==
+ coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+ u8 val = (u8)level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8192e2ant_setsw_fulltime_dacswing(struct btc_coexist *btcoexist,
+ bool sw_dac_swingon,
+ u32 sw_dac_swinglvl)
+{
+ if (sw_dac_swingon)
+ halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl);
+ else
+ halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18);
+}
+
+void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swingon,
+ u32 dac_swinglvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swinglvl=0x%x\n",
+ (force_exec? "force to":""),
+ ((dac_swingon)? "ON":"OFF"), dac_swinglvl);
+ coex_dm->cur_dac_swing_on = dac_swingon;
+ coex_dm->cur_dac_swing_lvl = dac_swinglvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8192e2ant_setsw_fulltime_dacswing(btcoexist, dac_swingon,
+ dac_swinglvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8192e2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+ if (adc_backoff) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+ }
+}
+
+void halbtc8192e2ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec? "force to":""), ((adc_backoff)? "ON":"OFF"));
+ coex_dm->cur_adc_back_off = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+
+ if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8192e2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x071D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ }
+}
+
+void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec? "force to":""),
+ ((agc_table_en)? "Enable":"Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, ",
+ (force_exec? "force to":""), val0x6c0);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, \n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
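+/* Select one of the predefined coex table register settings
+ * (0x6c0/0x6c4/0x6c8/0x6cc) by type index. */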
+void halbtc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5ffb5ffb, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5fdb5fdb, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5ffb5ffb, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec? "force to":""), (enable? "ON":"OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
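+/* Send the 5-byte PS TDMA parameter set to the firmware via H2C 0x60
+ * and keep a copy in coex_dm->ps_tdma_para. */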
+void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5] = {0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool btlan_constrain)
+{
+ halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+}
+
+void halbtc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swinglvl)
+{
+ halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
+ halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swinglvl);
+}
+
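+/* Turn firmware PS TDMA on with one of the predefined parameter sets
+ * (types 1-21 and 71) or turn it off; the H2C is skipped when nothing
+ * changed and force_exec is not set. */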
+void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
+ bool force_exec, bool turn_on, u8 type)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec? "force to":""), (turn_on? "ON":"OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 5:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x10);
+ break;
+ case 10:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x10);
+ break;
+ case 11:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x10);
+ break;
+ case 12:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1, 0x10);
+ break;
+ case 13:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe0, 0x10);
+ break;
+ case 14:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe0, 0x10);
+ break;
+ case 15:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf0, 0x10);
+ break;
+ case 16:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ 0x3, 0xf0, 0x10);
+ break;
+ case 17:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20,
+ 0x03, 0x10, 0x10);
+ break;
+ case 18:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ default:
+ case 0:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0,
+ 0x0, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
+ break;
+ case 1:
+ halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x8, 0x0);
+ mdelay(5);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
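+/* Switch wifi between 1SS and 2SS operation: update the rate mask,
+ * the ofdm/cck paths and the MIMO power-save setting. */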
+void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 sstype)
+{
+ u8 mimops = BTC_MIMO_PS_DYNAMIC;
+ u32 disra_mask = 0x0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], REAL set SS Type = %d\n", sstype);
+
+ disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
+ coex_dm->curra_masktype);
+ halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask);
+
+ if (sstype == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ /* switch ofdm path */
+ btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
+ btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
+ btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81111111);
+ /* switch cck patch */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
+ btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
+ mimops = BTC_MIMO_PS_STATIC;
+ } else if (sstype == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+ btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
+ btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
+ btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x0);
+ btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x41);
+ mimops = BTC_MIMO_PS_DYNAMIC;
+ }
+ /* set rx 1ss or 2ss */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
+}
+
+void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
+ bool force_exec, u8 new_sstype)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec? "force to":""), new_sstype);
+ coex_dm->cur_sstype = new_sstype;
+
+ if (!force_exec) {
+ if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+ return;
+ }
+ halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+
+ coex_dm->pre_sstype = coex_dm->cur_sstype;
+}
+
+void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset coex mechanism */
+
+ halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
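+/* Coex settings used while a BT inquiry is in progress. */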
+void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
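+/* Handle the simple cases (wifi not connected, or BT idle) with fixed
+ * settings and return true; return false when the profile-specific
+ * coex algorithm should run instead. */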
+bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool common = false, wifi_connected = false, wifi_busy = false;
+ bool bt_hson = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (bt_link_info->sco_exist || bt_link_info->hid_exist)
+ halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
+ else
+ halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ } else {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 1);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ }
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ common = true;
+ } else {
+ if (BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi connected + BT non connected-idle!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hson)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi connected + BT connected-idle!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 0);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi Connected-Busy + BT Busy!!\n");
+ common = false;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Wifi Connected-Idle + BT Busy!!\n");
+
+ halbtc8192e2ant_switch_sstype(btcoexist,
+ NORMAL_EXEC, 1);
+ halbtc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC,
+ 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
+ NORMAL_EXEC, 6);
+ halbtc8192e2ant_dec_btpwr(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ common = true;
+ }
+ }
+ }
+ return common;
+}
+
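+/* Adaptively tune the PS TDMA duty cycle. On the first run an initial
+ * TDMA type is chosen from sco_hid/tx_pause/max_interval; afterwards
+ * the BT retry count steers result towards a longer (+1) or shorter
+ * (-1) wifi slot and the TDMA type is stepped accordingly. */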
+void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static int up, dn, m, n, wait_cnt;
+ /* 0: no change, +1: increase WiFi duration,
+ * -1: decrease WiFi duration */
+ int result;
+ u8 retry_cnt = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ } else if (max_interval == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_cnt = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_cnt = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
+ result = 0;
+ wait_cnt++;
+ /* no retry in the last 2-second duration */
+ if (retry_cnt == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_cnt = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex]Increase wifi duration!!\n");
+ }
+ } else if (retry_cnt <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_cnt <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_cnt = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "Reduce wifi duration for retry<3\n");
+ }
+ } else {
+ if (wait_cnt == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_cnt = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "Decrease wifi duration for retryCounter>3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8192e2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
+ }
+
+ /* if the current PsTdma does not match the recorded one
+ * (e.g. during scan or dhcp), adjust it back to the
+ * previously recorded setting. */
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, " );
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true,
+ coex_dm->ps_tdma_du_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ }
+}
+
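+/* SCO+PAN(EDR) */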
+void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
+ }
+ }
+}
+
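+/* HID only */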
+void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
+void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+ bool long_dist = false;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW ||
+ btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
+ (wifirssi_state == BTC_RSSI_STATE_LOW ||
+ wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ long_dist = true;
+ }
+ if (long_dist) {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
+ 0x4);
+ } else {
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x8);
+ }
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (long_dist)
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ else
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if (long_dist) {
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
+ coex_dm->auto_tdma_adjust = false;
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else {
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ true, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
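+/* A2DP+PAN(HS) */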
+void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 2);
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
+ }
+ }
+}
+
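+/* PAN(EDR) only */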
+void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(HS) only */
+void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ }
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false,
+ 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
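+/* PAN(EDR)+HID */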
+void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
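+/* HID+A2DP */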
+void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_bw;
+
+ wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+ halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+
+ if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+ (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ halbtc8192e2ant_action_bt_inquiry(btcoexist);
+ return;
+ }
+
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm);
+
+ if (halbtc8192e2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
+ switch (coex_dm->cur_algorithm) {
+ case BT_8192E_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO.\n");
+ halbtc8192e2ant_action_sco(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+ halbtc8192e2ant_action_sco_pan(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID.\n");
+ halbtc8192e2ant_action_hid(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP.\n");
+ halbtc8192e2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR).\n");
+ halbtc8192e2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HS mode.\n");
+ halbtc8192e2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN+A2DP.\n");
+ halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ halbtc8192e2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ halbtc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP.\n");
+ halbtc8192e2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "Action 2-Ant, algorithm = unknown!!\n");
+ /* halbtc8192e2ant_coex_alloff(btcoexist); */
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
+void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, bool backup)
+{
+ u16 u16tmp = 0;
+ u8 u8tmp = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+
+ if (backup) {
+ /* backup rf 0x1e value */
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A,
+ 0x1e, 0xfffff);
+
+ coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
+ 0x430);
+ coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
+ 0x434);
+ coex_dm->backup_retrylimit = btcoexist->btc_read_2byte(
+ btcoexist,
+ 0x42a);
+ coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte(
+ btcoexist,
+ 0x456);
+ }
+
+ /* antenna sw ctrl to bt */
+ btcoexist->btc_write_1byte(btcoexist, 0x4f, 0x6);
+ btcoexist->btc_write_1byte(btcoexist, 0x944, 0x24);
+ btcoexist->btc_write_4byte(btcoexist, 0x930, 0x700700);
+ btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
+ if (btcoexist->chip_interface == BTC_INTF_USB)
+ btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30430004);
+ else
+ btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004);
+
+ halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ /* antenna switch control parameter */
+ btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);
+
+ /* coex parameters */
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+ /* enable counter statistics */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ /* enable PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20);
+ /* enable mailbox interface */
+ u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x40);
+ u16tmp |= BIT9;
+ btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp);
+
+ /* enable PTA I2C mailbox */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101);
+ u8tmp |= BIT4;
+ btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp);
+
+ /* enable bt clock when wifi is disabled. */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x93);
+ u8tmp |= BIT0;
+ btcoexist->btc_write_1byte(btcoexist, 0x93, u8tmp);
+ /* enable bt clock when suspend. */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x7);
+ u8tmp |= BIT0;
+ btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp);
+}
+
+/*************************************************************
+ * workaround functions start with wa_halbtc8192e2ant_
+ *************************************************************/
+
+/************************************************************
+ * extern functions start with ex_halbtc8192e2ant_
+ ************************************************************/
+
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ halbtc8192e2ant_init_hwconfig(btcoexist, true);
+}
+
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ halbtc8192e2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u16 u16tmp[4];
+ u32 u32tmp[4];
+ bool roam = false, scan = false, link = false, wifi_under_5g = false;
+ bool bt_hson = false, wifi_busy = false;
+ int wifirssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ===========[Under Manual Control]===========");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT does not exist !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsMode(HsChnl)",
+ wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+ (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+ "uplink" : "downlink")));
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+ ((coex_sta->c2h_bt_inquiry_page) ?
+ ("inquiry/page scan") :
+ ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "non-connected idle" :
+ ((BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ? "connected-idle" : "busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+ stack_info->hid_exist, stack_info->pan_exist,
+ stack_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x ",
+ GLBtInfoSrc8192e2Ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3]);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "%02x %02x %02x(%d)",
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type",
+ coex_dm->cur_sstype);
+ CL_PRINTF(cli_buf);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+ coex_dm->backup_ampdu_maxtime);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+ u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778",
+ u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)",
+ u32tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(hp rx[31:16]/tx[15:0])",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(lp rx[31:16]/tx[15:0])",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
+ halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ halbtc8192e2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ }
+}
+
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_center_chnl;
+
+ if (btcoexist->manual_control ||
+ btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only in 2.4G do we need to inform bt of the wifi channel mask */
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ &wifi_center_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_center_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_center_chnl;
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
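+/* parse the C2H BT info, update link/status flags, then rerun the coex mechanism */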
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rspSource = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
+ rspSource = tmp_buf[0] & 0xf;
+ if (rspSource >= BT_INFO_SRC_8192E_2ANT_MAX)
+ rspSource = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rspSource, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rspSource][i] = tmp_buf[i];
+ if (i == 1)
+ bt_info = tmp_buf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
+ }
+
+ if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rspSource) {
+ coex_sta->bt_retry_cnt = /* [3:0] */
+ coex_sta->bt_info_c2h[rspSource][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rspSource][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rspSource][4];
+
+ /* Here we need to resend some wifi info to BT
+ * because bt has been reset and lost the info. */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "bit1, send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ ex_halbtc8192e2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ ex_halbtc8192e2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ if (!btcoexist->manual_control &&
+ !btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "bit3, BT NOT ignore Wlan active!\n");
+ halbtc8192e2ant_IgnoreWlanAct(btcoexist,
+ FORCE_EXEC,
+ false);
+ }
+ } else {
+ /* BT already does NOT ignore Wlan active,
+ * so do nothing here. */
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (bt_info & BT_INFO_8192E_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else {/* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8192E_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ halbtc8192e2ant_update_btlink_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Non-Connected idle!!!\n");
+ } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex] StackOP Inquiry/page/pair start notify\n");
+ else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex] StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
+ ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+{
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "=======================Periodical=======================\n");
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "************************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "************************************************\n");
+ }
+
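+ /* Without BT auto report we actively poll BT info and traffic counters
+  * every period; otherwise the mechanism is rerun only on a WiFi status
+  * change or while auto TDMA adjust is active.
+  */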
+#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
+ halbtc8192e2ant_querybt_info(btcoexist);
+ halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+ halbtc8192e2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
new file mode 100644
index 000000000000..6d109edb8950
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h
@@ -0,0 +1,162 @@
+/*****************************************************************
+ * The following is for 8192E 2Ant BT Co-exist definition
+ *****************************************************************/
+#define BT_AUTO_REPORT_ONLY_8192E_2ANT 0
+
+#define BT_INFO_8192E_2ANT_B_FTP BIT7
+#define BT_INFO_8192E_2ANT_B_A2DP BIT6
+#define BT_INFO_8192E_2ANT_B_HID BIT5
+#define BT_INFO_8192E_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8192E_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8192E_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8192E_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8192E_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT 2
+
+enum bt_info_src_8192e_2ant {
+ BT_INFO_SRC_8192E_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8192E_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8192E_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8192E_2ANT_MAX
+};
+
+enum bt_8192e_2ant_bt_status {
+ BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8192E_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8192E_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8192E_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8192E_2ANT_BT_STATUS_MAX
+};
+
+enum bt_8192e_2ant_coex_algo {
+ BT_8192E_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8192E_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8192E_2ANT_COEX_ALGO_SCO_PAN = 0x2,
+ BT_8192E_2ANT_COEX_ALGO_HID = 0x3,
+ BT_8192E_2ANT_COEX_ALGO_A2DP = 0x4,
+ BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS = 0x5,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR = 0x6,
+ BT_8192E_2ANT_COEX_ALGO_PANHS = 0x7,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP = 0x8,
+ BT_8192E_2ANT_COEX_ALGO_PANEDR_HID = 0x9,
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0xa,
+ BT_8192E_2ANT_COEX_ALGO_HID_A2DP = 0xb,
+ BT_8192E_2ANT_COEX_ALGO_MAX = 0xc
+};
+
+struct coex_dm_8192e_2ant {
+ /* fw mechanism */
+ u8 pre_dec_bt_pwr;
+ u8 cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
+ u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
+ u16 backup_retrylimit;
+ u8 backup_ampdu_maxtime;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u8 pre_sstype;
+ u8 cur_sstype;
+
+ u32 prera_mask;
+ u32 curra_mask;
+ u8 curra_masktype;
+ u8 pre_arfrtype;
+ u8 cur_arfrtype;
+ u8 pre_retrylimit_type;
+ u8 cur_retrylimit_type;
+ u8 pre_ampdutime_type;
+ u8 cur_ampdutime_type;
+};
+
+struct coex_sta_8192e_2ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8192E_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8192E_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/****************************************************************
+ * The following is interface which will notify coex module.
+ ****************************************************************/
+void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpBuf, u8 length);
+void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
new file mode 100644
index 000000000000..3f5c4fd2e73f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c
@@ -0,0 +1,3780 @@
+//============================================================
+// Description:
+//
+// This file is for RTL8723A Co-exist mechanism
+//
+// History
+// 2012/08/22 Cosa first check in.
+// 2012/11/14 Cosa Revise for 8723A 2Ant out sourcing.
+//
+//============================================================
+
+//============================================================
+// include files
+//============================================================
+#include "Mp_Precomp.h"
+#if(BT_30_SUPPORT == 1)
+//============================================================
+// Global variables, these are static variables
+//============================================================
+static COEX_DM_8723A_2ANT GLCoexDm8723a2Ant;
+static PCOEX_DM_8723A_2ANT pCoexDm=&GLCoexDm8723a2Ant;
+static COEX_STA_8723A_2ANT GLCoexSta8723a2Ant;
+static PCOEX_STA_8723A_2ANT pCoexSta=&GLCoexSta8723a2Ant;
+
+const char *const GLBtInfoSrc8723a2Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+//============================================================
+// local function proto type if needed
+//============================================================
+//============================================================
+// local function start with halbtc8723a2ant_
+//============================================================
+BOOLEAN
+halbtc8723a2ant_IsWifiIdle(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE, bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if(bWifiConnected)
+ return FALSE;
+ if(bScan)
+ return FALSE;
+ if(bLink)
+ return FALSE;
+ if(bRoam)
+ return FALSE;
+
+ return true;
+}
+
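+// Map the latest BT RSSI into a 2- or 3-level state with hysteresis:
+// moving up a level requires the threshold plus a small tolerance
+// (BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT) so the state does not toggle.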
+u1Byte
+halbtc8723a2ant_BtRssiState(
+ u1Byte levelNum,
+ u1Byte rssiThresh,
+ u1Byte rssiThresh1
+ )
+{
+ s4Byte btRssi=0;
+ u1Byte btRssiState=pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return pCoexSta->preBtRssiState;
+ }
+
+ if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ }
+ else if(btRssi < rssiThresh)
+ {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(btRssi < rssiThresh1)
+ {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ }
+ else
+ {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+u1Byte
+halbtc8723a2ant_WifiRssiState(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte index,
+ IN u1Byte levelNum,
+ IN u1Byte rssiThresh,
+ IN u1Byte rssiThresh1
+ )
+{
+ s4Byte wifiRssi=0;
+ u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index];
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+
+ if(levelNum == 2)
+ {
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+ else if(levelNum == 3)
+ {
+ if(rssiThresh > rssiThresh1)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return pCoexSta->preWifiRssiState[index];
+ }
+
+ if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW))
+ {
+ if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ }
+ else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM))
+ {
+ if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT))
+ {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ }
+ else if(wifiRssi < rssiThresh)
+ {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ }
+ else
+ {
+ if(wifiRssi < rssiThresh1)
+ {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ }
+ else
+ {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+ return wifiRssiState;
+}
+
+VOID
+halbtc8723a2ant_IndicateWifiChnlBwInfo(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ u1Byte H2C_Parameter[3] ={0};
+ u4Byte wifiBw;
+ u1Byte wifiCentralChnl;
+
+ // only for 2.4G do we need to inform BT of the WiFi channel mask
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if( (BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14) )
+ {
+ H2C_Parameter[0] = 0x1;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x19=0x%x\n",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x19, 3, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_QueryBtInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; // trigger
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter);
+}
+
+u1Byte
+halbtc8723a2ant_ActionAlgorithm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ BOOLEAN bBtHsOn=FALSE, bBtBusy=FALSE, limited_dig=FALSE;
+ u1Byte algorithm=BT_8723A_2ANT_COEX_ALGO_UNDEFINED;
+ u1Byte numOfDiffProfile=0;
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ //======================
+ // here we get BT status first
+ //======================
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_IDLE;
+
+ if((pStackInfo->bScoExist) ||(bBtHsOn) ||(pStackInfo->bHidExist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ else
+ {
+ // A2dp profile
+ if( (pBtCoexist->stack_info.numOfLink == 1) &&
+ (pStackInfo->bA2dpExist) )
+ {
+ if( (pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 100)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ // Pan profile
+ if( (pBtCoexist->stack_info.numOfLink == 1) &&
+ (pStackInfo->bPanExist) )
+ {
+ if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ if(pCoexSta->lowPriorityTx)
+ {
+ if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ // Pan+A2dp profile
+ if( (pBtCoexist->stack_info.numOfLink == 2) &&
+ (pStackInfo->bA2dpExist) &&
+ (pStackInfo->bPanExist) )
+ {
+ if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ else
+ {
+ if(pCoexSta->lowPriorityTx)
+ {
+ if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n"));
+ pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE;
+ }
+ }
+ }
+ if(BT_8723A_2ANT_BT_STATUS_IDLE != pCoexDm->btStatus)
+ {
+ bBtBusy = true;
+ limited_dig = true;
+ }
+ else
+ {
+ bBtBusy = FALSE;
+ limited_dig = FALSE;
+ }
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+ //======================
+
+ if(!pStackInfo->bBtLinkExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n"));
+ return algorithm;
+ }
+
+ if(pStackInfo->bScoExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bHidExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bPanExist)
+ numOfDiffProfile++;
+ if(pStackInfo->bA2dpExist)
+ numOfDiffProfile++;
+
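+ // Map the combination of currently active BT profiles to a coex algorithm.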
+ if(numOfDiffProfile == 1)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ if(pStackInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if(pStackInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+ }
+ else if(pStackInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANHS;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 2)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if(pStackInfo->bHidExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if(pStackInfo->bA2dpExist)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else if(pStackInfo->bPanExist)
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile == 3)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bA2dpExist )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID;
+ }
+ else if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ else if( pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_SCO;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ else
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ }
+ else if(numOfDiffProfile >= 3)
+ {
+ if(pStackInfo->bScoExist)
+ {
+ if( pStackInfo->bHidExist &&
+ pStackInfo->bPanExist &&
+ pStackInfo->bA2dpExist )
+ {
+ if(bBtHsOn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"));
+ algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+BOOLEAN
+halbtc8723a2ant_NeedToDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bRet=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiConnected=FALSE;
+ s4Byte btHsRssi=0;
+
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn))
+ return FALSE;
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected))
+ return FALSE;
+ if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi))
+ return FALSE;
+
+ if(bWifiConnected)
+ {
+ if(bBtHsOn)
+ {
+ if(btHsRssi > 37)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n"));
+ bRet = true;
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n"));
+ bRet = true;
+ }
+ }
+
+ return bRet;
+}
+
+VOID
+halbtc8723a2ant_SetFwDacSwingLevel(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte dacSwingLvl
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ // There are several types of DAC swing:
+ // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+ H2C_Parameter[0] = dacSwingLvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x29=0x%x\n", H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x29, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_SetFwDecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bDecBtPwr
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ H2C_Parameter[0] = 0;
+
+ if(bDecBtPwr)
+ {
+ H2C_Parameter[0] |= BIT1;
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s, FW write 0x21=0x%x\n",
+ (bDecBtPwr? "Yes!!":"No!!"), H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x21, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_DecBtPwr(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDecBtPwr
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n",
+ (bForceExec? "force to":""), ((bDecBtPwr)? "ON":"OFF")));
+ pCoexDm->bCurDecBtPwr = bDecBtPwr;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ pCoexDm->bPreDecBtPwr, pCoexDm->bCurDecBtPwr));
+
+ if(pCoexDm->bPreDecBtPwr == pCoexDm->bCurDecBtPwr)
+ return;
+ }
+ halbtc8723a2ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->bCurDecBtPwr);
+
+ pCoexDm->bPreDecBtPwr = pCoexDm->bCurDecBtPwr;
+}
+
+VOID
+halbtc8723a2ant_FwDacSwingLvl(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u1Byte fwDacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n",
+ (bForceExec? "force to":""), fwDacSwingLvl));
+ pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl));
+
+ if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
+ return;
+ }
+
+ halbtc8723a2ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+ pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetSwRfRxLpfCorner(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ if(bRxRfShrinkOn)
+ {
+ //Shrink RF Rx LPF corner
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7);
+ }
+ else
+ {
+ //Resume RF Rx LPF corner
+ // After initialization we can use pCoexDm->btRf0x1eBackup
+ if(pBtCoexist->initilized)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_RfShrink(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bRxRfShrinkOn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF")));
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink));
+
+ if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8723a2ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+VOID
+halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ u1Byte tmpU1;
+
+ tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd);
+ tmpU1 |= BIT0;
+ if(bLowPenaltyRa)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n"));
+ tmpU1 &= ~BIT2;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n"));
+ tmpU1 |= BIT2;
+ }
+
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1);
+}
+
+VOID
+halbtc8723a2ant_LowPenaltyRa(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bLowPenaltyRa
+ )
+{
+ return; // low-penalty RA is left disabled here; the code below is not reached
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF")));
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa));
+
+ if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+VOID
+halbtc8723a2ant_SetSwFullTimeDacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bSwDacSwingOn,
+ IN u4Byte swDacSwingLvl
+ )
+{
+ if(bSwDacSwingOn)
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, swDacSwingLvl);
+ }
+ else
+ {
+ pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0);
+ }
+}
+
+
+VOID
+halbtc8723a2ant_DacSwing(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bDacSwingOn,
+ IN u4Byte dacSwingLvl
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n",
+ (bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl));
+ pCoexDm->bCurDacSwingOn = bDacSwingOn;
+ pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl,
+ pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl));
+
+ if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+ (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) )
+ return;
+ }
+ mdelay(30);
+ halbtc8723a2ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+ pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+ pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+VOID
+halbtc8723a2ant_SetAdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ if(bAdcBackOff)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a07611);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a05611);
+ }
+}
+
+VOID
+halbtc8723a2ant_AdcBackOff(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAdcBackOff
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n",
+ (bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF")));
+ pCoexDm->bCurAdcBackOff = bAdcBackOff;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff));
+
+ if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff)
+ return;
+ }
+ halbtc8723a2ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff);
+
+ pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff;
+}
+
+VOID
+halbtc8723a2ant_SetAgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ u1Byte rssiAdjustVal=0;
+
+ if(bAgcTableEn)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4e1c0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4d1d0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4c1e0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4b1f0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4a200001);
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xdc000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x90000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x51000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x12000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x00355);
+
+ rssiAdjustVal = 6;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x641c0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x631d0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x621e0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x611f0001);
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x60200001);
+
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x32000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x71000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xb0000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xfc000);
+ pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x30355);
+ }
+
+ // set rssiAdjustVal for wifi module.
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+
+VOID
+halbtc8723a2ant_AgcTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bAgcTableEn
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n",
+ (bForceExec? "force to":""), ((bAgcTableEn)? "Enable":"Disable")));
+ pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn));
+
+ if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
+ return;
+ }
+ halbtc8723a2ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+ pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+VOID
+halbtc8723a2ant_SetCoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8));
+ pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc));
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+VOID
+halbtc8723a2ant_CoexTable(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN u4Byte val0x6c0,
+ IN u4Byte val0x6c8,
+ IN u1Byte val0x6cc
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc));
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc));
+
+ if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) )
+ return;
+ }
+ halbtc8723a2ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
+VOID
+halbtc8723a2ant_SetFwIgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bEnable
+ )
+{
+ u1Byte H2C_Parameter[1] ={0};
+
+ if(bEnable)
+ {
+ H2C_Parameter[0] |= BIT0; // function enable
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n",
+ H2C_Parameter[0]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_IgnoreWlanAct(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bEnable
+ )
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec? "force to":""), (bEnable? "ON":"OFF")));
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723a2ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
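+// Send the 5 PS-TDMA parameter bytes to firmware (H2C command 0x3a)
+// and keep a copy in pCoexDm->psTdmaPara[] for the display path.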
+VOID
+halbtc8723a2ant_SetFwPstdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte byte1,
+ IN u1Byte byte2,
+ IN u1Byte byte3,
+ IN u1Byte byte4,
+ IN u1Byte byte5
+ )
+{
+ u1Byte H2C_Parameter[5] ={0};
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pCoexDm->psTdmaPara[0] = byte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4]));
+
+ pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter);
+}
+
+VOID
+halbtc8723a2ant_PsTdma(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bForceExec,
+ IN BOOLEAN bTurnOn,
+ IN u1Byte type
+ )
+{
+ u4Byte btTxRxCnt=0;
+
+ btTxRxCnt = pCoexSta->highPriorityTx+pCoexSta->highPriorityRx+
+ pCoexSta->lowPriorityTx+pCoexSta->lowPriorityRx;
+
+ if(btTxRxCnt > 3000)
+ {
+ pCoexDm->bCurPsTdmaOn = true;
+ pCoexDm->curPsTdma = 8;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], turn ON PS TDMA, type=%d for BT tx/rx counters=%d(>3000)\n",
+ pCoexDm->curPsTdma, btTxRxCnt));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type));
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+ }
+
+ if(!bForceExec)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma));
+
+ if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma) )
+ return;
+ }
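+ // Each PS TDMA "type" below selects a predefined 5-byte FW parameter set.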
+ if(pCoexDm->bCurPsTdmaOn)
+ {
+ switch(pCoexDm->curPsTdma)
+ {
+ case 1:
+ default:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+ break;
+ case 2:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+ break;
+ case 3:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+ break;
+ case 4:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0xe1, 0x80);
+ break;
+ case 5:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+ break;
+ case 6:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+ break;
+ case 7:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+ break;
+ case 8:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0x60, 0x80);
+ break;
+ case 9:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98);
+ break;
+ case 10:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98);
+ break;
+ case 11:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98);
+ break;
+ case 12:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+ break;
+ case 13:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98);
+ break;
+ case 14:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98);
+ break;
+ case 15:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98);
+ break;
+ case 16:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0x60, 0x98);
+ break;
+ case 17:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x80);
+ break;
+ case 18:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98);
+ break;
+ case 19:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0xe1, 0x98);
+ break;
+ case 20:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0x60, 0x98);
+ break;
+ }
+ }
+ else
+ {
+ // disable PS tdma
+ switch(pCoexDm->curPsTdma)
+ {
+ case 0:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ case 1:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ break;
+ default:
+ halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0);
+ break;
+ }
+ }
+
+ // update pre state
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+
+VOID
+halbtc8723a2ant_CoexAllOff(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // fw all off
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ // sw all off
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ // hw all off
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+}
+
+VOID
+halbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ // force to reset coex mechanism
+ halbtc8723a2ant_CoexTable(pBtCoexist, FORCE_EXEC, 0x55555555, 0xffff, 0x3);
+ halbtc8723a2ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, FORCE_EXEC, FALSE, 0xc0);
+}
+
+VOID
+halbtc8723a2ant_BtInquiryPage(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bLowPwrDisable=true;
+
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+}
+
+VOID
+halbtc8723a2ant_BtEnableAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BOOLEAN bWifiConnected=FALSE;
+
+ // Here we need to resend some wifi info to BT
+ // because BT has been reset and has lost the info.
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if(bWifiConnected)
+ {
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_CONNECT);
+ }
+ else
+ {
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE);
+}
+
+VOID
+halbtc8723a2ant_MonitorBtCtr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte regHPTxRx, regLPTxRx, u4Tmp;
+ u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0;
+ u1Byte u1Tmp;
+
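+ // 0x770/0x774 hold the BT high/low priority tx/rx counters
+ // (tx in the low word, rx in the high word).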
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & MASKLWORD;
+ regHPRx = (u4Tmp & MASKHWORD)>>16;
+
+ u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & MASKLWORD;
+ regLPRx = (u4Tmp & MASKHWORD)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx));
+
+ // reset counter
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc);
+}
+
+VOID
+halbtc8723a2ant_MonitorBtEnableDisable(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ static BOOLEAN bPreBtDisabled=FALSE;
+ static u4Byte btDisableCnt=0;
+ BOOLEAN bBtActive=true, bBtDisabled=FALSE;
+
+ // This function checks whether BT is disabled (all traffic counters 0, or stuck at 0xffff)
+
+ if( pCoexSta->highPriorityTx == 0 &&
+ pCoexSta->highPriorityRx == 0 &&
+ pCoexSta->lowPriorityTx == 0 &&
+ pCoexSta->lowPriorityRx == 0)
+ {
+ bBtActive = FALSE;
+ }
+ if( pCoexSta->highPriorityTx == 0xffff &&
+ pCoexSta->highPriorityRx == 0xffff &&
+ pCoexSta->lowPriorityTx == 0xffff &&
+ pCoexSta->lowPriorityRx == 0xffff)
+ {
+ bBtActive = FALSE;
+ }
+ if(bBtActive)
+ {
+ btDisableCnt = 0;
+ bBtDisabled = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n"));
+ }
+ else
+ {
+ btDisableCnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n",
+ btDisableCnt));
+ if(btDisableCnt >= 2)
+ {
+ bBtDisabled = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n"));
+ }
+ }
+ if(bPreBtDisabled != bBtDisabled)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n",
+ (bPreBtDisabled ? "disabled":"enabled"),
+ (bBtDisabled ? "disabled":"enabled")));
+ bPreBtDisabled = bBtDisabled;
+ if(!bBtDisabled)
+ {
+ halbtc8723a2ant_BtEnableAction(pBtCoexist);
+ }
+ }
+}
+
+BOOLEAN
+halbtc8723a2ant_IsCommonAction(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ BOOLEAN bCommon=FALSE, bWifiConnected=FALSE;
+ BOOLEAN bLowPwrDisable=FALSE;
+
+ if(!pStackInfo->bBtLinkExist)
+ {
+ bLowPwrDisable = FALSE;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ }
+ else
+ {
+ bLowPwrDisable = true;
+ pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ }
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt connected idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + Bt connected idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) &&
+ (BT_8723A_2ANT_BT_STATUS_NON_IDLE == pCoexDm->btStatus) )
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + BT non-idle!!\n"));
+
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+
+ bCommon = true;
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT non-idle!!\n"));
+ halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20);
+
+ bCommon = FALSE;
+ }
+
+ return bCommon;
+}
+VOID
+halbtc8723a2ant_TdmaDurationAdjust(
+ IN PBTC_COEXIST pBtCoexist,
+ IN BOOLEAN bScoHid,
+ IN BOOLEAN bTxPause,
+ IN u1Byte maxInterval
+ )
+{
+ static s4Byte up,dn,m,n,WaitCount;
+ s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+ u1Byte retryCount=0;
+
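+	// Adaptive TDMA tuning: count consecutive 2s windows with zero BT
+	// retries (up) versus windows with retries (dn), and widen or narrow
+	// the WiFi duration accordingly; m/n throttle how soon it re-widens.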
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n"));
+
+ if(pCoexDm->bResetTdmaAdjust)
+ {
+ pCoexDm->bResetTdmaAdjust = FALSE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+ {
+ if(bScoHid)
+ {
+ if(bTxPause)
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ }
+ else
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ else
+ {
+ if(bTxPause)
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ }
+ else
+ {
+ if(maxInterval == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(maxInterval == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(maxInterval == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ }
+ }
+ }
+ //============
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ else
+ {
+ //acquire the BT TRx retry count from BT_Info byte2
+ retryCount = pCoexSta->btRetryCnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
+ up, dn, m, n, WaitCount));
+ result = 0;
+ WaitCount++;
+
+ if(retryCount == 0) // no retry in the last 2-second duration
+ {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if(up >= n) // retry count has been 0 for n consecutive 2s intervals, widen the WiFi duration
+ {
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ }
+ else if (retryCount <= 3) // <=3 retry in the last 2-second duration
+ {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) // retry count < 3 for 2 consecutive 2s intervals, narrow the WiFi duration
+ {
+ if (WaitCount <= 2)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20: recheck whether to adjust the WiFi duration at most every 120s
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ }
+ else // retry count > 3: a single such interval narrows the WiFi duration
+ {
+ if (WaitCount == 1)
+ m++; // avoid bouncing back and forth between two levels
+ else
+ m = 1;
+
+ if ( m >= 20) // cap m at 20: recheck whether to adjust the WiFi duration at most every 120s
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval));
+ if(maxInterval == 1)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ }
+ }
+ }
+ else if(maxInterval == 2)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ }
+ }
+ }
+ else if(maxInterval == 3)
+ {
+ if(bTxPause)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+ else if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ }
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if(pCoexDm->curPsTdma == 5)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 6)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 7)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 8)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ if(pCoexDm->curPsTdma == 13)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 14)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 15)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 16)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ if(result == -1)
+ {
+ if(pCoexDm->curPsTdma == 1)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+ else if(pCoexDm->curPsTdma == 9)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ }
+ else if (result == 1)
+ {
+ if(pCoexDm->curPsTdma == 4)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 3)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 2)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ else if(pCoexDm->curPsTdma == 12)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 11)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ else if(pCoexDm->curPsTdma == 10)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ }
+ }
+
+	// If the current PsTdma does not match the recorded one (e.g. during
+	// scan or DHCP), adjust it back to the previously recorded value.
+ if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType)
+ {
+ BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType));
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if( !bScan && !bLink && !bRoam)
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"));
+ }
+ }
+}
+
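+// Profile-specific action handlers. Each one applies a "fw mechanism"
+// (a fixed PS TDMA case or the duration-adjust routine above) plus a
+// "sw mechanism" (AGC table, ADC back-off, DAC swing) selected from the
+// current WiFi bandwidth and RSSI state.
+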
+// SCO only or SCO+PAN(HS)
+VOID
+halbtc8723a2ant_ActionSco(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+
+VOID
+halbtc8723a2ant_ActionHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+//A2DP only / PAN(EDR) only/ A2DP+PAN(HS)
+VOID
+halbtc8723a2ant_ActionA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3);
+ }
+ else
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+
+//PAN(HS) only
+VOID
+halbtc8723a2ant_ActionPanHs(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState;
+ u4Byte wifiBw;
+
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ }
+ else
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ }
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+ else
+ {
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+ }
+
+ // sw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+//PAN(EDR)+A2DP
+VOID
+halbtc8723a2ant_ActionPanEdrA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+		wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionPanEdrHid(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1;
+ u4Byte wifiBw;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ else
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+
+ // sw mechanism
+		if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+			(wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+// HID+A2DP+PAN(EDR)
+VOID
+halbtc8723a2ant_ActionHidA2dpPanEdr(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
+VOID
+halbtc8723a2ant_ActionHidA2dp(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte wifiRssiState, wifiRssiState1, btInfoExt;
+ u4Byte wifiBw;
+
+ btInfoExt = pCoexSta->btInfoExt;
+
+ if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist))
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if(BTC_WIFI_BW_HT40 == wifiBw)
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+ }
+ }
+
+ // sw mechanism
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0);
+ wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0);
+
+ // fw mechanism
+ if( (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1);
+ }
+ }
+ else
+ {
+ if(btInfoExt&BIT0) //a2dp basic rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+ }
+ else //a2dp edr rate
+ {
+ halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1);
+ }
+ }
+
+ // sw mechanism
+ if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) )
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ else
+ {
+ halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE);
+ halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0);
+ }
+ }
+}
+
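+// Top-level dispatcher: returns early under manual control or while a BT
+// stack inquiry/page/pair operation is in progress; otherwise, once the BT
+// stack has notified its profiles, it maps the detected algorithm to the
+// matching action handler above (or applies the common/all-off handling).
+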
+VOID
+halbtc8723a2ant_RunCoexistMechanism(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ u1Byte btInfoOriginal=0, btRetryCnt=0;
+ u1Byte algorithm=0;
+
+ if(pBtCoexist->manual_control)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Manual control!!!\n"));
+ return;
+ }
+
+ if(pStackInfo->bProfileNotified)
+ {
+ if(pCoexSta->bHoldForStackOperation)
+ {
+ // if bt inquiry/page/pair, do not execute.
+ return;
+ }
+
+ algorithm = halbtc8723a2ant_ActionAlgorithm(pBtCoexist);
+ if(pCoexSta->bHoldPeriodCnt && (BT_8723A_2ANT_COEX_ALGO_PANHS!=algorithm))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex],Hold BT inquiry/page scan setting (cnt = %d)!!\n",
+ pCoexSta->bHoldPeriodCnt));
+ if(pCoexSta->bHoldPeriodCnt >= 6)
+ {
+ pCoexSta->bHoldPeriodCnt = 0;
+ // next time the coexist parameters should be reset again.
+ }
+ else
+ pCoexSta->bHoldPeriodCnt++;
+ return;
+ }
+
+ pCoexDm->curAlgorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", pCoexDm->curAlgorithm));
+ if(halbtc8723a2ant_IsCommonAction(pBtCoexist))
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n"));
+ pCoexDm->bResetTdmaAdjust = true;
+ }
+ else
+ {
+ if(pCoexDm->curAlgorithm != pCoexDm->preAlgorithm)
+ {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ pCoexDm->preAlgorithm, pCoexDm->curAlgorithm));
+ pCoexDm->bResetTdmaAdjust = true;
+ }
+ switch(pCoexDm->curAlgorithm)
+ {
+ case BT_8723A_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n"));
+ halbtc8723a2ant_ActionSco(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n"));
+ halbtc8723a2ant_ActionHid(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n"));
+ halbtc8723a2ant_ActionA2dp(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"));
+ halbtc8723a2ant_ActionPanEdr(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n"));
+ halbtc8723a2ant_ActionPanHs(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"));
+ halbtc8723a2ant_ActionPanEdrA2dp(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+ halbtc8723a2ant_ActionPanEdrHid(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+ halbtc8723a2ant_ActionHidA2dpPanEdr(pBtCoexist);
+ break;
+ case BT_8723A_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"));
+ halbtc8723a2ant_ActionHidA2dp(pBtCoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"));
+ halbtc8723a2ant_CoexAllOff(pBtCoexist);
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+ }
+}
+
+//============================================================
+// work-around functions start with wa_halbtc8723a2ant_
+//============================================================
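+// C2H hang work-around: if the BT_RSP C2H counter stops increasing for two
+// or more polling periods while register 0x1af reads non-zero, count a hang
+// event and write 0x1af back to zero.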
+VOID
+wa_halbtc8723a2ant_MonitorC2h(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u1Byte tmp1b=0x0;
+ u4Byte curC2hTotalCnt=0x0;
+ static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0;
+
+ curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_BT_RSP];
+
+ if(curC2hTotalCnt == preC2hTotalCnt)
+ {
+ sameCntPollingTime++;
+ }
+ else
+ {
+ preC2hTotalCnt = curC2hTotalCnt;
+ sameCntPollingTime = 0;
+ }
+
+ if(sameCntPollingTime >= 2)
+ {
+ tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af);
+ if(tmp1b != 0x0)
+ {
+ pCoexSta->c2hHangDetectCnt++;
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0);
+ }
+ }
+}
+
+//============================================================
+// extern functions start with EXhalbtc8723a2ant_
+//============================================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ u4Byte u4Tmp=0;
+ u1Byte u1Tmp=0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n"));
+
+ // backup rf 0x1e value
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ // Enable counter statistics
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x3);
+ pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20);
+}
+
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ halbtc8723a2ant_InitCoexDm(pBtCoexist);
+}
+
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ struct btc_board_info * pBoardInfo=&pBtCoexist->board_info;
+ PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info;
+ pu1Byte cliBuf=pBtCoexist->cli_buf;
+ u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0;
+ u4Byte u4Tmp[4];
+ BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE;
+ BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE;
+ s4Byte wifiRssi=0, btHsRssi=0;
+ u4Byte wifiBw, wifiTrafficDir;
+ u1Byte wifiDot11Chnl, wifiHsChnl;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBoardInfo->bt_exist)
+ {
+		CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT does not exist !!!");
+ CL_PRINTF(cliBuf);
+ return;
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num);
+ CL_PRINTF(cliBuf);
+
+ if(pBtCoexist->manual_control)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!");
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl, wifiHsChnl, bBtHsOn);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi, btHsRssi);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink, bRoam, bScan);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))),
+ ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink")));
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt);
+ CL_PRINTF(cliBuf);
+
+ if(pStackInfo->bProfileNotified)
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ }
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0)? "Basic rate":"EDR rate");
+ CL_PRINTF(cliBuf);
+
+ for(i=0; i<BT_INFO_SRC_8723A_2ANT_MAX; i++)
+ {
+ if(pCoexSta->btInfoC2hCnt[i])
+ {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a2Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]);
+ CL_PRINTF(cliBuf);
+ }
+ }
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \
+ pCoexSta->c2hHangDetectCnt);
+ CL_PRINTF(cliBuf);
+
+ // Sw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \
+ pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl);
+ CL_PRINTF(cliBuf);
+
+ // Fw mechanism
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ if(!pBtCoexist->manual_control)
+ {
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+ pCoexDm->bCurDecBtPwr, pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+ }
+
+ // Hw setting
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778);
+ u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783);
+ u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \
+ u1Tmp[0], u1Tmp[1], u1Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \
+ u4Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx);
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx);
+ CL_PRINTF(cliBuf);
+
+	// Check whether the Tx mgnt queue is hung: 0x41b should read 0xf (e.g. 0xd ==> hang)
+ u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \
+ u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_IPS_ENTER == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ halbtc8723a2ant_CoexAllOff(pBtCoexist);
+ }
+ else if(BTC_IPS_LEAVE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ //halbtc8723a2ant_InitCoexDm(pBtCoexist);
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_LPS_ENABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ }
+ else if(BTC_LPS_DISABLE == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_SCAN_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ }
+ else if(BTC_SCAN_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_ASSOCIATE_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ }
+ else if(BTC_ASSOCIATE_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_MEDIA_CONNECT == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+
+ halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, type);
+}
+
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(type == BTC_PACKET_DHCP)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ )
+{
+ u1Byte btInfo=0;
+ u1Byte i, rspSource=0;
+ BOOLEAN bBtBusy=FALSE, limited_dig=FALSE;
+ BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE;
+
+ pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+ rspSource = BT_INFO_SRC_8723A_2ANT_BT_RSP;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+ for(i=0; i<length; i++)
+ {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if(i == 0)
+ btInfo = tmpBuf[i];
+ if(i == length-1)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ }
+ else
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if(BT_INFO_SRC_8723A_2ANT_WIFI_FW != rspSource)
+ {
+ pCoexSta->btRetryCnt =
+ pCoexSta->btInfoC2h[rspSource][1];
+
+ pCoexSta->btRssi =
+ pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+ pCoexSta->btInfoExt =
+ pCoexSta->btInfoC2h[rspSource][3];
+ }
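+	// BT_Info C2H layout used above: byte 1 carries the BT retry count,
+	// byte 2 the raw BT RSSI (scaled as value*2 + 10), and byte 3 the
+	// extended info bits (e.g. BIT0 = A2DP basic rate).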
+
+ pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ // check BIT2 first ==> check if bt is under inquiry or page scan
+ if(btInfo & BT_INFO_8723A_2ANT_B_INQ_PAGE)
+ {
+ pCoexSta->bC2hBtInquiryPage = true;
+ }
+ else
+ {
+ pCoexSta->bC2hBtInquiryPage = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ )
+{
+ if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n"));
+ pCoexSta->bHoldForStackOperation = true;
+ pCoexSta->bHoldPeriodCnt = 1;
+ halbtc8723a2ant_BtInquiryPage(pBtCoexist);
+ }
+ else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n"));
+ pCoexSta->bHoldForStackOperation = FALSE;
+ }
+}
+
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ EXhalbtc8723a2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+VOID
+EXhalbtc8723a2ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ )
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 2Ant Periodical!!\n"));
+
+ // work around for c2h hang
+ wa_halbtc8723a2ant_MonitorC2h(pBtCoexist);
+
+ halbtc8723a2ant_QueryBtInfo(pBtCoexist);
+ halbtc8723a2ant_MonitorBtCtr(pBtCoexist);
+ halbtc8723a2ant_MonitorBtEnableDisable(pBtCoexist);
+
+ halbtc8723a2ant_RunCoexistMechanism(pBtCoexist);
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
new file mode 100644
index 000000000000..c07d3738aadc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h
@@ -0,0 +1,179 @@
+//===========================================
+// The following are the definitions for the 8723A 2-Ant BT co-exist mechanism
+//===========================================
+#define BT_INFO_8723A_2ANT_B_FTP BIT7
+#define BT_INFO_8723A_2ANT_B_A2DP BIT6
+#define BT_INFO_8723A_2ANT_B_HID BIT5
+#define BT_INFO_8723A_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723A_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723A_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723A_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723A_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT 2
+
+typedef enum _BT_INFO_SRC_8723A_2ANT{
+ BT_INFO_SRC_8723A_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723A_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723A_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723A_2ANT_MAX
+}BT_INFO_SRC_8723A_2ANT,*PBT_INFO_SRC_8723A_2ANT;
+
+typedef enum _BT_8723A_2ANT_BT_STATUS{
+ BT_8723A_2ANT_BT_STATUS_IDLE = 0x0,
+ BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723A_2ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_8723A_2ANT_BT_STATUS_MAX
+}BT_8723A_2ANT_BT_STATUS,*PBT_8723A_2ANT_BT_STATUS;
+
+typedef enum _BT_8723A_2ANT_COEX_ALGO{
+ BT_8723A_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723A_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723A_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723A_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR = 0x4,
+ BT_8723A_2ANT_COEX_ALGO_PANHS = 0x5,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP = 0x6,
+ BT_8723A_2ANT_COEX_ALGO_PANEDR_HID = 0x7,
+ BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8,
+ BT_8723A_2ANT_COEX_ALGO_HID_A2DP = 0x9,
+ BT_8723A_2ANT_COEX_ALGO_MAX
+}BT_8723A_2ANT_COEX_ALGO,*PBT_8723A_2ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723A_2ANT{
+ // fw mechanism
+ BOOLEAN bPreDecBtPwr;
+ BOOLEAN bCurDecBtPwr;
+ //BOOLEAN bPreBtLnaConstrain;
+ //BOOLEAN bCurBtLnaConstrain;
+ //u1Byte bPreBtPsdMode;
+ //u1Byte bCurBtPsdMode;
+ u1Byte preFwDacSwingLvl;
+ u1Byte curFwDacSwingLvl;
+ BOOLEAN bCurIgnoreWlanAct;
+ BOOLEAN bPreIgnoreWlanAct;
+ u1Byte prePsTdma;
+ u1Byte curPsTdma;
+ u1Byte psTdmaPara[5];
+ u1Byte psTdmaDuAdjType;
+ BOOLEAN bResetTdmaAdjust;
+ BOOLEAN bPrePsTdmaOn;
+ BOOLEAN bCurPsTdmaOn;
+ //BOOLEAN bPreBtAutoReport;
+ //BOOLEAN bCurBtAutoReport;
+
+ // sw mechanism
+ BOOLEAN bPreRfRxLpfShrink;
+ BOOLEAN bCurRfRxLpfShrink;
+ u4Byte btRf0x1eBackup;
+ BOOLEAN bPreLowPenaltyRa;
+ BOOLEAN bCurLowPenaltyRa;
+ BOOLEAN bPreDacSwingOn;
+ u4Byte preDacSwingLvl;
+ BOOLEAN bCurDacSwingOn;
+ u4Byte curDacSwingLvl;
+ BOOLEAN bPreAdcBackOff;
+ BOOLEAN bCurAdcBackOff;
+ BOOLEAN bPreAgcTableEn;
+ BOOLEAN bCurAgcTableEn;
+ u4Byte preVal0x6c0;
+ u4Byte curVal0x6c0;
+ u4Byte preVal0x6c8;
+ u4Byte curVal0x6c8;
+ u1Byte preVal0x6cc;
+ u1Byte curVal0x6cc;
+ BOOLEAN limited_dig;
+
+ // algorithm related
+ u1Byte preAlgorithm;
+ u1Byte curAlgorithm;
+ u1Byte btStatus;
+ u1Byte wifiChnlInfo[3];
+} COEX_DM_8723A_2ANT, *PCOEX_DM_8723A_2ANT;
+
+typedef struct _COEX_STA_8723A_2ANT{
+ u4Byte highPriorityTx;
+ u4Byte highPriorityRx;
+ u4Byte lowPriorityTx;
+ u4Byte lowPriorityRx;
+ u1Byte btRssi;
+ u1Byte preBtRssiState;
+ u1Byte preBtRssiState1;
+ u1Byte preWifiRssiState[4];
+ BOOLEAN bC2hBtInfoReqSent;
+ u1Byte btInfoC2h[BT_INFO_SRC_8723A_2ANT_MAX][10];
+ u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_MAX];
+ BOOLEAN bC2hBtInquiryPage;
+ u1Byte btRetryCnt;
+ u1Byte btInfoExt;
+ BOOLEAN bHoldForStackOperation;
+ u1Byte bHoldPeriodCnt;
+ // this is for c2h hang work-around
+ u4Byte c2hHangDetectCnt;
+}COEX_STA_8723A_2ANT, *PCOEX_STA_8723A_2ANT;
+
+//===========================================
+// The following are the interfaces which notify the coex module.
+//===========================================
+VOID
+EXhalbtc8723a2ant_InitHwConfig(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_InitCoexDm(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_IpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_LpsNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_ScanNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_ConnectNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_MediaStatusNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_SpecialPacketNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_HaltNotify(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_Periodical(
+ IN PBTC_COEXIST pBtCoexist
+ );
+VOID
+EXhalbtc8723a2ant_BtInfoNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN pu1Byte tmpBuf,
+ IN u1Byte length
+ );
+VOID
+EXhalbtc8723a2ant_StackOperationNotify(
+ IN PBTC_COEXIST pBtCoexist,
+ IN u1Byte type
+ );
+VOID
+EXhalbtc8723a2ant_DisplayCoexInfo(
+ IN PBTC_COEXIST pBtCoexist
+ );
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
new file mode 100644
index 000000000000..9677943fc20d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c
@@ -0,0 +1,4104 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for the RTL8723B co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ ***************************************************************/
+
+
+/***************************************************************
+ * include files
+ ***************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/***************************************************************
+ * Global variables, these are static variables
+ ***************************************************************/
+static struct coex_dm_8723b_1ant glcoex_dm_8723b_1ant;
+static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
+static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
+static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
+
+const char *const GLBtInfoSrc8723b1Ant[]={
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_1ant = 20130906;
+u32 glcoex_ver_8723b_1ant = 0x45;
+
+/***************************************************************
+ * local function proto type if needed
+ ***************************************************************/
+/***************************************************************
+ * local function start with halbtc8723b1ant_
+ ***************************************************************/
+u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 bt_rssi=0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
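+	/* Two- or three-level RSSI state machine with hysteresis: moving to a
+	 * higher state requires rssi >= thresh + tolerance, while falling back
+	 * only requires rssi < thresh, so the state does not oscillate. */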
+ if (level_num == 2){
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 wifi_rssi=0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_rate_mask)
+{
+ coex_dm->curra_mask = dis_rate_mask;
+
+ if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ &coex_dm->curra_mask);
+
+ coex_dm->prera_mask = coex_dm->curra_mask;
+}
+
+void halbtc8723b1ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ bool wifi_under_bmode = false;
+
+ coex_dm->cur_arfr_type = type;
+
+ if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
+ switch (coex_dm->cur_arfr_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_4byte(btcoexist, 0x430,
+ coex_dm->backup_arfr_cnt1);
+ btcoexist->btc_write_4byte(btcoexist, 0x434,
+ coex_dm->backup_arfr_cnt2);
+ break;
+ case 1:
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_bmode);
+ if (wifi_under_bmode) {
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x430, 0x0);
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x434, 0x01010101);
+ } else {
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x430, 0x0);
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x434, 0x04030201);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
+}
+
+void halbtc8723b1ant_retry_limit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_retry_limit_type = type;
+
+ if (force_exec || (coex_dm->pre_retry_limit_type !=
+ coex_dm->cur_retry_limit_type)) {
+
+ switch (coex_dm->cur_retry_limit_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ coex_dm->backup_retry_limit);
+ break;
+ case 1: /* retry limit=8 */
+ btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
+}
+
+void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_dm->cur_ampdu_time_type = type;
+
+ if (force_exec || (coex_dm->pre_ampdu_time_type !=
+ coex_dm->cur_ampdu_time_type)) {
+ switch (coex_dm->cur_ampdu_time_type) {
+ case 0: /* normal mode */
+ btcoexist->btc_write_1byte(btcoexist, 0x456,
+ coex_dm->backup_ampdu_max_time);
+ break;
+		case 1: /* AMPDU time = 0x38 * 32us */
+ btcoexist->btc_write_1byte(btcoexist,
+ 0x456, 0x38);
+ break;
+ default:
+ break;
+ }
+ }
+
+ coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
+}
+
+void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_maskType, u8 arfr_type,
+ u8 retry_limit_type, u8 ampdu_time_type)
+{
+ switch (ra_maskType) {
+ case 0: /* normal mode */
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0);
+ break;
+ case 1: /* disable cck 1/2 */
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+ 0x00000003);
+ break;
+ /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
+ case 2:
+ halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
+ 0x0001f1f7);
+ break;
+ default:
+ break;
+ }
+
+ halbtc8723b1ant_auto_rate_fallback_retry(btcoexist, force_exec,
+ arfr_type);
+ halbtc8723b1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+ halbtc8723b1ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type);
+}
+
+void halbtc8723b1ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool b_bt_ctrl_agg_buf_size, u8 agg_buf_size)
+{
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size;
+ u8 rxAggSize = agg_buf_size;
+
+ /**********************************************
+ * Rx Aggregation related setting
+ **********************************************/
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide BT control aggregation buf size or not */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+	/* aggregation buf size; only takes effect
+	 * when BT controls the Rx aggregation size. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
+void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
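+	/* 0x770/0x774 hold the BT high/low priority packet counters;
+	 * the low word is Tx and the high word is Rx (see the masks below). */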
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger*/
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
+bool halbtc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false;
+ static bool pre_under_4way = false, pre_bt_hs_on = false;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* work around for HS mode. */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+	    !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
+u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->hid_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->pan_exist)
+ numOfDiffProfile++;
+ if (bt_link_info->a2dp_exist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "PAN(HS) only\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile "
+ "= SCO + PAN(HS)\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile "
+ "= SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + A2DP ==> HID\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + A2DP + PAN(HS)\n");
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + "
+ "A2DP + PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "HID + A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! "
+ "BT Profile = SCO + "
+ "HID + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT Profile = "
+ "SCO + HID + A2DP + PAN(EDR)"
+ "==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+bool halbtc8723b1ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+ bool ret = false;
+ bool bt_hs_on = false, wifi_connected = false;
+ s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 35, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37)
+ ret = true;
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void halbtc8723b1ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+	/* There are several types of DAC swing level:
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr? "Yes!!":"No!!"),h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
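+	/* BT power decrease is disabled for this 1-ant path: the early
+	 * return below makes the rest of the function unreachable. */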
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8723b1ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (enable_auto_report? "Enabled!!":"Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_auto_report)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec? "force to":""),
+ ((enable_auto_report)? "Enabled":"Disabled"));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreBtAutoReport=%d, "
+ "bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b1ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b1ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
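+	/* FW DAC swing control is likewise disabled here by the early
+	 * return; the code below is kept but never executed. */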
+ return;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec? "force to":""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8723b1ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /*Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffff7);
+ } else {
+		/* Resume RF Rx LPF corner.
+		 * After initialization, restore from coex_dm->bt_rf0x1e_backup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1e, 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8723b1ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec? "force to":""),
+ ((rx_rf_shrink_on)? "ON":"OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8723b1ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36 */
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b1ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b1ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level)
+{
+ u8 val = (u8) level;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b1ant_set_sw_full_time_dac_swing(struct btc_coexist *btcoexist,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+ if (sw_dac_swing_on)
+ halbtc8723b1ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+ else
+ halbtc8723b1ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+
+void halbtc8723b1ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec,
+ bool dac_swing_on, u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""), (dac_swing_on ? "ON" : "OFF"),
+ dac_swing_lvl);
+
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, "
+ "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8723b1ant_set_sw_full_time_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b1ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+ if (adc_backoff) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
+ }
+}
+
+void halbtc8723b1ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec ? "force to" : ""), (adc_backoff ? "ON" : "OFF"));
+ coex_dm->cur_adc_backoff = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_backoff, coex_dm->cur_adc_backoff);
+
+		if (coex_dm->pre_adc_backoff == coex_dm->cur_adc_backoff)
+ return;
+ }
+ halbtc8723b1ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_backoff);
+
+ coex_dm->pre_adc_backoff =
+ coex_dm->cur_adc_backoff;
+}
+
+void halbtc8723b1ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool adc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ btcoexist->btc_set_rf_reg(btcoexist,
+ BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (adc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x3fa58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x37a58);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x2fa58);
+ rssi_adjust_val = 8;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x39258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x31258);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x29258);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ /* set rssi_adjust_val for wifi module. */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+
+void halbtc8723b1ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (adc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = adc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
+
+		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8723b1ant_set_agc_table(btcoexist, adc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6cc=0x%x\n", (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8723b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
+void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
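+	/* Pre-canned coex table presets; the four values end up in registers
+	 * 0x6c0/0x6c4/0x6c8/0x6cc via halbtc8723b1ant_coex_table(). */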
+ switch (type) {
+ case 0:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffffff, 0x3);
+ break;
+ case 1:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 2:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 3:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 4:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5aaa5aaa, 0xffffff, 0x3);
+ break;
+ case 5:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0xaaaa5a5a, 0xffffff, 0x3);
+ break;
+ case 6:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0xaaaa5a5a, 0xffffff, 0x3);
+ break;
+ case 7:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5afa5afa,
+ 0x5afa5afa, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act,"
+ " FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
+void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
+ u8 byte1, u8 byte2, u8 byte3,
+ u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5] = {0};
+ u8 real_byte1 = byte1, real_byte5 = byte5;
+ bool ap_enable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
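+	/* When WiFi acts as an AP, swap byte1 from BIT4 to BIT5 and tweak
+	 * byte5 before the parameters are sent via H2C 0x60. */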
+ if (ap_enable) {
+ if ((byte1 & BIT4) && !(byte1 & BIT5)) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], FW for 1Ant AP mode\n");
+ real_byte1 &= ~BIT4;
+ real_byte1 |= BIT5;
+
+ real_byte5 |= BIT5;
+ real_byte5 &= ~BIT6;
+ }
+ }
+
+ h2c_parameter[0] = real_byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = real_byte5;
+
+ coex_dm->ps_tdma_para[0] = real_byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = real_byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+void halbtc8723b1ant_SetLpsRpwm(struct btc_coexist *btcoexist,
+ u8 lps_val, u8 rpwm_val)
+{
+ u8 lps = lps_val;
+ u8 rpwm = rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm);
+}
+
+void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec,
+ u8 lps_val, u8 rpwm_val)
+{
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set lps/rpwm=0x%x/0x%x \n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode=0x%x , LPS-RPWM=0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
+
+ if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last=0x%x"
+ " , LPS-RPWM_Now=0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+
+ return;
+ }
+ }
+ halbtc8723b1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+void halbtc8723b1ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], SM1[ShRf/ LpRA/ LimDig/ btLna] = %d %d %d %d\n",
+ shrink_rx_lpf, low_penalty_ra, limited_dig, bt_lna_constrain);
+
+ halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+void halbtc8723b1ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], SM2[AgcT/ AdcB/ SwDacSwing(lvl)] = %d %d %d\n",
+ agc_table_shift, adc_backoff, sw_dac_swing);
+}
+
+void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
+ u8 ant_pos_type, bool init_hw_cfg,
+ bool wifi_off)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 fw_ver = 0, u32tmp = 0;
+ bool pg_ext_switch = false;
+ bool use_ext_switch = false;
+ u8 h2c_parameter[2] = {0};
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+ /* [31:16]=fw ver, [15:0]=fw sub ver */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+
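+	/* Use the external antenna switch either when the PG setting requests
+	 * it or when the WiFi firmware version is below 0xc0000. */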
+ if ((fw_ver < 0xc0000) || pg_ext_switch)
+ use_ext_switch = true;
+
+ if (init_hw_cfg){
+ /*BT select s0/s1 is controlled by WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /*Force GNT_BT to Normal */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ } else if (wifi_off) {
+ /*Force GNT_BT to High */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+ /*BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+
+ /* 0x4c[24:23]=00, Set Antenna control by BT_RFE_CTRL
+ * BT Vendor 0xac=0xf002 */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp &= ~BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ }
+
+ if (use_ext_switch) {
+ if (init_hw_cfg) {
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT) {
+ /* Main Ant to BT for IPS case 0x4c[23]=1 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*Aux Ant to BT for IPS case 0x4c[23]=1 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* fixed internal switch first*/
+ /* fixed internal switch S1->WiFi, S0->BT*/
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ else/* fixed internal switch S0->WiFi, S1->BT*/
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
+ /* ext switch setting */
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3,
+ 0x2);
+ break;
+ }
+
+ } else {
+ if (init_hw_cfg) {
+ /* 0x4c[23]=1, 0x4c[24]=0 Antenna control by 0x64*/
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp |= BIT23;
+ u32tmp &= ~BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT) {
+ /*Main Ant to WiFi for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 0; /*internal switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*Aux Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 0; /*internal switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* fixed external switch first*/
+ /*Main->WiFi, Aux->BT*/
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ else/*Main->BT, Aux->WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x2);
+
+ /* internal switch setting*/
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x0);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x280);
+ break;
+ case BTC_ANT_PATH_BT:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x280);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x0);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+			if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x200);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ 0x80);
+ break;
+ }
+ }
+}
+
+void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ bool wifi_busy = false;
+ u8 rssi_adjust_val = 0;
+
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!force_exec) {
+ if (coex_dm->cur_ps_tdma_on)
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(on, %d) *********\n",
+ coex_dm->cur_ps_tdma);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(off, %d) ********\n",
+ coex_dm->cur_ps_tdma);
+
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ default:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
+ 0x1a, 0x0, 0x50);
+ break;
+ case 1:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x3a, 0x03,
+ 0x10, 0x50);
+ else
+				halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x3a, 0x03,
+ 0x10, 0x51);
+
+ rssi_adjust_val = 11;
+ break;
+ case 2:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x2b, 0x03,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x2b, 0x03,
+ 0x10, 0x51);
+ rssi_adjust_val = 14;
+ break;
+ case 3:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
+ 0x1d, 0x0, 0x52);
+ break;
+ case 4:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x14, 0x0);
+ rssi_adjust_val = 17;
+ break;
+ case 5:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x3, 0x11, 0x10);
+ break;
+ case 6:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
+ 0x3, 0x11, 0x13);
+ break;
+ case 7:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
+ 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
+ break;
+ case 9:
+			if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x21, 0x3,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x21, 0x3,
+ 0x10, 0x50);
+ rssi_adjust_val = 18;
+ break;
+ case 10:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ if (wifi_busy)
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x15, 0x03,
+ 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51,
+ 0x15, 0x03,
+ 0x10, 0x50);
+ rssi_adjust_val = 20;
+ break;
+ case 12:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
+ 0x0a, 0x0, 0x50);
+ break;
+ case 13:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
+ 0x15, 0x0, 0x50);
+ break;
+ case 14:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
+ 0x3, 0x10, 0x52);
+ break;
+ case 15:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0x3, 0x8, 0x0);
+ break;
+ case 16:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x10, 0x0);
+ rssi_adjust_val = 18;
+ break;
+ case 18:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
+ rssi_adjust_val = 14;
+ break;
+ case 20:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+ 0x03, 0x11, 0x10);
+ break;
+ case 21:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x03, 0x11, 0x10);
+ break;
+ case 22:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+ 0x03, 0x11, 0x10);
+ break;
+ case 23:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 24:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 25:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 26:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
+ rssi_adjust_val = 22;
+ break;
+ case 27:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x98);
+ rssi_adjust_val = 22;
+ break;
+ case 28:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
+ 0x3, 0x31, 0x0);
+ break;
+ case 29:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
+ 0x1a, 0x1, 0x10);
+ break;
+ case 30:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+ 0x3, 0x10, 0x50);
+ break;
+ case 31:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
+ 0x1a, 0, 0x58);
+ break;
+ case 32:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
+ 0x3, 0x10, 0x0);
+ break;
+ case 33:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
+ 0x3, 0x30, 0x90);
+ break;
+ case 34:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
+ 0x1a, 0x0, 0x10);
+ break;
+ case 35:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
+ 0x1a, 0x0, 0x10);
+ break;
+ case 36:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
+ 0x3, 0x14, 0x50);
+ break;
+		/* SoftAP only, no STA associated, BT disabled:
+		 * TDMA mode for power saving.
+		 * Here, SoftAP mode with the screen off costs 70-80 mA on a phone. */
+ case 40:
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
+ 0x00, 0x10, 0x24);
+ break;
+ }
+ } else {
+ switch (type) {
+ case 8: /*PTA Control */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
+ false, false);
+ break;
+ case 0:
+ default: /*Software control, Antenna at BT side */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+ false, false);
+ break;
+ case 9: /*Software control, Antenna at WiFi side */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+ 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
+ false, false);
+ break;
+ }
+ }
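+	/* Note: the rssi_adjust_val chosen in the cases above is overridden
+	 * to 0 here before being reported to the WiFi module. */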
+ rssi_adjust_val = 0;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ &rssi_adjust_val);
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+void halbtc8723b1ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+
+ /* hw all off */
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	bool common = false, wifi_connected = false;
+ bool wifi_busy = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected &&
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + "
+ "BT non connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+		common = true;
+ } else if (wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ commom = true;
+ } else if (!wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + "
+ "BT connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ commom = true;
+ } else if (wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ commom = true;
+ } else if (!wifi_connected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
+ coex_dm->bt_status)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ commom = true;
+ } else {
+ if (wifi_busy)
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy"
+ " + BT Busy!!\n");
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle"
+ " + BT Busy!!\n");
+
+ commom = false;
+ }
+
+ return commom;
+}
+
+void halbtc8723b1ant_tdma_duration_adjust_for_acl(struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
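+ /* Adaptive-TDMA state kept across calls: up/dn count consecutive
+ * low-retry / high-retry observations, n is the threshold (scaled by m)
+ * that up must reach before the WiFi slot is lengthened, and wait_count
+ * counts polls since the last adjustment.
+ */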
+ static s32 up, dn, m, n, wait_count;
+ /* 0: no change, +1: increase WiFi duration,
+ * -1: decrease WiFi duration */
+ s32 result;
+ u8 retry_count = 0, bt_info_ext;
+ static bool pre_wifi_busy = false;
+ bool wifi_busy = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
+
+ if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
+ wifi_busy = true;
+ else
+ wifi_busy = false;
+
+ if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
+ wifi_status) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) {
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ }
+ return;
+ }
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_count = coex_sta->bt_retry_cnt;
+ bt_info_ext = coex_sta->bt_info_ext;
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration */
+ if (retry_count == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ }
+ } else if (retry_count <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration"
+ " for retryCounter<3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration"
+ " for retryCounter>3!!\n");
+ }
+
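+ /* Apply the result by stepping between TDMA cases 1/2/9/11:
+ * a decrease walks 1 -> 2 -> 9 -> 11, an increase walks back, and an
+ * A2DP basic-rate link in case 1/2 is moved to case 9.
+ */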
+ if (result == -1) {
+ if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||
+ (coex_dm->cur_ps_tdma == 2))) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ } else if (result == 1) {
+ if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
+ ((coex_dm->cur_ps_tdma == 1) ||
+ (coex_dm->cur_ps_tdma == 2))) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ }
+ } else { /* no change */
+ /* if the WiFi busy/idle state changed */
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC,
+ true,
+ coex_dm->cur_ps_tdma);
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex],********* TDMA(on, %d) ********\n",
+ coex_dm->cur_ps_tdma);
+ }
+
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
+ /* recover to previous adjust type */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
+ }
+ }
+}
+
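+/* Pick TDMA case 25 or 26 from the WiFi RSSI with a small hysteresis:
+ * a rising RSSI must exceed the threshold by an extra 5 before case 26 is
+ * chosen, while a falling RSSI only has to stay above the threshold.
+ */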
+u8 halbtc8723b1ant_ps_tdma_type_by_wifi_rssi(s32 wifi_rssi, s32 pre_wifi_rssi,
+ u8 wifi_rssi_thresh)
+{
+ u8 ps_tdma_type = 0;
+
+ if (wifi_rssi > pre_wifi_rssi) {
+ if (wifi_rssi > (wifi_rssi_thresh + 5))
+ ps_tdma_type = 26;
+ else
+ ps_tdma_type = 25;
+ } else {
+ if (wifi_rssi > wifi_rssi_thresh)
+ ps_tdma_type = 26;
+ else
+ ps_tdma_type = 25;
+ }
+
+ return ps_tdma_type;
+}
+
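+/* Turn psTdma off before entering or leaving LPS; when the PS state does not
+ * change, nothing needs to be done.
+ */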
+void halbtc8723b1ant_PsTdmaCheckForPowerSaveState(struct btc_coexist *btcoexist,
+ bool new_ps_state)
+{
+ u8 lps_mode = 0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if (lps_mode) { /* already under LPS state */
+ if (new_ps_state) {
+ /* keep state under LPS, do nothing. */
+ } else {
+ /* will leave LPS state, turn off psTdma first */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0);
+ }
+ } else { /* NO PS state */
+ if (new_ps_state) {
+ /* will enter LPS state, turn off psTdma first */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0);
+ } else {
+ /* keep state under NO PS state, do nothing. */
+ }
+ }
+}
+
+void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
+ u8 ps_type, u8 lps_val,
+ u8 rpwm_val)
+{
+ bool low_pwr_disable = false;
+
+ switch (ps_type) {
+ case BTC_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ break;
+ case BTC_PS_LPS_ON:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, true);
+ halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+ /* when coex force to enter LPS, do not enter 32k low power. */
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ /* power save must executed before psTdma. */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ break;
+ case BTC_PS_LPS_OFF:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8723b1ant_action_wifi_only(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+}
+
+void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+ static bool pre_bt_disabled = false;
+ static u32 bt_disable_cnt = 0;
+ bool bt_active = true, bt_disabled = false;
+
+ /* This function checks whether BT has been disabled. */
+
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
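+ /* counters stuck at 0xffff are treated as an invalid report,
+ * i.e. no real BT activity either */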
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = false;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ halbtc8723b1ant_action_wifi_only(btcoexist);
+ }
+ }
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
+ pre_bt_disabled = bt_disabled;
+ if (bt_disabled) {
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
+ NULL);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
+ NULL);
+ }
+ }
+}
+
+/***************************************************
+ *
+ * Software Coex Mechanism start
+ *
+ ***************************************************/
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state =
+ halbtc8723b1ant_wifi_rssi_state(btcoexist, 0, 2, 25, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+
+void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+
+/* PAN(HS) only */
+void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* fw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* fw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 0, 2, 25, 0);
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0);
+
+ if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ else
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ /* sw mechanism */
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ } else {
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/*****************************************************
+ *
+ * Non-Software Coex Mechanism start
+ *
+ *****************************************************/
+void halbtc8723b1ant_action_hs(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
+}
+
+void halbtc8723b1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false, ap_enable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (!wifi_connected) {
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if (bt_link_info->sco_exist || bt_link_info->hid_only) {
+ /* SCO/HID-only busy */
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else {
+ if (ap_enable)
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ else
+ halbtc8723b1ant_power_save_state(btcoexist,
+ BTC_PS_LPS_ON,
+ 0x50, 0x4);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
+void halbtc8723b1ant_action_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ /* tdma and coex table */
+
+ if (bt_link_info->sco_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ } else { /* HID */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
+ }
+}
+
+void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
+ struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ u8 bt_rssi_state;
+
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0);
+
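+ /* pick the PS-TDMA case and coex table according to the active
+ * BT profile combination */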
+ if (bt_link_info->hid_only) { /* HID */
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ wifi_status);
+ coex_dm->auto_tdma_adjust = false;
+ return;
+ } else if (bt_link_info->a2dp_only) { /* A2DP */
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_tdma_duration_adjust_for_acl(btcoexist,
+ wifi_status);
+ } else { /* for low BT RSSI */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->auto_tdma_adjust = false;
+ }
+
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) { /*HID+A2DP */
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
+ } else { /* for low BT RSSI */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->auto_tdma_adjust = false;
+ }
+
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+ /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
+ } else if (bt_link_info->pan_only ||
+ (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
+ coex_dm->auto_tdma_adjust = false;
+ /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
+ } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
+ (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ coex_dm->auto_tdma_adjust = false;
+ }
+}
+
+void halbtc8723b1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
+{
+ /* power save state */
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(
+ struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+}
+
+void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ if (bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ }
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
+void halbtc8723b1ant_action_wifi_connected_special_packet(
+ struct btc_coexist *btcoexist)
+{
+ bool hs_connecting = false;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 22);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ }
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ }
+}
+
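+/* Connected-state dispatcher: the 4-way handshake and scan/link/roam get
+ * dedicated handlers first, then the power-save mode is chosen (LPS only when
+ * not in AP mode, BT is ACL-busy and the link is not HID-only), and finally
+ * the TDMA case and coex table are selected from the BT status and the WiFi
+ * busy state.
+ */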
+void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
+{
+ bool wifi_busy = false;
+ bool scan = false, link = false, roam = false;
+ bool under_4way = false, ap_enable = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+ if (under_4way) {
+ halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), "
+ "return for wifi is under 4way<===\n");
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam) {
+ halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), "
+ "return for wifi is under scan<===\n");
+ return;
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ /* power save state */
+ if (!ap_enable &&
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status &&
+ !btcoexist->bt_link_info.hid_only)
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
+ 0x50, 0x4);
+ else
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+
+ /* tdma and coex table */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ if (!wifi_busy) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+ coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
+ }
+ } else {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
+ coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
+ coex_dm->bt_status)) {
+ halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
+ }
+ }
+}
+
+void halbtc8723b1ant_run_sw_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
+ coex_dm->cur_algorithm = algorithm;
+
+ if (!halbtc8723b1ant_is_common_action(btcoexist)) {
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO.\n");
+ halbtc8723b1ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID.\n");
+ halbtc8723b1ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP.\n");
+ halbtc8723b1ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "A2DP+PAN(HS).\n");
+ halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR).\n");
+ halbtc8723b1ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode.\n");
+ halbtc8723b1ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "PAN(EDR)+HID.\n");
+ halbtc8723b1ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "HID+A2DP+PAN.\n");
+ halbtc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP.\n");
+ halbtc8723b1ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action algorithm = "
+ "coexist All Off!!\n");
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
+void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_connected = false, bt_hs_on = false;
+ bool limited_dig = false, increase_scan_dev_num = false;
+ bool b_bt_ctrl_agg_buf_size = false;
+ u8 agg_buf_size = 5;
+ u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Stop Coex DM <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ limited_dig = true;
+ increase_scan_dev_num = true;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
+ &increase_scan_dev_num);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ } else {
+ if (wifi_connected) {
+ wifi_rssi_state =
+ halbtc8723b1ant_wifi_rssi_state(btcoexist,
+ 1, 2, 30, 0);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b1ant_limited_tx(btcoexist,
+ NORMAL_EXEC,
+ 1, 1, 1, 1);
+ } else {
+ halbtc8723b1ant_limited_tx(btcoexist,
+ NORMAL_EXEC,
+ 1, 1, 1, 1);
+ }
+ } else {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
+ 0, 0, 0, 0);
+ }
+ }
+
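+ /* limit the WiFi Rx aggregation buffer according to the active
+ * BT profile (smallest for SCO, larger for HID and A2DP/PAN) */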
+ if (bt_link_info->sco_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x3;
+ } else if (bt_link_info->hid_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x5;
+ } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) {
+ b_bt_ctrl_agg_buf_size = true;
+ agg_buf_size = 0x8;
+ }
+ halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ b_bt_ctrl_agg_buf_size, agg_buf_size);
+
+ halbtc8723b1ant_run_sw_coexist_mechanism(btcoexist);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (!wifi_connected) {
+ bool scan = false, link = false, roam = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam)
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ } else { /* wifi LPS/Busy */
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset coex mechanism */
+ halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b1ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
+void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, bool backup)
+{
+ u32 u32tmp = 0;
+ u8 u8tmp = 0;
+ u32 cnt_bt_cal_chk = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
+
+ if (backup) { /* backup RF 0x1e value */
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist,
+ BTC_RF_A, 0x1e, 0xfffff);
+
+ coex_dm->backup_arfr_cnt1 =
+ btcoexist->btc_read_4byte(btcoexist, 0x430);
+ coex_dm->backup_arfr_cnt2 =
+ btcoexist->btc_read_4byte(btcoexist, 0x434);
+ coex_dm->backup_retry_limit =
+ btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ coex_dm->backup_ampdu_max_time =
+ btcoexist->btc_read_1byte(btcoexist, 0x456);
+ }
+
+ /* WiFi goto standby while GNT_BT 0-->1 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+ /* BT goto standby while GNT_BT 1-->0 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+
+ /* BT calibration check */
+ while (cnt_bt_cal_chk <= 20) {
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
+ cnt_bt_cal_chk++;
+ if (u32tmp & BIT0) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ########### BT "
+ "calibration(cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
+ mdelay(50);
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ********** BT NOT "
+ "calibration (cnt=%d)**********\n",
+ cnt_bt_cal_chk);
+ break;
+ }
+ }
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+ /* Enable counter statistics */
+ /* 0x76e[3] = 1, WLAN_Act controlled by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+
+ /* Antenna config */
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false);
+ /* PTA parameter */
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+}
+
+void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
+{
+ /* set wlan_act to low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0);
+}
+
+/**************************************************************
+ * work around function start with wa_halbtc8723b1ant_
+ **************************************************************/
+/**************************************************************
+ * extern function start with EXhalbtc8723b1ant_
+ **************************************************************/
+
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_init_hw_config(btcoexist, true);
+}
+
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+
+ btcoexist->stop_coex_dm = false;
+
+ halbtc8723b1ant_init_coex_dm(btcoexist);
+
+ halbtc8723b1ant_query_bt_info(btcoexist);
+}
+
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, psTdmaCase = 0;
+ u16 u16tmp[4];
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[Under Manual Control]==========");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+ if (btcoexist->stop_coex_dm) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[Coex is STOPPED]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:", \
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver", \
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer", \
+ glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)", \
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", \
+ coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1],
+ coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
+ &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy":
+ (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle":
+ ((BTC_WIFI_TRAFFIC_TX==wifi_traffic_dir)?
+ "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ ((btcoexist->bt_info.bt_disabled)? ("disabled"):
+ ((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"):
+ ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)?
+ "non-connected idle":
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)?
+ "connected-idle":"busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+ bt_link_info->hid_exist, bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+ GLBtInfoSrc8723b1Ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)", \
+ ((coex_sta->under_ips? "IPS ON":"IPS OFF")),
+ ((coex_sta->under_lps? "LPS ON":"LPS OFF")),
+ btcoexist->bt_info.lps_1ant,
+ btcoexist->bt_info.rpwm_1ant);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ if (!btcoexist->manual_control) {
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", \
+ coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra,
+ btcoexist->bt_info.limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ coex_dm->cur_agc_table_en,
+ coex_dm->cur_adc_backoff,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+ "Rate Mask", btcoexist->bt_info.ra_mask);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ psTdmaCase = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x "
+ "case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ psTdmaCase, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)", \
+ coex_dm->error_condition);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+ }
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_max_time);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
+ u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+ (u32tmp[1] & 0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20)>> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
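+ /* accumulate the OFDM and CCK false-alarm counts from the counter
+ * fields read above */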
+ fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) +
+ ((u32tmp[1] & 0xffff0000) >> 16) +
+ (u32tmp[1] & 0xffff) +
+ (u32tmp[2] & 0xffff) +
+ ((u32tmp[3] & 0xffff0000) >> 16) +
+ (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+ coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+ false, true);
+ /* set PTA control */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ bool wifi_connected = false, bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ halbtc8723b1ant_query_bt_info(btcoexist);
+
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_SCAN_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ if (!wifi_connected) /* non-connected scan */
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ else /* wifi is connected */
+ halbtc8723b1ant_ActionWifiConnectedScan(btcoexist);
+ } else if (BTC_SCAN_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+ if (!wifi_connected) /* non-connected scan */
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ bool wifi_connected = false, bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_ASSOCIATE_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist);
+ } else if (BTC_ASSOCIATE_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (!wifi_connected) /* still not connected */
+ halbtc8723b1ant_action_wifi_not_connected(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_connected(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifiCentralChnl;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only for 2.4G do we need to inform BT of the WiFi channel mask */
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ &wifiCentralChnl);
+
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifiCentralChnl <= 14)) {
+ h2c_parameter[0] = 0x0;
+ h2c_parameter[1] = wifiCentralChnl;
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ bool bt_hs_on = false;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ btcoexist->bt_info.bt_disabled)
+ return;
+
+ coex_sta->special_pkt_period_cnt = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ if (coex_sta->c2h_bt_inquiry_page) {
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ return;
+ } else if (bt_hs_on) {
+ halbtc8723b1ant_action_hs(btcoexist);
+ return;
+ }
+
+ if (BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
+ halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
+ }
+}
+
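+/* Parse the BT info C2H report: cache the raw bytes, update the BT retry
+ * count/RSSI/profile flags, derive the BT busy status and then rerun the
+ * coexistence mechanism.
+ */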
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rsp_source = 0;
+ bool wifi_connected = false;
+ bool bt_busy = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
+ rsp_source = tmp_buf[0] & 0xf;
+ if (rsp_source >= BT_INFO_SRC_8723B_1ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
+ if (i == 1)
+ bt_info = tmp_buf[i];
+ if (i == length - 1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
+ }
+
+ if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0] */
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+ /* Here we need to resend some wifi info to BT
+  * because BT was reset and lost the info. */
+ if (coex_sta->bt_info_ext & BIT1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, "
+ "send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ ex_halbtc8723b1ant_media_status_notify(btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ ex_halbtc8723b1ant_media_status_notify(btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if (coex_sta->bt_info_ext & BIT3) {
+ if (!btcoexist->manual_control &&
+ !btcoexist->stop_coex_dm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT ignore Wlan active!!\n");
+ halbtc8723b1ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC,
+ false);
+ }
+ } else {
+ /* BT already does not ignore WLAN activity; nothing to do here. */
+ }
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+ if (coex_sta->bt_info_ext & BIT4) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ halbtc8723b1ant_update_bt_link_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+ /* connection exists but not busy */
+ } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+ coex_dm->auto_tdma_adjust = false;
+
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status =
+ BT_8723B_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ }
+
+ if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status))
+ bt_busy = true;
+ else
+ bt_busy = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ btcoexist->stop_coex_dm = true;
+
+ halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true);
+
+ halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
+ halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+
+ ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+
+ if (BTC_WIFI_PNP_SLEEP == pnp_state) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
+ btcoexist->stop_coex_dm = true;
+ halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+ } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
+ btcoexist->stop_coex_dm = false;
+ halbtc8723b1ant_init_hw_config(btcoexist, false);
+ halbtc8723b1ant_init_coex_dm(btcoexist);
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ }
+}
+
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *************************"
+ "***************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ "
+ "Ant Pos = %d/ %d/ %d\n", \
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n", \
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer "
+ "= %d_%x/ 0x%x/ 0x%x(%d)\n", \
+ glcoex_ver_date_8723b_1ant,
+ glcoex_ver_8723b_1ant, fw_ver,
+ bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+ halbtc8723b1ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8723b1ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust) {
+ if (coex_sta->special_pkt_period_cnt > 2)
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+ }
+
+ coex_sta->special_pkt_period_cnt++;
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
new file mode 100644
index 000000000000..5ce292f2e7c6
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h
@@ -0,0 +1,175 @@
+/**********************************************************************
+ * The following is for 8723B 1ANT BT Co-exist definition
+ **********************************************************************/
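+/* 1: rely on the BT firmware auto report (C2H); 0: poll the BT info and
+ * counters from the periodical callback instead. */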
+#define BT_AUTO_REPORT_ONLY_8723B_1ANT 1
+
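+/* bit definitions of the BT info byte reported in the C2H event */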
+#define BT_INFO_8723B_1ANT_B_FTP BIT7
+#define BT_INFO_8723B_1ANT_B_A2DP BIT6
+#define BT_INFO_8723B_1ANT_B_HID BIT5
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_) & BIT0) ? true : false)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2
+
+typedef enum _BT_INFO_SRC_8723B_1ANT {
+ BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_1ANT_MAX
+} BT_INFO_SRC_8723B_1ANT, *PBT_INFO_SRC_8723B_1ANT;
+
+typedef enum _BT_8723B_1ANT_BT_STATUS {
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_BT_STATUS_MAX
+} BT_8723B_1ANT_BT_STATUS, *PBT_8723B_1ANT_BT_STATUS;
+
+typedef enum _BT_8723B_1ANT_WIFI_STATUS {
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
+ BT_8723B_1ANT_WIFI_STATUS_MAX
+} BT_8723B_1ANT_WIFI_STATUS, *PBT_8723B_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8723B_1ANT_COEX_ALGO {
+ BT_8723B_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_1ANT_COEX_ALGO_MAX = 0xb,
+} BT_8723B_1ANT_COEX_ALGO, *PBT_8723B_1ANT_COEX_ALGO;
+
+struct coex_dm_8723b_1ant {
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_backoff;
+ bool cur_adc_backoff;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
+ u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
+ u16 backup_retry_limit;
+ u8 backup_ampdu_max_time;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ u32 prera_mask;
+ u32 curra_mask;
+ u8 pre_arfr_type;
+ u8 cur_arfr_type;
+ u8 pre_retry_limit_type;
+ u8 cur_retry_limit_type;
+ u8 pre_ampdu_time_type;
+ u8 cur_ampdu_time_type;
+
+ u8 error_condition;
+};
+
+struct coex_sta_8723b_1ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 special_pkt_period_cnt;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*************************************************************************
+ * The following is interface which will notify coex module.
+ *************************************************************************/
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length);
+void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 000000000000..d337bd0b3c1b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,4185 @@
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+#if 1
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+const char *const glbt_info_src_8723b_2ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+u32 glcoex_ver_date_8723b_2ant = 20131113;
+u32 glcoex_ver_8723b_2ant = 0x3f;
+
+/**************************************************************
+ * local function proto type if needed
+ **************************************************************/
+/**************************************************************
+ * local function start with halbtc8723b2ant_
+ **************************************************************/
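+/* Map the current BT RSSI onto a low/(medium)/high state. The thresholds
+ * are widened by BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT when leaving a lower
+ * state, so the reported state does not toggle around a threshold.
+ */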
+u8 halbtc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 halbtc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+void halbtc8723b2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist)
+{
+ static bool pre_bt_disabled;
+ static u32 bt_disable_cnt;
+ bool bt_active = true, bt_disabled = false;
+
+ /* This function checks whether BT is disabled */
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 &&
+ coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = true;
+
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
+ } else {
+ bt_disable_cnt++;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters=0, %d times!!\n",
+ bt_disable_cnt);
+ if (bt_disable_cnt >= 2) {
+ bt_disabled = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
+ }
+ }
+
+ if (pre_bt_disabled != bt_disabled) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled":"enabled"),
+ (bt_disabled ? "disabled":"enabled"));
+
+ pre_bt_disabled = bt_disabled;
+ if (!bt_disabled) {
+ } else {
+ }
+ }
+}
+
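+/* Sample the BT high/low priority TX/RX counters from 0x770/0x774 into
+ * coex_sta and then reset the hardware counters via register 0x76e.
+ */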
+void halbtc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
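+/* Ask the BT firmware to report its status: H2C 0x61 with the trigger
+ * bit set; the answer arrives later through the BT info C2H event.
+ */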
+void halbtc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ h2c_parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n",
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
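+/* Return true when the WiFi busy, 4-way handshake or BT HS status has
+ * changed since the last call, so the caller reruns the coex mechanism;
+ * only evaluated while WiFi is connected.
+ */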
+bool halbtc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false;
+ static bool pre_under_4way = false;
+ static bool pre_bt_hs_on = false;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
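+/* Refresh bt_link_info either from the BT auto report (coex_sta) or from
+ * the host BT stack, then derive the single-profile (SCO/A2DP/PAN/HID
+ * only) flags used by the algorithm selection.
+ */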
+void halbtc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* work around for HS mode. */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+#else /* profile from bt stack */
+ bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+ bt_link_info->sco_exist = stack_info->sco_exist;
+ bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+ bt_link_info->pan_exist = stack_info->pan_exist;
+ bt_link_info->hid_exist = stack_info->hid_exist;
+
+ /* workaround for Win8 stack HID report error */
+ if (!stack_info->hid_exist)
+ stack_info->hid_exist = coex_sta->hid_exist;
+ /* sync BTInfo with BT firmware and stack; when the stack HID report
+  * is wrong, use the info from the BT firmware instead. */
+ if (!stack_info->bt_link_exist)
+ stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
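+/* Translate the set of active BT profiles (SCO/HID/A2DP/PAN and HS mode)
+ * into one of the BT_8723B_2ANT_COEX_ALGO_xxx selections.
+ */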
+u8 halbtc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+ u8 num_of_diff_profile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->hid_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->pan_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->a2dp_exist)
+ num_of_diff_profile++;
+
+ if (num_of_diff_profile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP"
+ " ==> HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID"
+ " + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP +"
+ " PAN(EDR)==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+bool halbtc8723b2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
+{
+ bool ret = false;
+ bool bt_hs_on = false, wifi_connected = false;
+ s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_BL_WIFI_CONNECTED, &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist,
+ BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for HS mode!!\n");
+ ret = true;
+ }
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for Wifi is connected!!\n");
+ ret = true;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void halbtc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+ /* There are several type of dacswing
+ * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
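+/* Cache the requested "decrease BT power" setting and only send the H2C
+ * when the value changed or force_exec is set; the same pre/cur pattern
+ * is used by the other wrappers below.
+ */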
+void halbtc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec? "force to":""), (dec_bt_pwr? "ON":"OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ halbtc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+void halbtc8723b2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+ u8 h2c_parameter[1] = {0};
+ h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n",
+ (enable_auto_report? "Enabled!!":"Disabled!!"),
+ h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+void halbtc8723b2ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_auto_report)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec? "force to":""),
+ ((enable_auto_report)? "Enabled":"Disabled"));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreBtAutoReport=%d, "
+ "bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
+
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b2ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
+void halbtc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec? "force to":""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ halbtc8723b2ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner */
+ /* After initialization we can use coex_dm->bt_rf0x1e_backup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+void halbtc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec? "force to":""), (rx_rf_shrink_on? "ON":"OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ halbtc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+void halbtc8723b2ant_set_sw_penalty_txrate_adaptive(
+ struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra? "ON!!":"OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+void halbtc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ /*return; */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec? "force to":""), (low_penalty_ra? "ON":"OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, "
+ "bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ halbtc8723b2ant_set_sw_penalty_txrate_adaptive(btcoexist,
+ coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+void halbtc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+ u8 val = (u8) level;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+void halbtc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoexist,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+ if (sw_dac_swing_on)
+ halbtc8723b2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+ else
+ halbtc8723b2ant_set_dac_swing_reg(btcoexist, 0x18);
+}
+
+void halbtc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec? "force to":""),
+ (dac_swing_on? "ON":"OFF"), dac_swing_lvl);
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+ " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+void halbtc8723b2ant_set_adc_backoff(struct btc_coexist *btcoexist,
+ bool adc_backoff)
+{
+ if (adc_backoff) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1);
+ }
+}
+
+void halbtc8723b2ant_adc_backoff(struct btc_coexist *btcoexist,
+ bool force_exec, bool adc_backoff)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec? "force to":""), (adc_backoff? "ON":"OFF"));
+ coex_dm->cur_adc_back_off = adc_backoff;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n",
+ coex_dm->pre_adc_back_off,
+ coex_dm->cur_adc_back_off);
+
+ if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
+ return;
+ }
+ halbtc8723b2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off);
+
+ coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
+}
+
+void halbtc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+ }
+
+ /* RF Gain */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+ /* set rssiAdjustVal for wifi module. */
+ if (agc_table_en)
+ rssi_adjust_val = 8;
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+void halbtc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec? "force to":""),
+ (agc_table_en? "Enable":"Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ halbtc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+void halbtc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+void halbtc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (force_exec? "force to":""), val0x6c0,
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, "
+ "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+ "preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, "
+ "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+ "curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ halbtc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
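+/* Program one of the predefined coexistence table presets (type 0-12)
+ * into 0x6c0/0x6c4/0x6c8/0x6cc through halbtc8723b2ant_coex_table().
+ */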
+void halbtc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffff, 0x3);
+ break;
+ case 1:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 2:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 3:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffff, 0x3);
+ break;
+ case 4:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+ 0xffffffff, 0xffff, 0x3);
+ break;
+ case 5:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5fff5fff, 0xffff, 0x3);
+ break;
+ case 6:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 7:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 8:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 9:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 10:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aff5aff, 0xffff, 0x3);
+ break;
+ case 11:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ break;
+ case 12:
+ halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5f5f5f5f, 0xffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+void halbtc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, "
+ "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
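+/*
+ * Update the ignore-WLAN-activity setting; the firmware write is skipped
+ * when the value is unchanged, unless force_exec is set.
+ */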
+void halbtc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ halbtc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
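+/*
+ * H2C 0x60: send the five PS TDMA parameter bytes to the firmware and
+ * cache them in coex_dm->ps_tdma_para.
+ */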
+void halbtc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5] = {0};
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
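+/* software coex mechanism, part 1: RF RX LPF shrink and low-penalty rate adaptation */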
+void halbtc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ /*
+ u32 wifi_bw;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if(BTC_WIFI_BW_HT40 != wifi_bw) //only shrink RF Rx LPF for HT40
+ {
+ if (shrink_rx_lpf)
+ shrink_rx_lpf = false;
+ }
+ */
+
+ halbtc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ halbtc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
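+/* software coex mechanism, part 2: AGC table shift and software DAC swing */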
+void halbtc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ halbtc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+ /*halbtc8723b2ant_adc_backoff(btcoexist, NORMAL_EXEC, adc_backoff);*/
+ halbtc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
+}
+
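+/*
+ * Set the antenna path (WiFi at main or aux). When init_hwcfg is set, the
+ * antenna control registers are initialized and the firmware is told whether
+ * the antennas are inverted; the internal or external switch is then
+ * programmed depending on PG info and the firmware version.
+ */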
+void halbtc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ u8 antpos_type, bool init_hwcfg,
+ bool wifi_off)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 fw_ver = 0, u32tmp = 0;
+ bool pg_ext_switch = false;
+ bool use_ext_switch = false;
+ u8 h2c_parameter[2] = {0};
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+ if ((fw_ver < 0xc0000) || pg_ext_switch)
+ use_ext_switch = true;
+
+ if (init_hwcfg) {
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /* Force GNT_BT to low */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /* tell firmware "no antenna inverse" */
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /* tell firmware "antenna inverse" */
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* ext switch setting */
+ if (use_ext_switch) {
+ /* fixed internal switch S1->WiFi, S0->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ switch (antpos_type) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ /* ext switch main at wifi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ /* ext switch aux at wifi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist,
+ 0x92c, 0x3, 0x2);
+ break;
+ }
+ } else { /* internal switch */
+ /* fixed ext switch */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+ switch (antpos_type) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ /* fixed internal switch S1->WiFi, S0->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ /* fixed internal switch S0->WiFi, S1->BT */
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ break;
+ }
+ }
+}
+
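+/*
+ * Turn PS TDMA on with one of the predefined parameter sets, or turn it
+ * off; unless force_exec is set, the firmware write is skipped when the
+ * state is unchanged.
+ */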
+void halbtc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 5:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 10:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 11:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0xe1, 0x90);
+ break;
+ case 12:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 13:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 14:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 15:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0x60, 0x90);
+ break;
+ case 16:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0x60, 0x90);
+ break;
+ case 17:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+ 0x2f, 0x60, 0x90);
+ break;
+ case 18:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ case 1:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
+ break;
+ default:
+ halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
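+/*
+ * Turn off all coex mechanisms: firmware (PS TDMA, DAC swing, BT power),
+ * software mechanisms, and the hardware coex table.
+ */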
+void halbtc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ /* force to reset the coex mechanism */
+ halbtc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
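+/* coex policy while BT is doing inquiry/page scan */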
+void halbtc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool wifi_connected = false;
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (wifi_connected) {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ } else {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ coex_dm->need_recover_0x948 = true;
+ coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+ halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX,
+ false, false);
+}
+
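+/*
+ * Handle the common idle combinations of WiFi/BT state; returns true when
+ * a common policy was applied and no profile-specific action is needed.
+ */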
+bool halbtc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ bool bCommon = false, wifi_connected = false;
+ bool wifi_busy = false;
+ bool bt_hs_on = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ bCommon = true;
+ } else {
+ if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ bCommon = true;
+ } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hs_on)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false,
+ 1);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ bCommon = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + "
+ "BT Busy!!\n");
+ bCommon = false;
+ } else {
+ if (bt_hs_on)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + "
+ "BT Busy!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC,
+ 7);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC,
+ 0xb);
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ false);
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ bCommon = true;
+ }
+ }
+ }
+
+ return bCommon;
+}
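+
+/*
+ * Adaptively tune the PS TDMA duty cycle: the first run picks a starting
+ * case from sco_hid/tx_pause/max_interval, later runs step the case up or
+ * down based on the BT retry counter reported in BT_Info.
+ */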
+void halbtc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static s32 up, dn, m, n, wait_count;
+ /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ s32 result;
+ u8 retryCount = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ } else if (max_interval == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retryCount = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retryCount = %d\n", retryCount);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration */
+ if (retryCount == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ } /* <=3 retry in the last 2-second duration */
+ } else if (retryCount <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retryCounter<3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+ "for retryCounter>3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ halbtc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ halbtc8723b2ant_ps_tdma(
+ btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
+ }
+
+ /* If the current PS TDMA does not match the recorded one (e.g. during
+ * scan or DHCP), adjust it back to the previously recorded one.
+ */
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, "
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under"
+ " progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /* for SCO quality at 11b/g mode */
+ if (BTC_WIFI_BW_LEGACY == wifi_bw)
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ else /* for SCO quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
+
+ /* for voice quality */
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ }
+}
+
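+/* HID only */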
+void halbtc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ else /* for HID quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ else
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+void halbtc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
+ u8 ap_num = 0;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ wifi_rssi_state1 = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 1, 2, 40, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+
+ /* define the office environment */
+ /* the driver does not know the AP count on Linux, so this branch is never taken */
+ if (ap_num >= 10 && BTC_RSSI_HIGH(wifi_rssi_state1)) {
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
+ }
+ return;
+ }
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, false, 1);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
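+/* A2DP+PAN(HS) */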
+void halbtc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
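+/* PAN(EDR) only */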
+void halbtc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ else
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(HS) only */
+void halbtc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+void halbtc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ true, 3);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 3);
+ } else {
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 3);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x780);
+ } else {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 6);
+ halbtc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 7);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ }
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else {
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+void halbtc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 2);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist))
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ else
+ halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ halbtc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ halbtc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+void halbtc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = halbtc8723b2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ halbtc8723b2ant_action_bt_inquiry(btcoexist);
+ return;
+ } else {
+ if (coex_dm->need_recover_0x948) {
+ coex_dm->need_recover_0x948 = false;
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ coex_dm->backup_0x948);
+ }
+ }
+
+ coex_dm->cur_algorithm = algorithm;
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
+
+ if (halbtc8723b2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, "
+ "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
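+		/* dispatch to the per-profile coexistence action handler */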
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ halbtc8723b2ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ halbtc8723b2ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP.\n");
+ halbtc8723b2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP+PAN(HS).\n");
+ halbtc8723b2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR).\n");
+ halbtc8723b2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HS mode.\n");
+ halbtc8723b2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN+A2DP.\n");
+ halbtc8723b2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR)+HID.\n");
+ halbtc8723b2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP+PAN.\n");
+ halbtc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP.\n");
+ halbtc8723b2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = coexist All Off!!\n");
+ halbtc8723b2ant_coex_alloff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
+void halbtc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
+{
+ /* set wlan_act to low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+ /* Force GNT_BT to High */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+ /* BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+}
+
+/*********************************************************************
+ * work around function start with wa_halbtc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ * extern function start with EXhalbtc8723b2ant_
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ u8 u8tmp = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+ coex_dm->bt_rf0x1e_backup =
+ btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+
+ /*Antenna config */
+ halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN,
+ true, false);
+
+
+
+
+ /* PTA parameter */
+ halbtc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics */
+ /*0x76e[3] =1, WLAN_Act control by PTA*/
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ halbtc8723b2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	u8 *cli_buf = btcoexist->cli_buf;
+	u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+ u8 ap_num = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+		   ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ fw_ver/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d",
+ "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g? "5G":"2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw)? "Legacy":
+ (((BTC_WIFI_BW_HT40 == wifi_bw)? "HT40":"HT20"))),
+ ((!wifi_busy)? "idle":
+ ((BTC_WIFI_TRAFFIC_TX ==wifi_traffic_dir)?\
+ "uplink":"downlink")));
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+		   (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+	for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+				   glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+		   ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+		   ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0]&0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0]&0x20)>> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+ CL_PRINTF(cli_buf);
+
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
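+	/* sum the OFDM false-alarm counters read from 0xda0/0xda4/0xda8/0xcf0 */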
+ fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
+ ((u32tmp[1]&0xffff0000) >> 16) +
+ (u32tmp[1] & 0xffff) +
+ (u32tmp[2] & 0xffff) +
+ ((u32tmp[3]&0xffff0000) >> 16) +
+		  (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA", \
+ u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+ halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist,
+ BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ halbtc8723b2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+ halbtc8723b2ant_init_coex_dm(btcoexist);
+ halbtc8723b2ant_query_bt_info(btcoexist);
+ }
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+	u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_central_chnl;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+ /* only 2.4G we need to inform bt the chnl mask */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_central_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_central_chnl;
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length)
+{
+ u8 btInfo = 0;
+ u8 i, rsp_source = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
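+	/* low nibble of the first C2H byte gives the BT info source */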
+	rsp_source = tmpbuf[0] & 0xf;
+	if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+ if (i == 1)
+ btInfo = tmpbuf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
+ }
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "return for Manual CTRL<===\n");
+ return;
+ }
+
+ if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0]*/
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because BT was reset and lost the info. */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check,"
+ " send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ ex_halbtc8723b2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ ex_halbtc8723b2ant_media_status_notify(
+ btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT to ignore Wlan active!!\n");
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+ false);
+ } else {
+			/* BT already does not ignore WLAN activity; nothing to do. */
+ }
+#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing*/
+ } else {
+ halbtc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+ if (btInfo & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status*/
+ if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+	} else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (btInfo & BT_INFO_8723B_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
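+	/* refresh the aggregated BT link info from the parsed profile bits */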
+ halbtc8723b2ant_update_bt_link_info(btcoexist);
+
+ if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+ /* connection exists but no busy */
+ } else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+ (btInfo & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status =
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+	} else if (btInfo & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status =
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ halbtc8723b2ant_wifioff_hwcfg(btcoexist);
+ halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ ex_halbtc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ****************************"
+ "************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ "
+ "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+ board_info->btdm_ant_num, board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified)? "Yes":"No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ fw_ver/ PatchVer = "
+ "%d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
+#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ halbtc8723b2ant_query_bt_info(btcoexist);
+ halbtc8723b2ant_monitor_bt_ctr(btcoexist);
+ halbtc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (halbtc8723b2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ halbtc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
+
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 000000000000..fa3784aa70cd
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,145 @@
+/************************************************************************
+ * The following is for 8723B 2Ant BT Co-exist definition
+ ************************************************************************/
+#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
+
+
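+/* bit definitions for the BT info byte reported by BT over C2H */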
+#define BT_INFO_8723B_2ANT_B_FTP BIT7
+#define BT_INFO_8723B_2ANT_B_A2DP BIT6
+#define BT_INFO_8723B_2ANT_B_HID BIT5
+#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+
+typedef enum _BT_INFO_SRC_8723B_2ANT{
+ BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_2ANT_MAX
+}BT_INFO_SRC_8723B_2ANT,*PBT_INFO_SRC_8723B_2ANT;
+
+typedef enum _BT_8723B_2ANT_BT_STATUS{
+ BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_2ANT_BT_STATUS_MAX
+}BT_8723B_2ANT_BT_STATUS,*PBT_8723B_2ANT_BT_STATUS;
+
+typedef enum _BT_8723B_2ANT_COEX_ALGO{
+ BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
+}BT_8723B_2ANT_COEX_ALGO,*PBT_8723B_2ANT_COEX_ALGO;
+
+struct coex_dm_8723b_2ant {
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 ps_tdma_du_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ bool need_recover_0x948;
+ u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is interface which will notify coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
new file mode 100644
index 000000000000..c4e83773ec98
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/*#if(BT_30_SUPPORT == 1)*/
+#if 1
+/***********************************************
+ * Global variables
+ ***********************************************/
+const char *const bt_profile_string[] = {
+ "NONE",
+ "A2DP",
+ "PAN",
+ "HID",
+ "SCO",
+};
+
+const char *const bt_spec_string[] = {
+ "1.0b",
+ "1.1",
+ "1.2",
+ "2.0+EDR",
+ "2.1+EDR",
+ "3.0+HS",
+ "4.0",
+};
+
+const char *const bt_link_role_string[] = {
+ "Master",
+ "Slave",
+};
+
+const char *const h2c_state_string[] = {
+ "successful",
+ "h2c busy",
+ "rf off",
+ "fw not read",
+};
+
+const char *const io_state_string[] = {
+ "IO_STATUS_SUCCESS",
+ "IO_STATUS_FAIL_CANNOT_IO",
+ "IO_STATUS_FAIL_RF_OFF",
+ "IO_STATUS_FAIL_FW_READ_CLEAR_TIMEOUT",
+ "IO_STATUS_FAIL_WAIT_IO_EVENT_TIMEOUT",
+ "IO_STATUS_INVALID_LEN",
+ "IO_STATUS_IO_IDLE_QUEUE_EMPTY",
+ "IO_STATUS_IO_INSERT_WAIT_QUEUE_FAIL",
+ "IO_STATUS_UNKNOWN_FAIL",
+ "IO_STATUS_WRONG_LEVEL",
+ "IO_STATUS_H2C_STOPPED",
+};
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+u8 btc_dbg_buf[100];
+
+/***************************************************
+ * Debug related function
+ ***************************************************/
+bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+ if (!btcoexist->binded || NULL == btcoexist->adapter)
+ return false;
+
+ return true;
+}
+
+bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+	return rtlpriv->link_info.b_busytraffic;
+}
+
+
+void halbtc_dbg_init(void)
+{
+ u8 i;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ btc_dbg_type[i] = 0;
+
+ btc_dbg_type[BTC_MSG_INTERFACE] = \
+// INTF_INIT |
+// INTF_NOTIFY |
+ 0;
+
+ btc_dbg_type[BTC_MSG_ALGORITHM] = \
+// ALGO_BT_RSSI_STATE |
+// ALGO_WIFI_RSSI_STATE |
+// ALGO_BT_MONITOR |
+// ALGO_TRACE |
+// ALGO_TRACE_FW |
+// ALGO_TRACE_FW_DETAIL |
+// ALGO_TRACE_FW_EXEC |
+// ALGO_TRACE_SW |
+// ALGO_TRACE_SW_DETAIL |
+// ALGO_TRACE_SW_EXEC |
+ 0;
+}
+
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+ return true;
+}
+
+bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool is_ht40 = true;
+ enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ is_ht40 = false;
+ else if (bw == HT_CHANNEL_WIDTH_20_40)
+ is_ht40 = true;
+
+ return is_ht40;
+}
+
+bool halbtc_legacy(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ bool is_legacy = false;
+
+	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+ is_legacy = true;
+
+ return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+
+ if (rtlpriv->link_info.b_tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
+
+u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv =
+ (struct rtl_priv *)btcoexist->adapter;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+	if (halbtc_is_bt40(rtlpriv)) {
+		wifi_bw = BTC_WIFI_BW_HT40;
+	} else {
+		if (halbtc_legacy(rtlpriv))
+ wifi_bw = BTC_WIFI_BW_LEGACY;
+ else
+ wifi_bw = BTC_WIFI_BW_HT20;
+ }
+ return wifi_bw;
+}
+
+u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 chnl = 1;
+
+
+ if (rtlphy->current_channel != 0)
+ chnl = rtlphy->current_channel;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "halbtc_get_wifi_central_chnl:%d\n",chnl);
+ return chnl;
+}
+
+void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ printk("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = false;
+}
+
+void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+ printk("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+	btcoexist->bt_info.bt_lps_on = true;
+}
+
+void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+ if (btcoexist->bt_info.bt_ctrl_lps) {
+ btcoexist->bt_info.bt_lps_on = false;
+ btcoexist->bt_info.bt_ctrl_lps = false;
+ }
+
+}
+
+void halbtc_leave_low_power(void)
+{
+}
+
+void halbtc_nomal_low_power(void)
+{
+}
+
+void halbtc_disable_low_power(void)
+{
+}
+
+void halbtc_aggregation_check(void)
+{
+}
+
+
+u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+ return 0;
+}
+
+s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ s32 undecorated_smoothed_pwdb = 0;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ else /* associated entry pwdb */
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ return undecorated_smoothed_pwdb;
+}
+
+bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	bool *bool_tmp = (bool *)out_buf;
+	int *s32_tmp = (int *)out_buf;
+	u32 *u32_tmp = (u32 *)out_buf;
+	u8 *u8_tmp = (u8 *)out_buf;
+ bool tmp = false;
+
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+
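+	/* map generic coex queries onto rtlwifi driver and PHY state */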
+	switch (get_type) {
+ case BTC_GET_BL_HS_OPERATION:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_HS_CONNECTING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_CONNECTED:
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ tmp = true;
+
+ *bool_tmp = tmp;
+ break;
+ case BTC_GET_BL_WIFI_BUSY:
+		if (halbtc_is_wifi_busy(rtlpriv))
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_SCAN:
+		if (mac->act_scanning)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_LINK:
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ROAM: /*TODO*/
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
+ *bool_tmp = false;
+
+ break;
+	case BTC_GET_BL_WIFI_UNDER_5G:
+		*bool_tmp = false; /*TODO*/
+		break;
+	case BTC_GET_BL_WIFI_DHCP: /*TODO*/
+		break;
+ case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+ if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+ *bool_tmp = false;
+ else
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_UNDER_B_MODE:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_EXT_SWITCH:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_S4_WIFI_RSSI:
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_S4_HS_RSSI: /*TODO*/
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_U4_WIFI_BW:
+ *u32_tmp = halbtc_get_wifi_bw(btcoexist);
+ break;
+ case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+ if (halbtc_is_wifi_uplink(rtlpriv))
+ *u32_tmp = BTC_WIFI_TRAFFIC_TX;
+ else
+ *u32_tmp = BTC_WIFI_TRAFFIC_RX;
+ break;
+ case BTC_GET_U4_WIFI_FW_VER:
+ *u32_tmp = rtlhal->fw_version;
+ break;
+ case BTC_GET_U4_BT_PATCH_VER:
+ *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_DOT11_CHNL:
+ *u8_tmp = rtlphy->current_channel;
+ break;
+ case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+ *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_HS_CHNL:
+ *u8_tmp = 1;/* BT_OperateChnl(rtlpriv); */
+ break;
+ case BTC_GET_U1_MAC_PHY_MODE:
+ *u8_tmp = BTC_MP_UNKNOWN;
+ break;
+ case BTC_GET_U1_AP_NUM:
+ /* driver don't know AP num in Linux,
+ * So, the return value here is not right */
+ *u8_tmp = 1;/* pDefMgntInfo->NumBssDesc4Query; */
+ break;
+
+ /************* 1Ant **************/
+ case BTC_GET_U1_LPS_MODE:
+ *u8_tmp = btcoexist->pwr_mode_val[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ bool *bool_tmp = (bool *)in_buf;
+ u8 *u8_tmp = (u8 *)in_buf;
+ u32 *u32_tmp = (u32 *)in_buf;
+
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (set_type) {
+ /* set some bool type variables. */
+ case BTC_SET_BL_BT_DISABLE:
+ btcoexist->bt_info.bt_disabled = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_TRAFFIC_BUSY:
+ btcoexist->bt_info.bt_busy = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_LIMITED_DIG:
+ btcoexist->bt_info.limited_dig = *bool_tmp;
+ break;
+ case BTC_SET_BL_FORCE_TO_ROAM:
+ btcoexist->bt_info.force_to_roam = *bool_tmp;
+ break;
+ case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+ btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+ btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+ break;
+ case BTC_SET_BL_INC_SCAN_DEV_NUM:
+ btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+ break;
+ /* set some u1Byte type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+ btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+ break;
+ case BTC_SET_U1_AGG_BUF_SIZE:
+ btcoexist->bt_info.agg_buf_size = *u8_tmp;
+ break;
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_GET_BT_RSSI:
+ /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+ break;
+ case BTC_SET_ACT_AGGREGATE_CTRL:
+ halbtc_aggregation_check();
+ break;
+
+ /* 1Ant */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+ btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+ break;
+ case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+ /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
+ break;
+ case BTC_SET_U1_1ANT_LPS:
+ btcoexist->bt_info.lps_1ant = *u8_tmp;
+ break;
+ case BTC_SET_U1_1ANT_RPWM:
+ btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+ break;
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_LEAVE_LPS:
+ halbtc_leave_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_ENTER_LPS:
+ halbtc_enter_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_NORMAL_LPS:
+ halbtc_normal_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_DISABLE_LOW_POWER:
+ halbtc_disable_low_power();
+ break;
+ case BTC_SET_ACT_UPDATE_ra_mask:
+ btcoexist->bt_info.ra_mask = *u32_tmp;
+ break;
+ case BTC_SET_ACT_SEND_MIMO_PS:
+ break;
+ case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+ btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+ break;
+ case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+ break;
+ case BTC_SET_ACT_CTRL_BT_COEX:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ * IO related function
+ ************************************************************/
+u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_byte(rtlpriv, reg_addr);
+}
+
+
+u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_word(rtlpriv, reg_addr);
+}
+
+
+u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_dword(rtlpriv, reg_addr);
+}
+
+
+void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+ u8 bit_mask, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 original_value, bit_shift = 0;
+ u8 i;
+
+ if (bit_mask != MASKDWORD) {/*if not "double word" write*/
+ original_value = rtl_read_byte(rtlpriv, reg_addr);
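+		/* find the lowest set bit of the mask to position the data */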
+		for (i = 0; i <= 7; i++) {
+			if ((bit_mask >> i) & 0x1)
+				break;
+		}
+ bit_shift = i;
+ data = (original_value & (~bit_mask)) |
+ ((data << bit_shift) & bit_mask);
+ }
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+ struct btc_coexist *btcoexist =
+ (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+
+void halbtc_set_macreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_macreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+	return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+
+void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+
+u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+
+void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
+ cmd_len, cmd_buf);
+}
+
+void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_FW_VER:
+ halbtc_display_bt_fw_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+ halbtc_display_fw_pwr_mode_cmd(btcoexist);
+ break;
+ default:
+ break;
+ }
+}
+
+bool halbtc_under_ips(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ enum rf_pwrstate rtstate;
+
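+	/* under IPS when inactive power save has turned the RF off */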
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*****************************************************************
+ * Extern functions called by other module
+ *****************************************************************/
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ btcoexist->statistics.cnt_bind++;
+
+ halbtc_dbg_init();
+
+ if (btcoexist->binded)
+ return false;
+ else
+ btcoexist->binded = true;
+
+ btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+
+ if (NULL == btcoexist->adapter)
+ btcoexist->adapter = adapter;
+
+ btcoexist->stack_info.profile_notified = false;
+
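+	/* wire the driver's register/RF/H2C accessors into the coex object */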
+ btcoexist->btc_read_1byte = halbtc_read_1byte;
+ btcoexist->btc_write_1byte = halbtc_write_1byte;
+ btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+ btcoexist->btc_read_2byte = halbtc_read_2byte;
+ btcoexist->btc_write_2byte = halbtc_write_2byte;
+ btcoexist->btc_read_4byte = halbtc_read_4byte;
+ btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+ btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+ btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+ btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+ btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+ btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+
+ btcoexist->btc_get = halbtc_get;
+ btcoexist->btc_set = halbtc_set;
+
+ btcoexist->cli_buf = &btc_dbg_buf[0];
+
+ btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+ btcoexist->bt_info.agg_buf_size = 5;
+
+ btcoexist->bt_info.increase_scan_dev_num = false;
+ return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_hw_config++;
+
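+	/* hand off to the chip- and antenna-specific init routine */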
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_init_hwconfig(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_init_hwconfig(btcoexist);
+ }
+
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_coex_dm++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_init_coex_dm(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_init_coex_dm(btcoexist);
+ }
+
+ btcoexist->initilized = true;
+}
+
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 ips_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_ips_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (ERFOFF == type)
+ ips_type = BTC_IPS_ENTER;
+ else
+ ips_type = BTC_IPS_LEAVE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_ips_notify(btcoexist, ips_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_ips_notify(btcoexist, ips_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 lps_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_lps_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (EACTIVE == type)
+ lps_type = BTC_LPS_DISABLE;
+ else
+ lps_type = BTC_LPS_ENABLE;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_lps_notify(btcoexist, lps_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_lps_notify(btcoexist, lps_type);
+ }
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 scan_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_scan_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (type)
+ scan_type = BTC_SCAN_START;
+ else
+ scan_type = BTC_SCAN_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_scan_notify(btcoexist, scan_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_scan_notify(btcoexist, scan_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 asso_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_connect_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (action)
+ asso_type = BTC_ASSOCIATE_START;
+ else
+ asso_type = BTC_ASSOCIATE_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+		else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_connect_notify(btcoexist, asso_type);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_connect_notify(btcoexist, asso_type);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum rt_media_status media_status)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 status;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_media_status_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (RT_MEDIA_CONNECT == media_status)
+ status = BTC_MEDIA_CONNECT;
+ else
+ status = BTC_MEDIA_DISCONNECT;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_media_status_notify(btcoexist, status);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_media_status_notify(btcoexist, status);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_media_status_notify(btcoexist, status);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+ u8 packet_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_special_packet_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ /*if(PACKET_DHCP == pkt_type)*/
+ packet_type = BTC_PACKET_DHCP;
+ /*else if(PACKET_EAPOL == pkt_type)
+ packet_type = BTC_PACKET_EAPOL;
+ else
+ packet_type = BTC_PACKET_UNKNOWN;*/
+
+ halbtc_leave_low_power();
+
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+ packet_type);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_special_packet_notify(btcoexist,
+ packet_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_bt_info_notify++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_bt_info_notify(btcoexist, tmp_buf, length);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ /* ex_halbtc8192e2ant_bt_info_notify(btcoexist, tmp_buf, length); */
+ }
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ u8 stack_op_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_stack_operation_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ stack_op_type = BTC_STACK_OP_NONE;
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_halt_notify(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_halt_notify(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_halt_notify(btcoexist);
+ }
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_periodical++;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_periodical(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_periodical(btcoexist);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) {
+ ex_halbtc8192e2ant_periodical(btcoexist);
+ }
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+ u8 code, u8 len, u8 *data)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+ btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+ gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+ switch (chip_type) {
+ default:
+ case BT_2WIRE:
+ case BT_ISSC_3WIRE:
+ case BT_ACCEL:
+ case BT_RTL8756:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ break;
+ case BT_CSR_BC4:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ break;
+ case BT_CSR_BC8:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ break;
+ case BT_RTL8723A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ break;
+ case BT_RTL8821A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ break;
+ case BT_RTL8723B:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ break;
+ }
+}
+
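+/*
+ * BT_COEX_ANT_TYPE_PG reports the antenna count taken from the hardware
+ * PG (efuse) info and initialises both pg_ant_num and btdm_ant_num;
+ * BT_COEX_ANT_TYPE_ANTDIV only overrides btdm_ant_num, i.e. the number
+ * of antennas currently used for BT coexistence.
+ */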
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+ if (BT_COEX_ANT_TYPE_PG == type) {
+ gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ }
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (btcoexist->board_info.btdm_ant_num == 2)
+ ex_halbtc8723b2ant_display_coex_info(btcoexist);
+ else if (btcoexist->board_info.btdm_ant_num == 1)
+ ex_halbtc8723b1ant_display_coex_info(btcoexist);
+}
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
new file mode 100644
index 000000000000..787798e76217
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,549 @@
+#ifndef __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include "../wifi.h"
+
+#define NORMAL_EXEC false
+#define FORCE_EXEC true
+
+#define BTC_RF_A RF90_PATH_A
+#define BTC_RF_B RF90_PATH_B
+#define BTC_RF_C RF90_PATH_C
+#define BTC_RF_D RF90_PATH_D
+
+#define BTC_SMSP SINGLEMAC_SINGLEPHY
+#define BTC_DMDP DUALMAC_DUALPHY
+#define BTC_DMSP DUALMAC_SINGLEPHY
+#define BTC_MP_UNKNOWN 0xff
+
+#define IN
+#define OUT
+
+#define BT_TMP_BUF_SIZE 100
+
+#define BT_COEX_ANT_TYPE_PG 0
+#define BT_COEX_ANT_TYPE_ANTDIV 1
+#define BT_COEX_ANT_TYPE_DETECTED 2
+
+#define BTC_MIMO_PS_STATIC 0
+#define BTC_MIMO_PS_DYNAMIC 1
+
+#define BTC_RATE_DISABLE 0
+#define BTC_RATE_ENABLE 1
+
+/* single Antenna definition */
+#define BTC_ANT_PATH_WIFI 0
+#define BTC_ANT_PATH_BT 1
+#define BTC_ANT_PATH_PTA 2
+/* dual Antenna definition */
+#define BTC_ANT_WIFI_AT_MAIN 0
+#define BTC_ANT_WIFI_AT_AUX 1
+/* coupler Antenna definition */
+#define BTC_ANT_WIFI_AT_CPL_MAIN 0
+#define BTC_ANT_WIFI_AT_CPL_AUX 1
+
+enum btc_chip_interface {
+ BTC_INTF_UNKNOWN = 0,
+ BTC_INTF_PCI = 1,
+ BTC_INTF_USB = 2,
+ BTC_INTF_SDIO = 3,
+ BTC_INTF_GSPI = 4,
+ BTC_INTF_MAX
+};
+
+enum btc_chip_type {
+ BTC_CHIP_UNDEF = 0,
+ BTC_CHIP_CSR_BC4 = 1,
+ BTC_CHIP_CSR_BC8 = 2,
+ BTC_CHIP_RTL8723A = 3,
+ BTC_CHIP_RTL8821 = 4,
+ BTC_CHIP_RTL8723B = 5,
+ BTC_CHIP_MAX
+};
+
+enum btc_msg_type {
+ BTC_MSG_INTERFACE = 0x0,
+ BTC_MSG_ALGORITHM = 0x1,
+ BTC_MSG_MAX
+};
+
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT BIT0
+#define INTF_NOTIFY BIT2
+
+/* following is for BTC_ALGORITHM */
+#define ALGO_BT_RSSI_STATE BIT0
+#define ALGO_WIFI_RSSI_STATE BIT1
+#define ALGO_BT_MONITOR BIT2
+#define ALGO_TRACE BIT3
+#define ALGO_TRACE_FW BIT4
+#define ALGO_TRACE_FW_DETAIL BIT5
+#define ALGO_TRACE_FW_EXEC BIT6
+#define ALGO_TRACE_SW BIT7
+#define ALGO_TRACE_SW_DETAIL BIT8
+#define ALGO_TRACE_SW_EXEC BIT9
+
+
+
+#define CL_SPRINTF snprintf
+#define CL_PRINTF printk
+
+#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(KERN_DEBUG "%s: ", __func__); \
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)(_ptr); \
+ printk printstr; \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", __ptr[__i], (__i == 5) ? "" : "-"); \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_hexdata; \
+ printk(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ printk("%02X%s", __ptr[__i], \
+ (((__i + 1) % 4) == 0) ? " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0)
+
+
+#define BTC_RSSI_HIGH(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_HIGH || \
+ _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
+
+#define BTC_RSSI_MEDIUM(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_MEDIUM || \
+ _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false)
+
+#define BTC_RSSI_LOW(_rssi_) \
+ ((_rssi_ == BTC_RSSI_STATE_LOW || \
+ _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false)
+
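+/*
+ * Illustrative use of the debug and RSSI-state helpers above; the
+ * variable names here are examples only, not definitions from this
+ * header:
+ *
+ *   BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ *             "[BTCoex], BT rssi state = %d\n", bt_rssi_state);
+ *
+ *   if (BTC_RSSI_HIGH(bt_rssi_state))
+ *           reduce_bt_tx_power = true;
+ */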
+
+enum btc_power_save_type {
+ BTC_PS_WIFI_NATIVE = 0,
+ BTC_PS_LPS_ON = 1,
+ BTC_PS_LPS_OFF = 2,
+ BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+ /* The following is some board information */
+ u8 bt_chip_type;
+ u8 pg_ant_num; /* pg ant number */
+ u8 btdm_ant_num; /* ant number for btdm */
+ u8 btdm_ant_pos;
+ bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+ BTC_DBG_SET_COEX_NORMAL = 0x0,
+ BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+ BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+ BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+ BTC_RSSI_STATE_HIGH = 0x0,
+ BTC_RSSI_STATE_MEDIUM = 0x1,
+ BTC_RSSI_STATE_LOW = 0x2,
+ BTC_RSSI_STATE_STAY_HIGH = 0x3,
+ BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+ BTC_RSSI_STATE_STAY_LOW = 0x5,
+ BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+ BTC_ROLE_STATION = 0x0,
+ BTC_ROLE_AP = 0x1,
+ BTC_ROLE_IBSS = 0x2,
+ BTC_ROLE_HS_MODE = 0x3,
+ BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+ BTC_WIFI_BW_LEGACY = 0x0,
+ BTC_WIFI_BW_HT20 = 0x1,
+ BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+ BTC_WIFI_TRAFFIC_TX = 0x0,
+ BTC_WIFI_TRAFFIC_RX = 0x1,
+ BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+ BTC_WIFI_PNP_WAKE_UP = 0x0,
+ BTC_WIFI_PNP_SLEEP = 0x1,
+ BTC_WIFI_PNP_MAX
+};
+
+
+enum btc_get_type {
+ /* type bool */
+ BTC_GET_BL_HS_OPERATION,
+ BTC_GET_BL_HS_CONNECTING,
+ BTC_GET_BL_WIFI_CONNECTED,
+ BTC_GET_BL_WIFI_BUSY,
+ BTC_GET_BL_WIFI_SCAN,
+ BTC_GET_BL_WIFI_LINK,
+ BTC_GET_BL_WIFI_DHCP,
+ BTC_GET_BL_WIFI_SOFTAP_IDLE,
+ BTC_GET_BL_WIFI_SOFTAP_LINKING,
+ BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+ BTC_GET_BL_WIFI_ROAM,
+ BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ BTC_GET_BL_WIFI_UNDER_5G,
+ BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ BTC_GET_BL_EXT_SWITCH,
+
+ /* type s4Byte */
+ BTC_GET_S4_WIFI_RSSI,
+ BTC_GET_S4_HS_RSSI,
+
+ /* type u32 */
+ BTC_GET_U4_WIFI_BW,
+ BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ BTC_GET_U4_WIFI_FW_VER,
+ BTC_GET_U4_BT_PATCH_VER,
+
+ /* type u1Byte */
+ BTC_GET_U1_WIFI_DOT11_CHNL,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ BTC_GET_U1_WIFI_HS_CHNL,
+ BTC_GET_U1_MAC_PHY_MODE,
+ BTC_GET_U1_AP_NUM,
+
+ /* for 1Ant */
+ BTC_GET_U1_LPS_MODE,
+ BTC_GET_BL_BT_SCO_BUSY,
+
+ /* for test mode */
+ BTC_GET_DRIVER_TEST_CFG,
+#if 0
+ BTC_GET_U1_LPS,
+ BTC_GET_U1_RPWM,
+#endif
+ BTC_GET_MAX
+};
+
+
+enum btc_set_type {
+ /* type bool */
+ BTC_SET_BL_BT_DISABLE,
+ BTC_SET_BL_BT_TRAFFIC_BUSY,
+ BTC_SET_BL_BT_LIMITED_DIG,
+ BTC_SET_BL_FORCE_TO_ROAM,
+ BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+ /* type u1Byte */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ BTC_SET_UI_SCAN_SIG_COMPENSATION,
+ BTC_SET_U1_AGG_BUF_SIZE,
+
+ /* type trigger some action */
+ BTC_SET_ACT_GET_BT_RSSI,
+ BTC_SET_ACT_AGGREGATE_CTRL,
+
+ /********* for 1Ant **********/
+ /* type bool */
+ BTC_SET_BL_BT_SCO_BUSY,
+ /* type u1Byte */
+ BTC_SET_U1_1ANT_LPS,
+ BTC_SET_U1_1ANT_RPWM,
+ /* type trigger some action */
+ BTC_SET_ACT_LEAVE_LPS,
+ BTC_SET_ACT_ENTER_LPS,
+ BTC_SET_ACT_NORMAL_LPS,
+ BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ BTC_SET_ACT_UPDATE_ra_mask,
+ BTC_SET_ACT_SEND_MIMO_PS,
+ /* BT Coex related */
+ BTC_SET_ACT_CTRL_BT_INFO,
+ BTC_SET_ACT_CTRL_BT_COEX,
+ /***************************/
+ BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+ BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+ BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+ BTC_DBG_DISP_BT_FW_VER = 0x2,
+ BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+ BTC_IPS_LEAVE = 0x0,
+ BTC_IPS_ENTER = 0x1,
+ BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+ BTC_LPS_DISABLE = 0x0,
+ BTC_LPS_ENABLE = 0x1,
+ BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+ BTC_SCAN_FINISH = 0x0,
+ BTC_SCAN_START = 0x1,
+ BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+ BTC_ASSOCIATE_FINISH = 0x0,
+ BTC_ASSOCIATE_START = 0x1,
+ BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+ BTC_MEDIA_DISCONNECT = 0x0,
+ BTC_MEDIA_CONNECT = 0x1,
+ BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+ BTC_PACKET_UNKNOWN = 0x0,
+ BTC_PACKET_DHCP = 0x1,
+ BTC_PACKET_ARP = 0x2,
+ BTC_PACKET_EAPOL = 0x3,
+ BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+ BTC_STACK_OP_NONE = 0x0,
+ BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+ BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+ BTC_STACK_OP_MAX
+};
+
+
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
+struct btc_bt_info {
+ bool bt_disabled;
+ u8 rssi_adjust_for_agc_table_on;
+ u8 rssi_adjust_for_1ant_coex_type;
+ bool bt_busy;
+ u8 agg_buf_size;
+ bool limited_dig;
+ bool reject_agg_pkt;
+ bool b_bt_ctrl_buf_size;
+ bool increase_scan_dev_num;
+ u16 bt_hci_ver;
+ u16 bt_real_fw_ver;
+ u8 bt_fw_ver;
+
+ /* the following is for 1Ant solution */
+ bool bt_ctrl_lps;
+ bool bt_pwr_save_mode;
+ bool bt_lps_on;
+ bool force_to_roam;
+ u8 force_exec_pwr_cmd_cnt;
+ u8 lps_1ant;
+ u8 rpwm_1ant;
+ u32 ra_mask;
+};
+
+struct btc_stack_info {
+ bool profile_notified;
+ u16 hci_version; /* stack hci version */
+ u8 num_of_link;
+ bool bt_link_exist;
+ bool sco_exist;
+ bool acl_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ u8 num_of_hid;
+ bool pan_exist;
+ bool unknown_acl_exist;
+ char min_bt_rssi;
+};
+
+struct btc_statistics {
+ u32 cnt_bind;
+ u32 cnt_init_hw_config;
+ u32 cnt_init_coex_dm;
+ u32 cnt_ips_notify;
+ u32 cnt_lps_notify;
+ u32 cnt_scan_notify;
+ u32 cnt_connect_notify;
+ u32 cnt_media_status_notify;
+ u32 cnt_special_packet_notify;
+ u32 cnt_bt_info_notify;
+ u32 cnt_periodical;
+ u32 cnt_stack_operation_notify;
+ u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool sco_only;
+ bool a2dp_exist;
+ bool a2dp_only;
+ bool hid_exist;
+ bool hid_only;
+ bool pan_exist;
+ bool pan_only;
+};
+
+enum btc_antenna_pos {
+ BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+ BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
+struct btc_coexist {
+ /* make sure only one adapter can bind the data context */
+ bool binded;
+ /* default adapter */
+ void *adapter;
+ struct btc_board_info board_info;
+ /* some bt info referenced by non-bt module */
+ struct btc_bt_info bt_info;
+ struct btc_stack_info stack_info;
+ enum btc_chip_interface chip_interface;
+ struct btc_bt_link_info bt_link_info;
+
+ bool initilized;
+ bool stop_coex_dm;
+ bool manual_control;
+ u8 *cli_buf;
+ struct btc_statistics statistics;
+ u8 pwr_mode_val[10];
+
+ /* function pointers
+ * io related */
+ bfp_btc_r1 btc_read_1byte;
+ bfp_btc_w1 btc_write_1byte;
+ bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+ bfp_btc_r2 btc_read_2byte;
+ bfp_btc_w2 btc_write_2byte;
+ bfp_btc_r4 btc_read_4byte;
+ bfp_btc_w4 btc_write_4byte;
+
+ bfp_btc_set_bb_reg btc_set_bb_reg;
+ bfp_btc_get_bb_reg btc_get_bb_reg;
+
+
+ bfp_btc_set_rf_reg btc_set_rf_reg;
+ bfp_btc_get_rf_reg btc_get_rf_reg;
+
+
+ bfp_btc_fill_h2c btc_fill_h2c;
+
+ bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+ bfp_btc_get btc_get;
+ bfp_btc_set btc_set;
+};
+
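+/*
+ * The chip-specific coex algorithm files are expected to touch the
+ * hardware only through the callbacks above, along the lines of this
+ * sketch (local variable names and reg_addr are illustrative only):
+ *
+ * u32 val = btcoexist->btc_read_4byte(btcoexist, reg_addr);
+ * btcoexist->btc_write_1byte(btcoexist, reg_addr, 0x0);
+ *
+ * bool wifi_connected = false;
+ * btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ * &wifi_connected);
+ * btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+ */
+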
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum rt_media_status media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+ u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+ u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+#endif
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
new file mode 100644
index 000000000000..6653f147757c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c
@@ -0,0 +1,236 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
+struct rtl_btc_ops rtl_btc_operation = {
+ .btc_init_variables = rtl_btc_init_variables,
+ .btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_ips_notify = rtl_btc_ips_notify,
+ .btc_scan_notify = rtl_btc_scan_notify,
+ .btc_connect_notify = rtl_btc_connect_notify,
+ .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+ .btc_periodical = rtl_btc_periodical,
+ .btc_halt_notify = rtl_btc_halt_notify,
+ .btc_btinfo_notify = rtl_btc_btinfo_notify,
+ .btc_is_limited_dig = rtl_btc_is_limited_dig,
+ .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+ .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+
+ exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+ u8 ant_num;
+ u8 bt_exist;
+ u8 bt_type;
+
+ ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, antNum is %d\n", __func__, ant_num));
+
+ bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, bt_exist is %d\n", __func__, bt_exist));
+ exhalbtc_set_bt_exist(bt_exist);
+
+ bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s, bt_type is %d\n", __func__, bt_type));
+ exhalbtc_set_chip_type(bt_type);
+
+ exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_init_hw_config(&gl_bt_coexist);
+ exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+ exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+ exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus)
+{
+ exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+// rtl_bt_dm_monitor();
+ exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+ exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+ exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+ bool bt_change_edca = false;
+ u32 cur_edca_val;
+ u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+ u32 edca_hs = 0;
+ u32 edca_addr = 0x504;
+
+ cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+ if (halbtc_is_wifi_uplink(rtlpriv)) {
+ if (cur_edca_val != edca_bt_hs_uplink) {
+ edca_hs = edca_bt_hs_uplink;
+ bt_change_edca = true;
+ }
+ } else {
+ if (cur_edca_val != edca_bt_hs_downlink) {
+ edca_hs = edca_bt_hs_downlink;
+ bt_change_edca = true;
+ }
+ }
+
+ if (bt_change_edca)
+ rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+ return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.bt_disabled;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+ return &rtl_btc_operation;
+}
+//EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
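+
+/*
+ * A sketch of how the core driver might consume this table (assuming a
+ * btc_ops pointer in the btcoexist part of struct rtl_priv; the exact
+ * field layout lives outside this file):
+ *
+ * rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+ * rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
+ * rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ * rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+ */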
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+ u8 num;
+
+ if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+ num = 2;
+ else
+ num = 1;
+
+ return num;
+}
+
+#if 0
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ m_status = RT_MEDIA_CONNECT;
+ }
+
+ return m_status;
+}
+#endif
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+
+#if 0
+
+MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+
+ //printk("%s, rtlpriv->btc_ops.btc_init_variables addr is %p\n", __func__, rtlpriv->btc_ops.btc_init_variables);
+
+ return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+ return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
+
+#endif
+
diff --git a/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
new file mode 100644
index 000000000000..452fbf1e6d1e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+
+//extern struct rtl_btc_ops rtl_btc_operation;
+extern struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+//enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/cam.c b/drivers/staging/rtl8821ae/cam.c
new file mode 100644
index 000000000000..72743e78954b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.c
@@ -0,0 +1,354 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "cam.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->sec.use_defaultkey = false;
+ rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
+ rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
+ memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
+ memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
+ rtlpriv->sec.pairwise_key = NULL;
+}
+
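+/*
+ * Each CAM entry is written below as CAM_CONTENT_COUNT 32-bit words:
+ * word 0 carries the entry config (us_config) in its low 16 bits with
+ * MAC address bytes 0-1 above it, word 1 carries MAC address bytes 2-5,
+ * and the remaining words carry the key content.
+ */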
+static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
+ u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 target_command;
+ u32 target_content = 0;
+ u8 entry_i;
+
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
+ key_cont_128, 16);
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
+ target_command = target_command | BIT(31) | BIT(16);
+
+ if (entry_i == 0) {
+ target_content = (u32) (*(mac_addr + 0)) << 16 |
+ (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE %x: %x \n",
+ rtlpriv->cfg->maps[WCAMI], target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("The Key ID is %d\n", entry_no));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE %x: %x \n",
+ rtlpriv->cfg->maps[RWCAM], target_command));
+
+ } else if (entry_i == 1) {
+
+ target_content = (u32) (*(mac_addr + 5)) << 24 |
+ (u32) (*(mac_addr + 4)) << 16 |
+ (u32) (*(mac_addr + 3)) << 8 |
+ (u32) (*(mac_addr + 2));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A4: %x \n", target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A0: %x \n", target_command));
+
+ } else {
+
+ target_content =
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
+ 24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
+ << 16 |
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
+ | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+ udelay(100);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A4: %x \n", target_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("WRITE A0: %x \n", target_command));
+ }
+ }
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("after set key, usconfig:%x\n", us_config));
+}
+
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content)
+{
+ u32 us_config;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
+ "ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr));
+
+ if (ul_key_id == TOTAL_CAM_ENTRY) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("ulKeyId exceed!\n"));
+ return 0;
+ }
+
+ if (ul_default_key == 1) {
+ us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
+ } else {
+ us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
+ }
+
+ rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
+ (u8 *) key_content, us_config);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("end \n"));
+
+ return 1;
+
+}
+//EXPORT_SYMBOL(rtl_cam_add_one_entry);
+
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
+ u8 *mac_addr, u32 ul_key_id)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
+
+ ul_command = ul_key_id * CAM_CONTENT_COUNT;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A4: %x \n", 0));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A0: %x \n", ul_command));
+
+ return 0;
+
+}
+//EXPORT_SYMBOL(rtl_cam_delete_one_entry);
+
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ ul_command = BIT(31) | BIT(30);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+}
+//EXPORT_SYMBOL(rtl_cam_reset_all_entry);
+
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);
+
+ ul_content |= BIT(15);
+ ul_command = CAM_CONTENT_COUNT * uc_index;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A4: %x \n", ul_content));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A0: %x \n", ul_command));
+}
+//EXPORT_SYMBOL(rtl_cam_mark_invalid);
+
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u8 entry_i;
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+
+ if (entry_i == 0) {
+ ul_content =
+ (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
+ ul_content |= BIT(15);
+
+ } else {
+ ul_content = 0;
+ }
+
+ ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A4: %x \n",
+ ul_content));
+ RT_TRACE(COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A0: %x \n",
+ ul_command));
+ }
+
+}
+//EXPORT_SYMBOL(rtl_cam_empty_entry);
+
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
+ u8 entry_idx = 0;
+ u8 i, *addr;
+
+ if (NULL == sta_addr) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is NULL.\n"));
+ return TOTAL_CAM_ENTRY;
+ }
+ /* Does STA already exist? */
+ for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+ addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+ if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+ return i;
+ }
+ /* Get a free CAM entry. */
+ for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
+ if ((bitmap & BIT(0)) == 0) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+ rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
+ rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
+ memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
+ sta_addr, ETH_ALEN);
+ return entry_idx;
+ }
+ bitmap = bitmap >> 1;
+ }
+ return TOTAL_CAM_ENTRY;
+}
+//EXPORT_SYMBOL(rtl_cam_get_free_entry);
+
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 bitmap;
+ u8 i, *addr;
+
+ if (NULL == sta_addr) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is NULL.\n"));
+ return;
+ }
+
+ if ((sta_addr[0] | sta_addr[1] | sta_addr[2] | sta_addr[3] |
+ sta_addr[4] | sta_addr[5]) == 0) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("sta_addr is 00:00:00:00:00:00.\n"));
+ return;
+ }
+ /* Does STA already exist? */
+ for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
+ addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
+ bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
+ if (((bitmap & BIT(0)) == BIT(0)) &&
+ (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+ /* Remove from HW Security CAM */
+ memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
+ rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
+ printk("&&&&&&&&&del entry %d\n",i);
+ }
+ }
+ return;
+}
+//EXPORT_SYMBOL(rtl_cam_del_entry);
diff --git a/drivers/staging/rtl8821ae/cam.h b/drivers/staging/rtl8821ae/cam.h
new file mode 100644
index 000000000000..326fa6784ae5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/cam.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CAM_H_
+#define __RTL_CAM_H_
+
+#define CAM_CONTENT_COUNT 8
+
+#define CFG_DEFAULT_KEY BIT(5)
+#define CFG_VALID BIT(15)
+
+#define PAIRWISE_KEYIDX 0
+#define CAM_PAIRWISE_KEY_POSITION 4
+
+#define CAM_CONFIG_USEDK 1
+#define CAM_CONFIG_NO_USEDK 0
+
+extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id);
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
+u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/compat.h b/drivers/staging/rtl8821ae/compat.h
new file mode 100644
index 000000000000..68269cc2d477
--- /dev/null
+++ b/drivers/staging/rtl8821ae/compat.h
@@ -0,0 +1,125 @@
+#ifndef __RTL_COMPAT_H__
+#define __RTL_COMPAT_H__
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+ * to RAM and hibernation.
+ */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+struct dev_pm_ops name = { \
+ .suspend = suspend_fn, \
+ .resume = resume_fn, \
+ .freeze = suspend_fn, \
+ .thaw = resume_fn, \
+ .poweroff = suspend_fn, \
+ .restore = resume_fn, \
+}
+
+#define compat_pci_suspend(fn) \
+ int fn##_compat(struct pci_dev *pdev, pm_message_t state) \
+ { \
+ int r; \
+ \
+ r = fn(&pdev->dev); \
+ if (r) \
+ return r; \
+ \
+ pci_save_state(pdev); \
+ pci_disable_device(pdev); \
+ pci_set_power_state(pdev, PCI_D3hot); \
+ \
+ return 0; \
+ }
+
+#define compat_pci_resume(fn) \
+ int fn##_compat(struct pci_dev *pdev) \
+ { \
+ int r; \
+ \
+ pci_set_power_state(pdev, PCI_D0); \
+ r = pci_enable_device(pdev); \
+ if (r) \
+ return r; \
+ pci_restore_state(pdev); \
+ \
+ return fn(&pdev->dev); \
+ }
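+
+/*
+ * On these older kernels a driver would wrap its dev_pm_ops callbacks
+ * with the helpers above, for example (hypothetical callback names):
+ *
+ * compat_pci_suspend(rtl_pci_suspend)
+ * compat_pci_resume(rtl_pci_resume)
+ *
+ * which define rtl_pci_suspend_compat()/rtl_pci_resume_compat() with the
+ * legacy struct pci_dev based prototypes.
+ */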
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_TSFT
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#define RX_FLAG_MACTIME_MPDU RX_FLAG_MACTIME_START
+#else
+#endif
+//#define NETDEV_TX_OK
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+#define IEEE80211_KEY_FLAG_SW_MGMT IEEE80211_KEY_FLAG_SW_MGMT_TX
+#endif
+
+struct ieee80211_mgmt_compat {
+ __le16 frame_control;
+ __le16 duration;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssid[6];
+ __le16 seq_ctrl;
+ union {
+ struct {
+ u8 category;
+ union {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[0];
+ } __attribute__ ((packed)) wme_action;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capab;
+ __le16 timeout;
+ __le16 start_seq_num;
+ } __attribute__((packed)) addba_req;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status;
+ __le16 capab;
+ __le16 timeout;
+ } __attribute__((packed)) addba_resp;
+ struct{
+ u8 action_code;
+ __le16 params;
+ __le16 reason_code;
+ } __attribute__((packed)) delba;
+ struct{
+ u8 action_code;
+ /* capab_info for open and confirm,
+ * reason for close
+ */
+ __le16 aux;
+ /* Followed in plink_confirm by status
+ * code, AID and supported rates,
+ * and directly by supported rates in
+ * plink_open and plink_close
+ */
+ u8 variable[0];
+ } __attribute__((packed)) plink_action;
+ struct{
+ u8 action_code;
+ u8 variable[0];
+ } __attribute__((packed)) mesh_action;
+ struct {
+ u8 action;
+ u8 smps_control;
+ } __attribute__ ((packed)) ht_smps;
+ } u;
+ } __attribute__ ((packed)) action;
+ } u;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/staging/rtl8821ae/core.c b/drivers/staging/rtl8821ae/core.c
new file mode 100644
index 000000000000..ff3139b6da65
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.c
@@ -0,0 +1,1464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "core.h"
+#include "cam.h"
+#include "base.h"
+#include "ps.h"
+
+#include "btcoexist/rtl_btc.h"
+
+/* A mutex for start & stop is a must here. */
+static int rtl_op_start(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (!is_hal_stop(rtlhal))
+ return 0;
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return 0;
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ err = rtlpriv->intf_ops->adapter_start(hw);
+ if (err)
+ goto out;
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ /* This is a must: adhoc does stop and start, but stopping
+ * with the RF off may cause problems, e.g. poor adhoc
+ * throughput. */
+ if (unlikely(ppsc->rfpwr_state == ERFOFF))
+ rtl_ips_nic_on(hw);
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+
+ /*reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+ if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static void rtl_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ return;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+static int rtl_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int err = 0;
+
+ if (mac->vif) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
+ return -EOPNOTSUPP;
+ }
+
+/*This flag is not defined before kernel 3.4*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+#endif
+
+ rtl_ips_nic_on(hw);
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mac->p2p = P2P_ROLE_CLIENT;
+ /*fall through*/
+#else
+/*<delete in kernel end>*/
+ switch (vif->type) {
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ case NL80211_IFTYPE_STATION:
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_STATION \n"));
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_ADHOC \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+
+ break;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ case NL80211_IFTYPE_P2P_GO:
+ mac->p2p = P2P_ROLE_GO;
+ /*fall through*/
+#endif
+/*<delete in kernel end>*/
+ case NL80211_IFTYPE_AP:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_AP \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_MESH_POINT \n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ mac->basic_rates = 0xfff;
+ else
+ mac->basic_rates = 0xff0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("operation mode %d is not support!\n", vif->type));
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+#ifdef VIF_TODO
+ if (!rtl_set_vif_info(hw, vif))
+ goto out;
+#endif
+
+ if (mac->p2p) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("p2p role %x \n",vif->type));
+ mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&mac->basic_rates));
+ }
+ mac->vif = vif;
+ mac->opmode = vif->type;
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+ memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /* Free beacon resources */
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (mac->beacon_enabled == 1) {
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
+ }
+ }
+
+ /*
+ * Note: We treat NL80211_IFTYPE_UNSPECIFIED as
+ * NO LINK for our hardware.
+ */
+ mac->p2p = 0;
+ mac->vif = NULL;
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+ mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+static int rtl_op_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int ret;
+ rtl_op_remove_interface(hw, vif);
+
+ vif->type = new_type;
+ vif->p2p = p2p;
+ ret = rtl_op_add_interface(hw, vif);
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ (" p2p %x\n",p2p));
+ return ret;
+}
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (mac->skip_scan)
+ return 1;
+
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2) */
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
+ }
+
+ /*For IPS */
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtl_ips_nic_off(hw);
+ else
+ rtl_ips_nic_on(hw);
+ } else {
+ /*
+ * Although an RF-off state may not be caused by IPS,
+ * the reason is checked in the set_rf_power_state function.
+ */
+ if (unlikely(ppsc->rfpwr_state == ERFOFF))
+ rtl_ips_nic_on(hw);
+ }
+
+ /*For LPS */
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ cancel_delayed_work(&rtlpriv->works.ps_work);
+ cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
+ if (conf->flags & IEEE80211_CONF_PS) {
+ rtlpriv->psc.sw_ps_enabled = true;
+ /* The sleep here is a must; otherwise we may receive
+ * a beacon and put mac80211 into the wrong PS state,
+ * which makes the power-save nullfunc fail to be sent
+ * and causes packet loss. The sleep must be quick but
+ * not immediate, because an immediate sleep makes the
+ * nullfunc sent by mac80211 fail as well. Testing shows
+ * that a 5 ms delay works very well. */
+ if (!rtlpriv->psc.multi_buffered)
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ps_work,
+ MSECS(5));
+ } else {
+ rtl_swlps_rf_awake(hw);
+ rtlpriv->psc.sw_ps_enabled = false;
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count));
+ mac->retry_long = hw->conf.long_frame_max_tx_count;
+ mac->retry_short = hw->conf.long_frame_max_tx_count;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ (u8 *) (&hw->conf.long_frame_max_tx_count));
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ struct ieee80211_channel *channel = hw->conf.chandef.chan;
+ enum nl80211_channel_type channel_type =
+ cfg80211_get_chandef_type(&(hw->conf.chandef));
+#else
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum nl80211_channel_type channel_type = hw->conf.channel_type;
+#endif
+ u8 wide_chan = (u8) channel->hw_value;
+
+ if (mac->act_scanning)
+ mac->n_channels++;
+
+ if (rtlpriv->dm.supp_phymode_switch &&
+ mac->link_state < MAC80211_LINKED &&
+ !mac->act_scanning) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+
+ /*
+ * Because we must switch back to current_network.chan
+ * while scanning, we still set the channel even if
+ * set_chan == current_network.chan.
+ * mac80211 reports wrong bw40 info for the Cisco 1253
+ * in bw20 mode, so we correct it here based on the
+ * UPPER & LOWER offsets.
+ */
+ switch (channel_type) {
+ case NL80211_CHAN_HT20:
+ case NL80211_CHAN_NO_HT:
+ /* SC */
+ mac->cur_40_prime_sc =
+ PRIME_CHNL_OFFSET_DONT_CARE;
+ rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
+ mac->bw_40 = false;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan -= 2;
+
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan += 2;
+
+ break;
+ default:
+ mac->bw_40 = false;
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not processed \n"));
+ break;
+ }
+
+ if (wide_chan <= 0)
+ wide_chan = 1;
+
+ /* While scanning, just before going off-channel we may send a
+ * ps=1 null frame to the AP and then quickly send a ps=0 null.
+ * The first null makes the AP queue lots of packets in its hw tx
+ * buffer; those packets must be transmitted before we leave the
+ * channel, so delay a little longer to let the AP flush them, or
+ * the AP may disassociate us or delete the BA session.
+ */
+ if (rtlpriv->mac80211.offchan_deley) {
+ rtlpriv->mac80211.offchan_deley = false;
+ mdelay(50);
+ }
+
+ rtlphy->current_channel = wide_chan;
+
+ rtlpriv->cfg->ops->switch_channel(hw);
+ rtlpriv->cfg->ops->set_channel_access(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw,
+ channel_type);
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+
+ return 0;
+}
+
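+/* Translate the mac80211 FIF_* filter flags into the corresponding
+ * RCR bits kept in mac->rx_conf. */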
+static void rtl_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags, u64 multicast)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ *new_flags &= RTL_SUPPORTED_FILTERS;
+ if (0 == changed_flags)
+ return;
+
+ /* TODO: we disable broadcast now, so enable it here */
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive multicast frame.\n"));
+ } else {
+ mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB]);
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive multicast frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive FCS error frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive FCS error frame.\n"));
+ }
+ }
+
+ /* If the SSID is not set to hw, don't check the bssid here;
+ * this is just used for linked scanning. For the linked and
+ * no-link cases, check_bssid is set in set_network_type. */
+ if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+ (mac->link_state >= MAC80211_LINKED)) {
+ if (mac->opmode != NL80211_IFTYPE_AP &&
+ mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+ } else {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ }
+ }
+ }
+
+ if (changed_flags & FIF_CONTROL) {
+ if (*new_flags & FIF_CONTROL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive control frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive control frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Enable receive other BSS's frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("Disable receive other BSS's frame.\n"));
+ }
+ }
+}
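+
+/* Add a station entry: pick an initial wireless mode from the current
+ * band and the station's HT capability, and put the entry on the
+ * driver's entry list. */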
+static int rtl_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry;
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_add_tail(&sta_entry->list, &rtlpriv->entry_list);
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ sta_entry->wireless_mode = WIRELESS_MODE_G;
+ if (sta->supp_rates[0] <= 0xf)
+ sta_entry->wireless_mode = WIRELESS_MODE_B;
+ if (sta->ht_cap.ht_supported == true)
+ sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ sta_entry->wireless_mode = WIRELESS_MODE_G;
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ sta_entry->wireless_mode = WIRELESS_MODE_A;
+ if (sta->ht_cap.ht_supported)
+ sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ sta_entry->wireless_mode = WIRELESS_MODE_A;
+ }
+ /*disable cck rate for p2p*/
+ if (mac->p2p)
+ sta->supp_rates[0] &= 0xfffffff0;
+
+ memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("Add sta addr is %pM\n",sta->addr));
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ }
+
+ return 0;
+}
+
+static int rtl_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *sta_entry;
+ if (sta) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("Remove sta addr is %pM\n",sta->addr));
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->wireless_mode = 0;
+ sta_entry->ratr_index = 0;
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_del(&sta_entry->list);
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+ }
+ return 0;
+}
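+
+/* Map a mac80211 queue number (VO=0, VI=1, BE=2, BK=3) to the HAL
+ * AC index used by the hardware. */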
+static int _rtl_get_hal_qnum(u16 queue)
+{
+ int qnum;
+
+ switch (queue) {
+ case 0:
+ qnum = AC3_VO;
+ break;
+ case 1:
+ qnum = AC2_VI;
+ break;
+ case 2:
+ qnum = AC0_BE;
+ break;
+ case 3:
+ qnum = AC1_BK;
+ break;
+ default:
+ qnum = AC0_BE;
+ break;
+ }
+ return qnum;
+}
+
+/*
+ *for mac80211 VO=0, VI=1, BE=2, BK=3
+ *for rtl819x BE=0, BK=1, VI=2, VO=3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static int rtl_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+#else
+static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int aci;
+
+ if (queue >= AC_MAX) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("queue number %d is incorrect!\n", queue));
+ return -EINVAL;
+ }
+
+ aci = _rtl_get_hal_qnum(queue);
+ mac->ac[aci].aifs = param->aifs;
+ mac->ac[aci].cw_min = param->cw_min;
+ mac->ac[aci].cw_max = param->cw_max;
+ mac->ac[aci].tx_op = param->txop;
+ memcpy(&mac->edca_param[aci], param, sizeof(*param));
+ rtlpriv->cfg->ops->set_qos(hw, aci);
+ return 0;
+}
+
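+/* Handle BSS changes reported by mac80211: beacon enable/interval,
+ * association state, ERP parameters, HT parameters, BSSID/basic
+ * rates, and the firmware join/keep-alive reports. */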
+static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ if ((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((changed & BSS_CHANGED_BEACON) ||
+ (changed & BSS_CHANGED_BEACON_ENABLED &&
+ bss_conf->enable_beacon)) {
+ if (mac->beacon_enabled == 0) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_BEACON_ENABLED \n"));
+
+ /*start hw beacon interrupt. */
+ /*rtlpriv->cfg->ops->set_bcn_reg(hw); */
+ mac->beacon_enabled = 1;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS], 0);
+
+ if (rtlpriv->cfg->ops->linked_set_reg)
+ rtlpriv->cfg->ops->linked_set_reg(hw);
+ }
+ }
+ if ((changed & BSS_CHANGED_BEACON_ENABLED &&
+ !bss_conf->enable_beacon)){
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("ADHOC DISABLE BEACON\n"));
+
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
+ }
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ RT_TRACE(COMP_BEACON, DBG_TRACE,
+ ("BSS_CHANGED_BEACON_INT\n"));
+ mac->beacon_interval = bss_conf->beacon_int;
+ rtlpriv->cfg->ops->set_bcn_intv(hw);
+ }
+ }
+
+ /*TODO: reference to enum ieee80211_bss_change */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta = NULL;
+ /* We should reset all sec info & CAM before setting
+ * the CAM after linking; we must not reset them on
+ * disassoc, or switching from TKIP to WEP will fail
+ * because some flags will be wrong. */
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+ /* reset cam to fix wep fail issue
+ * when change from wpa to wep */
+ rtl_cam_reset_all_entry(hw);
+
+ mac->link_state = MAC80211_LINKED;
+ mac->cnt_after_linked = 0;
+ mac->assoc_id = bss_conf->aid;
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+
+ if (rtlpriv->cfg->ops->linked_set_reg)
+ rtlpriv->cfg->ops->linked_set_reg(hw);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+
+ if (vif->type == NL80211_IFTYPE_STATION && sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ RT_TRACE(COMP_EASY_CONCURRENT, DBG_LOUD,
+ ("send PS STATIC frame\n"));
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (sta && sta->ht_cap.ht_supported)
+ rtl_send_smps_action(hw, sta,
+ IEEE80211_SMPS_STATIC);
+ }
+ rcu_read_unlock();
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_ASSOC\n"));
+ } else {
+ if (mac->link_state == MAC80211_LINKED)
+ rtl_lps_leave(hw);
+ if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->vendor = PEER_UNKNOWN;
+
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_UN_ASSOC\n"));
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_CTS_PROT\n"));
+ mac->use_cts_protect = bss_conf->use_cts_prot;
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ RT_TRACE(COMP_MAC80211, DBG_LOUD,
+ ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x \n",
+ bss_conf->use_short_preamble));
+
+ mac->short_preamble = bss_conf->use_short_preamble;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
+ (u8 *) (&mac->short_preamble));
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_SLOT\n"));
+
+ if (bss_conf->use_short_slot)
+ mac->slot_time = RTL_SLOT_TIME_9;
+ else
+ mac->slot_time = RTL_SLOT_TIME_20;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *) (&mac->slot_time));
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ struct ieee80211_sta *sta = NULL;
+
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_HT\n"));
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+ if (sta) {
+ if (sta->ht_cap.ampdu_density >
+ mac->current_ampdu_density)
+ mac->current_ampdu_density =
+ sta->ht_cap.ampdu_density;
+ if (sta->ht_cap.ampdu_factor <
+ mac->current_ampdu_factor)
+ mac->current_ampdu_factor =
+ sta->ht_cap.ampdu_factor;
+ }
+ rcu_read_unlock();
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
+ (u8 *) (&mac->max_mss_density));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
+ &mac->current_ampdu_factor);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
+ &mac->current_ampdu_density);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ u32 basic_rates;
+ struct ieee80211_sta *sta = NULL;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
+ (u8 *) bss_conf->bssid);
+
+ RT_TRACE(COMP_MAC80211, DBG_DMESG,
+ ("bssid: %pM\n", bss_conf->bssid));
+
+ mac->vendor = PEER_UNKNOWN;
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ mac->mode = WIRELESS_MODE_A;
+ } else {
+ if (sta->supp_rates[0] <= 0xf)
+ mac->mode = WIRELESS_MODE_B;
+ else
+ mac->mode = WIRELESS_MODE_G;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ mac->mode = WIRELESS_MODE_N_24G;
+ else
+ mac->mode = WIRELESS_MODE_N_5G;
+ }
+
+ /* Only station mode needs this, because IBSS & AP mode
+ * set it in sta_add, and it will be NULL here. */
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct rtl_sta_info *sta_entry;
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ sta_entry->wireless_mode = mac->mode;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ mac->ht_enable = true;
+
+ /*
+ * for cisco 1252 bw20 it's wrong
+ * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ * mac->bw_40 = true;
+ * }
+ * */
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ /* For 5G we must shift left by RATE_6M_INDEX (4),
+ * because 5G has no CCK rates. */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ basic_rates = sta->supp_rates[1] << 4;
+ else
+ basic_rates = sta->supp_rates[0];
+
+ mac->basic_rates = basic_rates;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&basic_rates));
+ }
+ rcu_read_unlock();
+ }
+
+ /*
+ * For FW LPS and Keep Alive:
+ * To tell firmware we have connected
+ * to an AP. For 92SE/CE power save v2.
+ */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ u8 keep_alive = 10;
+ u8 mstatus = RT_MEDIA_CONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_KEEP_ALIVE,
+ (u8 *) (&keep_alive));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *) (&mstatus));
+ ppsc->report_linked = true;
+
+ } else {
+
+ u8 mstatus = RT_MEDIA_DISCONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *) (&mstatus));
+ ppsc->report_linked = false;
+
+ }
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify(
+ rtlpriv, ppsc->report_linked);
+ }
+ }
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u64 tsf;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf));
+ return tsf;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
+#else
+static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ mac->tsf = tsf;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+#else
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp = 0;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+}
+
+static void rtl_op_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ break;
+ case STA_NOTIFY_AWAKE:
+ break;
+ default:
+ break;
+ }
+}
+
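+/* Dispatch mac80211 A-MPDU actions to the driver's TX/RX aggregation
+ * helpers. */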
+static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+/*<delete in kernel end>*/
+ ,u8 buf_size
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
+ return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+#else
+ case IEEE80211_AMPDU_TX_STOP:
+#endif
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
+ return rtl_tx_agg_stop(hw, vif, sta, tid);
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
+ rtl_tx_agg_oper(hw, sta, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
+ return rtl_rx_agg_start(hw, sta, tid);
+ case IEEE80211_AMPDU_RX_STOP:
+ RT_TRACE(COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
+ return rtl_rx_agg_stop(hw, sta, tid);
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("IEEE80211_AMPDU_ERR!!!!:\n"));
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
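+/* Software scan start: leave LPS (or turn the NIC on), back up the
+ * scan-sensitive settings and switch the LED to site-survey mode.
+ * Scanning is skipped while traffic is busy. */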
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+ mac->act_scanning = true;
+ /*rtlpriv->btcops->btc_scan_notify(rtlpriv, 0); */
+ if (rtlpriv->link_info.b_higher_busytraffic) {
+ mac->skip_scan = true;
+ return;
+ }
+
+ if (rtlpriv->dm.supp_phymode_switch) {
+ if (rtlpriv->cfg->ops->check_switch_to_dmdp)
+ rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+ rtl_lps_leave(hw);
+ mac->link_state = MAC80211_LINKED_SCANNING;
+ } else {
+ rtl_ips_nic_on(hw);
+ }
+
+ /* Dual MAC */
+ rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
+
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
+
+}
+
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n"));
+ mac->act_scanning = false;
+ mac->skip_scan = false;
+ if (rtlpriv->link_info.b_higher_busytraffic) {
+ return;
+ }
+
+ /* p2p will use 1/6/11 to scan */
+ if (mac->n_channels == 3)
+ mac->p2p_in_use = true;
+ else
+ mac->p2p_in_use = false;
+ mac->n_channels = 0;
+ /* Dual MAC */
+ rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false;
+
+ if (mac->link_state == MAC80211_LINKED_SCANNING) {
+ mac->link_state = MAC80211_LINKED;
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ /* fix fwlps issue */
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+ }
+ }
+
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
+ /* rtlpriv->btcops->btc_scan_notify(rtlpriv, 1); */
+}
+
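+/* Install or remove a hardware key: map the mac80211 cipher to the
+ * driver key type, decide group vs. pairwise handling, program the
+ * CAM, and tell mac80211 which parts stay in software (IV/MMIC). */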
+static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 key_type = NO_ENCRYPTION;
+ u8 key_idx;
+ bool group_key = false;
+ bool wep_only = false;
+ int err = 0;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 zero_addr[ETH_ALEN] = { 0 };
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("not open hw encryption\n"));
+ return -ENOSPC; /*User disabled HW-crypto */
+ }
+ /* To support IBSS, use sw-crypto for GTK */
+ if (((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -ENOSPC;
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr));
+ rtlpriv->sec.being_setkey = true;
+ rtl_ips_nic_on(hw);
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ /* <1> get encryption alg */
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_type = WEP40_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP104\n"));
+ key_type = WEP104_ENCRYPTION;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AESCCMP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* HW doesn't support CMAC encryption,
+ * use software CMAC encryption */
+ key_type = AESCMAC_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("HW doesn't support CMAC encryption, "
+ "use software CMAC encryption\n"));
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("alg_err:%x!!!!:\n", key->cipher));
+ goto out_unlock;
+ }
+/*<delete in kernel start>*/
+#else
+ switch (key->alg) {
+ case ALG_WEP:
+ if (key->keylen == WLAN_KEY_LEN_WEP40) {
+ key_type = WEP40_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("alg:WEP104\n"));
+ key_type = WEP104_ENCRYPTION;
+ }
+ break;
+ case ALG_TKIP:
+ key_type = TKIP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ break;
+ case ALG_CCMP:
+ key_type = AESCCMP_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ break;
+ case ALG_AES_CMAC:
+ /* HW doesn't support CMAC encryption, use software CMAC encryption */
+ key_type = AESCMAC_ENCRYPTION;
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n"));
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("HW doesn't support CMAC encryption, "
+ "use software CMAC encryption\n"));
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("alg_err:%x!!!!:\n", key->alg));
+ goto out_unlock;
+ }
+#endif
+/*<delete in kernel end>*/
+ if (key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ rtlpriv->sec.use_defaultkey = true;
+
+ /* <2> get key_idx */
+ key_idx = (u8) (key->keyidx);
+ if (key_idx > 3)
+ goto out_unlock;
+ /* <3> if pairwise key enable_hw_sec */
+ group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+
+ /* WEP is always a group key, but there are two cases:
+ * 1) WEP only: just for WEP encryption; in this case
+ * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
+ * will be true & enable_hw_sec will be set when the WEP
+ * key is set.
+ * 2) WEP (group) + AES (pairwise): some APs like Cisco
+ * may use it; in this case enable_hw_sec will not be
+ * set when the WEP key is set. */
+ /* We must reset sec_info after linking and before setting
+ * the key, or some flags will be wrong. */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (!group_key || key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION) {
+ if (group_key) {
+ wep_only = true;
+ }
+ rtlpriv->cfg->ops->enable_hw_sec(hw);
+ }
+ } else {
+ if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
+ if (rtlpriv->sec.pairwise_enc_algorithm ==
+ NO_ENCRYPTION &&
+ (key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION))
+ wep_only = true;
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:"
+ "1 TKIP:2 AES:4 WEP104:5)\n", key_type));
+ rtlpriv->cfg->ops->enable_hw_sec(hw);
+ }
+ }
+ /* <4> set key based on cmd */
+ switch (cmd) {
+ case SET_KEY:
+ if (wep_only) {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set WEP(group/pairwise) key\n"));
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about wep key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ } else if (group_key) { /* group key */
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ /* group key */
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about group key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, bcast_addr, ETH_ALEN);
+ } else { /* pairwise key */
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("set pairwise key\n"));
+ if (!sta) {
+ RT_ASSERT(false, ("pairwise key withnot"
+ "mac_addr\n"));
+
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ /*set local buf about pairwise key. */
+ memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
+ rtlpriv->sec.pairwise_key =
+ rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
+ memcpy(mac_addr, sta->addr, ETH_ALEN);
+ }
+ rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
+ group_key, key_type, wep_only,
+ false);
+ /* <5> tell mac80211 do something: */
+ /* Must use SW-generated IV, or it will not work. */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->hw_key_idx = key_idx;
+ if (key_type == TKIP_ENCRYPTION)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /*use software CCMP encryption for management frames (MFP) */
+ if (key_type == AESCCMP_ENCRYPTION)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ break;
+ case DISABLE_KEY:
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("disable key delete one entry\n"));
+ /*set local buf about wep key. */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (sta)
+ rtl_cam_del_entry(hw, sta->addr);
+ }
+ memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = 0;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ /*
+ * mac80211 will delete entries one by one,
+ * so don't use rtl_cam_reset_all_entry
+ * or clear all entries here.
+ */
+ rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("cmd_err:%x!!!!:\n", cmd));
+ }
+out_unlock:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ rtlpriv->sec.being_setkey = false;
+ return err;
+}
+
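+/* Poll the hardware RF switch and report state changes to the rfkill
+ * subsystem. */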
+static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return;
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /*if Radio On return true here */
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ if (valid) {
+ if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ (KERN_INFO "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off"));
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+/* This function is called by mac80211 to flush the tx buffer
+ * before a channel switch or power save; otherwise buffered
+ * packets might be sent after going off-channel or into RF
+ * sleep, which may cause the AP to disassociate us. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, queues, drop);
+}
+#else
+static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, drop);
+}
+#endif
+
+const struct ieee80211_ops rtl_ops = {
+ .start = rtl_op_start,
+ .stop = rtl_op_stop,
+ .tx = rtl_op_tx,
+ .add_interface = rtl_op_add_interface,
+ .remove_interface = rtl_op_remove_interface,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ .change_interface = rtl_op_change_interface,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ .config = rtl_op_config,
+ .configure_filter = rtl_op_configure_filter,
+ .set_key = rtl_op_set_key,
+ .conf_tx = rtl_op_conf_tx,
+ .bss_info_changed = rtl_op_bss_info_changed,
+ .get_tsf = rtl_op_get_tsf,
+ .set_tsf = rtl_op_set_tsf,
+ .reset_tsf = rtl_op_reset_tsf,
+ .sta_notify = rtl_op_sta_notify,
+ .ampdu_action = rtl_op_ampdu_action,
+ .sw_scan_start = rtl_op_sw_scan_start,
+ .sw_scan_complete = rtl_op_sw_scan_complete,
+ .rfkill_poll = rtl_op_rfkill_poll,
+ .sta_add = rtl_op_sta_add,
+ .sta_remove = rtl_op_sta_remove,
+ .flush = rtl_op_flush,
+};
diff --git a/drivers/staging/rtl8821ae/core.h b/drivers/staging/rtl8821ae/core.h
new file mode 100644
index 000000000000..f0c74e9239fd
--- /dev/null
+++ b/drivers/staging/rtl8821ae/core.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CORE_H__
+#define __RTL_CORE_H__
+
+#define RTL_SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | FIF_CONTROL | \
+ FIF_OTHER_BSS | \
+ FIF_FCSFAIL | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+#define RTL_SUPPORTED_CTRL_FILTER 0xFF
+
+extern const struct ieee80211_ops rtl_ops;
+#endif
diff --git a/drivers/staging/rtl8821ae/debug.c b/drivers/staging/rtl8821ae/debug.c
new file mode 100644
index 000000000000..8a6c794bda41
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.c
@@ -0,0 +1,988 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "cam.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+#define GET_INODE_DATA(__node) PDE_DATA(__node)
+#else
+#define GET_INODE_DATA(__node) PDE(__node)->data
+#endif
+
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 i;
+
+ rtlpriv->dbg.global_debuglevel = DBG_DMESG;
+
+ rtlpriv->dbg.global_debugcomponents =
+ COMP_ERR |
+ COMP_FW |
+ COMP_INIT |
+ COMP_RECV |
+ COMP_SEND |
+ COMP_MLME |
+ COMP_SCAN |
+ COMP_INTR |
+ COMP_LED |
+ COMP_SEC |
+ COMP_BEACON |
+ COMP_RATE |
+ COMP_RXDESC |
+ COMP_DIG |
+ COMP_TXAGC |
+ COMP_POWER |
+ COMP_POWER_TRACKING |
+ COMP_BB_POWERSAVING |
+ COMP_SWAS |
+ COMP_RF |
+ COMP_TURBO |
+ COMP_RATR |
+ COMP_CMD |
+ COMP_EASY_CONCURRENT |
+ COMP_EFUSE |
+ COMP_QOS | COMP_MAC80211 | COMP_REGD |
+ COMP_CHAN |
+ COMP_BT_COEXIST |
+ COMP_IQK |
+ 0;
+
+ for (i = 0; i < DBGP_TYPE_MAX; i++)
+ rtlpriv->dbg.dbgp_type[i] = 0;
+
+ /*Init Debug flag enable condition */
+}
+
+struct proc_dir_entry *proc_topdir;
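+
+/* Each of the following proc handlers dumps a block of registers
+ * (MAC pages 0x000-0x700, BB pages 0x800-0xf00, RF paths A/B),
+ * several values per line, into a seq_file. */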
+static int rtl_proc_get_mac_0(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x000;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_0(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_0, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_0 = {
+ .open = dl_proc_open_mac_0,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_1(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x100;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_1(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_1, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_1 = {
+ .open = dl_proc_open_mac_1,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_2(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x200;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_2(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_2, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_2 = {
+ .open = dl_proc_open_mac_2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_3(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x300;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_3(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_3, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_3 = {
+ .open = dl_proc_open_mac_3,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_4(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x400;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_4(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_4, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_4 = {
+ .open = dl_proc_open_mac_4,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_5(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x500;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_5(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_5, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_5 = {
+ .open = dl_proc_open_mac_5,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_6(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x600;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_6(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_6, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_6 = {
+ .open = dl_proc_open_mac_6,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_mac_7(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, n, page;
+ int max = 0xff;
+ page = 0x700;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_mac_7(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_mac_7, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_mac_7 = {
+ .open = dl_proc_open_mac_7,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_8(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0x800;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_8(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_8, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_8 = {
+ .open = dl_proc_open_bb_8,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_9(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0x900;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_9(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_9, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_9 = {
+ .open = dl_proc_open_bb_9,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_a(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xa00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_a(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_a = {
+ .open = dl_proc_open_bb_a,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_b(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xb00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_b(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_b = {
+ .open = dl_proc_open_bb_b,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_c(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xc00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_c(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_c, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_c = {
+ .open = dl_proc_open_bb_c,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_d(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xd00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_d(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_d, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_d = {
+ .open = dl_proc_open_bb_d,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_e(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xe00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_e(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_e, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_e = {
+ .open = dl_proc_open_bb_e,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_bb_f(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n, page;
+ int max = 0xff;
+ page = 0xf00;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_bb_f(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_bb_f, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_bb_f = {
+ .open = dl_proc_open_bb_f,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_a(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n;
+ int max = 0x40;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, RF90_PATH_A, n, 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_rf_a(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_reg_rf_a, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_a = {
+ .open = dl_proc_open_rf_a,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_reg_rf_b(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ int i, n;
+ int max = 0x40;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, RF90_PATH_B, n,
+ 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_rf_b(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_reg_rf_b, GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_rf_b = {
+ .open = dl_proc_open_rf_b,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
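+/* Dump security CAM entries: wait for the RWCAM polling bit (BIT(31))
+ * to clear, issue a read command for each entry, then read the value
+ * back from RCAMO. */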
+static int rtl_proc_get_cam_register_1(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n#################### SECURITY CAM (0-10) ##################\n ");
+
+ for (j = 0; j < 11; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ /* polling bit, and No Write enable, and address */
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ /* Check polling bit is clear */
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_1(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_1,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_1 = {
+ .open = dl_proc_open_cam_1,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_2(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n################### SECURITY CAM (11-21) ##################\n ");
+
+ for (j = 11; j < 22; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_2(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_2,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_2 = {
+ .open = dl_proc_open_cam_2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_proc_get_cam_register_3(struct seq_file *m, void *v)
+{
+ struct ieee80211_hw *hw = m->private;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+
+ /* This dumps the current register page */
+ seq_puts(m,
+ "\n################### SECURITY CAM (22-31) ##################\n ");
+
+ for (j = 22; j < TOTAL_CAM_ENTRY; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_cmd = entry_i+CAM_CONTENT_COUNT*j;
+ target_cmd = target_cmd | BIT(31);
+
+ while ((i--) >= 0) {
+ ulstatus = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int dl_proc_open_cam_3(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_proc_get_cam_register_3,
+ GET_INODE_DATA(inode));
+}
+
+static const struct file_operations file_ops_cam_3 = {
+ .open = dl_proc_open_cam_3,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
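+/* Create the per-device proc directory (named after the MAC address)
+ * and populate it with the register/CAM dump entries above. */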
+void rtl_proc_add_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct proc_dir_entry *entry;
+
+ snprintf(rtlpriv->dbg.proc_name, 18, "%x-%x-%x-%x-%x-%x",
+ rtlefuse->dev_addr[0], rtlefuse->dev_addr[1],
+ rtlefuse->dev_addr[2], rtlefuse->dev_addr[3],
+ rtlefuse->dev_addr[4], rtlefuse->dev_addr[5]);
+
+ rtlpriv->dbg.proc_dir = proc_mkdir(rtlpriv->dbg.proc_name, proc_topdir);
+ if (!rtlpriv->dbg.proc_dir) {
+ RT_TRACE(COMP_INIT, DBG_EMERG, ("Unable to init "
+ "/proc/net/%s/%s\n", rtlpriv->cfg->name,
+ rtlpriv->dbg.proc_name));
+ return;
+ }
+
+ entry = proc_create_data("mac-0", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_0, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-0\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-1", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_1, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-1\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-2", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_2, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-2\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-3", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_3, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-3\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-4", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_4, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-4\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-5", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_5, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-5\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-6", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_6, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-6\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("mac-7", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_mac_7, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/mac-7\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-8", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_8, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-8\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-9", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_9, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-9\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-a", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_a, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-a\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-b", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_b, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-b\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-c", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_c, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-c\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-d", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_d, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-d\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-e", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_e, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-e\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("bb-f", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_bb_f, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/bb-f\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("rf-a", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_rf_a, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/rf-a\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("rf-b", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_rf_b, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/rf-b\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-1", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_1, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/cam-1\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-2", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_2, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/cam-2\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+
+ entry = proc_create_data("cam-3", S_IFREG | S_IRUGO,
+ rtlpriv->dbg.proc_dir, &file_ops_cam_3, hw);
+ if (!entry)
+ RT_TRACE(COMP_INIT, DBG_EMERG,
+ ("Unable to initialize /proc/net/%s/%s/cam-3\n",
+ rtlpriv->cfg->name, rtlpriv->dbg.proc_name));
+}
+
+void rtl_proc_remove_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dbg.proc_dir) {
+ remove_proc_entry("mac-0", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-1", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-2", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-3", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-4", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-5", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-6", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("mac-7", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-8", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-9", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-a", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-b", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-c", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-d", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-e", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("bb-f", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("rf-a", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("rf-b", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-1", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-2", rtlpriv->dbg.proc_dir);
+ remove_proc_entry("cam-3", rtlpriv->dbg.proc_dir);
+
+ remove_proc_entry(rtlpriv->dbg.proc_name, proc_topdir);
+
+ rtlpriv->dbg.proc_dir = NULL;
+ }
+}
+
+void rtl_proc_add_topdir(void)
+{
+ proc_topdir = proc_mkdir("rtlwifi", init_net.proc_net);
+}
+
+void rtl_proc_remove_topdir(void)
+{
+ if (proc_topdir)
+ remove_proc_entry("rtlwifi", init_net.proc_net);
+}
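+
+/*
+ * Sketch of the resulting proc layout: rtl_proc_add_topdir() creates
+ * /proc/net/rtlwifi once at module load, rtl_proc_add_one() adds a
+ * per-device directory (named after dbg.proc_name) below it holding the
+ * mac-*, bb-*, rf-* and cam-* register dump files, and the remove
+ * helpers tear the tree down again in reverse order.
+ */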
diff --git a/drivers/staging/rtl8821ae/debug.h b/drivers/staging/rtl8821ae/debug.h
new file mode 100644
index 000000000000..6c0a553e98b7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/debug.h
@@ -0,0 +1,227 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_DEBUG_H__
+#define __RTL_DEBUG_H__
+
+/*--------------------------------------------------------------
+ Debug level
+--------------------------------------------------------------*/
+/*
+ *Fatal bug.
+ *For example, Tx/Rx/IO locked up,
+ *memory access violation,
+ *resource allocation failure,
+ *unexpected HW behavior, HW BUG
+ *and so on.
+ */
+#define DBG_EMERG 0
+
+/*
+ *Abnormal, rare, or unexpected cases.
+ *For example, Packet/IO Ctl canceled,
+ *device unexpectedly removed, and so on.
+ */
+#define DBG_WARNING 2
+
+/*
+ *Normal case that a driver developer
+ *should enable; shows link status such
+ *as assoc/AddBA/DHCP/adapter start and
+ *other basic, useful information.
+ */
+#define DBG_DMESG 3
+
+/*
+ *Normal case with useful information
+ *about current SW or HW state.
+ *For example, Tx/Rx descriptor to fill,
+ *Tx/Rx descriptor completed status,
+ *SW protocol state change, dynamic
+ *mechanism state change and so on.
+ */
+#define DBG_LOUD 4
+
+/*
+ *Normal case with detail execution
+ *flow or information.
+ */
+#define DBG_TRACE 5
+
+/*--------------------------------------------------------------
+ Define the rt_trace components
+--------------------------------------------------------------*/
+#define COMP_ERR BIT(0)
+#define COMP_FW BIT(1)
+#define COMP_INIT BIT(2) /*For init/deinit */
+#define COMP_RECV BIT(3) /*For Rx. */
+#define COMP_SEND BIT(4) /*For Tx. */
+#define COMP_MLME BIT(5) /*For MLME. */
+#define COMP_SCAN BIT(6) /*For Scan. */
+#define COMP_INTR BIT(7) /*For interrupt Related. */
+#define COMP_LED BIT(8) /*For LED. */
+#define COMP_SEC BIT(9) /*For sec. */
+#define COMP_BEACON BIT(10) /*For beacon. */
+#define COMP_RATE BIT(11) /*For rate. */
+#define COMP_RXDESC BIT(12) /*For rx desc. */
+#define COMP_DIG BIT(13) /*For DIG */
+#define COMP_TXAGC BIT(14) /*For Tx power */
+#define COMP_HIPWR BIT(15) /*For High Power Mechanism */
+#define COMP_POWER BIT(16) /*For lps/ips/aspm. */
+#define COMP_POWER_TRACKING BIT(17) /*For TX POWER TRACKING */
+#define COMP_BB_POWERSAVING BIT(18)
+#define COMP_SWAS BIT(19) /*For SW Antenna Switch */
+#define COMP_RF BIT(20) /*For RF. */
+#define COMP_TURBO BIT(21) /*For EDCA TURBO. */
+#define COMP_RATR BIT(22)
+#define COMP_CMD BIT(23)
+#define COMP_EFUSE BIT(24)
+#define COMP_QOS BIT(25)
+#define COMP_MAC80211 BIT(26)
+#define COMP_REGD BIT(27)
+#define COMP_CHAN BIT(28)
+#define COMP_EASY_CONCURRENT BIT(29)
+#define COMP_BT_COEXIST BIT(30)
+#define COMP_IQK BIT(31)
+
+/*--------------------------------------------------------------
+ Define the rt_print components
+--------------------------------------------------------------*/
+/* Define EEPROM and EFUSE check module bit*/
+#define EEPROM_W BIT(0)
+#define EFUSE_PG BIT(1)
+#define EFUSE_READ_ALL BIT(2)
+
+/* Define init check for module bit*/
+#define INIT_EEPROM BIT(0)
+#define INIT_TxPower BIT(1)
+#define INIT_IQK BIT(2)
+#define INIT_RF BIT(3)
+
+/* Define PHY-BB/RF/MAC check module bit */
+#define PHY_BBR BIT(0)
+#define PHY_BBW BIT(1)
+#define PHY_RFR BIT(2)
+#define PHY_RFW BIT(3)
+#define PHY_MACR BIT(4)
+#define PHY_MACW BIT(5)
+#define PHY_ALLR BIT(6)
+#define PHY_ALLW BIT(7)
+#define PHY_TXPWR BIT(8)
+#define PHY_PWRDIFF BIT(9)
+
+/* Define Dynamic Mechanism check module bit --> FDM */
+#define WA_IOT BIT(0)
+#define DM_PWDB BIT(1)
+#define DM_MONITOR BIT(2)
+#define DM_DIG BIT(3)
+#define DM_EDCA_TURBO BIT(4)
+
+enum dbgp_flag_e {
+ FQOS = 0,
+ FTX = 1,
+ FRX = 2,
+ FSEC = 3,
+ FMGNT = 4,
+ FMLME = 5,
+ FRESOURCE = 6,
+ FBEACON = 7,
+ FISR = 8,
+ FPHY = 9,
+ FMP = 10,
+ FEEPROM = 11,
+ FPWR = 12,
+ FDM = 13,
+ FDBGCtrl = 14,
+ FC2H = 15,
+ FBT = 16,
+ FINIT = 17,
+ FIOCTL = 18,
+ DBGP_TYPE_MAX
+};
+
+#define RT_ASSERT(_exp,fmt) \
+ do { \
+ if (!(_exp)) { \
+ printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
+ __func__); \
+ printk fmt; \
+ } \
+ } while (0)
+
+#define RT_DISP(dbgtype, dbgflag, printstr)
+
+#define RT_TRACE(comp, level, fmt)\
+ do { \
+ if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+ ((level) <= rtlpriv->dbg.global_debuglevel))) { \
+ printk(KERN_DEBUG "%s-%d:%s():<%lx-%x> ", \
+ KBUILD_MODNAME, \
+ rtlpriv->rtlhal.interfaceindex, __func__, \
+ in_interrupt(), in_atomic()); \
+ printk fmt; \
+ } \
+ } while (0)
+
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr) \
+ do { \
+ if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk printstr; \
+ } \
+ } while (0)
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+ _hexdatalen) \
+ do {\
+ if(unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents ) &&\
+ (_level <= rtlpriv->dbg.global_debuglevel ))) { \
+ int __i; \
+ u8* ptr = (u8*)_hexdata; \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk(KERN_DEBUG "In process \"%s\" (pid %i):", \
+ current->comm, \
+ current->pid); \
+ printk(_titlestring); \
+ for( __i=0; __i<(int)_hexdatalen; __i++ ) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) % 4) \
+ == 0)?" ":" ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0)
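+
+/*
+ * Minimal usage sketch for the trace macros above.  RT_TRACE filters on
+ * a component bit and a level threshold; RT_TRACE, RTPRINT and
+ * RT_PRINT_DATA all expect "rtlpriv" to be in scope at the call site,
+ * so a hypothetical caller would look like:
+ *
+ *   RT_TRACE(COMP_INIT, DBG_DMESG, ("adapter start, chan=%d\n", chan));
+ *   RT_ASSERT(skb != NULL, ("skb must not be NULL\n"));
+ *   RT_PRINT_DATA(rtlpriv, COMP_RECV, DBG_TRACE, ("rx desc:\n"),
+ *                 rxdesc, sizeof(rxdesc));
+ *
+ * The extra parentheses around the format arguments are required because
+ * the macros forward them to printk as a single parameter.
+ */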
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
+void rtl_proc_add_one(struct ieee80211_hw *hw);
+void rtl_proc_remove_one(struct ieee80211_hw *hw);
+void rtl_proc_add_topdir(void);
+void rtl_proc_remove_topdir(void);
+#endif
diff --git a/drivers/staging/rtl8821ae/efuse.c b/drivers/staging/rtl8821ae/efuse.c
new file mode 100644
index 000000000000..250aae1ce631
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.c
@@ -0,0 +1,1285 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "efuse.h"
+#include "btcoexist/halbt_precomp.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u8 MAX_PGPKT_SIZE = 9;
+static const u8 PGPKT_DATA_SIZE = 8;
+static const int EFUSE_MAX_SIZE = 512;
+
+static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
+ {0, 0, 0, 2},
+ {0, 1, 0, 2},
+ {0, 2, 0, 2},
+ {1, 0, 0, 1},
+ {1, 0, 1, 1},
+ {1, 1, 0, 1},
+ {1, 1, 1, 3},
+ {1, 3, 0, 17},
+ {3, 3, 1, 48},
+ {10, 0, 0, 6},
+ {10, 3, 0, 1},
+ {10, 3, 1, 1},
+ {11, 0, 0, 28}
+};
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 * value);
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 * value);
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 * value);
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 value);
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 value);
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 value);
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
+ u8 data);
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse);
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
+ u8 *data);
+static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
+ u8 word_en, u8 * data);
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+ u8 * targetdata);
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 * data);
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
+ u8 pwrstate);
+static u16 efuse_get_current_size(struct ieee80211_hw *hw);
+static u8 efuse_calculate_word_cnts(u8 word_en);
+
+void efuse_initialize(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
+ temp = bytetemp | 0x20;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
+ temp = bytetemp & 0xFE;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
+
+ rtl_write_byte(rtlpriv, 0x2F8, 0x3);
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+}
+
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 data;
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+ if (address < efuse_real_content_len) {
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp & 0x7F;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ while (!(bytetemp & 0x80)) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ return data;
+ } else
+ return 0xFF;
+
+}
+/* EXPORT_SYMBOL(efuse_read_1byte); */
+
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("Addr=%x Data =%x\n", address, value));
+
+ if (address < efuse_real_content_len) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
+
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+
+ while (bytetemp & 0x80) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 100) {
+ k = 0;
+ break;
+ }
+ }
+ }
+
+}
+
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (_offset & 0xff));
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ (readbyte & 0x7f));
+
+ retry = 0;
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+ value32 = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL]);
+ retry++;
+ }
+
+ udelay(50);
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+
+ *pbuf = (u8) (value32 & 0xff);
+}
+
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 efuse_tbl[rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]];
+ u8 rtemp8[1];
+ u16 efuse_addr = 0;
+ u8 offset, wren;
+ u8 u1temp = 0;
+ u16 i;
+ u16 j;
+ const u16 efuse_max_section =
+ rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
+ const u32 efuse_real_content_len =
+ rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
+ u16 efuse_word[efuse_max_section][EFUSE_MAX_WORD_UNIT];
+ u16 efuse_utilized = 0;
+ u8 efuse_usage;
+
+ if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("read_efuse(): Invalid offset(%#x) with read "
+ "bytes(%#x)!!\n", _offset, _size_byte));
+ return;
+ }
+
+ for (i = 0; i < efuse_max_section; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ efuse_word[i][j] = 0xFFFF;
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF) {
+ efuse_utilized++;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ efuse_addr++;
+ }
+
+ while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_real_content_len)) {
+ /* Check PG header for section num. */
+ if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
+ u1temp = ((*rtemp8 & 0xE0) >> 5);
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+
+ if ((*rtemp8 & 0x0F) == 0x0F) {
+ efuse_addr++;
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+
+ if (*rtemp8 != 0xFF &&
+ (efuse_addr < efuse_real_content_len)) {
+ efuse_addr++;
+ }
+ continue;
+ } else {
+ offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (*rtemp8 & 0x0F);
+ efuse_addr++;
+ }
+ } else {
+ offset = ((*rtemp8 >> 4) & 0x0f);
+ wren = (*rtemp8 & 0x0f);
+ }
+
+ if (offset < efuse_max_section) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("offset-%d Worden=%x\n", offset, wren));
+
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ if (!(wren & 0x01)) {
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] = (*rtemp8 &
+ 0xff);
+
+ if (efuse_addr >=
+ efuse_real_content_len)
+ break;
+
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] |=
+ (((u16) * rtemp8 << 8) & 0xff00);
+
+ if (efuse_addr >= efuse_real_content_len)
+ break;
+ }
+
+ wren >>= 1;
+ }
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF && (efuse_addr < efuse_real_content_len)) {
+ efuse_utilized++;
+ efuse_addr++;
+ }
+ }
+
+ for (i = 0; i < efuse_max_section; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuse_tbl[(i * 8) + (j * 2)] =
+ (efuse_word[i][j] & 0xff);
+ efuse_tbl[(i * 8) + ((j * 2) + 1)] =
+ ((efuse_word[i][j] >> 8) & 0xff);
+ }
+ }
+
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuse_tbl[_offset + i];
+
+ rtlefuse->efuse_usedbytes = efuse_utilized;
+ efuse_usage = (u8) ((efuse_utilized * 100) / efuse_real_content_len);
+ rtlefuse->efuse_usedpercentage = efuse_usage;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
+ (u8 *) & efuse_utilized);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
+ (u8 *) & efuse_usage);
+}
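+
+/*
+ * Worked example of the map assembly above: efuse_word[i][j] holds the
+ * j-th 16-bit word of logical section i, and the copy loop stores it
+ * little-endian into efuse_tbl at byte offset i * 8 + j * 2, so section
+ * 1, word 2 ends up in bytes 12 and 13 of the logical map that pbuf is
+ * served from.
+ */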
+
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 section_idx, i, Base;
+ u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
+ bool bwordchanged, bresult = true;
+
+ for (section_idx = 0; section_idx < 16; section_idx++) {
+ Base = section_idx * 8;
+ bwordchanged = false;
+
+ for (i = 0; i < 8; i = i + 2) {
+ if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
+ (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
+ 1])) {
+ words_need++;
+ bwordchanged = true;
+ }
+ }
+
+ if (bwordchanged == true)
+ hdr_num++;
+ }
+
+ totalbytes = hdr_num + words_need * 2;
+ efuse_used = rtlefuse->efuse_usedbytes;
+
+ if ((totalbytes + efuse_used) >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
+ bresult = false;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse_shadow_update_chk(): totalbytes(%#x), "
+ "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ totalbytes, hdr_num, words_need, efuse_used));
+
+ return bresult;
+}
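+
+/*
+ * Worked example for the capacity check above: if three words changed
+ * and they fall in two different 8-byte sections, words_need = 3 and
+ * hdr_num = 2, so totalbytes = 2 + 3 * 2 = 8 bytes to program.  The
+ * update is rejected once the already used bytes plus this total would
+ * reach EFUSE_MAX_SIZE minus the OOB protect length.
+ */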
+
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 *value)
+{
+ if (type == 1)
+ efuse_shadow_read_1byte(hw, offset, (u8 *) value);
+ else if (type == 2)
+ efuse_shadow_read_2byte(hw, offset, (u16 *) value);
+ else if (type == 4)
+ efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+
+}
+
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value)
+{
+ if (type == 1)
+ efuse_shadow_write_1byte(hw, offset, (u8) value);
+ else if (type == 2)
+ efuse_shadow_write_2byte(hw, offset, (u16) value);
+ else if (type == 4)
+ efuse_shadow_write_4byte(hw, offset, (u32) value);
+
+}
+
+bool efuse_shadow_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u16 i, offset, base;
+ u8 word_en = 0x0F;
+ u8 first_pg = false;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+
+ if (!efuse_shadow_update_chk(hw)) {
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse out of capacity!!\n"));
+ return false;
+ }
+ efuse_power_switch(hw, true, true);
+
+ for (offset = 0; offset < 16; offset++) {
+
+ word_en = 0x0F;
+ base = offset * 8;
+
+ for (i = 0; i < 8; i++) {
+ if (first_pg == true) {
+
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ } else {
+
+ if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ }
+ }
+ }
+
+ if (word_en != 0x0F) {
+ u8 tmpdata[8];
+ memcpy(tmpdata, (&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("U-efuse\n"), tmpdata, 8);
+
+ if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
+ tmpdata)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("PG section(%#x) fail!!\n", offset));
+ break;
+ }
+ }
+
+ }
+
+ efuse_power_switch(hw, true, false);
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n"));
+ return true;
+}
+
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ if (rtlefuse->autoload_failflag == true) {
+ memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]),
+ 0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+ } else {
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+ }
+
+ memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+}
+/* EXPORT_SYMBOL(rtl_efuse_shadow_map_update); */
+
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
+{
+ u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ efuse_power_switch(hw, true, true);
+
+ efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
+
+ efuse_power_switch(hw, true, false);
+
+}
+
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
+{
+}
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+}
+
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+
+}
+
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
+}
+
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
+}
+
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
+
+}
+
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
+ (u8) (value & 0x000000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
+ (u8) ((value >> 8) & 0x0000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
+ (u8) ((value >> 16) & 0x00FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
+ (u8) ((value >> 24) & 0xFF);
+
+}
+
+int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ int bresult;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((u8) ((addr >> 8) & 0x03)) |
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
+ 0xFC));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+ while (!(0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100) {
+ *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ bresult = true;
+ } else {
+ *data = 0xff;
+ bresult = false;
+ }
+ return bresult;
+}
+/* EXPORT_SYMBOL(efuse_one_byte_read); */
+
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ bool bresult;
+
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("Addr = %x Data=%x\n", addr, data));
+
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] +
+ 2) & 0xFC) | (u8) ((addr >> 8) & 0x03));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
+
+ while ((0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100)
+ bresult = true;
+ else
+ bresult = false;
+
+ return bresult;
+}
+
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ efuse_power_switch(hw, false, true);
+ read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
+ efuse_power_switch(hw, false, false);
+}
+
+static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, u8 *tmpdata,
+ u8 *readstate)
+{
+ bool bdataempty = true;
+ u8 hoffset;
+ u8 tmpidx;
+ u8 hworden;
+ u8 word_cnts;
+
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+
+ if (hoffset == offset) {
+ for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
+ if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
+ &efuse_data)) {
+ tmpdata[tmpidx] = efuse_data;
+ if (efuse_data != 0xff)
+ bdataempty = false;
+ }
+ }
+
+ if (bdataempty == false) {
+ *readstate = PG_STATE_DATA;
+ } else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+
+ } else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+}
+
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
+{
+ u8 readstate = PG_STATE_HEADER;
+
+ bool bcontinual = true;
+
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hworden = 0;
+ u8 tmpdata[8];
+
+ if (data == NULL)
+ return false;
+ if (offset > 15)
+ return false;
+
+ memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+
+ while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (readstate & PG_STATE_HEADER) {
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_data != 0xFF))
+ efuse_read_data_case1(hw, &efuse_addr, efuse_data, offset,
+ tmpdata, &readstate);
+ else
+ bcontinual = false;
+ } else if (readstate & PG_STATE_DATA) {
+ efuse_word_enable_data_read(hworden, tmpdata, data);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ readstate = PG_STATE_HEADER;
+ }
+
+ }
+
+ if ((data[0] == 0xff) && (data[1] == 0xff) &&
+ (data[2] == 0xff) && (data[3] == 0xff) &&
+ (data[4] == 0xff) && (data[5] == 0xff) &&
+ (data[6] == 0xff) && (data[7] == 0xff))
+ return false;
+ else
+ return true;
+
+}
+
+static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, int *bcontinual,
+ u8 *write_state, struct pgpkt_struct *target_pkt,
+ int *repeat_times, int *bresult, u8 word_en)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ int bdataempty = true;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 badworden = 0x0F;
+ u8 match_word_en, tmp_word_en;
+ u8 tmpindex;
+ u8 tmp_header = efuse_data;
+ u8 tmp_word_cnts;
+
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ if (tmp_pkt.offset != target_pkt->offset) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
+ if (efuse_one_byte_read(hw,
+ (*efuse_addr + 1 + tmpindex),
+ &efuse_data) && (efuse_data != 0xFF))
+ bdataempty = false;
+ }
+
+ if (bdataempty == false) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ match_word_en = 0x0F;
+ if (!((target_pkt->word_en & BIT(0)) |
+ (tmp_pkt.word_en & BIT(0))))
+ match_word_en &= (~BIT(0));
+
+ if (!((target_pkt->word_en & BIT(1)) |
+ (tmp_pkt.word_en & BIT(1))))
+ match_word_en &= (~BIT(1));
+
+ if (!((target_pkt->word_en & BIT(2)) |
+ (tmp_pkt.word_en & BIT(2))))
+ match_word_en &= (~BIT(2));
+
+ if (!((target_pkt->word_en & BIT(3)) |
+ (tmp_pkt.word_en & BIT(3))))
+ match_word_en &= (~BIT(3));
+
+ if ((match_word_en & 0x0F) != 0x0F) {
+ badworden = efuse_word_enable_data_write(hw,
+ *efuse_addr + 1,
+ tmp_pkt.word_en,
+ target_pkt->data);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ }
+
+ tmp_word_en = 0x0F;
+ if ((target_pkt->word_en & BIT(0)) ^
+ (match_word_en & BIT(0)))
+ tmp_word_en &= (~BIT(0));
+
+ if ((target_pkt->word_en & BIT(1)) ^
+ (match_word_en & BIT(1)))
+ tmp_word_en &= (~BIT(1));
+
+ if ((target_pkt->word_en & BIT(2)) ^
+ (match_word_en & BIT(2)))
+ tmp_word_en &= (~BIT(2));
+
+ if ((target_pkt->word_en & BIT(3)) ^
+ (match_word_en & BIT(3)))
+ tmp_word_en &= (~BIT(3));
+
+ if ((tmp_word_en & 0x0F) != 0x0F) {
+ *efuse_addr = efuse_get_current_size(hw);
+ target_pkt->offset = offset;
+ target_pkt->word_en = tmp_word_en;
+ } else {
+ *bcontinual = false;
+ }
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ *efuse_addr += (2 * tmp_word_cnts) + 1;
+ target_pkt->offset = offset;
+ target_pkt->word_en = word_en;
+ *write_state = PG_STATE_HEADER;
+ }
+ }
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
+}
+
+static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
+ int *bcontinual, u8 *write_state,
+ struct pgpkt_struct target_pkt,
+ int *repeat_times, int *bresult)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ u8 pg_header;
+ u8 tmp_header;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 tmp_word_cnts;
+ u8 badworden = 0x0F;
+
+ pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
+ efuse_one_byte_write(hw, *efuse_addr, pg_header);
+ efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
+
+ if (tmp_header == pg_header) {
+ *write_state = PG_STATE_DATA;
+ } else if (tmp_header == 0xFF) {
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ memset(originaldata, 0xff, 8 * sizeof(u8));
+
+ if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
+ badworden = efuse_word_enable_data_write(hw,
+ *efuse_addr + 1,
+ tmp_pkt.word_en,
+ originaldata);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = tmp_pkt.offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ *efuse_addr = efuse_get_current_size(hw);
+ } else {
+ *efuse_addr = *efuse_addr +
+ (tmp_word_cnts * 2) + 1;
+ }
+ } else {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ }
+
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-2\n"));
+ }
+}
+
+static int efuse_pg_packet_write(struct ieee80211_hw *hw,
+ u8 offset, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct target_pkt;
+ u8 write_state = PG_STATE_HEADER;
+ int bcontinual = true, bdataempty = true, bresult = true;
+ u16 efuse_addr = 0;
+ u8 efuse_data;
+ u8 target_word_cnts = 0;
+ u8 badworden = 0x0F;
+ static int repeat_times = 0;
+
+ if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse_pg_packet_write error \n"));
+ return false;
+ }
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = word_en;
+
+ memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
+
+ efuse_word_enable_data_read(word_en, data, target_pkt.data);
+ target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
+
+ while (bcontinual && (efuse_addr < (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
+
+ if (write_state == PG_STATE_HEADER) {
+ bdataempty = true;
+ badworden = 0x0F;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER\n"));
+
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ (efuse_data != 0xFF))
+ efuse_write_data_case1(hw, &efuse_addr,
+ efuse_data, offset,
+ &bcontinual,
+ &write_state,
+ &target_pkt,
+ &repeat_times, &bresult,
+ word_en);
+ else
+ efuse_write_data_case2(hw, &efuse_addr,
+ &bcontinual,
+ &write_state,
+ target_pkt,
+ &repeat_times,
+ &bresult);
+
+ } else if (write_state == PG_STATE_DATA) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_DATA\n"));
+ badworden = 0x0f;
+ badworden =
+ efuse_word_enable_data_write(hw, efuse_addr + 1,
+ target_pkt.word_en,
+ target_pkt.data);
+
+ if ((badworden & 0x0F) == 0x0F) {
+ bcontinual = false;
+ } else {
+ efuse_addr =
+ efuse_addr + (2 * target_word_cnts) + 1;
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = badworden;
+ target_word_cnts =
+ efuse_calculate_word_cnts(target_pkt.
+ word_en);
+ write_state = PG_STATE_HEADER;
+ repeat_times++;
+ if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ bcontinual = false;
+ bresult = false;
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-3\n"));
+ }
+ }
+ }
+
+ if (efuse_addr >= (EFUSE_MAX_SIZE -
+ rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
+ }
+
+ return true;
+}
+
+static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata,
+ u8 *targetdata)
+{
+ if (!(word_en & BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+
+ if (!(word_en & BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+
+ if (!(word_en & BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+
+ if (!(word_en & BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
+
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 tmpaddr;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[8];
+
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
+ RT_TRACE(COMP_EFUSE, DBG_LOUD,
+ ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
+
+ if (!(word_en & BIT(0))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[0]);
+ efuse_one_byte_write(hw, start_addr++, data[1]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+ badworden &= (~BIT(0));
+ }
+
+ if (!(word_en & BIT(1))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[2]);
+ efuse_one_byte_write(hw, start_addr++, data[3]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+ badworden &= (~BIT(1));
+ }
+
+ if (!(word_en & BIT(2))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[4]);
+ efuse_one_byte_write(hw, start_addr++, data[5]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+ badworden &= (~BIT(2));
+ }
+
+ if (!(word_en & BIT(3))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[6]);
+ efuse_one_byte_write(hw, start_addr++, data[7]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+ badworden &= (~BIT(3));
+ }
+
+ return badworden;
+}
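+
+/*
+ * Worked example for the return value above: badworden starts at 0x0F
+ * and a bit is cleared for every word whose read-back does not match
+ * the data just written.  A return of 0x0E (bit 0 cleared) means word 0
+ * failed verification; the callers then reuse that value as a new
+ * word_en, since a cleared word_en bit marks a word that still needs to
+ * be written.
+ */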
+
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tempval;
+ u16 tmpV16;
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ if (pwrstate == true)
+ {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
+
+ /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+
+ printk("SYS_ISO_CTRL=%04x.\n",tmpV16);
+ if( ! (tmpV16 & PWC_EV12V ) ){
+ tmpV16 |= PWC_EV12V ;
+ /* PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16); */
+ }
+ /* Reset: 0x0000h[28], default valid */
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
+ printk("SYS_FUNC_EN=%04x.\n",tmpV16);
+ if( !(tmpV16 & FEN_ELDR) ){
+ tmpV16 |= FEN_ELDR ;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ }
+
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK] );
+ printk("SYS_CLK=%04x.\n",tmpV16);
+ if( (!(tmpV16 & LOADER_CLK_EN) ) ||(!(tmpV16 & ANA8M) ) )
+ {
+ tmpV16 |= (LOADER_CLK_EN |ANA8M ) ;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ }
+
+ if(bwrite == true)
+ {
+ /* Enable LDO 2.5V before read/write action */
+ tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ printk("EFUSE_TEST=%04x.\n",tmpV16);
+ tempval &= ~(BIT(3) | BIT(4) |BIT(5) | BIT(6));
+ tempval |= (VOLTAGE_V25 << 3);
+ tempval |= BIT(7);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, tempval);
+ }
+ }
+ else
+ {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x00);
+ if(bwrite == true){
+ /* Disable LDO 2.5V after read/write action */
+ tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, (tempval & 0x7F));
+ }
+ }
+ }
+ else
+ {
+ if (pwrstate == true && (rtlhal->hw_type !=
+ HARDWARE_TYPE_RTL8192SE)) {
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS],
+ 0x69);
+
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL],
+ tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+ if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
+ (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
+ tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
+ rtlpriv->cfg->maps[EFUSE_ANA8M]);
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ }
+ }
+
+ if (pwrstate == true) {
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ }
+
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval | 0x80));
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+ 0x03);
+ }
+
+ } else {
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0);
+
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval & 0x7F));
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
+ 0x02);
+ }
+
+ }
+ }
+
+}
+
+static u16 efuse_get_current_size(struct ieee80211_hw *hw)
+{
+ int bcontinual = true;
+ u16 efuse_addr = 0;
+ u8 hoffset, hworden;
+ u8 efuse_data, word_cnts;
+
+ while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (efuse_data != 0xFF) {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ } else {
+ bcontinual = false;
+ }
+ }
+
+ return efuse_addr;
+}
+
+static u8 efuse_calculate_word_cnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++;
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
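+
+/*
+ * Worked example of the one-byte PG header layout used throughout this
+ * file: the high nibble is the section offset and the low nibble is
+ * word_en, where a cleared bit marks a word that carries data.  For a
+ * header byte of 0x2C, offset = 2 and word_en = 0xC (binary 1100), so
+ * words 0 and 1 are present; efuse_calculate_word_cnts(0xC) returns 2
+ * and the packet occupies 1 header byte + 2 * 2 data bytes = 5 bytes.
+ */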
+
diff --git a/drivers/staging/rtl8821ae/efuse.h b/drivers/staging/rtl8821ae/efuse.h
new file mode 100644
index 000000000000..a9fcbe05cf9a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/efuse.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_EFUSE_H_
+#define __RTL_EFUSE_H_
+
+#define EFUSE_IC_ID_OFFSET 506
+
+/*
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAP_LEN 128
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_MAX_WORD_UNIT 4
+#define EFUSE_IC_ID_OFFSET 506
+*/
+
+#define EFUSE_MAX_WORD_UNIT 4
+
+#define EFUSE_INIT_MAP 0
+#define EFUSE_MODIFY_MAP 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define _POWERON_DELAY_
+#define _PRE_EXECUTE_READ_CMD_
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+#define EFUSE_ERROE_HANDLE 1
+
+struct efuse_map {
+ u8 offset;
+ u8 word_start;
+ u8 byte_start;
+ u8 byte_cnts;
+};
+
+struct pgpkt_struct {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+};
+
+enum efuse_data_item {
+ EFUSE_CHIP_ID = 0,
+ EFUSE_LDO_SETTING,
+ EFUSE_CLK_SETTING,
+ EFUSE_SDIO_SETTING,
+ EFUSE_CCCR,
+ EFUSE_SDIO_MODE,
+ EFUSE_OCR,
+ EFUSE_F0CIS,
+ EFUSE_F1CIS,
+ EFUSE_MAC_ADDR,
+ EFUSE_EEPROM_VER,
+ EFUSE_CHAN_PLAN,
+ EFUSE_TXPW_TAB
+};
+
+enum {
+ VOLTAGE_V25 = 0x03,
+ LDOE25_SHIFT = 28,
+};
+
+struct efuse_priv {
+ u8 id[2];
+ u8 ldo_setting[2];
+ u8 clk_setting[2];
+ u8 cccr;
+ u8 sdio_mode;
+ u8 ocr[3];
+ u8 cis0[17];
+ u8 cis1[48];
+ u8 mac_addr[6];
+ u8 eeprom_verno;
+ u8 channel_plan;
+ u8 tx_power_b[14];
+ u8 tx_power_g[14];
+};
+
+extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+extern void efuse_initialize(struct ieee80211_hw *hw);
+extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+extern int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data);
+extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
+ u16 _size_byte, u8 * pbuf);
+extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 * value);
+extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 value);
+extern bool efuse_shadow_update(struct ieee80211_hw *hw);
+extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+#endif
diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c
new file mode 100644
index 000000000000..a562aa60d595
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.c
@@ -0,0 +1,2549 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "core.h"
+#include "wifi.h"
+#include "pci.h"
+#include "base.h"
+#include "ps.h"
+#include "efuse.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
+ INTEL_VENDOR_ID,
+ ATI_VENDOR_ID,
+ AMD_VENDOR_ID,
+ SIS_VENDOR_ID
+};
+
+static const u8 ac_to_hwq[] = {
+ VO_QUEUE,
+ VI_QUEUE,
+ BE_QUEUE,
+ BK_QUEUE
+};
+
+u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 fc = rtl_get_fc(skb);
+ u8 queue_index = skb_get_queue_mapping(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return BEACON_QUEUE;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return MGNT_QUEUE;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ if (ieee80211_is_nullfunc(fc))
+ return HIGH_QUEUE;
+
+ return ac_to_hwq[queue_index];
+}
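+
+/*
+ * Example of the mapping above: mac80211 queue 0 is AC_VO, so a data
+ * frame with skb_get_queue_mapping(skb) == 0 is sent on VO_QUEUE, while
+ * beacons and management/control frames bypass the AC table and use
+ * BEACON_QUEUE and MGNT_QUEUE respectively.
+ */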
+
+/* Update PCI dependent default settings*/
+static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u8 init_aspm;
+
+ ppsc->reg_rfps_level = 0;
+ ppsc->b_support_aspm = 0;
+
+ /*Update PCI ASPM setting */
+ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
+ switch (rtlpci->const_pci_aspm) {
+ case 0:
+ /*No ASPM */
+ break;
+
+ case 1:
+ /*ASPM dynamically enabled/disable. */
+ ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
+ break;
+
+ case 2:
+ /*ASPM with Clock Req dynamically enabled/disable. */
+ ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 3:
+ /*
+ * Always enable ASPM and Clock Req
+ * from initialization to halt.
+ * */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
+ ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 4:
+ /*
+ * Always enable ASPM without Clock Req
+ * from initialization to halt.
+ * */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
+ break;
+ }
+
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+
+ /*Update Radio OFF setting */
+ switch (rtlpci->const_hwsw_rfoff_d3) {
+ case 1:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ break;
+
+ case 2:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+ break;
+
+ case 3:
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
+ break;
+ }
+
+ /*Set HW definition to determine if it supports ASPM. */
+ switch (rtlpci->const_support_pciaspm) {
+ case 0:{
+ /*Not support ASPM. */
+ bool b_support_aspm = false;
+ ppsc->b_support_aspm = b_support_aspm;
+ break;
+ }
+ case 1:{
+ /*Support ASPM. */
+ bool b_support_aspm = true;
+ bool b_support_backdoor = true;
+ ppsc->b_support_aspm = b_support_aspm;
+
+ /*if(priv->oem_id == RT_CID_TOSHIBA &&
+ !priv->ndis_adapter.amd_l1_patch)
+ b_support_backdoor = false; */
+
+ ppsc->b_support_backdoor = b_support_backdoor;
+
+ break;
+ }
+ case 2:
+ /*ASPM value set by chipset. */
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
+ bool b_support_aspm = true;
+ ppsc->b_support_aspm = b_support_aspm;
+ }
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+ /* Toshiba ASPM issue: Toshiba platforms set ASPM themselves,
+ * so we should not set ASPM in the driver */
+ pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
+ if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
+ init_aspm == 0x43)
+ ppsc->b_support_aspm = false;
+}
+
+static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
+ u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool bresult = false;
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+ value |= 0x40;
+
+ pci_write_config_byte(rtlpci->pdev, 0x80, value);
+
+ return bresult;
+}
+
+/* Write 0x01 to enable the clock request; write 0x0 to disable it. */
+static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool bresult = false;
+
+ pci_write_config_byte(rtlpci->pdev, 0x81, value);
+ bresult = true;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ udelay(100);
+
+ return bresult;
+}
+
+/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
+static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ /*Retrieve original configuration settings. */
+ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+ u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+ pcibridge_linkctrlreg;
+ u16 aspmlevel = 0;
+
+ if (!ppsc->b_support_aspm)
+ return;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+
+ return;
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ _rtl_pci_switch_clk_req(hw, 0x0);
+ }
+
+ if (1) {
+ /* ensure the device will be in L0 state after an I/O access */
+ u8 tmp_u1b;
+ pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
+ }
+
+ /*Set corresponding value. */
+ aspmlevel |= BIT(0) | BIT(1);
+ linkctrl_reg &= ~aspmlevel;
+ pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+ udelay(50);
+
+ /*4 Disable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
+
+ udelay(50);
+
+}
+
+/*
+ *Enable RTL8192SE ASPM & enable PCI bridge ASPM for
+ *power saving.  We should follow the sequence: enable
+ *RTL8192SE first, then enable the PCI bridge ASPM,
+ *or the system will show a bluescreen.
+ */
+static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ u16 aspmlevel;
+ u8 u_pcibridge_aspmsetting;
+ u8 u_device_aspmsetting;
+
+ if (!ppsc->b_support_aspm)
+ return;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+ return;
+ }
+
+ /*4 Enable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+
+ u_pcibridge_aspmsetting =
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+ rtlpci->const_hostpci_aspm_setting;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+ u_pcibridge_aspmsetting &= ~BIT(0);
+
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("PlatformEnableASPM(): Write reg[%x] = %x\n",
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting));
+
+ udelay(50);
+
+ /*Get ASPM level (with/without Clock Req) */
+ aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+
+ /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
+ /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
+
+ u_device_aspmsetting |= aspmlevel;
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+ RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ }
+ udelay(100);
+}
+
+static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+
+ bool status = false;
+ u8 offset_e0;
+ unsigned offset_e4;
+
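+ /* Probe vendor-specific config registers 0xE0/0xE4 on the AMD
+ * bridge to check whether its L1 workaround is in effect. */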
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
+
+ if (offset_e0 == 0xA0) {
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE4);
+ rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
+ if (offset_e4 & BIT(23))
+ status = true;
+ }
+
+ return status;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+static u8 _rtl_pci_get_pciehdr_offset(struct ieee80211_hw *hw)
+{
+ u8 capability_offset;
+ u8 num4bytes = 0x34/4;
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u32 pcicfg_addr_port = (pcipriv->ndis_adapter.pcibridge_busnum << 16)|
+ (pcipriv->ndis_adapter.pcibridge_devnum << 11)|
+ (pcipriv->ndis_adapter.pcibridge_funcnum << 8)|
+ (1 << 31);
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, pcicfg_addr_port
+ + (num4bytes << 2));
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &capability_offset);
+ while (capability_offset != 0) {
+ struct rtl_pci_capabilities_header capability_hdr;
+
+ num4bytes = capability_offset / 4;
+ /* Read the header of the capability at this offset.
+ * If the retrieved capability is not the power management
+ * capability that we are looking for, follow the link to
+ * the next capability and continue looping.
+ */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addr_port +
+ (num4bytes << 2));
+ rtl_pci_raw_read_port_ushort(PCI_CONF_DATA,
+ (u16 *)&capability_hdr);
+ /* Found the PCI express capability. */
+ if (capability_hdr.capability_id ==
+ PCI_CAPABILITY_ID_PCI_EXPRESS)
+ break;
+ else
+ capability_offset = capability_hdr.next;
+ }
+ return capability_offset;
+}
+#endif
+/*<delete in kernel end>*/
+
+bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+ struct rtl_priv **buddy_priv)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ bool b_find_buddy_priv = false;
+ struct rtl_priv *temp_priv = NULL;
+ struct rtl_pci_priv *temp_pcipriv = NULL;
+
+ if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
+ list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
+ list) {
+ if (temp_priv) {
+ temp_pcipriv =
+ (struct rtl_pci_priv *)temp_priv->priv;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("pcipriv->ndis_adapter.funcnumber %x \n"),
+ pcipriv->ndis_adapter.funcnumber));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("temp_pcipriv->ndis_adapter.funcnumber %x \n"),
+ temp_pcipriv->ndis_adapter.funcnumber));
+
+ if ((pcipriv->ndis_adapter.busnumber ==
+ temp_pcipriv->ndis_adapter.busnumber) &&
+ (pcipriv->ndis_adapter.devnumber ==
+ temp_pcipriv->ndis_adapter.devnumber) &&
+ (pcipriv->ndis_adapter.funcnumber !=
+ temp_pcipriv->ndis_adapter.funcnumber)) {
+ b_find_buddy_priv = true;
+ break;
+ }
+ }
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ (("b_find_buddy_priv %d \n"), b_find_buddy_priv));
+
+ if (b_find_buddy_priv)
+ *buddy_priv = temp_priv;
+
+ return b_find_buddy_priv;
+}
+
+void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 linkctrl_reg;
+ u8 num4bbytes;
+
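+ /* The bridge's Link Control register sits at offset 0x10 from the
+ * PCIe capability header; convert it to a dword index for the raw
+ * config-space accessors. */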
+ num4bbytes = (capabilityoffset + 0x10) / 4;
+
+ /*Read Link Control Register */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bbytes << 2));
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
+
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+}
+
+static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+ u8 tmp;
+ int pos;
+ u8 linkctrl_reg;
+
+ /*Link Control Register */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
+ pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg));
+
+ pci_read_config_byte(pdev, 0x98, &tmp);
+ tmp |= BIT(4);
+ pci_write_config_byte(pdev, 0x98, tmp);
+
+ tmp = 0x17;
+ pci_write_config_byte(pdev, 0x70f, tmp);
+}
+
+static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ _rtl_pci_update_default_setting(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
+ /*Always enable ASPM & Clock Req. */
+ rtl_pci_enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
+ }
+
+}
+
+static void _rtl_pci_io_handler_init(struct device *dev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->io.dev = dev;
+
+ rtlpriv->io.write8_async = pci_write8_async;
+ rtlpriv->io.write16_async = pci_write16_async;
+ rtlpriv->io.write32_async = pci_write32_async;
+
+ rtlpriv->io.read8_sync = pci_read8_sync;
+ rtlpriv->io.read16_sync = pci_read16_sync;
+ rtlpriv->io.read32_sync = pci_read32_sync;
+
+}
+
+static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *tcb_desc,
+ u8 tid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 additionlen = FCS_LEN;
+ struct sk_buff *next_skb;
+
+ /* total extra length: open is 4, wep/tkip is 8, aes is 12 */
+ if (info->control.hw_key)
+ additionlen += info->control.hw_key->icv_len;
+
+ /* The maximum number of aggregated skbs is 6 */
+ tcb_desc->empkt_num = 0;
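+ /* Walk this TID's wait queue and record the length of every
+ * A-MPDU-eligible packet for early-mode aggregation, up to
+ * max_earlymode_num entries. */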
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
+ struct ieee80211_tx_info *next_info =
+ IEEE80211_SKB_CB(next_skb);
+ if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tcb_desc->empkt_len[tcb_desc->empkt_num] =
+ next_skb->len + additionlen;
+ tcb_desc->empkt_num++;
+ } else {
+ break;
+ }
+
+ if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
+ next_skb))
+ break;
+
+ if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
+ break;
+ }
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ return true;
+}
+
+/* just for early mode now */
+static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *info = NULL;
+ int tid; /* should be int */
+
+ if (!rtlpriv->rtlhal.b_earlymode_enable)
+ return;
+ if (rtlpriv->dm.supp_phymode_switch &&
+ (rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
+ (rtlpriv->buddy_priv &&
+ rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
+ return;
+ /* we only use early mode for the BE/BK/VI/VO queues */
+ for (tid = 7; tid >= 0; tid--) {
+ u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ while (!mac->act_scanning &&
+ rtlpriv->psc.rfpwr_state == ERFON) {
+ struct rtl_tcb_desc tcb_desc;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
+ (ring->entries - skb_queue_len(&ring->queue) >
+ rtlhal->max_earlymode_num)) {
+ skb = skb_dequeue(&mac->skb_waitq[tid]);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ break;
+ }
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+ /* Some frames can't use early mode, e.g.
+ * multicast/broadcast/non-QoS data */
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ _rtl_pci_update_earlymode_info(hw, skb,
+ &tcb_desc, tid);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+#endif
+/*<delete in kernel end>*/
+ }
+ }
+}
+
+static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
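+ /* Reclaim descriptors the hardware has finished with: unmap the
+ * DMA buffer, report the tx status to mac80211 and advance
+ * ring->idx. */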
+ while (skb_queue_len(&ring->queue)) {
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ u16 fc;
+ u8 tid;
+ u8 *entry;
+
+
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
+ return;
+
+ ring->idx = (ring->idx + 1) % ring->entries;
+
+ skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->
+ get_desc((u8 *) entry, true,
+ HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+
+ /* remove early mode header */
+ if (rtlpriv->rtlhal.b_earlymode_enable)
+ skb_pull(skb, EM_HDR_LEN);
+
+ RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
+ ("new ring->idx:%d, "
+ "free: skb_queue_len:%d, free: seq:%d\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *) (skb->data + 22)));
+
+ if (prio == TXCMD_QUEUE) {
+ dev_kfree_skb(skb);
+ goto tx_status_ok;
+ }
+
+ /* For SW LPS: right after the NULL frame has been sent out
+ * we can be sure the AP knows we are asleep; otherwise we
+ * should not let the RF go to sleep. */
+ fc = rtl_get_fc(skb);
+ if (ieee80211_is_nullfunc(fc)) {
+ if (ieee80211_has_pm(fc)) {
+ rtlpriv->mac80211.offchan_deley = true;
+ rtlpriv->psc.state_inap = 1;
+ } else {
+ rtlpriv->psc.state_inap = 0;
+ }
+ }
+ if (ieee80211_is_action(fc)) {
+ struct ieee80211_mgmt_compat *action_frame =
+ (struct ieee80211_mgmt_compat *)skb->data;
+ if (action_frame->u.action.u.ht_smps.action ==
+ WLAN_HT_ACTION_SMPS) {
+ dev_kfree_skb(skb);
+ goto tx_status_ok;
+ }
+ }
+
+ /* update tid tx pkt num */
+ tid = rtl_get_tid(skb);
+ if (tid <= 7)
+ rtlpriv->link_info.tidtx_inperiod[tid]++;
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ /*info->status.rates[0].count = 1; */
+
+ ieee80211_tx_status_irqsafe(hw, skb);
+
+ if ((ring->entries - skb_queue_len(&ring->queue))
+ == 2) {
+
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+ ("more desc left, wake"
+ "skb_queue@%d,ring->idx = %d,"
+ "skb_queue_len = 0x%d\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_wake_queue(hw,
+ skb_get_queue_mapping
+ (skb));
+ }
+tx_status_ok:
+ skb = NULL;
+ }
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+}
+
+static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
+ u8 *entry, int rxring_idx, int desc_idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 bufferaddress;
+ u8 tmp_one = 1;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (!skb)
+ return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+
+ /* Store the DMA mapping address in skb->cb so that
+ * pci_unmap_single() can use it later. */
+ *((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev,
+ skb_tail_pointer(skb), rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, *((dma_addr_t *) skb->cb)))
+ return 0;
+ bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RX_PREPARE,
+ (u8 *) & bufferaddress);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXBUFF_ADDR,
+ (u8 *) & bufferaddress);
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXPKT_LEN,
+ (u8 *) & rtlpci->rxbuffersize);
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXOWN,
+ (u8 *) & tmp_one);
+ }
+
+ return 1;
+}
+
+/* In order to receive 8K AMSDUs we allocate 9100-byte skbs when
+ * initializing the rx ring. If a received packet is not an AMSDU,
+ * passing such a large skb straight to TCP/IP makes big-packet
+ * pings fail (e.g. "ping -s 65507"), so here we reallocate the skb
+ * based on the true packet size. Ideally mac80211 would do this,
+ * but it does not yet. */
+
+/* On some platforms the skb allocation can occasionally fail. In
+ * that case we hand the original skb to mac80211 directly; this
+ * causes no other problems, the packet is at worst dropped by
+ * TCP/IP. */
+static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
+ struct sk_buff *skb, struct ieee80211_rx_status rx_status)
+{
+ if (unlikely(!rtl_action_proc(hw, skb, false))) {
+ dev_kfree_skb_any(skb);
+ } else {
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+ if (likely(uskb)) {
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+
+ ieee80211_rx_irqsafe(hw, uskb);
+ } else {
+ ieee80211_rx_irqsafe(hw, skb);
+ }
+ }
+}
+
+/*hsisr interrupt handler*/
+static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
+ rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
+ rtlpci->sys_irq_mask);
+
+
+}
+static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
+
+ struct ieee80211_rx_status rx_status = { 0 };
+ unsigned int count = rtlpci->rxringcount;
+ bool unicast = false;
+ u8 hw_queue = 0;
+ unsigned int rx_remained_cnt;
+ u8 own;
+ u8 tmp_one;
+
+ struct rtl_stats status = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ /*RX NORMAL PKT */
+ while (count--) {
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ u16 len;
+ /*rx buffer descriptor */
+ struct rtl_rx_buffer_desc *buffer_desc = NULL;
+ /*if use new trx flow, it means wifi info */
+ struct rtl_rx_desc *pdesc = NULL;
+ /*rx pkt */
+ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
+ rtlpci->rx_ring[rxring_idx].idx];
+
+ if (rtlpriv->use_new_trx_flow) {
+ rx_remained_cnt =
+ rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
+ hw_queue);
+ if (rx_remained_cnt < 1)
+ return;
+
+ } else { /* rx descriptor */
+ pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ false,
+ HW_DESC_OWN);
+ if (own) /* wait data to be filled by hardware */
+ return;
+ }
+
+ /* Getting here means the hardware has already filled the data. */
+ /* Attention!
+ * We must NOT access the skb data before pci_unmap_single(). */
+ pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+
+ if (rtlpriv->use_new_trx_flow) {
+ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+ /*means rx wifi info*/
+ pdesc = (struct rtl_rx_desc *)skb->data;
+ }
+
+ rtlpriv->cfg->ops->query_rx_desc(hw, &status,
+ &rx_status, (u8 *) pdesc, skb);
+
+ if (rtlpriv->use_new_trx_flow)
+ rtlpriv->cfg->ops->rx_check_dma_ok(hw,
+ (u8 *)buffer_desc,
+ hw_queue);
+
+
+ len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
+ HW_DESC_RXPKT_LEN);
+
+ if (skb->end - skb->tail > len) {
+ skb_put(skb, len);
+ if (rtlpriv->use_new_trx_flow)
+ skb_reserve(skb, status.rx_drvinfo_size +
+ status.rx_bufshift + 24);
+ else
+ skb_reserve(skb, status.rx_drvinfo_size +
+ status.rx_bufshift);
+
+ } else {
+ printk("skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
+ break;
+ }
+
+ rtlpriv->cfg->ops->rx_command_packet_handler(hw, status, skb);
+
+ /*
+ *NOTICE: do not trim the FCS here; mac80211 does that
+ *itself. If we trimmed it here, secured DHCP would fail:
+ *skb_trim(skb, skb->len - 4);
+ */
+
+ hdr = rtl_get_hdr(skb);
+ fc = rtl_get_fc(skb);
+
+ if (!status.b_crc && !status.b_hwerror) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
+ sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+
+ /* beacon statistics for roaming */
+ rtl_beacon_statistic(hw, skb);
+ rtl_p2p_info(hw, (void*)skb->data, skb->len);
+ /* for sw lps */
+ rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
+ rtl_recognize_peer(hw, (void*)skb->data, skb->len);
+ if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
+ (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
+ (ieee80211_is_beacon(fc) ||
+ ieee80211_is_probe_resp(fc))) {
+ dev_kfree_skb_any(skb);
+ } else {
+ _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
+ }
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
+ rtlpci->rx_ring[hw_queue].next_rx_rp %=
+ RTL_PCI_MAX_RX_COUNT;
+
+
+ rx_remained_cnt--;
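+ /* Let the hardware know how far we have read: register 0x3B4
+ * holds the RX read pointer for the new trx flow. */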
+ if (1/*rx_remained_cnt == 0*/) {
+ rtl_write_word(rtlpriv, 0x3B4,
+ rtlpci->rx_ring[hw_queue].next_rx_rp);
+ }
+ }
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+
+ if (rtlpriv->use_new_trx_flow) {
+ _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx);
+ } else {
+ _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx);
+
+ if (rtlpci->rx_ring[rxring_idx].idx ==
+ rtlpci->rxringcount - 1)
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
+ false,
+ HW_DESC_RXERO,
+ (u8 *) & tmp_one);
+ }
+ rtlpci->rx_ring[rxring_idx].idx =
+ (rtlpci->rx_ring[rxring_idx].idx + 1) %
+ rtlpci->rxringcount;
+ }
+}
+
+static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
+{
+ struct ieee80211_hw *hw = dev_id;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
+ u32 inta = 0;
+ u32 intb = 0;
+
+
+
+ if (rtlpci->irq_enabled == 0)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+
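+ /* Mask all interrupts (HIMR/HIMRE = 0) while this one is being
+ * handled; they are restored at the end of the handler. */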
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
+
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
+
+
+ /*read ISR: 4/8bytes */
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
+
+
+ /*Shared IRQ or HW disappeared */
+ if (!inta || inta == 0xffff)
+ goto done;
+ /*<1> beacon related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("prepare beacon for interrupt!\n"));
+ tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
+ }
+
+
+ /*<2> tx related */
+ if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+ RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, MGNT_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, HIGH_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BK_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BE_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VI_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VO_QUEUE);
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("CMD TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
+ }
+ }
+
+ /*<3> rx related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
+
+ _rtl_pci_rx_interrupt(hw);
+
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("rx descriptor unavailable!\n"));
+ rtl_write_byte(rtlpriv, 0xb4, BIT(1));
+ _rtl_pci_rx_interrupt(hw);
+ }
+
+ if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
+ _rtl_pci_rx_interrupt(hw);
+ }
+
+ /*<4> fw related*/
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("firmware interrupt!\n"));
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.fwevt_wq, 0);
+ }
+ }
+
+ /*<5> hsisr related*/
+ /* Only 8188EE & 8723BE are supported here.
+ * If other ICs took this path the system would be corrupted,
+ * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
+ * are not initialized for them. */
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
+ rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+ RT_TRACE(COMP_INTR, DBG_TRACE,
+ ("hsisr interrupt!\n"));
+ _rtl_pci_hs_interrupt(hw);
+ }
+ }
+
+
+ if (rtlpriv->rtlhal.b_earlymode_enable)
+ tasklet_schedule(&rtlpriv->works.irq_tasklet);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
+ rtlpci->irq_mask[0]);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
+ rtlpci->irq_mask[1]);
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ return IRQ_HANDLED;
+
+done:
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+{
+ _rtl_pci_tx_chk_waitq(hw);
+}
+
+static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl8192_tx_ring *ring = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ struct ieee80211_tx_info *info = NULL;
+ struct sk_buff *pskb = NULL;
+ struct rtl_tx_desc *pdesc = NULL;
+ struct rtl_tcb_desc tcb_desc;
+ /*This is for new trx flow*/
+ struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
+ u8 temp_one = 1;
+
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
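+ /* Drop the previously queued beacon (only one is kept in flight)
+ * before building a fresh one from mac80211. */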
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ /*NB: the beacon data buffer must be 32-bit aligned. */
+ pskb = ieee80211_beacon_get(hw, mac->vif);
+ if (pskb == NULL)
+ return;
+ hdr = rtl_get_hdr(pskb);
+ info = IEEE80211_SKB_CB(pskb);
+ pdesc = &ring->desc[0];
+ if (rtlpriv->use_new_trx_flow)
+ pbuffer_desc = &ring->buffer_desc[0];
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)pbuffer_desc, info, pskb,
+ BEACON_QUEUE, &tcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)pbuffer_desc, info, NULL, pskb,
+ BEACON_QUEUE, &tcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ __skb_queue_tail(&ring->queue, pskb);
+
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN,
+ (u8 *) & temp_one);
+
+ return;
+}
+
+static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 i;
+ u16 desc_num;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+ desc_num = TX_DESC_NUM_92E;
+ else
+ desc_num = RT_TXDESC_NUM;
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ rtlpci->txringcount[i] = desc_num;
+ /*
+ *we only allocate 2 descriptors for the beacon queue,
+ *because the hardware only ever uses the first one.
+ */
+ rtlpci->txringcount[BEACON_QUEUE] = 2;
+
+ /*
+ *The BE queue needs more descriptors for performance;
+ *otherwise we may run out of tx descriptors, which can
+ *cause a mac80211 memory leak.
+ */
+ if (rtl_priv(hw)->use_new_trx_flow == false)
+ rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
+
+ rtlpci->rxbuffersize = 9100; /*2048/1024; */
+ rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
+}
+
+static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+ struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ rtlpriv->rtlhal.up_first_time = true;
+ rtlpriv->rtlhal.being_init_adapter = false;
+
+ rtlhal->hw = hw;
+ rtlpci->pdev = pdev;
+
+ /*Tx/Rx related var */
+ _rtl_pci_init_trx_var(hw);
+
+ /*IBSS*/
+ mac->beacon_interval = 100;
+
+ /*AMPDU*/
+ mac->min_space_cfg = 0;
+ mac->max_mss_density = 0;
+ /*set sane AMPDU defaults */
+ mac->current_ampdu_density = 7;
+ mac->current_ampdu_factor = 3;
+
+ /*QOS*/
+ rtlpci->acm_method = eAcmWay2_SW;
+
+ /*task */
+ tasklet_init(&rtlpriv->works.irq_tasklet,
+ (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ (unsigned long)hw);
+ tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+ (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ (unsigned long)hw);
+}
+
+static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio, unsigned int entries)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_buffer_desc *buffer_desc;
+ struct rtl_tx_desc *desc;
+ dma_addr_t buffer_desc_dma, desc_dma;
+ u32 nextdescaddress;
+ int i;
+
+ /* alloc tx buffer desc for new trx flow*/
+ if (rtlpriv->use_new_trx_flow) {
+ buffer_desc = pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma);
+
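+ /* The ring must start on a 256-byte boundary, hence the
+ * address & 0xFF check below. */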
+ if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate TX ring (prio = %d)\n",
+ prio));
+ return -ENOMEM;
+ }
+
+ memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
+ rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
+ rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
+
+ rtlpci->tx_ring[prio].cur_tx_rp = 0;
+ rtlpci->tx_ring[prio].cur_tx_wp = 0;
+ rtlpci->tx_ring[prio].avl_desc = entries;
+
+ }
+
+ /* alloc dma for this ring */
+ desc = pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*desc) * entries, &desc_dma);
+
+ if (!desc || (unsigned long)desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate TX ring (prio = %d)\n", prio));
+ return -ENOMEM;
+ }
+
+ memset(desc, 0, sizeof(*desc) * entries);
+ rtlpci->tx_ring[prio].desc = desc;
+ rtlpci->tx_ring[prio].dma = desc_dma;
+
+ rtlpci->tx_ring[prio].idx = 0;
+ rtlpci->tx_ring[prio].entries = entries;
+ skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("queue:%d, ring_addr:%p\n", prio, desc));
+
+ /* init every desc in this ring */
+ if (rtlpriv->use_new_trx_flow == false) {
+ for (i = 0; i < entries; i++) {
+ nextdescaddress = cpu_to_le32((u32) desc_dma +
+ ((i + 1) % entries) *
+ sizeof(*desc));
+
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
+ true,
+ HW_DESC_TX_NEXTDESC_ADDR,
+ (u8 *) & nextdescaddress);
+ }
+ }
+ return 0;
+}
+
+static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ int i;
+
+ if (rtlpriv->use_new_trx_flow) {
+ struct rtl_rx_buffer_desc *entry = NULL;
+ /* alloc dma for this ring */
+ rtlpci->rx_ring[rxring_idx].buffer_desc =
+ pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ buffer_desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
+ if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
+ (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
+ return -ENOMEM;
+ }
+
+ memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount);
+
+ /* init every desc in this ring */
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ rxring_idx, i))
+ return -ENOMEM;
+ }
+ } else {
+ struct rtl_rx_desc *entry = NULL;
+ u8 tmp_one = 1;
+ /* alloc dma for this ring */
+ rtlpci->rx_ring[rxring_idx].desc =
+ pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
+ if (!rtlpci->rx_ring[rxring_idx].desc ||
+ (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Cannot allocate RX ring\n"));
+ return -ENOMEM;
+ }
+
+ memset(rtlpci->rx_ring[rxring_idx].desc, 0,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount);
+
+ /* init every desc in this ring */
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+ if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ rxring_idx, i))
+ return -ENOMEM;
+ }
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
+ HW_DESC_RXERO, (u8 *) & tmp_one);
+ }
+ return 0;
+}
+
+static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+ /* free every desc in this ring */
+ while (skb_queue_len(&ring->queue)) {
+ u8 *entry;
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+ /* free dma of this ring */
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->desc) * ring->entries,
+ ring->desc, ring->dma);
+ ring->desc = NULL;
+ if (rtlpriv->use_new_trx_flow) {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->buffer_desc) * ring->entries,
+ ring->buffer_desc, ring->buffer_desc_dma);
+ ring->buffer_desc = NULL;
+ }
+}
+
+static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int i;
+
+ /* free every desc in this ring */
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
+ if (!skb)
+ continue;
+
+ pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ kfree_skb(skb);
+ }
+
+ /* free dma of this ring */
+ if (rtlpriv->use_new_trx_flow) {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].
+ buffer_desc) * rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].buffer_desc,
+ rtlpci->rx_ring[rxring_idx].dma);
+ rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
+ } else {
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].desc,
+ rtlpci->rx_ring[rxring_idx].dma);
+ rtlpci->rx_ring[rxring_idx].desc = NULL;
+ }
+}
+
+static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int ret;
+ int i, rxring_idx;
+
+ /* rxring_idx 0:RX_MPDU_QUEUE
+ * rxring_idx 1:RX_CMD_QUEUE */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+ ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
+ if (ret)
+ goto err_free_rings;
+ }
+
+ return 0;
+
+err_free_rings:
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+ _rtl_pci_free_rx_ring(hw, rxring_idx);
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ if (rtlpci->tx_ring[i].desc ||
+ rtlpci->tx_ring[i].buffer_desc)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return 1;
+}
+
+static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
+{
+ u32 i, rxring_idx;
+
+ /*free rx rings */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
+ _rtl_pci_free_rx_ring(hw, rxring_idx);
+
+ /*free tx rings */
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return 0;
+}
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int i, rxring_idx;
+ unsigned long flags;
+ u8 tmp_one = 1;
+ /* rxring_idx 0:RX_MPDU_QUEUE */
+ /* rxring_idx 1:RX_CMD_QUEUE */
+ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
+ /* force rx_ring[RX_MPDU_QUEUE/RX_CMD_QUEUE].idx
+ * back to the first descriptor */
+ /* with the new trx flow there is nothing to do here */
+ if ((rtlpriv->use_new_trx_flow == false) &&
+ rtlpci->rx_ring[rxring_idx].desc) {
+ struct rtl_rx_desc *entry = NULL;
+
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rxring_idx].desc[i];
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry,
+ false,
+ HW_DESC_RXOWN,
+ (u8 *) & tmp_one);
+ }
+ }
+ rtlpci->rx_ring[rxring_idx].idx = 0;
+ }
+
+ /* after reset, release previous pending packet,
+ * and force the tx idx to the first one */
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ if (rtlpci->tx_ring[i].desc ||
+ rtlpci->tx_ring[i].buffer_desc) {
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
+
+ while (skb_queue_len(&ring->queue)) {
+ u8 *entry;
+ struct sk_buff *skb =
+ __skb_dequeue(&ring->queue);
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc
+ [ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true,
+ HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+ ring->idx = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ return 0;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+#else
+/*<delete in kernel end>*/
+static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 tid = rtl_get_tid(skb);
+ u16 fc = rtl_get_fc(skb);
+
+ if (!sta)
+ return false;
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+
+ if (!rtlpriv->rtlhal.b_earlymode_enable)
+ return false;
+ if (ieee80211_is_nullfunc(fc))
+ return false;
+ if (ieee80211_is_qos_nullfunc(fc))
+ return false;
+ if (ieee80211_is_pspoll(fc))
+ return false;
+
+ if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
+ return false;
+ if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
+ return false;
+ if (tid > 7)
+ return false;
+ /* maybe every tid should be checked */
+ if (!rtlpriv->link_info.higher_busytxtraffic[tid])
+ return false;
+
+ spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
+ spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+
+ return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *sta_entry = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
+ u16 idx;
+ u8 own;
+ u8 temp_one = 1;
+ u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
+ unsigned long flags;
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ u16 fc = rtl_get_fc(skb);
+ u8 *pda_addr = hdr->addr1;
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ /*ssn */
+ u8 tid = 0;
+ u16 seq_number = 0;
+
+
+ if (ieee80211_is_mgmt(fc))
+ rtl_tx_mgmt_proc(hw, skb);
+
+ if (rtlpriv->psc.sw_ps_enabled) {
+ if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
+ !ieee80211_has_pm(fc))
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ }
+
+ rtl_action_proc(hw, skb, true);
+
+ if (is_multicast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesmulticast += skb->len;
+ else if (is_broadcast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesbroadcast += skb->len;
+ else
+ rtlpriv->stats.txbytesunicast += skb->len;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ ring = &rtlpci->tx_ring[hw_queue];
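+ /* Pick the descriptor slot: the new trx flow keeps an explicit
+ * write pointer, the legacy flow derives it from ring->idx plus
+ * the number of queued skbs. */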
+ if (hw_queue != BEACON_QUEUE) {
+ if (rtlpriv->use_new_trx_flow)
+ idx = ring->cur_tx_wp;
+ else
+ idx = (ring->idx + skb_queue_len(&ring->queue)) %
+ ring->entries;
+ } else {
+ idx = 0;
+ }
+
+ pdesc = &ring->desc[idx];
+
+ if (rtlpriv->use_new_trx_flow) {
+ ptx_bd_desc = &ring->buffer_desc[idx];
+ } else {
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ true, HW_DESC_OWN);
+
+ if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("No more TX desc@%d, ring->idx = %d,"
+ "idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ return skb->len;
+ }
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ tid = rtl_get_tid(skb);
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+ IEEE80211_SCTL_SEQ) >> 4;
+ seq_number += 1;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control))
+ sta_entry->tids[tid].seq_number = seq_number;
+ }
+ }
+
+ if (ieee80211_is_data(fc))
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, skb,
+ hw_queue, ptcb_desc);
+#else
+/*<delete in kernel end>*/
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ (u8 *)ptx_bd_desc, info, sta, skb,
+ hw_queue, ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ __skb_queue_tail(&ring->queue, skb);
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+ HW_DESC_OWN, (u8 *) & hw_queue);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
+ HW_DESC_OWN, (u8 *) & temp_one);
+ }
+
+ if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
+ hw_queue != BEACON_QUEUE) {
+
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+ ("less desc left, stop skb_queue@%d, "
+ "ring->idx = %d,"
+ "idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
+
+ return 0;
+}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+#else
+static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i = 0;
+ int queue_id;
+ struct rtl8192_tx_ring *ring;
+
+ if (mac->skip_scan)
+ return;
+
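+ /* Poll each requested tx queue until it drains, sleeping 5 ms per
+ * iteration; give up after ~1 s, if the RF is off or if the HAL
+ * has been stopped. */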
+ for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
+ u32 queue_len;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ if (((queues >> queue_id) & 0x1) == 0) {
+ queue_id--;
+ continue;
+ }
+#endif
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ queue_len = skb_queue_len(&ring->queue);
+ if (queue_len == 0 || queue_id == BEACON_QUEUE ||
+ queue_id == TXCMD_QUEUE) {
+ queue_id--;
+ continue;
+ } else {
+ msleep(5);
+ i++;
+ }
+
+ /* wait at most ~1 s (200 * 5 ms) for all queues to drain */
+ if (rtlpriv->psc.rfpwr_state == ERFOFF ||
+ is_hal_stop(rtlhal) || i >= 200)
+ return;
+ }
+}
+
+void rtl_pci_deinit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ _rtl_pci_deinit_trx_ring(hw);
+
+ synchronize_irq(rtlpci->pdev->irq);
+ tasklet_kill(&rtlpriv->works.irq_tasklet);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+
+}
+
+int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ _rtl_pci_init_struct(hw, pdev);
+
+ err = _rtl_pci_init_trx_ring(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("tx ring initialization failed"));
+ return err;
+ }
+
+ return 1;
+}
+
+int rtl_pci_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ int err = 0;
+ RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n"));
+ rtl_pci_reset_trx_ring(hw);
+
+ rtlpriv->rtlhal.driver_is_goingto_unload = false;
+ err = rtlpriv->cfg->ops->hw_init(hw);
+ if (err) {
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("Failed to config hardware err %x!\n",err));
+ return err;
+ }
+
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
+
+ rtl_init_rx_config(hw);
+
+ /* Should be done after adapter start and interrupt enable. */
+ set_hal_start(rtlhal);
+
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ rtlpriv->rtlhal.up_first_time = false;
+
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
+ return 0;
+}
+
+void rtl_pci_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 RFInProgressTimeOut = 0;
+
+ /*
+ *Should be set before disabling the interrupt and adapter,
+ *and takes effect immediately.
+ */
+ set_hal_stop(rtlhal);
+
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
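+ /* Wait (up to ~100 ms) for any RF power-state change already in
+ * progress to finish before marking our own change in progress. */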
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ while (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ if (RFInProgressTimeOut > 100) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ break;
+ }
+ mdelay(1);
+ RFInProgressTimeOut++;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ }
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ rtlpriv->rtlhal.driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ rtl_pci_enable_aspm(hw);
+}
+
+static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct pci_dev *bridge_pdev = pdev->bus->self;
+ u16 venderid;
+ u16 deviceid;
+ u8 revisionid;
+ u16 irqline;
+ u8 tmp;
+
+ venderid = pdev->vendor;
+ deviceid = pdev->device;
+ pci_read_config_byte(pdev, 0x8, &revisionid);
+ pci_read_config_word(pdev, 0x3C, &irqline);
+
+ if (deviceid == RTL_PCI_8192_DID ||
+ deviceid == RTL_PCI_0044_DID ||
+ deviceid == RTL_PCI_0047_DID ||
+ deviceid == RTL_PCI_8192SE_DID ||
+ deviceid == RTL_PCI_8174_DID ||
+ deviceid == RTL_PCI_8173_DID ||
+ deviceid == RTL_PCI_8172_DID ||
+ deviceid == RTL_PCI_8171_DID) {
+ switch (revisionid) {
+ case RTL_PCI_REVISION_ID_8192PCIE:
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192E is found but not supported now-"
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
+ return false;
+ case RTL_PCI_REVISION_ID_8192SE:
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192SE is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+
+ }
+ } else if (deviceid == RTL_PCI_8723AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8723AE PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8192CET_DID ||
+ deviceid == RTL_PCI_8192CE_DID ||
+ deviceid == RTL_PCI_8191CE_DID ||
+ deviceid == RTL_PCI_8188CE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192C PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8192DE_DID ||
+ deviceid == RTL_PCI_8192DE_DID2) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("8192D PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else if (deviceid == RTL_PCI_8188EE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8188EE\n"));
+ } else if (deviceid == RTL_PCI_8723BE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8723BE\n"));
+ } else if (deviceid == RTL_PCI_8192EE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8192EE\n"));
+ } else if (deviceid == RTL_PCI_8821AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8821AE\n"));
+ } else if (deviceid == RTL_PCI_8812AE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find adapter, Hardware type is 8812AE\n"));
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device -"
+ " vid/did=%x/%x\n", venderid, deviceid));
+
+ rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
+ if (revisionid == 0 || revisionid == 1) {
+ if (revisionid == 0) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find 92DE MAC0.\n"));
+ rtlhal->interfaceindex = 0;
+ } else if (revisionid == 1) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Find 92DE MAC1.\n"));
+ rtlhal->interfaceindex = 1;
+ }
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - "
+ "VendorID/DeviceID=%x/%x, Revision=%x\n",
+ venderid, deviceid, revisionid));
+ rtlhal->interfaceindex = 0;
+ }
+ }
+
+ /* 92ee use new trx flow */
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+ rtlpriv->use_new_trx_flow = true;
+ else
+ rtlpriv->use_new_trx_flow = false;
+
+ /*find bus info */
+ pcipriv->ndis_adapter.busnumber = pdev->bus->number;
+ pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
+ pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
+
+ /*find bridge info */
+ pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
+ /* some ARM have no bridge_pdev and will crash here
+ * so we should check if bridge_pdev is NULL */
+ if (bridge_pdev) {
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index: %d\n",
+ tmp));
+ break;
+ }
+ }
+ }
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor !=
+ PCI_BRIDGE_VENDOR_UNKNOWN) {
+ pcipriv->ndis_adapter.pcibridge_busnum =
+ bridge_pdev->bus->number;
+ pcipriv->ndis_adapter.pcibridge_devnum =
+ PCI_SLOT(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_funcnum =
+ PCI_FUNC(bridge_pdev->devfn);
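+ /* Build the legacy config address (CF8/CFC mechanism) for the
+ * bridge: enable bit 31 plus bus/dev/func, consumed by the raw
+ * port accessors. */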
+ pcipriv->ndis_adapter.pcicfg_addrport =
+ (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
+ (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
+ (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+ pci_pcie_cap(bridge_pdev);
+/*<delete in kernel start>*/
+#else
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+ _rtl_pci_get_pciehdr_offset(hw);
+#endif
+/*<delete in kernel end>*/
+ pcipriv->ndis_adapter.num4bytes =
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+
+ rtl_pci_get_linkcontrol_field(hw);
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ PCI_BRIDGE_VENDOR_AMD) {
+ pcipriv->ndis_adapter.amd_l1_patch =
+ rtl_pci_get_amd_l1_patch(hw);
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("pcidev busnumber:devnumber:funcnumber:"
+ "vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
+ "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch));
+
+ rtl_pci_parse_configuration(pdev, hw);
+ list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
+ return true;
+}
+
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+ ret = pci_enable_msi(rtlpci->pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0) {
+ pci_disable_msi(rtlpci->pdev);
+ return ret;
+ }
+
+ rtlpci->using_msi = true;
+
+ RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
+ return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0)
+ return ret;
+
+ rtlpci->using_msi = false;
+ RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
+ ("Pin-based Interrupt Mode!\n"));
+ return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+ if (rtlpci->msi_support) {
+ ret = rtl_pci_intr_mode_msi(hw);
+ if (ret < 0)
+ ret = rtl_pci_intr_mode_legacy(hw);
+ } else {
+ ret = rtl_pci_intr_mode_legacy(hw);
+ }
+ return ret;
+}
+
+/* this is used for other modules get
+ * hw pointer in rtl_pci_get_hw_pointer */
+struct ieee80211_hw *hw_export = NULL;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+#endif
+{
+ struct ieee80211_hw *hw = NULL;
+
+ struct rtl_priv *rtlpriv = NULL;
+ struct rtl_pci_priv *pcipriv = NULL;
+ struct rtl_pci *rtlpci;
+ unsigned long pmem_start, pmem_len, pmem_flags;
+ int err;
+
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ RT_ASSERT(false,
+ ("%s : Cannot enable new PCI device\n",
+ pci_name(pdev)));
+ return err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ RT_ASSERT(false, ("Unable to obtain 32bit DMA "
+ "for consistent allocations\n"));
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
+ sizeof(struct rtl_priv), &rtl_ops);
+ if (!hw) {
+ RT_ASSERT(false,
+ ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
+ err = -ENOMEM;
+ goto fail1;
+ }
+ hw_export = hw;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ rtlpriv = hw->priv;
+ pcipriv = (void *)rtlpriv->priv;
+ pcipriv->dev.pdev = pdev;
+
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_PCI;
+ rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+ rtlpriv->intf_ops = &rtl_pci_ops;
+ rtlpriv->glb_var = &global_var;
+
+ /*
+ *init dbgp flags before all
+ *other functions, because we will
+ *use it in other functions like
+ *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
+ *you can not use these macro
+ *before this
+ */
+ rtl_dbgp_flag_init(hw);
+
+ /* MEM map */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ RT_ASSERT(false, ("Can't obtain PCI resources\n"));
+ return err;
+ }
+
+ pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
+ pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
+ pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
+
+ /*shared mem start */
+ rtlpriv->io.pci_mem_start =
+ (unsigned long)pci_iomap(pdev,
+ rtlpriv->cfg->bar_id, pmem_len);
+ if (rtlpriv->io.pci_mem_start == 0) {
+ RT_ASSERT(false, ("Can't map PCI mem\n"));
+ goto fail2;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("mem mapped space: start: 0x%08lx len:%08lx "
+ "flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start));
+
+ /* Disable Clk Request */
+ pci_write_config_byte(pdev, 0x81, 0);
+ /* leave D3 mode */
+ pci_write_config_byte(pdev, 0x44, 0);
+ pci_write_config_byte(pdev, 0x04, 0x06);
+ pci_write_config_byte(pdev, 0x04, 0x07);
+
+ /* find adapter */
+ /* if chip not support, will return false */
+ if (!_rtl_pci_find_adapter(pdev, hw))
+ goto fail3;
+
+ /* Init IO handler */
+ _rtl_pci_io_handler_init(&pdev->dev, hw);
+
+ /*like read eeprom and so on */
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
+ goto fail3;
+ }
+
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+
+ /*aspm */
+ rtl_pci_init_aspm(hw);
+
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't allocate sw for mac80211.\n"));
+ goto fail3;
+ }
+
+ /* Init PCI sw */
+ err = !rtl_pci_init(hw, pdev);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
+ goto fail3;
+ }
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't register mac80211 hw.\n"));
+ goto fail3;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ /* the wiphy must have been registered with
+ * cfg80211 prior to regulatory_hint */
+ if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("failed to create sysfs device attributes\n"));
+ goto fail3;
+ }
+ /* add proc fs entry */
+ rtl_proc_add_one(hw);
+
+ /*init rfkill */
+ rtl_init_rfkill(hw);
+
+ rtlpci = rtl_pcidev(pcipriv);
+
+ err = rtl_pci_intr_mode_decide(hw);
+ if (err) {
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy)));
+ goto fail3;
+ } else {
+ rtlpci->irq_alloc = 1;
+ }
+
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ return 0;
+
+fail3:
+	pci_set_drvdata(pdev, NULL);
+	rtl_deinit_core(hw);
+
+	if (rtlpriv->io.pci_mem_start != 0)
+		pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+
+	ieee80211_free_hw(hw);
+
+fail2:
+ pci_release_regions(pdev);
+
+fail1:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+
+}
+/* EXPORT_SYMBOL(rtl_pci_probe); */
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
+{
+ return hw_export;
+}
+/* EXPORT_SYMBOL(rtl_pci_get_hw_pointer); */
+
+void rtl_pci_disconnect(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+
+ clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+ sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
+
+	/* add for proc */
+ rtl_proc_remove_one(hw);
+
+
+ /*ieee80211_unregister_hw will call ops_stop */
+ if (rtlmac->mac80211_registered == 1) {
+ ieee80211_unregister_hw(hw);
+ rtlmac->mac80211_registered = 0;
+ } else {
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
+
+ /*deinit rfkill */
+ rtl_deinit_rfkill(hw);
+
+ rtl_pci_deinit(hw);
+ rtl_deinit_core(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+
+ if (rtlpci->irq_alloc) {
+ synchronize_irq(rtlpci->pdev->irq);
+ free_irq(rtlpci->pdev->irq, hw);
+ rtlpci->irq_alloc = 0;
+ }
+
+ if (rtlpci->using_msi == true)
+ pci_disable_msi(rtlpci->pdev);
+
+ list_del(&rtlpriv->list);
+ if (rtlpriv->io.pci_mem_start != 0) {
+ pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+ pci_release_regions(pdev);
+ }
+
+ pci_disable_device(pdev);
+
+ rtl_pci_disable_aspm(hw);
+
+ pci_set_drvdata(pdev, NULL);
+
+ ieee80211_free_hw(hw);
+}
+/* EXPORT_SYMBOL(rtl_pci_disconnect); */
+
+/***************************************
+kernel pci power state define:
+PCI_D0 ((pci_power_t __force) 0)
+PCI_D1 ((pci_power_t __force) 1)
+PCI_D2 ((pci_power_t __force) 2)
+PCI_D3hot ((pci_power_t __force) 3)
+PCI_D3cold ((pci_power_t __force) 4)
+PCI_UNKNOWN ((pci_power_t __force) 5)
+
+This function is called when system
+goes into suspend state mac80211 will
+call rtl_mac_stop() from the mac80211
+suspend function first, So there is
+no need to call hw_disable here.
+****************************************/
+int rtl_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->hw_suspend(hw);
+ rtl_deinit_rfkill(hw);
+
+ return 0;
+}
+/* EXPORT_SYMBOL(rtl_pci_suspend); */
+
+int rtl_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->hw_resume(hw);
+ rtl_init_rfkill(hw);
+
+ return 0;
+}
+/* EXPORT_SYMBOL(rtl_pci_resume); */
+
+struct rtl_intf_ops rtl_pci_ops = {
+ .read_efuse_byte = read_efuse_byte,
+ .adapter_start = rtl_pci_start,
+ .adapter_stop = rtl_pci_stop,
+ .check_buddy_priv = rtl_pci_check_buddy_priv,
+ .adapter_tx = rtl_pci_tx,
+ .flush = rtl_pci_flush,
+ .reset_trx_ring = rtl_pci_reset_trx_ring,
+ .waitq_insert = rtl_pci_tx_chk_waitq_insert,
+
+ .disable_aspm = rtl_pci_disable_aspm,
+ .enable_aspm = rtl_pci_enable_aspm,
+};
diff --git a/drivers/staging/rtl8821ae/pci.h b/drivers/staging/rtl8821ae/pci.h
new file mode 100644
index 000000000000..06eaa521e0eb
--- /dev/null
+++ b/drivers/staging/rtl8821ae/pci.h
@@ -0,0 +1,353 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_PCI_H__
+#define __RTL_PCI_H__
+
+#include <linux/pci.h>
+/*
+0: MPDU packet queue,
+1: Rx command queue
+*/
+#define RTL_PCI_RX_MPDU_QUEUE 0
+#define RTL_PCI_RX_CMD_QUEUE 1
+#define RTL_PCI_MAX_RX_QUEUE 2
+
+#define RTL_PCI_MAX_RX_COUNT	512
+#define RTL_PCI_MAX_TX_QUEUE_COUNT 9
+
+#define RT_TXDESC_NUM 128
+#define TX_DESC_NUM_92E 512
+#define RT_TXDESC_NUM_BE_QUEUE 256
+
+#define BK_QUEUE 0
+#define BE_QUEUE 1
+#define VI_QUEUE 2
+#define VO_QUEUE 3
+#define BEACON_QUEUE 4
+#define TXCMD_QUEUE 5
+#define MGNT_QUEUE 6
+#define HIGH_QUEUE 7
+#define HCCA_QUEUE 8
+
+#define RTL_PCI_DEVICE(vend, dev, cfg) \
+ .vendor = (vend), \
+ .device = (dev), \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID,\
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define INTEL_VENDOR_ID 0x8086
+#define SIS_VENDOR_ID 0x1039
+#define ATI_VENDOR_ID 0x1002
+#define ATI_DEVICE_ID 0x7914
+#define AMD_VENDOR_ID 0x1022
+
+#define PCI_MAX_BRIDGE_NUMBER 255
+#define PCI_MAX_DEVICES 32
+#define PCI_MAX_FUNCTION 8
+
+#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
+#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
+
+#define PCI_CLASS_BRIDGE_DEV 0x06
+#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
+#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
+#define PCI_CAP_ID_EXP 0x10
+
+#define U1DONTCARE 0xFF
+#define U2DONTCARE 0xFFFF
+#define U4DONTCARE 0xFFFFFFFF
+
+#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */
+#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */
+#define RTL_PCI_8174_DID 0x8174 /*8192 SE */
+#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
+#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
+#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
+#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
+#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
+#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
+#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */
+#define RTL_PCI_700F_DID 0x700F
+#define RTL_PCI_701F_DID 0x701F
+#define RTL_PCI_DLINK_DID 0x3304
+#define RTL_PCI_8723AE_DID 0x8723 /*8723e */
+#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */
+#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
+#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
+#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192DE_DID 0x8193 /*8192de */
+#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/
+#define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/
+#define RTL_PCI_8723BE_DID 0xB723 /*8723be*/
+#define RTL_PCI_8192EE_DID 0x818B /*8192ee*/
+#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/
+#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/
+
+/*8192 support 16 pages of IO registers*/
+#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
+#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000
+
+#define RTL_PCI_REVISION_ID_8190PCI 0x00
+#define RTL_PCI_REVISION_ID_8192PCIE 0x01
+#define RTL_PCI_REVISION_ID_8192SE 0x10
+#define RTL_PCI_REVISION_ID_8192CE 0x1
+#define RTL_PCI_REVISION_ID_8192DE 0x0
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+
+#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE
+
+enum pci_bridge_vendor {
+ PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */
+ PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/
+ PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/
+ PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/
+ PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/
+ PCI_BRIDGE_VENDOR_MAX,
+};
+
+struct rtl_pci_capabilities_header {
+ u8 capability_id;
+ u8 next;
+};
+
+/* In the new TRX flow the buffer descriptor is a new concept,
+ * but TX wifi info == TX descriptor in the old flow and
+ * RX wifi info == RX descriptor in the old flow */
+struct rtl_tx_buffer_desc {
+#if (RTL8192EE_SEG_NUM == 2)
+ u32 dword[2*(DMA_IS_64BIT + 1)*8]; /* seg = 8 */
+#elif (RTL8192EE_SEG_NUM == 1)
+ u32 dword[2*(DMA_IS_64BIT + 1)*4]; /* seg = 4 */
+#elif (RTL8192EE_SEG_NUM == 0)
+ u32 dword[2*(DMA_IS_64BIT + 1)*2]; /* seg = 2 */
+#endif
+} __packed;
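+/* Sizing example (illustrative): with DMA_IS_64BIT == 1 and
+ * RTL8192EE_SEG_NUM == 1 the descriptor holds 2 * 2 * 4 = 16 dwords
+ * (64 bytes), i.e. 4 dwords per segment; with 32-bit DMA this halves
+ * to 2 dwords per segment. */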
+
+struct rtl_tx_desc {/*old: tx desc*//*new: tx wifi info*/
+ u32 dword[16];
+} __packed;
+
+struct rtl_rx_buffer_desc { /*rx buffer desc*/
+ u32 dword[2];
+} __packed;
+
+struct rtl_rx_desc { /*old: rx desc*//*new: rx wifi info*/
+ u32 dword[8];
+} __packed;
+
+struct rtl_tx_cmd_desc {
+ u32 dword[16];
+} __packed;
+
+struct rtl8192_tx_ring {
+ struct rtl_tx_desc *desc; /*tx desc / tx wifi info*/
+ dma_addr_t dma; /*tx desc dma memory / tx wifi info dma memory*/
+ unsigned int idx;
+ unsigned int entries;
+ struct sk_buff_head queue;
+ /*add for new trx flow*/
+ struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
+	dma_addr_t buffer_desc_dma; /*tx buffer desc dma memory*/
+ u16 avl_desc; /* available_desc_to_write */
+ u16 cur_tx_wp; /* current_tx_write_point */
+ u16 cur_tx_rp; /* current_tx_read_point */
+};
+
+struct rtl8192_rx_ring {
+ struct rtl_rx_desc *desc;/*for old trx flow, not used in new trx*/
+ /*dma matches either 'desc' or 'buffer_desc'*/
+ dma_addr_t dma;
+ unsigned int idx;
+ struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
+ /*add for new trx flow*/
+ struct rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/
+ u16 next_rx_rp; /* next_rx_read_point */
+};
+
+struct rtl_pci {
+ struct pci_dev *pdev;
+ bool irq_enabled;
+
+ /*Tx */
+ struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ u32 transmit_config;
+
+ /*Rx */
+ struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
+ int rxringcount;
+ u16 rxbuffersize;
+ u32 receive_config;
+
+ /*irq */
+ u8 irq_alloc;
+ u32 irq_mask[2];
+ u32 sys_irq_mask;
+
+ /*Bcn control register setting */
+ u32 reg_bcn_ctrl_val;
+
+	/*ASPM*/
+	u8 const_pci_aspm;
+ u8 const_amdpci_aspm;
+ u8 const_hwsw_rfoff_d3;
+ u8 const_support_pciaspm;
+ /*pci-e bridge */
+ u8 const_hostpci_aspm_setting;
+ /*pci-e device */
+ u8 const_devicepci_aspm_setting;
+ /*If it supports ASPM, Offset[560h] = 0x40,
+ otherwise Offset[560h] = 0x00. */
+ bool b_support_aspm;
+ bool b_support_backdoor;
+
+ /*QOS & EDCA */
+ enum acm_method acm_method;
+
+ u16 shortretry_limit;
+ u16 longretry_limit;
+
+ /* MSI support */
+ bool msi_support;
+ bool using_msi;
+};
+
+struct mp_adapter {
+ u8 linkctrl_reg;
+
+ u8 busnumber;
+ u8 devnumber;
+ u8 funcnumber;
+
+ u8 pcibridge_busnum;
+ u8 pcibridge_devnum;
+ u8 pcibridge_funcnum;
+
+ u8 pcibridge_vendor;
+ u16 pcibridge_vendorid;
+ u16 pcibridge_deviceid;
+
+ u32 pcicfg_addrport;
+ u8 num4bytes;
+
+ u8 pcibridge_pciehdr_offset;
+ u8 pcibridge_linkctrlreg;
+
+ bool amd_l1_patch;
+};
+
+struct rtl_pci_priv {
+ struct rtl_pci dev;
+ struct mp_adapter ndis_adapter;
+ struct rtl_led_ctl ledctl;
+ struct bt_coexist_info btcoexist;
+};
+
+#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
+#define rtl_pcidev(pcipriv) (&((pcipriv)->dev))
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
+
+extern struct rtl_intf_ops rtl_pci_ops;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+int rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+#else
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+#endif
+void rtl_pci_disconnect(struct pci_dev *pdev);
+int rtl_pci_suspend(struct device *dev);
+int rtl_pci_resume(struct device *dev);
+
+static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return 0xff & readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+ writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write16_async(struct rtl_priv *rtlpriv,
+ u32 addr, u16 val)
+{
+ writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val)
+{
+ writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
+{
+ outl(val, port);
+}
+
+static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
+{
+ outb(val, port);
+}
+
+static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
+{
+ *pval = inb(port);
+}
+
+static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
+{
+ *pval = inw(port);
+}
+
+static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
+{
+ *pval = inl(port);
+}
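+
+/* The raw port helpers above are thin wrappers around the x86 port I/O
+ * accessors; together with PCI_CONF_ADDRESS/PCI_CONF_DATA they are
+ * presumably used for direct CF8/CFC PCI configuration space access. */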
+
+#endif
diff --git a/drivers/staging/rtl8821ae/ps.c b/drivers/staging/rtl8821ae/ps.c
new file mode 100644
index 000000000000..7876442417f4
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.c
@@ -0,0 +1,1025 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "ps.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+#include "btcoexist/rtl_btc.h"
+
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool init_status = true;
+
+ /*<1> reset trx ring */
+ if (rtlhal->interface == INTF_PCI)
+ rtlpriv->intf_ops->reset_trx_ring(hw);
+
+ if (is_hal_stop(rtlhal))
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("Driver is already down!\n"));
+
+ /*<2> Enable Adapter */
+ rtlpriv->cfg->ops->hw_init(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /*init_status = false; */
+
+ /*<3> Enable Interrupt */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ /*<enable timer> */
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+
+ return init_status;
+}
+//EXPORT_SYMBOL(rtl_ps_enable_nic);
+
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
+{
+ bool status = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*<1> Stop all timer */
+ rtl_deinit_deferred_work(hw);
+
+ /*<2> Disable Interrupt */
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ /*<3> Disable Adapter */
+ rtlpriv->cfg->ops->hw_disable(hw);
+
+ return status;
+}
+//EXPORT_SYMBOL(rtl_ps_disable_nic);
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset,
+ u32 changesource, bool protect_or_not)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+ bool b_actionallowed = false;
+ u16 rfwait_cnt = 0;
+
+ /*protect_or_not = true; */
+
+ if (protect_or_not)
+ goto no_protect;
+
+ /*
+ *Only one thread can change
+ *the RF state at one time, and others
+ *should wait to be executed.
+ */
+ while (true) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("RF Change in progress!"
+ "Wait to set..state_toset(%d).\n",
+ state_toset));
+
+ /* Set RF after the previous action is done. */
+ while (ppsc->rfchange_inprogress) {
+ rfwait_cnt++;
+ mdelay(1);
+ /*
+				 *If we wait too long, return false to
+				 *avoid getting stuck here.
+ */
+ if (rfwait_cnt > 100)
+ return false;
+ }
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ break;
+ }
+ }
+
+no_protect:
+ rtstate = ppsc->rfpwr_state;
+
+ switch (state_toset) {
+ case ERFON:
+ ppsc->rfoff_reason &= (~changesource);
+
+ if ((changesource == RF_CHANGE_BY_HW) &&
+ (ppsc->b_hwradiooff == true)) {
+ ppsc->b_hwradiooff = false;
+ }
+
+ if (!ppsc->rfoff_reason) {
+ ppsc->rfoff_reason = 0;
+ b_actionallowed = true;
+ }
+
+ break;
+
+ case ERFOFF:
+
+ if ((changesource == RF_CHANGE_BY_HW) &&
+ (ppsc->b_hwradiooff == false)) {
+ ppsc->b_hwradiooff = true;
+ }
+
+ ppsc->rfoff_reason |= changesource;
+ b_actionallowed = true;
+ break;
+
+ case ERFSLEEP:
+ ppsc->rfoff_reason |= changesource;
+ b_actionallowed = true;
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case not process \n"));
+ break;
+ }
+
+ if (b_actionallowed)
+ rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
+
+ if (!protect_or_not) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ return b_actionallowed;
+}
+//EXPORT_SYMBOL(rtl_ps_set_rf_state);
+
+static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ ppsc->b_swrf_processing = true;
+
+ if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+ }
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv,
+ ppsc->inactive_pwrstate);
+ }
+ rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
+ RF_CHANGE_BY_IPS, false);
+
+ if (ppsc->inactive_pwrstate == ERFOFF &&
+ rtlhal->interface == INTF_PCI) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+ !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+ }
+
+ ppsc->b_swrf_processing = false;
+}
+
+void rtl_ips_nic_off_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+
+ if (mac->opmode != NL80211_IFTYPE_STATION) {
+ RT_TRACE(COMP_ERR, DBG_WARNING, ("not station return\n"));
+ return;
+ }
+
+ if (mac->p2p_in_use)
+ return;
+
+ if (mac->link_state > MAC80211_NOLINK)
+ return;
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if(rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps)
+ rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps(hw);
+
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ /*
+ *Do not enter IPS in the following conditions:
+ *(1) RF is already OFF or Sleep
+	 *(2) b_swrf_processing (indicates the IPS is still ongoing)
+ *(3) Connected (only disconnected can trigger IPS)
+ *(4) IBSS (send Beacon)
+ *(5) AP mode (send Beacon)
+ *(6) monitor mode (rcv packet)
+ */
+
+ if (rtstate == ERFON &&
+ !ppsc->b_swrf_processing &&
+ (mac->link_state == MAC80211_NOLINK) &&
+ !mac->act_scanning) {
+ RT_TRACE(COMP_RF, DBG_LOUD,
+ ("IPSEnter(): Turn off RF.\n"));
+
+ ppsc->inactive_pwrstate = ERFOFF;
+ ppsc->b_in_powersavemode = true;
+
+ /*rtl_pci_reset_trx_ring(hw); */
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+}
+
+void rtl_ips_nic_off(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*
+	 *when associating with an AP, mac80211 may ask us to
+	 *disable the nic right after scanning, before the link is up;
+	 *doing so immediately can make the link fail, so delay 100ms here
+ */
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ips_nic_off_wq, MSECS(100));
+}
+
+/* NOTICE: any opmode should exec nic_on first; disabling the nic
+ * without nic_on may cause problems, e.g. bad adhoc throughput */
+void rtl_ips_nic_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+
+ spin_lock(&rtlpriv->locks.ips_lock);
+ if (ppsc->b_inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ !ppsc->b_swrf_processing &&
+ ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
+
+ ppsc->inactive_pwrstate = ERFON;
+ ppsc->b_in_powersavemode = false;
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+ spin_unlock(&rtlpriv->locks.ips_lock);
+}
+
+/*for FW LPS*/
+
+/*
+ *Determine whether we can put the Fw into PS mode
+ *under the current conditions. Return true if it
+ *can enter PS mode.
+ */
+static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u32 ps_timediff;
+
+ ps_timediff = jiffies_to_msecs(jiffies -
+ ppsc->last_delaylps_stamp_jiffies);
+
+ if (ps_timediff < 2000) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Delay enter Fw LPS for DHCP, ARP,"
+ " or EAPOL exchanging state.\n"));
+ return false;
+ }
+
+ if (mac->link_state != MAC80211_LINKED)
+ return false;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return false;
+
+ return true;
+}
+
+/* Change the current power save mode. */
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool enter_fwlps;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (ppsc->dot11_psmode == rt_psmode)
+ return;
+
+ /* Update power save mode configured. */
+ ppsc->dot11_psmode = rt_psmode;
+
+ /*
+ *<FW control LPS>
+ *1. Enter PS mode
+ * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
+ * cmd to set Fw into PS mode.
+ *2. Leave PS mode
+ * Send H2C fw_pwrmode cmd to Fw to set Fw into Active
+ * mode and set RPWM to turn RF on.
+ */
+
+ if ((ppsc->b_fwctrl_lps) && ppsc->report_linked) {
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE));
+ enter_fwlps = false;
+ ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
+ ppsc->smart_ps = 0;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION,
+ (u8 *)(&enter_fwlps));
+ if (ppsc->p2p_ps_info.opp_ps)
+ rtl_p2p_ps_cmd(hw,P2P_PS_ENABLE);
+
+ } else {
+ if (rtl_get_fwlps_doze(hw)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode));
+ enter_fwlps = true;
+ ppsc->pwr_mode = ppsc->fwctrl_psmode;
+ ppsc->smart_ps = 2;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_LPS_ACTION,
+ (u8 *)(&enter_fwlps));
+
+ } else {
+ /* Reset the power save related parameters. */
+ ppsc->dot11_psmode = EACTIVE;
+ }
+ }
+ }
+}
+
+/*Enter the leisure power save mode.*/
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag;
+
+ if (!ppsc->b_fwctrl_lps)
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if (rtlpriv->link_info.b_busytraffic)
+ return;
+
+	/* only start to sleep a while after association, so DHCP and
+	 * the 4-way handshake have enough time to finish */
+ if (mac->cnt_after_linked < 5)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+	/* Enter PS only if we connected to the AP a while ago. */
+ if (mac->cnt_after_linked >= 2) {
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Enter 802.11 power save mode...\n"));
+
+ rtl_lps_set_psmode(hw, EAUTOPS);
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/*Leave the leisure power save mode.*/
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flag;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->b_fwctrl_lps) {
+ if (ppsc->dot11_psmode != EACTIVE) {
+
+ /*FIX ME */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Busy Traffic,Leave 802.11 power save..\n"));
+
+ rtl_lps_set_psmode(hw, EACTIVE);
+ }
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/* For sw LPS*/
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_tim_ie *tim_ie;
+ u8 *tim;
+ u8 tim_len;
+ bool u_buffed;
+ bool m_buffed;
+
+ if (mac->opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!rtlpriv->psc.b_swctrl_lps)
+ return;
+
+ if (rtlpriv->mac80211.link_state != MAC80211_LINKED)
+ return;
+
+ if (!rtlpriv->psc.sw_ps_enabled)
+ return;
+
+ if (rtlpriv->psc.b_fwctrl_lps)
+ return;
+
+ if (likely(!(hw->conf.flags & IEEE80211_CONF_PS)))
+ return;
+
+ /* check if this really is a beacon */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+ rtlpriv->psc.last_beacon = jiffies;
+
+ tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
+ if (!tim)
+ return;
+
+ if (tim[1] < sizeof(*tim_ie))
+ return;
+
+ tim_len = tim[1];
+ tim_ie = (struct ieee80211_tim_ie *) &tim[2];
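+	/* struct ieee80211_tim_ie starts with dtim_count and dtim_period,
+	 * followed by bitmap_ctrl and the partial virtual bitmap that
+	 * ieee80211_check_tim() searches for our AID below. */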
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period))
+ rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+/*<delete in kernel start>*/
+#else
+ if (!WARN_ON_ONCE(!mac->vif->bss_conf.dtim_period))
+ rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
+#endif
+/*<delete in kernel end>*/
+
+	/* Check whether the PHY can be turned off again. */
+
+ /* 1. What about buffered unicast traffic for our AID? */
+ u_buffed = ieee80211_check_tim(tim_ie, tim_len,
+ rtlpriv->mac80211.assoc_id);
+
+ /* 2. Maybe the AP wants to send multicast/broadcast data? */
+ m_buffed = tim_ie->bitmap_ctrl & 0x01;
+ rtlpriv->psc.multi_buffered = m_buffed;
+
+	/* unicast is handled by mac80211 by clearing
+	 * IEEE80211_CONF_PS, so we only check
+	 * multicast frames here */
+	if (!m_buffed) {
+		/* back to low-power land; the delay helps prevent
+		 * the null power save frame tx from failing */
+		queue_delayed_work(rtlpriv->works.rtl_wq,
+				&rtlpriv->works.ps_work, MSECS(5));
+	} else {
+		RT_TRACE(COMP_POWER, DBG_DMESG,
+			 ("u_buffed: %x, m_buffed: %x\n",
+			  u_buffed, m_buffed));
+ }
+}
+
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ unsigned long flag;
+
+ if (!rtlpriv->psc.b_swctrl_lps)
+ return;
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+ RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+void rtl_swlps_rfon_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_swlps_rf_awake(hw);
+}
+
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ unsigned long flag;
+ u8 sleep_intv;
+
+ if (!rtlpriv->psc.sw_ps_enabled)
+ return;
+
+ if ((rtlpriv->sec.being_setkey) ||
+ (mac->opmode == NL80211_IFTYPE_ADHOC))
+ return;
+
+	/* only start to sleep a while after association, so DHCP and
+	 * the 4-way handshake have enough time to finish */
+ if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5))
+ return;
+
+ if (rtlpriv->link_info.b_busytraffic)
+ return;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (rtlpriv->psc.rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return;
+ }
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS,false);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
+ !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
+ }
+
+	/* Power save algorithm: when this beacon is a DTIM we set the
+	 * sleep time to dtim_period * n; when it is not a DTIM we set
+	 * sleep_intv to rtlpriv->psc.dtim_counter, capped at
+	 * MAX_SW_LPS_SLEEP_INTV (default 5) */
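+	/* Illustrative example: with a DTIM period of 1 the code below
+	 * picks sleep_intv = 2, so with a beacon interval of 100 the
+	 * delayed work fires after roughly 2 * 100 - 40 = 160 ms, i.e.
+	 * shortly before a DTIM beacon. */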
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*<delete in kernel end>*/
+ if (rtlpriv->psc.dtim_counter == 0) {
+ if (hw->conf.ps_dtim_period == 1)
+ sleep_intv = hw->conf.ps_dtim_period * 2;
+ else
+ sleep_intv = hw->conf.ps_dtim_period;
+ } else {
+ sleep_intv = rtlpriv->psc.dtim_counter;
+ }
+/*<delete in kernel start>*/
+#else
+ if (rtlpriv->psc.dtim_counter == 0) {
+ if (mac->vif->bss_conf.dtim_period == 1)
+ sleep_intv = mac->vif->bss_conf.dtim_period * 2;
+ else
+ sleep_intv = mac->vif->bss_conf.dtim_period;
+ } else {
+ sleep_intv = rtlpriv->psc.dtim_counter;
+ }
+#endif
+/*<delete in kernel end>*/
+
+ if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
+ sleep_intv = MAX_SW_LPS_SLEEP_INTV;
+
+	/* this print should normally show dtim_counter = 0 and
+	 * sleep = dtim_period, which means we should be
+	 * awake before every dtim */
+ RT_TRACE(COMP_POWER, DBG_DMESG,
+ ("dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv));
+
+	/* we tested that 40ms is enough to cover the sw & hw switch delay */
+ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
+ MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
+}
+
+
+void rtl_swlps_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ps_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool ps = false;
+
+ ps = (hw->conf.flags & IEEE80211_CONF_PS);
+
+ /* we can sleep after ps null send ok */
+ if (rtlpriv->psc.state_inap) {
+ rtl_swlps_rf_sleep(hw);
+
+ if (rtlpriv->psc.state && !ps) {
+ rtlpriv->psc.sleep_ms =
+ jiffies_to_msecs(jiffies -
+ rtlpriv->psc.last_action);
+ }
+
+ if (ps)
+ rtlpriv->psc.last_slept = jiffies;
+
+ rtlpriv->psc.last_action = jiffies;
+ rtlpriv->psc.state = ps;
+ }
+}
+
+
+void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+ u8 *pos, *end, *ie;
+ u16 noa_len;
+ static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+ u8 noa_num, index,i, noa_index = 0;
+ bool find_p2p_ie = false , find_p2p_ps_ie = false;
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
+ ie = NULL;
+
+ while (pos + 1 < end) {
+
+ if (pos + 2 + pos[1] > end)
+ return;
+
+ if (pos[0] == 221 && pos[1] > 4) {
+ if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) {
+ ie = pos + 2+4;
+ break;
+ }
+ }
+ pos += 2 + pos[1];
+ }
+
+ if (ie == NULL)
+ return;
+ find_p2p_ie = true;
+ /*to find noa ie*/
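+	/* Layout of the NOA attribute parsed below (as assumed from the
+	 * Wi-Fi Direct spec): attribute ID 12, 2-byte length, then
+	 * index (1 byte), CTWindow/OppPS (1 byte) and (length - 2) / 13
+	 * NOA descriptors of 13 bytes each: count/type (1),
+	 * duration (4), interval (4), start time (4). */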
+ while (ie + 1 < end) {
+ noa_len = READEF2BYTE(&ie[1]);
+ if (ie + 3 + ie[1] > end)
+ return;
+
+ if (ie[0] == 12) {
+ find_p2p_ps_ie = true;
+ if ( (noa_len - 2) % 13 != 0){
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("P2P notice of absence: "
+ "invalid length.%d\n",noa_len));
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+ || noa_index != p2pinfo->noa_index) {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("update NOA ie.\n"));
+ p2pinfo->noa_index = noa_index;
+ p2pinfo->opp_ps= (ie[4] >> 7);
+ p2pinfo->ctwindow = ie[4] & 0x7F;
+ p2pinfo->noa_num = noa_num;
+ index = 5;
+ for (i = 0; i< noa_num; i++){
+ p2pinfo->noa_count_type[i] =
+ READEF1BYTE(ie+index);
+ index += 1;
+ p2pinfo->noa_duration[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_interval[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_start_time[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ }
+
+ if (p2pinfo->opp_ps == 1) {
+ p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* Driver should wait LPS
+ * entering CTWindow*/
+ if (rtlpriv->psc.b_fw_current_inpsmode){
+ rtl_p2p_ps_cmd(hw,
+ P2P_PS_ENABLE);
+ }
+ } else if (p2pinfo->noa_num > 0) {
+ p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+ rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+ } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+ }
+
+ break;
+ }
+ ie += 3 + noa_len;
+ }
+
+ if (find_p2p_ie == true) {
+ if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
+ (find_p2p_ps_ie == false))
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+}
+
+void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+ bool find_p2p_ie = false , find_p2p_ps_ie = false;
+ u8 noa_num, index,i, noa_index = 0;
+ u8 *pos, *end, *ie;
+ u16 noa_len;
+ static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
+
+ pos = (u8 *) &mgmt->u.action.category;
+ end = data + len;
+ ie = NULL;
+
+ if (pos[0] == 0x7f ) {
+ if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0) {
+ ie = pos + 3+4;
+ }
+ }
+
+ if (ie == NULL)
+ return;
+ find_p2p_ie = true;
+
+ RT_TRACE(COMP_FW, DBG_LOUD, ("action frame find P2P IE.\n"));
+ /*to find noa ie*/
+ while (ie + 1 < end) {
+ noa_len = READEF2BYTE(&ie[1]);
+ if (ie + 3 + ie[1] > end)
+ return;
+
+ if (ie[0] == 12) {
+ RT_TRACE(COMP_FW, DBG_LOUD, ("find NOA IE.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, ("noa ie "),
+ ie, noa_len);
+ find_p2p_ps_ie = true;
+ if ( (noa_len - 2) % 13 != 0){
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("P2P notice of absence: "
+ "invalid length.%d\n",noa_len));
+ return;
+ } else {
+ noa_num = (noa_len - 2) / 13;
+ }
+ noa_index = ie[3];
+ if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE
+ || noa_index != p2pinfo->noa_index) {
+ p2pinfo->noa_index = noa_index;
+ p2pinfo->opp_ps= (ie[4] >> 7);
+ p2pinfo->ctwindow = ie[4] & 0x7F;
+ p2pinfo->noa_num = noa_num;
+ index = 5;
+ for (i = 0; i< noa_num; i++){
+ p2pinfo->noa_count_type[i] =
+ READEF1BYTE(ie+index);
+ index += 1;
+ p2pinfo->noa_duration[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_interval[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ p2pinfo->noa_start_time[i] =
+ READEF4BYTE(ie+index);
+ index += 4;
+ }
+
+ if (p2pinfo->opp_ps == 1) {
+ p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* Driver should wait LPS
+ * entering CTWindow */
+ if (rtlpriv->psc.b_fw_current_inpsmode){
+ rtl_p2p_ps_cmd(hw,
+ P2P_PS_ENABLE);
+ }
+ } else if (p2pinfo->noa_num > 0) {
+ p2pinfo->p2p_ps_mode = P2P_PS_NOA;
+ rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
+ } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
+ }
+ }
+
+ break;
+ }
+ ie += 3 + noa_len;
+ }
+
+
+}
+
+
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
+
+ RT_TRACE(COMP_FW, DBG_LOUD, (" p2p state %x\n",p2p_ps_state));
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+
+ p2pinfo->noa_index = 0;
+ p2pinfo->ctwindow = 0;
+ p2pinfo->opp_ps = 0;
+ p2pinfo->noa_num = 0;
+ p2pinfo->p2p_ps_mode = P2P_PS_NONE;
+ if (rtlps->b_fw_current_inpsmode == true) {
+ if (rtlps->smart_ps == 0) {
+ rtlps->smart_ps = 2;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *)(&rtlps->pwr_mode));
+ }
+
+ }
+ break;
+ case P2P_PS_ENABLE:
+ if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+
+ if (p2pinfo->ctwindow > 0) {
+ if (rtlps->smart_ps != 0){
+ rtlps->smart_ps = 0;
+ rtlpriv->cfg->ops->set_hw_reg(
+ hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *)(&rtlps->pwr_mode));
+ }
+ }
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+
+ }
+ break;
+ case P2P_PS_SCAN:
+ case P2P_PS_SCAN_DONE:
+ case P2P_PS_ALLSTASLEEP:
+ if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2pinfo->p2p_ps_state = p2p_ps_state;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ (u8 *)(&p2p_ps_state));
+ }
+ break;
+ default:
+ break;
+
+ }
+ RT_TRACE(COMP_FW, DBG_LOUD, (" ctwindow %x oppps %x \n",
+ p2pinfo->ctwindow,p2pinfo->opp_ps));
+ RT_TRACE(COMP_FW, DBG_LOUD, ("count %x duration %x index %x interval %x"
+ " start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num));
+ RT_TRACE(COMP_FW, DBG_LOUD, ("end\n"));
+}
+
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (void *) data;
+
+ if (!mac->p2p)
+ return;
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+	/* and only beacons from the associated BSSID, please */
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+		return;
+
+ /* check if this really is a beacon */
+ if (!(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)))
+ return;
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ rtl_p2p_action_ie(hw,data,len - FCS_LEN);
+ } else {
+ rtl_p2p_noa_ie(hw,data,len - FCS_LEN);
+ }
+
+}
diff --git a/drivers/staging/rtl8821ae/ps.h b/drivers/staging/rtl8821ae/ps.h
new file mode 100644
index 000000000000..374ed77c4126
--- /dev/null
+++ b/drivers/staging/rtl8821ae/ps.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __REALTEK_RTL_PCI_PS_H__
+#define __REALTEK_RTL_PCI_PS_H__
+
+#define MAX_SW_LPS_SLEEP_INTV 5
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset, u32 changesource,
+ bool protect_or_not);
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
+void rtl_ips_nic_off(struct ieee80211_hw *hw);
+void rtl_ips_nic_on(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(void *data);
+void rtl_lps_enter(struct ieee80211_hw *hw);
+void rtl_lps_leave(struct ieee80211_hw *hw);
+
+void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
+
+void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_swlps_wq_callback(void *data);
+void rtl_swlps_rfon_wq_callback(void *data);
+void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
+void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
+void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state);
+void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+#endif
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
new file mode 100644
index 000000000000..d387f13ea7dc
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -0,0 +1,309 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "rc.h"
+
+/*
+ *Find the highest rate index we can use.
+ *If the skb carries special data like DHCP/EAPOL we set
+ *it to the lowest rate, CCK_1M; otherwise we set the rate
+ *to the highest rate for the wireless mode in use, which is
+ *what iwconfig reports as the Tx rate.
+ */
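+/* Roughly, the indices map to: CCK 1M-11M = idx 0-3 (B_MODE_MAX_RIX),
+ * OFDM up to 54M = idx 11 in G mode, 54M = idx 7 in A mode, and
+ * MCS7/MCS15 for 1T/2T N mode, matching the limits defined in rc.h. */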
+static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, bool not_data)
+{
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_sta_info *sta_entry = NULL;
+ u8 wireless_mode = 0;
+
+ /*
+	 *this rate index is not the true tx rate: the firmware
+	 *controls the rate entirely. It is only used for
+	 *1. the rate shown by iwconfig in B/G mode
+	 *2. rtl_get_tcb_desc: when the rate is 1M we use the
+	 * user rate instead of the FW rate.
+ */
+ if (rtlmac->opmode == NL80211_IFTYPE_AP ||
+ rtlmac->opmode == NL80211_IFTYPE_ADHOC ||
+ rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wireless_mode = sta_entry->wireless_mode;
+ } else {
+ return 0;
+ }
+ } else {
+ wireless_mode = rtlmac->mode;
+ }
+
+ if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) {
+ return 0;
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (wireless_mode == WIRELESS_MODE_B) {
+ return B_MODE_MAX_RIX;
+ } else if (wireless_mode == WIRELESS_MODE_G) {
+ return G_MODE_MAX_RIX;
+ } else {
+ if (get_rf_type(rtlphy) != RF_2T2R)
+ return N_MODE_MCS7_RIX;
+ else
+ return N_MODE_MCS15_RIX;
+ }
+ } else {
+ if (wireless_mode == WIRELESS_MODE_A) {
+ return A_MODE_MAX_RIX;
+ } else {
+ if (get_rf_type(rtlphy) != RF_2T2R)
+ return N_MODE_MCS7_RIX;
+ else
+ return N_MODE_MCS15_RIX;
+ }
+ }
+ }
+}
+
+static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_rate *rate,
+ struct ieee80211_tx_rate_control *txrc,
+ u8 tries, char rix, int rtsctsenable,
+ bool not_data)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 sgi_20 = 0, sgi_40 = 0;
+
+ if (sta) {
+ sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ }
+ rate->count = tries;
+ rate->idx = rix >= 0x00 ? rix : 0x00;
+
+ if (!not_data) {
+ if (txrc->short_preamble)
+ rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta && (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ } else {
+ if (mac->bw_40)
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+ if (sgi_20 || sgi_40)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (sta && sta->ht_cap.ht_supported)
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ }
+}
+
+static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
+ void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->control.rates;
+ __le16 fc = rtl_get_fc(skb);
+ u8 try_per_rate, i, rix;
+ bool not_data = !ieee80211_is_data(fc);
+
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
+ try_per_rate = 1;
+ _rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
+ try_per_rate, rix, 1, not_data);
+
+ if (!not_data) {
+ for (i = 1; i < 4; i++)
+ _rtl_rc_rate_set_series(rtlpriv, sta, &rates[i],
+ txrc, i, (rix - i), 1,
+ not_data);
+ }
+}
+
+static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv,
+ struct rtl_sta_info *sta_entry, u16 tid)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ if (mac->act_scanning)
+ return false;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION &&
+ mac->cnt_after_linked < 3)
+ return false;
+
+ if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP)
+ return true;
+
+ return false;
+}
+
+/*mac80211 Rate Control callbacks*/
+static void rtl_tx_status(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ __le16 fc = rtl_get_fc(skb);
+ struct rtl_sta_info *sta_entry;
+
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ if (rtl_is_special_data(mac->hw, skb, true))
+ return;
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ return;
+
+ if (sta) {
+ /* Check if aggregation has to be enabled for this tid */
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ if ((sta->ht_cap.ht_supported == true) &&
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 tid = rtl_get_tid(skb);
+ if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
+ tid)) {
+ sta_entry->tids[tid].agg.agg_state =
+ RTL_AGG_PROGRESS;
+ /*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+ /*<delete in kernel end>*/
+ ieee80211_start_tx_ba_session(sta, tid,
+ 5000);
+ /*<delete in kernel start>*/
+#else
+ ieee80211_start_tx_ba_session(sta, tid);
+#endif
+ /*<delete in kernel end>*/
+ }
+ }
+ }
+ }
+}
+
+static void rtl_rate_init(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
+static void rtl_rate_update(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed,
+ enum nl80211_channel_type oper_chan_type)
+{
+}
+#else
+static void rtl_rate_update(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed)
+{
+}
+#endif
+static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ return rtlpriv;
+}
+
+static void rtl_rate_free(void *rtlpriv)
+{
+ return;
+}
+
+static void *rtl_rate_alloc_sta(void *ppriv,
+ struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_rate_priv *rate_priv;
+
+ rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
+ if (!rate_priv) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Unable to allocate private rc structure\n"));
+ return NULL;
+ }
+
+ rtlpriv->rate_priv = rate_priv;
+
+ return rate_priv;
+}
+
+static void rtl_rate_free_sta(void *rtlpriv,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct rtl_rate_priv *rate_priv = priv_sta;
+ kfree(rate_priv);
+}
+
+static struct rate_control_ops rtl_rate_ops = {
+ .module = NULL,
+ .name = "rtl_rc",
+ .alloc = rtl_rate_alloc,
+ .free = rtl_rate_free,
+ .alloc_sta = rtl_rate_alloc_sta,
+ .free_sta = rtl_rate_free_sta,
+ .rate_init = rtl_rate_init,
+ .rate_update = rtl_rate_update,
+ .tx_status = rtl_tx_status,
+ .get_rate = rtl_get_rate,
+};
+
+int rtl_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rtl_rate_ops);
+}
+
+void rtl_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rtl_rate_ops);
+}
diff --git a/drivers/staging/rtl8821ae/rc.h b/drivers/staging/rtl8821ae/rc.h
new file mode 100644
index 000000000000..4afa2c20adcf
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rc.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_RC_H__
+#define __RTL_RC_H__
+
+#define B_MODE_MAX_RIX 3
+#define G_MODE_MAX_RIX 11
+#define A_MODE_MAX_RIX 7
+
+/* in mac80211 mcs0-mcs15 are idx0-idx15 */
+#define N_MODE_MCS7_RIX 7
+#define N_MODE_MCS15_RIX 15
+
+struct rtl_rate_priv {
+ u8 ht_cap;
+};
+
+int rtl_rate_control_register(void);
+void rtl_rate_control_unregister(void);
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.c b/drivers/staging/rtl8821ae/regd.c
new file mode 100644
index 000000000000..0a4b3984b7ef
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.c
@@ -0,0 +1,503 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "regd.h"
+
+static struct country_code_to_enum_rd allCountries[] = {
+ {COUNTRY_CODE_FCC, "US"},
+ {COUNTRY_CODE_IC, "US"},
+ {COUNTRY_CODE_ETSI, "EC"},
+ {COUNTRY_CODE_SPAIN, "EC"},
+ {COUNTRY_CODE_FRANCE, "EC"},
+ {COUNTRY_CODE_MKK, "JP"},
+ {COUNTRY_CODE_MKK1, "JP"},
+ {COUNTRY_CODE_ISRAEL, "EC"},
+ {COUNTRY_CODE_TELEC, "JP"},
+ {COUNTRY_CODE_MIC, "JP"},
+ {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
+ {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
+ {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+};
+
+/*
+ *Only these channels allow active
+ *scan on all world regulatory domains
+ */
+#define RTL819x_2GHZ_CH01_11 \
+ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
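+/* REG_RULE() arguments are, in order: start and end frequency in MHz
+ * (channel edges, hence the +/-10 offsets), max bandwidth (MHz), max
+ * antenna gain (dBi), max EIRP (dBm) and regulatory flags. */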
+
+/*
+ *We enable active scan on these on a case-by-case
+ *basis, per regulatory domain
+ */
+#define RTL819x_2GHZ_CH12_13 \
+ REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+ NL80211_RRF_PASSIVE_SCAN)
+
+#define RTL819x_2GHZ_CH14 \
+ REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_OFDM)
+
+/* 5G chan 36 - chan 64*/
+#define RTL819x_5GHZ_5150_5350 \
+ REG_RULE(5150-10, 5350+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+/* 5G chan 100 - chan 165*/
+#define RTL819x_5GHZ_5470_5850 \
+ REG_RULE(5470-10, 5850+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+/* 5G chan 149 - chan 165*/
+#define RTL819x_5GHZ_5725_5850 \
+ REG_RULE(5725-10, 5850+10, 40, 0, 30, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+#define RTL819x_5GHZ_ALL \
+ RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850
+
+static const struct ieee80211_regdomain rtl_regdom_11 = {
+ .n_reg_rules = 1,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_12_13 = {
+ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_no_midband = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_60_64 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_2GHZ_CH14,
+ RTL819x_5GHZ_5725_5850,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_14 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_2GHZ_CH14,
+ }
+};
+
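+/* 5260-5700 MHz covers the DFS channels (52-140) that require radar detection */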
+static bool _rtl_is_radar_freq(u16 center_freq)
+{
+ return (center_freq >= 5260 && center_freq <= 5700);
+}
+
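+/*
+ *Clear the NO_IBSS/PASSIVE_SCAN flags on non-radar channels that the
+ *current rules (or a previously found beacon) allow us to use.
+ */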
+static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ u32 bandwidth = 0;
+ int r;
+#endif
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (_rtl_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ continue;
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(reg_rule))
+ continue;
+#else
+ r = freq_reg_info(wiphy, ch->center_freq,
+ bandwidth, &reg_rule);
+ if (r)
+ continue;
+#endif
+
+ /*
+ *If 11d had a rule for this channel ensure
+ *we enable adhoc/beaconing if it allows us to
+ *use it. Note that we would have disabled it
+ *by applying our static world regdomain by
+ *default during init, prior to calling our
+ *regulatory_hint().
+ */
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
+ ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+ if (!(reg_rule->flags &
+ NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else {
+ if (ch->beacon_found)
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
+ }
+ }
+}
+
+/* Allows active scan on Ch 12 and 13 */
+static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator
+ initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *reg_rule;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ u32 bandwidth = 0;
+ int r;
+#endif
+
+ if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+ return;
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ /*
+	 *If no country IE has been received, always enable active scan
+	 *on these channels. This is only done for specific regulatory SKUs.
+ */
+ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ch = &sband->channels[11]; /* CH 12 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ ch = &sband->channels[12]; /* CH 13 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ return;
+ }
+
+ /*
+ *If a country IE has been received check its rule for this
+ *channel first before enabling active scan. The passive scan
+ *would have been enforced by the initial processing of our
+ *custom regulatory domain.
+ */
+
+ ch = &sband->channels[11]; /* CH 12 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
+#else
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+#endif
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+
+ ch = &sband->channels[12]; /* CH 13 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
+#else
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+#endif
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+/*
+ *Always apply Radar/DFS rules on
+ *freq range 5260 MHz - 5700 MHz
+ */
+static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ return;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!_rtl_is_radar_freq(ch->center_freq))
+ continue;
+
+ /*
+ *We always enable radar detection/DFS on this
+ *frequency range. Additionally we also apply on
+ *this frequency range:
+		 *- If STA mode does not yet have DFS support, disable
+		 * active scanning.
+		 *- If adhoc mode does not yet support DFS, disable
+		 * adhoc on this frequency.
+		 *- If AP mode does not yet support radar detection/DFS,
+		 * do not allow AP mode.
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
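+/* Re-apply beaconing and active-scan adjustments for world-roaming operation */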
+static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct rtl_regulatory *reg)
+{
+ _rtl_reg_apply_beaconing_flags(wiphy, initiator);
+ _rtl_reg_apply_active_scan_flags(wiphy, initiator);
+ return;
+}
+
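+/*
+ *Walk every channel; the per-channel debug output has been stripped,
+ *so this is effectively a no-op left as a debugging hook.
+ */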
+static void _rtl_dump_channel_map(struct wiphy *wiphy)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++)
+ ch = &sband->channels[i];
+ }
+}
+
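+/*
+ *Core of the regulatory notifier: radar flags are always re-applied;
+ *world flags only when the update came from a country IE.
+ */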
+static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct rtl_regulatory *reg)
+{
+ /* We always apply this */
+ _rtl_reg_apply_radar_flags(wiphy);
+
+ switch (request->initiator) {
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ case NL80211_REGDOM_SET_BY_CORE:
+ case NL80211_REGDOM_SET_BY_USER:
+ break;
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ _rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
+ break;
+ }
+
+ _rtl_dump_channel_map(wiphy);
+
+ return 0;
+}
+
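+/* Map the EEPROM channel plan (country code) to one of the static regdomains */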
+static const struct ieee80211_regdomain *_rtl_regdomain_select(
+ struct rtl_regulatory *reg)
+{
+ switch (reg->country_code) {
+ case COUNTRY_CODE_FCC:
+ return &rtl_regdom_no_midband;
+ case COUNTRY_CODE_IC:
+ return &rtl_regdom_11;
+ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_TELEC_NETGEAR:
+ return &rtl_regdom_60_64;
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_ISRAEL:
+ case COUNTRY_CODE_WORLD_WIDE_13:
+ return &rtl_regdom_12_13;
+ case COUNTRY_CODE_MKK:
+ case COUNTRY_CODE_MKK1:
+ case COUNTRY_CODE_TELEC:
+ case COUNTRY_CODE_MIC:
+ return &rtl_regdom_14_60_64;
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ return &rtl_regdom_14;
+ default:
+ return &rtl_regdom_no_midband;
+ }
+}
+
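+/*
+ *The wiphy reg_notifier callback returns void from kernel 3.9 onwards
+ *(int before), hence the two prototypes below.
+ */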
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+ struct wiphy *wiphy,
+ void (*reg_notifier) (struct wiphy * wiphy,
+ struct regulatory_request *
+ request))
+#else
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier) (struct wiphy * wiphy,
+ struct regulatory_request *
+ request))
+#endif
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+
+ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ regd = _rtl_regdomain_select(reg);
+ wiphy_apply_custom_regulatory(wiphy, regd);
+ _rtl_reg_apply_radar_flags(wiphy);
+ _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ return 0;
+}
+
+static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countrycode == countrycode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
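+/*
+ *Read the channel plan from efuse, fall back to world-wide-13 if it is
+ *invalid, translate it to an ISO alpha2 string and set up the wiphy.
+ */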
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request))
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request))
+#endif
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct wiphy *wiphy = hw->wiphy;
+ struct country_code_to_enum_rd *country = NULL;
+
+	if (wiphy == NULL)
+ return -EINVAL;
+
+ /* init country_code from efuse channel plan */
+ rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+
+ RT_TRACE(COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
+ rtlpriv->regd.country_code));
+
+ if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
+ RT_TRACE(COMP_REGD, DBG_DMESG,
+			 (KERN_DEBUG "rtl: EEPROM indicates invalid country code, "
+			  "world wide 13 should be used\n"));
+
+ rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
+ }
+
+ country = _rtl_regd_find_country(rtlpriv->regd.country_code);
+
+ if (country) {
+ rtlpriv->regd.alpha2[0] = country->iso_name[0];
+ rtlpriv->regd.alpha2[1] = country->iso_name[1];
+ } else {
+ rtlpriv->regd.alpha2[0] = '0';
+ rtlpriv->regd.alpha2[1] = '0';
+ }
+
+ RT_TRACE(COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
+
+ _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+ _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#else
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_REGD, DBG_LOUD, ("\n"));
+
+ return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
+#endif
diff --git a/drivers/staging/rtl8821ae/regd.h b/drivers/staging/rtl8821ae/regd.h
new file mode 100644
index 000000000000..dceb3f18200b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/regd.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_REGD_H__
+#define __RTL_REGD_H__
+
+#define IEEE80211_CHAN_NO_IBSS (1 << 2)
+#define IEEE80211_CHAN_PASSIVE_SCAN (1 << 1)
+#define WIPHY_FLAG_CUSTOM_REGULATORY BIT(0)
+#define WIPHY_FLAG_STRICT_REGULATORY BIT(1)
+#define WIPHY_FLAG_DISABLE_BEACON_HINTS BIT(2)
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *iso_name;
+};
+
+enum country_code_type_t {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+
+ /*add new channel plan above this line */
+ COUNTRY_CODE_MAX
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+int rtl_regd_init(struct ieee80211_hw *hw,
+ void (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#else
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/btc.h b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
new file mode 100644
index 000000000000..74ac189e3a88
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/btc.h
@@ -0,0 +1,87 @@
+
+/******************************************************************************
+ **
+ ** Copyright(c) 2009-2010 Realtek Corporation.
+ **
+ ** This program is free software; you can redistribute it and/or modify it
+ ** under the terms of version 2 of the GNU General Public License as
+ ** published by the Free Software Foundation.
+ **
+ ** This program is distributed in the hope that it will be useful, but WITHOUT
+ ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ ** more details.
+ **
+ ** You should have received a copy of the GNU General Public License along with
+ ** this program; if not, write to the Free Software Foundation, Inc.,
+ ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ **
+ ** The full GNU General Public License is included in this distribution in the
+ ** file called LICENSE.
+ **
+ ** Contact Information:
+ ** wlanfae <wlanfae@realtek.com>
+ ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ ** Hsinchu 300, Taiwan.
+ ** Larry Finger <Larry.Finger@lwfinger.net>
+ **
+ ******************************************************************************/
+
+#ifndef __RTL8821AE_BTC_H__
+#define __RTL8821AE_BTC_H__
+
+#include "../wifi.h"
+#include "hal_bt_coexist.h"
+
+struct bt_coexist_c2h_info {
+ u8 no_parse_c2h;
+ u8 has_c2h;
+};
+
+struct btdm_8821ae {
+ bool b_all_off;
+ bool b_agc_table_en;
+ bool b_adc_back_off_on;
+ bool b2_ant_hid_en;
+ bool b_low_penalty_rate_adaptive;
+ bool b_rf_rx_lpf_shrink;
+ bool b_reject_aggre_pkt;
+ bool b_tra_tdma_on;
+ u8 tra_tdma_nav;
+ u8 tra_tdma_ant;
+ bool b_tdma_on;
+ u8 tdma_ant;
+ u8 tdma_nav;
+ u8 tdma_dac_swing;
+ u8 fw_dac_swing_lvl;
+ bool b_ps_tdma_on;
+ u8 ps_tdma_byte[5];
+ bool b_pta_on;
+ u32 val_0x6c0;
+ u32 val_0x6c8;
+ u32 val_0x6cc;
+ bool b_sw_dac_swing_on;
+ u32 sw_dac_swing_lvl;
+ u32 wlan_act_hi;
+ u32 wlan_act_lo;
+ u32 bt_retry_index;
+ bool b_dec_bt_pwr;
+ bool b_ignore_wlan_act;
+};
+
+struct bt_coexist_8821ae {
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 c2h_bt_info;
+ bool b_c2h_bt_info_req_sent;
+ bool b_c2h_bt_inquiry_page;
+ u32 bt_inq_page_start_time;
+ u8 bt_retry_cnt;
+ u8 c2h_bt_info_original;
+ u8 bt_inquiry_page_cnt;
+ struct btdm_8821ae btdm;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/def.h b/drivers/staging/rtl8821ae/rtl8821ae/def.h
new file mode 100644
index 000000000000..72ebdeaa6e7d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/def.h
@@ -0,0 +1,442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_DEF_H__
+#define __RTL8821AE_DEF_H__
+
+/*--------------------------Define -------------------------------------------*/
+/* BIT 7 HT Rate*/
+/*TxHT = 0*/
+#define MGN_1M 0x02
+#define MGN_2M 0x04
+#define MGN_5_5M 0x0b
+#define MGN_11M 0x16
+
+#define MGN_6M 0x0c
+#define MGN_9M 0x12
+#define MGN_12M 0x18
+#define MGN_18M 0x24
+#define MGN_24M 0x30
+#define MGN_36M 0x48
+#define MGN_48M 0x60
+#define MGN_54M 0x6c
+
+/*TxHT = 1*/
+#define MGN_MCS0 0x80
+#define MGN_MCS1 0x81
+#define MGN_MCS2 0x82
+#define MGN_MCS3 0x83
+#define MGN_MCS4 0x84
+#define MGN_MCS5 0x85
+#define MGN_MCS6 0x86
+#define MGN_MCS7 0x87
+#define MGN_MCS8 0x88
+#define MGN_MCS9 0x89
+#define MGN_MCS10 0x8a
+#define MGN_MCS11 0x8b
+#define MGN_MCS12 0x8c
+#define MGN_MCS13 0x8d
+#define MGN_MCS14 0x8e
+#define MGN_MCS15 0x8f
+/*VHT rate*/
+#define MGN_VHT1SS_MCS0 0x90
+#define MGN_VHT1SS_MCS1 0x91
+#define MGN_VHT1SS_MCS2 0x92
+#define MGN_VHT1SS_MCS3 0x93
+#define MGN_VHT1SS_MCS4 0x94
+#define MGN_VHT1SS_MCS5 0x95
+#define MGN_VHT1SS_MCS6 0x96
+#define MGN_VHT1SS_MCS7 0x97
+#define MGN_VHT1SS_MCS8 0x98
+#define MGN_VHT1SS_MCS9 0x99
+#define MGN_VHT2SS_MCS0 0x9a
+#define MGN_VHT2SS_MCS1 0x9b
+#define MGN_VHT2SS_MCS2 0x9c
+#define MGN_VHT2SS_MCS3 0x9d
+#define MGN_VHT2SS_MCS4 0x9e
+#define MGN_VHT2SS_MCS5 0x9f
+#define MGN_VHT2SS_MCS6 0xa0
+#define MGN_VHT2SS_MCS7 0xa1
+#define MGN_VHT2SS_MCS8 0xa2
+#define MGN_VHT2SS_MCS9 0xa3
+
+#define MGN_VHT3SS_MCS0 0xa4
+#define MGN_VHT3SS_MCS1 0xa5
+#define MGN_VHT3SS_MCS2 0xa6
+#define MGN_VHT3SS_MCS3 0xa7
+#define MGN_VHT3SS_MCS4 0xa8
+#define MGN_VHT3SS_MCS5 0xa9
+#define MGN_VHT3SS_MCS6 0xaa
+#define MGN_VHT3SS_MCS7 0xab
+#define MGN_VHT3SS_MCS8 0xac
+#define MGN_VHT3SS_MCS9 0xad
+
+#define MGN_MCS0_SG 0xc0
+#define MGN_MCS1_SG 0xc1
+#define MGN_MCS2_SG 0xc2
+#define MGN_MCS3_SG 0xc3
+#define MGN_MCS4_SG 0xc4
+#define MGN_MCS5_SG 0xc5
+#define MGN_MCS6_SG 0xc6
+#define MGN_MCS7_SG 0xc7
+#define MGN_MCS8_SG 0xc8
+#define MGN_MCS9_SG 0xc9
+#define MGN_MCS10_SG 0xca
+#define MGN_MCS11_SG 0xcb
+#define MGN_MCS12_SG 0xcc
+#define MGN_MCS13_SG 0xcd
+#define MGN_MCS14_SG 0xce
+#define MGN_MCS15_SG 0xcf
+
+#define MGN_UNKNOWN 0xff
+
+
+/* 30 ms */
+#define WIFI_NAV_UPPER_US 30000
+#define HAL_92C_NAV_UPPER_UNIT 128
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_RX_DMA_BUFFER_SIZE 0x3E80
+
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+#define AC2QUEUEID(_AC) (_AC)
+
+#define C2H_RX_CMD_HDR_LEN 8
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8*)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+
+#define CHIP_8812 BIT(2)
+#define CHIP_8821 (BIT(0)|BIT(2))
+
+#define CHIP_8821A (BIT(0)|BIT(2))
+#define NORMAL_CHIP BIT(3)
+#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6)))
+#define RF_TYPE_1T2R BIT(4)
+#define RF_TYPE_2T2R BIT(5)
+#define CHIP_VENDOR_UMC BIT(7)
+#define B_CUT_VERSION BIT(12)
+#define C_CUT_VERSION BIT(13)
+#define D_CUT_VERSION ((BIT(12)|BIT(13)))
+#define E_CUT_VERSION BIT(14)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+
+
+enum version_8821ae {
+ VERSION_TEST_CHIP_1T1R_8812 = 0x0004,
+ VERSION_TEST_CHIP_2T2R_8812 = 0x0024,
+ VERSION_NORMAL_TSMC_CHIP_1T1R_8812 = 0x100c,
+ VERSION_NORMAL_TSMC_CHIP_2T2R_8812 = 0x102c,
+ VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT = 0x200c,
+ VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT = 0x202c,
+ VERSION_TEST_CHIP_8821 = 0x0005,
+ VERSION_NORMAL_TSMC_CHIP_8821 = 0x000d,
+ VERSION_NORMAL_TSMC_CHIP_8821_B_CUT = 0x100d,
+ VERSION_UNKNOWN = 0xFF,
+};
+
+enum vht_data_sc{
+ VHT_DATA_SC_DONOT_CARE = 0,
+ VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
+ VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
+ VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
+ VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
+ VHT_DATA_SC_20_RECV1 = 5,
+ VHT_DATA_SC_20_RECV2 = 6,
+ VHT_DATA_SC_20_RECV3 = 7,
+ VHT_DATA_SC_20_RECV4 = 8,
+ VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
+ VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
+};
+
+
+/* MASK */
+#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
+#define CHIP_TYPE_MASK BIT(3)
+#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6))
+#define MANUFACTUER_MASK BIT(7)
+#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8))
+#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
+
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version))? false : true)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+ ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+ ? true : false)
+
+#define IS_8812_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8812)? \
+ true : false)
+#define IS_8821_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8821)? \
+ true : false)
+
+#define IS_VENDOR_8812A_TEST_CHIP(version) ((IS_8812_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ false : true) : false)
+#define IS_VENDOR_8812A_MP_CHIP(version) ((IS_8812_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ true : false) : false)
+#define IS_VENDOR_8812A_C_CUT(version) ((IS_8812_SERIES(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? \
+ true : false) : false)
+
+#define IS_VENDOR_8821A_TEST_CHIP(version) ((IS_8821_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ false : true) : false)
+#define IS_VENDOR_8821A_MP_CHIP(version) ((IS_8821_SERIES(version)) ? \
+ ((IS_NORMAL_CHIP(version)) ? \
+ true : false) : false)
+#define IS_VENDOR_8821A_B_CUT(version) ((IS_8821_SERIES(version)) ? \
+ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \
+ true : false) : false)
+
+
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rf_power_state {
+ RF_ON,
+ RF_OFF,
+ RF_SLEEP,
+ RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+ POWERCFG_MAX_POWER_SAVINGS,
+ POWERCFG_GLOBAL_POWER_SAVINGS,
+ POWERCFG_LOCAL_POWER_SAVINGS,
+ POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+enum hal_fw_c2h_cmd_id {
+ HAL_FW_C2H_CMD_Read_MACREG = 0,
+ HAL_FW_C2H_CMD_Read_BBREG = 1,
+ HAL_FW_C2H_CMD_Read_RFREG = 2,
+ HAL_FW_C2H_CMD_Read_EEPROM = 3,
+ HAL_FW_C2H_CMD_Read_EFUSE = 4,
+ HAL_FW_C2H_CMD_Read_CAM = 5,
+ HAL_FW_C2H_CMD_Get_BasicRate = 6,
+ HAL_FW_C2H_CMD_Get_DataRate = 7,
+ HAL_FW_C2H_CMD_Survey = 8,
+ HAL_FW_C2H_CMD_SurveyDone = 9,
+ HAL_FW_C2H_CMD_JoinBss = 10,
+ HAL_FW_C2H_CMD_AddSTA = 11,
+ HAL_FW_C2H_CMD_DelSTA = 12,
+ HAL_FW_C2H_CMD_AtimDone = 13,
+ HAL_FW_C2H_CMD_TX_Report = 14,
+ HAL_FW_C2H_CMD_CCX_Report = 15,
+ HAL_FW_C2H_CMD_DTM_Report = 16,
+ HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
+ HAL_FW_C2H_CMD_C2HLBK = 18,
+ HAL_FW_C2H_CMD_C2HDBG = 19,
+ HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
+ HAL_FW_C2H_CMD_MAX
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8821ae_rate {
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEVHT1SS_MCS0 = 0x1c,
+ DESC_RATEVHT1SS_MCS1 = 0x1d,
+ DESC_RATEVHT1SS_MCS2 = 0x1e,
+ DESC_RATEVHT1SS_MCS3 = 0x1f,
+ DESC_RATEVHT1SS_MCS4 = 0x20,
+ DESC_RATEVHT1SS_MCS5 = 0x21,
+ DESC_RATEVHT1SS_MCS6 = 0x22,
+ DESC_RATEVHT1SS_MCS7 = 0x23,
+ DESC_RATEVHT1SS_MCS8 = 0x24,
+ DESC_RATEVHT1SS_MCS9 = 0x25,
+ DESC_RATEVHT2SS_MCS0 = 0x26,
+ DESC_RATEVHT2SS_MCS1 = 0x27,
+ DESC_RATEVHT2SS_MCS2 = 0x28,
+ DESC_RATEVHT2SS_MCS3 = 0x29,
+ DESC_RATEVHT2SS_MCS4 = 0x2a,
+ DESC_RATEVHT2SS_MCS5 = 0x2b,
+ DESC_RATEVHT2SS_MCS6 = 0x2c,
+ DESC_RATEVHT2SS_MCS7 = 0x2d,
+ DESC_RATEVHT2SS_MCS8 = 0x2e,
+ DESC_RATEVHT2SS_MCS9 = 0x2f,
+};
+
+enum rx_packet_type{
+ NORMAL_RX,
+ TX_REPORT1,
+ TX_REPORT2,
+ HIS_REPORT,
+ C2H_PACKET,
+};
+
+struct phy_sts_cck_8821ae_t {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8821ae {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.c b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
new file mode 100644
index 000000000000..e0efcd281dfe
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.c
@@ -0,0 +1,3045 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
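+/* BB swing (TX scaling) values from -12 dB to +6 dB in 0.5 dB steps */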
+static const u32 rtl8812ae_txscaling_table[TXSCALE_TABLE_SIZE] =
+{
+ 0x081, // 0, -12.0dB
+ 0x088, // 1, -11.5dB
+ 0x090, // 2, -11.0dB
+ 0x099, // 3, -10.5dB
+ 0x0A2, // 4, -10.0dB
+ 0x0AC, // 5, -9.5dB
+ 0x0B6, // 6, -9.0dB
+ 0x0C0, // 7, -8.5dB
+ 0x0CC, // 8, -8.0dB
+ 0x0D8, // 9, -7.5dB
+ 0x0E5, // 10, -7.0dB
+ 0x0F2, // 11, -6.5dB
+ 0x101, // 12, -6.0dB
+ 0x110, // 13, -5.5dB
+ 0x120, // 14, -5.0dB
+ 0x131, // 15, -4.5dB
+ 0x143, // 16, -4.0dB
+ 0x156, // 17, -3.5dB
+ 0x16A, // 18, -3.0dB
+ 0x180, // 19, -2.5dB
+ 0x197, // 20, -2.0dB
+ 0x1AF, // 21, -1.5dB
+ 0x1C8, // 22, -1.0dB
+ 0x1E3, // 23, -0.5dB
+ 0x200, // 24, +0 dB
+ 0x21E, // 25, +0.5dB
+ 0x23E, // 26, +1.0dB
+ 0x261, // 27, +1.5dB
+ 0x285, // 28, +2.0dB
+ 0x2AB, // 29, +2.5dB
+ 0x2D3, // 30, +3.0dB
+ 0x2FE, // 31, +3.5dB
+ 0x32B, // 32, +4.0dB
+ 0x35C, // 33, +4.5dB
+ 0x38E, // 34, +5.0dB
+ 0x3C4, // 35, +5.5dB
+ 0x3FE // 36, +6.0dB
+};
+
+static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
+ 0x081, // 0, -12.0dB
+ 0x088, // 1, -11.5dB
+ 0x090, // 2, -11.0dB
+ 0x099, // 3, -10.5dB
+ 0x0A2, // 4, -10.0dB
+ 0x0AC, // 5, -9.5dB
+ 0x0B6, // 6, -9.0dB
+ 0x0C0, // 7, -8.5dB
+ 0x0CC, // 8, -8.0dB
+ 0x0D8, // 9, -7.5dB
+ 0x0E5, // 10, -7.0dB
+ 0x0F2, // 11, -6.5dB
+ 0x101, // 12, -6.0dB
+ 0x110, // 13, -5.5dB
+ 0x120, // 14, -5.0dB
+ 0x131, // 15, -4.5dB
+ 0x143, // 16, -4.0dB
+ 0x156, // 17, -3.5dB
+ 0x16A, // 18, -3.0dB
+ 0x180, // 19, -2.5dB
+ 0x197, // 20, -2.0dB
+ 0x1AF, // 21, -1.5dB
+ 0x1C8, // 22, -1.0dB
+ 0x1E3, // 23, -0.5dB
+ 0x200, // 24, +0 dB
+ 0x21E, // 25, +0.5dB
+ 0x23E, // 26, +1.0dB
+ 0x261, // 27, +1.5dB
+ 0x285, // 28, +2.0dB
+ 0x2AB, // 29, +2.5dB
+ 0x2D3, // 30, +3.0dB
+ 0x2FE, // 31, +3.5dB
+ 0x32B, // 32, +4.0dB
+ 0x35C, // 33, +4.5dB
+ 0x38E, // 34, +5.0dB
+ 0x3C4, // 35, +5.5dB
+ 0x3FE // 36, +6.0dB
+};
+
+static const u32 ofdmswing_table[] = {
+ 0x0b40002d, // 0, -15.0dB
+ 0x0c000030, // 1, -14.5dB
+ 0x0cc00033, // 2, -14.0dB
+ 0x0d800036, // 3, -13.5dB
+ 0x0e400039, // 4, -13.0dB
+ 0x0f00003c, // 5, -12.5dB
+ 0x10000040, // 6, -12.0dB
+ 0x11000044, // 7, -11.5dB
+ 0x12000048, // 8, -11.0dB
+ 0x1300004c, // 9, -10.5dB
+ 0x14400051, // 10, -10.0dB
+ 0x15800056, // 11, -9.5dB
+ 0x16c0005b, // 12, -9.0dB
+ 0x18000060, // 13, -8.5dB
+ 0x19800066, // 14, -8.0dB
+ 0x1b00006c, // 15, -7.5dB
+ 0x1c800072, // 16, -7.0dB
+ 0x1e400079, // 17, -6.5dB
+ 0x20000080, // 18, -6.0dB
+ 0x22000088, // 19, -5.5dB
+ 0x24000090, // 20, -5.0dB
+ 0x26000098, // 21, -4.5dB
+ 0x288000a2, // 22, -4.0dB
+ 0x2ac000ab, // 23, -3.5dB
+ 0x2d4000b5, // 24, -3.0dB
+ 0x300000c0, // 25, -2.5dB
+ 0x32c000cb, // 26, -2.0dB
+ 0x35c000d7, // 27, -1.5dB
+ 0x390000e4, // 28, -1.0dB
+ 0x3c8000f2, // 29, -0.5dB
+ 0x40000100, // 30, +0dB
+ 0x43c0010f, // 31, +0.5dB
+ 0x47c0011f, // 32, +1.0dB
+ 0x4c000130, // 33, +1.5dB
+ 0x50800142, // 34, +2.0dB
+ 0x55400155, // 35, +2.5dB
+ 0x5a400169, // 36, +3.0dB
+ 0x5fc0017f, // 37, +3.5dB
+ 0x65400195, // 38, +4.0dB
+ 0x6b8001ae, // 39, +4.5dB
+ 0x71c001c7, // 40, +5.0dB
+ 0x788001e2, // 41, +5.5dB
+ 0x7f8001fe // 42, +6.0dB
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, // 0, -16.0dB
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, // 1, -15.5dB
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, // 2, -15.0dB
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, // 3, -14.5dB
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, // 4, -14.0dB
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, // 5, -13.5dB
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, // 6, -13.0dB
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, // 7, -12.5dB
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, // 8, -12.0dB
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, // 9, -11.5dB
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 10, -11.0dB
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 11, -10.5dB
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 12, -10.0dB
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 13, -9.5dB
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 14, -9.0dB
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, // 15, -8.5dB
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 16, -8.0dB
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, // 17, -7.5dB
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 18, -7.0dB
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, // 19, -6.5dB
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 20, -6.0dB
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, // 21, -5.5dB
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 22, -5.0dB
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, // 23, -4.5dB
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 24, -4.0dB
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, // 25, -3.5dB
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 26, -3.0dB
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, // 27, -2.5dB
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 28, -2.0dB
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, // 29, -1.5dB
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 30, -1.0dB
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, // 31, -0.5dB
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} // 32, +0dB
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8]= {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, // 0, -16.0dB
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 1, -15.5dB
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 2, -15.0dB
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 3, -14.5dB
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 4, -14.0dB
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 5, -13.5dB
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 6, -13.0dB
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, // 7, -12.5dB
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 8, -12.0dB
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 9, -11.5dB
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, // 10, -11.0dB
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, // 11, -10.5dB
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 12, -10.0dB
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 13, -9.5dB
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 14, -9.0dB
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 15, -8.5dB
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 16, -8.0dB
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 17, -7.5dB
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 18, -7.0dB
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, // 19, -6.5dB
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 20, -6.0dB
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 21, -5.5dB
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 22, -5.0dB
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, // 23, -4.5dB
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 24, -4.0dB
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, // 25, -3.5dB
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 26, -3.0dB
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, // 27, -2.5dB
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 28, -2.0dB
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, // 29, -1.5dB
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 30, -1.0dB
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, // 31, -0.5dB
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} // 32, +0dB
+};
+
+static const u32 edca_setting_dl[PEER_MAX] = {
+ 0xa44f, /* 0 UNKNOWN */
+ 0x5ea44f, /* 1 REALTEK_90 */
+ 0x5e4322, /* 2 REALTEK_92SE */
+ 0x5ea42b, /* 3 BROAD */
+ 0xa44f, /* 4 RAL */
+ 0xa630, /* 5 ATH */
+ 0x5ea630, /* 6 CISCO */
+ 0x5ea42b, /* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+ 0x5e4322, /* 0 UNKNOWN */
+ 0xa44f, /* 1 REALTEK_90 */
+ 0x5ea44f, /* 2 REALTEK_92SE */
+ 0x5ea32b, /* 3 BROAD */
+ 0x5ea422, /* 4 RAL */
+ 0x5ea322, /* 5 ATH */
+ 0x3ea430, /* 6 CISCO */
+ 0x5ea44f, /* 7 MARV */
+};
+
+static u8 rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9};
+static u8 rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11};
+
+
+u8 rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
+u8 rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
+
+u8 rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13},
+ {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13},
+ {0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18},
+};
+u8 rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+ {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] =
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
+u8 rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] =
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+u8 rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+void rtl8812ae_dm_read_and_config_txpower_track(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("===> rtl8812ae_dm_read_and_config_txpower_track\n"));
+
+
+ memcpy(rtldm->delta_swing_table_idx_24ga_p,
+ rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24ga_n,
+ rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_p,
+ rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_n,
+ rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+ rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+ rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+ rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+ rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_5ga_p,
+ rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5ga_n,
+ rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_p,
+ rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_n,
+ rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+void rtl8821ae_dm_read_and_config_txpower_track(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===> rtl8821ae_dm_read_and_config_txpower_track\n"));
+
+
+ memcpy(rtldm->delta_swing_table_idx_24ga_p,
+ rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24ga_n,
+ rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_p,
+ rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gb_n,
+ rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_24gccka_p,
+ rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gccka_n,
+ rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_p,
+ rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE);
+ memcpy(rtldm->delta_swing_table_idx_24gcckb_n,
+ rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE);
+
+ memcpy(rtldm->delta_swing_table_idx_5ga_p,
+ rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5ga_n,
+ rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_p,
+ rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+ memcpy(rtldm->delta_swing_table_idx_5gb_n,
+ rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3);
+}
+
+
+
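+/*
+ *Pick the highest offset in thermal_threshold[][] that _deltaThermal has
+ *reached, clamped to the table bounds.
+ */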
+#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \
+ do {\
+ for(_offset = 0; _offset < _size; _offset++)\
+ {\
+ if(_deltaThermal < thermal_threshold[_direction][_offset])\
+ {\
+ if(_offset != 0)\
+ _offset--;\
+ break;\
+ }\
+ } \
+ if(_offset >= _size)\
+ _offset = _size-1;\
+ } while(0)
+
+
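+/*
+ *Report the BB-swing offset from the base index for OFDM path A (type 0)
+ *or CCK (type 1); *pdirection encodes the sign and the magnitude is
+ *replicated into all four bytes of *poutwrite_val.
+ */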
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+ u8 type,u8 *pdirection,
+ u32 *poutwrite_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 pwr_val = 0;
+
+ if (type == 0){
+ if (rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] <=
+ rtlpriv->dm.bb_swing_idx_ofdm_base[RF90_PATH_A]) {
+ *pdirection = 1;
+ pwr_val = rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm[RF90_PATH_A];
+ } else {
+ *pdirection = 2;
+ pwr_val = rtldm->bb_swing_idx_ofdm[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A];
+ }
+	} else if (type == 1) {
+ if (rtldm->bb_swing_idx_cck <= rtldm->bb_swing_idx_cck_base) {
+ *pdirection = 1;
+ pwr_val = rtldm->bb_swing_idx_cck_base - rtldm->bb_swing_idx_cck;
+ } else {
+ *pdirection = 2;
+ pwr_val = rtldm->bb_swing_idx_cck - rtldm->bb_swing_idx_cck_base;
+ }
+ }
+
+ if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+ pwr_val = TXPWRTRACK_MAX_IDX;
+
+ *poutwrite_val = pwr_val |(pwr_val << 8)|(pwr_val << 16) | (pwr_val << 24);
+}
+
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ u8 p = 0;
+ rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+ rtldm->bb_swing_idx_cck = rtldm->default_cck_index;
+ rtldm->cck_index = 0;
+
+ for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) {
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->default_ofdm_index;
+ rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+
+ rtldm->power_index_offset[p] = 0;
+ rtldm->delta_power_index[p] = 0;
+ rtldm->delta_power_index_last[p] = 0;
+
+ rtldm->aboslute_ofdm_swing_idx[p] = 0; /*Initial Mix mode power tracking*/
+ rtldm->remnant_ofdm_swing_idx[p] = 0;
+ }
+
+ rtldm->modify_txagc_flag_path_a = false; /*Initial at Modify Tx Scaling Mode*/
+ rtldm->modify_txagc_flag_path_b = false; /*Initial at Modify Tx Scaling Mode*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+}
+
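+/*
+ *Look up the current BB swing value in the TX scaling table; returns
+ *TXSCALE_TABLE_SIZE when the value is not found.
+ */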
+u8 rtl8821ae_dm_get_swing_index(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 i = 0;
+ u32 bb_swing;
+
+ bb_swing =rtl8821ae_phy_query_bb_reg(hw, rtlhal->current_bandtype, RF90_PATH_A);
+
+ for (i = 0; i < TXSCALE_TABLE_SIZE; ++i)
+ if ( bb_swing == rtl8821ae_txscaling_table[i])
+ break;
+
+ return i;
+}
+
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 default_swing_index = 0;
+ u8 p = 0;
+
+ rtlpriv->dm.txpower_track_control = true;
+ rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter;
+ rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_dm_read_and_config_txpower_track(hw);
+ else
+ rtl8821ae_dm_read_and_config_txpower_track(hw);
+
+ default_swing_index = rtl8821ae_dm_get_swing_index(hw);
+
+ rtldm->default_ofdm_index = (default_swing_index == TXSCALE_TABLE_SIZE) ? 24 : default_swing_index;
+ rtldm->default_cck_index = 24;
+
+ rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index;
+ rtldm->cck_index = rtldm->default_cck_index;
+
+ for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p)
+ {
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
+ rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
+ rtldm->delta_power_index[p] = 0;
+ rtldm->power_index_offset[p] = 0;
+ rtldm->delta_power_index_last[p] = 0;
+ }
+}
+
+static void rtl8821ae_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ dm_pstable.pre_ccastate = CCA_MAX;
+ dm_pstable.cur_ccasate = CCA_MAX;
+ dm_pstable.pre_rfstate = RF_MAX;
+ dm_pstable.cur_rfstate = RF_MAX;
+ dm_pstable.rssi_val_min = 0;
+ dm_pstable.initialize = 0;
+}
+
+
+static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //dm_digtable.dig_enable_flag = true;
+ dm_digtable.cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
+ /*dm_digtable.pre_igvalue = 0;
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+ dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;*/
+ dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable.pre_cck_cca_thres = 0xff;
+ dm_digtable.cur_cck_cca_thres = 0x83;
+ dm_digtable.forbidden_igi = DM_DIG_MIN;
+ dm_digtable.large_fa_hit = 0;
+ dm_digtable.recover_cnt = 0;
+ dm_digtable.dig_dynamic_min_0 = DM_DIG_MIN;
+ dm_digtable.dig_dynamic_min_1 = DM_DIG_MIN;
+ dm_digtable.b_media_connect_0 = false;
+ dm_digtable.b_media_connect_1 = false;
+ rtlpriv->dm.b_dm_initialgain_enable = true;
+ dm_digtable.bt30_cur_igi = 0x32;
+}
+
+static void rtl8821ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.bdynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtlpriv->dm.bcurrent_turbo_edca = false;
+ rtlpriv->dm.bis_any_nonbepkts = false;
+ rtlpriv->dm.bis_cur_rdlstate = false;
+}
+
+
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+ p_ra->ratr_state = DM_RATR_STA_INIT;
+ p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.b_useramask = true;
+ else
+ rtlpriv->dm.b_useramask = false;
+
+ p_ra->high_rssi_thresh_for_ra = 50;
+ p_ra->low_rssi_thresh_for_ra = 20;
+}
+
+
+static void rtl8821ae_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.btxpower_tracking = true;
+ rtlpriv->dm.btxpower_trackinginit = false;
+ rtlpriv->dm.txpowercount = 0;
+ rtlpriv->dm.txpower_track_control = true;
+ rtlpriv->dm.thermalvalue = 0;
+
+ rtlpriv->dm.ofdm_index[0] = 30;
+ rtlpriv->dm.cck_index = 20;
+
+ rtlpriv->dm.bb_swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+
+ rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_B] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.delta_power_index[0] = 0;
+ rtlpriv->dm.delta_power_index_last[0] = 0;
+ rtlpriv->dm.power_index_offset[0] = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ (" rtlpriv->dm.btxpower_tracking = %d\n",
+ rtlpriv->dm.btxpower_tracking));
+}
+
+
+void rtl8821ae_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+
+ rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11));
+ rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+
+void rtl8821ae_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl8821ae_dm_diginit(hw);
+ rtl8821ae_dm_init_rate_adaptive_mask(hw);
+ rtl8812ae_dm_path_diversity_init(hw);
+ rtl8821ae_dm_init_edca_turbo(hw);
+ rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw);
+#if 1
+ rtl8821ae_dm_init_dynamic_bb_powersaving(hw);
+ rtl8821ae_dm_init_dynamic_txpower(hw);
+ rtl8821ae_dm_init_txpower_tracking(hw);
+#endif
+ rtl8821ae_dm_init_dynamic_atc_switch(hw);
+}
+
+void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dig *rtl_dm_dig = &(rtlpriv->dm.dm_digtable);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm = 0;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("Not connected to any \n"));
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx \n",
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb));
+ } else {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%x \n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+ }
+ } else {
+ rtl_dm_dig->min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Ext Port or disconnect PWDB = 0x%x \n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undecorated_pwdb_for_dm));
+}
+
+
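+/*
+ * Walk the station entry list under entry_list_lock to find the minimum
+ * and maximum smoothed PWDB, report the RX signal strength to the
+ * firmware (H2C_RSSI_REPORT when the RA mask is firmware-handled,
+ * register 0x4fe otherwise) and refresh dm_digtable.rssi_val_min.
+ */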
+static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 h2c_parameter[3] = { 0 };
+ long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+
+ /* AP & ADHOC & MESH */
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ if (drv_priv->rssi_stat.undecorated_smoothed_pwdb < tmp_entry_min_pwdb)
+ tmp_entry_min_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+ if (drv_priv->rssi_stat.undecorated_smoothed_pwdb > tmp_entry_max_pwdb)
+ tmp_entry_max_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+
+ /*h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = drv_priv->rssi_stat;
+ rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);*/
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ /* If associated entry is found */
+ if (tmp_entry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = tmp_entry_max_pwdb;
+ RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMaxPWDB = 0x%lx(%ld)\n",
+ tmp_entry_max_pwdb, tmp_entry_max_pwdb));
+ } else {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+ }
+ /* If associated entry is found */
+ if (tmp_entry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = tmp_entry_min_pwdb;
+ RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMinPWDB = 0x%lx(%ld)\n",
+ tmp_entry_min_pwdb, tmp_entry_min_pwdb));
+ } else {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+ }
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.b_useramask) {
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = 0;
+ rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undecorated_smoothed_pwdb);
+ }
+ rtl8821ae_dm_find_minimum_rssi(hw);
+ dm_digtable.rssi_val_min = rtlpriv->dm.dm_digtable.min_undecorated_pwdb_for_dm;
+}
+
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (dm_digtable.cur_cck_cca_thres != current_cca)
+ rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11AC, current_cca);
+
+ dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+ dm_digtable.cur_cck_cca_thres = current_cca;
+}
+
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (dm_digtable.stop_dig)
+ return;
+
+ if (dm_digtable.cur_igvalue != current_igi) {
+ rtl_set_bbreg(hw, DM_REG_IGI_A_11AC, DM_BIT_IGI_11AC, current_igi);
+ if (rtlpriv->phy.rf_type != RF_1T1R)
+ rtl_set_bbreg(hw, DM_REG_IGI_B_11AC, DM_BIT_IGI_11AC, current_igi);
+ }
+ /* dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; */
+ dm_digtable.cur_igvalue = current_igi;
+}
+
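+/*
+ * Dynamic Initial Gain (DIG): derive the allowed IGI window from the
+ * minimum RSSI and the false alarm statistics, run the large-false-alarm
+ * recovery counter, then clamp the new IGI to the window and program it
+ * through rtl8821ae_dm_write_dig().
+ */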
+static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 dig_dynamic_min;
+ u8 dig_max_of_min;
+ bool first_connect, first_disconnect;
+ u8 dm_dig_max, dm_dig_min, offset;
+ u8 current_igi = dm_digtable.cur_igvalue;
+
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig()==>\n"));
+
+
+ if (mac->act_scanning == true) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig() Return: In Scan Progress \n"));
+ return;
+ }
+
+ /*add by Neil Chen to avoid PSD is processing*/
+ dig_dynamic_min = dm_digtable.dig_dynamic_min_0;
+ first_connect = (mac->link_state >= MAC80211_LINKED) &&
+ (dm_digtable.b_media_connect_0 == false);
+ first_disconnect = (mac->link_state < MAC80211_LINKED) &&
+ (dm_digtable.b_media_connect_0 == true);
+
+ /*1 Boundary Decision*/
+
+
+ dm_dig_max = 0x5A;
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+ dm_dig_min = DM_DIG_MIN;
+ else
+ dm_dig_min = 0x1C;
+
+ dig_max_of_min = DM_DIG_MAX_AP;
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE)
+ offset = 20;
+ else
+ offset = 10;
+
+ if ((dm_digtable.rssi_val_min + offset) > dm_dig_max)
+ dm_digtable.rx_gain_range_max = dm_dig_max;
+ else if ((dm_digtable.rssi_val_min + offset) < dm_dig_min)
+ dm_digtable.rx_gain_range_max = dm_dig_min;
+ else
+ dm_digtable.rx_gain_range_max = dm_digtable.rssi_val_min + offset;
+
+ if (rtlpriv->dm.b_one_entry_only) {
+ offset = 0;
+
+ if (dm_digtable.rssi_val_min - offset < dm_dig_min)
+ dig_dynamic_min = dm_dig_min;
+ else if (dm_digtable.rssi_val_min - offset > dig_max_of_min)
+ dig_dynamic_min = dig_max_of_min;
+ else
+ dig_dynamic_min = dm_digtable.rssi_val_min - offset;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n",
+ dig_dynamic_min));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : dm_digtable.rssi_val_min=%d\n",
+ dm_digtable.rssi_val_min));
+ } else {
+ dig_dynamic_min = dm_dig_min;
+ }
+ } else {
+ dm_digtable.rx_gain_range_max = dm_dig_max;
+ dig_dynamic_min = dm_dig_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig() : No Link\n"));
+ }
+
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Abnormally false alarm case. \n"));
+
+ if (dm_digtable.large_fa_hit != 3)
+ dm_digtable.large_fa_hit++;
+ if (dm_digtable.forbidden_igi < current_igi) {
+ dm_digtable.forbidden_igi = current_igi;
+ dm_digtable.large_fa_hit = 1;
+ }
+
+ if (dm_digtable.large_fa_hit >= 3) {
+ if ((dm_digtable.forbidden_igi + 1) > dm_digtable.rx_gain_range_max)
+ dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+ else
+ dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+ dm_digtable.recover_cnt = 3600;
+ }
+
+ } else {
+ /*Recovery mechanism for IGI lower bound*/
+ if (dm_digtable.recover_cnt != 0) {
+ dm_digtable.recover_cnt--;
+ } else {
+ if (dm_digtable.large_fa_hit < 3) {
+ if ((dm_digtable.forbidden_igi - 1) < dig_dynamic_min) {
+ dm_digtable.forbidden_igi = dig_dynamic_min;
+ dm_digtable.rx_gain_range_min = dig_dynamic_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Normal Case: At Lower Bound\n"));
+ } else {
+ dm_digtable.forbidden_igi--;
+ dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1);
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else {
+ dm_digtable.large_fa_hit = 0;
+ }
+ }
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): pDM_DigTable->LargeFAHit=%d\n",
+ dm_digtable.large_fa_hit));
+
+ if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
+ dm_digtable.rx_gain_range_min = dm_dig_min;
+
+ if (dm_digtable.rx_gain_range_min > dm_digtable.rx_gain_range_max)
+ dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max;
+
+ /*Adjust initial gain by false alarm*/
+ if (mac->link_state >= MAC80211_LINKED) {
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG AfterLink\n"));
+ if (first_connect) {
+ if (dm_digtable.rssi_val_min <= dig_max_of_min)
+ current_igi = dm_digtable.rssi_val_min;
+ else
+ current_igi = dig_max_of_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig: First Connect\n"));
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+ current_igi = current_igi + 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+ current_igi = current_igi + 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ current_igi = current_igi - 2;
+
+ if ((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) &&
+ (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) {
+ current_igi = dm_digtable.rx_gain_range_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n"));
+ }
+ }
+ } else {
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG BeforeLink\n"));
+ if (first_disconnect) {
+ current_igi = dm_digtable.rx_gain_range_min;
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): First DisConnect \n"));
+ } else {
+ /*2012.03.30 LukeLee: enable DIG before link but with very high thresholds*/
+ if (rtlpriv->falsealm_cnt.cnt_all > 2000)
+ current_igi = current_igi + 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 600)
+ current_igi = current_igi + 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < 300)
+ current_igi = current_igi - 2;
+ if (current_igi >= 0x3e)
+ current_igi = 0x3e;
+ RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig(): England DIG \n"));
+ }
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): DIG End Adjust IGI\n"));
+ /* Check initial gain by upper/lower bound*/
+
+ if (current_igi > dm_digtable.rx_gain_range_max)
+ current_igi = dm_digtable.rx_gain_range_max;
+ if (current_igi < dm_digtable.rx_gain_range_min)
+ current_igi = dm_digtable.rx_gain_range_min;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+ dm_digtable.rx_gain_range_max, dm_digtable.rx_gain_range_min));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all));
+ RT_TRACE(COMP_DIG, DBG_LOUD,
+ ("rtl8821ae_dm_dig(): CurIGValue=0x%x\n", current_igi));
+
+ rtl8821ae_dm_write_dig(hw, current_igi);
+ dm_digtable.b_media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? true : false);
+ dm_digtable.dig_dynamic_min_0 = dig_dynamic_min;
+}
+
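+/*
+ * Record whether exactly one peer entry exists (a linked STA interface,
+ * or a single entry in AP/IBSS/mesh mode); DIG uses this flag to pick a
+ * per-peer lower bound for the initial gain.
+ */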
+static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cnt = 0;
+ struct rtl_sta_info *drv_priv;
+
+ rtlpriv->dm.b_one_entry_only = false;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ rtlpriv->dm.b_one_entry_only = true;
+ return;
+ }
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ cnt++;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ if (cnt == 1)
+ rtlpriv->dm.b_one_entry_only = true;
+ }
+}
+
+
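+/*
+ * Read the OFDM and CCK false alarm counters from the baseband, add the
+ * CCK count only while the CCK RX path is enabled, then pulse the reset
+ * bits so the next DIG round starts from a clean counter.
+ */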
+static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+ u32 cck_enable = 0;
+
+ /*read OFDM FA counter*/
+ falsealm_cnt->cnt_ofdm_fail = rtl_get_bbreg(hw, ODM_REG_OFDM_FA_11AC, BMASKLWORD);
+ falsealm_cnt->cnt_cck_fail = rtl_get_bbreg(hw, ODM_REG_CCK_FA_11AC, BMASKLWORD);
+
+ cck_enable = rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, BIT(28));
+ if (cck_enable) /*if(pDM_Odm->pBandType == ODM_BAND_2_4G)*/
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail;
+ else
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
+
+ /*reset OFDM FA counter*/
+ rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
+ rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
+ /* reset CCK FA counter*/
+ rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
+ rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("Cnt_Cck_fail=%d\n",
+ falsealm_cnt->cnt_cck_fail));
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("cnt_ofdm_fail=%d\n",
+ falsealm_cnt->cnt_ofdm_fail));
+ RT_TRACE(COMP_DIG, DBG_LOUD, ("Total False Alarm=%d\n",
+ falsealm_cnt->cnt_all));
+}
+
+void rtl8812ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.btxpower_tracking)
+ return;
+
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), 0x03);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 8812 Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking direct call!!\n"));
+ rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
+ tm_trigger = 0;
+ }
+}
+
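+/*
+ * Re-run IQ calibration shortly after association: linked_interval is
+ * advanced each time this routine runs and the chip-specific IQK is
+ * triggered on the second tick, after which the counter saturates.
+ */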
+static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ /*if ((*rtldm->p_channel != rtldm->pre_channel )
+ && (!mac->act_scanning)) {
+ rtldm->pre_channel = *rtldm->p_channel;
+ rtldm->linked_interval = 0;
+ }*/
+
+ if(rtldm->linked_interval < 3)
+ rtldm->linked_interval ++;
+
+ if(rtldm->linked_interval == 2)
+ {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_phy_iq_calibrate(hw, false);
+ else
+ rtl8821ae_phy_iq_calibrate(hw, false);
+ }
+ } else {
+ rtldm->linked_interval = 0;
+ }
+}
+
+
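+/*
+ * Select the per-path temperature up/down delta swing tables for the
+ * current channel: 2.4 GHz CCK or OFDM tables for channels 1-14, one of
+ * three 5 GHz groups otherwise; unknown channels fall back to the
+ * default 2.4 GHz table.
+ */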
+void rtl8812ae_get_delta_swing_table(
+ struct ieee80211_hw *hw,
+ u8 **temperature_up_a,
+ u8 **temperature_down_a,
+ u8 **temperature_up_b,
+ u8 **temperature_down_b
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 channel = rtlphy->current_channel;
+ u8 rate = rtldm->tx_rate;
+
+
+ if (1 <= channel && channel <= 14) {
+ if (RX_HAL_IS_CCK_RATE(rate)) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+ } else {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+ }
+ } else if (36 <= channel && channel <= 64) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+ } else if (100 <= channel && channel <= 140) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+ } else if (149 <= channel && channel <= 173) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+ } else {
+ *temperature_up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ *temperature_up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ }
+
+ return;
+}
+
+void rtl8812ae_phy_lccalibrate(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n"));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n"));
+
+}
+
+void rtl8812ae_dm_update_init_rate(
+ struct ieee80211_hw *hw,
+ u8 rate
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 p = 0;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Get C2H Command! Rate=0x%x\n", rate));
+
+ rtldm->tx_rate = rate;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, RF90_PATH_A, 0);
+ } else {
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, BBSWING, p, 0);
+ }
+}
+
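+/*
+ * Translate a hardware descriptor rate (DESC_RATE*) into the MGN_* rate
+ * encoding used by the power tracking limit table; unknown values fall
+ * back to MGN_1M.
+ */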
+u8 rtl8812ae_hw_rate_to_mrate(
+ struct ieee80211_hw *hw,
+ u8 rate
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 ret_rate = MGN_1M;
+
+
+ switch (rate) {
+ case DESC_RATE1M: ret_rate = MGN_1M; break;
+ case DESC_RATE2M: ret_rate = MGN_2M; break;
+ case DESC_RATE5_5M: ret_rate = MGN_5_5M; break;
+ case DESC_RATE11M: ret_rate = MGN_11M; break;
+ case DESC_RATE6M: ret_rate = MGN_6M; break;
+ case DESC_RATE9M: ret_rate = MGN_9M; break;
+ case DESC_RATE12M: ret_rate = MGN_12M; break;
+ case DESC_RATE18M: ret_rate = MGN_18M; break;
+ case DESC_RATE24M: ret_rate = MGN_24M; break;
+ case DESC_RATE36M: ret_rate = MGN_36M; break;
+ case DESC_RATE48M: ret_rate = MGN_48M; break;
+ case DESC_RATE54M: ret_rate = MGN_54M; break;
+ case DESC_RATEMCS0: ret_rate = MGN_MCS0; break;
+ case DESC_RATEMCS1: ret_rate = MGN_MCS1; break;
+ case DESC_RATEMCS2: ret_rate = MGN_MCS2; break;
+ case DESC_RATEMCS3: ret_rate = MGN_MCS3; break;
+ case DESC_RATEMCS4: ret_rate = MGN_MCS4; break;
+ case DESC_RATEMCS5: ret_rate = MGN_MCS5; break;
+ case DESC_RATEMCS6: ret_rate = MGN_MCS6; break;
+ case DESC_RATEMCS7: ret_rate = MGN_MCS7; break;
+ case DESC_RATEMCS8: ret_rate = MGN_MCS8; break;
+ case DESC_RATEMCS9: ret_rate = MGN_MCS9; break;
+ case DESC_RATEMCS10: ret_rate = MGN_MCS10; break;
+ case DESC_RATEMCS11: ret_rate = MGN_MCS11; break;
+ case DESC_RATEMCS12: ret_rate = MGN_MCS12; break;
+ case DESC_RATEMCS13: ret_rate = MGN_MCS13; break;
+ case DESC_RATEMCS14: ret_rate = MGN_MCS14; break;
+ case DESC_RATEMCS15: ret_rate = MGN_MCS15; break;
+ case DESC_RATEVHT1SS_MCS0: ret_rate = MGN_VHT1SS_MCS0; break;
+ case DESC_RATEVHT1SS_MCS1: ret_rate = MGN_VHT1SS_MCS1; break;
+ case DESC_RATEVHT1SS_MCS2: ret_rate = MGN_VHT1SS_MCS2; break;
+ case DESC_RATEVHT1SS_MCS3: ret_rate = MGN_VHT1SS_MCS3; break;
+ case DESC_RATEVHT1SS_MCS4: ret_rate = MGN_VHT1SS_MCS4; break;
+ case DESC_RATEVHT1SS_MCS5: ret_rate = MGN_VHT1SS_MCS5; break;
+ case DESC_RATEVHT1SS_MCS6: ret_rate = MGN_VHT1SS_MCS6; break;
+ case DESC_RATEVHT1SS_MCS7: ret_rate = MGN_VHT1SS_MCS7; break;
+ case DESC_RATEVHT1SS_MCS8: ret_rate = MGN_VHT1SS_MCS8; break;
+ case DESC_RATEVHT1SS_MCS9: ret_rate = MGN_VHT1SS_MCS9; break;
+ case DESC_RATEVHT2SS_MCS0: ret_rate = MGN_VHT2SS_MCS0; break;
+ case DESC_RATEVHT2SS_MCS1: ret_rate = MGN_VHT2SS_MCS1; break;
+ case DESC_RATEVHT2SS_MCS2: ret_rate = MGN_VHT2SS_MCS2; break;
+ case DESC_RATEVHT2SS_MCS3: ret_rate = MGN_VHT2SS_MCS3; break;
+ case DESC_RATEVHT2SS_MCS4: ret_rate = MGN_VHT2SS_MCS4; break;
+ case DESC_RATEVHT2SS_MCS5: ret_rate = MGN_VHT2SS_MCS5; break;
+ case DESC_RATEVHT2SS_MCS6: ret_rate = MGN_VHT2SS_MCS6; break;
+ case DESC_RATEVHT2SS_MCS7: ret_rate = MGN_VHT2SS_MCS7; break;
+ case DESC_RATEVHT2SS_MCS8: ret_rate = MGN_VHT2SS_MCS8; break;
+ case DESC_RATEVHT2SS_MCS9: ret_rate = MGN_VHT2SS_MCS9; break;
+
+ default:
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("HwRateToMRate8812(): Non supported Rate [%x]!!!\n",rate ));
+ break;
+ }
+ return ret_rate;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: rtl8812ae_dm_txpwr_track_set_pwr()
+ *
+ * Overview: 8812AE change all channel tx power according to flag.
+ * OFDM & CCK are all different.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 final_bb_swing_idx[2];
+ u8 pwr_tracking_limit = 26; /*+1.0dB*/
+ u8 tx_rate = 0xFF;
+ s8 final_ofdm_swing_index = 0;
+
+ if (rtldm->tx_rate != 0xFF)
+ tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+ /*CCK*/
+ if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+ pwr_tracking_limit = 32; /*+4dB*/
+ /*OFDM*/
+ else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if(tx_rate == MGN_54M)
+ pwr_tracking_limit = 28; /*+2dB*/
+ /*HT*/
+ else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+
+ else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+
+ /*2 VHT*/
+ else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+
+ else if((tx_rate >= MGN_VHT2SS_MCS0)&&(tx_rate <= MGN_VHT2SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT2SS_MCS3)&&(tx_rate <= MGN_VHT2SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT2SS_MCS5)&&(tx_rate <= MGN_VHT2SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT2SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+ else
+ pwr_tracking_limit = 24;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+ if (method == BBSWING) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (rf_path == RF90_PATH_A) {
+ final_bb_swing_idx[RF90_PATH_A] =
+ (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+ } else {
+ final_bb_swing_idx[RF90_PATH_B] =
+ rtldm->ofdm_index[RF90_PATH_B] > pwr_tracking_limit ? \
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_B];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_B], final_bb_swing_idx[RF90_PATH_B]));
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_B]]);
+ }
+ } else if (method == MIX_MODE) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->DefaultOfdmIndex=%d, \
+ pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+ rf_path ));
+
+
+ final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+ if (rf_path == RF90_PATH_A) {
+ if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+
+ rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_cck_idx = final_ofdm_swing_index;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ rtldm->modify_txagc_flag_path_a = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ if (rf_path == RF90_PATH_B) {
+ if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_b = true;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_b = true;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path] ));
+ } else {
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_b) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page E{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B);
+
+ rtldm->modify_txagc_flag_path_b = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_B dm_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ } else {
+ return;
+ }
+}
+
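+/*
+ * Thermal-meter based TX power tracking for the 8812AE: average the RF
+ * thermal reading, compare it against the eFuse calibration value, walk
+ * the OFDM/CCK BB swing indexes through the delta swing tables, apply
+ * the result to both paths in MIX_MODE and, once the temperature has
+ * drifted past IQK_THRESHOLD, trigger LCK/IQK.
+ */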
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+ u8 thermal_value_avg_count = 0;
+ u32 thermal_value_avg = 0;
+
+ u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+ u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+ /* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+ u8 *delta_swing_table_idx_tup_a;
+ u8 *delta_swing_table_idx_tdown_a;
+ u8 *delta_swing_table_idx_tup_b;
+ u8 *delta_swing_table_idx_tdown_b;
+
+ /*2. Initialization (7 steps in total)*/
+ rtl8812ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+ (u8**)&delta_swing_table_idx_tdown_a,
+ (u8**)&delta_swing_table_idx_tup_b,
+ (u8**)&delta_swing_table_idx_tdown_b);
+
+ rtldm->btxpower_trackinginit = true;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \
+ \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+ %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index));
+
+ thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/
+ if (!rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+
+
+ /* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+ if (rtlhal->reloadtxpowerindex) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+ }
+
+ /*4. Calculate average thermal meter*/
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+ rtldm->thermalvalue_avg_index++;
+ if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+ /*Average times = c.AverageThermalNum*/
+ rtldm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_8812A; i++) {
+ if (rtldm->thermalvalue_avg[i]) {
+ thermal_value_avg += rtldm->thermalvalue_avg[i];
+ thermal_value_avg_count++;
+ }
+ }
+
+ /*Calculate Average ThermalValue after average enough times*/
+ if (thermal_value_avg_count) {
+ thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+ }
+
+ /*5. Calculate delta, delta_LCK, delta_IQK.*/
+ /*"delta" here is used to determine whether thermal value changes or not.*/
+ delta = (thermal_value > rtldm->thermalvalue) ?
+ (thermal_value - rtldm->thermalvalue) :
+ (rtldm->thermalvalue - thermal_value);
+ delta_lck = (thermal_value > rtldm->thermalvalue_lck) ?
+ (thermal_value - rtldm->thermalvalue_lck) :
+ (rtldm->thermalvalue_lck - thermal_value);
+ delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ?
+ (thermal_value - rtldm->thermalvalue_iqk) :
+ (rtldm->thermalvalue_iqk - thermal_value);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk));
+
+ /* 6. If necessary, do LCK. */
+
+ if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD));
+ rtldm->thermalvalue_lck = thermal_value;
+ rtl8812ae_phy_lccalibrate(hw);
+ }
+
+ /*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+ if (delta > 0 && rtldm->txpower_track_control)
+ {
+ /*"delta" here is used to record the absolute value of difference.*/
+ delta = thermal_value > rtlefuse->eeprom_thermalmeter ?
+ (thermal_value - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermal_value);
+
+ if (delta >= TXPWR_TRACK_TABLE_SIZE)
+ delta = TXPWR_TRACK_TABLE_SIZE - 1;
+
+ /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
+ if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_b[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_b[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+ rtldm->delta_power_index[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+ /* Record delta swing for mix mode power tracking*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_b[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B];
+ rtldm->delta_power_index[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B]));
+ }
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n================================ [Path-%c] \
+ Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B')));
+
+ if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+ /*If Thermal value changes but lookup table value still the same*/
+ rtldm->power_index_offset[p] = 0;
+ else
+ rtldm->power_index_offset[p] =
+ rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+ /*Power Index Diff between 2 times Power Tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p] ,
+ rtldm->delta_power_index_last[p]));
+
+ rtldm->ofdm_index[p] =
+ rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+ rtldm->cck_index =
+ rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+ rtldm->bb_swing_idx_cck = rtldm->cck_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+ /*************Print BB Swing Base and Index Offset*************/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_cck,
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->power_index_offset[p]));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]));
+
+ /*7.1 Handle boundary conditions of index.*/
+
+
+ if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+ {
+ rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+ }
+ else if (rtldm->ofdm_index[p] < ofdm_min_index)
+ {
+ rtldm->ofdm_index[p] = ofdm_min_index;
+ }
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n======================================================\
+ ==================================================\n"));
+ if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+ rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+ ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue));
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtldm->power_index_offset[p] = 0;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p]));
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+ rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+ rtldm->txpower_track_control)
+ {
+ /*7.2 Configure the Swing Table to adjust Tx Power.*/
+ /*Always TRUE after Tx Power is adjusted by power tracking.*/
+ /*
+ 2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+ to increase TX power. Otherwise, EVM will be bad.
+
+ 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+ */
+ if (thermal_value > rtldm->thermalvalue)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ }
+
+ if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0);
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ }
+
+ rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value));
+
+ rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/
+
+ }
+ /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+ if (delta_iqk >= IQK_THRESHOLD) {
+
+ if (!rtlphy->b_iqk_in_progress) {
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+ }
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
+void rtl8821ae_get_delta_swing_table(
+ struct ieee80211_hw *hw,
+ u8 **temperature_up_a,
+ u8 **temperature_down_a,
+ u8 **temperature_up_b,
+ u8 **temperature_down_b
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 channel = rtlphy->current_channel;
+ u8 rate = rtldm->tx_rate;
+
+
+ if (1 <= channel && channel <= 14) {
+ if (RX_HAL_IS_CCK_RATE(rate)) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n;
+ } else {
+ *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p;
+ *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n;
+ *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p;
+ *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n;
+ }
+ } else if (36 <= channel && channel <= 64) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0];
+ } else if (100 <= channel && channel <= 140) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1];
+ } else if (149 <= channel && channel <= 173) {
+ *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2];
+ *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2];
+ *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2];
+ *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2];
+ } else {
+ *temperature_up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ *temperature_up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack;
+ *temperature_down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack;
+ }
+
+ return;
+}
+
+void rtl8821ae_phy_lccalibrate(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n"));
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n"));
+
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: rtl8821ae_dm_txpwr_track_set_pwr()
+ *
+ * Overview: 8821AE change all channel tx power according to flag.
+ * OFDM & CCK are all different.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 final_bb_swing_idx[1];
+ u8 pwr_tracking_limit = 26; /*+1.0dB*/
+ u8 tx_rate = 0xFF;
+ s8 final_ofdm_swing_index = 0;
+
+ if (rtldm->tx_rate != 0xFF)
+ tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/
+ /*CCK*/
+ if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M))
+ pwr_tracking_limit = 32; /*+4dB*/
+ /*OFDM*/
+ else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M))
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if(tx_rate == MGN_54M)
+ pwr_tracking_limit = 28; /*+2dB*/
+ /*HT*/
+ else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+#if 0
+ else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+#endif
+ /*2 VHT*/
+ else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/
+ pwr_tracking_limit = 34; /*+5dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/
+ pwr_tracking_limit = 30; /*+3dB*/
+ else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/
+ pwr_tracking_limit = 28; /*+2dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/
+ pwr_tracking_limit = 26; /*+1dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/
+ pwr_tracking_limit = 24; /*+0dB*/
+ else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/
+ pwr_tracking_limit = 22; /*-1dB*/
+ else
+ pwr_tracking_limit = 24;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit));
+
+
+ if (method == BBSWING) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpwr_track_set_pwr\n"));
+
+ if (rf_path == RF90_PATH_A) {
+ final_bb_swing_idx[RF90_PATH_A] =
+ (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ?
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A];
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \
+ pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A]));
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]);
+ }
+ } else if (method == MIX_MODE) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->DefaultOfdmIndex=%d, \
+ pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path],
+ rf_path ));
+
+
+ final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path];
+
+ if (rf_path == RF90_PATH_A) {
+ if (final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher than limit*/
+
+ rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n",
+ pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else if (final_ofdm_swing_index < 0) {
+ rtldm->remnant_cck_idx = final_ofdm_swing_index;
+ /* CCK Follow the same compensate value as Path A*/
+ rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index;
+
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]);
+
+ rtldm->modify_txagc_flag_path_a = true;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]));
+ } else {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n",
+ final_ofdm_swing_index));
+
+ if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/
+ rtldm->remnant_cck_idx = 0;
+ rtldm->remnant_ofdm_swing_idx[rf_path] = 0;
+
+ /*Set TxAGC Page C{};*/
+ rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A);
+
+ rtldm->modify_txagc_flag_path_a = false;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n"));
+ }
+ }
+ }
+
+ } else {
+ return;
+ }
+}
+
+
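+/*
+ * 8821AE variant of the thermal-meter TX power tracking routine above;
+ * it follows the same seven steps, but only the path A swing tables are
+ * consulted and updated.
+ */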
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
+ u8 thermal_value_avg_count = 0;
+ u32 thermal_value_avg = 0;
+
+ u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/
+ u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/
+
+ /* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/
+ u8 *delta_swing_table_idx_tup_a;
+ u8 *delta_swing_table_idx_tdown_a;
+ u8 *delta_swing_table_idx_tup_b;
+ u8 *delta_swing_table_idx_tdown_b;
+
+ /*2. Initialization ( 7 steps in total )*/
+ rtl8821ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a,
+ (u8**)&delta_swing_table_idx_tdown_a,
+ (u8**)&delta_swing_table_idx_tup_b,
+ (u8**)&delta_swing_table_idx_tdown_b);
+
+ rtldm->btxpower_trackinginit = true;
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \
+ \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\
+ %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index));
+
+ thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/
+ if (!rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+
+
+ /* 3. Initialize ThermalValues of RFCalibrateInfo*/
+
+ if (rtlhal->reloadtxpowerindex) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+ }
+
+ /*4. Calculate average thermal meter*/
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
+ rtldm->thermalvalue_avg_index++;
+ if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A)
+ /*Average times = c.AverageThermalNum*/
+ rtldm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_8812A; i++) {
+ if (rtldm->thermalvalue_avg[i]) {
+ thermal_value_avg += rtldm->thermalvalue_avg[i];
+ thermal_value_avg_count++;
+ }
+ }
+
+ /*Calculate Average ThermalValue after average enough times*/
+ if (thermal_value_avg_count) {
+ thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+ }
+
+ /*5. Calculate delta, delta_LCK, delta_IQK.*/
+ /*"delta" here is used to determine whether thermal value changes or not.*/
+ delta = (thermal_value > rtldm->thermalvalue) ?
+ (thermal_value - rtldm->thermalvalue) :
+ (rtldm->thermalvalue - thermal_value);
+ delta_lck = (thermal_value > rtldm->thermalvalue_lck) ?
+ (thermal_value - rtldm->thermalvalue_lck) :
+ (rtldm->thermalvalue_lck - thermal_value);
+ delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ?
+ (thermal_value - rtldm->thermalvalue_iqk) :
+ (rtldm->thermalvalue_iqk - thermal_value);
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk));
+
+ /* 6. If necessary, do LCK. */
+
+ if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD));
+ rtldm->thermalvalue_lck = thermal_value;
+ rtl8821ae_phy_lccalibrate(hw);
+ }
+
+ /*7. If necessary, move the index of swing table to adjust Tx power.*/
+
+ if (delta > 0 && rtldm->txpower_track_control)
+ {
+ /*"delta" here is used to record the absolute value of difference.*/
+ delta = thermal_value > rtlefuse->eeprom_thermalmeter ?
+ (thermal_value - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermal_value);
+
+ if (delta >= TXSCALE_TABLE_SIZE)
+ delta = TXSCALE_TABLE_SIZE - 1;
+
+ /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
+
+ if(thermal_value > rtlefuse->eeprom_thermalmeter) {
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]));
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta];
+ /*Record delta swing for mix mode power tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]));
+
+ rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta];
+ /* Record delta swing for mix mode power tracking*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A]));
+ }
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n================================ [Path-%c] \
+ Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B')));
+
+ if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p])
+ /*If Thermal value changes but lookup table value still the same*/
+ rtldm->power_index_offset[p] = 0;
+ else
+ rtldm->power_index_offset[p] =
+ rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p];
+ /*Power Index Diff between 2 times Power Tracking*/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p] ,
+ rtldm->delta_power_index_last[p]));
+
+ rtldm->ofdm_index[p] =
+ rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p];
+ rtldm->cck_index =
+ rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p];
+
+ rtldm->bb_swing_idx_cck = rtldm->cck_index;
+ rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p];
+
+ /*************Print BB Swing Base and Index Offset*************/
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_cck,
+ rtldm->bb_swing_idx_cck_base,
+ rtldm->power_index_offset[p]));
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->bb_swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]));
+
+ /*7.1 Handle boundary conditions of index.*/
+
+
+ if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1)
+ {
+ rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1;
+ }
+ else if (rtldm->ofdm_index[p] < ofdm_min_index)
+ {
+ rtldm->ofdm_index[p] = ofdm_min_index;
+ }
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("\n\n======================================================\
+ ==================================================\n"));
+ if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1)
+ rtldm->cck_index = TXSCALE_TABLE_SIZE -1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \
+ ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue));
+
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtldm->power_index_offset[p] = 0;
+ }
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->bb_swing_idx_ofdm_base[p]));
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
+ rtldm->power_index_offset[RF90_PATH_B] != 0 ) &&
+ rtldm->txpower_track_control)
+ {
+ /*7.2 Configure the Swing Table to adjust Tx Power.*/
+ /*Always TRUE after Tx Power is adjusted by power tracking.*/
+ /*
+ 2012/04/23 MH According to Luke's suggestion, we can not write BB digital
+ to increase TX power. Otherwise, EVM will be bad.
+
+ 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E.
+ */
+ if (thermal_value > rtldm->thermalvalue)
+ {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue));
+ }
+
+ if (thermal_value > rtlefuse->eeprom_thermalmeter) {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter));
+
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n"));
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+				rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel);
+
+ }
+
+ rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/
+ for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p];
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value));
+
+ rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/
+
+ }
+ /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/
+ if ((delta_iqk >= IQK_THRESHOLD)) {
+
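+		/* Skip if an IQK is already running; the in-progress flag is
+		 * toggled under iqk_lock around the calibration call. */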
+ if ( !rtlphy->b_iqk_in_progress) {
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ rtl8821ae_do_iqk(hw, delta_iqk, thermal_value, 8);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+ }
+
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+		("<===rtl8821ae_dm_txpower_tracking_callback_thermalmeter\n"));
+}
+
+
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger = 0;
+
+ //if (!rtlpriv->dm.btxpower_tracking)
+ // return;
+
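+	/* Alternate between two watchdog passes: the first pass arms the RF
+	 * thermal meter (RF_T_METER_88E bits 17:16), the next pass reads it
+	 * back by running the tracking callback and then re-arms. */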
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
+ 0x03);
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 8821ae Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking !!\n"));
+
+ rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
+ tm_trigger = 0;
+ }
+}
+
+
+void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra;
+ u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra;
+ u8 go_up_gap = 5;
+ struct ieee80211_sta *sta = NULL;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("driver is going to unload\n"));
+ return;
+ }
+
+ if (!rtlpriv->dm.b_useramask) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+
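+		/* Add hysteresis: when the previous RA state was lower, raise the
+		 * thresholds by go_up_gap so small RSSI fluctuations do not make
+		 * the rate mask toggle between states. */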
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra += go_up_gap;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra += go_up_gap;
+ low_rssithresh_for_ra += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state ) {
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("RSSI = %ld\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb));
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+ RT_TRACE(COMP_RATE, DBG_LOUD,
+ ("PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state));
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state);
+ rcu_read_unlock();
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
+bool rtl8821ae_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+ return true;
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+ return true;
+
+ return false;
+}
+
+void rtl8821ae_dm_edca_choose_traffic_idx(
+ struct ieee80211_hw *hw, u64 cur_tx_bytes, u64 cur_rx_bytes, bool b_bias_on_rx,
+ bool *pb_is_cur_rdl_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if(b_bias_on_rx)
+ {
+ if (cur_tx_bytes > (cur_rx_bytes*4)) {
+ *pb_is_cur_rdl_state = false;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Uplink Traffic\n "));
+ } else {
+ *pb_is_cur_rdl_state = true;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Balance Traffic\n"));
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes*4)) {
+ *pb_is_cur_rdl_state = true;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Downlink Traffic\n"));
+ } else {
+ *pb_is_cur_rdl_state = false;
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Balance Traffic\n"));
+ }
+ }
+ return ;
+}
+
+static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ /*Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.*/
+ unsigned long cur_tx_ok_cnt = 0;
+ unsigned long cur_rx_ok_cnt = 0;
+ u32 edca_be_ul = 0x5ea42b;
+ u32 edca_be_dl = 0x5ea42b;
+ u32 edca_be = 0x5ea42b;
+ u8 iot_peer = 0;
+ bool *pb_is_cur_rdl_state = NULL;
+ bool b_last_is_cur_rdl_state = false;
+ bool b_bias_on_rx = false;
+ bool b_edca_turbo_on = false;
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("rtl8821ae_dm_check_edca_turbo=====>"));
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("Original BE PARAM: 0x%x\n",
+ rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)));
+
+ /*===============================
+ list parameter for different platform
+ ===============================*/
+ b_last_is_cur_rdl_state = rtlpriv->dm.bis_cur_rdlstate;
+ pb_is_cur_rdl_state = &( rtlpriv->dm.bis_cur_rdlstate);
+
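+	/* Bytes transferred since the previous watchdog pass, derived from the
+	 * cumulative unicast Tx/Rx statistics. */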
+ cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt;
+ cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast - rtldm->last_rx_ok_cnt;
+
+ rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+ rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+
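+	/* Bias the uplink/downlink decision toward Rx for Ralink/Atheros peers;
+	 * EDCA turbo is only used while all observed traffic is BE and frame
+	 * bursting is not disabled. */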
+ iot_peer = rtlpriv->mac80211.vendor;
+ b_bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
+ true : false;
+ b_edca_turbo_on = ((!rtlpriv->dm.bis_any_nonbepkts) &&
+ (!rtlpriv->dm.b_disable_framebursting)) ?
+ true : false;
+
+ /*if (rtl8821ae_dm_is_edca_turbo_disable(hw))
+ goto dm_CheckEdcaTurbo_EXIT;*/
+
+ if ((iot_peer == PEER_CISCO) && (mac->mode == WIRELESS_MODE_N_24G))
+ {
+ edca_be_dl = edca_setting_dl[iot_peer];
+ edca_be_ul = edca_setting_ul[iot_peer];
+ }
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x \n",
+ rtlpriv->dm.bis_any_nonbepkts, rtlpriv->dm.b_disable_framebursting));
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
+ b_edca_turbo_on, b_bias_on_rx));
+
+ if (b_edca_turbo_on) {
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("curTxOkCnt : 0x%lx \n",cur_tx_ok_cnt));
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("curRxOkCnt : 0x%lx \n",cur_rx_ok_cnt));
+ if(b_bias_on_rx)
+ rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+ cur_rx_ok_cnt, true, pb_is_cur_rdl_state);
+ else
+ rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
+ cur_rx_ok_cnt, false, pb_is_cur_rdl_state);
+
+ edca_be = ((*pb_is_cur_rdl_state) == true) ? edca_be_dl : edca_be_ul;
+
+ rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be);
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("EDCA Turbo on: EDCA_BE:0x%x\n", edca_be));
+
+ rtlpriv->dm.bcurrent_turbo_edca = true;
+
+ RT_TRACE(COMP_TURBO, DBG_LOUD,
+ ("EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x \n",
+ edca_be_dl, edca_be_ul, edca_be));
+ } else {
+ if (rtlpriv->dm.bcurrent_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ (u8 *) (&tmp));
+ }
+ rtlpriv->dm.bcurrent_turbo_edca = false;
+ }
+
+/* dm_CheckEdcaTurbo_EXIT: */
+ rtlpriv->dm.bis_any_nonbepkts = false;
+ rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast;
+ rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cur_cck_cca_thresh;
+
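+	/* Choose the CCK CCA threshold from the minimum RSSI while associated,
+	 * falling back to the CCK false-alarm count otherwise. */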
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ /*dm_digtable.rssi_val_min = rtl8821ae_dm_initial_gain_min_pwdb(hw);*/
+ if (dm_digtable.rssi_val_min > 25)
+ cur_cck_cca_thresh = 0xcd;
+ else if ((dm_digtable.rssi_val_min <= 25) && (dm_digtable.rssi_val_min > 10))
+ cur_cck_cca_thresh = 0x83;
+ else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ if (dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) {
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+ }
+
+ dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres;
+ dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+ RT_TRACE(COMP_DIG, DBG_TRACE,
+ ("CCK cca thresh hold =%x\n", dm_digtable.cur_cck_cca_thres));
+
+}
+
+void rtl8821ae_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool b_fw_current_in_ps_mode = false;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_FW_PSMODE_STATUS, \
+ (u8*)(&b_fw_current_in_ps_mode));
+ if (b_fw_current_in_ps_mode)
+ return;
+}
+
+void rtl8812ae_dm_update_txpath(struct ieee80211_hw *hw, u8 path)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtldm->resp_tx_path != path) {
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("Need to Update Tx Path\n"));
+ if (path == RF90_PATH_A) {
+ /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x111);
+ /*Resp Tx by Txinfo*/
+ rtl_set_bbreg(hw, 0x6d8, BIT(7) | BIT(6), 1);
+ } else {
+ /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x222);
+ /*Resp Tx by Txinfo*/
+ rtl_set_bbreg(hw, 0x6d8, BIT(7) |BIT(6), 2);
+ }
+ }
+ rtldm->resp_tx_path = path;
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("Path=%s\n",(path == RF90_PATH_A) ? \
+		"RF90_PATH_A":"RF90_PATH_B"));
+}
+
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ //rtl_set_bbreg(hw, 0x80c , BIT(29), 1); /*Tx path from Reg*/
+ rtl_set_bbreg(hw, 0x80c , 0xFFF0, 0x111); /*Tx by Reg*/
+ rtl_set_bbreg(hw, 0x6d8 , BIT(7) | BIT(6), 1); /*Resp Tx by Txinfo*/
+ rtl8812ae_dm_update_txpath(hw, RF90_PATH_A);
+
+ rtldm->path_sel = 1; /* TxInfo default at path-A*/
+}
+
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+ u8 *pdesc)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ SET_TX_DESC_TX_ANT(pdesc, rtldm->path_sel);
+}
+
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+ u32 rssi_a, u32 rssi_b)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtldm->patha_sum += rssi_a;
+ rtldm->patha_cnt ++;
+
+ rtldm->pathb_sum += rssi_b;
+ rtldm->pathb_cnt ++;
+}
+
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 rssi_avg_a = 0;
+ u32 rssi_avg_b = 0;
+ u32 local_min_rssi = 0;
+ u32 min_rssi = 0xFF;
+ u8 tx_resp_path=0, target_path;
+ struct ieee80211_sta *sta = NULL;
+
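+	/* Average the per-path RSSI accumulated since the last pass and steer
+	 * the response/Tx path to the stronger one. */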
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta) {
+		/*Calculate RSSI per Path*/
+ rssi_avg_a = (rtldm->patha_cnt != 0) ? \
+ (rtldm->patha_sum / rtldm->patha_cnt) : 0;
+ rssi_avg_b = (rtldm->pathb_cnt != 0) ? \
+ (rtldm->pathb_sum / rtldm->pathb_cnt) : 0;
+
+ target_path = (rssi_avg_a == rssi_avg_b) ? rtldm->resp_tx_path : \
+ ((rssi_avg_a>=rssi_avg_b) ? RF90_PATH_A : RF90_PATH_B);
+
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, PathA_Sum=%d, PathA_Cnt=%d\n", \
+ mac->assoc_id, rtldm->patha_sum, rtldm->patha_cnt));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, PathB_Sum=%d, PathB_Cnt=%d\n", \
+ mac->assoc_id, rtldm->pathb_sum, rtldm->pathb_cnt));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("assoc_id=%d, RssiAvgA= %d, RssiAvgB= %d\n", \
+ mac->assoc_id, rssi_avg_a, rssi_avg_b));
+
+ /*Select Resp Tx Path*/
+ local_min_rssi = (rssi_avg_a > rssi_avg_b) ? rssi_avg_b : rssi_avg_a;
+ if(local_min_rssi < min_rssi)
+ {
+ min_rssi = local_min_rssi;
+ tx_resp_path = target_path;
+ }
+
+ /*Select Tx DESC*/
+ if(target_path == RF90_PATH_A)
+ rtldm->path_sel = 1;
+ else
+ rtldm->path_sel = 2;
+
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("Tx from TxInfo, TargetPath=%s\n", \
+ (target_path==RF90_PATH_A) ? \
+ "ODM_RF_PATH_A":"ODM_RF_PATH_B"));
+ RT_TRACE(COMP_DIG, DBG_TRACE, \
+ ("pDM_PathDiv->PathSel= %d\n", \
+ rtldm->path_sel));
+ }
+ rtldm->patha_cnt = 0;
+ rtldm->patha_sum = 0;
+ rtldm->pathb_cnt = 0;
+ rtldm->pathb_sum = 0;
+}
+
+void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 crystal_cap;
+ u32 packet_count;
+ int cfo_khz_a,cfo_khz_b,cfo_ave = 0, adjust_xtal = 0;
+ int cfo_ave_diff;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED){
+ /*1.Enable ATC*/
+ if (rtldm->atc_status == ATC_STATUS_OFF)
+ {
+ rtl_set_bbreg(hw, RFC_AREA, BIT(14), ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): No link!!\n"));
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): atc_status = %d\n", \
+ rtldm->atc_status));
+
+ if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap)
+ {
+ rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+			crystal_cap = rtldm->crystal_cap & 0x3f;
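+			/* The same 6-bit crystal cap value is written into both
+			 * capacitor-bank fields of REG_MAC_PHY_CTRL. */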
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+ 0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): crystal_cap = 0x%x\n", \
+ rtldm->crystal_cap));
+ }else{
+ /*1. Calculate CFO for path-A & path-B*/
+ cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+ cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
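+		/* cfo_tail is a raw hardware report; the 3125/1280 scaling
+		 * converts it to kHz before the two paths are averaged. */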
+ packet_count = rtldm->packet_count;
+
+ /*2.No new packet*/
+ if (packet_count == rtldm->packet_count_pre) {
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): packet counter doesn't change\n"));
+ return;
+ }
+
+ rtldm->packet_count_pre = packet_count;
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): packet counter = %d\n", \
+ rtldm->packet_count));
+
+ /*3.Average CFO*/
+ if (rtlpriv->phy.rf_type == RF_1T1R)
+ cfo_ave = cfo_khz_a;
+ else
+ cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1;
+
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch():"
+ "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
+ cfo_khz_a, cfo_khz_b, cfo_ave));
+
+ /*4.Avoid abnormal large CFO*/
+ cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave)?
+ (rtldm->cfo_ave_pre - cfo_ave):
+ (cfo_ave - rtldm->cfo_ave_pre);
+
+ if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0){
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): first large CFO hit\n"));
+ rtldm->large_cfo_hit = 1;
+ return;
+ }
+ else
+ rtldm->large_cfo_hit = 0;
+
+ rtldm->cfo_ave_pre = cfo_ave;
+
+ /*CFO tracking by adjusting Xtal cap.*/
+
+ /*1.Dynamic Xtal threshold*/
+ if (cfo_ave >= -rtldm->cfo_threshold &&
+ cfo_ave <= rtldm->cfo_threshold &&
+ rtldm->is_freeze == 0){
+ if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL){
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+ rtldm->is_freeze = 1;
+ }
+ else
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+ }
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): Dynamic threshold = %d\n", \
+ rtldm->cfo_threshold));
+
+ /* 2.Calculate Xtal offset*/
+ if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+ adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1;
+ else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && rtlpriv->dm.crystal_cap > 0)
+ adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1;
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): "
+ "Crystal cap = 0x%x, Crystal cap offset = %d\n",
+ rtldm->crystal_cap, adjust_xtal));
+
+ /*3.Adjust Crystal Cap.*/
+ if (adjust_xtal != 0){
+ rtldm->is_freeze = 0;
+ rtldm->crystal_cap += adjust_xtal;
+
+ if (rtldm->crystal_cap > 0x3f)
+ rtldm->crystal_cap = 0x3f;
+ else if (rtldm->crystal_cap < 0)
+ rtldm->crystal_cap = 0;
+
+			crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \
+ 0x7ff80000, (crystal_cap | (crystal_cap << 6)));
+ RT_TRACE(COMP_DIG, DBG_LOUD, \
+ ("rtl8821ae_dm_dynamic_atc_switch(): New crystal cap = 0x%x \n", \
+ rtldm->crystal_cap));
+ }
+ }
+
+}
+
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inpsmode = false;
+ bool b_fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inpsmode));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&b_fw_ps_awake));
+
+ if(ppsc->p2p_ps_info.p2p_ps_mode)
+ b_fw_ps_awake = false;
+
+ if((ppsc->rfpwr_state == ERFON) &&
+ ((!b_fw_current_inpsmode) && b_fw_ps_awake) &&
+ (!ppsc->rfchange_inprogress)) {
+ rtl8821ae_dm_common_info_self_update(hw);
+ rtl8821ae_dm_false_alarm_counter_statistics(hw);
+ rtl8821ae_dm_check_rssi_monitor(hw);
+ rtl8821ae_dm_dig(hw);
+ rtl8821ae_dm_dynamic_edcca(hw);
+ rtl8821ae_dm_cck_packet_detection_thresh(hw);
+ rtl8821ae_dm_refresh_rate_adaptive_mask(hw);
+ rtl8821ae_dm_check_edca_turbo(hw);
+ rtl8821ae_dm_dynamic_atc_switch(hw);
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_dm_check_txpower_tracking_thermalmeter(hw);
+ else
+ rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw);
+ rtl8821ae_dm_iq_calibrate(hw);
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+ }
+ }
+
+ rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
+
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+ u8 *pdesc, u32 mac_id)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ struct fast_ant_trainning *pfat_table= &(rtldm->fat_table);
+
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8812AE)
+ return;
+
+ if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
+		(rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)){
+ SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]);
+ }
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/dm.h b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
new file mode 100644
index 000000000000..ebbff9b6cacf
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/dm.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_DM_H__
+#define __RTL8821AE_DM_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+#define TXSCALE_TABLE_SIZE 37
+
+/*RF REG LIST*/
+#define DM_REG_RF_MODE_11N 0x00
+#define DM_REG_RF_0B_11N 0x0B
+#define DM_REG_CHNBW_11N 0x18
+#define DM_REG_T_METER_11N 0x24
+#define DM_REG_RF_25_11N 0x25
+#define DM_REG_RF_26_11N 0x26
+#define DM_REG_RF_27_11N 0x27
+#define DM_REG_RF_2B_11N 0x2B
+#define DM_REG_RF_2C_11N 0x2C
+#define DM_REG_RXRF_A3_11N 0x3C
+#define DM_REG_T_METER_92D_11N 0x42
+#define DM_REG_T_METER_88E_11N 0x42
+
+
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define DM_REG_BB_CTRL_11N 0x800
+#define DM_REG_RF_PIN_11N 0x804
+#define DM_REG_PSD_CTRL_11N 0x808
+#define DM_REG_TX_ANT_CTRL_11N 0x80C
+#define DM_REG_BB_PWR_SAV5_11N 0x818
+#define DM_REG_CCK_RPT_FORMAT_11N 0x824
+#define DM_REG_RX_DEFUALT_A_11N 0x858
+#define DM_REG_RX_DEFUALT_B_11N 0x85A
+#define DM_REG_BB_PWR_SAV3_11N 0x85C
+#define DM_REG_ANTSEL_CTRL_11N 0x860
+#define DM_REG_RX_ANT_CTRL_11N 0x864
+#define DM_REG_PIN_CTRL_11N 0x870
+#define DM_REG_BB_PWR_SAV1_11N 0x874
+#define DM_REG_ANTSEL_PATH_11N 0x878
+#define DM_REG_BB_3WIRE_11N 0x88C
+#define DM_REG_SC_CNT_11N 0x8C4
+#define DM_REG_PSD_DATA_11N 0x8B4
+/*PAGE 9*/
+#define DM_REG_ANT_MAPPING1_11N 0x914
+#define DM_REG_ANT_MAPPING2_11N 0x918
+/*PAGE A*/
+#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define DM_REG_CCK_CCA_11N 0xA0A
+#define DM_REG_CCK_CCA_11AC 0xA0A
+#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define DM_REG_CCK_FA_RST_11N 0xA2C
+#define DM_REG_CCK_FA_MSB_11N 0xA58
+#define DM_REG_CCK_FA_LSB_11N 0xA5C
+#define DM_REG_CCK_CCA_CNT_11N 0xA60
+#define DM_REG_BB_PWR_SAV4_11N 0xA74
+/*PAGE B */
+#define DM_REG_LNA_SWITCH_11N 0xB2C
+#define DM_REG_PATH_SWITCH_11N 0xB30
+#define DM_REG_RSSI_CTRL_11N 0xB38
+#define DM_REG_CONFIG_ANTA_11N 0xB68
+#define DM_REG_RSSI_BT_11N 0xB9C
+/*PAGE C */
+#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define DM_REG_RX_PATH_11N 0xC04
+#define DM_REG_TRMUX_11N 0xC08
+#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define DM_REG_RXIQI_MATRIX_11N 0xC14
+#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define DM_REG_IGI_A_11N 0xC50
+#define DM_REG_IGI_A_11AC 0xC50
+#define DM_REG_ANTDIV_PARA2_11N 0xC54
+#define DM_REG_IGI_B_11N 0xC58
+#define DM_REG_IGI_B_11AC 0xE50
+#define DM_REG_ANTDIV_PARA3_11N 0xC5C
+#define DM_REG_BB_PWR_SAV2_11N 0xC70
+#define DM_REG_RX_OFF_11N 0xC7C
+#define DM_REG_TXIQK_MATRIXA_11N 0xC80
+#define DM_REG_TXIQK_MATRIXB_11N 0xC88
+#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define DM_REG_ANTDIV_PARA1_11N 0xCA4
+#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/*PAGE D */
+#define DM_REG_OFDM_FA_RSTD_11N 0xD00
+#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/*PAGE E */
+#define DM_REG_TXAGC_A_6_18_11N 0xE00
+#define DM_REG_TXAGC_A_24_54_11N 0xE04
+#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define DM_REG_FPGA0_IQK_11N 0xE28
+#define DM_REG_TXIQK_TONE_A_11N 0xE30
+#define DM_REG_RXIQK_TONE_A_11N 0xE34
+#define DM_REG_TXIQK_PI_A_11N 0xE38
+#define DM_REG_RXIQK_PI_A_11N 0xE3C
+#define DM_REG_TXIQK_11N 0xE40
+#define DM_REG_RXIQK_11N 0xE44
+#define DM_REG_IQK_AGC_PTS_11N 0xE48
+#define DM_REG_IQK_AGC_RSP_11N 0xE4C
+#define DM_REG_BLUETOOTH_11N 0xE6C
+#define DM_REG_RX_WAIT_CCA_11N 0xE70
+#define DM_REG_TX_CCK_RFON_11N 0xE74
+#define DM_REG_TX_CCK_BBON_11N 0xE78
+#define DM_REG_OFDM_RFON_11N 0xE7C
+#define DM_REG_OFDM_BBON_11N 0xE80
+#define DM_REG_TX2RX_11N 0xE84
+#define DM_REG_TX2TX_11N 0xE88
+#define DM_REG_RX_CCK_11N 0xE8C
+#define DM_REG_RX_OFDM_11N 0xED0
+#define DM_REG_RX_WAIT_RIFS_11N 0xED4
+#define DM_REG_RX2RX_11N 0xED8
+#define DM_REG_STANDBY_11N 0xEDC
+#define DM_REG_SLEEP_11N 0xEE0
+#define DM_REG_PMPD_ANAEN_11N 0xEEC
+
+
+/*MAC REG LIST*/
+#define DM_REG_BB_RST_11N 0x02
+#define DM_REG_ANTSEL_PIN_11N 0x4C
+#define DM_REG_EARLY_MODE_11N 0x4D0
+#define DM_REG_RSSI_MONITOR_11N 0x4FE
+#define DM_REG_EDCA_VO_11N 0x500
+#define DM_REG_EDCA_VI_11N 0x504
+#define DM_REG_EDCA_BE_11N 0x508
+#define DM_REG_EDCA_BK_11N 0x50C
+#define DM_REG_TXPAUSE_11N 0x522
+#define DM_REG_RESP_TX_11N 0x6D8
+#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/*DIG Related*/
+#define DM_BIT_IGI_11N 0x0000007F
+#define DM_BIT_IGI_11AC 0xFFFFFFFF
+
+
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 43
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_FA_UPPER 0x3e
+#define DM_DIG_FA_LOWER 0x1e
+#define DM_DIG_FA_TH0 0x200
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVal 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TXPWRTRACK_MAX_IDX 6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF 0x0 /* disable */
+#define ATC_STATUS_ON 0x1 /* enable */
+#define CFO_THRESHOLD_XTAL 10 /* kHz */
+#define CFO_THRESHOLD_ATC 80 /* kHz */
+
+#define AVG_THERMAL_NUM_8812A 4
+#define TXPWR_TRACK_TABLE_SIZE 30
+#define MAX_PATH_NUM_8812A 2
+#define MAX_PATH_NUM_8821A 1
+
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+ u8 initialize;
+ long rssi_val_min;
+
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+ u8 bt30_cur_igi;
+ u8 backup_igvalue;
+ u8 stop_dig;
+
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+
+ u8 pre_cck_cca_thres;
+ u8 cur_cck_cca_thres;
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+
+ u8 large_fa_hit;
+ u8 forbidden_igi;
+ u32 recover_cnt;
+
+ u8 dig_dynamic_min_0;
+ u8 dig_dynamic_min_1;
+ bool b_media_connect_0;
+ bool b_media_connect_1;
+
+ u32 antdiv_rssi_max;
+ u32 rssi_max;
+};
+
+
+enum FAT_STATE {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LowRssi = 0,
+ CCK_PD_STAGE_HighRssi = 1,
+ CCK_FA_STAGE_Low = 2,
+ CCK_FA_STAGE_High = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+ BBSWING,
+ TXAGC,
+ MIX_MODE
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
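+/* Ad-hoc mode reports the minimum RSSI among associated entries; otherwise the
+ * station's own undecorated smoothed PWDB is used. */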
+#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
+	((((struct rtl_priv *)(_priv))->mac80211.opmode == NL80211_IFTYPE_ADHOC) ? \
+	(((struct rtl_priv *)(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
+	(((struct rtl_priv *)(_priv))->dm.undecorated_smoothed_pwdb))
+
+extern struct dig_t dm_digtable;
+void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
+ u8 *pdesc, u32 mac_id);
+void rtl8821ae_dm_ant_sel_statistics(struct ieee80211_hw *hw,
+ u8 antsel_tr_mux, u32 mac_id,
+ u32 rx_pwdb_all);
+void rtl8821ae_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8821ae_dm_init(struct ieee80211_hw *hw);
+void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw,
+ u8 type,u8 *pdirection,
+ u32 *poutwrite_val);
+void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca);
+void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw);
+void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw,
+ u32 rssi_a, u32 rssi_b);
+void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw,
+ u8 *pdesc);
+void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method,
+ u8 rf_path,
+ u8 channel_mapped_index);
+void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index);
+
+void rtl8812ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate);
+u8 rtl8812ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate);
+void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.c b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
new file mode 100644
index 000000000000..46eb4125d18f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.c
@@ -0,0 +1,1349 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "dm.h"
+
+static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ //printk("0x80=%02x.\n",tmp);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ //printk("0x80=%02x.\n",tmp);
+ }
+
+}
+
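+/* Copy the firmware image into the chip, 32-bit words first and any trailing
+ * bytes one at a time, starting at FW_8821AE_START_ADDRESS. */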
+static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = sizeof(u32);
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 *pu4BytePtr = (u32 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset),
+ *(pu4BytePtr + i));
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ bufferPtr += offset;
+ for (i = 0; i < remainSize; i++) {
+ rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS +
+ offset + i), *(bufferPtr + i));
+ }
+ }
+}
+
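+/* Select the download page in the low bits of REG_MCUFWDL + 2 and then write
+ * one page worth of firmware. */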
+static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ _rtl8821ae_fw_block_write(hw, buffer, size);
+}
+
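+/* Pad the firmware length up to a multiple of 4 bytes so the image can be
+ * written dword-wise. */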
+static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+
+ *pfwlen = fwlen;
+}
+
+static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
+ enum version_8821ae version,
+ u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+
+ RT_TRACE(COMP_FW, DBG_LOUD, ("FW size is %d bytes,\n", size));
+
+ _rtl8821ae_fill_dummy(bufferPtr, &size);
+
+ pageNums = size / FW_8821AE_PAGE_SIZE;
+ remainSize = size % FW_8821AE_PAGE_SIZE;
+
+ if (pageNums > 8) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+			 ("Page numbers should not be greater than 8\n"));
+ }
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * FW_8821AE_PAGE_SIZE;
+ _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+ FW_8821AE_PAGE_SIZE);
+ }
+
+ if (remainSize) {
+ offset = pageNums * FW_8821AE_PAGE_SIZE;
+ page = pageNums;
+ _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset),
+ remainSize);
+ }
+
+}
+
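+/* Poll for the download checksum report, set MCUFWDL_RDY, reset the 8051 and
+ * then wait for WINTINI_RDY to signal that the firmware has initialized. */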
+static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = -EIO;
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(COMP_ERR, DBG_LOUD,
+ ("chksum report fail ! REG_MCUFWDL:0x%08x .\n",
+ value32));
+ goto exit;
+ }
+
+ RT_TRACE(COMP_FW, DBG_EMERG,
+ ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ rtl8821ae_firmware_selfreset(hw);
+
+ counter = 0;
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+ value32));
+ err = 0;
+ goto exit;
+ }
+
+ udelay(FW_8821AE_POLLING_DELAY);
+
+ } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT);
+
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+
+exit:
+ return err;
+}
+
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl8821a_firmware_header *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8821ae version = rtlhal->version;
+
+ if(!rtlhal->pfirmware)
+ return 1;
+
+ pfwheader = (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
+ pfwdata = (u8 *) rtlhal->pfirmware;
+ fwsize = rtlhal->fwsize;
+ RT_TRACE(COMP_FW, DBG_DMESG,
+ ("normal Firmware SIZE %d \n",fwsize));
+
+ if (IS_FW_HEADER_EXIST_8812(pfwheader) || IS_FW_HEADER_EXIST_8821(pfwheader)) {
+ RT_TRACE(COMP_FW, DBG_DMESG,
+ ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtl8821a_firmware_header)));
+
+ pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
+ fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+ }
+
+ if(rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)){
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ rtl8821ae_firmware_selfreset(hw);
+ }
+ _rtl8821ae_enable_fw_download(hw, true);
+ _rtl8821ae_write_fw(hw, version, pfwdata, fwsize);
+ _rtl8821ae_enable_fw_download(hw, false);
+
+ err = _rtl8821ae_fw_free_to_go(hw);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Firmware is not ready to run!\n"));
+ } else {
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("Firmware is ready to run!\n"));
+ }
+
+ return 0;
+}
+
+static bool _rtl8821ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
+static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boxnum =0;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp = 0;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+	bool bwrite_success = false;
+ u8 wait_h2c_limmit = 100;
+ /*u8 wait_writeh2c_limmit = 100;*/
+ u8 boxcontent[4], boxextcontent[4];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag =0;
+ u8 idx =0;
+
+ RT_TRACE(COMP_CMD, DBG_LOUD, ("come in\n"));
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->b_h2c_setinprogress) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("H2C set in progress! Wait to set.."
+ "element_id(%d).\n", element_id));
+
+ while (rtlhal->b_h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Wait 100 us (%d times)...\n",
+ h2c_waitcounter));
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->b_h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+
+	while (!bwrite_success) {
+ /*cosa remove this because never reach this.*/
+#if 0
+ wait_writeh2c_limmit--;
+ if (wait_writeh2c_limmit == 0) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Write H2C fail because no trigger "
+ "for FW INT!\n"));
+ break;
+ }
+#endif
+
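+		/* Four H2C mailboxes are used round-robin; last_hmeboxnum points
+		 * at the next box/extension register pair to fill. */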
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+ isfw_read = false;
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_CR);
+
+ if (u1b_tmp != 0xEA)
+ isfw_read = true;
+ else {
+ if( rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xEA ||
+ rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xEA)
+ rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xFF);
+ }
+
+ if (isfw_read == true) {
+ wait_h2c_limmit = 100;
+ isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ /*wait until Fw read*/
+ wait_h2c_limmit--;
+ if (wait_h2c_limmit == 0) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Waiting too long for FW read "
+ "clear HMEBox(%d)!\n", boxnum));
+ break;
+ }
+
+ udelay(10);
+
+ isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Waiting for FW read clear HMEBox(%d)!!! "
+ "0x130 = %2x\n", boxnum, u1b_tmp));
+ }
+ }
+
+ if (!isfw_read) {
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Write H2C register BOX[%d] fail!!!!! "
+ "Fw do not read. \n", boxnum));
+ break;
+ }
+
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("Write element_id box_reg(%4x) = %2x \n",
+ box_reg, element_id));
+
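+		/* Payloads of up to 3 bytes fit in the 4-byte box register after
+		 * the element id; 4 to 7 byte payloads spill their tail into the
+		 * matching extension register, which must be written first. */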
+ switch (cmd_len) {
+ case 1:
+ case 2:
+ case 3:
+ /*boxcontent[0] &= ~(BIT(7));*/
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, cmd_len);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /*boxcontent[0] |= (BIT(7));*/
+ memcpy((u8 *) (boxextcontent),
+ p_cmdbuffer + buf_index+3, cmd_len-3);
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, 3);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+
+		bwrite_success = true;
+
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ RT_TRACE(COMP_CMD, DBG_LOUD,
+ ("pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum));
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->b_h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+ RT_TRACE(COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (rtlhal->bfw_ready == false) {
+ RT_ASSERT(false, ("return H2C cmd because of Fw "
+ "download fail!!!\n"));
+ return;
+ }
+
+ memset(tmp_cmdbuf, 0, 8);
+ memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+ _rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *) & tmp_cmdbuf);
+
+ return;
+}
+
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1b_tmp;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
+ }else {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(0))));
+ }
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+ udelay(50);
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3)));
+ }else {
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(0)));
+ }
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_firmware_selfreset(): 8051 reset success.\n"));
+
+}
+
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 rlbm,power_state = 0;
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/
+ SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl);
+ SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ if(mode == FW_PS_ACTIVE_MODE)
+ {
+ power_state |= FW_PWR_STATE_ACTIVE;
+ }
+ else
+ {
+ power_state |= FW_PWR_STATE_RF_OFF;
+ }
+ SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+		      "rtl8821ae_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE, H2C_8821AE_PWEMODE_LENGTH, u1_h2c_set_pwrmode);
+
+}
+
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = { 0 };
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 u1_apoffload_parm[H2C_8821AE_AP_OFFLOAD_LENGTH] = { 0 };
+
+ SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+ SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->bhiddenssid);
+ SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AP_OFFLOAD, H2C_8821AE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+
+}
+
+static bool _rtl8821ae_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 own;
+ unsigned long flags;
+ struct sk_buff *pskb = NULL;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
+
+#define BEACON_PG 0 /* ->1 */
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /* ->5 */
+
+#define BEACON_PG_8812 0
+#define PSPOLL_PG_8812 1
+#define NULL_PG_8812 2
+#define PROBERSP_PG_8812 3
+
+#define BEACON_PG_8821 0
+#define PSPOLL_PG_8821 1
+#define NULL_PG_8821 2
+#define PROBERSP_PG_8821 3
+
+#define TOTAL_RESERVED_PKT_LEN_8812 2048
+#define TOTAL_RESERVED_PKT_LEN_8821 1024
+
+
+static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
+ /* page 0 */
+ 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x20, 0x04, 0x00, 0x06, 0x64, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x31, 0x01, 0x08, 0x82, 0x84,
+ 0x8b, 0x96, 0x0c, 0x18, 0x30, 0x48, 0x03, 0x01,
+ 0x0b, 0x06, 0x02, 0x00, 0x00, 0x2a, 0x01, 0x8b,
+ 0x32, 0x04, 0x12, 0x24, 0x60, 0x6c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 1 */
+ 0xa4, 0x10, 0x01, 0xc0, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 2 */
+ 0x48, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1a, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 3 */
+ 0xc8, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23,
+ 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64,
+ 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+
+static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x30, 0x04, 0x00, 0x0C, 0x4E, 0x45,
+ 0x54, 0x47, 0x45, 0x41, 0x52, 0x5F, 0x31, 0x35,
+ 0x30, 0x4E, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96,
+ 0x0C, 0x12, 0x18, 0x24, 0x03, 0x01, 0x03, 0x06,
+ 0x02, 0x00, 0x00, 0x2A, 0x01, 0x8A, 0x32, 0x04,
+ 0x30, 0x48, 0x60, 0x6C, 0xDD, 0x18, 0x00, 0x50,
+ 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02,
+ 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02, 0x01, 0x00,
+ 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xA4, 0x10, 0x02, 0xC0, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xC8, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57,
+ 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5,
+ 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
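+/*
+ * Patch the station/BSSID addresses into the pre-built reserved-page
+ * template above (beacon, ps-poll, null data, probe response), push the
+ * whole buffer to the firmware as a TX command packet, and then report
+ * each page's location with the H2C_8821AE_RSVDPAGE command.
+ */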
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[5] = { 0 };
+ bool b_dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet_8812[BEACON_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet_8812[PSPOLL_PG_8812 * 512];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8812);
+
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet_8812[NULL_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8812);
+
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet_8812[PROBERSP_PG_8812 * 512];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8812);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ &reserved_page_packet_8812[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8812ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ u1RsvdPageLoc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
+ memcpy((u8 *)skb_put(skb, totalpacketlen),
+ &reserved_page_packet_8812, totalpacketlen);
+
+ rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n",
+ u1RsvdPageLoc, 3);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
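+/*
+ * 8821AE variant of the routine above: the same flow, but it patches and
+ * downloads the 256-byte-per-page reserved_page_packet_8821 template.
+ */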
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[5] = { 0 };
+ bool b_dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet_8821[BEACON_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet_8821[PSPOLL_PG_8821 * 256];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8821);
+
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet_8821[NULL_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8821);
+
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet_8821[PROBERSP_PG_8821 * 256];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8821);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ &reserved_page_packet_8821[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n",
+ u1RsvdPageLoc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
+ memcpy((u8 *)skb_put(skb, totalpacketlen),
+ &reserved_page_packet_8821, totalpacketlen);
+
+ rtstatus = _rtl8821ae_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n",
+ u1RsvdPageLoc, 3);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
+/*Should check FW support p2p or not.*/
+void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
+{
+ u8 u1_ctwindow_period[1] = {ctwindow};
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_CTW_CMD, 1,
+ u1_ctwindow_period);
+}
+
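+/*
+ * Translate the P2P power-save state into the firmware offload descriptor:
+ * program the CTWindow, up to two NoA descriptors (registers 0x5cf and
+ * 0x5e0-0x5ec) and the opportunistic PS bits, then hand the result to the
+ * firmware with H2C_8821AE_P2P_PS_OFFLOAD.
+ */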
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+ struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+ u8 i;
+ u16 ctwindow;
+ u32 start_time, tsf_low;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ RT_TRACE(COMP_FW, DBG_LOUD, ("P2P_PS_DISABLE\n"));
+ memset(p2p_ps_offload, 0, 1);
+ break;
+ case P2P_PS_ENABLE:
+ RT_TRACE(COMP_FW, DBG_LOUD, ("P2P_PS_ENABLE\n"));
+ /* update CTWindow value. */
+ if (p2pinfo->ctwindow > 0) {
+ p2p_ps_offload->CTWindow_En = 1;
+ ctwindow = p2pinfo->ctwindow;
+ rtl8821ae_set_p2p_ctw_period_cmd(hw, ctwindow);
+ }
+
+ /* hw only support 2 set of NoA */
+ for (i = 0; i < p2pinfo->noa_num; i++) {
+ /* To control the register setting for which NOA */
+ rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->NoA0_En = 1;
+ else
+ p2p_ps_offload->NoA1_En = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtl_write_dword(rtlpriv, 0x5E0, p2pinfo->noa_duration[i]);
+ rtl_write_dword(rtlpriv, 0x5E4, p2pinfo->noa_interval[i]);
+
+ /* Get Current TSF value */
+ tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ start_time = p2pinfo->noa_start_time[i];
+ if (p2pinfo->noa_count_type[i] != 1) {
+ while (start_time <= (tsf_low + (50 * 1024))) {
+ start_time += p2pinfo->noa_interval[i];
+ if (p2pinfo->noa_count_type[i] != 255)
+ p2pinfo->noa_count_type[i]--;
+ }
+ }
+ rtl_write_dword(rtlpriv, 0x5E8, start_time);
+ rtl_write_dword(rtlpriv, 0x5EC, p2pinfo->noa_count_type[i]);
+ }
+
+ if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->Offload_En = 1;
+
+ if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->AllStaSleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ RT_TRACE(COMP_FW, DBG_LOUD, ("P2P_PS_SCAN\n"));
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ RT_TRACE(COMP_FW, DBG_LOUD, ("P2P_PS_SCAN_DONE\n"));
+ p2p_ps_offload->discovery = 0;
+ p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_OFFLOAD, 1,
+ (u8 *)p2p_ps_offload);
+}
+
+void rtl8812ae_c2h_ra_report_handler(
+ struct ieee80211_hw *hw,
+ u8 *cmd_buf,
+ u8 cmd_len
+)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 rate = cmd_buf[0] & 0x3F;
+
+ rtlhal->current_ra_rate = rtl8812ae_hw_rate_to_mrate(hw, rate);
+
+ rtl8812ae_dm_update_init_rate(hw, rate);
+}
+
+
+void _rtl8812ae_c2h_content_parsing(
+ struct ieee80211_hw *hw,
+ u8 c2h_cmd_id,
+ u8 c2h_cmd_len,
+ u8 *tmp_buf
+)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (c2h_cmd_id) {
+ case C2H_8812_DBG:
+ RT_TRACE(COMP_FW, DBG_LOUD, ("[C2H], C2H_8812_DBG!!\n"));
+ break;
+
+ case C2H_8812_RA_RPT:
+ rtl8812ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
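+/*
+ * C2H (firmware-to-host) packets are laid out as parsed below:
+ * buffer[0] is the command id, buffer[1] the sequence number, and the
+ * remaining length - 2 bytes are the command payload.
+ */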
+void rtl8812ae_c2h_packet_handler(
+ struct ieee80211_hw *hw,
+ u8 *buffer,
+ u8 length
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0;
+ u8 *tmp_buf = NULL;
+
+ c2h_cmd_id = buffer[0];
+ c2h_cmd_seq = buffer[1];
+ c2h_cmd_len = length - 2;
+ tmp_buf = buffer + 2;
+
+ RT_TRACE(COMP_FW, DBG_LOUD,
+ ("[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n",
+ c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len));
+
+ RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
+ "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
+ _rtl8812ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+}
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/fw.h b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
new file mode 100644
index 000000000000..30eec880026c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/fw.h
@@ -0,0 +1,321 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE__FW__H__
+#define __RTL8821AE__FW__H__
+
+#define FW_8821AE_SIZE 0x8000
+#define FW_8821AE_START_ADDRESS 0x1000
+#define FW_8821AE_END_ADDRESS 0x5FFF
+#define FW_8821AE_PAGE_SIZE 4096
+#define FW_8821AE_POLLING_DELAY 5
+#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000
+
+#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x9500 )
+
+#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x2100 )
+
+#define USE_OLD_WOWLAN_DEBUG_FW 0
+
+#define H2C_8821AE_RSVDPAGE_LOC_LEN 5
+#define H2C_8821AE_PWEMODE_LENGTH 5
+#define H2C_8821AE_JOINBSSRPT_LENGTH 1
+#define H2C_8821AE_AP_OFFLOAD_LENGTH 3
+#define H2C_8821AE_WOWLAN_LENGTH 3
+#define H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH 3
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 1
+#else
+#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 3
+#endif
+#define H2C_8821AE_AOAC_GLOBAL_INFO_LEN 2
+#define H2C_8821AE_AOAC_RSVDPAGE_LOC_LEN 7
+
+
+/* Fw PS state for RPWM.
+*BIT[2:0] = HW state
+*BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state
+*BIT[4] = sub-state
+*/
+#define FW_PS_GO_ON BIT(0)
+#define FW_PS_TX_NULL BIT(1)
+#define FW_PS_RF_ON BIT(2)
+#define FW_PS_REGISTER_ACTIVE BIT(3)
+
+#define FW_PS_DPS BIT(0)
+#define FW_PS_LCLK (FW_PS_DPS)
+#define FW_PS_RF_OFF BIT(1)
+#define FW_PS_ALL_ON BIT(2)
+#define FW_PS_ST_ACTIVE BIT(3)
+#define FW_PS_ISR_ENABLE BIT(4)
+#define FW_PS_IMR_ENABLE BIT(5)
+
+
+#define FW_PS_ACK BIT(6)
+#define FW_PS_TOGGLE BIT(7)
+
+ /* 8821AE RPWM value*/
+ /* BIT[0] = 1: 32k, 0: 40M*/
+#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/
+#define FW_PS_CLOCK_ON 0 /*40M*/
+
+#define FW_PS_STATE_MASK (0x0F)
+#define FW_PS_STATE_HW_MASK (0x07)
+#define FW_PS_STATE_INT_MASK (0x3F) /*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+
+#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
+#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
+#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
+#define FW_PS_ISR_VAL(x) ((x) & 0x70)
+#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
+#define FW_PS_KEEP_IMR(x) ((x) & 0x20)
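+/*
+ * Illustrative example (not from the vendor code): for a hypothetical RPWM
+ * value of 0x5A, FW_PS_STATE(0x5A) keeps only the low nibble (0x0A), while
+ * FW_PS_STATE_INT(0x5A) also preserves the ISR/IMR enable bits (0x1A).
+ */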
+
+
+#define FW_PS_STATE_S0 (FW_PS_DPS)
+#define FW_PS_STATE_S1 (FW_PS_LCLK)
+#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
+#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
+#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+#define FW_PS_STATE_ALL_ON_8821AE (FW_PS_CLOCK_ON) /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define FW_PS_STATE_RF_ON_8821AE (FW_PS_CLOCK_ON) /* (FW_PS_RF_ON)*/
+#define FW_PS_STATE_RF_OFF_8821AE (FW_PS_CLOCK_ON) /* 0x0*/
+#define FW_PS_STATE_RF_OFF_LOW_PWR_8821AE (FW_PS_CLOCK_OFF) /* (FW_PS_STATE_RF_OFF)*/
+
+#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
+#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
+#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
+#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
+
+
+/* For 8821AE H2C PwrMode Cmd ID 5.*/
+#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define FW_PWR_STATE_RF_OFF 0
+
+#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK )
+#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF |FW_PS_ALL_ON ))
+#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
+#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
+#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)
+
+#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
+
+#define IS_IN_LOW_POWER_STATE_8821AE(FwPSState) \
+ (FW_PS_STATE(FwPSState) == FW_PS_CLOCK_OFF)
+
+struct rtl8821a_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodeSize;
+ u16 rsvd2;
+ u32 svnindex;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+enum rtl8812_c2h_evt {
+ C2H_8812_DBG = 0,
+ C2H_8812_LB = 1,
+ C2H_8812_TXBF = 2,
+ C2H_8812_TX_REPORT = 3,
+ C2H_8812_BT_INFO = 9,
+ C2H_8812_BT_MP = 11,
+ C2H_8812_RA_RPT = 12,
+
+ C2H_8812_FW_SWCHNL = 0x10,
+ C2H_8812_IQK_FINISH = 0x11,
+ MAX_8812_C2HEVENT
+};
+
+enum rtl8821a_h2c_cmd {
+ H2C_8821AE_RSVDPAGE = 0,
+ H2C_8821AE_JOINBSSRPT = 1,
+ H2C_8821AE_SCAN = 2,
+ H2C_8821AE_KEEP_ALIVE_CTRL = 3,
+ H2C_8821AE_DISCONNECT_DECISION = 4,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+ H2C_8821AE_WO_WLAN = 5,
+#endif
+ H2C_8821AE_INIT_OFFLOAD = 6,
+#if(USE_OLD_WOWLAN_DEBUG_FW == 1)
+ H2C_8821AE_REMOTE_WAKE_CTRL = 7,
+#endif
+ H2C_8821AE_AP_OFFLOAD = 8,
+ H2C_8821AE_BCN_RSVDPAGE = 9,
+ H2C_8821AE_PROBERSP_RSVDPAGE = 10,
+
+ H2C_8821AE_SETPWRMODE = 0x20,
+ H2C_8821AE_PS_TUNING_PARA = 0x21,
+ H2C_8821AE_PS_TUNING_PARA2 = 0x22,
+ H2C_8821AE_PS_LPS_PARA = 0x23,
+ H2C_8821AE_P2P_PS_OFFLOAD = 0x24,
+
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+ H2C_8821AE_WO_WLAN = 0x80,
+ H2C_8821AE_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8821AE_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8821AE_AOAC_RSVDPAGE = 0x83,
+#endif
+ H2C_RSSI_REPORT = 0x42,
+ H2C_8821AE_RA_MASK = 0x40,
+ H2C_8821AE_SELECTIVE_SUSPEND_ROF_CMD,
+ H2C_8821AE_P2P_PS_MODE,
+ H2C_8821AE_PSD_RESULT,
+ /*Not defined CTW CMD for P2P yet*/
+ H2C_8821AE_P2P_PS_CTW_CMD,
+ MAX_8821AE_H2CCMD
+};
+
+#define pagenum_128(_len) (u32)(((_len) >> 7) + (((_len) & 0x7F) ? 1 : 0))
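+/*
+ * Example (for illustration only): pagenum_128(300) = 3, i.e. a 300-byte
+ * payload occupies two full 128-byte TX pages plus one partial page.
+ */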
+
+#define SET_8821AE_H2CCMD_WOWLAN_FUNC_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_ALL_PKT_DROP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_ACTIVE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_REKEY_WAKE_UP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIONUM(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_WOWLAN_GPIO_DURATION(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 4, 4, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd) \
+ LE_BITS_TO_1BYTE(__pH2CCmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+
+/* Keep Alive Control*/
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ENABLE(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_KEEP_ALIVE_PERIOD(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#if(USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#else
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd), 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__pH2CCmd, __Value) \
+ SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+
+int rtl8821ae_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw);
+void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable);
+void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
+void rtl8812ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 length);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
new file mode 100644
index 000000000000..8bee772d766f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c
@@ -0,0 +1,519 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "hal_bt_coexist.h"
+#include "../pci.h"
+#include "dm.h"
+#include "fw.h"
+#include "phy.h"
+#include "reg.h"
+#include "hal_btc.h"
+
+static bool bt_operation_on = false;
+
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, bool b_reject)
+{
+#if 0
+ struct rtl_priv rtlpriv = rtl_priv(hw);
+ PRX_TS_RECORD pRxTs = NULL;
+
+ if(b_reject){
+ // Do not allow receiving A-MPDU aggregation.
+ if (rtlpriv->mac80211.vendor == PEER_CISCO) {
+ if (pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Disallow AMPDU \n"));
+ pHTInfo->bAcceptAddbaReq = FALSE;
+ if(GetTs(Adapter, (PTS_COMMON_INFO*)(&pRxTs), pMgntInfo->Bssid, 0, RX_DIR, FALSE))
+ TsInitDelBA(Adapter, (PTS_COMMON_INFO)pRxTs, RX_DIR);
+ }
+ } else {
+ if (!pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU BT Idle\n"));
+ pHTInfo->bAcceptAddbaReq = TRUE;
+ }
+ }
+ } else {
+ if(rtlpriv->mac80211.vendor == PEER_CISCO) {
+ if (!pHTInfo->bAcceptAddbaReq) {
+ RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU \n"));
+ pHTInfo->bAcceptAddbaReq = TRUE;
+ }
+ }
+ }
+#endif
+}
+
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlpriv->link_info.b_busytraffic) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_IDLE;
+
+ if (rtlpriv->link_info.b_tx_busy_traffic) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_UPLINK;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+ }
+
+ if (rtlpriv->link_info.b_rx_busy_traffic) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_DOWNLINK;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ }
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_IDLE;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK;
+ }
+
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_G
+ || rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_LEGACY;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_LEGACY;
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT40;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20;
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT20;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40;
+ }
+ }
+
+ if (bt_operation_on) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT30;
+ } else {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT30;
+ }
+}
+
+
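+/*
+ * The two helpers below map the smoothed RSSI onto a 2- or 3-level
+ * low/medium/high state machine.  BT_FW_COEX_THRESH_TOL adds hysteresis:
+ * moving up a level requires exceeding the threshold by the tolerance,
+ * while moving down only requires dropping below the raw threshold.
+ */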
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+ u8 bt_rssi_state = 0;
+
+ undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw);
+
+ if (level_num == 2) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if (undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else {
+ if (undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n"));
+ return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+ }
+
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if (undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n"));
+ }
+ } else if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (undecoratedsmoothed_pwdb >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n"));
+ } else if (undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n"));
+ }
+ } else {
+ if (undecoratedsmoothed_pwdb < rssi_thresh1) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,("[DM][BT], RSSI_1 state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n"));
+ }
+ }
+ }
+
+ rtlpcipriv->btcoexist.bt_pre_rssi_state1 = bt_rssi_state;
+
+ return bt_rssi_state;
+}
+
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+ u8 bt_rssi_state = 0;
+
+ undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw);
+
+ if (level_num == 2) {
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)){
+ if (undecoratedsmoothed_pwdb
+ >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to High\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else {
+ if (undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI thresh error!!\n"));
+ return rtlpcipriv->btcoexist.bt_pre_rssi_state;
+ }
+ if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) {
+ if (undecoratedsmoothed_pwdb
+ >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Low\n"));
+ }
+ } else if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) ||
+ (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) {
+ if (undecoratedsmoothed_pwdb
+ >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
+ bt_rssi_state = BT_RSSI_STATE_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to High\n"));
+ } else if (undecoratedsmoothed_pwdb < rssi_thresh) {
+ bt_rssi_state = BT_RSSI_STATE_LOW;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_LOW;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Low\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at Medium\n"));
+ }
+ } else {
+ if (undecoratedsmoothed_pwdb < rssi_thresh1) {
+ bt_rssi_state = BT_RSSI_STATE_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ |= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
+ rtlpcipriv->btcoexist.current_state
+ &= ~BT_COEX_STATE_WIFI_RSSI_LOW;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state switch to Medium\n"));
+ } else {
+ bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ rtlpcipriv->btcoexist.bt_pre_rssi_state = bt_rssi_state;
+ return bt_rssi_state;
+}
+
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long undecoratedsmoothed_pwdb = 0;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ undecoratedsmoothed_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+ } else {
+ undecoratedsmoothed_pwdb
+ = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_get_rx_ss() = %ld\n", undecoratedsmoothed_pwdb));
+
+ return undecoratedsmoothed_pwdb;
+}
+
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+ bool b_balance_on, u8 ms0, u8 ms1)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[3] = {0};
+
+ if (b_balance_on) {
+ h2c_parameter[2] = 1;
+ h2c_parameter[1] = ms1;
+ h2c_parameter[0] = ms0;
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ h2c_parameter[2] = 0;
+ h2c_parameter[1] = 0;
+ h2c_parameter[0] = 0;
+ }
+ rtlpcipriv->btcoexist.b_balance_on = b_balance_on;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+ b_balance_on ? "ON" : "OFF", ms0, ms1,
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | h2c_parameter[2]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if (type == BT_AGCTABLE_OFF) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable Off!\n"));
+ rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x611f0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x60200001);
+
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x32000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x71000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xb0000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xfc000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_G1, 0xfffff, 0x30355);
+ } else if (type == BT_AGCTABLE_ON) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable On!\n"));
+ rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x4b1f0001);
+ rtl_write_dword(rtlpriv, 0xc78, 0x4a200001);
+
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0xdc000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x90000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x51000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_AGC_HP, 0xfffff, 0x12000);
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A,
+ RF_RX_G1, 0xfffff, 0x00355);
+
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ }
+}
+
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if (type == BT_BB_BACKOFF_OFF) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel Off!\n"));
+ rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
+ } else if (type == BT_BB_BACKOFF_ON) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel On!\n"));
+ rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ }
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_fw_coex_all_off()\n"));
+
+ if (rtlpcipriv->btcoexist.b_fw_coexist_all_off)
+ return;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_fw_coex_all_off(), real Do\n"));
+ rtl8821ae_dm_bt_fw_coex_all_off_8723a(hw);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_sw_coex_all_off()\n"));
+
+ if (rtlpcipriv->btcoexist.b_sw_coexist_all_off)
+ return;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_sw_coex_all_off(), real Do\n"));
+ rtl8821ae_dm_bt_sw_coex_all_off_8723a(hw);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = true;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_hw_coex_all_off()\n"));
+
+ if (rtlpcipriv->btcoexist.b_hw_coexist_all_off)
+ return;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("rtl8821ae_dm_bt_hw_coex_all_off(), real Do\n"));
+
+ rtl8821ae_dm_bt_hw_coex_all_off_8723a(hw);
+
+ rtlpcipriv->btcoexist.b_hw_coexist_all_off = true;
+}
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_fw_coex_all_off(hw);
+ rtl8821ae_dm_bt_sw_coex_all_off(hw);
+ rtl8821ae_dm_bt_hw_coex_all_off(hw);
+}
+
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if ((rtlpcipriv->btcoexist.previous_state
+ == rtlpcipriv->btcoexist.current_state)
+ && (rtlpcipriv->btcoexist.previous_state_h
+ == rtlpcipriv->btcoexist.current_state_h))
+ return false;
+ else
+ return true;
+}
+
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->link_info.b_tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
new file mode 100644
index 000000000000..b365f82f481c
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h
@@ -0,0 +1,169 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BT_COEXIST_H__
+#define __RTL8821AE_HAL_BT_COEXIST_H__
+
+#include "../wifi.h"
+
+/* The reg define is for 8723 */
+#define REG_HIGH_PRIORITY_TXRX 0x770
+#define REG_LOW_PRIORITY_TXRX 0x774
+
+#define BT_FW_COEX_THRESH_TOL 6
+#define BT_FW_COEX_THRESH_20 20
+#define BT_FW_COEX_THRESH_23 23
+#define BT_FW_COEX_THRESH_25 25
+#define BT_FW_COEX_THRESH_30 30
+#define BT_FW_COEX_THRESH_35 35
+#define BT_FW_COEX_THRESH_40 40
+#define BT_FW_COEX_THRESH_45 45
+#define BT_FW_COEX_THRESH_47 47
+#define BT_FW_COEX_THRESH_50 50
+#define BT_FW_COEX_THRESH_55 55
+
+#define BT_COEX_STATE_BT30 BIT(0)
+#define BT_COEX_STATE_WIFI_HT20 BIT(1)
+#define BT_COEX_STATE_WIFI_HT40 BIT(2)
+#define BT_COEX_STATE_WIFI_LEGACY BIT(3)
+
+#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4)
+#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5)
+#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6)
+#define BT_COEX_STATE_DEC_BT_POWER BIT(7)
+
+#define BT_COEX_STATE_WIFI_IDLE BIT(8)
+#define BT_COEX_STATE_WIFI_UPLINK BIT(9)
+#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10)
+
+#define BT_COEX_STATE_BT_INQ_PAGE BIT(11)
+#define BT_COEX_STATE_BT_IDLE BIT(12)
+#define BT_COEX_STATE_BT_UPLINK BIT(13)
+#define BT_COEX_STATE_BT_DOWNLINK BIT(14)
+
+#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION BIT(15)
+#define BT_COEX_STATE_BT_RSSI_LOW BIT(19)
+
+#define BT_COEX_STATE_PROFILE_HID BIT(20)
+#define BT_COEX_STATE_PROFILE_A2DP BIT(21)
+#define BT_COEX_STATE_PROFILE_PAN BIT(22)
+#define BT_COEX_STATE_PROFILE_SCO BIT(23)
+
+#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24)
+#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25)
+#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26)
+
+#define BT_COEX_STATE_BTINFO_COMMON BIT(30)
+#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31)
+#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(29)
+
+#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT(0)
+#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT(1)
+#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT(2)
+#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT(3)
+
+#define BT_RSSI_STATE_HIGH 0
+#define BT_RSSI_STATE_MEDIUM 1
+#define BT_RSSI_STATE_LOW 2
+#define BT_RSSI_STATE_STAY_HIGH 3
+#define BT_RSSI_STATE_STAY_MEDIUM 4
+#define BT_RSSI_STATE_STAY_LOW 5
+
+#define BT_AGCTABLE_OFF 0
+#define BT_AGCTABLE_ON 1
+#define BT_BB_BACKOFF_OFF 0
+#define BT_BB_BACKOFF_ON 1
+#define BT_FW_NAV_OFF 0
+#define BT_FW_NAV_ON 1
+
+#define BT_COEX_MECH_NONE 0
+#define BT_COEX_MECH_SCO 1
+#define BT_COEX_MECH_HID 2
+#define BT_COEX_MECH_A2DP 3
+#define BT_COEX_MECH_PAN 4
+#define BT_COEX_MECH_HID_A2DP 5
+#define BT_COEX_MECH_HID_PAN 6
+#define BT_COEX_MECH_PAN_A2DP 7
+#define BT_COEX_MECH_HID_SCO_ESCO 8
+#define BT_COEX_MECH_FTP_A2DP 9
+#define BT_COEX_MECH_COMMON 10
+#define BT_COEX_MECH_MAX 11
+
+#define BT_DBG_PROFILE_NONE 0
+#define BT_DBG_PROFILE_SCO 1
+#define BT_DBG_PROFILE_HID 2
+#define BT_DBG_PROFILE_A2DP 3
+#define BT_DBG_PROFILE_PAN 4
+#define BT_DBG_PROFILE_HID_A2DP 5
+#define BT_DBG_PROFILE_HID_PAN 6
+#define BT_DBG_PROFILE_PAN_A2DP 7
+#define BT_DBG_PROFILE_MAX 9
+
+#define BTINFO_B_FTP BIT(7)
+#define BTINFO_B_A2DP BIT(6)
+#define BTINFO_B_HID BIT(5)
+#define BTINFO_B_SCO_BUSY BIT(4)
+#define BTINFO_B_ACL_BUSY BIT(3)
+#define BTINFO_B_INQ_PAGE BIT(2)
+#define BTINFO_B_SCO_ESCO BIT(1)
+#define BTINFO_B_CONNECTION BIT(0)
+
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+ bool b_balance_on, u8 ms0, u8 ms1);
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+ bool b_reject);
+
+#if 0
+VOID
+BTDM_PWDBMonitor(
+ PADAPTER Adapter
+ );
+
+BOOLEAN
+BTDM_DIGByBTRSSI(
+ PADAPTER Adapter
+ );
+#endif
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
+#endif
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
new file mode 100644
index 000000000000..7b1d113505fb
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
@@ -0,0 +1,2069 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "hal_btc.h"
+#include "../pci.h"
+#include "phy.h"
+#include "fw.h"
+#include "reg.h"
+#include "def.h"
+#include "../btcoexist/rtl_btc.h"
+
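+/*
+ * Module-local coexistence state shared by the helpers in this file:
+ * the cached btdm settings, the high/low priority tx/rx counters and
+ * the BT inquiry/page start time used by the 2-antenna actions below.
+ */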
+static struct bt_coexist_8821ae hal_coex_8821ae;
+
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if(!rtlpcipriv->btcoexist.bt_coexistence)
+ return;
+
+ if(ppsc->b_inactiveps) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BT][DM], Before enter IPS, turn off all Coexist DM\n"));
+ rtlpcipriv->btcoexist.current_state = 0;
+ rtlpcipriv->btcoexist.previous_state = 0;
+ rtlpcipriv->btcoexist.current_state_h = 0;
+ rtlpcipriv->btcoexist.previous_state_h = 0;
+ rtl8821ae_btdm_coex_all_off(hw);
+ }
+}
+
+
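+/*
+ * Report RT_MEDIA_CONNECT when operating in IBSS mode or when the
+ * mac80211 link state is at least MAC80211_LINKED, otherwise
+ * RT_MEDIA_DISCONNECT.
+ */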
+enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum rt_media_status m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ m_status = RT_MEDIA_CONNECT;
+ }
+
+ return m_status;
+}
+
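+/*
+ * Tell the BT firmware about the WLAN association state via H2C
+ * command 0x19: byte 0 is the connect flag, byte 1 the current
+ * channel when associated, and byte 2 apparently encodes the
+ * bandwidth (0x30 for 40 MHz, 0x20 otherwise).
+ */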
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw *hw, bool mstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 h2c_parameter[3] ={0};
+ u8 chnl;
+
+ if(!rtlpcipriv->btcoexist.bt_coexistence)
+ return;
+
+ if(RT_MEDIA_CONNECT == mstatus)
+ h2c_parameter[0] = 0x1; // 0: disconnected, 1:connected
+ else
+ h2c_parameter[0] = 0x0;
+
+ if(mgnt_link_status_query(hw)) {
+ chnl = rtlphy->current_channel;
+ h2c_parameter[1] = chnl;
+ }
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40){
+ h2c_parameter[2] = 0x30;
+ } else {
+ h2c_parameter[2] = 0x20;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BTCoex], FW write 0x19=0x%x\n",
+ h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
+
+}
+
+
+bool rtl8821ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if(rtlpriv->link_info.b_busytraffic ||
+ rtlpriv->link_info.b_rx_busy_traffic ||
+ rtlpriv->link_info.b_tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
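+
+/* Push the five PS-TDMA parameter bytes to the firmware via H2C 0x3a. */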
+void rtl8821ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
+ u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[5] ={0};
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n",
+ h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 | h2c_parameter[3]<<8 | h2c_parameter[4]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter);
+}
+
+bool rtl8821ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Need to decrease bt power\n"));
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_DEC_BT_POWER;
+ return true;
+ }
+
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_DEC_BT_POWER;
+ return false;
+}
+
+
+bool rtl8821ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ if ((rtlpcipriv->btcoexist.previous_state
+ == rtlpcipriv->btcoexist.current_state)
+ &&(rtlpcipriv->btcoexist.previous_state_h
+ == rtlpcipriv->btcoexist.current_state_h)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[DM][BT], Coexist state do not change!!\n"));
+ return true;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[DM][BT], Coexist state changed!!\n"));
+ return false;
+ }
+}
+
+void rtl8821ae_dm_bt_set_coex_table(struct ieee80211_hw *hw,
+ u32 val_0x6c0, u32 val_0x6c8, u32 val_0x6cc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c0=0x%x\n", val_0x6c0));
+ rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c8=0x%x\n", val_0x6c8));
+ rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6cc=0x%x\n", val_0x6cc));
+ rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
+}
+
+void rtl8821ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (BT_PTA_MODE_ON == b_mode) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode on, "));
+ /* Enable GPIO 0/1/2/3/8 pins for bt */
+ rtl_write_byte(rtlpriv, 0x40, 0x20);
+ rtlpcipriv->btcoexist.b_hw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode off\n"));
+ rtl_write_byte(rtlpriv, 0x40, 0x0);
+ }
+}
+
+void rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, u8 type)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
+ /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu */
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Shrink RF Rx LPF corner!!\n"));
+ /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0, 0xf); */
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, 0xf0ff7);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else if(BT_RF_RX_LPF_CORNER_RESUME == type) {
+ /*Resume RF Rx LPF corner*/
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Resume RF Rx LPF corner!!\n"));
+ /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0,
+ * pHalData->btcoexist.BtRfRegOrigin1E); */
+ rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
+ rtlpcipriv->btcoexist.bt_rfreg_origin_1e);
+ }
+}
+
+void rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(struct ieee80211_hw *hw,
+ u8 ra_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 tmp_u1;
+
+ tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd);
+ tmp_u1 |= BIT(0);
+ if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set low penalty!!\n"));
+ tmp_u1 &= ~BIT(2);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else if(BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set normal!!\n"));
+ tmp_u1 |= BIT(2);
+ }
+
+ rtl_write_byte(rtlpriv, 0x4fd, tmp_u1);
+}
+
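+/*
+ * Reset the btdm_8821ae working copy to its default (coexistence
+ * mostly off) values; the per-profile action routines start from this
+ * baseline and override only the fields they care about.
+ */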
+void rtl8821ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw,
+ struct btdm_8821ae *p_btdm)
+{
+ p_btdm->b_all_off = false;
+ p_btdm->b_agc_table_en = false;
+ p_btdm->b_adc_back_off_on = false;
+ p_btdm->b2_ant_hid_en = false;
+ p_btdm->b_low_penalty_rate_adaptive = false;
+ p_btdm->b_rf_rx_lpf_shrink = false;
+ p_btdm->b_reject_aggre_pkt= false;
+
+ p_btdm->b_tdma_on = false;
+ p_btdm->tdma_ant = TDMA_2ANT;
+ p_btdm->tdma_nav = TDMA_NAV_OFF;
+ p_btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ p_btdm->fw_dac_swing_lvl = 0x20;
+
+ p_btdm->b_tra_tdma_on = false;
+ p_btdm->tra_tdma_ant = TDMA_2ANT;
+ p_btdm->tra_tdma_nav = TDMA_NAV_OFF;
+ p_btdm->b_ignore_wlan_act = false;
+
+ p_btdm->b_ps_tdma_on = false;
+ p_btdm->ps_tdma_byte[0] = 0x0;
+ p_btdm->ps_tdma_byte[1] = 0x0;
+ p_btdm->ps_tdma_byte[2] = 0x0;
+ p_btdm->ps_tdma_byte[3] = 0x8;
+ p_btdm->ps_tdma_byte[4] = 0x0;
+
+ p_btdm->b_pta_on = true;
+ p_btdm->val_0x6c0 = 0x5a5aaaaa;
+ p_btdm->val_0x6c8 = 0xcc;
+ p_btdm->val_0x6cc = 0x3;
+
+ p_btdm->b_sw_dac_swing_on = false;
+ p_btdm->sw_dac_swing_lvl = 0xc0;
+ p_btdm->wlan_act_hi = 0x20;
+ p_btdm->wlan_act_lo = 0x10;
+ p_btdm->bt_retry_index = 2;
+
+ p_btdm->b_dec_bt_pwr = false;
+}
+
+void rtl8821ae_dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw,
+ struct btdm_8821ae *p_btdm)
+{
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, p_btdm);
+ p_btdm->b_all_off = true;
+ p_btdm->b_pta_on = false;
+ p_btdm->wlan_act_hi = 0x10;
+}
+
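+/*
+ * Handle the "common" 2-antenna cases (both sides idle, wifi busy with
+ * BT idle, or BT busy with no wifi connection). Returns true when one
+ * of these cases applied and the generic settings were used.
+ */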
+bool rtl8821ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct btdm_8821ae btdm8821ae;
+ bool b_common = false;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ if(!rtl8821ae_dm_bt_is_wifi_busy(hw)
+ && !rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Wifi idle + Bt idle, bt coex mechanism always off!!\n"));
+ rtl8821ae_dm_bt_btdm_structure_reload_all_off(hw, &btdm8821ae);
+ b_common = true;
+ } else if (rtl8821ae_dm_bt_is_wifi_busy(hw)
+ && !rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Wifi non-idle + Bt disabled/idle!!\n"));
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_rf_rx_lpf_shrink = false;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ btdm8821ae.b_pta_on = true;
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+
+ b_common = true;
+ } else if (rtlpcipriv->btcoexist.b_bt_busy) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Bt non-idle!\n"));
+ if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi connection exists\n"));
+ b_common = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("No Wifi connection!\n"));
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = false;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ btdm8821ae.b_pta_on = true;
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0x0000ffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+
+ b_common = true;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if(b_common)
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_COMMON;
+
+ if (b_common && rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+
+ return b_common;
+}
+
+void rtl8821ae_dm_bt_set_sw_full_time_dac_swing(
+ struct ieee80211_hw * hw, bool b_sw_dac_swing_on, u32 sw_dac_swing_lvl)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (b_sw_dac_swing_on) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl));
+ rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, sw_dac_swing_lvl);
+ rtlpcipriv->btcoexist.b_sw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], SwDacSwing Off!\n"));
+ rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+ }
+}
+
+void rtl8821ae_dm_bt_set_fw_dec_bt_pwr(
+ struct ieee80211_hw *hw, bool b_dec_bt_pwr)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ h2c_parameter[0] = 0;
+
+ if (b_dec_bt_pwr) {
+ h2c_parameter[0] |= BIT(1);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
+ (b_dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
+ bool b_enable, bool b_dac_swing_on)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ if (b_enable) {
+ h2c_parameter[0] |= BIT(0);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+ if (b_dac_swing_on) {
+ h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
+ (b_enable ? "ON!!":"OFF!!"), (b_dac_swing_on ? "ON":"OFF"),
+ h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
+ bool b_enable, u8 ant_num, u8 nav_en, u8 dac_swing_en)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 h2c_parameter[1] ={0};
+ u8 h2c_parameter1[1] = {0};
+
+ h2c_parameter[0] = 0;
+ h2c_parameter1[0] = 0;
+
+ if(b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set BT PTA update manager to trigger update!!\n"));
+ h2c_parameter1[0] |= BIT(0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TDMA mode ON!!\n"));
+ h2c_parameter[0] |= BIT(0); /* function enable */
+ if (TDMA_1ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_1ANT\n"));
+ h2c_parameter[0] |= BIT(1);
+ } else if(TDMA_2ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_2ANT\n"));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+ }
+
+ if (TDMA_NAV_OFF == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_OFF\n"));
+ } else if (TDMA_NAV_ON == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_ON\n"));
+ h2c_parameter[0] |= BIT(2);
+ }
+
+ if (TDMA_DAC_SWING_OFF == dac_swing_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], TDMA_DAC_SWING_OFF\n"));
+ } else if(TDMA_DAC_SWING_ON == dac_swing_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], TDMA_DAC_SWING_ON\n"));
+ h2c_parameter[0] |= BIT(4);
+ }
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set BT PTA update manager to no update!!\n"));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TDMA mode OFF!!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW2AntTDMA, write 0x26=0x%x\n", h2c_parameter1[0]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", h2c_parameter[0]));
+ rtl8821ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
+
+ if (!b_enable) {
+ /* delay_ms(2);
+ * PlatformEFIOWrite1Byte(Adapter, 0x778, 0x1); */
+ }
+}
+
+
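+/* H2C 0x25, bit0: ask the BT firmware to ignore the WLAN_ACT signal. */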
+void rtl8821ae_dm_bt_set_fw_ignore_wlan_act( struct ieee80211_hw *hw, bool b_enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 h2c_parameter[1] ={0};
+
+ if (b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n"));
+ h2c_parameter[0] |= BIT(0); // function enable
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT doesn't ignore Wlan_Act!!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
+ h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
+ bool b_enable, u8 ant_num, u8 nav_en
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 h2c_parameter[2] ={0};
+
+
+ if (b_enable) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TTDMA mode ON!!\n"));
+ h2c_parameter[0] |= BIT(0); // function enable
+ if (TDMA_1ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_1ANT\n"));
+ h2c_parameter[0] |= BIT(1);
+ } else if (TDMA_2ANT == ant_num) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_2ANT\n"));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n"));
+ }
+
+ if (TDMA_NAV_OFF == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_OFF\n"));
+ } else if (TDMA_NAV_ON == nav_en) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_ON\n"));
+ h2c_parameter[1] |= BIT(0);
+ }
+
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], turn TTDMA mode OFF!!\n"));
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n",
+ h2c_parameter[0] << 8| h2c_parameter[1]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter);
+}
+
+
+void rtl8821ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
+ u8 dac_swing_lvl)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = dac_swing_lvl;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, bool b_enable)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = 0;
+
+ if(b_enable){
+ h2c_parameter[0] |= BIT(0);
+ rtlpcipriv->btcoexist.b_fw_coexist_all_off = false;
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set BT HID information=0x%x\n", b_enable));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
+ u8 retry_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] ={0};
+ h2c_parameter[0] = retry_index;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set BT Retry Index=%d\n", retry_index));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
+ u8 wlan_act_hi, u8 wlan_act_lo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter_hi[1] ={0};
+ u8 h2c_parameter_lo[1] ={0};
+ h2c_parameter_hi[0] = wlan_act_hi;
+ h2c_parameter_lo[0] = wlan_act_lo;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n", wlan_act_hi, wlan_act_lo));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]));
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]));
+
+ /* WLAN_ACT = High duration, unit:ms */
+ rtl8821ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi);
+ /* WLAN_ACT = Low duration, unit:3*625us */
+ rtl8821ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo);
+}
+
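+/*
+ * Apply a complete coexistence setting: compare it with the copy cached
+ * in hal_coex_8821ae.btdm, return early if nothing changed, otherwise
+ * log the differences, store the new setting and program the sw, hw
+ * (PTA/coex table) and fw (TDMA/PS-TDMA/2AntHID) mechanisms.
+ */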
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct btdm_8821ae *p_btdm_8821ae = &hal_coex_8821ae.btdm;
+ u8 i;
+
+ bool b_fw_current_inpsmode = false;
+ bool b_fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inpsmode));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&b_fw_ps_awake));
+
+ // Check whether the new setting differs from the cached one;
+ // if they are identical, skip programming it again.
+ if (memcmp(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae)) == 0) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], the same coexist setting, return!!\n"));
+ return;
+ } else { //save the new coexist setting
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], UPDATE TO NEW COEX SETTING!!\n"));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bAllOff=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_all_off, p_btdm->b_all_off));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_agc_table_en=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_agc_table_en, p_btdm->b_agc_table_en));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_adc_back_off_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_adc_back_off_on, p_btdm->b_adc_back_off_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b2_ant_hid_en, p_btdm->b2_ant_hid_en));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_low_penalty_rate_adaptive,
+ p_btdm->b_low_penalty_rate_adaptive));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_rf_rx_lpf_shrink, p_btdm->b_rf_rx_lpf_shrink));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_reject_aggre_pkt, p_btdm->b_reject_aggre_pkt));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_tdma_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_tdma_on, p_btdm->b_tdma_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdmaAnt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_ant, p_btdm->tdma_ant));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdmaNav=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_nav, p_btdm->tdma_nav));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tdma_dac_swing, p_btdm->tdma_dac_swing));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x \n",
+ p_btdm_8821ae->fw_dac_swing_lvl, p_btdm->fw_dac_swing_lvl));
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_tra_tdma_on, p_btdm->b_tra_tdma_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tra_tdma_ant, p_btdm->tra_tdma_ant));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new traTdmaNav=0x%x/ 0x%x \n",
+ p_btdm_8821ae->tra_tdma_nav, p_btdm->tra_tdma_nav));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_ps_tdma_on, p_btdm->b_ps_tdma_on));
+ for(i=0; i<5; i++)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x \n",
+ p_btdm_8821ae->ps_tdma_byte[i], p_btdm->ps_tdma_byte[i]));
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_ignore_wlan_act, p_btdm->b_ignore_wlan_act));
+
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new bPtaOn=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_pta_on, p_btdm->b_pta_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6c0=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6c0, p_btdm->val_0x6c0));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6c8=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6c8, p_btdm->val_0x6c8));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new val_0x6cc=0x%x/ 0x%x \n",
+ p_btdm_8821ae->val_0x6cc, p_btdm->val_0x6cc));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new b_sw_dac_swing_on=0x%x/ 0x%x \n",
+ p_btdm_8821ae->b_sw_dac_swing_on, p_btdm->b_sw_dac_swing_on));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x \n",
+ p_btdm_8821ae->sw_dac_swing_lvl, p_btdm->sw_dac_swing_lvl));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new wlanActHi=0x%x/ 0x%x \n",
+ p_btdm_8821ae->wlan_act_hi, p_btdm->wlan_act_hi));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new wlanActLo=0x%x/ 0x%x \n",
+ p_btdm_8821ae->wlan_act_lo, p_btdm->wlan_act_lo));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], original/new btRetryIndex=0x%x/ 0x%x \n",
+ p_btdm_8821ae->bt_retry_index, p_btdm->bt_retry_index));
+
+ memcpy(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae));
+ }
+ /*
+ * While a BT operation (inquiry/paging/pairing) is in progress,
+ * only TDMA needs to be turned off; tell the firmware to ignore
+ * WLAN_ACT and return.
+ */
+
+ if (rtlpcipriv->btcoexist.b_hold_for_bt_operation) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], set to ignore wlanAct for BT OP!!\n"));
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, true);
+ return;
+ }
+
+ if (p_btdm->b_all_off) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex], disable all coexist mechanism !!\n"));
+ rtl8821ae_btdm_coex_all_off(hw);
+ return;
+ }
+
+ rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, p_btdm->b_reject_aggre_pkt);
+
+ if(p_btdm->b_low_penalty_rate_adaptive)
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
+ else
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_NORMAL);
+
+ if(p_btdm->b_rf_rx_lpf_shrink)
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_SHRINK);
+ else
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+
+ if(p_btdm->b_agc_table_en)
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON);
+ else
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+
+ if(p_btdm->b_adc_back_off_on)
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_ON);
+ else
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+
+ rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, p_btdm->bt_retry_index);
+
+ rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, p_btdm->fw_dac_swing_lvl);
+ rtl8821ae_dm_bt_set_fw_wlan_act(hw, p_btdm->wlan_act_hi, p_btdm->wlan_act_lo);
+
+ rtl8821ae_dm_bt_set_coex_table(hw, p_btdm->val_0x6c0,
+ p_btdm->val_0x6c8, p_btdm->val_0x6cc);
+ rtl8821ae_dm_bt_set_hw_pta_mode(hw, p_btdm->b_pta_on);
+
+ /*
+ * Note: TDMA and 2AntHID are mutually exclusive; only one of them
+ * may be enabled at a time. Turn off the mechanisms that must be
+ * disabled first, then turn on the ones that must be enabled.
+ */
+#if 1
+ if(p_btdm->b2_ant_hid_en) {
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on,
+ p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+ p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn off Pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+
+ // turn on 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+ } else if(p_btdm->b_tdma_on) {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+
+ // turn on tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ } else if(p_btdm->b_ps_tdma_on) {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn on pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw,
+ p_btdm->ps_tdma_byte[0],
+ p_btdm->ps_tdma_byte[1],
+ p_btdm->ps_tdma_byte[2],
+ p_btdm->ps_tdma_byte[3],
+ p_btdm->ps_tdma_byte[4]);
+ } else {
+ // turn off 2AntHid
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+
+ // turn off tdma
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+
+ // turn off pstdma
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300.
+ }
+#else
+ if (p_btdm->b_tdma_on) {
+ if(p_btdm->b_ps_tdma_on) {
+ } else {
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ /* Turn off 2AntHID first then turn tdma ON */
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true,
+ p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ } else {
+ /* Turn off tdma first then turn 2AntHID ON if need */
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant,
+ p_btdm->tdma_nav, p_btdm->tdma_dac_swing);
+ if (p_btdm->b2_ant_hid_en) {
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
+ } else {
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ }
+ if(p_btdm->b_ps_tdma_on) {
+ rtl8821ae_dm_bt_set_fw_3a(hw, p_btdm->ps_tdma_byte[0], p_btdm->ps_tdma_byte[1],
+ p_btdm->ps_tdma_byte[2], p_btdm->ps_tdma_byte[3], p_btdm->ps_tdma_byte[4]);
+ } else {
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ }
+ }
+#endif
+
+ /*
+ * Note:
+ * Delay here so the sw DAC swing setting takes effect, because
+ * rtl8821ae_dm_bt_set_fw_2_ant_hid() and rtl8821ae_dm_bt_set_fw_tdma_ctrl()
+ * overwrite register 0x880.
+ */
+ mdelay(30);
+ rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw,
+ p_btdm->b_sw_dac_swing_on, p_btdm->sw_dac_swing_lvl);
+ rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, p_btdm->b_dec_bt_pwr);
+}
+
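+/*
+ * The bt_state_update helpers mark BT as busy (clearing
+ * BT_COEX_STATE_BT_IDLE) for the active profile; the PAN variant
+ * treats a large tx/rx asymmetry on the low-priority queue as idle.
+ */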
+void rtl8821ae_dm_bt_bt_state_update_2_ant_hid(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], HID busy!!\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+}
+
+void rtl8821ae_dm_bt_bt_state_update_2_ant_pan(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ bool b_idle = false;
+
+ /* Guard the divisions against zero counters. */
+ if (hal_coex_8821ae.low_priority_tx >=
+ hal_coex_8821ae.low_priority_rx) {
+ if ((hal_coex_8821ae.low_priority_rx == 0) ||
+ ((hal_coex_8821ae.low_priority_tx/
+ hal_coex_8821ae.low_priority_rx) > 10)) {
+ b_idle = true;
+ }
+ } else {
+ if ((hal_coex_8821ae.low_priority_tx == 0) ||
+ ((hal_coex_8821ae.low_priority_rx/
+ hal_coex_8821ae.low_priority_tx) > 10)) {
+ b_idle = true;
+ }
+ }
+
+ if(!b_idle) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN busy!!\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN idle!!\n"));
+ }
+}
+
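+/*
+ * Per-profile 2-antenna actions: reload the default btdm settings,
+ * pick coex-table/sw/fw parameters according to the channel bandwidth
+ * and the RSSI state, and apply them only when the coexist state
+ * actually changed.
+ */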
+void rtl8821ae_dm_bt_2_ant_sco_action(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ /* sw mechanism */
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ /* fw mechanism */
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state
+ = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5aaaaa;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ /* sw mechanism */
+ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ /* fw mechanism */
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw))
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+}
+
+void rtl8821ae_dm_bt_2_ant_hid_action(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+ btdm8821ae.b_ignore_wlan_act = true;
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+
+ btdm8821ae.b_tra_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state =
+ rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = true;
+ btdm8821ae.sw_dac_swing_lvl = 0x20;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = false;
+ btdm8821ae.b_tdma_on = false;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF;
+ btdm8821ae.b2_ant_hid_en = true;
+ btdm8821ae.fw_dac_swing_lvl = 0x20;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+
+void rtl8821ae_dm_bt_2_ant_2_dp_action_no_profile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+ u8 bt_rssi_state;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT40\n"));
+ if (rtl8821ae_dm_bt_is_wifi_up_link(hw)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+ /* coex table */
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x18;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ // btdm8821ae.wlanActHi = 0x20;
+ // btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x40;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0);
+
+ if(rtl8821ae_dm_bt_is_wifi_up_link(hw))
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcccc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x18;
+ }
+ else
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x5a5a5a5a;
+ btdm8821ae.val_0x6c8 = 0xcc;
+ btdm8821ae.val_0x6cc = 0x3;
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ else
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+ // fw mechanism
+ btdm8821ae.b_tra_tdma_on = true;
+ btdm8821ae.b_tdma_on = true;
+ btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON;
+ btdm8821ae.b2_ant_hid_en = false;
+ //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP);
+ //if(btSpec >= BT_SPEC_2_1_EDR)
+ {
+ btdm8821ae.wlan_act_hi = 0x10;
+ btdm8821ae.wlan_act_lo = 0x10;
+ }
+ //else
+ //{
+ //btdm8821ae.wlanActHi = 0x20;
+ //btdm8821ae.wlanActLo = 0x20;
+ //}
+ btdm8821ae.bt_retry_index = 2;
+ btdm8821ae.fw_dac_swing_lvl = 0x40;
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+
+//============================================================
+// Externally called functions (originally prefixed BTDM_)
+//============================================================
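+/*
+ * Accessors for the BT traffic counters kept in hal_coex_8821ae; the
+ * action routines use their sum to estimate the BT traffic load.
+ */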
+u32 rtl8821ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw)
+{
+ u32 counters=0;
+
+ counters = hal_coex_8821ae.high_priority_tx + hal_coex_8821ae.high_priority_rx ;
+ return counters;
+}
+
+u32 rtl8821ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw)
+{
+ u32 counters=0;
+
+ counters = hal_coex_8821ae.low_priority_tx + hal_coex_8821ae.low_priority_rx ;
+ return counters;
+}
+
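+/*
+ * Map the summed BT tx/rx counters onto levels 0-3 using the
+ * BT_TXRX_CNT_THRES_* thresholds (the trace messages elsewhere suggest
+ * 1200 and 1400 for the lower two) and mirror the level into
+ * current_state_h.
+ */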
+u8 rtl8821ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u32 bt_tx_rx_cnt = 0;
+ u8 bt_tx_rx_cnt_lvl = 0;
+
+ bt_tx_rx_cnt = rtl8821ae_dm_bt_tx_rx_couter_h(hw)
+ + rtl8821ae_dm_bt_tx_rx_couter_l(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt));
+
+ rtlpcipriv->btcoexist.current_state_h &=
+ ~(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1 |
+ BT_COEX_STATE_BT_CNT_LEVEL_2);
+
+ if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 3\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_3;
+ } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 2\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_2;
+ } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 1\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_1;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT TxRx Counters at level 0\n"));
+ bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
+ rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_0;
+ }
+ return bt_tx_rx_cnt_lvl;
+}
+
+
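+/*
+ * HID/SCO/eSCO (and FTP/A2DP) actions: in addition to bandwidth and
+ * RSSI these pick the PS-TDMA byte pattern from the BT traffic counter
+ * level, and the HID/SCO case switches to a fixed pattern while a BT
+ * inquiry/page scan is in progress.
+ */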
+void rtl8821ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+
+ u8 bt_rssi_state, bt_rssi_state1;
+ u8 bt_tx_rx_cnt_lvl = 0;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx counter level = %d\n", bt_tx_rx_cnt_lvl));
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+ bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("Wifi rssi-1 high \n"));
+ // Only needed when rssi is high; when rssi is low
+ // the firmware modifies this value itself.
+ rtl_write_byte(rtlpriv, 0x883, 0x40);
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ }
+
+ if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ // Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+ if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+ (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x2;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)) {
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+void rtl8821ae_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct btdm_8821ae btdm8821ae;
+
+ u8 bt_rssi_state, bt_rssi_state1;
+ u32 bt_tx_rx_cnt_lvl = 0;
+
+ rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae);
+
+ btdm8821ae.b_rf_rx_lpf_shrink = true;
+ btdm8821ae.b_low_penalty_rate_adaptive = true;
+ btdm8821ae.b_reject_aggre_pkt = false;
+
+ bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx counter level = %d\n", bt_tx_rx_cnt_lvl));
+
+ if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 37, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n"));
+ bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
+ bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0);
+
+ // coex table
+ btdm8821ae.val_0x6c0 = 0x55555555;
+ btdm8821ae.val_0x6c8 = 0xffff;
+ btdm8821ae.val_0x6cc = 0x3;
+
+ // sw mechanism
+ if( (bt_rssi_state == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n"));
+ btdm8821ae.b_agc_table_en = true;
+ btdm8821ae.b_adc_back_off_on = true;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n"));
+ btdm8821ae.b_agc_table_en = false;
+ btdm8821ae.b_adc_back_off_on = false;
+ btdm8821ae.b_sw_dac_swing_on = false;
+ }
+
+ // fw mechanism
+ btdm8821ae.b_ps_tdma_on = true;
+ if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
+ (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 high \n"));
+			// Only needed when the rssi is high; when it is low
+			// the firmware modifies this value by itself.
+ rtl_write_byte(rtlpriv, 0x883, 0x40);
+ if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x81;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n"));
+ if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xa;
+ btdm8821ae.ps_tdma_byte[2] = 0xa;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n"));
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0xf;
+ btdm8821ae.ps_tdma_byte[2] = 0xf;
+ btdm8821ae.ps_tdma_byte[3] = 0x0;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+ }
+ }
+
+ if(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) {
+ btdm8821ae.b_dec_bt_pwr = true;
+ }
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl));
+
+ if( (hal_coex_8821ae.bt_inq_page_start_time) ||
+ (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) )
+ {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n"));
+ btdm8821ae.b_ps_tdma_on = true;
+ btdm8821ae.ps_tdma_byte[0] = 0xa3;
+ btdm8821ae.ps_tdma_byte[1] = 0x5;
+ btdm8821ae.ps_tdma_byte[2] = 0x5;
+ btdm8821ae.ps_tdma_byte[3] = 0x83;
+ btdm8821ae.ps_tdma_byte[4] = 0x80;
+ }
+
+ if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)){
+ rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae);
+ }
+}
+
+void rtl8821ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 cur_time;
+ cur_time = jiffies;
+ if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ //pHalData->btcoexist.halCoex8821ae.btInquiryPageCnt++;
+ // bt inquiry or page is started.
+ if(hal_coex_8821ae.bt_inq_page_start_time == 0){
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE;
+ hal_coex_8821ae.bt_inq_page_start_time = cur_time;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT Inquiry/page is started at time : 0x%x \n",
+ hal_coex_8821ae.bt_inq_page_start_time));
+ }
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x \n",
+ hal_coex_8821ae.bt_inq_page_start_time, cur_time));
+
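+	/* Clear the inquiry/page state once it has been pending for 10 seconds. */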
+ if (hal_coex_8821ae.bt_inq_page_start_time) {
+ if ((((long)cur_time - (long)hal_coex_8821ae.bt_inq_page_start_time) / HZ) >= 10) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT Inquiry/page >= 10sec!!!"));
+ hal_coex_8821ae.bt_inq_page_start_time = 0;
+ rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+ }
+ }
+
+#if 0
+ if (hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ hal_coex_8821ae.b_c2h_bt_inquiry_page++;
+ // bt inquiry or page is started.
+ } if(hal_coex_8821ae.b_c2h_bt_inquiry_page) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE;
+ if(hal_coex_8821ae.bt_inquiry_page_cnt >= 4)
+ hal_coex_8821ae.bt_inquiry_page_cnt = 0;
+ hal_coex_8821ae.bt_inquiry_page_cnt++;
+ } else {
+ rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE;
+ }
+#endif
+}
+
+void rtl8821ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ rtlpcipriv->btcoexist.current_state &= ~\
+ (BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP|
+ BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
+
+ rtlpcipriv->btcoexist.current_state &= ~\
+ (BT_COEX_STATE_BTINFO_COMMON | BT_COEX_STATE_BTINFO_B_HID_SCOESCO|
+ BT_COEX_STATE_BTINFO_B_FTP_A2DP);
+}
+
+void _rtl8821ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 bt_retry_cnt;
+ u8 bt_info_original;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex] Get bt info by fw!!\n"));
+
+ _rtl8821ae_dm_bt_check_wifi_state(hw);
+
+ if (hal_coex_8821ae.b_c2h_bt_info_req_sent) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("[BTCoex] c2h for bt_info not rcvd yet!!\n"));
+ }
+
+ bt_retry_cnt = hal_coex_8821ae.bt_retry_cnt;
+ bt_info_original = hal_coex_8821ae.c2h_bt_info_original;
+
+	// While BT is doing inquiry or page scan, H2C 0x25 has to be set so
+	// that WLAN_ACT is ignored for four consecutive 2-second periods.
+ rtl8821ae_dm_bt_inq_page_monitor(hw);
+ rtl8821ae_dm_bt_reset_action_profile_state(hw);
+
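+	/*
+	 * Dispatch on the profile bits of the last C2H BT info byte:
+	 * HID/SCO/eSCO and FTP/A2DP traffic use different TDMA settings, and
+	 * anything unrecognised falls back to the HID/SCO/eSCO handling.
+	 */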
+ if(rtl8821ae_dm_bt_is_2_ant_common_action(hw)) {
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON;
+ rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_COMMON;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Action 2-Ant common.\n"));
+ } else {
+ if( (bt_info_original & BTINFO_B_HID) ||
+ (bt_info_original & BTINFO_B_SCO_BUSY) ||
+ (bt_info_original & BTINFO_B_SCO_ESCO) ) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_HID_SCO_ESCO;
+ rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_HID_SCO_ESCO;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n"));
+ rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+ } else if( (bt_info_original & BTINFO_B_FTP) ||
+ (bt_info_original & BTINFO_B_A2DP) ) {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_FTP_A2DP;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_FTP_A2DP;
+ rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_FTP_A2DP;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("BTInfo: bFTP|bA2DP\n"));
+ rtl8821ae_dm_bt_2_ant_ftp_a2dp(hw);
+ } else {
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
+ rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_NONE;
+ rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_NONE;
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: undefined case!!!!\n"));
+ rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw);
+ }
+ }
+}
+
+void _rtl8821ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw)
+{
+ return;
+}
+
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3);
+ rtl8821ae_dm_bt_set_hw_pta_mode(hw, true);
+}
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, false);
+ rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
+ rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
+ rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false, TDMA_2ANT, TDMA_NAV_OFF);
+ rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT,
+ TDMA_NAV_OFF, TDMA_DAC_SWING_OFF);
+ rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, 0);
+ rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false);
+ rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, 2);
+ rtl8821ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10);
+ rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, false);
+}
+
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw)
+{
+ rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
+ rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF);
+ rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, false);
+
+ rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw,
+ BT_TX_RATE_ADAPTIVE_NORMAL);
+ rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
+ rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0);
+}
+
+void rtl8821ae_dm_bt_query_bt_information(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 h2c_parameter[1] = {0};
+
+ hal_coex_8821ae.b_c2h_bt_info_req_sent = true;
+
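+	/* Ask the firmware for a BT info report; the C2H reply is handled in rtl8821ae_dm_bt_parse_bt_info(). */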
+ h2c_parameter[0] |= BIT(0);
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]));
+
+ rtl8821ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
+}
+
+void rtl8821ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u32 reg_hp_tx_rx, reg_lp_tx_rx, u32_tmp;
+ u32 reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0;
+
+ reg_hp_tx_rx = REG_HIGH_PRIORITY_TXRX;
+ reg_lp_tx_rx = REG_LOW_PRIORITY_TXRX;
+
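+	/* Each counter register packs the TX count in the low word and the RX count in the high word. */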
+ u32_tmp = rtl_read_dword(rtlpriv, reg_hp_tx_rx);
+ reg_hp_tx = u32_tmp & MASKLWORD;
+ reg_hp_rx = (u32_tmp & MASKHWORD)>>16;
+
+ u32_tmp = rtl_read_dword(rtlpriv, reg_lp_tx_rx);
+ reg_lp_tx = u32_tmp & MASKLWORD;
+ reg_lp_rx = (u32_tmp & MASKHWORD)>>16;
+
+ if(rtlpcipriv->btcoexist.lps_counter > 1) {
+ reg_hp_tx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_hp_rx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_lp_tx %= rtlpcipriv->btcoexist.lps_counter;
+ reg_lp_rx %= rtlpcipriv->btcoexist.lps_counter;
+ }
+
+ hal_coex_8821ae.high_priority_tx = reg_hp_tx;
+ hal_coex_8821ae.high_priority_rx = reg_hp_rx;
+ hal_coex_8821ae.low_priority_tx = reg_lp_tx;
+ hal_coex_8821ae.low_priority_rx = reg_lp_rx;
+
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx));
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
+ reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx));
+ rtlpcipriv->btcoexist.lps_counter = 0;
+ //rtl_write_byte(rtlpriv, 0x76e, 0xc);
+}
+
+void rtl8821ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+	bool bt_alive = true;
+
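+	/*
+	 * All-zero, all-0xeaea or all-0xffff counter snapshots are taken as
+	 * "no BT activity / invalid read"; two such polls in a row mark BT
+	 * as disabled.
+	 */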
+ if (hal_coex_8821ae.high_priority_tx == 0 &&
+ hal_coex_8821ae.high_priority_rx == 0 &&
+ hal_coex_8821ae.low_priority_tx == 0 &&
+ hal_coex_8821ae.low_priority_rx == 0) {
+		bt_alive = false;
+ }
+ if (hal_coex_8821ae.high_priority_tx == 0xeaea &&
+ hal_coex_8821ae.high_priority_rx == 0xeaea &&
+ hal_coex_8821ae.low_priority_tx == 0xeaea &&
+ hal_coex_8821ae.low_priority_rx == 0xeaea) {
+		bt_alive = false;
+ }
+ if (hal_coex_8821ae.high_priority_tx == 0xffff &&
+ hal_coex_8821ae.high_priority_rx == 0xffff &&
+ hal_coex_8821ae.low_priority_tx == 0xffff &&
+ hal_coex_8821ae.low_priority_rx == 0xffff) {
+		bt_alive = false;
+ }
+	if (bt_alive) {
+ rtlpcipriv->btcoexist.bt_active_zero_cnt = 0;
+ rtlpcipriv->btcoexist.b_cur_bt_disabled = false;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is enabled !!\n"));
+ } else {
+ rtlpcipriv->btcoexist.bt_active_zero_cnt++;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,
+ ("8821AE bt all counters=0, %d times!!\n",
+ rtlpcipriv->btcoexist.bt_active_zero_cnt));
+ if (rtlpcipriv->btcoexist.bt_active_zero_cnt >= 2) {
+ rtlpcipriv->btcoexist.b_cur_bt_disabled = true;
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is disabled !!\n"));
+ }
+ }
+ if (rtlpcipriv->btcoexist.b_pre_bt_disabled !=
+ rtlpcipriv->btcoexist.b_cur_bt_disabled) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is from %s to %s!!\n",
+ (rtlpcipriv->btcoexist.b_pre_bt_disabled ? "disabled":"enabled"),
+ (rtlpcipriv->btcoexist.b_cur_bt_disabled ? "disabled":"enabled")));
+ rtlpcipriv->btcoexist.b_pre_bt_disabled
+ = rtlpcipriv->btcoexist.b_cur_bt_disabled;
+ }
+}
+
+
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ rtl8821ae_dm_bt_query_bt_information(hw);
+ rtl8821ae_dm_bt_bt_hw_counters_monitor(hw);
+ rtl8821ae_dm_bt_bt_enable_disable_check(hw);
+
+ if (rtlpcipriv->btcoexist.bt_ant_num == ANT_X2) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], 2 Ant mechanism\n"));
+ _rtl8821ae_dm_bt_coexist_2_ant(hw);
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], 1 Ant mechanism\n"));
+ _rtl8821ae_dm_bt_coexist_1_ant(hw);
+ }
+
+ if (!rtl8821ae_dm_bt_is_same_coexist_state(hw)) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+ rtlpcipriv->btcoexist.previous_state_h,
+ rtlpcipriv->btcoexist.previous_state,
+ rtlpcipriv->btcoexist.current_state_h,
+ rtlpcipriv->btcoexist.current_state));
+ rtlpcipriv->btcoexist.previous_state
+ = rtlpcipriv->btcoexist.current_state;
+ rtlpcipriv->btcoexist.previous_state_h
+ = rtlpcipriv->btcoexist.current_state_h;
+ }
+}
+
+void rtl8821ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw, u8 * tmp_buf, u8 len)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ u8 bt_info;
+ u8 i;
+
+ hal_coex_8821ae.b_c2h_bt_info_req_sent = false;
+ hal_coex_8821ae.bt_retry_cnt = 0;
+ for (i = 0; i < len; i++) {
+ if (i == 0) {
+ hal_coex_8821ae.c2h_bt_info_original = tmp_buf[i];
+ } else if (i == 1) {
+ hal_coex_8821ae.bt_retry_cnt = tmp_buf[i];
+ }
+ if(i == len-1) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x]", tmp_buf[i]));
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x, ", tmp_buf[i]));
+ }
+ }
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,
+ ("BT info bt_info (Data)= 0x%x\n",hal_coex_8821ae.c2h_bt_info_original));
+ bt_info = hal_coex_8821ae.c2h_bt_info_original;
+
+ if(bt_info & BIT(2)){
+ hal_coex_8821ae.b_c2h_bt_inquiry_page = true;
+ } else {
+ hal_coex_8821ae.b_c2h_bt_inquiry_page = false;
+ }
+
+ if (bt_info & BTINFO_B_CONNECTION) {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=true\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = true;
+ rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE;
+ } else {
+ RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=false\n"));
+ rtlpcipriv->btcoexist.b_bt_busy = false;
+ rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_IDLE;
+ }
+}
+
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct c2h_evt_hdr c2h_event;
+ u8 * ptmp_buf = NULL;
+ u8 index = 0;
+ u8 u1b_tmp = 0;
+ memset(&c2h_event, 0, sizeof(c2h_event));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
+ RT_TRACE(COMP_FW, DBG_DMESG,
+		("REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp));
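+	/* Byte 0: command id in the low nibble, payload length in the high nibble; byte 1: sequence number. */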
+ c2h_event.cmd_id = u1b_tmp & 0xF;
+ c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4;
+ c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
+ RT_TRACE(COMP_FW, DBG_DMESG, ("cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+ c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq));
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF);
+ if (u1b_tmp == C2H_EVT_HOST_CLOSE) {
+ return;
+ } else if (u1b_tmp != C2H_EVT_FW_CLOSE) {
+ rtl_write_byte(rtlpriv, 0x1AF, 0x00);
+ return;
+ }
+ ptmp_buf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
+ if(ptmp_buf == NULL) {
+ RT_TRACE(COMP_FW, DBG_TRACE, ("malloc cmd buf failed\n"));
+ return;
+ }
+
+ /* Read the content */
+ for (index = 0; index < c2h_event.cmd_len; index ++) {
+ ptmp_buf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 2+ index);
+ }
+
+ switch(c2h_event.cmd_id) {
+ case C2H_BT_RSSI:
+ break;
+
+ case C2H_BT_OP_MODE:
+ break;
+
+ case BT_INFO:
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id));
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq));
+ RT_TRACE(COMP_FW, DBG_TRACE,
+ ("BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]));
+
+ if (rtlpriv->cfg->ops->get_btc_status()){
+ rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, ptmp_buf, c2h_event.cmd_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+	kfree(ptmp_buf);
+
+ rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE);
+}
+
+
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
new file mode 100644
index 000000000000..a94474faca49
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HAL_BTC_H__
+#define __RTL8821AE_HAL_BTC_H__
+
+#include "../wifi.h"
+#include "btc.h"
+#include "hal_bt_coexist.h"
+
+#define BT_TXRX_CNT_THRES_1 1200
+#define BT_TXRX_CNT_THRES_2 1400
+#define BT_TXRX_CNT_THRES_3 3000
+#define BT_TXRX_CNT_LEVEL_0 0 // < 1200
+#define BT_TXRX_CNT_LEVEL_1 1 // >= 1200 && < 1400
+#define BT_TXRX_CNT_LEVEL_2 2 // >= 1400
+#define BT_TXRX_CNT_LEVEL_3 3
+
+
+
+#define BT_COEX_DISABLE 0
+#define BT_Q_PKT_OFF 0
+#define BT_Q_PKT_ON 1
+
+#define BT_TX_PWR_OFF 0
+#define BT_TX_PWR_ON 1
+
+/* TDMA mode definition */
+#define TDMA_2ANT 0
+#define TDMA_1ANT 1
+#define TDMA_NAV_OFF 0
+#define TDMA_NAV_ON 1
+#define TDMA_DAC_SWING_OFF 0
+#define TDMA_DAC_SWING_ON 1
+
+/* PTA mode related definition */
+#define BT_PTA_MODE_OFF 0
+#define BT_PTA_MODE_ON 1
+
+/* Penalty Tx Rate Adaptive */
+#define BT_TX_RATE_ADAPTIVE_NORMAL 0
+#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1
+
+/* RF Corner */
+#define BT_RF_RX_LPF_CORNER_RESUME 0
+#define BT_RF_RX_LPF_CORNER_SHRINK 1
+
+#define C2H_EVT_HOST_CLOSE 0x00
+#define C2H_EVT_FW_CLOSE 0xFF
+
+enum bt_traffic_mode {
+ BT_MOTOR_EXT_BE = 0x00,
+ BT_MOTOR_EXT_GUL = 0x01,
+ BT_MOTOR_EXT_GUB = 0x02,
+ BT_MOTOR_EXT_GULB = 0x03
+};
+
+enum bt_traffic_mode_profile {
+ BT_PROFILE_NONE,
+ BT_PROFILE_A2DP,
+ BT_PROFILE_PAN,
+ BT_PROFILE_HID,
+ BT_PROFILE_SCO
+};
+
+enum hci_ext_bt_operation {
+ HCI_BT_OP_NONE = 0x0,
+ HCI_BT_OP_INQUIRE_START = 0x1,
+ HCI_BT_OP_INQUIRE_FINISH = 0x2,
+ HCI_BT_OP_PAGING_START = 0x3,
+ HCI_BT_OP_PAGING_SUCCESS = 0x4,
+ HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
+ HCI_BT_OP_PAIRING_START = 0x6,
+ HCI_BT_OP_PAIRING_FINISH = 0x7,
+ HCI_BT_OP_BT_DEV_ENABLE = 0x8,
+ HCI_BT_OP_BT_DEV_DISABLE = 0x9,
+ HCI_BT_OP_MAX,
+};
+
+enum bt_spec {
+ BT_SPEC_1_0_b = 0x00,
+ BT_SPEC_1_1 = 0x01,
+ BT_SPEC_1_2 = 0x02,
+ BT_SPEC_2_0_EDR = 0x03,
+ BT_SPEC_2_1_EDR = 0x04,
+ BT_SPEC_3_0_HS = 0x05,
+ BT_SPEC_4_0 = 0x06
+};
+
+struct c2h_evt_hdr {
+ u8 cmd_id;
+ u8 cmd_len;
+ u8 cmd_seq;
+};
+
+enum bt_state{
+ BT_INFO_STATE_DISABLED = 0,
+ BT_INFO_STATE_NO_CONNECTION = 1,
+ BT_INFO_STATE_CONNECT_IDLE = 2,
+ BT_INFO_STATE_INQ_OR_PAG = 3,
+ BT_INFO_STATE_ACL_ONLY_BUSY = 4,
+ BT_INFO_STATE_SCO_ONLY_BUSY = 5,
+ BT_INFO_STATE_ACL_SCO_BUSY = 6,
+ BT_INFO_STATE_HID_BUSY = 7,
+ BT_INFO_STATE_HID_SCO_BUSY = 8,
+ BT_INFO_STATE_MAX = 7
+};
+
+enum rtl8723be_c2h_evt {
+ C2H_DBG = 0,
+ C2H_TSF = 1,
+ C2H_AP_RPT_RSP = 2,
+	C2H_CCX_TX_RPT = 3, // FW reports the TX status of a specific packet.
+ C2H_BT_RSSI = 4,
+ C2H_BT_OP_MODE = 5,
+ C2H_HW_INFO_EXCH = 10,
+ C2H_C2H_H2C_TEST = 11,
+ BT_INFO = 9,
+ MAX_C2HEVENT
+};
+
+
+
+void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm);
+void rtl_8821ae_c2h_command_handle(struct ieee80211_hw * hw);
+void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw * hw, bool mstatus);
+void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw);
+
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.c b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
new file mode 100644
index 000000000000..1b8583b689d4
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.c
@@ -0,0 +1,3347 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+#include "btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG 5
+
+static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ le32_to_cpu(rtlpriv->cfg->ops->get_desc(
+ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+}
+
+static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8821ae_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8821ae_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw,
+ u8 rpwm_val, bool b_need_turn_off_ckk)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_support_remote_wake_up;
+ u32 count = 0,isr_regaddr,content;
+ bool b_schedule_timer = b_need_turn_off_ckk;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+ (u8 *) (&b_support_remote_wake_up));
+
+ if (!rtlhal->bfw_ready)
+ return;
+ if (!rtlpriv->psc.b_fw_current_inpsmode)
+ return;
+
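+	/* Busy-wait (up to roughly 100 ms) until no other firmware clock change is in progress. */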
+ while (1) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (rtlhal->bfw_clk_change_in_progress) {
+ while (rtlhal->bfw_clk_change_in_progress) {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ count++;
+ udelay(100);
+ if (count > 1000)
+ return;
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
+ }
+ }
+
+ if (IS_IN_LOW_POWER_STATE_8821AE(rtlhal->fw_ps_state)) {
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ if (FW_PS_IS_ACK(rpwm_val)) {
+ isr_regaddr = REG_HISR;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ while (!(content & IMR_CPWM) && (count < 500)) {
+ udelay(50);
+ count++;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ }
+
+ if (content & IMR_CPWM) {
+ rtl_write_word(rtlpriv,isr_regaddr, 0x0100);
+ rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE;
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Receive CPWM INT!!! Set pHalData->FwPSState = %X\n", rtlhal->fw_ps_state));
+ }
+ }
+
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (b_schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+
+ } else {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+
+
+}
+
+static void _rtl8821ae_set_fw_clock_off(struct ieee80211_hw *hw,
+ u8 rpwm_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ enum rf_pwrstate rtstate;
+ bool b_schedule_timer = false;
+ u8 queue;
+
+ if (!rtlhal->bfw_ready)
+ return;
+ if (!rtlpriv->psc.b_fw_current_inpsmode)
+ return;
+ if (!rtlhal->ballow_sw_to_change_hwclc)
+ return;
+ rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_RF_STATE,(u8 *)(&rtstate));
+ if (rtstate == ERFOFF ||rtlpriv->psc.inactive_pwrstate ==ERFOFF)
+ return;
+
+ for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+ ring = &rtlpci->tx_ring[queue];
+ if (skb_queue_len(&ring->queue)) {
+ b_schedule_timer = true;
+ break;
+ }
+ }
+
+ if (b_schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ return;
+ }
+
+ if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR_8821AE) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (!rtlhal->bfw_clk_change_in_progress) {
+ rtlhal->bfw_clk_change_in_progress = true;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+ rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->bfw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ }
+
+}
+
+static void _rtl8821ae_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+ u8 rpwm_val = 0;
+ rpwm_val |= (FW_PS_STATE_RF_OFF_8821AE | FW_PS_ACK);
+ _rtl8821ae_set_fw_clock_on(hw, rpwm_val, true);
+}
+
+static void _rtl8821ae_fwlps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inps = false;
+ u8 rpwm_val = 0,fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+ if (ppsc->b_low_power_enable){
+ rpwm_val = (FW_PS_STATE_ALL_ON_8821AE|FW_PS_ACK);/* RF on */
+ _rtl8821ae_set_fw_clock_on(hw, rpwm_val, false);
+ rtlhal->ballow_sw_to_change_hwclc = false;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&fw_pwrmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ } else {
+ rpwm_val = FW_PS_STATE_ALL_ON_8821AE; /* RF on */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&fw_pwrmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ }
+
+}
+
+static void _rtl8821ae_fwlps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_fw_current_inps = true;
+ u8 rpwm_val;
+
+ if (ppsc->b_low_power_enable){
+ rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_8821AE; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&ppsc->fwctrl_psmode));
+ rtlhal->ballow_sw_to_change_hwclc = true;
+ _rtl8821ae_set_fw_clock_off(hw, rpwm_val);
+
+
+ } else {
+ rpwm_val = FW_PS_STATE_RF_OFF_8821AE; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&b_fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&ppsc->fwctrl_psmode));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ }
+
+}
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_MACID);
+ *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_MACID + 4);
+ break;
+ case HW_VAR_BSSID:
+ *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_BSSID);
+ *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
+ break;
+ case HW_VAR_MEDIA_STATUS:
+ val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3;
+ break;
+ case HW_VAR_SLOT_TIME:
+ *((u8 *)(val)) = mac->slot_time;
+ break;
+ case HW_VAR_BEACON_INTERVAL:
+ *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_BCN_INTERVAL);
+ break;
+ case HW_VAR_ATIM_WINDOW:
+ *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_ATIMWND);
+ break;
+ case HW_VAR_RCR:
+ *((u32 *) (val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw,
+ HW_VAR_RF_STATE,
+ (u8 *) (&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *) & tsf;
+ u32 *ptsf_high = ((u32 *) & tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ *((u64 *) (val)) = tsf;
+
+ break;
+ }
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process %x\n",variable));
+ break;
+ }
+}
+
+
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_BASIC_RATE:{
+ u16 b_rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+ b_rate_cfg = b_rate_cfg & 0x15f;
+ b_rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (b_rate_cfg >> 8) & 0xff);
+ while (b_rate_cfg > 0x1) {
+ b_rate_cfg = (b_rate_cfg >> 1);
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_SIFS:{
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *) val));
+ break;
+ }
+ case HW_VAR_SLOT_TIME:{
+ u8 e_aci;
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&e_aci));
+ }
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE:{
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+ reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
+ if (short_preamble){
+ reg_tmp |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ } else {
+ reg_tmp &= (~BIT(1));
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ }
+ break;
+ }
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE:{
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+
+ mac->min_space_cfg = ((mac->min_space_cfg &
+ 0xf8) |
+ min_spacing_to_set);
+
+ *val = min_spacing_to_set;
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY:{
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ mac->min_space_cfg |= (density_to_set << 3);
+
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR:{
+ u32 ampdu_len = (*((u8 *)val));
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if(ampdu_len < VHT_AGG_SIZE_128K)
+ ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+ else
+ ampdu_len = 0x1ffff;
+ } else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if(ampdu_len < HT_AGG_SIZE_64K)
+ ampdu_len = (0x2000 << (*((u8 *)val))) -1;
+ else
+ ampdu_len = 0xffff;
+ }
+ ampdu_len |= BIT(31);
+
+ rtl_write_dword(rtlpriv,
+ REG_AMPDU_MAX_LENGTH_8812, ampdu_len);
+ break;
+ }
+ case HW_VAR_AC_PARAM:{
+ u8 e_aci = *((u8 *) val);
+ rtl8821ae_dm_init_edca_turbo(hw);
+
+ if (rtlpci->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_ACM_CTRL,
+ (u8 *) (&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL:{
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= AcmHw_BeqEn;
+ break;
+ case AC2_VI:
+ acm_ctrl |= AcmHw_ViqEn;
+ break;
+ case AC3_VO:
+ acm_ctrl |= AcmHw_VoqEn;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+					 "failed: eACI is %d\n", e_aci));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~AcmHw_ViqEn);
+ break;
+ case AC3_VO:
+				acm_ctrl &= (~AcmHw_VoqEn);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ }
+
+ RT_TRACE(COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:{
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ rtlpci->receive_config = ((u32 *) (val))[0];
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT:{
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl8821ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_SET_RPWM:{
+ u8 rpwm_val;
+
+ rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+ udelay(1);
+
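+			/* Flip BIT(7) relative to its current value; the toggle tells the firmware a new RPWM value has been written. */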
+ if (rpwm_val & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ (*(u8 *) val));
+ } else {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ ((*(u8 *) val) | BIT(7)));
+ }
+
+ break;
+ }
+ case HW_VAR_H2C_FW_PWRMODE:{
+ rtl8821ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->b_fw_current_inpsmode = *((bool *) val);
+ break;
+
+ case HW_VAR_RESUME_CLK_ON:
+ _rtl8821ae_set_fw_ps_rf_on(hw);
+ break;
+
+ case HW_VAR_FW_LPS_ACTION:{
+ bool b_enter_fwlps = *((bool *) val);
+
+ if (b_enter_fwlps)
+ _rtl8821ae_fwlps_enter(hw);
+ else
+ _rtl8821ae_fwlps_leave(hw);
+
+ break;
+ }
+
+ case HW_VAR_H2C_FW_JOINBSSRPT:{
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_regcr, tmp_reg422,bcnvalid_reg;
+ u8 count = 0, dlbcn_count = 0;
+ bool b_recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
+ NULL);
+
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+ tmp_reg422 =
+ rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ if (tmp_reg422 & BIT(6))
+ b_recover = true;
+
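+			/* Download the reserved page (beacon) and poll the beacon-valid bit in REG_TDECTRL+2; retry the download up to five times. */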
+ do {
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2,(bcnvalid_reg | BIT(0)));
+ _rtl8821ae_return_beacon_queue_skb(hw);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_set_fw_rsvdpagepkt(hw, 0);
+ else
+ rtl8821ae_set_fw_rsvdpagepkt(hw, 0);
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ count = 0;
+ while (!(bcnvalid_reg & BIT(0)) && count <20){
+ count++;
+ udelay(10);
+ bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
+ }
+ dlbcn_count++;
+ } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count <5);
+
+ if (bcnvalid_reg & BIT(0))
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+ if (b_recover) {
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ }
+
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl8821ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+
+ break;
+ }
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:{
+ rtl8821ae_set_p2p_ps_offload_cmd(hw, (*(u8 *) val));
+ break;
+ }
+
+ case HW_VAR_AID:{
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+
+ break;
+ }
+ case HW_VAR_CORRECT_TSF:{
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ if (btype_ibss == true)
+ _rtl8821ae_stop_tx_beacon(hw);
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32) & 0xffffffff));
+
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+ if (btype_ibss == true)
+ _rtl8821ae_resume_tx_beacon(hw);
+
+ break;
+
+ }
+ case HW_VAR_NAV_UPPER: {
+		u32 us_nav_upper = *((u32 *)val);
+
+ if(us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF)
+ {
+ RT_TRACE(COMP_INIT , DBG_WARNING,
+ ("The setting value (0x%08X us) of NAV_UPPER"
+ " is larger than (%d * 0xFF)!!!\n",
+ us_nav_upper, HAL_92C_NAV_UPPER_UNIT));
+ break;
+ }
+ rtl_write_byte(rtlpriv, REG_NAV_UPPER,
+ ((u8)((us_nav_upper + HAL_92C_NAV_UPPER_UNIT - 1) / HAL_92C_NAV_UPPER_UNIT)));
+ break;
+ }
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+ array[0] = 0xff;
+ array[1] = *((u8 *)val);
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, 2, array);
+		break;
+	}
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case "
+ "not process %x\n",variable));
+ break;
+ }
+}
+
+static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Failed to polling write LLT done at "
+ "address %d!\n", address));
+ status = false;
+ break;
+ }
+ } while (++count);
+
+ return status;
+}
+
+static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxPage;
+ bool status;
+
+ maxPage = 255;
+ txpktbuf_bndy = 0xF8;
+
+
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1);
+
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
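+	/*
+	 * Build the LLT: pages below txpktbuf_bndy are chained one after the
+	 * other and terminated with 0xFF; the remaining pages form a ring
+	 * that loops back to txpktbuf_bndy.
+	 */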
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl8821ae_llt_write(hw, i, i + 1);
+ if (true != status)
+ return status;
+ }
+
+ status = _rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ if (true != status)
+ return status;
+
+ for (i = txpktbuf_bndy; i < maxPage; i++) {
+ status = _rtl8821ae_llt_write(hw, i, (i + 1));
+ if (true != status)
+ return status;
+ }
+
+ status = _rtl8821ae_llt_write(hw, maxPage, txpktbuf_bndy);
+ if (true != status)
+ return status;
+
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80e70808);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+ return true;
+}
+
+static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlpriv->rtlhal.up_first_time)
+ return;
+
+	if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_on(hw, pLed0);
+		else
+			rtl8821ae_sw_led_on(hw, pLed0);
+	} else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_on(hw, pLed0);
+		else
+			rtl8821ae_sw_led_on(hw, pLed0);
+	} else {
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			rtl8812ae_sw_led_off(hw, pLed0);
+		else
+			rtl8821ae_sw_led_off(hw, pLed0);
+	}
+}
+
+static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 bytetmp = 0;
+ u16 wordtmp = 0;
+ bool b_mac_func_enable = rtlhal->b_mac_func_enable;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /*Auto Power Down to CHIP-off State*/
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ /* HW Power on sequence*/
+ if(!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8812_NIC_ENABLE_FLOW)) {
+ RT_TRACE(COMP_INIT,DBG_LOUD,("init 8812 MAC Fail as power on failure\n"));
+ return false;
+ }
+ } else {
+ /* HW Power on sequence */
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_ENABLE_FLOW)){
+ RT_TRACE(COMP_INIT,DBG_LOUD,("init 8821 MAC Fail as power on failure\n"));
+ return false;
+ }
+ }
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ bytetmp = 0xff;
+ rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+ mdelay(2);
+
+ bytetmp |= 0x7f;
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+ mdelay(2);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+ if (bytetmp & BIT(0)) {
+ bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+ bytetmp |= BIT(6);
+ rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+ }
+ }
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+ bytetmp &= ~BIT(4);
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ if (!b_mac_func_enable) {
+ if (!_rtl8821ae_llt_table_init(hw))
+ return false;
+ }
+
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+ /* Enable FW Beamformer Interrupt */
+ bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+ rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF5B1;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+ /*low address*/
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+ rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+ rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+ rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+ rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+ rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_HQ_DESA,
+ rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32));
+
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77);
+
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+ _rtl8821ae_gen_refresh_led_state(hw);
+
+ return true;
+}
+
+static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rrsr;
+
+ reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr);
+ /* ARFB table 9 for 11ac 5G 2SS */
+ rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000);
+ /* ARFB table 10 for 11ac 5G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000);
+ /* ARFB table 11 for 11ac 24G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR2, 0x00000015);
+ rtl_write_dword(rtlpriv, REG_ARFR2 + 4, 0x003ff000);
+ /* ARFB table 12 for 11ac 24G 1SS */
+ rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015);
+ rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000);
+ /* 0x420[7] = 0 , enable retry AMPDU in new AMPD not signal MPDU. */
+ rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70);
+
+ /*Set retry limit*/
+ rtl_write_word(rtlpriv, REG_RL, 0x0707);
+
+
+ /* Set Data / Response auto rate fallack retry count*/
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+
+ rtlpci->reg_bcn_ctrl_val = 0x1d;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+
+ /* TBTT prohibit hold time. Suggested by designer TimChen. */
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1,0xff); // 8 ms
+
+ /* AGGR_BK_TIME Reg51A 0x16 */
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+
+ /*For Rx TP. Suggested by SD1 Richard. Added by tynli. 2010.04.12.*/
+ rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
+
+ rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
+ rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1F1F);
+}
+
+static u16 _rtl8821ae_mdio_read(struct rtl_priv *rtlpriv, u8 addr)
+{
+ u16 ret = 0;
+ u8 tmp = 0, count = 0;
+
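+	/* BIT(6) starts the MDIO read; poll until the hardware clears it, then fetch the result from REG_MDIO_RDATA. */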
+ rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6));
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6) ;
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6);
+ count++;
+ }
+ if (0 == tmp)
+ ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA);
+
+ return ret;
+}
+
+void _rtl8821ae_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data)
+{
+ u8 tmp = 0, count = 0;
+
+ rtl_write_word(rtlpriv, REG_MDIO_WDATA, data);
+ rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5));
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5) ;
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5);
+ count++;
+ }
+}
+
+static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
+{
+ u16 read_addr = addr & 0xfffc;
+ u8 tmp = 0, count = 0, ret = 0;
+
+ rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr);
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count++;
+ }
+ if (0 == tmp) {
+ read_addr = REG_DBI_RDATA + addr % 4;
+		ret = rtl_read_byte(rtlpriv, read_addr);
+ }
+ return ret;
+}
+
+void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data)
+{
+ u8 tmp = 0, count = 0;
+	u16 write_addr, remainder = addr % 4;
+
+	write_addr = REG_DBI_WDATA + remainder;
+	rtl_write_byte(rtlpriv, write_addr, data);
+
+	write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12));
+	rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr);
+
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1);
+
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count = 0;
+ while (tmp && count < 20) {
+ udelay(10);
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG);
+ count++;
+ }
+
+}
+
+static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if (_rtl8821ae_mdio_read(rtlpriv, 0x04) != 0x8544)
+ _rtl8821ae_mdio_write(rtlpriv, 0x04, 0x8544);
+
+ if (_rtl8821ae_mdio_read(rtlpriv, 0x0b) != 0x0070)
+ _rtl8821ae_mdio_write(rtlpriv, 0x0b, 0x0070);
+ }
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
+ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
+ _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
+
+ if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ {
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718);
+ _rtl8821ae_dbi_write(rtlpriv, 0x718, tmp|BIT(4));
+ }
+}
+
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+ u8 tmp;
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("not open hw encryption\n"));
+ return;
+ }
+
+ sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TxUseDK;
+ sec_reg_value |= SCR_RxUseDK;
+ }
+
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+ tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
+
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("The SECR value is %x\n", sec_reg_value));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+
+}
+
+#if 0
+bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+ if (!(tmp&BIT(2))) {
+ rtl_write_byte(rtlpriv, REG_DBI_CTRL+3, tmp|BIT(2));
+ mdelay(100);
+ }
+
+ tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3);
+ if (tmp&BIT(0) || tmp&BIT(1)) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("rtl8821ae_check_pcie_dma_hang(): TRUE! Reset PCIE DMA!\n"));
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
+ bool mac_power_on, bool watch_dog)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+ bool release_mac_rx_pause;
+ u8 backup_pcie_dma_pause;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("_rtl8821ae_reset_pcie_interface_dma()\n"));
+
+ tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
+ tmp &= ~BIT(1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
+ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
+ tmp |= BIT2;
+ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
+
+ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ if (tmp & BIT(2)) {
+ release_mac_rx_pause = false;
+ } else {
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp | BIT(2));
+ release_mac_rx_pause = true;
+ }
+ backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+1);
+ if (backup_pcie_dma_pause != 0xFF)
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
+
+ if (mac_power_on)
+ rtl_write_byte(rtlpriv, REG_CR, 0);
+
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ tmp &= ~BIT(0);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ tmp |= ~BIT(0);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp);
+
+ if (mac_power_on)
+ rtl_write_byte(rtlpriv, REG_CR, 0xFF);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2);
+ tmp |= BIT1;
+ rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2, tmp);
+
+ if (watch_dog) {
+ u32 rqpn = 0;
+ u32 rqpn_npq = 0;
+ u8 tx_page_boundary = _RQPN_Init_8812E(Adapter, &rqpn_npq, &rqpn);
+
+ if(LLT_table_init_8812(Adapter, TX_PAGE_BOUNDARY, RQPN, RQPN_NPQ) == RT_STATUS_FAILURE)
+ return false;
+
+ PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
+ PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
+
+ // <1> Reset Tx descriptor
+ Adapter->HalFunc.ResetTxDescHandler(Adapter,Adapter->NumTxDesc);
+
+ // <2> Reset Rx descriptor
+ Adapter->HalFunc.ResetRxDescHandler(Adapter,Adapter->NumRxDesc);
+
+ // <3> Reset RFDs
+ FreeRFDs( Adapter, TRUE);
+
+ // <4> Reset TCBs
+ FreeTCBs( Adapter, TRUE);
+
+ // We should set all Rx desc own bit to 1 to prevent from RDU after enable Rx DMA. 2013.02.18, by tynli.
+ PrepareAllRxDescBuffer(Adapter);
+
+ PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
+ PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
+
+ //
+ // Initialize TRx DMA address.
+ //
+ // Because set 0x100 to 0x0 will cause the Rx descriptor address 0x340 be cleared to zero on 88EE,
+ // we should re-initialize Rx desc. address before enable DMA. 2012.11.07. by tynli.
+ InitTRxDescHwAddress8812AE(Adapter);
+ }
+
+ // In MAC power on state, BB and RF maybe in ON state, if we release TRx DMA here
+ // it will cause packets to be started to Tx/Rx, so we release Tx/Rx DMA later.
+ if(!bInMACPowerOn || bInWatchDog)
+ {
+ // 8. release TRX DMA
+ //write 0x284 bit[18] = 1'b0
+ //write 0x301 = 0x00
+ if(bReleaseMACRxPause)
+ {
+ u1Tmp = PlatformEFIORead1Byte(Adapter, REG_RXDMA_CONTROL);
+ PlatformEFIOWrite1Byte(Adapter, REG_RXDMA_CONTROL, (u1Tmp&~BIT2));
+ }
+ PlatformEFIOWrite1Byte(Adapter, REG_PCIE_CTRL_REG+1, BackUpPcieDMAPause);
+ }
+
+ if(IS_HARDWARE_TYPE_8821E(Adapter))
+ {
+ //9. lock system register
+ // write 0xCC bit[2] = 1'b0
+ u1Tmp = PlatformEFIORead1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B);
+ u1Tmp &= ~(BIT2);
+ PlatformEFIOWrite1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B, u1Tmp);
+ }
+
+ return RT_STATUS_SUCCESS;
+}
+#endif
+
+// Static MacID Mapping (cf. Used in MacIdDoStaticMapping) ----------
+#define MAC_ID_STATIC_FOR_DEFAULT_PORT 0
+#define MAC_ID_STATIC_FOR_BROADCAST_MULTICAST 1
+#define MAC_ID_STATIC_FOR_BT_CLIENT_START 2
+#define MAC_ID_STATIC_FOR_BT_CLIENT_END 3
+// -----------------------------------------------------------
+
+void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 media_rpt[4] = {RT_MEDIA_CONNECT, 1, \
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \
+ MAC_ID_STATIC_FOR_BT_CLIENT_END};
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, \
+ HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt);
+
+ RT_TRACE(COMP_INIT,DBG_LOUD, \
+ ("Initialize MacId media status: from %d to %d\n", \
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \
+ MAC_ID_STATIC_FOR_BT_CLIENT_END));
+}
+
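+/* Bring the NIC up: power on the MAC (_rtl8821ae_init_mac), download the
+ * firmware, configure MAC/BB/RF via the phy helpers, switch to the 2.4 GHz
+ * band, reset the CAM and enable HW security, restore ASPM, release the
+ * RX/TX PCIe DMA and finally start DM and the MACID/media-status setup.
+ */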
+int rtl8821ae_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool rtstatus = true;
+ int err;
+ u8 tmp_u1b;
+ u32 nav_upper = WIFI_NAV_UPPER_US;
+
+ rtlpriv->rtlhal.being_init_adapter = true;
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ /*YP wowlan not considered*/
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_CR);
+ if (tmp_u1b!=0 && tmp_u1b != 0xEA) {
+ rtlhal->b_mac_func_enable = true;
+ RT_TRACE(COMP_INIT,DBG_LOUD,(" MAC has already power on.\n"));
+ } else {
+ rtlhal->b_mac_func_enable = false;
+ rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+ }
+
+/* if (_rtl8821ae_check_pcie_dma_hang(hw)) {
+ _rtl8821ae_reset_pcie_interface_dma(hw,rtlhal->b_mac_func_enable,false);
+ rtlhal->b_mac_func_enable = false;
+ } */
+
+ rtstatus = _rtl8821ae_init_mac(hw);
+ if (!rtstatus) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ err = 1;
+ return err;
+ }
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+ tmp_u1b &= 0x7F;
+ rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+ err = rtl8821ae_download_fw(hw, false);
+ if (err) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW "
+ "without FW now..\n"));
+ err = 1;
+ rtlhal->bfw_ready = false;
+ return err;
+ } else {
+ rtlhal->bfw_ready = true;
+ }
+ rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
+ rtlhal->bfw_clk_change_in_progress = false;
+ rtlhal->ballow_sw_to_change_hwclc = false;
+ rtlhal->last_hmeboxnum = 0;
+
+ /*SIC_Init(Adapter);
+ if(pHalData->AMPDUBurstMode)
+ PlatformEFIOWrite1Byte(Adapter,REG_AMPDU_BURST_MODE_8812, 0x7F);*/
+
+ rtl8821ae_phy_mac_config(hw);
+ /* The previous call modifies RCR, so refresh the cached receive_config
+ * here, otherwise throughput becomes unstable: RCR_ACRC32 makes RX
+ * throughput unstable and RCR_APP_ICV makes mac80211 disassociate from
+ * Cisco 1252 APs.
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);*/
+ rtl8821ae_phy_bb_config(hw);
+
+ rtl8821ae_phy_rf_config(hw);
+
+ _rtl8821ae_hw_configure(hw);
+
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+ /*set wireless mode*/
+
+ rtlhal->b_mac_func_enable = true;
+
+ rtl_cam_reset_all_entry(hw);
+
+ rtl8821ae_enable_hw_security_config(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ _rtl8821ae_enable_aspm_back_door(hw);
+ rtlpriv->intf_ops->enable_aspm(hw);
+
+ //rtl8821ae_bt_hw_init(hw);
+ rtlpriv->rtlhal.being_init_adapter = false;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_NAV_UPPER, (u8 *)&nav_upper);
+
+ //rtl8821ae_dm_check_txpower_tracking(hw);
+ //rtl8821ae_phy_lc_calibrate(hw);
+
+ /* Release Rx DMA*/
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ if (tmp_u1b & BIT(2)) {
+ /* Release Rx DMA if needed*/
+ tmp_u1b &= ~BIT(2);
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b);
+ }
+
+ /* Release Tx/Rx PCIe DMA */
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0);
+
+ rtl8821ae_dm_init(hw);
+ rtl8821ae_macid_initialize_mediastatus(hw);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_hw_init() <====\n"));
+ return err;
+}
+
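+/* Decode REG_SYS_CFG1 (0xF0) into a version_8821ae value: test chips are
+ * flagged by TRP_VAUX_EN, normal chips carry an RTL version id in
+ * CHIP_VER_RTL_MASK, and the RF type is fixed per hw_type (2T2R for the
+ * 8812AE, 1T1R for the 8821AE).
+ */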
+static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum version_8821ae version = VERSION_UNKNOWN;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("ReadChipVersion8812A 0xF0 = 0x%x \n", value32));
+
+
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtlphy->rf_type = RF_2T2R;
+ else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ rtlphy->rf_type = RF_1T1R;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("RF_Type is %x!!\n", rtlphy->rf_type));
+
+ if (value32 & TRP_VAUX_EN) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rtlphy->rf_type == RF_2T2R)
+ version = VERSION_TEST_CHIP_2T2R_8812;
+ else
+ version = VERSION_TEST_CHIP_1T1R_8812;
+ } else {
+ version = VERSION_TEST_CHIP_8821;
+ }
+ } else {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ u32 rtl_id = ((value32 & CHIP_VER_RTL_MASK) >> 12) + 1;
+
+ if (rtlphy->rf_type == RF_2T2R)
+ version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP | RF_TYPE_2T2R);
+ else
+ version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP);
+
+ version = (enum version_8821ae)(version | (rtl_id << 12));
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ u32 rtl_id = value32 & CHIP_VER_RTL_MASK;
+
+ version = (enum version_8821ae)(CHIP_8821 | NORMAL_CHIP | rtl_id);
+ }
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R"));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /*WL_HWROF_EN.*/
+ value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ rtlphy->hw_rof_enable = ((value32 & WL_HWROF_EN) ? 1 : 0);
+ }
+
+ switch (version) {
+ case VERSION_TEST_CHIP_1T1R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_1T1R_8812.\n"));
+ break;
+ case VERSION_TEST_CHIP_2T2R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_2T2R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_1T1R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_2T2R_8812:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT.\n"));
+ break;
+ case VERSION_TEST_CHIP_8821:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_TEST_CHIP_8821.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_8821:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT.\n"));
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Chip Version ID: Unknown (0x%X).\n", version));
+ break;
+ }
+
+ return version;
+}
+
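+/* Map the nl80211 interface type onto the MSR network-type bits, start or
+ * stop beacon TX accordingly and pick the matching BCNTCFG value (0x00 for
+ * AP, 0x66 otherwise).
+ */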
+static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ bt_msr &= 0xfc;
+
+ rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+ RT_TRACE(COMP_BEACON, DBG_LOUD,
+ ("clear 0x550 when set HW_VAR_MEDIA_STATUS\n"));
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl8821ae_stop_tx_beacon(hw);
+ _rtl8821ae_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC ||
+ type == NL80211_IFTYPE_AP) {
+ _rtl8821ae_resume_tx_beacon(hw);
+ _rtl8821ae_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,("Set HW_VAR_MEDIA_STATUS: "
+ "No such media status(%x).\n", type));
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Network type %d not support!\n", type));
+ return 1;
+ }
+
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & ~0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+ return 0;
+}
+
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rcr = rtlpci->receive_config;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ if (check_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *) (&reg_rcr));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ } else {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *) (&reg_rcr));
+ }
+
+}
+
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_set_network_type!\n"));
+
+ if (_rtl8821ae_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl8821ae_set_check_bssid(hw, true);
+ } else {
+ rtl8821ae_set_check_bssid(hw, false);
+ }
+
+ return 0;
+}
+
+/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl8821ae_dm_init_edca_turbo(hw);
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+ break;
+ case AC0_BE:
+ /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+ /* Some C2H commands (e.g. C2H, CPWM) may have been sent before the
+ * system interrupt was enabled, so clear all C2H events the FW has
+ * notified; otherwise the FW will not schedule any more commands.
+ */
+ //rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+ /*enable system interrupt*/
+ rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+ rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
+}
+
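+/* Card-disable sequence: run the LPS-enter power flow (8821AE only),
+ * self-reset the firmware if it is running, hold the MCU in reset, clear
+ * the MCU ready status, run the chip-specific NIC_DISABLE power sequence,
+ * pulse the MCU IO wrapper reset and finally lock the ISO/CLK/power control
+ * register (0x1C = 0x0E).
+ */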
+static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1b_tmp;
+
+ rtlhal->b_mac_func_enable = false;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* Combo (PCIe + USB) Card and PCIe-MF Card */
+ /* 1. Run LPS WL RFOFF flow */
+ //RT_TRACE(COMP_INIT, DBG_LOUD, ("=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n"));
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_LPS_ENTER_FLOW);
+ }
+ /* 2. 0x1F[7:0] = 0 */
+ /* turn off RF */
+ //rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+ rtlhal->bfw_ready ) {
+ rtl8821ae_firmware_selfreset(hw);
+ }
+
+ /* Reset MCU. Suggested by Filen. */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
+
+ /* g. MCUFWDL 0x80[1:0]=0 */
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8821A_NIC_DISABLE_FLOW);
+ } else {
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8812_NIC_DISABLE_FLOW);
+ }
+
+ /* Reset MCU IO Wrapper */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+ /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8821ae_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("rtl8821ae_card_disable.\n"));
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl8821ae_set_media_status(hw, opmode);
+ if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ _rtl8821ae_poweroff_adapter(hw);
+
+ /* after power off we should do iqk again */
+ rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+}
+
+
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl8821ae_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+ rtlpci->reg_bcn_ctrl_val |= BIT(3);
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+ rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ rtl8821ae_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl8821ae_enable_interrupt(hw);
+}
+
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(COMP_INTR, DBG_LOUD,
+ ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl8821ae_disable_interrupt(hw);
+ rtl8821ae_enable_interrupt(hw);
+}
+
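+/* Map a channel number to its TX-power calibration group: five groups cover
+ * the 2.4 GHz channels 1-14 and fourteen groups cover the 5 GHz channels
+ * 36-177; the group index is later used to pick the base power out of the
+ * efuse power map.
+ */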
+static u8 _rtl8821ae_get_chnl_group(u8 chnl)
+{
+ u8 group = 0;
+
+ if (chnl <= 14) {
+ if (1 <= chnl && chnl <= 2 )
+ group = 0;
+ else if (3 <= chnl && chnl <= 5 )
+ group = 1;
+ else if (6 <= chnl && chnl <= 8 )
+ group = 2;
+ else if (9 <= chnl && chnl <= 11)
+ group = 3;
+ else /*if (12 <= chnl && chnl <= 14)*/
+ group = 4;
+ } else {
+ if (36 <= chnl && chnl <= 42)
+ group = 0;
+ else if (44 <= chnl && chnl <= 48)
+ group = 1;
+ else if (50 <= chnl && chnl <= 58)
+ group = 2;
+ else if (60 <= chnl && chnl <= 64)
+ group = 3;
+ else if (100 <= chnl && chnl <= 106)
+ group = 4;
+ else if (108 <= chnl && chnl <= 114)
+ group = 5;
+ else if (116 <= chnl && chnl <= 122)
+ group = 6;
+ else if (124 <= chnl && chnl <= 130)
+ group = 7;
+ else if (132 <= chnl && chnl <= 138)
+ group = 8;
+ else if (140 <= chnl && chnl <= 144)
+ group = 9;
+ else if (149 <= chnl && chnl <= 155)
+ group = 10;
+ else if (157 <= chnl && chnl <= 161)
+ group = 11;
+ else if (165 <= chnl && chnl <= 171)
+ group = 12;
+ else if (173 <= chnl && chnl <= 177)
+ group = 13;
+ else
+ /*RT_TRACE(COMP_EFUSE,DBG_LOUD,
+ ("5G, Channel %d in Group not found \n",chnl));*/
+ RT_ASSERT(!COMP_EFUSE,
+ ("5G, Channel %d in Group not found \n",chnl));
+ }
+ return group;
+}
+
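+/* Parse the per-path TX power section of the efuse map (starting at
+ * EEPROM_TX_PWR_INX) into the 2.4G/5G power-info structs. Each diff is
+ * stored as a 4-bit signed nibble, so values with BIT(3) set are
+ * sign-extended by OR-ing in 0xF0; unprogrammed (0xFF) bytes fall back to
+ * fixed defaults.
+ */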
+static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
+ struct txpower_info_2g *pwrinfo24g,
+ struct txpower_info_5g *pwrinfo5g,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 rfPath, eeAddr = EEPROM_TX_PWR_INX, group, TxCount = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("hal_ReadPowerValueFromPROM8821ae(): PROMContent[0x%x]=0x%x\n", (eeAddr+1), hwinfo[eeAddr+1]));
+ if (hwinfo[eeAddr+1] == 0xFF) /*YJ,add,120316*/
+ autoload_fail = true;
+
+ if (autoload_fail) {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("autoload failed: using default TX power values!\n"));
+ for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ /*2.4G default value*/
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+ pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo24g->bw20_diff[rfPath][0] = 0x02;
+ pwrinfo24g->ofdm_diff[rfPath][0] = 0x04;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ }
+ }
+ /*5G default value*/
+ for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++)
+ pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A;
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo5g->ofdm_diff[rfPath][0] = 0x04;
+ pwrinfo5g->bw20_diff[rfPath][0] = 0x00;
+ pwrinfo5g->bw80_diff[rfPath][0] = 0xFE;
+ pwrinfo5g->bw160_diff[rfPath][0] = 0xFE;
+ } else {
+ /* TxCount > 0 here, so index with TxCount rather than 0 */
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+ }
+ }
+ }
+ return;
+ }
+
+ rtl_priv(hw)->efuse.b_txpwr_fromeprom = true;
+
+ for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ /*2.4G default value*/
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF)
+ pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+ }
+ for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) {
+ pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF)
+ pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0;
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0x02;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) /*bit sign number to 8 bit sign number*/
+ pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0x04;
+ } else {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) /*bit sign number to 8 bit sign number*/
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0;
+ eeAddr++;
+ } else {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4;
+ if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ eeAddr++;
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+
+ /*5G default value*/
+ for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
+ pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
+ if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF)
+ pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE;
+ }
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0;
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0x0;
+ } else {
+ pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0x4;
+ } else {
+ pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ } else {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw40_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->ofdm_diff[rfPath][1] = 0xFE;
+ pwrinfo5g->ofdm_diff[rfPath][2] = 0xFE;
+ } else {
+ pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f);
+ }
+ eeAddr++;
+ if (hwinfo[eeAddr] == 0xFF)
+ pwrinfo5g->ofdm_diff[rfPath][3] = 0xFE;
+ else
+ pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f);
+
+ eeAddr++;
+
+ for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (pwrinfo5g->ofdm_diff[rfPath][TxCount] == 0xFF)
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ else if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ if(pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3)) //4bit sign number to 8 bit sign number
+ pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (hwinfo[eeAddr] == 0xFF) {
+ pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE;
+ } else {
+ pwrinfo5g->bw160_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0x0f);
+ if(pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3)) //4bit sign number to 8 bit sign number
+ pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+}
+
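+/* Fill the rtl_efuse TX power tables for the 8812AE from the parsed
+ * power-info structs: per-channel CCK/HT40 bases for 2.4G, BW40 bases for
+ * 5G, BW80 bases as the average of the two neighbouring BW40 groups, plus
+ * the per-rate-section diffs and the regulatory setting.
+ */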
+static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pwrinfo24g;
+ struct txpower_info_5g pwrinfo5g;
+ u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+ index = _rtl8821ae_get_chnl_group(i + 1);
+
+ if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pwrinfo24g.index_cck_base[rf_path][5];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pwrinfo24g.index_bw40_base[rf_path][index];
+ } else {
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pwrinfo24g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pwrinfo24g.index_bw40_base[rf_path][index];
+ }
+ }
+
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+ index = _rtl8821ae_get_chnl_group(channel5g[i]);
+ rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+ u8 upper, lower;
+ index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+ upper = pwrinfo5g.index_bw40_base[rf_path][index];
+ lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+ rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+ rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+ }
+ }
+
+ if (!autoload_fail) {
+ rtlefuse->eeprom_regulatory =
+ hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory ));
+}
+
+static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pwrinfo24g;
+ struct txpower_info_5g pwrinfo5g;
+ u8 channel5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo);
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) {
+ index = _rtl8821ae_get_chnl_group(i + 1);
+
+ if (i == CHANNEL_MAX_NUMBER_2G - 1) {
+ rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][5];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+ } else {
+ rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index];
+ }
+ }
+
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) {
+ index = _rtl8821ae_get_chnl_group(channel5g[i]);
+ rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) {
+ u8 upper, lower;
+ index = _rtl8821ae_get_chnl_group(channel5g_80m[i]);
+ upper = pwrinfo5g.index_bw40_base[rf_path][index];
+ lower = pwrinfo5g.index_bw40_base[rf_path][index + 1];
+
+ rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2;
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i];
+
+ rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i];
+ }
+ }
+
+ if (!autoload_fail) {
+ rtlefuse->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory ));
+}
+
+static void _rtl8812ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+
+ if (b_pseudo_test) {
+ /* need add */
+ }
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *) & hwinfo[0]);
+ if (eeprom_id != RTL_EEPROM_ID) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL8812AE autoload_failflag, check it !!"));
+ return;
+ }
+
+ rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+ if (rtlefuse->eeprom_version == 0xff)
+ rtlefuse->eeprom_version = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+ rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROMId = 0x%4x\n", eeprom_id));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+ /*customer ID*/
+ rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+ if (rtlefuse->eeprom_oemid == 0xFF)
+ rtlefuse->eeprom_oemid = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+ _rtl8812ae_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ /*board type*/
+ rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+ if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+ rtlefuse->board_type = 0;
+ rtlhal->boad_type = rtlefuse->board_type;
+
+ rtl8812ae_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+ if (rtlefuse->eeprom_channelplan == 0xff)
+ rtlefuse->eeprom_channelplan = 0x7F;
+
+ /* set channel plan to world wide 13 */
+ //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0x20;
+
+ rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+ if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+ rtlefuse->b_apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = 0xff;
+ }
+
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+ if (!rtlefuse->autoload_failflag) {
+ /* antenna diversity config appears to live in bits [4:3] of the RF board option */
+ rtlefuse->antenna_div_cfg = (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] & 0x18) >> 3;
+ if (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] == 0xff)
+ rtlefuse->antenna_div_cfg = 0x00;
+ /*if (BT_1ant())
+ rtlefuse->antenna_div_cfg = 0;*/
+ rtlefuse->antenna_div_type = *(u8 *) & hwinfo[EEPROM_RF_ANTENNA_OPT_88E];
+ if (rtlefuse->antenna_div_type == 0xFF)
+ rtlefuse->antenna_div_type = FIXED_HW_ANTDIV;
+ } else {
+ rtlefuse->antenna_div_cfg = 0;
+ rtlefuse->antenna_div_type = 0;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+ rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+ /*Hal_ReadPAType_8821A()*/
+ /*Hal_EfuseParseRateIndicationOption8821A()*/
+ /*Hal_ReadEfusePCIeCap8821AE()*/
+
+ pcipriv->ledctl.bled_opendrain = true;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case RT_CID_DEFAULT:
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ break;
+
+ }
+ }
+}
+
+static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+
+ if (b_pseudo_test) {
+ /* need add */
+ }
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *) & hwinfo[0]);
+ if (eeprom_id != RTL_EEPROM_ID) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("RTL8821AE autoload_failflag, check it !!"));
+ return;
+ }
+
+ rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION];
+ if (rtlefuse->eeprom_version == 0xff)
+ rtlefuse->eeprom_version = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version));
+
+ rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROMId = 0x%4x\n", eeprom_id));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+ /*customer ID*/
+ rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID];
+ if (rtlefuse->eeprom_oemid == 0xFF)
+ rtlefuse->eeprom_oemid = 0;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("dev_addr: %pM\n", rtlefuse->dev_addr));
+
+ _rtl8821ae_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ /*board type*/
+ rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5);
+ if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff )
+ rtlefuse->board_type = 0;
+ rtlhal->boad_type = rtlefuse->board_type;
+
+ rtl8821ae_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+
+ rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN];
+ if (rtlefuse->eeprom_channelplan == 0xff)
+ rtlefuse->eeprom_channelplan = 0x7F;
+
+ /* set channel plan to world wide 13 */
+ //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan;
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0x20;
+
+ rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER];
+ if ((rtlefuse->eeprom_thermalmeter == 0xff) || rtlefuse->autoload_failflag) {
+ rtlefuse->b_apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = 0x18;
+ }
+
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+
+ if (!rtlefuse->autoload_failflag) {
+ rtlefuse->antenna_div_cfg = (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] & BIT(3)) ? true : false;
+ /*if (BT_1ant())
+ rtlefuse->antenna_div_cfg = 0;*/
+
+ rtlefuse->antenna_div_type = CG_TRX_HW_ANTDIV;
+ } else {
+ rtlefuse->antenna_div_cfg = 0;
+ rtlefuse->antenna_div_type = 0;
+ }
+
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
+ rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type));
+
+ pcipriv->ledctl.bled_opendrain = true;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case RT_CID_DEFAULT:
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+
+/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ pcipriv->ledctl.bled_opendrain = true;
+ switch (rtlhal->oem_id) {
+ case RT_CID_819x_HP:
+ pcipriv->ledctl.bled_opendrain = true;
+ break;
+ case RT_CID_819x_Lenovo:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819x_Acer:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(COMP_INIT, DBG_DMESG,
+ ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}*/
+
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl8821ae_read_chip_version(hw);
+
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.brfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.brfpath_rxenable[0] =
+ rtlpriv->dm.brfpath_rxenable[1] = true;
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+ rtlhal->version));
+
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_read_adapter_info(hw, false);
+ else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ _rtl8821ae_read_adapter_info(hw, false);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ }
+ /*hal_ReadRFType_8812A()*/
+ //_rtl8821ae_hal_customized_behavior(hw);
+}
+
+static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
+ u8 ratr_index = 0;
+ u8 b_nmode = mac->ht_enable;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 b_curtxbw_40mhz = mac->bw_40;
+ u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ b_nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+
+ if ( (rtlpcipriv->btcoexist.bt_coexistence) &&
+ (rtlpcipriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+ (rtlpcipriv->btcoexist.bt_cur_state) &&
+ (rtlpcipriv->btcoexist.bt_ant_isolation) &&
+ ((rtlpcipriv->btcoexist.bt_service == BT_SCO)||
+ (rtlpcipriv->btcoexist.bt_service == BT_BUSY)) )
+ ratr_value &= 0x0fffcfc0;
+ else
+ ratr_value &= 0x0FFFFFFF;
+
+ if (b_nmode && ((b_curtxbw_40mhz &&
+ b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
+ b_curshortgi_20mhz))) {
+
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(COMP_RATR, DBG_DMESG,
+ ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+}
+
+
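+/* Translate a driver RATR index into the ARFR table id the firmware
+ * expects; the id also depends on the RF type (1T1R vs 2T2R) and, for the
+ * 2.4G AC mode, on the current channel bandwidth.
+ */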
+static u8 _rtl8821ae_mrate_idx_to_arfr_id(
+ struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 ret = 0;
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 5;
+ else
+ ret = 4;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 3;
+ else
+ ret = 2;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ ret = 6;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = 7;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = 8;
+ break;
+ case RATR_INX_WIRELESS_MC:
+ if ((wirelessmode == WIRELESS_MODE_B)
+ || (wirelessmode == WIRELESS_MODE_G)
+ || (wirelessmode == WIRELESS_MODE_N_24G)
+ || (wirelessmode == WIRELESS_MODE_AC_24G))
+ ret = 6;
+ else
+ ret = 7;
+ break;
+ case RATR_INX_WIRELESS_AC_5N:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 10;
+ else
+ ret = 9;
+ break;
+ case RATR_INX_WIRELESS_AC_24N:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 10;
+ else
+ ret = 9;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = 11;
+ else
+ ret = 12;
+ }
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
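+/* Build the 7-byte H2C RA-mask command for one station: the rate bitmap is
+ * masked according to wireless mode, RSSI level, bandwidth and RF type,
+ * then packed together with the macid, ARFR id, short-GI and BW flags and
+ * handed to the firmware via H2C_8821AE_RA_MASK.
+ */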
+static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_sta_info * sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 b_curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ ? 1 : 0;
+ u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
+ bool b_shortgi = false;
+ u8 rate_mask[7];
+ u8 macid = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ b_curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ ratr_bitmap = sta->supp_rates[0];
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+/*mac id owner*/
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_G;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ if (wirelessmode == WIRELESS_MODE_N_24G)
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ else
+ ratr_index = RATR_INX_WIRELESS_NG;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC || mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R) {
+ if (b_curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (b_curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0fff0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0ffff000;
+ else
+ ratr_bitmap &= 0x0ffff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0fff0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0ffff000;
+ else
+ ratr_bitmap &= 0x0ffff005;
+ }
+ }
+ }
+ if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
+ (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+
+ if (macid == 0)
+ b_shortgi = true;
+ else if (macid == 1)
+ b_shortgi = false;
+ }
+ break;
+
+ case WIRELESS_MODE_AC_24G:
+ ratr_index = RATR_INX_WIRELESS_AC_24N;
+ if (rssi_level == 1)
+ ratr_bitmap &= 0xfc3f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0xfffff000;
+ else
+ ratr_bitmap &= 0xffffffff;
+ break;
+
+ case WIRELESS_MODE_AC_5G:
+ ratr_index = RATR_INX_WIRELESS_AC_5N;
+
+ if (rtlphy->rf_type == RF_1T1R) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rssi_level == 1) /* add by Gary for ac-series */
+ ratr_bitmap &= 0x003f8000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x003ff000;
+ else
+ ratr_bitmap &= 0x003ff010;
+ } else {
+ ratr_bitmap &= 0x000ff010;
+ }
+ } else {
+ if (rssi_level == 1) /* add by Gary for ac-series */
+ ratr_bitmap &= 0xfe3f8000; /*VHT 2SS MCS3~9*/
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0xfffff000; /*VHT 2SS MCS0~9*/
+ else
+ ratr_bitmap &= 0xfffff010; /*All*/
+ }
+ break;
+
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+
+ }
+
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(COMP_RATR, DBG_DMESG,
+ ("ratr_bitmap :%x\n", ratr_bitmap));
+ *(u32 *) & rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28));
+ rate_mask[0] = macid;
+ rate_mask[1] = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode) | (b_shortgi ? 0x80 : 0x00);
+ rate_mask[2] = b_curtxbw_40mhz;
+ /* if (prox_priv->proxim_modeinfo->power_output > 0)
+ rate_mask[2] |= BIT(6); */
+
+ rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+ rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >>8);
+ rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+ rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+ RT_TRACE(COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+ "ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]));
+ rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RA_MASK, 7, rate_mask);
+ _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (rtlpriv->dm.b_useramask)
+ rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ /*RT_TRACE(COMP_RATR,DBG_LOUD,("rtl8821ae_update_hal_rate_tbl(): Error! 8821ae FW RA Only"));*/
+ rtl8821ae_update_hal_rate_table(hw, sta);
+}
+
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *) & mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *) & sifs_timer);
+}
+
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp = 0;
+ bool b_actuallyset = false;
+
+ if (rtlpriv->rtlhal.being_init_adapter)
+ return false;
+
+ if (ppsc->b_swrf_processing)
+ return false;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ cur_rfstate = ppsc->rfpwr_state;
+
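+ /*
+  * Read the HW radio on/off GPIO: clear BIT(1) in REG_GPIO_IO_SEL_2,
+  * sample the pin from REG_GPIO_PIN_CTRL_2, and map its level to
+  * ERFON/ERFOFF according to polarity_ctl.
+  */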
+ rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+ rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+ if (rtlphy->polarity_ctl) {
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+ } else {
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+ }
+
+ if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+
+ e_rfpowerstate_toset = ERFON;
+ ppsc->b_hwradiooff = false;
+ b_actuallyset = true;
+ } else if ((ppsc->b_hwradiooff == false)
+ && (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->b_hwradiooff = true;
+ b_actuallyset = true;
+ }
+
+ if (b_actuallyset) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+
+ *valid = 1;
+ return !ppsc->b_hwradiooff;
+
+}
+
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
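+ /*
+  * clear_all wipes the first five CAM entries and the cached key
+  * buffers; otherwise map the cipher to a CAM algorithm, pick a CAM
+  * entry (default-key table, broadcast entry, or a per-station slot)
+  * and then add or delete the key below.
+  */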
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("clear_all\n"));
+
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case "
+ "not process \n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(COMP_SEC, DBG_EMERG,
+ ("Can not find free hw security cam entry\n"));
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(COMP_SEC, DBG_DMESG,
+ ("delete one entry, entry_id is %d\n",entry_id));
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("add one entry\n"));
+ if (is_pairwise) {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("set Pairwiase key\n"));
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ RT_TRACE(COMP_SEC, DBG_DMESG, ("set group key\n"));
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+
+ }
+ }
+}
+
+
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+
+ if (!auto_load_fail) {
+ value = hwinfo[EEPROM_RF_BOARD_OPTION];
+ if (((value & 0xe0) >> 5) == 0x1)
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+
+ value = hwinfo[EEPROM_RF_BT_SETTING];
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+ /*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+ u32 tmpu_32;
+
+ if (!auto_load_fail) {
+ tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ if(tmpu_32 & BIT(18))
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+
+ value = hwinfo[EEPROM_RF_BT_SETTING];
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+ /*move BT_InitHalVars() to init_sw_vars*/
+}
+
+void rtl8821ae_bt_reg_init(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+
+ /* 0:Low, 1:High, 2:From Efuse. */
+ rtlpcipriv->btcoexist.b_reg_bt_iso = 2;
+ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+ rtlpcipriv->btcoexist.b_reg_bt_sco = 3;
+ /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+ rtlpcipriv->btcoexist.b_reg_bt_sco = 0;
+}
+
+
+void rtl8821ae_bt_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+}
+
+void rtl8821ae_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8821ae_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+ bool allow_all_da, bool write_into_reg)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ if (allow_all_da) /* Set BIT0 */
+ rtlpci->receive_config |= RCR_AAP;
+ else /* Clear BIT0 */
+ rtlpci->receive_config &= ~RCR_AAP;
+
+ if (write_into_reg)
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ RT_TRACE(COMP_TURBO | COMP_INIT, DBG_LOUD,
+ ("receive_config=0x%08X, write_into_reg=%d\n",
+ rtlpci->receive_config, write_into_reg));
+}
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/hw.h b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
new file mode 100644
index 000000000000..4fb6bf0d1da2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/hw.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_HW_H__
+#define __RTL8821AE_HW_H__
+
+void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl8821ae_hw_init(struct ieee80211_hw *hw);
+void rtl8821ae_card_disable(struct ieee80211_hw *hw);
+void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
+void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+
+void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo);
+void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo);
+void rtl8821ae_bt_reg_init(struct ieee80211_hw *hw);
+void rtl8821ae_bt_hw_init(struct ieee80211_hw *hw);
+void rtl8821ae_suspend(struct ieee80211_hw *hw);
+void rtl8821ae_resume(struct ieee80211_hw *hw);
+void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
+ bool allow_all_da,
+ bool write_into_reg);
+void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw);
+void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.c b/drivers/staging/rtl8821ae/rtl8821ae/led.c
new file mode 100644
index 000000000000..130a4f4b24a2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.c
@@ -0,0 +1,239 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+
+static void _rtl8821ae_init_led(struct ieee80211_hw *hw,
+ struct rtl_led *pled,
+ enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->b_ledon = false;
+}
+
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv,
+ REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ pled->b_ledon = true;
+}
+
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u16 ledreg = REG_LEDCFG1;
+ u8 ledcfg = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (pled->ledpin) {
+ case LED_PIN_LED0:
+ ledreg = REG_LEDCFG1;
+ break;
+
+ case LED_PIN_LED1:
+ ledreg = REG_LEDCFG2;
+ break;
+
+ case LED_PIN_GPIO0:
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOn, LedAddr:%X LEDPIN=%d \n", ledreg, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, ledreg);
+ ledcfg |= BIT(5); /*Set 0x4c[21]*/
+ ledcfg &= ~(BIT(7) | BIT(6) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+ /*Clear 0x4c[23:22] and 0x4c[19:16]*/
+ rtl_write_byte(rtlpriv, ledreg, ledcfg); /*SW control led0 on.*/
+ pled->b_ledon = true;
+}
+
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.bled_opendrain) {
+ ledcfg &= 0x90; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE;
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5)));
+ }
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ ledcfg &= 0x10; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ pled->b_ledon = false;
+}
+
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u16 ledreg = REG_LEDCFG1;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+ switch (pled->ledpin) {
+ case LED_PIN_LED0:
+ ledreg = REG_LEDCFG1;
+ break;
+
+ case LED_PIN_LED1:
+ ledreg = REG_LEDCFG2;
+ break;
+
+ case LED_PIN_GPIO0:
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, DBG_LOUD,
+ ("In SwLedOff, LedAddr:%X LEDPIN=%d\n", ledreg, pled->ledpin));
+
+ if (pcipriv->ledctl.bled_opendrain) {
+ /* Open-drain arrangement for controlling the LED */
+ u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
+
+ ledcfg &= 0xd0; /* Set to software control. */
+ rtl_write_byte(rtlpriv, ledreg, (ledcfg | BIT(3)));
+
+ /* Open-drain arrangement */
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE; /* Set GPIO[8] to input mode */
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ rtl_write_byte(rtlpriv, ledreg, 0x28);
+ }
+
+ pled->b_ledon = false;
+}
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_on(hw, pLed0);
+ else
+ rtl8821ae_sw_led_on(hw, pLed0);
+ break;
+ case LED_CTL_POWER_OFF:
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtl8812ae_sw_led_off(hw, pLed0);
+ else
+ rtl8821ae_sw_led_off(hw, pLed0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8821ae_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(COMP_LED, DBG_LOUD, ("ledaction %d, \n",
+ ledaction));
+ _rtl8821ae_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/led.h b/drivers/staging/rtl8821ae/rtl8821ae/led.h
new file mode 100644
index 000000000000..44be401ba21f
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/led.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_LED_H__
+#define __RTL8821AE_LED_H__
+
+void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8821ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.c b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
new file mode 100644
index 000000000000..1dd33016b4b9
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.c
@@ -0,0 +1,5525 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+#include "../btcoexist/halbt_precomp.h"
+#include "hw.h"
+
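+/*
+ * Helper for stepping through the (register, value) pairs stored in the
+ * PHY/AGC/radio configuration arrays used by the routines below.
+ */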
+#define READ_NEXT_PAIR(array_table, v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = array_table[i]; \
+ v2 = array_table[i + 1]; \
+ } while (0)
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw);
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw);
+
+void rtl8812ae_fixspur(struct ieee80211_hw *hw,
+ enum ht_channel_width band_width, u8 channel)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ /*C cut Item12 ADC FIFO CLOCK*/
+ if (IS_VENDOR_8812A_C_CUT(rtlhal->version)) {
+ if (band_width == HT_CHANNEL_WIDTH_20_40 && channel == 11)
+ rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x3);
+ /* 0x8AC[11:10] = 2'b11*/
+ else
+ rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2);
+ /* 0x8AC[11:10] = 2'b10*/
+
+
+ /* <20120914, Kordan> A workaround to resolve
+ 2480Mhz spur by setting ADC clock as 160M. (Asked by Binson)*/
+ if (band_width == HT_CHANNEL_WIDTH_20 &&
+ (channel == 13 || channel == 14)) {
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+ /*0x8AC[9:8] = 2'b11*/
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+ /* 0x8C4[30] = 1*/
+ } else if (band_width == HT_CHANNEL_WIDTH_20_40 &&
+ channel == 11) {
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1);
+ /*0x8C4[30] = 1*/
+ } else if (band_width != HT_CHANNEL_WIDTH_80) {
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+ /*0x8AC[9:8] = 2'b10*/
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+ /*0x8C4[30] = 0*/
+ }
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ /* <20120914, Kordan> A workaround to resolve
+ 2480Mhz spur by setting ADC clock as 160M. (Asked by Binson)*/
+ if (band_width == HT_CHANNEL_WIDTH_20 &&
+ (channel == 13 || channel == 14))
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3);
+ /*0x8AC[9:8] = 11*/
+ else if (channel <= 14) /*2.4G only*/
+ rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2);
+ /*0x8AC[9:8] = 10*/
+ }
+
+}
+
+u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x)\n", regaddr,
+ bitmask));
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+ "Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue));
+
+ return returnvalue;
+
+}
+
+void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | ((data << bitshift) & bitmask));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+}
+
+u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+
+ original_value = _rtl8821ae_phy_rf_serial_read(hw,rfpath, regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+
+ return readback_value;
+}
+
+void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl8821ae_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+
+ _rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data);
+
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+
+}
+
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool b_is_pi_mode = false;
+ u32 retvalue = 0;
+
+ /* 2009/06/17 MH We can not execute IO for power save or other accident mode.*/
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+
+ /* <20120809, Kordan> CCA OFF(when entering), asked by James to avoid reading the wrong value.
+ <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+ if (offset != 0x0 &&
+ !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+ rtl_set_bbreg(hw, RCCAONSEC, 0x8, 1);
+
+ offset &= 0xff;
+
+ if (rfpath == RF90_PATH_A)
+ b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xC00, 0x4);
+ else if (rfpath == RF90_PATH_B)
+ b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xE00, 0x4);
+
+ rtl_set_bbreg(hw, RHSSIREAD_8821AE, 0xff, offset);
+
+ if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version)))
+ udelay(20);
+
+ if (b_is_pi_mode) {
+ if (rfpath == RF90_PATH_A)
+ retvalue = rtl_get_bbreg(hw, RA_PIREAD_8821A, BLSSIREADBACKDATA);
+ else if (rfpath == RF90_PATH_B)
+ retvalue = rtl_get_bbreg(hw, RB_PIREAD_8821A, BLSSIREADBACKDATA);
+ } else {
+ if (rfpath == RF90_PATH_A)
+ retvalue = rtl_get_bbreg(hw, RA_SIREAD_8821A, BLSSIREADBACKDATA);
+ else if (rfpath == RF90_PATH_B)
+ retvalue = rtl_get_bbreg(hw, RB_SIREAD_8821A, BLSSIREADBACKDATA);
+ }
+
+ /*<20120809, Kordan> CCA ON(when exiting), asked by James to avoid reading the wrong value.
+ <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/
+ if (offset != 0x0 && !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ || (IS_VENDOR_8812A_C_CUT(rtlhal->version))))
+ rtl_set_bbreg(hw, RCCAONSEC, 0x8, 0);
+ return retvalue;
+}
+
+#if 0
+static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0xff;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(1);
+ /*rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);*/
+ mdelay(1);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ BLSSIREADBACKDATA);
+ RT_TRACE(COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback,
+ retvalue));
+ return retvalue;
+}
+#endif
+
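+/*
+ * 3-wire RF write: the register offset is packed into bits [27:20] and
+ * the 20-bit data into bits [19:0] of the rf3wire_offset BB register.
+ */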
+static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("stop\n"));
+ return;
+ }
+ offset &= 0xff;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr));
+}
+
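+/*
+ * Returns the position of the least-significant set bit of the mask,
+ * i.e. how far a masked field must be shifted when it is read or
+ * written through the query/set helpers above.
+ */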
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+
+bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool rtstatus = 0;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_mac_with_headerfile(hw);
+ else
+ rtstatus = _rtl8821ae_phy_config_mac_with_headerfile(hw);
+
+ return rtstatus;
+}
+
+bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 regval;
+ u8 crystal_cap;
+ //u32 tmp;
+
+ _rtl8821ae_phy_init_bb_rf_register_definition(hw);
+
+ regval = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
+ regval |= FEN_PCIEA;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, regval);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+ regval | FEN_BB_GLB_RSTN | FEN_BBRSTB);
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+ rtl_write_byte(rtlpriv, REG_OPT_CTRL + 2, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/
+
+ rtstatus = _rtl8821ae_phy_bb8821a_config_parafile(hw);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ crystal_cap = rtlefuse->crystalcap & 0x3F;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0x7FF80000,
+ (crystal_cap | (crystal_cap << 6)));
+ } else {
+ crystal_cap = rtlefuse->crystalcap & 0x3F;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ }
+ rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837);
+
+ return rtstatus;
+}
+
+bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl8821ae_phy_rf6052_config(hw);
+}
+
+
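+/*
+ * Pick the TX BB swing setting for the given band/path. The returned
+ * register value encodes the attenuation: 0x200 = 0 dB, 0x16A = -3 dB,
+ * 0x101 = -6 dB, 0x0B6 = -9 dB, read from efuse offsets 0xC6 (2.4G)
+ * and 0xC7 (5G) unless the efuse autoload failed.
+ */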
+u32 phy_get_tx_bb_swing_8812A(struct ieee80211_hw *hw, u8 band, u8 rf_path)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ char bb_swing_2g = (char) ((-1 * 0xFF) & 0xFF);
+ char bb_swing_5g = (char) ((-1 * 0xFF) & 0xFF);
+ u32 out = 0x200;
+ const char auto_temp = -1;
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d\n",
+ (int)bb_swing_2g, (int)bb_swing_5g));
+
+ if ( rtlefuse->autoload_failflag) {
+ if ( band == BAND_ON_2_4G ) {
+ rtldm->bb_swing_diff_2g = bb_swing_2g;
+ if (bb_swing_2g == 0) out = 0x200; // 0 dB
+ else if (bb_swing_2g == -3) out = 0x16A; // -3 dB
+ else if (bb_swing_2g == -6) out = 0x101; // -6 dB
+ else if (bb_swing_2g == -9) out = 0x0B6; // -9 dB
+ else {
+ rtldm->bb_swing_diff_2g = 0;
+ out = 0x200;
+ }
+
+ } else if ( band == BAND_ON_5G ) {
+ rtldm->bb_swing_diff_5g = bb_swing_5g;
+ if (bb_swing_5g == 0) out = 0x200; // 0 dB
+ else if (bb_swing_5g == -3) out = 0x16A; // -3 dB
+ else if (bb_swing_5g == -6) out = 0x101; // -6 dB
+ else if (bb_swing_5g == -9) out = 0x0B6; // -9 dB
+ else {
+ if ( rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A;
+ } else {
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200;
+ }
+ }
+ } else {
+ rtldm->bb_swing_diff_2g = -3;
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ }
+ }
+ else
+ {
+ u32 swing = 0, swing_a = 0, swing_b = 0;
+
+ if (band == BAND_ON_2_4G)
+ {
+ if (0xFF == auto_temp)
+ {
+ efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
+ swing = (swing == 0xFF) ? 0x00 : swing;
+ }
+ else if (bb_swing_2g == 0) swing = 0x00; // 0 dB
+ else if (bb_swing_2g == -3) swing = 0x05; // -3 dB
+ else if (bb_swing_2g == -6) swing = 0x0A; // -6 dB
+ else if (bb_swing_2g == -9) swing = 0xFF; // -9 dB
+ else swing = 0x00;
+ }
+ else
+ {
+ if (0xFF == auto_temp)
+ {
+ efuse_shadow_read(hw, 1, 0xC7, (u32 *)&swing);
+ swing = (swing == 0xFF) ? 0x00 : swing;
+ }
+ else if (bb_swing_5g == 0) swing = 0x00; // 0 dB
+ else if (bb_swing_5g == -3) swing = 0x05; // -3 dB
+ else if (bb_swing_5g == -6) swing = 0x0A; // -6 dB
+ else if (bb_swing_5g == -9) swing = 0xFF; // -9 dB
+ else swing = 0x00;
+ }
+
+ swing_a = (swing & 0x3) >> 0; // 0xC6/C7[1:0]
+ swing_b = (swing & 0xC) >> 2; // 0xC6/C7[3:2]
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+ swing_a, swing_b));
+
+ //3 Path-A
+ if (swing_a == 0x0) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = 0;
+ else
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200; // 0 dB
+ } else if (swing_a == 0x1) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -3;
+ else
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ } else if (swing_a == 0x2) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -6;
+ else
+ rtldm->bb_swing_diff_5g = -6;
+ out = 0x101; // -6 dB
+ } else if (swing_a == 0x3) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -9;
+ else
+ rtldm->bb_swing_diff_5g = -9;
+ out = 0x0B6; // -9 dB
+ }
+
+ //3 Path-B
+ if (swing_b == 0x0) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = 0;
+ else
+ rtldm->bb_swing_diff_5g = 0;
+ out = 0x200; // 0 dB
+ } else if (swing_b == 0x1) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -3;
+ else
+ rtldm->bb_swing_diff_5g = -3;
+ out = 0x16A; // -3 dB
+ } else if (swing_b == 0x2) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -6;
+ else
+ rtldm->bb_swing_diff_5g = -6;
+ out = 0x101; // -6 dB
+ } else if (swing_b == 0x3) {
+ if (band == BAND_ON_2_4G)
+ rtldm->bb_swing_diff_2g = -9;
+ else
+ rtldm->bb_swing_diff_5g = -9;
+ out = 0x0B6; // -9 dB
+ }
+ }
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out));
+ return out;
+}
+void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtlpriv);
+ u8 current_band = rtlhal->current_bandtype;
+ u32 txpath, rxpath;
+ //u8 i, value8;
+ char bb_diff_between_band;
+
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+ txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0);
+ rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000);
+ rtlhal->current_bandtype = (enum band_type) band;
+ /* reconfig BB/RF according to wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* BB & RF Config */
+ RT_TRACE(COMP_CMD, DBG_DMESG, ("2.4G\n"));
+ rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* 0xCB0[15:12] = 0x7 (LNA_On)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x7);
+ /* 0xCB0[7:4] = 0x7 (PAPE_A)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x7);
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, 0x830, 0xE, 0x4); /*0x830[3:1] = 0x4*/
+ rtl_set_bbreg(hw, 0x834, 0x3, 0x1); /*0x834[1:0] = 0x1*/
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 0); /* 0xC1C[11:8] = 0 */
+ else
+ rtl_set_bbreg(hw, 0x82c, 0x3, 0); /* 0x82C[1:0] = 2'b00 */
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
+ }
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+ rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x0);
+ } else {/* 5G band */
+ u16 count, reg_41a;
+ RT_TRACE(COMP_CMD, DBG_DMESG, ("5G\n"));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /*0xCB0[15:12] = 0x5 (LNA_On)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x5);
+ /*0xCB0[7:4] = 0x4 (PAPE_A)*/
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x4);
+ }
+ /*CCK_CHECK_en*/
+ rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x80);
+
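+ /*
+  * Wait for the TX queues to drain: poll REG_TXPKT_EMPTY until
+  * bits [5:4] are both set, giving up after 50 polls of 50 us.
+  */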
+ count = 0;
+ reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+ reg_41a &= 0x30;
+ while ((reg_41a != 0x30) && (count < 50)) {
+ udelay(50);
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Delay 50us \n"));
+
+ reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
+ reg_41a &= 0x30;
+ count++;
+ RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a));
+ }
+ if (count != 0)
+ RT_TRACE(COMP_MLME, DBG_LOUD,
+ ("PHY_SwitchWirelessBand8812(): Switch to 5G Band. "
+ "Count = %d reg41A=0x%x\n", count, reg_41a));
+
+ // 2012/02/01, Sinda add registry to switch workaround without long-run verification for scan issue.
+ rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, 0x830, 0xE, 0x3); /*0x830[3:1] = 0x3*/
+ rtl_set_bbreg(hw, 0x834, 0x3, 0x2); /*0x834[1:0] = 0x2*/
+ }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ /* AGC table select */
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 1); /* 0xC1C[11:8] = 1 */
+ } else
+ rtl_set_bbreg(hw, 0x82c, 0x3, 1); /* 0x82C[1:0] = 2'b01 */
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
+ }
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath);
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD,
+ ("==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
+ rtlpriv->dm.ofdm_index[RF90_PATH_A]));
+ }
+
+ if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) ||
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)) {
+ rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
+ phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_A)); // 0xC1C[31:21]
+ rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
+ phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_B)); // 0xE1C[31:21]
+
+ /* <20121005, Kordan> When TxPowerTrack is ON, we should take care of the change of BB swing.
+ That is, reset all info to trigger Tx power tracking.*/
+ if (band != current_band) {
+ bb_diff_between_band = (rtldm->bb_swing_diff_2g - rtldm->bb_swing_diff_5g);
+ bb_diff_between_band = (band == BAND_ON_2_4G) ? bb_diff_between_band : (-1 * bb_diff_between_band);
+ rtldm->default_ofdm_index += bb_diff_between_band * 2;
+ }
+ rtl8821ae_dm_clear_txpower_tracking_state(hw);
+ }
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n"));
+ return;
+}
+
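+/*
+ * A condition word of 0xCDCDCDCD means "always apply"; otherwise the
+ * low three bytes encode the board type, interface and platform masks
+ * that the following (offset, data) pairs are restricted to.
+ */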
+static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw,
+ const u32 Condition
+ )
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 _board = rtlefuse->board_type; /*need efuse define*/
+ u32 _interface = rtlhal->interface;
+ u32 _platform = 0x08;/*SupportPlatform */
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0xFF;
+ if ((_board != cond) == 0 && cond != 0xFF)
+ return false;
+
+ cond = Condition & 0xFF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0xFF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+static void _rtl8821ae_config_rf_reg(struct ieee80211_hw *hw,
+ u32 addr,
+ u32 data,
+ enum radio_path rfpath,
+ u32 regaddr
+ )
+{
+ if (addr == 0xfe || addr == 0xffe) {
+ mdelay(50);
+ } else {
+ rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+ udelay(1);
+ }
+}
+
+static void _rtl8821ae_config_rf_radio_a(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1000; /*RF Content: radio_a_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_A, addr | maskforphyset);
+
+}
+
+static void _rtl8821ae_config_rf_radio_b(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1001; /*RF Content: radio_b_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_B, addr | maskforphyset);
+
+}
+
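+/*
+ * Addresses 0xfe..0xf9 in the BB config tables are not registers but
+ * delay markers (50 ms down to 1 us); any other entry is written to
+ * the baseband as a full dword.
+ */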
+static void _rtl8812ae_config_bb_reg(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+ else
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+}
+
+static void _rtl8821ae_config_bb_reg(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+}
+
+static void _rtl8821ae_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 band, rfpath, txnum, rate_section;
+
+ for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+ for (rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath)
+ for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
+ for (rate_section = 0; rate_section < TX_PWR_BY_RATE_NUM_SECTION; ++rate_section)
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = 0;
+}
+
+void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
+ u8 band, u8 path,
+ u8 rate_section,
+ u8 txnum, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path));
+ return;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+ break;
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value;
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD, ( "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum ) );
+ break;
+ };
+ } else if (band == BAND_ON_5G) {
+ switch (rate_section) {
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value;
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 5G, Rf Path %d, "
+ "%dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band));
+ }
+
+}
+
+u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
+ u8 band, u8 path,
+ u8 txnum, u8 rate_section)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 value = 0;
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", path));
+ return 0;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+ break;
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5];
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 2.4G, Rf Path %d,"
+ " %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else if (band == BAND_ON_5G) {
+ switch (rate_section) {
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4];
+ break;
+ default:
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid RateSection %d in Band 5G, Rf Path %d,"
+ " %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum));
+ break;
+ };
+ } else {
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band));
+ }
+
+ return value;
+
+}
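+/*
+ * The per-rate power values parsed from the PHY_REG PG table are
+ * BCD-coded dBm: the base for each rate section is taken from one byte
+ * of the selected offset word and decoded as tens/ones nibbles.
+ */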
+void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 rawValue = 0;
+ u8 base = 0, path = 0;
+
+ for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base);
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base );
+
+ rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base );
+ }
+}
+
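+/*
+ * Convert bytes [start..end] of *data from absolute BCD dBm values to
+ * their unsigned difference from base_val; bytes outside that range
+ * are kept unchanged.
+ */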
+void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
+ u8 end, u8 base_val)
+{
+ char i = 0;
+ u8 temp_value = 0;
+ u32 temp_data = 0;
+
+ for (i = 3; i >= 0; --i) {
+ if (i >= start && i <= end) {
+ // Get the exact value
+ temp_value = (u8) (*data >> (i * 8)) & 0xF;
+ temp_value += ((u8) ((*data >> (i * 8 + 4)) & 0xF)) * 10;
+
+ // Change the value to a relative value
+ temp_value = (temp_value > base_val) ? temp_value - base_val : base_val - temp_value;
+ } else {
+ temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+ }
+ temp_data <<= 8;
+ temp_data |= temp_value;
+ }
+ *data = temp_data;
+}
+
+void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 base = 0, rfPath = 0;
+
+ for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) {
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G CCK 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G OFDM 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS0-7 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS8-15 2TX: %d\n", base ) );
+
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5] ),
+ 0, 3, base );
+
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT1SSMCS0-9 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+ 0, 1, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT2SSMCS0-9 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ),
+ 2, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G OFDM 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS0-7 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS8-15 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6] ),
+ 0, 3, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT1SSMCS0-9 1TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+ 0, 1, base );
+
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 );
+ RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT2SSMCS0-9 2TX: %d\n", base ) );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ),
+ 2, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10] ),
+ 0, 3, base );
+ _phy_convert_txpower_dbm_to_relative_value(
+ &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11] ),
+ 0, 3, base );
+ }
+
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n"));
+
+}
+
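+/*
+ * Snapshot the per-section base powers first, then rewrite the
+ * tx_power_by_rate_offset tables as relative offsets from those
+ * bases; the conversion reads the stored bases, so the order of the
+ * two calls below matters.
+ */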
+void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
+{
+ _rtl8821ae_phy_store_txpower_by_rate_base(hw);
+ _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(hw);
+}
+
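+/*
+ * Baseband bring-up from the built-in header tables: load PHY_REG,
+ * seed the power-by-rate tables, apply the PG table when the efuse
+ * autoload succeeded, convert the tables to relative offsets, and
+ * finally load the AGC table.
+ */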
+static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool rtstatus;
+
+ /*TX POWER LIMIT
+ PHY_InitTxPowerLimit
+ PHY_ConfigRFWithCustomPowerLimitTableParaFile*/
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ else
+ rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (!rtstatus) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ return false;
+ }
+ _rtl8821ae_phy_init_tx_power_by_rate(hw);
+ if (!rtlefuse->autoload_failflag) {
+ /* rtlphy->pwrgroup_cnt = 0; */
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ else
+ rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ if (!rtstatus) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ return false;
+ }
+
+ _rtl8821ae_phy_txpower_by_rate_configuration(hw);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ else
+ rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+
+ if (!rtstatus) {
+ RT_TRACE(COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ return false;
+ }
+ rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+
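+/*
+ * The MAC/PHY/RF header tables are flat arrays of (address, data)
+ * pairs.  An "address" of 0xCDCDCDCD or above encodes a condition
+ * word instead: if _rtl8821ae_check_condition() rejects it, the pairs
+ * that follow are skipped up to one of the 0xDEAD/0xCDEF/0xCDCD end
+ * markers; otherwise they are written out up to the same markers.
+ */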
+static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i, v1, v2;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8812AE_MAC_REG_Array\n"));
+ arraylength = RTL8812AEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8812AE_MAC_REG_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Img:RTL8812AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+ for (i = 0; i < arraylength; i += 2) {
+ v1 = ptrarray[i];
+ v2 = (u8) ptrarray[i + 1];
+ if (v1<0xCDCDCDCD) {
+ rtl_write_byte(rtlpriv, v1, (u8) v2);
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2) {
+ rtl_write_byte(rtlpriv,v1,v2);
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+ }
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i, v1, v2;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8821AE_MAC_REG_Array\n"));
+ arraylength = RTL8821AEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8821AE_MAC_REG_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Img:RTL8821AE_MAC_REG_ARRAY LEN %d\n",arraylength));
+ for (i = 0; i < arraylength; i += 2) {
+ v1 = ptrarray[i];
+ v2 = (u8) ptrarray[i + 1];
+ if (v1<0xCDCDCDCD) {
+ rtl_write_byte(rtlpriv, v1, (u8) v2);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylength -2) {
+ rtl_write_byte(rtlpriv,v1,v2);
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylength -2)
+ READ_NEXT_PAIR(ptrarray, v1, v2, i);
+ }
+ }
+ }
+ return true;
+}
+
+static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8812AEPHY_REG_1TARRAYLEN;
+ array_table = RTL8812AE_PHY_REG_ARRAY;
+
+ for (i = 0; i < arraylen; i += 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1<0xCDCDCDCD) {
+ _rtl8812ae_config_bb_reg(hw, v1, v2);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2) {
+ _rtl8812ae_config_bb_reg(hw,v1,v2);
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8812AEAGCTAB_1TARRAYLEN;
+ array_table = RTL8812AE_AGC_TAB_ARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ }else{/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table,v1, v2, i);
+ }
+ }
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("The agctab_array_table[0] is "
+ "%x Rtl818EEPHY_REGArray[1] is %x \n",
+ array_table[i],
+ array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8821AEPHY_REG_1TARRAYLEN;
+ array_table = RTL8821AE_PHY_REG_ARRAY;
+
+ for (i = 0; i < arraylen; i += 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1<0xCDCDCDCD) {
+ _rtl8821ae_config_bb_reg(hw, v1, v2);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2) {
+ _rtl8821ae_config_bb_reg(hw,v1,v2);
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8821AEAGCTAB_1TARRAYLEN;
+ array_table = RTL8821AE_AGC_TAB_ARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ continue;
+ } else {/*This line is the start line of branch.*/
+ if (!_rtl8821ae_check_condition(hw,v1)) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ }else{/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen -2)
+ {
+ rtl_set_bbreg(hw, v1, MASKDWORD, v2);
+ udelay(1);
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen -2)
+ READ_NEXT_PAIR(array_table, v1, v2, i);
+ }
+ }
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("The agctab_array_table[0] is "
+ "%x Rtl818EEPHY_REGArray[1] is %x \n",
+ array_table[i],
+ array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
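+/*
+ * Map a TX AGC register offset (the 0xC20-0xC4C and 0xE20-0xE4C
+ * windows) to its rate-section index; each section is one 32-bit
+ * register holding four per-rate power bytes, hence the divide by 4.
+ */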
+static u8 _rtl8821ae_get_rate_selection_index(u32 regaddr)
+{
+ u8 index = 0;
+
+ regaddr &= 0xFFF;
+ if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+ index = (u8) ((regaddr - 0xC20) / 4);
+ else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+ index = (u8) ((regaddr - 0xE20) / 4);
+ else
+ RT_ASSERT(!COMP_INIT,
+ ("Invalid RegAddr 0x%x in"
+ "PHY_GetRateSectionIndexOfTxPowerByRate()\n",regaddr));
+
+ return index;
+}
+
+static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
+ u32 band, u32 rfpath,
+ u32 txnum, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rate_section = _rtl8821ae_get_rate_selection_index(regaddr);
+
+ if (band != BAND_ON_2_4G && band != BAND_ON_5G)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid Band %d\n", band));
+
+ if (rfpath > MAX_RF_PATH)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid RfPath %d\n", rfpath));
+
+ if (txnum > MAX_RF_PATH)
+ RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid TxNum %d\n", txnum ) );
+
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
+ RT_TRACE(COMP_INIT, DBG_WARNING,( "pHalData->TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
+ band, rfpath, txnum, rate_section, rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]));
+
+}
+
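+/*
+ * The power-by-rate (PG) table uses six-word records:
+ * band, RF path, TX num, register address, bitmask, data.
+ * A register address of 0xfe/0xffe is a delay sentinel; conditional
+ * blocks use the same 0xCDCDCDCD/0xDEAD scheme as the other tables.
+ */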
+static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1, v2, v3, v4, v5, v6;
+
+ phy_regarray_pg_len = RTL8812AEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8812AE_PHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i += 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
+ if (v1<0xCDCDCDCD) {
+ if ( (v4 == 0xfe) || (v4 == 0xffe))
+ mdelay(50);
+ else
+ /*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+ _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/
+ i += 2; /* skip the pair of expression*/
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1, v2, v3, v4, v5, v6;
+
+ phy_regarray_pg_len = RTL8821AEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8821AE_PHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i += 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
+ if (v1<0xCDCDCDCD) {
+ if (v4 == 0xfe)
+ mdelay(50);
+ else if (v4 == 0xfd)
+ mdelay(5);
+ else if (v4 == 0xfc)
+ mdelay(1);
+ else if (v4 == 0xfb)
+ udelay(50);
+ else if (v4 == 0xfa)
+ udelay(5);
+ else if (v4 == 0xf9)
+ udelay(1);
+
+ /*_rtl8821ae_store_pwrIndex_diffrate_offset*/
+ _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/
+ i += 2; /* skip the pair of expression*/
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ #define READ_NEXT_RF_PAIR_8812(radioa_array_table, v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = radioa_array_table[i]; \
+ v2 = radioa_array_table[i+1]; \
+ } while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table_a, *radioa_array_table_b;
+ u16 radioa_arraylen_a, radioa_arraylen_b;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN;
+ radioa_array_table_a= RTL8812AE_RADIOA_ARRAY;
+ radioa_arraylen_b= RTL8812AE_RADIOB_1TARRAYLEN;
+ radioa_array_table_b = RTL8812AE_RADIOB_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen_a));
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen_a; i = i + 2) {
+ v1 = radioa_array_table_a[i];
+ v2 = radioa_array_table_a[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ continue;
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_a-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_a -2) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen_a-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i);
+ }
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radioa_arraylen_b; i = i + 2) {
+ v1 = radioa_array_table_b[i];
+ v2 = radioa_array_table_b[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_b(hw,v1,v2);
+ continue;
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_b-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen_b-2) {
+ _rtl8821ae_config_rf_radio_b(hw,v1,v2);
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen_b-2)
+ READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i);
+ }
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ return true;
+}
+
+
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ #define READ_NEXT_RF_PAIR(v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = radioa_array_table[i]; \
+ v2 = radioa_array_table[i+1]; \
+ } while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table;
+ u16 radioa_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
+ radioa_array_table = RTL8821AE_RADIOA_ARRAY;
+ RT_TRACE(COMP_INIT, DBG_LOUD,
+ ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen));
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ v1 = radioa_array_table[i];
+ v2 = radioa_array_table[i+1];
+ if (v1<0xcdcdcdcd) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ }else{/*This line is the start line of branch.*/
+ if(!_rtl8821ae_check_condition(hw,v1)){
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen -2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's i += 2 */
+ } else {/*Configure matched pairs and skip to end of if-else.*/
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < radioa_arraylen -2) {
+ _rtl8821ae_config_rf_radio_a(hw,v1,v2);
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < radioa_arraylen -2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+ }
+ }
+ break;
+
+ case RF90_PATH_B:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ return true;
+}
+
+void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Default initial gain (c50=0x%x, "
+ "c58=0x%x, c60=0x%x, c68=0x%x \n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]));
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR2, MASKDWORD);
+
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Default framesync (0x%x) = 0x%x \n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
+static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8821A;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8821AE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8821AE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = RA_SIREAD_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = RB_SIREAD_8821A;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = RA_PIREAD_8821A;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = RB_PIREAD_8821A;
+}
+
+void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = _rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_B, txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
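+/*
+ * Translate a channel number to a table index: channels 1-14 map to
+ * indices 0-13, 5G channels are looked up in channel_5g[].  Returns
+ * true for 2.4G and false for 5G.
+ */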
+static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
+{
+ u8 channel_5g[CHANNEL_MAX_NUMBER_5G] =
+ {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112,
+ 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151,
+ 153,155,157,159,161,163,165,167,168,169,171,173,175,177};
+ u8 i = 0;
+ bool in_24g = true;
+
+ if (channel <= 14) {
+ in_24g = true;
+ *chnl_index = channel - 1;
+ } else {
+ in_24g = false;
+
+ for (i = 0; i < sizeof(channel_5g) / sizeof(u8); ++i) {
+ if (channel_5g[i] == channel) {
+ *chnl_index = i;
+ return in_24g;
+ }
+ }
+ }
+ return in_24g;
+}
+
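+/*
+ * Group a descriptor rate into one of the twelve four-rate sections
+ * (CCK, OFDM, HT MCS0-15, VHT 1SS/2SS) used to index
+ * tx_power_by_rate_offset.
+ */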
+static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
+{
+ char rate_section = 0;
+ switch (rate) {
+ case DESC_RATE1M:
+ case DESC_RATE2M:
+ case DESC_RATE5_5M:
+ case DESC_RATE11M:
+ rate_section = 0;
+ break;
+
+ case DESC_RATE6M:
+ case DESC_RATE9M:
+ case DESC_RATE12M:
+ case DESC_RATE18M:
+ rate_section = 1;
+ break;
+
+ case DESC_RATE24M:
+ case DESC_RATE36M:
+ case DESC_RATE48M:
+ case DESC_RATE54M:
+ rate_section = 2;
+ break;
+
+ case DESC_RATEMCS0:
+ case DESC_RATEMCS1:
+ case DESC_RATEMCS2:
+ case DESC_RATEMCS3:
+ rate_section = 3;
+ break;
+
+ case DESC_RATEMCS4:
+ case DESC_RATEMCS5:
+ case DESC_RATEMCS6:
+ case DESC_RATEMCS7:
+ rate_section = 4;
+ break;
+
+ case DESC_RATEMCS8:
+ case DESC_RATEMCS9:
+ case DESC_RATEMCS10:
+ case DESC_RATEMCS11:
+ rate_section = 5;
+ break;
+
+ case DESC_RATEMCS12:
+ case DESC_RATEMCS13:
+ case DESC_RATEMCS14:
+ case DESC_RATEMCS15:
+ rate_section = 6;
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ case DESC_RATEVHT1SS_MCS1:
+ case DESC_RATEVHT1SS_MCS2:
+ case DESC_RATEVHT1SS_MCS3:
+ rate_section = 7;
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ case DESC_RATEVHT1SS_MCS5:
+ case DESC_RATEVHT1SS_MCS6:
+ case DESC_RATEVHT1SS_MCS7:
+ rate_section = 8;
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ case DESC_RATEVHT1SS_MCS9:
+ case DESC_RATEVHT2SS_MCS0:
+ case DESC_RATEVHT2SS_MCS1:
+ rate_section = 9;
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ case DESC_RATEVHT2SS_MCS3:
+ case DESC_RATEVHT2SS_MCS4:
+ case DESC_RATEVHT2SS_MCS5:
+ rate_section = 10;
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ case DESC_RATEVHT2SS_MCS7:
+ case DESC_RATEVHT2SS_MCS8:
+ case DESC_RATEVHT2SS_MCS9:
+ rate_section = 11;
+ break;
+
+ default:
+ RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+ break;
+ }
+
+ return rate_section;
+}
+
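+/*
+ * Fetch the per-rate power offset: the rate selects the section and,
+ * via the shift, the byte inside the section's 32-bit word.  HT
+ * MCS8-15 and VHT 2SS MCS2-9 read from the 2TX table; everything
+ * else, including VHT 2SS MCS0-1 which share the 1TX section-9 word,
+ * reads from the 1TX table.
+ */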
+static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
+ u8 band, u8 path, u8 rate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 shift = 0, rate_section, tx_num;
+ char tx_pwr_diff = 0;
+
+ rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate);
+ tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+ if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+ if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15 ) ||
+ (rate >= DESC_RATEVHT2SS_MCS2 && rate <= DESC_RATEVHT2SS_MCS9))
+ tx_num = RF_2TX;
+ else
+ tx_num = RF_1TX;
+ }
+
+ switch (rate) {
+ case DESC_RATE1M: shift = 0; break;
+ case DESC_RATE2M: shift = 8; break;
+ case DESC_RATE5_5M: shift = 16; break;
+ case DESC_RATE11M: shift = 24; break;
+
+ case DESC_RATE6M: shift = 0; break;
+ case DESC_RATE9M: shift = 8; break;
+ case DESC_RATE12M: shift = 16; break;
+ case DESC_RATE18M: shift = 24; break;
+
+ case DESC_RATE24M: shift = 0; break;
+ case DESC_RATE36M: shift = 8; break;
+ case DESC_RATE48M: shift = 16; break;
+ case DESC_RATE54M: shift = 24; break;
+
+ case DESC_RATEMCS0: shift = 0; break;
+ case DESC_RATEMCS1: shift = 8; break;
+ case DESC_RATEMCS2: shift = 16; break;
+ case DESC_RATEMCS3: shift = 24; break;
+
+ case DESC_RATEMCS4: shift = 0; break;
+ case DESC_RATEMCS5: shift = 8; break;
+ case DESC_RATEMCS6: shift = 16; break;
+ case DESC_RATEMCS7: shift = 24; break;
+
+ case DESC_RATEMCS8: shift = 0; break;
+ case DESC_RATEMCS9: shift = 8; break;
+ case DESC_RATEMCS10: shift = 16; break;
+ case DESC_RATEMCS11: shift = 24; break;
+
+ case DESC_RATEMCS12: shift = 0; break;
+ case DESC_RATEMCS13: shift = 8; break;
+ case DESC_RATEMCS14: shift = 16; break;
+ case DESC_RATEMCS15: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS0: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS1: shift = 8; break;
+ case DESC_RATEVHT1SS_MCS2: shift = 16; break;
+ case DESC_RATEVHT1SS_MCS3: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS4: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS5: shift = 8; break;
+ case DESC_RATEVHT1SS_MCS6: shift = 16; break;
+ case DESC_RATEVHT1SS_MCS7: shift = 24; break;
+
+ case DESC_RATEVHT1SS_MCS8: shift = 0; break;
+ case DESC_RATEVHT1SS_MCS9: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS0: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS1: shift = 24; break;
+
+ case DESC_RATEVHT2SS_MCS2: shift = 0; break;
+ case DESC_RATEVHT2SS_MCS3: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS4: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS5: shift = 24; break;
+
+ case DESC_RATEVHT2SS_MCS6: shift = 0; break;
+ case DESC_RATEVHT2SS_MCS7: shift = 8; break;
+ case DESC_RATEVHT2SS_MCS8: shift = 16; break;
+ case DESC_RATEVHT2SS_MCS9: shift = 24; break;
+
+ default:
+ RT_ASSERT(true, ("Rate_Section is Illegal\n"));
+ break;
+ }
+
+ tx_pwr_diff = (u8) (rtlphy->tx_power_by_rate_offset[band][path][tx_num][rate_section] >> shift) & 0xff;
+
+ return tx_pwr_diff;
+}
+
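+/*
+ * Final power index for one rate/path/bandwidth/channel: the efuse
+ * base power for the band plus the HT20/HT40/80MHz and per-rate
+ * offsets plus the dynamic power-tracking remnant, clamped to
+ * MAX_POWER_INDEX.  VHT MCS8/MCS9 rates subtract the per-rate offset
+ * instead of adding it.
+ */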
+static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+ u8 rate, u8 bandwidth, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+ u8 txpower = 0;
+ bool in_24g = false;
+ char powerdiff_byrate = 0;
+
+ if (((rtlhal->current_bandtype == BAND_ON_2_4G) && (channel > 14 || channel < 1)) ||
+ ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
+ index = 0;
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("Illegal channel!!\n"));
+ }
+
+ in_24g = _rtl8821ae_phy_get_chnl_index(channel, &index);
+ if (in_24g) {
+ if (RX_HAL_IS_CCK_RATE(rate))
+ txpower = rtlefuse->txpwrlevel_cck[path][index];
+ else if ( DESC_RATE6M <= rate )
+ txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+ else
+ RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("invalid rate\n"));
+
+ if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht20diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht20diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_ht40diff[path][TX_2S];
+ }
+
+ } else {
+ if (DESC_RATE6M <= rate)
+ txpower = rtlefuse->txpwr_5g_bw40base[path][index];
+ else
+ RT_TRACE(COMP_POWER_TRACKING, DBG_WARNING,("INVALID Rate.\n"));
+
+ if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_80) {
+ u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ u8 i = 0;
+ for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i)
+ if (channel_5g_80m[i] == channel)
+ index = i;
+
+ if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower = rtlefuse->txpwr_5g_bw80base[path][index]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_1S];
+ if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) ||
+ (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9))
+ txpower = rtlefuse->txpwr_5g_bw80base[path][index]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_1S]
+ + rtlefuse->txpwr_5g_bw80diff[path][TX_2S];
+ }
+ }
+ if (rtlefuse->eeprom_regulatory != 2)
+ powerdiff_byrate = _rtl8821ae_phy_get_txpower_by_rate(hw,
+ (u8)(!in_24g), path, rate);
+
+ if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 ||
+ rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9)
+ txpower -= powerdiff_byrate;
+ else
+ txpower += powerdiff_byrate;
+
+ if (rate > DESC_RATE11M)
+ txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path];
+ else
+ txpower += rtlpriv->dm.remnant_cck_idx;
+
+ if (txpower > MAX_POWER_INDEX)
+ txpower = MAX_POWER_INDEX;
+
+ return txpower;
+}
+
+static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
+ u8 power_index, u8 path, u8 rate)
+{
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ if (path == RF90_PATH_A) {
+ switch (rate) {
+ case DESC_RATE1M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE2M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE5_5M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE11M:
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE6M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE9M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE12M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE18M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE24M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE36M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE48M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE54M:
+ rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS10:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS11:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS12:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS13:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS14:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS15:
+ rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+ break;
+
+ default:
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+ break;
+ }
+ } else if (path == RF90_PATH_B) {
+ switch (rate) {
+ case DESC_RATE1M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE2M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE5_5M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE11M:
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE6M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE9M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE12M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE18M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATE24M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE0, power_index);
+ break;
+ case DESC_RATE36M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE1, power_index);
+ break;
+ case DESC_RATE48M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE2, power_index);
+ break;
+ case DESC_RATE54M:
+ rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS10:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS11:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEMCS12:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEMCS13:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEMCS14:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEMCS15:
+ rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT1SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT1SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS0:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS1:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS2:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS3:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS4:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS5:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index);
+ break;
+
+ case DESC_RATEVHT2SS_MCS6:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS7:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS8:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index);
+ break;
+ case DESC_RATEVHT2SS_MCS9:
+ rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index);
+ break;
+
+ default:
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n"));
+ break;
+ }
+ } else {
+ RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid RFPath!!\n"));
+ }
+}
+
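+/* Program the TX AGC power index for every rate in array[] on path. */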
+void _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw,
+ u8 *array, u8 path, u8 channel,
+ u8 size)
+{
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 i;
+ u8 power_index;
+ for (i = 0; i < size; i ++) {
+ power_index = _rtl8821ae_get_txpower_index(hw, path, array[i],
+ rtlphy->current_chan_bw, channel);
+ _rtl8821ae_phy_set_txpower_index(hw, power_index, path, array[i]);
+ }
+}
+
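+/*
+ * Power-training register: pack three stepped-down copies of the MCS7
+ * power index (-10, -8 and -6, floored at 2) into one byte each of
+ * the per-path training register.
+ */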
+static void _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw,
+ u8 bw, u8 channel, u8 path)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 i;
+ u32 power_level, data, offset;
+
+ if(path >= rtlphy->num_total_rfpath)
+ return;
+
+ data = 0;
+ if (path == RF90_PATH_A) {
+ power_level =
+ _rtl8821ae_get_txpower_index(hw, RF90_PATH_A,
+ DESC_RATEMCS7, bw, channel);
+ offset = RA_TXPWRTRAING;
+ } else {
+ power_level =
+ _rtl8821ae_get_txpower_index(hw, RF90_PATH_B,
+ DESC_RATEMCS7, bw, channel);
+ offset = RB_TXPWRTRAING;
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ power_level = power_level - 10;
+ else if (i == 1)
+ power_level = power_level - 8;
+ else
+ power_level = power_level - 6;
+
+ data |= (((power_level > 2) ? (power_level) : 2) << (i * 8));
+ }
+ rtl_set_bbreg(hw, offset, 0xffffff, data);
+}
+
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path)
+{
+ //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+ u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+ DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+ u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+ DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+ u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+ DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+ u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2,
+ DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+ DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+ DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+ u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2,
+ DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+ DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+ DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+ //u8 i,size;
+ //u8 power_index;
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,cck_rates,path,channel,
+ sizeof(cck_rates) / sizeof(u8));
+
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ofdm_rates,path,channel,
+ sizeof(ofdm_rates) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_1t,path,channel,
+ sizeof(ht_rates_1t) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_1t,path,channel,
+ sizeof(vht_rates_1t) / sizeof(u8));
+
+ if (rtlphy->num_total_rfpath >= 2) {
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_2t,path,channel,
+ sizeof(ht_rates_2t) / sizeof(u8));
+ _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_2t,path,channel,
+ sizeof(vht_rates_2t) / sizeof(u8));
+ }
+
+ _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+/*just in case, write txpower in DW, to reduce time*/
+#if 0
+void _rtl8821ae_phy_get_txpower_index_by_rate_array(struct ieee80211_hw *hw, u8 channel,
+ u8 *rate, u8 path, u8 bw, u8 *power_index, u8 size)
+{
+ u8 i;
+ for (i = 0; i < size; i++)
+ power_index[i] = _rtl8821ae_get_txpower_index(hw, path, rate[i], bw, channel);
+}
+
+void rtl8821ae_phy_set_txpower_level_by_path2(struct ieee80211_hw *hw, u8 channel, u8 path)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy);
+ u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M};
+ u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M,
+ DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M};
+ u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3,
+ DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7};
+ u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11,
+ DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15};
+ u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4,
+ DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9};
+ u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4,
+ DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9};
+ u8 i, j;
+ u8 pwridx[48] = {0};
+ u8 cs = sizeof(cck_rates) / sizeof(u8);
+ u8 os = sizeof(ofdm_rates) / sizeof(u8);
+ u8 h1s = sizeof(ht_rates_1t) / sizeof(u8);
+ u8 h2s = sizeof(ht_rates_2t) / sizeof(u8);
+ u8 v1s = sizeof(vht_rates_1t) / sizeof(u8);
+ u8 v2s = sizeof(vht_rates_2t) / sizeof(u8);
+
+ u8 len, start;
+ u32 reg_addr, power_index;
+ u8 bw = rtlphy->current_chan_bw;
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ofdm_rates, path, bw, &pwridx[cs], os);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ht_rates_1t, path, bw, &pwridx[cs+os], h1s);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ vht_rates_1t, path, bw, &pwridx[cs+os+h1s+h2s], v1s);
+
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ cck_rates, path, bw, pwridx, cs);
+
+ start = 0;
+ } else {
+ start = cs;
+ }
+
+ reg_addr = (path == 0) ? RTXAGC_A_CCK11_CCK1 : RTXAGC_B_CCK11_CCK1;
+ reg_addr += start;
+
+ len = cs + os + h1s + h2s + v1s;
+ if (rtlphy->num_total_rfpath >= 2) {
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ ht_rates_2t, path, bw, &pwridx[cs+os+h1s], h2s);
+
+ _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel,
+ vht_rates_2t, path, bw, &pwridx[cs+os+h1s+h2s+v1s], v2s);
+
+ len += v2s;
+ }
+ for (i = start; i < len; i += 4) {
+ power_index = 0;
+ for (j = 0; j < 4; j++)
+ power_index |= (pwridx[i+j] << (j*8));
+ rtl_set_bbreg(hw, reg_addr + i, MASKDWORD, power_index);
+ }
+
+ _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path);
+}
+#endif
+
+void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 path = 0;
+
+ for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path )
+ rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path);
+}
+
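+/*
+ * Convert a TX power index to dBm: each index step is half a dB, with
+ * a per-mode offset (-7 for CCK, -8 otherwise).
+ */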
+static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+
+void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP_BAND0:
+ iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+
+ break;
+ case SCAN_OPT_BACKUP_BAND1:
+ iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *) & iotype);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Unknown Scan Backup operation.\n"));
+ break;
+ }
+ }
+}
+
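+/* Bits [8:7] of REG_TRXPTCL_CTL select the RF bandwidth (20/40/80 MHz). */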
+static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv *rtlpriv, u8 bw)
+{
+ u16 reg_rf_mode_bw, tmp = 0;
+ reg_rf_mode_bw = rtl_read_word(rtlpriv, REG_TRXPTCL_CTL);
+ switch (bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, reg_rf_mode_bw & 0xFE7F);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ tmp = reg_rf_mode_bw | BIT(7);
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFEFF);
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ tmp = reg_rf_mode_bw | BIT(8);
+ rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_WARNING,("unknown Bandwidth: 0x%x\n",bw));
+ break;
+ }
+}
+
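+/*
+ * Encode the secondary-channel layout: the 40MHz sub-channel position
+ * goes in the high nibble and the 20MHz position in the low nibble of
+ * the returned byte.
+ */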
+static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
+{
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 sc_set_40 = 0, sc_set_20 = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+ sc_set_40 = VHT_DATA_SC_40_LOWER_OF_80MHZ;
+ else if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+ sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
+ else
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("SCMapping: Not Correct Primary40MHz Setting \n"));
+
+ if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+ sc_set_20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
+ sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+ sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) &&
+ (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
+ sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
+ else
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("SCMapping: Not Correct Primary40MHz Setting \n"));
+ } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER)
+ sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER)
+ sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("SCMapping: Not Correct Primary40MHz Setting \n"));
+ }
+ return ((sc_set_40 << 4) | sc_set_20);
+}
+
+void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 sub_chnl = 0;
+ u8 l1pk_val = 0;
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("Switch to %s bandwidth\n",
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" :
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
+ "40MHz" : "80MHz"))));
+
+ _rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw);
+ sub_chnl = _rtl8821ae_phy_get_secondary_chnl(rtlpriv);
+ rtl_write_byte(rtlpriv, 0x0483, sub_chnl);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300200);
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+
+ if(rtlphy->rf_type == RF_2T2R)
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 7);
+ else
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 8);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300201);
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0);
+ rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+ rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+ if(rtlphy->reg_837 & BIT(2))
+ l1pk_val = 6;
+ else
+ {
+ if(rtlphy->rf_type == RF_2T2R)
+ l1pk_val = 7;
+ else
+ l1pk_val = 8;
+ }
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); // 0x848[25:22] = 0x6
+
+ if(sub_chnl == VHT_DATA_SC_20_UPPER_OF_80MHZ)
+ rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 1);
+ else
+ rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 0);
+ break;
+
+ case HT_CHANNEL_WIDTH_80:
+ rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300202); // 0x8ac[21,20,9:6,1,0]=8'b11100010
+ rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); // 0x8c4[30] = 1
+ rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl);
+ rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl);
+
+ if(rtlphy->reg_837 & BIT(2))
+ l1pk_val = 5;
+ else
+ {
+ if(rtlphy->rf_type == RF_2T2R)
+ l1pk_val = 6;
+ else
+ l1pk_val = 7;
+ }
+ rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val);
+
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+
+ rtl8812ae_fixspur(hw, rtlphy->current_chan_bw, rtlphy->current_channel);
+
+ rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+
+ RT_TRACE(COMP_SCAN, DBG_LOUD, (" \n"));
+}
+
+void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8821ae_phy_set_bw_mode_callback(hw);
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("FALSE driver sleep or unload\n"));
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+
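+/*
+ * Program the channel-dependent RF/BB settings: the RFC_AREA field and, for
+ * each RF path, the band/channel bits of RF_CHNLBW (plus RF_APK for 5 GHz
+ * channels on the 8821AE).
+ */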
+void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 channel = rtlphy->current_channel;
+ u8 path;
+ u32 data;
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d\n", rtlphy->current_channel));
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (36 <= channel && channel <= 48)
+ data = 0x494;
+ else if (50 <= channel && channel <= 64)
+ data = 0x453;
+ else if (100 <= channel && channel <= 116)
+ data = 0x452;
+ else if (118 <= channel)
+ data = 0x412;
+ else
+ data = 0x96a;
+ rtl_set_bbreg(hw, RFC_AREA, 0x1ffe0000, data);
+
+
+ for(path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++)
+ {
+ if (36 <= channel && channel <= 64)
+ data = 0x101;
+ else if (100 <= channel && channel <= 140)
+ data = 0x301;
+ else if (140 < channel)
+ data = 0x501;
+ else
+ data = 0x000;
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+ BIT(18)|BIT(17)|BIT(16)|BIT(9)|BIT(8), data);
+
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW,
+ BMASKBYTE0, channel);
+
+ if (channel > 14) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
+ if (36 <= channel && channel <= 64)
+ data = 0x114E9;
+ else if (100 <= channel && channel <= 140)
+ data = 0x110E9;
+ else
+ data = 0x110E9;
+ rtl8821ae_phy_set_rf_reg(hw, path, RF_APK,
+ BRFREGOFFSETMASK, data);
+ }
+ }
+ }
+ RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+}
+
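+/*
+ * Switch to rtlphy->current_channel: wait for a pending LCK, change the band
+ * (2.4/5 GHz) if needed, program the RF channel registers and refresh the TX
+ * power level for the new channel.
+ */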
+u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 timeout = 1000, timecount = 0;
+ u8 channel = rtlphy->current_channel;
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ RT_TRACE(COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false driver sleep or unload\n"));
+ return 0;
+ }
+ while (rtlphy->lck_inprogress && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+
+ if (rtlphy->current_channel > 14 && rtlhal->current_bandtype != BAND_ON_5G)
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_5G);
+ else if (rtlphy->current_channel <= 14 && rtlhal->current_bandtype != BAND_ON_2_4G)
+ rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+
+ rtlphy->sw_chnl_inprogress = true;
+ if (channel == 0)
+ channel = 1;
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d, band type is %d\n", rtlphy->current_channel, rtlhal->current_bandtype));
+
+ rtl8821ae_phy_sw_chnl_callback(hw);
+
+ rtl8821ae_dm_clear_txpower_tracking_state(hw);
+ rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel);
+
+ RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ return 1;
+}
+
+#if 0
+static u8 _rtl8821ae_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+ mdelay(IQK_DELAY_TIME);
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+ reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+ reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+ reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+
+ if (!(reg_eac & BIT(31)) &&
+ (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static u8 _rtl8821ae_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u32temp;
+ u8 result = 0x00;
+
+ /*Get TXIMR Setting*/
+ /*Modify RX IQK mode table*/
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+ /*IQK Setting*/
+ rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x81004800);
+
+ /*path a IQK setting*/
+ rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160804);
+ rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160000);
+
+ /*LO calibration Setting*/
+ rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+ /*one shot,path A LOK & iqk*/
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ u32temp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16);
+ rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, u32temp);
+ /*RX IQK*/
+ /*Modify RX IQK mode table*/
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);
+
+ /*IQK Setting*/
+ rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x01004800);
+
+ /*path a IQK setting*/
+ rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160c05);
+ rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160c05);
+
+ /*LO calibration Setting*/
+ rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911);
+ /*one shot,path A LOK & iqk*/
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, RRx_Power_Before_IQK_A_2, MASKDWORD);
+
+ if (!(reg_eac & BIT(27)) &&
+ (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+#endif
+
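+/*
+ * Map a channel number to its index in the 2G/5G IQK channel table below:
+ * 2.4 GHz channels return 0, 5 GHz channels return their 1-based position
+ * after the fourteen 2.4 GHz entries.
+ */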
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl)
+{
+ u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] =
+ {1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,38,40,42,\
+ 44,46,48,50,52,54,56,58,60,62,64,100,\
+ 102,104,106,108,110,112,114,116,118,\
+ 120,122,124,126,128,130,132,134,136,\
+ 138,140,149,151,153,155,157,159,161,\
+ 163,165};
+ u8 place = chnl;
+
+ if(chnl > 14)
+ {
+ for(place = 14; place<sizeof(channel_all); place++)
+ {
+ if(channel_all[place] == chnl)
+ {
+ return place-13;
+ }
+ }
+ }
+
+ return 0;
+}
+
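+/*
+ * Write the RX IQ-imbalance correction pair (rx_x, rx_y) into the RX IQC
+ * register of the given path (0xc10 for path A, 0xe10 for path B). Results
+ * matching 0x112/0x3ee (apparently treated as invalid) fall back to the
+ * neutral default 0x100/0x0.
+ */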
+void _rtl8812ae_iqk_rx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 rx_x,
+ u32 rx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (rx_x >> 1 ==0x112 || rx_y >> 1 == 0x3ee){
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x100);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x >> 1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y >> 1);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xc10 = %x ====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xc10)));
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (rx_x >> 1 ==0x112 || rx_y >> 1 == 0x3ee){
+ rtl_set_bbreg(hw, 0xe10, 0x000003ff, 0x100);
+ rtl_set_bbreg(hw, 0xe10, 0x03ff0000, 0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ }
+ else{
+ rtl_set_bbreg(hw, 0xe10, 0x000003ff, rx_x >> 1);
+ rtl_set_bbreg(hw, 0xe10, 0x03ff0000, rx_y >> 1);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X = %x;;RX_Y = %x====>fill to IQC\n ",
+ rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xe10 = %x====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xe10)));
+ }
+ }
+ break;
+ default:
+ break;
+ };
+}
+
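+/*
+ * Write the TX IQ-imbalance correction pair (tx_x, tx_y) into the TX IQC
+ * registers of the given path (0xccc/0xcd4 for path A, 0xecc/0xed4 for B).
+ */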
+void _rtl8812ae_iqk_tx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 tx_x,
+ u32 tx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x & 0x000007ff, tx_y & 0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+ rtl_set_bbreg(hw, 0xecc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xed4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x&0x000007ff, tx_y&0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("0xed4 = %x;;0xecc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xed4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xecc, 0x000007ff)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
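+/*
+ * Save the MAC/BB registers touched by the IQK sequence so they can be
+ * restored by _rtl8812ae_iqk_restore_macbb() afterwards.
+ */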
+void _rtl8812ae_iqk_backup_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 mac_bb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*save MACBB default value*/
+ for (i = 0; i < mac_bb_num; i++) {
+ macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]);
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_backup_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_REG,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
+void _rtl8812ae_iqk_backup_rf(
+ struct ieee80211_hw *hw,
+ u32 *rfa_backup,
+ u32 *rfb_backup,
+ u32 *backup_rf_reg,
+ u32 rf_num
+ )
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save RF Parameters*/
+ for (i = 0; i < rf_num; i++){
+ rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD);
+ rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
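+/*
+ * Quiet the MAC/BB for calibration: pause the TX queues (0x522), turn off
+ * the RX antenna path and CCA so the IQK tones are undisturbed.
+ */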
+void _rtl8812ae_iqk_configure_mac(
+ struct ieee80211_hw *hw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ /* ========MAC register setting========*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_write_byte(rtlpriv, 0x522, 0x3f);
+ rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+ rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/
+ rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/
+}
+
+#define cal_num 10
+
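+/*
+ * TX (then RX) IQ calibration for the 8812AE, both RF paths at once:
+ * repeatedly fire one-shot IQK, read the X/Y results from the 0xd00/0xd40
+ * report registers, and accept them once two runs differ by less than 4 LSB
+ * in both I and Q (retrying up to 10 times); the accepted averages are then
+ * written to the IQC registers.
+ */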
+void _rtl8812ae_iqk_tx(
+ struct ieee80211_hw *hw,
+ u8 chnl_idx
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 delay_count;
+ u8 cal0_retry, cal1_retry;
+ u8 tx0_average = 0, tx1_average = 0, rx0_average = 0, rx1_average = 0;
+ int tx0_x = 0, tx0_y = 0, rx0_x = 0, rx0_y = 0;
+ int tx_x0[cal_num], tx_y0[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+ int tx1_x = 0, tx1_y = 0, rx1_x = 0, rx1_y = 0;
+ int tx_x1[cal_num], tx_y1[cal_num], rx_x1[cal_num], rx_y1[cal_num];
+ bool tx0iqkok= false, rx0iqkok = false, tx0_fail = true, rx0_fail;
+ bool iqk0_ready = false, tx0_finish = false, rx0_finish = false;
+ bool tx1iqkok = false, rx1iqkok = false, tx1_fail = true, rx1_fail;
+ bool iqk1_ready = false, tx1_finish = false, rx1_finish = false, vdf_enable = false;
+ int i, tx_dt[3] = {0}, rx_dt[3] = {0}, ii, dx = 0, dy = 0;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("BandWidth = %d.\n",
+ rtlphy->current_chan_bw));
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ vdf_enable = true;
+ /* Note: VDF is force-disabled here, overriding the 80 MHz check above. */
+ vdf_enable = false;
+
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*========Path-A AFE all on========*/
+ /*Port 0 DAC/ADC on*/
+ rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+
+ /* Port 1 DAC/ADC also enabled (same 0x77777777 pattern as port 0) */
+ rtl_write_dword(rtlpriv, 0xe60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xe64, 0x77777777);
+
+ rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xe68, 0x19791979);
+ rtl_set_bbreg(hw,0xc00, 0xf, 0x4);/*hardware 3-wire off*/
+ rtl_set_bbreg(hw,0xe00, 0xf, 0x4);/*hardware 3-wire off*/
+
+ /*DAC/ADC sampling rate (160 MHz)*/
+ rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+ rtl_set_bbreg(hw, 0xe5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+ rtl_set_bbreg(hw, 0x8c4, BIT(30), 0x1);
+
+ /*====== Path A TX IQK RF Setting ======*/
+ rtl_set_bbreg(hw,0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw,RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80002);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x20000);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d5);
+ rtl_set_rfreg(hw,RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+ /*====== Path B TX IQK RF Setting ======*/
+ rtl_set_rfreg(hw,RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80002);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x20000);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3fffd);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe83f);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d5);
+ rtl_set_rfreg(hw,RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x8a001);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_set_bbreg(hw, 0xe94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y)*/
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y)*/
+ rtl_write_dword(rtlpriv, 0x984, 0x00462910);/*[0]:AGC_en, [15]:idac_K_Mask*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+
+ /*ExternalPA_5G == 0*/
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+ rtl_write_dword(rtlpriv, 0xe88, 0x821403f1);
+
+ if (rtlhal->current_bandtype){
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x68163e96);
+ }
+ else{
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x28163e96);
+ }
+
+ if (!vdf_enable) {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xce8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xe80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xe84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xee8, 0x00000000);
+
+ cal0_retry = 0;
+ cal1_retry = 0;
+ while(1){
+ /*one shot*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);/* eb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); /* delay 10 ms */
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ if (!tx0_finish)
+ iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (!tx1_finish)
+ iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+ if ((iqk0_ready && iqk1_ready) || (delay_count>20))
+ break;
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX delay_count = %d\n", delay_count));
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ /* ============TXIQK Check==============*/
+ tx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(12));
+ tx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(12));
+ if (!(tx0_fail || tx0_finish)){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n",
+ tx0_average, (tx_x0[tx0_average]) >> 21 & 0x000007ff,
+ tx0_average, (tx_y0[tx0_average]) >> 21 & 0x000007ff));
+
+ tx0_average++;
+ }
+ else{
+ tx0iqkok = false;
+ cal0_retry++;
+ if (cal0_retry == 10)
+ break;
+ }
+ if (!(tx1_fail || tx1_finish)){
+ rtl_write_dword(rtlpriv, 0xeb8, 0x02000000);
+ tx_x1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xeb8, 0x04000000);
+ tx_y1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ tx1iqkok= true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX_X1[%d] = %x ;; TX_Y1[%d] = %x\n",
+ tx1_average, (tx_x1[tx1_average]) >> 21 & 0x000007ff,
+ tx1_average, (tx_y1[tx1_average]) >> 21 & 0x000007ff));
+
+ tx1_average++;
+ }
+ else{
+ tx1iqkok = false;
+ cal1_retry++;
+ if (cal1_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ tx1iqkok = false;
+ cal0_retry++;
+ cal1_retry++;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("Delay 20ms TX IQK Not Ready!!!!!\n"));
+ if (cal0_retry == 10)
+ break;
+ }
+ if (tx0_average >= 2){
+ for (i = 0; i < tx0_average; i++){
+ for (ii = i+1; ii <tx0_average; ii++){
+ dx = (tx_x0[i] >> 21) - (tx_x0[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+ if (dy < 4 && dy > -4){
+ tx0_x = ((tx_x0[i] >> 21) + (tx_x0[ii] >> 21)) / 2;
+ tx0_y = ((tx_y0[i] >> 21) + (tx_y0[ii] >> 21)) / 2;
+ tx_x0[0] = tx_x0[i];
+ tx_y0[1] = tx_y0[ii];
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_X = %x;;TX0_Y = %x\n",
+ tx0_x & 0x000007ff, tx0_y & 0x000007ff));
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2;
+ }
+ tx0_finish = true;
+ }
+ }
+ }
+ }
+ }
+ if (tx1_average >= 2){
+ for (i = 0; i < tx1_average; i++){
+ for (ii = i+1; ii < tx1_average; ii++){
+ dx = (tx_x1[i] >> 21) - (tx_x1[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (tx_y1[i] >> 21) - (tx_y1[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ tx1_x = ((tx_x1[i] >> 21) + (tx_x1[ii] >> 21)) / 2;
+ tx1_y = ((tx_y1[i] >> 21) + (tx_y1[ii] >> 21)) / 2;
+ tx_x1[0] = tx_x1[i];
+ tx_y1[1] = tx_y1[ii];
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX1_X = %x;;TX1_Y = %x\n",
+ tx1_x & 0x000007ff, tx1_y & 0x000007ff));
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2;
+ }
+ tx1_finish = true;
+ }
+ }
+ }
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_Average = %d, TX1_Average = %d\n",
+ tx0_average, tx1_average));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TX0_finish = %d, TX1_finish = %d\n",
+ tx0_finish, tx1_finish));
+ if (tx0_finish && tx1_finish)
+ break;
+ if ((cal0_retry + tx0_average) >= 10
+ || (cal1_retry + tx1_average) >= 10 )
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TXA_cal_retry = %d\n", cal0_retry));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("TXB_cal_retry = %d\n", cal1_retry));
+
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x58, 0x7fe00,
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x8, 0xffc00)); /*Load LOK*/
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x58, 0x7fe00,
+ rtl_get_rfreg(hw, RF90_PATH_B, 0x8, 0xffc00)); /* Load LOK*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+
+
+ if (!vdf_enable) {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (tx0_finish) {
+ /*====== Path A RX IQK RF Setting======*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x3); /* BW 20M*/
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x88001);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d6);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x00000);
+ }
+ if (tx1_finish){
+ /*====== Path B RX IQK RF Setting======*/
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80000);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x30000);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3f7ff);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe7bf);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x88001);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d1);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x00000);
+ }
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a890);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ if (tx0_finish) {
+ rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160cc0);
+ }
+ if (tx1_finish){
+ rtl_write_dword(rtlpriv, 0xe80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/
+ rtl_write_dword(rtlpriv, 0xe84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/
+ rtl_write_dword(rtlpriv, 0xe88, 0x02140119);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x28160ca0);
+ }
+ cal0_retry = 0;
+ cal1_retry = 0;
+ while(1){
+ /* one shot*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ if (tx0_finish){
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0[rx0_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0[rx0_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00300000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);
+ mdelay(5); /* delay 5 ms */
+ }
+ if (tx1_finish){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x1[rx1_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y1[rx1_average % 2]) >> 21 & 0x000007ff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00300000);/* eb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);
+ }
+ mdelay(10); /*Delay 10ms*/
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ if (!rx0_finish && tx0_finish)
+ iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if (!rx1_finish && tx1_finish)
+ iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10));
+ if ((iqk0_ready && iqk1_ready)||(delay_count>20))
+ break;
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX delay_count = %d\n", delay_count));
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(11));
+ rx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(11));
+ if (!(rx0_fail || rx0_finish) && tx0_finish){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ rx_x0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ rx_y0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21;
+ rx0iqkok= true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n",
+ rx0_average, (rx_x0[rx0_average]) >> 21 & 0x000007ff,
+ rx0_average, (rx_y0[rx0_average]) >> 21 & 0x000007ff));
+
+ rx0_average++;
+ }
+ else{
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("1. RXA_cal_retry = %d\n", cal0_retry));
+ rx0iqkok = false;
+ cal0_retry++;
+ if (cal0_retry == 10)
+ break;
+ }
+ if (!(rx1_fail || rx1_finish) && tx1_finish){
+ rtl_write_dword(rtlpriv, 0xeb8, 0x06000000);
+ rx_x1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rtl_write_dword(rtlpriv, 0xeb8, 0x08000000);
+ rx_y1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21;
+ rx1iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX_X1[%d] = %x ;; RX_Y1[%d] = %x\n",
+ rx1_average, (rx_x1[rx1_average]) >> 21 & 0x000007ff,
+ rx1_average, (rx_y1[rx1_average]) >> 21 & 0x000007ff));
+
+ rx1_average++;
+ }
+ else{
+ rx1iqkok= false;
+ cal1_retry++;
+ if (cal1_retry == 10)
+ break;
+ }
+
+ }
+ else{
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("2. RXA_cal_retry = %d\n", cal0_retry));
+ rx0iqkok = false;
+ rx1iqkok = false;
+ cal0_retry++;
+ cal1_retry++;
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("Delay 20ms RX IQK Not Ready!!!!!\n"));
+ if (cal0_retry == 10)
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("3. RXA_cal_retry = %d\n", cal0_retry));
+ if (rx0_average >= 2){
+ for (i = 0; i < rx0_average; i++){
+ for (ii = i+1; ii < rx0_average; ii++){
+ dx = (rx_x0[i] >> 21) - (rx_x0[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y0[i] >> 21) - (rx_y0[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ rx0_x = ((rx_x0[i]>>21) + (rx_x0[ii] >> 21)) / 2;
+ rx0_y = ((rx_y0[i]>>21) + (rx_y0[ii] >> 21)) / 2;
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+ }
+ rx0_finish = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (rx1_average >= 2){
+ for (i = 0; i < rx1_average; i++){
+ for (ii = i+1; ii < rx1_average; ii++){
+ dx = (rx_x1[i] >> 21) - (rx_x1[ii] >> 21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y1[i] >> 21) - (rx_y1[ii] >> 21);
+ if (dy < 4 && dy > -4){
+ rx1_x = ((rx_x1[i] >> 21) + (rx_x1[ii] >> 21)) / 2;
+ rx1_y = ((rx_y1[i] >> 21) + (rx_y1[ii] >> 21)) / 2;
+ if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
+ && vdf_enable) {
+ rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2;
+ }
+ rx1_finish = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX0_Average = %d, RX1_Average = %d\n",
+ rx0_average, rx1_average));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RX0_finish = %d, RX1_finish = %d\n",
+ rx0_finish, rx1_finish));
+ if ((rx0_finish|| !tx0_finish) && (rx1_finish || !tx1_finish) )
+ break;
+ if ((cal0_retry + rx0_average) >= 10
+ || (cal1_retry + rx1_average) >= 10
+ || rx0_average == 3
+ || rx1_average == 3)
+ break;
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RXA_cal_retry = %d\n", cal0_retry));
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RXB_cal_retry = %d\n", cal1_retry));
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ switch (rtlphy->current_chan_bw)
+ {
+ case HT_CHANNEL_WIDTH_20_40:
+ {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x1);
+ }
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x0);
+ }
+ break;
+ default:
+ break;
+
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ /*FillIQK Result*/
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("========Path_A =======\n"));
+
+ if (tx0_finish){
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, tx0_x, tx0_y);
+ }
+ else{
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ }
+
+ if (rx0_finish == 1){
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, rx0_x, rx0_y);
+ }
+ else{
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("========Path_B =======\n"));
+
+ if (tx1_finish){
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, tx1_x, tx1_y);
+ }
+ else{
+ _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xee8, 0x3fff0000, tx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+
+ if (rx1_finish == 1){
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, rx1_x, rx1_y);
+ }
+ else{
+ _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0);
+ }
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80
+ || vdf_enable){
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */
+ rtl_set_bbreg(hw, 0xee8, 0x00003fff, rx_dt[0] & 0x00003fff);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ }
+}
+
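+/*
+ * Restore the RF registers saved by _rtl8812ae_iqk_backup_rf() for one path
+ * and clear the RF mode-table register 0xef.
+ */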
+void _rtl8812ae_iqk_restore_rf(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 *backup_rf_reg,
+ u32 *rf_backup,
+ u32 rf_reg_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ for (i = 0; i < rf_reg_num; i++)
+ rtl_set_rfreg(hw, path, backup_rf_reg[i], BRFREGOFFSETMASK, rf_backup[i]);
+
+ rtl_set_rfreg(hw, path, 0xef, BRFREGOFFSETMASK, 0x0);
+
+ switch(path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreRF Path A Success!!!!\n"));
+ }
+ break;
+ case RF90_PATH_B:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreRF Path B Success!!!!\n"));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
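+/*
+ * Restore the saved AFE registers and reset the IQK-related BB registers on
+ * both paths to their post-calibration defaults.
+ */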
+void _rtl8812ae_iqk_restore_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_reg,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Reload AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/
+ rtl_write_dword(rtlpriv, 0xc80, 0x0);
+ rtl_write_dword(rtlpriv, 0xc84, 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+ rtl_write_dword(rtlpriv, 0xe80, 0x0);
+ rtl_write_dword(rtlpriv, 0xe84, 0x0);
+ rtl_write_dword(rtlpriv, 0xe88, 0x0);
+ rtl_write_dword(rtlpriv, 0xe8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xe90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xe94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xec4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xec8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xeb8, 0x0);
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreAFE Success!!!!\n"));
+}
+
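+/* Restore the MAC/BB registers saved before calibration. */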
+void _rtl8812ae_iqk_restore_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 macbb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/
+ //Reload MacBB Parameters
+ for (i = 0; i < macbb_num; i++){
+ rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("RestoreMacBB Success!!!!\n"));
+}
+
+#define MACBB_REG_NUM 10
+#define AFE_REG_NUM 14
+#define RF_REG_NUM 3
+
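+/*
+ * Top-level 8812AE IQ calibration: back up the MAC/BB, AFE and RF state,
+ * quiet the MAC, run the TX/RX IQK sequence, then restore everything.
+ */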
+static void _rtl8812ae_phy_iq_calibrate(
+ struct ieee80211_hw *hw,
+ u8 channel)
+{
+ u32 macbb_backup[MACBB_REG_NUM];
+ u32 afe_backup[AFE_REG_NUM];
+ u32 rfa_backup[RF_REG_NUM];
+ u32 rfb_backup[RF_REG_NUM];
+ u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550,
+ 0x808, 0x90c, 0xc00, 0xe00,
+ 0x8c4,0x838, 0x82c};
+ u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68,
+ 0xcb8, 0xcb0, 0xcb4,0xe5c,
+ 0xe60, 0xe64, 0xe68, 0xeb8,
+ 0xeb0, 0xeb4};
+ u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+ u8 chnl_idx = _rtl8812ae_get_right_chnl_place_for_iqk(channel);
+
+ _rtl8812ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+ _rtl8812ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8812ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+ _rtl8812ae_iqk_configure_mac(hw);
+ _rtl8812ae_iqk_tx(hw, chnl_idx);
+ _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+ _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_B, backup_rf_reg, rfb_backup, RF_REG_NUM);
+
+ _rtl8812ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8812ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
+
+void _rtl8821ae_iqk_backup_macbb(
+ struct ieee80211_hw *hw,
+ u32 *macbb_backup,
+ u32 *backup_macbb_reg,
+ u32 mac_bb_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*save MACBB default value*/
+ for (i = 0; i < mac_bb_num; i++) {
+ macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]);
+ }
+
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_afe(
+ struct ieee80211_hw *hw,
+ u32 *afe_backup,
+ u32 *backup_afe_REG,
+ u32 afe_num
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save AFE Parameters */
+ for (i = 0; i < afe_num; i++){
+ afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_backup_rf(
+ struct ieee80211_hw *hw,
+ u32 *rfa_backup,
+ u32 *rfb_backup,
+ u32 *backup_rf_reg,
+ u32 rf_num
+ )
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*Save RF Parameters*/
+ for (i = 0; i < rf_num; i++){
+ rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD);
+ rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_configure_mac(
+ struct ieee80211_hw *hw
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ /* ========MAC register setting========*/
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ rtl_write_byte(rtlpriv, 0x522, 0x3f);
+ rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0);
+ rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/
+ rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/
+}
+
+
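+/*
+ * 8821AE (single-path) variant: write the TX IQ correction pair into the
+ * path A TX IQC registers (0xccc/0xcd4).
+ */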
+void _rtl8821ae_iqk_tx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 tx_x,
+ u32 tx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", tx_x, tx_y));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", rtl_get_bbreg(hw, 0xcd4, 0x000007ff), rtl_get_bbreg(hw, 0xccc, 0x000007ff)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
+
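+/*
+ * 8821AE (single-path) variant: write the RX IQ correction pair into the
+ * path A RX IQC register (0xc10).
+ */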
+void _rtl8821ae_iqk_rx_fill_iqc(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32 rx_x,
+ u32 rx_y
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("rx_x = %x;;rx_y = %x ====>fill to IQC\n", rx_x>>1, rx_y>>1));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("0xc10 = %x ====>fill to IQC\n", rtl_read_dword(rtlpriv, 0xc10)));
+ }
+ break;
+ default:
+ break;
+ };
+}
+
+
+
+#define cal_num 10
+
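+/*
+ * 8821AE IQ calibration for one RF path: LOK first, then up to cal_num
+ * rounds of TX IQK, RX-mode TXK and RX IQK (with an extra VDF pass at
+ * 80 MHz). Results from two rounds that agree within a few LSB are averaged
+ * and written to the IQC registers.
+ */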
+void _rtl8821ae_iqk_tx(
+ struct ieee80211_hw *hw,
+ enum radio_path path
+ )
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u32 tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65;
+ int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0;
+ int tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num];
+ bool tx0iqkok = false, rx0iqkok = false;
+ bool vdf_enable = false;
+ int i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0;
+
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("BandWidth = %d.\n",
+ rtlphy->current_chan_bw));
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80){
+ vdf_enable = true;
+ }
+
+ while (cal < cal_num) {
+ switch (path) {
+ case RF90_PATH_A:
+ {
+ temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff);
+ //Path-A LOK
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/
+ /*========Path-A AFE all on========*/
+ /*Port 0 DAC/ADC on*/
+ rtl_write_dword(rtlpriv, 0xc60, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc64, 0x77777777);
+ rtl_write_dword(rtlpriv, 0xc68, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc6c, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc70, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc74, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc78, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc7c, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc80, 0x19791979);
+ rtl_write_dword(rtlpriv, 0xc84, 0x19791979);
+
+ rtl_set_bbreg(hw, 0xc00, 0xf, 0x4); /*hardware 3-wire off*/
+
+ // LOK Setting
+ //====== LOK ======
+ /*DAC/ADC sampling rate (160 MHz)*/
+ rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7);
+
+ // 2. LoK RF Setting (at BW = 20M)
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002);
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3); // BW 20M
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x00462910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f4);
+
+ if (rtlhal->current_bandtype)
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ else
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96);
+
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); // Load LOK
+
+ switch (rtlphy->current_chan_bw)
+ {
+ case HT_CHANNEL_WIDTH_20_40:
+ {
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1);
+ }
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ {
+ rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0);
+ }
+ break;
+ default:
+ break;
+
+ }
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+
+ // 3. TX RF Setting
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+ //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc88, 0x821403f1);
+ if (rtlhal->current_bandtype)
+ rtl_write_dword(rtlpriv, 0xc8c, 0x40163e96);
+ else
+ rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
+
+ if (vdf_enable == 1){
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_enable\n"));
+ for (k = 0;k <= 2; k++){
+ switch (k){
+ case 0:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+ }
+ break;
+ case 1:
+ {
+ rtl_set_bbreg(hw, 0xc80, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0xc84, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
+ }
+ break;
+ case 2:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+ tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+ tx_dt[cal] = ((16*tx_dt[cal])*10000/15708);
+ tx_dt[cal] = (tx_dt[cal] >> 1 )+(tx_dt[cal] & BIT(0));
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[cal] & 0x00003fff);
+ }
+ break;
+ default:
+ break;
+ }
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready) || (delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (~tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10) {
+ break;
+ }
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10){
+ break;
+ }
+ }
+ }
+ }
+ if (k == 3){
+ tx_x0[cal] = vdf_x[k-1] ;
+ tx_y0[cal] = vdf_y[k-1];
+ }
+ }
+
+ else {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready) || (delay_count>20)) {
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (~tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0);
+ rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200);
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10) {
+ break;
+ }
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ }
+
+
+ if (tx0iqkok == false)
+ break; // TXK fail, Don't do RXK
+
+ if (vdf_enable == 1){
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); // TX VDF Disable
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RXVDF Start\n"));
+ for (k = 0;k <= 2; k++){
+ //====== RX mode TXK (RXK Step 1) ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. TX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd);
+ rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y)
+ rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y)
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ switch (k){
+ case 0:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+ }
+ break;
+ case 1:
+ {
+ rtl_write_dword(rtlpriv, 0xc80, 0x08008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x28008c38);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0);
+ }
+ break;
+ case 2:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_Y[1] = %x;;;VDF_Y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_X[1] = %x;;;VDF_X[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff));
+ rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("Rx_dt = %d\n", rx_dt[cal]));
+ rx_dt[cal] = ((16*rx_dt[cal])*10000/13823);
+ rx_dt[cal] = (rx_dt[cal] >> 1 )+(rx_dt[cal] & BIT(0));
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[cal] & 0x00003fff);
+ }
+ break;
+ default:
+ break;
+ }
+ rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready)||(delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (~tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+ if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result
+ tx_x0_rxk[cal] = tx_x0[cal];
+ tx_y0_rxk[cal] = tx_y0[cal];
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RXK Step 1 fail\n"));
+ }
+
+
+ //====== RX IQK ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. RX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ rtl_set_bbreg(hw, 0xcb8, 0xF, 0xe);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_set_bbreg(hw, 0xc80, BIT(29), 0x1);
+ rtl_set_bbreg(hw, 0xc84, BIT(29), 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /* pDM_Odm->SupportInterface == 1 */
+
+ if (k==2){
+ rtl_set_bbreg(hw, 0xce8, BIT(30), 0x1); //RX VDF Enable
+ }
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready)||(delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+ if (rx_fail == 0){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+
+ }
+ }
+ else{
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+ }
+ if (k == 3){
+ rx_x0[cal] = vdf_x[k-1] ;
+ rx_y0[cal] = vdf_y[k-1];
+ }
+ rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); // TX VDF Enable
+ }
+
+ else{
+ //====== RX mode TXK (RXK Step 1) ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. TX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0xb00, 0x03000100);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xc88, 0x821603e0);
+ //ODM_Write4Byte(pDM_Odm, 0xc8c, 0x68163e96);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready)||(delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============TXIQK Check==============
+ tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12));
+
+ if (~tx_fail){
+ rtl_write_dword(rtlpriv, 0xcb8, 0x02000000);
+ tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x04000000);
+ tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ tx0iqkok = true;
+ break;
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ else{
+ tx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+
+
+ if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result
+ tx_x0_rxk[cal] = tx_x0[cal];
+ tx_y0_rxk[cal] = tx_y0[cal];
+ tx0iqkok = true;
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("1"));
+ }
+
+
+ //====== RX IQK ======
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ // 1. RX RF Setting
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000);
+ rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000);
+ rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f);
+ rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb);
+ rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001);
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8);
+ rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000);
+
+ rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff);
+ rtl_set_bbreg(hw, 0x978, BIT(31), 0x1);
+ rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0);
+ //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xF, 0xe);
+ rtl_write_dword(rtlpriv, 0x90c, 0x00008000);
+ rtl_write_dword(rtlpriv, 0x984, 0x0046a911);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16
+ rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);// RX_Tone_idx[9:0], RxK_Mask[29]
+ rtl_write_dword(rtlpriv, 0xc88, 0x02140119);
+
+ rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /*pDM_Odm->SupportInterface == 1*/
+
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20]: hand SI/PI control over to the iqk_dpk module */
+
+ cal_retry = 0;
+ while(1){
+ // one shot
+ rtl_write_dword(rtlpriv, 0x980, 0xfa000000);
+ rtl_write_dword(rtlpriv, 0x980, 0xf8000000);
+
+ mdelay(10); //Delay 10ms
+ rtl_write_dword(rtlpriv, 0xcb8, 0x00000000);
+ delay_count = 0;
+ while (1){
+ iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10));
+ if ((~iqk_ready)||(delay_count>20)){
+ break;
+ }
+ else{
+ mdelay(1);
+ delay_count++;
+ }
+ }
+
+ if (delay_count < 20){ // If 20ms No Result, then cal_retry++
+ // ============RXIQK Check==============
+ rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11));
+ if (rx_fail == 0){
+ /*
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x05000000);
+ reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x06000000);
+ reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+ DbgPrint("reg1 = %d, reg2 = %d", reg1, reg2);
+ Image_Power = (reg2<<32)+reg1;
+ DbgPrint("Before PW = %d\n", Image_Power);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x07000000);
+ reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff);
+ ODM_Write4Byte(pDM_Odm, 0xcb8, 0x08000000);
+ reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f);
+ Image_Power = (reg2<<32)+reg1;
+ DbgPrint("After PW = %d\n", Image_Power);
+ */
+
+ rtl_write_dword(rtlpriv, 0xcb8, 0x06000000);
+ rx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rtl_write_dword(rtlpriv, 0xcb8, 0x08000000);
+ rx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21;
+ rx0iqkok = true;
+ break;
+ }
+ else{
+ rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1);
+ rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1);
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+
+ }
+ }
+ else{
+ rx0iqkok = false;
+ cal_retry++;
+ if (cal_retry == 10)
+ break;
+ }
+ }
+ }
+
+ if (tx0iqkok)
+ tx_average++;
+ if (rx0iqkok)
+ rx_average++;
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65);
+ }
+ break;
+ default:
+ break;
+ }
+ cal++;
+ }
+
+ // FillIQK Result
+ switch (path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("========Path_A =======\n"));
+ if (tx_average == 0)
+ break;
+
+ for (i = 0; i < tx_average; i++){
+ RT_TRACE(COMP_IQK, DBG_LOUD, (" TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i, (tx_x0_rxk[i])>>21&0x000007ff, i, (tx_y0_rxk[i])>>21&0x000007ff));
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i, (tx_x0[i])>>21&0x000007ff, i, (tx_y0[i])>>21&0x000007ff));
+ }
+ for (i = 0; i < tx_average; i++){
+ for (ii = i+1; ii <tx_average; ii++){
+ dx = (tx_x0[i]>>21) - (tx_x0[ii]>>21);
+ if (dx < 3 && dx > -3){
+ dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21);
+ if (dy < 3 && dy > -3){
+ tx_x = ((tx_x0[i]>>21) + (tx_x0[ii]>>21))/2;
+ tx_y = ((tx_y0[i]>>21) + (tx_y0[ii]>>21))/2;
+ tx_finish = 1;
+ break;
+ }
+ }
+ }
+ if (tx_finish == 1)
+ break;
+ }
+
+ if (tx_finish == 1){
+ _rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); // ?
+ }
+ else{
+ _rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0);
+ }
+
+ if (rx_average == 0)
+ break;
+
+ for (i = 0; i < rx_average; i++){
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i, (rx_x0[i])>>21&0x000007ff, i, (rx_y0[i])>>21&0x000007ff));
+ }
+ for (i = 0; i < rx_average; i++){
+ for (ii = i+1; ii <rx_average; ii++){
+ dx = (rx_x0[i]>>21) - (rx_x0[ii]>>21);
+ if (dx < 4 && dx > -4){
+ dy = (rx_y0[i]>>21) - (rx_y0[ii]>>21);
+ if (dy < 4 && dy > -4){
+ rx_x = ((rx_x0[i]>>21) + (rx_x0[ii]>>21))/2;
+ rx_y = ((rx_y0[i]>>21) + (rx_y0[ii]>>21))/2;
+ rx_finish = 1;
+ break;
+ }
+ }
+ }
+ if (rx_finish == 1)
+ break;
+ }
+
+ if (rx_finish == 1){
+ _rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y);
+ }
+ else{
+ _rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
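+
+/*
+ * The FillIQK step above searches the collected samples for the first pair
+ * whose I (x) and Q (y) components (after the >>21 shift) agree within a
+ * small tolerance (3 for TX, 4 for RX) and averages that pair; if no pair
+ * agrees, the caller falls back to the 0x200/0x0 defaults.  A minimal
+ * standalone sketch of that selection -- the helper name and signature are
+ * hypothetical, for illustration only:
+ */
+static bool __maybe_unused _iqk_pick_consistent_pair(const s32 *x, const s32 *y,
+						      u32 n, s32 tol,
+						      s32 *avg_x, s32 *avg_y)
+{
+	u32 i, ii;
+
+	for (i = 0; i < n; i++) {
+		for (ii = i + 1; ii < n; ii++) {
+			s32 dx = x[i] - x[ii];
+			s32 dy = y[i] - y[ii];
+
+			if (dx > -tol && dx < tol && dy > -tol && dy < tol) {
+				/* average the two samples that agree */
+				*avg_x = (x[i] + x[ii]) / 2;
+				*avg_y = (y[i] + y[ii]) / 2;
+				return true;
+			}
+		}
+	}
+	return false;	/* no consistent pair; caller keeps the defaults */
+}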
+
+void _rtl8821ae_iqk_restore_rf(
+ struct ieee80211_hw *hw,
+ enum radio_path path,
+ u32* backup_rf_reg,
+ u32* rf_backup,
+ u32 rf_reg_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ for (i = 0; i < RF_REG_NUM; i++)
+ rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, rf_backup[i]);
+
+ switch(path){
+ case RF90_PATH_A:
+ {
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreRF Path A Success!!!!\n"));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void _rtl8821ae_iqk_restore_afe(
+ struct ieee80211_hw *hw,
+ u32* afe_backup,
+ u32* backup_afe_reg,
+ u32 afe_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ //Reload AFE Parameters
+ for (i = 0; i < afe_num; i++){
+ rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]);
+ }
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1
+ rtl_write_dword(rtlpriv, 0xc80, 0x0);
+ rtl_write_dword(rtlpriv, 0xc84, 0x0);
+ rtl_write_dword(rtlpriv, 0xc88, 0x0);
+ rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000);
+ rtl_write_dword(rtlpriv, 0xc90, 0x00000080);
+ rtl_write_dword(rtlpriv, 0xc94, 0x00000000);
+ rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
+ rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
+ rtl_write_dword(rtlpriv, 0xcb8, 0x0);
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreAFE Success!!!!\n"));
+}
+
+void _rtl8821ae_iqk_restore_macbb(
+ struct ieee80211_hw *hw,
+ u32* macbb_backup,
+ u32* backup_macbb_reg,
+ u32 macbb_num
+ )
+{
+ u32 i;
+ struct rtl_priv* rtlpriv = rtl_priv(hw);
+
+ rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C
+ //Reload MacBB Parameters
+ for (i = 0; i < macbb_num; i++){
+ rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
+ }
+ RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreMacBB Success!!!!\n"));
+}
+
+
+#undef MACBB_REG_NUM
+#undef AFE_REG_NUM
+#undef RF_REG_NUM
+
+#define MACBB_REG_NUM 11
+#define AFE_REG_NUM 12
+#define RF_REG_NUM 3
+
+static void _rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+ u32 macbb_backup[MACBB_REG_NUM];
+ u32 afe_backup[AFE_REG_NUM];
+ u32 rfa_backup[RF_REG_NUM];
+ u32 rfb_backup[RF_REG_NUM];
+ u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550, 0x808, 0x90c, 0xc00, 0xc50,
+ 0xe00, 0xe50, 0x838, 0x82c};
+ u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74,
+ 0xc78, 0xc7c, 0xc80, 0xc84, 0xcb8};
+ u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0};
+
+ _rtl8821ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+ _rtl8821ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8821ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM);
+
+ _rtl8821ae_iqk_configure_mac(hw);
+ _rtl8821ae_iqk_tx(hw, RF90_PATH_A);
+ _rtl8821ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM);
+
+ _rtl8821ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM);
+ _rtl8821ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM);
+}
+
+static void _rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+ /* rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); */
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+ mdelay(100);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("\n"));
+
+}
+
+static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ RT_TRACE(COMP_INIT,DBG_LOUD,("\n"));
+
+ if (main)
+ rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1);
+ else
+ rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x2);
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
+void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (!rtlphy->b_iqk_in_progress)
+ {
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ _rtl8812ae_phy_iq_calibrate(hw, rtlphy->current_channel);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+}
+
+void rtl8812ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 i;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM));
+
+ for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ {
+ rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+ rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+ rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+ }
+ }
+}
+
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtl8812ae_reset_iqk_result(hw);
+
+ rtldm->thermalvalue_iqk= thermal_value;
+ rtl8812ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (!rtlphy->b_iqk_in_progress)
+ {
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = true;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+
+ _rtl8821ae_phy_iq_calibrate(hw);
+
+ spin_lock(&rtlpriv->locks.iqk_lock);
+ rtlphy->b_iqk_in_progress = false;
+ spin_unlock(&rtlpriv->locks.iqk_lock);
+ }
+}
+
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 i;
+
+ RT_TRACE(COMP_IQK, DBG_LOUD,
+ ("rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM));
+
+ for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ {
+ rtlphy->iqk_matrix_regsetting[i].value[0][0] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][2] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][4] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+
+ rtlphy->iqk_matrix_regsetting[i].value[0][1] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][3] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][5] =
+ rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+
+ rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false;
+
+ }
+ }
+}
+
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold)
+{
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+
+ rtl8821ae_reset_iqk_result(hw);
+
+ rtldm->thermalvalue_iqk= thermal_value;
+ rtl8821ae_phy_iq_calibrate(hw, false);
+}
+
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 timeout = 2000, timecount = 0;
+
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount));
+
+ _rtl8821ae_phy_lc_calibrate(hw, false);
+
+ rtlphy->lck_inprogress = false;
+}
+
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->b_apk_done)
+ return;
+
+ return;
+}
+
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ _rtl8821ae_phy_set_rfpath_switch(hw, bmain);
+}
+
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool b_postprocessing = false;
+
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress));
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Resume DM after scan.\n"));
+ b_postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+ case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Pause DM before scan.\n"));
+ b_postprocessing = true;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ } while (false);
+ if (b_postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl8821ae_phy_set_io(hw);
+ RT_TRACE(COMP_CMD, DBG_TRACE, ("IO Type(%#x)\n", iotype));
+ return true;
+}
+
+static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC)
+ _rtl8821ae_resume_tx_beacon(hw);
+ rtl8821ae_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1);
+ rtl8821ae_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca);
+ break;
+ case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
+ if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC)
+ _rtl8821ae_stop_tx_beacon(hw);
+ rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+ rtl8821ae_dm_write_dig(hw, 0x17);
+ rtlphy->initgain_backup.cca = dm_digtable.cur_cck_cca_thres;
+ rtl8821ae_dm_write_cck_cca_thres(hw, 0x40);
+ break;
+ case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(COMP_CMD, DBG_TRACE,
+ ("(%#x)\n", rtlphy->current_io_type));
+}
+
+static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+#if 0
+static void _rtl8821ae_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ /*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4b_tmp != 0 && delay > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ delay--;
+ }
+ if (delay == 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ RT_TRACE(COMP_POWER, DBG_TRACE,
+ ("Switch RF timeout !!!.\n"));
+ return;
+ }*/
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+#endif
+
+static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ switch (rfpwr_state) {
+ case ERFON:{
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus = false;
+ u32 InitializeCount = 0;
+ do {
+ InitializeCount++;
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while ((rtstatus != true)
+ && (InitializeCount < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies)));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl8821ae_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ }
+ break;
+ }
+ case ERFOFF:{
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ }
+ /*case ERFSLEEP:{
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(COMP_RF, DBG_DMESG,
+ ("Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies)));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl8821ae_phy_set_rf_sleep(hw);
+ break;
+ }*/
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("switch case not process \n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ return bresult;
+}
+
+bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl8821ae_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/phy.h b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
new file mode 100644
index 000000000000..a80bf739940a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/phy.h
@@ -0,0 +1,258 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PHY_H__
+#define __RTL8821AE_PHY_H__
+
+/* It must always be set to 4; otherwise the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT 4
+#define TX_1S 0
+#define TX_2S 1
+#define TX_3S 2
+#define TX_4S 3
+
+#define MAX_POWER_INDEX 0x3F
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 10
+#define index_mapping_NUM 15
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF6052_MAX_PATH 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL8821AE_MAX_PATH_NUM 2
+
+#define TARGET_CHNL_NUM_2G_5G_8812 59
+
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+enum _ANT_DIV_TYPE
+{
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+
+};
+
+extern u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band);
+extern void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+extern void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+extern bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl);
+void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path);
+void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold);
+void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index,
+ u8 thermal_value, u8 threshold);
+void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw);
+
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
new file mode 100644
index 000000000000..a2e4a01b712b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c
@@ -0,0 +1,199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+/*
+ Drivers should parse the arrays below and perform the corresponding actions.
+*/
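+
+/*
+ * Illustrative sketch only: the in-tree parser lives in pwrseqcmd.c and the
+ * real entries also carry cut/fab/interface masks and a base address that it
+ * filters on.  The helper name and the struct field names used here (offset,
+ * cmd, msk, value) are assumptions for illustration; PWR_CMD_DELAY handling
+ * is omitted.
+ */
+static void __maybe_unused _pwr_flow_sketch(struct rtl_priv *rtlpriv,
+					     const struct wlan_pwr_cfg *step)
+{
+	u8 val;
+	u32 polling;
+
+	for (; step->cmd != PWR_CMD_END; step++) {
+		switch (step->cmd) {
+		case PWR_CMD_WRITE:
+			/* read-modify-write of the masked bits */
+			val = rtl_read_byte(rtlpriv, step->offset);
+			val = (val & ~step->msk) | (step->value & step->msk);
+			rtl_write_byte(rtlpriv, step->offset, val);
+			break;
+		case PWR_CMD_POLLING:
+			/* bounded wait until the masked bits match the value */
+			polling = 0;
+			do {
+				val = rtl_read_byte(rtlpriv, step->offset);
+				udelay(10);
+			} while (((val & step->msk) != (step->value & step->msk)) &&
+				 (++polling < 5000));
+			break;
+		default:
+			break;
+		}
+	}
+}
+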
+//3 Power on Array
+struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+//3Radio off GPIO Array
+struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_END
+};
+
+//3Card Disable Array
+struct wlan_pwr_cfg rtl8812_card_disable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8812_TRANS_END_STEPS ] =
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_CARDDIS
+ RTL8812_TRANS_END
+};
+
+//3 Card Enable Array
+struct wlan_pwr_cfg rtl8812_card_enable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8812_TRANS_END_STEPS ] =
+{
+ RTL8812_TRANS_CARDDIS_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+//3Suspend Array
+struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_SUS
+ RTL8812_TRANS_END
+};
+
+//3 Resume Array
+struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_SUS_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_ACT
+ RTL8812_TRANS_END
+};
+
+
+
+//3HWPDN Array
+struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ RTL8812_TRANS_ACT_TO_CARDEMU
+ RTL8812_TRANS_CARDEMU_TO_PDN
+ RTL8812_TRANS_END
+};
+
+//3 Enter LPS
+struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ //FW behavior
+ RTL8812_TRANS_ACT_TO_LPS
+ RTL8812_TRANS_END
+};
+
+//3 Leave LPS
+struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]=
+{
+ //FW behavior
+ RTL8812_TRANS_LPS_TO_ACT
+ RTL8812_TRANS_END
+};
+
+
+/*
+ Drivers should parse the arrays below and perform the corresponding actions.
+*/
+/*3 Power on Array*/
+struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_CARDDIS
+ RTL8821A_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS /*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_CARDDIS_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_SUS
+ RTL8821A_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_SUS_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_ACT
+ RTL8821A_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ RTL8821A_TRANS_ACT_TO_CARDEMU
+ RTL8821A_TRANS_CARDEMU_TO_PDN
+ RTL8821A_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ /*FW behavior*/
+ RTL8821A_TRANS_ACT_TO_LPS
+ RTL8821A_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS] =
+{
+ /*FW behavior*/
+ RTL8821A_TRANS_LPS_TO_ACT
+ RTL8821A_TRANS_END
+};
+
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
new file mode 100644
index 000000000000..480a6bb6d76b
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_PWRSEQ_H__
+#define __RTL8821AE_PWRSEQ_H__
+
+#include "pwrseqcmd.h"
+#include "../btcoexist/halbt_precomp.h"
+
+#define RTL8812_TRANS_CARDEMU_TO_ACT_STEPS 15
+#define RTL8812_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8812_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_CARDEMU_TO_PDN_STEPS 25
+#define RTL8812_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8812_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8812_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8812_TRANS_END_STEPS 1
+
+
+#define RTL8812_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0},/* disable SW LPS 0x04[10]=0*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/
+
+#define RTL8812_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \
+ {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /* 0x2[0] = 0 RESET BB, CLOSE RF */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \
+ /*{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},//0x1F[7:0] = 0 turn off RF*/ \
+ /*{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},//0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x2A}, /* 0x07[7:0] = 0x28 sps pwm mode 0x2a for BT coex*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ /*{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0|BIT1, 0}, // 0x02[1:0] = 0 reset BB */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/
+
+#define RTL8812_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \
+ {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \
+ {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* suspend option all off */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'11 enable WL suspend for PCIe*/
+
+#define RTL8812_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO sleep mode leave */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */
+
+#define RTL8812_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ /**{0x0194, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, //0x194[0]=0 , disable 32K clock*/ \
+ /**{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x94}, //0x93=0x94 , 90[30] =0 enable 500k ANA clock .switch clock from 12M to 500K , 90 [26] =0 disable EEprom loader clock*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x03[2] = 0, reset 8051*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x05}, /*0x80=05h if reload fw, fill the default value of host_CPU handshake field*/ \
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\
+ {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \
+ {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \
+ {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 turn on ZCD */ \
+ {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/*0x12[0] = 0 force PFM mode */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \
+ {0x001f, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x01f[1]=0 , disable RFC_0 control REG_RF_CTRL_8812 */ \
+ {0x0076, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x076[1]=0 , disable RFC_1 control REG_OPT_CTRL_8812 +2 */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'01 enable WL suspend*/
+
+#define RTL8812_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x12[0] = 1 force PWM mode */ \
+ {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \
+ {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 turn off ZCD */ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO leave sleep mode */ \
+ {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \
+ {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x04[10] = 0, enable SW LPS PCIE only*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x03[2] = 1, enable 8051*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8812_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8812_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8812_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \
+ {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated,and RF closed*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/
+
+
+#define RTL8812_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/ \
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/ \
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8812_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+
+extern struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS];
+
+/*
+ Check document WM-20130516-JackieLau-RTL8821A_Power_Architecture-R10.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+ The transitions between these states are defined below:
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+*/
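+
+/*
+ * For reference, pwrseq.c composes the transition macros defined below into
+ * flow tables, sized by the *_STEPS counts and terminated by the TRANS_END
+ * sentinel, e.g. the power-on flow added in this patch:
+ *
+ *	struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+ *						   + RTL8821A_TRANS_END_STEPS] = {
+ *		RTL8821A_TRANS_CARDEMU_TO_ACT
+ *		RTL8821A_TRANS_END
+ *	};
+ */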
+#define RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS 25
+#define RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8821A_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8821A_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8821A_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8821A_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8821A_TRANS_END_STEPS 1
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]=1*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/ \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x4C[24] = 0x4F[0] = 1, switch DPDT_SEL_P output from WL BB */\
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT5|BIT4), (BIT5|BIT4)},/*0x66[13] = 0x67[5] = 1, switch for PAPE_G/PAPE_A from WL BB ; 0x66[12] = 0x67[4] = 1, switch LNAON from WL BB */\
+ {0x0025, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6, 0},/*anapar_mac<118> , 0x25[6]=0 by wlan single function*/\
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable falling edge triggering interrupt*/\
+ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable GPIO9 interrupt mode*/\
+ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable GPIO9 input mode*/\
+ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*Enable HSISR GPIO[C:0] interrupt*/\
+ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable HSISR GPIO9 interrupt*/\
+ {0x007A, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x3A},/*0x7A = 0x3A start BT*/\
+ {0x002E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF , 0x82 },/* 0x2C[23:12]=0x820 ; XTAL trim */ \
+	{0x0010, PWR_CUT_A_MSK , PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6 , BIT6 },/* 0x10[6]=1 ; MP adds control of 0x2C: 0x10[6] must be set to 1 so the WLAN side can take control */ \
+
+
+#define RTL8821A_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*0x4C[24] = 0x4F[0] = 0, switch DPDT_SEL_P output from register 0x65[2] */\
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable rising edge triggering interrupt*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 (poll until it reads 0)*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital, 1: isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/
+
+#define RTL8821A_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8821A_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8821A_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8821A_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clocks are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/*When the driver enters suspend/disable, enable LOP for BT*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \
+
+
+#define RTL8821A_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*0x522 = 0 clear Tx pause*/
+
+#define RTL8821A_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, //
+
+extern struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS/*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS
+ + RTL8821A_TRANS_END_STEPS];
+
+/*RTL8812 Power Configuration CMDs for PCIe interface*/
+#define RTL8812_NIC_PWR_ON_FLOW rtl8812_power_on_flow
+#define RTL8812_NIC_RF_OFF_FLOW rtl8812_radio_off_flow
+#define RTL8812_NIC_DISABLE_FLOW rtl8812_card_disable_flow
+#define RTL8812_NIC_ENABLE_FLOW rtl8812_card_enable_flow
+#define RTL8812_NIC_SUSPEND_FLOW rtl8812_suspend_flow
+#define RTL8812_NIC_RESUME_FLOW rtl8812_resume_flow
+#define RTL8812_NIC_PDN_FLOW rtl8812_hwpdn_flow
+#define RTL8812_NIC_LPS_ENTER_FLOW rtl8812_enter_lps_flow
+#define RTL8812_NIC_LPS_LEAVE_FLOW rtl8812_leave_lps_flow
+
+/* RTL8821 Power Configuration CMDs for PCIe interface */
+#define RTL8821A_NIC_PWR_ON_FLOW rtl8821A_power_on_flow
+#define RTL8821A_NIC_RF_OFF_FLOW rtl8821A_radio_off_flow
+#define RTL8821A_NIC_DISABLE_FLOW rtl8821A_card_disable_flow
+#define RTL8821A_NIC_ENABLE_FLOW rtl8821A_card_enable_flow
+#define RTL8821A_NIC_SUSPEND_FLOW rtl8821A_suspend_flow
+#define RTL8821A_NIC_RESUME_FLOW rtl8821A_resume_flow
+#define RTL8821A_NIC_PDN_FLOW rtl8821A_hwpdn_flow
+#define RTL8821A_NIC_LPS_ENTER_FLOW rtl8821A_enter_lps_flow
+#define RTL8821A_NIC_LPS_LEAVE_FLOW rtl8821A_leave_lps_flow
+
+
+#endif
+
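For reference, each RTL8821A_TRANS_* block above expands to a comma-terminated list of struct wlan_pwr_cfg initializers, and the corresponding *_STEPS constants size the extern flow arrays (one slot per step plus the PWR_CMD_END terminator). Below is a minimal sketch of how the companion pwrseq.c, which is not part of this hunk, would presumably assemble one of those arrays; the placement and the concatenation style are assumptions, not text from the patch.

/* Sketch only: the transition macros are pasted back to back; no commas are
 * needed between them because every macro already terminates each entry
 * with a comma.
 */
struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS +
					   RTL8821A_TRANS_END_STEPS] = {
	RTL8821A_TRANS_CARDEMU_TO_ACT
	RTL8821A_TRANS_END
};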
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
new file mode 100644
index 000000000000..ff1887187770
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+
+/*
+* Description:
+* This routine deals with parsing the Power Configuration CMDs
+* for the RTL8723/RTL8188E series ICs.
+* Assumption:
+* We should follow the specific format released by HW SD.
+*
+* 2011.07.07, added by Roger.
+*/
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+			      u8 fab_version, u8 interface_type,
+			      struct wlan_pwr_cfg pwrcfgcmd[])
+{
+ struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+ bool polling_bit = false;
+ u32 ary_idx=0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+ "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwr_cfg_cmd), GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_BASE(pwr_cfg_cmd), GET_PWR_CFG_CMD(pwr_cfg_cmd),
+ GET_PWR_CFG_MASK(pwr_cfg_cmd), GET_PWR_CFG_VALUE(pwr_cfg_cmd)));
+
+ if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) &&
+ (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) &&
+ (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) {
+ switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"));
+ break;
+
+ case PWR_CMD_WRITE: {
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"));
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value = value & (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
+ value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+ /*Write the value back to system register*/
+ rtl_write_byte(rtlpriv, offset, value);
+ }
+ break;
+
+ case PWR_CMD_POLLING:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"));
+ polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value = value & GET_PWR_CFG_MASK(pwr_cfg_cmd);
+ if (value == (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+ polling_bit=true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt) {
+ return false;
+ }
+ } while (!polling_bit);
+
+ break;
+
+ case PWR_CMD_DELAY:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"));
+ if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ break;
+
+ case PWR_CMD_END:
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"));
+ return true;
+ break;
+
+ default:
+ RT_ASSERT(false,
+ ("rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"));
+ break;
+ }
+
+ }
+
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
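As the header comment above notes, the parser walks a flow table entry by entry, applies only the steps whose cut/fab/interface masks match the running chip, and stops at the PWR_CMD_END terminator. A hedged usage sketch follows; the wrapper name and the catch-all masks are illustrative assumptions rather than code from this patch.

#include "pwrseq.h"

/* Illustrative only: hand the RTL8821A power-on table to the parser from a
 * PCIe path. A real caller would pass cut/fab masks derived from the chip
 * version registers instead of the *_ALL_MSK catch-alls used here.
 */
static bool example_rtl8821ae_power_on(struct rtl_priv *rtlpriv)
{
	return rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
					PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
					RTL8821A_NIC_PWR_ON_FLOW);
}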
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
new file mode 100644
index 000000000000..571e7e50d5b5
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h
@@ -0,0 +1,71 @@
+#ifndef __RTL8821AE_PWRSEQCMD_H__
+#define __RTL8821AE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/*The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
+
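To make the bit-field packing of struct wlan_pwr_cfg concrete, here is a hedged illustration of one step from the RTL8821A power-on sequence written with designated initializers (the tables in pwrseq.h use positional initializers); the field values come from the "release WLON reset" entry earlier in this patch, while the designated form and the BIT() spelling are only for illustration.

#include "pwrseqcmd.h"

/* fab_msk, interface_msk, base and cmd are 4-bit fields, so the *_ALL_MSK
 * values (at most 0xF) fit without truncation; cut_msk is a full byte.
 */
static const struct wlan_pwr_cfg example_step = {
	.offset        = 0x0006,           /* byte offset inside the MAC block */
	.cut_msk       = PWR_CUT_ALL_MSK,  /* applies to every chip cut */
	.fab_msk       = PWR_FAB_ALL_MSK,  /* TSMC and UMC parts */
	.interface_msk = PWR_INTF_ALL_MSK, /* SDIO, USB and PCIe */
	.base          = PWR_BASEADDR_MAC,
	.cmd           = PWR_CMD_WRITE,    /* read-modify-write msk with value */
	.msk           = BIT(0),
	.value         = BIT(0),           /* 0x04[16] = 1, release WLON reset */
};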
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/reg.h b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
new file mode 100644
index 000000000000..beffb4243b1e
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/reg.h
@@ -0,0 +1,2427 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_REG_H__
+#define __RTL8821AE_REG_H__
+
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060
+#define REG_GPIO_IO_SEL_2 0x0062
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_OPT_CTRL 0x0074
+#define REG_AFE_XTAL_CTRL_EXT 0x0078
+#define REG_XCK_OUT_CTRL 0x007c
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081
+#define REG_MCUTSTCFG 0x0084
+
+
+#define REG_HIMR 0x00B0
+#define REG_HISR 0x00B4
+#define REG_HIMRE 0x00B8
+#define REG_HISRE 0x00BC
+
+#define REG_PMC_DBG_CTRL2 0x00CC
+
+#define REG_EFUSE_ACCESS 0x00CF
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4
+#define REG_SYS_CFG1 0x00FC
+#define REG_ROM_VERSION 0x00FD
+
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_FW_UPD_RDPTR 0x0284 /* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_RXDMA_CONTROL 0x0286 /* Control the RX DMA.*/
+#define REG_RXPKT_NUM 0x0287 /* The number of packets in RXPKTBUF. */
+
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+
+#define REG_DBI_WDATA 0x0348
+#define REG_DBI_RDATA 0x034C
+#define REG_DBI_ADDR 0x0350
+#define REG_DBI_FLAG 0x0352
+#define REG_MDIO_WDATA 0x0354
+#define REG_MDIO_RDATA 0x0356
+#define REG_MDIO_CTL 0x0358
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_WATCH_DOG 0x0368
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+
+#define REG_HDAQ_DESA_NODEF 0x0000
+#define REG_CMDQ_DESA_NODEF 0x0000
+
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x044C
+#define REG_CCK_CHECK 0x0454
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_ARFR2 0x048C
+#define REG_ARFR3 0x0494
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_HT_SINGLE_AMPDU 0x04C7
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x04D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0
+#define REG_DUMMY 0x04FC
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_SECONDARY_CCA_CTRL 0x0577
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+#define REG_SCH_TX_CMD 0x05F8
+
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+#define REG_NAV_CTRL 0x0650
+#define REG_NAV_UPPER 0x0652
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_TRXPTCL_CTL 0x0668
+
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_NUM 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60
+#define REG_TEST_SIE_PID 0xFE62
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66
+#define REG_TEST_SIE_MAC_ADDR 0xFE70
+#define REG_TEST_SIE_STRING 0xFE80
+
+#define REG_NORMAL_SIE_VID 0xFE60
+#define REG_NORMAL_SIE_PID 0xFE62
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65
+#define REG_NORMAL_SIE_PHY 0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
+#define REG_NORMAL_SIE_STRING 0xFE80
+
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+#define UNUSED_REGISTER 0x1BF
+#define DCAM UNUSED_REGISTER
+#define PSR UNUSED_REGISTER
+#define BBADDR UNUSED_REGISTER
+#define PHYDATAR UNUSED_REGISTER
+
+#define INVALID_BBRF_VALUE 0x12345678
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define CMDEEPROM_EN BIT(5)
+#define CMDEEPROM_SEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
+#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
+#define AUTOLOAD_EFUSE CMDEEPROM_EN
+
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT(5)
+
+#define GPIO_IN REG_GPIO_PIN_CTRL
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define HSIMR_GPIO12_0_INT_EN BIT(0)
+#define HSIMR_SPS_OCP_INT_EN BIT(5)
+#define HSIMR_RON_INT_EN BIT(6)
+#define HSIMR_PDN_INT_EN BIT(7)
+#define HSIMR_GPIO9_INT_EN BIT(25)
+
+
+/*
+* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 bits)
+*/
+#define HSISR_GPIO12_0_INT BIT(0)
+#define HSISR_SPS_OCP_INT BIT(5)
+#define HSISR_RON_INT_EN BIT(6)
+#define HSISR_PDNINT BIT(7)
+#define HSISR_GPIO9_INT BIT(25)
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
+
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+ RATR_24M| RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+ RATR_MCS11| RATR_MCS12 | RATR_MCS13 |\
+ RATR_MCS14 | RATR_MCS15)
+
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+#define SCR_USEDK 0x01
+#define SCR_TXSEC_ENABLE 0x02
+#define SCR_RXSEC_ENABLE 0x04
+
+#define WOW_PMEN BIT(0)
+#define WOW_WOMEN BIT(1)
+#define WOW_MAGIC BIT(2)
+#define WOW_UWF BIT(3)
+
+/*********************************************
+* 8188 IMR/ISR bits
+**********************************************/
+#define IMR_DISABLED 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK BIT(30) /* TXRPT interrupt when CCX bit of the packet is set */
+#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
+#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
+#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND BIT(12) /* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1)*/
+#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR_ROK BIT(0) /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status, write 1 clear. */
+#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
+
+
+#define HWSET_MAX_SIZE 512
+#define EFUSE_MAX_SECTION 64
+#define EFUSE_REAL_CONTENT_LEN 256
+#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data excludes the header, 7 dummy bytes from CP test and 1 reserved byte. */
+
+
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_BOARDTYPE 0x02
+#define EEPROM_DEFAULT_TXPOWER 0x1010
+#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
+
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_THERMALMETER 0x18
+#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+#define EEPROM_DEFAULT_HT20_DIFF 2
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0xC3
+
+#define EEPROM_DEFAULT_PID 0x1234
+#define EEPROM_DEFAULT_VID 0x5678
+#define EEPROM_DEFAULT_CUSTOMERID 0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
+#define EEPROM_DEFAULT_VERSION 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+#define RTL_EEPROM_ID 0x8129
+
+#define EEPROM_HPON 0x02
+#define EEPROM_CLK 0x06
+#define EEPROM_TESTR 0x08
+
+
+#define EEPROM_TXPOWERCCK 0x10
+#define EEPROM_TXPOWERHT40_1S 0x16
+#define EEPROM_TXPOWERHT20DIFF 0x1B
+#define EEPROM_TXPOWER_OFDMDIFF 0x1B
+
+
+
+#define EEPROM_TX_PWR_INX 0x10
+
+#define EEPROM_CHANNELPLAN 0xB8
+#define EEPROM_XTAL_8821AE 0xB9
+#define EEPROM_THERMAL_METER 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING 0xC3
+#define EEPROM_VERSION 0xC4
+#define EEPROM_CUSTOMER_ID 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+#define EEPROM_MAC_ADDR 0xD0
+#define EEPROM_VID 0xD6
+#define EEPROM_DID 0xD8
+#define EEPROM_SVID 0xDA
+#define EEPROM_SMID 0xDC
+
+#define STOPBECON BIT(6)
+#define STOPHIGHT BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
+
+#define RCR_APPFCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_NONQOS_VHT BIT(26)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_CBSSID RCR_CBSSID_DATA
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define SW18_FPWM BIT(3)
+
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTN BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+#define RSM_EN BIT(0)
+#define Timer_EN BIT(4)
+
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define EnBT BIT(5)
+#define EnUart BIT(8)
+#define Uart_910 BIT(9)
+#define EnPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define EnSIC BIT(12)
+#define SIC_23 BIT(13)
+#define EnHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+#define LED0PL BIT(4)
+#define LED1PL BIT(12)
+#define LED0DIS BIT(7)
+
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000
+#define CHIP_VER_RTL_SHIFT 12
+
+#define REG_LBMODE (REG_CR + 3)
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 )
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 )
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 )
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+#define DROP_DATA_EN BIT(9)
+
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+#define USE_SHORT_G1 BIT(20)
+
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) ((((x) & 0xF))<< 8)
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8);
+
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8);
+
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+
+#define TSFTR_RST BIT(0)
+#define TSFTR1_RST BIT(1)
+
+#define STOP_BCNQ BIT(6)
+
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define BW_20MHZ BIT(2)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_FALSE_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_FALSE_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_FALSE_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
+#define SCR_TxUseDK BIT(0)
+#define SCR_RxUseDK BIT(1)
+#define SCR_TxEncEnable BIT(2)
+#define SCR_RxDecEnable BIT(3)
+#define SCR_SKByA2 BIT(4)
+#define SCR_NoSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+#define USB_AGG_EN BIT(3)
+
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 3000
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 0x3
+#define EPROM_CMD_LOAD 1
+
+#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+#define RA_LSSIWRITE_8821A 0xc90
+#define RB_LSSIWRITE_8821A 0xe90
+
+#define RA_PIREAD_8821A 0xd04
+#define RB_PIREAD_8821A 0xd44
+#define RA_SIREAD_8821A 0xd08
+#define RB_SIREAD_8821A 0xd48
+
+#define RPMAC_RESET 0x100
+#define RPMAC_TXSTART 0x104
+#define RPMAC_TXLEGACYSIG 0x108
+#define RPMAC_TXHTSIG1 0x10c
+#define RPMAC_TXHTSIG2 0x110
+#define RPMAC_PHYDEBUG 0x114
+#define RPMAC_TXPACKETNUM 0x118
+#define RPMAC_TXIDLE 0x11c
+#define RPMAC_TXMACHEADER0 0x120
+#define RPMAC_TXMACHEADER1 0x124
+#define RPMAC_TXMACHEADER2 0x128
+#define RPMAC_TXMACHEADER3 0x12c
+#define RPMAC_TXMACHEADER4 0x130
+#define RPMAC_TXMACHEADER5 0x134
+#define RPMAC_TXDADATYPE 0x138
+#define RPMAC_TXRANDOMSEED 0x13c
+#define RPMAC_CCKPLCPPREAMBLE 0x140
+#define RPMAC_CCKPLCPHEADER 0x144
+#define RPMAC_CCKCRC16 0x148
+#define RPMAC_OFDMRXCRC32OK 0x170
+#define RPMAC_OFDMRXCRC32Er 0x174
+#define RPMAC_OFDMRXPARITYER 0x178
+#define RPMAC_OFDMRXCRC8ER 0x17c
+#define RPMAC_CCKCRXRC16ER 0x180
+#define RPMAC_CCKCRXRC32ER 0x184
+#define RPMAC_CCKCRXRC32OK 0x188
+#define RPMAC_TXSTATUS 0x18c
+
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+#define RCCAONSEC 0x838
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+#define RL1PEAKTH 0x848
+
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFC_AREA 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define rFPGA0_XAB_RFPARAMETER 0x878
+#define rFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+/*#define RFPGA0_XD_LSSIREADBACK 0x8ac*/
+#define RRFMOD 0x8ac
+#define RHSSIREAD_8821AE 0x8b0
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVEA_HSPI_READBACK 0x8b8
+#define TRANSCEIVEB_HSPI_READBACK 0x8bc
+/*#define REG_SC_CNT 0x8c4*/
+#define RADC_BUF_CLK 0x8c4
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+#define RCCK_SYSTEM 0xa00
+#define BCCK_SYSTEM 0x10
+
+
+#define RCCK0_AFESETTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+#define RCCK0_CCA_CNT 0xa60
+
+
+/* PageB(0xB00) */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rPdp_AntA_8 0xb08
+#define rPdp_AntA_C 0xb0c
+#define rPdp_AntA_10 0xb10
+#define rPdp_AntA_14 0xb14
+#define rPdp_AntA_18 0xb18
+#define rPdp_AntA_1C 0xb1c
+#define rPdp_AntA_20 0xb20
+#define rPdp_AntA_24 0xb24
+
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_ram64x16 0xb2c
+
+#define rBndA 0xb30
+#define rHssiPar 0xb34
+
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rPdp_AntB_8 0xb78
+#define rPdp_AntB_C 0xb7c
+#define rPdp_AntB_10 0xb80
+#define rPdp_AntB_14 0xb84
+#define rPdp_AntB_18 0xb88
+#define rPdp_AntB_1C 0xb8c
+#define rPdp_AntB_20 0xb90
+#define rPdp_AntB_24 0xb94
+
+#define rConfig_Pmpd_AntB 0xb98
+
+#define rBndB 0xba0
+
+#define rAPK 0xbd8
+#define rPm_Rx0_AntA 0xbdc
+#define rPm_Rx1_AntA 0xbe0
+#define rPm_Rx2_AntA 0xbe4
+#define rPm_Rx3_AntA 0xbe8
+#define rPm_Rx0_AntB 0xbec
+#define rPm_Rx1_AntB 0xbf0
+#define rPm_Rx2_AntB 0xbf4
+#define rPm_Rx3_AntB 0xbf8
+
+/*RSSI Dump*/
+#define RA_RSSI_DUMP 0xBF0
+#define RB_RSSI_DUMP 0xBF1
+#define RS1_RX_EVM_DUMP 0xBF4
+#define RS2_RX_EVM_DUMP 0xBF5
+#define RA_RX_SNR_DUMP 0xBF6
+#define RB_RX_SNR_DUMP 0xBF7
+#define RA_CFO_SHORT_DUMP 0xBF8
+#define RB_CFO_SHORT_DUMP 0xBFA
+#define RA_CFO_LONG_DUMP 0xBEC
+#define RB_CFO_LONG_DUMP 0xBEE
+
+/*Page C*/
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBANLANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
+
+#define ROFDM0_RXIQEXTANTA 0xca0
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+/*Path_A RFE control */
+#define RA_RFE_CTRL_8812 0xcb8
+/*Path_B RFE control*/
+#define RB_RFE_CTRL_8812 0xeb8
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+
+
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CF0 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCF0AB 0xdbc
+#define ROFDM_TAILCF0CD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGREPORT 0xddc
+
+#define RTXAGC_A_CCK11_CCK1 0xc20
+#define RTXAGC_A_OFDM18_OFDM6 0xc24
+#define RTXAGC_A_OFDM54_OFDM24 0xc28
+#define RTXAGC_A_MCS03_MCS00 0xc2c
+#define RTXAGC_A_MCS07_MCS04 0xc30
+#define RTXAGC_A_MCS11_MCS08 0xc34
+#define RTXAGC_A_MCS15_MCS12 0xc38
+#define RTXAGC_A_NSS1INDEX3_NSS1INDEX0 0xc3c
+#define RTXAGC_A_NSS1INDEX7_NSS1INDEX4 0xc40
+#define RTXAGC_A_NSS2INDEX1_NSS1INDEX8 0xc44
+#define RTXAGC_A_NSS2INDEX5_NSS2INDEX2 0xc48
+#define RTXAGC_A_NSS2INDEX9_NSS2INDEX6 0xc4c
+#define RTXAGC_B_CCK11_CCK1 0xe20
+#define RTXAGC_B_OFDM18_OFDM6 0xe24
+#define RTXAGC_B_OFDM54_OFDM24 0xe28
+#define RTXAGC_B_MCS03_MCS00 0xe2c
+#define RTXAGC_B_MCS07_MCS04 0xe30
+#define RTXAGC_B_MCS11_MCS08 0xe34
+#define RTXAGC_B_MCS15_MCS12 0xe38
+#define RTXAGC_B_NSS1INDEX3_NSS1INDEX0 0xe3c
+#define RTXAGC_B_NSS1INDEX7_NSS1INDEX4 0xe40
+#define RTXAGC_B_NSS2INDEX1_NSS1INDEX8 0xe44
+#define RTXAGC_B_NSS2INDEX5_NSS2INDEX2 0xe48
+#define RTXAGC_B_NSS2INDEX9_NSS2INDEX6 0xe4c
+
+#define RA_TXPWRTRAING 0xc54
+#define RB_TXPWRTRAING 0xe54
+
+
+#define RFPGA0_IQK 0xe28
+#define RTx_IQK_Tone_A 0xe30
+#define RRx_IQK_Tone_A 0xe34
+#define RTx_IQK_PI_A 0xe38
+#define RRx_IQK_PI_A 0xe3c
+
+#define RTx_IQK 0xe40
+#define RRx_IQK 0xe44
+#define RIQK_AGC_Pts 0xe48
+#define RIQK_AGC_Rsp 0xe4c
+#define RTx_IQK_Tone_B 0xe50
+#define RRx_IQK_Tone_B 0xe54
+#define RTx_IQK_PI_B 0xe58
+#define RRx_IQK_PI_B 0xe5c
+#define RIQK_AGC_Cont 0xe60
+
+#define RBlue_Tooth 0xe6c
+#define RRx_Wait_CCA 0xe70
+#define RTx_CCK_RFON 0xe74
+#define RTx_CCK_BBON 0xe78
+#define RTx_OFDM_RFON 0xe7c
+#define RTx_OFDM_BBON 0xe80
+#define RTx_To_Rx 0xe84
+#define RTx_To_Tx 0xe88
+#define RRx_CCK 0xe8c
+
+#define RTx_Power_Before_IQK_A 0xe94
+#define RTx_Power_After_IQK_A 0xe9c
+
+#define RRx_Power_Before_IQK_A 0xea0
+#define RRx_Power_Before_IQK_A_2 0xea4
+#define RRx_Power_After_IQK_A 0xea8
+#define RRx_Power_After_IQK_A_2 0xeac
+
+#define RTx_Power_Before_IQK_B 0xeb4
+#define RTx_Power_After_IQK_B 0xebc
+
+#define RRx_Power_Before_IQK_B 0xec0
+#define RRx_Power_Before_IQK_B_2 0xec4
+#define RRx_Power_After_IQK_B 0xec8
+#define RRx_Power_After_IQK_B_2 0xecc
+
+#define RRx_OFDM 0xed0
+#define RRx_Wait_RIFS 0xed4
+#define RRx_TO_Rx 0xed8
+#define RStandby 0xedc
+#define RSleep 0xee0
+#define RPMPD_ANAEN 0xeec
+
+#define RZEBRA1_HSSIENABLE 0x0
+#define RZEBRA1_TRXENABLE1 0x1
+#define RZEBRA1_TRXENABLE2 0x2
+#define RZEBRA1_AGC 0x4
+#define RZEBRA1_CHARGEPUMP 0x5
+#define RZEBRA1_CHANNEL 0x7
+
+#define RZEBRA1_TXGAIN 0x8
+#define RZEBRA1_TXLPF 0x9
+#define RZEBRA1_RXLPF 0xb
+#define RZEBRA1_RXHPFCORNER 0xc
+
+#define RGLOBALCTRL 0
+#define RRTL8256_TXLPF 19
+#define RRTL8256_RXLPF 11
+#define RRTL8258_TXLPF 0x11
+#define RRTL8258_RXLPF 0x13
+#define RRTL8258_RSSILPF 0xa
+
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define RRFCHANNEL 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x24
+#define RF_T_METER_88E 0x42
+#define RF_T_METER_8812A 0x42
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+
+#define RF_APK 0x63
+
+#define RF_WE_LUT 0xEF
+
+#define BBBRESETB 0x100
+#define BGLOBALRESETB 0x200
+#define BOFDMTXSTART 0x4
+#define BCCKTXSTART 0x8
+#define BCRC32DEBUG 0x100
+#define BPMACLOOPBACK 0x10
+#define BTXLSIG 0xffffff
+#define BOFDMTXRATE 0xf
+#define BOFDMTXRESERVED 0x10
+#define BOFDMTXLENGTH 0x1ffe0
+#define BOFDMTXPARITY 0x20000
+#define BTXHTSIG1 0xffffff
+#define BTXHTMCSRATE 0x7f
+#define BTXHTBW 0x80
+#define BTXHTLENGTH 0xffff00
+#define BTXHTSIG2 0xffffff
+#define BTXHTSMOOTHING 0x1
+#define BTXHTSOUNDING 0x2
+#define BTXHTRESERVED 0x4
+#define BTXHTAGGREATION 0x8
+#define BTXHTSTBC 0x30
+#define BTXHTADVANCECODING 0x40
+#define BTXHTSHORTGI 0x80
+#define BTXHTNUMBERHT_LTF 0x300
+#define BTXHTCRC8 0x3fc00
+#define BCOUNTERRESET 0x10000
+#define BNUMOFOFDMTX 0xffff
+#define BNUMOFCCKTX 0xffff0000
+#define BTXIDLEINTERVAL 0xffff
+#define BOFDMSERVICE 0xffff0000
+#define BTXMACHEADER 0xffffffff
+#define BTXDATAINIT 0xff
+#define BTXHTMODE 0x100
+#define BTXDATATYPE 0x30000
+#define BTXRANDOMSEED 0xffffffff
+#define BCCKTXPREAMBLE 0x1
+#define BCCKTXSFD 0xffff0000
+#define BCCKTXSIG 0xff
+#define BCCKTXSERVICE 0xff00
+#define BCCKLENGTHEXT 0x8000
+#define BCCKTXLENGHT 0xffff0000
+#define BCCKTXCRC16 0xffff
+#define BCCKTXSTATUS 0x1
+#define BOFDMTXSTATUS 0x2
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+	(((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+#define BRFMOD 0x1
+#define BJAPANMODE 0x2
+#define BCCKTXSC 0x30
+/* Block & Path enable*/
+#define ROFDMCCKEN 0x808
+#define BCCKEN 0x10000000
+#define BOFDMEN 0x20000000
+#define RRXPATH 0x808 /* Rx antenna*/
+#define BRXPATH 0xff
+#define RTXPATH 0x80c /* Tx antenna*/
+#define BTXPATH 0x0fffffff
+#define RCCK_RX 0xa04 /* for cck rx path selection*/
+#define BCCK_RX 0x0c000000
+#define RVHTLEN_USE_LSIG 0x8c3 /* Use LSIG for VHT length*/
+
+
+#define BOFDMRXADCPHASE 0x10000
+#define BOFDMTXDACPHASE 0x40000
+#define BXATXAGC 0x3f
+
+#define BXBTXAGC 0xf00
+#define BXCTXAGC 0xf000
+#define BXDTXAGC 0xf0000
+
+#define BPASTART 0xf0000000
+#define BTRSTART 0x00f00000
+#define BRFSTART 0x0000f000
+#define BBBSTART 0x000000f0
+#define BBBCCKSTART 0x0000000f
+#define BPAEND 0xf
+#define BTREND 0x0f000000
+#define BRFEND 0x000f0000
+#define BCCAMASK 0x000000f0
+#define BR2RCCAMASK 0x00000f00
+#define BHSSI_R2TDELAY 0xf8000000
+#define BHSSI_T2RDELAY 0xf80000
+#define BCONTXHSSI 0x400
+#define BIGFROMCCK 0x200
+#define BAGCADDRESS 0x3f
+#define BRXHPTX 0x7000
+#define BRXHP2RX 0x38000
+#define BRXHPCCKINI 0xc0000
+#define BAGCTXCODE 0xc00000
+#define BAGCRXCODE 0x300000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDREAALENGTH 0x400
+
+#define B3WIRERFPOWERDOWN 0x1
+#define B5GPAPEPOLARITY 0x40000000
+#define B2GPAPEPOLARITY 0x80000000
+#define BRFSW_TXDEFAULTANT 0x3
+#define BRFSW_TXOPTIONANT 0x30
+#define BRFSW_RXDEFAULTANT 0x300
+#define BRFSW_RXOPTIONANT 0x3000
+#define BRFSI_3WIREDATA 0x1
+#define BRFSI_3WIRECLOCK 0x2
+#define BRFSI_3WIRELOAD 0x4
+#define BRFSI_3WIRERW 0x8
+#define BRFSI_3WIRE 0xf
+
+#define BRFSI_RFENV 0x10
+
+#define BRFSI_TRSW 0x20
+#define BRFSI_TRSWB 0x40
+#define BRFSI_ANTSW 0x100
+#define BRFSI_ANTSWB 0x200
+#define BRFSI_PAPE 0x400
+#define BRFSI_PAPE5G 0x800
+#define BBANDSELECT 0x1
+#define BHTSIG2_GI 0x80
+#define BHTSIG2_SMOOTHING 0x01
+#define BHTSIG2_SOUNDING 0x02
+#define BHTSIG2_AGGREATON 0x08
+#define BHTSIG2_STBC 0x30
+#define BHTSIG2_ADVCODING 0x40
+#define BHTSIG2_NUMOFHTLTF 0x300
+#define BHTSIG2_CRC8 0x3fc
+#define BHTSIG1_MCS 0x7f
+#define BHTSIG1_BANDWIDTH 0x80
+#define BHTSIG1_HTLENGTH 0xffff
+#define BLSIG_RATE 0xf
+#define BLSIG_RESERVED 0x10
+#define BLSIG_LENGTH 0x1fffe
+#define BLSIG_PARITY 0x20
+#define BCCKRXPHASE 0x4
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+
+#define BLSSIREADBACKDATA 0xfffff
+
+#define BLSSIREADOKFLAG 0x1000
+#define BCCKSAMPLERATE 0x8
+#define BREGULATOR0STANDBY 0x1
+#define BREGULATORPLLSTANDBY 0x2
+#define BREGULATOR1STANDBY 0x4
+#define BPLLPOWERUP 0x8
+#define BDPLLPOWERUP 0x10
+#define BDA10POWERUP 0x20
+#define BAD7POWERUP 0x200
+#define BDA6POWERUP 0x2000
+#define BXTALPOWERUP 0x4000
+#define B40MDCLKPOWERUP 0x8000
+#define BDA6DEBUGMODE 0x20000
+#define BDA6SWING 0x380000
+
+#define BADCLKPHASE 0x4000000
+#define B80MCLKDELAY 0x18000000
+#define BAFEWATCHDOGENABLE 0x20000000
+
+#define BXTALCAP01 0xc0000000
+#define BXTALCAP23 0x3
+#define BXTALCAP92X 0x0f000000
+#define BXTALCAP 0x0f000000
+
+#define BINTDIFCLKENABLE 0x400
+#define BEXTSIGCLKENABLE 0x800
+#define BBANDGAP_MBIAS_POWERUP 0x10000
+#define BAD11SH_GAIN 0xc0000
+#define BAD11NPUT_RANGE 0x700000
+#define BAD110P_CURRENT 0x3800000
+#define BLPATH_LOOPBACK 0x4000000
+#define BQPATH_LOOPBACK 0x8000000
+#define BAFE_LOOPBACK 0x10000000
+#define BDA10_SWING 0x7e0
+#define BDA10_REVERSE 0x800
+#define BDA_CLK_SOURCE 0x1000
+#define BDA7INPUT_RANGE 0x6000
+#define BDA7_GAIN 0x38000
+#define BDA7OUTPUT_CM_MODE 0x40000
+#define BDA7INPUT_CM_MODE 0x380000
+#define BDA7CURRENT 0xc00000
+#define BREGULATOR_ADJUST 0x7000000
+#define BAD11POWERUP_ATTX 0x1
+#define BDA10PS_ATTX 0x10
+#define BAD11POWERUP_ATRX 0x100
+#define BDA10PS_ATRX 0x1000
+#define BCCKRX_AGC_FORMAT 0x200
+#define BPSDFFT_SAMPLE_POINT 0xc000
+#define BPSD_AVERAGE_NUM 0x3000
+#define BIQPATH_CONTROL 0xc00
+#define BPSD_FREQ 0x3ff
+#define BPSD_ANTENNA_PATH 0x30
+#define BPSD_IQ_SWITCH 0x40
+#define BPSD_RX_TRIGGER 0x400000
+#define BPSD_TX_TRIGGER 0x80000000
+#define BPSD_SINE_TONE_SCALE 0x7f000000
+#define BPSD_REPORT 0xffff
+
+#define BOFDM_TXSC 0x30000000
+#define BCCK_TXON 0x1
+#define BOFDM_TXON 0x2
+#define BDEBUG_PAGE 0xfff
+#define BDEBUG_ITEM 0xff
+#define BANTL 0x10
+#define BANT_NONHT 0x100
+#define BANT_HT1 0x1000
+#define BANT_HT2 0x10000
+#define BANT_HT1S1 0x100000
+#define BANT_NONHTS1 0x1000000
+
+#define BCCK_BBMODE 0x3
+#define BCCK_TXPOWERSAVING 0x80
+#define BCCK_RXPOWERSAVING 0x40
+
+#define BCCK_SIDEBAND 0x10
+
+#define BCCK_SCRAMBLE 0x8
+#define BCCK_ANTDIVERSITY 0x8000
+#define BCCK_CARRIER_RECOVERY 0x4000
+#define BCCK_TXRATE 0x3000
+#define BCCK_DCCANCEL 0x0800
+#define BCCK_ISICANCEL 0x0400
+#define BCCK_MATCH_FILTER 0x0200
+#define BCCK_EQUALIZER 0x0100
+#define BCCK_PREAMBLE_DETECT 0x800000
+#define BCCK_FAST_FALSECCA 0x400000
+#define BCCK_CH_ESTSTART 0x300000
+#define BCCK_CCA_COUNT 0x080000
+#define BCCK_CS_LIM 0x070000
+#define BCCK_BIST_MODE 0x80000000
+#define BCCK_CCAMASK 0x40000000
+#define BCCK_TX_DAC_PHASE 0x4
+#define BCCK_RX_ADC_PHASE 0x20000000
+#define BCCKR_CP_MODE 0x0100
+#define BCCK_TXDC_OFFSET 0xf0
+#define BCCK_RXDC_OFFSET 0xf
+#define BCCK_CCA_MODE 0xc000
+#define BCCK_FALSECS_LIM 0x3f00
+#define BCCK_CS_RATIO 0xc00000
+#define BCCK_CORGBIT_SEL 0x300000
+#define BCCK_PD_LIM 0x0f0000
+#define BCCK_NEWCCA 0x80000000
+#define BCCK_RXHP_OF_IG 0x8000
+#define BCCK_RXIG 0x7f00
+#define BCCK_LNA_POLARITY 0x800000
+#define BCCK_RX1ST_BAIN 0x7f0000
+#define BCCK_RF_EXTEND 0x20000000
+#define BCCK_RXAGC_SATLEVEL 0x1f000000
+#define BCCK_RXAGC_SATCOUNT 0xe0
+#define bCCKRxRFSettle 0x1f
+#define BCCK_FIXED_RXAGC 0x8000
+#define BCCK_ANTENNA_POLARITY 0x2000
+#define BCCK_TXFILTER_TYPE 0x0c00
+#define BCCK_RXAGC_REPORTTYPE 0x0300
+#define BCCK_RXDAGC_EN 0x80000000
+#define BCCK_RXDAGC_PERIOD 0x20000000
+#define BCCK_RXDAGC_SATLEVEL 0x1f000000
+#define BCCK_TIMING_RECOVERY 0x800000
+#define BCCK_TXC0 0x3f0000
+#define BCCK_TXC1 0x3f000000
+#define BCCK_TXC2 0x3f
+#define BCCK_TXC3 0x3f00
+#define BCCK_TXC4 0x3f0000
+#define BCCK_TXC5 0x3f000000
+#define BCCK_TXC6 0x3f
+#define BCCK_TXC7 0x3f00
+#define BCCK_DEBUGPORT 0xff0000
+#define BCCK_DAC_DEBUG 0x0f000000
+#define BCCK_FALSEALARM_ENABLE 0x8000
+#define BCCK_FALSEALARM_READ 0x4000
+#define BCCK_TRSSI 0x7f
+#define BCCK_RXAGC_REPORT 0xfe
+#define BCCK_RXREPORT_ANTSEL 0x80000000
+#define BCCK_RXREPORT_MFOFF 0x40000000
+#define BCCK_RXREPORT_SQLOSS 0x20000000
+#define BCCK_RXREPORT_PKTLOSS 0x10000000
+#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
+#define BCCK_RXREPORT_RATEERROR 0x04000000
+#define BCCK_RXREPORT_RXRATE 0x03000000
+#define BCCK_RXFA_COUNTER_LOWER 0xff
+#define BCCK_RXFA_COUNTER_UPPER 0xff000000
+#define BCCK_RXHPAGC_START 0xe000
+#define BCCK_RXHPAGC_FINAL 0x1c00
+#define BCCK_RXFALSEALARM_ENABLE 0x8000
+#define BCCK_FACOUNTER_FREEZE 0x4000
+#define BCCK_TXPATH_SEL 0x10000000
+#define BCCK_DEFAULT_RXPATH 0xc000000
+#define BCCK_OPTION_RXPATH 0x3000000
+
+#define BNUM_OFSTF 0x3
+#define BSHIFT_L 0xc0
+#define BGI_TH 0xc
+#define BRXPATH_A 0x1
+#define BRXPATH_B 0x2
+#define BRXPATH_C 0x4
+#define BRXPATH_D 0x8
+#define BTXPATH_A 0x1
+#define BTXPATH_B 0x2
+#define BTXPATH_C 0x4
+#define BTXPATH_D 0x8
+#define BTRSSI_FREQ 0x200
+#define BADC_BACKOFF 0x3000
+#define BDFIR_BACKOFF 0xc000
+#define BTRSSI_LATCH_PHASE 0x10000
+#define BRX_LDC_OFFSET 0xff
+#define BRX_QDC_OFFSET 0xff00
+#define BRX_DFIR_MODE 0x1800000
+#define BRX_DCNF_TYPE 0xe000000
+#define BRXIQIMB_A 0x3ff
+#define BRXIQIMB_B 0xfc00
+#define BRXIQIMB_C 0x3f0000
+#define BRXIQIMB_D 0xffc00000
+#define BDC_DC_NOTCH 0x60000
+#define BRXNB_NOTCH 0x1f000000
+#define BPD_TH 0xf
+#define BPD_TH_OPT2 0xc000
+#define BPWED_TH 0x700
+#define BIFMF_WIN_L 0x800
+#define BPD_OPTION 0x1000
+#define BMF_WIN_L 0xe000
+#define BBW_SEARCH_L 0x30000
+#define BWIN_ENH_L 0xc0000
+#define BBW_TH 0x700000
+#define BED_TH2 0x3800000
+#define BBW_OPTION 0x4000000
+#define BRADIO_TH 0x18000000
+#define BWINDOW_L 0xe0000000
+#define BSBD_OPTION 0x1
+#define BFRAME_TH 0x1c
+#define BFS_OPTION 0x60
+#define BDC_SLOPE_CHECK 0x80
+#define BFGUARD_COUNTER_DC_L 0xe00
+#define BFRAME_WEIGHT_SHORT 0x7000
+#define BSUB_TUNE 0xe00000
+#define BFRAME_DC_LENGTH 0xe000000
+#define BSBD_START_OFFSET 0x30000000
+#define BFRAME_TH_2 0x7
+#define BFRAME_GI2_TH 0x38
+#define BGI2_SYNC_EN 0x40
+#define BSARCH_SHORT_EARLY 0x300
+#define BSARCH_SHORT_LATE 0xc00
+#define BSARCH_GI2_LATE 0x70000
+#define BCFOANTSUM 0x1
+#define BCFOACC 0x2
+#define BCFOSTARTOFFSET 0xc
+#define BCFOLOOPBACK 0x70
+#define BCFOSUMWEIGHT 0x80
+#define BDAGCENABLE 0x10000
+#define BTXIQIMB_A 0x3ff
+#define BTXIQIMB_b 0xfc00
+#define BTXIQIMB_C 0x3f0000
+#define BTXIQIMB_D 0xffc00000
+#define BTXIDCOFFSET 0xff
+#define BTXIQDCOFFSET 0xff00
+#define BTXDFIRMODE 0x10000
+#define BTXPESUDO_NOISEON 0x4000000
+#define BTXPESUDO_NOISE_A 0xff
+#define BTXPESUDO_NOISE_B 0xff00
+#define BTXPESUDO_NOISE_C 0xff0000
+#define BTXPESUDO_NOISE_D 0xff000000
+#define BCCA_DROPOPTION 0x20000
+#define BCCA_DROPTHRES 0xfff00000
+#define BEDCCA_H 0xf
+#define BEDCCA_L 0xf0
+#define BLAMBDA_ED 0x300
+#define BRX_INITIALGAIN 0x7f
+#define BRX_ANTDIV_EN 0x80
+#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
+#define BRX_HIGHPOWER_FLOW 0x8000
+#define BRX_AGC_FREEZE_THRES 0xc0000
+#define BRX_FREEZESTEP_AGC1 0x300000
+#define BRX_FREEZESTEP_AGC2 0xc00000
+#define BRX_FREEZESTEP_AGC3 0x3000000
+#define BRX_FREEZESTEP_AGC0 0xc000000
+#define BRXRSSI_CMP_EN 0x10000000
+#define BRXQUICK_AGCEN 0x20000000
+#define BRXAGC_FREEZE_THRES_MODE 0x40000000
+#define BRX_OVERFLOW_CHECKTYPE 0x80000000
+#define BRX_AGCSHIFT 0x7f
+#define BTRSW_TRI_ONLY 0x80
+#define BPOWER_THRES 0x300
+#define BRXAGC_EN 0x1
+#define BRXAGC_TOGETHER_EN 0x2
+#define BRXAGC_MIN 0x4
+#define BRXHP_INI 0x7
+#define BRXHP_TRLNA 0x70
+#define BRXHP_RSSI 0x700
+#define BRXHP_BBP1 0x7000
+#define BRXHP_BBP2 0x70000
+#define BRXHP_BBP3 0x700000
+#define BRSSI_H 0x7f0000
+#define BRSSI_GEN 0x7f000000
+#define BRXSETTLE_TRSW 0x7
+#define BRXSETTLE_LNA 0x38
+#define BRXSETTLE_RSSI 0x1c0
+#define BRXSETTLE_BBP 0xe00
+#define BRXSETTLE_RXHP 0x7000
+#define BRXSETTLE_ANTSW_RSSI 0x38000
+#define BRXSETTLE_ANTSW 0xc0000
+#define BRXPROCESS_TIME_DAGC 0x300000
+#define BRXSETTLE_HSSI 0x400000
+#define BRXPROCESS_TIME_BBPPW 0x800000
+#define BRXANTENNA_POWER_SHIFT 0x3000000
+#define BRSSI_TABLE_SELECT 0xc000000
+#define BRXHP_FINAL 0x7000000
+#define BRXHPSETTLE_BBP 0x7
+#define BRXHTSETTLE_HSSI 0x8
+#define BRXHTSETTLE_RXHP 0x70
+#define BRXHTSETTLE_BBPPW 0x80
+#define BRXHTSETTLE_IDLE 0x300
+#define BRXHTSETTLE_RESERVED 0x1c00
+#define BRXHT_RXHP_EN 0x8000
+#define BRXAGC_FREEZE_THRES 0x30000
+#define BRXAGC_TOGETHEREN 0x40000
+#define BRXHTAGC_MIN 0x80000
+#define BRXHTAGC_EN 0x100000
+#define BRXHTDAGC_EN 0x200000
+#define BRXHT_RXHP_BBP 0x1c00000
+#define BRXHT_RXHP_FINAL 0xe0000000
+#define BRXPW_RADIO_TH 0x3
+#define BRXPW_RADIO_EN 0x4
+#define BRXMF_HOLD 0x3800
+#define BRXPD_DELAY_TH1 0x38
+#define BRXPD_DELAY_TH2 0x1c0
+#define BRXPD_DC_COUNT_MAX 0x600
+#define BRXPD_DELAY_TH 0x8000
+#define BRXPROCESS_DELAY 0xf0000
+#define BRXSEARCHRANGE_GI2_EARLY 0x700000
+#define BRXFRAME_FUARD_COUNTER_L 0x3800000
+#define BRXSGI_GUARD_L 0xc000000
+#define BRXSGI_SEARCH_L 0x30000000
+#define BRXSGI_TH 0xc0000000
+#define BDFSCNT0 0xff
+#define BDFSCNT1 0xff00
+#define BDFSFLAG 0xf0000
+#define BMF_WEIGHT_SUM 0x300000
+#define BMINIDX_TH 0x7f000000
+#define BDAFORMAT 0x40000
+#define BTXCH_EMU_ENABLE 0x01000000
+#define BTRSW_ISOLATION_A 0x7f
+#define BTRSW_ISOLATION_B 0x7f00
+#define BTRSW_ISOLATION_C 0x7f0000
+#define BTRSW_ISOLATION_D 0x7f000000
+#define BEXT_LNA_GAIN 0x7c00
+
+#define BSTBC_EN 0x4
+#define BANTENNA_MAPPING 0x10
+#define BNSS 0x20
+#define BCFO_ANTSUM_ID 0x200
+#define BPHY_COUNTER_RESET 0x8000000
+#define BCFO_REPORT_GET 0x4000000
+#define BOFDM_CONTINUE_TX 0x10000000
+#define BOFDM_SINGLE_CARRIER 0x20000000
+#define BOFDM_SINGLE_TONE 0x40000000
+#define BHT_DETECT 0x100
+#define BCFOEN 0x10000
+#define BCFOVALUE 0xfff00000
+#define BSIGTONE_RE 0x3f
+#define BSIGTONE_IM 0x7f00
+#define BCOUNTER_CCA 0xffff
+#define BCOUNTER_PARITYFAIL 0xffff0000
+#define BCOUNTER_RATEILLEGAL 0xffff
+#define BCOUNTER_CRC8FAIL 0xffff0000
+#define BCOUNTER_MCSNOSUPPORT 0xffff
+#define BCOUNTER_FASTSYNC 0xffff
+#define BSHORTCFO 0xfff
+#define BSHORTCFOT_LENGTH 12
+#define BSHORTCFOF_LENGTH 11
+#define BLONGCFO 0x7ff
+#define BLONGCFOT_LENGTH 11
+#define BLONGCFOF_LENGTH 11
+#define BTAILCFO 0x1fff
+#define BTAILCFOT_LENGTH 13
+#define BTAILCFOF_LENGTH 12
+#define BNOISE_EN_PWDB 0xffff
+#define BCC_POWER_DB 0xffff0000
+#define BMOISE_PWDB 0xffff
+#define BPOWERMEAST_LENGTH 10
+#define BPOWERMEASF_LENGTH 3
+#define BRX_HT_BW 0x1
+#define BRXSC 0x6
+#define BRX_HT 0x8
+#define BNB_INTF_DET_ON 0x1
+#define BINTF_WIN_LEN_CFG 0x30
+#define BNB_INTF_TH_CFG 0x1c0
+#define BRFGAIN 0x3f
+#define BTABLESEL 0x40
+#define BTRSW 0x80
+#define BRXSNR_A 0xff
+#define BRXSNR_B 0xff00
+#define BRXSNR_C 0xff0000
+#define BRXSNR_D 0xff000000
+#define BSNR_EVMT_LENGTH 8
+#define BSNR_EVMF_LENGTH 1
+#define BCSI1ST 0xff
+#define BCSI2ND 0xff00
+#define BRXEVM1ST 0xff0000
+#define BRXEVM2ND 0xff000000
+#define BSIGEVM 0xff
+#define BPWDB 0xff00
+#define BSGIEN 0x10000
+
+#define BSFACTOR_QMA1 0xf
+#define BSFACTOR_QMA2 0xf0
+#define BSFACTOR_QMA3 0xf00
+#define BSFACTOR_QMA4 0xf000
+#define BSFACTOR_QMA5 0xf0000
+#define BSFACTOR_QMA6 0xf0000
+#define BSFACTOR_QMA7 0xf00000
+#define BSFACTOR_QMA8 0xf000000
+#define BSFACTOR_QMA9 0xf0000000
+#define BCSI_SCHEME 0x100000
+
+#define BNOISE_LVL_TOP_SET 0x3
+#define BCHSMOOTH 0x4
+#define BCHSMOOTH_CFG1 0x38
+#define BCHSMOOTH_CFG2 0x1c0
+#define BCHSMOOTH_CFG3 0xe00
+#define BCHSMOOTH_CFG4 0x7000
+#define BMRCMODE 0x800000
+#define BTHEVMCFG 0x7000000
+
+#define BLOOP_FIT_TYPE 0x1
+#define BUPD_CFO 0x40
+#define BUPD_CFO_OFFDATA 0x80
+#define BADV_UPD_CFO 0x100
+#define BADV_TIME_CTRL 0x800
+#define BUPD_CLKO 0x1000
+#define BFC 0x6000
+#define BTRACKING_MODE 0x8000
+#define BPHCMP_ENABLE 0x10000
+#define BUPD_CLKO_LTF 0x20000
+#define BCOM_CH_CFO 0x40000
+#define BCSI_ESTI_MODE 0x80000
+#define BADV_UPD_EQZ 0x100000
+#define BUCHCFG 0x7000000
+#define BUPDEQZ 0x8000000
+
+#define BRX_PESUDO_NOISE_ON 0x20000000
+#define BRX_PESUDO_NOISE_A 0xff
+#define BRX_PESUDO_NOISE_B 0xff00
+#define BRX_PESUDO_NOISE_C 0xff0000
+#define BRX_PESUDO_NOISE_D 0xff000000
+#define BRX_PESUDO_NOISESTATE_A 0xffff
+#define BRX_PESUDO_NOISESTATE_B 0xffff0000
+#define BRX_PESUDO_NOISESTATE_C 0xffff
+#define BRX_PESUDO_NOISESTATE_D 0xffff0000
+
+#define BZEBRA1_HSSIENABLE 0x8
+#define BZEBRA1_TRXCONTROL 0xc00
+#define BZEBRA1_TRXGAINSETTING 0x07f
+#define BZEBRA1_RXCOUNTER 0xc00
+#define BZEBRA1_TXCHANGEPUMP 0x38
+#define BZEBRA1_RXCHANGEPUMP 0x7
+#define BZEBRA1_CHANNEL_NUM 0xf80
+#define BZEBRA1_TXLPFBW 0x400
+#define BZEBRA1_RXLPFBW 0x600
+
+#define BRTL8256REG_MODE_CTRL1 0x100
+#define BRTL8256REG_MODE_CTRL0 0x40
+#define BRTL8256REG_TXLPFBW 0x18
+#define BRTL8256REG_RXLPFBW 0x600
+
+#define BRTL8258_TXLPFBW 0xc
+#define BRTL8258_RXLPFBW 0xc00
+#define BRTL8258_RSSILPFBW 0xc0
+
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
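+/*
+ * The MASK* values above are used as the bitmask argument of the BB/RF
+ * register accessors; e.g. rf.c in this patch passes MASKDWORD to
+ * rtl_set_bbreg() to write a full 32-bit Tx AGC register.
+ */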
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#define REG_UN_used_register 0x01bf
+
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
+#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
+#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
+#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
+
+#define WOL_REASON_PTK_UPDATE BIT(0)
+#define WOL_REASON_GTK_UPDATE BIT(1)
+#define WOL_REASON_DISASSOC BIT(2)
+#define WOL_REASON_DEAUTH BIT(3)
+#define WOL_REASON_FW_DISCONNECT BIT(4)
+
+#define RA_RFE_PINMUX 0xcb0 /* Path_A RFE control pinmux*/
+#define RB_RFE_PINMUX 0xeb0 /* Path_B RFE control pinmux*/
+
+#define RA_RFE_INV 0xcb4
+#define RB_RFE_INV 0xeb4
+
+/* RXIQC */
+#define RA_RXIQC_AB 0xc10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RA_RXIQC_CD 0xc14 /*RxIQ imbalance matrix coeff. C & D*/
+#define RA_TXSCALE 0xc1c /* Path_A TX scaling factor*/
+#define RB_TXSCALE 0xe1c /* Path_B TX scaling factor*/
+#define RB_RXIQC_AB 0xe10 /*RxIQ imbalance matrix coeff. A & B*/
+#define RB_RXIQC_CD 0xe14 /*RxIQ imbalance matrix coeff. C & D*/
+#define RXIQC_AC 0x02ff /*bit mask for IQC matrix element A & C*/
+#define RXIQC_BD 0x02ff0000 /*bit mask for IQC matrix element B & D*/
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+
+/*REG_MULTI_FUNC_CTRL(For RTL8723 Only)*/
+#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/
+#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/
+#define WL_FUNC_EN BIT(2) /* WiFi function enable */
+#define WL_HWROF_EN BIT(3) /* Enable GPIO[9] as WiFi RF HW PDn source */
+#define BT_HWPDN_EN BIT(16) /* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_SL BIT(17) /* BT HW PDn polarity control */
+#define BT_FUNC_EN BIT(18) /* BT function enable */
+#define BT_HWROF_EN BIT(19) /* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define GPS_HWPDN_EN BIT(20) /* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_SL BIT(21) /* GPS HW PDn polarity control */
+#define GPS_FUNC_EN BIT(22) /* GPS function enable */
+
+
+#define BMASKBYTE0 0xff
+#define BMASKBYTE1 0xff00
+#define BMASKBYTE2 0xff0000
+#define BMASKBYTE3 0xff000000
+#define BMASKHWORD 0xffff0000
+#define BMASKLWORD 0x0000ffff
+#define BMASKDWORD 0xffffffff
+#define BMASK12BITS 0xfff
+#define BMASKH4BITS 0xf0000000
+#define BMASKOFDM_D 0xffc00000
+#define BMASKCCK 0x3f3f3f3f
+
+#define BRFREGOFFSETMASK 0xfffff
+
+#define ODM_REG_CCK_RPT_FORMAT_11AC 0x804
+#define ODM_REG_BB_RX_PATH_11AC 0x808
+/*PAGE 9*/
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/*PAGE A*/
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/*PAGE C*/
+#define ODM_REG_IGI_A_11AC 0xC50
+/*PAGE E*/
+#define ODM_REG_IGI_B_11AC 0xE50
+/*PAGE F*/
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+
+/* 2 MAC REG LIST */
+
+/* DIG related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+#define ODM_BIT_CCK_RPT_FORMAT_11AC BIT16
+#define ODM_BIT_BB_RX_PATH_11AC 0xF
+
+typedef enum AGGRE_SIZE {
+ HT_AGG_SIZE_8K = 0,
+ HT_AGG_SIZE_16K = 1,
+ HT_AGG_SIZE_32K = 2,
+ HT_AGG_SIZE_64K = 3,
+ VHT_AGG_SIZE_128K = 4,
+ VHT_AGG_SIZE_256K = 5,
+ VHT_AGG_SIZE_512K = 6,
+ VHT_AGG_SIZE_1024K = 7,
+} AGGRE_SIZE_E, *PAGGRE_SIZE_E;
+
+#define REG_AMPDU_MAX_LENGTH_8812 0x0458
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.c b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
new file mode 100644
index 000000000000..87c1c9746c43
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.c
@@ -0,0 +1,464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
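+/*
+ * Program RF register RF_CHNLBW bits [11:10] on both paths to select the
+ * channel bandwidth: 3 = 20 MHz, 1 = 40 MHz, 0 = 80 MHz (see the switch
+ * below).
+ */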
+void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 3);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 1);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 1);
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 0);
+ rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0);
+ break;
+ default:
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+
+	if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval =
+ (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
+ 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
+ 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
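+	/* Clamp each per-rate byte of the CCK Tx AGC word to the RF6052
+	 * hardware limit. */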
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+	rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+	if (direction == 1) {
+		tx_agc[0] += pwrtrac_value;
+		tx_agc[1] += pwrtrac_value;
+	} else if (direction == 2) {
+		tx_agc[0] -= pwrtrac_value;
+		tx_agc[1] -= pwrtrac_value;
+	}
+	tmpval = tx_agc[RF90_PATH_A];
+ rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKDWORD, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK11_CCK1));
+
+	tmpval = tx_agc[RF90_PATH_B];
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKDWORD, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_CCK1));
+}
+
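+/*
+ * Build the per-path 32-bit power-base words: the OFDM and MCS (bw20/bw40)
+ * power levels are replicated into all four bytes so they can later be added
+ * to the per-rate offsets.
+ */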
+static void rtl8821ae_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 powerBase0, powerBase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerBase0 = ppowerlevel_ofdm[i];
+
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ *(ofdmbase + i) = powerBase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+
+ for (i = 0; i < 2; i++) {
+		if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+			powerlevel[i] = ppowerlevel_bw20[i];
+		else
+			powerlevel[i] = ppowerlevel_bw40[i];
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1 << 24) |
+ (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+
+ *(mcsbase + i) = powerBase1;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
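+/*
+ * Combine the power base with per-rate offsets according to
+ * rtlefuse->eeprom_regulatory: 0/default - Realtek-defined offsets,
+ * 1 - offsets chosen by channel group, 2 - regulatory base only,
+ * 3 - customer limits taken from the ht20/ht40 power groups.
+ */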
+static void _rtl8821ae_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+	u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff;
+ u32 writeVal, customer_limit, rf;
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+ (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ else {
+				if (channel < 3)
+					chnlgroup = 0;
+				else if (channel < 6)
+					chnlgroup = 1;
+				else if (channel < 9)
+					chnlgroup = 2;
+				else if (channel < 12)
+					chnlgroup = 3;
+ else if (channel < 14)
+ chnlgroup = 4;
+ else if (channel == 14)
+ chnlgroup = 5;
+ }
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerBase0[rf] :
+ powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 20MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+
+ break;
+ case 2:
+ writeVal =
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Better regulatory, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 3:
+ chnlgroup = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 40MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf][channel -
+ 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf][channel -
+ 1]));
+ }
+
+ if (index < 2)
+ pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+ else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ pwr_diff = rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ customer_pwr_diff = rtlefuse->pwrgroup_ht40[rf][channel-1];
+ else
+ customer_pwr_diff = rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+ if (pwr_diff > customer_pwr_diff)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_diff - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)] & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+				if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+
+ writeVal = customer_limit +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeVal rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ default:
+ chnlgroup = 0;
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, writeVal "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ }
+
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeVal = writeVal - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeVal;
+ }
+}
+
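+/*
+ * Split each 32-bit power word into four 7-bit per-rate fields, clamp them
+ * to RF6052_MAX_TX_PWR and write the result to the per-path OFDM/MCS Tx AGC
+ * registers.
+ */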
+static void _rtl8821ae_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pValue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_OFDM18_OFDM6, RTXAGC_A_OFDM54_OFDM24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_OFDM18_OFDM6, RTXAGC_B_OFDM54_OFDM24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeVal & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeVal));
+ }
+}
+
+void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ rtl8821ae_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20, ppowerlevel_bw40,
+ channel, &powerBase0[0], &powerBase1[0]);
+
+	rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
+ for (index = 0; index < 6; index++) {
+ _rtl8821ae_get_txpower_writeval_by_regulatory(hw,
+ channel, index,
+ &powerBase0[0],
+ &powerBase1[0],
+ &writeVal[0]);
+		if (direction == 1) {
+			writeVal[0] += pwrtrac_value;
+			writeVal[1] += pwrtrac_value;
+		} else if (direction == 2) {
+			writeVal[0] -= pwrtrac_value;
+			writeVal[1] -= pwrtrac_value;
+		}
+ _rtl8821ae_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ }
+}
+
+bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+	return _rtl8821ae_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ //u32 u4_regvalue = 0;
+ u8 rfpath;
+ bool rtstatus = true;
+ //struct bb_reg_def *pphyreg;
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ switch (rfpath) {
+ case RF90_PATH_A: {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ }
+ case RF90_PATH_B: {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ }
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+		if (!rtstatus) {
+ RT_TRACE(COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ return false;
+ }
+
+ }
+
+ /*put arrays in dm.c*/
+ /*_rtl8821ae_config_rf_txpwr_track_headerfile(hw);*/
+ RT_TRACE(COMP_INIT, DBG_TRACE, ("\n"));
+ return rtstatus;
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/rf.h b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
new file mode 100644
index 000000000000..b665c0ff1b7d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/rf.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_RF_H__
+#define __RTL8821AE_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+
+extern void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+extern void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+extern void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel);
+extern bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/sw.c b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
new file mode 100644
index 000000000000..a8d175569770
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.c
@@ -0,0 +1,499 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "hal_btc.h"
+#include "../btcoexist/rtl_btc.h"
+
+void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+	/* Close ASPM for AMD by default. */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /*
+ * ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+	 * Default: RTL8192CE uses 3, RTL8192E uses 2.
+	 */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /*
+ * In Hw/Sw Radio Off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+	 * Default: RTL8192CE uses 0, RTL8192SE uses 2.
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /*
+ * This setting works for those device with
+ * backdoor ASPM setting such as EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+}
+
+/*InitializeVariables8812E*/
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ const struct firmware *firmware;
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = NULL;
+
+ rtl8821ae_bt_reg_init(hw);
+ rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+ rtlpriv->dm.b_dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+	rtlpriv->dm.b_disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+ mac->ht_enable = true;
+
+ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+	/* The following two settings register the 5G band; refer to
+	 * _rtl_init_mac80211(). */
+ rtlpriv->rtlhal.bandset = BAND_ON_BOTH;
+ rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+ rtlpci->receive_config = (RCR_APPFCS |
+ RCR_APP_MIC |
+ RCR_APP_ICV |
+ RCR_APP_PHYST_RXFF |
+ RCR_NONQOS_VHT |
+ RCR_HTC_LOC_CTRL |
+ RCR_AMF |
+ RCR_ACF |
+ RCR_ADF | /*This bit controls the PS-Poll packet filter.*/
+ RCR_AICV |
+ RCR_ACRC32 |
+ RCR_AB |
+ RCR_AM |
+ RCR_APM |
+ 0);
+
+
+ rtlpci->irq_mask[0] =
+ (u32) (IMR_PSTIMEOUT |
+ IMR_GTINT3 |
+ /*IMR_TBDER |
+ IMR_TBDOK |
+ IMR_BCNDMAINT0 |*/
+ IMR_HSISR_IND_ON_INT |
+ IMR_C2HCMD |
+ IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_RDU |
+ IMR_ROK |
+ 0);
+
+ rtlpci->irq_mask[1] =
+		(u32)(IMR_RXFOVW |
+ IMR_TXFOVW |
+ 0);
+
+ /* for LPS & IPS */
+ rtlpriv->psc.b_inactiveps = rtlpriv->cfg->mod_params->b_inactiveps;
+ rtlpriv->psc.b_swctrl_lps = rtlpriv->cfg->mod_params->b_swctrl_lps;
+ rtlpriv->psc.b_fwctrl_lps = rtlpriv->cfg->mod_params->b_fwctrl_lps;
+ rtlpriv->psc.b_reg_fwctrl_lps = 3;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+	/* For ASPM: it can be disabled by setting
+	 * const_support_pciaspm = 0 */
+ rtl8821ae_init_aspm_vars(hw);
+
+ if (rtlpriv->psc.b_reg_fwctrl_lps == 1)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (rtlpriv->psc.b_reg_fwctrl_lps == 2)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (rtlpriv->psc.b_reg_fwctrl_lps == 3)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = (u8 *) vmalloc(0x8000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+	if (err) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			("Failed to request firmware!\n"));
+		vfree(rtlpriv->rtlhal.pfirmware);
+		rtlpriv->rtlhal.pfirmware = NULL;
+		return 1;
+	}
+
+	if (firmware->size > 0x8000) {
+		RT_TRACE(COMP_ERR, DBG_EMERG,
+			("Firmware is too big!\n"));
+		vfree(rtlpriv->rtlhal.pfirmware);
+		rtlpriv->rtlhal.pfirmware = NULL;
+		release_firmware(firmware);
+		return 1;
+	}
+
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ release_firmware(firmware);
+
+	if (rtlpriv->cfg->ops->get_btc_status()) {
+ rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
+ rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ }
+
+	RT_TRACE(COMP_INIT, DBG_LOUD, ("Firmware download OK\n"));
+ return err;
+}
+
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ //printk("=========>rtl8821ae_deinit_sw_vars().\n");
+	if (rtlpriv->cfg->ops->get_btc_status()) {
+ //printk("=========>rtl8821ae_deinit_sw_vars().get_btc_status\n");
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ }
+ if (rtlpriv->rtlhal.pfirmware) {
+ //printk("=========>rtl8821ae_deinit_sw_vars().rtlpriv->rtlhal.pfirmware\n");
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+ //printk("<=========rtl8821ae_deinit_sw_vars().\n");
+}
+
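+/*
+ * Returns 1 when the frame was a C2H command packet that has been consumed
+ * by rtl8812ae_c2h_packet_handler(), 0 for a normal rx frame.
+ */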
+u32 rtl8812ae_rx_command_packet_handler(struct ieee80211_hw *hw,
+					struct rtl_stats status,
+					struct sk_buff *skb)
+{
+ u32 result = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (status.packet_report_type) {
+ case NORMAL_RX:
+ result = 0;
+ break;
+ case C2H_PACKET:
+ rtl8812ae_c2h_packet_handler(hw, skb->data, (u8) skb->len);
+ result = 1;
+ RT_TRACE(COMP_RECV, DBG_LOUD,
+			("===>rtl8812ae_rx_command_packet_handler(): skb->len = %d\n", skb->len));
+ break;
+ default:
+ RT_TRACE(COMP_RECV, DBG_LOUD,
+			("===>rtl8812ae_rx_command_packet_handler(): Unknown packet type!\n"));
+ break;
+ }
+
+ return result;
+}
+
+
+/* get bt coexist status */
+bool rtl8821ae_get_btc_status(void)
+{
+ return true;
+}
+
+struct rtl_hal_ops rtl8821ae_hal_ops = {
+ .init_sw_vars = rtl8821ae_init_sw_vars,
+ .deinit_sw_vars = rtl8821ae_deinit_sw_vars,
+ .read_eeprom_info = rtl8821ae_read_eeprom_info,
+ .interrupt_recognized = rtl8821ae_interrupt_recognized,
+ .hw_init = rtl8821ae_hw_init,
+ .hw_disable = rtl8821ae_card_disable,
+ .hw_suspend = rtl8821ae_suspend,
+ .hw_resume = rtl8821ae_resume,
+ .enable_interrupt = rtl8821ae_enable_interrupt,
+ .disable_interrupt = rtl8821ae_disable_interrupt,
+ .set_network_type = rtl8821ae_set_network_type,
+ .set_chk_bssid = rtl8821ae_set_check_bssid,
+ .set_qos = rtl8821ae_set_qos,
+ .set_bcn_reg = rtl8821ae_set_beacon_related_registers,
+ .set_bcn_intv = rtl8821ae_set_beacon_interval,
+ .update_interrupt_mask = rtl8821ae_update_interrupt_mask,
+ .get_hw_reg = rtl8821ae_get_hw_reg,
+ .set_hw_reg = rtl8821ae_set_hw_reg,
+ .update_rate_tbl = rtl8821ae_update_hal_rate_tbl,
+ .fill_tx_desc = rtl8821ae_tx_fill_desc,
+ .fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc,
+ .query_rx_desc = rtl8821ae_rx_query_desc,
+ .set_channel_access = rtl8821ae_update_channel_access_setting,
+ .radio_onoff_checking = rtl8821ae_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl8821ae_phy_set_bw_mode,
+ .switch_channel = rtl8821ae_phy_sw_chnl,
+ .dm_watchdog = rtl8821ae_dm_watchdog,
+ .scan_operation_backup = rtl8821ae_phy_scan_operation_backup,
+ .set_rf_power_state = rtl8821ae_phy_set_rf_power_state,
+ .led_control = rtl8821ae_led_control,
+ .set_desc = rtl8821ae_set_desc,
+ .get_desc = rtl8821ae_get_desc,
+ .is_tx_desc_closed = rtl8821ae_is_tx_desc_closed,
+ .tx_polling = rtl8821ae_tx_polling,
+ .enable_hw_sec = rtl8821ae_enable_hw_security_config,
+ .set_key = rtl8821ae_set_key,
+ .init_sw_leds = rtl8821ae_init_sw_leds,
+ .allow_all_destaddr = rtl8821ae_allow_all_destaddr,
+ .get_bbreg = rtl8821ae_phy_query_bb_reg,
+ .set_bbreg = rtl8821ae_phy_set_bb_reg,
+ .get_rfreg = rtl8821ae_phy_query_rf_reg,
+ .set_rfreg = rtl8821ae_phy_set_rf_reg,
+ .c2h_command_handle = rtl_8821ae_c2h_command_handle,
+ .bt_wifi_media_status_notify = rtl_8821ae_bt_wifi_media_status_notify,
+ .bt_turn_off_bt_coexist_before_enter_lps = rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps,
+ .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd,
+ .get_btc_status = rtl8821ae_get_btc_status,
+ .rx_command_packet_handler = rtl8812ae_rx_command_packet_handler,
+};
+
+struct rtl_mod_params rtl8821ae_mod_params = {
+ .sw_crypto = false,
+	.b_inactiveps = false, /* true */
+ .b_swctrl_lps = false,
+	.b_fwctrl_lps = false, /* true */
+};
+
+struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8821ae_pci",
+ .fw_name = "rtlwifi/rtl8821aefw.bin",
+ .ops = &rtl8821ae_hal_ops,
+ .mod_params = &rtl8821ae_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+ .maps[MAC_HIMR] = REG_HIMR,
+ .maps[MAC_HIMRE] = REG_HIMRE,
+
+
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+ .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+/* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /* needs checking */
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+/* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
+/* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BCNDMAINT0,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+static struct pci_device_id rtl8821ae_pci_ids[] = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+ {},
+};
+#else
+static struct pci_device_id rtl8821ae_pci_ids[] __devinitdata = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)},
+ {},
+};
+#endif
+
+MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids);
+
+MODULE_AUTHOR("Ping Yan<ping_yan@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+
+module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
+module_param_named(ips, rtl8821ae_mod_params.b_inactiveps, bool, 0444);
+module_param_named(swlps, rtl8821ae_mod_params.b_swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8821ae_mod_params.b_fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0, hardware crypto)\n");
+MODULE_PARM_DESC(ips, "Set to 1 to enable inactive power save (default 0)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to enable SW-controlled link power save (default 0)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to enable FW-controlled link power save (default 0)\n");
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+compat_pci_suspend(rtl_pci_suspend)
+compat_pci_resume(rtl_pci_resume)
+#endif
+
+static struct pci_driver rtl8821ae_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl8821ae_pci_ids,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ .driver.pm = &rtlwifi_pm_ops,
+#elif defined(CONFIG_PM)
+ .suspend = rtl_pci_suspend_compat,
+ .resume = rtl_pci_resume_compat,
+#endif
+
+};
+
+extern int rtl_core_module_init(void);
+extern void rtl_core_module_exit(void);
+
+static int __init rtl8821ae_module_init(void)
+{
+ int ret;
+
+ ret = rtl_core_module_init();
+ if (ret)
+ return ret;
+
+	ret = pci_register_driver(&rtl8821ae_driver);
+	if (ret)
+		RT_ASSERT(false, ("No device found\n"));
+
+ return ret;
+}
+
+static void __exit rtl8821ae_module_exit(void)
+{
+ pci_unregister_driver(&rtl8821ae_driver);
+ rtl_core_module_exit();
+}
+
+module_init(rtl8821ae_module_init);
+module_exit(rtl8821ae_module_exit);
diff --git a/drivers/staging/rtl8188eu/include/h2clbk.h b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
index e595030ac8a3..3d49b2f043da 100644
--- a/drivers/staging/rtl8188eu/include/h2clbk.h
+++ b/drivers/staging/rtl8821ae/rtl8821ae/sw.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ * Copyright(c) 2009-2010 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -8,28 +8,32 @@
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
*
- ******************************************************************************/
-
-
-#define _H2CLBK_H_
-
-
-#include <rtl8711_spec.h>
-#include <TypeDef.h>
-
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
-void _lbk_cmd(struct adapter *adapter);
+#ifndef __RTL8821AE_SW_H__
+#define __RTL8821AE_SW_H__
-void _lbk_rsp(struct adapter *adapter);
+int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
+bool rtl8821ae_get_btc_status(void);
-void _lbk_evt(IN struct adapter *adapter);
-void h2c_event_callback(unsigned char *dev, unsigned char *pbuf);
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.c b/drivers/staging/rtl8821ae/rtl8821ae/table.c
new file mode 100644
index 000000000000..a6c4ca4fd9b2
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.c
@@ -0,0 +1,4002 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+u32 RTL8812AE_PHY_REG_ARRAY[] = {
+ 0x800, 0x8020D010,
+ 0x804, 0x080112E0,
+ 0x808, 0x0E028233,
+ 0x80C, 0x12131113,
+ 0x810, 0x20101263,
+ 0x814, 0x020C3D10,
+ 0x818, 0x03A00385,
+ 0x820, 0x00000000,
+ 0x824, 0x00030FE0,
+ 0x828, 0x00000000,
+ 0x82C, 0x002083DD,
+ 0x830, 0x2AAA6C86,
+ 0x834, 0x0037A706,
+ 0x838, 0x06C89B44,
+ 0x83C, 0x0000095B,
+ 0x840, 0xC0000001,
+ 0x844, 0x40003CDE,
+ 0x848, 0x6210FF8B,
+ 0x84C, 0x6CFDFFB8,
+ 0x850, 0x28874706,
+ 0x854, 0x0001520C,
+ 0x858, 0x8060E000,
+ 0x85C, 0x74210168,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x338C2878,
+ 0x870, 0x03333333,
+ 0x874, 0x31602C2E,
+ 0x878, 0x00003152,
+ 0x87C, 0x000FC000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA202033E,
+ 0x8AC, 0x0FF0FA0A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0x6C0057FF,
+ 0x8BC, 0x4CA520A3,
+ 0x8C0, 0x27F00020,
+ 0x8C4, 0x00000000,
+ 0x8C8, 0x00013169,
+ 0x8CC, 0x08248492,
+ 0x8D0, 0x0000B800,
+ 0x8DC, 0x00000000,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8F8, 0x400002C0,
+ 0x8FC, 0x00000000,
+ 0xFF0F07D8, 0xABCD,
+ 0x900, 0x00000700,
+ 0xFF0F07D0, 0xCDEF,
+ 0x900, 0x00000700,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x900, 0x00000700,
+ 0xFF0F07D8, 0xDEAD,
+ 0x90C, 0x00000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0x00000404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x00000004,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00080080,
+ 0x9A8, 0x00000000,
+ 0x9AC, 0x00000000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x000002D5,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x01FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000008,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B2,
+ 0xA84, 0x001F8C80,
+ 0xB00, 0x03100000,
+ 0xB04, 0x0000B000,
+ 0xB08, 0xAE0201EB,
+ 0xB0C, 0x01003207,
+ 0xB10, 0x00009807,
+ 0xB14, 0x01000000,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x0000001F,
+ 0xB24, 0x03020100,
+ 0xB28, 0x07060504,
+ 0xB2C, 0x0B0A0908,
+ 0xB30, 0x0F0E0D0C,
+ 0xB34, 0x13121110,
+ 0xB38, 0x17161514,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x13000032,
+ 0xB4C, 0x48080000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00000000,
+ 0xB5C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000003,
+ 0xC20, 0x12121212,
+ 0xC24, 0x12121212,
+ 0xC28, 0x12121212,
+ 0xC2C, 0x12121212,
+ 0xC30, 0x12121212,
+ 0xC34, 0x12121212,
+ 0xC38, 0x12121212,
+ 0xC3C, 0x12121212,
+ 0xC40, 0x12121212,
+ 0xC44, 0x12121212,
+ 0xC48, 0x12121212,
+ 0xC4C, 0x12121212,
+ 0xC50, 0x00000020,
+ 0xC54, 0x0008121C,
+ 0xC58, 0x30000C1C,
+ 0xC5C, 0x00000058,
+ 0xC60, 0x34344443,
+ 0xC64, 0x07003333,
+ 0xC68, 0x59791979,
+ 0xC6C, 0x59795979,
+ 0xC70, 0x19795979,
+ 0xC74, 0x19795979,
+ 0xC78, 0x19791979,
+ 0xC7C, 0x19791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC94, 0x0100005C,
+ 0xC98, 0x00000000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x00000029,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xFF0F0740, 0xABCD,
+ 0xCB0, 0x77547717,
+ 0xFF0F01C0, 0xCDEF,
+ 0xCB0, 0x77547717,
+ 0xFF0F02C0, 0xCDEF,
+ 0xCB0, 0x77547717,
+ 0xFF0F07D8, 0xCDEF,
+ 0xCB0, 0x54547710,
+ 0xFF0F07D0, 0xCDEF,
+ 0xCB0, 0x54547710,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xCB0, 0x77547777,
+ 0xFF0F0740, 0xDEAD,
+ 0xCB4, 0x00000077,
+ 0xCB8, 0x00508242,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00042020,
+ 0xE08, 0x80410231,
+ 0xE0C, 0x00000000,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE1C, 0x40000003,
+ 0xE20, 0x12121212,
+ 0xE24, 0x12121212,
+ 0xE28, 0x12121212,
+ 0xE2C, 0x12121212,
+ 0xE30, 0x12121212,
+ 0xE34, 0x12121212,
+ 0xE38, 0x12121212,
+ 0xE3C, 0x12121212,
+ 0xE40, 0x12121212,
+ 0xE44, 0x12121212,
+ 0xE48, 0x12121212,
+ 0xE4C, 0x12121212,
+ 0xE50, 0x00000020,
+ 0xE54, 0x0008121C,
+ 0xE58, 0x30000C1C,
+ 0xE5C, 0x00000058,
+ 0xE60, 0x34344443,
+ 0xE64, 0x07003333,
+ 0xE68, 0x59791979,
+ 0xE6C, 0x59795979,
+ 0xE70, 0x19795979,
+ 0xE74, 0x19795979,
+ 0xE78, 0x19791979,
+ 0xE7C, 0x19791979,
+ 0xE80, 0x19791979,
+ 0xE84, 0x19791979,
+ 0xE94, 0x0100005C,
+ 0xE98, 0x00000000,
+ 0xE9C, 0x00000000,
+ 0xEA0, 0x00000029,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xFF0F0740, 0xABCD,
+ 0xEB0, 0x77547717,
+ 0xFF0F01C0, 0xCDEF,
+ 0xEB0, 0x77547717,
+ 0xFF0F02C0, 0xCDEF,
+ 0xEB0, 0x77547717,
+ 0xFF0F07D8, 0xCDEF,
+ 0xEB0, 0x54547710,
+ 0xFF0F07D0, 0xCDEF,
+ 0xEB0, 0x54547710,
+ 0xCDCDCDCD, 0xCDCD,
+ 0xEB0, 0x77547777,
+ 0xFF0F0740, 0xDEAD,
+ 0xEB4, 0x00000077,
+ 0xEB8, 0x00508242,
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 0x800, 0x0020D090,
+ 0x804, 0x080112E0,
+ 0x808, 0x0E028211,
+ 0x80C, 0x92131111,
+ 0x810, 0x20101261,
+ 0x814, 0x020C3D10,
+ 0x818, 0x03A00385,
+ 0x820, 0x00000000,
+ 0x824, 0x00030FE0,
+ 0x828, 0x00000000,
+ 0x82C, 0x002081DD,
+ 0x830, 0x2AAA8E24,
+ 0x834, 0x0037A706,
+ 0x838, 0x06489B44,
+ 0x83C, 0x0000095B,
+ 0x840, 0xC0000001,
+ 0x844, 0x40003CDE,
+ 0x848, 0x62103F8B,
+ 0x84C, 0x6CFDFFB8,
+ 0x850, 0x28874706,
+ 0x854, 0x0001520C,
+ 0x858, 0x8060E000,
+ 0x85C, 0x74210168,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x888C2878,
+ 0x870, 0x08888888,
+ 0x874, 0x31612C2E,
+ 0x878, 0x00000152,
+ 0x87C, 0x000FD000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA2000338,
+ 0x8AC, 0x0FF0FA0A,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0x6C10D7FF,
+ 0x8BC, 0x0CA52090,
+ 0x8C0, 0x1BF00020,
+ 0x8C4, 0x00000000,
+ 0x8C8, 0x00013169,
+ 0x8CC, 0x08248492,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8F8, 0x400002C0,
+ 0x8FC, 0x00000000,
+ 0x900, 0x00000700,
+ 0x90C, 0x00000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0x00000404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x00000004,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x974, 0x000003FF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00480080,
+ 0x9A8, 0x00000000,
+ 0x9AC, 0x00000000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x01081008,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00005D00,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x00000001,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x01FF000C,
+ 0xA08, 0x8C8A8300,
+ 0xA0C, 0x2E68000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000008,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x21805490,
+ 0xA84, 0x001F0000,
+ 0xB00, 0x03100040,
+ 0xB04, 0x0000B000,
+ 0xB08, 0xAE0201EB,
+ 0xB0C, 0x01003207,
+ 0xB10, 0x00009807,
+ 0xB14, 0x01000000,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x0000001F,
+ 0xB24, 0x03020100,
+ 0xB28, 0x07060504,
+ 0xB2C, 0x0B0A0908,
+ 0xB30, 0x0F0E0D0C,
+ 0xB34, 0x13121110,
+ 0xB38, 0x17161514,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x13000032,
+ 0xB4C, 0x48080000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00000000,
+ 0xB5C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000003,
+ 0xC20, 0x2C2C2C2C,
+ 0xC24, 0x30303030,
+ 0xC28, 0x30303030,
+ 0xC2C, 0x2C2C2C2C,
+ 0xC30, 0x2C2C2C2C,
+ 0xC34, 0x2C2C2C2C,
+ 0xC38, 0x2C2C2C2C,
+ 0xC3C, 0x2A2A2A2A,
+ 0xC40, 0x2A2A2A2A,
+ 0xC44, 0x2A2A2A2A,
+ 0xC48, 0x2A2A2A2A,
+ 0xC4C, 0x2A2A2A2A,
+ 0xC50, 0x00000020,
+ 0xC54, 0x001C1208,
+ 0xC58, 0x30000C1C,
+ 0xC5C, 0x00000058,
+ 0xC60, 0x34344443,
+ 0xC64, 0x07003333,
+ 0xC68, 0x19791979,
+ 0xC6C, 0x19791979,
+ 0xC70, 0x19791979,
+ 0xC74, 0x19791979,
+ 0xC78, 0x19791979,
+ 0xC7C, 0x19791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC94, 0x0100005C,
+ 0xC98, 0x00000000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x00000029,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCB0, 0x77775747,
+ 0xCB4, 0x10000077,
+ 0xCB8, 0x00508240,
+};
+
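+/*
+ * The _PG (power-by-rate) tables below appear to use six words per
+ * entry: band index, RF path, TX count, register address, bit mask,
+ * and the packed per-rate power values written under that mask.
+ */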
+u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
+};
+
+u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
+};
+
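+/*
+ * The commented-out TX power limit table below appears to use seven
+ * strings per entry: regulatory domain, band, bandwidth, rate section,
+ * number of TX paths, channel number, and the power limit value.
+ */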
+/* This table seems to be unused:
+u8 *RTL8821AE_TXPWR_LMT_ARRAY[] = {
+ "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "04", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "05", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "06", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "07", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "08", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "09", "34",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "CCK", "1T", "14", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "01", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "03", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "10", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "11", "30",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "1T", "01", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "02", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "03", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "2T", "01", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "02", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "03", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "2T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "03", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "04", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "08", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "09", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "03", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "03", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "03", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "04", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "04", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "05", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "06", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "06", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "07", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "07", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "08", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "09", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "09", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "10", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "10", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "11", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "14", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "36", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "36", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "36", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "40", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "40", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "40", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "44", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "44", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "44", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "48", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "48", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "48", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "52", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "52", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "52", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "56", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "56", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "56", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "60", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "60", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "60", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "64", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "64", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "64", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "100", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "100", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "100", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "114", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "114", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "114", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "108", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "108", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "108", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "112", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "112", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "112", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "116", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "116", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "116", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "120", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "120", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "120", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "124", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "124", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "124", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "128", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "128", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "128", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "132", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "132", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "132", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "136", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "136", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "136", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "140", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "140", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "140", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "149", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "149", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "149", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "153", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "153", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "153", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "157", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "157", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "157", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "161", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "161", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "161", "63",
+ "FCC", "5G", "20M", "OFDM", "1T", "165", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "165", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "165", "63",
+ "FCC", "5G", "20M", "HT", "1T", "36", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "36", "30",
+ "MKK", "5G", "20M", "HT", "1T", "36", "30",
+ "FCC", "5G", "20M", "HT", "1T", "40", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "40", "30",
+ "MKK", "5G", "20M", "HT", "1T", "40", "30",
+ "FCC", "5G", "20M", "HT", "1T", "44", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "44", "30",
+ "MKK", "5G", "20M", "HT", "1T", "44", "30",
+ "FCC", "5G", "20M", "HT", "1T", "48", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "48", "30",
+ "MKK", "5G", "20M", "HT", "1T", "48", "30",
+ "FCC", "5G", "20M", "HT", "1T", "52", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "52", "30",
+ "MKK", "5G", "20M", "HT", "1T", "52", "30",
+ "FCC", "5G", "20M", "HT", "1T", "56", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "56", "30",
+ "MKK", "5G", "20M", "HT", "1T", "56", "30",
+ "FCC", "5G", "20M", "HT", "1T", "60", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "60", "30",
+ "MKK", "5G", "20M", "HT", "1T", "60", "30",
+ "FCC", "5G", "20M", "HT", "1T", "64", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "64", "30",
+ "MKK", "5G", "20M", "HT", "1T", "64", "30",
+ "FCC", "5G", "20M", "HT", "1T", "100", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "100", "30",
+ "MKK", "5G", "20M", "HT", "1T", "100", "30",
+ "FCC", "5G", "20M", "HT", "1T", "114", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "114", "30",
+ "MKK", "5G", "20M", "HT", "1T", "114", "30",
+ "FCC", "5G", "20M", "HT", "1T", "108", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "108", "30",
+ "MKK", "5G", "20M", "HT", "1T", "108", "30",
+ "FCC", "5G", "20M", "HT", "1T", "112", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "112", "30",
+ "MKK", "5G", "20M", "HT", "1T", "112", "30",
+ "FCC", "5G", "20M", "HT", "1T", "116", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "116", "30",
+ "MKK", "5G", "20M", "HT", "1T", "116", "30",
+ "FCC", "5G", "20M", "HT", "1T", "120", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "120", "30",
+ "MKK", "5G", "20M", "HT", "1T", "120", "30",
+ "FCC", "5G", "20M", "HT", "1T", "124", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "124", "30",
+ "MKK", "5G", "20M", "HT", "1T", "124", "30",
+ "FCC", "5G", "20M", "HT", "1T", "128", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "128", "30",
+ "MKK", "5G", "20M", "HT", "1T", "128", "30",
+ "FCC", "5G", "20M", "HT", "1T", "132", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "132", "30",
+ "MKK", "5G", "20M", "HT", "1T", "132", "30",
+ "FCC", "5G", "20M", "HT", "1T", "136", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "136", "30",
+ "MKK", "5G", "20M", "HT", "1T", "136", "30",
+ "FCC", "5G", "20M", "HT", "1T", "140", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "140", "30",
+ "MKK", "5G", "20M", "HT", "1T", "140", "30",
+ "FCC", "5G", "20M", "HT", "1T", "149", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "149", "30",
+ "MKK", "5G", "20M", "HT", "1T", "149", "63",
+ "FCC", "5G", "20M", "HT", "1T", "153", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "153", "30",
+ "MKK", "5G", "20M", "HT", "1T", "153", "63",
+ "FCC", "5G", "20M", "HT", "1T", "157", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "157", "30",
+ "MKK", "5G", "20M", "HT", "1T", "157", "63",
+ "FCC", "5G", "20M", "HT", "1T", "161", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "161", "30",
+ "MKK", "5G", "20M", "HT", "1T", "161", "63",
+ "FCC", "5G", "20M", "HT", "1T", "165", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "165", "30",
+ "MKK", "5G", "20M", "HT", "1T", "165", "63",
+ "FCC", "5G", "20M", "HT", "2T", "36", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "36", "30",
+ "MKK", "5G", "20M", "HT", "2T", "36", "30",
+ "FCC", "5G", "20M", "HT", "2T", "40", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "40", "30",
+ "MKK", "5G", "20M", "HT", "2T", "40", "30",
+ "FCC", "5G", "20M", "HT", "2T", "44", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "44", "30",
+ "MKK", "5G", "20M", "HT", "2T", "44", "30",
+ "FCC", "5G", "20M", "HT", "2T", "48", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "48", "30",
+ "MKK", "5G", "20M", "HT", "2T", "48", "30",
+ "FCC", "5G", "20M", "HT", "2T", "52", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "52", "30",
+ "MKK", "5G", "20M", "HT", "2T", "52", "30",
+ "FCC", "5G", "20M", "HT", "2T", "56", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "56", "30",
+ "MKK", "5G", "20M", "HT", "2T", "56", "30",
+ "FCC", "5G", "20M", "HT", "2T", "60", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "60", "30",
+ "MKK", "5G", "20M", "HT", "2T", "60", "30",
+ "FCC", "5G", "20M", "HT", "2T", "64", "26",
+ "ETSI", "5G", "20M", "HT", "2T", "64", "30",
+ "MKK", "5G", "20M", "HT", "2T", "64", "30",
+ "FCC", "5G", "20M", "HT", "2T", "100", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "100", "30",
+ "MKK", "5G", "20M", "HT", "2T", "100", "30",
+ "FCC", "5G", "20M", "HT", "2T", "114", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "114", "30",
+ "MKK", "5G", "20M", "HT", "2T", "114", "30",
+ "FCC", "5G", "20M", "HT", "2T", "108", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "108", "30",
+ "MKK", "5G", "20M", "HT", "2T", "108", "30",
+ "FCC", "5G", "20M", "HT", "2T", "112", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "112", "30",
+ "MKK", "5G", "20M", "HT", "2T", "112", "30",
+ "FCC", "5G", "20M", "HT", "2T", "116", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "116", "30",
+ "MKK", "5G", "20M", "HT", "2T", "116", "30",
+ "FCC", "5G", "20M", "HT", "2T", "120", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "120", "30",
+ "MKK", "5G", "20M", "HT", "2T", "120", "30",
+ "FCC", "5G", "20M", "HT", "2T", "124", "32",
+ "ETSI", "5G", "20M", "HT", "2T", "124", "30",
+ "MKK", "5G", "20M", "HT", "2T", "124", "30",
+ "FCC", "5G", "20M", "HT", "2T", "128", "30",
+ "ETSI", "5G", "20M", "HT", "2T", "128", "30",
+ "MKK", "5G", "20M", "HT", "2T", "128", "30",
+ "FCC", "5G", "20M", "HT", "2T", "132", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "132", "30",
+ "MKK", "5G", "20M", "HT", "2T", "132", "30",
+ "FCC", "5G", "20M", "HT", "2T", "136", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "136", "30",
+ "MKK", "5G", "20M", "HT", "2T", "136", "30",
+ "FCC", "5G", "20M", "HT", "2T", "140", "26",
+ "ETSI", "5G", "20M", "HT", "2T", "140", "30",
+ "MKK", "5G", "20M", "HT", "2T", "140", "30",
+ "FCC", "5G", "20M", "HT", "2T", "149", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "149", "30",
+ "MKK", "5G", "20M", "HT", "2T", "149", "63",
+ "FCC", "5G", "20M", "HT", "2T", "153", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "153", "30",
+ "MKK", "5G", "20M", "HT", "2T", "153", "63",
+ "FCC", "5G", "20M", "HT", "2T", "157", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "157", "30",
+ "MKK", "5G", "20M", "HT", "2T", "157", "63",
+ "FCC", "5G", "20M", "HT", "2T", "161", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "161", "30",
+ "MKK", "5G", "20M", "HT", "2T", "161", "63",
+ "FCC", "5G", "20M", "HT", "2T", "165", "34",
+ "ETSI", "5G", "20M", "HT", "2T", "165", "30",
+ "MKK", "5G", "20M", "HT", "2T", "165", "63",
+ "FCC", "5G", "40M", "HT", "1T", "38", "26",
+ "ETSI", "5G", "40M", "HT", "1T", "38", "30",
+ "MKK", "5G", "40M", "HT", "1T", "38", "30",
+ "FCC", "5G", "40M", "HT", "1T", "46", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "46", "30",
+ "MKK", "5G", "40M", "HT", "1T", "46", "30",
+ "FCC", "5G", "40M", "HT", "1T", "54", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "54", "30",
+ "MKK", "5G", "40M", "HT", "1T", "54", "30",
+ "FCC", "5G", "40M", "HT", "1T", "62", "26",
+ "ETSI", "5G", "40M", "HT", "1T", "62", "30",
+ "MKK", "5G", "40M", "HT", "1T", "62", "30",
+ "FCC", "5G", "40M", "HT", "1T", "102", "24",
+ "ETSI", "5G", "40M", "HT", "1T", "102", "30",
+ "MKK", "5G", "40M", "HT", "1T", "102", "30",
+ "FCC", "5G", "40M", "HT", "1T", "110", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "110", "30",
+ "MKK", "5G", "40M", "HT", "1T", "110", "30",
+ "FCC", "5G", "40M", "HT", "1T", "118", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "118", "30",
+ "MKK", "5G", "40M", "HT", "1T", "118", "30",
+ "FCC", "5G", "40M", "HT", "1T", "126", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "126", "30",
+ "MKK", "5G", "40M", "HT", "1T", "126", "30",
+ "FCC", "5G", "40M", "HT", "1T", "134", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "134", "30",
+ "MKK", "5G", "40M", "HT", "1T", "134", "30",
+ "FCC", "5G", "40M", "HT", "1T", "151", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "151", "30",
+ "MKK", "5G", "40M", "HT", "1T", "151", "63",
+ "FCC", "5G", "40M", "HT", "1T", "159", "30",
+ "ETSI", "5G", "40M", "HT", "1T", "159", "30",
+ "MKK", "5G", "40M", "HT", "1T", "159", "63",
+ "FCC", "5G", "40M", "HT", "2T", "38", "28",
+ "ETSI", "5G", "40M", "HT", "2T", "38", "30",
+ "MKK", "5G", "40M", "HT", "2T", "38", "30",
+ "FCC", "5G", "40M", "HT", "2T", "46", "28",
+ "ETSI", "5G", "40M", "HT", "2T", "46", "30",
+ "MKK", "5G", "40M", "HT", "2T", "46", "30",
+ "FCC", "5G", "40M", "HT", "2T", "54", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "54", "30",
+ "MKK", "5G", "40M", "HT", "2T", "54", "30",
+ "FCC", "5G", "40M", "HT", "2T", "62", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "62", "30",
+ "MKK", "5G", "40M", "HT", "2T", "62", "30",
+ "FCC", "5G", "40M", "HT", "2T", "102", "26",
+ "ETSI", "5G", "40M", "HT", "2T", "102", "30",
+ "MKK", "5G", "40M", "HT", "2T", "102", "30",
+ "FCC", "5G", "40M", "HT", "2T", "110", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "110", "30",
+ "MKK", "5G", "40M", "HT", "2T", "110", "30",
+ "FCC", "5G", "40M", "HT", "2T", "118", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "118", "30",
+ "MKK", "5G", "40M", "HT", "2T", "118", "30",
+ "FCC", "5G", "40M", "HT", "2T", "126", "32",
+ "ETSI", "5G", "40M", "HT", "2T", "126", "30",
+ "MKK", "5G", "40M", "HT", "2T", "126", "30",
+ "FCC", "5G", "40M", "HT", "2T", "134", "30",
+ "ETSI", "5G", "40M", "HT", "2T", "134", "30",
+ "MKK", "5G", "40M", "HT", "2T", "134", "30",
+ "FCC", "5G", "40M", "HT", "2T", "151", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "151", "30",
+ "MKK", "5G", "40M", "HT", "2T", "151", "63",
+ "FCC", "5G", "40M", "HT", "2T", "159", "34",
+ "ETSI", "5G", "40M", "HT", "2T", "159", "30",
+ "MKK", "5G", "40M", "HT", "2T", "159", "63",
+ "FCC", "5G", "80M", "VHT", "1T", "42", "22",
+ "ETSI", "5G", "80M", "VHT", "1T", "42", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "42", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "58", "20",
+ "ETSI", "5G", "80M", "VHT", "1T", "58", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "58", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "106", "20",
+ "ETSI", "5G", "80M", "VHT", "1T", "106", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "106", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "122", "28",
+ "ETSI", "5G", "80M", "VHT", "1T", "122", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "122", "30",
+ "FCC", "5G", "80M", "VHT", "1T", "155", "30",
+ "ETSI", "5G", "80M", "VHT", "1T", "155", "30",
+ "MKK", "5G", "80M", "VHT", "1T", "155", "63",
+ "FCC", "5G", "80M", "VHT", "2T", "42", "28",
+ "ETSI", "5G", "80M", "VHT", "2T", "42", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "42", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "58", "26",
+ "ETSI", "5G", "80M", "VHT", "2T", "58", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "58", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "106", "28",
+ "ETSI", "5G", "80M", "VHT", "2T", "106", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "106", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "122", "32",
+ "ETSI", "5G", "80M", "VHT", "2T", "122", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "122", "30",
+ "FCC", "5G", "80M", "VHT", "2T", "155", "34",
+ "ETSI", "5G", "80M", "VHT", "2T", "155", "30",
+ "MKK", "5G", "80M", "VHT", "2T", "155", "63"
+};*/
+
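+/*
+ * The RF register tables below appear to follow the same layout as the
+ * PHY tables above: {RF register address, data} pairs, with the same
+ * 0xFF0Fxxxx/0xABCD/0xCDEF/0xCDCDCDCD/0xDEAD markers delimiting
+ * conditional blocks.
+ */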
+u32 RTL8812AE_RADIOA_ARRAY[] = {
+ 0x000, 0x00010000,
+ 0x018, 0x0001712A,
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x01E, 0x00080000,
+ 0x089, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x086, 0x00014B38,
+ 0xFF0F02C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F01C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F07D8, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xFF0F07D0, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x086, 0x00014B38,
+ 0xFF0F0740, 0xDEAD,
+ 0x0B1, 0x0001FC1A,
+ 0x0B3, 0x000F0810,
+ 0x0B4, 0x0001A78D,
+ 0x0BA, 0x00086180,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0xFF0F07D8, 0xABCD,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03B, 0x00038A58,
+ 0x03B, 0x00037A58,
+ 0x03B, 0x0002A590,
+ 0x03B, 0x00027A50,
+ 0x03B, 0x00018248,
+ 0x03B, 0x00010240,
+ 0x03B, 0x00008240,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000100,
+ 0xFF0F07D8, 0xABCD,
+ 0x034, 0x0000A4EE,
+ 0x034, 0x00009076,
+ 0x034, 0x00008073,
+ 0x034, 0x00007070,
+ 0x034, 0x0000606D,
+ 0x034, 0x0000506A,
+ 0x034, 0x00004049,
+ 0x034, 0x00003046,
+ 0x034, 0x00002028,
+ 0x034, 0x00001025,
+ 0x034, 0x00000022,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF4,
+ 0x034, 0x00009DF1,
+ 0x034, 0x00008DEE,
+ 0x034, 0x00007DEB,
+ 0x034, 0x00006DE8,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146B,
+ 0x034, 0x0000006D,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x0EF, 0x00000002,
+ 0x008, 0x00008400,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0003A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0003202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0002B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0001B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00012085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0000A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00002080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0007A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0007202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0006B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0005B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00052085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0004A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00042080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x000BA02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x000B202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x000AB064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x000A3070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0009B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00092085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0008A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00082080,
+ 0x03C, 0x00010000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045DC9,
+ 0x034, 0x00044CE8,
+ 0x034, 0x000438CA,
+ 0x034, 0x00042889,
+ 0x034, 0x0004184A,
+ 0x034, 0x0004044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF5,
+ 0x034, 0x00029DF2,
+ 0x034, 0x00028DEF,
+ 0x034, 0x00027DEC,
+ 0x034, 0x00026DE9,
+ 0x034, 0x00025DC9,
+ 0x034, 0x00024CE8,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x0002184A,
+ 0x034, 0x0002044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009DF7,
+ 0x034, 0x00008DF4,
+ 0x034, 0x00007DF1,
+ 0x034, 0x00006DEE,
+ 0x034, 0x00005DCD,
+ 0x034, 0x00004CEB,
+ 0x034, 0x000038CC,
+ 0x034, 0x0000288B,
+ 0x034, 0x0000184C,
+ 0x034, 0x0000044C,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001D4,
+ 0x035, 0x000081D4,
+ 0x035, 0x000101D4,
+ 0x035, 0x000201B4,
+ 0x035, 0x000281B4,
+ 0x035, 0x000301B4,
+ 0x035, 0x000401B4,
+ 0x035, 0x000481B4,
+ 0x035, 0x000501B4,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x00000188,
+ 0x035, 0x00008147,
+ 0x035, 0x00010147,
+ 0x035, 0x000201D7,
+ 0x035, 0x000281D7,
+ 0x035, 0x000301D7,
+ 0x035, 0x000401D8,
+ 0x035, 0x000481D8,
+ 0x035, 0x000501D8,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00004BFB,
+ 0x036, 0x0000CBFB,
+ 0x036, 0x00014BFB,
+ 0x036, 0x0001CBFB,
+ 0x036, 0x00024F4B,
+ 0x036, 0x0002CF4B,
+ 0x036, 0x00034F4B,
+ 0x036, 0x0003CF4B,
+ 0x036, 0x00044F4B,
+ 0x036, 0x0004CF4B,
+ 0x036, 0x00054F4B,
+ 0x036, 0x0005CF4B,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00084EB4,
+ 0x036, 0x0008CC35,
+ 0x036, 0x00094C35,
+ 0x036, 0x0009CC35,
+ 0x036, 0x000A4935,
+ 0x036, 0x000ACC35,
+ 0x036, 0x000B4C35,
+ 0x036, 0x000BCC35,
+ 0x036, 0x000C4EB4,
+ 0x036, 0x000CCEB5,
+ 0x036, 0x000D4EB5,
+ 0x036, 0x000DCEB5,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0740, 0xABCD,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F01C0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F07D8, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03C, 0x000002CC,
+ 0x03C, 0x00000522,
+ 0x03C, 0x00000902,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x000002A8,
+ 0x03C, 0x000005A2,
+ 0x03C, 0x00000880,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0x0DF, 0x00000080,
+ 0x01F, 0x00040064,
+ 0xFF0F0740, 0xABCD,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F01C0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D8, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D0, 0xCDEF,
+ 0x061, 0x000FDD43,
+ 0x062, 0x00038F4B,
+ 0x063, 0x00032117,
+ 0x064, 0x000194AC,
+ 0x065, 0x000931D1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000E5D53,
+ 0x062, 0x00038FCD,
+ 0x063, 0x000314EB,
+ 0x064, 0x000196AC,
+ 0x065, 0x000911D7,
+ 0xFF0F0740, 0xDEAD,
+ 0x008, 0x00008400,
+ 0x01C, 0x000739D2,
+ 0x0B4, 0x0001E78D,
+ 0x018, 0x0001F12A,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B4, 0x0001A78D,
+ 0x018, 0x0001712A,
+};
+
+u32 RTL8812AE_RADIOB_ARRAY[] = {
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x089, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x086, 0x00014B38,
+ 0xFF0F01C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F02C0, 0xCDEF,
+ 0x086, 0x00014B38,
+ 0xFF0F07D8, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xFF0F07D0, 0xCDEF,
+ 0x086, 0x00014B3A,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x086, 0x00014B38,
+ 0xFF0F0740, 0xDEAD,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0xFF0F07D8, 0xABCD,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03B, 0x0003F218,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03B, 0x00038A58,
+ 0x03B, 0x00037A58,
+ 0x03B, 0x0002A590,
+ 0x03B, 0x00027A50,
+ 0x03B, 0x00018248,
+ 0x03B, 0x00010240,
+ 0x03B, 0x00008240,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000100,
+ 0xFF0F07D8, 0xABCD,
+ 0x034, 0x0000A4EE,
+ 0x034, 0x00009076,
+ 0x034, 0x00008073,
+ 0x034, 0x00007070,
+ 0x034, 0x0000606D,
+ 0x034, 0x0000506A,
+ 0x034, 0x00004049,
+ 0x034, 0x00003046,
+ 0x034, 0x00002028,
+ 0x034, 0x00001025,
+ 0x034, 0x00000022,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF4,
+ 0x034, 0x00009DF1,
+ 0x034, 0x00008DEE,
+ 0x034, 0x00007DEB,
+ 0x034, 0x00006DE8,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146B,
+ 0x034, 0x0000006D,
+ 0xFF0F07D8, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x0EF, 0x00000002,
+ 0x008, 0x00008400,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0003A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0003202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0002B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00023070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0001B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00012085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0000A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00002080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x0007A02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x0007202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x0006B064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x00063070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0005B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00052085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0004A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00042080,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000080,
+ 0x03B, 0x000BA02C,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000400,
+ 0x03B, 0x000B202C,
+ 0x03C, 0x00010000,
+ 0x03A, 0x000000A0,
+ 0x03B, 0x000AB064,
+ 0x03C, 0x00004000,
+ 0x03A, 0x000000D8,
+ 0x03B, 0x000A3070,
+ 0x03C, 0x00004000,
+ 0x03A, 0x00000468,
+ 0x03B, 0x0009B870,
+ 0x03C, 0x00010000,
+ 0x03A, 0x00000098,
+ 0x03B, 0x00092085,
+ 0x03C, 0x000E4000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x0008A080,
+ 0x03C, 0x000F0000,
+ 0x03A, 0x00000418,
+ 0x03B, 0x00082080,
+ 0x03C, 0x00010000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0004A0B2,
+ 0x034, 0x000490AF,
+ 0x034, 0x00048070,
+ 0x034, 0x0004706D,
+ 0x034, 0x00046050,
+ 0x034, 0x0004504D,
+ 0x034, 0x0004404A,
+ 0x034, 0x00043047,
+ 0x034, 0x0004200A,
+ 0x034, 0x00041007,
+ 0x034, 0x00040004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045DC9,
+ 0x034, 0x00044CE8,
+ 0x034, 0x000438CA,
+ 0x034, 0x00042889,
+ 0x034, 0x0004184A,
+ 0x034, 0x0004044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0002A0B2,
+ 0x034, 0x000290AF,
+ 0x034, 0x00028070,
+ 0x034, 0x0002706D,
+ 0x034, 0x00026050,
+ 0x034, 0x0002504D,
+ 0x034, 0x0002404A,
+ 0x034, 0x00023047,
+ 0x034, 0x0002200A,
+ 0x034, 0x00021007,
+ 0x034, 0x00020004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF5,
+ 0x034, 0x00029DF2,
+ 0x034, 0x00028DEF,
+ 0x034, 0x00027DEC,
+ 0x034, 0x00026DE9,
+ 0x034, 0x00025DC9,
+ 0x034, 0x00024CE8,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x0002184A,
+ 0x034, 0x0002044A,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F01C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x0000A0B2,
+ 0x034, 0x000090AF,
+ 0x034, 0x00008070,
+ 0x034, 0x0000706D,
+ 0x034, 0x00006050,
+ 0x034, 0x0000504D,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x0000200A,
+ 0x034, 0x00001007,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009DF7,
+ 0x034, 0x00008DF4,
+ 0x034, 0x00007DF1,
+ 0x034, 0x00006DEE,
+ 0x034, 0x00005DCD,
+ 0x034, 0x00004CEB,
+ 0x034, 0x000038CC,
+ 0x034, 0x0000288B,
+ 0x034, 0x0000184C,
+ 0x034, 0x0000044C,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x000001C5,
+ 0x035, 0x000081C5,
+ 0x035, 0x000101C5,
+ 0x035, 0x00020174,
+ 0x035, 0x00028174,
+ 0x035, 0x00030174,
+ 0x035, 0x00040185,
+ 0x035, 0x00048185,
+ 0x035, 0x00050185,
+ 0x0EF, 0x00000000,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x035, 0x00000186,
+ 0x035, 0x00008186,
+ 0x035, 0x00010185,
+ 0x035, 0x000201D5,
+ 0x035, 0x000281D5,
+ 0x035, 0x000301D5,
+ 0x035, 0x000401D5,
+ 0x035, 0x000481D5,
+ 0x035, 0x000501D5,
+ 0x0EF, 0x00000000,
+ 0xFF0F0740, 0xDEAD,
+ 0xFF0F0740, 0xABCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F01C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F02C0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F07D8, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xFF0F07D0, 0xCDEF,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00005B8B,
+ 0x036, 0x0000DB8B,
+ 0x036, 0x00015B8B,
+ 0x036, 0x0001DB8B,
+ 0x036, 0x000262DB,
+ 0x036, 0x0002E2DB,
+ 0x036, 0x000362DB,
+ 0x036, 0x0003E2DB,
+ 0x036, 0x0004553B,
+ 0x036, 0x0004D53B,
+ 0x036, 0x0005553B,
+ 0x036, 0x0005D53B,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x036, 0x00084EB4,
+ 0x036, 0x0008C9B4,
+ 0x036, 0x000949B4,
+ 0x036, 0x0009C9B4,
+ 0x036, 0x000A4935,
+ 0x036, 0x000AC935,
+ 0x036, 0x000B4935,
+ 0x036, 0x000BC935,
+ 0x036, 0x000C4EB4,
+ 0x036, 0x000CCEB4,
+ 0x036, 0x000D4EB4,
+ 0x036, 0x000DCEB4,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0740, 0xABCD,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F01C0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F07D8, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xFF0F07D0, 0xCDEF,
+ 0x03C, 0x000002DC,
+ 0x03C, 0x00000524,
+ 0x03C, 0x00000902,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x000002AA,
+ 0x03C, 0x000005A2,
+ 0x03C, 0x00000880,
+ 0xFF0F0740, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0x0DF, 0x00000080,
+ 0xFF0F0740, 0xABCD,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F01C0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D8, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xFF0F07D0, 0xCDEF,
+ 0x061, 0x000EAC43,
+ 0x062, 0x00038F47,
+ 0x063, 0x00031157,
+ 0x064, 0x0001C4AC,
+ 0x065, 0x000931D1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000E5D53,
+ 0x062, 0x00038FCD,
+ 0x063, 0x000314EB,
+ 0x064, 0x000196AC,
+ 0x065, 0x000931D7,
+ 0xFF0F0740, 0xDEAD,
+ 0x008, 0x00008400,
+};
+
+u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 0x018, 0x0001712A,
+ 0x056, 0x00051CF2,
+ 0x066, 0x00040000,
+ 0x000, 0x00010000,
+ 0x01E, 0x00080000,
+ 0x082, 0x00000830,
+ 0x083, 0x00021800,
+ 0x084, 0x00028000,
+ 0x085, 0x00048000,
+ 0x086, 0x00094838,
+ 0x087, 0x00044980,
+ 0x088, 0x00048000,
+ 0x089, 0x0000D480,
+ 0x08A, 0x00042240,
+ 0x08B, 0x000F0380,
+ 0x08C, 0x00090000,
+ 0x08D, 0x00022852,
+ 0x08E, 0x00065540,
+ 0x08F, 0x00088001,
+ 0x0EF, 0x00020000,
+ 0x03E, 0x00000380,
+ 0x03F, 0x00090018,
+ 0x03E, 0x00020380,
+ 0x03F, 0x000A0018,
+ 0x03E, 0x00040308,
+ 0x03F, 0x000A0018,
+ 0x03E, 0x00060018,
+ 0x03F, 0x000A0018,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x089, 0x00000080,
+ 0x08B, 0x00080180,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00038027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00030113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x00028027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x00027027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0001F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00017F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00008027,
+ 0x03C, 0x000CA000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00078027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x00070113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x00068027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x00067027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0005F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00057F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00048027,
+ 0x03C, 0x000CA000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x000B8027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x00000244,
+ 0x03B, 0x000B0113,
+ 0x03C, 0x00082000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x000A8027,
+ 0x03C, 0x00082000,
+ 0x03A, 0x000000CC,
+ 0x03B, 0x000A7027,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000014C,
+ 0x03B, 0x0009F913,
+ 0x03C, 0x00042000,
+ 0x03A, 0x0000010C,
+ 0x03B, 0x00097F10,
+ 0x03C, 0x00012000,
+ 0x03A, 0x000000D0,
+ 0x03B, 0x00088027,
+ 0x03C, 0x000CA000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00001100,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xFF0F0200, 0xCDEF,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0004A0F3,
+ 0x034, 0x000490B1,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004ADF7,
+ 0x034, 0x00049DF3,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000480AE,
+ 0x034, 0x000470AB,
+ 0x034, 0x0004608B,
+ 0x034, 0x00045069,
+ 0x034, 0x00044048,
+ 0x034, 0x00043045,
+ 0x034, 0x00042026,
+ 0x034, 0x00041023,
+ 0x034, 0x00040002,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00048DEF,
+ 0x034, 0x00047DEC,
+ 0x034, 0x00046DE9,
+ 0x034, 0x00045CCB,
+ 0x034, 0x0004488D,
+ 0x034, 0x0004348D,
+ 0x034, 0x0004248A,
+ 0x034, 0x0004108D,
+ 0x034, 0x0004008A,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0200, 0xABCD,
+ 0x034, 0x0002ADF4,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0002A0F3,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0002ADF7,
+ 0xFF0F0200, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00029DF4,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00029DF4,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00029DF4,
+ 0xFF0F0200, 0xCDEF,
+ 0x034, 0x00029DF1,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000290F0,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00029DF2,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x000280AF,
+ 0x034, 0x000270AC,
+ 0x034, 0x0002608B,
+ 0x034, 0x00025069,
+ 0x034, 0x00024048,
+ 0x034, 0x00023045,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020002,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00028DEE,
+ 0x034, 0x00027DEB,
+ 0x034, 0x00026CCD,
+ 0x034, 0x00025CCA,
+ 0x034, 0x0002488C,
+ 0x034, 0x0002384C,
+ 0x034, 0x00022849,
+ 0x034, 0x00021449,
+ 0x034, 0x0002004D,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F02C0, 0xABCD,
+ 0x034, 0x0000A0D7,
+ 0x034, 0x000090D3,
+ 0x034, 0x000080B1,
+ 0x034, 0x000070AE,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000ADF7,
+ 0x034, 0x00009DF4,
+ 0x034, 0x00008DF1,
+ 0x034, 0x00007DEE,
+ 0xFF0F02C0, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F0204, 0xCDEF,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F0404, 0xCDEF,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x0000608D,
+ 0x034, 0x0000506B,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x00002044,
+ 0x034, 0x00001025,
+ 0x034, 0x00000004,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x00006DCD,
+ 0x034, 0x00005CCD,
+ 0x034, 0x00004CCA,
+ 0x034, 0x0000388C,
+ 0x034, 0x00002888,
+ 0x034, 0x00001488,
+ 0x034, 0x00000486,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0xFF0F0104, 0xABCD,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xFF0F0204, 0xCDEF,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xFF0F0404, 0xCDEF,
+ 0x035, 0x00000187,
+ 0x035, 0x00008187,
+ 0x035, 0x00010187,
+ 0x035, 0x00020188,
+ 0x035, 0x00028188,
+ 0x035, 0x00030188,
+ 0x035, 0x00040188,
+ 0x035, 0x00048188,
+ 0x035, 0x00050188,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x035, 0x00000145,
+ 0x035, 0x00008145,
+ 0x035, 0x00010145,
+ 0x035, 0x00020196,
+ 0x035, 0x00028196,
+ 0x035, 0x00030196,
+ 0x035, 0x000401C7,
+ 0x035, 0x000481C7,
+ 0x035, 0x000501C7,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0xFF0F0104, 0xABCD,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xFF0F0204, 0xCDEF,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xFF0F0404, 0xCDEF,
+ 0x036, 0x00085733,
+ 0x036, 0x0008D733,
+ 0x036, 0x00095733,
+ 0x036, 0x0009D733,
+ 0x036, 0x000A64B4,
+ 0x036, 0x000AE4B4,
+ 0x036, 0x000B64B4,
+ 0x036, 0x000BE4B4,
+ 0x036, 0x000C64B4,
+ 0x036, 0x000CE4B4,
+ 0x036, 0x000D64B4,
+ 0x036, 0x000DE4B4,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x036, 0x000056B3,
+ 0x036, 0x0000D6B3,
+ 0x036, 0x000156B3,
+ 0x036, 0x0001D6B3,
+ 0x036, 0x00026634,
+ 0x036, 0x0002E634,
+ 0x036, 0x00036634,
+ 0x036, 0x0003E634,
+ 0x036, 0x000467B4,
+ 0x036, 0x0004E7B4,
+ 0x036, 0x000567B4,
+ 0x036, 0x0005E7B4,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0xFF0F0104, 0xABCD,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xFF0F0204, 0xCDEF,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xFF0F0404, 0xCDEF,
+ 0x03C, 0x000001C8,
+ 0x03C, 0x00000492,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x0000022A,
+ 0x03C, 0x00000594,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x03C, 0x00000800,
+ 0xFF0F0204, 0xCDEF,
+ 0x03C, 0x00000800,
+ 0xFF0F0404, 0xCDEF,
+ 0x03C, 0x00000800,
+ 0xFF0F02C0, 0xCDEF,
+ 0x03C, 0x00000820,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x03C, 0x00000900,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000002,
+ 0xFF0F0104, 0xABCD,
+ 0x008, 0x0004E400,
+ 0xFF0F0204, 0xCDEF,
+ 0x008, 0x0004E400,
+ 0xFF0F0404, 0xCDEF,
+ 0x008, 0x0004E400,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x008, 0x00002000,
+ 0xFF0F0104, 0xDEAD,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x000000C0,
+ 0x01F, 0x00040064,
+ 0xFF0F0104, 0xABCD,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xFF0F0204, 0xCDEF,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xFF0F0404, 0xCDEF,
+ 0x058, 0x000A7284,
+ 0x059, 0x000600EC,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xFF0F0204, 0xCDEF,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xFF0F0404, 0xCDEF,
+ 0x061, 0x000E8D73,
+ 0x062, 0x00093FC5,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x061, 0x000EAD53,
+ 0x062, 0x00093BC4,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x063, 0x000110E9,
+ 0xFF0F0204, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xFF0F0404, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xFF0F0200, 0xCDEF,
+ 0x063, 0x000710E9,
+ 0xFF0F02C0, 0xCDEF,
+ 0x063, 0x000110E9,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x063, 0x000714E9,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0104, 0xABCD,
+ 0x064, 0x0001C27C,
+ 0xFF0F0204, 0xCDEF,
+ 0x064, 0x0001C27C,
+ 0xFF0F0404, 0xCDEF,
+ 0x064, 0x0001C27C,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x064, 0x0001C67C,
+ 0xFF0F0104, 0xDEAD,
+ 0xFF0F0200, 0xABCD,
+ 0x065, 0x00093016,
+ 0xFF0F02C0, 0xCDEF,
+ 0x065, 0x00093015,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x065, 0x00091016,
+ 0xFF0F0200, 0xDEAD,
+ 0x018, 0x00000006,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x0003824B,
+ 0x03B, 0x0003024B,
+ 0x03B, 0x0002844B,
+ 0x03B, 0x00020F4B,
+ 0x03B, 0x00018F4B,
+ 0x03B, 0x000104B2,
+ 0x03B, 0x00008049,
+ 0x03B, 0x00000148,
+ 0x03B, 0x0007824B,
+ 0x03B, 0x0007024B,
+ 0x03B, 0x0006824B,
+ 0x03B, 0x00060F4B,
+ 0x03B, 0x00058F4B,
+ 0x03B, 0x000504B2,
+ 0x03B, 0x00048049,
+ 0x03B, 0x00040148,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADF3,
+ 0x034, 0x00009DEF,
+ 0x034, 0x00008DEC,
+ 0x034, 0x00007DE9,
+ 0x034, 0x00006CED,
+ 0x034, 0x00005CE9,
+ 0x034, 0x000044E9,
+ 0x034, 0x000034E6,
+ 0x034, 0x0000246A,
+ 0x034, 0x00001467,
+ 0x034, 0x00000068,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000010,
+ 0x044, 0x0000ADF2,
+ 0x044, 0x00009DEF,
+ 0x044, 0x00008DEC,
+ 0x044, 0x00007DE9,
+ 0x044, 0x00006CEC,
+ 0x044, 0x00005CE9,
+ 0x044, 0x000044EC,
+ 0x044, 0x000034E9,
+ 0x044, 0x0000246C,
+ 0x044, 0x00001469,
+ 0x044, 0x0000006C,
+ 0x0ED, 0x00000000,
+ 0x0ED, 0x00000001,
+ 0x040, 0x00038DA7,
+ 0x040, 0x000300C2,
+ 0x040, 0x000288E2,
+ 0x040, 0x000200B8,
+ 0x040, 0x000188A5,
+ 0x040, 0x00010FBC,
+ 0x040, 0x00008F71,
+ 0x040, 0x00000240,
+ 0x0ED, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000120,
+ 0x035, 0x00008120,
+ 0x035, 0x00010120,
+ 0x036, 0x00000085,
+ 0x036, 0x00008085,
+ 0x036, 0x00010085,
+ 0x036, 0x00018085,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C31,
+ 0x052, 0x00000622,
+ 0x053, 0x000FC70B,
+ 0x054, 0x0000017E,
+ 0x056, 0x00051DF3,
+ 0x051, 0x00000C01,
+ 0x052, 0x000006D6,
+ 0x053, 0x000FC649,
+ 0x070, 0x00049661,
+ 0x071, 0x0007843E,
+ 0x072, 0x00000382,
+ 0x074, 0x00051400,
+ 0x035, 0x00000160,
+ 0x035, 0x00008160,
+ 0x035, 0x00010160,
+ 0x036, 0x00000124,
+ 0x036, 0x00008124,
+ 0x036, 0x00010124,
+ 0x036, 0x00018124,
+ 0x0ED, 0x0000000C,
+ 0x045, 0x00000140,
+ 0x045, 0x00008140,
+ 0x045, 0x00010140,
+ 0x046, 0x00000124,
+ 0x046, 0x00008124,
+ 0x046, 0x00010124,
+ 0x046, 0x00018124,
+ 0x0DF, 0x00000088,
+ 0x0B3, 0x000F0E18,
+ 0x0B4, 0x0001214C,
+ 0x0B7, 0x0003000C,
+ 0x01C, 0x000539D2,
+ 0x018, 0x0001F12A,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x018, 0x0001712A,
+};
+
+u32 RTL8812AE_MAC_REG_ARRAY[] = {
+ 0x010, 0x0000000C,
+ 0xFF0F0180, 0xABCD,
+ 0x025, 0x0000000F,
+ 0xFF0F01C0, 0xCDEF,
+ 0x025, 0x0000000F,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x025, 0x0000006F,
+ 0xFF0F0180, 0xDEAD,
+ 0x072, 0x00000000,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x45B, 0x00000080,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x604, 0x00000001,
+ 0x605, 0x00000030,
+ 0x607, 0x00000003,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000080,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+};
+
+u32 RTL8821AE_MAC_REG_ARRAY[] = {
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x0000003F,
+ 0x4C9, 0x000000FF,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x607, 0x00000007,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+};
+
+u32 RTL8812AE_AGC_TAB_ARRAY[] = {
+ 0xFF0F07D8, 0xABCD,
+ 0x81C, 0xFC000001,
+ 0x81C, 0xFB020001,
+ 0x81C, 0xFA040001,
+ 0x81C, 0xF9060001,
+ 0x81C, 0xF8080001,
+ 0x81C, 0xF70A0001,
+ 0x81C, 0xF60C0001,
+ 0x81C, 0xF50E0001,
+ 0x81C, 0xF4100001,
+ 0x81C, 0xF3120001,
+ 0x81C, 0xF2140001,
+ 0x81C, 0xF1160001,
+ 0x81C, 0xF0180001,
+ 0x81C, 0xEF1A0001,
+ 0x81C, 0xEE1C0001,
+ 0x81C, 0xED1E0001,
+ 0x81C, 0xEC200001,
+ 0x81C, 0xEB220001,
+ 0x81C, 0xEA240001,
+ 0x81C, 0xCD260001,
+ 0x81C, 0xCC280001,
+ 0x81C, 0xCB2A0001,
+ 0x81C, 0xCA2C0001,
+ 0x81C, 0xC92E0001,
+ 0x81C, 0xC8300001,
+ 0x81C, 0xA6320001,
+ 0x81C, 0xA5340001,
+ 0x81C, 0xA4360001,
+ 0x81C, 0xA3380001,
+ 0x81C, 0xA23A0001,
+ 0x81C, 0x883C0001,
+ 0x81C, 0x873E0001,
+ 0x81C, 0x86400001,
+ 0x81C, 0x85420001,
+ 0x81C, 0x84440001,
+ 0x81C, 0x83460001,
+ 0x81C, 0x82480001,
+ 0x81C, 0x814A0001,
+ 0x81C, 0x484C0001,
+ 0x81C, 0x474E0001,
+ 0x81C, 0x46500001,
+ 0x81C, 0x45520001,
+ 0x81C, 0x44540001,
+ 0x81C, 0x43560001,
+ 0x81C, 0x42580001,
+ 0x81C, 0x415A0001,
+ 0x81C, 0x255C0001,
+ 0x81C, 0x245E0001,
+ 0x81C, 0x23600001,
+ 0x81C, 0x22620001,
+ 0x81C, 0x21640001,
+ 0x81C, 0x21660001,
+ 0x81C, 0x21680001,
+ 0x81C, 0x216A0001,
+ 0x81C, 0x216C0001,
+ 0x81C, 0x216E0001,
+ 0x81C, 0x21700001,
+ 0x81C, 0x21720001,
+ 0x81C, 0x21740001,
+ 0x81C, 0x21760001,
+ 0x81C, 0x21780001,
+ 0x81C, 0x217A0001,
+ 0x81C, 0x217C0001,
+ 0x81C, 0x217E0001,
+ 0xFF0F07D0, 0xCDEF,
+ 0x81C, 0xF9000001,
+ 0x81C, 0xF8020001,
+ 0x81C, 0xF7040001,
+ 0x81C, 0xF6060001,
+ 0x81C, 0xF5080001,
+ 0x81C, 0xF40A0001,
+ 0x81C, 0xF30C0001,
+ 0x81C, 0xF20E0001,
+ 0x81C, 0xF1100001,
+ 0x81C, 0xF0120001,
+ 0x81C, 0xEF140001,
+ 0x81C, 0xEE160001,
+ 0x81C, 0xED180001,
+ 0x81C, 0xEC1A0001,
+ 0x81C, 0xEB1C0001,
+ 0x81C, 0xEA1E0001,
+ 0x81C, 0xCD200001,
+ 0x81C, 0xCC220001,
+ 0x81C, 0xCB240001,
+ 0x81C, 0xCA260001,
+ 0x81C, 0xC9280001,
+ 0x81C, 0xC82A0001,
+ 0x81C, 0xC72C0001,
+ 0x81C, 0xC62E0001,
+ 0x81C, 0xA5300001,
+ 0x81C, 0xA4320001,
+ 0x81C, 0xA3340001,
+ 0x81C, 0xA2360001,
+ 0x81C, 0x88380001,
+ 0x81C, 0x873A0001,
+ 0x81C, 0x863C0001,
+ 0x81C, 0x853E0001,
+ 0x81C, 0x84400001,
+ 0x81C, 0x83420001,
+ 0x81C, 0x82440001,
+ 0x81C, 0x81460001,
+ 0x81C, 0x48480001,
+ 0x81C, 0x474A0001,
+ 0x81C, 0x464C0001,
+ 0x81C, 0x454E0001,
+ 0x81C, 0x44500001,
+ 0x81C, 0x43520001,
+ 0x81C, 0x42540001,
+ 0x81C, 0x41560001,
+ 0x81C, 0x25580001,
+ 0x81C, 0x245A0001,
+ 0x81C, 0x235C0001,
+ 0x81C, 0x225E0001,
+ 0x81C, 0x21600001,
+ 0x81C, 0x21620001,
+ 0x81C, 0x21640001,
+ 0x81C, 0x21660001,
+ 0x81C, 0x21680001,
+ 0x81C, 0x216A0001,
+ 0x81C, 0x236C0001,
+ 0x81C, 0x226E0001,
+ 0x81C, 0x21700001,
+ 0x81C, 0x21720001,
+ 0x81C, 0x21740001,
+ 0x81C, 0x21760001,
+ 0x81C, 0x21780001,
+ 0x81C, 0x217A0001,
+ 0x81C, 0x217C0001,
+ 0x81C, 0x217E0001,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF000001,
+ 0x81C, 0xFF020001,
+ 0x81C, 0xFF040001,
+ 0x81C, 0xFF060001,
+ 0x81C, 0xFF080001,
+ 0x81C, 0xFE0A0001,
+ 0x81C, 0xFD0C0001,
+ 0x81C, 0xFC0E0001,
+ 0x81C, 0xFB100001,
+ 0x81C, 0xFA120001,
+ 0x81C, 0xF9140001,
+ 0x81C, 0xF8160001,
+ 0x81C, 0xF7180001,
+ 0x81C, 0xF61A0001,
+ 0x81C, 0xF51C0001,
+ 0x81C, 0xF41E0001,
+ 0x81C, 0xF3200001,
+ 0x81C, 0xF2220001,
+ 0x81C, 0xF1240001,
+ 0x81C, 0xF0260001,
+ 0x81C, 0xEF280001,
+ 0x81C, 0xEE2A0001,
+ 0x81C, 0xED2C0001,
+ 0x81C, 0xEC2E0001,
+ 0x81C, 0xEB300001,
+ 0x81C, 0xEA320001,
+ 0x81C, 0xE9340001,
+ 0x81C, 0xE8360001,
+ 0x81C, 0xE7380001,
+ 0x81C, 0xE63A0001,
+ 0x81C, 0xE53C0001,
+ 0x81C, 0xC73E0001,
+ 0x81C, 0xC6400001,
+ 0x81C, 0xC5420001,
+ 0x81C, 0xC4440001,
+ 0x81C, 0xC3460001,
+ 0x81C, 0xC2480001,
+ 0x81C, 0xC14A0001,
+ 0x81C, 0xA74C0001,
+ 0x81C, 0xA64E0001,
+ 0x81C, 0xA5500001,
+ 0x81C, 0xA4520001,
+ 0x81C, 0xA3540001,
+ 0x81C, 0xA2560001,
+ 0x81C, 0xA1580001,
+ 0x81C, 0x675A0001,
+ 0x81C, 0x665C0001,
+ 0x81C, 0x655E0001,
+ 0x81C, 0x64600001,
+ 0x81C, 0x63620001,
+ 0x81C, 0x48640001,
+ 0x81C, 0x47660001,
+ 0x81C, 0x46680001,
+ 0x81C, 0x456A0001,
+ 0x81C, 0x446C0001,
+ 0x81C, 0x436E0001,
+ 0x81C, 0x42700001,
+ 0x81C, 0x41720001,
+ 0x81C, 0x41740001,
+ 0x81C, 0x41760001,
+ 0x81C, 0x41780001,
+ 0x81C, 0x417A0001,
+ 0x81C, 0x417C0001,
+ 0x81C, 0x417E0001,
+ 0xFF0F07D8, 0xDEAD,
+ 0xFF0F0180, 0xABCD,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F0280, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F01C0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F02C0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F07D8, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F07D0, 0xCDEF,
+ 0x81C, 0xFC800001,
+ 0x81C, 0xFB820001,
+ 0x81C, 0xFA840001,
+ 0x81C, 0xF9860001,
+ 0x81C, 0xF8880001,
+ 0x81C, 0xF78A0001,
+ 0x81C, 0xF68C0001,
+ 0x81C, 0xF58E0001,
+ 0x81C, 0xF4900001,
+ 0x81C, 0xF3920001,
+ 0x81C, 0xF2940001,
+ 0x81C, 0xF1960001,
+ 0x81C, 0xF0980001,
+ 0x81C, 0xEF9A0001,
+ 0x81C, 0xEE9C0001,
+ 0x81C, 0xED9E0001,
+ 0x81C, 0xECA00001,
+ 0x81C, 0xEBA20001,
+ 0x81C, 0xEAA40001,
+ 0x81C, 0xE9A60001,
+ 0x81C, 0xE8A80001,
+ 0x81C, 0xE7AA0001,
+ 0x81C, 0xE6AC0001,
+ 0x81C, 0xE5AE0001,
+ 0x81C, 0xE4B00001,
+ 0x81C, 0xE3B20001,
+ 0x81C, 0xA8B40001,
+ 0x81C, 0xA7B60001,
+ 0x81C, 0xA6B80001,
+ 0x81C, 0xA5BA0001,
+ 0x81C, 0xA4BC0001,
+ 0x81C, 0xA3BE0001,
+ 0x81C, 0xA2C00001,
+ 0x81C, 0xA1C20001,
+ 0x81C, 0x68C40001,
+ 0x81C, 0x67C60001,
+ 0x81C, 0x66C80001,
+ 0x81C, 0x65CA0001,
+ 0x81C, 0x64CC0001,
+ 0x81C, 0x47CE0001,
+ 0x81C, 0x46D00001,
+ 0x81C, 0x45D20001,
+ 0x81C, 0x44D40001,
+ 0x81C, 0x43D60001,
+ 0x81C, 0x42D80001,
+ 0x81C, 0x08DA0001,
+ 0x81C, 0x07DC0001,
+ 0x81C, 0x06DE0001,
+ 0x81C, 0x05E00001,
+ 0x81C, 0x04E20001,
+ 0x81C, 0x03E40001,
+ 0x81C, 0x02E60001,
+ 0x81C, 0x01E80001,
+ 0x81C, 0x01EA0001,
+ 0x81C, 0x01EC0001,
+ 0x81C, 0x01EE0001,
+ 0x81C, 0x01F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF800001,
+ 0x81C, 0xFF820001,
+ 0x81C, 0xFF840001,
+ 0x81C, 0xFE860001,
+ 0x81C, 0xFD880001,
+ 0x81C, 0xFC8A0001,
+ 0x81C, 0xFB8C0001,
+ 0x81C, 0xFA8E0001,
+ 0x81C, 0xF9900001,
+ 0x81C, 0xF8920001,
+ 0x81C, 0xF7940001,
+ 0x81C, 0xF6960001,
+ 0x81C, 0xF5980001,
+ 0x81C, 0xF49A0001,
+ 0x81C, 0xF39C0001,
+ 0x81C, 0xF29E0001,
+ 0x81C, 0xF1A00001,
+ 0x81C, 0xF0A20001,
+ 0x81C, 0xEFA40001,
+ 0x81C, 0xEEA60001,
+ 0x81C, 0xEDA80001,
+ 0x81C, 0xECAA0001,
+ 0x81C, 0xEBAC0001,
+ 0x81C, 0xEAAE0001,
+ 0x81C, 0xE9B00001,
+ 0x81C, 0xE8B20001,
+ 0x81C, 0xE7B40001,
+ 0x81C, 0xE6B60001,
+ 0x81C, 0xE5B80001,
+ 0x81C, 0xE4BA0001,
+ 0x81C, 0xE3BC0001,
+ 0x81C, 0xA8BE0001,
+ 0x81C, 0xA7C00001,
+ 0x81C, 0xA6C20001,
+ 0x81C, 0xA5C40001,
+ 0x81C, 0xA4C60001,
+ 0x81C, 0xA3C80001,
+ 0x81C, 0xA2CA0001,
+ 0x81C, 0xA1CC0001,
+ 0x81C, 0x68CE0001,
+ 0x81C, 0x67D00001,
+ 0x81C, 0x66D20001,
+ 0x81C, 0x65D40001,
+ 0x81C, 0x64D60001,
+ 0x81C, 0x47D80001,
+ 0x81C, 0x46DA0001,
+ 0x81C, 0x45DC0001,
+ 0x81C, 0x44DE0001,
+ 0x81C, 0x43E00001,
+ 0x81C, 0x42E20001,
+ 0x81C, 0x08E40001,
+ 0x81C, 0x07E60001,
+ 0x81C, 0x06E80001,
+ 0x81C, 0x05EA0001,
+ 0x81C, 0x04EC0001,
+ 0x81C, 0x03EE0001,
+ 0x81C, 0x02F00001,
+ 0x81C, 0x01F20001,
+ 0x81C, 0x01F40001,
+ 0x81C, 0x01F60001,
+ 0x81C, 0x01F80001,
+ 0x81C, 0x01FA0001,
+ 0x81C, 0x01FC0001,
+ 0x81C, 0x01FE0001,
+ 0xFF0F0180, 0xDEAD,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+};
+
+u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 0x81C, 0xBF000001,
+ 0x81C, 0xBF020001,
+ 0x81C, 0xBF040001,
+ 0x81C, 0xBF060001,
+ 0x81C, 0xBE080001,
+ 0x81C, 0xBD0A0001,
+ 0x81C, 0xBC0C0001,
+ 0x81C, 0xBA0E0001,
+ 0x81C, 0xB9100001,
+ 0x81C, 0xB8120001,
+ 0x81C, 0xB7140001,
+ 0x81C, 0xB6160001,
+ 0x81C, 0xB5180001,
+ 0x81C, 0xB41A0001,
+ 0x81C, 0xB31C0001,
+ 0x81C, 0xB21E0001,
+ 0x81C, 0xB1200001,
+ 0x81C, 0xB0220001,
+ 0x81C, 0xAF240001,
+ 0x81C, 0xAE260001,
+ 0x81C, 0xAD280001,
+ 0x81C, 0xAC2A0001,
+ 0x81C, 0xAB2C0001,
+ 0x81C, 0xAA2E0001,
+ 0x81C, 0xA9300001,
+ 0x81C, 0xA8320001,
+ 0x81C, 0xA7340001,
+ 0x81C, 0xA6360001,
+ 0x81C, 0xA5380001,
+ 0x81C, 0xA43A0001,
+ 0x81C, 0xA33C0001,
+ 0x81C, 0x673E0001,
+ 0x81C, 0x66400001,
+ 0x81C, 0x65420001,
+ 0x81C, 0x64440001,
+ 0x81C, 0x63460001,
+ 0x81C, 0x62480001,
+ 0x81C, 0x614A0001,
+ 0x81C, 0x474C0001,
+ 0x81C, 0x464E0001,
+ 0x81C, 0x45500001,
+ 0x81C, 0x44520001,
+ 0x81C, 0x43540001,
+ 0x81C, 0x42560001,
+ 0x81C, 0x41580001,
+ 0x81C, 0x285A0001,
+ 0x81C, 0x275C0001,
+ 0x81C, 0x265E0001,
+ 0x81C, 0x25600001,
+ 0x81C, 0x24620001,
+ 0x81C, 0x0A640001,
+ 0x81C, 0x09660001,
+ 0x81C, 0x08680001,
+ 0x81C, 0x076A0001,
+ 0x81C, 0x066C0001,
+ 0x81C, 0x056E0001,
+ 0x81C, 0x04700001,
+ 0x81C, 0x03720001,
+ 0x81C, 0x02740001,
+ 0x81C, 0x01760001,
+ 0x81C, 0x01780001,
+ 0x81C, 0x017A0001,
+ 0x81C, 0x017C0001,
+ 0x81C, 0x017E0001,
+ 0xFF0F02C0, 0xABCD,
+ 0x81C, 0xFB000101,
+ 0x81C, 0xFA020101,
+ 0x81C, 0xF9040101,
+ 0x81C, 0xF8060101,
+ 0x81C, 0xF7080101,
+ 0x81C, 0xF60A0101,
+ 0x81C, 0xF50C0101,
+ 0x81C, 0xF40E0101,
+ 0x81C, 0xF3100101,
+ 0x81C, 0xF2120101,
+ 0x81C, 0xF1140101,
+ 0x81C, 0xF0160101,
+ 0x81C, 0xEF180101,
+ 0x81C, 0xEE1A0101,
+ 0x81C, 0xED1C0101,
+ 0x81C, 0xEC1E0101,
+ 0x81C, 0xEB200101,
+ 0x81C, 0xEA220101,
+ 0x81C, 0xE9240101,
+ 0x81C, 0xE8260101,
+ 0x81C, 0xE7280101,
+ 0x81C, 0xE62A0101,
+ 0x81C, 0xE52C0101,
+ 0x81C, 0xE42E0101,
+ 0x81C, 0xE3300101,
+ 0x81C, 0xA5320101,
+ 0x81C, 0xA4340101,
+ 0x81C, 0xA3360101,
+ 0x81C, 0x87380101,
+ 0x81C, 0x863A0101,
+ 0x81C, 0x853C0101,
+ 0x81C, 0x843E0101,
+ 0x81C, 0x69400101,
+ 0x81C, 0x68420101,
+ 0x81C, 0x67440101,
+ 0x81C, 0x66460101,
+ 0x81C, 0x49480101,
+ 0x81C, 0x484A0101,
+ 0x81C, 0x474C0101,
+ 0x81C, 0x2A4E0101,
+ 0x81C, 0x29500101,
+ 0x81C, 0x28520101,
+ 0x81C, 0x27540101,
+ 0x81C, 0x26560101,
+ 0x81C, 0x25580101,
+ 0x81C, 0x245A0101,
+ 0x81C, 0x235C0101,
+ 0x81C, 0x055E0101,
+ 0x81C, 0x04600101,
+ 0x81C, 0x03620101,
+ 0x81C, 0x02640101,
+ 0x81C, 0x01660101,
+ 0x81C, 0x01680101,
+ 0x81C, 0x016A0101,
+ 0x81C, 0x016C0101,
+ 0x81C, 0x016E0101,
+ 0x81C, 0x01700101,
+ 0x81C, 0x01720101,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x81C, 0xFF000101,
+ 0x81C, 0xFF020101,
+ 0x81C, 0xFE040101,
+ 0x81C, 0xFD060101,
+ 0x81C, 0xFC080101,
+ 0x81C, 0xFD0A0101,
+ 0x81C, 0xFC0C0101,
+ 0x81C, 0xFB0E0101,
+ 0x81C, 0xFA100101,
+ 0x81C, 0xF9120101,
+ 0x81C, 0xF8140101,
+ 0x81C, 0xF7160101,
+ 0x81C, 0xF6180101,
+ 0x81C, 0xF51A0101,
+ 0x81C, 0xF41C0101,
+ 0x81C, 0xF31E0101,
+ 0x81C, 0xF2200101,
+ 0x81C, 0xF1220101,
+ 0x81C, 0xF0240101,
+ 0x81C, 0xEF260101,
+ 0x81C, 0xEE280101,
+ 0x81C, 0xED2A0101,
+ 0x81C, 0xEC2C0101,
+ 0x81C, 0xEB2E0101,
+ 0x81C, 0xEA300101,
+ 0x81C, 0xE9320101,
+ 0x81C, 0xE8340101,
+ 0x81C, 0xE7360101,
+ 0x81C, 0xE6380101,
+ 0x81C, 0xE53A0101,
+ 0x81C, 0xE43C0101,
+ 0x81C, 0xE33E0101,
+ 0x81C, 0xA5400101,
+ 0x81C, 0xA4420101,
+ 0x81C, 0xA3440101,
+ 0x81C, 0x87460101,
+ 0x81C, 0x86480101,
+ 0x81C, 0x854A0101,
+ 0x81C, 0x844C0101,
+ 0x81C, 0x694E0101,
+ 0x81C, 0x68500101,
+ 0x81C, 0x67520101,
+ 0x81C, 0x66540101,
+ 0x81C, 0x49560101,
+ 0x81C, 0x48580101,
+ 0x81C, 0x475A0101,
+ 0x81C, 0x2A5C0101,
+ 0x81C, 0x295E0101,
+ 0x81C, 0x28600101,
+ 0x81C, 0x27620101,
+ 0x81C, 0x26640101,
+ 0x81C, 0x25660101,
+ 0x81C, 0x24680101,
+ 0x81C, 0x236A0101,
+ 0x81C, 0x056C0101,
+ 0x81C, 0x046E0101,
+ 0x81C, 0x03700101,
+ 0x81C, 0x02720101,
+ 0xFF0F02C0, 0xDEAD,
+ 0x81C, 0x01740101,
+ 0x81C, 0x01760101,
+ 0x81C, 0x01780101,
+ 0x81C, 0x017A0101,
+ 0x81C, 0x017C0101,
+ 0x81C, 0x017E0101,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+};
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/table.h b/drivers/staging/rtl8821ae/rtl8821ae/table.h
new file mode 100644
index 000000000000..b9d7b266a33a
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/table.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TABLE__H_
+#define __RTL8821AE_TABLE__H_
+
+#include <linux/types.h>
+#define RTL8821AEPHY_REG_1TARRAYLEN 344
+extern u32 RTL8821AE_PHY_REG_ARRAY[];
+#define RTL8812AEPHY_REG_1TARRAYLEN 490
+extern u32 RTL8812AE_PHY_REG_ARRAY[];
+#define RTL8821AEPHY_REG_ARRAY_PGLEN 90
+extern u32 RTL8821AE_PHY_REG_ARRAY_PG[];
+#define RTL8812AEPHY_REG_ARRAY_PGLEN 276
+extern u32 RTL8812AE_PHY_REG_ARRAY_PG[];
+//#define RTL8723BE_RADIOA_1TARRAYLEN 206
+//extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[];
+#define RTL8812AE_RADIOA_1TARRAYLEN 1264
+extern u32 RTL8812AE_RADIOA_ARRAY[];
+#define RTL8812AE_RADIOB_1TARRAYLEN 1240
+extern u32 RTL8812AE_RADIOB_ARRAY[];
+#define RTL8821AE_RADIOA_1TARRAYLEN 1176
+extern u32 RTL8821AE_RADIOA_ARRAY[];
+#define RTL8821AEMAC_1T_ARRAYLEN 194
+extern u32 RTL8821AE_MAC_REG_ARRAY[];
+#define RTL8812AEMAC_1T_ARRAYLEN 214
+extern u32 RTL8812AE_MAC_REG_ARRAY[];
+#define RTL8821AEAGCTAB_1TARRAYLEN 382
+extern u32 RTL8821AE_AGC_TAB_ARRAY[];
+#define RTL8812AEAGCTAB_1TARRAYLEN 1312
+extern u32 RTL8812AE_AGC_TAB_ARRAY[];
+
+#endif
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.c b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
new file mode 100644
index 000000000000..dd0f6dcb3085
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
@@ -0,0 +1,1050 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+
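+/* Map an skb to the firmware queue it is transmitted on: beacons and
+ * management/control frames use dedicated queues, data frames follow
+ * the skb priority.
+ */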
+static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+ u16 fc = rtl_get_fc(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return QSLT_BEACON;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return QSLT_MGNT;
+
+ return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band: rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band: rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate)
+{
+ int rate_idx;
+
+ if (false == isht) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+#else
+ if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) {
+#endif
+ switch (desc_rate) {
+ case DESC_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC_RATE6M:
+ rate_idx = 0;
+ break;
+ case DESC_RATE9M:
+ rate_idx = 1;
+ break;
+ case DESC_RATE12M:
+ rate_idx = 2;
+ break;
+ case DESC_RATE18M:
+ rate_idx = 3;
+ break;
+ case DESC_RATE24M:
+ rate_idx = 4;
+ break;
+ case DESC_RATE36M:
+ rate_idx = 5;
+ break;
+ case DESC_RATE48M:
+ rate_idx = 6;
+ break;
+ case DESC_RATE54M:
+ rate_idx = 7;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ } else {
+ switch(desc_rate) {
+ case DESC_RATEMCS0:
+ rate_idx = 0;
+ break;
+ case DESC_RATEMCS1:
+ rate_idx = 1;
+ break;
+ case DESC_RATEMCS2:
+ rate_idx = 2;
+ break;
+ case DESC_RATEMCS3:
+ rate_idx = 3;
+ break;
+ case DESC_RATEMCS4:
+ rate_idx = 4;
+ break;
+ case DESC_RATEMCS5:
+ rate_idx = 5;
+ break;
+ case DESC_RATEMCS6:
+ rate_idx = 6;
+ break;
+ case DESC_RATEMCS7:
+ rate_idx = 7;
+ break;
+ case DESC_RATEMCS8:
+ rate_idx = 8;
+ break;
+ case DESC_RATEMCS9:
+ rate_idx = 9;
+ break;
+ case DESC_RATEMCS10:
+ rate_idx = 10;
+ break;
+ case DESC_RATEMCS11:
+ rate_idx = 11;
+ break;
+ case DESC_RATEMCS12:
+ rate_idx = 12;
+ break;
+ case DESC_RATEMCS13:
+ rate_idx = 13;
+ break;
+ case DESC_RATEMCS14:
+ rate_idx = 14;
+ break;
+ case DESC_RATEMCS15:
+ rate_idx = 15;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
+static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus, u8 *pdesc,
+ struct rx_fwinfo_8821ae *p_drvinfo, bool bpacket_match_bssid,
+ bool bpacket_toself, bool b_packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8821ae_t *cck_buf;
+ struct phy_status_rpt *p_phystRpt = (struct phy_status_rpt *)p_drvinfo;
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ char rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool b_is_cck = pstatus->b_is_cck;
+ u8 lan_idx,vga_idx;
+
+ /* Record it for next packet processing */
+ pstatus->b_packet_matchbssid = bpacket_match_bssid;
+ pstatus->b_packet_toself = bpacket_toself;
+ pstatus->b_packet_beacon = b_packet_beacon;
+ pstatus->rx_mimo_signalquality[0] = -1;
+ pstatus->rx_mimo_signalquality[1] = -1;
+
+ if (b_is_cck) {
+ u8 cck_highpwr;
+ u8 cck_agc_rpt;
+ /* CCK Driver info Structure is not the same as OFDM packet. */
+ cck_buf = (struct phy_sts_cck_8821ae_t *)p_drvinfo;
+ cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ /* (1)Hardware does not provide RSSI for CCK */
+ /* (2)PWDB, Average PWDB calculated by
+ * hardware (for rate adaptive) */
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ else
+ cck_highpwr = false;
+
+ lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ vga_idx = (cck_agc_rpt & 0x1f);
+ switch (lan_idx) {
+ case 7:
+ if(vga_idx <= 27)
+ rx_pwr_all = -100 + 2*(27-vga_idx); /*VGA_idx = 27~2*/
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2*(2-vga_idx); /*VGA_idx = 2~0*/
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2*(7-vga_idx); /*VGA_idx = 7~5*/
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2*(7-vga_idx); /*VGA_idx = 7~4*/
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2*(7-vga_idx); /*VGA_idx = 7~0*/
+ break;
+ case 2:
+ if(cck_highpwr)
+ rx_pwr_all = -12 + 2*(5-vga_idx); /*VGA_idx = 5~0*/
+ else
+ rx_pwr_all = -6+ 2*(5-vga_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8-2*vga_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14-2*vga_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ /* CCK gain is smaller than OFDM/MCS gain, so add an
+  * empirically determined offset of 6. */
+ pwdb_all += 6;
+ if(pwdb_all > 100)
+ pwdb_all = 100;
+ /* Adjust the offset so the gain index matches OFDM. */
+ if(pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if(pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if(pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if(pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ if (cck_highpwr == false){
+ if (pwdb_all >= 80)
+ pwdb_all =((pwdb_all-80)<<1)+((pwdb_all-80)>>1)+80;
+ else if((pwdb_all <= 78) && (pwdb_all >= 20))
+ pwdb_all += 3;
+ if(pwdb_all>100)
+ pwdb_all = 100;
+ }
+
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3) Get Signal Quality (EVM) */
+ if (bpacket_match_bssid) {
+ u8 sq;
+
+ if (pstatus->rx_pwdb_all > 40)
+ sq = 100;
+ else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+
+ pstatus->signalquality = sq;
+ pstatus->rx_mimo_signalquality[0] = sq;
+ pstatus->rx_mimo_signalquality[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.brfpath_rxenable[0] =
+ rtlpriv->dm.brfpath_rxenable[1] = true;
+
+ /* (1)Get RSSI for HT rate */
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+
+ /* check which RF RX paths are enabled */
+ if (rtlpriv->dm.brfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+
+ /* Translate DBM to percentage. */
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+
+ /* Get Rx snr value in DB */
+ rtlpriv->stats.rx_snr_db[i] = (long)(p_drvinfo->rxsnr[i] / 2);
+
+ /* Record Signal Strength for next packet */
+ if (bpacket_match_bssid)
+ pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+
+ /* (2)PWDB, Average PWDB calculated by
+ * hardware (for rate adaptive) */
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->rxpower = rx_pwr_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3)EVM of HT rate */
+ if (pstatus->b_is_ht && pstatus->rate >= DESC_RATEMCS8 &&
+ pstatus->rate <= DESC_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+ if (bpacket_match_bssid) {
+ /* Fill value in RFD, Get the first
+ * spatial stream only */
+ if (i == 0)
+ pstatus->signalquality = (u8) (evm & 0xff);
+ pstatus->rx_mimo_signalquality[i] = (u8) (evm & 0xff);
+ }
+ }
+ }
+
+ /* UI BSS list signal strength (in percent), scaled to
+  * the range 0..100. */
+ if (b_is_cck)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+ /*HW antenna diversity*/
+ rtldm->fat_table.antsel_rx_keep_0 = p_phystRpt->ant_sel;
+ rtldm->fat_table.antsel_rx_keep_1 = p_phystRpt->ant_sel_b;
+ rtldm->fat_table.antsel_rx_keep_2 = p_phystRpt->antsel_rx_keep_2;
+
+}
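A minimal standalone sketch (illustration only, with a hypothetical report byte) of how the CCK AGC report above decodes into lan_idx, vga_idx and rx_pwr_all:

#include <stdio.h>

int main(void)
{
	unsigned char cck_agc_rpt = 0xA6;                   /* hypothetical report byte */
	int lan_idx = (cck_agc_rpt & 0xE0) >> 5;            /* 5 */
	int vga_idx = cck_agc_rpt & 0x1f;                   /* 6 */
	int rx_pwr_all = -42 + 2 * (7 - vga_idx);           /* lan_idx == 5 branch */

	rx_pwr_all += 6;                                    /* offset applied after the switch */
	printf("lan_idx=%d vga_idx=%d rx_pwr_all=%d dBm\n",
	       lan_idx, vga_idx, rx_pwr_all);               /* prints -34 dBm */
	return 0;
}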
+#if 0
+static void _rtl8821ae_smart_antenna(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_dm *rtldm= rtl_dm(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse =rtl_efuse(rtl_priv(hw));
+ u8 antsel_tr_mux;
+ struct fast_ant_trainning *pfat_table = &(rtldm->fat_table);
+
+ if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) {
+ if (pfat_table->fat_state == FAT_TRAINING_STATE) {
+ if (pstatus->b_packet_toself) {
+ antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+ (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+ pfat_table->ant_sum_rssi[antsel_tr_mux] += pstatus->rx_pwdb_all;
+ pfat_table->ant_rssi_cnt[antsel_tr_mux]++;
+ }
+ }
+ } else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
+ (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) {
+ if (pstatus->b_packet_toself || pstatus->b_packet_matchbssid) {
+ antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) |
+ (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0;
+ rtl8821ae_dm_ant_sel_statistics(hw, antsel_tr_mux, 0, pstatus->rx_pwdb_all);
+ }
+
+ }
+}
+#endif
+static void _rtl8821ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb, struct rtl_stats *pstatus,
+ u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ u16 fc, type;
+ bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+
+ tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_control);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ psaddr = ieee80211_get_SA(hdr);
+ memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+ b_packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+ (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) && (!pstatus->b_hwerror) &&
+ (!pstatus->b_crc) && (!pstatus->b_icv));
+
+ b_packet_toself = b_packet_matchbssid &&
+ (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+ if (ieee80211_is_beacon(fc))
+ b_packet_beacon = true;
+ else
+ b_packet_beacon = false;
+
+ if (b_packet_beacon && b_packet_matchbssid)
+ rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+ _rtl8821ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+ b_packet_matchbssid, b_packet_toself,
+ b_packet_beacon);
+ /*_rtl8821ae_smart_antenna(hw, pstatus); */
+ rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
+static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ u8 *virtualaddress)
+{
+ u32 dwtmp = 0;
+ memset(virtualaddress, 0, 8);
+
+ SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ if (ptcb_desc->empkt_num == 1)
+ dwtmp = ptcb_desc->empkt_len[0];
+ else {
+ dwtmp = ptcb_desc->empkt_len[0];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[1];
+ }
+ SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+ if (ptcb_desc->empkt_num <= 3)
+ dwtmp = ptcb_desc->empkt_len[2];
+ else {
+ dwtmp = ptcb_desc->empkt_len[2];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[3];
+ }
+ SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 5)
+ dwtmp = ptcb_desc->empkt_len[4];
+ else {
+ dwtmp = ptcb_desc->empkt_len[4];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[5];
+ }
+ SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+ SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ if (ptcb_desc->empkt_num <= 7)
+ dwtmp = ptcb_desc->empkt_len[6];
+ else {
+ dwtmp = ptcb_desc->empkt_len[6];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[7];
+ }
+ SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 9)
+ dwtmp = ptcb_desc->empkt_len[8];
+ else {
+ dwtmp = ptcb_desc->empkt_len[8];
+ dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4;
+ dwtmp += ptcb_desc->empkt_len[9];
+ }
+ SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
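A minimal standalone sketch (illustration only, hypothetical lengths) of the early-mode length packing above: the first length of each pair is rounded up to a 4-byte boundary and a 4-byte chunk header is added before the second length is accumulated:

#include <stdio.h>

/* mirrors: dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; */
static unsigned int em_aligned_len(unsigned int len)
{
	return len + ((len % 4) ? (4 - len % 4) : 0) + 4;
}

int main(void)
{
	/* e.g. empkt_len[0] = 97, empkt_len[1] = 60 */
	printf("LEN0 = %u\n", em_aligned_len(97) + 60);  /* 97 -> 104, total 164 */
	return 0;
}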
+
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rx_fwinfo_8821ae *p_drvinfo;
+ struct ieee80211_hdr *hdr;
+
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+ status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ status->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
+ status->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ status->b_hwerror = (status->b_crc | status->b_icv);
+ status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ status->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ status->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->b_isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ status->macid = GET_RX_DESC_MACID(pdesc);
+ status->b_is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+ status->b_is_cck = RX_HAL_IS_CCK_RATE(status->rate);
+
+ if (GET_RX_STATUS_DESC_RPT_SEL(pdesc))
+ status->packet_report_type = C2H_PACKET;
+ else
+ status->packet_report_type = NORMAL_RX;
+
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->wake_match = BIT(2);
+ else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ status->wake_match = BIT(1);
+ else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ status->wake_match = BIT(0);
+ else
+ status->wake_match = 0;
+
+ if (status->wake_match)
+ RT_TRACE(COMP_RXDESC, DBG_LOUD,
+ ("Get wakeup packet, wake_match=%d\n", status->wake_match));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+#else
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+#endif
+
+ hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
+ + status->rx_bufshift);
+
+ if (status->b_crc)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (status->rx_is40Mhzpacket)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (status->b_is_ht)
+ rx_status->flag |= RX_FLAG_HT;
+
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+
+ /* The hardware sets status->decrypted true if it finds that the
+  * frame is an open data frame or a management frame. However, it
+  * does not decrypt robust management frames (IEEE 802.11w) even
+  * though it still marks them as decrypted, so set such frames
+  * back to undecrypted here and let mac80211 decrypt them in
+  * software. */
+ if (status->decrypted) {
+ if (!hdr) {
+ WARN_ON_ONCE(true);
+ pr_err("decrypted is true but hdr NULL, from skb %p\n",
+ rtl_get_hdr(skb));
+ return false;
+ }
+
+ if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ (ieee80211_has_protected(hdr->frame_control)))
+ rx_status->flag &= ~RX_FLAG_DECRYPTED;
+ else
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+
+ /* rate_idx: index of the data rate in the band's supported
+  * rates, or the MCS index if HT rates are used (RX_FLAG_HT).
+  * Note: this differs from the Windows driver's definition. */
+ rx_status->rate_idx = _rtl8821ae_rate_mapping(hw,
+ status->b_is_ht, status->rate);
+
+ rx_status->mactime = status->timestamp_low;
+ if (phystatus == true) {
+ p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data +
+ status->rx_bufshift);
+
+ _rtl8821ae_translate_rx_signal_stuff(hw,
+ skb, status, pdesc,
+ p_drvinfo);
+ }
+
+ /*rx_status->qual = status->signal; */
+ rx_status->signal = status->recvsignalpower + 10;
+ /*rx_status->noise = -status->noise; */
+ if (status->packet_report_type == TX_REPORT2){
+ status->macid_valid_entry[0] = GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ status->macid_valid_entry[1] = GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ }
+ return true;
+}
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ struct ieee80211_sta *sta = info->control.sta;
+#endif
+/*<delete in kernel end>*/
+ u8 *pdesc = (u8 *) pdesc_tx;
+ u16 seq_number;
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ unsigned int buf_len = 0;
+ unsigned int skb_len = skb->len;
+ u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue);
+ bool b_firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool b_lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ dma_addr_t mapping;
+ u8 bw_40 = 0;
+ u8 short_gi = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ bw_40 = mac->bw_40;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta)
+ bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+ /* reserve 8 bytes for the AMPDU early-mode header */
+ if (rtlhal->b_earlymode_enable) {
+ skb_push(skb, EM_HDR_LEN);
+ memset(skb->data, 0, EM_HDR_LEN);
+ }
+ buf_len = skb->len;
+ mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("DMA mapping error"));
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae));
+ if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+ b_firstseg = true;
+ b_lastseg = true;
+ }
+ if (b_firstseg) {
+ if (rtlhal->b_earlymode_enable) {
+ SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
+ if (ptcb_desc->empkt_num) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num));
+ _rtl8821ae_insert_emcontent(ptcb_desc, (u8 *)(skb->data));
+ }
+ } else {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ }
+
+ /* ptcb_desc->use_driver_rate = true; */
+ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ if (ptcb_desc->hw_rate > DESC_RATEMCS0) {
+ short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+ } else {
+ short_gi = (ptcb_desc->use_shortpreamble) ? 1 :0;
+ }
+ SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->b_rts_enable &&
+ !ptcb_desc->b_cts_enable) ? 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(pdesc,0);
+ SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->b_cts_enable) ? 1 : 0));
+ /* SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->b_rts_stbc) ? 1 : 0));*/
+
+ SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+ /* SET_TX_DESC_RTS_BW(pdesc, 0);*/
+ SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
+ (ptcb_desc->b_rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->b_rts_use_shortgi ? 1 : 0)));
+
+ if(ptcb_desc->btx_enable_sw_calc_duration)
+ SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+ if (bw_40) {
+ if (ptcb_desc->b_packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/*<delete in kernel end>*/
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+/*<delete in kernel start>*/
+#else
+ switch (keyconf->alg) {
+ case ALG_WEP:
+ case ALG_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case ALG_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+#endif
+/*<delete in kernel end>*/
+ }
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0);
+ SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+#if 0
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ SET_TX_DESC_TX_RATE(pdesc, 0x04);
+
+ SET_TX_DESC_RETRY_LIMIT_ENABLE(pdesc, 1);
+ SET_TX_DESC_DATA_RETRY_LIMIT(pdesc, 0x3f);
+#endif
+
+ /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+ /* Set TxRate and RTSRate in the Tx descriptor. This prevents
+  * the initial Tx rate of newly arriving packets from being
+  * overwritten by the retried packet rate. */
+ if (!ptcb_desc->use_driver_rate) {
+ /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
+ /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ }
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ //if (rtlpriv->dm.b_useramask) {
+ if(1){
+ SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ }
+/* if (ieee80211_is_data_qos(fc))
+ SET_TX_DESC_QOS(pdesc, 1);
+*/
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+ }
+ SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+ SET_TX_DESC_BMC(pdesc, 1);
+ }
+
+ rtl8821ae_dm_set_tx_ant_by_tx_info(hw,pdesc,ptcb_desc->mac_id);
+ RT_TRACE(COMP_SEND, DBG_TRACE, ("\n"));
+}
+
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool b_firstseg,
+ bool b_lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 fw_queue = QSLT_BEACON;
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(COMP_SEND, DBG_TRACE,
+ ("DMA mapping error"));
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ SET_TX_DESC_DISABLE_FB(pdesc, 1);
+
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+/*
+ if(IsCtrlNDPA(VirtualAddress) || IsMgntNDPA(VirtualAddress))
+ {
+ SET_TX_DESC_DATA_RETRY_LIMIT_8812(pDesc, 5);
+ SET_TX_DESC_RETRY_LIMIT_ENABLE_8812(pDesc, 1);
+
+ if(IsMgntNDPA(VirtualAddress))
+ {
+ SET_TX_DESC_NDPA_8812(pDesc, 1);
+ SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+ }
+ else
+ {
+ SET_TX_DESC_NDPA_8812(pDesc, 2);
+ SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb));
+ }
+ }*/
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+
+ SET_TX_DESC_MACID(pdesc, 0);
+
+ SET_TX_DESC_OWN(pdesc, 1);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C Tx Cmd Content\n",
+ pdesc, TX_DESC_SIZE);
+}
+
+void rtl8821ae_set_desc(struct ieee80211_hw * hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+{
+ if (istx == true) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d"
+ " not process\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+}
+
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+ if (istx == true) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+ return ret;
+}
+
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8) rtl8821ae_get_desc(entry, true, HW_DESC_OWN);
+
+ /*
+  * A beacon packet only uses the first descriptor by default,
+  * and the OWN bit may not be cleared by the hardware.
+  */
+ if (own)
+ return false;
+ else
+ return true;
+}
+
+
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (hw_queue == BEACON_QUEUE) {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ } else {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+ }
+}
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.h b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
new file mode 100644
index 000000000000..da93e5c7ece7
--- /dev/null
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.h
@@ -0,0 +1,641 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8821AE_TRX_H__
+#define __RTL8821AE_TRX_H__
+
+#define TX_DESC_SIZE 40
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 40
+#define CRCLENGTH 4
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
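These accessors appear to be thin wrappers around SET_BITS_TO_LE_4BYTE()/LE_BITS_TO_4BYTE() from wifi.h, which read-modify-write a bit field inside a little-endian 32-bit word of the descriptor. A plain-C sketch of the assumed behaviour, with hypothetical names:

static void set_le32_bits(unsigned char *p, unsigned int shift,
			  unsigned int len, unsigned int val)
{
	unsigned int w = p[0] | (p[1] << 8) | (p[2] << 16) |
			 ((unsigned int)p[3] << 24);
	unsigned int mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

	w = (w & ~(mask << shift)) | ((val & mask) << shift);
	p[0] = w & 0xff; p[1] = (w >> 8) & 0xff;
	p[2] = (w >> 16) & 0xff; p[3] = (w >> 24) & 0xff;
}

/* e.g. SET_TX_DESC_PKT_SIZE(pdesc, 1500) would behave roughly like
 * set_le32_bits(pdesc + 0, 0, 16, 1500). */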
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+
+#define SET_TX_DESC_PAID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+#define SET_TX_DESC_TX_ANT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_TO_LE_1BYTE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+
+#define GET_RX_DESC_SEQ(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+
+/* TX report 2 format in Rx desc*/
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
+do { \
+ if (_size > TX_DESC_NEXT_DESC_OFFSET) \
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
+ else \
+ memset(__pdesc, 0, _size); \
+} while (0)
+
+#define RX_HAL_IS_CCK_RATE(rxmcs)\
+ (rxmcs == DESC_RATE1M ||\
+ rxmcs == DESC_RATE2M ||\
+ rxmcs == DESC_RATE5_5M ||\
+ rxmcs == DESC_RATE11M)
+
+#define IS_LITTLE_ENDIAN 1
+
+struct phy_rx_agc_info_t {
+ #if IS_LITTLE_ENDIAN
+ u8 gain:7,trsw:1;
+ #else
+ u8 trsw:1,gain:7;
+ #endif
+};
+struct phy_status_rpt{
+ struct phy_rx_agc_info_t path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;//ch_corr_msb;
+ u8 noise_power_db_msb;
+ u8 path_cfotail[2];
+ u8 pcts_mask[2];
+ u8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ u8 sig_evm;
+ u8 rsvd_3;
+#if IS_LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8821ae {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_8821ae {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 rsvd0:2;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 agg_en:1;
+ u32 rdg_en:1;
+ u32 bar_retryht:2;
+ u32 agg_break:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 bt_int:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 cpu_handle:1;
+ u32 tag1:1;
+ u32 trigger_int:1;
+ u32 hwseq_en:1;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_ssn:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 pwr_status:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 sw_offset30:8;
+ u32 sw_offset31:4;
+ u32 rsvd1:1;
+ u32 antsel_c:1;
+ u32 null_0:1;
+ u32 null_1:1;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8821ae {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl8821ae_set_desc(struct ieee80211_hw * hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+#endif
diff --git a/drivers/staging/rtl8821ae/stats.c b/drivers/staging/rtl8821ae/stats.c
new file mode 100644
index 000000000000..a20c0f8f65ec
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.c
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "stats.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include <linux/export.h>
+#endif
+
+u8 rtl_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return (100 + antpower);
+}
+//EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
+
+u8 rtl_evm_db_to_percentage(char value)
+{
+ char ret_val;
+ ret_val = value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+
+ return ret_val;
+}
+//EXPORT_SYMBOL(rtl_evm_db_to_percentage);
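A few worked values for the two conversions above (standalone sanity-check sketch only):

#include <stdio.h>

int main(void)
{
	/* rtl_query_rxpwrpercentage(): <= -100 dBm -> 0, >= 0 dBm -> 100,
	 * otherwise 100 + antpower, e.g. -50 dBm -> 50%. */
	printf("-50 dBm -> %d%%\n", 100 + (-50));

	/* rtl_evm_db_to_percentage(): clamp to [-33, 0], negate, times 3,
	 * with 99 rounded up to 100, e.g. -25 dB -> 75%, -33 dB -> 100%. */
	printf("-25 dB -> %d%%\n", -(-25) * 3);
	return 0;
}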
+
+long rtl_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+
+ return retsig;
+}
+//EXPORT_SYMBOL(rtl_signal_scale_mapping);
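Spot checks for the piecewise mapping above (standalone sketch, illustration only):

#include <assert.h>

int main(void)
{
	assert(90 + (100 - 60) / 4 == 100);     /* currsig = 100 -> 100 */
	assert(78 + (50 - 40) / 2 == 83);       /* currsig = 50  -> 83  */
	assert(42 + ((10 - 5) * 2) / 3 == 45);  /* currsig = 10  -> 45  */
	return 0;
}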
+
+void rtl_process_ui_rssi(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+ u32 last_rssi, tmpval;
+
+ if (!pstatus->b_packet_toself && !pstatus->b_packet_beacon)
+ return;
+
+ rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all;
+ rtlpriv->stats.rssi_calculate_cnt++;
+
+ if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = rtlpriv->stats.ui_rssi.elements[
+ rtlpriv->stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+ rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
+ pstatus->signalstrength;
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength = rtl_translate_todbm(hw,
+ (u8) tmpval);
+ pstatus->rssi = rtlpriv->stats.signal_strength;
+
+ if (pstatus->b_is_cck)
+ return;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstatus->rx_mimo_signalstrength[rfpath];
+
+ }
+ if (pstatus->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath];
+ rtlpriv->stats.rx_evm_dbm[rfpath] =
+ pstatus->rx_mimo_evm_dbm[rfpath];
+ rtlpriv->stats.rx_cfo_short[rfpath] =
+ pstatus->cfo_short[rfpath];
+ rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath];
+ }
+}
+
+static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
+ if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+ 5 + pstatus->recvsignalpower + weighting) / 6;
+}
+
+static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv = NULL;
+ struct ieee80211_sta *sta = NULL;
+ long undecorated_smoothed_pwdb;
+
+ rcu_read_lock();
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ sta = rtl_find_sta(hw, pstatus->psaddr);
+
+ /* adhoc or ap mode */
+ if (sta) {
+ drv_priv = (struct rtl_sta_info *) sta->drv_priv;
+ undecorated_smoothed_pwdb =
+ drv_priv->rssi_stat.undecorated_smoothed_pwdb;
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ }
+
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstatus->rx_pwdb_all;
+ if (pstatus->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undecorated_smoothed_pwdb = undecorated_smoothed_pwdb + 1;
+ } else {
+ undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+
+ if(sta) {
+ drv_priv->rssi_stat.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ } else {
+ rtlpriv->dm.undecorated_smoothed_pwdb = undecorated_smoothed_pwdb;
+ }
+ rcu_read_unlock();
+
+ rtl_update_rxsignalstatistics(hw, pstatus);
+}
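Both the per-path RSSI smoothing and the PWDB smoothing above use the same exponential moving average with RX_SMOOTH_FACTOR (20). A minimal sketch of that update step (illustration only):

#define RX_SMOOTH_FACTOR 20

static long smooth_pwdb(long old, long sample)
{
	long next = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR;

	/* the driver adds one when the sample is rising so the average can
	 * still climb despite the truncating integer division */
	if (sample > old)
		next += 1;
	return next;
}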
+
+static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm, n_stream, tmpval;
+
+ if (pstatus->signalquality == 0)
+ return;
+
+ if (rtlpriv->stats.ui_link_quality.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_link_quality.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index];
+ rtlpriv->stats.ui_link_quality.total_val -= last_evm;
+ }
+ rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
+ rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index++] =
+ pstatus->signalquality;
+ if (rtlpriv->stats.ui_link_quality.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.ui_link_quality.index = 0;
+ tmpval = rtlpriv->stats.ui_link_quality.total_val /
+ rtlpriv->stats.ui_link_quality.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ for (n_stream = 0; n_stream < 2; n_stream++) {
+ if (pstatus->rx_mimo_signalquality[n_stream] != -1) {
+ if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
+ rtlpriv->stats.rx_evm_percentage[n_stream] =
+ pstatus->rx_mimo_signalquality[n_stream];
+ }
+ rtlpriv->stats.rx_evm_percentage[n_stream] =
+ ((rtlpriv->stats.rx_evm_percentage[n_stream]
+ * (RX_SMOOTH_FACTOR - 1)) +
+ (pstatus->rx_mimo_signalquality[n_stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+ struct rtl_stats *pstatus)
+{
+
+ if (!pstatus->b_packet_matchbssid)
+ return;
+
+ rtl_process_ui_rssi(hw, pstatus);
+ rtl_process_pwdb(hw, pstatus);
+ rtl_process_ui_link_quality(hw, pstatus);
+}
+//EXPORT_SYMBOL(rtl_process_phyinfo);
diff --git a/drivers/staging/rtl8821ae/stats.h b/drivers/staging/rtl8821ae/stats.h
new file mode 100644
index 000000000000..d69d0cfd7e14
--- /dev/null
+++ b/drivers/staging/rtl8821ae/stats.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_STATS_H__
+#define __RTL_STATS_H__
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
+
+/* Rx smooth factor */
+#define RX_SMOOTH_FACTOR 20
+
+u8 rtl_query_rxpwrpercentage(char antpower);
+u8 rtl_evm_db_to_percentage(char value);
+long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
+void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
+ struct rtl_stats *pstatus);
+
+#endif
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
new file mode 100644
index 000000000000..17a9d9f8781d
--- /dev/null
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -0,0 +1,2534 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_WIFI_H__
+#define __RTL_WIFI_H__
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "debug.h"
+
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT(28)
+#define RF_CHANGE_BY_PS BIT(29)
+#define RF_CHANGE_BY_HW BIT(30)
+#define RF_CHANGE_BY_SW BIT(31)
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+#define IQK_THRESHOLD 8
+
+#define MAX_KEY_LEN 61
+#define KEY_BUF_SIZE 5
+
+/* QoS related. */
+/*aci: 0x00 Best Effort*/
+/*aci: 0x01 Background*/
+/*aci: 0x10 Video*/
+/*aci: 0x11 Voice*/
+/*Max: define total number.*/
+#define AC0_BE 0
+#define AC1_BK 1
+#define AC2_VI 2
+#define AC3_VO 3
+#define AC_MAX 4
+#define QOS_QUEUE_NUM 4
+#define RTL_MAC80211_NUM_QUEUE 5
+
+#define QBSS_LOAD_SIZE 5
+#define MAX_WMMELE_LENGTH 64
+
+#define TOTAL_CAM_ENTRY 32
+
+/*slot time for 11g. */
+#define RTL_SLOT_TIME_9 9
+#define RTL_SLOT_TIME_20 20
+
+/*related with tcp/ip. */
+/*if_ether.h*/
+#define ETH_P_PAE 0x888E /*Port Access Entity
+ *(IEEE 802.1X) */
+#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+#define SNAP_SIZE 6
+#define PROTOC_TYPE_SIZE 2
+
+/*related with 802.11 frame*/
+#define MAC80211_3ADDR_LEN 24
+#define MAC80211_4ADDR_LEN 30
+
+#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max
+ * 2.4G channel number */
+#define CHANNEL_MAX_NUMBER_2G 14
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+ *"phy_GetChnlGroup8812A" and
+ * "Hal_ReadTxPowerInfo8812A"*/
+#define CHANNEL_MAX_NUMBER_5G_80M 7
+#define CHANNEL_GROUP_MAX (3 + 9) /* 3 groups for 2.4G (ch1~3,
+ * ch4~9, ch10~14) plus 9 for 5G */
+#define MAX_PG_GROUP 13
+#define CHANNEL_GROUP_MAX_2G 3
+#define CHANNEL_GROUP_IDX_5GL 3
+#define CHANNEL_GROUP_IDX_5GM 6
+#define CHANNEL_GROUP_IDX_5GH 9
+#define CHANNEL_GROUP_MAX_5G 9
+#define CHANNEL_MAX_NUMBER_2G 14
+#define AVG_THERMAL_NUM 8
+#define AVG_THERMAL_NUM_92E 4
+#define AVG_THERMAL_NUM_88E 4
+#define AVG_THERMAL_NUM_8723BE 4
+#define MAX_TID_COUNT 9
+#define MAX_NUM_RATES 264
+
+/*for 88E use*/
+/*It must always be set to 4, otherwise the efuse table read sequence will be wrong.*/
+#define MAX_TX_COUNT 4
+#define MAX_RF_PATH 4
+#define MAX_CHNL_GROUP_24G 6
+#define MAX_CHNL_GROUP_5G 14
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 9
+
+#define TX_PWR_BY_RATE_NUM_BAND 2
+#define TX_PWR_BY_RATE_NUM_RF 4
+#define TX_PWR_BY_RATE_NUM_SECTION 12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
+
+#define DELTA_SWINGIDX_SIZE 30
+#define BAND_NUM 3
+/*Currently just for 8192ee;
+ *not working yet, keep it 0*/
+#define DMA_IS_64BIT 0
+#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+struct txpower_info_2g {
+ u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ /*If only one tx, only BW20 and OFDM are used.*/
+ u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+struct txpower_info_5g {
+ u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G];
+ /*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/
+ u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+
+/* for early mode */
+#define EM_HDR_LEN 8
+#define FCS_LEN 4
+
+#define MAX_VIRTUAL_MAC 1
+
+enum rf_tx_num {
+ RF_1TX = 0,
+ RF_2TX,
+ RF_MAX_TX_NUM,
+ RF_TX_NUM_NONIMPLEMENT,
+};
+
+enum rate_section {
+ CCK = 0,
+ OFDM,
+ HT_MCS0_MCS7,
+ HT_MCS8_MCS15,
+ VHT_1SSMCS0_1SSMCS9,
+ VHT_2SSMCS0_2SSMCS9,
+};
+
+enum intf_type {
+ INTF_PCI = 0,
+ INTF_USB = 1,
+};
+
+enum radio_path {
+ RF90_PATH_A = 0,
+ RF90_PATH_B = 1,
+ RF90_PATH_C = 2,
+ RF90_PATH_D = 3,
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+enum rtl_status {
+ RTL_STATUS_INTERFACE_START = 0,
+};
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL8192U,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723AE,
+ HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8723BE,
+ HARDWARE_TYPE_RTL8192EE,
+ HARDWARE_TYPE_RTL8821AE,
+ HARDWARE_TYPE_RTL8812AE,
+ /* keep it last */
+ HARDWARE_TYPE_NUM
+};
+
+enum scan_operation_backup_opt {
+ SCAN_OPT_BACKUP_BAND0 = 0,
+ SCAN_OPT_BACKUP_BAND1,
+ SCAN_OPT_RESTORE,
+ SCAN_OPT_MAX
+};
+
+/*RF state.*/
+enum rf_pwrstate {
+ ERFON,
+ ERFSLEEP,
+ ERFOFF
+};
+
+struct bb_reg_def {
+ u32 rfintfs;
+ u32 rfintfi;
+ u32 rfintfo;
+ u32 rfintfe;
+ u32 rf3wire_offset;
+ u32 rflssi_select;
+ u32 rftxgain_stage;
+ u32 rfhssi_para1;
+ u32 rfhssi_para2;
+ u32 rfswitch_control;
+ u32 rfagc_control1;
+ u32 rfagc_control2;
+ u32 rfrxiq_imbalance;
+ u32 rfrx_afe;
+ u32 rftxiq_imbalance;
+ u32 rftx_afe;
+ u32 rflssi_readback;
+ u32 rflssi_readbackpi;
+};
+
+enum io_type {
+ IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+ IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+ IO_CMD_RESUME_DM_BY_SCAN = 2,
+};
+
+enum hw_variables {
+ HW_VAR_ETHER_ADDR,
+ HW_VAR_MULTICAST_REG,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_BSSID,
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_SECURITY_CONF,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_ATIM_WINDOW,
+ HW_VAR_LISTEN_INTERVAL,
+ HW_VAR_CS_COUNTER,
+ HW_VAR_DEFAULTKEY0,
+ HW_VAR_DEFAULTKEY1,
+ HW_VAR_DEFAULTKEY2,
+ HW_VAR_DEFAULTKEY3,
+ HW_VAR_SIFS,
+ HW_VAR_DIFS,
+ HW_VAR_EIFS,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_CW_CONFIG,
+ HW_VAR_CW_VALUES,
+ HW_VAR_RATE_FALLBACK_CONTROL,
+ HW_VAR_CONTENTION_WINDOW,
+ HW_VAR_RETRY_COUNT,
+ HW_VAR_TR_SWITCH,
+ HW_VAR_COMMAND,
+ HW_VAR_WPA_CONFIG,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_SHORTGI_DENSITY,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_MCS_RATE_AVAILABLE,
+ HW_VAR_AC_PARAM,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_DIS_Req_Qsize,
+ HW_VAR_CCX_CHNL_LOAD,
+ HW_VAR_CCX_NOISE_HISTOGRAM,
+ HW_VAR_CCX_CLM_NHM,
+ HW_VAR_TxOPLimit,
+ HW_VAR_TURBO_MODE,
+ HW_VAR_RF_STATE,
+ HW_VAR_RF_OFF_BY_HW,
+ HW_VAR_BUS_SPEED,
+ HW_VAR_SET_DEV_POWER,
+
+ HW_VAR_RCR,
+ HW_VAR_RATR_0,
+ HW_VAR_RRSR,
+ HW_VAR_CPU_RST,
+ HW_VAR_CECHK_BSSID,
+ HW_VAR_LBK_MODE,
+ HW_VAR_AES_11N_FIX,
+ HW_VAR_USB_RX_AGGR,
+ HW_VAR_USER_CONTROL_TURBO_MODE,
+ HW_VAR_RETRY_LIMIT,
+ HW_VAR_INIT_TX_RATE,
+ HW_VAR_TX_RATE_REG,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_AUTOLOAD_STATUS,
+ HW_VAR_RF_2R_DISABLE,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_FW_PSMODE_STATUS,
+ HW_VAR_RESUME_CLK_ON,
+ HW_VAR_FW_LPS_ACTION,
+ HW_VAR_1X1_RECV_COMBINE,
+ HW_VAR_STOP_SEND_BEACON,
+ HW_VAR_TSF_TIMER,
+ HW_VAR_IO_CMD,
+
+ HW_VAR_RF_RECOVERY,
+ HW_VAR_H2C_FW_UPDATE_GTK,
+ HW_VAR_WF_MASK,
+ HW_VAR_WF_CRC,
+ HW_VAR_WF_IS_MAC_ADDR,
+ HW_VAR_H2C_FW_OFFLOAD,
+ HW_VAR_RESET_WFCRC,
+
+ HW_VAR_HANDLE_FW_C2H,
+ HW_VAR_DL_FW_RSVD_PAGE,
+ HW_VAR_AID,
+ HW_VAR_HW_SEQ_ENABLE,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_BCN_VALID,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_DUAL_TSF_RST,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_INT_MIGRATION,
+ HW_VAR_INT_AC,
+ HW_VAR_RF_TIMING,
+
+ HAL_DEF_WOWLAN,
+ HW_VAR_MRC,
+ HW_VAR_KEEP_ALIVE,
+ HW_VAR_NAV_UPPER,
+};
+
+enum rt_media_status {
+ RT_MEDIA_DISCONNECT = 0,
+ RT_MEDIA_CONNECT = 1
+};
+
+enum rt_oem_id {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9,
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_819x_ALPHA = 15,
+ RT_CID_819x_Sitecom = 16,
+ RT_CID_CCX = 17,
+ RT_CID_819x_Lenovo = 18,
+ RT_CID_819x_QMI = 19,
+ RT_CID_819x_Edimax_Belkin = 20,
+ RT_CID_819x_Sercomm_Belkin = 21,
+ RT_CID_819x_CAMEO1 = 22,
+ RT_CID_819x_MSI = 23,
+ RT_CID_819x_Acer = 24,
+ RT_CID_819x_HP = 27,
+ RT_CID_819x_CLEVO = 28,
+ RT_CID_819x_Arcadyan_Belkin = 29,
+ RT_CID_819x_SAMSUNG = 30,
+ RT_CID_819x_WNC_COREGA = 31,
+ RT_CID_819x_Foxcoon = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+};
+
+enum hw_descs {
+ HW_DESC_OWN,
+ HW_DESC_RXOWN,
+ HW_DESC_TX_NEXTDESC_ADDR,
+ HW_DESC_TXBUFF_ADDR,
+ HW_DESC_RXBUFF_ADDR,
+ HW_DESC_RXPKT_LEN,
+ HW_DESC_RXERO,
+ HW_DESC_RX_PREPARE,
+};
+
+enum prime_sc {
+ PRIME_CHNL_OFFSET_DONT_CARE = 0,
+ PRIME_CHNL_OFFSET_LOWER = 1,
+ PRIME_CHNL_OFFSET_UPPER = 2,
+};
+
+enum rf_type {
+ RF_1T1R = 0,
+ RF_1T2R = 1,
+ RF_2T2R = 2,
+ RF_2T2R_GREEN = 3,
+};
+
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_20_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
+};
+
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
+Cipher Suites Encryption Algorithms */
+enum rt_enc_alg {
+ NO_ENCRYPTION = 0,
+ WEP40_ENCRYPTION = 1,
+ TKIP_ENCRYPTION = 2,
+ RSERVED_ENCRYPTION = 3,
+ AESCCMP_ENCRYPTION = 4,
+ WEP104_ENCRYPTION = 5,
+ AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
+};
+
+enum rtl_hal_state {
+ _HAL_STATE_STOP = 0,
+ _HAL_STATE_START = 1,
+};
+
+enum rtl_var_map {
+ /*reg map */
+ SYS_ISO_CTRL = 0,
+ SYS_FUNC_EN,
+ SYS_CLK,
+ MAC_RCR_AM,
+ MAC_RCR_AB,
+ MAC_RCR_ACRC32,
+ MAC_RCR_ACF,
+ MAC_RCR_AAP,
+ MAC_HIMR,
+ MAC_HIMRE,
+ MAC_HSISR,
+
+ /*efuse map */
+ EFUSE_TEST,
+ EFUSE_CTRL,
+ EFUSE_CLK,
+ EFUSE_CLK_CTRL,
+ EFUSE_PWC_EV12V,
+ EFUSE_FEN_ELDR,
+ EFUSE_LOADER_CLK_EN,
+ EFUSE_ANA8M,
+ EFUSE_HWSET_MAX_SIZE,
+ EFUSE_MAX_SECTION_MAP,
+ EFUSE_REAL_CONTENT_SIZE,
+ EFUSE_OOB_PROTECT_BYTES_LEN,
+ EFUSE_ACCESS,
+ /*CAM map */
+ RWCAM,
+ WCAMI,
+ RCAMO,
+ CAMDBG,
+ SECR,
+ SEC_CAM_NONE,
+ SEC_CAM_WEP40,
+ SEC_CAM_TKIP,
+ SEC_CAM_AES,
+ SEC_CAM_WEP104,
+
+ /*IMR map */
+ RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */
+ RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */
+ RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */
+ RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
+ RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
+ RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
+ RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
+ RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
+ RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
+ RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
+ RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
+ RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
+ RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
+ RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
+ RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
+ RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
+ RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
+ RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
+ RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */
+ RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
+ RTL_IMR_RDU, /*Receive Descriptor Unavailable */
+ RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
+ RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
+ RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
+ RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
+ RTL_IMR_TBDOK, /*Transmit Beacon OK interrupt */
+ RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
+ RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
+ RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
+ RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */
+ RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
+ RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
+ RTL_IMR_ROK, /*Receive DMA OK Interrupt */
+ RTL_IMR_HSISR_IND, /*HSISR Interrupt*/
+ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+ * RTL_IMR_TBDER) */
+ RTL_IMR_C2HCMD, /*fw interrupt*/
+
+ /*CCK Rates, TxHT = 0 */
+ RTL_RC_CCK_RATE1M,
+ RTL_RC_CCK_RATE2M,
+ RTL_RC_CCK_RATE5_5M,
+ RTL_RC_CCK_RATE11M,
+
+ /*OFDM Rates, TxHT = 0 */
+ RTL_RC_OFDM_RATE6M,
+ RTL_RC_OFDM_RATE9M,
+ RTL_RC_OFDM_RATE12M,
+ RTL_RC_OFDM_RATE18M,
+ RTL_RC_OFDM_RATE24M,
+ RTL_RC_OFDM_RATE36M,
+ RTL_RC_OFDM_RATE48M,
+ RTL_RC_OFDM_RATE54M,
+
+ RTL_RC_HT_RATEMCS7,
+ RTL_RC_HT_RATEMCS15,
+
+ /*keep it last */
+ RTL_VAR_MAP_MAX,
+};
+
+/*Firmware PS mode for control LPS.*/
+enum _fw_ps_mode {
+ FW_PS_ACTIVE_MODE = 0,
+ FW_PS_MIN_MODE = 1,
+ FW_PS_MAX_MODE = 2,
+ FW_PS_DTIM_MODE = 3,
+ FW_PS_VOIP_MODE = 4,
+ FW_PS_UAPSD_WMM_MODE = 5,
+ FW_PS_UAPSD_MODE = 6,
+ FW_PS_IBSS_MODE = 7,
+ FW_PS_WWLAN_MODE = 8,
+ FW_PS_PM_Radio_Off = 9,
+ FW_PS_PM_Card_Disable = 10,
+};
+
+enum rt_psmode {
+ EACTIVE, /*Active/Continuous access. */
+ EMAXPS, /*Max power save mode. */
+ EFASTPS, /*Fast power save mode. */
+ EAUTOPS, /*Auto power save mode. */
+};
+
+/*LED related.*/
+enum led_ctl_mode {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+};
+
+enum rtl_led_pin {
+ LED_PIN_GPIO0,
+ LED_PIN_LED0,
+ LED_PIN_LED1,
+ LED_PIN_LED2
+};
+
+/*QoS related.*/
+/*acm implementation method.*/
+enum acm_method {
+ eAcmWay0_SwAndHw = 0,
+ eAcmWay1_HW = 1,
+ eAcmWay2_SW = 2,
+};
+
+enum macphy_mode {
+ SINGLEMAC_SINGLEPHY = 0,
+ DUALMAC_DUALPHY,
+ DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+ BAND_ON_2_4G = 0,
+ BAND_ON_5G,
+ BAND_ON_BOTH,
+ BANDMAX
+};
+
+/*aci/aifsn Field.
+Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
+union aci_aifsn {
+ u8 char_data;
+
+ struct {
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 reserved:1;
+ } f; /* Field */
+};
+
+/*mlme related.*/
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = 0x01,
+ WIRELESS_MODE_B = 0x02,
+ WIRELESS_MODE_G = 0x04,
+ WIRELESS_MODE_AUTO = 0x08,
+ WIRELESS_MODE_N_24G = 0x10,
+ WIRELESS_MODE_N_5G = 0x20,
+ WIRELESS_MODE_AC_5G = 0x40,
+ WIRELESS_MODE_AC_24G = 0x80
+};
+
+enum ratr_table_mode {
+ RATR_INX_WIRELESS_NGB = 0, /* BGN 40 MHz 2SS 1SS */
+ RATR_INX_WIRELESS_NG = 1, /* GN or N */
+ RATR_INX_WIRELESS_NB = 2, /* BGN 20 MHz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_5N = 8,
+ RATR_INX_WIRELESS_AC_24N = 9,
+};
+
+enum rtl_link_state {
+ MAC80211_NOLINK = 0,
+ MAC80211_LINKING = 1,
+ MAC80211_LINKED = 2,
+ MAC80211_LINKED_SCANNING = 3,
+};
+
+enum act_category {
+ ACT_CAT_QOS = 1,
+ ACT_CAT_DLS = 2,
+ ACT_CAT_BA = 3,
+ ACT_CAT_HT = 7,
+ ACT_CAT_WMM = 17,
+};
+
+enum ba_action {
+ ACT_ADDBAREQ = 0,
+ ACT_ADDBARSP = 1,
+ ACT_DELBA = 2,
+};
+
+enum rt_polarity_ctl {
+ RT_POLARITY_LOW_ACT = 0,
+ RT_POLARITY_HIGH_ACT = 1,
+};
+
+
+struct octet_string {
+ u8 *octet;
+ u16 length;
+};
+
+struct rtl_hdr_3addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtl_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct rtl_probe_rsp {
+ struct rtl_hdr_3addr header;
+ u32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ /*SSID, supported rates, FH params, DS params,
+ CF params, IBSS params, TIM (if beacon), RSN */
+ struct rtl_info_element info_element[0];
+} __packed;
+
+/*LED related.*/
+/*ledpin identifies how to implement this SW LED.*/
+struct rtl_led {
+ void *hw;
+ enum rtl_led_pin ledpin;
+ bool b_ledon;
+};
+
+struct rtl_led_ctl {
+ bool bled_opendrain;
+ struct rtl_led sw_led0;
+ struct rtl_led sw_led1;
+};
+
+struct rtl_qos_parameters {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifs;
+ u8 flag;
+ __le16 tx_op;
+} __packed;
+
+struct rt_smooth_data {
+ u32 elements[100]; /*array to store values */
+ u32 index; /*index to current array to store */
+ u32 total_num; /*num of valid elements */
+ u32 total_val; /*sum of valid elements */
+};
+
+struct rtl_ht_agg {
+ u16 txq_id;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+ u8 agg_state;
+ u8 rx_agg_state;
+};
+
+struct rtl_tid_data {
+ u16 seq_number;
+ struct rtl_ht_agg agg;
+};
+
+struct rssi_sta {
+ long undecorated_smoothed_pwdb;
+};
+
+struct rtl_sta_info {
+ struct list_head list;
+ u8 ratr_index;
+ u8 wireless_mode;
+ u8 mimo_ps;
+ u8 mac_addr[6];
+ struct rtl_tid_data tids[MAX_TID_COUNT];
+
+ /* only used for AP, ad-hoc or mesh */
+ struct rssi_sta rssi_stat;
+} __packed;
+
+#ifdef VIF_TODO
+struct rtl_vif {
+ unsigned int id;
+ /* struct ieee80211_vif __rcu *vif; */
+ struct ieee80211_vif *vif;
+};
+
+struct rtl_vif_info {
+ struct list_head list;
+ bool active;
+ unsigned int id;
+ struct sk_buff *beacon;
+ bool enable_beacon;
+};
+
+struct vif_priv {
+ struct list_head vif_list;
+
+ /* interface mode settings */
+ unsigned long vif_bitmap;
+ unsigned int vifs;
+ struct rtl_vif vif[MAX_VIRTUAL_MAC];
+
+ /* beaconing */
+ spinlock_t beacon_lock;
+ unsigned int global_pretbtt;
+ unsigned int global_beacon_int;
+ /* struct rtl_vif_info __rcu *beacon_iter; */
+ struct rtl_vif_info *beacon_iter;
+ unsigned int beacon_enabled;
+};
+#endif
+
+struct false_alarm_statistics {
+ u32 cnt_parity_fail;
+ u32 cnt_rate_illegal;
+ u32 cnt_crc8_fail;
+ u32 cnt_mcs_fail;
+ u32 cnt_fast_fsync_fail;
+ u32 cnt_sb_search_fail;
+ u32 cnt_ofdm_fail;
+ u32 cnt_cck_fail;
+ u32 cnt_all;
+ u32 cnt_ofdm_cca;
+ u32 cnt_cck_cca;
+ u32 cnt_cca_all;
+ u32 cnt_bw_usc;
+ u32 cnt_bw_lsc;
+};
+
+struct init_gain {
+ u8 xaagccore1;
+ u8 xbagccore1;
+ u8 xcagccore1;
+ u8 xdagccore1;
+ u8 cca;
+
+};
+
+struct wireless_stats {
+ unsigned long txbytesunicast;
+ unsigned long txbytesmulticast;
+ unsigned long txbytesbroadcast;
+ unsigned long rxbytesunicast;
+
+ long rx_snr_db[4];
+ /*Correct smoothed ss in dBm, only used
+ in driver to report real power now. */
+ long recv_signal_power;
+ long signal_quality;
+ long last_sigstrength_inpercent;
+
+ u32 rssi_calculate_cnt;
+ u32 pwdb_all_cnt;
+
+ /*Transformed, in dBm. Beautified signal
+ strength for UI, not correct. */
+ long signal_strength;
+
+ u8 rx_rssi_percentage[4];
+ u8 rx_evm_dbm[4];
+ u8 rx_evm_percentage[2];
+
+ u16 rx_cfo_short[4];
+ u16 rx_cfo_tail[4];
+
+ struct rt_smooth_data ui_rssi;
+ struct rt_smooth_data ui_link_quality;
+};
+
+struct rate_adaptive {
+ u8 rate_adaptive_disabled;
+ u8 ratr_state;
+ u16 reserve;
+
+ u32 high_rssi_thresh_for_ra;
+ u32 high2low_rssi_thresh_for_ra;
+ u8 low2high_rssi_thresh_for_ra;
+ u32 low_rssi_thresh_for_ra;
+ u32 upper_rssi_threshold_ratr;
+ u32 middleupper_rssi_threshold_ratr;
+ u32 middle_rssi_threshold_ratr;
+ u32 middlelow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr;
+ u32 ultralow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr_40m;
+ u32 low_rssi_threshold_ratr_20m;
+ u8 ping_rssi_enable;
+ u32 ping_rssi_ratr;
+ u32 ping_rssi_thresh_for_ra;
+ u32 last_ratr;
+ u8 pre_ratr_state;
+ u8 ldpc_thres;
+ bool use_ldpc;
+ bool lower_rts_rate;
+ bool is_special_data;
+};
+
+struct regd_pair_mapping {
+ u16 reg_dmnenum;
+ u16 reg_5ghz_ctl;
+ u16 reg_2ghz_ctl;
+};
+
+struct dynamic_primary_cca {
+ u8 pricca_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 dup_rts_flag;
+ u8 monitor_flag;
+ u8 ch_offset;
+ u8 mf_state;
+};
+
+struct rtl_regulatory {
+ char alpha2[2];
+ u16 country_code;
+ u16 max_power_level;
+ u32 tp_scale;
+ u16 current_rd;
+ u16 current_rd_ext;
+ int16_t power_limit;
+ struct regd_pair_mapping *regpair;
+};
+
+struct rtl_rfkill {
+ bool rfkill_state; /*0 is off, 1 is on */
+};
+
+/*for P2P PS**/
+#define P2P_MAX_NOA_NUM 2
+
+enum p2p_role {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum p2p_ps_state {
+ P2P_PS_DISABLE = 0,
+ P2P_PS_ENABLE = 1,
+ P2P_PS_SCAN = 2,
+ P2P_PS_SCAN_DONE = 3,
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
+};
+
+enum p2p_ps_mode {
+ P2P_PS_NONE = 0,
+ P2P_PS_CTWINDOW = 1,
+ P2P_PS_NOA = 2,
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
+};
+
+struct rtl_p2p_ps_info {
+ enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */
+ enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */
+ u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+ /* Client traffic window. A period of time in TU after TBTT. */
+ u8 ctwindow;
+ u8 opp_ps; /* opportunistic power save. */
+ u8 noa_num; /* number of NoA descriptor in P2P IE. */
+ /* Count for owner, Type of client. */
+ u8 noa_count_type[P2P_MAX_NOA_NUM];
+ /* Max duration for owner, preferred or
+ * min acceptable duration for client. */
+ u32 noa_duration[P2P_MAX_NOA_NUM];
+ /* Length of interval for owner, preferred or
+ * max acceptable interval of client. */
+ u32 noa_interval[P2P_MAX_NOA_NUM];
+ /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+ u32 noa_start_time[P2P_MAX_NOA_NUM];
+};
+
+struct p2p_ps_offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1;
+ u8 discovery:1;
+ u8 reserved:1;
+};
+
+#define IQK_MATRIX_REG_NUM 8
+#define IQK_MATRIX_SETTINGS_NUM (14+24+21) /* Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G */
+struct iqk_matrix_regs {
+ bool b_iqk_done;
+ long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct rtl_phy {
+ struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
+ struct init_gain initgain_backup;
+ enum io_type current_io_type;
+
+ u8 rf_mode;
+ u8 rf_type;
+ u8 current_chan_bw;
+ u8 set_bwmode_inprogress;
+ u8 sw_chnl_inprogress;
+ u8 sw_chnl_stage;
+ u8 sw_chnl_step;
+ u8 current_channel;
+ u8 h2c_box_num;
+ u8 set_io_inprogress;
+ u8 lck_inprogress;
+
+ /* record for power tracking */
+ s32 reg_e94;
+ s32 reg_e9c;
+ s32 reg_ea4;
+ s32 reg_eac;
+ s32 reg_eb4;
+ s32 reg_ebc;
+ s32 reg_ec4;
+ s32 reg_ecc;
+ u8 rfpienable;
+ u8 reserve_0;
+ u16 reserve_1;
+ u32 reg_c04, reg_c08, reg_874;
+ u32 adda_backup[16];
+ u32 iqk_mac_backup[IQK_MAC_REG_NUM];
+ u32 iqk_bb_backup[10];
+ bool iqk_initialized;
+
+ bool rfpath_rx_enable[MAX_RF_PATH];
+ /*Jaguar*/
+ u8 reg_837;
+ /* Dual MAC */
+ bool b_need_iqk;
+ struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+ bool b_rfpi_enable;
+
+ bool b_iqk_in_progress;
+
+ u8 pwrgroup_cnt;
+ u8 bcck_high_power;
+ /* this is for 88E & 8723A */
+ u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+ /* this is for 92EE */
+ u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_SECTION];
+ u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+
+ u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
+ u8 default_initialgain[4];
+
+ /* the current Tx power level */
+ u8 cur_cck_txpwridx;
+ u8 cur_ofdm24g_txpwridx;
+ u8 cur_bw20_txpwridx;
+ u8 cur_bw40_txpwridx;
+
+ u32 rfreg_chnlval[2];
+ bool b_apk_done;
+ u32 reg_rf3c[2]; /* pathA / pathB */
+
+ u32 backup_rf_0x1a;/*92ee*/
+ /* bfsync */
+ u8 framesync;
+ u32 framesync_c34;
+
+ u8 num_total_rfpath;
+ u16 rf_pathmap;
+
+ u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
+
+ enum rt_polarity_ctl polarity_ctl;
+};
+
+#define RTL_AGG_STOP 0
+#define RTL_AGG_PROGRESS 1
+#define RTL_AGG_START 2
+#define RTL_AGG_OPERATIONAL 3
+#define RTL_RX_AGG_START 1
+#define RTL_RX_AGG_STOP 0
+
+struct rtl_priv;
+struct rtl_io {
+ struct device *dev;
+
+ /*PCI MEM map */
+ unsigned long pci_mem_end; /*shared mem end */
+ unsigned long pci_mem_start; /*shared mem start */
+
+ /*PCI IO map */
+ unsigned long pci_base_addr; /*device I/O address */
+
+ void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+
+ u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
+
+};
+
+struct rtl_mac {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac80211_registered;
+ u8 beacon_enabled;
+
+ u32 tx_ss_num;
+ u32 rx_ss_num;
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum nl80211_iftype opmode;
+
+ /*Probe Beacon management */
+ enum rtl_link_state link_state;
+
+ int n_channels;
+ int n_bitrates;
+
+ bool offchan_deley;
+ u8 p2p; /*using p2p role*/
+ bool p2p_in_use;
+
+ /*filters */
+ u32 rx_conf;
+
+ bool act_scanning;
+ u8 cnt_after_linked;
+ bool skip_scan;
+
+ /* early mode */
+ /* skb wait queue */
+ struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+
+ /*RDG*/
+ bool rdg_en;
+
+ /*AP*/
+ u8 bssid[6];
+ u32 vendor;
+ u32 basic_rates; /* b/g rates */
+ u8 ht_enable;
+ u8 bw_40;
+ u8 mode; /* wireless mode */
+ u8 slot_time;
+ u8 short_preamble;
+ u8 use_cts_protect;
+ u8 cur_40_prime_sc;
+ u8 cur_40_prime_sc_bk;
+ u8 cur_80_prime_sc;
+ u64 tsf;
+ u8 retry_short;
+ u8 retry_long;
+ u16 assoc_id;
+ bool bhiddenssid;
+
+ /*IBSS*/
+ int beacon_interval;
+
+ /*AMPDU*/
+ u8 min_space_cfg; /*For Min spacing configurations */
+ u8 max_mss_density;
+ u8 current_ampdu_factor;
+ u8 current_ampdu_density;
+
+ /*QOS & EDCA */
+ struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
+ struct rtl_qos_parameters ac[AC_MAX];
+};
+
+struct rtl_hal {
+ struct ieee80211_hw *hw;
+
+ bool driver_is_goingto_unload;
+ bool up_first_time;
+ bool bfirst_init;
+ bool being_init_adapter;
+ bool b_bbrf_ready;
+ bool b_mac_func_enable;
+ bool b_pre_edcca_enable;
+
+ enum intf_type interface;
+ u16 hw_type; /*92c or 92d or 92s and so on */
+ u8 ic_class;
+ u8 oem_id;
+ u32 version; /*version of chip */
+ u8 state; /*stop 0, start 1 */
+ u8 boad_type;
+
+ /*firmware */
+ u32 fwsize;
+ u8 *pfirmware;
+ u16 fw_version;
+ u16 fw_subversion;
+ bool b_h2c_setinprogress;
+ u8 last_hmeboxnum;
+ bool bfw_ready;
+
+ /*Reserve page start offset except beacon in TxQ. */
+ u8 fw_rsvdpage_startoffset;
+ u8 h2c_txcmd_seq;
+ u8 current_ra_rate;
+
+ /* FW Cmd IO related */
+ u16 fwcmd_iomap;
+ u32 fwcmd_ioparam;
+ bool set_fwcmd_inprogress;
+ u8 current_fwcmd_io;
+
+ bool bfw_clk_change_in_progress;
+ bool ballow_sw_to_change_hwclc;
+ u8 fw_ps_state;
+ struct p2p_ps_offload_t p2p_ps_offload;
+ /**/
+ bool driver_going2unload;
+
+ /*AMPDU init min space*/
+ u8 minspace_cfg; /*For Min spacing configurations */
+
+ /* Dual MAC */
+ enum macphy_mode macphymode;
+ enum band_type current_bandtype; /* 0:2.4G, 1:5G */
+ enum band_type current_bandtypebackup;
+ enum band_type bandset;
+ /* dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceindex;
+ /* just for dual MAC S3S4 */
+ u8 macphyctl_reg;
+ bool b_earlymode_enable;
+ u8 max_earlymode_num;
+ /* Dual MAC */
+ bool during_mac0init_radiob;
+ bool during_mac1init_radioa;
+ bool reloadtxpowerindex;
+ /* True if IMR or IQK have been done
+ for 2.4G during scan */
+ bool b_load_imrandiqk_setting_for2g;
+
+ bool disable_amsdu_8k;
+ bool bmaster_of_dmsp;
+ bool bslave_of_dmsp;
+
+ u16 rx_tag;/*for 92ee*/
+ u8 rts_en;
+};
+
+struct rtl_security {
+ /*default 0 */
+ bool use_sw_sec;
+
+ bool being_setkey;
+ bool use_defaultkey;
+ /*Encryption Algorithm for Unicast Packet */
+ enum rt_enc_alg pairwise_enc_algorithm;
+ /*Encryption Algorithm for Broadcast/Multicast */
+ enum rt_enc_alg group_enc_algorithm;
+ /*Cam Entry Bitmap */
+ u32 hwsec_cam_bitmap;
+ u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
+ /*local key buffer, index 0 is for the
+ pairwise key, 1-4 are for group keys. */
+ u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
+ u8 key_len[KEY_BUF_SIZE];
+
+ /*The pointer of Pairwise Key,
+ it always points to KeyBuf[4] */
+ u8 *pairwise_key;
+};
+
+struct rtl_dig {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+
+ u8 cursta_connectstate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+ u8 min_undecorated_pwdb_for_dm;
+ long last_min_undecorated_pwdb_for_dm;
+
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+
+ u8 large_fa_hit;
+ u8 forbidden_igi;
+ u32 recover_cnt;
+
+};
+
+struct rtl_pstbl {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+
+ long rssi_val_min;
+
+};
+
+#define ASSOCIATE_ENTRY_NUM (32+1)
+
+struct fast_ant_trainning {
+ u8 bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u32 ant_sum_rssi[7];
+ u32 ant_rssi_cnt[7];
+ u32 ant_ave_rssi[7];
+ u8 fat_state;
+ u32 train_idx;
+ u8 antsel_a[ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ASSOCIATE_ENTRY_NUM];
+ u32 main_ant_sum[ASSOCIATE_ENTRY_NUM];
+ u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM];
+ u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM];
+ u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM];
+ u8 rx_idle_ant;
+ bool b_becomelinked;
+};
+
+struct dm_phy_dbg_info {
+ char rx_snrdb[4];
+ u64 num_qry_phy_status;
+ u64 num_qry_phy_status_cck;
+ u64 num_qry_phy_status_ofdm;
+ u16 num_qry_beacon_pkt;
+ u16 num_non_be_pkt;
+ s32 rx_evm[4];
+};
+
+struct rtl_dm {
+ /*PHY status for DM */
+ long entry_min_undecoratedsmoothed_pwdb;
+ long undecorated_smoothed_pwdb; /*out dm */
+ long entry_max_undecoratedsmoothed_pwdb;
+ bool b_dm_initialgain_enable;
+ bool bdynamic_txpower_enable;
+ bool bcurrent_turbo_edca;
+ bool bis_any_nonbepkts; /*out dm */
+ bool bis_cur_rdlstate;
+ bool btxpower_trackinginit;
+ bool b_disable_framebursting;
+ bool b_cck_inch14;
+ bool btxpower_tracking;
+ bool b_useramask;
+ bool brfpath_rxenable[4];
+ bool binform_fw_driverctrldm;
+ bool bcurrent_mrc_switch;
+ u8 txpowercount;
+
+ u8 thermalvalue_rxgain;
+ u8 thermalvalue_iqk;
+ u8 thermalvalue_lck;
+ u8 thermalvalue;
+ u8 thermalvalue_avg[AVG_THERMAL_NUM];
+ u8 thermalvalue_avg_index;
+ bool bdone_txpower;
+ u8 last_dtp_lvl;
+ u8 dynamic_txhighpower_lvl; /*Tx high power level */
+ u8 dm_flag; /*Indicates each dynamic mechanism's status. */
+ u8 dm_type;
+ u8 txpower_track_control;
+ bool binterrupt_migration;
+ bool bdisable_tx_int;
+ char ofdm_index[MAX_RF_PATH];
+ u8 default_ofdm_index;
+ u8 default_cck_index;
+ char cck_index;
+ char delta_power_index[MAX_RF_PATH];
+ char delta_power_index_last[MAX_RF_PATH];
+ char power_index_offset[MAX_RF_PATH];
+ char aboslute_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_cck_idx;
+ bool modify_txagc_flag_path_a;
+ bool modify_txagc_flag_path_b;
+
+ bool b_one_entry_only;
+ struct dm_phy_dbg_info dbginfo;
+ /* Dynamic ATC switch */
+
+ bool atc_status;
+ bool large_cfo_hit;
+ bool is_freeze;
+ int cfo_tail[2];
+ int cfo_ave_pre;
+ int crystal_cap;
+ u8 cfo_threshold;
+ u32 packet_count;
+ u32 packet_count_pre;
+ u8 tx_rate;
+
+
+ /*88e tx power tracking*/
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
+ u8 bb_swing_idx_ofdm_current;
+ u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
+ bool bb_swing_flag_Ofdm;
+ u8 bb_swing_idx_cck;
+ u8 bb_swing_idx_cck_current;
+ u8 bb_swing_idx_cck_base;
+ bool bb_swing_flag_cck;
+
+ char bb_swing_diff_2g;
+ char bb_swing_diff_5g;
+
+ u8 delta_swing_table_idx_24gccka_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gcckb_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE];
+ u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE];
+
+
+ /* DMSP */
+ bool supp_phymode_switch;
+
+ /* Dual MAC */
+ struct rtl_dig dm_digtable;
+ struct rtl_pstbl dm_pstable;
+ struct fast_ant_trainning fat_table;
+
+ u8 resp_tx_path;
+ u8 path_sel;
+ u32 patha_sum;
+ u32 pathb_sum;
+ u32 patha_cnt;
+ u32 pathb_cnt;
+
+ u8 pre_channel;
+ u8 *p_channel;
+ u8 linked_interval;
+
+ u64 last_tx_ok_cnt;
+ u64 last_rx_ok_cnt;
+};
+
+#define EFUSE_MAX_LOGICAL_SIZE 256
+
+struct rtl_efuse {
+ bool bautoLoad_ok;
+ bool bootfromefuse;
+ u16 max_physical_size;
+
+ u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
+ u16 efuse_usedbytes;
+ u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+ bool efuse_re_pg_sec1flag;
+ u8 efuse_re_pg_data[8];
+#endif
+
+ u8 autoload_failflag;
+ u8 autoload_status;
+
+ short epromtype;
+ u16 eeprom_vid;
+ u16 eeprom_did;
+ u16 eeprom_svid;
+ u16 eeprom_smid;
+ u8 eeprom_oemid;
+ u16 eeprom_channelplan;
+ u8 eeprom_version;
+
+ u8 dev_addr[6];
+ u8 board_type;
+ u8 wowlan_enable;
+ u8 antenna_div_cfg;
+ u8 antenna_div_type;
+
+ bool b_txpwr_fromeprom;
+ u8 eeprom_crystalcap;
+ u8 eeprom_tssi[2];
+ u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+ u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+ u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+
+
+ u8 internal_pa_5g[2]; /* pathA / pathB */
+ u8 eeprom_c9;
+ u8 eeprom_cc;
+
+ /*For power group */
+ u8 eeprom_pwrgroup[2][3];
+ u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+ u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_cckdiff[MAX_RF_PATH][MAX_TX_COUNT]; /*CCK_24G_Diff*/
+ /*HT 20<->40 Pwr diff */
+ char txpwr_ht20diff[MAX_RF_PATH][MAX_TX_COUNT]; /*BW20_24G_Diff*/
+ char txpwr_ht40diff[MAX_RF_PATH][MAX_TX_COUNT];/*BW40_24G_Diff*/
+ /*For HT<->legacy pwr diff */
+ char txpwr_legacyhtdiff[MAX_RF_PATH][MAX_TX_COUNT];/*OFDM_24G_Diff*/
+
+ u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+ char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+ u8 txpwr_safetyflag; /* Band edge enable flag */
+ u16 eeprom_txpowerdiff;
+ u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
+ u8 antenna_txpwdiff[3];
+
+ u8 eeprom_regulatory;
+ u8 eeprom_thermalmeter;
+ u8 thermalmeter[2];/*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u16 tssi_13dbm;
+ u8 crystalcap; /* CrystalCap. */
+ u8 delta_iqk;
+ u8 delta_lck;
+
+ u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
+ bool b_apk_thermalmeterignore;
+
+ bool b1x1_recvcombine;
+ bool b1ss_support;
+
+ /*channel plan */
+ u8 channel_plan;
+};
+
+struct rtl_ps_ctl {
+ bool pwrdomain_protect;
+ bool b_in_powersavemode;
+ bool rfchange_inprogress;
+ bool b_swrf_processing;
+ bool b_hwradiooff;
+ /*
+ * just for PCIE ASPM
+ * If it supports ASPM, Offset[560h] = 0x40,
+ * otherwise Offset[560h] = 0x00.
+ */
+ bool b_support_aspm;
+ bool b_support_backdoor;
+
+ /*for LPS */
+ enum rt_psmode dot11_psmode; /*Power save mode configured. */
+ bool b_swctrl_lps;
+ bool b_fwctrl_lps;
+ u8 fwctrl_psmode;
+ /*For Fw control LPS mode */
+ u8 b_reg_fwctrl_lps;
+ /*Record Fw PS mode status. */
+ bool b_fw_current_inpsmode;
+ u8 reg_max_lps_awakeintvl;
+ bool report_linked;
+ bool b_low_power_enable;/*for 32k*/
+
+ /*for IPS */
+ bool b_inactiveps;
+
+ u32 rfoff_reason;
+
+ /*RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+
+ /*just for PCIE ASPM */
+ u8 const_amdpci_aspm;
+
+ enum rf_pwrstate inactive_pwrstate;
+ enum rf_pwrstate rfpwr_state; /*cur power state */
+
+ /* for SW LPS*/
+ bool sw_ps_enabled;
+ bool state;
+ bool state_inap;
+ bool multi_buffered;
+ u16 nullfunc_seq;
+ unsigned int dtim_counter;
+ unsigned int sleep_ms;
+ unsigned long last_sleep_jiffies;
+ unsigned long last_awake_jiffies;
+ unsigned long last_delaylps_stamp_jiffies;
+ unsigned long last_dtim;
+ unsigned long last_beacon;
+ unsigned long last_action;
+ unsigned long last_slept;
+
+ /*For P2P PS */
+ struct rtl_p2p_ps_info p2p_ps_info;
+ u8 pwr_mode;
+ u8 smart_ps;
+};
+
+struct rtl_stats {
+ u8 psaddr[ETH_ALEN];
+ u32 mac_time[2];
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u8 rate; /* hw desc rate */
+ u8 rawdata;
+ u8 received_channel;
+ u8 control;
+ u8 mask;
+ u8 freq;
+ u16 len;
+ u64 tsf;
+ u32 beacon_time;
+ u8 nic_type;
+ u16 length;
+ u8 signalquality; /*in 0-100 index. */
+ /*
+ * Real power in dBm for this packet,
+ * no beautification and aggregation.
+ * */
+ s32 recvsignalpower;
+ s8 rxpower; /*in dBm Translate from PWdB */
+ u8 signalstrength; /*in 0-100 index. */
+ u16 b_hwerror:1;
+ u16 b_crc:1;
+ u16 b_icv:1;
+ u16 b_shortpreamble:1;
+ u16 antenna:1;
+ u16 decrypted:1;
+ u16 wakeup:1;
+ u32 timestamp_low;
+ u32 timestamp_high;
+ bool b_shift;
+
+ u8 rx_drvinfo_size;
+ u8 rx_bufshift;
+ bool b_isampdu;
+ bool b_isfirst_ampdu;
+ bool rx_is40Mhzpacket;
+ u32 rx_pwdb_all;
+ u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
+ s8 rx_mimo_signalquality[4];
+ u8 rx_mimo_evm_dbm[4];
+ u16 cfo_short[4]; /* per-path's Cfo_short */
+ u16 cfo_tail[4];
+ u8 rx_pwr[4]; /* per-path's pwdb */
+ u8 rx_snr[4]; /* per-path's SNR */
+ u8 bandwidth;
+ u8 bt_coex_pwr_adjust;
+ bool b_packet_matchbssid;
+ bool b_is_cck;
+ bool b_is_ht;
+ bool b_packet_toself;
+ bool b_packet_beacon; /*for rssi */
+ char cck_adc_pwdb[4]; /*for rx path selection */
+
+ u8 packet_report_type;
+
+ u32 macid;
+ u8 wake_match;
+ u32 bt_rx_rssi_percentage;
+ u32 macid_valid_entry[2];
+};
+
+struct rt_link_detect {
+ /* count for roaming */
+ u32 bcn_rx_inperiod;
+ u32 roam_times;
+
+ u32 num_tx_in4period[4];
+ u32 num_rx_in4period[4];
+
+ u32 num_tx_inperiod;
+ u32 num_rx_inperiod;
+
+ bool b_busytraffic;
+ bool b_tx_busy_traffic;
+ bool b_rx_busy_traffic;
+ bool b_higher_busytraffic;
+ bool b_higher_busyrxtraffic;
+
+ u32 tidtx_in4period[MAX_TID_COUNT][4];
+ u32 tidtx_inperiod[MAX_TID_COUNT];
+ bool higher_busytxtraffic[MAX_TID_COUNT];
+};
+
+struct rtl_tcb_desc {
+ u8 b_packet_bw:1;
+ u8 b_multicast:1;
+ u8 b_broadcast:1;
+
+ u8 b_rts_stbc:1;
+ u8 b_rts_enable:1;
+ u8 b_cts_enable:1;
+ u8 b_rts_use_shortpreamble:1;
+ u8 b_rts_use_shortgi:1;
+ u8 rts_sc:1;
+ u8 b_rts_bw:1;
+ u8 rts_rate;
+
+ u8 use_shortgi:1;
+ u8 use_shortpreamble:1;
+ u8 use_driver_rate:1;
+ u8 disable_ratefallback:1;
+
+ u8 ratr_index;
+ u8 mac_id;
+ u8 hw_rate;
+
+ u8 b_last_inipkt:1;
+ u8 b_cmd_or_init:1;
+ u8 queue_index;
+
+ /* early mode */
+ u8 empkt_num;
+ /* The max value by HW */
+ u32 empkt_len[10];
+ bool btx_enable_sw_calc_duration;
+ /* used when the HAL constructs a packet;
+ * we may set the desc at tx time */
+ u8 self_desc;
+};
+
+struct proxim {
+ bool proxim_on;
+
+ void *proximity_priv;
+ int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+ struct sk_buff *skb);
+ u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
+struct rtl_hal_ops {
+ int (*init_sw_vars)(struct ieee80211_hw *hw);
+ void (*deinit_sw_vars)(struct ieee80211_hw *hw);
+ void (*read_eeprom_info)(struct ieee80211_hw *hw);
+ void (*interrupt_recognized)(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+ int (*hw_init)(struct ieee80211_hw *hw);
+ void (*hw_disable)(struct ieee80211_hw *hw);
+ void (*hw_suspend)(struct ieee80211_hw *hw);
+ void (*hw_resume)(struct ieee80211_hw *hw);
+ void (*enable_interrupt)(struct ieee80211_hw *hw);
+ void (*disable_interrupt)(struct ieee80211_hw *hw);
+ int (*set_network_type)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
+ void (*set_chk_bssid)(struct ieee80211_hw *hw,
+ bool check_bssid);
+ void (*set_bw_mode)(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+ u8 (*switch_channel)(struct ieee80211_hw *hw);
+ void (*set_qos)(struct ieee80211_hw *hw, int aci);
+ void (*set_bcn_reg)(struct ieee80211_hw *hw);
+ void (*set_bcn_intv)(struct ieee80211_hw *hw);
+ void (*update_interrupt_mask)(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+ void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*update_rate_tbl)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level);
+ void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
+ u8 *desc, u8 queue_index,
+ struct sk_buff *skb, dma_addr_t addr);
+ u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
+ u8 queue_index);
+ void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
+ u8 queue_index);
+ void (*fill_tx_desc)(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ u8 *pdesc_tx, u8 *pbd_desc,
+ struct ieee80211_tx_info *info,
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+/*<delete in kernel end>*/
+ struct ieee80211_sta *sta,
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+ struct sk_buff *skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+ void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+ bool (*query_rx_desc)(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+ void (*set_channel_access)(struct ieee80211_hw *hw);
+ bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid);
+ void (*dm_watchdog)(struct ieee80211_hw *hw);
+ void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation);
+ bool (*set_rf_power_state)(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+ void (*led_control)(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+ void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+ u32 (*get_desc)(u8 *pdesc, bool istx, u8 desc_name);
+ bool (*is_tx_desc_closed)(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+ void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue);
+ void (*enable_hw_sec)(struct ieee80211_hw *hw);
+ void (*set_key)(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+ void (*init_sw_leds)(struct ieee80211_hw *hw);
+ u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+ void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+ u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+ void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+ void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+ bool allow_all_da, bool write_into_reg);
+ void (*linked_set_reg)(struct ieee80211_hw *hw);
+ void (*check_switch_to_dmdp)(struct ieee80211_hw *hw);
+ void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw);
+ void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw);
+ void (*c2h_command_handle)(struct ieee80211_hw *hw);
+ void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw,
+ bool mstate);
+ void (*bt_turn_off_bt_coexist_before_enter_lps)(struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*get_btc_status)(void);
+ u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw,
+ struct rtl_stats status,
+ struct sk_buff *skb);
+};
+
+struct rtl_intf_ops {
+ /*com */
+ void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+ int (*adapter_start)(struct ieee80211_hw *hw);
+ void (*adapter_stop)(struct ieee80211_hw *hw);
+ bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+ struct rtl_priv **buddy_priv);
+
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ int (*adapter_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
+#else
+/*<delete in kernel end>*/
+ int (*adapter_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
+#else
+ void (*flush)(struct ieee80211_hw *hw, bool drop);
+#endif
+ int (*reset_trx_ring)(struct ieee80211_hw *hw);
+/*<delete in kernel start>*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+ bool (*waitq_insert)(struct ieee80211_hw *hw, struct sk_buff *skb);
+#else
+/*<delete in kernel end>*/
+ bool (*waitq_insert)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
+/*<delete in kernel start>*/
+#endif
+/*<delete in kernel end>*/
+
+ /*pci */
+ void (*disable_aspm)(struct ieee80211_hw *hw);
+ void (*enable_aspm)(struct ieee80211_hw *hw);
+
+ /*usb */
+};
+
+struct rtl_mod_params {
+ /* default: 0 = using hardware encryption */
+ bool sw_crypto;
+
+ /* default: 1 = using no linked power save */
+ bool b_inactiveps;
+
+ /* default: 1 = using linked sw power save */
+ bool b_swctrl_lps;
+
+ /* default: 1 = using linked fw power save */
+ bool b_fwctrl_lps;
+};
+
+struct rtl_hal_cfg {
+ u8 bar_id;
+ bool write_readback;
+ char *name;
+ char *fw_name;
+ struct rtl_hal_ops *ops;
+ struct rtl_mod_params *mod_params;
+
+ /*this map is used for some registers or vars
+ defined in HAL but used in MAIN */
+ u32 maps[RTL_VAR_MAP_MAX];
+
+};
+
+struct rtl_locks {
+ /* mutex */
+ struct mutex conf_mutex;
+
+ /*spin lock */
+ spinlock_t ips_lock;
+ spinlock_t irq_th_lock;
+ spinlock_t h2c_lock;
+ spinlock_t rf_ps_lock;
+ spinlock_t rf_lock;
+ spinlock_t lps_lock;
+ spinlock_t waitq_lock;
+ spinlock_t entry_list_lock;
+
+ /*FW clock change */
+ spinlock_t fw_ps_lock;
+
+ /*Dual MAC*/
+ spinlock_t cck_and_rw_pagea_lock;
+
+ /*Easy concurrent*/
+ spinlock_t check_sendpkt_lock;
+
+ spinlock_t iqk_lock;
+};
+
+struct rtl_works {
+ struct ieee80211_hw *hw;
+
+ /*timer */
+ struct timer_list watchdog_timer;
+ struct timer_list dualmac_easyconcurrent_retrytimer;
+ struct timer_list fw_clockoff_timer;
+ struct timer_list fast_antenna_trainning_timer;
+ /*task */
+ struct tasklet_struct irq_tasklet;
+ struct tasklet_struct irq_prepare_bcn_tasklet;
+
+ /*work queue */
+ struct workqueue_struct *rtl_wq;
+ struct delayed_work watchdog_wq;
+ struct delayed_work ips_nic_off_wq;
+
+ /* For SW LPS */
+ struct delayed_work ps_work;
+ struct delayed_work ps_rfon_wq;
+ struct delayed_work fwevt_wq;
+};
+
+struct rtl_debug {
+ u32 dbgp_type[DBGP_TYPE_MAX];
+ u32 global_debuglevel;
+ u64 global_debugcomponents;
+
+ /* add for proc debug */
+ struct proc_dir_entry *proc_dir;
+ char proc_name[20];
+};
+
+#define MIMO_PS_STATIC 0
+#define MIMO_PS_DYNAMIC 1
+#define MIMO_PS_NOLIMIT 3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+ enum band_type currentbandtype_backfordmdp;
+ bool bclose_bbandrf_for_dmsp;
+ bool bchange_to_dmdp;
+ bool bchange_to_dmsp;
+ bool bswitch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+ bool bactivescan_for_slaveofdmsp;
+ bool bscan_for_anothermac_fordmsp;
+ bool bscan_for_itself_fordmsp;
+ bool bwritedig_for_anothermacofdmsp;
+ u32 curdigvalue_for_anothermacofdmsp;
+ bool bchangecckpdstate_for_anothermacofdmsp;
+ u8 curcckpdstate_for_anothermacofdmsp;
+ bool bchangetxhighpowerlvl_for_anothermacofdmsp;
+ u8 curtxhighlvl_for_anothermacofdmsp;
+ long rssivalmin_for_anothermacofdmsp;
+};
+
+struct rtl_global_var {
+ /* from this list we can get
+ * other adapter's rtl_priv */
+ struct list_head glb_priv_list;
+ spinlock_t glb_list_lock;
+};
+
+struct rtl_btc_info {
+ u8 bt_type;
+ u8 btcoexist;
+ u8 ant_num;
+};
+
+struct rtl_btc_ops {
+ void (*btc_init_variables)(struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config)(struct rtl_priv *rtlpriv);
+ void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv,
+ enum rt_media_status mstatus);
+ void (*btc_periodical)(struct rtl_priv *rtlpriv);
+ void (*btc_halt_notify)(void);
+ void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv,
+ u8 *tmp_buf, u8 length);
+ bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv);
+};
+
+struct rtl_bt_coexist {
+ struct rtl_btc_ops *btc_ops;
+ struct rtl_btc_info btc_info;
+};
+
+
+struct rtl_priv {
+ struct list_head list;
+#ifdef VIF_TODO
+ struct vif_priv vif_priv;
+#endif
+ struct rtl_priv *buddy_priv;
+ struct rtl_global_var *glb_var;
+ struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+ struct rtl_dmsp_ctl dmsp_ctl;
+ struct rtl_locks locks;
+ struct rtl_works works;
+ struct rtl_mac mac80211;
+ struct rtl_hal rtlhal;
+ struct rtl_regulatory regd;
+ struct rtl_rfkill rfkill;
+ struct rtl_io io;
+ struct rtl_phy phy;
+ struct rtl_dm dm;
+ struct rtl_security sec;
+ struct rtl_efuse efuse;
+
+ struct rtl_ps_ctl psc;
+ struct rate_adaptive ra;
+ struct dynamic_primary_cca primarycca;
+ struct wireless_stats stats;
+ struct rt_link_detect link_info;
+ struct false_alarm_statistics falsealm_cnt;
+
+ struct rtl_rate_priv *rate_priv;
+
+ struct rtl_debug dbg;
+
+ /* sta entry list for ap adhoc or mesh */
+ struct list_head entry_list;
+
+ /*
+ *hal_cfg : for diff cards
+ *intf_ops : for diff interface usb/pcie
+ */
+ struct rtl_hal_cfg *cfg;
+ struct rtl_intf_ops *intf_ops;
+
+ /*this var is set by set_bit and is
+ used to indicate the status of the
+ interface or hardware */
+ unsigned long status;
+
+ /* Intel Proximity; memory should be allocated
+ * in the Intel Proximity module and it can only
+ * be used in Intel Proximity mode */
+ struct proxim proximity;
+
+ /*for bt coexist use*/
+ struct rtl_bt_coexist btcoexist;
+
+ /* separates 92ee from other ICs,
+ * 92ee uses the new trx flow. */
+ bool use_new_trx_flow;
+ /*This must be the last item so
+ that it points to the data allocated
+ beyond this structure like:
+ rtl_pci_priv or rtl_usb_priv */
+ u8 priv[0];
+};
+
+#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
+#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211))
+#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
+#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
+#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
+#define rtl_sec(rtlpriv) (&((rtlpriv)->sec))
+#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
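Since priv[0] is a zero-length trailing member, the bus-specific private data is expected to live in the same allocation, directly after struct rtl_priv. A minimal sketch of that allocation pattern, assuming an ieee80211_ops table and the struct rtl_pci_priv named in the comment above (both illustrative here, not the driver's actual probe code):

static struct ieee80211_hw *rtl_alloc_hw_example(const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct rtl_priv *rtlpriv;

	/* One allocation holds struct rtl_priv plus the bus-specific
	 * private struct that starts at rtlpriv->priv. */
	hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
				sizeof(struct rtl_pci_priv), ops);
	if (!hw)
		return NULL;

	rtlpriv = rtl_priv(hw);
	/* rtlpriv->priv points just past struct rtl_priv */
	return hw;
}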
+/***************************************
+ Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+ ANT_X2 = 0,
+ ANT_X1 = 1,
+};
+
+enum bt_co_type {
+ BT_2WIRE = 0,
+ BT_ISSC_3WIRE = 1,
+ BT_ACCEL = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+ BT_RTL8723A = 6,
+ BT_RTL8821A = 7,
+ BT_RTL8723B = 8,
+ BT_RTL8192E = 9,
+ BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
+ ANT_TOTAL_X2 = 0,
+ ANT_TOTAL_X1 = 1
+};
+
+enum bt_cur_state {
+ BT_OFF = 0,
+ BT_ON = 1,
+};
+
+enum bt_service_type {
+ BT_SCO = 0,
+ BT_A2DP = 1,
+ BT_HID = 2,
+ BT_HID_IDLE = 3,
+ BT_SCAN = 4,
+ BT_IDLE = 5,
+ BT_OTHER_ACTION = 6,
+ BT_BUSY = 7,
+ BT_OTHERBUSY = 8,
+ BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+ BT_RADIO_SHARED = 0,
+ BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isolation;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:on, 1:off */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool b_init_set;
+ bool b_bt_busy_traffic;
+ bool b_bt_traffic_mode_set;
+ bool b_bt_non_traffic_mode_set;
+
+ bool b_fw_coexist_all_off;
+ bool b_sw_coexist_all_off;
+ bool b_hw_coexist_all_off;
+ u32 current_state;
+ u32 previous_state;
+ u32 current_state_h;
+ u32 previous_state_h;
+
+ u8 bt_pre_rssi_state;
+ u8 bt_pre_rssi_state1;
+
+ u8 b_reg_bt_iso;
+ u8 b_reg_bt_sco;
+ bool b_balance_on;
+ u8 bt_active_zero_cnt;
+ bool b_cur_bt_disabled;
+ bool b_pre_bt_disabled;
+
+ u8 bt_profile_case;
+ u8 bt_profile_action;
+ bool b_bt_busy;
+ bool b_hold_for_bt_operation;
+ u8 lps_counter;
+};
+
+
+/****************************************
+ mem access macro define start
+ Call the endian-free functions when:
+ 1. Reading/writing packet content.
+ 2. Before writing an integer to IO.
+ 3. After reading an integer from IO.
+****************************************/
+/* Convert little-endian data to host byte order */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr) \
+ EF1BYTE(*((u8 *)(_ptr)))
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*((u16 *)(_ptr)))
+#define READEF4BYTE(_ptr) \
+ EF4BYTE(*((u32 *)(_ptr)))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val) \
+ ((*((u8 *)(_ptr))) = EF1BYTE(_val))
+#define WRITEEF2BYTE(_ptr, _val) \
+ ((*((u16 *)(_ptr))) = EF2BYTE(_val))
+#define WRITEEF4BYTE(_ptr, _val) \
+ ((*((u32 *)(_ptr))) = EF4BYTE(_val))
+
+/*Example:
+BIT_LEN_MASK_32(0) => 0x00000000
+BIT_LEN_MASK_32(1) => 0x00000001
+BIT_LEN_MASK_32(2) => 0x00000003
+BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/*Example:
+BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+Return 4-byte value in host byte ordering from
+4-byte pointer in little-endian system.*/
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((u32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((u16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/*Description:
+Translate subfield (continuous bits in little-endian) of 4-byte
+value to host byte ordering.*/
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_32(__bitlen) \
+ )
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_16(__bitlen) \
+ )
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_8(__bitlen) \
+ )
+
+/*Description:
+Mask subfield (continuous bits in little-endian) of 4-byte value
+and return the result in 4-byte value in host byte ordering.*/
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
+ )
+
+/*Description:
+Set subfield of little-endian 4-byte value to specified value. */
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u32 *)(__pstart)) = EF4BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset))\
+ );
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u16 *)(__pstart)) = EF2BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset))\
+ );
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
+ );
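+/* Illustrative example (not part of the original change): set a 4-bit
+ * field at bit offset 8 of a little-endian descriptor word and read it
+ * back. "pdesc" is a hypothetical u8 buffer holding the descriptor:
+ *
+ *	SET_BITS_TO_LE_4BYTE(pdesc, 8, 4, rate);
+ *	rate = LE_BITS_TO_4BYTE(pdesc, 8, 4);
+ */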
+
+#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
+ (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
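+/*Example (illustrative, not part of the original change):
+N_BYTE_ALIGMENT(7, 8) => 8
+N_BYTE_ALIGMENT(16, 8) => 16
+N_BYTE_ALIGMENT(5, 1) => 5*/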
+
+/****************************************
+ Memory access macro definitions (end)
+****************************************/
+
+#define byte(x, n) ((x >> (8 * n)) & 0xff)
+
+#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define RTL_WATCH_DOG_TIME 2000
+#define MSECS(t) msecs_to_jiffies(t)
+#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
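+/* Illustrative example (not part of the original change): for a frame whose
+ * sequence-control field is 0x01B0, SEQ_TO_SN(0x01B0) yields sequence number
+ * 0x1B, and SN_TO_SEQ(0x1B) rebuilds 0x01B0 (the fragment number bits are
+ * dropped).
+ */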
+
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */
+/*NIC halt, re-initialize hw parameters*/
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
+/*Always enable ASPM and Clock Req in initialization.*/
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
+/* Set PS_ASPM_LEVL regardless of whether we are in RFOFF or SLEEP. */
+#define RT_PS_LEVEL_ASPM BIT(7)
+/*When LPS is on, disable 2R if no packet is received or transmitted.*/
+#define RT_RF_LPS_DISALBE_2R BIT(30)
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
+#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
+ ((ppsc->cur_ps_level & _ps_flg) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level &= (~(_ps_flg)))
+#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level |= _ps_flg)
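+/* Illustrative example (not part of the original change), assuming "ppsc"
+ * points to the power-save control block that owns cur_ps_level:
+ *
+ *	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ *	if (RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC))
+ *		pr_debug("halt the NIC when RF is turned off\n");
+ *	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ */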
+
+#define container_of_dwork_rtl(x,y,z) \
+ container_of(container_of(x, struct delayed_work, work), y, z)
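+/* Illustrative example (not part of the original change): inside a work
+ * callback, recover the enclosing object from its embedded delayed_work.
+ * "struct rtl_works" and its "watchdog_wq" member are hypothetical names:
+ *
+ *	struct rtl_works *w =
+ *		container_of_dwork_rtl(work, struct rtl_works, watchdog_wq);
+ */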
+
+#define FILL_OCTET_STRING(_os,_octet,_len) \
+ (_os).octet = (u8 *)(_octet); \
+ (_os).length = (_len);
+
+#define CP_MACADDR(des,src) \
+ ((des)[0] = (src)[0],(des)[1] = (src)[1],\
+ (des)[2] = (src)[2],(des)[3] = (src)[3],\
+ (des)[4] = (src)[4],(des)[5] = (src)[5])
+
+static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
+{
+ rtlpriv->io.write8_async(rtlpriv, addr, val8);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
+{
+ rtlpriv->io.write16_async(rtlpriv, addr, val16);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val32)
+{
+ rtlpriv->io.write32_async(rtlpriv, addr, val32);
+
+ if (rtlpriv->cfg->write_readback)
+ rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
+ regaddr, bitmask,
+ data);
+}
+
+static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
+ rfpath,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
+ rfpath, regaddr,
+ bitmask, data);
+}
+
+static inline bool is_hal_stop(struct rtl_hal *rtlhal)
+{
+ return (_HAL_STATE_STOP == rtlhal->state);
+}
+
+static inline void set_hal_start(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_START;
+}
+
+static inline void set_hal_stop(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_STOP;
+}
+
+static inline u8 get_rf_type(struct rtl_phy *rtlphy)
+{
+ return rtlphy->rf_type;
+}
+
+static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
+{
+ return (struct ieee80211_hdr *)(skb->data);
+}
+
+static inline u16 rtl_get_fc(struct sk_buff *skb)
+{
+ return le16_to_cpu(rtl_get_hdr(skb)->frame_control);
+}
+
+static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static inline u16 rtl_get_tid(struct sk_buff *skb)
+{
+ return rtl_get_tid_h(rtl_get_hdr(skb));
+}
+
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ u8 *mac_addr)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
+struct ieee80211_hw *rtl_pci_get_hw_pointer(void);
+#endif
diff --git a/drivers/staging/rts5139/ms.c b/drivers/staging/rts5139/ms.c
index 9253f6ab2e08..390292ad4566 100644
--- a/drivers/staging/rts5139/ms.c
+++ b/drivers/staging/rts5139/ms.c
@@ -1033,8 +1033,7 @@ static int ms_read_attribute_info(struct rts51x_chip *chip)
((u32) buf[cur_addr_off + 5] << 16) |
((u32) buf[cur_addr_off + 6] << 8) |
buf[cur_addr_off + 7];
- RTS51X_DEBUGP("sys_info_addr = 0x%x,"
- "sys_info_size = 0x%x\n",
+ RTS51X_DEBUGP("sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
sys_info_addr, sys_info_size);
if (sys_info_size != 96) {
kfree(buf);
@@ -1069,8 +1068,7 @@ static int ms_read_attribute_info(struct rts51x_chip *chip)
((u32) buf[cur_addr_off + 5] << 16) |
((u32) buf[cur_addr_off + 6] << 8) |
buf[cur_addr_off + 7];
- RTS51X_DEBUGP("model_name_addr = 0x%x,"
- "model_name_size = 0x%x\n",
+ RTS51X_DEBUGP("model_name_addr = 0x%x, model_name_size = 0x%x\n",
model_name_addr, model_name_size);
if (model_name_size != 48) {
kfree(buf);
@@ -1751,8 +1749,7 @@ static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_read_status_reg(chip);
if (retval != STATUS_SUCCESS) {
uncorrect_flag = 1;
- RTS51X_DEBUGP("Uncorrectable"
- "error\n");
+ RTS51X_DEBUGP("Uncorrectable error\n");
} else {
uncorrect_flag = 0;
}
@@ -1770,8 +1767,7 @@ static int ms_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
ms_write_extra_data(chip, old_blk, i,
extra,
MS_EXTRA_SIZE);
- RTS51X_DEBUGP("page %d :"
- "extra[0] = 0x%x\n",
+ RTS51X_DEBUGP("page %d : extra[0] = 0x%x\n",
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
@@ -1932,8 +1928,7 @@ static int ms_auto_copy_page(struct rts51x_chip *chip, u16 old_blk, u16 new_blk,
u8 page_len, bus_width, val = 0;
u8 extra[MS_EXTRA_SIZE];
- RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x,"
- "logical block is 0x%x\n",
+ RTS51X_DEBUGP("Auto copy page from 0x%x to 0x%x, logical block is 0x%x\n",
old_blk, new_blk, log_blk);
RTS51X_DEBUGP("start_page = %d, end_page = %d\n", start_page,
end_page);
@@ -2555,8 +2550,7 @@ static int ms_build_l2p_tbl(struct rts51x_chip *chip, int seg_no)
for (log_blk = 0; log_blk < 494; log_blk++) {
tmp_blk = segment->l2p_table[log_blk];
if (tmp_blk < ms_card->boot_block) {
- RTS51X_DEBUGP("Boot block is not the first"
- "normal block.\n");
+ RTS51X_DEBUGP("Boot block is not the first normal block.\n");
if (chip->card_wp & MS_CARD)
break;
@@ -3976,8 +3970,7 @@ static int rts51x_ms_rw_multi_sector(struct scsi_cmnd *srb, struct rts51x_chip *
end_page = start_page + (u8) total_sec_cnt;
page_cnt = end_page - start_page;
- RTS51X_DEBUGP("start_page = %d, end_page = %d,"
- "page_cnt = %d\n",
+ RTS51X_DEBUGP("start_page = %d, end_page = %d, page_cnt = %d\n",
start_page, end_page, page_cnt);
if (srb->sc_data_direction == DMA_FROM_DEVICE)
diff --git a/drivers/staging/rts5139/ms_mg.c b/drivers/staging/rts5139/ms_mg.c
index 54cfd85259a9..00862c1a36db 100644
--- a/drivers/staging/rts5139/ms_mg.c
+++ b/drivers/staging/rts5139/ms_mg.c
@@ -35,6 +35,7 @@
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "ms.h"
+#include "ms_mg.h"
#ifdef SUPPORT_MAGIC_GATE
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index a8d2d046b44f..c8d06d4fddd2 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -215,7 +215,7 @@ void rts51x_try_to_exit_ss(struct rts51x_chip *chip)
* a USB port reset, whether from this driver or a different one.
*/
-int rts51x_pre_reset(struct usb_interface *iface)
+static int rts51x_pre_reset(struct usb_interface *iface)
{
struct rts51x_chip *chip = usb_get_intfdata(iface);
@@ -226,7 +226,7 @@ int rts51x_pre_reset(struct usb_interface *iface)
return 0;
}
-int rts51x_post_reset(struct usb_interface *iface)
+static int rts51x_post_reset(struct usb_interface *iface)
{
struct rts51x_chip *chip = usb_get_intfdata(iface);
@@ -832,7 +832,7 @@ static void rts51x_disconnect(struct usb_interface *intf)
* Initialization and registration
***********************************************************************/
-struct usb_device_id rts5139_usb_ids[] = {
+static struct usb_device_id rts5139_usb_ids[] = {
{USB_DEVICE(0x0BDA, 0x0139)},
{USB_DEVICE(0x0BDA, 0x0129)},
{} /* Terminating entry */
diff --git a/drivers/staging/rts5139/rts51x_fop.c b/drivers/staging/rts5139/rts51x_fop.c
index dee7d8af564e..677d18b3dcd5 100644
--- a/drivers/staging/rts5139/rts51x_fop.c
+++ b/drivers/staging/rts5139/rts51x_fop.c
@@ -93,7 +93,7 @@ static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip,
}
retval =
- copy_to_user((void *)cmnd->buf, (void *)buf, cmnd->buf_len);
+ copy_to_user(cmnd->buf, (void *)buf, cmnd->buf_len);
if (retval) {
kfree(buf);
TRACE_RET(chip, STATUS_NOMEM);
@@ -109,7 +109,7 @@ static int rts51x_sd_direct_cmnd(struct rts51x_chip *chip,
TRACE_RET(chip, STATUS_NOMEM);
retval =
- copy_from_user((void *)buf, (void *)cmnd->buf,
+ copy_from_user((void *)buf, cmnd->buf,
cmnd->buf_len);
if (retval) {
kfree(buf);
@@ -154,7 +154,7 @@ static int rts51x_sd_get_rsp(struct rts51x_chip *chip, struct sd_rsp *rsp)
else
count = (rsp->rsp_len < 6) ? rsp->rsp_len : 6;
- retval = copy_to_user((void *)rsp->rsp, (void *)sd_card->rsp, count);
+ retval = copy_to_user(rsp->rsp, (void *)sd_card->rsp, count);
if (retval)
TRACE_RET(chip, STATUS_NOMEM);
@@ -250,7 +250,7 @@ long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTS5139_IOC_SD_DIRECT:
retval =
- copy_from_user((void *)&cmnd, (void *)arg,
+ copy_from_user((void *)&cmnd, (void __user *)arg,
sizeof(struct sd_direct_cmnd));
if (retval) {
retval = -ENOMEM;
@@ -265,7 +265,7 @@ long rts51x_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case RTS5139_IOC_SD_GET_RSP:
retval =
- copy_from_user((void *)&rsp, (void *)arg,
+ copy_from_user(&rsp, (void __user *)arg,
sizeof(struct sd_rsp));
if (retval) {
retval = -ENOMEM;
diff --git a/drivers/staging/rts5139/rts51x_fop.h b/drivers/staging/rts5139/rts51x_fop.h
index eb45acf50d1a..c691ee99720e 100644
--- a/drivers/staging/rts5139/rts51x_fop.h
+++ b/drivers/staging/rts5139/rts51x_fop.h
@@ -35,12 +35,12 @@
struct sd_direct_cmnd {
u8 cmnd[12];
- void *buf;
+ void __user *buf;
int buf_len;
};
struct sd_rsp {
- void *rsp;
+ void __user *rsp;
int rsp_len;
};
diff --git a/drivers/staging/rts5139/rts51x_scsi.c b/drivers/staging/rts5139/rts51x_scsi.c
index 3a990253c780..75282fea38f7 100644
--- a/drivers/staging/rts5139/rts51x_scsi.c
+++ b/drivers/staging/rts5139/rts51x_scsi.c
@@ -441,7 +441,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
return TRANSPORT_GOOD;
}
-unsigned char formatter_inquiry_str[20] = {
+static unsigned char formatter_inquiry_str[20] = {
'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
'-', 'M', 'G', /* Byte[47:49] */
0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
@@ -1990,7 +1990,7 @@ static int show_info(struct seq_file *m, struct Scsi_Host *host)
/* queue a command */
/* This is always called with scsi_lock(host) held */
-int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+static int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
{
struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index c172f4ae7c23..74588d249b32 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -646,9 +646,9 @@ int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
chip->usb->intr_urb->actual_length);
}
-u8 media_not_present[] = {
+static u8 media_not_present[] = {
0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 };
-u8 invalid_cmd_field[] = {
+static u8 invalid_cmd_field[] = {
0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 };
void rts51x_invoke_transport(struct scsi_cmnd *srb, struct rts51x_chip *chip)
diff --git a/drivers/staging/rts5139/sd.c b/drivers/staging/rts5139/sd.c
index 4283b0917f24..da5a9b8fb69c 100644
--- a/drivers/staging/rts5139/sd.c
+++ b/drivers/staging/rts5139/sd.c
@@ -1529,7 +1529,7 @@ static int sd_tuning_rx(struct rts51x_chip *chip)
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
- int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+ int (*tuning_cmd)(struct rts51x_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
@@ -1627,7 +1627,7 @@ static int sd_tuning_tx(struct rts51x_chip *chip)
int i, j;
u32 raw_phase_map[3], phase_map;
u8 final_phase;
- int (*tuning_cmd) (struct rts51x_chip *chip, u8 sample_point);
+ int (*tuning_cmd)(struct rts51x_chip *chip, u8 sample_point);
if (CHK_SD(sd_card)) {
if (CHK_SD_DDR50(sd_card))
diff --git a/drivers/staging/rts5139/sd_cprm.c b/drivers/staging/rts5139/sd_cprm.c
index d4689839e15a..cede6c07394f 100644
--- a/drivers/staging/rts5139/sd_cprm.c
+++ b/drivers/staging/rts5139/sd_cprm.c
@@ -35,6 +35,7 @@
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "rts51x_chip.h"
+#include "sd_cprm.h"
#include "sd.h"
#ifdef SUPPORT_CPRM
diff --git a/drivers/staging/rts5139/xd.c b/drivers/staging/rts5139/xd.c
index 10fea7e16ace..be432351be86 100644
--- a/drivers/staging/rts5139/xd.c
+++ b/drivers/staging/rts5139/xd.c
@@ -473,7 +473,8 @@ static int reset_xd(struct rts51x_chip *chip)
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP *
(2 + i + chip->option.rts51x_xd_rw_step)
- + XD_TIME_RWN_STEP * (i + chip->option.rts51x_xd_rwn_step));
+ + XD_TIME_RWN_STEP *
+ (i + chip->option.rts51x_xd_rwn_step));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 +
i) + XD_TIME_RWN_STEP * (3 + i));
@@ -1526,8 +1527,8 @@ static int xd_read_multiple_pages(struct rts51x_chip *chip, u32 phy_blk,
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
- rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
- DMA_512);
+ rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_PAGES);
@@ -1745,8 +1746,8 @@ static int xd_write_multiple_pages(struct rts51x_chip *chip, u32 old_blk,
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
RING_BUFFER);
- rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
- DMA_512);
+ rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip,
+ page_cnt * 512, DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
@@ -1842,8 +1843,8 @@ static int xd_delay_write(struct rts51x_chip *chip)
return STATUS_SUCCESS;
}
-int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
- u16 sector_cnt)
+int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip,
+ u32 start_sector, u16 sector_cnt)
{
struct xd_info *xd_card = &(chip->xd_card);
unsigned int lun = SCSI_LUN(srb);
@@ -1883,7 +1884,8 @@ int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sect
retval = xd_build_l2p_tbl(chip, zone_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
- rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ rts51x_set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, retval);
}
}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index edf979f18a6c..d22916a4b9d8 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -259,7 +259,7 @@ static int ms_read_bytes(struct rtsx_chip *chip,
MS_TRANSFER_END, MS_TRANSFER_END);
for (i = 0; i < data_len - 1; i++)
- rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
if (data_len % 2)
rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 8586ac5d2144..d2d1345fb06c 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -1031,8 +1031,10 @@ static void rtsx_remove(struct pci_dev *pci)
/* PCI IDs */
static DEFINE_PCI_DEVICE_TABLE(rtsx_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208), PCI_CLASS_OTHERS << 16, 0xFF0000 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208),
+ PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288),
+ PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, },
};
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 3055eb10c076..01aeb01bebe5 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -105,8 +105,10 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
RTSX_DEBUGP("reg 0xFF34: 0x%x, reg 0xFF38: 0x%x\n", reg1, reg2);
if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
chip->sd_int = 1;
- rtsx_write_register(chip, SDIO_CTRL, 0xFF, SDIO_BUS_CTRL | SDIO_CD_CTRL);
- rtsx_write_register(chip, PWR_GATE_CTRL, LDO3318_PWR_MASK, LDO_ON);
+ rtsx_write_register(chip, SDIO_CTRL, 0xFF,
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ rtsx_write_register(chip, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, LDO_ON);
}
}
@@ -452,7 +454,8 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
}
#ifdef DISABLE_CARD_INT
-void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigned long *need_release)
+void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
+ unsigned long *need_release)
{
u8 release_map = 0, reset_map = 0;
@@ -504,11 +507,14 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset, unsigne
}
reset_map = 0;
- if (!(chip->card_exist & XD_CARD) && (xd_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & XD_CARD) &&
+ (xd_cnt > (DEBOUNCE_CNT-1)))
reset_map |= XD_CARD;
- if (!(chip->card_exist & SD_CARD) && (sd_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & SD_CARD) &&
+ (sd_cnt > (DEBOUNCE_CNT-1)))
reset_map |= SD_CARD;
- if (!(chip->card_exist & MS_CARD) && (ms_cnt > (DEBOUNCE_CNT-1)))
+ if (!(chip->card_exist & MS_CARD) &&
+ (ms_cnt > (DEBOUNCE_CNT-1)))
reset_map |= MS_CARD;
}
@@ -549,7 +555,8 @@ void rtsx_init_cards(struct rtsx_chip *chip)
if (!(chip->card_exist & MS_CARD))
clear_bit(MS_NR, &(chip->need_release));
- RTSX_DEBUGP("chip->need_release = 0x%x\n", (unsigned int)(chip->need_release));
+ RTSX_DEBUGP("chip->need_release = 0x%x\n",
+ (unsigned int)(chip->need_release));
#ifdef SUPPORT_OCP
if (chip->need_release) {
@@ -588,8 +595,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
release_xd_card(chip);
- if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN))
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0xC0, 0xC0);
+ if (CHECK_PID(chip, 0x5288) &&
+ CHECK_BARO_PKG(chip, QFN))
+ rtsx_write_register(chip, HOST_SLEEP_STATE,
+ 0xC0, 0xC0);
}
if (chip->need_release & MS_CARD) {
@@ -610,13 +619,15 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
if (chip->need_reset) {
- RTSX_DEBUGP("chip->need_reset = 0x%x\n", (unsigned int)(chip->need_reset));
+ RTSX_DEBUGP("chip->need_reset = 0x%x\n",
+ (unsigned int)(chip->need_reset));
rtsx_reset_cards(chip);
}
if (chip->need_reinit) {
- RTSX_DEBUGP("chip->need_reinit = 0x%x\n", (unsigned int)(chip->need_reinit));
+ RTSX_DEBUGP("chip->need_reinit = 0x%x\n",
+ (unsigned int)(chip->need_reinit));
rtsx_reinit_cards(chip, 0);
}
@@ -624,7 +635,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
static inline u8 double_depth(u8 depth)
{
- return ((depth > 1) ? (depth - 1) : depth);
+ return (depth > 1) ? (depth - 1) : depth;
}
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
@@ -641,7 +652,8 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
max_N = 120;
max_div = CLK_DIV_4;
- RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n", clk, chip->cur_clk);
+ RTSX_DEBUGP("Switch SSC clock to %dMHz (cur_clk = %d)\n",
+ clk, chip->cur_clk);
if ((clk <= 2) || (N > max_N))
TRACE_RET(chip, STATUS_FAIL);
@@ -676,8 +688,10 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (sd_vpclk_phase_reset) {
- rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
}
retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -786,8 +800,10 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
if (sd_vpclk_phase_reset) {
udelay(200);
- RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
- RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ RTSX_WRITE_REG(chip, SD_VPCLK1_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
udelay(200);
}
RTSX_WRITE_REG(chip, CLK_CTL, 0xFF, 0);
@@ -797,7 +813,8 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
return STATUS_SUCCESS;
}
-void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size)
+void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
+ u32 byte_cnt, u8 pack_size)
{
if (pack_size > DMA_1024)
pack_size = DMA_512;
@@ -810,10 +827,12 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip, u32 b
rtsx_add_cmd(chip, WRITE_REG_CMD, DMATC0, 0xFF, (u8)byte_cnt);
if (dir == DMA_FROM_DEVICE) {
- rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_FROM_CARD | DMA_EN | pack_size);
} else {
- rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL, 0x03 | DMA_PACK_SIZE_MASK,
+ rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
DMA_DIR_TO_CARD | DMA_EN | pack_size);
}
@@ -903,7 +922,8 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
return STATUS_SUCCESS;
}
-int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec_cnt)
+int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
+ u32 sec_addr, u16 sec_cnt)
{
int retval;
unsigned int lun = SCSI_LUN(srb);
@@ -921,7 +941,8 @@ int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 sec_addr, u16 sec
rtsx_release_chip(chip);
TRACE_RET(chip, STATUS_FAIL);
}
- if (detect_card_cd(chip, chip->cur_card) != STATUS_SUCCESS)
+ if (detect_card_cd(chip, chip->cur_card) !=
+ STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
if (!chip->rw_need_retry) {
@@ -1016,7 +1037,8 @@ void toggle_gpio(struct rtsx_chip *chip, u8 gpio)
void turn_on_led(struct rtsx_chip *chip, u8 gpio)
{
if (CHECK_PID(chip, 0x5288))
- rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
+ (u8)(1 << gpio));
else
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
}
@@ -1026,7 +1048,8 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
if (CHECK_PID(chip, 0x5288))
rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
else
- rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), (u8)(1 << gpio));
+ rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
+ (u8)(1 << gpio));
}
int detect_card_cd(struct rtsx_chip *chip, int card)
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 382e73a54f4d..bbfa665c5c99 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -2867,7 +2867,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -2983,7 +2983,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != SD_CARD)) {
+ if (get_lun_card(chip, lun) != SD_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -3046,7 +3046,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
@@ -3151,7 +3151,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
- if ((get_lun_card(chip, lun) != MS_CARD)) {
+ if (get_lun_card(chip, lun) != MS_CARD) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
TRACE_RET(chip, TRANSPORT_FAILED);
}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 97b7b012983e..694d3834962c 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -625,8 +625,8 @@ out:
return err;
}
-static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
- enum dma_data_direction dma_dir, int timeout)
+static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
+ size_t len, enum dma_data_direction dma_dir, int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
diff --git a/drivers/staging/sb105x/Kconfig b/drivers/staging/sb105x/Kconfig
deleted file mode 100644
index 245e7847a354..000000000000
--- a/drivers/staging/sb105x/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config SB105X
- tristate "SystemBase PCI Multiport UART"
- select SERIAL_CORE
- depends on PCI && X86 && TTY && BROKEN
- help
- A driver for the SystemBase Multi-2/PCI serial card
-
- To compile this driver a module, choose M here: the module
- will be called "sb105x".
diff --git a/drivers/staging/sb105x/Makefile b/drivers/staging/sb105x/Makefile
deleted file mode 100644
index b1bf3779acae..000000000000
--- a/drivers/staging/sb105x/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SB105X) += sb105x.o
-
-sb105x-y := sb_pci_mp.o
diff --git a/drivers/staging/sb105x/sb_mp_register.h b/drivers/staging/sb105x/sb_mp_register.h
deleted file mode 100644
index 276c1bbcc18d..000000000000
--- a/drivers/staging/sb105x/sb_mp_register.h
+++ /dev/null
@@ -1,295 +0,0 @@
-
-/*
- * SB105X_UART.h
- *
- * Copyright (C) 2008 systembase
- *
- * UART registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef UART_SB105X_H
-#define UART_SB105X_H
-
-/*
- * option register
- */
-
-/* Device Information Register */
-#define MP_OPTR_DIR0 0x04 /* port0 ~ port8 */
-#define MP_OPTR_DIR1 0x05 /* port8 ~ port15 */
-#define MP_OPTR_DIR2 0x06 /* port16 ~ port23 */
-#define MP_OPTR_DIR3 0x07 /* port24 ~ port31 */
-
-#define DIR_UART_16C550 0
-#define DIR_UART_16C1050 1
-#define DIR_UART_16C1050A 2
-
-#define DIR_CLK_1843200 0x0 /* input clock 1843200 Hz */
-#define DIR_CLK_3686400 0x1 /* input clock 3686400 Hz */
-#define DIR_CLK_7372800 0x2 /* input clock 7372800 Hz */
-#define DIR_CLK_14745600 0x3 /* input clock 14745600 Hz */
-#define DIR_CLK_29491200 0x4 /* input clock 29491200 Hz */
-#define DIR_CLK_58985400 0x5 /* input clock 58985400 Hz */
-
-/* Interface Information Register */
-#define MP_OPTR_IIR0 0x08 /* port0 ~ port8 */
-#define MP_OPTR_IIR1 0x09 /* port8 ~ port15 */
-#define MP_OPTR_IIR2 0x0A /* port16 ~ port23 */
-#define MP_OPTR_IIR3 0x0B /* port24 ~ port31 */
-
-#define IIR_RS232 0x00 /* RS232 type */
-#define IIR_RS422 0x10 /* RS422 type */
-#define IIR_RS485 0x20 /* RS485 type */
-#define IIR_TYPE_MASK 0x30
-
-/* Interrupt Mask Register */
-#define MP_OPTR_IMR0 0x0C /* port0 ~ port8 */
-#define MP_OPTR_IMR1 0x0D /* port8 ~ port15 */
-#define MP_OPTR_IMR2 0x0E /* port16 ~ port23 */
-#define MP_OPTR_IMR3 0x0F /* port24 ~ port31 */
-
-/* Interrupt Poll Register */
-#define MP_OPTR_IPR0 0x10 /* port0 ~ port8 */
-#define MP_OPTR_IPR1 0x11 /* port8 ~ port15 */
-#define MP_OPTR_IPR2 0x12 /* port16 ~ port23 */
-#define MP_OPTR_IPR3 0x13 /* port24 ~ port31 */
-
-/* General Purpose Output Control Register */
-#define MP_OPTR_GPOCR 0x20
-
-/* General Purpose Output Data Register */
-#define MP_OPTR_GPODR 0x21
-
-/* Parallel Additional Function Register */
-#define MP_OPTR_PAFR 0x23
-
-/*
- * systembase 16c105x UART register
- */
-
-#define PAGE_0 0
-#define PAGE_1 1
-#define PAGE_2 2
-#define PAGE_3 3
-#define PAGE_4 4
-
-/*
- * ******************************************************************
- * * DLAB=0 =============== Page 0 Registers *
- * ******************************************************************
- */
-
-#define SB105X_RX 0 /* In: Receive buffer */
-#define SB105X_TX 0 /* Out: Transmit buffer */
-
-#define SB105X_IER 1 /* Out: Interrupt Enable Register */
-
-#define SB105X_IER_CTSI 0x80 /* CTS# Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_RTSI 0x40 /* RTS# Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_XOI 0x20 /* Xoff Interrupt Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_SME 0x10 /* Sleep Mode Enable (Requires EFR[4] = 1) */
-#define SB105X_IER_MSI 0x08 /* Enable Modem status interrupt */
-#define SB105X_IER_RLSI 0x04 /* Enable receiver line status interrupt */
-#define SB105X_IER_THRI 0x02 /* Enable Transmitter holding register int. */
-#define SB105X_IER_RDI 0x01 /* Enable receiver data interrupt */
-
-#define SB105X_ISR 2 /* In: Interrupt ID Register */
-
-#define SB105X_ISR_NOINT 0x01 /* No interrupts pending */
-#define SB105X_ISR_RLSI 0x06 /* Receiver line status interrupt (Priority = 1)*/
-#define SB105X_ISR_RDAI 0x0c /* Receive Data Available interrupt */
-#define SB105X_ISR_CTII 0x04 /* Character Timeout Indication interrupt */
-#define SB105X_ISR_THRI 0x02 /* Transmitter holding register empty */
-#define SB105X_ISR_MSI 0x00 /* Modem status interrupt */
-#define SB105X_ISR_RXCI 0x10 /* Receive Xoff or Special Character interrupt */
-#define SB105X_ISR_RCSI 0x20 /* RTS#, CTS# status interrupt during Auto RTS/CTS flow control */
-
-#define SB105X_FCR 2 /* Out: FIFO Control Register */
-
-#define SB105X_FCR_FEN 0x01 /* FIFO Enable */
-#define SB105X_FCR_RXFR 0x02 /* RX FIFO Reset */
-#define SB105X_FCR_TXFR 0x04 /* TX FIFO Reset */
-#define SB105X_FCR_DMS 0x08 /* DMA Mode Select */
-
-#define SB105X_FCR_RTR08 0x00 /* Receive Trigger Level set at 8 */
-#define SB105X_FCR_RTR16 0x40 /* Receive Trigger Level set at 16 */
-#define SB105X_FCR_RTR56 0x80 /* Receive Trigger Level set at 56 */
-#define SB105X_FCR_RTR60 0xc0 /* Receive Trigger Level set at 60 */
-#define SB105X_FCR_TTR08 0x00 /* Transmit Trigger Level set at 8 */
-#define SB105X_FCR_TTR16 0x10 /* Transmit Trigger Level set at 16 */
-#define SB105X_FCR_TTR32 0x20 /* Transmit Trigger Level set at 32 */
-#define SB105X_FCR_TTR56 0x30 /* Transmit Trigger Level set at 56 */
-
-#define SB105X_LCR 3 /* Out: Line Control Register */
-/*
- * * Note: if the word length is 5 bits (SB105X_LCR_WLEN5), then setting
- * * SB105X_LCR_STOP will select 1.5 stop bits, not 2 stop bits.
- */
-#define SB105X_LCR_DLAB 0x80 /* Divisor Latch Enable */
-#define SB105X_LCR_SBC 0x40 /* Break Enable*/
-#define SB105X_LCR_SPAR 0x20 /* Set Stick parity */
-#define SB105X_LCR_EPAR 0x10 /* Even parity select */
-#define SB105X_LCR_PAREN 0x08 /* Parity Enable */
-#define SB105X_LCR_STOP 0x04 /* Stop bits: 0->1 bit, 1->2 bits, 1 and SB105X_LCR_WLEN5 -> 1.5 bit */
-#define SB105X_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
-#define SB105X_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
-#define SB105X_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
-#define SB105X_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
-
-#define SB105X_LCR_BF 0xBF
-
-#define SB105X_MCR 4 /* Out: Modem Control Register */
-#define SB105X_MCR_CPS 0x80 /* Clock Prescaler Select */
-#define SB105X_MCR_P2S 0x40 /* Page 2 Select /Xoff Re-Transmit Access Enable */
-#define SB105X_MCR_XOA 0x20 /* Xon Any Enable */
-#define SB105X_MCR_ILB 0x10 /* Internal Loopback Enable */
-#define SB105X_MCR_OUT2 0x08 /* Out2/Interrupt Output Enable*/
-#define SB105X_MCR_OUT1 0x04 /* Out1/Interrupt Output Enable */
-#define SB105X_MCR_RTS 0x02 /* RTS# Output */
-#define SB105X_MCR_DTR 0x01 /* DTR# Output */
-
-#define SB105X_LSR 5 /* In: Line Status Register */
-#define SB105X_LSR_RFEI 0x80 /* Receive FIFO data error Indicator */
-#define SB105X_LSR_TEMI 0x40 /* THR and TSR Empty Indicator */
-#define SB105X_LSR_THRE 0x20 /* THR Empty Indicator */
-#define SB105X_LSR_BII 0x10 /* Break interrupt indicator */
-#define SB105X_LSR_FEI 0x08 /* Frame error indicator */
-#define SB105X_LSR_PEI 0x04 /* Parity error indicator */
-#define SB105X_LSR_OEI 0x02 /* Overrun error indicator */
-#define SB105X_LSR_RDRI 0x01 /* Receive data ready Indicator*/
-
-#define SB105X_MSR 6 /* In: Modem Status Register */
-#define SB105X_MSR_DCD 0x80 /* Data Carrier Detect */
-#define SB105X_MSR_RI 0x40 /* Ring Indicator */
-#define SB105X_MSR_DSR 0x20 /* Data Set Ready */
-#define SB105X_MSR_CTS 0x10 /* Clear to Send */
-#define SB105X_MSR_DDCD 0x08 /* Delta DCD */
-#define SB105X_MSR_DRI 0x04 /* Delta ring indicator */
-#define SB105X_MSR_DDSR 0x02 /* Delta DSR */
-#define SB105X_MSR_DCTS 0x01 /* Delta CTS */
-
-#define SB105XA_MDR 6 /* Out: Multi Drop mode Register */
-#define SB105XA_MDR_NPS 0x08 /* 9th Bit Polarity Select */
-#define SB105XA_MDR_AME 0x02 /* Auto Multi-drop Enable */
-#define SB105XA_MDR_MDE 0x01 /* Multi Drop Enable */
-
-#define SB105X_SPR 7 /* I/O: Scratch Register */
-
-/*
- * DLAB=1
- */
-#define SB105X_DLL 0 /* Out: Divisor Latch Low */
-#define SB105X_DLM 1 /* Out: Divisor Latch High */
-
-/*
- * ******************************************************************
- * * DLAB(LCR[7]) = 0 , MCR[6] = 1 ============= Page 2 Registers *
- * ******************************************************************
- */
-#define SB105X_GICR 1 /* Global Interrupt Control Register */
-#define SB105X_GICR_GIM 0x01 /* Global Interrupt Mask */
-
-#define SB105X_GISR 2 /* Global Interrupt Status Register */
-#define SB105X_GISR_MGICR0 0x80 /* Mirror the content of GICR[0] */
-#define SB105X_GISR_CS3IS 0x08 /* SB105X of CS3# Interrupt Status */
-#define SB105X_GISR_CS2IS 0x04 /* SB105X of CS2# Interrupt Status */
-#define SB105X_GISR_CS1IS 0x02 /* SB105X of CS1# Interrupt Status */
-#define SB105X_GISR_CS0IS 0x01 /* SB105X of CS0# Interrupt Status */
-
-#define SB105X_TFCR 5 /* Transmit FIFO Count Register */
-
-#define SB105X_RFCR 6 /* Receive FIFO Count Register */
-
-#define SB105X_FSR 7 /* Flow Control Status Register */
-#define SB105X_FSR_THFS 0x20 /* Transmit Hardware Flow Control Status */
-#define SB105X_FSR_TSFS 0x10 /* Transmit Software Flow Control Status */
-#define SB105X_FSR_RHFS 0x02 /* Receive Hardware Flow Control Status */
-#define SB105X_FSR_RSFS 0x01 /* Receive Software Flow Control Status */
-
-/*
- * ******************************************************************
- * * LCR = 0xBF, PSR[0] = 0 ============= Page 3 Registers *
- * ******************************************************************
- */
-
-#define SB105X_PSR 0 /* Page Select Register */
-#define SB105X_PSR_P3KEY 0xA4 /* Page 3 Select Key */
-#define SB105X_PSR_P4KEY 0xA5 /* Page 5 Select Key */
-
-#define SB105X_ATR 1 /* Auto Toggle Control Register */
-#define SB105X_ATR_RPS 0x80 /* RXEN Polarity Select */
-#define SB105X_ATR_RCMS 0x40 /* RXEN Control Mode Select */
-#define SB105X_ATR_TPS 0x20 /* TXEN Polarity Select */
-#define SB105X_ATR_TCMS 0x10 /* TXEN Control Mode Select */
-#define SB105X_ATR_ATDIS 0x00 /* Auto Toggle is disabled */
-#define SB105X_ATR_ART 0x01 /* RTS#/TXEN pin operates as TXEN */
-#define SB105X_ATR_ADT 0x02 /* DTR#/TXEN pin operates as TXEN */
-#define SB105X_ATR_A80 0x03 /* only in 80 pin use */
-
-#define SB105X_EFR 2 /* (Auto) Enhanced Feature Register */
-#define SB105X_EFR_ACTS 0x80 /* Auto-CTS Flow Control Enable */
-#define SB105X_EFR_ARTS 0x40 /* Auto-RTS Flow Control Enable */
-#define SB105X_EFR_SCD 0x20 /* Special Character Detect */
-#define SB105X_EFR_EFBEN 0x10 /* Enhanced Function Bits Enable */
-
-#define SB105X_XON1 4 /* Xon1 Character Register */
-#define SB105X_XON2 5 /* Xon2 Character Register */
-#define SB105X_XOFF1 6 /* Xoff1 Character Register */
-#define SB105X_XOFF2 7 /* Xoff2 Character Register */
-
-/*
- * ******************************************************************
- * * LCR = 0xBF, PSR[0] = 1 ============ Page 4 Registers *
- * ******************************************************************
- */
-
-#define SB105X_AFR 1 /* Additional Feature Register */
-#define SB105X_AFR_GIPS 0x20 /* Global Interrupt Polarity Select */
-#define SB105X_AFR_GIEN 0x10 /* Global Interrupt Enable */
-#define SB105X_AFR_AFEN 0x01 /* 256-byte FIFO Enable */
-
-#define SB105X_XRCR 2 /* Xoff Re-transmit Count Register */
-#define SB105X_XRCR_NRC1 0x00 /* Transmits Xoff Character whenever the number of received data is 1 during XOFF status */
-#define SB105X_XRCR_NRC4 0x01 /* Transmits Xoff Character whenever the number of received data is 4 during XOFF status */
-#define SB105X_XRCR_NRC8 0x02 /* Transmits Xoff Character whenever the number of received data is 8 during XOFF status */
-#define SB105X_XRCR_NRC16 0x03 /* Transmits Xoff Character whenever the number of received data is 16 during XOFF status */
-
-#define SB105X_TTR 4 /* Transmit FIFO Trigger Level Register */
-#define SB105X_RTR 5 /* Receive FIFO Trigger Level Register */
-#define SB105X_FUR 6 /* Flow Control Upper Threshold Register */
-#define SB105X_FLR 7 /* Flow Control Lower Threshold Register */
-
-
-/* page 0 */
-
-#define SB105X_GET_CHAR(port) inb((port)->iobase + SB105X_RX)
-#define SB105X_GET_IER(port) inb((port)->iobase + SB105X_IER)
-#define SB105X_GET_ISR(port) inb((port)->iobase + SB105X_ISR)
-#define SB105X_GET_LCR(port) inb((port)->iobase + SB105X_LCR)
-#define SB105X_GET_MCR(port) inb((port)->iobase + SB105X_MCR)
-#define SB105X_GET_LSR(port) inb((port)->iobase + SB105X_LSR)
-#define SB105X_GET_MSR(port) inb((port)->iobase + SB105X_MSR)
-#define SB105X_GET_SPR(port) inb((port)->iobase + SB105X_SPR)
-
-#define SB105X_PUT_CHAR(port,v) outb((v),(port)->iobase + SB105X_TX )
-#define SB105X_PUT_IER(port,v) outb((v),(port)->iobase + SB105X_IER )
-#define SB105X_PUT_FCR(port,v) outb((v),(port)->iobase + SB105X_FCR )
-#define SB105X_PUT_LCR(port,v) outb((v),(port)->iobase + SB105X_LCR )
-#define SB105X_PUT_MCR(port,v) outb((v),(port)->iobase + SB105X_MCR )
-#define SB105X_PUT_SPR(port,v) outb((v),(port)->iobase + SB105X_SPR )
-
-
-/* page 1 */
-#define SB105X_GET_REG(port,reg) inb((port)->iobase + (reg))
-#define SB105X_PUT_REG(port,reg,v) outb((v),(port)->iobase + (reg))
-
-/* page 2 */
-
-#define SB105X_PUT_PSR(port,v) outb((v),(port)->iobase + SB105X_PSR )
-
-#endif
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
deleted file mode 100644
index c9d6ee3903ad..000000000000
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ /dev/null
@@ -1,3189 +0,0 @@
-#include "sb_pci_mp.h"
-#include <linux/module.h>
-#include <linux/parport.h>
-
-extern struct parport *parport_pc_probe_port(unsigned long base_lo,
- unsigned long base_hi,
- int irq, int dma,
- struct device *dev,
- int irqflags);
-
-static struct mp_device_t mp_devs[MAX_MP_DEV];
-static int mp_nrpcibrds = sizeof(mp_pciboards)/sizeof(mppcibrd_t);
-static int NR_BOARD=0;
-static int NR_PORTS=0;
-static struct mp_port multi_ports[MAX_MP_PORT];
-static struct irq_info irq_lists[NR_IRQS];
-
-static _INLINE_ unsigned int serial_in(struct mp_port *mtpt, int offset);
-static _INLINE_ void serial_out(struct mp_port *mtpt, int offset, int value);
-static _INLINE_ unsigned int read_option_register(struct mp_port *mtpt, int offset);
-static int sb1054_get_register(struct sb_uart_port *port, int page, int reg);
-static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value);
-static void SendATCommand(struct mp_port *mtpt);
-static int set_deep_fifo(struct sb_uart_port *port, int status);
-static int get_deep_fifo(struct sb_uart_port *port);
-static int get_device_type(int arg);
-static int set_auto_rts(struct sb_uart_port *port, int status);
-static void mp_stop(struct tty_struct *tty);
-static void __mp_start(struct tty_struct *tty);
-static void mp_start(struct tty_struct *tty);
-static void mp_tasklet_action(unsigned long data);
-static inline void mp_update_mctrl(struct sb_uart_port *port, unsigned int set, unsigned int clear);
-static int mp_startup(struct sb_uart_state *state, int init_hw);
-static void mp_shutdown(struct sb_uart_state *state);
-static void mp_change_speed(struct sb_uart_state *state, struct MP_TERMIOS *old_termios);
-
-static inline int __mp_put_char(struct sb_uart_port *port, struct circ_buf *circ, unsigned char c);
-static int mp_put_char(struct tty_struct *tty, unsigned char ch);
-
-static void mp_put_chars(struct tty_struct *tty);
-static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static int mp_write_room(struct tty_struct *tty);
-static int mp_chars_in_buffer(struct tty_struct *tty);
-static void mp_flush_buffer(struct tty_struct *tty);
-static void mp_send_xchar(struct tty_struct *tty, char ch);
-static void mp_throttle(struct tty_struct *tty);
-static void mp_unthrottle(struct tty_struct *tty);
-static int mp_get_info(struct sb_uart_state *state, struct serial_struct *retinfo);
-static int mp_set_info(struct sb_uart_state *state, struct serial_struct *newinfo);
-static int mp_get_lsr_info(struct sb_uart_state *state, unsigned int *value);
-
-static int mp_tiocmget(struct tty_struct *tty);
-static int mp_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
-static int mp_break_ctl(struct tty_struct *tty, int break_state);
-static int mp_do_autoconfig(struct sb_uart_state *state);
-static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg);
-static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt);
-static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static void mp_set_termios(struct tty_struct *tty, struct MP_TERMIOS *old_termios);
-static void mp_close(struct tty_struct *tty, struct file *filp);
-static void mp_wait_until_sent(struct tty_struct *tty, int timeout);
-static void mp_hangup(struct tty_struct *tty);
-static void mp_update_termios(struct sb_uart_state *state);
-static int mp_block_til_ready(struct file *filp, struct sb_uart_state *state);
-static struct sb_uart_state *uart_get(struct uart_driver *drv, int line);
-static int mp_open(struct tty_struct *tty, struct file *filp);
-static const char *mp_type(struct sb_uart_port *port);
-static void mp_change_pm(struct sb_uart_state *state, int pm_state);
-static inline void mp_report_port(struct uart_driver *drv, struct sb_uart_port *port);
-static void mp_configure_port(struct uart_driver *drv, struct sb_uart_state *state, struct sb_uart_port *port);
-static void mp_unconfigure_port(struct uart_driver *drv, struct sb_uart_state *state);
-static int mp_register_driver(struct uart_driver *drv);
-static void mp_unregister_driver(struct uart_driver *drv);
-static int mp_add_one_port(struct uart_driver *drv, struct sb_uart_port *port);
-static int mp_remove_one_port(struct uart_driver *drv, struct sb_uart_port *port);
-static void autoconfig(struct mp_port *mtpt, unsigned int probeflags);
-static void autoconfig_irq(struct mp_port *mtpt);
-static void multi_stop_tx(struct sb_uart_port *port);
-static void multi_start_tx(struct sb_uart_port *port);
-static void multi_stop_rx(struct sb_uart_port *port);
-static void multi_enable_ms(struct sb_uart_port *port);
-static _INLINE_ void receive_chars(struct mp_port *mtpt, int *status );
-static _INLINE_ void transmit_chars(struct mp_port *mtpt);
-static _INLINE_ void check_modem_status(struct mp_port *mtpt);
-static inline void multi_handle_port(struct mp_port *mtpt);
-static irqreturn_t multi_interrupt(int irq, void *dev_id);
-static void serial_do_unlink(struct irq_info *i, struct mp_port *mtpt);
-static int serial_link_irq_chain(struct mp_port *mtpt);
-static void serial_unlink_irq_chain(struct mp_port *mtpt);
-static void multi_timeout(unsigned long data);
-static unsigned int multi_tx_empty(struct sb_uart_port *port);
-static unsigned int multi_get_mctrl(struct sb_uart_port *port);
-static void multi_set_mctrl(struct sb_uart_port *port, unsigned int mctrl);
-static void multi_break_ctl(struct sb_uart_port *port, int break_state);
-static int multi_startup(struct sb_uart_port *port);
-static void multi_shutdown(struct sb_uart_port *port);
-static unsigned int multi_get_divisor(struct sb_uart_port *port, unsigned int baud);
-static void multi_set_termios(struct sb_uart_port *port, struct MP_TERMIOS *termios, struct MP_TERMIOS *old);
-static void multi_pm(struct sb_uart_port *port, unsigned int state, unsigned int oldstate);
-static void multi_release_std_resource(struct mp_port *mtpt);
-static void multi_release_port(struct sb_uart_port *port);
-static int multi_request_port(struct sb_uart_port *port);
-static void multi_config_port(struct sb_uart_port *port, int flags);
-static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *ser);
-static const char *multi_type(struct sb_uart_port *port);
-static void __init multi_init_ports(void);
-static void __init multi_register_ports(struct uart_driver *drv);
-static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd);
-
-static int deep[256];
-static int deep_count;
-static int fcr_arr[256];
-static int fcr_count;
-static int ttr[256];
-static int ttr_count;
-static int rtr[256];
-static int rtr_count;
-
-module_param_array(deep,int,&deep_count,0);
-module_param_array(fcr_arr,int,&fcr_count,0);
-module_param_array(ttr,int,&ttr_count,0);
-module_param_array(rtr,int,&rtr_count,0);
-
-static _INLINE_ unsigned int serial_in(struct mp_port *mtpt, int offset)
-{
- return inb(mtpt->port.iobase + offset);
-}
-
-static _INLINE_ void serial_out(struct mp_port *mtpt, int offset, int value)
-{
- outb(value, mtpt->port.iobase + offset);
-}
-
-static _INLINE_ unsigned int read_option_register(struct mp_port *mtpt, int offset)
-{
- return inb(mtpt->option_base_addr + offset);
-}
-
-static int sb1053a_get_interface(struct mp_port *mtpt, int port_num)
-{
- unsigned long option_base_addr = mtpt->option_base_addr;
- unsigned int interface = 0;
-
- switch (port_num)
- {
- case 0:
- case 1:
- /* set GPO[1:0] = 00 */
- outb(0x00, option_base_addr + MP_OPTR_GPODR);
- break;
- case 2:
- case 3:
- /* set GPO[1:0] = 01 */
- outb(0x01, option_base_addr + MP_OPTR_GPODR);
- break;
- case 4:
- case 5:
- /* set GPO[1:0] = 10 */
- outb(0x02, option_base_addr + MP_OPTR_GPODR);
- break;
- default:
- break;
- }
-
- port_num &= 0x1;
-
- /* get interface */
- interface = inb(option_base_addr + MP_OPTR_IIR0 + port_num);
-
- /* set GPO[1:0] = 11 */
- outb(0x03, option_base_addr + MP_OPTR_GPODR);
-
- return (interface);
-}
-
-static int sb1054_get_register(struct sb_uart_port *port, int page, int reg)
-{
- int ret = 0;
- unsigned int lcr = 0;
- unsigned int mcr = 0;
- unsigned int tmp = 0;
-
- if( page <= 0)
- {
- printk(" page 0 can not use this function\n");
- return -1;
- }
-
- switch(page)
- {
- case 1:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_DLAB;
- SB105X_PUT_LCR(port, tmp);
-
- tmp = SB105X_GET_LCR(port);
-
- ret = SB105X_GET_REG(port,reg);
- SB105X_PUT_LCR(port,lcr);
- break;
- case 2:
- mcr = SB105X_GET_MCR(port);
- tmp = mcr | SB105X_MCR_P2S;
- SB105X_PUT_MCR(port,tmp);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_MCR(port,mcr);
- break;
- case 3:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_BF;
- SB105X_PUT_LCR(port,tmp);
- SB105X_PUT_REG(port,SB105X_PSR,SB105X_PSR_P3KEY);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_LCR(port,lcr);
- break;
- case 4:
- lcr = SB105X_GET_LCR(port);
- tmp = lcr | SB105X_LCR_BF;
- SB105X_PUT_LCR(port,tmp);
- SB105X_PUT_REG(port,SB105X_PSR,SB105X_PSR_P4KEY);
-
- ret = SB105X_GET_REG(port,reg);
-
- SB105X_PUT_LCR(port,lcr);
- break;
- default:
- printk(" error invalid page number \n");
- return -1;
- }
-
- return ret;
-}
-
-static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value)
-{
- int lcr = 0;
- int mcr = 0;
- int ret = 0;
-
- if( page <= 0)
- {
- printk(" page 0 can not use this function\n");
- return -1;
- }
- switch(page)
- {
- case 1:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_DLAB);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- case 2:
- mcr = SB105X_GET_MCR(port);
- SB105X_PUT_MCR(port, mcr | SB105X_MCR_P2S);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_MCR(port, mcr);
- ret = 1;
- break;
- case 3:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_BF);
- SB105X_PUT_PSR(port, SB105X_PSR_P3KEY);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- case 4:
- lcr = SB105X_GET_LCR(port);
- SB105X_PUT_LCR(port, lcr | SB105X_LCR_BF);
- SB105X_PUT_PSR(port, SB105X_PSR_P4KEY);
-
- SB105X_PUT_REG(port,reg,value);
-
- SB105X_PUT_LCR(port, lcr);
- ret = 1;
- break;
- default:
- printk(" error invalid page number \n");
- return -1;
- }
-
- return ret;
-}
-
-static int set_multidrop_mode(struct sb_uart_port *port, unsigned int mode)
-{
- int mdr = SB105XA_MDR_NPS;
-
- if (mode & MDMODE_ENABLE)
- {
- mdr |= SB105XA_MDR_MDE;
- }
-
- if (1) //(mode & MDMODE_AUTO)
- {
- int efr = 0;
- mdr |= SB105XA_MDR_AME;
- efr = sb1054_get_register(port, PAGE_3, SB105X_EFR);
- efr |= SB105X_EFR_SCD;
- sb1054_set_register(port, PAGE_3, SB105X_EFR, efr);
- }
-
- sb1054_set_register(port, PAGE_1, SB105XA_MDR, mdr);
- port->mdmode &= ~0x6;
- port->mdmode |= mode;
- printk("[%d] multidrop init: %x\n", port->line, port->mdmode);
-
- return 0;
-}
-
-static int get_multidrop_addr(struct sb_uart_port *port)
-{
- return sb1054_get_register(port, PAGE_3, SB105X_XOFF2);
-}
-
-static int set_multidrop_addr(struct sb_uart_port *port, unsigned int addr)
-{
- sb1054_set_register(port, PAGE_3, SB105X_XOFF2, addr);
-
- return 0;
-}
-
-static void SendATCommand(struct mp_port *mtpt)
-{
- // a t cr lf
- unsigned char ch[] = {0x61,0x74,0x0d,0x0a,0x0};
- unsigned char lineControl;
- unsigned char i=0;
- unsigned char Divisor = 0xc;
-
- lineControl = serial_inp(mtpt,UART_LCR);
- serial_outp(mtpt,UART_LCR,(lineControl | UART_LCR_DLAB));
- serial_outp(mtpt,UART_DLL,(Divisor & 0xff));
- serial_outp(mtpt,UART_DLM,(Divisor & 0xff00)>>8); //baudrate is 4800
-
-
- serial_outp(mtpt,UART_LCR,lineControl);
- serial_outp(mtpt,UART_LCR,0x03); // N-8-1
- serial_outp(mtpt,UART_FCR,7);
- serial_outp(mtpt,UART_MCR,0x3);
- while(ch[i]){
- while((serial_inp(mtpt,UART_LSR) & 0x60) !=0x60){
- ;
- }
- serial_outp(mtpt,0,ch[i++]);
- }
-
-
-}// end of SendATCommand()
-
-static int set_deep_fifo(struct sb_uart_port *port, int status)
-{
- int afr_status = 0;
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
-
- if(status == ENABLE)
- {
- afr_status |= SB105X_AFR_AFEN;
- }
- else
- {
- afr_status &= ~SB105X_AFR_AFEN;
- }
-
- sb1054_set_register(port,PAGE_4,SB105X_AFR,afr_status);
- sb1054_set_register(port,PAGE_4,SB105X_TTR,ttr[port->line]);
- sb1054_set_register(port,PAGE_4,SB105X_RTR,rtr[port->line]);
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
-
- return afr_status;
-}
-
-static int get_device_type(int arg)
-{
- int ret;
- ret = inb(mp_devs[arg].option_reg_addr+MP_OPTR_DIR0);
- ret = (ret & 0xf0) >> 4;
- switch (ret)
- {
- case DIR_UART_16C550:
- return PORT_16C55X;
- case DIR_UART_16C1050:
- return PORT_16C105X;
- case DIR_UART_16C1050A:
- /*
- if (mtpt->port.line < 2)
- {
- return PORT_16C105XA;
- }
- else
- {
- if (mtpt->device->device_id & 0x50)
- {
- return PORT_16C55X;
- }
- else
- {
- return PORT_16C105X;
- }
- }*/
- return PORT_16C105XA;
- default:
- return PORT_UNKNOWN;
- }
-
-}
-static int get_deep_fifo(struct sb_uart_port *port)
-{
- int afr_status = 0;
- afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
- return afr_status;
-}
-
-static int set_auto_rts(struct sb_uart_port *port, int status)
-{
- int atr_status = 0;
-
-#if 0
- int efr_status = 0;
-
- efr_status = sb1054_get_register(port, PAGE_3, SB105X_EFR);
- if(status == ENABLE)
- efr_status |= SB105X_EFR_ARTS;
- else
- efr_status &= ~SB105X_EFR_ARTS;
- sb1054_set_register(port,PAGE_3,SB105X_EFR,efr_status);
- efr_status = sb1054_get_register(port, PAGE_3, SB105X_EFR);
-#endif
-
-//ATR
- atr_status = sb1054_get_register(port, PAGE_3, SB105X_ATR);
- switch(status)
- {
- case RS422PTP:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_A80);
- break;
- case RS422MD:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- case RS485NE:
- atr_status = (SB105X_ATR_RCMS) | (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- case RS485ECHO:
- atr_status = (SB105X_ATR_TPS) | (SB105X_ATR_TCMS) | (SB105X_ATR_A80);
- break;
- }
-
- sb1054_set_register(port,PAGE_3,SB105X_ATR,atr_status);
- atr_status = sb1054_get_register(port, PAGE_3, SB105X_ATR);
-
- return atr_status;
-}
-
-static void mp_stop(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- port->ops->stop_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void __mp_start(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf &&
- !tty->stopped && !tty->hw_stopped)
- port->ops->start_tx(port);
-}
-
-static void mp_start(struct tty_struct *tty)
-{
- __mp_start(tty);
-}
-
-static void mp_tasklet_action(unsigned long data)
-{
- struct sb_uart_state *state = (struct sb_uart_state *)data;
- struct tty_struct *tty;
-
- printk("tasklet is called!\n");
- tty = state->info->tty;
- tty_wakeup(tty);
-}
-
-static inline void mp_update_mctrl(struct sb_uart_port *port, unsigned int set, unsigned int clear)
-{
- unsigned int old;
-
- old = port->mctrl;
- port->mctrl = (old & ~clear) | set;
- if (old != port->mctrl)
- port->ops->set_mctrl(port, port->mctrl);
-}
-
-#define uart_set_mctrl(port,set) mp_update_mctrl(port,set,0)
-#define uart_clear_mctrl(port,clear) mp_update_mctrl(port,0,clear)
-
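-/*
- * Bring a port up: allocate the transmit circular buffer on first use, call
- * the low-level startup hook, and (when init_hw is set) program the line
- * speed and raise DTR/RTS if a baud rate is configured.  A startup failure
- * is ignored for CAP_SYS_ADMIN so the port can still be reconfigured.
- */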
-static int mp_startup(struct sb_uart_state *state, int init_hw)
-{
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
- unsigned long page;
- int retval = 0;
-
- if (info->flags & UIF_INITIALIZED)
- return 0;
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- if (port->type == PORT_UNKNOWN)
- return 0;
-
- if (!info->xmit.buf) {
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- info->xmit.buf = (unsigned char *) page;
-
- uart_circ_clear(&info->xmit);
- }
-
- retval = port->ops->startup(port);
- if (retval == 0) {
- if (init_hw) {
- mp_change_speed(state, NULL);
-
- if (info->tty && (info->tty->termios.c_cflag & CBAUD))
- uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- }
-
- info->flags |= UIF_INITIALIZED;
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- }
-
- if (retval && capable(CAP_SYS_ADMIN))
- retval = 0;
-
- return retval;
-}
-
-static void mp_shutdown(struct sb_uart_state *state)
-{
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- if (info->flags & UIF_INITIALIZED) {
- info->flags &= ~UIF_INITIALIZED;
-
- if (!info->tty || (info->tty->termios.c_cflag & HUPCL))
- uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
- wake_up_interruptible(&info->delta_msr_wait);
-
- port->ops->shutdown(port);
-
- synchronize_irq(port->irq);
- }
- tasklet_kill(&info->tlet);
-
- if (info->xmit.buf) {
- free_page((unsigned long)info->xmit.buf);
- info->xmit.buf = NULL;
- }
-}
-
-static void mp_change_speed(struct sb_uart_state *state, struct MP_TERMIOS *old_termios)
-{
- struct tty_struct *tty = state->info->tty;
- struct sb_uart_port *port = state->port;
-
- if (!tty || port->type == PORT_UNKNOWN)
- return;
-
- if (tty->termios.c_cflag & CRTSCTS)
- state->info->flags |= UIF_CTS_FLOW;
- else
- state->info->flags &= ~UIF_CTS_FLOW;
-
- if (tty->termios.c_cflag & CLOCAL)
- state->info->flags &= ~UIF_CHECK_CD;
- else
- state->info->flags |= UIF_CHECK_CD;
-
- port->ops->set_termios(port, &tty->termios, old_termios);
-}
-
-static inline int __mp_put_char(struct sb_uart_port *port, struct circ_buf *circ, unsigned char c)
-{
- unsigned long flags;
- int ret = 0;
-
- if (!circ->buf)
- return 0;
-
- spin_lock_irqsave(&port->lock, flags);
- if (uart_circ_chars_free(circ) != 0) {
- circ->buf[circ->head] = c;
- circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
- ret = 1;
- }
- spin_unlock_irqrestore(&port->lock, flags);
- return ret;
-}
-
-static int mp_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return __mp_put_char(state->port, &state->info->xmit, ch);
-}
-
-static void mp_put_chars(struct tty_struct *tty)
-{
- mp_start(tty);
-}
-
-static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
- struct circ_buf *circ;
- int c, ret = 0;
-
- if (!state || !state->info) {
- return -EL3HLT;
- }
-
- port = state->port;
- circ = &state->info->xmit;
-
- if (!circ->buf)
- return 0;
-
- while (1) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
-
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
- mp_start(tty);
- return ret;
-}
-
-static int mp_write_room(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return uart_circ_chars_free(&state->info->xmit);
-}
-
-static int mp_chars_in_buffer(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- return uart_circ_chars_pending(&state->info->xmit);
-}
-
-static void mp_flush_buffer(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
- unsigned long flags;
-
- if (!state || !state->info) {
- return;
- }
-
- port = state->port;
- spin_lock_irqsave(&port->lock, flags);
- uart_circ_clear(&state->info->xmit);
- spin_unlock_irqrestore(&port->lock, flags);
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-}
-
-static void mp_send_xchar(struct tty_struct *tty, char ch)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long flags;
-
- if (port->ops->send_xchar)
- port->ops->send_xchar(port, ch);
- else {
- port->x_char = ch;
- if (ch) {
- spin_lock_irqsave(&port->lock, flags);
- port->ops->start_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- }
- }
-}
-
-static void mp_throttle(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- if (I_IXOFF(tty))
- mp_send_xchar(tty, STOP_CHAR(tty));
-
- if (tty->termios.c_cflag & CRTSCTS)
- uart_clear_mctrl(state->port, TIOCM_RTS);
-}
-
-static void mp_unthrottle(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- if (I_IXOFF(tty)) {
- if (port->x_char)
- port->x_char = 0;
- else
- mp_send_xchar(tty, START_CHAR(tty));
- }
-
- if (tty->termios.c_cflag & CRTSCTS)
- uart_set_mctrl(port, TIOCM_RTS);
-}
-
-static int mp_get_info(struct sb_uart_state *state, struct serial_struct *retinfo)
-{
- struct sb_uart_port *port = state->port;
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = port->type;
- tmp.line = port->line;
- tmp.port = port->iobase;
- if (HIGH_BITS_OFFSET)
- tmp.port_high = (long) port->iobase >> HIGH_BITS_OFFSET;
- tmp.irq = port->irq;
- tmp.flags = port->flags;
- tmp.xmit_fifo_size = port->fifosize;
- tmp.baud_base = port->uartclk / 16;
- tmp.close_delay = state->close_delay;
- tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE :
- state->closing_wait;
- tmp.custom_divisor = port->custom_divisor;
- tmp.hub6 = port->hub6;
- tmp.io_type = port->iotype;
- tmp.iomem_reg_shift = port->regshift;
- tmp.iomem_base = (void *)port->mapbase;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int mp_set_info(struct sb_uart_state *state, struct serial_struct *newinfo)
-{
- struct serial_struct new_serial;
- struct sb_uart_port *port = state->port;
- unsigned long new_port;
- unsigned int change_irq, change_port, closing_wait;
- unsigned int old_custom_divisor;
- unsigned int old_flags, new_flags;
- int retval = 0;
-
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
- new_port = new_serial.port;
- if (HIGH_BITS_OFFSET)
- new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
-
- new_serial.irq = irq_canonicalize(new_serial.irq);
-
- closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- USF_CLOSING_WAIT_NONE : new_serial.closing_wait;
- MP_STATE_LOCK(state);
-
- change_irq = new_serial.irq != port->irq;
- change_port = new_port != port->iobase ||
- (unsigned long)new_serial.iomem_base != port->mapbase ||
- new_serial.hub6 != port->hub6 ||
- new_serial.io_type != port->iotype ||
- new_serial.iomem_reg_shift != port->regshift ||
- new_serial.type != port->type;
- old_flags = port->flags;
- new_flags = new_serial.flags;
- old_custom_divisor = port->custom_divisor;
-
- if (!capable(CAP_SYS_ADMIN)) {
- retval = -EPERM;
- if (change_irq || change_port ||
- (new_serial.baud_base != port->uartclk / 16) ||
- (new_serial.close_delay != state->close_delay) ||
- (closing_wait != state->closing_wait) ||
- (new_serial.xmit_fifo_size != port->fifosize) ||
- (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
- goto exit;
- port->flags = ((port->flags & ~UPF_USR_MASK) |
- (new_flags & UPF_USR_MASK));
- port->custom_divisor = new_serial.custom_divisor;
- goto check_and_exit;
- }
-
- if (port->ops->verify_port)
- retval = port->ops->verify_port(port, &new_serial);
-
- if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) ||
- (new_serial.baud_base < 9600))
- retval = -EINVAL;
-
- if (retval)
- goto exit;
-
- if (change_port || change_irq) {
- retval = -EBUSY;
-
- if (uart_users(state) > 1)
- goto exit;
-
- mp_shutdown(state);
- }
-
- if (change_port) {
- unsigned long old_iobase, old_mapbase;
- unsigned int old_type, old_iotype, old_hub6, old_shift;
-
- old_iobase = port->iobase;
- old_mapbase = port->mapbase;
- old_type = port->type;
- old_hub6 = port->hub6;
- old_iotype = port->iotype;
- old_shift = port->regshift;
-
- if (old_type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- port->iobase = new_port;
- port->type = new_serial.type;
- port->hub6 = new_serial.hub6;
- port->iotype = new_serial.io_type;
- port->regshift = new_serial.iomem_reg_shift;
- port->mapbase = (unsigned long)new_serial.iomem_base;
-
- if (port->type != PORT_UNKNOWN) {
- retval = port->ops->request_port(port);
- } else {
- retval = 0;
- }
-
- if (retval && old_type != PORT_UNKNOWN) {
- port->iobase = old_iobase;
- port->type = old_type;
- port->hub6 = old_hub6;
- port->iotype = old_iotype;
- port->regshift = old_shift;
- port->mapbase = old_mapbase;
- retval = port->ops->request_port(port);
- if (retval)
- port->type = PORT_UNKNOWN;
-
- retval = -EBUSY;
- }
- }
-
- port->irq = new_serial.irq;
- port->uartclk = new_serial.baud_base * 16;
- port->flags = (port->flags & ~UPF_CHANGE_MASK) |
- (new_flags & UPF_CHANGE_MASK);
- port->custom_divisor = new_serial.custom_divisor;
- state->close_delay = new_serial.close_delay;
- state->closing_wait = closing_wait;
- port->fifosize = new_serial.xmit_fifo_size;
- if (state->info->tty)
- state->info->tty->low_latency =
- (port->flags & UPF_LOW_LATENCY) ? 1 : 0;
-
-check_and_exit:
- retval = 0;
- if (port->type == PORT_UNKNOWN)
- goto exit;
- if (state->info->flags & UIF_INITIALIZED) {
- if (((old_flags ^ port->flags) & UPF_SPD_MASK) ||
- old_custom_divisor != port->custom_divisor) {
- if (port->flags & UPF_SPD_MASK) {
- printk(KERN_NOTICE
- "%s sets custom speed on ttyMP%d. This "
- "is deprecated.\n", current->comm,
- port->line);
- }
- mp_change_speed(state, NULL);
- }
- } else
- retval = mp_startup(state, 1);
-exit:
- MP_STATE_UNLOCK(state);
- return retval;
-}
-
-
-static int mp_get_lsr_info(struct sb_uart_state *state, unsigned int *value)
-{
- struct sb_uart_port *port = state->port;
- unsigned int result;
-
- result = port->ops->tx_empty(port);
-
- if (port->x_char ||
- ((uart_circ_chars_pending(&state->info->xmit) > 0) &&
- !state->info->tty->stopped && !state->info->tty->hw_stopped))
- result &= ~TIOCSER_TEMT;
-
- return put_user(result, value);
-}
-
-static int mp_tiocmget(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- int result = -EIO;
-
- MP_STATE_LOCK(state);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- result = port->mctrl;
- spin_lock_irq(&port->lock);
- result |= port->ops->get_mctrl(port);
- spin_unlock_irq(&port->lock);
- }
- MP_STATE_UNLOCK(state);
- return result;
-}
-
-static int mp_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- int ret = -EIO;
-
-
- MP_STATE_LOCK(state);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- mp_update_mctrl(port, set, clear);
- ret = 0;
- }
- MP_STATE_UNLOCK(state);
-
- return ret;
-}
-
-static int mp_break_ctl(struct tty_struct *tty, int break_state)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
-
- MP_STATE_LOCK(state);
-
- if (port->type != PORT_UNKNOWN)
- port->ops->break_ctl(port, break_state);
-
- MP_STATE_UNLOCK(state);
- return 0;
-}
-
-static int mp_do_autoconfig(struct sb_uart_state *state)
-{
- struct sb_uart_port *port = state->port;
- int flags, ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (mutex_lock_interruptible(&state->mutex))
- return -ERESTARTSYS;
- ret = -EBUSY;
- if (uart_users(state) == 1) {
- mp_shutdown(state);
-
- if (port->type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- flags = UART_CONFIG_TYPE;
- if (port->flags & UPF_AUTO_IRQ)
- flags |= UART_CONFIG_IRQ;
-
- port->ops->config_port(port, flags);
-
- ret = mp_startup(state, 1);
- }
- MP_STATE_UNLOCK(state);
- return ret;
-}
-
-static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
-{
- struct sb_uart_port *port = state->port;
- DECLARE_WAITQUEUE(wait, current);
- struct sb_uart_icount cprev, cnow;
- int ret;
-
- spin_lock_irq(&port->lock);
- memcpy(&cprev, &port->icount, sizeof(struct sb_uart_icount));
-
- port->ops->enable_ms(port);
- spin_unlock_irq(&port->lock);
-
- add_wait_queue(&state->info->delta_msr_wait, &wait);
- for (;;) {
- spin_lock_irq(&port->lock);
- memcpy(&cnow, &port->icount, sizeof(struct sb_uart_icount));
- spin_unlock_irq(&port->lock);
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
- ret = 0;
- break;
- }
-
- schedule();
-
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- cprev = cnow;
- }
-
- current->state = TASK_RUNNING;
- remove_wait_queue(&state->info->delta_msr_wait, &wait);
-
- return ret;
-}
-
-static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
-{
- struct serial_icounter_struct icount = {};
- struct sb_uart_icount cnow;
- struct sb_uart_port *port = state->port;
-
- spin_lock_irq(&port->lock);
- memcpy(&cnow, &port->icount, sizeof(struct sb_uart_icount));
- spin_unlock_irq(&port->lock);
-
- icount.cts = cnow.cts;
- icount.dsr = cnow.dsr;
- icount.rng = cnow.rng;
- icount.dcd = cnow.dcd;
- icount.rx = cnow.rx;
- icount.tx = cnow.tx;
- icount.frame = cnow.frame;
- icount.overrun = cnow.overrun;
- icount.parity = cnow.parity;
- icount.brk = cnow.brk;
- icount.buf_overrun = cnow.buf_overrun;
-
- return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0;
-}
-
-static int mp_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct mp_port *info = (struct mp_port *)state->port;
- int ret = -ENOIOCTLCMD;
-
-
- switch (cmd) {
- case TIOCSMULTIDROP:
- /* enable or disable multi-drop mode; the default operation mode is H/W mode */
- if (info->port.type == PORT_16C105XA)
- {
- //arg &= ~0x6;
- //state->port->mdmode = 0;
- return set_multidrop_mode((struct sb_uart_port *)info, (unsigned int)arg);
- }
- ret = -ENOTSUPP;
- break;
- case GETDEEPFIFO:
- ret = get_deep_fifo(state->port);
- return ret;
- case SETDEEPFIFO:
- ret = set_deep_fifo(state->port,arg);
- deep[state->port->line] = arg;
- return ret;
- case SETTTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_4,SB105X_TTR,arg);
- ttr[state->port->line] = arg;
- }
- return ret;
- case SETRTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_4,SB105X_RTR,arg);
- rtr[state->port->line] = arg;
- }
- return ret;
- case GETTTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_get_register(state->port,PAGE_4,SB105X_TTR);
- }
- return ret;
- case GETRTR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_get_register(state->port,PAGE_4,SB105X_RTR);
- }
- return ret;
-
- case SETFCR:
- if (info->port.type == PORT_16C105X || info->port.type == PORT_16C105XA){
- ret = sb1054_set_register(state->port,PAGE_1,SB105X_FCR,arg);
- }
- else{
- serial_out(info,2,arg);
- }
-
- return ret;
- case TIOCSMDADDR:
- /* set multi-drop address */
- if (info->port.type == PORT_16C105XA)
- {
- state->port->mdmode |= MDMODE_ADDR;
- return set_multidrop_addr((struct sb_uart_port *)info, (unsigned int)arg);
- }
- ret = -ENOTSUPP;
- break;
-
- case TIOCGMDADDR:
- /* get multi-drop address */
- if ((info->port.type == PORT_16C105XA) && (state->port->mdmode & MDMODE_ADDR))
- {
- return get_multidrop_addr((struct sb_uart_port *)info);
- }
- ret = -ENOTSUPP;
- break;
-
- case TIOCSENDADDR:
- /* send address in multi-drop mode */
- if ((info->port.type == PORT_16C105XA)
- && (state->port->mdmode & (MDMODE_ENABLE)))
- {
- if (mp_chars_in_buffer(tty) > 0)
- {
- tty_wait_until_sent(tty, 0);
- }
- //while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
- //while (sb1054_get_register(state->port, PAGE_2, SB105X_TFCR) != 0);
- while ((serial_in(info, UART_LSR) & 0x60) != 0x60);
- serial_out(info, UART_SCR, (int)arg);
- }
- break;
-
- case TIOCGSERIAL:
- ret = mp_get_info(state, (struct serial_struct *)arg);
- break;
-
- case TIOCSSERIAL:
- ret = mp_set_info(state, (struct serial_struct *)arg);
- break;
-
- case TIOCSERCONFIG:
- ret = mp_do_autoconfig(state);
- break;
-
- case TIOCSERGWILD: /* obsolete */
- case TIOCSERSWILD: /* obsolete */
- ret = 0;
- break;
- /* for Multiport */
- case TIOCGNUMOFPORT: /* Get number of ports */
- return NR_PORTS;
- case TIOCGGETDEVID:
- return mp_devs[arg].device_id;
- case TIOCGGETREV:
- return mp_devs[arg].revision;
- case TIOCGGETNRPORTS:
- return mp_devs[arg].nr_ports;
- case TIOCGGETBDNO:
- return NR_BOARD;
- case TIOCGGETINTERFACE:
- if (mp_devs[arg].revision == 0xc0)
- {
- /* for SB16C1053APCI */
- return (sb1053a_get_interface(info, info->port.line));
- }
- else
- {
- return (inb(mp_devs[arg].option_reg_addr+MP_OPTR_IIR0+(state->port->line/8)));
- }
- case TIOCGGETPORTTYPE:
- ret = get_device_type(arg);
- return ret;
- case TIOCSMULTIECHO: /* set to multi-drop mode (RS422) or echo mode (RS485) */
- outb( ( inb(info->interface_config_addr) & ~0x03 ) | 0x01 ,
- info->interface_config_addr);
- return 0;
- case TIOCSPTPNOECHO: /* set to point-to-point mode (RS422) or no-echo mode (RS485) */
- outb( ( inb(info->interface_config_addr) & ~0x03 ) ,
- info->interface_config_addr);
- return 0;
- }
-
- if (ret != -ENOIOCTLCMD)
- goto out;
-
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- ret = -EIO;
- goto out;
- }
-
- switch (cmd) {
- case TIOCMIWAIT:
- ret = mp_wait_modem_status(state, arg);
- break;
-
- case TIOCGICOUNT:
- ret = mp_get_count(state, (struct serial_icounter_struct *)arg);
- break;
- }
-
- if (ret != -ENOIOCTLCMD)
- goto out;
-
- MP_STATE_LOCK(state);
- switch (cmd) {
- case TIOCSERGETLSR: /* Get line status register */
- ret = mp_get_lsr_info(state, (unsigned int *)arg);
- break;
-
- default: {
- struct sb_uart_port *port = state->port;
- if (port->ops->ioctl)
- ret = port->ops->ioctl(port, cmd, arg);
- break;
- }
- }
-
- MP_STATE_UNLOCK(state);
-out:
- return ret;
-}
-
-static void mp_set_termios(struct tty_struct *tty, struct MP_TERMIOS *old_termios)
-{
- struct sb_uart_state *state = tty->driver_data;
- unsigned long flags;
- unsigned int cflag = tty->termios.c_cflag;
-
-#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
- if ((cflag ^ old_termios->c_cflag) == 0 &&
- RELEVANT_IFLAG(tty->termios.c_iflag ^ old_termios->c_iflag) == 0)
- return;
-
- mp_change_speed(state, old_termios);
-
- if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
- uart_clear_mctrl(state->port, TIOCM_RTS | TIOCM_DTR);
-
- if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
- unsigned int mask = TIOCM_DTR;
- if (!(cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags))
- mask |= TIOCM_RTS;
- uart_set_mctrl(state->port, mask);
- }
-
- if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
- spin_lock_irqsave(&state->port->lock, flags);
- tty->hw_stopped = 0;
- __mp_start(tty);
- spin_unlock_irqrestore(&state->port->lock, flags);
- }
-
- if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
- spin_lock_irqsave(&state->port->lock, flags);
- if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) {
- tty->hw_stopped = 1;
- state->port->ops->stop_tx(state->port);
- }
- spin_unlock_irqrestore(&state->port->lock, flags);
- }
-}
-
-static void mp_close(struct tty_struct *tty, struct file *filp)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port;
-
- printk("mp_close!\n");
- if (!state || !state->port)
- return;
-
- port = state->port;
-
- printk("close1 %d\n", __LINE__);
- MP_STATE_LOCK(state);
-
- printk("close2 %d\n", __LINE__);
- if (tty_hung_up_p(filp))
- goto done;
-
- printk("close3 %d\n", __LINE__);
- if ((tty->count == 1) && (state->count != 1)) {
- printk("mp_close: bad serial port count; tty->count is 1, "
- "state->count is %d\n", state->count);
- state->count = 1;
- }
- printk("close4 %d\n", __LINE__);
- if (--state->count < 0) {
- printk("rs_close: bad serial port count for ttyMP%d: %d\n",
- port->line, state->count);
- state->count = 0;
- }
- if (state->count)
- goto done;
-
- tty->closing = 1;
-
- printk("close5 %d\n", __LINE__);
- if (state->closing_wait != USF_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, state->closing_wait);
-
- printk("close6 %d\n", __LINE__);
- if (state->info->flags & UIF_INITIALIZED) {
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- port->ops->stop_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
- mp_wait_until_sent(tty, port->timeout);
- }
- printk("close7 %d\n", __LINE__);
-
- mp_shutdown(state);
- printk("close8 %d\n", __LINE__);
- mp_flush_buffer(tty);
- tty_ldisc_flush(tty);
- tty->closing = 0;
- state->info->tty = NULL;
- if (state->info->blocked_open)
- {
- if (state->close_delay)
- {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(state->close_delay);
- }
- }
- else
- {
- mp_change_pm(state, 3);
- }
- printk("close8 %d\n", __LINE__);
-
- state->info->flags &= ~UIF_NORMAL_ACTIVE;
- wake_up_interruptible(&state->info->open_wait);
-
-done:
- printk("close done\n");
- MP_STATE_UNLOCK(state);
- module_put(THIS_MODULE);
-}
-
-static void mp_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct sb_uart_state *state = tty->driver_data;
- struct sb_uart_port *port = state->port;
- unsigned long char_time, expire;
-
- if (port->type == PORT_UNKNOWN || port->fifosize == 0)
- return;
-
- char_time = (port->timeout - HZ/50) / port->fifosize;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
- if (timeout && timeout < char_time)
- char_time = timeout;
-
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
-
- expire = jiffies + timeout;
-
- while (!port->ops->tx_empty(port)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
- if (signal_pending(current))
- break;
- if (time_after(jiffies, expire))
- break;
- }
- set_current_state(TASK_RUNNING); /* might not be needed */
-}
-
-static void mp_hangup(struct tty_struct *tty)
-{
- struct sb_uart_state *state = tty->driver_data;
-
- MP_STATE_LOCK(state);
- if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) {
- mp_flush_buffer(tty);
- mp_shutdown(state);
- state->count = 0;
- state->info->flags &= ~UIF_NORMAL_ACTIVE;
- state->info->tty = NULL;
- wake_up_interruptible(&state->info->open_wait);
- wake_up_interruptible(&state->info->delta_msr_wait);
- }
- MP_STATE_UNLOCK(state);
-}
-
-static void mp_update_termios(struct sb_uart_state *state)
-{
- struct tty_struct *tty = state->info->tty;
- struct sb_uart_port *port = state->port;
-
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
- mp_change_speed(state, NULL);
-
- if (tty->termios.c_cflag & CBAUD)
- uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
- }
-}
-
-static int mp_block_til_ready(struct file *filp, struct sb_uart_state *state)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct sb_uart_info *info = state->info;
- struct sb_uart_port *port = state->port;
- unsigned int mctrl;
-
- info->blocked_open++;
- state->count--;
-
- add_wait_queue(&info->open_wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (tty_hung_up_p(filp) || info->tty == NULL)
- break;
-
- if (!(info->flags & UIF_INITIALIZED))
- break;
-
- if ((filp->f_flags & O_NONBLOCK) ||
- (info->tty->termios.c_cflag & CLOCAL) ||
- (info->tty->flags & (1 << TTY_IO_ERROR))) {
- break;
- }
-
- if (info->tty->termios.c_cflag & CBAUD)
- uart_set_mctrl(port, TIOCM_DTR);
-
- spin_lock_irq(&port->lock);
- port->ops->enable_ms(port);
- mctrl = port->ops->get_mctrl(port);
- spin_unlock_irq(&port->lock);
- if (mctrl & TIOCM_CAR)
- break;
-
- MP_STATE_UNLOCK(state);
- schedule();
- MP_STATE_LOCK(state);
-
- if (signal_pending(current))
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->open_wait, &wait);
-
- state->count++;
- info->blocked_open--;
-
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (!info->tty || tty_hung_up_p(filp))
- return -EAGAIN;
-
- return 0;
-}
-
-static struct sb_uart_state *uart_get(struct uart_driver *drv, int line)
-{
- struct sb_uart_state *state;
-
- MP_MUTEX_LOCK(mp_mutex);
- state = drv->state + line;
- if (mutex_lock_interruptible(&state->mutex)) {
- state = ERR_PTR(-ERESTARTSYS);
- goto out;
- }
- state->count++;
- if (!state->port) {
- state->count--;
- MP_STATE_UNLOCK(state);
- state = ERR_PTR(-ENXIO);
- goto out;
- }
-
- if (!state->info) {
- state->info = kmalloc(sizeof(struct sb_uart_info), GFP_KERNEL);
- if (state->info) {
- memset(state->info, 0, sizeof(struct sb_uart_info));
- init_waitqueue_head(&state->info->open_wait);
- init_waitqueue_head(&state->info->delta_msr_wait);
-
- state->port->info = state->info;
-
- tasklet_init(&state->info->tlet, mp_tasklet_action,
- (unsigned long)state);
- } else {
- state->count--;
- MP_STATE_UNLOCK(state);
- state = ERR_PTR(-ENOMEM);
- }
- }
-
-out:
- MP_MUTEX_UNLOCK(mp_mutex);
- return state;
-}
-
-static int mp_open(struct tty_struct *tty, struct file *filp)
-{
- struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
- struct sb_uart_state *state;
- int retval;
- int line = tty->index;
- struct mp_port *mtpt;
-
- retval = -ENODEV;
- if (line >= tty->driver->num)
- goto fail;
-
- state = uart_get(drv, line);
-
- if (IS_ERR(state)) {
- retval = PTR_ERR(state);
- goto fail;
- }
-
- mtpt = (struct mp_port *)state->port;
-
- tty->driver_data = state;
- tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty->alt_speed = 0;
- state->info->tty = tty;
-
- if (tty_hung_up_p(filp)) {
- retval = -EAGAIN;
- state->count--;
- MP_STATE_UNLOCK(state);
- goto fail;
- }
-
- if (state->count == 1)
- mp_change_pm(state, 0);
-
- retval = mp_startup(state, 0);
-
- if (retval == 0)
- retval = mp_block_til_ready(filp, state);
- MP_STATE_UNLOCK(state);
-
- if (retval == 0 && !(state->info->flags & UIF_NORMAL_ACTIVE)) {
- state->info->flags |= UIF_NORMAL_ACTIVE;
-
- mp_update_termios(state);
- }
-
- uart_clear_mctrl(state->port, TIOCM_RTS);
- try_module_get(THIS_MODULE);
-fail:
- return retval;
-}
-
-
-static const char *mp_type(struct sb_uart_port *port)
-{
- const char *str = NULL;
-
- if (port->ops->type)
- str = port->ops->type(port);
-
- if (!str)
- str = "unknown";
-
- return str;
-}
-
-static void mp_change_pm(struct sb_uart_state *state, int pm_state)
-{
- struct sb_uart_port *port = state->port;
- if (port->ops->pm)
- port->ops->pm(port, pm_state, state->pm_state);
- state->pm_state = pm_state;
-}
-
-static inline void mp_report_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- char address[64];
-
- switch (port->iotype) {
- case UPIO_PORT:
- snprintf(address, sizeof(address),"I/O 0x%x", port->iobase);
- break;
- case UPIO_HUB6:
- snprintf(address, sizeof(address),"I/O 0x%x offset 0x%x", port->iobase, port->hub6);
- break;
- case UPIO_MEM:
- snprintf(address, sizeof(address),"MMIO 0x%lx", port->mapbase);
- break;
- default:
- strlcpy(address, "*unknown*", sizeof(address));
- break;
- }
-
- printk( "%s%d at %s (irq = %d) is a %s\n",
- drv->dev_name, port->line, address, port->irq, mp_type(port));
-
-}
-
-static void mp_configure_port(struct uart_driver *drv, struct sb_uart_state *state, struct sb_uart_port *port)
-{
- unsigned int flags;
-
-
- if (!port->iobase && !port->mapbase && !port->membase)
- {
- DPRINTK("%s error \n",__FUNCTION__);
- return;
- }
- flags = UART_CONFIG_TYPE;
- if (port->flags & UPF_AUTO_IRQ)
- flags |= UART_CONFIG_IRQ;
- if (port->flags & UPF_BOOT_AUTOCONF) {
- port->type = PORT_UNKNOWN;
- port->ops->config_port(port, flags);
- }
-
- if (port->type != PORT_UNKNOWN) {
- unsigned long flags;
-
- mp_report_port(drv, port);
-
- spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, 0);
- spin_unlock_irqrestore(&port->lock, flags);
-
- mp_change_pm(state, 3);
- }
-}
-
-static void mp_unconfigure_port(struct uart_driver *drv, struct sb_uart_state *state)
-{
- struct sb_uart_port *port = state->port;
- struct sb_uart_info *info = state->info;
-
- if (info && info->tty)
- tty_hangup(info->tty);
-
- MP_STATE_LOCK(state);
-
- state->info = NULL;
-
- if (port->type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- port->type = PORT_UNKNOWN;
-
- if (info) {
- tasklet_kill(&info->tlet);
- kfree(info);
- }
-
- MP_STATE_UNLOCK(state);
-}
-static struct tty_operations mp_ops = {
- .open = mp_open,
- .close = mp_close,
- .write = mp_write,
- .put_char = mp_put_char,
- .flush_chars = mp_put_chars,
- .write_room = mp_write_room,
- .chars_in_buffer= mp_chars_in_buffer,
- .flush_buffer = mp_flush_buffer,
- .ioctl = mp_ioctl,
- .throttle = mp_throttle,
- .unthrottle = mp_unthrottle,
- .send_xchar = mp_send_xchar,
- .set_termios = mp_set_termios,
- .stop = mp_stop,
- .start = mp_start,
- .hangup = mp_hangup,
- .break_ctl = mp_break_ctl,
- .wait_until_sent= mp_wait_until_sent,
-#ifdef CONFIG_PROC_FS
- .proc_fops = NULL,
-#endif
- .tiocmget = mp_tiocmget,
- .tiocmset = mp_tiocmset,
-};
-
-static int mp_register_driver(struct uart_driver *drv)
-{
- struct tty_driver *normal = NULL;
- int i, retval;
-
- drv->state = kmalloc(sizeof(struct sb_uart_state) * drv->nr, GFP_KERNEL);
- retval = -ENOMEM;
- if (!drv->state)
- {
- printk("SB PCI Error: Kernel memory allocation error!\n");
- goto out;
- }
- memset(drv->state, 0, sizeof(struct sb_uart_state) * drv->nr);
-
- normal = alloc_tty_driver(drv->nr);
- if (!normal)
- {
- printk("SB PCI Error: tty allocation error!\n");
- goto out;
- }
-
- drv->tty_driver = normal;
-
- normal->owner = drv->owner;
- normal->magic = TTY_DRIVER_MAGIC;
- normal->driver_name = drv->driver_name;
- normal->name = drv->dev_name;
- normal->major = drv->major;
- normal->minor_start = drv->minor;
-
- normal->num = MAX_MP_PORT ;
-
- normal->type = TTY_DRIVER_TYPE_SERIAL;
- normal->subtype = SERIAL_TYPE_NORMAL;
- normal->init_termios = tty_std_termios;
- normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- normal->driver_state = drv;
-
- tty_set_operations(normal, &mp_ops);
-
- for (i = 0; i < drv->nr; i++) {
- struct sb_uart_state *state = drv->state + i;
-
- state->close_delay = 500;
- state->closing_wait = 30000;
-
- mutex_init(&state->mutex);
- }
-
- retval = tty_register_driver(normal);
-out:
- if (retval < 0) {
- printk("Register tty driver Fail!\n");
- put_tty_driver(normal);
- kfree(drv->state);
- }
-
- return retval;
-}
-
-void mp_unregister_driver(struct uart_driver *drv)
-{
- struct tty_driver *normal = NULL;
-
- normal = drv->tty_driver;
-
- if (!normal)
- {
- return;
- }
-
- tty_unregister_driver(normal);
- put_tty_driver(normal);
- drv->tty_driver = NULL;
-
-
- kfree(drv->state);
-
-}
-
-static int mp_add_one_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- struct sb_uart_state *state;
- int ret = 0;
-
-
- if (port->line >= drv->nr)
- return -EINVAL;
-
- state = drv->state + port->line;
-
- MP_MUTEX_LOCK(mp_mutex);
- if (state->port) {
- ret = -EINVAL;
- goto out;
- }
-
- state->port = port;
-
- spin_lock_init(&port->lock);
- port->cons = drv->cons;
- port->info = state->info;
-
- mp_configure_port(drv, state, port);
-
- tty_register_device(drv->tty_driver, port->line, port->dev);
-
-out:
- MP_MUTEX_UNLOCK(mp_mutex);
-
-
- return ret;
-}
-
-static int mp_remove_one_port(struct uart_driver *drv, struct sb_uart_port *port)
-{
- struct sb_uart_state *state = drv->state + port->line;
-
- if (state->port != port)
- printk(KERN_ALERT "Removing wrong port: %p != %p\n",
- state->port, port);
-
- MP_MUTEX_LOCK(mp_mutex);
-
- tty_unregister_device(drv->tty_driver, port->line);
-
- mp_unconfigure_port(drv, state);
- state->port = NULL;
- MP_MUTEX_UNLOCK(mp_mutex);
-
- return 0;
-}
-
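-/*
- * Identify the UART type: run the IER write/readback and MCR loopback sanity
- * checks, then read the board's DIR option register to map the port to a
- * 16C550/16C1050/16C1050A core, falling back to the IIR FIFO bits when the
- * type is still unknown.
- */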
-static void autoconfig(struct mp_port *mtpt, unsigned int probeflags)
-{
- unsigned char status1, scratch, scratch2, scratch3;
- unsigned char save_lcr, save_mcr;
- unsigned long flags;
-
- unsigned char u_type;
- unsigned char b_ret = 0;
-
- if (!mtpt->port.iobase && !mtpt->port.mapbase && !mtpt->port.membase)
- return;
-
- DEBUG_AUTOCONF("ttyMP%d: autoconf (0x%04x, 0x%p): ",
- mtpt->port.line, mtpt->port.iobase, mtpt->port.membase);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
-
- if (!(mtpt->port.flags & UPF_BUGGY_UART)) {
- scratch = serial_inp(mtpt, UART_IER);
- serial_outp(mtpt, UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = serial_inp(mtpt, UART_IER) & 0x0f;
- serial_outp(mtpt, UART_IER, 0x0F);
-#ifdef __i386__
- outb(0, 0x080);
-#endif
- scratch3 = serial_inp(mtpt, UART_IER) & 0x0F;
- serial_outp(mtpt, UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0F) {
- DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
- scratch2, scratch3);
- goto out;
- }
- }
-
- save_mcr = serial_in(mtpt, UART_MCR);
- save_lcr = serial_in(mtpt, UART_LCR);
-
- if (!(mtpt->port.flags & UPF_SKIP_TEST)) {
- serial_outp(mtpt, UART_MCR, UART_MCR_LOOP | 0x0A);
- status1 = serial_inp(mtpt, UART_MSR) & 0xF0;
- serial_outp(mtpt, UART_MCR, save_mcr);
- if (status1 != 0x90) {
- DEBUG_AUTOCONF("LOOP test failed (%02x) ",
- status1);
- goto out;
- }
- }
-
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
-
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- scratch = serial_in(mtpt, UART_IIR) >> 6;
-
- DEBUG_AUTOCONF("iir=%d ", scratch);
- if(mtpt->device->nr_ports >= 8)
- b_ret = read_option_register(mtpt,(MP_OPTR_DIR0 + ((mtpt->port.line)/8)));
- else
- b_ret = read_option_register(mtpt,MP_OPTR_DIR0);
- u_type = (b_ret & 0xf0) >> 4;
- if(mtpt->port.type == PORT_UNKNOWN )
- {
- switch (u_type)
- {
- case DIR_UART_16C550:
- mtpt->port.type = PORT_16C55X;
- break;
- case DIR_UART_16C1050:
- mtpt->port.type = PORT_16C105X;
- break;
- case DIR_UART_16C1050A:
- if (mtpt->port.line < 2)
- {
- mtpt->port.type = PORT_16C105XA;
- }
- else
- {
- if (mtpt->device->device_id & 0x50)
- {
- mtpt->port.type = PORT_16C55X;
- }
- else
- {
- mtpt->port.type = PORT_16C105X;
- }
- }
- break;
- default:
- mtpt->port.type = PORT_UNKNOWN;
- break;
- }
- }
-
- if(mtpt->port.type == PORT_UNKNOWN )
- {
-printk("unknow2\n");
- switch (scratch) {
- case 0:
- case 1:
- mtpt->port.type = PORT_UNKNOWN;
- break;
- case 2:
- case 3:
- mtpt->port.type = PORT_16C55X;
- break;
- }
- }
-
- serial_outp(mtpt, UART_LCR, save_lcr);
-
- mtpt->port.fifosize = uart_config[mtpt->port.type].dfl_xmit_fifo_size;
- mtpt->capabilities = uart_config[mtpt->port.type].flags;
-
- if (mtpt->port.type == PORT_UNKNOWN)
- goto out;
- serial_outp(mtpt, UART_MCR, save_mcr);
- serial_outp(mtpt, UART_FCR, (UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT));
- serial_outp(mtpt, UART_FCR, 0);
- (void)serial_in(mtpt, UART_RX);
- serial_outp(mtpt, UART_IER, 0);
-
-out:
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
- DEBUG_AUTOCONF("type=%s\n", uart_config[mtpt->port.type].name);
-}
-
-static void autoconfig_irq(struct mp_port *mtpt)
-{
- unsigned char save_mcr, save_ier;
- unsigned long irqs;
- int irq;
-
- /* forget possible initially masked and pending IRQ */
- probe_irq_off(probe_irq_on());
- save_mcr = serial_inp(mtpt, UART_MCR);
- save_ier = serial_inp(mtpt, UART_IER);
- serial_outp(mtpt, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
-
- irqs = probe_irq_on();
- serial_outp(mtpt, UART_MCR, 0);
- serial_outp(mtpt, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
-
- serial_outp(mtpt, UART_IER, 0x0f); /* enable all intrs */
- (void)serial_inp(mtpt, UART_LSR);
- (void)serial_inp(mtpt, UART_RX);
- (void)serial_inp(mtpt, UART_IIR);
- (void)serial_inp(mtpt, UART_MSR);
- serial_outp(mtpt, UART_TX, 0xFF);
- irq = probe_irq_off(irqs);
-
- serial_outp(mtpt, UART_MCR, save_mcr);
- serial_outp(mtpt, UART_IER, save_ier);
-
- mtpt->port.irq = (irq > 0) ? irq : 0;
-}
-
-static void multi_stop_tx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- if (mtpt->ier & UART_IER_THRI) {
- mtpt->ier &= ~UART_IER_THRI;
- serial_out(mtpt, UART_IER, mtpt->ier);
- }
-
- tasklet_schedule(&port->info->tlet);
-}
-
-static void multi_start_tx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- if (!(mtpt->ier & UART_IER_THRI)) {
- mtpt->ier |= UART_IER_THRI;
- serial_out(mtpt, UART_IER, mtpt->ier);
- }
-}
-
-static void multi_stop_rx(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- mtpt->ier &= ~UART_IER_RLSI;
- mtpt->port.read_status_mask &= ~UART_LSR_DR;
- serial_out(mtpt, UART_IER, mtpt->ier);
-}
-
-static void multi_enable_ms(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
-
- mtpt->ier |= UART_IER_MSI;
- serial_out(mtpt, UART_IER, mtpt->ier);
-}
-
-
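-/*
- * Drain the receiver: read characters while LSR.DR is set (bounded to 256
- * per call), map break/parity/framing/overrun status onto TTY flags, and
- * push the flip buffer to the line discipline.  In multi-drop mode a
- * parity-flagged byte is read but not queued to the tty.
- */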
-static _INLINE_ void receive_chars(struct mp_port *mtpt, int *status )
-{
- struct tty_struct *tty = mtpt->port.info->tty;
- unsigned char lsr = *status;
- int max_count = 256;
- unsigned char ch;
- char flag;
-
- //lsr &= mtpt->port.read_status_mask;
-
- do {
- if ((lsr & UART_LSR_PE) && (mtpt->port.mdmode & MDMODE_ENABLE))
- {
- ch = serial_inp(mtpt, UART_RX);
- }
- else if (lsr & UART_LSR_SPECIAL)
- {
- flag = 0;
- ch = serial_inp(mtpt, UART_RX);
-
- if (lsr & UART_LSR_BI)
- {
-
- mtpt->port.icount.brk++;
- flag = TTY_BREAK;
-
- if (sb_uart_handle_break(&mtpt->port))
- goto ignore_char;
- }
- if (lsr & UART_LSR_PE)
- {
- mtpt->port.icount.parity++;
- flag = TTY_PARITY;
- }
- if (lsr & UART_LSR_FE)
- {
- mtpt->port.icount.frame++;
- flag = TTY_FRAME;
- }
- if (lsr & UART_LSR_OE)
- {
- mtpt->port.icount.overrun++;
- flag = TTY_OVERRUN;
- }
- tty_insert_flip_char(tty, ch, flag);
- }
- else
- {
- ch = serial_inp(mtpt, UART_RX);
- tty_insert_flip_char(tty, ch, 0);
- }
-ignore_char:
- lsr = serial_inp(mtpt, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(tty);
-}
-
-
-
-
-static _INLINE_ void transmit_chars(struct mp_port *mtpt)
-{
- struct circ_buf *xmit = &mtpt->port.info->xmit;
- int count;
-
- if (mtpt->port.x_char) {
- serial_outp(mtpt, UART_TX, mtpt->port.x_char);
- mtpt->port.icount.tx++;
- mtpt->port.x_char = 0;
- return;
- }
- if (uart_circ_empty(xmit) || uart_tx_stopped(&mtpt->port)) {
- multi_stop_tx(&mtpt->port);
- return;
- }
-
- count = uart_circ_chars_pending(xmit);
-
- if(count > mtpt->port.fifosize)
- {
- count = mtpt->port.fifosize;
- }
-
- printk("[%d] mdmode: %x\n", mtpt->port.line, mtpt->port.mdmode);
- do {
-#if 0
- /* check multi-drop mode */
- if ((mtpt->port.mdmode & (MDMODE_ENABLE | MDMODE_ADDR)) == (MDMODE_ENABLE | MDMODE_ADDR))
- {
- printk("send address\n");
- /* send multi-drop address */
- serial_out(mtpt, UART_SCR, xmit->buf[xmit->tail]);
- }
- else
-#endif
- {
- serial_out(mtpt, UART_TX, xmit->buf[xmit->tail]);
- }
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- mtpt->port.icount.tx++;
- } while (--count > 0);
-}
-
-
-
-static _INLINE_ void check_modem_status(struct mp_port *mtpt)
-{
- int status;
-
- status = serial_in(mtpt, UART_MSR);
-
- if ((status & UART_MSR_ANY_DELTA) == 0)
- return;
-
- if (status & UART_MSR_TERI)
- mtpt->port.icount.rng++;
- if (status & UART_MSR_DDSR)
- mtpt->port.icount.dsr++;
- if (status & UART_MSR_DDCD)
- sb_uart_handle_dcd_change(&mtpt->port, status & UART_MSR_DCD);
- if (status & UART_MSR_DCTS)
- sb_uart_handle_cts_change(&mtpt->port, status & UART_MSR_CTS);
-
- wake_up_interruptible(&mtpt->port.info->delta_msr_wait);
-}
-
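-/*
- * Per-interrupt (or per-poll) service routine: read LSR once, receive any
- * pending data, refresh modem status, and transmit when THR is empty.  For
- * RS485 interfaces RTS is asserted around the transmit burst and dropped
- * again once the transmitter has drained.
- */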
-static inline void multi_handle_port(struct mp_port *mtpt)
-{
- unsigned int status = serial_inp(mtpt, UART_LSR);
-
- //printk("lsr: %x\n", status);
-
- if ((status & UART_LSR_DR) || (status & UART_LSR_SPECIAL))
- receive_chars(mtpt, &status);
- check_modem_status(mtpt);
- if (status & UART_LSR_THRE)
- {
- if ((mtpt->port.type == PORT_16C105X)
- || (mtpt->port.type == PORT_16C105XA))
- transmit_chars(mtpt);
- else
- {
- if (mtpt->interface >= RS485NE)
- uart_set_mctrl(&mtpt->port, TIOCM_RTS);
-
- transmit_chars(mtpt);
-
-
- if (mtpt->interface >= RS485NE)
- {
- while((status=serial_in(mtpt,UART_LSR) &0x60)!=0x60);
- uart_clear_mctrl(&mtpt->port, TIOCM_RTS);
- }
- }
- }
-}
-
-
-
-static irqreturn_t multi_interrupt(int irq, void *dev_id)
-{
- struct irq_info *iinfo = dev_id;
- struct list_head *lhead, *end = NULL;
- int pass_counter = 0;
-
-
- spin_lock(&iinfo->lock);
-
- lhead = iinfo->head;
- do {
- struct mp_port *mtpt;
- unsigned int iir;
-
- mtpt = list_entry(lhead, struct mp_port, list);
-
- iir = serial_in(mtpt, UART_IIR);
- printk("interrupt! port %d, iir 0x%x\n", mtpt->port.line, iir); //wlee
- if (!(iir & UART_IIR_NO_INT))
- {
- printk("interrupt handle\n");
- spin_lock(&mtpt->port.lock);
- multi_handle_port(mtpt);
- spin_unlock(&mtpt->port.lock);
-
- end = NULL;
- } else if (end == NULL)
- end = lhead;
-
- lhead = lhead->next;
- if (lhead == iinfo->head && pass_counter++ > PASS_LIMIT)
- {
- printk(KERN_ERR "multi: too much work for "
- "irq%d\n", irq);
- printk( "multi: too much work for "
- "irq%d\n", irq);
- break;
- }
- } while (lhead != end);
-
- spin_unlock(&iinfo->lock);
-
-
- return IRQ_HANDLED;
-}
-
-static void serial_do_unlink(struct irq_info *i, struct mp_port *mtpt)
-{
- spin_lock_irq(&i->lock);
-
- if (!list_empty(i->head)) {
- if (i->head == &mtpt->list)
- i->head = i->head->next;
- list_del(&mtpt->list);
- } else {
- i->head = NULL;
- }
-
- spin_unlock_irq(&i->lock);
-}
-
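-/*
- * Shared-IRQ bookkeeping: each IRQ owns a list of ports.  The first port on
- * an IRQ requests the handler; later ports are simply linked into the list.
- */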
-static int serial_link_irq_chain(struct mp_port *mtpt)
-{
- struct irq_info *i = irq_lists + mtpt->port.irq;
- int ret, irq_flags = mtpt->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
- spin_lock_irq(&i->lock);
-
- if (i->head) {
- list_add(&mtpt->list, i->head);
- spin_unlock_irq(&i->lock);
-
- ret = 0;
- } else {
- INIT_LIST_HEAD(&mtpt->list);
- i->head = &mtpt->list;
- spin_unlock_irq(&i->lock);
-
- ret = request_irq(mtpt->port.irq, multi_interrupt,
- irq_flags, "serial", i);
- if (ret < 0)
- serial_do_unlink(i, mtpt);
- }
-
- return ret;
-}
-
-
-
-
-static void serial_unlink_irq_chain(struct mp_port *mtpt)
-{
- struct irq_info *i = irq_lists + mtpt->port.irq;
-
- if (list_empty(i->head))
- {
- free_irq(mtpt->port.irq, i);
- }
- serial_do_unlink(i, mtpt);
-}
-
-static void multi_timeout(unsigned long data)
-{
- struct mp_port *mtpt = (struct mp_port *)data;
-
-
- spin_lock(&mtpt->port.lock);
- multi_handle_port(mtpt);
- spin_unlock(&mtpt->port.lock);
-
- mod_timer(&mtpt->timer, jiffies+1 );
-}
-
-static unsigned int multi_tx_empty(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
- unsigned int ret;
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- ret = serial_in(mtpt, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
- return ret;
-}
-
-
-static unsigned int multi_get_mctrl(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char status;
- unsigned int ret;
-
- status = serial_in(mtpt, UART_MSR);
-
- ret = 0;
- if (status & UART_MSR_DCD)
- ret |= TIOCM_CAR;
- if (status & UART_MSR_RI)
- ret |= TIOCM_RNG;
- if (status & UART_MSR_DSR)
- ret |= TIOCM_DSR;
- if (status & UART_MSR_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-static void multi_set_mctrl(struct sb_uart_port *port, unsigned int mctrl)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char mcr = 0;
-
- mctrl &= 0xff;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
-
- serial_out(mtpt, UART_MCR, mcr);
-}
-
-
-static void multi_break_ctl(struct sb_uart_port *port, int break_state)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- if (break_state == -1)
- mtpt->lcr |= UART_LCR_SBC;
- else
- mtpt->lcr &= ~UART_LCR_SBC;
- serial_out(mtpt, UART_LCR, mtpt->lcr);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-}
-
-
-
-static int multi_startup(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
- int retval;
-
- mtpt->capabilities = uart_config[mtpt->port.type].flags;
- mtpt->mcr = 0;
-
- if (mtpt->capabilities & UART_CLEAR_FIFO) {
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_outp(mtpt, UART_FCR, 0);
- }
-
- (void) serial_inp(mtpt, UART_LSR);
- (void) serial_inp(mtpt, UART_RX);
- (void) serial_inp(mtpt, UART_IIR);
- (void) serial_inp(mtpt, UART_MSR);
- //test-wlee 9-bit disable
- serial_outp(mtpt, UART_MSR, 0);
-
-
- if (!(mtpt->port.flags & UPF_BUGGY_UART) &&
- (serial_inp(mtpt, UART_LSR) == 0xff)) {
- printk("ttyS%d: LSR safety check engaged!\n", mtpt->port.line);
- //return -ENODEV;
- }
-
- if ((!is_real_interrupt(mtpt->port.irq)) || (mtpt->poll_type==TYPE_POLL)) {
- unsigned int timeout = mtpt->port.timeout;
-
- timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
-
- mtpt->timer.data = (unsigned long)mtpt;
- mod_timer(&mtpt->timer, jiffies + timeout);
- }
- else
- {
- retval = serial_link_irq_chain(mtpt);
- if (retval)
- return retval;
- }
-
- serial_outp(mtpt, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- if ((is_real_interrupt(mtpt->port.irq))||(mtpt->poll_type==TYPE_INTERRUPT))
- mtpt->port.mctrl |= TIOCM_OUT2;
-
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
-
- mtpt->ier = UART_IER_RLSI | UART_IER_RDI;
- serial_outp(mtpt, UART_IER, mtpt->ier);
-
- (void) serial_inp(mtpt, UART_LSR);
- (void) serial_inp(mtpt, UART_RX);
- (void) serial_inp(mtpt, UART_IIR);
- (void) serial_inp(mtpt, UART_MSR);
-
- return 0;
-}
-
-
-
-static void multi_shutdown(struct sb_uart_port *port)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned long flags;
-
-
- mtpt->ier = 0;
- serial_outp(mtpt, UART_IER, 0);
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
- mtpt->port.mctrl &= ~TIOCM_OUT2;
-
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-
- serial_out(mtpt, UART_LCR, serial_inp(mtpt, UART_LCR) & ~UART_LCR_SBC);
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO |
- UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- serial_outp(mtpt, UART_FCR, 0);
-
-
- (void) serial_in(mtpt, UART_RX);
-
- if ((!is_real_interrupt(mtpt->port.irq))||(mtpt->poll_type==TYPE_POLL))
- {
- del_timer_sync(&mtpt->timer);
- }
- else
- {
- serial_unlink_irq_chain(mtpt);
- }
-}
-
-
-
-static unsigned int multi_get_divisor(struct sb_uart_port *port, unsigned int baud)
-{
- unsigned int quot;
-
- if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/4))
- quot = 0x8001;
- else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
- baud == (port->uartclk/8))
- quot = 0x8002;
- else
- quot = sb_uart_get_divisor(port, baud);
-
- return quot;
-}
-
-
-
-
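-/*
- * Apply termios settings: build the LCR word, pick a divisor (honouring the
- * UPF_MAGIC_MULTIPLIER rates), program the read/ignore status masks and
- * hardware flow control, then apply the SB16C105x deep-FIFO / auto-RTS setup
- * or the RS485 RTS handling, and kick the MP4M modem with SendATCommand().
- */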
-static void multi_set_termios(struct sb_uart_port *port, struct MP_TERMIOS *termios, struct MP_TERMIOS *old)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- unsigned char cval, fcr = 0;
- unsigned long flags;
- unsigned int baud, quot;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- cval = 0x00;
- break;
- case CS6:
- cval = 0x01;
- break;
- case CS7:
- cval = 0x02;
- break;
- default:
- case CS8:
- cval = 0x03;
- break;
- }
-
- if (termios->c_cflag & CSTOPB)
- cval |= 0x04;
- if (termios->c_cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(termios->c_cflag & PARODD))
- cval |= UART_LCR_EPAR;
-
-#ifdef CMSPAR
- if (termios->c_cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-#endif
-
- baud = sb_uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = multi_get_divisor(port, baud);
-
- if (mtpt->capabilities & UART_USE_FIFO) {
- //if (baud < 2400)
- // fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
- //else
- // fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
-
- // fcr = UART_FCR_ENABLE_FIFO | 0x90;
- fcr = fcr_arr[mtpt->port.line];
- }
-
- spin_lock_irqsave(&mtpt->port.lock, flags);
-
- sb_uart_update_timeout(port, termios->c_cflag, baud);
-
- mtpt->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (termios->c_iflag & INPCK)
- mtpt->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
- mtpt->port.read_status_mask |= UART_LSR_BI;
-
- mtpt->port.ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- mtpt->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- if (termios->c_iflag & IGNBRK) {
- mtpt->port.ignore_status_mask |= UART_LSR_BI;
- if (termios->c_iflag & IGNPAR)
- mtpt->port.ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((termios->c_cflag & CREAD) == 0)
- mtpt->port.ignore_status_mask |= UART_LSR_DR;
-
- mtpt->ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(&mtpt->port, termios->c_cflag))
- mtpt->ier |= UART_IER_MSI;
-
- serial_out(mtpt, UART_IER, mtpt->ier);
-
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR,
- termios->c_cflag & CRTSCTS ? UART_EFR_CTS :0);
- }
-
- serial_outp(mtpt, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
-
- serial_outp(mtpt, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(mtpt, UART_DLM, quot >> 8); /* MS of divisor */
-
- serial_outp(mtpt, UART_LCR, cval); /* reset DLAB */
- mtpt->lcr = cval; /* Save LCR */
-
- if (fcr & UART_FCR_ENABLE_FIFO) {
- /* emulated UARTs (Lucent Venus 167x) need two steps */
- serial_outp(mtpt, UART_FCR, UART_FCR_ENABLE_FIFO);
- }
-
- serial_outp(mtpt, UART_FCR, fcr); /* set fcr */
-
-
- if ((mtpt->port.type == PORT_16C105X)
- || (mtpt->port.type == PORT_16C105XA))
- {
- if(deep[mtpt->port.line]!=0)
- set_deep_fifo(port, ENABLE);
-
- if (mtpt->interface != RS232)
- set_auto_rts(port,mtpt->interface);
-
- }
- else
- {
- if (mtpt->interface >= RS485NE)
- {
- uart_clear_mctrl(&mtpt->port, TIOCM_RTS);
- }
- }
-
- if(mtpt->device->device_id == PCI_DEVICE_ID_MP4M)
- {
- SendATCommand(mtpt);
- printk("SendATCommand\n");
- }
- multi_set_mctrl(&mtpt->port, mtpt->port.mctrl);
- spin_unlock_irqrestore(&mtpt->port.lock, flags);
-}
-
-static void multi_pm(struct sb_uart_port *port, unsigned int state, unsigned int oldstate)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- if (state) {
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, UART_EFR_ECB);
- serial_outp(mtpt, UART_LCR, 0);
- serial_outp(mtpt, UART_IER, UART_IERX_SLEEP);
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
- }
-
- if (mtpt->pm)
- mtpt->pm(port, state, oldstate);
- }
- else
- {
- if (mtpt->capabilities & UART_STARTECH) {
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, UART_EFR_ECB);
- serial_outp(mtpt, UART_LCR, 0);
- serial_outp(mtpt, UART_IER, 0);
- serial_outp(mtpt, UART_LCR, 0xBF);
- serial_outp(mtpt, UART_EFR, 0);
- serial_outp(mtpt, UART_LCR, 0);
- }
-
- if (mtpt->pm)
- mtpt->pm(port, state, oldstate);
- }
-}
-
-static void multi_release_std_resource(struct mp_port *mtpt)
-{
- unsigned int size = 8 << mtpt->port.regshift;
-
- switch (mtpt->port.iotype) {
- case UPIO_MEM:
- if (!mtpt->port.mapbase)
- break;
-
- if (mtpt->port.flags & UPF_IOREMAP) {
- iounmap(mtpt->port.membase);
- mtpt->port.membase = NULL;
- }
-
- release_mem_region(mtpt->port.mapbase, size);
- break;
-
- case UPIO_HUB6:
- case UPIO_PORT:
- release_region(mtpt->port.iobase,size);
- break;
- }
-}
-
-static void multi_release_port(struct sb_uart_port *port)
-{
-}
-
-static int multi_request_port(struct sb_uart_port *port)
-{
- return 0;
-}
-
-static void multi_config_port(struct sb_uart_port *port, int flags)
-{
- struct mp_port *mtpt = (struct mp_port *)port;
- int probeflags = PROBE_ANY;
-
- if (flags & UART_CONFIG_TYPE)
- autoconfig(mtpt, probeflags);
- if (mtpt->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
- autoconfig_irq(mtpt);
-
- if (mtpt->port.type == PORT_UNKNOWN)
- multi_release_std_resource(mtpt);
-}
-
-static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *ser)
-{
- if (ser->irq >= NR_IRQS || ser->irq < 0 ||
- ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
- ser->type == PORT_STARTECH)
- return -EINVAL;
- return 0;
-}
-
-static const char *multi_type(struct sb_uart_port *port)
-{
- int type = port->type;
-
- if (type >= ARRAY_SIZE(uart_config))
- type = 0;
- return uart_config[type].name;
-}
-
-static struct sb_uart_ops multi_pops = {
- .tx_empty = multi_tx_empty,
- .set_mctrl = multi_set_mctrl,
- .get_mctrl = multi_get_mctrl,
- .stop_tx = multi_stop_tx,
- .start_tx = multi_start_tx,
- .stop_rx = multi_stop_rx,
- .enable_ms = multi_enable_ms,
- .break_ctl = multi_break_ctl,
- .startup = multi_startup,
- .shutdown = multi_shutdown,
- .set_termios = multi_set_termios,
- .pm = multi_pm,
- .type = multi_type,
- .release_port = multi_release_port,
- .request_port = multi_request_port,
- .config_port = multi_config_port,
- .verify_port = multi_verify_port,
-};
-
-static struct uart_driver multi_reg = {
- .owner = THIS_MODULE,
- .driver_name = "goldel_tulip",
- .dev_name = "ttyMP",
- .major = SB_TTY_MP_MAJOR,
- .minor = 0,
- .nr = MAX_MP_PORT,
- .cons = NULL,
-};
-
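-/*
- * One-time port table setup: derive each port's I/O base, IRQ, interface
- * configuration register, and UART clock (BASE_BAUD * 16 scaled by the
- * oscillator field of the DIR option register), then pick RS232/RS422/RS485
- * operation from the IIR option register.
- */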
-static void __init multi_init_ports(void)
-{
- struct mp_port *mtpt;
- static int first = 1;
- int i,j,k;
- unsigned char osc;
- unsigned char b_ret = 0;
- static struct mp_device_t *sbdev;
-
- if (!first)
- return;
- first = 0;
-
- mtpt = multi_ports;
-
- for (k=0;k<NR_BOARD;k++)
- {
- sbdev = &mp_devs[k];
-
- for (i = 0; i < sbdev->nr_ports; i++, mtpt++)
- {
- mtpt->device = sbdev;
- mtpt->port.iobase = sbdev->uart_access_addr + 8*i;
- mtpt->port.irq = sbdev->irq;
- if ( ((sbdev->device_id == PCI_DEVICE_ID_MP4)&&(sbdev->revision==0x91)))
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + i;
- else if (sbdev->revision == 0xc0)
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + (i & 0x1);
- else
- mtpt->interface_config_addr = sbdev->option_reg_addr + 0x08 + i/8;
-
- mtpt->option_base_addr = sbdev->option_reg_addr;
-
- mtpt->poll_type = sbdev->poll_type;
-
- mtpt->port.uartclk = BASE_BAUD * 16;
-
- /* get input clock information */
- osc = inb(sbdev->option_reg_addr + MP_OPTR_DIR0 + i/8) & 0x0F;
- if (osc==0x0f)
- osc = 0;
- for(j=0;j<osc;j++)
- mtpt->port.uartclk *= 2;
- mtpt->port.flags |= STD_COM_FLAGS | UPF_SHARE_IRQ ;
- mtpt->port.iotype = UPIO_PORT;
- mtpt->port.ops = &multi_pops;
-
- if (sbdev->revision == 0xc0)
- {
- /* for SB16C1053APCI */
- b_ret = sb1053a_get_interface(mtpt, i);
- }
- else
- {
- b_ret = read_option_register(mtpt,(MP_OPTR_IIR0 + i/8));
- printk("IIR_RET = %x\n",b_ret);
- }
-
- /* default to RS232 */
- mtpt->interface = RS232;
- if (IIR_RS422 == (b_ret & IIR_TYPE_MASK))
- mtpt->interface = RS422PTP;
- if (IIR_RS485 == (b_ret & IIR_TYPE_MASK))
- mtpt->interface = RS485NE;
- }
- }
-}
-
-static void __init multi_register_ports(struct uart_driver *drv)
-{
- int i;
-
- multi_init_ports();
-
- for (i = 0; i < NR_PORTS; i++) {
- struct mp_port *mtpt = &multi_ports[i];
-
- mtpt->port.line = i;
- mtpt->port.ops = &multi_pops;
- init_timer(&mtpt->timer);
- mtpt->timer.function = multi_timeout;
- mp_add_one_port(drv, &mtpt->port);
- }
-}
-
-/**
- * pci_remap_base - remap the BAR value of a PCI device
- *
- * PARAMETERS
- *	pcidev - pci_dev structure address
- *	offset - BAR offset, PCI_BASE_ADDRESS_0 ~ PCI_BASE_ADDRESS_4
- *	address - new address to program into the BAR
- *	size - size of the address space
- *
- * RETURNS
- *	0 if the remap succeeds, -1 otherwise.
- */
-static int pci_remap_base(struct pci_dev *pcidev, unsigned int offset,
- unsigned int address, unsigned int size)
-{
-#if 0
- struct resource *root;
- unsigned index = (offset - 0x10) >> 2;
-#endif
-
- pci_write_config_dword(pcidev, offset, address);
-#if 0
- root = pcidev->resource[index].parent;
- release_resource(&pcidev->resource[index]);
- address &= ~0x1;
- pcidev->resource[index].start = address;
- pcidev->resource[index].end = address + size - 1;
-
- if (request_resource(root, &pcidev->resource[index]) != NULL)
- {
- printk(KERN_ERR "pci remap conflict!! 0x%x\n", address);
- return (-1);
- }
-#endif
-
- return (0);
-}
-
-static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
-{
- static struct mp_device_t *sbdev = mp_devs;
- unsigned long addr = 0;
- int j;
- struct resource *ret = NULL;
-
- sbdev->device_id = brd.device_id;
- pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &(sbdev->revision));
- sbdev->name = brd.name;
- sbdev->uart_access_addr = pcidev->resource[0].start & PCI_BASE_ADDRESS_IO_MASK;
-
- /* check revision. The SB16C1053APCI's option i/o address is BAR4 */
- if (sbdev->revision == 0xc0)
- {
- /* SB16C1053APCI */
- sbdev->option_reg_addr = pcidev->resource[4].start & PCI_BASE_ADDRESS_IO_MASK;
- }
- else
- {
- sbdev->option_reg_addr = pcidev->resource[1].start & PCI_BASE_ADDRESS_IO_MASK;
- }
-#if 1
- if (sbdev->revision == 0xc0)
- {
- outb(0x00, sbdev->option_reg_addr + MP_OPTR_GPOCR);
- inb(sbdev->option_reg_addr + MP_OPTR_GPOCR);
- outb(0x83, sbdev->option_reg_addr + MP_OPTR_GPOCR);
- }
-#endif
-
- sbdev->irq = pcidev->irq;
-
-	if ((brd.device_id & 0x0800) || !(brd.device_id & 0xff00))
- {
- sbdev->poll_type = TYPE_INTERRUPT;
- }
- else
- {
- sbdev->poll_type = TYPE_POLL;
- }
-
-	/* code specific to each board */
- switch(brd.device_id){
- case PCI_DEVICE_ID_MP1 :
- case PCIE_DEVICE_ID_MP1 :
- case PCIE_DEVICE_ID_MP1E :
- case PCIE_DEVICE_ID_GT_MP1 :
- sbdev->nr_ports = 1;
- break;
- case PCI_DEVICE_ID_MP2 :
- case PCIE_DEVICE_ID_MP2 :
- case PCIE_DEVICE_ID_GT_MP2 :
- case PCIE_DEVICE_ID_MP2B :
- case PCIE_DEVICE_ID_MP2E :
- sbdev->nr_ports = 2;
-
- /* serial base address remap */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- }
- break;
- case PCI_DEVICE_ID_MP4 :
- case PCI_DEVICE_ID_MP4A :
- case PCIE_DEVICE_ID_MP4 :
- case PCI_DEVICE_ID_GT_MP4 :
- case PCI_DEVICE_ID_GT_MP4A :
- case PCIE_DEVICE_ID_GT_MP4 :
- case PCI_DEVICE_ID_MP4M :
- case PCIE_DEVICE_ID_MP4B :
- sbdev->nr_ports = 4;
-
- if(sbdev->revision == 0x91){
- sbdev->reserved_addr[0] = pcidev->resource[0].start & PCI_BASE_ADDRESS_IO_MASK;
- outb(0x03 , sbdev->reserved_addr[0] + 0x01);
- outb(0x03 , sbdev->reserved_addr[0] + 0x02);
- outb(0x01 , sbdev->reserved_addr[0] + 0x20);
- outb(0x00 , sbdev->reserved_addr[0] + 0x21);
- request_region(sbdev->reserved_addr[0], 32, sbdev->name);
- sbdev->uart_access_addr = pcidev->resource[1].start & PCI_BASE_ADDRESS_IO_MASK;
- sbdev->option_reg_addr = pcidev->resource[2].start & PCI_BASE_ADDRESS_IO_MASK;
- }
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_2, prev_port_addr + 16, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_3, prev_port_addr + 24, 8);
- }
- break;
- case PCI_DEVICE_ID_MP6 :
- case PCI_DEVICE_ID_MP6A :
- case PCI_DEVICE_ID_GT_MP6 :
- case PCI_DEVICE_ID_GT_MP6A :
- sbdev->nr_ports = 6;
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_2, prev_port_addr + 16, 16);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_3, prev_port_addr + 32, 16);
- }
- break;
- case PCI_DEVICE_ID_MP8 :
- case PCIE_DEVICE_ID_MP8 :
- case PCI_DEVICE_ID_GT_MP8 :
- case PCIE_DEVICE_ID_GT_MP8 :
- case PCIE_DEVICE_ID_MP8B :
- sbdev->nr_ports = 8;
- break;
- case PCI_DEVICE_ID_MP32 :
- case PCIE_DEVICE_ID_MP32 :
- case PCI_DEVICE_ID_GT_MP32 :
- case PCIE_DEVICE_ID_GT_MP32 :
- {
- int portnum_hex=0;
- portnum_hex = inb(sbdev->option_reg_addr);
- sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
- }
- break;
-#ifdef CONFIG_PARPORT_PC
- case PCI_DEVICE_ID_MP2S1P :
- sbdev->nr_ports = 2;
-
- /* SB16C1053APCI */
- if (sbdev->revision == 0xc0)
- {
-			u32 prev_port_addr = 0;
-
- pci_read_config_dword(pcidev, PCI_BASE_ADDRESS_0, &prev_port_addr);
- pci_remap_base(pcidev, PCI_BASE_ADDRESS_1, prev_port_addr + 8, 8);
- }
-
- /* add PC compatible parallel port */
- parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
- break;
- case PCI_DEVICE_ID_MP1P :
- /* add PC compatible parallel port */
- parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
- break;
-#endif
- }
-
- ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
-
- if (sbdev->revision == 0xc0)
- {
- ret = request_region(sbdev->option_reg_addr, 0x40, sbdev->name);
- }
- else
- {
- ret = request_region(sbdev->option_reg_addr, 0x20, sbdev->name);
- }
-
-
- NR_BOARD++;
- NR_PORTS += sbdev->nr_ports;
-
- /* Enable PCI interrupt */
- addr = sbdev->option_reg_addr + MP_OPTR_IMR0;
- for(j=0; j < (sbdev->nr_ports/8)+1; j++)
- {
- if (sbdev->poll_type == TYPE_INTERRUPT)
- {
- outb(0xff,addr +j);
- }
- }
- sbdev++;
-
- return 0;
-}
-
-static int __init multi_init(void)
-{
- int ret, i;
- struct pci_dev *dev = NULL;
-
- if(fcr_count==0)
- {
- for(i=0;i<256;i++)
- {
- fcr_arr[i] = 0x01;
-
- }
- }
- if(deep_count==0)
- {
- for(i=0;i<256;i++)
- {
- deep[i] = 1;
-
- }
- }
- if(rtr_count==0)
- {
- for(i=0;i<256;i++)
- {
- rtr[i] = 0x10;
- }
- }
- if(ttr_count==0)
- {
- for(i=0;i<256;i++)
- {
- ttr[i] = 0x38;
- }
- }
-
-
-	printk(KERN_DEBUG "MULTI INIT\n");
- for( i=0; i< mp_nrpcibrds; i++)
- {
-
- while( (dev = pci_get_device(mp_pciboards[i].vendor_id, mp_pciboards[i].device_id, dev) ) )
-
- {
-			printk(KERN_DEBUG "FOUND~~~\n");
-			/* CentOS bug fix: re-enable the device unconditionally
-			 * (was gated on: if (mp_pciboards[i].device_id & 0x0800)) */
- {
- int status;
- pci_disable_device(dev);
- status = pci_enable_device(dev);
-
- if (status != 0)
- {
-					printk(KERN_ERR "Multiport board enable failed!\n");
- status = -ENXIO;
- return status;
- }
- }
-
- init_mp_dev(dev, mp_pciboards[i]);
- }
- }
-
- for (i = 0; i < NR_IRQS; i++)
- spin_lock_init(&irq_lists[i].lock);
-
- ret = mp_register_driver(&multi_reg);
-
- if (ret >= 0)
- multi_register_ports(&multi_reg);
-
- return ret;
-}
-
-static void __exit multi_exit(void)
-{
- int i;
-
- for (i = 0; i < NR_PORTS; i++)
- mp_remove_one_port(&multi_reg, &multi_ports[i].port);
-
- mp_unregister_driver(&multi_reg);
-}
-
-module_init(multi_init);
-module_exit(multi_exit);
-
-MODULE_DESCRIPTION("SystemBase Multiport PCI/PCIe CORE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sb105x/sb_pci_mp.h b/drivers/staging/sb105x/sb_pci_mp.h
deleted file mode 100644
index 80ae4ab04603..000000000000
--- a/drivers/staging/sb105x/sb_pci_mp.h
+++ /dev/null
@@ -1,291 +0,0 @@
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/tty_driver.h>
-#include <linux/pci.h>
-#include <linux/circ_buf.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/serial.h>
-#include <linux/interrupt.h>
-
-
-#include <linux/parport.h>
-#include <linux/ctype.h>
-#include <linux/poll.h>
-
-
-#define MP_TERMIOS ktermios
-
-#include "sb_mp_register.h"
-#include "sb_ser_core.h"
-
-#define DRIVER_VERSION "1.1"
-#define DRIVER_DATE "2012/01/05"
-#define DRIVER_AUTHOR "SYSTEMBASE<tech@sysbas.com>"
-#define DRIVER_DESC "SystemBase PCI/PCIe Multiport Core"
-
-#define SB_TTY_MP_MAJOR 54
-#define PCI_VENDOR_ID_MULTIPORT 0x14A1
-
-#define PCI_DEVICE_ID_MP1 0x4d01
-#define PCI_DEVICE_ID_MP2 0x4d02
-#define PCI_DEVICE_ID_MP4 0x4d04
-#define PCI_DEVICE_ID_MP4A 0x4d54
-#define PCI_DEVICE_ID_MP6 0x4d06
-#define PCI_DEVICE_ID_MP6A 0x4d56
-#define PCI_DEVICE_ID_MP8 0x4d08
-#define PCI_DEVICE_ID_MP32 0x4d32
-/* Parallel port */
-#define PCI_DEVICE_ID_MP1P 0x4301
-#define PCI_DEVICE_ID_MP2S1P 0x4303
-
-#define PCIE_DEVICE_ID_MP1 0x4501
-#define PCIE_DEVICE_ID_MP2 0x4502
-#define PCIE_DEVICE_ID_MP4 0x4504
-#define PCIE_DEVICE_ID_MP8 0x4508
-#define PCIE_DEVICE_ID_MP32 0x4532
-
-#define PCIE_DEVICE_ID_MP1E 0x4e01
-#define PCIE_DEVICE_ID_MP2E 0x4e02
-#define PCIE_DEVICE_ID_MP2B 0x4b02
-#define PCIE_DEVICE_ID_MP4B 0x4b04
-#define PCIE_DEVICE_ID_MP8B 0x4b08
-
-#define PCI_DEVICE_ID_GT_MP4 0x0004
-#define PCI_DEVICE_ID_GT_MP4A 0x0054
-#define PCI_DEVICE_ID_GT_MP6 0x0006
-#define PCI_DEVICE_ID_GT_MP6A 0x0056
-#define PCI_DEVICE_ID_GT_MP8 0x0008
-#define PCI_DEVICE_ID_GT_MP32 0x0032
-
-#define PCIE_DEVICE_ID_GT_MP1 0x1501
-#define PCIE_DEVICE_ID_GT_MP2 0x1502
-#define PCIE_DEVICE_ID_GT_MP4 0x1504
-#define PCIE_DEVICE_ID_GT_MP8 0x1508
-#define PCIE_DEVICE_ID_GT_MP32 0x1532
-
-#define PCI_DEVICE_ID_MP4M 0x4604 //modem
-
-#define MAX_MP_DEV 8
-#define BD_MAX_PORT 32 /* Max serial port in one board */
-#define MAX_MP_PORT 256 /* Max serial port in one PC */
-
-#define PORT_16C105XA 3
-#define PORT_16C105X 2
-#define PORT_16C55X 1
-
-#define ENABLE 1
-#define DISABLE 0
-
-/* ioctls */
-#define TIOCGNUMOFPORT 0x545F
-#define TIOCSMULTIECHO 0x5440
-#define TIOCSPTPNOECHO 0x5441
-
-#define TIOCGOPTIONREG 0x5461
-#define TIOCGDISABLEIRQ 0x5462
-#define TIOCGENABLEIRQ 0x5463
-#define TIOCGSOFTRESET 0x5464
-#define TIOCGSOFTRESETR 0x5465
-#define TIOCGREGINFO 0x5466
-#define TIOCGGETLSR 0x5467
-#define TIOCGGETDEVID 0x5468
-#define TIOCGGETBDNO 0x5469
-#define TIOCGGETINTERFACE 0x546A
-#define TIOCGGETREV 0x546B
-#define TIOCGGETNRPORTS 0x546C
-#define TIOCGGETPORTTYPE 0x546D
-#define GETDEEPFIFO 0x54AA
-#define SETDEEPFIFO 0x54AB
-#define SETFCR 0x54BA
-#define SETTTR 0x54B1
-#define SETRTR 0x54B2
-#define GETTTR 0x54B3
-#define GETRTR 0x54B4
-
-/* multi-drop mode related ioctl commands */
-#define TIOCSMULTIDROP 0x5470
-#define TIOCSMDADDR 0x5471
-#define TIOCGMDADDR 0x5472
-#define TIOCSENDADDR 0x5473
-
-
-/* serial interface */
-#define RS232 1
-#define RS422PTP 2
-#define RS422MD 3
-#define RS485NE 4
-#define RS485ECHO 5
-
-#define serial_inp(up, offset) serial_in(up, offset)
-#define serial_outp(up, offset, value) serial_out(up, offset, value)
-
-#define PASS_LIMIT 256
-#define is_real_interrupt(irq) ((irq) != 0)
-
-#define PROBE_ANY (~0)
-
-static DEFINE_MUTEX(mp_mutex);
-#define MP_MUTEX_LOCK(x) mutex_lock(&(x))
-#define MP_MUTEX_UNLOCK(x) mutex_unlock(&(x))
-#define MP_STATE_LOCK(x) mutex_lock(&((x)->mutex))
-#define MP_STATE_UNLOCK(x) mutex_unlock(&((x)->mutex))
-
-
-#define UART_LSR_SPECIAL 0x1E
-
-#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
-#define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0))
-
-
-//#define MP_DEBUG 1
-#undef MP_DEBUG
-
-#ifdef MP_DEBUG
-#define DPRINTK(x...) printk(x)
-#else
-#define DPRINTK(x...) do { } while (0)
-#endif
-
-#ifdef MP_DEBUG
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#ifdef MP_DEBUG
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
-
-#if defined(__i386__) && defined(CONFIG_M486)
-#define SERIAL_INLINE
-#endif
-#ifdef SERIAL_INLINE
-#define _INLINE_ inline
-#else
-#define _INLINE_
-#endif
-
-#define TYPE_POLL 1
-#define TYPE_INTERRUPT 2
-
-
-struct mp_device_t {
- unsigned short device_id;
- unsigned char revision;
- char *name;
- unsigned long uart_access_addr;
- unsigned long option_reg_addr;
- unsigned long reserved_addr[4];
- int irq;
- int nr_ports;
- int poll_type;
-};
-
-typedef struct mppcibrd {
- char *name;
- unsigned short vendor_id;
- unsigned short device_id;
-} mppcibrd_t;
-
-static mppcibrd_t mp_pciboards[] = {
-
- { "Multi-1 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP1} ,
- { "Multi-2 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP2} ,
- { "Multi-4 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4} ,
- { "Multi-4 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4A} ,
- { "Multi-6 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP6} ,
- { "Multi-6 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP6A} ,
- { "Multi-8 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP8} ,
- { "Multi-32 PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP32} ,
-
- { "Multi-1P PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP1P} ,
- { "Multi-2S1P PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP2S1P} ,
-
- { "Multi-4(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP4} ,
- { "Multi-4(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP4A} ,
- { "Multi-6(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP6} ,
- { "Multi-6(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP6A} ,
- { "Multi-8(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP8} ,
- { "Multi-32(GT) PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_GT_MP32} ,
-
- { "Multi-1 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP1} ,
- { "Multi-2 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2} ,
- { "Multi-4 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP4} ,
- { "Multi-8 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP8} ,
- { "Multi-32 PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP32} ,
-
- { "Multi-1 PCIe E", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP1E} ,
- { "Multi-2 PCIe E", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2E} ,
- { "Multi-2 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP2B} ,
- { "Multi-4 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP4B} ,
- { "Multi-8 PCIe B", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_MP8B} ,
-
- { "Multi-1(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP1} ,
- { "Multi-2(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP2} ,
- { "Multi-4(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP4} ,
- { "Multi-8(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP8} ,
- { "Multi-32(GT) PCIe", PCI_VENDOR_ID_MULTIPORT , PCIE_DEVICE_ID_GT_MP32} ,
-
- { "Multi-4M PCI", PCI_VENDOR_ID_MULTIPORT , PCI_DEVICE_ID_MP4M} ,
-};
-
-struct mp_port {
- struct sb_uart_port port;
-
- struct timer_list timer; /* "no irq" timer */
- struct list_head list; /* ports on this IRQ */
- unsigned int capabilities; /* port capabilities */
- unsigned short rev;
- unsigned char acr;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char lsr_break_flag;
-
- void (*pm)(struct sb_uart_port *port,
- unsigned int state, unsigned int old);
- struct mp_device_t *device;
- unsigned long interface_config_addr;
- unsigned long option_base_addr;
- unsigned char interface;
- unsigned char poll_type;
-};
-
-struct irq_info {
- spinlock_t lock;
- struct list_head *head;
-};
-
-struct sb105x_uart_config {
- char *name;
- int dfl_xmit_fifo_size;
- int flags;
-};
-
-static const struct sb105x_uart_config uart_config[] = {
- { "unknown", 1, 0 },
- { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO },
- { "SB16C1050", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
- { "SB16C1050A", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
-};
-
-
-
diff --git a/drivers/staging/sb105x/sb_ser_core.h b/drivers/staging/sb105x/sb_ser_core.h
deleted file mode 100644
index c8fb99173299..000000000000
--- a/drivers/staging/sb105x/sb_ser_core.h
+++ /dev/null
@@ -1,368 +0,0 @@
-#include <linux/wait.h>
-
-#define UART_CONFIG_TYPE (1 << 0)
-#define UART_CONFIG_IRQ (1 << 1)
-#define UPIO_PORT (0)
-#define UPIO_HUB6 (1)
-#define UPIO_MEM (2)
-#define UPIO_MEM32 (3)
-#define UPIO_AU (4) /* Au1x00 type IO */
-#define UPIO_TSI (5) /* Tsi108/109 type IO */
-#define UPF_FOURPORT (1 << 1)
-#define UPF_SAK (1 << 2)
-#define UPF_SPD_MASK (0x1030)
-#define UPF_SPD_HI (0x0010)
-#define UPF_SPD_VHI (0x0020)
-#define UPF_SPD_CUST (0x0030)
-#define UPF_SPD_SHI (0x1000)
-#define UPF_SPD_WARP (0x1010)
-#define UPF_SKIP_TEST (1 << 6)
-#define UPF_AUTO_IRQ (1 << 7)
-#define UPF_HARDPPS_CD (1 << 11)
-#define UPF_LOW_LATENCY (1 << 13)
-#define UPF_BUGGY_UART (1 << 14)
-#define UPF_MAGIC_MULTIPLIER (1 << 16)
-#define UPF_CONS_FLOW (1 << 23)
-#define UPF_SHARE_IRQ (1 << 24)
-#define UPF_BOOT_AUTOCONF (1 << 28)
-#define UPF_DEAD (1 << 30)
-#define UPF_IOREMAP (1 << 31)
-#define UPF_CHANGE_MASK (0x17fff)
-#define UPF_USR_MASK (UPF_SPD_MASK|UPF_LOW_LATENCY)
-#define USF_CLOSING_WAIT_INF (0)
-#define USF_CLOSING_WAIT_NONE (~0U)
-
-#define UART_XMIT_SIZE PAGE_SIZE
-
-#define UIF_CHECK_CD (1 << 25)
-#define UIF_CTS_FLOW (1 << 26)
-#define UIF_NORMAL_ACTIVE (1 << 29)
-#define UIF_INITIALIZED (1 << 31)
-#define UIF_SUSPENDED (1 << 30)
-
-#define WAKEUP_CHARS 256
-
-#define uart_circ_empty(circ) ((circ)->head == (circ)->tail)
-#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define uart_circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define uart_circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define uart_tx_stopped(port) \
- ((port)->info->tty->stopped || (port)->info->tty->hw_stopped)
-
-#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \
- (cflag) & CRTSCTS || \
- !((cflag) & CLOCAL))
-
-
-struct sb_uart_port;
-struct sb_uart_info;
-struct serial_struct;
-struct device;
-
-struct sb_uart_ops {
- unsigned int (*tx_empty)(struct sb_uart_port *);
- void (*set_mctrl)(struct sb_uart_port *, unsigned int mctrl);
- unsigned int (*get_mctrl)(struct sb_uart_port *);
- void (*stop_tx)(struct sb_uart_port *);
- void (*start_tx)(struct sb_uart_port *);
- void (*send_xchar)(struct sb_uart_port *, char ch);
- void (*stop_rx)(struct sb_uart_port *);
- void (*enable_ms)(struct sb_uart_port *);
- void (*break_ctl)(struct sb_uart_port *, int ctl);
- int (*startup)(struct sb_uart_port *);
- void (*shutdown)(struct sb_uart_port *);
- void (*set_termios)(struct sb_uart_port *, struct MP_TERMIOS *new,
- struct MP_TERMIOS *old);
- void (*pm)(struct sb_uart_port *, unsigned int state,
- unsigned int oldstate);
- int (*set_wake)(struct sb_uart_port *, unsigned int state);
-
- const char *(*type)(struct sb_uart_port *);
-
- void (*release_port)(struct sb_uart_port *);
-
- int (*request_port)(struct sb_uart_port *);
- void (*config_port)(struct sb_uart_port *, int);
- int (*verify_port)(struct sb_uart_port *, struct serial_struct *);
- int (*ioctl)(struct sb_uart_port *, unsigned int, unsigned long);
-};
-
-
-struct sb_uart_icount {
- __u32 cts;
- __u32 dsr;
- __u32 rng;
- __u32 dcd;
- __u32 rx;
- __u32 tx;
- __u32 frame;
- __u32 overrun;
- __u32 parity;
- __u32 brk;
- __u32 buf_overrun;
-};
-typedef unsigned int upf_t;
-
-struct sb_uart_port {
- spinlock_t lock; /* port lock */
- unsigned int iobase; /* in/out[bwl] */
- unsigned char __iomem *membase; /* read/write[bwl] */
- unsigned int irq; /* irq number */
- unsigned int uartclk; /* base uart clock */
- unsigned int fifosize; /* tx fifo size */
- unsigned char x_char; /* xon/xoff char */
- unsigned char regshift; /* reg offset shift */
- unsigned char iotype; /* io access style */
- unsigned char unused1;
-
-
- unsigned int read_status_mask; /* driver specific */
- unsigned int ignore_status_mask; /* driver specific */
- struct sb_uart_info *info; /* pointer to parent info */
- struct sb_uart_icount icount; /* statistics */
-
- struct console *cons; /* struct console, if any */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
- unsigned long sysrq; /* sysrq timeout */
-#endif
-
- upf_t flags;
-
- unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
- unsigned int type; /* port type */
- const struct sb_uart_ops *ops;
- unsigned int custom_divisor;
- unsigned int line; /* port index */
- unsigned long mapbase; /* for ioremap */
- struct device *dev; /* parent device */
- unsigned char hub6; /* this should be in the 8250 driver */
- unsigned char unused[3];
-};
-
-#define mdmode unused[2]
-#define MDMODE_ADDR 0x1
-#define MDMODE_ENABLE 0x2
-#define MDMODE_AUTO 0x4
-#define MDMODE_ADDRSEND 0x8
-
-struct sb_uart_state {
- unsigned int close_delay; /* msec */
- unsigned int closing_wait; /* msec */
-
-
- int count;
- int pm_state;
- struct sb_uart_info *info;
- struct sb_uart_port *port;
-
- struct mutex mutex;
-};
-
-typedef unsigned int uif_t;
-
-struct sb_uart_info {
- struct tty_struct *tty;
- struct circ_buf xmit;
- uif_t flags;
-
- int blocked_open;
-
- struct tasklet_struct tlet;
-
- wait_queue_head_t open_wait;
- wait_queue_head_t delta_msr_wait;
-};
-
-
-struct module;
-struct tty_driver;
-
-struct uart_driver {
- struct module *owner;
- const char *driver_name;
- const char *dev_name;
- int major;
- int minor;
- int nr;
- struct console *cons;
-
- struct sb_uart_state *state;
- struct tty_driver *tty_driver;
-};
-
-void sb_uart_write_wakeup(struct sb_uart_port *port)
-{
- struct sb_uart_info *info = port->info;
- tasklet_schedule(&info->tlet);
-}
-
-void sb_uart_update_timeout(struct sb_uart_port *port, unsigned int cflag,
- unsigned int baud)
-{
- unsigned int bits;
-
- switch (cflag & CSIZE)
- {
- case CS5:
- bits = 7;
- break;
-
- case CS6:
- bits = 8;
- break;
-
- case CS7:
- bits = 9;
- break;
-
- default:
- bits = 10;
- break;
- }
-
- if (cflag & CSTOPB)
- {
- bits++;
- }
-
- if (cflag & PARENB)
- {
- bits++;
- }
-
- bits = bits * port->fifosize;
-
- port->timeout = (HZ * bits) / baud + HZ/50;
-}
-unsigned int sb_uart_get_baud_rate(struct sb_uart_port *port, struct MP_TERMIOS *termios,
- struct MP_TERMIOS *old, unsigned int min,
- unsigned int max)
-{
- unsigned int try, baud, altbaud = 38400;
- upf_t flags = port->flags & UPF_SPD_MASK;
-
- if (flags == UPF_SPD_HI)
- altbaud = 57600;
- if (flags == UPF_SPD_VHI)
- altbaud = 115200;
- if (flags == UPF_SPD_SHI)
- altbaud = 230400;
- if (flags == UPF_SPD_WARP)
- altbaud = 460800;
-
- for (try = 0; try < 2; try++) {
-
- switch (termios->c_cflag & (CBAUD | CBAUDEX))
- {
- case B921600 : baud = 921600; break;
- case B460800 : baud = 460800; break;
- case B230400 : baud = 230400; break;
- case B115200 : baud = 115200; break;
- case B57600 : baud = 57600; break;
- case B38400 : baud = 38400; break;
- case B19200 : baud = 19200; break;
- case B9600 : baud = 9600; break;
- case B4800 : baud = 4800; break;
- case B2400 : baud = 2400; break;
- case B1800 : baud = 1800; break;
- case B1200 : baud = 1200; break;
- case B600 : baud = 600; break;
- case B300 : baud = 300; break;
- case B200 : baud = 200; break;
- case B150 : baud = 150; break;
- case B134 : baud = 134; break;
- case B110 : baud = 110; break;
- case B75 : baud = 75; break;
- case B50 : baud = 50; break;
- default : baud = 9600; break;
- }
-
- if (baud == 38400)
- baud = altbaud;
-
- if (baud == 0)
- baud = 9600;
-
- if (baud >= min && baud <= max)
- return baud;
-
- termios->c_cflag &= ~CBAUD;
- if (old) {
- termios->c_cflag |= old->c_cflag & CBAUD;
- old = NULL;
- continue;
- }
-
- termios->c_cflag |= B9600;
- }
-
- return 0;
-}
-unsigned int sb_uart_get_divisor(struct sb_uart_port *port, unsigned int baud)
-{
- unsigned int quot;
-
- if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
- quot = port->custom_divisor;
- else
- quot = (port->uartclk + (8 * baud)) / (16 * baud);
-
- return quot;
-}
-
-
-
-static inline int sb_uart_handle_break(struct sb_uart_port *port)
-{
- struct sb_uart_info *info = port->info;
-
- if (port->flags & UPF_SAK)
- do_SAK(info->tty);
- return 0;
-}
-
-static inline void sb_uart_handle_dcd_change(struct sb_uart_port *port, unsigned int status)
-{
- struct sb_uart_info *info = port->info;
-
- port->icount.dcd++;
-
- if (info->flags & UIF_CHECK_CD) {
- if (status)
- wake_up_interruptible(&info->open_wait);
- else if (info->tty)
- tty_hangup(info->tty);
- }
-}
-
-static inline void sb_uart_handle_cts_change(struct sb_uart_port *port, unsigned int status)
-{
- struct sb_uart_info *info = port->info;
- struct tty_struct *tty = info->tty;
-
- port->icount.cts++;
-
- if (info->flags & UIF_CTS_FLOW) {
- if (tty->hw_stopped) {
- if (status) {
- tty->hw_stopped = 0;
- port->ops->start_tx(port);
- sb_uart_write_wakeup(port);
- }
- } else {
- if (!status) {
- tty->hw_stopped = 1;
- port->ops->stop_tx(port);
- }
- }
- }
-}
-
-
-
diff --git a/drivers/staging/sbe-2t3e3/2t3e3.h b/drivers/staging/sbe-2t3e3/2t3e3.h
index ccad049c1122..e7bf721f3fd1 100644
--- a/drivers/staging/sbe-2t3e3/2t3e3.h
+++ b/drivers/staging/sbe-2t3e3/2t3e3.h
@@ -613,12 +613,12 @@
* descriptor list and data buffer
*
**********************************************************************/
-typedef struct {
+struct t3e3_rx_desc {
u32 rdes0;
u32 rdes1;
u32 rdes2;
u32 rdes3;
-} t3e3_rx_desc_t;
+};
#define SBE_2T3E3_RX_DESC_RING_SIZE 64
@@ -648,12 +648,12 @@ typedef struct {
/*********************/
-typedef struct {
+struct t3e3_tx_desc {
u32 tdes0;
u32 tdes1;
u32 tdes2;
u32 tdes3;
-} t3e3_tx_desc_t;
+};
#define SBE_2T3E3_TX_DESC_RING_SIZE 256
@@ -701,7 +701,7 @@ struct channel {
} h;
/* statistics */
- t3e3_stats_t s;
+ struct t3e3_stats s;
/* running */
struct {
@@ -709,7 +709,7 @@ struct channel {
} r;
/* parameters */
- t3e3_param_t p;
+ struct t3e3_param p;
u32 liu_regs[SBE_2T3E3_LIU_REG_MAX]; /* LIU registers */
u32 framer_regs[SBE_2T3E3_FRAMER_REG_MAX]; /* Framer registers */
@@ -723,12 +723,12 @@ struct channel {
u32 interrupt_enable_mask;
/* receive chain/ring */
- t3e3_rx_desc_t *rx_ring;
+ struct t3e3_rx_desc *rx_ring;
struct sk_buff *rx_data[SBE_2T3E3_RX_DESC_RING_SIZE];
u32 rx_ring_current_read;
/* transmit chain/ring */
- t3e3_tx_desc_t *tx_ring;
+ struct t3e3_tx_desc *tx_ring;
struct sk_buff *tx_data[SBE_2T3E3_TX_DESC_RING_SIZE];
u32 tx_ring_current_read;
u32 tx_ring_current_write;
@@ -760,8 +760,7 @@ void t3e3_init(struct channel *);
void t3e3_if_up(struct channel *);
void t3e3_if_down(struct channel *);
int t3e3_if_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t3e3_if_config(struct channel *, u32, char *,
- t3e3_resp_t *, int *);
+void t3e3_if_config(struct channel *, u32, char *, struct t3e3_resp *, int *);
void t3e3_set_frame_type(struct channel *, u32);
u32 t3e3_eeprom_read_word(struct channel *, u32);
void t3e3_read_card_serial_number(struct channel *);
@@ -838,7 +837,7 @@ static inline int has_two_ports(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0;
}
-#define dev_to_priv(dev) (*(struct channel **) ((hdlc_device*)(dev) + 1))
+#define dev_to_priv(dev) (*(struct channel **) ((hdlc_device *)(dev) + 1))
static inline u32 dc_read(unsigned long addr, u32 reg)
{
diff --git a/drivers/staging/sbe-2t3e3/ctrl.c b/drivers/staging/sbe-2t3e3/ctrl.c
index d280bcfd660a..e0964ac9e7d7 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.c
+++ b/drivers/staging/sbe-2t3e3/ctrl.c
@@ -164,12 +164,12 @@ static void t3e3_reg_write(struct channel *sc, u32 *reg)
}
}
-static void t3e3_port_get(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_get(struct channel *sc, struct t3e3_param *param)
{
- memcpy(param, &(sc->p), sizeof(t3e3_param_t));
+ memcpy(param, &(sc->p), sizeof(struct t3e3_param));
}
-static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
+static void t3e3_port_set(struct channel *sc, struct t3e3_param *param)
{
if (param->frame_mode != 0xff)
cpld_set_frame_mode(sc, param->frame_mode);
@@ -216,8 +216,7 @@ static void t3e3_port_set(struct channel *sc, t3e3_param_t *param)
cpld_set_scrambler(sc, param->scrambler);
}
-static void t3e3_port_get_stats(struct channel *sc,
- t3e3_stats_t *stats)
+static void t3e3_port_get_stats(struct channel *sc, struct t3e3_stats *stats)
{
u32 result;
@@ -279,18 +278,18 @@ static void t3e3_port_get_stats(struct channel *sc,
result += exar7250_read(sc, SBE_2T3E3_FRAMER_REG_PMON_HOLDING_REGISTER);
sc->s.CP_BIT += result;
- memcpy(stats, &(sc->s), sizeof(t3e3_stats_t));
+ memcpy(stats, &(sc->s), sizeof(struct t3e3_stats));
}
static void t3e3_port_del_stats(struct channel *sc)
{
- memset(&(sc->s), 0, sizeof(t3e3_stats_t));
+ memset(&(sc->s), 0, sizeof(struct t3e3_stats));
}
void t3e3_if_config(struct channel *sc, u32 cmd, char *set,
- t3e3_resp_t *ret, int *rlen)
+ struct t3e3_resp *ret, int *rlen)
{
- t3e3_param_t *param = (t3e3_param_t *)set;
+ struct t3e3_param *param = (struct t3e3_param *)set;
u32 *data = (u32 *)set;
/* turn off all interrupt */
diff --git a/drivers/staging/sbe-2t3e3/ctrl.h b/drivers/staging/sbe-2t3e3/ctrl.h
index c11a58871845..41f144d75c36 100644
--- a/drivers/staging/sbe-2t3e3/ctrl.h
+++ b/drivers/staging/sbe-2t3e3/ctrl.h
@@ -84,7 +84,7 @@
#define NG_SBE_2T3E3_NODE_TYPE "sbe2T3E3"
#define NG_SBE_2T3E3_COOKIE 0x03800891
-typedef struct t3e3_param {
+struct t3e3_param {
u_int8_t frame_mode; /* FRAME_MODE_* */
u_int8_t crc; /* CRC_* */
u_int8_t receiver_on; /* ON/OFF */
@@ -102,9 +102,9 @@ typedef struct t3e3_param {
u_int8_t fractional_mode; /* FRACTIONAL_MODE_* */
u_int8_t bandwidth_start; /* 0-255 */
u_int8_t bandwidth_stop; /* 0-255 */
-} t3e3_param_t;
+};
-typedef struct t3e3_stats {
+struct t3e3_stats {
u_int64_t in_bytes;
u32 in_packets, in_dropped;
u32 in_errors, in_error_desc, in_error_coll, in_error_drib,
@@ -117,15 +117,15 @@ typedef struct t3e3_stats {
u_int8_t LOC, LOF, OOF, LOS, AIS, FERF, IDLE, AIC, FEAC;
u_int16_t FEBE_code;
u32 LCV, FRAMING_BIT, PARITY_ERROR, FEBE_count, CP_BIT;
-} t3e3_stats_t;
+};
-typedef struct t3e3_resp {
+struct t3e3_resp {
union {
- t3e3_param_t param;
- t3e3_stats_t stats;
+ struct t3e3_param param;
+ struct t3e3_stats stats;
u32 data;
} u;
-} t3e3_resp_t;
+};
#endif /* CTRL_H */
diff --git a/drivers/staging/sbe-2t3e3/dc.c b/drivers/staging/sbe-2t3e3/dc.c
index f207b9e015ce..02510f67ac45 100644
--- a/drivers/staging/sbe-2t3e3/dc.c
+++ b/drivers/staging/sbe-2t3e3/dc.c
@@ -316,13 +316,13 @@ static int dc_init_descriptor_list(struct channel *sc)
if (sc->ether.rx_ring == NULL)
sc->ether.rx_ring = kcalloc(SBE_2T3E3_RX_DESC_RING_SIZE,
- sizeof(t3e3_rx_desc_t), GFP_KERNEL);
+ sizeof(struct t3e3_rx_desc), GFP_KERNEL);
if (sc->ether.rx_ring == NULL)
return -ENOMEM;
if (sc->ether.tx_ring == NULL)
sc->ether.tx_ring = kcalloc(SBE_2T3E3_TX_DESC_RING_SIZE,
- sizeof(t3e3_tx_desc_t), GFP_KERNEL);
+ sizeof(struct t3e3_tx_desc), GFP_KERNEL);
if (sc->ether.tx_ring == NULL) {
kfree(sc->ether.rx_ring);
sc->ether.rx_ring = NULL;
@@ -339,7 +339,8 @@ static int dc_init_descriptor_list(struct channel *sc)
SBE_2T3E3_RX_DESC_SECOND_ADDRESS_CHAINED | SBE_2T3E3_MTU;
if (sc->ether.rx_data[i] == NULL) {
- if (!(m = dev_alloc_skb(MCLBYTES))) {
+ m = dev_alloc_skb(MCLBYTES);
+ if (!m) {
for (j = 0; j < i; j++) {
dev_kfree_skb_any(sc->ether.rx_data[j]);
sc->ether.rx_data[j] = NULL;
diff --git a/drivers/staging/sbe-2t3e3/intr.c b/drivers/staging/sbe-2t3e3/intr.c
index efdeb7510047..1bf74b788008 100644
--- a/drivers/staging/sbe-2t3e3/intr.c
+++ b/drivers/staging/sbe-2t3e3/intr.c
@@ -118,7 +118,7 @@ void dc_intr_rx(struct channel *sc)
{
u32 current_read;
u32 error_mask, error;
- t3e3_rx_desc_t *current_desc;
+ struct t3e3_rx_desc *current_desc;
struct sk_buff *m, *m2;
unsigned rcv_len;
@@ -292,7 +292,7 @@ void dc_intr_tx(struct channel *sc)
{
u32 current_read, current_write;
u32 last_segment, error;
- t3e3_tx_desc_t *current_desc;
+ struct t3e3_tx_desc *current_desc;
spin_lock(&sc->ether.tx_lock);
diff --git a/drivers/staging/sbe-2t3e3/maps.c b/drivers/staging/sbe-2t3e3/maps.c
index 7084fbe7b794..e5494502cde1 100644
--- a/drivers/staging/sbe-2t3e3/maps.c
+++ b/drivers/staging/sbe-2t3e3/maps.c
@@ -13,8 +13,7 @@
#include <linux/kernel.h>
#include "2t3e3.h"
-const u32 cpld_reg_map[][2] =
-{
+const u32 cpld_reg_map[][2] = {
{ 0x0000, 0x0080 }, /* 0 - Port Control Register A (PCRA) */
{ 0x0004, 0x0084 }, /* 1 - Port Control Register B (PCRB) */
{ 0x0008, 0x0088 }, /* 2 - LCV Count Register (PLCR) */
@@ -35,8 +34,7 @@ const u32 cpld_reg_map[][2] =
{ 0x0070, 0x00f0 }, /* 17 - Port Bandwidth Stop (PBWL) */
};
-const u32 cpld_val_map[][2] =
-{
+const u32 cpld_val_map[][2] = {
{ 0x01, 0x02 }, /* LIU1 / LIU2 select for Serial Chip Select */
{ 0x04, 0x08 }, /* DAC1 / DAC2 select for Serial Chip Select */
{ 0x00, 0x04 }, /* LOOP1 / LOOP2 - select of loop timing source */
@@ -94,8 +92,7 @@ const u32 t3e3_framer_reg_map[] = {
0x81 /* 47 - LINE_INTERFACE_SCAN */
};
-const u32 t3e3_liu_reg_map[] =
-{
+const u32 t3e3_liu_reg_map[] = {
0x00, /* REG0 */
0x01, /* REG1 */
0x02, /* REG2 */
diff --git a/drivers/staging/sbe-2t3e3/module.c b/drivers/staging/sbe-2t3e3/module.c
index 0e32be5c2471..a6f93a43d216 100644
--- a/drivers/staging/sbe-2t3e3/module.c
+++ b/drivers/staging/sbe-2t3e3/module.c
@@ -122,7 +122,7 @@ static void t3e3_remove_card(struct pci_dev *pdev)
struct channel *channel0 = pci_get_drvdata(pdev);
struct card *card = channel0->card;
- del_timer(&card->timer);
+ del_timer_sync(&card->timer);
if (has_two_ports(channel0->pdev)) {
t3e3_remove_channel(&card->channels[1]);
pci_dev_put(card->channels[1].pdev);
diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
index 1f5088b3c10b..fe6c9513c9cd 100644
--- a/drivers/staging/sbe-2t3e3/netdev.c
+++ b/drivers/staging/sbe-2t3e3/netdev.c
@@ -25,8 +25,8 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct channel *sc = dev_to_priv(dev);
int cmd_2t3e3, len, rlen;
- t3e3_param_t param;
- t3e3_resp_t resp;
+ struct t3e3_param param;
+ struct t3e3_resp resp;
void __user *data = ifr->ifr_data + sizeof(cmd_2t3e3) + sizeof(len);
if (cmd == SIOCWANDEV)
@@ -61,7 +61,7 @@ static struct net_device_stats *t3e3_get_stats(struct net_device *dev)
{
struct net_device_stats *nstats = &dev->stats;
struct channel *sc = dev_to_priv(dev);
- t3e3_stats_t *stats = &sc->s;
+ struct t3e3_stats *stats = &sc->s;
memset(nstats, 0, sizeof(struct net_device_stats));
nstats->rx_packets = stats->in_packets;
diff --git a/drivers/staging/sep/sep_crypto.c b/drivers/staging/sep/sep_crypto.c
index 7fc267550c65..965485f71fe9 100644
--- a/drivers/staging/sep/sep_crypto.c
+++ b/drivers/staging/sep/sep_crypto.c
@@ -112,7 +112,7 @@ static void sep_do_callback(struct work_struct *work)
* on what operation is to be done
*/
static int sep_submit_work(struct workqueue_struct *work_queue,
- void(*funct)(void *),
+ void (*funct)(void *),
void *data)
{
struct sep_work_struct *sep_work;
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index 122614c4092b..e301207eb0bd 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -1266,9 +1266,8 @@ static int sep_lock_user_pages(struct sep_device *sep,
/* Check the number of pages locked - if not all then exit with error */
if (result != num_pages) {
dev_warn(&sep->pdev->dev,
- "[PID%d] not all pages locked by get_user_pages, "
- "result 0x%X, num_pages 0x%X\n",
- current->pid, result, num_pages);
+ "[PID%d] not all pages locked by get_user_pages, result 0x%X, num_pages 0x%X\n",
+ current->pid, result, num_pages);
error = -ENOMEM;
goto end_function_with_error3;
}
@@ -1293,9 +1292,9 @@ static int sep_lock_user_pages(struct sep_device *sep,
lli_array[count].block_size = PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is %08lx, "
- "lli_array[%x].block_size is (hex) %x\n", current->pid,
- count, (unsigned long)lli_array[count].bus_address,
+ "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
+ current->pid, count,
+ (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
}
@@ -1314,8 +1313,7 @@ static int sep_lock_user_pages(struct sep_device *sep,
"[PID%d] After check if page 0 has all data\n",
current->pid);
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
- "lli_array[0].block_size is (hex) %x\n",
+ "[PID%d] lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
@@ -1332,8 +1330,7 @@ static int sep_lock_user_pages(struct sep_device *sep,
"[PID%d] After last page size adjustment\n",
current->pid);
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "[PID%d] lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid,
num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
@@ -1449,8 +1446,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
start_page += PAGE_SIZE;
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli_array[%x].bus_address is %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "[PID%d] lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid,
count, (unsigned long)lli_array[count].bus_address,
count, lli_array[count].block_size);
@@ -1469,8 +1465,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
dev_dbg(&sep->pdev->dev,
"[PID%d] After check if page 0 has all data\n"
- "lli_array[0].bus_address is (hex) %08lx, "
- "lli_array[0].block_size is (hex) %x\n",
+ "lli_array[0].bus_address is (hex) %08lx, lli_array[0].block_size is (hex) %x\n",
current->pid,
(unsigned long)lli_array[0].bus_address,
lli_array[0].block_size);
@@ -1484,8 +1479,7 @@ static int sep_lli_table_secure_dma(struct sep_device *sep,
dev_dbg(&sep->pdev->dev,
"[PID%d] After last page size adjustment\n"
- "lli_array[%x].bus_address is (hex) %08lx, "
- "lli_array[%x].block_size is (hex) %x\n",
+ "lli_array[%x].bus_address is (hex) %08lx, lli_array[%x].block_size is (hex) %x\n",
current->pid, num_pages - 1,
(unsigned long)lli_array[num_pages - 1].bus_address,
num_pages - 1,
@@ -1745,9 +1739,8 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
dev_dbg(&sep->pdev->dev,
- "[PID%d] lli table %08lx, "
- "table_data_size is (hex) %lx\n",
- current->pid, table_count, table_data_size);
+ "[PID%d] lli table %08lx, table_data_size is (hex) %lx\n",
+ current->pid, table_count, table_data_size);
dev_dbg(&sep->pdev->dev,
"[PID%d] num_table_entries is (hex) %lx\n",
current->pid, num_table_entries);
@@ -1762,8 +1755,8 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
(unsigned long) lli_table_ptr);
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys address is %08lx "
- "block size is (hex) %x\n", current->pid,
+ "[PID%d] phys address is %08lx block size is (hex) %x\n",
+ current->pid,
(unsigned long)lli_table_ptr->bus_address,
lli_table_ptr->block_size);
}
@@ -1772,14 +1765,12 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
lli_table_ptr--;
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys lli_table_ptr->block_size "
- "is (hex) %x\n",
+ "[PID%d] phys lli_table_ptr->block_size is (hex) %x\n",
current->pid,
lli_table_ptr->block_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys lli_table_ptr->physical_address "
- "is %08lx\n",
+ "[PID%d] phys lli_table_ptr->physical_address is %08lx\n",
current->pid,
(unsigned long)lli_table_ptr->bus_address);
@@ -1788,13 +1779,11 @@ static void sep_debug_print_lli_tables(struct sep_device *sep,
num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
dev_dbg(&sep->pdev->dev,
- "[PID%d] phys table_data_size is "
- "(hex) %lx num_table_entries is"
- " %lx bus_address is%lx\n",
- current->pid,
- table_data_size,
- num_table_entries,
- (unsigned long)lli_table_ptr->bus_address);
+ "[PID%d] phys table_data_size is (hex) %lx num_table_entries is %lx bus_address is%lx\n",
+ current->pid,
+ table_data_size,
+ num_table_entries,
+ (unsigned long)lli_table_ptr->bus_address);
if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
lli_table_ptr = (struct sep_lli_entry *)
@@ -2244,14 +2233,12 @@ static int sep_construct_dma_tables_from_lli(
table_data_size = out_table_data_size;
dev_dbg(&sep->pdev->dev,
- "[PID%d] construct tables from lli"
- " in_table_data_size is (hex) %x\n", current->pid,
- in_table_data_size);
+ "[PID%d] construct tables from lli in_table_data_size is (hex) %x\n",
+ current->pid, in_table_data_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] construct tables from lli"
- "out_table_data_size is (hex) %x\n", current->pid,
- out_table_data_size);
+ "[PID%d] construct tables from lli out_table_data_size is (hex) %x\n",
+ current->pid, out_table_data_size);
/* Construct input lli table */
sep_build_lli_table(sep, &lli_in_array[current_in_entry],
@@ -2316,8 +2303,7 @@ static int sep_construct_dma_tables_from_lli(
info_in_entry_ptr->block_size);
dev_dbg(&sep->pdev->dev,
- "[PID%d] output lli_table_out_ptr:"
- "%08lx %08x\n",
+ "[PID%d] output lli_table_out_ptr: %08lx %08x\n",
current->pid,
(unsigned long)info_out_entry_ptr->bus_address,
info_out_entry_ptr->block_size);
@@ -2446,8 +2432,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_kernel_pages for input "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_kernel_pages for input virtual buffer failed\n",
+ current->pid);
goto end_function;
}
@@ -2460,8 +2446,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_kernel_pages for output "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_kernel_pages for output virtual buffer failed\n",
+ current->pid);
goto end_function_free_lli_in;
}
@@ -2476,8 +2462,8 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_user_pages for input "
- "virtual buffer failed\n", current->pid);
+ "[PID%d] sep_lock_user_pages for input virtual buffer failed\n",
+ current->pid);
goto end_function;
}
@@ -2491,8 +2477,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
SEP_DRIVER_OUT_FLAG, dma_ctx);
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] secure dma table setup "
- " for output virtual buffer failed\n",
+ "[PID%d] secure dma table setup for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
@@ -2512,8 +2497,7 @@ static int sep_prepare_input_output_dma_table(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "[PID%d] sep_lock_user_pages"
- " for output virtual buffer failed\n",
+ "[PID%d] sep_lock_user_pages for output virtual buffer failed\n",
current->pid);
goto end_function_free_lli_in;
@@ -2826,8 +2810,7 @@ int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
if (error) {
dev_warn(&sep->pdev->dev,
- "prepare DMA table call failed "
- "from prepare DCB call\n");
+ "prepare DMA table call failed from prepare DCB call\n");
goto end_function_error;
}
@@ -2889,7 +2872,8 @@ static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
* Go over each DCB and see if
* tail pointer must be updated
*/
- for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
+ for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
+ i++, dcb_table_ptr++) {
if (dcb_table_ptr->out_vr_tail_pt) {
pt_hold = (unsigned long)dcb_table_ptr->
out_vr_tail_pt;
@@ -3089,6 +3073,7 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB start\n",
current->pid);
+ /* fall-through */
case SEP_IOCPREPAREDCB_SECURE_DMA:
dev_dbg(&sep->pdev->dev,
"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
@@ -3762,8 +3747,7 @@ static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
if (actual_count != count_user) {
dev_warn(&sep->pdev->dev,
- "[PID%d] inconsistent message "
- "sizes 0x%08zX vs 0x%08zX\n",
+ "[PID%d] inconsistent message sizes 0x%08zX vs 0x%08zX\n",
current->pid, actual_count, count_user);
error = -EMSGSIZE;
goto end_function;
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index f0fcbf7c7d7f..0267dd8b84b7 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -724,7 +724,7 @@ static int qt_startup(struct usb_serial *serial)
goto startup_error;
}
- switch (serial->dev->descriptor.idProduct) {
+ switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
case QUATECH_DSU100:
case QUATECH_QSU100:
case QUATECH_ESU100A:
@@ -762,7 +762,9 @@ static int qt_startup(struct usb_serial *serial)
}
- status = box_set_prebuffer_level(serial); /* sets to default value */
+ status = box_set_prebuffer_level(serial); /* sets to
+ * default value
+ */
if (status < 0) {
dev_dbg(dev, "box_set_prebuffer_level failed\n");
goto startup_error;
@@ -887,7 +889,8 @@ static int qt_open(struct tty_struct *tty,
(SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
/* Set Baud rate to default and turn off (default)flow control here */
- result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR, DEFAULT_LCR);
+ result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR,
+ DEFAULT_LCR);
if (result < 0) {
dev_dbg(&port->dev, "qt_setuart failed\n");
return result;
@@ -1014,11 +1017,13 @@ static void qt_close(struct usb_serial_port *port)
/* Close uart channel */
status = qt_close_channel(serial, index);
if (status < 0)
- dev_dbg(&port->dev, "%s - qt_close_channel failed.\n", __func__);
+ dev_dbg(&port->dev, "%s - qt_close_channel failed.\n",
+ __func__);
port0->open_ports--;
- dev_dbg(&port->dev, "qt_num_open_ports in close%d\n", port0->open_ports);
+ dev_dbg(&port->dev, "qt_num_open_ports in close%d\n",
+ port0->open_ports);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
@@ -1235,7 +1240,8 @@ static void qt_set_termios(struct tty_struct *tty,
/* Now determine flow control */
if (cflag & CRTSCTS) {
- dev_dbg(&port->dev, "%s - Enabling HW flow control\n", __func__);
+ dev_dbg(&port->dev, "%s - Enabling HW flow control\n",
+ __func__);
/* Enable RTS/CTS flow control */
status = box_set_hw_flow_ctrl(port->serial, index, 1);
diff --git a/drivers/staging/silicom/bp_mod.h b/drivers/staging/silicom/bp_mod.h
index 8154a7bf050f..82b4963e97b6 100644
--- a/drivers/staging/silicom/bp_mod.h
+++ b/drivers/staging/silicom/bp_mod.h
@@ -497,11 +497,19 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
#define BPCTLI_STATUS 0x00008 /* Device Status - RO */
/* HW related */
-#define BPCTLI_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
-#define BPCTLI_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+#define BPCTLI_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW
+ * Defineable Pin 6
+ */
+#define BPCTLI_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW
+ * Defineable Pin 7
+ */
#define BPCTLI_CTRL_SDP0_DATA 0x00040000 /* SWDPIN 0 value */
-#define BPCTLI_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
-#define BPCTLI_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define BPCTLI_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6
+ * 0=in 1=out
+ */
+#define BPCTLI_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7
+ * 0=in 1=out
+ */
#define BPCTLI_CTRL_SDP0_DIR 0x00400000 /* SDP0 Input or output */
#define BPCTLI_CTRL_SWDPIN1 0x00080000
#define BPCTLI_CTRL_SDP1_DIR 0x00800000
@@ -565,7 +573,9 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
#define BPCTLI_SWFW_PHY0_SM 0x02
#define BPCTLI_SWFW_PHY1_SM 0x04
-#define BPCTLI_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+#define BPCTLI_SW_FW_SYNC 0x05B5C /* Software-Firmware
+ * Synchronization - RW
+ */
#define BPCTLI_SWSM 0x05B50 /* SW Semaphore */
#define BPCTLI_FWSM 0x05B54 /* FW Semaphore */
@@ -623,7 +633,8 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
/*#define BP10G_MCLK_DATA_OUT9 BP10G_I2C_CLK_OUT
#define BP10G_MDIO_DATA_OUT9 BP10G_I2C_DATA_OUT*/
- /*#define BP10G_MCLK_DATA_OUT9*//*BP10G_I2C_DATA_OUT */
+ /*#define BP10G_MCLK_DATA_OUT9*/
+ /*BP10G_I2C_DATA_OUT */
#define BP10G_MDIO_DATA_OUT9 BP10G_I2C_DATA_OUT /*BP10G_I2C_CLK_OUT */
/* VIA EOSDP ! */
@@ -698,5 +709,3 @@ static inline unsigned int jiffies_to_msecs(const unsigned long j)
readl((void *)((a)->mem_map) + BP10GB_##reg))
#endif
-
-int bp_proc_create(void);
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 20325f53328e..6b9365b28e8a 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -117,12 +117,12 @@ static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left);
static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev);
static void if_scan_init(void);
-int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
-int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
-int bp_proc_create(void);
+static int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
+static int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
+static int bp_proc_create(void);
-int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
-int get_dev_idx_bsf(int bus, int slot, int func);
+static int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+static int get_dev_idx_bsf(int bus, int slot, int func);
static int bp_get_dev_idx_bsf(struct net_device *dev, int *index)
{
@@ -262,7 +262,7 @@ static struct notifier_block bp_notifier_block = {
.notifier_call = bp_device_event,
};
-int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+static int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
int wdt_time_left(struct bpctl_dev *pbpctl_dev);
static void write_pulse(struct bpctl_dev *pbpctl_dev,
@@ -1502,7 +1502,8 @@ static int send_wdt_pulse(struct bpctl_dev *pbpctl_dev)
return 0;
}
-void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev, unsigned int value)
+static void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev,
+ unsigned int value)
{
uint32_t ctrl_ext = 0;
@@ -1757,7 +1758,7 @@ static int wdt_pulse_int(struct bpctl_dev *pbpctl_dev)
/*************************************/
/* CMND_ON 0x4 (100)*/
-int cmnd_on(struct bpctl_dev *pbpctl_dev)
+static int cmnd_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1774,7 +1775,7 @@ int cmnd_on(struct bpctl_dev *pbpctl_dev)
}
/* CMND_OFF 0x2 (10)*/
-int cmnd_off(struct bpctl_dev *pbpctl_dev)
+static int cmnd_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1792,7 +1793,7 @@ int cmnd_off(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_ON (0xa)*/
-int bypass_on(struct bpctl_dev *pbpctl_dev)
+static int bypass_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1813,7 +1814,7 @@ int bypass_on(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_OFF (0x8 111)*/
-int bypass_off(struct bpctl_dev *pbpctl_dev)
+static int bypass_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1836,7 +1837,7 @@ int bypass_off(struct bpctl_dev *pbpctl_dev)
}
/* TAP_OFF (0x9)*/
-int tap_off(struct bpctl_dev *pbpctl_dev)
+static int tap_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1849,7 +1850,7 @@ int tap_off(struct bpctl_dev *pbpctl_dev)
}
/* TAP_ON (0xb)*/
-int tap_on(struct bpctl_dev *pbpctl_dev)
+static int tap_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1862,7 +1863,7 @@ int tap_on(struct bpctl_dev *pbpctl_dev)
}
/* DISC_OFF (0x9)*/
-int disc_off(struct bpctl_dev *pbpctl_dev)
+static int disc_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1874,7 +1875,7 @@ int disc_off(struct bpctl_dev *pbpctl_dev)
}
/* DISC_ON (0xb)*/
-int disc_on(struct bpctl_dev *pbpctl_dev)
+static int disc_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1885,58 +1886,8 @@ int disc_on(struct bpctl_dev *pbpctl_dev)
return ret;
}
-/* DISC_PORT_ON */
-int disc_port_on(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_DISA);
- else
- write_data(pbpctl_dev_m, TX_DISB);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
-/* DISC_PORT_OFF */
-int disc_port_off(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_ENA);
- else
- write_data(pbpctl_dev_m, TX_ENB);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
/*TWO_PORT_LINK_HW_EN (0xe)*/
-int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
+static int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -1964,7 +1915,7 @@ int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_DIS (0xc)*/
-int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
+static int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -1990,7 +1941,7 @@ int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
}
/* WDT_OFF (0x6 110)*/
-int wdt_off(struct bpctl_dev *pbpctl_dev)
+static int wdt_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2013,7 +1964,7 @@ int wdt_off(struct bpctl_dev *pbpctl_dev)
static unsigned int
wdt_val_array[] = { 1000, 1500, 2000, 3000, 4000, 8000, 16000, 32000, 0 };
-int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
+static int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2065,7 +2016,7 @@ int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
return BP_NOT_CAP;
}
-void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
+static void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
@@ -2076,7 +2027,7 @@ void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm);
}
-s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
+static s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
s32 ret_val = 0;
@@ -2190,7 +2141,8 @@ static s32 bp75_acquire_phy(struct bpctl_dev *pbpctl_dev)
return ret_val;
}
-s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
+static s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset,
+ u16 *data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2223,7 +2175,8 @@ s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
+static s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset,
+ u16 data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2353,11 +2306,10 @@ static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
if (PEG5_IF_SERIES(pbpctl_dev->subdevice)) {
if (tx_state) {
uint16_t mii_reg;
- if (!
- (ret =
- bp75_read_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- &mii_reg))) {
+ ret = bp75_read_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ &mii_reg);
+ if (!ret) {
if (mii_reg & BPCTLI_MII_CR_POWER_DOWN) {
ret =
bp75_write_phy_reg
@@ -2369,17 +2321,15 @@ static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
}
} else {
uint16_t mii_reg;
- if (!
- (ret =
- bp75_read_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- &mii_reg))) {
+ ret = bp75_read_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ &mii_reg);
+ if (!ret) {
mii_reg |= BPCTLI_MII_CR_POWER_DOWN;
- ret =
- bp75_write_phy_reg(pbpctl_dev,
- BPCTLI_PHY_CONTROL,
- mii_reg);
+ ret = bp75_write_phy_reg(pbpctl_dev,
+ BPCTLI_PHY_CONTROL,
+ mii_reg);
}
}
@@ -2534,7 +2484,7 @@ static int set_bp_force_link(struct bpctl_dev *pbpctl_dev, int tx_state)
}
/*RESET_CONT 0x20 */
-int reset_cont(struct bpctl_dev *pbpctl_dev)
+static int reset_cont(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2551,7 +2501,7 @@ int reset_cont(struct bpctl_dev *pbpctl_dev)
}
/*DIS_BYPASS_CAP 0x22 */
-int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -2570,7 +2520,7 @@ int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
}
/*EN_BYPASS_CAP 0x24 */
-int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
+static int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -2586,7 +2536,7 @@ int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_STATE_PWRON 0x26*/
-int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWRON);
@@ -2600,7 +2550,7 @@ int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/* NORMAL_STATE_PWRON 0x28*/
-int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP)
|| (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP)) {
@@ -2615,7 +2565,7 @@ int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/* BYPASS_STATE_PWROFF 0x27*/
-int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
+static int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWROFF);
@@ -2626,7 +2576,7 @@ int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
}
/* NORMAL_STATE_PWROFF 0x29*/
-int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
+static int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP)) {
write_data(pbpctl_dev, NORMAL_STATE_PWROFF);
@@ -2637,7 +2587,7 @@ int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
}
/*TAP_STATE_PWRON 0x2a*/
-int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, TAP_STATE_PWRON);
@@ -2648,7 +2598,7 @@ int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/*DIS_TAP_CAP 0x2c*/
-int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, DIS_TAP_CAP);
@@ -2659,7 +2609,7 @@ int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_tap_cap(struct bpctl_dev *pbpctl_dev)
+static int en_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, EN_TAP_CAP);
@@ -2670,7 +2620,7 @@ int en_tap_cap(struct bpctl_dev *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
+static int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2683,7 +2633,7 @@ int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
}
/*DIS_DISC_CAP 0x2c*/
-int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
+static int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2695,60 +2645,8 @@ int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-/*DISC_STATE_PWRON 0x2a*/
-int disc_port_state_pwron(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
-
- return BP_NOT_CAP;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_DISA_PWRUP);
- else
- write_data(pbpctl_dev_m, TX_DISB_PWRUP);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
-int normal_port_state_pwron(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- struct bpctl_dev *pbpctl_dev_m;
- return BP_NOT_CAP;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- write_data(pbpctl_dev_m, TX_ENA_PWRUP);
- else
- write_data(pbpctl_dev_m, TX_ENB_PWRUP);
-
- msec_delay_bp(LATCH_DELAY);
-
- }
- return ret;
-}
-
/*EN_TAP_CAP 0x2e*/
-int en_disc_cap(struct bpctl_dev *pbpctl_dev)
+static int en_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2760,7 +2658,7 @@ int en_disc_cap(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_on(struct bpctl_dev *pbpctl_dev)
+static int std_nic_on(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2814,7 +2712,7 @@ int std_nic_on(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_off(struct bpctl_dev *pbpctl_dev)
+static int std_nic_off(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2977,7 +2875,7 @@ static void wd_reset_timer(unsigned long param)
}
/*WAIT_AT_PWRUP 0x80 */
-int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -2992,7 +2890,7 @@ int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
}
/*DIS_WAIT_AT_PWRUP 0x81 */
-int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3009,7 +2907,7 @@ int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
/*EN_HW_RESET 0x82 */
-int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3025,7 +2923,7 @@ int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
/*DIS_HW_RESET 0x83 */
-int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3040,7 +2938,7 @@ int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
}
-int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
+static int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
{
uint32_t status_reg = 0, status_reg1 = 0;
@@ -3091,7 +2989,7 @@ int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
return BP_NOT_CAP;
}
-int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
+static int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
return read_reg(pbpctl_dev, VER_REG_ADDR);
@@ -3099,7 +2997,7 @@ int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
+static int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
@@ -3210,7 +3108,7 @@ static int bp_force_link_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
+static int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -3230,7 +3128,7 @@ int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
+static int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -3244,7 +3142,7 @@ int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
+static int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_CAP)) {
@@ -3257,7 +3155,7 @@ int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3272,7 +3170,7 @@ int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_change_status(struct bpctl_dev *pbpctl_dev)
+static int bypass_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3291,18 +3189,6 @@ int bypass_change_status(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int bypass_off_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & BP_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- BYPASS_OFF_MASK) == BYPASS_OFF_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
static int bypass_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
@@ -3386,7 +3272,7 @@ static int bypass_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3418,7 +3304,7 @@ static int default_pwroff_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -3431,31 +3317,7 @@ int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int cmd_en_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- CMND_EN_MASK) == CMND_EN_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
-int wdt_en_status(struct bpctl_dev *pbpctl_dev)
-{
-
- if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
- return ((((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
- WDT_EN_MASK) == WDT_EN_MASK) ? 1 : 0);
- }
- }
- return BP_NOT_CAP;
-}
-
-int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
+static int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3481,40 +3343,7 @@ int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
return ret;
}
-int bypass_support(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
- ret =
- ((((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR)) &
- BYPASS_SUPPORT_MASK) ==
- BYPASS_SUPPORT_MASK) ? 1 : 0);
- } else if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
- ret = 1;
- } else
- ret = BP_NOT_CAP;
- return ret;
-}
-
-int tap_support(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
-
- if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
- ret =
- ((((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR)) &
- TAP_SUPPORT_MASK) == TAP_SUPPORT_MASK) ? 1 : 0);
- } else if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
- ret = 0;
- } else
- ret = BP_NOT_CAP;
- return ret;
-}
-
-int normal_support(struct bpctl_dev *pbpctl_dev)
+static int normal_support(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3530,7 +3359,7 @@ int normal_support(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
+static int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
(pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
@@ -3539,7 +3368,7 @@ int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
}
-int tap_flag_status(struct bpctl_dev *pbpctl_dev)
+static int tap_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3551,7 +3380,7 @@ int tap_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3565,7 +3394,7 @@ int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_change_status(struct bpctl_dev *pbpctl_dev)
+static int tap_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
@@ -3582,17 +3411,7 @@ int tap_change_status(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int tap_off_status(struct bpctl_dev *pbpctl_dev)
-{
- if (pbpctl_dev->bp_caps & TAP_CAP) {
- if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TAP_OFF_MASK) == TAP_OFF_MASK) ? 1 : 0);
- }
- return BP_NOT_CAP;
-}
-
-int tap_status(struct bpctl_dev *pbpctl_dev)
+static int tap_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
@@ -3631,7 +3450,7 @@ int tap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3642,7 +3461,7 @@ int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3653,7 +3472,7 @@ int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status(struct bpctl_dev *pbpctl_dev)
+static int disc_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3665,7 +3484,7 @@ int disc_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
+static int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3679,7 +3498,7 @@ int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_change_status(struct bpctl_dev *pbpctl_dev)
+static int disc_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3690,7 +3509,7 @@ int disc_change_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_off_status(struct bpctl_dev *pbpctl_dev)
+static int disc_off_status(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
u32 ctrl_ext = 0;
@@ -3786,7 +3605,7 @@ static int disc_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
+static int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3797,7 +3616,7 @@ int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
+static int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DIS_DISC_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3808,55 +3627,7 @@ int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_port_status(struct bpctl_dev *pbpctl_dev)
-{
- int ret = BP_NOT_CAP;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1) {
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TX_DISA_MASK) == TX_DISA_MASK) ? 1 : 0);
- } else
- return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
- TX_DISB_MASK) == TX_DISB_MASK) ? 1 : 0);
-
- }
- return ret;
-}
-
-int default_pwron_disc_port_status(struct bpctl_dev *pbpctl_dev)
-{
- int ret = BP_NOT_CAP;
- struct bpctl_dev *pbpctl_dev_m;
-
- if ((is_bypass_fn(pbpctl_dev)) == 1)
- pbpctl_dev_m = pbpctl_dev;
- else
- pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
- if (pbpctl_dev_m == NULL)
- return BP_NOT_CAP;
-
- if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1)
- return ret;
- /* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
- else
- return ret;
- /* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
-
- }
- return ret;
-}
-
-int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
+static int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver <= PXG2BPI_VER)
@@ -3879,7 +3650,7 @@ int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
+static int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
@@ -3890,22 +3661,7 @@ int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl_hw_status(struct bpctl_dev *pbpctl_dev)
-{
- struct bpctl_dev *pbpctl_dev_b = NULL;
-
- pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
- if (!pbpctl_dev_b)
- return BP_NOT_CAP;
-
- if (TPL_IF_SERIES(pbpctl_dev->subdevice))
- return (((BPCTL_READ_REG(pbpctl_dev, CTRL)) &
- BPCTLI_CTRL_SWDPIN0) != 0 ? 1 : 0);
- return BP_NOT_CAP;
-}
-
-
-int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
+static int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3916,7 +3672,7 @@ int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
+static int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3930,7 +3686,7 @@ int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
}
-int std_nic_status(struct bpctl_dev *pbpctl_dev)
+static int std_nic_status(struct bpctl_dev *pbpctl_dev)
{
int status_val = 0;
@@ -3978,7 +3734,7 @@ int std_nic_status(struct bpctl_dev *pbpctl_dev)
/******************************************************/
/**************SW_INIT*********************************/
/******************************************************/
-void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
+static void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
{
u_int32_t ctrl_ext = 0;
struct bpctl_dev *pbpctl_dev_m = NULL;
@@ -4196,23 +3952,7 @@ void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
}
}
-int bypass_off_init(struct bpctl_dev *pbpctl_dev)
-{
- int ret = cmnd_on(pbpctl_dev);
- if (ret < 0)
- return ret;
- if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
- return dis_bypass_cap(pbpctl_dev);
- wdt_off(pbpctl_dev);
- if (pbpctl_dev->bp_caps & BP_CAP)
- bypass_off(pbpctl_dev);
- if (pbpctl_dev->bp_caps & TAP_CAP)
- tap_off(pbpctl_dev);
- cmnd_off(pbpctl_dev);
- return 0;
-}
-
-void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
#ifdef BP_SELF_TEST
struct bpctl_dev *pbpctl_dev_sl = NULL;
@@ -4241,7 +3981,7 @@ void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
}
-int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
init_timer(&pbpctl_dev->bp_timer);
@@ -4288,7 +4028,7 @@ int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
-int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
+static int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->reset_time != param) {
@@ -4307,7 +4047,7 @@ int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP)
return pbpctl_dev->reset_time;
@@ -4378,7 +4118,7 @@ int is_bypass_fn(struct bpctl_dev *pbpctl_dev)
return (((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) ? 1 : 0);
}
-int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
@@ -4396,12 +4136,12 @@ int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
return bypass_status(pbpctl_dev);
}
-int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4409,7 +4149,7 @@ int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
return bypass_change_status(pbpctl_dev);
}
-int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4428,7 +4168,7 @@ int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return ret;
}
-int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4436,7 +4176,7 @@ int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
return dis_bypass_cap_status(pbpctl_dev);
}
-int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4455,7 +4195,7 @@ int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4463,7 +4203,7 @@ int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
return default_pwroff_status(pbpctl_dev);
}
-int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
+static int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4482,7 +4222,7 @@ int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4490,7 +4230,7 @@ int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
return default_pwron_status(pbpctl_dev);
}
-int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
+static int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4512,7 +4252,7 @@ int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
return ret;
}
-int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
+static int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
{
if (!pbpctl_dev)
return -1;
@@ -4520,7 +4260,7 @@ int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
return wdt_programmed(pbpctl_dev, timeout);
}
-int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
+static int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
{
if (!pbpctl_dev)
return -1;
@@ -4528,7 +4268,7 @@ int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
return wdt_timer(pbpctl_dev, time_left);
}
-int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
+static int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4536,7 +4276,7 @@ int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
return wdt_timer_reload(pbpctl_dev);
}
-int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
{
int bp_status = 0;
@@ -4560,7 +4300,7 @@ int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
return bp_status;
}
-int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
+static int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4580,7 +4320,7 @@ int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
return ret;
}
-int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
+static int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4588,7 +4328,7 @@ int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
return std_nic_status(pbpctl_dev);
}
-int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4604,7 +4344,7 @@ int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_tap_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4612,7 +4352,7 @@ int get_tap_fn(struct bpctl_dev *pbpctl_dev)
return tap_status(pbpctl_dev);
}
-int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4630,7 +4370,7 @@ int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return ret;
}
-int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4642,7 +4382,7 @@ int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
return ((ret == 0) ? 1 : 0);
}
-int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4650,7 +4390,7 @@ int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
return tap_change_status(pbpctl_dev);
}
-int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4667,7 +4407,7 @@ int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4675,7 +4415,7 @@ int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
return dis_tap_cap_status(pbpctl_dev);
}
-int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
+static int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4692,7 +4432,7 @@ int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
return BP_NOT_CAP;
}
-int get_disc_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4703,7 +4443,7 @@ int get_disc_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
+static int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4721,7 +4461,7 @@ int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4731,7 +4471,7 @@ int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
return (ret == 0 ? 1 : (ret < 0 ? BP_NOT_CAP : 0));
}
-int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
+static int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4741,7 +4481,7 @@ int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
+static int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4759,7 +4499,7 @@ int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
+static int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4770,55 +4510,7 @@ int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_disc_port_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
-{
- int ret = BP_NOT_CAP;
- if (!pbpctl_dev)
- return -1;
-
- if (!disc_mode)
- ret = disc_port_off(pbpctl_dev);
- else
- ret = disc_port_on(pbpctl_dev);
-
- return ret;
-}
-
-int get_disc_port_fn(struct bpctl_dev *pbpctl_dev)
-{
- if (!pbpctl_dev)
- return -1;
-
- return disc_port_status(pbpctl_dev);
-}
-
-int set_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
-{
- int ret = BP_NOT_CAP;
- if (!pbpctl_dev)
- return -1;
-
- if (!disc_mode)
- ret = normal_port_state_pwron(pbpctl_dev);
- else
- ret = disc_port_state_pwron(pbpctl_dev);
-
- return ret;
-}
-
-int get_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- if (!pbpctl_dev)
- return -1;
-
- ret = default_pwron_disc_port_status(pbpctl_dev);
- if (ret < 0)
- return ret;
- return ((ret == 0) ? 1 : 0);
-}
-
-int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4826,7 +4518,7 @@ int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
return wdt_exp_mode_status(pbpctl_dev);
}
-int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
+static int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4834,19 +4526,7 @@ int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
return wdt_exp_mode(pbpctl_dev, param);
}
-int reset_cont_fn(struct bpctl_dev *pbpctl_dev)
-{
- int ret = 0;
- if (!pbpctl_dev)
- return -1;
-
- ret = cmnd_on(pbpctl_dev);
- if (ret < 0)
- return ret;
- return reset_cont(pbpctl_dev);
-}
-
-int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
+static int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -4867,7 +4547,7 @@ int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
return set_tx(pbpctl_dev, tx_state);
}
-int set_bp_force_link_fn(int dev_num, int tx_state)
+static int set_bp_force_link_fn(int dev_num, int tx_state)
{
static struct bpctl_dev *bpctl_dev_curr;
@@ -4879,7 +4559,7 @@ int set_bp_force_link_fn(int dev_num, int tx_state)
return set_bp_force_link(bpctl_dev_curr, tx_state);
}
-int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
+static int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4887,7 +4567,7 @@ int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
return set_bypass_wd_auto(pbpctl_dev, param);
}
-int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
+static int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4914,7 +4594,7 @@ int get_bp_self_test_fn(struct bpctl_dev *pbpctl_dev)
#endif
-int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4923,7 +4603,8 @@ int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
}
-int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_dev_out)
+static int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev,
+ struct bpctl_dev **pbpctl_dev_out)
{
int idx_dev = 0;
if (!pbpctl_dev)
@@ -4955,7 +4636,7 @@ int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_
return 0;
}
-int is_bypass(struct bpctl_dev *pbpctl_dev)
+static int is_bypass(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4966,7 +4647,7 @@ int is_bypass(struct bpctl_dev *pbpctl_dev)
return 0;
}
-int get_tx_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tx_fn(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
@@ -4986,7 +4667,7 @@ int get_tx_fn(struct bpctl_dev *pbpctl_dev)
return tx_status(pbpctl_dev);
}
-int get_bp_force_link_fn(int dev_num)
+static int get_bp_force_link_fn(int dev_num)
{
static struct bpctl_dev *bpctl_dev_curr;
@@ -5049,7 +4730,7 @@ static void bp_tpl_timer_fn(unsigned long param)
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + BP_LINK_MON_DELAY * HZ);
}
-void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
+static void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
@@ -5067,7 +4748,7 @@ void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
return;
}
-int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
+static int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5080,7 +4761,7 @@ int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
return BP_NOT_CAP;
}
-int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
+static int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (!pbpctl_dev)
return -1;
@@ -5098,17 +4779,7 @@ int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
-{
- if (!pbpctl_dev)
- return -1;
- if (pbpctl_dev->bp_caps & TPL_CAP)
- return pbpctl_dev->bp_tpl_flag;
-
- return BP_NOT_CAP;
-}
-
-int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
+static int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
{
struct bpctl_dev *pbpctl_dev_b = NULL;
@@ -5138,7 +4809,7 @@ int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
return BP_NOT_CAP;
}
-int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
+static int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -5152,7 +4823,7 @@ int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5172,7 +4843,7 @@ int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5185,7 +4856,7 @@ int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
return ret;
}
-int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
+static int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5205,7 +4876,7 @@ int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
+static int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5220,7 +4891,7 @@ int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
}
-int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
+static int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
char *add_param)
{
if (!pbpctl_dev)
@@ -5232,7 +4903,7 @@ int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
return 0;
}
-int get_dev_idx_bsf(int bus, int slot, int func)
+static int get_dev_idx_bsf(int bus, int slot, int func)
{
int idx_dev = 0;
for (idx_dev = 0;
@@ -7022,12 +6693,6 @@ int set_wd_exp_mode_sd(int ifindex, int param)
}
EXPORT_SYMBOL(set_wd_exp_mode_sd);
-int reset_cont_sd(int ifindex)
-{
- return reset_cont_fn(get_dev_idx_p(ifindex));
-
-}
-
int set_tx_sd(int ifindex, int tx_state)
{
return set_tx_fn(get_dev_idx_p(ifindex), tx_state);
@@ -7118,7 +6783,7 @@ EXPORT_SYMBOL(bp_if_scan_sd);
static struct proc_dir_entry *bp_procfs_dir;
-int bp_proc_create(void)
+static int bp_proc_create(void)
{
bp_procfs_dir = proc_mkdir(BP_PROC_DIR, init_net.proc_net);
if (bp_procfs_dir == (struct proc_dir_entry *)0) {
@@ -7746,7 +7411,7 @@ static int show_wd_autoreset(struct seq_file *m, void *v)
}
RW_FOPS(wd_autoreset)
-int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
+static int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
static struct proc_dir_entry *procfs_dir;
@@ -7816,7 +7481,7 @@ int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
return ret;
}
-int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
+static int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
diff --git a/drivers/staging/silicom/bypasslib/bp_ioctl.h b/drivers/staging/silicom/bypasslib/bp_ioctl.h
index 2d1ef5384436..bf47f786866b 100644
--- a/drivers/staging/silicom/bypasslib/bp_ioctl.h
+++ b/drivers/staging/silicom/bypasslib/bp_ioctl.h
@@ -51,9 +51,9 @@
#define WDT_STEP_TIME 0x10 /* BIT_4 */
#define WD_MIN_TIME_GET(desc) (desc & 0xf)
-#define WD_STEP_COUNT_GET(desc) (desc>>5) & 0xf
+#define WD_STEP_COUNT_GET(desc) ((desc>>5) & 0xf)
-typedef enum {
+enum {
IS_BYPASS = 1,
GET_BYPASS_SLAVE,
GET_BYPASS_CAPS,
@@ -103,7 +103,7 @@ typedef enum {
SET_BP_HW_RESET,
} CMND_TYPE;
-typedef enum {
+enum {
IF_SCAN_SD,
GET_DEV_NUM_SD,
IS_BYPASS_SD,
@@ -156,7 +156,7 @@ typedef enum {
} CMND_TYPE_SD;
-#define SIOCGIFBYPASS SIOCDEVPRIVATE+10
+#define SIOCGIFBYPASS (SIOCDEVPRIVATE+10)
struct bp_info {
char prod_name[14];
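
A note on the WD_STEP_COUNT_GET and SIOCGIFBYPASS hunks above: wrapping the macro bodies in parentheses is not cosmetic, since an unparenthesised body expands textually and picks up whatever operator precedence surrounds the call site. A minimal, self-contained sketch of the failure mode (illustrative only; the OLD_/NEW_ names and the 0x7f value are made up and not part of the patch):

	#include <stdio.h>

	#define OLD_WD_STEP_COUNT_GET(desc)	(desc>>5) & 0xf		/* pre-patch form */
	#define NEW_WD_STEP_COUNT_GET(desc)	((desc>>5) & 0xf)	/* post-patch form */

	int main(void)
	{
		unsigned int desc = 0x7f;	/* hypothetical caps word, step-count field = 3 */

		/* Old form: expands to (desc>>5) & (0xf == 3), which is always 0 here. */
		printf("old: %d\n", OLD_WD_STEP_COUNT_GET(desc) == 3);
		/* New form: ((desc>>5) & 0xf) == 3, the intended comparison -> 1. */
		printf("new: %d\n", NEW_WD_STEP_COUNT_GET(desc) == 3);
		return 0;
	}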
diff --git a/drivers/staging/silicom/bypasslib/libbp_sd.h b/drivers/staging/silicom/bypasslib/libbp_sd.h
index 3b4f8364ed18..cac4b0b2ed78 100644
--- a/drivers/staging/silicom/bypasslib/libbp_sd.h
+++ b/drivers/staging/silicom/bypasslib/libbp_sd.h
@@ -18,7 +18,7 @@
* @if_index: network device index
*
* Output:
- * 1 - if device is bypass controlling device,
+ * 1 - if device is bypass controlling device,
* 0 - if device is bypass slave device
* -1 - device not support Bypass
**/
@@ -30,7 +30,7 @@ int is_bypass_sd(int if_index);
*
* Output:
* network device index of the slave device
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_slave_sd(int if_index);
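
The two calls documented in the hunks above pair up naturally: is_bypass_sd() identifies a controlling (master) port and get_bypass_slave_sd() resolves its partner. A minimal usage sketch against the documented return values (illustrative only; probe_pair() and the printf wording are not part of the library):

	#include <stdio.h>
	#include "libbp_sd.h"

	static void probe_pair(int if_index)
	{
		int slave;

		if (is_bypass_sd(if_index) != 1)
			return;			/* slave port or no bypass support */

		slave = get_bypass_slave_sd(if_index);
		if (slave < 0) {
			printf("if %d: controlling port, slave lookup failed\n", if_index);
			return;
		}
		printf("if %d: bypass pair with if %d\n", if_index, slave);
	}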
@@ -39,55 +39,72 @@ int get_bypass_slave_sd(int if_index);
* @if_index: network device index
*
* Output:
- * flags word on success;flag word is a 32-bit mask word with each bit defines different
- * capability as described bellow.
+ * flags word on success; the flag word is a 32-bit mask where each bit defines
+ * a different capability, as described below.
* Value of 1 for supporting this feature. 0 for not supporting this feature.
- * -1 - on failure (if the device is not capable of the operation or not a Bypass device)
- * Bit feature description
- *
- * 0 BP_CAP The interface is Bypass capable in general
- *
- * 1 BP_STATUS_CAP The interface can report of the current Bypass mode
- *
- * 2 BP_STATUS_CHANGE_CAP The interface can report on a change to bypass mode from
- * the last time the mode was defined
- *
- * 3 SW_CTL_CAP The interface is Software controlled capable for bypass/non bypass modes.
- *
- * 4 BP_DIS_CAP The interface is capable of disabling the Bypass mode at all times.
- * This mode will retain its mode even during power loss and also after
- * power recovery. This will overcome on any bypass operation due to
- * watchdog timeout or set bypass command.
- *
- * 5 BP_DIS_STATUS_CAP The interface can report of the current DIS_BP_CAP
- *
- * 6 STD_NIC_CAP The interface is capable to be configured to operate as standard, non Bypass,
- * NIC interface (have direct connection to interfaces at all power modes)
- *
- * 7 BP_PWOFF_NO_CAP The interface can be in Bypass mode at power off state
- *
- * 8 BP_PWOFF_OFF_CAP The interface can disconnect the Bypass mode at power off state without
- * effecting all the other states of operation
- *
- * 9 BP_PWOFF_CTL_CAP The behavior of the Bypass mode at Power-off state can be controlled by
- * software without effecting any other state
- *
- *10 BP_PWUP_ON_CAP The interface can be in Bypass mode when power is turned on
- * (until the system take control of the bypass functionality)
- *
- *11 BP_PWUP_OFF_CAP The interface can disconnect from Bypass mode when power is turned on
- * (until the system take control of the bypass functionality)
- *
- *12 BP_PWUP_CTL_CAP The behavior of the Bypass mode at Power-up can be controlled by software
- *
- *13 WD_CTL_CAP The interface has watchdog capabilities to turn to Bypass mode when not reset
- * for defined period of time.
- *
- *14 WD_STATUS_CAP The interface can report on the watchdog status (Active/inactive)
- *
- *15 WD_TIMEOUT_CAP The interface can report the time left till watchdog triggers to Bypass mode.
- *
- *16-31 RESERVED
+ * -1 - on failure (if the device is not capable of the operation or not a
+ * Bypass device)
+ * Bit feature description
+ *
+ * 0 BP_CAP The interface is Bypass capable in general
+ *
+ * 1 BP_STATUS_CAP The interface can report the current Bypass
+ * mode
+ *
+ * 2 BP_STATUS_CHANGE_CAP The interface can report on a change to bypass
+ * mode from the last time the mode was defined
+ *
+ * 3 SW_CTL_CAP The interface is Software controlled capable for
+ * bypass/non bypass modes.
+ *
+ * 4 BP_DIS_CAP The interface is capable of disabling the Bypass
+ * mode at all times. This setting is retained
+ * even during power loss and also after power
+ * recovery. It overrides any bypass operation
+ * due to a watchdog timeout or a set bypass
+ * command.
+ *
+ * 5 BP_DIS_STATUS_CAP The interface can report the current
+ * DIS_BP_CAP
+ *
+ * 6 STD_NIC_CAP The interface can be configured to operate as a
+ * standard, non-Bypass NIC interface (with a
+ * direct connection to the interfaces at all
+ * power modes)
+ *
+ * 7 BP_PWOFF_NO_CAP The interface can be in Bypass mode at power off
+ * state
+ *
+ * 8 BP_PWOFF_OFF_CAP The interface can disconnect the Bypass mode at
+ * power off state without affecting all the other
+ * states of operation
+ *
+ * 9 BP_PWOFF_CTL_CAP The behavior of the Bypass mode at Power-off
+ * state can be controlled by software without
+ * affecting any other state
+ *
+ *10 BP_PWUP_ON_CAP The interface can be in Bypass mode when power
+ * is turned on (until the system takes control of
+ * the bypass functionality)
+ *
+ *11 BP_PWUP_OFF_CAP The interface can disconnect from Bypass mode
+ * when power is turned on (until the system takes
+ * control of the bypass functionality)
+ *
+ *12 BP_PWUP_CTL_CAP The behavior of the Bypass mode at Power-up can
+ * be controlled by software
+ *
+ *13 WD_CTL_CAP The interface has watchdog capabilities to turn
+ * to Bypass mode when not reset for defined period
+ * of time.
+ *
+ *14 WD_STATUS_CAP The interface can report on the watchdog status
+ * (Active/inactive)
+ *
+ *15 WD_TIMEOUT_CAP The interface can report the time left till
+ * watchdog triggers to Bypass mode.
+ *
+ *16-31 RESERVED
*
* **/
int get_bypass_caps_sd(int if_index);
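
Because the returned flags word packs one feature per bit (bit positions as listed in the comment above), callers normally test individual bits rather than the whole value. A minimal sketch, assuming the documented bit layout; the MY_* constants are illustrative stand-ins, not names taken from the headers:

	#include <stdio.h>
	#include "libbp_sd.h"

	#define MY_BP_CAP	(1 << 0)	/* bit 0: bypass capable */
	#define MY_SW_CTL_CAP	(1 << 3)	/* bit 3: software controlled */
	#define MY_WD_CTL_CAP	(1 << 13)	/* bit 13: watchdog capable */

	static void report_caps(int if_index)
	{
		int caps = get_bypass_caps_sd(if_index);

		if (caps < 0) {
			printf("if %d: not a Bypass-capable device\n", if_index);
			return;
		}
		printf("bypass:%c sw-ctl:%c watchdog:%c\n",
		       (caps & MY_BP_CAP) ? 'y' : 'n',
		       (caps & MY_SW_CTL_CAP) ? 'y' : 'n',
		       (caps & MY_WD_CTL_CAP) ? 'y' : 'n');
	}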
@@ -97,34 +114,35 @@ int get_bypass_caps_sd(int if_index);
* @if_index: network device index
*
* Output:
- *
- * Set of numbers defining the various parameters of the watchdog capable
+ *
+ * Set of numbers defining the various parameters of the watchdog capable
 * to be set to as described below.
* -1 - on failure (device not support Bypass or it's a slave device)
- *
+ *
* Bit feature description
- *
+ *
* 0-3 WD_MIN_TIME The interface WD minimal time period in 100mS units
- *
- * 4 WD_STEP_TIME The steps of the WD timer in
+ *
+ * 4 WD_STEP_TIME The steps of the WD timer in
* 0 - for linear steps (WD_MIN_TIME * X)
- * 1 - for multiply by 2 from previous step (WD_MIN_TIME * 2^X)
- *
- * 5-8 WD_STEP_COUNT Number of steps the WD timer supports in 2^X
+ * 1 - for multiplying by 2 from the previous step
+ * (WD_MIN_TIME * 2^X)
+ *
+ * 5-8 WD_STEP_COUNT Number of steps the WD timer supports in 2^X
* (X bit available for defining the value)
- *
- *
- *
+ *
+ *
+ *
**/
int get_wd_set_caps_sd(int if_index);
/**
* set_bypass - set Bypass state
* @if_index: network device index of the controlling device
- * @bypass_mode: bypass mode (1=on, 0=off)
+ * @bypass_mode: bypass mode (1=on, 0=off)
* Output:
* 0 - on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int set_bypass_sd(int if_index, int bypass_mode);
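
The watchdog capability word described a few lines up is the natural consumer of the WD_MIN_TIME_GET / WD_STEP_COUNT_GET helpers from bp_ioctl.h that this patch parenthesises. A minimal decode sketch, assuming WD_MIN_TIME really is in 100 ms units and WD_STEP_COUNT is the 2^X exponent, as the comment states:

	#include <stdio.h>
	#include "libbp_sd.h"
	#include "bp_ioctl.h"	/* WD_MIN_TIME_GET, WDT_STEP_TIME, WD_STEP_COUNT_GET */

	static void show_wd_caps(int if_index)
	{
		int desc = get_wd_set_caps_sd(if_index);

		if (desc < 0) {
			printf("if %d: watchdog caps unavailable\n", if_index);
			return;
		}
		printf("min period : %d ms\n", WD_MIN_TIME_GET(desc) * 100);
		printf("step rule  : %s\n", (desc & WDT_STEP_TIME) ?
		       "WD_MIN_TIME * 2^X" : "WD_MIN_TIME * X");
		printf("step count : 2^%d\n", WD_STEP_COUNT_GET(desc));
	}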
@@ -133,7 +151,7 @@ int set_bypass_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_sd(int if_index);
@@ -142,7 +160,7 @@ int get_bypass_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_bypass_change_sd(int if_index);
@@ -152,8 +170,8 @@ int get_bypass_change_sd(int if_index);
* @dis_bypass: disable bypass(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_dis_bypass_sd(int if_index, int dis_bypass);
@@ -162,8 +180,8 @@ int set_dis_bypass_sd(int if_index, int dis_bypass);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal Bypass mode/ Disable bypass)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_dis_bypass_sd(int if_index);
@@ -172,9 +190,9 @@ int get_dis_bypass_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: bypass mode setting at power off state (1=BP en, 0=BP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_pwoff_sd(int if_index, int bypass_mode);
@@ -183,8 +201,8 @@ int set_bypass_pwoff_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable bypass at power off state / normal Bypass mode)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_pwoff_sd(int if_index);
@@ -193,9 +211,9 @@ int get_bypass_pwoff_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: bypass mode setting at power up state (1=BP en, 0=BP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_pwup_sd(int if_index, int bypass_mode);
@@ -204,59 +222,60 @@ int set_bypass_pwup_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable bypass at power up state / normal Bypass mode)
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_pwup_sd(int if_index);
/**
* set_bypass_wd - Set watchdog state
* @if_index: network device index of the controlling device
- * @ms_timeout: requested timeout (in ms units), 0 for disabling the watchdog timer
- * @ms_timeout_set(output): requested timeout (in ms units),
- * that the adapter supports and will be used by the watchdog
+ * @ms_timeout: requested timeout (in ms units), 0 for disabling the watchdog
+ * timer
+ * @ms_timeout_set(output): requested timeout (in ms units), that the adapter
+ * supports and will be used by the watchdog
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set);
/**
* get_bypass_wd - Get watchdog state
* @if_index: network device index of the controlling device
- * @ms_timeout (output): WDT timeout (in ms units),
+ * @ms_timeout (output): WDT timeout (in ms units),
* -1 for unknown wdt status
* 0 if WDT is disabled
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_bypass_wd_sd(int if_index, int *ms_timeout_set);
/**
* get_wd_expire_time - Get watchdog expire
* @if_index: network device index of the controlling device
- * @ms_time_left (output): time left till watchdog time expire,
+ * @ms_time_left (output): time left till watchdog time expire,
* -1 if WDT has expired
* 0 if WDT is disabled
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device or unknown wdt status)
**/
int get_wd_expire_time_sd(int if_index, int *ms_time_left);
/**
* reset_bypass_wd_timer - Reset watchdog timer
* @if_index: network device index of the controlling device
- *
+ *
* Output:
* 1 - on success
* 0 - watchdog is not configured
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device or unknown wdt status)
**/
int reset_bypass_wd_timer_sd(int if_index);
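
Taken together, the watchdog calls rewrapped in the hunk above form the usual arm-and-kick pattern: program a timeout, then keep resetting the timer while the host is healthy so the adapter never drops into bypass. A minimal sketch against the documented return values (the 2000 ms request and the 1 s kick interval are arbitrary choices, not values from the driver):

	#include <stdio.h>
	#include <unistd.h>
	#include "libbp_sd.h"

	static int run_watchdog(int if_index)
	{
		int granted = 0;

		/* Request ~2 s; the adapter reports the timeout it will really use. */
		if (set_bypass_wd_sd(if_index, 2000, &granted) < 0)
			return -1;
		printf("watchdog armed, effective timeout %d ms\n", granted);

		for (;;) {
			if (reset_bypass_wd_timer_sd(if_index) < 0)
				return -1;	/* device gone or wdt state unknown */
			sleep(1);		/* kick well inside the granted window */
		}
	}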
@@ -264,53 +283,54 @@ int reset_bypass_wd_timer_sd(int if_index);
* set_std_nic - Standard NIC mode of operation
* @if_index: network device index of the controlling device
* @nic_mode: 0/1 (Default Bypass mode / Standard NIC mode)
- *
+ *
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int set_std_nic_sd(int if_index, int nic_mode);
/**
* get_std_nic - Get Standard NIC mode setting
* @if_index: network device index of the controlling device
- *
+ *
* Output:
* 0/1 (Default Bypass mode / Standard NIC mode) on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass or it's a slave device)
**/
int get_std_nic_sd(int if_index);
/**
- * set_tx - set transmitter enable/disable
+ * set_tx - set transmitter enable/disable
* @if_index: network device index of the controlling device
* @tx_state: 0/1 (Transmit Disable / Transmit Enable)
- *
+ *
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation )
+ * -1 - on failure (device is not capable of the operation)
**/
int set_tx_sd(int if_index, int tx_state);
/**
 * get_tx - get transmitter state (disable / enable)
* @if_index: network device index of the controlling device
- *
+ *
* Output:
 * 0/1 (Transmit Disable / Transmit Enable) on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * Bypass)
**/
int get_tx_sd(int if_index);
/**
* set_tap - set TAP state
* @if_index: network device index of the controlling device
- * @tap_mode: 1 tap mode , 0 normal nic mode
+ * @tap_mode: 1 tap mode, 0 normal nic mode
* Output:
* 0 - on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int set_tap_sd(int if_index, int tap_mode);
@@ -319,7 +339,7 @@ int set_tap_sd(int if_index, int tap_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int get_tap_sd(int if_index);
@@ -328,7 +348,7 @@ int get_tap_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TAP or it's a slave device)
+ * -1 - on failure (device not support TAP or it's a slave device)
**/
int get_tap_change_sd(int if_index);
@@ -338,8 +358,8 @@ int get_tap_change_sd(int if_index);
* @dis_tap: disable tap(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * TAP or it's a slave device)
**/
int set_dis_tap_sd(int if_index, int dis_tap);
@@ -348,8 +368,8 @@ int set_dis_tap_sd(int if_index, int dis_tap);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal TAP mode/ Disable TAP)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not support
+ * TAP or it's a slave device)
**/
int get_dis_tap_sd(int if_index);
@@ -358,9 +378,9 @@ int get_dis_tap_sd(int if_index);
* @if_index: network device index of the controlling device
* @bypass_mode: tap mode setting at power up state (1=TAP en, 0=TAP Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not
+ * support TAP or it's a slave device)
**/
int set_tap_pwup_sd(int if_index, int tap_mode);
@@ -369,18 +389,18 @@ int set_tap_pwup_sd(int if_index, int tap_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (Disable TAP at power up state / normal TAP mode)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support TAP or it's a slave device)
**/
int get_tap_pwup_sd(int if_index);
/**
* set_bp_disc - set Disconnect state
* @if_index: network device index of the controlling device
- * @tap_mode: 1 disc mode , 0 non-disc mode
+ * @disc_mode: 1 disc mode, 0 non-disc mode
* Output:
* 0 - on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int set_bp_disc_sd(int if_index, int disc_mode);
@@ -389,7 +409,7 @@ int set_bp_disc_sd(int if_index, int disc_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int get_bp_disc_sd(int if_index);
@@ -398,7 +418,7 @@ int get_bp_disc_sd(int if_index);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support Disconnect or it's a slave device)
+ * -1 - on failure (device not support Disconnect or it's a slave device)
**/
int get_bp_disc_change_sd(int if_index);
@@ -408,8 +428,8 @@ int get_bp_disc_change_sd(int if_index);
* @dis_tap: disable tap(1=dis, 0=en)
* Output:
* 0 - on success
- * -1 - on failure (device is not capable ofthe operation ordevice not support Disconnect
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int set_bp_dis_disc_sd(int if_index, int dis_disc);
@@ -418,8 +438,8 @@ int set_bp_dis_disc_sd(int if_index, int dis_disc);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - on success (normal Disconnect mode/ Disable Disconnect)
- * -1 - on failure (device is not capable of the operation ordevice not support Disconnect
- * or it's a slave device)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int get_bp_dis_disc_sd(int if_index);
@@ -428,9 +448,9 @@ int get_bp_dis_disc_sd(int if_index);
* @if_index: network device index of the controlling device
* @disc_mode: tap mode setting at power up state (1=Disc en, 0=Disc Dis)
* Output:
- * 0 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Disconnect
- * or it's a slave device)
+ * 0 - on success
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Disconnect or it's a slave device)
**/
int set_bp_disc_pwup_sd(int if_index, int disc_mode);
@@ -438,19 +458,20 @@ int set_bp_disc_pwup_sd(int if_index, int disc_mode);
* get_bp_disc_pwup - Get Disconnect mode state at power-up state
* @if_index: network device index of the controlling device
* Output:
- * 0/1 - on success (Disable Disconnect at power up state / normal Disconnect mode)
- * -1 - on failure (device is not capable of the operation ordevice not support TAP
- * or it's a slave device)
+ * 0/1 - on success (Disable Disconnect at power up state / normal Disconnect
+ * mode)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support TAP or it's a slave device)
**/
int get_bp_disc_pwup_sd(int if_index);
/**
* set_wd_exp_mode - Set adapter state when WDT expired.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 0 - on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int set_wd_exp_mode_sd(int if_index, int bypass_mode);
@@ -459,39 +480,41 @@ int set_wd_exp_mode_sd(int if_index, int bypass_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (bypass/tap) on success
- * -1 - on failure (device not support Bypass or it's a slave device)
+ * -1 - on failure (device not support Bypass or it's a slave device)
**/
int get_wd_exp_mode_sd(int if_index);
/**
* set_wd_autoreset - reset WDT periodically.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 1 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Bypass or it's a slave device or unknown wdt
+ * status)
**/
int set_wd_autoreset_sd(int if_index, int time);
/**
* set_wd_autoreset - reset WDT periodically.
* @if_index: network device index of the controlling device
- * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
+ * @bypass_mode: adapter mode (1=tap mode, 0=bypass mode)
* Output:
* 1 - on success
- * -1 - on failure (device is not capable of the operation ordevice not support Bypass
- * or it's a slave device or unknown wdt status)
+ * -1 - on failure (device is not capable of the operation or device not
+ * support Bypass or it's a slave device or unknown wdt
+ * status)
**/
int get_wd_autoreset_sd(int if_index);
/**
* set_tpl - set TPL state
* @if_index: network device index of the controlling device
- * @tpl_mode: 1 tpl mode , 0 normal nic mode
+ * @tpl_mode: 1 tpl mode , 0 normal nic mode
* Output:
* 0 - on success
- * -1 - on failure (device not support TPL)
+ * -1 - on failure (device not support TPL)
**/
int set_tpl_sd(int if_index, int tpl_mode);
@@ -500,7 +523,7 @@ int set_tpl_sd(int if_index, int tpl_mode);
* @if_index: network device index of the controlling device
* Output:
* 0/1 - (off/on) on success
- * -1 - on failure (device not support TPL or it's a slave device)
+ * -1 - on failure (device not support TPL or it's a slave device)
**/
int get_tpl_sd(int if_index);
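
The declarations above form a small per-interface control API: each call takes the network device index of the controlling device and returns -1 when the device lacks the capability or is a slave. A minimal sketch of a caller follows, assuming kernel code that includes the header declaring these functions; the interface index, the helper name, and the error reporting are illustrative and not part of the patch:

#include <linux/printk.h>
/* plus the header that declares the set_*_sd()/get_*_sd() calls above */

/* Illustrative caller only; if_index would come from the controlling
 * net_device, and the chosen modes are arbitrary. */
static int example_configure_tap(int if_index)
{
	int ret;

	ret = set_dis_tap_sd(if_index, 0);	/* keep TAP enabled */
	if (ret == -1) {
		pr_warn("bpctl: TAP not supported on ifindex %d\n", if_index);
		return ret;
	}

	ret = set_tap_pwup_sd(if_index, 1);	/* enable TAP at power-up */
	if (ret == -1)
		return ret;

	return get_dis_tap_sd(if_index);	/* 0 = normal TAP, 1 = TAP disabled */
}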
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
index 53052c4e78ae..4fa50e73ce86 100644
--- a/drivers/staging/slicoss/README
+++ b/drivers/staging/slicoss/README
@@ -5,43 +5,3 @@ This driver is supposed to support:
Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
The driver was actually tested on Oasis and Kalahari cards.
-
-TODO:
- - move firmware loading to request_firmware()
- - remove direct memory access of structures
- - any remaining sparse and checkpatch.pl warnings
-
- - use net_device_ops
- - use dev->stats rather than adapter->stats
- - don't cast netdev_priv it is already void
- - GET RID OF MACROS
- - work on all architectures
- - without CONFIG_X86_64 confusion
- - do 64 bit correctly
- - don't depend on order of union
- - get rid of ASSERT(), use BUG() instead but only where necessary
- looks like most aren't really useful
- - no new SIOCDEVPRIVATE ioctl allowed
- - don't use module_param for configuring interrupt mitigation
- use ethtool instead
- - reorder code to elminate use of forward declarations
- - don't keep private linked list of drivers.
- - remove all the gratiutous debug infrastructure
- - use PCI_DEVICE()
- - do ethtool correctly using ethtool_ops
- - NAPI?
- - wasted overhead of extra stats
- - state variables for things that are
- easily available and shouldn't be kept in card structure, cardnum, ...
- slotnumber, events, ...
- - get rid of slic_spinlock wrapper
- - volatile == bad design => bad code
- - locking too fine grained, not designed just throw more locks
- at problem
-
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
-<charrer@alacritech.com> as well as they are also able to test out any
-changes.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
new file mode 100644
index 000000000000..62ff1008b1ee
--- /dev/null
+++ b/drivers/staging/slicoss/TODO
@@ -0,0 +1,38 @@
+TODO:
+ - move firmware loading to request_firmware()
+ - remove direct memory access of structures
+ - any remaining sparse and checkpatch.pl warnings
+
+ - use net_device_ops
+ - use dev->stats rather than adapter->stats
+ - don't cast netdev_priv it is already void
+ - GET RID OF MACROS
+ - work on all architectures
+ - without CONFIG_X86_64 confusion
+ - do 64 bit correctly
+ - don't depend on order of union
+ - get rid of ASSERT(), use BUG() instead but only where necessary
+ looks like most aren't really useful
+ - no new SIOCDEVPRIVATE ioctl allowed
+ - don't use module_param for configuring interrupt mitigation
+ use ethtool instead
+ - reorder code to eliminate use of forward declarations
+ - don't keep private linked list of drivers.
+ - remove all the gratuitous debug infrastructure
+ - use PCI_DEVICE()
+ - do ethtool correctly using ethtool_ops
+ - NAPI?
+ - wasted overhead of extra stats
+ - state variables for things that are
+ easily available and shouldn't be kept in card structure, cardnum, ...
+ slotnumber, events, ...
+ - get rid of slic_spinlock wrapper
+ - volatile == bad design => bad code
+ - locking too fine grained, not designed just throw more locks
+ at problem
+
+Please send patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
+<charrer@alacritech.com> as well as they are also able to test out any
+changes.
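
Several of the items above are conventional conversions rather than open design questions; "use PCI_DEVICE()", for instance, refers to the standard helper for filling in a struct pci_device_id table. A hedged sketch of that conversion is shown below; the vendor and device identifiers are placeholders, not the Alacritech IDs the driver actually matches:

#include <linux/module.h>
#include <linux/pci.h>

/* Placeholder IDs for illustration; the real table is in slicoss.c. */
#define EXAMPLE_VENDOR_ID	0x1234
#define EXAMPLE_DEVICE_ID	0x5678

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);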
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index 4c7822bd5358..702902cdb461 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -464,9 +464,12 @@ struct adapter {
/*
* SLIC Handles
*/
- struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1]; /* Object handles*/
- struct slic_handle *pfree_slic_handles; /* Free object handles*/
- struct slic_spinlock handle_lock; /* Object handle list lock*/
+ /* Object handles*/
+ struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS+1];
+ /* Free object handles*/
+ struct slic_handle *pfree_slic_handles;
+ /* Object handle list lock*/
+ struct slic_spinlock handle_lock;
ushort slic_handle_ix;
u32 xmitq_full;
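
The three members re-commented above implement a pre-allocated pool of command handles: slic_handles[] is the storage, pfree_slic_handles is the head of a singly linked free list, and handle_lock serializes access to that list. A minimal sketch of the same free-list pattern follows, using a plain spinlock_t with local flags rather than the driver's slic_spinlock wrapper; all names here are illustrative:

#include <linux/spinlock.h>

struct example_handle {
	struct example_handle *next;
};

struct example_pool {
	struct example_handle *free_list;	/* head of the free list */
	spinlock_t lock;			/* protects free_list */
};

/* Pop one handle from the free list, or return NULL if it is empty. */
static struct example_handle *example_get(struct example_pool *pool)
{
	struct example_handle *h;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	h = pool->free_list;
	if (h)
		pool->free_list = h->next;
	spin_unlock_irqrestore(&pool->lock, flags);
	return h;
}

/* Push a handle back onto the free list. */
static void example_put(struct example_pool *pool, struct example_handle *h)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	h->next = pool->free_list;
	pool->free_list = h;
	spin_unlock_irqrestore(&pool->lock, flags);
}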
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 1426ca49bfe8..e27b88f02ccd 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -100,11 +100,11 @@
#include "slic.h"
static uint slic_first_init = 1;
-static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\
+static char *slic_banner = "Alacritech SLIC Technology(tm) Server "
"and Storage Accelerator (Non-Accelerated)";
static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
-static char *slic_product_name = "SLIC Technology(tm) Server "\
+static char *slic_product_name = "SLIC Technology(tm) Server "
"and Storage Accelerator (Non-Accelerated)";
static char *slic_vendor = "Alacritech, Inc.";
@@ -144,29 +144,6 @@ static const struct pci_device_id slic_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-#define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \
-{ \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
- _pslic_handle = _adapter->pfree_slic_handles; \
- if (_pslic_handle) { \
- _adapter->pfree_slic_handles = _pslic_handle->next; \
- } \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
-}
-
-#define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \
-{ \
- _pslic_handle->type = SLIC_HANDLE_FREE; \
- spin_lock_irqsave(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
- _pslic_handle->next = _adapter->pfree_slic_handles; \
- _adapter->pfree_slic_handles = _pslic_handle; \
- spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
- _adapter->handle_lock.flags); \
-}
-
static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
{
writel(value, reg);
@@ -1442,7 +1419,13 @@ static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
(adapter->slic_handle_ix < 256)) {
/* Allocate and initialize a SLIC_HANDLE for this command */
- SLIC_GET_SLIC_HANDLE(adapter, pslic_handle);
+ spin_lock_irqsave(&adapter->handle_lock.lock,
+ adapter->handle_lock.flags);
+ pslic_handle = adapter->pfree_slic_handles;
+ if (pslic_handle)
+ adapter->pfree_slic_handles = pslic_handle->next;
+ spin_unlock_irqrestore(&adapter->handle_lock.lock,
+ adapter->handle_lock.flags);
pslic_handle->type = SLIC_HANDLE_CMD;
pslic_handle->address = (void *) cmd;
pslic_handle->offset = (ushort) adapter->slic_handle_ix++;
@@ -1830,7 +1813,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
#endif
seq_printf(seq, "driver_version : %s\n", slic_proc_version);
- seq_puts(seq, "Microcode versions: \n");
+ seq_puts(seq, "Microcode versions:\n");
seq_printf(seq, " Gigabit (gb) : %s %s\n",
MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE);
seq_printf(seq, " Gigabit Receiver : %s %s\n",
@@ -1917,16 +1900,14 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
if (config->OEMFruFormat == VENDOR4_FRU_FORMAT) {
seq_printf(seq,
- "Serial # : "
- "%c%c%c%c%c%c%c%c%c%c%c%c\n",
+ "Serial # : %c%c%c%c%c%c%c%c%c%c%c%c\n",
fru[8], fru[9], fru[10],
fru[11], fru[12], fru[13],
fru[16], fru[17], fru[18],
fru[19], fru[20], fru[21]);
} else {
seq_printf(seq,
- "Serial # : "
- "%c%c%c%c%c%c%c%c%c%c%c%c%c%c\n",
+ "Serial # : %c%c%c%c%c%c%c%c%c%c%c%c%c%c\n",
fru[8], fru[9], fru[10],
fru[11], fru[12], fru[13],
fru[14], fru[15], fru[16],
@@ -1974,8 +1955,7 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
{
seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
- " Part # : "
- "%c%c%c%c%c%c%c%c\n",
+ " Part # : %c%c%c%c%c%c%c%c\n",
oemfru[0], oemfru[1], oemfru[2],
oemfru[3], oemfru[4], oemfru[5],
oemfru[6], oemfru[7]);
@@ -2002,20 +1982,17 @@ static int slic_debug_card_show(struct seq_file *seq, void *v)
{
seq_puts(seq, "FRU Information:\n");
seq_printf(seq,
- " FRU Number : "
- "%c%c%c%c%c%c%c%c\n",
+ " FRU Number : %c%c%c%c%c%c%c%c\n",
oemfru[0], oemfru[1], oemfru[2],
oemfru[3], oemfru[4], oemfru[5],
oemfru[6], oemfru[7]);
seq_sprintf(seq,
- " Part Number : "
- "%c%c%c%c%c%c%c%c\n",
+ " Part Number : %c%c%c%c%c%c%c%c\n",
oemfru[8], oemfru[9], oemfru[10],
oemfru[11], oemfru[12], oemfru[13],
oemfru[14], oemfru[15]);
seq_printf(seq,
- " EC Level : "
- "%c%c%c%c%c%c%c%c\n",
+ " EC Level : %c%c%c%c%c%c%c%c\n",
oemfru[16], oemfru[17], oemfru[18],
oemfru[19], oemfru[20], oemfru[21],
oemfru[22], oemfru[23]);
@@ -2412,8 +2389,7 @@ static void slic_xmit_fail(struct adapter *adapter,
switch (status) {
case XMIT_FAIL_LINK_STATE:
dev_err(&adapter->netdev->dev,
- "reject xmit skb[%p: %x] linkstate[%s] "
- "adapter[%s:%d] card[%s:%d]\n",
+ "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
skb, skb->pkt_type,
SLIC_LINKSTATE(adapter->linkstate),
SLIC_ADAPTER_STATE(adapter->state),
@@ -2428,8 +2404,7 @@ static void slic_xmit_fail(struct adapter *adapter,
break;
case XMIT_FAIL_HOSTCMD_FAIL:
dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands "
- "available\n", skb, skb->pkt_type);
+ "xmit_start skb[%p] type[%x] No host commands available\n", skb, skb->pkt_type);
break;
}
}
@@ -2642,8 +2617,7 @@ static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
}
} else if (isr & ISR_XDROP) {
dev_err(&dev->dev,
- "isr & ISR_ERR [%x] "
- "ISR_XDROP \n", isr);
+ "isr & ISR_ERR [%x] ISR_XDROP\n", isr);
} else {
dev_err(&dev->dev,
"isr & ISR_ERR [%x]\n",
@@ -2970,7 +2944,7 @@ static void slic_card_cleanup(struct sliccard *card)
{
if (card->loadtimerset) {
card->loadtimerset = 0;
- del_timer(&card->loadtimer);
+ del_timer_sync(&card->loadtimer);
}
slic_debug_card_destroy(card);
@@ -3269,8 +3243,7 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
if (!peeprom) {
dev_err(&adapter->pcidev->dev,
- "eeprom read failed to get memory "
- "bus %d slot %d\n", adapter->busnumber,
+ "eeprom read failed to get memory bus %d slot %d\n", adapter->busnumber,
adapter->slotnumber);
return -ENOMEM;
} else {
@@ -3703,7 +3676,7 @@ static int slic_entry_probe(struct pci_dev *pcidev,
err = slic_card_locate(adapter);
if (err) {
dev_err(&pcidev->dev, "cannot locate card\n");
- goto err_out_free_mmio_region;
+ goto err_out_unmap;
}
card = adapter->card;
@@ -3743,8 +3716,6 @@ static int slic_entry_probe(struct pci_dev *pcidev,
err_out_unmap:
iounmap(memmapped_ioaddr);
-err_out_free_mmio_region:
- release_mem_region(mmio_start, mmio_len);
err_out_free_netdev:
free_netdev(netdev);
err_out_exit_slic_probe:
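
One change above replaces del_timer() with del_timer_sync() in slic_card_cleanup(). del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler already running on another CPU to return, which matters when the structure the handler uses is about to be freed. A small sketch of that teardown ordering with placeholder names:

#include <linux/slab.h>
#include <linux/timer.h>

struct example_card {
	struct timer_list load_timer;
	/* ... state the timer handler reads ... */
};

static void example_card_cleanup(struct example_card *card)
{
	/* Wait for any in-flight callback before freeing the card. */
	del_timer_sync(&card->load_timer);
	kfree(card);
}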
diff --git a/drivers/staging/sm7xxfb/Kconfig b/drivers/staging/sm7xxfb/Kconfig
deleted file mode 100644
index e2922ae3a3ee..000000000000
--- a/drivers/staging/sm7xxfb/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config FB_SM7XX
- tristate "Silicon Motion SM7XX framebuffer support"
- depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the Silicon Motion SM710, SM712, SM721
- and SM722 chips.
-
- This driver is also available as a module. The module will be
- called sm7xxfb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/staging/sm7xxfb/Makefile b/drivers/staging/sm7xxfb/Makefile
deleted file mode 100644
index 48f471cf9f36..000000000000
--- a/drivers/staging/sm7xxfb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb.o
diff --git a/drivers/staging/sm7xxfb/TODO b/drivers/staging/sm7xxfb/TODO
deleted file mode 100644
index 1fcead591c16..000000000000
--- a/drivers/staging/sm7xxfb/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-TODO:
-- Dual head support
-- 2D acceleration support
-- use kernel coding style
-- refine the code and remove unused code
-- move it to drivers/video/sm7xxfb.c
-
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
-Teddy Wang <teddy.wang@siliconmotion.com.cn>.
diff --git a/drivers/staging/sm7xxfb/sm7xx.h b/drivers/staging/sm7xxfb/sm7xx.h
deleted file mode 100644
index 85998615b801..000000000000
--- a/drivers/staging/sm7xxfb/sm7xx.h
+++ /dev/null
@@ -1,779 +0,0 @@
-/*
- * Silicon Motion SM712 frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#define NR_PALETTE 256
-
-#define FB_ACCEL_SMI_LYNX 88
-
-#define SCREEN_X_RES 1024
-#define SCREEN_Y_RES 600
-#define SCREEN_BPP 16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE 0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE 0x00800000
-
-#define dac_reg (0x3c8)
-#define dac_val (0x3c9)
-
-extern void __iomem *smtc_RegBaseAddress;
-#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
-#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
-#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
-
-#define smtc_mmiorb(reg) readb(smtc_RegBaseAddress + reg)
-#define smtc_mmiorw(reg) readw(smtc_RegBaseAddress + reg)
-#define smtc_mmiorl(reg) readl(smtc_RegBaseAddress + reg)
-
-#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
-#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
-#define SIZE_SR30_SR75 (0x75 - 0x30 + 1)
-#define SIZE_SR80_SR93 (0x93 - 0x80 + 1)
-#define SIZE_SRA0_SRAF (0xAF - 0xA0 + 1)
-#define SIZE_GR00_GR08 (0x08 - 0x00 + 1)
-#define SIZE_AR00_AR14 (0x14 - 0x00 + 1)
-#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
-#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
-#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
-#define SIZE_VPR (0x6C + 1)
-#define SIZE_DPR (0x44 + 1)
-
-static inline void smtc_crtcw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3d4);
- smtc_mmiowb(val, 0x3d5);
-}
-
-static inline unsigned int smtc_crtcr(int reg)
-{
- smtc_mmiowb(reg, 0x3d4);
- return smtc_mmiorb(0x3d5);
-}
-
-static inline void smtc_grphw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3ce);
- smtc_mmiowb(val, 0x3cf);
-}
-
-static inline unsigned int smtc_grphr(int reg)
-{
- smtc_mmiowb(reg, 0x3ce);
- return smtc_mmiorb(0x3cf);
-}
-
-static inline void smtc_attrw(int reg, int val)
-{
- smtc_mmiorb(0x3da);
- smtc_mmiowb(reg, 0x3c0);
- smtc_mmiorb(0x3c1);
- smtc_mmiowb(val, 0x3c0);
-}
-
-static inline void smtc_seqw(int reg, int val)
-{
- smtc_mmiowb(reg, 0x3c4);
- smtc_mmiowb(val, 0x3c5);
-}
-
-static inline unsigned int smtc_seqr(int reg)
-{
- smtc_mmiowb(reg, 0x3c4);
- return smtc_mmiorb(0x3c5);
-}
-
-/* The next structure holds all information relevant for a specific video mode.
- */
-
-struct ModeInit {
- int mmSizeX;
- int mmSizeY;
- int bpp;
- int hz;
- unsigned char Init_MISC;
- unsigned char Init_SR00_SR04[SIZE_SR00_SR04];
- unsigned char Init_SR10_SR24[SIZE_SR10_SR24];
- unsigned char Init_SR30_SR75[SIZE_SR30_SR75];
- unsigned char Init_SR80_SR93[SIZE_SR80_SR93];
- unsigned char Init_SRA0_SRAF[SIZE_SRA0_SRAF];
- unsigned char Init_GR00_GR08[SIZE_GR00_GR08];
- unsigned char Init_AR00_AR14[SIZE_AR00_AR14];
- unsigned char Init_CR00_CR18[SIZE_CR00_CR18];
- unsigned char Init_CR30_CR4D[SIZE_CR30_CR4D];
- unsigned char Init_CR90_CRA7[SIZE_CR90_CRA7];
-};
-
-/**********************************************************************
- SM712 Mode table.
- **********************************************************************/
-struct ModeInit VGAMode[] = {
- {
- /* mode#0: 640 x 480 16Bpp 60Hz */
- 640, 480, 16, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
- {
- /* mode#1: 640 x 480 24Bpp 60Hz */
- 640, 480, 24, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
- {
- /* mode#0: 640 x 480 32Bpp 60Hz */
- 640, 480, 32, 60,
- /* Init_MISC */
- 0xE3,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
- 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
- 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
- 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
- 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
- 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
- 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
- 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
- },
- },
-
- { /* mode#2: 800 x 600 16Bpp 60Hz */
- 800, 600, 16, 60,
- /* Init_MISC */
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
- 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
- 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- { /* mode#3: 800 x 600 24Bpp 60Hz */
- 800, 600, 24, 60,
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
- 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
- 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- { /* mode#7: 800 x 600 32Bpp 60Hz */
- 800, 600, 32, 60,
- /* Init_MISC */
- 0x2B,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
- 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
- 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
- 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
- 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
- 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
- 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
- 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
- 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
- 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
- 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
- },
- },
- /* We use 1024x768 table to light 1024x600 panel for lemote */
- { /* mode#4: 1024 x 600 16Bpp 60Hz */
- 1024, 600, 16, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x00, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
- 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x00, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
- 0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
- 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
- 0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
- 0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#5: 1024 x 768 24Bpp 60Hz */
- 1024, 768, 24, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x30, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
- 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#4: 1024 x 768 32Bpp 60Hz */
- 1024, 768, 32, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
- 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
- { /* mode#6: 320 x 240 16Bpp 60Hz */
- 320, 240, 16, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
- 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
-
- { /* mode#8: 320 x 240 32Bpp 60Hz */
- 320, 240, 32, 60,
- /* Init_MISC */
- 0xEB,
- { /* Init_SR0_SR4 */
- 0x03, 0x01, 0x0F, 0x03, 0x0E,
- },
- { /* Init_SR10_SR24 */
- 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
- 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xC4, 0x32, 0x02, 0x01, 0x01,
- },
- { /* Init_SR30_SR75 */
- 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
- 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
- 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
- 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
- 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
- 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
- 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
- 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
- },
- { /* Init_SR80_SR93 */
- 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
- 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
- 0x00, 0x00, 0x00, 0x00,
- },
- { /* Init_SRA0_SRAF */
- 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
- 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
- },
- { /* Init_GR00_GR08 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
- 0xFF,
- },
- { /* Init_AR00_AR14 */
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x41, 0x00, 0x0F, 0x00, 0x00,
- },
- { /* Init_CR00_CR18 */
- 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
- 0xFF,
- },
- { /* Init_CR30_CR4D */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
- 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
- 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
- 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
- },
- { /* Init_CR90_CRA7 */
- 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
- 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
- },
- },
-};
-
-#define numVGAModes ARRAY_SIZE(VGAMode)
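
The accessor helpers in the header deleted above all follow the classic VGA indexed-register scheme: the register number is written to an index port (0x3c4 for the sequencer, 0x3d4 for the CRT controller, 0x3ce for the graphics controller) and the value is then transferred through the adjacent data port. A sketch of that pattern using the MMIO accessors the helpers were built on; the base pointer and function names are illustrative:

#include <linux/io.h>

static void __iomem *example_regbase;	/* chip MMIO base, mapped elsewhere */

/* Select a sequencer register via the index port, then write its value. */
static void example_seq_write(int reg, int val)
{
	writeb(reg, example_regbase + 0x3c4);	/* index port */
	writeb(val, example_regbase + 0x3c5);	/* data port */
}

/* Select a sequencer register, then read its current value back. */
static unsigned int example_seq_read(int reg)
{
	writeb(reg, example_regbase + 0x3c4);
	return readb(example_regbase + 0x3c5);
}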
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
deleted file mode 100644
index 6176d98744cc..000000000000
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ /dev/null
@@ -1,1028 +0,0 @@
-/*
- * Silicon Motion SM7XX frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- *
- * Copyright (C) 2011 Igalia, S.L.
- * Author: Javier M. Mellid <jmunhoz@igalia.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
- */
-
-#include <linux/io.h>
-#include <linux/fb.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#include "sm7xx.h"
-
-/*
-* Private structure
-*/
-struct smtcfb_info {
- struct pci_dev *pdev;
- struct fb_info fb;
- u16 chip_id;
- u8 chip_rev_id;
-
- void __iomem *lfb; /* linear frame buffer */
- void __iomem *dp_regs; /* drawing processor control regs */
- void __iomem *vp_regs; /* video processor control regs */
- void __iomem *cp_regs; /* capture processor control regs */
- void __iomem *mmio; /* memory map IO port */
-
- u_int width;
- u_int height;
- u_int hz;
-
- u32 colreg[17];
-};
-
-void __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */
-
-static struct fb_var_screeninfo smtcfb_var = {
- .xres = 1024,
- .yres = 600,
- .xres_virtual = 1024,
- .yres_virtual = 600,
- .bits_per_pixel = 16,
- .red = {16, 8, 0},
- .green = {8, 8, 0},
- .blue = {0, 8, 0},
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .vmode = FB_VMODE_NONINTERLACED,
- .nonstd = 0,
- .accel_flags = FB_ACCELF_TEXT,
-};
-
-static struct fb_fix_screeninfo smtcfb_fix = {
- .id = "smXXXfb",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
- .line_length = 800 * 3,
- .accel = FB_ACCEL_SMI_LYNX,
- .type_aux = 0,
- .xpanstep = 0,
- .ypanstep = 0,
- .ywrapstep = 0,
-};
-
-struct vesa_mode {
- char index[6];
- u16 lfb_width;
- u16 lfb_height;
- u16 lfb_depth;
-};
-
-static struct vesa_mode vesa_mode_table[] = {
- {"0x301", 640, 480, 8},
- {"0x303", 800, 600, 8},
- {"0x305", 1024, 768, 8},
- {"0x307", 1280, 1024, 8},
-
- {"0x311", 640, 480, 16},
- {"0x314", 800, 600, 16},
- {"0x317", 1024, 768, 16},
- {"0x31A", 1280, 1024, 16},
-
- {"0x312", 640, 480, 24},
- {"0x315", 800, 600, 24},
- {"0x318", 1024, 768, 24},
- {"0x31B", 1280, 1024, 24},
-};
-
-struct screen_info smtc_scr_info;
-
-/* process command line options, get vga parameter */
-static int __init sm7xx_vga_setup(char *options)
-{
- int i;
-
- if (!options || !*options)
- return -EINVAL;
-
- smtc_scr_info.lfb_width = 0;
- smtc_scr_info.lfb_height = 0;
- smtc_scr_info.lfb_depth = 0;
-
- pr_debug("sm7xx_vga_setup = %s\n", options);
-
- for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
- if (strstr(options, vesa_mode_table[i].index)) {
- smtc_scr_info.lfb_width = vesa_mode_table[i].lfb_width;
- smtc_scr_info.lfb_height =
- vesa_mode_table[i].lfb_height;
- smtc_scr_info.lfb_depth = vesa_mode_table[i].lfb_depth;
- return 0;
- }
- }
-
- return -1;
-}
-__setup("vga=", sm7xx_vga_setup);
-
-static void sm712_setpalette(int regno, unsigned red, unsigned green,
- unsigned blue, struct fb_info *info)
-{
- /* set bit 5:4 = 01 (write LCD RAM only) */
- smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
-
- smtc_mmiowb(regno, dac_reg);
- smtc_mmiowb(red >> 10, dac_val);
- smtc_mmiowb(green >> 10, dac_val);
- smtc_mmiowb(blue >> 10, dac_val);
-}
-
-/* chan_to_field
- *
- * convert a colour value into a field position
- *
- * from pxafb.c
- */
-
-static inline unsigned int chan_to_field(unsigned int chan,
- struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
-
-static int smtc_blank(int blank_mode, struct fb_info *info)
-{
- /* clear DPMS setting */
- switch (blank_mode) {
- case FB_BLANK_UNBLANK:
- /* Screen On: HSync: On, VSync : On */
- smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
- break;
- case FB_BLANK_NORMAL:
- /* Screen Off: HSync: On, VSync : On Soft blank */
- smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- /* Screen On: HSync: On, VSync : Off */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- /* Screen On: HSync: Off, VSync : On */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- case FB_BLANK_POWERDOWN:
- /* Screen On: HSync: Off, VSync : Off */
- smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
- smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned trans, struct fb_info *info)
-{
- struct smtcfb_info *sfb;
- u32 val;
-
- sfb = info->par;
-
- if (regno > 255)
- return 1;
-
- switch (sfb->fb.fix.visual) {
- case FB_VISUAL_DIRECTCOLOR:
- case FB_VISUAL_TRUECOLOR:
- /*
- * 16/32 bit true-colour, use pseudo-palette for 16 base color
- */
- if (regno < 16) {
- if (sfb->fb.var.bits_per_pixel == 16) {
- u32 *pal = sfb->fb.pseudo_palette;
- val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, &sfb->fb.var.green);
- val |= chan_to_field(blue, &sfb->fb.var.blue);
-#ifdef __BIG_ENDIAN
- pal[regno] =
- ((red & 0xf800) >> 8) |
- ((green & 0xe000) >> 13) |
- ((green & 0x1c00) << 3) |
- ((blue & 0xf800) >> 3);
-#else
- pal[regno] = val;
-#endif
- } else {
- u32 *pal = sfb->fb.pseudo_palette;
- val = chan_to_field(red, &sfb->fb.var.red);
- val |= chan_to_field(green, &sfb->fb.var.green);
- val |= chan_to_field(blue, &sfb->fb.var.blue);
-#ifdef __BIG_ENDIAN
- val =
- (val & 0xff00ff00 >> 8) |
- (val & 0x00ff00ff << 8);
-#endif
- pal[regno] = val;
- }
- }
- break;
-
- case FB_VISUAL_PSEUDOCOLOR:
- /* color depth 8 bit */
- sm712_setpalette(regno, red, green, blue, info);
- break;
-
- default:
- return 1; /* unknown type */
- }
-
- return 0;
-
-}
-
-#ifdef __BIG_ENDIAN
-static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t
- count, loff_t *ppos)
-{
- unsigned long p = *ppos;
-
- u32 *buffer, *dst;
- u32 __iomem *src;
- int c, i, cnt = 0, err = 0;
- unsigned long total_size;
-
- if (!info || !info->screen_base)
- return -ENODEV;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return -EPERM;
-
- total_size = info->screen_size;
-
- if (total_size == 0)
- total_size = info->fix.smem_len;
-
- if (p >= total_size)
- return 0;
-
- if (count >= total_size)
- count = total_size;
-
- if (count + p > total_size)
- count = total_size - p;
-
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- src = (u32 __iomem *) (info->screen_base + p);
-
- if (info->fbops->fb_sync)
- info->fbops->fb_sync(info);
-
- while (count) {
- c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- dst = buffer;
- for (i = c >> 2; i--;) {
- *dst = fb_readl(src++);
- *dst =
- (*dst & 0xff00ff00 >> 8) |
- (*dst & 0x00ff00ff << 8);
- dst++;
- }
- if (c & 3) {
- u8 *dst8 = (u8 *) dst;
- u8 __iomem *src8 = (u8 __iomem *) src;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- *dst8++ = fb_readb(++src8);
- } else {
- *dst8++ = fb_readb(--src8);
- src8 += 2;
- }
- }
- src = (u32 __iomem *) src8;
- }
-
- if (copy_to_user(buf, buffer, c)) {
- err = -EFAULT;
- break;
- }
- *ppos += c;
- buf += c;
- cnt += c;
- count -= c;
- }
-
- kfree(buffer);
-
- return (err) ? err : cnt;
-}
-
-static ssize_t
-smtcfb_write(struct fb_info *info, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- unsigned long p = *ppos;
-
- u32 *buffer, *src;
- u32 __iomem *dst;
- int c, i, cnt = 0, err = 0;
- unsigned long total_size;
-
- if (!info || !info->screen_base)
- return -ENODEV;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return -EPERM;
-
- total_size = info->screen_size;
-
- if (total_size == 0)
- total_size = info->fix.smem_len;
-
- if (p > total_size)
- return -EFBIG;
-
- if (count > total_size) {
- err = -EFBIG;
- count = total_size;
- }
-
- if (count + p > total_size) {
- if (!err)
- err = -ENOSPC;
-
- count = total_size - p;
- }
-
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- dst = (u32 __iomem *) (info->screen_base + p);
-
- if (info->fbops->fb_sync)
- info->fbops->fb_sync(info);
-
- while (count) {
- c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- src = buffer;
-
- if (copy_from_user(src, buf, c)) {
- err = -EFAULT;
- break;
- }
-
- for (i = c >> 2; i--;) {
- fb_writel((*src & 0xff00ff00 >> 8) |
- (*src & 0x00ff00ff << 8), dst++);
- src++;
- }
- if (c & 3) {
- u8 *src8 = (u8 *) src;
- u8 __iomem *dst8 = (u8 __iomem *) dst;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- fb_writeb(*src8++, ++dst8);
- } else {
- fb_writeb(*src8++, --dst8);
- dst8 += 2;
- }
- }
- dst = (u32 __iomem *) dst8;
- }
-
- *ppos += c;
- buf += c;
- cnt += c;
- count -= c;
- }
-
- kfree(buffer);
-
- return (cnt) ? cnt : err;
-}
-#endif /* ! __BIG_ENDIAN */
-
-static void sm7xx_set_timing(struct smtcfb_info *sfb)
-{
- int i = 0, j = 0;
- u32 m_nScreenStride;
-
- dev_dbg(&sfb->pdev->dev,
- "sfb->width=%d sfb->height=%d "
- "sfb->fb.var.bits_per_pixel=%d sfb->hz=%d\n",
- sfb->width, sfb->height, sfb->fb.var.bits_per_pixel, sfb->hz);
-
- for (j = 0; j < numVGAModes; j++) {
- if (VGAMode[j].mmSizeX == sfb->width &&
- VGAMode[j].mmSizeY == sfb->height &&
- VGAMode[j].bpp == sfb->fb.var.bits_per_pixel &&
- VGAMode[j].hz == sfb->hz) {
-
- dev_dbg(&sfb->pdev->dev,
- "VGAMode[j].mmSizeX=%d VGAMode[j].mmSizeY=%d "
- "VGAMode[j].bpp=%d VGAMode[j].hz=%d\n",
- VGAMode[j].mmSizeX, VGAMode[j].mmSizeY,
- VGAMode[j].bpp, VGAMode[j].hz);
-
- dev_dbg(&sfb->pdev->dev, "VGAMode index=%d\n", j);
-
- smtc_mmiowb(0x0, 0x3c6);
-
- smtc_seqw(0, 0x1);
-
- smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2);
-
- /* init SEQ register SR00 - SR04 */
- for (i = 0; i < SIZE_SR00_SR04; i++)
- smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]);
-
- /* init SEQ register SR10 - SR24 */
- for (i = 0; i < SIZE_SR10_SR24; i++)
- smtc_seqw(i + 0x10,
- VGAMode[j].Init_SR10_SR24[i]);
-
- /* init SEQ register SR30 - SR75 */
- for (i = 0; i < SIZE_SR30_SR75; i++)
- if ((i + 0x30) != 0x62 &&
- (i + 0x30) != 0x6a &&
- (i + 0x30) != 0x6b)
- smtc_seqw(i + 0x30,
- VGAMode[j].Init_SR30_SR75[i]);
-
- /* init SEQ register SR80 - SR93 */
- for (i = 0; i < SIZE_SR80_SR93; i++)
- smtc_seqw(i + 0x80,
- VGAMode[j].Init_SR80_SR93[i]);
-
- /* init SEQ register SRA0 - SRAF */
- for (i = 0; i < SIZE_SRA0_SRAF; i++)
- smtc_seqw(i + 0xa0,
- VGAMode[j].Init_SRA0_SRAF[i]);
-
- /* init Graphic register GR00 - GR08 */
- for (i = 0; i < SIZE_GR00_GR08; i++)
- smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]);
-
- /* init Attribute register AR00 - AR14 */
- for (i = 0; i < SIZE_AR00_AR14; i++)
- smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]);
-
- /* init CRTC register CR00 - CR18 */
- for (i = 0; i < SIZE_CR00_CR18; i++)
- smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]);
-
- /* init CRTC register CR30 - CR4D */
- for (i = 0; i < SIZE_CR30_CR4D; i++)
- smtc_crtcw(i + 0x30,
- VGAMode[j].Init_CR30_CR4D[i]);
-
- /* init CRTC register CR90 - CRA7 */
- for (i = 0; i < SIZE_CR90_CRA7; i++)
- smtc_crtcw(i + 0x90,
- VGAMode[j].Init_CR90_CRA7[i]);
- }
- }
- smtc_mmiowb(0x67, 0x3c2);
-
- /* set VPR registers */
- writel(0x0, sfb->vp_regs + 0x0C);
- writel(0x0, sfb->vp_regs + 0x40);
-
- /* set data width */
- m_nScreenStride =
- (sfb->width * sfb->fb.var.bits_per_pixel) / 64;
- switch (sfb->fb.var.bits_per_pixel) {
- case 8:
- writel(0x0, sfb->vp_regs + 0x0);
- break;
- case 16:
- writel(0x00020000, sfb->vp_regs + 0x0);
- break;
- case 24:
- writel(0x00040000, sfb->vp_regs + 0x0);
- break;
- case 32:
- writel(0x00030000, sfb->vp_regs + 0x0);
- break;
- }
- writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride),
- sfb->vp_regs + 0x10);
-
-}
-
-static void smtc_set_timing(struct smtcfb_info *sfb)
-{
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- case 0x720:
- sm7xx_set_timing(sfb);
- break;
- }
-}
-
-static void smtcfb_setmode(struct smtcfb_info *sfb)
-{
- switch (sfb->fb.var.bits_per_pixel) {
- case 32:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 4;
- sfb->fb.var.red.length = 8;
- sfb->fb.var.green.length = 8;
- sfb->fb.var.blue.length = 8;
- sfb->fb.var.red.offset = 16;
- sfb->fb.var.green.offset = 8;
- sfb->fb.var.blue.offset = 0;
- break;
- case 24:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 3;
- sfb->fb.var.red.length = 8;
- sfb->fb.var.green.length = 8;
- sfb->fb.var.blue.length = 8;
- sfb->fb.var.red.offset = 16;
- sfb->fb.var.green.offset = 8;
- sfb->fb.var.blue.offset = 0;
- break;
- case 8:
- sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres;
- sfb->fb.var.red.length = 3;
- sfb->fb.var.green.length = 3;
- sfb->fb.var.blue.length = 2;
- sfb->fb.var.red.offset = 5;
- sfb->fb.var.green.offset = 2;
- sfb->fb.var.blue.offset = 0;
- break;
- case 16:
- default:
- sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- sfb->fb.fix.line_length = sfb->fb.var.xres * 2;
- sfb->fb.var.red.length = 5;
- sfb->fb.var.green.length = 6;
- sfb->fb.var.blue.length = 5;
- sfb->fb.var.red.offset = 11;
- sfb->fb.var.green.offset = 5;
- sfb->fb.var.blue.offset = 0;
- break;
- }
-
- sfb->width = sfb->fb.var.xres;
- sfb->height = sfb->fb.var.yres;
- sfb->hz = 60;
- smtc_set_timing(sfb);
-}
-
-static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- /* sanity checks */
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
-
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
- /* set valid default bpp */
- if ((var->bits_per_pixel != 8) && (var->bits_per_pixel != 16) &&
- (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32))
- var->bits_per_pixel = 16;
-
- return 0;
-}
-
-static int smtc_set_par(struct fb_info *info)
-{
- smtcfb_setmode(info->par);
-
- return 0;
-}
-
-static struct fb_ops smtcfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = smtc_check_var,
- .fb_set_par = smtc_set_par,
- .fb_setcolreg = smtc_setcolreg,
- .fb_blank = smtc_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
-#ifdef __BIG_ENDIAN
- .fb_read = smtcfb_read,
- .fb_write = smtcfb_write,
-#endif
-};
-
-/*
- * alloc struct smtcfb_info and assign default values
- */
-static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *pdev)
-{
- struct smtcfb_info *sfb;
-
- sfb = kzalloc(sizeof(*sfb), GFP_KERNEL);
-
- if (!sfb)
- return NULL;
-
- sfb->pdev = pdev;
-
- sfb->fb.flags = FBINFO_FLAG_DEFAULT;
- sfb->fb.fbops = &smtcfb_ops;
- sfb->fb.fix = smtcfb_fix;
- sfb->fb.var = smtcfb_var;
- sfb->fb.pseudo_palette = sfb->colreg;
- sfb->fb.par = sfb;
-
- return sfb;
-}
-
-/*
- * free struct smtcfb_info
- */
-static void smtc_free_fb_info(struct smtcfb_info *sfb)
-{
- kfree(sfb);
-}
-
-/*
- * Unmap in the memory mapped IO registers
- */
-
-static void smtc_unmap_mmio(struct smtcfb_info *sfb)
-{
- if (sfb && smtc_RegBaseAddress)
- smtc_RegBaseAddress = NULL;
-}
-
-/*
- * Map in the screen memory
- */
-
-static int smtc_map_smem(struct smtcfb_info *sfb,
- struct pci_dev *pdev, u_long smem_len)
-{
-
- sfb->fb.fix.smem_start = pci_resource_start(pdev, 0);
-
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- sfb->fb.fix.smem_start += 0x800000;
-#endif
-
- sfb->fb.fix.smem_len = smem_len;
-
- sfb->fb.screen_base = sfb->lfb;
-
- if (!sfb->fb.screen_base) {
- dev_err(&pdev->dev,
- "%s: unable to map screen memory\n", sfb->fb.fix.id);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*
- * Unmap in the screen memory
- *
- */
-static void smtc_unmap_smem(struct smtcfb_info *sfb)
-{
- if (sfb && sfb->fb.screen_base) {
- iounmap(sfb->fb.screen_base);
- sfb->fb.screen_base = NULL;
- }
-}
-
-/*
- * We need to wake up the device and make sure its in linear memory mode.
- */
-static inline void sm7xx_init_hw(void)
-{
- outb_p(0x18, 0x3c4);
- outb_p(0x11, 0x3c5);
-}
-
-static int smtcfb_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct smtcfb_info *sfb;
- u_long smem_size = 0x00800000; /* default 8MB */
- int err;
- unsigned long mmio_base;
-
- dev_info(&pdev->dev, "Silicon Motion display driver.");
-
- err = pci_enable_device(pdev); /* enable SMTC chip */
- if (err)
- return err;
-
- sprintf(smtcfb_fix.id, "sm%Xfb", ent->device);
-
- sfb = smtc_alloc_fb_info(pdev);
-
- if (!sfb) {
- err = -ENOMEM;
- goto failed_free;
- }
-
- sfb->chip_id = ent->device;
-
- pci_set_drvdata(pdev, sfb);
-
- sm7xx_init_hw();
-
- /* get mode parameter from smtc_scr_info */
- if (smtc_scr_info.lfb_width != 0) {
- sfb->fb.var.xres = smtc_scr_info.lfb_width;
- sfb->fb.var.yres = smtc_scr_info.lfb_height;
- sfb->fb.var.bits_per_pixel = smtc_scr_info.lfb_depth;
- } else {
- /* default resolution 1024x600 16bit mode */
- sfb->fb.var.xres = SCREEN_X_RES;
- sfb->fb.var.yres = SCREEN_Y_RES;
- sfb->fb.var.bits_per_pixel = SCREEN_BPP;
- }
-
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 24)
- sfb->fb.var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32);
-#endif
- /* Map address and memory detection */
- mmio_base = pci_resource_start(pdev, 0);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
-
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- sfb->fb.fix.mmio_start = mmio_base + 0x00400000;
- sfb->fb.fix.mmio_len = 0x00400000;
- smem_size = SM712_VIDEOMEMORYSIZE;
-#ifdef __BIG_ENDIAN
- sfb->lfb = ioremap(mmio_base, 0x00c00000);
-#else
- sfb->lfb = ioremap(mmio_base, 0x00800000);
-#endif
- sfb->mmio = (smtc_RegBaseAddress =
- sfb->lfb + 0x00700000);
- sfb->dp_regs = sfb->lfb + 0x00408000;
- sfb->vp_regs = sfb->lfb + 0x0040c000;
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32) {
- sfb->lfb += 0x800000;
- dev_info(&pdev->dev, "sfb->lfb=%p", sfb->lfb);
- }
-#endif
- if (!smtc_RegBaseAddress) {
- dev_err(&pdev->dev,
- "%s: unable to map memory mapped IO!",
- sfb->fb.fix.id);
- err = -ENOMEM;
- goto failed_fb;
- }
-
- /* set MCLK = 14.31818 * (0x16 / 0x2) */
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x62, 0x3e);
- /* enable PCI burst */
- smtc_seqw(0x17, 0x20);
- /* enable word swap */
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
- break;
- case 0x720:
- sfb->fb.fix.mmio_start = mmio_base;
- sfb->fb.fix.mmio_len = 0x00200000;
- smem_size = SM722_VIDEOMEMORYSIZE;
- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
- sfb->lfb = sfb->dp_regs + 0x00200000;
- sfb->mmio = (smtc_RegBaseAddress =
- sfb->dp_regs + 0x000c0000);
- sfb->vp_regs = sfb->dp_regs + 0x800;
-
- smtc_seqw(0x62, 0xff);
- smtc_seqw(0x6a, 0x0d);
- smtc_seqw(0x6b, 0x02);
- break;
- default:
- dev_err(&pdev->dev,
- "No valid Silicon Motion display chip was detected!");
-
- goto failed_fb;
- }
-
- /* can support 32 bpp */
- if (15 == sfb->fb.var.bits_per_pixel)
- sfb->fb.var.bits_per_pixel = 16;
-
- sfb->fb.var.xres_virtual = sfb->fb.var.xres;
- sfb->fb.var.yres_virtual = sfb->fb.var.yres;
- err = smtc_map_smem(sfb, pdev, smem_size);
- if (err)
- goto failed;
-
- smtcfb_setmode(sfb);
-
- err = register_framebuffer(&sfb->fb);
- if (err < 0)
- goto failed;
-
- dev_info(&pdev->dev,
- "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.",
- sfb->chip_id, sfb->chip_rev_id, sfb->fb.var.xres,
- sfb->fb.var.yres, sfb->fb.var.bits_per_pixel);
-
- return 0;
-
-failed:
- dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail.");
-
- smtc_unmap_smem(sfb);
- smtc_unmap_mmio(sfb);
-failed_fb:
- smtc_free_fb_info(sfb);
-
-failed_free:
- pci_disable_device(pdev);
-
- return err;
-}
-
-/*
- * 0x710 (LynxEM)
- * 0x712 (LynxEM+)
- * 0x720 (Lynx3DM, Lynx3DM+)
- */
-static const struct pci_device_id smtcfb_pci_table[] = {
- { PCI_DEVICE(0x126f, 0x710), },
- { PCI_DEVICE(0x126f, 0x712), },
- { PCI_DEVICE(0x126f, 0x720), },
- {0,}
-};
-
-static void smtcfb_pci_remove(struct pci_dev *pdev)
-{
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
- smtc_unmap_smem(sfb);
- smtc_unmap_mmio(sfb);
- unregister_framebuffer(&sfb->fb);
- smtc_free_fb_info(sfb);
-}
-
-#ifdef CONFIG_PM
-static int smtcfb_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
-
- /* set the hw in sleep mode use external clock and self memory refresh
- * so that we can turn off internal PLLs later on
- */
- smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
- smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
-
- console_lock();
- fb_set_suspend(&sfb->fb, 1);
- console_unlock();
-
- /* additionally turn off all function blocks including internal PLLs */
- smtc_seqw(0x21, 0xff);
-
- return 0;
-}
-
-static int smtcfb_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
-
- sfb = pci_get_drvdata(pdev);
-
- /* reinit hardware */
- sm7xx_init_hw();
- switch (sfb->chip_id) {
- case 0x710:
- case 0x712:
- /* set MCLK = 14.31818 * (0x16 / 0x2) */
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x62, 0x3e);
- /* enable PCI burst */
- smtc_seqw(0x17, 0x20);
-#ifdef __BIG_ENDIAN
- if (sfb->fb.var.bits_per_pixel == 32)
- smtc_seqw(0x17, 0x30);
-#endif
- break;
- case 0x720:
- smtc_seqw(0x62, 0xff);
- smtc_seqw(0x6a, 0x0d);
- smtc_seqw(0x6b, 0x02);
- break;
- }
-
- smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
- smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
-
- smtcfb_setmode(sfb);
-
- console_lock();
- fb_set_suspend(&sfb->fb, 0);
- console_unlock();
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
-#define SM7XX_PM_OPS (&sm7xx_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define SM7XX_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-static struct pci_driver smtcfb_driver = {
- .name = "smtcfb",
- .id_table = smtcfb_pci_table,
- .probe = smtcfb_pci_probe,
- .remove = smtcfb_pci_remove,
- .driver.pm = SM7XX_PM_OPS,
-};
-
-module_pci_driver(smtcfb_driver);
-
-MODULE_AUTHOR("Siliconmotion ");
-MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 4e18fb405344..c62d74c47906 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -40,7 +40,7 @@ const struct old_serial_port *spk_serial_init(int index)
cval |= UART_LCR_EPAR;
if (synth_request_region(ser->port, 8)) {
/* try to take it back. */
- printk(KERN_INFO "Ports not available, trying to steal them\n");
+ pr_info("Ports not available, trying to steal them\n");
__release_region(&ioport_resource, ser->port, 8);
err = synth_request_region(ser->port, 8);
if (err) {
@@ -106,7 +106,7 @@ static void start_serial_interrupt(int irq)
"serial", (void *) synth_readbuf_handler);
if (rv)
- printk(KERN_ERR "Unable to request Speakup serial I R Q\n");
+ pr_err("Unable to request Speakup serial I R Q\n");
/* Set MCR */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2,
speakup_info.port_tts + UART_MCR);
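
The serialio.c hunk above converts raw printk(KERN_INFO)/printk(KERN_ERR) calls to the pr_info()/pr_err() helpers. A minimal sketch of the usual pattern, assuming a module that defines pr_fmt before its includes (the helper name and messages below are illustrative, not taken from speakup):

    /* Illustrative only: prefix every pr_* message with the module name. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/printk.h>

    static void report_port_status(int port, int err)
    {
            if (err)
                    pr_err("port 0x%x unavailable (err %d)\n", port, err);
            else
                    pr_info("port 0x%x claimed\n", port);
    }
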
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 15fdec323a70..756d01535d3e 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -270,10 +270,12 @@ static void do_catch_up(struct spk_synth *synth)
if (jiffies >= jiff_max) {
if (!in_escape)
spk_serial_out(PROCSPEECH);
- spin_lock_irqsave(&speakup_info.spinlock, flags);
+ spin_lock_irqsave(&speakup_info.spinlock,
+ flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock,
+ flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
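
The re-wrapped lines above keep the standard irqsave locking pattern used here: take the spinlock with interrupts saved, copy the shared values into locals, then release the lock before sleeping. A hedged stand-alone sketch of that pattern (shared_lock, shared_delay, and read_delay_locked are invented names, not speakup symbols):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(shared_lock);
    static int shared_delay = 10;           /* protected by shared_lock */

    static int read_delay_locked(void)
    {
            unsigned long flags;
            int val;

            spin_lock_irqsave(&shared_lock, flags);
            val = shared_delay;             /* copy out under the lock */
            spin_unlock_irqrestore(&shared_lock, flags);

            return val;                     /* safe to sleep once unlocked */
    }
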
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 1b6d581c438b..b5e74e9de6bd 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -17,7 +17,7 @@ menuconfig TIDSPBRIDGE
config TIDSPBRIDGE_DVFS
bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)"
- depends on TIDSPBRIDGE && OMAP_PM_SRF && CPU_FREQ
+ depends on TIDSPBRIDGE && CPU_FREQ
help
DVFS allows DSP Bridge to initiate the operating point change to
scale the chip voltage and frequency in order to match the
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index e322fb7aebe1..c2829aa7780f 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -2127,7 +2127,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
u32 module_size;
u32 module_struct_size = 0;
u32 sect_ndx;
- char *sect_str ;
+ char *sect_str;
int status = 0;
status = dev_get_intf_fxns(dev_object, &intf_fxns);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index b770b2281ce8..8945b4e3a2a6 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -280,9 +280,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
/* Wait until the state has moved to ON */
- while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
- OMAP_INTRANSITION_MASK)
- ;
+ while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
+ OMAP_INTRANSITION_MASK);
/* Disable Automatic transition */
(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
@@ -419,7 +418,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Assert RST1 i.e only the RST only for DSP megacell */
if (!status) {
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
- OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP3430_RST1_IVA2_MASK,
+ OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
/* Mask address with 1K for compatibility */
@@ -432,7 +432,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Reset and Unreset the RST2, so that BOOTADDR is copied to
* IVA2 SYSC register */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
- OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
+ OMAP2_RM_RSTCTRL);
udelay(100);
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -446,7 +447,8 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Only make TLB entry if both addresses are non-zero */
for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
entry_ndx++) {
- struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+ struct bridge_ioctl_extproc *e =
+ &dev_context->atlb_entry[entry_ndx];
struct hw_mmu_map_attrs_t map_attrs = {
.endianism = e->endianism,
.element_size = e->elem_size,
@@ -641,8 +643,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
* before turning off the clocks.. This is to ensure that there are no
* pending L3 or other transactons from IVA2 */
- dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
- OMAP_POWERSTATEST_MASK;
+ dsp_pwr_state = (*pdata->dsp_prm_read)
+ (OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
if (dsp_pwr_state != PWRDM_POWER_OFF) {
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -682,8 +684,9 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
dev_context->mbox = NULL;
}
/* Reset IVA2 clocks*/
- (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
- OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+ (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
+ OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
+ OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clock_disable_all(dev_context->dsp_per_clks);
dsp_clk_disable(DSP_CLK_IVA2);
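
The bridge code above reaches the PRM/CM registers through function pointers held in platform data, written as (*pdata->dsp_prm_read)(...). In C the explicit dereference is optional: both spellings below call the same function through the pointer. A small stand-alone sketch with invented struct and field names:

    #include <stdio.h>

    struct dsp_ops {
            unsigned int (*prm_read)(int module, int reg);
    };

    static unsigned int fake_prm_read(int module, int reg)
    {
            return 0x42;    /* pretend register value */
    }

    int main(void)
    {
            struct dsp_ops ops = { .prm_read = fake_prm_read };
            struct dsp_ops *pdata = &ops;

            /* equivalent calls through the function pointer */
            unsigned int a = (*pdata->prm_read)(1, 2);
            unsigned int b = pdata->prm_read(1, 2);

            printf("%u %u\n", a, b);
            return 0;
    }
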
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index 1862afd80dc1..657104f37f7d 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -99,7 +99,8 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
return -EPERM;
}
pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
- OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
}
if (timeout == 0) {
pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
@@ -209,7 +210,8 @@ int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
return -EPERM;
}
pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
- OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
+ OMAP2_PM_PWSTST) &
+ OMAP_POWERSTATEST_MASK;
}
if (!timeout) {
@@ -355,7 +357,7 @@ int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
(dev_context->brd_state == BRD_DSP_HIBERNATION)) {
dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
return 0;
- } else if ((dev_context->brd_state == BRD_RUNNING)) {
+ } else if (dev_context->brd_state == BRD_RUNNING) {
/* Send a prenotification to DSP */
dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
@@ -396,13 +398,14 @@ int post_scale_dsp(struct bridge_dev_context *dev_context,
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
__func__);
- } else if ((dev_context->brd_state == BRD_RUNNING)) {
+ } else if (dev_context->brd_state == BRD_RUNNING) {
/* Update the OPP value in shared memory */
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
/* Send a post notification to DSP */
sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
- dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
- "to DSP\n", __func__);
+ dev_dbg(bridge,
+ "OPP: %s wrote to shm. Sent post notification to DSP\n",
+ __func__);
} else {
status = -EPERM;
}
diff --git a/drivers/staging/tidspbridge/dynload/tramp.c b/drivers/staging/tidspbridge/dynload/tramp.c
index 404af1895980..5f0431305fbb 100644
--- a/drivers/staging/tidspbridge/dynload/tramp.c
+++ b/drivers/staging/tidspbridge/dynload/tramp.c
@@ -503,7 +503,7 @@ static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base,
* TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really
* the first (and only) relocation that will be performed on them.
*/
-static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t * data,
+static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t *data,
struct reloc_record_t *rp[], u32 relo_count)
{
int ret_val = 1;
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 190ca3fe7327..2ae48c9a9362 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -101,14 +101,14 @@ static int dcd_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
* if the converted value doesn't fit in u32. So, convert the
* last six bytes to u64 and memcpy what is needed
*/
- if(sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
+ if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
&uuid_tmp.data1, &c, &uuid_tmp.data2, &c,
&uuid_tmp.data3, &c, &uuid_tmp.data4,
&uuid_tmp.data5, &c, &t) != 10)
return -EINVAL;
t = cpu_to_be64(t);
- memcpy(&uuid_tmp.data6[0], ((char*)&t) + 2, 6);
+ memcpy(&uuid_tmp.data6[0], ((char *)&t) + 2, 6);
*uuid_obj = uuid_tmp;
return 0;
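
The dcd_uuid_from_string() change above keeps the trick of scanning the last six UUID bytes as one 64-bit hex value, converting it to big-endian, and copying only the low six bytes of that value. A hedged userspace sketch of the same conversion, using htobe64 in place of the kernel's cpu_to_be64 (buffer names invented):

    #define _DEFAULT_SOURCE         /* for htobe64 on glibc */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *tail = "0800200C9A66";      /* last 6 UUID bytes as hex */
            unsigned long long t;
            uint8_t data6[6];

            if (sscanf(tail, "%llx", &t) != 1)
                    return 1;

            t = htobe64(t);                         /* most significant byte first */
            memcpy(data6, (char *)&t + 2, 6);       /* skip the two zero padding bytes */

            for (int i = 0; i < 6; i++)
                    printf("%02x", data6[i]);
            printf("\n");
            return 0;
    }
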
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index be26917a6896..757ae20b38ee 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -738,7 +738,7 @@ void mem_ext_phys_pool_release(void)
* Allocate physically contiguous, uncached memory from external memory pool
*/
-static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr)
+static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
u32 new_alloc_ptr;
u32 offset;
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index b32ba0ad2a07..93e6282f122b 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -266,15 +266,15 @@ int mgr_enum_processor_info(u32 processor_id,
* this is a clumsy overwrite */
processor_info->processor_type = DSPTYPE64;
} else {
- dev_dbg(bridge, "%s: Failed to get DCD processor info "
- "%x\n", __func__, status2);
+ dev_dbg(bridge, "%s: Failed to get DCD processor info %x\n",
+ __func__, status2);
status = -EPERM;
}
}
*pu_num_procs = proc_index;
if (proc_detect == false) {
- dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use "
- "CFG registry\n", __func__);
+ dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use CFG registry\n",
+ __func__);
processor_info->processor_type = DSPTYPE64;
}
func_end:
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index ca3805046a73..5ac507ccd19d 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -623,7 +623,7 @@ void nldr_delete(struct nldr_object *nldr_obj)
* ======== nldr_get_fxn_addr ========
*/
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
- char *str_fxn, u32 * addr)
+ char *str_fxn, u32 *addr)
{
struct dbll_sym_val *dbll_sym;
struct nldr_object *nldr_obj;
@@ -1751,9 +1751,8 @@ static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
}
if (ref_count && (*ref_count > 0)) {
*ref_count -= 1;
- if (other_ref) {
+ if (other_ref)
*other_ref -= 1;
- }
}
if (ref_count && *ref_count == 0) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 87dfa92ab45b..9d3044a384ee 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -246,7 +246,7 @@ static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
-static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
struct node_object *hnode,
@@ -406,7 +406,7 @@ int node_allocate(struct proc_object *hprocessor,
/* check for page aligned Heap size */
if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
- pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
+ pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n",
__func__, attr_in->heap_size);
status = -EINVAL;
} else {
@@ -703,9 +703,9 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
pattr = &node_dfltbufattrs; /* set defaults */
status = proc_get_processor_id(pnode->processor, &proc_id);
- if (proc_id != DSP_UNIT) {
+ if (proc_id != DSP_UNIT)
goto func_end;
- }
+
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
* virt address, so set this info in this node's translator
* object for future ref. If MEM_GETVIRTUALSEGID then retrieve
@@ -886,11 +886,10 @@ int node_connect(struct node_object *node1, u32 stream1,
if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
return -EPERM; /* illegal stream mode */
- if (node1_type != NODE_GPP) {
+ if (node1_type != NODE_GPP)
hnode_mgr = node1->node_mgr;
- } else {
+ else
hnode_mgr = node2->node_mgr;
- }
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
@@ -1576,7 +1575,7 @@ func_end:
* Purpose:
* Frees the message buffer.
*/
-int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
+int node_free_msg_buf(struct node_object *hnode, u8 *pbuffer,
struct dsp_bufferattr *pattr)
{
struct node_object *pnode = (struct node_object *)hnode;
@@ -2322,7 +2321,8 @@ int node_terminate(struct node_object *hnode, int *pstatus)
if (!hdeh_mgr)
goto func_cont;
- bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
+ bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
+ DSP_EXCEPTIONABORT);
}
}
func_cont:
@@ -2640,7 +2640,7 @@ static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
* Purpose:
* Retrieves the address for create, execute or delete phase for a node.
*/
-static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
+static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
u32 phase)
{
char *pstr_fxn_name = NULL;
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
new file mode 100644
index 000000000000..8d078e4de3b8
--- /dev/null
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -0,0 +1,174 @@
+
+Overview
+
+This document describes the driver set for Unisys Secure Partitioning (s-Par®).
+
+s-Par is firmware that provides hardware partitioning capabilities for
+splitting large-scale Intel x86 servers into multiple isolated
+partitions. s-Par provides a set of para-virtualized device drivers to
+allow guest partitions on the same server to share devices that would
+normally be unsharable; specifically, PCI network interfaces and host
+bus adapters that do not support shared access via SR-IOV. The shared
+device is owned and managed by a small, single-purpose service
+partition, which communicates with each guest partition sharing that
+device through an area of shared memory called a channel. Additional
+drivers provide support interfaces for communicating with s-Par
+services, logging and diagnostics, and accessing the Linux console
+from the s-Par user interface.
+
+The driver stack consists of a set of support modules, a set of bus
+modules, and a set of device driver modules. The support modules
+handle a number of common functions across each of the other
+drivers. The bus modules provide organization for the device driver
+modules, which provide the shared device functionality.
+
+These drivers are for the Unisys virtual PCI hardware model where the
+hypervisor need not intervene (other than normal interrupt handling)
+in the interactions between the client drivers and the virtual adapter
+firmware in the adapter service partition.
+
+Driver Descriptions
+
+Device Modules
+
+The modules in this section handle shared devices and the virtual
+buses required to support them. These modules use functions in and
+depend on the modules described in the support modules section.
+
+visorchipset
+
+The visorchipset module receives device creation and destruction
+events from the Command service partition of s-Par, as well as
+controlling registration of shared device drivers with the s-Par
+driver core. The events received are used to populate other s-Par
+modules with their assigned shared devices. Visorchipset is required
+for shared device drivers to function properly. Visorchipset also
+stores information for handling dump disk device creation during
+kdump.
+
+In operation, the visorchipset module processes device creation and
+destruction messages sent by s-Par's Command service partition through
+a channel. These messages result in creation (or destruction) of each
+virtual bus and virtual device. Each bus and device is also associated
+with a communication channel, which is used to communicate with one or
+more IO service partitions to perform device IO on behalf of the
+guest.
+
+virthba
+
+The virthba module provides access to a shared SCSI host bus adapter
+and one or more disk devices, by proxying SCSI commands between the
+guest and the service partition that owns the shared SCSI adapter,
+using a channel between the guest and the service partition. The disks
+that appear on the shared bus are defined by the s-Par configuration
+and enforced by the service partition, while the guest driver handles
+sending commands and handling responses. Each disk is shared as a
+whole to a guest. Sharing the bus adapter in this way provides
+resiliency; should the device encounter an error, only the service
+partition is rebooted, and the device is reinitialized. This allows
+guests to continue running and to recover from the error.
+
+virtnic
+
+The virtnic module provides a paravirtualized network interface to a
+guest by proxying buffer information between the guest and the service
+partition that owns the shared network interface, using a channel
+between the guest and the service partition. The connectivity of this
+interface with the shared interface and possibly other guest
+partitions is defined by the s-Par configuration and enforced by the
+service partition; the guest driver handles communication and link
+status.
+
+visorserial
+
+The visorserial module allows the console of the Linux guest to be
+accessed via the s-Par console serial channel. It creates devices in
+/dev/visorserialclientX which behave like a serial terminal and are
+connected to the diagnostics system in s-Par. By assigning a getty to
+the terminal in the guest, a user could log into and access the guest
+from the s-Par diagnostics SWITCH RUN terminal.
+
+visorbus
+
+The visorbus module handles the bus functions for most functional
+drivers except visorserial, visordiag, virthba, and virtnic. It
+maintains the sysfs subtree /sys/devices/visorbus*/. It is responsible
+for device creation and destruction of the devices on its bus.
+
+visorclientbus
+
+The visorclientbus module forwards the bus functions for virthba, and
+virtnic to the virtpci driver.
+
+virtpci
+
+The virtpci module handles the bus functions for virthba, and virtnic.
+
+s-Par Integration Modules
+
+The modules in this section provide integration with s-Par guest
+partition services like diagnostics and remote desktop. These modules
+depend on functions in the modules described in the support modules
+section.
+
+visorvideoclient
+
+The visorvideoclient module provides functionality for video support
+for the Unisys s-Par Partition Desktop application. The guest OS must
+also have the UEFI GOP protocol enabled for the partition desktop to
+function.
+
+visorconinclient
+
+The visorconinclient module provides keyboard and mouse support for
+the Unisys s-Par Partition Desktop application.
+
+sparstop
+
+The sparstop module handles requests from the Unisys s-Par platform to
+shut down the Linux guest. It allows a program on the guest to perform
+clean-up functions on the guest before the guest is shut down or
+rebooted using ACPI.
+
+visordiag
+
+This driver provides the ability for the guest to write information
+into the s-Par diagnostics subsystem. It creates a set of devices
+named /dev/visordiag.X which can be written to by the guest to add
+text to the s-Par system log.
+
+Support Modules
+
+The modules described in this section provide functions and
+abstractions to support the modules described in the previous
+sections, to avoid having duplicated functionality.
+
+visornoop
+
+The visornoop module is a placeholder that responds to device
+create/destroy messages that are currently not in use by Linux guests.
+
+visoruislib
+
+The visoruislib module is a support library, used to handle requests
+from virtpci.
+
+visorchannelstub
+
+The visorchannelstub module provides support routines for storing and
+retrieving data from a channel.
+
+visorchannel
+
+The visorchannel module is a support library that abstracts reading
+and writing a channel in memory.
+
+visorutil
+
+The visorutil module is a support library required by all other s-Par
+driver modules. Among its features it abstracts reading, writing, and
+manipulating a block of memory.
+
+Minimum Required Driver Set
+
+The drivers required to boot a Linux guest are visorchipset, visorbus,
+visorvideoclient, visorconinclient, visoruislib, visorchannelstub,
+visorchannel, and visorutil. The other drivers are required by the
+product configurations that are currently being marketed.
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
new file mode 100644
index 000000000000..426f92b1c577
--- /dev/null
+++ b/drivers/staging/unisys/Documentation/proc-entries.txt
@@ -0,0 +1,93 @@
+ s-Par Proc Entries
+This document describes the proc entries created by the Unisys s-Par modules.
+
+Support Module Entries
+These entries are provided primarily for debugging.
+
+/proc/uislib/info: This entry contains debugging information for the
+uislib module, including bus information and memory usage.
+
+/proc/visorchipset/controlvm: This directory contains debugging
+entries for the controlvm channel used by visorchipset.
+
+/proc/uislib/platform: This entry is used to display the platform
+number of this node in the system. For some guests, this may be
+invalid.
+
+/proc/visorchipset/chipsetready: This entry is written to by scripts
+to signify that any user level activity has been completed before the
+guest can be considered running and is shown as running in the s-Par
+UI.
+
+Device Entries
+These entries provide status of the devices shared by a service partition.
+
+/proc/uislib/vbus: This is a directory containing entries for each
+virtual bus. Each numbered sub-directory contains an info entry, which
+describes the devices that appear on that bus.
+
+/proc/uislib/cycles_before_wait: This entry is used to tune
+performance, by setting the number of cycles we wait before going idle
+when in polling mode. A longer time will reduce message latency but
+spend more processing time polling.
+
+/proc/uislib/smart_wakeup: This entry is used to tune performance, by
+enabling or disabling smart wakeup.
+
+/proc/virthba/info: This entry contains debugging information for the
+virthba module, including interrupt information and memory usage.
+
+/proc/virthba/enable_ints: This entry controls interrupt use by the
+virthba module. Writing a 0 to this entry will disable interrupts.
+
+/proc/virtnic/info: This entry contains debugging information for the
+virtnic module, including interrupt information, send and receive
+counts, and other device information.
+
+/proc/virtnic/ethX: This is a directory containing entries for each
+virtual NIC. Each named subdirectory contains two entries,
+clientstring and zone.
+
+/proc/virtpci/info: This entry contains debugging information for the
+virtpci module, including virtual PCI bus information and device
+locations.
+
+/proc/virtnic/enable_ints: This entry controls interrupt use by the
+virtnic module. Writing a 0 to this entry will disable interrupts.
+
+Visorconinclient, visordiag, visornoop, visorserialclient, and
+visorvideoclient Entries
+
+The entries in proc for these modules all follow the same
+pattern. Each module has its own proc directory with the same name,
+e.g. visordiag presents a /proc/visordiag directory. Inside the
+module's directory is a device directory, which contains one numbered
+directory for each device provided by that module. Each device has a
+diag entry that presents the device number and visorbus name for that
+device. The module directory also has a driver/diag entry, which
+reports the corresponding s-Par version number of the driver.
+
+Automated Installation Entries
+
+These entries are used to pass information between the s-Par platform
+and the Linux-based installation and recovery tool. These values are
+read/write; however, the guest can only reset them to 0, or report an
+error status through the installer entry. The values are only set via
+s-Par's firmware interface, to help prevent accidentally booting into
+the tool.
+
+/proc/visorchipset/boottotool: This entry instructs s-Par that the
+next reboot will launch the installation and recovery tool. If set to
+0, the next boot will happen according to the UEFI boot manager
+settings.
+
+/proc/visorchipset/toolaction: This entry indicates the installation
+and recovery tool mode requested for the next boot.
+
+/proc/visorchipset/installer: This entry is used by the installation
+and recovery tool to pass status and result information back to the
+s-Par firmware.
+
+/proc/visorchipset/partition: This directory contains the guest
+partition configuration data for each virtual bus, for use during
+installation and at runtime for s-Par service partitions.
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
new file mode 100644
index 000000000000..ac080c9dcf46
--- /dev/null
+++ b/drivers/staging/unisys/Kconfig
@@ -0,0 +1,20 @@
+#
+# Unisys SPAR driver configuration
+#
+menuconfig UNISYSSPAR
+ bool "Unisys SPAR driver support"
+ depends on X86_64
+ ---help---
+ Support for the Unisys SPAR drivers
+
+if UNISYSSPAR
+
+source "drivers/staging/unisys/visorutil/Kconfig"
+source "drivers/staging/unisys/visorchannel/Kconfig"
+source "drivers/staging/unisys/visorchipset/Kconfig"
+source "drivers/staging/unisys/channels/Kconfig"
+source "drivers/staging/unisys/uislib/Kconfig"
+source "drivers/staging/unisys/virtpci/Kconfig"
+source "drivers/staging/unisys/virthba/Kconfig"
+
+endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
new file mode 100644
index 000000000000..c9cef0b91531
--- /dev/null
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -0,0 +1,6 @@
+Unisys s-Par drivers
+M: Ben Romer <sparmaintainer@unisys.com>
+S: Maintained
+F: Documentation/s-Par/overview.txt
+F: Documentation/s-Par/proc-entries.txt
+F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
new file mode 100644
index 000000000000..b988d6940aae
--- /dev/null
+++ b/drivers/staging/unisys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Unisys SPAR drivers
+#
+obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil/
+obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel/
+obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset/
+obj-$(CONFIG_UNISYS_CHANNELSTUB) += channels/
+obj-$(CONFIG_UNISYS_UISLIB) += uislib/
+obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci/
+obj-$(CONFIG_UNISYS_VIRTHBA) += virthba/
diff --git a/drivers/staging/unisys/TODO b/drivers/staging/unisys/TODO
new file mode 100644
index 000000000000..034ac61c44f2
--- /dev/null
+++ b/drivers/staging/unisys/TODO
@@ -0,0 +1,21 @@
+TODO:
+ -checkpatch warnings
+ -move /proc entries to /sys
+ -proper major number(s)
+ -add other drivers needed for full functionality:
+ -visorclientbus
+ -visorbus
+ -visordiag
+ -virtnic
+ -visornoop
+ -visorserial
+ -visorvideoclient
+ -visorconinclient
+ -sparstop
+ -move individual drivers into proper driver subsystems
+
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Ken Cox <jkc@redhat.com>
+ Unisys s-Par maintainer mailing list <sparmaintainer@unisys.com>
diff --git a/drivers/staging/unisys/channels/Kconfig b/drivers/staging/unisys/channels/Kconfig
new file mode 100644
index 000000000000..47a235385567
--- /dev/null
+++ b/drivers/staging/unisys/channels/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys channels configuration
+#
+
+config UNISYS_CHANNELSTUB
+ tristate "Unisys channelstub driver"
+ depends on UNISYSSPAR
+ ---help---
+ If you say Y here, you will enable the Unisys channels driver.
+
diff --git a/drivers/staging/unisys/channels/Makefile b/drivers/staging/unisys/channels/Makefile
new file mode 100644
index 000000000000..e60b0aef4dcd
--- /dev/null
+++ b/drivers/staging/unisys/channels/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Unisys channelstub
+#
+
+obj-$(CONFIG_UNISYS_CHANNELSTUB) += visorchannelstub.o
+
+visorchannelstub-y := channel.o chanstub.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/channels/channel.c b/drivers/staging/unisys/channels/channel.c
new file mode 100644
index 000000000000..f6452595b742
--- /dev/null
+++ b/drivers/staging/unisys/channels/channel.c
@@ -0,0 +1,219 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h> /* for module_init and module_exit */
+#include <linux/slab.h> /* for memcpy */
+#include <linux/types.h>
+
+/* Implementation of exported functions for Supervisor channels */
+#include "channel.h"
+
+/*
+ * Routine Description:
+ * Tries to insert the prebuilt signal pointed to by pSignal into the nth
+ * Queue of the Channel pointed to by pChannel
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to the signal
+ *
+ * Assumptions:
+ * - pChannel, Queue and pSignal are valid.
+ * - If insertion fails due to a full queue, the caller will determine the
+ * retry policy (e.g. wait & try again, report an error, etc.).
+ *
+ * Return value:
+ * 1 if the insertion succeeds, 0 if the queue was full.
+ */
+unsigned char
+visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, U32 Queue, void *pSignal)
+{
+ void __iomem *psignal;
+ unsigned int head, tail, nof;
+
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *) pChannel + readq(&pChannel->oChannelSpace))
+ + Queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->Head);
+ tail = readl(&pqhdr->Tail);
+
+ /* queue is full if (head + 1) % n equals tail */
+ if (((head + 1) % readl(&pqhdr->MaxSignalSlots)) == tail) {
+ nof = readq(&pqhdr->NumOverflows) + 1;
+ writeq(nof, &pqhdr->NumOverflows);
+ return 0;
+ }
+
+ /* increment the head index */
+ head = (head + 1) % readl(&pqhdr->MaxSignalSlots);
+
+ /* copy signal to the head location from the area pointed to
+ * by pSignal
+ */
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->oSignalBase) +
+ (head * readl(&pqhdr->SignalSize));
+ MEMCPY_TOIO(psignal, pSignal, readl(&pqhdr->SignalSize));
+
+ VolatileBarrier();
+ writel(head, &pqhdr->Head);
+
+ writeq(readq(&pqhdr->NumSignalsSent) + 1, &pqhdr->NumSignalsSent);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(visor_signal_insert);
+
+/*
+ * Routine Description:
+ * Removes one signal from Channel pChannel's nth Queue at the
+ * time of the call and copies it into the memory pointed to by
+ * pSignal.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to where the signals are to be copied
+ *
+ * Assumptions:
+ * - pChannel and Queue are valid.
+ * - pSignal points to a memory area large enough to hold queue's SignalSize
+ *
+ * Return value:
+ * 1 if the removal succeeds, 0 if the queue was empty.
+ */
+unsigned char
+visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, U32 Queue, void *pSignal)
+{
+ void __iomem *psource;
+ unsigned int head, tail;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
+ readq(&pChannel->oChannelSpace)) + Queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->Head);
+ tail = readl(&pqhdr->Tail);
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail) {
+ writeq(readq(&pqhdr->NumEmptyCnt) + 1, &pqhdr->NumEmptyCnt);
+ return 0;
+ }
+
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % readl(&pqhdr->MaxSignalSlots);
+
+ /* copy signal from tail location to the area pointed to by pSignal */
+ psource = (char __iomem *) pqhdr + readq(&pqhdr->oSignalBase) +
+ (tail * readl(&pqhdr->SignalSize));
+ MEMCPY_FROMIO(pSignal, psource, readl(&pqhdr->SignalSize));
+
+ VolatileBarrier();
+ writel(tail, &pqhdr->Tail);
+
+ writeq(readq(&pqhdr->NumSignalsReceived) + 1,
+ &pqhdr->NumSignalsReceived);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(visor_signal_remove);
+
+/*
+ * Routine Description:
+ * Removes all signals present in Channel pChannel's nth Queue at the
+ * time of the call and copies them into the memory pointed to by
+ * pSignal. Returns the # of signals copied as the value of the routine.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ * pSignal: (IN) pointer to where the signals are to be copied
+ *
+ * Assumptions:
+ * - pChannel and Queue are valid.
+ * - pSignal points to a memory area large enough to hold Queue's MaxSignals
+ * # of signals, each of which is Queue's SignalSize.
+ *
+ * Return value:
+ * # of signals copied.
+ */
+unsigned int
+SignalRemoveAll(pCHANNEL_HEADER pChannel, U32 Queue, void *pSignal)
+{
+ void *psource;
+ unsigned int head, tail, signalCount = 0;
+ pSIGNAL_QUEUE_HEADER pqhdr =
+ (pSIGNAL_QUEUE_HEADER) ((char *) pChannel +
+ pChannel->oChannelSpace) + Queue;
+
+ /* capture current head and tail */
+ head = pqhdr->Head;
+ tail = pqhdr->Tail;
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail)
+ return 0;
+
+ while (head != tail) {
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % pqhdr->MaxSignalSlots;
+
+ /* copy signal from tail location to the area pointed
+ * to by pSignal
+ */
+ psource =
+ (char *) pqhdr + pqhdr->oSignalBase +
+ (tail * pqhdr->SignalSize);
+ MEMCPY((char *) pSignal + (pqhdr->SignalSize * signalCount),
+ psource, pqhdr->SignalSize);
+
+ VolatileBarrier();
+ pqhdr->Tail = tail;
+
+ signalCount++;
+ pqhdr->NumSignalsReceived++;
+ }
+
+ return signalCount;
+}
+
+/*
+ * Routine Description:
+ * Determine whether a signal queue is empty.
+ *
+ * Parameters:
+ * pChannel: (IN) points to the IO Channel
+ * Queue: (IN) nth Queue of the IO Channel
+ *
+ * Return value:
+ * 1 if the signal queue is empty, 0 otherwise.
+ */
+unsigned char
+visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel, U32 Queue)
+{
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr =
+ (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
+ readq(&pChannel->oChannelSpace)) + Queue;
+ return readl(&pqhdr->Head) == readl(&pqhdr->Tail);
+}
+EXPORT_SYMBOL_GPL(visor_signalqueue_empty);
+
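
visor_signal_insert() and visor_signal_remove() above return 1 on success and 0 when the queue is full or empty, so a typical caller loops until 0. A hedged sketch of a drain loop; the queue index, struct my_signal, and drain_queue are placeholders, not symbols from this patch:

    /* Sketch only: assumes the channel.h definitions above are included
     * and that "chan" maps a valid channel whose queue 0 carries
     * fixed-size signals of the invented type struct my_signal.
     */
    struct my_signal {
            U32 cmd;
            U32 arg;
    };

    static unsigned int drain_queue(CHANNEL_HEADER __iomem *chan)
    {
            struct my_signal sig;
            unsigned int handled = 0;

            while (visor_signal_remove(chan, 0, &sig)) {
                    /* process one signal copied out of the shared queue */
                    handled++;
            }
            return handled;
    }
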
diff --git a/drivers/staging/unisys/channels/chanstub.c b/drivers/staging/unisys/channels/chanstub.c
new file mode 100644
index 000000000000..f504f49a436a
--- /dev/null
+++ b/drivers/staging/unisys/channels/chanstub.c
@@ -0,0 +1,70 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h> /* for module_init and module_exit */
+#include <linux/slab.h> /* for memcpy */
+#include <linux/types.h>
+
+#include "channel.h"
+#include "chanstub.h"
+#include "version.h"
+
+static __init int
+channel_mod_init(void)
+{
+ return 0;
+}
+
+static __exit void
+channel_mod_exit(void)
+{
+}
+
+unsigned char
+SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock)
+{
+ unsigned char result;
+ unsigned long flags;
+ spin_lock_irqsave(lock, flags);
+ result = visor_signal_insert(pChannel, Queue, pSignal);
+ spin_unlock_irqrestore(lock, flags);
+ return result;
+}
+
+unsigned char
+SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock)
+{
+ unsigned char result;
+ spin_lock(lock);
+ result = visor_signal_remove(pChannel, Queue, pSignal);
+ spin_unlock(lock);
+ return result;
+}
+
+module_init(channel_mod_init);
+module_exit(channel_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bryan Glaudel");
+MODULE_ALIAS("uischan");
+ /* this is extracted during depmod and kept in modules.dep */
diff --git a/drivers/staging/unisys/channels/chanstub.h b/drivers/staging/unisys/channels/chanstub.h
new file mode 100644
index 000000000000..8d727debca67
--- /dev/null
+++ b/drivers/staging/unisys/channels/chanstub.h
@@ -0,0 +1,23 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHANSTUB_H__
+#define __CHANSTUB_H__
+unsigned char SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock);
+unsigned char SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal, spinlock_t *lock);
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel.h b/drivers/staging/unisys/common-spar/include/channels/channel.h
new file mode 100644
index 000000000000..aee204172b21
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/channel.h
@@ -0,0 +1,661 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHANNEL_H__
+#define __CHANNEL_H__
+
+/*
+* Whenever this file is changed a corresponding change must be made in
+* the Console/ServicePart/visordiag_early/supervisor_channel.h file
+* which is needed for Linux kernel compiles. These two files must be
+* in sync.
+*/
+
+/* define the following to prevent include nesting in kernel header
+ * files of similar abbreviated content
+ */
+#define __SUPERVISOR_CHANNEL_H__
+
+#include "commontypes.h"
+
+#define SIGNATURE_16(A, B) ((A) | (B<<8))
+#define SIGNATURE_32(A, B, C, D) \
+ (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
+#define SIGNATURE_64(A, B, C, D, E, F, G, H) \
+ (SIGNATURE_32(A, B, C, D) | ((U64)(SIGNATURE_32(E, F, G, H)) << 32))
+
+#ifndef lengthof
+#define lengthof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
+#endif
+#ifndef COVERQ
+#define COVERQ(v, d) (((v)+(d)-1) / (d))
+#endif
+#ifndef COVER
+#define COVER(v, d) ((d)*COVERQ(v, d))
+#endif
+
+#ifndef GUID0
+#define GUID0 {0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0} }
+#endif
+
+/* The C language is inconsistent with respect to where it allows literal
+ * constants, especially literal constant structs. Literal constant structs
+ * are allowed for initialization only, whereas other types of literal
+ * constants are allowed anywhere. We get around this inconsistency by
+ * declaring a "static const" variable for each GUID. This variable can be
+ * used in expressions where the literal constant would not be allowed.
+ */
+static const GUID Guid0 = GUID0;
+
+#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+
+typedef enum {
+ CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
+ CHANNELSRV_READY = 1 /* channel has been initialized by server */
+} CHANNEL_SERVERSTATE;
+
+typedef enum {
+ CHANNELCLI_DETACHED = 0,
+ CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT
+ * allowed to use it unless given TBD
+ * explicit request (should actually be
+ * < DETACHED) */
+ CHANNELCLI_ATTACHING = 2, /* legacy EFI client request
+ * for EFI server to attach */
+ CHANNELCLI_ATTACHED = 3, /* idle, but client may want
+ * to use channel any time */
+ CHANNELCLI_BUSY = 4, /* client either wants to use or is
+ * using channel */
+ CHANNELCLI_OWNED = 5 /* "no worries" state - client can
+ * access channel anytime */
+} CHANNEL_CLIENTSTATE;
+static inline const U8 *
+ULTRA_CHANNELCLI_STRING(U32 v)
+{
+ switch (v) {
+ case CHANNELCLI_DETACHED:
+ return (const U8 *) ("DETACHED");
+ case CHANNELCLI_DISABLED:
+ return (const U8 *) ("DISABLED");
+ case CHANNELCLI_ATTACHING:
+ return (const U8 *) ("ATTACHING");
+ case CHANNELCLI_ATTACHED:
+ return (const U8 *) ("ATTACHED");
+ case CHANNELCLI_BUSY:
+ return (const U8 *) ("BUSY");
+ case CHANNELCLI_OWNED:
+ return (const U8 *) ("OWNED");
+ default:
+ break;
+ }
+ return (const U8 *) ("?");
+}
+
+#define ULTRA_CHANNELSRV_IS_READY(x) ((x) == CHANNELSRV_READY)
+#define ULTRA_CHANNEL_SERVER_READY(pChannel) \
+ (ULTRA_CHANNELSRV_IS_READY(readl(&(pChannel)->SrvState)))
+
+#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
+ (((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DETACHED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DETACHED)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHING)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_ATTACHED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_BUSY)) || \
+ (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_DISABLED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_OWNED)) || \
+ (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
+ ? (1) : (0))
+
+#define ULTRA_CHANNEL_CLIENT_CHK_TRANSITION(old, new, chanId, logCtx, \
+ file, line) \
+ do { \
+ if (!ULTRA_VALID_CHANNELCLI_TRANSITION(old, new)) \
+ UltraLogEvent(logCtx, \
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR, \
+ CHANNELSTATE_DIAG_SEVERITY, \
+ CHANNELSTATE_DIAG_SUBSYS, \
+ __func__, __LINE__, \
+ "%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ chanId, "CliState<x>", \
+ ULTRA_CHANNELCLI_STRING(old), \
+ old, \
+ ULTRA_CHANNELCLI_STRING(new), \
+ new, \
+ PathName_Last_N_Nodes((U8 *)file, 4), \
+ line); \
+ } while (0)
+
+#define ULTRA_CHANNEL_CLIENT_TRANSITION(pChan, chanId, \
+ newstate, logCtx) \
+ do { \
+ ULTRA_CHANNEL_CLIENT_CHK_TRANSITION( \
+ readl(&(((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS)), \
+ newstate, \
+ chanId, logCtx, __FILE__, __LINE__); \
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK, \
+ CHANNELSTATE_DIAG_SEVERITY, \
+ CHANNELSTATE_DIAG_SUBSYS, \
+ __func__, __LINE__, \
+ "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ chanId, "CliStateOS", \
+ ULTRA_CHANNELCLI_STRING( \
+ readl(&((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS)), \
+ readl(&((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS), \
+ ULTRA_CHANNELCLI_STRING(newstate), \
+ newstate, \
+ PathName_Last_N_Nodes(__FILE__, 4), __LINE__); \
+ writel(newstate, &((CHANNEL_HEADER __iomem *) \
+ (pChan))->CliStateOS); \
+ MEMORYBARRIER; \
+ } while (0)
+
+#define ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(pChan, chanId, logCtx) \
+ ULTRA_channel_client_acquire_os(pChan, chanId, logCtx, \
+ (char *)__FILE__, __LINE__, \
+ (char *)__func__)
+#define ULTRA_CHANNEL_CLIENT_RELEASE_OS(pChan, chanId, logCtx) \
+ ULTRA_channel_client_release_os(pChan, chanId, logCtx, \
+ (char *)__FILE__, __LINE__, (char *)__func__)
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */
+/* throttling invalid boot channel statetransition error due to client
+ * disabled */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
+
+/* throttling invalid boot channel statetransition error due to client
+ * not attached */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
+
+/* throttling invalid boot channel statetransition error due to busy channel */
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorOS: */
+/* throttling invalid guest OS channel statetransition error due to
+ * client disabled */
+#define ULTRA_CLIERROROS_THROTTLEMSG_DISABLED 0x01
+
+/* throttling invalid guest OS channel statetransition error due to
+ * client not attached */
+#define ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED 0x02
+
+/* throttling invalid guest OS channel statetransition error due to
+ * busy channel */
+#define ULTRA_CLIERROROS_THROTTLEMSG_BUSY 0x04
+
+/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so
+* that windows guest can look at the FeatureFlags in the io channel,
+* and configure the windows driver to use interrupts or not based on
+* this setting. This flag is set in uislib after the
+* ULTRA_VHBA_init_channel is called. All feature bits for all
+* channels should be defined here. The io channel feature bits are
+* defined right here */
+#define ULTRA_IO_DRIVER_ENABLES_INTS (0x1ULL << 1)
+#define ULTRA_IO_CHANNEL_IS_POLLING (0x1ULL << 3)
+#define ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS (0x1ULL << 4)
+#define ULTRA_IO_DRIVER_DISABLES_INTS (0x1ULL << 5)
+#define ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+/* Common Channel Header */
+typedef struct _CHANNEL_HEADER {
+ U64 Signature; /* Signature */
+ U32 LegacyState; /* DEPRECATED - being replaced by */
+ /* / SrvState, CliStateBoot, and CliStateOS below */
+ U32 HeaderSize; /* sizeof(CHANNEL_HEADER) */
+ U64 Size; /* Total size of this channel in bytes */
+ U64 Features; /* Flags to modify behavior */
+ GUID Type; /* Channel type: data, bus, control, etc. */
+ U64 PartitionHandle; /* ID of guest partition */
+ U64 Handle; /* Device number of this channel in client */
+ U64 oChannelSpace; /* Offset in bytes to channel specific area */
+ U32 VersionId; /* CHANNEL_HEADER Version ID */
+ U32 PartitionIndex; /* Index of guest partition */
+ GUID ZoneGuid; /* Guid of Channel's zone */
+ U32 oClientString; /* offset from channel header to
+ * nul-terminated ClientString (0 if
+ * ClientString not present) */
+ U32 CliStateBoot; /* CHANNEL_CLIENTSTATE of pre-boot
+ * EFI client of this channel */
+ U32 CmdStateCli; /* CHANNEL_COMMANDSTATE (overloaded in
+ * Windows drivers, see ServerStateUp,
+ * ServerStateDown, etc) */
+ U32 CliStateOS; /* CHANNEL_CLIENTSTATE of Guest OS
+ * client of this channel */
+ U32 ChannelCharacteristics; /* CHANNEL_CHARACTERISTIC_<xxx> */
+ U32 CmdStateSrv; /* CHANNEL_COMMANDSTATE (overloaded in
+ * Windows drivers, see ServerStateUp,
+ * ServerStateDown, etc) */
+ U32 SrvState; /* CHANNEL_SERVERSTATE */
+ U8 CliErrorBoot; /* bits to indicate err states for
+ * boot clients, so err messages can
+ * be throttled */
+ U8 CliErrorOS; /* bits to indicate err states for OS
+ * clients, so err messages can be
+ * throttled */
+ U8 Filler[1]; /* Pad out to 128-byte cache line */
+ /* Please add all new single-byte values below here */
+ U8 RecoverChannel;
+} CHANNEL_HEADER, *pCHANNEL_HEADER, ULTRA_CHANNEL_PROTOCOL;
+
+#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0)
+
+/* Subheader for the Signal Type variation of the Common Channel */
+typedef struct _SIGNAL_QUEUE_HEADER {
+ /* 1st cache line */
+ U32 VersionId; /* SIGNAL_QUEUE_HEADER Version ID */
+ U32 Type; /* Queue type: storage, network */
+ U64 Size; /* Total size of this queue in bytes */
+ U64 oSignalBase; /* Offset to signal queue area */
+ U64 FeatureFlags; /* Flags to modify behavior */
+ U64 NumSignalsSent; /* Total # of signals placed in this queue */
+ U64 NumOverflows; /* Total # of inserts failed due to
+ * full queue */
+ U32 SignalSize; /* Total size of a signal for this queue */
+ U32 MaxSignalSlots; /* Max # of slots in queue, 1 slot is
+ * always empty */
+ U32 MaxSignals; /* Max # of signals in queue
+ * (MaxSignalSlots-1) */
+ U32 Head; /* Queue head signal # */
+ /* 2nd cache line */
+ U64 NumSignalsReceived; /* Total # of signals removed from this queue */
+ U32 Tail; /* Queue tail signal # (on separate
+ * cache line) */
+ U32 Reserved1; /* Reserved field */
+ U64 Reserved2; /* Reserved field */
+ U64 ClientQueue;
+ U64 NumInterruptsReceived; /* Total # of Interrupts received. This
+ * is incremented by the ISR in the
+ * guest windows driver */
+ U64 NumEmptyCnt; /* Number of times that visor_signal_remove
+ * is called and returned Empty
+ * Status. */
+ U32 ErrorFlags; /* Error bits set during SignalReinit
+ * to denote trouble with client's
+ * fields */
+ U8 Filler[12]; /* Pad out to 64-byte cache line */
+} SIGNAL_QUEUE_HEADER, *pSIGNAL_QUEUE_HEADER;
+
+#pragma pack(pop)
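+
+/* Illustrative sketch (not part of this interface): the signal queue behaves
+ * as a ring buffer in which one slot is always left empty, which is why
+ * MaxSignals == MaxSignalSlots - 1 above. Whether Head or Tail is the
+ * producer index is up to visor_signal_insert/remove; insert_idx/remove_idx
+ * below are placeholders for whichever applies.
+ */
+#if 0 /* example only */
+static inline int example_queue_is_full(U32 insert_idx, U32 remove_idx,
+ U32 max_slots)
+{
+ return ((insert_idx + 1) % max_slots) == remove_idx;
+}
+
+static inline int example_queue_is_empty(U32 insert_idx, U32 remove_idx)
+{
+ return insert_idx == remove_idx;
+}
+#endif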
+
+#define SignalInit(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
+ do { \
+ MEMSET(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \
+ chan->QHDRFLD.VersionId = ver; \
+ chan->QHDRFLD.Type = typ; \
+ chan->QHDRFLD.Size = sizeof(chan->QDATAFLD); \
+ chan->QHDRFLD.SignalSize = sizeof(QDATATYPE); \
+ chan->QHDRFLD.oSignalBase = (UINTN)(chan->QDATAFLD)- \
+ (UINTN)(&chan->QHDRFLD); \
+ chan->QHDRFLD.MaxSignalSlots = \
+ sizeof(chan->QDATAFLD)/sizeof(QDATATYPE); \
+ chan->QHDRFLD.MaxSignals = chan->QHDRFLD.MaxSignalSlots-1; \
+ } while (0)
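+
+/* Illustrative sketch (hypothetical EXAMPLE_* names, not part of this
+ * interface): the channel server is expected to invoke SignalInit() once per
+ * queue, naming the SIGNAL_QUEUE_HEADER field and the backing signal array
+ * that live in the same channel struct.
+ */
+#if 0 /* example only */
+typedef struct { U64 data; } EXAMPLE_SIGNAL;
+typedef struct _EXAMPLE_CHANNEL {
+ CHANNEL_HEADER Header;
+ SIGNAL_QUEUE_HEADER RequestQueue;
+ EXAMPLE_SIGNAL RequestQ[16];
+} EXAMPLE_CHANNEL;
+
+static inline void example_init_queues(EXAMPLE_CHANNEL *chan)
+{
+ /* version 1, queue type 0 - both values are illustrative only */
+ SignalInit(chan, RequestQueue, RequestQ, EXAMPLE_SIGNAL, 1, 0);
+}
+#endif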
+
+/* Generic function useful for validating any type of channel when it is
+ * received by the client that will be accessing the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+static inline int
+ULTRA_check_channel_client(void __iomem *pChannel,
+ GUID expectedTypeGuid,
+ char *channelName,
+ U64 expectedMinBytes,
+ U32 expectedVersionId,
+ U64 expectedSignature,
+ char *fileName, int lineNumber, void *logCtx)
+{
+ if (MEMCMP(&expectedTypeGuid, &Guid0, sizeof(GUID)) != 0)
+ /* caller wants us to verify type GUID */
+ if (MEMCMP_IO(&(((CHANNEL_HEADER __iomem *) (pChannel))->Type),
+ &expectedTypeGuid, sizeof(GUID)) != 0) {
+ CHANNEL_GUID_MISMATCH(expectedTypeGuid, channelName,
+ "type", expectedTypeGuid,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Type, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedMinBytes > 0) /* caller wants us to verify
+ * channel size */
+ if (readq(&((CHANNEL_HEADER __iomem *)
+ (pChannel))->Size) < expectedMinBytes) {
+ CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
+ "size", expectedMinBytes,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Size, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedVersionId > 0) /* caller wants us to verify
+ * channel version */
+ if (readl(&((CHANNEL_HEADER __iomem *) (pChannel))->VersionId)
+ != expectedVersionId) {
+ CHANNEL_U32_MISMATCH(expectedTypeGuid, channelName,
+ "version", expectedVersionId,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->VersionId, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ if (expectedSignature > 0) /* caller wants us to verify
+ * channel signature */
+ if (readq(&((CHANNEL_HEADER __iomem *) (pChannel))->Signature)
+ != expectedSignature) {
+ CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
+ "signature", expectedSignature,
+ ((CHANNEL_HEADER __iomem *)
+ (pChannel))->Signature, fileName,
+ lineNumber, logCtx);
+ return 0;
+ }
+ return 1;
+}
+
+/* Generic function useful for validating any type of channel when it is about
+ * to be initialized by the server of the channel.
+ * Note that <logCtx> is only needed for callers in the EFI environment, and
+ * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */
+static inline int
+ULTRA_check_channel_server(GUID typeGuid,
+ char *channelName,
+ U64 expectedMinBytes,
+ U64 actualBytes,
+ char *fileName, int lineNumber, void *logCtx)
+{
+ if (expectedMinBytes > 0) /* caller wants us to verify
+ * channel size */
+ if (actualBytes < expectedMinBytes) {
+ CHANNEL_U64_MISMATCH(typeGuid, channelName, "size",
+ expectedMinBytes, actualBytes,
+ fileName, lineNumber, logCtx);
+ return 0;
+ }
+ return 1;
+}
+
+/* Given a file pathname <s> (with '/' or '\' separating directory nodes),
+ * returns a pointer to the beginning of a node within that pathname such
+ * that the number of nodes from that pointer to the end of the string is
+ * NOT more than <n>. Note that if the pathname has fewer than <n> nodes
+ * in it, the return pointer will be to the beginning of the string.
+ */
+static inline U8 *
+PathName_Last_N_Nodes(U8 *s, unsigned int n)
+{
+ U8 *p = s;
+ unsigned int node_count = 0;
+ while (*p != '\0') {
+ if ((*p == '/') || (*p == '\\'))
+ node_count++;
+ p++;
+ }
+ if (node_count <= n)
+ return s;
+ while (n > 0) {
+ p--;
+ if (p == s)
+ break; /* should never happen, unless someone
+ * is changing the string while we are
+ * looking at it!! */
+ if ((*p == '/') || (*p == '\\'))
+ n--;
+ }
+ return p + 1;
+}
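+
+/* Illustrative usage (a sketch with a hypothetical path): the logging macros
+ * above use PathName_Last_N_Nodes(__FILE__, 4) to shorten long source paths;
+ * e.g. asking for the last 2 nodes of a deeper path returns a pointer into it:
+ */
+#if 0 /* example only */
+static inline void example_last_nodes(void)
+{
+ U8 path[] = "dir1/dir2/dir3/file.c";
+ U8 *tail = PathName_Last_N_Nodes(path, 2);
+ /* tail now points at "dir3/file.c" */
+}
+#endif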
+
+static inline int
+ULTRA_channel_client_acquire_os(void __iomem *pChannel, U8 *chanId,
+ void *logCtx, char *file, int line, char *func)
+{
+ CHANNEL_HEADER __iomem *pChan = pChannel;
+
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_DISABLED) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_DISABLED,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED @%s:%d\n",
+ chanId, PathName_Last_N_Nodes(
+ (U8 *) file, 4), line);
+ }
+ return 0;
+ }
+ if ((readl(&pChan->CliStateOS) != CHANNELCLI_OWNED)
+ && (readl(&pChan->CliStateBoot) == CHANNELCLI_DISABLED)) {
+ /* Our competitor is DISABLED, so we can transition to OWNED */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n",
+ chanId, "CliStateOS",
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
+ CHANNELCLI_OWNED,
+ PathName_Last_N_Nodes((U8 *) file, 4), line);
+ writel(CHANNELCLI_OWNED, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ }
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED) {
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state;
+ * come out of it */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client acquire now successful @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file,
+ 4), line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ return 1;
+ }
+
+ /* We have to do it the "hard way". We transition to BUSY,
+ * and can use the channel iff our competitor has not also
+ * transitioned to BUSY. */
+ if (readl(&pChan->CliStateOS) != CHANNELCLI_ATTACHED) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d)) @%s:%d\n",
+ chanId,
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ }
+ return 0;
+ }
+ writel(CHANNELCLI_BUSY, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ if (readl(&pChan->CliStateBoot) == CHANNELCLI_BUSY) {
+ if ((readb(&pChan->CliErrorOS)
+ & ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) {
+ /* we are NOT throttling this message */
+ writeb(readb(&pChan->CliErrorOS) |
+ ULTRA_CLIERROROS_THROTTLEMSG_BUSY,
+ &pChan->CliErrorOS);
+ /* throttle until acquire successful */
+ UltraLogEvent(logCtx,
+ CHANNELSTATE_DIAG_EVENTID_TRANSITBUSY,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition failed - host OS acquire failed because boot BUSY @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file,
+ 4), line);
+ }
+ /* reset busy */
+ writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS);
+ MEMORYBARRIER;
+ return 0;
+ }
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state; come out of it */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client acquire now successful @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ return 1;
+}
+
+static inline void
+ULTRA_channel_client_release_os(void __iomem *pChannel, U8 *chanId,
+ void *logCtx, char *file, int line, char *func)
+{
+ CHANNEL_HEADER __iomem *pChan = pChannel;
+ if (readb(&pChan->CliErrorOS) != 0) {
+ /* we are in an error msg throttling state; come out of it */
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel OS client error state cleared @%s:%d\n",
+ chanId, PathName_Last_N_Nodes((U8 *) file, 4),
+ line);
+ writeb(0, &pChan->CliErrorOS);
+ }
+ if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED)
+ return;
+ if (readl(&pChan->CliStateOS) != CHANNELCLI_BUSY) {
+ UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
+ CHANNELSTATE_DIAG_SEVERITY,
+ CHANNELSTATE_DIAG_SUBSYS, func, line,
+ "%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d)) @%s:%d\n",
+ chanId,
+ ULTRA_CHANNELCLI_STRING(
+ readl(&pChan->CliStateOS)),
+ readl(&pChan->CliStateOS),
+ PathName_Last_N_Nodes((U8 *) file, 4), line);
+ /* return; */
+ }
+ writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS); /* release busy */
+}
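+
+/* Illustrative usage (a sketch; example_use_channel and its arguments are
+ * hypothetical): guest-OS-side code is expected to bracket channel accesses
+ * with the acquire/release macros defined earlier, backing off when the
+ * acquire reports failure.
+ */
+#if 0 /* example only */
+static int example_use_channel(void __iomem *pChan, U8 *chanId, void *ctx)
+{
+ if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(pChan, chanId, ctx))
+ return 0; /* DISABLED, not ATTACHED, or competitor BUSY */
+ /* ... safe to touch the channel's signal queues here ... */
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(pChan, chanId, ctx);
+ return 1;
+}
+#endif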
+
+/*
+* Routine Description:
+* Tries to insert the prebuilt signal pointed to by pSignal into the nth
+* Queue of the Channel pointed to by pChannel
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to the signal
+*
+* Assumptions:
+* - pChannel, Queue and pSignal are valid.
+* - If insertion fails due to a full queue, the caller will determine the
+* retry policy (e.g. wait & try again, report an error, etc.).
+*
+* Return value: 1 if the insertion succeeds, 0 if the queue was
+* full.
+*/
+
+unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal);
+
+/*
+* Routine Description:
+* Removes one signal from Channel pChannel's nth Queue at the
+* time of the call and copies it into the memory pointed to by
+* pSignal.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to where the signal is to be copied
+*
+* Assumptions:
+* - pChannel and Queue are valid.
+* - pSignal points to a memory area large enough to hold queue's SignalSize
+*
+* Return value: 1 if the removal succeeds, 0 if the queue was
+* empty.
+*/
+
+unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, U32 Queue,
+ void *pSignal);
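+
+/* Illustrative usage (a sketch with hypothetical types and queue numbers): a
+ * typical client drains its receive queue with visor_signal_remove() and,
+ * when a reply is needed, posts it back with visor_signal_insert(), letting
+ * caller-defined policy handle a full queue as described above.
+ */
+#if 0 /* example only */
+typedef struct { U8 bytes[64]; } EXAMPLE_IO_SIGNAL; /* match queue SignalSize */
+
+static void example_service_queues(CHANNEL_HEADER __iomem *chan)
+{
+ EXAMPLE_IO_SIGNAL sig;
+
+ while (visor_signal_remove(chan, 0 /* request queue */, &sig)) {
+ /* ... process sig and build a response in place ... */
+ if (!visor_signal_insert(chan, 1 /* response queue */, &sig))
+ break; /* queue full - retry or report per caller policy */
+ }
+}
+#endif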
+
+/*
+* Routine Description:
+* Removes all signals present in Channel pChannel's nth Queue at the
+* time of the call and copies them into the memory pointed to by
+* pSignal. Returns the # of signals copied as the value of the routine.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+* pSignal: (IN) pointer to where the signals are to be copied
+*
+* Assumptions:
+* - pChannel and Queue are valid.
+* - pSignal points to a memory area large enough to hold Queue's MaxSignals
+* # of signals, each of which is Queue's SignalSize.
+*
+* Return value:
+* # of signals copied.
+*/
+unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, U32 Queue,
+ void *pSignal);
+
+/*
+* Routine Description:
+* Determine whether a signal queue is empty.
+*
+* Parameters:
+* pChannel: (IN) points to the IO Channel
+* Queue: (IN) nth Queue of the IO Channel
+*
+* Return value:
+* 1 if the signal queue is empty, 0 otherwise.
+*/
+unsigned char visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel,
+ U32 Queue);
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
new file mode 100644
index 000000000000..ae0dc2b2ad14
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
@@ -0,0 +1,64 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * CHANNEL Guids
+ */
+
+/* Used in IOChannel
+ * {414815ed-c58c-11da-95a9-00e08161165f}
+ */
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_GUID \
+ { 0x414815ed, 0xc58c, 0x11da, \
+ { 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f } }
+static const GUID UltraVhbaChannelProtocolGuid =
+ ULTRA_VHBA_CHANNEL_PROTOCOL_GUID;
+
+/* Used in IOChannel
+ * {8cd5994d-c58e-11da-95a9-00e08161165f}
+ */
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_GUID \
+ { 0x8cd5994d, 0xc58e, 0x11da, \
+ { 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f } }
+static const GUID UltraVnicChannelProtocolGuid =
+ ULTRA_VNIC_CHANNEL_PROTOCOL_GUID;
+
+/* Used in IOChannel
+ * {72120008-4AAB-11DC-8530-444553544200}
+ */
+#define ULTRA_SIOVM_GUID \
+ { 0x72120008, 0x4AAB, 0x11DC, \
+ { 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00 } }
+static const GUID UltraSIOVMGuid = ULTRA_SIOVM_GUID;
+
+
+/* Used in visornoop/visornoop_main.c
+ * {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
+ */
+#define ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID \
+ { 0x5b52c5ac, 0xe5f5, 0x4d42, \
+ { 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f } }
+
+static const GUID UltraControlDirectorChannelProtocolGuid =
+ ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID;
+
+/* Used in visorchipset/visorchipset_main.c
+ * {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
+ */
+#define ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID \
+ {0xb4e79625, 0xaede, 0x4eaa, \
+ { 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c } }
+
+
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlframework.h b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
new file mode 100644
index 000000000000..512643348349
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
@@ -0,0 +1,77 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Module Name:
+ * controlframework.h
+ *
+ * Abstract: This file defines common structures in the unmanaged
+ * Ultravisor (mostly EFI) space.
+ *
+ */
+
+#ifndef _CONTROL_FRAMEWORK_H_
+#define _CONTROL_FRAMEWORK_H_
+
+#include "commontypes.h"
+#include "channel.h"
+
+#define ULTRA_MEMORY_COUNT_Ki 1024
+
+/* Scale order 0 is one 32-bit (4-byte) word (on a 64- or 128-bit
+ * architecture this may be a 64- or 128-bit word) */
+#define ULTRA_MEMORY_PAGE_WORD 4
+
+/* Define Ki scale page to be traditional 4KB page */
+#define ULTRA_MEMORY_PAGE_Ki (ULTRA_MEMORY_PAGE_WORD * ULTRA_MEMORY_COUNT_Ki)
+typedef struct _ULTRA_SEGMENT_STATE {
+ U16 Enabled:1; /* Bit 0: May enter other states */
+ U16 Active:1; /* Bit 1: Assigned to active partition */
+ U16 Alive:1; /* Bit 2: Configure message sent to
+ * service/server */
+ U16 Revoked:1; /* Bit 3: similar to partition state
+ * ShuttingDown */
+ U16 Allocated:1; /* Bit 4: memory (device/port number)
+ * has been selected by Command */
+ U16 Known:1; /* Bit 5: has been introduced to the
+ * service/guest partition */
+ U16 Ready:1; /* Bit 6: service/Guest partition has
+ * responded to introduction */
+ U16 Operating:1; /* Bit 7: resource is configured and
+ * operating */
+ /* Note: don't use high bit unless we need to switch to ushort
+ * which is non-compliant */
+} ULTRA_SEGMENT_STATE;
+static const ULTRA_SEGMENT_STATE SegmentStateRunning = {
+ 1, 1, 1, 0, 1, 1, 1, 1
+};
+static const ULTRA_SEGMENT_STATE SegmentStatePaused = {
+ 1, 1, 1, 0, 1, 1, 1, 0
+};
+static const ULTRA_SEGMENT_STATE SegmentStateStandby = {
+ 1, 1, 0, 0, 1, 1, 1, 0
+};
+typedef union {
+ U64 Full;
+ struct {
+ U8 Major; /* will be 1 for the first release and
+ * increment thereafter */
+ U8 Minor;
+ U16 Maintenance;
+ U32 Revision; /* Subversion revision */
+ } Part;
+} ULTRA_COMPONENT_VERSION;
+
+#endif /* _CONTROL_FRAMEWORK_H_ not defined */
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
new file mode 100644
index 000000000000..47f1c4fa1e7e
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
@@ -0,0 +1,619 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CONTROLVMCHANNEL_H__
+#define __CONTROLVMCHANNEL_H__
+
+#include "commontypes.h"
+#include "channel.h"
+#include "controlframework.h"
+enum { INVALID_GUEST_FIRMWARE, SAMPLE_GUEST_FIRMWARE,
+ TIANO32_GUEST_FIRMWARE, TIANO64_GUEST_FIRMWARE
+};
+
+/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID \
+ {0x2b3c2d10, 0x7ef5, 0x4ad8, \
+ {0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d} }
+
+static const GUID UltraControlvmChannelProtocolGuid =
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID;
+
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
+ ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define CONTROLVM_MESSAGE_MAX 64
+
+/* Must increment this whenever you insert or delete fields within
+* this channel struct. Also increment whenever you change the meaning
+* of fields within this channel struct so as to break pre-existing
+* software. Note that you can usually add fields to the END of the
+* channel struct withOUT needing to increment this. */
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_CONTROLVM_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraControlvmChannelProtocolGuid, \
+ "controlvm", \
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_CONTROLVM_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraControlvmChannelProtocolGuid, \
+ "controlvm", \
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ actualBytes, __FILE__, __LINE__, logCtx))
+
+#define MY_DEVICE_INDEX 0
+#define MAX_MACDATA_LEN 8 /* number of bytes for MAC address in config packet */
+#define MAX_SERIAL_NUM 32
+
+#define DISK_ZERO_PUN_NUMBER 1 /* Target ID on the SCSI bus for LUN 0 */
+#define DISK_ZERO_LUN_NUMBER 3 /* Logical Unit Number */
+
+/* Defines for various channel queues... */
+#define CONTROLVM_QUEUE_REQUEST 0
+#define CONTROLVM_QUEUE_RESPONSE 1
+#define CONTROLVM_QUEUE_EVENT 2
+#define CONTROLVM_QUEUE_ACK 3
+
+/* Max number of messages stored during IOVM creation to be reused
+ * after crash */
+#define CONTROLVM_CRASHMSG_MAX 2
+
+/** Ids for commands that may appear in either queue of a ControlVm channel.
+ *
+ * Commands that are initiated by the command partition (CP), by an IO or
+ * console service partition (SP), or by a guest partition (GP) are:
+ * - issued on the RequestQueue queue (q #0) in the ControlVm channel
+ * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel
+ *
+ * Events that are initiated by an IO or console service partition (SP) or
+ * by a guest partition (GP) are:
+ * - issued on the EventQueue queue (q #2) in the ControlVm channel
+ * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
+ */
+typedef enum {
+ CONTROLVM_INVALID = 0,
+ /* SWITCH commands - required parameter: SwitchNumber */
+ /* BUS commands - required parameter: BusNumber */
+ CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */
+ CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */
+ CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */
+ CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */
+ CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */
+/* DEVICE commands - required parameters: BusNumber, DeviceNumber */
+
+ CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */
+ CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */
+ CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */
+ CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */
+/* DISK commands - required parameters: BusNumber, DeviceNumber */
+ CONTROLVM_DISK_CREATE = 0x221, /* CP --> SP */
+ CONTROLVM_DISK_DESTROY = 0x222, /* CP --> SP */
+ CONTROLVM_DISK_CONFIGURE = 0x223, /* CP --> SP */
+ CONTROLVM_DISK_CHANGESTATE = 0x224, /* CP --> SP */
+/* CHIPSET commands */
+ CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */
+ CONTROLVM_CHIPSET_SHUTDOWN = 0x303, /* CP --> SP */
+ CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
+ CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
+
+} CONTROLVM_ID;
+
+struct InterruptInfo {
+ /**< specifies interrupt info. It is used to send interrupts
+ * for this channel. The peer at the end of this channel
+ * who has registered an interrupt (using recv fields
+ * below) will receive the interrupt. Passed as a parameter
+ * to Issue_VMCALL_IO_QUEUE_TRANSITION, which generates the
+ * interrupt. Currently this is used by IOPart-SP to wake
+ * up GP when Data Channel transitions from empty to
+ * non-empty.*/
+ U64 sendInterruptHandle;
+
+ /**< specifies interrupt handle. It is used to retrieve the
+ * corresponding interrupt pin from Monitor; and the
+ * interrupt pin is used to connect to the corresponding
+ * interrupt. Used by IOPart-GP only. */
+ U64 recvInterruptHandle;
+
+ /**< specifies interrupt vector. It, interrupt pin, and shared are
+ * used to connect to the corresponding interrupt. Used by
+ * IOPart-GP only. */
+ U32 recvInterruptVector;
+
+ /**< specifies if the recvInterrupt is shared (0 = not shared;
+ * 1 = shared). It, the interrupt pin, and the vector are used to
+ * connect to the corresponding interrupt. Used by IOPart-GP only. */
+ U8 recvInterruptShared;
+ U8 reserved[3]; /* Natural alignment purposes */
+};
+
+struct PciId {
+ U16 Domain;
+ U8 Bus;
+ U8 Slot;
+ U8 Func;
+ U8 Reserved[3]; /* Natural alignment purposes */
+};
+
+struct PciConfigHdr {
+ U16 VendorId;
+ U16 SubSysVendor;
+ U16 DeviceId;
+ U16 SubSysDevice;
+ U32 ClassCode;
+ U32 Reserved; /* Natural alignment purposes */
+};
+
+struct ScsiId {
+ U32 Bus;
+ U32 Target;
+ U32 Lun;
+ U32 Host; /* Command should ignore this for
+ * DiskArrival/RemovalEvents */
+};
+
+struct WWID {
+ U32 wwid1;
+ U32 wwid2;
+};
+
+struct virtDiskInfo {
+ U32 switchNo; /* defined by SWITCH_CREATE */
+ U32 externalPortNo; /* 0 for SAS RAID provided (external)
+ * virtual disks, 1 for virtual disk
+ * images, 2 for gold disk images */
+ U16 VirtualDiskIndex; /* Index of disk descriptor in the
+ * VirtualDisk segment associated with
+ * externalPortNo */
+ U16 Reserved1;
+ U32 Reserved2;
+};
+
+typedef enum {
+ CONTROLVM_ACTION_NONE = 0,
+ CONTROLVM_ACTION_SET_RESTORE = 0x05E7,
+ CONTROLVM_ACTION_CLEAR_RESTORE = 0x0C18,
+ CONTROLVM_ACTION_RESTORING = 0x08E5,
+ CONTROLVM_ACTION_RESTORE_BUSY = 0x0999,
+ CONTROLVM_ACTION_CLEAR_NVRAM = 0xB01
+} CONTROLVM_ACTION;
+
+typedef enum _ULTRA_TOOL_ACTIONS {
+ /* enumeration that defines intended action */
+ ULTRA_TOOL_ACTION_NONE = 0, /* normal boot of boot disk */
+ ULTRA_TOOL_ACTION_INSTALL = 1, /* install source disk(s) to boot
+ * disk */
+ ULTRA_TOOL_ACTION_CAPTURE = 2, /* capture boot disk to target disk(s)
+ * as 'gold image' */
+ ULTRA_TOOL_ACTION_REPAIR = 3, /* use source disk(s) to repair
+ * installation on boot disk */
+ ULTRA_TOOL_ACTION_CLEAN = 4, /* 'scrub' virtual disk before
+ * releasing back to storage pool */
+ ULTRA_TOOL_ACTION_UPGRADE = 5, /* upgrade to use content of images
+ * referenced from newer blueprint */
+ ULTRA_TOOL_ACTION_DIAG = 6, /* use tool to invoke diagnostic script
+ * provided by blueprint */
+ ULTRA_TOOL_ACTION_FAILED = 7, /* used when tool fails installation
+ * and cannot continue */
+ ULTRA_TOOL_ACTION_COUNT = 8
+} ULTRA_TOOL_ACTIONS;
+
+typedef struct _ULTRA_EFI_SPAR_INDICATION {
+ U64 BootToFirmwareUI:1; /* Bit 0: Stop in uefi ui */
+ U64 ClearNvram:1; /* Bit 1: Clear NVRAM */
+ U64 ClearCmos:1; /* Bit 2: Clear CMOS */
+ U64 BootToTool:1; /* Bit 3: Run install tool */
+ /* remaining bits are available */
+} ULTRA_EFI_SPAR_INDICATION;
+
+typedef enum {
+ ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
+ ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
+ ULTRA_CHIPSET_FEATURE_PCIVBUS = 0x00000004
+} ULTRA_CHIPSET_FEATURE;
+
+/** This is the common structure that is at the beginning of every
+ * ControlVm message (both commands and responses) in any ControlVm
+ * queue. Commands are easily distinguished from responses by
+ * looking at the flags.response field.
+ */
+typedef struct _CONTROLVM_MESSAGE_HEADER {
+ U32 Id; /* See CONTROLVM_ID. */
+ /* For requests, indicates the message type. */
+ /* For responses, indicates the type of message we are responding to. */
+
+ U32 MessageSize; /* Includes size of this struct + size
+ * of message */
+ U32 SegmentIndex; /* Index of segment containing Vm
+ * message/information */
+ U32 CompletionStatus; /* Error status code or result of
+ * message completion */
+ struct {
+ U32 failed:1; /**< =1 in a response to signify
+ * failure */
+ U32 responseExpected:1; /**< =1 in all messages that expect a
+ * response (Control ignores this
+ * bit) */
+ U32 server:1; /**< =1 in all bus & device-related
+ * messages where the message
+ * receiver is to act as the bus or
+ * device server */
+ U32 testMessage:1; /**< =1 for testing use only
+ * (Control and Command ignore this
+ * bit) */
+ U32 partialCompletion:1; /**< =1 if there are forthcoming
+ * responses/acks associated
+ * with this message */
+ U32 preserve:1; /**< =1 this is to let us know to
+ * preserve channel contents
+ * (for running guests)*/
+ U32 writerInDiag:1; /**< =1 the DiagWriter is active in the
+ * Diagnostic Partition*/
+
+ /* remaining bits in this 32-bit word are available */
+ } Flags;
+ U32 Reserved; /* Natural alignment */
+ U64 MessageHandle; /* Identifies the particular message instance,
+ * and is used to match particular */
+ /* request instances with the corresponding response instance. */
+ U64 PayloadVmOffset; /* Offset of payload area from start of this
+ * instance of ControlVm segment */
+ U32 PayloadMaxBytes; /* Maximum bytes allocated in payload
+ * area of ControlVm segment */
+ U32 PayloadBytes; /* Actual number of bytes of payload
+ * area to copy between IO/Command; */
+ /* if non-zero, there is a payload to copy. */
+} CONTROLVM_MESSAGE_HEADER;
+
+typedef struct _CONTROLVM_PACKET_DEVICE_CREATE {
+ U32 busNo; /**< bus # (0..n-1) from the msg receiver's
+ * perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /**< bus-relative (0..n-1) device number */
+ U64 channelAddr; /**< Guest physical address of the channel, which
+ * can be dereferenced by the receiver
+ * of this ControlVm command */
+ U64 channelBytes; /**< specifies size of the channel in bytes */
+ GUID dataTypeGuid;/**< specifies format of data in channel */
+ GUID devInstGuid; /**< instance guid for the device */
+ struct InterruptInfo intr; /**< specifies interrupt information */
+} CONTROLVM_PACKET_DEVICE_CREATE; /* for CONTROLVM_DEVICE_CREATE */
+
+typedef struct _CONTROLVM_PACKET_DEVICE_CONFIGURE {
+ U32 busNo; /**< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /**< bus-relative (0..n-1) device number */
+} CONTROLVM_PACKET_DEVICE_CONFIGURE; /* for CONTROLVM_DEVICE_CONFIGURE */
+
+typedef struct _CONTROLVM_MESSAGE_DEVICE_CREATE {
+ CONTROLVM_MESSAGE_HEADER Header;
+ CONTROLVM_PACKET_DEVICE_CREATE Packet;
+} CONTROLVM_MESSAGE_DEVICE_CREATE; /* total 128 bytes */
+
+typedef struct _CONTROLVM_MESSAGE_DEVICE_CONFIGURE {
+ CONTROLVM_MESSAGE_HEADER Header;
+ CONTROLVM_PACKET_DEVICE_CONFIGURE Packet;
+} CONTROLVM_MESSAGE_DEVICE_CONFIGURE; /* total 56 bytes */
+
+/* This is the format for a message in any ControlVm queue. */
+typedef struct _CONTROLVM_MESSAGE_PACKET {
+ union {
+
+ /* BEGIN Request messages */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 deviceCount; /*< indicates the max number of
+ * devices on this bus */
+ U64 channelAddr; /*< Guest physical address of the
+ * channel, which can be
+ * dereferenced by the receiver
+ * of this ControlVm command */
+ U64 channelBytes; /*< size of the channel in bytes */
+ GUID busDataTypeGuid;/*< indicates format of data in bus
+ * channel */
+ GUID busInstGuid; /*< instance guid for the bus */
+ } createBus; /* for CONTROLVM_BUS_CREATE */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 reserved; /* Natural alignment purposes */
+ } destroyBus; /* for CONTROLVM_BUS_DESTROY */
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the
+ * msg receiver's
+ * perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 reserved1; /* for alignment purposes */
+ U64 guestHandle; /* This is used to convert
+ * guest physical address to real
+ * physical address for DMA, for ex. */
+ U64 recvBusInterruptHandle;/*< specifies interrupt
+ * info. It is used by SP to register
+ * to receive interrupts from the CP.
+ * This interrupt is used for bus
+ * level notifications. The
+ * corresponding
+ * sendBusInterruptHandle is kept in
+ * CP. */
+ } configureBus; /* for CONTROLVM_BUS_CONFIGURE */
+
+ /* for CONTROLVM_DEVICE_CREATE */
+ CONTROLVM_PACKET_DEVICE_CREATE createDevice;
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /*< bus-relative (0..n-1) device
+ * number */
+ } destroyDevice; /* for CONTROLVM_DEVICE_DESTROY */
+
+ /* for CONTROLVM_DEVICE_CONFIGURE */
+ CONTROLVM_PACKET_DEVICE_CONFIGURE configureDevice;
+ struct {
+ U32 busNo; /*< bus # (0..n-1) from the msg
+ * receiver's perspective */
+
+ /* Control uses header SegmentIndex field to access bus number... */
+ U32 devNo; /*< bus-relative (0..n-1) device
+ * number */
+ } reconfigureDevice; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ struct {
+ U32 busNo;
+ ULTRA_SEGMENT_STATE state;
+ U8 reserved[2]; /* Natural alignment purposes */
+ } busChangeState; /* for CONTROLVM_BUS_CHANGESTATE */
+ struct {
+ U32 busNo;
+ U32 devNo;
+ ULTRA_SEGMENT_STATE state;
+ struct {
+ U32 physicalDevice:1; /* =1 if message is for
+ * a physical device */
+ /* remaining bits in this 32-bit word are available */
+ } flags;
+ U8 reserved[2]; /* Natural alignment purposes */
+ } deviceChangeState; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ struct {
+ U32 busNo;
+ U32 devNo;
+ ULTRA_SEGMENT_STATE state;
+ U8 reserved[6]; /* Natural alignment purposes */
+ } deviceChangeStateEvent; /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
+ struct {
+ U32 busCount; /*< indicates the max number of busses */
+ U32 switchCount; /*< indicates the max number of
+ * switches (applicable for service
+ * partition only) */
+ ULTRA_CHIPSET_FEATURE features;
+ U32 platformNumber; /* Platform Number */
+ } initChipset; /* for CONTROLVM_CHIPSET_INIT */
+ struct {
+ U32 Options; /*< reserved */
+ U32 Test; /*< bit 0 set to run embedded selftest */
+ } chipsetSelftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+
+ /* END Request messages */
+
+ /* BEGIN Response messages */
+
+ /* END Response messages */
+
+ /* BEGIN Event messages */
+
+ /* END Event messages */
+
+ /* BEGIN Ack messages */
+
+ /* END Ack messages */
+ U64 addr; /*< a physical address of something, that
+ * can be dereferenced by the receiver of
+ * this ControlVm command (depends on
+ * command id) */
+ U64 handle; /*< a handle of something (depends on
+ * command id) */
+ };
+} CONTROLVM_MESSAGE_PACKET;
+
+/* All messages in any ControlVm queue have this layout. */
+typedef struct _CONTROLVM_MESSAGE {
+ CONTROLVM_MESSAGE_HEADER hdr;
+ CONTROLVM_MESSAGE_PACKET cmd;
+} CONTROLVM_MESSAGE;
+
+typedef struct _DEVICE_MAP {
+ GUEST_PHYSICAL_ADDRESS DeviceChannelAddress;
+ U64 DeviceChannelSize;
+ U32 CA_Index;
+ U32 Reserved; /* natural alignment */
+ U64 Reserved2; /* Align structure on 32-byte boundary */
+} DEVICE_MAP;
+
+typedef struct _GUEST_DEVICES {
+ DEVICE_MAP VideoChannel;
+ DEVICE_MAP KeyboardChannel;
+ DEVICE_MAP NetworkChannel;
+ DEVICE_MAP StorageChannel;
+ DEVICE_MAP ConsoleChannel;
+ U32 PartitionIndex;
+ U32 Pad;
+} GUEST_DEVICES;
+
+typedef struct _ULTRA_CONTROLVM_CHANNEL_PROTOCOL {
+ CHANNEL_HEADER Header;
+ GUEST_PHYSICAL_ADDRESS gpControlVm; /* guest physical address of
+ * this channel */
+ GUEST_PHYSICAL_ADDRESS gpPartitionTables; /* guest physical address of
+ * partition tables */
+ GUEST_PHYSICAL_ADDRESS gpDiagGuest; /* guest physical address of
+ * diagnostic channel */
+ GUEST_PHYSICAL_ADDRESS gpBootRomDisk; /* guest phys addr of (read
+ * only) Boot ROM disk */
+ GUEST_PHYSICAL_ADDRESS gpBootRamDisk; /* guest phys addr of writable
+ * Boot RAM disk */
+ GUEST_PHYSICAL_ADDRESS gpAcpiTable; /* guest phys addr of acpi
+ * table */
+ GUEST_PHYSICAL_ADDRESS gpControlChannel; /* guest phys addr of control
+ * channel */
+ GUEST_PHYSICAL_ADDRESS gpDiagRomDisk; /* guest phys addr of diagnostic
+ * ROM disk */
+ GUEST_PHYSICAL_ADDRESS gpNvram; /* guest phys addr of NVRAM
+ * channel */
+ U64 RequestPayloadOffset; /* Offset to request payload area */
+ U64 EventPayloadOffset; /* Offset to event payload area */
+ U32 RequestPayloadBytes; /* Bytes available in request payload
+ * area */
+ U32 EventPayloadBytes; /* Bytes available in event payload area */
+ U32 ControlChannelBytes;
+ U32 NvramChannelBytes; /* Bytes in PartitionNvram segment */
+ U32 MessageBytes; /* sizeof(CONTROLVM_MESSAGE) */
+ U32 MessageCount; /* CONTROLVM_MESSAGE_MAX */
+ GUEST_PHYSICAL_ADDRESS gpSmbiosTable; /* guest phys addr of SMBIOS
+ * tables */
+ GUEST_PHYSICAL_ADDRESS gpPhysicalSmbiosTable; /* guest phys addr of
+ * SMBIOS table */
+ /* ULTRA_MAX_GUESTS_PER_SERVICE */
+ GUEST_DEVICES gpObsoleteGuestDevices[16];
+
+ /* guest physical address of EFI firmware image base */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareImageBase;
+
+ /* guest physical address of EFI firmware entry point */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareEntryPoint;
+
+ /* guest EFI firmware image size */
+ U64 VirtualGuestFirmwareImageSize;
+
+ /* GPA = 1MB where EFI firmware image is copied to */
+ GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareBootBase;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestImageBase;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestImageSize;
+ U64 PrototypeControlChannelOffset;
+ GUEST_PHYSICAL_ADDRESS VirtualGuestPartitionHandle;
+
+ U16 RestoreAction; /* Restore Action field to restore the guest
+ * partition */
+ U16 DumpAction; /* For Windows guests it shows if the visordisk
+ * is running in dump mode */
+ U16 NvramFailCount;
+ U16 SavedCrashMsgCount; /* = CONTROLVM_CRASHMSG_MAX */
+ U32 SavedCrashMsgOffset; /* Offset to request payload area needed
+ * for crash dump */
+ U32 InstallationError; /* Type of error encountered during
+ * installation */
+ U32 InstallationTextId; /* Id of string to display */
+ U16 InstallationRemainingSteps; /* Number of remaining installation
+ * steps (for progress bars) */
+ U8 ToolAction; /* ULTRA_TOOL_ACTIONS Installation Action
+ * field */
+ U8 Reserved; /* alignment */
+ ULTRA_EFI_SPAR_INDICATION EfiSparIndication;
+ ULTRA_EFI_SPAR_INDICATION EfiSparIndicationSupported;
+ U32 SPReserved;
+ U8 Reserved2[28]; /* Force signals to begin on 128-byte cache
+ * line */
+ SIGNAL_QUEUE_HEADER RequestQueue; /* Service or guest partition
+ * uses this queue to send
+ * requests to Control */
+ SIGNAL_QUEUE_HEADER ResponseQueue; /* Control uses this queue to
+ * respond to service or guest
+ * partition requests */
+ SIGNAL_QUEUE_HEADER EventQueue; /* Control uses this queue to
+ * send events to service or
+ * guest partition */
+ SIGNAL_QUEUE_HEADER EventAckQueue; /* Service or guest partition
+ * uses this queue to ack
+ * Control events */
+
+ /* Request fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE RequestMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Response fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE ResponseMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Event fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE EventMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Ack fixed-size message pool - does not include payload */
+ CONTROLVM_MESSAGE EventAckMsg[CONTROLVM_MESSAGE_MAX];
+
+ /* Message stored during IOVM creation to be reused after crash */
+ CONTROLVM_MESSAGE SavedCrashMsg[CONTROLVM_CRASHMSG_MAX];
+} ULTRA_CONTROLVM_CHANNEL_PROTOCOL;
+
+/* Offsets for VM channel attributes... */
+#define VM_CH_REQ_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestQueue)
+#define VM_CH_RESP_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseQueue)
+#define VM_CH_EVENT_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventQueue)
+#define VM_CH_ACK_QUEUE_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckQueue)
+#define VM_CH_REQ_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestMsg)
+#define VM_CH_RESP_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseMsg)
+#define VM_CH_EVENT_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventMsg)
+#define VM_CH_ACK_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckMsg)
+#define VM_CH_CRASH_MSG_OFFSET \
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, SavedCrashMsg)
+
+/* The following header will be located at the beginning of PayloadVmOffset for
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use ConnectionOffset,
+ * InitiatorOffset, and TargetOffset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of TotalLength should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message. See the
+ * following Wiki page for more information:
+ * https://ustr-linux-1.na.uis.unisys.com/spar/index.php/ControlVm_Parameters_Area
+ */
+typedef struct _ULTRA_CONTROLVM_PARAMETERS_HEADER {
+ U32 TotalLength;
+ U32 HeaderLength;
+ U32 ConnectionOffset;
+ U32 ConnectionLength;
+ U32 InitiatorOffset;
+ U32 InitiatorLength;
+ U32 TargetOffset;
+ U32 TargetLength;
+ U32 ClientOffset;
+ U32 ClientLength;
+ U32 NameOffset;
+ U32 NameLength;
+ GUID Id;
+ U32 Revision;
+ U32 Reserved; /* Natural alignment */
+} ULTRA_CONTROLVM_PARAMETERS_HEADER;
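+
+/* Illustrative sketch (assumption: the offsets above are relative to the
+ * start of this parameters area, as the preceding comment suggests): a
+ * receiver that has mapped the payload could locate one of the UTF-8
+ * strings, e.g. the initiator name, through its offset/length pair.
+ */
+#if 0 /* example only */
+static inline char *
+example_get_initiator(ULTRA_CONTROLVM_PARAMETERS_HEADER *hdr)
+{
+ if (hdr->InitiatorLength == 0)
+ return NULL; /* string not present */
+ return (char *)hdr + hdr->InitiatorOffset;
+}
+#endif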
+
+#endif /* __CONTROLVMCHANNEL_H__ */
diff --git a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
new file mode 100644
index 000000000000..c93515eb211d
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
@@ -0,0 +1,427 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*++
+ *
+ * Module Name:
+ *
+ * diagchannel.h
+ *
+ * Abstract:
+ *
+ * This file defines the DiagChannel protocol. This protocol is used to aid in
+ * preserving event data sent by external applications. This protocol provides
+ * a region for event data to reside in. This data will eventually be sent to
+ * the Boot Partition where it will be committed to memory and/or disk. This
+ * file contains platform-independent data that can be built using any
+ * Supervisor build environment (Windows, Linux, EFI).
+ *
+*/
+
+#ifndef _DIAG_CHANNEL_H_
+#define _DIAG_CHANNEL_H_
+
+#include "commontypes.h"
+#include "channel.h"
+
+/* {EEA7A573-DB82-447c-8716-EFBEAAAE4858} */
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_GUID \
+ {0xeea7a573, 0xdb82, 0x447c, \
+ {0x87, 0x16, 0xef, 0xbe, 0xaa, 0xae, 0x48, 0x58} }
+
+static const GUID UltraDiagChannelProtocolGuid =
+ ULTRA_DIAG_CHANNEL_PROTOCOL_GUID;
+
+/* {E850F968-3263-4484-8CA5-2A35D087A5A8} */
+#define ULTRA_DIAG_ROOT_CHANNEL_PROTOCOL_GUID \
+ {0xe850f968, 0x3263, 0x4484, \
+ {0x8c, 0xa5, 0x2a, 0x35, 0xd0, 0x87, 0xa5, 0xa8} }
+
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment this whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID 2
+
+#define ULTRA_DIAG_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraDiagChannelProtocolGuid, \
+ "diag", \
+ sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
+ ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_DIAG_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraDiagChannelProtocolGuid, \
+ "diag", \
+ sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
+ actualBytes, __FILE__, __LINE__, logCtx))
+#define MAX_MODULE_NAME_SIZE 128 /* Maximum length of module name... */
+#define MAX_ADDITIONAL_INFO_SIZE 256 /* Maximum length of any additional info
+ * accompanying event... */
+#define MAX_SUBSYSTEMS 64 /* Maximum number of subsystems allowed in
+ * DiagChannel... */
+#define LOW_SUBSYSTEMS 32 /* Half of MAX_SUBSYSTEMS to allow 64-bit
+ * math */
+#define SUBSYSTEM_DEBUG 0 /* Standard subsystem for debug events */
+#define SUBSYSTEM_DEFAULT 1 /* Default subsystem for legacy calls to
+ * ReportEvent */
+
+/* few useful subsystem mask values */
+#define SUBSYSTEM_MASK_DEBUG 0x01 /* Standard subsystem for debug
+ * events */
+#define SUBSYSTEM_MASK_DEFAULT 0x02 /* Default subsystem for legacy calls to
+ * ReportEvents */
+
+/* Event parameter "Severity" is overloaded with Cause in byte 2 and Severity in
+ * byte 0; bytes 1 and 3 are reserved */
+#define SEVERITY_MASK 0x0FF /* mask out all but the Severity in byte 0 */
+#define CAUSE_MASK 0x0FF0000 /* mask out all but the cause in byte 2 */
+#define CAUSE_SHIFT_AMT 16 /* shift 2 bytes to place it in byte 2 */
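+
+/* Illustrative sketch (not part of this interface): a reader can split the
+ * packed 32-bit "Severity" event parameter back into its Severity (byte 0)
+ * and Cause (byte 2) parts with the masks above.
+ */
+#if 0 /* example only */
+static inline U32 example_severity_of(U32 packed)
+{
+ return packed & SEVERITY_MASK;
+}
+
+static inline U32 example_cause_of(U32 packed)
+{
+ return (packed & CAUSE_MASK) >> CAUSE_SHIFT_AMT;
+}
+#endif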
+
+/* SubsystemSeverityFilter */
+#define SEVERITY_FILTER_MASK 0x0F /* mask out the Cause half, SeverityFilter is
+ * in the lower nibble */
+#define CAUSE_FILTER_MASK 0xF0 /* mask out the Severity half, CauseFilter is in
+ * the upper nibble */
+#define CAUSE_FILTER_SHIFT_AMT 4 /* shift amount to place it in lower or upper
+ * nibble */
+
+/* Copied from EFI's EFI_TIME struct in efidef.h. EFI headers are not allowed
+* in some of the Supervisor areas, such as Monitor, so it has been "ported" here
+* for use in diagnostic event timestamps... */
+typedef struct _DIAG_EFI_TIME {
+ U16 Year; /* 1998 - 20XX */
+ U8 Month; /* 1 - 12 */
+ U8 Day; /* 1 - 31 */
+ U8 Hour; /* 0 - 23 */
+ U8 Minute; /* 0 - 59 */
+ U8 Second; /* 0 - 59 */
+ U8 Pad1;
+ U32 Nanosecond; /* 0 - 999,999,999 */
+ S16 TimeZone; /* -1440 to 1440 or 2047 */
+ U8 Daylight;
+ U8 Pad2;
+} DIAG_EFI_TIME;
+
+typedef enum {
+ ULTRA_COMPONENT_GUEST = 0,
+ ULTRA_COMPONENT_MONITOR = 0x01,
+ ULTRA_COMPONENT_CCM = 0x02, /* Common Control module */
+ /* RESERVED 0x03 - 0x7 */
+
+ /* Ultravisor Components */
+ ULTRA_COMPONENT_BOOT = 0x08,
+ ULTRA_COMPONENT_IDLE = 0x09,
+ ULTRA_COMPONENT_CONTROL = 0x0A,
+ ULTRA_COMPONENT_LOGGER = 0x0B,
+ ULTRA_COMPONENT_ACPI = 0X0C,
+ /* RESERVED 0x0D - 0x0F */
+
+ /* sPAR Components */
+ ULTRA_COMPONENT_COMMAND = 0x10,
+ ULTRA_COMPONENT_IODRIVER = 0x11,
+ ULTRA_COMPONENT_CONSOLE = 0x12,
+ ULTRA_COMPONENT_OPERATIONS = 0x13,
+ ULTRA_COMPONENT_MANAGEMENT = 0x14,
+ ULTRA_COMPONENT_DIAG = 0x15,
+ ULTRA_COMPONENT_HWDIAG = 0x16,
+ ULTRA_COMPONENT_PSERVICES = 0x17,
+ ULTRA_COMPONENT_PDIAG = 0x18
+ /* RESERVED 0x19 - 0x1F */
+} ULTRA_COMPONENT_TYPES;
+
+/* Structure: DIAG_CHANNEL_EVENT
+ *
+ * Purpose: Contains attributes that make up an event to be written to the
+ * DIAG_CHANNEL memory.
+ *
+ * Attributes:
+ * EventId: Id of the diagnostic event to write to memory.
+ * Severity: Severity of the event (Error, Info, etc).
+ * ModuleName: Module/file name where event originated.
+ * LineNumber: Line number in module name where event originated.
+ * Timestamp: Date/time when event was received by ReportEvent, and
+ * written to DiagChannel.
+ * Reserved: Padding to align structure on a 64-byte cache line boundary.
+ * AdditionalInfo: Array of characters for additional event info (may be
+ * empty). */
+typedef struct _DIAG_CHANNEL_EVENT {
+ U32 EventId;
+ U32 Severity;
+ U8 ModuleName[MAX_MODULE_NAME_SIZE];
+ U32 LineNumber;
+ DIAG_EFI_TIME Timestamp; /* Size = 16 bytes */
+ U32 PartitionNumber; /* Filled in by Diag Switch as pool blocks are
+ * filled */
+ U16 VirtualProcessorNumber;
+ U16 LogicalProcessorNumber;
+ U8 ComponentType; /* ULTRA_COMPONENT_TYPES */
+ U8 Subsystem;
+ U16 Reserved0; /* pad to U64 alignment */
+ U32 BlockNumber; /* filled in by DiagSwitch as pool blocks are
+ * filled */
+ U32 BlockNumberHigh;
+ U32 EventNumber; /* filled in by DiagSwitch as pool blocks are
+ * filled */
+ U32 EventNumberHigh;
+
+ /* The BlockNumber and EventNumber fields are set only by DiagSwitch
+ * and referenced only by WinDiagDisplay formatting tool as
+ * additional diagnostic information. Other tools including
+ * WinDiagDisplay currently ignore these 'Reserved' bytes. */
+ U8 Reserved[8];
+ U8 AdditionalInfo[MAX_ADDITIONAL_INFO_SIZE];
+
+ /* NOTE: Changes to DIAG_CHANNEL_EVENT generally need to be reflected in
+ * existing copies *
+ * - for AppOS at
+ * GuestLinux/visordiag_early/supervisor_diagchannel.h *
+ * - for WinDiagDisplay at
+ * EFI/Ultra/Tools/WinDiagDisplay/WinDiagDisplay/diagstruct.h */
+} DIAG_CHANNEL_EVENT;
+
+/* Levels of severity for diagnostic events, in order from lowest severity to
+* highest (i.e. fatal errors are the most severe, and should always be logged,
+* but info events rarely need to be logged except during debugging). The values
+* DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid severity
+* values. They exist merely to delineate the list, so that future additions
+* won't require changes to the driver (i.e. when checking for out-of-range
+* severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE and
+* DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events but
+* they are valid for controlling the amount of event data. This enum is also
+* defined in DotNet\sParFramework\ControlFramework\ControlFramework.cs. If a
+* change is made to this enum, it should also be reflected in that file. */
+typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
+ DIAG_SEVERITY_OVERRIDE = DIAG_SEVERITY_ENUM_BEGIN,
+ DIAG_SEVERITY_VERBOSE = DIAG_SEVERITY_OVERRIDE, /* 0 */
+ DIAG_SEVERITY_INFO = DIAG_SEVERITY_VERBOSE + 1, /* 1 */
+ DIAG_SEVERITY_WARNING = DIAG_SEVERITY_INFO + 1, /* 2 */
+ DIAG_SEVERITY_ERR = DIAG_SEVERITY_WARNING + 1, /* 3 */
+ DIAG_SEVERITY_PRINT = DIAG_SEVERITY_ERR + 1, /* 4 */
+ DIAG_SEVERITY_SHUTOFF = DIAG_SEVERITY_PRINT + 1, /* 5 */
+ DIAG_SEVERITY_ENUM_END = DIAG_SEVERITY_SHUTOFF, /* 5 */
+ DIAG_SEVERITY_NONFATAL_ERR = DIAG_SEVERITY_ERR,
+ DIAG_SEVERITY_FATAL_ERR = DIAG_SEVERITY_PRINT
+} DIAG_SEVERITY;
+
+/* Event Cause enums
+*
+* Levels of cause for diagnostic events, in order from least to greatest cause.
+* Internal errors are most urgent since ideally they should never exist.
+* Invalid requests are preventable by avoiding invalid inputs.
+* Operations errors depend on environmental factors which may impact which
+* requests are possible.
+* Manifest provides an intermediate value to capture firmware and configuration
+* version information.
+* Trace provides supplemental debug information in release firmware.
+* Unknown Log captures unclassified LogEvent calls.
+* Debug is the least urgent since it provides supplemental debug information
+* only in debug firmware.
+* Unknown Debug captures unclassified DebugEvent calls.
+* This enum is also defined in
+* DotNet\sParFramework\ControlFramework\ControlFramework.cs.
+* If a change is made to this enum, it should also be reflected in that
+* file. */
+
+
+
+/* A cause value "DIAG_CAUSE_FILE_XFER" together with a severity value of
+* "DIAG_SEVERITY_PRINT" (=4), is used for transferring text or binary file to
+* the Diag partition. This cause-severity combination will be used by Logger
+* DiagSwitch to segregate events into block types. The files are transferred in
+* 256 byte chunks maximum, in the AdditionalInfo field of the DIAG_CHANNEL_EVENT
+* structure. In the file transfer mode, some event fields will have different
+* meaning: EventId specifies the file offset, severity specifies the block type,
+* ModuleName specifies the filename, LineNumber specifies the number of valid
+* data bytes in an event and AdditionalInfo contains up to 256 bytes of data. */
+
+/* The Diag DiagWriter appends event blocks to events.raw as today, and for data
+ * blocks uses DIAG_CHANNEL_EVENT
+ * PartitionNumber to extract and append 'AdditionalInfo' to filename (specified
+ * by ModuleName). */
+
+/* The Dell PDiag uses this new mechanism to stash DSET .zip onto the
+ * 'diagnostic' virtual disk. */
+typedef enum {
+ DIAG_CAUSE_UNKNOWN = 0,
+ DIAG_CAUSE_UNKNOWN_DEBUG = DIAG_CAUSE_UNKNOWN + 1, /* 1 */
+ DIAG_CAUSE_DEBUG = DIAG_CAUSE_UNKNOWN_DEBUG + 1, /* 2 */
+ DIAG_CAUSE_UNKNOWN_LOG = DIAG_CAUSE_DEBUG + 1, /* 3 */
+ DIAG_CAUSE_TRACE = DIAG_CAUSE_UNKNOWN_LOG + 1, /* 4 */
+ DIAG_CAUSE_MANIFEST = DIAG_CAUSE_TRACE + 1, /* 5 */
+ DIAG_CAUSE_OPERATIONS_ERROR = DIAG_CAUSE_MANIFEST + 1, /* 6 */
+ DIAG_CAUSE_INVALID_REQUEST = DIAG_CAUSE_OPERATIONS_ERROR + 1, /* 7 */
+ DIAG_CAUSE_INTERNAL_ERROR = DIAG_CAUSE_INVALID_REQUEST + 1, /* 8 */
+ DIAG_CAUSE_FILE_XFER = DIAG_CAUSE_INTERNAL_ERROR + 1, /* 9 */
+ DIAG_CAUSE_ENUM_END = DIAG_CAUSE_FILE_XFER /* 9 */
+} DIAG_CAUSE;
+
+/* Event Cause category defined into the byte 2 of Severity */
+#define CAUSE_DEBUG (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT)
+#define CAUSE_TRACE (DIAG_CAUSE_TRACE << CAUSE_SHIFT_AMT)
+#define CAUSE_MANIFEST (DIAG_CAUSE_MANIFEST << CAUSE_SHIFT_AMT)
+#define CAUSE_OPERATIONS_ERROR (DIAG_CAUSE_OPERATIONS_ERROR << CAUSE_SHIFT_AMT)
+#define CAUSE_INVALID_REQUEST (DIAG_CAUSE_INVALID_REQUEST << CAUSE_SHIFT_AMT)
+#define CAUSE_INTERNAL_ERROR (DIAG_CAUSE_INTERNAL_ERROR << CAUSE_SHIFT_AMT)
+#define CAUSE_FILE_XFER (DIAG_CAUSE_FILE_XFER << CAUSE_SHIFT_AMT)
+#define CAUSE_ENUM_END CAUSE_FILE_XFER
+
+/* Combine Cause and Severity categories into one */
+#define CAUSE_DEBUG_SEVERITY_VERBOSE \
+ (CAUSE_DEBUG | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_TRACE_SEVERITY_VERBOSE \
+ (CAUSE_TRACE | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_MANIFEST_SEVERITY_VERBOSE\
+ (CAUSE_MANIFEST | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_OPERATIONS_SEVERITY_VERBOSE \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_INVALID_SEVERITY_VERBOSE \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_VERBOSE)
+#define CAUSE_INTERNAL_SEVERITY_VERBOSE \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_VERBOSE)
+
+#define CAUSE_DEBUG_SEVERITY_INFO \
+ (CAUSE_DEBUG | DIAG_SEVERITY_INFO)
+#define CAUSE_TRACE_SEVERITY_INFO \
+ (CAUSE_TRACE | DIAG_SEVERITY_INFO)
+#define CAUSE_MANIFEST_SEVERITY_INFO \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_INFO)
+#define CAUSE_OPERATIONS_SEVERITY_INFO \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_INFO)
+#define CAUSE_INVALID_SEVERITY_INFO \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_INFO)
+#define CAUSE_INTERNAL_SEVERITY_INFO \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_INFO)
+
+#define CAUSE_DEBUG_SEVERITY_WARN \
+ (CAUSE_DEBUG | DIAG_SEVERITY_WARNING)
+#define CAUSE_TRACE_SEVERITY_WARN \
+ (CAUSE_TRACE | DIAG_SEVERITY_WARNING)
+#define CAUSE_MANIFEST_SEVERITY_WARN \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_WARNING)
+#define CAUSE_OPERATIONS_SEVERITY_WARN \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_WARNING)
+#define CAUSE_INVALID_SEVERITY_WARN \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_WARNING)
+#define CAUSE_INTERNAL_SEVERITY_WARN \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_WARNING)
+
+#define CAUSE_DEBUG_SEVERITY_ERR \
+ (CAUSE_DEBUG | DIAG_SEVERITY_ERR)
+#define CAUSE_TRACE_SEVERITY_ERR \
+ (CAUSE_TRACE | DIAG_SEVERITY_ERR)
+#define CAUSE_MANIFEST_SEVERITY_ERR \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_ERR)
+#define CAUSE_OPERATIONS_SEVERITY_ERR \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_ERR)
+#define CAUSE_INVALID_SEVERITY_ERR \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_ERR)
+#define CAUSE_INTERNAL_SEVERITY_ERR \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_ERR)
+
+#define CAUSE_DEBUG_SEVERITY_PRINT \
+ (CAUSE_DEBUG | DIAG_SEVERITY_PRINT)
+#define CAUSE_TRACE_SEVERITY_PRINT \
+ (CAUSE_TRACE | DIAG_SEVERITY_PRINT)
+#define CAUSE_MANIFEST_SEVERITY_PRINT \
+ (CAUSE_MANIFEST | DIAG_SEVERITY_PRINT)
+#define CAUSE_OPERATIONS_SEVERITY_PRINT \
+ (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_PRINT)
+#define CAUSE_INVALID_SEVERITY_PRINT \
+ (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_PRINT)
+#define CAUSE_INTERNAL_SEVERITY_PRINT \
+ (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_PRINT)
+#define CAUSE_FILE_XFER_SEVERITY_PRINT \
+ (CAUSE_FILE_XFER | DIAG_SEVERITY_PRINT)
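+
+/* For illustration only -- a minimal sketch, not part of the protocol
+ * definition above: composing a combined Cause/Severity value by hand,
+ * assuming the CAUSE_SHIFT_AMT and DIAG_SEVERITY_* values defined earlier in
+ * this header, is equivalent to using the predefined macros, e.g.
+ * diag_combine_cause_severity(DIAG_CAUSE_TRACE, DIAG_SEVERITY_INFO) ==
+ * CAUSE_TRACE_SEVERITY_INFO. */
+static inline U32
+diag_combine_cause_severity(DIAG_CAUSE cause, U32 severity)
+{
+	return ((U32) cause << CAUSE_SHIFT_AMT) | severity;
+}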
+
+/* Structure: DIAG_CHANNEL_PROTOCOL_HEADER
+ *
+ * Purpose: Contains attributes that make up the header specific to the
+ * DIAG_CHANNEL area.
+ *
+ * Attributes:
+ *
+ * DiagLock: Diag Channel spinlock.
+ *
+ * IsChannelInitialized: 1 iff SignalInit was called for this channel; otherwise
+ * 0, and assume the channel is not ready for use yet.
+ *
+ * Reserved: Padding to align the fields in this structure.
+ *
+ * SubsystemSeverityFilter: Level of severity on a subsystem basis that controls
+ * whether events are logged. Any event's severity for a
+ * particular subsystem below this level will be discarded.
+ */
+typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
+ volatile U32 DiagLock;
+ U8 IsChannelInitialized;
+ U8 Reserved[3];
+ U8 SubsystemSeverityFilter[64];
+} DIAG_CHANNEL_PROTOCOL_HEADER;
+
+/* The Diagram for the Diagnostic Channel: */
+/* ----------------------- */
+/* | Channel Header | Defined by ULTRA_CHANNEL_PROTOCOL */
+/* ----------------------- */
+/* | Signal Queue Header | Defined by SIGNAL_QUEUE_HEADER */
+/* ----------------------- */
+/* | DiagChannel Header | Defined by DIAG_CHANNEL_PROTOCOL_HEADER */
+/* ----------------------- */
+/* | Channel Event Info | Defined by (DIAG_CHANNEL_EVENT * MAX_EVENTS) */
+/* ----------------------- */
+/* | Reserved | Reserved (pad out to 4MB) */
+/* ----------------------- */
+
+/* Offsets/sizes for diagnostic channel attributes... */
+#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(ULTRA_CHANNEL_PROTOCOL))
+#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(SIGNAL_QUEUE_HEADER))
+#define DIAG_CH_PROTOCOL_HEADER_OFFSET \
+ (DIAG_CH_QUEUE_HEADER_OFFSET + DIAG_CH_QUEUE_HEADER_SIZE)
+#define DIAG_CH_PROTOCOL_HEADER_SIZE (sizeof(DIAG_CHANNEL_PROTOCOL_HEADER))
+#define DIAG_CH_EVENT_OFFSET \
+ (DIAG_CH_PROTOCOL_HEADER_OFFSET + DIAG_CH_PROTOCOL_HEADER_SIZE)
+#define DIAG_CH_SIZE (4096 * 1024)
+
+/* For Control and Idle Partitions with larger (8 MB) diagnostic (root)
+ * channels */
+#define DIAG_CH_LRG_SIZE (2 * DIAG_CH_SIZE) /* 8 MB */
+
+/*
+ * Structure: ULTRA_DIAG_CHANNEL_PROTOCOL
+ *
+ * Purpose: Contains attributes that make up the DIAG_CHANNEL memory.
+ *
+ * Attributes:
+ *
+ * CommonChannelHeader: Header info common to all channels.
+ *
+ * QueueHeader: Queue header common to all channels - used to determine where to
+ * store event.
+ *
+ * DiagChannelHeader: Diagnostic channel header info (see
+ * DIAG_CHANNEL_PROTOCOL_HEADER comments).
+ *
+ * Events: Area where diagnostic events (up to MAX_EVENTS) are written.
+ *
+ * Reserved: Reserved area to allow for correct channel size padding.
+ */
+typedef struct _ULTRA_DIAG_CHANNEL_PROTOCOL {
+ ULTRA_CHANNEL_PROTOCOL CommonChannelHeader;
+ SIGNAL_QUEUE_HEADER QueueHeader;
+ DIAG_CHANNEL_PROTOCOL_HEADER DiagChannelHeader;
+ DIAG_CHANNEL_EVENT Events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
+ sizeof(DIAG_CHANNEL_EVENT)];
+} ULTRA_DIAG_CHANNEL_PROTOCOL;
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/iochannel.h b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
new file mode 100644
index 000000000000..8de1d249d55f
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
@@ -0,0 +1,933 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION */
+/* All rights reserved. */
+#ifndef __IOCHANNEL_H__
+#define __IOCHANNEL_H__
+
+/*
+* Everything needed for IOPart-GuestPart communication is defined in
+* this file. Note: Everything is OS-independent because this file is
+* used by Windows, Linux and possibly EFI drivers. */
+
+
+/*
+* Communication flow between the IOPart and GuestPart uses the channel header's
+* channel state. The following states are currently being used:
+* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
+*
+* Additional states will be used later. No locking is needed to switch between
+* states due to the following rules:
+*
+* 1. IOPart is the only partition allowed to change from UNINIT
+* 2. IOPart is the only partition allowed to change from CHANNEL_ATTACHING
+* 3. GuestPart is the only partition allowed to change from CHANNEL_ATTACHED
+*
+* The state changes are the following: IOPart sees the channel is in UNINIT,
+* UNINIT -> CHANNEL_ATTACHING (performed only by IOPart)
+* CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart)
+* CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart)
+*/
+
+#include "commontypes.h"
+#include "vmcallinterface.h"
+
+#define _ULTRA_CONTROLVM_CHANNEL_INLINE_
+#include <linux/dma-direction.h>
+#include "controlvmchannel.h"
+#include "vbuschannel.h"
+#undef _ULTRA_CONTROLVM_CHANNEL_INLINE_
+#include "channel.h"
+
+/*
+ * CHANNEL Guids
+ */
+
+#include "channel_guid.h"
+
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
+ ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment these whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
+#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
+#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_VHBA_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVhbaChannelProtocolGuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VHBA_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVhbaChannelProtocolGuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, actualBytes, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VNIC_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVnicChannelProtocolGuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VNIC_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVnicChannelProtocolGuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, actualBytes, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VSWITCH_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, UltraVswitchChannelProtocolGuid, \
+ "vswitch", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+#define ULTRA_VSWITCH_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVswitchChannelProtocolGuid, \
+ "vswitch", MIN_IO_CHANNEL_SIZE, \
+ actualBytes, \
+ __FILE__, __LINE__, logCtx))
+/*
+* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
+* IO Partition is defined below. */
+
+
+/*
+* Defines and enums.
+*/
+
+#define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
+#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
+
+/* these define the two queues per data channel between iopart and
+ * ioguestparts */
+#define IOCHAN_TO_IOPART 0 /* used by ioguestpart to 'insert' signals to
+ * iopart */
+#define IOCHAN_FROM_GUESTPART 0 /* used by iopart to 'remove' signals from
+ * ioguestpart - same queue as previous queue */
+
+#define IOCHAN_TO_GUESTPART 1 /* used by iopart to 'insert' signals to
+ * ioguestpart */
+#define IOCHAN_FROM_IOPART 1 /* used by ioguestpart to 'remove' signals from
+ * iopart - same queue as previous queue */
+
+/* these define the two queues per control channel between controlpart and "its"
+ * guests, which includes the iopart */
+#define CTRLCHAN_TO_CTRLGUESTPART 0 /* used by ctrlguestpart to 'insert' signals
+ * to ctrlpart */
+#define CTLRCHAN_FROM_CTRLPART 0 /* used by ctrlpart to 'remove' signals from
+ * ctrlguestpart - same queue as previous
+ * queue */
+
+#define CTRLCHAN_TO_CTRLPART 1 /* used by ctrlpart to 'insert' signals to
+ * ctrlguestpart */
+#define CTRLCHAN_FROM_CTRLGUESTPART 1 /* used by ctrlguestpart to 'remove'
+ * signals from ctrlpart - same queue as
+ * previous queue */
+
+/* these define the Event & Ack queues per control channel. Events are generated
+* by CTRLGUESTPART and sent to CTRLPART; Acks are generated by CTRLPART and sent
+* to CTRLGUESTPART. */
+#define CTRLCHAN_EVENT_TO_CTRLPART 2 /* used by ctrlguestpart to 'insert' Events
+ * to ctrlpart */
+#define CTRLCHAN_EVENT_FROM_CTRLGUESTPART 2 /* used by ctrlpart to 'remove'
+ * Events from ctrlguestpart */
+
+#define CTRLCHAN_ACK_TO_CTRLGUESTPART 3 /* used by ctrlpart to 'insert' Acks to
+ * ctrlguestpart */
+#define CTRLCHAN_ACK_FROM_CTRLPART 3 /* used by ctrlguestpart to 'remove' Acks
+ * from ctrlpart */
+
+/* size of cdb - i.e., scsi cmnd */
+#define MAX_CMND_SIZE 16
+
+#define MAX_SENSE_SIZE 64
+
+#define MAX_PHYS_INFO 64
+
+/* Because GuestToGuestCopy is limited to 4KiB segments, and we have limited the
+* Emulex Driver to 256 scatter list segments via the lpfc_sg_seg_cnt parameter,
+* the maximum I/O size is limited to 256 * 4 KiB = 1 MB */
+#define MAX_IO_SIZE (1024*1024) /* 1 MB */
+
+/* NOTE 1: lpfc defines its support for segments in
+* #define LPFC_SG_SEG_CNT 64
+*
+* NOTE 2: In Linux, the frags array in the skb is currently allocated to be
+* MAX_SKB_FRAGS entries, which is 18 and thus smaller than MAX_PHYS_INFO for
+* now. */
+
+#ifndef MAX_SERIAL_NUM
+#define MAX_SERIAL_NUM 32
+#endif /* MAX_SERIAL_NUM */
+
+#define MAX_SCSI_BUSES 1
+#define MAX_SCSI_TARGETS 8
+#define MAX_SCSI_LUNS 16
+#define MAX_SCSI_FROM_HOST 0xFFFFFFFF /* Indicator to use Physical HBA
+ * SCSI Host value */
+
+/* various types of network packets that can be sent in cmdrsp */
+typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
+ * incoming packet */
+ /* virtnic -> uisnic */
+ NET_RCV, /* incoming packet received */
+ /* uisnic -> virtpci */
+ NET_XMIT, /* for outgoing net packets */
+ /* virtnic -> uisnic */
+ NET_XMIT_DONE, /* outgoing packet xmitted */
+ /* uisnic -> virtpci */
+ NET_RCV_ENBDIS, /* enable/disable packet reception */
+ /* virtnic -> uisnic */
+ NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet
+ * reception */
+ /* uisnic -> virtnic */
+ NET_RCV_PROMISC, /* enable/disable promiscuous mode */
+ /* virtnic -> uisnic */
+ NET_CONNECT_STATUS, /* indicate the loss or restoration of a network
+ * connection */
+ /* uisnic -> virtnic */
+ NET_MACADDR, /* indicates the client has requested to update
+ * its MAC addr */
+ NET_MACADDR_ACK, /* MAC address change acknowledged */
+
+} NET_TYPES;
+
+#define ETH_HEADER_SIZE 14 /* size of ethernet header */
+
+#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HEADER_SIZE + ETH_MIN_DATA_SIZE)
+
+#define ETH_DEF_DATA_SIZE 1500 /* default data size */
+#define ETH_DEF_PACKET_SIZE (ETH_HEADER_SIZE + ETH_DEF_DATA_SIZE)
+
+#define ETH_MAX_MTU 16384 /* maximum data size */
+
+#ifndef MAX_MACADDR_LEN
+#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
+#endif /* MAX_MACADDR_LEN */
+
+#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
+ (((U8 *) (Address))[0] & ((U8) 0x02))
+#define NIC_VENDOR_ID 0x0008000B
+
+/* various types of scsi task mgmt commands */
+typedef enum { TASK_MGMT_ABORT_TASK =
+ 1, TASK_MGMT_BUS_RESET, TASK_MGMT_LUN_RESET,
+ TASK_MGMT_TARGET_RESET,
+} TASK_MGMT_TYPES;
+
+/* various types of vdisk mgmt commands */
+typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
+} VDISK_MGMT_TYPES;
+
+/* this is used in the vdest field */
+#define VDEST_ALL 0xFFFF
+
+#define MIN_NUMSIGNALS 64
+#define MAX_NUMSIGNALS 4096
+
+/* MAX_NET_RCV_BUFS specifies the number of rcv buffers that are created by each
+* guest's virtnic and posted to uisnic. Uisnic, for each channel, keeps the rcv
+* buffers posted and uses them to receive data on behalf of the guest's virtnic.
+* NOTE: num_rcv_bufs is configurable for each VNIC, so the following is
+* simply an upper limit on what each VNIC can provide. It is set to half of
+* MIN_NUMSIGNALS to prevent queue-full deadlocks. */
+#define MAX_NET_RCV_BUFS (MIN_NUMSIGNALS / 2)
+
+/*
+ * structs with pragma pack */
+
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+
+#pragma pack(push, 1)
+
+struct guest_phys_info {
+ U64 address;
+ U64 length;
+};
+
+#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
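+/* Worked example: struct guest_phys_info is 16 bytes (two U64s, packed), so
+ * with 4 KiB pages this evaluates to 4096 / 16 = 256 entries per page. */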
+
+struct uisscsi_dest {
+ U32 channel; /* channel == bus number */
+ U32 id; /* id == target number */
+ U32 lun; /* lun == logical unit number */
+};
+
+struct vhba_wwnn {
+ U32 wwnn1;
+ U32 wwnn2;
+};
+
+/* WARNING: Values stored in this structure must contain maximum counts (not
+ * maximum values). */
+struct vhba_config_max { /* 20 bytes */
+ U32 max_channel; /* maximum channel for devices attached to this
+ * bus */
+ U32 max_id; /* maximum SCSI ID for devices attached to this
+ * bus */
+ U32 max_lun; /* maximum SCSI LUN for devices attached to this
+ * bus */
+ U32 cmd_per_lun; /* maximum number of outstanding commands per
+ * lun that are allowed at one time */
+ U32 max_io_size; /* maximum io size for devices attached to this
+ * bus */
+ /* max io size is often determined by the resource of the hba. e.g */
+ /* max scatter gather list length * page size / sector size */
+};
+
+struct uiscmdrsp_scsi {
+ void *scsicmd; /* the handle to the cmd that was received -
+ * send it back as is in the rsp packet. */
+ U8 cmnd[MAX_CMND_SIZE]; /* the cdb for the command */
+ U32 bufflen; /* length of data to be transferred out or in */
+ U16 guest_phys_entries; /* Number of entries in scatter-gather (sg)
+ * list */
+ struct guest_phys_info gpi_list[MAX_PHYS_INFO]; /* physical address
+ * information for each
+ * fragment */
+ enum dma_data_direction data_dir; /* direction of the data, if any */
+ struct uisscsi_dest vdest; /* identifies the virtual hba, id,
+ * channel, lun to which cmd was sent */
+
+ /* the following fields are needed to queue the rsp back to cmd
+ * originator */
+ int linuxstat; /* the original Linux status - for use by linux
+ * vdisk code */
+ U8 scsistat; /* the scsi status */
+ U8 addlstat; /* non-scsi status - covers cases like timeout
+ * needed by windows guests */
+#define ADDL_RESET 1
+#define ADDL_TIMEOUT 2
+#define ADDL_INTERNAL_ERROR 3
+#define ADDL_SEL_TIMEOUT 4
+#define ADDL_CMD_TIMEOUT 5
+#define ADDL_BAD_TARGET 6
+#define ADDL_RETRY 7
+
+ /* the following fields are needed to determine the result of the command */
+ U8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
+ /* it holds the sense_data struct; */
+ /* see that struct for details. */
+ void *vdisk; /* contains pointer to the vdisk so that we can clean up
+ * when the IO completes. */
+ int no_disk_result; /* used to return no disk inquiry result */
+ /* when no_disk_result is set to 1, */
+ /* scsi.scsistat is SAM_STAT_GOOD */
+ /* scsi.addlstat is 0 */
+ /* scsi.linuxstat is SAM_STAT_GOOD */
+ /* That is, there is NO error. */
+};
+
+/*
+* Defines to support sending correct inquiry result when no disk is
+* configured. */
+
+/* From SCSI SPC2 -
+ *
+ * If the target is not capable of supporting a device on this logical unit, the
+ * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
+ * and PERIPHERAL DEVICE TYPE set to 1Fh).
+ *
+ *The device server is capable of supporting the specified peripheral device
+ *type on this logical unit. However, the physical device is not currently
+ *connected to this logical unit.
+ */
+
+#define DEV_NOT_PRESENT 0x7f /* old name - compatibility */
+#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */
+ /* peripheral type of 0x1f */
+ /* specifies no device but target present */
+
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
+ /* peripheral type of 0 - disk */
+ /* specifies device capable, but not present */
+
+#define DEV_PROC_CAPABLE_NOT_PRESENT 0x23 /* peripheral qualifier of 0x1 */
+ /* peripheral type of 3 - processor */
+ /* specifies device capable, but not present */
+
+#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */
+ /* must be returned for lun 0. */
+
+/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+* in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
+* & revision. Yikes! So let us always send back 36 bytes, the minimum for
+* inquiry result. */
+#define NO_DISK_INQUIRY_RESULT_LEN 36
+
+#define MIN_INQUIRY_RESULT_LEN 5 /* we need at least 5 bytes for an inquiry
+ * result */
+
+/* SCSI device version for no disk inquiry result */
+#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
+
+/* Windows and Linux want different things for a non-existent lun. So, we'll let
+ * caller pass in the peripheral qualifier and type.
+ * NOTE: buf[4] holds the SCSI ADDITIONAL LENGTH, defined as (n-4) where n is
+ * the offset of the last byte; since n = length-1, we return length-5. */
+
+#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
+ do { \
+ MEMSET(buf, 0, \
+ MINNUM(len, \
+ (unsigned int) NO_DISK_INQUIRY_RESULT_LEN)); \
+ buf[2] = (U8) SCSI_SPC2_VER; \
+ if (lun == 0) { \
+ buf[0] = (U8) lun0notpresent; \
+ buf[3] = (U8) DEV_HISUPPORT; \
+ } else \
+ buf[0] = (U8) notpresent; \
+ buf[4] = (U8) ( \
+ MINNUM(len, \
+ (unsigned int) NO_DISK_INQUIRY_RESULT_LEN) - 5); \
+ if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
+ buf[8] = 'D'; \
+ buf[9] = 'E'; \
+ buf[10] = 'L'; \
+ buf[11] = 'L'; \
+ buf[16] = 'P'; \
+ buf[17] = 'S'; \
+ buf[18] = 'E'; \
+ buf[19] = 'U'; \
+ buf[20] = 'D'; \
+ buf[21] = 'O'; \
+ buf[22] = ' '; \
+ buf[23] = 'D'; \
+ buf[24] = 'E'; \
+ buf[25] = 'V'; \
+ buf[26] = 'I'; \
+ buf[27] = 'C'; \
+ buf[28] = 'E'; \
+ buf[30] = ' '; \
+ buf[31] = '.'; \
+ } \
+ } while (0)
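+
+/* Illustrative usage sketch (the wrapper below is hypothetical, not part of
+ * this interface): filling the canned 36-byte inquiry response for a
+ * configured-but-absent disk, where lun 0 reports "disk capable, not present"
+ * and all other luns report "not capable" per the SPC2 text quoted above. */
+static inline void
+example_set_no_disk_inquiry(U8 *buf, unsigned int len, U32 lun)
+{
+	SET_NO_DISK_INQUIRY_RESULT(buf, len, lun,
+				   DEV_DISK_CAPABLE_NOT_PRESENT,
+				   DEV_NOT_CAPABLE);
+}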
+
+
+/*
+* Struct & Defines to support sense information.
+*/
+
+
+/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
+* initialized in exactly the manner that is recommended in Windows (hence the
+* odd values).
+* When set, these fields will have the following values:
+* ErrorCode = 0x70 indicates current error
+* Valid = 1 indicates sense info is valid
+* SenseKey contains sense key as defined by SCSI specs.
+* AdditionalSenseCode contains the additional sense code as defined by SCSI specs.
+* AdditionalSenseCodeQualifier contains qualifier to sense code as defined by
+* scsi docs.
+* AdditionalSenseLength will be sizeof(sense_data)-8 = 10.
+*/
+struct sense_data {
+ U8 ErrorCode:7;
+ U8 Valid:1;
+ U8 SegmentNumber;
+ U8 SenseKey:4;
+ U8 Reserved:1;
+ U8 IncorrectLength:1;
+ U8 EndOfMedia:1;
+ U8 FileMark:1;
+ U8 Information[4];
+ U8 AdditionalSenseLength;
+ U8 CommandSpecificInformation[4];
+ U8 AdditionalSenseCode;
+ U8 AdditionalSenseCodeQualifier;
+ U8 FieldReplaceableUnitCode;
+ U8 SenseKeySpecific[3];
+};
+
+/* some SCSI ADSENSE codes */
+#ifndef SCSI_ADSENSE_LUN_NOT_READY
+#define SCSI_ADSENSE_LUN_NOT_READY 0x04
+#endif /* */
+#ifndef SCSI_ADSENSE_ILLEGAL_COMMAND
+#define SCSI_ADSENSE_ILLEGAL_COMMAND 0x20
+#endif /* */
+#ifndef SCSI_ADSENSE_ILLEGAL_BLOCK
+#define SCSI_ADSENSE_ILLEGAL_BLOCK 0x21
+#endif /* */
+#ifndef SCSI_ADSENSE_INVALID_CDB
+#define SCSI_ADSENSE_INVALID_CDB 0x24
+#endif /* */
+#ifndef SCSI_ADSENSE_INVALID_LUN
+#define SCSI_ADSENSE_INVALID_LUN 0x25
+#endif /* */
+#ifndef SCSI_ADWRITE_PROTECT
+#define SCSI_ADWRITE_PROTECT 0x27
+#endif /* */
+#ifndef SCSI_ADSENSE_MEDIUM_CHANGED
+#define SCSI_ADSENSE_MEDIUM_CHANGED 0x28
+#endif /* */
+#ifndef SCSI_ADSENSE_BUS_RESET
+#define SCSI_ADSENSE_BUS_RESET 0x29
+#endif /* */
+#ifndef SCSI_ADSENSE_NO_MEDIA_IN_DEVICE
+#define SCSI_ADSENSE_NO_MEDIA_IN_DEVICE 0x3a
+#endif /* */
+
+struct net_pkt_xmt {
+ int len; /* full length of data in the packet */
+ int num_frags; /* number of fragments in frags containing data */
+ struct phys_info frags[MAX_PHYS_INFO]; /* physical page information for
+ * each fragment */
+ char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */
+ struct {
+
+ /* these are needed for csum at uisnic end */
+ U8 valid; /* 1 = rest of this struct is valid - else
+ * ignore */
+ U8 hrawoffv; /* 1 = hrawoff is valid */
+ U8 nhrawoffv; /* 1 = nhrawoff is valid */
+ U16 protocol; /* specifies packet protocol */
+ U32 csum; /* value used to set skb->csum at IOPart */
+ U32 hrawoff; /* value used to set skb->h.raw at IOPart */
+ /* hrawoff points to the start of the TRANSPORT LAYER HEADER */
+ U32 nhrawoff; /* value used to set skb->nh.raw at IOPart */
+ /* nhrawoff points to the start of the NETWORK LAYER HEADER */
+ } lincsum;
+
+ /* **** NOTE ****
+ * The full packet is described in frags but the ethernet header is
+ * separately kept in ethhdr so that uisnic doesn't have to "MAP" the
+ * guest memory to get to the header. uisnic needs ethhdr to
+ * determine how to route the packet.
+ */
+};
+
+struct net_pkt_xmtdone {
+ U32 xmt_done_result; /* result of NET_XMIT */
+#define XMIT_SUCCESS 0
+#define XMIT_FAILED 1
+};
+
+/* RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size(64). The
+* reason is that dev_alloc_skb, which is used to generate RCV_POST skbs in
+* virtnic, requires that there is "overhead" in the buffer, and pads 16 bytes. I
+* prefer to use 1 full cache line size for "overhead" so that transfers are
+* better. IOVM requires that a buffer be represented by 1 phys_info structure
+* which can only cover page_size. */
+#define RCVPOST_BUF_SIZE 4032
+#define MAX_NET_RCV_CHAIN \
+ ((ETH_MAX_MTU+ETH_HEADER_SIZE + RCVPOST_BUF_SIZE-1) / RCVPOST_BUF_SIZE)
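+/* Worked example: with ETH_MAX_MTU = 16384, ETH_HEADER_SIZE = 14 and
+ * RCVPOST_BUF_SIZE = 4032, this is (16384 + 14 + 4031) / 4032 = 5, i.e. at
+ * most five chained receive buffers are needed for one maximum-sized frame. */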
+
+struct net_pkt_rcvpost {
+ /* rcv buf size must be large enough to include ethernet data len +
+ * ethernet header len - we are choosing 2K because it is guaranteed
+ * to be describable */
+ struct phys_info frag; /* physical page information for the
+ * single fragment 2K rcv buf */
+ U64 UniqueNum; /* This is used to make sure that
+ * receive posts are returned to */
+ /* the Adapter which sent them originally. */
+};
+
+struct net_pkt_rcv {
+
+ /* the number of receive buffers that can be chained */
+ /* is based on max mtu and size of each rcv buf */
+ U32 rcv_done_len; /* length of received data */
+ U8 numrcvbufs; /* number of receive buffers that contain the */
+ /* incoming data; guest end MUST chain these together. */
+ void *rcvbuf[MAX_NET_RCV_CHAIN]; /* the list of receive buffers
+ * that must be chained; */
+ /* each entry is a receive buffer provided by NET_RCV_POST. */
+ /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
+ U64 UniqueNum;
+ U32 RcvsDroppedDelta;
+};
+
+struct net_pkt_enbdis {
+ void *context;
+ U16 enable; /* 1 = enable, 0 = disable */
+};
+
+struct net_pkt_macaddr {
+ void *context;
+ U8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+};
+
+/* cmd rsp packet used for VNIC network traffic */
+struct uiscmdrsp_net {
+ NET_TYPES type;
+ void *buf;
+ union {
+ struct net_pkt_xmt xmt; /* used for NET_XMIT */
+ struct net_pkt_xmtdone xmtdone; /* used for NET_XMIT_DONE */
+ struct net_pkt_rcvpost rcvpost; /* used for NET_RCV_POST */
+ struct net_pkt_rcv rcv; /* used for NET_RCV */
+ struct net_pkt_enbdis enbdis; /* used for NET_RCV_ENBDIS, */
+ /* NET_RCV_ENBDIS_ACK, */
+ /* NET_RCV_PROMSIC, */
+ /* and NET_CONNECT_STATUS */
+ struct net_pkt_macaddr macaddr;
+ };
+};
+
+struct uiscmdrsp_scsitaskmgmt {
+ TASK_MGMT_TYPES tasktype;
+
+ /* the type of task */
+ struct uisscsi_dest vdest;
+
+ /* the vdisk for which this task mgmt is generated */
+ void *scsicmd;
+
+ /* This is some handle that the guest has saved off for its own use.
+ * Its value is preserved by iopart & returned as is in the task mgmt
+ * rsp. */
+ void *notify;
+
+ /* For linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * For windows guests, this is a pointer to a location that a waiting
+ * thread is testing to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart & returned
+ * as is in the task mgmt rsp. */
+ void *notifyresult;
+
+ /* this is a handle to location in guest where the result of the
+ * taskmgmt command (result field) is to be saved off when the response
+ * is handled. Its value is preserved by iopart & returned as is in
+ * the task mgmt rsp. */
+ char result;
+
+ /* result of taskmgmt command - set by IOPart - values are: */
+#define TASK_MGMT_FAILED 0
+#define TASK_MGMT_SUCCESS 1
+};
+
+/* The following is used by uissd to send disk add/remove notifications to
+ * Guest */
+/* Note that the vHba pointer is not used by the Client/Guest side. */
+struct uiscmdrsp_disknotify {
+ U8 add; /* 0-remove, 1-add */
+ void *vHba; /* Pointer to vhba_info for channel info to
+ * route msg */
+ U32 channel, id, lun; /* SCSI Path of Disk to be added or removed */
+};
+
+/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+* to the IOVM. */
+struct uiscmdrsp_vdiskmgmt {
+ VDISK_MGMT_TYPES vdisktype;
+
+ /* the type of task */
+ struct uisscsi_dest vdest;
+
+ /* the vdisk for which this task mgmt is generated */
+ void *scsicmd;
+
+ /* This is some handle that the guest has saved off for its own use.
+ * Its value is preserved by iopart & returned as is in the task mgmt
+ * rsp. */
+ void *notify;
+
+ /* For linux guests, this is a pointer to wait_queue_head that a
+ * thread is waiting on to see if the taskmgmt command has completed.
+ * For windows guests, this is a pointer to a location that a waiting
+ * thread is testing to see if the taskmgmt command has completed.
+ * When the rsp is received by guest, the thread receiving the
+ * response uses this to notify the thread waiting for taskmgmt
+ * command completion. Its value is preserved by iopart & returned
+ * as is in the task mgmt rsp. */
+ void *notifyresult;
+
+ /* this is a handle to location in guest where the result of the
+ * taskmgmt command (result field) is to be saved off when the response
+ * is handled. Its value is preserved by iopart & returned as is in
+ * the task mgmt rsp. */
+ char result;
+
+ /* result of taskmgmt command - set by IOPart - values are: */
+#define VDISK_MGMT_FAILED 0
+#define VDISK_MGMT_SUCCESS 1
+};
+
+/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+struct uiscmdrsp {
+ char cmdtype;
+
+ /* describes what type of information is in the struct */
+#define CMD_SCSI_TYPE 1
+#define CMD_NET_TYPE 2
+#define CMD_SCSITASKMGMT_TYPE 3
+#define CMD_NOTIFYGUEST_TYPE 4
+#define CMD_VDISKMGMT_TYPE 5
+ union {
+ struct uiscmdrsp_scsi scsi;
+ struct uiscmdrsp_net net;
+ struct uiscmdrsp_scsitaskmgmt scsitaskmgmt;
+ struct uiscmdrsp_disknotify disknotify;
+ struct uiscmdrsp_vdiskmgmt vdiskmgmt;
+ };
+ void *private_data; /* used to send the response when the cmd is
+ * done (scsi & scsitaskmgmt). */
+ struct uiscmdrsp *next; /* General Purpose Queue Link */
+ struct uiscmdrsp *activeQ_next; /* Used to track active commands */
+ struct uiscmdrsp *activeQ_prev; /* Used to track active commands */
+};
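+
+/* Illustrative sketch (the handler names are hypothetical): a consumer of the
+ * queue dispatches on cmdtype to pick the matching member of the union, e.g.
+ *
+ *	switch (cmdrsp->cmdtype) {
+ *	case CMD_SCSI_TYPE:
+ *		handle_scsi(&cmdrsp->scsi);
+ *		break;
+ *	case CMD_NET_TYPE:
+ *		handle_net(&cmdrsp->net);
+ *		break;
+ *	default:
+ *		break;
+ *	}
+ */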
+
+/* This is just the header of the IO channel. It is assumed that directly after
+* this header there is a large region of memory which contains the command and
+* response queues as specified in cmdQ and rspQ SIGNAL_QUEUE_HEADERS. */
+typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
+ CHANNEL_HEADER ChannelHeader;
+ SIGNAL_QUEUE_HEADER cmdQ;
+ SIGNAL_QUEUE_HEADER rspQ;
+ union {
+ struct {
+ struct vhba_wwnn wwnn; /* 8 bytes */
+ struct vhba_config_max max; /* 20 bytes */
+ } vhba; /* 28 */
+ struct {
+ U8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
+ U32 num_rcv_bufs; /* 4 */
+ U32 mtu; /* 4 */
+ GUID zoneGuid; /* 16 */
+ } vnic; /* total 30 */
+ };
+
+#define MAX_CLIENTSTRING_LEN 1024
+ U8 clientString[MAX_CLIENTSTRING_LEN]; /* NULL terminated - so holds
+ * max - 1 bytes */
+} ULTRA_IO_CHANNEL_PROTOCOL;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* define offsets to members of struct uiscmdrsp */
+#define OFFSET_CMDTYPE OFFSETOF(struct uiscmdrsp, cmdtype)
+#define OFFSET_SCSI OFFSETOF(struct uiscmdrsp, scsi)
+#define OFFSET_NET OFFSETOF(struct uiscmdrsp, net)
+#define OFFSET_SCSITASKMGMT OFFSETOF(struct uiscmdrsp, scsitaskmgmt)
+#define OFFSET_NEXT OFFSETOF(struct uiscmdrsp, next)
+
+/* define offsets to members of struct uiscmdrsp_net */
+#define OFFSET_TYPE OFFSETOF(struct uiscmdrsp_net, type)
+#define OFFSET_BUF OFFSETOF(struct uiscmdrsp_net, buf)
+#define OFFSET_XMT OFFSETOF(struct uiscmdrsp_net, xmt)
+#define OFFSET_XMT_DONE_RESULT OFFSETOF(struct uiscmdrsp_net, xmtdone)
+#define OFFSET_RCVPOST OFFSETOF(struct uiscmdrsp_net, rcvpost)
+#define OFFSET_RCV_DONE_LEN OFFSETOF(struct uiscmdrsp_net, rcv)
+#define OFFSET_ENBDIS OFFSETOF(struct uiscmdrsp_net, enbdis)
+
+/* define offsets to members of struct net_pkt_rcvpost */
+#define OFFSET_TOTALLEN OFFSETOF(struct net_pkt_rcvpost, totallen)
+#define OFFSET_FRAG OFFSETOF(struct net_pkt_rcvpost, frag)
+
+/*
+* INLINE functions for initializing and accessing I/O data channels
+*/
+
+
+#define NUMSIGNALS(x, q) (((ULTRA_IO_CHANNEL_PROTOCOL *)(x))->q.MaxSignalSlots)
+#define SIZEOF_PROTOCOL (COVER(sizeof(ULTRA_IO_CHANNEL_PROTOCOL), 64))
+#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
+
+#define IO_CHANNEL_SIZE(x) COVER(SIZEOF_PROTOCOL + \
+ (NUMSIGNALS(x, cmdQ) + \
+ NUMSIGNALS(x, rspQ)) * SIZEOF_CMDRSP, 4096)
+#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
+ 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
+#ifdef __GNUC__
+/* These defines should only ever be used in service partitions */
+/* because they rely on the size of uiscmdrsp */
+#define QSLOTSFROMBYTES(bytes) (((bytes-SIZEOF_PROTOCOL)/2)/SIZEOF_CMDRSP)
+#define QSIZEFROMBYTES(bytes) (QSLOTSFROMBYTES(bytes)*SIZEOF_CMDRSP)
+#define SignalQInit(x) \
+ do { \
+ x->cmdQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
+ x->cmdQ.oSignalBase = SIZEOF_PROTOCOL - \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
+ x->cmdQ.SignalSize = SIZEOF_CMDRSP; \
+ x->cmdQ.MaxSignalSlots = \
+ QSLOTSFROMBYTES(x->ChannelHeader.Size); \
+ x->cmdQ.MaxSignals = x->cmdQ.MaxSignalSlots - 1; \
+ x->rspQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
+ x->rspQ.oSignalBase = \
+ (SIZEOF_PROTOCOL + x->cmdQ.Size) - \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, rspQ); \
+ x->rspQ.SignalSize = SIZEOF_CMDRSP; \
+ x->rspQ.MaxSignalSlots = \
+ QSLOTSFROMBYTES(x->ChannelHeader.Size); \
+ x->rspQ.MaxSignals = x->rspQ.MaxSignalSlots - 1; \
+ x->ChannelHeader.oChannelSpace = \
+ OFFSETOF(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
+ } while (0)
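+
+/* Reading the macro above: oSignalBase is stored relative to each
+ * SIGNAL_QUEUE_HEADER itself, so the cmdQ signal slots begin immediately
+ * after the protocol header (at SIZEOF_PROTOCOL from the channel start) and
+ * the rspQ slots begin immediately after the cmdQ slots. */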
+
+#define INIT_CLIENTSTRING(chan, type, clientStr, clientStrLen) \
+ do { \
+ if (clientStr) { \
+ chan->ChannelHeader.oClientString = \
+ OFFSETOF(type, clientString); \
+ MEMCPY(chan->clientString, clientStr, \
+ MINNUM(clientStrLen, \
+ (U32) (MAX_CLIENTSTRING_LEN - 1))); \
+ chan->clientString[MINNUM(clientStrLen, \
+ (U32) (MAX_CLIENTSTRING_LEN \
+ - 1))] \
+ = '\0'; \
+ } \
+ else \
+ if (clientStrLen > 0) \
+ return 0; \
+ } while (0)
+
+
+#define ULTRA_IO_CHANNEL_SERVER_READY(x, chanId, logCtx) \
+ ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, CHANNELSRV_READY, \
+ logCtx);
+
+#define ULTRA_IO_CHANNEL_SERVER_NOTREADY(x, chanId, logCtx) \
+ ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, \
+ CHANNELSRV_UNINITIALIZED, logCtx);
+
+static inline int ULTRA_VHBA_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ struct vhba_wwnn *wwnn,
+ struct vhba_config_max *max,
+ unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes) {
+ MEMSET(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ x->ChannelHeader.VersionId = ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID;
+ x->ChannelHeader.Signature = ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE;
+ x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
+ x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
+ x->ChannelHeader.Size = COVER(bytes, 4096);
+ x->ChannelHeader.Type = UltraVhbaChannelProtocolGuid;
+ x->ChannelHeader.ZoneGuid = Guid0;
+ x->vhba.wwnn = *wwnn;
+ x->vhba.max = *max;
+ INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
+ clientStrLen);
+ SignalQInit(x);
+ if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
+ return 0;
+ }
+ if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
+ return 0;
+ }
+ return 1;
+}
+
+static inline void ULTRA_VHBA_set_max(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ struct vhba_config_max *max) {
+ x->vhba.max = *max;
+}
+
+static inline int ULTRA_VNIC_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
+ unsigned char *macaddr,
+ U32 num_rcv_bufs, U32 mtu,
+ GUID zoneGuid,
+ unsigned char *clientStr,
+ U32 clientStrLen,
+ U64 bytes) {
+ MEMSET(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ x->ChannelHeader.VersionId = ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID;
+ x->ChannelHeader.Signature = ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE;
+ x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
+ x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
+ x->ChannelHeader.Size = COVER(bytes, 4096);
+ x->ChannelHeader.Type = UltraVnicChannelProtocolGuid;
+ x->ChannelHeader.ZoneGuid = Guid0;
+ MEMCPY(x->vnic.macaddr, macaddr, MAX_MACADDR_LEN);
+ x->vnic.num_rcv_bufs = num_rcv_bufs;
+ x->vnic.mtu = mtu;
+ x->vnic.zoneGuid = zoneGuid;
+ INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
+ clientStrLen);
+ SignalQInit(x);
+ if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
+ return 0;
+ }
+ if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
+ (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
+ return 0;
+ }
+ return 1;
+}
+
+#endif /* __GNUC__ */
+
+/*
+* INLINE function for expanding a guest's pfn-off-size into multiple 4K page
+* pfn-off-size entries.
+*/
+
+
+/* we deal with 4K page sizes when it comes to passing page information
+ * between */
+/* Guest and IOPartition. */
+#define PI_PAGE_SIZE 0x1000
+#define PI_PAGE_MASK 0x0FFF
+#define PI_PAGE_SHIFT 12
+
+/* returns next non-zero index on success or zero on failure (i.e. out of
+ * room)
+ */
+static INLINE U16
+add_physinfo_entries(U32 inp_pfn, /* input - specifies the pfn to be used
+ * to add entries */
+ U16 inp_off, /* input - specifies the off to be used
+ * to add entries */
+ U32 inp_len, /* input - specifies the len to be used
+ * to add entries */
+ U16 index, /* input - index in array at which new
+ * entries are added */
+ U16 max_pi_arr_entries, /* input - specifies the maximum
+ * entries pi_arr can hold */
+ struct phys_info pi_arr[]) /* input & output - array to
+ * which entries are added */
+{
+ U32 len;
+ U16 i, firstlen;
+
+ firstlen = PI_PAGE_SIZE - inp_off;
+ if (inp_len <= firstlen) {
+
+ /* the input entry spans only one page - add as is */
+ if (index >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index].pi_pfn = inp_pfn;
+ pi_arr[index].pi_off = (U16) inp_off;
+ pi_arr[index].pi_len = (U16) inp_len;
+ return index + 1;
+ }
+
+ /* this entry spans multiple pages */
+ for (len = inp_len, i = 0; len;
+ len -= pi_arr[index + i].pi_len, i++) {
+ if (index + i >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index + i].pi_pfn = inp_pfn + i;
+ if (i == 0) {
+ pi_arr[index].pi_off = inp_off;
+ pi_arr[index].pi_len = firstlen;
+ }
+
+ else {
+ pi_arr[index + i].pi_off = 0;
+ pi_arr[index + i].pi_len =
+ (U16) MINNUM(len, (U32) PI_PAGE_SIZE);
+ }
+
+ }
+ return index + i;
+}
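+
+/* Illustrative usage sketch (the wrapper and values below are hypothetical):
+ * describing a 9000-byte guest buffer that starts 100 bytes into pfn 0x1234
+ * splits it across three pi_arr entries of 3996, 4096 and 908 bytes, and the
+ * call returns 3 (the next free index). */
+static INLINE U16
+example_describe_guest_buffer(struct phys_info pi_arr[], U16 max_entries)
+{
+	return add_physinfo_entries(0x1234, 100, 9000, 0, max_entries, pi_arr);
+}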
+
+#endif /* __IOCHANNEL_H__ */
diff --git a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
new file mode 100644
index 000000000000..99dbbcf3d11e
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
@@ -0,0 +1,135 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSCHANNEL_H__
+#define __VBUSCHANNEL_H__
+
+/* The vbus channel is the channel area provided via the BUS_CREATE controlvm
+ * message for each virtual bus. This channel area is provided to both server
+ * and client ends of the bus. The channel header area is initialized by
+ * the server, and the remaining information is filled in by the client.
+ * We currently use this for the client to provide various information about
+ * the client devices and client drivers for the server end to see.
+ */
+#include "commontypes.h"
+#include "vbusdeviceinfo.h"
+#include "channel.h"
+
+/* {193b331b-c58f-11da-95a9-00e08161165f} */
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_GUID \
+ {0x193b331b, 0xc58f, 0x11da, \
+ {0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f} }
+static const GUID UltraVbusChannelProtocolGuid =
+ ULTRA_VBUS_CHANNEL_PROTOCOL_GUID;
+
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+
+/* Must increment this whenever you insert or delete fields within this channel
+* struct. Also increment whenever you change the meaning of fields within this
+* channel struct so as to break pre-existing software. Note that you can
+* usually add fields to the END of the channel struct withOUT needing to
+* increment this. */
+#define ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+
+#define ULTRA_VBUS_CHANNEL_OK_CLIENT(pChannel, logCtx) \
+ (ULTRA_check_channel_client(pChannel, \
+ UltraVbusChannelProtocolGuid, \
+ "vbus", \
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
+ ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE, \
+ __FILE__, __LINE__, logCtx))
+
+#define ULTRA_VBUS_CHANNEL_OK_SERVER(actualBytes, logCtx) \
+ (ULTRA_check_channel_server(UltraVbusChannelProtocolGuid, \
+ "vbus", \
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
+ actualBytes, \
+ __FILE__, __LINE__, logCtx))
+
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+typedef struct _ULTRA_VBUS_HEADERINFO {
+ U32 structBytes; /* size of this struct in bytes */
+ U32 deviceInfoStructBytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
+ U32 devInfoCount; /* num of items in DevInfo member */
+ /* (this is the allocated size) */
+ U32 chpInfoByteOffset; /* byte offset from beginning of this struct */
+ /* to the ChpInfo struct (below) */
+ U32 busInfoByteOffset; /* byte offset from beginning of this struct */
+ /* to the BusInfo struct (below) */
+ U32 devInfoByteOffset; /* byte offset from beginning of this struct */
+ /* to the DevInfo array (below) */
+ U8 reserved[104];
+} ULTRA_VBUS_HEADERINFO;
+
+typedef struct _ULTRA_VBUS_CHANNEL_PROTOCOL {
+ ULTRA_CHANNEL_PROTOCOL ChannelHeader; /* initialized by server */
+ ULTRA_VBUS_HEADERINFO HdrInfo; /* initialized by server */
+ /* the remainder of this channel is filled in by the client */
+ ULTRA_VBUS_DEVICEINFO ChpInfo; /* describes client chipset device and
+ * driver */
+ ULTRA_VBUS_DEVICEINFO BusInfo; /* describes client bus device and
+ * driver */
+ ULTRA_VBUS_DEVICEINFO DevInfo[0]; /* describes client device and
+ * driver for */
+ /* each device on the bus */
+} ULTRA_VBUS_CHANNEL_PROTOCOL;
+
+#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
+ (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
+ sizeof(ULTRA_VBUS_DEVICEINFO)))
+#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
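+
+/* Worked example: each ULTRA_VBUS_DEVICEINFO is padded to 256 bytes (see
+ * vbusdeviceinfo.h), so a bus provisioned for 32 devices adds 32 * 256 =
+ * 8192 bytes of DevInfo on top of the fixed header, with VBUS_CH_SIZE()
+ * rounding the total up to a 4096-byte boundary. */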
+
+static INLINE void
+ULTRA_VBUS_init_channel(ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *x,
+ int bytesAllocated)
+{
+ /* Please note that the memory at <x> does NOT necessarily have space
+ * for DevInfo structs allocated at the end, which is why we do NOT use
+ * <bytesAllocated> to clear. */
+ memset_io(x, 0, sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL));
+ if (bytesAllocated < (int) sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL))
+ return;
+ writel(ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID,
+ &x->ChannelHeader.VersionId);
+ writeq(ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE,
+ &x->ChannelHeader.Signature);
+ writel(CHANNELSRV_READY, &x->ChannelHeader.SrvState);
+ writel(sizeof(x->ChannelHeader), &x->ChannelHeader.HeaderSize);
+ writeq(bytesAllocated, &x->ChannelHeader.Size);
+ memcpy_toio(&x->ChannelHeader.Type, &UltraVbusChannelProtocolGuid,
+ sizeof(x->ChannelHeader.Type));
+ memcpy_toio(&x->ChannelHeader.ZoneGuid, &Guid0,
+ sizeof(x->ChannelHeader.ZoneGuid));
+ writel(sizeof(ULTRA_VBUS_HEADERINFO), &x->HdrInfo.structBytes);
+ writel(sizeof(ULTRA_VBUS_HEADERINFO), &x->HdrInfo.chpInfoByteOffset);
+ writel(readl(&x->HdrInfo.chpInfoByteOffset) +
+ sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.busInfoByteOffset);
+ writel(readl(&x->HdrInfo.busInfoByteOffset)
+ + sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.devInfoByteOffset);
+ writel(sizeof(ULTRA_VBUS_DEVICEINFO),
+ &x->HdrInfo.deviceInfoStructBytes);
+ bytesAllocated -= (sizeof(ULTRA_CHANNEL_PROTOCOL)
+ + readl(&x->HdrInfo.devInfoByteOffset));
+ writel(bytesAllocated / readl(&x->HdrInfo.deviceInfoStructBytes),
+ &x->HdrInfo.devInfoCount);
+}
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h b/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h
new file mode 100644
index 000000000000..de30d321d982
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h
@@ -0,0 +1,92 @@
+/* controlvmcompletionstatus.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Defines for all valid values returned in the response message header
+ * completionStatus field. See controlvmchannel.h for description of
+ * the header: _CONTROLVM_MESSAGE_HEADER.
+ */
+
+#ifndef __CONTROLVMCOMPLETIONSTATUS_H__
+#define __CONTROLVMCOMPLETIONSTATUS_H__
+
+/* General Errors------------------------------------------------------[0-99] */
+#define CONTROLVM_RESP_SUCCESS 0
+#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1
+#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2
+#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4
+#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5
+
+/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
+#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100
+#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101
+
+/* Maximum Limit----------------------------------------------------[200-299] */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+/* Payload and Parameter Related------------------------------------[400-499] */
+#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
+ * DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
+#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
+/* Specified[Packet Structure] Value-------------------------------[500-599] */
+#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
+ * BUS_CONFIGURE,
+ * DEVICE_CREATE,
+ * DEVICE_CONFIG
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
+ /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE */
+/* Partition Driver Callback Interface----------------------[600-699] */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* Unable to invoke VIRTPCI callback */
+#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY */
+/* VIRTPCI Callback returned error */
+#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606 /* SWITCH_ATTACHEXTPORT,
+ * SWITCH_DETACHEXTPORT
+ * DEVICE_CONFIGURE */
+
+/* generic device callback returned error */
+/* Bus Related------------------------------------------------------[700-799] */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+/* Channel Related--------------------------------------------------[800-899] */
+#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
+ * DEVICE_DESTROY */
+#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+/* Chipset Shutdown Related---------------------------------------[1000-1099] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000
+#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+
+/* Chipset Stop Related-------------------------------------------[1100-1199] */
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100
+#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101
+
+/* Device Related-------------------------------------------------[1400-1499] */
+#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400
+
+#endif /* __CONTROLVMCOMPLETIONSTATUS_H__ not defined */
diff --git a/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h b/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h
new file mode 100644
index 000000000000..4c6294d20606
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h
@@ -0,0 +1,310 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Please note that this file is to be used ONLY for defining diagnostic
+ * subsystem values for the appos (sPAR Linux service partitions) component.
+ */
+#ifndef __APPOS_SUBSYSTEMS_H__
+#define __APPOS_SUBSYSTEMS_H__
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+
+static inline char *
+subsys_unknown_to_s(int subsys, char *s, int n)
+{
+ snprintf(s, n, "SUBSYS-%-2.2d", subsys);
+ s[n - 1] = '\0';
+ return s;
+}
+
+#define SUBSYS_TO_MASK(subsys) (1ULL << (subsys))
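+
+/* Illustrative sketch: a per-subsystem enable mask is built by OR-ing
+ * SUBSYS_TO_MASK() values together, e.g. a hypothetical filter covering only
+ * the virtual NIC and HBA clients (subsystems 6 and 7, defined below) would
+ * be (SUBSYS_TO_MASK(6) | SUBSYS_TO_MASK(7)). */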
+
+/* The first SUBSYS_APPOS_MAX subsystems are the same for each AppOS type
+ * (IOVM, SMS, etc.) The rest have unique values for each AppOS type.
+ */
+#define SUBSYS_APPOS_MAX 16
+
+#define SUBSYS_APPOS_DEFAULT 1 /* or "other" */
+#define SUBSYS_APPOS_CHIPSET 2 /* controlvm and other */
+ /* low-level sPAR activity */
+#define SUBSYS_APPOS_BUS 3 /* sPAR bus */
+/* DAK #define SUBSYS_APPOS_DIAG 4 // diagnostics and dump */
+#define SUBSYS_APPOS_CHANNELACCESS 5 /* generic channel access */
+#define SUBSYS_APPOS_NICCLIENT 6 /* virtual NIC client */
+#define SUBSYS_APPOS_HBACLIENT 7 /* virtual HBA client */
+#define SUBSYS_APPOS_CONSOLESERIAL 8 /* sPAR virtual serial console */
+#define SUBSYS_APPOS_UISLIB 9 /* */
+#define SUBSYS_APPOS_VRTCUPDD 10 /* */
+#define SUBSYS_APPOS_WATCHDOG 11 /* watchdog timer and healthcheck */
+#define SUBSYS_APPOS_13 13 /* available */
+#define SUBSYS_APPOS_14 14 /* available */
+#define SUBSYS_APPOS_15 15 /* available */
+#define SUBSYS_APPOS_16 16 /* available */
+static inline char *
+subsys_generic_to_s(int subsys, char *s, int n)
+{
+ switch (subsys) {
+ case SUBSYS_APPOS_DEFAULT:
+ strncpy(s, "APPOS_DEFAULT", n);
+ break;
+ case SUBSYS_APPOS_CHIPSET:
+ strncpy(s, "APPOS_CHIPSET", n);
+ break;
+ case SUBSYS_APPOS_BUS:
+ strncpy(s, "APPOS_BUS", n);
+ break;
+ case SUBSYS_APPOS_CHANNELACCESS:
+ strncpy(s, "APPOS_CHANNELACCESS", n);
+ break;
+ case SUBSYS_APPOS_NICCLIENT:
+ strncpy(s, "APPOS_NICCLIENT", n);
+ break;
+ case SUBSYS_APPOS_HBACLIENT:
+ strncpy(s, "APPOS_HBACLIENT", n);
+ break;
+ case SUBSYS_APPOS_CONSOLESERIAL:
+ strncpy(s, "APPOS_CONSOLESERIAL", n);
+ break;
+ case SUBSYS_APPOS_UISLIB:
+ strncpy(s, "APPOS_UISLIB", n);
+ break;
+ case SUBSYS_APPOS_VRTCUPDD:
+ strncpy(s, "APPOS_VRTCUPDD", n);
+ break;
+ case SUBSYS_APPOS_WATCHDOG:
+ strncpy(s, "APPOS_WATCHDOG", n);
+ break;
+ case SUBSYS_APPOS_13:
+ strncpy(s, "APPOS_13", n);
+ break;
+ case SUBSYS_APPOS_14:
+ strncpy(s, "APPOS_14", n);
+ break;
+ case SUBSYS_APPOS_15:
+ strncpy(s, "APPOS_15", n);
+ break;
+ case SUBSYS_APPOS_16:
+ strncpy(s, "APPOS_16", n);
+ break;
+ default:
+ subsys_unknown_to_s(subsys, s, n);
+ break;
+ }
+ s[n - 1] = '\0';
+ return s;
+}
+
+/* CONSOLE */
+
+#define SUBSYS_CONSOLE_VIDEO (SUBSYS_APPOS_MAX + 1) /* 17 */
+#define SUBSYS_CONSOLE_KBDMOU (SUBSYS_APPOS_MAX + 2) /* 18 */
+#define SUBSYS_CONSOLE_04 (SUBSYS_APPOS_MAX + 4)
+#define SUBSYS_CONSOLE_05 (SUBSYS_APPOS_MAX + 5)
+#define SUBSYS_CONSOLE_06 (SUBSYS_APPOS_MAX + 6)
+#define SUBSYS_CONSOLE_07 (SUBSYS_APPOS_MAX + 7)
+#define SUBSYS_CONSOLE_08 (SUBSYS_APPOS_MAX + 8)
+#define SUBSYS_CONSOLE_09 (SUBSYS_APPOS_MAX + 9)
+#define SUBSYS_CONSOLE_10 (SUBSYS_APPOS_MAX + 10)
+#define SUBSYS_CONSOLE_11 (SUBSYS_APPOS_MAX + 11)
+#define SUBSYS_CONSOLE_12 (SUBSYS_APPOS_MAX + 12)
+#define SUBSYS_CONSOLE_13 (SUBSYS_APPOS_MAX + 13)
+#define SUBSYS_CONSOLE_14 (SUBSYS_APPOS_MAX + 14)
+#define SUBSYS_CONSOLE_15 (SUBSYS_APPOS_MAX + 15)
+#define SUBSYS_CONSOLE_16 (SUBSYS_APPOS_MAX + 16)
+#define SUBSYS_CONSOLE_17 (SUBSYS_APPOS_MAX + 17)
+#define SUBSYS_CONSOLE_18 (SUBSYS_APPOS_MAX + 18)
+#define SUBSYS_CONSOLE_19 (SUBSYS_APPOS_MAX + 19)
+#define SUBSYS_CONSOLE_20 (SUBSYS_APPOS_MAX + 20)
+#define SUBSYS_CONSOLE_21 (SUBSYS_APPOS_MAX + 21)
+#define SUBSYS_CONSOLE_22 (SUBSYS_APPOS_MAX + 22)
+#define SUBSYS_CONSOLE_23 (SUBSYS_APPOS_MAX + 23)
+#define SUBSYS_CONSOLE_24 (SUBSYS_APPOS_MAX + 24)
+#define SUBSYS_CONSOLE_25 (SUBSYS_APPOS_MAX + 25)
+#define SUBSYS_CONSOLE_26 (SUBSYS_APPOS_MAX + 26)
+#define SUBSYS_CONSOLE_27 (SUBSYS_APPOS_MAX + 27)
+#define SUBSYS_CONSOLE_28 (SUBSYS_APPOS_MAX + 28)
+#define SUBSYS_CONSOLE_29 (SUBSYS_APPOS_MAX + 29)
+#define SUBSYS_CONSOLE_30 (SUBSYS_APPOS_MAX + 30)
+#define SUBSYS_CONSOLE_31 (SUBSYS_APPOS_MAX + 31)
+#define SUBSYS_CONSOLE_32 (SUBSYS_APPOS_MAX + 32)
+#define SUBSYS_CONSOLE_33 (SUBSYS_APPOS_MAX + 33)
+#define SUBSYS_CONSOLE_34 (SUBSYS_APPOS_MAX + 34)
+#define SUBSYS_CONSOLE_35 (SUBSYS_APPOS_MAX + 35)
+#define SUBSYS_CONSOLE_36 (SUBSYS_APPOS_MAX + 36)
+#define SUBSYS_CONSOLE_37 (SUBSYS_APPOS_MAX + 37)
+#define SUBSYS_CONSOLE_38 (SUBSYS_APPOS_MAX + 38)
+#define SUBSYS_CONSOLE_39 (SUBSYS_APPOS_MAX + 39)
+#define SUBSYS_CONSOLE_40 (SUBSYS_APPOS_MAX + 40)
+#define SUBSYS_CONSOLE_41 (SUBSYS_APPOS_MAX + 41)
+#define SUBSYS_CONSOLE_42 (SUBSYS_APPOS_MAX + 42)
+#define SUBSYS_CONSOLE_43 (SUBSYS_APPOS_MAX + 43)
+#define SUBSYS_CONSOLE_44 (SUBSYS_APPOS_MAX + 44)
+#define SUBSYS_CONSOLE_45 (SUBSYS_APPOS_MAX + 45)
+#define SUBSYS_CONSOLE_46 (SUBSYS_APPOS_MAX + 46)
+
+static inline char *
+subsys_console_to_s(int subsys, char *s, int n)
+{
+ switch (subsys) {
+ case SUBSYS_CONSOLE_VIDEO:
+ strncpy(s, "CONSOLE_VIDEO", n);
+ break;
+ case SUBSYS_CONSOLE_KBDMOU:
+ strncpy(s, "CONSOLE_KBDMOU", n);
+ break;
+ case SUBSYS_CONSOLE_04:
+ strncpy(s, "CONSOLE_04", n);
+ break;
+ case SUBSYS_CONSOLE_05:
+ strncpy(s, "CONSOLE_05", n);
+ break;
+ case SUBSYS_CONSOLE_06:
+ strncpy(s, "CONSOLE_06", n);
+ break;
+ case SUBSYS_CONSOLE_07:
+ strncpy(s, "CONSOLE_07", n);
+ break;
+ case SUBSYS_CONSOLE_08:
+ strncpy(s, "CONSOLE_08", n);
+ break;
+ case SUBSYS_CONSOLE_09:
+ strncpy(s, "CONSOLE_09", n);
+ break;
+ case SUBSYS_CONSOLE_10:
+ strncpy(s, "CONSOLE_10", n);
+ break;
+ case SUBSYS_CONSOLE_11:
+ strncpy(s, "CONSOLE_11", n);
+ break;
+ case SUBSYS_CONSOLE_12:
+ strncpy(s, "CONSOLE_12", n);
+ break;
+ case SUBSYS_CONSOLE_13:
+ strncpy(s, "CONSOLE_13", n);
+ break;
+ case SUBSYS_CONSOLE_14:
+ strncpy(s, "CONSOLE_14", n);
+ break;
+ case SUBSYS_CONSOLE_15:
+ strncpy(s, "CONSOLE_15", n);
+ break;
+ case SUBSYS_CONSOLE_16:
+ strncpy(s, "CONSOLE_16", n);
+ break;
+ case SUBSYS_CONSOLE_17:
+ strncpy(s, "CONSOLE_17", n);
+ break;
+ case SUBSYS_CONSOLE_18:
+ strncpy(s, "CONSOLE_18", n);
+ break;
+ case SUBSYS_CONSOLE_19:
+ strncpy(s, "CONSOLE_19", n);
+ break;
+ case SUBSYS_CONSOLE_20:
+ strncpy(s, "CONSOLE_20", n);
+ break;
+ case SUBSYS_CONSOLE_21:
+ strncpy(s, "CONSOLE_21", n);
+ break;
+ case SUBSYS_CONSOLE_22:
+ strncpy(s, "CONSOLE_22", n);
+ break;
+ case SUBSYS_CONSOLE_23:
+ strncpy(s, "CONSOLE_23", n);
+ break;
+ case SUBSYS_CONSOLE_24:
+ strncpy(s, "CONSOLE_24", n);
+ break;
+ case SUBSYS_CONSOLE_25:
+ strncpy(s, "CONSOLE_25", n);
+ break;
+ case SUBSYS_CONSOLE_26:
+ strncpy(s, "CONSOLE_26", n);
+ break;
+ case SUBSYS_CONSOLE_27:
+ strncpy(s, "CONSOLE_27", n);
+ break;
+ case SUBSYS_CONSOLE_28:
+ strncpy(s, "CONSOLE_28", n);
+ break;
+ case SUBSYS_CONSOLE_29:
+ strncpy(s, "CONSOLE_29", n);
+ break;
+ case SUBSYS_CONSOLE_30:
+ strncpy(s, "CONSOLE_30", n);
+ break;
+ case SUBSYS_CONSOLE_31:
+ strncpy(s, "CONSOLE_31", n);
+ break;
+ case SUBSYS_CONSOLE_32:
+ strncpy(s, "CONSOLE_32", n);
+ break;
+ case SUBSYS_CONSOLE_33:
+ strncpy(s, "CONSOLE_33", n);
+ break;
+ case SUBSYS_CONSOLE_34:
+ strncpy(s, "CONSOLE_34", n);
+ break;
+ case SUBSYS_CONSOLE_35:
+ strncpy(s, "CONSOLE_35", n);
+ break;
+ case SUBSYS_CONSOLE_36:
+ strncpy(s, "CONSOLE_36", n);
+ break;
+ case SUBSYS_CONSOLE_37:
+ strncpy(s, "CONSOLE_37", n);
+ break;
+ case SUBSYS_CONSOLE_38:
+ strncpy(s, "CONSOLE_38", n);
+ break;
+ case SUBSYS_CONSOLE_39:
+ strncpy(s, "CONSOLE_39", n);
+ break;
+ case SUBSYS_CONSOLE_40:
+ strncpy(s, "CONSOLE_40", n);
+ break;
+ case SUBSYS_CONSOLE_41:
+ strncpy(s, "CONSOLE_41", n);
+ break;
+ case SUBSYS_CONSOLE_42:
+ strncpy(s, "CONSOLE_42", n);
+ break;
+ case SUBSYS_CONSOLE_43:
+ strncpy(s, "CONSOLE_43", n);
+ break;
+ case SUBSYS_CONSOLE_44:
+ strncpy(s, "CONSOLE_44", n);
+ break;
+ case SUBSYS_CONSOLE_45:
+ strncpy(s, "CONSOLE_45", n);
+ break;
+ case SUBSYS_CONSOLE_46:
+ strncpy(s, "CONSOLE_46", n);
+ break;
+ default:
+ subsys_unknown_to_s(subsys, s, n);
+ break;
+ }
+ s[n - 1] = '\0';
+ return s;
+}
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h b/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h
new file mode 100644
index 000000000000..7304e9a0648c
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h
@@ -0,0 +1,53 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Linux GCC Version (32-bit and 64-bit) */
+static inline unsigned long
+__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
+ unsigned long reg_ecx)
+{
+ unsigned long result = 0;
+
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (cpuid_ecx & 0x80000000) {
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx)
+ );
+ } else {
+ result = -1;
+ }
+ return result;
+}
+
+static inline unsigned long
+__unisys_extended_vmcall_gnuc(unsigned long long tuple,
+ unsigned long long reg_ebx,
+ unsigned long long reg_ecx,
+ unsigned long long reg_edx)
+{
+ unsigned long result = 0;
+
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (cpuid_ecx & 0x80000000) {
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx),
+ "d"(reg_edx));
+ } else {
+ result = -1;
+ }
+ return result;
+ }
diff --git a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
new file mode 100644
index 000000000000..ae708faaa94d
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
@@ -0,0 +1,209 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSDEVICEINFO_H__
+#define __VBUSDEVICEINFO_H__
+
+#include "commontypes.h"
+
+#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
+
+/* An array of this struct is present in the channel area for each vbus.
+ * (See vbuschannel.h.)
+ * It is filled in by the client side to provide info about the device
+ * and driver from the client's perspective.
+ */
+typedef struct _ULTRA_VBUS_DEVICEINFO {
+ U8 devType[16]; /* short string identifying the device type */
+ U8 drvName[16]; /* driver .sys file name */
+ U8 infoStrings[96]; /* sequence of tab-delimited id strings: */
+ /* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
+ U8 reserved[128]; /* pad size to 256 bytes */
+} ULTRA_VBUS_DEVICEINFO;
+
+#pragma pack(pop)
+
+/* Reads chars from the buffer at <src> for <srcmax> bytes, and writes to
+ * the buffer at <p>, which is <remain> bytes long, ensuring never to
+ * overflow the buffer at <p>, using the following rules:
+ * - printable characters are simply copied from the buffer at <src> to the
+ * buffer at <p>
+ * - intervening streaks of non-printable characters in the buffer at <src>
+ * are replaced with a single space in the buffer at <p>
+ * Note that we pay no attention to '\0'-termination.
+ * Returns the number of bytes written to <p>.
+ *
+ * Pass <p> == NULL and <remain> == 0 for this special behavior. In this
+ * case, we simply return the number of bytes that WOULD HAVE been written
+ * to a buffer at <p>, had it been infinitely big.
+ */
+static inline int
+VBUSCHANNEL_sanitize_buffer(char *p, int remain, char __iomem *src, int srcmax)
+{
+ int chars = 0;
+ int nonprintable_streak = 0;
+ while (srcmax > 0) {
+ if ((readb(src) >= ' ') && (readb(src) < 0x7f)) {
+ if (nonprintable_streak) {
+ if (remain > 0) {
+ *p = ' ';
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL)
+ chars++;
+ nonprintable_streak = 0;
+ }
+ if (remain > 0) {
+ *p = readb(src);
+ p++;
+ remain--;
+ chars++;
+ } else if (p == NULL)
+ chars++;
+ } else
+ nonprintable_streak = 1;
+ src++;
+ srcmax--;
+ }
+ return chars;
+}
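
The NULL/0 convention documented above enables a two-pass "measure, then fill" pattern. A minimal sketch, assuming <linux/slab.h> is available; copy_sanitized() and its caller are hypothetical names, not part of this header:

static int copy_sanitized(char __iomem *src, int srcmax, char **out)
{
	char *buf;
	int needed, written;

	/* pass 1: count how many bytes the sanitized text will need */
	needed = VBUSCHANNEL_sanitize_buffer(NULL, 0, src, srcmax);
	buf = kmalloc(needed + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* pass 2: copy/sanitize into the buffer we just sized */
	written = VBUSCHANNEL_sanitize_buffer(buf, needed, src, srcmax);
	buf[written] = '\0';	/* the helper never writes a terminator */
	*out = buf;
	return written;
}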
+
+#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
+ do { \
+ if (remain <= 0) \
+ break; \
+ *p = ch; \
+ p++; chars++; remain--; \
+ } while (0)
+
+/* Converts the non-negative value at <num> to an ascii decimal string
+ * at <p>, writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Returns the number of bytes written to <p>.
+ *
+ * Note that we create this function because we need to do this operation in
+ * an environment-independent way (since we are in a common header file).
+ */
+static inline int
+VBUSCHANNEL_itoa(char *p, int remain, int num)
+{
+ int digits = 0;
+ char s[32];
+ int i;
+
+ if (num == 0) {
+ /* '0' is a special case */
+ if (remain <= 0)
+ return 0;
+ *p = '0';
+ return 1;
+ }
+ /* form a backwards decimal ascii string in <s> */
+ while (num > 0) {
+ if (digits >= (int) sizeof(s))
+ return 0;
+ s[digits++] = (num % 10) + '0';
+ num = num / 10;
+ }
+ if (remain < digits) {
+ /* not enough room left at <p> to hold number, so fill with
+ * '?' */
+ for (i = 0; i < remain; i++, p++)
+ *p = '?';
+ return remain;
+ }
+ /* plug in the decimal ascii string representing the number, by */
+ /* reversing the string we just built in <s> */
+ i = digits;
+ while (i > 0) {
+ i--;
+ *p = s[i];
+ p++;
+ }
+ return digits;
+}
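
For illustration only: a caller of the helper above must terminate the result itself, since VBUSCHANNEL_itoa never writes '\0'; the buffer and value here are arbitrary:

	char buf[16];
	int n;

	n = VBUSCHANNEL_itoa(buf, sizeof(buf) - 1, 42);	/* writes "42", returns 2 */
	buf[n] = '\0';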
+
+/* Reads <devInfo>, and converts its contents to a printable string at <p>,
+ * writing at most <remain> bytes. Note there is NO '\0' termination
+ * written to <p>.
+ *
+ * Pass <devix> >= 0 if you want a device index presented.
+ *
+ * Returns the number of bytes written to <p>.
+ */
+static inline int
+VBUSCHANNEL_devInfoToStringBuffer(ULTRA_VBUS_DEVICEINFO __iomem *devInfo,
+ char *p, int remain, int devix)
+{
+ char __iomem *psrc;
+ int nsrc, x, i, pad;
+ int chars = 0;
+
+ psrc = &(devInfo->devType[0]);
+ nsrc = sizeof(devInfo->devType);
+ if (VBUSCHANNEL_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
+ return 0;
+
+ /* emit device index */
+ if (devix >= 0) {
+ VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
+ x = VBUSCHANNEL_itoa(p, remain, devix);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
+ } else {
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ }
+
+ /* emit device type */
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad device type to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit driver name */
+ psrc = &(devInfo->drvName[0]);
+ nsrc = sizeof(devInfo->drvName);
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ pad = 15 - x; /* pad driver name to be exactly 15 chars */
+ for (i = 0; i < pad; i++)
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+ VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
+
+ /* emit strings */
+ psrc = &(devInfo->infoStrings[0]);
+ nsrc = sizeof(devInfo->infoStrings);
+ x = VBUSCHANNEL_sanitize_buffer(p, remain, psrc, nsrc);
+ p += x;
+ remain -= x;
+ chars += x;
+ VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
+
+ return chars;
+}
+
+#endif
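
A hedged sketch of how one ULTRA_VBUS_DEVICEINFO record mapped from channel memory might be rendered into a seq_file with the helper above; show_vbus_dev() and the seq_file plumbing are illustrative, not defined by this header:

static void show_vbus_dev(struct seq_file *seq,
			  ULTRA_VBUS_DEVICEINFO __iomem *dev_info, int devix)
{
	char line[256];		/* matches the 256-byte on-channel record */
	int n;

	n = VBUSCHANNEL_devInfoToStringBuffer(dev_info, line,
					      sizeof(line) - 1, devix);
	line[n] = '\0';		/* helper writes no '\0' terminator */
	seq_puts(seq, line);
}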
diff --git a/drivers/staging/unisys/common-spar/include/version.h b/drivers/staging/unisys/common-spar/include/version.h
new file mode 100644
index 000000000000..00b0ebb09eae
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/version.h
@@ -0,0 +1,46 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* version.h */
+
+/* Common version/release info needed by all components goes here.
+ * (This file must compile cleanly in all environments.)
+ * Ultimately, this will be combined with defines generated dynamically as
+ * part of the sysgen, and some of the defines below may in fact end up
+ * being replaced with dynamically generated ones.
+ */
+#ifndef __VERSION_H__
+#define __VERSION_H__
+
+#define SPARVER1 "1"
+#define SPARVER2 "0"
+#define SPARVER3 "0"
+#define SPARVER4 "0"
+
+#define VERSION SPARVER1 "." SPARVER2 "." SPARVER3 "." SPARVER4
+#define VERSIONDATE __DATE__
+
+/* Here are various version forms needed in Windows environments.
+ */
+#define VISOR_PRODUCTVERSION SPARVERCOMMA
+#define VISOR_PRODUCTVERSION_STR SPARVER1 "." SPARVER2 "." SPARVER3 "." \
+ SPARVER4
+#define VISOR_OBJECTVERSION_STR SPARVER1 "," SPARVER2 "," SPARVER3 "," \
+ SPARVER4
+
+#define COPYRIGHT "Unisys Corporation"
+#define COPYRIGHTDATE "2010 - 2013"
+
+#endif
diff --git a/drivers/staging/unisys/common-spar/include/vmcallinterface.h b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
new file mode 100644
index 000000000000..14c404367fa7
--- /dev/null
+++ b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
@@ -0,0 +1,167 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __IOMONINTF_H__
+#define __IOMONINTF_H__
+
+/*
+* This file contains all structures needed to support the VMCALLs for IO
+* Virtualization. The VMCALLs are provided by Monitor and used by IO code
+* running on IO Partitions.
+*/
+
+#ifdef __GNUC__
+#include "iovmcall_gnuc.h"
+#endif /* */
+#include "diagchannel.h"
+
+#ifdef VMCALL_IO_CONTROLVM_ADDR
+#undef VMCALL_IO_CONTROLVM_ADDR
+#endif /* */
+
+/* define subsystem number for AppOS, used in uislib driver */
+#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
+typedef enum { /* VMCALL identification tuples */
+ /* Note: when a new VMCALL is added:
+ * - the 1st 2 hex digits correspond to one of the
+ * VMCALL_MONITOR_INTERFACE types and
+ * - the next 2 hex digits are the nth relative instance within a
+ * type
+ * E.G. for VMCALL_VIRTPART_RECYCLE_PART,
+ * - the 0x02 identifies it as a VMCALL_VIRTPART type and
+ * - the 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART
+ * type of VMCALL
+ */
+
+ VMCALL_IO_CONTROLVM_ADDR = 0x0501, /* used by all Guests, not just
+ * IO */
+ VMCALL_IO_DIAG_ADDR = 0x0508,
+ VMCALL_IO_VISORSERIAL_ADDR = 0x0509,
+ VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET = 0x0708, /* Allow caller to
+ * query virtual time
+ * offset */
+ VMCALL_CHANNEL_VERSION_MISMATCH = 0x0709,
+ VMCALL_POST_CODE_LOGEVENT = 0x070B, /* LOGEVENT Post Code (RDX) with
+ * specified subsystem mask (RCX
+ * - monitor_subsystems.h) and
+ * severity (RDX) */
+ VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER = 0x0802, /* Yield the
+ * remainder & all
+ * future quantums of
+ * the caller */
+ VMCALL_MEASUREMENT_DO_NOTHING = 0x0901,
+ VMCALL_UPDATE_PHYSICAL_TIME = 0x0a02 /* Allow
+ * ULTRA_SERVICE_CAPABILITY_TIME
+ * capable guest to make
+ * VMCALL */
+} VMCALL_MONITOR_INTERFACE_METHOD_TUPLE;
+
+#define VMCALL_SUCCESS 0
+#define VMCALL_SUCCESSFUL(result) (result == 0)
+
+#ifdef __GNUC__
+#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \
+ __unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
+#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
+ __unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
+#define ISSUE_IO_VMCALL(InterfaceMethod, param, result) \
+ (result = unisys_vmcall(InterfaceMethod, (param) & 0xFFFFFFFF, \
+ (param) >> 32))
+#define ISSUE_IO_EXTENDED_VMCALL(InterfaceMethod, param1, param2, \
+ param3, result) \
+ (result = unisys_extended_vmcall(InterfaceMethod, param1, \
+ param2, param3))
+
+ /* The following uses the VMCALL_POST_CODE_LOGEVENT interface but is
+  * currently not used much */
+#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \
+do { \
+ U32 _tempresult = VMCALL_SUCCESS; \
+ ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \
+ MDS_APPOS, postcode, _tempresult); \
+} while (0)
+#endif
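
As the ISSUE_IO_VMCALL macro above shows, a single 64-bit parameter is split across EBX (low 32 bits) and ECX (high 32 bits). A minimal sketch, under the assumption that the parameter is the guest-physical address of the params struct defined later in this header; get_controlvm_channel() and the virt_to_phys() usage are illustrative, not part of this interface:

static int get_controlvm_channel(U64 *addr, U32 *bytes)
{
	VMCALL_IO_CONTROLVM_ADDR_PARAMS params;
	U64 physaddr = virt_to_phys(&params);
	int result;

	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (!VMCALL_SUCCESSFUL(result))
		return -EIO;
	*addr = params.ChannelAddress;	/* OUT: filled in by the monitor */
	*bytes = params.ChannelBytes;	/* OUT: channel size in bytes */
	return 0;
}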
+
+/* Structures for IO VMCALLs */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+struct phys_info {
+ U64 pi_pfn;
+ U16 pi_off;
+ U16 pi_len;
+};
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+typedef struct phys_info IO_DATA_STRUCTURE;
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
+typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
+ /* The Guest-relative physical address of the ControlVm channel.
+ * This VMCall fills this in with the appropriate address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+ /* the size of the ControlVm channel in bytes. This VMCall fills this
+ * in with the appropriate size. */
+ U32 ChannelBytes; /* contents provided by this VMCALL (OUT) */
+ U8 Unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+} VMCALL_IO_CONTROLVM_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_DIAG_ADDR interface */
+typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
+ /* The Guest-relative physical address of the diagnostic channel.
+ * This VMCall fills this in with the appropriate address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+} VMCALL_IO_DIAG_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
+/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
+#pragma pack(push, 1)
+/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */
+typedef struct _VMCALL_IO_VISORSERIAL_ADDR_PARAMS {
+ /* The Guest-relative physical address of the serial console
+ * channel. This VMCall fills this in with the appropriate
+ * address. */
+ U64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+} VMCALL_IO_VISORSERIAL_ADDR_PARAMS;
+
+#pragma pack(pop)
+/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
+
+/* Parameters to VMCALL_CHANNEL_VERSION_MISMATCH interface */
+typedef struct _VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS {
+ U8 ChannelName[32]; /* Null terminated string giving name of channel
+ * (IN) */
+ U8 ItemName[32]; /* Null terminated string giving name of
+ * mismatched item (IN) */
+ U32 SourceLineNumber; /* line# where invoked. (IN) */
+ U8 SourceFileName[36]; /* source code where invoked - Null terminated
+ * string (IN) */
+} VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS;
+
+#endif /* __IOMONINTF_H__ */
diff --git a/drivers/staging/unisys/include/commontypes.h b/drivers/staging/unisys/include/commontypes.h
new file mode 100644
index 000000000000..ef12af4a72db
--- /dev/null
+++ b/drivers/staging/unisys/include/commontypes.h
@@ -0,0 +1,170 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef _COMMONTYPES_H_
+#define _COMMONTYPES_H_
+
+/* define the following to prevent include nesting in kernel header files of
+ * similar abbreviated content */
+#define _SUPERVISOR_COMMONTYPES_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#else
+#include <stdint.h>
+#include <syslog.h>
+#endif
+
+#define U8 uint8_t
+#define U16 uint16_t
+#define U32 uint32_t
+#define U64 uint64_t
+#define S8 int8_t
+#define S16 int16_t
+#define S32 int32_t
+#define S64 int64_t
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_X86_32
+#define UINTN U32
+#else
+#define UINTN U64
+#endif
+
+#else
+
+#include <stdint.h>
+#if __WORDSIZE == 32
+#define UINTN U32
+#elif __WORDSIZE == 64
+#define UINTN U64
+#else
+#error Unsupported __WORDSIZE
+#endif
+
+#endif
+
+typedef struct {
+ U32 data1;
+ U16 data2;
+ U16 data3;
+ U8 data4[8];
+} __attribute__ ((__packed__)) GUID;
+
+#ifndef GUID0
+#define GUID0 {0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0} }
+#endif
+typedef U64 GUEST_PHYSICAL_ADDRESS;
+
+#define MEMSET(ptr, val, len) memset(ptr, val, len)
+#define MEMCMP(m1, m2, len) memcmp(m1, m2, len)
+#define MEMCMP_IO(m1, m2, len) memcmp((void __force *)m1, m2, len)
+#define STRLEN(s) ((UINTN)strlen((const char *)s))
+#define STRCPY(d, s) (strcpy((char *)d, (const char *)s))
+
+#define INLINE inline
+#define OFFSETOF offsetof
+
+#ifdef __KERNEL__
+#define MEMORYBARRIER mb()
+#define MEMCPY(dest, src, len) memcpy(dest, src, len)
+#define MEMCPY_TOIO(dest, src, len) memcpy_toio(dest, src, len)
+#define MEMCPY_FROMIO(dest, src, len) memcpy_fromio(dest, src, len)
+
+#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50], s2[50], s3[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=%s actual=%s @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ GUID_format2(&expected, s2), GUID_format2(&actual, s3), \
+ fil, lin); \
+ } while (0)
+#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long)expected, (unsigned long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ pr_err("Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d\n", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long long)expected, \
+ (unsigned long long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
+ LineNumber, Str, args...) \
+ pr_info(Str, ## args)
+
+#else
+#define MEMCPY(dest, src, len) memcpy(dest, src, len)
+
+#define MEMORYBARRIER mb()
+
+#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50], s2[50], s3[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=%s actual=%s @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ GUID_format2(&expected, s2), GUID_format2(&actual, s3), \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long)expected, (unsigned long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
+ lin, logCtx) \
+ do { \
+ char s1[50]; \
+ syslog(LOG_USER | LOG_ERR, \
+ "Channel mismatch on channel=%s(%s) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d", \
+ chName, GUID_format2(&chType, s1), field, \
+ (unsigned long long)expected, \
+ (unsigned long long)actual, \
+ fil, lin); \
+ } while (0)
+
+#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
+ LineNumber, Str, args...) \
+ syslog(LOG_USER | LOG_INFO, Str, ## args)
+#endif
+
+#define VolatileBarrier() MEMORYBARRIER
+
+#endif
+#include "guidutils.h"
diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h
new file mode 100644
index 000000000000..c3de8496e5d6
--- /dev/null
+++ b/drivers/staging/unisys/include/guestlinuxdebug.h
@@ -0,0 +1,182 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __GUESTLINUXDEBUG_H__
+#define __GUESTLINUXDEBUG_H__
+
+/*
+* This file contains supporting interfaces for "vmcallinterface.h", particularly
+* regarding adding additional structure and functionality to the Linux
+* ISSUE_IO_VMCALL_POSTCODE_SEVERITY macro. */
+
+
+/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
+#include "vmcallinterface.h"
+typedef enum { /* POSTCODE driver identifier tuples */
+ /* visorchipset driver files */
+ VISOR_CHIPSET_PC = 0xA0,
+ VISOR_CHIPSET_PC_controlvm_c = 0xA1,
+ VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
+ VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
+ VISOR_CHIPSET_PC_file_c = 0xA4,
+ VISOR_CHIPSET_PC_parser_c = 0xA5,
+ VISOR_CHIPSET_PC_testing_c = 0xA6,
+ VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
+ VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
+ /* visorbus driver files */
+ VISOR_BUS_PC = 0xB0,
+ VISOR_BUS_PC_businst_attr_c = 0xB1,
+ VISOR_BUS_PC_channel_attr_c = 0xB2,
+ VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
+ VISOR_BUS_PC_visorbus_main_c = 0xB4,
+ /* visorclientbus driver files */
+ VISOR_CLIENT_BUS_PC = 0xC0,
+ VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
+ /* virt hba driver files */
+ VIRT_HBA_PC = 0xC2,
+ VIRT_HBA_PC_virthba_c = 0xC3,
+ /* virtpci driver files */
+ VIRT_PCI_PC = 0xC4,
+ VIRT_PCI_PC_virtpci_c = 0xC5,
+ /* virtnic driver files */
+ VIRT_NIC_PC = 0xC6,
+ VIRT_NIC_P_virtnic_c = 0xC7,
+ /* uislib driver files */
+ UISLIB_PC = 0xD0,
+ UISLIB_PC_uislib_c = 0xD1,
+ UISLIB_PC_uisqueue_c = 0xD2,
+ UISLIB_PC_uisthread_c = 0xD3,
+ UISLIB_PC_uisutils_c = 0xD4,
+} DRIVER_PC;
+
+typedef enum { /* POSTCODE event identifier tuples */
+ ATTACH_PORT_ENTRY_PC = 0x001,
+ ATTACH_PORT_FAILURE_PC = 0x002,
+ ATTACH_PORT_SUCCESS_PC = 0x003,
+ BUS_FAILURE_PC = 0x004,
+ BUS_CREATE_ENTRY_PC = 0x005,
+ BUS_CREATE_FAILURE_PC = 0x006,
+ BUS_CREATE_EXIT_PC = 0x007,
+ BUS_CONFIGURE_ENTRY_PC = 0x008,
+ BUS_CONFIGURE_FAILURE_PC = 0x009,
+ BUS_CONFIGURE_EXIT_PC = 0x00A,
+ CHIPSET_INIT_ENTRY_PC = 0x00B,
+ CHIPSET_INIT_SUCCESS_PC = 0x00C,
+ CHIPSET_INIT_FAILURE_PC = 0x00D,
+ CHIPSET_INIT_EXIT_PC = 0x00E,
+ CREATE_WORKQUEUE_PC = 0x00F,
+ CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
+ CONTROLVM_INIT_FAILURE_PC = 0x0A1,
+ DEVICE_CREATE_ENTRY_PC = 0x0A2,
+ DEVICE_CREATE_FAILURE_PC = 0x0A3,
+ DEVICE_CREATE_SUCCESS_PC = 0x0A4,
+ DEVICE_CREATE_EXIT_PC = 0x0A5,
+ DEVICE_ADD_PC = 0x0A6,
+ DEVICE_REGISTER_FAILURE_PC = 0x0A7,
+ DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
+ DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
+ DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
+ DRIVER_ENTRY_PC = 0x0AB,
+ DRIVER_EXIT_PC = 0x0AC,
+ MALLOC_FAILURE_PC = 0x0AD,
+ QUEUE_DELAYED_WORK_PC = 0x0AE,
+ UISLIB_THREAD_FAILURE_PC = 0x0B7,
+ VBUS_CHANNEL_ENTRY_PC = 0x0B8,
+ VBUS_CHANNEL_FAILURE_PC = 0x0B9,
+ VBUS_CHANNEL_EXIT_PC = 0x0BA,
+ VHBA_CREATE_ENTRY_PC = 0x0BB,
+ VHBA_CREATE_FAILURE_PC = 0x0BC,
+ VHBA_CREATE_EXIT_PC = 0x0BD,
+ VHBA_CREATE_SUCCESS_PC = 0x0BE,
+ VHBA_COMMAND_HANDLER_PC = 0x0BF,
+ VHBA_PROBE_ENTRY_PC = 0x0C0,
+ VHBA_PROBE_FAILURE_PC = 0x0C1,
+ VHBA_PROBE_EXIT_PC = 0x0C2,
+ VNIC_CREATE_ENTRY_PC = 0x0C3,
+ VNIC_CREATE_FAILURE_PC = 0x0C4,
+ VNIC_CREATE_SUCCESS_PC = 0x0C5,
+ VNIC_PROBE_ENTRY_PC = 0x0C6,
+ VNIC_PROBE_FAILURE_PC = 0x0C7,
+ VNIC_PROBE_EXIT_PC = 0x0C8,
+ VPCI_CREATE_ENTRY_PC = 0x0C9,
+ VPCI_CREATE_FAILURE_PC = 0x0CA,
+ VPCI_CREATE_EXIT_PC = 0x0CB,
+ VPCI_PROBE_ENTRY_PC = 0x0CC,
+ VPCI_PROBE_FAILURE_PC = 0x0CD,
+ VPCI_PROBE_EXIT_PC = 0x0CE,
+ CRASH_DEV_ENTRY_PC = 0x0CF,
+ CRASH_DEV_EXIT_PC = 0x0D0,
+ CRASH_DEV_HADDR_NULL = 0x0D1,
+ CRASH_DEV_CONTROLVM_NULL = 0x0D2,
+ CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
+ CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
+ CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
+ CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
+ CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
+ CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
+ SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
+ SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
+ CALLHOME_INIT_FAILURE_PC = 0x0DB
+} EVENT_PC;
+
+#ifdef __GNUC__
+
+#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
+#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
+#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT /* TODO-> Info currently
+ * doesn't show, so we
+ * set info=warning */
+/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+ * Please also note that the resulting postcode is in hex, so if you are
+ * searching for the __LINE__ number, convert it first to decimal. The line
+ * number, combined with the driver and type of call, will allow you to track
+ * down exactly what line an error occurred on, or where the last driver
+ * entered/exited from.
+ */
+
+/* BASE FUNCTIONS */
+#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((U64)DRIVER_PC) << 56) | (((U64)EVENT_PC) << 44) | \
+ ((((U64)__LINE__) & 0xFFF) << 32) | \
+ (((U64)pc32bit) & 0xFFFFFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
+do { \
+ unsigned long long post_code_temp; \
+ post_code_temp = (((U64)DRIVER_PC) << 56) | (((U64)EVENT_PC) << 44) | \
+ ((((U64)__LINE__) & 0xFFF) << 32) | \
+ ((((U64)pc16bit1) & 0xFFFF) << 16) | \
+ (((U64)pc16bit2) & 0xFFFF); \
+ ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \
+} while (0)
+
+/* MOST COMMON */
+#define POSTCODE_LINUX_2(EVENT_PC, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity);
+
+#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \
+ POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity);
+
+
+#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \
+ POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \
+ pc16bit2, severity);
+
+#endif
+#endif
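
POSTCODE_LINUX_A above packs the driver id, event id, the low 12 bits of __LINE__, and a 32-bit payload into a single 64-bit post code. A small illustration of the same packing written as a helper function, handy when decoding values seen in logs; postcode_pack() is hypothetical:

static inline U64 postcode_pack(U8 driver_pc, U16 event_pc, U16 line, U32 payload)
{
	return (((U64)driver_pc) << 56) | (((U64)event_pc) << 44) |
	       ((((U64)line) & 0xFFF) << 32) | ((U64)payload);
}

/* e.g. postcode_pack(VISOR_CHIPSET_PC, CHIPSET_INIT_ENTRY_PC, 123, 0)
 * == 0xA000B07B00000000 (driver 0xA0, event 0x00B, line 123 == 0x7B). */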
diff --git a/drivers/staging/unisys/include/guidutils.h b/drivers/staging/unisys/include/guidutils.h
new file mode 100644
index 000000000000..75caf929cd6d
--- /dev/null
+++ b/drivers/staging/unisys/include/guidutils.h
@@ -0,0 +1,203 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* guidutils.h
+ *
+ * These are GUID manipulation inlines that can be used from either
+ * kernel-mode or user-mode.
+ *
+ */
+#ifndef __GUIDUTILS_H__
+#define __GUIDUTILS_H__
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#define GUID_STRTOUL kstrtoul
+#else
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define GUID_STRTOUL strtoul
+#endif
+
+static inline char *
+GUID_format1(const GUID *guid, char *s)
+{
+ sprintf(s, "{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/** Format a GUID in Microsoft's 'what in the world were they thinking'
+ * format.
+ */
+static inline char *
+GUID_format2(const GUID *guid, char *s)
+{
+ sprintf(s, "{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/**
+ * Like GUID_format2 but without the curly braces and the
+ * hex digits in upper case
+ */
+static inline char *
+GUID_format3(const GUID *guid, char *s)
+{
+ sprintf(s, "%-8.8lX-%-4.4X-%-4.4X-%-2.2X%-2.2X-%-2.2X%-2.2X%-2.2X%-2.2X%-2.2X%-2.2X",
+ (ulong) guid->data1,
+ guid->data2,
+ guid->data3,
+ guid->data4[0],
+ guid->data4[1],
+ guid->data4[2],
+ guid->data4[3],
+ guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
+ return s;
+}
+
+/** Parse a guid string in any of these forms:
+ * {11111111-2222-3333-4455-66778899aabb}
+ * {11111111-2222-3333-445566778899aabb}
+ * 11111111-2222-3333-4455-66778899aabb
+ * 11111111-2222-3333-445566778899aabb
+ */
+static inline GUID
+GUID_scan(U8 *p)
+{
+ GUID guid = GUID0;
+ U8 x[33];
+ int count = 0;
+ int c, i = 0;
+ U8 cdata1[9];
+ U8 cdata2[5];
+ U8 cdata3[5];
+ U8 cdata4[3];
+ int dashcount = 0;
+ int brace = 0;
+ unsigned long uldata;
+
+ if (!p)
+ return guid;
+ if (*p == '{') {
+ p++;
+ brace = 1;
+ }
+ while (count < 32) {
+ if (*p == '}')
+ return guid;
+ if (*p == '\0')
+ return guid;
+ c = toupper(*p);
+ p++;
+ if (c == '-') {
+ switch (dashcount) {
+ case 0:
+ if (i != 8)
+ return guid;
+ break;
+ case 1:
+ if (i != 4)
+ return guid;
+ break;
+ case 2:
+ if (i != 4)
+ return guid;
+ break;
+ case 3:
+ if (i != 4)
+ return guid;
+ break;
+ default:
+ return guid;
+ }
+ dashcount++;
+ i = 0;
+ continue;
+ }
+ if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'))
+ i++;
+ else
+ return guid;
+ x[count++] = c;
+ }
+ x[count] = '\0';
+ if (brace) {
+ if (*p == '}')
+ p++;
+ else
+ return guid;
+ }
+ if (dashcount == 3 || dashcount == 4)
+ ;
+ else
+ return guid;
+ memset(cdata1, 0, sizeof(cdata1));
+ memset(cdata2, 0, sizeof(cdata2));
+ memset(cdata3, 0, sizeof(cdata3));
+ memset(cdata4, 0, sizeof(cdata4));
+ memcpy(cdata1, x + 0, 8);
+ memcpy(cdata2, x + 8, 4);
+ memcpy(cdata3, x + 12, 4);
+
+ if (GUID_STRTOUL((char *) cdata1, 16, &uldata) == 0)
+ guid.data1 = (U32)uldata;
+ if (GUID_STRTOUL((char *) cdata2, 16, &uldata) == 0)
+ guid.data2 = (U16)uldata;
+ if (GUID_STRTOUL((char *) cdata3, 16, &uldata) == 0)
+ guid.data3 = (U16)uldata;
+
+ for (i = 0; i < 8; i++) {
+ memcpy(cdata4, x + 16 + (i * 2), 2);
+ if (GUID_STRTOUL((char *) cdata4, 16, &uldata) == 0)
+ guid.data4[i] = (U8) uldata;
+ }
+
+ return guid;
+}
+
+static inline char *
+GUID_sanitize(char *inputGuidStr, char *outputGuidStr)
+{
+ GUID g;
+ GUID guid0 = GUID0;
+ *outputGuidStr = '\0';
+ g = GUID_scan((U8 *) inputGuidStr);
+ if (memcmp(&g, &guid0, sizeof(GUID)) == 0)
+ return outputGuidStr; /* bad GUID format */
+ return GUID_format1(&g, outputGuidStr);
+}
+
+#endif
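
A short round-trip illustration of the helpers above, usable from kernel context; the literal GUID string is arbitrary:

	char out[50];
	GUID g = GUID_scan((U8 *)"{12345678-9abc-def0-1122-334455667788}");

	/* prints {12345678-9abc-def0-1122334455667788}; GUID_format1 omits
	 * the dash between the 4th and 5th byte groups */
	pr_info("%s\n", GUID_format1(&g, out));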
diff --git a/drivers/staging/unisys/include/periodic_work.h b/drivers/staging/unisys/include/periodic_work.h
new file mode 100644
index 000000000000..6c7190bdcd66
--- /dev/null
+++ b/drivers/staging/unisys/include/periodic_work.h
@@ -0,0 +1,40 @@
+/* periodic_work.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __PERIODIC_WORK_H__
+#define __PERIODIC_WORK_H__
+
+#include "timskmod.h"
+
+
+
+/* PERIODIC_WORK is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct PERIODIC_WORK_Tag PERIODIC_WORK;
+
+PERIODIC_WORK *visor_periodic_work_create(ulong jiffy_interval,
+ struct workqueue_struct *workqueue,
+ void (*workfunc)(void *),
+ void *workfuncarg,
+ const char *devnam);
+void visor_periodic_work_destroy(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_nextperiod(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_start(PERIODIC_WORK *periodic_work);
+BOOL visor_periodic_work_stop(PERIODIC_WORK *periodic_work);
+
+#endif
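
A hedged sketch of the intended call sequence for the API above; the workqueue, callback, one-second interval (HZ jiffies), and device name are illustrative:

static void my_poll(void *ctx)
{
	/* periodic work for ctx goes here */
}

static PERIODIC_WORK *pw;

static int start_polling(struct workqueue_struct *wq, void *ctx)
{
	pw = visor_periodic_work_create(HZ, wq, my_poll, ctx, "mydev");
	if (!pw)
		return -ENOMEM;
	return visor_periodic_work_start(pw) ? 0 : -EINVAL;
}

static void stop_polling(void)
{
	visor_periodic_work_stop(pw);
	visor_periodic_work_destroy(pw);
	pw = NULL;
}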
diff --git a/drivers/staging/unisys/include/procobjecttree.h b/drivers/staging/unisys/include/procobjecttree.h
new file mode 100644
index 000000000000..c81d11287e68
--- /dev/null
+++ b/drivers/staging/unisys/include/procobjecttree.h
@@ -0,0 +1,48 @@
+/* procobjecttree.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes the interfaces necessary for creating a tree of types,
+ * objects, and properties in /proc.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __PROCOBJECTTREE_H__
+#define __PROCOBJECTTREE_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+/* These are opaque structures to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct MYPROCOBJECT_Tag MYPROCOBJECT;
+typedef struct MYPROCTYPE_Tag MYPROCTYPE;
+
+MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type, const char *name,
+ void *context);
+void visor_proc_DestroyObject(MYPROCOBJECT *obj);
+MYPROCTYPE *visor_proc_CreateType(struct proc_dir_entry *procRootDir,
+ const char **name,
+ const char **propertyNames,
+ void (*show_property)(struct seq_file *,
+ void *, int));
+void visor_proc_DestroyType(MYPROCTYPE *type);
+
+#endif
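
A speculative sketch of how these calls could fit together; treating <name> and <propertyNames> as NULL-terminated string arrays is an assumption inferred from the pointer-to-pointer signatures, not something this header guarantees:

static void show_prop(struct seq_file *seq, void *context, int property_index)
{
	seq_printf(seq, "property %d of %p\n", property_index, context);
}

static const char *type_path[] = { "device", NULL };	/* assumed layout */
static const char *prop_names[] = { "status", NULL };	/* assumed layout */

static MYPROCTYPE *type;
static MYPROCOBJECT *obj;

static void make_proc_nodes(struct proc_dir_entry *root, void *ctx)
{
	type = visor_proc_CreateType(root, type_path, prop_names, show_prop);
	obj = visor_proc_CreateObject(type, "dev0", ctx);
}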
diff --git a/drivers/staging/unisys/include/sparstop.h b/drivers/staging/unisys/include/sparstop.h
new file mode 100644
index 000000000000..3603ac607643
--- /dev/null
+++ b/drivers/staging/unisys/include/sparstop.h
@@ -0,0 +1,30 @@
+/* sparstop.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __SPARSTOP_H__
+#define __SPARSTOP_H__
+
+#include "timskmod.h"
+#include "version.h"
+#include <linux/ctype.h>
+
+typedef void (*SPARSTOP_COMPLETE_FUNC) (void *context, int status);
+
+int sp_stop(void *context, SPARSTOP_COMPLETE_FUNC get_complete_func);
+void test_remove_stop_device(void);
+
+#endif
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
new file mode 100644
index 000000000000..5fd5ad514464
--- /dev/null
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -0,0 +1,324 @@
+/* timskmod.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __TIMSKMOD_H__
+#define __TIMSKMOD_H__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/cdev.h>
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <asm/dma.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+/* #define EXPORT_SYMTAB */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+
+/* #define DEBUG */
+#ifndef BOOL
+#define BOOL int
+#endif
+#define FALSE 0
+#define TRUE 1
+#if !defined SUCCESS
+#define SUCCESS 0
+#endif
+#define FAILURE (-1)
+#define DRIVERNAMEMAX 50
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define STRUCTSEQUAL(x, y) (memcmp(&x, &y, sizeof(x)) == 0)
+#ifndef HOSTADDRESS
+#define HOSTADDRESS unsigned long long
+#endif
+
+typedef long VMMIO; /**< Virtual MMIO address (returned from ioremap), which
+ * is a virtual address pointer to a memory-mapped region.
+ * These are declared as "long" instead of u32* to force you to
+ * use readb()/writeb()/memcpy_fromio()/etc to access them.
+ * (On x86 we could probably get away with treating them as
+ * pointers.)
+ */
+typedef long VMMIO8; /**< #VMMIO pointing to 8-bit data */
+typedef long VMMIO16;/**< #VMMIO pointing to 16-bit data */
+typedef long VMMIO32;/**< #VMMIO pointing to 32-bit data */
+
+#define LOCKSEM(sem) down_interruptible(sem)
+#define LOCKSEM_UNINTERRUPTIBLE(sem) down(sem)
+#define UNLOCKSEM(sem) up(sem)
+
+/** lock read/write semaphore for reading.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to lock
+ */
+#define LOCKREADSEM(sem) down_read(sem)
+
+/** unlock read/write semaphore for reading.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to unlock
+ */
+#define UNLOCKREADSEM(sem) up_read(sem)
+
+/** lock read/write semaphore for writing.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to lock
+ */
+#define LOCKWRITESEM(sem) down_write(sem)
+
+/** unlock read/write semaphore for writing.
+ Note that all read/write semaphores are of the "uninterruptible" variety.
+ @param sem (rw_semaphore *) points to semaphore to unlock
+ */
+#define UNLOCKWRITESEM(sem) up_write(sem)
+
+#ifdef ENABLE_RETURN_TRACE
+#define RETTRACE(x) \
+ do { \
+ if (1) { \
+ INFODRV("RET 0x%lx in %s", \
+ (ulong)(x), __func__); \
+ } \
+ } while (0)
+#else
+#define RETTRACE(x)
+#endif
+
+/** Evaluate the provided condition, and log an error via HUHDRV() if
+ * the condition evaluates to false.
+ * @param cond the condition to check
+ */
+#define ASSERT(cond) \
+ do { if (!(cond)) \
+ HUHDRV("ASSERT failed - %s", \
+ __stringify(cond)); \
+ } while (0)
+
+#define sizeofmember(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
+/** "Covered quotient" function */
+#define COVQ(v, d) (((v) + (d) - 1) / (d))
+#define SWAPPOINTERS(p1, p2) \
+ do { \
+ void *SWAPPOINTERS_TEMP = (void *)(p1); \
+ (p1) = (typeof(p1))(p2); \
+ (p2) = (typeof(p2))SWAPPOINTERS_TEMP; \
+ } while (0)
+
+/**
+ * @addtogroup driverlogging
+ * @{
+ */
+
+#define PRINTKDRV(fmt, args...) LOGINF(fmt, ## args)
+#define TBDDRV(fmt, args...) LOGERR(fmt, ## args)
+#define HUHDRV(fmt, args...) LOGERR(fmt, ## args)
+#define ERRDRV(fmt, args...) LOGERR(fmt, ## args)
+#define WARNDRV(fmt, args...) LOGWRN(fmt, ## args)
+#define SECUREDRV(fmt, args...) LOGWRN(fmt, ## args)
+#define INFODRV(fmt, args...) LOGINF(fmt, ## args)
+#define DEBUGDRV(fmt, args...) DBGINF(fmt, ## args)
+
+#define PRINTKDEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args)
+#define TBDDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define HUHDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define ERRDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args)
+#define ERRDEVX(devno, fmt, args...) LOGERRDEVX(devno, fmt, ## args)
+#define WARNDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args)
+#define SECUREDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args)
+#define INFODEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args)
+#define INFODEVX(devno, fmt, args...) LOGINFDEVX(devno, fmt, ## args)
+#define DEBUGDEV(devname, fmt, args...) DBGINFDEV(devname, fmt, ## args)
+
+
+/* @} */
+
+/** Verifies the consistency of your PRIVATEDEVICEDATA structure using
+ * conventional "signature" fields:
+ * <p>
+ * - sig1 should contain the size of the structure
+ * - sig2 should contain a pointer to the beginning of the structure
+ */
+#define DDLOOKSVALID(dd) \
+ ((dd != NULL) && \
+ ((dd)->sig1 == sizeof(PRIVATEDEVICEDATA)) && \
+ ((dd)->sig2 == dd))
+
+/** Verifies the consistency of your PRIVATEFILEDATA structure using
+ * conventional "signature" fields:
+ * <p>
+ * - sig1 should contain the size of the structure
+ * - sig2 should contain a pointer to the beginning of the structure
+ */
+#define FDLOOKSVALID(fd) \
+ ((fd != NULL) && \
+ ((fd)->sig1 == sizeof(PRIVATEFILEDATA)) && \
+ ((fd)->sig2 == fd))
+
+/** Locks dd->lockDev if you haven't already locked it */
+#define LOCKDEV(dd) \
+ { \
+ if (!lockedDev) { \
+ spin_lock(&dd->lockDev); \
+ lockedDev = TRUE; \
+ } \
+ }
+
+/** Unlocks dd->lockDev if you previously locked it */
+#define UNLOCKDEV(dd) \
+ { \
+ if (lockedDev) { \
+ spin_unlock(&dd->lockDev); \
+ lockedDev = FALSE; \
+ } \
+ }
+
+/** Locks dd->lockDevISR if you haven't already locked it */
+#define LOCKDEVISR(dd) \
+ { \
+ if (!lockedDevISR) { \
+ spin_lock_irqsave(&dd->lockDevISR, flags); \
+ lockedDevISR = TRUE; \
+ } \
+ }
+
+/** Unlocks dd->lockDevISR if you previously locked it */
+#define UNLOCKDEVISR(dd) \
+ { \
+ if (lockedDevISR) { \
+ spin_unlock_irqrestore(&dd->lockDevISR, flags); \
+ lockedDevISR = FALSE; \
+ } \
+ }
+
+/** Locks LockGlobalISR if you haven't already locked it */
+#define LOCKGLOBALISR \
+ { \
+ if (!lockedGlobalISR) { \
+ spin_lock_irqsave(&LockGlobalISR, flags); \
+ lockedGlobalISR = TRUE; \
+ } \
+ }
+
+/** Unlocks LockGlobalISR if you previously locked it */
+#define UNLOCKGLOBALISR \
+ { \
+ if (lockedGlobalISR) { \
+ spin_unlock_irqrestore(&LockGlobalISR, flags); \
+ lockedGlobalISR = FALSE; \
+ } \
+ }
+
+/** Locks LockGlobal if you haven't already locked it */
+#define LOCKGLOBAL \
+ { \
+ if (!lockedGlobal) { \
+ spin_lock(&LockGlobal); \
+ lockedGlobal = TRUE; \
+ } \
+ }
+
+/** Unlocks LockGlobal if you previously locked it */
+#define UNLOCKGLOBAL \
+ { \
+ if (lockedGlobal) { \
+ spin_unlock(&LockGlobal); \
+ lockedGlobal = FALSE; \
+ } \
+ }
+
+/** Use this at the beginning of functions where you intend to
+ * use #LOCKDEV/#UNLOCKDEV, #LOCKDEVISR/#UNLOCKDEVISR,
+ * #LOCKGLOBAL/#UNLOCKGLOBAL, #LOCKGLOBALISR/#UNLOCKGLOBALISR.
+ *
+ * Note that __attribute__((unused)) is how you tell GNU C to suppress
+ * any warning messages about the variable being unused.
+ */
+#define LOCKPREAMBLE \
+ ulong flags __attribute__((unused)) = 0; \
+ BOOL lockedDev __attribute__((unused)) = FALSE; \
+ BOOL lockedDevISR __attribute__((unused)) = FALSE; \
+ BOOL lockedGlobal __attribute__((unused)) = FALSE; \
+ BOOL lockedGlobalISR __attribute__((unused)) = FALSE
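
A minimal sketch of the pattern described above: LOCKPREAMBLE declares the bookkeeping flags once, after which the LOCK/UNLOCK macros become safely re-entrant within the function body. PRIVATEDEVICEDATA and its lockDev field are the driver's own definitions, assumed here for illustration:

static void example_op(PRIVATEDEVICEDATA *dd)
{
	LOCKPREAMBLE;	/* flags, lockedDev, lockedDevISR, lockedGlobal, ... */

	LOCKDEV(dd);	/* takes dd->lockDev only if not already taken here */
	/* ... touch device state ... */
	UNLOCKDEV(dd);	/* releases only if this function took it */
}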
+
+
+
+/** Sleep for an indicated number of seconds (for use in kernel mode).
+ * @param x the number of seconds to sleep.
+ */
+#define SLEEP(x) \
+ do { current->state = TASK_INTERRUPTIBLE; \
+ schedule_timeout((x)*HZ); \
+ } while (0)
+
+/** Sleep for an indicated number of jiffies (for use in kernel mode).
+ * @param x the number of jiffies to sleep.
+ */
+#define SLEEPJIFFIES(x) \
+ do { current->state = TASK_INTERRUPTIBLE; \
+ schedule_timeout(x); \
+ } while (0)
+
+#ifndef max
+#define max(a, b) (((a) > (b)) ? (a):(b))
+#endif
+
+static inline struct cdev *cdev_alloc_init(struct module *owner,
+ const struct file_operations *fops)
+{
+ struct cdev *cdev = NULL;
+ cdev = cdev_alloc();
+ if (!cdev)
+ return NULL;
+ cdev->ops = fops;
+ cdev->owner = owner;
+
+ /* Note that the memory allocated for cdev will be deallocated
+ * when the usage count drops to 0, because it is controlled
+ * by a kobject of type ktype_cdev_dynamic. (This
+ * deallocation could very well happen outside of our kernel
+ * module, like via the cdev_put in __fput() for example.)
+ */
+ return cdev;
+}
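
Typical use of the helper above when wiring up a character device; the fops table, device number, and count are illustrative:

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	/* .open, .read, .write, ... as needed */
};

static int register_chardev(dev_t devno)
{
	struct cdev *cdev = cdev_alloc_init(THIS_MODULE, &my_fops);

	if (!cdev)
		return -ENOMEM;
	return cdev_add(cdev, devno, 1);
}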
+
+#include "timskmodutils.h"
+
+#endif
diff --git a/drivers/staging/unisys/include/timskmodutils.h b/drivers/staging/unisys/include/timskmodutils.h
new file mode 100644
index 000000000000..2d81d46bf11e
--- /dev/null
+++ b/drivers/staging/unisys/include/timskmodutils.h
@@ -0,0 +1,75 @@
+/* timskmodutils.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __TIMSKMODUTILS_H__
+#define __TIMSKMODUTILS_H__
+
+#include "timskmod.h"
+
+void *kmalloc_kernel(size_t siz);
+void myprintk(const char *myDrvName, const char *devname,
+ const char *template, ...);
+
+/*--------------------------------*
+ *--- GENERAL MESSAGEQ STUFF ---*
+ *--------------------------------*/
+
+struct MessageQEntry;
+
+/** the data structure used to hold an arbitrary data item that you want
+ * to place on a #MESSAGEQ.
+ *
+ * This structure should be considered opaque; the client using it should
+ * never access the fields directly.
+ * Refer to these functions for more info:
+ *
+ * @ingroup messageq
+ */
+typedef struct MessageQEntry {
+ void *data;
+ struct MessageQEntry *qNext;
+ struct MessageQEntry *qPrev;
+} MESSAGEQENTRY;
+
+/** the data structure used to hold a FIFO queue of #MESSAGEQENTRY<b></b>s.
+ * Declare and initialize as follows:
+ * @code
+ * MESSAGEQ myQueue;
+ * @endcode
+ * This structure should be considered opaque; the client using it should
+ * never access the fields directly.
+ * Refer to these functions for more info:
+ *
+ * @ingroup messageq
+ */
+typedef struct MessageQ {
+ MESSAGEQENTRY *qHead;
+ MESSAGEQENTRY *qTail;
+ struct semaphore nQEntries;
+ spinlock_t queueLock;
+} MESSAGEQ;
+
+char *cyclesToSeconds(u64 cycles, u64 cyclesPerSecond,
+ char *buf, size_t bufsize);
+char *cyclesToIterationSeconds(u64 cycles, u64 cyclesPerSecond,
+ u64 iterations, char *buf, size_t bufsize);
+char *cyclesToSomethingsPerSecond(u64 cycles, u64 cyclesPerSecond,
+ u64 somethings, char *buf, size_t bufsize);
+struct seq_file *visor_seq_file_new_buffer(void *buf, size_t buf_size);
+void visor_seq_file_done_buffer(struct seq_file *m);
+
+#endif
diff --git a/drivers/staging/unisys/include/uisqueue.h b/drivers/staging/unisys/include/uisqueue.h
new file mode 100644
index 000000000000..6dab3900994a
--- /dev/null
+++ b/drivers/staging/unisys/include/uisqueue.h
@@ -0,0 +1,440 @@
+/* uisqueue.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys IO Virtualization header NOTE: This file contains only Linux
+ * specific structs. All OS-independent structs are in iochannel.h.xx
+ */
+
+#ifndef __UISQUEUE_H__
+#define __UISQUEUE_H__
+
+#include "linux/version.h"
+#include "iochannel.h"
+#include "uniklog.h"
+#include <linux/atomic.h>
+#include <linux/semaphore.h>
+
+#include "controlvmchannel.h"
+#include "controlvmcompletionstatus.h"
+
+struct uisqueue_info {
+
+ CHANNEL_HEADER __iomem *chan;
+ /* channel containing queues in which scsi commands &
+ * responses are queued
+ */
+ U64 packets_sent;
+ U64 packets_received;
+ U64 interrupts_sent;
+ U64 interrupts_received;
+ U64 max_not_empty_cnt;
+ U64 total_wakeup_cnt;
+ U64 non_empty_wakeup_cnt;
+
+ struct {
+ SIGNAL_QUEUE_HEADER Reserved1; /* */
+ SIGNAL_QUEUE_HEADER Reserved2; /* */
+ } safe_uis_queue;
+ unsigned int (*send_int_if_needed)(struct uisqueue_info *info,
+ unsigned int whichcqueue,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ unsigned char io_termination);
+};
+
+/* uisqueue_put_cmdrsp_with_lock_client queues a command or response
+ * at the tail of the specified queue. If the queue is full and
+ * oktowait == 0, it returns 0 to indicate failure; otherwise it
+ * waits for the queue to become non-full. If the command is queued,
+ * it returns 1 for success.
+ */
+#define DONT_ISSUE_INTERRUPT 0
+#define ISSUE_INTERRUPT 1
+
+#define DONT_WAIT 0
+#define OK_TO_WAIT 1
+#define UISLIB_LOCK_PREFIX \
+ ".section .smp_locks,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR "661f\n" /* address */ \
+ ".previous\n" \
+ "661:\n\tlock; "
+
+unsigned long long uisqueue_InterlockedOr(unsigned long long __iomem *Target,
+ unsigned long long Set);
+unsigned long long uisqueue_InterlockedAnd(unsigned long long __iomem *Target,
+ unsigned long long Set);
+
+unsigned int uisqueue_send_int_if_needed(struct uisqueue_info *pqueueinfo,
+ unsigned int whichqueue,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ unsigned char io_termination);
+
+int uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
+ struct uiscmdrsp *cmdrsp,
+ unsigned int queue,
+ void *insertlock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ char oktowait,
+ U8 *channelId);
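
A hedged sketch of the calling pattern the comment above describes; the queue index, spinlock, and channel id are placeholders, and struct uiscmdrsp comes from iochannel.h:

	/* returns 0 if the queue was full and we chose not to wait,
	 * 1 if the command/response was queued */
	if (!uisqueue_put_cmdrsp_with_lock_client(queueinfo, cmdrsp,
						  0 /* which queue (assumed) */,
						  &insertlock,
						  DONT_ISSUE_INTERRUPT,
						  0 /* interruptHandle */,
						  DONT_WAIT, channel_id))
		pr_err("queue full, command not queued\n");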
+
+/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue
+ * and copies it to the area pointed by cmdrsp param.
+ * returns 0 if queue is empty, 1 otherwise
+ */
+int
+
+uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo, void *cmdrsp,
+ unsigned int queue);
+
+#define MAX_NAME_SIZE_UISQUEUE 64
+
+struct extport_info {
+ U8 valid:1;
+ /* if 1, indicates this extport slot is occupied
+ * if 0, indicates that extport slot is unoccupied */
+
+ U32 num_devs_using;
+ /* When extport is added, this is set to 0. For extports
+ * located in NETWORK switches:
+ * Each time a VNIC, i.e., intport, is added to the switch this
+ * is used to assign a pref_pnic for the VNIC and when assigned
+ * to a VNIC this counter is incremented. When a VNIC is
+ * deleted, the extport corresponding to the VNIC's pref_pnic
+ * is located and its num_devs_using is decremented. For VNICs,
+ * num_devs_using is basically used to load-balance transmit
+ * traffic from VNICs.
+ */
+
+ struct switch_info *swtch;
+ struct PciId pci_id;
+ char name[MAX_NAME_SIZE_UISQUEUE];
+ union {
+ struct vhba_wwnn wwnn;
+ unsigned char macaddr[MAX_MACADDR_LEN];
+ };
+};
+
+struct device_info {
+ void __iomem *chanptr;
+ U64 channelAddr;
+ U64 channelBytes;
+ GUID channelTypeGuid;
+ GUID devInstGuid;
+ struct InterruptInfo intr;
+ struct switch_info *swtch;
+ char devid[30]; /* "vbus<busno>:dev<devno>" */
+ U16 polling;
+ struct semaphore interrupt_callback_lock;
+ U32 busNo;
+ U32 devNo;
+ int (*interrupt)(void *);
+ void *interrupt_context;
+ void *private_data;
+ struct list_head list_polling_device_channels;
+ unsigned long long moved_to_tail_cnt;
+ unsigned long long first_busy_cnt;
+ unsigned long long last_on_list_cnt;
+};
+
+typedef enum {
+ RECOVERY_LAN = 1,
+ IB_LAN = 2
+} SWITCH_TYPE;
+
+struct bus_info {
+ U32 busNo, deviceCount;
+ struct device_info **device;
+ U64 guestHandle, recvBusInterruptHandle;
+ GUID busInstGuid;
+ ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *pBusChannel;
+ int busChannelBytes;
+ struct proc_dir_entry *proc_dir; /* proc/uislib/vbus/<x> */
+ struct proc_dir_entry *proc_info; /* proc/uislib/vbus/<x>/info */
+ char name[25];
+ char partitionName[99];
+ struct bus_info *next;
+ U8 localVnic; /* 1 if local vnic created internally
+ * by IOVM; 0 otherwise... */
+};
+
+#define DEDICATED_SWITCH(pSwitch) ((pSwitch->extPortCount == 1) && \
+ (pSwitch->intPortCount == 1))
+
+struct sn_list_entry {
+ struct uisscsi_dest pdest; /* scsi bus, target, lun for
+ * phys disk */
+ U8 sernum[MAX_SERIAL_NUM]; /* serial num of physical
+ * disk.. The length is always
+ * MAX_SERIAL_NUM, padded with
+ * spaces */
+ struct sn_list_entry *next;
+};
+
+struct networkPolicy {
+ U32 promiscuous:1;
+ U32 macassign:1;
+ U32 peerforwarding:1;
+ U32 nonotify:1;
+ U32 standby:1;
+ U32 callhome:2;
+ char ip_addr[30];
+};
+
+/*
+ * IO messages sent to UisnicControlChanFunc & UissdControlChanFunc by
+ * code that processes the ControlVm channel messages.
+ */
+
+
+typedef enum {
+ IOPART_ADD_VNIC,
+ IOPART_DEL_VNIC,
+ IOPART_DEL_ALL_VNICS,
+ IOPART_ADD_VHBA,
+ IOPART_ADD_VDISK,
+ IOPART_DEL_VHBA,
+ IOPART_DEL_VDISK,
+ IOPART_DEL_ALL_VDISKS_FOR_VHBA,
+ IOPART_DEL_ALL_VHBAS,
+ IOPART_ATTACH_PHBA,
+ IOPART_DETACH_PHBA, /* 10 */
+ IOPART_ATTACH_PNIC,
+ IOPART_DETACH_PNIC,
+ IOPART_DETACH_VHBA,
+ IOPART_DETACH_VNIC,
+ IOPART_PAUSE_VDISK,
+ IOPART_RESUME_VDISK,
+ IOPART_ADD_DEVICE, /* add generic device */
+ IOPART_DEL_DEVICE, /* del generic device */
+} IOPART_MSG_TYPE;
+
+struct add_virt_iopart {
+ void *chanptr; /* pointer to data channel */
+ U64 guestHandle; /* used to convert guest physical
+ * address to real physical address
+ * for DMA, for ex. */
+ U64 recvBusInterruptHandle; /* used to register to receive
+ * bus level interrupts. */
+ struct InterruptInfo intr; /* contains recv & send
+ * interrupt info */
+ /* recvInterruptHandle is used to register to receive
+ * interrupts on the data channel. Used by GuestLinux/Windows
+ * IO drivers to connect to interrupt. sendInterruptHandle is
+ * used by IOPart drivers as parameter to
+ * Issue_VMCALL_IO_QUEUE_TRANSITION to interrupt thread in
+ * guest linux/windows IO drivers when data channel queue for
+ * vhba/vnic goes from EMPTY to NON-EMPTY. */
+ struct switch_info *swtch; /* pointer to the virtual
+ * switch to which the vnic is
+ * connected */
+
+ U8 useG2GCopy; /* Used to determine if a virtual HBA
+ * needs to use G2G copy. */
+ U8 Filler[7];
+
+ U32 busNo;
+ U32 devNo;
+ char *params;
+ ulong params_bytes;
+
+};
+
+struct add_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ int implicit;
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+ struct uisscsi_dest pdest; /* scsi bus, target, lun for phys disk */
+ U8 sernum[MAX_SERIAL_NUM]; /* serial num of physical disk */
+ U32 serlen; /* length of serial num */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct del_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct del_virt_iopart {
+ void *chanptr; /* pointer to data channel */
+ U32 busNo;
+ U32 devNo;
+};
+
+struct det_virt_iopart { /* detach internal port */
+ void *chanptr; /* pointer to data channel */
+ struct switch_info *swtch;
+};
+
+struct paures_vdisk_iopart {
+ void *chanptr; /* pointer to data channel */
+ struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
+};
+
+struct add_switch_iopart { /* add switch */
+ struct switch_info *swtch;
+ char *params;
+ ulong params_bytes;
+};
+
+struct del_switch_iopart { /* destroy switch */
+ struct switch_info *swtch;
+};
+
+struct io_msgs {
+
+ IOPART_MSG_TYPE msgtype;
+
+ /* additional params needed by some messages */
+ union {
+ struct add_virt_iopart add_vhba;
+ struct add_virt_iopart add_vnic;
+ struct add_vdisk_iopart add_vdisk;
+ struct del_virt_iopart del_vhba;
+ struct del_virt_iopart del_vnic;
+ struct det_virt_iopart det_vhba;
+ struct det_virt_iopart det_vnic;
+ struct del_vdisk_iopart del_vdisk;
+ struct del_virt_iopart del_all_vdisks_for_vhba;
+ struct add_virt_iopart add_device;
+ struct del_virt_iopart del_device;
+ struct det_virt_iopart det_intport;
+ struct add_switch_iopart add_switch;
+ struct del_switch_iopart del_switch;
+ struct extport_info *extPort; /* for attach or detach
+ * pnic; the generic "delete all
+ * vhbas"/"delete all vnics"
+ * messages need no parameters */
+ struct paures_vdisk_iopart paures_vdisk;
+ };
+};
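+
+/* Illustrative sketch (not part of this header): a sender typically fills
+ * in an io_msgs struct and hands it to one of the registered control
+ * channel callbacks, e.g. UisnicControlChanFunc declared in uisutils.h.
+ * The bus/device numbers below are hypothetical.
+ *
+ * struct io_msgs cmd;
+ *
+ * cmd.msgtype = IOPART_DEL_VNIC;
+ * cmd.del_vnic.chanptr = chanptr;
+ * cmd.del_vnic.busNo = 0;
+ * cmd.del_vnic.devNo = 1;
+ * if ((*UisnicControlChanFunc)(&cmd) == CCF_ERROR)
+ * LOGERR("IOPART_DEL_VNIC failed");
+ */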
+
+/*
+* Guest messages sent to VirtControlChanFunc by code that processes
+* the ControlVm channel messages.
+*/
+
+typedef enum {
+ GUEST_ADD_VBUS,
+ GUEST_ADD_VHBA,
+ GUEST_ADD_VNIC,
+ GUEST_DEL_VBUS,
+ GUEST_DEL_VHBA,
+ GUEST_DEL_VNIC,
+ GUEST_DEL_ALL_VHBAS,
+ GUEST_DEL_ALL_VNICS,
+ GUEST_DEL_ALL_VBUSES, /* deletes all vhbas & vnics on all
+ * buses and deletes all buses */
+ GUEST_PAUSE_VHBA,
+ GUEST_PAUSE_VNIC,
+ GUEST_RESUME_VHBA,
+ GUEST_RESUME_VNIC
+} GUESTPART_MSG_TYPE;
+
+struct add_vbus_guestpart {
+ void __iomem *chanptr; /* pointer to data channel for bus -
+ * NOT YET USED */
+ U32 busNo; /* bus number to be created/deleted */
+ U32 deviceCount; /* max num of devices on bus */
+ GUID busTypeGuid; /* indicates type of bus */
+ GUID busInstGuid; /* instance guid for device */
+};
+
+struct del_vbus_guestpart {
+ U32 busNo; /* bus number to be deleted */
+ /* once we start using the bus's channel, add can dump busNo
+ * into the channel header and then delete will need only one
+ * parameter, chanptr. */
+};
+
+struct add_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+ U32 busNo; /* bus number for the operation */
+ U32 deviceNo; /* number of device on the bus */
+ GUID devInstGuid; /* instance guid for device */
+ struct InterruptInfo intr; /* recv/send interrupt info */
+ /* recvInterruptHandle contains info needed in order to
+ * register to receive interrupts on the data channel.
+ * sendInterruptHandle contains handle which is provided to
+ * monitor VMCALL that will cause an interrupt to be generated
+ * for the other end.
+ */
+};
+
+struct pause_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct resume_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct del_virt_guestpart {
+ void __iomem *chanptr; /* pointer to data channel */
+};
+
+struct init_chipset_guestpart {
+ U32 busCount; /* indicates the max number of busses */
+ U32 switchCount; /* indicates the max number of switches */
+};
+
+struct guest_msgs {
+
+ GUESTPART_MSG_TYPE msgtype;
+
+ /* additional params needed by messages */
+ union {
+ struct add_vbus_guestpart add_vbus;
+ struct add_virt_guestpart add_vhba;
+ struct add_virt_guestpart add_vnic;
+ struct pause_virt_guestpart pause_vhba;
+ struct pause_virt_guestpart pause_vnic;
+ struct resume_virt_guestpart resume_vhba;
+ struct resume_virt_guestpart resume_vnic;
+ struct del_vbus_guestpart del_vbus;
+ struct del_virt_guestpart del_vhba;
+ struct del_virt_guestpart del_vnic;
+ struct del_vbus_guestpart del_all_vhbas;
+ struct del_vbus_guestpart del_all_vnics;
+ /* del_all_vbuses needs no parameters */
+ };
+ struct init_chipset_guestpart init_chipset;
+
+};
+
+#ifndef __xg
+#define __xg(x) ((volatile long *)(x))
+#endif
+
+/*
+* The code below was originally a copy of the Linux kernel's cmpxchg
+* function, located at
+* http://tcsxeon:8080/source/xref/00trunk-AppOS-linux/include/asm-x86/cmpxchg_64.h#84
+* The reason for creating our own version of cmpxchg, along with
+* UISLIB_LOCK_PREFIX, is to make the operation atomic even for non-SMP
+* guests.
+*/
+
+#define uislibcmpxchg64(p, o, n, s) cmpxchg(p, o, n)
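+
+/* Illustrative sketch only - uislibcmpxchg64 is used like the kernel's
+ * cmpxchg(): it returns the value that was previously at *p, so the swap
+ * took effect only if that return value equals the expected old value.
+ * The names head/old/new below are hypothetical.
+ *
+ * old = head;
+ * if (uislibcmpxchg64(&head, old, new, sizeof(head)) != old)
+ * goto retry;
+ */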
+
+#endif /* __UISQUEUE_H__ */
diff --git a/drivers/staging/unisys/include/uisthread.h b/drivers/staging/unisys/include/uisthread.h
new file mode 100644
index 000000000000..2b1fba759098
--- /dev/null
+++ b/drivers/staging/unisys/include/uisthread.h
@@ -0,0 +1,46 @@
+/* uisthread.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*****************************************************************************/
+/* Unisys thread utilities header */
+/*****************************************************************************/
+
+
+#ifndef __UISTHREAD_H__
+#define __UISTHREAD_H__
+
+
+#include "linux/completion.h"
+
+struct uisthread_info {
+ struct task_struct *task;
+ int id;
+ int should_stop;
+ struct completion has_stopped;
+};
+
+
+/* returns 0 for failure, 1 for success */
+int uisthread_start(
+ struct uisthread_info *thrinfo,
+ int (*threadfn)(void *),
+ void *thrcontext,
+ char *name);
+
+void uisthread_stop(struct uisthread_info *thrinfo);
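+
+/* Illustrative usage sketch (assumed, not part of this interface); the
+ * thread function and names below are hypothetical. The thread function
+ * is expected to poll thrinfo->should_stop and signal has_stopped when it
+ * exits, since uisthread_stop() presumably waits on that completion.
+ *
+ * static int my_threadfn(void *ctx)
+ * {
+ * struct uisthread_info *t = ctx;
+ * while (!t->should_stop)
+ * do_one_unit_of_work_then_sleep();
+ * complete_and_exit(&t->has_stopped, 0);
+ * }
+ *
+ * if (!uisthread_start(&thrinfo, my_threadfn, &thrinfo, "my_thread"))
+ * LOGERR("could not start thread");
+ * ...
+ * uisthread_stop(&thrinfo);
+ */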
+
+#endif /* __UISTHREAD_H__ */
diff --git a/drivers/staging/unisys/include/uisutils.h b/drivers/staging/unisys/include/uisutils.h
new file mode 100644
index 000000000000..5fdab3a3464a
--- /dev/null
+++ b/drivers/staging/unisys/include/uisutils.h
@@ -0,0 +1,348 @@
+/* uisutils.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual HBA utilities header
+ */
+
+#ifndef __UISUTILS__H__
+#define __UISUTILS__H__
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include "vmcallinterface.h"
+#include "channel.h"
+#include "uisthread.h"
+#include "uisqueue.h"
+#include "diagnostics/appos_subsystems.h"
+#include "vbusdeviceinfo.h"
+#include <linux/atomic.h>
+
+/* This is the MAGIC number stuffed by virthba in host->this_id. Used to
+ * identify virtual hbas.
+ */
+#define UIS_MAGIC_VHBA 707
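+/* e.g. (illustrative only): code holding a SCSI host pointer might test
+ * "shost->this_id == UIS_MAGIC_VHBA" to recognize one of our virtual HBAs;
+ * "shost" is a hypothetical name here.
+ */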
+
+/* global function pointers that act as callback functions into
+ * uisnicmod, uissdmod, and virtpcimod
+ */
+extern int (*UisnicControlChanFunc)(struct io_msgs *);
+extern int (*UissdControlChanFunc)(struct io_msgs *);
+extern int (*VirtControlChanFunc)(struct guest_msgs *);
+
+/* Return values of above callback functions: */
+#define CCF_ERROR 0 /* completed and failed */
+#define CCF_OK 1 /* completed successfully */
+#define CCF_PENDING 2 /* operation still pending */
+extern atomic_t UisUtils_Registered_Services;
+
+typedef unsigned int MACARRAY[MAX_MACADDR_LEN];
+typedef struct ReqHandlerInfo_struct {
+ GUID switchTypeGuid;
+ int (*controlfunc)(struct io_msgs *);
+ unsigned long min_channel_bytes;
+ int (*Server_Channel_Ok)(unsigned long channelBytes);
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr, U32 clientStrLen, U64 bytes);
+ char switch_type_name[99];
+ struct list_head list_link; /* links into ReqHandlerInfo_list */
+} ReqHandlerInfo_t;
+
+ReqHandlerInfo_t *ReqHandlerAdd(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes));
+ReqHandlerInfo_t *ReqHandlerFind(GUID switchTypeGuid);
+int ReqHandlerDel(GUID switchTypeGuid);
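+
+/* Illustrative flow (assumed): a service module registers a handler for its
+ * channel-type GUID, and the device-create path later looks the handler up
+ * to validate the channel size. Names prefixed "my" are hypothetical.
+ *
+ * ReqHandlerAdd(myTypeGuid, "mysvc", my_controlfunc,
+ * MY_MIN_CHANNEL_BYTES, my_channel_ok, my_channel_init);
+ * ...
+ * pReqHandler = ReqHandlerFind(dev->channelTypeGuid);
+ * if (pReqHandler)
+ * minSize = pReqHandler->min_channel_bytes;
+ */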
+
+#define uislib_ioremap_cache(addr, size) \
+ dbg_ioremap_cache(addr, size, __FILE__, __LINE__)
+
+static inline void __iomem *
+dbg_ioremap_cache(U64 addr, unsigned long size, char *file, int line)
+{
+ void __iomem *new;
+ new = ioremap_cache(addr, size);
+ return new;
+}
+
+#define uislib_ioremap(addr, size) dbg_ioremap(addr, size, __FILE__, __LINE__)
+
+static inline void *
+dbg_ioremap(U64 addr, unsigned long size, char *file, int line)
+{
+ void *new;
+ new = ioremap(addr, size);
+ return new;
+}
+
+#define uislib_iounmap(addr) dbg_iounmap(addr, __FILE__, __LINE__)
+
+static inline void
+dbg_iounmap(void __iomem *addr, char *file, int line)
+{
+ iounmap(addr);
+}
+
+#define PROC_READ_BUFFER_SIZE 131072 /* size of the buffer to allocate to
+ * hold all of /proc/XXX/info */
+int uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
+ char *format, ...);
+
+int uisctrl_register_req_handler(int type, void *fptr,
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
+int uisctrl_register_req_handler_ex(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*fptr)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes),
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
+
+int uisctrl_unregister_req_handler_ex(GUID switchTypeGuid);
+unsigned char *util_map_virt(struct phys_info *sg);
+void util_unmap_virt(struct phys_info *sg);
+unsigned char *util_map_virt_atomic(struct phys_info *sg);
+void util_unmap_virt_atomic(void *buf);
+int uislib_server_inject_add_vnic(U32 switchNo, U32 BusNo, U32 numIntPorts,
+ U32 numExtPorts, MACARRAY pmac[],
+ pCHANNEL_HEADER **chan);
+void uislib_server_inject_del_vnic(U32 switchNo, U32 busNo, U32 numIntPorts,
+ U32 numExtPorts);
+int uislib_client_inject_add_bus(U32 busNo, GUID instGuid,
+ U64 channelAddr, ulong nChannelBytes);
+int uislib_client_inject_del_bus(U32 busNo);
+
+int uislib_client_inject_add_vhba(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr);
+int uislib_client_inject_pause_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_resume_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_del_vhba(U32 busNo, U32 devNo);
+int uislib_client_inject_add_vnic(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr);
+int uislib_client_inject_pause_vnic(U32 busNo, U32 devNo);
+int uislib_client_inject_resume_vnic(U32 busNo, U32 devNo);
+int uislib_client_inject_del_vnic(U32 busNo, U32 devNo);
+#ifdef STORAGE_CHANNEL
+U64 uislib_storage_channel(int client_id);
+#endif
+int uislib_get_owned_pdest(struct uisscsi_dest *pdest);
+
+int uislib_send_event(CONTROLVM_ID id, CONTROLVM_MESSAGE_PACKET *event);
+
+/* structure used by vhba & vnic to keep track of queue & thread info */
+struct chaninfo {
+ struct uisqueue_info *queueinfo;
+ /* this specifies the queue structures for a channel */
+ /* ALLOCATED BY THE OTHER END - WE JUST GET A POINTER TO THE MEMORY */
+ spinlock_t insertlock;
+ /* currently used only in virtnic when sending data to uisnic */
+ /* to synchronize the inserts into the signal queue */
+ struct uisthread_info threadinfo;
+ /* this specifies the thread structures used by the thread that */
+ /* handles this channel */
+};
+
+/* This is the wait code for all the threads - it is used to wait for
+* something to arrive on a queue. Choices: wait_for_completion_interruptible,
+* _timeout, interruptible_timeout.
+*/
+#define UIS_THREAD_WAIT_MSEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(msecs_to_jiffies(x)); \
+}
+#define UIS_THREAD_WAIT_USEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(usecs_to_jiffies(x)); \
+}
+#define UIS_THREAD_WAIT UIS_THREAD_WAIT_MSEC(5)
+#define UIS_THREAD_WAIT_SEC(x) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout((x)*HZ); \
+}
+
+/* This is a hack until we fix IOVM to initialize the channel header
+ * correctly at DEVICE_CREATE time, INSTEAD OF waiting until
+ * DEVICE_CONFIGURE time.
+ */
+#define WAIT_FOR_VALID_GUID(guid) \
+ do { \
+ while (MEMCMP_IO(&guid, &Guid0, sizeof(Guid0)) == 0) { \
+ LOGERR("Waiting for non-0 GUID (why???)...\n"); \
+ UIS_THREAD_WAIT_SEC(5); \
+ } \
+ LOGERR("OK... GUID is non-0 now\n"); \
+ } while (0)
+
+/* uisutil_copy_fragsinfo_from_skb returns the number of entries added to
+ * the frags array, or -1 on failure.
+ */
+unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
+ void *skb_in,
+ unsigned int firstfraglen,
+ unsigned int frags_max,
+ struct phys_info frags[]);
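+
+/* Illustrative call sketch (variable names are hypothetical): a transmit
+ * path might describe an skb as physical fragments before posting it to
+ * the IO partition.
+ *
+ * int count;
+ *
+ * count = uisutil_copy_fragsinfo_from_skb("xmit", skb, skb_headlen(skb),
+ * frags_max, frags);
+ * if (count < 0)
+ * LOGERR("skb has too many fragments to describe");
+ */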
+
+static inline unsigned int
+Issue_VMCALL_IO_CONTROLVM_ADDR(U64 *ControlAddress, U32 *ControlBytes)
+{
+ VMCALL_IO_CONTROLVM_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result)) {
+ *ControlAddress = params.ChannelAddress;
+ *ControlBytes = params.ChannelBytes;
+ }
+ return result;
+}
+
+static inline unsigned int Issue_VMCALL_IO_DIAG_ADDR(U64 *DiagChannelAddress)
+{
+ VMCALL_IO_DIAG_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_DIAG_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result))
+ *DiagChannelAddress = params.ChannelAddress;
+ return result;
+}
+
+static inline unsigned int
+Issue_VMCALL_IO_VISORSERIAL_ADDR(U64 *DiagChannelAddress)
+{
+ VMCALL_IO_VISORSERIAL_ADDR_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_IO_VISORSERIAL_ADDR, physaddr, result);
+ if (VMCALL_SUCCESSFUL(result))
+ *DiagChannelAddress = params.ChannelAddress;
+ return result;
+}
+
+static inline S64 Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET(void)
+{
+ U64 result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
+ result);
+ return result;
+}
+
+static inline S64 Issue_VMCALL_MEASUREMENT_DO_NOTHING(void)
+{
+ U64 result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_MEASUREMENT_DO_NOTHING, physaddr, result);
+ return result;
+}
+
+struct log_info_t {
+ volatile unsigned long long last_cycles;
+ unsigned long long delta_sum[64];
+ unsigned long long delta_cnt[64];
+ unsigned long long max_delta[64];
+ unsigned long long min_delta[64];
+};
+
+static inline int Issue_VMCALL_UPDATE_PHYSICAL_TIME(U64 adjustment)
+{
+ int result = VMCALL_SUCCESS;
+
+ ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
+ return result;
+}
+
+static inline unsigned int
+Issue_VMCALL_CHANNEL_MISMATCH(const char *ChannelName,
+ const char *ItemName,
+ U32 SourceLineNumber, const char *path_n_fn)
+{
+ VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS params;
+ int result = VMCALL_SUCCESS;
+ U64 physaddr;
+ char *last_slash = NULL;
+
+ strncpy(params.ChannelName, ChannelName,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ChannelName));
+ strncpy(params.ItemName, ItemName,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ItemName));
+ params.SourceLineNumber = SourceLineNumber;
+
+ last_slash = strrchr(path_n_fn, '/');
+ if (last_slash != NULL) {
+ last_slash++;
+ strncpy(params.SourceFileName, last_slash,
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
+ SourceFileName));
+ } else
+ strncpy(params.SourceFileName,
+ "Cannot determine source filename",
+ lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
+ SourceFileName));
+
+ physaddr = virt_to_phys(&params);
+ ISSUE_IO_VMCALL(VMCALL_CHANNEL_VERSION_MISMATCH, physaddr, result);
+ return result;
+}
+
+static inline unsigned int Issue_VMCALL_FATAL_BYE_BYE(void)
+{
+ int result = VMCALL_SUCCESS;
+ U64 physaddr = 0;
+
+ ISSUE_IO_VMCALL(VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER, physaddr,
+ result);
+ return result;
+}
+
+#define UIS_DAEMONIZE(nam)
+void *uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln);
+#define UISCACHEALLOC(cur_pool) uislib_cache_alloc(cur_pool, __FILE__, __LINE__)
+void uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln);
+#define UISCACHEFREE(cur_pool, p) \
+ uislib_cache_free(cur_pool, p, __FILE__, __LINE__)
+
+void uislib_enable_channel_interrupts(U32 busNo, U32 devNo,
+ int (*interrupt)(void *),
+ void *interrupt_context);
+void uislib_disable_channel_interrupts(U32 busNo, U32 devNo);
+void uislib_force_channel_interrupt(U32 busNo, U32 devNo);
+
+#endif /* __UISUTILS__H__ */
diff --git a/drivers/staging/unisys/include/uniklog.h b/drivers/staging/unisys/include/uniklog.h
new file mode 100644
index 000000000000..4d7b87cefa61
--- /dev/null
+++ b/drivers/staging/unisys/include/uniklog.h
@@ -0,0 +1,193 @@
+/* uniklog.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This module contains macros to aid developers in logging messages.
+ *
+ * This module is affected by the DEBUG compiletime option.
+ *
+ */
+#ifndef __UNIKLOG_H__
+#define __UNIKLOG_H__
+
+
+#include <linux/printk.h>
+
+/*
+ * # DBGINF
+ *
+ * \brief Log debug informational message - log a LOG_INFO message only
+ * if DEBUG compiletime option enabled
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Log a message at the LOG_INFO level, but only if DEBUG is enabled. If
+ * DEBUG is disabled, this expands to a no-op.
+ */
+
+/*
+ * # DBGVER
+ *
+ * \brief Log debug verbose message - log a LOG_DEBUG message only if
+ * DEBUG compiletime option enabled
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Log a message at the LOG_DEBUG level, but only if DEBUG is enabled. If
+ * DEBUG is disabled, this expands to a no-op. Note also that LOG_DEBUG
+ * messages can be enabled/disabled at runtime as well.
+ */
+#define DBGINFDEV(devname, fmt, args...) do { } while (0)
+#define DBGVERDEV(devname, fmt, args...) do { } while (0)
+#define DBGINF(fmt, args...) do { } while (0)
+#define DBGVER(fmt, args...) do { } while (0)
+
+/*
+ * # LOGINF
+ *
+ * \brief Log informational message - logs a message at the LOG_INFO level
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified message at the LOG_INFO level.
+ */
+
+#define LOGINF(fmt, args...) pr_info(fmt, ## args)
+#define LOGINFDEV(devname, fmt, args...) \
+ pr_info("%s " fmt, devname, ## args)
+#define LOGINFDEVX(devno, fmt, args...) \
+ pr_info("dev%d " fmt, devno, ## args)
+#define LOGINFNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_info("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_info(fmt, ## args); \
+ } \
+ } while (0)
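+
+/* For example (illustrative; "vnic" stands for any object with a ->name):
+ *
+ * LOGINF("uislib initialized\n");
+ * LOGINFDEV("vhba0", "channel mapped at %p\n", chanptr);
+ * LOGINFNAME(vnic, "link is up\n");
+ */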
+
+/*
+ * # LOGVER
+ *
+ * \brief Log verbose message - logs a message at the LOG_DEBUG level,
+ * which can be disabled at runtime
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified message at the LOG_DEBUG level. Note also that
+ * LOG_DEBUG messages can be enabled/disabled at runtime as well.
+ */
+#define LOGVER(fmt, args...) pr_debug(fmt, ## args)
+#define LOGVERDEV(devname, fmt, args...) \
+ pr_debug("%s " fmt, devname, ## args)
+#define LOGVERNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_debug("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_debug(fmt, ## args); \
+ } \
+ } while (0)
+
+
+/*
+ * # LOGERR
+ *
+ * \brief Log error message - logs a message at the LOG_ERR level,
+ * including source line number information
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified error message at the LOG_ERR level. It will also
+ * include the file, line number, and function name of where the error
+ * originated in the log message.
+ */
+#define LOGERR(fmt, args...) pr_err(fmt, ## args)
+#define LOGERRDEV(devname, fmt, args...) \
+ pr_err("%s " fmt, devname, ## args)
+#define LOGERRDEVX(devno, fmt, args...) \
+ pr_err("dev%d " fmt, devno, ## args)
+#define LOGERRNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_err("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_err(fmt, ## args); \
+ } \
+ } while (0)
+#define LOGORDUMPERR(seqfile, fmt, args...) do { \
+ if (seqfile) { \
+ seq_printf(seqfile, fmt, ## args); \
+ } else { \
+ LOGERR(fmt, ## args); \
+ } \
+ } while (0)
+
+/*
+ * # LOGWRN
+ *
+ * \brief Log warning message - Logs a message at the LOG_WARNING level,
+ * including source line number information
+ *
+ * \param devname the device name of the device reporting this message, or
+ * NULL if this message is NOT device-related.
+ * \param fmt printf()-style format string containing the message to log.
+ * \param args Optional arguments to be formatted and inserted into the
+ * format string.
+ * \return nothing
+ *
+ * Logs the specified error message at the LOG_WARNING level. It will also
+ * include the file, line number, and function name of where the error
+ * originated in the log message.
+ */
+#define LOGWRN(fmt, args...) pr_warn(fmt, ## args)
+#define LOGWRNDEV(devname, fmt, args...) \
+ pr_warn("%s " fmt, devname, ## args)
+#define LOGWRNNAME(vnic, fmt, args...) \
+ do { \
+ if (vnic != NULL) { \
+ pr_warn("%s " fmt, vnic->name, ## args); \
+ } else { \
+ pr_warn(fmt, ## args); \
+ } \
+ } while (0)
+
+#endif /* __UNIKLOG_H__ */
diff --git a/drivers/staging/unisys/include/vbushelper.h b/drivers/staging/unisys/include/vbushelper.h
new file mode 100644
index 000000000000..93e35f039ded
--- /dev/null
+++ b/drivers/staging/unisys/include/vbushelper.h
@@ -0,0 +1,47 @@
+/* vbushelper.h
+ *
+ * Copyright © 2011 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VBUSHELPER_H__
+#define __VBUSHELPER_H__
+
+#include "vbusdeviceinfo.h"
+
+/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the
+ * command line */
+
+#define TARGET_HOSTNAME "linuxguest"
+
+static inline void
+BusDeviceInfo_Init(ULTRA_VBUS_DEVICEINFO *pBusDeviceInfo,
+ const char *deviceType, const char *driverName,
+ const char *ver, const char *verTag,
+ const char *buildDate, const char *buildTime)
+{
+ memset(pBusDeviceInfo, 0, sizeof(ULTRA_VBUS_DEVICEINFO));
+ snprintf(pBusDeviceInfo->devType, sizeof(pBusDeviceInfo->devType),
+ "%s", (deviceType) ? deviceType : "unknownType");
+ snprintf(pBusDeviceInfo->drvName, sizeof(pBusDeviceInfo->drvName),
+ "%s", (driverName) ? driverName : "unknownDriver");
+ snprintf(pBusDeviceInfo->infoStrings,
+ sizeof(pBusDeviceInfo->infoStrings), "%s\t%s\t%s %s\t%s",
+ (ver) ? ver : "unknownVer",
+ (verTag) ? verTag : "unknownVerTag",
+ (buildDate) ? buildDate : "noBuildDate",
+ (buildTime) ? buildTime : "nobuildTime", TARGET_HOSTNAME);
+}
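+
+/* Typical call sketch (illustrative; the strings passed are hypothetical):
+ *
+ * ULTRA_VBUS_DEVICEINFO devinfo;
+ *
+ * BusDeviceInfo_Init(&devinfo, "vhba", "virthba",
+ * "1.0.0.0", NULL, __DATE__, __TIME__);
+ */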
+
+#endif
diff --git a/drivers/staging/unisys/uislib/Kconfig b/drivers/staging/unisys/uislib/Kconfig
new file mode 100644
index 000000000000..8d87d9c81c66
--- /dev/null
+++ b/drivers/staging/unisys/uislib/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys uislib configuration
+#
+
+config UNISYS_UISLIB
+ tristate "Unisys uislib driver"
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB
+ ---help---
+ If you say Y here, you will enable the Unisys uislib driver.
+
diff --git a/drivers/staging/unisys/uislib/Makefile b/drivers/staging/unisys/uislib/Makefile
new file mode 100644
index 000000000000..6e44d49458f5
--- /dev/null
+++ b/drivers/staging/unisys/uislib/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Unisys uislib
+#
+
+obj-$(CONFIG_UNISYS_UISLIB) += visoruislib.o
+
+visoruislib-y := uislib.o uisqueue.o uisthread.o uisutils.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/channels
+ccflags-y += -Idrivers/staging/unisys/visorchipset
+ccflags-y += -Idrivers/staging/unisys/sparstopdriver
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
new file mode 100644
index 000000000000..8ea9c46e56ae
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -0,0 +1,2421 @@
+/* uislib.c
+ *
+ * Copyright � 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#define EXPORT_SYMTAB
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include "commontypes.h"
+
+#include <linux/version.h>
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "vbuschannel.h"
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/ctype.h> /* for toupper */
+#include <linux/list.h>
+
+#include "sparstop.h"
+#include "visorchipset.h"
+#include "chanstub.h"
+#include "version.h"
+#include "guestlinuxdebug.h"
+
+#define SET_PROC_OWNER(x, y)
+
+#define UISLIB_TEST_PROC
+#define POLLJIFFIES_NORMAL 1
+/* Choose whether or not you want to wakeup the request-polling thread
+ * after an IO termination.
+ */
+/* __MYFILE__ is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages.
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uislib_c
+#define __MYFILE__ "uislib.c"
+
+/* global function pointers that act as callback functions into virtpcimod */
+int (*VirtControlChanFunc)(struct guest_msgs *);
+
+static int ProcReadBufferValid;
+static char *ProcReadBuffer; /* Note this MUST be global,
+ * because the contents must
+ * persist across multiple reads */
+static unsigned int chipset_inited;
+
+#define WAIT_ON_CALLBACK(handle) \
+ do { \
+ if (handle) \
+ break; \
+ UIS_THREAD_WAIT; \
+ } while (1)
+
+static struct bus_info *BusListHead;
+static rwlock_t BusListLock;
+static int BusListCount; /* number of buses in the list */
+static int MaxBusCount; /* maximum number of buses expected */
+static U64 PhysicalDataChan;
+static int PlatformNumber;
+
+static struct uisthread_info Incoming_ThreadInfo;
+static BOOL Incoming_Thread_Started = FALSE;
+static LIST_HEAD(List_Polling_Device_Channels);
+static unsigned long long tot_moved_to_tail_cnt;
+static unsigned long long tot_wait_cnt;
+static unsigned long long tot_wakeup_cnt;
+static unsigned long long tot_schedule_cnt;
+static int en_smart_wakeup = 1;
+static DEFINE_SEMAPHORE(Lock_Polling_Device_Channels); /* unlocked */
+static DECLARE_WAIT_QUEUE_HEAD(Wakeup_Polling_Device_Channels);
+static int Go_Polling_Device_Channels;
+
+static struct proc_dir_entry *uislib_proc_dir;
+static struct proc_dir_entry *uislib_proc_vbus_dir;
+static struct proc_dir_entry *vnic_proc_entry; /* Used to be "datachan" */
+static struct proc_dir_entry *ctrlchan_proc_entry;
+static struct proc_dir_entry *pmem_proc_entry;
+static struct proc_dir_entry *info_proc_entry;
+static struct proc_dir_entry *switch_proc_entry;
+static struct proc_dir_entry *extport_proc_entry;
+static struct proc_dir_entry *platformnumber_proc_entry;
+static struct proc_dir_entry *bus_proc_entry;
+static struct proc_dir_entry *dev_proc_entry;
+static struct proc_dir_entry *chipset_proc_entry;
+static struct proc_dir_entry *cycles_before_wait_proc_entry;
+static struct proc_dir_entry *reset_counts_proc_entry;
+static struct proc_dir_entry *smart_wakeup_proc_entry;
+static struct proc_dir_entry *disable_proc_entry;
+
+#define DIR_PROC_ENTRY "uislib"
+#define DIR_VBUS_PROC_ENTRY "vbus"
+#define VNIC_PROC_ENTRY_FN "vnic" /* Used to be "datachan" */
+#define CTRLCHAN_PROC_ENTRY_FN "ctrlchan"
+#define PMEM_PROC_ENTRY_FN "phys_to_virt"
+#define INFO_PROC_ENTRY_FN "info"
+#define SWITCH_PROC_ENTRY_FN "switch"
+#define SWITCH_COUNT_PROC_ENTRY_FN "switch_count"
+#define EXTPORT_PROC_ENTRY_FN "extport"
+#define PLATFORMNUMBER_PROC_ENTRY_FN "platform"
+#define BUS_PROC_ENTRY_FN "bus"
+#define DEV_PROC_ENTRY_FN "device"
+#define CHIPSET_PROC_ENTRY_FN "chipset"
+#define CYCLES_BEFORE_WAIT_PROC_ENTRY_FN "cycles_before_wait"
+#define RESET_COUNTS_PROC_ENTRY_FN "reset_counts"
+#define SMART_WAKEUP_PROC_ENTRY_FN "smart_wakeup"
+#define CALLHOME_PROC_ENTRY_FN "callhome"
+#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"
+#define DISABLE_PROC_ENTRY_FN "switch_state"
+#ifdef UISLIB_TEST_PROC
+static struct proc_dir_entry *test_proc_entry;
+#define TEST_PROC_ENTRY_FN "test"
+#endif
+static unsigned long long cycles_before_wait, wait_cycles;
+
+/*****************************************************/
+/* local functions */
+/*****************************************************/
+
+static int proc_info_vbus_show(struct seq_file *m, void *v);
+static int
+proc_info_vbus_open(struct inode *inode, struct file *filp)
+{
+ /* proc_info_vbus_show will grab this from seq_file.private: */
+ struct bus_info *bus = PDE_DATA(inode);
+ return single_open(filp, proc_info_vbus_show, bus);
+}
+
+static const struct file_operations proc_info_vbus_fops = {
+ .open = proc_info_vbus_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t uislib_proc_read_writeonly(struct file *file,
+ char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static ssize_t vnic_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations proc_vnic_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = vnic_proc_write,
+};
+
+static ssize_t chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations proc_chipset_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = chipset_proc_write,
+};
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+static ssize_t platformnumber_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static const struct file_operations proc_platformnumber_fops = {
+ .read = platformnumber_proc_read,
+};
+
+static ssize_t cycles_before_wait_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_cycles_before_wait_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = cycles_before_wait_proc_write,
+};
+
+static ssize_t reset_counts_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_reset_counts_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = reset_counts_proc_write,
+};
+
+static ssize_t smart_wakeup_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_smart_wakeup_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = smart_wakeup_proc_write,
+};
+
+static ssize_t test_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_test_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = test_proc_write,
+};
+
+static ssize_t bus_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_bus_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = bus_proc_write,
+};
+
+static ssize_t dev_proc_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_dev_fops = {
+ .read = uislib_proc_read_writeonly,
+ .write = dev_proc_write,
+};
+
+static void
+init_msg_header(CONTROLVM_MESSAGE *msg, U32 id, uint rsp, uint svr)
+{
+ memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
+ msg->hdr.Id = id;
+ msg->hdr.Flags.responseExpected = rsp;
+ msg->hdr.Flags.server = svr;
+}
+
+static void
+create_bus_proc_entries(struct bus_info *bus)
+{
+ bus->proc_dir = proc_mkdir(bus->name, uislib_proc_vbus_dir);
+ if (!bus->proc_dir) {
+ LOGERR("failed to create /proc/uislib/vbus/%s directory",
+ bus->name);
+ return;
+ }
+ bus->proc_info = proc_create_data("info", 0, bus->proc_dir,
+ &proc_info_vbus_fops, bus);
+ if (!bus->proc_info) {
+ LOGERR("failed to create /proc/uislib/vbus/%s/info", bus->name);
+ remove_proc_entry(bus->name, uislib_proc_vbus_dir);
+ bus->proc_dir = NULL;
+ return;
+ }
+ SET_PROC_OWNER(bus->proc_info, THIS_MODULE);
+
+}
+
+static __iomem void *
+init_vbus_channel(U64 channelAddr, U32 channelBytes, int isServer)
+{
+ void *rc = NULL;
+ void __iomem *pChan = uislib_ioremap_cache(channelAddr, channelBytes);
+ if (!pChan) {
+ LOGERR("CONTROLVM_BUS_CREATE error: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
+ (unsigned long long) channelAddr,
+ (unsigned long long) channelBytes);
+ rc = NULL;
+ goto Away;
+ }
+ if (isServer) {
+ memset_io(pChan, 0, channelBytes);
+ if (!ULTRA_VBUS_CHANNEL_OK_SERVER(channelBytes, NULL)) {
+ ERRDRV("%s channel cannot be used", __func__);
+ uislib_iounmap(pChan);
+ rc = NULL;
+ goto Away;
+ }
+ ULTRA_VBUS_init_channel(pChan, channelBytes);
+ } else {
+ if (!ULTRA_VBUS_CHANNEL_OK_CLIENT(pChan, NULL)) {
+ ERRDRV("%s channel cannot be used", __func__);
+ uislib_iounmap(pChan);
+ rc = NULL;
+ goto Away;
+ }
+ }
+ rc = pChan;
+Away:
+ return rc;
+}
+
+static int
+create_bus(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ U32 busNo, deviceCount;
+ struct bus_info *tmp, *bus;
+ size_t size;
+
+ if (MaxBusCount == BusListCount) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: max buses:%d already created\n",
+ MaxBusCount);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, MaxBusCount,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_MAX_BUSES;
+ }
+
+ busNo = msg->cmd.createBus.busNo;
+ deviceCount = msg->cmd.createBus.deviceCount;
+
+ POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, busNo, deviceCount,
+ POSTCODE_SEVERITY_INFO);
+
+ size =
+ sizeof(struct bus_info) +
+ (deviceCount * sizeof(struct device_info *));
+ bus = kzalloc(size, GFP_ATOMIC);
+ if (!bus) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: kmalloc for bus failed.\n");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ /* Currently by default, the bus Number is the GuestHandle.
+ * Configure Bus message can override this.
+ */
+ if (msg->hdr.Flags.testMessage) {
+ /* This implies we're the IOVM so set guest handle to 0... */
+ bus->guestHandle = 0;
+ bus->busNo = busNo;
+ bus->localVnic = 1;
+ } else
+ bus->busNo = bus->guestHandle = busNo;
+ sprintf(bus->name, "%d", (int) bus->busNo);
+ bus->deviceCount = deviceCount;
+ bus->device =
+ (struct device_info **) ((char *) bus + sizeof(struct bus_info));
+ bus->busInstGuid = msg->cmd.createBus.busInstGuid;
+ bus->busChannelBytes = 0;
+ bus->pBusChannel = NULL;
+
+ /* add bus to our bus list - but check for duplicates first */
+ read_lock(&BusListLock);
+ for (tmp = BusListHead; tmp; tmp = tmp->next) {
+ if (tmp->busNo == bus->busNo)
+ break;
+ }
+ read_unlock(&BusListLock);
+ if (tmp) {
+ /* found a bus already in the list with same busNo -
+ * reject add
+ */
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %d already exists.\n",
+ bus->busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ kfree(bus);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ if ((msg->cmd.createBus.channelAddr != 0)
+ && (msg->cmd.createBus.channelBytes != 0)) {
+ bus->busChannelBytes = msg->cmd.createBus.channelBytes;
+ bus->pBusChannel =
+ init_vbus_channel(msg->cmd.createBus.channelAddr,
+ msg->cmd.createBus.channelBytes,
+ msg->hdr.Flags.server);
+ }
+ /* the msg is bound for virtpci; send guest_msgs struct to callback */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ cmd.msgtype = GUEST_ADD_VBUS;
+ cmd.add_vbus.busNo = busNo;
+ cmd.add_vbus.chanptr = bus->pBusChannel;
+ cmd.add_vbus.deviceCount = deviceCount;
+ cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid;
+ cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid;
+ if (!VirtControlChanFunc) {
+ kfree(bus);
+ LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered.");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!VirtControlChanFunc(&cmd)) {
+ kfree(bus);
+ LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error.");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_SEVERITY_ERR);
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ create_bus_proc_entries(bus);
+
+ /* add bus at the head of our list */
+ write_lock(&BusListLock);
+ if (!BusListHead)
+ BusListHead = bus;
+ else {
+ bus->next = BusListHead;
+ BusListHead = bus;
+ }
+ BusListCount++;
+ write_unlock(&BusListLock);
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->busNo,
+ POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ int i;
+ struct bus_info *bus, *prev = NULL;
+ U32 busNo;
+
+ busNo = msg->cmd.destroyBus.busNo;
+
+ /* find and delete the bus */
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; prev = bus, bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* found the bus - ensure that all device
+ * slots are NULL
+ */
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i] != NULL) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: device %i attached to bus %d.",
+ i, busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
+ }
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ cmd.msgtype = GUEST_DEL_VBUS;
+ cmd.del_vbus.busNo = busNo;
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci callback not registered.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci GUEST_DEL_VBUS returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ /* remove the bus from the list */
+ write_lock(&BusListLock);
+ if (prev) /* not at head */
+ prev->next = bus->next;
+ else
+ BusListHead = bus->next;
+ BusListCount--;
+ write_unlock(&BusListLock);
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: failed to find bus %d.\n",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ if (bus->proc_info) {
+ remove_proc_entry("info", bus->proc_dir);
+ bus->proc_info = NULL;
+ }
+ if (bus->proc_dir) {
+ remove_proc_entry(bus->name, uislib_proc_vbus_dir);
+ bus->proc_dir = NULL;
+ }
+ if (bus->pBusChannel) {
+ uislib_iounmap(bus->pBusChannel);
+ bus->pBusChannel = NULL;
+ }
+
+ kfree(bus);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+create_device(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ struct device_info *dev;
+ struct bus_info *bus;
+ U32 busNo, devNo;
+ int result = CONTROLVM_RESP_SUCCESS;
+ U64 minSize = MIN_IO_CHANNEL_SIZE;
+ ReqHandlerInfo_t *pReqHandler;
+
+ busNo = msg->cmd.createDevice.busNo;
+ devNo = msg->cmd.createDevice.devNo;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ dev = kzalloc(sizeof(struct device_info), GFP_ATOMIC);
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: kmalloc for dev failed.\n");
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ dev->channelTypeGuid = msg->cmd.createDevice.dataTypeGuid;
+ dev->intr = msg->cmd.createDevice.intr;
+ dev->channelAddr = msg->cmd.createDevice.channelAddr;
+ dev->busNo = busNo;
+ dev->devNo = devNo;
+ sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
+ sprintf(dev->devid, "vbus%u:dev%u", (unsigned) busNo, (unsigned) devNo);
+ /* map the channel memory for the device. */
+ if (msg->hdr.Flags.testMessage)
+ dev->chanptr = (void __iomem *)__va(dev->channelAddr);
+ else {
+ pReqHandler = ReqHandlerFind(dev->channelTypeGuid);
+ if (pReqHandler)
+ /* generic service handler registered for this
+ * channel
+ */
+ minSize = pReqHandler->min_channel_bytes;
+ if (minSize > msg->cmd.createDevice.channelBytes) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: channel size is too small, channel size:0x%lx, required size:0x%lx",
+ (ulong) msg->cmd.createDevice.channelBytes,
+ (ulong) minSize);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
+ goto Away;
+ }
+ dev->chanptr =
+ uislib_ioremap_cache(dev->channelAddr,
+ msg->cmd.createDevice.channelBytes);
+ if (!dev->chanptr) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
+ dev->channelAddr,
+ msg->cmd.createDevice.channelBytes);
+ result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ goto Away;
+ }
+ }
+ dev->devInstGuid = msg->cmd.createDevice.devInstGuid;
+ dev->channelBytes = msg->cmd.createDevice.channelBytes;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
+ devNo, bus->deviceCount);
+ result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ read_unlock(&BusListLock);
+ goto Away;
+ }
+ /* make sure this device is not already set */
+ if (bus->device[devNo]) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: device %d is already exists.",
+ devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ read_unlock(&BusListLock);
+ goto Away;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!msg->hdr.Flags.server) {
+ struct guest_msgs cmd;
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid,
+ sizeof(GUID))) {
+ WAIT_FOR_VALID_GUID(((CHANNEL_HEADER
+ __iomem *) (dev->
+ chanptr))->
+ Type);
+ if (!ULTRA_VHBA_CHANNEL_OK_CLIENT
+ (dev->chanptr, NULL)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
+ devNo);
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto Away;
+ }
+ cmd.msgtype = GUEST_ADD_VHBA;
+ cmd.add_vhba.chanptr = dev->chanptr;
+ cmd.add_vhba.busNo = busNo;
+ cmd.add_vhba.deviceNo = devNo;
+ cmd.add_vhba.devInstGuid =
+ dev->devInstGuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ WAIT_FOR_VALID_GUID(((CHANNEL_HEADER
+ __iomem *) (dev->
+ chanptr))->
+ Type);
+ if (!ULTRA_VNIC_CHANNEL_OK_CLIENT
+ (dev->chanptr, NULL)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
+ devNo);
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC,
+ devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto Away;
+ }
+ cmd.msgtype = GUEST_ADD_VNIC;
+ cmd.add_vnic.chanptr = dev->chanptr;
+ cmd.add_vnic.busNo = busNo;
+ cmd.add_vnic.deviceNo = devNo;
+ cmd.add_vnic.devInstGuid =
+ dev->devInstGuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: unknown channelTypeGuid.\n");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ goto Away;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ goto Away;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
+ POSTCODE_LINUX_4
+ (DEVICE_CREATE_FAILURE_PC, devNo,
+ busNo, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ goto Away;
+ }
+ }
+ bus->device[devNo] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+ }
+ }
+ read_unlock(&BusListLock);
+
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: failed to find bus %d.", busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_BUS_INVALID;
+
+Away:
+ if (!msg->hdr.Flags.testMessage) {
+ uislib_iounmap(dev->chanptr);
+ dev->chanptr = NULL;
+ }
+
+ kfree(dev);
+ return result;
+}
+
+static int
+pause_device(CONTROLVM_MESSAGE *msg)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.deviceChangeState.busNo;
+ devNo = msg->cmd.deviceChangeState.devNo;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid, sizeof(GUID))) {
+ cmd.msgtype = GUEST_PAUSE_VHBA;
+ cmd.pause_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_PAUSE_VNIC;
+ cmd.pause_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: virtpci GUEST_PAUSE_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+resume_device(CONTROLVM_MESSAGE *msg)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.deviceChangeState.busNo;
+ devNo = msg->cmd.deviceChangeState.devNo;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp(&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_RESUME_VHBA;
+ cmd.resume_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp(&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_RESUME_VNIC;
+ cmd.resume_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: virtpci GUEST_RESUME_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ U32 busNo, devNo;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+
+ busNo = msg->cmd.destroyDevice.busNo;
+ devNo = msg->cmd.destroyDevice.devNo;
+
+ read_lock(&BusListLock);
+ LOGINF("destroy_device called for busNo=%u, devNo=%u", busNo, devNo);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("CONTROLVM_DEVICE_DESTORY Failed: device(%d) >= deviceCount(%d).",
+ devNo, bus->deviceCount);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+ /* make sure this device exists */
+ dev = bus->device[devNo];
+ if (!dev) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: device %d does not exist.",
+ devNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ read_unlock(&BusListLock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVhbaChannelProtocolGuid, sizeof(GUID))) {
+ cmd.msgtype = GUEST_DEL_VHBA;
+ cmd.del_vhba.chanptr = dev->chanptr;
+ } else
+ if (!memcmp
+ (&dev->channelTypeGuid,
+ &UltraVnicChannelProtocolGuid,
+ sizeof(GUID))) {
+ cmd.msgtype = GUEST_DEL_VNIC;
+ cmd.del_vnic.chanptr = dev->chanptr;
+ } else {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: unknown channelTypeGuid.\n");
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+
+ if (!VirtControlChanFunc) {
+ LOGERR("CONTROLVM_DEVICE_DESTORY Failed: virtpci callback not registered.");
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+
+ if (!VirtControlChanFunc(&cmd)) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: virtpci GUEST_DEL_[VHBA||VNIC] returned error.");
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+/* You must disable channel interrupts BEFORE you unmap the channel,
+ * because if you unmap first, there may still be activity in flight that
+ * accesses the channel, and you will get an "unable to handle kernel
+ * paging request" fault.
+ */
+ if (dev->polling) {
+ LOGINF("calling uislib_disable_channel_interrupts");
+ uislib_disable_channel_interrupts(busNo, devNo);
+ }
+ /* unmap the channel memory for the device. */
+ if (!msg->hdr.Flags.testMessage) {
+ LOGINF("destroy_device, doing iounmap");
+ uislib_iounmap(dev->chanptr);
+ }
+ kfree(dev);
+ bus->device[devNo] = NULL;
+ break;
+ }
+ }
+
+ if (!bus) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: bus %d does not exist",
+ busNo);
+ read_unlock(&BusListLock);
+ return CONTROLVM_RESP_ERROR_BUS_INVALID;
+ }
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ MaxBusCount = msg->cmd.initChipset.busCount;
+ PlatformNumber = msg->cmd.initChipset.platformNumber;
+ PhysicalDataChan = 0;
+
+ /* We need to make sure we have our functions registered
+ * before processing messages. If we are a test vehicle the
+ * testMessage for init_chipset will be set. We can ignore the
+ * waits for the callbacks, since this will be manually entered
+ * from a user. If no testMessage is set, we will wait for the
+ * functions.
+ */
+ if (!msg->hdr.Flags.testMessage)
+ WAIT_ON_CALLBACK(VirtControlChanFunc);
+
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+stop_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+{
+ /* Check that all buses and switches have been torn down and
+ * destroyed.
+ */
+ if (BusListHead) {
+ /* Buses still exist. */
+ LOGERR("CONTROLVM_CHIPSET_STOP: BusListHead is not NULL");
+ return CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS;
+ }
+ if (BusListCount) {
+ /* BusListHead is NULL, but BusListCount != 0 */
+ LOGERR("CONTROLVM_CHIPSET_STOP: BusListCount != 0");
+ return CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS;
+ }
+
+ /* Buses are shut down. */
+ return visorchipset_chipset_notready();
+}
+
+static int
+delete_bus_glue(U32 busNo)
+{
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("destroy_bus failed. busNo=0x%x\n", busNo);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+delete_device_glue(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+ if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("destroy_device failed. busNo=0x%x devNo=0x%x\n", busNo,
+ devNo);
+ return 0;
+ }
+ return 1;
+}
+
+int
+uislib_client_inject_add_bus(U32 busNo, GUID instGuid,
+ U64 channelAddr, ulong nChannelBytes)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF("enter busNo=0x%x\n", busNo);
+ /* step 0: init the chipset */
+ POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ if (!chipset_inited) {
+ /* step: initialize the chipset */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ /* this change is needed so that console will come up
+ * OK even when the bus 0 create comes in late. If the
+ * bus 0 create is the first create, then the add_vnic
+ * will work fine, but if the bus 0 create arrives
+ * after number 4, then the add_vnic will fail, and the
+ * ultraboot will fail.
+ */
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 0;
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ LOGINF("chipset initialized\n");
+ POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, busNo,
+ POSTCODE_SEVERITY_INFO);
+ }
+
+ /* step 1: create a bus */
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_WARNING);
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = 23; /* devNo+1; */
+ msg.cmd.createBus.channelAddr = channelAddr;
+ msg.cmd.createBus.channelBytes = nChannelBytes;
+ if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("create_bus failed.\n");
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
+
+
+int
+uislib_client_inject_del_bus(U32 busNo)
+{
+ return delete_bus_glue(busNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
+
+int
+uislib_client_inject_pause_vhba(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA pause_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
+
+int
+uislib_client_inject_resume_vhba(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA resume_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
+
+int
+uislib_client_inject_add_vhba(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ /* The chipset has been initialized and the bus has previously been
+ * created - verify it still exists. Step 2: create the VHBA device
+ * on the bus.
+ */
+ POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = instGuid;
+ if (intr)
+ msg.cmd.createDevice.intr = *intr;
+ else
+ memset(&msg.cmd.createDevice.intr, 0,
+ sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ LOGERR("wrong channel size. chan_bytes = 0x%x MIN_IO_CHANNEL_SIZE = 0x%x\n",
+ chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.createDevice.channelBytes = chan_bytes;
+ msg.cmd.createDevice.dataTypeGuid = UltraVhbaChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VHBA create_device failed.\n");
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
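As a usage note, the injection entry points above are meant to be called in order (bus first, then device). The following is an illustrative sketch only; the bus/device numbers, the placeholder channel address, and example_bring_up_vhba are hypothetical names and values, not part of this patch.

/* Illustrative sketch: bring up one VHBA on bus 0.  All values here are
 * placeholders; a real caller supplies its own GUID and channel address. */
static int example_bring_up_vhba(void)
{
	GUID inst_guid = GUID0;			/* placeholder instance GUID */
	U64 chan_addr = 0x100000000ULL;		/* placeholder physical address */
	U32 chan_bytes = MIN_IO_CHANNEL_SIZE;

	if (!uislib_client_inject_add_bus(0, inst_guid, chan_addr, chan_bytes))
		return -1;		/* bus creation failed */
	if (!uislib_client_inject_add_vhba(0, 0, chan_addr, chan_bytes,
					   0 /* not a test address; will be ioremap()ed */,
					   inst_guid, NULL))
		return -1;		/* device creation failed */
	return 0;
}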
+
+int
+uislib_client_inject_del_vhba(U32 busNo, U32 devNo)
+{
+ return delete_device_glue(busNo, devNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
+
+int
+uislib_client_inject_add_vnic(U32 busNo, U32 devNo,
+ U64 phys_chan_addr, U32 chan_bytes,
+ int is_test_addr, GUID instGuid,
+ struct InterruptInfo *intr)
+{
+ CONTROLVM_MESSAGE msg;
+
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ /* The chipset has been initialized and the bus has previously been
+ * created - verify it still exists. Step 2: create the VNIC device
+ * on the bus.
+ */
+ POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = instGuid;
+ if (intr)
+ msg.cmd.createDevice.intr = *intr;
+ else
+ memset(&msg.cmd.createDevice.intr, 0,
+ sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ LOGERR("wrong channel size. chan_bytes = 0x%x MIN_IO_CHANNEL_SIZE = 0x%x\n",
+ chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.createDevice.channelBytes = chan_bytes;
+ msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC create_device failed.\n");
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
+
+int
+uislib_client_inject_pause_vnic(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC pause_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
+
+int
+uislib_client_inject_resume_vnic(U32 busNo, U32 devNo)
+{
+ CONTROLVM_MESSAGE msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.deviceChangeState.busNo = busNo;
+ msg.cmd.deviceChangeState.devNo = devNo;
+ msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("VNIC resume_device failed. busNo=0x%x devNo=0x%x\n",
+ busNo, devNo);
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
+
+int
+uislib_client_inject_del_vnic(U32 busNo, U32 devNo)
+{
+ return delete_device_glue(busNo, devNo);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
+
+static int
+uislib_client_add_vnic(U32 busNo)
+{
+ BOOL busCreated = FALSE;
+ int devNo = 0; /* Default to 0, since only one device
+ * will be created for this bus... */
+ GUID dummyGuid = GUID0;
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = 4;
+ msg.cmd.createBus.channelAddr = 0;
+ msg.cmd.createBus.channelBytes = 0;
+ if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("client create_bus failed");
+ return 0;
+ }
+ busCreated = TRUE;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.devInstGuid = dummyGuid;
+ memset(&msg.cmd.createDevice.intr, 0, sizeof(struct InterruptInfo));
+ msg.cmd.createDevice.channelAddr = PhysicalDataChan;
+ msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
+ msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("client create_device failed");
+ goto AwayCleanup;
+ }
+
+ return 1;
+
+AwayCleanup:
+ if (busCreated) {
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ LOGERR("client destroy_bus failed.\n");
+ }
+
+ return 0;
+} /* end uislib_client_add_vnic */
+EXPORT_SYMBOL_GPL(uislib_client_add_vnic);
+
+static int
+uislib_client_delete_vnic(U32 busNo)
+{
+ int devNo = 0; /* Default to 0, since only one device
+ * will be created for this bus... */
+ CONTROLVM_MESSAGE msg;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+ if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ /* Don't error exit - try to see if bus can be destroyed... */
+ LOGERR("client destroy_device failed.\n");
+ }
+
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.destroyBus.busNo = busNo;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ LOGERR("client destroy_bus failed.\n");
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_delete_vnic);
+/* end client_delete_vnic */
+
+void *
+uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
+{
+ /* __GFP_NORETRY means "ok to fail", meaning kmalloc() can
+ * return NULL. If you do NOT specify __GFP_NORETRY, Linux
+ * will go to extreme measures to get memory for you (like,
+ * invoke oom killer), which will probably cripple the system.
+ */
+ void *p = kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
+ if (p == NULL) {
+ LOGERR("uislib_malloc failed to alloc uiscmdrsp @%s:%d",
+ fn, ln);
+ return NULL;
+ }
+ return p;
+}
+EXPORT_SYMBOL_GPL(uislib_cache_alloc);
+
+void
+uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
+{
+ if (p == NULL) {
+ LOGERR("uislib_free NULL pointer @%s:%d", fn, ln);
+ return;
+ }
+ kmem_cache_free(cur_pool, p);
+}
+EXPORT_SYMBOL_GPL(uislib_cache_free);
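The comment in uislib_cache_alloc() explains why __GFP_NORETRY is used: the allocation is allowed to fail rather than trigger the OOM killer, so callers must check for NULL. Below is a hedged sketch of the expected alloc/free pairing; cmdrsp_pool, the file-name literal, and example_cache_roundtrip are hypothetical.

/* Illustrative sketch: cmdrsp_pool is a hypothetical kmem_cache created
 * elsewhere with kmem_cache_create(). */
static void example_cache_roundtrip(struct kmem_cache *cmdrsp_pool)
{
	struct uiscmdrsp *cmdrsp;

	cmdrsp = uislib_cache_alloc(cmdrsp_pool, "example.c", __LINE__);
	if (!cmdrsp)
		return;	/* allocation may fail because of __GFP_NORETRY */

	/* ... fill in and submit the command/response here ... */

	uislib_cache_free(cmdrsp_pool, cmdrsp, "example.c", __LINE__);
}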
+
+/*****************************************************/
+/* proc filesystem callback functions */
+/*****************************************************/
+
+static ssize_t
+vnic_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int action = 0xffff, busNo = 0, i, result = 0;
+ char buf[4];
+ char direction;
+/* GUID guid; */
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/vnic copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate before parsing with sscanf */
+
+ i = sscanf(buf, "%d%c", &action, &direction);
+ if (i != 2) {
+ LOGERR("unable to parse vnic proc parameters.\n");
+ return -EFAULT;
+ }
+
+ if ((direction != '-') && (direction != '+')) {
+ LOGERR("unable to determine whether to add or delete vnic\n");
+ return -EFAULT;
+ }
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 0:
+ /* call client method... */
+ busNo = 0; /* All client drivers use bus value of 0... */
+ if (direction == '+')
+ result = uislib_client_add_vnic(busNo);
+ else
+ result = uislib_client_delete_vnic(busNo);
+ if (!result) {
+ LOGERR("echo 0%c > /proc/uislib/vnic failed (client end)",
+ direction);
+ return -EFAULT;
+ }
+ return count;
+
+ default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action><direction (+/-)> > /proc/uislib/vnic");
+ LOGERR(" ");
+ LOGERR("Client Syntax");
+ LOGERR("-------------");
+ LOGERR("0+ ==> add vnic");
+ LOGERR("0- ==> delete vnic");
+ LOGERR(" ");
+ return count;
+} /* end vnic_proc_write */
+
+static ssize_t
+chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int i, action = 0xffff;
+ char buf[4];
+ CONTROLVM_MESSAGE msg;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate before parsing with sscanf */
+
+ if (chipset_inited) {
+ LOGINF("Chipset already initialized\n");
+ return -EFAULT;
+ }
+ i = sscanf(buf, "%x", &action);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 1:
+ /* GUEST */
+ /* step: initialize the chipset */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ msg.hdr.Flags.testMessage = 0;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 23;
+
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ return 1;
+ case 2:
+ /* BOTH */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ msg.hdr.Flags.testMessage = 1;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 23;
+
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("init_chipset failed.\n");
+ return 0;
+ }
+ return 1;
+
+ default:
+ break;
+ }
+
+ LOGERR("usage: 1 ==> init_chipset client\n");
+ LOGERR("usage: 2 ==> init_chipset test\n");
+ return -EFAULT;
+}
+
+#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
+ buff_len, __VA_ARGS__)
+
+static int
+info_proc_read_helper(char **buff, int *buff_len)
+{
+ int i, tot = 0;
+ struct bus_info *bus;
+
+ if (PLINE("\nBuses:\n") < 0)
+ goto err_done;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+
+ if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
+ bus, bus->busNo, bus->deviceCount) < 0)
+ goto err_done_unlock;
+
+
+ if (PLINE(" Devices:\n") < 0)
+ goto err_done_unlock;
+
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i]) {
+ if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
+ bus->busNo, i, bus->device[i],
+ bus->device[i]->chanptr,
+ bus->device[i]->swtch) < 0)
+ goto err_done_unlock;
+
+ if (PLINE(" first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
+ bus->device[i]->first_busy_cnt,
+ bus->device[i]->moved_to_tail_cnt,
+ bus->device[i]->last_on_list_cnt) < 0)
+ goto err_done_unlock;
+ }
+ }
+ }
+ read_unlock(&BusListLock);
+
+ if (PLINE("UisUtils_Registered_Services: %d\n",
+ atomic_read(&UisUtils_Registered_Services)) < 0)
+ goto err_done;
+ if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
+ cycles_before_wait, wait_cycles) < 0)
+ goto err_done;
+ if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
+ tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
+ goto err_done;
+ if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
+ goto err_done;
+ if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)
+ goto err_done;
+
+ return tot;
+
+err_done_unlock:
+ read_unlock(&BusListLock);
+err_done:
+ return -1;
+}
+
+static ssize_t
+info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+ char *temp;
+ int totalBytes = 0;
+ int remaining_bytes = PROC_READ_BUFFER_SIZE;
+
+/* *start = buf; */
+ if (ProcReadBuffer == NULL) {
+ DBGINF("ProcReadBuffer == NULL; allocating buffer.\n");
+ ProcReadBuffer = vmalloc(PROC_READ_BUFFER_SIZE);
+
+ if (ProcReadBuffer == NULL) {
+ LOGERR("failed to allocate buffer to provide proc data.\n");
+ return -ENOMEM;
+ }
+ }
+
+ temp = ProcReadBuffer;
+
+ if ((*offset == 0) || (!ProcReadBufferValid)) {
+ DBGINF("calling info_proc_read_helper.\n");
+ /* if the read fails, then -1 will be returned */
+ totalBytes = info_proc_read_helper(&temp, &remaining_bytes);
+ if (totalBytes < 0)
+ return -EFAULT;
+ ProcReadBufferValid = 1;
+ } else
+ totalBytes = strlen(ProcReadBuffer);
+
+ return simple_read_from_buffer(buf, len, offset,
+ ProcReadBuffer, totalBytes);
+}
+
+static ssize_t
+platformnumber_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ length = sprintf(vbuf, "%d\n", PlatformNumber);
+
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+#ifdef UISLIB_TEST_PROC
+
+/* proc/uislib/vbus/<x>/info */
+static int
+proc_info_vbus_show(struct seq_file *m, void *v)
+{
+ struct bus_info *bus = m->private;
+ int i, devInfoCount, x;
+ char buf[999];
+
+ if (bus == NULL)
+ return 0;
+ seq_printf(m, "Client device / client driver info for %s partition (vbus #%d):\n",
+ bus->partitionName, bus->busNo);
+ if ((bus->busChannelBytes == 0) || (bus->pBusChannel == NULL))
+ return 0;
+ devInfoCount =
+ (bus->busChannelBytes -
+ sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL)) /
+ sizeof(ULTRA_VBUS_DEVICEINFO);
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->ChpInfo, buf,
+ sizeof(buf) - 1, -1);
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->BusInfo,
+ buf, sizeof(buf) - 1, -1);
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ for (i = 0; i < devInfoCount; i++) {
+ x = VBUSCHANNEL_devInfoToStringBuffer(&bus->pBusChannel->
+ DevInfo[i], buf,
+ sizeof(buf) - 1, i);
+ if (x > 0) {
+ buf[x] = '\0';
+ seq_printf(m, "%s", buf);
+ }
+ }
+ return 0;
+}
+
+static ssize_t
+bus_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int server_flag = 0;
+ int i, action = 0xffff, result;
+ char buf[16];
+ CONTROLVM_MESSAGE msg;
+ U32 busNo, deviceCount;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/bus: copy_from_user ****FAILED.");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate before parsing with sscanf */
+
+ i = sscanf(buf, "%x-%d-%d", &action, &busNo, &deviceCount);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage.
+ */
+ switch (action) {
+ case 0:
+ /* destroy a bus */
+ if (i != 2)
+ break;
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, server_flag);
+ msg.cmd.destroyBus.busNo = busNo;
+
+ result = destroy_bus(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 0-%d > /proc/uislib/bus {CONTROLVM_BUS_DESTROY Failed} Result(%d)",
+ busNo, result);
+ return -EFAULT;
+ }
+ return count;
+ case 1:
+ /* create a bus */
+ if (i != 3)
+ break;
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, server_flag);
+ msg.cmd.createBus.busNo = busNo;
+ msg.cmd.createBus.deviceCount = deviceCount;
+
+ result = create_bus(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 1-%d-%d > /proc/uislib/bus {CONTROLVM_BUS_CREATE Failed} Result(%d)",
+ busNo, deviceCount, result);
+ return -EFAULT;
+ }
+
+ return count;
+ default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action>-<busNo>... > /proc/uislib/bus");
+ LOGERR(" ");
+ LOGERR("Destruct Syntax ControlVM Message Id");
+ LOGERR("--------------- ---------------------");
+ LOGERR("0-<busNo> ==> CONTROLVM_BUS_DESTROY");
+ LOGERR(" ");
+ LOGERR("Construct Syntax ControlVM Message Id");
+ LOGERR("----------------------- -------------------- ");
+ LOGERR("1-<busNo>-<deviceCount> ==> CONTROLVM_BUS_CREATE");
+
+ return -EFAULT;
+}
+
+static ssize_t
+uislib_proc_read_writeonly(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t
+dev_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int server_flag = 0;
+ CONTROLVM_MESSAGE msg;
+ U32 busNo, devNo;
+ char buf[32];
+ unsigned int chanptr;
+ int type, i, action = 0xffff, result;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("echo > /proc/uislib/device: copy_from_user ****FAILED.");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate before parsing with sscanf */
+
+ i = sscanf(buf, "%x-%d-%d-%x-%d",
+ &action, &busNo, &devNo, &chanptr, &type);
+
+ switch (action) {
+ case 0:
+ if (i != 3)
+ break;
+
+ /* destroy a device */
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, server_flag);
+ msg.cmd.destroyDevice.busNo = busNo;
+ msg.cmd.destroyDevice.devNo = devNo;
+
+ result = destroy_device(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ LOGERR("echo 0-%d-%d > /proc/uislib/device {CONTROLVM_DEVICE_DESTROY Failed} Result(%d)",
+ busNo, devNo, result);
+ return -EFAULT;
+ }
+
+ return count;
+
+ case 1:
+ if (i != 5)
+ break;
+
+ /* create a device */
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, server_flag);
+ msg.cmd.createDevice.busNo = busNo;
+ msg.cmd.createDevice.devNo = devNo;
+ msg.cmd.createDevice.channelAddr = __pa(chanptr);
+ msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
+
+ if (type == 0)
+ msg.cmd.createDevice.dataTypeGuid =
+ UltraVhbaChannelProtocolGuid;
+ else if (type == 1)
+ msg.cmd.createDevice.dataTypeGuid =
+ UltraVnicChannelProtocolGuid;
+ else {
+ LOGERR("echo 1-%d-%d-%x-<type> > /proc/uislib/device failed: invalid device type %d.",
+ busNo, devNo, chanptr, type);
+ return -EFAULT;
+ }
+
+ result = create_device(&msg, NULL);
+
+ if (result != CONTROLVM_RESP_SUCCESS) {
+ if (type == 0)
+ LOGERR("echo 1-%d-%d-%x-0 > /proc/uislib/device {CONTROLVM_DEVICE_CREATE[vHBA] Failed} Result(%d)",
+ busNo, devNo, chanptr, result);
+ else
+ LOGERR("echo 1-%d-%d-%x-1 > /proc/uislib/device {CONTROLVM_DEVICE_CREATE[vNIC] Failed} Result(%d)",
+ busNo, devNo, chanptr, result);
+ return -EFAULT;
+ }
+
+ return count;
+
+ default:
+ break;
+ }
+
+ LOGERR("USAGE: echo <action>-<busNo>-<devNo>... > /proc/uislib/device");
+ LOGERR(" ");
+ LOGERR("Destruct Syntax ControlVM Message Id");
+ LOGERR("----------------- ------------------------");
+ LOGERR("0-<busNo>-<devNo> ==> CONTROLVM_DEVICE_DESTROY");
+ LOGERR(" ");
+ LOGERR("Construct Syntax ControlVM Message Id");
+ LOGERR
+ ("---------------------------------- ----------------------- ");
+ LOGERR
+ ("1-<busNo>-<devNo>-<chanptr>-<type> ==> CONTROLVM_DEVICE_CREATE");
+ LOGERR(" <type = 0>: vHBA");
+ LOGERR(" <type = 1>: vNIC");
+ LOGERR(" ");
+
+ return -EFAULT;
+}
+
+static ssize_t
+cycles_before_wait_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+
+#define CYCLES_BEFORE_WAIT_USE_ERROR { \
+ LOGERR("Incorrect cycles_before_wait Input.\n"); \
+ pr_info("Please pass the number of idle cycles to allow before waiting, as a decimal integer:\n"); \
+ pr_info("e.g. echo 100000 > cycles_before_wait\n"); \
+ return -EFAULT; \
+}
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ CYCLES_BEFORE_WAIT_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+ buf[count - 1] = '\0'; /* Replace the LF at the end of the
+ * input with a NULL */
+ /* Pull out cycles_before_wait; it must be a decimal integer */
+ if (sscanf(buf, "%lld", &cycles_before_wait) != 1)
+ CYCLES_BEFORE_WAIT_USE_ERROR;
+
+ return count;
+}
+
+static ssize_t
+reset_counts_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ unsigned long long new_value;
+ struct bus_info *bus;
+ int i;
+
+#define RESET_COUNTS_USE_ERROR { \
+ LOGERR("Incorrect reset_counts Input.\n"); \
+ pr_info("Please pass the new value for the counters:\n"); \
+ pr_info("e.g. echo 0 > reset_counts\n"); \
+ return -EFAULT; \
+ }
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ RESET_COUNTS_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+ buf[count - 1] = '\0'; /* Replace the LF at the end of the
+ * input with a NULL */
+ /* Pull out the reset value; it must be a decimal integer */
+ if (sscanf(buf, "%llu", &new_value) != 1)
+ RESET_COUNTS_USE_ERROR;
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+
+ for (i = 0; i < bus->deviceCount; i++) {
+ if (bus->device[i]) {
+ bus->device[i]->first_busy_cnt = new_value;
+ bus->device[i]->moved_to_tail_cnt = new_value;
+ bus->device[i]->last_on_list_cnt = new_value;
+ }
+ }
+ }
+ read_unlock(&BusListLock);
+ tot_moved_to_tail_cnt = new_value;
+ tot_wait_cnt = new_value;
+ tot_wakeup_cnt = new_value;
+ tot_schedule_cnt = new_value;
+ return count;
+}
+
+static ssize_t
+smart_wakeup_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int new_value;
+
+#define SMART_WAKEUP_USE_ERROR { \
+ LOGERR("Incorrect smart_wakeup Input 0 disables smart_wakeup, and 1 enables smart_wakeup.\n"); \
+ pr_info("echo 0 > smart_wakeup\n"); \
+ pr_info("echo 1 > smart_wakeup\n"); \
+ return -EFAULT; \
+ }
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (count == 0)
+ SMART_WAKEUP_USE_ERROR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed.\n");
+ return -EFAULT;
+ }
+ buf[count - 1] = '\0'; /* Replace the LF at the end of the
+ * input with a NULL */
+ /* Pull out the smart_wakeup setting; it must be a decimal integer */
+ if (sscanf(buf, "%d", &new_value) != 1)
+ SMART_WAKEUP_USE_ERROR;
+ en_smart_wakeup = new_value;
+ return count;
+}
+
+static ssize_t
+test_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int i, action = 0xffff;
+ char buf[16];
+ CONTROLVM_MESSAGE msg;
+ S64 vrtc_offset;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ memset(&msg, 0, sizeof(CONTROLVM_MESSAGE));
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user ****FAILED.\n");
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* NUL-terminate before parsing with sscanf */
+
+ i = sscanf(buf, "%x", &action);
+
+ /* if (i < 1), i.e., if we didn't even read the action field,
+ * then action will default to 0xffff and the code below will
+ * fall through the switch and print usage. */
+ switch (action) {
+ case 6:
+ msg.hdr.Id = CONTROLVM_CHIPSET_STOP;
+ msg.hdr.Flags.responseExpected = 1;
+ stop_chipset(&msg, NULL);
+ break;
+ case 7:
+ vrtc_offset = 0;
+ LOGERR("about to issue QUERY vrtc_offset=%LX", vrtc_offset);
+ vrtc_offset = Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET();
+ LOGERR("result is vrtc_offset=%LX", vrtc_offset);
+ break;
+ case 8:
+ vrtc_offset = 60;
+ LOGERR("about to increase physical time by 0x%LX seconds",
+ vrtc_offset);
+ vrtc_offset = Issue_VMCALL_UPDATE_PHYSICAL_TIME(vrtc_offset);
+ break;
+ case 9:
+ vrtc_offset = -60;
+ LOGERR("about to decrease physical time by 0x%LX seconds",
+ vrtc_offset);
+ vrtc_offset = Issue_VMCALL_UPDATE_PHYSICAL_TIME(vrtc_offset);
+ break;
+ default:
+ LOGERR("usage: 6 for CHIPSET_STOP\n");
+ LOGERR(" 7 for VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET()\n");
+ LOGERR(" 8 for VMCALL_UPDATE_PHYSICAL_TIME(60)\n");
+ LOGERR(" 9 for VMCALL_UPDATE_PHYSICAL_TIME(-60)\n");
+ return -EFAULT;
+ }
+ return count;
+}
+
+#endif /* UISLIB_TEST_PROC */
+static struct device_info *
+find_dev(U32 busNo, U32 devNo)
+{
+ struct bus_info *bus;
+ struct device_info *dev = NULL;
+
+ read_lock(&BusListLock);
+ for (bus = BusListHead; bus; bus = bus->next) {
+ if (bus->busNo == busNo) {
+ /* make sure the device number is valid */
+ if (devNo >= bus->deviceCount) {
+ LOGERR("%s bad busNo, devNo=%d,%d",
+ __func__,
+ (int) (busNo), (int) (devNo));
+ goto Away;
+ }
+ dev = bus->device[devNo];
+ if (!dev)
+ LOGERR("%s bad busNo, devNo=%d,%d",
+ __func__,
+ (int) (busNo), (int) (devNo));
+ goto Away;
+ }
+ }
+Away:
+ read_unlock(&BusListLock);
+ return dev;
+}
+
+/* This thread calls the "interrupt" function for each device that has
+ * enabled polling via uislib_enable_channel_interrupts(). The "interrupt"
+ * function typically reads and processes the device's channel input
+ * queue. This thread repeatedly does this, until the thread is told to stop
+ * (via uisthread_stop()). Sleeping rules:
+ * - If we have called the "interrupt" function for all devices, and all of
+ * them have reported "nothing processed" (returned 0), then we will go to
+ * sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
+ * - If anyone calls uislib_force_channel_interrupt(), the above jiffy
+ * sleep will be interrupted, and we will resume calling the "interrupt"
+ * function for all devices.
+ * - The list of devices is dynamically re-ordered in order to
+ * attempt to preserve fairness. Whenever we spin thru the list of
+ * devices and call the dev->interrupt() function, if we find
+ * devices which report that there is still more work to do, the
+ * first such device we find is moved to the end of the device
+ * list. This ensures that extremely busy devices don't starve out
+ * less-busy ones.
+ *
+ */
+static int
+Process_Incoming(void *v)
+{
+ unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
+ struct list_head *new_tail = NULL;
+ int i;
+ UIS_DAEMONIZE("dev_incoming");
+ for (i = 0; i < 16; i++) {
+ old_cycles = get_cycles();
+ wait_event_timeout(Wakeup_Polling_Device_Channels,
+ 0, POLLJIFFIES_NORMAL);
+ cur_cycles = get_cycles();
+ if (wait_cycles == 0) {
+ wait_cycles = (cur_cycles - old_cycles);
+ } else {
+ if (wait_cycles < (cur_cycles - old_cycles))
+ wait_cycles = (cur_cycles - old_cycles);
+ }
+ }
+ LOGINF("wait_cycles=%llu", wait_cycles);
+ cycles_before_wait = wait_cycles;
+ idle_cycles = 0;
+ Go_Polling_Device_Channels = 0;
+ while (1) {
+ struct list_head *lelt, *tmp;
+ struct device_info *dev = NULL;
+
+ /* poll each channel for input */
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ new_tail = NULL;
+ list_for_each_safe(lelt, tmp, &List_Polling_Device_Channels) {
+ int rc = 0;
+ dev = list_entry(lelt, struct device_info,
+ list_polling_device_channels);
+ LOCKSEM_UNINTERRUPTIBLE(&dev->interrupt_callback_lock);
+ if (dev->interrupt)
+ rc = dev->interrupt(dev->interrupt_context);
+ else
+ continue;
+ UNLOCKSEM(&dev->interrupt_callback_lock);
+ if (rc) {
+ /* dev->interrupt returned, but there
+ * is still more work to do.
+ * Reschedule work to occur as soon as
+ * possible. */
+ idle_cycles = 0;
+ if (new_tail == NULL) {
+ dev->first_busy_cnt++;
+ if (!
+ (list_is_last
+ (lelt,
+ &List_Polling_Device_Channels))) {
+ new_tail = lelt;
+ dev->moved_to_tail_cnt++;
+ } else
+ dev->last_on_list_cnt++;
+ }
+
+ }
+ if (Incoming_ThreadInfo.should_stop)
+ break;
+ }
+ if (new_tail != NULL) {
+ tot_moved_to_tail_cnt++;
+ list_move_tail(new_tail, &List_Polling_Device_Channels);
+ }
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+ cur_cycles = get_cycles();
+ delta_cycles = cur_cycles - old_cycles;
+ old_cycles = cur_cycles;
+
+ /* At this point, we have scanned thru all of the
+ * channels, and at least one of the following is true:
+ * - there is no input waiting on any of the channels
+ * - we have received a signal to stop this thread
+ */
+ if (Incoming_ThreadInfo.should_stop)
+ break;
+ if (en_smart_wakeup == 0xFF) {
+ LOGINF("en_smart_wakeup set to 0xff, to force exiting process_incoming");
+ break;
+ }
+ /* wait for POLLJIFFIES_NORMAL jiffies, or until someone wakes up
+ * Wakeup_Polling_Device_Channels, whichever comes first.
+ * Only do a wait when we have been idle for cycles_before_wait
+ * cycles.
+ */
+ if (idle_cycles > cycles_before_wait) {
+ Go_Polling_Device_Channels = 0;
+ tot_wait_cnt++;
+ wait_event_timeout(Wakeup_Polling_Device_Channels,
+ Go_Polling_Device_Channels,
+ POLLJIFFIES_NORMAL);
+ Go_Polling_Device_Channels = 1;
+ } else {
+ tot_schedule_cnt++;
+ schedule();
+ idle_cycles = idle_cycles + delta_cycles;
+ }
+ }
+ DBGINF("exiting.\n");
+ complete_and_exit(&Incoming_ThreadInfo.has_stopped, 0);
+}
+
+static BOOL
+Initialize_incoming_thread(void)
+{
+ if (Incoming_Thread_Started)
+ return TRUE;
+ if (!uisthread_start(&Incoming_ThreadInfo,
+ &Process_Incoming, NULL, "dev_incoming")) {
+ LOGERR("uisthread_start Initialize_incoming_thread ****FAILED");
+ return FALSE;
+ }
+ Incoming_Thread_Started = TRUE;
+ return TRUE;
+}
+
+/* Add a new device/channel to the list being processed by
+ * Process_Incoming().
+ * <interrupt> - indicates the function to call periodically.
+ * <interrupt_context> - indicates the data to pass to the <interrupt>
+ * function.
+ */
+void
+uislib_enable_channel_interrupts(U32 busNo, U32 devNo,
+ int (*interrupt)(void *),
+ void *interrupt_context)
+{
+ struct device_info *dev;
+ dev = find_dev(busNo, devNo);
+ if (!dev) {
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (busNo),
+ (int) (devNo));
+ return;
+ }
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ Initialize_incoming_thread();
+ dev->interrupt = interrupt;
+ dev->interrupt_context = interrupt_context;
+ dev->polling = TRUE;
+ list_add_tail(&(dev->list_polling_device_channels),
+ &List_Polling_Device_Channels);
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+}
+EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
+
+/* Remove a device/channel from the list being processed by
+ * Process_Incoming().
+ */
+void
+uislib_disable_channel_interrupts(U32 busNo, U32 devNo)
+{
+ struct device_info *dev;
+ dev = find_dev(busNo, devNo);
+ if (!dev) {
+ LOGERR("%s busNo=%d, devNo=%d", __func__, (int) (busNo),
+ (int) (devNo));
+ return;
+ }
+ LOCKSEM_UNINTERRUPTIBLE(&Lock_Polling_Device_Channels);
+ list_del(&dev->list_polling_device_channels);
+ dev->polling = FALSE;
+ dev->interrupt = NULL;
+ UNLOCKSEM(&Lock_Polling_Device_Channels);
+}
+EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
+
+static void
+do_wakeup_polling_device_channels(struct work_struct *dummy)
+{
+ if (!Go_Polling_Device_Channels) {
+ Go_Polling_Device_Channels = 1;
+ wake_up(&Wakeup_Polling_Device_Channels);
+ }
+}
+
+static DECLARE_WORK(Work_wakeup_polling_device_channels,
+ do_wakeup_polling_device_channels);
+
+/* Call this function when you want to send a hint to Process_Incoming() that
+ * your device might have more requests.
+ */
+void
+uislib_force_channel_interrupt(U32 busNo, U32 devNo)
+{
+ if (en_smart_wakeup == 0)
+ return;
+ if (Go_Polling_Device_Channels)
+ return;
+ /* The point of using schedule_work() instead of just doing
+ * the work inline is to force a slight delay before waking up
+ * the Process_Incoming() thread.
+ */
+ tot_wakeup_cnt++;
+ schedule_work(&Work_wakeup_polling_device_channels);
+}
+EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
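The three entry points above (enable, disable, force) make up the channel-polling API consumed by Process_Incoming(). Below is a minimal sketch of how a client driver might use them; example_dev_state and both example_* functions are hypothetical names, not part of this patch.

/* Illustrative sketch of the polling contract. */
struct example_dev_state {
	int pending;		/* hypothetical "more work remains" flag */
};

static int example_dev_interrupt(void *context)
{
	struct example_dev_state *s = context;

	/* a real callback would drain the device's channel input queue
	 * here; returning nonzero tells Process_Incoming() that more
	 * work remains so the device is rescheduled promptly */
	return s->pending;
}

static void example_polling_lifecycle(U32 busNo, U32 devNo,
				      struct example_dev_state *s)
{
	/* start periodic polling of this device's channel */
	uislib_enable_channel_interrupts(busNo, devNo,
					 example_dev_interrupt, s);

	/* hint that new requests have arrived (cheap, optional) */
	uislib_force_channel_interrupt(busNo, devNo);

	/* stop polling when the device goes away */
	uislib_disable_channel_interrupts(busNo, devNo);
}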
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init
+uislib_mod_init(void)
+{
+
+ LOGINF("MONITORAPIS");
+
+ LOGINF("sizeof(struct uiscmdrsp):%lu bytes\n",
+ (ulong) sizeof(struct uiscmdrsp));
+ LOGINF("sizeof(struct phys_info):%lu\n",
+ (ulong) sizeof(struct phys_info));
+ LOGINF("sizeof(uiscmdrsp_scsi):%lu\n",
+ (ulong) sizeof(struct uiscmdrsp_scsi));
+ LOGINF("sizeof(uiscmdrsp_net):%lu\n",
+ (ulong) sizeof(struct uiscmdrsp_net));
+ LOGINF("sizeof(CONTROLVM_MESSAGE):%lu bytes\n",
+ (ulong) sizeof(CONTROLVM_MESSAGE));
+ LOGINF("sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL):%lu bytes\n",
+ (ulong) sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL));
+ LOGINF("sizeof(CHANNEL_HEADER):%lu bytes\n",
+ (ulong) sizeof(CHANNEL_HEADER));
+ LOGINF("sizeof(ULTRA_IO_CHANNEL_PROTOCOL):%lu bytes\n",
+ (ulong) sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ LOGINF("SIZEOF_CMDRSP:%lu bytes\n", SIZEOF_CMDRSP);
+ LOGINF("SIZEOF_PROTOCOL:%lu bytes\n", SIZEOF_PROTOCOL);
+
+ /* initialize global pointers to NULL */
+ BusListHead = NULL;
+ BusListCount = MaxBusCount = 0;
+ rwlock_init(&BusListLock);
+ VirtControlChanFunc = NULL;
+
+ /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
+ * then map this physical address to a virtual address. */
+ POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ /* create the proc entries for the channels */
+ uislib_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ /* (e.g., for /proc/uislib/vbus/<x>/info) */
+ uislib_proc_vbus_dir = proc_mkdir(DIR_VBUS_PROC_ENTRY, uislib_proc_dir);
+
+ vnic_proc_entry = proc_create(VNIC_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_vnic_fops);
+ SET_PROC_OWNER(vnic_proc_entry, THIS_MODULE);
+
+ /* for testing purposes only, create the proc entries for
+ * enqueuing Control Channel messages */
+ chipset_proc_entry =
+ proc_create(CHIPSET_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_chipset_fops);
+ SET_PROC_OWNER(chipset_proc_entry, THIS_MODULE);
+
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_info_fops);
+ SET_PROC_OWNER(info_proc_entry, THIS_MODULE);
+
+ platformnumber_proc_entry =
+ proc_create(PLATFORMNUMBER_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_platformnumber_fops);
+ SET_PROC_OWNER(platformnumber_proc_entry, THIS_MODULE);
+
+ cycles_before_wait_proc_entry =
+ proc_create(CYCLES_BEFORE_WAIT_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_cycles_before_wait_fops);
+ SET_PROC_OWNER(cycles_before_wait_proc_entry, THIS_MODULE);
+
+ reset_counts_proc_entry =
+ proc_create(RESET_COUNTS_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_reset_counts_fops);
+ SET_PROC_OWNER(reset_counts_proc_entry, THIS_MODULE);
+
+ smart_wakeup_proc_entry =
+ proc_create(SMART_WAKEUP_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_smart_wakeup_fops);
+ SET_PROC_OWNER(smart_wakeup_proc_entry, THIS_MODULE);
+
+#ifdef UISLIB_TEST_PROC
+ test_proc_entry = proc_create(TEST_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_test_fops);
+ SET_PROC_OWNER(test_proc_entry, THIS_MODULE);
+
+ bus_proc_entry = proc_create(BUS_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_bus_fops);
+ SET_PROC_OWNER(bus_proc_entry, THIS_MODULE);
+
+ dev_proc_entry = proc_create(DEV_PROC_ENTRY_FN, 0, uislib_proc_dir,
+ &proc_dev_fops);
+ SET_PROC_OWNER(dev_proc_entry, THIS_MODULE);
+#endif /* UISLIB_TEST_PROC */
+ POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void __exit
+uislib_mod_exit(void)
+{
+ if (disable_proc_entry)
+ remove_proc_entry(DISABLE_PROC_ENTRY_FN, uislib_proc_dir);
+ if (cycles_before_wait_proc_entry)
+ remove_proc_entry(CYCLES_BEFORE_WAIT_PROC_ENTRY_FN,
+ uislib_proc_dir);
+ if (reset_counts_proc_entry)
+ remove_proc_entry(RESET_COUNTS_PROC_ENTRY_FN, uislib_proc_dir);
+ if (smart_wakeup_proc_entry)
+ remove_proc_entry(SMART_WAKEUP_PROC_ENTRY_FN, uislib_proc_dir);
+ if (ctrlchan_proc_entry)
+ remove_proc_entry(CTRLCHAN_PROC_ENTRY_FN, uislib_proc_dir);
+ if (pmem_proc_entry)
+ remove_proc_entry(PMEM_PROC_ENTRY_FN, uislib_proc_dir);
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, uislib_proc_dir);
+ if (switch_proc_entry)
+ remove_proc_entry(SWITCH_PROC_ENTRY_FN, uislib_proc_dir);
+ if (extport_proc_entry)
+ remove_proc_entry(EXTPORT_PROC_ENTRY_FN, uislib_proc_dir);
+ if (platformnumber_proc_entry)
+ remove_proc_entry(PLATFORMNUMBER_PROC_ENTRY_FN,
+ uislib_proc_dir);
+ if (bus_proc_entry)
+ remove_proc_entry(BUS_PROC_ENTRY_FN, uislib_proc_dir);
+ if (dev_proc_entry)
+ remove_proc_entry(DEV_PROC_ENTRY_FN, uislib_proc_dir);
+ if (vnic_proc_entry)
+ remove_proc_entry(VNIC_PROC_ENTRY_FN, uislib_proc_dir);
+ if (chipset_proc_entry)
+ remove_proc_entry(CHIPSET_PROC_ENTRY_FN, uislib_proc_dir);
+ if (uislib_proc_vbus_dir)
+ remove_proc_entry(DIR_VBUS_PROC_ENTRY, uislib_proc_dir);
+ if (uislib_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ if (ProcReadBuffer) {
+ vfree(ProcReadBuffer);
+ ProcReadBuffer = NULL;
+ }
+
+ DBGINF("goodbye.\n");
+ return;
+}
+
+module_init(uislib_mod_init);
+module_exit(uislib_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uislib");
+ /* this is extracted during depmod and kept in modules.dep */
diff --git a/drivers/staging/unisys/uislib/uisqueue.c b/drivers/staging/unisys/uislib/uisqueue.c
new file mode 100644
index 000000000000..40598ff1f4f2
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisqueue.c
@@ -0,0 +1,160 @@
+/* uisqueue.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "uisutils.h"
+
+#include "chanstub.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages */
+#define CURRENT_FILE_PC UISLIB_PC_uisqueue_c
+#define __MYFILE__ "uisqueue.c"
+
+#define CHECK_CACHE_ALIGN 0
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+unsigned long long
+uisqueue_InterlockedOr(unsigned long long __iomem *Target,
+ unsigned long long Set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(Target);
+ do {
+ i = j;
+ j = uislibcmpxchg64((__force unsigned long long *)Target,
+ i, i | Set, sizeof(*(Target)));
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_InterlockedOr);
+
+unsigned long long
+uisqueue_InterlockedAnd(unsigned long long __iomem *Target,
+ unsigned long long Set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(Target);
+ do {
+ i = j;
+ j = uislibcmpxchg64((__force unsigned long long *)Target,
+ i, i & Set, sizeof(*(Target)));
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_InterlockedAnd);
+
+static U8
+do_locked_client_insert(struct uisqueue_info *queueinfo,
+ unsigned int whichqueue,
+ void *pSignal,
+ spinlock_t *lock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle, U8 *channelId)
+{
+ unsigned long flags;
+ unsigned char queueWasEmpty;
+ unsigned int locked = 0;
+ unsigned int acquired = 0;
+ U8 rc = 0;
+
+ spin_lock_irqsave(lock, flags);
+ locked = 1;
+
+ if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(queueinfo->chan, channelId, NULL))
+ goto Away;
+
+ acquired = 1;
+
+ queueWasEmpty = visor_signalqueue_empty(queueinfo->chan, whichqueue);
+ if (!visor_signal_insert(queueinfo->chan, whichqueue, pSignal))
+ goto Away;
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId, NULL);
+ acquired = 0;
+ spin_unlock_irqrestore(lock, flags);
+ locked = 0;
+
+ queueinfo->packets_sent++;
+
+ rc = 1;
+Away:
+ if (acquired) {
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId,
+ NULL);
+ acquired = 0;
+ }
+ if (locked) {
+ spin_unlock_irqrestore((spinlock_t *) lock, flags);
+ locked = 0;
+ }
+
+ return rc;
+}
+
+int
+uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
+ struct uiscmdrsp *cmdrsp,
+ unsigned int whichqueue,
+ void *insertlock,
+ unsigned char issueInterruptIfEmpty,
+ U64 interruptHandle,
+ char oktowait, U8 *channelId)
+{
+ while (!do_locked_client_insert(queueinfo, whichqueue, cmdrsp,
+ (spinlock_t *) insertlock,
+ issueInterruptIfEmpty,
+ interruptHandle, channelId)) {
+ if (oktowait != OK_TO_WAIT) {
+ LOGERR("****FAILED visor_signal_insert failed; cannot wait; insert aborted\n");
+ return 0; /* failed to queue */
+ }
+ /* try again */
+ LOGERR("****FAILED visor_signal_insert failed; waiting to try again\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisqueue_put_cmdrsp_with_lock_client);
+
+/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue
+ * returns NULL if queue is empty */
+int
+uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo,
+ void *cmdrsp, unsigned int whichqueue)
+{
+ if (!visor_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
+ return 0;
+
+ queueinfo->packets_received++;
+
+ return 1; /* Success */
+}
+EXPORT_SYMBOL_GPL(uisqueue_get_cmdrsp);
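Taken together, the insert and remove helpers above implement the client side of the signal-queue protocol. The sketch below shows one plausible round trip; the queue indices, insert_lock, channel_id, and example_queue_roundtrip are illustrative assumptions rather than values taken from this patch.

/* Illustrative sketch: send a command on one queue and poll another
 * for the response.  Queue numbers are placeholders. */
static void example_queue_roundtrip(struct uisqueue_info *qinfo,
				    struct uiscmdrsp *req,
				    struct uiscmdrsp *rsp,
				    spinlock_t *insert_lock, U8 *channel_id)
{
	/* enqueue a command on (hypothetical) queue 0, waiting if full */
	if (!uisqueue_put_cmdrsp_with_lock_client(qinfo, req, 0, insert_lock,
						  0 /* no interrupt */, 0,
						  OK_TO_WAIT, channel_id))
		return;

	/* poll (hypothetical) queue 1 for a response */
	if (uisqueue_get_cmdrsp(qinfo, rsp, 1)) {
		/* process the response here */
	}
}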
diff --git a/drivers/staging/unisys/uislib/uisthread.c b/drivers/staging/unisys/uislib/uisthread.c
new file mode 100644
index 000000000000..782b06aad56d
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisthread.c
@@ -0,0 +1,85 @@
+/* uisthread.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <asm/processor.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include "uniklog.h"
+#include "uisutils.h"
+#include "uisthread.h"
+
+#define KILL(a, b, c) kill_pid(find_vpid(a), b, c)
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisthread_c
+#define __MYFILE__ "uisthread.c"
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+
+/* returns 0 for failure, 1 for success */
+int
+uisthread_start(struct uisthread_info *thrinfo,
+ int (*threadfn)(void *), void *thrcontext, char *name)
+{
+ thrinfo->should_stop = 0;
+ /* used to stop the thread */
+ init_completion(&thrinfo->has_stopped);
+ thrinfo->task = kthread_create(threadfn, thrcontext, "%s", name);
+ if (IS_ERR(thrinfo->task)) {
+ thrinfo->id = 0;
+ return 0; /* failure */
+ }
+ thrinfo->id = thrinfo->task->pid;
+ wake_up_process(thrinfo->task);
+ LOGINF("started thread pid:%d\n", thrinfo->id);
+ return 1;
+
+}
+EXPORT_SYMBOL_GPL(uisthread_start);
+
+void
+uisthread_stop(struct uisthread_info *thrinfo)
+{
+ int ret;
+ int stopped = 0;
+ if (thrinfo->id == 0)
+ return; /* thread not running */
+
+ LOGINF("uisthread_stop stopping id:%d\n", thrinfo->id);
+ thrinfo->should_stop = 1;
+ ret = KILL(thrinfo->id, SIGHUP, 1);
+ if (ret) {
+ LOGERR("unable to signal thread, error %d\n", ret);
+ } else {
+ /* give up if the thread has NOT died in 1 minute */
+ if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
+ stopped = 1;
+ else
+ LOGERR("timed out trying to signal thread\n");
+ }
+ if (stopped) {
+ LOGINF("uisthread_stop stopped id:%d\n", thrinfo->id);
+ thrinfo->id = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(uisthread_stop);
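uisthread_start()/uisthread_stop() expect the thread function itself to honor should_stop and to complete has_stopped on exit, as Process_Incoming() does. A hedged sketch of that contract follows, using hypothetical example_* names.

/* Illustrative sketch of the uisthread start/stop contract. */
static struct uisthread_info example_thread_info;

static int example_worker(void *context)
{
	UIS_DAEMONIZE("example_worker");
	while (!example_thread_info.should_stop)
		schedule();	/* a real worker would do its work here */
	complete_and_exit(&example_thread_info.has_stopped, 0);
}

static void example_thread_lifecycle(void)
{
	if (!uisthread_start(&example_thread_info, example_worker,
			     NULL, "example_worker"))
		return;
	/* ... run for a while ... */
	uisthread_stop(&example_thread_info);
}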
diff --git a/drivers/staging/unisys/uislib/uisutils.c b/drivers/staging/unisys/uislib/uisutils.c
new file mode 100644
index 000000000000..3178f75e1ebe
--- /dev/null
+++ b/drivers/staging/unisys/uislib/uisutils.c
@@ -0,0 +1,350 @@
+/* uisutils.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <commontypes.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include "uniklog.h"
+#include "uisutils.h"
+#include "version.h"
+#include "vbushelper.h"
+#include "guidutils.h"
+#include <linux/skbuff.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/highmem.h>
+#endif
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisutils_c
+#define __MYFILE__ "uisutils.c"
+
+/* exports */
+atomic_t UisUtils_Registered_Services = ATOMIC_INIT(0);
+ /* num registrations via
+ * uisctrl_register_req_handler() or
+ * uisctrl_register_req_handler_ex() */
+
+
+/*****************************************************/
+/* Utility functions */
+/*****************************************************/
+
+int
+uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
+ char *format, ...)
+{
+ va_list args;
+ int len;
+
+ DBGINF("buffer = 0x%p : *buffer = 0x%p.\n", buffer, *buffer);
+ va_start(args, format);
+ len = vsnprintf(*buffer, *buffer_remaining, format, args);
+ va_end(args);
+ if (len >= *buffer_remaining) {
+ *buffer += *buffer_remaining;
+ *total += *buffer_remaining;
+ *buffer_remaining = 0;
+ LOGERR("bytes remaining is too small!\n");
+ return -1;
+ }
+ *buffer_remaining -= len;
+ *buffer += len;
+ *total += len;
+ return len;
+}
+EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex);
+
+int
+uisctrl_register_req_handler(int type, void *fptr,
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+{
+ LOGINF("type = %d, fptr = 0x%p.\n", type, fptr);
+
+ switch (type) {
+ case 2:
+ if (fptr) {
+ if (!VirtControlChanFunc)
+ atomic_inc(&UisUtils_Registered_Services);
+ VirtControlChanFunc = fptr;
+ } else {
+ if (VirtControlChanFunc)
+ atomic_dec(&UisUtils_Registered_Services);
+ VirtControlChanFunc = NULL;
+ }
+ break;
+
+ default:
+ LOGERR("invalid type %d.\n", type);
+ return 0;
+ }
+ if (chipset_DriverInfo)
+ BusDeviceInfo_Init(chipset_DriverInfo,
+ "chipset", "uislib",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisctrl_register_req_handler);
+
+int
+uisctrl_register_req_handler_ex(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long
+ channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr,
+ U32 clientStrLen, U64 bytes),
+ ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+{
+ char s[99];
+ ReqHandlerInfo_t *pReqHandlerInfo;
+ int rc = 0; /* assume failure */
+ LOGINF("type=%s, controlfunc=0x%p.\n",
+ GUID_format1(&switchTypeGuid, s), controlfunc);
+ if (!controlfunc) {
+ LOGERR("%s: controlfunc must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ if (!Server_Channel_Ok) {
+ LOGERR("%s: Server_Channel_Ok must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ if (!Server_Channel_Init) {
+ LOGERR("%s: Server_Channel_Init must be supplied\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ pReqHandlerInfo = ReqHandlerAdd(switchTypeGuid,
+ switch_type_name,
+ controlfunc,
+ min_channel_bytes,
+ Server_Channel_Ok, Server_Channel_Init);
+ if (!pReqHandlerInfo) {
+ LOGERR("failed to add %s to server list\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+
+ atomic_inc(&UisUtils_Registered_Services);
+ rc = 1; /* success */
+Away:
+ if (rc) {
+ if (chipset_DriverInfo)
+ BusDeviceInfo_Init(chipset_DriverInfo,
+ "chipset", "uislib",
+ VERSION, NULL,
+ __DATE__, __TIME__);
+ } else
+ LOGERR("failed to register type %s.\n",
+ GUID_format1(&switchTypeGuid, s));
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uisctrl_register_req_handler_ex);
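For context, a server-type driver would register its switch type roughly as sketched below; the GUID, the "example" name, and every example_* callback are placeholders, and the callback bodies are stubs rather than real channel validation.

/* Illustrative sketch of a server-side registration. */
static int example_control(struct io_msgs *msgs)
{
	return 1;	/* pretend the message was handled */
}

static int example_channel_ok(unsigned long channelBytes)
{
	return channelBytes >= MIN_IO_CHANNEL_SIZE;
}

static int example_channel_init(void *x, unsigned char *clientStr,
				U32 clientStrLen, U64 bytes)
{
	return 1;	/* pretend the channel was initialized */
}

static int example_register(ULTRA_VBUS_DEVICEINFO *drvinfo)
{
	GUID example_guid = GUID0;	/* placeholder switch-type GUID */

	return uisctrl_register_req_handler_ex(example_guid, "example",
					       example_control,
					       MIN_IO_CHANNEL_SIZE,
					       example_channel_ok,
					       example_channel_init,
					       drvinfo);
}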
+
+int
+uisctrl_unregister_req_handler_ex(GUID switchTypeGuid)
+{
+ char s[99];
+ int rc = 0; /* assume failure */
+ LOGINF("type=%s.\n", GUID_format1(&switchTypeGuid, s));
+ if (ReqHandlerDel(switchTypeGuid) < 0) {
+ LOGERR("failed to remove %s from server list\n",
+ GUID_format1(&switchTypeGuid, s));
+ goto Away;
+ }
+ atomic_dec(&UisUtils_Registered_Services);
+ rc = 1; /* success */
+Away:
+ if (!rc)
+ LOGERR("failed to unregister type %s.\n",
+ GUID_format1(&switchTypeGuid, s));
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uisctrl_unregister_req_handler_ex);
+
+/*
+ * unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
+ * void *skb_in,
+ * unsigned int firstfraglen,
+ * unsigned int frags_max,
+ * struct phys_info frags[])
+ *
+ * calling_ctx - input - a string that is displayed to show
+ * who called this function
+ * void *skb_in - skb whose frag info we're copying; the type is hidden so
+ * we don't need to include skbuff.h in uisutils.h, which is
+ * included in non-networking code.
+ * unsigned int firstfraglen - input - length of first fragment in skb
+ * unsigned int frags_max - input - max len of frags array
+ * struct phys_info frags[] - output - frags array filled in on output
+ * return value indicates number of
+ * entries filled in frags
+ */
+unsigned int
+uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx, void *skb_in,
+ unsigned int firstfraglen,
+ unsigned int frags_max,
+ struct phys_info frags[])
+{
+ unsigned int count = 0, ii, size, offset = 0, numfrags;
+ struct sk_buff *skb = skb_in;
+
+ numfrags = skb_shinfo(skb)->nr_frags;
+
+ while (firstfraglen) {
+ if (count == frags_max) {
+ LOGERR("%s frags array too small: max:%d count:%d\n",
+ calling_ctx, frags_max, count);
+ return -1; /* failure */
+ }
+ frags[count].pi_pfn =
+ page_to_pfn(virt_to_page(skb->data + offset));
+ frags[count].pi_off =
+ (unsigned long) (skb->data + offset) & PI_PAGE_MASK;
+ size =
+ min(firstfraglen,
+ (unsigned int) (PI_PAGE_SIZE - frags[count].pi_off));
+ /* can take smallest of firstfraglen(what's left) OR
+ * bytes left in the page
+ */
+ frags[count].pi_len = size;
+ firstfraglen -= size;
+ offset += size;
+ count++;
+ }
+ if (numfrags) {
+ if ((count + numfrags) > frags_max) {
+ LOGERR("**** FAILED %s frags array too small: max:%d count+nr_frags:%d\n",
+ calling_ctx, frags_max, count + numfrags);
+ return -1; /* failure */
+ }
+
+ for (ii = 0; ii < numfrags; ii++) {
+ count = add_physinfo_entries(page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[ii])), /* pfn */
+ skb_shinfo(skb)->frags[ii].
+ page_offset,
+ skb_shinfo(skb)->frags[ii].
+ size, count, frags_max,
+ frags);
+ if (count == 0) {
+ LOGERR("**** FAILED to add physinfo entries\n");
+ return -1; /* failure */
+ }
+ }
+ }
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *skbinlist;
+ int c;
+ for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
+ skbinlist = skbinlist->next) {
+
+ c = uisutil_copy_fragsinfo_from_skb("recursive",
+ skbinlist,
+ skbinlist->len -
+ skbinlist->data_len,
+ frags_max - count,
+ &frags[count]);
+ if (c == -1) {
+ LOGERR("**** FAILED recursive call failed\n");
+ return -1;
+ }
+ count += c;
+ }
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(uisutil_copy_fragsinfo_from_skb);
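A short, hedged sketch of the intended call pattern, mirroring the recursive call inside the function itself; MAX_PHYS_INFO and example_map_skb are hypothetical names chosen for illustration.

/* Illustrative sketch: convert an outgoing skb into a physical-fragment
 * list before posting it to an IO channel. */
#define MAX_PHYS_INFO 64	/* hypothetical bound chosen by the caller */

static int example_map_skb(struct sk_buff *skb,
			   struct phys_info frags[MAX_PHYS_INFO])
{
	unsigned int count;

	count = uisutil_copy_fragsinfo_from_skb("example_map_skb", skb,
						skb->len - skb->data_len,
						MAX_PHYS_INFO, frags);
	if (count == (unsigned int) -1)
		return -1;	/* the helper returns -1 on overflow */
	return count;		/* number of phys_info entries filled in */
}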
+
+static LIST_HEAD(ReqHandlerInfo_list); /* list of ReqHandlerInfo_t */
+static DEFINE_SPINLOCK(ReqHandlerInfo_list_lock);
+
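+/* Allocate a ReqHandlerInfo_t for the given switch-type GUID, record the
+ * callbacks supplied by the caller, and link it onto ReqHandlerInfo_list
+ * under ReqHandlerInfo_list_lock.  The entry is allocated with GFP_ATOMIC
+ * so registration is safe from atomic context.  Returns the new entry, or
+ * NULL on allocation failure.
+ */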
+ReqHandlerInfo_t *
+ReqHandlerAdd(GUID switchTypeGuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*Server_Channel_Ok)(unsigned long channelBytes),
+ int (*Server_Channel_Init)
+ (void *x, unsigned char *clientStr, U32 clientStrLen, U64 bytes))
+{
+ ReqHandlerInfo_t *rc = NULL;
+
+ rc = kzalloc(sizeof(*rc), GFP_ATOMIC);
+ if (!rc)
+ return NULL;
+ rc->switchTypeGuid = switchTypeGuid;
+ rc->controlfunc = controlfunc;
+ rc->min_channel_bytes = min_channel_bytes;
+ rc->Server_Channel_Ok = Server_Channel_Ok;
+ rc->Server_Channel_Init = Server_Channel_Init;
+ if (switch_type_name)
+ strncpy(rc->switch_type_name, switch_type_name,
+ sizeof(rc->switch_type_name) - 1);
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_add_tail(&(rc->list_link), &ReqHandlerInfo_list);
+ spin_unlock(&ReqHandlerInfo_list_lock);
+
+ return rc;
+}
+
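+/* Look up the registered handler for a switch-type GUID; returns NULL if
+ * no matching entry is on ReqHandlerInfo_list.
+ */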
+ReqHandlerInfo_t *
+ReqHandlerFind(GUID switchTypeGuid)
+{
+ struct list_head *lelt, *tmp;
+ ReqHandlerInfo_t *entry = NULL;
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
+ if (memcmp
+ (&entry->switchTypeGuid, &switchTypeGuid,
+ sizeof(GUID)) == 0) {
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return entry;
+ }
+ }
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return NULL;
+}
+
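+/* Remove and free every registered handler entry that matches the given
+ * switch-type GUID.  Returns -1 if nothing matched (callers treat any
+ * negative value as failure).
+ */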
+int
+ReqHandlerDel(GUID switchTypeGuid)
+{
+ struct list_head *lelt, *tmp;
+ ReqHandlerInfo_t *entry = NULL;
+ int rc = -1;
+ spin_lock(&ReqHandlerInfo_list_lock);
+ list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
+ entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
+ if (memcmp
+ (&entry->switchTypeGuid, &switchTypeGuid,
+ sizeof(GUID)) == 0) {
+ list_del(lelt);
+ kfree(entry);
+ rc++;
+ }
+ }
+ spin_unlock(&ReqHandlerInfo_list_lock);
+ return rc;
+}
diff --git a/drivers/staging/unisys/virthba/Kconfig b/drivers/staging/unisys/virthba/Kconfig
new file mode 100644
index 000000000000..c0d7986e78cb
--- /dev/null
+++ b/drivers/staging/unisys/virthba/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys virthba configuration
+#
+
+config UNISYS_VIRTHBA
+ tristate "Unisys virthba driver"
+ depends on UNISYSSPAR && UNISYS_VISORCHIPSET && UNISYS_CHANNELSTUB && UNISYS_UISLIB && UNISYS_VIRTPCI && SCSI
+ ---help---
+ If you say Y here, you will enable the Unisys virthba driver.
+
diff --git a/drivers/staging/unisys/virthba/Makefile b/drivers/staging/unisys/virthba/Makefile
new file mode 100644
index 000000000000..632b1c08b975
--- /dev/null
+++ b/drivers/staging/unisys/virthba/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for Unisys virthba
+#
+
+obj-$(CONFIG_UNISYS_VIRTHBA) += virthba.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/timskmod
+ccflags-y += -Idrivers/staging/unisys/visorchipset
+ccflags-y += -Idrivers/staging/unisys/virtpci
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/virthba/virthba.c b/drivers/staging/unisys/virthba/virthba.c
new file mode 100644
index 000000000000..817b11dfa19c
--- /dev/null
+++ b/drivers/staging/unisys/virthba/virthba.c
@@ -0,0 +1,1823 @@
+/* virthba.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+
+/* if you want to turn on some debugging of write device data or read
+ * device data, define these two undefs. You will probably want to
+ * customize the code which is here since it was written assuming
+ * reading and writing a specific data file, df.64M.txt, which is a
+ * 64-megabyte file created by Art Nilson using a script I wrote called
+ * cr_test_data.pl. The data file consists of 256-byte lines of text
+ * which start with an 8-digit sequence number, a colon, and then
+ * letters after that. */
+
+#undef DBGINF
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "uisqueue.h"
+#include "uisthread.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <asm/param.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+
+#include "virthba.h"
+#include "virtpci.h"
+#include "visorchipset.h"
+#include "version.h"
+#include "guestlinuxdebug.h"
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
+#define __MYFILE__ "virthba.c"
+
+/* NOTE: L1_CACHE_BYTES >=128 */
+#define DEVICE_ATTRIBUTE struct device_attribute
+
+/*****************************************************/
+/* Forward declarations */
+/*****************************************************/
+static int virthba_probe(struct virtpci_dev *dev,
+ const struct pci_device_id *id);
+static void virthba_remove(struct virtpci_dev *dev);
+static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
+static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
+static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
+static const char *virthba_get_info(struct Scsi_Host *shp);
+static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
+ void (*virthba_cmnd_done)(struct scsi_cmnd *));
+
+#ifdef DEF_SCSI_QCMD
+DEF_SCSI_QCMD(virthba_queue_command)
+#else
+#define virthba_queue_command virthba_queue_command_lck
+#endif
+
+
+static int virthba_slave_alloc(struct scsi_device *scsidev);
+static int virthba_slave_configure(struct scsi_device *scsidev);
+static void virthba_slave_destroy(struct scsi_device *scsidev);
+static int process_incoming_rsps(void *);
+static int virthba_serverup(struct virtpci_dev *virtpcidev);
+static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
+static void doDiskAddRemove(struct work_struct *work);
+static void virthba_serverdown_complete(struct work_struct *work);
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t rqwu_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t enable_ints_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+/*****************************************************/
+/* Globals */
+/*****************************************************/
+
+static int rsltq_wait_usecs = 4000; /* Default 4ms */
+static unsigned int MaxBuffLen;
+
+/* Module options */
+static char *virthba_options = "NONE";
+
+static const struct pci_device_id virthba_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
+ {0},
+};
+
+/* export virthba_id_table */
+MODULE_DEVICE_TABLE(pci, virthba_id_table);
+
+static struct workqueue_struct *virthba_serverdown_workqueue;
+
+static struct virtpci_driver virthba_driver = {
+ .name = "uisvirthba",
+ .version = VERSION,
+ .vertag = NULL,
+ .build_date = __DATE__,
+ .build_time = __TIME__,
+ .id_table = virthba_id_table,
+ .probe = virthba_probe,
+ .remove = virthba_remove,
+ .resume = virthba_serverup,
+ .suspend = virthba_serverdown
+};
+
+/* The Send and Receive Buffers of the IO Queue may both be full */
+#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
+#define INTERRUPT_VECTOR_MASK 0x3F
+
+struct scsipending {
+ char cmdtype; /* Type of pointer that is being stored */
+ void *sent; /* The Data being tracked */
+ /* struct scsi_cmnd *type for virthba_queue_command */
+ /* struct uiscmdrsp *type for management commands */
+};
+
+#define VIRTHBA_ERROR_COUNT 30
+#define IOS_ERROR_THRESHOLD 1000
+struct virtdisk_info {
+ U32 valid;
+ U32 channel, id, lun; /* Disk Path */
+ atomic_t ios_threshold;
+ atomic_t error_count;
+ struct virtdisk_info *next;
+};
+/* Each Scsi_Host has a host_data area that contains this struct. */
+struct virthba_info {
+ struct Scsi_Host *scsihost;
+ struct virtpci_dev *virtpcidev;
+ struct list_head dev_info_list;
+ struct chaninfo chinfo;
+ struct InterruptInfo intr; /* use recvInterrupt info to receive
+ interrupts when IOs complete */
+ int interrupt_vector;
+ /* Tracks the requests that have been forwarded to the IOVM
+ * and haven't returned yet */
+ struct scsipending pending[MAX_PENDING_REQUESTS];
+ unsigned int nextinsert; /* Start search for next pending
+ free slot here */
+ spinlock_t privlock;
+ bool serverdown;
+ bool serverchangingstate;
+ unsigned long long acquire_failed_cnt;
+ unsigned long long interrupts_rcvd;
+ unsigned long long interrupts_notme;
+ unsigned long long interrupts_disabled;
+ struct work_struct serverdown_completion;
+ U64 __iomem *flags_addr;
+ atomic_t interrupt_rcvd;
+ wait_queue_head_t rsp_queue;
+ struct virtdisk_info head;
+};
+
+/* Work Data for DARWorkQ */
+struct diskaddremove {
+ U8 add; /* 0-remove, 1-add */
+ struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
+ U32 channel, id, lun; /* Disk Path */
+ struct diskaddremove *next;
+};
+
+#define virtpci_dev_to_virthba_virthba_get_info(d) \
+ container_of(d, struct virthba_info, virtpcidev)
+
+static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
+static struct scsi_host_template virthba_driver_template = {
+ .name = "Unisys Virtual HBA",
+ .proc_name = "uisvirthba",
+ .info = virthba_get_info,
+ .ioctl = virthba_ioctl,
+ .queuecommand = virthba_queue_command,
+ .eh_abort_handler = virthba_abort_handler,
+ .eh_device_reset_handler = virthba_device_reset_handler,
+ .eh_bus_reset_handler = virthba_bus_reset_handler,
+ .eh_host_reset_handler = virthba_host_reset_handler,
+ .shost_attrs = virthba_shost_attrs,
+
+#define VIRTHBA_MAX_CMNDS 128
+ .can_queue = VIRTHBA_MAX_CMNDS,
+ .sg_tablesize = 64, /* largest number of address/length pairs */
+ .this_id = -1,
+ .slave_alloc = virthba_slave_alloc,
+ .slave_configure = virthba_slave_configure,
+ .slave_destroy = virthba_slave_destroy,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+struct virthba_devices_open {
+ struct virthba_info *virthbainfo;
+};
+
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+static const struct file_operations proc_rqwu_fops = {
+ .write = rqwu_proc_write,
+};
+
+static const struct file_operations proc_enable_ints_fops = {
+ .read = enable_ints_read,
+ .write = enable_ints_write,
+};
+
+
+#define VIRTHBASOPENMAX 1
+/* array of open devices maintained by open() and close(); */
+static struct virthba_devices_open VirtHbasOpen[VIRTHBASOPENMAX];
+static struct proc_dir_entry *virthba_proc_dir;
+static struct proc_dir_entry *info_proc_entry;
+static struct proc_dir_entry *rqwaitus_proc_entry;
+static struct proc_dir_entry *enable_ints_proc_entry;
+#define INFO_PROC_ENTRY_FN "info"
+#define ENABLE_INTS_ENTRY_FN "enable_ints"
+#define RQWU_PROC_ENTRY_FN "rqwait_usecs"
+#define DIR_PROC_ENTRY "virthba"
+
+/*****************************************************/
+/* Local Functions */
+/*****************************************************/
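+
+/* Reserve a free slot in the pending[] ring for a command that is about to
+ * be forwarded to the IOVM.  The slot index is stored in the outgoing
+ * cmdrsp and handed back by the IOVM, so del_scsipending_entry() can later
+ * recover the original command.  Returns the index, or -1 if every slot is
+ * in use.
+ */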
+static int
+add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
+{
+ unsigned long flags;
+ int insert_location;
+
+ spin_lock_irqsave(&vhbainfo->privlock, flags);
+ insert_location = vhbainfo->nextinsert;
+ while (vhbainfo->pending[insert_location].sent != NULL) {
+ insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
+ if (insert_location == (int) vhbainfo->nextinsert) {
+ LOGERR("Queue should be full. insert_location<<%d>> Unable to find open slot for pending commands.\n",
+ insert_location);
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+ return -1;
+ }
+ }
+
+ vhbainfo->pending[insert_location].cmdtype = cmdtype;
+ vhbainfo->pending[insert_location].sent = new;
+ vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+
+ return insert_location;
+}
+
+static unsigned int
+add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
+ void *new)
+{
+ int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
+
+ while (insert_location == -1) {
+ LOGERR("Failed to find empty queue slot. Waiting to try again\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+ insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
+ }
+
+ return (unsigned int) insert_location;
+}
+
+static void *
+del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
+{
+ unsigned long flags;
+ void *sent = NULL;
+
+ if (del >= MAX_PENDING_REQUESTS) {
+ LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
+ (unsigned long) del, MAX_PENDING_REQUESTS);
+ } else {
+ spin_lock_irqsave(&vhbainfo->privlock, flags);
+
+ if (vhbainfo->pending[del].sent == NULL)
+ LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
+ (unsigned long) del);
+
+ sent = vhbainfo->pending[del].sent;
+
+ vhbainfo->pending[del].cmdtype = 0;
+ vhbainfo->pending[del].sent = NULL;
+ spin_unlock_irqrestore(&vhbainfo->privlock, flags);
+ }
+
+ return sent;
+}
+
+/* DARWorkQ (Disk Add/Remove) */
+static struct work_struct DARWorkQ;
+static struct diskaddremove *DARWorkQHead;
+static spinlock_t DARWorkQLock;
+static unsigned short DARWorkQSched;
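+
+/* Queue a diskaddremove entry onto the DARWorkQ list and schedule the work
+ * item if it is not already pending.  The caller must have an
+ * "unsigned long flags" local, which the macro uses for the spinlock.
+ */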
+#define QUEUE_DISKADDREMOVE(dar) { \
+ spin_lock_irqsave(&DARWorkQLock, flags); \
+ if (!DARWorkQHead) { \
+ DARWorkQHead = dar; \
+ dar->next = NULL; \
+ } \
+ else { \
+ dar->next = DARWorkQHead; \
+ DARWorkQHead = dar; \
+ } \
+ if (!DARWorkQSched) { \
+ schedule_work(&DARWorkQ); \
+ DARWorkQSched = 1; \
+ } \
+ spin_unlock_irqrestore(&DARWorkQLock, flags); \
+}
+
+static inline void
+SendDiskAddRemove(struct diskaddremove *dar)
+{
+ struct scsi_device *sdev;
+ int error;
+
+ sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
+ if (sdev) {
+ if (!(dar->add))
+ scsi_remove_device(sdev);
+ } else if (dar->add) {
+ error =
+ scsi_add_device(dar->shost, dar->channel, dar->id,
+ dar->lun);
+ if (error)
+ LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n",
+ dar->shost->host_no, dar->channel, dar->id,
+ dar->lun);
+ } else
+ LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n",
+ dar->channel, dar->id, dar->lun);
+ kfree(dar);
+}
+
+/*****************************************************/
+/* DARWorkQ Handler Thread */
+/*****************************************************/
+static void
+doDiskAddRemove(struct work_struct *work)
+{
+ struct diskaddremove *dar;
+ struct diskaddremove *tmphead;
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&DARWorkQLock, flags);
+ tmphead = DARWorkQHead;
+ DARWorkQHead = NULL;
+ DARWorkQSched = 0;
+ spin_unlock_irqrestore(&DARWorkQLock, flags);
+ while (tmphead) {
+ dar = tmphead;
+ tmphead = dar->next;
+ SendDiskAddRemove(dar);
+ i++;
+ }
+}
+
+/*****************************************************/
+/* Routine to add entry to DARWorkQ */
+/*****************************************************/
+static void
+process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
+{
+ struct diskaddremove *dar;
+ unsigned long flags;
+
+ dar = kzalloc(sizeof(struct diskaddremove), GFP_ATOMIC);
+ if (dar) {
+ dar->add = cmdrsp->disknotify.add;
+ dar->shost = shost;
+ dar->channel = cmdrsp->disknotify.channel;
+ dar->id = cmdrsp->disknotify.id;
+ dar->lun = cmdrsp->disknotify.lun;
+ QUEUE_DISKADDREMOVE(dar);
+ } else {
+ LOGERR("kmalloc failed for dar. host_no=%d[chan=%d:id=%d:lun=%d]\n",
+ shost->host_no, cmdrsp->disknotify.channel,
+ cmdrsp->disknotify.id, cmdrsp->disknotify.lun);
+ }
+}
+
+/*****************************************************/
+/* Probe Remove Functions */
+/*****************************************************/
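+
+/* Interrupt handler for the IO channel.  When the IOVM advertises support
+ * for it, further interrupts are disabled here and re-enabled from the
+ * response-processing thread once the signal queue has been drained.
+ */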
+static irqreturn_t
+virthba_ISR(int irq, void *dev_id)
+{
+ struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
+ CHANNEL_HEADER __iomem *pChannelHeader;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ U64 mask;
+ unsigned long long rc1;
+
+ if (virthbainfo == NULL)
+ return IRQ_NONE;
+ virthbainfo->interrupts_rcvd++;
+ pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
+ if (((readq(&pChannelHeader->Features)
+ & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
+ && ((readq(&pChannelHeader->Features) &
+ ULTRA_IO_DRIVER_DISABLES_INTS) !=
+ 0)) {
+ virthbainfo->interrupts_disabled++;
+ mask = ~ULTRA_CHANNEL_ENABLE_INTS;
+ rc1 = uisqueue_InterlockedAnd(virthbainfo->flags_addr, mask);
+ }
+ if (visor_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
+ virthbainfo->interrupts_notme++;
+ return IRQ_NONE;
+ }
+ pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *) pChannelHeader +
+ readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
+ writeq(readq(&pqhdr->NumInterruptsReceived) + 1,
+ &pqhdr->NumInterruptsReceived);
+ atomic_set(&virthbainfo->interrupt_rcvd, 1);
+ wake_up_interruptible(&virthbainfo->rsp_queue);
+ return IRQ_HANDLED;
+}
+
+static int
+virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
+{
+ int error;
+ struct Scsi_Host *scsihost;
+ struct virthba_info *virthbainfo;
+ int rsp;
+ int i;
+ irq_handler_t handler = virthba_ISR;
+ CHANNEL_HEADER __iomem *pChannelHeader;
+ SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ U64 mask;
+
+ LOGVER("entering virthba_probe...\n");
+ LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ LOGINF("entering virthba_probe...\n");
+ LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ /* call scsi_host_alloc to register a scsi host adapter
+ * instance - this virthba that has just been created is an
+ * instance of a scsi host adapter. This scsi_host_alloc
+ * function allocates a new Scsi_Host struct & performs basic
+ * initialization. The host is not published to the scsi
+ * midlayer until scsi_add_host is called.
+ */
+ DBGINF("calling scsi_host_alloc.\n");
+
+ /* arg 2 passes in the length of extra space we want allocated
+ * with the scsi_host struct for our own use; scsi_host_alloc
+ * assigns host_no
+ */
+ scsihost = scsi_host_alloc(&virthba_driver_template,
+ sizeof(struct virthba_info));
+ if (scsihost == NULL)
+ return -ENODEV;
+
+ DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n",
+ scsihost, scsihost->this_id, scsihost->host_no);
+
+ scsihost->this_id = UIS_MAGIC_VHBA;
+ /* linux treats max-channel differently than max-id & max-lun.
+ * In the latter cases, those two values result in 0 to max-1
+ * (inclusive) being scanned. But in the case of channels, the
+ * scan is 0 to max (inclusive); so we will subtract one from
+ * the max-channel value.
+ */
+ LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
+ (unsigned) virtpcidev->scsi.max.max_channel - 1,
+ (unsigned) virtpcidev->scsi.max.max_id,
+ (unsigned) virtpcidev->scsi.max.max_lun,
+ (unsigned) virtpcidev->scsi.max.cmd_per_lun,
+ (unsigned) virtpcidev->scsi.max.max_io_size);
+ scsihost->max_channel = (unsigned) virtpcidev->scsi.max.max_channel;
+ scsihost->max_id = (unsigned) virtpcidev->scsi.max.max_id;
+ scsihost->max_lun = (unsigned) virtpcidev->scsi.max.max_lun;
+ scsihost->cmd_per_lun = (unsigned) virtpcidev->scsi.max.cmd_per_lun;
+ scsihost->max_sectors =
+ (unsigned short) (virtpcidev->scsi.max.max_io_size >> 9);
+ scsihost->sg_tablesize =
+ (unsigned short) (virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
+ if (scsihost->sg_tablesize > MAX_PHYS_INFO)
+ scsihost->sg_tablesize = MAX_PHYS_INFO;
+ LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
+ scsihost->max_channel, scsihost->max_id, scsihost->max_lun,
+ scsihost->cmd_per_lun, scsihost->max_sectors,
+ scsihost->sg_tablesize);
+ LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
+ scsihost->can_queue, scsihost->cmd_per_lun, scsihost->max_sectors,
+ scsihost->sg_tablesize);
+
+ DBGINF("calling scsi_add_host\n");
+
+ /* this creates "host%d" in sysfs. If 2nd argument is NULL,
+ * then this generic /sys/devices/platform/host? device is
+ * created and /sys/scsi_host/host? ->
+ * /sys/devices/platform/host? If 2nd argument is not NULL,
+ * then this generic /sys/devices/<path>/host? is created and
+ * host? points to that device instead.
+ */
+ error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
+ if (error) {
+ LOGERR("scsi_add_host ****FAILED 0x%x TBD - RECOVER\n", error);
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ /* decr refcount on scsihost which was incremented by
+ * scsi_host_alloc so the scsi_host gets deleted
+ */
+ scsi_host_put(scsihost);
+ return -ENODEV;
+ }
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ memset(virthbainfo, 0, sizeof(struct virthba_info));
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo == NULL) {
+ VirtHbasOpen[i].virthbainfo = virthbainfo;
+ break;
+ }
+ }
+ virthbainfo->interrupt_vector = -1;
+ virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
+ virthbainfo->virtpcidev = virtpcidev;
+ spin_lock_init(&virthbainfo->chinfo.insertlock);
+
+ DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n",
+ &virtpcidev->generic_dev, &virtpcidev->queueinfo);
+
+ init_waitqueue_head(&virthbainfo->rsp_queue);
+ spin_lock_init(&virthbainfo->privlock);
+ memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
+ virthbainfo->serverdown = false;
+ virthbainfo->serverchangingstate = false;
+
+ virthbainfo->intr = virtpcidev->intr;
+ /* save the host pointer within virthba_info */
+ virthbainfo->scsihost = scsihost;
+
+ /* save the host pointer within virtpci_dev */
+ virtpcidev->scsi.scsihost = scsihost;
+
+ /* Setup workqueue for serverdown messages */
+ INIT_WORK(&virthbainfo->serverdown_completion,
+ virthba_serverdown_complete);
+
+ writeq(readq(&virthbainfo->chinfo.queueinfo->chan->Features) |
+ ULTRA_IO_CHANNEL_IS_POLLING,
+ &virthbainfo->chinfo.queueinfo->chan->Features);
+ /* start thread that will receive scsicmnd responses */
+ DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
+ virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
+
+ pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
+ pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ ((char __iomem *)pChannelHeader +
+ readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
+ virthbainfo->flags_addr = &pqhdr->FeatureFlags;
+
+ if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
+ process_incoming_rsps,
+ virthbainfo, "vhba_incoming")) {
+ LOGERR("uisthread_start rsp ****FAILED\n");
+ /* decr refcount on scsihost which was incremented by
+ * scsi_host_alloc so the scsi_host gets deleted
+ */
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ scsi_host_put(scsihost);
+ return -ENODEV;
+ }
+ LOGINF("sendInterruptHandle=0x%16llX",
+ virthbainfo->intr.sendInterruptHandle);
+ LOGINF("recvInterruptHandle=0x%16llX",
+ virthbainfo->intr.recvInterruptHandle);
+ LOGINF("recvInterruptVector=0x%8X",
+ virthbainfo->intr.recvInterruptVector);
+ LOGINF("recvInterruptShared=0x%2X",
+ virthbainfo->intr.recvInterruptShared);
+ LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
+ virthbainfo->interrupt_vector =
+ virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK;
+ rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
+ scsihost->hostt->name, virthbainfo);
+ if (rsp != 0) {
+ LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n",
+ virthbainfo->interrupt_vector, rsp);
+ virthbainfo->interrupt_vector = -1;
+ POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ } else {
+ U64 __iomem *Features_addr =
+ &virthbainfo->chinfo.queueinfo->chan->Features;
+ LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
+ virthbainfo->interrupt_vector);
+ mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_DRIVER_ENABLES_INTS;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000000;
+ }
+
+ DBGINF("calling scsi_scan_host.\n");
+ scsi_scan_host(scsihost);
+ DBGINF("return from scsi_scan_host.\n");
+
+ LOGINF("virthba added scsihost:0x%p\n", scsihost);
+ POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void
+virthba_remove(struct virtpci_dev *virtpcidev)
+{
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost =
+ (struct Scsi_Host *) virtpcidev->scsi.scsihost;
+
+ LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (virthbainfo->interrupt_vector != -1)
+ free_irq(virthbainfo->interrupt_vector, virthbainfo);
+ LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev,
+ virthbainfo);
+
+ DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost,
+ scsihost->this_id);
+ scsi_remove_host(scsihost);
+
+ DBGINF("stopping thread.\n");
+ uisthread_stop(&virthbainfo->chinfo.threadinfo);
+
+ DBGINF("calling scsi_host_put\n");
+
+ /* decr refcount on scsihost which was incremented by
+ * scsi_host_alloc so the scsi_host gets deleted
+ */
+ scsi_host_put(scsihost);
+ LOGINF("virthba removed scsi_host.\n");
+}
+
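+/* Build a CMD_VDISKMGMT_TYPE request, queue it to the IOVM over the IO
+ * channel, and sleep on a local wait queue until the response thread wakes
+ * us with the result.
+ */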
+static int
+forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
+ struct Scsi_Host *scsihost,
+ struct uisscsi_dest *vdest)
+{
+ struct uiscmdrsp *cmdrsp;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsihost->hostdata;
+ int notifyresult = 0xffff;
+ wait_queue_head_t notifyevent;
+
+ LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype,
+ vdest->channel, vdest->id, vdest->lun);
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning Failure.\n");
+ return FAILED;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return FAILED; /* reject */
+ }
+
+ init_waitqueue_head(&notifyevent);
+
+ /* issue VDISK_MGMT_CMD
+ * set type to command - as opposed to task mgmt
+ */
+ cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
+ /* specify the event that has to be triggered when this cmd is
+ * complete
+ */
+ cmdrsp->vdiskmgmt.notify = (void *) &notifyevent;
+ cmdrsp->vdiskmgmt.notifyresult = (void *) &notifyresult;
+
+ /* save destination */
+ cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
+ cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
+ cmdrsp->vdiskmgmt.vdest.id = vdest->id;
+ cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
+ cmdrsp->vdiskmgmt.scsicmd =
+ (void *) (uintptr_t)
+ add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
+ (void *) cmdrsp);
+
+ uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.insertlock,
+ DONT_ISSUE_INTERRUPT, (U64) NULL,
+ OK_TO_WAIT, "vhba");
+ LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
+ cmdrsp->scsitaskmgmt.notify);
+ wait_event(notifyevent, notifyresult != 0xffff);
+ LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp->vdiskmgmt.result);
+ kfree(cmdrsp);
+ return SUCCESS;
+}
+
+/*****************************************************/
+/* Scsi Host support functions */
+/*****************************************************/
+
+static int
+forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev)
+{
+ struct uiscmdrsp *cmdrsp;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsidev->host->hostdata;
+ int notifyresult = 0xffff;
+ wait_queue_head_t notifyevent;
+
+ LOGINF("TaskMgmt:%d %d:%d:%d\n", tasktype,
+ scsidev->channel, scsidev->id, scsidev->lun);
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning Failure.\n");
+ return FAILED;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return FAILED; /* reject */
+ }
+
+ init_waitqueue_head(&notifyevent);
+
+ /* issue TASK_MGMT_ABORT_TASK */
+ /* set type to command - as opposed to task mgmt */
+ cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
+ /* specify the event that has to be triggered when this */
+ /* cmd is complete */
+ cmdrsp->scsitaskmgmt.notify = (void *) &notifyevent;
+ cmdrsp->scsitaskmgmt.notifyresult = (void *) &notifyresult;
+
+ /* save destination */
+ cmdrsp->scsitaskmgmt.tasktype = tasktype;
+ cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
+ cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
+ cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
+ cmdrsp->scsitaskmgmt.scsicmd =
+ (void *) (uintptr_t)
+ add_scsipending_entry_with_wait(virthbainfo,
+ CMD_SCSITASKMGMT_TYPE,
+ (void *) cmdrsp);
+
+ uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.insertlock,
+ DONT_ISSUE_INTERRUPT, (U64) NULL,
+ OK_TO_WAIT, "vhba");
+ LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
+ cmdrsp->scsitaskmgmt.notify);
+ wait_event(notifyevent, notifyresult != 0xffff);
+ LOGINF("TaskMgmt complete; result:%d\n", cmdrsp->scsitaskmgmt.result);
+ kfree(cmdrsp);
+ return SUCCESS;
+}
+
+/* The abort handler returns SUCCESS if it has succeeded in making the LLDD
+ * and all related hardware forget about the scmd.
+ */
+static int
+virthba_abort_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_ABORT_TASK */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
+}
+
+static int
+virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_TARGET_RESET for each target on the bus */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
+}
+
+static int
+virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_LUN_RESET */
+ struct scsi_device *scsidev;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel == vdisk->channel)
+ && (scsidev->id == vdisk->id)
+ && (scsidev->lun == vdisk->lun)) {
+ if (atomic_read(&vdisk->error_count) <
+ VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
+ POSTCODE_SEVERITY_INFO);
+ } else
+ atomic_set(&vdisk->ios_threshold,
+ IOS_ERROR_THRESHOLD);
+ }
+ }
+ return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
+}
+
+static int
+virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
+ LOGERR("virthba_host_reset_handler Not yet implemented\n");
+ return SUCCESS;
+}
+
+static char virthba_get_info_str[256];
+
+static const char *
+virthba_get_info(struct Scsi_Host *shp)
+{
+ /* Return version string */
+ sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
+ return virthba_get_info_str;
+}
+
+static int
+virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+{
+ DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd);
+ return -EINVAL;
+}
+
+/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
+ * is full.
+ */
+static int
+virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
+ void (*virthba_cmnd_done)(struct scsi_cmnd *))
+{
+ struct scsi_device *scsidev = scsicmd->device;
+ int insert_location;
+ unsigned char op;
+ unsigned char *cdb = scsicmd->cmnd;
+ struct Scsi_Host *scsihost = scsidev->host;
+ struct uiscmdrsp *cmdrsp;
+ unsigned int i;
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) scsihost->hostdata;
+ struct scatterlist *sg = NULL;
+ struct scatterlist *sgl = NULL;
+ int sg_failed = 0;
+
+ if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
+ DBGINF("Server is down/changing state. Returning SCSI_MLQUEUE_DEVICE_BUSY.\n");
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("kmalloc of cmdrsp failed.\n");
+ return 1; /* reject the command */
+ }
+
+ /* now saving everything we need from scsi_cmd into cmdrsp
+ * before we queue cmdrsp. Set type to command - as opposed to
+ * task mgmt.
+ */
+ cmdrsp->cmdtype = CMD_SCSI_TYPE;
+ /* save the pending insertion location. Deletion from pending
+ * will return the scsicmd pointer for completion
+ */
+ insert_location =
+ add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *) scsicmd);
+ if (insert_location != -1) {
+ cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location;
+ } else {
+ LOGERR("Queue is full. Returning busy.\n");
+ kfree(cmdrsp);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+ /* save the done function that we call when the cmd is complete */
+ scsicmd->scsi_done = virthba_cmnd_done;
+ /* save destination */
+ cmdrsp->scsi.vdest.channel = scsidev->channel;
+ cmdrsp->scsi.vdest.id = scsidev->id;
+ cmdrsp->scsi.vdest.lun = scsidev->lun;
+ /* save datadir */
+ cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
+ memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
+
+ cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
+
+ /* keep track of the max buffer length so far. */
+ if (cmdrsp->scsi.bufflen > MaxBuffLen)
+ MaxBuffLen = cmdrsp->scsi.bufflen;
+
+ if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
+ LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
+ scsi_sg_count(scsicmd), MAX_PHYS_INFO);
+ del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ kfree(cmdrsp);
+ return 1; /* reject the command */
+ }
+
+ /* This is what we USED to do when we assumed we were running */
+ /* uissd & virthba on the same Linux system. */
+ /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
+ /* The following code does NOT make that assumption. */
+ /* convert buffer to phys information */
+ if (scsi_sg_count(scsicmd) == 0) {
+ if (scsi_bufflen(scsicmd) > 0) {
+ LOGERR("**** FAILED No scatter list for bufflen > 0\n");
+ BUG_ON(scsi_sg_count(scsicmd) == 0);
+ }
+ DBGINF("No sg; buffer:0x%p bufflen:%d\n",
+ scsi_sglist(scsicmd), scsi_bufflen(scsicmd));
+ } else {
+ /* buffer is scatterlist - copy it out */
+ sgl = scsi_sglist(scsicmd);
+
+ for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
+
+ cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
+ cmdrsp->scsi.gpi_list[i].length = sg->length;
+ if ((i != 0) && (sg->offset != 0))
+ LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
+ sg->offset);
+ }
+
+ if (sg_failed) {
+ LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
+ scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
+ for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
+ LOGERR(" Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
+ i, sg_page(sg),
+ (unsigned long long) sg_phys(sg),
+ sg->offset, sg->length);
+ }
+ LOGERR("Done sg_list dump.\n");
+ /* BUG(); ***** For now, let it fail in uissd
+ * if it is a problem, as it might just
+ * work
+ */
+ }
+
+ cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
+ }
+
+ op = cdb[0];
+ i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
+ cmdrsp, IOCHAN_TO_IOPART,
+ &virthbainfo->chinfo.
+ insertlock,
+ DONT_ISSUE_INTERRUPT,
+ (U64) NULL, DONT_WAIT, "vhba");
+ if (i == 0) {
+ /* queue must be full - and we said don't wait - return busy */
+ LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
+ kfree(cmdrsp);
+ del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ /* we're done with cmdrsp space - data from it has been copied
+ * into channel - free it now.
+ */
+ kfree(cmdrsp);
+ return 0; /* non-zero implies host/device is busy */
+}
+
+static int
+virthba_slave_alloc(struct scsi_device *scsidev)
+{
+ /* this is called by the midlayer before the scan for new devices -
+ * the LLD can alloc any struct & do init if needed.
+ */
+ struct virtdisk_info *vdisk;
+ struct virtdisk_info *tmpvdisk;
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (!virthbainfo) {
+ LOGERR("Could not find virthba_info for scsihost\n");
+ return 0; /* even though we errored, treat as success */
+ }
+ for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
+ if (vdisk->next->valid &&
+ (vdisk->next->channel == scsidev->channel) &&
+ (vdisk->next->id == scsidev->id) &&
+ (vdisk->next->lun == scsidev->lun))
+ return 0;
+ }
+ tmpvdisk = kzalloc(sizeof(struct virtdisk_info), GFP_ATOMIC);
+ if (!tmpvdisk) { /* error allocating */
+ LOGERR("Could not allocate memory for disk\n");
+ return 0;
+ }
+
+ tmpvdisk->channel = scsidev->channel;
+ tmpvdisk->id = scsidev->id;
+ tmpvdisk->lun = scsidev->lun;
+ tmpvdisk->valid = 1;
+ vdisk->next = tmpvdisk;
+ return 0; /* success */
+}
+
+static int
+virthba_slave_configure(struct scsi_device *scsidev)
+{
+ return 0; /* success */
+}
+
+static void
+virthba_slave_destroy(struct scsi_device *scsidev)
+{
+ /* midlevel calls this after device has been quiesced and
+ * before it is to be deleted.
+ */
+ struct virtdisk_info *vdisk, *delvdisk;
+ struct virthba_info *virthbainfo;
+ struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
+
+ virthbainfo = (struct virthba_info *) scsihost->hostdata;
+ if (!virthbainfo)
+ LOGERR("Could not find virthba_info for scsihost\n");
+ for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
+ if (vdisk->next->valid &&
+ (vdisk->next->channel == scsidev->channel) &&
+ (vdisk->next->id == scsidev->id) &&
+ (vdisk->next->lun == scsidev->lun)) {
+ delvdisk = vdisk->next;
+ vdisk->next = vdisk->next->next;
+ kfree(delvdisk);
+ return;
+ }
+ }
+ return;
+}
+
+/*****************************************************/
+/* Scsi Cmnd support thread */
+/*****************************************************/
+
+static void
+do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ struct virtdisk_info *vdisk;
+ struct scsi_device *scsidev;
+ struct sense_data *sd;
+
+ scsidev = scsicmd->device;
+ memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
+ sd = (struct sense_data *) scsicmd->sense_buffer;
+
+ /* Do not log errors for disk-not-present inquiries */
+ if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+ (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
+ (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
+ return;
+
+ /* Okay see what our error_count is here.... */
+ for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
+ vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel != vdisk->channel)
+ || (scsidev->id != vdisk->id)
+ || (scsidev->lun != vdisk->lun))
+ continue;
+
+ if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
+ atomic_inc(&vdisk->error_count);
+ LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%d> 0x%x-0x%x-0x%x-0x%x-0x%x.\n",
+ scsicmd, cmdrsp->scsi.cmnd[0],
+ scsidev->host->host_no, scsidev->id,
+ scsidev->channel, scsidev->lun,
+ cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey,
+ sd->AdditionalSenseCode,
+ sd->AdditionalSenseCodeQualifier);
+ if (atomic_read(&vdisk->error_count) ==
+ VIRTHBA_ERROR_COUNT) {
+ LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%d>\n",
+ scsidev->host->host_no, scsidev->id,
+ scsidev->channel, scsidev->lun);
+ }
+ atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+ }
+ }
+}
+
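+/* Successful-completion path: for an INQUIRY of a LUN with no disk behind
+ * it, synthesize the no-disk inquiry response into the caller's buffer;
+ * for ordinary successful commands, decay the per-disk error throttling
+ * counters.
+ */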
+static void
+do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ struct scsi_device *scsidev;
+ unsigned char buf[36];
+ struct scatterlist *sg;
+ unsigned int i;
+ char *thispage;
+ char *thispage_orig;
+ int bufind = 0;
+ struct virtdisk_info *vdisk;
+
+ scsidev = scsicmd->device;
+ if ((cmdrsp->scsi.cmnd[0] == INQUIRY)
+ && (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+ if (cmdrsp->scsi.no_disk_result == 0)
+ return;
+
+ /* Linux scsi code is weird; it wants
+ * a device at LUN 0 to issue REPORT
+ * LUNS, but we don't want a disk
+ * there, so we'll present a processor
+ * there. */
+ SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
+ scsidev->lun,
+ DEV_DISK_CAPABLE_NOT_PRESENT,
+ DEV_NOT_CAPABLE);
+
+ if (scsi_sg_count(scsicmd) == 0) {
+ if (scsi_bufflen(scsicmd) > 0) {
+ LOGERR("**** FAILED No scatter list for bufflen > 0\n");
+ BUG_ON(scsi_sg_count(scsicmd) ==
+ 0);
+ }
+ memcpy(scsi_sglist(scsicmd), buf,
+ cmdrsp->scsi.bufflen);
+ return;
+ }
+
+ sg = scsi_sglist(scsicmd);
+ for (i = 0; i < scsi_sg_count(scsicmd); i++) {
+ DBGVER("copying OUT OF buf into 0x%p %d\n",
+ sg_page(sg + i), sg[i].length);
+ thispage_orig = kmap_atomic(sg_page(sg + i));
+ thispage = (void *) ((unsigned long)thispage_orig |
+ sg[i].offset);
+ memcpy(thispage, buf + bufind, sg[i].length);
+ kunmap_atomic(thispage_orig);
+ bufind += sg[i].length;
+ }
+ } else {
+
+ vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
+ for ( ; vdisk->next; vdisk = vdisk->next) {
+ if ((scsidev->channel != vdisk->channel)
+ || (scsidev->id != vdisk->id)
+ || (scsidev->lun != vdisk->lun))
+ continue;
+
+ if (atomic_read(&vdisk->ios_threshold) > 0) {
+ atomic_dec(&vdisk->ios_threshold);
+ if (atomic_read(&vdisk->ios_threshold) == 0) {
+ LOGERR("Resetting error count for disk\n");
+ atomic_set(&vdisk->error_count, 0);
+ }
+ }
+ }
+ }
+}
+
+static void
+complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+ DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp, cmdrsp->scsi.scsistat);
+
+ /* take what we need out of cmdrsp and complete the scsicmd */
+ scsicmd->result = cmdrsp->scsi.linuxstat;
+ if (cmdrsp->scsi.linuxstat)
+ do_scsi_linuxstat(cmdrsp, scsicmd);
+ else
+ do_scsi_nolinuxstat(cmdrsp, scsicmd);
+
+ if (scsicmd->scsi_done) {
+ DBGVER("Scsi_DONE\n");
+ scsicmd->scsi_done(scsicmd);
+ }
+}
+
+static inline void
+complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+ /* copy the result of the taskmgmt and */
+ /* wake up the error handler that is waiting for this */
+ *(int *) cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
+ wake_up_all((wait_queue_head_t *) cmdrsp->vdiskmgmt.notify);
+ LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result);
+}
+
+static inline void
+complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+ /* copy the result of the taskmgmt and */
+ /* wake up the error handler that is waiting for this */
+ *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ cmdrsp->scsitaskmgmt.result;
+ wake_up_all((wait_queue_head_t *) cmdrsp->scsitaskmgmt.notify);
+ LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result);
+}
+
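+/* Pull every available response off the IOVM response queue and dispatch it
+ * by cmdtype: SCSI command completions, task-management and vdisk-management
+ * wakeups, and disk add/remove notifications.
+ */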
+static void
+drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
+ struct uiscmdrsp *cmdrsp)
+{
+ unsigned long flags;
+ int qrslt = 0;
+ struct scsi_cmnd *scsicmd;
+ struct Scsi_Host *shost = virthbainfo->scsihost;
+
+ while (1) {
+ spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
+ if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan,
+ "vhba", NULL)) {
+ spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
+ flags);
+ virthbainfo->acquire_failed_cnt++;
+ break;
+ }
+ qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
+ IOCHAN_FROM_IOPART);
+ ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan,
+ "vhba", NULL);
+ spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
+ if (qrslt == 0)
+ break;
+ if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
+ /* scsicmd location is returned by the
+ * deletion
+ */
+ scsicmd = del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->scsi.scsicmd);
+ if (!scsicmd)
+ break;
+ /* complete the orig cmd */
+ complete_scsi_command(cmdrsp, scsicmd);
+ } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
+ if (!del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->scsitaskmgmt.scsicmd))
+ break;
+ complete_taskmgmt_command(cmdrsp);
+ } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
+ /* The vHba pointer has no meaning in
+ * a Client/Guest Partition. Let's be
+ * safe and set it to NULL now. Do
+ * not use it here! */
+ cmdrsp->disknotify.vHba = NULL;
+ process_disk_notify(shost, cmdrsp);
+ } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
+ if (!del_scsipending_entry(virthbainfo,
+ (uintptr_t) cmdrsp->vdiskmgmt.scsicmd))
+ break;
+ complete_vdiskmgmt_command(cmdrsp);
+ } else
+ LOGERR("Invalid cmdtype %d\n", cmdrsp->cmdtype);
+ /* cmdrsp is now available for reuse */
+ }
+}
+
+
+/* main function for the thread that waits for scsi commands to arrive
+ * in a specified queue
+ */
+static int
+process_incoming_rsps(void *v)
+{
+ struct virthba_info *virthbainfo = v;
+ struct chaninfo *dc = &virthbainfo->chinfo;
+ struct uiscmdrsp *cmdrsp = NULL;
+ const int SZ = sizeof(struct uiscmdrsp);
+ U64 mask;
+ unsigned long long rc1;
+
+ UIS_DAEMONIZE("vhba_incoming");
+ /* alloc once and reuse */
+ cmdrsp = kmalloc(SZ, GFP_ATOMIC);
+ if (cmdrsp == NULL) {
+ LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n");
+ complete_and_exit(&dc->threadinfo.has_stopped, 0);
+ return 0;
+ }
+ mask = ULTRA_CHANNEL_ENABLE_INTS;
+ while (1) {
+ wait_event_interruptible_timeout(virthbainfo->rsp_queue,
+ (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
+ usecs_to_jiffies(rsltq_wait_usecs));
+ atomic_set(&virthbainfo->interrupt_rcvd, 0);
+ /* drain queue */
+ drain_queue(virthbainfo, dc, cmdrsp);
+ rc1 = uisqueue_InterlockedOr(virthbainfo->flags_addr, mask);
+ if (dc->threadinfo.should_stop)
+ break;
+ }
+
+ kfree(cmdrsp);
+
+ DBGINF("exiting processing incoming rsps.\n");
+ complete_and_exit(&dc->threadinfo.has_stopped, 0);
+}
+
+/*****************************************************/
+/* proc filesystem functions */
+/*****************************************************/
+
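+/* /proc/virthba/info: format per-adapter counters (interrupt statistics,
+ * largest buffer length seen, current result-queue poll interval) into a
+ * buffer and copy it to user space.
+ */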
+static ssize_t
+info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+ int length = 0;
+ U64 phys_flags_addr;
+ int i;
+ struct virthba_info *virthbainfo;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo == NULL)
+ continue;
+
+ virthbainfo = VirtHbasOpen[i].virthbainfo;
+ length += sprintf(vbuf + length, "CHANSOCK is not defined.\n");
+
+ length += sprintf(vbuf + length, "MaxBuffLen:%d\n", MaxBuffLen);
+
+ length += sprintf(vbuf + length, "\nvirthba result queue poll wait:%d usecs.\n",
+ rsltq_wait_usecs);
+
+ length += sprintf(vbuf + length,
+ "\nModule build: Date:%s Time:%s\n",
+ __DATE__, __TIME__);
+ length += sprintf(vbuf + length, "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
+ virthbainfo->interrupts_rcvd,
+ virthbainfo->interrupts_disabled);
+ length += sprintf(vbuf + length, "\ninterrupts_notme = %llu,\n",
+ virthbainfo->interrupts_notme);
+ phys_flags_addr = virt_to_phys((__force void *)
+ virthbainfo->flags_addr);
+ length += sprintf(vbuf + length, "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
+ virthbainfo->flags_addr, phys_flags_addr,
+ (__le64)readq(virthbainfo->flags_addr));
+ length += sprintf(vbuf + length, "acquire_failed_cnt:%llu\n",
+ virthbainfo->acquire_failed_cnt);
+ length += sprintf(vbuf + length, "\n");
+ }
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+static ssize_t
+enable_ints_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
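+/* /proc/virthba/enable_ints: writing 1 switches every open adapter's channel
+ * from polled to interrupt-driven operation (and lengthens the poll timeout);
+ * writing 0 switches back to polling.
+ */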
+static ssize_t
+enable_ints_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[4];
+ int i, new_value;
+ struct virthba_info *virthbainfo;
+ U64 __iomem *Features_addr;
+ U64 mask;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ buf[count] = '\0';
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
+ (int) count, buf, count);
+ return -EFAULT;
+ }
+
+ i = sscanf(buf, "%d", &new_value);
+
+ if (i < 1) {
+ LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
+ (int) count, buf);
+ return -EFAULT;
+ }
+
+ /* set all counts to new_value usually 0 */
+ for (i = 0; i < VIRTHBASOPENMAX; i++) {
+ if (VirtHbasOpen[i].virthbainfo != NULL) {
+ virthbainfo = VirtHbasOpen[i].virthbainfo;
+ Features_addr =
+ &virthbainfo->chinfo.queueinfo->chan->Features;
+ if (new_value == 1) {
+ mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_DRIVER_ENABLES_INTS;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000000;
+ } else {
+ mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
+ ULTRA_IO_DRIVER_DISABLES_INTS);
+ uisqueue_InterlockedAnd(Features_addr, mask);
+ mask = ULTRA_IO_CHANNEL_IS_POLLING;
+ uisqueue_InterlockedOr(Features_addr, mask);
+ rsltq_wait_usecs = 4000;
+ }
+ }
+ }
+ return count;
+}
+
+static ssize_t
+rqwu_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int i, usecs;
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
+ (int) count, buf, count);
+ return -EFAULT;
+ }
+ buf[count] = '\0'; /* terminate before handing the buffer to sscanf */
+
+ i = sscanf(buf, "%d", &usecs);
+
+ if (i < 1) {
+ LOGERR("Failed to scan value for rqwait_usecs buf<<%.*s>>",
+ (int) count, buf);
+ return -EFAULT;
+ }
+
+ /* set global wait time */
+ rsltq_wait_usecs = usecs;
+ return count;
+}
+
+/* As per VirtpciFunc returns 1 for success and 0 for failure */
+static int
+virthba_serverup(struct virtpci_dev *virtpcidev)
+{
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ scsihost)->hostdata;
+
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ if (!virthbainfo->serverdown) {
+ DBGINF("Server up message recieved while server is already up.\n");
+ return 1;
+ }
+ if (virthbainfo->serverchangingstate) {
+ LOGERR("Server already processing change state message\n");
+ return 0;
+ }
+
+ virthbainfo->serverchangingstate = true;
+ /* Must transition channel to ATTACHED state BEFORE we
+ * can start using the device again
+ */
+ ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
+ dev_name(&virtpcidev->generic_dev),
+ CHANNELCLI_ATTACHED, NULL);
+
+ /* Start Processing the IOVM Response Queue Again */
+ if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
+ process_incoming_rsps,
+ virthbainfo, "vhba_incoming")) {
+ LOGERR("uisthread_start rsp ****FAILED\n");
+ return 0;
+ }
+ virthbainfo->serverdown = false;
+ virthbainfo->serverchangingstate = false;
+
+ return 1;
+}
+
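+/* Workqueue handler run when the IO service partition goes down: stop the
+ * response thread, fail every outstanding entry in pending[], and then
+ * acknowledge the pause request back to visorchipset.
+ */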
+static void
+virthba_serverdown_complete(struct work_struct *work)
+{
+ struct virthba_info *virthbainfo;
+ struct virtpci_dev *virtpcidev;
+ int i;
+ struct scsipending *pendingdel = NULL;
+ struct scsi_cmnd *scsicmd = NULL;
+ struct uiscmdrsp *cmdrsp;
+ unsigned long flags;
+
+ virthbainfo = container_of(work, struct virthba_info,
+ serverdown_completion);
+
+ /* Stop Using the IOVM Response Queue (queue should be drained
+ * by the end)
+ */
+ uisthread_stop(&virthbainfo->chinfo.threadinfo);
+
+ /* Fail Commands that weren't completed */
+ spin_lock_irqsave(&virthbainfo->privlock, flags);
+ for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
+ pendingdel = &(virthbainfo->pending[i]);
+ switch (pendingdel->cmdtype) {
+ case CMD_SCSI_TYPE:
+ scsicmd = (struct scsi_cmnd *) pendingdel->sent;
+ scsicmd->result = (DID_RESET << 16);
+ if (scsicmd->scsi_done)
+ scsicmd->scsi_done(scsicmd);
+ break;
+ case CMD_SCSITASKMGMT_TYPE:
+ cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
+ DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp,
+ cmdrsp->scsitaskmgmt.notify);
+ *(int *) cmdrsp->scsitaskmgmt.notifyresult =
+ TASK_MGMT_FAILED;
+ wake_up_all((wait_queue_head_t *)
+ cmdrsp->scsitaskmgmt.notify);
+ break;
+ case CMD_VDISKMGMT_TYPE:
+ cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
+ *(int *) cmdrsp->vdiskmgmt.notifyresult =
+ VDISK_MGMT_FAILED;
+ wake_up_all((wait_queue_head_t *)
+ cmdrsp->vdiskmgmt.notify);
+ break;
+ default:
+ if (pendingdel->sent != NULL)
+ LOGERR("Unknown command type: 0x%x. Only freeing list structure.\n",
+ pendingdel->cmdtype);
+ }
+ pendingdel->cmdtype = 0;
+ pendingdel->sent = NULL;
+ }
+ spin_unlock_irqrestore(&virthbainfo->privlock, flags);
+
+ virtpcidev = virthbainfo->virtpcidev;
+
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+ virthbainfo->serverdown = true;
+ virthbainfo->serverchangingstate = false;
+ /* Return the ServerDown response to Command */
+ visorchipset_device_pause_response(virtpcidev->busNo,
+ virtpcidev->deviceNo, 0);
+}
+
+/* As per VirtpciFunc returns 1 for success and 0 for failure */
+static int
+virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
+{
+ struct virthba_info *virthbainfo =
+ (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
+ scsihost)->hostdata;
+
+ DBGINF("virthba_serverdown");
+ DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
+ virtpcidev->deviceNo);
+
+ if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
+ virthbainfo->serverchangingstate = true;
+ queue_work(virthba_serverdown_workqueue,
+ &virthbainfo->serverdown_completion);
+ } else if (virthbainfo->serverchangingstate) {
+ LOGERR("Server already processing change state message\n");
+ return 0;
+ } else
+ LOGERR("Server already down, but another server down message received.");
+
+ return 1;
+}
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init
+virthba_parse_line(char *str)
+{
+ DBGINF("In virthba_parse_line %s\n", str);
+ return 1;
+}
+
+static void __init
+virthba_parse_options(char *line)
+{
+ char *next = line;
+
+ POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (line == NULL || !*line)
+ return;
+ while ((line = next) != NULL) {
+ next = strchr(line, ' ');
+ if (next != NULL)
+ *next++ = 0;
+ if (!virthba_parse_line(line))
+ DBGINF("Unknown option '%s'\n", line);
+ }
+
+ POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+}
+
+static int __init
+virthba_mod_init(void)
+{
+ int error;
+ int i;
+
+ LOGINF("Entering virthba_mod_init...\n");
+
+ POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ virthba_parse_options(virthba_options);
+
+ error = virtpci_register_driver(&virthba_driver);
+ if (error < 0) {
+ LOGERR("register ****FAILED 0x%x\n", error);
+ POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
+ POSTCODE_SEVERITY_ERR);
+ } else {
+ /* create the proc directories */
+ virthba_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_info_fops);
+ rqwaitus_proc_entry = proc_create(RQWU_PROC_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_rqwu_fops);
+ enable_ints_proc_entry = proc_create(ENABLE_INTS_ENTRY_FN, 0,
+ virthba_proc_dir,
+ &proc_enable_ints_fops);
+
+ /* Initialize DARWorkQ */
+ INIT_WORK(&DARWorkQ, doDiskAddRemove);
+ spin_lock_init(&DARWorkQLock);
+
+ /* clear out array */
+ for (i = 0; i < VIRTHBASOPENMAX; i++)
+ VirtHbasOpen[i].virthbainfo = NULL;
+ /* Initialize the serverdown workqueue */
+ virthba_serverdown_workqueue =
+ create_singlethread_workqueue("virthba_serverdown");
+ if (virthba_serverdown_workqueue == NULL) {
+ LOGERR("**** FAILED virthba_serverdown_workqueue creation\n");
+ POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ error = -1;
+ }
+ }
+
+ POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ LOGINF("Leaving virthba_mod_init\n");
+ return error;
+}
+
+static ssize_t
+virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uisscsi_dest vdest;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ int i;
+
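+	/* buf is expected in "<channel>-<id>-<lun>" form */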
+ i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
+	if (i != 3)
+		return -EINVAL;
+
+ return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
+}
+
+static ssize_t
+virthba_release_lun(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uisscsi_dest vdest;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ int i;
+
+ i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
+	if (i != 3)
+		return -EINVAL;
+
+ return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
+}
+
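+/* local helper to declare the writable shost sysfs attributes defined below */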
+#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute class_device_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
+static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);
+
+static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
+ &class_device_attr_acquire_lun,
+ &class_device_attr_release_lun,
+ NULL
+};
+
+static void __exit
+virthba_mod_exit(void)
+{
+ LOGINF("entering virthba_mod_exit...\n");
+
+ virtpci_unregister_driver(&virthba_driver);
+ /* unregister is going to call virthba_remove */
+ /* destroy serverdown completion workqueue */
+ if (virthba_serverdown_workqueue) {
+ destroy_workqueue(virthba_serverdown_workqueue);
+ virthba_serverdown_workqueue = NULL;
+ }
+
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, virthba_proc_dir);
+
+	if (rqwaitus_proc_entry)
+		remove_proc_entry(RQWU_PROC_ENTRY_FN, virthba_proc_dir);
+
+	if (enable_ints_proc_entry)
+		remove_proc_entry(ENABLE_INTS_ENTRY_FN, virthba_proc_dir);
+
+ if (virthba_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ LOGINF("Leaving virthba_mod_exit\n");
+
+}
+
+/* specify function to be run at module insertion time */
+module_init(virthba_mod_init);
+
+/* specify function to be run when module is removed */
+module_exit(virthba_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uisvirthba");
+/* this is extracted during depmod and kept in modules.dep */
+/* module parameter */
+module_param(virthba_options, charp, S_IRUGO);
diff --git a/drivers/staging/unisys/virthba/virthba.h b/drivers/staging/unisys/virthba/virthba.h
new file mode 100644
index 000000000000..88b797439a16
--- /dev/null
+++ b/drivers/staging/unisys/virthba/virthba.h
@@ -0,0 +1,31 @@
+/* virthba.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual HBA driver header
+ */
+
+
+
+#ifndef __VIRTHBA_H__
+#define __VIRTHBA_H__
+
+
+#define VIRTHBA_VERSION "01.00"
+
+
+#endif /* __VIRTHBA_H__ */
diff --git a/drivers/staging/unisys/virtpci/Kconfig b/drivers/staging/unisys/virtpci/Kconfig
new file mode 100644
index 000000000000..e59efcbc4d3b
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys virtpci configuration
+#
+
+config UNISYS_VIRTPCI
+ tristate "Unisys virtpci driver"
+ depends on UNISYSSPAR && UNISYS_UISLIB
+ ---help---
+ If you say Y here, you will enable the Unisys virtpci driver.
+
diff --git a/drivers/staging/unisys/virtpci/Makefile b/drivers/staging/unisys/virtpci/Makefile
new file mode 100644
index 000000000000..f9399aabddd1
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for Unisys virtpci
+#
+
+obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/virtpci/virtpci.c b/drivers/staging/unisys/virtpci/virtpci.c
new file mode 100644
index 000000000000..8e34650b00b5
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/virtpci.c
@@ -0,0 +1,1771 @@
+/* virtpci.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#define EXPORT_SYMTAB
+
+#include <linux/kernel.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "commontypes.h"
+#include "vbuschannel.h"
+#include "vbushelper.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/proc_fs.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include "version.h"
+#include "guestlinuxdebug.h"
+
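+/* local copy of the driver core's private structure layout, so the attr
+ * show/store routines below can map a kobject back to its device_driver
+ */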
+struct driver_private {
+ struct kobject kobj;
+ struct klist klist_devices;
+ struct klist_node knode_bus;
+ struct module_kobject *mkobj;
+ struct device_driver *driver;
+};
+#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+
+/* bus_id went away in 2.6.30 - its size was 20 bytes, so we define it
+ * ourselves, along with a macro that makes getting the name a bit simpler.
+ */
+#ifndef BUS_ID_SIZE
+#define BUS_ID_SIZE 20
+#endif
+
+#define BUS_ID(x) dev_name(x)
+
+#include "virtpci.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC VIRT_PCI_PC_virtpci_c
+#define __MYFILE__ "virtpci.c"
+
+#define VIRTPCI_VERSION "01.00"
+
+/*****************************************************/
+/* Forward declarations */
+/*****************************************************/
+
+static int delete_vbus_device(struct device *vbus, void *data);
+static int match_busid(struct device *dev, void *data);
+static void virtpci_bus_release(struct device *dev);
+static void virtpci_device_release(struct device *dev);
+static int virtpci_device_add(struct device *parentbus, int devtype,
+ struct add_virt_guestpart *addparams,
+ struct scsi_adap_info *scsi,
+ struct net_adap_info *net);
+static int virtpci_device_del(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn, unsigned char macaddr[]);
+static int virtpci_device_serverdown(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[]);
+static int virtpci_device_serverup(struct device *parentbus, int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[]);
+static ssize_t virtpci_driver_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count);
+static int virtpci_bus_match(struct device *dev, struct device_driver *drv);
+static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env);
+static int virtpci_device_suspend(struct device *dev, pm_message_t state);
+static int virtpci_device_resume(struct device *dev);
+static int virtpci_device_probe(struct device *dev);
+static int virtpci_device_remove(struct device *dev);
+static ssize_t virt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+
+static const struct file_operations proc_virt_fops = {
+ .write = virt_proc_write,
+};
+
+static const struct file_operations proc_info_fops = {
+ .read = info_proc_read,
+};
+
+/*****************************************************/
+/* Globals */
+/*****************************************************/
+
+/* methods in the bus_type struct allow the bus code to serve as an
+ * intermediary between the device core and individual drivers
+ */
+static struct bus_type virtpci_bus_type = {
+ .name = "uisvirtpci",
+ .match = virtpci_bus_match,
+ .uevent = virtpci_uevent,
+ .suspend = virtpci_device_suspend,
+ .resume = virtpci_device_resume,
+};
+
+static struct device virtpci_rootbus_device = {
+ .init_name = "vbusroot", /* root bus */
+ .release = virtpci_bus_release
+};
+
+/* filled in with info about parent chipset driver when we register with it */
+static ULTRA_VBUS_DEVICEINFO Chipset_DriverInfo;
+
+static const struct sysfs_ops virtpci_driver_sysfs_ops = {
+ .show = virtpci_driver_attr_show,
+ .store = virtpci_driver_attr_store,
+};
+
+static struct kobj_type virtpci_driver_kobj_type = {
+ .sysfs_ops = &virtpci_driver_sysfs_ops,
+};
+
+static struct virtpci_dev *VpcidevListHead;
+static DEFINE_RWLOCK(VpcidevListLock);
+
+/* filled in with info about this driver, with respect to its servicing of client busses */
+static ULTRA_VBUS_DEVICEINFO Bus_DriverInfo;
+
+/* virtpci_proc_dir_entry is used to create the proc entry directory
+ * for virtpci
+ */
+static struct proc_dir_entry *virtpci_proc_dir;
+/* virt_proc_entry is used to tell virtpci to add/delete vhbas/vnics/vbuses */
+static struct proc_dir_entry *virt_proc_entry;
+/* info_proc_entry is used to tell virtpci to display current info
+ * kept in the driver
+ */
+static struct proc_dir_entry *info_proc_entry;
+#define VIRT_PROC_ENTRY_FN "virt"
+#define INFO_PROC_ENTRY_FN "info"
+#define DIR_PROC_ENTRY "virtpci"
+
+struct virtpci_busdev {
+ struct device virtpci_bus_device;
+};
+
+/*****************************************************/
+/* Local functions */
+/*****************************************************/
+
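+/* poll for up to 120 seconds for the channel's server to signal READY;
+ * returns 1 when ready, 0 on timeout
+ */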
+static inline
+int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
+{
+	int count = 120;
+
+	while (count > 0) {
+		if (ULTRA_CHANNEL_SERVER_READY(&chanptr->ChannelHeader))
+			return 1;
+		UIS_THREAD_WAIT_SEC(1);
+		count--;
+	}
+	return 0;
+}
+
+/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.ChpInfo. */
+static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.chpInfoByteOffset;
+ if (chan->HdrInfo.chpInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because chpInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
+
+/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.BusInfo. */
+static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.busInfoByteOffset;
+ if (chan->HdrInfo.busInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because busInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
+
+/* Write the contents of <info> to the
+ * ULTRA_VBUS_CHANNEL_PROTOCOL.DevInfo[<devix>].
+ */
+static int
+write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
+ ULTRA_VBUS_DEVICEINFO *info, int devix)
+{
+ int off;
+ if (!chan) {
+ LOGERR("vbus channel not present");
+ return -1;
+ }
+ off =
+ (sizeof(ULTRA_CHANNEL_PROTOCOL) +
+ chan->HdrInfo.devInfoByteOffset) +
+ (chan->HdrInfo.deviceInfoStructBytes * devix);
+ if (chan->HdrInfo.devInfoByteOffset == 0) {
+ LOGERR("vbus channel not used, because devInfoByteOffset == 0");
+ return -1;
+ }
+ memcpy(((U8 *) (chan)) + off, info, sizeof(*info));
+ return 0;
+}
+
+/* adds a vbus
+ * returns 0 failure, 1 success,
+ */
+static int add_vbus(struct add_vbus_guestpart *addparams)
+{
+ int ret;
+ struct device *vbus;
+ vbus = kzalloc(sizeof(struct device), GFP_ATOMIC);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!vbus)
+ return 0;
+
+ dev_set_name(vbus, "vbus%d", addparams->busNo);
+ vbus->release = virtpci_bus_release;
+ vbus->parent = &virtpci_rootbus_device; /* root bus is parent */
+ vbus->bus = &virtpci_bus_type; /* bus type */
+ vbus->platform_data = (__force void *)addparams->chanptr;
+
+	/* register a virt bus device -
+	 * this bus shows up under /sys/devices/vbusroot with .name value
+	 * "vbus%d"; any devices added to this bus then show up under
+	 * /sys/devices/vbusroot/vbus0
+	 */
+ ret = device_register(vbus);
+ if (ret) {
+ LOGERR("device_register FAILED:%d\n", ret);
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ write_vbus_chpInfo(vbus->platform_data /* chanptr */ ,
+ &Chipset_DriverInfo);
+ write_vbus_busInfo(vbus->platform_data /* chanptr */ , &Bus_DriverInfo);
+ LOGINF("Added vbus %d; device %s created successfully\n",
+ addparams->busNo, BUS_ID(vbus));
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+
+/* for CHANSOCK wwnn/max are AUTO-GENERATED; for normal channels,
+ * wwnn/max are in the channel header.
+ */
+#define GET_SCSIADAPINFO_FROM_CHANPTR(chanptr) { \
+ memcpy_fromio(&scsi.wwnn, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vhba.wwnn, \
+ sizeof(struct vhba_wwnn)); \
+ memcpy_fromio(&scsi.max, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vhba.max, \
+ sizeof(struct vhba_config_max)); \
+ }
+
+/* find bus device with the busid that matches - match_busid matches bus_id */
+#define GET_BUS_DEV(busno) { \
+ sprintf(busid, "vbus%d", busno); \
+ vbus = bus_find_device(&virtpci_bus_type, NULL, \
+ (void *)busid, match_busid); \
+ if (!vbus) { \
+ LOGERR("**** FAILED to find vbus %s\n", busid); \
+ return 0; \
+ } \
+}
+
+/* adds a vhba
+ * returns 0 failure, 1 success,
+ */
+static int add_vhba(struct add_virt_guestpart *addparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!WAIT_FOR_IO_CHANNEL
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ LOGERR("Timed out. Channel not ready\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(addparams->chanptr);
+
+ GET_BUS_DEV(addparams->busNo);
+
+ LOGINF("Adding vhba wwnn:%x:%x config:%d-%d-%d-%d chanptr:%p\n",
+ scsi.wwnn.wwnn1, scsi.wwnn.wwnn2,
+ scsi.max.max_channel, scsi.max.max_id, scsi.max.max_lun,
+ scsi.max.cmd_per_lun, addparams->chanptr);
+ i = virtpci_device_add(vbus, VIRTHBA_TYPE, addparams, &scsi, NULL);
+ if (i) {
+ LOGINF("Added vhba wwnn:%x:%x chanptr:%p\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2, addparams->chanptr);
+ POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i,
+ POSTCODE_SEVERITY_INFO);
+ }
+ return i;
+
+}
+
+/* for CHANSOCK macaddr is AUTO-GENERATED; for normal channels,
+ * macaddr is in the channel header.
+ */
+#define GET_NETADAPINFO_FROM_CHANPTR(chanptr) { \
+ memcpy_fromio(net.mac_addr, \
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.macaddr, \
+ MAX_MACADDR_LEN); \
+ net.num_rcv_bufs = \
+ readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.num_rcv_bufs); \
+ net.mtu = readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.mtu); \
+ memcpy_fromio(&net.zoneGuid, \
+ &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ chanptr)->vnic.zoneGuid, \
+ sizeof(GUID)); \
+}
+
+/* adds a vnic
+ * returns 0 failure, 1 success,
+ */
+static int
+add_vnic(struct add_virt_guestpart *addparams)
+{
+ int i;
+ struct net_adap_info net;
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (!WAIT_FOR_IO_CHANNEL
+ ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ LOGERR("Timed out, channel not ready\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ GET_NETADAPINFO_FROM_CHANPTR(addparams->chanptr);
+
+ GET_BUS_DEV(addparams->busNo);
+
+ LOGINF("Adding vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x rcvbufs:%d mtu:%d chanptr:%p{%-8.8lx-%-4.4x-%-4.4x-%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x%-2.2x}\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2], net.mac_addr[3],
+ net.mac_addr[4], net.mac_addr[5], net.num_rcv_bufs, net.mtu,
+ addparams->chanptr, (ulong) net.zoneGuid.data1, net.zoneGuid.data2,
+ net.zoneGuid.data3, net.zoneGuid.data4[0], net.zoneGuid.data4[1],
+ net.zoneGuid.data4[2], net.zoneGuid.data4[3],
+ net.zoneGuid.data4[4], net.zoneGuid.data4[5],
+ net.zoneGuid.data4[6], net.zoneGuid.data4[7]);
+ i = virtpci_device_add(vbus, VIRTNIC_TYPE, addparams, NULL, &net);
+ if (i) {
+ LOGINF("Added vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+ }
+ return 0;
+}
+
+/* delete vbus
+ * returns 0 failure, 1 success,
+ */
+static int
+delete_vbus(struct del_vbus_guestpart *delparams)
+{
+ struct device *vbus;
+ unsigned char busid[BUS_ID_SIZE];
+
+ GET_BUS_DEV(delparams->busNo);
+ /* ensure that bus has no devices? -- TBD */
+ LOGINF("Deleting %s\n", BUS_ID(vbus));
+ if (delete_vbus_device(vbus, NULL))
+ return 0; /* failure */
+ LOGINF("Deleted vbus %d\n", delparams->busNo);
+ return 1;
+}
+
+static int
+delete_vbus_device(struct device *vbus, void *data)
+{
+ int checkforroot = (data != NULL);
+ struct device *pDev = &virtpci_rootbus_device;
+
+ if ((checkforroot) && match_busid(vbus, (void *) BUS_ID(pDev))) {
+ /* skip it - don't delete root bus */
+ LOGINF("skipping root bus\n");
+ return 0; /* pretend no error */
+ }
+ LOGINF("Calling unregister for %s\n", BUS_ID(vbus));
+ device_unregister(vbus);
+ kfree(vbus);
+	LOGINF("VBus unregistered and freed\n");
+ return 0; /* no error */
+}
+
+/* pause vhba
+ * returns 0 failure, 1 success,
+ */
+static int pause_vhba(struct pause_virt_guestpart *pauseparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(pauseparams->chanptr);
+
+ LOGINF("Pausing vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i)
+ LOGINF("Paused vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return i;
+}
+
+/* pause vnic
+ * returns 0 failure, 1 success,
+ */
+static int pause_vnic(struct pause_virt_guestpart *pauseparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(pauseparams->chanptr);
+
+ LOGINF("Pausing vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_serverdown(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ NULL, net.mac_addr);
+ if (i) {
+ LOGINF(" Paused vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
+/* resume vhba
+ * returns 0 failure, 1 success,
+ */
+static int resume_vhba(struct resume_virt_guestpart *resumeparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(resumeparams->chanptr);
+
+ LOGINF("Resuming vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i)
+ LOGINF("Resumed vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return i;
+}
+
+/* resume vnic
+ * returns 0 failure, 1 success,
+ */
+static int
+resume_vnic(struct resume_virt_guestpart *resumeparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(resumeparams->chanptr);
+
+ LOGINF("Resuming vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_serverup(NULL /*no parent bus */ , VIRTNIC_TYPE,
+ NULL, net.mac_addr);
+ if (i) {
+ LOGINF(" Resumed vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
+/* delete vhba
+ * returns 0 failure, 1 success,
+ */
+static int delete_vhba(struct del_virt_guestpart *delparams)
+{
+ int i;
+ struct scsi_adap_info scsi;
+
+ GET_SCSIADAPINFO_FROM_CHANPTR(delparams->chanptr);
+
+ LOGINF("Deleting vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1, scsi.wwnn.wwnn2);
+ i = virtpci_device_del(NULL /*no parent bus */ , VIRTHBA_TYPE,
+ &scsi.wwnn, NULL);
+ if (i) {
+ LOGINF("Deleted vhba wwnn:%x:%x\n", scsi.wwnn.wwnn1,
+ scsi.wwnn.wwnn2);
+ return 1;
+ }
+ return 0;
+}
+
+/* deletes a vnic
+ * returns 0 failure, 1 success,
+ */
+static int delete_vnic(struct del_virt_guestpart *delparams)
+{
+ int i;
+ struct net_adap_info net;
+
+ GET_NETADAPINFO_FROM_CHANPTR(delparams->chanptr);
+
+ LOGINF("Deleting vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ i = virtpci_device_del(NULL /*no parent bus */ , VIRTNIC_TYPE, NULL,
+ net.mac_addr);
+ if (i) {
+ LOGINF("Deleted vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5]);
+ }
+ return i;
+}
+
+#define DELETE_ONE_VPCIDEV(vpcidev) { \
+ LOGINF("calling device_unregister:%p\n", &vpcidev->generic_dev); \
+ device_unregister(&vpcidev->generic_dev); \
+ LOGINF("Deleted %p\n", vpcidev); \
+ kfree(vpcidev); \
+}
+
+/* deletes all vhbas and vnics, then all vbuses */
+static void delete_all(void)
+{
+ int count = 0;
+ unsigned long flags;
+ struct virtpci_dev *tmpvpcidev, *nextvpcidev;
+
+ /* delete the entire vhba/vnic list in one shot */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ tmpvpcidev = VpcidevListHead;
+ VpcidevListHead = NULL;
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ /* delete one vhba/vnic at a time */
+ while (tmpvpcidev) {
+ nextvpcidev = tmpvpcidev->next;
+ /* delete the vhba/vnic at tmpvpcidev */
+ DELETE_ONE_VPCIDEV(tmpvpcidev);
+ tmpvpcidev = nextvpcidev;
+ count++;
+ }
+ LOGINF("Deleted %d vhbas/vnics.\n", count);
+
+ /* now delete each vbus */
+ if (bus_for_each_dev
+ (&virtpci_bus_type, NULL, (void *) 1, delete_vbus_device))
+ LOGERR("delete of all vbus failed\n");
+}
+
+/* deletes all vnics or vhbas
+ * returns 0 failure, 1 success,
+ */
+static int delete_all_virt(VIRTPCI_DEV_TYPE devtype, struct del_vbus_guestpart *delparams)
+{
+ int i;
+ unsigned char busid[BUS_ID_SIZE];
+ struct device *vbus;
+
+ GET_BUS_DEV(delparams->busNo);
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to delete all devices; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ LOGINF("Deleting all %s in vbus %s\n",
+ devtype == VIRTHBA_TYPE ? "vhbas" : "vnics", busid);
+ /* delete all vhbas/vnics */
+ i = virtpci_device_del(vbus, devtype, NULL, NULL);
+ if (i > 0)
+ LOGINF("Deleted %d %s\n", i,
+ devtype == VIRTHBA_TYPE ? "vhbas" : "vnics");
+ return 1;
+}
+
+static int virtpci_ctrlchan_func(struct guest_msgs *msg)
+{
+ switch (msg->msgtype) {
+ case GUEST_ADD_VBUS:
+ return add_vbus(&msg->add_vbus);
+ case GUEST_ADD_VHBA:
+ return add_vhba(&msg->add_vhba);
+ case GUEST_ADD_VNIC:
+ return add_vnic(&msg->add_vnic);
+ case GUEST_DEL_VBUS:
+ return delete_vbus(&msg->del_vbus);
+ case GUEST_DEL_VHBA:
+ return delete_vhba(&msg->del_vhba);
+ case GUEST_DEL_VNIC:
+ return delete_vnic(&msg->del_vhba);
+ case GUEST_DEL_ALL_VHBAS:
+ return delete_all_virt(VIRTHBA_TYPE, &msg->del_all_vhbas);
+ case GUEST_DEL_ALL_VNICS:
+ return delete_all_virt(VIRTNIC_TYPE, &msg->del_all_vnics);
+ case GUEST_DEL_ALL_VBUSES:
+ delete_all();
+ return 1;
+ case GUEST_PAUSE_VHBA:
+ return pause_vhba(&msg->pause_vhba);
+ case GUEST_PAUSE_VNIC:
+ return pause_vnic(&msg->pause_vnic);
+ case GUEST_RESUME_VHBA:
+ return resume_vhba(&msg->resume_vhba);
+ case GUEST_RESUME_VNIC:
+ return resume_vnic(&msg->resume_vnic);
+ default:
+ LOGERR("invalid message type %d.\n", msg->msgtype);
+ return 0;
+ }
+}
+
+/* same as driver_helper in the kernel's bus.c */
+static int match_busid(struct device *dev, void *data)
+{
+ const char *name = data;
+
+ if (strcmp(name, BUS_ID(dev)) == 0)
+ return 1;
+ return 0;
+}
+
+/*****************************************************/
+/* Bus functions */
+/*****************************************************/
+
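+/* walk the driver's pci_device_id table (terminated by an all-zero entry)
+ * and return the first entry matching the device's vendor/device ids,
+ * or NULL if none match
+ */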
+static const struct pci_device_id *
+virtpci_match_device(const struct pci_device_id *ids,
+ const struct virtpci_dev *dev)
+{
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ DBGINF("ids->vendor:%x dev->vendor:%x ids->device:%x dev->device:%x\n",
+ ids->vendor, dev->vendor, ids->device, dev->device);
+
+ if ((ids->vendor == dev->vendor)
+ && (ids->device == dev->device))
+ return ids;
+
+ ids++;
+ }
+ return NULL;
+}
+
+/* NOTE: This function is called when a new device is added for this
+ * bus, or for existing devices when a new driver is added for this
+ * bus. It returns nonzero if a given device can be handled by the
+ * given driver.
+ */
+static int virtpci_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev);
+ struct virtpci_driver *virtpcidrv = driver_to_virtpci_driver(drv);
+ int match = 0;
+
+ DBGINF("In virtpci_bus_match dev->bus_id:%s drv->name:%s\n",
+ dev->bus_id, drv->name);
+
+ /* check ids list for a match */
+ if (virtpci_match_device(virtpcidrv->id_table, virtpcidev))
+ match = 1;
+
+ DBGINF("returning match:%d\n", match);
+ return match; /* 0 - no match; 1 - yes it matches */
+}
+
+static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ DBGINF("In virtpci_hotplug\n");
+ /* add variables to the environment prior to the generation of
+ * hotplug events to user space
+ */
+ if (add_uevent_var(env, "VIRTPCI_VERSION=%s", VIRTPCI_VERSION))
+ return -ENOMEM;
+ return 0;
+}
+
+static int virtpci_device_suspend(struct device *dev, pm_message_t state)
+{
+ DBGINF("In virtpci_device_suspend -NYI ****\n");
+ return 0;
+}
+
+static int virtpci_device_resume(struct device *dev)
+{
+ DBGINF("In virtpci_device_resume -NYI ****\n");
+ return 0;
+}
+
+/* For a child device just created on a client bus, fill in
+ * information about the driver that is controlling this device into
+ * the appropriate slot within the vbus channel of the bus
+ * instance.
+ */
+static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
+ struct virtpci_driver *virtpcidrv)
+{
+ struct device *vbus;
+ void *pChan;
+ ULTRA_VBUS_DEVICEINFO devInfo;
+ const char *stype;
+
+ if (!dev) {
+ LOGERR("%s dev is NULL", __func__);
+ return;
+ }
+ if (!virtpcidrv) {
+ LOGERR("%s driver is NULL", __func__);
+ return;
+ }
+ vbus = dev->parent;
+ if (!vbus) {
+ LOGERR("%s dev has no parent bus", __func__);
+ return;
+ }
+ pChan = vbus->platform_data;
+ if (!pChan) {
+ LOGERR("%s dev bus has no channel", __func__);
+ return;
+ }
+ switch (devType) {
+ case PCI_DEVICE_ID_VIRTHBA:
+ stype = "vHBA";
+ break;
+ case PCI_DEVICE_ID_VIRTNIC:
+ stype = "vNIC";
+ break;
+ default:
+ stype = "unknown";
+ break;
+ }
+ BusDeviceInfo_Init(&devInfo, stype,
+ virtpcidrv->name,
+ virtpcidrv->version,
+ virtpcidrv->vertag,
+ virtpcidrv->build_date, virtpcidrv->build_time);
+ write_vbus_devInfo(pChan, &devInfo, devNo);
+
+ /* Re-write bus+chipset info, because it is possible that this
+ * was previously written by our good counterpart, visorbus.
+ */
+ write_vbus_chpInfo(pChan, &Chipset_DriverInfo);
+ write_vbus_busInfo(pChan, &Bus_DriverInfo);
+}
+
+/* This function is called to query the existence of a specific device
+ * and whether this driver can work with it. It should return -ENODEV
+ * in case of failure.
+ */
+static int virtpci_device_probe(struct device *dev)
+{
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev);
+ struct virtpci_driver *virtpcidrv =
+ driver_to_virtpci_driver(dev->driver);
+ const struct pci_device_id *id;
+ int error = 0;
+
+ LOGINF("In virtpci_device_probe dev:%p virtpcidev:%p virtpcidrv:%p\n",
+ dev, virtpcidev, virtpcidrv); /* VERBOSE/DEBUG ? */
+ POSTCODE_LINUX_2(VPCI_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ /* static match and static probe vs dynamic match & dynamic
+	 * probe - do we care?
+ */
+ if (!virtpcidrv->id_table)
+ return -ENODEV;
+
+ id = virtpci_match_device(virtpcidrv->id_table, virtpcidev);
+ if (!id)
+ return -ENODEV;
+
+ /* increment reference count */
+ get_device(dev);
+
+ /* if virtpcidev is not already claimed & probe function is
+ * valid, probe it
+ */
+ if (!virtpcidev->mydriver && virtpcidrv->probe) {
+ /* call the probe function - virthba or virtnic probe
+ * is what it should be
+ */
+ error = virtpcidrv->probe(virtpcidev, id);
+ if (!error) {
+ fix_vbus_devInfo(dev, virtpcidev->deviceNo,
+ virtpcidev->device, virtpcidrv);
+ virtpcidev->mydriver = virtpcidrv;
+ POSTCODE_LINUX_2(VPCI_PROBE_EXIT_PC,
+ POSTCODE_SEVERITY_INFO);
+		} else {
+			put_device(dev);
+		}
+ }
+ POSTCODE_LINUX_2(VPCI_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return error; /* -ENODEV for probe failure */
+}
+
+static int virtpci_device_remove(struct device *dev_)
+{
+ /* dev_ passed in is the HBA device which we called
+ * generic_dev in our virtpcidev struct
+ */
+ struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev_);
+ struct virtpci_driver *virtpcidrv = virtpcidev->mydriver;
+ LOGINF("In virtpci_device_remove bus_id:%s dev_:%p virtpcidev:%p dev->driver:%p drivername:%s\n",
+ BUS_ID(dev_), dev_, virtpcidev, dev_->driver,
+ dev_->driver->name); /* VERBOSE/DEBUG */
+ if (virtpcidrv) {
+ /* TEMP: assuming we have only one such driver for now */
+ if (virtpcidrv->remove)
+ virtpcidrv->remove(virtpcidev);
+ virtpcidev->mydriver = NULL;
+ }
+
+ DBGINF("calling putdevice\n");
+ put_device(dev_);
+
+ DBGINF("Leaving\n");
+ return 0;
+}
+
+/*****************************************************/
+/* Bus functions */
+/*****************************************************/
+
+static void virtpci_bus_release(struct device *dev)
+{
+ /* this function is called when the last reference to the
+ * device is removed
+ */
+ DBGINF("In virtpci_bus_release\n");
+ /* what else is supposed to happen here? */
+}
+
+/*****************************************************/
+/* Adapter functions */
+/*****************************************************/
+
+static int virtpci_device_add(struct device *parentbus, int devtype,
+ struct add_virt_guestpart *addparams,
+ struct scsi_adap_info *scsi, /* NULL for VNIC add */
+ struct net_adap_info *net /* NULL for VHBA add */)
+{
+ struct virtpci_dev *virtpcidev = NULL;
+ struct virtpci_dev *tmpvpcidev = NULL, *prev;
+ unsigned long flags;
+ int ret;
+ ULTRA_IO_CHANNEL_PROTOCOL __iomem *pIoChan = NULL;
+ struct device *pDev;
+
+ LOGINF("virtpci_device_add parentbus:%p chanptr:%p\n", parentbus,
+ addparams->chanptr);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to add device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, devtype,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* add a Virtual Device */
+ virtpcidev = kzalloc(sizeof(struct virtpci_dev), GFP_ATOMIC);
+ if (virtpcidev == NULL) {
+		LOGERR("can't add device - malloc FAILED\n");
+ POSTCODE_LINUX_2(MALLOC_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* initialize stuff unique to virtpci_dev struct */
+ virtpcidev->devtype = devtype;
+ if (devtype == VIRTHBA_TYPE) {
+ virtpcidev->device = PCI_DEVICE_ID_VIRTHBA;
+ virtpcidev->scsi = *scsi;
+ } else {
+ virtpcidev->device = PCI_DEVICE_ID_VIRTNIC;
+ virtpcidev->net = *net;
+ }
+ virtpcidev->vendor = PCI_VENDOR_ID_UNISYS;
+ virtpcidev->busNo = addparams->busNo;
+ virtpcidev->deviceNo = addparams->deviceNo;
+
+ virtpcidev->queueinfo.chan = addparams->chanptr;
+ virtpcidev->queueinfo.send_int_if_needed = NULL;
+
+ /* Set up safe queue... */
+ pIoChan = (ULTRA_IO_CHANNEL_PROTOCOL __iomem *)
+ virtpcidev->queueinfo.chan;
+
+ virtpcidev->intr = addparams->intr;
+
+ /* initialize stuff in the device portion of the struct */
+ virtpcidev->generic_dev.bus = &virtpci_bus_type;
+ virtpcidev->generic_dev.parent = parentbus;
+ virtpcidev->generic_dev.release = virtpci_device_release;
+
+ dev_set_name(&virtpcidev->generic_dev, "%x:%x",
+ addparams->busNo, addparams->deviceNo);
+
+ /* add the vhba/vnic to virtpci device list - but check for
+ * duplicate wwnn/macaddr first
+ */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead; tmpvpcidev;
+ tmpvpcidev = tmpvpcidev->next) {
+ if (devtype == VIRTHBA_TYPE) {
+ if ((tmpvpcidev->scsi.wwnn.wwnn1 == scsi->wwnn.wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == scsi->wwnn.wwnn2)) {
+ /* duplicate - already have vpcidev
+ with this wwnn */
+ break;
+ }
+		} else if (memcmp(tmpvpcidev->net.mac_addr, net->mac_addr,
+				  MAX_MACADDR_LEN) == 0) {
+			/* duplicate - already have vnic with this macaddr */
+			break;
+		}
+ }
+ if (tmpvpcidev) {
+ /* found a vhba/vnic already in the list with same
+ * wwnn or macaddr - reject add
+ */
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+ kfree(virtpcidev);
+ LOGERR("**** FAILED vhba/vnic already exists in the list\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ /* add it at the head */
+ if (!VpcidevListHead)
+ VpcidevListHead = virtpcidev;
+ else {
+ /* insert virtpcidev at the head of our linked list of
+ * vpcidevs
+ */
+ virtpcidev->next = VpcidevListHead;
+ VpcidevListHead = virtpcidev;
+ }
+
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ /* Must transition channel to ATTACHED state BEFORE
+ * registering the device, because polling of the channel
+ * queues can begin at any time after device_register().
+ */
+ pDev = &virtpcidev->generic_dev;
+ ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(pDev),
+ CHANNELCLI_ATTACHED, NULL);
+
+ /* don't register until device has been added to
+ * list. Otherwise, a device_unregister from this function can
+ * cause a "scheduling while atomic".
+ */
+ DBGINF("registering device:%p with bus_id:%s\n",
+ &virtpcidev->generic_dev, virtpcidev->generic_dev.bus_id);
+ ret = device_register(&virtpcidev->generic_dev);
+	/* NOTE: this call to device_register triggers the uevent (hotplug)
+	 * handler and results in virtpci_bus_match being called. If match
+	 * returns success, virtpcidev->generic_dev.driver is set to
+	 * core_driver, i.e. virtpci, and the probe function
+	 * virtpcidev->generic_dev.driver->probe is called, which results
+	 * in virtpci_device_probe being called. If virtpci_device_probe
+	 * succeeds, the device is bound to this driver.
+	 */
+ if (ret) {
+ LOGERR("device_register returned %d\n", ret);
+ pDev = &virtpcidev->generic_dev;
+ ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(pDev),
+ CHANNELCLI_DETACHED, NULL);
+ /* remove virtpcidev, the one we just added, from the list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead, prev = NULL;
+ tmpvpcidev;
+ prev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev == virtpcidev) {
+ if (prev)
+ prev->next = tmpvpcidev->next;
+ else
+ VpcidevListHead = tmpvpcidev->next;
+ break;
+ }
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+ kfree(virtpcidev);
+ return 0;
+ }
+
+ LOGINF("Added %s:%d:%d &virtpcidev->generic_dev:%p\n",
+ (devtype == VIRTHBA_TYPE) ? "virthba" : "virtnic",
+ addparams->busNo, addparams->deviceNo, &virtpcidev->generic_dev);
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+
+static int virtpci_device_serverdown(struct device *parentbus,
+ int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int pausethisone = 0;
+ bool found = false;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev;
+ struct virtpci_driver *vpcidriver;
+ unsigned long flags;
+ int rc = 0;
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to pause device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* find the vhba or vnic in virtpci device list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ (tmpvpcidev && !found);
+ prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev->devtype != devtype)
+ continue;
+
+ if (devtype == VIRTHBA_TYPE) {
+ pausethisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're pausing vhba whose
+ * wwnn matches the current device's wwnn
+ */
+ } else { /* VIRTNIC_TYPE */
+ pausethisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're pausing vnic whose
+ * macaddr matches the current device's macaddr */
+ }
+
+ if (!pausethisone)
+ continue;
+
+ found = true;
+ vpcidriver = tmpvpcidev->mydriver;
+ rc = vpcidriver->suspend(tmpvpcidev, 0);
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!found) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ return rc;
+}
+
+static int virtpci_device_serverup(struct device *parentbus,
+ int devtype,
+ struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int resumethisone = 0;
+ bool found = false;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev;
+ struct virtpci_driver *vpcidriver;
+ unsigned long flags;
+ int rc = 0;
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to resume device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* find the vhba or vnic in virtpci device list */
+ write_lock_irqsave(&VpcidevListLock, flags);
+
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ (tmpvpcidev && !found);
+ prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
+ if (tmpvpcidev->devtype != devtype)
+ continue;
+
+ if (devtype == VIRTHBA_TYPE) {
+ resumethisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're resuming vhba whose
+ * wwnn matches the current device's wwnn */
+ } else { /* VIRTNIC_TYPE */
+ resumethisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're resuming vnic whose
+ * macaddr matches the current device's macaddr */
+ }
+
+ if (!resumethisone)
+ continue;
+
+ found = true;
+ vpcidriver = tmpvpcidev->mydriver;
+ /* This should be done at BUS resume time, but an
+ * existing problem prevents us from ever getting a bus
+ * resume... This hack would fail to work should we
+ * ever have a bus that contains NO devices, since we
+ * would never even get here in that case.
+ */
+ fix_vbus_devInfo(&tmpvpcidev->generic_dev, tmpvpcidev->deviceNo,
+ tmpvpcidev->device, vpcidriver);
+ rc = vpcidriver->resume(tmpvpcidev);
+ }
+
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!found) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ return rc;
+}
+
+static int virtpci_device_del(struct device *parentbus,
+ int devtype, struct vhba_wwnn *wwnn,
+ unsigned char macaddr[])
+{
+ int count = 0, all = 0, delthisone;
+ struct virtpci_dev *tmpvpcidev, *prevvpcidev, *dellist = NULL;
+ unsigned long flags;
+
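+/* advance to the next list entry and restart the enclosing loop */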
+#define DEL_CONTINUE { \
+ prevvpcidev = tmpvpcidev;\
+ tmpvpcidev = tmpvpcidev->next;\
+ continue; \
+}
+
+ if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
+ LOGERR("**** FAILED to delete device; devtype:%d not vhba:%d or vnic:%d\n",
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ return 0;
+ }
+
+ /* see if we are to delete all - NOTE: all implies we have a
+ * valid parentbus
+ */
+ all = ((devtype == VIRTHBA_TYPE) && (wwnn == NULL)) ||
+ ((devtype == VIRTNIC_TYPE) && (macaddr == NULL));
+
+ /* find all the vhba or vnic or both in virtpci device list
+ * keep list of ones we are deleting so we can call
+ * device_unregister after we release the lock; otherwise we
+ * encounter "schedule while atomic"
+ */
+ write_lock_irqsave(&VpcidevListLock, flags);
+ for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL; tmpvpcidev;) {
+ if (tmpvpcidev->devtype != devtype)
+ DEL_CONTINUE;
+
+ if (all) {
+ delthisone =
+ (tmpvpcidev->generic_dev.parent == parentbus);
+ /* we're deleting all vhbas or vnics on the
+ * specified parent bus
+ */
+ } else if (devtype == VIRTHBA_TYPE) {
+ delthisone =
+ ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) &&
+ (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2));
+ /* devtype is vhba, we're deleting vhba whose
+ * wwnn matches the current device's wwnn
+ */
+ } else { /* VIRTNIC_TYPE */
+ delthisone =
+ memcmp(tmpvpcidev->net.mac_addr, macaddr,
+ MAX_MACADDR_LEN) == 0;
+ /* devtype is vnic, we're deleting vnic whose
+ * macaddr matches the current device's macaddr
+ */
+ }
+
+ if (!delthisone)
+ DEL_CONTINUE;
+
+ /* take vhba/vnic out of the list */
+ if (prevvpcidev)
+ /* not at head */
+ prevvpcidev->next = tmpvpcidev->next;
+ else
+ VpcidevListHead = tmpvpcidev->next;
+
+ /* add it to our deletelist */
+ tmpvpcidev->next = dellist;
+ dellist = tmpvpcidev;
+
+ count++;
+ if (!all)
+ break; /* done */
+ /* going to top of loop again - set tmpvpcidev to next
+ * one we're to process
+ */
+ if (prevvpcidev)
+ tmpvpcidev = prevvpcidev->next;
+ else
+ tmpvpcidev = VpcidevListHead;
+ }
+ write_unlock_irqrestore(&VpcidevListLock, flags);
+
+ if (!all && (count == 0)) {
+ LOGERR("**** FAILED to find vhba/vnic in the list\n");
+ return 0;
+ }
+
+ /* now delete each one from delete list */
+ while (dellist) {
+ /* save next */
+ tmpvpcidev = dellist->next;
+ /* delete the vhba/vnic at dellist */
+ DELETE_ONE_VPCIDEV(dellist);
+ /* do next */
+ dellist = tmpvpcidev;
+ }
+
+ return count;
+}
+
+static void virtpci_device_release(struct device *dev_)
+{
+ /* this function is called when the last reference to the
+ * device is removed
+ */
+ LOGINF("In virtpci_device_release:%p - NOT YET IMPLEMENTED\n", dev_);
+}
+
+/*****************************************************/
+/* Driver functions */
+/*****************************************************/
+
+#define kobj_to_device_driver(obj) container_of(obj, struct device_driver, kobj)
+#define attribute_to_driver_attribute(obj) \
+ container_of(obj, struct driver_attribute, attr)
+
+static ssize_t virtpci_driver_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct driver_attribute *dattr = attribute_to_driver_attribute(attr);
+ ssize_t ret = 0;
+
+	struct driver_private *dprivate = to_driver(kobj);
+	struct device_driver *driver;
+
+	if (dprivate != NULL)
+		driver = dprivate->driver;
+	else
+		driver = NULL;
+
+	if (driver) {
+		DBGINF("In virtpci_driver_attr_show driver->name:%s\n",
+		       driver->name);
+		if (dattr->show)
+			ret = dattr->show(driver, buf);
+	}
+	return ret;
+}
+
+static ssize_t virtpci_driver_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct driver_attribute *dattr = attribute_to_driver_attribute(attr);
+ ssize_t ret = 0;
+
+	struct driver_private *dprivate = to_driver(kobj);
+	struct device_driver *driver;
+
+	if (dprivate != NULL)
+		driver = dprivate->driver;
+	else
+		driver = NULL;
+
+	if (driver) {
+		DBGINF("In virtpci_driver_attr_store driver->name:%s\n",
+		       driver->name);
+		if (dattr->store)
+			ret = dattr->store(driver, buf, count);
+	}
+	return ret;
+}
+
+/* register a new virtpci driver */
+int virtpci_register_driver(struct virtpci_driver *drv)
+{
+ int result = 0;
+
+ DBGINF("In virtpci_register_driver\n");
+
+ if (drv->id_table == NULL) {
+ LOGERR("id_table missing\n");
+		return -EINVAL;
+ }
+	/* initialize core driver fields needed to call driver_register */
+	drv->core_driver.name = drv->name;	/* name of driver in sysfs */
+	/* type of bus this driver works with */
+	drv->core_driver.bus = &virtpci_bus_type;
+	/* probe is called to query the existence of a specific device
+	 * and whether this driver can work with it
+	 */
+	drv->core_driver.probe = virtpci_device_probe;
+	/* remove is called when the device is removed from the system */
+	drv->core_driver.remove = virtpci_device_remove;
+ /* register with core */
+ result = driver_register(&drv->core_driver);
+ /* calls bus_add_driver which calls driver_attach and
+ * module_add_driver
+ */
+ if (result)
+ return result; /* failed */
+
+ drv->core_driver.p->kobj.ktype = &virtpci_driver_kobj_type;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtpci_register_driver);
+
+void virtpci_unregister_driver(struct virtpci_driver *drv)
+{
+ DBGINF("In virtpci_unregister_driver drv:%p\n", drv);
+ driver_unregister(&drv->core_driver);
+ /* driver_unregister calls bus_remove_driver
+ * bus_remove_driver calls device_detach
+ * device_detach calls device_release_driver for each of the
+ * driver's devices
+ * device_release driver calls drv->remove which is
+ * virtpci_device_remove
+ * virtpci_device_remove calls virthba_remove
+ */
+ DBGINF("Leaving\n");
+}
+EXPORT_SYMBOL_GPL(virtpci_unregister_driver);
+
+/*****************************************************/
+/* proc filesystem functions */
+/*****************************************************/
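+/* carries the output buffer and running length while bus_for_each_dev
+ * iterates the vbus devices in info_proc_read
+ */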
+struct print_vbus_info {
+ int *length;
+ char *buf;
+};
+
+static int print_vbus(struct device *vbus, void *data)
+{
+ struct print_vbus_info *p = (struct print_vbus_info *) data;
+ int l = *(p->length);
+
+ *(p->length) = l + sprintf(p->buf + l, "bus_id:%s\n", dev_name(vbus));
+ return 0; /* no error */
+}
+
+static ssize_t info_proc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ struct virtpci_dev *tmpvpcidev;
+ unsigned long flags;
+ struct print_vbus_info printparam;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ length += sprintf(vbuf + length, "CHANSOCK is not defined.\n");
+
+ length += sprintf(vbuf + length, "\n Virtual PCI Bus devices\n");
+ printparam.length = &length;
+ printparam.buf = vbuf;
+ if (bus_for_each_dev(&virtpci_bus_type, NULL,
+ (void *) &printparam, print_vbus))
+		LOGERR("print of all vbus failed\n");
+
+ length += sprintf(vbuf + length, "\n Virtual PCI devices\n");
+ read_lock_irqsave(&VpcidevListLock, flags);
+ tmpvpcidev = VpcidevListHead;
+ while (tmpvpcidev) {
+ if (tmpvpcidev->devtype == VIRTHBA_TYPE) {
+ length += sprintf(vbuf + length, "[%d:%d] VHba:%08x:%08x max-config:%d-%d-%d-%d",
+ tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->scsi.wwnn.wwnn1,
+ tmpvpcidev->scsi.wwnn.wwnn2,
+ tmpvpcidev->scsi.max.max_channel,
+ tmpvpcidev->scsi.max.max_id,
+ tmpvpcidev->scsi.max.max_lun,
+ tmpvpcidev->scsi.max.cmd_per_lun);
+ } else {
+ length += sprintf(vbuf + length, "[%d:%d] VNic:%02x:%02x:%02x:%02x:%02x:%02x num_rcv_bufs:%d mtu:%d",
+ tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->net.mac_addr[0],
+ tmpvpcidev->net.mac_addr[1],
+ tmpvpcidev->net.mac_addr[2],
+ tmpvpcidev->net.mac_addr[3],
+ tmpvpcidev->net.mac_addr[4],
+ tmpvpcidev->net.mac_addr[5],
+ tmpvpcidev->net.num_rcv_bufs,
+ tmpvpcidev->net.mtu);
+ }
+ length +=
+ sprintf(vbuf + length, " chanptr:%p\n",
+ tmpvpcidev->queueinfo.chan);
+ tmpvpcidev = tmpvpcidev->next;
+ }
+ read_unlock_irqrestore(&VpcidevListLock, flags);
+
+ length +=
+ sprintf(vbuf + length, "\nModule build: Date:%s Time:%s\n", __DATE__,
+ __TIME__);
+
+ length += sprintf(vbuf + length, "\n");
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+static ssize_t virt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[16];
+ int type, i, action = 0xffff;
+ unsigned int busno, deviceno;
+ void __iomem *chanptr;
+ struct add_vbus_guestpart busaddparams;
+ struct add_virt_guestpart addparams;
+ struct del_vbus_guestpart busdelparams;
+ struct del_virt_guestpart delparams;
+ GUID dummyGuid = GUID0;
+#ifdef STORAGE_CHANNEL
+ U64 storagechannel;
+#endif
+
+#define PRINT_USAGE_RETURN {\
+ LOGERR("usage: 0-0-<chanptr> ==> delete vhba\n"); \
+ LOGERR("usage: 0-1-<chanptr>-<busNo>-<deviceNo> ==> add vhba\n"); \
+ LOGERR("usage: 0-f-<busNo> ==> delete all vhbas\n"); \
+ LOGERR("\n"); \
+ LOGERR("usage: 1-0-<chanptr> ==> delete vnic\n"); \
+ LOGERR("usage: 1-1-<chanptr>-<busNo>-<deviceNo> ==> add vnic\n"); \
+ LOGERR("usage: 1-f-<busNo> ==> delete all vnics\n"); \
+ LOGERR("\n"); \
+ LOGERR("usage: 6-0-<busNo> ==> delete vbus\n"); \
+ LOGERR("usage: 6-1-<busNo> ==> add vbus\n"); \
+ LOGERR("usage: 6-f ==> delete all vbuses\n"); \
+ LOGERR("usage: 98-<busNo>-<deviceNo> ==> INJECT Client delete vnic\n"); \
+ LOGERR("usage: 99-<chanptr>-<busNo>-<deviceNo> ==> INJECT Client add vnic\n"); \
+ return -EINVAL; \
+}
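+
+/* for example, writing "0-1-<chanptr>-<busNo>-<deviceNo>" to
+ * /proc/virtpci/virt requests that a vhba be added at the given
+ * bus/device slot
+ */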
+
+ if (count >= ARRAY_SIZE(buf))
+ return -EINVAL;
+
+	if (copy_from_user(buf, buffer, count)) {
+		LOGERR("copy_from_user failed.\n");
+		return -EFAULT;
+	}
+	buf[count] = '\0';	/* NUL-terminate before parsing with sscanf */
+
+ i = sscanf(buf, "%x-%x", &type, &action);
+ if (i < 2)
+ PRINT_USAGE_RETURN;
+
+ if (type == 0x98) {
+ /* client inject delete vnic */
+ i = sscanf(buf, "%x-%d-%d", &type, &busno, &deviceno);
+ if (i != 3)
+ PRINT_USAGE_RETURN;
+ uislib_client_inject_del_vnic(busno, deviceno);
+ return count; /* success */
+ } else if (type == 0x99) {
+ /* client inject add vnic */
+ i = sscanf(buf, "%x-%p-%d-%d", &type, &chanptr, &busno,
+ &deviceno);
+ if (i != 4)
+ PRINT_USAGE_RETURN;
+ if (!uislib_client_inject_add_vnic(busno, deviceno,
+ __pa(chanptr),
+ MIN_IO_CHANNEL_SIZE,
+ 1, /* test msg */
+ dummyGuid, /* inst guid */
+ NULL)) { /*interrupt info */
+ LOGERR("FAILED to inject add vnic\n");
+ return -EFAULT;
+ }
+ return count; /* success */
+ }
+
+ if ((type != VIRTHBA_TYPE) && (type != VIRTNIC_TYPE)
+ && (type != VIRTBUS_TYPE))
+ PRINT_USAGE_RETURN;
+
+ if (type == VIRTBUS_TYPE) {
+ i = sscanf(buf, "%x-%x-%d", &type, &action, &busno);
+ switch (action) {
+ case 0:
+ /* delete vbus */
+ if (i != 3)
+ break;
+ busdelparams.busNo = busno;
+ if (delete_vbus(&busdelparams))
+ return count; /* success */
+ return -EFAULT;
+
+ case 1:
+ /* add vbus */
+ if (i != 3)
+ break;
+ busaddparams.chanptr = NULL; /* NOT YET USED */
+ busaddparams.busNo = busno;
+ if (add_vbus(&busaddparams))
+ return count; /* success */
+ return -EFAULT;
+
+ case 0xf:
+ /* delete all vbuses and all vhbas/vnics on the buses */
+ if (i != 2)
+ break;
+ delete_all();
+ return count; /* success */
+ default:
+ break;
+ }
+ PRINT_USAGE_RETURN;
+ }
+
+ /* if (type == VIRTNIC_TYPE) or if (type == VIRTHBA_TYPE) */
+ switch (action) {
+ case 0:
+ /* delete vhba/vnic */
+ i = sscanf(buf, "%x-%x-%p", &type, &action, &chanptr);
+ if (i != 3)
+ break;
+ delparams.chanptr = chanptr;
+ if (type == VIRTHBA_TYPE) {
+ if (delete_vhba(&delparams))
+ return count; /* success */
+ } else {
+ if (delete_vnic(&delparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+
+ case 1:
+ /* add vhba/vnic */
+ i = sscanf(buf, "%x-%x-%p-%d-%d", &type, &action, &chanptr,
+ &busno, &deviceno);
+ if (i != 5)
+ break;
+ addparams.chanptr = chanptr;
+ addparams.busNo = busno;
+ addparams.deviceNo = deviceno;
+ if (type == VIRTHBA_TYPE) {
+ if (add_vhba(&addparams))
+ return count; /* success */
+ } else {
+ if (add_vnic(&addparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+
+#ifdef STORAGE_CHANNEL
+ case 2:
+ /* add vhba */
+ i = sscanf(buf, "%x-%x-%d-%d", &type, &action, &busno,
+ &deviceno);
+ if (i != 4)
+ break;
+ storagechannel = uislib_storage_channel(0); /* Get my storage channel */
+ /* ioremap_cache it now */
+ addparams.chanptr =
+ (void *) ioremap_cache(storagechannel, IO_CHANNEL_SIZE);
+ if (addparams.chanptr == NULL) {
+ LOGERR("Failure to get remap storage channel.\n");
+ return -EFAULT;
+ }
+ addparams.busNo = busno;
+ addparams.deviceNo = deviceno;
+ if (type == VIRTHBA_TYPE) {
+ if (add_vhba(&addparams))
+ return count; /* success */
+ }
+ return -EFAULT;
+#endif
+ case 0xf:
+ /* delete all vhbas/vnics */
+ i = sscanf(buf, "%x-%x-%d", &type, &action, &busno);
+ if (i != 3)
+ break;
+ busdelparams.busNo = busno;
+ delete_all_virt(type, &busdelparams);
+ return count; /* success */
+ default:
+ break;
+ }
+ PRINT_USAGE_RETURN;
+}
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init virtpci_mod_init(void)
+{
+ int ret;
+
+ LOGINF("Module build: Date:%s Time:%s...\n", __DATE__, __TIME__);
+
+ POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ ret = bus_register(&virtpci_bus_type);
+ /* creates /sys/bus/uisvirtpci which contains devices &
+ * drivers directory
+ */
+ if (ret) {
+ LOGERR("bus_register ****FAILED:%d\n", ret);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret,
+ POSTCODE_SEVERITY_ERR);
+ return ret;
+ }
+ DBGINF("bus_register successful\n");
+ BusDeviceInfo_Init(&Bus_DriverInfo,
+ "clientbus", "virtpci",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ /* create a root bus used to parent all the virtpci buses. */
+ ret = device_register(&virtpci_rootbus_device);
+ if (ret) {
+ LOGERR("device_register FAILED:%d\n", ret);
+ bus_unregister(&virtpci_bus_type);
+ POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret,
+ POSTCODE_SEVERITY_ERR);
+ return ret;
+ }
+ DBGINF("device_register successful ret:%x\n", ret);
+
+ if (!uisctrl_register_req_handler(2, (void *) &virtpci_ctrlchan_func,
+ &Chipset_DriverInfo)) {
+ LOGERR("uisctrl_register_req_handler ****FAILED.\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
+ device_unregister(&virtpci_rootbus_device);
+ bus_unregister(&virtpci_bus_type);
+ return -1;
+ }
+
+ LOGINF("successfully registered virtpci_ctrlchan_func (0x%p) as callback.\n",
+ (void *) &virtpci_ctrlchan_func);
+ /* create the proc directories */
+ virtpci_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
+ virt_proc_entry = proc_create(VIRT_PROC_ENTRY_FN, 0, virtpci_proc_dir,
+ &proc_virt_fops);
+ info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0, virtpci_proc_dir,
+ &proc_info_fops);
+ LOGINF("Leaving\n");
+ POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void __exit virtpci_mod_exit(void)
+{
+ LOGINF("virtpci_mod_exit...\n");
+
+ /* unregister the callback function */
+ if (!uisctrl_register_req_handler(2, NULL, NULL))
+ LOGERR("uisctrl_register_req_handler ****FAILED.\n");
+
+ device_unregister(&virtpci_rootbus_device);
+ bus_unregister(&virtpci_bus_type);
+
+ if (virt_proc_entry)
+ remove_proc_entry(VIRT_PROC_ENTRY_FN, virtpci_proc_dir);
+
+ if (info_proc_entry)
+ remove_proc_entry(INFO_PROC_ENTRY_FN, virtpci_proc_dir);
+
+ if (virtpci_proc_dir)
+ remove_proc_entry(DIR_PROC_ENTRY, NULL);
+
+ LOGINF("Leaving\n");
+
+}
+
+module_init(virtpci_mod_init);
+module_exit(virtpci_mod_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uisvirtpci");
+
diff --git a/drivers/staging/unisys/virtpci/virtpci.h b/drivers/staging/unisys/virtpci/virtpci.h
new file mode 100644
index 000000000000..b8fd07bc5435
--- /dev/null
+++ b/drivers/staging/unisys/virtpci/virtpci.h
@@ -0,0 +1,104 @@
+/* virtpci.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Unisys Virtual PCI driver header
+ */
+
+#ifndef __VIRTPCI_H__
+#define __VIRTPCI_H__
+
+#include "uisqueue.h"
+#include <linux/version.h>
+
+#define PCI_DEVICE_ID_VIRTHBA 0xAA00
+#define PCI_DEVICE_ID_VIRTNIC 0xAB00
+
+struct scsi_adap_info {
+ void *scsihost; /* scsi host if this device is a scsi hba */
+ struct vhba_wwnn wwnn; /* the world wide node name of vhba */
+ struct vhba_config_max max; /* various max specifications used
+ * to config vhba */
+};
+
+struct net_adap_info {
+ struct net_device *netdev; /* network device if this
+ * device is a NIC */
+ u8 mac_addr[MAX_MACADDR_LEN];
+ int num_rcv_bufs;
+ unsigned mtu;
+ GUID zoneGuid;
+};
+
+typedef enum {
+ VIRTHBA_TYPE = 0,
+ VIRTNIC_TYPE = 1,
+ VIRTBUS_TYPE = 6,
+} VIRTPCI_DEV_TYPE;
+
+struct virtpci_dev {
+ VIRTPCI_DEV_TYPE devtype; /* indicates type of the
+ * virtual pci device */
+ struct virtpci_driver *mydriver; /* which driver has allocated
+ * this device */
+ unsigned short vendor; /* vendor id for device */
+ unsigned short device; /* device id for device */
+ U32 busNo; /* number of bus on which device exists */
+ U32 deviceNo; /* device's number on the bus */
+ struct InterruptInfo intr; /* interrupt info */
+ struct device generic_dev; /* generic device */
+ union {
+ struct scsi_adap_info scsi;
+ struct net_adap_info net;
+ };
+
+ struct uisqueue_info queueinfo; /* holds ptr to channel where cmds &
+ * rsps are queued & retrieved */
+ struct virtpci_dev *next; /* points to next virtpci device */
+};
+
+struct virtpci_driver {
+ struct list_head node;
+ const char *name; /* the name of the driver in sysfs */
+ const char *version;
+ const char *vertag;
+ const char *build_date;
+ const char *build_time;
+ const struct pci_device_id *id_table; /* must be non-NULL for probe
+ * to be called */
+ int (*probe)(struct virtpci_dev *dev,
+ const struct pci_device_id *id); /* device inserted */
+ void (*remove)(struct virtpci_dev *dev); /* Device removed (NULL if
+ * not a hot-plug capable
+ * driver) */
+ int (*suspend)(struct virtpci_dev *dev,
+ u32 state); /* Device suspended */
+ int (*resume)(struct virtpci_dev *dev); /* Device woken up */
+ int (*enable_wake)(struct virtpci_dev *dev,
+ u32 state, int enable); /* Enable wake event */
+ struct device_driver core_driver; /* VIRTPCI core fills this in */
+};
+
+#define driver_to_virtpci_driver(in_drv) \
+ container_of(in_drv, struct virtpci_driver, core_driver)
+#define device_to_virtpci_dev(in_dev) \
+ container_of(in_dev, struct virtpci_dev, generic_dev)
+
+int virtpci_register_driver(struct virtpci_driver *);
+void virtpci_unregister_driver(struct virtpci_driver *);
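+
+/* Example registration (a sketch only; all "my_*" names are hypothetical):
+ *
+ *	static struct virtpci_driver my_virt_driver = {
+ *		.name = "myvirtdrv",
+ *		.version = "1.0",
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	virtpci_register_driver(&my_virt_driver);
+ *	...
+ *	virtpci_unregister_driver(&my_virt_driver);
+ */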
+
+#endif /* __VIRTPCI_H__ */
diff --git a/drivers/staging/unisys/visorchannel/Kconfig b/drivers/staging/unisys/visorchannel/Kconfig
new file mode 100644
index 000000000000..41c3b4b997eb
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorchannel configuration
+#
+
+config UNISYS_VISORCHANNEL
+ tristate "Unisys visorchannel driver"
+ depends on UNISYSSPAR && UNISYS_VISORUTIL
+ ---help---
+ If you say Y here, you will enable the Unisys visorchannel driver.
+
diff --git a/drivers/staging/unisys/visorchannel/Makefile b/drivers/staging/unisys/visorchannel/Makefile
new file mode 100644
index 000000000000..f0060be55bc5
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Unisys visorchannel
+#
+
+obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel.o
+
+visorchannel-y := visorchannel_main.o visorchannel_funcs.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -Idrivers/staging/unisys/visorutil
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/visorchannel/globals.h b/drivers/staging/unisys/visorchannel/globals.h
new file mode 100644
index 000000000000..668f832ca566
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/globals.h
@@ -0,0 +1,29 @@
+/* globals.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHANNEL_GLOBALS_H__
+#define __VISORCHANNEL_GLOBALS_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "memregion.h"
+#include "version.h"
+
+#define MYDRVNAME "visorchannel"
+
+#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel.h b/drivers/staging/unisys/visorchannel/visorchannel.h
new file mode 100644
index 000000000000..62d29a233fd0
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel.h
@@ -0,0 +1,106 @@
+/* visorchannel.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHANNEL_H__
+#define __VISORCHANNEL_H__
+
+#include "commontypes.h"
+#include "memregion.h"
+#include "channel.h"
+#ifndef HOSTADDRESS
+#define HOSTADDRESS U64
+#endif
+#ifndef BOOL
+#define BOOL int
+#endif
+
+/* VISORCHANNEL is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct VISORCHANNEL_Tag VISORCHANNEL;
+
+/* Note that for visorchannel_create() and visorchannel_create_overlapped(),
+ * <channelBytes> and <guid> arguments may be 0 if we are a channel CLIENT.
+ * In this case, the values can simply be read from the channel header.
+ */
+VISORCHANNEL *visorchannel_create(HOSTADDRESS physaddr,
+ ulong channelBytes, GUID guid);
+VISORCHANNEL *visorchannel_create_overlapped(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off,
+ GUID guid);
+VISORCHANNEL *visorchannel_create_with_lock(HOSTADDRESS physaddr,
+ ulong channelBytes, GUID guid);
+VISORCHANNEL *visorchannel_create_overlapped_with_lock(ulong channelBytes,
+ VISORCHANNEL *parent,
+ ulong off, GUID guid);
+void visorchannel_destroy(VISORCHANNEL *channel);
+int visorchannel_read(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes);
+int visorchannel_write(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes);
+int visorchannel_clear(VISORCHANNEL *channel, ulong offset,
+ U8 ch, ulong nbytes);
+BOOL visorchannel_signalremove(VISORCHANNEL *channel, U32 queue, void *msg);
+BOOL visorchannel_signalinsert(VISORCHANNEL *channel, U32 queue, void *msg);
+int visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, U32 queue);
+int visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, U32 queue);
+
+HOSTADDRESS visorchannel_get_physaddr(VISORCHANNEL *channel);
+ulong visorchannel_get_nbytes(VISORCHANNEL *channel);
+char *visorchannel_id(VISORCHANNEL *channel, char *s);
+char *visorchannel_zoneid(VISORCHANNEL *channel, char *s);
+U64 visorchannel_get_clientpartition(VISORCHANNEL *channel);
+GUID visorchannel_get_GUID(VISORCHANNEL *channel);
+MEMREGION *visorchannel_get_memregion(VISORCHANNEL *channel);
+char *visorchannel_GUID_id(GUID *guid, char *s);
+void visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+ struct seq_file *seq, U32 off);
+void visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+ int off, int len, struct seq_file *seq);
+void *visorchannel_get_header(VISORCHANNEL *channel);
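+
+/* Example (a minimal sketch only): a channel CLIENT that already knows the
+ * channel's physical address may pass 0 for <channelBytes> and a
+ * zero-initialized GUID, per the note above, letting both values be read back
+ * from the channel header:
+ *
+ *	GUID zero_guid = { 0 };
+ *	VISORCHANNEL *chan = visorchannel_create(physaddr, 0, zero_guid);
+ *
+ *	if (chan != NULL) {
+ *		...
+ *		visorchannel_destroy(chan);
+ *	}
+ *
+ * (<physaddr> is a placeholder for wherever the channel address came from,
+ * e.g. a controlvm message.)
+ */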
+
+#define VISORCHANNEL_CHANGE_SERVER_STATE(chan, chanId, newstate) \
+ do { \
+ U8 *p = (U8 *)visorchannel_get_header(chan); \
+ if (p) { \
+ ULTRA_CHANNEL_SERVER_TRANSITION(p, chanId, SrvState, \
+ newstate, logCtx); \
+ visorchannel_write \
+ (chan, \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, SrvState), \
+ p + \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, SrvState), \
+ sizeof(U32)); \
+ } \
+ } while (0)
+
+#define VISORCHANNEL_CHANGE_CLIENT_STATE(chan, chanId, newstate) \
+ do { \
+ U8 *p = (U8 *)visorchannel_get_header(chan); \
+ if (p) { \
+ ULTRA_CHANNEL_CLIENT_TRANSITION(p, chanId, \
+ newstate, logCtx); \
+ visorchannel_write \
+ (chan, \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, CliStateOS), \
+ p + \
+ offsetof(ULTRA_CHANNEL_PROTOCOL, CliStateOS), \
+ sizeof(U32)); \
+ } \
+ } while (0)
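+
+/* Both macros above also expand to a reference to a variable named logCtx,
+ * which is not a macro parameter; callers must therefore have a suitable
+ * logCtx in scope wherever these macros are used.
+ */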
+
+#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
new file mode 100644
index 000000000000..053681616ba3
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
@@ -0,0 +1,674 @@
+/* visorchannel_funcs.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This provides Supervisor channel communication primitives, which are
+ * independent of the mechanism used to access the channel data. All channel
+ * data is accessed using the memregion abstraction. (memregion has both
+ * a CM2 implementation and a direct memory implementation.)
+ */
+
+#include "globals.h"
+#include "visorchannel.h"
+#include "guidutils.h"
+
+#define MYDRVNAME "visorchannel"
+
+struct VISORCHANNEL_Tag {
+ MEMREGION *memregion; /* from visor_memregion_create() */
+ CHANNEL_HEADER chan_hdr;
+ GUID guid;
+ ulong size;
+ BOOL needs_lock;
+ spinlock_t insert_lock;
+ spinlock_t remove_lock;
+
+ struct {
+ SIGNAL_QUEUE_HEADER req_queue;
+ SIGNAL_QUEUE_HEADER rsp_queue;
+ SIGNAL_QUEUE_HEADER event_queue;
+ SIGNAL_QUEUE_HEADER ack_queue;
+ } safe_uis_queue;
+};
+
+/* Creates the VISORCHANNEL abstraction for a data area in memory, but does
+ * NOT modify this data area.
+ */
+static VISORCHANNEL *
+visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
+ VISORCHANNEL *parent, ulong off, GUID guid,
+ BOOL needs_lock)
+{
+ VISORCHANNEL *p = NULL;
+ void *rc = NULL;
+
+ p = kmalloc(sizeof(VISORCHANNEL), GFP_KERNEL|__GFP_NORETRY);
+ if (p == NULL) {
+ ERRDRV("allocation failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ p->memregion = NULL;
+ p->needs_lock = needs_lock;
+ spin_lock_init(&p->insert_lock);
+ spin_lock_init(&p->remove_lock);
+
+ /* prepare chan_hdr (abstraction to read/write channel memory) */
+ if (parent == NULL)
+ p->memregion =
+ visor_memregion_create(physaddr, sizeof(CHANNEL_HEADER));
+ else
+ p->memregion =
+ visor_memregion_create_overlapped(parent->memregion,
+ off,
+ sizeof(CHANNEL_HEADER));
+ if (p->memregion == NULL) {
+		ERRDRV("visor_memregion_create failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ if (visor_memregion_read(p->memregion, 0, &p->chan_hdr,
+ sizeof(CHANNEL_HEADER)) < 0) {
+ ERRDRV("visor_memregion_read failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ if (channelBytes == 0)
+ /* we had better be a CLIENT of this channel */
+ channelBytes = (ulong) p->chan_hdr.Size;
+ if (STRUCTSEQUAL(guid, Guid0))
+ /* we had better be a CLIENT of this channel */
+ guid = p->chan_hdr.Type;
+ if (visor_memregion_resize(p->memregion, channelBytes) < 0) {
+ ERRDRV("visor_memregion_resize failed: (status=0)\n");
+ rc = NULL;
+ goto Away;
+ }
+ p->size = channelBytes;
+ p->guid = guid;
+
+ rc = p;
+Away:
+
+ if (rc == NULL) {
+ if (p != NULL) {
+ visorchannel_destroy(p);
+ p = NULL;
+ }
+ }
+ return rc;
+}
+
+VISORCHANNEL *
+visorchannel_create(HOSTADDRESS physaddr, ulong channelBytes, GUID guid)
+{
+ return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ FALSE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create);
+
+VISORCHANNEL *
+visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channelBytes,
+ GUID guid)
+{
+ return visorchannel_create_guts(physaddr, channelBytes, NULL, 0, guid,
+ TRUE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
+
+VISORCHANNEL *
+visorchannel_create_overlapped(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off, GUID guid)
+{
+ return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ FALSE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_overlapped);
+
+VISORCHANNEL *
+visorchannel_create_overlapped_with_lock(ulong channelBytes,
+ VISORCHANNEL *parent, ulong off,
+ GUID guid)
+{
+ return visorchannel_create_guts(0, channelBytes, parent, off, guid,
+ TRUE);
+}
+EXPORT_SYMBOL_GPL(visorchannel_create_overlapped_with_lock);
+
+void
+visorchannel_destroy(VISORCHANNEL *channel)
+{
+ if (channel == NULL)
+ return;
+ if (channel->memregion != NULL) {
+ visor_memregion_destroy(channel->memregion);
+ channel->memregion = NULL;
+ }
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(visorchannel_destroy);
+
+HOSTADDRESS
+visorchannel_get_physaddr(VISORCHANNEL *channel)
+{
+ return visor_memregion_get_physaddr(channel->memregion);
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
+
+ulong
+visorchannel_get_nbytes(VISORCHANNEL *channel)
+{
+ return channel->size;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
+
+char *
+visorchannel_GUID_id(GUID *guid, char *s)
+{
+ return GUID_format1(guid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_GUID_id);
+
+char *
+visorchannel_id(VISORCHANNEL *channel, char *s)
+{
+ return visorchannel_GUID_id(&channel->guid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_id);
+
+char *
+visorchannel_zoneid(VISORCHANNEL *channel, char *s)
+{
+ return visorchannel_GUID_id(&channel->chan_hdr.ZoneGuid, s);
+}
+EXPORT_SYMBOL_GPL(visorchannel_zoneid);
+
+HOSTADDRESS
+visorchannel_get_clientpartition(VISORCHANNEL *channel)
+{
+ return channel->chan_hdr.PartitionHandle;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
+
+GUID
+visorchannel_get_GUID(VISORCHANNEL *channel)
+{
+ return channel->guid;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_GUID);
+
+MEMREGION *
+visorchannel_get_memregion(VISORCHANNEL *channel)
+{
+ return channel->memregion;
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_memregion);
+
+int
+visorchannel_read(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ int rc = visor_memregion_read(channel->memregion, offset,
+ local, nbytes);
+ if ((rc >= 0) && (offset == 0) && (nbytes >= sizeof(CHANNEL_HEADER)))
+ memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_read);
+
+int
+visorchannel_write(VISORCHANNEL *channel, ulong offset,
+ void *local, ulong nbytes)
+{
+ if (offset == 0 && nbytes >= sizeof(CHANNEL_HEADER))
+ memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ return visor_memregion_write(channel->memregion, offset, local, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorchannel_write);
+
+int
+visorchannel_clear(VISORCHANNEL *channel, ulong offset, U8 ch, ulong nbytes)
+{
+ int rc = -1;
+ int bufsize = 65536;
+ int written = 0;
+ U8 *buf = vmalloc(bufsize);
+
+ if (buf == NULL) {
+ ERRDRV("%s failed memory allocation", __func__);
+ goto Away;
+ }
+ memset(buf, ch, bufsize);
+ while (nbytes > 0) {
+ ulong thisbytes = bufsize;
+ int x = -1;
+ if (nbytes < thisbytes)
+ thisbytes = nbytes;
+ x = visor_memregion_write(channel->memregion, offset + written,
+ buf, thisbytes);
+ if (x < 0) {
+ rc = x;
+ goto Away;
+ }
+ written += thisbytes;
+ nbytes -= thisbytes;
+ }
+ rc = 0;
+
+Away:
+ if (buf != NULL) {
+ vfree(buf);
+ buf = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_clear);
+
+void *
+visorchannel_get_header(VISORCHANNEL *channel)
+{
+ return (void *) &(channel->chan_hdr);
+}
+EXPORT_SYMBOL_GPL(visorchannel_get_header);
+
+/** Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
+ * channel header
+ */
+#define SIG_QUEUE_OFFSET(chan_hdr, q) \
+ ((chan_hdr)->oChannelSpace + ((q) * sizeof(SIGNAL_QUEUE_HEADER)))
+
+/** Return offset of a specific queue entry (data) from the beginning of a
+ * channel header
+ */
+#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
+ (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->oSignalBase + \
+ ((slot) * (sig_hdr)->SignalSize))
+
+/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
+ * into host memory
+ */
+#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
+ (visor_memregion_write(channel->memregion, \
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
+ offsetof(SIGNAL_QUEUE_HEADER, FIELD), \
+ &((sig_hdr)->FIELD), \
+ sizeof((sig_hdr)->FIELD)) >= 0)
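+
+/* Putting the macros above together (informal sketch): relative to the start
+ * of the channel header, the data for slot <slot> of queue <q> lives at
+ *
+ *	oChannelSpace + (q * sizeof(SIGNAL_QUEUE_HEADER))	(queue header)
+ *	  + oSignalBase + (slot * SignalSize)			(slot data)
+ *
+ * where oSignalBase and SignalSize come from that queue's SIGNAL_QUEUE_HEADER.
+ */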
+
+static BOOL
+sig_read_header(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr)
+{
+ BOOL rc = FALSE;
+
+ if (channel->chan_hdr.oChannelSpace < sizeof(CHANNEL_HEADER)) {
+ ERRDRV("oChannelSpace too small: (status=%d)\n", rc);
+ goto Away;
+ }
+
+ /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
+
+ if (visor_memregion_read(channel->memregion,
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
+ sig_hdr, sizeof(SIGNAL_QUEUE_HEADER)) < 0) {
+ ERRDRV("queue=%d SIG_QUEUE_OFFSET=%d",
+ queue, (int)SIG_QUEUE_OFFSET(&channel->chan_hdr, queue));
+ ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ return rc;
+}
+
+static BOOL
+sig_do_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data, BOOL is_write)
+{
+ BOOL rc = FALSE;
+ int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
+ sig_hdr, slot);
+ if (is_write) {
+ if (visor_memregion_write(channel->memregion,
+ signal_data_offset,
+ data, sig_hdr->SignalSize) < 0) {
+ ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ } else {
+ if (visor_memregion_read(channel->memregion, signal_data_offset,
+ data, sig_hdr->SignalSize) < 0) {
+ ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ }
+ rc = TRUE;
+Away:
+ return rc;
+}
+
+static inline BOOL
+sig_read_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data)
+{
+ return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE);
+}
+
+static inline BOOL
+sig_write_data(VISORCHANNEL *channel, U32 queue,
+ SIGNAL_QUEUE_HEADER *sig_hdr, U32 slot, void *data)
+{
+ return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE);
+}
+
+static inline unsigned char
+safe_sig_queue_validate(pSIGNAL_QUEUE_HEADER psafe_sqh,
+ pSIGNAL_QUEUE_HEADER punsafe_sqh,
+ U32 *phead, U32 *ptail)
+{
+ if ((*phead >= psafe_sqh->MaxSignalSlots)
+ || (*ptail >= psafe_sqh->MaxSignalSlots)) {
+ /* Choose 0 or max, maybe based on current tail value */
+ *phead = 0;
+ *ptail = 0;
+
+ /* Sync with client as necessary */
+ punsafe_sqh->Head = *phead;
+ punsafe_sqh->Tail = *ptail;
+
+ ERRDRV("safe_sig_queue_validate: head = 0x%x, tail = 0x%x, MaxSlots = 0x%x",
+ *phead, *ptail, psafe_sqh->MaxSignalSlots);
+ return 0;
+ }
+ return 1;
+} /* end safe_sig_queue_validate */
+
+BOOL
+visorchannel_signalremove(VISORCHANNEL *channel, U32 queue, void *msg)
+{
+ BOOL rc = FALSE;
+ SIGNAL_QUEUE_HEADER sig_hdr;
+
+ if (channel->needs_lock)
+ spin_lock(&channel->remove_lock);
+
+ if (!sig_read_header(channel, queue, &sig_hdr)) {
+ rc = FALSE;
+ goto Away;
+ }
+ if (sig_hdr.Head == sig_hdr.Tail) {
+ rc = FALSE; /* no signals to remove */
+ goto Away;
+ }
+ sig_hdr.Tail = (sig_hdr.Tail + 1) % sig_hdr.MaxSignalSlots;
+ if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.Tail, msg)) {
+ ERRDRV("sig_read_data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ sig_hdr.NumSignalsReceived++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ MEMORYBARRIER;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Tail)) {
+ ERRDRV("visor_memregion_write of Tail failed: (status=%d)\n",
+ rc);
+ goto Away;
+ }
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsReceived)) {
+ ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ if (channel->needs_lock)
+ spin_unlock(&channel->remove_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalremove);
+
+BOOL
+visorchannel_signalinsert(VISORCHANNEL *channel, U32 queue, void *msg)
+{
+ BOOL rc = FALSE;
+ SIGNAL_QUEUE_HEADER sig_hdr;
+
+ if (channel->needs_lock)
+ spin_lock(&channel->insert_lock);
+
+ if (!sig_read_header(channel, queue, &sig_hdr)) {
+ rc = FALSE;
+ goto Away;
+ }
+
+ sig_hdr.Head = ((sig_hdr.Head + 1) % sig_hdr.MaxSignalSlots);
+ if (sig_hdr.Head == sig_hdr.Tail) {
+ sig_hdr.NumOverflows++;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumOverflows)) {
+ ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = FALSE;
+ goto Away;
+ }
+
+ if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.Head, msg)) {
+ ERRDRV("sig_write_data failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ sig_hdr.NumSignalsSent++;
+
+ /* For each data field in SIGNAL_QUEUE_HEADER that was modified,
+ * update host memory.
+ */
+ MEMORYBARRIER;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Head)) {
+ ERRDRV("visor_memregion_write of Head failed: (status=%d)\n",
+ rc);
+ goto Away;
+ }
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsSent)) {
+ ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n", rc);
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ if (channel->needs_lock)
+ spin_unlock(&channel->insert_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
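+
+/* Note on the queue discipline used above: Head is advanced (mod
+ * MaxSignalSlots) before a signal is written and Tail is advanced before a
+ * signal is read, so the queue is empty when Head == Tail and is treated as
+ * full (NumOverflows is bumped) when advancing Head would make it equal Tail,
+ * leaving one slot always unused.
+ */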
+
+int
+visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, U32 queue)
+{
+ SIGNAL_QUEUE_HEADER sig_hdr;
+ U32 slots_avail, slots_used;
+ U32 head, tail;
+
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ head = sig_hdr.Head;
+ tail = sig_hdr.Tail;
+ if (head < tail)
+ head = head + sig_hdr.MaxSignalSlots;
+ slots_used = (head - tail);
+ slots_avail = sig_hdr.MaxSignals - slots_used;
+ return (int) slots_avail;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
+
+int
+visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, U32 queue)
+{
+ SIGNAL_QUEUE_HEADER sig_hdr;
+ if (!sig_read_header(channel, queue, &sig_hdr))
+ return 0;
+ return (int) sig_hdr.MaxSignals;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
+
+static void
+sigqueue_debug(SIGNAL_QUEUE_HEADER *q, int which, struct seq_file *seq)
+{
+ seq_printf(seq, "Signal Queue #%d\n", which);
+ seq_printf(seq, " VersionId = %lu\n", (ulong) q->VersionId);
+ seq_printf(seq, " Type = %lu\n", (ulong) q->Type);
+ seq_printf(seq, " oSignalBase = %llu\n",
+ (long long) q->oSignalBase);
+ seq_printf(seq, " SignalSize = %lu\n", (ulong) q->SignalSize);
+ seq_printf(seq, " MaxSignalSlots = %lu\n",
+ (ulong) q->MaxSignalSlots);
+ seq_printf(seq, " MaxSignals = %lu\n", (ulong) q->MaxSignals);
+ seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
+ (long long) q->FeatureFlags);
+ seq_printf(seq, " NumSignalsSent = %llu\n",
+ (long long) q->NumSignalsSent);
+ seq_printf(seq, " NumSignalsReceived = %llu\n",
+ (long long) q->NumSignalsReceived);
+ seq_printf(seq, " NumOverflows = %llu\n",
+ (long long) q->NumOverflows);
+ seq_printf(seq, " Head = %lu\n", (ulong) q->Head);
+ seq_printf(seq, " Tail = %lu\n", (ulong) q->Tail);
+}
+
+void
+visorchannel_debug(VISORCHANNEL *channel, int nQueues,
+ struct seq_file *seq, U32 off)
+{
+ HOSTADDRESS addr = 0;
+ ulong nbytes = 0, nbytes_region = 0;
+ MEMREGION *memregion = NULL;
+ CHANNEL_HEADER hdr;
+ CHANNEL_HEADER *phdr = &hdr;
+ char s[99];
+ int i = 0;
+ int errcode = 0;
+
+ if (channel == NULL) {
+ ERRDRV("%s no channel", __func__);
+ return;
+ }
+ memregion = channel->memregion;
+ if (memregion == NULL) {
+ ERRDRV("%s no memregion", __func__);
+ return;
+ }
+ addr = visor_memregion_get_physaddr(memregion);
+ nbytes_region = visor_memregion_get_nbytes(memregion);
+ errcode = visorchannel_read(channel, off,
+ phdr, sizeof(CHANNEL_HEADER));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "Read of channel header failed with errcode=%d)\n",
+ errcode);
+ if (off == 0) {
+ phdr = &channel->chan_hdr;
+ seq_puts(seq, "(following data may be stale)\n");
+ } else
+ return;
+ }
+ nbytes = (ulong) (phdr->Size);
+ seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
+ addr + off, nbytes, nbytes_region);
+ seq_printf(seq, "Type = %s\n", GUID_format2(&phdr->Type, s));
+ seq_printf(seq, "ZoneGuid = %s\n",
+ GUID_format2(&phdr->ZoneGuid, s));
+ seq_printf(seq, "Signature = 0x%-16.16Lx\n",
+ (long long) phdr->Signature);
+ seq_printf(seq, "LegacyState = %lu\n", (ulong) phdr->LegacyState);
+ seq_printf(seq, "SrvState = %lu\n", (ulong) phdr->SrvState);
+ seq_printf(seq, "CliStateBoot = %lu\n", (ulong) phdr->CliStateBoot);
+ seq_printf(seq, "CliStateOS = %lu\n", (ulong) phdr->CliStateOS);
+ seq_printf(seq, "HeaderSize = %lu\n", (ulong) phdr->HeaderSize);
+ seq_printf(seq, "Size = %llu\n", (long long) phdr->Size);
+ seq_printf(seq, "Features = 0x%-16.16llx\n",
+ (long long) phdr->Features);
+ seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
+ (long long) phdr->PartitionHandle);
+ seq_printf(seq, "Handle = 0x%-16.16llx\n",
+ (long long) phdr->Handle);
+ seq_printf(seq, "VersionId = %lu\n", (ulong) phdr->VersionId);
+ seq_printf(seq, "oChannelSpace = %llu\n",
+ (long long) phdr->oChannelSpace);
+	if ((phdr->oChannelSpace != 0) && (errcode >= 0))
+		for (i = 0; i < nQueues; i++) {
+ SIGNAL_QUEUE_HEADER q;
+ errcode = visorchannel_read(channel,
+ off + phdr->oChannelSpace +
+ (i * sizeof(q)),
+ &q, sizeof(q));
+ if (errcode < 0) {
+ seq_printf(seq,
+ "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n",
+ i, addr, errcode);
+ continue;
+ }
+ sigqueue_debug(&q, i, seq);
+ }
+ seq_printf(seq, "--- End channel @0x%-16.16Lx for 0x%lx bytes ---\n",
+ addr + off, nbytes);
+}
+EXPORT_SYMBOL_GPL(visorchannel_debug);
+
+void
+visorchannel_dump_section(VISORCHANNEL *chan, char *s,
+ int off, int len, struct seq_file *seq)
+{
+ char *buf, *tbuf, *fmtbuf;
+ int fmtbufsize = 0;
+ int i;
+ int errcode = 0;
+
+ fmtbufsize = 100 * COVQ(len, 16);
+ buf = kmalloc(len, GFP_KERNEL|__GFP_NORETRY);
+ fmtbuf = kmalloc(fmtbufsize, GFP_KERNEL|__GFP_NORETRY);
+ if (buf == NULL || fmtbuf == NULL)
+ goto Away;
+
+ errcode = visorchannel_read(chan, off, buf, len);
+ if (errcode < 0) {
+		ERRDRV("%s failed to read %s from channel errcode=%d",
+		       __func__, s, errcode);
+ goto Away;
+ }
+ seq_printf(seq, "channel %s:\n", s);
+ tbuf = buf;
+ while (len > 0) {
+ i = (len < 16) ? len : 16;
+ hex_dump_to_buffer(tbuf, i, 16, 1, fmtbuf, fmtbufsize, TRUE);
+ seq_printf(seq, "%s\n", fmtbuf);
+ tbuf += 16;
+ len -= 16;
+ }
+
+Away:
+ if (buf != NULL) {
+ kfree(buf);
+ buf = NULL;
+ }
+ if (fmtbuf != NULL) {
+ kfree(fmtbuf);
+ fmtbuf = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(visorchannel_dump_section);
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_main.c b/drivers/staging/unisys/visorchannel/visorchannel_main.c
new file mode 100644
index 000000000000..482ee0ac1c16
--- /dev/null
+++ b/drivers/staging/unisys/visorchannel/visorchannel_main.c
@@ -0,0 +1,49 @@
+/* visorchannel_main.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This is a module "wrapper" around visorchannel_funcs.
+ */
+
+#include "globals.h"
+#include "channel.h"
+#include "visorchannel.h"
+#include "guidutils.h"
+
+#define MYDRVNAME "visorchannel"
+
+static int __init
+visorchannel_init(void)
+{
+ INFODRV("driver version %s loaded", VERSION);
+ return 0;
+}
+
+static void
+visorchannel_exit(void)
+{
+ INFODRV("driver unloaded");
+}
+
+module_init(visorchannel_init);
+module_exit(visorchannel_exit);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Supervisor channel driver for service partition: ver "
+ VERSION);
+MODULE_VERSION(VERSION);
diff --git a/drivers/staging/unisys/visorchipset/Kconfig b/drivers/staging/unisys/visorchipset/Kconfig
new file mode 100644
index 000000000000..7ca2fbca9d57
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorchipset configuration
+#
+
+config UNISYS_VISORCHIPSET
+ tristate "Unisys visorchipset driver"
+ depends on UNISYSSPAR && UNISYS_VISORUTIL && UNISYS_VISORCHANNEL
+ ---help---
+ If you say Y here, you will enable the Unisys visorchipset driver.
+
diff --git a/drivers/staging/unisys/visorchipset/Makefile b/drivers/staging/unisys/visorchipset/Makefile
new file mode 100644
index 000000000000..f5e8650e1b0e
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for Unisys visorchipset
+#
+
+obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset.o
+
+visorchipset-y := visorchipset_main.o controlvm_direct.o file.o filexfer.o \
+ parser.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/uislib
+ccflags-y += -Idrivers/staging/unisys/visorchannel
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
+ccflags-y += -Idrivers/staging/unisys/visorutil
+ccflags-y += -Iinclude/generated
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
+
diff --git a/drivers/staging/unisys/visorchipset/controlvm.h b/drivers/staging/unisys/visorchipset/controlvm.h
new file mode 100644
index 000000000000..873fa12dfe6f
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/controlvm.h
@@ -0,0 +1,27 @@
+/* controlvm.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CONTROLVM_H__
+#define __CONTROLVM_H__
+
+#include "timskmod.h"
+
+int controlvm_init(void);
+void controlvm_deinit(void);
+HOSTADDRESS controlvm_get_channel_address(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/controlvm_direct.c b/drivers/staging/unisys/visorchipset/controlvm_direct.c
new file mode 100644
index 000000000000..b911ea85c093
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/controlvm_direct.c
@@ -0,0 +1,62 @@
+/* controlvm_direct.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This is controlvm-related code that depends upon firmware running
+ * on a virtual partition.
+ */
+
+#include "globals.h"
+#include "uisutils.h"
+#include "controlvm.h"
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_controlvm_direct_c
+
+/* We can fill in this code when we learn how to make vmcalls... */
+
+int controlvm_init(void)
+{
+ return 0;
+}
+
+void controlvm_deinit(void)
+{
+}
+
+HOSTADDRESS controlvm_get_channel_address(void)
+{
+ static BOOL warned = FALSE;
+	U64 addr = 0;
+	U32 size = 0;
+
+ if (!VMCALL_SUCCESSFUL(Issue_VMCALL_IO_CONTROLVM_ADDR(&addr, &size))) {
+ if (!warned) {
+ ERRDRV("%s - vmcall to determine controlvm channel addr failed",
+ __func__);
+ warned = TRUE;
+ }
+ return 0;
+ }
+ INFODRV("controlvm addr=%Lx", addr);
+ return addr;
+}
diff --git a/drivers/staging/unisys/visorchipset/file.c b/drivers/staging/unisys/visorchipset/file.c
new file mode 100644
index 000000000000..e214a1153282
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/file.c
@@ -0,0 +1,226 @@
+/* file.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This contains the implementation that allows a usermode program to
+ * communicate with the visorchipset driver using a device/file interface.
+ */
+
+#include "globals.h"
+#include "visorchannel.h"
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include "uisutils.h"
+#include "file.h"
+
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_file_c
+
+static struct cdev Cdev;
+static VISORCHANNEL **PControlVm_channel;
+static dev_t MajorDev = -1; /**< indicates major num for device */
+static BOOL Registered = FALSE;
+
+static int visorchipset_open(struct inode *inode, struct file *file);
+static int visorchipset_release(struct inode *inode, struct file *file);
+static int visorchipset_mmap(struct file *file, struct vm_area_struct *vma);
+#ifdef HAVE_UNLOCKED_IOCTL
+long visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+int visorchipset_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+static const struct file_operations visorchipset_fops = {
+ .owner = THIS_MODULE,
+ .open = visorchipset_open,
+ .read = NULL,
+ .write = NULL,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = visorchipset_ioctl,
+#else
+ .ioctl = visorchipset_ioctl,
+#endif
+ .release = visorchipset_release,
+ .mmap = visorchipset_mmap,
+};
+
+int
+visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel)
+{
+ int rc = -1;
+
+ PControlVm_channel = pControlVm_channel;
+ MajorDev = majorDev;
+ cdev_init(&Cdev, &visorchipset_fops);
+ Cdev.owner = THIS_MODULE;
+ if (MAJOR(MajorDev) == 0) {
+ /* dynamic major device number registration required */
+ if (alloc_chrdev_region(&MajorDev, 0, 1, MYDRVNAME) < 0) {
+ ERRDRV("Unable to allocate+register char device %s",
+ MYDRVNAME);
+ goto Away;
+ }
+ Registered = TRUE;
+ INFODRV("New major number %d registered\n", MAJOR(MajorDev));
+ } else {
+ /* static major device number registration required */
+ if (register_chrdev_region(MajorDev, 1, MYDRVNAME) < 0) {
+ ERRDRV("Unable to register char device %s", MYDRVNAME);
+ goto Away;
+ }
+ Registered = TRUE;
+ INFODRV("Static major number %d registered\n", MAJOR(MajorDev));
+ }
+ if (cdev_add(&Cdev, MKDEV(MAJOR(MajorDev), 0), 1) < 0) {
+ ERRDRV("failed to create char device: (status=%d)\n", rc);
+ goto Away;
+ }
+ INFODRV("Registered char device for %s (major=%d)",
+ MYDRVNAME, MAJOR(MajorDev));
+ rc = 0;
+Away:
+ return rc;
+}
+
+void
+visorchipset_file_cleanup(void)
+{
+ if (Cdev.ops != NULL)
+ cdev_del(&Cdev);
+ Cdev.ops = NULL;
+ if (Registered) {
+ if (MAJOR(MajorDev) >= 0) {
+ unregister_chrdev_region(MajorDev, 1);
+ MajorDev = MKDEV(0, 0);
+ }
+ Registered = FALSE;
+ }
+}
+
+static int
+visorchipset_open(struct inode *inode, struct file *file)
+{
+ unsigned minor_number = iminor(inode);
+ int rc = -ENODEV;
+
+ DEBUGDRV("%s", __func__);
+ if (minor_number != 0)
+ goto Away;
+ file->private_data = NULL;
+ rc = 0;
+Away:
+ if (rc < 0)
+ ERRDRV("%s minor=%d failed", __func__, minor_number);
+ return rc;
+}
+
+static int
+visorchipset_release(struct inode *inode, struct file *file)
+{
+ DEBUGDRV("%s", __func__);
+ return 0;
+}
+
+static int
+visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ ulong physAddr = 0;
+ ulong offset = vma->vm_pgoff << PAGE_SHIFT;
+ GUEST_PHYSICAL_ADDRESS addr = 0;
+
+ /* sv_enable_dfp(); */
+ DEBUGDRV("%s", __func__);
+ if (offset & (PAGE_SIZE - 1)) {
+ ERRDRV("%s virtual address NOT page-aligned!", __func__);
+ return -ENXIO; /* need aligned offsets */
+ }
+ switch (offset) {
+ case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
+ vma->vm_flags |= VM_IO;
+ if (*PControlVm_channel == NULL) {
+ ERRDRV("%s no controlvm channel yet", __func__);
+ return -ENXIO;
+ }
+ visorchannel_read(*PControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ gpControlChannel), &addr,
+ sizeof(addr));
+ if (addr == 0) {
+ ERRDRV("%s control channel address is 0", __func__);
+ return -ENXIO;
+ }
+ physAddr = (ulong) (addr);
+ DEBUGDRV("mapping physical address = 0x%lx", physAddr);
+ if (remap_pfn_range(vma, vma->vm_start,
+ physAddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ /*pgprot_noncached */
+ (vma->vm_page_prot))) {
+ ERRDRV("%s remap_pfn_range failed", __func__);
+ return -EAGAIN;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+ DEBUGDRV("%s success!", __func__);
+ return 0;
+}
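+
+/* Example from user space (a sketch; the device node name and <length> are
+ * assumptions, not defined by this file):
+ *
+ *	int fd = open("/dev/visorchipset", O_RDWR);
+ *	void *ctrl = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			  fd, VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
+ *
+ * This lands in the VISORCHIPSET_MMAP_CONTROLCHANOFFSET case above, which maps
+ * the control channel whose address is published in the controlvm channel's
+ * gpControlChannel field.
+ */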
+
+#ifdef HAVE_UNLOCKED_IOCTL
+long
+visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+#else
+int
+visorchipset_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#endif
+{
+ int rc = SUCCESS;
+ S64 adjustment;
+ S64 vrtc_offset;
+ DBGINF("entered visorchipset_ioctl, cmd=%d", cmd);
+ switch (cmd) {
+ case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
+ /* get the physical rtc offset */
+ vrtc_offset = Issue_VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET();
+ if (copy_to_user
+ ((void __user *)arg, &vrtc_offset, sizeof(vrtc_offset))) {
+ rc = -EFAULT;
+ goto Away;
+ }
+		DBGINF("inside visorchipset_ioctl, cmd=%d, vrtc_offset=%lld",
+ cmd, vrtc_offset);
+ break;
+ case VMCALL_UPDATE_PHYSICAL_TIME:
+ if (copy_from_user
+ (&adjustment, (void __user *)arg, sizeof(adjustment))) {
+ rc = -EFAULT;
+ goto Away;
+ }
+		DBGINF("inside visorchipset_ioctl, cmd=%d, adjustment=%lld", cmd,
+ adjustment);
+ rc = Issue_VMCALL_UPDATE_PHYSICAL_TIME(adjustment);
+ break;
+ default:
+ LOGERR("visorchipset_ioctl received invalid command");
+ rc = -EFAULT;
+ break;
+ }
+Away:
+ DBGINF("exiting %d!", rc);
+ return rc;
+}
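+
+/* Example from user space (a sketch; <fd> is an open descriptor for this
+ * driver's character device):
+ *
+ *	S64 vrtc_offset;
+ *	ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &vrtc_offset);
+ *
+ * returns the guest's virtual rtc offset through <vrtc_offset>, matching the
+ * copy_to_user() in the first case above.
+ */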
diff --git a/drivers/staging/unisys/visorchipset/file.h b/drivers/staging/unisys/visorchipset/file.h
new file mode 100644
index 000000000000..597282aa9a06
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/file.h
@@ -0,0 +1,26 @@
+/* file.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __FILE_H__
+#define __FILE_H__
+
+#include "globals.h"
+
+int visorchipset_file_init(dev_t majorDev, VISORCHANNEL **pControlVm_channel);
+void visorchipset_file_cleanup(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/filexfer.c b/drivers/staging/unisys/visorchipset/filexfer.c
new file mode 100644
index 000000000000..431cff844a43
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/filexfer.c
@@ -0,0 +1,506 @@
+/* filexfer.c
+ *
+ * Copyright © 2013 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* Code here-in is the "glue" that connects controlvm messages with the
+ * sparfilexfer driver, which is used to transfer file contents as payload
+ * across the controlvm channel.
+ */
+
+#include "globals.h"
+#include "controlvm.h"
+#include "visorchipset.h"
+#include "filexfer.h"
+
+#ifdef ENABLE_SPARFILEXFER /* sparfilexfer kernel module enabled in build */
+#include "sparfilexfer.h"
+
+/* Driver-global memory */
+static LIST_HEAD(Request_list); /* list of struct any_request *, via
+					 * req_list member */
+
+/* Lock for the request list above; also the memory pool used to allocate
+ * any_request structs, and the pool name.  Note that kmem_cache_create
+ * requires that we keep the storage for the pool name for the life of
+ * the pool.
+ */
+static DEFINE_SPINLOCK(Request_list_lock);
+
+static struct kmem_cache *Request_memory_pool;
+static const char Request_memory_pool_name[] = "filexfer_request_pool";
+size_t Caller_req_context_bytes = 0; /* passed to filexfer_constructor() */
+
+/* This structure defines a single controlvm GETFILE conversation, which
+ * consists of a single controlvm request message and 1 or more controlvm
+ * response messages.
+ */
+struct getfile_request {
+ CONTROLVM_MESSAGE_HEADER controlvm_header;
+ atomic_t buffers_in_use;
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC get_contiguous_controlvm_payload;
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC controlvm_respond_with_payload;
+};
+
+/* This structure defines a single controlvm PUTFILE conversation, which
+ * consists of a single controlvm request with a filename, and additional
+ * controlvm messages with file data.
+ */
+struct putfile_request {
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata;
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile;
+};
+
+/* This structure defines a single file transfer operation, which can either
+ * be a GETFILE or PUTFILE.
+ */
+struct any_request {
+ struct list_head req_list;
+ ulong2 file_request_number;
+ ulong2 data_sequence_number;
+ TRANSMITFILE_DUMP_FUNC dump_func;
+ BOOL is_get;
+ union {
+ struct getfile_request get;
+ struct putfile_request put;
+ };
+ /* Size of caller_context_data will be
+ * <Caller_req_context_bytes> bytes. I aligned this because I
+ * am paranoid about what happens when an arbitrary data
+ * structure with unknown alignment requirements gets copied
+ * here. I want caller_context_data to be aligned to the
+ * coarsest possible alignment boundary that could be required
+ * for any user data structure.
+ */
+	u8 caller_context_data[1] __aligned(sizeof(ulong2));
+};
+
+/*
+ * Links the any_request into the global list of allocated requests
+ * (<Request_list>).
+ */
+static void
+unit_tracking_create(struct list_head *dev_list_link)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_add(dev_list_link, &Request_list);
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+/* Unlinks an any_request from the global list (<Request_list>).
+ */
+static void
+unit_tracking_destroy(struct list_head *dev_list_link)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_del(dev_list_link);
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+/* Allocate memory for and return a new any_request struct, and
+ * link it to the global list of outstanding requests.
+ */
+static struct any_request *
+alloc_request(char *fn, int ln)
+{
+ struct any_request *req = (struct any_request *)
+ (visorchipset_cache_alloc(Request_memory_pool,
+ FALSE,
+ fn, ln));
+ if (!req)
+ return NULL;
+ memset(req, 0, sizeof(struct any_request) + Caller_req_context_bytes);
+ unit_tracking_create(&req->req_list);
+ return req;
+}
+
+/* Book-end for alloc_request().
+ */
+static void
+free_request(struct any_request *req, char *fn, int ln)
+{
+ unit_tracking_destroy(&req->req_list);
+ visorchipset_cache_free(Request_memory_pool, req, fn, ln);
+}
+
+/* Constructor for filexfer.o.
+ */
+int
+filexfer_constructor(size_t req_context_bytes)
+{
+ int rc = -1;
+
+ Caller_req_context_bytes = req_context_bytes;
+ Request_memory_pool =
+ kmem_cache_create(Request_memory_pool_name,
+ sizeof(struct any_request) +
+ Caller_req_context_bytes,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!Request_memory_pool) {
+ LOGERR("failed to alloc Request_memory_pool");
+ rc = -ENOMEM;
+ goto Away;
+ }
+ rc = 0;
+Away:
+ if (rc < 0) {
+ if (Request_memory_pool) {
+ kmem_cache_destroy(Request_memory_pool);
+ Request_memory_pool = NULL;
+ }
+ }
+ return rc;
+}
+
+/* Destructor for filexfer.o.
+ */
+void
+filexfer_destructor(void)
+{
+ if (Request_memory_pool) {
+ kmem_cache_destroy(Request_memory_pool);
+ Request_memory_pool = NULL;
+ }
+}
+
+/* This function will obtain an available chunk from the controlvm payload area,
+ * store the size in bytes of the chunk in <actual_size>, and return a pointer
+ * to the chunk. The function is passed to the sparfilexfer driver, which calls
+ * it whenever payload space is required to copy file data into.
+ */
+static void *
+get_empty_bucket_for_getfile_data(void *context,
+ ulong min_size, ulong max_size,
+ ulong *actual_size)
+{
+ void *bucket;
+ struct any_request *req = (struct any_request *) context;
+
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return NULL;
+ }
+ bucket = (*req->get.get_contiguous_controlvm_payload)
+ (min_size, max_size, actual_size);
+ if (bucket != NULL) {
+ atomic_inc(&req->get.buffers_in_use);
+ DBGINF("%s - sent %lu-byte buffer", __func__, *actual_size);
+ }
+ return bucket;
+}
+
+/* This function will send a controlvm response with data in the payload
+ * (whose space was obtained with get_empty_bucket_for_getfile_data). The
+ * function is passed to the sparfilexfer driver, which calls it whenever it
+ * wants to send file data back across the controlvm channel.
+ */
+static int
+send_full_getfile_data_bucket(void *context, void *bucket,
+ ulong bucket_actual_size, ulong bucket_used_size)
+{
+ struct any_request *req = (struct any_request *) context;
+
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return 0;
+ }
+ DBGINF("sending buffer for %lu/%lu",
+ bucket_used_size, bucket_actual_size);
+ if (!(*req->get.controlvm_respond_with_payload)
+ (&req->get.controlvm_header,
+ req->file_request_number,
+ req->data_sequence_number++,
+ 0, bucket, bucket_actual_size, bucket_used_size, TRUE))
+ atomic_dec(&req->get.buffers_in_use);
+ return 0;
+}
+
+/* This function will send a controlvm response indicating the end of a
+ * GETFILE transfer. The function is passed to the sparfilexfer driver.
+ */
+static void
+send_end_of_getfile_data(void *context, int status)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (!req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return;
+ }
+ LOGINF("status=%d", status);
+ (*req->get.controlvm_respond_with_payload)
+ (&req->get.controlvm_header,
+ req->file_request_number,
+ req->data_sequence_number++, status, NULL, 0, 0, FALSE);
+ free_request(req, __FILE__, __LINE__);
+ module_put(THIS_MODULE);
+}
+
+/* This function supplies data for a PUTFILE transfer.
+ * The function is passed to the sparfilexfer driver.
+ */
+static int
+get_putfile_data(void *context, void *pbuf, size_t bufsize,
+ BOOL buf_is_userspace, size_t *bytes_transferred)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return -1;
+ }
+ return (*req->put.get_controlvm_filedata) (&req->caller_context_data[0],
+ pbuf, bufsize,
+ buf_is_userspace,
+ bytes_transferred);
+}
+
+/* This function is called to indicate the end of a PUTFILE transfer.
+ * The function is passed to the sparfilexfer driver.
+ */
+static void
+end_putfile(void *context, int status)
+{
+ struct any_request *req = (struct any_request *) context;
+ if (req->is_get) {
+ LOGERR("%s - unexpected call", __func__);
+ return;
+ }
+ (*req->put.controlvm_end_putFile) (&req->caller_context_data[0],
+ status);
+ free_request(req, __FILE__, __LINE__);
+ module_put(THIS_MODULE);
+}
+
+/* Refer to filexfer.h for description. */
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ ulong2 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ BOOL use_count_up = FALSE;
+ BOOL failed = TRUE;
+ struct any_request *req = alloc_request(__FILE__, __LINE__);
+
+ if (!req) {
+ LOGERR("allocation of any_request failed");
+ goto Away;
+ }
+ /* We need to increment this module's use count because we're handing
+ * off pointers to functions within this module to be used by
+ * another module.
+ */
+ __module_get(THIS_MODULE);
+ use_count_up = TRUE;
+ req->is_get = TRUE;
+ req->file_request_number = file_request_number;
+ req->data_sequence_number = 0;
+ req->dump_func = dump_func;
+ req->get.controlvm_header = *msgHdr;
+ atomic_set(&req->get.buffers_in_use, 0);
+ req->get.get_contiguous_controlvm_payload =
+ get_contiguous_controlvm_payload;
+ req->get.controlvm_respond_with_payload =
+ controlvm_respond_with_payload;
+ if (sparfilexfer_local2remote(req, /* context, passed to
+ * callback funcs */
+ file_name,
+ file_request_number,
+ uplink_index,
+ disk_index,
+ get_empty_bucket_for_getfile_data,
+ send_full_getfile_data_bucket,
+ send_end_of_getfile_data) < 0) {
+ LOGERR("sparfilexfer_local2remote failed");
+ goto Away;
+ }
+ failed = FALSE;
+Away:
+ if (failed) {
+ if (use_count_up) {
+ module_put(THIS_MODULE);
+ use_count_up = FALSE;
+ }
+ if (req) {
+ free_request(req, __FILE__, __LINE__);
+ req = NULL;
+ }
+ return FALSE;
+ } else {
+ return TRUE;
+ /* success; send callbacks will be called for responses */
+ }
+}
+
+/* Refer to filexfer.h for description. */
+void *
+filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ ulong2 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ BOOL use_count_up = FALSE;
+ BOOL failed = TRUE;
+ struct any_request *req = alloc_request(__FILE__, __LINE__);
+ void *caller_ctx = NULL;
+
+ if (!req) {
+ LOGERR("allocation of any_request failed");
+ goto Away;
+ }
+ caller_ctx = (void *) (&(req->caller_context_data[0]));
+ /* We need to increment this module's use count because we're handing
+ * off pointers to functions within this module to be used by
+ * another module.
+ */
+ __module_get(THIS_MODULE);
+ use_count_up = TRUE;
+ req->is_get = FALSE;
+ req->file_request_number = file_request_number;
+ req->data_sequence_number = 0;
+ req->dump_func = dump_func;
+ req->put.get_controlvm_filedata = get_controlvm_filedata;
+ req->put.controlvm_end_putFile = controlvm_end_putFile;
+ (*init_context) (caller_ctx, msgHdr, file_request_number);
+ if (sparfilexfer_remote2local(req, /* context, passed to
+ * callback funcs */
+ file_name,
+ file_request_number,
+ uplink_index,
+ disk_index,
+ get_putfile_data, end_putfile) < 0) {
+ LOGERR("sparfilexfer_remote2local failed");
+ goto Away;
+ }
+ failed = FALSE;
+Away:
+ if (failed) {
+ if (use_count_up) {
+ module_put(THIS_MODULE);
+ use_count_up = FALSE;
+ }
+ if (req) {
+ free_request(req, __FILE__, __LINE__);
+ req = NULL;
+ }
+ return NULL;
+ } else {
+ return caller_ctx;
+ /* success; callbacks will be called for responses */
+ }
+}
+
+static void
+dump_get_request(struct seq_file *f, struct getfile_request *getreq)
+{
+ seq_printf(f, " buffers_in_use=%d\n",
+ atomic_read(&getreq->buffers_in_use));
+}
+
+static void
+dump_put_request(struct seq_file *f, struct putfile_request *putreq)
+{
+}
+
+static void
+dump_request(struct seq_file *f, struct any_request *req)
+{
+ seq_printf(f, "* %s id=%llu seq=%llu\n",
+ ((req->is_get) ? "Get" : "Put"),
+ req->file_request_number, req->data_sequence_number);
+ if (req->is_get)
+ dump_get_request(f, &req->get);
+ else
+ dump_put_request(f, &req->put);
+ if (req->dump_func)
+ (*req->dump_func) (f, &(req->caller_context_data[0]), " ");
+}
+
+void
+filexfer_dump(struct seq_file *f)
+{
+ ulong flags;
+ struct list_head *entry;
+
+ seq_puts(f, "Outstanding TRANSMIT_FILE requests:\n");
+ spin_lock_irqsave(&Request_list_lock, flags);
+ list_for_each(entry, &Request_list) {
+ struct any_request *req;
+ req = list_entry(entry, struct any_request, req_list);
+ dump_request(f, req);
+ }
+ spin_unlock_irqrestore(&Request_list_lock, flags);
+}
+
+#else /* ifdef ENABLE_SPARFILEXFER */
+int
+filexfer_constructor(size_t req_context_bytes)
+{
+ return 0; /* success */
+}
+
+void
+filexfer_destructor(void)
+{
+}
+
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ /* since no sparfilexfer module exists to call, we just fail */
+ return FALSE;
+}
+
+void *
+filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func)
+{
+ /* since no sparfilexfer module exists to call, we just fail */
+ return NULL;
+}
+
+void
+filexfer_dump(struct seq_file *f)
+{
+}
+
+#endif /* ifdef ENABLE_SPARFILEXFER */
diff --git a/drivers/staging/unisys/visorchipset/filexfer.h b/drivers/staging/unisys/visorchipset/filexfer.h
new file mode 100644
index 000000000000..a1bfca69cdcf
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/filexfer.h
@@ -0,0 +1,147 @@
+/* filexfer.h
+ *
+ * Copyright © 2013 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* This header file defines the interface that filexfer.c provides to other
+ * code in the visorchipset driver.
+ */
+
+#ifndef __FILEXFER_H__
+#define __FILEXFER_H__
+
+#include "globals.h"
+#include "controlvmchannel.h"
+#include <linux/seq_file.h>
+
+typedef void *(*GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC) (ulong min_size,
+ ulong max_size,
+ ulong *actual_size);
+
+typedef BOOL
+(*CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC) (CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 fileRequestNumber,
+ u64 dataSequenceNumber,
+ int response,
+ void *bucket, ulong payloadChunkSize,
+ ulong payloadUsedBytes, BOOL partial);
+
+typedef void
+(*TRANSMITFILE_INIT_CONTEXT_FUNC)(void *ctx,
+ const CONTROLVM_MESSAGE_HEADER *hdr,
+ u64 file_request_number);
+typedef void (*TRANSMITFILE_DUMP_FUNC) (struct seq_file *f, void *ctx,
+ const char *pfx);
+typedef int (*GET_CONTROLVM_FILEDATA_FUNC) (void *ctx,
+ void *buf, size_t bufsize,
+ BOOL buf_is_userspace,
+ size_t *bytes_transferred);
+typedef void (*CONTROLVM_RESPOND_FUNC) (void *ctx, int response);
+
+/* Call once to initialize filexfer.o.
+ * req_context_bytes number of bytes the caller needs in order to track each
+ * file transfer conversation. The storage area handed to the <init_context>
+ * callback of filexfer_putFile() is this many bytes in size. Code within
+ * filexfer.o allocates that area dynamically and passes a pointer to it back
+ * to the caller in callback functions.
+ */
+int filexfer_constructor(size_t req_context_bytes);
+
+/* Call once to clean up filexfer.o */
+void filexfer_destructor(void);
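+
+#if 0
+/* Illustrative sketch (not compiled): one plausible way a caller might size
+ * and initialize this module. struct my_xfer_ctx and the example_* functions
+ * are hypothetical and only show that req_context_bytes is the size of the
+ * per-conversation context filexfer.o allocates and hands back via callbacks.
+ */
+struct my_xfer_ctx {
+	u64 request_id;		/* caller's bookkeeping for one conversation */
+	void *private_state;	/* whatever else the caller wants to track */
+};
+
+static int example_driver_init(void)
+{
+	/* tell filexfer.o how big each per-conversation context must be */
+	return filexfer_constructor(sizeof(struct my_xfer_ctx));
+}
+
+static void example_driver_exit(void)
+{
+	filexfer_destructor();
+}
+#endif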
+
+/* Call this to dump diagnostic info about all outstanding getFiles/putFiles */
+void filexfer_dump(struct seq_file *f);
+
+/* Call to transfer a file from the local filesystem (i.e., from the environment
+ * where this driver is running) across the controlvm channel to a remote
+ * environment. One or more controlvm responses will be sent as a result,
+ * each carrying file data in its payload. Only the last controlvm message
+ * will have Flags.partialCompletion==0.
+ *
+ * msgHdr the controlvm message header of the GETFILE request which
+ * we just received
+ * file_request_number this is all data from the GETFILE request that
+ * uplink_index identifies which file is to be transferred
+ * disk_index
+ * file_name
+ * get_contiguous_controlvm_payload function to call when space is needed
+ * in the payload area
+ * controlvm_respond_with_payload function to call to send each controlvm
+ * response containing file data as the
+ * payload; returns FALSE only if the
+ * payload buffer was freed inline
+ * dump_func function to dump context data in
+ * human-readable format
+ *
+ * Returns TRUE iff the file transfer request has been successfully initiated,
+ * or FALSE to indicate failure.
+ */
+BOOL
+filexfer_getFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ GET_CONTIGUOUS_CONTROLVM_PAYLOAD_FUNC
+ get_contiguous_controlvm_payload,
+ CONTROLVM_RESPOND_WITH_PAYLOAD_FUNC
+ controlvm_respond_with_payload,
+ TRANSMITFILE_DUMP_FUNC dump_func);
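+
+#if 0
+/* Illustrative sketch (not compiled): a hypothetical GETFILE handler calling
+ * filexfer_getFile(). The my_* callback names are assumptions for
+ * illustration; the real callbacks live in visorchipset_main.c.
+ */
+static void my_handle_getfile(CONTROLVM_MESSAGE_HEADER *hdr,
+			      u64 file_request_number,
+			      uint uplink_index, uint disk_index,
+			      char *file_name)
+{
+	if (!filexfer_getFile(hdr, file_request_number,
+			      uplink_index, disk_index, file_name,
+			      my_get_payload_space,	/* hypothetical */
+			      my_respond_with_payload,	/* hypothetical */
+			      my_dump_ctx))		/* hypothetical */
+		LOGERR("GETFILE could not be initiated");
+	/* on success, file data is sent back via the supplied callbacks */
+}
+#endif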
+
+/* Call to create a file in the local filesystem (i.e., in the environment
+ * where this driver is running) from data received as payload in
+ * controlvm channel messages from a remote environment. 1 or more controlvm
+ * messages will be received for this transfer, and only the last will have
+ * Flags.partialCompletion==0.
+ *
+ * msgHdr the controlvm message header of the PUTFILE request which
+ * we just received
+ * file_request_number this is all data from the PUTFILE request that
+ * uplink_index identifies which file is to be created in the local
+ * disk_index filesystem
+ * file_name
+ * init_context function to call to initialize the
+ * <req_context_bytes>-sized storage area returned by
+ * this func; note that it would NOT be sufficient to
+ * allow the caller to initialize this upon return, as
+ * the other user-supplied callbacks might have
+ * already been called by then
+ * get_controlvm_filedata function to call to obtain more data for the file
+ * being written; refer to get_controlvm_filedata()
+ * in visorchipset_main.c for a complete description
+ * of parameters
+ * controlvm_end_putFile function to call to indicate that creation of the
+ * local file has completed; set <response> to a
+ * negative value to indicate an error
+ * dump_func function to dump context data in human-readable
+ * format
+ *
+ * Returns a pointer to a dynamically-allocated storage area of size
+ * <req_context_bytes> which the caller can use, or NULL for error. The
+ * caller should NEVER free the returned pointer, but should expect to receive
+ * it as the <ctx> argument when callback functions are called.
+ */
+void *filexfer_putFile(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u64 file_request_number,
+ uint uplink_index,
+ uint disk_index,
+ char *file_name,
+ TRANSMITFILE_INIT_CONTEXT_FUNC init_context,
+ GET_CONTROLVM_FILEDATA_FUNC get_controlvm_filedata,
+ CONTROLVM_RESPOND_FUNC controlvm_end_putFile,
+ TRANSMITFILE_DUMP_FUNC dump_func);
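+
+#if 0
+/* Illustrative sketch (not compiled): a hypothetical PUTFILE handler. It only
+ * exercises the contract documented above: the returned pointer is a
+ * req_context_bytes-sized area owned by filexfer.o, initialized through the
+ * init_context callback and later passed back as <ctx> to the other
+ * callbacks. All my_* names are assumptions for illustration.
+ */
+static void my_handle_putfile(CONTROLVM_MESSAGE_HEADER *hdr,
+			      u64 file_request_number,
+			      uint uplink_index, uint disk_index,
+			      char *file_name)
+{
+	void *ctx = filexfer_putFile(hdr, file_request_number,
+				     uplink_index, disk_index, file_name,
+				     my_init_ctx,	/* hypothetical */
+				     my_get_filedata,	/* hypothetical */
+				     my_end_putfile,	/* hypothetical */
+				     my_dump_ctx);	/* hypothetical */
+	if (!ctx)
+		LOGERR("PUTFILE could not be initiated");
+	/* do NOT free ctx; filexfer.o owns it and frees it when done */
+}
+#endif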
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/globals.h b/drivers/staging/unisys/visorchipset/globals.h
new file mode 100644
index 000000000000..a0e6d4fc03ae
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/globals.h
@@ -0,0 +1,45 @@
+/* globals.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+
+#ifndef __VISORCHIPSET_GLOBALS_H__
+#define __VISORCHIPSET_GLOBALS_H__
+
+#include "uniklog.h"
+#include "diagnostics/appos_subsystems.h"
+#include "timskmod.h"
+#include "visorchipset.h"
+#include "visorchipset_umode.h"
+#include "version.h"
+
+#define MYDRVNAME "visorchipset"
+
+
+/* module parameters */
+
+extern int visorchipset_testvnic;
+extern int visorchipset_testvnicclient;
+extern int visorchipset_testmsg;
+extern int visorchipset_major;
+extern int visorchipset_serverregwait;
+extern int visorchipset_clientregwait;
+extern int visorchipset_testteardown;
+extern int visorchipset_disable_controlvm;
+extern int visorchipset_crash_kernel;
+extern int visorchipset_holdchipsetready;
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/parser.c b/drivers/staging/unisys/visorchipset/parser.c
new file mode 100644
index 000000000000..b408d415bd8c
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/parser.c
@@ -0,0 +1,474 @@
+/* parser.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "parser.h"
+#include "memregion.h"
+#include "controlvmchannel.h"
+#include <linux/ctype.h>
+#include <linux/mm.h>
+
+#define MYDRVNAME "visorchipset_parser"
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_parser_c
+
+/* We will refuse to allocate more than this many bytes to copy data from
+ * incoming payloads. This serves as a throttling mechanism.
+ */
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
+static ulong Controlvm_Payload_Bytes_Buffered;
+
+struct PARSER_CONTEXT_Tag {
+ ulong allocbytes;
+ ulong param_bytes;
+ u8 *curr;
+ ulong bytes_remaining;
+ BOOL byte_stream;
+ char data[0];
+};
+
+static PARSER_CONTEXT *
+parser_init_guts(U64 addr, U32 bytes, BOOL isLocal,
+ BOOL hasStandardPayloadHeader, BOOL *tryAgain)
+{
+ int allocbytes = sizeof(PARSER_CONTEXT) + bytes;
+ PARSER_CONTEXT *rc = NULL;
+ PARSER_CONTEXT *ctx = NULL;
+ MEMREGION *rgn = NULL;
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (tryAgain)
+ *tryAgain = FALSE;
+ if (!hasStandardPayloadHeader)
+ /* alloc and 0 extra byte to ensure payload is
+ * '\0'-terminated
+ */
+ allocbytes++;
+ if ((Controlvm_Payload_Bytes_Buffered + bytes)
+ > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ ERRDRV("%s (%s:%d) - prevented allocation of %d bytes to prevent exceeding throttling max (%d)",
+ __func__, __FILE__, __LINE__, allocbytes,
+ MAX_CONTROLVM_PAYLOAD_BYTES);
+ if (tryAgain)
+ *tryAgain = TRUE;
+ rc = NULL;
+ goto Away;
+ }
+ ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - failed to allocate %d bytes",
+ __func__, __FILE__, __LINE__, allocbytes);
+ if (tryAgain)
+ *tryAgain = TRUE;
+ rc = NULL;
+ goto Away;
+ }
+
+ ctx->allocbytes = allocbytes;
+ ctx->param_bytes = bytes;
+ ctx->curr = NULL;
+ ctx->bytes_remaining = 0;
+ ctx->byte_stream = FALSE;
+ if (isLocal) {
+ void *p;
+ if (addr > virt_to_phys(high_memory - 1)) {
+ ERRDRV("%s - bad local address (0x%-16.16Lx for %lu)",
+ __func__,
+ (unsigned long long) addr, (ulong) bytes);
+ rc = NULL;
+ goto Away;
+ }
+ p = __va((ulong) (addr));
+ memcpy(ctx->data, p, bytes);
+ } else {
+ rgn = visor_memregion_create(addr, bytes);
+ if (!rgn) {
+ rc = NULL;
+ goto Away;
+ }
+ if (visor_memregion_read(rgn, 0, ctx->data, bytes) < 0) {
+ rc = NULL;
+ goto Away;
+ }
+ }
+ if (!hasStandardPayloadHeader) {
+ ctx->byte_stream = TRUE;
+ rc = ctx;
+ goto Away;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ if (phdr->TotalLength != bytes) {
+ ERRDRV("%s - bad total length %lu (should be %lu)",
+ __func__,
+ (ulong) (phdr->TotalLength), (ulong) (bytes));
+ rc = NULL;
+ goto Away;
+ }
+ if (phdr->TotalLength < phdr->HeaderLength) {
+ ERRDRV("%s - total length < header length (%lu < %lu)",
+ __func__,
+ (ulong) (phdr->TotalLength),
+ (ulong) (phdr->HeaderLength));
+ rc = NULL;
+ goto Away;
+ }
+ if (phdr->HeaderLength < sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)) {
+ ERRDRV("%s - header is too small (%lu < %lu)",
+ __func__,
+ (ulong) (phdr->HeaderLength),
+ (ulong) (sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)));
+ rc = NULL;
+ goto Away;
+ }
+
+ rc = ctx;
+Away:
+ if (rgn) {
+ visor_memregion_destroy(rgn);
+ rgn = NULL;
+ }
+ if (rc)
+ Controlvm_Payload_Bytes_Buffered += ctx->param_bytes;
+ else {
+ if (ctx) {
+ parser_done(ctx);
+ ctx = NULL;
+ }
+ }
+ return rc;
+}
+
+PARSER_CONTEXT *
+parser_init(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain)
+{
+ return parser_init_guts(addr, bytes, isLocal, TRUE, tryAgain);
+}
+
+/* Call this instead of parser_init() if the payload area consists of just
+ * a sequence of bytes, rather than an ULTRA_CONTROLVM_PARAMETERS_HEADER
+ * structure. Afterwards, you can call parser_simpleString_get() or
+ * parser_byteStream_get() to obtain the data.
+ */
+PARSER_CONTEXT *
+parser_init_byteStream(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain)
+{
+ return parser_init_guts(addr, bytes, isLocal, FALSE, tryAgain);
+}
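+
+#if 0
+/* Illustrative sketch (not compiled): how a caller might consume a raw
+ * byte-stream payload, under the assumption that the payload is a
+ * '\0'-terminated string. <addr>, <bytes>, and <local> are whatever came in
+ * with the controlvm message; error handling is abbreviated.
+ */
+static void example_read_string_payload(U64 addr, U32 bytes, BOOL local)
+{
+	BOOL retry = FALSE;
+	PARSER_CONTEXT *ctx = parser_init_byteStream(addr, bytes, local,
+						     &retry);
+	if (ctx) {
+		char *s = parser_simpleString_get(ctx);
+		if (s)
+			LOGINF("payload string: %s", s);
+		parser_done(ctx);	/* releases the buffered bytes */
+	}
+	/* if ctx == NULL and retry == TRUE, try again later (throttled) */
+}
+#endif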
+
+/* Obtain '\0'-terminated copy of string in payload area.
+ */
+char *
+parser_simpleString_get(PARSER_CONTEXT *ctx)
+{
+ if (!ctx->byte_stream)
+ return NULL;
+ return ctx->data; /* note this IS '\0'-terminated, because of
+ * the num of bytes we alloc+clear in
+ * parser_init_byteStream() */
+}
+
+/* Obtain a copy of the buffer in the payload area.
+ */
+void *
+parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes)
+{
+ if (!ctx->byte_stream)
+ return NULL;
+ if (nbytes)
+ *nbytes = ctx->param_bytes;
+ return (void *) ctx->data;
+}
+
+GUID
+parser_id_get(PARSER_CONTEXT *ctx)
+{
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - no context",
+ __func__, __FILE__, __LINE__);
+ return Guid0;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ return phdr->Id;
+}
+
+void
+parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string)
+{
+ ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+
+ if (ctx == NULL) {
+ ERRDRV("%s (%s:%d) - no context",
+ __func__, __FILE__, __LINE__);
+ goto Away;
+ }
+ phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ switch (which_string) {
+ case PARSERSTRING_INITIATOR:
+ ctx->curr = ctx->data + phdr->InitiatorOffset;
+ ctx->bytes_remaining = phdr->InitiatorLength;
+ break;
+ case PARSERSTRING_TARGET:
+ ctx->curr = ctx->data + phdr->TargetOffset;
+ ctx->bytes_remaining = phdr->TargetLength;
+ break;
+ case PARSERSTRING_CONNECTION:
+ ctx->curr = ctx->data + phdr->ConnectionOffset;
+ ctx->bytes_remaining = phdr->ConnectionLength;
+ break;
+ case PARSERSTRING_NAME:
+ ctx->curr = ctx->data + phdr->NameOffset;
+ ctx->bytes_remaining = phdr->NameLength;
+ break;
+ default:
+ ERRDRV("%s - bad which_string %d", __func__, which_string);
+ break;
+ }
+
+Away:
+ return;
+}
+
+void
+parser_done(PARSER_CONTEXT *ctx)
+{
+ if (!ctx)
+ return;
+ Controlvm_Payload_Bytes_Buffered -= ctx->param_bytes;
+ kfree(ctx);
+}
+
+/** Return length of string not counting trailing spaces. */
+static int
+string_length_no_trail(char *s, int len)
+{
+ int i = len - 1;
+ while (i >= 0) {
+ if (!isspace(s[i]))
+ return i + 1;
+ i--;
+ }
+ return 0;
+}
+
+/** Grab the next name and value out of the parameter buffer.
+ * The entire parameter buffer looks like this:
+ * <name>:<value>,<name>:<value>,...\0
+ * (pairs are separated by ',' or ';', values may optionally be quoted,
+ * and the whole buffer is terminated by '\0')
+ * If successful, the next <name> is returned within the supplied
+ * <nam> buffer (the name is always upper-cased), and the corresponding
+ * <value> is returned within a kmalloc()ed buffer, whose pointer is
+ * provided as the return value of this function.
+ * (The total number of bytes allocated is strlen(<value>)+1.)
+ *
+ * NULL is returned to indicate failure, which can occur for several reasons:
+ * - all <name>=<value> pairs have already been processed
+ * - bad parameter
+ * - parameter buffer ends prematurely (couldn't find a ':' or '\0' within
+ * the confines of the parameter buffer)
+ * - the <nam> buffer is not large enough to hold the <name> of the next
+ * parameter
+ */
+void *
+parser_param_get(PARSER_CONTEXT *ctx, char *nam, int namesize)
+{
+ u8 *pscan, *pnam = nam;
+ ulong nscan;
+ int value_length = -1, orig_value_length = -1;
+ void *value = NULL;
+ int i;
+ int closing_quote = 0;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (*pscan == '\0')
+ /* This is the normal return point after you have processed
+ * all of the <name>=<value> pairs in a syntactically-valid
+ * parameter buffer.
+ */
+ return NULL;
+
+ /* skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0)
+ return NULL;
+ }
+
+ while (*pscan != ':') {
+ if (namesize <= 0) {
+ ERRDRV("%s - name too big", __func__);
+ return NULL;
+ }
+ *pnam = toupper(*pscan);
+ pnam++;
+ namesize--;
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input parsing name",
+ __func__);
+ return NULL;
+ }
+ }
+ if (namesize <= 0) {
+ ERRDRV("%s - name too big", __func__);
+ return NULL;
+ }
+ *pnam = '\0';
+ nam[string_length_no_trail(nam, strlen(nam))] = '\0';
+
+ /* point to char immediately after ":" in "<name>:<value>" */
+ pscan++;
+ nscan--;
+ /* skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input looking for value",
+ __func__);
+ return NULL;
+ }
+ }
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input looking for value",
+ __func__);
+ return NULL;
+ }
+ if (*pscan == '\'' || *pscan == '"') {
+ closing_quote = *pscan;
+ pscan++;
+ nscan--;
+ if (nscan == 0) {
+ ERRDRV("%s - unexpected end of input after %c",
+ __func__, closing_quote);
+ return NULL;
+ }
+ }
+
+ /* look for a separator character, terminator character, or
+ * end of data
+ */
+ for (i = 0, value_length = -1; i < nscan; i++) {
+ if (closing_quote) {
+ if (pscan[i] == '\0') {
+ ERRDRV("%s - unexpected end of input parsing quoted value", __func__);
+ return NULL;
+ }
+ if (pscan[i] == closing_quote) {
+ value_length = i;
+ break;
+ }
+ } else
+ if (pscan[i] == ',' || pscan[i] == ';'
+ || pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ }
+ if (value_length < 0) {
+ if (closing_quote) {
+ ERRDRV("%s - unexpected end of input parsing quoted value", __func__);
+ return NULL;
+ }
+ value_length = nscan;
+ }
+ orig_value_length = value_length;
+ if (closing_quote == 0)
+ value_length = string_length_no_trail(pscan, orig_value_length);
+ value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
+ if (value == NULL)
+ return NULL;
+ memcpy(value, pscan, value_length);
+ ((u8 *) (value))[value_length] = '\0';
+
+ pscan += orig_value_length;
+ nscan -= orig_value_length;
+
+ /* skip past separator or closing quote */
+ if (nscan > 0) {
+ if (*pscan != '\0') {
+ pscan++;
+ nscan--;
+ }
+ }
+
+ if (closing_quote && (nscan > 0)) {
+ /* we still need to skip around the real separator if present */
+ /* first, skip whitespace */
+ while (isspace(*pscan)) {
+ pscan++;
+ nscan--;
+ if (nscan == 0)
+ break;
+ }
+ if (nscan > 0) {
+ if (*pscan == ',' || *pscan == ';') {
+ pscan++;
+ nscan--;
+ } else if (*pscan != '\0') {
+ ERRDRV("%s - missing separator after quoted string", __func__);
+ kfree(value);
+ value = NULL;
+ return NULL;
+ }
+ }
+ }
+ ctx->curr = pscan;
+ ctx->bytes_remaining = nscan;
+ return value;
+}
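+
+#if 0
+/* Illustrative sketch (not compiled): one plausible way a caller could walk
+ * the <name>:<value> pairs of a parameter string. The buffer size below is
+ * arbitrary for this sketch; each returned value must be kfree()d by the
+ * caller.
+ */
+static void example_walk_params(PARSER_CONTEXT *ctx)
+{
+	char name[128];		/* arbitrary bound for this sketch */
+	void *value;
+
+	parser_param_start(ctx, PARSERSTRING_NAME);
+	while ((value = parser_param_get(ctx, name, sizeof(name))) != NULL) {
+		LOGINF("param '%s':'%s'", name, (char *) value);
+		kfree(value);	/* value was kmalloc()ed for us */
+	}
+}
+#endif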
+
+void *
+parser_string_get(PARSER_CONTEXT *ctx)
+{
+ u8 *pscan;
+ ulong nscan;
+ int value_length = -1;
+ void *value = NULL;
+ int i;
+
+ if (!ctx)
+ return NULL;
+ pscan = ctx->curr;
+ nscan = ctx->bytes_remaining;
+ if (nscan == 0)
+ return NULL;
+ if (!pscan)
+ return NULL;
+ for (i = 0, value_length = -1; i < nscan; i++)
+ if (pscan[i] == '\0') {
+ value_length = i;
+ break;
+ }
+ if (value_length < 0) /* '\0' was not included in the length */
+ value_length = nscan;
+ value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
+ if (value == NULL)
+ return NULL;
+ if (value_length > 0)
+ memcpy(value, pscan, value_length);
+ ((u8 *) (value))[value_length] = '\0';
+ return value;
+}
diff --git a/drivers/staging/unisys/visorchipset/parser.h b/drivers/staging/unisys/visorchipset/parser.h
new file mode 100644
index 000000000000..a0cc50a533cd
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/parser.h
@@ -0,0 +1,45 @@
+/* parser.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __PARSER_H__
+#define __PARSER_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "channel.h"
+
+typedef enum {
+ PARSERSTRING_INITIATOR,
+ PARSERSTRING_TARGET,
+ PARSERSTRING_CONNECTION,
+ PARSERSTRING_NAME,
+} PARSER_WHICH_STRING;
+
+typedef struct PARSER_CONTEXT_Tag PARSER_CONTEXT;
+
+PARSER_CONTEXT *parser_init(U64 addr, U32 bytes, BOOL isLocal, BOOL *tryAgain);
+PARSER_CONTEXT *parser_init_byteStream(U64 addr, U32 bytes, BOOL isLocal,
+ BOOL *tryAgain);
+void parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string);
+void *parser_param_get(PARSER_CONTEXT *ctx, char *nam, int namesize);
+void *parser_string_get(PARSER_CONTEXT *ctx);
+GUID parser_id_get(PARSER_CONTEXT *ctx);
+char *parser_simpleString_get(PARSER_CONTEXT *ctx);
+void *parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes);
+void parser_done(PARSER_CONTEXT *ctx);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/testing.h b/drivers/staging/unisys/visorchipset/testing.h
new file mode 100644
index 000000000000..a44f5556cb21
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/testing.h
@@ -0,0 +1,41 @@
+/* testing.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHIPSET_TESTING_H__
+#define __VISORCHIPSET_TESTING_H__
+
+#define VISORCHIPSET_TEST_PROC
+#include "globals.h"
+#include "controlvmchannel.h"
+
+void test_produce_test_message(CONTROLVM_MESSAGE *msg, int isLocalTestAddr);
+BOOL test_consume_test_message(CONTROLVM_MESSAGE *msg);
+void test_manufacture_vnic_client_add(void *p);
+void test_manufacture_vnic_client_add_phys(HOSTADDRESS addr);
+void test_manufacture_preamble_messages(void);
+void test_manufacture_device_attach(ulong busNo, ulong devNo);
+void test_manufacture_device_add(ulong busNo, ulong devNo, GUID dataTypeGuid,
+ void *pChannel);
+void test_manufacture_add_bus(ulong busNo, ulong maxDevices,
+ GUID id, u8 *name, BOOL isServer);
+void test_manufacture_device_destroy(ulong busNo, ulong devNo);
+void test_manufacture_bus_destroy(ulong busNo);
+void test_manufacture_detach_externalPort(ulong switchNo, ulong externalPortNo);
+void test_manufacture_detach_internalPort(ulong switchNo, ulong internalPortNo);
+void test_cleanup(void);
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
new file mode 100644
index 000000000000..d4bf203cdfdf
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -0,0 +1,307 @@
+/* visorchipset.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __VISORCHIPSET_H__
+#define __VISORCHIPSET_H__
+
+#include "timskmod.h"
+#include "channel.h"
+#include "controlvmchannel.h"
+#include "parser.h"
+#include "procobjecttree.h"
+#include "vbusdeviceinfo.h"
+#include "vbushelper.h"
+
+/** Describes the state from the perspective of which controlvm messages have
+ * been received for a bus or device.
+ */
+typedef struct {
+ U32 created:1;
+ U32 attached:1;
+ U32 configured:1;
+ U32 running:1;
+ /* Add new fields above. */
+ /* Remaining bits in this 32-bit word are unused. */
+} VISORCHIPSET_STATE;
+
+typedef enum {
+ /** address is guest physical, but outside of the physical memory
+ * region that is controlled by the running OS (this is the normal
+ * address type for Supervisor channels)
+ */
+ ADDRTYPE_localPhysical,
+
+ /** address is guest physical, and within the confines of the
+ * physical memory controlled by the running OS.
+ */
+ ADDRTYPE_localTest,
+} VISORCHIPSET_ADDRESSTYPE;
+
+typedef enum {
+ CRASH_dev,
+ CRASH_bus,
+} CRASH_OBJ_TYPE;
+
+/** Attributes for a particular Supervisor channel.
+ */
+typedef struct {
+ VISORCHIPSET_ADDRESSTYPE addrType;
+ HOSTADDRESS channelAddr;
+ struct InterruptInfo intr;
+ U64 nChannelBytes;
+ GUID channelTypeGuid;
+ GUID channelInstGuid;
+
+} VISORCHIPSET_CHANNEL_INFO;
+
+/** Attributes for a particular Supervisor device.
+ * Any visorchipset client can query these attributes using
+ * visorchipset_get_client_device_info() or
+ * visorchipset_get_server_device_info().
+ */
+typedef struct {
+ struct list_head entry;
+ U32 busNo;
+ U32 devNo;
+ GUID devInstGuid;
+ VISORCHIPSET_STATE state;
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ U32 Reserved1; /* CONTROLVM_ID */
+ U64 Reserved2;
+ U32 switchNo; /* when devState.attached==1 */
+ U32 internalPortNo; /* when devState.attached==1 */
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM_MESSAGE */
+ /** For private use by the bus driver */
+ void *bus_driver_context;
+
+} VISORCHIPSET_DEVICE_INFO;
+
+static inline VISORCHIPSET_DEVICE_INFO *
+finddevice(struct list_head *list, U32 busNo, U32 devNo)
+{
+ VISORCHIPSET_DEVICE_INFO *p;
+
+ list_for_each_entry(p, list, entry) {
+ if (p->busNo == busNo && p->devNo == devNo)
+ return p;
+ }
+ return NULL;
+}
+
+static inline void delbusdevices(struct list_head *list, U32 busNo)
+{
+ VISORCHIPSET_DEVICE_INFO *p;
+
+ list_for_each_entry(p, list, entry) {
+ if (p->busNo == busNo) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ }
+}
+
+/** Attributes for a particular Supervisor bus.
+ * (For a service partition acting as the server for buses/devices, there
+ * is a 1-to-1 relationship between buses and guest partitions.)
+ * Any visorchipset client can query these attributes using
+ * visorchipset_get_client_bus_info() or visorchipset_get_bus_info().
+ */
+typedef struct {
+ struct list_head entry;
+ U32 busNo;
+ VISORCHIPSET_STATE state;
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ GUID partitionGuid;
+ U64 partitionHandle;
+ U8 *name; /* UTF8 */
+ U8 *description; /* UTF8 */
+ U64 Reserved1;
+ U32 Reserved2;
+ MYPROCOBJECT *procObject;
+ struct {
+ U32 server:1;
+ /* Add new fields above. */
+ /* Remaining bits in this 32-bit word are unused. */
+ } flags;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM MsgHdr */
+ /** For private use by the bus driver */
+ void *bus_driver_context;
+ U64 devNo;
+
+} VISORCHIPSET_BUS_INFO;
+
+static inline VISORCHIPSET_BUS_INFO *
+findbus(struct list_head *list, U32 busNo)
+{
+ VISORCHIPSET_BUS_INFO *p;
+
+ list_for_each_entry(p, list, entry) {
+ if (p->busNo == busNo)
+ return p;
+ }
+ return NULL;
+}
+
+/** Attributes for a particular Supervisor switch.
+ */
+typedef struct {
+ U32 switchNo;
+ VISORCHIPSET_STATE state;
+ GUID switchTypeGuid;
+ U8 *authService1;
+ U8 *authService2;
+ U8 *authService3;
+ U8 *securityContext;
+ U64 Reserved;
+ U32 Reserved2; /* CONTROLVM_ID */
+ struct device dev;
+ BOOL dev_exists;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+
+} VISORCHIPSET_SWITCH_INFO;
+
+/** Attributes for a particular Supervisor external port, which is connected
+ * to a specific switch.
+ */
+typedef struct {
+ U32 switchNo;
+ U32 externalPortNo;
+ VISORCHIPSET_STATE state;
+ GUID networkZoneGuid;
+ int pdPort;
+ U8 *ip;
+ U8 *ipNetmask;
+ U8 *ipBroadcast;
+ U8 *ipNetwork;
+ U8 *ipGateway;
+ U8 *ipDNS;
+ U64 Reserved1;
+ U32 Reserved2; /* CONTROLVM_ID */
+ struct device dev;
+ BOOL dev_exists;
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+
+} VISORCHIPSET_EXTERNALPORT_INFO;
+
+/** Attributes for a particular Supervisor internal port, which is how a
+ * device connects to a particular switch.
+ */
+typedef struct {
+ U32 switchNo;
+ U32 internalPortNo;
+ VISORCHIPSET_STATE state;
+ U32 busNo; /* valid only when state.attached == 1 */
+ U32 devNo; /* valid only when state.attached == 1 */
+ U64 Reserved1;
+ U32 Reserved2; /* CONTROLVM_ID */
+ CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
+ MYPROCOBJECT *procObject;
+
+} VISORCHIPSET_INTERNALPORT_INFO;
+
+/* These functions will be called from within visorchipset when certain
+ * events happen. (The implementation of these functions is outside of
+ * visorchipset.)
+ */
+typedef struct {
+ void (*bus_create)(ulong busNo);
+ void (*bus_destroy)(ulong busNo);
+ void (*device_create)(ulong busNo, ulong devNo);
+ void (*device_destroy)(ulong busNo, ulong devNo);
+ void (*device_pause)(ulong busNo, ulong devNo);
+ void (*device_resume)(ulong busNo, ulong devNo);
+ int (*get_channel_info)(GUID typeGuid, ulong *minSize,
+ ulong *maxSize);
+} VISORCHIPSET_BUSDEV_NOTIFIERS;
+
+/* These functions live inside visorchipset, and will be called to indicate
+ * responses to specific events (by code outside of visorchipset).
+ * For now, the value for each response is simply either:
+ * 0 = it worked
+ * -1 = it failed
+ */
+typedef struct {
+ void (*bus_create)(ulong busNo, int response);
+ void (*bus_destroy)(ulong busNo, int response);
+ void (*device_create)(ulong busNo, ulong devNo, int response);
+ void (*device_destroy)(ulong busNo, ulong devNo, int response);
+ void (*device_pause)(ulong busNo, ulong devNo, int response);
+ void (*device_resume)(ulong busNo, ulong devNo, int response);
+} VISORCHIPSET_BUSDEV_RESPONDERS;
+
+/** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this service partition is
+ * to be the server. visorchipset will fill in <responders>, to
+ * indicate functions the bus driver should call to indicate message
+ * responses.
+ */
+void
+visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo);
+
+/** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this service partition is
+ * to be the client. visorchipset will fill in <responders>, to
+ * indicate functions the bus driver should call to indicate message
+ * responses.
+ */
+void
+visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo);
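+
+#if 0
+/* Illustrative sketch (not compiled): how a bus driver might register as the
+ * client for buses/devices and receive the responder functions it must call
+ * once it has handled each notification. All my_* names are assumptions for
+ * illustration.
+ */
+static VISORCHIPSET_BUSDEV_RESPONDERS my_responders;
+static ULTRA_VBUS_DEVICEINFO my_driver_info;
+
+static void my_bus_create(ulong busNo)
+{
+	/* ...set up the new bus..., then report the result (0 = it worked) */
+	(*my_responders.bus_create)(busNo, 0);
+}
+
+static VISORCHIPSET_BUSDEV_NOTIFIERS my_notifiers = {
+	.bus_create = my_bus_create,
+	/* remaining notifier functions omitted in this sketch */
+};
+
+static void my_register_with_visorchipset(void)
+{
+	visorchipset_register_busdev_client(&my_notifiers, &my_responders,
+					    &my_driver_info);
+}
+#endif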
+
+typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (CONTROLVM_MESSAGE *msg,
+ int status);
+
+void visorchipset_device_pause_response(ulong busNo, ulong devNo, int response);
+
+BOOL visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo);
+BOOL visorchipset_get_device_info(ulong busNo, ulong devNo,
+ VISORCHIPSET_DEVICE_INFO *devInfo);
+BOOL visorchipset_get_switch_info(ulong switchNo,
+ VISORCHIPSET_SWITCH_INFO *switchInfo);
+BOOL visorchipset_get_externalport_info(ulong switchNo, ulong externalPortNo,
+ VISORCHIPSET_EXTERNALPORT_INFO
+ *externalPortInfo);
+BOOL visorchipset_set_bus_context(ulong busNo, void *context);
+BOOL visorchipset_set_device_context(ulong busNo, ulong devNo, void *context);
+int visorchipset_chipset_ready(void);
+int visorchipset_chipset_selftest(void);
+int visorchipset_chipset_notready(void);
+void visorchipset_controlvm_respond_reportEvent(CONTROLVM_MESSAGE *msg,
+ void *payload);
+void visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type);
+void *visorchipset_cache_alloc(struct kmem_cache *pool,
+ BOOL ok_to_block, char *fn, int ln);
+void visorchipset_cache_free(struct kmem_cache *pool, void *p,
+ char *fn, int ln);
+
+#if defined(TRANSMITFILE_DEBUG) || defined(DEBUG)
+#define DBG_GETFILE_PAYLOAD(msg, controlvm_header) \
+ LOGINF(msg, \
+ (ulong)controlvm_header.PayloadVmOffset, \
+ (ulong)controlvm_header.PayloadMaxBytes)
+#define DBG_GETFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
+#define DBG_PUTFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
+#else
+#define DBG_GETFILE_PAYLOAD(msg, controlvm_header)
+#define DBG_GETFILE(fmt, ...)
+#define DBG_PUTFILE(fmt, ...)
+#endif
+
+#endif
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
new file mode 100644
index 000000000000..8252ca14695d
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -0,0 +1,2945 @@
+/* visorchipset_main.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "globals.h"
+#include "controlvm.h"
+#include "visorchipset.h"
+#include "procobjecttree.h"
+#include "visorchannel.h"
+#include "periodic_work.h"
+#include "testing.h"
+#include "file.h"
+#include "parser.h"
+#include "uniklog.h"
+#include "uisutils.h"
+#include "guidutils.h"
+#include "controlvmcompletionstatus.h"
+#include "guestlinuxdebug.h"
+#include "filexfer.h"
+
+#include <linux/nls.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
+ * vnic loopback test */
+#define TEST_VNIC_SWITCHNO 1
+#define TEST_VNIC_BUSNO 9
+
+#define MAX_NAME_SIZE 128
+#define MAX_IP_SIZE 50
+#define MAXOUTSTANDINGCHANNELCOMMAND 256
+#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
+#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+
+/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
+ * we switch to slow polling mode. As soon as we get a controlvm
+ * message, we switch back to fast polling mode.
+ */
+#define MIN_IDLE_SECONDS 10
+static ulong Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+static ulong Most_recent_message_jiffies; /* when we got our last
+ * controlvm message */
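+
+#if 0
+/* Illustrative sketch (not compiled): the adaptive-polling idea described
+ * above, in isolation. This fragment only restates the strategy; it is an
+ * assumption that the equivalent logic lives in the periodic work routine
+ * later in this file.
+ */
+static void example_adjust_poll_rate(BOOL got_message)
+{
+	if (got_message) {
+		Most_recent_message_jiffies = jiffies;
+		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+	} else if (time_after(jiffies, Most_recent_message_jiffies +
+			      (MIN_IDLE_SECONDS * HZ))) {
+		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+	}
+}
+#endif
+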
+static inline char *
+NONULLSTR(char *s)
+{
+ if (s)
+ return s;
+ else
+ return "";
+}
+
+static int serverregistered;
+static int clientregistered;
+
+#define MAX_CHIPSET_EVENTS 2
+static U8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
+
+static struct delayed_work Periodic_controlvm_work;
+static struct workqueue_struct *Periodic_controlvm_workqueue;
+static DEFINE_SEMAPHORE(NotifierLock);
+
+typedef struct {
+ CONTROLVM_MESSAGE message;
+ unsigned int crc;
+} MESSAGE_ENVELOPE;
+
+static CONTROLVM_MESSAGE_HEADER g_DiagMsgHdr;
+static CONTROLVM_MESSAGE_HEADER g_ChipSetMsgHdr;
+static CONTROLVM_MESSAGE_HEADER g_DelDumpMsgHdr;
+static const GUID UltraDiagPoolChannelProtocolGuid =
+ ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID;
+/* 0xffffff is an invalid Bus/Device number */
+static ulong g_diagpoolBusNo = 0xffffff;
+static ulong g_diagpoolDevNo = 0xffffff;
+static CONTROLVM_MESSAGE_PACKET g_DeviceChangeStatePacket;
+
+/* Only VNIC and VHBA channels are sent to visorclientbus (aka
+ * "visorhackbus")
+ */
+#define FOR_VISORHACKBUS(channel_type_guid) \
+ ((memcmp(&channel_type_guid, &UltraVnicChannelProtocolGuid, \
+ sizeof(GUID)) == 0) || \
+ (memcmp(&channel_type_guid, &UltraVhbaChannelProtocolGuid, \
+ sizeof(GUID)) == 0))
+#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
+
+#define is_diagpool_channel(channel_type_guid) \
+ (memcmp(&channel_type_guid, \
+ &UltraDiagPoolChannelProtocolGuid, sizeof(GUID)) == 0)
+
+typedef enum {
+ PARTPROP_invalid,
+ PARTPROP_name,
+ PARTPROP_description,
+ PARTPROP_handle,
+ PARTPROP_busNumber,
+ /* add new properties above, but don't forget to change
+ * InitPartitionProperties() and show_partition_property() also...
+ */
+ PARTPROP_last
+} PARTITION_property;
+static const char *PartitionTypeNames[] = { "partition", NULL };
+
+static char *PartitionPropertyNames[PARTPROP_last + 1];
+static void
+InitPartitionProperties(void)
+{
+ char **p = PartitionPropertyNames;
+ p[PARTPROP_invalid] = "";
+ p[PARTPROP_name] = "name";
+ p[PARTPROP_description] = "description";
+ p[PARTPROP_handle] = "handle";
+ p[PARTPROP_busNumber] = "busNumber";
+ p[PARTPROP_last] = NULL;
+}
+
+typedef enum {
+ CTLVMPROP_invalid,
+ CTLVMPROP_physAddr,
+ CTLVMPROP_controlChannelAddr,
+ CTLVMPROP_controlChannelBytes,
+ CTLVMPROP_sparBootPart,
+ CTLVMPROP_sparStoragePart,
+ CTLVMPROP_livedumpLength,
+ CTLVMPROP_livedumpCrc32,
+ /* add new properties above, but don't forget to change
+ * InitControlVmProperties() and show_controlvm_property() also...
+ */
+ CTLVMPROP_last
+} CONTROLVM_property;
+
+static const char *ControlVmTypeNames[] = { "controlvm", NULL };
+
+static char *ControlVmPropertyNames[CTLVMPROP_last + 1];
+static void
+InitControlVmProperties(void)
+{
+ char **p = ControlVmPropertyNames;
+ p[CTLVMPROP_invalid] = "";
+ p[CTLVMPROP_physAddr] = "physAddr";
+ p[CTLVMPROP_controlChannelAddr] = "controlChannelAddr";
+ p[CTLVMPROP_controlChannelBytes] = "controlChannelBytes";
+ p[CTLVMPROP_sparBootPart] = "spar_boot_part";
+ p[CTLVMPROP_sparStoragePart] = "spar_storage_part";
+ p[CTLVMPROP_livedumpLength] = "livedumpLength";
+ p[CTLVMPROP_livedumpCrc32] = "livedumpCrc32";
+ p[CTLVMPROP_last] = NULL;
+}
+
+static MYPROCOBJECT *ControlVmObject;
+static MYPROCTYPE *PartitionType;
+static MYPROCTYPE *ControlVmType;
+
+#define VISORCHIPSET_DIAG_PROC_ENTRY_FN "diagdump"
+static struct proc_dir_entry *diag_proc_dir;
+
+#define VISORCHIPSET_CHIPSET_PROC_ENTRY_FN "chipsetready"
+static struct proc_dir_entry *chipset_proc_dir;
+
+#define VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN "parahotplug"
+static struct proc_dir_entry *parahotplug_proc_dir;
+
+static LIST_HEAD(BusInfoList);
+static LIST_HEAD(DevInfoList);
+
+static struct proc_dir_entry *ProcDir;
+static VISORCHANNEL *ControlVm_channel;
+
+static ssize_t visorchipset_proc_read_writeonly(struct file *file,
+ char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_read_installer(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_installer(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_read_toolaction(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_toolaction(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_read_bootToTool(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static ssize_t proc_write_bootToTool(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+static const struct file_operations proc_installer_fops = {
+ .read = proc_read_installer,
+ .write = proc_write_installer,
+};
+
+static const struct file_operations proc_toolaction_fops = {
+ .read = proc_read_toolaction,
+ .write = proc_write_toolaction,
+};
+
+static const struct file_operations proc_bootToTool_fops = {
+ .read = proc_read_bootToTool,
+ .write = proc_write_bootToTool,
+};
+
+typedef struct {
+ U8 __iomem *ptr; /* pointer to base address of payload pool */
+ U64 offset; /* offset from beginning of controlvm
+ * channel to beginning of payload pool */
+ U32 bytes; /* number of bytes in payload pool */
+} CONTROLVM_PAYLOAD_INFO;
+
+/* Manages the request payload in the controlvm channel */
+static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
+
+static pCHANNEL_HEADER Test_Vnic_channel;
+
+typedef struct {
+ CONTROLVM_MESSAGE_HEADER Dumpcapture_header;
+ CONTROLVM_MESSAGE_HEADER Gettextdump_header;
+ CONTROLVM_MESSAGE_HEADER Dumpcomplete_header;
+ BOOL Gettextdump_outstanding;
+ u32 crc32;
+ ulong length;
+ atomic_t buffers_in_use;
+ ulong destination;
+} LIVEDUMP_INFO;
+/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
+ * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
+ */
+static LIVEDUMP_INFO LiveDump_info;
+
+/* The following globals are used to handle the scenario where we are unable to
+ * offload the payload from a controlvm message due to memory requirements. In
+ * this scenario, we simply stash the controlvm message, then attempt to
+ * process it again the next time controlvm_periodic_work() runs.
+ */
+static CONTROLVM_MESSAGE ControlVm_Pending_Msg;
+static BOOL ControlVm_Pending_Msg_Valid = FALSE;
+
+/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
+ * TRANSMIT_FILE PutFile payloads.
+ */
+static struct kmem_cache *Putfile_buffer_list_pool;
+static const char Putfile_buffer_list_pool_name[] =
+ "controlvm_putfile_buffer_list_pool";
+
+/* This identifies a data buffer that has been received via a controlvm message
+ * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
+ */
+struct putfile_buffer_entry {
+ struct list_head next; /* putfile_buffer_entry list */
+ PARSER_CONTEXT *parser_ctx; /* points to buffer containing input data */
+};
+
+/* List of struct putfile_request *, via next_putfile_request member.
+ * Each entry in this list identifies an outstanding TRANSMIT_FILE
+ * conversation.
+ */
+static LIST_HEAD(Putfile_request_list);
+
+/* This describes a buffer and its current state of transfer (e.g., how many
+ * bytes have already been supplied as putfile data, and how many bytes are
+ * remaining) for a putfile_request.
+ */
+struct putfile_active_buffer {
+ /* a payload from a controlvm message, containing a file data buffer */
+ PARSER_CONTEXT *parser_ctx;
+ /* points within data area of parser_ctx to next byte of data */
+ u8 *pnext;
+ /* # bytes left from <pnext> to the end of this data buffer */
+ size_t bytes_remaining;
+};
+
+#define PUTFILE_REQUEST_SIG 0x0906101302281211
+/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
+ * conversation. Structs of this type are dynamically linked into
+ * <Putfile_request_list>.
+ */
+struct putfile_request {
+ u64 sig; /* PUTFILE_REQUEST_SIG */
+
+ /* header from original TransmitFile request */
+ CONTROLVM_MESSAGE_HEADER controlvm_header;
+ u64 file_request_number; /* from original TransmitFile request */
+
+ /* link to next struct putfile_request */
+ struct list_head next_putfile_request;
+
+ /* most-recent sequence number supplied via a controlvm message */
+ u64 data_sequence_number;
+
+ /* head of putfile_buffer_entry list, which describes the data to be
+ * supplied as putfile data;
+ * - this list is added to when controlvm messages come in that supply
+ * file data
+ * - this list is removed from via the hotplug program that is actually
+ * consuming these buffers to write as file data */
+ struct list_head input_buffer_list;
+ spinlock_t req_list_lock; /* lock for input_buffer_list */
+
+ /* waiters for input_buffer_list to go non-empty */
+ wait_queue_head_t input_buffer_wq;
+
+ /* data not yet read within current putfile_buffer_entry */
+ struct putfile_active_buffer active_buf;
+
+ /* <0 = failed, 0 = in-progress, >0 = successful; */
+ /* note that this must be set with req_list_lock, and if you set <0, */
+ /* it is your responsibility to also free up all of the other objects */
+ /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
+ /* before releasing the lock */
+ int completion_status;
+};
+
+static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
+
+struct parahotplug_request {
+ struct list_head list;
+ int id;
+ unsigned long expiration;
+ CONTROLVM_MESSAGE msg;
+};
+
+static LIST_HEAD(Parahotplug_request_list);
+static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
+static void parahotplug_process_list(void);
+
+/* Notifier function sets filled in when the server and client bus drivers
+ * register via visorchipset_register_busdev_server() / _client().
+ */
+static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Server_Notifiers;
+static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Client_Notifiers;
+
+static void bus_create_response(ulong busNo, int response);
+static void bus_destroy_response(ulong busNo, int response);
+static void device_create_response(ulong busNo, ulong devNo, int response);
+static void device_destroy_response(ulong busNo, ulong devNo, int response);
+static void device_resume_response(ulong busNo, ulong devNo, int response);
+
+static VISORCHIPSET_BUSDEV_RESPONDERS BusDev_Responders = {
+ .bus_create = bus_create_response,
+ .bus_destroy = bus_destroy_response,
+ .device_create = device_create_response,
+ .device_destroy = device_destroy_response,
+ .device_pause = visorchipset_device_pause_response,
+ .device_resume = device_resume_response,
+};
+
+/* info for /dev/visorchipset */
+static dev_t MajorDev = -1; /**< indicates major num for device */
+
+/* /sys/devices/platform/visorchipset */
+static struct platform_device Visorchipset_platform_device = {
+ .name = "visorchipset",
+ .id = -1,
+};
+
+/* Function prototypes */
+static void controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response);
+static void controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response,
+ ULTRA_CHIPSET_FEATURE features);
+static void controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *
+ msgHdr, int response,
+ ULTRA_SEGMENT_STATE state);
+
+static void
+show_partition_property(struct seq_file *f, void *ctx, int property)
+{
+ VISORCHIPSET_BUS_INFO *info = (VISORCHIPSET_BUS_INFO *) (ctx);
+
+ switch (property) {
+ case PARTPROP_name:
+ seq_printf(f, "%s\n", NONULLSTR(info->name));
+ break;
+ case PARTPROP_description:
+ seq_printf(f, "%s\n", NONULLSTR(info->description));
+ break;
+ case PARTPROP_handle:
+ seq_printf(f, "0x%-16.16Lx\n", info->partitionHandle);
+ break;
+ case PARTPROP_busNumber:
+ seq_printf(f, "%d\n", info->busNo);
+ break;
+ default:
+ seq_printf(f, "(%d??)\n", property);
+ break;
+ }
+}
+
+static void
+show_controlvm_property(struct seq_file *f, void *ctx, int property)
+{
+ /* Note: ctx is not needed since we only have 1 controlvm channel */
+ switch (property) {
+ case CTLVMPROP_physAddr:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else
+ seq_printf(f, "0x%-16.16Lx\n",
+ visorchannel_get_physaddr
+ (ControlVm_channel));
+ break;
+ case CTLVMPROP_controlChannelAddr:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else {
+ GUEST_PHYSICAL_ADDRESS addr = 0;
+ visorchannel_read(ControlVm_channel,
+ offsetof
+ (ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ gpControlChannel), &addr,
+ sizeof(addr));
+ seq_printf(f, "0x%-16.16Lx\n", (u64) (addr));
+ }
+ break;
+ case CTLVMPROP_controlChannelBytes:
+ if (ControlVm_channel == NULL)
+ seq_puts(f, "0x0\n");
+ else {
+ U32 bytes = 0;
+ visorchannel_read(ControlVm_channel,
+ offsetof
+ (ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ ControlChannelBytes), &bytes,
+ sizeof(bytes));
+ seq_printf(f, "%lu\n", (ulong) (bytes));
+ }
+ break;
+ case CTLVMPROP_sparBootPart:
+ seq_puts(f, "0:0:0:0/1\n");
+ break;
+ case CTLVMPROP_sparStoragePart:
+ seq_puts(f, "0:0:0:0/2\n");
+ break;
+ case CTLVMPROP_livedumpLength:
+ seq_printf(f, "%lu\n", LiveDump_info.length);
+ break;
+ case CTLVMPROP_livedumpCrc32:
+ seq_printf(f, "%lu\n", (ulong) LiveDump_info.crc32);
+ break;
+ default:
+ seq_printf(f, "(%d??)\n", property);
+ break;
+ }
+}
+
+static void
+proc_Init(void)
+{
+ if (ProcDir == NULL) {
+ ProcDir = proc_mkdir(MYDRVNAME, NULL);
+ if (ProcDir == NULL) {
+ LOGERR("failed to create /proc directory %s",
+ MYDRVNAME);
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ }
+ }
+}
+
+static void
+proc_DeInit(void)
+{
+ if (ProcDir != NULL)
+ remove_proc_entry(MYDRVNAME, NULL);
+ ProcDir = NULL;
+}
+
+#if 0
+static void
+testUnicode(void)
+{
+ wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
+ char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
+ wchar_t unicode2[99];
+ int chrs;
+
+ /* NOTE: Either due to a bug, or feature I don't understand, the
+ * kernel utf8_mbstowcs() and utf8_wcstombs() do NOT copy the
+ * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
+ */
+
+ LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
+ LOGINF("utf8_wcstombs=%d",
+ chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
+ if (chrs >= 0)
+ s[chrs] = '\0'; /* GRRRRRRRR */
+ LOGINF("s='%s'", s);
+ LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
+ if (chrs >= 0)
+ unicode2[chrs] = 0; /* GRRRRRRRR */
+ if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
+ LOGINF("strings match... good");
+ else
+ LOGINF("strings did not match!!");
+}
+#endif
+
+static void
+busInfo_clear(void *v)
+{
+ VISORCHIPSET_BUS_INFO *p = (VISORCHIPSET_BUS_INFO *) (v);
+
+ if (p->procObject) {
+ visor_proc_DestroyObject(p->procObject);
+ p->procObject = NULL;
+ }
+ kfree(p->name);
+ p->name = NULL;
+
+ kfree(p->description);
+ p->description = NULL;
+
+ p->state.created = 0;
+ memset(p, 0, sizeof(VISORCHIPSET_BUS_INFO));
+}
+
+static void
+devInfo_clear(void *v)
+{
+ VISORCHIPSET_DEVICE_INFO *p = (VISORCHIPSET_DEVICE_INFO *) (v);
+ p->state.created = 0;
+ memset(p, 0, sizeof(VISORCHIPSET_DEVICE_INFO));
+}
+
+static U8
+check_chipset_events(void)
+{
+ int i;
+ U8 send_msg = 1;
+ /* Check events to determine if response should be sent */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ send_msg &= chipset_events[i];
+ return send_msg;
+}
+
+static void
+clear_chipset_events(void)
+{
+ int i;
+ /* Clear chipset_events */
+ for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
+ chipset_events[i] = 0;
+}
+
+void
+visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo)
+{
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (notifiers == NULL) {
+ memset(&BusDev_Server_Notifiers, 0,
+ sizeof(BusDev_Server_Notifiers));
+ serverregistered = 0; /* clear flag */
+ } else {
+ BusDev_Server_Notifiers = *notifiers;
+ serverregistered = 1; /* set flag */
+ }
+ if (responders)
+ *responders = BusDev_Responders;
+ if (driverInfo)
+ BusDeviceInfo_Init(driverInfo, "chipset", "visorchipset",
+ VERSION, NULL, __DATE__, __TIME__);
+
+ UNLOCKSEM(&NotifierLock);
+}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
+
+void
+visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
+ VISORCHIPSET_BUSDEV_RESPONDERS *responders,
+ ULTRA_VBUS_DEVICEINFO *driverInfo)
+{
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (notifiers == NULL) {
+ memset(&BusDev_Client_Notifiers, 0,
+ sizeof(BusDev_Client_Notifiers));
+ clientregistered = 0; /* clear flag */
+ } else {
+ BusDev_Client_Notifiers = *notifiers;
+ clientregistered = 1; /* set flag */
+ }
+ if (responders)
+ *responders = BusDev_Responders;
+ if (driverInfo)
+ BusDeviceInfo_Init(driverInfo, "chipset(bolts)", "visorchipset",
+ VERSION, NULL, __DATE__, __TIME__);
+ UNLOCKSEM(&NotifierLock);
+}
+EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
+
+static void
+cleanup_controlvm_structures(void)
+{
+ VISORCHIPSET_BUS_INFO *bi;
+ VISORCHIPSET_DEVICE_INFO *di;
+
+ list_for_each_entry(bi, &BusInfoList, entry) {
+ busInfo_clear(bi);
+ list_del(&bi->entry);
+ kfree(bi);
+ }
+
+ list_for_each_entry(di, &DevInfoList, entry) {
+ devInfo_clear(di);
+ list_del(&di->entry);
+ kfree(di);
+ }
+}
+
+static void
+chipset_init(CONTROLVM_MESSAGE *inmsg)
+{
+ static int chipset_inited;
+ ULTRA_CHIPSET_FEATURE features = 0;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+ if (chipset_inited) {
+ LOGERR("CONTROLVM_CHIPSET_INIT Failed: Already Done.");
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ /* Set features to indicate we support parahotplug (if Command
+ * also supports it). */
+ features =
+ inmsg->cmd.initChipset.
+ features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+
+ /* Set the "reply" bit so Command knows this is a
+ * features-aware driver. */
+ features |= ULTRA_CHIPSET_FEATURE_REPLY;
+
+Away:
+ if (rc < 0)
+ cleanup_controlvm_structures();
+ if (inmsg->hdr.Flags.responseExpected)
+ controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+}
+
+static void
+controlvm_init_response(CONTROLVM_MESSAGE *msg,
+ CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+{
+ memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
+ memcpy(&msg->hdr, msgHdr, sizeof(CONTROLVM_MESSAGE_HEADER));
+ msg->hdr.PayloadBytes = 0;
+ msg->hdr.PayloadVmOffset = 0;
+ msg->hdr.PayloadMaxBytes = 0;
+ if (response < 0) {
+ msg->hdr.Flags.failed = 1;
+ msg->hdr.CompletionStatus = (U32) (-response);
+ }
+}
+
+static void
+controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
+ * back the deviceChangeState structure in the packet. */
+ if (msgHdr->Id == CONTROLVM_DEVICE_CHANGESTATE
+ && g_DeviceChangeStatePacket.deviceChangeState.busNo ==
+ g_diagpoolBusNo
+ && g_DeviceChangeStatePacket.deviceChangeState.devNo ==
+ g_diagpoolDevNo)
+ outmsg.cmd = g_DeviceChangeStatePacket;
+ if (outmsg.hdr.Flags.testMessage == 1) {
+ LOGINF("%s controlvm_msg=0x%x response=%d for test message",
+ __func__, outmsg.hdr.Id, response);
+ return;
+ }
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+static void
+controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+ ULTRA_CHIPSET_FEATURE features)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ outmsg.cmd.initChipset.features = features;
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+static void
+controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response, ULTRA_SEGMENT_STATE state)
+{
+ CONTROLVM_MESSAGE outmsg;
+ if (!ControlVm_channel)
+ return;
+ controlvm_init_response(&outmsg, msgHdr, response);
+ outmsg.cmd.deviceChangeState.state = state;
+ outmsg.cmd.deviceChangeState.flags.physicalDevice = 1;
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+}
+
+void
+visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
+{
+ U32 localSavedCrashMsgOffset;
+ U16 localSavedCrashMsgCount;
+
+ /* get saved message count */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgCount),
+ &localSavedCrashMsgCount, sizeof(U16)) < 0) {
+ LOGERR("failed to get Saved Message Count");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
+ LOGERR("Saved Message Count incorrect %d",
+ localSavedCrashMsgCount);
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ localSavedCrashMsgCount,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved crash message offset */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgOffset),
+ &localSavedCrashMsgOffset, sizeof(U32)) < 0) {
+ LOGERR("failed to get Saved Message Offset");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (type == CRASH_bus) {
+ if (visorchannel_write(ControlVm_channel,
+ localSavedCrashMsgOffset,
+ msg, sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
+ POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ } else {
+ if (visorchannel_write(ControlVm_channel,
+ localSavedCrashMsgOffset +
+ sizeof(CONTROLVM_MESSAGE), msg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+ LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
+ POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(visorchipset_save_message);
+
+static void
+bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
+{
+ VISORCHIPSET_BUS_INFO *p = NULL;
+ BOOL need_clear = FALSE;
+
+ p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("internal error busNo=%lu", busNo);
+ return;
+ }
+ if (response < 0) {
+ if ((cmdId == CONTROLVM_BUS_CREATE) &&
+ (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
+ /* undo the row we just created... */
+ delbusdevices(&DevInfoList, busNo);
+ } else {
+ if (cmdId == CONTROLVM_BUS_CREATE)
+ p->state.created = 1;
+ if (cmdId == CONTROLVM_BUS_DESTROY)
+ need_clear = TRUE;
+ }
+
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("bus_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != (U32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+ controlvm_respond(&p->pendingMsgHdr, response);
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ if (need_clear) {
+ busInfo_clear(p);
+ delbusdevices(&DevInfoList, busNo);
+ }
+}
+
+static void
+device_changestate_responder(CONTROLVM_ID cmdId,
+ ulong busNo, ulong devNo, int response,
+ ULTRA_SEGMENT_STATE responseState)
+{
+ VISORCHIPSET_DEVICE_INFO *p = NULL;
+ CONTROLVM_MESSAGE outmsg;
+
+ if (!ControlVm_channel)
+ return;
+
+ p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
+ return;
+ }
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("device_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+
+ controlvm_init_response(&outmsg, &p->pendingMsgHdr, response);
+
+ outmsg.cmd.deviceChangeState.busNo = busNo;
+ outmsg.cmd.deviceChangeState.devNo = devNo;
+ outmsg.cmd.deviceChangeState.state = responseState;
+
+ if (!visorchannel_signalinsert(ControlVm_channel,
+ CONTROLVM_QUEUE_REQUEST, &outmsg)) {
+ LOGERR("signalinsert failed!");
+ return;
+ }
+
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+}
+
+static void
+device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
+{
+ VISORCHIPSET_DEVICE_INFO *p = NULL;
+ BOOL need_clear = FALSE;
+
+ p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
+ return;
+ }
+ if (response >= 0) {
+ if (cmdId == CONTROLVM_DEVICE_CREATE)
+ p->state.created = 1;
+ if (cmdId == CONTROLVM_DEVICE_DESTROY)
+ need_clear = TRUE;
+ }
+
+ if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ LOGERR("device_responder no pending msg");
+ return; /* no controlvm response needed */
+ }
+ if (p->pendingMsgHdr.Id != (U32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ return;
+ }
+ controlvm_respond(&p->pendingMsgHdr, response);
+ p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ if (need_clear)
+ devInfo_clear(p);
+}
+
+static void
+bus_epilog(U32 busNo,
+ U32 cmd, CONTROLVM_MESSAGE_HEADER *msgHdr,
+ int response, BOOL needResponse)
+{
+ BOOL notified = FALSE;
+
+ VISORCHIPSET_BUS_INFO *pBusInfo = findbus(&BusInfoList, busNo);
+
+ if (!pBusInfo) {
+ LOGERR("HUH? bad busNo=%d", busNo);
+ return;
+ }
+ if (needResponse) {
+ memcpy(&pBusInfo->pendingMsgHdr, msgHdr,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ } else
+ pBusInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (response == CONTROLVM_RESP_SUCCESS) {
+ switch (cmd) {
+ case CONTROLVM_BUS_CREATE:
+			/* We can't tell from the bus_create
+			 * information which of our 2 bus flavors the
+			 * devices on this bus will ultimately end up
+			 * on.  FORTUNATELY, it turns out it is
+			 * harmless to send the bus_create to both of
+			 * them.  We can narrow things down a little
+			 * bit, though, because we know:
+			 * - BusDev_Server can handle either server or
+			 *   client devices
+			 * - BusDev_Client can handle ONLY client
+			 *   devices
+			 */
+ if (BusDev_Server_Notifiers.bus_create) {
+ (*BusDev_Server_Notifiers.bus_create) (busNo);
+ notified = TRUE;
+ }
+ if ((!pBusInfo->flags.server) /*client */ &&
+ BusDev_Client_Notifiers.bus_create) {
+ (*BusDev_Client_Notifiers.bus_create) (busNo);
+ notified = TRUE;
+ }
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ if (BusDev_Server_Notifiers.bus_destroy) {
+ (*BusDev_Server_Notifiers.bus_destroy) (busNo);
+ notified = TRUE;
+ }
+ if ((!pBusInfo->flags.server) /*client */ &&
+ BusDev_Client_Notifiers.bus_destroy) {
+ (*BusDev_Client_Notifiers.bus_destroy) (busNo);
+ notified = TRUE;
+ }
+ break;
+ }
+ }
+ if (notified)
+ /* The callback function just called above is responsible
+ * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * function, which will call bus_responder()
+ */
+ ;
+ else
+ bus_responder(cmd, busNo, response);
+ UNLOCKSEM(&NotifierLock);
+}
+
+static void
+device_epilog(U32 busNo, U32 devNo, ULTRA_SEGMENT_STATE state, U32 cmd,
+ CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+ BOOL needResponse, BOOL for_visorbus)
+{
+ VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers = NULL;
+ BOOL notified = FALSE;
+
+ VISORCHIPSET_DEVICE_INFO *pDevInfo =
+ finddevice(&DevInfoList, busNo, devNo);
+ char *envp[] = {
+		"SPARSP_DIAGPOOL_PAUSED_STATE=1",
+ NULL
+ };
+
+ if (!pDevInfo) {
+ LOGERR("HUH? bad busNo=%d, devNo=%d", busNo, devNo);
+ return;
+ }
+ if (for_visorbus)
+ notifiers = &BusDev_Server_Notifiers;
+ else
+ notifiers = &BusDev_Client_Notifiers;
+ if (needResponse) {
+ memcpy(&pDevInfo->pendingMsgHdr, msgHdr,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ } else
+ pDevInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+
+ LOCKSEM_UNINTERRUPTIBLE(&NotifierLock);
+ if (response >= 0) {
+ switch (cmd) {
+ case CONTROLVM_DEVICE_CREATE:
+ if (notifiers->device_create) {
+ (*notifiers->device_create) (busNo, devNo);
+ notified = TRUE;
+ }
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ /* ServerReady / ServerRunning / SegmentStateRunning */
+ if (state.Alive == SegmentStateRunning.Alive &&
+ state.Operating == SegmentStateRunning.Operating) {
+ if (notifiers->device_resume) {
+ (*notifiers->device_resume) (busNo,
+ devNo);
+ notified = TRUE;
+ }
+ }
+ /* ServerNotReady / ServerLost / SegmentStateStandby */
+ else if (state.Alive == SegmentStateStandby.Alive &&
+ state.Operating ==
+ SegmentStateStandby.Operating) {
+ /* technically this is standby case
+ * where server is lost
+ */
+ if (notifiers->device_pause) {
+ (*notifiers->device_pause) (busNo,
+ devNo);
+ notified = TRUE;
+ }
+ } else if (state.Alive == SegmentStatePaused.Alive &&
+ state.Operating ==
+ SegmentStatePaused.Operating) {
+				/* this is a lite pause where the
+				 * channel is still valid; we just
+				 * 'pause' it
+				 */
+ if (busNo == g_diagpoolBusNo
+ && devNo == g_diagpoolDevNo) {
+ LOGINF("DEVICE_CHANGESTATE(DiagpoolChannel busNo=%d devNo=%d is pausing...)",
+ busNo, devNo);
+ /* this will trigger the
+ * diag_shutdown.sh script in
+ * the visorchipset hotplug */
+ kobject_uevent_env
+ (&Visorchipset_platform_device.dev.
+ kobj, KOBJ_ONLINE, envp);
+ }
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ if (notifiers->device_destroy) {
+ (*notifiers->device_destroy) (busNo, devNo);
+ notified = TRUE;
+ }
+ break;
+ }
+ }
+ if (notified)
+ /* The callback function just called above is responsible
+ * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * function, which will call device_responder()
+ */
+ ;
+ else
+ device_responder(cmd, busNo, devNo, response);
+ UNLOCKSEM(&NotifierLock);
+}
+
+static void
+bus_create(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->createBus.busNo;
+ int rc = CONTROLVM_RESP_SUCCESS;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (pBusInfo && (pBusInfo->state.created == 1)) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu already exists",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ pBusInfo = kzalloc(sizeof(VISORCHIPSET_BUS_INFO), GFP_KERNEL);
+ if (pBusInfo == NULL) {
+ LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+
+ INIT_LIST_HEAD(&pBusInfo->entry);
+ pBusInfo->busNo = busNo;
+ pBusInfo->devNo = cmd->createBus.deviceCount;
+
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ if (inmsg->hdr.Flags.testMessage == 1)
+ pBusInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ else
+ pBusInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+
+ pBusInfo->flags.server = inmsg->hdr.Flags.server;
+ pBusInfo->chanInfo.channelAddr = cmd->createBus.channelAddr;
+ pBusInfo->chanInfo.nChannelBytes = cmd->createBus.channelBytes;
+ pBusInfo->chanInfo.channelTypeGuid = cmd->createBus.busDataTypeGuid;
+ pBusInfo->chanInfo.channelInstGuid = cmd->createBus.busInstGuid;
+
+ list_add(&pBusInfo->entry, &BusInfoList);
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+bus_destroy(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroyBus.busNo;
+ VISORCHIPSET_BUS_INFO *pBusInfo;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu invalid", busNo);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu already destroyed",
+ busNo);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->configureBus.busNo;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+ char s[99];
+
+ POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu invalid",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: Invalid bus %lu - not created yet",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ /* TBD - add this check to other commands also... */
+ if (pBusInfo->pendingMsgHdr.Id != CONTROLVM_INVALID) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
+ busNo, (uint) pBusInfo->pendingMsgHdr.Id);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+ goto Away;
+ }
+
+ pBusInfo->partitionHandle = cmd->configureBus.guestHandle;
+ pBusInfo->partitionGuid = parser_id_get(parser_ctx);
+ parser_param_start(parser_ctx, PARSERSTRING_NAME);
+ pBusInfo->name = parser_string_get(parser_ctx);
+
+ visorchannel_GUID_id(&pBusInfo->partitionGuid, s);
+ pBusInfo->procObject =
+ visor_proc_CreateObject(PartitionType, s, (void *) (pBusInfo));
+ if (pBusInfo->procObject == NULL) {
+ LOGERR("CONTROLVM_BUS_CONFIGURE Failed: busNo=%lu failed to create /proc entry",
+ busNo);
+ POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+ POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+Away:
+ bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
+ rc, inmsg->hdr.Flags.responseExpected == 1);
+}
+
+static void
+my_device_create(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->createDevice.busNo;
+ ulong devNo = cmd->createDevice.devNo;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (pDevInfo && (pDevInfo->state.created == 1)) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu already exists",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ goto Away;
+ }
+ pBusInfo = findbus(&BusInfoList, busNo);
+ if (!pBusInfo) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - out of range",
+ busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ if (pBusInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - not created yet",
+ busNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+ goto Away;
+ }
+ pDevInfo = kzalloc(sizeof(VISORCHIPSET_DEVICE_INFO), GFP_KERNEL);
+ if (pDevInfo == NULL) {
+		LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kzalloc failed",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ goto Away;
+ }
+
+ INIT_LIST_HEAD(&pDevInfo->entry);
+ pDevInfo->busNo = busNo;
+ pDevInfo->devNo = devNo;
+ pDevInfo->devInstGuid = cmd->createDevice.devInstGuid;
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+
+ if (inmsg->hdr.Flags.testMessage == 1)
+ pDevInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ else
+ pDevInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+ pDevInfo->chanInfo.channelAddr = cmd->createDevice.channelAddr;
+ pDevInfo->chanInfo.nChannelBytes = cmd->createDevice.channelBytes;
+ pDevInfo->chanInfo.channelTypeGuid = cmd->createDevice.dataTypeGuid;
+ pDevInfo->chanInfo.intr = cmd->createDevice.intr;
+ list_add(&pDevInfo->entry, &DevInfoList);
+ POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
+ POSTCODE_SEVERITY_INFO);
+Away:
+	/* get the bus and devNo for DiagPool channel; pDevInfo may be
+	 * NULL here if we bailed out before allocating it
+	 */
+	if (pDevInfo &&
+	    is_diagpool_channel(pDevInfo->chanInfo.channelTypeGuid)) {
+		g_diagpoolBusNo = busNo;
+		g_diagpoolDevNo = devNo;
+		LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
+		       g_diagpoolBusNo, g_diagpoolDevNo);
+	}
+	device_epilog(busNo, devNo, SegmentStateRunning,
+		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
+		      inmsg->hdr.Flags.responseExpected == 1,
+		      pDevInfo ?
+		      FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid) : FALSE);
+}
+
+static void
+my_device_changestate(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->deviceChangeState.busNo;
+ ulong devNo = cmd->deviceChangeState.devNo;
+ ULTRA_SEGMENT_STATE state = cmd->deviceChangeState.state;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (!pDevInfo) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (doesn't exist)",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto Away;
+ }
+ if (pDevInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (not created)",
+ busNo, devNo);
+ POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_SEVERITY_ERR);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ }
+Away:
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
+ device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
+ &inmsg->hdr, rc,
+ inmsg->hdr.Flags.responseExpected == 1,
+ FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+}
+
+static void
+my_device_destroy(CONTROLVM_MESSAGE *inmsg)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroyDevice.busNo;
+ ulong devNo = cmd->destroyDevice.devNo;
+ VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ pDevInfo = finddevice(&DevInfoList, busNo, devNo);
+ if (!pDevInfo) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu invalid",
+ busNo, devNo);
+ rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ goto Away;
+ }
+ if (pDevInfo->state.created == 0) {
+ LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu already destroyed",
+ busNo, devNo);
+ rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+
+Away:
+ if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
+ device_epilog(busNo, devNo, SegmentStateRunning,
+ CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
+ inmsg->hdr.Flags.responseExpected == 1,
+ FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+}
+
+/* When provided with the physical address of the controlvm channel
+ * (phys_addr), the offset to the payload area we need to manage
+ * (offset), and the size of this payload area (bytes), fills in the
+ * CONTROLVM_PAYLOAD_INFO struct.  Returns CONTROLVM_RESP_SUCCESS on
+ * success or a negative CONTROLVM_RESP_ERROR_xxx code on failure.
+ */
+static int
+initialize_controlvm_payload_info(HOSTADDRESS phys_addr, U64 offset, U32 bytes,
+ CONTROLVM_PAYLOAD_INFO *info)
+{
+ U8 __iomem *payload = NULL;
+ int rc = CONTROLVM_RESP_SUCCESS;
+
+ if (info == NULL) {
+		LOGERR("HUH? CONTROLVM_PAYLOAD_INIT Failed: Programmer check at %s:%d",
+ __FILE__, __LINE__);
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto Away;
+ }
+ memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
+ if ((offset == 0) || (bytes == 0)) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: RequestPayloadOffset=%llu RequestPayloadBytes=%llu!",
+ (u64) offset, (u64) bytes);
+ rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+ goto Away;
+ }
+ payload = ioremap_cache(phys_addr + offset, bytes);
+ if (payload == NULL) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: ioremap_cache %llu for %llu bytes failed",
+ (u64) offset, (u64) bytes);
+ rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ goto Away;
+ }
+
+ info->offset = offset;
+ info->bytes = bytes;
+ info->ptr = payload;
+ LOGINF("offset=%llu, bytes=%lu, ptr=%p",
+ (u64) (info->offset), (ulong) (info->bytes), info->ptr);
+
+Away:
+ if (rc < 0) {
+ if (payload != NULL) {
+ iounmap(payload);
+ payload = NULL;
+ }
+ }
+ return rc;
+}
+
+static void
+destroy_controlvm_payload_info(CONTROLVM_PAYLOAD_INFO *info)
+{
+ if (info->ptr != NULL) {
+ iounmap(info->ptr);
+ info->ptr = NULL;
+ }
+ memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
+}
+
+static void
+initialize_controlvm_payload(void)
+{
+ HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
+ U64 payloadOffset = 0;
+ U32 payloadBytes = 0;
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ RequestPayloadOffset),
+ &payloadOffset, sizeof(payloadOffset)) < 0) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ RequestPayloadBytes),
+ &payloadBytes, sizeof(payloadBytes)) < 0) {
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
+ POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ initialize_controlvm_payload_info(phys_addr,
+ payloadOffset, payloadBytes,
+ &ControlVm_payload_info);
+}
+
+/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+int
+visorchipset_chipset_ready(void)
+{
+ kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
+
+int
+visorchipset_chipset_selftest(void)
+{
+ char env_selftest[20];
+ char *envp[] = { env_selftest, NULL };
+ sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
+ kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
+
+/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
+ * Returns CONTROLVM_RESP_xxx code.
+ */
+int
+visorchipset_chipset_notready(void)
+{
+ kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ return CONTROLVM_RESP_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
+
+static void
+chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_ready();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected && !visorchipset_holdchipsetready)
+ controlvm_respond(msgHdr, rc);
+ if (msgHdr->Flags.responseExpected && visorchipset_holdchipsetready) {
+ /* Send CHIPSET_READY response when all modules have been loaded
+ * and disks mounted for the partition
+ */
+ g_ChipSetMsgHdr = *msgHdr;
+ LOGINF("Holding CHIPSET_READY response");
+ }
+}
+
+static void
+chipset_selftest(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_selftest();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected)
+ controlvm_respond(msgHdr, rc);
+}
+
+static void
+chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+{
+ int rc = visorchipset_chipset_notready();
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ rc = -rc;
+ if (msgHdr->Flags.responseExpected)
+ controlvm_respond(msgHdr, rc);
+}
+
+/* This is your "one-stop" shop for grabbing the next message from the
+ * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
+ */
+static BOOL
+read_controlvm_event(CONTROLVM_MESSAGE *msg)
+{
+ if (visorchannel_signalremove(ControlVm_channel,
+ CONTROLVM_QUEUE_EVENT, msg)) {
+ /* got a message */
+ if (msg->hdr.Flags.testMessage == 1) {
+ LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)", msg->hdr.Id);
+ return FALSE;
+ } else
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * The general parahotplug flow works as follows. The visorchipset
+ * driver receives a DEVICE_CHANGESTATE message from Command
+ * specifying a physical device to enable or disable. The CONTROLVM
+ * message handler calls parahotplug_process_message, which then adds
+ * the message to a global list and kicks off a udev event which
+ * causes a user level script to enable or disable the specified
+ * device. The udev script then writes to
+ * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
+ * to get called, at which point the appropriate CONTROLVM message is
+ * retrieved from the list and responded to.
+ */
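+
+/*
+ * As a rough illustration only (not part of this driver), the user-space
+ * half of that round trip boils down to writing "<id> <active>" back to
+ * /proc/visorchipset/parahotplug once the udev helper has finished.  A
+ * minimal sketch of such a helper, assuming only the SPAR_PARAHOTPLUG_ID
+ * environment variable set up by parahotplug_request_kickoff() below:
+ */
+#if 0	/* example user-space code; never compiled into this module */
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(void)
+{
+	const char *id = getenv("SPAR_PARAHOTPLUG_ID");
+	FILE *f;
+
+	if (!id)
+		return 1;
+	f = fopen("/proc/visorchipset/parahotplug", "w");
+	if (!f)
+		return 1;
+	/* "<id> 1" reports the device enabled; "<id> 0" reports it disabled */
+	fprintf(f, "%s 1\n", id);
+	fclose(f);
+	return 0;
+}
+#endif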
+
+#define PARAHOTPLUG_TIMEOUT_MS 2000
+
+/*
+ * Generate unique int to match an outstanding CONTROLVM message with a
+ * udev script /proc response
+ */
+static int
+parahotplug_next_id(void)
+{
+ static atomic_t id = ATOMIC_INIT(0);
+ return atomic_inc_return(&id);
+}
+
+/*
+ * Returns the time (in jiffies) when a CONTROLVM message on the list
+ * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
+ */
+static unsigned long
+parahotplug_next_expiration(void)
+{
+ return jiffies + PARAHOTPLUG_TIMEOUT_MS * HZ / 1000;
+}
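+
+/* For example, with a hypothetical HZ of 250 this works out to
+ * jiffies + 2000 * 250 / 1000 = jiffies + 500 ticks, i.e. two seconds
+ * from now; msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS) would express the
+ * same conversion.
+ */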
+
+/*
+ * Create a parahotplug_request, which is basically a wrapper for a
+ * CONTROLVM_MESSAGE that we can stick on a list
+ */
+static struct parahotplug_request *
+parahotplug_request_create(CONTROLVM_MESSAGE *msg)
+{
+ struct parahotplug_request *req =
+ kmalloc(sizeof(struct parahotplug_request),
+ GFP_KERNEL|__GFP_NORETRY);
+ if (req == NULL)
+ return NULL;
+
+ req->id = parahotplug_next_id();
+ req->expiration = parahotplug_next_expiration();
+ req->msg = *msg;
+
+ return req;
+}
+
+/*
+ * Free a parahotplug_request.
+ */
+static void
+parahotplug_request_destroy(struct parahotplug_request *req)
+{
+ kfree(req);
+}
+
+/*
+ * Cause uevent to run the user level script to do the disable/enable
+ * specified in (the CONTROLVM message in) the specified
+ * parahotplug_request
+ */
+static void
+parahotplug_request_kickoff(struct parahotplug_request *req)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &req->msg.cmd;
+ char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
+ env_func[40];
+ char *envp[] = {
+ env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ };
+
+ sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
+ sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
+ sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
+ cmd->deviceChangeState.state.Active);
+ sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
+ cmd->deviceChangeState.busNo);
+ sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
+ cmd->deviceChangeState.devNo >> 3);
+ sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
+ cmd->deviceChangeState.devNo & 0x7);
+
+ LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
+ cmd->deviceChangeState.state.Active,
+ cmd->deviceChangeState.busNo, cmd->deviceChangeState.devNo >> 3,
+ cmd->deviceChangeState.devNo & 7, req->id);
+
+ kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
+ envp);
+}
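+
+/* The devNo splitting above packs a PCI slot/function pair into one
+ * number: the low 3 bits are the function, the rest is the device.  A
+ * minimal sketch of that encoding (illustrative only; these helpers are
+ * hypothetical and not used by the driver):
+ */
+#if 0	/* example only */
+static inline unsigned int parahotplug_pack_devno(unsigned int dev,
+						  unsigned int func)
+{
+	return (dev << 3) | (func & 0x7);
+}
+
+static inline unsigned int parahotplug_devno_dev(unsigned int devNo)
+{
+	return devNo >> 3;	/* PCI device (slot) number */
+}
+
+static inline unsigned int parahotplug_devno_func(unsigned int devNo)
+{
+	return devNo & 0x7;	/* PCI function number */
+}
+#endif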
+
+/*
+ * Remove any request from the list that's been on there too long and
+ * respond with an error.
+ */
+static void
+parahotplug_process_list(void)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+
+ spin_lock(&Parahotplug_request_list_lock);
+
+ list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+ if (time_after_eq(jiffies, req->expiration)) {
+ list_del(pos);
+ if (req->msg.hdr.Flags.responseExpected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr,
+ CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
+ req->msg.cmd.deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ }
+ }
+
+ spin_unlock(&Parahotplug_request_list_lock);
+}
+
+/*
+ * Called from the /proc handler, which means the user script has
+ * finished the enable/disable. Find the matching identifier, and
+ * respond to the CONTROLVM message with success.
+ */
+static int
+parahotplug_request_complete(int id, U16 active)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+
+ spin_lock(&Parahotplug_request_list_lock);
+
+ /* Look for a request matching "id". */
+ list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
+ struct parahotplug_request *req =
+ list_entry(pos, struct parahotplug_request, list);
+ if (req->id == id) {
+ /* Found a match. Remove it from the list and
+ * respond.
+ */
+ list_del(pos);
+ spin_unlock(&Parahotplug_request_list_lock);
+ req->msg.cmd.deviceChangeState.state.Active = active;
+ if (req->msg.hdr.Flags.responseExpected)
+ controlvm_respond_physdev_changestate(
+ &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+ req->msg.cmd.deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ return 0;
+ }
+ }
+
+ spin_unlock(&Parahotplug_request_list_lock);
+ return -1;
+}
+
+/*
+ * Enables or disables a PCI device by kicking off a udev script
+ */
+static void
+parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
+{
+ struct parahotplug_request *req;
+
+ req = parahotplug_request_create(inmsg);
+
+ if (req == NULL) {
+ LOGERR("parahotplug_process_message: couldn't allocate request");
+ return;
+ }
+
+ if (inmsg->cmd.deviceChangeState.state.Active) {
+ /* For enable messages, just respond with success
+ * right away. This is a bit of a hack, but there are
+ * issues with the early enable messages we get (with
+ * either the udev script not detecting that the device
+ * is up, or not getting called at all). Fortunately
+ * the messages that get lost don't matter anyway, as
+ * devices are automatically enabled at
+ * initialization.
+ */
+ parahotplug_request_kickoff(req);
+ controlvm_respond_physdev_changestate(&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.
+ deviceChangeState.state);
+ parahotplug_request_destroy(req);
+ } else {
+ /* For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
+ spin_lock(&Parahotplug_request_list_lock);
+ list_add_tail(&(req->list), &Parahotplug_request_list);
+ spin_unlock(&Parahotplug_request_list_lock);
+
+ parahotplug_request_kickoff(req);
+ }
+}
+
+/*
+ * Gets called when the udev script writes to
+ * /proc/visorchipset/parahotplug. Expects input in the form of "<id>
+ * <active>" where <id> is the identifier passed to the script that
+ * matches a request on the request list, and <active> is 0 or 1
+ * indicating whether the device is now enabled or not.
+ */
+static ssize_t
+parahotplug_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[64];
+ uint id;
+ ushort active;
+
+ if (count > sizeof(buf) - 1) {
+ LOGERR("parahotplug_proc_write: count (%d) exceeds size of buffer (%d)",
+ (int) count, (int) sizeof(buf));
+ return -EINVAL;
+ }
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("parahotplug_proc_write: copy_from_user failed");
+ return -EFAULT;
+ }
+ buf[count] = '\0';
+
+ if (sscanf(buf, "%u %hu", &id, &active) != 2) {
+ id = 0;
+ active = 0;
+ }
+
+ if (active != 1 && active != 0) {
+ LOGERR("parahotplug_proc_write: invalid active field");
+ return -EINVAL;
+ }
+
+ parahotplug_request_complete((int) id, (U16) active);
+
+ return count;
+}
+
+static const struct file_operations parahotplug_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = visorchipset_proc_read_writeonly,
+ .write = parahotplug_proc_write,
+};
+
+/* Process a controlvm message.
+ * Return result:
+ * FALSE - this function will return FALSE only in the case where the
+ * controlvm message was NOT processed, but processing must be
+ * retried before reading the next controlvm message; a
+ * scenario where this can occur is when we need to throttle
+ * the allocation of memory in which to copy out controlvm
+ * payload data
+ * TRUE - processing of the controlvm message completed,
+ * either successfully or with an error.
+ */
+static BOOL
+handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
+{
+ CONTROLVM_MESSAGE_PACKET *cmd = &inmsg.cmd;
+ U64 parametersAddr = 0;
+ U32 parametersBytes = 0;
+ PARSER_CONTEXT *parser_ctx = NULL;
+ BOOL isLocalAddr = FALSE;
+ CONTROLVM_MESSAGE ackmsg;
+
+ /* create parsing context if necessary */
+ isLocalAddr = (inmsg.hdr.Flags.testMessage == 1);
+ if (channel_addr == 0) {
+ LOGERR("HUH? channel_addr is 0!");
+ return TRUE;
+ }
+ parametersAddr = channel_addr + inmsg.hdr.PayloadVmOffset;
+ parametersBytes = inmsg.hdr.PayloadBytes;
+
+ /* Parameter and channel addresses within test messages actually lie
+ * within our OS-controlled memory. We need to know that, because it
+ * makes a difference in how we compute the virtual address.
+ */
+ if (parametersAddr != 0 && parametersBytes != 0) {
+ BOOL retry = FALSE;
+ parser_ctx =
+ parser_init_byteStream(parametersAddr, parametersBytes,
+ isLocalAddr, &retry);
+ if (!parser_ctx) {
+ if (retry) {
+ LOGWRN("throttling to copy payload");
+ return FALSE;
+ }
+ LOGWRN("parsing failed");
+ LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.Id);
+ LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
+ LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
+ LOGWRN("isLocalAddr=%d", isLocalAddr);
+ }
+ }
+
+ if (!isLocalAddr) {
+ controlvm_init_response(&ackmsg, &inmsg.hdr,
+ CONTROLVM_RESP_SUCCESS);
+		if (ControlVm_channel &&
+		    !visorchannel_signalinsert(ControlVm_channel,
+					       CONTROLVM_QUEUE_ACK, &ackmsg))
+			LOGWRN("failed to send ACK");
+ }
+ switch (inmsg.hdr.Id) {
+ case CONTROLVM_CHIPSET_INIT:
+ LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
+ (ulong) inmsg.cmd.initChipset.busCount,
+ (ulong) inmsg.cmd.initChipset.switchCount);
+ chipset_init(&inmsg);
+ break;
+ case CONTROLVM_BUS_CREATE:
+ LOGINF("BUS_CREATE(%lu,#devs=%lu)",
+ (ulong) cmd->createBus.busNo,
+ (ulong) cmd->createBus.deviceCount);
+ bus_create(&inmsg);
+ break;
+ case CONTROLVM_BUS_DESTROY:
+ LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroyBus.busNo);
+ bus_destroy(&inmsg);
+ break;
+ case CONTROLVM_BUS_CONFIGURE:
+ LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configureBus.busNo);
+ bus_configure(&inmsg, parser_ctx);
+ break;
+ case CONTROLVM_DEVICE_CREATE:
+ LOGINF("DEVICE_CREATE(%lu,%lu)",
+ (ulong) cmd->createDevice.busNo,
+ (ulong) cmd->createDevice.devNo);
+ my_device_create(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CHANGESTATE:
+ if (cmd->deviceChangeState.flags.physicalDevice) {
+ LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
+ (ulong) cmd->deviceChangeState.busNo,
+ (ulong) cmd->deviceChangeState.devNo,
+ (ulong) cmd->deviceChangeState.state.Active);
+ parahotplug_process_message(&inmsg);
+ } else {
+ LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
+ (ulong) cmd->deviceChangeState.busNo,
+ (ulong) cmd->deviceChangeState.devNo,
+ (ulong) cmd->deviceChangeState.state.Alive);
+ /* save the hdr and cmd structures for later use */
+ /* when sending back the response to Command */
+ my_device_changestate(&inmsg);
+ g_DiagMsgHdr = inmsg.hdr;
+ g_DeviceChangeStatePacket = inmsg.cmd;
+ break;
+ }
+ break;
+ case CONTROLVM_DEVICE_DESTROY:
+ LOGINF("DEVICE_DESTROY(%lu,%lu)",
+ (ulong) cmd->destroyDevice.busNo,
+ (ulong) cmd->destroyDevice.devNo);
+ my_device_destroy(&inmsg);
+ break;
+ case CONTROLVM_DEVICE_CONFIGURE:
+ LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
+ (ulong) cmd->configureDevice.busNo,
+ (ulong) cmd->configureDevice.devNo);
+ /* no op for now, just send a respond that we passed */
+ if (inmsg.hdr.Flags.responseExpected)
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ break;
+ case CONTROLVM_CHIPSET_READY:
+ LOGINF("CHIPSET_READY");
+ chipset_ready(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_SELFTEST:
+ LOGINF("CHIPSET_SELFTEST");
+ chipset_selftest(&inmsg.hdr);
+ break;
+ case CONTROLVM_CHIPSET_STOP:
+ LOGINF("CHIPSET_STOP");
+ chipset_notready(&inmsg.hdr);
+ break;
+ default:
+ LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.Id);
+ if (inmsg.hdr.Flags.responseExpected)
+ controlvm_respond(&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ break;
+ }
+
+ if (parser_ctx != NULL) {
+ parser_done(parser_ctx);
+ parser_ctx = NULL;
+ }
+ return TRUE;
+}
+
+static void
+controlvm_periodic_work(struct work_struct *work)
+{
+ VISORCHIPSET_CHANNEL_INFO chanInfo;
+ CONTROLVM_MESSAGE inmsg;
+ char s[99];
+ BOOL gotACommand = FALSE;
+ BOOL handle_command_failed = FALSE;
+ static U64 Poll_Count;
+
+ /* make sure visorbus server is registered for controlvm callbacks */
+ if (visorchipset_serverregwait && !serverregistered)
+ goto Away;
+	/* make sure visorclientbus server is registered for controlvm
+ * callbacks
+ */
+ if (visorchipset_clientregwait && !clientregistered)
+ goto Away;
+
+ memset(&chanInfo, 0, sizeof(VISORCHIPSET_CHANNEL_INFO));
+ if (!ControlVm_channel) {
+ HOSTADDRESS addr = controlvm_get_channel_address();
+ if (addr != 0) {
+ ControlVm_channel =
+ visorchannel_create_with_lock
+ (addr,
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
+ UltraControlvmChannelProtocolGuid);
+ if (ControlVm_channel == NULL)
+ LOGERR("failed to create controlvm channel");
+ else if (ULTRA_CONTROLVM_CHANNEL_OK_CLIENT
+ (visorchannel_get_header(ControlVm_channel),
+ NULL)) {
+ LOGINF("Channel %s (ControlVm) discovered",
+ visorchannel_id(ControlVm_channel, s));
+ initialize_controlvm_payload();
+ } else {
+ LOGERR("controlvm channel is invalid");
+ visorchannel_destroy(ControlVm_channel);
+ ControlVm_channel = NULL;
+ }
+ }
+ }
+
+ Poll_Count++;
+	if (!ControlVm_channel && Poll_Count < 250)
+		goto Away;
+
+ /* Check events to determine if response to CHIPSET_READY
+ * should be sent
+ */
+ if (visorchipset_holdchipsetready
+ && (g_ChipSetMsgHdr.Id != CONTROLVM_INVALID)) {
+ if (check_chipset_events() == 1) {
+ LOGINF("Sending CHIPSET_READY response");
+ controlvm_respond(&g_ChipSetMsgHdr, 0);
+ clear_chipset_events();
+ memset(&g_ChipSetMsgHdr, 0,
+ sizeof(CONTROLVM_MESSAGE_HEADER));
+ }
+ }
+
+ if (ControlVm_channel) {
+ while (visorchannel_signalremove(ControlVm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg)) {
+ if (inmsg.hdr.PayloadMaxBytes != 0) {
+ LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
+ (ulong) inmsg.hdr.PayloadMaxBytes,
+ (ulong) inmsg.hdr.PayloadVmOffset,
+ inmsg.hdr.Id);
+ }
+ }
+ if (!gotACommand) {
+ if (ControlVm_Pending_Msg_Valid) {
+ /* we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = ControlVm_Pending_Msg;
+ ControlVm_Pending_Msg_Valid = FALSE;
+ gotACommand = TRUE;
+ } else
+ gotACommand = read_controlvm_event(&inmsg);
+ }
+ }
+
+ handle_command_failed = FALSE;
+ while (gotACommand && (!handle_command_failed)) {
+ Most_recent_message_jiffies = jiffies;
+ if (ControlVm_channel) {
+ if (handle_command(inmsg,
+ visorchannel_get_physaddr
+ (ControlVm_channel)))
+ gotACommand = read_controlvm_event(&inmsg);
+ else {
+ /* this is a scenario where throttling
+ * is required, but probably NOT an
+ * error...; we stash the current
+ * controlvm msg so we will attempt to
+ * reprocess it on our next loop
+ */
+ handle_command_failed = TRUE;
+ ControlVm_Pending_Msg = inmsg;
+ ControlVm_Pending_Msg_Valid = TRUE;
+ }
+
+ } else {
+ handle_command(inmsg, 0);
+ gotACommand = FALSE;
+ }
+ }
+
+ /* parahotplug_worker */
+ parahotplug_process_list();
+
+Away:
+
+ if (time_after(jiffies,
+ Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+ /* it's been longer than MIN_IDLE_SECONDS since we
+ * processed our last controlvm message; slow down the
+ * polling
+ */
+ if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) {
+ LOGINF("switched to slow controlvm polling");
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ }
+ } else {
+ if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) {
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ LOGINF("switched to fast controlvm polling");
+ }
+ }
+
+ queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+}
+
+static void
+setup_crash_devices_work_queue(struct work_struct *work)
+{
+
+ CONTROLVM_MESSAGE localCrashCreateBusMsg;
+ CONTROLVM_MESSAGE localCrashCreateDevMsg;
+ CONTROLVM_MESSAGE msg;
+ HOSTADDRESS host_addr;
+ U32 localSavedCrashMsgOffset;
+ U16 localSavedCrashMsgCount;
+
+ /* make sure visorbus server is registered for controlvm callbacks */
+ if (visorchipset_serverregwait && !serverregistered)
+ goto Away;
+
+	/* make sure visorclientbus server is registered for controlvm
+ * callbacks
+ */
+ if (visorchipset_clientregwait && !clientregistered)
+ goto Away;
+
+ POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ /* send init chipset msg */
+ msg.hdr.Id = CONTROLVM_CHIPSET_INIT;
+ msg.cmd.initChipset.busCount = 23;
+ msg.cmd.initChipset.switchCount = 0;
+
+ chipset_init(&msg);
+
+ host_addr = controlvm_get_channel_address();
+ if (!host_addr) {
+ LOGERR("Huh? Host address is NULL");
+ POSTCODE_LINUX_2(CRASH_DEV_HADDR_NULL, POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ ControlVm_channel =
+ visorchannel_create_with_lock
+ (host_addr,
+ sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
+ UltraControlvmChannelProtocolGuid);
+
+ if (ControlVm_channel == NULL) {
+ LOGERR("failed to create controlvm channel");
+ POSTCODE_LINUX_2(CRASH_DEV_CONTROLVM_NULL,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved message count */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgCount),
+ &localSavedCrashMsgCount, sizeof(U16)) < 0) {
+ LOGERR("failed to get Saved Message Count");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
+ LOGERR("Saved Message Count incorrect %d",
+ localSavedCrashMsgCount);
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ localSavedCrashMsgCount,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* get saved crash message offset */
+ if (visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ SavedCrashMsgOffset),
+ &localSavedCrashMsgOffset, sizeof(U32)) < 0) {
+ LOGERR("failed to get Saved Message Offset");
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage bus offset */
+ if (visorchannel_read(ControlVm_channel,
+ localSavedCrashMsgOffset,
+ &localCrashCreateBusMsg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+		LOGERR("CRASH_DEV_RD_BUS_FAILURE: Failed to read CrashCreateBusMsg!");
+ POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* read create device message for storage device */
+ if (visorchannel_read(ControlVm_channel,
+ localSavedCrashMsgOffset +
+ sizeof(CONTROLVM_MESSAGE),
+ &localCrashCreateDevMsg,
+ sizeof(CONTROLVM_MESSAGE)) < 0) {
+		LOGERR("CRASH_DEV_RD_DEV_FAILURE: Failed to read CrashCreateDevMsg!");
+ POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse IOVM create bus message */
+ if (localCrashCreateBusMsg.cmd.createBus.channelAddr != 0)
+ bus_create(&localCrashCreateBusMsg);
+ else {
+ LOGERR("CrashCreateBusMsg is null, no dump will be taken");
+ POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ /* reuse create device message for storage device */
+ if (localCrashCreateDevMsg.cmd.createDevice.channelAddr != 0)
+ my_device_create(&localCrashCreateDevMsg);
+ else {
+ LOGERR("CrashCreateDevMsg is null, no dump will be taken");
+ POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ LOGINF("Bus and device ready for dumping");
+ POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ return;
+
+Away:
+
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+
+ queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+}
+
+static void
+bus_create_response(ulong busNo, int response)
+{
+ bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
+}
+
+static void
+bus_destroy_response(ulong busNo, int response)
+{
+ bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
+}
+
+static void
+device_create_response(ulong busNo, ulong devNo, int response)
+{
+ device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
+}
+
+static void
+device_destroy_response(ulong busNo, ulong devNo, int response)
+{
+ device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
+}
+
+void
+visorchipset_device_pause_response(ulong busNo, ulong devNo, int response)
+{
+
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ busNo, devNo, response,
+ SegmentStateStandby);
+}
+EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
+
+static void
+device_resume_response(ulong busNo, ulong devNo, int response)
+{
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
+ busNo, devNo, response,
+ SegmentStateRunning);
+}
+
+BOOL
+visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo)
+{
+ void *p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("(%lu) failed", busNo);
+ return FALSE;
+ }
+ memcpy(busInfo, p, sizeof(VISORCHIPSET_BUS_INFO));
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
+
+BOOL
+visorchipset_set_bus_context(ulong busNo, void *context)
+{
+ VISORCHIPSET_BUS_INFO *p = findbus(&BusInfoList, busNo);
+ if (!p) {
+ LOGERR("(%lu) failed", busNo);
+ return FALSE;
+ }
+ p->bus_driver_context = context;
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
+
+BOOL
+visorchipset_get_device_info(ulong busNo, ulong devNo,
+ VISORCHIPSET_DEVICE_INFO *devInfo)
+{
+ void *p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("(%lu,%lu) failed", busNo, devNo);
+ return FALSE;
+ }
+ memcpy(devInfo, p, sizeof(VISORCHIPSET_DEVICE_INFO));
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
+
+BOOL
+visorchipset_set_device_context(ulong busNo, ulong devNo, void *context)
+{
+ VISORCHIPSET_DEVICE_INFO *p = finddevice(&DevInfoList, busNo, devNo);
+ if (!p) {
+ LOGERR("(%lu,%lu) failed", busNo, devNo);
+ return FALSE;
+ }
+ p->bus_driver_context = context;
+ return TRUE;
+}
+EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
+
+/* Generic wrapper function for allocating memory from a kmem_cache pool.
+ */
+void *
+visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
+ char *fn, int ln)
+{
+ gfp_t gfp;
+ void *p;
+
+ if (ok_to_block)
+ gfp = GFP_KERNEL;
+ else
+ gfp = GFP_ATOMIC;
+ /* __GFP_NORETRY means "ok to fail", meaning
+ * kmem_cache_alloc() can return NULL, implying the caller CAN
+ * cope with failure. If you do NOT specify __GFP_NORETRY,
+ * Linux will go to extreme measures to get memory for you
+ * (like, invoke oom killer), which will probably cripple the
+ * system.
+ */
+ gfp |= __GFP_NORETRY;
+ p = kmem_cache_alloc(pool, gfp);
+ if (!p) {
+ LOGERR("kmem_cache_alloc failed early @%s:%d\n", fn, ln);
+ return NULL;
+ }
+ atomic_inc(&Visorchipset_cache_buffers_in_use);
+ return p;
+}
+
+/* Generic wrapper function for freeing memory from a kmem_cache pool.
+ */
+void
+visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
+{
+ if (!p) {
+ LOGERR("NULL pointer @%s:%d\n", fn, ln);
+ return;
+ }
+ atomic_dec(&Visorchipset_cache_buffers_in_use);
+ kmem_cache_free(pool, p);
+}
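+
+/* A minimal usage sketch for the pair above (illustrative only; the
+ * pool and call site below are hypothetical, not part of this driver):
+ */
+#if 0	/* example only */
+static struct kmem_cache *example_pool;	/* from kmem_cache_create() */
+
+static void example_use(void)
+{
+	/* ok_to_block=TRUE selects GFP_KERNEL, FALSE selects GFP_ATOMIC;
+	 * __GFP_NORETRY is added either way, so NULL must be handled.
+	 */
+	void *p = visorchipset_cache_alloc(example_pool, TRUE,
+					   __FILE__, __LINE__);
+	if (!p)
+		return;	/* allocation failed; caller copes */
+	/* ... use p ... */
+	visorchipset_cache_free(example_pool, p, __FILE__, __LINE__);
+}
+#endif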
+
+#define gettoken(bufp) strsep(bufp, " -\t\n")
+
+static ssize_t
+chipset_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[512];
+ char *token, *p;
+
+ if (count > sizeof(buf) - 1) {
+ LOGERR("chipset_proc_write: count (%d) exceeds size of buffer (%d)",
+		       (int) count, (int) sizeof(buf));
+ return -EINVAL;
+ }
+ if (copy_from_user(buf, buffer, count)) {
+ LOGERR("chipset_proc_write: copy_from_user failed");
+ return -EFAULT;
+ }
+ buf[count] = '\0';
+
+	p = buf;
+	token = gettoken(&p);
+
+	if (token == NULL) {
+		/* No event specified */
+		LOGERR("No event was specified to send CHIPSET_READY response");
+		return -1;
+	}
+
+	if (strcmp(token, "CALLHOMEDISK_MOUNTED") == 0) {
+		token = gettoken(&p);
+		/* The Call Home Disk has been mounted */
+		if (token && strcmp(token, "0") == 0)
+			chipset_events[0] = 1;
+	} else if (strcmp(token, "MODULES_LOADED") == 0) {
+		token = gettoken(&p);
+		/* All modules for the partition have been loaded */
+		if (token && strcmp(token, "0") == 0)
+			chipset_events[1] = 1;
+	} else {
+		/* Unsupported event specified */
+		LOGERR("%s is an invalid event for sending CHIPSET_READY response", token);
+		return -1;
+	}
+
+ return count;
+}
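+
+/* For example, writing the line "MODULES_LOADED 0" to the chipset proc
+ * entry created in visorchipset_init() records that all partition
+ * modules are loaded, and "CALLHOMEDISK_MOUNTED 0" records the Call
+ * Home disk mount; each sets the corresponding chipset_events[] flag
+ * that check_chipset_events() looks at before the held CHIPSET_READY
+ * response is sent.
+ */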
+
+static ssize_t
+visorchipset_proc_read_writeonly(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ return 0;
+}
+
+/**
+ * Reads the InstallationError, InstallationTextId,
+ * InstallationRemainingSteps fields of ControlVMChannel.
+ */
+static ssize_t
+proc_read_installer(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ U16 remainingSteps;
+ U32 error, textId;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationRemainingSteps), &remainingSteps,
+ sizeof(U16));
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationError), &error, sizeof(U32));
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationTextId), &textId, sizeof(U32));
+
+ length = sprintf(vbuf, "%u %u %u\n", remainingSteps, error, textId);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the InstallationError, InstallationTextId,
+ * InstallationRemainingSteps fields of
+ * ControlVMChannel.
+ * Input: RemainingSteps Error TextId
+ * Limit 32 characters input
+ */
+#define UINT16_MAX (65535U)
+#define UINT32_MAX (4294967295U)
+static ssize_t
+proc_write_installer(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ U16 remainingSteps;
+ U32 error, textId;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+	}
+	buf[count] = '\0';
+
+ if (sscanf(buf, "%hu %i %i", &remainingSteps, &error, &textId) != 3) {
+ remainingSteps = UINT16_MAX;
+ error = UINT32_MAX;
+ textId = UINT32_MAX;
+ }
+
+ if (remainingSteps != UINT16_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationRemainingSteps), &remainingSteps,
+ sizeof(U16)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - RemainingSteps = %d\n",
+ remainingSteps);
+ }
+
+ if (error != UINT32_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationError), &error, sizeof(U32)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - Error = %d\n",
+ error);
+ }
+
+ if (textId != UINT32_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ InstallationTextId), &textId, sizeof(U32)) < 0)
+ WARN(1, "Installation Status Write Failed - Write function error - TextId = %d\n",
+ textId);
+ }
+
+	/* Return count so this write handler is not called again for
+	 * the same data
+	 */
+ return count;
+}
+
+/**
+ * Reads the ToolAction field of ControlVMChannel.
+ */
+static ssize_t
+proc_read_toolaction(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ U8 toolAction;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ ToolAction), &toolAction, sizeof(U8));
+
+ length = sprintf(vbuf, "%u\n", toolAction);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the ToolAction field of ControlVMChannel.
+ * Input: ToolAction
+ * Limit 3 characters input
+ */
+#define UINT8_MAX (255U)
+static ssize_t
+proc_write_toolaction(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[3];
+ U8 toolAction;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+	}
+	buf[count] = '\0';
+
+	if (sscanf(buf, "%hhu", &toolAction) != 1)
+ toolAction = UINT8_MAX;
+
+ if (toolAction != UINT8_MAX) {
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ToolAction),
+ &toolAction, sizeof(U8)) < 0)
+ WARN(1, "Installation ToolAction Write Failed - ToolAction = %d\n",
+ toolAction);
+ }
+
+	/* Return count so this write handler is not called again for
+	 * the same data
+	 */
+ return count;
+}
+
+/**
+ * Reads the EfiSparIndication.BootToTool field of ControlVMChannel.
+ */
+static ssize_t
+proc_read_bootToTool(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int length = 0;
+ ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ char *vbuf;
+ loff_t pos = *offset;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos > 0 || !len)
+ return 0;
+
+ vbuf = kzalloc(len, GFP_KERNEL);
+ if (!vbuf)
+ return -ENOMEM;
+
+ visorchannel_read(ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
+ EfiSparIndication), &efiSparIndication,
+ sizeof(ULTRA_EFI_SPAR_INDICATION));
+
+ length = sprintf(vbuf, "%d\n", (int) efiSparIndication.BootToTool);
+ if (copy_to_user(buf, vbuf, length)) {
+ kfree(vbuf);
+ return -EFAULT;
+ }
+
+ kfree(vbuf);
+ *offset += length;
+ return length;
+}
+
+/**
+ * Writes to the EfiSparIndication.BootToTool field of ControlVMChannel.
+ * Input: 1 or 0 (1 being on, 0 being off)
+ */
+static ssize_t
+proc_write_bootToTool(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char buf[3];
+ int inputVal;
+ ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+
+ /* Check to make sure there is no buffer overflow */
+ if (count > (sizeof(buf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count)) {
+ WARN(1, "Error copying from user space\n");
+ return -EFAULT;
+	}
+	buf[count] = '\0';
+
+	if (sscanf(buf, "%i", &inputVal) != 1)
+ inputVal = 0;
+
+ efiSparIndication.BootToTool = (inputVal == 1 ? 1 : 0);
+
+ if (visorchannel_write
+ (ControlVm_channel,
+ offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EfiSparIndication),
+ &efiSparIndication, sizeof(ULTRA_EFI_SPAR_INDICATION)) < 0)
+ printk
+ ("Installation BootToTool Write Failed - BootToTool = %d\n",
+ (int) efiSparIndication.BootToTool);
+
+	/* Return count so this write handler is not called again for
+	 * the same data
+	 */
+ return count;
+}
+
+static const struct file_operations chipset_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = visorchipset_proc_read_writeonly,
+ .write = chipset_proc_write,
+};
+
+static int __init
+visorchipset_init(void)
+{
+ int rc = 0, x = 0;
+ struct proc_dir_entry *installer_file;
+ struct proc_dir_entry *toolaction_file;
+ struct proc_dir_entry *bootToTool_file;
+
+ LOGINF("chipset driver version %s loaded", VERSION);
+ /* process module options */
+ POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ LOGINF("option - testvnic=%d", visorchipset_testvnic);
+ LOGINF("option - testvnicclient=%d", visorchipset_testvnicclient);
+ LOGINF("option - testmsg=%d", visorchipset_testmsg);
+ LOGINF("option - testteardown=%d", visorchipset_testteardown);
+ LOGINF("option - major=%d", visorchipset_major);
+ LOGINF("option - serverregwait=%d", visorchipset_serverregwait);
+ LOGINF("option - clientregwait=%d", visorchipset_clientregwait);
+ LOGINF("option - holdchipsetready=%d", visorchipset_holdchipsetready);
+
+ memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
+ memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
+ memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
+ memset(&LiveDump_info, 0, sizeof(LiveDump_info));
+ atomic_set(&LiveDump_info.buffers_in_use, 0);
+
+ if (visorchipset_testvnic) {
+ ERRDRV("testvnic option no longer supported: (status = %d)\n",
+ x);
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
+ rc = x;
+ goto Away;
+ }
+
+ controlvm_init();
+ MajorDev = MKDEV(visorchipset_major, 0);
+ rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
+ if (rc < 0) {
+ ERRDRV("visorchipset_file_init(MajorDev, &ControlVm_channel): error (status=%d)\n", rc);
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ goto Away;
+ }
+
+ proc_Init();
+ memset(PartitionPropertyNames, 0, sizeof(PartitionPropertyNames));
+ memset(ControlVmPropertyNames, 0, sizeof(ControlVmPropertyNames));
+ InitPartitionProperties();
+ InitControlVmProperties();
+
+ PartitionType = visor_proc_CreateType(ProcDir, PartitionTypeNames,
+ (const char **)
+ PartitionPropertyNames,
+ &show_partition_property);
+ ControlVmType =
+ visor_proc_CreateType(ProcDir, ControlVmTypeNames,
+ (const char **) ControlVmPropertyNames,
+ &show_controlvm_property);
+
+ ControlVmObject = visor_proc_CreateObject(ControlVmType, NULL, NULL);
+
+ /* Setup Installation fields */
+ installer_file = proc_create("installer", 0644, ProcDir,
+ &proc_installer_fops);
+ /* Setup the ToolAction field */
+ toolaction_file = proc_create("toolaction", 0644, ProcDir,
+ &proc_toolaction_fops);
+ /* Setup the BootToTool field */
+ bootToTool_file = proc_create("boottotool", 0644, ProcDir,
+ &proc_bootToTool_fops);
+
+ memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ chipset_proc_dir = proc_create(VISORCHIPSET_CHIPSET_PROC_ENTRY_FN,
+ 0644, ProcDir, &chipset_proc_fops);
+ memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ parahotplug_proc_dir =
+ proc_create(VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN, 0200,
+ ProcDir, &parahotplug_proc_fops);
+ memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (filexfer_constructor(sizeof(struct putfile_request)) < 0) {
+ ERRDRV("filexfer_constructor failed: (status=-1)\n");
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ Putfile_buffer_list_pool =
+ kmem_cache_create(Putfile_buffer_list_pool_name,
+ sizeof(struct putfile_buffer_entry),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!Putfile_buffer_list_pool) {
+ ERRDRV("failed to alloc Putfile_buffer_list_pool: (status=-1)\n");
+ POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ if (visorchipset_disable_controlvm) {
+ LOGINF("visorchipset_init:controlvm disabled");
+ } else {
+ /* if booting in a crash kernel */
+ if (visorchipset_crash_kernel)
+ INIT_DELAYED_WORK(&Periodic_controlvm_work,
+ setup_crash_devices_work_queue);
+ else
+ INIT_DELAYED_WORK(&Periodic_controlvm_work,
+ controlvm_periodic_work);
+ Periodic_controlvm_workqueue =
+ create_singlethread_workqueue("visorchipset_controlvm");
+
+ if (Periodic_controlvm_workqueue == NULL) {
+ ERRDRV("cannot create controlvm workqueue: (status=%d)\n",
+ -ENOMEM);
+ POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
+ DIAG_SEVERITY_ERR);
+ rc = -ENOMEM;
+ goto Away;
+ }
+ Most_recent_message_jiffies = jiffies;
+ Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ rc = queue_delayed_work(Periodic_controlvm_workqueue,
+ &Periodic_controlvm_work, Poll_jiffies);
+ if (rc < 0) {
+ ERRDRV("queue_delayed_work(Periodic_controlvm_workqueue, &Periodic_controlvm_work, Poll_jiffies): error (status=%d)\n", rc);
+ POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
+ DIAG_SEVERITY_ERR);
+ goto Away;
+ }
+
+ }
+
+ Visorchipset_platform_device.dev.devt = MajorDev;
+ if (platform_device_register(&Visorchipset_platform_device) < 0) {
+ ERRDRV("platform_device_register(visorchipset) failed: (status=-1)\n");
+ POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+ rc = -1;
+ goto Away;
+ }
+ LOGINF("visorchipset device created");
+ POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+ rc = 0;
+Away:
+ if (rc) {
+ LOGERR("visorchipset_init failed");
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+ POSTCODE_SEVERITY_ERR);
+ }
+ return rc;
+}
+
+static void
+visorchipset_exit(void)
+{
+ char s[99];
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+	if (!visorchipset_disable_controlvm) {
+		cancel_delayed_work(&Periodic_controlvm_work);
+		flush_workqueue(Periodic_controlvm_workqueue);
+		destroy_workqueue(Periodic_controlvm_workqueue);
+		Periodic_controlvm_workqueue = NULL;
+		destroy_controlvm_payload_info(&ControlVm_payload_info);
+	}
+ Test_Vnic_channel = NULL;
+ if (Putfile_buffer_list_pool) {
+ kmem_cache_destroy(Putfile_buffer_list_pool);
+ Putfile_buffer_list_pool = NULL;
+ }
+ filexfer_destructor();
+ if (ControlVmObject) {
+ visor_proc_DestroyObject(ControlVmObject);
+ ControlVmObject = NULL;
+ }
+ cleanup_controlvm_structures();
+
+ if (ControlVmType) {
+ visor_proc_DestroyType(ControlVmType);
+ ControlVmType = NULL;
+ }
+ if (PartitionType) {
+ visor_proc_DestroyType(PartitionType);
+ PartitionType = NULL;
+ }
+ if (diag_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_DIAG_PROC_ENTRY_FN, ProcDir);
+ diag_proc_dir = NULL;
+ }
+ memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (chipset_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_CHIPSET_PROC_ENTRY_FN, ProcDir);
+ chipset_proc_dir = NULL;
+ }
+ memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ if (parahotplug_proc_dir) {
+ remove_proc_entry(VISORCHIPSET_PARAHOTPLUG_PROC_ENTRY_FN,
+ ProcDir);
+ parahotplug_proc_dir = NULL;
+ }
+
+ memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+
+ proc_DeInit();
+ if (ControlVm_channel != NULL) {
+ LOGINF("Channel %s (ControlVm) disconnected",
+ visorchannel_id(ControlVm_channel, s));
+ visorchannel_destroy(ControlVm_channel);
+ ControlVm_channel = NULL;
+ }
+ controlvm_deinit();
+ visorchipset_file_cleanup();
+ POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+ LOGINF("chipset driver unloaded");
+}
+
+module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
+int visorchipset_testvnic = 0;
+
+module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
+int visorchipset_testvnicclient = 0;
+
+module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testmsg,
+ "1 to manufacture the chipset, bus, and switch messages");
+int visorchipset_testmsg = 0;
+
+module_param_named(major, visorchipset_major, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
+int visorchipset_major = 0;
+
+module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_serverregwait,
+ "1 to have the module wait for the visor bus to register");
+int visorchipset_serverregwait = 0; /* default is off */
+module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
+int visorchipset_clientregwait = 1; /* default is on */
+module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_testteardown,
+ "1 to test teardown of the chipset, bus, and switch");
+int visorchipset_testteardown = 0; /* default is off */
+module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
+ S_IRUGO);
+MODULE_PARM_DESC(visorchipset_disable_controlvm,
+ "1 to disable polling of controlVm channel");
+int visorchipset_disable_controlvm = 0; /* default is off */
+module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_crash_kernel,
+ "1 means we are running in crash kernel");
+int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
+module_param_named(holdchipsetready, visorchipset_holdchipsetready,
+ int, S_IRUGO);
+MODULE_PARM_DESC(visorchipset_holdchipsetready,
+ "1 to hold response to CHIPSET_READY");
+int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
+ * response immediately */
+module_init(visorchipset_init);
+module_exit(visorchipset_exit);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
+ VERSION);
+MODULE_VERSION(VERSION);
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_umode.h b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
new file mode 100644
index 000000000000..259e840376a5
--- /dev/null
+++ b/drivers/staging/unisys/visorchipset/visorchipset_umode.h
@@ -0,0 +1,37 @@
+/* visorchipset_umode.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes structures needed for the interface between the
+ * visorchipset driver and a user-mode component that opens the device.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __VISORCHIPSET_UMODE_H
+#define __VISORCHIPSET_UMODE_H
+
+
+
+/** The user-mode program can access the control channel buffer directly
+ * via this memory map.
+ */
+#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0x00000000)
+#define VISORCHIPSET_MMAP_CONTROLCHANSIZE (0x00400000) /* 4MB */
+
+#endif /* __VISORCHIPSET_UMODE_H */
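As a rough illustration of how a user-mode component might consume the two constants above, here is a hypothetical user-space sketch; the /dev node path, the availability of this header to user code, and the error handling are all assumptions, since no user-mode tool is included in this patch:

/* Hypothetical user-space sketch: map the control channel exposed by the
 * visorchipset character device.  The device node name is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include "visorchipset_umode.h"

int main(void)
{
	void *chan;
	int fd = open("/dev/visorchipset", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	chan = mmap(NULL, VISORCHIPSET_MMAP_CONTROLCHANSIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		    VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
	if (chan == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* ... inspect or update control channel memory here ... */
	munmap(chan, VISORCHIPSET_MMAP_CONTROLCHANSIZE);
	close(fd);
	return 0;
}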
diff --git a/drivers/staging/unisys/visorutil/Kconfig b/drivers/staging/unisys/visorutil/Kconfig
new file mode 100644
index 000000000000..4ff61a7c506b
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys visorutil configuration
+#
+
+config UNISYS_VISORUTIL
+ tristate "Unisys visorutil driver"
+ depends on UNISYSSPAR
+ ---help---
+ If you say Y here, you will enable the Unisys visorutil driver.
+
diff --git a/drivers/staging/unisys/visorutil/Makefile b/drivers/staging/unisys/visorutil/Makefile
new file mode 100644
index 000000000000..3f463888dcec
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Unisys visorutil
+#
+
+obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil.o
+
+visorutil-y := charqueue.o easyproc.o periodic_work.o procobjecttree.o \
+ memregion_direct.o visorkmodutils.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -DCONFIG_SPAR_GUEST -DGUESTDRIVERBUILD -DNOAUTOVERSION
diff --git a/drivers/staging/unisys/visorutil/charqueue.c b/drivers/staging/unisys/visorutil/charqueue.c
new file mode 100644
index 000000000000..0ceede129e87
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/charqueue.c
@@ -0,0 +1,141 @@
+/* charqueue.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Simple character queue implementation for Linux kernel mode.
+ */
+
+#include "charqueue.h"
+
+#define MYDRVNAME "charqueue"
+
+#define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail)
+
+
+
+struct CHARQUEUE_Tag {
+ int alloc_size;
+ int nslots;
+ spinlock_t lock;
+ int head, tail;
+ unsigned char buf[0];
+};
+
+
+
+CHARQUEUE *visor_charqueue_create(ulong nslots)
+{
+ int alloc_size = sizeof(CHARQUEUE) + nslots + 1;
+ CHARQUEUE *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
+ if (cq == NULL) {
+ ERRDRV("visor_charqueue_create allocation failed (alloc_size=%d)",
+ alloc_size);
+ return NULL;
+ }
+ cq->alloc_size = alloc_size;
+ cq->nslots = nslots;
+ cq->head = cq->tail = 0;
+ spin_lock_init(&cq->lock);
+ return cq;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_create);
+
+
+
+void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
+{
+ int alloc_slots = charqueue->nslots+1; /* 1 slot is always empty */
+
+ spin_lock(&charqueue->lock);
+ charqueue->head = (charqueue->head+1) % alloc_slots;
+ if (charqueue->head == charqueue->tail)
+ /* overflow; overwrite the oldest entry */
+ charqueue->tail = (charqueue->tail+1) % alloc_slots;
+ charqueue->buf[charqueue->head] = c;
+ spin_unlock(&charqueue->lock);
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_enqueue);
+
+
+
+BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
+{
+ BOOL b;
+ spin_lock(&charqueue->lock);
+ b = IS_EMPTY(charqueue);
+ spin_unlock(&charqueue->lock);
+ return b;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_is_empty);
+
+
+
+static int charqueue_dequeue_1(CHARQUEUE *charqueue)
+{
+ int alloc_slots = charqueue->nslots + 1; /* 1 slot is always empty */
+
+ if (IS_EMPTY(charqueue))
+ return -1;
+ charqueue->tail = (charqueue->tail+1) % alloc_slots;
+ return charqueue->buf[charqueue->tail];
+}
+
+
+
+int charqueue_dequeue(CHARQUEUE *charqueue)
+{
+ int rc;
+
+ spin_lock(&charqueue->lock);
+ rc = charqueue_dequeue_1(charqueue);
+ spin_unlock(&charqueue->lock);
+ return rc;
+}
+
+
+
+int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
+{
+ int rc, counter = 0, c;
+
+ spin_lock(&charqueue->lock);
+ for (;;) {
+ if (n <= 0)
+ break; /* no more buffer space */
+ c = charqueue_dequeue_1(charqueue);
+ if (c < 0)
+ break; /* no more input */
+ *buf = (unsigned char)(c);
+ buf++;
+ n--;
+ counter++;
+ }
+ rc = counter;
+ spin_unlock(&charqueue->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_dequeue_n);
+
+
+
+void visor_charqueue_destroy(CHARQUEUE *charqueue)
+{
+ if (charqueue == NULL)
+ return;
+ kfree(charqueue);
+}
+EXPORT_SYMBOL_GPL(visor_charqueue_destroy);
diff --git a/drivers/staging/unisys/visorutil/charqueue.h b/drivers/staging/unisys/visorutil/charqueue.h
new file mode 100644
index 000000000000..e82ae0bc8959
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/charqueue.h
@@ -0,0 +1,37 @@
+/* charqueue.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __CHARQUEUE_H__
+#define __CHARQUEUE_H__
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+/* CHARQUEUE is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct CHARQUEUE_Tag CHARQUEUE;
+
+CHARQUEUE *visor_charqueue_create(ulong nslots);
+void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c);
+int charqueue_dequeue(CHARQUEUE *charqueue);
+int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n);
+BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue);
+void visor_charqueue_destroy(CHARQUEUE *charqueue);
+
+#endif
+
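For reference, a minimal (hypothetical) caller of the charqueue API declared above could look like the sketch below; it relies only on the declarations in this header and assumes the usual kernel headers are pulled in via timskmod.h:

/* Hypothetical caller of the charqueue API. */
#include <linux/printk.h>
#include "charqueue.h"

static void charqueue_example(void)
{
	unsigned char out[16];
	int n;
	CHARQUEUE *cq = visor_charqueue_create(128);	/* 128 usable slots */

	if (cq == NULL)
		return;		/* allocation can fail (__GFP_NORETRY) */

	visor_charqueue_enqueue(cq, 'a');	/* overwrites oldest on overflow */
	visor_charqueue_enqueue(cq, 'b');

	/* drain up to sizeof(out) bytes; returns how many were dequeued */
	n = visor_charqueue_dequeue_n(cq, out, sizeof(out));
	pr_info("dequeued %d byte(s)\n", n);

	visor_charqueue_destroy(cq);
}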
diff --git a/drivers/staging/unisys/visorutil/easyproc.c b/drivers/staging/unisys/visorutil/easyproc.c
new file mode 100644
index 000000000000..60b6b83d1b20
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/easyproc.c
@@ -0,0 +1,371 @@
+/* Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * Handle procfs-specific tasks.
+ * Note that this file does not know about any module-specific things, nor
+ * does it know anything about what information to reveal as part of the proc
+ * entries. The 2 functions that take care of displaying device and
+ * driver specific information are passed as parameters to
+ * visor_easyproc_InitDriver().
+ *
+ * void show_device_info(struct seq_file *seq, void *p);
+ * void show_driver_info(struct seq_file *seq);
+ *
+ * The second parameter to show_device_info is actually a pointer to the
+ * device-specific info to show. It is the context that was originally
+ * passed to visor_easyproc_InitDevice().
+ *
+ ******************************************************************************
+ */
+
+#include <linux/proc_fs.h>
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "easyproc.h"
+
+#define MYDRVNAME "easyproc"
+
+
+
+/*
+ * /proc/<ProcId> ProcDir
+ * /proc/<ProcId>/driver ProcDriverDir
+ * /proc/<ProcId>/driver/diag ProcDriverDiagFile
+ * /proc/<ProcId>/device ProcDeviceDir
+ * /proc/<ProcId>/device/0 procDevicexDir
+ * /proc/<ProcId>/device/0/diag procDevicexDiagFile
+ */
+
+
+static ssize_t proc_write_device(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+static ssize_t proc_write_driver(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+static struct proc_dir_entry *
+ createProcDir(char *name, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *p = proc_mkdir_mode(name, S_IFDIR, parent);
+ if (p == NULL)
+ ERRDRV("failed to create /proc directory %s", name);
+ return p;
+}
+
+static int seq_show_driver(struct seq_file *seq, void *offset);
+static int proc_open_driver(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_driver, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_driver = {
+ .open = proc_open_driver,
+ .read = seq_read,
+ .write = proc_write_driver,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int seq_show_device(struct seq_file *seq, void *offset);
+static int seq_show_device_property(struct seq_file *seq, void *offset);
+static int proc_open_device(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_device, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_device = {
+ .open = proc_open_device,
+ .read = seq_read,
+ .write = proc_write_device,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static int proc_open_device_property(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show_device_property, PDE_DATA(inode));
+}
+static const struct file_operations proc_fops_device_property = {
+ .open = proc_open_device_property,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+
+void visor_easyproc_InitDriver(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *))
+{
+ memset(pdriver, 0, sizeof(struct easyproc_driver_info));
+ pdriver->ProcId = procId;
+ if (pdriver->ProcId == NULL)
+ ERRDRV("ProcId cannot be NULL (trouble ahead)!");
+ pdriver->Show_driver_info = show_driver_info;
+ pdriver->Show_device_info = show_device_info;
+ if (pdriver->ProcDir == NULL)
+ pdriver->ProcDir = createProcDir(pdriver->ProcId, NULL);
+ if ((pdriver->ProcDir != NULL) && (pdriver->ProcDriverDir == NULL))
+ pdriver->ProcDriverDir = createProcDir("driver",
+ pdriver->ProcDir);
+ if ((pdriver->ProcDir != NULL) && (pdriver->ProcDeviceDir == NULL))
+ pdriver->ProcDeviceDir = createProcDir("device",
+ pdriver->ProcDir);
+ if ((pdriver->ProcDriverDir != NULL) &&
+ (pdriver->ProcDriverDiagFile == NULL)) {
+ pdriver->ProcDriverDiagFile =
+ proc_create_data("diag", 0,
+ pdriver->ProcDriverDir,
+ &proc_fops_driver, pdriver);
+ if (pdriver->ProcDriverDiagFile == NULL)
+ ERRDRV("failed to register /proc/%s/driver/diag entry",
+ pdriver->ProcId);
+ }
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDriver);
+
+
+
+void visor_easyproc_InitDriverEx(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *),
+ void (*write_driver_info)(char *buf,
+ size_t count,
+ loff_t *ppos),
+ void (*write_device_info)(char *buf,
+ size_t count,
+ loff_t *ppos,
+ void *p))
+{
+ visor_easyproc_InitDriver(pdriver, procId,
+ show_driver_info, show_device_info);
+ pdriver->Write_driver_info = write_driver_info;
+ pdriver->Write_device_info = write_device_info;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDriverEx);
+
+
+
+void visor_easyproc_DeInitDriver(struct easyproc_driver_info *pdriver)
+{
+ if (pdriver->ProcDriverDiagFile != NULL) {
+ remove_proc_entry("diag", pdriver->ProcDriverDir);
+ pdriver->ProcDriverDiagFile = NULL;
+ }
+ if (pdriver->ProcDriverDir != NULL) {
+ remove_proc_entry("driver", pdriver->ProcDir);
+ pdriver->ProcDriverDir = NULL;
+ }
+ if (pdriver->ProcDeviceDir != NULL) {
+ remove_proc_entry("device", pdriver->ProcDir);
+ pdriver->ProcDeviceDir = NULL;
+ }
+ if (pdriver->ProcDir != NULL) {
+ remove_proc_entry(pdriver->ProcId, NULL);
+ pdriver->ProcDir = NULL;
+ }
+ pdriver->ProcId = NULL;
+ pdriver->Show_driver_info = NULL;
+ pdriver->Show_device_info = NULL;
+ pdriver->Write_driver_info = NULL;
+ pdriver->Write_device_info = NULL;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_DeInitDriver);
+
+
+
+void visor_easyproc_InitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno,
+ void *devdata)
+{
+ if ((pdriver->ProcDeviceDir != NULL) && (p->procDevicexDir == NULL)) {
+ char s[29];
+ sprintf(s, "%d", devno);
+ p->procDevicexDir = createProcDir(s, pdriver->ProcDeviceDir);
+ p->devno = devno;
+ }
+ p->devdata = devdata;
+ p->pdriver = pdriver;
+ p->devno = devno;
+ if ((p->procDevicexDir != NULL) && (p->procDevicexDiagFile == NULL)) {
+ p->procDevicexDiagFile =
+ proc_create_data("diag", 0, p->procDevicexDir,
+ &proc_fops_device, p);
+ if (p->procDevicexDiagFile == NULL)
+ ERRDEVX(devno, "failed to register /proc/%s/device/%d/diag entry",
+ pdriver->ProcId, devno
+ );
+ }
+ memset(&(p->device_property_info[0]), 0,
+ sizeof(p->device_property_info));
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_InitDevice);
+
+
+
+void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
+ void (*show_property_info)
+ (struct seq_file *, void *),
+ char *property_name)
+{
+ size_t i;
+ struct easyproc_device_property_info *px = NULL;
+
+ if (p->procDevicexDir == NULL) {
+ ERRDRV("state error");
+ return;
+ }
+ for (i = 0; i < ARRAY_SIZE(p->device_property_info); i++) {
+ if (p->device_property_info[i].procEntry == NULL) {
+ px = &(p->device_property_info[i]);
+ break;
+ }
+ }
+ if (!px) {
+ ERRDEVX(p->devno, "too many device properties");
+ return;
+ }
+ px->devdata = p->devdata;
+ px->pdriver = p->pdriver;
+	if (strlen(property_name)+1 > sizeof(px->property_name)) {
+		ERRDEVX(p->devno, "device property name %s too long",
+			property_name);
+		return;
+	}
+	strcpy(px->property_name, property_name);
+	px->procEntry = proc_create_data(property_name, 0, p->procDevicexDir,
+					 &proc_fops_device_property, px);
+	if (px->procEntry == NULL) {
+		ERRDEVX(p->devno, "failed to register /proc/%s/device/%d/%s entry",
+			p->pdriver->ProcId, p->devno, property_name
+			);
+		return;
+	}
+ px->show_device_property_info = show_property_info;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_CreateDeviceProperty);
+
+
+
+void visor_easyproc_DeInitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(p->device_property_info); i++) {
+ if (p->device_property_info[i].procEntry != NULL) {
+ struct easyproc_device_property_info *px =
+ &(p->device_property_info[i]);
+ remove_proc_entry(px->property_name, p->procDevicexDir);
+ px->procEntry = NULL;
+ }
+ }
+ if (p->procDevicexDiagFile != NULL) {
+ remove_proc_entry("diag", p->procDevicexDir);
+ p->procDevicexDiagFile = NULL;
+ }
+ if (p->procDevicexDir != NULL) {
+ char s[29];
+ sprintf(s, "%d", devno);
+ remove_proc_entry(s, pdriver->ProcDeviceDir);
+ p->procDevicexDir = NULL;
+ }
+ p->devdata = NULL;
+ p->pdriver = NULL;
+}
+EXPORT_SYMBOL_GPL(visor_easyproc_DeInitDevice);
+
+
+
+static int seq_show_driver(struct seq_file *seq, void *offset)
+{
+ struct easyproc_driver_info *p =
+ (struct easyproc_driver_info *)(seq->private);
+ if (!p)
+ return 0;
+ (*(p->Show_driver_info))(seq);
+ return 0;
+}
+
+
+
+static int seq_show_device(struct seq_file *seq, void *offset)
+{
+ struct easyproc_device_info *p =
+ (struct easyproc_device_info *)(seq->private);
+ if ((!p) || (!(p->pdriver)))
+ return 0;
+ (*(p->pdriver->Show_device_info))(seq, p->devdata);
+ return 0;
+}
+
+
+
+static int seq_show_device_property(struct seq_file *seq, void *offset)
+{
+ struct easyproc_device_property_info *p =
+ (struct easyproc_device_property_info *)(seq->private);
+ if ((!p) || (!(p->show_device_property_info)))
+ return 0;
+ (*(p->show_device_property_info))(seq, p->devdata);
+ return 0;
+}
+
+
+
+static ssize_t proc_write_driver(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct easyproc_driver_info *p = NULL;
+ char local_buf[256];
+ if (seq == NULL)
+ return 0;
+ p = (struct easyproc_driver_info *)(seq->private);
+ if ((!p) || (!(p->Write_driver_info)))
+ return 0;
+ if (count >= sizeof(local_buf))
+ return -ENOMEM;
+ if (copy_from_user(local_buf, buffer, count))
+ return -EFAULT;
+ local_buf[count] = '\0'; /* be friendly */
+ (*(p->Write_driver_info))(local_buf, count, ppos);
+ return count;
+}
+
+
+
+static ssize_t proc_write_device(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct easyproc_device_info *p = NULL;
+ char local_buf[256];
+ if (seq == NULL)
+ return 0;
+ p = (struct easyproc_device_info *)(seq->private);
+ if ((!p) || (!(p->pdriver)) || (!(p->pdriver->Write_device_info)))
+ return 0;
+ if (count >= sizeof(local_buf))
+ return -ENOMEM;
+ if (copy_from_user(local_buf, buffer, count))
+ return -EFAULT;
+ local_buf[count] = '\0'; /* be friendly */
+ (*(p->pdriver->Write_device_info))(local_buf, count, ppos, p->devdata);
+ return count;
+}
diff --git a/drivers/staging/unisys/visorutil/easyproc.h b/drivers/staging/unisys/visorutil/easyproc.h
new file mode 100644
index 000000000000..1cef1fd33d53
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/easyproc.h
@@ -0,0 +1,92 @@
+/* easyproc.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/** @file *********************************************************************
+ *
+ * This describes the interfaces necessary for a simple /proc file
+ * implementation for a driver.
+ *
+ ******************************************************************************
+ */
+
+#ifndef __EASYPROC_H__
+#define __EASYPROC_H__
+
+#include "timskmod.h"
+
+
+struct easyproc_driver_info {
+ struct proc_dir_entry *ProcDir;
+ struct proc_dir_entry *ProcDriverDir;
+ struct proc_dir_entry *ProcDriverDiagFile;
+ struct proc_dir_entry *ProcDeviceDir;
+ char *ProcId;
+ void (*Show_device_info)(struct seq_file *seq, void *p);
+ void (*Show_driver_info)(struct seq_file *seq);
+ void (*Write_device_info)(char *buf, size_t count,
+ loff_t *ppos, void *p);
+ void (*Write_driver_info)(char *buf, size_t count, loff_t *ppos);
+};
+
+/* property is a file under /proc/<x>/device/<x>/<property_name> */
+struct easyproc_device_property_info {
+ char property_name[25];
+ struct proc_dir_entry *procEntry;
+ struct easyproc_driver_info *pdriver;
+ void *devdata;
+ void (*show_device_property_info)(struct seq_file *seq, void *p);
+};
+
+struct easyproc_device_info {
+ struct proc_dir_entry *procDevicexDir;
+ struct proc_dir_entry *procDevicexDiagFile;
+ struct easyproc_driver_info *pdriver;
+ void *devdata;
+ int devno;
+ /* allow for a number of custom properties for each device: */
+ struct easyproc_device_property_info device_property_info[10];
+};
+
+void visor_easyproc_InitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno,
+ void *devdata);
+void visor_easyproc_DeInitDevice(struct easyproc_driver_info *pdriver,
+ struct easyproc_device_info *p, int devno);
+void visor_easyproc_InitDriver(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *));
+void visor_easyproc_InitDriverEx(struct easyproc_driver_info *pdriver,
+ char *procId,
+ void (*show_driver_info)(struct seq_file *),
+ void (*show_device_info)(struct seq_file *,
+ void *),
+ void (*Write_driver_info)(char *buf,
+ size_t count,
+ loff_t *ppos),
+ void (*Write_device_info)(char *buf,
+ size_t count,
+ loff_t *ppos,
+ void *p));
+void visor_easyproc_DeInitDriver(struct easyproc_driver_info *pdriver);
+void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
+ void (*show_property_info)
+ (struct seq_file *, void *),
+ char *property_name);
+
+#endif
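A hypothetical driver wiring up easyproc might look like the sketch below; every name in it is made up, and it shows where the resulting /proc entries land (/proc/exampledriver/driver/diag and /proc/exampledriver/device/0/diag):

/* Hypothetical easyproc consumer; all names below are illustrative only. */
#include <linux/seq_file.h>
#include "easyproc.h"

static struct easyproc_driver_info example_driver_info;
static struct easyproc_device_info example_device_info;
static char example_proc_id[] = "exampledriver";
static int example_state = 1;

static void example_show_driver_info(struct seq_file *seq)
{
	seq_puts(seq, "exampledriver loaded\n");
}

static void example_show_device_info(struct seq_file *seq, void *devdata)
{
	/* devdata is whatever was handed to visor_easyproc_InitDevice() */
	seq_printf(seq, "state=%d\n", *(int *)devdata);
}

static void example_proc_setup(void)
{
	visor_easyproc_InitDriver(&example_driver_info, example_proc_id,
				  example_show_driver_info,
				  example_show_device_info);
	visor_easyproc_InitDevice(&example_driver_info, &example_device_info,
				  0, &example_state);
}

static void example_proc_teardown(void)
{
	visor_easyproc_DeInitDevice(&example_driver_info,
				    &example_device_info, 0);
	visor_easyproc_DeInitDriver(&example_driver_info);
}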
diff --git a/drivers/staging/unisys/visorutil/memregion.h b/drivers/staging/unisys/visorutil/memregion.h
new file mode 100644
index 000000000000..bb122dbeafbc
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/memregion.h
@@ -0,0 +1,43 @@
+/* memregion.h
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#ifndef __MEMREGION_H__
+#define __MEMREGION_H__
+
+#include "timskmod.h"
+
+/* MEMREGION is an opaque structure to users.
+ * Fields are declared only in the implementation .c files.
+ */
+typedef struct MEMREGION_Tag MEMREGION;
+
+MEMREGION *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
+MEMREGION *visor_memregion_create_overlapped(MEMREGION *parent,
+ ulong offset, ulong nbytes);
+int visor_memregion_resize(MEMREGION *memregion, ulong newsize);
+int visor_memregion_read(MEMREGION *memregion,
+ ulong offset, void *dest, ulong nbytes);
+int visor_memregion_write(MEMREGION *memregion,
+ ulong offset, void *src, ulong nbytes);
+void visor_memregion_destroy(MEMREGION *memregion);
+HOSTADDRESS visor_memregion_get_physaddr(MEMREGION *memregion);
+ulong visor_memregion_get_nbytes(MEMREGION *memregion);
+void memregion_dump(MEMREGION *memregion, char *s,
+ ulong off, ulong len, struct seq_file *seq);
+void *visor_memregion_get_pointer(MEMREGION *memregion);
+
+#endif
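A short (hypothetical) reader built on the API above is sketched here; the guest-physical address passed in and the 64-byte "header" are placeholders, and the kernel type and errno headers are assumed to be available via timskmod.h:

/* Hypothetical reader of channel memory via the memregion API. */
#include <linux/errno.h>
#include "memregion.h"

static int example_peek_channel(HOSTADDRESS channel_physaddr)
{
	u8 header[64];
	MEMREGION *r = visor_memregion_create(channel_physaddr,
					      sizeof(header));

	if (r == NULL)
		return -ENOMEM;
	if (visor_memregion_read(r, 0, header, sizeof(header)) < 0) {
		visor_memregion_destroy(r);
		return -EFAULT;
	}
	/* ... interpret the header bytes ... */
	visor_memregion_destroy(r);
	return 0;
}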
diff --git a/drivers/staging/unisys/visorutil/memregion_direct.c b/drivers/staging/unisys/visorutil/memregion_direct.c
new file mode 100644
index 000000000000..2c1061d55ee9
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/memregion_direct.c
@@ -0,0 +1,223 @@
+/* memregion_direct.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * This is an implementation of memory regions that can be used to read/write
+ * channel memory (in main memory of the host system) from code running in
+ * a virtual partition.
+ */
+#include "uniklog.h"
+#include "timskmod.h"
+#include "memregion.h"
+
+#define MYDRVNAME "memregion"
+
+struct MEMREGION_Tag {
+ HOSTADDRESS physaddr;
+ ulong nbytes;
+ void *mapped;
+ BOOL requested;
+ BOOL overlapped;
+};
+
+static BOOL mapit(MEMREGION *memregion);
+static void unmapit(MEMREGION *memregion);
+
+MEMREGION *
+visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
+{
+ MEMREGION *rc = NULL;
+ MEMREGION *memregion = kzalloc(sizeof(MEMREGION),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (memregion == NULL) {
+ ERRDRV("visor_memregion_create allocation failed");
+ return NULL;
+ }
+ memregion->physaddr = physaddr;
+ memregion->nbytes = nbytes;
+ memregion->overlapped = FALSE;
+ if (!mapit(memregion)) {
+ rc = NULL;
+ goto Away;
+ }
+ rc = memregion;
+Away:
+ if (rc == NULL) {
+ if (memregion != NULL) {
+ visor_memregion_destroy(memregion);
+ memregion = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_create);
+
+MEMREGION *
+visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
+{
+ MEMREGION *memregion = NULL;
+
+ if (parent == NULL) {
+ ERRDRV("%s parent is NULL", __func__);
+ return NULL;
+ }
+ if (parent->mapped == NULL) {
+ ERRDRV("%s parent is not mapped!", __func__);
+ return NULL;
+ }
+ if ((offset >= parent->nbytes) ||
+	    ((offset + nbytes) > parent->nbytes)) {
+ ERRDRV("%s range (%lu,%lu) out of parent range",
+ __func__, offset, nbytes);
+ return NULL;
+ }
+ memregion = kzalloc(sizeof(MEMREGION), GFP_KERNEL|__GFP_NORETRY);
+ if (memregion == NULL) {
+ ERRDRV("%s allocation failed", __func__);
+ return NULL;
+ }
+
+ memregion->physaddr = parent->physaddr + offset;
+ memregion->nbytes = nbytes;
+ memregion->mapped = ((u8 *) (parent->mapped)) + offset;
+ memregion->requested = FALSE;
+ memregion->overlapped = TRUE;
+ return memregion;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_create_overlapped);
+
+
+static BOOL
+mapit(MEMREGION *memregion)
+{
+ ulong physaddr = (ulong) (memregion->physaddr);
+ ulong nbytes = memregion->nbytes;
+
+ memregion->requested = FALSE;
+ if (!request_mem_region(physaddr, nbytes, MYDRVNAME))
+		ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx -- no big deal", physaddr, nbytes);
+ else
+ memregion->requested = TRUE;
+ memregion->mapped = ioremap_cache(physaddr, nbytes);
+ if (memregion->mapped == NULL) {
+ ERRDRV("cannot ioremap_cache channel memory @0x%lx for 0x%lx",
+ physaddr, nbytes);
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static void
+unmapit(MEMREGION *memregion)
+{
+ if (memregion->mapped != NULL) {
+ iounmap(memregion->mapped);
+ memregion->mapped = NULL;
+ }
+ if (memregion->requested) {
+ release_mem_region((ulong) (memregion->physaddr),
+ memregion->nbytes);
+ memregion->requested = FALSE;
+ }
+}
+
+HOSTADDRESS
+visor_memregion_get_physaddr(MEMREGION *memregion)
+{
+ return memregion->physaddr;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_physaddr);
+
+ulong
+visor_memregion_get_nbytes(MEMREGION *memregion)
+{
+ return memregion->nbytes;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_nbytes);
+
+void *
+visor_memregion_get_pointer(MEMREGION *memregion)
+{
+ return memregion->mapped;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_get_pointer);
+
+int
+visor_memregion_resize(MEMREGION *memregion, ulong newsize)
+{
+ if (newsize == memregion->nbytes)
+ return 0;
+ if (memregion->overlapped)
+ /* no error check here - we no longer know the
+ * parent's range!
+ */
+ memregion->nbytes = newsize;
+ else {
+ unmapit(memregion);
+ memregion->nbytes = newsize;
+ if (!mapit(memregion))
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(visor_memregion_resize);
+
+
+static int
+memregion_readwrite(BOOL is_write,
+ MEMREGION *memregion, ulong offset,
+ void *local, ulong nbytes)
+{
+ if (offset + nbytes > memregion->nbytes) {
+ ERRDRV("memregion_readwrite offset out of range!!");
+ return -EFAULT;
+ }
+ if (is_write)
+ memcpy_toio(memregion->mapped + offset, local, nbytes);
+ else
+ memcpy_fromio(local, memregion->mapped + offset, nbytes);
+
+ return 0;
+}
+
+int
+visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
+ ulong nbytes)
+{
+ return memregion_readwrite(FALSE, memregion, offset, dest, nbytes);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_read);
+
+int
+visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
+ ulong nbytes)
+{
+ return memregion_readwrite(TRUE, memregion, offset, src, nbytes);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_write);
+
+void
+visor_memregion_destroy(MEMREGION *memregion)
+{
+ if (memregion == NULL)
+ return;
+ if (!memregion->overlapped)
+ unmapit(memregion);
+ kfree(memregion);
+}
+EXPORT_SYMBOL_GPL(visor_memregion_destroy);
+
diff --git a/drivers/staging/unisys/visorutil/periodic_work.c b/drivers/staging/unisys/visorutil/periodic_work.c
new file mode 100644
index 000000000000..0670a3174682
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/periodic_work.c
@@ -0,0 +1,236 @@
+/* periodic_work.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/*
+ * Helper functions to schedule periodic work in Linux kernel mode.
+ */
+
+#include "uniklog.h"
+#include "timskmod.h"
+#include "periodic_work.h"
+
+#define MYDRVNAME "periodic_work"
+
+
+
+struct PERIODIC_WORK_Tag {
+ rwlock_t lock;
+ struct delayed_work work;
+ void (*workfunc)(void *);
+ void *workfuncarg;
+ BOOL is_scheduled;
+ BOOL want_to_stop;
+ ulong jiffy_interval;
+ struct workqueue_struct *workqueue;
+ const char *devnam;
+};
+
+
+
+static void periodic_work_func(struct work_struct *work)
+{
+ PERIODIC_WORK *periodic_work =
+ container_of(work, struct PERIODIC_WORK_Tag, work.work);
+ (*periodic_work->workfunc)(periodic_work->workfuncarg);
+}
+
+
+
+PERIODIC_WORK *visor_periodic_work_create(ulong jiffy_interval,
+ struct workqueue_struct *workqueue,
+ void (*workfunc)(void *),
+ void *workfuncarg,
+ const char *devnam)
+{
+ PERIODIC_WORK *periodic_work = kzalloc(sizeof(PERIODIC_WORK),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (periodic_work == NULL) {
+		ERRDRV("periodic_work allocation failed");
+ return NULL;
+ }
+ rwlock_init(&periodic_work->lock);
+ periodic_work->jiffy_interval = jiffy_interval;
+ periodic_work->workqueue = workqueue;
+ periodic_work->workfunc = workfunc;
+ periodic_work->workfuncarg = workfuncarg;
+ periodic_work->devnam = devnam;
+ return periodic_work;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_create);
+
+
+
+void visor_periodic_work_destroy(PERIODIC_WORK *periodic_work)
+{
+ if (periodic_work == NULL)
+ return;
+ kfree(periodic_work);
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
+
+
+
+/** Call this from your periodic work worker function to schedule the next
+ * call.
+ * If this function returns FALSE, there was a failure and the
+ * periodic work is no longer scheduled
+ */
+BOOL visor_periodic_work_nextperiod(PERIODIC_WORK *periodic_work)
+{
+ BOOL rc = FALSE;
+ write_lock(&periodic_work->lock);
+ if (periodic_work->want_to_stop) {
+ periodic_work->is_scheduled = FALSE;
+ periodic_work->want_to_stop = FALSE;
+ rc = TRUE; /* yes, TRUE; see visor_periodic_work_stop() */
+ goto Away;
+ } else if (queue_delayed_work(periodic_work->workqueue,
+ &periodic_work->work,
+ periodic_work->jiffy_interval) < 0) {
+ ERRDEV(periodic_work->devnam, "queue_delayed_work failed!");
+ periodic_work->is_scheduled = FALSE;
+ rc = FALSE;
+ goto Away;
+ }
+ rc = TRUE;
+Away:
+ write_unlock(&periodic_work->lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
+
+
+
+/** This function returns TRUE iff new periodic work was actually started.
+ * If this function returns FALSE, then no work was started
+ * (either because it was already started, or because of a failure).
+ */
+BOOL visor_periodic_work_start(PERIODIC_WORK *periodic_work)
+{
+ BOOL rc = FALSE;
+
+ write_lock(&periodic_work->lock);
+ if (periodic_work->is_scheduled) {
+ rc = FALSE;
+ goto Away;
+ }
+ if (periodic_work->want_to_stop) {
+ ERRDEV(periodic_work->devnam,
+ "dev_start_periodic_work failed!");
+ rc = FALSE;
+ goto Away;
+ }
+ INIT_DELAYED_WORK(&periodic_work->work, &periodic_work_func);
+ if (queue_delayed_work(periodic_work->workqueue,
+ &periodic_work->work,
+ periodic_work->jiffy_interval) < 0) {
+ ERRDEV(periodic_work->devnam,
+ "%s queue_delayed_work failed!", __func__);
+ rc = FALSE;
+ goto Away;
+ }
+ periodic_work->is_scheduled = TRUE;
+ rc = TRUE;
+Away:
+ write_unlock(&periodic_work->lock);
+ return rc;
+
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_start);
+
+
+
+
+/** This function returns TRUE iff your call actually stopped the periodic
+ * work.
+ *
+ * -- PAY ATTENTION... this is important --
+ *
+ * NO NO #1
+ *
+ * Do NOT call this function from some function that is running on the
+ * same workqueue as the work you are trying to stop might be running
+ * on! If you violate this rule, visor_periodic_work_stop() MIGHT work,
+ * but it also MIGHT get hung up in an infinite loop saying
+ * "waiting for delayed work...". This will happen if the delayed work
+ * you are trying to cancel has been put in the workqueue list, but can't
+ * run yet because we are running that same workqueue thread right now.
+ *
+ * Bottom line: If you need to call visor_periodic_work_stop() from a
+ * workitem, be sure the workitem is on a DIFFERENT workqueue than the
+ * workitem that you are trying to cancel.
+ *
+ * If I could figure out some way to check for this "no no" condition in
+ * the code, I would. It would have saved me the trouble of writing this
+ * long comment. And also, don't think this is some "theoretical" race
+ * condition. It is REAL, as I have spent the day chasing it.
+ *
+ * NO NO #2
+ *
+ * Take close note of the locks that you own when you call this function.
+ * You must NOT own any locks that are needed by the periodic work
+ * function that is currently installed. If you DO, a deadlock may result,
+ * because stopping the periodic work often involves waiting for the last
+ * iteration of the periodic work function to complete. Again, if you hit
+ * this deadlock, you will get hung up in an infinite loop saying
+ * "waiting for delayed work...".
+ */
+BOOL visor_periodic_work_stop(PERIODIC_WORK *periodic_work)
+{
+ BOOL stopped_something = FALSE;
+
+ write_lock(&periodic_work->lock);
+ stopped_something = periodic_work->is_scheduled &&
+ (!periodic_work->want_to_stop);
+ while (periodic_work->is_scheduled) {
+ periodic_work->want_to_stop = TRUE;
+ if (cancel_delayed_work(&periodic_work->work)) {
+ /* We get here if the delayed work was pending as
+ * delayed work, but was NOT run.
+ */
+ ASSERT(periodic_work->is_scheduled);
+ periodic_work->is_scheduled = FALSE;
+ } else {
+ /* If we get here, either the delayed work:
+ * - was run, OR,
+ * - is running RIGHT NOW on another processor, OR,
+			 * - wasn't even scheduled (there is a minuscule
+ * timing window where this could be the case)
+ * flush_workqueue() would make sure it is finished
+ * executing, but that still isn't very useful, which
+ * explains the loop...
+ */
+ }
+ if (periodic_work->is_scheduled) {
+ write_unlock(&periodic_work->lock);
+ WARNDEV(periodic_work->devnam,
+ "waiting for delayed work...");
+ /* We rely on the delayed work function running here,
+ * and eventually calling
+ * visor_periodic_work_nextperiod(),
+ * which will see that want_to_stop is set, and
+ * subsequently clear is_scheduled.
+ */
+ SLEEPJIFFIES(10);
+ write_lock(&periodic_work->lock);
+ } else
+ periodic_work->want_to_stop = FALSE;
+ }
+ write_unlock(&periodic_work->lock);
+ return stopped_something;
+}
+EXPORT_SYMBOL_GPL(visor_periodic_work_stop);
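A hypothetical consumer of this helper is sketched below: the worker reschedules itself with visor_periodic_work_nextperiod(), and teardown runs outside the worker's own workqueue, per the warnings in visor_periodic_work_stop() above. Names and the 1-second interval are illustrative only.

/* Hypothetical periodic_work consumer. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include "periodic_work.h"

static struct workqueue_struct *example_wq;
static PERIODIC_WORK *example_pw;

static void example_workfunc(void *ctx)
{
	/* ... do the periodic job, using ctx if needed ... */
	if (!visor_periodic_work_nextperiod(example_pw))
		return;	/* FALSE: stop was requested or rescheduling failed */
}

static int example_start(void)
{
	example_wq = create_singlethread_workqueue("example_periodic");
	if (!example_wq)
		return -ENOMEM;
	example_pw = visor_periodic_work_create(msecs_to_jiffies(1000),
						example_wq, example_workfunc,
						NULL, "exampledev");
	if (!example_pw) {
		destroy_workqueue(example_wq);
		return -ENOMEM;
	}
	if (!visor_periodic_work_start(example_pw))
		return -EIO;
	return 0;
}

static void example_stop(void)	/* must not run on example_wq itself */
{
	visor_periodic_work_stop(example_pw);
	visor_periodic_work_destroy(example_pw);
	destroy_workqueue(example_wq);
}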
diff --git a/drivers/staging/unisys/visorutil/procobjecttree.c b/drivers/staging/unisys/visorutil/procobjecttree.c
new file mode 100644
index 000000000000..67a19e1c7b02
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/procobjecttree.c
@@ -0,0 +1,348 @@
+/* procobjecttree.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "procobjecttree.h"
+
+#define MYDRVNAME "procobjecttree"
+
+
+
+/** This is context info that we stash in each /proc file entry, which we
+ * need in order to call the callback function that supplies the /proc read
+ * info for that file.
+ */
+typedef struct {
+ void (*show_property)(struct seq_file *, void *, int);
+ MYPROCOBJECT *procObject;
+ int propertyIndex;
+
+} PROCDIRENTRYCONTEXT;
+
+/** This describes the attributes of a tree rooted at
+ * <procDirRoot>/<name[0]>/<name[1]>/...
+ * Properties for each object of this type will be located under
+ * <procDirRoot>/<name[0]>/<name[1]>/.../<objectName>/<propertyName>.
+ */
+struct MYPROCTYPE_Tag {
+ const char **name; /**< node names for this type, ending with NULL */
+ int nNames; /**< num of node names in <name> */
+
+ /** root dir for this type tree in /proc */
+ struct proc_dir_entry *procDirRoot;
+
+ struct proc_dir_entry **procDirs; /**< for each node in <name> */
+
+ /** bottom dir where objects will be rooted; i.e., this is
+ * <procDirRoot>/<name[0]>/<name[1]>/.../, which is the same as the
+ * last entry in the <procDirs> array. */
+ struct proc_dir_entry *procDir;
+
+ /** name for each property that objects of this type can have */
+ const char **propertyNames;
+
+ int nProperties; /**< num of names in <propertyNames> */
+
+ /** Call this, passing MYPROCOBJECT.context and the property index
+ * whenever someone reads the proc entry */
+ void (*show_property)(struct seq_file *, void *, int);
+};
+
+
+
+struct MYPROCOBJECT_Tag {
+ MYPROCTYPE *type;
+
+ /** This is the name of the dir node in /proc under which the
+ * properties of this object will appear as files. */
+ char *name;
+
+ int namesize; /**< number of bytes allocated for name */
+ void *context; /**< passed to MYPROCTYPE.show_property */
+
+ /** <type.procDirRoot>/<type.name[0]>/<type.name[1]>/.../<name> */
+ struct proc_dir_entry *procDir;
+
+ /** a proc dir entry for each of the properties of the object;
+ * properties are identified in MYPROCTYPE.propertyNames, so each of
+ * the <procDirProperties> describes a single file like
+ * <type.procDirRoot>/<type.name[0]>/<type.name[1]>/...
+ * /<name>/<propertyName>
+ */
+ struct proc_dir_entry **procDirProperties;
+
+ /** this is a holding area for the context information that is needed
+ * to run the /proc callback function */
+ PROCDIRENTRYCONTEXT *procDirPropertyContexts;
+};
+
+
+
+static struct proc_dir_entry *
+createProcDir(const char *name, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *p = proc_mkdir_mode(name, S_IFDIR, parent);
+ if (p == NULL)
+ ERRDRV("failed to create /proc directory %s", name);
+ return p;
+}
+
+static struct proc_dir_entry *
+createProcFile(const char *name, struct proc_dir_entry *parent,
+ const struct file_operations *fops, void *data)
+{
+ struct proc_dir_entry *p = proc_create_data(name, 0, parent,
+ fops, data);
+ if (p == NULL)
+ ERRDRV("failed to create /proc file %s", name);
+ return p;
+}
+
+static int seq_show(struct seq_file *seq, void *offset);
+static int proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations proc_fops = {
+ .open = proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+
+MYPROCTYPE *visor_proc_CreateType(struct proc_dir_entry *procDirRoot,
+ const char **name,
+ const char **propertyNames,
+ void (*show_property)(struct seq_file *,
+ void *, int))
+{
+ int i = 0;
+ MYPROCTYPE *rc = NULL, *type = NULL;
+ struct proc_dir_entry *parent = NULL;
+
+ if (procDirRoot == NULL) {
+ ERRDRV("procDirRoot cannot be NULL!\n");
+ goto Away;
+ }
+ if (name == NULL || name[0] == NULL) {
+ ERRDRV("name must contain at least 1 node name!\n");
+ goto Away;
+ }
+ type = kzalloc(sizeof(MYPROCTYPE), GFP_KERNEL | __GFP_NORETRY);
+ if (type == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ type->name = name;
+ type->propertyNames = propertyNames;
+ type->nProperties = 0;
+ type->nNames = 0;
+ type->show_property = show_property;
+ type->procDirRoot = procDirRoot;
+ if (type->propertyNames != NULL)
+ while (type->propertyNames[type->nProperties] != NULL)
+ type->nProperties++;
+ while (type->name[type->nNames] != NULL)
+ type->nNames++;
+ type->procDirs = kzalloc((type->nNames + 1) *
+ sizeof(struct proc_dir_entry *),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (type->procDirs == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ parent = procDirRoot;
+ for (i = 0; i < type->nNames; i++) {
+ type->procDirs[i] = createProcDir(type->name[i], parent);
+ if (type->procDirs[i] == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ parent = type->procDirs[i];
+ }
+ type->procDir = type->procDirs[type->nNames-1];
+ rc = type;
+Away:
+ if (rc == NULL) {
+ if (type != NULL) {
+ visor_proc_DestroyType(type);
+ type = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_proc_CreateType);
+
+
+
+void visor_proc_DestroyType(MYPROCTYPE *type)
+{
+ if (type == NULL)
+ return;
+ if (type->procDirs != NULL) {
+ int i = type->nNames-1;
+ while (i >= 0) {
+ if (type->procDirs[i] != NULL) {
+ struct proc_dir_entry *parent = NULL;
+ if (i == 0)
+ parent = type->procDirRoot;
+ else
+ parent = type->procDirs[i-1];
+ remove_proc_entry(type->name[i], parent);
+ }
+ i--;
+ }
+ kfree(type->procDirs);
+ type->procDirs = NULL;
+ }
+ kfree(type);
+}
+EXPORT_SYMBOL_GPL(visor_proc_DestroyType);
+
+
+
+MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type,
+ const char *name, void *context)
+{
+ MYPROCOBJECT *obj = NULL, *rc = NULL;
+ int i = 0;
+
+ if (type == NULL) {
+ ERRDRV("type cannot be NULL\n");
+ goto Away;
+ }
+ obj = kzalloc(sizeof(MYPROCOBJECT), GFP_KERNEL | __GFP_NORETRY);
+ if (obj == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ obj->type = type;
+ obj->context = context;
+ if (name == NULL) {
+ obj->name = NULL;
+ obj->procDir = type->procDir;
+ } else {
+ obj->namesize = strlen(name)+1;
+ obj->name = kmalloc(obj->namesize, GFP_KERNEL | __GFP_NORETRY);
+ if (obj->name == NULL) {
+ obj->namesize = 0;
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ strcpy(obj->name, name);
+ obj->procDir = createProcDir(obj->name, type->procDir);
+ if (obj->procDir == NULL) {
+ goto Away;
+ }
+ }
+ obj->procDirPropertyContexts =
+ kzalloc((type->nProperties + 1) * sizeof(PROCDIRENTRYCONTEXT),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (obj->procDirPropertyContexts == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ obj->procDirProperties =
+ kzalloc((type->nProperties + 1) * sizeof(struct proc_dir_entry *),
+ GFP_KERNEL | __GFP_NORETRY);
+ if (obj->procDirProperties == NULL) {
+ ERRDRV("out of memory\n");
+ goto Away;
+ }
+ for (i = 0; i < type->nProperties; i++) {
+ obj->procDirPropertyContexts[i].procObject = obj;
+ obj->procDirPropertyContexts[i].propertyIndex = i;
+ obj->procDirPropertyContexts[i].show_property =
+ type->show_property;
+ if (type->propertyNames[i][0] != '\0') {
+ /* only create properties that have names */
+ obj->procDirProperties[i] =
+ createProcFile(type->propertyNames[i],
+ obj->procDir, &proc_fops,
+ &obj->procDirPropertyContexts[i]);
+ if (obj->procDirProperties[i] == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ }
+ }
+ rc = obj;
+Away:
+ if (rc == NULL) {
+ if (obj != NULL) {
+ visor_proc_DestroyObject(obj);
+ obj = NULL;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_proc_CreateObject);
+
+
+
+void visor_proc_DestroyObject(MYPROCOBJECT *obj)
+{
+ MYPROCTYPE *type = NULL;
+ if (obj == NULL)
+ return;
+ type = obj->type;
+ if (type == NULL)
+ return;
+ if (obj->procDirProperties != NULL) {
+ int i = 0;
+ for (i = 0; i < type->nProperties; i++) {
+ if (obj->procDirProperties[i] != NULL) {
+ remove_proc_entry(type->propertyNames[i],
+ obj->procDir);
+ obj->procDirProperties[i] = NULL;
+ }
+ }
+ kfree(obj->procDirProperties);
+ obj->procDirProperties = NULL;
+ }
+ if (obj->procDirPropertyContexts != NULL) {
+ kfree(obj->procDirPropertyContexts);
+ obj->procDirPropertyContexts = NULL;
+ }
+ if (obj->procDir != NULL) {
+ if (obj->name != NULL)
+ remove_proc_entry(obj->name, type->procDir);
+ obj->procDir = NULL;
+ }
+ if (obj->name != NULL) {
+ kfree(obj->name);
+ obj->name = NULL;
+ }
+ kfree(obj);
+}
+EXPORT_SYMBOL_GPL(visor_proc_DestroyObject);
+
+
+
+static int seq_show(struct seq_file *seq, void *offset)
+{
+ PROCDIRENTRYCONTEXT *ctx = (PROCDIRENTRYCONTEXT *)(seq->private);
+ if (ctx == NULL) {
+ ERRDRV("I don't have a freakin' clue...");
+ return 0;
+ }
+ (*ctx->show_property)(seq, ctx->procObject->context,
+ ctx->propertyIndex);
+ return 0;
+}
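A hypothetical use of this object tree (directory and property names below are made up) creates <root>/partition/<objname>/{state,id}; each read of one of those files calls the supplied show callback with the object's context and the property index:

/* Hypothetical procobjecttree consumer. */
#include <linux/seq_file.h>
#include "procobjecttree.h"

static const char *ExampleTypeNames[] = { "partition", NULL };
static const char *ExamplePropertyNames[] = { "state", "id", NULL };

static MYPROCTYPE *example_type;
static MYPROCOBJECT *example_obj;
static int example_context = 7;

static void show_example_property(struct seq_file *seq, void *ctx, int prop)
{
	switch (prop) {
	case 0:		/* "state" */
		seq_puts(seq, "running\n");
		break;
	case 1:		/* "id" */
		seq_printf(seq, "%d\n", *(int *)ctx);
		break;
	default:
		break;
	}
}

static void example_tree_setup(struct proc_dir_entry *root)
{
	example_type = visor_proc_CreateType(root, ExampleTypeNames,
					     ExamplePropertyNames,
					     &show_example_property);
	example_obj = visor_proc_CreateObject(example_type, "0",
					      &example_context);
}

static void example_tree_teardown(void)
{
	visor_proc_DestroyObject(example_obj);
	visor_proc_DestroyType(example_type);
}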
diff --git a/drivers/staging/unisys/visorutil/visorkmodutils.c b/drivers/staging/unisys/visorutil/visorkmodutils.c
new file mode 100644
index 000000000000..a7d1e94ca3c3
--- /dev/null
+++ b/drivers/staging/unisys/visorutil/visorkmodutils.c
@@ -0,0 +1,71 @@
+/* visorkmodutils.c
+ *
+ * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include "uniklog.h"
+#include "timskmod.h"
+
+#define MYDRVNAME "timskmodutils"
+
+/** Callers to interfaces that set __GFP_NORETRY flag below
+ * must check for a NULL (error) result as we are telling the
+ * kernel interface that it is okay to fail.
+ */
+
+void *kmalloc_kernel(size_t siz)
+{
+ return kmalloc(siz, GFP_KERNEL | __GFP_NORETRY);
+}
+
+/* Use these handy-dandy seq_file_xxx functions if you want to call some
+ * functions that write stuff into a seq_file, but you actually just want
+ * to dump that output into a buffer. Use them as follows:
+ * - call visor_seq_file_new_buffer to create the seq_file (you supply the buf)
+ * - call whatever functions you want that take a seq_file as an argument
+ * (the buf you supplied will get the output data)
+ * - call visor_seq_file_done_buffer to dispose of your seq_file
+ */
+struct seq_file *visor_seq_file_new_buffer(void *buf, size_t buf_size)
+{
+ struct seq_file *rc = NULL;
+ struct seq_file *m = kmalloc_kernel(sizeof(struct seq_file));
+
+ if (m == NULL) {
+ rc = NULL;
+ goto Away;
+ }
+ memset(m, 0, sizeof(struct seq_file));
+ m->buf = buf;
+ m->size = buf_size;
+ rc = m;
+Away:
+ if (rc == NULL) {
+ visor_seq_file_done_buffer(m);
+ m = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(visor_seq_file_new_buffer);
+
+
+
+void visor_seq_file_done_buffer(struct seq_file *m)
+{
+ if (!m)
+ return;
+ kfree(m);
+}
+EXPORT_SYMBOL_GPL(visor_seq_file_done_buffer);
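Following the usage steps in the comment above, a hypothetical caller that captures seq_file output into a plain buffer could look like this; show_status() stands in for any routine that prints via seq_printf(), and the helper prototypes are assumed to be available through timskmod.h:

/* Hypothetical use of the buffer-backed seq_file helpers. */
static void example_dump_status(void)
{
	char buf[512];
	struct seq_file *seq = visor_seq_file_new_buffer(buf, sizeof(buf));

	if (seq == NULL)
		return;
	show_status(seq);	/* hypothetical seq_printf()-based routine */
	pr_info("status: %.*s\n", (int)seq->count, buf);
	visor_seq_file_done_buffer(seq);
}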
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 886000980474..bd99e9e47e50 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -1,7 +1,6 @@
config USBIP_CORE
tristate "USB/IP support"
depends on USB && NET
- default N
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
@@ -18,7 +17,6 @@ config USBIP_CORE
config USBIP_VHCI_HCD
tristate "VHCI hcd"
depends on USBIP_CORE
- default N
---help---
This enables the USB/IP virtual host controller driver,
which is run on the remote machine.
@@ -29,7 +27,6 @@ config USBIP_VHCI_HCD
config USBIP_HOST
tristate "Host driver"
depends on USBIP_CORE
- default N
---help---
This enables the USB/IP host driver, which is run on the
machine that is sharing the USB devices.
@@ -40,6 +37,5 @@ config USBIP_HOST
config USBIP_DEBUG
bool "Debug messages for USB/IP"
depends on USBIP_CORE
- default N
---help---
This enables the debug messages from the USB/IP drivers.
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index a73e437ec215..266e2b0ce9a8 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -86,6 +86,7 @@ struct bus_id_priv {
char status;
int interf_count;
struct stub_device *sdev;
+ struct usb_device *udev;
char shutdown_busid;
};
@@ -93,7 +94,7 @@ struct bus_id_priv {
extern struct kmem_cache *stub_priv_cache;
/* stub_dev.c */
-extern struct usb_driver stub_driver;
+extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 76a1ff0e6275..773d8ca07a00 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -87,13 +87,16 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
int sockfd = 0;
struct socket *socket;
ssize_t err = -EINVAL;
+ int rv;
if (!sdev) {
dev_err(dev, "sdev is null\n");
return -ENODEV;
}
- sscanf(buf, "%d", &sockfd);
+ rv = sscanf(buf, "%d", &sockfd);
+ if (rv != 1)
+ return -EINVAL;
if (sockfd != -1) {
dev_info(dev, "stub up\n");
@@ -279,21 +282,19 @@ static void stub_device_unusable(struct usbip_device *ud)
*
* Allocates and initializes a new stub_device struct.
*/
-static struct stub_device *stub_device_alloc(struct usb_device *udev,
- struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev)
{
struct stub_device *sdev;
- int busnum = interface_to_busnum(interface);
- int devnum = interface_to_devnum(interface);
+ int busnum = udev->bus->busnum;
+ int devnum = udev->devnum;
- dev_dbg(&interface->dev, "allocating stub device");
+ dev_dbg(&udev->dev, "allocating stub device");
/* yes, it's a new device */
sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL);
if (!sdev)
return NULL;
- sdev->interface = usb_get_intf(interface);
sdev->udev = usb_get_dev(udev);
/*
@@ -322,7 +323,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev,
usbip_start_eh(&sdev->ud);
- dev_dbg(&interface->dev, "register new interface\n");
+ dev_dbg(&udev->dev, "register new device\n");
return sdev;
}
@@ -332,32 +333,21 @@ static void stub_device_free(struct stub_device *sdev)
kfree(sdev);
}
-/*
- * If a usb device has multiple active interfaces, this driver is bound to all
- * the active interfaces. However, usbip exports *a* usb device (i.e., not *an*
- * active interface). Currently, a userland program must ensure that it
- * looks at the usbip's sysfs entries of only the first active interface.
- *
- * TODO: use "struct usb_device_driver" to bind a usb device.
- * However, it seems it is not fully supported in mainline kernel yet
- * (2.6.19.2).
- */
-static int stub_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+static int stub_probe(struct usb_device *udev)
{
- struct usb_device *udev = interface_to_usbdev(interface);
struct stub_device *sdev = NULL;
- const char *udev_busid = dev_name(interface->dev.parent);
+ const char *udev_busid = dev_name(&udev->dev);
int err = 0;
struct bus_id_priv *busid_priv;
+ int rc;
- dev_dbg(&interface->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
(busid_priv->status == STUB_BUSID_OTHER)) {
- dev_info(&interface->dev,
+ dev_info(&udev->dev,
"%s is not in match_busid table... skip!\n",
udev_busid);
@@ -383,60 +373,41 @@ static int stub_probe(struct usb_interface *interface,
return -ENODEV;
}
- if (busid_priv->status == STUB_BUSID_ALLOC) {
- sdev = busid_priv->sdev;
- if (!sdev)
- return -ENODEV;
-
- busid_priv->interf_count++;
- dev_info(&interface->dev,
- "usbip-host: register new interface (bus %u dev %u ifn %u)\n",
- udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
-
- /* set private data to usb_interface */
- usb_set_intfdata(interface, sdev);
-
- err = stub_add_files(&interface->dev);
- if (err) {
- dev_err(&interface->dev, "stub_add_files for %s\n",
- udev_busid);
- usb_set_intfdata(interface, NULL);
- busid_priv->interf_count--;
- return err;
- }
-
- usb_get_intf(interface);
- return 0;
- }
-
/* ok, this is my device */
- sdev = stub_device_alloc(udev, interface);
+ sdev = stub_device_alloc(udev);
if (!sdev)
return -ENOMEM;
- dev_info(&interface->dev,
- "usbip-host: register new device (bus %u dev %u ifn %u)\n",
- udev->bus->busnum, udev->devnum,
- interface->cur_altsetting->desc.bInterfaceNumber);
+ dev_info(&udev->dev,
+ "usbip-host: register new device (bus %u dev %u)\n",
+ udev->bus->busnum, udev->devnum);
- busid_priv->interf_count = 0;
busid_priv->shutdown_busid = 0;
- /* set private data to usb_interface */
- usb_set_intfdata(interface, sdev);
- busid_priv->interf_count++;
+ /* set private data to usb_device */
+ dev_set_drvdata(&udev->dev, sdev);
busid_priv->sdev = sdev;
+ busid_priv->udev = udev;
+
+ /*
+ * Claim this hub port.
+ * It doesn't matter what value we pass as owner
+ * (struct usb_dev_state *) as long as it is unique.
+ */
+ rc = usb_hub_claim_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+ if (rc) {
+ dev_dbg(&udev->dev, "unable to claim port\n");
+ return rc;
+ }
- err = stub_add_files(&interface->dev);
+ err = stub_add_files(&udev->dev);
if (err) {
- dev_err(&interface->dev, "stub_add_files for %s\n", udev_busid);
- usb_set_intfdata(interface, NULL);
- usb_put_intf(interface);
+ dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid);
+ dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(udev);
kthread_stop_put(sdev->ud.eh);
- busid_priv->interf_count = 0;
busid_priv->sdev = NULL;
stub_device_free(sdev);
return err;
@@ -461,13 +432,14 @@ static void shutdown_busid(struct bus_id_priv *busid_priv)
* called in usb_disconnect() or usb_deregister()
* but only if actconfig(active configuration) exists
*/
-static void stub_disconnect(struct usb_interface *interface)
+static void stub_disconnect(struct usb_device *udev)
{
struct stub_device *sdev;
- const char *udev_busid = dev_name(interface->dev.parent);
+ const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
+ int rc;
- dev_dbg(&interface->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -475,41 +447,37 @@ static void stub_disconnect(struct usb_interface *interface)
return;
}
- sdev = usb_get_intfdata(interface);
+ sdev = dev_get_drvdata(&udev->dev);
/* get stub_device */
if (!sdev) {
- dev_err(&interface->dev, "could not get device");
+ dev_err(&udev->dev, "could not get device");
return;
}
- usb_set_intfdata(interface, NULL);
+ dev_set_drvdata(&udev->dev, NULL);
/*
* NOTE: rx/tx threads are invoked for each usb_device.
*/
- stub_remove_files(&interface->dev);
+ stub_remove_files(&udev->dev);
- /* If usb reset is called from event handler */
- if (busid_priv->sdev->ud.eh == current) {
- busid_priv->interf_count--;
+ /* release port */
+ rc = usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+ if (rc) {
+ dev_dbg(&udev->dev, "unable to release port\n");
return;
}
- if (busid_priv->interf_count > 1) {
- busid_priv->interf_count--;
- shutdown_busid(busid_priv);
- usb_put_intf(interface);
+ /* If usb reset is called from event handler */
+ if (busid_priv->sdev->ud.eh == current)
return;
- }
-
- busid_priv->interf_count = 0;
/* shutdown the current connection */
shutdown_busid(busid_priv);
usb_put_dev(sdev->udev);
- usb_put_intf(interface);
/* free sdev */
busid_priv->sdev = NULL;
@@ -523,28 +491,34 @@ static void stub_disconnect(struct usb_interface *interface)
}
}
-/*
- * Presence of pre_reset and post_reset prevents the driver from being unbound
- * when the device is being reset
- */
+#ifdef CONFIG_PM
-static int stub_pre_reset(struct usb_interface *interface)
+/* These functions need usb_port_suspend and usb_port_resume,
+ * which reside in drivers/usb/core/usb.h. Skip for now. */
+
+static int stub_suspend(struct usb_device *udev, pm_message_t message)
{
- dev_dbg(&interface->dev, "pre_reset\n");
+ dev_dbg(&udev->dev, "stub_suspend\n");
+
return 0;
}
-static int stub_post_reset(struct usb_interface *interface)
+static int stub_resume(struct usb_device *udev, pm_message_t message)
{
- dev_dbg(&interface->dev, "post_reset\n");
+ dev_dbg(&udev->dev, "stub_resume\n");
+
return 0;
}
-struct usb_driver stub_driver = {
+#endif /* CONFIG_PM */
+
+struct usb_device_driver stub_driver = {
.name = "usbip-host",
.probe = stub_probe,
.disconnect = stub_disconnect,
- .id_table = stub_table,
- .pre_reset = stub_pre_reset,
- .post_reset = stub_post_reset,
+#ifdef CONFIG_PM
+ .suspend = stub_suspend,
+ .resume = stub_resume,
+#endif
+ .supports_autosuspend = 0,
};
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index baf857f7cc88..9c5832abbdf1 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/module.h>
+#include <linux/device.h>
#include "usbip_common.h"
#include "stub.h"
@@ -187,6 +188,34 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
store_match_busid);
+static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ size_t count)
+{
+ int ret;
+ int len;
+ struct bus_id_priv *bid;
+
+ /* buf length should be less than BUSID_SIZE */
+ len = strnlen(buf, BUSID_SIZE);
+
+ if (!(len < BUSID_SIZE))
+ return -EINVAL;
+
+ bid = get_busid_priv(buf);
+ if (!bid)
+ return -ENODEV;
+
+ ret = device_attach(&bid->udev->dev);
+ if (ret < 0) {
+ dev_err(&bid->udev->dev, "rebind failed\n");
+ return ret;
+ }
+
+ return count;
+}
+
+static DRIVER_ATTR_WO(rebind);
+
static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead)
{
struct stub_priv *priv, *tmp;
@@ -254,7 +283,7 @@ static int __init usbip_host_init(void)
return -ENOMEM;
}
- ret = usb_register(&stub_driver);
+ ret = usb_register_device_driver(&stub_driver, THIS_MODULE);
if (ret) {
pr_err("usb_register failed %d\n", ret);
goto err_usb_register;
@@ -267,11 +296,18 @@ static int __init usbip_host_init(void)
goto err_create_file;
}
+ ret = driver_create_file(&stub_driver.drvwrap.driver,
+ &driver_attr_rebind);
+ if (ret) {
+ pr_err("driver_create_file failed\n");
+ goto err_create_file;
+ }
+
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return ret;
err_create_file:
- usb_deregister(&stub_driver);
+ usb_deregister_device_driver(&stub_driver);
err_usb_register:
kmem_cache_destroy(stub_priv_cache);
return ret;
@@ -282,11 +318,14 @@ static void __exit usbip_host_exit(void)
driver_remove_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
+ driver_remove_file(&stub_driver.drvwrap.driver,
+ &driver_attr_rebind);
+
/*
* deregister() calls stub_disconnect() for all devices. Device
* specific data is cleared in stub_disconnect().
*/
- usb_deregister(&stub_driver);
+ usb_deregister_device_driver(&stub_driver);
kmem_cache_destroy(stub_priv_cache);
}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 5d1d4a183300..e0b6d6b42728 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -142,31 +142,19 @@ static int tweak_set_interface_cmd(struct urb *urb)
static int tweak_set_configuration_cmd(struct urb *urb)
{
+ struct stub_priv *priv = (struct stub_priv *) urb->context;
+ struct stub_device *sdev = priv->sdev;
struct usb_ctrlrequest *req;
__u16 config;
+ int err;
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
- /*
- * I have never seen a multi-config device. Very rare.
- * For most devices, this will be called to choose a default
- * configuration only once in an initialization phase.
- *
- * set_configuration may change a device configuration and its device
- * drivers will be unbound and assigned for a new device configuration.
- * This means this usbip driver will be also unbound when called, then
- * eventually reassigned to the device as far as driver matching
- * condition is kept.
- *
- * Unfortunately, an existing usbip connection will be dropped
- * due to this driver unbinding. So, skip here.
- * A user may need to set a special configuration value before
- * exporting the device.
- */
- dev_info(&urb->dev->dev, "usb_set_configuration %d to %s... skip!\n",
- config, dev_name(&urb->dev->dev));
-
+ err = usb_set_configuration(sdev->udev, config);
+ if (err && err != -ENODEV)
+ dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ config, err);
return 0;
}
@@ -550,7 +538,7 @@ static void stub_rx_pdu(struct usbip_device *ud)
int ret;
struct usbip_header pdu;
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
- struct device *dev = &sdev->interface->dev;
+ struct device *dev = &sdev->udev->dev;
usbip_dbg_stub_rx("Enter\n");
diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c
index cd5326ae38cc..1622563823a3 100644
--- a/drivers/staging/usbip/stub_tx.c
+++ b/drivers/staging/usbip/stub_tx.c
@@ -74,12 +74,12 @@ void stub_complete(struct urb *urb)
/* OK */
break;
case -ENOENT:
- dev_info(&urb->dev->dev, "stopped by a call to usb_kill_urb() "
- "because of cleaning up a virtual connection\n");
+ dev_info(&urb->dev->dev,
+ "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
return;
case -ECONNRESET:
- dev_info(&urb->dev->dev, "unlinked by a call to "
- "usb_unlink_urb()\n");
+ dev_info(&urb->dev->dev,
+ "unlinked by a call to usb_unlink_urb()\n");
break;
case -EPIPE:
dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
@@ -89,8 +89,9 @@ void stub_complete(struct urb *urb)
dev_info(&urb->dev->dev, "device removed?\n");
break;
default:
- dev_info(&urb->dev->dev, "urb completion with non-zero status "
- "%d\n", urb->status);
+ dev_info(&urb->dev->dev,
+ "urb completion with non-zero status %d\n",
+ urb->status);
break;
}
@@ -228,8 +229,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
if (txsize != sizeof(pdu_header) + urb->actual_length) {
dev_err(&sdev->interface->dev,
- "actual length of urb %d does not "
- "match iso packet sizes %zu\n",
+ "actual length of urb %d does not match iso packet sizes %zu\n",
urb->actual_length,
txsize-sizeof(pdu_header));
kfree(iov);
diff --git a/drivers/staging/usbip/uapi/usbip.h b/drivers/staging/usbip/uapi/usbip.h
new file mode 100644
index 000000000000..fa5db30ede36
--- /dev/null
+++ b/drivers/staging/usbip/uapi/usbip.h
@@ -0,0 +1,26 @@
+/*
+ * usbip.h
+ *
+ * USBIP uapi defines and function prototypes etc.
+*/
+
+#ifndef _UAPI_LINUX_USBIP_H
+#define _UAPI_LINUX_USBIP_H
+
+/* usbip device status - exported in usbip device sysfs status */
+enum usbip_device_status {
+ /* sdev is available. */
+ SDEV_ST_AVAILABLE = 0x01,
+ /* sdev is now used. */
+ SDEV_ST_USED,
+ /* sdev is unusable because of a fatal error. */
+ SDEV_ST_ERROR,
+
+ /* vdev does not connect a remote device. */
+ VDEV_ST_NULL,
+ /* vdev is used, but the USB address is not assigned yet */
+ VDEV_ST_NOTASSIGNED,
+ VDEV_ST_USED,
+ VDEV_ST_ERROR
+};
+#endif /* _UAPI_LINUX_USBIP_H */
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 4470cd321d65..184fa70365db 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -55,7 +55,8 @@ static ssize_t usbip_debug_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- sscanf(buf, "%lx", &usbip_debug_flag);
+ if (sscanf(buf, "%lx", &usbip_debug_flag) != 1)
+ return -EINVAL;
return count;
}
DEVICE_ATTR_RW(usbip_debug);
@@ -99,26 +100,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
struct device *dev = &udev->dev;
int i;
- dev_dbg(dev, " devnum(%d) devpath(%s) ",
- udev->devnum, udev->devpath);
-
- switch (udev->speed) {
- case USB_SPEED_HIGH:
- pr_debug("SPD_HIGH ");
- break;
- case USB_SPEED_FULL:
- pr_debug("SPD_FULL ");
- break;
- case USB_SPEED_LOW:
- pr_debug("SPD_LOW ");
- break;
- case USB_SPEED_UNKNOWN:
- pr_debug("SPD_UNKNOWN ");
- break;
- default:
- pr_debug("SPD_ERROR ");
- break;
- }
+ dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
+ udev->devnum, udev->devpath, usb_speed_string(udev->speed));
pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
@@ -195,8 +178,8 @@ static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
}
pr_debug(" ");
- pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) "
- "wLength(%04X) ", cmd->bRequestType, cmd->bRequest,
+ pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) wLength(%04X) ",
+ cmd->bRequestType, cmd->bRequest,
cmd->wValue, cmd->wIndex, cmd->wLength);
pr_debug("\n ");
@@ -307,8 +290,7 @@ void usbip_dump_header(struct usbip_header *pdu)
switch (pdu->base.command) {
case USBIP_CMD_SUBMIT:
- pr_debug("USBIP_CMD_SUBMIT: "
- "x_flags %u x_len %u sf %u #p %d iv %d\n",
+ pr_debug("USBIP_CMD_SUBMIT: x_flags %u x_len %u sf %u #p %d iv %d\n",
pdu->u.cmd_submit.transfer_flags,
pdu->u.cmd_submit.transfer_buffer_length,
pdu->u.cmd_submit.start_frame,
@@ -367,7 +349,6 @@ int usbip_recv(struct socket *sock, void *buf, int size)
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_namelen = 0;
msg.msg_flags = MSG_NOSIGNAL;
result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
@@ -706,8 +687,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
if (total_length != urb->actual_length) {
dev_err(&urb->dev->dev,
- "total length of iso packets %d not equal to actual "
- "length of buffer %d\n",
+ "total length of iso packets %d not equal to actual length of buffer %d\n",
total_length, urb->actual_length);
if (ud->side == USBIP_STUB)
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 7e6c5436d972..732fb636a1e5 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/wait.h>
+#include "uapi/usbip.h"
#define USBIP_VERSION "1.0.0"
@@ -235,22 +236,6 @@ enum usbip_side {
USBIP_STUB,
};
-enum usbip_status {
- /* sdev is available. */
- SDEV_ST_AVAILABLE = 0x01,
- /* sdev is now used. */
- SDEV_ST_USED,
- /* sdev is unusable because of a fatal error. */
- SDEV_ST_ERROR,
-
- /* vdev does not connect a remote device. */
- VDEV_ST_NULL,
- /* vdev is used, but the USB address is not assigned yet */
- VDEV_ST_NOTASSIGNED,
- VDEV_ST_USED,
- VDEV_ST_ERROR
-};
-
/* event handler */
#define USBIP_EH_SHUTDOWN (1 << 0)
#define USBIP_EH_BYE (1 << 1)
@@ -271,7 +256,7 @@ enum usbip_status {
/* a common structure for stub_device and vhci_device */
struct usbip_device {
enum usbip_side side;
- enum usbip_status status;
+ enum usbip_device_status status;
/* lock for status */
spinlock_t lock;
diff --git a/drivers/staging/usbip/userspace/README b/drivers/staging/usbip/userspace/README
index 00a1658b272b..f528ba462b5f 100644
--- a/drivers/staging/usbip/userspace/README
+++ b/drivers/staging/usbip/userspace/README
@@ -9,8 +9,8 @@
- USB/IP device drivers
Found in the staging directory of the Linux kernel.
- - sysfsutils >= 2.0.0
- sysfsutils library
+ - libudev >= 2.0
+ libudev library
- libwrap0-dev
tcp wrapper library
@@ -19,6 +19,10 @@
- libtool, automake >= 1.9, autoconf >= 2.5.0, pkg-config
+[Optional]
+ - hwdata
+ Contains USB device identification data.
+
[Install]
0. Generate configuration scripts.
diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac
index 0ee5d9263996..607d05c5ccfd 100644
--- a/drivers/staging/usbip/userspace/configure.ac
+++ b/drivers/staging/usbip/userspace/configure.ac
@@ -1,7 +1,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
-AC_INIT([usbip-utils], [1.1.1], [linux-usb@vger.kernel.org])
+AC_INIT([usbip-utils], [2.0], [linux-usb@vger.kernel.org])
AC_DEFINE([USBIP_VERSION], [0x00000111], [binary-coded decimal version number])
CURRENT=0
@@ -44,11 +44,11 @@ AC_FUNC_REALLOC
AC_CHECK_FUNCS([memset mkdir regcomp socket strchr strerror strstr dnl
strtoul])
-AC_CHECK_HEADER([sysfs/libsysfs.h],
- [AC_CHECK_LIB([sysfs], [sysfs_open_directory_list],
- [LIBS="$LIBS -lsysfs"],
- [AC_MSG_ERROR([Missing sysfs2 library!])])],
- [AC_MSG_ERROR([Missing /usr/include/sysfs/libsysfs.h])])
+AC_CHECK_HEADER([libudev.h],
+ [AC_CHECK_LIB([udev], [udev_new],
+ [LIBS="$LIBS -ludev"],
+ [AC_MSG_ERROR([Missing udev library!])])],
+ [AC_MSG_ERROR([Missing /usr/include/libudev.h])])
# Checks for libwrap library.
AC_MSG_CHECKING([whether to use the libwrap (TCP wrappers) library])
diff --git a/drivers/staging/usbip/userspace/libsrc/Makefile.am b/drivers/staging/usbip/userspace/libsrc/Makefile.am
index 4921189e0260..7c8f8a4d54e4 100644
--- a/drivers/staging/usbip/userspace/libsrc/Makefile.am
+++ b/drivers/staging/usbip/userspace/libsrc/Makefile.am
@@ -4,4 +4,5 @@ libusbip_la_LDFLAGS = -version-info @LIBUSBIP_VERSION@
lib_LTLIBRARIES := libusbip.la
libusbip_la_SOURCES := names.c names.h usbip_host_driver.c usbip_host_driver.h \
- usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h
+ usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h \
+ sysfs_utils.c sysfs_utils.h
diff --git a/drivers/staging/usbip/userspace/libsrc/list.h b/drivers/staging/usbip/userspace/libsrc/list.h
new file mode 100644
index 000000000000..8d0c936e184f
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/list.h
@@ -0,0 +1,136 @@
+#ifndef _LIST_H
+#define _LIST_H
+
+/* Stripped down implementation of linked list taken
+ * from the Linux Kernel.
+ */
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+#define POISON_POINTER_DELTA 0
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+#endif
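[Editor's aside, not part of the patch: a minimal userspace sketch of how this list API is meant to be used, mirroring the way usbip_host_driver keeps its edev_list later in this series; struct item and its values are hypothetical.]

	#include <stdio.h>
	#include "list.h"

	struct item {
		int value;
		struct list_head node;	/* embedded link, as in usbip_exported_device */
	};

	static void list_example(void)
	{
		LIST_HEAD(items);
		struct item a = { .value = 1 }, b = { .value = 2 };
		struct list_head *pos, *tmp;

		list_add(&a.node, &items);
		list_add(&b.node, &items);

		/* the _safe variant allows list_del() while walking */
		list_for_each_safe(pos, tmp, &items) {
			struct item *it = list_entry(pos, struct item, node);

			printf("%d\n", it->value);
			list_del(pos);
		}
	}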
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c
new file mode 100644
index 000000000000..36ac88ece0b8
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c
@@ -0,0 +1,31 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "sysfs_utils.h"
+#include "usbip_common.h"
+
+int write_sysfs_attribute(const char *attr_path, const char *new_value,
+ size_t len)
+{
+ int fd;
+ int length;
+
+ fd = open(attr_path, O_WRONLY);
+ if (fd < 0) {
+ dbg("error opening attribute %s", attr_path);
+ return -1;
+ }
+
+ length = write(fd, new_value, len);
+ if (length < 0) {
+ dbg("error writing to attribute %s", attr_path);
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
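[Editor's aside, not part of the patch: a minimal sketch of how later hunks in this series use this helper, e.g. handing a connected socket to the usbip-host driver; the busid 1-2 and therefore the path are illustrative, not fixed values.]

	#include <stdio.h>
	#include <string.h>
	#include "sysfs_utils.h"

	/* Sketch only: real callers build the path from the device's udev syspath. */
	static int export_example(int sockfd)
	{
		char value[16];
		const char *path = "/sys/bus/usb/devices/1-2/usbip_sockfd";

		snprintf(value, sizeof(value), "%d\n", sockfd);
		return write_sysfs_attribute(path, value, strlen(value));
	}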
diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h
new file mode 100644
index 000000000000..32ac1d105d18
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h
@@ -0,0 +1,8 @@
+
+#ifndef __SYSFS_UTILS_H
+#define __SYSFS_UTILS_H
+
+int write_sysfs_attribute(const char *attr_path, const char *new_value,
+ size_t len);
+
+#endif
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
index 66f03cc62ac6..238bf5bf7f4c 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
@@ -2,6 +2,7 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
+#include <libudev.h>
#include "usbip_common.h"
#include "names.h"
@@ -12,6 +13,8 @@ int usbip_use_syslog;
int usbip_use_stderr;
int usbip_use_debug;
+extern struct udev *udev_context;
+
struct speed_string {
int num;
char *speed;
@@ -23,6 +26,8 @@ static const struct speed_string speed_strings[] = {
{ USB_SPEED_LOW, "1.5", "Low Speed(1.5Mbps)" },
{ USB_SPEED_FULL, "12", "Full Speed(12Mbps)" },
{ USB_SPEED_HIGH, "480", "High Speed(480Mbps)" },
+ { USB_SPEED_WIRELESS, "53.3-480", "Wireless"},
+ { USB_SPEED_SUPER, "5000", "Super Speed(5000Mbps)" },
{ 0, NULL, NULL }
};
@@ -109,75 +114,61 @@ void dump_usb_device(struct usbip_usb_device *udev)
}
-int read_attr_value(struct sysfs_device *dev, const char *name,
+int read_attr_value(struct udev_device *dev, const char *name,
const char *format)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ const char *attr;
int num = 0;
int ret;
- snprintf(attrpath, sizeof(attrpath), "%s/%s", dev->path, name);
-
- attr = sysfs_open_attribute(attrpath);
+ attr = udev_device_get_sysattr_value(dev, name);
if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
- return 0;
- }
-
- ret = sysfs_read_attribute(attr);
- if (ret < 0) {
- dbg("sysfs_read_attribute failed");
+ err("udev_device_get_sysattr_value failed");
goto err;
}
- ret = sscanf(attr->value, format, &num);
+ /* The client chooses the device configuration
+ * when attaching it, so right after being bound
+ * to usbip-host on the server the device has
+ * no configuration yet.
+ * Therefore, attributes such as bConfigurationValue
+ * and bNumInterfaces will not exist and sscanf will
+ * fail. Check for these cases and don't treat them
+ * as errors.
+ */
+
+ ret = sscanf(attr, format, &num);
if (ret < 1) {
- dbg("sscanf failed");
- goto err;
+ if (strcmp(name, "bConfigurationValue") &&
+ strcmp(name, "bNumInterfaces")) {
+ err("sscanf failed for attribute %s", name);
+ goto err;
+ }
}
err:
- sysfs_close_attribute(attr);
return num;
}
-int read_attr_speed(struct sysfs_device *dev)
+int read_attr_speed(struct udev_device *dev)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
- char speed[100];
- int ret;
-
- snprintf(attrpath, sizeof(attrpath), "%s/%s", dev->path, "speed");
-
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
- return 0;
- }
+ const char *speed;
- ret = sysfs_read_attribute(attr);
- if (ret < 0) {
- dbg("sysfs_read_attribute failed");
+ speed = udev_device_get_sysattr_value(dev, "speed");
+ if (!speed) {
+ err("udev_device_get_sysattr_value failed");
goto err;
}
- ret = sscanf(attr->value, "%99s\n", speed);
- if (ret < 1) {
- dbg("sscanf failed");
- goto err;
- }
-err:
- sysfs_close_attribute(attr);
-
for (int i = 0; speed_strings[i].speed != NULL; i++) {
if (!strcmp(speed, speed_strings[i].speed))
return speed_strings[i].num;
}
+err:
+
return USB_SPEED_UNKNOWN;
}
@@ -188,9 +179,10 @@ err:
} while (0)
-int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev)
+int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev)
{
uint32_t busnum, devnum;
+ const char *path, *name;
READ_ATTR(udev, uint8_t, sdev, bDeviceClass, "%02x\n");
READ_ATTR(udev, uint8_t, sdev, bDeviceSubClass, "%02x\n");
@@ -207,10 +199,13 @@ int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev)
READ_ATTR(udev, uint8_t, sdev, devnum, "%d\n");
udev->speed = read_attr_speed(sdev);
- strncpy(udev->path, sdev->path, SYSFS_PATH_MAX);
- strncpy(udev->busid, sdev->name, SYSFS_BUS_ID_SIZE);
+ path = udev_device_get_syspath(sdev);
+ name = udev_device_get_sysname(sdev);
- sscanf(sdev->name, "%u-%u", &busnum, &devnum);
+ strncpy(udev->path, path, SYSFS_PATH_MAX);
+ strncpy(udev->busid, name, SYSFS_BUS_ID_SIZE);
+
+ sscanf(name, "%u-%u", &busnum, &devnum);
udev->busnum = busnum;
return 0;
@@ -220,13 +215,13 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
struct usbip_usb_interface *uinf)
{
char busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *sif;
+ struct udev_device *sif;
sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
- sif = sysfs_open_device("usb", busid);
+ sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
if (!sif) {
- dbg("sysfs_open_device(\"usb\", \"%s\") failed", busid);
+ err("udev_device_new_from_subsystem_sysname %s failed", busid);
return -1;
}
@@ -234,8 +229,6 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
READ_ATTR(uinf, uint8_t, sif, bInterfaceSubClass, "%02x\n");
READ_ATTR(uinf, uint8_t, sif, bInterfaceProtocol, "%02x\n");
- sysfs_close_device(sif);
-
return 0;
}
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.h b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
index 938ad1c36947..23be848f24fb 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.h
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
@@ -5,7 +5,7 @@
#ifndef __USBIP_COMMON_H
#define __USBIP_COMMON_H
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <stdint.h>
#include <stdio.h>
@@ -14,6 +14,8 @@
#include <syslog.h>
#include <unistd.h>
+#include <linux/usb/ch9.h>
+#include "../../uapi/usbip.h"
#ifndef USBIDS_FILE
#define USBIDS_FILE "/usr/share/hwdata/usb.ids"
@@ -28,6 +30,15 @@
#define USBIP_HOST_DRV_NAME "usbip-host"
#define USBIP_VHCI_DRV_NAME "vhci_hcd"
+/* sysfs constants */
+#define SYSFS_MNT_PATH "/sys"
+#define SYSFS_BUS_NAME "bus"
+#define SYSFS_BUS_TYPE "usb"
+#define SYSFS_DRIVERS_NAME "drivers"
+
+#define SYSFS_PATH_MAX 256
+#define SYSFS_BUS_ID_SIZE 32
+
extern int usbip_use_syslog;
extern int usbip_use_stderr;
extern int usbip_use_debug ;
@@ -76,30 +87,6 @@ extern int usbip_use_debug ;
abort(); \
} while (0)
-enum usb_device_speed {
- USB_SPEED_UNKNOWN = 0, /* enumerating */
- USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
- USB_SPEED_HIGH, /* usb 2.0 */
- USB_SPEED_VARIABLE /* wireless (usb 2.5) */
-};
-
-/* FIXME: how to sync with drivers/usbip_common.h ? */
-enum usbip_device_status {
- /* sdev is available. */
- SDEV_ST_AVAILABLE = 0x01,
- /* sdev is now used. */
- SDEV_ST_USED,
- /* sdev is unusable because of a fatal error. */
- SDEV_ST_ERROR,
-
- /* vdev does not connect a remote device. */
- VDEV_ST_NULL,
- /* vdev is used, but the USB address is not assigned yet */
- VDEV_ST_NOTASSIGNED,
- VDEV_ST_USED,
- VDEV_ST_ERROR
-};
-
struct usbip_usb_interface {
uint8_t bInterfaceClass;
uint8_t bInterfaceSubClass;
@@ -131,8 +118,8 @@ struct usbip_usb_device {
void dump_usb_interface(struct usbip_usb_interface *);
void dump_usb_device(struct usbip_usb_device *);
-int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev);
-int read_attr_value(struct sysfs_device *dev, const char *name,
+int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev);
+int read_attr_value(struct udev_device *dev, const char *name,
const char *format);
int read_usb_interface(struct usbip_usb_device *udev, int i,
struct usbip_usb_interface *uinf);
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
index 71a449cf50db..c5bf60b135b9 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
@@ -18,102 +18,65 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
+#include <libudev.h>
+
#include "usbip_common.h"
#include "usbip_host_driver.h"
+#include "list.h"
+#include "sysfs_utils.h"
#undef PROGNAME
#define PROGNAME "libusbip"
struct usbip_host_driver *host_driver;
+struct udev *udev_context;
-#define SYSFS_OPEN_RETRIES 100
-
-/* only the first interface value is true! */
static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ char status_attr_path[SYSFS_PATH_MAX];
+ int fd;
+ int length;
+ char status;
int value = 0;
- int rc;
- struct stat s;
- int retries = SYSFS_OPEN_RETRIES;
-
- /* This access is racy!
- *
- * Just after detach, our driver removes the sysfs
- * files and recreates them.
- *
- * We may try and fail to open the usbip_status of
- * an exported device in the (short) window where
- * it has been removed and not yet recreated.
- *
- * This is a bug in the interface. Nothing we can do
- * except work around it here by polling for the sysfs
- * usbip_status to reappear.
- */
-
- snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status",
- udev->path, udev->busid, udev->bConfigurationValue, 0);
-
- while (retries > 0) {
- if (stat(attrpath, &s) == 0)
- break;
-
- if (errno != ENOENT) {
- dbg("stat failed: %s", attrpath);
- return -1;
- }
-
- usleep(10000); /* 10ms */
- retries--;
- }
- if (retries == 0)
- dbg("usbip_status not ready after %d retries",
- SYSFS_OPEN_RETRIES);
- else if (retries < SYSFS_OPEN_RETRIES)
- dbg("warning: usbip_status ready after %d retries",
- SYSFS_OPEN_RETRIES - retries);
+ snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+ udev->path);
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attrpath);
+ if ((fd = open(status_attr_path, O_RDONLY)) < 0) {
+ err("error opening attribute %s", status_attr_path);
return -1;
}
- rc = sysfs_read_attribute(attr);
- if (rc) {
- dbg("sysfs_read_attribute failed: %s", attrpath);
- sysfs_close_attribute(attr);
+ length = read(fd, &status, 1);
+ if (length < 0) {
+ err("error reading attribute %s", status_attr_path);
+ close(fd);
return -1;
}
- value = atoi(attr->value);
-
- sysfs_close_attribute(attr);
+ value = atoi(&status);
return value;
}
-static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
+static
+struct usbip_exported_device *usbip_exported_device_new(const char *sdevpath)
{
struct usbip_exported_device *edev = NULL;
+ struct usbip_exported_device *edev_old;
size_t size;
int i;
- edev = calloc(1, sizeof(*edev));
- if (!edev) {
- dbg("calloc failed");
- return NULL;
- }
+ edev = calloc(1, sizeof(struct usbip_exported_device));
- edev->sudev = sysfs_open_device_path(sdevpath);
+ edev->sudev = udev_device_new_from_syspath(udev_context, sdevpath);
if (!edev->sudev) {
- dbg("sysfs_open_device_path failed: %s", sdevpath);
+ err("udev_device_new_from_syspath: %s", sdevpath);
goto err;
}
@@ -124,11 +87,13 @@ static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
goto err;
/* reallocate buffer to include usb interface data */
- size = sizeof(*edev) + edev->udev.bNumInterfaces *
+ size = sizeof(struct usbip_exported_device) + edev->udev.bNumInterfaces *
sizeof(struct usbip_usb_interface);
+ edev_old = edev;
edev = realloc(edev, size);
if (!edev) {
+ edev = edev_old;
dbg("realloc failed");
goto err;
}
@@ -138,160 +103,88 @@ static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
return edev;
err:
- if (edev && edev->sudev)
- sysfs_close_device(edev->sudev);
+ if (edev->sudev)
+ udev_device_unref(edev->sudev);
if (edev)
free(edev);
return NULL;
}
-static int check_new(struct dlist *dlist, struct sysfs_device *target)
-{
- struct sysfs_device *dev;
-
- dlist_for_each_data(dlist, dev, struct sysfs_device) {
- if (!strncmp(dev->bus_id, target->bus_id, SYSFS_BUS_ID_SIZE))
- /* device found and is not new */
- return 0;
- }
- return 1;
-}
-
-static void delete_nothing(void *unused_data)
-{
- /*
- * NOTE: Do not delete anything, but the container will be deleted.
- */
- (void) unused_data;
-}
-
static int refresh_exported_devices(void)
{
- /* sysfs_device of usb_interface */
- struct sysfs_device *suintf;
- struct dlist *suintf_list;
- /* sysfs_device of usb_device */
- struct sysfs_device *sudev;
- struct dlist *sudev_list;
struct usbip_exported_device *edev;
-
- sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device),
- delete_nothing);
-
- suintf_list = sysfs_get_driver_devices(host_driver->sysfs_driver);
- if (!suintf_list) {
- /*
- * Not an error condition. There are simply no devices bound to
- * the driver yet.
- */
- dbg("bind " USBIP_HOST_DRV_NAME ".ko to a usb device to be "
- "exportable!");
- return 0;
- }
-
- /* collect unique USB devices (not interfaces) */
- dlist_for_each_data(suintf_list, suintf, struct sysfs_device) {
- /* get usb device of this usb interface */
- sudev = sysfs_get_device_parent(suintf);
- if (!sudev) {
- dbg("sysfs_get_device_parent failed: %s", suintf->name);
- continue;
- }
-
- if (check_new(sudev_list, sudev)) {
- /* insert item at head of list */
- dlist_unshift(sudev_list, sudev);
- }
- }
-
- dlist_for_each_data(sudev_list, sudev, struct sysfs_device) {
- edev = usbip_exported_device_new(sudev->path);
- if (!edev) {
- dbg("usbip_exported_device_new failed");
- continue;
+ struct udev_enumerate *enumerate;
+ struct udev_list_entry *devices, *dev_list_entry;
+ struct udev_device *dev;
+ const char *path;
+
+ enumerate = udev_enumerate_new(udev_context);
+ udev_enumerate_add_match_subsystem(enumerate, "usb");
+ udev_enumerate_scan_devices(enumerate);
+
+ devices = udev_enumerate_get_list_entry(enumerate);
+
+ udev_list_entry_foreach(dev_list_entry, devices) {
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev_context, path);
+
+ /* Check whether device uses usbip-host driver. */
+ if (!strcmp(udev_device_get_driver(dev),
+ USBIP_HOST_DRV_NAME)) {
+ edev = usbip_exported_device_new(path);
+ if (!edev) {
+ dbg("usbip_exported_device_new failed");
+ continue;
+ }
+
+ list_add(&edev->node, &host_driver->edev_list);
+ host_driver->ndevs++;
}
-
- dlist_unshift(host_driver->edev_list, edev);
- host_driver->ndevs++;
}
- dlist_destroy(sudev_list);
-
return 0;
}
-static struct sysfs_driver *open_sysfs_host_driver(void)
+static void usbip_exported_device_destroy(void)
{
- char bus_type[] = "usb";
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char host_drv_path[SYSFS_PATH_MAX];
- struct sysfs_driver *host_drv;
- int rc;
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- dbg("sysfs_get_mnt_path failed");
- return NULL;
- }
-
- snprintf(host_drv_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
- USBIP_HOST_DRV_NAME);
+ struct list_head *i, *tmp;
+ struct usbip_exported_device *edev;
- host_drv = sysfs_open_driver_path(host_drv_path);
- if (!host_drv) {
- dbg("sysfs_open_driver_path failed");
- return NULL;
+ list_for_each_safe(i, tmp, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
+ list_del(i);
+ free(edev);
}
-
- return host_drv;
-}
-
-static void usbip_exported_device_delete(void *dev)
-{
- struct usbip_exported_device *edev = dev;
- sysfs_close_device(edev->sudev);
- free(dev);
}
int usbip_host_driver_open(void)
{
int rc;
- host_driver = calloc(1, sizeof(*host_driver));
- if (!host_driver) {
- dbg("calloc failed");
+ udev_context = udev_new();
+ if (!udev_context) {
+ err("udev_new failed");
return -1;
}
- host_driver->ndevs = 0;
- host_driver->edev_list =
- dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!host_driver->edev_list) {
- dbg("dlist_new_with_delete failed");
- goto err_free_host_driver;
- }
+ host_driver = calloc(1, sizeof(*host_driver));
- host_driver->sysfs_driver = open_sysfs_host_driver();
- if (!host_driver->sysfs_driver)
- goto err_destroy_edev_list;
+ host_driver->ndevs = 0;
+ INIT_LIST_HEAD(&host_driver->edev_list);
rc = refresh_exported_devices();
if (rc < 0)
- goto err_close_sysfs_driver;
+ goto err_free_host_driver;
return 0;
-err_close_sysfs_driver:
- sysfs_close_driver(host_driver->sysfs_driver);
-err_destroy_edev_list:
- dlist_destroy(host_driver->edev_list);
err_free_host_driver:
free(host_driver);
host_driver = NULL;
+ udev_unref(udev_context);
+
return -1;
}
@@ -300,30 +193,22 @@ void usbip_host_driver_close(void)
if (!host_driver)
return;
- if (host_driver->edev_list)
- dlist_destroy(host_driver->edev_list);
- if (host_driver->sysfs_driver)
- sysfs_close_driver(host_driver->sysfs_driver);
+ usbip_exported_device_destroy();
free(host_driver);
host_driver = NULL;
+
+ udev_unref(udev_context);
}
int usbip_host_refresh_device_list(void)
{
int rc;
- if (host_driver->edev_list)
- dlist_destroy(host_driver->edev_list);
+ usbip_exported_device_destroy();
host_driver->ndevs = 0;
- host_driver->edev_list =
- dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!host_driver->edev_list) {
- dbg("dlist_new_with_delete failed");
- return -1;
- }
+ INIT_LIST_HEAD(&host_driver->edev_list);
rc = refresh_exported_devices();
if (rc < 0)
@@ -335,8 +220,7 @@ int usbip_host_refresh_device_list(void)
int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
{
char attr_name[] = "usbip_sockfd";
- char attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
+ char sockfd_attr_path[SYSFS_PATH_MAX];
char sockfd_buff[30];
int ret;
@@ -356,41 +240,32 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
}
/* only the first interface is true */
- snprintf(attr_path, sizeof(attr_path), "%s/%s:%d.%d/%s",
- edev->udev.path, edev->udev.busid,
- edev->udev.bConfigurationValue, 0, attr_name);
-
- attr = sysfs_open_attribute(attr_path);
- if (!attr) {
- dbg("sysfs_open_attribute failed: %s", attr_path);
- return -1;
- }
+ snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+ edev->udev.path, attr_name);
snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
- dbg("write: %s", sockfd_buff);
- ret = sysfs_write_attribute(attr, sockfd_buff, strlen(sockfd_buff));
+ ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
+ strlen(sockfd_buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed: sockfd %s to %s",
- sockfd_buff, attr_path);
- goto err_write_sockfd;
+ err("write_sysfs_attribute failed: sockfd %s to %s",
+ sockfd_buff, sockfd_attr_path);
+ return ret;
}
- dbg("connect: %s", edev->udev.busid);
-
-err_write_sockfd:
- sysfs_close_attribute(attr);
+ info("connect: %s", edev->udev.busid);
return ret;
}
struct usbip_exported_device *usbip_host_get_device(int num)
{
+ struct list_head *i;
struct usbip_exported_device *edev;
- struct dlist *dlist = host_driver->edev_list;
int cnt = 0;
- dlist_for_each_data(dlist, edev, struct usbip_exported_device) {
+ list_for_each(i, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
if (num == cnt)
return edev;
else
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
index 34fd14cbfc49..2a31f855c616 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
@@ -21,18 +21,19 @@
#include <stdint.h>
#include "usbip_common.h"
+#include "list.h"
struct usbip_host_driver {
int ndevs;
- struct sysfs_driver *sysfs_driver;
/* list of exported device */
- struct dlist *edev_list;
+ struct list_head edev_list;
};
struct usbip_exported_device {
- struct sysfs_device *sudev;
+ struct udev_device *sudev;
int32_t status;
struct usbip_usb_device udev;
+ struct list_head node;
struct usbip_usb_interface uinf[];
};
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 209df9b37cb4..8901fcb7946f 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -6,44 +6,28 @@
#include "vhci_driver.h"
#include <limits.h>
#include <netdb.h>
+#include <libudev.h>
+#include "sysfs_utils.h"
#undef PROGNAME
#define PROGNAME "libusbip"
struct usbip_vhci_driver *vhci_driver;
+struct udev *udev_context;
static struct usbip_imported_device *
imported_device_init(struct usbip_imported_device *idev, char *busid)
{
- struct sysfs_device *sudev;
+ struct udev_device *sudev;
- sudev = sysfs_open_device("usb", busid);
+ sudev = udev_device_new_from_subsystem_sysname(udev_context,
+ "usb", busid);
if (!sudev) {
- dbg("sysfs_open_device failed: %s", busid);
+ dbg("udev_device_new_from_subsystem_sysname failed: %s", busid);
goto err;
}
read_usb_device(sudev, &idev->udev);
- sysfs_close_device(sudev);
-
- /* add class devices of this imported device */
- struct usbip_class_device *cdev;
- dlist_for_each_data(vhci_driver->cdev_list, cdev,
- struct usbip_class_device) {
- if (!strncmp(cdev->dev_path, idev->udev.path,
- strlen(idev->udev.path))) {
- struct usbip_class_device *new_cdev;
- /*
- * alloc and copy because dlist is linked
- * from only one list
- */
- new_cdev = calloc(1, sizeof(*new_cdev));
- if (!new_cdev)
- goto err;
-
- memcpy(new_cdev, cdev, sizeof(*new_cdev));
- dlist_unshift(idev->cdev_list, (void *) new_cdev);
- }
- }
+ udev_device_unref(sudev);
return idev;
@@ -53,7 +37,7 @@ err:
-static int parse_status(char *value)
+static int parse_status(const char *value)
{
int ret = 0;
char *c;
@@ -100,12 +84,6 @@ static int parse_status(char *value)
idev->busnum = (devid >> 16);
idev->devnum = (devid & 0x0000ffff);
- idev->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!idev->cdev_list) {
- dbg("dlist_new failed");
- return -1;
- }
-
if (idev->status != VDEV_ST_NULL
&& idev->status != VDEV_ST_NOTASSIGNED) {
idev = imported_device_init(idev, lbusid);
@@ -129,156 +107,35 @@ static int parse_status(char *value)
return 0;
}
-
-static int check_usbip_device(struct sysfs_class_device *cdev)
-{
- /* /sys/class/video4linux/video0/device */
- char class_path[SYSFS_PATH_MAX];
- /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */
- char dev_path[SYSFS_PATH_MAX];
- int ret;
- struct usbip_class_device *usbip_cdev;
-
- snprintf(class_path, sizeof(class_path), "%s/device", cdev->path);
-
- ret = sysfs_get_link(class_path, dev_path, sizeof(dev_path));
- if (ret == 0) {
- if (!strncmp(dev_path, vhci_driver->hc_device->path,
- strlen(vhci_driver->hc_device->path))) {
- /* found usbip device */
- usbip_cdev = calloc(1, sizeof(*usbip_cdev));
- if (!usbip_cdev) {
- dbg("calloc failed");
- return -1;
- }
- dlist_unshift(vhci_driver->cdev_list, usbip_cdev);
- strncpy(usbip_cdev->class_path, class_path,
- sizeof(usbip_cdev->class_path));
- strncpy(usbip_cdev->dev_path, dev_path,
- sizeof(usbip_cdev->dev_path));
- dbg("found: %s %s", class_path, dev_path);
- }
- }
-
- return 0;
-}
-
-
-static int search_class_for_usbip_device(char *cname)
-{
- struct sysfs_class *class;
- struct dlist *cdev_list;
- struct sysfs_class_device *cdev;
- int ret = 0;
-
- class = sysfs_open_class(cname);
- if (!class) {
- dbg("sysfs_open_class failed");
- return -1;
- }
-
- dbg("class: %s", class->name);
-
- cdev_list = sysfs_get_class_devices(class);
- if (!cdev_list)
- /* nothing */
- goto out;
-
- dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) {
- dbg("cdev: %s", cdev->name);
- ret = check_usbip_device(cdev);
- if (ret < 0)
- goto out;
- }
-
-out:
- sysfs_close_class(class);
-
- return ret;
-}
-
-
-static int refresh_class_device_list(void)
-{
- int ret;
- struct dlist *cname_list;
- char *cname;
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char class_path[SYSFS_PATH_MAX];
-
- ret = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (ret < 0) {
- dbg("sysfs_get_mnt_path failed");
- return -1;
- }
-
- snprintf(class_path, sizeof(class_path), "%s/%s", sysfs_mntpath,
- SYSFS_CLASS_NAME);
-
- /* search under /sys/class */
- cname_list = sysfs_open_directory_list(class_path);
- if (!cname_list) {
- dbg("sysfs_open_directory failed");
- return -1;
- }
-
- dlist_for_each_data(cname_list, cname, char) {
- ret = search_class_for_usbip_device(cname);
- if (ret < 0) {
- sysfs_close_list(cname_list);
- return -1;
- }
- }
-
- sysfs_close_list(cname_list);
-
- /* search under /sys/block */
- ret = search_class_for_usbip_device(SYSFS_BLOCK_NAME);
- if (ret < 0)
- return -1;
-
- return 0;
-}
-
-
static int refresh_imported_device_list(void)
{
- struct sysfs_attribute *attr_status;
-
+ const char *attr_status;
- attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
+ attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
+ "status");
if (!attr_status) {
- dbg("sysfs_get_device_attr(\"status\") failed: %s",
- vhci_driver->hc_device->name);
+ err("udev_device_get_sysattr_value failed");
return -1;
}
- dbg("name: %s path: %s len: %d method: %d value: %s",
- attr_status->name, attr_status->path, attr_status->len,
- attr_status->method, attr_status->value);
-
- return parse_status(attr_status->value);
+ return parse_status(attr_status);
}
static int get_nports(void)
{
char *c;
int nports = 0;
- struct sysfs_attribute *attr_status;
+ const char *attr_status;
- attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
+ attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
+ "status");
if (!attr_status) {
- dbg("sysfs_get_device_attr(\"status\") failed: %s",
- vhci_driver->hc_device->name);
+ err("udev_device_get_sysattr_value failed");
return -1;
}
- dbg("name: %s path: %s len: %d method: %d value: %s",
- attr_status->name, attr_status->path, attr_status->len,
- attr_status->method, attr_status->value);
-
/* skip a header line */
- c = strchr(attr_status->value, '\n');
+ c = strchr(attr_status, '\n');
if (!c)
return 0;
c++;
@@ -295,71 +152,66 @@ static int get_nports(void)
return nports;
}
-static int get_hc_busid(char *sysfs_mntpath, char *hc_busid)
-{
- struct sysfs_driver *sdriver;
- char sdriver_path[SYSFS_PATH_MAX];
-
- struct sysfs_device *hc_dev;
- struct dlist *hc_devs;
-
- int found = 0;
-
- snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s", sysfs_mntpath,
- SYSFS_BUS_NAME, USBIP_VHCI_BUS_TYPE, SYSFS_DRIVERS_NAME,
- USBIP_VHCI_DRV_NAME);
-
- sdriver = sysfs_open_driver_path(sdriver_path);
- if (!sdriver) {
- dbg("sysfs_open_driver_path failed: %s", sdriver_path);
- dbg("make sure " USBIP_CORE_MOD_NAME ".ko and "
- USBIP_VHCI_DRV_NAME ".ko are loaded!");
- return -1;
- }
-
- hc_devs = sysfs_get_driver_devices(sdriver);
- if (!hc_devs) {
- dbg("sysfs_get_driver failed");
- goto err;
- }
-
- /* assume only one vhci_hcd */
- dlist_for_each_data(hc_devs, hc_dev, struct sysfs_device) {
- strncpy(hc_busid, hc_dev->bus_id, SYSFS_BUS_ID_SIZE);
- found = 1;
- }
-
-err:
- sysfs_close_driver(sdriver);
-
- if (found)
- return 0;
-
- dbg("%s not found", hc_busid);
- return -1;
-}
-
-static int read_record(int rhport, char *host, char *port, char *busid)
+/*
+ * Read the given port's record.
+ *
+ * To avoid a buffer overflow we read the entire line and then
+ * validate the size of each part. The initial buffer is padded by 4
+ * to accommodate the 2 spaces, 1 newline, and one additional
+ * character, which is needed so the 3rd part can be length-checked
+ * instead of being silently truncated to an acceptable length.
+ */
+static int read_record(int rhport, char *host, unsigned long host_len,
+ char *port, unsigned long port_len, char *busid)
{
+ int part;
FILE *file;
char path[PATH_MAX+1];
+ char *buffer, *start, *end;
+ char delim[] = {' ', ' ', '\n'};
+ int max_len[] = {(int)host_len, (int)port_len, SYSFS_BUS_ID_SIZE};
+ size_t buffer_len = host_len + port_len + SYSFS_BUS_ID_SIZE + 4;
+
+ buffer = malloc(buffer_len);
+ if (!buffer)
+ return -1;
snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
file = fopen(path, "r");
if (!file) {
err("fopen");
+ free(buffer);
return -1;
}
- if (fscanf(file, "%s %s %s\n", host, port, busid) != 3) {
- err("fscanf");
+ if (fgets(buffer, buffer_len, file) == NULL) {
+ err("fgets");
+ free(buffer);
fclose(file);
return -1;
}
-
fclose(file);
+ /* validate the length of each of the 3 parts */
+ start = buffer;
+ for (part = 0; part < 3; part++) {
+ end = strchr(start, delim[part]);
+ if (end == NULL || (end - start) > max_len[part]) {
+ free(buffer);
+ return -1;
+ }
+ start = end + 1;
+ }
+
+ if (sscanf(buffer, "%s %s %s\n", host, port, busid) != 3) {
+ err("sscanf");
+ free(buffer);
+ return -1;
+ }
+
+ free(buffer);
+
return 0;
}
@@ -367,30 +219,21 @@ static int read_record(int rhport, char *host, char *port, char *busid)
int usbip_vhci_driver_open(void)
{
- int ret;
- char hc_busid[SYSFS_BUS_ID_SIZE];
-
- vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver));
- if (!vhci_driver) {
- dbg("calloc failed");
+ udev_context = udev_new();
+ if (!udev_context) {
+ err("udev_new failed");
return -1;
}
- ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX);
- if (ret < 0) {
- dbg("sysfs_get_mnt_path failed");
- goto err;
- }
-
- ret = get_hc_busid(vhci_driver->sysfs_mntpath, hc_busid);
- if (ret < 0)
- goto err;
+ vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver));
/* will be freed in usbip_driver_close() */
- vhci_driver->hc_device = sysfs_open_device(USBIP_VHCI_BUS_TYPE,
- hc_busid);
+ vhci_driver->hc_device =
+ udev_device_new_from_subsystem_sysname(udev_context,
+ USBIP_VHCI_BUS_TYPE,
+ USBIP_VHCI_DRV_NAME);
if (!vhci_driver->hc_device) {
- dbg("sysfs_open_device failed");
+ err("udev_device_new_from_subsystem_sysname failed");
goto err;
}
@@ -398,29 +241,21 @@ int usbip_vhci_driver_open(void)
dbg("available ports: %d", vhci_driver->nports);
- vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!vhci_driver->cdev_list)
- goto err;
-
- if (refresh_class_device_list())
- goto err;
-
if (refresh_imported_device_list())
goto err;
-
return 0;
-
err:
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
- if (vhci_driver->hc_device)
- sysfs_close_device(vhci_driver->hc_device);
+ udev_device_unref(vhci_driver->hc_device);
+
if (vhci_driver)
free(vhci_driver);
vhci_driver = NULL;
+
+ udev_unref(udev_context);
+
return -1;
}
@@ -430,53 +265,24 @@ void usbip_vhci_driver_close()
if (!vhci_driver)
return;
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
+ udev_device_unref(vhci_driver->hc_device);
- if (vhci_driver->hc_device)
- sysfs_close_device(vhci_driver->hc_device);
free(vhci_driver);
vhci_driver = NULL;
+
+ udev_unref(udev_context);
}
int usbip_vhci_refresh_device_list(void)
{
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
-
- vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
- if (!vhci_driver->cdev_list)
- goto err;
-
- if (refresh_class_device_list())
- goto err;
if (refresh_imported_device_list())
goto err;
return 0;
err:
- if (vhci_driver->cdev_list)
- dlist_destroy(vhci_driver->cdev_list);
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- if (vhci_driver->idev[i].cdev_list)
- dlist_destroy(vhci_driver->idev[i].cdev_list);
- }
-
dbg("failed to refresh device list");
return -1;
}
@@ -494,24 +300,24 @@ int usbip_vhci_get_free_port(void)
int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid,
uint32_t speed) {
- struct sysfs_attribute *attr_attach;
char buff[200]; /* what size should be ? */
+ char attach_attr_path[SYSFS_PATH_MAX];
+ char attr_attach[] = "attach";
+ const char *path;
int ret;
- attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach");
- if (!attr_attach) {
- dbg("sysfs_get_device_attr(\"attach\") failed: %s",
- vhci_driver->hc_device->name);
- return -1;
- }
-
- snprintf(buff, sizeof(buff), "%u %u %u %u",
+ snprintf(buff, sizeof(buff), "%u %d %u %u",
port, sockfd, devid, speed);
dbg("writing: %s", buff);
- ret = sysfs_write_attribute(attr_attach, buff, strlen(buff));
+ path = udev_device_get_syspath(vhci_driver->hc_device);
+ snprintf(attach_attr_path, sizeof(attach_attr_path), "%s/%s",
+ path, attr_attach);
+ dbg("attach attribute path: %s", attach_attr_path);
+
+ ret = write_sysfs_attribute(attach_attr_path, buff, strlen(buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed");
+ dbg("write_sysfs_attribute failed");
return -1;
}
@@ -536,23 +342,23 @@ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum,
int usbip_vhci_detach_device(uint8_t port)
{
- struct sysfs_attribute *attr_detach;
+ char detach_attr_path[SYSFS_PATH_MAX];
+ char attr_detach[] = "detach";
char buff[200]; /* what size should be ? */
+ const char *path;
int ret;
- attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach");
- if (!attr_detach) {
- dbg("sysfs_get_device_attr(\"detach\") failed: %s",
- vhci_driver->hc_device->name);
- return -1;
- }
-
snprintf(buff, sizeof(buff), "%u", port);
dbg("writing: %s", buff);
- ret = sysfs_write_attribute(attr_detach, buff, strlen(buff));
+ path = udev_device_get_syspath(vhci_driver->hc_device);
+ snprintf(detach_attr_path, sizeof(detach_attr_path), "%s/%s",
+ path, attr_detach);
+ dbg("detach attribute path: %s", detach_attr_path);
+
+ ret = write_sysfs_attribute(detach_attr_path, buff, strlen(buff));
if (ret < 0) {
- dbg("sysfs_write_attribute failed");
+ dbg("write_sysfs_attribute failed");
return -1;
}
@@ -573,7 +379,8 @@ int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev)
if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED)
return 0;
- ret = read_record(idev->port, host, serv, remote_busid);
+ ret = read_record(idev->port, host, sizeof(host), serv, sizeof(serv),
+ remote_busid);
if (ret) {
err("read_record");
read_record_error = 1;
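
usbip_vhci_attach_device2() and usbip_vhci_detach_device() above now compose the attribute path from udev_device_get_syspath() and pass it to write_sysfs_attribute(), a helper introduced elsewhere in this series (sysfs_utils.c) whose body is not shown in this hunk. A minimal sketch of what such a helper is assumed to do, using plain open(2)/write(2); the name and messages are illustrative only:

	/* Minimal sketch of a write_sysfs_attribute()-style helper. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int write_sysfs_attribute_sketch(const char *attr_path,
						const char *new_value,
						size_t len)
	{
		int fd;
		ssize_t length;

		fd = open(attr_path, O_WRONLY);
		if (fd < 0) {
			fprintf(stderr, "error opening attribute %s\n",
				attr_path);
			return -1;
		}

		length = write(fd, new_value, len);
		close(fd);

		if (length < 0 || (size_t)length != len) {
			fprintf(stderr, "error writing to attribute %s\n",
				attr_path);
			return -1;
		}

		return 0;
	}
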
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
index e071f8049c1f..fa2316cf2cac 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
@@ -5,7 +5,7 @@
#ifndef __VHCI_DRIVER_H
#define __VHCI_DRIVER_H
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <stdint.h>
#include "usbip_common.h"
@@ -13,11 +13,6 @@
#define USBIP_VHCI_BUS_TYPE "platform"
#define MAXNPORT 128
-struct usbip_class_device {
- char class_path[SYSFS_PATH_MAX];
- char dev_path[SYSFS_PATH_MAX];
-};
-
struct usbip_imported_device {
uint8_t port;
uint32_t status;
@@ -28,18 +23,13 @@ struct usbip_imported_device {
uint8_t devnum;
/* usbip_class_device list */
- struct dlist *cdev_list;
struct usbip_usb_device udev;
};
struct usbip_vhci_driver {
- char sysfs_mntpath[SYSFS_PATH_MAX];
/* /sys/devices/platform/vhci_hcd */
- struct sysfs_device *hc_device;
-
- /* usbip_class_device list */
- struct dlist *cdev_list;
+ struct udev_device *hc_device;
int nports;
struct usbip_imported_device idev[MAXNPORT];
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index b4f8c4b04b2f..e81a4ebadeff 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -8,5 +8,4 @@ usbip_SOURCES := usbip.h utils.h usbip.c utils.c usbip_network.c \
usbip_attach.c usbip_detach.c usbip_list.c \
usbip_bind.c usbip_unbind.c usbip_port.c
-
usbipd_SOURCES := usbip_network.h usbipd.c usbip_network.c
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index 085841196522..716a79e284c4 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -17,7 +17,6 @@
*/
#include <sys/stat.h>
-#include <sysfs/libsysfs.h>
#include <limits.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c
index 9ecaf6e574df..fa46141ae68b 100644
--- a/drivers/staging/usbip/userspace/src/usbip_bind.c
+++ b/drivers/staging/usbip/userspace/src/usbip_bind.c
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdio.h>
@@ -28,6 +28,7 @@
#include "usbip_common.h"
#include "utils.h"
#include "usbip.h"
+#include "sysfs_utils.h"
enum unbind_status {
UNBIND_ST_OK,
@@ -48,167 +49,92 @@ void usbip_bind_usage(void)
/* call at unbound state */
static int bind_usbip(char *busid)
{
- char bus_type[] = "usb";
char attr_name[] = "bind";
- char sysfs_mntpath[SYSFS_PATH_MAX];
char bind_attr_path[SYSFS_PATH_MAX];
- char intf_busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *busid_dev;
- struct sysfs_attribute *bind_attr;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bNumIntfs;
- int i, failed = 0;
- int rc, ret = -1;
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
- }
+ int rc = -1;
snprintf(bind_attr_path, sizeof(bind_attr_path), "%s/%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
- USBIP_HOST_DRV_NAME, attr_name);
-
- bind_attr = sysfs_open_attribute(bind_attr_path);
- if (!bind_attr) {
- dbg("problem getting bind attribute: %s", strerror(errno));
- return -1;
- }
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE,
+ SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);
- busid_dev = sysfs_open_device(bus_type, busid);
- if (!busid_dev) {
- dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
- goto err_close_bind_attr;
- }
-
- bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
- bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
-
- if (!bConfValue || !bNumIntfs) {
- dbg("problem getting device attributes: %s",
+ rc = write_sysfs_attribute(bind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error binding device %s to driver: %s", busid,
strerror(errno));
- goto err_close_busid_dev;
- }
-
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
- bConfValue->value, i);
-
- rc = sysfs_write_attribute(bind_attr, intf_busid,
- SYSFS_BUS_ID_SIZE);
- if (rc < 0) {
- dbg("bind driver at %s failed", intf_busid);
- failed = 1;
- }
+ return -1;
}
- if (!failed)
- ret = 0;
-
-err_close_busid_dev:
- sysfs_close_device(busid_dev);
-err_close_bind_attr:
- sysfs_close_attribute(bind_attr);
-
- return ret;
+ return 0;
}
/* buggy driver may cause dead lock */
static int unbind_other(char *busid)
{
- char bus_type[] = "usb";
- char intf_busid[SYSFS_BUS_ID_SIZE];
- struct sysfs_device *busid_dev;
- struct sysfs_device *intf_dev;
- struct sysfs_driver *intf_drv;
- struct sysfs_attribute *unbind_attr;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bDevClass;
- struct sysfs_attribute *bNumIntfs;
- int i, rc;
enum unbind_status status = UNBIND_ST_OK;
- busid_dev = sysfs_open_device(bus_type, busid);
- if (!busid_dev) {
- dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
- return -1;
+ char attr_name[] = "unbind";
+ char unbind_attr_path[SYSFS_PATH_MAX];
+ int rc = -1;
+
+ struct udev *udev;
+ struct udev_device *dev;
+ const char *driver;
+ const char *bDevClass;
+
+ /* Create libudev context. */
+ udev = udev_new();
+
+ /* Get the device. */
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ dbg("unable to find device with bus ID %s", busid);
+ goto err_close_busid_dev;
}
- bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
- bDevClass = sysfs_get_device_attr(busid_dev, "bDeviceClass");
- bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
- if (!bConfValue || !bDevClass || !bNumIntfs) {
- dbg("problem getting device attributes: %s",
- strerror(errno));
+ /* Check what kind of device it is. */
+ bDevClass = udev_device_get_sysattr_value(dev, "bDeviceClass");
+ if (!bDevClass) {
+ dbg("unable to get bDevClass device attribute");
goto err_close_busid_dev;
}
- if (!strncmp(bDevClass->value, "09", bDevClass->len)) {
+ if (!strncmp(bDevClass, "09", strlen(bDevClass))) {
dbg("skip unbinding of hub");
goto err_close_busid_dev;
}
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
- bConfValue->value, i);
- intf_dev = sysfs_open_device(bus_type, intf_busid);
- if (!intf_dev) {
- dbg("could not open interface device: %s",
- strerror(errno));
- goto err_close_busid_dev;
- }
-
- dbg("%s -> %s", intf_dev->name, intf_dev->driver_name);
-
- if (!strncmp("unknown", intf_dev->driver_name, SYSFS_NAME_LEN))
- /* unbound interface */
- continue;
-
- if (!strncmp(USBIP_HOST_DRV_NAME, intf_dev->driver_name,
- SYSFS_NAME_LEN)) {
- /* already bound to usbip-host */
- status = UNBIND_ST_USBIP_HOST;
- continue;
- }
-
- /* unbinding */
- intf_drv = sysfs_open_driver(bus_type, intf_dev->driver_name);
- if (!intf_drv) {
- dbg("could not open interface driver on %s: %s",
- intf_dev->name, strerror(errno));
- goto err_close_intf_dev;
- }
+ /* Get the device driver. */
+ driver = udev_device_get_driver(dev);
+ if (!driver) {
+ /* No driver bound to this device. */
+ goto out;
+ }
- unbind_attr = sysfs_get_driver_attr(intf_drv, "unbind");
- if (!unbind_attr) {
- dbg("problem getting interface driver attribute: %s",
- strerror(errno));
- goto err_close_intf_drv;
- }
+ if (!strncmp(USBIP_HOST_DRV_NAME, driver,
+ strlen(USBIP_HOST_DRV_NAME))) {
+ /* Already bound to usbip-host. */
+ status = UNBIND_ST_USBIP_HOST;
+ goto out;
+ }
- rc = sysfs_write_attribute(unbind_attr, intf_dev->bus_id,
- SYSFS_BUS_ID_SIZE);
- if (rc < 0) {
- /* NOTE: why keep unbinding other interfaces? */
- dbg("unbind driver at %s failed", intf_dev->bus_id);
- status = UNBIND_ST_FAILED;
- }
+ /* Unbind device from driver. */
+ snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE,
+ SYSFS_DRIVERS_NAME, driver, attr_name);
- sysfs_close_driver(intf_drv);
- sysfs_close_device(intf_dev);
+ rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error unbinding device %s from driver", busid);
+ goto err_close_busid_dev;
}
goto out;
-err_close_intf_drv:
- sysfs_close_driver(intf_drv);
-err_close_intf_dev:
- sysfs_close_device(intf_dev);
err_close_busid_dev:
status = UNBIND_ST_FAILED;
out:
- sysfs_close_device(busid_dev);
+ udev_device_unref(dev);
+ udev_unref(udev);
return status;
}
@@ -216,6 +142,17 @@ out:
static int bind_device(char *busid)
{
int rc;
+ struct udev *udev;
+ struct udev_device *dev;
+
+ /* Check whether the device with this bus ID exists. */
+ udev = udev_new();
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ err("device with the specified bus ID does not exist");
+ return -1;
+ }
+ udev_unref(udev);
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
@@ -240,7 +177,7 @@ static int bind_device(char *busid)
return -1;
}
- printf("bind device on busid %s: complete\n", busid);
+ info("bind device on busid %s: complete", busid);
return 0;
}
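
bind_device() and unbind_other() above rely on the same libudev idiom: create a context, look the device up by its bus ID in the "usb" subsystem, then read sysfs attributes or the bound driver from the udev_device before dropping the references. A self-contained sketch of that idiom (the bus ID "1-1" is only an example; link with -ludev):

	#include <stdio.h>
	#include <libudev.h>

	int main(void)
	{
		struct udev *udev;
		struct udev_device *dev;
		const char *driver, *dev_class;

		udev = udev_new();
		if (!udev)
			return 1;

		/* Look up the device by bus ID, as the usbip tool does. */
		dev = udev_device_new_from_subsystem_sysname(udev, "usb", "1-1");
		if (!dev) {
			fprintf(stderr, "no device with bus ID 1-1\n");
			udev_unref(udev);
			return 1;
		}

		dev_class = udev_device_get_sysattr_value(dev, "bDeviceClass");
		driver = udev_device_get_driver(dev);

		printf("bDeviceClass=%s driver=%s\n",
		       dev_class ? dev_class : "?",
		       driver ? driver : "(none)");

		udev_device_unref(dev);
		udev_unref(udev);
		return 0;
	}
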
diff --git a/drivers/staging/usbip/userspace/src/usbip_detach.c b/drivers/staging/usbip/userspace/src/usbip_detach.c
index 13308df613a2..05c6d15856eb 100644
--- a/drivers/staging/usbip/userspace/src/usbip_detach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_detach.c
@@ -16,8 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
-
#include <ctype.h>
#include <limits.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
index 237e099337a1..d5ce34a410e7 100644
--- a/drivers/staging/usbip/userspace/src/usbip_list.c
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -17,7 +17,7 @@
*/
#include <sys/types.h>
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdbool.h>
@@ -54,7 +54,7 @@ static int get_exported_devices(char *host, int sockfd)
struct usbip_usb_device udev;
struct usbip_usb_interface uintf;
unsigned int i;
- int j, rc;
+ int rc, j;
rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0);
if (rc < 0) {
@@ -107,19 +107,20 @@ static int get_exported_devices(char *host, int sockfd)
for (j = 0; j < udev.bNumInterfaces; j++) {
rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf));
if (rc < 0) {
- dbg("usbip_net_recv failed: usbip_usb_intf[%d]",
- j);
+ err("usbip_net_recv failed: usbip_usb_intf[%d]",
+ j);
return -1;
}
usbip_net_pack_usb_interface(0, &uintf);
usbip_names_get_class(class_name, sizeof(class_name),
- uintf.bInterfaceClass,
- uintf.bInterfaceSubClass,
- uintf.bInterfaceProtocol);
+ uintf.bInterfaceClass,
+ uintf.bInterfaceSubClass,
+ uintf.bInterfaceProtocol);
printf("%11s: %2d - %s\n", "", j, class_name);
}
+
printf("\n");
}
@@ -150,8 +151,8 @@ static int list_exported_devices(char *host)
return 0;
}
-static void print_device(char *busid, char *vendor, char *product,
- bool parsable)
+static void print_device(const char *busid, const char *vendor,
+ const char *product, bool parsable)
{
if (parsable)
printf("busid=%s#usbid=%.4s:%.4s#", busid, vendor, product);
@@ -165,106 +166,73 @@ static void print_product_name(char *product_name, bool parsable)
printf(" %s\n", product_name);
}
-static void print_interface(char *busid, char *driver, bool parsable)
-{
- if (parsable)
- printf("%s=%s#", busid, driver);
- else
- printf("%9s%s -> %s\n", "", busid, driver);
-}
-
-static int is_device(void *x)
-{
- struct sysfs_attribute *devpath;
- struct sysfs_device *dev = x;
- int ret = 0;
-
- devpath = sysfs_get_device_attr(dev, "devpath");
- if (devpath && *devpath->value != '0')
- ret = 1;
-
- return ret;
-}
-
-static int devcmp(void *a, void *b)
-{
- return strcmp(a, b);
-}
-
static int list_devices(bool parsable)
{
- char bus_type[] = "usb";
- char busid[SYSFS_BUS_ID_SIZE];
+ struct udev *udev;
+ struct udev_enumerate *enumerate;
+ struct udev_list_entry *devices, *dev_list_entry;
+ struct udev_device *dev;
+ const char *path;
+ const char *idVendor;
+ const char *idProduct;
+ const char *bConfValue;
+ const char *bNumIntfs;
+ const char *busid;
char product_name[128];
- struct sysfs_bus *ubus;
- struct sysfs_device *dev;
- struct sysfs_device *intf;
- struct sysfs_attribute *idVendor;
- struct sysfs_attribute *idProduct;
- struct sysfs_attribute *bConfValue;
- struct sysfs_attribute *bNumIntfs;
- struct dlist *devlist;
- int i;
int ret = -1;
- ubus = sysfs_open_bus(bus_type);
- if (!ubus) {
- err("could not open %s bus: %s", bus_type, strerror(errno));
- return -1;
- }
-
- devlist = sysfs_get_bus_devices(ubus);
- if (!devlist) {
- err("could not get %s bus devices: %s", bus_type,
- strerror(errno));
- goto err_out;
- }
-
- /* remove interfaces and root hubs from device list */
- dlist_filter_sort(devlist, is_device, devcmp);
-
- if (!parsable) {
- printf("Local USB devices\n");
- printf("=================\n");
- }
- dlist_for_each_data(devlist, dev, struct sysfs_device) {
- idVendor = sysfs_get_device_attr(dev, "idVendor");
- idProduct = sysfs_get_device_attr(dev, "idProduct");
- bConfValue = sysfs_get_device_attr(dev, "bConfigurationValue");
- bNumIntfs = sysfs_get_device_attr(dev, "bNumInterfaces");
+ /* Create libudev context. */
+ udev = udev_new();
+
+ /* Create libudev device enumeration. */
+ enumerate = udev_enumerate_new(udev);
+
+ /* Take only USB devices that are not hubs and do not have
+ * the bInterfaceNumber attribute, i.e. are not interfaces.
+ */
+ udev_enumerate_add_match_subsystem(enumerate, "usb");
+ udev_enumerate_add_nomatch_sysattr(enumerate, "bDeviceClass", "09");
+ udev_enumerate_add_nomatch_sysattr(enumerate, "bInterfaceNumber", NULL);
+ udev_enumerate_scan_devices(enumerate);
+
+ devices = udev_enumerate_get_list_entry(enumerate);
+
+ /* Show information about each device. */
+ udev_list_entry_foreach(dev_list_entry, devices) {
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev, path);
+
+ /* Get device information. */
+ idVendor = udev_device_get_sysattr_value(dev, "idVendor");
+ idProduct = udev_device_get_sysattr_value(dev, "idProduct");
+ bConfValue = udev_device_get_sysattr_value(dev, "bConfigurationValue");
+ bNumIntfs = udev_device_get_sysattr_value(dev, "bNumInterfaces");
+ busid = udev_device_get_sysname(dev);
if (!idVendor || !idProduct || !bConfValue || !bNumIntfs) {
err("problem getting device attributes: %s",
strerror(errno));
goto err_out;
}
- /* get product name */
+ /* Get product name. */
usbip_names_get_product(product_name, sizeof(product_name),
- strtol(idVendor->value, NULL, 16),
- strtol(idProduct->value, NULL, 16));
- print_device(dev->bus_id, idVendor->value, idProduct->value,
- parsable);
+ strtol(idVendor, NULL, 16),
+ strtol(idProduct, NULL, 16));
+
+ /* Print information. */
+ print_device(busid, idVendor, idProduct, parsable);
print_product_name(product_name, parsable);
- for (i = 0; i < atoi(bNumIntfs->value); i++) {
- snprintf(busid, sizeof(busid), "%s:%.1s.%d",
- dev->bus_id, bConfValue->value, i);
- intf = sysfs_open_device(bus_type, busid);
- if (!intf) {
- err("could not open device interface: %s",
- strerror(errno));
- goto err_out;
- }
- print_interface(busid, intf->driver_name, parsable);
- sysfs_close_device(intf);
- }
printf("\n");
+
+ udev_device_unref(dev);
}
ret = 0;
err_out:
- sysfs_close_bus(ubus);
+ udev_enumerate_unref(enumerate);
+ udev_unref(udev);
return ret;
}
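
The rewritten list_devices() replaces the libsysfs bus walk with a udev enumeration that matches the "usb" subsystem and filters out hubs and interfaces. A trimmed, self-contained sketch of that enumeration pattern (error handling omitted for brevity; link with -ludev):

	#include <stdio.h>
	#include <libudev.h>

	static void list_usb_devices(void)
	{
		struct udev *udev = udev_new();
		struct udev_enumerate *enumerate = udev_enumerate_new(udev);
		struct udev_list_entry *devices, *entry;

		/* USB devices that are neither hubs nor interfaces. */
		udev_enumerate_add_match_subsystem(enumerate, "usb");
		udev_enumerate_add_nomatch_sysattr(enumerate, "bDeviceClass", "09");
		udev_enumerate_add_nomatch_sysattr(enumerate, "bInterfaceNumber", NULL);
		udev_enumerate_scan_devices(enumerate);

		devices = udev_enumerate_get_list_entry(enumerate);
		udev_list_entry_foreach(entry, devices) {
			const char *path = udev_list_entry_get_name(entry);
			struct udev_device *dev =
				udev_device_new_from_syspath(udev, path);

			if (dev) {
				printf("%s\n", udev_device_get_sysname(dev));
				udev_device_unref(dev);
			}
		}

		udev_enumerate_unref(enumerate);
		udev_unref(udev);
	}
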
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index f19ae19799b4..c1e875cf1078 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -10,7 +10,6 @@
#endif
#include <sys/types.h>
-#include <sysfs/libsysfs.h>
#include <stdint.h>
diff --git a/drivers/staging/usbip/userspace/src/usbip_unbind.c b/drivers/staging/usbip/userspace/src/usbip_unbind.c
index d5a9ab6af2a6..a4a496c9cbaf 100644
--- a/drivers/staging/usbip/userspace/src/usbip_unbind.c
+++ b/drivers/staging/usbip/userspace/src/usbip_unbind.c
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
+#include <libudev.h>
#include <errno.h>
#include <stdio.h>
@@ -27,6 +27,7 @@
#include "usbip_common.h"
#include "utils.h"
#include "usbip.h"
+#include "sysfs_utils.h"
static const char usbip_unbind_usage_string[] =
"usbip unbind <args>\n"
@@ -41,115 +42,69 @@ void usbip_unbind_usage(void)
static int unbind_device(char *busid)
{
char bus_type[] = "usb";
- struct sysfs_driver *usbip_host_drv;
- struct sysfs_device *dev;
- struct dlist *devlist;
- int verified = 0;
int rc, ret = -1;
- char attr_name[] = "bConfigurationValue";
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char busid_attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *busid_attr;
- char *val = NULL;
- int len;
-
- /* verify the busid device is using usbip-host */
- usbip_host_drv = sysfs_open_driver(bus_type, USBIP_HOST_DRV_NAME);
- if (!usbip_host_drv) {
- err("could not open %s driver: %s", USBIP_HOST_DRV_NAME,
- strerror(errno));
- return -1;
- }
+ char unbind_attr_name[] = "unbind";
+ char unbind_attr_path[SYSFS_PATH_MAX];
+ char rebind_attr_name[] = "rebind";
+ char rebind_attr_path[SYSFS_PATH_MAX];
- devlist = sysfs_get_driver_devices(usbip_host_drv);
- if (!devlist) {
- err("%s is not in use by any devices", USBIP_HOST_DRV_NAME);
- goto err_close_usbip_host_drv;
- }
+ struct udev *udev;
+ struct udev_device *dev;
+ const char *driver;
- dlist_for_each_data(devlist, dev, struct sysfs_device) {
- if (!strncmp(busid, dev->name, strlen(busid)) &&
- !strncmp(dev->driver_name, USBIP_HOST_DRV_NAME,
- strlen(USBIP_HOST_DRV_NAME))) {
- verified = 1;
- break;
- }
- }
+ /* Create libudev context. */
+ udev = udev_new();
- if (!verified) {
- err("device on busid %s is not using %s", busid,
- USBIP_HOST_DRV_NAME);
- goto err_close_usbip_host_drv;
+ /* Check whether the device with this bus ID exists. */
+ dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid);
+ if (!dev) {
+ err("device with the specified bus ID does not exist");
+ goto err_close_udev;
}
- /*
- * NOTE: A read and write of an attribute value of the device busid
- * refers to must be done to start probing. That way a rebind of the
- * default driver for the device occurs.
- *
- * This seems very hackish and adds a lot of pointless code. I think it
- * should be done in the kernel by the driver after del_match_busid is
- * finished!
- */
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
+ /* Check whether the device is using usbip-host driver. */
+ driver = udev_device_get_driver(dev);
+ if (!driver || strcmp(driver, "usbip-host")) {
+ err("device is not bound to usbip-host driver");
+ goto err_close_udev;
}
- snprintf(busid_attr_path, sizeof(busid_attr_path), "%s/%s/%s/%s/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DEVICES_NAME,
- busid, attr_name);
+ /* Unbind device from driver. */
+ snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME, unbind_attr_name);
- /* read a device attribute */
- busid_attr = sysfs_open_attribute(busid_attr_path);
- if (!busid_attr) {
- err("could not open %s/%s: %s", busid, attr_name,
- strerror(errno));
- return -1;
- }
-
- if (sysfs_read_attribute(busid_attr) < 0) {
- err("problem reading attribute: %s", strerror(errno));
- goto err_out;
+ rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid));
+ if (rc < 0) {
+ err("error unbinding device %s from driver", busid);
+ goto err_close_udev;
}
- len = busid_attr->len;
- val = malloc(len);
- *val = *busid_attr->value;
- sysfs_close_attribute(busid_attr);
-
- /* notify driver of unbind */
+ /* Notify driver of unbind. */
rc = modify_match_busid(busid, 0);
if (rc < 0) {
err("unable to unbind device on %s", busid);
- goto err_out;
+ goto err_close_udev;
}
- /* write the device attribute */
- busid_attr = sysfs_open_attribute(busid_attr_path);
- if (!busid_attr) {
- err("could not open %s/%s: %s", busid, attr_name,
- strerror(errno));
- return -1;
- }
+ /* Trigger new probing. */
+ snprintf(rebind_attr_path, sizeof(rebind_attr_path), "%s/%s/%s/%s/%s/%s",
+ SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME, rebind_attr_name);
- rc = sysfs_write_attribute(busid_attr, val, len);
+ rc = write_sysfs_attribute(rebind_attr_path, busid, strlen(busid));
if (rc < 0) {
- err("problem writing attribute: %s", strerror(errno));
- goto err_out;
+ err("error rebinding");
+ goto err_close_udev;
}
- sysfs_close_attribute(busid_attr);
ret = 0;
- printf("unbind device on busid %s: complete\n", busid);
+ info("unbind device on busid %s: complete", busid);
-err_out:
- free(val);
-err_close_usbip_host_drv:
- sysfs_close_driver(usbip_host_drv);
+err_close_udev:
+ udev_device_unref(dev);
+ udev_unref(udev);
return ret;
}
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 7980f8b5517b..2cae4ce48d11 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -43,6 +43,7 @@
#include "usbip_host_driver.h"
#include "usbip_common.h"
#include "usbip_network.h"
+#include "list.h"
#undef PROGNAME
#define PROGNAME "usbipd"
@@ -93,6 +94,7 @@ static int recv_request_import(int sockfd)
struct op_common reply;
struct usbip_exported_device *edev;
struct usbip_usb_device pdu_udev;
+ struct list_head *i;
int found = 0;
int error = 0;
int rc;
@@ -107,8 +109,8 @@ static int recv_request_import(int sockfd)
}
PACK_OP_IMPORT_REQUEST(0, &req);
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(i, &host_driver->edev_list) {
+ edev = list_entry(i, struct usbip_exported_device, node);
if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
info("found requested device: %s", req.busid);
found = 1;
@@ -161,13 +163,12 @@ static int send_reply_devlist(int connfd)
struct usbip_usb_device pdu_udev;
struct usbip_usb_interface pdu_uinf;
struct op_devlist_reply reply;
- int i;
- int rc;
+ struct list_head *j;
+ int rc, i;
reply.ndev = 0;
/* number of exported devices */
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(j, &host_driver->edev_list) {
reply.ndev += 1;
}
info("exportable devices: %d", reply.ndev);
@@ -185,8 +186,8 @@ static int send_reply_devlist(int connfd)
return -1;
}
- dlist_for_each_data(host_driver->edev_list, edev,
- struct usbip_exported_device) {
+ list_for_each(j, &host_driver->edev_list) {
+ edev = list_entry(j, struct usbip_exported_device, node);
dump_usb_device(&edev->udev);
memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
usbip_net_pack_usb_device(1, &pdu_udev);
@@ -203,9 +204,9 @@ static int send_reply_devlist(int connfd)
usbip_net_pack_usb_interface(1, &pdu_uinf);
rc = usbip_net_send(connfd, &pdu_uinf,
- sizeof(pdu_uinf));
+ sizeof(pdu_uinf));
if (rc < 0) {
- dbg("usbip_net_send failed: pdu_uinf");
+ err("usbip_net_send failed: pdu_uinf");
return -1;
}
}
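
usbipd.c now walks the exported-device list with list_for_each()/list_entry() from the userspace list.h copy included above, instead of dlist_for_each_data(). The sketch below inlines a tiny subset of those macros so it compiles on its own; struct exported_device and its fields are illustrative, not the daemon's real structure:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	/* Recover the containing structure from its embedded list node. */
	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define list_for_each(pos, head) \
		for (pos = (head)->next; pos != (head); pos = pos->next)

	struct exported_device {
		int busnum;
		struct list_head node;
	};

	int main(void)
	{
		struct list_head edev_list = LIST_HEAD_INIT(edev_list);
		struct exported_device a = { .busnum = 1 }, b = { .busnum = 3 };
		struct list_head *i;

		list_add_tail(&a.node, &edev_list);
		list_add_tail(&b.node, &edev_list);

		list_for_each(i, &edev_list) {
			struct exported_device *edev =
				list_entry(i, struct exported_device, node);
			printf("exported device on bus %d\n", edev->busnum);
		}
		return 0;
	}
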
diff --git a/drivers/staging/usbip/userspace/src/utils.c b/drivers/staging/usbip/userspace/src/utils.c
index 2d4966e6289c..2b3d6d235015 100644
--- a/drivers/staging/usbip/userspace/src/utils.c
+++ b/drivers/staging/usbip/userspace/src/utils.c
@@ -16,61 +16,37 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <sysfs/libsysfs.h>
-
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "usbip_common.h"
#include "utils.h"
+#include "sysfs_utils.h"
int modify_match_busid(char *busid, int add)
{
- char bus_type[] = "usb";
char attr_name[] = "match_busid";
- char buff[SYSFS_BUS_ID_SIZE + 4];
- char sysfs_mntpath[SYSFS_PATH_MAX];
+ char command[SYSFS_BUS_ID_SIZE + 4];
char match_busid_attr_path[SYSFS_PATH_MAX];
- struct sysfs_attribute *match_busid_attr;
- int rc, ret = 0;
-
- if (strnlen(busid, SYSFS_BUS_ID_SIZE) > SYSFS_BUS_ID_SIZE - 1) {
- dbg("busid is too long");
- return -1;
- }
-
- rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (rc < 0) {
- err("sysfs must be mounted: %s", strerror(errno));
- return -1;
- }
+ int rc;
snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
- "%s/%s/%s/%s/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, bus_type,
- SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);
-
- match_busid_attr = sysfs_open_attribute(match_busid_attr_path);
- if (!match_busid_attr) {
- dbg("problem getting match_busid attribute: %s",
- strerror(errno));
- return -1;
- }
+ "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
+ SYSFS_BUS_TYPE, SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME,
+ attr_name);
if (add)
- snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+ snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
else
- snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+ snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
- dbg("write \"%s\" to %s", buff, match_busid_attr->path);
-
- rc = sysfs_write_attribute(match_busid_attr, buff, sizeof(buff));
+ rc = write_sysfs_attribute(match_busid_attr_path, command,
+ sizeof(command));
if (rc < 0) {
dbg("failed to write match_busid: %s", strerror(errno));
- ret = -1;
+ return -1;
}
- sysfs_close_attribute(match_busid_attr);
-
- return ret;
+ return 0;
}
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 72391ef87646..1e84577230ef 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -205,8 +205,6 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
}
}
- pr_info("changed %d\n", changed);
-
if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
usb_hcd_resume_root_hub(hcd);
@@ -273,14 +271,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
break;
case USB_PORT_FEAT_POWER:
- usbip_dbg_vhci_rh(" ClearPortFeature: "
- "USB_PORT_FEAT_POWER\n");
+ usbip_dbg_vhci_rh(
+ " ClearPortFeature: USB_PORT_FEAT_POWER\n");
dum->port_status[rhport] = 0;
dum->resuming = 0;
break;
case USB_PORT_FEAT_C_RESET:
- usbip_dbg_vhci_rh(" ClearPortFeature: "
- "USB_PORT_FEAT_C_RESET\n");
+ usbip_dbg_vhci_rh(
+ " ClearPortFeature: USB_PORT_FEAT_C_RESET\n");
switch (dum->vdev[rhport].speed) {
case USB_SPEED_HIGH:
dum->port_status[rhport] |=
@@ -339,16 +337,17 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (dum->vdev[rhport].ud.status ==
VDEV_ST_NOTASSIGNED) {
- usbip_dbg_vhci_rh(" enable rhport %d "
- "(status %u)\n",
- rhport,
- dum->vdev[rhport].ud.status);
+ usbip_dbg_vhci_rh(
+ " enable rhport %d (status %u)\n",
+ rhport,
+ dum->vdev[rhport].ud.status);
dum->port_status[rhport] |=
USB_PORT_STAT_ENABLE;
}
}
((__le16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
- ((__le16 *) buf)[1] = cpu_to_le16(dum->port_status[rhport] >> 16);
+ ((__le16 *) buf)[1] =
+ cpu_to_le16(dum->port_status[rhport] >> 16);
usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
((u16 *)buf)[1]);
@@ -360,12 +359,12 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- usbip_dbg_vhci_rh(" SetPortFeature: "
- "USB_PORT_FEAT_SUSPEND\n");
+ usbip_dbg_vhci_rh(
+ " SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_RESET:
- usbip_dbg_vhci_rh(" SetPortFeature: "
- "USB_PORT_FEAT_RESET\n");
+ usbip_dbg_vhci_rh(
+ " SetPortFeature: USB_PORT_FEAT_RESET\n");
/* if it's already running, disconnect first */
if (dum->port_status[rhport] & USB_PORT_STAT_ENABLE) {
dum->port_status[rhport] &=
@@ -537,9 +536,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
case USB_REQ_GET_DESCRIPTOR:
if (ctrlreq->wValue == cpu_to_le16(USB_DT_DEVICE << 8))
- usbip_dbg_vhci_hc("Not yet?: "
- "Get_Descriptor to device 0 "
- "(get max pipe size)\n");
+ usbip_dbg_vhci_hc(
+ "Not yet?: Get_Descriptor to device 0 (get max pipe size)\n");
if (vdev->udev)
usb_put_dev(vdev->udev);
@@ -548,8 +546,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
default:
/* NOT REACHED */
- dev_err(dev, "invalid request to devnum 0 bRequest %u, "
- "wValue %u\n", ctrlreq->bRequest,
+ dev_err(dev,
+ "invalid request to devnum 0 bRequest %u, wValue %u\n",
+ ctrlreq->bRequest,
ctrlreq->wValue);
ret = -EINVAL;
goto no_need_xmit;
@@ -1070,8 +1069,9 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
spin_unlock(&the_controller->lock);
if (connected > 0) {
- dev_info(&pdev->dev, "We have %d active connection%s. Do not "
- "suspend.\n", connected, (connected == 1 ? "" : "s"));
+ dev_info(&pdev->dev,
+ "We have %d active connection%s. Do not suspend.\n",
+ connected, (connected == 1 ? "" : "s"));
ret = -EBUSY;
} else {
dev_info(&pdev->dev, "suspend vhci_hcd");
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..e0980324fb03 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -47,8 +47,8 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
* up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
* port number and its peer IP address.
*/
- out += sprintf(out, "prt sta spd bus dev socket "
- "local_busid\n");
+ out += sprintf(out,
+ "prt sta spd bus dev socket local_busid\n");
for (i = 0; i < VHCI_NPORTS; i++) {
struct vhci_device *vdev = port_to_vdev(i);
@@ -114,7 +114,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
int err;
__u32 rhport = 0;
- sscanf(buf, "%u", &rhport);
+ if (sscanf(buf, "%u", &rhport) != 1)
+ return -EINVAL;
/* check rhport */
if (rhport >= VHCI_NPORTS) {
@@ -149,7 +150,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
@@ -181,7 +183,8 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
* @devid: unique device identifier in a remote host
* @speed: usb device speed in a remote host
*/
- sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed);
+ if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 4)
+ return -EINVAL;
usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
rhport, sockfd, devid, speed);
@@ -214,8 +217,9 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- dev_info(dev, "rhport(%u) sockfd(%d) devid(%u) speed(%u)\n",
- rhport, sockfd, devid, speed);
+ dev_info(dev,
+ "rhport(%u) sockfd(%d) devid(%u) speed(%u) speed_str(%s)\n",
+ rhport, sockfd, devid, speed, usb_speed_string(speed));
vdev->devid = devid;
vdev->speed = speed;
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index 7949d58ad7d1..9aa2e46c4a5d 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -91,12 +91,15 @@ vMgrEncodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_CAPINFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_CAPINFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_BEACON_OFF_SSID;
@@ -124,16 +127,20 @@ vMgrDecodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_BEACON_OFF_CAPINFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_BEACON_OFF_CAPINFO);
/* Information elements */
- pItem = (PWLAN_IE)((unsigned char *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
- + WLAN_BEACON_OFF_SSID);
+ pItem = (PWLAN_IE)((unsigned char *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))) +
+ WLAN_BEACON_OFF_SSID);
while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
switch (pItem->byElementID) {
case WLAN_EID_SSID:
@@ -171,7 +178,8 @@ vMgrDecodeBeacon(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
@@ -181,7 +189,8 @@ vMgrDecodeBeacon(
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_COUNTRY: /* 7 */
@@ -191,7 +200,8 @@ vMgrDecodeBeacon(
case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ pFrame->pIE_PowerConstraint =
+ (PWLAN_IE_PW_CONST)pItem;
break;
case WLAN_EID_CH_SWITCH: /* 37 */
@@ -210,7 +220,9 @@ vMgrDecodeBeacon(
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in beacon decode.\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in beacon decode.\n",
+ pItem->byElementID);
break;
}
@@ -282,9 +294,11 @@ vMgrEncodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DISASSOC_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DISASSOC_OFF_REASON);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON +
+ sizeof(*(pFrame->pwReason));
return;
}
@@ -308,8 +322,9 @@ vMgrDecodeDisassociation(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DISASSOC_OFF_REASON);
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DISASSOC_OFF_REASON);
return;
}
@@ -332,11 +347,14 @@ vMgrEncodeAssocRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_LISTEN_INT);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT +
+ sizeof(*(pFrame->pwListenInterval));
return;
}
@@ -360,10 +378,12 @@ vMgrDecodeAssocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -377,7 +397,8 @@ vMgrDecodeAssocRequest(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_RSN:
@@ -387,16 +408,19 @@ vMgrDecodeAssocRequest(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in assocreq decode.\n",
pItem->byElementID);
break;
}
@@ -424,14 +448,17 @@ vMgrEncodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_AID);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
- + sizeof(*(pFrame->pwAid));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_AID);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID +
+ sizeof(*(pFrame->pwAid));
return;
}
@@ -457,16 +484,20 @@ vMgrDecodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_AID);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_AID);
/* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCRESP_OFF_SUPP_RATES);
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_ASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
@@ -474,7 +505,9 @@ vMgrDecodeAssocResponse(
if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
(pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "pFrame->pExtSuppRates=[%p].\n",
+ pItem);
} else {
pFrame->pExtSuppRates = NULL;
}
@@ -500,13 +533,17 @@ vMgrEncodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CURR_AP);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP + sizeof(*(pFrame->pAddrCurrAP));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pAddrCurrAP = (PIEEE_ADDR)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CURR_AP);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP +
+ sizeof(*(pFrame->pAddrCurrAP));
return;
}
@@ -531,12 +568,15 @@ vMgrDecodeReassocRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_CURR_AP);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
+ pFrame->pwListenInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_LISTEN_INT);
+ pFrame->pAddrCurrAP = (PIEEE_ADDR)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCREQ_OFF_CURR_AP);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -550,7 +590,8 @@ vMgrDecodeReassocRequest(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_RSN:
@@ -560,16 +601,19 @@ vMgrDecodeReassocRequest(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unrecognized EID=%dd in reassocreq decode.\n",
pItem->byElementID);
break;
}
@@ -631,16 +675,20 @@ vMgrDecodeProbeRequest(
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in probereq\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Bad EID=%dd in probereq\n",
+ pItem->byElementID);
break;
}
@@ -668,15 +716,18 @@ vMgrEncodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_CAP_INFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_CAP_INFO);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
- sizeof(*(pFrame->pwCapInfo));
+ sizeof(*(pFrame->pwCapInfo));
return;
}
@@ -702,12 +753,15 @@ vMgrDecodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_CAP_INFO);
+ pFrame->pqwTimestamp = (PQWORD)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_TS);
+ pFrame->pwBeaconInterval = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_BCN_INT);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_PROBERESP_OFF_CAP_INFO);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -721,7 +775,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_SUPP_RATES:
if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_FH_PARMS:
break;
@@ -735,7 +790,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_IBSS_PARMS:
if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
+ pFrame->pIBSSParms =
+ (PWLAN_IE_IBSS_PARMS)pItem;
break;
case WLAN_EID_RSN:
@@ -745,7 +801,8 @@ vMgrDecodeProbeResponse(
case WLAN_EID_RSN_WPA:
if (pFrame->pRSNWPA == NULL) {
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
+ pFrame->pRSNWPA =
+ (PWLAN_IE_RSN_EXT)pItem;
}
break;
case WLAN_EID_ERP:
@@ -754,7 +811,8 @@ vMgrDecodeProbeResponse(
break;
case WLAN_EID_EXTSUPP_RATES:
if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
+ pFrame->pExtSuppRates =
+ (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_COUNTRY: /* 7 */
@@ -764,7 +822,8 @@ vMgrDecodeProbeResponse(
case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
+ pFrame->pIE_PowerConstraint =
+ (PWLAN_IE_PW_CONST)pItem;
break;
case WLAN_EID_CH_SWITCH: /* 37 */
@@ -783,7 +842,9 @@ vMgrDecodeProbeResponse(
break;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Bad EID=%dd in proberesp\n", pItem->byElementID);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Bad EID=%dd in proberesp\n",
+ pItem->byElementID);
break;
}
@@ -811,13 +872,17 @@ vMgrEncodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_STATUS);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
+ pFrame->pwAuthAlgorithm = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_ALG);
+ pFrame->pwAuthSequence = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_STATUS);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS +
+ sizeof(*(pFrame->pwStatus));
return;
}
@@ -843,12 +908,15 @@ vMgrDecodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_STATUS);
+ pFrame->pwAuthAlgorithm = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_ALG);
+ pFrame->pwAuthSequence = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_AUTH_SEQ);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_AUTHEN_OFF_STATUS);
/* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -880,9 +948,11 @@ vMgrEncodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DEAUTHEN_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DEAUTHEN_OFF_REASON);
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON +
+ sizeof(*(pFrame->pwReason));
return;
}
@@ -906,8 +976,9 @@ vMgrDecodeDeauthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_DEAUTHEN_OFF_REASON);
+ pFrame->pwReason = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_DEAUTHEN_OFF_REASON);
return;
}
@@ -931,14 +1002,18 @@ vMgrEncodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_AID);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_AID);
+
+ pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID +
+ sizeof(*(pFrame->pwAid));
return;
}
@@ -964,16 +1039,20 @@ vMgrDecodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
/* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_AID);
+ pFrame->pwCapInfo = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
+ pFrame->pwStatus = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_STATUS);
+ pFrame->pwAid = (unsigned short *)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_AID);
/* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCRESP_OFF_SUPP_RATES);
+ pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
+ (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
+ WLAN_REASSOCRESP_OFF_SUPP_RATES);
pItem = (PWLAN_IE)(pFrame->pSuppRates);
pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index 82b0bd1c056a..4ccfe06481fc 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -46,8 +46,7 @@
* SBOX Table
*/
-static unsigned char sbox_table[256] =
-{
+static unsigned char sbox_table[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
@@ -153,9 +152,8 @@ static void SubBytes(unsigned char *in, unsigned char *out)
{
int i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 16; i++)
out[i] = sbox_table[in[i]];
- }
}
static void ShiftRows(unsigned char *in, unsigned char *out)
@@ -205,8 +203,7 @@ static void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciph
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
xor_128(TmpdataB, abyRoundKey, ciphertext);
- } else /* round 1 ~ 9 */
- {
+ } else { /* round 1 ~ 9 */
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
MixColumns(&TmpdataB[0], &TmpdataA[0]);
@@ -311,13 +308,11 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
/* CCMP */
AESv128(pbyRxKey, MIC_IV, abyMIC);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
wCnt = 1;
@@ -330,12 +325,10 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
memcpy(pbyPayload, abyPlainText, 16);
@@ -345,27 +338,23 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
/* last payload */
memcpy(&(abyLastCipher[0]), pbyPayload, jj);
- for (ii = jj; ii < 16; ii++) {
+ for (ii = jj; ii < 16; ii++)
abyLastCipher[ii] = 0x00;
- }
abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
- }
memcpy(pbyPayload, abyPlainText, jj);
pbyPayload += jj;
/* for MIC calculation */
- for (ii = jj; ii < 16; ii++) {
+ for (ii = jj; ii < 16; ii++)
abyPlainText[ii] = 0x00;
- }
- for (kk = 0; kk < 16; kk++) {
+ for (kk = 0; kk < 16; kk++)
abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
AESv128(pbyRxKey, abyTmp, abyMIC);
/* =>above is the calculate MIC */
@@ -375,9 +364,8 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 8; kk++) {
+ for (kk = 0; kk < 8; kk++)
abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
/* =>above is the dec-MIC from packet */
/* -------------------------------------------- */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 32d808ec502a..f8e41487f856 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -276,9 +276,8 @@ typedef struct tagSRxDesc {
volatile SRDES1 m_rd1RD1;
volatile u32 buff_addr;
volatile u32 next_desc;
- struct tagSRxDesc *next;//4 bytes
- volatile PDEVICE_RD_INFO pRDInfo;//4 bytes
- volatile u32 Reserved[2];//8 bytes
+ struct tagSRxDesc *next __aligned(8);
+ volatile PDEVICE_RD_INFO pRDInfo __aligned(8);
} __attribute__ ((__packed__))
SRxDesc, *PSRxDesc;
typedef const SRxDesc *PCSRxDesc;
@@ -361,9 +360,8 @@ typedef struct tagSTxDesc {
volatile STDES1 m_td1TD1;
volatile u32 buff_addr;
volatile u32 next_desc;
- struct tagSTxDesc *next; //4 bytes
- volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile u32 Reserved[2];//8 bytes
+ struct tagSTxDesc *next __aligned(8);
+ volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
} __attribute__ ((__packed__))
STxDesc, *PSTxDesc;
typedef const STxDesc *PCSTxDesc;
@@ -375,9 +373,8 @@ typedef struct tagSTxSyncDesc {
volatile u32 next_desc; // pointer to next logical descriptor
volatile unsigned short m_wFIFOCtl;
volatile unsigned short m_wTimeStamp;
- struct tagSTxSyncDesc *next; //4 bytes
- volatile PDEVICE_TD_INFO pTDInfo;//4 bytes
- volatile u32 m_dwReserved2;
+ struct tagSTxSyncDesc *next __aligned(8);
+ volatile PDEVICE_TD_INFO pTDInfo __aligned(8);
} __attribute__ ((__packed__))
STxSyncDesc, *PSTxSyncDesc;
typedef const STxSyncDesc *PCSTxSyncDesc;
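
The desc.h hunks above drop the explicit Reserved padding words and instead force the CPU-only pointer members into 8-byte slots, so the packed descriptor layout the hardware sees stays the same on 32-bit and 64-bit kernels. A minimal sketch of that idiom, with hypothetical names rather than the driver's types:

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/bug.h>

/* Device-owned words first, then CPU-only fields pinned to 8-byte slots so
 * sizeof() and the device-visible offsets match on 32- and 64-bit builds. */
struct demo_rx_desc {
	__le32 status;
	__le32 buff_addr;
	__le32 next_desc;
	struct demo_rx_desc *next __aligned(8);	/* CPU bookkeeping only */
	void *priv_info __aligned(8);		/* CPU bookkeeping only */
} __packed;

static inline void demo_rx_desc_layout_check(void)
{
	/* the aligned members override the packing, so they start on 8-byte
	 * boundaries regardless of pointer size */
	BUILD_BUG_ON(offsetof(struct demo_rx_desc, next) & 7);
	BUILD_BUG_ON(offsetof(struct demo_rx_desc, priv_info) & 7);
}
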
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 0a29c9015419..771bf35ae044 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -729,27 +729,27 @@ device_receive_frame(
// Soft MIC
if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
if (bIsWEP) {
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
- unsigned long dwMIC_Priority;
- unsigned long dwMICKey0 = 0, dwMICKey1 = 0;
- unsigned long dwLocalMIC_L = 0;
- unsigned long dwLocalMIC_R = 0;
+ __le32 *pdwMIC_L;
+ __le32 *pdwMIC_R;
+ __le32 dwMIC_Priority;
+ __le32 dwMICKey0 = 0, dwMICKey1 = 0;
+ u32 dwLocalMIC_L = 0;
+ u32 dwLocalMIC_R = 0;
viawget_wpa_header *wpahdr;
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
} else {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
} else if ((pKey->dwKeyIndex & BIT28) == 0) {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[20]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
} else {
- dwMICKey0 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(unsigned long *)(&pKey->abyKey[28]));
+ dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
+ dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
}
}
@@ -763,14 +763,14 @@ device_receive_frame(
MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
MIC_vUnInit();
- pdwMIC_L = (unsigned long *)(skb->data + 4 + FrameSize);
- pdwMIC_R = (unsigned long *)(skb->data + 4 + FrameSize + 4);
+ pdwMIC_L = (__le32 *)(skb->data + 4 + FrameSize);
+ pdwMIC_R = (__le32 *)(skb->data + 4 + FrameSize + 4);
//DBG_PRN_GRP12(("RxL: %lx, RxR: %lx\n", *pdwMIC_L, *pdwMIC_R));
//DBG_PRN_GRP12(("LocalL: %lx, LocalR: %lx\n", dwLocalMIC_L, dwLocalMIC_R));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwMICKey0= %lx,dwMICKey1= %lx \n", dwMICKey0, dwMICKey1);
- if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) ||
- (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
+ if ((le32_to_cpu(*pdwMIC_L) != dwLocalMIC_L) ||
+ (le32_to_cpu(*pdwMIC_R) != dwLocalMIC_R) ||
pDevice->bRxMICFail) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC comparison is fail!\n");
pDevice->bRxMICFail = false;
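
The dpc.c hunk above stores the on-air MIC words as __le32 and compares them via le32_to_cpu against the locally computed host-order u32 values, instead of reading them through unsigned long pointers (which are 8 bytes wide on 64-bit builds). A small illustrative helper for the same comparison; the name and signature are hypothetical, not the driver's:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Compare the 8-byte little-endian MIC trailing the frame against the two
 * host-order halves produced by the local Michael computation. */
static bool demo_mic_matches(const u8 *frame_mic, u32 local_l, u32 local_r)
{
	u32 rx_l = get_unaligned_le32(frame_mic);
	u32 rx_r = get_unaligned_le32(frame_mic + 4);

	return rx_l == local_l && rx_r == local_r;
}
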
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index eab3b41f9e3c..78b5809b8304 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -242,7 +242,7 @@ bool KeybSetKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -306,7 +306,7 @@ bool KeybSetKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -670,7 +670,7 @@ bool KeybSetDefaultKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
@@ -766,7 +766,7 @@ bool KeybSetAllGroupKey(
if (uKeyLength == WLAN_WEP104_KEYLEN)
pKey->abyKey[15] |= 0x80;
}
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (unsigned long *)pKey->abyKey, byLocalID);
+ MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
if ((dwKeyIndex & USE_KEYRSC) == 0) {
// RSC set by NIC
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 21bd8a1126d7..0ec079fa0398 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -1428,10 +1428,10 @@ bool MACbPSWakeup(unsigned long dwIoBase)
*/
void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID)
+ unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID)
{
unsigned short wOffset;
- unsigned long dwData;
+ u32 dwData;
int ii;
if (byLocalID <= 1)
@@ -1445,7 +1445,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
dwData |= wKeyCtl;
dwData <<= 16;
dwData |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "1. wOffset: %d, Data: %X, KeyCtl:%X\n", wOffset, dwData, wKeyCtl);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
@@ -1460,7 +1460,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
dwData |= *(pbyAddr+1);
dwData <<= 8;
dwData |= *(pbyAddr+0);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "2. wOffset: %d, Data: %lX\n", wOffset, dwData);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "2. wOffset: %d, Data: %X\n", wOffset, dwData);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
@@ -1470,7 +1470,7 @@ void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned in
wOffset += (uKeyIdx * 4);
for (ii = 0; ii < 4; ii++) {
// always push 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "3.(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "3.(%d) wOffset: %d, Data: %X\n", ii, wOffset+ii, *pdwKey);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 3f177f7c581c..4615db05549f 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -1038,7 +1038,7 @@ bool MACbFlushSYNCFifo(unsigned long dwIoBase);
bool MACbPSWakeup(unsigned long dwIoBase);
void MACvSetKeyEntry(unsigned long dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
- unsigned int uKeyIdx, unsigned char *pbyAddr, unsigned long *pdwKey, unsigned char byLocalID);
+ unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID);
void MACvDisableKeyEntry(unsigned long dwIoBase, unsigned int uEntryIdx);
void MACvSetDefaultKeyEntry(unsigned long dwIoBase, unsigned int uKeyLen,
unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
index 7ea5f7f82f43..ade4c855f20d 100644
--- a/drivers/staging/vt6655/michael.c
+++ b/drivers/staging/vt6655/michael.c
@@ -53,14 +53,14 @@
*/
static void s_vClear(void); // Clear the internal message,
// resets the object to the state just after construction.
-static void s_vSetKey(unsigned long dwK0, unsigned long dwK1);
+static void s_vSetKey(u32 dwK0, u32 dwK1);
static void s_vAppendByte(unsigned char b); // Add a single byte to the internal message
/*--------------------- Export Variables --------------------------*/
-static unsigned long L, R; // Current state
+static u32 L, R; /* Current state */
-static unsigned long K0, K1; // Key
-static unsigned long M; // Message accumulator (single word)
+static u32 K0, K1; /* Key */
+static u32 M; /* Message accumulator (single word) */
static unsigned int nBytesInM; // # bytes in M
/*--------------------- Export Functions --------------------------*/
@@ -98,7 +98,7 @@ static void s_vClear(void)
M = 0;
}
-static void s_vSetKey(unsigned long dwK0, unsigned long dwK1)
+static void s_vSetKey(u32 dwK0, u32 dwK1)
{
// Set the key
K0 = dwK0;
@@ -129,7 +129,7 @@ static void s_vAppendByte(unsigned char b)
}
}
-void MIC_vInit(unsigned long dwK0, unsigned long dwK1)
+void MIC_vInit(u32 dwK0, u32 dwK1)
{
// Set the key
s_vSetKey(dwK0, dwK1);
@@ -155,7 +155,7 @@ void MIC_vAppend(unsigned char *src, unsigned int nBytes)
}
}
-void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR)
+void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
{
// Append the minimum padding
s_vAppendByte(0x5a);
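
michael.c above switches the Michael state, key and message accumulator from unsigned long to u32, which matters because the algorithm is defined on 32-bit words and relies on additions wrapping modulo 2^32. For reference, the per-word block step of Michael as specified for TKIP, written here with hypothetical names:

#include <linux/types.h>
#include <linux/bitops.h>

/* One Michael block step: mixes a 32-bit message word into the (l, r) state.
 * Every addition must wrap at 32 bits, hence u32 rather than unsigned long. */
static void demo_michael_block(u32 *l, u32 *r, u32 word)
{
	*l ^= word;
	*r ^= rol32(*l, 17);
	*l += *r;
	*r ^= ((*l & 0xff00ff00) >> 8) | ((*l & 0x00ff00ff) << 8);
	*l += *r;
	*r ^= rol32(*l, 3);
	*l += *r;
	*r ^= ror32(*l, 2);
	*l += *r;
}

On a 64-bit build, unsigned long arithmetic does not truncate those sums at 32 bits, so the computed MIC can drift from the value the peer computes.
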
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
index 387d20623aa3..f6c2c15d9cb6 100644
--- a/drivers/staging/vt6655/michael.h
+++ b/drivers/staging/vt6655/michael.h
@@ -31,11 +31,13 @@
#ifndef __MICHAEL_H__
#define __MICHAEL_H__
+#include <linux/types.h>
+
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Types ------------------------------*/
-void MIC_vInit(unsigned long dwK0, unsigned long dwK1);
+void MIC_vInit(u32 dwK0, u32 dwK1);
void MIC_vUnInit(void);
@@ -44,7 +46,7 @@ void MIC_vAppend(unsigned char *src, unsigned int nBytes);
/* Get the MIC result. Destination should accept 8 bytes of result. */
/* This also resets the message to empty. */
-void MIC_vGetMIC(unsigned long *pdwL, unsigned long *pdwR);
+void MIC_vGetMIC(u32 *pdwL, u32 *pdwR);
/*--------------------- Export Macros ------------------------------*/
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 6affd6edac0d..c2653eb4f76c 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1257,11 +1257,11 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
// unsigned char abyTmp[8];
// unsigned long dwCRC;
unsigned int cbMICHDR = 0;
- unsigned long dwMICKey0, dwMICKey1;
- unsigned long dwMIC_Priority;
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
- unsigned long dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length".
+ u32 dwMICKey0, dwMICKey1;
+ u32 dwMIC_Priority;
+ u32 *pdwMIC_L;
+ u32 *pdwMIC_R;
+ u32 dwSafeMIC_L, dwSafeMIC_R; /* Fix "Last Frag Size" < "MIC length". */
bool bMIC2Frag = false;
unsigned int uMICFragLen = 0;
unsigned int uMACfragNum = 1;
@@ -1434,21 +1434,21 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
//////////////////////////////////////////////////////////////////
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
} else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
} else {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[24]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[28]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[24]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[28]);
}
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
}
///////////////////////////////////////////////////////////////////
@@ -1624,10 +1624,10 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
if (bMIC2Frag == false) {
if (uTmpLen != 0)
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last MIC:%X, %X\n", *pdwMIC_L, *pdwMIC_R);
} else {
if (uMICFragLen >= 4) {
memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
@@ -1744,8 +1744,8 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
uMICFragLen = cbFragPayloadSize - uTmpLen;
ASSERT(uMICFragLen < cbMIClen);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
dwSafeMIC_L = *pdwMIC_L;
dwSafeMIC_R = *pdwMIC_R;
@@ -1759,7 +1759,7 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n");
*/
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get MIC:%X, %X\n", *pdwMIC_L, *pdwMIC_R);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Middle frag len: %d\n", uTmpLen);
/*
@@ -1873,8 +1873,8 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
- pdwMIC_L = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
- pdwMIC_R = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
+ pdwMIC_L = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
+ pdwMIC_R = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
@@ -1887,7 +1887,7 @@ s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyT
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
/*
for (ii = 0; ii < 8; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *(((unsigned char *)(pdwMIC_L) + ii)));
@@ -2592,10 +2592,10 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
unsigned int uPadding = 0;
unsigned int cbMICHDR = 0;
unsigned int uLength = 0;
- unsigned long dwMICKey0, dwMICKey1;
- unsigned long dwMIC_Priority;
- unsigned long *pdwMIC_L;
- unsigned long *pdwMIC_R;
+ u32 dwMICKey0, dwMICKey1;
+ u32 dwMIC_Priority;
+ u32 *pdwMIC_L;
+ u32 *pdwMIC_R;
unsigned short wTxBufSize;
unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
@@ -2841,22 +2841,22 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
}
if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
+ dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
+ dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "DMA0_tx_8021:MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
- pdwMIC_L = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
- pdwMIC_R = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
+ pdwMIC_L = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
+ pdwMIC_R = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
@@ -2869,7 +2869,7 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, un
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
}
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index 5200a2a0ecca..b673bc9b2130 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -967,10 +967,10 @@ s_vMgrRxAssocResponse(
sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
// decode the frame
vMgrDecodeAssocResponse(&sFrame);
- if ((sFrame.pwCapInfo == 0) ||
- (sFrame.pwStatus == 0) ||
- (sFrame.pwAid == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pwCapInfo == NULL) ||
+ (sFrame.pwStatus == NULL) ||
+ (sFrame.pwAid == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PORT80(0xCC);
return;
}
@@ -1812,10 +1812,10 @@ s_vMgrRxBeacon(
// decode the beacon frame
vMgrDecodeBeacon(&sFrame);
- if ((sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pwBeaconInterval == NULL) ||
+ (sFrame.pwCapInfo == NULL) ||
+ (sFrame.pSSID == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx beacon frame error\n");
return;
}
@@ -2072,7 +2072,7 @@ s_vMgrRxBeacon(
if (bTSFLargeDiff)
bUpdateTSF = true;
- if (pDevice->bEnablePSMode && (sFrame.pTIM != 0)) {
+ if (pDevice->bEnablePSMode && (sFrame.pTIM != NULL)) {
// deal with DTIM, analysis TIM
pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
@@ -4107,11 +4107,11 @@ s_vMgrRxProbeResponse(
sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
vMgrDecodeProbeResponse(&sFrame);
- if ((sFrame.pqwTimestamp == 0) ||
- (sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0)) {
+ if ((sFrame.pqwTimestamp == NULL) ||
+ (sFrame.pwBeaconInterval == NULL) ||
+ (sFrame.pwCapInfo == NULL) ||
+ (sFrame.pSSID == NULL) ||
+ (sFrame.pSuppRates == NULL)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p] \n", pRxPacket->p80211Header);
DBG_PORT80(0xCC);
return;
@@ -4120,7 +4120,7 @@ s_vMgrRxProbeResponse(
if (sFrame.pSSID->len == 0)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx Probe resp: SSID len = 0 \n");
- if (sFrame.pDSParms != 0) {
+ if (sFrame.pDSParms != NULL) {
if (byCurrChannel > CB_MAX_CHANNEL_24G) {
// channel remapping to
byIEChannel = get_channel_mapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
diff --git a/drivers/staging/vt6656/80211hdr.h b/drivers/staging/vt6656/80211hdr.h
index 000304ffad6c..1e778ba7c634 100644
--- a/drivers/staging/vt6656/80211hdr.h
+++ b/drivers/staging/vt6656/80211hdr.h
@@ -151,7 +151,7 @@
#ifdef __BIG_ENDIAN
/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) ((((u16)(n) >> 8) & (BIT0 | BIT1))
+#define WLAN_GET_FC_PRVER(n) (((u16)(n) >> 8) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((u16)(n) >> 8) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((u16)(n) >> 8) \
& (BIT4|BIT5|BIT6|BIT7)) >> 4)
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 61b9f7bdb858..e2bfa8d266cd 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -269,9 +269,9 @@ bool AESbGenCCMP(u8 *pbyRxKey, u8 *pbyFrame, u16 wFrameSize)
/* MIC_HDR1 */
MIC_HDR1[0] = (u8)(wHLen >> 8);
MIC_HDR1[1] = (u8)(wHLen & 0xff);
- byTmp = (u8)(pMACHeader->frame_control & 0xff);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->frame_control) >> 8);
MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (u8)(pMACHeader->frame_control >> 8);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->frame_control) & 0xff);
byTmp &= 0x87;
MIC_HDR1[3] = byTmp | 0x40;
memcpy(&(MIC_HDR1[4]), pMACHeader->addr1, ETH_ALEN);
@@ -279,7 +279,7 @@ bool AESbGenCCMP(u8 *pbyRxKey, u8 *pbyFrame, u16 wFrameSize)
/* MIC_HDR2 */
memcpy(&(MIC_HDR2[0]), pMACHeader->addr3, ETH_ALEN);
- byTmp = (u8)(pMACHeader->seq_ctrl & 0xff);
+ byTmp = (u8)(le16_to_cpu(pMACHeader->seq_ctrl) >> 8);
MIC_HDR2[6] = byTmp & 0x0f;
MIC_HDR2[7] = 0;
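
The aes_ccmp.c hunk above converts the little-endian frame_control and seq_ctrl fields to host order with le16_to_cpu before extracting the octets needed for the CCMP MIC headers, rather than doing arithmetic on the raw __le16 values. The general idiom, sketched with hypothetical helpers:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Extract the low and high octets of an on-air little-endian 16-bit field
 * in a host-endianness-independent way. */
static inline u8 demo_le16_low(__le16 v)
{
	return le16_to_cpu(v) & 0xff;
}

static inline u8 demo_le16_high(__le16 v)
{
	return le16_to_cpu(v) >> 8;
}
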
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index c3017a74fa0f..f843e50875f9 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -39,13 +39,6 @@ typedef enum _CARD_PHY_TYPE {
PHY_TYPE_11A
} CARD_PHY_TYPE, *PCARD_PHY_TYPE;
-typedef enum _CARD_OP_MODE {
- OP_MODE_INFRASTRUCTURE = 0,
- OP_MODE_ADHOC,
- OP_MODE_AP,
- OP_MODE_UNKNOWN
-} CARD_OP_MODE, *PCARD_OP_MODE;
-
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42 /* add channel9(5045MHz), 41==>42 */
#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 1f422574c3d8..e2abe3ddd244 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -158,10 +158,10 @@ typedef enum __device_msg_level {
/*
* Enum of context types for SendPacket
*/
-typedef enum _CONTEXT_TYPE {
- CONTEXT_DATA_PACKET = 1,
- CONTEXT_MGMT_PACKET
-} CONTEXT_TYPE;
+enum {
+ CONTEXT_DATA_PACKET = 1,
+ CONTEXT_MGMT_PACKET
+};
/* RCB (Receive Control Block) */
struct vnt_rcb {
@@ -180,9 +180,7 @@ struct vnt_usb_send_context {
struct sk_buff *pPacket;
struct urb *pUrb;
unsigned int uBufLen;
- CONTEXT_TYPE Type;
- struct ethhdr sEthHeader;
- void *Next;
+ u8 type;
bool bBoolInUse;
unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
@@ -206,29 +204,10 @@ typedef struct _DEFAULT_CONFIG {
/*
* Structure to keep track of USB interrupt packets
*/
-typedef struct {
- unsigned int uDataLen;
- u8 * pDataBuf;
- /* struct urb *pUrb; */
- bool bInUse;
-} INT_BUFFER, *PINT_BUFFER;
-
-/* 0:11A 1:11B 2:11G */
-typedef enum _VIA_BB_TYPE
-{
- BB_TYPE_11A = 0,
- BB_TYPE_11B,
- BB_TYPE_11G
-} VIA_BB_TYPE, *PVIA_BB_TYPE;
-
-/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga(OFDM in BasicRate) */
-typedef enum _VIA_PKT_TYPE
-{
- PK_TYPE_11A = 0,
- PK_TYPE_11B,
- PK_TYPE_11GB,
- PK_TYPE_11GA
-} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
+struct vnt_interrupt_buffer {
+ u8 *data_buf;
+ bool in_use;
+};
/*++ NDIS related */
@@ -297,16 +276,6 @@ typedef struct tagSPMKIDCandidateEvent {
PMKID_CANDIDATE CandidateList[MAX_PMKIDLIST];
} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
-/*++ 802.11h related */
-#define MAX_QUIET_COUNT 8
-
-typedef struct tagSQuietControl {
- bool bEnable;
- u32 dwStartTime;
- u8 byPeriod;
- u16 wDuration;
-} SQuietControl, *PSQuietControl;
-
/* The receive duplicate detection cache entry */
typedef struct tagSCacheEntry{
u16 wFmSequence;
@@ -386,8 +355,6 @@ struct vnt_private {
OPTIONS sOpts;
- struct tasklet_struct CmdWorkItem;
- struct tasklet_struct EventWorkItem;
struct work_struct read_work_item;
struct work_struct rx_mng_work_item;
@@ -437,29 +404,11 @@ struct vnt_private {
struct vnt_tx_pkt_info pkt_info[16];
/* Variables to track resources for the Interrupt In Pipe */
- INT_BUFFER intBuf;
- int fKillEventPollingThread;
- int bEventAvailable;
+ struct vnt_interrupt_buffer int_buf;
/* default config from file by user setting */
DEFAULT_CONFIG config_file;
- /* Statistic for USB */
- unsigned long ulBulkInPosted;
- unsigned long ulBulkInError;
- unsigned long ulBulkInContCRCError;
- unsigned long ulBulkInBytesRead;
-
- unsigned long ulBulkOutPosted;
- unsigned long ulBulkOutError;
- unsigned long ulBulkOutContCRCError;
- unsigned long ulBulkOutBytesWrite;
-
- unsigned long ulIntInPosted;
- unsigned long ulIntInError;
- unsigned long ulIntInContCRCError;
- unsigned long ulIntInBytesRead;
-
/* Version control */
u16 wFirmwareVersion;
u8 byLocalID;
@@ -480,11 +429,6 @@ struct vnt_private {
int bExistSWNetAddr;
/* Maintain statistical debug info. */
- unsigned long packetsReceived;
- unsigned long packetsReceivedDropped;
- unsigned long packetsReceivedOverflow;
- unsigned long packetsSent;
- unsigned long packetsSentDropped;
unsigned long SendContextsInUse;
unsigned long RcvBuffersInUse;
@@ -549,8 +493,8 @@ struct vnt_private {
u8 byCWMaxMin;
/* Rate */
- VIA_BB_TYPE byBBType; /* 0: 11A, 1:11B, 2:11G */
- VIA_PKT_TYPE byPacketType; /* 0:11a 1:11b 2:11gb 3:11ga */
+ u8 byBBType; /* 0: 11A, 1:11B, 2:11G */
+ u8 byPacketType; /* 0:11a 1:11b 2:11gb 3:11ga */
u16 wBasicRate;
u8 byACKRate;
u8 byTopOFDMBasicRate;
@@ -588,7 +532,9 @@ struct vnt_private {
u16 wFragmentationThreshold;
u8 byShortRetryLimit;
u8 byLongRetryLimit;
- CARD_OP_MODE eOPMode;
+
+ enum nl80211_iftype op_mode;
+
int bBSSIDFilter;
u16 wMaxTransmitMSDULifetime;
u8 abyBSSID[ETH_ALEN];
@@ -807,5 +753,6 @@ struct vnt_private {
(fMP_DISCONNECTED | fMP_RESET_IN_PROGRESS | fMP_HALT_IN_PROGRESS | fMP_INIT_IN_PROGRESS | fMP_SURPRISE_REMOVED)) == 0)
int device_alloc_frag_buf(struct vnt_private *, PSDeFragControlBlock pDeF);
+void vnt_configure_filter(struct vnt_private *);
#endif
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index eca04c0c1d97..4ccaa7e29a1c 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -627,7 +627,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
// Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
if (pDevice->bDiversityEnable && (FrameSize>50) &&
- (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
+ pDevice->op_mode == NL80211_IFTYPE_STATION &&
(pDevice->bLinkPass == true)) {
BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
}
@@ -1227,14 +1227,13 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
if (is_multicast_ether_addr((u8 *)(skb->data+cbHeaderOffset))) {
if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ skbcpy = netdev_alloc_skb(pDevice->dev, pDevice->rx_buf_sz);
// if any node in PS mode, buffer packet until DTIM.
if (skbcpy == NULL) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "relay multicast no skb available \n");
}
else {
- skbcpy->dev = pDevice->dev;
skbcpy->len = FrameSize;
memcpy(skbcpy->data, skb->data+cbHeaderOffset, FrameSize);
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skbcpy);
@@ -1295,63 +1294,66 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
void RXvWorkItem(struct work_struct *work)
{
- struct vnt_private *pDevice =
+ struct vnt_private *priv =
container_of(work, struct vnt_private, read_work_item);
- int ntStatus;
- struct vnt_rcb *pRCB = NULL;
+ int status;
+ struct vnt_rcb *rcb = NULL;
- if (pDevice->Flags & fMP_DISCONNECTED)
+ if (priv->Flags & fMP_DISCONNECTED)
return;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
- spin_lock_irq(&pDevice->lock);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
- while ((pDevice->Flags & fMP_POST_READS) &&
- MP_IS_READY(pDevice) &&
- (pDevice->NumRecvFreeList != 0) ) {
- pRCB = pDevice->FirstRecvFreeList;
- pDevice->NumRecvFreeList--;
- DequeueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList);
- ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB);
- }
- pDevice->bIsRxWorkItemQueued = false;
- spin_unlock_irq(&pDevice->lock);
+ spin_lock_irq(&priv->lock);
+
+ while ((priv->Flags & fMP_POST_READS) && MP_IS_READY(priv) &&
+ (priv->NumRecvFreeList != 0)) {
+ rcb = priv->FirstRecvFreeList;
+
+ priv->NumRecvFreeList--;
+
+ DequeueRCB(priv->FirstRecvFreeList, priv->LastRecvFreeList);
+
+ status = PIPEnsBulkInUsbRead(priv, rcb);
+ }
+ priv->bIsRxWorkItemQueued = false;
+
+ spin_unlock_irq(&priv->lock);
}
-void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb)
+void RXvFreeRCB(struct vnt_rcb *rcb, int re_alloc_skb)
{
- struct vnt_private *pDevice = pRCB->pDevice;
+ struct vnt_private *priv = rcb->pDevice;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
- if (bReAllocSkb == false) {
- kfree_skb(pRCB->skb);
- bReAllocSkb = true;
+ if (re_alloc_skb == false) {
+ kfree_skb(rcb->skb);
+ re_alloc_skb = true;
}
- if (bReAllocSkb == true) {
- pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- // todo error handling
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to re-alloc rx skb\n");
- }else {
- pRCB->skb->dev = pDevice->dev;
- }
- }
- //
- // Insert the RCB back in the Recv free list
- //
- EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
- pDevice->NumRecvFreeList++;
+ if (re_alloc_skb == true) {
+ rcb->skb = netdev_alloc_skb(priv->dev, priv->rx_buf_sz);
+ /* TODO error handling */
+ if (!rcb->skb) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to re-alloc rx skb\n");
+ }
+ }
- if ((pDevice->Flags & fMP_POST_READS) && MP_IS_READY(pDevice) &&
- (pDevice->bIsRxWorkItemQueued == false) ) {
+ /* Insert the RCB back in the Recv free list */
+ EnqueueRCB(priv->FirstRecvFreeList, priv->LastRecvFreeList, rcb);
+ priv->NumRecvFreeList++;
- pDevice->bIsRxWorkItemQueued = true;
- schedule_work(&pDevice->read_work_item);
- }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
+ if ((priv->Flags & fMP_POST_READS) && MP_IS_READY(priv) &&
+ (priv->bIsRxWorkItemQueued == false)) {
+ priv->bIsRxWorkItemQueued = true;
+ schedule_work(&priv->read_work_item);
+ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",
+ priv->NumRecvFreeList, priv->NumRecvMngList);
}
void RXvMngWorkItem(struct work_struct *work)
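
The receive-path changes above replace dev_alloc_skb() plus a manual skb->dev assignment with netdev_alloc_skb(), which associates the skb with its device in a single call. A minimal sketch of the allocation step; the function name and the retry comment are assumptions, not the driver's code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate a receive skb already tied to the given net_device. */
static struct sk_buff *demo_alloc_rx_skb(struct net_device *dev,
					 unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_len);

	if (!skb)
		return NULL;	/* caller decides whether to retry or drop */

	return skb;
}
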
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index e0e93869a681..cca56b2f243d 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -70,119 +70,117 @@ void INTvWorkItem(struct vnt_private *pDevice)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
spin_lock_irq(&pDevice->lock);
- if (pDevice->fKillEventPollingThread != true)
- ntStatus = PIPEnsInterruptRead(pDevice);
+
+ ntStatus = PIPEnsInterruptRead(pDevice);
+
spin_unlock_irq(&pDevice->lock);
}
-void INTnsProcessData(struct vnt_private *pDevice)
+void INTnsProcessData(struct vnt_private *priv)
{
- PSINTData pINTData;
- struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- struct net_device_stats *pStats = &pDevice->stats;
+ struct vnt_interrupt_data *int_data;
+ struct vnt_manager *mgmt = &priv->vnt_mgmt;
+ struct net_device_stats *stats = &priv->stats;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n");
- pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
- if (pINTData->byTSR0 & TSR_VALID) {
- if (pINTData->byTSR0 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+ int_data = (struct vnt_interrupt_data *)priv->int_buf.data_buf;
+
+ if (int_data->tsr0 & TSR_VALID) {
+ if (int_data->tsr0 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR0,
- pINTData->byPkt0);
- /*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr0,
+ int_data->pkt0);
}
- if (pINTData->byTSR1 & TSR_VALID) {
- if (pINTData->byTSR1 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr1 & TSR_VALID) {
+ if (int_data->tsr1 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR1,
- pINTData->byPkt1);
- /*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr1,
+ int_data->pkt1);
}
- if (pINTData->byTSR2 & TSR_VALID) {
- if (pINTData->byTSR2 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr2 & TSR_VALID) {
+ if (int_data->tsr2 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR2,
- pINTData->byPkt2);
- /*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr2,
+ int_data->pkt2);
}
- if (pINTData->byTSR3 & TSR_VALID) {
- if (pINTData->byTSR3 & (TSR_TMO | TSR_RETRYTMO))
- pDevice->wstats.discard.retries++;
+
+ if (int_data->tsr3 & TSR_VALID) {
+ if (int_data->tsr3 & (TSR_TMO | TSR_RETRYTMO))
+ priv->wstats.discard.retries++;
else
- pStats->tx_packets++;
+ stats->tx_packets++;
- BSSvUpdateNodeTxCounter(pDevice,
- pINTData->byTSR3,
- pINTData->byPkt3);
- /*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/
+ BSSvUpdateNodeTxCounter(priv,
+ int_data->tsr3,
+ int_data->pkt3);
}
- if (pINTData->byISR0 != 0) {
- if (pINTData->byISR0 & ISR_BNTX) {
- if (pDevice->eOPMode == OP_MODE_AP) {
- if (pMgmt->byDTIMCount > 0) {
- pMgmt->byDTIMCount--;
- pMgmt->sNodeDBTable[0].bRxPSPoll =
+
+ if (int_data->isr0 != 0) {
+ if (int_data->isr0 & ISR_BNTX) {
+ if (priv->op_mode == NL80211_IFTYPE_AP) {
+ if (mgmt->byDTIMCount > 0) {
+ mgmt->byDTIMCount--;
+ mgmt->sNodeDBTable[0].bRxPSPoll =
false;
- } else if (pMgmt->byDTIMCount == 0) {
+ } else if (mgmt->byDTIMCount == 0) {
/* check if multicast tx buffering */
- pMgmt->byDTIMCount =
- pMgmt->byDTIMPeriod-1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
- if (pMgmt->sNodeDBTable[0].bPSEnable)
- bScheduleCommand((void *) pDevice,
+ mgmt->byDTIMCount =
+ mgmt->byDTIMPeriod-1;
+ mgmt->sNodeDBTable[0].bRxPSPoll = true;
+ if (mgmt->sNodeDBTable[0].bPSEnable)
+ bScheduleCommand((void *) priv,
WLAN_CMD_RX_PSPOLL,
NULL);
}
- bScheduleCommand((void *) pDevice,
+ bScheduleCommand((void *) priv,
WLAN_CMD_BECON_SEND,
NULL);
- } /* if (pDevice->eOPMode == OP_MODE_AP) */
- pDevice->bBeaconSent = true;
+ }
+ priv->bBeaconSent = true;
} else {
- pDevice->bBeaconSent = false;
+ priv->bBeaconSent = false;
}
- if (pINTData->byISR0 & ISR_TBTT) {
- if (pDevice->bEnablePSMode)
- bScheduleCommand((void *) pDevice,
+
+ if (int_data->isr0 & ISR_TBTT) {
+ if (priv->bEnablePSMode)
+ bScheduleCommand((void *) priv,
WLAN_CMD_TBTT_WAKEUP,
NULL);
- if (pDevice->bChannelSwitch) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0)
- bScheduleCommand((void *) pDevice,
+ if (priv->bChannelSwitch) {
+ priv->byChannelSwitchCount--;
+ if (priv->byChannelSwitchCount == 0)
+ bScheduleCommand((void *) priv,
WLAN_CMD_11H_CHSW,
NULL);
}
}
- pDevice->qwCurrTSF = cpu_to_le64(pINTData->qwTSF);
- /*DBG_PRN_GRP01(("ISR0 = %02x ,
- LoTsf = %08x,
- HiTsf = %08x\n",
- pINTData->byISR0,
- pINTData->dwLoTSF,
- pINTData->dwHiTSF)); */
+ priv->qwCurrTSF = le64_to_cpu(int_data->tsf);
}
- if (pINTData->byISR1 != 0)
- if (pINTData->byISR1 & ISR_GPIO3)
- bScheduleCommand((void *) pDevice,
+
+ if (int_data->isr1 != 0)
+ if (int_data->isr1 & ISR_GPIO3)
+ bScheduleCommand((void *) priv,
WLAN_CMD_RADIO,
NULL);
- pDevice->intBuf.uDataLen = 0;
- pDevice->intBuf.bInUse = false;
- pStats->tx_errors = pDevice->wstats.discard.retries;
- pStats->tx_dropped = pDevice->wstats.discard.retries;
+ priv->int_buf.in_use = false;
+
+ stats->tx_errors = priv->wstats.discard.retries;
+ stats->tx_dropped = priv->wstats.discard.retries;
}
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 8e6e217ba4ff..08db868e1d07 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -32,29 +32,28 @@
#include "device.h"
-typedef struct tagSINTData {
- u8 byTSR0;
- u8 byPkt0;
- u16 wTime0;
- u8 byTSR1;
- u8 byPkt1;
- u16 wTime1;
- u8 byTSR2;
- u8 byPkt2;
- u16 wTime2;
- u8 byTSR3;
- u8 byPkt3;
- u16 wTime3;
- u64 qwTSF;
- u8 byISR0;
- u8 byISR1;
- u8 byRTSSuccess;
- u8 byRTSFail;
- u8 byACKFail;
- u8 byFCSErr;
- u8 abySW[2];
-} __attribute__ ((__packed__))
-SINTData, *PSINTData;
+struct vnt_interrupt_data {
+ u8 tsr0;
+ u8 pkt0;
+ u16 time0;
+ u8 tsr1;
+ u8 pkt1;
+ u16 time1;
+ u8 tsr2;
+ u8 pkt2;
+ u16 time2;
+ u8 tsr3;
+ u8 pkt3;
+ u16 time3;
+ __le64 tsf;
+ u8 isr0;
+ u8 isr1;
+ u8 rts_success;
+ u8 rts_fail;
+ u8 ack_fail;
+ u8 fcs_err;
+ u8 sw[2];
+} __packed;
void INTvWorkItem(struct vnt_private *);
void INTnsProcessData(struct vnt_private *);
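
int.h now describes the USB interrupt packet with a __packed struct of fixed-width members, and int.c converts the TSF with le64_to_cpu at the point of use instead of cpu_to_le64. A compact sketch of that pattern with a hypothetical struct (field choice and widths here are illustrative, not the driver's exact layout):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Device-produced buffer: fixed-width, endian-annotated members plus __packed
 * keep the in-memory layout identical to what the hardware sends. */
struct demo_int_data {
	u8 tsr0;
	u8 pkt0;
	__le16 time0;
	__le64 tsf;
	u8 isr0;
	u8 isr1;
} __packed;

static inline u64 demo_int_tsf(const struct demo_int_data *d)
{
	return le64_to_cpu(d->tsf);	/* convert once, on access */
}
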
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 3a68dfa23d84..cf4c06a42880 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -41,6 +41,7 @@
#include "wpactl.h"
#include "control.h"
#include "rndis.h"
+#include "baseband.h"
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
@@ -57,7 +58,7 @@ struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
struct vnt_private *pDevice = netdev_priv(dev);
long ldBm;
- pDevice->wstats.status = pDevice->eOPMode;
+ pDevice->wstats.status = pDevice->op_mode;
RFvRSSITodBm(pDevice, (u8)(pDevice->uCurrRSSI), &ldBm);
pDevice->wstats.qual.level = ldBm;
pDevice->wstats.qual.noise = 0;
@@ -724,10 +725,10 @@ int iwctl_giwaplist(struct net_device *dev, struct iw_request_info *info,
if (!wrq->pointer)
return -EINVAL;
- sock = kzalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
+ sock = kcalloc(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
if (sock == NULL)
return -ENOMEM;
- qual = kzalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
+ qual = kcalloc(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
if (qual == NULL) {
kfree(sock);
return -ENOMEM;
@@ -1394,7 +1395,8 @@ int iwctl_giwpower(struct net_device *dev, struct iw_request_info *info,
if (pMgmt == NULL)
return -EFAULT;
- if ((wrq->disabled = (mode == WMAC_POWER_CAM)))
+ wrq->disabled = (mode == WMAC_POWER_CAM);
+ if (wrq->disabled)
return 0;
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
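
iwctl.c above switches the AP-list allocations from kzalloc(count * size) to kcalloc(count, size), which zeroes the buffer and fails cleanly if the element-count multiplication would overflow. A one-line sketch of the same call, wrapped in a hypothetical helper:

#include <linux/slab.h>
#include <linux/socket.h>

/* Overflow-checked, zeroed allocation of n address slots. */
static struct sockaddr *demo_alloc_ap_addrs(size_t n)
{
	return kcalloc(n, sizeof(struct sockaddr), GFP_KERNEL);
}
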
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 54414ed27191..3ce19ddbc569 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -47,25 +47,19 @@ static int msglevel =MSG_LEVEL_INFO;
*
* Parameters:
* In:
- * uByteidx - Index of Mask
- * byData - Mask Value to write
+ * mc_filter (mac filter)
* Out:
* none
*
* Return Value: none
*
*/
-void MACvWriteMultiAddr(struct vnt_private *pDevice, u32 uByteIdx, u8 byData)
+void MACvWriteMultiAddr(struct vnt_private *pDevice, u64 mc_filter)
{
- u8 byData1;
+ __le64 le_mc = cpu_to_le64(mc_filter);
- byData1 = byData;
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- (u16) (MAC_REG_MAR0 + uByteIdx),
- MESSAGE_REQUEST_MACREG,
- 1,
- &byData1);
+ CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
+ MESSAGE_REQUEST_MACREG, sizeof(le_mc), (u8 *)&le_mc);
}
/*
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 0db1be5b01c8..4053e431ef99 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -403,7 +403,7 @@
#define MAC_REVISION_A0 0x00
#define MAC_REVISION_A1 0x01
-void MACvWriteMultiAddr(struct vnt_private *, u32, u8);
+void MACvWriteMultiAddr(struct vnt_private *, u64);
void MACbShutdown(struct vnt_private *);
void MACvSetBBType(struct vnt_private *, u8);
void MACvDisableKeyEntry(struct vnt_private *, u32);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 58edcae74efc..3c9323069e01 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -256,7 +256,7 @@ device_set_options(struct vnt_private *pDevice) {
pDevice->byShortPreamble = PREAMBLE_TYPE_DEF;
pDevice->ePSMode = PS_MODE_DEF;
pDevice->b11hEnable = X80211h_MODE_DEF;
- pDevice->eOPMode = OP_MODE_DEF;
+ pDevice->op_mode = NL80211_IFTYPE_UNSPECIFIED;
pDevice->uConnectionRate = DATA_RATE_DEF;
if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
pDevice->byBBType = BBP_TYPE_DEF;
@@ -409,7 +409,7 @@ static int device_init_registers(struct vnt_private *pDevice)
if (pDevice->abyCCKPwrTbl[ii] == 0)
pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr;
- pDevice->abyOFDMPwrTbl[ii] =
+ pDevice->abyOFDMPwrTbl[ii] =
pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL];
if (pDevice->abyOFDMPwrTbl[ii] == 0)
pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG;
@@ -749,44 +749,47 @@ err_nomem:
return rc;
}
-static void device_free_tx_bufs(struct vnt_private *pDevice)
+static void device_free_tx_bufs(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pTxContext;
- int ii;
+ struct vnt_usb_send_context *tx_context;
+ int ii;
- for (ii = 0; ii < pDevice->cbTD; ii++) {
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ tx_context = priv->apTD[ii];
+ /* deallocate URBs */
+ if (tx_context->pUrb) {
+ usb_kill_urb(tx_context->pUrb);
+ usb_free_urb(tx_context->pUrb);
+ }
- pTxContext = pDevice->apTD[ii];
- /* deallocate URBs */
- if (pTxContext->pUrb) {
- usb_kill_urb(pTxContext->pUrb);
- usb_free_urb(pTxContext->pUrb);
- }
- kfree(pTxContext);
- }
- return;
+ kfree(tx_context);
+ }
+
+ return;
}
-static void device_free_rx_bufs(struct vnt_private *pDevice)
+static void device_free_rx_bufs(struct vnt_private *priv)
{
- struct vnt_rcb *pRCB;
+ struct vnt_rcb *rcb;
int ii;
- for (ii = 0; ii < pDevice->cbRD; ii++) {
+ for (ii = 0; ii < priv->cbRD; ii++) {
+ rcb = priv->apRCB[ii];
- pRCB = pDevice->apRCB[ii];
- /* deallocate URBs */
- if (pRCB->pUrb) {
- usb_kill_urb(pRCB->pUrb);
- usb_free_urb(pRCB->pUrb);
- }
- /* deallocate skb */
- if (pRCB->skb)
- dev_kfree_skb(pRCB->skb);
- }
- kfree(pDevice->pRCBMem);
+ /* deallocate URBs */
+ if (rcb->pUrb) {
+ usb_kill_urb(rcb->pUrb);
+ usb_free_urb(rcb->pUrb);
+ }
- return;
+ /* deallocate skb */
+ if (rcb->skb)
+ dev_kfree_skb(rcb->skb);
+ }
+
+ kfree(priv->pRCBMem);
+
+ return;
}
static void usb_device_reset(struct vnt_private *pDevice)
@@ -798,95 +801,109 @@ static void usb_device_reset(struct vnt_private *pDevice)
return ;
}
-static void device_free_int_bufs(struct vnt_private *pDevice)
+static void device_free_int_bufs(struct vnt_private *priv)
{
- kfree(pDevice->intBuf.pDataBuf);
- return;
+ kfree(priv->int_buf.data_buf);
+
+ return;
}
-static bool device_alloc_bufs(struct vnt_private *pDevice)
+static bool device_alloc_bufs(struct vnt_private *priv)
{
- struct vnt_usb_send_context *pTxContext;
- struct vnt_rcb *pRCB;
+ struct vnt_usb_send_context *tx_context;
+ struct vnt_rcb *rcb;
int ii;
- for (ii = 0; ii < pDevice->cbTD; ii++) {
+ for (ii = 0; ii < priv->cbTD; ii++) {
+ tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
+ GFP_KERNEL);
+ if (tx_context == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s : allocate tx usb context failed\n",
+ priv->dev->name);
+ goto free_tx;
+ }
- pTxContext = kmalloc(sizeof(struct vnt_usb_send_context), GFP_KERNEL);
- if (pTxContext == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name);
- goto free_tx;
- }
- pDevice->apTD[ii] = pTxContext;
- pTxContext->pDevice = (void *) pDevice;
- /* allocate URBs */
- pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
- if (pTxContext->pUrb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n");
- goto free_tx;
- }
- pTxContext->bBoolInUse = false;
- }
+ priv->apTD[ii] = tx_context;
+ tx_context->pDevice = priv;
+
+ /* allocate URBs */
+ tx_context->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (tx_context->pUrb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR,
+ KERN_ERR "alloc tx urb failed\n");
+ goto free_tx;
+ }
+
+ tx_context->bBoolInUse = false;
+ }
- /* allocate RCB mem */
- pDevice->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * pDevice->cbRD),
+ /* allocate RCB mem */
+ priv->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * priv->cbRD),
GFP_KERNEL);
- if (pDevice->pRCBMem == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
- goto free_tx;
- }
+ if (priv->pRCBMem == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s : alloc rx usb context failed\n",
+ priv->dev->name);
+ goto free_tx;
+ }
- pDevice->FirstRecvFreeList = NULL;
- pDevice->LastRecvFreeList = NULL;
- pDevice->FirstRecvMngList = NULL;
- pDevice->LastRecvMngList = NULL;
- pDevice->NumRecvFreeList = 0;
+ priv->FirstRecvFreeList = NULL;
+ priv->LastRecvFreeList = NULL;
+ priv->FirstRecvMngList = NULL;
+ priv->LastRecvMngList = NULL;
+ priv->NumRecvFreeList = 0;
- pRCB = (struct vnt_rcb *)pDevice->pRCBMem;
+ rcb = (struct vnt_rcb *)priv->pRCBMem;
- for (ii = 0; ii < pDevice->cbRD; ii++) {
+ for (ii = 0; ii < priv->cbRD; ii++) {
+ priv->apRCB[ii] = rcb;
+ rcb->pDevice = priv;
- pDevice->apRCB[ii] = pRCB;
- pRCB->pDevice = (void *) pDevice;
- /* allocate URBs */
- pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ /* allocate URBs */
+ rcb->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (rcb->pUrb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to alloc rx urb\n");
+ goto free_rx_tx;
+ }
- if (pRCB->pUrb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx urb\n");
- goto free_rx_tx;
- }
- pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx skb\n");
- goto free_rx_tx;
- }
- pRCB->skb->dev = pDevice->dev;
- pRCB->bBoolInUse = false;
- EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB);
- pDevice->NumRecvFreeList++;
- pRCB++;
- }
+ rcb->skb = netdev_alloc_skb(priv->dev, priv->rx_buf_sz);
+ if (rcb->skb == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+ " Failed to alloc rx skb\n");
+ goto free_rx_tx;
+ }
+
+ rcb->bBoolInUse = false;
+
+ EnqueueRCB(priv->FirstRecvFreeList,
+ priv->LastRecvFreeList, rcb);
- pDevice->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
- if (pDevice->pInterruptURB == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n");
- goto free_rx_tx;
+ priv->NumRecvFreeList++;
+ rcb++;
}
- pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
- if (pDevice->intBuf.pDataBuf == NULL) {
- DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n");
- usb_free_urb(pDevice->pInterruptURB);
- goto free_rx_tx;
+ priv->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC);
+ if (priv->pInterruptURB == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR"Failed to alloc int urb\n");
+ goto free_rx_tx;
}
- return true;
+ priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
+ if (priv->int_buf.data_buf == NULL) {
+ DBG_PRT(MSG_LEVEL_ERR, KERN_ERR"Failed to alloc int buf\n");
+ usb_free_urb(priv->pInterruptURB);
+ goto free_rx_tx;
+ }
+
+ return true;
free_rx_tx:
- device_free_rx_bufs(pDevice);
+ device_free_rx_bufs(priv);
free_tx:
- device_free_tx_bufs(pDevice);
+ device_free_tx_bufs(priv);
return false;
}
@@ -931,13 +948,11 @@ static void device_free_frag_bufs(struct vnt_private *pDevice)
int device_alloc_frag_buf(struct vnt_private *pDevice,
PSDeFragControlBlock pDeF)
{
+ pDeF->skb = netdev_alloc_skb(pDevice->dev, pDevice->rx_buf_sz);
+ if (!pDeF->skb)
+ return false;
- pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDeF->skb == NULL)
- return false;
- pDeF->skb->dev = pDevice->dev;
-
- return true;
+ return true;
}
static int device_open(struct net_device *dev)
@@ -974,8 +989,6 @@ static int device_open(struct net_device *dev)
goto free_all;
}
- device_set_multi(pDevice->dev);
-
/* init for key management */
KeyvInitTable(pDevice,&pDevice->sKey);
memcpy(pDevice->vnt_mgmt.abyMACAddr,
@@ -992,16 +1005,12 @@ static int device_open(struct net_device *dev)
vMgrObjectInit(pDevice);
- tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);
-
schedule_delayed_work(&pDevice->second_callback_work, HZ);
- pDevice->int_interval = 100; /* max 100 microframes */
+ pDevice->int_interval = 1; /* bInterval is set to 1 */
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bIsRxWorkItemQueued = true;
- pDevice->fKillEventPollingThread = false;
- pDevice->bEventAvailable = false;
pDevice->bWPADEVUp = false;
pDevice->bwextstep0 = false;
@@ -1084,7 +1093,6 @@ static int device_close(struct net_device *dev)
MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
- pDevice->fKillEventPollingThread = true;
cancel_delayed_work_sync(&pDevice->run_command_work);
cancel_delayed_work_sync(&pDevice->second_callback_work);
@@ -1098,8 +1106,6 @@ static int device_close(struct net_device *dev)
cancel_work_sync(&pDevice->rx_mng_work_item);
cancel_work_sync(&pDevice->read_work_item);
- tasklet_kill(&pDevice->EventWorkItem);
-
pDevice->bRoaming = false;
pDevice->bIsRoaming = false;
pDevice->bEnableRoaming = false;
@@ -1350,69 +1356,73 @@ static int Read_config_file(struct vnt_private *pDevice)
static void device_set_multi(struct net_device *dev)
{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (priv->flags & DEVICE_FLAGS_OPENED) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ bScheduleCommand(priv, WLAN_CMD_CONFIGURE_FILTER, NULL);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
+
+void vnt_configure_filter(struct vnt_private *priv)
+{
+ struct net_device *dev = priv->dev;
+ struct vnt_manager *mgmt = &priv->vnt_mgmt;
struct netdev_hw_addr *ha;
- u32 mc_filter[2];
- int ii;
- u8 pbyData[8] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 byTmpMode = 0;
+ u64 mc_filter = 0;
+ u8 tmp = 0;
int rc;
- spin_lock_irq(&pDevice->lock);
- rc = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- MAC_REG_RCR,
- MESSAGE_REQUEST_MACREG,
- 1,
- &byTmpMode
- );
- if (rc == 0) pDevice->byRxMode = byTmpMode;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode);
-
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
- /* unconditionally log net taps */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
- }
- else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- MAC_REG_MAR0,
- MESSAGE_REQUEST_MACREG,
- 8,
- pbyData
- );
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- }
- else {
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
- }
- for (ii = 0; ii < 4; ii++) {
- MACvWriteMultiAddr(pDevice, ii, *((u8 *)&mc_filter[0] + ii));
- MACvWriteMultiAddr(pDevice, ii+ 4, *((u8 *)&mc_filter[1] + ii));
- }
- pDevice->byRxMode &= ~(RCR_UNICAST);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- }
+ rc = CONTROLnsRequestIn(priv, MESSAGE_TYPE_READ,
+ MAC_REG_RCR, MESSAGE_REQUEST_MACREG, 1, &tmp);
+ if (rc == 0)
+ priv->byRxMode = tmp;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "priv->byRxMode in= %x\n",
+ priv->byRxMode);
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ DBG_PRT(MSG_LEVEL_ERR, KERN_NOTICE
+ "%s: Promiscuous mode enabled.\n", dev->name);
+ /* unconditionally log net taps */
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
+ } else if ((netdev_mc_count(dev) > priv->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ mc_filter = ~0x0;
+ MACvWriteMultiAddr(priv, mc_filter);
+
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ } else {
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- /*
- * If AP mode, don't enable RCR_UNICAST since HW only compares
- * addr1 with local MAC
- */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- pDevice->byRxMode &= ~(RCR_UNICAST);
- }
- ControlvWriteByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_RCR, pDevice->byRxMode);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode out= %x\n", pDevice->byRxMode);
- spin_unlock_irq(&pDevice->lock);
+ mc_filter |= 1ULL << (bit_nr & 0x3f);
+ }
+
+ MACvWriteMultiAddr(priv, mc_filter);
+
+ priv->byRxMode &= ~(RCR_UNICAST);
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ }
+
+ if (mgmt->eConfigMode == WMAC_CONFIG_AP) {
+ /*
+ * If AP mode, don't enable RCR_UNICAST since HW only compares
+ * addr1 with local MAC
+ */
+ priv->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ priv->byRxMode &= ~(RCR_UNICAST);
+ }
+
+ ControlvWriteByte(priv, MESSAGE_REQUEST_MACREG,
+ MAC_REG_RCR, priv->byRxMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "priv->byRxMode out= %x\n", priv->byRxMode);
}
static struct net_device_stats *device_get_stats(struct net_device *dev)
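
vnt_configure_filter() above builds the multicast hash as a single u64, setting one bit per address selected by the top six bits of the Ethernet CRC, then hands the whole 8-byte filter to MACvWriteMultiAddr() in one write. The hash construction on its own, under a hypothetical name, looks like:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* Fold the device's multicast list into a 64-bit hash filter: bit N is set
 * when some address hashes (top 6 CRC bits) to N. */
static u64 demo_mc_hash_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u64 filter = 0;

	netdev_for_each_mc_addr(ha, dev) {
		int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

		filter |= 1ULL << (bit_nr & 0x3f);
	}

	return filter;
}

When the list exceeds the device's limit, or IFF_ALLMULTI is set, the hunk instead writes an all-ones filter (mc_filter = ~0x0) rather than hashing each address.
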
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index e7d5487d1041..43da58927cd2 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -67,22 +67,23 @@ void PSvEnablePowerSaving(struct vnt_private *pDevice, u16 wListenInterval)
/* set period of power up before TBTT */
MACvWriteWord(pDevice, MAC_REG_PWBT, C_PWBT);
- if (pDevice->eOPMode != OP_MODE_ADHOC) {
+ if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
MACvWriteWord(pDevice, MAC_REG_AIDATIM, wAID);
- } else {
- /* set ATIM Window */
- /* MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow); */
}
- /* Warren:06-18-2004,the sequence must follow PSEN->AUTOSLEEP->GO2DOZE */
+	/* Warren: 06-18-2004, the sequence must follow
+ * PSEN->AUTOSLEEP->GO2DOZE
+ */
/* enable power saving hw function */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_PSEN);
/* Set AutoSleep */
MACvRegBitsOn(pDevice, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
- /* Warren:MUST turn on this once before turn on AUTOSLEEP ,or the AUTOSLEEP doesn't work */
+	/* Warren: MUST turn this on once before turning on AUTOSLEEP, or
+	 * AUTOSLEEP doesn't work
+ */
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_GO2DOZE);
if (wListenInterval >= 2) {
@@ -105,8 +106,10 @@ void PSvEnablePowerSaving(struct vnt_private *pDevice, u16 wListenInterval)
pDevice->bEnablePSMode = true;
- /* We don't send null pkt in ad hoc mode since beacon will handle this. */
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
+ /* We don't send null pkt in ad hoc mode
+ * since beacon will handle this.
+ */
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION)
PSbSendNullPacket(pDevice);
pDevice->bPWBitOn = true;
@@ -137,7 +140,7 @@ void PSvDisablePowerSaving(struct vnt_private *pDevice)
MACvRegBitsOn(pDevice, MAC_REG_PSCTL, PSCTL_ALBCN);
pDevice->bEnablePSMode = false;
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION)
PSbSendNullPacket(pDevice);
pDevice->bPWBitOn = false;
@@ -226,15 +229,19 @@ void PSvSendPSPOLL(struct vnt_private *pDevice)
WLAN_SET_FC_PWRMGT(0)
));
- pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
- memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
+ pTxPacket->p80211Header->sA2.wDurationID =
+ pMgmt->wCurrAID | BIT14 | BIT15;
+ memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr,
+ WLAN_ADDR_LEN);
pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
pTxPacket->cbPayloadLen = 0;
/* log failure if sending failed */
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send PS-Poll packet failed..\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Send PS-Poll packet failed..\n");
}
/*
@@ -276,16 +283,21 @@ int PSbSendNullPacket(struct vnt_private *pDevice)
pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(flags);
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA)
- pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((u16)WLAN_SET_FC_TODS(1));
-
- memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
+ pTxPacket->p80211Header->sA3.wFrameCtl |=
+ cpu_to_le16((u16)WLAN_SET_FC_TODS(1));
+
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr,
+ WLAN_ADDR_LEN);
+ memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID,
+ WLAN_BSSID_LEN);
pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
pTxPacket->cbPayloadLen = 0;
/* log error if sending failed */
if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Send Null Packet failed !\n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Send Null Packet failed !\n");
return false;
}
return true;
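The eOPMode to op_mode conversion in this file (and in rxtx.c and wmgr.c further down) swaps the driver's private operating-mode enum for the generic enum nl80211_iftype. The sketch below shows the intended correspondence; it assumes the old OP_MODE_* values live in the driver's device.h, and vnt_iftype_from_opmode() is an illustrative name, not a function added by the patch.

#include <net/cfg80211.h>

#include "device.h"	/* assumed location of the old OP_MODE_* enum */

static enum nl80211_iftype vnt_iftype_from_opmode(int op_mode)
{
	switch (op_mode) {
	case OP_MODE_INFRASTRUCTURE:	/* associated STA */
		return NL80211_IFTYPE_STATION;
	case OP_MODE_ADHOC:		/* IBSS */
		return NL80211_IFTYPE_ADHOC;
	case OP_MODE_AP:		/* access point */
		return NL80211_IFTYPE_AP;
	default:
		return NL80211_IFTYPE_UNSPECIFIED;
	}
}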
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 51fff896fcb5..3840323858fc 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -118,7 +118,7 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+static __le16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
u8 rsv_type, u8 pkt_type, u32 frame_lenght, u16 current_rate);
static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
@@ -129,10 +129,10 @@ static u16 s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption);
-static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+static __le16 s_uGetDataDuration(struct vnt_private *pDevice,
u8 byPktType, int bNeedAck);
-static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
+static __le16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
@@ -327,7 +327,7 @@ static void s_vSWencryption(struct vnt_private *pDevice,
}
}
-static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
[rate % MAX_RATE]);
@@ -359,7 +359,7 @@ static u32 s_uGetTxRsvTime(struct vnt_private *priv, u8 pkt_type,
return data_time;
}
-static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
+static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
u32 frame_length, u16 rate, int need_ack)
{
return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
@@ -367,7 +367,7 @@ static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
+static __le16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
u8 rsv_type, u8 pkt_type, u32 frame_lenght, u16 current_rate)
{
u32 rrv_time, rts_time, cts_time, ack_time, data_time;
@@ -402,7 +402,7 @@ static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
- return rrv_time;
+ return cpu_to_le16((u16)rrv_time);
}
rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->uSIFS;
@@ -411,7 +411,7 @@ static u16 s_uGetRTSCTSRsvTime(struct vnt_private *priv,
}
//byFreqType 0: 5GHz, 1:2.4Ghz
-static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+static __le16 s_uGetDataDuration(struct vnt_private *pDevice,
u8 byPktType, int bNeedAck)
{
u32 uAckTime = 0;
@@ -430,7 +430,7 @@ static u16 s_uGetDataDuration(struct vnt_private *pDevice,
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
+static __le16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
u8 byFBOption)
{
@@ -481,14 +481,14 @@ static u16 vnt_rxtx_datahead_g(struct vnt_private *priv, u8 pkt_type, u16 rate,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
+ buf->duration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
- buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
- buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->byTopCCKBasicRate);
- return buf->wDuration_a;
+ return le16_to_cpu(buf->duration_a);
}
static u16 vnt_rxtx_datahead_g_fb(struct vnt_private *priv, u8 pkt_type,
@@ -502,17 +502,17 @@ static u16 vnt_rxtx_datahead_g_fb(struct vnt_private *priv, u8 pkt_type,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- buf->wDuration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
+ buf->duration_a = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_b = s_uGetDataDuration(priv, PK_TYPE_11B, need_ack);
- buf->wDuration_a_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_a_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_a_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_a_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff_a = vnt_time_stamp_off(priv, rate);
- buf->wTimeStampOff_b = vnt_time_stamp_off(priv,
+ buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->byTopCCKBasicRate);
- return buf->wDuration_a;
+ return le16_to_cpu(buf->duration_a);
}
static u16 vnt_rxtx_datahead_a_fb(struct vnt_private *priv, u8 pkt_type,
@@ -522,14 +522,14 @@ static u16 vnt_rxtx_datahead_a_fb(struct vnt_private *priv, u8 pkt_type,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->a);
/* Get Duration and TimeStampOff */
- buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wDuration_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_f0 = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration_f1 = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
- return buf->wDuration;
+ return le16_to_cpu(buf->duration);
}
static u16 vnt_rxtx_datahead_ab(struct vnt_private *priv, u8 pkt_type,
@@ -539,26 +539,27 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_private *priv, u8 pkt_type,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(priv, frame_len, rate, pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- buf->wDuration = s_uGetDataDuration(priv, pkt_type, need_ack);
+ buf->duration = s_uGetDataDuration(priv, pkt_type, need_ack);
- buf->wTimeStampOff = vnt_time_stamp_off(priv, rate);
+ buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
- return buf->wDuration;
+ return le16_to_cpu(buf->duration);
}
static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
struct ieee80211_rts *rts, struct ethhdr *eth_hdr,
- u16 duration)
+ __le16 duration)
{
rts->duration = duration;
rts->frame_control = TYPE_CTL_RTS;
- if (priv->eOPMode == OP_MODE_ADHOC || priv->eOPMode == OP_MODE_AP)
+ if (priv->op_mode == NL80211_IFTYPE_ADHOC ||
+ priv->op_mode == NL80211_IFTYPE_AP)
memcpy(rts->ra, eth_hdr->h_dest, ETH_ALEN);
else
memcpy(rts->ra, priv->abyBSSID, ETH_ALEN);
- if (priv->eOPMode == OP_MODE_AP)
+ if (priv->op_mode == NL80211_IFTYPE_AP)
memcpy(rts->ta, priv->abyBSSID, ETH_ALEN);
else
memcpy(rts->ta, eth_hdr->h_source, ETH_ALEN);
@@ -578,14 +579,14 @@ static u16 vnt_rxtx_rts_g_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ buf->duration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
- buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ buf->duration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration_aa);
return vnt_rxtx_datahead_g(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -604,24 +605,24 @@ static u16 vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ buf->duration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
- buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ buf->duration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wRTSDuration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
+ buf->rts_duration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ buf->rts_duration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
+ buf->rts_duration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- buf->wRTSDuration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ buf->rts_duration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration_aa);
return vnt_rxtx_datahead_g_fb(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -637,10 +638,10 @@ static u16 vnt_rxtx_rts_ab_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->ab);
- buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration);
return vnt_rxtx_datahead_ab(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -656,16 +657,16 @@ static u16 vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
BBvCalculateParameter(priv, rts_frame_len,
priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ buf->duration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
pkt_type, current_rate, need_ack, fb_option);
- buf->wRTSDuration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ buf->rts_duration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
frame_len, pkt_type, priv->tx_rate_fb0, need_ack, fb_option);
- buf->wRTSDuration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ buf->rts_duration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
frame_len, pkt_type, priv->tx_rate_fb1, need_ack, fb_option);
- vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->duration);
return vnt_rxtx_datahead_a_fb(priv, pkt_type, current_rate,
&buf->data_head, frame_len, need_ack);
@@ -727,19 +728,19 @@ static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
/* Get SignalField,ServiceField,Length */
BBvCalculateParameter(pDevice, uCTSFrameLen,
pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
- pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
+ pBuf->duration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
cbFrameLength, byPktType,
wCurrentRate, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f0 */
- pBuf->wCTSDuration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
+ pBuf->cts_duration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA_F0, cbFrameLength, byPktType,
pDevice->tx_rate_fb0, bNeedAck, byFBOption);
/* Get CTSDuration_ba_f1 */
- pBuf->wCTSDuration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
+ pBuf->cts_duration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA_F1, cbFrameLength, byPktType,
pDevice->tx_rate_fb1, bNeedAck, byFBOption);
/* Get CTS Frame body */
- pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.duration = pBuf->duration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
@@ -751,11 +752,11 @@ static u16 s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
BBvCalculateParameter(pDevice, uCTSFrameLen,
pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
/* Get CTSDuration_ba */
- pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice,
+ pBuf->duration_ba = s_uGetRTSCTSDuration(pDevice,
CTSDUR_BA, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, byFBOption);
/*Get CTS Frame body*/
- pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.duration = pBuf->duration_ba;
pBuf->data.frame_control = TYPE_CTL_CTS;
memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
@@ -815,16 +816,16 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_rts *pBuf =
&tx_buffer->tx_head.tx_rts.rts;
- pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
+ pBuf->rts_rrv_time_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
+ pBuf->rts_rrv_time_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
+ pBuf->rts_rrv_time_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
pDevice->byTopCCKBasicRate, bNeedACK);
@@ -845,13 +846,13 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_cts *pBuf = &tx_buffer->
tx_head.tx_cts.cts;
- pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_a = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
- pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time_b = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
pDevice->byTopCCKBasicRate, bNeedACK);
- pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ pBuf->cts_rrv_time_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
byPktType, cbFrameSize, wCurrentRate);
if (need_mic) {
@@ -879,10 +880,10 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
+ pBuf->rts_rrv_time = s_uGetRTSCTSRsvTime(pDevice, 2,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
byPktType, cbFrameSize, wCurrentRate, bNeedACK);
/* Fill RTS */
@@ -893,7 +894,7 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11A, cbFrameSize,
wCurrentRate, bNeedACK);
@@ -913,10 +914,10 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
+ pBuf->rts_rrv_time = s_uGetRTSCTSRsvTime(pDevice, 0,
byPktType, cbFrameSize, wCurrentRate);
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize, wCurrentRate,
bNeedACK);
@@ -928,7 +929,7 @@ static u16 s_vGenerateTxParameter(struct vnt_private *pDevice,
struct vnt_rrv_time_ab *pBuf = &tx_buffer->
tx_head.tx_ab.ab;
- pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice,
+ pBuf->rrv_time = vnt_rxtx_rsvtime_le16(pDevice,
PK_TYPE_11B, cbFrameSize,
wCurrentRate, bNeedACK);
@@ -991,8 +992,8 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
//Set packet type
pTxBufHead->wFIFOCtl |= (u16)(byPktType<<8);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC ||
+ pDevice->op_mode == NL80211_IFTYPE_AP) {
if (is_multicast_ether_addr(psEthHeader->h_dest)) {
bNeedACK = false;
pTxBufHead->wFIFOCtl =
@@ -1292,7 +1293,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
pMACHeader->frame_control = TYPE_802_11_DATA;
- if (pDevice->eOPMode == OP_MODE_AP) {
+ if (pDevice->op_mode == NL80211_IFTYPE_AP) {
memcpy(&(pMACHeader->addr1[0]),
&(psEthHeader->h_dest[0]),
ETH_ALEN);
@@ -1302,7 +1303,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
ETH_ALEN);
pMACHeader->frame_control |= FC_FROMDS;
} else {
- if (pDevice->eOPMode == OP_MODE_ADHOC) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
memcpy(&(pMACHeader->addr1[0]),
&(psEthHeader->h_dest[0]),
ETH_ALEN);
@@ -1541,8 +1542,8 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pbyIVHead = (u8 *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
pbyPayloadHead = (u8 *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
do {
- if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
- (pDevice->bLinkPass == true)) {
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION &&
+ pDevice->bLinkPass == true) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
@@ -1560,7 +1561,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pbyBSSID = pDevice->abyBroadcastAddr;
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->op_mode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
}
@@ -1592,14 +1593,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
tx_cts.tx.head.cts_g.data_head;
- data_head->wDuration_a =
+ data_head->duration_a =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- data_head->wDuration_b =
+ data_head->duration_b =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
} else {
struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
tx_ab.tx.head.data_head_ab;
- data_head->wDuration =
+ data_head->duration =
cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
}
}
@@ -1609,7 +1610,7 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
pTX_Buffer->byType = 0x00;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
@@ -1701,7 +1702,7 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
pTX_Buffer->byType = 0x01;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
PIPEnsSendBulkOut(pDevice,pContext);
@@ -2032,14 +2033,14 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
struct vnt_tx_datahead_g *data_head = &pTX_Buffer->tx_head.
tx_cts.tx.head.cts_g.data_head;
- data_head->wDuration_a =
+ data_head->duration_a =
cpu_to_le16(p80211Header->sA2.wDurationID);
- data_head->wDuration_b =
+ data_head->duration_b =
cpu_to_le16(p80211Header->sA2.wDurationID);
} else {
struct vnt_tx_datahead_ab *data_head = &pTX_Buffer->tx_head.
tx_ab.tx.head.data_head_ab;
- data_head->wDuration =
+ data_head->duration =
cpu_to_le16(p80211Header->sA2.wDurationID);
}
}
@@ -2049,7 +2050,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
pTX_Buffer->byType = 0x00;
pContext->pPacket = skb;
- pContext->Type = CONTEXT_MGMT_PACKET;
+ pContext->type = CONTEXT_MGMT_PACKET;
pContext->uBufLen = (u16)cbReqCount + 4; //USB header
if (WLAN_GET_FC_TODS(pMACHeader->frame_control) == 0) {
@@ -2305,7 +2306,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
else {
- if (pDevice->eOPMode == OP_MODE_ADHOC) {
+ if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
// Adhoc Tx rate decided from node DB
if (is_multicast_ether_addr(pDevice->sTxEthHeader.h_dest)) {
// Multicast use highest data rate
@@ -2336,7 +2337,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
}
- if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
+ if (pDevice->op_mode == NL80211_IFTYPE_STATION) {
// Infra STA rate decided from AP Node, index = 0
pDevice->wCurrentRate = pMgmt->sNodeDBTable[0].wTxDataRate;
}
@@ -2439,11 +2440,11 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
pContext->pPacket = skb;
- pContext->Type = CONTEXT_DATA_PACKET;
+ pContext->type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
- &pContext->sEthHeader.h_dest[0],
+ &pDevice->sTxEthHeader.h_dest[0],
(u16)(BytesToWrite-uHeaderLen),
pTX_Buffer->fifo_head.wFIFOCtl);
@@ -2593,11 +2594,11 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
pContext->pPacket = NULL;
- pContext->Type = CONTEXT_DATA_PACKET;
+ pContext->type = CONTEXT_DATA_PACKET;
pContext->uBufLen = (u16)BytesToWrite + 4 ; //USB header
s_vSaveTxPktInfo(pDevice, (u8)(pTX_Buffer->byPKTNO & 0x0F),
- &pContext->sEthHeader.h_dest[0],
+ &pDevice->sTxEthHeader.h_dest[0],
(u16)(BytesToWrite - uHeaderLen),
pTX_Buffer->fifo_head.wFIFOCtl);
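The recurring change in this file is endian discipline: duration and reserve-time helpers now return __le16, the descriptors store __le16, and a host-order value is recovered with le16_to_cpu() only where a caller needs one. A stripped-down sketch of that pattern follows; the demo_* names are placeholders, not driver symbols.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_head {
	__le16 duration;	/* little-endian, as the device reads it */
} __packed;

static u16 demo_fill_duration(struct demo_head *head, u16 duration_us)
{
	/* convert exactly once when storing into the descriptor */
	head->duration = cpu_to_le16(duration_us);

	/* hand back a CPU-order copy for host-side arithmetic */
	return le16_to_cpu(head->duration);
}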
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index b3ee6d01aa88..6d6539d29d04 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -53,68 +53,68 @@ struct vnt_mic_hdr {
/* RsvTime buffer header */
struct vnt_rrv_time_rts {
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
+ __le16 rts_rrv_time_ba;
+ __le16 rts_rrv_time_aa;
+ __le16 rts_rrv_time_bb;
u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
+ __le16 rrv_time_b;
+ __le16 rrv_time_a;
} __packed;
struct vnt_rrv_time_cts {
- u16 wCTSTxRrvTime_ba;
+ __le16 cts_rrv_time_ba;
u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
+ __le16 rrv_time_b;
+ __le16 rrv_time_a;
} __packed;
struct vnt_rrv_time_ab {
- u16 wRTSTxRrvTime;
- u16 wTxRrvTime;
+ __le16 rts_rrv_time;
+ __le16 rrv_time;
} __packed;
/* TX data header */
struct vnt_tx_datahead_g {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
+ __le16 duration_b;
+ __le16 duration_a;
+ __le16 time_stamp_off_b;
+ __le16 time_stamp_off_a;
} __packed;
struct vnt_tx_datahead_g_fb {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
+ __le16 duration_b;
+ __le16 duration_a;
+ __le16 duration_a_f0;
+ __le16 duration_a_f1;
+ __le16 time_stamp_off_b;
+ __le16 time_stamp_off_a;
} __packed;
struct vnt_tx_datahead_ab {
struct vnt_phy_field ab;
- u16 wDuration;
- u16 wTimeStampOff;
+ __le16 duration;
+ __le16 time_stamp_off;
} __packed;
struct vnt_tx_datahead_a_fb {
struct vnt_phy_field a;
- u16 wDuration;
- u16 wTimeStampOff;
- u16 wDuration_f0;
- u16 wDuration_f1;
+ __le16 duration;
+ __le16 time_stamp_off;
+ __le16 duration_f0;
+ __le16 duration_f1;
} __packed;
/* RTS buffer header */
struct vnt_rts_g {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
+ __le16 duration_ba;
+ __le16 duration_aa;
+ __le16 duration_bb;
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_g data_head;
@@ -123,21 +123,21 @@ struct vnt_rts_g {
struct vnt_rts_g_fb {
struct vnt_phy_field b;
struct vnt_phy_field a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
+ __le16 duration_ba;
+ __le16 duration_aa;
+ __le16 duration_bb;
u16 wReserved;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
+ __le16 rts_duration_ba_f0;
+ __le16 rts_duration_aa_f0;
+ __le16 rts_duration_ba_f1;
+ __le16 rts_duration_aa_f1;
struct ieee80211_rts data;
struct vnt_tx_datahead_g_fb data_head;
} __packed;
struct vnt_rts_ab {
struct vnt_phy_field ab;
- u16 wDuration;
+ __le16 duration;
u16 wReserved;
struct ieee80211_rts data;
struct vnt_tx_datahead_ab data_head;
@@ -145,10 +145,10 @@ struct vnt_rts_ab {
struct vnt_rts_a_fb {
struct vnt_phy_field a;
- u16 wDuration;
+ __le16 duration;
u16 wReserved;
- u16 wRTSDuration_f0;
- u16 wRTSDuration_f1;
+ __le16 rts_duration_f0;
+ __le16 rts_duration_f1;
struct ieee80211_rts data;
struct vnt_tx_datahead_a_fb data_head;
} __packed;
@@ -156,7 +156,7 @@ struct vnt_rts_a_fb {
/* CTS buffer header */
struct vnt_cts {
struct vnt_phy_field b;
- u16 wDuration_ba;
+ __le16 duration_ba;
u16 wReserved;
struct ieee80211_cts data;
u16 reserved2;
@@ -165,10 +165,10 @@ struct vnt_cts {
struct vnt_cts_fb {
struct vnt_phy_field b;
- u16 wDuration_ba;
+ __le16 duration_ba;
u16 wReserved;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
+ __le16 cts_duration_ba_f0;
+ __le16 cts_duration_ba_f1;
struct ieee80211_cts data;
u16 reserved2;
struct vnt_tx_datahead_g_fb data_head;
@@ -234,8 +234,8 @@ struct vnt_tx_short_buf_head {
u16 fifo_ctl;
u16 time_stamp;
struct vnt_phy_field ab;
- u16 duration;
- u16 time_stamp_off;
+ __le16 duration;
+ __le16 time_stamp_off;
} __packed;
struct vnt_beacon_buffer {
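Because these structures describe a layout the hardware consumes, re-typing fields from u16 to __le16 must not change their size or offsets. A compile-time guard of the kind sketched below (demo struct only, not from the patch) is a cheap way to keep that property when editing __packed descriptors.

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_rrv_time_ab {
	__le16 rts_rrv_time;
	__le16 rrv_time;
} __packed;

static inline void demo_layout_check(void)
{
	/* u16 -> __le16 is a pure annotation: still 2 bytes per field */
	BUILD_BUG_ON(sizeof(struct demo_rrv_time_ab) != 4);
}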
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 01cf09999b6d..c5838d99f89f 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -105,6 +105,8 @@ int PIPEnsControlOutAsyn(struct vnt_private *pDevice, u8 byRequest,
int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer)
+ __releases(&pDevice->lock)
+ __acquires(&pDevice->lock)
{
int ntStatus = 0;
int ii;
@@ -167,6 +169,8 @@ int PIPEnsControlOut(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
int PIPEnsControlIn(struct vnt_private *pDevice, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer)
+ __releases(&pDevice->lock)
+ __acquires(&pDevice->lock)
{
int ntStatus = 0;
int ii;
@@ -295,40 +299,38 @@ static void s_nsControlInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsInterruptRead(struct vnt_private *pDevice)
+int PIPEnsInterruptRead(struct vnt_private *priv)
{
- int ntStatus = STATUS_FAILURE;
+ int status = STATUS_FAILURE;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "---->s_nsStartInterruptUsbRead()\n");
- if(pDevice->intBuf.bInUse == true){
- return (STATUS_FAILURE);
- }
- pDevice->intBuf.bInUse = true;
-// pDevice->bEventAvailable = false;
- pDevice->ulIntInPosted++;
-
- //
- // Now that we have created the urb, we will send a
- // request to the USB device object.
- //
- pDevice->pInterruptURB->interval = pDevice->int_interval;
-
-usb_fill_bulk_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
+ if (priv->int_buf.in_use == true)
+ return STATUS_FAILURE;
+
+ priv->int_buf.in_use = true;
+
+ usb_fill_int_urb(priv->pInterruptURB,
+ priv->usb,
+ usb_rcvintpipe(priv->usb, 1),
+ priv->int_buf.data_buf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
- pDevice);
+ priv,
+ priv->int_interval);
- ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
- }
+ status = usb_submit_urb(priv->pInterruptURB, GFP_ATOMIC);
+ if (status) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit int URB failed %d\n", status);
+ priv->int_buf.in_use = false;
+ }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----s_nsStartInterruptUsbRead Return(%x)\n",ntStatus);
- return ntStatus;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "<----s_nsStartInterruptUsbRead Return(%x)\n", status);
+
+ return status;
}
/*
@@ -348,67 +350,48 @@ usb_fill_bulk_urb(pDevice->pInterruptURB,
static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
{
- struct vnt_private *pDevice = (struct vnt_private *)urb->context;
- int ntStatus;
+ struct vnt_private *priv = urb->context;
+ int status;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
- //
- // The context given to IoSetCompletionRoutine is the receive buffer object
- //
-
- //
- // We have a number of cases:
- // 1) The USB read timed out and we received no data.
- // 2) The USB read timed out and we received some data.
- // 3) The USB read was successful and fully filled our irp buffer.
- // 4) The irp was cancelled.
- // 5) Some other failure from the USB device object.
- //
- ntStatus = urb->status;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsInterruptUsbIoCompleteRead Status %d\n", ntStatus);
-
- // if we were not successful, we need to free the int buffer for future use right here
- // otherwise interrupt data handler will free int buffer after it handle it.
- if (( ntStatus != STATUS_SUCCESS )) {
- pDevice->ulBulkInError++;
- pDevice->intBuf.bInUse = false;
-
-// if (ntStatus == USBD_STATUS_CRC) {
-// pDevice->ulIntInContCRCError++;
-// }
-
-// if (ntStatus == STATUS_NOT_CONNECTED )
-// {
- pDevice->fKillEventPollingThread = true;
-// }
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
- } else {
- pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
- pDevice->ulIntInContCRCError = 0;
- pDevice->bEventAvailable = true;
- INTnsProcessData(pDevice);
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "---->s_nsInterruptUsbIoCompleteRead\n");
+
+ switch (urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ priv->int_buf.in_use = false;
+ return;
+ default:
+ break;
+ }
- if (pDevice->fKillEventPollingThread != true) {
- usb_fill_bulk_urb(pDevice->pInterruptURB,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 1),
- (void *) pDevice->intBuf.pDataBuf,
- MAX_INTERRUPT_SIZE,
- s_nsInterruptUsbIoCompleteRead,
- pDevice);
+ status = urb->status;
- ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
- }
- }
- //
- // We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
- // routine (IofCompleteRequest) will stop working on the irp.
- //
- return ;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "s_nsInterruptUsbIoCompleteRead Status %d\n", status);
+
+ if (status != STATUS_SUCCESS) {
+ priv->int_buf.in_use = false;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "IntUSBIoCompleteControl STATUS = %d\n", status);
+ } else {
+ INTnsProcessData(priv);
+ }
+
+ status = usb_submit_urb(priv->pInterruptURB, GFP_ATOMIC);
+ if (status) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit int URB failed %d\n", status);
+ } else {
+ priv->int_buf.in_use = true;
+ }
+
+ return;
}
/*
@@ -425,45 +408,41 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
+int PIPEnsBulkInUsbRead(struct vnt_private *priv, struct vnt_rcb *rcb)
{
- int ntStatus = 0;
- struct urb *pUrb;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");
+ int status = 0;
+ struct urb *urb;
- if (pDevice->Flags & fMP_DISCONNECTED)
- return STATUS_FAILURE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");
- pDevice->ulBulkInPosted++;
+ if (priv->Flags & fMP_DISCONNECTED)
+ return STATUS_FAILURE;
- pUrb = pRCB->pUrb;
- //
- // Now that we have created the urb, we will send a
- // request to the USB device object.
- //
- if (pRCB->skb == NULL) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pRCB->skb is null \n");
- return ntStatus;
- }
+ urb = rcb->pUrb;
+ if (rcb->skb == NULL) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"rcb->skb is null\n");
+ return status;
+ }
- usb_fill_bulk_urb(pUrb,
- pDevice->usb,
- usb_rcvbulkpipe(pDevice->usb, 2),
- (void *) (pRCB->skb->data),
+ usb_fill_bulk_urb(urb,
+ priv->usb,
+ usb_rcvbulkpipe(priv->usb, 2),
+ (void *) (rcb->skb->data),
MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
s_nsBulkInUsbIoCompleteRead,
- pRCB);
+ rcb);
- ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
- if (ntStatus != 0) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit Rx URB failed %d\n", status);
return STATUS_FAILURE ;
}
- pRCB->Ref = 1;
- pRCB->bBoolInUse= true;
- return ntStatus;
+ rcb->Ref = 1;
+ rcb->bBoolInUse = true;
+
+ return status;
}
/*
@@ -483,51 +462,47 @@ int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
- struct vnt_rcb *pRCB = (struct vnt_rcb *)urb->context;
- struct vnt_private *pDevice = pRCB->pDevice;
- unsigned long bytesRead;
- int bIndicateReceive = false;
- int bReAllocSkb = false;
- int status;
+ struct vnt_rcb *rcb = urb->context;
+ struct vnt_private *priv = rcb->pDevice;
+ int re_alloc_skb = false;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
- status = urb->status;
- bytesRead = urb->actual_length;
-
- if (status) {
- pDevice->ulBulkInError++;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);
-//todo...xxxxxx
-// if (status == USBD_STATUS_CRC) {
-// pDevice->ulBulkInContCRCError++;
-// }
-// if (status == STATUS_DEVICE_NOT_CONNECTED )
-// {
-// MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
-// }
- } else {
- if (bytesRead)
- bIndicateReceive = true;
- pDevice->ulBulkInContCRCError = 0;
- pDevice->ulBulkInBytesRead += bytesRead;
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
- if (bIndicateReceive) {
- spin_lock(&pDevice->lock);
- if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == true)
- bReAllocSkb = true;
- spin_unlock(&pDevice->lock);
- }
- pRCB->Ref--;
- if (pRCB->Ref == 0)
- {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d \n",pDevice->NumRecvFreeList);
- spin_lock(&pDevice->lock);
- RXvFreeRCB(pRCB, bReAllocSkb);
- spin_unlock(&pDevice->lock);
- }
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ case -ETIMEDOUT:
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BULK In failed %d\n", urb->status);
+ break;
+ }
+
+ if (urb->actual_length) {
+ spin_lock(&priv->lock);
+
+ if (RXbBulkInProcessData(priv, rcb, urb->actual_length) == true)
+ re_alloc_skb = true;
+
+ spin_unlock(&priv->lock);
+ }
+
+ rcb->Ref--;
+ if (rcb->Ref == 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d\n",
+ priv->NumRecvFreeList);
+ spin_lock(&priv->lock);
+
+ RXvFreeRCB(rcb, re_alloc_skb);
- return;
+ spin_unlock(&priv->lock);
+ }
+
+ return;
}
/*
@@ -544,53 +519,40 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsSendBulkOut(struct vnt_private *pDevice,
- struct vnt_usb_send_context *pContext)
+int PIPEnsSendBulkOut(struct vnt_private *priv,
+ struct vnt_usb_send_context *context)
{
int status;
- struct urb *pUrb;
+ struct urb *urb;
- pDevice->bPWBitOn = false;
+ priv->bPWBitOn = false;
-/*
- if (pDevice->pPendingBulkOutContext != NULL) {
- pDevice->NumContextsQueued++;
- EnqueueContext(pDevice->FirstTxContextQueue, pDevice->LastTxContextQueue, pContext);
- status = STATUS_PENDING;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send pending!\n");
- return status;
- }
-*/
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");
-
- if (MP_IS_READY(pDevice) && (pDevice->Flags & fMP_POST_WRITES)) {
-
- pUrb = pContext->pUrb;
- pDevice->ulBulkOutPosted++;
-// pDevice->pPendingBulkOutContext = pContext;
- usb_fill_bulk_urb(
- pUrb,
- pDevice->usb,
- usb_sndbulkpipe(pDevice->usb, 3),
- (void *) &(pContext->Data[0]),
- pContext->uBufLen,
- s_nsBulkOutIoCompleteWrite,
- pContext);
-
- status = usb_submit_urb(pUrb, GFP_ATOMIC);
- if (status != 0)
- {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
- pContext->bBoolInUse = false;
- return STATUS_FAILURE;
- }
- return STATUS_PENDING;
- }
- else {
- pContext->bBoolInUse = false;
- return STATUS_RESOURCES;
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");
+
+ if (!(MP_IS_READY(priv) && priv->Flags & fMP_POST_WRITES)) {
+ context->bBoolInUse = false;
+ return STATUS_RESOURCES;
+ }
+
+ urb = context->pUrb;
+
+ usb_fill_bulk_urb(urb,
+ priv->usb,
+ usb_sndbulkpipe(priv->usb, 3),
+ context->Data,
+ context->uBufLen,
+ s_nsBulkOutIoCompleteWrite,
+ context);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status != 0) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Submit Tx URB failed %d\n", status);
+ context->bBoolInUse = false;
+ return STATUS_FAILURE;
+ }
+
+ return STATUS_PENDING;
}
/*
@@ -623,68 +585,49 @@ int PIPEnsSendBulkOut(struct vnt_private *pDevice,
static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
{
- struct vnt_private *pDevice;
- int status;
- CONTEXT_TYPE ContextType;
- unsigned long ulBufLen;
- struct vnt_usb_send_context *pContext;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
- //
- // The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
- //
- pContext = (struct vnt_usb_send_context *)urb->context;
-
- pDevice = pContext->pDevice;
- ContextType = pContext->Type;
- ulBufLen = pContext->uBufLen;
-
- if (!netif_device_present(pDevice->dev))
- return;
+ struct vnt_usb_send_context *context = urb->context;
+ struct vnt_private *priv = context->pDevice;
+ u8 context_type = context->type;
- //
- // Perform various IRP, URB, and buffer 'sanity checks'
- //
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
- status = urb->status;
-
- if(status == STATUS_SUCCESS) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
- pDevice->ulBulkOutBytesWrite += ulBufLen;
- pDevice->ulBulkOutContCRCError = 0;
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
- pDevice->ulBulkOutError++;
- }
+ switch (urb->status) {
+ case 0:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "Write %d bytes\n", context->uBufLen);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ context->bBoolInUse = false;
+ return;
+ case -ETIMEDOUT:
+ default:
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "BULK Out failed %d\n", urb->status);
+ break;
+ }
-// pDevice->ulCheckForHangCount = 0;
-// pDevice->pPendingBulkOutContext = NULL;
+ if (!netif_device_present(priv->dev))
+ return;
- if ( CONTEXT_DATA_PACKET == ContextType ) {
- // Indicate to the protocol the status of the sent packet and return
- // ownership of the packet.
- if (pContext->pPacket != NULL) {
- dev_kfree_skb_irq(pContext->pPacket);
- pContext->pPacket = NULL;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx %d bytes\n",(int)ulBufLen);
- }
+ if (CONTEXT_DATA_PACKET == context_type) {
+ if (context->pPacket != NULL) {
+ dev_kfree_skb_irq(context->pPacket);
+ context->pPacket = NULL;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "tx %d bytes\n", context->uBufLen);
+ }
- pDevice->dev->trans_start = jiffies;
+ priv->dev->trans_start = jiffies;
+ }
- if (status == STATUS_SUCCESS) {
- pDevice->packetsSent++;
- }
- else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
- pDevice->packetsSentDropped++;
- }
+ if (priv->bLinkPass == true) {
+ if (netif_queue_stopped(priv->dev))
+ netif_wake_queue(priv->dev);
+ }
- }
- if (pDevice->bLinkPass == true) {
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
- }
- pContext->bBoolInUse = false;
+ context->bBoolInUse = false;
- return;
+ return;
}
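The interrupt endpoint is now driven with usb_fill_int_urb() (passing the polling interval) rather than usb_fill_bulk_urb(), and the completion handlers bail out without resubmitting when the URB was killed. A minimal sketch of that pattern, with demo_* placeholder names and endpoint 1 assumed as in this driver:

#include <linux/usb.h>

static void demo_intr_complete(struct urb *urb)
{
	switch (urb->status) {
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;		/* URB was killed: do not resubmit */
	default:
		break;
	}

	/* process urb->transfer_buffer here, then resubmit */
	usb_submit_urb(urb, GFP_ATOMIC);
}

static int demo_intr_start(struct usb_device *udev, struct urb *urb,
			   void *buf, int len, int interval)
{
	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 1),
			 buf, len, demo_intr_complete, NULL, interval);

	return usb_submit_urb(urb, GFP_ATOMIC);
}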
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 6b9522914634..3cf3f24247a3 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -293,17 +293,11 @@ void vRunCommand(struct work_struct *work)
case WLAN_CMD_SCAN_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pDevice->bRadioOff == true)
+ break;
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
+ break;
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
@@ -311,16 +305,12 @@ void vRunCommand(struct work_struct *work)
pMgmt->uScanChannel = pDevice->byMinChannel;
if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
pDevice->eCommandState = WLAN_CMD_SCAN_END;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
} else {
if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel);
pMgmt->uScanChannel++;
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
}
if (pMgmt->uScanChannel == pDevice->byMinChannel) {
// pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark
@@ -420,16 +410,13 @@ void vRunCommand(struct work_struct *work)
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_DISASSOCIATE_START:
pDevice->byReAssocCount = 0;
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
+ break;
} else {
pDevice->bwextstep0 = false;
pDevice->bwextstep1 = false;
@@ -458,17 +445,14 @@ void vRunCommand(struct work_struct *work)
netif_stop_queue(pDevice->dev);
if (pDevice->bNeedRadioOFF == true)
CARDbRadioPowerOff(pDevice);
- s_bCommandComplete(pDevice);
+
break;
case WLAN_CMD_SSID_START:
pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff == true) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (pDevice->bRadioOff == true)
+ break;
memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID,
((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN);
@@ -489,11 +473,9 @@ void vRunCommand(struct work_struct *work)
if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (!memcmp(pItemSSID->abySSID,
+ pItemSSIDCurr->abySSID, pItemSSID->len))
+ break;
}
netif_stop_queue(pDevice->dev);
pDevice->bLinkPass = false;
@@ -582,7 +564,6 @@ void vRunCommand(struct work_struct *work)
}
}
}
- s_bCommandComplete(pDevice);
break;
case WLAN_AUTHENTICATE_WAIT:
@@ -612,7 +593,6 @@ void vRunCommand(struct work_struct *work)
}
pDevice->byLinkWaitCount = 0;
- s_bCommandComplete(pDevice);
break;
case WLAN_ASSOCIATE_WAIT:
@@ -647,7 +627,6 @@ void vRunCommand(struct work_struct *work)
return;
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_AP_MODE_START:
@@ -683,7 +662,6 @@ void vRunCommand(struct work_struct *work)
ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER);
schedule_delayed_work(&pDevice->second_callback_work, HZ);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_TX_PSPACKET_START:
@@ -738,8 +716,6 @@ void vRunCommand(struct work_struct *work)
pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
}
}
-
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_RADIO_START:
@@ -760,11 +736,8 @@ void vRunCommand(struct work_struct *work)
1,
&byTmp);
- if (ntStatus != STATUS_SUCCESS) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
+ if (ntStatus != STATUS_SUCCESS)
+ break;
if ((byTmp & GPIO3_DATA) == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_RADIO_START_OFF........................\n");
// Old commands are useless.
@@ -833,7 +806,6 @@ void vRunCommand(struct work_struct *work)
}
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_CHANGE_BBSENSITIVITY_START:
@@ -843,24 +815,20 @@ void vRunCommand(struct work_struct *work)
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Change sensitivity pDevice->byBBVGACurrent = %x\n", pDevice->byBBVGACurrent);
pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_TBTT_WAKEUP_START:
PSbIsNextTBTTWakeUp(pDevice);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_BECON_SEND_START:
bMgrPrepareBeaconToSend(pDevice, pMgmt);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_SETPOWER_START:
RFbSetPower(pDevice, pDevice->wCurrentRate, pMgmt->uCurrChannel);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_CHANGE_ANTENNA_START:
@@ -878,12 +846,10 @@ void vRunCommand(struct work_struct *work)
else
BBvSetAntennaMode(pDevice, ANT_RXA);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_REMOVE_ALLKEY_START:
KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID);
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_MAC_DISPOWERSAVING_START:
@@ -898,7 +864,6 @@ void vRunCommand(struct work_struct *work)
NULL
);
}
- s_bCommandComplete(pDevice);
break;
case WLAN_CMD_11H_CHSW_START:
@@ -906,14 +871,17 @@ void vRunCommand(struct work_struct *work)
pDevice->bChannelSwitch = false;
pMgmt->uCurrChannel = pDevice->byNewChannel;
pDevice->bStopDataPkt = false;
- s_bCommandComplete(pDevice);
break;
+ case WLAN_CMD_CONFIGURE_FILTER_START:
+ vnt_configure_filter(pDevice);
+ break;
default:
- s_bCommandComplete(pDevice);
break;
} //switch
+ s_bCommandComplete(pDevice);
+
spin_unlock_irq(&pDevice->lock);
return;
}
@@ -1009,6 +977,11 @@ static int s_bCommandComplete(struct vnt_private *pDevice)
pDevice->eCommandState = WLAN_CMD_11H_CHSW_START;
break;
+ case WLAN_CMD_CONFIGURE_FILTER:
+ pDevice->eCommandState =
+ WLAN_CMD_CONFIGURE_FILTER_START;
+ break;
+
default:
break;
}
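The wcmd.c hunks replace the repeated "s_bCommandComplete(); unlock; return" tail in each case with a plain break, so completion and the unlock happen exactly once after the switch. A self-contained sketch of the resulting shape (demo_* names are placeholders, not driver symbols):

#include <linux/spinlock.h>
#include <linux/types.h>

enum demo_state { DEMO_CMD_A, DEMO_CMD_IDLE };

struct demo_priv {
	spinlock_t lock;
	enum demo_state state;
	bool radio_off;
};

static void demo_command_complete(struct demo_priv *priv)
{
	priv->state = DEMO_CMD_IDLE;
}

static void demo_run_command(struct demo_priv *priv)
{
	spin_lock_irq(&priv->lock);

	switch (priv->state) {
	case DEMO_CMD_A:
		if (priv->radio_off)
			break;	/* was: complete + unlock + return */
		/* ... do the command's work ... */
		break;
	default:
		break;
	}

	demo_command_complete(priv);	/* now runs exactly once per pass */

	spin_unlock_irq(&priv->lock);
}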
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index caf2684ce915..736572101bad 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -51,7 +51,8 @@ typedef enum tagCMD_CODE {
WLAN_CMD_REMOVE_ALLKEY,
WLAN_CMD_MAC_DISPOWERSAVING,
WLAN_CMD_11H_CHSW,
- WLAN_CMD_RUN_AP
+ WLAN_CMD_RUN_AP,
+ WLAN_CMD_CONFIGURE_FILTER
} CMD_CODE, *PCMD_CODE;
#define CMD_Q_SIZE 32
@@ -96,6 +97,7 @@ typedef enum tagCMD_STATE {
WLAN_CMD_REMOVE_ALLKEY_START,
WLAN_CMD_MAC_DISPOWERSAVING_START,
WLAN_CMD_11H_CHSW_START,
+ WLAN_CMD_CONFIGURE_FILTER_START,
WLAN_CMD_IDLE
} CMD_STATE, *PCMD_STATE;
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index d74b0e7cb171..0d69719a7426 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -2164,12 +2164,12 @@ void vMgrCreateOwnIBSS(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
pMgmt->byDTIMPeriod = DEFAULT_DTIM_PERIOD;
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pDevice->eOPMode = OP_MODE_AP;
+ pDevice->op_mode = NL80211_IFTYPE_AP;
}
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_IBSS(1);
- pDevice->eOPMode = OP_MODE_ADHOC;
+ pDevice->op_mode = NL80211_IFTYPE_ADHOC;
}
if (pDevice->bEncryptionEnable) {
@@ -2359,7 +2359,7 @@ void vMgrJoinBSSBegin(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->eCurrState = WMAC_STATE_JOINTED;
// Adopt BSS state in Adapter Device Object
- pDevice->eOPMode = OP_MODE_INFRASTRUCTURE;
+ pDevice->op_mode = NL80211_IFTYPE_STATION;
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
// Add current BSS to Candidate list
@@ -2500,7 +2500,7 @@ void vMgrJoinBSSBegin(struct vnt_private *pDevice, PCMD_STATUS pStatus)
pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Adopt BSS state in Adapter Device Object
- pDevice->eOPMode = OP_MODE_ADHOC;
+ pDevice->op_mode = NL80211_IFTYPE_ADHOC;
pDevice->bLinkPass = true;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER);
memcpy(pDevice->abyBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
diff --git a/drivers/staging/winbond/localpara.h b/drivers/staging/winbond/localpara.h
index 84effc47d79d..8ca80ddda59a 100644
--- a/drivers/staging/winbond/localpara.h
+++ b/drivers/staging/winbond/localpara.h
@@ -58,9 +58,13 @@
#define LOCAL_11B_BASIC_RATE_BITMAP 0x826
#define LOCAL_11B_OPERATION_RATE_BITMAP 0x826
#define LOCAL_11G_BASIC_RATE_BITMAP 0x826 /* 1, 2, 5.5, 11 */
-#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 /* 6, 9, 12, 18, 24, 36, 48, 54 */
+#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 /* 6, 9, 12, 18,
+ * 24, 36, 48, 54
+ */
#define LOCAL_11A_BASIC_RATE_BITMAP 0x01001040 /* 6, 12, 24 */
-#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 /* 9, 18, 36, 48, 54 */
+#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 /* 9, 18, 36,
+ * 48, 54
+ */
#define PWR_ACTIVE 0
@@ -140,7 +144,9 @@ struct wb_local_para {
/* Unit time count for the decision to enter PS mode */
u16 CheckCountForPS;
u8 boHasTxActivity;/* tx activity has occurred */
- u8 boMacPsValid; /* Power save mode obtained from H/W is valid or not */
+ u8 boMacPsValid; /* Power save mode obtained
+ * from H/W is valid or not
+ */
/* Rate */
u8 TxRateMode; /*
@@ -162,35 +168,57 @@ struct wb_local_para {
u8 NumOfBRate;
u8 NumOfSRate;
- u8 NumOfDsssRateInSRate; /* number of DSSS rates in supported rate set */
+ u8 NumOfDsssRateInSRate; /* number of DSSS rates in
+ * supported rate set
+ */
u8 reserved1;
u32 dwBasicRateBitmap; /* bit map of basic rates */
- u32 dwSupportRateBitmap; /* bit map of all support rates including basic and operational rates */
+ u32 dwSupportRateBitmap; /* bit map of all support rates
+ * including basic and operational
+ * rates
+ */
/* For SME/MLME handler */
- u16 wOldSTAindex; /* valid when boHandover=TRUE, store old connected STA index */
- u16 wConnectedSTAindex; /* Index of peerly connected AP or IBSS in the descriptionset. */
- u16 Association_ID; /* The Association ID in the (Re)Association Response frame. */
- u16 ListenInterval; /* The listen interval when SME invoking MLME_ (Re)Associate_Request(). */
+ u16 wOldSTAindex; /* valid when boHandover=TRUE,
+ * store old connected STA index
+ */
+ u16 wConnectedSTAindex; /* Index of peerly connected AP or
+ * IBSS in the descriptionset.
+ */
+ u16 Association_ID; /* The Association ID in the
+ * (Re)Association Response frame.
+ */
+ u16 ListenInterval; /* The listen interval when SME invoking
+ * MLME_ (Re)Associate_Request().
+ */
struct radio_off RadioOffStatus;
u8 Reserved0[2];
- u8 boMsRadioOff; /* Ndis demands to be true when set Disassoc. OID and be false when set SSID OID. */
+ u8 boMsRadioOff; /* Ndis demands to be true when set
+ * Disassoc. OID and be false when
+ * set SSID OID.
+ */
u8 bAntennaNo; /* which antenna */
- u8 bConnectFlag; /* the connect status flag for roaming task */
+ u8 bConnectFlag; /* the connect status flag for
+ * roaming task
+ */
u8 RoamStatus;
u8 reserved7[3];
- struct chan_info CurrentChan; /* Current channel no. and channel band. It may be changed by scanning. */
+ struct chan_info CurrentChan; /* Current channel no. and channel band.
+ * It may be changed by scanning.
+ */
u8 boHandover; /* Roaming, Handover to other AP. */
u8 boCCAbusy;
- u16 CWMax; /* It may not be the real value that H/W used */
+ u16 CWMax; /* It may not be the real value
+ * that H/W used
+ */
u8 CWMin; /* 255: set according to 802.11 spec. */
u8 reserved2;
@@ -200,7 +228,9 @@ struct wb_local_para {
u8 bPreambleMode; /* AUTO, s32 */
u8 boNonERPpresent;
- u8 boProtectMechanism; /* H/W will take the necessary action based on this variable */
+ u8 boProtectMechanism; /* H/W will take the necessary action
+ * based on this variable
+ */
u8 boShortPreamble; /* Same here */
u8 boShortSlotTime; /* Same here */
u8 reserved_3;
@@ -213,8 +243,12 @@ struct wb_local_para {
u32 HwBssidValid;
/* For scan list */
- u8 BssListCount; /* Total count of valid descriptor indexes */
- u8 boReceiveUncorrectInfo; /* important settings in beacon/probe resp. have been changed */
+ u8 BssListCount; /* Total count of valid
+ * descriptor indexes
+ */
+ u8 boReceiveUncorrectInfo; /* important settings in beacon/probe
+ * resp. have been changed
+ */
u8 NoOfJoinerInIbss;
u8 reserved_4;
@@ -228,7 +262,9 @@ struct wb_local_para {
*/
u8 JoinerInIbss[(MAX_BSS_DESCRIPT_ELEMENT + 3) & ~0x03];
- /* General Statistics, count at Rx_handler or Tx_callback interrupt handler */
+ /* General Statistics, count at Rx_handler or
+ * Tx_callback interrupt handler
+ */
u64 GS_XMIT_OK; /* Good Frames Transmitted */
u64 GS_RCV_OK; /* Good Frames Received */
u32 GS_RCV_ERROR; /* Frames received with crc error */
@@ -248,10 +284,18 @@ struct wb_local_para {
u32 _dot11WEPUndecryptableCount;
u32 _dot11FrameDuplicateCount;
- struct chan_info IbssChanSetting; /* 2B. Start IBSS Channel setting by registry or WWU. */
- u8 reserved_5[2]; /* It may not be used after considering RF type, region and modulation type. */
+ struct chan_info IbssChanSetting; /* 2B. Start IBSS Channel
+ * setting by registry or
+ * WWU.
+ */
+ u8 reserved_5[2]; /* It may not be used after
+ * considering RF type, region
+ * and modulation type.
+ */
- u8 reserved_6[2]; /* two variables are for wep key error detection */
+ u8 reserved_6[2]; /* the next two variables are for WEP
+ * key error detection
+ */
u32 bWepKeyError;
u32 bToSelfPacketReceived;
u32 WepKeyDetectTimerCount;
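
For reference, the comment-wrapping convention applied in the hunks above, shown on a hypothetical field (a sketch, not part of the patch):

#include <linux/types.h>

struct example_para {
	u8 boExampleFlag;	/* a long field description is wrapped
				 * across continuation lines, each one
				 * starting with an aligned asterisk
				 */
};
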
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index a5e255bb0f8b..bbc5ddcce6f5 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -76,41 +76,111 @@ void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
- case 0x3b0: reg->U1B0 = RegisterValue; break;
- case 0x3bc: reg->U1BC_LEDConfigure = RegisterValue; break;
- case 0x400: reg->D00_DmaControl = RegisterValue; break;
- case 0x800: reg->M00_MacControl = RegisterValue; break;
- case 0x804: reg->M04_MulticastAddress1 = RegisterValue; break;
- case 0x808: reg->M08_MulticastAddress2 = RegisterValue; break;
- case 0x824: reg->M24_MacControl = RegisterValue; break;
- case 0x828: reg->M28_MacControl = RegisterValue; break;
- case 0x82c: reg->M2C_MacControl = RegisterValue; break;
- case 0x838: reg->M38_MacControl = RegisterValue; break;
- case 0x840: reg->M40_MacControl = RegisterValue; break;
- case 0x844: reg->M44_MacControl = RegisterValue; break;
- case 0x848: reg->M48_MacControl = RegisterValue; break;
- case 0x84c: reg->M4C_MacStatus = RegisterValue; break;
- case 0x860: reg->M60_MacControl = RegisterValue; break;
- case 0x868: reg->M68_MacControl = RegisterValue; break;
- case 0x870: reg->M70_MacControl = RegisterValue; break;
- case 0x874: reg->M74_MacControl = RegisterValue; break;
- case 0x878: reg->M78_ERPInformation = RegisterValue; break;
- case 0x87C: reg->M7C_MacControl = RegisterValue; break;
- case 0x880: reg->M80_MacControl = RegisterValue; break;
- case 0x884: reg->M84_MacControl = RegisterValue; break;
- case 0x888: reg->M88_MacControl = RegisterValue; break;
- case 0x898: reg->M98_MacControl = RegisterValue; break;
- case 0x100c: reg->BB0C = RegisterValue; break;
- case 0x102c: reg->BB2C = RegisterValue; break;
- case 0x1030: reg->BB30 = RegisterValue; break;
- case 0x103c: reg->BB3C = RegisterValue; break;
- case 0x1048: reg->BB48 = RegisterValue; break;
- case 0x104c: reg->BB4C = RegisterValue; break;
- case 0x1050: reg->BB50 = RegisterValue; break;
- case 0x1054: reg->BB54 = RegisterValue; break;
- case 0x1058: reg->BB58 = RegisterValue; break;
- case 0x105c: reg->BB5C = RegisterValue; break;
- case 0x1060: reg->BB60 = RegisterValue; break;
+ case 0x3b0:
+ reg->U1B0 = RegisterValue;
+ break;
+ case 0x3bc:
+ reg->U1BC_LEDConfigure = RegisterValue;
+ break;
+ case 0x400:
+ reg->D00_DmaControl = RegisterValue;
+ break;
+ case 0x800:
+ reg->M00_MacControl = RegisterValue;
+ break;
+ case 0x804:
+ reg->M04_MulticastAddress1 = RegisterValue;
+ break;
+ case 0x808:
+ reg->M08_MulticastAddress2 = RegisterValue;
+ break;
+ case 0x824:
+ reg->M24_MacControl = RegisterValue;
+ break;
+ case 0x828:
+ reg->M28_MacControl = RegisterValue;
+ break;
+ case 0x82c:
+ reg->M2C_MacControl = RegisterValue;
+ break;
+ case 0x838:
+ reg->M38_MacControl = RegisterValue;
+ break;
+ case 0x840:
+ reg->M40_MacControl = RegisterValue;
+ break;
+ case 0x844:
+ reg->M44_MacControl = RegisterValue;
+ break;
+ case 0x848:
+ reg->M48_MacControl = RegisterValue;
+ break;
+ case 0x84c:
+ reg->M4C_MacStatus = RegisterValue;
+ break;
+ case 0x860:
+ reg->M60_MacControl = RegisterValue;
+ break;
+ case 0x868:
+ reg->M68_MacControl = RegisterValue;
+ break;
+ case 0x870:
+ reg->M70_MacControl = RegisterValue;
+ break;
+ case 0x874:
+ reg->M74_MacControl = RegisterValue;
+ break;
+ case 0x878:
+ reg->M78_ERPInformation = RegisterValue;
+ break;
+ case 0x87c:
+ reg->M7C_MacControl = RegisterValue;
+ break;
+ case 0x880:
+ reg->M80_MacControl = RegisterValue;
+ break;
+ case 0x884:
+ reg->M84_MacControl = RegisterValue;
+ break;
+ case 0x888:
+ reg->M88_MacControl = RegisterValue;
+ break;
+ case 0x898:
+ reg->M98_MacControl = RegisterValue;
+ break;
+ case 0x100c:
+ reg->BB0C = RegisterValue;
+ break;
+ case 0x102c:
+ reg->BB2C = RegisterValue;
+ break;
+ case 0x1030:
+ reg->BB30 = RegisterValue;
+ break;
+ case 0x103c:
+ reg->BB3C = RegisterValue;
+ break;
+ case 0x1048:
+ reg->BB48 = RegisterValue;
+ break;
+ case 0x104c:
+ reg->BB4C = RegisterValue;
+ break;
+ case 0x1050:
+ reg->BB50 = RegisterValue;
+ break;
+ case 0x1054:
+ reg->BB54 = RegisterValue;
+ break;
+ case 0x1058:
+ reg->BB58 = RegisterValue;
+ break;
+ case 0x105c:
+ reg->BB5C = RegisterValue;
+ break;
+ case 0x1060:
+ reg->BB60 = RegisterValue;
+ break;
}
}
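
Aside, not part of the patch: since Wb35Reg_Update() only mirrors a register write into a shadow structure, the same mapping could be kept in a lookup table. The sketch below is self-contained and hypothetical; struct shadow_regs, shadow_map and shadow_update() stand in for the driver's struct wb35_reg and its update logic, and the table is abridged.

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct shadow_regs {			/* stand-in for the driver's struct wb35_reg */
	u32 U1B0;
	u32 U1BC_LEDConfigure;
	u32 D00_DmaControl;
	/* ... */
};

struct reg_shadow_entry {
	u16 reg_no;			/* hardware register number */
	size_t offset;			/* offset of the shadow field */
};

static const struct reg_shadow_entry shadow_map[] = {
	{ 0x3b0, offsetof(struct shadow_regs, U1B0) },
	{ 0x3bc, offsetof(struct shadow_regs, U1BC_LEDConfigure) },
	{ 0x400, offsetof(struct shadow_regs, D00_DmaControl) },
	/* ... remaining register/field pairs ... */
};

static void shadow_update(struct shadow_regs *regs, u16 reg_no, u32 value)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(shadow_map); i++)
		if (shadow_map[i].reg_no == reg_no) {
			*(u32 *)((char *)regs + shadow_map[i].offset) = value;
			return;
		}
}
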
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index 8d71bc2f5940..f006b166aebc 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -16,7 +16,8 @@
#include "core.h"
#include "wb35rx_f.h"
-static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int PacketSize)
+static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress,
+ int PacketSize)
{
struct wbsoft_priv *priv = hw->priv;
struct sk_buff *skb;
@@ -64,7 +65,8 @@ static void Wb35Rx_adjust(struct wb35_descriptor *pRxDes)
} else if (DecryptionMethod) { /* For TKIP and CCMP */
for (i = 7; i > 1; i--)
pRxBufferAddress[i] = pRxBufferAddress[i - 2];
- pRxDes->buffer_address[0] = pRxBufferAddress + 2; /* Update the descriptor, shift 8 byte */
+ /* Update the descriptor, shift 8 byte */
+ pRxDes->buffer_address[0] = pRxBufferAddress + 2;
BufferSize -= 8; /* 8 byte for IV + ICV */
}
pRxDes->buffer_size[0] = BufferSize;
@@ -95,7 +97,9 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
/* Parse the bulkin buffer */
while (BufferSize >= 4) {
- if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) /* Is ending? */
+ /* Is ending? */
+ if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) ==
+ RX_END_TAG)
break;
/* Get the R00 R01 first */
@@ -108,7 +112,8 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
/* Basic check for Rx length. Is length valid? */
if (PacketSize > MAX_PACKET_SIZE) {
- pr_debug("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
+ pr_debug("Serious ERROR : Rx data size too long, size =%d\n",
+ PacketSize);
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->Ep3ErrorCount2++;
break;
@@ -118,7 +123,8 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
* Wb35Rx_indicate() is called synchronously so it isn't
* necessary to set "RxDes.Desctriptor_ID = RxBufferID;"
*/
- BufferSize -= 8; /* subtract 8 byte for 35's USB header length */
+ /* subtract 8 bytes for 35's USB header length */
+ BufferSize -= 8;
pRxBufferAddress += 8;
RxDes.buffer_address[0] = pRxBufferAddress;
@@ -255,7 +261,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
pWb35Rx->pDRx = kzalloc(MAX_USB_RX_BUFFER, GFP_ATOMIC);
if (!pWb35Rx->pDRx) {
- printk("w35und: Rx memory alloc failed\n");
+ dev_info(&hw->wiphy->dev, "w35und: Rx memory alloc failed\n");
goto error;
}
pRxBufferAddress = pWb35Rx->pDRx;
@@ -270,7 +276,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
retv = usb_submit_urb(urb, GFP_ATOMIC);
if (retv != 0) {
- printk("Rx URB sending error\n");
+ dev_info(&hw->wiphy->dev, "Rx URB sending error\n");
goto error;
}
return;
@@ -306,7 +312,9 @@ static void Wb35Rx_reset_descriptor(struct hw_data *pHwData)
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->rx_halt = 0;
- /* Initial the Queue. The last buffer is reserved for used if the Rx resource is unavailable. */
+ /* Initialize the queue. The last buffer is reserved for use
+ * if the Rx resource is unavailable.
+ */
for (i = 0; i < MAX_USB_RX_BUFFER_NUMBER; i++)
pWb35Rx->RxOwner[i] = 1;
}
@@ -328,7 +336,8 @@ void Wb35Rx_stop(struct hw_data *pHwData)
/* Canceling the Irp if already sends it out. */
if (pWb35Rx->EP3vm_state == VM_RUNNING) {
- usb_unlink_urb(pWb35Rx->RxUrb); /* Only use unlink, let Wb35Rx_destroy to free them */
+ /* Only use unlink; let Wb35Rx_destroy free them */
+ usb_unlink_urb(pWb35Rx->RxUrb);
pr_debug("EP3 Rx stop\n");
}
}
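
For context on the printk()-to-dev_info() conversions above, a minimal sketch of device-prefixed logging; rx_buffer_alloc() is a hypothetical helper, and dev_err() would arguably suit an allocation failure even better than dev_info():

#include <linux/device.h>
#include <linux/slab.h>

static void *rx_buffer_alloc(struct device *dev, size_t size)
{
	void *buf = kzalloc(size, GFP_ATOMIC);

	if (!buf)
		dev_err(dev, "Rx memory alloc of %zu bytes failed\n", size);

	return buf;
}
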
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 07891a3e316e..0d29624416c3 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -788,7 +788,6 @@ static int wb35_probe(struct usb_interface *intf,
dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- dev->channel_change_time = 1000;
dev->max_signal = 100;
dev->queues = 1;
diff --git a/drivers/staging/wlags49_h2/dhf.c b/drivers/staging/wlags49_h2/dhf.c
index 13d360fa58ec..4877464f04b0 100644
--- a/drivers/staging/wlags49_h2/dhf.c
+++ b/drivers/staging/wlags49_h2/dhf.c
@@ -106,7 +106,7 @@
*---------------------------------------------------------------------------*/
/* 12345678901234 */
-char signature[14] = "FUPU7D37dhfwci";
+static char signature[14] = "FUPU7D37dhfwci";
/*-----------------------------------------------------------------------------
*
@@ -123,8 +123,8 @@ char signature[14] = "FUPU7D37dhfwci";
#define DL_SIZE 2000
/* CFG_IDENTITY_STRCT pri_identity = { LOF(CFG_IDENTITY_STRCT), CFG_PRI_IDENTITY }; */
-CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
-CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
+static CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
+static CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
/* Note: could be used rather than the above explained and defined DL_SIZE if need arises
* CFG_DL_BUF_STRCT dl_buf = { LOF(CFG_DL_BUF_STRCT), CFG_DL_BUF };
*/
@@ -139,7 +139,7 @@ CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_S
* This is only relevant if the DHF used without reloading the driver/utility.
*/
-LTV_INFO_STRUCT ltv_info[] = {
+static LTV_INFO_STRUCT ltv_info[] = {
{ (LTVP)&mfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP)&cfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP) NULL, 0 }
@@ -169,7 +169,7 @@ static int check_comp_fw(memimage *fw);
* station firmware image to be downloaded is compatible.
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
-int
+static int
check_comp_fw(memimage *fw)
{
CFG_RANGE20_STRCT *p;
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 4aac26d486f1..f44d888ecd6e 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -250,7 +250,7 @@ HCF_STATIC hcf_8 BASED mic_pad[8] = { 0x5A, 0, 0, 0, 0, 0, 0, 0 }; //MIC pa
#endif // HCF_TYPE_WPA
#if defined MSF_COMPONENT_ID
-CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
+static CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
sizeof(cfg_drv_identity)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_IDENTITY, // (0x0826)
MSF_COMPONENT_ID,
@@ -259,7 +259,7 @@ CFG_IDENTITY_STRCT BASED cfg_drv_identity = {
MSF_COMPONENT_MINOR_VER
} ;
-CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
+static CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
sizeof(cfg_drv_sup_range)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_SUP_RANGE, // (0x0827)
@@ -271,7 +271,7 @@ CFG_RANGES_STRCT BASED cfg_drv_sup_range = {
}}
} ;
-struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
+static struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
sizeof(cfg_drv_act_ranges_pri)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_PRI, // (0x0828)
@@ -288,7 +288,7 @@ struct CFG_RANGE3_STRCT BASED cfg_drv_act_ranges_pri = {
} ;
-struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
+static struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
sizeof(cfg_drv_act_ranges_sta)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_STA, // (0x0829)
@@ -333,7 +333,7 @@ struct CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_sta = {
} ;
-struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
+static struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
sizeof(cfg_drv_act_ranges_hsi)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_HSI, // (0x082A)
COMP_ROLE_ACT,
@@ -370,7 +370,7 @@ struct CFG_RANGE6_STRCT BASED cfg_drv_act_ranges_hsi = {
} ;
-CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_apf = {
+static CFG_RANGE4_STRCT BASED cfg_drv_act_ranges_apf = {
sizeof(cfg_drv_act_ranges_apf)/sizeof(hcf_16) - 1, //length of RID
CFG_DRV_ACT_RANGES_APF, // (0x082B)
@@ -3099,7 +3099,7 @@ hcf_service_nic( IFBP ifbp, wci_bufp bufp, unsigned int len )
#define L *p
#define R *(p+1)
-void
+static void
calc_mic( hcf_32* p, hcf_32 m )
{
#if HCF_BIG_ENDIAN
@@ -3415,7 +3415,7 @@ calibrate( IFBP ifbp )
*.ENDDOC END DOCUMENTATION
*
************************************************************************************************************/
-int
+static int
check_mic( IFBP ifbp )
{
int rc = HCF_SUCCESS;
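
The dhf.c/hcf.c hunks above all apply the same rule: symbols used in only one translation unit get internal linkage, which keeps them out of the kernel-wide namespace and lets sparse flag missing declarations. A generic sketch with made-up names:

static const int local_table[] = { 1, 2, 3 };	/* file-local data */

static int local_helper(int v)			/* file-local function */
{
	return v + local_table[0];
}

int exported_entry(int v)			/* the only external symbol */
{
	return local_helper(v);
}
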
diff --git a/drivers/staging/wlags49_h2/sta_h2.c b/drivers/staging/wlags49_h2/sta_h2.c
index 25ac76f7d366..0ba8defc023a 100644
--- a/drivers/staging/wlags49_h2/sta_h2.c
+++ b/drivers/staging/wlags49_h2/sta_h2.c
@@ -4435,7 +4435,7 @@ static const CFG_PROG_STRCT fw_image_code[] = {
0000,
0x000F368E, /* Start execution address */
},
- { 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
+ { 0000, 0000, 0000, 0000, 00000000, 0000, NULL}
};
static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 650def88e5c2..fc98b6dbdb89 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -76,38 +76,23 @@
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/kernel.h>
-// #include <linux/sched.h>
-// #include <linux/ptrace.h>
-// #include <linux/slab.h>
-// #include <linux/ctype.h>
-// #include <linux/string.h>
-// #include <linux/timer.h>
-//#include <linux/interrupt.h>
-// #include <linux/tqueue.h>
-// #include <linux/in.h>
-// #include <linux/delay.h>
-// #include <asm/io.h>
-// // #include <asm/bitops.h>
#include <linux/unistd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-// #include <linux/skbuff.h>
-// #include <linux/if_arp.h>
-// #include <linux/ioport.h>
#define BIN_DL 0
#if BIN_DL
#include <linux/vmalloc.h>
-#endif // BIN_DL
+#endif /* BIN_DL */
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
-//in order to get around:: wl_main.c:2229: `HREG_EV_RDMAD' undeclared (first use in this function)
+/* in order to get around:: wl_main.c:2229: `HREG_EV_RDMAD' undeclared (first use in this function) */
#include <hcfdef.h>
#include <wl_if.h>
@@ -133,8 +118,7 @@
******************************************************************************/
#define VALID_PARAM(C) \
{ \
- if (!(C)) \
- { \
+ if (!(C)) { \
printk(KERN_INFO "Wireless, parameter error: \"%s\"\n", #C); \
goto failed; \
} \
@@ -142,7 +126,7 @@
/*******************************************************************************
* local functions
******************************************************************************/
-void wl_isr_handler( unsigned long p );
+void wl_isr_handler(unsigned long p);
#if 0 //SCULL_USE_PROC /* don't waste space if unused */
static int scull_read_procmem(struct seq_file *m, void *v);
@@ -168,7 +152,7 @@ static const struct file_operations scull_read_procmem_fops = {
/*******************************************************************************
* module parameter definitions - set with 'insmod'
******************************************************************************/
-static p_u16 irq_mask = 0xdeb8; // IRQ3,4,5,7,9,10,11,12,14,15
+static p_u16 irq_mask = 0xdeb8; /* IRQ3,4,5,7,9,10,11,12,14,15 */
static p_s8 irq_list[4] = { -1 };
#if 0
@@ -183,7 +167,7 @@ static p_u16 PARM_AUTH_KEY_MGMT_SUITE = PARM_DEFAULT_AUTH_KEY_MGMT_SUITE;
static p_u16 PARM_BRSC_2GHZ = PARM_DEFAULT_BRSC_2GHZ;
static p_u16 PARM_BRSC_5GHZ = PARM_DEFAULT_BRSC_5GHZ;
static p_u16 PARM_COEXISTENCE = PARM_DEFAULT_COEXISTENCE;
-static p_u16 PARM_CONNECTION_CONTROL = PARM_DEFAULT_CONNECTION_CONTROL; //;?rename and move
+static p_u16 PARM_CONNECTION_CONTROL = PARM_DEFAULT_CONNECTION_CONTROL; /* ;?rename and move */
static p_char *PARM_CREATE_IBSS = PARM_DEFAULT_CREATE_IBSS_STR;
static p_char *PARM_DESIRED_SSID = PARM_DEFAULT_SSID;
static p_char *PARM_DOWNLOAD_FIRMWARE = "";
@@ -220,7 +204,7 @@ static p_u16 PARM_RTS_THRESHOLD3 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD4 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD5 = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_RTS_THRESHOLD6 = PARM_DEFAULT_RTS_THRESHOLD;
-#endif // USE_WDS
+#endif /* USE_WDS */
static p_u16 PARM_RTS_THRESHOLD = PARM_DEFAULT_RTS_THRESHOLD;
static p_u16 PARM_SRSC_2GHZ = PARM_DEFAULT_SRSC_2GHZ;
static p_u16 PARM_SRSC_5GHZ = PARM_DEFAULT_SRSC_5GHZ;
@@ -234,7 +218,7 @@ static p_u16 PARM_TX_RATE3 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE4 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE5 = PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16 PARM_TX_RATE6 = PARM_DEFAULT_TX_RATE_2GHZ;
-#endif // USE_WDS
+#endif /* USE_WDS */
static p_u16 PARM_TX_RATE = PARM_DEFAULT_TX_RATE_2GHZ;
#ifdef USE_WDS
static p_u8 PARM_WDS_ADDRESS1[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
@@ -243,7 +227,7 @@ static p_u8 PARM_WDS_ADDRESS3[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS4[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS5[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
static p_u8 PARM_WDS_ADDRESS6[ETH_ALEN] = PARM_DEFAULT_NETWORK_ADDR;
-#endif // USE_WDS
+#endif /* USE_WDS */
#if 0
@@ -299,39 +283,41 @@ MODULE_PARM(PARM_BRSC_2GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_2GHZ, "Basic Rate Set Control 2.4 GHz");
MODULE_PARM(PARM_BRSC_5GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_5GHZ, "Basic Rate Set Control 5.0 GHz");
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
-//;?seems reasonable that even an AP-only driver could afford this small additional footprint
+#if 1 /* (HCF_TYPE) & HCF_TYPE_STA */
+/* ;?seems reasonable that even an AP-only driver could afford this small additional footprint */
MODULE_PARM(PARM_PM_ENABLED, "h");
MODULE_PARM_DESC(PARM_PM_ENABLED, "Power Management State (0 - 2, 8001 - 8002) [0]");
MODULE_PARM(PARM_PORT_TYPE, "b");
MODULE_PARM_DESC(PARM_PORT_TYPE, "Port Type (1 - 3) [1]");
-//;?MODULE_PARM(PARM_CREATE_IBSS, "s");
-//;?MODULE_PARM_DESC(PARM_CREATE_IBSS, "Create IBSS (<string> N or Y) [N]");
-//;?MODULE_PARM(PARM_MULTICAST_RX, "s");
-//;?MODULE_PARM_DESC(PARM_MULTICAST_RX, "Multicast Receive Enable (<string> N or Y) [Y]");
-//;?MODULE_PARM(PARM_MAX_SLEEP, "h");
-//;?MODULE_PARM_DESC(PARM_MAX_SLEEP, "Maximum Power Management Sleep Duration (0 - 65535) [100]");
-//;?MODULE_PARM(PARM_NETWORK_ADDR, "6b");
-//;?MODULE_PARM_DESC(PARM_NETWORK_ADDR, "Hardware Ethernet Address ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [<factory value>]");
-//;?MODULE_PARM(PARM_AUTHENTICATION, "b");
-//
-//tracker 12448
-//;?MODULE_PARM_DESC(PARM_AUTHENTICATION, "Authentication Type (0-2) [0] 0=Open 1=SharedKey 2=LEAP");
-//;?MODULE_PARM_DESC(authentication, "Authentication Type (1-2) [1] 1=Open 2=SharedKey");
-//tracker 12448
-//
-//;?MODULE_PARM(PARM_OWN_ATIM_WINDOW, "b");
-//;?MODULE_PARM_DESC(PARM_OWN_ATIM_WINDOW, "ATIM Window time in TU for IBSS creation (0-100) [0]");
-//;?MODULE_PARM(PARM_PM_HOLDOVER_DURATION, "b");
-//;?MODULE_PARM_DESC(PARM_PM_HOLDOVER_DURATION, "Time station remains awake after MAC frame transfer when PM is on (0-65535) [100]");
-//;?MODULE_PARM(PARM_PROMISCUOUS_MODE, "s");
-//;?MODULE_PARM_DESC(PARM_PROMISCUOUS_MODE, "Promiscuous Mode Enable (<string> Y or N ) [N]" );
-//;?
+/*
+ * ;?MODULE_PARM(PARM_CREATE_IBSS, "s");
+ *;?MODULE_PARM_DESC(PARM_CREATE_IBSS, "Create IBSS (<string> N or Y) [N]");
+ *;?MODULE_PARM(PARM_MULTICAST_RX, "s");
+ *;?MODULE_PARM_DESC(PARM_MULTICAST_RX, "Multicast Receive Enable (<string> N or Y) [Y]");
+ *;?MODULE_PARM(PARM_MAX_SLEEP, "h");
+ *;?MODULE_PARM_DESC(PARM_MAX_SLEEP, "Maximum Power Management Sleep Duration (0 - 65535) [100]");
+ *;?MODULE_PARM(PARM_NETWORK_ADDR, "6b");
+ *;?MODULE_PARM_DESC(PARM_NETWORK_ADDR, "Hardware Ethernet Address ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [<factory value>]");
+ *;?MODULE_PARM(PARM_AUTHENTICATION, "b");
+ *
+ *tracker 12448
+ *;?MODULE_PARM_DESC(PARM_AUTHENTICATION, "Authentication Type (0-2) [0] 0=Open 1=SharedKey 2=LEAP");
+ *;?MODULE_PARM_DESC(authentication, "Authentication Type (1-2) [1] 1=Open 2=SharedKey");
+ *tracker 12448
+ *
+ *;?MODULE_PARM(PARM_OWN_ATIM_WINDOW, "b");
+ *;?MODULE_PARM_DESC(PARM_OWN_ATIM_WINDOW, "ATIM Window time in TU for IBSS creation (0-100) [0]");
+ *;?MODULE_PARM(PARM_PM_HOLDOVER_DURATION, "b");
+ *;?MODULE_PARM_DESC(PARM_PM_HOLDOVER_DURATION, "Time station remains awake after MAC frame transfer when PM is on (0-65535) [100]");
+ *;?MODULE_PARM(PARM_PROMISCUOUS_MODE, "s");
+ *;?MODULE_PARM_DESC(PARM_PROMISCUOUS_MODE, "Promiscuous Mode Enable (<string> Y or N ) [N]" );
+ *;?
+ */
MODULE_PARM(PARM_CONNECTION_CONTROL, "b");
MODULE_PARM_DESC(PARM_CONNECTION_CONTROL, "Connection Control (0 - 3) [2]");
#endif /* HCF_STA */
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
MODULE_PARM(PARM_OWN_DTIM_PERIOD, "b");
MODULE_PARM_DESC(PARM_OWN_DTIM_PERIOD, "DTIM Period (0 - 255) [1]");
MODULE_PARM(PARM_REJECT_ANY, "s");
@@ -394,11 +380,12 @@ MODULE_PARM_DESC(PARM_COEXISTENCE, "Coexistence (0-7) [0]");
#if DBG
static p_u32 pc_debug = DBG_LVL;
-//MODULE_PARM(pc_debug, "i");
-/*static ;?conflicts with my understanding of CL parameters and breaks now I moved
+/*
+ * MODULE_PARM(pc_debug, "i");
+ *static ;?conflicts with my understanding of CL parameters and breaks now I moved
* the correspondig logic to wl_profile
- */ p_u32 DebugFlag = ~0; //recognizable "undefined value" rather then DBG_DEFAULTS;
-//MODULE_PARM(DebugFlag, "l");
+ */ p_u32 DebugFlag = ~0; /* recognizable "undefined value" rather than DBG_DEFAULTS; */
+/* MODULE_PARM(DebugFlag, "l"); */
static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 };
struct dbg_info *DbgInfo = &wl_info;
@@ -407,8 +394,8 @@ struct dbg_info *DbgInfo = &wl_info;
#ifdef USE_RTS
static p_char *useRTS = "N";
-MODULE_PARM( useRTS, "s" );
-MODULE_PARM_DESC( useRTS, "Use RTS test interface (<string> N or Y) [N]" );
+MODULE_PARM(useRTS, "s");
+MODULE_PARM_DESC(useRTS, "Use RTS test interface (<string> N or Y) [N]");
#endif /* USE_RTS */
/*******************************************************************************
@@ -427,7 +414,7 @@ extern memimage fw_image; // firmware image to be downloaded
#endif /* HCF_STA */
-int wl_insert( struct net_device *dev )
+int wl_insert(struct net_device *dev)
{
int result = 0;
int hcf_status = HCF_SUCCESS;
@@ -436,10 +423,10 @@ int wl_insert( struct net_device *dev )
struct wl_private *lp = wl_priv(dev);
/* Initialize the adapter hardware. */
- memset( &( lp->hcfCtx ), 0, sizeof( IFB_STRCT ));
+ memset(&(lp->hcfCtx), 0, sizeof(IFB_STRCT));
/* Initialize the adapter parameters. */
- spin_lock_init( &( lp->slock ));
+ spin_lock_init(&(lp->slock));
/* Initialize states */
//lp->lockcount = 0; //PE1DNN
@@ -448,31 +435,31 @@ int wl_insert( struct net_device *dev )
lp->dev = dev;
- DBG_PARAM( DbgInfo, "irq_mask", "0x%04x", irq_mask & 0x0FFFF );
- DBG_PARAM( DbgInfo, "irq_list", "0x%02x 0x%02x 0x%02x 0x%02x",
+ DBG_PARAM(DbgInfo, "irq_mask", "0x%04x", irq_mask & 0x0FFFF);
+ DBG_PARAM(DbgInfo, "irq_list", "0x%02x 0x%02x 0x%02x 0x%02x",
irq_list[0] & 0x0FF, irq_list[1] & 0x0FF,
- irq_list[2] & 0x0FF, irq_list[3] & 0x0FF );
- DBG_PARAM( DbgInfo, PARM_NAME_DESIRED_SSID, "\"%s\"", PARM_DESIRED_SSID );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_SSID, "\"%s\"", PARM_OWN_SSID );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_CHANNEL, "%d", PARM_OWN_CHANNEL);
- DBG_PARAM( DbgInfo, PARM_NAME_SYSTEM_SCALE, "%d", PARM_SYSTEM_SCALE );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE, "%d", PARM_TX_RATE );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD, "%d", PARM_RTS_THRESHOLD );
- DBG_PARAM( DbgInfo, PARM_NAME_MICROWAVE_ROBUSTNESS, "\"%s\"", PARM_MICROWAVE_ROBUSTNESS );
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_NAME, "\"%s\"", PARM_OWN_NAME );
+ irq_list[2] & 0x0FF, irq_list[3] & 0x0FF);
+ DBG_PARAM(DbgInfo, PARM_NAME_DESIRED_SSID, "\"%s\"", PARM_DESIRED_SSID);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_SSID, "\"%s\"", PARM_OWN_SSID);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_CHANNEL, "%d", PARM_OWN_CHANNEL);
+ DBG_PARAM(DbgInfo, PARM_NAME_SYSTEM_SCALE, "%d", PARM_SYSTEM_SCALE);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE, "%d", PARM_TX_RATE);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD, "%d", PARM_RTS_THRESHOLD);
+ DBG_PARAM(DbgInfo, PARM_NAME_MICROWAVE_ROBUSTNESS, "\"%s\"", PARM_MICROWAVE_ROBUSTNESS);
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_NAME, "\"%s\"", PARM_OWN_NAME);
//;? DBG_PARAM( DbgInfo, PARM_NAME_ENABLE_ENCRYPTION, "\"%s\"", PARM_ENABLE_ENCRYPTION );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY1, "\"%s\"", PARM_KEY1 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY2, "\"%s\"", PARM_KEY2 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY3, "\"%s\"", PARM_KEY3 );
- DBG_PARAM( DbgInfo, PARM_NAME_KEY4, "\"%s\"", PARM_KEY4 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_KEY, "%d", PARM_TX_KEY );
- DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RATE, "%d", PARM_MULTICAST_RATE );
- DBG_PARAM( DbgInfo, PARM_NAME_DOWNLOAD_FIRMWARE, "\"%s\"", PARM_DOWNLOAD_FIRMWARE );
- DBG_PARAM( DbgInfo, PARM_NAME_AUTH_KEY_MGMT_SUITE, "%d", PARM_AUTH_KEY_MGMT_SUITE );
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY1, "\"%s\"", PARM_KEY1);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY2, "\"%s\"", PARM_KEY2);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY3, "\"%s\"", PARM_KEY3);
+ DBG_PARAM(DbgInfo, PARM_NAME_KEY4, "\"%s\"", PARM_KEY4);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_KEY, "%d", PARM_TX_KEY);
+ DBG_PARAM(DbgInfo, PARM_NAME_MULTICAST_RATE, "%d", PARM_MULTICAST_RATE);
+ DBG_PARAM(DbgInfo, PARM_NAME_DOWNLOAD_FIRMWARE, "\"%s\"", PARM_DOWNLOAD_FIRMWARE);
+ DBG_PARAM(DbgInfo, PARM_NAME_AUTH_KEY_MGMT_SUITE, "%d", PARM_AUTH_KEY_MGMT_SUITE);
//;?#if (HCF_TYPE) & HCF_TYPE_STA
//;?should we make this code conditional depending on in STA mode
//;? DBG_PARAM( DbgInfo, PARM_NAME_PORT_TYPE, "%d", PARM_PORT_TYPE );
- DBG_PARAM( DbgInfo, PARM_NAME_PM_ENABLED, "%04x", PARM_PM_ENABLED );
+ DBG_PARAM(DbgInfo, PARM_NAME_PM_ENABLED, "%04x", PARM_PM_ENABLED);
//;? DBG_PARAM( DbgInfo, PARM_NAME_CREATE_IBSS, "\"%s\"", PARM_CREATE_IBSS );
//;? DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RX, "\"%s\"", PARM_MULTICAST_RX );
//;? DBG_PARAM( DbgInfo, PARM_NAME_MAX_SLEEP, "%d", PARM_MAX_SLEEP );
@@ -488,24 +475,24 @@ int wl_insert( struct net_device *dev )
#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
//;?should we restore this to allow smaller memory footprint
//;?I guess: no, since this is Debug mode only
- DBG_PARAM( DbgInfo, PARM_NAME_OWN_DTIM_PERIOD, "%d", PARM_OWN_DTIM_PERIOD );
- DBG_PARAM( DbgInfo, PARM_NAME_REJECT_ANY, "\"%s\"", PARM_REJECT_ANY );
- DBG_PARAM( DbgInfo, PARM_NAME_EXCLUDE_UNENCRYPTED, "\"%s\"", PARM_EXCLUDE_UNENCRYPTED );
- DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_PM_BUFFERING, "\"%s\"", PARM_MULTICAST_PM_BUFFERING );
- DBG_PARAM( DbgInfo, PARM_NAME_INTRA_BSS_RELAY, "\"%s\"", PARM_INTRA_BSS_RELAY );
+ DBG_PARAM(DbgInfo, PARM_NAME_OWN_DTIM_PERIOD, "%d", PARM_OWN_DTIM_PERIOD);
+ DBG_PARAM(DbgInfo, PARM_NAME_REJECT_ANY, "\"%s\"", PARM_REJECT_ANY);
+ DBG_PARAM(DbgInfo, PARM_NAME_EXCLUDE_UNENCRYPTED, "\"%s\"", PARM_EXCLUDE_UNENCRYPTED);
+ DBG_PARAM(DbgInfo, PARM_NAME_MULTICAST_PM_BUFFERING, "\"%s\"", PARM_MULTICAST_PM_BUFFERING);
+ DBG_PARAM(DbgInfo, PARM_NAME_INTRA_BSS_RELAY, "\"%s\"", PARM_INTRA_BSS_RELAY);
#ifdef USE_WDS
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD1, "%d", PARM_RTS_THRESHOLD1 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD2, "%d", PARM_RTS_THRESHOLD2 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD3, "%d", PARM_RTS_THRESHOLD3 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD4, "%d", PARM_RTS_THRESHOLD4 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD5, "%d", PARM_RTS_THRESHOLD5 );
- DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD6, "%d", PARM_RTS_THRESHOLD6 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE1, "%d", PARM_TX_RATE1 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE2, "%d", PARM_TX_RATE2 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE3, "%d", PARM_TX_RATE3 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE4, "%d", PARM_TX_RATE4 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE5, "%d", PARM_TX_RATE5 );
- DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE6, "%d", PARM_TX_RATE6 );
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD1, "%d", PARM_RTS_THRESHOLD1);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD2, "%d", PARM_RTS_THRESHOLD2);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD3, "%d", PARM_RTS_THRESHOLD3);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD4, "%d", PARM_RTS_THRESHOLD4);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD5, "%d", PARM_RTS_THRESHOLD5);
+ DBG_PARAM(DbgInfo, PARM_NAME_RTS_THRESHOLD6, "%d", PARM_RTS_THRESHOLD6);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE1, "%d", PARM_TX_RATE1);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE2, "%d", PARM_TX_RATE2);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE3, "%d", PARM_TX_RATE3);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE4, "%d", PARM_TX_RATE4);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE5, "%d", PARM_TX_RATE5);
+ DBG_PARAM(DbgInfo, PARM_NAME_TX_RATE6, "%d", PARM_TX_RATE6);
DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS1, "\"%pM\"",
PARM_WDS_ADDRESS1);
DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS2, "\"%pM\"",
@@ -521,28 +508,28 @@ int wl_insert( struct net_device *dev )
#endif /* USE_WDS */
#endif /* HCF_AP */
- VALID_PARAM( !PARM_DESIRED_SSID || ( strlen( PARM_DESIRED_SSID ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM( !PARM_OWN_SSID || ( strlen( PARM_OWN_SSID ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM(( PARM_OWN_CHANNEL <= PARM_MAX_OWN_CHANNEL ));
- VALID_PARAM(( PARM_SYSTEM_SCALE >= PARM_MIN_SYSTEM_SCALE ) && ( PARM_SYSTEM_SCALE <= PARM_MAX_SYSTEM_SCALE ));
- VALID_PARAM(( PARM_TX_RATE >= PARM_MIN_TX_RATE ) && ( PARM_TX_RATE <= PARM_MAX_TX_RATE ));
- VALID_PARAM(( PARM_RTS_THRESHOLD <= PARM_MAX_RTS_THRESHOLD ));
- VALID_PARAM( !PARM_MICROWAVE_ROBUSTNESS || strchr( "NnYy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL );
- VALID_PARAM( !PARM_OWN_NAME || ( strlen( PARM_NAME_OWN_NAME ) <= PARM_MAX_NAME_LEN ));
- VALID_PARAM(( PARM_ENABLE_ENCRYPTION <= PARM_MAX_ENABLE_ENCRYPTION ));
- VALID_PARAM( is_valid_key_string( PARM_KEY1 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY2 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY3 ));
- VALID_PARAM( is_valid_key_string( PARM_KEY4 ));
- VALID_PARAM(( PARM_TX_KEY >= PARM_MIN_TX_KEY ) && ( PARM_TX_KEY <= PARM_MAX_TX_KEY ));
-
- VALID_PARAM(( PARM_MULTICAST_RATE >= PARM_MIN_MULTICAST_RATE ) &&
- ( PARM_MULTICAST_RATE <= PARM_MAX_MULTICAST_RATE ));
-
- VALID_PARAM( !PARM_DOWNLOAD_FIRMWARE || ( strlen( PARM_DOWNLOAD_FIRMWARE ) <= 255 /*;?*/ ));
- VALID_PARAM(( PARM_AUTH_KEY_MGMT_SUITE < PARM_MAX_AUTH_KEY_MGMT_SUITE ));
-
- VALID_PARAM( !PARM_LOAD_BALANCING || strchr( "NnYy", PARM_LOAD_BALANCING[0] ) != NULL );
+ VALID_PARAM(!PARM_DESIRED_SSID || (strlen(PARM_DESIRED_SSID) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM(!PARM_OWN_SSID || (strlen(PARM_OWN_SSID) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM((PARM_OWN_CHANNEL <= PARM_MAX_OWN_CHANNEL));
+ VALID_PARAM((PARM_SYSTEM_SCALE >= PARM_MIN_SYSTEM_SCALE) && (PARM_SYSTEM_SCALE <= PARM_MAX_SYSTEM_SCALE));
+ VALID_PARAM((PARM_TX_RATE >= PARM_MIN_TX_RATE) && (PARM_TX_RATE <= PARM_MAX_TX_RATE));
+ VALID_PARAM((PARM_RTS_THRESHOLD <= PARM_MAX_RTS_THRESHOLD));
+ VALID_PARAM(!PARM_MICROWAVE_ROBUSTNESS || strchr("NnYy", PARM_MICROWAVE_ROBUSTNESS[0]) != NULL);
+ VALID_PARAM(!PARM_OWN_NAME || (strlen(PARM_NAME_OWN_NAME) <= PARM_MAX_NAME_LEN));
+ VALID_PARAM((PARM_ENABLE_ENCRYPTION <= PARM_MAX_ENABLE_ENCRYPTION));
+ VALID_PARAM(is_valid_key_string(PARM_KEY1));
+ VALID_PARAM(is_valid_key_string(PARM_KEY2));
+ VALID_PARAM(is_valid_key_string(PARM_KEY3));
+ VALID_PARAM(is_valid_key_string(PARM_KEY4));
+ VALID_PARAM((PARM_TX_KEY >= PARM_MIN_TX_KEY) && (PARM_TX_KEY <= PARM_MAX_TX_KEY));
+
+ VALID_PARAM((PARM_MULTICAST_RATE >= PARM_MIN_MULTICAST_RATE) &&
+ (PARM_MULTICAST_RATE <= PARM_MAX_MULTICAST_RATE));
+
+ VALID_PARAM(!PARM_DOWNLOAD_FIRMWARE || (strlen(PARM_DOWNLOAD_FIRMWARE) <= 255 /*;?*/));
+ VALID_PARAM((PARM_AUTH_KEY_MGMT_SUITE < PARM_MAX_AUTH_KEY_MGMT_SUITE));
+
+ VALID_PARAM(!PARM_LOAD_BALANCING || strchr("NnYy", PARM_LOAD_BALANCING[0]) != NULL);
VALID_PARAM( !PARM_MEDIUM_DISTRIBUTION || strchr( "NnYy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL );
VALID_PARAM(( PARM_TX_POW_LEVEL <= PARM_MAX_TX_POW_LEVEL ));
@@ -601,33 +588,25 @@ int wl_insert( struct net_device *dev )
lp->MulticastRate[0] = PARM_DEFAULT_MULTICAST_RATE_2GHZ;
lp->MulticastRate[1] = PARM_DEFAULT_MULTICAST_RATE_5GHZ;
- if ( strchr( "Yy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL )
lp->MicrowaveRobustness = 1;
- } else {
+ else
lp->MicrowaveRobustness = 0;
- }
- if ( PARM_DESIRED_SSID && ( strlen( PARM_DESIRED_SSID ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_DESIRED_SSID && ( strlen( PARM_DESIRED_SSID ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->NetworkName, PARM_DESIRED_SSID );
- }
- if ( PARM_OWN_SSID && ( strlen( PARM_OWN_SSID ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_OWN_SSID && ( strlen( PARM_OWN_SSID ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->NetworkName, PARM_OWN_SSID );
- }
- if ( PARM_OWN_NAME && ( strlen( PARM_OWN_NAME ) <= HCF_MAX_NAME_LEN )) {
+ if ( PARM_OWN_NAME && ( strlen( PARM_OWN_NAME ) <= HCF_MAX_NAME_LEN ))
strcpy( lp->StationName, PARM_OWN_NAME );
- }
lp->EnableEncryption = PARM_ENABLE_ENCRYPTION;
- if ( PARM_KEY1 && ( strlen( PARM_KEY1 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY1 && ( strlen( PARM_KEY1 ) <= MAX_KEY_LEN ))
strcpy( lp->Key1, PARM_KEY1 );
- }
- if ( PARM_KEY2 && ( strlen( PARM_KEY2 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY2 && ( strlen( PARM_KEY2 ) <= MAX_KEY_LEN ))
strcpy( lp->Key2, PARM_KEY2 );
- }
- if ( PARM_KEY3 && ( strlen( PARM_KEY3 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY3 && ( strlen( PARM_KEY3 ) <= MAX_KEY_LEN ))
strcpy( lp->Key3, PARM_KEY3 );
- }
- if ( PARM_KEY4 && ( strlen( PARM_KEY4 ) <= MAX_KEY_LEN )) {
+ if ( PARM_KEY4 && ( strlen( PARM_KEY4 ) <= MAX_KEY_LEN ))
strcpy( lp->Key4, PARM_KEY4 );
- }
lp->TransmitKeyID = PARM_TX_KEY;
@@ -639,17 +618,15 @@ int wl_insert( struct net_device *dev )
lp->DownloadFirmware = 1 ; //;?to be upgraded PARM_DOWNLOAD_FIRMWARE;
lp->AuthKeyMgmtSuite = PARM_AUTH_KEY_MGMT_SUITE;
- if ( strchr( "Yy", PARM_LOAD_BALANCING[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_LOAD_BALANCING[0] ) != NULL )
lp->loadBalancing = 1;
- } else {
+ else
lp->loadBalancing = 0;
- }
- if ( strchr( "Yy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL )
lp->mediumDistribution = 1;
- } else {
+ else
lp->mediumDistribution = 0;
- }
lp->txPowLevel = PARM_TX_POW_LEVEL;
@@ -665,24 +642,20 @@ int wl_insert( struct net_device *dev )
lp->atimWindow = PARM_OWN_ATIM_WINDOW;
lp->holdoverDuration = PARM_PM_HOLDOVER_DURATION;
lp->PMEnabled = PARM_PM_ENABLED; //;?
- if ( strchr( "Yy", PARM_CREATE_IBSS[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_CREATE_IBSS[0] ) != NULL )
lp->CreateIBSS = 1;
- } else {
+ else
lp->CreateIBSS = 0;
- }
- if ( strchr( "Nn", PARM_MULTICAST_RX[0] ) != NULL ) {
+ if ( strchr( "Nn", PARM_MULTICAST_RX[0] ) != NULL )
lp->MulticastReceive = 0;
- } else {
+ else
lp->MulticastReceive = 1;
- }
- if ( strchr( "Yy", PARM_PROMISCUOUS_MODE[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_PROMISCUOUS_MODE[0] ) != NULL )
lp->promiscuousMode = 1;
- } else {
+ else
lp->promiscuousMode = 0;
- }
- for( i = 0; i < ETH_ALEN; i++ ) {
- lp->MACAddress[i] = PARM_NETWORK_ADDR[i];
- }
+ for( i = 0; i < ETH_ALEN; i++ )
+ lp->MACAddress[i] = PARM_NETWORK_ADDR[i];
lp->connectionControl = PARM_CONNECTION_CONTROL;
@@ -691,26 +664,22 @@ int wl_insert( struct net_device *dev )
//;?should we restore this to allow smaller memory footprint
lp->DTIMPeriod = PARM_OWN_DTIM_PERIOD;
- if ( strchr( "Yy", PARM_REJECT_ANY[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_REJECT_ANY[0] ) != NULL )
lp->RejectAny = 1;
- } else {
+ else
lp->RejectAny = 0;
- }
- if ( strchr( "Nn", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL ) {
+ if ( strchr( "Nn", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL )
lp->ExcludeUnencrypted = 0;
- } else {
+ else
lp->ExcludeUnencrypted = 1;
- }
- if ( strchr( "Yy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL )
lp->multicastPMBuffering = 1;
- } else {
+ else
lp->multicastPMBuffering = 0;
- }
- if ( strchr( "Yy", PARM_INTRA_BSS_RELAY[0] ) != NULL ) {
+ if ( strchr( "Yy", PARM_INTRA_BSS_RELAY[0] ) != NULL )
lp->intraBSSRelay = 1;
- } else {
+ else
lp->intraBSSRelay = 0;
- }
lp->ownBeaconInterval = PARM_OWN_BEACON_INTERVAL;
lp->coexistence = PARM_COEXISTENCE;
@@ -750,11 +719,10 @@ int wl_insert( struct net_device *dev )
#endif /* USE_WDS */
#endif /* HCF_AP */
#ifdef USE_RTS
- if ( strchr( "Yy", useRTS[0] ) != NULL ) {
+ if ( strchr( "Yy", useRTS[0] ) != NULL )
lp->useRTS = 1;
- } else {
+ else
lp->useRTS = 0;
- }
#endif /* USE_RTS */
@@ -1560,7 +1528,8 @@ int wl_put_ltv( struct wl_private *lp )
hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
/* Own Name (Station Nickname) */
- if (( len = ( strlen( lp->StationName ) + 1 ) & ~0x01 ) != 0 ) {
+ len = (strlen(lp->StationName) + 1) & ~0x01;
+ if (len != 0) {
//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_NAME : %s\n",
// lp->StationName );
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 965b1c0a4753..77e4be21e44b 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -68,31 +68,14 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
-// #include <linux/sched.h>
-// #include <linux/ptrace.h>
-// #include <linux/slab.h>
-// #include <linux/ctype.h>
-// #include <linux/string.h>
-//#include <linux/timer.h>
-// #include <linux/interrupt.h>
-// #include <linux/in.h>
-// #include <linux/delay.h>
-// #include <linux/skbuff.h>
-// #include <asm/io.h>
-// // #include <asm/bitops.h>
-
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
-// #include <linux/skbuff.h>
-// #include <linux/if_arp.h>
-// #include <linux/ioport.h>
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
-// #include <hcfdef.h>
#include <wl_if.h>
#include <wl_internal.h>
@@ -104,16 +87,15 @@
#ifdef USE_PROFILE
#include <wl_profile.h>
-#endif /* USE_PROFILE */
+#endif /* USE_PROFILE */
#ifdef BUS_PCMCIA
#include <wl_cs.h>
-#endif /* BUS_PCMCIA */
+#endif /* BUS_PCMCIA */
#ifdef BUS_PCI
#include <wl_pci.h>
-#endif /* BUS_PCI */
-
+#endif /* BUS_PCI */
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
@@ -121,17 +103,15 @@
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN)
#endif
-//static int mtu = MTU_MAX;
-//MODULE_PARM(mtu, "i");
-//MODULE_PARM_DESC(mtu, "MTU");
-
/*******************************************************************************
* macros
******************************************************************************/
#define BLOCK_INPUT(buf, len) \
- desc->buf_addr = buf; \
- desc->BUF_SIZE = len; \
- status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
+ do { \
+ desc->buf_addr = buf; \
+ desc->BUF_SIZE = len; \
+ status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0); \
+ } while (0)
#define BLOCK_INPUT_DMA(buf, len) memcpy( buf, desc_next->buf_addr, pktlen )
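
The do { ... } while (0) wrapper added to BLOCK_INPUT above is the standard guard for multi-statement macros; a minimal sketch of the pitfall it avoids (stmt_one/stmt_two are hypothetical):

void stmt_one(int x);
void stmt_two(int x);

#define BAD_PAIR(x)	stmt_one(x); stmt_two(x)
#define GOOD_PAIR(x)	do { stmt_one(x); stmt_two(x); } while (0)

void caller(int cond, int x)
{
	if (cond)
		BAD_PAIR(x);	/* expands so that stmt_two(x) runs
				 * unconditionally; adding an "else"
				 * here would not even compile */

	if (cond)
		GOOD_PAIR(x);	/* both calls stay under the if, and the
				 * trailing ';' is legal before an else */
	else
		stmt_two(x);
}
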
@@ -158,20 +138,12 @@
* errno value otherwise
*
******************************************************************************/
-int wl_init( struct net_device *dev )
+int wl_init(struct net_device *dev)
{
-// unsigned long flags;
-// struct wl_private *lp = wl_priv(dev);
-
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ return 0;
+} /* wl_init */
- /* Nothing to do, but grab the spinlock anyway just in case we ever need
- this routine */
-// wl_lock( lp, &flags );
-// wl_unlock( lp, &flags );
-
- return 0;
-} // wl_init
/*============================================================================*/
/*******************************************************************************
@@ -193,17 +165,20 @@ int wl_init( struct net_device *dev )
* errno otherwise
*
******************************************************************************/
-int wl_config( struct net_device *dev, struct ifmap *map )
+int wl_config(struct net_device *dev, struct ifmap *map)
{
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- DBG_PARAM( DbgInfo, "map", "0x%p", map );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "map", "0x%p", map);
+
+ /*
+ * The only thing we care about here is a port change.
+ * Since this is not needed, ignore the request.
+ */
+ DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
- /* The only thing we care about here is a port change. Since this not needed,
- ignore the request. */
- DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
+ return 0;
+} /* wl_config */
- return 0;
-} // wl_config
/*============================================================================*/
/*******************************************************************************
@@ -224,48 +199,47 @@ int wl_config( struct net_device *dev, struct ifmap *map )
* statistics.
*
******************************************************************************/
-struct net_device_stats *wl_stats( struct net_device *dev )
+struct net_device_stats *wl_stats(struct net_device *dev)
{
#ifdef USE_WDS
- int count;
-#endif /* USE_WDS */
- unsigned long flags;
- struct net_device_stats *pStats;
- struct wl_private *lp = wl_priv(dev);
+ int count;
+#endif /* USE_WDS */
+ unsigned long flags;
+ struct net_device_stats *pStats;
+ struct wl_private *lp = wl_priv(dev);
- //DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ /*DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev ); */
- pStats = NULL;
+ pStats = NULL;
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- wl_unlock( lp, &flags );
- return NULL;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ wl_unlock(lp, &flags);
+ return NULL;
+ }
+#endif /* USE_RTS */
- /* Return the statistics for the appropriate device */
+ /* Return the statistics for the appropriate device */
#ifdef USE_WDS
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( dev == lp->wds_port[count].dev ) {
- pStats = &( lp->wds_port[count].stats );
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (dev == lp->wds_port[count].dev)
+ pStats = &(lp->wds_port[count].stats);
}
- }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
+
+ /* If pStats is still NULL, then the device is not a WDS port */
+ if (pStats == NULL)
+ pStats = &(lp->stats);
- /* If pStats is still NULL, then the device is not a WDS port */
- if( pStats == NULL ) {
- pStats = &( lp->stats );
- }
+ wl_unlock(lp, &flags);
- wl_unlock( lp, &flags );
+ return pStats;
+} /* wl_stats */
- return pStats;
-} // wl_stats
/*============================================================================*/
/*******************************************************************************
@@ -288,75 +262,77 @@ struct net_device_stats *wl_stats( struct net_device *dev )
******************************************************************************/
int wl_open(struct net_device *dev)
{
- int status = HCF_SUCCESS;
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ int status = HCF_SUCCESS;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return -EIO;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping device open, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return -EIO;
+ }
+#endif /* USE_RTS */
#ifdef USE_PROFILE
- parse_config( dev );
+ parse_config(dev);
#endif
- if( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
- DBG_TRACE( DbgInfo, "Enabling Port 0\n" );
- status = wl_enable( lp );
-
- if( status != HCF_SUCCESS ) {
- DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", status );
- }
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if ( strlen( lp->fw_image_filename ) ) {
- DBG_TRACE( DbgInfo, ";???? Kludgy way to force a download\n" );
- status = wl_go( lp );
- } else {
- status = wl_apply( lp );
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if( status != HCF_SUCCESS ) {
- // Unsuccessful, try reset of the card to recover
- status = wl_reset( dev );
- }
-
- // Holding the lock too long, make a gap to allow other processes
- wl_unlock(lp, &flags);
- wl_lock( lp, &flags );
-
- if( status == HCF_SUCCESS ) {
- netif_carrier_on( dev );
- WL_WDS_NETIF_CARRIER_ON( lp );
-
- lp->is_handling_int = WL_HANDLING_INT; // Start handling interrupts
- wl_act_int_on( lp );
-
- netif_start_queue( dev );
- WL_WDS_NETIF_START_QUEUE( lp );
- } else {
- wl_hcf_error( dev, status ); /* Report the error */
- netif_device_detach( dev ); /* Stop the device and queue */
- }
-
- wl_unlock( lp, &flags );
-
- return status;
-} // wl_open
+ if (lp->portState == WVLAN_PORT_STATE_DISABLED) {
+ DBG_TRACE(DbgInfo, "Enabling Port 0\n");
+ status = wl_enable(lp);
+
+ if (status != HCF_SUCCESS) {
+ DBG_TRACE(DbgInfo, "Enable port 0 failed: 0x%x\n",
+ status);
+ }
+ }
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ if (strlen(lp->fw_image_filename)) {
+ DBG_TRACE(DbgInfo, ";???? Kludgy way to force a download\n");
+ status = wl_go(lp);
+ } else {
+ status = wl_apply(lp);
+ }
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ /* Unsuccessful, try reset of the card to recover */
+ if (status != HCF_SUCCESS)
+ status = wl_reset(dev);
+
+ /* Holding the lock too long, make a gap to allow other processes */
+ wl_unlock(lp, &flags);
+ wl_lock(lp, &flags);
+
+ if (status == HCF_SUCCESS) {
+ netif_carrier_on(dev);
+ WL_WDS_NETIF_CARRIER_ON(lp);
+
+ /* Start handling interrupts */
+ lp->is_handling_int = WL_HANDLING_INT;
+ wl_act_int_on(lp);
+
+ netif_start_queue(dev);
+ WL_WDS_NETIF_START_QUEUE(lp);
+ } else {
+ wl_hcf_error(dev, status); /* Report the error */
+ netif_device_detach(dev); /* Stop the device and queue */
+ }
+
+ wl_unlock(lp, &flags);
+
+ return status;
+} /* wl_open */
+
/*============================================================================*/
/*******************************************************************************
@@ -377,73 +353,70 @@ int wl_open(struct net_device *dev)
* errno otherwise
*
******************************************************************************/
-int wl_close( struct net_device *dev )
+int wl_close(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- /* Mark the adapter as busy */
- netif_stop_queue( dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
+ /* Mark the adapter as busy */
+ netif_stop_queue(dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
- netif_carrier_off( dev );
- WL_WDS_NETIF_CARRIER_OFF( lp );
+ netif_carrier_off(dev);
+ WL_WDS_NETIF_CARRIER_OFF(lp);
- /* Shutdown the adapter:
- Disable adapter interrupts
- Stop Tx/Rx
- Update statistics
- Set low power mode
- */
+ /*
+ * Shutdown the adapter:
+ * Disable adapter interrupts
+ * Stop Tx/Rx
+ * Update statistics
+ * Set low power mode
+ */
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- wl_act_int_off( lp );
- lp->is_handling_int = WL_NOT_HANDLING_INT; // Stop handling interrupts
+ wl_act_int_off(lp);
+ /* Stop handling interrupts */
+ lp->is_handling_int = WL_NOT_HANDLING_INT;
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return -EIO;
- }
-#endif /* USE_RTS */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping device close, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return -EIO;
+ }
+#endif /* USE_RTS */
- /* Disable the ports */
- wl_disable( lp );
+ /* Disable the ports */
+ wl_disable(lp);
- wl_unlock( lp, &flags );
+ wl_unlock(lp, &flags);
+
+ return 0;
+} /* wl_close */
- return 0;
-} // wl_close
/*============================================================================*/
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
-// strlcpy(info.fw_version, priv->fw_name,
-// sizeof(info.fw_version));
-
- if (dev->dev.parent) {
- dev_set_name(dev->dev.parent, "%s", info->bus_info);
- //strlcpy(info->bus_info, dev->dev.parent->bus_id,
- // sizeof(info->bus_info));
- } else {
- snprintf(info->bus_info, sizeof(info->bus_info),
- "PCMCIA FIXME");
-// "PCMCIA 0x%lx", priv->hw.iobase);
- }
-} // wl_get_drvinfo
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
+
+ if (dev->dev.parent) {
+ dev_set_name(dev->dev.parent, "%s", info->bus_info);
+ } else {
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA FIXME");
+ }
+} /* wl_get_drvinfo */
static struct ethtool_ops wl_ethtool_ops = {
- .get_drvinfo = wl_get_drvinfo,
- .get_link = ethtool_op_get_link,
+ .get_drvinfo = wl_get_drvinfo,
+ .get_link = ethtool_op_get_link,
};
-
/*******************************************************************************
* wl_ioctl()
*******************************************************************************
@@ -464,81 +437,86 @@ static struct ethtool_ops wl_ethtool_ops = {
* errno value otherwise
*
******************************************************************************/
-int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
+int wl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- int ret = 0;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ int ret = 0;
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
- DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
+ DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
- wl_act_int_off( lp );
+ wl_act_int_off(lp);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- /* Handle any RTS IOCTL here */
- if( cmd == WL_IOCTL_RTS ) {
- DBG_TRACE( DbgInfo, "IOCTL: WL_IOCTL_RTS\n" );
- ret = wvlan_rts( (struct rtsreq *)rq, dev->base_addr );
- } else {
- DBG_TRACE( DbgInfo, "IOCTL not supported in RTS mode: 0x%X\n", cmd );
- ret = -EOPNOTSUPP;
- }
+ if (lp->useRTS == 1) {
+ /* Handle any RTS IOCTL here */
+ if (cmd == WL_IOCTL_RTS) {
+ DBG_TRACE(DbgInfo, "IOCTL: WL_IOCTL_RTS\n");
+ ret = wvlan_rts((struct rtsreq *)rq, dev->base_addr);
+ } else {
+ DBG_TRACE(DbgInfo,
+ "IOCTL not supported in RTS mode: 0x%X\n",
+ cmd);
+ ret = -EOPNOTSUPP;
+ }
- goto out_act_int_on_unlock;
- }
-#endif /* USE_RTS */
+ goto out_act_int_on_unlock;
+ }
+#endif /* USE_RTS */
- /* Only handle UIL IOCTL requests when the UIL has the system blocked. */
- if( !(( lp->flags & WVLAN2_UIL_BUSY ) && ( cmd != WVLAN2_IOCTL_UIL ))) {
+ /* Only handle UIL IOCTL requests when the UIL has the system blocked. */
+ if (!((lp->flags & WVLAN2_UIL_BUSY) && (cmd != WVLAN2_IOCTL_UIL))) {
#ifdef USE_UIL
- struct uilreq *urq = (struct uilreq *)rq;
+ struct uilreq *urq = (struct uilreq *)rq;
#endif /* USE_UIL */
- switch( cmd ) {
- // ================== Private IOCTLs (up to 16) ==================
+ switch (cmd) {
+ /* ================== Private IOCTLs (up to 16) ================== */
#ifdef USE_UIL
- case WVLAN2_IOCTL_UIL:
- DBG_TRACE( DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n" );
- ret = wvlan_uil( urq, lp );
- break;
-#endif /* USE_UIL */
-
- default:
- DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n", cmd );
- ret = -EOPNOTSUPP;
- break;
+ case WVLAN2_IOCTL_UIL:
+ DBG_TRACE(DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n");
+ ret = wvlan_uil(urq, lp);
+ break;
+#endif /* USE_UIL */
+
+ default:
+ DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n",
+ cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else {
+ DBG_WARNING(DbgInfo,
+ "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n");
+ ret = -EBUSY;
}
- } else {
- DBG_WARNING( DbgInfo, "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n" );
- ret = -EBUSY;
- }
#ifdef USE_RTS
out_act_int_on_unlock:
-#endif /* USE_RTS */
- wl_act_int_on( lp );
+#endif /* USE_RTS */
+ wl_act_int_on(lp);
- wl_unlock( lp, &flags );
+ wl_unlock(lp, &flags);
+
+ return ret;
+} /* wl_ioctl */
- return ret;
-} // wl_ioctl
/*============================================================================*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-void wl_poll(struct net_device *dev)
+static void wl_poll(struct net_device *dev)
{
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- struct pt_regs regs;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
+ struct pt_regs regs;
- wl_lock( lp, &flags );
- wl_isr(dev->irq, dev, &regs);
- wl_unlock( lp, &flags );
+ wl_lock(lp, &flags);
+ wl_isr(dev->irq, dev, &regs);
+ wl_unlock(lp, &flags);
}
#endif
@@ -559,53 +537,54 @@ void wl_poll(struct net_device *dev)
* N/A
*
******************************************************************************/
-void wl_tx_timeout( struct net_device *dev )
+void wl_tx_timeout(struct net_device *dev)
{
#ifdef USE_WDS
- int count;
-#endif /* USE_WDS */
- unsigned long flags;
- struct wl_private *lp = wl_priv(dev);
- struct net_device_stats *pStats = NULL;
+ int count;
+#endif /* USE_WDS */
+ unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ struct net_device_stats *pStats = NULL;
- DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
+ DBG_WARNING(DbgInfo, "%s: Transmit timeout.\n", dev->name);
- wl_lock( lp, &flags );
+ wl_lock(lp, &flags);
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
- wl_unlock( lp, &flags );
- return;
- }
-#endif /* USE_RTS */
-
- /* Figure out which device (the "root" device or WDS port) this timeout
- is for */
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo,
+ "Skipping tx_timeout handler, in RTS mode\n");
+ wl_unlock(lp, &flags);
+ return;
+ }
+#endif /* USE_RTS */
+
+ /* Figure out which device (the "root" device or WDS port) this timeout
+ is for */
#ifdef USE_WDS
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( dev == lp->wds_port[count].dev ) {
- pStats = &( lp->wds_port[count].stats );
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (dev == lp->wds_port[count].dev) {
+ pStats = &(lp->wds_port[count].stats);
- /* Break the loop so that we can use the counter to access WDS
- information in the private structure */
- break;
+ /* Break the loop so that we can use the counter to access WDS
+ information in the private structure */
+ break;
+ }
}
- }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
+
+ /* If pStats is still NULL, then the device is not a WDS port */
+ if (pStats == NULL)
+ pStats = &(lp->stats);
- /* If pStats is still NULL, then the device is not a WDS port */
- if( pStats == NULL ) {
- pStats = &( lp->stats );
- }
+ /* Accumulate the timeout error */
+ pStats->tx_errors++;
- /* Accumulate the timeout error */
- pStats->tx_errors++;
+ wl_unlock(lp, &flags);
+} /* wl_tx_timeout */
- wl_unlock( lp, &flags );
-} // wl_tx_timeout
/*============================================================================*/
/*******************************************************************************
@@ -626,103 +605,105 @@ void wl_tx_timeout( struct net_device *dev )
* 1 on error
*
******************************************************************************/
-int wl_send( struct wl_private *lp )
+int wl_send(struct wl_private *lp)
{
- int status;
- DESC_STRCT *desc;
- WVLAN_LFRAME *txF = NULL;
- struct list_head *element;
- int len;
+ int status;
+ DESC_STRCT *desc;
+ WVLAN_LFRAME *txF = NULL;
+ struct list_head *element;
+ int len;
/*------------------------------------------------------------------------*/
- if( lp == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
- return FALSE;
- }
- if( lp->dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
- return FALSE;
- }
-
- /* Check for the availability of FIDs; if none are available, don't take any
- frames off the txQ */
- if( lp->hcfCtx.IFB_RscInd == 0 ) {
- return FALSE;
- }
-
- /* Reclaim the TxQ Elements and place them back on the free queue */
- if( !list_empty( &( lp->txQ[0] ))) {
- element = lp->txQ[0].next;
-
- txF = (WVLAN_LFRAME * )list_entry( element, WVLAN_LFRAME, node );
- if( txF != NULL ) {
- lp->txF.skb = txF->frame.skb;
- lp->txF.port = txF->frame.port;
-
- txF->frame.skb = NULL;
- txF->frame.port = 0;
-
- list_del( &( txF->node ));
- list_add( element, &( lp->txFree ));
-
- lp->txQ_count--;
-
- if( lp->txQ_count < TX_Q_LOW_WATER_MARK ) {
- if( lp->netif_queue_on == FALSE ) {
- DBG_TX( DbgInfo, "Kickstarting Q: %d\n", lp->txQ_count );
- netif_wake_queue( lp->dev );
- WL_WDS_NETIF_WAKE_QUEUE( lp );
- lp->netif_queue_on = TRUE;
- }
- }
- }
- }
-
- if( lp->txF.skb == NULL ) {
- return FALSE;
- }
-
- /* If the device has resources (FIDs) available, then Tx the packet */
- /* Format the TxRequest and send it to the adapter */
- len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
-
- desc = &( lp->desc_tx );
- desc->buf_addr = lp->txF.skb->data;
- desc->BUF_CNT = len;
- desc->next_desc_addr = NULL;
-
- status = hcf_send_msg( &( lp->hcfCtx ), desc, lp->txF.port );
-
- if( status == HCF_SUCCESS ) {
- lp->dev->trans_start = jiffies;
-
- DBG_TX( DbgInfo, "Transmit...\n" );
-
- if( lp->txF.port == HCF_PORT_0 ) {
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += lp->txF.skb->len;
- }
+ if (lp == NULL) {
+ DBG_ERROR(DbgInfo, "Private adapter struct is NULL\n");
+ return FALSE;
+ }
+ if (lp->dev == NULL) {
+ DBG_ERROR(DbgInfo, "net_device struct in wl_private is NULL\n");
+ return FALSE;
+ }
+
+ /*
+ * Check for the availability of FIDs; if none are available,
+ * don't take any frames off the txQ
+ */
+ if (lp->hcfCtx.IFB_RscInd == 0)
+ return FALSE;
+
+ /* Reclaim the TxQ Elements and place them back on the free queue */
+ if (!list_empty(&(lp->txQ[0]))) {
+ element = lp->txQ[0].next;
+
+ txF = (WVLAN_LFRAME *) list_entry(element, WVLAN_LFRAME, node);
+ if (txF != NULL) {
+ lp->txF.skb = txF->frame.skb;
+ lp->txF.port = txF->frame.port;
+
+ txF->frame.skb = NULL;
+ txF->frame.port = 0;
+
+ list_del(&(txF->node));
+ list_add(element, &(lp->txFree));
+
+ lp->txQ_count--;
+
+ if (lp->txQ_count < TX_Q_LOW_WATER_MARK) {
+ if (lp->netif_queue_on == FALSE) {
+ DBG_TX(DbgInfo, "Kickstarting Q: %d\n",
+ lp->txQ_count);
+ netif_wake_queue(lp->dev);
+ WL_WDS_NETIF_WAKE_QUEUE(lp);
+ lp->netif_queue_on = TRUE;
+ }
+ }
+ }
+ }
+
+ if (lp->txF.skb == NULL)
+ return FALSE;
+
+ /* If the device has resources (FIDs) available, then Tx the packet */
+ /* Format the TxRequest and send it to the adapter */
+ len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
+
+ desc = &(lp->desc_tx);
+ desc->buf_addr = lp->txF.skb->data;
+ desc->BUF_CNT = len;
+ desc->next_desc_addr = NULL;
+
+ status = hcf_send_msg(&(lp->hcfCtx), desc, lp->txF.port);
+
+ if (status == HCF_SUCCESS) {
+ lp->dev->trans_start = jiffies;
+
+ DBG_TX(DbgInfo, "Transmit...\n");
+ if (lp->txF.port == HCF_PORT_0) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->txF.skb->len;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_packets++;
- lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_bytes += lp->txF.skb->len;
- }
+ else {
+ lp->wds_port[((lp->txF.port >> 8) -
+ 1)].stats.tx_packets++;
+ lp->wds_port[((lp->txF.port >> 8) -
+ 1)].stats.tx_bytes += lp->txF.skb->len;
+ }
-#endif /* USE_WDS */
+#endif /* USE_WDS */
- /* Free the skb and perform queue cleanup, as the buffer was
- transmitted successfully */
- dev_kfree_skb( lp->txF.skb );
+ /* Free the skb and perform queue cleanup, as the buffer was
+ transmitted successfully */
+ dev_kfree_skb(lp->txF.skb);
- lp->txF.skb = NULL;
- lp->txF.port = 0;
- }
+ lp->txF.skb = NULL;
+ lp->txF.port = 0;
+ }
+
+ return TRUE;
+} /* wl_send */
- return TRUE;
-} // wl_send
/*============================================================================*/
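
The queue accounting visible in wl_send() above (and in wl_tx() below) is the usual netdev
flow-control pattern with hysteresis: the kernel queue is stopped once the private txQ holds
DEFAULT_NUM_TX_FRAMES entries and is only woken again when the count falls below
TX_Q_LOW_WATER_MARK, so the queue is not toggled on every frame. A minimal sketch of that
pattern, kept separate from the HCF specifics (struct txq_ctl and both helpers below are
illustrative stand-ins, not driver symbols):

#include <linux/netdevice.h>

/* Illustrative flow-control sketch; the two thresholds play the roles of
 * DEFAULT_NUM_TX_FRAMES and TX_Q_LOW_WATER_MARK in the driver above. */
struct txq_ctl {
	int count;      /* frames currently queued */
	int high;       /* stop the kernel queue at this depth */
	int low;        /* wake it again below this depth */
	int queue_on;   /* mirrors lp->netif_queue_on */
};

static void txq_frame_queued(struct txq_ctl *q, struct net_device *dev)
{
	if (++q->count >= q->high && q->queue_on) {
		netif_stop_queue(dev);  /* back-pressure the stack */
		q->queue_on = 0;
	}
}

static void txq_frame_reclaimed(struct txq_ctl *q, struct net_device *dev)
{
	if (--q->count < q->low && !q->queue_on) {
		netif_wake_queue(dev);  /* resume transmissions */
		q->queue_on = 1;
	}
}
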
/*******************************************************************************
@@ -744,75 +725,74 @@ int wl_send( struct wl_private *lp )
* 1 on error
*
******************************************************************************/
-int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
+int wl_tx(struct sk_buff *skb, struct net_device *dev, int port)
{
- unsigned long flags;
- struct wl_private *lp = wl_priv(dev);
- WVLAN_LFRAME *txF = NULL;
- struct list_head *element;
+ unsigned long flags;
+ struct wl_private *lp = wl_priv(dev);
+ WVLAN_LFRAME *txF = NULL;
+ struct list_head *element;
/*------------------------------------------------------------------------*/
- /* Grab the spinlock */
- wl_lock( lp, &flags );
-
- if( lp->flags & WVLAN2_UIL_BUSY ) {
- DBG_WARNING( DbgInfo, "UIL has device blocked\n" );
- /* Start dropping packets here??? */
- wl_unlock( lp, &flags );
- return 1;
- }
+ /* Grab the spinlock */
+ wl_lock(lp, &flags);
+ if (lp->flags & WVLAN2_UIL_BUSY) {
+ DBG_WARNING(DbgInfo, "UIL has device blocked\n");
+ /* Start dropping packets here??? */
+ wl_unlock(lp, &flags);
+ return 1;
+ }
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: we're getting a Tx...\n" );
- wl_unlock( lp, &flags );
- return 1;
- }
-#endif /* USE_RTS */
-
- if( !lp->use_dma ) {
- /* Get an element from the queue */
- element = lp->txFree.next;
- txF = (WVLAN_LFRAME *)list_entry( element, WVLAN_LFRAME, node );
- if( txF == NULL ) {
- DBG_ERROR( DbgInfo, "Problem with list_entry\n" );
- wl_unlock( lp, &flags );
- return 1;
- }
- /* Fill out the frame */
- txF->frame.skb = skb;
- txF->frame.port = port;
- /* Move the frame to the txQ */
- /* NOTE: Here's where we would do priority queueing */
- list_move(&(txF->node), &(lp->txQ[0]));
-
- lp->txQ_count++;
- if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES ) {
- DBG_TX( DbgInfo, "Q Full: %d\n", lp->txQ_count );
- if( lp->netif_queue_on == TRUE ) {
- netif_stop_queue( lp->dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
- lp->netif_queue_on = FALSE;
- }
- }
- }
- wl_act_int_off( lp ); /* Disable Interrupts */
-
- /* Send the data to the hardware using the appropriate method */
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: we're getting a Tx...\n");
+ wl_unlock(lp, &flags);
+ return 1;
+ }
+#endif /* USE_RTS */
+
+ if (!lp->use_dma) {
+ /* Get an element from the queue */
+ element = lp->txFree.next;
+ txF = (WVLAN_LFRAME *) list_entry(element, WVLAN_LFRAME, node);
+ if (txF == NULL) {
+ DBG_ERROR(DbgInfo, "Problem with list_entry\n");
+ wl_unlock(lp, &flags);
+ return 1;
+ }
+ /* Fill out the frame */
+ txF->frame.skb = skb;
+ txF->frame.port = port;
+ /* Move the frame to the txQ */
+ /* NOTE: Here's where we would do priority queueing */
+ list_move(&(txF->node), &(lp->txQ[0]));
+
+ lp->txQ_count++;
+ if (lp->txQ_count >= DEFAULT_NUM_TX_FRAMES) {
+ DBG_TX(DbgInfo, "Q Full: %d\n", lp->txQ_count);
+ if (lp->netif_queue_on == TRUE) {
+ netif_stop_queue(lp->dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+ lp->netif_queue_on = FALSE;
+ }
+ }
+ }
+ wl_act_int_off(lp); /* Disable Interrupts */
+
+ /* Send the data to the hardware using the appropriate method */
#ifdef ENABLE_DMA
- if( lp->use_dma ) {
- wl_send_dma( lp, skb, port );
- }
- else
+ if (lp->use_dma) {
+ wl_send_dma(lp, skb, port);
+ } else
#endif
- {
- wl_send( lp );
- }
- /* Re-enable Interrupts, release the spinlock and return */
- wl_act_int_on( lp );
- wl_unlock( lp, &flags );
- return 0;
-} // wl_tx
+ {
+ wl_send(lp);
+ }
+ /* Re-enable Interrupts, release the spinlock and return */
+ wl_act_int_on(lp);
+ wl_unlock(lp, &flags);
+ return 0;
+} /* wl_tx */
+
/*============================================================================*/
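
wl_tx() above recycles a fixed pool of WVLAN_LFRAME wrappers: an element is taken off the
txFree list, filled with the skb and port, and moved onto txQ[0] with list_move() (the spot
marked as where priority queueing would go). A stripped-down sketch of that
free-list/pending-queue handoff, using a stand-in frame struct rather than the driver's
WVLAN_LFRAME, and with an explicit empty-list check added for safety:

#include <linux/list.h>
#include <linux/skbuff.h>

/* Stand-in for the driver's WVLAN_LFRAME wrapper. */
struct tx_frame {
	struct list_head node;
	struct sk_buff *skb;
	int port;
};

/*
 * Move one wrapper from the free list to the pending queue, as wl_tx()
 * does with txFree and txQ[0]; returns NULL when the pool is exhausted.
 */
static struct tx_frame *tx_frame_queue(struct list_head *free_list,
				       struct list_head *pending,
				       struct sk_buff *skb, int port)
{
	struct tx_frame *f;

	if (list_empty(free_list))
		return NULL;

	f = list_first_entry(free_list, struct tx_frame, node);
	f->skb = skb;
	f->port = port;
	list_move(&f->node, pending);	/* unlink from free, put on the pending queue */

	return f;
}
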
/*******************************************************************************
@@ -835,67 +815,68 @@ int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
******************************************************************************/
int wl_rx(struct net_device *dev)
{
- int port;
- struct sk_buff *skb;
- struct wl_private *lp = wl_priv(dev);
- int status;
- hcf_16 pktlen;
- hcf_16 hfs_stat;
- DESC_STRCT *desc;
+ int port;
+ struct sk_buff *skb;
+ struct wl_private *lp = wl_priv(dev);
+ int status;
+ hcf_16 pktlen;
+ hcf_16 hfs_stat;
+ DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if(!( lp->flags & WVLAN2_UIL_BUSY )) {
+ if (!(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: We're getting an Rx...\n" );
- return -EIO;
- }
-#endif /* USE_RTS */
-
- /* Read the HFS_STAT register from the lookahead buffer */
- hfs_stat = (hcf_16)(( lp->lookAheadBuf[HFS_STAT] ) |
- ( lp->lookAheadBuf[HFS_STAT + 1] << 8 ));
-
- /* Make sure the frame isn't bad */
- if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS ) {
- DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
- lp->lookAheadBuf[HFS_STAT] );
- return -EIO;
- }
-
- /* Determine what port this packet is for */
- port = ( hfs_stat >> 8 ) & 0x0007;
- DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
-
- pktlen = lp->hcfCtx.IFB_RxLen;
- if (pktlen != 0) {
- skb = ALLOC_SKB(pktlen);
- if (skb != NULL) {
- /* Set the netdev based on the port */
- switch( port ) {
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: We're getting an Rx...\n");
+ return -EIO;
+ }
+#endif /* USE_RTS */
+
+ /* Read the HFS_STAT register from the lookahead buffer */
+ hfs_stat = (hcf_16) ((lp->lookAheadBuf[HFS_STAT]) |
+ (lp->lookAheadBuf[HFS_STAT + 1] << 8));
+
+ /* Make sure the frame isn't bad */
+ if ((hfs_stat & HFS_STAT_ERR) != HCF_SUCCESS) {
+ DBG_WARNING(DbgInfo,
+ "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
+ lp->lookAheadBuf[HFS_STAT]);
+ return -EIO;
+ }
+
+ /* Determine what port this packet is for */
+ port = (hfs_stat >> 8) & 0x0007;
+ DBG_RX(DbgInfo, "Rx frame for port %d\n", port);
+
+ pktlen = lp->hcfCtx.IFB_RxLen;
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
+ /* Set the netdev based on the port */
+ switch (port) {
#ifdef USE_WDS
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- skb->dev = lp->wds_port[port-1].dev;
- break;
-#endif /* USE_WDS */
-
- case 0:
- default:
- skb->dev = dev;
- break;
- }
-
- desc = &( lp->desc_rx );
-
- desc->next_desc_addr = NULL;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ skb->dev = lp->wds_port[port - 1].dev;
+ break;
+#endif /* USE_WDS */
+
+ case 0:
+ default:
+ skb->dev = dev;
+ break;
+ }
+
+ desc = &(lp->desc_rx);
+
+ desc->next_desc_addr = NULL;
/*
#define BLOCK_INPUT(buf, len) \
@@ -904,67 +885,73 @@ int wl_rx(struct net_device *dev)
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
*/
- GET_PACKET( skb->dev, skb, pktlen );
+ GET_PACKET(skb->dev, skb, pktlen);
- if( status == HCF_SUCCESS ) {
- netif_rx( skb );
+ if (status == HCF_SUCCESS) {
+ netif_rx(skb);
- if( port == 0 ) {
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
- }
+ if (port == 0) {
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_packets++;
- lp->wds_port[port-1].stats.rx_bytes += pktlen;
- }
-#endif /* USE_WDS */
-
- dev->last_rx = jiffies;
+ else {
+ lp->wds_port[port -
+ 1].stats.
+ rx_packets++;
+ lp->wds_port[port -
+ 1].stats.
+ rx_bytes += pktlen;
+ }
+#endif /* USE_WDS */
+
+ dev->last_rx = jiffies;
#ifdef WIRELESS_EXT
#ifdef WIRELESS_SPY
- if( lp->spydata.spy_number > 0 ) {
- char *srcaddr = skb->mac.raw + MAC_ADDR_SIZE;
+ if (lp->spydata.spy_number > 0) {
+ char *srcaddr =
+ skb->mac.raw +
+ MAC_ADDR_SIZE;
- wl_spy_gather( dev, srcaddr );
- }
+ wl_spy_gather(dev, srcaddr);
+ }
#endif /* WIRELESS_SPY */
#endif /* WIRELESS_EXT */
- } else {
- DBG_ERROR( DbgInfo, "Rx request to card FAILED\n" );
+ } else {
+ DBG_ERROR(DbgInfo,
+ "Rx request to card FAILED\n");
- if( port == 0 ) {
- lp->stats.rx_dropped++;
- }
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
-
- dev_kfree_skb( skb );
- }
- } else {
- DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
-
- if( port == 0 ) {
- lp->stats.rx_dropped++;
- }
+ else {
+ lp->wds_port[port -
+ 1].stats.
+ rx_dropped++;
+ }
+#endif /* USE_WDS */
+
+ dev_kfree_skb(skb);
+ }
+ } else {
+ DBG_ERROR(DbgInfo, "Could not alloc skb\n");
+
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
- }
- }
- }
-
- return 0;
-} // wl_rx
+ else {
+ lp->wds_port[port -
+ 1].stats.rx_dropped++;
+ }
+#endif /* USE_WDS */
+ }
+ }
+ }
+
+ return 0;
+} /* wl_rx */
+
/*============================================================================*/
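
In wl_rx() above the receive status word is assembled little-endian from two consecutive
bytes of the card's lookahead buffer, and the destination port sits in bits 8..10 of that
word ((hfs_stat >> 8) & 0x0007). A small, self-contained worked example of just that decode
step (hfs_stat_port() and the sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the HFS_STAT decode in wl_rx(): the status is stored little-endian
 * in the lookahead buffer and the port number occupies bits 8..10. */
static unsigned int hfs_stat_port(const uint8_t *lookahead, size_t stat_off)
{
	uint16_t hfs_stat = (uint16_t)(lookahead[stat_off] |
				       (lookahead[stat_off + 1] << 8));

	return (hfs_stat >> 8) & 0x0007;
}

int main(void)
{
	/* Status bytes 0x00, 0x03 -> hfs_stat 0x0300 -> WDS port 3. */
	uint8_t buf[2] = { 0x00, 0x03 };

	printf("port = %u\n", hfs_stat_port(buf, 0));
	return 0;
}
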
/*******************************************************************************
@@ -986,142 +973,159 @@ int wl_rx(struct net_device *dev)
******************************************************************************/
#ifdef NEW_MULTICAST
-void wl_multicast( struct net_device *dev )
+void wl_multicast(struct net_device *dev)
{
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?should we return an error status in AP mode
-//;?seems reasonable that even an AP-only driver could afford this small additional footprint
+#if 1 /* (HCF_TYPE) & HCF_TYPE_STA */
+ /*
+ * should we return an error status in AP mode ?
+ * seems reasonable that even an AP-only driver
+ * could afford this small additional footprint
+ */
- int x;
- struct netdev_hw_addr *ha;
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
+ int x;
+ struct netdev_hw_addr *ha;
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if( !wl_adapter_is_open( dev ))
- return;
+ if (!wl_adapter_is_open(dev))
+ return;
#if DBG
- if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
- DBG_PRINT(" flags: %s%s%s\n",
- ( dev->flags & IFF_PROMISC ) ? "Promiscuous " : "",
- ( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
- ( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
+ if (DBG_FLAGS(DbgInfo) & DBG_PARAM_ON) {
+ DBG_PRINT(" flags: %s%s%s\n",
+ (dev->flags & IFF_PROMISC) ? "Promiscuous " : "",
+ (dev->flags & IFF_MULTICAST) ? "Multicast " : "",
+ (dev->flags & IFF_ALLMULTI) ? "All-Multicast" : "");
- DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
+ DBG_PRINT(" mc_count: %d\n", netdev_mc_count(dev));
- netdev_for_each_mc_addr(ha, dev)
- DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
+ }
#endif /* DBG */
- if(!( lp->flags & WVLAN2_UIL_BUSY )) {
+ if (!(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
- return;
- }
-#endif /* USE_RTS */
-
- wl_lock( lp, &flags );
- wl_act_int_off( lp );
-
- if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
- if( dev->flags & IFF_PROMISC ) {
- /* Enable promiscuous mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
- DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- }
- else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
- ( dev->flags & IFF_ALLMULTI )) {
- /* Shutting off this filter will enable all multicast frames to
- be sent up from the device; however, this is a static RID, so
- a call to wl_apply() is needed */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
- DBG_PRINT( "Enabling all multicast mode (IFF_ALLMULTI)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- wl_apply( lp );
- }
- else if (!netdev_mc_empty(dev)) {
- /* Set the multicast addresses */
- lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
- lp->ltvRecord.typ = CFG_GROUP_ADDR;
-
- x = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
- ha->addr, ETH_ALEN);
- DBG_PRINT( "Setting multicast list\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- } else {
- /* Disable promiscuous mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
- DBG_PRINT( "Disabling Promiscuous mode\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
-
- /* Disable multicast mode */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_GROUP_ADDR;
- DBG_PRINT( "Disabling Multicast mode\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
-
- /* Turning on this filter will prevent all multicast frames from
- being sent up from the device; however, this is a static RID,
- so a call to wl_apply() is needed */
- lp->ltvRecord.len = 2;
- lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
- lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
- DBG_PRINT( "Disabling all multicast mode (IFF_ALLMULTI)\n" );
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
- wl_apply( lp );
- }
- }
- wl_act_int_on( lp );
- wl_unlock( lp, &flags );
- }
+ if (lp->useRTS == 1) {
+ DBG_TRACE(DbgInfo, "Skipping multicast, in RTS mode\n");
+ return;
+ }
+#endif /* USE_RTS */
+
+ wl_lock(lp, &flags);
+ wl_act_int_off(lp);
+
+ if (CNV_INT_TO_LITTLE(lp->hcfCtx.IFB_FWIdentity.comp_id) ==
+ COMP_ID_FW_STA) {
+ if (dev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(1);
+ DBG_PRINT
+ ("Enabling Promiscuous mode (IFF_PROMISC)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ } else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Shutting off this filter will enable all multicast frames to
+ be sent up from the device; however, this is a static RID, so
+ a call to wl_apply() is needed */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(0);
+ DBG_PRINT
+ ("Enabling all multicast mode (IFF_ALLMULTI)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ wl_apply(lp);
+ } else if (!netdev_mc_empty(dev)) {
+ /* Set the multicast addresses */
+ lp->ltvRecord.len =
+ (netdev_mc_count(dev) * 3) + 1;
+ lp->ltvRecord.typ = CFG_GROUP_ADDR;
+
+ x = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&
+ (lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
+ ha->addr, ETH_ALEN);
+ DBG_PRINT("Setting multicast list\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ } else {
+ /* Disable promiscuous mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(0);
+ DBG_PRINT("Disabling Promiscuous mode\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+
+ /* Disable multicast mode */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_GROUP_ADDR;
+ DBG_PRINT("Disabling Multicast mode\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+
+ /*
+ * Turning on this filter will prevent all multicast frames from
+ * being sent up from the device; however, this is a static RID,
+ * so a call to wl_apply() is needed
+ */
+ lp->ltvRecord.len = 2;
+ lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
+ lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(1);
+ DBG_PRINT
+ ("Disabling all multicast mode (IFF_ALLMULTI)\n");
+ hcf_put_info(&(lp->hcfCtx),
+ (LTVP) & (lp->ltvRecord));
+ wl_apply(lp);
+ }
+ }
+ wl_act_int_on(lp);
+ wl_unlock(lp, &flags);
+ }
#endif /* HCF_STA */
-} // wl_multicast
+} /* wl_multicast */
+
/*============================================================================*/
#else /* NEW_MULTICAST */
-void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
+void wl_multicast(struct net_device *dev, int num_addrs, void *addrs)
{
- DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
- DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
- DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "num_addrs", "%d", num_addrs);
+ DBG_PARAM(DbgInfo, "addrs", "0x%p", addrs);
#error Obsolete set multicast interface!
-} // wl_multicast
+} /* wl_multicast */
+
/*============================================================================*/
#endif /* NEW_MULTICAST */
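
Every firmware setting written in wl_multicast() above goes through the same three-step LTV
sequence: set the record length and type, store the value in little-endian form, then hand
the record to hcf_put_info(). A hypothetical helper that factors out the single-u16 case,
reusing the driver's own fields and calls exactly as they appear above (wl_put_u16_rid()
itself is not part of the driver, and it assumes the driver's declarations are in scope):

/*
 * Hypothetical wrapper for the repeated LTV put sequence in wl_multicast();
 * the return value of hcf_put_info() is ignored, as in the calls above.
 */
static void wl_put_u16_rid(struct wl_private *lp, hcf_16 rid, hcf_16 value)
{
	lp->ltvRecord.len = 2;	/* length in 16-bit words: typ + one value */
	lp->ltvRecord.typ = rid;
	lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE(value);
	hcf_put_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord));
}

/* e.g. wl_put_u16_rid(lp, CFG_PROMISCUOUS_MODE, 1); to enable promiscuous mode */

For the static RIDs such as CFG_CNF_RX_ALL_GROUP_ADDR, the wl_apply() call would still be
needed afterwards, as the comments in wl_multicast() note.
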
-static const struct net_device_ops wl_netdev_ops =
-{
- .ndo_start_xmit = &wl_tx_port0,
+static const struct net_device_ops wl_netdev_ops = {
+ .ndo_start_xmit = &wl_tx_port0,
- .ndo_set_config = &wl_config,
- .ndo_get_stats = &wl_stats,
- .ndo_set_rx_mode = &wl_multicast,
+ .ndo_set_config = &wl_config,
+ .ndo_get_stats = &wl_stats,
+ .ndo_set_rx_mode = &wl_multicast,
- .ndo_init = &wl_insert,
- .ndo_open = &wl_adapter_open,
- .ndo_stop = &wl_adapter_close,
- .ndo_do_ioctl = &wl_ioctl,
+ .ndo_init = &wl_insert,
+ .ndo_open = &wl_adapter_open,
+ .ndo_stop = &wl_adapter_close,
+ .ndo_do_ioctl = &wl_ioctl,
- .ndo_tx_timeout = &wl_tx_timeout,
+ .ndo_tx_timeout = &wl_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = wl_poll,
+ .ndo_poll_controller = wl_poll,
#endif
};
@@ -1144,48 +1148,51 @@ static const struct net_device_ops wl_netdev_ops =
* device.
*
******************************************************************************/
-struct net_device * wl_device_alloc( void )
+struct net_device *wl_device_alloc(void)
{
- struct net_device *dev = NULL;
- struct wl_private *lp = NULL;
-
- /* Alloc a net_device struct */
- dev = alloc_etherdev(sizeof(struct wl_private));
- if (!dev)
- return NULL;
-
- /* Initialize the 'next' pointer in the struct. Currently only used for PCI,
- but do it here just in case it's used for other buses in the future */
- lp = wl_priv(dev);
-
+ struct net_device *dev = NULL;
+ struct wl_private *lp = NULL;
+
+ /* Alloc a net_device struct */
+ dev = alloc_etherdev(sizeof(struct wl_private));
+ if (!dev)
+ return NULL;
+
+ /*
+ * Initialize the 'next' pointer in the struct.
+ * Currently only used for PCI,
+ * but do it here just in case it's used
+ * for other buses in the future
+ */
+ lp = wl_priv(dev);
+
+ /* Check MTU */
+ if (dev->mtu > MTU_MAX) {
+ DBG_WARNING(DbgInfo, "%s: MTU set too high, limiting to %d.\n",
+ dev->name, MTU_MAX);
+ dev->mtu = MTU_MAX;
+ }
- /* Check MTU */
- if( dev->mtu > MTU_MAX )
- {
- DBG_WARNING( DbgInfo, "%s: MTU set too high, limiting to %d.\n",
- dev->name, MTU_MAX );
- dev->mtu = MTU_MAX;
- }
+ /* Setup the function table in the device structure. */
- /* Setup the function table in the device structure. */
+ dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+ lp->wireless_data.spy_data = &lp->spy_data;
+ dev->wireless_data = &lp->wireless_data;
- dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
- lp->wireless_data.spy_data = &lp->spy_data;
- dev->wireless_data = &lp->wireless_data;
+ dev->netdev_ops = &wl_netdev_ops;
- dev->netdev_ops = &wl_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &wl_ethtool_ops;
- dev->ethtool_ops = &wl_ethtool_ops;
+ netif_stop_queue(dev);
- netif_stop_queue( dev );
+ /* Allocate virtual devices for WDS support if needed */
+ WL_WDS_DEVICE_ALLOC(lp);
- /* Allocate virtual devices for WDS support if needed */
- WL_WDS_DEVICE_ALLOC( lp );
+ return dev;
+} /* wl_device_alloc */
- return dev;
-} // wl_device_alloc
/*============================================================================*/
/*******************************************************************************
@@ -1206,15 +1213,14 @@ struct net_device * wl_device_alloc( void )
* N/A
*
******************************************************************************/
-void wl_device_dealloc( struct net_device *dev )
+void wl_device_dealloc(struct net_device *dev)
{
-// struct wl_private *lp = wl_priv(dev);
+ /* Dealloc the WDS ports */
+ WL_WDS_DEVICE_DEALLOC(lp);
- /* Dealloc the WDS ports */
- WL_WDS_DEVICE_DEALLOC( lp );
+ free_netdev(dev);
+} /* wl_device_dealloc */
- free_netdev( dev );
-} // wl_device_dealloc
/*============================================================================*/
/*******************************************************************************
@@ -1235,15 +1241,16 @@ void wl_device_dealloc( struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port0(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 0\n" );
+ DBG_TX(DbgInfo, "Tx on Port 0\n");
- return wl_tx( skb, dev, HCF_PORT_0 );
+ return wl_tx(skb, dev, HCF_PORT_0);
#ifdef ENABLE_DMA
- return wl_tx_dma( skb, dev, HCF_PORT_0 );
+ return wl_tx_dma(skb, dev, HCF_PORT_0);
#endif
-} // wl_tx_port0
+} /* wl_tx_port0 */
+
/*============================================================================*/
#ifdef USE_WDS
@@ -1266,11 +1273,12 @@ int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port1(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 1\n" );
- return wl_tx( skb, dev, HCF_PORT_1 );
-} // wl_tx_port1
+ DBG_TX(DbgInfo, "Tx on Port 1\n");
+ return wl_tx(skb, dev, HCF_PORT_1);
+} /* wl_tx_port1 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1291,11 +1299,12 @@ int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port2(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 2\n" );
- return wl_tx( skb, dev, HCF_PORT_2 );
-} // wl_tx_port2
+ DBG_TX(DbgInfo, "Tx on Port 2\n");
+ return wl_tx(skb, dev, HCF_PORT_2);
+} /* wl_tx_port2 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1316,11 +1325,12 @@ int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port3(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 3\n" );
- return wl_tx( skb, dev, HCF_PORT_3 );
-} // wl_tx_port3
+ DBG_TX(DbgInfo, "Tx on Port 3\n");
+ return wl_tx(skb, dev, HCF_PORT_3);
+} /* wl_tx_port3 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1341,11 +1351,12 @@ int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port4(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 4\n" );
- return wl_tx( skb, dev, HCF_PORT_4 );
-} // wl_tx_port4
+ DBG_TX(DbgInfo, "Tx on Port 4\n");
+ return wl_tx(skb, dev, HCF_PORT_4);
+} /* wl_tx_port4 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1366,11 +1377,12 @@ int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port5(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 5\n" );
- return wl_tx( skb, dev, HCF_PORT_5 );
-} // wl_tx_port5
+ DBG_TX(DbgInfo, "Tx on Port 5\n");
+ return wl_tx(skb, dev, HCF_PORT_5);
+} /* wl_tx_port5 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1391,11 +1403,12 @@ int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
* N/A
*
******************************************************************************/
-int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
+int wl_tx_port6(struct sk_buff *skb, struct net_device *dev)
{
- DBG_TX( DbgInfo, "Tx on Port 6\n" );
- return wl_tx( skb, dev, HCF_PORT_6 );
-} // wl_tx_port6
+ DBG_TX(DbgInfo, "Tx on Port 6\n");
+ return wl_tx(skb, dev, HCF_PORT_6);
+} /* wl_tx_port6 */
+
/*============================================================================*/
/*******************************************************************************
@@ -1417,50 +1430,52 @@ int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
* structs in the private adapter structure.
*
******************************************************************************/
-void wl_wds_device_alloc( struct wl_private *lp )
+void wl_wds_device_alloc(struct wl_private *lp)
{
- int count;
+ int count;
- /* WDS support requires additional net_device structs to be allocated,
- so that user space apps can use these virtual devices to specify the
- port on which to Tx/Rx */
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- struct net_device *dev_wds = NULL;
+ /* WDS support requires additional net_device structs to be allocated,
+ so that user space apps can use these virtual devices to specify the
+ port on which to Tx/Rx */
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ struct net_device *dev_wds = NULL;
+
+ dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
+ if (!dev_wds)
+ return;
+
+ ether_setup(dev_wds);
+
+ lp->wds_port[count].dev = dev_wds;
+
+ /* Re-use wl_init for all the devices, as it currently does nothing, but
+ * is required. Re-use the stats/tx_timeout handler for all as well; the
+ * WDS port which is requesting these operations can be determined by
+ * the net_device pointer. Set the private member of all devices to point
+ * to the same net_device struct; that way, all information gets
+ * funnelled through the one "real" net_device. Name the WDS ports
+ * "wds<n>"
+	 */
+ lp->wds_port[count].dev->init = &wl_init;
+ lp->wds_port[count].dev->get_stats = &wl_stats;
+ lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
+ lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
+ lp->wds_port[count].dev->priv = lp;
+
+ sprintf(lp->wds_port[count].dev->name, "wds%d", count);
+ }
- dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
- if (!dev_wds)
- return;
+ /* Register the Tx handlers */
+ lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
+ lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
+ lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
+ lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
+ lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
+ lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
+
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+} /* wl_wds_device_alloc */
- ether_setup( dev_wds );
-
- lp->wds_port[count].dev = dev_wds;
-
- /* Re-use wl_init for all the devices, as it currently does nothing, but
- is required. Re-use the stats/tx_timeout handler for all as well; the
- WDS port which is requesting these operations can be determined by
- the net_device pointer. Set the private member of all devices to point
- to the same net_device struct; that way, all information gets
- funnelled through the one "real" net_device. Name the WDS ports
- "wds<n>" */
- lp->wds_port[count].dev->init = &wl_init;
- lp->wds_port[count].dev->get_stats = &wl_stats;
- lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
- lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
- lp->wds_port[count].dev->priv = lp;
-
- sprintf( lp->wds_port[count].dev->name, "wds%d", count );
- }
-
- /* Register the Tx handlers */
- lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
- lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
- lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
- lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
- lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
- lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
-
- WL_WDS_NETIF_STOP_QUEUE( lp );
-} // wl_wds_device_alloc
/*============================================================================*/
/*******************************************************************************
@@ -1480,26 +1495,27 @@ void wl_wds_device_alloc( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_device_dealloc( struct wl_private *lp )
+void wl_wds_device_dealloc(struct wl_private *lp)
{
- int count;
+ int count;
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- struct net_device *dev_wds = NULL;
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ struct net_device *dev_wds = NULL;
- dev_wds = lp->wds_port[count].dev;
+ dev_wds = lp->wds_port[count].dev;
- if( dev_wds != NULL ) {
- if( dev_wds->flags & IFF_UP ) {
- dev_close( dev_wds );
- dev_wds->flags &= ~( IFF_UP | IFF_RUNNING );
- }
+ if (dev_wds != NULL) {
+ if (dev_wds->flags & IFF_UP) {
+ dev_close(dev_wds);
+ dev_wds->flags &= ~(IFF_UP | IFF_RUNNING);
+ }
+
+ free_netdev(dev_wds);
+ lp->wds_port[count].dev = NULL;
+ }
+ }
+} /* wl_wds_device_dealloc */
- free_netdev(dev_wds);
- lp->wds_port[count].dev = NULL;
- }
- }
-} // wl_wds_device_dealloc
/*============================================================================*/
/*******************************************************************************
@@ -1520,21 +1536,22 @@ void wl_wds_device_dealloc( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_start_queue( struct wl_private *lp )
+void wl_wds_netif_start_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == FALSE ) {
- netif_start_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = TRUE;
- }
- }
- }
-} // wl_wds_netif_start_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == FALSE) {
+ netif_start_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = TRUE;
+ }
+ }
+ }
+} /* wl_wds_netif_start_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1555,21 +1572,22 @@ void wl_wds_netif_start_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_stop_queue( struct wl_private *lp )
+void wl_wds_netif_stop_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == TRUE ) {
- netif_stop_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = FALSE;
- }
- }
- }
-} // wl_wds_netif_stop_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == TRUE) {
+ netif_stop_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = FALSE;
+ }
+ }
+ }
+} /* wl_wds_netif_stop_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1590,21 +1608,22 @@ void wl_wds_netif_stop_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_wake_queue( struct wl_private *lp )
+void wl_wds_netif_wake_queue(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered &&
- lp->wds_port[count].netif_queue_on == FALSE ) {
- netif_wake_queue( lp->wds_port[count].dev );
- lp->wds_port[count].netif_queue_on = TRUE;
- }
- }
- }
-} // wl_wds_netif_wake_queue
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered &&
+ lp->wds_port[count].netif_queue_on == FALSE) {
+ netif_wake_queue(lp->wds_port[count].dev);
+ lp->wds_port[count].netif_queue_on = TRUE;
+ }
+ }
+ }
+} /* wl_wds_netif_wake_queue */
+
/*============================================================================*/
/*******************************************************************************
@@ -1625,19 +1644,19 @@ void wl_wds_netif_wake_queue( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_carrier_on( struct wl_private *lp )
+void wl_wds_netif_carrier_on(struct wl_private *lp)
{
- int count;
+ int count;
/*------------------------------------------------------------------------*/
- if( lp != NULL ) {
- for( count = 0; count < NUM_WDS_PORTS; count++ ) {
- if( lp->wds_port[count].is_registered ) {
- netif_carrier_on( lp->wds_port[count].dev );
- }
- }
- }
-} // wl_wds_netif_carrier_on
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered)
+ netif_carrier_on(lp->wds_port[count].dev);
+ }
+ }
+} /* wl_wds_netif_carrier_on */
+
/*============================================================================*/
/*******************************************************************************
@@ -1658,21 +1677,22 @@ void wl_wds_netif_carrier_on( struct wl_private *lp )
* N/A
*
******************************************************************************/
-void wl_wds_netif_carrier_off( struct wl_private *lp )
+void wl_wds_netif_carrier_off(struct wl_private *lp)
{
int count;
- if(lp != NULL) {
- for(count = 0; count < NUM_WDS_PORTS; count++) {
- if(lp->wds_port[count].is_registered)
+ if (lp != NULL) {
+ for (count = 0; count < NUM_WDS_PORTS; count++) {
+ if (lp->wds_port[count].is_registered)
netif_carrier_off(lp->wds_port[count].dev);
}
}
-} // wl_wds_netif_carrier_off
+} /* wl_wds_netif_carrier_off */
+
/*============================================================================*/
-#endif /* USE_WDS */
+#endif /* USE_WDS */
#ifdef ENABLE_DMA
/*******************************************************************************
@@ -1695,70 +1715,71 @@ void wl_wds_netif_carrier_off( struct wl_private *lp )
* 1 on error
*
******************************************************************************/
-int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
+int wl_send_dma(struct wl_private *lp, struct sk_buff *skb, int port)
{
- int len;
- DESC_STRCT *desc = NULL;
- DESC_STRCT *desc_next = NULL;
+ int len;
+ DESC_STRCT *desc = NULL;
+ DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
- if( lp == NULL ) {
- DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
- return FALSE;
- }
+ if (lp == NULL) {
+ DBG_ERROR(DbgInfo, "Private adapter struct is NULL\n");
+ return FALSE;
+ }
- if( lp->dev == NULL ) {
- DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
- return FALSE;
- }
+ if (lp->dev == NULL) {
+ DBG_ERROR(DbgInfo, "net_device struct in wl_private is NULL\n");
+ return FALSE;
+ }
- /* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
+ /* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
- if( skb == NULL ) {
- DBG_WARNING (DbgInfo, "Nothing to send.\n");
- return FALSE;
- }
+ if (skb == NULL) {
+ DBG_WARNING(DbgInfo, "Nothing to send.\n");
+ return FALSE;
+ }
- len = skb->len;
+ len = skb->len;
- /* Get a free descriptor */
- desc = wl_pci_dma_get_tx_packet( lp );
+ /* Get a free descriptor */
+ desc = wl_pci_dma_get_tx_packet(lp);
- if( desc == NULL ) {
- if( lp->netif_queue_on == TRUE ) {
- netif_stop_queue( lp->dev );
- WL_WDS_NETIF_STOP_QUEUE( lp );
- lp->netif_queue_on = FALSE;
+ if (desc == NULL) {
+ if (lp->netif_queue_on == TRUE) {
+ netif_stop_queue(lp->dev);
+ WL_WDS_NETIF_STOP_QUEUE(lp);
+ lp->netif_queue_on = FALSE;
- dev_kfree_skb( skb );
- return 0;
- }
- }
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ SET_BUF_CNT(desc, /*HCF_DMA_FD_CNT */ HFS_ADDR_DEST);
+ SET_BUF_SIZE(desc, HCF_DMA_TX_BUF1_SIZE);
- SET_BUF_CNT( desc, /*HCF_DMA_FD_CNT*/HFS_ADDR_DEST );
- SET_BUF_SIZE( desc, HCF_DMA_TX_BUF1_SIZE );
+ desc_next = desc->next_desc_addr;
- desc_next = desc->next_desc_addr;
+ if (desc_next->buf_addr == NULL) {
+ DBG_ERROR(DbgInfo, "DMA descriptor buf_addr is NULL\n");
+ return FALSE;
+ }
- if( desc_next->buf_addr == NULL ) {
- DBG_ERROR( DbgInfo, "DMA descriptor buf_addr is NULL\n" );
- return FALSE;
- }
+ /* Copy the payload into the DMA packet */
+ memcpy(desc_next->buf_addr, skb->data, len);
- /* Copy the payload into the DMA packet */
- memcpy( desc_next->buf_addr, skb->data, len );
+ SET_BUF_CNT(desc_next, len);
+ SET_BUF_SIZE(desc_next, HCF_MAX_PACKET_SIZE);
- SET_BUF_CNT( desc_next, len );
- SET_BUF_SIZE( desc_next, HCF_MAX_PACKET_SIZE );
+ hcf_dma_tx_put(&(lp->hcfCtx), desc, 0);
- hcf_dma_tx_put( &( lp->hcfCtx ), desc, 0 );
+ /* Free the skb and perform queue cleanup, as the buffer was
+ transmitted successfully */
+ dev_kfree_skb(skb);
- /* Free the skb and perform queue cleanup, as the buffer was
- transmitted successfully */
- dev_kfree_skb( skb );
+ return TRUE;
+} /* wl_send_dma */
- return TRUE;
-} // wl_send_dma
/*============================================================================*/
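
wl_send_dma() above transmits through a pair of chained descriptors: the first covers the
fixed frame-header area (its count is set to HFS_ADDR_DEST), and the descriptor it points to
receives a copy of the skb payload before the whole chain is queued with hcf_dma_tx_put().
A reduced sketch of just the descriptor-filling step (fill_tx_chain() is illustrative;
DESC_STRCT, SET_BUF_CNT/SET_BUF_SIZE and the size macros are the driver's own, assumed to
be in scope):

/*
 * Illustrative reduction of the descriptor set-up done in wl_send_dma();
 * returns FALSE when the payload descriptor or its buffer is missing,
 * mirroring the buf_addr check made above.
 */
static int fill_tx_chain(DESC_STRCT *desc, const struct sk_buff *skb)
{
	DESC_STRCT *payload = desc->next_desc_addr;

	SET_BUF_CNT(desc, HFS_ADDR_DEST);	/* header portion of the frame */
	SET_BUF_SIZE(desc, HCF_DMA_TX_BUF1_SIZE);

	if (payload == NULL || payload->buf_addr == NULL)
		return FALSE;

	memcpy(payload->buf_addr, skb->data, skb->len);	/* copy the payload */
	SET_BUF_CNT(payload, skb->len);
	SET_BUF_SIZE(payload, HCF_MAX_PACKET_SIZE);

	return TRUE;
}
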
/*******************************************************************************
@@ -1779,147 +1800,152 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
* 1 on error
*
******************************************************************************/
-int wl_rx_dma( struct net_device *dev )
+int wl_rx_dma(struct net_device *dev)
{
- int port;
- hcf_16 pktlen;
- hcf_16 hfs_stat;
- struct sk_buff *skb;
- struct wl_private *lp = NULL;
- DESC_STRCT *desc, *desc_next;
- //CFG_MB_INFO_RANGE2_STRCT x;
+ int port;
+ hcf_16 pktlen;
+ hcf_16 hfs_stat;
+ struct sk_buff *skb;
+ struct wl_private *lp = NULL;
+ DESC_STRCT *desc, *desc_next;
/*------------------------------------------------------------------------*/
- DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
+ DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
- if((( lp = dev->priv ) != NULL ) &&
- !( lp->flags & WVLAN2_UIL_BUSY )) {
+ lp = dev->priv;
+ if ((lp != NULL) && !(lp->flags & WVLAN2_UIL_BUSY)) {
#ifdef USE_RTS
- if( lp->useRTS == 1 ) {
- DBG_PRINT( "RTS: We're getting an Rx...\n" );
- return -EIO;
- }
-#endif /* USE_RTS */
-
- //if( lp->dma.status == 0 )
- //{
- desc = hcf_dma_rx_get( &( lp->hcfCtx ));
-
- if( desc != NULL )
- {
- /* Check and see if we rcvd. a WMP frame */
- /*
- if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
- ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
- {
- DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
-
- x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
- x.typ = CFG_MB_INFO;
- x.base_typ = CFG_WMP;
- x.frag_cnt = 2;
- x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
- x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
- x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
- x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
-
- hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
- }
- */
-
- desc_next = desc->next_desc_addr;
-
- /* Make sure the buffer isn't empty */
- if( GET_BUF_CNT( desc ) == 0 ) {
- DBG_WARNING( DbgInfo, "Buffer is empty!\n" );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
- return -EIO;
- }
-
- /* Read the HFS_STAT register from the lookahead buffer */
- hfs_stat = (hcf_16)( desc->buf_addr[HFS_STAT/2] );
-
- /* Make sure the frame isn't bad */
- if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS )
- {
- DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
- desc->buf_addr[HFS_STAT/2] );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
- return -EIO;
- }
-
- /* Determine what port this packet is for */
- port = ( hfs_stat >> 8 ) & 0x0007;
- DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
-
- pktlen = GET_BUF_CNT(desc_next);
- if (pktlen != 0) {
- skb = ALLOC_SKB(pktlen);
- if (skb != NULL) {
- switch( port ) {
+ if (lp->useRTS == 1) {
+ DBG_PRINT("RTS: We're getting an Rx...\n");
+ return -EIO;
+ }
+#endif /* USE_RTS */
+
+ /*
+ *if( lp->dma.status == 0 )
+ *{
+ */
+ desc = hcf_dma_rx_get(&(lp->hcfCtx));
+
+ if (desc != NULL) {
+ /* Check and see if we rcvd. a WMP frame */
+ /*
+ if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
+ ( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
+ {
+ DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
+
+ x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
+ x.typ = CFG_MB_INFO;
+ x.base_typ = CFG_WMP;
+ x.frag_cnt = 2;
+ x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
+ x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
+ x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
+ x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
+
+ hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
+ }
+ */
+
+ desc_next = desc->next_desc_addr;
+
+ /* Make sure the buffer isn't empty */
+ if (GET_BUF_CNT(desc) == 0) {
+ DBG_WARNING(DbgInfo, "Buffer is empty!\n");
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+ return -EIO;
+ }
+
+ /* Read the HFS_STAT register from the lookahead buffer */
+ hfs_stat = (hcf_16) (desc->buf_addr[HFS_STAT / 2]);
+
+ /* Make sure the frame isn't bad */
+ if ((hfs_stat & HFS_STAT_ERR) != HCF_SUCCESS) {
+ DBG_WARNING(DbgInfo,
+ "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
+ desc->buf_addr[HFS_STAT / 2]);
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+ return -EIO;
+ }
+
+ /* Determine what port this packet is for */
+ port = (hfs_stat >> 8) & 0x0007;
+ DBG_RX(DbgInfo, "Rx frame for port %d\n", port);
+
+ pktlen = GET_BUF_CNT(desc_next);
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
+ switch (port) {
#ifdef USE_WDS
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- skb->dev = lp->wds_port[port-1].dev;
- break;
-#endif /* USE_WDS */
-
- case 0:
- default:
- skb->dev = dev;
- break;
- }
-
- GET_PACKET_DMA( skb->dev, skb, pktlen );
-
- /* Give the descriptor back to the HCF */
- hcf_dma_rx_put( &( lp->hcfCtx ), desc );
-
- netif_rx( skb );
-
- if( port == 0 ) {
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
- }
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ skb->dev =
+ lp->wds_port[port - 1].dev;
+ break;
+#endif /* USE_WDS */
+
+ case 0:
+ default:
+ skb->dev = dev;
+ break;
+ }
+
+ GET_PACKET_DMA(skb->dev, skb, pktlen);
+
+ /* Give the descriptor back to the HCF */
+ hcf_dma_rx_put(&(lp->hcfCtx), desc);
+
+ netif_rx(skb);
+
+ if (port == 0) {
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ }
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_packets++;
- lp->wds_port[port-1].stats.rx_bytes += pktlen;
- }
-#endif /* USE_WDS */
-
- dev->last_rx = jiffies;
-
- } else {
- DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
-
- if( port == 0 )
- {
- lp->stats.rx_dropped++;
- }
+ else {
+ lp->wds_port[port -
+ 1].stats.
+ rx_packets++;
+ lp->wds_port[port -
+ 1].stats.
+ rx_bytes += pktlen;
+ }
+#endif /* USE_WDS */
+
+ dev->last_rx = jiffies;
+
+ } else {
+ DBG_ERROR(DbgInfo,
+ "Could not alloc skb\n");
+
+ if (port == 0)
+ lp->stats.rx_dropped++;
#ifdef USE_WDS
- else
- {
- lp->wds_port[port-1].stats.rx_dropped++;
- }
-#endif /* USE_WDS */
- }
- }
- }
- //}
- }
-
- return 0;
-} // wl_rx_dma
+ else {
+ lp->wds_port[port -
+ 1].stats.
+ rx_dropped++;
+ }
+#endif /* USE_WDS */
+ }
+ }
+ }
+ /*}*/
+ }
+
+ return 0;
+} /* wl_rx_dma */
+
/*============================================================================*/
-#endif // ENABLE_DMA
+#endif /* ENABLE_DMA */
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 4ca6e42ecd7e..75019c171d57 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -161,43 +161,6 @@ int dbm( int value )
-
-/*******************************************************************************
- * percent()
- *******************************************************************************
- *
- * DESCRIPTION:
- *
- * Return a value as a percentage of min to max.
- *
- * PARAMETERS:
- *
- * value - the value in question
- * min - the minimum range value
- * max - the maximum range value
- *
- * RETURNS:
- *
- * the percentage value
- *
- ******************************************************************************/
-int percent( int value, int min, int max )
-{
- /* Truncate the value to be between min and max. */
- if( value < min )
- value = min;
-
- if( value > max )
- value = max;
-
- /* Return the value as a percentage of min to max. */
- return ((( value - min ) * 100 ) / ( max - min ));
-} // percent
-/*============================================================================*/
-
-
-
-
/*******************************************************************************
* is_valid_key_string()
*******************************************************************************
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..49eeeaee664b 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
@@ -3354,7 +3354,7 @@ void wl_wext_event_essid( struct net_device *dev )
the call to wireless_send_event() must also point to where the ESSID
lives */
wrqu.essid.length = strlen( lp->NetworkName );
- wrqu.essid.pointer = (caddr_t)lp->NetworkName;
+ wrqu.essid.pointer = (void __user *)lp->NetworkName;
wrqu.essid.flags = 1;
wireless_send_event( dev, SIOCSIWESSID, &wrqu, lp->NetworkName );
@@ -3419,7 +3419,7 @@ void wl_wext_event_encode( struct net_device *dev )
/* Only provide the key if permissions allow */
if( capable( CAP_NET_ADMIN )) {
- wrqu.encoding.pointer = (caddr_t)lp->DefaultKeys.key[index].key;
+ wrqu.encoding.pointer = (void __user *)lp->DefaultKeys.key[index].key;
wrqu.encoding.length = lp->DefaultKeys.key[index].len;
} else {
wrqu.encoding.flags |= IW_ENCODE_NOKEY;
@@ -3778,7 +3778,7 @@ static const iw_handler wl_private_handler[] =
#endif
};
-struct iw_priv_args wl_priv_args[] = {
+static struct iw_priv_args wl_priv_args[] = {
{SIOCSIWNETNAME, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, 0, "snetwork_name" },
{SIOCGIWNETNAME, 0, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, "gnetwork_name" },
{SIOCSIWSTANAME, IW_PRIV_TYPE_CHAR | HCF_MAX_NAME_LEN, 0, "sstation_name" },
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a7d24c95191d..f76f95c29617 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -400,6 +400,8 @@ static int prism2_scan(struct wiphy *wiphy,
numbss = msg1.numbss.data;
for (i = 0; i < numbss; i++) {
+ int freq;
+
memset(&msg2, 0, sizeof(msg2));
msg2.msgcode = DIDmsg_dot11req_scan_results;
msg2.bssindex.data = i;
@@ -414,9 +416,10 @@ static int prism2_scan(struct wiphy *wiphy,
ie_buf[1] = msg2.ssid.data.len;
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+ freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
+ IEEE80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
- ieee80211_get_channel(wiphy,
- ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+ ieee80211_get_channel(wiphy, freq),
(const u8 *) &(msg2.bssid.data.data),
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
@@ -746,7 +749,7 @@ static const struct cfg80211_ops prism2_usb_cfg_ops = {
/* Functions to create/free wiphy interface */
-struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
+static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
{
struct wiphy *wiphy;
struct prism2_wiphy_private *priv;
@@ -783,7 +786,7 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
}
-void wlan_free_wiphy(struct wiphy *wiphy)
+static void wlan_free_wiphy(struct wiphy *wiphy)
{
wiphy_unregister(wiphy);
wiphy_free(wiphy);
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 333a2f693e49..1f2c78cc0086 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -531,14 +531,14 @@ typedef struct hfa384x_rx_frame {
u16 reserved2;
/*-- 802.11 Header Information (802.11 byte order) --*/
- u16 frame_control;
+ __le16 frame_control;
u16 duration_id;
u8 address1[6];
u8 address2[6];
u8 address3[6];
u16 sequence_control;
u8 address4[6];
- u16 data_len; /* hfa384x (little endian) format */
+ __le16 data_len; /* hfa384x (little endian) format */
/*-- 802.3 Header Information --*/
u8 dest_addr[6];
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 2610824d36d7..3dd066ac034e 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -91,7 +91,7 @@
* fall at the end of their respective frames).
* 5a) The length field is set to include the last of the fixed and fixed
* length fields. It may have to be updated for optional or variable
-* length information elements.
+* length information elements.
* 6) Optional and variable length information elements are special cases
* and must be handled individually by the client code.
* --------------------------------------------------------------------
@@ -219,82 +219,82 @@
/*-- Information Element Types --------------------*/
/* prototype structure, all IEs start with these members */
-typedef struct wlan_ie {
+struct wlan_ie {
u8 eid;
u8 len;
-} __packed wlan_ie_t;
+} __packed;
/*-- Service Set Identity (SSID) -----------------*/
-typedef struct wlan_ie_ssid {
+struct wlan_ie_ssid {
u8 eid;
u8 len;
u8 ssid[1]; /* may be zero, ptrs may overlap */
-} __packed wlan_ie_ssid_t;
+} __packed;
/*-- Supported Rates -----------------------------*/
-typedef struct wlan_ie_supp_rates {
+struct wlan_ie_supp_rates {
u8 eid;
u8 len;
u8 rates[1]; /* had better be at LEAST one! */
-} __packed wlan_ie_supp_rates_t;
+} __packed;
/*-- FH Parameter Set ----------------------------*/
-typedef struct wlan_ie_fh_parms {
+struct wlan_ie_fh_parms {
u8 eid;
u8 len;
u16 dwell;
u8 hopset;
u8 hoppattern;
u8 hopindex;
-} __packed wlan_ie_fh_parms_t;
+} __packed;
/*-- DS Parameter Set ----------------------------*/
-typedef struct wlan_ie_ds_parms {
+struct wlan_ie_ds_parms {
u8 eid;
u8 len;
u8 curr_ch;
-} __packed wlan_ie_ds_parms_t;
+} __packed;
/*-- CF Parameter Set ----------------------------*/
-typedef struct wlan_ie_cf_parms {
+struct wlan_ie_cf_parms {
u8 eid;
u8 len;
u8 cfp_cnt;
u8 cfp_period;
u16 cfp_maxdur;
u16 cfp_durremaining;
-} __packed wlan_ie_cf_parms_t;
+} __packed;
/*-- TIM ------------------------------------------*/
-typedef struct wlan_ie_tim {
+struct wlan_ie_tim {
u8 eid;
u8 len;
u8 dtim_cnt;
u8 dtim_period;
u8 bitmap_ctl;
u8 virt_bm[1];
-} __packed wlan_ie_tim_t;
+} __packed;
/*-- IBSS Parameter Set ---------------------------*/
-typedef struct wlan_ie_ibss_parms {
+struct wlan_ie_ibss_parms {
u8 eid;
u8 len;
u16 atim_win;
-} __packed wlan_ie_ibss_parms_t;
+} __packed;
/*-- Challenge Text ------------------------------*/
-typedef struct wlan_ie_challenge {
+struct wlan_ie_challenge {
u8 eid;
u8 len;
u8 challenge[1];
-} __packed wlan_ie_challenge_t;
+} __packed;
/*-------------------------------------------------*/
/* Frame Types */
/* prototype structure, all mgmt frame types will start with these members */
-typedef struct wlan_fr_mgmt {
+struct wlan_fr_mgmt {
u16 type;
u16 len; /* DOES NOT include CRC !!!! */
u8 *buf;
@@ -303,10 +303,10 @@ typedef struct wlan_fr_mgmt {
void *priv;
/*-- fixed fields -----------*/
/*-- info elements ----------*/
-} wlan_fr_mgmt_t;
+};
/*-- Beacon ---------------------------------------*/
-typedef struct wlan_fr_beacon {
+struct wlan_fr_beacon {
u16 type;
u16 len;
u8 *buf;
@@ -318,18 +318,18 @@ typedef struct wlan_fr_beacon {
u16 *bcn_int;
u16 *cap_info;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
- wlan_ie_fh_parms_t *fh_parms;
- wlan_ie_ds_parms_t *ds_parms;
- wlan_ie_cf_parms_t *cf_parms;
- wlan_ie_ibss_parms_t *ibss_parms;
- wlan_ie_tim_t *tim;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
+ struct wlan_ie_fh_parms *fh_parms;
+ struct wlan_ie_ds_parms *ds_parms;
+ struct wlan_ie_cf_parms *cf_parms;
+ struct wlan_ie_ibss_parms *ibss_parms;
+ struct wlan_ie_tim *tim;
-} wlan_fr_beacon_t;
+};
/*-- IBSS ATIM ------------------------------------*/
-typedef struct wlan_fr_ibssatim {
+struct wlan_fr_ibssatim {
u16 type;
u16 len;
u8 *buf;
@@ -342,10 +342,10 @@ typedef struct wlan_fr_ibssatim {
/* this frame type has a null body */
-} wlan_fr_ibssatim_t;
+};
/*-- Disassociation -------------------------------*/
-typedef struct wlan_fr_disassoc {
+struct wlan_fr_disassoc {
u16 type;
u16 len;
u8 *buf;
@@ -357,10 +357,10 @@ typedef struct wlan_fr_disassoc {
/*-- info elements ----------*/
-} wlan_fr_disassoc_t;
+};
/*-- Association Request --------------------------*/
-typedef struct wlan_fr_assocreq {
+struct wlan_fr_assocreq {
u16 type;
u16 len;
u8 *buf;
@@ -371,13 +371,13 @@ typedef struct wlan_fr_assocreq {
u16 *cap_info;
u16 *listen_int;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_assocreq_t;
+};
/*-- Association Response -------------------------*/
-typedef struct wlan_fr_assocresp {
+struct wlan_fr_assocresp {
u16 type;
u16 len;
u8 *buf;
@@ -389,12 +389,12 @@ typedef struct wlan_fr_assocresp {
u16 *status;
u16 *aid;
/*-- info elements ----------*/
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_assocresp_t;
+};
/*-- Reassociation Request ------------------------*/
-typedef struct wlan_fr_reassocreq {
+struct wlan_fr_reassocreq {
u16 type;
u16 len;
u8 *buf;
@@ -406,13 +406,13 @@ typedef struct wlan_fr_reassocreq {
u16 *listen_int;
u8 *curr_ap;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_reassocreq_t;
+};
/*-- Reassociation Response -----------------------*/
-typedef struct wlan_fr_reassocresp {
+struct wlan_fr_reassocresp {
u16 type;
u16 len;
u8 *buf;
@@ -424,12 +424,12 @@ typedef struct wlan_fr_reassocresp {
u16 *status;
u16 *aid;
/*-- info elements ----------*/
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_reassocresp_t;
+};
/*-- Probe Request --------------------------------*/
-typedef struct wlan_fr_probereq {
+struct wlan_fr_probereq {
u16 type;
u16 len;
u8 *buf;
@@ -438,13 +438,13 @@ typedef struct wlan_fr_probereq {
void *priv;
/*-- fixed fields -----------*/
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
-} wlan_fr_probereq_t;
+};
/*-- Probe Response -------------------------------*/
-typedef struct wlan_fr_proberesp {
+struct wlan_fr_proberesp {
u16 type;
u16 len;
u8 *buf;
@@ -456,16 +456,16 @@ typedef struct wlan_fr_proberesp {
u16 *bcn_int;
u16 *cap_info;
/*-- info elements ----------*/
- wlan_ie_ssid_t *ssid;
- wlan_ie_supp_rates_t *supp_rates;
- wlan_ie_fh_parms_t *fh_parms;
- wlan_ie_ds_parms_t *ds_parms;
- wlan_ie_cf_parms_t *cf_parms;
- wlan_ie_ibss_parms_t *ibss_parms;
-} wlan_fr_proberesp_t;
+ struct wlan_ie_ssid *ssid;
+ struct wlan_ie_supp_rates *supp_rates;
+ struct wlan_ie_fh_parms *fh_parms;
+ struct wlan_ie_ds_parms *ds_parms;
+ struct wlan_ie_cf_parms *cf_parms;
+ struct wlan_ie_ibss_parms *ibss_parms;
+};
/*-- Authentication -------------------------------*/
-typedef struct wlan_fr_authen {
+struct wlan_fr_authen {
u16 type;
u16 len;
u8 *buf;
@@ -477,12 +477,12 @@ typedef struct wlan_fr_authen {
u16 *auth_seq;
u16 *status;
/*-- info elements ----------*/
- wlan_ie_challenge_t *challenge;
+ struct wlan_ie_challenge *challenge;
-} wlan_fr_authen_t;
+};
/*-- Deauthentication ----------------------------*/
-typedef struct wlan_fr_deauthen {
+struct wlan_fr_deauthen {
u16 type;
u16 len;
u8 *buf;
@@ -494,27 +494,27 @@ typedef struct wlan_fr_deauthen {
/*-- info elements ----------*/
-} wlan_fr_deauthen_t;
-
-void wlan_mgmt_encode_beacon(wlan_fr_beacon_t *f);
-void wlan_mgmt_decode_beacon(wlan_fr_beacon_t *f);
-void wlan_mgmt_encode_disassoc(wlan_fr_disassoc_t *f);
-void wlan_mgmt_decode_disassoc(wlan_fr_disassoc_t *f);
-void wlan_mgmt_encode_assocreq(wlan_fr_assocreq_t *f);
-void wlan_mgmt_decode_assocreq(wlan_fr_assocreq_t *f);
-void wlan_mgmt_encode_assocresp(wlan_fr_assocresp_t *f);
-void wlan_mgmt_decode_assocresp(wlan_fr_assocresp_t *f);
-void wlan_mgmt_encode_reassocreq(wlan_fr_reassocreq_t *f);
-void wlan_mgmt_decode_reassocreq(wlan_fr_reassocreq_t *f);
-void wlan_mgmt_encode_reassocresp(wlan_fr_reassocresp_t *f);
-void wlan_mgmt_decode_reassocresp(wlan_fr_reassocresp_t *f);
-void wlan_mgmt_encode_probereq(wlan_fr_probereq_t *f);
-void wlan_mgmt_decode_probereq(wlan_fr_probereq_t *f);
-void wlan_mgmt_encode_proberesp(wlan_fr_proberesp_t *f);
-void wlan_mgmt_decode_proberesp(wlan_fr_proberesp_t *f);
-void wlan_mgmt_encode_authen(wlan_fr_authen_t *f);
-void wlan_mgmt_decode_authen(wlan_fr_authen_t *f);
-void wlan_mgmt_encode_deauthen(wlan_fr_deauthen_t *f);
-void wlan_mgmt_decode_deauthen(wlan_fr_deauthen_t *f);
+};
+
+void wlan_mgmt_encode_beacon(struct wlan_fr_beacon *f);
+void wlan_mgmt_decode_beacon(struct wlan_fr_beacon *f);
+void wlan_mgmt_encode_disassoc(struct wlan_fr_disassoc *f);
+void wlan_mgmt_decode_disassoc(struct wlan_fr_disassoc *f);
+void wlan_mgmt_encode_assocreq(struct wlan_fr_assocreq *f);
+void wlan_mgmt_decode_assocreq(struct wlan_fr_assocreq *f);
+void wlan_mgmt_encode_assocresp(struct wlan_fr_assocresp *f);
+void wlan_mgmt_decode_assocresp(struct wlan_fr_assocresp *f);
+void wlan_mgmt_encode_reassocreq(struct wlan_fr_reassocreq *f);
+void wlan_mgmt_decode_reassocreq(struct wlan_fr_reassocreq *f);
+void wlan_mgmt_encode_reassocresp(struct wlan_fr_reassocresp *f);
+void wlan_mgmt_decode_reassocresp(struct wlan_fr_reassocresp *f);
+void wlan_mgmt_encode_probereq(struct wlan_fr_probereq *f);
+void wlan_mgmt_decode_probereq(struct wlan_fr_probereq *f);
+void wlan_mgmt_encode_proberesp(struct wlan_fr_proberesp *f);
+void wlan_mgmt_decode_proberesp(struct wlan_fr_proberesp *f);
+void wlan_mgmt_encode_authen(struct wlan_fr_authen *f);
+void wlan_mgmt_decode_authen(struct wlan_fr_authen *f);
+void wlan_mgmt_encode_deauthen(struct wlan_fr_deauthen *f);
+void wlan_mgmt_decode_deauthen(struct wlan_fr_deauthen *f);
#endif /* _P80211MGMT_H */
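The block above replaces every *_t typedef with a plain struct tag, in line with the kernel coding style's advice against typedefs for plain structures: the type is spelled out at each point of use instead of being hidden behind an alias. A small sketch of the resulting call-site style (the function and variable are invented for illustration):

	#include <linux/printk.h>
	#include "p80211mgmt.h"

	/* Hypothetical caller: decode a received beacon frame descriptor. */
	static void example_handle_beacon(struct wlan_fr_beacon *f)
	{
		struct wlan_ie_ssid *ssid;

		wlan_mgmt_decode_beacon(f);

		/* IE pointers are now declared with explicit struct tags. */
		ssid = f->ssid;
		if (ssid && ssid->len)
			pr_debug("beacon SSID length: %u\n", ssid->len);
	}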
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 0039e082507d..e3ae8024d3a4 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -262,7 +262,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
struct sk_buff *skb = NULL;
netdevice_t *dev = wlandev->netdev;
struct p80211_hdr_a3 *hdr;
- u16 fc;
/* Let's empty our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
@@ -286,8 +285,7 @@ static void p80211netdev_rx_bh(unsigned long arg)
continue;
} else {
hdr = (struct p80211_hdr_a3 *) skb->data;
- fc = le16_to_cpu(hdr->fc);
- if (p80211_rx_typedrop(wlandev, fc)) {
+ if (p80211_rx_typedrop(wlandev, hdr->fc)) {
dev_kfree_skb(skb);
continue;
}
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 0dfd2a4933ef..2b0c23587dfc 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -201,7 +201,7 @@ static int validate_identity(void);
* 0 - success
* ~0 - failure
----------------------------------------------------------------*/
-int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
+static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
{
const struct firmware *fw_entry = NULL;
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 2199f5afbf90..f9ccf2371dba 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -454,9 +454,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
*/
result = hfa384x_drvr_start(hw);
if (result) {
- printk(KERN_ERR
- "hfa384x_drvr_start() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "hfa384x_drvr_start() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
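The printk(KERN_ERR ...) and printk(KERN_INFO ...) calls in this file are being converted to netdev_err()/netdev_info(), which prefix each message with the interface name so logs from multiple adapters stay distinguishable. A hedged sketch of the pattern (the helper is illustrative, not from the patch):

	#include <linux/netdevice.h>

	/* Hypothetical helper: report a firmware start failure for one netdev. */
	static void report_drvr_start_failure(struct net_device *ndev, int result)
	{
		/* Logged as e.g. "wlan0: hfa384x_drvr_start() failed, result=-5",
		 * whereas a bare printk(KERN_ERR ...) carries no device context. */
		netdev_err(ndev, "hfa384x_drvr_start() failed, result=%d\n", result);
	}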
@@ -499,9 +498,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
*/
result = hfa384x_drvr_start(hw);
if (result) {
- printk(KERN_ERR
- "hfa384x_drvr_start() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "hfa384x_drvr_start() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
@@ -510,9 +508,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
result = prism2sta_getcardinfo(wlandev);
if (result) {
- printk(KERN_ERR
- "prism2sta_getcardinfo() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "prism2sta_getcardinfo() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
@@ -521,9 +518,8 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
}
result = prism2sta_globalsetup(wlandev);
if (result) {
- printk(KERN_ERR
- "prism2sta_globalsetup() failed,"
- "result=%d\n", (int)result);
+ netdev_err(wlandev->netdev,
+ "prism2sta_globalsetup() failed,result=%d\n", (int)result);
result =
P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
@@ -623,7 +619,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_nic,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve NICIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve NICIDENTITY\n");
goto failed;
}
@@ -633,7 +629,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_nic.major = le16_to_cpu(hw->ident_nic.major);
hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
- printk(KERN_INFO "ident: nic h/w: id=0x%02x %d.%d.%d\n",
+ netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
hw->ident_nic.id, hw->ident_nic.major,
hw->ident_nic.minor, hw->ident_nic.variant);
@@ -642,7 +638,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_pri_fw,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRIIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRIIDENTITY\n");
goto failed;
}
@@ -652,7 +648,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_pri_fw.major = le16_to_cpu(hw->ident_pri_fw.major);
hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
- printk(KERN_INFO "ident: pri f/w: id=0x%02x %d.%d.%d\n",
+ netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
hw->ident_pri_fw.id, hw->ident_pri_fw.major,
hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
@@ -661,12 +657,12 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->ident_sta_fw,
sizeof(hfa384x_compident_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STAIDENTITY\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STAIDENTITY\n");
goto failed;
}
if (hw->ident_nic.id < 0x8000) {
- printk(KERN_ERR
+ netdev_err(wlandev->netdev,
"FATAL: Card is not an Intersil Prism2/2.5/3\n");
result = -1;
goto failed;
@@ -683,16 +679,16 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->ident_sta_fw.variant &= ~((u16) (BIT(14) | BIT(15)));
if (hw->ident_sta_fw.id == 0x1f) {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"ident: sta f/w: id=0x%02x %d.%d.%d\n",
hw->ident_sta_fw.id, hw->ident_sta_fw.major,
hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
} else {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"ident: ap f/w: id=0x%02x %d.%d.%d\n",
hw->ident_sta_fw.id, hw->ident_sta_fw.major,
hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
- printk(KERN_ERR "Unsupported Tertiary AP firmeare loaded!\n");
+ netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmeare loaded!\n");
goto failed;
}
@@ -701,7 +697,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_mfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve MFISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve MFISUPRANGE\n");
goto failed;
}
@@ -713,7 +709,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_mfi.bottom = le16_to_cpu(hw->cap_sup_mfi.bottom);
hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
@@ -724,7 +720,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve CFISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve CFISUPRANGE\n");
goto failed;
}
@@ -736,7 +732,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_cfi.bottom = le16_to_cpu(hw->cap_sup_cfi.bottom);
hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
@@ -747,7 +743,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_pri,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRISUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRISUPRANGE\n");
goto failed;
}
@@ -759,7 +755,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_pri.bottom = le16_to_cpu(hw->cap_sup_pri.bottom);
hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_pri.role, hw->cap_sup_pri.id,
hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
@@ -770,7 +766,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_sup_sta,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STASUPRANGE\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STASUPRANGE\n");
goto failed;
}
@@ -783,13 +779,13 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_sup_sta.top = le16_to_cpu(hw->cap_sup_sta.top);
if (hw->cap_sup_sta.id == 0x04) {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_sta.role, hw->cap_sup_sta.id,
hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
hw->cap_sup_sta.top);
} else {
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_sup_sta.role, hw->cap_sup_sta.id,
hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
@@ -801,7 +797,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_pri_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve PRI_CFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve PRI_CFIACTRANGES\n");
goto failed;
}
@@ -813,7 +809,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_pri_cfi.bottom = le16_to_cpu(hw->cap_act_pri_cfi.bottom);
hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
@@ -824,7 +820,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_sta_cfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STA_CFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STA_CFIACTRANGES\n");
goto failed;
}
@@ -836,7 +832,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_sta_cfi.bottom = le16_to_cpu(hw->cap_act_sta_cfi.bottom);
hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
@@ -847,7 +843,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
&hw->cap_act_sta_mfi,
sizeof(hfa384x_caplevel_t));
if (result) {
- printk(KERN_ERR "Failed to retrieve STA_MFIACTRANGES\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve STA_MFIACTRANGES\n");
goto failed;
}
@@ -859,7 +855,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
hw->cap_act_sta_mfi.bottom = le16_to_cpu(hw->cap_act_sta_mfi.bottom);
hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
@@ -871,9 +867,9 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
if (!result) {
wlan_mkprintstr(snum, HFA384x_RID_NICSERIALNUMBER_LEN,
pstr, sizeof(pstr));
- printk(KERN_INFO "Prism2 card SN: %s\n", pstr);
+ netdev_info(wlandev->netdev, "Prism2 card SN: %s\n", pstr);
} else {
- printk(KERN_ERR "Failed to retrieve Prism2 Card SN\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve Prism2 Card SN\n");
goto failed;
}
@@ -881,7 +877,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CNFOWNMACADDR,
wlandev->netdev->dev_addr, ETH_ALEN);
if (result != 0) {
- printk(KERN_ERR "Failed to retrieve mac address\n");
+ netdev_err(wlandev->netdev, "Failed to retrieve mac address\n");
goto failed;
}
@@ -909,7 +905,7 @@ static int prism2sta_getcardinfo(wlandevice_t *wlandev)
goto done;
failed:
- printk(KERN_ERR "Failed, result=%d\n", result);
+ netdev_err(wlandev->netdev, "Failed, result=%d\n", result);
done:
return result;
}
@@ -1085,7 +1081,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
HFA384x_RID_JOINREQUEST,
&joinreq, HFA384x_RID_JOINREQUEST_LEN);
if (result) {
- printk(KERN_ERR "setconfig(joinreq) failed, result=%d\n",
+ netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
result);
}
}
@@ -1226,7 +1222,7 @@ void prism2sta_processing_defer(struct work_struct *data)
*/
netif_carrier_off(wlandev->netdev);
- printk(KERN_INFO "linkstatus=NOTCONNECTED (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=NOTCONNECTED (unhandled)\n");
break;
case HFA384x_LINK_CONNECTED:
@@ -1253,7 +1249,7 @@ void prism2sta_processing_defer(struct work_struct *data)
if (wlandev->netdev->type == ARPHRD_ETHER) {
u16 portstatus;
- printk(KERN_INFO "linkstatus=CONNECTED\n");
+ netdev_info(wlandev->netdev, "linkstatus=CONNECTED\n");
/* For non-usb devices, we can use the sync versions */
/* Collect the BSSID, and set state to allow tx */
@@ -1315,7 +1311,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Block Transmits, Ignore receives of data frames
*/
if (wlandev->netdev->type == ARPHRD_ETHER)
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"linkstatus=DISCONNECTED (unhandled)\n");
wlandev->macmode = WLAN_MACMODE_NONE;
@@ -1341,7 +1337,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Indicate Reassociation
* Enable Transmits, Receives and pass up data frames
*/
- printk(KERN_INFO "linkstatus=AP_CHANGE\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_CHANGE\n");
result = hfa384x_drvr_getconfig(hw,
HFA384x_RID_CURRENTBSSID,
@@ -1383,7 +1379,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Response:
* Block Transmits, Ignore receives of data frames
*/
- printk(KERN_INFO "linkstatus=AP_OUTOFRANGE (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_OUTOFRANGE (unhandled)\n");
netif_carrier_off(wlandev->netdev);
@@ -1396,7 +1392,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* Response:
* Enable Transmits, Receives and pass up data frames
*/
- printk(KERN_INFO "linkstatus=AP_INRANGE\n");
+ netdev_info(wlandev->netdev, "linkstatus=AP_INRANGE\n");
hw->link_status = HFA384x_LINK_CONNECTED;
netif_carrier_on(wlandev->netdev);
@@ -1420,10 +1416,10 @@ void prism2sta_processing_defer(struct work_struct *data)
HFA384x_RID_JOINREQUEST,
&joinreq,
HFA384x_RID_JOINREQUEST_LEN);
- printk(KERN_INFO
+ netdev_info(wlandev->netdev,
"linkstatus=ASSOCFAIL (re-submitting join)\n");
} else {
- printk(KERN_INFO "linkstatus=ASSOCFAIL (unhandled)\n");
+ netdev_info(wlandev->netdev, "linkstatus=ASSOCFAIL (unhandled)\n");
}
netif_carrier_off(wlandev->netdev);
@@ -1713,7 +1709,7 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
if (result) {
if (added)
hw->authlist.cnt--;
- printk(KERN_ERR
+ netdev_err(wlandev->netdev,
"setconfig(authenticatestation) failed, result=%d\n",
result);
}
@@ -1928,7 +1924,7 @@ static wlandevice_t *create_wlan(void)
hw = kzalloc(sizeof(hfa384x_t), GFP_KERNEL);
if (!wlandev || !hw) {
- printk(KERN_ERR "%s: Memory allocation failure.\n", dev_info);
+ pr_err("%s: Memory allocation failure.\n", dev_info);
kfree(wlandev);
kfree(hw);
return NULL;
@@ -1980,7 +1976,7 @@ void prism2sta_commsqual_defer(struct work_struct *data)
&hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);
if (result) {
- printk(KERN_ERR "error fetching commsqual\n");
+ netdev_err(wlandev->netdev, "error fetching commsqual\n");
return;
}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index b3ff603e6225..466804687fc0 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1754,8 +1754,7 @@ static int xgifb_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "Unable request memory size %x\n",
xgifb_info->video_size);
dev_err(&pdev->dev,
- "Fatal error: Unable to reserve frame buffer memory. "
- "Is there another framebuffer driver active?\n");
+ "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error_disable;
}
@@ -2087,23 +2086,19 @@ static struct pci_driver xgifb_driver = {
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode,
- "Selects the desired default display mode in the format XxYxDepth "
- "(eg. 1024x768x16).");
+ "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
module_param(forcecrt2type, charp, 0);
MODULE_PARM_DESC(forcecrt2type,
- "Force the second display output type. Possible values are NONE, "
- "LCD, TV, VGA, SVIDEO or COMPOSITE.");
+ "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
module_param(vesa, int, 0);
MODULE_PARM_DESC(vesa,
- "Selects the desired default display mode by VESA mode number "
- "(eg. 0x117).");
+ "Selects the desired default display mode by VESA mode number (eg. 0x117).");
module_param(filter, int, 0);
MODULE_PARM_DESC(filter,
- "Selects TV flicker filter type (only for systems with a SiS301 video bridge). "
- "Possible values 0-7. Default: [no filter]).");
+ "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
static int __init xgifb_init(void)
{
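The string joins above rely on the fact that adjacent C string literals are concatenated by the compiler, so the change is invisible at runtime; the point is that user-visible strings should stay on one line so the full message can be grepped for verbatim (checkpatch's "quoted string split across lines" warning). A tiny illustrative sketch, in ordinary userspace C for brevity:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Both forms yield the identical string; only the source
		 * layout differs, and the single-line form is grep-friendly. */
		const char *split = "Selects the desired default display mode "
				    "by VESA mode number (eg. 0x117).";
		const char *joined = "Selects the desired default display mode by VESA mode number (eg. 0x117).";

		printf("%d\n", strcmp(split, joined) == 0);
		return 0;
	}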
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
index 75c38c8c26eb..b53bdf12da0d 100644
--- a/drivers/staging/xillybus/Kconfig
+++ b/drivers/staging/xillybus/Kconfig
@@ -5,6 +5,7 @@
config XILLYBUS
tristate "Xillybus generic FPGA interface"
depends on PCI || (OF_ADDRESS && OF_IRQ)
+ select CRC32
help
Xillybus is a generic interface for peripherals designed on
programmable logic (FPGA). The driver probes the hardware for
@@ -16,7 +17,7 @@ if XILLYBUS
config XILLYBUS_PCIE
tristate "Xillybus over PCIe"
- depends on PCI
+ depends on PCI_MSI
help
Set to M if you want Xillybus to use PCI Express for communicating
with the FPGA.
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index 2ebaf166038c..b0a6696f2da0 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -2318,8 +2318,12 @@ static int __init xillybus_init(void)
}
xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ if (!xillybus_wq) {
+ class_destroy(xillybus_class);
+ rc = -ENOMEM;
+ }
- return 0; /* Success */
+ return rc;
}
static void __exit xillybus_exit(void)
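The hunk above plugs a missing failure check: if alloc_workqueue() returned NULL, the function previously reported success and left the driver half-initialized. A hedged sketch of the resulting control flow (the class_create() call and surrounding details are assumed from context, not shown in this patch):

	/* Sketch of the fixed error path only, not the full xillybus_init(). */
	static int __init xillybus_init_sketch(void)
	{
		int rc = 0;

		xillybus_class = class_create(THIS_MODULE, xillyname);
		if (IS_ERR(xillybus_class))
			return PTR_ERR(xillybus_class);

		xillybus_wq = alloc_workqueue(xillyname, 0, 0);
		if (!xillybus_wq) {
			/* undo the class registration before reporting failure */
			class_destroy(xillybus_class);
			rc = -ENOMEM;
		}

		return rc;
	}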
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
deleted file mode 100644
index 765d790ae831..000000000000
--- a/drivers/staging/zram/zram.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-zram: Compressed RAM based block devices
-----------------------------------------
-
-Project home: http://compcache.googlecode.com/
-
-* Introduction
-
-The zram module creates RAM based block devices named /dev/zram<id>
-(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
-in memory itself. These disks allow very fast I/O and compression provides
-good amounts of memory savings. Some of the usecases include /tmp storage,
-use as swap disks, various caches under /var and maybe many more :)
-
-Statistics for individual zram devices are exported through sysfs nodes at
-/sys/block/zram<id>/
-
-* Usage
-
-Following shows a typical sequence of steps for using zram.
-
-1) Load Module:
- modprobe zram num_devices=4
- This creates 4 devices: /dev/zram{0,1,2,3}
- (num_devices parameter is optional. Default: 1)
-
-2) Set Disksize
- Set disk size by writing the value to sysfs node 'disksize'.
- The value can be either in bytes or you can use mem suffixes.
- Examples:
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- # Using mem suffixes
- echo 256K > /sys/block/zram0/disksize
- echo 512M > /sys/block/zram0/disksize
- echo 1G > /sys/block/zram0/disksize
-
-3) Activate:
- mkswap /dev/zram0
- swapon /dev/zram0
-
- mkfs.ext4 /dev/zram1
- mount /dev/zram1 /tmp
-
-4) Stats:
- Per-device statistics are exported as various nodes under
- /sys/block/zram<id>/
- disksize
- num_reads
- num_writes
- invalid_io
- notify_free
- discard
- zero_pages
- orig_data_size
- compr_data_size
- mem_used_total
-
-5) Deactivate:
- swapoff /dev/zram0
- umount /dev/zram1
-
-6) Reset:
- Write any positive value to 'reset' sysfs node
- echo 1 > /sys/block/zram0/reset
- echo 1 > /sys/block/zram1/reset
-
- This frees all the memory allocated for the given device and
- resets the disksize to zero. You must set the disksize again
- before reusing the device.
-
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
-Nitin Gupta
-ngupta@vflare.org
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
deleted file mode 100644
index 9d1f2a24ad62..000000000000
--- a/drivers/staging/zsmalloc/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-config ZSMALLOC
- bool "Memory allocator for compressed pages"
- depends on MMU
- default n
- help
- zsmalloc is a slab-based memory allocator designed to store
- compressed RAM pages. zsmalloc uses virtual memory mapping
- in order to reduce fragmentation. However, this results in a
- non-standard allocator interface where a handle, not a pointer, is
- returned by an alloc(). This handle must be mapped in order to
- access the allocated space.
-
-config PGTABLE_MAPPING
- bool "Use page table mapping to access object in zsmalloc"
- depends on ZSMALLOC
- help
- By default, zsmalloc uses a copy-based object mapping method to
- access allocations that span two pages. However, if a particular
- architecture (ex, ARM) performs VM mapping faster than copying,
- then you should select this. This causes zsmalloc to use page table
- mapping rather than copying for object mapping.
-
- You can check speed with zsmalloc benchmark[1].
- [1] https://github.com/spartacus06/zsmalloc
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848a590d..000000000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y := zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
deleted file mode 100644
index 7660c87d8b2a..000000000000
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ /dev/null
@@ -1,1106 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-/*
- * This allocator is designed for use with zram. Thus, the allocator is
- * supposed to work well under low memory conditions. In particular, it
- * never attempts higher order page allocation which is very likely to
- * fail under memory pressure. On the other hand, if we just use single
- * (0-order) pages, it would suffer from very high fragmentation --
- * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
- * This was one of the major issues with its predecessor (xvmalloc).
- *
- * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
- * and links them together using various 'struct page' fields. These linked
- * pages act as a single higher-order page i.e. an object can span 0-order
- * page boundaries. The code refers to these linked pages as a single entity
- * called zspage.
- *
- * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
- * since this satisfies the requirements of all its current users (in the
- * worst case, page is incompressible and is thus stored "as-is" i.e. in
- * uncompressed form). For allocation requests larger than this size, failure
- * is returned (see zs_malloc).
- *
- * Additionally, zs_malloc() does not return a dereferenceable pointer.
- * Instead, it returns an opaque handle (unsigned long) which encodes actual
- * location of the allocated object. The reason for this indirection is that
- * zsmalloc does not keep zspages permanently mapped since that would cause
- * issues on 32-bit systems where the VA region for kernel space mappings
- * is very small. So, before using the allocating memory, the object has to
- * be mapped using zs_map_object() to get a usable pointer and subsequently
- * unmapped using zs_unmap_object().
- *
- * Following is how we use various fields and flags of underlying
- * struct page(s) to form a zspage.
- *
- * Usage of struct page fields:
- * page->first_page: points to the first component (0-order) page
- * page->index (union with page->freelist): offset of the first object
- * starting in this page. For the first page, this is
- * always 0, so we use this field (aka freelist) to point
- * to the first free object in zspage.
- * page->lru: links together all component pages (except the first page)
- * of a zspage
- *
- * For _first_ page only:
- *
- * page->private (union with page->first_page): refers to the
- * component page after the first page
- * page->freelist: points to the first free object in zspage.
- * Free objects are linked together using in-place
- * metadata.
- * page->objects: maximum number of objects we can store in this
- * zspage (class->zspage_order * PAGE_SIZE / class->size)
- * page->lru: links together first pages of various zspages.
- * Basically forming list of zspages in a fullness group.
- * page->mapping: class index and fullness group of the zspage
- *
- * Usage of struct page flags:
- * PG_private: identifies the first component page
- * PG_private2: identifies the last component page
- *
- */
-
-#ifdef CONFIG_ZSMALLOC_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/highmem.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <linux/cpumask.h>
-#include <linux/cpu.h>
-#include <linux/vmalloc.h>
-#include <linux/hardirq.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include "zsmalloc.h"
-
-/*
- * This must be power of 2 and greater than of equal to sizeof(link_free).
- * These two conditions ensure that any 'struct link_free' itself doesn't
- * span more than 1 page which avoids complex case of mapping 2 pages simply
- * to restore link_free pointer values.
- */
-#define ZS_ALIGN 8
-
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
-/*
- * Object location (<PFN>, <obj_idx>) is encoded as
- * as single (unsigned long) handle value.
- *
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
- *
- * This is made more complicated by various memory models and PAE.
- */
-
-#ifndef MAX_PHYSMEM_BITS
-#ifdef CONFIG_HIGHMEM64G
-#define MAX_PHYSMEM_BITS 36
-#else /* !CONFIG_HIGHMEM64G */
-/*
- * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
- * be PAGE_SHIFT
- */
-#define MAX_PHYSMEM_BITS BITS_PER_LONG
-#endif
-#endif
-#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
-#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
-#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
- MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
-#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
-
-/*
- * On systems with 4K page size, this gives 254 size classes! There is a
- * trader-off here:
- * - Large number of size classes is potentially wasteful as free page are
- * spread across these classes
- * - Small number of size classes causes large internal fragmentation
- * - Probably its better to use specific size classes (empirically
- * determined). NOTE: all those class sizes must be set as multiple of
- * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
- *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
- */
-#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
-#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
- ZS_SIZE_CLASS_DELTA + 1)
-
-/*
- * We do not maintain any list for completely empty or full pages
- */
-enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
- ZS_EMPTY,
- ZS_FULL
-};
-
-/*
- * We assign a page to ZS_ALMOST_EMPTY fullness group when:
- * n <= N / f, where
- * n = number of allocated objects
- * N = total number of objects zspage can store
- * f = 1/fullness_threshold_frac
- *
- * Similarly, we assign zspage to:
- * ZS_ALMOST_FULL when n > N / f
- * ZS_EMPTY when n == 0
- * ZS_FULL when n == N
- *
- * (see: fix_fullness_group())
- */
-static const int fullness_threshold_frac = 4;
-
-struct size_class {
- /*
- * Size of objects stored in this class. Must be multiple
- * of ZS_ALIGN.
- */
- int size;
- unsigned int index;
-
- /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
- int pages_per_zspage;
-
- spinlock_t lock;
-
- /* stats */
- u64 pages_allocated;
-
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
-};
-
-/*
- * Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
- *
- * This must be power of 2 and less than or equal to ZS_ALIGN
- */
-struct link_free {
- /* Handle of next free chunk (encodes <PFN, obj_idx>) */
- void *next;
-};
-
-struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
-
- gfp_t flags; /* allocation flags used when growing pool */
-};
-
-/*
- * A zspage's class index and fullness group
- * are encoded in its (first)page->mapping
- */
-#define CLASS_IDX_BITS 28
-#define FULLNESS_BITS 4
-#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
-
-struct mapping_area {
-#ifdef CONFIG_PGTABLE_MAPPING
- struct vm_struct *vm; /* vm area for mapping object that span pages */
-#else
- char *vm_buf; /* copy buffer for objects that span pages */
-#endif
- char *vm_addr; /* address of kmap_atomic()'ed pages */
- enum zs_mapmode vm_mm; /* mapping mode */
-};
-
-
-/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-
-static int is_first_page(struct page *page)
-{
- return PagePrivate(page);
-}
-
-static int is_last_page(struct page *page)
-{
- return PagePrivate2(page);
-}
-
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
- enum fullness_group *fullness)
-{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = (unsigned long)page->mapping;
- *fullness = m & FULLNESS_MASK;
- *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
-}
-
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
- enum fullness_group fullness)
-{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
- (fullness & FULLNESS_MASK);
- page->mapping = (struct address_space *)m;
-}
-
-/*
- * zsmalloc divides the pool into various size classes where each
- * class maintains a list of zspages where each zspage is divided
- * into equal sized chunks. Each allocation falls into one of these
- * classes depending on its size. This function returns index of the
- * size class which has chunk size big enough to hold the give size.
- */
-static int get_size_class_index(int size)
-{
- int idx = 0;
-
- if (likely(size > ZS_MIN_ALLOC_SIZE))
- idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
- ZS_SIZE_CLASS_DELTA);
-
- return idx;
-}
-
-/*
- * For each size class, zspages are divided into different groups
- * depending on how "full" they are. This was done so that we could
- * easily find empty or nearly empty zspages when we try to shrink
- * the pool (not yet implemented). This function returns fullness
- * status of the given page.
- */
-static enum fullness_group get_fullness_group(struct page *page)
-{
- int inuse, max_objects;
- enum fullness_group fg;
- BUG_ON(!is_first_page(page));
-
- inuse = page->inuse;
- max_objects = page->objects;
-
- if (inuse == 0)
- fg = ZS_EMPTY;
- else if (inuse == max_objects)
- fg = ZS_FULL;
- else if (inuse <= max_objects / fullness_threshold_frac)
- fg = ZS_ALMOST_EMPTY;
- else
- fg = ZS_ALMOST_FULL;
-
- return fg;
-}
-
-/*
- * Each size class maintains various freelists and zspages are assigned
- * to one of these freelists based on the number of live objects they
- * have. This functions inserts the given zspage into the freelist
- * identified by <class, fullness_group>.
- */
-static void insert_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
-{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- if (*head)
- list_add_tail(&page->lru, &(*head)->lru);
-
- *head = page;
-}
-
-/*
- * This function removes the given zspage from the freelist identified
- * by <class, fullness_group>.
- */
-static void remove_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
-{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- BUG_ON(!*head);
- if (list_empty(&(*head)->lru))
- *head = NULL;
- else if (*head == page)
- *head = (struct page *)list_entry((*head)->lru.next,
- struct page, lru);
-
- list_del_init(&page->lru);
-}
-
-/*
- * Each size class maintains zspages in different fullness groups depending
- * on the number of live objects they contain. When allocating or freeing
- * objects, the fullness status of the page can change, say, from ALMOST_FULL
- * to ALMOST_EMPTY when freeing an object. This function checks if such
- * a status change has occurred for the given page and accordingly moves the
- * page from the freelist of the old fullness group to that of the new
- * fullness group.
- */
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
- struct page *page)
-{
- int class_idx;
- struct size_class *class;
- enum fullness_group currfg, newfg;
-
- BUG_ON(!is_first_page(page));
-
- get_zspage_mapping(page, &class_idx, &currfg);
- newfg = get_fullness_group(page);
- if (newfg == currfg)
- goto out;
-
- class = &pool->size_class[class_idx];
- remove_zspage(page, class, currfg);
- insert_zspage(page, class, newfg);
- set_zspage_mapping(page, class_idx, newfg);
-
-out:
- return newfg;
-}
-
-/*
- * We have to decide on how many pages to link together
- * to form a zspage for each size class. This is important
- * to reduce wastage due to unusable space left at end of
- * each zspage which is given as:
- * wastage = Zp - Zp % size_class
- * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
- *
- * For example, for size class of 3/8 * PAGE_SIZE, we should
- * link together 3 PAGE_SIZE sized pages to form a zspage
- * since then we can perfectly fit in 8 such objects.
- */
-static int get_pages_per_zspage(int class_size)
-{
- int i, max_usedpc = 0;
- /* zspage order which gives maximum used size per KB */
- int max_usedpc_order = 1;
-
- for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
- int zspage_size;
- int waste, usedpc;
-
- zspage_size = i * PAGE_SIZE;
- waste = zspage_size % class_size;
- usedpc = (zspage_size - waste) * 100 / zspage_size;
-
- if (usedpc > max_usedpc) {
- max_usedpc = usedpc;
- max_usedpc_order = i;
- }
- }
-
- return max_usedpc_order;
-}
-
-/*
- * A single 'zspage' is composed of many system pages which are
- * linked together using fields in struct page. This function finds
- * the first/head page, given any component page of a zspage.
- */
-static struct page *get_first_page(struct page *page)
-{
- if (is_first_page(page))
- return page;
- else
- return page->first_page;
-}
-
-static struct page *get_next_page(struct page *page)
-{
- struct page *next;
-
- if (is_last_page(page))
- next = NULL;
- else if (is_first_page(page))
- next = (struct page *)page_private(page);
- else
- next = list_entry(page->lru.next, struct page, lru);
-
- return next;
-}
-
-/*
- * Encode <page, obj_idx> as a single handle value.
- * On hardware platforms with physical memory starting at 0x0 the pfn
- * could be 0 so we ensure that the handle will never be 0 by adjusting the
- * encoded obj_idx value before encoding.
- */
-static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
-{
- unsigned long handle;
-
- if (!page) {
- BUG_ON(obj_idx);
- return NULL;
- }
-
- handle = page_to_pfn(page) << OBJ_INDEX_BITS;
- handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
-
- return (void *)handle;
-}
-
-/*
- * Decode <page, obj_idx> pair from the given object handle. We adjust the
- * decoded obj_idx back to its original value since it was adjusted in
- * obj_location_to_handle().
- */
-static void obj_handle_to_location(unsigned long handle, struct page **page,
- unsigned long *obj_idx)
-{
- *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
- *obj_idx = (handle & OBJ_INDEX_MASK) - 1;
-}
-
-static unsigned long obj_idx_to_offset(struct page *page,
- unsigned long obj_idx, int class_size)
-{
- unsigned long off = 0;
-
- if (!is_first_page(page))
- off = page->index;
-
- return off + obj_idx * class_size;
-}
-
-static void reset_page(struct page *page)
-{
- clear_bit(PG_private, &page->flags);
- clear_bit(PG_private_2, &page->flags);
- set_page_private(page, 0);
- page->mapping = NULL;
- page->freelist = NULL;
- page_mapcount_reset(page);
-}
-
-static void free_zspage(struct page *first_page)
-{
- struct page *nextp, *tmp, *head_extra;
-
- BUG_ON(!is_first_page(first_page));
- BUG_ON(first_page->inuse);
-
- head_extra = (struct page *)page_private(first_page);
-
- reset_page(first_page);
- __free_page(first_page);
-
- /* zspage with only 1 system page */
- if (!head_extra)
- return;
-
- list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
- list_del(&nextp->lru);
- reset_page(nextp);
- __free_page(nextp);
- }
- reset_page(head_extra);
- __free_page(head_extra);
-}
-
-/* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
-{
- unsigned long off = 0;
- struct page *page = first_page;
-
- BUG_ON(!is_first_page(first_page));
- while (page) {
- struct page *next_page;
- struct link_free *link;
- unsigned int i, objs_on_page;
-
- /*
- * page->index stores offset of first object starting
- * in the page. For the first page, this is always 0,
- * so we use first_page->index (aka ->freelist) to store
- * head of corresponding zspage's freelist.
- */
- if (page != first_page)
- page->index = off;
-
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
- objs_on_page = (PAGE_SIZE - off) / class->size;
-
- for (i = 1; i <= objs_on_page; i++) {
- off += class->size;
- if (off < PAGE_SIZE) {
- link->next = obj_location_to_handle(page, i);
- link += class->size / sizeof(*link);
- }
- }
-
- /*
- * We now come to the last (full or partial) object on this
- * page, which must point to the first object on the next
- * page (if present)
- */
- next_page = get_next_page(page);
- link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
- page = next_page;
- off = (off + class->size) % PAGE_SIZE;
- }
-}
-
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
-{
- int i, error;
- struct page *first_page = NULL, *uninitialized_var(prev_page);
-
- /*
- * Allocate individual pages and link them together as:
- * 1. first page->private = first sub-page
- * 2. all sub-pages are linked together using page->lru
- * 3. each sub-page is linked to the first page using page->first_page
- *
- * For each size class, First/Head pages are linked together using
- * page->lru. Also, we set PG_private to identify the first page
- * (i.e. no other sub-page has this flag set) and PG_private_2 to
- * identify the last page.
- */
- error = -ENOMEM;
- for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
-
- page = alloc_page(flags);
- if (!page)
- goto cleanup;
-
- INIT_LIST_HEAD(&page->lru);
- if (i == 0) { /* first page */
- SetPagePrivate(page);
- set_page_private(page, 0);
- first_page = page;
- first_page->inuse = 0;
- }
- if (i == 1)
- set_page_private(first_page, (unsigned long)page);
- if (i >= 1)
- page->first_page = first_page;
- if (i >= 2)
- list_add(&page->lru, &prev_page->lru);
- if (i == class->pages_per_zspage - 1) /* last page */
- SetPagePrivate2(page);
- prev_page = page;
- }
-
- init_zspage(first_page, class);
-
- first_page->freelist = obj_location_to_handle(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
-
- error = 0; /* Success */
-
-cleanup:
- if (unlikely(error) && first_page) {
- free_zspage(first_page);
- first_page = NULL;
- }
-
- return first_page;
-}
-
-static struct page *find_get_zspage(struct size_class *class)
-{
- int i;
- struct page *page;
-
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page)
- break;
- }
-
- return page;
-}
-
-#ifdef CONFIG_PGTABLE_MAPPING
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
- /*
- * Make sure we don't leak memory if a cpu UP notification
- * and zs_init() race and both call zs_cpu_up() on the same cpu
- */
- if (area->vm)
- return 0;
- area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
- if (!area->vm)
- return -ENOMEM;
- return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
- if (area->vm)
- free_vm_area(area->vm);
- area->vm = NULL;
-}
-
-static inline void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
- area->vm_addr = area->vm->addr;
- return area->vm_addr + off;
-}
-
-static inline void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- unsigned long addr = (unsigned long)area->vm_addr;
-
- unmap_kernel_range(addr, PAGE_SIZE * 2);
-}
-
-#else /* CONFIG_PGTABLE_MAPPING */
-
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
- /*
- * Make sure we don't leak memory if a cpu UP notification
- * and zs_init() race and both call zs_cpu_up() on the same cpu
- */
- if (area->vm_buf)
- return 0;
- area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
- if (!area->vm_buf)
- return -ENOMEM;
- return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
- if (area->vm_buf)
- free_page((unsigned long)area->vm_buf);
- area->vm_buf = NULL;
-}
-
-static void *__zs_map_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- int sizes[2];
- void *addr;
- char *buf = area->vm_buf;
-
- /* disable page faults to match kmap_atomic() return conditions */
- pagefault_disable();
-
- /* no read fastpath */
- if (area->vm_mm == ZS_MM_WO)
- goto out;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
- memcpy(buf, addr + off, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
- memcpy(buf + sizes[0], addr, sizes[1]);
- kunmap_atomic(addr);
-out:
- return area->vm_buf;
-}
-
-static void __zs_unmap_object(struct mapping_area *area,
- struct page *pages[2], int off, int size)
-{
- int sizes[2];
- void *addr;
- char *buf = area->vm_buf;
-
- /* no write fastpath */
- if (area->vm_mm == ZS_MM_RO)
- goto out;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
- memcpy(addr + off, buf, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
- memcpy(addr, buf + sizes[0], sizes[1]);
- kunmap_atomic(addr);
-
-out:
- /* enable page faults to match kunmap_atomic() return conditions */
- pagefault_enable();
-}
-
-#endif /* CONFIG_PGTABLE_MAPPING */
-
-static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
- void *pcpu)
-{
- int ret, cpu = (long)pcpu;
- struct mapping_area *area;
-
- switch (action) {
- case CPU_UP_PREPARE:
- area = &per_cpu(zs_map_area, cpu);
- ret = __zs_cpu_up(area);
- if (ret)
- return notifier_from_errno(ret);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- area = &per_cpu(zs_map_area, cpu);
- __zs_cpu_down(area);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block zs_cpu_nb = {
- .notifier_call = zs_cpu_notifier
-};
-
-static void zs_exit(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
- unregister_cpu_notifier(&zs_cpu_nb);
-}
-
-static int zs_init(void)
-{
- int cpu, ret;
-
- register_cpu_notifier(&zs_cpu_nb);
- for_each_online_cpu(cpu) {
- ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
- if (notifier_to_errno(ret))
- goto fail;
- }
- return 0;
-fail:
- zs_exit();
- return notifier_to_errno(ret);
-}
-
-/**
- * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
- *
- * This function must be called before anything when using
- * the zsmalloc allocator.
- *
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
- */
-struct zs_pool *zs_create_pool(gfp_t flags)
-{
- int i, ovhd_size;
- struct zs_pool *pool;
-
- ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
- pool = kzalloc(ovhd_size, GFP_KERNEL);
- if (!pool)
- return NULL;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int size;
- struct size_class *class;
-
- size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
- if (size > ZS_MAX_ALLOC_SIZE)
- size = ZS_MAX_ALLOC_SIZE;
-
- class = &pool->size_class[i];
- class->size = size;
- class->index = i;
- spin_lock_init(&class->lock);
- class->pages_per_zspage = get_pages_per_zspage(size);
-
- }
-
- pool->flags = flags;
-
- return pool;
-}
-EXPORT_SYMBOL_GPL(zs_create_pool);
-
-void zs_destroy_pool(struct zs_pool *pool)
-{
- int i;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int fg;
- struct size_class *class = &pool->size_class[i];
-
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size %db, fullness group %d\n",
- class->size, fg);
- }
- }
- }
- kfree(pool);
-}
-EXPORT_SYMBOL_GPL(zs_destroy_pool);
-
-/**
- * zs_malloc - Allocate block of given size from pool.
- * @pool: pool to allocate from
- * @size: size of block to allocate
- *
- * On success, handle to the allocated object is returned,
- * otherwise 0.
- * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
- */
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
-{
- unsigned long obj;
- struct link_free *link;
- int class_idx;
- struct size_class *class;
-
- struct page *first_page, *m_page;
- unsigned long m_objidx, m_offset;
-
- if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return 0;
-
- class_idx = get_size_class_index(size);
- class = &pool->size_class[class_idx];
- BUG_ON(class_idx != class->index);
-
- spin_lock(&class->lock);
- first_page = find_get_zspage(class);
-
- if (!first_page) {
- spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
- if (unlikely(!first_page))
- return 0;
-
- set_zspage_mapping(first_page, class->index, ZS_EMPTY);
- spin_lock(&class->lock);
- class->pages_allocated += class->pages_per_zspage;
- }
-
- obj = (unsigned long)first_page->freelist;
- obj_handle_to_location(obj, &m_page, &m_objidx);
- m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
- first_page->freelist = link->next;
- memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
-
- first_page->inuse++;
- /* Now move the zspage to another fullness group, if required */
- fix_fullness_group(pool, first_page);
- spin_unlock(&class->lock);
-
- return obj;
-}
-EXPORT_SYMBOL_GPL(zs_malloc);
-
-void zs_free(struct zs_pool *pool, unsigned long obj)
-{
- struct link_free *link;
- struct page *first_page, *f_page;
- unsigned long f_objidx, f_offset;
-
- int class_idx;
- struct size_class *class;
- enum fullness_group fullness;
-
- if (unlikely(!obj))
- return;
-
- obj_handle_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
-
- get_zspage_mapping(first_page, &class_idx, &fullness);
- class = &pool->size_class[class_idx];
- f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
-
- spin_lock(&class->lock);
-
- /* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
- link->next = first_page->freelist;
- kunmap_atomic(link);
- first_page->freelist = (void *)obj;
-
- first_page->inuse--;
- fullness = fix_fullness_group(pool, first_page);
-
- if (fullness == ZS_EMPTY)
- class->pages_allocated -= class->pages_per_zspage;
-
- spin_unlock(&class->lock);
-
- if (fullness == ZS_EMPTY)
- free_zspage(first_page);
-}
-EXPORT_SYMBOL_GPL(zs_free);
-
-/**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
- *
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
- *
- * This function returns with preemption and page faults disabled.
- */
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm)
-{
- struct page *page;
- unsigned long obj_idx, off;
-
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
- struct page *pages[2];
-
- BUG_ON(!handle);
-
- /*
- * Because we use per-cpu mapping areas shared among the
- * pools/users, we can't allow mapping in interrupt context
- * because it can corrupt another users mappings.
- */
- BUG_ON(in_interrupt());
-
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
-
- area = &get_cpu_var(zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
- return area->vm_addr + off;
- }
-
- /* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
-
- return __zs_map_object(area, pages, off, class->size);
-}
-EXPORT_SYMBOL_GPL(zs_map_object);
-
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
-{
- struct page *page;
- unsigned long obj_idx, off;
-
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
-
- BUG_ON(!handle);
-
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
-
- area = &__get_cpu_var(zs_map_area);
- if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
- else {
- struct page *pages[2];
-
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
-
- __zs_unmap_object(area, pages, off, class->size);
- }
- put_cpu_var(zs_map_area);
-}
-EXPORT_SYMBOL_GPL(zs_unmap_object);
-
-u64 zs_get_total_size_bytes(struct zs_pool *pool)
-{
- int i;
- u64 npages = 0;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++)
- npages += pool->size_class[i].pages_allocated;
-
- return npages << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
-
-module_init(zs_init);
-module_exit(zs_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
deleted file mode 100644
index c2eb174b97ee..000000000000
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _ZS_MALLOC_H_
-#define _ZS_MALLOC_H_
-
-#include <linux/types.h>
-
-/*
- * zsmalloc mapping modes
- *
- * NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when PGTABLE_MAPPING is selected.
- */
-enum zs_mapmode {
- ZS_MM_RW, /* normal read-write mapping */
- ZS_MM_RO, /* read-only (no copy-out at unmap time) */
- ZS_MM_WO /* write-only (no copy-in at map time) */
- /*
- * NOTE: ZS_MM_WO should only be used for initializing new
- * (uninitialized) allocations. Partial writes to already
- * initialized allocations should use ZS_MM_RW to preserve the
- * existing data.
- */
-};
-
-struct zs_pool;
-
-struct zs_pool *zs_create_pool(gfp_t flags);
-void zs_destroy_pool(struct zs_pool *pool);
-
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
-void zs_free(struct zs_pool *pool, unsigned long obj);
-
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-
-u64 zs_get_total_size_bytes(struct zs_pool *pool);
-
-#endif
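
For reference, the interface removed above is handle-based: zs_malloc() returns an opaque unsigned long handle rather than a pointer, and the object is only addressable between zs_map_object() and zs_unmap_object(), which pin a per-CPU mapping area (so the mapping must be short-lived and may not nest). A minimal usage sketch against the declarations in the deleted header; the include path assumes the allocator's post-staging location, and error handling is trimmed:

	/* Sketch only: uses the zs_create_pool(gfp_t) signature shown above. */
	#include <linux/zsmalloc.h>

	static int zsmalloc_example(void)
	{
		struct zs_pool *pool;
		unsigned long handle;
		void *mem;

		pool = zs_create_pool(GFP_KERNEL);
		if (!pool)
			return -ENOMEM;

		handle = zs_malloc(pool, 128);		/* 0 means allocation failed */
		if (!handle) {
			zs_destroy_pool(pool);
			return -ENOMEM;
		}

		/* ZS_MM_WO: fresh allocation, no need to copy in existing data */
		mem = zs_map_object(pool, handle, ZS_MM_WO);
		memset(mem, 0, 128);
		zs_unmap_object(pool, handle);

		zs_free(pool, handle);
		zs_destroy_pool(pool);
		return 0;
	}
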
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb58..dc2d84ac5a0e 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
depends on SCSI && BLOCK
select CONFIGFS_FS
+ select CRC_T10DIF
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ select BLK_DEV_INTEGRITY
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 00867190413c..b83ec378d04f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
struct idr sess_idr;
@@ -307,6 +307,9 @@ bool iscsit_check_np_match(
return false;
}
+/*
+ * Called with mutex np_lock held
+ */
static struct iscsi_np *iscsit_get_np(
struct __kernel_sockaddr_storage *sockaddr,
int network_transport)
@@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np(
struct iscsi_np *np;
bool match;
- spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
- spin_lock(&np->np_thread_lock);
+ spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
continue;
}
@@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np(
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
- spin_unlock(&np->np_thread_lock);
- spin_unlock_bh(&np_lock);
+ spin_unlock_bh(&np->np_thread_lock);
return np;
}
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
}
- spin_unlock_bh(&np_lock);
return NULL;
}
@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
+
+ mutex_lock(&np_lock);
+
/*
* Locate the existing struct iscsi_np if already active..
*/
np = iscsit_get_np(sockaddr, network_transport);
- if (np)
+ if (np) {
+ mutex_unlock(&np_lock);
return np;
+ }
np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
if (!np) {
pr_err("Unable to allocate memory for struct iscsi_np\n");
+ mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
- spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);
@@ -470,9 +478,9 @@ int iscsit_del_np(struct iscsi_np *np)
np->np_transport->iscsit_free_np(np);
- spin_lock_bh(&np_lock);
+ mutex_lock(&np_lock);
list_del(&np->np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, np->np_transport->name);
@@ -622,7 +630,7 @@ static int iscsit_add_reject(
{
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;
@@ -777,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
@@ -2475,7 +2483,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
@@ -3700,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@@ -3951,7 +3959,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
switch (hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_SCSI_CMD:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
@@ -3963,28 +3971,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_NOOP_OUT:
cmd = NULL;
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
case ISCSI_OP_TEXT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_text_cmd(conn, cmd, buf);
break;
case ISCSI_OP_LOGOUT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
@@ -4143,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
@@ -4188,6 +4196,10 @@ int iscsit_close_connection(
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
iscsit_free_queue_reqs_for_conn(conn);
/*
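
The conversion above replaces the np_lock spinlock with a mutex so the lock can be held across iscsit_get_np(), the blocking socket setup and the final list insertion, closing the window in which two login threads could create duplicate portals; a spinlock could not cover that window because iscsi_target_setup_login_socket() may sleep. A simplified sketch of the resulting pattern (setup_new_np() is a hypothetical helper standing in for the allocation, socket setup and kthread start done above):

	static DEFINE_MUTEX(np_lock);

	static struct iscsi_np *add_portal(struct __kernel_sockaddr_storage *ss,
					   int transport)
	{
		struct iscsi_np *np;

		mutex_lock(&np_lock);
		np = iscsit_get_np(ss, transport);	/* called with np_lock held */
		if (!np) {
			np = setup_new_np(ss, transport);	/* may sleep */
			if (!IS_ERR(np))
				list_add_tail(&np->np_list, &g_np_list);
		}
		mutex_unlock(&np_lock);
		return np;
	}
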
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb1df32..4ca8fd2a70db 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_conn_node); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 83c965c65386..582ba84075ec 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
*/
alloc_tags:
tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
- tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 39761837608d..44a5471de00f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 0819e688a398..e655b042ed18 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
int size, tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ if (tag < 0)
+ return NULL;
+
size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
memset(cmd, 0, size);
@@ -926,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
u8 state;
struct iscsi_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;
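
With the change above, iscsit_allocate_cmd() takes a task state rather than a gfp mask: percpu_ida_alloc(), as used by this series, waits in that state until a session tag becomes free, TASK_RUNNING means "do not sleep at all", and an exhausted pool or an interrupted wait now surfaces as a negative tag, hence the new NULL return path. A hedged sketch of the caller-side contract:

	/*
	 * Sketch only; assumes the percpu_ida_alloc(pool, state) interface
	 * this series builds on.
	 *
	 *   TASK_RUNNING       - never sleeps; callers such as the NopIN timer
	 *                        path see NULL when the tag pool is exhausted
	 *   TASK_INTERRUPTIBLE - may sleep for a tag; a signal aborts the wait
	 *                        and the caller again sees NULL
	 */
	static struct iscsi_cmd *alloc_cmd(struct iscsi_conn *conn, bool can_sleep)
	{
		return iscsit_allocate_cmd(conn,
				can_sleep ? TASK_INTERRUPTIBLE : TASK_RUNNING);
	}
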
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e4fc34a02f57..561a424d1980 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 1b41e6776152..fadad7c5f635 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -217,7 +217,8 @@ static void tcm_loop_submission_work(struct work_struct *work)
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
- sgl_bidi, sgl_bidi_count);
+ sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
if (rc < 0) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
@@ -462,7 +463,7 @@ static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
- int error;
+ int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
@@ -486,6 +487,13 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(sh, host_prot);
+ scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
@@ -1228,7 +1236,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-struct se_portal_group *tcm_loop_make_naa_tpg(
+static struct se_portal_group *tcm_loop_make_naa_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
@@ -1273,7 +1281,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
return &tl_tpg->tl_se_tpg;
}
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1305,7 +1313,7 @@ void tcm_loop_drop_naa_tpg(
/* Start items for tcm_loop_cit */
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
@@ -1375,7 +1383,7 @@ out:
return ERR_PTR(ret);
}
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fdcee326bfbc..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -41,11 +41,14 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+ int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explicit, int offline);
+static char *core_alua_dump_state(int state);
+
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -55,6 +58,86 @@ static LIST_HEAD(lu_gps_list);
struct t10_alua_lu_gp *default_lu_gp;
/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *map_mem;
+ unsigned char *buf;
+ u32 rd_len = 0, off;
+
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT REFERRALS allocation length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ off = 4;
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ int desc_num = off + 3;
+ int pg_num;
+
+ off += 4;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+ off += 8;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+ off += 8;
+ rd_len += 20;
+ pg_num = 0;
+ list_for_each_entry(map_mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ int alua_state = map_mem->lba_map_mem_alua_state;
+ int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+ if (cmd->data_length > off)
+ buf[off] = alua_state & 0x0f;
+ off += 2;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id >> 8) & 0xff;
+ off++;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id & 0xff);
+ off++;
+ rd_len += 4;
+ pg_num++;
+ }
+ if (cmd->data_length > desc_num)
+ buf[desc_num] = pg_num;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ put_unaligned_be16(rd_len, &buf[2]);
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
@@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0;
+ int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
if (!l_port)
@@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state, &primary);
+ rc = core_alua_check_transition(alua_access_state,
+ valid_states, &primary);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -386,6 +471,81 @@ static inline int core_alua_state_nonoptimized(
return 0;
}
+static inline int core_alua_state_lba_dependent(
+ struct se_cmd *cmd,
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ u8 *alua_ascq)
+{
+ struct se_device *dev = cmd->se_dev;
+ u64 segment_size, segment_mult, sectors, lba;
+
+ /* Only need to check for cdb actually containing LBAs */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+ return 0;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ segment_size = dev->t10_alua.lba_map_segment_size;
+ segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ lba = cmd->t_task_lba;
+ while (lba < cmd->t_task_lba + sectors) {
+ struct t10_alua_lba_map *cur_map = NULL, *map;
+ struct t10_alua_lba_map_member *map_mem;
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ u64 start_lba, last_lba;
+ u64 first_lba = map->lba_map_first_lba;
+
+ if (segment_mult) {
+ u64 tmp = lba;
+ start_lba = do_div(tmp, segment_size * segment_mult);
+
+ last_lba = first_lba + segment_size - 1;
+ if (start_lba >= first_lba &&
+ start_lba <= last_lba) {
+ lba += segment_size;
+ cur_map = map;
+ break;
+ }
+ } else {
+ last_lba = map->lba_map_last_lba;
+ if (lba >= first_lba && lba <= last_lba) {
+ lba = last_lba + 1;
+ cur_map = map;
+ break;
+ }
+ }
+ }
+ if (!cur_map) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (map_mem->lba_map_mem_alua_pg_id !=
+ tg_pt_gp->tg_pt_gp_id)
+ continue;
+ switch(map_mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_STANDBY:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ default:
+ break;
+ }
+ }
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return 0;
+}
+
static inline int core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb,
@@ -583,6 +743,9 @@ target_alua_state_check(struct se_cmd *cmd)
case ALUA_ACCESS_STATE_TRANSITION:
ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+ break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -618,17 +781,36 @@ out:
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary)
{
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ if (!(valid & ALUA_AO_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ if (!(valid & ALUA_AN_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_STANDBY:
+ if (!(valid & ALUA_S_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- /*
- * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
- * defined as primary target port asymmetric access states.
- */
+ if (!(valid & ALUA_U_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ if (!(valid & ALUA_LBD_SUP))
+ goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
@@ -636,14 +818,27 @@ core_alua_check_transition(int state, int *primary)
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
+ if (!(valid & ALUA_O_SUP))
+ goto not_supported;
*primary = 0;
break;
+ case ALUA_ACCESS_STATE_TRANSITION:
+ /*
+ * Transitioning is set internally, and
+ * cannot be selected manually.
+ */
+ goto not_supported;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
}
return 0;
+
+not_supported:
+ pr_err("ALUA access state %s not supported",
+ core_alua_dump_state(state));
+ return TCM_INVALID_PARAMETER_LIST;
}
static char *core_alua_dump_state(int state)
@@ -653,12 +848,16 @@ static char *core_alua_dump_state(int state)
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return "Transitioning";
default:
return "Unknown";
}
@@ -735,58 +934,49 @@ static int core_alua_write_tpg_metadata(
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- int primary_state,
- unsigned char *md_buf)
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
- len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
- tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_id,
+ tg_pt_gp->tg_pt_gp_alua_pending_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+ return rc;
}
-static int core_alua_do_transition_tg_pt(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- struct se_port *l_port,
- struct se_node_acl *nacl,
- unsigned char *md_buf,
- int new_state,
- int explicit)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
struct se_port *port;
struct t10_alua_tg_pt_gp_member *mem;
- int old_state = 0;
- /*
- * Save the old primary ALUA access state, and set the current state
- * to ALUA_ACCESS_STATE_TRANSITION.
- */
- old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
- ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
- /*
- * Check for the optional ALUA primary state transition delay
- */
- if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
- msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+ bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -821,9 +1011,12 @@ static int core_alua_do_transition_tg_pt(
if (!lacl)
continue;
- if (explicit &&
- (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
- (l_port != NULL) && (l_port == port))
+ if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+ (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_port == port))
continue;
core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -851,20 +1044,102 @@ static int core_alua_do_transition_tg_pt(
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
- core_alua_update_tpg_primary_metadata(tg_pt_gp,
- new_state, md_buf);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ tg_pt_gp->tg_pt_gp_alua_pending_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
- tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
- core_alua_dump_state(new_state));
+ tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+ core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (tg_pt_gp->tg_pt_gp_transition_complete)
+ complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int new_state,
+ int explicit)
+{
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ /* Nothing to be done here */
+ if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+ return 0;
+
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return -EAGAIN;
+
+ /*
+ * Flush any pending transitions
+ */
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+ ALUA_ACCESS_STATE_TRANSITION) {
+ /* Just in case */
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ return 0;
+ }
+
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_TRANSITION);
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ /*
+ * Take a reference for workqueue item
+ */
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+ unsigned long transition_tmo;
+
+ transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work,
+ transition_tmo);
+ } else {
+ tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+ queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+ &tg_pt_gp->tg_pt_gp_transition_work, 0);
+ wait_for_completion(&wait);
+ tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ }
return 0;
}
@@ -878,23 +1153,15 @@ int core_alua_do_port_transition(
int explicit)
{
struct se_device *dev;
- struct se_port *port;
- struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- int primary;
+ int primary, valid_states, rc = 0;
- if (core_alua_check_transition(new_state, &primary) != 0)
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
return -EINVAL;
- md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate buf for ALUA metadata\n");
- return -ENOMEM;
- }
-
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
@@ -911,12 +1178,13 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explicit);
+ l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+ rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+ new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
@@ -951,11 +1219,11 @@ int core_alua_do_port_transition(
continue;
if (l_tg_pt_gp == tg_pt_gp) {
- port = l_port;
- nacl = l_nacl;
+ tg_pt_gp->tg_pt_gp_alua_port = l_port;
+ tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
- port = NULL;
- nacl = NULL;
+ tg_pt_gp->tg_pt_gp_alua_port = NULL;
+ tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
@@ -964,12 +1232,14 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explicit);
+ rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+ new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
+ if (rc)
+ break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -979,16 +1249,18 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
- pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
- " Group IDs: %hu %s transition to primary state: %s\n",
- config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
- core_alua_dump_state(new_state));
+ if (!rc) {
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id,
+ (explicit) ? "explicit" : "implicit",
+ core_alua_dump_state(new_state));
+ }
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ return rc;
}
/*
@@ -996,13 +1268,18 @@ int core_alua_do_port_transition(
*/
static int core_alua_update_tpg_secondary_metadata(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
- unsigned char *md_buf,
- u32 md_buf_len)
+ struct se_port *port)
{
+ unsigned char *md_buf;
struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
- int len;
+ int len, rc;
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
memset(path, 0, ALUA_METADATA_PATH_LEN);
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1014,7 +1291,7 @@ static int core_alua_update_tpg_secondary_metadata(
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&port->sep_tg_pt_secondary_offline),
port->sep_tg_pt_secondary_stat);
@@ -1023,7 +1300,10 @@ static int core_alua_update_tpg_secondary_metadata(
se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(md_buf);
+
+ return rc;
}
static int core_alua_set_tg_pt_secondary_state(
@@ -1033,8 +1313,6 @@ static int core_alua_set_tg_pt_secondary_state(
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- u32 md_buf_len;
int trans_delay_msecs;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1055,7 +1333,6 @@ static int core_alua_set_tg_pt_secondary_state(
else
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
- md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
port->sep_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1077,23 +1354,115 @@ static int core_alua_set_tg_pt_secondary_state(
* secondary state and status
*/
if (port->sep_tg_pt_secondary_write_md) {
- md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate md_buf for"
- " secondary ALUA access metadata\n");
- return -ENOMEM;
- }
mutex_lock(&port->sep_tg_pt_md_mutex);
- core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
- md_buf, md_buf_len);
+ core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
mutex_unlock(&port->sep_tg_pt_md_mutex);
+ }
+
+ return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+ u64 first_lba, u64 last_lba)
+{
+ struct t10_alua_lba_map *lba_map;
+
+ lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+ if (!lba_map) {
+ pr_err("Unable to allocate struct t10_alua_lba_map\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+ lba_map->lba_map_first_lba = first_lba;
+ lba_map->lba_map_last_lba = last_lba;
- kfree(md_buf);
+ list_add_tail(&lba_map->lba_map_list, list);
+ return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+ int pg_id, int state)
+{
+ struct t10_alua_lba_map_member *lba_map_mem;
+
+ list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+ pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+ return -EINVAL;
+ }
+ }
+
+ lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+ if (!lba_map_mem) {
+ pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+ return -ENOMEM;
}
+ lba_map_mem->lba_map_mem_alua_state = state;
+ lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+ list_add_tail(&lba_map_mem->lba_map_mem_list,
+ &lba_map->lba_map_mem_list);
return 0;
}
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+ struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+ struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+ list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+ lba_map_list) {
+ list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+ &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ list_del(&lba_map_mem->lba_map_mem_list);
+ kmem_cache_free(t10_alua_lba_map_mem_cache,
+ lba_map_mem);
+ }
+ list_del(&lba_map->lba_map_list);
+ kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+ }
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+ int segment_size, int segment_mult)
+{
+ struct list_head old_lba_map_list;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ int activate = 0, supported;
+
+ INIT_LIST_HEAD(&old_lba_map_list);
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ dev->t10_alua.lba_map_segment_size = segment_size;
+ dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+ list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+ if (lba_map_list) {
+ list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+ activate = 1;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
+ continue;
+ supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (activate)
+ supported |= ALUA_LBD_SUP;
+ else
+ supported &= ~ALUA_LBD_SUP;
+ tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+ }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ core_alua_free_lba_map(&old_lba_map_list);
+}
+
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
@@ -1346,8 +1715,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+ INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
- tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
@@ -1475,6 +1845,8 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
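
core_alua_check_transition() now takes the target port group's supported-states mask, so an explicit SET TARGET PORT GROUPS (or a configfs write) can only request a state the group actually advertises. Condensed, the new check amounts to the following sketch, using the ALUA_*_SUP bits from target_core_alua.h; TRANSITION is never accepted because it is only set internally:

	static bool alua_state_supported(int state, int valid)
	{
		switch (state) {
		case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:	return valid & ALUA_AO_SUP;
		case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:	return valid & ALUA_AN_SUP;
		case ALUA_ACCESS_STATE_STANDBY:			return valid & ALUA_S_SUP;
		case ALUA_ACCESS_STATE_UNAVAILABLE:		return valid & ALUA_U_SUP;
		case ALUA_ACCESS_STATE_LBA_DEPENDENT:		return valid & ALUA_LBD_SUP;
		case ALUA_ACCESS_STATE_OFFLINE:			return valid & ALUA_O_SUP;
		default:					return false;
		}
	}
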
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 88e2e835f14a..0a7d65e80404 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -13,12 +13,13 @@
/*
* ASYMMETRIC ACCESS STATE field
*
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
*/
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
@@ -78,18 +79,30 @@
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN 1024
+
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+ struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+ int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 272755d03e5a..f0e85b119692 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -643,6 +643,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_3pc);
SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(pi_prot_type);
+SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
+SE_DEV_ATTR_RO(hw_pi_prot_type);
+
+DEF_DEV_ATTRIB(pi_prot_format);
+SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
@@ -702,6 +711,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_emulate_caw.attr,
&target_core_dev_attrib_emulate_3pc.attr,
+ &target_core_dev_attrib_pi_prot_type.attr,
+ &target_core_dev_attrib_hw_pi_prot_type.attr,
+ &target_core_dev_attrib_pi_prot_format.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_is_nonrot.attr,
&target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1741,6 +1753,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
.store = target_core_store_alua_lu_gp,
};
+static ssize_t target_core_show_dev_lba_map(void *p, char *page)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *mem;
+ char *b = page;
+ int bl = 0;
+ char state;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ bl += sprintf(b + bl, "%u %u\n",
+ dev->t10_alua.lba_map_segment_size,
+ dev->t10_alua.lba_map_segment_multiplier);
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+ bl += sprintf(b + bl, "%llu %llu",
+ map->lba_map_first_lba, map->lba_map_last_lba);
+ list_for_each_entry(mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ switch (mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ state = 'O';
+ break;
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ state = 'A';
+ break;
+ case ALUA_ACCESS_STATE_STANDBY:
+ state = 'S';
+ break;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ state = 'U';
+ break;
+ default:
+ state = '.';
+ break;
+ }
+ bl += sprintf(b + bl, " %d:%c",
+ mem->lba_map_mem_alua_pg_id, state);
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return bl;
+}
+
+static ssize_t target_core_store_dev_lba_map(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev = p;
+ struct t10_alua_lba_map *lba_map = NULL;
+ struct list_head lba_list;
+ char *map_entries, *ptr;
+ char state;
+ int pg_num = -1, pg;
+ int ret = 0, num = 0, pg_id, alua_state;
+ unsigned long start_lba = -1, end_lba = -1;
+ unsigned long segment_size = -1, segment_mult = -1;
+
+ map_entries = kstrdup(page, GFP_KERNEL);
+ if (!map_entries)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&lba_list);
+ while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ if (num == 0) {
+ if (sscanf(ptr, "%lu %lu\n",
+ &segment_size, &segment_mult) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ continue;
+ }
+ if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing end lba\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing state definitions\n",
+ num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ lba_map = core_alua_allocate_lba_map(&lba_list,
+ start_lba, end_lba);
+ if (IS_ERR(lba_map)) {
+ ret = PTR_ERR(lba_map);
+ break;
+ }
+ pg = 0;
+ while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+ switch (state) {
+ case 'O':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+ break;
+ case 'A':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+ break;
+ case 'S':
+ alua_state = ALUA_ACCESS_STATE_STANDBY;
+ break;
+ case 'U':
+ alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+ break;
+ default:
+ pr_err("Invalid ALUA state '%c'\n", state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = core_alua_allocate_lba_map_mem(lba_map,
+ pg_id, alua_state);
+ if (ret) {
+ pr_err("Invalid target descriptor %d:%c "
+ "at line %d\n",
+ pg_id, state, num);
+ break;
+ }
+ pg++;
+ ptr = strchr(ptr, ' ');
+ if (ptr)
+ ptr++;
+ else
+ break;
+ }
+ if (pg_num == -1)
+ pg_num = pg;
+ else if (pg != pg_num) {
+ pr_err("Only %d from %d port groups definitions "
+ "at line %d\n", pg, pg_num, num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ }
+out:
+ if (ret) {
+ core_alua_free_lba_map(&lba_list);
+ count = ret;
+ } else
+ core_alua_set_lba_map(dev, &lba_list,
+ segment_size, segment_mult);
+ kfree(map_entries);
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "lba_map",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_lba_map,
+ .store = target_core_store_dev_lba_map,
+};
+
static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
@@ -1748,6 +1930,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_udev_path.attr,
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
+ &target_core_attr_dev_lba_map.attr,
NULL,
};
@@ -2054,6 +2237,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}
+ if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+ new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+ /* LBA DEPENDENT is only allowed with implicit ALUA */
+ pr_err("Unable to process implicit configfs ALUA transition"
+ " while explicit ALUA management is enabled\n");
+ return -EINVAL;
+ }
ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
@@ -2188,7 +2378,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
-SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
tg_pt_gp_alua_supported_states, ALUA_U_SUP);
@@ -2937,7 +3127,7 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
- target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
+ target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d06de84b069b..65001e133670 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -918,6 +918,90 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}
+int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
+{
+ int rc, old_prot = dev->dev_attrib.pi_prot_type;
+
+ if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+ pr_err("Illegal value %d for pi_prot_type\n", flag);
+ return -EINVAL;
+ }
+ if (flag == 2) {
+ pr_err("DIF TYPE2 protection currently not supported\n");
+ return -ENOSYS;
+ }
+ if (dev->dev_attrib.hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return 0;
+ }
+ if (!dev->transport->init_prot || !dev->transport->free_prot) {
+ pr_err("DIF protection not supported by backend: %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ dev->dev_attrib.pi_prot_type = flag;
+
+ if (flag && !old_prot) {
+ rc = dev->transport->init_prot(dev);
+ if (rc) {
+ dev->dev_attrib.pi_prot_type = old_prot;
+ return rc;
+ }
+
+ } else if (!flag && old_prot) {
+ dev->transport->free_prot(dev);
+ }
+ pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+
+ return 0;
+}
+
+int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
+{
+ int rc;
+
+ if (!flag)
+ return 0;
+
+ if (flag != 1) {
+ pr_err("Illegal value %d for pi_prot_format\n", flag);
+ return -EINVAL;
+ }
+ if (!dev->transport->format_prot) {
+ pr_err("DIF protection format not supported by backend %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!(dev->dev_flags & DF_CONFIGURED)) {
+ pr_err("DIF protection format requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to format SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
+ return -EINVAL;
+ }
+
+ rc = dev->transport->format_prot(dev);
+ if (rc)
+ return rc;
+
+ pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+
+ return 0;
+}
+
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
@@ -1117,23 +1201,23 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
- u32 lun)
+ u32 unpacked_lun)
{
- struct se_lun *lun_p;
+ struct se_lun *lun;
int rc;
- lun_p = core_tpg_pre_addlun(tpg, lun);
- if (IS_ERR(lun_p))
- return lun_p;
+ lun = core_tpg_alloc_lun(tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return lun;
- rc = core_tpg_post_addlun(tpg, lun_p,
+ rc = core_tpg_add_lun(tpg, lun,
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
return ERR_PTR(rc);
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
@@ -1154,7 +1238,7 @@ struct se_lun *core_dev_add_lun(
spin_unlock_irq(&tpg->acl_node_lock);
}
- return lun_p;
+ return lun;
}
/* core_dev_del_lun():
@@ -1420,6 +1504,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
dev->transport = hba->transport;
+ dev->prot_length = sizeof(struct se_dif_v1_tuple);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1444,6 +1529,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+ spin_lock_init(&dev->t10_alua.lba_map_lock);
dev->t10_wwn.t10_dev = dev;
dev->t10_alua.t10_dev = dev;
@@ -1460,6 +1547,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1588,9 +1676,13 @@ void target_free_device(struct se_device *dev)
}
core_alua_free_lu_gp_mem(dev);
+ core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
+ if (dev->transport->free_prot)
+ dev->transport->free_prot(dev);
+
dev->transport->free_device(dev);
}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index dae2ad6a669e..7de9f0475d05 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[1] = NULL;
port_stat_grp = &lun->port_stat_grps.stat_group;
- port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
pr_err("Unable to allocate port_stat_grp->default_groups\n");
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 78241a53b555..cf991a91a8a9 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev)
kfree(fd_dev);
}
+static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ int is_write)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+ struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+ u32 prot_size, len, size;
+ int rc, ret = 1, i;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+ fd_prot->prot_buf = vzalloc(prot_size);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+ fd_prot->prot_sg_nents = cmd->t_prot_nents;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+ fd_prot->prot_sg_nents, GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+ vfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+ size = prot_size;
+
+ for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+
+ len = min_t(u32, PAGE_SIZE, size);
+ sg_set_buf(sg, buf, len);
+ size -= len;
+ buf += len;
+ }
+ }
+
+ if (is_write) {
+ rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
+ if (rc < 0 || prot_size != rc) {
+ pr_err("kernel_write() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ } else {
+ rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
+ if (rc < 0) {
+ pr_err("kernel_read() for fd_do_prot_rw failed:"
+ " %d\n", rc);
+ ret = -EINVAL;
+ }
+ }
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+ vfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+}
+
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, int is_write)
{
@@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
+ struct fd_prot fd_prot;
+ sense_reason_t rc;
int ret = 0;
/*
@@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+
+ if (ret > 0 && cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ }
} else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+
+ if (cmd->prot_type) {
+ u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ ret = fd_do_prot_rw(cmd, &fd_prot, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
* Perform implicit vfs_fsync_range() for fd_do_writev() ops
@@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
+
+ if (ret > 0 && cmd->prot_type) {
+ ret = fd_do_prot_rw(cmd, &fd_prot, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
}
- if (ret < 0)
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+ vfree(fd_prot.prot_buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -700,6 +817,140 @@ static sector_t fd_get_blocks(struct se_device *dev)
dev->dev_attrib.block_size);
}
+static int fd_init_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_file, *file = fd_dev->fd_file;
+ struct inode *inode;
+ int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+ char buf[FD_MAX_DEV_PROT_NAME];
+
+ if (!file) {
+ pr_err("Unable to locate fd_dev->fd_file\n");
+ return -ENODEV;
+ }
+
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ pr_err("FILEIO Protection emulation only supported on"
+ " !S_ISBLK\n");
+ return -ENOSYS;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+ flags &= ~O_DSYNC;
+
+ snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+ fd_dev->fd_dev_name);
+
+ prot_file = filp_open(buf, flags, 0600);
+ if (IS_ERR(prot_file)) {
+ pr_err("filp_open(%s) failed\n", buf);
+ ret = PTR_ERR(prot_file);
+ return ret;
+ }
+ fd_dev->fd_prot_file = prot_file;
+
+ return 0;
+}
+
+static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
+ u32 unit_size, u32 *ref_tag, u16 app_tag,
+ bool inc_reftag)
+{
+ unsigned char *p = buf;
+ int i;
+
+ for (i = 0; i < unit_size; i += dev->prot_length) {
+ *((u16 *)&p[0]) = 0xffff;
+ *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
+ *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
+
+ if (inc_reftag)
+ (*ref_tag)++;
+
+ p += dev->prot_length;
+ }
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ unsigned char *buf;
+ loff_t pos = 0;
+ u32 ref_tag = 0;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+ int rc, ret = 0, size, len;
+ bool inc_reftag = false;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
+ }
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ ref_tag = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ case TARGET_DIF_TYPE1_PROT:
+ inc_reftag = true;
+ break;
+ default:
+ break;
+ }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+
+ prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+ size = prot_length;
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+ (unsigned long long)prot_length);
+
+ for (prot = 0; prot < prot_length; prot += unit_size) {
+
+ fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
+ inc_reftag);
+
+ len = min(unit_size, size);
+
+ rc = kernel_write(prot_fd, buf, len, pos);
+ if (rc != len) {
+ pr_err("vfs_write to prot file failed: %d\n", rc);
+ ret = -ENODEV;
+ goto out;
+ }
+ pos += len;
+ size -= len;
+ }
+
+out:
+ vfree(buf);
+ return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ if (!fd_dev->fd_prot_file)
+ return;
+
+ filp_close(fd_dev->fd_prot_file, NULL);
+ fd_dev->fd_prot_file = NULL;
+}
+
static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
@@ -730,6 +981,9 @@ static struct se_subsystem_api fileio_template = {
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
+ .init_prot = fd_init_prot,
+ .format_prot = fd_format_prot,
+ .free_prot = fd_free_prot,
};
static int __init fileio_module_init(void)
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index d7772c167685..182cbb295039 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,6 +4,7 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
+#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
@@ -18,6 +19,13 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_FORMAT_UNIT_SIZE 2048
+
+struct fd_prot {
+ unsigned char *prot_buf;
+ struct scatterlist *prot_sg;
+ u32 prot_sg_nents;
+};
struct fd_dev {
struct se_device dev;
@@ -32,6 +40,7 @@ struct fd_dev {
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
+ struct file *fd_prot_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f12760..554d4f75a75a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
struct block_device *bd = NULL;
+ struct blk_integrity *bi;
fmode_t mode;
int ret = -ENOMEM;
@@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev)
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
+ bi = bdev_get_integrity(bd);
+ if (bi) {
+ struct bio_set *bs = ib_dev->ibd_bio_set;
+
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+ !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+ pr_err("IBLOCK export of blk_integrity: %s not"
+ " supported\n", bi->name);
+ ret = -ENOSYS;
+ goto out_blkdev_put;
+ }
+
+ if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+ } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+ }
+
+ if (dev->dev_attrib.pi_prot_type) {
+ if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+ pr_err("Unable to allocate bioset for PI\n");
+ ret = -ENOMEM;
+ goto out_blkdev_put;
+ }
+ pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+ bs->bio_integrity_pool);
+ }
+ dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+ }
+
return 0;
+out_blkdev_put:
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
bioset_free(ib_dev->ibd_bio_set);
ib_dev->ibd_bio_set = NULL;
@@ -170,8 +203,10 @@ static void iblock_free_device(struct se_device *dev)
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- if (ib_dev->ibd_bio_set != NULL)
+ if (ib_dev->ibd_bio_set != NULL) {
+ bioset_integrity_free(ib_dev->ibd_bio_set);
bioset_free(ib_dev->ibd_bio_set);
+ }
kfree(ib_dev);
}
@@ -319,7 +354,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
+ bio->bi_iter.bi_sector = lba;
return bio;
}
@@ -586,13 +621,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
return bl;
}
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct scatterlist *sg;
+ int i, rc;
+
+ bi = bdev_get_integrity(ib_dev->ibd_bd);
+ if (!bi) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ if (!bip) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return -ENOMEM;
+ }
+
+ bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+ dev->prot_length;
+ bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
+
+ for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+
+ rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+ sg->offset);
+ if (rc != sg->length) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ return -ENOMEM;
+ }
+
+ pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+ sg_page(sg), sg->length, sg->offset);
+ }
+
+ return 0;
+}
+
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
- struct bio *bio;
+ struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
@@ -655,6 +735,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio)
goto fail_free_ibr;
+ bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -688,6 +769,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sg_num--;
}
+ if (cmd->prot_type) {
+ int rc = iblock_alloc_bip(cmd, bio_start);
+ if (rc)
+ goto fail_put_bios;
+ }
+
iblock_submit_bios(&list, rw);
iblock_complete_cmd(cmd);
return 0;
@@ -763,7 +850,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
-bool iblock_get_write_cache(struct se_device *dev)
+static bool iblock_get_write_cache(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 47b63b094cdc..de9cab708f45 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
@@ -77,9 +79,9 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp
const char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
- u32, void *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+ u32, struct se_device *);
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
+ type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index ed75cdd32cb0..2ee2936fa0bd 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -43,6 +43,11 @@
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
+/*
+ * Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
extern struct kmem_cache *t10_pr_reg_cache;
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4ffe5f2ec0e9..66a5aba5a0d9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-/* rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 sg_table_count)
{
- u32 i, j, page_count = 0, sg_per_table;
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
+ u32 i, j, page_count = 0, sg_per_table;
- if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
- return;
-
- sg_table = rd_dev->sg_table_array;
-
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
@@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
page_count++;
}
}
-
kfree(sg);
}
+ kfree(sg_table);
+ return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+ rd_dev->sg_table_count);
+
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
- kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
@@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
*
*
*/
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 total_sg_needed, unsigned char init_payload)
{
- u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+ u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
-
- if (rd_dev->rd_page_count <= 0) {
- pr_err("Illegal page count: %u for Ramdisk device\n",
- rd_dev->rd_page_count);
- return -EINVAL;
- }
-
- /* Don't need backing pages for NULLIO */
- if (rd_dev->rd_flags & RDF_NULLIO)
- return 0;
-
- total_sg_needed = rd_dev->rd_page_count;
-
- sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
- sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!sg_table) {
- pr_err("Unable to allocate memory for Ramdisk"
- " scatterlist tables\n");
- return -ENOMEM;
- }
-
- rd_dev->sg_table_array = sg_table;
- rd_dev->sg_table_count = sg_tables;
+ unsigned char *p;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,114 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
+
+ p = kmap(pg);
+ memset(p, init_payload, PAGE_SIZE);
+ kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
+ return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 sg_tables, total_sg_needed;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_page_count <= 0) {
+ pr_err("Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -EINVAL;
+ }
+
+ /* Don't need backing pages for NULLIO */
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+ if (rc)
+ return rc;
+
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
- " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
- rd_dev->rd_dev_id, rd_dev->rd_page_count,
- rd_dev->sg_table_count);
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+ rd_dev->sg_prot_count);
+
+ pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ rd_dev->sg_prot_array = NULL;
+ rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 total_sg_needed, sg_tables;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count / prot_length;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk protection"
+ " scatterlist tables\n");
+ return -ENOMEM;
+ }
+
+ rd_dev->sg_prot_array = sg_table;
+ rd_dev->sg_prot_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+ if (rc)
+ return rc;
+
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
@@ -278,6 +356,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_prot_count) {
+ sg_table = &rd_dev->sg_prot_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
+ }
+
+ pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
+}
+
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
@@ -292,6 +390,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
u32 rd_page;
u32 src_len;
u64 tmp;
+ sense_reason_t rc;
if (dev->rd_flags & RDF_NULLIO) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -314,6 +413,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
+ if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+ rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
+
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
@@ -375,6 +496,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
sg_miter_stop(&m);
+ if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+ rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ if (rc)
+ return rc;
+ }
+
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -456,6 +599,23 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
+static int rd_init_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ if (!dev->dev_attrib.pi_prot_type)
+ return 0;
+
+ return rd_build_prot_space(rd_dev, dev->prot_length);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ rd_release_prot_space(rd_dev);
+}
+
static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
@@ -481,6 +641,8 @@ static struct se_subsystem_api rd_mcp_template = {
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
+ .init_prot = rd_init_prot,
+ .free_prot = rd_free_prot,
};
int __init rd_module_init(void)
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 1789d1e14395..cc46a6a89b38 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -33,8 +33,12 @@ struct rd_dev {
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
+ /* Number of SG tables in sg_prot_array */
+ u32 sg_prot_count;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
+ /* Array of rd_dev_sg_table containing protection scatterlists */
+ struct rd_dev_sg_table *sg_prot_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 52ae54e60105..77e6531fb0a1 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@
#include "target_core_internal.h"
#include "target_core_ua.h"
-
+#include "target_core_alua.h"
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -105,6 +106,11 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;
+ /*
+ * Set P_TYPE and PROT_EN bits for DIF support
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
if (dev->transport->get_lbppbe)
buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -563,6 +569,44 @@ sbc_compare_and_write(struct se_cmd *cmd)
return TCM_NO_SENSE;
}
+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+ u32 sectors)
+{
+ if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+ return true;
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ if (!(cdb[1] & 0xe0))
+ return true;
+
+ cmd->reftag_seed = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ if (cdb[1] & 0xe0)
+ return false;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE1_PROT:
+ if (!(cdb[1] & 0xe0))
+ return true;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE0_PROT:
+ default:
+ return true;
+ }
+
+ cmd->prot_type = dev->dev_attrib.pi_prot_type;
+ cmd->prot_length = dev->prot_length * sectors;
+ cmd->prot_handover = PROT_SEPERATED;
+
+ return true;
+}
+
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
@@ -583,6 +627,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -590,6 +638,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -597,6 +649,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_rw = ops->execute_rw;
cmd->execute_cmd = sbc_execute_rw;
@@ -612,6 +668,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -621,6 +681,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -630,6 +694,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case WRITE_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (!sbc_check_prot(dev, cmd, cdb, sectors))
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -731,6 +799,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = sbc_emulate_readcapacity_16;
break;
+ case SAI_REPORT_REFERRALS:
+ cmd->execute_cmd = target_emulate_report_referrals;
+ break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
@@ -959,3 +1030,195 @@ err:
return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+ const void *p, sector_t sector, unsigned int ei_lba)
+{
+ int block_size = dev->dev_attrib.block_size;
+ __be16 csum;
+
+ csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+ if (sdt->guard_tag != csum) {
+ pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+ " csum 0x%04x\n", (unsigned long long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+ be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+ " sector MSB: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+ be32_to_cpu(sdt->ref_tag) != ei_lba) {
+ pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+ " ei_lba: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), ei_lba);
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+ struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *psg;
+ void *paddr, *addr;
+ unsigned int i, len, left;
+ unsigned int offset = sg_off;
+
+ left = sectors * dev->prot_length;
+
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+ unsigned int psg_len, copied = 0;
+
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
+
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ kunmap_atomic(addr);
+ }
+ kunmap_atomic(paddr);
+ }
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+ sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_dif_v1_tuple *sdt;
+ struct scatterlist *dsg, *psg = sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i, j, offset = sg_off;
+ sense_reason_t rc;
+
+ for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + sg->offset;
+
+ for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+ if (offset >= psg->length) {
+ kunmap_atomic(paddr);
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ offset = 0;
+ }
+
+ sdt = paddr + offset;
+
+ pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ if (sdt->app_tag == cpu_to_be16(0xffff)) {
+ sector++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ continue;
+ }
+
+ rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ ei_lba);
+ if (rc) {
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ cmd->bad_sector = sector;
+ return rc;
+ }
+
+ sector++;
+ ei_lba++;
+ offset += sizeof(struct se_dif_v1_tuple);
+ }
+
+ kunmap_atomic(paddr);
+ kunmap_atomic(daddr);
+ }
+ sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 021c3f4a4f00..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -100,6 +100,11 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
*/
if (dev->dev_attrib.emulate_3pc)
buf[5] |= 0x8;
+ /*
+ * Set Protection (PROTECT) bit when DIF has been enabled.
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ buf[5] |= 0x1;
buf[7] = 0x2; /* CmdQue=1 */
@@ -267,7 +272,7 @@ check_t10_vend_desc:
port = lun->lun_sep;
if (port) {
struct t10_alua_lu_gp *lu_gp;
- u32 padding, scsi_name_len;
+ u32 padding, scsi_name_len, scsi_target_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
@@ -365,16 +370,6 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
- /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
- scsi_name_len += 10;
- /* Check for 4-byte padding */
- padding = ((-scsi_name_len) & 3);
- if (padding != 0)
- scsi_name_len += padding;
- /* Header size + Designation descriptor */
- scsi_name_len += 4;
-
buf[off] =
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -402,13 +397,57 @@ check_scsi_name:
* shall be no larger than 256 and shall be a multiple
* of four.
*/
+ padding = ((-scsi_name_len) & 3);
if (padding)
scsi_name_len += padding;
+ if (scsi_name_len > 256)
+ scsi_name_len = 256;
buf[off-1] = scsi_name_len;
off += scsi_name_len;
/* Header size + Designation descriptor */
len += (scsi_name_len + 4);
+
+ /*
+ * Target device designator
+ */
+ buf[off] =
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target device: 10b */
+ buf[off] |= 0x20;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+ * SCSI name string identifier containing $FABRIC_MOD
+ * dependent information. For LIO-Target and iSCSI
+ * Target Port, this means "<iSCSI name>" in
+ * UTF-8 encoding.
+ */
+ scsi_target_len = sprintf(&buf[off], "%s",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ scsi_target_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ padding = ((-scsi_target_len) & 3);
+ if (padding)
+ scsi_target_len += padding;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
+
+ buf[off-1] = scsi_target_len;
+ off += scsi_target_len;
+
+ /* Header size + Designation descriptor */
+ len += (scsi_target_len + 4);
}
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
@@ -436,12 +475,26 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
buf[3] = 0x3c;
+ /*
+ * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+ * only for TYPE3 protection.
+ */
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ buf[4] = 0x5;
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ buf[4] = 0x4;
+
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
if (spc_check_dev_wce(dev))
buf[6] = 0x01;
+ /* If an LBA map is present set R_SUP */
+ spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ buf[8] = 0x10;
+ spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
return 0;
}
@@ -600,6 +653,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x0c;
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+
+ return 0;
+}
+
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
@@ -614,6 +681,7 @@ static struct {
{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+ { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
/* supported vital product data pages */
@@ -643,11 +711,15 @@ spc_emulate_inquiry(struct se_cmd *cmd)
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
- unsigned char buf[SE_INQUIRY_BUF];
+ unsigned char *buf;
sense_reason_t ret;
int p;
- memset(buf, 0, SE_INQUIRY_BUF);
+ buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate response buffer for INQUIRY\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
if (dev == tpg->tpg_virt_lun0.lun_se_dev)
buf[0] = 0x3f; /* Not connected */
@@ -680,9 +752,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
out:
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
+ kfree(buf);
if (!ret)
target_complete_cmd(cmd, GOOD);
@@ -785,6 +858,19 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
* status (see SAM-4).
*/
p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ /*
+ * From spc4r30, section 7.5.7 Control mode page
+ *
+ * Application Tag Owner (ATO) bit set to one.
+ *
+ * If the ATO bit is set to one the device server shall not modify the
+ * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+ * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+ * TAG field.
+ */
+ if (dev->dev_attrib.pi_prot_type)
+ p[5] |= 0x80;
+
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2a573de19a9f..c036595b17cf 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -656,7 +656,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return ret;
@@ -781,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
}
EXPORT_SYMBOL(core_tpg_deregister);
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
@@ -811,11 +811,11 @@ struct se_lun *core_tpg_pre_addlun(
return lun;
}
-int core_tpg_post_addlun(
+int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
- void *lun_ptr)
+ struct se_device *dev)
{
int ret;
@@ -823,7 +823,7 @@ int core_tpg_post_addlun(
if (ret < 0)
return ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = core_dev_export(dev, tpg, lun);
if (ret < 0) {
percpu_ref_cancel_init(&lun->lun_ref);
return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da0f623..2956250b7225 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
"mem_t failed\n");
goto out_free_tg_pt_gp_cache;
}
+ t10_alua_lba_map_cache = kmem_cache_create(
+ "t10_alua_lba_map_cache",
+ sizeof(struct t10_alua_lba_map),
+ __alignof__(struct t10_alua_lba_map), 0, NULL);
+ if (!t10_alua_lba_map_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_"
+ "cache failed\n");
+ goto out_free_tg_pt_gp_mem_cache;
+ }
+ t10_alua_lba_map_mem_cache = kmem_cache_create(
+ "t10_alua_lba_map_mem_cache",
+ sizeof(struct t10_alua_lba_map_member),
+ __alignof__(struct t10_alua_lba_map_member), 0, NULL);
+ if (!t10_alua_lba_map_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+ "cache failed\n");
+ goto out_free_lba_map_cache;
+ }
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
if (!target_completion_wq)
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_lba_map_mem_cache;
return 0;
+out_free_lba_map_mem_cache:
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+ kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_mem_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_lba_map_cache);
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
@@ -568,10 +594,11 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- if (!lun || !cmd->lun_ref_active)
+ if (!lun)
return;
- percpu_ref_put(&lun->lun_ref);
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
}
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -642,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -654,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1284,6 +1308,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sgl_count: scatterlist count for unidirectional mapping
* @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
* @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
*
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1296,7 +1322,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags,
struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
struct se_portal_group *se_tpg;
sense_reason_t rc;
@@ -1338,6 +1365,14 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
+ /*
+ * Save pointers for SGLs containing protection information,
+ * if present.
+ */
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
+ }
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
@@ -1380,6 +1415,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
return 0;
}
}
+
/*
* Check if we need to delay processing because of ALUA
* Active/NonOptimized primary access state..
@@ -1419,7 +1455,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
{
return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
@@ -1565,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2455,6 +2494,19 @@ static int transport_get_sense_codes(
return 0;
}
+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+ /* Place failed LBA in sense data information descriptor 0. */
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+ buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+ buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+ buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+ /* Descriptor Information: failing sector */
+ put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
sense_reason_t reason, int from_transport)
@@ -2648,6 +2700,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
break;
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK GUARD CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ /* CURRENT ERROR */
+ buffer[0] = 0x70;
+ buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+ buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+ transport_err_sector_info(buffer, cmd->bad_sector);
+ break;
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
default:
/* CURRENT ERROR */
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e7547c..505519b10cb7 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_nacl = nacl;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6b88a9958f61..669c536fd959 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@
static struct workqueue_struct *xcopy_wq = NULL;
/*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
* From target_core_device.c
*/
extern struct mutex g_device_mutex;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 479ec5621a4e..8b2c1aaf81de 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct se_session *se_sess = sess->se_sess;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
goto busy;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index c6932fb53a8d..e879da81ad93 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
return found;
}
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
struct ft_node_acl *acl;
@@ -552,7 +552,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_nodeacl = &ft_del_acl,
};
-int ft_register_configfs(void)
+static int ft_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
@@ -599,7 +599,7 @@ int ft_register_configfs(void)
return 0;
}
-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
{
if (!ft_configfs)
return;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f35a1f75b15b..5f88d767671e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -29,6 +29,19 @@ config THERMAL_HWMON
Say 'Y' here if you want all thermal sensors to
have hwmon sysfs interface too.
+config THERMAL_OF
+ bool
+ prompt "APIs to parse thermal data out of device tree"
+ depends on OF
+ default y
+ help
+ This option provides helpers to add support to
+ read and parse thermal data definitions out of the
+ device tree blob.
+
+ Say 'Y' here if you need to build thermal infrastructure
+ based on device tree.
+
choice
prompt "Default Thermal governor"
default THERMAL_DEFAULT_GOV_STEP_WISE
@@ -79,6 +92,7 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
+ depends on THERMAL_OF
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -121,7 +135,8 @@ config SPEAR_THERMAL
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework.
@@ -192,6 +207,21 @@ config X86_PKG_TEMP_THERMAL
two trip points which can be set by user to get notifications via thermal
notification methods.
+config ACPI_INT3403_THERMAL
+ tristate "ACPI INT3403 thermal driver"
+ depends on X86 && ACPI
+ help
+ Newer laptops and tablets that use ACPI may have thermal sensors
+ outside the core CPU/SOC for thermal safety reasons. These
+ temperature sensors are also exposed for the OS to use via the
+ so-called INT3403 ACPI object. This driver will, on devices that have
+ such sensors, expose the temperature information from these sensors
+ to userspace via the normal thermal framework. This means that a wide
+ range of applications and GUI widgets can show this information to
+ the user or use this information for making decisions. For example,
+ the Intel Thermal Daemon can use this information to allow the user
+ to choose to run the laptop without turning on the fans.
+
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 584b36319d51..54e4ec9eb5df 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -7,6 +7,7 @@ thermal_sys-y += thermal_core.o
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
+thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o
# governors
thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
@@ -29,3 +30,4 @@ obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
+obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02a46f23d14c..4246262c4bd2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -174,6 +174,13 @@ static int get_property(unsigned int cpu, unsigned long input,
max_level++;
}
+ /* No valid cpu frequency entry */
+ if (max_level == 0)
+ return -EINVAL;
+
+ /* max_level is an index, not a counter */
+ max_level--;
+
/* get max level */
if (property == GET_MAXL) {
*output = (unsigned int)max_level;
@@ -181,7 +188,7 @@ static int get_property(unsigned int cpu, unsigned long input,
}
if (property == GET_FREQ)
- level = descend ? input : (max_level - input - 1);
+ level = descend ? input : (max_level - input);
for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
/* ignore invalid entry */
@@ -197,7 +204,7 @@ static int get_property(unsigned int cpu, unsigned long input,
if (property == GET_LEVEL && (unsigned int)input == freq) {
/* get level by frequency */
- *output = descend ? j : (max_level - j - 1);
+ *output = descend ? j : (max_level - j);
return 0;
}
if (property == GET_FREQ && level == j) {
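To make the new indexing concrete, a worked example (hypothetical three-entry ascending table, not part of the patch): the loop counts three valid rows, so max_level becomes 3 and is then decremented to the index 2. GET_MAXL therefore reports the deepest cooling state (2) rather than the number of states (3). For an ascending table GET_FREQ computes level = max_level - input, so cooling state 0 still maps to index 2 (the highest frequency) and state 2 to index 0 (the lowest); GET_LEVEL applies the inverse mapping, max_level - j.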
@@ -417,18 +424,21 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
};
/**
- * cpufreq_cooling_register - function to create cpufreq cooling device.
+ * __cpufreq_cooling_register - helper function to create cpufreq cooling device
+ * @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
- * cooling devices.
+ * cooling devices. It also gives the opportunity to link the cooling device
+ * with a device tree node, in order to bind it via the thermal DT code.
*
* Return: a valid struct thermal_cooling_device pointer on success,
* on failure, it returns a corresponding ERR_PTR().
*/
-struct thermal_cooling_device *
-cpufreq_cooling_register(const struct cpumask *clip_cpus)
+static struct thermal_cooling_device *
+__cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev = NULL;
@@ -467,8 +477,8 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
cpufreq_dev->id);
- cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+ &cpufreq_cooling_ops);
if (IS_ERR(cool_dev)) {
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
@@ -488,9 +498,50 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
return cool_dev;
}
+
+/**
+ * cpufreq_cooling_register - function to create cpufreq cooling device.
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus);
+}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
+ * of_cpufreq_cooling_register - function to create cpufreq cooling device.
+ * @np: a valid struct device_node to the cooling device device tree node
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices. Using this API, the cpufreq cooling device will be
+ * linked to the device tree node provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
+{
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ return __cpufreq_cooling_register(np, clip_cpus);
+}
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
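A minimal usage sketch of the new DT-aware entry point, assuming a hypothetical platform driver (the names and the use of cpu_present_mask are illustrative, not part of this patch):

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

/* Hypothetical probe: link the cooling device to the driver's DT node
 * so that of-thermal cooling maps can bind it. */
static int foo_cooling_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);

	platform_set_drvdata(pdev, cdev);
	return 0;
}

On removal such a driver would call cpufreq_cooling_unregister(cdev), which is unchanged by this patch.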
+
+/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 1d6c801c1eb9..45af765a3198 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
@@ -73,6 +74,7 @@ struct imx_thermal_data {
unsigned long last_temp;
bool irq_enabled;
int irq;
+ struct clk *thermal_clk;
};
static void imx_set_alarm_temp(struct imx_thermal_data *data,
@@ -284,7 +286,7 @@ static int imx_unbind(struct thermal_zone_device *tz,
return 0;
}
-static const struct thermal_zone_device_ops imx_tz_ops = {
+static struct thermal_zone_device_ops imx_tz_ops = {
.bind = imx_bind,
.unbind = imx_unbind,
.get_temp = imx_get_temp,
@@ -457,6 +459,22 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
+ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->thermal_clk)) {
+ dev_warn(&pdev->dev, "failed to get thermal clk!\n");
+ } else {
+ /*
+ * The thermal sensor needs its clock enabled to return correct
+ * values. Normally the clock would be enabled before a measurement
+ * and disabled afterwards, but when the alarm function is enabled
+ * the hardware measures the temperature periodically, so the clock
+ * must be kept always on for the alarm function to work.
+ */
+ ret = clk_prepare_enable(data->thermal_clk);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ }
+
/* Enable measurements at ~ 10 Hz */
regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
@@ -478,6 +496,8 @@ static int imx_thermal_remove(struct platform_device *pdev)
/* Disable measurements */
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+ if (!IS_ERR(data->thermal_clk))
+ clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
cpufreq_cooling_unregister(data->cdev);
@@ -490,27 +510,30 @@ static int imx_thermal_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
- u32 val;
- regmap_read(map, TEMPSENSE0, &val);
- if ((val & TEMPSENSE0_POWER_DOWN) == 0) {
- /*
- * If a measurement is taking place, wait for a long enough
- * time for it to finish, and then check again. If it still
- * does not finish, something must go wrong.
- */
- udelay(50);
- regmap_read(map, TEMPSENSE0, &val);
- if ((val & TEMPSENSE0_POWER_DOWN) == 0)
- return -ETIMEDOUT;
- }
+ /*
+ * The thermal sensor must be disabled here; otherwise, if the
+ * thermal core tries to read the temperature before the sensor has
+ * resumed, a wrong value will be returned because the sensor is
+ * still powered down.
+ */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+ data->mode = THERMAL_DEVICE_DISABLED;
return 0;
}
static int imx_thermal_resume(struct device *dev)
{
- /* Nothing to do for now */
+ struct imx_thermal_data *data = dev_get_drvdata(dev);
+ struct regmap *map = data->tempmon;
+
+ /* Enable the thermal sensor after resume */
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->mode = THERMAL_DEVICE_ENABLED;
+
return 0;
}
#endif
@@ -522,6 +545,7 @@ static const struct of_device_id of_imx_thermal_match[] = {
{ .compatible = "fsl,imx6q-tempmon", },
{ /* end */ }
};
+MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
static struct platform_driver imx_thermal = {
.driver = {
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
new file mode 100644
index 000000000000..1301681d9a77
--- /dev/null
+++ b/drivers/thermal/int3403_thermal.c
@@ -0,0 +1,237 @@
+/*
+ * ACPI INT3403 thermal driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+
+#define INT3403_TYPE_SENSOR 0x03
+#define INT3403_PERF_CHANGED_EVENT 0x80
+#define INT3403_THERMAL_EVENT 0x90
+
+#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
+#define KELVIN_OFFSET 2732
+#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
+
+#define ACPI_INT3403_CLASS "int3403"
+#define ACPI_INT3403_FILE_STATE "state"
+
+struct int3403_sensor {
+ struct thermal_zone_device *tzone;
+ unsigned long *thresholds;
+};
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzone,
+ unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ unsigned long long tmp;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
+
+ return 0;
+}
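A quick worked example of the conversion macros above (the reading is hypothetical): a _TMP value of 3032 deci-kelvin becomes (3032 - 2732) * 100 = 30000 millicelsius, i.e. 30 °C, and MILLI_CELSIUS_TO_DECI_KELVIN(30000, KELVIN_OFFSET) maps it back to 3032.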
+
+static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
+ int trip, unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ unsigned long long hyst;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+
+ return 0;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzone,
+ int trip, unsigned long *temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ struct int3403_sensor *obj = acpi_driver_data(device);
+
+ /*
+ * get_trip_temp is a mandatory callback, but the PATx method
+ * doesn't return any value, so return the cached value that was
+ * last set from user space.
+ */
+ *temp = obj->thresholds[trip];
+
+ return 0;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ /* Mandatory callback, may not mean much here */
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
+ unsigned long temp)
+{
+ struct acpi_device *device = tzone->devdata;
+ acpi_status status;
+ char name[10];
+ int ret = 0;
+ struct int3403_sensor *obj = acpi_driver_data(device);
+
+ snprintf(name, sizeof(name), "PAT%d", trip);
+ if (acpi_has_method(device->handle, name)) {
+ status = acpi_execute_simple_method(device->handle, name,
+ MILLI_CELSIUS_TO_DECI_KELVIN(temp,
+ KELVIN_OFFSET));
+ if (ACPI_FAILURE(status))
+ ret = -EIO;
+ else
+ obj->thresholds[trip] = temp;
+ } else {
+ ret = -EIO;
+ dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
+ }
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+ .get_trip_hyst = sys_get_trip_hyst,
+};
+
+static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+{
+ struct int3403_sensor *obj;
+
+ if (!device)
+ return;
+
+ obj = acpi_driver_data(device);
+ if (!obj)
+ return;
+
+ switch (event) {
+ case INT3403_PERF_CHANGED_EVENT:
+ break;
+ case INT3403_THERMAL_EVENT:
+ thermal_zone_device_update(obj->tzone);
+ break;
+ default:
+ dev_err(&device->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+static int acpi_int3403_add(struct acpi_device *device)
+{
+ int result = 0;
+ unsigned long long ptyp;
+ acpi_status status;
+ struct int3403_sensor *obj;
+ unsigned long long trip_cnt;
+ int trip_mask = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(device->handle, "PTYP", NULL, &ptyp);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ if (ptyp != INT3403_TYPE_SENSOR)
+ return -EINVAL;
+
+ obj = devm_kzalloc(&device->dev, sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ device->driver_data = obj;
+
+ status = acpi_evaluate_integer(device->handle, "PATC", NULL,
+ &trip_cnt);
+ if (ACPI_FAILURE(status))
+ trip_cnt = 0;
+
+ if (trip_cnt) {
+ /* We have to cache the thresholds; they can't be read back */
+ obj->thresholds = devm_kzalloc(&device->dev,
+ sizeof(*obj->thresholds) * trip_cnt,
+ GFP_KERNEL);
+ if (!obj->thresholds)
+ return -ENOMEM;
+ trip_mask = BIT(trip_cnt) - 1;
+ }
+ obj->tzone = thermal_zone_device_register(acpi_device_bid(device),
+ trip_cnt, trip_mask, device, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(obj->tzone)) {
+ result = PTR_ERR(obj->tzone);
+ return result;
+ }
+
+ strcpy(acpi_device_name(device), "INT3403");
+ strcpy(acpi_device_class(device), ACPI_INT3403_CLASS);
+
+ return 0;
+}
+
+static int acpi_int3403_remove(struct acpi_device *device)
+{
+ struct int3403_sensor *obj;
+
+ obj = acpi_driver_data(device);
+ thermal_zone_device_unregister(obj->tzone);
+
+ return 0;
+}
+
+ACPI_MODULE_NAME("int3403");
+static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT3403", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
+
+static struct acpi_driver acpi_int3403_driver = {
+ .name = "INT3403",
+ .class = ACPI_INT3403_CLASS,
+ .ids = int3403_device_ids,
+ .ops = {
+ .add = acpi_int3403_add,
+ .remove = acpi_int3403_remove,
+ .notify = acpi_thermal_notify,
+ },
+};
+
+module_acpi_driver(acpi_int3403_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index d833c8f5b465..a084325f1386 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -206,6 +206,15 @@ static void find_target_mwait(void)
}
+static bool has_pkg_state_counter(void)
+{
+ u64 tmp;
+ return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) ||
+ !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp);
+}
+
static u64 pkg_state_counter(void)
{
u64 val;
@@ -498,7 +507,7 @@ static int start_power_clamp(void)
struct task_struct *thread;
/* check if pkg cstate counter is completely 0, abort in this case */
- if (!pkg_state_counter()) {
+ if (!has_pkg_state_counter()) {
pr_err("pkg cstate counter not functional, abort\n");
return -EINVAL;
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
new file mode 100644
index 000000000000..04b1be7fa018
--- /dev/null
+++ b/drivers/thermal/of-thermal.c
@@ -0,0 +1,849 @@
+/*
+ * of-thermal.c - Generic Thermal Management device tree support.
+ *
+ * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#include "thermal_core.h"
+
+/*** Private data structures to represent thermal device tree data ***/
+
+/**
+ * struct __thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct __thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
+/**
+ * struct __thermal_bind_params - a match between trip and cooling device
+ * @cooling_device: a pointer to identify the referred cooling device
+ * @trip_id: the trip point index
+ * @usage: the percentage (from 0 to 100) of cooling contribution
+ * @min: minimum cooling state used at this trip point
+ * @max: maximum cooling state used at this trip point
+ */
+
+struct __thermal_bind_params {
+ struct device_node *cooling_device;
+ unsigned int trip_id;
+ unsigned int usage;
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
+ * struct __thermal_zone - internal representation of a thermal zone
+ * @mode: current thermal zone device mode (enabled/disabled)
+ * @passive_delay: polling interval while passive cooling is activated
+ * @polling_delay: zone polling interval
+ * @ntrips: number of trip points
+ * @trips: an array of trip points (0..ntrips - 1)
+ * @num_tbps: number of thermal bind params
+ * @tbps: an array of thermal bind params (0..num_tbps - 1)
+ * @sensor_data: sensor private data used while reading temperature and trend
+ * @get_temp: sensor callback to read temperature
+ * @get_trend: sensor callback to read temperature trend
+ */
+
+struct __thermal_zone {
+ enum thermal_device_mode mode;
+ int passive_delay;
+ int polling_delay;
+
+ /* trip data */
+ int ntrips;
+ struct __thermal_trip *trips;
+
+ /* cooling binding data */
+ int num_tbps;
+ struct __thermal_bind_params *tbps;
+
+ /* sensor interface */
+ void *sensor_data;
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+};
+
+/*** DT thermal zone device callbacks ***/
+
+static int of_thermal_get_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data->get_temp)
+ return -EINVAL;
+
+ return data->get_temp(data->sensor_data, temp);
+}
+
+static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
+ enum thermal_trend *trend)
+{
+ struct __thermal_zone *data = tz->devdata;
+ long dev_trend;
+ int r;
+
+ if (!data->get_trend)
+ return -EINVAL;
+
+ r = data->get_trend(data->sensor_data, &dev_trend);
+ if (r)
+ return r;
+
+ /* TODO: These intervals might have some thresholds, but in core code */
+ if (dev_trend > 0)
+ *trend = THERMAL_TREND_RAISING;
+ else if (dev_trend < 0)
+ *trend = THERMAL_TREND_DROPPING;
+ else
+ *trend = THERMAL_TREND_STABLE;
+
+ return 0;
+}
+
+static int of_thermal_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ struct __thermal_zone *data = thermal->devdata;
+ int i;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ /* find where to bind */
+ for (i = 0; i < data->num_tbps; i++) {
+ struct __thermal_bind_params *tbp = data->tbps + i;
+
+ if (tbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(thermal,
+ tbp->trip_id, cdev,
+ tbp->min,
+ tbp->max);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int of_thermal_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ struct __thermal_zone *data = thermal->devdata;
+ int i;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ /* find where to unbind */
+ for (i = 0; i < data->num_tbps; i++) {
+ struct __thermal_bind_params *tbp = data->tbps + i;
+
+ if (tbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_unbind_cooling_device(thermal,
+ tbp->trip_id, cdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int of_thermal_get_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode *mode)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ *mode = data->mode;
+
+ return 0;
+}
+
+static int of_thermal_set_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode mode)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ mutex_lock(&tz->lock);
+
+ if (mode == THERMAL_DEVICE_ENABLED)
+ tz->polling_delay = data->polling_delay;
+ else
+ tz->polling_delay = 0;
+
+ mutex_unlock(&tz->lock);
+
+ data->mode = mode;
+ thermal_zone_device_update(tz);
+
+ return 0;
+}
+
+static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type *type)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *type = data->trips[trip].type;
+
+ return 0;
+}
+
+static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *temp = data->trips[trip].temperature;
+
+ return 0;
+}
+
+static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
+ unsigned long temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ /* thermal framework should take care of data->mask & (1 << trip) */
+ data->trips[trip].temperature = temp;
+
+ return 0;
+}
+
+static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
+ unsigned long *hyst)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ *hyst = data->trips[trip].hysteresis;
+
+ return 0;
+}
+
+static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
+ unsigned long hyst)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+ /* thermal framework should take care of data->mask & (1 << trip) */
+ data->trips[trip].hysteresis = hyst;
+
+ return 0;
+}
+
+static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+ int i;
+
+ for (i = 0; i < data->ntrips; i++)
+ if (data->trips[i].type == THERMAL_TRIP_CRITICAL) {
+ *temp = data->trips[i].temperature;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct thermal_zone_device_ops of_thermal_ops = {
+ .get_mode = of_thermal_get_mode,
+ .set_mode = of_thermal_set_mode,
+
+ .get_trip_type = of_thermal_get_trip_type,
+ .get_trip_temp = of_thermal_get_trip_temp,
+ .set_trip_temp = of_thermal_set_trip_temp,
+ .get_trip_hyst = of_thermal_get_trip_hyst,
+ .set_trip_hyst = of_thermal_set_trip_hyst,
+ .get_crit_temp = of_thermal_get_crit_temp,
+
+ .bind = of_thermal_bind,
+ .unbind = of_thermal_unbind,
+};
+
+/*** sensor API ***/
+
+static struct thermal_zone_device *
+thermal_zone_of_add_sensor(struct device_node *zone,
+ struct device_node *sensor, void *data,
+ int (*get_temp)(void *, long *),
+ int (*get_trend)(void *, long *))
+{
+ struct thermal_zone_device *tzd;
+ struct __thermal_zone *tz;
+
+ tzd = thermal_zone_get_zone_by_name(zone->name);
+ if (IS_ERR(tzd))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ tz = tzd->devdata;
+
+ mutex_lock(&tzd->lock);
+ tz->get_temp = get_temp;
+ tz->get_trend = get_trend;
+ tz->sensor_data = data;
+
+ tzd->ops->get_temp = of_thermal_get_temp;
+ tzd->ops->get_trend = of_thermal_get_trend;
+ mutex_unlock(&tzd->lock);
+
+ return tzd;
+}
+
+/**
+ * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
+ * @dev: a valid struct device pointer of a sensor device. Must contain
+ * a valid .of_node, for the sensor node.
+ * @sensor_id: a sensor identifier, in case the sensor IP has more
+ * than one sensor
+ * @data: a private pointer (owned by the caller) that will be passed
+ * back, when a temperature reading is needed.
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ *
+ * This function will search the list of thermal zones described in the
+ * device tree and look for the zone that refers to the sensor device
+ * pointed to by @dev->of_node as its temperature provider. For the zone
+ * pointing to the sensor node, the sensor will be added to the DT
+ * thermal zone device.
+ *
+ * The thermal zone temperature is provided by the @get_temp function
+ * pointer. When called, it will have the private pointer @data back.
+ *
+ * The thermal zone temperature trend is provided by the @get_trend function
+ * pointer. When called, it will have the private pointer @data back.
+ *
+ * TODO:
+ * 01 - This function must enqueue the new sensor instead of using
+ * it as the only source of temperature values.
+ *
+ * 02 - There must be a way to match the sensor with all thermal zones
+ * that refer to it.
+ *
+ * Return: On success returns a valid struct thermal_zone_device,
+ * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * check the return value with help of IS_ERR() helper.
+ */
+struct thermal_zone_device *
+thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
+ void *data, int (*get_temp)(void *, long *),
+ int (*get_trend)(void *, long *))
+{
+ struct device_node *np, *child, *sensor_np;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ sensor_np = dev->of_node;
+
+ for_each_child_of_node(np, child) {
+ struct of_phandle_args sensor_specs;
+ int ret, id;
+
+ /* For now, thermal framework supports only 1 sensor per zone */
+ ret = of_parse_phandle_with_args(child, "thermal-sensors",
+ "#thermal-sensor-cells",
+ 0, &sensor_specs);
+ if (ret)
+ continue;
+
+ if (sensor_specs.args_count >= 1) {
+ id = sensor_specs.args[0];
+ WARN(sensor_specs.args_count > 1,
+ "%s: too many cells in sensor specifier %d\n",
+ sensor_specs.np->name, sensor_specs.args_count);
+ } else {
+ id = 0;
+ }
+
+ if (sensor_specs.np == sensor_np && id == sensor_id) {
+ of_node_put(np);
+ return thermal_zone_of_add_sensor(child, sensor_np,
+ data,
+ get_temp,
+ get_trend);
+ }
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
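A minimal registration sketch for a sensor driver, assuming a hypothetical platform device whose of_node is referenced by a thermal zone (all names and values are illustrative, not part of this patch):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

/* Hypothetical read callback; a real driver would read the hardware here. */
static int foo_get_temp(void *data, long *temp)
{
	*temp = 45000;	/* millicelsius */
	return 0;
}

static int foo_sensor_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd;

	/* No trend callback: of_thermal_get_trend will return -EINVAL. */
	tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, NULL,
					      foo_get_temp, NULL);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	platform_set_drvdata(pdev, tzd);
	return 0;
}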
+
+/**
+ * thermal_zone_of_sensor_unregister - unregisters a sensor from a DT thermal zone
+ * @dev: a valid struct device pointer of a sensor device. Must contain
+ * a valid .of_node, for the sensor node.
+ * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
+ *
+ * This function removes the sensor callbacks and private data from the
+ * thermal zone device registered with the thermal_zone_of_sensor_register()
+ * API. It also silences the zone by removing the .get_temp() and
+ * .get_trend() thermal zone device callbacks.
+ *
+ * TODO: When the support to several sensors per zone is added, this
+ * function must search the sensor list based on @dev parameter.
+ *
+ */
+void thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tzd)
+{
+ struct __thermal_zone *tz;
+
+ if (!dev || !tzd || !tzd->devdata)
+ return;
+
+ tz = tzd->devdata;
+
+ /* no __thermal_zone, nothing to be done */
+ if (!tz)
+ return;
+
+ mutex_lock(&tzd->lock);
+ tzd->ops->get_temp = NULL;
+ tzd->ops->get_trend = NULL;
+
+ tz->get_temp = NULL;
+ tz->get_trend = NULL;
+ tz->sensor_data = NULL;
+ mutex_unlock(&tzd->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
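The matching teardown for the sketch above (hypothetical remove path):

static int foo_sensor_remove(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd = platform_get_drvdata(pdev);

	/* Detach the callbacks; the zone itself stays registered. */
	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	return 0;
}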
+
+/*** functions parsing device tree nodes ***/
+
+/**
+ * thermal_of_populate_bind_params - parse and fill cooling map data
+ * @np: DT node containing a cooling-map node
+ * @__tbp: data structure to be filled with cooling map info
+ * @trips: array of thermal zone trip points
+ * @ntrips: number of trip points inside trips.
+ *
+ * This function parses a cooling-map type of node represented by
+ * @np parameter and fills the read data into @__tbp data structure.
+ * It needs the already parsed array of trip points of the thermal zone
+ * under consideration.
+ *
+ * Return: 0 on success, proper error code otherwise
+ */
+static int thermal_of_populate_bind_params(struct device_node *np,
+ struct __thermal_bind_params *__tbp,
+ struct __thermal_trip *trips,
+ int ntrips)
+{
+ struct of_phandle_args cooling_spec;
+ struct device_node *trip;
+ int ret, i;
+ u32 prop;
+
+ /* Default weight. Usage is optional */
+ __tbp->usage = 0;
+ ret = of_property_read_u32(np, "contribution", &prop);
+ if (ret == 0)
+ __tbp->usage = prop;
+
+ trip = of_parse_phandle(np, "trip", 0);
+ if (!trip) {
+ pr_err("missing trip property\n");
+ return -ENODEV;
+ }
+
+ /* match using device_node */
+ for (i = 0; i < ntrips; i++)
+ if (trip == trips[i].np) {
+ __tbp->trip_id = i;
+ break;
+ }
+
+ if (i == ntrips) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ ret = of_parse_phandle_with_args(np, "cooling-device", "#cooling-cells",
+ 0, &cooling_spec);
+ if (ret < 0) {
+ pr_err("missing cooling_device property\n");
+ goto end;
+ }
+ __tbp->cooling_device = cooling_spec.np;
+ if (cooling_spec.args_count >= 2) { /* at least min and max */
+ __tbp->min = cooling_spec.args[0];
+ __tbp->max = cooling_spec.args[1];
+ } else {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ }
+
+end:
+ of_node_put(trip);
+
+ return ret;
+}
+
+/**
+ * It maps 'enum thermal_trip_type' found in include/linux/thermal.h
+ * into the device tree binding of 'trip', property type.
+ */
+static const char * const trip_types[] = {
+ [THERMAL_TRIP_ACTIVE] = "active",
+ [THERMAL_TRIP_PASSIVE] = "passive",
+ [THERMAL_TRIP_HOT] = "hot",
+ [THERMAL_TRIP_CRITICAL] = "critical",
+};
+
+/**
+ * thermal_of_get_trip_type - Get trip point type for a given device_node
+ * @np: Pointer to the given device_node
+ * @type: Pointer to resulting trip type
+ *
+ * The function gets the trip type string from the 'type' property and
+ * stores the matching index from the trip_types table in @type.
+ *
+ * Return: 0 on success, or errno in error case.
+ */
+static int thermal_of_get_trip_type(struct device_node *np,
+ enum thermal_trip_type *type)
+{
+ const char *t;
+ int err, i;
+
+ err = of_property_read_string(np, "type", &t);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(trip_types); i++)
+ if (!strcasecmp(t, trip_types[i])) {
+ *type = i;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * thermal_of_populate_trip - parse and fill one trip point data
+ * @np: DT node containing a trip point node
+ * @trip: trip point data structure to be filled up
+ *
+ * This function parses a trip point type of node represented by
+ * @np parameter and fills the read data into @trip data structure.
+ *
+ * Return: 0 on success, proper error code otherwise
+ */
+static int thermal_of_populate_trip(struct device_node *np,
+ struct __thermal_trip *trip)
+{
+ int prop;
+ int ret;
+
+ ret = of_property_read_u32(np, "temperature", &prop);
+ if (ret < 0) {
+ pr_err("missing temperature property\n");
+ return ret;
+ }
+ trip->temperature = prop;
+
+ ret = of_property_read_u32(np, "hysteresis", &prop);
+ if (ret < 0) {
+ pr_err("missing hysteresis property\n");
+ return ret;
+ }
+ trip->hysteresis = prop;
+
+ ret = thermal_of_get_trip_type(np, &trip->type);
+ if (ret < 0) {
+ pr_err("wrong trip type property\n");
+ return ret;
+ }
+
+ /* Required for cooling map matching */
+ trip->np = np;
+
+ return 0;
+}
+
+/**
+ * thermal_of_build_thermal_zone - parse and fill one thermal zone data
+ * @np: DT node containing a thermal zone node
+ *
+ * This function parses a thermal zone type of node represented by
+ * @np parameter and fills the read data into a __thermal_zone data structure
+ * and returns a pointer to it.
+ *
+ * TODO: Missing properties to parse: thermal-sensor-names and coefficients
+ *
+ * Return: On success returns a valid struct __thermal_zone,
+ * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * check the return value with help of IS_ERR() helper.
+ */
+static struct __thermal_zone *
+thermal_of_build_thermal_zone(struct device_node *np)
+{
+ struct device_node *child = NULL, *gchild;
+ struct __thermal_zone *tz;
+ int ret, i;
+ u32 prop;
+
+ if (!np) {
+ pr_err("no thermal zone np\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ tz = kzalloc(sizeof(*tz), GFP_KERNEL);
+ if (!tz)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_property_read_u32(np, "polling-delay-passive", &prop);
+ if (ret < 0) {
+ pr_err("missing polling-delay-passive property\n");
+ goto free_tz;
+ }
+ tz->passive_delay = prop;
+
+ ret = of_property_read_u32(np, "polling-delay", &prop);
+ if (ret < 0) {
+ pr_err("missing polling-delay property\n");
+ goto free_tz;
+ }
+ tz->polling_delay = prop;
+
+ /* trips */
+ child = of_get_child_by_name(np, "trips");
+
+ /* No trips provided */
+ if (!child)
+ goto finish;
+
+ tz->ntrips = of_get_child_count(child);
+ if (tz->ntrips == 0) /* must have at least one child */
+ goto finish;
+
+ tz->trips = kzalloc(tz->ntrips * sizeof(*tz->trips), GFP_KERNEL);
+ if (!tz->trips) {
+ ret = -ENOMEM;
+ goto free_tz;
+ }
+
+ i = 0;
+ for_each_child_of_node(child, gchild) {
+ ret = thermal_of_populate_trip(gchild, &tz->trips[i++]);
+ if (ret)
+ goto free_trips;
+ }
+
+ of_node_put(child);
+
+ /* cooling-maps */
+ child = of_get_child_by_name(np, "cooling-maps");
+
+ /* cooling-maps not provided */
+ if (!child)
+ goto finish;
+
+ tz->num_tbps = of_get_child_count(child);
+ if (tz->num_tbps == 0)
+ goto finish;
+
+ tz->tbps = kzalloc(tz->num_tbps * sizeof(*tz->tbps), GFP_KERNEL);
+ if (!tz->tbps) {
+ ret = -ENOMEM;
+ goto free_trips;
+ }
+
+ i = 0;
+ for_each_child_of_node(child, gchild)
+ ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
+ tz->trips, tz->ntrips);
+ if (ret)
+ goto free_tbps;
+
+finish:
+ of_node_put(child);
+ tz->mode = THERMAL_DEVICE_DISABLED;
+
+ return tz;
+
+free_tbps:
+ kfree(tz->tbps);
+free_trips:
+ kfree(tz->trips);
+free_tz:
+ kfree(tz);
+ of_node_put(child);
+
+ return ERR_PTR(ret);
+}
+
+static inline void of_thermal_free_zone(struct __thermal_zone *tz)
+{
+ kfree(tz->tbps);
+ kfree(tz->trips);
+ kfree(tz);
+}
+
+/**
+ * of_parse_thermal_zones - parse device tree thermal data
+ *
+ * Initialization function that can be called by machine initialization
+ * code to parse thermal data and populate the thermal framework
+ * with hardware thermal zones info. This function only parses thermal zones.
+ * Cooling device and sensor device nodes are supposed to be parsed
+ * by their respective drivers.
+ *
+ * Return: 0 on success, proper error code otherwise
+ *
+ */
+int __init of_parse_thermal_zones(void)
+{
+ struct device_node *np, *child;
+ struct __thermal_zone *tz;
+ struct thermal_zone_device_ops *ops;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_debug("unable to find thermal zones\n");
+ return 0; /* Run successfully on systems without thermal DT */
+ }
+
+ for_each_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+ struct thermal_zone_params *tzp;
+
+ tz = thermal_of_build_thermal_zone(child);
+ if (IS_ERR(tz)) {
+ pr_err("failed to build thermal zone %s: %ld\n",
+ child->name,
+ PTR_ERR(tz));
+ continue;
+ }
+
+ ops = kmemdup(&of_thermal_ops, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ goto exit_free;
+
+ tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
+ if (!tzp) {
+ kfree(ops);
+ goto exit_free;
+ }
+
+ /* No hwmon because there might be hwmon drivers registering */
+ tzp->no_hwmon = true;
+
+ zone = thermal_zone_device_register(child->name, tz->ntrips,
+ 0, tz,
+ ops, tzp,
+ tz->passive_delay,
+ tz->polling_delay);
+ if (IS_ERR(zone)) {
+ pr_err("Failed to build %s zone %ld\n", child->name,
+ PTR_ERR(zone));
+ kfree(tzp);
+ kfree(ops);
+ of_thermal_free_zone(tz);
+ /* keep going and attempt to build the remaining zones */
+ }
+ }
+
+ return 0;
+
+exit_free:
+ of_thermal_free_zone(tz);
+
+ /* no memory available, so free what we have built */
+ of_thermal_destroy_zones();
+
+ return -ENOMEM;
+}
+
+/**
+ * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ *
+ * Finds all zones parsed and added to the thermal framework and remove them
+ * from the system, together with their resources.
+ *
+ */
+void of_thermal_destroy_zones(void)
+{
+ struct device_node *np, *child;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_err("unable to find thermal zones\n");
+ return;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+
+ zone = thermal_zone_get_zone_by_name(child->name);
+ if (IS_ERR(zone))
+ continue;
+
+ thermal_zone_device_unregister(zone);
+ kfree(zone->tzp);
+ kfree(zone->ops);
+ of_thermal_free_zone(zone->devdata);
+ }
+}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 88f92e1a9944..79a09d02bbca 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -408,7 +408,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/* enable temperature comparation */
rcar_thermal_common_write(common, ENR, 0x00030303);
- idle = 0; /* polling delaye is not needed */
+ idle = 0; /* polling delay is not needed */
}
for (i = 0;; i++) {
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index c2301da08ac7..3f5ad25ddca8 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -280,7 +280,7 @@ static int exynos_get_trend(struct thermal_zone_device *thermal,
return 0;
}
/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops const exynos_dev_ops = {
+static struct thermal_zone_device_ops exynos_dev_ops = {
.bind = exynos_bind,
.unbind = exynos_unbind,
.get_temp = exynos_get_temp,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 32f38b90c4f6..0d96a510389f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -205,6 +205,7 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
skip_calib_data:
if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
dev_err(&pdev->dev, "Invalid max trigger level\n");
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 073c292baa53..476b768c633e 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -131,8 +131,8 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
#define EXYNOS4412_TMU_DATA \
.threshold_falling = 10, \
- .trigger_levels[0] = 85, \
- .trigger_levels[1] = 103, \
+ .trigger_levels[0] = 70, \
+ .trigger_levels[1] = 95, \
.trigger_levels[2] = 110, \
.trigger_levels[3] = 120, \
.trigger_enable[0] = true, \
@@ -155,12 +155,12 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.second_point_trim = 85, \
.default_temp_offset = 50, \
.freq_tab[0] = { \
- .freq_clip_max = 800 * 1000, \
- .temp_level = 85, \
+ .freq_clip_max = 1400 * 1000, \
+ .temp_level = 70, \
}, \
.freq_tab[1] = { \
- .freq_clip_max = 200 * 1000, \
- .temp_level = 103, \
+ .freq_clip_max = 400 * 1000, \
+ .temp_level = 95, \
}, \
.freq_tab_count = 2, \
.registers = &exynos4412_tmu_registers, \
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index d89e781b0a18..f251521baaa2 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -60,6 +60,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
*/
cdev->ops->get_cur_state(cdev, &cur_state);
next_target = instance->target;
+ dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
switch (trend) {
case THERMAL_TREND_RAISING:
@@ -131,6 +132,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
if (tz->temperature >= trip_temp)
throttle = true;
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+ trip, trip_type, trip_temp, trend, throttle);
+
mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
@@ -139,6 +143,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
old_target = instance->target;
instance->target = get_target_state(instance, trend, throttle);
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ old_target, (int)instance->target);
if (old_target == instance->target)
continue;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f1d511a9475b..71b0ec0c370d 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -34,6 +34,7 @@
#include <linux/thermal.h>
#include <linux/reboot.h>
#include <linux/string.h>
+#include <linux/of.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -55,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
+static struct thermal_governor *def_governor;
+
static struct thermal_governor *__find_governor(const char *name)
{
struct thermal_governor *pos;
+ if (!name || !name[0])
+ return def_governor;
+
list_for_each_entry(pos, &thermal_governor_list, governor_list)
if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
return pos;
@@ -81,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
if (__find_governor(governor->name) == NULL) {
err = 0;
list_add(&governor->governor_list, &thermal_governor_list);
+ if (!def_governor && !strncmp(governor->name,
+ DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+ def_governor = governor;
}
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
+ /*
+ * only thermal zones with a specified tz->tzp->governor_name
+ * may run with tz->governor unset
+ */
if (pos->governor)
continue;
- if (pos->tzp)
- name = pos->tzp->governor_name;
- else
- name = DEFAULT_THERMAL_GOVERNOR;
+
+ name = pos->tzp->governor_name;
+
if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
pos->governor = governor;
}
@@ -341,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
static void handle_non_critical_trips(struct thermal_zone_device *tz,
int trip, enum thermal_trip_type trip_type)
{
- if (tz->governor)
- tz->governor->throttle(tz, trip);
+ tz->governor ? tz->governor->throttle(tz, trip) :
+ def_governor->throttle(tz, trip);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -403,7 +415,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
enum thermal_trip_type type;
#endif
- if (!tz || IS_ERR(tz))
+ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
goto exit;
mutex_lock(&tz->lock);
@@ -450,12 +462,18 @@ static void update_temperature(struct thermal_zone_device *tz)
tz->last_temperature = tz->temperature;
tz->temperature = temp;
mutex_unlock(&tz->lock);
+
+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+ tz->last_temperature, tz->temperature);
}
void thermal_zone_device_update(struct thermal_zone_device *tz)
{
int count;
+ if (!tz->ops->get_temp)
+ return;
+
update_temperature(tz);
for (count = 0; count < tz->trips; count++)
@@ -774,6 +792,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
ret = tz->ops->set_emul_temp(tz, temperature);
}
+ if (!ret)
+ thermal_zone_device_update(tz);
+
return ret ? ret : count;
}
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
@@ -1052,7 +1073,8 @@ static struct class thermal_class = {
};
/**
- * thermal_cooling_device_register() - register a new thermal cooling device
+ * __thermal_cooling_device_register() - register a new thermal cooling device
+ * @np: a pointer to a device tree node.
* @type: the thermal cooling device type.
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
@@ -1060,13 +1082,16 @@ static struct class thermal_class = {
* This interface function adds a new thermal cooling device (fan/processor/...)
* to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
* to all the thermal zone devices registered at the same time.
+ * It also gives the opportunity to link the cooling device to a device tree
+ * node, so that it can be bound to a thermal zone created from the device tree.
*
* Return: a pointer to the created struct thermal_cooling_device or an
* ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
-struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
- const struct thermal_cooling_device_ops *ops)
+static struct thermal_cooling_device *
+__thermal_cooling_device_register(struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
int result;
@@ -1091,8 +1116,9 @@ thermal_cooling_device_register(char *type, void *devdata,
strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
+ cdev->np = np;
cdev->ops = ops;
- cdev->updated = true;
+ cdev->updated = false;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1133,9 +1159,53 @@ unregister:
device_unregister(&cdev->device);
return ERR_PTR(result);
}
+
+/**
+ * thermal_cooling_device_register() - register a new thermal cooling device
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ return __thermal_cooling_device_register(NULL, type, devdata, ops);
+}
EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
/**
+ * thermal_of_cooling_device_register() - register an OF thermal cooling device
+ * @np: a pointer to a device tree node.
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This function will register a cooling device with device tree node reference.
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ return __thermal_cooling_device_register(np, type, devdata, ops);
+}
+EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
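A sketch of how a non-cpufreq cooling device (say, a fan) might use the OF variant; the ops, names and state values are hypothetical:

#include <linux/of.h>
#include <linux/thermal.h>

static int foo_fan_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	*state = 3;	/* hypothetical: four fan speeds, 0..3 */
	return 0;
}

static int foo_fan_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	*state = 0;	/* a real driver would read the current speed */
	return 0;
}

static int foo_fan_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	return 0;	/* a real driver would program the fan here */
}

static const struct thermal_cooling_device_ops foo_fan_ops = {
	.get_max_state = foo_fan_get_max_state,
	.get_cur_state = foo_fan_get_cur_state,
	.set_cur_state = foo_fan_set_cur_state,
};

static struct thermal_cooling_device *
foo_fan_register(struct device_node *np, void *priv)
{
	/* The DT node lets a thermal zone cooling map match this device. */
	return thermal_of_cooling_device_register(np, "foo-fan", priv,
						  &foo_fan_ops);
}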
+
+/**
* thermal_cooling_device_unregister - removes the registered thermal cooling device
* @cdev: the thermal cooling device to remove.
*
@@ -1207,6 +1277,8 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
mutex_lock(&cdev->lock);
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
+ dev_dbg(&cdev->device, "zone%d->target=%lu\n",
+ instance->tz->id, instance->target);
if (instance->target == THERMAL_NO_TARGET)
continue;
if (instance->target > target)
@@ -1215,6 +1287,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
mutex_unlock(&cdev->lock);
cdev->ops->set_cur_state(cdev, target);
cdev->updated = true;
+ dev_dbg(&cdev->device, "set to state %lu\n", target);
}
EXPORT_SYMBOL(thermal_cdev_update);
@@ -1370,7 +1443,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
*/
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
- const struct thermal_zone_device_ops *ops,
+ struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{
@@ -1386,7 +1459,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->get_temp)
+ if (!ops)
return ERR_PTR(-EINVAL);
if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
@@ -1471,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (tz->tzp)
tz->governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+ tz->governor = def_governor;
mutex_unlock(&thermal_governor_lock);
@@ -1490,6 +1563,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ if (!tz->ops->get_temp)
+ thermal_zone_device_set_polling(tz, 0);
+
thermal_zone_device_update(tz);
if (!result)
@@ -1740,8 +1816,14 @@ static int __init thermal_init(void)
if (result)
goto unregister_class;
+ result = of_parse_thermal_zones();
+ if (result)
+ goto exit_netlink;
+
return 0;
+exit_netlink:
+ genetlink_exit();
unregister_governors:
thermal_unregister_governors();
unregister_class:
@@ -1757,6 +1839,7 @@ error:
static void __exit thermal_exit(void)
{
+ of_thermal_destroy_zones();
genetlink_exit();
class_unregister(&thermal_class);
thermal_unregister_governors();
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 7cf2f6626251..3db339fb636f 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -77,4 +77,13 @@ static inline int thermal_gov_user_space_register(void) { return 0; }
static inline void thermal_gov_user_space_unregister(void) {}
#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+/* device tree support */
+#ifdef CONFIG_THERMAL_OF
+int of_parse_thermal_zones(void);
+void of_thermal_destroy_zones(void);
+#else
+static inline int of_parse_thermal_zones(void) { return 0; }
+static inline void of_thermal_destroy_zones(void) { }
+#endif
+
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5a47cc8c8f85..9eec26dc0448 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -31,6 +31,7 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
+#include <linux/of.h>
#include "ti-thermal.h"
#include "ti-bandgap.h"
@@ -44,6 +45,7 @@ struct ti_thermal_data {
enum thermal_device_mode mode;
struct work_struct thermal_wq;
int sensor_id;
+ bool our_zone;
};
static void ti_thermal_work(struct work_struct *work)
@@ -75,11 +77,10 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
/* thermal zone ops */
/* Get temperature callback function for thermal zone*/
-static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
+static inline int __ti_thermal_get_temp(void *devdata, long *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
- struct ti_thermal_data *data = thermal->devdata;
+ struct ti_thermal_data *data = devdata;
struct ti_bandgap *bgp;
const struct ti_temp_sensor *s;
int ret, tmp, slope, constant;
@@ -118,6 +119,14 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
return ret;
}
+static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct ti_thermal_data *data = thermal->devdata;
+
+ return __ti_thermal_get_temp(data, temp);
+}
+
/* Bind callback functions for thermal zone */
static int ti_thermal_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
@@ -230,11 +239,9 @@ static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
return 0;
}
-/* Get the temperature trend callback functions for thermal zone */
-static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(void *p, long *trend)
{
- struct ti_thermal_data *data = thermal->devdata;
+ struct ti_thermal_data *data = p;
struct ti_bandgap *bgp;
int id, tr, ret = 0;
@@ -245,6 +252,22 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
if (ret)
return ret;
+ *trend = tr;
+
+ return 0;
+}
+
+/* Get the temperature trend callback functions for thermal zone */
+static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trend *trend)
+{
+ int ret;
+ long tr;
+
+ ret = __ti_thermal_get_trend(thermal->devdata, &tr);
+ if (ret)
+ return ret;
+
if (tr > 0)
*trend = THERMAL_TREND_RAISING;
else if (tr < 0)
@@ -308,16 +331,23 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
if (!data)
return -EINVAL;
- /* Create thermal zone */
- data->ti_thermal = thermal_zone_device_register(domain,
+ /* in case this is specified by DT */
+ data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
+ data, __ti_thermal_get_temp,
+ __ti_thermal_get_trend);
+ if (IS_ERR(data->ti_thermal)) {
+ /* Create thermal zone */
+ data->ti_thermal = thermal_zone_device_register(domain,
OMAP_TRIP_NUMBER, 0, data, &ti_thermal_ops,
NULL, FAST_TEMP_MONITORING_RATE,
FAST_TEMP_MONITORING_RATE);
- if (IS_ERR(data->ti_thermal)) {
- dev_err(bgp->dev, "thermal zone device is NULL\n");
- return PTR_ERR(data->ti_thermal);
+ if (IS_ERR(data->ti_thermal)) {
+ dev_err(bgp->dev, "thermal zone device is NULL\n");
+ return PTR_ERR(data->ti_thermal);
+ }
+ data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
+ data->our_zone = true;
}
- data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
ti_bandgap_set_sensor_data(bgp, id, data);
ti_bandgap_write_update_interval(bgp, data->sensor_id,
data->ti_thermal->polling_delay);
@@ -331,7 +361,13 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- thermal_zone_device_unregister(data->ti_thermal);
+ if (data && data->ti_thermal) {
+ if (data->our_zone)
+ thermal_zone_device_unregister(data->ti_thermal);
+ else
+ thermal_zone_of_sensor_unregister(bgp->dev,
+ data->ti_thermal);
+ }
return 0;
}
@@ -350,6 +386,15 @@ int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id)
int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
{
struct ti_thermal_data *data;
+ struct device_node *np = bgp->dev->of_node;
+
+ /*
+ * We assume that if the zone is deployed via DT, the user
+ * knows that the cooling device has to be loaded through the
+ * cpufreq driver instead of being registered here.
+ */
+ if (of_find_property(np, "#thermal-sensor-cells", NULL))
+ return 0;
data = ti_bandgap_get_sensor_data(bgp, id);
if (!data || IS_ERR(data))
@@ -380,7 +425,9 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
struct ti_thermal_data *data;
data = ti_bandgap_get_sensor_data(bgp, id);
- cpufreq_cooling_unregister(data->cool_dev);
+
+ if (data && data->cool_dev)
+ cpufreq_cooling_unregister(data->cool_dev);
return 0;
}
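Note on the ti-soc-thermal hunks above: the driver now tries the device-tree thermal framework first, falls back to a driver-built zone, and records which path was taken so teardown calls the matching unregister. A minimal sketch of that register-with-fallback pattern, with hypothetical names and the bandgap details stripped out:

	/* sketch only: assumes a driver-private struct carrying a zone
	 * pointer and an our_zone flag, as introduced in the patch above */
	zone = thermal_zone_of_sensor_register(dev, id, priv,
					       get_temp_cb, get_trend_cb);
	if (IS_ERR(zone)) {
		/* no DT description: create and own the zone ourselves */
		zone = thermal_zone_device_register("hypothetical", ntrips, 0,
						    priv, &ops, NULL, 0, delay);
		if (IS_ERR(zone))
			return PTR_ERR(zone);
		priv->our_zone = true;
	}
	priv->zone = zone;

	/* teardown mirrors registration */
	if (priv->our_zone)
		thermal_zone_device_unregister(priv->zone);
	else
		thermal_zone_of_sensor_unregister(dev, priv->zone);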
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 7722cb9d5a80..081fd7e6a9f0 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@ struct phy_dev_entry {
struct thermal_zone_device *tzone;
};
+static const struct thermal_zone_params pkg_temp_tz_params = {
+ .no_hwmon = true,
+};
+
/* List maintaining number of package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -215,7 +219,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
return 0;
}
-int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
unsigned long temp)
{
u32 l, h;
@@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
int err;
u32 tj_max;
struct phy_dev_entry *phy_dev_entry;
- char buffer[30];
int thres_count;
u32 eax, ebx, ecx, edx;
u8 *temp;
@@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
phy_dev_entry->first_cpu = cpu;
phy_dev_entry->tj_max = tj_max;
phy_dev_entry->ref_cnt = 1;
- snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n",
- phy_dev_entry->phys_proc_id);
- phy_dev_entry->tzone = thermal_zone_device_register(buffer,
+ phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ?
0x03 : 0x01,
- phy_dev_entry, &tzone_ops, NULL, 0, 0);
+ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(phy_dev_entry->tzone)) {
err = PTR_ERR(phy_dev_entry->tzone);
goto err_ret_free;
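The x86_pkg_temp hunk passes a struct thermal_zone_params with .no_hwmon = true, which tells the thermal core not to auto-create a hwmon interface for the zone. A hedged sketch of the registration call, with placeholder trip count and ops (the "example_" names are not the driver's own):

	static const struct thermal_zone_params example_tzp = {
		.no_hwmon = true,	/* skip the automatic hwmon bridge */
	};

	tzone = thermal_zone_device_register("x86_pkg_temp", ntrips, trip_mask,
					     devdata, &example_ops,
					     &example_tzp, 0, 0);
	if (IS_ERR(tzone))
		return PTR_ERR(tzone);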
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db344bda0..b24aa010f68c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -366,7 +366,7 @@ config TRACE_SINK
"Trace data router for MIPI P1149.7 cJTAG standard".
config PPC_EPAPR_HV_BYTECHAN
- tristate "ePAPR hypervisor byte channel driver"
+ bool "ePAPR hypervisor byte channel driver"
depends on PPC
select EPAPR_PARAVIRT
help
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 50b46881b6ca..94f9e3a38412 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -31,6 +31,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/major.h>
+#include <linux/atomic.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;
+/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
+static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
+
static int hvc_init(void);
#ifdef CONFIG_MAGIC_SYSRQ
@@ -851,7 +855,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
int i;
/* We wait until a driver actually comes along */
- if (!hvc_driver) {
+ if (atomic_inc_not_zero(&hvc_needs_init)) {
int err = hvc_init();
if (err)
return ERR_PTR(err);
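With hvc_needs_init starting at -1, atomic_inc_not_zero() succeeds exactly once: the first caller moves the counter from -1 to 0 and runs hvc_init(), every later caller sees 0 and skips it. A standalone sketch of the same once-only pattern, with hypothetical names:

	static atomic_t needs_init = ATOMIC_INIT(-1);

	int ensure_initialized(void)
	{
		/* only the call that moves the counter from -1 to 0 runs init */
		if (atomic_inc_not_zero(&needs_init))
			return do_init_once();	/* hypothetical init helper */
		return 0;			/* already initialized */
	}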
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index db19a38c8c69..ea74460f3638 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -77,6 +77,7 @@ struct hvc_iucv_private {
struct list_head tty_outqueue; /* outgoing IUCV messages */
struct list_head tty_inqueue; /* incoming IUCV messages */
struct device *dev; /* device structure */
+ u8 info_path[16]; /* IUCV path info (dev attr) */
};
struct iucv_tty_buffer {
@@ -126,7 +127,7 @@ static struct iucv_handler hvc_iucv_handler = {
* This function returns the struct hvc_iucv_private instance that corresponds
* to the HVC virtual terminal number specified as parameter @num.
*/
-struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
+static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
return NULL;
@@ -772,18 +773,37 @@ static int hvc_iucv_filter_connreq(u8 ipvmid[8])
static int hvc_iucv_path_pending(struct iucv_path *path,
u8 ipvmid[8], u8 ipuser[16])
{
- struct hvc_iucv_private *priv;
+ struct hvc_iucv_private *priv, *tmp;
+ u8 wildcard[9] = "lnxhvc ";
+ int i, rc, find_unused;
u8 nuser_data[16];
u8 vm_user_id[9];
- int i, rc;
+ ASCEBC(wildcard, sizeof(wildcard));
+ find_unused = !memcmp(wildcard, ipuser, 8);
+
+ /* First, check if the pending path request is managed by this
+ * IUCV handler:
+ * - find a disconnected device if ipuser contains the wildcard
+ * - find the device that matches the terminal ID in ipuser
+ */
priv = NULL;
- for (i = 0; i < hvc_iucv_devices; i++)
- if (hvc_iucv_table[i] &&
- (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
- priv = hvc_iucv_table[i];
+ for (i = 0; i < hvc_iucv_devices; i++) {
+ tmp = hvc_iucv_table[i];
+ if (!tmp)
+ continue;
+
+ if (find_unused) {
+ spin_lock(&tmp->lock);
+ if (tmp->iucv_state == IUCV_DISCONN)
+ priv = tmp;
+ spin_unlock(&tmp->lock);
+
+ } else if (!memcmp(tmp->srv_name, ipuser, 8))
+ priv = tmp;
+ if (priv)
break;
- }
+ }
if (!priv)
return -ENODEV;
@@ -826,6 +846,10 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
priv->path = path;
priv->iucv_state = IUCV_CONNECTED;
+ /* store path information */
+ memcpy(priv->info_path, ipvmid, 8);
+ memcpy(priv->info_path + 8, ipuser + 8, 8);
+
/* flush buffered output data... */
schedule_delayed_work(&priv->sndbuf_work, 5);
@@ -960,6 +984,49 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
return 0;
}
+static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ size_t len;
+
+ len = sizeof(priv->srv_name);
+ memcpy(buf, priv->srv_name, len);
+ EBCASC(buf, len);
+ buf[len++] = '\n';
+ return len;
+}
+
+static ssize_t hvc_iucv_dev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
+}
+
+static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+ char vmid[9], ipuser[9];
+
+ memset(vmid, 0, sizeof(vmid));
+ memset(ipuser, 0, sizeof(ipuser));
+
+ spin_lock_bh(&priv->lock);
+ if (priv->iucv_state == IUCV_CONNECTED) {
+ memcpy(vmid, priv->info_path, 8);
+ memcpy(ipuser, priv->info_path + 8, 8);
+ }
+ spin_unlock_bh(&priv->lock);
+ EBCASC(ipuser, 8);
+
+ return sprintf(buf, "%s:%s\n", vmid, ipuser);
+}
+
/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
@@ -985,6 +1052,25 @@ static struct device_driver hvc_iucv_driver = {
.pm = &hvc_iucv_pm_ops,
};
+/* IUCV HVC device attributes */
+static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
+static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
+static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
+static struct attribute *hvc_iucv_dev_attrs[] = {
+ &dev_attr_termid.attr,
+ &dev_attr_state.attr,
+ &dev_attr_peer.attr,
+ NULL,
+};
+static struct attribute_group hvc_iucv_dev_attr_group = {
+ .attrs = hvc_iucv_dev_attrs,
+};
+static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
+ &hvc_iucv_dev_attr_group,
+ NULL,
+};
+
+
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
* @id: hvc_iucv_table index
@@ -1046,6 +1132,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
priv->dev->driver = &hvc_iucv_driver;
+ priv->dev->groups = hvc_iucv_dev_attr_groups;
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
return 0;
}
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
return 0;
}
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
#endif
return r;
}
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
#ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index ebd5bff0f5c1..17ee3bf0926b 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -176,9 +176,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
": %d chars not inserted to flip buffer!\n",
length - work);
- /*
- * This may sleep if ->low_latency is set
- */
if (work)
tty_flip_buffer_push(&tty->port);
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
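The n_gsm block above reads one more EA-coded octet (the optional break signal) after the modem octet: each octet carries seven payload bits, and the low bit, when set, marks the last octet of the value, which is what gsm_read_ea() reports. A small standalone sketch of that decoding rule, independent of the mux code:

	/* sketch of 3GPP TS 27.010 "EA" decoding: low bit terminates the
	 * field, upper seven bits are payload, accumulated MSB-first */
	static unsigned int ea_decode(const unsigned char *p, int len)
	{
		unsigned int v = 0;

		while (len--) {
			v = (v << 7) | (*p >> 1);
			if (*p++ & 0x01)	/* EA bit set: last octet */
				break;
		}
		return v;
	}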
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017aa4434..41fe8a047d37 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_mark == ldata->echo_tail)
+ if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
- }
+ } else
+ process_echoes(tty);
isig(signal, tty);
return;
}
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- commit_echoes(tty);
+ process_echoes(tty);
return 0;
}
if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
start_tty(tty);
+ process_echoes(tty);
+ }
/* The termios change make the tty ready for I/O */
if (waitqueue_active(&tty->write_wait))
@@ -1896,15 +1898,12 @@ err:
static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
- int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
-
- if (ldata->icanon && !L_EXTPROC(tty)) {
- if (ldata->canon_head != ldata->read_tail)
- return 1;
- } else if (read_cnt(ldata) >= amt)
- return 1;
+ int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
- return 0;
+ if (ldata->icanon && !L_EXTPROC(tty))
+ return ldata->canon_head != ldata->read_tail;
+ else
+ return read_cnt(ldata) >= amt;
}
/**
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd709a722..81f909c2101f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1694,6 +1694,10 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
+ /*
+ * yes, some broken gcc versions emit "warning: 'i' may be used uninitialized"
+ * but no, we are not going to take a patch that assigns NULL below.
+ */
struct irq_info *i;
struct hlist_node *n;
struct hlist_head *h;
@@ -2433,6 +2437,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_dl_write(up, quot);
/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ *
+ * We need to recalculate all of the registers, because DLM and DLL
+ * are already rounded to a whole integer.
+ *
+ * When recalculating we use a 32x clock instead of a 16x clock to
+ * allow 1-bit for rounding in the fractional part.
+ */
+ if (up->port.type == PORT_XR17V35X) {
+ unsigned int baud_x32 = (port->uartclk * 2) / baud;
+ u16 quot = baud_x32 / 32;
+ u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+ serial_dl_write(up, quot);
+ serial_port_out(port, 0x2, quot_frac & 0xf);
+ }
+
+ /*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
* is written without DLAB set, this mode will be disabled.
*/
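The XR17V35x block added above recomputes the divisor at 32x the baud rate so the bottom bit can be used for rounding: quot is the integer part in 16x-clock units and quot_frac is the nearest sixteenth written to the fractional (DLD) register at offset 0x2. A worked example with an assumed 1 MHz uartclk and 38400 baud (illustration only, not a real board value):

	unsigned int uartclk = 1000000, baud = 38400;
	unsigned int baud_x32 = (uartclk * 2) / baud;	/* 2000000/38400 = 52 */
	u16 quot      = baud_x32 / 32;			/* 52/32 = 1           */
	u8  quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);	/* 20/2 = 10   */
	/* effective divisor 1 + 10/16 = 1.625 -> 1000000/(16*1.625) ~= 38462,
	 * about 0.16% above the requested 38400 */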
@@ -2864,14 +2886,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
touch_nmi_watchdog();
- local_irq_save(flags);
- if (port->sysrq) {
- /* serial8250_handle_irq() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
/*
* First save the IER then disable the interrupts
@@ -2903,8 +2921,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_modem_status(up);
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int __init serial8250_console_setup(struct console *co, char *options)
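The console-write hunk folds local_irq_save()/spin_lock() into spin_lock_irqsave() and, when an oops is in progress or the sysrq path already holds the lock, only trylocks, so a wedged port lock cannot deadlock panic output. A minimal sketch of that pattern, with the actual character output elided:

	unsigned long flags;
	int locked = 1;

	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... emit characters ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);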
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e646100..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228eed3b6f..b14bcba96c25 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
{
unsigned int bar;
- if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+ (priv->dev->subsystem_device & 0xff00) == 0x3000) {
/* netmos apparently orders BARs by datasheet layout, so serial
* ports get BARs 0 and 3 (or 1 and 4 for memmapped)
*/
@@ -1365,23 +1366,44 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- unsigned int m = 6912;
- unsigned int n = 15625;
+ unsigned int m, n;
u32 reg;
- /* For baud rates 1M, 2M, 3M and 4M the dividers must be adjusted. */
- if (baud == 1000000 || baud == 2000000 || baud == 4000000) {
+ /*
+ * For baud rates 0.5M, 1M, 1.5M, 2M, 2.5M, 3M, 3.5M and 4M the
+ * dividers must be adjusted.
+ *
+ * uartclk = (m / n) * 100 MHz, where m <= n
+ */
+ switch (baud) {
+ case 500000:
+ case 1000000:
+ case 2000000:
+ case 4000000:
m = 64;
n = 100;
-
p->uartclk = 64000000;
- } else if (baud == 3000000) {
+ break;
+ case 3500000:
+ m = 56;
+ n = 100;
+ p->uartclk = 56000000;
+ break;
+ case 1500000:
+ case 3000000:
m = 48;
n = 100;
-
p->uartclk = 48000000;
- } else {
- p->uartclk = 44236800;
+ break;
+ case 2500000:
+ m = 40;
+ n = 100;
+ p->uartclk = 40000000;
+ break;
+ default:
+ m = 2304;
+ n = 3125;
+ p->uartclk = 73728000;
}
/* Reset the clock */
@@ -3448,6 +3470,10 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
+ /*
+ * Intel BayTrail HSUART reference clock is 44.2368 MHz at power-on,
+ * but is overridden by byt_set_termios.
+ */
[pbn_byt] = {
.flags = FL_BASE0,
.num_ports = 1,
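The new byt_set_termios() switch encodes the documented relationship uartclk = (m / n) * 100 MHz: each special baud rate picks an m/n pair whose resulting clock divides evenly by 16 * baud. A quick check of two of the entries:

	/* 2.5 Mbaud:  m=40,   n=100  -> uartclk = 40 MHz;
	 *             40000000 / (16 * 2500000) = 1 (exact)          */
	/* default:    m=2304, n=3125 -> uartclk = 2304/3125 * 100 MHz
	 *                             = 73.728 MHz;
	 *             73728000 / (16 * 115200) = 40 (exact)          */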
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 441ada489874..2577d67bacb2 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -289,7 +289,7 @@ config SERIAL_MAX3100
MAX3100 chip support
config SERIAL_MAX310X
- bool "MAX310X support"
+ tristate "MAX310X support"
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
@@ -708,7 +708,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || ARM || COMPILE_TEST)
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -1034,7 +1034,7 @@ config SERIAL_MSM_CONSOLE
config SERIAL_MSM_HS
tristate "MSM UART High Speed: Serial Driver"
- depends on ARCH_MSM
+ depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
select SERIAL_CORE
help
If you have a machine based on MSM family of SoCs, you
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index d58783d364e3..d4eda24aa68b 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2154,9 +2154,19 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ if (!amba_reg.state) {
+ ret = uart_register_driver(&amba_reg);
+ if (ret < 0) {
+ pr_err("Failed to register AMBA-PL011 driver\n");
+ return ret;
+ }
+ }
+
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
amba_ports[i] = NULL;
+ uart_unregister_driver(&amba_reg);
pl011_dma_remove(uap);
}
out:
@@ -2175,6 +2185,7 @@ static int pl011_remove(struct amba_device *dev)
amba_ports[i] = NULL;
pl011_dma_remove(uap);
+ uart_unregister_driver(&amba_reg);
return 0;
}
@@ -2230,22 +2241,14 @@ static struct amba_driver pl011_driver = {
static int __init pl011_init(void)
{
- int ret;
printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
- ret = uart_register_driver(&amba_reg);
- if (ret == 0) {
- ret = amba_driver_register(&pl011_driver);
- if (ret)
- uart_unregister_driver(&amba_reg);
- }
- return ret;
+ return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
amba_driver_unregister(&pl011_driver);
- uart_unregister_driver(&amba_reg);
}
/*
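The pl011 hunks defer uart_register_driver() until the first successful probe (keyed off amba_reg.state) and drop it again on port-add failure or removal, so a built-in driver with no matching hardware does not claim a tty major. A minimal sketch of the lazy-registration shape, with hypothetical names and the error handling abbreviated:

	/* sketch: register the uart_driver only once real hardware probes */
	static int example_probe(struct uart_port *port)
	{
		int ret;

		if (!example_uart_driver.state) {
			ret = uart_register_driver(&example_uart_driver);
			if (ret < 0)
				return ret;
		}

		ret = uart_add_one_port(&example_uart_driver, port);
		if (ret)
			uart_unregister_driver(&example_uart_driver);
		return ret;
	}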
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a49f10d269b2..b0603e1f7d82 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -115,9 +115,6 @@ static void atmel_stop_rx(struct uart_port *port);
#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
-static int (*atmel_open_hook)(struct uart_port *);
-static void (*atmel_close_hook)(struct uart_port *);
-
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
@@ -1555,7 +1552,7 @@ static int atmel_startup(struct uart_port *port)
retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
tty ? tty->name : "atmel_serial", port);
if (retval) {
- printk("atmel_serial: atmel_startup - Can't get irq\n");
+ dev_err(port->dev, "atmel_startup - Can't get irq\n");
return retval;
}
@@ -1575,17 +1572,6 @@ static int atmel_startup(struct uart_port *port)
if (retval < 0)
atmel_set_ops(port);
}
- /*
- * If there is a specific "open" function (to register
- * control line interrupts)
- */
- if (atmel_open_hook) {
- retval = atmel_open_hook(port);
- if (retval) {
- free_irq(port->irq, port);
- return retval;
- }
- }
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = UART_GET_CSR(port);
@@ -1684,13 +1670,6 @@ static void atmel_shutdown(struct uart_port *port)
* Free the interrupt
*/
free_irq(port->irq, port);
-
- /*
- * If there is a specific "close" function (to unregister
- * control line interrupts)
- */
- if (atmel_close_hook)
- atmel_close_hook(port);
}
/*
@@ -1738,7 +1717,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
clk_disable_unprepare(atmel_port->clk);
break;
default:
- printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
+ dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
}
}
@@ -1853,13 +1832,10 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
- dev_dbg(port->dev, "Setting UART to RS485\n");
if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
- } else {
- dev_dbg(port->dev, "Setting UART to RS232\n");
}
/* set the parity, stop bits and data size */
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 649d5129c4b4..a47421e4627c 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -29,10 +29,9 @@
#include <linux/sysrq.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
-
-#include <bcm63xx_irq.h>
-#include <bcm63xx_regs.h>
-#include <bcm63xx_io.h>
+#include <linux/serial_bcm63xx.h>
+#include <linux/io.h>
+#include <linux/of.h>
#define BCM63XX_NR_UARTS 2
@@ -81,13 +80,13 @@ static struct uart_port ports[BCM63XX_NR_UARTS];
static inline unsigned int bcm_uart_readl(struct uart_port *port,
unsigned int offset)
{
- return bcm_readl(port->membase + offset);
+ return __raw_readl(port->membase + offset);
}
static inline void bcm_uart_writel(struct uart_port *port,
unsigned int value, unsigned int offset)
{
- bcm_writel(value, port->membase + offset);
+ __raw_writel(value, port->membase + offset);
}
/*
@@ -591,7 +590,7 @@ static int bcm_uart_request_port(struct uart_port *port)
{
unsigned int size;
- size = RSET_UART_SIZE;
+ size = UART_REG_SIZE;
if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
@@ -611,7 +610,7 @@ static int bcm_uart_request_port(struct uart_port *port)
*/
static void bcm_uart_release_port(struct uart_port *port)
{
- release_mem_region(port->mapbase, RSET_UART_SIZE);
+ release_mem_region(port->mapbase, UART_REG_SIZE);
iounmap(port->membase);
}
@@ -808,6 +807,9 @@ static int bcm_uart_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
@@ -859,6 +861,12 @@ static int bcm_uart_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id bcm63xx_of_match[] = {
+ { .compatible = "brcm,bcm6345-uart" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_of_match);
+
/*
* platform driver stuff
*/
@@ -868,6 +876,7 @@ static struct platform_driver bcm_uart_platform_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "bcm63xx_uart",
+ .of_match_table = bcm63xx_of_match,
},
};
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b0eacb83f831..5e6fdb1ea73b 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -368,11 +368,16 @@ static const struct uart_ops uart_clps711x_ops = {
static void uart_clps711x_console_putchar(struct uart_port *port, int ch)
{
struct clps711x_port *s = dev_get_drvdata(port->dev);
- u32 sysflg = 0;
- do {
+ /* Wait until the FIFO is no longer full */
+ while (1) {
+ u32 sysflg = 0;
+
regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
- } while (sysflg & SYSFLG_UTXFF);
+ if (!(sysflg & SYSFLG_UTXFF))
+ break;
+ cond_resched();
+ }
writew(ch, port->membase + UARTDR_OFFSET);
}
@@ -382,14 +387,18 @@ static void uart_clps711x_console_write(struct console *co, const char *c,
{
struct uart_port *port = clps711x_uart.state[co->index].uart_port;
struct clps711x_port *s = dev_get_drvdata(port->dev);
- u32 sysflg = 0;
uart_console_write(port, c, n, uart_clps711x_console_putchar);
/* Wait for transmitter to become empty */
- do {
+ while (1) {
+ u32 sysflg = 0;
+
regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
- } while (sysflg & SYSFLG_UBUSY);
+ if (!(sysflg & SYSFLG_UBUSY))
+ break;
+ cond_resched();
+ }
}
static int uart_clps711x_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 477f22f773fc..d567ac5d3af4 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -286,7 +286,6 @@ static struct e100_serial rs_table[] = {
#endif
}, /* ttyS0 */
-#ifndef CONFIG_SVINTO_SIM
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL1_CTRL,
.irq = 1U << 16, /* uses DMA 8 and 9 */
@@ -447,7 +446,6 @@ static struct e100_serial rs_table[] = {
.dma_in_enabled = 0
#endif
} /* ttyS3 */
-#endif
};
@@ -1035,7 +1033,6 @@ cflag_to_etrax_baud(unsigned int cflag)
static inline void
e100_dtr(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
unsigned char mask = e100_modem_pins[info->line].dtr_mask;
#ifdef SERIAL_DEBUG_IO
@@ -1060,7 +1057,6 @@ e100_dtr(struct e100_serial *info, int set)
info->line, *e100_modem_pins[info->line].dtr_shadow,
E100_DTR_GET(info));
#endif
-#endif
}
/* set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
@@ -1069,7 +1065,6 @@ e100_dtr(struct e100_serial *info, int set)
static inline void
e100_rts(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
unsigned long flags;
local_irq_save(flags);
info->rx_ctrl &= ~E100_RTS_MASK;
@@ -1079,7 +1074,6 @@ e100_rts(struct e100_serial *info, int set)
#ifdef SERIAL_DEBUG_IO
printk("ser%i rts %i\n", info->line, set);
#endif
-#endif
}
@@ -1087,7 +1081,6 @@ e100_rts(struct e100_serial *info, int set)
static inline void
e100_ri_out(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
/* RI is active low */
{
unsigned char mask = e100_modem_pins[info->line].ri_mask;
@@ -1099,12 +1092,10 @@ e100_ri_out(struct e100_serial *info, int set)
*e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
local_irq_restore(flags);
}
-#endif
}
static inline void
e100_cd_out(struct e100_serial *info, int set)
{
-#ifndef CONFIG_SVINTO_SIM
/* CD is active low */
{
unsigned char mask = e100_modem_pins[info->line].cd_mask;
@@ -1116,27 +1107,22 @@ e100_cd_out(struct e100_serial *info, int set)
*e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
local_irq_restore(flags);
}
-#endif
}
static inline void
e100_disable_rx(struct e100_serial *info)
{
-#ifndef CONFIG_SVINTO_SIM
/* disable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
-#endif
}
static inline void
e100_enable_rx(struct e100_serial *info)
{
-#ifndef CONFIG_SVINTO_SIM
/* enable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
-#endif
}
/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
@@ -1554,24 +1540,6 @@ transmit_chars_dma(struct e100_serial *info)
unsigned int c, sentl;
struct etrax_dma_descr *descr;
-#ifdef CONFIG_SVINTO_SIM
- /* This will output too little if tail is not 0 always since
- * we don't reloop to send the other part. Anyway this SHOULD be a
- * no-op - transmit_chars_dma would never really be called during sim
- * since rs_write does not write into the xmit buffer then.
- */
- if (info->xmit.tail)
- printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
- if (info->xmit.head != info->xmit.tail) {
- SIMCOUT(info->xmit.buf + info->xmit.tail,
- CIRC_CNT(info->xmit.head,
- info->xmit.tail,
- SERIAL_XMIT_SIZE));
- info->xmit.head = info->xmit.tail; /* move back head */
- info->tr_running = 0;
- }
- return;
-#endif
/* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->oclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
@@ -1842,13 +1810,6 @@ static void receive_chars_dma(struct e100_serial *info)
struct tty_struct *tty;
unsigned char rstat;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- return;
-#endif
-
/* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->iclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
@@ -1934,12 +1895,6 @@ static int start_recv_dma(struct e100_serial *info)
static void
start_receive(struct e100_serial *info)
{
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- return;
-#endif
if (info->uses_dma_in) {
/* reset the input dma channel to be sure it works */
@@ -1972,17 +1927,6 @@ tr_interrupt(int irq, void *dev_id)
int i;
int handled = 0;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- {
- const char *s = "What? tr_interrupt in simulator??\n";
- SIMCOUT(s,strlen(s));
- }
- return IRQ_HANDLED;
-#endif
-
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
@@ -2021,17 +1965,6 @@ rec_interrupt(int irq, void *dev_id)
int i;
int handled = 0;
-#ifdef CONFIG_SVINTO_SIM
- /* No receive in the simulator. Will probably be when the rest of
- * the serial interface works, and this piece will just be removed.
- */
- {
- const char *s = "What? rec_interrupt in simulator??\n";
- SIMCOUT(s,strlen(s));
- }
- return IRQ_HANDLED;
-#endif
-
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
@@ -2153,7 +2086,7 @@ static void flush_timeout_function(unsigned long data)
fast_timers[info->line].function = NULL;
serial_fast_timer_expired++;
- TIMERD(DEBUG_LOG(info->line, "flush_timout %i ", info->line));
+ TIMERD(DEBUG_LOG(info->line, "flush_timeout %i ", info->line));
TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
check_flush_timeout(info);
}
@@ -2173,10 +2106,6 @@ timed_flush_handler(unsigned long ptr)
struct e100_serial *info;
int i;
-#ifdef CONFIG_SVINTO_SIM
- return;
-#endif
-
for (i = 0; i < NR_PORTS; i++) {
info = rs_table + i;
if (info->uses_dma_in)
@@ -2729,25 +2658,6 @@ startup(struct e100_serial * info)
printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
#endif
-#ifdef CONFIG_SVINTO_SIM
- /* Bits and pieces collected from below. Better to have them
- in one ifdef:ed clause than to mix in a lot of ifdefs,
- right? */
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
-
- info->xmit.head = info->xmit.tail = 0;
- info->first_recv_buffer = info->last_recv_buffer = NULL;
- info->recv_cnt = info->max_recv_cnt = 0;
-
- for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
- info->rec_descr[i].buf = NULL;
-
- /* No real action in the simulator, but may set info important
- to ioctl. */
- change_speed(info);
-#else
-
/*
* Clear the FIFO buffers and disable them
* (they will be reenabled in change_speed())
@@ -2837,8 +2747,6 @@ startup(struct e100_serial * info)
e100_rts(info, 1);
e100_dtr(info, 1);
-#endif /* CONFIG_SVINTO_SIM */
-
info->port.flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
@@ -2857,7 +2765,6 @@ shutdown(struct e100_serial * info)
struct etrax_recv_buffer *buffer;
int i;
-#ifndef CONFIG_SVINTO_SIM
/* shut down the transmitter and receiver */
DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
e100_disable_rx(info);
@@ -2882,8 +2789,6 @@ shutdown(struct e100_serial * info)
info->tr_running = 0;
}
-#endif /* CONFIG_SVINTO_SIM */
-
if (!(info->port.flags & ASYNC_INITIALIZED))
return;
@@ -2995,17 +2900,12 @@ change_speed(struct e100_serial *info)
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
r_alt_ser_baudrate_shadow &= ~mask;
r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
-#ifndef CONFIG_SVINTO_SIM
*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
-#endif /* CONFIG_SVINTO_SIM */
info->baud = cflag_to_baud(cflag);
-#ifndef CONFIG_SVINTO_SIM
info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
-#endif /* CONFIG_SVINTO_SIM */
}
-#ifndef CONFIG_SVINTO_SIM
/* start with default settings and then fill in changes */
local_irq_save(flags);
/* 8 bit, no/even parity */
@@ -3073,7 +2973,6 @@ change_speed(struct e100_serial *info)
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
local_irq_restore(flags);
-#endif /* !CONFIG_SVINTO_SIM */
update_char_time(info);
@@ -3122,11 +3021,6 @@ static int rs_raw_write(struct tty_struct *tty,
count, info->ioport[REG_STATUS]);
#endif
-#ifdef CONFIG_SVINTO_SIM
- /* Really simple. The output is here and now. */
- SIMCOUT(buf, count);
- return count;
-#endif
local_save_flags(flags);
DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
@@ -3463,7 +3357,6 @@ static int
get_lsr_info(struct e100_serial * info, unsigned int *value)
{
unsigned int result = TIOCSER_TEMT;
-#ifndef CONFIG_SVINTO_SIM
unsigned long curr_time = jiffies;
unsigned long curr_time_usec = GET_JIFFIES_USEC();
unsigned long elapsed_usec =
@@ -3474,7 +3367,6 @@ get_lsr_info(struct e100_serial * info, unsigned int *value)
elapsed_usec < 2*info->char_time_usec) {
result = 0;
}
-#endif
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
@@ -3804,7 +3696,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
e100_disable_serial_data_irq(info);
#endif
-#ifndef CONFIG_SVINTO_SIM
e100_disable_rx(info);
e100_disable_rx_irq(info);
@@ -3816,7 +3707,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
*/
rs_wait_until_sent(tty, HZ);
}
-#endif
shutdown(info);
rs_flush_buffer(tty);
@@ -4479,7 +4369,6 @@ static int __init rs_init(void)
fast_timer_init();
#endif
-#ifndef CONFIG_SVINTO_SIM
#ifndef CONFIG_ETRAX_KGDB
/* Not needed in simulator. May only complicate stuff. */
/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
@@ -4489,7 +4378,6 @@ static int __init rs_init(void)
panic("%s: Failed to request irq8", __func__);
#endif
-#endif /* CONFIG_SVINTO_SIM */
return 0;
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 0eb5b5673ede..028582e924a5 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -671,7 +671,10 @@ static int efm32_uart_probe_dt(struct platform_device *pdev,
if (!np)
return 1;
- ret = of_property_read_u32(np, "location", &location);
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
if (!ret) {
if (location > 5) {
dev_err(&pdev->dev, "invalid location\n");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 8978dc9a58b7..c5eb897de9de 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -13,14 +13,19 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/console.h>
+#include <linux/of_dma.h>
#include <linux/serial_core.h>
+#include <linux/slab.h>
#include <linux/tty_flip.h>
/* All registers are 8-bit width */
@@ -112,6 +117,10 @@
#define UARTSFIFO_TXOF 0x02
#define UARTSFIFO_RXUF 0x01
+#define DMA_MAXBURST 16
+#define DMA_MAXBURST_MASK (DMA_MAXBURST - 1)
+#define FSL_UART_RX_DMA_BUFFER_SIZE 64
+
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
#define UART_NR 6
@@ -121,6 +130,24 @@ struct lpuart_port {
struct clk *clk;
unsigned int txfifo_size;
unsigned int rxfifo_size;
+
+ bool lpuart_dma_use;
+ struct dma_chan *dma_tx_chan;
+ struct dma_chan *dma_rx_chan;
+ struct dma_async_tx_descriptor *dma_tx_desc;
+ struct dma_async_tx_descriptor *dma_rx_desc;
+ dma_addr_t dma_tx_buf_bus;
+ dma_addr_t dma_rx_buf_bus;
+ dma_cookie_t dma_tx_cookie;
+ dma_cookie_t dma_rx_cookie;
+ unsigned char *dma_tx_buf_virt;
+ unsigned char *dma_rx_buf_virt;
+ unsigned int dma_tx_bytes;
+ unsigned int dma_rx_bytes;
+ int dma_tx_in_progress;
+ int dma_rx_in_progress;
+ unsigned int dma_rx_timeout;
+ struct timer_list lpuart_timer;
};
static struct of_device_id lpuart_dt_ids[] = {
@@ -131,6 +158,10 @@ static struct of_device_id lpuart_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+/* Forward declarations for the DMA callbacks */
+static void lpuart_dma_tx_complete(void *arg);
+static void lpuart_dma_rx_complete(void *arg);
+
static void lpuart_stop_tx(struct uart_port *port)
{
unsigned char temp;
@@ -152,6 +183,210 @@ static void lpuart_enable_ms(struct uart_port *port)
{
}
+static void lpuart_copy_rx_to_tty(struct lpuart_port *sport,
+ struct tty_port *tty, int count)
+{
+ int copied;
+
+ sport->port.icount.rx += count;
+
+ if (!tty) {
+ dev_err(sport->port.dev, "No tty port\n");
+ return;
+ }
+
+ dma_sync_single_for_cpu(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(sport->dma_rx_buf_virt)), count);
+
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(sport->port.dev, "RxData copy to tty layer failed\n");
+ }
+
+ dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+}
+
+static void lpuart_pio_tx(struct lpuart_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ while (!uart_circ_empty(xmit) &&
+ readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size) {
+ writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (uart_circ_empty(xmit))
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int lpuart_dma_tx(struct lpuart_port *sport, unsigned long count)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ dma_addr_t tx_bus_addr;
+
+ dma_sync_single_for_device(sport->port.dev, sport->dma_tx_buf_bus,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ sport->dma_tx_bytes = count & ~(DMA_MAXBURST_MASK);
+ tx_bus_addr = sport->dma_tx_buf_bus + xmit->tail;
+ sport->dma_tx_desc = dmaengine_prep_slave_single(sport->dma_tx_chan,
+ tx_bus_addr, sport->dma_tx_bytes,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+
+ if (!sport->dma_tx_desc) {
+ dev_err(sport->port.dev, "Not able to get desc for tx\n");
+ return -EIO;
+ }
+
+ sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
+ sport->dma_tx_desc->callback_param = sport;
+ sport->dma_tx_in_progress = 1;
+ sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
+ dma_async_issue_pending(sport->dma_tx_chan);
+
+ return 0;
+}
+
+static void lpuart_prepare_tx(struct lpuart_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long count = CIRC_CNT_TO_END(xmit->head,
+ xmit->tail, UART_XMIT_SIZE);
+
+ if (!count)
+ return;
+
+ if (count < DMA_MAXBURST)
+ writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+ else {
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_TDMAS,
+ sport->port.membase + UARTCR5);
+ lpuart_dma_tx(sport, count);
+ }
+}
+
+static void lpuart_dma_tx_complete(void *arg)
+{
+ struct lpuart_port *sport = arg;
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ async_tx_ack(sport->dma_tx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
+ sport->dma_tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ lpuart_prepare_tx(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int lpuart_dma_rx(struct lpuart_port *sport)
+{
+ dma_sync_single_for_device(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ sport->dma_rx_desc = dmaengine_prep_slave_single(sport->dma_rx_chan,
+ sport->dma_rx_buf_bus, FSL_UART_RX_DMA_BUFFER_SIZE,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+
+ if (!sport->dma_rx_desc) {
+ dev_err(sport->port.dev, "Not able to get desc for rx\n");
+ return -EIO;
+ }
+
+ sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
+ sport->dma_rx_desc->callback_param = sport;
+ sport->dma_rx_in_progress = 1;
+ sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
+ dma_async_issue_pending(sport->dma_rx_chan);
+
+ return 0;
+}
+
+static void lpuart_dma_rx_complete(void *arg)
+{
+ struct lpuart_port *sport = arg;
+ struct tty_port *port = &sport->port.state->port;
+ unsigned long flags;
+
+ async_tx_ack(sport->dma_rx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->dma_rx_in_progress = 0;
+ lpuart_copy_rx_to_tty(sport, port, FSL_UART_RX_DMA_BUFFER_SIZE);
+ tty_flip_buffer_push(port);
+ lpuart_dma_rx(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static void lpuart_timer_func(unsigned long data)
+{
+ struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct tty_port *port = &sport->port.state->port;
+ struct dma_tx_state state;
+ unsigned long flags;
+ unsigned char temp;
+ int count;
+
+ del_timer(&sport->lpuart_timer);
+ dmaengine_pause(sport->dma_rx_chan);
+ dmaengine_tx_status(sport->dma_rx_chan, sport->dma_rx_cookie, &state);
+ dmaengine_terminate_all(sport->dma_rx_chan);
+ count = FSL_UART_RX_DMA_BUFFER_SIZE - state.residue;
+ async_tx_ack(sport->dma_rx_desc);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->dma_rx_in_progress = 0;
+ lpuart_copy_rx_to_tty(sport, port, count);
+ tty_flip_buffer_push(port);
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp & ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static inline void lpuart_prepare_rx(struct lpuart_port *sport)
+{
+ unsigned long flags;
+ unsigned char temp;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ init_timer(&sport->lpuart_timer);
+ sport->lpuart_timer.function = lpuart_timer_func;
+ sport->lpuart_timer.data = (unsigned long)sport;
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+
+ lpuart_dma_rx(sport);
+ temp = readb(sport->port.membase + UARTCR5);
+ writeb(temp | UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
@@ -172,14 +407,21 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
static void lpuart_start_tx(struct uart_port *port)
{
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct circ_buf *xmit = &sport->port.state->xmit;
unsigned char temp;
temp = readb(port->membase + UARTCR2);
writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
- if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
- lpuart_transmit_buffer(sport);
+ if (sport->lpuart_dma_use) {
+ if (!uart_circ_empty(xmit) && !sport->dma_tx_in_progress)
+ lpuart_prepare_tx(sport);
+ } else {
+ if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
+ lpuart_transmit_buffer(sport);
+ }
}
static irqreturn_t lpuart_txint(int irq, void *dev_id)
@@ -279,12 +521,19 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
sts = readb(sport->port.membase + UARTSR1);
- if (sts & UARTSR1_RDRF)
- lpuart_rxint(irq, dev_id);
-
+ if (sts & UARTSR1_RDRF) {
+ if (sport->lpuart_dma_use)
+ lpuart_prepare_rx(sport);
+ else
+ lpuart_rxint(irq, dev_id);
+ }
if (sts & UARTSR1_TDRE &&
- !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS))
- lpuart_txint(irq, dev_id);
+ !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS)) {
+ if (sport->lpuart_dma_use)
+ lpuart_pio_tx(sport);
+ else
+ lpuart_txint(irq, dev_id);
+ }
return IRQ_HANDLED;
}
@@ -366,13 +615,156 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
sport->port.membase + UARTCFIFO);
- writeb(2, sport->port.membase + UARTTWFIFO);
+ writeb(0, sport->port.membase + UARTTWFIFO);
writeb(1, sport->port.membase + UARTRWFIFO);
/* Restore cr2 */
writeb(cr2_saved, sport->port.membase + UARTCR2);
}
+static int lpuart_dma_tx_request(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *tx_chan;
+ struct dma_slave_config dma_tx_sconfig;
+ dma_addr_t dma_bus;
+ unsigned char *dma_buf;
+ int ret;
+
+ tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
+
+ if (!tx_chan) {
+ dev_err(sport->port.dev, "Dma tx channel request failed!\n");
+ return -ENODEV;
+ }
+
+ dma_bus = dma_map_single(tx_chan->device->dev,
+ sport->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(tx_chan->device->dev, dma_bus)) {
+ dev_err(sport->port.dev, "dma_map_single tx failed\n");
+ dma_release_channel(tx_chan);
+ return -ENOMEM;
+ }
+
+ dma_buf = sport->port.state->xmit.buf;
+ dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+ dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_tx_sconfig.dst_maxburst = DMA_MAXBURST;
+ dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(tx_chan, &dma_tx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ dma_release_channel(tx_chan);
+ return ret;
+ }
+
+ sport->dma_tx_chan = tx_chan;
+ sport->dma_tx_buf_virt = dma_buf;
+ sport->dma_tx_buf_bus = dma_bus;
+ sport->dma_tx_in_progress = 0;
+
+ return 0;
+}
+
+static int lpuart_dma_rx_request(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *rx_chan;
+ struct dma_slave_config dma_rx_sconfig;
+ dma_addr_t dma_bus;
+ unsigned char *dma_buf;
+ int ret;
+
+ rx_chan = dma_request_slave_channel(sport->port.dev, "rx");
+
+ if (!rx_chan) {
+ dev_err(sport->port.dev, "Dma rx channel request failed!\n");
+ return -ENODEV;
+ }
+
+ dma_buf = devm_kzalloc(sport->port.dev,
+ FSL_UART_RX_DMA_BUFFER_SIZE, GFP_KERNEL);
+
+ if (!dma_buf) {
+ dev_err(sport->port.dev, "Dma rx alloc failed\n");
+ dma_release_channel(rx_chan);
+ return -ENOMEM;
+ }
+
+ dma_bus = dma_map_single(rx_chan->device->dev, dma_buf,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_chan->device->dev, dma_bus)) {
+ dev_err(sport->port.dev, "dma_map_single rx failed\n");
+ dma_release_channel(rx_chan);
+ return -ENOMEM;
+ }
+
+ dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
+ dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_rx_sconfig.src_maxburst = 1;
+ dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(rx_chan, &dma_rx_sconfig);
+
+ if (ret < 0) {
+ dev_err(sport->port.dev,
+ "Dma slave config failed, err = %d\n", ret);
+ dma_release_channel(rx_chan);
+ return ret;
+ }
+
+ sport->dma_rx_chan = rx_chan;
+ sport->dma_rx_buf_virt = dma_buf;
+ sport->dma_rx_buf_bus = dma_bus;
+ sport->dma_rx_in_progress = 0;
+
+ sport->dma_rx_timeout = (sport->port.timeout - HZ / 50) *
+ FSL_UART_RX_DMA_BUFFER_SIZE * 3 /
+ sport->rxfifo_size / 2;
+
+ if (sport->dma_rx_timeout < msecs_to_jiffies(20))
+ sport->dma_rx_timeout = msecs_to_jiffies(20);
+
+ return 0;
+}
+
+static void lpuart_dma_tx_free(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *dma_chan;
+
+ dma_unmap_single(sport->port.dev, sport->dma_tx_buf_bus,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_chan = sport->dma_tx_chan;
+ sport->dma_tx_chan = NULL;
+ sport->dma_tx_buf_bus = 0;
+ sport->dma_tx_buf_virt = NULL;
+ dma_release_channel(dma_chan);
+}
+
+static void lpuart_dma_rx_free(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ struct dma_chan *dma_chan;
+
+ dma_unmap_single(sport->port.dev, sport->dma_rx_buf_bus,
+ FSL_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ dma_chan = sport->dma_rx_chan;
+ sport->dma_rx_chan = NULL;
+ sport->dma_rx_buf_bus = 0;
+ sport->dma_rx_buf_virt = NULL;
+ dma_release_channel(dma_chan);
+}
+
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
@@ -380,6 +772,15 @@ static int lpuart_startup(struct uart_port *port)
unsigned long flags;
unsigned char temp;
+ /* decide whether to use DMA based on the DMA channel request results */
+ if (lpuart_dma_tx_request(port) || lpuart_dma_rx_request(port)) {
+ sport->lpuart_dma_use = false;
+ } else {
+ sport->lpuart_dma_use = true;
+ temp = readb(port->membase + UARTCR5);
+ writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
+ }
+
ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
DRIVER_NAME, sport);
if (ret)
@@ -414,6 +815,11 @@ static void lpuart_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
devm_free_irq(port->dev, port->irq, sport);
+
+ if (sport->lpuart_dma_use) {
+ lpuart_dma_tx_free(port);
+ lpuart_dma_rx_free(port);
+ }
}
static void
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index d98e43348970..67423805e6d9 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -455,11 +455,11 @@ static void load_code(struct icom_port *icom_port)
for (index = 0; index < fw->size; index++)
new_page[index] = fw->data[index];
- release_firmware(fw);
-
writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
writel(temp_pci, &icom_port->dram->mac_load_addr);
+ release_firmware(fw);
+
/*Setting the syncReg to 0x80 causes adapter to start downloading
the personality code into adapter instruction RAM.
Once code is loaded, it will begin executing and, based on
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index d799140e53b6..3b6c1a2e25de 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -496,8 +496,7 @@ static void dma_tx_callback(void *data)
dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
+ uart_write_wakeup(&sport->port);
if (waitqueue_active(&sport->dma_wait)) {
wake_up(&sport->dma_wait);
@@ -1117,25 +1116,25 @@ static int imx_startup(struct uart_port *port)
*/
if (sport->txirq > 0) {
retval = request_irq(sport->rxirq, imx_rxint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out1;
retval = request_irq(sport->txirq, imx_txint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out2;
/* do not use RTS IRQ on IrDA */
if (!USE_IRDA(sport)) {
retval = request_irq(sport->rtsirq, imx_rtsint, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval)
goto error_out3;
}
} else {
retval = request_irq(sport->port.irq, imx_int, 0,
- DRIVER_NAME, sport);
+ dev_name(port->dev), sport);
if (retval) {
free_irq(sport->port.irq, sport);
goto error_out1;
@@ -1470,44 +1469,13 @@ static const char *imx_type(struct uart_port *port)
}
/*
- * Release the memory region(s) being used by 'port'.
- */
-static void imx_release_port(struct uart_port *port)
-{
- struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *mmres;
-
- mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mmres->start, resource_size(mmres));
-}
-
-/*
- * Request the memory region(s) being used by 'port'.
- */
-static int imx_request_port(struct uart_port *port)
-{
- struct platform_device *pdev = to_platform_device(port->dev);
- struct resource *mmres;
- void *ret;
-
- mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mmres)
- return -ENODEV;
-
- ret = request_mem_region(mmres->start, resource_size(mmres), "imx-uart");
-
- return ret ? 0 : -EBUSY;
-}
-
-/*
* Configure/autoconfigure the port.
*/
static void imx_config_port(struct uart_port *port, int flags)
{
struct imx_port *sport = (struct imx_port *)port;
- if (flags & UART_CONFIG_TYPE &&
- imx_request_port(&sport->port) == 0)
+ if (flags & UART_CONFIG_TYPE)
sport->port.type = PORT_IMX;
}
@@ -1617,8 +1585,6 @@ static struct uart_ops imx_pops = {
.flush_buffer = imx_flush_buffer,
.set_termios = imx_set_termios,
.type = imx_type,
- .release_port = imx_release_port,
- .request_port = imx_request_port,
.config_port = imx_config_port,
.verify_port = imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
@@ -1935,7 +1901,6 @@ static void serial_imx_probe_pdata(struct imx_port *sport,
static int serial_imx_probe(struct platform_device *pdev)
{
struct imx_port *sport;
- struct imxuart_platform_data *pdata;
void __iomem *base;
int ret = 0;
struct resource *res;
@@ -1951,12 +1916,9 @@ static int serial_imx_probe(struct platform_device *pdev)
return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
- if (!base)
- return -ENOMEM;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
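The imx hunk above replaces the open-coded resource check and devm_ioremap() with devm_ioremap_resource(), which validates the resource, requests the memory region, maps it, and reports failure as an ERR_PTR in one call. The resulting probe idiom, sketched with declarations elided:

	/* sketch of the devm_ioremap_resource() probe idiom */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* covers res == NULL, busy region, map failure */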
@@ -1992,38 +1954,16 @@ static int serial_imx_probe(struct platform_device *pdev)
imx_ports[sport->port.line] = sport;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->init) {
- ret = pdata->init(pdev);
- if (ret)
- return ret;
- }
-
- ret = uart_add_one_port(&imx_reg, &sport->port);
- if (ret)
- goto deinit;
platform_set_drvdata(pdev, sport);
- return 0;
-deinit:
- if (pdata && pdata->exit)
- pdata->exit(pdev);
- return ret;
+ return uart_add_one_port(&imx_reg, &sport->port);
}
static int serial_imx_remove(struct platform_device *pdev)
{
- struct imxuart_platform_data *pdata;
struct imx_port *sport = platform_get_drvdata(pdev);
- pdata = dev_get_platdata(&pdev->dev);
-
- uart_remove_one_port(&imx_reg, &sport->port);
-
- if (pdata && pdata->exit)
- pdata->exit(pdev);
-
- return 0;
+ return uart_remove_one_port(&imx_reg, &sport->port);
}
static struct platform_driver serial_imx_driver = {
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8d71e4047bb3..2a99d0c61b9e 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,7 +1,7 @@
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
- * Copyright (C) 2012-2013 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
*
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -13,19 +13,21 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/regmap.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
-
-#include <linux/platform_data/max310x.h>
+#include <linux/uaccess.h>
#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
@@ -161,10 +163,6 @@
/* IRDA register bits */
#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
-#define MAX310X_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
-#define MAX310X_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
-#define MAX310X_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
-#define MAX310X_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
/* Flow control trigger level register masks */
#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
@@ -220,26 +218,6 @@
* XOFF2
*/
-/* GPIO configuration register bits */
-#define MAX310X_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
-#define MAX310X_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
-#define MAX310X_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
-#define MAX310X_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
-#define MAX310X_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
-#define MAX310X_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
-#define MAX310X_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
-#define MAX310X_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
-
-/* GPIO DATA register bits */
-#define MAX310X_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
-#define MAX310X_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
-#define MAX310X_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
-#define MAX310X_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
-#define MAX310X_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
-#define MAX310X_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
-#define MAX310X_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
-#define MAX310X_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
-
/* PLL configuration register masks */
#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */
#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */
@@ -283,16 +261,15 @@ struct max310x_devtype {
struct max310x_one {
struct uart_port port;
struct work_struct tx_work;
+ struct work_struct md_work;
};
struct max310x_port {
struct uart_driver uart;
struct max310x_devtype *devtype;
struct regmap *regmap;
- struct regmap_config regcfg;
struct mutex mutex;
- struct max310x_pdata *pdata;
- int gpio_used;
+ struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
#endif
@@ -504,25 +481,33 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}
-static void max310x_set_baud(struct uart_port *port, int baud)
+static int max310x_set_baud(struct uart_port *port, int baud)
{
- unsigned int mode = 0, div = port->uartclk / baud;
+ unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
- if (!(div / 16)) {
+ /* Check for minimal value for divider */
+ if (div < 16)
+ div = 16;
+
+ if (clk % baud && (div / 16) < 0x8000) {
/* Mode x2 */
mode = MAX310X_BRGCFG_2XMODE_BIT;
- div = (port->uartclk * 2) / baud;
- }
-
- if (!(div / 16)) {
- /* Mode x4 */
- mode = MAX310X_BRGCFG_4XMODE_BIT;
- div = (port->uartclk * 4) / baud;
+ clk = port->uartclk * 2;
+ div = clk / baud;
+
+ if (clk % baud && (div / 16) < 0x8000) {
+ /* Mode x4 */
+ mode = MAX310X_BRGCFG_4XMODE_BIT;
+ clk = port->uartclk * 4;
+ div = clk / baud;
+ }
}
max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+
+ return DIV_ROUND_CLOSEST(clk, div);
}
static int max310x_update_best_err(unsigned long f, long *besterr)
@@ -538,18 +523,19 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static int max310x_set_ref_clk(struct max310x_port *s)
+static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
+ bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
long besterr = -1;
- unsigned long fdiv, fmul, bestfreq = s->pdata->frequency;
+ unsigned long fdiv, fmul, bestfreq = freq;
/* First, update error without PLL */
- max310x_update_best_err(s->pdata->frequency, &besterr);
+ max310x_update_best_err(freq, &besterr);
/* Try all possible PLL dividers */
for (div = 1; (div <= 63) && besterr; div++) {
- fdiv = DIV_ROUND_CLOSEST(s->pdata->frequency, div);
+ fdiv = DIV_ROUND_CLOSEST(freq, div);
/* Try multiplier 6 */
fmul = fdiv * 6;
@@ -582,10 +568,7 @@ static int max310x_set_ref_clk(struct max310x_port *s)
}
/* Configure clock source */
- if (s->pdata->driver_flags & MAX310X_EXT_CLK)
- clksrc = MAX310X_CLKSRC_EXTCLK_BIT;
- else
- clksrc = MAX310X_CLKSRC_CRYST_BIT;
+ clksrc = xtal ? MAX310X_CLKSRC_CRYST_BIT : MAX310X_CLKSRC_EXTCLK_BIT;
/* Configure PLL */
if (pllcfg) {
@@ -597,7 +580,7 @@ static int max310x_set_ref_clk(struct max310x_port *s)
regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc);
/* Wait for crystal */
- if (pllcfg && !(s->pdata->driver_flags & MAX310X_EXT_CLK))
+ if (pllcfg && xtal)
msleep(10);
return (int)bestfreq;
@@ -782,11 +765,21 @@ static unsigned int max310x_get_mctrl(struct uart_port *port)
return TIOCM_DSR | TIOCM_CAR;
}
+static void max310x_md_proc(struct work_struct *ws)
+{
+ struct max310x_one *one = container_of(ws, struct max310x_one, md_work);
+
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_LOOPBACK_BIT,
+ (one->port.mctrl & TIOCM_LOOP) ?
+ MAX310X_MODE2_LOOPBACK_BIT : 0);
+}
+
static void max310x_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* DCD and DSR are not wired and CTS/RTS is hadnled automatically
- * so do nothing
- */
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+ schedule_work(&one->md_work);
}
static void max310x_break_ctl(struct uart_port *port, int break_state)
@@ -875,40 +868,76 @@ static void max310x_set_termios(struct uart_port *port,
port->uartclk / 4);
/* Setup baudrate generator */
- max310x_set_baud(port, baud);
+ baud = max310x_set_baud(port, baud);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
}
+static int max310x_ioctl(struct uart_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+#if defined(TIOCSRS485) && defined(TIOCGRS485)
+ struct serial_rs485 rs485;
+ unsigned int val;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485, (void __user *)arg, sizeof(rs485)))
+ return -EFAULT;
+ if (rs485.delay_rts_before_send > 0x0f ||
+ rs485.delay_rts_after_send > 0x0f)
+ return -ERANGE;
+ val = (rs485.delay_rts_before_send << 4) |
+ rs485.delay_rts_after_send;
+ max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
+ if (rs485.flags & SER_RS485_ENABLED) {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT,
+ MAX310X_MODE1_TRNSCVCTRL_BIT);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT,
+ MAX310X_MODE2_ECHOSUPR_BIT);
+ } else {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT, 0);
+ }
+ return 0;
+ case TIOCGRS485:
+ memset(&rs485, 0, sizeof(rs485));
+ val = max310x_port_read(port, MAX310X_MODE1_REG);
+ rs485.flags = (val & MAX310X_MODE1_TRNSCVCTRL_BIT) ?
+ SER_RS485_ENABLED : 0;
+ rs485.flags |= SER_RS485_RTS_ON_SEND;
+ val = max310x_port_read(port, MAX310X_HDPIXDELAY_REG);
+ rs485.delay_rts_before_send = val >> 4;
+ rs485.delay_rts_after_send = val & 0x0f;
+ if (copy_to_user((void __user *)arg, &rs485, sizeof(rs485)))
+ return -EFAULT;
+ return 0;
+ default:
+ break;
+ }
+#endif
+
+ return -ENOIOCTLCMD;
+}
+
static int max310x_startup(struct uart_port *port)
{
- unsigned int val, line = port->line;
struct max310x_port *s = dev_get_drvdata(port->dev);
+ unsigned int val;
s->devtype->power(port, 1);
- /* Configure baud rate, 9600 as default */
- max310x_set_baud(port, 9600);
-
- /* Configure LCR register, 8N1 mode by default */
- max310x_port_write(port, MAX310X_LCR_REG, MAX310X_LCR_WORD_LEN_8);
-
/* Configure MODE1 register */
max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_TRNSCVCTRL_BIT,
- (s->pdata->uart_flags[line] & MAX310X_AUTO_DIR_CTRL)
- ? MAX310X_MODE1_TRNSCVCTRL_BIT : 0);
-
- /* Configure MODE2 register */
- val = MAX310X_MODE2_RXEMPTINV_BIT;
- if (s->pdata->uart_flags[line] & MAX310X_LOOPBACK)
- val |= MAX310X_MODE2_LOOPBACK_BIT;
- if (s->pdata->uart_flags[line] & MAX310X_ECHO_SUPRESS)
- val |= MAX310X_MODE2_ECHOSUPR_BIT;
-
- /* Reset FIFOs */
- val |= MAX310X_MODE2_FIFORST_BIT;
+ MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+
+ /* Configure MODE2 register & Reset FIFOs */
+ val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT;
max310x_port_write(port, MAX310X_MODE2_REG, val);
max310x_port_update(port, MAX310X_MODE2_REG,
MAX310X_MODE2_FIFORST_BIT, 0);
@@ -989,6 +1018,7 @@ static const struct uart_ops max310x_ops = {
.release_port = max310x_null_void,
.config_port = max310x_config_port,
.verify_port = max310x_verify_port,
+ .ioctl = max310x_ioctl,
};
static int __maybe_unused max310x_suspend(struct device *dev)
@@ -1017,6 +1047,8 @@ static int __maybe_unused max310x_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
+
#ifdef CONFIG_GPIOLIB
static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
@@ -1063,23 +1095,16 @@ static int max310x_gpio_direction_output(struct gpio_chip *chip,
}
#endif
-static int max310x_probe(struct device *dev, int is_spi,
- struct max310x_devtype *devtype, int irq)
+static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
+ struct regmap *regmap, int irq, unsigned long flags)
{
+ int i, ret, fmin, fmax, freq, uartclk;
+ struct clk *clk_osc, *clk_xtal;
struct max310x_port *s;
- struct max310x_pdata *pdata = dev_get_platdata(dev);
- int i, ret, uartclk;
-
- /* Check for IRQ */
- if (irq <= 0) {
- dev_err(dev, "No IRQ specified\n");
- return -ENOTSUPP;
- }
+ bool xtal = false;
- if (!pdata) {
- dev_err(dev, "No platform data supplied\n");
- return -EINVAL;
- }
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
/* Alloc port structure */
s = devm_kzalloc(dev, sizeof(*s) +
@@ -1089,52 +1114,44 @@ static int max310x_probe(struct device *dev, int is_spi,
return -ENOMEM;
}
- /* Check input frequency */
- if ((pdata->driver_flags & MAX310X_EXT_CLK) &&
- ((pdata->frequency < 500000) || (pdata->frequency > 35000000)))
- goto err_freq;
- /* Check frequency for quartz */
- if (!(pdata->driver_flags & MAX310X_EXT_CLK) &&
- ((pdata->frequency < 1000000) || (pdata->frequency > 4000000)))
- goto err_freq;
-
- s->pdata = pdata;
- s->devtype = devtype;
- dev_set_drvdata(dev, s);
-
- mutex_init(&s->mutex);
+ clk_osc = devm_clk_get(dev, "osc");
+ clk_xtal = devm_clk_get(dev, "xtal");
+ if (!IS_ERR(clk_osc)) {
+ s->clk = clk_osc;
+ fmin = 500000;
+ fmax = 35000000;
+ } else if (!IS_ERR(clk_xtal)) {
+ s->clk = clk_xtal;
+ fmin = 1000000;
+ fmax = 4000000;
+ xtal = true;
+ } else if (PTR_ERR(clk_osc) == -EPROBE_DEFER ||
+ PTR_ERR(clk_xtal) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
+ dev_err(dev, "Cannot get clock\n");
+ return -EINVAL;
+ }
- /* Setup regmap */
- s->regcfg.reg_bits = 8;
- s->regcfg.val_bits = 8;
- s->regcfg.read_flag_mask = 0x00;
- s->regcfg.write_flag_mask = 0x80;
- s->regcfg.cache_type = REGCACHE_RBTREE;
- s->regcfg.writeable_reg = max310x_reg_writeable;
- s->regcfg.volatile_reg = max310x_reg_volatile;
- s->regcfg.precious_reg = max310x_reg_precious;
- s->regcfg.max_register = devtype->nr * 0x20 - 1;
-
- if (IS_ENABLED(CONFIG_SPI_MASTER) && is_spi) {
- struct spi_device *spi = to_spi_device(dev);
-
- s->regmap = devm_regmap_init_spi(spi, &s->regcfg);
- } else
- return -ENOTSUPP;
+ ret = clk_prepare_enable(s->clk);
+ if (ret)
+ return ret;
- if (IS_ERR(s->regmap)) {
- dev_err(dev, "Failed to initialize register map\n");
- return PTR_ERR(s->regmap);
+ freq = clk_get_rate(s->clk);
+ /* Check frequency limits */
+ if (freq < fmin || freq > fmax) {
+ ret = -ERANGE;
+ goto out_clk;
}
- /* Board specific configure */
- if (s->pdata->init)
- s->pdata->init();
+ s->regmap = regmap;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
ret = devtype->detect(dev);
if (ret)
- return ret;
+ goto out_clk;
for (i = 0; i < devtype->nr; i++) {
unsigned int offs = i << 5;
@@ -1156,7 +1173,7 @@ static int max310x_probe(struct device *dev, int is_spi,
MAX310X_MODE1_AUTOSLEEP_BIT);
}
- uartclk = max310x_set_ref_clk(s);
+ uartclk = max310x_set_ref_clk(s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
/* Register UART driver */
@@ -1168,9 +1185,28 @@ static int max310x_probe(struct device *dev, int is_spi,
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(dev, "Registering UART driver failed\n");
- return ret;
+ goto out_clk;
}
+#ifdef CONFIG_GPIOLIB
+ /* Setup GPIO controller */
+ s->gpio.owner = THIS_MODULE;
+ s->gpio.dev = dev;
+ s->gpio.label = dev_name(dev);
+ s->gpio.direction_input = max310x_gpio_direction_input;
+ s->gpio.get = max310x_gpio_get;
+ s->gpio.direction_output = max310x_gpio_direction_output;
+ s->gpio.set = max310x_gpio_set;
+ s->gpio.base = -1;
+ s->gpio.ngpio = devtype->nr * 4;
+ s->gpio.can_sleep = 1;
+ ret = gpiochip_add(&s->gpio);
+ if (ret)
+ goto out_uart;
+#endif
+
+ mutex_init(&s->mutex);
+
for (i = 0; i < devtype->nr; i++) {
/* Initialize port data */
s->p[i].port.line = i;
@@ -1178,8 +1214,7 @@ static int max310x_probe(struct device *dev, int is_spi,
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_MAX310X;
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
- s->p[i].port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE |
- UPF_LOW_LATENCY;
+ s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.iobase = i * 0x20;
s->p[i].port.membase = (void __iomem *)~0;
@@ -1195,48 +1230,35 @@ static int max310x_probe(struct device *dev, int is_spi,
MAX310X_MODE1_IRQSEL_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
+ /* Initialize queue for changing mode */
+ INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Register port */
uart_add_one_port(&s->uart, &s->p[i].port);
/* Go to suspend mode */
devtype->power(&s->p[i].port, 0);
}
-#ifdef CONFIG_GPIOLIB
- /* Setup GPIO cotroller */
- if (s->pdata->gpio_base) {
- s->gpio.owner = THIS_MODULE;
- s->gpio.dev = dev;
- s->gpio.label = dev_name(dev);
- s->gpio.direction_input = max310x_gpio_direction_input;
- s->gpio.get = max310x_gpio_get;
- s->gpio.direction_output= max310x_gpio_direction_output;
- s->gpio.set = max310x_gpio_set;
- s->gpio.base = s->pdata->gpio_base;
- s->gpio.ngpio = devtype->nr * 4;
- s->gpio.can_sleep = 1;
- if (!gpiochip_add(&s->gpio))
- s->gpio_used = 1;
- } else
- dev_info(dev, "GPIO support not enabled\n");
-#endif
-
/* Setup interrupt */
ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), s);
- if (ret) {
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ IRQF_ONESHOT | flags, dev_name(dev), s);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+
+ mutex_destroy(&s->mutex);
+
#ifdef CONFIG_GPIOLIB
- if (s->gpio_used)
- WARN_ON(gpiochip_remove(&s->gpio));
+ WARN_ON(gpiochip_remove(&s->gpio));
+
+out_uart:
#endif
- }
+ uart_unregister_driver(&s->uart);
- return ret;
+out_clk:
+ clk_disable_unprepare(s->clk);
-err_freq:
- dev_err(dev, "Frequency parameter incorrect\n");
- return -EINVAL;
+ return ret;
}
static int max310x_remove(struct device *dev)
@@ -1244,30 +1266,51 @@ static int max310x_remove(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i, ret = 0;
+#ifdef CONFIG_GPIOLIB
+ ret = gpiochip_remove(&s->gpio);
+ if (ret)
+ return ret;
+#endif
+
for (i = 0; i < s->uart.nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
+ cancel_work_sync(&s->p[i].md_work);
uart_remove_one_port(&s->uart, &s->p[i].port);
s->devtype->power(&s->p[i].port, 0);
}
+ mutex_destroy(&s->mutex);
uart_unregister_driver(&s->uart);
-
-#ifdef CONFIG_GPIOLIB
- if (s->gpio_used)
- ret = gpiochip_remove(&s->gpio);
-#endif
-
- if (s->pdata->exit)
- s->pdata->exit();
+ clk_disable_unprepare(s->clk);
return ret;
}
+static const struct of_device_id __maybe_unused max310x_dt_ids[] = {
+ { .compatible = "maxim,max3107", .data = &max3107_devtype, },
+ { .compatible = "maxim,max3108", .data = &max3108_devtype, },
+ { .compatible = "maxim,max3109", .data = &max3109_devtype, },
+ { .compatible = "maxim,max14830", .data = &max14830_devtype },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max310x_dt_ids);
+
+static struct regmap_config regcfg = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .write_flag_mask = 0x80,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+};
+
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_probe(struct spi_device *spi)
{
- struct max310x_devtype *devtype =
- (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
+ struct max310x_devtype *devtype;
+ unsigned long flags = 0;
+ struct regmap *regmap;
int ret;
/* Setup SPI bus */
@@ -1275,12 +1318,25 @@ static int max310x_spi_probe(struct spi_device *spi)
spi->mode = spi->mode ? : SPI_MODE_0;
spi->max_speed_hz = spi->max_speed_hz ? : 26000000;
ret = spi_setup(spi);
- if (ret) {
- dev_err(&spi->dev, "SPI setup failed\n");
+ if (ret)
return ret;
+
+ if (spi->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(max310x_dt_ids, &spi->dev);
+
+ devtype = (struct max310x_devtype *)of_id->data;
+ } else {
+ const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+ devtype = (struct max310x_devtype *)id_entry->driver_data;
+ flags = IRQF_TRIGGER_FALLING;
}
- return max310x_probe(&spi->dev, 1, devtype, spi->irq);
+ regcfg.max_register = devtype->nr * 0x20 - 1;
+ regmap = devm_regmap_init_spi(spi, &regcfg);
+
+ return max310x_probe(&spi->dev, devtype, regmap, spi->irq, flags);
}
static int max310x_spi_remove(struct spi_device *spi)
@@ -1288,8 +1344,6 @@ static int max310x_spi_remove(struct spi_device *spi)
return max310x_remove(&spi->dev);
}
-static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
-
static const struct spi_device_id max310x_id_table[] = {
{ "max3107", (kernel_ulong_t)&max3107_devtype, },
{ "max3108", (kernel_ulong_t)&max3108_devtype, },
@@ -1301,9 +1355,10 @@ MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_uart_driver = {
.driver = {
- .name = MAX310X_NAME,
- .owner = THIS_MODULE,
- .pm = &max310x_pm_ops,
+ .name = MAX310X_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max310x_dt_ids),
+ .pm = &max310x_pm_ops,
},
.probe = max310x_spi_probe,
.remove = max310x_spi_remove,
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index ec06505e3ae6..97888f4900ec 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -421,6 +421,7 @@ struct psc_fifoc {
static struct psc_fifoc __iomem *psc_fifoc;
static unsigned int psc_fifoc_irq;
+static struct clk *psc_fifoc_clk;
static void mpc512x_psc_fifo_init(struct uart_port *port)
{
@@ -568,36 +569,73 @@ static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
/* Init PSC FIFO Controller */
static int __init mpc512x_psc_fifoc_init(void)
{
+ int err;
struct device_node *np;
+ struct clk *clk;
+
+ /* default error code, potentially overwritten by clock calls */
+ err = -ENODEV;
np = of_find_compatible_node(NULL, NULL,
"fsl,mpc5121-psc-fifo");
if (!np) {
pr_err("%s: Can't find FIFOC node\n", __func__);
- return -ENODEV;
+ goto out_err;
}
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ /* backwards compat with device trees that lack clock specs */
+ clk = clk_get_sys(np->name, "ipg");
+ }
+ if (IS_ERR(clk)) {
+ pr_err("%s: Can't lookup FIFO clock\n", __func__);
+ err = PTR_ERR(clk);
+ goto out_ofnode_put;
+ }
+ if (clk_prepare_enable(clk)) {
+ pr_err("%s: Can't enable FIFO clock\n", __func__);
+ clk_put(clk);
+ goto out_ofnode_put;
+ }
+ psc_fifoc_clk = clk;
+
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
- of_node_put(np);
- return -ENODEV;
+ goto out_clk_disable;
}
psc_fifoc_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if (psc_fifoc_irq == 0) {
pr_err("%s: Can't get FIFOC irq\n", __func__);
- iounmap(psc_fifoc);
- return -ENODEV;
+ goto out_unmap;
}
+ of_node_put(np);
return 0;
+
+out_unmap:
+ iounmap(psc_fifoc);
+out_clk_disable:
+ clk_disable_unprepare(psc_fifoc_clk);
+ clk_put(psc_fifoc_clk);
+out_ofnode_put:
+ of_node_put(np);
+out_err:
+ return err;
}
static void __exit mpc512x_psc_fifoc_uninit(void)
{
iounmap(psc_fifoc);
+
+ /* disable the clock, errors are not fatal */
+ if (psc_fifoc_clk) {
+ clk_disable_unprepare(psc_fifoc_clk);
+ clk_put(psc_fifoc_clk);
+ psc_fifoc_clk = NULL;
+ }
}
/* 512x specific interrupt handler. The caller holds the port lock */
@@ -619,29 +657,55 @@ static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
}
static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM];
+static struct clk *psc_ipg_clk[MPC52xx_PSC_MAXNUM];
/* called from within the .request_port() callback (allocation) */
static int mpc512x_psc_alloc_clock(struct uart_port *port)
{
int psc_num;
- char clk_name[16];
struct clk *clk;
int err;
psc_num = (port->mapbase & 0xf00) >> 8;
- snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
- clk = devm_clk_get(port->dev, clk_name);
+
+ clk = devm_clk_get(port->dev, "mclk");
if (IS_ERR(clk)) {
dev_err(port->dev, "Failed to get MCLK!\n");
- return PTR_ERR(clk);
+ err = PTR_ERR(clk);
+ goto out_err;
}
err = clk_prepare_enable(clk);
if (err) {
dev_err(port->dev, "Failed to enable MCLK!\n");
- return err;
+ goto out_err;
}
psc_mclk_clk[psc_num] = clk;
+
+ clk = devm_clk_get(port->dev, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(port->dev, "Failed to get IPG clock!\n");
+ err = PTR_ERR(clk);
+ goto out_err;
+ }
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(port->dev, "Failed to enable IPG clock!\n");
+ goto out_err;
+ }
+ psc_ipg_clk[psc_num] = clk;
+
return 0;
+
+out_err:
+ if (psc_mclk_clk[psc_num]) {
+ clk_disable_unprepare(psc_mclk_clk[psc_num]);
+ psc_mclk_clk[psc_num] = NULL;
+ }
+ if (psc_ipg_clk[psc_num]) {
+ clk_disable_unprepare(psc_ipg_clk[psc_num]);
+ psc_ipg_clk[psc_num] = NULL;
+ }
+ return err;
}
/* called from within the .release_port() callback (release) */
@@ -656,6 +720,10 @@ static void mpc512x_psc_relse_clock(struct uart_port *port)
clk_disable_unprepare(clk);
psc_mclk_clk[psc_num] = NULL;
}
+ if (psc_ipg_clk[psc_num]) {
+ clk_disable_unprepare(psc_ipg_clk[psc_num]);
+ psc_ipg_clk[psc_num] = NULL;
+ }
}
/* implementation of the .clock() callback (enable/disable) */
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b5d779cd3c2b..053b98eb46c8 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -39,6 +39,13 @@
#include "msm_serial.h"
+enum {
+ UARTDM_1P1 = 1,
+ UARTDM_1P2,
+ UARTDM_1P3,
+ UARTDM_1P4,
+};
+
struct msm_port {
struct uart_port uart;
char name[16];
@@ -309,6 +316,8 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
@@ -316,6 +325,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+
+ /* Disable DM modes */
+ if (msm_port->is_uartdm)
+ msm_write(port, 0, UARTDM_DMEN);
}
static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -711,6 +724,117 @@ static void msm_power(struct uart_port *port, unsigned int state,
}
}
+#ifdef CONFIG_CONSOLE_POLL
+static int msm_poll_init(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Enable single character mode on RX FIFO */
+ if (msm_port->is_uartdm >= UARTDM_1P4)
+ msm_write(port, UARTDM_DMEN_RX_SC_ENABLE, UARTDM_DMEN);
+
+ return 0;
+}
+
+static int msm_poll_get_char_single(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+
+ if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ return NO_POLL_CHAR;
+ else
+ return msm_read(port, rf_reg) & 0xff;
+}
+
+static int msm_poll_get_char_dm_1p3(struct uart_port *port)
+{
+ int c;
+ static u32 slop;
+ static int count;
+ unsigned char *sp = (unsigned char *)&slop;
+
+ /* Check if a previous read had more than one char */
+ if (count) {
+ c = sp[sizeof(slop) - count];
+ count--;
+ /* Or if FIFO is empty */
+ } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ /*
+ * If RX packing buffer has less than a word, force stale to
+ * push contents into RX FIFO
+ */
+ count = msm_read(port, UARTDM_RXFS);
+ count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
+ if (count) {
+ msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ slop = msm_read(port, UARTDM_RF);
+ c = sp[0];
+ count--;
+ } else {
+ c = NO_POLL_CHAR;
+ }
+ /* FIFO has a word */
+ } else {
+ slop = msm_read(port, UARTDM_RF);
+ c = sp[0];
+ count = sizeof(slop) - 1;
+ }
+
+ return c;
+}
+
+static int msm_poll_get_char(struct uart_port *port)
+{
+ u32 imr;
+ int c;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Disable all interrupts */
+ imr = msm_read(port, UART_IMR);
+ msm_write(port, 0, UART_IMR);
+
+ if (msm_port->is_uartdm == UARTDM_1P3)
+ c = msm_poll_get_char_dm_1p3(port);
+ else
+ c = msm_poll_get_char_single(port);
+
+ /* Enable interrupts */
+ msm_write(port, imr, UART_IMR);
+
+ return c;
+}
+
+static void msm_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ u32 imr;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /* Disable all interrupts */
+ imr = msm_read(port, UART_IMR);
+ msm_write(port, 0, UART_IMR);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port, 1);
+
+ /* Wait until FIFO is empty */
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ cpu_relax();
+
+ /* Write a character */
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+
+ /* Wait until FIFO is empty */
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ cpu_relax();
+
+ /* Enable interrupts */
+ msm_write(port, imr, UART_IMR);
+
+ return;
+}
+#endif
+
static struct uart_ops msm_uart_pops = {
.tx_empty = msm_tx_empty,
.set_mctrl = msm_set_mctrl,
@@ -729,6 +853,11 @@ static struct uart_ops msm_uart_pops = {
.config_port = msm_config_port,
.verify_port = msm_verify_port,
.pm = msm_power,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = msm_poll_init,
+ .poll_get_char = msm_poll_get_char,
+ .poll_put_char = msm_poll_put_char,
+#endif
};
static struct msm_port msm_uart_ports[] = {
@@ -900,7 +1029,10 @@ static struct uart_driver msm_uart_driver = {
static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
static const struct of_device_id msm_uartdm_table[] = {
- { .compatible = "qcom,msm-uartdm" },
+ { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
+ { .compatible = "qcom,msm-uartdm-v1.2", .data = (void *)UARTDM_1P2 },
+ { .compatible = "qcom,msm-uartdm-v1.3", .data = (void *)UARTDM_1P3 },
+ { .compatible = "qcom,msm-uartdm-v1.4", .data = (void *)UARTDM_1P4 },
{ }
};
@@ -909,6 +1041,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
struct msm_port *msm_port;
struct resource *resource;
struct uart_port *port;
+ const struct of_device_id *id;
int irq;
if (pdev->id == -1)
@@ -923,8 +1056,9 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- if (of_match_device(msm_uartdm_table, &pdev->dev))
- msm_port->is_uartdm = 1;
+ id = of_match_device(msm_uartdm_table, &pdev->dev);
+ if (id)
+ msm_port->is_uartdm = (unsigned long)id->data;
else
msm_port->is_uartdm = 0;
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 469fda50ac63..1e9b68b6f9eb 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -59,6 +59,7 @@
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_FORCE_STALE (4 << 8)
#define UART_CR_CMD_RESET_TX_READY (3 << 8)
#define UART_CR_TX_DISABLE (1 << 3)
#define UART_CR_TX_ENABLE (1 << 2)
@@ -113,6 +114,14 @@
#define GSBI_PROTOCOL_UART 0x40
#define GSBI_PROTOCOL_IDLE 0x0
+#define UARTDM_RXFS 0x50
+#define UARTDM_RXFS_BUF_SHIFT 0x7
+#define UARTDM_RXFS_BUF_MASK 0x7
+
+#define UARTDM_DMEN 0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+
#define UARTDM_DMRX 0x34
#define UARTDM_NCF_TX 0x40
#define UARTDM_RX_TOTAL_SNAP 0x38
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..dd8b1a5458ff 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -342,7 +342,14 @@ static void serial_omap_stop_tx(struct uart_port *port)
if ((up->rs485.flags & SER_RS485_ENABLED) &&
!(up->rs485.flags & SER_RS485_RX_DURING_TX)) {
- up->ier = UART_IER_RLSI | UART_IER_RDI;
+ /*
+ * Empty the RX FIFO, we are not interested in anything
+ * received during the half-duplex transmission.
+ */
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
+ /* Re-enable RX interrupts */
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ up->port.read_status_mask |= UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
@@ -355,7 +362,7 @@ static void serial_omap_stop_rx(struct uart_port *port)
struct uart_omap_port *up = to_uart_omap_port(port);
pm_runtime_get_sync(up->dev);
- up->ier &= ~UART_IER_RLSI;
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
pm_runtime_mark_last_busy(up->dev);
@@ -738,9 +745,6 @@ static int serial_omap_startup(struct uart_port *port)
return retval;
}
disable_irq(up->wakeirq);
- } else {
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1608,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
- } else
+ } else if (up->rts_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
up->rts_gpio = -EINVAL;
+ }
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
@@ -1687,6 +1694,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
up->port.regshift = 2;
up->port.fifosize = 64;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 9cbd3acaf37f..0931b3fe9edf 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1508,10 +1508,14 @@ static int pch_uart_verify_port(struct uart_port *port,
__func__);
return -EOPNOTSUPP;
#endif
- dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
- if (!priv->use_dma)
+ if (!priv->use_dma) {
pch_request_dma(port);
- priv->use_dma = 1;
+ if (priv->chan_rx)
+ priv->use_dma = 1;
+ }
+ dev_info(priv->port.dev, "PCH UART: %s\n",
+ priv->use_dma ?
+ "Use DMA Mode" : "No DMA");
}
return 0;
@@ -1758,7 +1762,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
int fifosize;
int port_type;
struct pch_uart_driver_data *board;
+#ifdef CONFIG_DEBUG_FS
char name[32]; /* for debugfs file name */
+#endif
board = &drv_dat[id->driver_data];
port_type = board->port_type;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index c1af04d46682..23f459600738 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1209,7 +1209,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
- clk_disable_unprepare(ourport->clk);
return 0;
}
@@ -1283,10 +1282,25 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
if (ret < 0)
goto probe_err;
+ if (!s3c24xx_uart_drv.state) {
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ pr_err("Failed to register Samsung UART driver\n");
+ return ret;
+ }
+ }
+
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
+ /*
+ * Deactivate the clock enabled in s3c24xx_serial_init_port here,
+ * so that a potential re-enablement through the pm-callback overlaps
+ * and keeps the clock enabled in this case.
+ */
+ clk_disable_unprepare(ourport->clk);
+
#ifdef CONFIG_SAMSUNG_CLOCK
ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
if (ret < 0)
@@ -1315,6 +1329,8 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
uart_remove_one_port(&s3c24xx_uart_drv, port);
}
+ uart_unregister_driver(&s3c24xx_uart_drv);
+
return 0;
}
@@ -1814,35 +1830,7 @@ static struct platform_driver samsung_serial_driver = {
},
};
-/* module initialisation code */
-
-static int __init s3c24xx_serial_modinit(void)
-{
- int ret;
-
- ret = uart_register_driver(&s3c24xx_uart_drv);
- if (ret < 0) {
- pr_err("Failed to register Samsung UART driver\n");
- return ret;
- }
-
- ret = platform_driver_register(&samsung_serial_driver);
- if (ret < 0) {
- pr_err("Failed to register platform driver\n");
- uart_unregister_driver(&s3c24xx_uart_drv);
- }
-
- return ret;
-}
-
-static void __exit s3c24xx_serial_modexit(void)
-{
- platform_driver_unregister(&samsung_serial_driver);
- uart_unregister_driver(&s3c24xx_uart_drv);
-}
-
-module_init(s3c24xx_serial_modinit);
-module_exit(s3c24xx_serial_modexit);
+module_platform_driver(samsung_serial_driver);
MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index dfe79ccc4fb3..d5c2a287b7e7 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -34,6 +34,7 @@
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -44,8 +45,6 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/clk/tegra.h>
-
#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
@@ -103,6 +102,7 @@ struct tegra_uart_port {
const struct tegra_uart_chip_data *cdata;
struct clk *uart_clk;
+ struct reset_control *rst;
unsigned int current_baud;
/* Register shadow */
@@ -120,7 +120,6 @@ struct tegra_uart_port {
bool rx_timeout;
int rx_in_progress;
int symb_bit;
- int dma_req_sel;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
@@ -832,9 +831,9 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
clk_prepare_enable(tup->uart_clk);
/* Reset the UART controller to clear all previous status.*/
- tegra_periph_reset_assert(tup->uart_clk);
+ reset_control_assert(tup->rst);
udelay(10);
- tegra_periph_reset_deassert(tup->uart_clk);
+ reset_control_deassert(tup->rst);
tup->rx_in_progress = 0;
tup->tx_in_progress = 0;
@@ -910,15 +909,14 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_addr_t dma_phys;
int ret;
struct dma_slave_config dma_sconfig;
- dma_cap_mask_t mask;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!dma_chan) {
+ dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
- "Dma channel is not available, will try later\n");
- return -EPROBE_DEFER;
+ "DMA channel alloc failed: %d\n", ret);
+ return ret;
}
if (dma_to_memory) {
@@ -938,7 +936,6 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_buf = tup->uport.state->xmit.buf;
}
- dma_sconfig.slave_id = tup->dma_req_sel;
if (dma_to_memory) {
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -1222,17 +1219,8 @@ static int tegra_uart_parse_dt(struct platform_device *pdev,
struct tegra_uart_port *tup)
{
struct device_node *np = pdev->dev.of_node;
- u32 of_dma[2];
int port;
- if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
- of_dma, 2) >= 0) {
- tup->dma_req_sel = of_dma[1];
- } else {
- dev_err(&pdev->dev, "missing dma requestor in device tree\n");
- return -EINVAL;
- }
-
port = of_alias_get_id(np, "serial");
if (port < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
@@ -1320,6 +1308,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
return PTR_ERR(tup->uart_clk);
}
+ tup->rst = devm_reset_control_get(&pdev->dev, "serial");
+ if (IS_ERR(tup->rst)) {
+ dev_err(&pdev->dev, "Couldn't get the reset\n");
+ return PTR_ERR(tup->rst);
+ }
+
u->iotype = UPIO_MEM32;
u->irq = platform_get_irq(pdev, 0);
u->regshift = 2;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ece2049bd270..2cf5649a6dc0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1319,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
uport = state->uart_port;
port = &state->port;
- pr_debug("uart_close(%d) called\n", uport->line);
+ pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
- if (tty_port_close_start(port, tty, filp) == 0)
+ if (!port->count || tty_port_close_start(port, tty, filp) == 0)
return;
/*
@@ -1762,7 +1762,7 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow contro.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
* @options: pointer to option string
* @baud: pointer to an 'int' variable for the baud rate.
* @parity: pointer to an 'int' variable for the parity.
@@ -2609,7 +2609,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
/*
* Register the port whether it's detected or not. This allows
- * setserial to be used to alter this ports parameters.
+ * setserial to be used to alter this port's parameters.
*/
tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
uport->line, uport->dev, port, tty_dev_attr_groups);
@@ -2645,6 +2645,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
+ struct tty_struct *tty;
int ret = 0;
BUG_ON(in_interrupt());
@@ -2673,8 +2674,17 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
tty_unregister_device(drv->tty_driver, uport->line);
- if (port->tty)
+ tty = tty_port_tty_get(port);
+ if (tty) {
tty_vhangup(port->tty);
+ tty_kref_put(tty);
+ }
+
+ /*
+ * If the port is used as a console, unregister it
+ */
+ if (uart_console(uport))
+ unregister_console(uport->cons);
/*
* Free the port IO and memory resources, if any.
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7d8103cd3e2e..88236da0ddf7 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -23,35 +23,35 @@
#undef DEBUG
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/sh_dma.h>
-#include <linux/timer.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/console.h>
-#include <linux/platform_device.h>
-#include <linux/serial_sci.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/ctype.h>
-#include <linux/err.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
@@ -59,11 +59,32 @@
#include "sh-sci.h"
+/* Offsets into the sci_port->irqs array */
+enum {
+ SCIx_ERI_IRQ,
+ SCIx_RXI_IRQ,
+ SCIx_TXI_IRQ,
+ SCIx_BRI_IRQ,
+ SCIx_NR_IRQS,
+
+ SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
+};
+
+#define SCIx_IRQ_IS_MUXED(port) \
+ ((port)->irqs[SCIx_ERI_IRQ] == \
+ (port)->irqs[SCIx_RXI_IRQ]) || \
+ ((port)->irqs[SCIx_ERI_IRQ] && \
+ ((port)->irqs[SCIx_RXI_IRQ] < 0))
+
struct sci_port {
struct uart_port port;
/* Platform configuration */
struct plat_sci_port *cfg;
+ int overrun_bit;
+ unsigned int error_mask;
+ unsigned int sampling_rate;
+
/* Break timer */
struct timer_list break_timer;
@@ -74,8 +95,8 @@ struct sci_port {
/* Function clock */
struct clk *fclk;
+ int irqs[SCIx_NR_IRQS];
char *irqstr[SCIx_NR_IRQS];
- char *gpiostr[SCIx_NR_FNS];
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -407,7 +428,7 @@ static int sci_probe_regmap(struct plat_sci_port *cfg)
cfg->regtype = SCIx_HSCIF_REGTYPE;
break;
default:
- printk(KERN_ERR "Can't probe register map for given port\n");
+ pr_err("Can't probe register map for given port\n");
return -EINVAL;
}
@@ -421,9 +442,9 @@ static void sci_port_enable(struct sci_port *sci_port)
pm_runtime_get_sync(sci_port->port.dev);
- clk_enable(sci_port->iclk);
+ clk_prepare_enable(sci_port->iclk);
sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- clk_enable(sci_port->fclk);
+ clk_prepare_enable(sci_port->fclk);
}
static void sci_port_disable(struct sci_port *sci_port)
@@ -431,8 +452,16 @@ static void sci_port_disable(struct sci_port *sci_port)
if (!sci_port->port.dev)
return;
- clk_disable(sci_port->fclk);
- clk_disable(sci_port->iclk);
+ /* Cancel the break timer to ensure that the timer handler will not try
+ * to access the hardware with clocks and power disabled. Reset the
+ * break flag to make the break debouncing state machine ready for the
+ * next break.
+ */
+ del_timer_sync(&sci_port->break_timer);
+ sci_port->break_flag = 0;
+
+ clk_disable_unprepare(sci_port->fclk);
+ clk_disable_unprepare(sci_port->iclk);
pm_runtime_put_sync(sci_port->port.dev);
}
@@ -557,7 +586,7 @@ static inline int sci_rxd_in(struct uart_port *port)
return 1;
/* Cast for ARM damage */
- return !!__raw_readb((void __iomem *)s->cfg->port_reg);
+ return !!__raw_readb((void __iomem *)(uintptr_t)s->cfg->port_reg);
}
/* ********************************************************************** *
@@ -733,8 +762,6 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
- sci_port_enable(port);
-
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
sci_schedule_break_timer(port);
@@ -744,8 +771,6 @@ static void sci_break_timer(unsigned long data)
sci_schedule_break_timer(port);
} else
port->break_flag = 0;
-
- sci_port_disable(port);
}
static int sci_handle_errors(struct uart_port *port)
@@ -755,19 +780,15 @@ static int sci_handle_errors(struct uart_port *port)
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
- /*
- * Handle overruns, if supported.
- */
- if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
- if (status & (1 << s->cfg->overrun_bit)) {
- port->icount.overrun++;
+ /* Handle overruns */
+ if (status & (1 << s->overrun_bit)) {
+ port->icount.overrun++;
- /* overrun error */
- if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
- copied++;
+ /* overrun error */
+ if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
+ copied++;
- dev_notice(port->dev, "overrun error");
- }
+ dev_notice(port->dev, "overrun error\n");
}
if (status & SCxSR_FER(port)) {
@@ -809,7 +830,7 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
- dev_notice(port->dev, "parity error");
+ dev_notice(port->dev, "parity error\n");
}
if (copied)
@@ -829,7 +850,7 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if (!reg->size)
return 0;
- if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
+ if ((serial_port_in(port, SCLSR) & (1 << s->overrun_bit))) {
serial_port_out(port, SCLSR, 0);
port->icount.overrun++;
@@ -890,7 +911,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
/* Disable future Rx interrupts */
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
disable_irq_nosync(irq);
- scr |= 0x4000;
+ scr |= SCSCR_RDRQE;
} else {
scr &= ~SCSCR_RIE;
}
@@ -1020,8 +1041,7 @@ static int sci_notifier(struct notifier_block *self,
sci_port = container_of(self, struct sci_port, freq_transition);
- if ((phase == CPUFREQ_POSTCHANGE) ||
- (phase == CPUFREQ_RESUMECHANGE)) {
+ if (phase == CPUFREQ_POSTCHANGE) {
struct uart_port *port = &sci_port->port;
spin_lock_irqsave(&port->lock, flags);
@@ -1075,19 +1095,19 @@ static int sci_request_irq(struct sci_port *port)
for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
struct sci_irq_desc *desc;
- unsigned int irq;
+ int irq;
if (SCIx_IRQ_IS_MUXED(port)) {
i = SCIx_MUX_IRQ;
irq = up->irq;
} else {
- irq = port->cfg->irqs[i];
+ irq = port->irqs[i];
/*
* Certain port types won't support all of the
* available interrupt sources.
*/
- if (unlikely(!irq))
+ if (unlikely(irq < 0))
continue;
}
@@ -1112,7 +1132,7 @@ static int sci_request_irq(struct sci_port *port)
out_noirq:
while (--i >= 0)
- free_irq(port->cfg->irqs[i], port);
+ free_irq(port->irqs[i], port);
out_nomem:
while (--j >= 0)
@@ -1130,16 +1150,16 @@ static void sci_free_irq(struct sci_port *port)
* IRQ first.
*/
for (i = 0; i < SCIx_NR_IRQS; i++) {
- unsigned int irq = port->cfg->irqs[i];
+ int irq = port->irqs[i];
/*
* Certain port types won't support all of the available
* interrupt sources.
*/
- if (unlikely(!irq))
+ if (unlikely(irq < 0))
continue;
- free_irq(port->cfg->irqs[i], port);
+ free_irq(port->irqs[i], port);
kfree(port->irqstr[i]);
if (SCIx_IRQ_IS_MUXED(port)) {
@@ -1149,67 +1169,6 @@ static void sci_free_irq(struct sci_port *port)
}
}
-static const char *sci_gpio_names[SCIx_NR_FNS] = {
- "sck", "rxd", "txd", "cts", "rts",
-};
-
-static const char *sci_gpio_str(unsigned int index)
-{
- return sci_gpio_names[index];
-}
-
-static void sci_init_gpios(struct sci_port *port)
-{
- struct uart_port *up = &port->port;
- int i;
-
- if (!port->cfg)
- return;
-
- for (i = 0; i < SCIx_NR_FNS; i++) {
- const char *desc;
- int ret;
-
- if (!port->cfg->gpios[i])
- continue;
-
- desc = sci_gpio_str(i);
-
- port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s",
- dev_name(up->dev), desc);
-
- /*
- * If we've failed the allocation, we can still continue
- * on with a NULL string.
- */
- if (!port->gpiostr[i])
- dev_notice(up->dev, "%s string allocation failure\n",
- desc);
-
- ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]);
- if (unlikely(ret != 0)) {
- dev_notice(up->dev, "failed %s gpio request\n", desc);
-
- /*
- * If we can't get the GPIO for whatever reason,
- * no point in keeping the verbose string around.
- */
- kfree(port->gpiostr[i]);
- }
- }
-}
-
-static void sci_free_gpios(struct sci_port *port)
-{
- int i;
-
- for (i = 0; i < SCIx_NR_FNS; i++)
- if (port->cfg->gpios[i]) {
- gpio_free(port->cfg->gpios[i]);
- kfree(port->gpiostr[i]);
- }
-}
-
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = serial_port_in(port, SCxSR);
@@ -1240,7 +1199,9 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) |
+ SCFCR_LOOP);
}
}
@@ -1309,7 +1270,7 @@ static int sci_dma_rx_push(struct sci_port *s, size_t count)
}
if (room < count)
- dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
count - room);
if (!room)
return room;
@@ -1330,7 +1291,8 @@ static void sci_dma_rx_complete(void *arg)
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
+ dev_dbg(port->dev, "%s(%d) active #%d\n",
+ __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
@@ -1406,8 +1368,8 @@ static void sci_submit_rx(struct sci_port *s)
sci_rx_dma_release(s, true);
return;
}
- dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
- s->cookie_rx[i], i);
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n",
+ __func__, s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1442,7 +1404,7 @@ static void work_fn_rx(struct work_struct *work)
int count;
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
- dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
+ dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
sh_desc->partial, sh_desc->cookie);
spin_lock_irqsave(&port->lock, flags);
@@ -1466,8 +1428,8 @@ static void work_fn_rx(struct work_struct *work)
s->active_rx = s->cookie_rx[!new];
- dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
- s->cookie_rx[new], new, s->active_rx);
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n",
+ __func__, s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
@@ -1520,8 +1482,8 @@ static void work_fn_tx(struct work_struct *work)
return;
}
- dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
- xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
+ __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
dma_async_issue_pending(chan);
}
@@ -1536,9 +1498,9 @@ static void sci_start_tx(struct uart_port *port)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
u16 new, scr = serial_port_in(port, SCSCR);
if (s->chan_tx)
- new = scr | 0x8000;
+ new = scr | SCSCR_TDRQE;
else
- new = scr & ~0x8000;
+ new = scr & ~SCSCR_TDRQE;
if (new != scr)
serial_port_out(port, SCSCR, new);
}
@@ -1565,7 +1527,7 @@ static void sci_stop_tx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x8000;
+ ctrl &= ~SCSCR_TDRQE;
ctrl &= ~SCSCR_TIE;
@@ -1579,7 +1541,7 @@ static void sci_start_rx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x4000;
+ ctrl &= ~SCSCR_RDRQE;
serial_port_out(port, SCSCR, ctrl);
}
@@ -1591,7 +1553,7 @@ static void sci_stop_rx(struct uart_port *port)
ctrl = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- ctrl &= ~0x4000;
+ ctrl &= ~SCSCR_RDRQE;
ctrl &= ~port_rx_irq_mask(port);
@@ -1640,8 +1602,8 @@ static bool filter(struct dma_chan *chan, void *slave)
{
struct sh_dmae_slave *param = slave;
- dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
- param->shdma_slave.slave_id);
+ dev_dbg(chan->device->dev, "%s: slave ID %d\n",
+ __func__, param->shdma_slave.slave_id);
chan->private = &param->shdma_slave;
return true;
@@ -1654,8 +1616,8 @@ static void rx_timer_fn(unsigned long arg)
u16 scr = serial_port_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- scr &= ~0x4000;
- enable_irq(s->cfg->irqs[1]);
+ scr &= ~SCSCR_RDRQE;
+ enable_irq(s->irqs[SCIx_RXI_IRQ]);
}
serial_port_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1670,8 +1632,7 @@ static void sci_request_dma(struct uart_port *port)
dma_cap_mask_t mask;
int nent;
- dev_dbg(port->dev, "%s: port %d\n", __func__,
- port->line);
+ dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
return;
@@ -1691,16 +1652,18 @@ static void sci_request_dma(struct uart_port *port)
s->chan_tx = chan;
sg_init_table(&s->sg_tx, 1);
/* UART circular tx buffer is an aligned page. */
- BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
- UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
+ UART_XMIT_SIZE,
+ (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
if (!nent)
sci_tx_dma_release(s, false);
else
- dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
- sg_dma_len(&s->sg_tx),
- port->state->xmit.buf, sg_dma_address(&s->sg_tx));
+ dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
+ __func__,
+ sg_dma_len(&s->sg_tx), port->state->xmit.buf,
+ &sg_dma_address(&s->sg_tx));
s->sg_len_tx = nent;
@@ -1740,7 +1703,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
- (int)buf[i] & ~PAGE_MASK);
+ (uintptr_t)buf[i] & ~PAGE_MASK);
sg_dma_address(sg) = dma[i];
}
@@ -1808,21 +1771,11 @@ static void sci_shutdown(struct uart_port *port)
sci_free_irq(s);
}
-static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
unsigned long freq)
{
- switch (algo_id) {
- case SCBRR_ALGO_1:
- return ((freq + 16 * bps) / (16 * bps) - 1);
- case SCBRR_ALGO_2:
- return ((freq + 16 * bps) / (32 * bps) - 1);
- case SCBRR_ALGO_3:
- return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
- case SCBRR_ALGO_4:
- return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
- case SCBRR_ALGO_5:
- return (((freq * 1000 / 32) / bps) - 1);
- }
+ if (s->sampling_rate)
+ return DIV_ROUND_CLOSEST(freq, s->sampling_rate * bps) - 1;
/* Warn, but use a safe default */
WARN_ON(1);
@@ -1903,12 +1856,11 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk)) {
- if (s->cfg->scbrr_algo_id == SCBRR_ALGO_6) {
+ if (s->cfg->type == PORT_HSCIF) {
sci_baud_calc_hscif(baud, port->uartclk, &t, &srr,
&cks);
} else {
- t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud,
- port->uartclk);
+ t = sci_scbrr_calc(s, baud, port->uartclk);
for (cks = 0; t >= 256 && cks <= 3; cks++)
t >>= 2;
}
@@ -1921,13 +1873,13 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
smr_val = serial_port_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
- smr_val |= 0x40;
+ smr_val |= SCSMR_CHR;
if (termios->c_cflag & PARENB)
- smr_val |= 0x20;
+ smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
- smr_val |= 0x30;
+ smr_val |= SCSMR_PE | SCSMR_ODD;
if (termios->c_cflag & CSTOPB)
- smr_val |= 0x08;
+ smr_val |= SCSMR_STOP;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1935,7 +1887,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
__func__, smr_val, cks, t, s->cfg->scscr);
if (t >= 0) {
- serial_port_out(port, SCSMR, (smr_val & ~3) | cks);
+ serial_port_out(port, SCSMR, (smr_val & ~SCSMR_CKS) | cks);
serial_port_out(port, SCBRR, t);
reg = sci_getreg(port, HSSRR);
if (reg->size)
@@ -1983,8 +1935,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if (s->chan_rx) {
s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
port->fifosize / 2;
- dev_dbg(port->dev,
- "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
s->rx_timeout * 1000 / HZ, port->timeout);
if (s->rx_timeout < msecs_to_jiffies(20))
s->rx_timeout = msecs_to_jiffies(20);
@@ -2003,7 +1954,7 @@ static void sci_pm(struct uart_port *port, unsigned int state,
struct sci_port *sci_port = to_sci_port(port);
switch (state) {
- case 3:
+ case UART_PM_STATE_OFF:
sci_port_disable(sci_port);
break;
default:
@@ -2068,7 +2019,7 @@ static int sci_remap_port(struct uart_port *port)
* need to do any remapping, just cast the cookie
* directly.
*/
- port->membase = (void __iomem *)port->mapbase;
+ port->membase = (void __iomem *)(uintptr_t)port->mapbase;
}
return 0;
@@ -2115,10 +2066,6 @@ static void sci_config_port(struct uart_port *port, int flags)
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct sci_port *s = to_sci_port(port);
-
- if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
- return -EINVAL;
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
return -EINVAL;
@@ -2151,11 +2098,13 @@ static struct uart_ops sci_uart_ops = {
};
static int sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+ struct sci_port *sci_port, unsigned int index,
+ struct plat_sci_port *p, bool early)
{
struct uart_port *port = &sci_port->port;
+ const struct resource *res;
+ unsigned int sampling_rate;
+ unsigned int i;
int ret;
sci_port->cfg = p;
@@ -2164,31 +2113,76 @@ static int sci_init_single(struct platform_device *dev,
port->iotype = UPIO_MEM;
port->line = index;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENOMEM;
+
+ port->mapbase = res->start;
+
+ for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
+ sci_port->irqs[i] = platform_get_irq(dev, i);
+
+ /* The SCI generates several interrupts. They can be muxed together or
+ * connected to different interrupt lines. In the muxed case only one
+ * interrupt resource is specified. In the non-muxed case three or four
+ * interrupt resources are specified, as the BRI interrupt is optional.
+ */
+ if (sci_port->irqs[0] < 0)
+ return -ENXIO;
+
+ if (sci_port->irqs[1] < 0) {
+ sci_port->irqs[1] = sci_port->irqs[0];
+ sci_port->irqs[2] = sci_port->irqs[0];
+ sci_port->irqs[3] = sci_port->irqs[0];
+ }
+
+ if (p->regtype == SCIx_PROBE_REGTYPE) {
+ ret = sci_probe_regmap(p);
+ if (unlikely(ret))
+ return ret;
+ }
+
switch (p->type) {
case PORT_SCIFB:
port->fifosize = 256;
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
break;
case PORT_HSCIF:
port->fifosize = 128;
+ sampling_rate = 0;
+ sci_port->overrun_bit = 0;
break;
case PORT_SCIFA:
port->fifosize = 64;
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
break;
case PORT_SCIF:
port->fifosize = 16;
+ if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
+ sci_port->overrun_bit = 9;
+ sampling_rate = 16;
+ } else {
+ sci_port->overrun_bit = 0;
+ sampling_rate = 32;
+ }
break;
default:
port->fifosize = 1;
+ sci_port->overrun_bit = 5;
+ sampling_rate = 32;
break;
}
- if (p->regtype == SCIx_PROBE_REGTYPE) {
- ret = sci_probe_regmap(p);
- if (unlikely(ret))
- return ret;
- }
+	/* The SCIFA on sh7723 and sh7724 needs a custom sampling rate that
+	 * doesn't match the SoC datasheet; this should be investigated. Let
+	 * platform data override the sampling rate for now.
+ */
+ sci_port->sampling_rate = p->sampling_rate ? p->sampling_rate
+ : sampling_rate;
- if (dev) {
+ if (!early) {
sci_port->iclk = clk_get(&dev->dev, "sci_ick");
if (IS_ERR(sci_port->iclk)) {
sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
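
Editorial note: the comment in this hunk describes the muxed versus per-line interrupt wiring; the loop that follows it implements the fallback. A self-contained sketch of that fallback, with the SCIx_*_IRQ indices assumed to be 0..3 for illustration:

	int irqs[4];	/* assumed order: ERI, RXI, TXI, BRI */
	unsigned int i;

	for (i = 0; i < 4; i++)
		irqs[i] = platform_get_irq(pdev, i);

	if (irqs[0] < 0)		/* at least one interrupt is mandatory */
		return -ENXIO;

	if (irqs[1] < 0) {		/* muxed: one line serves every source */
		irqs[1] = irqs[0];
		irqs[2] = irqs[0];
		irqs[3] = irqs[0];
	}
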
@@ -2208,8 +2202,6 @@ static int sci_init_single(struct platform_device *dev,
port->dev = &dev->dev;
- sci_init_gpios(sci_port);
-
pm_runtime_enable(&dev->dev);
}
@@ -2220,32 +2212,22 @@ static int sci_init_single(struct platform_device *dev,
/*
* Establish some sensible defaults for the error detection.
*/
- if (!p->error_mask)
- p->error_mask = (p->type == PORT_SCI) ?
+ sci_port->error_mask = (p->type == PORT_SCI) ?
SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
/*
* Establish sensible defaults for the overrun detection, unless
* the part has explicitly disabled support for it.
*/
- if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
- if (p->type == PORT_SCI)
- p->overrun_bit = 5;
- else if (p->scbrr_algo_id == SCBRR_ALGO_4)
- p->overrun_bit = 9;
- else
- p->overrun_bit = 0;
- /*
- * Make the error mask inclusive of overrun detection, if
- * supported.
- */
- p->error_mask |= (1 << p->overrun_bit);
- }
+ /*
+ * Make the error mask inclusive of overrun detection, if
+ * supported.
+ */
+ sci_port->error_mask |= 1 << sci_port->overrun_bit;
- port->mapbase = p->mapbase;
port->type = p->type;
- port->flags = p->flags;
+ port->flags = UPF_FIXED_PORT | p->flags;
port->regshift = p->regshift;
/*
@@ -2255,7 +2237,7 @@ static int sci_init_single(struct platform_device *dev,
*
* For the muxed case there's nothing more to do.
*/
- port->irq = p->irqs[SCIx_RXI_IRQ];
+ port->irq = sci_port->irqs[SCIx_RXI_IRQ];
port->irqflags = 0;
port->serial_in = sci_serial_in;
@@ -2270,8 +2252,6 @@ static int sci_init_single(struct platform_device *dev,
static void sci_cleanup_single(struct sci_port *port)
{
- sci_free_gpios(port);
-
clk_put(port->iclk);
clk_put(port->fclk);
@@ -2387,7 +2367,7 @@ static int sci_probe_earlyprintk(struct platform_device *pdev)
early_serial_console.index = pdev->id;
- sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+ sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
serial_console_setup(&early_serial_console, early_serial_buf);
@@ -2410,8 +2390,7 @@ static inline int sci_probe_earlyprintk(struct platform_device *pdev)
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
-static char banner[] __initdata =
- KERN_INFO "SuperH (H)SCI(F) driver initialized\n";
+static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
static struct uart_driver sci_uart_driver = {
.owner = THIS_MODULE,
@@ -2437,6 +2416,83 @@ static int sci_remove(struct platform_device *dev)
return 0;
}
+struct sci_port_info {
+ unsigned int type;
+ unsigned int regtype;
+};
+
+static const struct of_device_id of_sci_match[] = {
+ {
+ .compatible = "renesas,scif",
+ .data = &(const struct sci_port_info) {
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,scifa",
+ .data = &(const struct sci_port_info) {
+ .type = PORT_SCIFA,
+ .regtype = SCIx_SCIFA_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,scifb",
+ .data = &(const struct sci_port_info) {
+ .type = PORT_SCIFB,
+ .regtype = SCIx_SCIFB_REGTYPE,
+ },
+ }, {
+ .compatible = "renesas,hscif",
+ .data = &(const struct sci_port_info) {
+ .type = PORT_HSCIF,
+ .regtype = SCIx_HSCIF_REGTYPE,
+ },
+ }, {
+ /* Terminator */
+ },
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *
+sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ const struct sci_port_info *info;
+ struct plat_sci_port *p;
+ int id;
+
+ if (!IS_ENABLED(CONFIG_OF) || !np)
+ return NULL;
+
+ match = of_match_node(of_sci_match, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+
+ info = match->data;
+
+ p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+ if (!p) {
+ dev_err(&pdev->dev, "failed to allocate DT config data\n");
+ return NULL;
+ }
+
+ /* Get the line number for the aliases node. */
+ id = of_alias_get_id(np, "serial");
+ if (id < 0) {
+ dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
+ return NULL;
+ }
+
+ *dev_id = id;
+
+ p->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ p->type = info->type;
+ p->regtype = info->regtype;
+ p->scscr = SCSCR_RE | SCSCR_TE;
+
+ return p;
+}
+
static int sci_probe_single(struct platform_device *dev,
unsigned int index,
struct plat_sci_port *p,
@@ -2446,15 +2502,13 @@ static int sci_probe_single(struct platform_device *dev,
/* Sanity check */
if (unlikely(index >= SCI_NPORTS)) {
- dev_notice(&dev->dev, "Attempting to register port "
- "%d when only %d are available.\n",
+ dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
index+1, SCI_NPORTS);
- dev_notice(&dev->dev, "Consider bumping "
- "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
+ dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
return -EINVAL;
}
- ret = sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p, false);
if (ret)
return ret;
@@ -2469,8 +2523,9 @@ static int sci_probe_single(struct platform_device *dev,
static int sci_probe(struct platform_device *dev)
{
- struct plat_sci_port *p = dev_get_platdata(&dev->dev);
- struct sci_port *sp = &sci_ports[dev->id];
+ struct plat_sci_port *p;
+ struct sci_port *sp;
+ unsigned int dev_id;
int ret;
/*
@@ -2481,9 +2536,24 @@ static int sci_probe(struct platform_device *dev)
if (is_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+ if (dev->dev.of_node) {
+ p = sci_parse_dt(dev, &dev_id);
+ if (p == NULL)
+ return -EINVAL;
+ } else {
+ p = dev->dev.platform_data;
+ if (p == NULL) {
+ dev_err(&dev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ dev_id = dev->id;
+ }
+
+ sp = &sci_ports[dev_id];
platform_set_drvdata(dev, sp);
- ret = sci_probe_single(dev, dev->id, p, sp);
+ ret = sci_probe_single(dev, dev_id, p, sp);
if (ret)
return ret;
@@ -2492,6 +2562,7 @@ static int sci_probe(struct platform_device *dev)
ret = cpufreq_register_notifier(&sp->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
if (unlikely(ret < 0)) {
+ uart_remove_one_port(&sci_uart_driver, &sp->port);
sci_cleanup_single(sp);
return ret;
}
@@ -2535,6 +2606,7 @@ static struct platform_driver sci_driver = {
.name = "sh-sci",
.owner = THIS_MODULE,
.pm = &sci_dev_pm_ops,
+ .of_match_table = of_match_ptr(of_sci_match),
},
};
@@ -2542,7 +2614,7 @@ static int __init sci_init(void)
{
int ret;
- printk(banner);
+ pr_info("%s\n", banner);
ret = uart_register_driver(&sci_uart_driver);
if (likely(ret == 0)) {
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 5aca7364634c..d5db81a0a430 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -9,7 +9,7 @@
#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
-#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
+#define SCxSR_ERRORS(port) (to_sci_port(port)->error_mask)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd101a7..68b0fd4b9a6a 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -24,7 +24,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
-#include <linux/sirfsoc_dma.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
@@ -173,7 +172,7 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+ if (sirfport->tx_dma_chan) {
if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
dmaengine_pause(sirfport->tx_dma_chan);
sirfport->tx_dma_state = TX_DMA_PAUSE;
@@ -288,7 +287,7 @@ static void sirfsoc_uart_start_tx(struct uart_port *port)
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
else {
sirfsoc_uart_pio_tx_chars(sirfport, 1);
@@ -310,7 +309,7 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (sirfport->rx_dma_chan) {
if (!sirfport->is_marco)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
@@ -542,8 +541,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_lock(&port->lock);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
@@ -673,7 +674,7 @@ recv_char:
uart_handle_cts_change(port, cts_status);
wake_up_interruptible(&state->port.delta_msr_wait);
}
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (sirfport->rx_dma_chan) {
if (intr_status & uint_st->sirfsoc_rx_timeout)
sirfsoc_uart_handle_rx_tmo(sirfport);
if (intr_status & uint_st->sirfsoc_rx_done)
@@ -684,7 +685,7 @@ recv_char:
SIRFSOC_UART_IO_RX_MAX_CNT);
}
if (intr_status & uint_st->sirfsoc_txfifo_empty) {
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
else {
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
@@ -776,7 +777,7 @@ static void sirfsoc_uart_start_rx(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ if (sirfport->rx_dma_chan)
sirfsoc_uart_start_next_rx_dma(port);
else {
if (!sirfport->is_marco)
@@ -1012,11 +1013,11 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
SIRFSOC_USP_ASYNC_DIV2_OFFSET);
}
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ if (sirfport->tx_dma_chan)
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
else
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ if (sirfport->rx_dma_chan)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
else
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
@@ -1047,93 +1048,6 @@ static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
clk_disable_unprepare(sirfport->clk);
}
-static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- dma_cap_mask_t dma_mask;
- struct dma_slave_config tx_slv_cfg = {
- .dst_maxburst = 2,
- };
-
- dma_cap_zero(dma_mask);
- dma_cap_set(DMA_SLAVE, dma_mask);
- sirfport->tx_dma_chan = dma_request_channel(dma_mask,
- (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)sirfport->tx_dma_no);
- if (!sirfport->tx_dma_chan) {
- dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
- sirfport->tx_dma_no);
- return -EPROBE_DEFER;
- }
- dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
-
- return 0;
-}
-
-static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
-{
- struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- dma_cap_mask_t dma_mask;
- int ret;
- int i, j;
- struct dma_slave_config slv_cfg = {
- .src_maxburst = 2,
- };
-
- dma_cap_zero(dma_mask);
- dma_cap_set(DMA_SLAVE, dma_mask);
- sirfport->rx_dma_chan = dma_request_channel(dma_mask,
- (dma_filter_fn)sirfsoc_dma_filter_id,
- (void *)sirfport->rx_dma_no);
- if (!sirfport->rx_dma_chan) {
- dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
- sirfport->rx_dma_no);
- ret = -EPROBE_DEFER;
- goto request_err;
- }
- for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
- sirfport->rx_dma_items[i].xmit.buf =
- dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
- if (!sirfport->rx_dma_items[i].xmit.buf) {
- dev_err(port->dev, "Uart alloc bufa failed\n");
- ret = -ENOMEM;
- goto alloc_coherent_err;
- }
- sirfport->rx_dma_items[i].xmit.head =
- sirfport->rx_dma_items[i].xmit.tail = 0;
- }
- dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
-
- return 0;
-alloc_coherent_err:
- for (j = 0; j < i; j++)
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items[j].xmit.buf,
- sirfport->rx_dma_items[j].dma_addr);
- dma_release_channel(sirfport->rx_dma_chan);
-request_err:
- return ret;
-}
-
-static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
-{
- dmaengine_terminate_all(sirfport->tx_dma_chan);
- dma_release_channel(sirfport->tx_dma_chan);
-}
-
-static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
-{
- int i;
- struct uart_port *port = &sirfport->port;
- dmaengine_terminate_all(sirfport->rx_dma_chan);
- dma_release_channel(sirfport->rx_dma_chan);
- for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
- dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
- sirfport->rx_dma_items[i].xmit.buf,
- sirfport->rx_dma_items[i].dma_addr);
-}
-
static int sirfsoc_uart_startup(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
@@ -1172,18 +1086,12 @@ static int sirfsoc_uart_startup(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
-
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
- ret = sirfsoc_uart_init_rx_dma(port);
- if (ret)
- goto init_rx_err;
+ if (sirfport->rx_dma_chan)
wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
- SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
- SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
- SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
- }
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
- sirfsoc_uart_init_tx_dma(port);
+ SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
+ SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
+ SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+ if (sirfport->tx_dma_chan) {
sirfport->tx_dma_state = TX_DMA_IDLE;
wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
@@ -1230,12 +1138,8 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
gpio_set_value(sirfport->rts_gpio, 1);
free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
}
- if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
- sirfsoc_uart_uninit_rx_dma(sirfport);
- if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
- sirfsoc_uart_uninit_tx_dma(sirfport);
+ if (sirfport->tx_dma_chan)
sirfport->tx_dma_state = TX_DMA_IDLE;
- }
}
static const char *sirfsoc_uart_type(struct uart_port *port)
@@ -1311,8 +1215,8 @@ sirfsoc_uart_console_setup(struct console *co, char *options)
port->cons = co;
/* default console tx/rx transfer using io mode */
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
+ sirfport->rx_dma_chan = NULL;
+ sirfport->tx_dma_chan = NULL;
return uart_set_options(port, co, baud, parity, bits, flow);
}
@@ -1380,6 +1284,13 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
struct uart_port *port;
struct resource *res;
int ret;
+ int i, j;
+ struct dma_slave_config slv_cfg = {
+ .src_maxburst = 2,
+ };
+ struct dma_slave_config tx_slv_cfg = {
+ .dst_maxburst = 2,
+ };
const struct of_device_id *match;
match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
@@ -1400,27 +1311,10 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
"sirf,uart-has-rtscts");
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
sirfport->uart_reg->uart_type = SIRF_REAL_UART;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,uart-dma-rx-channel",
- &sirfport->rx_dma_no))
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,uart-dma-tx-channel",
- &sirfport->tx_dma_no))
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
- }
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
sirfport->uart_reg->uart_type = SIRF_USP_UART;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,usp-dma-rx-channel",
- &sirfport->rx_dma_no))
- sirfport->rx_dma_no = UNVALID_DMA_CHAN;
- if (of_property_read_u32(pdev->dev.of_node,
- "sirf,usp-dma-tx-channel",
- &sirfport->tx_dma_no))
- sirfport->tx_dma_no = UNVALID_DMA_CHAN;
if (!sirfport->hw_flow_ctrl)
goto usp_no_flow_control;
if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
@@ -1513,8 +1407,32 @@ usp_no_flow_control:
goto port_err;
}
- return 0;
+ sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
+ for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
+ sirfport->rx_dma_items[i].xmit.buf =
+ dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
+ if (!sirfport->rx_dma_items[i].xmit.buf) {
+			dev_err(port->dev, "Uart alloc buffer failed\n");
+ ret = -ENOMEM;
+ goto alloc_coherent_err;
+ }
+ sirfport->rx_dma_items[i].xmit.head =
+ sirfport->rx_dma_items[i].xmit.tail = 0;
+ }
+ if (sirfport->rx_dma_chan)
+ dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
+ sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
+ if (sirfport->tx_dma_chan)
+ dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
+ return 0;
+alloc_coherent_err:
+ for (j = 0; j < i; j++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[j].xmit.buf,
+ sirfport->rx_dma_items[j].dma_addr);
+ dma_release_channel(sirfport->rx_dma_chan);
port_err:
clk_put(sirfport->clk);
err:
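
Editorial note: with the hard-coded channel numbers and filter callbacks gone, the driver now relies on the dmaengine slave API, where the channels are described in the device tree ("dmas"/"dma-names"). A condensed sketch of the pattern the probe path switches to:

	struct dma_chan *chan;
	struct dma_slave_config cfg = { .src_maxburst = 2 };

	chan = dma_request_slave_channel(dev, "rx");	/* NULL if not in DT */
	if (chan)
		dmaengine_slave_config(chan, &cfg);
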
@@ -1527,6 +1445,19 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
struct uart_port *port = &sirfport->port;
clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
+ if (sirfport->rx_dma_chan) {
+ int i;
+ dmaengine_terminate_all(sirfport->rx_dma_chan);
+ dma_release_channel(sirfport->rx_dma_chan);
+ for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[i].xmit.buf,
+ sirfport->rx_dma_items[i].dma_addr);
+ }
+ if (sirfport->tx_dma_chan) {
+ dmaengine_terminate_all(sirfport->tx_dma_chan);
+ dma_release_channel(sirfport->tx_dma_chan);
+ }
return 0;
}
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index b7d679c0881b..8a6eddad2f3c 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -392,9 +392,6 @@ struct sirfsoc_uart_register sirfsoc_uart = {
/* Indicate how many buffers used */
#define SIRFSOC_RX_LOOP_BUF_CNT 2
-/* Indicate if DMA channel valid */
-#define IS_DMA_CHAN_VALID(x) ((x) != -1)
-#define UNVALID_DMA_CHAN -1
/* For Fast Baud Rate Calculation */
struct sirfsoc_baudrate_to_regv {
unsigned int baud_rate;
@@ -423,8 +420,6 @@ struct sirfsoc_uart_port {
/* for SiRFmarco, there are SET/CLR for UART_INT_EN */
bool is_marco;
struct sirfsoc_uart_register *uart_reg;
- int rx_dma_no;
- int tx_dma_no;
struct dma_chan *rx_dma_chan;
struct dma_chan *tx_dma_chan;
dma_addr_t tx_dma_addr;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index cf86e729532b..dc697cee248a 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
unsigned long flags;
int i, locked = 1;
- local_irq_save(flags);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
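
Editorial note: the same locking conversion is applied to each of the sun* console write paths in this series: take the port lock with interrupts disabled, but only try-lock when sysrq owns the lock or an oops is in progress, so the console can still make progress. The shared pattern, as a sketch:

	unsigned long flags;
	int locked = 1;

	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... write the characters out ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
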
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 380fb5355cb2..5faa8e905e98 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index db79b76f5c8e..9a0f24f83720 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s,
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 45a8c6aa5837..a2c40ed287d2 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ae14b46cce0..d48e040cd8c5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -7866,6 +7866,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ memset(&new_line, 0, sizeof(new_line));
switch (flags){
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 144202eef6fe..53ba8537de8d 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1766,6 +1766,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ memset(&new_line, 0, sizeof(new_line));
switch (flags){
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index a4fdce74f883..b0e540137e39 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -67,7 +67,7 @@ static void tty_audit_log(const char *description, int major, int minor,
struct task_struct *tsk = current;
uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
- u32 sessionid = audit_get_sessionid(tsk);
+ unsigned int sessionid = audit_get_sessionid(tsk);
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
if (ab) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 765125dff20e..8ebd9f88a6f6 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -351,14 +351,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue. It then schedules those characters for
* processing by the line discipline.
- * Note that this function can only be used when the low_latency flag
- * is unset. Otherwise the workqueue won't be flushed.
*/
void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- WARN_ON(port->low_latency);
buf->tail->commit = buf->tail->used;
schedule_work(&buf->work);
@@ -482,17 +479,15 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- if (!tty->port->low_latency)
- flush_work(&tty->port->buf.work);
+ flush_work(&tty->port->buf.work);
}
/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
*
- * Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if port->low_latency is
- * set.
+ * Queue a push of the terminal flip buffers to the line discipline.
+ * Can be called from IRQ/atomic context.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
@@ -500,14 +495,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
void tty_flip_buffer_push(struct tty_port *port)
{
- struct tty_bufhead *buf = &port->buf;
-
- buf->tail->commit = buf->tail->used;
-
- if (port->low_latency)
- flush_to_ldisc(&buf->work);
- else
- schedule_work(&buf->work);
+ tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
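
Editorial note: with the low_latency special case removed, tty_flip_buffer_push() is just a scheduling call and, per the updated comment, safe from IRQ/atomic context. A hypothetical driver receive path using it (read_hw_char() is a placeholder, not a real helper; the flip-buffer API lives in <linux/tty_flip.h>):

static irqreturn_t example_rx_irq(int irq, void *dev_id)
{
	struct tty_port *port = dev_id;
	unsigned char ch = read_hw_char();		/* placeholder device read */

	tty_insert_flip_char(port, ch, TTY_NORMAL);	/* queue into flip buffer */
	tty_flip_buffer_push(port);			/* schedules flush_to_ldisc() */

	return IRQ_HANDLED;
}
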
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00ad7add..d3448a90f0f9 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
*
* Locking: None
*/
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- strcpy(p, driver->name);
+ return sprintf(p, "%s", driver->name);
else
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
}
/**
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--)
- count += sprintf(buf + count, "%s%d%c",
- cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ while (i--) {
+ int index = cs[i]->index;
+ struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+ /* don't resolve tty0 as some programs depend on it */
+ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+ count += tty_line_name(drv, index, buf + count);
+ else
+ count += sprintf(buf + count, "%s%d",
+ cs[i]->name, cs[i]->index);
+
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ }
console_unlock();
return count;
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index d8a55e87877f..0ffb0cbe2823 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -39,17 +39,10 @@
lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i) \
lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# else
-# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
-# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
-# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
-# define lockdep_release(l, n, i) __rel(l, n, i)
-# endif
+#define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
+#define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
+#define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
+#define lockdep_release(l, n, i) __rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i) do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..3ad0b61e35b4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
@@ -1588,9 +1590,9 @@ static void restore_cur(struct vc_data *vc)
vc->vc_need_wrap = 0;
}
-enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
+enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
- ESpalette };
+ ESpalette, ESosc };
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -1650,11 +1652,15 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
* Control characters can be used in the _middle_
* of an escape sequence.
*/
+ if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
+ return;
switch (c) {
case 0:
return;
case 7:
- if (vc->vc_bell_duration)
+ if (vc->vc_state == ESosc)
+ vc->vc_state = ESnormal;
+ else if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
return;
case 8:
@@ -1765,7 +1771,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
} else if (c=='R') { /* reset palette */
reset_palette(vc);
vc->vc_state = ESnormal;
- } else
+ } else if (c>='0' && c<='9')
+ vc->vc_state = ESosc;
+ else
vc->vc_state = ESnormal;
return;
case ESpalette:
@@ -1805,9 +1813,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
- } else
- vc->vc_state = ESgotpars;
- case ESgotpars:
+ }
vc->vc_state = ESnormal;
switch(c) {
case 'h':
@@ -2021,6 +2027,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
vc->vc_state = ESnormal;
return;
+ case ESosc:
+ return;
default:
vc->vc_state = ESnormal;
}
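
Editorial note: the new ESosc state makes the console swallow xterm-style Operating System Command strings instead of printing their payload, terminating on BEL. A hypothetical sequence it now consumes, shown as a user-space snippet:

	/* ESC ] 0 ; <text> BEL - e.g. a window-title OSC emitted by shells */
	printf("\033]0;ignored on the Linux console\007");
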
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 2e6b832e004b..e0cad4418085 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -2,10 +2,6 @@
# USB device configuration
#
-# These are unused now, remove them once they are no longer selected
-config USB_ARCH_HAS_OHCI
- bool
-
config USB_OHCI_BIG_ENDIAN_DESC
bool
@@ -17,18 +13,12 @@ config USB_OHCI_LITTLE_ENDIAN
default n if STB03xxx || PPC_MPC52xx
default y
-config USB_ARCH_HAS_EHCI
- bool
-
config USB_EHCI_BIG_ENDIAN_MMIO
bool
config USB_EHCI_BIG_ENDIAN_DESC
bool
-config USB_ARCH_HAS_XHCI
- bool
-
menuconfig USB_SUPPORT
bool "USB support"
depends on HAS_IOMEM
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 7345d2115af2..480bd4d5710a 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -10,6 +10,7 @@ ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG) += debug.o
# Glue/Bridge layers go here
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_msm.o
+obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_zevio.o
# PCI doesn't provide stubs, need to check
ifneq ($(CONFIG_PCI),)
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index a85713165688..83d06c1455b7 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -50,12 +50,14 @@
#define PORTSC_PTC (0x0FUL << 16)
#define PORTSC_PHCD(d) ((d) ? BIT(22) : BIT(23))
/* PTS and PTW for non lpm version only */
+#define PORTSC_PFSC BIT(24)
#define PORTSC_PTS(d) \
(u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
#define PORTSC_PTW BIT(28)
#define PORTSC_STS BIT(29)
/* DEVLC */
+#define DEVLC_PFSC BIT(23)
#define DEVLC_PSPD (0x03UL << 25)
#define DEVLC_PSPD_HS (0x02UL << 25)
#define DEVLC_PTW BIT(27)
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 88b80f7728e4..e206406ae1d9 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -196,8 +196,6 @@ struct ci_hdrc {
struct ci_hdrc_platform_data *platdata;
int vbus_active;
- /* FIXME: some day, we'll not use global phy */
- bool global_phy;
struct usb_phy *transceiver;
struct usb_hcd *hcd;
struct dentry *debugfs;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index c00f77257d36..2e58f8dfd311 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -96,7 +96,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data;
struct ci_hdrc_platform_data pdata = {
- .name = "ci_hdrc_imx",
+ .name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REQUIRE_TRANSCEIVER |
CI_HDRC_DISABLE_STREAMING,
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
new file mode 100644
index 000000000000..3bf6489ef5ec
--- /dev/null
+++ b/drivers/usb/chipidea/ci_hdrc_zevio.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Based off drivers/usb/chipidea/ci_hdrc_msm.c
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+
+static struct ci_hdrc_platform_data ci_hdrc_zevio_platdata = {
+ .name = "ci_hdrc_zevio",
+ .flags = CI_HDRC_REGS_SHARED,
+ .capoffset = DEF_CAPOFFSET,
+};
+
+static int ci_hdrc_zevio_probe(struct platform_device *pdev)
+{
+ struct platform_device *ci_pdev;
+
+ dev_dbg(&pdev->dev, "ci_hdrc_zevio_probe\n");
+
+ ci_pdev = ci_hdrc_add_device(&pdev->dev,
+ pdev->resource, pdev->num_resources,
+ &ci_hdrc_zevio_platdata);
+
+ if (IS_ERR(ci_pdev)) {
+ dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
+ return PTR_ERR(ci_pdev);
+ }
+
+ platform_set_drvdata(pdev, ci_pdev);
+
+ return 0;
+}
+
+static int ci_hdrc_zevio_remove(struct platform_device *pdev)
+{
+ struct platform_device *ci_pdev = platform_get_drvdata(pdev);
+
+ ci_hdrc_remove_device(ci_pdev);
+
+ return 0;
+}
+
+static const struct of_device_id ci_hdrc_zevio_dt_ids[] = {
+ { .compatible = "lsi,zevio-usb", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ci_hdrc_zevio_driver = {
+ .probe = ci_hdrc_zevio_probe,
+ .remove = ci_hdrc_zevio_remove,
+ .driver = {
+ .name = "zevio_usb",
+ .owner = THIS_MODULE,
+ .of_match_table = ci_hdrc_zevio_dt_ids,
+ },
+};
+
+MODULE_DEVICE_TABLE(of, ci_hdrc_zevio_dt_ids);
+module_platform_driver(ci_hdrc_zevio_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 33f22bc6ad7f..ca6831c5b763 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -64,6 +64,7 @@
#include <linux/usb/otg.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/regulator/consumer.h>
@@ -298,6 +299,13 @@ int hw_device_reset(struct ci_hdrc *ci, u32 mode)
if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+ if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
+ if (ci->hw_bank.lpm)
+ hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
+ else
+ hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
+ }
+
/* USBMODE should be configured step by step */
hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
hw_write(ci, OP_USBMODE, USBMODE_CM, mode);
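
Editorial note: the new CI_HDRC_FORCE_FULLSPEED flag (wired to the DT maximum-speed property in the following hunk) restricts the port to full speed. The force-full-speed bit sits in PORTSC on non-LPM controllers and in DEVLC on LPM-capable ones, hence the branch; the same logic wrapped in an illustrative helper:

static void ci_force_full_speed(struct ci_hdrc *ci)
{
	if (ci->hw_bank.lpm)
		hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
	else
		hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
}
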
@@ -412,6 +420,9 @@ static int ci_get_platdata(struct device *dev,
}
}
+ if (of_usb_get_maximum_speed(dev->of_node) == USB_SPEED_FULL)
+ platdata->flags |= CI_HDRC_FORCE_FULLSPEED;
+
return 0;
}
@@ -496,33 +507,6 @@ static void ci_get_otg_capable(struct ci_hdrc *ci)
}
}
-static int ci_usb_phy_init(struct ci_hdrc *ci)
-{
- if (ci->platdata->phy) {
- ci->transceiver = ci->platdata->phy;
- return usb_phy_init(ci->transceiver);
- } else {
- ci->global_phy = true;
- ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR(ci->transceiver))
- ci->transceiver = NULL;
-
- return 0;
- }
-}
-
-static void ci_usb_phy_destroy(struct ci_hdrc *ci)
-{
- if (!ci->transceiver)
- return;
-
- otg_set_peripheral(ci->transceiver->otg, NULL);
- if (ci->global_phy)
- usb_put_phy(ci->transceiver);
- else
- usb_phy_shutdown(ci->transceiver);
-}
-
static int ci_hdrc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -532,7 +516,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
int ret;
enum usb_dr_mode dr_mode;
- if (!dev->platform_data) {
+ if (!dev_get_platdata(dev)) {
dev_err(dev, "platform data missing\n");
return -ENODEV;
}
@@ -549,7 +533,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
ci->dev = dev;
- ci->platdata = dev->platform_data;
+ ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
CI_HDRC_IMX28_WRITE_FIX);
@@ -561,7 +545,26 @@ static int ci_hdrc_probe(struct platform_device *pdev)
hw_phymode_configure(ci);
- ret = ci_usb_phy_init(ci);
+ if (ci->platdata->phy)
+ ci->transceiver = ci->platdata->phy;
+ else
+ ci->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(ci->transceiver)) {
+ ret = PTR_ERR(ci->transceiver);
+ /*
+	 * If -ENXIO is returned, it means the PHY layer wasn't
+ * enabled, so it makes no sense to return -EPROBE_DEFER
+ * in that case, since no PHY driver will ever probe.
+ */
+ if (ret == -ENXIO)
+ return ret;
+
+ dev_err(dev, "no usb2 phy configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = usb_phy_init(ci->transceiver);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
return ret;
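
Editorial note: a condensed sketch of the PHY acquisition logic added above: prefer a PHY handed in through platform data, otherwise take the devm-managed generic USB2 PHY, and only defer probing when a PHY driver could still appear later (pdata_phy and the surrounding variables are illustrative):

	struct usb_phy *phy;

	phy = pdata_phy ? pdata_phy : devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
	if (IS_ERR(phy)) {
		int err = PTR_ERR(phy);

		/* PHY layer disabled: deferring would never resolve */
		if (err == -ENXIO)
			return err;

		return -EPROBE_DEFER;	/* a PHY driver may still bind later */
	}
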
@@ -572,8 +575,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci->irq = platform_get_irq(pdev, 0);
if (ci->irq < 0) {
dev_err(dev, "missing IRQ\n");
- ret = -ENODEV;
- goto destroy_phy;
+ ret = ci->irq;
+ goto deinit_phy;
}
ci_get_otg_capable(ci);
@@ -590,23 +593,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_hdrc_gadget_init(ci);
if (ret)
dev_info(dev, "doesn't support gadget\n");
- if (!ret && ci->transceiver) {
- ret = otg_set_peripheral(ci->transceiver->otg,
- &ci->gadget);
- /*
- * If we implement all USB functions using chipidea drivers,
- * it doesn't need to call above API, meanwhile, if we only
- * use gadget function, calling above API is useless.
- */
- if (ret && ret != -ENOTSUPP)
- goto destroy_phy;
- }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto destroy_phy;
+ goto deinit_phy;
}
if (ci->is_otg) {
@@ -663,8 +655,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
free_irq(ci->irq, ci);
stop:
ci_role_destroy(ci);
-destroy_phy:
- ci_usb_phy_destroy(ci);
+deinit_phy:
+ usb_phy_shutdown(ci->transceiver);
return ret;
}
@@ -677,7 +669,8 @@ static int ci_hdrc_remove(struct platform_device *pdev)
free_irq(ci->irq, ci);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
- ci_usb_phy_destroy(ci);
+ usb_phy_shutdown(ci->transceiver);
+ kfree(ci->hw_bank.regmap);
return 0;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 80de2f88ed2c..7739c64ef259 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -105,7 +105,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
do {
/* flush any pending transfer */
- hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
cpu_relax();
} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
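
Editorial note: widening the mask to ~0 presumably avoids a read-modify-write on ENDPTFLUSH (and ENDPTPRIME below), whose bits are set by software and cleared by hardware; reading other endpoints' bits back and writing them again could spuriously re-trigger them. A simplified sketch of the driver's hw_write() semantics that make the full mask a direct register write:

static void hw_write_sketch(void __iomem *addr, u32 mask, u32 data)
{
	if (~mask)	/* partial mask: read-modify-write, merges old bits */
		data = (readl(addr) & ~mask) | (data & mask);

	writel(data, addr);	/* mask == ~0 writes the value as-is */
}
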
@@ -178,19 +178,6 @@ static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
}
/**
- * hw_test_and_clear_setup_status: test & clear setup status (execute without
- * interruption)
- * @n: endpoint number
- *
- * This function returns setup status
- */
-static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
-{
- n = ep_to_bit(ci, n);
- return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
-}
-
-/**
* hw_ep_prime: primes endpoint (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
@@ -205,7 +192,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
- hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
cpu_relax();
@@ -962,6 +949,156 @@ __acquires(hwep->lock)
}
/**
+ * isr_setup_packet_handler: setup packet handler
+ * @ci: UDC descriptor
+ *
+ * This function handles the setup packet
+ */
+static void isr_setup_packet_handler(struct ci_hdrc *ci)
+__releases(ci->lock)
+__acquires(ci->lock)
+{
+ struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
+ struct usb_ctrlrequest req;
+ int type, num, dir, err = -EINVAL;
+ u8 tmode = 0;
+
+ /*
+ * Flush data and handshake transactions of previous
+ * setup packet.
+ */
+ _ep_nuke(ci->ep0out);
+ _ep_nuke(ci->ep0in);
+
+ /* read_setup_packet */
+ do {
+ hw_test_and_set_setup_guard(ci);
+ memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
+ } while (!hw_test_and_clear_setup_guard(ci));
+
+ type = req.bRequestType;
+
+ ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+ switch (req.bRequest) {
+ case USB_REQ_CLEAR_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += ci->hw_ep_max / 2;
+ if (!ci->ci_hw_ep[num].wedge) {
+ spin_unlock(&ci->lock);
+ err = usb_ep_clear_halt(
+ &ci->ci_hw_ep[num].ep);
+ spin_lock(&ci->lock);
+ if (err)
+ break;
+ }
+ err = isr_setup_status_phase(ci);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+ le16_to_cpu(req.wValue) ==
+ USB_DEVICE_REMOTE_WAKEUP) {
+ if (req.wLength != 0)
+ break;
+ ci->remote_wakeup = 0;
+ err = isr_setup_status_phase(ci);
+ } else {
+ goto delegate;
+ }
+ break;
+ case USB_REQ_GET_STATUS:
+ if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+ type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+ type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 2 ||
+ le16_to_cpu(req.wValue) != 0)
+ break;
+ err = isr_get_status_response(ci, &req);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+ goto delegate;
+ if (le16_to_cpu(req.wLength) != 0 ||
+ le16_to_cpu(req.wIndex) != 0)
+ break;
+ ci->address = (u8)le16_to_cpu(req.wValue);
+ ci->setaddr = true;
+ err = isr_setup_status_phase(ci);
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+ le16_to_cpu(req.wValue) ==
+ USB_ENDPOINT_HALT) {
+ if (req.wLength != 0)
+ break;
+ num = le16_to_cpu(req.wIndex);
+ dir = num & USB_ENDPOINT_DIR_MASK;
+ num &= USB_ENDPOINT_NUMBER_MASK;
+ if (dir) /* TX */
+ num += ci->hw_ep_max / 2;
+
+ spin_unlock(&ci->lock);
+ err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+ spin_lock(&ci->lock);
+ if (!err)
+ isr_setup_status_phase(ci);
+ } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+ if (req.wLength != 0)
+ break;
+ switch (le16_to_cpu(req.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ ci->remote_wakeup = 1;
+ err = isr_setup_status_phase(ci);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ tmode = le16_to_cpu(req.wIndex) >> 8;
+ switch (tmode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ ci->test_mode = tmode;
+ err = isr_setup_status_phase(
+ ci);
+ break;
+ default:
+ break;
+ }
+ default:
+ goto delegate;
+ }
+ } else {
+ goto delegate;
+ }
+ break;
+ default:
+delegate:
+ if (req.wLength == 0) /* no data phase */
+ ci->ep0_dir = TX;
+
+ spin_unlock(&ci->lock);
+ err = ci->driver->setup(&ci->gadget, &req);
+ spin_lock(&ci->lock);
+ break;
+ }
+
+ if (err < 0) {
+ spin_unlock(&ci->lock);
+ if (usb_ep_set_halt(&hwep->ep))
+ dev_err(ci->dev, "error: ep_set_halt\n");
+ spin_lock(&ci->lock);
+ }
+}
+
+/**
* isr_tr_complete_handler: transaction complete interrupt handler
* @ci: UDC descriptor
*
@@ -972,12 +1109,10 @@ __releases(ci->lock)
__acquires(ci->lock)
{
unsigned i;
- u8 tmode = 0;
+ int err;
for (i = 0; i < ci->hw_ep_max; i++) {
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
- int type, num, dir, err = -EINVAL;
- struct usb_ctrlrequest req;
if (hwep->ep.desc == NULL)
continue; /* not configured */
@@ -997,148 +1132,10 @@ __acquires(ci->lock)
}
}
- if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
- !hw_test_and_clear_setup_status(ci, i))
- continue;
-
- if (i != 0) {
- dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
- continue;
- }
-
- /*
- * Flush data and handshake transactions of previous
- * setup packet.
- */
- _ep_nuke(ci->ep0out);
- _ep_nuke(ci->ep0in);
-
- /* read_setup_packet */
- do {
- hw_test_and_set_setup_guard(ci);
- memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
- } while (!hw_test_and_clear_setup_guard(ci));
-
- type = req.bRequestType;
-
- ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
-
- switch (req.bRequest) {
- case USB_REQ_CLEAR_FEATURE:
- if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) ==
- USB_ENDPOINT_HALT) {
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
- num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
- num += ci->hw_ep_max/2;
- if (!ci->ci_hw_ep[num].wedge) {
- spin_unlock(&ci->lock);
- err = usb_ep_clear_halt(
- &ci->ci_hw_ep[num].ep);
- spin_lock(&ci->lock);
- if (err)
- break;
- }
- err = isr_setup_status_phase(ci);
- } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
- le16_to_cpu(req.wValue) ==
- USB_DEVICE_REMOTE_WAKEUP) {
- if (req.wLength != 0)
- break;
- ci->remote_wakeup = 0;
- err = isr_setup_status_phase(ci);
- } else {
- goto delegate;
- }
- break;
- case USB_REQ_GET_STATUS:
- if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
- type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
- type != (USB_DIR_IN|USB_RECIP_INTERFACE))
- goto delegate;
- if (le16_to_cpu(req.wLength) != 2 ||
- le16_to_cpu(req.wValue) != 0)
- break;
- err = isr_get_status_response(ci, &req);
- break;
- case USB_REQ_SET_ADDRESS:
- if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
- goto delegate;
- if (le16_to_cpu(req.wLength) != 0 ||
- le16_to_cpu(req.wIndex) != 0)
- break;
- ci->address = (u8)le16_to_cpu(req.wValue);
- ci->setaddr = true;
- err = isr_setup_status_phase(ci);
- break;
- case USB_REQ_SET_FEATURE:
- if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
- le16_to_cpu(req.wValue) ==
- USB_ENDPOINT_HALT) {
- if (req.wLength != 0)
- break;
- num = le16_to_cpu(req.wIndex);
- dir = num & USB_ENDPOINT_DIR_MASK;
- num &= USB_ENDPOINT_NUMBER_MASK;
- if (dir) /* TX */
- num += ci->hw_ep_max/2;
-
- spin_unlock(&ci->lock);
- err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
- spin_lock(&ci->lock);
- if (!err)
- isr_setup_status_phase(ci);
- } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
- if (req.wLength != 0)
- break;
- switch (le16_to_cpu(req.wValue)) {
- case USB_DEVICE_REMOTE_WAKEUP:
- ci->remote_wakeup = 1;
- err = isr_setup_status_phase(ci);
- break;
- case USB_DEVICE_TEST_MODE:
- tmode = le16_to_cpu(req.wIndex) >> 8;
- switch (tmode) {
- case TEST_J:
- case TEST_K:
- case TEST_SE0_NAK:
- case TEST_PACKET:
- case TEST_FORCE_EN:
- ci->test_mode = tmode;
- err = isr_setup_status_phase(
- ci);
- break;
- default:
- break;
- }
- default:
- goto delegate;
- }
- } else {
- goto delegate;
- }
- break;
- default:
-delegate:
- if (req.wLength == 0) /* no data phase */
- ci->ep0_dir = TX;
-
- spin_unlock(&ci->lock);
- err = ci->driver->setup(&ci->gadget, &req);
- spin_lock(&ci->lock);
- break;
- }
-
- if (err < 0) {
- spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
- dev_err(ci->dev, "error: ep_set_halt\n");
- spin_lock(&ci->lock);
- }
+ /* Only handle setup packet below */
+ if (i == 0 &&
+ hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
+ isr_setup_packet_handler(ci);
}
}
@@ -1193,6 +1190,11 @@ static int ep_enable(struct usb_ep *ep,
hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
+ if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
+ dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
+ retval = -EINVAL;
+ }
+
/*
* Enable endpoints in the HW other than ep0 as ep0
* is always enabled
@@ -1837,12 +1839,6 @@ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
-
- if (ci->transceiver) {
- otg_set_peripheral(ci->transceiver->otg, NULL);
- if (ci->global_phy)
- usb_put_phy(ci->transceiver);
- }
}
static int udc_id_switch_for_device(struct ci_hdrc *ci)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index fed7f68d025d..cb8e99156f5a 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -10,7 +10,6 @@ config USB_DEBUG
config USB_ANNOUNCE_NEW_DEVICES
bool "USB announce new devices"
- default N
help
Say Y here if you want the USB core to always announce the
idVendor, idProduct, Manufacturer, Product, and SerialNumber
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c65937..1ab4df1de2da 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -10,7 +10,6 @@
#define USB_MAXALTSETTING 128 /* Hard limit */
-#define USB_MAXENDPOINTS 30 /* Hard limit */
#define USB_MAXCONFIG 8 /* Arbitrary limit */
@@ -717,6 +716,10 @@ int usb_get_configuration(struct usb_device *dev)
result = -ENOMEM;
goto err;
}
+
+ if (dev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(100);
+
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (result < 0) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 90e18f6fa2bb..257876ea03a1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -62,7 +62,7 @@
/* Mutual exclusion for removal, open, and release */
DEFINE_MUTEX(usbfs_mutex);
-struct dev_state {
+struct usb_dev_state {
struct list_head list; /* state list */
struct usb_device *dev;
struct file *file;
@@ -81,7 +81,7 @@ struct dev_state {
struct async {
struct list_head asynclist;
- struct dev_state *ps;
+ struct usb_dev_state *ps;
struct pid *pid;
const struct cred *cred;
unsigned int signr;
@@ -151,7 +151,7 @@ static void usbfs_decrease_memory_usage(unsigned amount)
atomic_sub(amount, &usbfs_memory_usage);
}
-static int connected(struct dev_state *ps)
+static int connected(struct usb_dev_state *ps)
{
return (!list_empty(&ps->list) &&
ps->dev->state != USB_STATE_NOTATTACHED);
@@ -184,7 +184,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
ssize_t ret = 0;
unsigned len;
@@ -307,7 +307,7 @@ static void free_async(struct async *as)
static void async_newpending(struct async *as)
{
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -317,7 +317,7 @@ static void async_newpending(struct async *as)
static void async_removepending(struct async *as)
{
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -325,7 +325,7 @@ static void async_removepending(struct async *as)
spin_unlock_irqrestore(&ps->lock, flags);
}
-static struct async *async_getcompleted(struct dev_state *ps)
+static struct async *async_getcompleted(struct usb_dev_state *ps)
{
unsigned long flags;
struct async *as = NULL;
@@ -340,7 +340,7 @@ static struct async *async_getcompleted(struct dev_state *ps)
return as;
}
-static struct async *async_getpending(struct dev_state *ps,
+static struct async *async_getpending(struct usb_dev_state *ps,
void __user *userurb)
{
struct async *as;
@@ -448,7 +448,7 @@ static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb)
#define AS_CONTINUATION 1
#define AS_UNLINK 2
-static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
+static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr)
__releases(ps->lock)
__acquires(ps->lock)
{
@@ -489,7 +489,7 @@ __acquires(ps->lock)
static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
- struct dev_state *ps = as->ps;
+ struct usb_dev_state *ps = as->ps;
struct siginfo sinfo;
struct pid *pid = NULL;
u32 secid = 0;
@@ -529,7 +529,7 @@ static void async_completed(struct urb *urb)
wake_up(&ps->wait);
}
-static void destroy_async(struct dev_state *ps, struct list_head *list)
+static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
{
struct urb *urb;
struct async *as;
@@ -551,7 +551,7 @@ static void destroy_async(struct dev_state *ps, struct list_head *list)
spin_unlock_irqrestore(&ps->lock, flags);
}
-static void destroy_async_on_interface(struct dev_state *ps,
+static void destroy_async_on_interface(struct usb_dev_state *ps,
unsigned int ifnum)
{
struct list_head *p, *q, hitlist;
@@ -566,7 +566,7 @@ static void destroy_async_on_interface(struct dev_state *ps,
destroy_async(ps, &hitlist);
}
-static void destroy_all_async(struct dev_state *ps)
+static void destroy_all_async(struct usb_dev_state *ps)
{
destroy_async(ps, &ps->async_pending);
}
@@ -585,7 +585,7 @@ static int driver_probe(struct usb_interface *intf,
static void driver_disconnect(struct usb_interface *intf)
{
- struct dev_state *ps = usb_get_intfdata(intf);
+ struct usb_dev_state *ps = usb_get_intfdata(intf);
unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber;
if (!ps)
@@ -628,7 +628,7 @@ struct usb_driver usbfs_driver = {
.resume = driver_resume,
};
-static int claimintf(struct dev_state *ps, unsigned int ifnum)
+static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev = ps->dev;
struct usb_interface *intf;
@@ -650,7 +650,7 @@ static int claimintf(struct dev_state *ps, unsigned int ifnum)
return err;
}
-static int releaseintf(struct dev_state *ps, unsigned int ifnum)
+static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev;
struct usb_interface *intf;
@@ -670,7 +670,7 @@ static int releaseintf(struct dev_state *ps, unsigned int ifnum)
return err;
}
-static int checkintf(struct dev_state *ps, unsigned int ifnum)
+static int checkintf(struct usb_dev_state *ps, unsigned int ifnum)
{
if (ps->dev->state != USB_STATE_CONFIGURED)
return -EHOSTUNREACH;
@@ -710,7 +710,7 @@ static int findintfep(struct usb_device *dev, unsigned int ep)
return -ENOENT;
}
-static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype,
unsigned int request, unsigned int index)
{
int ret = 0;
@@ -769,6 +769,88 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
return ret;
}
+static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev,
+ unsigned char ep)
+{
+ if (ep & USB_ENDPOINT_DIR_MASK)
+ return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
+ else
+ return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
+}
+
+static int parse_usbdevfs_streams(struct usb_dev_state *ps,
+ struct usbdevfs_streams __user *streams,
+ unsigned int *num_streams_ret,
+ unsigned int *num_eps_ret,
+ struct usb_host_endpoint ***eps_ret,
+ struct usb_interface **intf_ret)
+{
+ unsigned int i, num_streams, num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf = NULL;
+ unsigned char ep;
+ int ifnum, ret;
+
+ if (get_user(num_streams, &streams->num_streams) ||
+ get_user(num_eps, &streams->num_eps))
+ return -EFAULT;
+
+ if (num_eps < 1 || num_eps > USB_MAXENDPOINTS)
+ return -EINVAL;
+
+ /* The XHCI controller allows max 2 ^ 16 streams */
+ if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
+ return -EINVAL;
+
+ eps = kmalloc(num_eps * sizeof(*eps), GFP_KERNEL);
+ if (!eps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_eps; i++) {
+ if (get_user(ep, &streams->eps[i])) {
+ ret = -EFAULT;
+ goto error;
+ }
+ eps[i] = ep_to_host_endpoint(ps->dev, ep);
+ if (!eps[i]) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+		/* usb_alloc/free_streams operate on a usb_interface */
+ ifnum = findintfep(ps->dev, ep);
+ if (ifnum < 0) {
+ ret = ifnum;
+ goto error;
+ }
+
+ if (i == 0) {
+ ret = checkintf(ps, ifnum);
+ if (ret < 0)
+ goto error;
+ intf = usb_ifnum_to_if(ps->dev, ifnum);
+ } else {
+ /* Verify all eps belong to the same interface */
+ if (ifnum != intf->altsetting->desc.bInterfaceNumber) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+ if (num_streams_ret)
+ *num_streams_ret = num_streams;
+ *num_eps_ret = num_eps;
+ *eps_ret = eps;
+ *intf_ret = intf;
+
+ return 0;
+
+error:
+ kfree(eps);
+ return ret;
+}
+
static int match_devt(struct device *dev, void *data)
{
return dev->devt == (dev_t) (unsigned long) data;
@@ -791,11 +873,11 @@ static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
static int usbdev_open(struct inode *inode, struct file *file)
{
struct usb_device *dev = NULL;
- struct dev_state *ps;
+ struct usb_dev_state *ps;
int ret;
ret = -ENOMEM;
- ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
+ ps = kmalloc(sizeof(struct usb_dev_state), GFP_KERNEL);
if (!ps)
goto out_free_ps;
@@ -852,7 +934,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
static int usbdev_release(struct inode *inode, struct file *file)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
unsigned int ifnum;
struct async *as;
@@ -883,7 +965,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
return 0;
}
-static int proc_control(struct dev_state *ps, void __user *arg)
+static int proc_control(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_ctrltransfer ctrl;
@@ -970,7 +1052,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_bulk(struct dev_state *ps, void __user *arg)
+static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_bulktransfer bulk;
@@ -1043,7 +1125,21 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_resetep(struct dev_state *ps, void __user *arg)
+static void check_reset_of_active_ep(struct usb_device *udev,
+ unsigned int epnum, char *ioctl_name)
+{
+ struct usb_host_endpoint **eps;
+ struct usb_host_endpoint *ep;
+
+ eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out;
+ ep = eps[epnum & 0x0f];
+ if (ep && !list_empty(&ep->urb_list))
+ dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n",
+ task_pid_nr(current), current->comm,
+ ioctl_name, epnum);
+}
+
+static int proc_resetep(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int ret;
@@ -1056,11 +1152,12 @@ static int proc_resetep(struct dev_state *ps, void __user *arg)
ret = checkintf(ps, ret);
if (ret)
return ret;
+ check_reset_of_active_ep(ps->dev, ep, "RESETEP");
usb_reset_endpoint(ps->dev, ep);
return 0;
}
-static int proc_clearhalt(struct dev_state *ps, void __user *arg)
+static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int pipe;
@@ -1074,6 +1171,7 @@ static int proc_clearhalt(struct dev_state *ps, void __user *arg)
ret = checkintf(ps, ret);
if (ret)
return ret;
+ check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT");
if (ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f);
else
@@ -1082,7 +1180,7 @@ static int proc_clearhalt(struct dev_state *ps, void __user *arg)
return usb_clear_halt(ps->dev, pipe);
}
-static int proc_getdriver(struct dev_state *ps, void __user *arg)
+static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_getdriver gd;
struct usb_interface *intf;
@@ -1101,7 +1199,7 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
return ret;
}
-static int proc_connectinfo(struct dev_state *ps, void __user *arg)
+static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci = {
.devnum = ps->dev->devnum,
@@ -1113,12 +1211,12 @@ static int proc_connectinfo(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_resetdevice(struct dev_state *ps)
+static int proc_resetdevice(struct usb_dev_state *ps)
{
return usb_reset_device(ps->dev);
}
-static int proc_setintf(struct dev_state *ps, void __user *arg)
+static int proc_setintf(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_setinterface setintf;
int ret;
@@ -1127,11 +1225,14 @@ static int proc_setintf(struct dev_state *ps, void __user *arg)
return -EFAULT;
if ((ret = checkintf(ps, setintf.interface)))
return ret;
+
+ destroy_async_on_interface(ps, setintf.interface);
+
return usb_set_interface(ps->dev, setintf.interface,
setintf.altsetting);
}
-static int proc_setconfig(struct dev_state *ps, void __user *arg)
+static int proc_setconfig(struct usb_dev_state *ps, void __user *arg)
{
int u;
int status = 0;
@@ -1179,7 +1280,7 @@ static int proc_setconfig(struct dev_state *ps, void __user *arg)
return status;
}
-static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
+static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
void __user *arg)
{
@@ -1189,6 +1290,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int number_of_packets = 0;
+ unsigned int stream_id = 0;
void *buf;
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
@@ -1209,15 +1312,10 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (ret)
return ret;
}
- if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
- is_in = 1;
- ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
- } else {
- is_in = 0;
- ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
- }
+ ep = ep_to_host_endpoint(ps->dev, uurb->endpoint);
if (!ep)
return -ENOENT;
+ is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
switch(uurb->type) {
@@ -1242,7 +1340,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
le16_to_cpup(&dr->wIndex));
if (ret)
goto error;
- uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
@@ -1272,17 +1369,17 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
goto interrupt_urb;
}
- uurb->number_of_packets = 0;
num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
num_sgs = 0;
+ if (ep->streams)
+ stream_id = uurb->stream_id;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
- uurb->number_of_packets = 0;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1292,15 +1389,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (!usb_endpoint_xfer_isoc(&ep->desc))
return -EINVAL;
+ number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
- uurb->number_of_packets;
+ number_of_packets;
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
ret = -EFAULT;
goto error;
}
- for (totlen = u = 0; u < uurb->number_of_packets; u++) {
+ for (totlen = u = 0; u < number_of_packets; u++) {
/*
* arbitrary limit need for USB 3.0
* bMaxBurst (0~15 allowed, 1~16 packets)
@@ -1331,7 +1429,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
ret = -EFAULT;
goto error;
}
- as = alloc_async(uurb->number_of_packets);
+ as = alloc_async(number_of_packets);
if (!as) {
ret = -ENOMEM;
goto error;
@@ -1425,7 +1523,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
as->urb->start_frame = uurb->start_frame;
- as->urb->number_of_packets = uurb->number_of_packets;
+ as->urb->number_of_packets = number_of_packets;
+ as->urb->stream_id = stream_id;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
ps->dev->speed == USB_SPEED_HIGH)
as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
@@ -1433,7 +1532,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->interval = ep->desc.bInterval;
as->urb->context = as;
as->urb->complete = async_completed;
- for (totlen = u = 0; u < uurb->number_of_packets; u++) {
+ for (totlen = u = 0; u < number_of_packets; u++) {
as->urb->iso_frame_desc[u].offset = totlen;
as->urb->iso_frame_desc[u].length = isopkt[u].length;
totlen += isopkt[u].length;
@@ -1508,7 +1607,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return ret;
}
-static int proc_submiturb(struct dev_state *ps, void __user *arg)
+static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
@@ -1520,7 +1619,7 @@ static int proc_submiturb(struct dev_state *ps, void __user *arg)
arg);
}
-static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
+static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
{
struct urb *urb;
struct async *as;
@@ -1580,7 +1679,7 @@ err_out:
return -EFAULT;
}
-static struct async *reap_as(struct dev_state *ps)
+static struct async *reap_as(struct usb_dev_state *ps)
{
DECLARE_WAITQUEUE(wait, current);
struct async *as = NULL;
@@ -1603,7 +1702,7 @@ static struct async *reap_as(struct dev_state *ps)
return as;
}
-static int proc_reapurb(struct dev_state *ps, void __user *arg)
+static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
@@ -1616,7 +1715,7 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
return -EIO;
}
-static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
+static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
@@ -1631,7 +1730,7 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
}
#ifdef CONFIG_COMPAT
-static int proc_control_compat(struct dev_state *ps,
+static int proc_control_compat(struct usb_dev_state *ps,
struct usbdevfs_ctrltransfer32 __user *p32)
{
struct usbdevfs_ctrltransfer __user *p;
@@ -1644,7 +1743,7 @@ static int proc_control_compat(struct dev_state *ps,
return proc_control(ps, p);
}
-static int proc_bulk_compat(struct dev_state *ps,
+static int proc_bulk_compat(struct usb_dev_state *ps,
struct usbdevfs_bulktransfer32 __user *p32)
{
struct usbdevfs_bulktransfer __user *p;
@@ -1661,7 +1760,7 @@ static int proc_bulk_compat(struct dev_state *ps,
return proc_bulk(ps, p);
}
-static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg)
+static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal32 ds;
@@ -1699,7 +1798,7 @@ static int get_urb32(struct usbdevfs_urb *kurb,
return 0;
}
-static int proc_submiturb_compat(struct dev_state *ps, void __user *arg)
+static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
@@ -1745,7 +1844,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
return 0;
}
-static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
+static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
@@ -1758,7 +1857,7 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
return -EIO;
}
-static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
+static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
@@ -1775,7 +1874,7 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
#endif
-static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
+static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal ds;
@@ -1786,7 +1885,7 @@ static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_claiminterface(struct dev_state *ps, void __user *arg)
+static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
@@ -1795,7 +1894,7 @@ static int proc_claiminterface(struct dev_state *ps, void __user *arg)
return claimintf(ps, ifnum);
}
-static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
+static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
int ret;
@@ -1808,7 +1907,7 @@ static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
+static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
{
int size;
void *buf = NULL;
@@ -1884,7 +1983,7 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
return retval;
}
-static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
+static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_ioctl ctrl;
@@ -1894,7 +1993,7 @@ static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
}
#ifdef CONFIG_COMPAT
-static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
+static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg)
{
struct usbdevfs_ioctl32 __user *uioc;
struct usbdevfs_ioctl ctrl;
@@ -1912,7 +2011,7 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
}
#endif
-static int proc_claim_port(struct dev_state *ps, void __user *arg)
+static int proc_claim_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
int rc;
@@ -1926,7 +2025,7 @@ static int proc_claim_port(struct dev_state *ps, void __user *arg)
return rc;
}
-static int proc_release_port(struct dev_state *ps, void __user *arg)
+static int proc_release_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
@@ -1935,7 +2034,7 @@ static int proc_release_port(struct dev_state *ps, void __user *arg)
return usb_hub_release_port(ps->dev, portnum, ps);
}
-static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
+static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
{
__u32 caps;
@@ -1951,7 +2050,7 @@ static int proc_get_capabilities(struct dev_state *ps, void __user *arg)
return 0;
}
-static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
+static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnect_claim dc;
struct usb_interface *intf;
@@ -1983,6 +2082,45 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
return claimintf(ps, dc.interface);
}
+static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg)
+{
+ unsigned num_streams, num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf;
+ int r;
+
+ r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps,
+ &eps, &intf);
+ if (r)
+ return r;
+
+ destroy_async_on_interface(ps,
+ intf->altsetting[0].desc.bInterfaceNumber);
+
+ r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL);
+ kfree(eps);
+ return r;
+}
+
+static int proc_free_streams(struct usb_dev_state *ps, void __user *arg)
+{
+ unsigned num_eps;
+ struct usb_host_endpoint **eps;
+ struct usb_interface *intf;
+ int r;
+
+ r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf);
+ if (r)
+ return r;
+
+ destroy_async_on_interface(ps,
+ intf->altsetting[0].desc.bInterfaceNumber);
+
+ r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
+ kfree(eps);
+ return r;
+}
+
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@@ -1991,7 +2129,7 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
struct inode *inode = file_inode(file);
struct usb_device *dev = ps->dev;
int ret = -ENOTTY;
@@ -2159,6 +2297,12 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_DISCONNECT_CLAIM:
ret = proc_disconnect_claim(ps, p);
break;
+ case USBDEVFS_ALLOC_STREAMS:
+ ret = proc_alloc_streams(ps, p);
+ break;
+ case USBDEVFS_FREE_STREAMS:
+ ret = proc_free_streams(ps, p);
+ break;
}
usb_unlock_device(dev);
if (ret >= 0)
@@ -2192,7 +2336,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
static unsigned int usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct dev_state *ps = file->private_data;
+ struct usb_dev_state *ps = file->private_data;
unsigned int mask = 0;
poll_wait(file, &ps->wait, wait);
@@ -2218,11 +2362,11 @@ const struct file_operations usbdev_file_operations = {
static void usbdev_remove(struct usb_device *udev)
{
- struct dev_state *ps;
+ struct usb_dev_state *ps;
struct siginfo sinfo;
while (!list_empty(&udev->filelist)) {
- ps = list_entry(udev->filelist.next, struct dev_state, list);
+ ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
list_del_init(&ps->list);
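
The devio.c changes above expose bulk streams to user space through two new ioctls handled by proc_alloc_streams() and proc_free_streams(). The sketch below is a rough illustration only: it assumes the UAPI additions that accompany this series (struct usbdevfs_streams with num_streams/num_eps/eps[], the USBDEVFS_ALLOC_STREAMS and USBDEVFS_FREE_STREAMS ioctl numbers, and the stream_id field of struct usbdevfs_urb), and the device path and endpoint addresses are placeholders.

/* Hypothetical user-space sketch: allocate 16 bulk streams on two
 * endpoints of interface 0, then free them again. Endpoint addresses
 * 0x81/0x02 and the device node path are made up. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(int argc, char **argv)
{
	unsigned int ifnum = 0;
	struct usbdevfs_streams *st;
	int fd, ret;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDWR);	/* e.g. /dev/bus/usb/001/004 */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum) < 0) {
		perror("claim interface");
		return 1;
	}

	/* num_streams must be 2..65536; all endpoints must be bulk and
	 * belong to the claimed interface, as parse_usbdevfs_streams()
	 * enforces. */
	st = calloc(1, sizeof(*st) + 2);
	if (!st)
		return 1;
	st->num_streams = 16;
	st->num_eps = 2;
	st->eps[0] = 0x81;	/* bulk IN  */
	st->eps[1] = 0x02;	/* bulk OUT */

	ret = ioctl(fd, USBDEVFS_ALLOC_STREAMS, st);
	if (ret < 0)
		perror("alloc streams");
	else
		printf("allocated %d streams\n", ret);

	/* Bulk URBs submitted with USBDEVFS_SUBMITURB may now set
	 * usbdevfs_urb.stream_id to a value in 1..num_streams. */

	if (ioctl(fd, USBDEVFS_FREE_STREAMS, st) < 0)
		perror("free streams");
	free(st);
	close(fd);
	return 0;
}
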
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558cef66..888881e5f292 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
if (fields > 2 && bInterfaceClass) {
- if (bInterfaceClass > 255)
- return -EINVAL;
+ if (bInterfaceClass > 255) {
+ retval = -EINVAL;
+ goto fail;
+ }
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (fields > 4) {
const struct usb_device_id *id = id_table;
- if (!id)
- return -ENODEV;
+ if (!id) {
+ retval = -ENODEV;
+ goto fail;
+ }
for (; id->match_flags; id++)
if (id->idVendor == refVendor && id->idProduct == refProduct)
break;
- if (id->match_flags)
+ if (id->match_flags) {
dynid->id.driver_info = id->driver_info;
- else
- return -ENODEV;
+ } else {
+ retval = -ENODEV;
+ goto fail;
+ }
}
spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (retval)
return retval;
return count;
+
+fail:
+ kfree(dynid);
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
@@ -302,9 +312,9 @@ static int usb_probe_interface(struct device *dev)
return error;
}
- id = usb_match_id(intf, driver->id_table);
+ id = usb_match_dynamic_id(intf, driver);
if (!id)
- id = usb_match_dynamic_id(intf, driver);
+ id = usb_match_id(intf, driver->id_table);
if (!id)
return error;
@@ -390,8 +400,9 @@ static int usb_unbind_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
- int error, r, lpm_disable_error;
+ int i, j, error, r, lpm_disable_error;
intf->condition = USB_INTERFACE_UNBINDING;
@@ -415,6 +426,26 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
usb_cancel_queued_reset(intf);
+ /* Free streams */
+ for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep = &intf->cur_altsetting->endpoint[i];
+ if (ep->streams == 0)
+ continue;
+ if (j == 0) {
+ eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!eps) {
+ dev_warn(dev, "oom, leaking streams\n");
+ break;
+ }
+ }
+ eps[j++] = ep;
+ }
+ if (j) {
+ usb_free_streams(intf, eps, j, GFP_KERNEL);
+ kfree(eps);
+ }
+
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
* if it is prepared for a system sleep (since installing a new
@@ -980,8 +1011,7 @@ EXPORT_SYMBOL_GPL(usb_deregister);
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
*
- * The caller must hold @intf's device's lock, but not its pm_mutex
- * and not @intf->dev.sem.
+ * The caller must hold @intf's device's lock, but not @intf's lock.
*/
void usb_forced_unbind_intf(struct usb_interface *intf)
{
@@ -994,16 +1024,37 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
intf->needs_binding = 1;
}
+/*
+ * Unbind drivers for @udev's marked interfaces. These interfaces have
+ * the needs_binding flag set, for example by usb_resume_interface().
+ *
+ * The caller must hold @udev's device lock.
+ */
+static void unbind_marked_interfaces(struct usb_device *udev)
+{
+ struct usb_host_config *config;
+ int i;
+ struct usb_interface *intf;
+
+ config = udev->actconfig;
+ if (config) {
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intf = config->interface[i];
+ if (intf->dev.driver && intf->needs_binding)
+ usb_forced_unbind_intf(intf);
+ }
+ }
+}
+
/* Delayed forced unbinding of a USB interface driver and scan
* for rebinding.
*
- * The caller must hold @intf's device's lock, but not its pm_mutex
- * and not @intf->dev.sem.
+ * The caller must hold @intf's device's lock, but not @intf's lock.
*
* Note: Rebinds will be skipped if a system sleep transition is in
* progress and the PM "complete" callback hasn't occurred yet.
*/
-void usb_rebind_intf(struct usb_interface *intf)
+static void usb_rebind_intf(struct usb_interface *intf)
{
int rc;
@@ -1020,68 +1071,66 @@ void usb_rebind_intf(struct usb_interface *intf)
}
}
-#ifdef CONFIG_PM
-
-/* Unbind drivers for @udev's interfaces that don't support suspend/resume
- * There is no check for reset_resume here because it can be determined
- * only during resume whether reset_resume is needed.
+/*
+ * Rebind drivers to @udev's marked interfaces. These interfaces have
+ * the needs_binding flag set.
*
* The caller must hold @udev's device lock.
*/
-static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
+static void rebind_marked_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
- struct usb_driver *drv;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
-
- if (intf->dev.driver) {
- drv = to_usb_driver(intf->dev.driver);
- if (!drv->suspend || !drv->resume)
- usb_forced_unbind_intf(intf);
- }
+ if (intf->needs_binding)
+ usb_rebind_intf(intf);
}
}
}
-/* Unbind drivers for @udev's interfaces that failed to support reset-resume.
- * These interfaces have the needs_binding flag set by usb_resume_interface().
+/*
+ * Unbind all of @udev's marked interfaces and then rebind all of them.
+ * This ordering is necessary because some drivers claim several interfaces
+ * when they are first probed.
*
* The caller must hold @udev's device lock.
*/
-static void unbind_no_reset_resume_drivers_interfaces(struct usb_device *udev)
+void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev)
{
- struct usb_host_config *config;
- int i;
- struct usb_interface *intf;
-
- config = udev->actconfig;
- if (config) {
- for (i = 0; i < config->desc.bNumInterfaces; ++i) {
- intf = config->interface[i];
- if (intf->dev.driver && intf->needs_binding)
- usb_forced_unbind_intf(intf);
- }
- }
+ unbind_marked_interfaces(udev);
+ rebind_marked_interfaces(udev);
}
-static void do_rebind_interfaces(struct usb_device *udev)
+#ifdef CONFIG_PM
+
+/* Unbind drivers for @udev's interfaces that don't support suspend/resume
+ * There is no check for reset_resume here because it can be determined
+ * only during resume whether reset_resume is needed.
+ *
+ * The caller must hold @udev's device lock.
+ */
+static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
+ struct usb_driver *drv;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
- if (intf->needs_binding)
- usb_rebind_intf(intf);
+
+ if (intf->dev.driver) {
+ drv = to_usb_driver(intf->dev.driver);
+ if (!drv->suspend || !drv->resume)
+ usb_forced_unbind_intf(intf);
+ }
}
}
}
@@ -1410,7 +1459,7 @@ int usb_resume_complete(struct device *dev)
* whose needs_binding flag is set
*/
if (udev->state != USB_STATE_NOTATTACHED)
- do_rebind_interfaces(udev);
+ rebind_marked_interfaces(udev);
return 0;
}
@@ -1432,7 +1481,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- unbind_no_reset_resume_drivers_interfaces(udev);
+ unbind_marked_interfaces(udev);
}
/* Avoid PM error messages for devices disconnected while suspended
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index acbfeb0a0119..358ca8dd784f 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -155,6 +155,7 @@ int usb_choose_configuration(struct usb_device *udev)
}
return i;
}
+EXPORT_SYMBOL_GPL(usb_choose_configuration);
static int generic_probe(struct usb_device *udev)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea6bfe0..9c4e2922b04d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
@@ -2050,7 +2049,7 @@ int usb_alloc_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
- int i;
+ int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
@@ -2059,13 +2058,24 @@ int usb_alloc_streams(struct usb_interface *interface,
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
- /* Streams only apply to bulk endpoints. */
- for (i = 0; i < num_eps; i++)
+ for (i = 0; i < num_eps; i++) {
+ /* Streams only apply to bulk endpoints. */
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
return -EINVAL;
+ /* Re-alloc is not allowed */
+ if (eps[i]->streams)
+ return -EINVAL;
+ }
- return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
+ ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
num_streams, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_eps; i++)
+ eps[i]->streams = ret;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(usb_alloc_streams);
@@ -2079,8 +2089,7 @@ EXPORT_SYMBOL_GPL(usb_alloc_streams);
* Reverts a group of bulk endpoints back to not using stream IDs.
* Can fail if we are given bad arguments, or HCD is broken.
*
- * Return: On success, the number of allocated streams. On failure, a negative
- * error code.
+ * Return: 0 on success. On failure, a negative error code.
*/
int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
@@ -2088,19 +2097,26 @@ int usb_free_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
- int i;
+ int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
- /* Streams only apply to bulk endpoints. */
+ /* Double-free is not allowed */
for (i = 0; i < num_eps; i++)
- if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
+ if (!eps[i] || !eps[i]->streams)
return -EINVAL;
- return hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+ ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_eps; i++)
+ eps[i]->streams = 0;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(usb_free_streams);
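
With the hcd.c changes above, usb_alloc_streams() records the number of allocated streams in each endpoint's streams field and rejects re-allocation, while usb_free_streams() rejects a double free and clears that bookkeeping. The following is a minimal in-kernel sketch of the intended calling pattern; the driver context, endpoint pointers, stream count and GFP_NOIO choice are assumptions, not taken from this patch.

/* Sketch of a hypothetical SuperSpeed driver enabling and disabling
 * bulk streams on two endpoints of the interface it is bound to. */
#include <linux/kernel.h>
#include <linux/usb.h>

static int example_enable_streams(struct usb_interface *intf,
				  struct usb_host_endpoint *data_in,
				  struct usb_host_endpoint *data_out)
{
	struct usb_host_endpoint *eps[] = { data_in, data_out };
	int streams;

	/* Both endpoints must be bulk and must not already have streams. */
	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_NOIO);
	if (streams < 0)
		return streams;

	/* URBs queued to these endpoints may now set urb->stream_id
	 * to a value in 1..streams. */
	return streams;
}

static void example_disable_streams(struct usb_interface *intf,
				    struct usb_host_endpoint *data_in,
				    struct usb_host_endpoint *data_out)
{
	struct usb_host_endpoint *eps[] = { data_in, data_out };

	/* Freeing twice is now rejected, so only call this once. */
	usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_NOIO);
}
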
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba885978d..090469ebfcff 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -141,24 +141,27 @@ int usb_device_supports_lpm(struct usb_device *udev)
return 0;
}
- /* All USB 3.0 must support LPM, but we need their max exit latency
- * information from the SuperSpeed Extended Capabilities BOS descriptor.
+ /*
+ * According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
+ * However, there are some that don't, and they set the U1/U2 exit
+ * latencies to zero.
*/
if (!udev->bos->ss_cap) {
- dev_warn(&udev->dev, "No LPM exit latency info found. "
- "Power management will be impacted.\n");
+ dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
- /* udev is root hub */
- if (!udev->parent)
- return 1;
+ if (udev->bos->ss_cap->bU1devExitLat == 0 &&
+ udev->bos->ss_cap->bU2DevExitLat == 0) {
+ if (udev->parent)
+ dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
+ else
+ dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
+ return 0;
+ }
- if (udev->parent->lpm_capable)
+ if (!udev->parent || udev->parent->lpm_capable)
return 1;
-
- dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. "
- "Power management will be impacted.\n");
return 0;
}
@@ -504,7 +507,8 @@ static void led_work (struct work_struct *work)
changed++;
}
if (changed)
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, LED_CYCLE_PERIOD);
}
/* use a short timeout for hub/port status fetches */
@@ -1045,8 +1049,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
*/
if (type == HUB_INIT) {
delay = hub_power_on(hub, false);
- PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
- schedule_delayed_work(&hub->init_work,
+ INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->init_work,
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
@@ -1199,8 +1204,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Don't do a long sleep inside a workqueue routine */
if (type == HUB_INIT2) {
- PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
- schedule_delayed_work(&hub->init_work,
+ INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->init_work,
msecs_to_jiffies(delay));
return; /* Continues at init3: below */
} else {
@@ -1214,7 +1220,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (status < 0)
dev_err(hub->intfdev, "activate --> %d\n", status);
if (hub->has_indicators && blinkenlights)
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, LED_CYCLE_PERIOD);
/* Scan all ports that need attention */
kick_khubd(hub);
@@ -1793,7 +1800,7 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
* to one of these "claimed" ports, the program will "own" the device.
*/
static int find_port_owner(struct usb_device *hdev, unsigned port1,
- struct dev_state ***ppowner)
+ struct usb_dev_state ***ppowner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
@@ -1811,10 +1818,10 @@ static int find_port_owner(struct usb_device *hdev, unsigned port1,
/* In the following three functions, the caller must hold hdev's lock */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
- struct dev_state *owner)
+ struct usb_dev_state *owner)
{
int rc;
- struct dev_state **powner;
+ struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
@@ -1824,12 +1831,13 @@ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
*powner = owner;
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hub_claim_port);
int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
- struct dev_state *owner)
+ struct usb_dev_state *owner)
{
int rc;
- struct dev_state **powner;
+ struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
@@ -1839,8 +1847,9 @@ int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
*powner = NULL;
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hub_release_port);
-void usb_hub_release_all_ports(struct usb_device *hdev, struct dev_state *owner)
+void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
int n;
@@ -3098,9 +3107,19 @@ static int finish_port_resume(struct usb_device *udev)
* operation is carried out here, after the port has been
* resumed.
*/
- if (udev->reset_resume)
+ if (udev->reset_resume) {
+ /*
+ * If the device morphs or switches modes when it is reset,
+ * we don't want to perform a reset-resume. We'll fail the
+ * resume, which will cause a logical disconnect, and then
+ * the device will be rediscovered.
+ */
retry_reset_resume:
- status = usb_reset_and_verify_device(udev);
+ if (udev->quirks & USB_QUIRK_RESET)
+ status = -ENODEV;
+ else
+ status = usb_reset_and_verify_device(udev);
+ }
/* 10.5.4.5 says be sure devices in the tree are still there.
* For now let's assume the device didn't go crazy on resume,
@@ -3963,7 +3982,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
connect_type = usb_get_hub_port_connect_type(udev->parent,
udev->portnum);
- if ((udev->bos->ext_cap->bmAttributes & USB_BESL_SUPPORT) ||
+ if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_set_usb2_hardware_lpm(udev, 1);
@@ -4112,8 +4131,12 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
did_new_scheme = true;
retval = hub_enable_device(udev);
- if (retval < 0)
+ if (retval < 0) {
+ dev_err(&udev->dev,
+ "hub failed to enable device, error %d\n",
+ retval);
goto fail;
+ }
#define GET_DESCRIPTOR_BUFSIZE 64
buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
@@ -4316,7 +4339,8 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
- schedule_delayed_work (&hub->leds, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &hub->leds, 0);
}
}
kfree(qual);
@@ -4545,7 +4569,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
- schedule_delayed_work (&hub->leds, 0);
+ queue_delayed_work(
+ system_power_efficient_wq,
+ &hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
@@ -4744,6 +4770,8 @@ static void hub_events(void)
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
+ struct usb_device *udev = hub->ports[i - 1]->child;
+
if (test_bit(i, hub->busy_bits))
continue;
connect_change = test_bit(i, hub->change_bits);
@@ -4842,8 +4870,6 @@ static void hub_events(void)
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
int status;
- struct usb_device *udev =
- hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
if (!udev ||
@@ -4860,6 +4886,24 @@ static void hub_events(void)
usb_unlock_device(udev);
connect_change = 0;
}
+ /*
+ * On disconnect USB3 protocol ports transit from U0 to
+ * SS.Inactive to Rx.Detect. If this happens a warm-
+ * reset is not needed, but a (re)connect may happen
+ * before khubd runs and sees the disconnect, and the
+			 * device may be in an unknown state.
+ *
+ * If the port went through SS.Inactive without khubd
+ * seeing it the C_LINK_STATE change flag will be set,
+ * and we reset the dev to put it in a known state.
+ */
+ } else if (udev && hub_is_superspeed(hub->hdev) &&
+ (portchange & USB_PORT_STAT_C_LINK_STATE) &&
+ (portstatus & USB_PORT_STAT_CONNECTION)) {
+ usb_lock_device(udev);
+ usb_reset_device(udev);
+ usb_unlock_device(udev);
+ connect_change = 0;
}
if (connect_change)
@@ -5117,7 +5161,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
struct usb_host_bos *bos;
- int i, ret = 0;
+ int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
@@ -5243,6 +5287,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
ret);
goto re_enumerate;
}
+ /* Resetting also frees any allocated streams */
+ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
+ intf->cur_altsetting->endpoint[j].streams = 0;
}
done:
@@ -5345,10 +5392,11 @@ int usb_reset_device(struct usb_device *udev)
else if (cintf->condition ==
USB_INTERFACE_BOUND)
rebind = 1;
+ if (rebind)
+ cintf->needs_binding = 1;
}
- if (ret == 0 && rebind)
- usb_rebind_intf(cintf);
}
+ usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index df629a310e44..33bcb2c6f90a 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -89,7 +89,7 @@ struct usb_hub {
struct usb_port {
struct usb_device *child;
struct device dev;
- struct dev_state *port_owner;
+ struct usb_dev_state *port_owner;
enum usb_port_connect_type connect_type;
u8 portnum;
unsigned power_is_on:1;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f829a1aad1c3..0c8a7fc4dad8 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
- * bytes transferred will be stored in the @actual_length paramater.
+ * bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
@@ -1293,8 +1293,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
- int ret;
- int manual = 0;
+ int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
@@ -1329,6 +1328,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
+ /* Changing alt-setting also frees any allocated streams */
+ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
+ iface->cur_altsetting->endpoint[i].streams = 0;
+
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
@@ -1920,6 +1923,7 @@ free_interfaces:
usb_autosuspend_device(dev);
return 0;
}
+EXPORT_SYMBOL_GPL(usb_set_configuration);
static LIST_HEAD(set_config_list);
static DEFINE_SPINLOCK(set_config_lock);
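
Because usb_set_interface() now clears each endpoint's stream count (the altsetting change drops any previously allocated streams, as the new comment in the hunk above notes), a driver that relies on bulk streams has to allocate them again after switching altsettings. A hedged sketch of that ordering follows; the helper name, the eps[]/num_eps arguments and the stream count are hypothetical.

/* Hypothetical helper: switch to a new altsetting and re-establish
 * bulk streams on its endpoints (eps[]/num_eps are assumed to be
 * filled in by the caller for the new altsetting). */
#include <linux/usb.h>

static int example_switch_alt(struct usb_device *udev,
			      struct usb_interface *intf, int ifnum, int alt,
			      struct usb_host_endpoint **eps,
			      unsigned int num_eps)
{
	int ret;

	ret = usb_set_interface(udev, ifnum, alt);
	if (ret < 0)
		return ret;

	/* The altsetting change dropped any previously allocated
	 * streams, so they must be allocated again here. */
	return usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
}
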
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063c0a49..739ee8e8bdfd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Pro Webcams C920 and C930e */
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9ff665f1322f..991386ceb4ec 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -831,7 +831,7 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
*
* this allows all outstanding URBs to be unlinked starting
* from the back of the queue. This function is asynchronous.
- * The unlinking is just tiggered. It may happen after this
+ * The unlinking is just triggered. It may happen after this
* function has returned.
*
* This routine should not be called by a driver after its disconnect
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index d7cb822d6eab..5ca4070b1f38 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -16,7 +16,6 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>
-#include <acpi/acpi_bus.h>
#include "usb.h"
@@ -127,7 +126,7 @@ out:
return ret;
}
-static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
+static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
struct usb_device *udev;
acpi_handle *parent_handle;
@@ -169,16 +168,15 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
break;
}
- return -ENODEV;
+ return NULL;
}
/* root hub's parent is the usb hcd. */
- parent_handle = ACPI_HANDLE(dev->parent);
- *handle = acpi_get_child(parent_handle, udev->portnum);
- if (!*handle)
- return -ENODEV;
- return 0;
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent),
+ udev->portnum, false);
} else if (is_usb_port(dev)) {
+ struct acpi_device *adev = NULL;
+
sscanf(dev_name(dev), "port%d", &port_num);
/* Get the struct usb_device point of port's hub */
udev = to_usb_device(dev->parent->parent);
@@ -194,26 +192,27 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
raw_port_num = usb_hcd_find_raw_port_number(hcd,
port_num);
- *handle = acpi_get_child(ACPI_HANDLE(&udev->dev),
- raw_port_num);
- if (!*handle)
- return -ENODEV;
+ adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+ raw_port_num, false);
+ if (!adev)
+ return NULL;
} else {
parent_handle =
usb_get_hub_port_acpi_handle(udev->parent,
udev->portnum);
if (!parent_handle)
- return -ENODEV;
+ return NULL;
- *handle = acpi_get_child(parent_handle, port_num);
- if (!*handle)
- return -ENODEV;
+ acpi_bus_get_device(parent_handle, &adev);
+ adev = acpi_find_child_device(adev, port_num, false);
+ if (!adev)
+ return NULL;
}
- usb_acpi_check_port_connect_type(udev, *handle, port_num);
- } else
- return -ENODEV;
+ usb_acpi_check_port_connect_type(udev, adev->handle, port_num);
+ return adev;
+ }
- return 0;
+ return NULL;
}
static bool usb_acpi_bus_match(struct device *dev)
@@ -224,7 +223,7 @@ static bool usb_acpi_bus_match(struct device *dev)
static struct acpi_bus_type usb_acpi_bus = {
.name = "USB",
.match = usb_acpi_bus_match,
- .find_device = usb_acpi_find_device,
+ .find_companion = usb_acpi_find_companion,
};
int usb_acpi_register(void)
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..75bf649da82d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -2,7 +2,7 @@
#include <linux/acpi.h>
struct usb_hub_descriptor;
-struct dev_state;
+struct usb_dev_state;
/* Functions local to drivers/usb/core/ */
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
@@ -56,14 +55,10 @@ extern int usb_match_one_id_intf(struct usb_device *dev,
extern int usb_match_device(struct usb_device *dev,
const struct usb_device_id *id);
extern void usb_forced_unbind_intf(struct usb_interface *intf);
-extern void usb_rebind_intf(struct usb_interface *intf);
+extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
-extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
- struct dev_state *owner);
-extern int usb_hub_release_port(struct usb_device *hdev, unsigned port,
- struct dev_state *owner);
extern void usb_hub_release_all_ports(struct usb_device *hdev,
- struct dev_state *owner);
+ struct usb_dev_state *owner);
extern bool usb_device_is_owned(struct usb_device *udev);
extern int usb_hub_init(void);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87f94b4..1d129884cc39 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
int retval = 0;
if (!select_phy)
- return -ENODEV;
+ return 0;
usbcfg = readl(hsotg->regs + GUSBCFG);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 8205799e6db3..c93918b70d03 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -72,6 +72,26 @@ static const char *dwc2_op_state_str(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_handle_usb_port_intr - handles OTG PRTINT interrupts.
+ * When the PRTINT interrupt fires, there are certain status bits in the Host
+ * Port that need to be cleared.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_handle_usb_port_intr(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0 = readl(hsotg->regs + HPRT0);
+
+ if (hprt0 & HPRT0_ENACHG) {
+ hprt0 &= ~HPRT0_ENA;
+ writel(hprt0, hsotg->regs + HPRT0);
+ }
+
+ /* Clear interrupt */
+ writel(GINTSTS_PRTINT, hsotg->regs + GINTSTS);
+}
+
+/**
* dwc2_handle_mode_mismatch_intr() - Logs a mode mismatch warning message
*
* @hsotg: Programming view of DWC_otg controller
@@ -479,9 +499,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
if (dwc2_is_device_mode(hsotg)) {
dev_dbg(hsotg->dev,
" --Port interrupt received in Device mode--\n");
- gintsts = GINTSTS_PRTINT;
- writel(gintsts, hsotg->regs + GINTSTS);
- retval = 1;
+ dwc2_handle_usb_port_intr(hsotg);
+ retval = IRQ_HANDLED;
}
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d43b35..4d918ed8d343 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int is_control = usb_endpoint_xfer_control(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- struct usb_device *udev;
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
- udev = to_usb_device(hsotg->dev);
-
spin_lock_irqsave(&hsotg->lock, flags);
-
- usb_settoggle(udev, epnum, is_out, 0);
- if (is_control)
- usb_settoggle(udev, epnum, !is_out, 0);
dwc2_hcd_endpoint_reset(hsotg, ep);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 012f17ec1a37..47b9eb5389b4 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -975,8 +975,8 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
- int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
+ int pipe_type;
int urb_xfer_done;
if (dbg_hc(chan))
@@ -984,6 +984,11 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
"--Host Channel %d Interrupt: Transfer Complete--\n",
chnum);
+ if (!urb)
+ goto handle_xfercomp_done;
+
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
if (pipe_type == USB_ENDPOINT_XFER_ISOC)
@@ -1005,9 +1010,6 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
}
}
- if (!urb)
- goto handle_xfercomp_done;
-
/* Update the QTD and URB states */
switch (pipe_type) {
case USB_ENDPOINT_XFER_CONTROL:
@@ -1105,7 +1107,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
- int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+ int pipe_type;
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
chnum);
@@ -1119,6 +1121,8 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
if (!urb)
goto handle_stall_halt;
+ pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
dwc2_host_complete(hsotg, qtd, -EPIPE);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3f2cf0..eaba547ce26b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
int retval;
int irq;
+ if (usb_disabled())
+ return -ENODEV;
+
match = of_match_device(dwc2_of_match_table, &dev->dev);
if (match && match->data) {
params = match->data;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index a49217ae3533..d001417e8e37 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -61,9 +61,10 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
*/
-static void dwc3_core_soft_reset(struct dwc3 *dwc)
+static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
+ int ret;
/* Before Resetting PHY, put Core in Reset */
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -82,6 +83,15 @@ static void dwc3_core_soft_reset(struct dwc3 *dwc)
usb_phy_init(dwc->usb2_phy);
usb_phy_init(dwc->usb3_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0) {
+ phy_exit(dwc->usb2_generic_phy);
+ return ret;
+ }
mdelay(100);
/* Clear USB3 PHY reset */
@@ -100,6 +110,8 @@ static void dwc3_core_soft_reset(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_CORESOFTRESET;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ return 0;
}
/**
@@ -242,6 +254,90 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
}
}
+static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+ dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
+ DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ if (!dwc->scratchbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
+{
+ dma_addr_t scratch_addr;
+ u32 param;
+ int ret;
+
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+	/* should never get here without a scratch buffer */
+	if (WARN_ON(!dwc->scratchbuf))
+ return 0;
+
+ scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+ dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->dev, scratch_addr)) {
+ dev_err(dwc->dev, "failed to map scratch buffer\n");
+ ret = -EFAULT;
+ goto err0;
+ }
+
+ dwc->scratch_addr = scratch_addr;
+
+ param = lower_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
+ if (ret < 0)
+ goto err1;
+
+ param = upper_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
+ if (ret < 0)
+ goto err1;
+
+ return 0;
+
+err1:
+ dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+
+err0:
+ return ret;
+}
+
+static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return;
+
+ if (!dwc->nr_scratch)
+ return;
+
+ /* should never get here */
+ if (WARN_ON(!dwc->scratchbuf))
+ return;
+
+ dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dwc->scratchbuf);
+}
+
static void dwc3_core_num_eps(struct dwc3 *dwc)
{
struct dwc3_hwparams *parms = &dwc->hwparams;
@@ -277,6 +373,7 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
static int dwc3_core_init(struct dwc3 *dwc)
{
unsigned long timeout;
+ u32 hwparams4 = dwc->hwparams.hwparams4;
u32 reg;
int ret;
@@ -306,7 +403,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
cpu_relax();
} while (true);
- dwc3_core_soft_reset(dwc);
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err0;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -314,7 +413,29 @@ static int dwc3_core_init(struct dwc3 *dwc)
switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
- reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ /**
+ * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
+ * issue which would cause xHCI compliance tests to fail.
+ *
+ * Because of that we cannot enable clock gating on such
+ * configurations.
+ *
+ * Refers to:
+ *
+ * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
+ * SOF/ITP Mode Used
+ */
+ if ((dwc->dr_mode == USB_DR_MODE_HOST ||
+ dwc->dr_mode == USB_DR_MODE_OTG) &&
+ (dwc->revision >= DWC3_REVISION_210A &&
+ dwc->revision <= DWC3_REVISION_250A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
+ else
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
+ /* enable hibernation here */
+ dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
break;
default:
dev_dbg(dwc->dev, "No power optimization available\n");
@@ -333,16 +454,36 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret)
+ goto err1;
+
+ ret = dwc3_setup_scratch_buffers(dwc);
+ if (ret)
+ goto err2;
+
return 0;
+err2:
+ dwc3_free_scratch_buffers(dwc);
+
+err1:
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
err0:
return ret;
}
static void dwc3_core_exit(struct dwc3 *dwc)
{
+ dwc3_free_scratch_buffers(dwc);
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
}
#define DWC3_ALIGN_MASK (16 - 1)
@@ -411,32 +552,52 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(dwc->usb2_phy)) {
ret = PTR_ERR(dwc->usb2_phy);
-
- /*
- * if -ENXIO is returned, it means PHY layer wasn't
- * enabled, so it makes no sense to return -EPROBE_DEFER
- * in that case, since no PHY driver will ever probe.
- */
- if (ret == -ENXIO)
+ if (ret == -ENXIO || ret == -ENODEV) {
+ dwc->usb2_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
return ret;
-
- dev_err(dev, "no usb2 phy configured\n");
- return -EPROBE_DEFER;
+ } else {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
}
if (IS_ERR(dwc->usb3_phy)) {
ret = PTR_ERR(dwc->usb3_phy);
+ if (ret == -ENXIO || ret == -ENODEV) {
+ dwc->usb3_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb3 phy configured\n");
+ return ret;
+ }
+ }
- /*
- * if -ENXIO is returned, it means PHY layer wasn't
- * enabled, so it makes no sense to return -EPROBE_DEFER
- * in that case, since no PHY driver will ever probe.
- */
- if (ret == -ENXIO)
+ dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
+ if (IS_ERR(dwc->usb2_generic_phy)) {
+ ret = PTR_ERR(dwc->usb2_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV) {
+ dwc->usb2_generic_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb2 phy configured\n");
return ret;
+ }
+ }
- dev_err(dev, "no usb3 phy configured\n");
- return -EPROBE_DEFER;
+ dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
+ if (IS_ERR(dwc->usb3_generic_phy)) {
+ ret = PTR_ERR(dwc->usb3_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV) {
+ dwc->usb3_generic_phy = NULL;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
+ dev_err(dev, "no usb3 phy configured\n");
+ return ret;
+ }
}
dwc->xhci_resources[0].start = res->start;
@@ -479,6 +640,14 @@ static int dwc3_probe(struct platform_device *pdev)
goto err0;
}
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ dwc->dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
+
ret = dwc3_core_init(dwc);
if (ret) {
dev_err(dev, "failed to initialize core\n");
@@ -487,21 +656,20 @@ static int dwc3_probe(struct platform_device *pdev)
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err1;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_power;
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err1;
+ goto err_usb3phy_power;
}
- if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
- dwc->dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
- dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
-
- if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
- dwc->dr_mode = USB_DR_MODE_OTG;
-
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
@@ -568,6 +736,12 @@ err3:
err2:
dwc3_event_buffers_cleanup(dwc);
+err_usb3phy_power:
+ phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+ phy_power_off(dwc->usb2_generic_phy);
+
err1:
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
@@ -585,6 +759,8 @@ static int dwc3_remove(struct platform_device *pdev)
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -682,6 +858,8 @@ static int dwc3_suspend(struct device *dev)
usb_phy_shutdown(dwc->usb3_phy);
usb_phy_shutdown(dwc->usb2_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
return 0;
}
@@ -690,9 +868,17 @@ static int dwc3_resume(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
unsigned long flags;
+ int ret;
usb_phy_init(dwc->usb3_phy);
usb_phy_init(dwc->usb2_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_init;
spin_lock_irqsave(&dwc->lock, flags);
@@ -716,6 +902,11 @@ static int dwc3_resume(struct device *dev)
pm_runtime_enable(dev);
return 0;
+
+err_usb2phy_init:
+ phy_exit(dwc->usb2_generic_phy);
+
+ return ret;
}
static const struct dev_pm_ops dwc3_dev_pm_ops = {
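
A hedged sketch (not part of the patch) of the optional generic-PHY handling wired into probe and core init above. phy_init()/phy_power_on() and their counterparts accept a NULL phy and return 0, which is why the probe path maps -ENODEV/-ENOSYS to a NULL pointer for PHYs that are simply not described. Only devm_phy_get() and the phy_*() calls are real kernel API; the helper names and the exact unwind labels are illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static struct phy *example_get_optional_phy(struct device *dev,
					    const char *name)
{
	struct phy *phy = devm_phy_get(dev, name);

	if (IS_ERR(phy)) {
		int ret = PTR_ERR(phy);

		/* no PHY described/enabled: treat it as optional */
		if (ret == -ENODEV || ret == -ENOSYS)
			return NULL;
		return phy;	/* -EPROBE_DEFER or a real error */
	}
	return phy;
}

static int example_phys_up(struct phy *usb2, struct phy *usb3)
{
	int ret;

	ret = phy_init(usb2);		/* NULL phy: returns 0 */
	if (ret)
		return ret;
	ret = phy_init(usb3);
	if (ret)
		goto err_exit_usb2;

	ret = phy_power_on(usb2);
	if (ret)
		goto err_exit_usb3;
	ret = phy_power_on(usb3);
	if (ret)
		goto err_power_off_usb2;
	return 0;

err_power_off_usb2:
	phy_power_off(usb2);
err_exit_usb3:
	phy_exit(usb3);
err_exit_usb2:
	phy_exit(usb2);
	return ret;
}

Teardown runs in the reverse order, as the updated dwc3_core_exit(), dwc3_remove() and dwc3_suspend() paths above show.
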
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index f8af8d44af85..57332e3768e4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -31,11 +31,14 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
+#include <linux/phy/phy.h>
+
/* Global constants */
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
+#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
#define DWC3_EVENT_SIZE 4 /* bytes */
#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
@@ -157,6 +160,7 @@
#define DWC3_GCTL_PRTCAP_OTG 3
#define DWC3_GCTL_CORESOFTRESET (1 << 11)
+#define DWC3_GCTL_SOFITPSYNC (1 << 10)
#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
@@ -318,7 +322,7 @@
/* Device Endpoint Command Register */
#define DWC3_DEPCMD_PARAM_SHIFT 16
#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
-#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
+#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
@@ -393,6 +397,7 @@ struct dwc3_event_buffer {
* @busy_slot: first slot which is owned by HW
* @desc: usb_endpoint_descriptor pointer
* @dwc: pointer to DWC controller
+ * @saved_state: ep state saved during hibernation
* @flags: endpoint flags (wedged, stalled, ...)
* @current_trb: index of current used trb
* @number: endpoint number (1 - 15)
@@ -415,6 +420,7 @@ struct dwc3_ep {
const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
+ u32 saved_state;
unsigned flags;
#define DWC3_EP_ENABLED (1 << 0)
#define DWC3_EP_STALL (1 << 1)
@@ -598,6 +604,7 @@ struct dwc3_scratchpad_array {
* @ep0_trb: dma address of ep0_trb
* @ep0_usb_req: dummy req used while handling STD USB requests
* @ep0_bounce_addr: dma address of ep0_bounce
+ * @scratch_addr: dma address of scratchbuf
* @lock: for synchronizing
* @dev: pointer to our struct device
* @xhci: pointer to our xHCI child
@@ -606,6 +613,7 @@ struct dwc3_scratchpad_array {
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
+ * @nr_scratch: number of scratch buffers
* @num_event_buffers: calculated number of event buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -613,16 +621,10 @@ struct dwc3_scratchpad_array {
* @dr_mode: requested mode of operation
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
+ * @usb2_generic_phy: pointer to USB2 generic PHY (PHY framework)
+ * @usb3_generic_phy: pointer to USB3 generic PHY (PHY framework)
* @dcfg: saved contents of DCFG register
* @gctl: saved contents of GCTL register
- * @is_selfpowered: true when we are selfpowered
- * @three_stage_setup: set if we perform a three phase setup
- * @ep0_bounced: true when we used bounce buffer
- * @ep0_expect_in: true when we expect a DATA IN transfer
- * @start_config_issued: true when StartConfig command has been issued
- * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
- * @needs_fifo_resize: not all users might want fifo resizing, flag it
- * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
* @isoch_delay: wValue from Set Isochronous Delay request;
* @u2sel: parameter from Set SEL request.
* @u2pel: parameter from Set SEL request.
@@ -637,15 +639,31 @@ struct dwc3_scratchpad_array {
* @mem: points to start of memory which is used for this struct.
* @hwparams: copy of hwparams registers
* @root: debugfs root folder pointer
+ * @regset: debugfs pointer to regdump file
+ * @test_mode: true when we're entering a USB test mode
+ * @test_mode_nr: test feature selector
+ * @delayed_status: true when gadget driver asks for delayed status
+ * @ep0_bounced: true when we used bounce buffer
+ * @ep0_expect_in: true when we expect a DATA IN transfer
+ * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @is_selfpowered: true when we are selfpowered
+ * @needs_fifo_resize: not all users might want fifo resizing, flag it
+ * @pullups_connected: true when Run/Stop bit is set
+ * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
+ * @start_config_issued: true when StartConfig command has been issued
+ * @three_stage_setup: set if we perform a three phase setup
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
struct dwc3_trb *ep0_trb;
void *ep0_bounce;
+ void *scratchbuf;
u8 *setup_buf;
dma_addr_t ctrl_req_addr;
dma_addr_t ep0_trb_addr;
dma_addr_t ep0_bounce_addr;
+ dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
/* device lock */
@@ -665,6 +683,9 @@ struct dwc3 {
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
+ struct phy *usb2_generic_phy;
+ struct phy *usb3_generic_phy;
+
void __iomem *regs;
size_t regs_size;
@@ -674,6 +695,7 @@ struct dwc3 {
u32 dcfg;
u32 gctl;
+ u32 nr_scratch;
u32 num_event_buffers;
u32 u1u2;
u32 maximum_speed;
@@ -695,17 +717,9 @@ struct dwc3 {
#define DWC3_REVISION_230A 0x5533230a
#define DWC3_REVISION_240A 0x5533240a
#define DWC3_REVISION_250A 0x5533250a
-
- unsigned is_selfpowered:1;
- unsigned three_stage_setup:1;
- unsigned ep0_bounced:1;
- unsigned ep0_expect_in:1;
- unsigned start_config_issued:1;
- unsigned setup_packet_pending:1;
- unsigned delayed_status:1;
- unsigned needs_fifo_resize:1;
- unsigned resize_fifos:1;
- unsigned pullups_connected:1;
+#define DWC3_REVISION_260A 0x5533260a
+#define DWC3_REVISION_270A 0x5533270a
+#define DWC3_REVISION_280A 0x5533280a
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -730,6 +744,18 @@ struct dwc3 {
u8 test_mode;
u8 test_mode_nr;
+
+ unsigned delayed_status:1;
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned has_hibernation:1;
+ unsigned is_selfpowered:1;
+ unsigned needs_fifo_resize:1;
+ unsigned pullups_connected:1;
+ unsigned resize_fifos:1;
+ unsigned setup_packet_pending:1;
+ unsigned start_config_issued:1;
+ unsigned three_stage_setup:1;
};
/* -------------------------------------------------------------------------- */
@@ -815,15 +841,15 @@ struct dwc3_event_depevt {
* 12 - VndrDevTstRcved
* @reserved15_12: Reserved, not used
* @event_info: Information about this event
- * @reserved31_24: Reserved, not used
+ * @reserved31_25: Reserved, not used
*/
struct dwc3_event_devt {
u32 one_bit:1;
u32 device_event:7;
u32 type:4;
u32 reserved15_12:4;
- u32 event_info:8;
- u32 reserved31_24:8;
+ u32 event_info:9;
+ u32 reserved31_25:7;
} __packed;
/**
@@ -856,6 +882,19 @@ union dwc3_event {
struct dwc3_event_gevt gevt;
};
+/**
+ * struct dwc3_gadget_ep_cmd_params - representation of endpoint command
+ * parameters
+ * @param2: third parameter
+ * @param1: second parameter
+ * @param0: first parameter
+ */
+struct dwc3_gadget_ep_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
/*
* DWC3 Features to be used as Driver Data
*/
@@ -881,11 +920,31 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
+int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
+int dwc3_gadget_get_link_state(struct dwc3 *dwc);
+int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
+int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_gadget_exit(struct dwc3 *dwc)
{ }
+static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
+{ return 0; }
+static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{ return 0; }
+static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
+ enum dwc3_link_state state)
+{ return 0; }
+
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+{ return 0; }
+static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
+ int cmd, u32 param)
+{ return 0; }
#endif
/* power management interface */
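
With struct dwc3_gadget_ep_cmd_params and the dwc3_send_gadget_*_command() prototypes now living in core.h (and given no-op stubs for host-only builds), callers elsewhere in the driver can issue endpoint commands without pulling in gadget.h internals. An illustrative snippet, mirroring the existing __dwc3_gadget_ep_set_halt() shown in the diff rather than introducing anything new:

#include <linux/string.h>

#include "core.h"
#include "gadget.h"

static int example_set_stall(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	/* params.param0..2 stay zero: SETSTALL takes no parameters */
	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
}

On a host-only configuration the stub version of dwc3_send_gadget_ep_cmd() simply returns 0, so such call sites still compile.
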
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index b269dbd47fc4..1160ff41bed4 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -29,7 +29,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/extcon.h>
-#include <linux/extcon/of_extcon.h>
#include <linux/regulator/consumer.h>
#include <linux/usb/otg.h>
@@ -424,11 +423,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory base resource\n");
- return -EINVAL;
- }
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -522,7 +516,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
dwc3_omap_enable_irqs(omap);
if (of_property_read_bool(node, "extcon")) {
- edev = of_extcon_get_extcon_dev(dev, 0);
+ edev = extcon_get_edev_by_phandle(dev, 0);
if (IS_ERR(edev)) {
dev_vdbg(dev, "couldn't get extcon device\n");
ret = -EPROBE_DEFER;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2da0a5a2803a..a740eac74d56 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -68,6 +68,22 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
}
/**
+ * dwc3_gadget_get_link_state - Gets current state of USB Link
+ * @dwc: pointer to our context structure
+ *
+ * Caller should take care of locking. This function
+ * returns the current link state.
+ */
+int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ return DWC3_DSTS_USBLNKST(reg);
+}
+
+/**
* dwc3_gadget_set_link_state - Sets USB Link to a particular State
* @dwc: pointer to our context structure
* @state: the state to put link into
@@ -417,7 +433,7 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore)
+ bool ignore, bool restore)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -436,6 +452,11 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
if (ignore)
params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+ if (restore) {
+ params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
+ params.param2 |= dep->saved_state;
+ }
+
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_XFER_NOT_READY_EN;
@@ -494,7 +515,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore)
+ bool ignore, bool restore)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -508,7 +529,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+ restore);
if (ret)
return ret;
@@ -548,13 +570,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
if (!list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->req_queued)) {
@@ -659,7 +681,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
}
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
+ ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -771,9 +793,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
else
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
-
- if (!req->request.no_interrupt && !chain)
- trb->ctrl |= DWC3_TRB_CTRL_IOC;
break;
case USB_ENDPOINT_XFER_BULK:
@@ -788,6 +807,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
BUG();
}
+ if (!req->request.no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
trb->ctrl |= DWC3_TRB_CTRL_CSP;
@@ -1077,7 +1099,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if (list_empty(&dep->req_queued)) {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
return 0;
@@ -1107,6 +1129,23 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return ret;
}
+ /*
+ * 4. Stream Capable Bulk Endpoints. We need to start the transfer
+ * right away, otherwise the host will not know we have streams to be
+ * handled.
+ */
+ if (dep->stream_capable) {
+ int ret;
+
+ ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ if (ret && ret != -EBUSY) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+ dep->name);
+ }
+ }
+
return 0;
}
@@ -1163,7 +1202,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
if (r == req) {
/* wait until it is processed */
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1194,8 +1233,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_SETSTALL, &params);
if (ret)
- dev_err(dwc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear",
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
dep->name);
else
dep->flags |= DWC3_EP_STALL;
@@ -1203,8 +1241,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_CLEARSTALL, &params);
if (ret)
- dev_err(dwc->dev, "failed to %s STALL on %s\n",
- value ? "set" : "clear",
+ dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
else
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
@@ -1387,7 +1424,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
@@ -1402,9 +1439,17 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
if (dwc->revision >= DWC3_REVISION_194A)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
+
+ if (dwc->has_hibernation)
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+
dwc->pullups_connected = true;
} else {
reg &= ~DWC3_DCTL_RUN_STOP;
+
+ if (dwc->has_hibernation && !suspend)
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+
dwc->pullups_connected = false;
}
@@ -1442,7 +1487,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
spin_lock_irqsave(&dwc->lock, flags);
- ret = dwc3_gadget_run_stop(dwc, is_on);
+ ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1549,14 +1594,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err2;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err3;
@@ -1849,15 +1896,12 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
*/
dep->flags = DWC3_EP_PENDING_REQUEST;
} else {
- dwc3_stop_active_transfer(dwc, dep->number);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
return 1;
}
- if ((event->status & DEPEVT_STATUS_IOC) &&
- (trb->ctrl & DWC3_TRB_CTRL_IOC))
- return 0;
return 1;
}
@@ -1999,7 +2043,25 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+static void dwc3_suspend_gadget(struct dwc3 *dwc)
+{
+ if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->suspend(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_resume_gadget(struct dwc3 *dwc)
+{
+ if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
@@ -2031,7 +2093,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
- cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+ cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
+ cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
@@ -2260,17 +2323,23 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
reg |= DWC3_DCTL_HIRD_THRES(12);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ } else {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
+ false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -2378,9 +2447,50 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
dwc->link_state = next;
+ switch (next) {
+ case DWC3_LINK_STATE_U1:
+ if (dwc->speed == USB_SPEED_SUPER)
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_U2:
+ case DWC3_LINK_STATE_U3:
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_RESUME:
+ dwc3_resume_gadget(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
+static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ unsigned int is_ss = evtinfo & BIT(4);
+
+ /**
+ * WORKAROUND: DWC3 revision 2.20a with hibernation support
+ * has a known issue which can cause USB CV TD.9.23 to fail
+ * randomly.
+ *
+ * Because of this issue, core could generate bogus hibernation
+ * events which SW needs to ignore.
+ *
+ * Refers to:
+ *
+ * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
+ * Device Fallback from SuperSpeed
+ */
+ if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+ return;
+
+ /* enter hibernation here */
+}
+
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
const struct dwc3_event_devt *event)
{
@@ -2397,6 +2507,13 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
case DWC3_DEVICE_EVENT_WAKEUP:
dwc3_gadget_wakeup_interrupt(dwc);
break;
+ case DWC3_DEVICE_EVENT_HIBER_REQ:
+ if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
+ "unexpected hibernation event\n"))
+ break;
+
+ dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
+ break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
@@ -2661,8 +2778,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_prepare(struct dwc3 *dwc)
{
- if (dwc->pullups_connected)
+ if (dwc->pullups_connected) {
dwc3_gadget_disable_irq(dwc);
+ dwc3_gadget_run_stop(dwc, true, true);
+ }
return 0;
}
@@ -2671,7 +2790,7 @@ void dwc3_gadget_complete(struct dwc3 *dwc)
{
if (dwc->pullups_connected) {
dwc3_gadget_enable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true);
+ dwc3_gadget_run_stop(dwc, true, false);
}
}
@@ -2694,12 +2813,14 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret)
goto err0;
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
+ false);
if (ret)
goto err1;
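
A sketch of the lock-drop convention used by the new dwc3_suspend_gadget() and dwc3_resume_gadget() helpers added above. dwc->lock is released around the callback because the function driver's ->suspend()/->resume() handler may re-enter the UDC (for example to queue or dequeue a request) and would otherwise deadlock on the same spinlock; this is the same convention the existing dwc3_disconnect_gadget() follows. The code below is an annotated restatement under that assumption, not new behaviour:

#include <linux/spinlock.h>

#include "core.h"

static void example_notify_suspend(struct dwc3 *dwc)
{
	/* caller is the interrupt path and already holds dwc->lock */
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
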
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index febe1aa7b714..a0ee75b68a80 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -56,12 +56,6 @@ struct dwc3;
/* DEPXFERCFG parameter 0 */
#define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
-struct dwc3_gadget_ep_cmd_params {
- u32 param2;
- u32 param1;
- u32 param0;
-};
-
/* -------------------------------------------------------------------------- */
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
@@ -85,9 +79,6 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
-int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
-int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
@@ -95,9 +86,6 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
-int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 8154165aa601..3557c7e5040d 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -226,7 +226,7 @@ config USB_GR_UDC
config USB_OMAP
tristate "OMAP USB Device Controller"
depends on ARCH_OMAP1
- select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
+ select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
help
Many Texas Instruments OMAP processors have flexible full
speed USB device controllers, with support for up to 30
@@ -301,7 +301,6 @@ config USB_PXA27X
gadget drivers to also be dynamically linked.
config USB_S3C_HSOTG
- depends on ARM
tristate "Designware/S3C HS/OtG USB Device controller"
help
The Designware USB2.0 high-speed gadget controller
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index cea8c20a1425..f605ad8c1902 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1758,15 +1758,15 @@ static int at91udc_probe(struct platform_device *pdev)
/* newer chips have more FIFO memory than rm9200 */
if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
- usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[4].ep, 512);
- usb_ep_set_maxpacket_limit(&udc->ep[5].ep, 512);
+ udc->ep[0].maxpacket = 64;
+ udc->ep[3].maxpacket = 64;
+ udc->ep[4].maxpacket = 512;
+ udc->ep[5].maxpacket = 512;
} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
+ udc->ep[3].maxpacket = 64;
} else if (cpu_is_at91sam9263()) {
- usb_ep_set_maxpacket_limit(&udc->ep[0].ep, 64);
- usb_ep_set_maxpacket_limit(&udc->ep[3].ep, 64);
+ udc->ep[0].maxpacket = 64;
+ udc->ep[3].maxpacket = 64;
}
udc->udp_baseaddr = ioremap(res->start, resource_size(res));
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 38bf67b1a97d..9f65324f9ae0 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -326,7 +326,7 @@ static int vbus_is_present(struct usba_udc *udc)
#if defined(CONFIG_ARCH_AT91SAM9RL)
-#include <mach/at91_pmc.h>
+#include <linux/clk/at91_pmc.h>
static void toggle_bias(int is_on)
{
@@ -1661,7 +1661,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (dma_status) {
int i;
- for (i = 1; i < USBA_NR_ENDPOINTS; i++)
+ for (i = 1; i < USBA_NR_DMAS; i++)
if (dma_status & (1 << i))
usba_dma_irq(udc, &udc->usba_ep[i]);
}
@@ -1670,7 +1670,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (ep_status) {
int i;
- for (i = 0; i < USBA_NR_ENDPOINTS; i++)
+ for (i = 0; i < udc->num_ep; i++)
if (ep_status & (1 << i)) {
if (ep_is_control(&udc->usba_ep[i]))
usba_control_irq(udc, &udc->usba_ep[i]);
@@ -1827,12 +1827,12 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
- udc->driver = NULL;
-
clk_disable_unprepare(udc->hclk);
clk_disable_unprepare(udc->pclk);
- DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
+ DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
+
+ udc->driver = NULL;
return 0;
}
@@ -1914,6 +1914,12 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
i++;
}
+ if (i == 0) {
+ dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
return eps;
err:
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index 2922db50befe..a70706e8cb02 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -210,7 +210,7 @@
#define USBA_FIFO_BASE(x) ((x) << 16)
/* Synth parameters */
-#define USBA_NR_ENDPOINTS 7
+#define USBA_NR_DMAS 7
#define EP0_FIFO_SIZE 64
#define EP0_EPT_SIZE USBA_EPT_SIZE_64
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 888fbb43b338..e969eb809a85 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -360,24 +360,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
bcm_writel(val, udc->iudma_regs + off);
}
-static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
@@ -638,7 +644,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
} while (!last_bd);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
- ENETDMAC_CHANCFG_REG(iudma->ch_idx));
+ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
/**
@@ -694,9 +700,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
/* stop DMA, then wait for the hardware to wrap up */
- usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
+ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
- while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
+ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
ENETDMAC_CHANCFG_EN_MASK) {
udelay(1);
@@ -713,10 +719,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
ch_idx);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
- ENETDMAC_CHANCFG_REG(ch_idx));
+ ENETDMAC_CHANCFG_REG, ch_idx);
}
}
- usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
+ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
/* don't leave "live" HW-owned entries for the next guy to step on */
for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
@@ -728,11 +734,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
/* set up IRQs, UBUS burst size, and BD base for this channel */
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IRMASK_REG(ch_idx));
- usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
+ ENETDMAC_IRMASK_REG, ch_idx);
+ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
- usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
- usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
+ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
+ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
@@ -2035,7 +2041,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
spin_lock(&udc->lock);
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IR_REG(iudma->ch_idx));
+ ENETDMAC_IR_REG, iudma->ch_idx);
bep = iudma->bep;
rc = iudma_read(udc, iudma);
@@ -2175,18 +2181,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
seq_printf(s, " [ep%d]:\n",
max_t(int, iudma_defaults[ch_idx].ep_num, 0));
seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
- usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
+ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
- sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
- sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
+ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
+ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
- usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
+ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
sram2 >> 16, sram2 & 0xffff,
sram3 >> 16, sram3 & 0xffff,
- usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
+ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d742bed7a5fa..fab906429b80 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1139,7 +1139,7 @@ struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
uc = copy_gadget_strings(sp, n_gstrings, n_strings);
if (IS_ERR(uc))
- return ERR_PTR(PTR_ERR(uc));
+ return ERR_CAST(uc);
n_gs = get_containers_gs(uc);
ret = usb_string_ids_tab(cdev, n_gs[0]->strings);
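
The composite.c change above swaps ERR_PTR(PTR_ERR(uc)) for ERR_CAST(uc): both forward the same encoded error, but ERR_CAST() does it without decoding the value and re-encoding it into a pointer of another type. A small self-contained illustration with hypothetical types (only <linux/err.h> and the allocator calls are real kernel API):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_inner { int x; };
struct example_outer { struct example_inner *inner; };

/* hypothetical constructor that reports failure through ERR_PTR() */
static struct example_inner *example_make_inner(gfp_t gfp)
{
	struct example_inner *p = kzalloc(sizeof(*p), gfp);

	return p ? p : ERR_PTR(-ENOMEM);
}

static struct example_outer *example_make_outer(gfp_t gfp)
{
	struct example_outer *o;
	struct example_inner *i = example_make_inner(gfp);

	if (IS_ERR(i))
		return ERR_CAST(i);	/* same encoded error, new pointer type */

	o = kzalloc(sizeof(*o), gfp);
	if (!o) {
		kfree(i);
		return ERR_PTR(-ENOMEM);
	}
	o->inner = i;
	return o;
}
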
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 306a2b52125c..2e164dca08e8 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -28,6 +28,10 @@
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
+#include <linux/aio.h>
+#include <linux/mmu_context.h>
+#include <linux/poll.h>
+
#include "u_fs.h"
#include "configfs.h"
@@ -99,6 +103,14 @@ static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
}
+static inline enum ffs_setup_state
+ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
+{
+ return (enum ffs_setup_state)
+ cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
+}
+
+
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);
@@ -122,8 +134,8 @@ struct ffs_ep {
struct usb_ep *ep; /* P: ffs->eps_lock */
struct usb_request *req; /* P: epfile->mutex */
- /* [0]: full speed, [1]: high speed */
- struct usb_endpoint_descriptor *descs[2];
+ /* [0]: full speed, [1]: high speed, [2]: super speed */
+ struct usb_endpoint_descriptor *descs[3];
u8 num;
@@ -148,6 +160,25 @@ struct ffs_epfile {
unsigned char _pad;
};
+/* ffs_io_data structure ***************************************************/
+
+struct ffs_io_data {
+ bool aio;
+ bool read;
+
+ struct kiocb *kiocb;
+ const struct iovec *iovec;
+ unsigned long nr_segs;
+ char __user *buf;
+ size_t len;
+
+ struct mm_struct *mm;
+ struct work_struct work;
+
+ struct usb_ep *ep;
+ struct usb_request *req;
+};
+
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
@@ -161,8 +192,10 @@ ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
DEFINE_MUTEX(ffs_lock);
EXPORT_SYMBOL(ffs_lock);
-static struct ffs_dev *ffs_find_dev(const char *name);
+static struct ffs_dev *_ffs_find_dev(const char *name);
+static struct ffs_dev *_ffs_alloc_dev(void);
static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
+static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
@@ -218,7 +251,7 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
- return ffs->ep0req_status;
+ return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
@@ -244,7 +277,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
/* Fast check if setup was canceled */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
@@ -310,8 +343,8 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
* rather then _irqsave
*/
spin_lock_irq(&ffs->ev.waitq.lock);
- switch (FFS_SETUP_STATE(ffs)) {
- case FFS_SETUP_CANCELED:
+ switch (ffs_setup_state_clear_cancelled(ffs)) {
+ case FFS_SETUP_CANCELLED:
ret = -EIDRM;
goto done_spin;
@@ -346,7 +379,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
/*
* We are guaranteed to be still in FFS_ACTIVE state
* but the state of setup could have changed from
- * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
+ * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
* to check for that. If that happened we copied data
* from user space in vain but it's unlikely.
*
@@ -355,7 +388,8 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
* transition can be performed and it's protected by
* mutex.
*/
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ if (ffs_setup_state_clear_cancelled(ffs) ==
+ FFS_SETUP_CANCELLED) {
ret = -EIDRM;
done_spin:
spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -421,7 +455,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
/* Fast check if setup was canceled */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
/* Acquire mutex */
@@ -441,8 +475,8 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
*/
spin_lock_irq(&ffs->ev.waitq.lock);
- switch (FFS_SETUP_STATE(ffs)) {
- case FFS_SETUP_CANCELED:
+ switch (ffs_setup_state_clear_cancelled(ffs)) {
+ case FFS_SETUP_CANCELLED:
ret = -EIDRM;
break;
@@ -489,7 +523,8 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_lock_irq(&ffs->ev.waitq.lock);
/* See ffs_ep0_write() */
- if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ if (ffs_setup_state_clear_cancelled(ffs) ==
+ FFS_SETUP_CANCELLED) {
ret = -EIDRM;
break;
}
@@ -558,6 +593,45 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
+static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+{
+ struct ffs_data *ffs = file->private_data;
+ unsigned int mask = POLLWRNORM;
+ int ret;
+
+ poll_wait(file, &ffs->ev.waitq, wait);
+
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return mask;
+
+ switch (ffs->state) {
+ case FFS_READ_DESCRIPTORS:
+ case FFS_READ_STRINGS:
+ mask |= POLLOUT;
+ break;
+
+ case FFS_ACTIVE:
+ switch (ffs->setup_state) {
+ case FFS_NO_SETUP:
+ if (ffs->ev.count)
+ mask |= POLLIN;
+ break;
+
+ case FFS_SETUP_PENDING:
+ case FFS_SETUP_CANCELLED:
+ mask |= (POLLIN | POLLOUT);
+ break;
+ }
+ case FFS_CLOSING:
+ break;
+ }
+
+ mutex_unlock(&ffs->mutex);
+
+ return mask;
+}
+
static const struct file_operations ffs_ep0_operations = {
.llseek = no_llseek,
@@ -566,6 +640,7 @@ static const struct file_operations ffs_ep0_operations = {
.read = ffs_ep0_read,
.release = ffs_ep0_release,
.unlocked_ioctl = ffs_ep0_ioctl,
+ .poll = ffs_ep0_poll,
};
@@ -581,11 +656,54 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
}
}
-static ssize_t ffs_epfile_io(struct file *file,
- char __user *buf, size_t len, int read)
+static void ffs_user_copy_worker(struct work_struct *work)
+{
+ struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+ work);
+ int ret = io_data->req->status ? io_data->req->status :
+ io_data->req->actual;
+
+ if (io_data->read && ret > 0) {
+ int i;
+ size_t pos = 0;
+ use_mm(io_data->mm);
+ for (i = 0; i < io_data->nr_segs; i++) {
+ if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
+ &io_data->buf[pos],
+ io_data->iovec[i].iov_len))) {
+ ret = -EFAULT;
+ break;
+ }
+ pos += io_data->iovec[i].iov_len;
+ }
+ unuse_mm(io_data->mm);
+ }
+
+ aio_complete(io_data->kiocb, ret, ret);
+
+ usb_ep_free_request(io_data->ep, io_data->req);
+
+ io_data->kiocb->private = NULL;
+ if (io_data->read)
+ kfree(io_data->iovec);
+ kfree(io_data->buf);
+ kfree(io_data);
+}
+
+static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
+ struct usb_request *req)
+{
+ struct ffs_io_data *io_data = req->context;
+
+ ENTER();
+
+ INIT_WORK(&io_data->work, ffs_user_copy_worker);
+ schedule_work(&io_data->work);
+}
+
+static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_gadget *gadget = epfile->ffs->gadget;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len;
@@ -613,7 +731,7 @@ static ssize_t ffs_epfile_io(struct file *file,
}
/* Do we halt? */
- halt = !read == !epfile->in;
+ halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc) {
ret = -EINVAL;
goto error;
@@ -622,18 +740,41 @@ static ssize_t ffs_epfile_io(struct file *file,
/* Allocate & copy */
if (!halt) {
/*
+ * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+ * before the waiting completes, so do not assign to 'gadget' earlier
+ */
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+
+ /*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
- data_len = read ? usb_ep_align_maybe(gadget, ep->ep, len) : len;
+ data_len = io_data->read ?
+ usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
+ io_data->len;
data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
-
- if (!read && unlikely(copy_from_user(data, buf, len))) {
- ret = -EFAULT;
- goto error;
+ if (io_data->aio && !io_data->read) {
+ int i;
+ size_t pos = 0;
+ for (i = 0; i < io_data->nr_segs; i++) {
+ if (unlikely(copy_from_user(&data[pos],
+ io_data->iovec[i].iov_base,
+ io_data->iovec[i].iov_len))) {
+ ret = -EFAULT;
+ goto error;
+ }
+ pos += io_data->iovec[i].iov_len;
+ }
+ } else {
+ if (!io_data->read &&
+ unlikely(__copy_from_user(data, io_data->buf,
+ io_data->len))) {
+ ret = -EFAULT;
+ goto error;
+ }
}
}
@@ -656,40 +797,78 @@ static ssize_t ffs_epfile_io(struct file *file,
ret = -EBADMSG;
} else {
/* Fire the request */
- DECLARE_COMPLETION_ONSTACK(done);
+ struct usb_request *req;
- struct usb_request *req = ep->req;
- req->context = &done;
- req->complete = ffs_epfile_io_complete;
- req->buf = data;
- req->length = data_len;
+ if (io_data->aio) {
+ req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
+ if (unlikely(!req))
+ goto error_lock;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ req->buf = data;
+ req->length = io_data->len;
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ io_data->buf = data;
+ io_data->ep = ep->ep;
+ io_data->req = req;
- if (unlikely(ret < 0)) {
- /* nop */
- } else if (unlikely(wait_for_completion_interruptible(&done))) {
- ret = -EINTR;
- usb_ep_dequeue(ep->ep, req);
+ req->context = io_data;
+ req->complete = ffs_epfile_async_io_complete;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (unlikely(ret)) {
+ usb_ep_free_request(ep->ep, req);
+ goto error_lock;
+ }
+ ret = -EIOCBQUEUED;
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
} else {
- /*
- * XXX We may end up silently droping data here.
- * Since data_len (i.e. req->length) may be bigger
- * than len (after being rounded up to maxpacketsize),
- * we may end up with more data then user space has
- * space for.
- */
- ret = ep->status;
- if (read && ret > 0 &&
- unlikely(copy_to_user(buf, data,
- min_t(size_t, ret, len))))
- ret = -EFAULT;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ req = ep->req;
+ req->buf = data;
+ req->length = io_data->len;
+
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ if (unlikely(ret < 0)) {
+ /* nop */
+ } else if (unlikely(
+ wait_for_completion_interruptible(&done))) {
+ ret = -EINTR;
+ usb_ep_dequeue(ep->ep, req);
+ } else {
+ /*
+ * XXX We may end up silently dropping data
+ * here. Since data_len (i.e. req->length) may
+ * be bigger than len (after being rounded up
+ * to maxpacketsize), we may end up with more
+ * data than user space has space for.
+ */
+ ret = ep->status;
+ if (io_data->read && ret > 0) {
+ ret = min_t(size_t, ret, io_data->len);
+
+ if (unlikely(copy_to_user(io_data->buf,
+ data, ret)))
+ ret = -EFAULT;
+ }
+ }
+ kfree(data);
}
}
mutex_unlock(&epfile->mutex);
+ return ret;
+
+error_lock:
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
error:
kfree(data);
return ret;
@@ -699,17 +878,31 @@ static ssize_t
ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
loff_t *ptr)
{
+ struct ffs_io_data io_data;
+
ENTER();
- return ffs_epfile_io(file, (char __user *)buf, len, 0);
+ io_data.aio = false;
+ io_data.read = false;
+ io_data.buf = (char __user *)buf;
+ io_data.len = len;
+
+ return ffs_epfile_io(file, &io_data);
}
static ssize_t
ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
{
+ struct ffs_io_data io_data;
+
ENTER();
- return ffs_epfile_io(file, buf, len, 1);
+ io_data.aio = false;
+ io_data.read = true;
+ io_data.buf = buf;
+ io_data.len = len;
+
+ return ffs_epfile_io(file, &io_data);
}
static int
@@ -728,6 +921,89 @@ ffs_epfile_open(struct inode *inode, struct file *file)
return 0;
}
+static int ffs_aio_cancel(struct kiocb *kiocb)
+{
+ struct ffs_io_data *io_data = kiocb->private;
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ int value;
+
+ ENTER();
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ if (likely(io_data && io_data->ep && io_data->req))
+ value = usb_ep_dequeue(io_data->ep, io_data->req);
+ else
+ value = -EINVAL;
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ return value;
+}
+
+static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
+ const struct iovec *iovec,
+ unsigned long nr_segs, loff_t loff)
+{
+ struct ffs_io_data *io_data;
+
+ ENTER();
+
+ io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
+ if (unlikely(!io_data))
+ return -ENOMEM;
+
+ io_data->aio = true;
+ io_data->read = false;
+ io_data->kiocb = kiocb;
+ io_data->iovec = iovec;
+ io_data->nr_segs = nr_segs;
+ io_data->len = kiocb->ki_nbytes;
+ io_data->mm = current->mm;
+
+ kiocb->private = io_data;
+
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+
+ return ffs_epfile_io(kiocb->ki_filp, io_data);
+}
+
+static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
+ const struct iovec *iovec,
+ unsigned long nr_segs, loff_t loff)
+{
+ struct ffs_io_data *io_data;
+ struct iovec *iovec_copy;
+
+ ENTER();
+
+ iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
+ if (unlikely(!iovec_copy))
+ return -ENOMEM;
+
+ memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
+
+ io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
+ if (unlikely(!io_data)) {
+ kfree(iovec_copy);
+ return -ENOMEM;
+ }
+
+ io_data->aio = true;
+ io_data->read = true;
+ io_data->kiocb = kiocb;
+ io_data->iovec = iovec_copy;
+ io_data->nr_segs = nr_segs;
+ io_data->len = kiocb->ki_nbytes;
+ io_data->mm = current->mm;
+
+ kiocb->private = io_data;
+
+ kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+
+ return ffs_epfile_io(kiocb->ki_filp, io_data);
+}
+
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
@@ -784,6 +1060,8 @@ static const struct file_operations ffs_epfile_operations = {
.open = ffs_epfile_open,
.write = ffs_epfile_write,
.read = ffs_epfile_read,
+ .aio_write = ffs_epfile_aio_write,
+ .aio_read = ffs_epfile_aio_read,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
};
@@ -1167,7 +1445,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
- kfree(ffs->raw_descs);
+ kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
}
@@ -1179,14 +1457,15 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs_data_clear(ffs);
ffs->epfiles = NULL;
+ ffs->raw_descs_data = NULL;
ffs->raw_descs = NULL;
ffs->raw_strings = NULL;
ffs->stringtabs = NULL;
ffs->raw_descs_length = 0;
- ffs->raw_fs_descs_length = 0;
ffs->fs_descs_count = 0;
ffs->hs_descs_count = 0;
+ ffs->ss_descs_count = 0;
ffs->strings_count = 0;
ffs->interfaces_count = 0;
@@ -1329,7 +1608,24 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
- ds = ep->descs[ep->descs[1] ? 1 : 0];
+ int desc_idx;
+
+ if (ffs->gadget->speed == USB_SPEED_SUPER)
+ desc_idx = 2;
+ else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ desc_idx = 1;
+ else
+ desc_idx = 0;
+
+ /* fall-back to lower speed if desc missing for current speed */
+ do {
+ ds = ep->descs[desc_idx];
+ } while (!ds && --desc_idx >= 0);
+
+ if (!ds) {
+ ret = -EINVAL;
+ break;
+ }
ep->ep->driver_data = ep;
ep->ep->desc = ds;
@@ -1464,6 +1760,12 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
}
break;
+ case USB_DT_SS_ENDPOINT_COMP:
+ pr_vdebug("EP SS companion descriptor\n");
+ if (length != sizeof(struct usb_ss_ep_comp_descriptor))
+ goto inv_length;
+ break;
+
case USB_DT_OTHER_SPEED_CONFIG:
case USB_DT_INTERFACE_POWER:
case USB_DT_DEBUG:
@@ -1574,60 +1876,76 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
static int __ffs_data_got_descs(struct ffs_data *ffs,
char *const _data, size_t len)
{
- unsigned fs_count, hs_count;
- int fs_len, ret = -EINVAL;
- char *data = _data;
+ char *data = _data, *raw_descs;
+ unsigned counts[3], flags;
+ int ret = -EINVAL, i;
ENTER();
- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
- get_unaligned_le32(data + 4) != len))
+ if (get_unaligned_le32(data + 4) != len)
goto error;
- fs_count = get_unaligned_le32(data + 8);
- hs_count = get_unaligned_le32(data + 12);
- if (!fs_count && !hs_count)
- goto einval;
-
- data += 16;
- len -= 16;
-
- if (likely(fs_count)) {
- fs_len = ffs_do_descs(fs_count, data, len,
- __ffs_data_do_entity, ffs);
- if (unlikely(fs_len < 0)) {
- ret = fs_len;
+ switch (get_unaligned_le32(data)) {
+ case FUNCTIONFS_DESCRIPTORS_MAGIC:
+ flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
+ data += 8;
+ len -= 8;
+ break;
+ case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
+ flags = get_unaligned_le32(data + 8);
+ if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
+ FUNCTIONFS_HAS_HS_DESC |
+ FUNCTIONFS_HAS_SS_DESC)) {
+ ret = -ENOSYS;
goto error;
}
+ data += 12;
+ len -= 12;
+ break;
+ default:
+ goto error;
+ }
- data += fs_len;
- len -= fs_len;
- } else {
- fs_len = 0;
+ /* Read fs_count, hs_count and ss_count (if present) */
+ for (i = 0; i < 3; ++i) {
+ if (!(flags & (1 << i))) {
+ counts[i] = 0;
+ } else if (len < 4) {
+ goto error;
+ } else {
+ counts[i] = get_unaligned_le32(data);
+ data += 4;
+ len -= 4;
+ }
}
- if (likely(hs_count)) {
- ret = ffs_do_descs(hs_count, data, len,
+ /* Read descriptors */
+ raw_descs = data;
+ for (i = 0; i < 3; ++i) {
+ if (!counts[i])
+ continue;
+ ret = ffs_do_descs(counts[i], data, len,
__ffs_data_do_entity, ffs);
- if (unlikely(ret < 0))
+ if (ret < 0)
goto error;
- } else {
- ret = 0;
+ data += ret;
+ len -= ret;
}
- if (unlikely(len != ret))
- goto einval;
+ if (raw_descs == data || len) {
+ ret = -EINVAL;
+ goto error;
+ }
- ffs->raw_fs_descs_length = fs_len;
- ffs->raw_descs_length = fs_len + ret;
- ffs->raw_descs = _data;
- ffs->fs_descs_count = fs_count;
- ffs->hs_descs_count = hs_count;
+ ffs->raw_descs_data = _data;
+ ffs->raw_descs = raw_descs;
+ ffs->raw_descs_length = data - raw_descs;
+ ffs->fs_descs_count = counts[0];
+ ffs->hs_descs_count = counts[1];
+ ffs->ss_descs_count = counts[2];
return 0;
-einval:
- ret = -EINVAL;
error:
kfree(_data);
return ret;
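The parser above expects the new FUNCTIONFS_DESCRIPTORS_MAGIC_V2 layout: a magic word, the total blob length, a flags word, one 32-bit count per flag bit set (full, high, super speed, in that order), then the raw descriptors concatenated in the same order. For illustration only, the header portion could be pictured like this; the struct name is made up here and all fields are little-endian on the wire:

/* Sketch of the v2 blob header accepted by __ffs_data_got_descs() above. */
#include <linux/types.h>

struct ffs_descs_v2_header {
	__le32 magic;	/* FUNCTIONFS_DESCRIPTORS_MAGIC_V2 */
	__le32 length;	/* total length of the whole blob, including this header */
	__le32 flags;	/* FUNCTIONFS_HAS_{FS,HS,SS}_DESC bits */
	/*
	 * Followed by one __le32 count per flag bit set, in FS, HS, SS
	 * order, and then by the raw USB descriptors in the same order.
	 */
} __attribute__((packed));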
@@ -1784,7 +2102,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
* the source does nothing.
*/
if (ffs->setup_state == FFS_SETUP_PENDING)
- ffs->setup_state = FFS_SETUP_CANCELED;
+ ffs->setup_state = FFS_SETUP_CANCELLED;
switch (type) {
case FUNCTIONFS_RESUME:
@@ -1845,21 +2163,28 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
struct usb_endpoint_descriptor *ds = (void *)desc;
struct ffs_function *func = priv;
struct ffs_ep *ffs_ep;
-
- /*
- * If hs_descriptors is not NULL then we are reading hs
- * descriptors now
- */
- const int isHS = func->function.hs_descriptors != NULL;
- unsigned idx;
+ unsigned ep_desc_id, idx;
+ static const char *speed_names[] = { "full", "high", "super" };
if (type != FFS_DESCRIPTOR)
return 0;
- if (isHS)
+ /*
+ * If ss_descriptors is not NULL, we are reading super speed
+ * descriptors; if hs_descriptors is not NULL, we are reading high
+ * speed descriptors; otherwise, we are reading full speed
+ * descriptors.
+ */
+ if (func->function.ss_descriptors) {
+ ep_desc_id = 2;
+ func->function.ss_descriptors[(long)valuep] = desc;
+ } else if (func->function.hs_descriptors) {
+ ep_desc_id = 1;
func->function.hs_descriptors[(long)valuep] = desc;
- else
+ } else {
+ ep_desc_id = 0;
func->function.fs_descriptors[(long)valuep] = desc;
+ }
if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
return 0;
@@ -1867,13 +2192,13 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
ffs_ep = func->eps + idx;
- if (unlikely(ffs_ep->descs[isHS])) {
- pr_vdebug("two %sspeed descriptors for EP %d\n",
- isHS ? "high" : "full",
+ if (unlikely(ffs_ep->descs[ep_desc_id])) {
+ pr_err("two %sspeed descriptors for EP %d\n",
+ speed_names[ep_desc_id],
ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
return -EINVAL;
}
- ffs_ep->descs[isHS] = ds;
+ ffs_ep->descs[ep_desc_id] = ds;
ffs_dump_mem(": Original ep desc", ds, ds->bLength);
if (ffs_ep->ep) {
@@ -2017,8 +2342,10 @@ static int _ffs_func_bind(struct usb_configuration *c,
const int full = !!func->ffs->fs_descs_count;
const int high = gadget_is_dualspeed(func->gadget) &&
func->ffs->hs_descs_count;
+ const int super = gadget_is_superspeed(func->gadget) &&
+ func->ffs->ss_descs_count;
- int ret;
+ int fs_len, hs_len, ret;
/* Make it a single chunk, less management later on */
vla_group(d);
@@ -2027,15 +2354,16 @@ static int _ffs_func_bind(struct usb_configuration *c,
full ? ffs->fs_descs_count + 1 : 0);
vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
high ? ffs->hs_descs_count + 1 : 0);
+ vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
+ super ? ffs->ss_descs_count + 1 : 0);
vla_item_with_sz(d, short, inums, ffs->interfaces_count);
- vla_item_with_sz(d, char, raw_descs,
- high ? ffs->raw_descs_length : ffs->raw_fs_descs_length);
+ vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
char *vlabuf;
ENTER();
- /* Only high speed but not supported by gadget? */
- if (unlikely(!(full | high)))
+ /* Has descriptors only for speeds gadget does not support */
+ if (unlikely(!(full | high | super)))
return -ENOTSUPP;
/* Allocate a single chunk, less management later on */
@@ -2045,8 +2373,10 @@ static int _ffs_func_bind(struct usb_configuration *c,
/* Zero */
memset(vla_ptr(vlabuf, d, eps), 0, d_eps__sz);
- memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs + 16,
- d_raw_descs__sz);
+ /* Copy descriptors */
+ memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
+ ffs->raw_descs_length);
+
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
for (ret = ffs->eps_count; ret; --ret) {
struct ffs_ep *ptr;
@@ -2068,22 +2398,38 @@ static int _ffs_func_bind(struct usb_configuration *c,
*/
if (likely(full)) {
func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
- ret = ffs_do_descs(ffs->fs_descs_count,
- vla_ptr(vlabuf, d, raw_descs),
- d_raw_descs__sz,
- __ffs_func_bind_do_descs, func);
- if (unlikely(ret < 0))
+ fs_len = ffs_do_descs(ffs->fs_descs_count,
+ vla_ptr(vlabuf, d, raw_descs),
+ d_raw_descs__sz,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(fs_len < 0)) {
+ ret = fs_len;
goto error;
+ }
} else {
- ret = 0;
+ fs_len = 0;
}
if (likely(high)) {
func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
- ret = ffs_do_descs(ffs->hs_descs_count,
- vla_ptr(vlabuf, d, raw_descs) + ret,
- d_raw_descs__sz - ret,
- __ffs_func_bind_do_descs, func);
+ hs_len = ffs_do_descs(ffs->hs_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) + fs_len,
+ d_raw_descs__sz - fs_len,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(hs_len < 0)) {
+ ret = hs_len;
+ goto error;
+ }
+ } else {
+ hs_len = 0;
+ }
+
+ if (likely(super)) {
+ func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
+ ret = ffs_do_descs(ffs->ss_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
+ d_raw_descs__sz - fs_len - hs_len,
+ __ffs_func_bind_do_descs, func);
if (unlikely(ret < 0))
goto error;
}
@@ -2094,7 +2440,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
* now.
*/
ret = ffs_do_descs(ffs->fs_descs_count +
- (high ? ffs->hs_descs_count : 0),
+ (high ? ffs->hs_descs_count : 0) +
+ (super ? ffs->ss_descs_count : 0),
vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
__ffs_func_bind_do_nums, func);
if (unlikely(ret < 0))
@@ -2253,7 +2600,7 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
static LIST_HEAD(ffs_devices);
-static struct ffs_dev *_ffs_find_dev(const char *name)
+static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
@@ -2270,7 +2617,7 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
/*
* ffs_lock must be taken by the caller of this function
*/
-static struct ffs_dev *ffs_get_single_dev(void)
+static struct ffs_dev *_ffs_get_single_dev(void)
{
struct ffs_dev *dev;
@@ -2286,15 +2633,15 @@ static struct ffs_dev *ffs_get_single_dev(void)
/*
* ffs_lock must be taken by the caller of this function
*/
-static struct ffs_dev *ffs_find_dev(const char *name)
+static struct ffs_dev *_ffs_find_dev(const char *name)
{
struct ffs_dev *dev;
- dev = ffs_get_single_dev();
+ dev = _ffs_get_single_dev();
if (dev)
return dev;
- return _ffs_find_dev(name);
+ return _ffs_do_find_dev(name);
}
/* Configfs support *********************************************************/
@@ -2330,7 +2677,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
opts = to_f_fs_opts(f);
ffs_dev_lock();
- ffs_free_dev(opts->dev);
+ _ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
@@ -2385,7 +2732,7 @@ static struct usb_function_instance *ffs_alloc_inst(void)
opts->func_inst.set_inst_name = ffs_set_inst_name;
opts->func_inst.free_func_inst = ffs_free_inst;
ffs_dev_lock();
- dev = ffs_alloc_dev();
+ dev = _ffs_alloc_dev();
ffs_dev_unlock();
if (IS_ERR(dev)) {
kfree(opts);
@@ -2441,6 +2788,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
*/
func->function.fs_descriptors = NULL;
func->function.hs_descriptors = NULL;
+ func->function.ss_descriptors = NULL;
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
@@ -2473,12 +2821,12 @@ static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
/*
* ffs_lock must be taken by the caller of this function
*/
-struct ffs_dev *ffs_alloc_dev(void)
+static struct ffs_dev *_ffs_alloc_dev(void)
{
struct ffs_dev *dev;
int ret;
- if (ffs_get_single_dev())
+ if (_ffs_get_single_dev())
return ERR_PTR(-EBUSY);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -2506,10 +2854,10 @@ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
- existing = _ffs_find_dev(name);
+ existing = _ffs_do_find_dev(name);
if (existing)
return -EBUSY;
-
+
dev->name = name;
return 0;
@@ -2550,7 +2898,7 @@ EXPORT_SYMBOL(ffs_single_dev);
/*
* ffs_lock must be taken by the caller of this function
*/
-void ffs_free_dev(struct ffs_dev *dev)
+static void _ffs_free_dev(struct ffs_dev *dev)
{
list_del(&dev->entry);
if (dev->name_allocated)
@@ -2567,7 +2915,7 @@ static void *ffs_acquire_dev(const char *dev_name)
ENTER();
ffs_dev_lock();
- ffs_dev = ffs_find_dev(dev_name);
+ ffs_dev = _ffs_find_dev(dev_name);
if (!ffs_dev)
ffs_dev = ERR_PTR(-ENODEV);
else if (ffs_dev->mounted)
@@ -2590,11 +2938,12 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
- if (ffs_dev)
+ if (ffs_dev) {
ffs_dev->mounted = false;
-
- if (ffs_dev->ffs_release_dev_callback)
- ffs_dev->ffs_release_dev_callback(ffs_dev);
+
+ if (ffs_dev->ffs_release_dev_callback)
+ ffs_dev->ffs_release_dev_callback(ffs_dev);
+ }
ffs_dev_unlock();
}
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 36d4bb23087f..807b31c0edc3 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -664,9 +664,10 @@ static int f_midi_register_card(struct f_midi *midi)
.dev_free = f_midi_snd_free,
};
- err = snd_card_create(midi->index, midi->id, THIS_MODULE, 0, &card);
+ err = snd_card_new(&midi->gadget->dev, midi->index, midi->id,
+ THIS_MODULE, 0, &card);
if (err < 0) {
- ERROR(midi, "snd_card_create() failed\n");
+ ERROR(midi, "snd_card_new() failed\n");
goto fail;
}
midi->card = card;
@@ -703,8 +704,6 @@ static int f_midi_register_card(struct f_midi *midi)
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops);
- snd_card_set_dev(card, &midi->gadget->dev);
-
/* register it - we're ready to go */
err = snd_card_register(card);
if (err < 0) {
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index f1a59190ac9a..df4a0dcbc993 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -276,7 +276,7 @@ static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
net = gether_connect(&geth->port);
- return IS_ERR(net) ? PTR_ERR(net) : 0;
+ return PTR_RET(net);
}
static void geth_disable(struct usb_function *f)
diff --git a/drivers/usb/gadget/f_uac2.c b/drivers/usb/gadget/f_uac2.c
index 2f23566e53d8..bc23040c7790 100644
--- a/drivers/usb/gadget/f_uac2.c
+++ b/drivers/usb/gadget/f_uac2.c
@@ -394,7 +394,7 @@ static int snd_uac2_probe(struct platform_device *pdev)
int err;
/* Choose any slot, with no id */
- err = snd_card_create(-1, NULL, THIS_MODULE, 0, &card);
+ err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -421,8 +421,6 @@ static int snd_uac2_probe(struct platform_device *pdev)
strcpy(card->shortname, "UAC2_Gadget");
sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
- snd_card_set_dev(card, &pdev->dev);
-
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c
index 914cbd84ee40..f984ee75324d 100644
--- a/drivers/usb/gadget/gr_udc.c
+++ b/drivers/usb/gadget/gr_udc.c
@@ -225,14 +225,8 @@ static void gr_dfs_create(struct gr_udc *dev)
const char *name = "gr_udc_state";
dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
- if (IS_ERR(dev->dfs_root)) {
- dev_err(dev->dev, "Failed to create debugfs directory\n");
- return;
- }
- dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root,
- dev, &gr_dfs_fops);
- if (IS_ERR(dev->dfs_state))
- dev_err(dev->dev, "Failed to create debugfs file %s\n", name);
+ dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
+ &gr_dfs_fops);
}
static void gr_dfs_delete(struct gr_udc *dev)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index b94c049ab0d0..b5be6f0308c2 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -439,11 +439,9 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
value = -ENOMEM;
- kbuf = kmalloc (len, GFP_KERNEL);
- if (!kbuf)
- goto free1;
- if (copy_from_user (kbuf, buf, len)) {
- value = -EFAULT;
+ kbuf = memdup_user(buf, len);
+ if (IS_ERR(kbuf)) {
+ value = PTR_ERR(kbuf);
goto free1;
}
@@ -452,7 +450,6 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
- kfree (kbuf);
return value;
}
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 049ebab0d360..a139894c600f 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -3295,9 +3295,9 @@ usb_clk_enable_fail:
pll_set_fail:
clk_disable(udc->usb_pll_clk);
pll_enable_fail:
- clk_put(udc->usb_slv_clk);
-usb_otg_clk_get_fail:
clk_put(udc->usb_otg_clk);
+usb_otg_clk_get_fail:
+ clk_put(udc->usb_slv_clk);
usb_clk_get_fail:
clk_put(udc->usb_pll_clk);
pll_get_fail:
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index bf7a56b6d48a..6474081dcbaf 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -427,12 +427,17 @@ setup_rx_reqs(struct printer_dev *dev)
req->length = USB_BUFSIZE;
req->complete = rx_complete;
+ /* drop the lock only around this call, to avoid a deadlock */
+ spin_unlock(&dev->lock);
error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
+ spin_lock(&dev->lock);
if (error) {
DBG(dev, "rx submit --> %d\n", error);
list_add(&req->list, &dev->rx_reqs);
break;
- } else {
+ }
+ /* if the req is not already on a list, add it to dev->rx_reqs_active */
+ else if (list_empty(&req->list)) {
list_add(&req->list, &dev->rx_reqs_active);
}
}
@@ -1133,6 +1138,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
NULL, "g_printer");
if (IS_ERR(dev->pdev)) {
ERROR(dev, "Failed to create device: g_printer\n");
+ status = PTR_ERR(dev->pdev);
goto fail;
}
@@ -1157,7 +1163,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
usb_gadget_set_selfpowered(gadget);
- if (gadget->is_otg) {
+ if (gadget_is_otg(gadget)) {
otg_descriptor.bmAttributes |= USB_OTG_HNP;
printer_cfg_driver.descriptors = otg_desc;
printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 1172eaeddd85..2a9cb674926a 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -617,7 +617,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
to_write = DIV_ROUND_UP(to_write, 4);
data = hs_req->req.buf + buf_pos;
- writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
+ iowrite32_rep(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
return (to_write >= can_write) ? -ENOSPC : 0;
}
@@ -720,8 +720,8 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
ureq->length, ureq->actual);
if (0)
dev_dbg(hsotg->dev,
- "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
- ureq->buf, length, ureq->dma,
+ "REQ buf %p len %d dma 0x%pad noi=%d zp=%d snok=%d\n",
+ ureq->buf, length, &ureq->dma,
ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
maxreq = get_ep_limit(hs_ep);
@@ -789,8 +789,8 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
writel(ureq->dma, hsotg->regs + dma_reg);
- dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
- __func__, ureq->dma, dma_reg);
+ dev_dbg(hsotg->dev, "%s: 0x%pad => 0x%08x\n",
+ __func__, &ureq->dma, dma_reg);
}
ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
@@ -1186,6 +1186,41 @@ static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
/**
+ * s3c_hsotg_stall_ep0 - stall ep0
+ * @hsotg: The device state
+ *
+ * Set stall for ep0 as response for setup request.
+ */
+static void s3c_hsotg_stall_ep0(struct s3c_hsotg *hsotg)
+{
+ struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+ u32 reg;
+ u32 ctrl;
+
+ dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+ reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
+
+ /*
+ * DxEPCTL_Stall will be cleared by EP once it has
+ * taken effect, so no need to clear later.
+ */
+
+ ctrl = readl(hsotg->regs + reg);
+ ctrl |= DxEPCTL_Stall;
+ ctrl |= DxEPCTL_CNAK;
+ writel(ctrl, hsotg->regs + reg);
+
+ dev_dbg(hsotg->dev,
+ "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+ ctrl, reg, readl(hsotg->regs + reg));
+
+ /*
+ * complete won't be called, so we enqueue
+ * setup request here
+ */
+ s3c_hsotg_enqueue_setup(hsotg);
+}
+
+/**
* s3c_hsotg_process_control - process a control request
* @hsotg: The device state
* @ctrl: The control request received
@@ -1262,38 +1297,8 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
* so respond with a STALL for the status stage to indicate failure.
*/
- if (ret < 0) {
- u32 reg;
- u32 ctrl;
-
- dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
- reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
-
- /*
- * DxEPCTL_Stall will be cleared by EP once it has
- * taken effect, so no need to clear later.
- */
-
- ctrl = readl(hsotg->regs + reg);
- ctrl |= DxEPCTL_Stall;
- ctrl |= DxEPCTL_CNAK;
- writel(ctrl, hsotg->regs + reg);
-
- dev_dbg(hsotg->dev,
- "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
- ctrl, reg, readl(hsotg->regs + reg));
-
- /*
- * don't believe we need to anything more to get the EP
- * to reply with a STALL packet
- */
-
- /*
- * complete won't be called, so we enqueue
- * setup request here
- */
- s3c_hsotg_enqueue_setup(hsotg);
- }
+ if (ret < 0)
+ s3c_hsotg_stall_ep0(hsotg);
}
/**
@@ -1488,7 +1493,7 @@ static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
* note, we might over-write the buffer end by 3 bytes depending on
* alignment of the data.
*/
- readsl(fifo, hs_req->req.buf + read_ptr, to_read);
+ ioread32_rep(fifo, hs_req->req.buf + read_ptr, to_read);
}
/**
@@ -2832,6 +2837,15 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+ if (index == 0) {
+ if (value)
+ s3c_hsotg_stall_ep0(hs);
+ else
+ dev_warn(hs->dev,
+ "%s: can't clear halt on ep0\n", __func__);
+ return 0;
+ }
+
/* write both IN and OUT control registers */
epreg = DIEPCTL(index);
@@ -3760,10 +3774,55 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
return 0;
}
-#if 1
-#define s3c_hsotg_suspend NULL
-#define s3c_hsotg_resume NULL
-#endif
+static int s3c_hsotg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (hsotg->driver)
+ dev_info(hsotg->dev, "suspending usb gadget %s\n",
+ hsotg->driver->driver.name);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ s3c_hsotg_disconnect(hsotg);
+ s3c_hsotg_phy_disable(hsotg);
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if (hsotg->driver) {
+ int ep;
+ for (ep = 0; ep < hsotg->num_of_eps; ep++)
+ s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ }
+
+ return ret;
+}
+
+static int s3c_hsotg_resume(struct platform_device *pdev)
+{
+ struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (hsotg->driver) {
+ dev_info(hsotg->dev, "resuming usb gadget %s\n",
+ hsotg->driver->driver.name);
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
+ hsotg->supplies);
+ }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ hsotg->last_rst = jiffies;
+ s3c_hsotg_phy_enable(hsotg);
+ s3c_hsotg_core_init(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return ret;
+}
#ifdef CONFIG_OF
static const struct of_device_id s3c_hsotg_of_ids[] = {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index ea4bbfe72ec0..10c6a128250c 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1344,7 +1344,6 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
-err_add_device:
clk_disable(hsudc->uclk);
err_res:
if (!IS_ERR_OR_NULL(hsudc->transceiver))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index f04b2c3154de..dd9678f85c58 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,7 +1629,7 @@ static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
ep->ep.desc = NULL;
ep->halted = 0;
INIT_LIST_HEAD(&ep->queue);
- usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
}
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 0f8aad78b54f..460c266b8e24 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1613,7 +1613,7 @@ static struct se_wwn *usbg_make_tport(
return ERR_PTR(-ENOMEM);
}
tport->tport_wwpn = wwpn;
- snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name);
+ snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
return &tport->tport_wwn;
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index b7d4f82872b7..50d09c289137 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,6 +48,8 @@
#define UETH__VERSION "29-May-2008"
+#define GETHER_NAPI_WEIGHT 32
+
struct eth_dev {
/* lock is held while accessing port_usb
*/
@@ -72,6 +74,7 @@ struct eth_dev {
struct sk_buff_head *list);
struct work_struct work;
+ struct napi_struct rx_napi;
unsigned long todo;
#define WORK_RX_MEMORY 0
@@ -253,18 +256,16 @@ enomem:
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
- spin_lock_irqsave(&dev->req_lock, flags);
- list_add(&req->list, &dev->rx_reqs);
- spin_unlock_irqrestore(&dev->req_lock, flags);
}
return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = req->context, *skb2;
+ struct sk_buff *skb = req->context;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
+ bool rx_queue = false;
switch (status) {
@@ -288,30 +289,8 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
- skb = NULL;
-
- skb2 = skb_dequeue(&dev->rx_frames);
- while (skb2) {
- if (status < 0
- || ETH_HLEN > skb2->len
- || skb2->len > VLAN_ETH_FRAME_LEN) {
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- DBG(dev, "rx length %d\n", skb2->len);
- dev_kfree_skb_any(skb2);
- goto next_frame;
- }
- skb2->protocol = eth_type_trans(skb2, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb2->len;
-
- /* no buffer copies needed, unless hardware can't
- * use skb buffers.
- */
- status = netif_rx(skb2);
-next_frame:
- skb2 = skb_dequeue(&dev->rx_frames);
- }
+ if (!status)
+ rx_queue = true;
break;
/* software-driven interface shutdown */
@@ -334,22 +313,20 @@ quiesce:
/* FALLTHROUGH */
default:
+ rx_queue = true;
+ dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}
- if (skb)
- dev_kfree_skb_any(skb);
- if (!netif_running(dev->net)) {
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);
- req = NULL;
- }
- if (req)
- rx_submit(dev, req, GFP_ATOMIC);
+
+ if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
+ __napi_schedule(&dev->rx_napi);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -414,16 +391,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
+ int rx_counts = 0;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty(&dev->rx_reqs)) {
+
+ if (++rx_counts > qlen(dev->gadget, dev->qmult))
+ break;
+
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);
if (rx_submit(dev, req, gfp_flags) < 0) {
+ spin_lock_irqsave(&dev->req_lock, flags);
+ list_add(&req->list, &dev->rx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
defer_kevent(dev, WORK_RX_MEMORY);
return;
}
@@ -433,6 +418,41 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
spin_unlock_irqrestore(&dev->req_lock, flags);
}
+static int gether_poll(struct napi_struct *napi, int budget)
+{
+ struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
+ struct sk_buff *skb;
+ unsigned int work_done = 0;
+ int status = 0;
+
+ while ((skb = skb_dequeue(&dev->rx_frames))) {
+ if (status < 0
+ || ETH_HLEN > skb->len
+ || skb->len > VLAN_ETH_FRAME_LEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ DBG(dev, "rx length %d\n", skb->len);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+ skb->protocol = eth_type_trans(skb, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
+
+ status = netif_rx_ni(skb);
+ }
+
+ if (netif_running(dev->net)) {
+ rx_fill(dev, GFP_KERNEL);
+ work_done++;
+ }
+
+ if (work_done < budget)
+ napi_complete(&dev->rx_napi);
+
+ return work_done;
+}
+
static void eth_work(struct work_struct *work)
{
struct eth_dev *dev = container_of(work, struct eth_dev, work);
@@ -625,6 +645,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
/* and open the tx floodgates */
atomic_set(&dev->tx_qlen, 0);
netif_wake_queue(dev->net);
+ napi_enable(&dev->rx_napi);
}
static int eth_open(struct net_device *net)
@@ -651,6 +672,7 @@ static int eth_stop(struct net_device *net)
unsigned long flags;
VDBG(dev, "%s\n", __func__);
+ napi_disable(&dev->rx_napi);
netif_stop_queue(net);
DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -768,6 +790,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
+ netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -830,6 +853,7 @@ struct net_device *gether_setup_name_default(const char *netname)
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
+ netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -1113,6 +1137,7 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
+ struct sk_buff *skb;
WARN_ON(!dev);
if (!dev)
@@ -1139,6 +1164,12 @@ void gether_disconnect(struct gether *link)
spin_lock(&dev->req_lock);
}
spin_unlock(&dev->req_lock);
+
+ spin_lock(&dev->rx_frames.lock);
+ while ((skb = __skb_dequeue(&dev->rx_frames)))
+ dev_kfree_skb_any(skb);
+ spin_unlock(&dev->rx_frames.lock);
+
link->in_ep->driver_data = NULL;
link->in_ep->desc = NULL;
diff --git a/drivers/usb/gadget/u_fs.h b/drivers/usb/gadget/u_fs.h
index bc2d3718219b..bf0ba375d459 100644
--- a/drivers/usb/gadget/u_fs.h
+++ b/drivers/usb/gadget/u_fs.h
@@ -65,10 +65,8 @@ static inline void ffs_dev_unlock(void)
mutex_unlock(&ffs_lock);
}
-struct ffs_dev *ffs_alloc_dev(void);
int ffs_name_dev(struct ffs_dev *dev, const char *name);
int ffs_single_dev(struct ffs_dev *dev);
-void ffs_free_dev(struct ffs_dev *dev);
struct ffs_epfile;
struct ffs_function;
@@ -125,7 +123,7 @@ enum ffs_setup_state {
* setup. If this state is set read/write on ep0 return
* -EIDRM. This state is only set when adding event.
*/
- FFS_SETUP_CANCELED
+ FFS_SETUP_CANCELLED
};
struct ffs_data {
@@ -156,7 +154,6 @@ struct ffs_data {
*/
struct usb_request *ep0req; /* P: mutex */
struct completion ep0req_completion; /* P: mutex */
- int ep0req_status; /* P: mutex */
/* reference counter */
atomic_t ref;
@@ -168,19 +165,18 @@ struct ffs_data {
/*
* Possible transitions:
- * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
+ * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
* happens only in ep0 read which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
+ * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
* happens only in ep0 i/o which is P: mutex
- * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
- * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
+ * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELLED -- P: ev.waitq.lock
+ * + FFS_SETUP_CANCELLED -> FFS_NO_SETUP -- cmpxchg
+ *
+ * This field should never be accessed directly; use the
+ * ffs_setup_state_clear_cancelled() helper instead.
*/
enum ffs_setup_state setup_state;
-#define FFS_SETUP_STATE(ffs) \
- ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
- FFS_SETUP_CANCELED, FFS_NO_SETUP))
-
/* Events & such. */
struct {
u8 types[4];
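The helper referred to in the comment above replaces the removed FFS_SETUP_STATE() macro. As a sketch only (its actual definition lives in f_fs.c and is not shown in the hunks here), it presumably reduces to the same cmpxchg the macro performed:

/* Sketch of the accessor that supersedes FFS_SETUP_STATE(). */
static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	/* atomically fold a cancelled setup back into the "no setup" state */
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}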
@@ -210,16 +206,16 @@ struct ffs_data {
/* filled by __ffs_data_got_descs() */
/*
- * Real descriptors are 16 bytes after raw_descs (so you need
- * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
- * first full speed descriptor). raw_descs_length and
- * raw_fs_descs_length do not have those 16 bytes added.
+ * raw_descs is what you kfree, real_descs points inside of raw_descs,
+ * where full speed, high speed and super speed descriptors start.
+ * real_descs_length is the length of all those descriptors.
*/
+ const void *raw_descs_data;
const void *raw_descs;
unsigned raw_descs_length;
- unsigned raw_fs_descs_length;
unsigned fs_descs_count;
unsigned hs_descs_count;
+ unsigned ss_descs_count;
unsigned short strings_count;
unsigned short interfaces_count;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index b369292d4b90..ad0aca812002 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port)
port->read_started--;
}
- /* Push from tty to ldisc; without low_latency set this is handled by
- * a workqueue, so we won't get callbacks and can hold port_lock
+ /* Push from tty to ldisc; this is handled by a workqueue,
+ * so we won't get callbacks and can hold port_lock
*/
if (do_push)
tty_flip_buffer_push(&port->port);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index a9707da7da0b..3d9e54062d62 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -314,7 +314,6 @@ config USB_ISP1760_HCD
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
- default N
---help---
Supports the Philips ISP1362 chip as a host controller
@@ -326,7 +325,6 @@ config USB_ISP1362_HCD
config USB_FUSBH200_HCD
tristate "FUSBH200 HCD support"
depends on USB
- default N
---help---
Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
with minor modification.
@@ -337,7 +335,6 @@ config USB_FUSBH200_HCD
config USB_FOTG210_HCD
tristate "FOTG210 HCD support"
depends on USB
- default N
---help---
Faraday FOTG210 is an OTG controller which can be configured as
an USB2.0 host. It is designed to meet USB2.0 EHCI specification
@@ -584,7 +581,6 @@ config FHCI_DEBUG
config USB_U132_HCD
tristate "Elan U132 Adapter Host Controller"
depends on USB_FTDI_ELAN
- default M
help
The U132 adapter is a USB to CardBus adapter specifically designed
for PC cards that contain an OHCI host controller. Typical PC cards
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 471142725ffe..81cda09b47e3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -685,8 +685,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * For threadirqs option we use spin_lock_irqsave() variant to prevent
+ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
+ * in interrupt context even when threadirqs is specified. We can go
+ * back to spin_lock() variant when hrtimer callbacks become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -704,7 +711,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
- spin_unlock(&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -815,7 +822,7 @@ dead:
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 47b858fc50b2..7ae0c4d51741 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
int port;
int mask;
int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
@@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
+ fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
if (t1 != t2) {
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
+
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
if (changed && ehci->has_tdi_phy_lpm) {
- spin_unlock_irq(&ehci->lock);
- msleep(5); /* 5 ms for HCD to enter low-power mode */
spin_lock_irq(&ehci->lock);
-
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ spin_unlock_irq(&ehci->lock);
}
- spin_unlock_irq(&ehci->lock);
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 01536cfd361d..b3a0e11073aa 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Steven Brown <sbrown@cortland.com>
* Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
*
* Derived from the ohci-ssb driver
* Copyright 2007 Michael Buesch <m@bues.ch>
@@ -18,6 +19,7 @@
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -25,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -33,6 +36,13 @@
#include "ehci.h"
#define DRIVER_DESC "EHCI generic platform driver"
+#define EHCI_MAX_CLKS 3
+#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+struct ehci_platform_priv {
+ struct clk *clks[EHCI_MAX_CLKS];
+ struct phy *phy;
+};
static const char hcd_name[] = "ehci-platform";
@@ -45,8 +55,6 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
hcd->has_tt = pdata->has_tt;
ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
- ehci->big_endian_desc = pdata->big_endian_desc;
- ehci->big_endian_mmio = pdata->big_endian_mmio;
if (pdata->pre_setup) {
retval = pdata->pre_setup(hcd);
@@ -64,38 +72,91 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
return 0;
}
+static int ehci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ehci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
static struct hc_driver __read_mostly ehci_platform_hc_driver;
static const struct ehci_driver_overrides platform_overrides __initconst = {
- .reset = ehci_platform_reset,
+ .reset = ehci_platform_reset,
+ .extra_priv_size = sizeof(struct ehci_platform_priv),
};
-static struct usb_ehci_pdata ehci_platform_defaults;
+static struct usb_ehci_pdata ehci_platform_defaults = {
+ .power_on = ehci_platform_power_on,
+ .power_suspend = ehci_platform_power_off,
+ .power_off = ehci_platform_power_off,
+};
static int ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
- struct usb_ehci_pdata *pdata;
- int irq;
- int err;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv;
+ struct ehci_hcd *ehci;
+ int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
/*
- * use reasonable defaults so platforms don't have to provide these.
- * with DT probing on ARM, none of these are set.
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
*/
- if (!dev_get_platdata(&dev->dev))
- dev->dev.platform_data = &ehci_platform_defaults;
+ if (!pdata)
+ pdata = &ehci_platform_defaults;
err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err)
return err;
- pdata = dev_get_platdata(&dev->dev);
-
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "no irq provided");
@@ -107,17 +168,72 @@ static int ehci_platform_probe(struct platform_device *dev)
return -ENXIO;
}
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ehci_priv(hcd);
+ ehci = hcd_to_ehci(hcd);
+
+ if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ehci->big_endian_mmio = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ehci->big_endian_desc = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ehci->big_endian_mmio = ehci->big_endian_desc = 1;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pdata->big_endian_desc)
+ ehci->big_endian_desc = 1;
+ if (pdata->big_endian_mmio)
+ ehci->big_endian_mmio = 1;
+
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+ if (ehci->big_endian_desc) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- return err;
- }
-
- hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
- dev_name(&dev->dev));
- if (!hcd) {
- err = -ENOMEM;
- goto err_power;
+ goto err_put_clks;
}
hcd->rsrc_start = res_mem->start;
@@ -126,22 +242,28 @@ static int ehci_platform_probe(struct platform_device *dev)
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
- goto err_put_hcd;
+ goto err_power;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err_put_hcd;
+ goto err_power;
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
-err_put_hcd:
- usb_put_hcd(hcd);
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
return err;
}
@@ -150,13 +272,19 @@ static int ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
if (pdata == &ehci_platform_defaults)
dev->dev.platform_data = NULL;
@@ -207,8 +335,10 @@ static int ehci_platform_resume(struct device *dev)
static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
{ .compatible = "wm,prizm-ehci", },
+ { .compatible = "generic-ehci", },
{}
};
+MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
static const struct platform_device_id ehci_platform_table[] = {
{ "ehci-platform", 0 },
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index a8f4471dae7b..27ac6ad53c3d 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -17,7 +17,6 @@
*/
#include <linux/clk.h>
-#include <linux/clk/tegra.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
@@ -29,6 +28,7 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/tegra_usb_phy.h>
@@ -38,10 +38,6 @@
#include "ehci.h"
-#define TEGRA_USB_BASE 0xC5000000
-#define TEGRA_USB2_BASE 0xC5004000
-#define TEGRA_USB3_BASE 0xC5008000
-
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#define TEGRA_USB_DMA_ALIGN 32
@@ -62,6 +58,7 @@ static int (*orig_hub_control)(struct usb_hcd *hcd,
struct tegra_ehci_hcd {
struct tegra_usb_phy *phy;
struct clk *clk;
+ struct reset_control *rst;
int port_resuming;
bool needs_double_reset;
enum tegra_usb_phy_port_speed port_speed;
@@ -385,13 +382,20 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_hcd_create;
}
+ tegra->rst = devm_reset_control_get(&pdev->dev, "usb");
+ if (IS_ERR(tegra->rst)) {
+ dev_err(&pdev->dev, "Can't get ehci reset\n");
+ err = PTR_ERR(tegra->rst);
+ goto cleanup_hcd_create;
+ }
+
err = clk_prepare_enable(tegra->clk);
if (err)
goto cleanup_hcd_create;
- tegra_periph_reset_assert(tegra->clk);
+ reset_control_assert(tegra->rst);
udelay(1);
- tegra_periph_reset_deassert(tegra->clk);
+ reset_control_deassert(tegra->rst);
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index abd5050a4899..9162d1b6c0a3 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -261,19 +261,8 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
int err;
- char clk_name[10];
- int base, clk_num;
-
- base = pdev->resource->start & 0xf000;
- if (base == 0x3000)
- clk_num = 1;
- else if (base == 0x4000)
- clk_num = 2;
- else
- return -ENODEV;
- snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
- clk = devm_clk_get(pdev->dev.parent, clk_name);
+ clk = devm_clk_get(pdev->dev.parent, "ipg");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
return PTR_ERR(clk);
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index e07669993f58..d0d8fadf7066 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -261,8 +261,44 @@ static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
dev_err(dev, "cannot listen to notifications: %d\n", result);
goto error_stop;
}
+ /*
+ * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+ * disable transfer notifications.
+ */
+ if (hwahc->wa.quirks &
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+ struct usb_host_interface *cur_altsetting =
+ hwahc->wa.usb_iface->cur_altsetting;
+
+ result = usb_control_msg(hwahc->wa.usb_dev,
+ usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ WA_REQ_ALEREON_FEATURE_SET,
+ cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ /*
+ * If we successfully sent the control message, start DTI here
+ * because no transfer notifications will be received which is
+ * where DTI is normally started.
+ */
+ if (result == 0)
+ result = wa_dti_start(&hwahc->wa);
+ else
+ result = 0; /* OK. Continue normally. */
+
+ if (result < 0) {
+ dev_err(dev, "cannot start DTI: %d\n", result);
+ goto error_dti_start;
+ }
+ }
+
return result;
+error_dti_start:
+ wa_nep_disarm(&hwahc->wa);
error_stop:
__wa_clear_feature(&hwahc->wa, WA_ENABLE);
return result;
@@ -827,10 +863,12 @@ static void hwahc_disconnect(struct usb_interface *usb_iface)
static struct usb_device_id hwahc_id_table[] = {
/* Alereon 5310 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* Alereon 5611 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
- .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 68f674cd095f..b6002c951c5c 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Michael Buesch <m@bues.ch>
* Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
*
* Derived from the OCHI-SSB driver
* Derived from the OHCI-PCI driver
@@ -14,11 +15,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/ohci_pdriver.h>
#include <linux/usb.h>
@@ -27,6 +31,13 @@
#include "ohci.h"
#define DRIVER_DESC "OHCI generic platform driver"
+#define OHCI_MAX_CLKS 3
+#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
+
+struct ohci_platform_priv {
+ struct clk *clks[OHCI_MAX_CLKS];
+ struct phy *phy;
+};
static const char hcd_name[] = "ohci-platform";
@@ -36,10 +47,6 @@ static int ohci_platform_reset(struct usb_hcd *hcd)
struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- if (pdata->big_endian_desc)
- ohci->flags |= OHCI_QUIRK_BE_DESC;
- if (pdata->big_endian_mmio)
- ohci->flags |= OHCI_QUIRK_BE_MMIO;
if (pdata->no_big_frame_no)
ohci->flags |= OHCI_QUIRK_FRAME_NO;
if (pdata->num_ports)
@@ -48,11 +55,67 @@ static int ohci_platform_reset(struct usb_hcd *hcd)
return ohci_setup(hcd);
}
+static int ohci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ohci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
static struct hc_driver __read_mostly ohci_platform_hc_driver;
static const struct ohci_driver_overrides platform_overrides __initconst = {
- .product_desc = "Generic Platform OHCI controller",
- .reset = ohci_platform_reset,
+ .product_desc = "Generic Platform OHCI controller",
+ .reset = ohci_platform_reset,
+ .extra_priv_size = sizeof(struct ohci_platform_priv),
+};
+
+static struct usb_ohci_pdata ohci_platform_defaults = {
+ .power_on = ohci_platform_power_on,
+ .power_suspend = ohci_platform_power_off,
+ .power_off = ohci_platform_power_off,
};
static int ohci_platform_probe(struct platform_device *dev)
@@ -60,17 +123,24 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
- int irq;
- int err = -ENOMEM;
-
- if (!pdata) {
- WARN_ON(1);
- return -ENODEV;
- }
+ struct ohci_platform_priv *priv;
+ struct ohci_hcd *ohci;
+ int err, irq, clk = 0;
if (usb_disabled())
return -ENODEV;
+ /*
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
+ */
+ if (!pdata)
+ pdata = &ohci_platform_defaults;
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "no irq provided");
@@ -83,17 +153,72 @@ static int ohci_platform_probe(struct platform_device *dev)
return -ENXIO;
}
+ hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ohci_priv(hcd);
+ ohci = hcd_to_ohci(hcd);
+
+ if (pdata == &ohci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ if (pdata->big_endian_desc)
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+ if (pdata->big_endian_mmio)
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+ if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+ if (ohci->flags & OHCI_QUIRK_BE_DESC) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_put_clks;
+ }
+#endif
+
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0)
- return err;
- }
-
- hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
- dev_name(&dev->dev));
- if (!hcd) {
- err = -ENOMEM;
- goto err_power;
+ goto err_put_clks;
}
hcd->rsrc_start = res_mem->start;
@@ -102,11 +227,11 @@ static int ohci_platform_probe(struct platform_device *dev)
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
- goto err_put_hcd;
+ goto err_power;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err_put_hcd;
+ goto err_power;
device_wakeup_enable(hcd->self.controller);
@@ -114,11 +239,17 @@ static int ohci_platform_probe(struct platform_device *dev)
return err;
-err_put_hcd:
- usb_put_hcd(hcd);
err_power:
if (pdata->power_off)
pdata->power_off(dev);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
return err;
}
@@ -127,13 +258,22 @@ static int ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int clk;
usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
+ for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
+ if (pdata == &ohci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
return 0;
}
@@ -180,6 +320,12 @@ static int ohci_platform_resume(struct device *dev)
#define ohci_platform_resume NULL
#endif /* CONFIG_PM */
+static const struct of_device_id ohci_platform_ids[] = {
+ { .compatible = "generic-ohci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ohci_platform_ids);
+
static const struct platform_device_id ohci_platform_table[] = {
{ "ohci-platform", 0 },
{ }
@@ -200,6 +346,7 @@ static struct platform_driver ohci_platform_driver = {
.owner = THIS_MODULE,
.name = "ohci-platform",
.pm = &ohci_platform_pm_ops,
+ .of_match_table = ohci_platform_ids,
}
};
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c6c325e4d80d..110b4b9ebeaa 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -94,7 +94,7 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
int i = 0;
if (r8a66597->pdata->on_chip) {
- clk_enable(r8a66597->clk);
+ clk_prepare_enable(r8a66597->clk);
do {
r8a66597_write(r8a66597, SCKE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
@@ -138,7 +138,7 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
udelay(1);
if (r8a66597->pdata->on_chip) {
- clk_disable(r8a66597->clk);
+ clk_disable_unprepare(r8a66597->clk);
} else {
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 44e6c9da8892..01833ab2b5c3 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -148,6 +148,7 @@ static void uhci_hcd_platform_shutdown(struct platform_device *op)
}
static const struct of_device_id platform_uhci_ids[] = {
+ { .compatible = "generic-uhci", },
{ .compatible = "platform-uhci", },
{}
};
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38199f2..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
u64 val;
- val = readq(&xhci->op_regs->cmd_ring);
+ val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
lower_32_bits(val));
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9992fbfec85f..1ad6bc1951c7 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -732,9 +732,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Set the U1 and U2 exit latencies. */
memcpy(buf, &usb_bos_descriptor,
USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
- temp = readl(&xhci->cap_regs->hcs_params3);
- buf[12] = HCS_U1_LATENCY(temp);
- put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+ if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
+ temp = readl(&xhci->cap_regs->hcs_params3);
+ buf[12] = HCS_U1_LATENCY(temp);
+ put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+ }
/* Indicate whether the host has LTM support. */
temp = readl(&xhci->cap_regs->hcc_params);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272b3ef5..c089668308ad 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -149,14 +149,140 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
}
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *seg,
+ gfp_t mem_flags)
+{
+ unsigned long key;
+ int ret;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ /* Skip any segments that were already added. */
+ if (radix_tree_lookup(trb_address_map, key))
+ return 0;
+
+ ret = radix_tree_maybe_preload(mem_flags);
+ if (ret)
+ return ret;
+ ret = radix_tree_insert(trb_address_map,
+ key, ring);
+ radix_tree_preload_end();
+ return ret;
+}
+
+static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct xhci_segment *seg)
+{
+ unsigned long key;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ if (radix_tree_lookup(trb_address_map, key))
+ radix_tree_delete(trb_address_map, key);
+}
+
+static int xhci_update_stream_segment_mapping(
+ struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *first_seg,
+ struct xhci_segment *last_seg,
+ gfp_t mem_flags)
+{
+ struct xhci_segment *seg;
+ struct xhci_segment *failed_seg;
+ int ret;
+
+ if (WARN_ON_ONCE(trb_address_map == NULL))
+ return 0;
+
+ seg = first_seg;
+ do {
+ ret = xhci_insert_segment_mapping(trb_address_map,
+ ring, seg, mem_flags);
+ if (ret)
+ goto remove_streams;
+ if (seg == last_seg)
+ return 0;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return 0;
+
+remove_streams:
+ failed_seg = seg;
+ seg = first_seg;
+ do {
+ xhci_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return ret;
+}
+
+static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+
+ if (WARN_ON_ONCE(ring->trb_address_map == NULL))
+ return;
+
+ seg = ring->first_seg;
+ do {
+ xhci_remove_segment_mapping(ring->trb_address_map, seg);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+}
+
+static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+{
+ return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+ ring->first_seg, ring->last_seg, mem_flags);
+}
+
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
- if (ring->first_seg)
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ xhci_remove_stream_mapping(ring);
xhci_free_segments_for_ring(xhci, ring->first_seg);
+ }
kfree(ring);
}
@@ -349,6 +475,21 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
+ if (ring->type == TYPE_STREAM)
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+ ring, first, last, flags);
+ if (ret) {
+ struct xhci_segment *next;
+ do {
+ next = first->next;
+ xhci_segment_free(xhci, first);
+ if (first == last)
+ break;
+ first = next;
+ } while (true);
+ return ret;
+ }
+
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
@@ -434,12 +575,12 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- dma_free_coherent(dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ dma_free_coherent(dev, size,
stream_ctx, dma);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
@@ -462,12 +603,12 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
- if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return dma_alloc_coherent(dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ if (size > MEDIUM_STREAM_ARRAY_SIZE)
+ return dma_alloc_coherent(dev, size,
dma, mem_flags);
- else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
@@ -510,36 +651,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
- *
- * We need a radix tree for mapping physical addresses of TRBs to which stream
- * ID they belong to. We need to do this because the host controller won't tell
- * us which stream ring the TRB came from. We could store the stream ID in an
- * event data TRB, but that doesn't help us for the cancellation case, since the
- * endpoint may stop before it reaches that event data TRB.
- *
- * The radix tree maps the upper portion of the TRB DMA address to a ring
- * segment that has the same upper portion of DMA addresses. For example, say I
- * have segments of size 1KB, that are always 64-byte aligned. A segment may
- * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
- * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
- * pass the radix tree a key to get the right stream ID:
- *
- * 0x10c90fff >> 10 = 0x43243
- * 0x10c912c0 >> 10 = 0x43244
- * 0x10c91400 >> 10 = 0x43245
- *
- * Obviously, only those TRBs with DMA addresses that are within the segment
- * will make the radix tree return the stream ID for that ring.
- *
- * Caveats for the radix tree:
- *
- * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
- * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
- * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
- * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
- * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
- * extended systems (where the DMA address can be bigger than 32-bits),
- * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
@@ -548,7 +659,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
- unsigned long key;
u64 addr;
int ret;
@@ -603,6 +713,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
+ cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
@@ -612,10 +723,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
- key = (unsigned long)
- (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
- ret = radix_tree_insert(&stream_info->trb_address_map,
- key, cur_ring);
+ ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -635,9 +743,6 @@ cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -698,7 +803,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
{
int cur_stream;
struct xhci_ring *cur_ring;
- dma_addr_t addr;
if (!stream_info)
return;
@@ -707,9 +811,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
- addr = cur_ring->first_seg->dma;
- radix_tree_delete(&stream_info->trb_address_map,
- addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@@ -1711,7 +1812,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
- xhci->cmd_ring_reserved_trbs = 0;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
@@ -1776,6 +1876,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
no_bw:
+ xhci->cmd_ring_reserved_trbs = 0;
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
@@ -1958,7 +2059,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = readq(&xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -1967,7 +2068,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
- writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2269,16 +2370,17 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- writeq(dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
- * however, the command ring segment needs 64-byte aligned segments,
- * so we pick the greater alignment need.
+ * however, the command ring segment needs 64-byte aligned segments
+ * and our use of dma addresses in the trb_address_map radix tree needs
+ * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- TRB_SEGMENT_SIZE, 64, xhci->page_size);
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
@@ -2312,13 +2414,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%x", val);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2498,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = readq(&xhci->ir_set->erst_base);
+ val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- writeq(val_64, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
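
The relocated comment and the new xhci_insert_segment_mapping() both assume one radix-tree key per ring segment: with TRBS_PER_SEGMENT reduced to 64 (see the xhci.h hunk below) and 16-byte TRBs, TRB_SEGMENT_SIZE is 1 KB and TRB_SEGMENT_SHIFT is 10. A small self-contained check of that key arithmetic, reusing the addresses from the comment:

#include <stdio.h>
#include <stdint.h>

#define TRB_SEGMENT_SHIFT 10	/* log2(64 TRBs * 16 bytes per TRB) */

int main(void)
{
	uint64_t trb[] = { 0x10c90fffULL, 0x10c912c0ULL, 0x10c91400ULL };
	int i;

	for (i = 0; i < 3; i++)
		/* TRBs inside the same 1 KB segment map to the same key */
		printf("0x%llx >> %d = 0x%llx\n",
		       (unsigned long long)trb[i], TRB_SEGMENT_SHIFT,
		       (unsigned long long)(trb[i] >> TRB_SEGMENT_SHIFT));
	return 0;	/* prints 0x43243, 0x43244, 0x43245 */
}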
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c12a06b..47390e369cd4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
@@ -185,6 +190,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct usb_hcd *hcd;
driver = (struct hc_driver *)id->driver_data;
+
+ /* Prevent runtime suspending between USB-2 and USB-3 initialization */
+ pm_runtime_get_noresume(&dev->dev);
+
/* Register the USB 2.0 roothub.
* FIXME: USB core must know to register the USB 2.0 roothub first.
* This is sort of silly, because we could just set the HCD driver flags
@@ -194,7 +203,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = usb_hcd_pci_probe(dev, id);
if (retval)
- return retval;
+ goto put_runtime_pm;
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
@@ -217,11 +226,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
- /* We know the LPM timeout algorithms for this host, let the USB core
- * enable and disable LPM for devices under the USB 3.0 roothub.
- */
- if (xhci->quirks & XHCI_LPM_SUPPORT)
- hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
return 0;
@@ -229,6 +238,8 @@ put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_hcd_pci_remove(dev);
+put_runtime_pm:
+ pm_runtime_put_noidle(&dev->dev);
return retval;
}
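
xhci_pci_probe() above now holds a runtime-PM usage count across the whole two-roothub bring-up, so the controller cannot runtime-suspend between the USB-2 and USB-3 registrations, and drops it on every exit path. A minimal sketch of that bracket with hypothetical register_usb2()/register_usb3() stand-ins:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* hypothetical stand-ins for the two registration stages */
static int register_usb2(struct device *dev) { return 0; }
static int register_usb3(struct device *dev) { return 0; }
static void unregister_usb2(struct device *dev) { }

static int bracketed_probe_example(struct device *dev)
{
	int ret;

	pm_runtime_get_noresume(dev);	/* block runtime suspend, no wakeup */

	ret = register_usb2(dev);
	if (ret)
		goto put_rpm;
	ret = register_usb3(dev);
	if (ret)
		goto unreg_usb2;

	pm_runtime_put_noidle(dev);	/* both roothubs up, drop the count */
	return 0;

unreg_usb2:
	unregister_usb2(dev);
put_rpm:
	pm_runtime_put_noidle(dev);
	return ret;
}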
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8abda5c73ca1..151901ce1ba9 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -158,6 +158,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
@@ -226,6 +229,7 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id usb_xhci_of_match[] = {
+ { .compatible = "generic-xhci" },
{ .compatible = "xhci-platform" },
{ },
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c34526..5f926bea5ab1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
- temp_64 = readq(&xhci->op_regs->cmd_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (!(temp_64 & CMD_RING_RUNNING)) {
xhci_dbg(xhci, "Command ring had been stopped\n");
return 0;
}
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
- writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should
* time the completion of all xHCI commands, including
@@ -545,9 +546,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
@@ -572,8 +573,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ /* 4.6.9 the css flag is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ }
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -891,6 +900,57 @@ remove_finished_td:
/* Return to the event handler with xhci->lock re-acquired */
}
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_td *cur_td;
+
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td, td_list);
+ list_del_init(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ int slot_id, int ep_index)
+{
+ struct xhci_td *cur_td;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ring;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & EP_HAS_STREAMS) ||
+ (ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ int stream_id;
+
+ for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u, stream %u",
+ slot_id, ep_index, stream_id + 1);
+ xhci_kill_ring_urbs(xhci,
+ ep->stream_info->stream_rings[stream_id]);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u",
+ slot_id, ep_index);
+ xhci_kill_ring_urbs(xhci, ring);
+ }
+ while (!list_empty(&ep->cancelled_td_list)) {
+ cur_td = list_first_entry(&ep->cancelled_td_list,
+ struct xhci_td, cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -914,9 +974,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- struct xhci_virt_ep *temp_ep;
- struct xhci_ring *ring;
- struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
@@ -973,34 +1030,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- for (j = 0; j < 31; j++) {
- temp_ep = &xhci->devs[i]->eps[j];
- ring = temp_ep->ring;
- if (!ring)
- continue;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Killing URBs for slot ID %u, "
- "ep index %u", i, j);
- while (!list_empty(&ring->td_list)) {
- cur_td = list_first_entry(&ring->td_list,
- struct xhci_td,
- td_list);
- list_del_init(&cur_td->td_list);
- if (!list_empty(&cur_td->cancelled_td_list))
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- while (!list_empty(&temp_ep->cancelled_td_list)) {
- cur_td = list_first_entry(
- &temp_ep->cancelled_td_list,
- struct xhci_td,
- cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- }
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1072,17 +1103,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
+ ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN Set TR deq ptr command for "
- "freed stream ID %u\n",
+ xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -1098,12 +1130,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
switch (cmd_comp_code) {
case COMP_TRB_ERR:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
- "of stream ID configuration\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
- "to incorrect slot or ep state.\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
@@ -1113,13 +1143,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
slot_state, ep_state);
break;
case COMP_EBADSLT:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
- "slot %u was not enabled.\n", slot_id);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+ slot_id);
break;
default:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
- "completion code of %u.\n",
- cmd_comp_code);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -1129,23 +1158,28 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* cancelling URBs, which might not be an error...
*/
} else {
+ u64 deq;
+ /* 4.6.10 deq ptr is written to the stream ctx for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Successful Set TR Deq Ptr cmd, deq = @%08llx",
- le64_to_cpu(ep_ctx->deq));
- if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr) ==
- (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+ if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+ ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
- xhci_warn(xhci, "Mismatch between completed Set TR Deq "
- "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
- dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr);
+ ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
@@ -2864,8 +2898,9 @@ hw_died:
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
- writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
@@ -2877,7 +2912,7 @@ hw_died:
*/
while (xhci_handle_event(xhci) > 0) {}
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2927,7 @@ hw_died:
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
- writeq(temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
@@ -2965,58 +3000,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs)) {
- union xhci_trb *trb = ep_ring->enqueue;
- unsigned int usable = ep_ring->enq_seg->trbs +
- TRBS_PER_SEGMENT - 1 - trb;
- u32 nop_cmd;
-
- /*
- * Section 4.11.7.1 TD Fragments states that a link
- * TRB must only occur at the boundary between
- * data bursts (eg 512 bytes for 480M).
- * While it is possible to split a large fragment
- * we don't know the size yet.
- * Simplest solution is to fill the trb before the
- * LINK with nop commands.
- */
- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
- break;
-
- if (ep_ring->type != TYPE_BULK)
- /*
- * While isoc transfers might have a buffer that
- * crosses a 64k boundary it is unlikely.
- * Since we can't add NOPs without generating
- * gaps in the traffic just hope it never
- * happens at the end of the ring.
- * This could be fixed by writing a LINK TRB
- * instead of the first NOP - however the
- * TRB_TYPE_LINK_LE32() calls would all need
- * changing to check the ring length.
- */
- break;
-
- if (num_trbs >= TRBS_PER_SEGMENT) {
- xhci_err(xhci, "Too many fragments %d, max %d\n",
- num_trbs, TRBS_PER_SEGMENT - 1);
- return -EINVAL;
- }
-
- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
- ep_ring->cycle_state);
- ep_ring->num_trbs_free -= usable;
- do {
- trb->generic.field[0] = 0;
- trb->generic.field[1] = 0;
- trb->generic.field[2] = 0;
- trb->generic.field[3] = nop_cmd;
- trb++;
- } while (--usable);
- ep_ring->enqueue = trb;
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
- }
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
@@ -4118,6 +4103,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@@ -4136,7 +4122,9 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
- return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
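
For stream endpoints, queue_set_tr_deq() above now folds the stream context type into the low dword of the Set TR Dequeue Pointer command, next to the cycle bit; the dequeue address is 16-byte aligned, so bits 3:1 are free for SCT and bit 0 for the cycle state. A standalone sketch of that dword assembly, assuming SCT_PRI_TR is 1 (primary transfer ring) and using SCT_FOR_TRB() as added in the xhci.h hunk below:

#include <stdio.h>
#include <stdint.h>

#define SCT_PRI_TR	1			/* assumed value of the existing define */
#define SCT_FOR_TRB(p)	(((p) << 1) & 0x7)	/* matches the new xhci.h macro */

int main(void)
{
	uint64_t deq_dma = 0x10c912c0ULL;	/* 16-byte aligned TRB address */
	uint32_t cycle_state = 1;
	uint32_t trb_sct = SCT_FOR_TRB(SCT_PRI_TR);	/* only set when stream_id != 0 */
	uint32_t field0 = (uint32_t)deq_dma | trb_sct | cycle_state;

	printf("Set TR Deq field[0] = 0x%08x\n", field0);	/* 0x10c912c3 */
	return 0;
}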
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad364394885a..8fe4e124ddd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -390,6 +390,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
}
legacy_irq:
+ if (!strlen(hcd->irq_descr))
+ snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+ hcd->driver->description, hcd->self.busnum);
+
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
@@ -611,7 +615,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +760,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
{
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
- xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr);
+ xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = readq(&xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue);
+ xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+ xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
@@ -769,11 +773,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
{
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
- writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base);
- writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
@@ -783,7 +787,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
u64 val_64;
/* step 2: initialize command ring buffer */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
@@ -792,7 +796,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
@@ -2678,6 +2682,20 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return ret;
}
+static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev, int i)
+{
+ struct xhci_virt_ep *ep = &vdev->eps[i];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
+ xhci_get_endpoint_address(i));
+ xhci_free_stream_info(xhci, ep->stream_info);
+ ep->stream_info = NULL;
+ ep->ep_state &= ~EP_HAS_STREAMS;
+ }
+}
+
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
@@ -2742,8 +2760,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; ++i) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
- !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+ }
}
xhci_zero_in_ctx(xhci, virt_dev);
/*
@@ -2759,6 +2779,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (virt_dev->eps[i].ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
@@ -2954,7 +2975,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
- if (ep->ss_ep_comp.bmAttributes == 0) {
+ if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
@@ -3121,6 +3142,12 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
+ /* MaxPSASize value 0 (2 streams) means streams are not supported */
+ if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
+ xhci_dbg(xhci, "xHCI controller does not support streams.\n");
+ return -ENOSYS;
+ }
+
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
@@ -3519,6 +3546,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
+ xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
@@ -3842,7 +3871,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (ret) {
return ret;
}
- temp_64 = readq(&xhci->op_regs->dcbaa_ptr);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,8 +4759,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
struct device *dev = hcd->self.controller;
int retval;
- /* Limit the block layer scatter-gather lists to half a segment. */
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
/* support to build packet from discontinuous buffers */
hcd->self.no_sg_constraint = 1;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639bf31..d280e9213d08 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
-/*
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers. Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
- */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
@@ -714,6 +703,7 @@ struct xhci_ep_ctx {
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
+#define SCTX_DEQ_MASK (~0xfL)
/**
@@ -1129,9 +1119,10 @@ enum xhci_setup_dev {
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
-/* Set TR Dequeue Pointer command TRB fields */
+/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/* Port Status Change Event TRB fields */
@@ -1279,7 +1270,7 @@ union xhci_trb {
* since the command ring is 64-byte aligned.
* It must also be greater than 16.
*/
-#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
@@ -1352,6 +1343,7 @@ struct xhci_ring {
unsigned int num_trbs_free_temp;
enum xhci_ring_type type;
bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
};
struct xhci_erst_entry {
@@ -1614,6 +1606,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
return xhci->quirks & XHCI_LINK_TRB_QUIRK;
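
With the asm-generic readq/writeq include gone, every 64-bit register access goes through the two helpers above, low dword first. A trivial standalone check of the recombination arithmetic xhci_read_64() uses (val_lo + (val_hi << 32)), with a made-up register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg = 0x00000001fedcba98ULL;		/* pretend register contents */
	uint64_t val_lo = (uint32_t)reg;		/* what readl(ptr) would return */
	uint64_t val_hi = (uint32_t)(reg >> 32);	/* what readl(ptr + 1) would return */

	/* same recombination as xhci_read_64() */
	printf("0x%016llx\n", (unsigned long long)(val_lo + (val_hi << 32)));
	return 0;
}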
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index ba5f70f92888..1bca274dc3b5 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -128,7 +128,6 @@ config USB_IDMOUSE
config USB_FTDI_ELAN
tristate "Elan PCMCIA CardBus Adapter USB Client"
- default M
help
ELAN's Uxxx series of adapters are USB to PCMCIA CardBus adapters.
Currently only the U132 adapter is available.
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index de98906f786d..06b5d77cd9ad 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2123,8 +2123,8 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
u8 tmp8, tmp82, ramtype;
int bw = 0;
char *ramtypetext1 = NULL;
- const char *ramtypetext2[] = { "SDR SDRAM", "SDR SGRAM",
- "DDR SDRAM", "DDR SGRAM" };
+ static const char ram_datarate[4] = {'S', 'S', 'D', 'D'};
+ static const char ram_dynamictype[4] = {'D', 'G', 'D', 'G'};
static const int busSDR[4] = {64, 64, 128, 128};
static const int busDDR[4] = {32, 32, 64, 64};
static const int busDDRA[4] = {64+32, 64+32 , (64+32)*2, (64+32)*2};
@@ -2156,8 +2156,10 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
break;
}
- dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %s, bus width %d\n", (sisusb->vramsize >> 20), ramtypetext1,
- ramtypetext2[ramtype], bw);
+
+ dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %cDR S%cRAM, bus width %d\n",
+ sisusb->vramsize >> 20, ramtypetext1,
+ ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
}
static int
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 78eb4ff33269..bdef0d6eb91d 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -22,8 +22,27 @@
enum led_type {
DELCOM_VISUAL_SIGNAL_INDICATOR,
DREAM_CHEEKY_WEBMAIL_NOTIFIER,
+ RISO_KAGAKU_LED
};
+/* the Webmail LED made by RISO KAGAKU CORP. decodes a color index
+ internally, we want to keep the red+green+blue sysfs api, so we decode
+ from 1-bit RGB to the riso kagaku color index according to this table... */
+
+static unsigned const char riso_kagaku_tbl[] = {
+/* R+2G+4B -> riso kagaku color index */
+ [0] = 0, /* black */
+ [1] = 2, /* red */
+ [2] = 1, /* green */
+ [3] = 5, /* yellow */
+ [4] = 3, /* blue */
+ [5] = 6, /* magenta */
+ [6] = 4, /* cyan */
+ [7] = 7 /* white */
+};
+
+#define RISO_KAGAKU_IX(r,g,b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)]
+
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0fc5, 0x1223),
@@ -32,6 +51,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ USB_DEVICE(0x1d34, 0x000a),
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ { USB_DEVICE(0x1294, 0x1320),
+ .driver_info = RISO_KAGAKU_LED },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -48,6 +69,7 @@ static void change_color(struct usb_led *led)
{
int retval = 0;
unsigned char *buffer;
+ int actlength;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer) {
@@ -104,6 +126,18 @@ static void change_color(struct usb_led *led)
2000);
break;
+ case RISO_KAGAKU_LED:
+ buffer[0] = RISO_KAGAKU_IX(led->red, led->green, led->blue);
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ buffer[4] = 0;
+
+ retval = usb_interrupt_msg(led->udev,
+ usb_sndctrlpipe(led->udev, 2),
+ buffer, 5, &actlength, 1000 /*ms timeout*/);
+ break;
+
default:
dev_err(&led->udev->dev, "unknown device type %d\n", led->type);
}
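
The riso_kagaku_tbl lookup above collapses the three sysfs red/green/blue values into the single color index the device understands. A small standalone check of the RISO_KAGAKU_IX() mapping, copying the table from the hunk:

#include <stdio.h>

static const unsigned char riso_kagaku_tbl[] = { 0, 2, 1, 5, 3, 6, 4, 7 };

#define RISO_KAGAKU_IX(r, g, b) \
	riso_kagaku_tbl[((r) ? 1 : 0) + ((g) ? 2 : 0) + ((b) ? 4 : 0)]

int main(void)
{
	/* red -> 2, red+green (yellow) -> 5, all on (white) -> 7 */
	printf("%d %d %d\n",
	       RISO_KAGAKU_IX(255, 0, 0),
	       RISO_KAGAKU_IX(255, 255, 0),
	       RISO_KAGAKU_IX(255, 255, 255));
	return 0;
}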
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 688dc8bb192d..8b789792f6fa 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -43,6 +43,7 @@ config USB_MUSB_HOST
config USB_MUSB_GADGET
bool "Gadget only mode"
depends on USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC
+ depends on HAS_DMA
help
Select this when you want to use MUSB in gadget mode only,
thereby the host feature will be regressed.
@@ -50,6 +51,7 @@ config USB_MUSB_GADGET
config USB_MUSB_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_MUSB_HDRC) && (USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC))
+ depends on HAS_DMA
help
This is the default mode of working of MUSB controller where
both host and gadget features are enabled.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fc192ad9cc6a..07576907e2c6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -438,7 +438,6 @@ void musb_hnp_stop(struct musb *musb)
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
u8 devctl)
{
- struct usb_otg *otg = musb->xceiv->otg;
irqreturn_t handled = IRQ_NONE;
dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
@@ -477,8 +476,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
schedule_delayed_work(
- &musb->finish_resume_work, 20);
+ &musb->finish_resume_work,
+ msecs_to_jiffies(20));
musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
@@ -653,7 +655,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
- musb->is_active = otg->gadget->b_hnp_enable;
+ musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
@@ -669,7 +671,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_A_HOST:
musb->xceiv->state = OTG_STATE_A_SUSPEND;
- musb->is_active = otg->host->b_hnp_enable;
+ musb->is_active = musb->hcd->self.b_hnp_enable;
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
@@ -2157,11 +2159,19 @@ static void musb_restore_context(struct musb *musb)
void __iomem *musb_base = musb->mregs;
void __iomem *ep_target_regs;
void __iomem *epio;
+ u8 power;
musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
- musb_writeb(musb_base, MUSB_POWER, musb->context.power);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
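
musb_restore_context() above now does a read-modify-write on POWER so the live SUSPENDM/RESUME bits survive the restore while everything else comes from the saved context. A standalone check of that masking, with mask values taken to be MUSB_POWER_SUSPENDM = 0x02 and MUSB_POWER_RESUME = 0x04 for illustration:

#include <stdio.h>
#include <stdint.h>

#define POWER_SUSPENDM	0x02	/* illustrative; the real masks live in musb_regs.h */
#define POWER_RESUME	0x04

int main(void)
{
	uint8_t live = 0x06;	/* hardware currently has SUSPENDM|RESUME set */
	uint8_t saved = 0xe3;	/* saved context, possibly with stale bits */
	uint8_t keep = POWER_SUSPENDM | POWER_RESUME;

	/* keep the live SUSPENDM/RESUME bits, take the rest from the context */
	uint8_t power = (live & keep) | (saved & (uint8_t)~keep);

	printf("restored POWER = 0x%02x\n", power);	/* 0xe7 */
	return 0;
}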
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f88929609bac..7b8bbf53127e 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -39,6 +39,7 @@ struct cppi41_dma_channel {
u32 transferred;
u32 packet_sz;
struct list_head tx_check;
+ struct work_struct dma_completion;
};
#define MUSB_DMA_NUM_CHANNELS 15
@@ -112,6 +113,18 @@ static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
return true;
}
+static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
+{
+ if (in && hw_ep->in_qh) {
+ if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ } else if (hw_ep->out_qh) {
+ if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ }
+ return false;
+}
+
static void cppi41_dma_callback(void *private_data);
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
@@ -119,7 +132,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
struct musb *musb = hw_ep->musb;
- if (!cppi41_channel->prog_len) {
+ if (!cppi41_channel->prog_len ||
+ (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
/* done, complete */
cppi41_channel->channel.actual_len =
@@ -165,6 +179,32 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
}
}
+static void cppi_trans_done_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct cppi41_dma_channel *cppi41_channel =
+ container_of(work, struct cppi41_dma_channel, dma_completion);
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ bool empty;
+
+ if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ schedule_work(&cppi41_channel->dma_completion);
+ }
+ }
+}
+
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
struct cppi41_dma_controller *controller;
@@ -228,6 +268,14 @@ static void cppi41_dma_callback(void *private_data)
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
+ if (!cppi41_channel->is_tx) {
+ if (is_isoc(hw_ep, 1))
+ schedule_work(&cppi41_channel->dma_completion);
+ else
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+
empty = musb_is_tx_fifo_empty(hw_ep);
if (empty) {
cppi41_trans_done(cppi41_channel);
@@ -264,6 +312,10 @@ static void cppi41_dma_callback(void *private_data)
goto out;
}
}
+ if (is_isoc(hw_ep, 0)) {
+ schedule_work(&cppi41_channel->dma_completion);
+ goto out;
+ }
list_add_tail(&cppi41_channel->tx_check,
&controller->early_tx_list);
if (!hrtimer_active(&controller->early_tx)) {
@@ -448,12 +500,25 @@ static int cppi41_dma_channel_program(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
int ret;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ int hb_mult = 0;
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
+ if (is_host_active(cppi41_channel->controller->musb)) {
+ if (cppi41_channel->is_tx)
+ hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
+ else
+ hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
+ }
+
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
+
+ if (hb_mult)
+ packet_sz = hb_mult * (packet_sz & 0x7FF);
+
ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
if (!ret)
channel->status = MUSB_DMA_STATUS_FREE;
@@ -607,6 +672,8 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
cppi41_channel->port_num = port;
cppi41_channel->is_tx = is_tx;
INIT_LIST_HEAD(&cppi41_channel->tx_check);
+ INIT_WORK(&cppi41_channel->dma_completion,
+ cppi_trans_done_work);
musb_dma = &cppi41_channel->channel;
musb_dma->private_data = cppi41_channel;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7a109eae9b9a..3372ded5def7 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -45,6 +45,8 @@
#include <linux/of_irq.h>
#include <linux/usb/of.h>
+#include <linux/debugfs.h>
+
#include "musb_core.h"
static const struct of_device_id musb_dsps_of_match[];
@@ -136,6 +138,26 @@ struct dsps_glue {
unsigned long last_timer; /* last timer data for each instance */
struct dsps_context context;
+ struct debugfs_regset32 regset;
+ struct dentry *dbgfs_root;
+};
+
+static const struct debugfs_reg32 dsps_musb_regs[] = {
+ { "revision", 0x00 },
+ { "control", 0x14 },
+ { "status", 0x18 },
+ { "eoi", 0x24 },
+ { "intr0_stat", 0x30 },
+ { "intr1_stat", 0x34 },
+ { "intr0_set", 0x38 },
+ { "intr1_set", 0x3c },
+ { "txmode", 0x70 },
+ { "rxmode", 0x74 },
+ { "autoreq", 0xd0 },
+ { "srpfixtime", 0xd4 },
+ { "tdown", 0xd8 },
+ { "phy_utmi", 0xe0 },
+ { "mode", 0xe8 },
};
static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
@@ -368,6 +390,30 @@ out:
return ret;
}
+static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
+{
+ struct dentry *root;
+ struct dentry *file;
+ char buf[128];
+
+ sprintf(buf, "%s.dsps", dev_name(musb->controller));
+ root = debugfs_create_dir(buf, NULL);
+ if (!root)
+ return -ENOMEM;
+ glue->dbgfs_root = root;
+
+ glue->regset.regs = dsps_musb_regs;
+ glue->regset.nregs = ARRAY_SIZE(dsps_musb_regs);
+ glue->regset.base = musb->ctrl_base;
+
+ file = debugfs_create_regset32("regdump", S_IRUGO, root, &glue->regset);
+ if (!file) {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static int dsps_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
@@ -377,6 +423,7 @@ static int dsps_musb_init(struct musb *musb)
void __iomem *reg_base;
struct resource *r;
u32 rev, val;
+ int ret;
r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
if (!r)
@@ -410,6 +457,10 @@ static int dsps_musb_init(struct musb *musb)
val &= ~(1 << wrp->otg_disable);
dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+ ret = dsps_musb_dbg_init(musb, glue);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -616,7 +667,7 @@ static int dsps_probe(struct platform_device *pdev)
wrp = match->data;
/* allocate glue */
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&pdev->dev, "unable to allocate glue memory\n");
return -ENOMEM;
@@ -644,7 +695,6 @@ err3:
pm_runtime_put(&pdev->dev);
err2:
pm_runtime_disable(&pdev->dev);
- kfree(glue);
return ret;
}
@@ -657,7 +707,9 @@ static int dsps_remove(struct platform_device *pdev)
/* disable usbss clocks */
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(glue);
+
+ debugfs_remove_recursive(glue->dbgfs_root);
+
return 0;
}
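
Besides adding the debugfs register dump, the dsps probe path above moves the glue allocation to devm_kzalloc(), which is why the explicit kfree() calls disappear from the error and remove paths. A minimal sketch of that managed-allocation pattern with a hypothetical my_glue type:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_glue { int dummy; };	/* hypothetical per-device state */

static int my_probe(struct platform_device *pdev)
{
	struct my_glue *glue;

	/* freed automatically on probe failure or device unbind */
	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	platform_set_drvdata(pdev, glue);
	return 0;	/* no kfree() needed in later error or remove paths */
}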
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ed455724017b..eb06291a40c8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1183,6 +1183,9 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
@@ -1691,7 +1694,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1704,10 +1708,30 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
- if (++qh->iso_idx >= urb->number_of_packets)
+ if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
- else
+ } else {
+#if defined(CONFIG_USB_TI_CPPI41_DMA)
+ struct dma_controller *c;
+ dma_addr_t *buf;
+ u32 length, ret;
+
+ c = musb->dma_controller;
+ buf = (void *)
+ urb->iso_frame_desc[qh->iso_idx].offset
+ + (u32)urb->transfer_dma;
+
+ length =
+ urb->iso_frame_desc[qh->iso_idx].length;
+
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ ret = c->channel_program(dma, qh->maxpacket,
+ 0, (u32) buf, length);
+#endif
done = false;
+ }
} else {
/* done if urb buffer is full or short packet is recd */
@@ -1747,7 +1771,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index eb634433ef09..e2d2d8c9891b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -135,7 +135,8 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- schedule_delayed_work(&musb->finish_resume_work, 20);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
}
}
@@ -158,7 +159,6 @@ void musb_port_reset(struct musb *musb, bool do_reset)
*/
power = musb_readb(mbase, MUSB_POWER);
if (do_reset) {
-
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
* Then we must clear RESUME and wait a bit to let musb start
@@ -167,11 +167,22 @@ void musb_port_reset(struct musb *musb, bool do_reset)
* detected".
*/
if (power & MUSB_POWER_RESUME) {
- while (time_before(jiffies, musb->rh_timer))
- msleep(1);
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
musb_writeb(mbase, MUSB_POWER,
- power & ~MUSB_POWER_RESUME);
- msleep(1);
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
}
power &= 0xf0;
@@ -180,7 +191,8 @@ void musb_port_reset(struct musb *musb, bool do_reset)
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- schedule_delayed_work(&musb->deassert_reset_work, 50);
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
} else {
dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
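
The musb_virthub.c hunks above wrap the raw delay values in msecs_to_jiffies() because schedule_delayed_work() takes its delay in jiffies, not milliseconds, and the length of a jiffy depends on the kernel's HZ configuration. A minimal user-space sketch of the conversion follows; the HZ value, the helper name and the simplified round-up are illustrative assumptions, not the kernel implementation:

#include <stdio.h>

/* Illustrative only: HZ is a build-time kernel constant; 250 is assumed here. */
#define HZ 250

/* Simplified round-up conversion, mirroring what msecs_to_jiffies() provides. */
static unsigned long msecs_to_jiffies_sketch(unsigned int msecs)
{
	return (msecs * HZ + 999) / 1000;
}

int main(void)
{
	/* Passing a raw 20 to schedule_delayed_work() means 20 jiffies... */
	printf("20 jiffies at HZ=%d   = %d ms\n", HZ, 20 * 1000 / HZ);
	/* ...while msecs_to_jiffies(20) yields the jiffy count for 20 ms. */
	printf("msecs_to_jiffies(20)  = %lu jiffies\n", msecs_to_jiffies_sketch(20));
	return 0;
}

With HZ=250 a raw 20 would be an 80 ms delay, so the conversion keeps the intended 20 ms and 50 ms timings independent of the HZ configuration.
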
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2a408cdaf7b2..d341c149a2f9 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -37,7 +37,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/usb/musb-omap.h>
-#include <linux/usb/omap_control_usb.h>
+#include <linux/phy/omap_control_phy.h>
#include <linux/of_platform.h>
#include "musb_core.h"
@@ -659,7 +659,6 @@ static int omap2430_runtime_suspend(struct device *dev)
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
}
return 0;
@@ -674,7 +673,6 @@ static int omap2430_runtime_resume(struct device *dev)
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
- phy_power_on(musb->phy);
}
return 0;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d1451d5bbea..416e0c8cf6ff 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -75,27 +75,6 @@ config NOP_USB_XCEIV
built-in with usb ip or which are autonomous and doesn't require any
phy programming such as ISP1x04 etc.
-config OMAP_CONTROL_USB
- tristate "OMAP CONTROL USB Driver"
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
- help
- Enable this to add support for the USB part present in the control
- module. This driver has API to power on the USB2 PHY and to write to
- the mailbox. The mailbox is present only in omap4 and the register to
- power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
- additional register to power on USB3 PHY.
-
-config OMAP_USB3
- tristate "OMAP USB3 PHY Driver"
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
- select OMAP_CONTROL_USB
- select USB_PHY
- help
- Enable this to support the USB3 PHY that is part of SOC. This
- driver takes care of all the PHY functionality apart from comparator.
- This driver interacts with the "OMAP Control USB Driver" to power
- on/off the PHY.
-
config AM335X_CONTROL_USB
tristate
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index be58adae3496..f8fa719a31b9 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -13,11 +13,9 @@ obj-$(CONFIG_ISP1301_OMAP) += phy-isp1301-omap.o
obj-$(CONFIG_MV_U3D_PHY) += phy-mv-u3d-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
obj-$(CONFIG_TAHVO_USB) += phy-tahvo.o
-obj-$(CONFIG_OMAP_CONTROL_USB) += phy-omap-control.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
obj-$(CONFIG_OMAP_OTG) += phy-omap-otg.o
-obj-$(CONFIG_OMAP_USB3) += phy-omap-usb3.o
obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
obj-$(CONFIG_SAMSUNG_USB2PHY) += phy-samsung-usb2.o
obj-$(CONFIG_SAMSUNG_USB3PHY) += phy-samsung-usb3.o
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index 7aa314ef4a8a..c47e5a6edde2 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -317,10 +317,12 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
break;
case OTG_STATE_A_HOST:
- if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
+ if (fsm->id || fsm->a_bus_drop)
+ otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
+ else if ((!fsm->a_bus_req || fsm->a_suspend_req_inf) &&
fsm->otg->host->b_hnp_enable)
otg_set_state(fsm, OTG_STATE_A_SUSPEND);
- else if (fsm->id || !fsm->b_conn || fsm->a_bus_drop)
+ else if (!fsm->b_conn)
otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
else if (!fsm->a_vbus_vld)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
@@ -346,8 +348,7 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
break;
case OTG_STATE_A_WAIT_VFALL:
- if (fsm->a_wait_vfall_tmout || fsm->id || fsm->a_bus_req ||
- (!fsm->a_sess_vld && !fsm->b_conn))
+ if (fsm->a_wait_vfall_tmout)
otg_set_state(fsm, OTG_STATE_A_IDLE);
break;
case OTG_STATE_A_VBUS_ERR:
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 37752832b770..5b37b81f2ef6 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -40,8 +40,6 @@
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/regulator/consumer.h>
-#include <mach/clk.h>
-
#define MSM_USB_BASE (motg->regs)
#define DRIVER_NAME "msm_otg"
@@ -161,32 +159,6 @@ put_3p3:
return rc;
}
-#ifdef CONFIG_PM_SLEEP
-#define USB_PHY_SUSP_DIG_VOL 500000
-static int msm_hsusb_config_vddcx(int high)
-{
- int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
- int min_vol;
- int ret;
-
- if (high)
- min_vol = USB_PHY_VDD_DIG_VOL_MIN;
- else
- min_vol = USB_PHY_SUSP_DIG_VOL;
-
- ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("%s: unable to set the voltage for regulator "
- "HSUSB_VDDCX\n", __func__);
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-#endif
-
static int msm_hsusb_ldo_set_mode(int on)
{
int ret = 0;
@@ -308,33 +280,30 @@ static void ulpi_init(struct msm_otg *motg)
static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
{
- int ret;
+ int ret = 0;
+
+ if (!motg->pdata->link_clk_reset)
+ return ret;
+
+ ret = motg->pdata->link_clk_reset(motg->clk, assert);
+ if (ret)
+ dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
+ assert ? "assert" : "deassert");
- if (assert) {
- ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
- if (ret)
- dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
- } else {
- ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
- if (ret)
- dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
- }
return ret;
}
static int msm_otg_phy_clk_reset(struct msm_otg *motg)
{
- int ret;
+ int ret = 0;
- ret = clk_reset(motg->phy_reset_clk, CLK_RESET_ASSERT);
- if (ret) {
- dev_err(motg->phy.dev, "usb phy clk assert failed\n");
+ if (!motg->pdata->phy_clk_reset)
return ret;
- }
- usleep_range(10000, 12000);
- ret = clk_reset(motg->phy_reset_clk, CLK_RESET_DEASSERT);
+
+ ret = motg->pdata->phy_clk_reset(motg->phy_reset_clk);
if (ret)
- dev_err(motg->phy.dev, "usb phy clk deassert failed\n");
+ dev_err(motg->phy.dev, "usb phy clk reset failed\n");
+
return ret;
}
@@ -445,7 +414,32 @@ static int msm_otg_reset(struct usb_phy *phy)
#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+
+#define USB_PHY_SUSP_DIG_VOL 500000
+static int msm_hsusb_config_vddcx(int high)
+{
+ int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
+ int min_vol;
+ int ret;
+
+ if (high)
+ min_vol = USB_PHY_VDD_DIG_VOL_MIN;
+ else
+ min_vol = USB_PHY_SUSP_DIG_VOL;
+
+ ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+ if (ret) {
+ pr_err("%s: unable to set the voltage for regulator "
+ "HSUSB_VDDCX\n", __func__);
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+
+ return ret;
+}
+
static int msm_otg_suspend(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1434,7 +1428,8 @@ static int __init msm_otg_probe(struct platform_device *pdev)
motg->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
if (!motg->phy.otg) {
dev_err(&pdev->dev, "unable to allocate msm_otg\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_motg;
}
motg->pdata = dev_get_platdata(&pdev->dev);
@@ -1738,22 +1733,18 @@ static int msm_otg_pm_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
msm_otg_runtime_idle)
};
-#endif
static struct platform_driver msm_otg_driver = {
.remove = msm_otg_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
-#endif
},
};
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index b42897b6474c..c42bdf0c4a1f 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
*
@@ -20,6 +20,9 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#define DRIVER_NAME "mxs_phy"
@@ -28,18 +31,134 @@
#define HW_USBPHY_CTRL_SET 0x34
#define HW_USBPHY_CTRL_CLR 0x38
+#define HW_USBPHY_DEBUG_SET 0x54
+#define HW_USBPHY_DEBUG_CLR 0x58
+
+#define HW_USBPHY_IP 0x90
+#define HW_USBPHY_IP_SET 0x94
+#define HW_USBPHY_IP_CLR 0x98
+
#define BM_USBPHY_CTRL_SFTRST BIT(31)
#define BM_USBPHY_CTRL_CLKGATE BIT(30)
+#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
+#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
+#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
+#define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22)
+#define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21)
+#define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20)
+#define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19)
+#define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18)
#define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15)
#define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14)
#define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1)
+#define BM_USBPHY_IP_FIX (BIT(17) | BIT(18))
+
+#define BM_USBPHY_DEBUG_CLKGATE BIT(30)
+
+/* Anatop Registers */
+#define ANADIG_ANA_MISC0 0x150
+#define ANADIG_ANA_MISC0_SET 0x154
+#define ANADIG_ANA_MISC0_CLR 0x158
+
+#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
+#define ANADIG_USB2_VBUS_DET_STAT 0x220
+
+#define ANADIG_USB1_LOOPBACK_SET 0x1e4
+#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
+#define ANADIG_USB2_LOOPBACK_SET 0x244
+#define ANADIG_USB2_LOOPBACK_CLR 0x248
+
+#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
+#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
+
+#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
+#define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3)
+
+#define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2)
+#define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5)
+#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
+#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
+
+#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
+
+/* Disconnect the line between PHY and controller when vbus is absent */
+#define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0)
+
+/*
+ * The PHY will be left in a messy state if a wakeup arrives after the
+ * bus has been suspended (portsc.suspendM set) but before the PHY has
+ * been put into low power mode (portsc.phcd set).
+ */
+#define MXS_PHY_ABNORMAL_IN_SUSPEND BIT(1)
+
+/*
+ * SOF is sent too soon after resuming, which can cause a disconnection
+ * between the host and a high speed device.
+ */
+#define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2)
+
+/*
+ * The IC contains bug-fix logic for MXS_PHY_ABNORMAL_IN_SUSPEND and
+ * MXS_PHY_SENDING_SOF_TOO_FAST (described by the flags above); the RTL
+ * handles them depending on the silicon version.
+ */
+#define MXS_PHY_NEED_IP_FIX BIT(3)
+
+struct mxs_phy_data {
+ unsigned int flags;
+};
+
+static const struct mxs_phy_data imx23_phy_data = {
+ .flags = MXS_PHY_ABNORMAL_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST,
+};
+
+static const struct mxs_phy_data imx6q_phy_data = {
+ .flags = MXS_PHY_SENDING_SOF_TOO_FAST |
+ MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_NEED_IP_FIX,
+};
+
+static const struct mxs_phy_data imx6sl_phy_data = {
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_NEED_IP_FIX,
+};
+
+static const struct of_device_id mxs_phy_dt_ids[] = {
+ { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
+ { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
+ { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
+
struct mxs_phy {
struct usb_phy phy;
struct clk *clk;
+ const struct mxs_phy_data *data;
+ struct regmap *regmap_anatop;
+ int port_id;
};
-#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
+static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6q_phy_data;
+}
+
+static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6sl_phy_data;
+}
+
+/*
+ * The PHY needs a few 32K clock cycles to switch from the 32K clock
+ * to the bus clock (AHB/AXI, etc.).
+ */
+static void mxs_phy_clock_switch_delay(void)
+{
+ usleep_range(300, 400);
+}
static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
{
@@ -53,19 +172,105 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
- /* enable FS/LS device */
- writel(BM_USBPHY_CTRL_ENUTMILEVEL2 |
- BM_USBPHY_CTRL_ENUTMILEVEL3,
+ /*
+ * USB PHY Ctrl Setting
+ * - Auto clock/power on
+ * - Enable full/low speed support
+ */
+ writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
+ BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
+ BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
+ BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
+ BM_USBPHY_CTRL_ENAUTO_PWRON_PLL |
+ BM_USBPHY_CTRL_ENUTMILEVEL2 |
+ BM_USBPHY_CTRL_ENUTMILEVEL3,
base + HW_USBPHY_CTRL_SET);
+ if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
+ writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
+
return 0;
}
+/* Return true if the vbus is there */
+static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
+{
+ unsigned int vbus_value;
+
+ if (mxs_phy->port_id == 0)
+ regmap_read(mxs_phy->regmap_anatop,
+ ANADIG_USB1_VBUS_DET_STAT,
+ &vbus_value);
+ else if (mxs_phy->port_id == 1)
+ regmap_read(mxs_phy->regmap_anatop,
+ ANADIG_USB2_VBUS_DET_STAT,
+ &vbus_value);
+
+ if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)
+ return true;
+ else
+ return false;
+}
+
+static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+{
+ void __iomem *base = mxs_phy->phy.io_priv;
+ u32 reg;
+
+ if (disconnect)
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_CLR);
+
+ if (mxs_phy->port_id == 0) {
+ reg = disconnect ? ANADIG_USB1_LOOPBACK_SET
+ : ANADIG_USB1_LOOPBACK_CLR;
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 |
+ BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN);
+ } else if (mxs_phy->port_id == 1) {
+ reg = disconnect ? ANADIG_USB2_LOOPBACK_SET
+ : ANADIG_USB2_LOOPBACK_CLR;
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 |
+ BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN);
+ }
+
+ if (!disconnect)
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_SET);
+
+ /* Delay briefly so the controller sees the line state as SE0 */
+ if (disconnect)
+ usleep_range(500, 1000);
+}
+
+static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+{
+ bool vbus_is_on = false;
+
+ /* If the SoCs don't need to disconnect line without vbus, quit */
+ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
+ return;
+
+ /* If the SoCs don't have anatop, quit */
+ if (!mxs_phy->regmap_anatop)
+ return;
+
+ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
+
+ if (on && !vbus_is_on)
+ __mxs_phy_disconnect_line(mxs_phy, true);
+ else
+ __mxs_phy_disconnect_line(mxs_phy, false);
+
+}
+
static int mxs_phy_init(struct usb_phy *phy)
{
int ret;
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+ mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
@@ -94,6 +299,7 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
x->io_priv + HW_USBPHY_CTRL_SET);
clk_disable_unprepare(mxs_phy->clk);
} else {
+ mxs_phy_clock_switch_delay();
ret = clk_prepare_enable(mxs_phy->clk);
if (ret)
return ret;
@@ -105,11 +311,28 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
return 0;
}
+static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled)
+{
+ struct mxs_phy *mxs_phy = to_mxs_phy(x);
+ u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
+ BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
+ BM_USBPHY_CTRL_ENIDCHG_WKUP;
+ if (enabled) {
+ mxs_phy_disconnect_line(mxs_phy, true);
+ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET);
+ } else {
+ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR);
+ mxs_phy_disconnect_line(mxs_phy, false);
+ }
+
+ return 0;
+}
+
static int mxs_phy_on_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
- dev_dbg(phy->dev, "%s speed device has connected\n",
- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
+ dev_dbg(phy->dev, "%s device has connected\n",
+ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
if (speed == USB_SPEED_HIGH)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
@@ -121,8 +344,8 @@ static int mxs_phy_on_connect(struct usb_phy *phy,
static int mxs_phy_on_disconnect(struct usb_phy *phy,
enum usb_device_speed speed)
{
- dev_dbg(phy->dev, "%s speed device has disconnected\n",
- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
+ dev_dbg(phy->dev, "%s device has disconnected\n",
+ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
if (speed == USB_SPEED_HIGH)
writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
@@ -138,6 +361,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
struct clk *clk;
struct mxs_phy *mxs_phy;
int ret;
+ const struct of_device_id *of_id =
+ of_match_device(mxs_phy_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -157,6 +383,22 @@ static int mxs_phy_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /* Some SoCs don't have anatop registers */
+ if (of_get_property(np, "fsl,anatop", NULL)) {
+ mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle
+ (np, "fsl,anatop");
+ if (IS_ERR(mxs_phy->regmap_anatop)) {
+ dev_dbg(&pdev->dev,
+ "failed to find regmap for anatop\n");
+ return PTR_ERR(mxs_phy->regmap_anatop);
+ }
+ }
+
+ ret = of_alias_get_id(np, "usbphy");
+ if (ret < 0)
+ dev_dbg(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ mxs_phy->port_id = ret;
+
mxs_phy->phy.io_priv = base;
mxs_phy->phy.dev = &pdev->dev;
mxs_phy->phy.label = DRIVER_NAME;
@@ -166,11 +408,15 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.notify_connect = mxs_phy_on_connect;
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
+ mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
mxs_phy->clk = clk;
+ mxs_phy->data = of_id->data;
platform_set_drvdata(pdev, mxs_phy);
+ device_set_wakeup_capable(&pdev->dev, true);
+
ret = usb_add_phy_dev(&mxs_phy->phy);
if (ret)
return ret;
@@ -187,11 +433,46 @@ static int mxs_phy_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mxs_phy_dt_ids[] = {
- { .compatible = "fsl,imx23-usbphy", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
+{
+ unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
+
+ /* If the SoCs don't have anatop, quit */
+ if (!mxs_phy->regmap_anatop)
+ return;
+
+ if (is_imx6q_phy(mxs_phy))
+ regmap_write(mxs_phy->regmap_anatop, reg,
+ BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
+ else if (is_imx6sl_phy(mxs_phy))
+ regmap_write(mxs_phy->regmap_anatop,
+ reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
+}
+
+static int mxs_phy_system_suspend(struct device *dev)
+{
+ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
+
+ return 0;
+}
+
+static int mxs_phy_system_resume(struct device *dev)
+{
+ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend,
+ mxs_phy_system_resume);
static struct platform_driver mxs_phy_driver = {
.probe = mxs_phy_probe,
@@ -200,6 +481,7 @@ static struct platform_driver mxs_phy_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = mxs_phy_dt_ids,
+ .pm = &mxs_phy_pm,
},
};
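
The phy-mxs-usb.c changes above key the per-SoC quirk handling off the of_device_id .data pointer: each compatible string carries a mxs_phy_data whose flag bits the probe path stores, so later code can test flags such as MXS_PHY_NEED_IP_FIX. A stand-alone user-space sketch of that lookup pattern is shown below; the flag names, table contents and helper names are illustrative assumptions, not the driver's actual data:

#include <stdio.h>
#include <string.h>

/* Per-SoC quirk flags, mirroring the MXS_PHY_* bit style. */
#define QUIRK_NEED_IP_FIX        (1u << 0)
#define QUIRK_DISCONNECT_NO_VBUS (1u << 1)

struct phy_data {
	unsigned int flags;
};

/* compatible string -> per-SoC data, like the of_device_id .data field */
struct match_entry {
	const char *compatible;
	const struct phy_data *data;
};

static const struct phy_data imx6q_data = {
	.flags = QUIRK_NEED_IP_FIX | QUIRK_DISCONNECT_NO_VBUS,
};
static const struct phy_data imx23_data = { .flags = 0 };

static const struct match_entry match_table[] = {
	{ "fsl,imx6q-usbphy", &imx6q_data },
	{ "fsl,imx23-usbphy", &imx23_data },
	{ NULL, NULL }	/* sentinel */
};

static const struct phy_data *lookup(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct phy_data *d = lookup("fsl,imx6q-usbphy");

	if (d && (d->flags & QUIRK_NEED_IP_FIX))
		printf("apply IP fix for this SoC\n");
	return 0;
}

The probe code then only has to match once and dereference the stored data, rather than scattering compatible-string comparisons through the quirk handlers.
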
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
deleted file mode 100644
index 0c6ba29bdddd..000000000000
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * omap-usb3 - USB PHY, talking to dwc3 controller in OMAP.
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/usb/omap_usb.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/pm_runtime.h>
-#include <linux/delay.h>
-#include <linux/usb/omap_control_usb.h>
-#include <linux/of_platform.h>
-
-#define PLL_STATUS 0x00000004
-#define PLL_GO 0x00000008
-#define PLL_CONFIGURATION1 0x0000000C
-#define PLL_CONFIGURATION2 0x00000010
-#define PLL_CONFIGURATION3 0x00000014
-#define PLL_CONFIGURATION4 0x00000020
-
-#define PLL_REGM_MASK 0x001FFE00
-#define PLL_REGM_SHIFT 0x9
-#define PLL_REGM_F_MASK 0x0003FFFF
-#define PLL_REGM_F_SHIFT 0x0
-#define PLL_REGN_MASK 0x000001FE
-#define PLL_REGN_SHIFT 0x1
-#define PLL_SELFREQDCO_MASK 0x0000000E
-#define PLL_SELFREQDCO_SHIFT 0x1
-#define PLL_SD_MASK 0x0003FC00
-#define PLL_SD_SHIFT 0x9
-#define SET_PLL_GO 0x1
-#define PLL_TICOPWDN 0x10000
-#define PLL_LOCK 0x2
-#define PLL_IDLE 0x1
-
-/*
- * This is an Empirical value that works, need to confirm the actual
- * value required for the USB3PHY_PLL_CONFIGURATION2.PLL_IDLE status
- * to be correctly reflected in the USB3PHY_PLL_STATUS register.
- */
-# define PLL_IDLE_TIME 100;
-
-struct usb_dpll_map {
- unsigned long rate;
- struct usb_dpll_params params;
-};
-
-static struct usb_dpll_map dpll_map[] = {
- {12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
- {16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
- {19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
- {20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
- {26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
- {38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
-};
-
-static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dpll_map); i++) {
- if (rate == dpll_map[i].rate)
- return &dpll_map[i].params;
- }
-
- return NULL;
-}
-
-static int omap_usb3_suspend(struct usb_phy *x, int suspend)
-{
- struct omap_usb *phy = phy_to_omapusb(x);
- int val;
- int timeout = PLL_IDLE_TIME;
-
- if (suspend && !phy->is_suspended) {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val |= PLL_IDLE;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (val & PLL_TICOPWDN)
- break;
- udelay(1);
- } while (--timeout);
-
- omap_control_usb_phy_power(phy->control_dev, 0);
-
- phy->is_suspended = 1;
- } else if (!suspend && phy->is_suspended) {
- phy->is_suspended = 0;
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val &= ~PLL_IDLE;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (!(val & PLL_TICOPWDN))
- break;
- udelay(1);
- } while (--timeout);
- }
-
- return 0;
-}
-
-static void omap_usb_dpll_relock(struct omap_usb *phy)
-{
- u32 val;
- unsigned long timeout;
-
- omap_usb_writel(phy->pll_ctrl_base, PLL_GO, SET_PLL_GO);
-
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (val & PLL_LOCK)
- break;
- } while (!WARN_ON(time_after(jiffies, timeout)));
-}
-
-static int omap_usb_dpll_lock(struct omap_usb *phy)
-{
- u32 val;
- unsigned long rate;
- struct usb_dpll_params *dpll_params;
-
- rate = clk_get_rate(phy->sys_clk);
- dpll_params = omap_usb3_get_dpll_params(rate);
- if (!dpll_params) {
- dev_err(phy->dev,
- "No DPLL configuration for %lu Hz SYS CLK\n", rate);
- return -EINVAL;
- }
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
- val &= ~PLL_REGN_MASK;
- val |= dpll_params->n << PLL_REGN_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
- val &= ~PLL_SELFREQDCO_MASK;
- val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
- val &= ~PLL_REGM_MASK;
- val |= dpll_params->m << PLL_REGM_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
- val &= ~PLL_REGM_F_MASK;
- val |= dpll_params->mf << PLL_REGM_F_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
-
- val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
- val &= ~PLL_SD_MASK;
- val |= dpll_params->sd << PLL_SD_SHIFT;
- omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
-
- omap_usb_dpll_relock(phy);
-
- return 0;
-}
-
-static int omap_usb3_init(struct usb_phy *x)
-{
- struct omap_usb *phy = phy_to_omapusb(x);
- int ret;
-
- ret = omap_usb_dpll_lock(phy);
- if (ret)
- return ret;
-
- omap_control_usb_phy_power(phy->control_dev, 1);
-
- return 0;
-}
-
-static int omap_usb3_probe(struct platform_device *pdev)
-{
- struct omap_usb *phy;
- struct resource *res;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *control_node;
- struct platform_device *control_pdev;
-
- if (!node)
- return -EINVAL;
-
- phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- dev_err(&pdev->dev, "unable to alloc mem for OMAP USB3 PHY\n");
- return -ENOMEM;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl");
- phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy->pll_ctrl_base))
- return PTR_ERR(phy->pll_ctrl_base);
-
- phy->dev = &pdev->dev;
-
- phy->phy.dev = phy->dev;
- phy->phy.label = "omap-usb3";
- phy->phy.init = omap_usb3_init;
- phy->phy.set_suspend = omap_usb3_suspend;
- phy->phy.type = USB_PHY_TYPE_USB3;
-
- phy->is_suspended = 1;
- phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
- if (IS_ERR(phy->wkupclk)) {
- dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
- return PTR_ERR(phy->wkupclk);
- }
- clk_prepare(phy->wkupclk);
-
- phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
- if (IS_ERR(phy->optclk)) {
- dev_err(&pdev->dev, "unable to get usb_otg_ss_refclk960m\n");
- return PTR_ERR(phy->optclk);
- }
- clk_prepare(phy->optclk);
-
- phy->sys_clk = devm_clk_get(phy->dev, "sys_clkin");
- if (IS_ERR(phy->sys_clk)) {
- pr_err("%s: unable to get sys_clkin\n", __func__);
- return -EINVAL;
- }
-
- control_node = of_parse_phandle(node, "ctrl-module", 0);
- if (!control_node) {
- dev_err(&pdev->dev, "Failed to get control device phandle\n");
- return -EINVAL;
- }
- control_pdev = of_find_device_by_node(control_node);
- if (!control_pdev) {
- dev_err(&pdev->dev, "Failed to get control device\n");
- return -EINVAL;
- }
-
- phy->control_dev = &control_pdev->dev;
-
- omap_control_usb_phy_power(phy->control_dev, 0);
- usb_add_phy_dev(&phy->phy);
-
- platform_set_drvdata(pdev, phy);
-
- pm_runtime_enable(phy->dev);
- pm_runtime_get(&pdev->dev);
-
- return 0;
-}
-
-static int omap_usb3_remove(struct platform_device *pdev)
-{
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- clk_unprepare(phy->wkupclk);
- clk_unprepare(phy->optclk);
- usb_remove_phy(&phy->phy);
- if (!pm_runtime_suspended(&pdev->dev))
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_RUNTIME
-
-static int omap_usb3_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- clk_disable(phy->wkupclk);
- clk_disable(phy->optclk);
-
- return 0;
-}
-
-static int omap_usb3_runtime_resume(struct device *dev)
-{
- u32 ret = 0;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_usb *phy = platform_get_drvdata(pdev);
-
- ret = clk_enable(phy->optclk);
- if (ret) {
- dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
- goto err1;
- }
-
- ret = clk_enable(phy->wkupclk);
- if (ret) {
- dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
- goto err2;
- }
-
- return 0;
-
-err2:
- clk_disable(phy->optclk);
-
-err1:
- return ret;
-}
-
-static const struct dev_pm_ops omap_usb3_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_usb3_runtime_suspend, omap_usb3_runtime_resume,
- NULL)
-};
-
-#define DEV_PM_OPS (&omap_usb3_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id omap_usb3_id_table[] = {
- { .compatible = "ti,omap-usb3" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_usb3_id_table);
-#endif
-
-static struct platform_driver omap_usb3_driver = {
- .probe = omap_usb3_probe,
- .remove = omap_usb3_remove,
- .driver = {
- .name = "omap-usb3",
- .owner = THIS_MODULE,
- .pm = DEV_PM_OPS,
- .of_match_table = of_match_ptr(omap_usb3_id_table),
- },
-};
-
-module_platform_driver(omap_usb3_driver);
-
-MODULE_ALIAS("platform: omap_usb3");
-MODULE_AUTHOR("Texas Instruments Inc.");
-MODULE_DESCRIPTION("OMAP USB3 phy driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index 551e0a6c0e22..388d89f6b141 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -177,15 +177,15 @@ static int rcar_gen2_usb_phy_probe(struct platform_device *pdev)
struct clk *clk;
int retval;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_err(dev, "No platform data\n");
return -EINVAL;
}
- clk = devm_clk_get(&pdev->dev, "usbhs");
+ clk = devm_clk_get(dev, "usbhs");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Can't get the clock\n");
+ dev_err(dev, "Can't get the clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 214172b68d5d..04778cf80d60 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -27,7 +27,7 @@
#include <linux/io.h>
#include <linux/usb/musb-omap.h>
#include <linux/usb/phy_companion.h>
-#include <linux/usb/omap_usb.h>
+#include <linux/phy/omap_usb.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index 217339dd7a90..17ea3f271bd8 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -47,6 +47,8 @@ struct ulpi_info {
static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
+ ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
static int ulpi_set_otg_flags(struct usb_phy *phy)
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
goto err0;
}
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
return -ENOMEM;
- }
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 82371f61f23d..2d72aa3564a3 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -323,11 +323,11 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r)
goto out;
- dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__);
+ dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
- dev_err(&port->dev, "%s - failed submitting interrupt urb,"
- " error %d\n", __func__, r);
+ dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
+ __func__, r);
ch341_close(port);
goto out;
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 0ac3b3b3236c..2916dea3ede8 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -220,7 +220,7 @@ static int cyberjack_write(struct tty_struct *tty,
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
- "%s - failed submitting write urb, error %d",
+ "%s - failed submitting write urb, error %d\n",
__func__, result);
/* Throw away data. No better idea what to do with it. */
priv->wrfilled = 0;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index bccb1223143a..01bf53392819 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -279,7 +279,7 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
* the generic firmware, but are not used with
* NMEA and SiRF protocols */
dev_dbg(&port->dev,
- "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS",
+ "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS\n",
__func__, new_rate);
return -1;
}
@@ -1224,7 +1224,6 @@ static void cypress_write_int_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &urb->dev->dev;
- int result;
int status = urb->status;
switch (status) {
@@ -1239,21 +1238,9 @@ static void cypress_write_int_callback(struct urb *urb)
__func__, status);
priv->write_urb_in_use = 0;
return;
- case -EPIPE: /* no break needed; clear halt and resubmit */
- if (!priv->comm_is_ok)
- break;
- usb_clear_halt(port->serial->dev, 0x02);
- /* error in the urb, so we have to resubmit it */
- dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
- __func__, status);
- port->interrupt_out_urb->transfer_buffer_length = 1;
- result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
- if (!result)
- return;
- dev_err(dev, "%s - failed resubmitting write urb, error %d\n",
- __func__, result);
- cypress_set_dead(port);
- break;
+ case -EPIPE:
+ /* Cannot call usb_clear_halt while in_interrupt */
+ /* FALLTHROUGH */
default:
dev_err(dev, "%s - unexpected nonzero write status received: %d\n",
__func__, status);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..44ab12986805 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
@@ -904,6 +907,8 @@ static const struct usb_device_id id_table_combined[] = {
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..e599fbfcde5f 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
+#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
@@ -1313,3 +1320,9 @@
* Manufacturer: Smart GSM Team
*/
#define FTDI_Z3X_PID 0x0011
+
+/*
+ * Product: Cressi PC Interface
+ * Manufacturer: Cressi
+ */
+#define FTDI_CRESSI_PID 0x87d0
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index b63ce023f96f..1bd192290b08 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -332,9 +332,9 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
* stuff like 3G modems, so shortcircuit it in the 99.9999999% of
* cases where the USB serial is not a console anyway.
*/
- if (!port->port.console || !port->sysrq)
+ if (!port->port.console || !port->sysrq) {
tty_insert_flip_string(&port->port, ch, urb->actual_length);
- else {
+ } else {
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
@@ -359,24 +359,38 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
-
- if (urb->status) {
- dev_dbg(&port->dev, "%s - non-zero urb status: %d\n",
- __func__, urb->status);
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
return;
+ case -EPIPE:
+ dev_err(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ goto resubmit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
port->serial->type->process_read_urb(urb);
+resubmit:
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
- } else
+ } else {
spin_unlock_irqrestore(&port->lock, flags);
+ }
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
@@ -384,29 +398,38 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
- int status = urb->status;
int i;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
if (port->write_urbs[i] == urb)
break;
-
+ }
spin_lock_irqsave(&port->lock, flags);
port->tx_bytes -= urb->transfer_buffer_length;
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
- if (status) {
- dev_dbg(&port->dev, "%s - non-zero urb status: %d\n",
- __func__, status);
-
- spin_lock_irqsave(&port->lock, flags);
- kfifo_reset_out(&port->write_fifo);
- spin_unlock_irqrestore(&port->lock, flags);
- } else {
- usb_serial_generic_write_start(port, GFP_ATOMIC);
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ case -EPIPE:
+ dev_err_console(port, "%s - urb stopped: %d\n",
+ __func__, urb->status);
+ return;
+ default:
+ dev_err_console(port, "%s - nonzero urb status: %d\n",
+ __func__, urb->status);
+ goto resubmit;
}
+resubmit:
+ usb_serial_generic_write_start(port, GFP_ATOMIC);
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index d00dae17d520..5ad4a0fb4b26 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1151,7 +1151,7 @@ static ssize_t vcc_mode_store(struct device *dev,
goto fail_store_vcc_mode;
}
- dev_dbg(dev, "%s: setting vcc_mode = %ld", __func__, v);
+ dev_dbg(dev, "%s: setting vcc_mode = %ld\n", __func__, v);
if ((v != 3) && (v != 5)) {
dev_err(dev, "%s - vcc_mode %ld is invalid\n", __func__, v);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 265c6776b081..d3acaead5a81 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -397,17 +397,6 @@ static void usa26_instat_callback(struct urb *urb)
msg = (struct keyspan_usa26_portStatusMessage *)data;
-#if 0
- dev_dbg(&urb->dev->dev,
- "%s - port status: port %d cts %d dcd %d dsr %d ri %d toff %d txoff %d rxen %d cr %d",
- __func__, msg->port, msg->hskia_cts, msg->gpia_dcd, msg->dsr,
- msg->ri, msg->_txOff, msg->_txXoff, msg->rxEnabled,
- msg->controlResponse);
-#endif
-
- /* Now do something useful with the data */
-
-
/* Check port number from message and retrieve private data */
if (msg->port >= serial->num_ports) {
dev_dbg(&urb->dev->dev, "%s - Unexpected port number %d\n", __func__, msg->port);
@@ -523,9 +512,6 @@ static void usa28_instat_callback(struct urb *urb)
goto exit;
}
- /*dev_dbg(&urb->dev->dev, "%s %12ph", __func__, data);*/
-
- /* Now do something useful with the data */
msg = (struct keyspan_usa28_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
@@ -605,9 +591,6 @@ static void usa49_instat_callback(struct urb *urb)
goto exit;
}
- /*dev_dbg(&urb->dev->dev, "%s: %11ph", __func__, data);*/
-
- /* Now do something useful with the data */
msg = (struct keyspan_usa49_portStatusMessage *)data;
/* Check port number from message and retrieve private data */
@@ -1793,12 +1776,6 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed\n", __func__);
-#if 0
- else {
- dev_dbg(&port->dev, "%s - usb_submit_urb(setup) OK %d bytes\n", __func__,
- this_urb->transfer_buffer_length);
- }
-#endif
return 0;
}
@@ -1976,13 +1953,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
-#if 0
- else {
- dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__,
- outcont_urb, this_urb->transfer_buffer_length,
- usb_pipeendpoint(this_urb->pipe));
- }
-#endif
return 0;
}
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e972412b614b..742d827f876c 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -189,7 +189,7 @@ exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
- "%s - usb_submit_urb failed with result %d",
+ "%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index c88cc4966b23..d7440b7557af 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -201,7 +201,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
else {
status = get_unaligned_le16(status_buf);
- dev_info(&port->serial->dev->dev, "read status %x %x",
+ dev_info(&port->serial->dev->dev, "read status %x %x\n",
status_buf[0], status_buf[1]);
*line_state_p = klsi_105_status2linestate(status);
@@ -464,7 +464,7 @@ static void klsi_105_set_termios(struct tty_struct *tty,
priv->cfg.baudrate = kl5kusb105a_sio_b115200;
break;
default:
- dev_dbg(dev, "KLSI USB->Serial converter: unsupported baudrate request, using default of 9600");
+ dev_dbg(dev, "unsupported baudrate, using 9600\n");
priv->cfg.baudrate = kl5kusb105a_sio_b9600;
baud = 9600;
break;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index d8d164b82dc3..fee242387f55 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -193,7 +193,7 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_HW_version URB returns: %i\n", __func__, result);
- dev_dbg(dev, "Harware version: %i.%i.%i\n", transfer_buffer[0],
+ dev_dbg(dev, "Hardware version: %i.%i.%i\n", transfer_buffer[0],
transfer_buffer[1], transfer_buffer[2]);
/* get firmware version */
@@ -557,7 +557,8 @@ static int kobil_ioctl(struct tty_struct *tty,
);
dev_dbg(&port->dev,
- "%s - Send reset_all_queues (FLUSH) URB returns: %i", __func__, result);
+ "%s - Send reset_all_queues (FLUSH) URB returns: %i\n",
+ __func__, result);
kfree(transfer_buffer);
return (result < 0) ? -EIO: 0;
default:
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4eb277225a77..dfd728a263d2 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -209,7 +209,7 @@ static int write_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
index, NULL, 0, MOS_WDR_TIMEOUT);
if (status < 0)
dev_err(&usbdev->dev,
- "mos7720: usb_control_msg() failed: %d", status);
+ "mos7720: usb_control_msg() failed: %d\n", status);
return status;
}
@@ -240,7 +240,7 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
*data = *buf;
else if (status < 0)
dev_err(&usbdev->dev,
- "mos7720: usb_control_msg() failed: %d", status);
+ "mos7720: usb_control_msg() failed: %d\n", status);
kfree(buf);
return status;
@@ -399,7 +399,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
&mos_parport->deferred_urbs);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
tasklet_schedule(&mos_parport->urb_tasklet);
- dev_dbg(&usbdev->dev, "tasklet scheduled");
+ dev_dbg(&usbdev->dev, "tasklet scheduled\n");
return 0;
}
@@ -418,7 +418,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
mutex_unlock(&serial->disc_mutex);
if (ret_val) {
dev_err(&usbdev->dev,
- "%s: submit_urb() failed: %d", __func__, ret_val);
+ "%s: submit_urb() failed: %d\n", __func__, ret_val);
spin_lock_irqsave(&mos_parport->listlock, flags);
list_del(&urbtrack->urblist_entry);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
@@ -656,7 +656,7 @@ static size_t parport_mos7715_write_compat(struct parport *pp,
parport_epilogue(pp);
if (retval) {
dev_err(&mos_parport->serial->dev->dev,
- "mos7720: usb_bulk_msg() failed: %d", retval);
+ "mos7720: usb_bulk_msg() failed: %d\n", retval);
return 0;
}
return actual_len;
@@ -875,7 +875,7 @@ static void mos7715_interrupt_callback(struct urb *urb)
if (!(iir & 0x01)) { /* serial port interrupt pending */
switch (iir & 0x0f) {
case SERIAL_IIR_RLS:
- dev_dbg(dev, "Serial Port: Receiver status error or address bit detected in 9-bit mode\n\n");
+ dev_dbg(dev, "Serial Port: Receiver status error or address bit detected in 9-bit mode\n");
break;
case SERIAL_IIR_CTI:
dev_dbg(dev, "Serial Port: Receiver time out\n");
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e9d967ff521b..393be562d875 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -522,11 +522,11 @@ static void mos7840_set_led_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d",
+ dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n",
__func__, urb->status);
break;
default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d",
+ dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
}
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57e4afa..68fc9fe65936 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1525,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
/* Cinterion */
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 7725ed261ed6..504f5bff79c0 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -372,7 +372,7 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
device_port, data, 2, QT2_USB_TIMEOUT);
if (status < 0) {
- dev_err(&port->dev, "%s - open port failed %i", __func__,
+ dev_err(&port->dev, "%s - open port failed %i\n", __func__,
status);
kfree(data);
return status;
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 4ec04f73c800..ef0dbf0703c5 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -220,9 +220,9 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status)
GET_UART_STATUS, GET_UART_STATUS_TYPE,
0, GET_UART_STATUS_MSR, buf, 1, 100);
if (ret < 0)
- dev_err(&port->dev, "failed to get modem status: %d", ret);
+ dev_err(&port->dev, "failed to get modem status: %d\n", ret);
- dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x", ret, *buf);
+ dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x\n", ret, *buf);
*status = *buf;
kfree(buf);
@@ -342,8 +342,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
case 1000000:
buf[0] = 0x0b; break;
default:
- dev_err(&port->dev, "spcp825 driver does not support the "
- "baudrate requested, using default of 9600.\n");
+ dev_err(&port->dev, "unsupported baudrate, using 9600\n");
}
/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 9fa7dd413e83..8fceec7298e0 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -74,9 +74,7 @@ static void symbol_int_callback(struct urb *urb)
tty_insert_flip_string(&port->port, &data[1], data_length);
tty_flip_buffer_push(&port->port);
} else {
- dev_dbg(&port->dev,
- "Improper amount of data received from the device, "
- "%d bytes", urb->actual_length);
+ dev_dbg(&port->dev, "%s - short packet\n", __func__);
}
exit:
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ec7cea585663..3dd3ff8c50d3 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -293,7 +293,7 @@ static int ti_startup(struct usb_serial *serial)
int status;
dev_dbg(&dev->dev,
- "%s - product 0x%4X, num configurations %d, configuration value %d",
+ "%s - product 0x%4X, num configurations %d, configuration value %d\n",
__func__, le16_to_cpu(dev->descriptor.idProduct),
dev->descriptor.bNumConfigurations,
dev->actconfig->desc.bConfigurationValue);
@@ -803,7 +803,7 @@ static void ti_set_termios(struct tty_struct *tty,
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev,
- "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d",
+ "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
__func__, baud, config->wBaudRate, config->wFlags,
config->bDataBits, config->bParity, config->bStopBits,
config->cXon, config->cXoff, config->bUartMode);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b079ddfc..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
- { USB_DEVICE(0x0fcf, 0x1008) }
+ { USB_DEVICE(0x0fcf, 0x1008) }, \
+ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 7c9dc28640bb..81fc0dfcfdcf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -868,7 +868,7 @@ static int usb_serial_probe(struct usb_interface *interface,
max_endpoints = max(max_endpoints, (int)serial->num_ports);
serial->num_port_pointers = max_endpoints;
- dev_dbg(ddev, "setting up %d port structures for this device", max_endpoints);
+ dev_dbg(ddev, "setting up %d port structure(s)\n", max_endpoints);
for (i = 0; i < max_endpoints; ++i) {
port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
if (!port)
@@ -923,9 +923,8 @@ static int usb_serial_probe(struct usb_interface *interface,
port = serial->port[i];
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
- buffer_size = serial->type->bulk_out_size;
- if (!buffer_size)
- buffer_size = usb_endpoint_maxp(endpoint);
+ buffer_size = max_t(int, serial->type->bulk_out_size,
+ usb_endpoint_maxp(endpoint));
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
@@ -1034,7 +1033,7 @@ static int usb_serial_probe(struct usb_interface *interface,
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
dev_set_name(&port->dev, "ttyUSB%d", port->minor);
- dev_dbg(ddev, "registering %s", dev_name(&port->dev));
+ dev_dbg(ddev, "registering %s\n", dev_name(&port->dev));
device_enable_async_suspend(&port->dev);
retval = device_add(&port->dev);
@@ -1161,9 +1160,9 @@ static int usb_serial_reset_resume(struct usb_interface *intf)
usb_serial_unpoison_port_urbs(serial);
serial->suspending = 0;
- if (serial->type->reset_resume)
+ if (serial->type->reset_resume) {
rv = serial->type->reset_resume(serial);
- else {
+ } else {
rv = -EOPNOTSUPP;
intf->needs_binding = 1;
}
@@ -1338,9 +1337,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
if (retval) {
pr_err("problem %d when registering driver %s\n", retval, driver->description);
list_del(&driver->driver_list);
- } else
+ } else {
pr_info("USB Serial support registered for %s\n", driver->description);
-
+ }
mutex_unlock(&table_lock);
return retval;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..13b5bfbaf951 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices also
+ will require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
@@ -202,7 +204,7 @@ config USB_STORAGE_ENE_UB6250
config USB_UAS
tristate "USB Attached SCSI"
- depends on SCSI && BROKEN
+ depends on SCSI && USB_STORAGE
help
The USB Attached SCSI protocol is supported by some USB
storage devices. It permits higher performance by supporting
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
new file mode 100644
index 000000000000..bb05b984d5f6
--- /dev/null
+++ b/drivers/usb/storage/uas-detect.h
@@ -0,0 +1,96 @@
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "usb.h"
+
+static int uas_is_interface(struct usb_host_interface *intf)
+{
+ return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
+ intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
+ intf->desc.bInterfaceProtocol == USB_PR_UAS);
+}
+
+static int uas_isnt_supported(struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ dev_warn(&udev->dev, "The driver for the USB controller %s does not "
+ "support scatter-gather which is\n",
+ hcd->driver->description);
+ dev_warn(&udev->dev, "required by the UAS driver. Please try an"
+ "alternative USB controller if you wish to use UAS.\n");
+ return -ENODEV;
+}
+
+static int uas_find_uas_alt_setting(struct usb_interface *intf)
+{
+ int i;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int sg_supported = udev->bus->sg_tablesize != 0;
+
+ for (i = 0; i < intf->num_altsetting; i++) {
+ struct usb_host_interface *alt = &intf->altsetting[i];
+
+ if (uas_is_interface(alt)) {
+ if (!sg_supported)
+ return uas_isnt_supported(udev);
+ return alt->desc.bAlternateSetting;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int uas_find_endpoints(struct usb_host_interface *alt,
+ struct usb_host_endpoint *eps[])
+{
+ struct usb_host_endpoint *endpoint = alt->endpoint;
+ unsigned i, n_endpoints = alt->desc.bNumEndpoints;
+
+ for (i = 0; i < n_endpoints; i++) {
+ unsigned char *extra = endpoint[i].extra;
+ int len = endpoint[i].extralen;
+ while (len >= 3) {
+ if (extra[1] == USB_DT_PIPE_USAGE) {
+ unsigned pipe_id = extra[2];
+ if (pipe_id > 0 && pipe_id < 5)
+ eps[pipe_id - 1] = &endpoint[i];
+ break;
+ }
+ len -= extra[0];
+ extra += extra[0];
+ }
+ }
+
+ if (!eps[0] || !eps[1] || !eps[2] || !eps[3])
+ return -ENODEV;
+
+ return 0;
+}
+
+static int uas_use_uas_driver(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_endpoint *eps[4] = { };
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ unsigned long flags = id->driver_info;
+ int r, alt;
+
+ usb_stor_adjust_quirks(udev, &flags);
+
+ if (flags & US_FL_IGNORE_UAS)
+ return 0;
+
+ if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams)
+ return 0;
+
+ alt = uas_find_uas_alt_setting(intf);
+ if (alt < 0)
+ return 0;
+
+ r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ if (r < 0)
+ return 0;
+
+ return 1;
+}
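
The helpers above pick a UAS-capable alternate setting and then map its endpoints by walking the class-specific descriptors that follow each endpoint descriptor: extra[0] is bLength, extra[1] is bDescriptorType, and for a Pipe Usage descriptor extra[2] carries the pipe ID. IDs 1-4 designate the command, status, data-in and data-out pipes, which is why eps[pipe_id - 1] lines up with the four slots consumed by uas_configure_endpoints() later in this series. A minimal sketch of that descriptor layout, written only for illustration and following the UAS specification rather than any code in this patch:

	/*
	 * Illustrative layout of the Pipe Usage descriptor parsed above;
	 * the driver itself reads the raw extra[] bytes directly.
	 */
	struct example_pipe_usage_descriptor {
		__u8 bLength;		/* total length, extra[0] above */
		__u8 bDescriptorType;	/* USB_DT_PIPE_USAGE, extra[1] above */
		__u8 bPipeID;		/* 1=command, 2=status, 3=data-in, 4=data-out */
		__u8 Reserved;
	} __attribute__((packed));
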
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d966b59f7d7b..a7ac97cc5949 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,6 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -13,17 +14,21 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include "uas-detect.h"
+
/*
* The r00-r01c specs define this version of the SENSE IU data structure.
* It's still in use by several different firmware releases.
@@ -45,12 +50,17 @@ struct uas_dev_info {
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
int qdepth, resetting;
- struct response_ui response;
+ struct response_iu response;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
unsigned uas_sense_old:1;
+ unsigned running_task:1;
+ unsigned shutdown:1;
struct scsi_cmnd *cmnd;
spinlock_t lock;
+ struct work_struct work;
+ struct list_head inflight_list;
+ struct list_head dead_list;
};
enum {
@@ -85,103 +95,117 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
+static void uas_free_streams(struct uas_dev_info *devinfo);
+static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller);
-static DECLARE_WORK(uas_work, uas_do_work);
-static DEFINE_SPINLOCK(uas_work_lock);
-static LIST_HEAD(uas_work_list);
-
+/* Must be called with devinfo->lock held, will temporarily unlock the lock */
static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
- struct uas_cmd_info *cmdinfo)
+ struct uas_cmd_info *cmdinfo,
+ unsigned long *lock_flags)
{
- unsigned long flags;
-
/*
* The UNLINK_DATA_URBS flag makes sure uas_try_complete
* (called by urb completion) doesn't release cmdinfo
* underneath us.
*/
- spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= UNLINK_DATA_URBS;
- spin_unlock_irqrestore(&devinfo->lock, flags);
+ spin_unlock_irqrestore(&devinfo->lock, *lock_flags);
if (cmdinfo->data_in_urb)
usb_unlink_urb(cmdinfo->data_in_urb);
if (cmdinfo->data_out_urb)
usb_unlink_urb(cmdinfo->data_out_urb);
- spin_lock_irqsave(&devinfo->lock, flags);
+ spin_lock_irqsave(&devinfo->lock, *lock_flags);
cmdinfo->state &= ~UNLINK_DATA_URBS;
- spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_do_work(struct work_struct *work)
{
+ struct uas_dev_info *devinfo =
+ container_of(work, struct uas_dev_info, work);
struct uas_cmd_info *cmdinfo;
- struct uas_cmd_info *temp;
- struct list_head list;
unsigned long flags;
int err;
- spin_lock_irq(&uas_work_lock);
- list_replace_init(&uas_work_list, &list);
- spin_unlock_irq(&uas_work_lock);
-
- list_for_each_entry_safe(cmdinfo, temp, &list, list) {
+ spin_lock_irqsave(&devinfo->lock, flags);
+ list_for_each_entry(cmdinfo, &devinfo->inflight_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
- struct scsi_cmnd *cmnd = container_of(scp,
- struct scsi_cmnd, SCp);
- struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
- spin_lock_irqsave(&devinfo->lock, flags);
- err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
+ SCp);
+
+ if (!(cmdinfo->state & IS_IN_WORK_LIST))
+ continue;
+
+ err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO);
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
- spin_unlock_irqrestore(&devinfo->lock, flags);
- if (err) {
- list_del(&cmdinfo->list);
- spin_lock_irq(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- spin_unlock_irq(&uas_work_lock);
- schedule_work(&uas_work);
- }
+ else
+ schedule_work(&devinfo->work);
}
+ spin_unlock_irqrestore(&devinfo->lock, flags);
}
-static void uas_abort_work(struct uas_dev_info *devinfo)
+static void uas_mark_cmd_dead(struct uas_dev_info *devinfo,
+ struct uas_cmd_info *cmdinfo,
+ int result, const char *caller)
+{
+ struct scsi_pointer *scp = (void *)cmdinfo;
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
+
+ uas_log_cmd_state(cmnd, caller);
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(cmdinfo->state & COMMAND_ABORTED);
+ cmdinfo->state |= COMMAND_ABORTED;
+ cmdinfo->state &= ~IS_IN_WORK_LIST;
+ cmnd->result = result << 16;
+ list_move_tail(&cmdinfo->list, &devinfo->dead_list);
+}
+
+static void uas_abort_inflight(struct uas_dev_info *devinfo, int result,
+ const char *caller)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
- struct list_head list;
unsigned long flags;
- spin_lock_irq(&uas_work_lock);
- list_replace_init(&uas_work_list, &list);
- spin_unlock_irq(&uas_work_lock);
+ spin_lock_irqsave(&devinfo->lock, flags);
+ list_for_each_entry_safe(cmdinfo, temp, &devinfo->inflight_list, list)
+ uas_mark_cmd_dead(devinfo, cmdinfo, result, caller);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+}
+
+static void uas_add_work(struct uas_cmd_info *cmdinfo)
+{
+ struct scsi_pointer *scp = (void *)cmdinfo;
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
+ struct uas_dev_info *devinfo = cmnd->device->hostdata;
+
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
+ cmdinfo->state |= IS_IN_WORK_LIST;
+ schedule_work(&devinfo->work);
+}
+
+static void uas_zap_dead(struct uas_dev_info *devinfo)
+{
+ struct uas_cmd_info *cmdinfo;
+ struct uas_cmd_info *temp;
+ unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
- list_for_each_entry_safe(cmdinfo, temp, &list, list) {
+ list_for_each_entry_safe(cmdinfo, temp, &devinfo->dead_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
- struct scsi_cmnd *cmnd = container_of(scp,
- struct scsi_cmnd, SCp);
- struct uas_dev_info *di = (void *)cmnd->device->hostdata;
-
- if (di == devinfo) {
- cmdinfo->state |= COMMAND_ABORTED;
- cmdinfo->state &= ~IS_IN_WORK_LIST;
- if (devinfo->resetting) {
- /* uas_stat_cmplt() will not do that
- * when a device reset is in
- * progress */
- cmdinfo->state &= ~COMMAND_INFLIGHT;
- }
- uas_try_complete(cmnd, __func__);
- } else {
- /* not our uas device, relink into list */
- list_del(&cmdinfo->list);
- spin_lock_irq(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- spin_unlock_irq(&uas_work_lock);
- }
+ struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
+ SCp);
+ uas_log_cmd_state(cmnd, __func__);
+ WARN_ON_ONCE(!(cmdinfo->state & COMMAND_ABORTED));
+ /* all urbs are killed, clear inflight bits */
+ cmdinfo->state &= ~(COMMAND_INFLIGHT |
+ DATA_IN_URB_INFLIGHT |
+ DATA_OUT_URB_INFLIGHT);
+ uas_try_complete(cmnd, __func__);
}
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
}
@@ -259,20 +283,19 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
- WARN_ON(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT |
UNLINK_DATA_URBS))
return -EBUSY;
- BUG_ON(cmdinfo->state & COMMAND_COMPLETED);
+ WARN_ON_ONCE(cmdinfo->state & COMMAND_COMPLETED);
cmdinfo->state |= COMMAND_COMPLETED;
usb_free_urb(cmdinfo->data_in_urb);
usb_free_urb(cmdinfo->data_out_urb);
- if (cmdinfo->state & COMMAND_ABORTED) {
+ if (cmdinfo->state & COMMAND_ABORTED)
scmd_printk(KERN_INFO, cmnd, "abort completed\n");
- cmnd->result = DID_ABORT << 16;
- }
+ list_del(&cmdinfo->list);
cmnd->scsi_done(cmnd);
return 0;
}
@@ -286,11 +309,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
if (err) {
- spin_lock(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- cmdinfo->state |= IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
- schedule_work(&uas_work);
+ uas_add_work(cmdinfo);
}
}
@@ -298,14 +317,20 @@ static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
struct Scsi_Host *shost = urb->context;
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
u16 tag;
if (urb->status) {
- dev_err(&urb->dev->dev, "URB BAD STATUS %d\n", urb->status);
+ if (urb->status == -ENOENT) {
+ dev_err(&urb->dev->dev, "stat urb: killed, stream %d\n",
+ urb->stream_id);
+ } else {
+ dev_err(&urb->dev->dev, "stat urb: status %d\n",
+ urb->status);
+ }
usb_free_urb(urb);
return;
}
@@ -324,6 +349,9 @@ static void uas_stat_cmplt(struct urb *urb)
if (!cmnd) {
if (iu->iu_id == IU_ID_RESPONSE) {
+ if (!devinfo->running_task)
+ dev_warn(&urb->dev->dev,
+ "stat urb: recv unexpected response iu\n");
/* store results for uas_eh_task_mgmt() */
memcpy(&devinfo->response, iu, sizeof(devinfo->response));
}
@@ -346,17 +374,25 @@ static void uas_stat_cmplt(struct urb *urb)
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
- spin_unlock_irqrestore(&devinfo->lock, flags);
- uas_unlink_data_urbs(devinfo, cmdinfo);
- spin_lock_irqsave(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
break;
case IU_ID_READ_READY:
+ if (!cmdinfo->data_in_urb ||
+ (cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
+ scmd_printk(KERN_ERR, cmnd, "unexpected read rdy\n");
+ break;
+ }
uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
break;
case IU_ID_WRITE_READY:
+ if (!cmdinfo->data_out_urb ||
+ (cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
+ scmd_printk(KERN_ERR, cmnd, "unexpected write rdy\n");
+ break;
+ }
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
default:
@@ -383,8 +419,15 @@ static void uas_data_cmplt(struct urb *urb)
sdb = scsi_out(cmnd);
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
}
- BUG_ON(sdb == NULL);
- if (urb->status) {
+ if (sdb == NULL) {
+ WARN_ON_ONCE(1);
+ } else if (urb->status) {
+ if (urb->status != -ECONNRESET) {
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd,
+ "data cmplt err %d stream %d\n",
+ urb->status, urb->stream_id);
+ }
/* error: no data transfered */
sdb->resid = sdb->length;
} else {
@@ -394,6 +437,17 @@ static void uas_data_cmplt(struct urb *urb)
spin_unlock_irqrestore(&devinfo->lock, flags);
}
+static void uas_cmd_cmplt(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd = urb->context;
+
+ if (urb->status) {
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd, "cmd cmplt err %d\n", urb->status);
+ }
+ usb_free_urb(urb);
+}
+
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
unsigned int pipe, u16 stream_id,
struct scsi_cmnd *cmnd,
@@ -408,8 +462,7 @@ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
- if (devinfo->use_streams)
- urb->stream_id = stream_id;
+ urb->stream_id = stream_id;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
@@ -442,7 +495,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
}
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
- struct scsi_cmnd *cmnd, u16 stream_id)
+ struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
@@ -472,7 +525,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
- usb_free_urb, NULL);
+ uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
@@ -512,13 +565,17 @@ static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
}
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu),
- usb_free_urb, NULL);
+ uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
+ usb_anchor_urb(urb, &devinfo->cmd_urbs);
err = usb_submit_urb(urb, gfp);
- if (err)
+ if (err) {
+ usb_unanchor_urb(urb);
+ uas_log_cmd_state(cmnd, __func__);
+ scmd_printk(KERN_ERR, cmnd, "task submission err %d\n", err);
goto err;
- usb_anchor_urb(urb, &devinfo->cmd_urbs);
+ }
return 0;
@@ -533,38 +590,43 @@ err:
* daft to me.
*/
-static int uas_submit_sense_urb(struct Scsi_Host *shost,
- gfp_t gfp, unsigned int stream)
+static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd,
+ gfp_t gfp, unsigned int stream)
{
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct urb *urb;
+ int err;
urb = uas_alloc_sense_urb(devinfo, gfp, shost, stream);
if (!urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
- if (usb_submit_urb(urb, gfp)) {
+ return NULL;
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ usb_unanchor_urb(urb);
+ uas_log_cmd_state(cmnd, __func__);
shost_printk(KERN_INFO, shost,
- "sense urb submission failure\n");
+ "sense urb submission error %d stream %d\n",
+ err, stream);
usb_free_urb(urb);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return NULL;
}
- usb_anchor_urb(urb, &devinfo->sense_urbs);
- return 0;
+ return urb;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb;
int err;
- WARN_ON(!spin_is_locked(&devinfo->lock));
+ WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & SUBMIT_STATUS_URB) {
- err = uas_submit_sense_urb(cmnd->device->host, gfp,
- cmdinfo->stream);
- if (err) {
- return err;
- }
+ urb = uas_submit_sense_urb(cmnd, gfp, cmdinfo->stream);
+ if (!urb)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
@@ -578,14 +640,18 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
- if (usb_submit_urb(cmdinfo->data_in_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
+ err = usb_submit_urb(cmdinfo->data_in_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_in_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "data in urb submission failure\n");
+ "data in urb submission error %d stream %d\n",
+ err, cmdinfo->data_in_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
- usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
@@ -598,33 +664,37 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
- if (usb_submit_urb(cmdinfo->data_out_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
+ err = usb_submit_urb(cmdinfo->data_out_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_out_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "data out urb submission failure\n");
+ "data out urb submission error %d stream %d\n",
+ err, cmdinfo->data_out_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
- usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_CMD_URB) {
- cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd,
- cmdinfo->stream);
+ cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
if (cmdinfo->state & SUBMIT_CMD_URB) {
- usb_get_urb(cmdinfo->cmd_urb);
- if (usb_submit_urb(cmdinfo->cmd_urb, gfp)) {
+ usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
+ err = usb_submit_urb(cmdinfo->cmd_urb, gfp);
+ if (err) {
+ usb_unanchor_urb(cmdinfo->cmd_urb);
+ uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
- "cmd urb submission failure\n");
+ "cmd urb submission error %d\n", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
- usb_put_urb(cmdinfo->cmd_urb);
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
@@ -644,18 +714,22 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
+ spin_lock_irqsave(&devinfo->lock, flags);
+
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
- spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ memset(cmdinfo, 0, sizeof(*cmdinfo));
+
if (blk_rq_tagged(cmnd->request)) {
cmdinfo->stream = cmnd->request->tag + 2;
} else {
@@ -692,13 +766,10 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- spin_lock(&uas_work_lock);
- list_add_tail(&cmdinfo->list, &uas_work_list);
- cmdinfo->state |= IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
- schedule_work(&uas_work);
+ uas_add_work(cmdinfo);
}
+ list_add_tail(&cmdinfo->list, &devinfo->inflight_list);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
@@ -709,46 +780,78 @@ static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
const char *fname, u8 function)
{
struct Scsi_Host *shost = cmnd->device->host;
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
- u16 tag = devinfo->qdepth - 1;
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ u16 tag = devinfo->qdepth;
unsigned long flags;
+ struct urb *sense_urb;
+ int result = SUCCESS;
spin_lock_irqsave(&devinfo->lock, flags);
+
+ if (devinfo->resetting) {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
+ }
+
+ if (devinfo->running_task) {
+ shost_printk(KERN_INFO, shost,
+ "%s: %s: error already running a task\n",
+ __func__, fname);
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
+ }
+
+ devinfo->running_task = 1;
memset(&devinfo->response, 0, sizeof(devinfo->response));
- if (uas_submit_sense_urb(shost, GFP_ATOMIC, tag)) {
+ sense_urb = uas_submit_sense_urb(cmnd, GFP_NOIO,
+ devinfo->use_streams ? tag : 0);
+ if (!sense_urb) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit sense urb failed\n",
__func__, fname);
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
- if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
+ if (uas_submit_task_urb(cmnd, GFP_NOIO, function, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit task mgmt urb failed\n",
__func__, fname);
+ devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
+ usb_kill_urb(sense_urb);
return FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 3000) == 0) {
+ /*
+ * Note we deliberately do not clear running_task here. If we
+ * allow new tasks to be submitted, there is no way to figure
+ * out if a received response_iu is for the failed task or for
+ * the new one. A bus-reset will eventually clear running_task.
+ */
shost_printk(KERN_INFO, shost,
"%s: %s timed out\n", __func__, fname);
return FAILED;
}
+
+ spin_lock_irqsave(&devinfo->lock, flags);
+ devinfo->running_task = 0;
if (be16_to_cpu(devinfo->response.tag) != tag) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (wrong tag %d/%d)\n", __func__,
fname, be16_to_cpu(devinfo->response.tag), tag);
- return FAILED;
- }
- if (devinfo->response.response_code != RC_TMF_COMPLETE) {
+ result = FAILED;
+ } else if (devinfo->response.response_code != RC_TMF_COMPLETE) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (rc 0x%x)\n", __func__,
fname, devinfo->response.response_code);
- return FAILED;
+ result = FAILED;
}
- return SUCCESS;
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+
+ return result;
}
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
@@ -758,22 +861,19 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
int ret;
- uas_log_cmd_state(cmnd, __func__);
spin_lock_irqsave(&devinfo->lock, flags);
- cmdinfo->state |= COMMAND_ABORTED;
- if (cmdinfo->state & IS_IN_WORK_LIST) {
- spin_lock(&uas_work_lock);
- list_del(&cmdinfo->list);
- cmdinfo->state &= ~IS_IN_WORK_LIST;
- spin_unlock(&uas_work_lock);
+
+ if (devinfo->resetting) {
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return FAILED;
}
+
+ uas_mark_cmd_dead(devinfo, cmdinfo, DID_ABORT, __func__);
if (cmdinfo->state & COMMAND_INFLIGHT) {
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
} else {
- spin_unlock_irqrestore(&devinfo->lock, flags);
- uas_unlink_data_urbs(devinfo, cmdinfo);
- spin_lock_irqsave(&devinfo->lock, flags);
+ uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = SUCCESS;
@@ -795,14 +895,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
struct usb_device *udev = devinfo->udev;
int err;
+ err = usb_lock_device_for_reset(udev, devinfo->intf);
+ if (err) {
+ shost_printk(KERN_ERR, sdev->host,
+ "%s FAILED to get lock err %d\n", __func__, err);
+ return FAILED;
+ }
+
+ shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);
devinfo->resetting = 1;
- uas_abort_work(devinfo);
+ uas_abort_inflight(devinfo, DID_RESET, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
+ uas_zap_dead(devinfo);
err = usb_reset_device(udev);
devinfo->resetting = 0;
+ usb_unlock_device(udev);
+
if (err) {
shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
return FAILED;
@@ -814,7 +925,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
static int uas_slave_alloc(struct scsi_device *sdev)
{
- sdev->hostdata = (void *)sdev->host->hostdata[0];
+ sdev->hostdata = (void *)sdev->host->hostdata;
+
+ /* USB has unusual DMA-alignment requirements: Although the
+ * starting address of each scatter-gather element doesn't matter,
+ * the length of each element except the last must be divisible
+ * by the Bulk maxpacket value. There's currently no way to
+ * express this by block-layer constraints, so we'll cop out
+ * and simply require addresses to be aligned at 512-byte
+ * boundaries. This is okay since most block I/O involves
+ * hardware sectors that are multiples of 512 bytes in length,
+ * and since host controllers up through USB 2.0 have maxpacket
+ * values no larger than 512.
+ *
+ * But it doesn't suffice for Wireless USB, where Bulk maxpacket
+ * values can be as large as 2048. To make that work properly
+ * will require changes to the block layer.
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+
return 0;
}
@@ -822,7 +951,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
- scsi_activate_tcq(sdev, devinfo->qdepth - 3);
+ scsi_activate_tcq(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -843,7 +972,14 @@ static struct scsi_host_template uas_host_template = {
.ordered_tag = 1,
};
+#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+ .driver_info = (flags) }
+
static struct usb_device_id uas_usb_ids[] = {
+# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
/* 0xaa is a prototype device I happen to have access to */
@@ -852,105 +988,55 @@ static struct usb_device_id uas_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
-static int uas_is_interface(struct usb_host_interface *intf)
-{
- return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
- intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
- intf->desc.bInterfaceProtocol == USB_PR_UAS);
-}
-
-static int uas_isnt_supported(struct usb_device *udev)
-{
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
-
- dev_warn(&udev->dev, "The driver for the USB controller %s does not "
- "support scatter-gather which is\n",
- hcd->driver->description);
- dev_warn(&udev->dev, "required by the UAS driver. Please try an"
- "alternative USB controller if you wish to use UAS.\n");
- return -ENODEV;
-}
+#undef UNUSUAL_DEV
static int uas_switch_interface(struct usb_device *udev,
- struct usb_interface *intf)
+ struct usb_interface *intf)
{
- int i;
- int sg_supported = udev->bus->sg_tablesize != 0;
-
- for (i = 0; i < intf->num_altsetting; i++) {
- struct usb_host_interface *alt = &intf->altsetting[i];
-
- if (uas_is_interface(alt)) {
- if (!sg_supported)
- return uas_isnt_supported(udev);
- return usb_set_interface(udev,
- alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
- }
- }
+ int alt;
+
+ alt = uas_find_uas_alt_setting(intf);
+ if (alt < 0)
+ return alt;
- return -ENODEV;
+ return usb_set_interface(udev,
+ intf->altsetting[0].desc.bInterfaceNumber, alt);
}
-static void uas_configure_endpoints(struct uas_dev_info *devinfo)
+static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
- struct usb_interface *intf = devinfo->intf;
struct usb_device *udev = devinfo->udev;
- struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
- unsigned i, n_endpoints = intf->cur_altsetting->desc.bNumEndpoints;
+ int r;
devinfo->uas_sense_old = 0;
devinfo->cmnd = NULL;
- for (i = 0; i < n_endpoints; i++) {
- unsigned char *extra = endpoint[i].extra;
- int len = endpoint[i].extralen;
- while (len > 1) {
- if (extra[1] == USB_DT_PIPE_USAGE) {
- unsigned pipe_id = extra[2];
- if (pipe_id > 0 && pipe_id < 5)
- eps[pipe_id - 1] = &endpoint[i];
- break;
- }
- len -= extra[0];
- extra += extra[0];
- }
- }
+ r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
+ if (r)
+ return r;
- /*
- * Assume that if we didn't find a control pipe descriptor, we're
- * using a device with old firmware that happens to be set up like
- * this.
- */
- if (!eps[0]) {
- devinfo->cmd_pipe = usb_sndbulkpipe(udev, 1);
- devinfo->status_pipe = usb_rcvbulkpipe(udev, 1);
- devinfo->data_in_pipe = usb_rcvbulkpipe(udev, 2);
- devinfo->data_out_pipe = usb_sndbulkpipe(udev, 2);
-
- eps[1] = usb_pipe_endpoint(udev, devinfo->status_pipe);
- eps[2] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
- eps[3] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
- } else {
- devinfo->cmd_pipe = usb_sndbulkpipe(udev,
- eps[0]->desc.bEndpointAddress);
- devinfo->status_pipe = usb_rcvbulkpipe(udev,
- eps[1]->desc.bEndpointAddress);
- devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
- eps[2]->desc.bEndpointAddress);
- devinfo->data_out_pipe = usb_sndbulkpipe(udev,
- eps[3]->desc.bEndpointAddress);
- }
+ devinfo->cmd_pipe = usb_sndbulkpipe(udev,
+ usb_endpoint_num(&eps[0]->desc));
+ devinfo->status_pipe = usb_rcvbulkpipe(udev,
+ usb_endpoint_num(&eps[1]->desc));
+ devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
+ usb_endpoint_num(&eps[2]->desc));
+ devinfo->data_out_pipe = usb_sndbulkpipe(udev,
+ usb_endpoint_num(&eps[3]->desc));
- devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, 256,
- GFP_KERNEL);
- if (devinfo->qdepth < 0) {
+ if (udev->speed != USB_SPEED_SUPER) {
devinfo->qdepth = 256;
devinfo->use_streams = 0;
} else {
+ devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
+ 3, 256, GFP_KERNEL);
+ if (devinfo->qdepth < 0)
+ return devinfo->qdepth;
devinfo->use_streams = 1;
}
+
+ return 0;
}
static void uas_free_streams(struct uas_dev_info *devinfo)
@@ -964,30 +1050,23 @@ static void uas_free_streams(struct uas_dev_info *devinfo)
usb_free_streams(devinfo->intf, eps, 3, GFP_KERNEL);
}
-/*
- * XXX: What I'd like to do here is register a SCSI host for each USB host in
- * the system. Follow usb-storage's design of registering a SCSI host for
- * each USB device for the moment. Can implement this by walking up the
- * USB hierarchy until we find a USB host.
- */
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
- int result;
- struct Scsi_Host *shost;
+ int result = -ENOMEM;
+ struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
- if (uas_switch_interface(udev, intf))
+ if (!uas_use_uas_driver(intf, id))
return -ENODEV;
- devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
- if (!devinfo)
- return -ENOMEM;
+ if (uas_switch_interface(udev, intf))
+ return -ENODEV;
- result = -ENOMEM;
- shost = scsi_host_alloc(&uas_host_template, sizeof(void *));
+ shost = scsi_host_alloc(&uas_host_template,
+ sizeof(struct uas_dev_info));
if (!shost)
- goto free;
+ goto set_alt0;
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
@@ -995,33 +1074,40 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
+ devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
+ devinfo->running_task = 0;
+ devinfo->shutdown = 0;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
- uas_configure_endpoints(devinfo);
+ INIT_WORK(&devinfo->work, uas_do_work);
+ INIT_LIST_HEAD(&devinfo->inflight_list);
+ INIT_LIST_HEAD(&devinfo->dead_list);
- result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 3);
+ result = uas_configure_endpoints(devinfo);
if (result)
- goto free;
+ goto set_alt0;
- result = scsi_add_host(shost, &intf->dev);
+ result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
if (result)
- goto deconfig_eps;
+ goto free_streams;
- shost->hostdata[0] = (unsigned long)devinfo;
+ result = scsi_add_host(shost, &intf->dev);
+ if (result)
+ goto free_streams;
scsi_scan_host(shost);
usb_set_intfdata(intf, shost);
return result;
-deconfig_eps:
+free_streams:
uas_free_streams(devinfo);
- free:
- kfree(devinfo);
+set_alt0:
+ usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
if (shost)
scsi_host_put(shost);
return result;
@@ -1029,45 +1115,146 @@ deconfig_eps:
static int uas_pre_reset(struct usb_interface *intf)
{
-/* XXX: Need to return 1 if it's not our device in error handling */
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (devinfo->shutdown)
+ return 0;
+
+ /* Block new requests */
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_block_requests(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* Wait for any pending requests to complete */
+ flush_work(&devinfo->work);
+ if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
+ shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
+ return 1;
+ }
+
+ uas_free_streams(devinfo);
+
return 0;
}
static int uas_post_reset(struct usb_interface *intf)
{
-/* XXX: Need to return 1 if it's not our device in error handling */
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (devinfo->shutdown)
+ return 0;
+
+ if (uas_configure_endpoints(devinfo) != 0) {
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error after reset", __func__);
+ return 1;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_unblock_requests(shost);
+
+ return 0;
+}
+
+static int uas_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+
+ /* Wait for any pending requests to complete */
+ flush_work(&devinfo->work);
+ if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
+ shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int uas_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int uas_reset_resume(struct usb_interface *intf)
+{
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ unsigned long flags;
+
+ if (uas_configure_endpoints(devinfo) != 0) {
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error after reset", __func__);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
return 0;
}
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
- struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->resetting = 1;
- uas_abort_work(devinfo);
+ cancel_work_sync(&devinfo->work);
+ uas_abort_inflight(devinfo, DID_NO_CONNECT, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
+ uas_zap_dead(devinfo);
scsi_remove_host(shost);
uas_free_streams(devinfo);
- kfree(devinfo);
+ scsi_host_put(shost);
}
/*
- * XXX: Should this plug into libusual so we can auto-upgrade devices from
- * Bulk-Only to UAS?
+ * Put the device back in usb-storage mode on shutdown, as some BIOS-es
+ * hang on reboot when the device is still in uas mode. Note the reset is
+ * necessary as some devices won't revert to usb-storage mode without it.
*/
+static void uas_shutdown(struct device *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct Scsi_Host *shost = usb_get_intfdata(intf);
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+
+ if (system_state != SYSTEM_RESTART)
+ return;
+
+ devinfo->shutdown = 1;
+ uas_free_streams(devinfo);
+ usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
+ usb_reset_device(udev);
+}
+
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
+ .suspend = uas_suspend,
+ .resume = uas_resume,
+ .reset_resume = uas_reset_resume,
+ .drvwrap.driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
+MODULE_AUTHOR(
+ "Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255c2ade..f4a82291894a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
@@ -2079,6 +2086,11 @@ UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
"Digital MP3 Audio Player",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/* Unusual uas devices */
+#if IS_ENABLED(CONFIG_USB_UAS)
+#include "unusual_uas.h"
+#endif
+
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(USB_SC_RBC, USB_PR_CB),
USUAL_DEV(USB_SC_8020, USB_PR_CB),
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
new file mode 100644
index 000000000000..7244444df8ee
--- /dev/null
+++ b/drivers/usb/storage/unusual_uas.h
@@ -0,0 +1,52 @@
+/* Driver for USB Attached SCSI devices - Unusual Devices File
+ *
+ * (c) 2013 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the same file for the usb-storage driver, which is:
+ * (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
+ * (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * IMPORTANT NOTE: This file must be included in another file which defines
+ * a UNUSUAL_DEV macro before this file is included.
+ */
+
+/*
+ * If you edit this file, please try to keep it sorted first by VendorID,
+ * then by ProductID.
+ *
+ * If you want to add an entry for this file, be sure to include the
+ * following information:
+ * - a patch that adds the entry for your device, including your
+ * email address right above the entry (plus maybe a brief
+ * explanation of the reason for the entry),
+ * - lsusb -v output for the device
+ * Send your submission to Hans de Goede <hdegoede@redhat.com>
+ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
+ */
+
+/*
+ * This is an example entry for the US_FL_IGNORE_UAS flag. Once we have an
+ * actual entry using US_FL_IGNORE_UAS this entry should be removed.
+ *
+ * UNUSUAL_DEV( 0xabcd, 0x1234, 0x0100, 0x0100,
+ * "Example",
+ * "Storage with broken UAS",
+ * USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ * US_FL_IGNORE_UAS),
+ */
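
Entries in this file take their shape from whichever UNUSUAL_DEV macro is in scope at the include site. Under the definition added to uas.c above, which keeps only the VID/PID/bcdDevice range and the flags, the commented example would expand to roughly the following (the IDs are the placeholder values from the comment, not a real device); when the same file is pulled in via unusual_devs.h for usb-storage, it expands under that side's richer UNUSUAL_DEV macro instead:

	/* Approximate expansion of the example entry under uas.c's UNUSUAL_DEV. */
	static const struct usb_device_id example_ids[] = {
		{ USB_DEVICE_VER(0xabcd, 0x1234, 0x0100, 0x0100),
		  .driver_info = US_FL_IGNORE_UAS },
		{ }
	};
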
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 1c0b89f2a138..f1c96261a501 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -72,6 +72,10 @@
#include "sierra_ms.h"
#include "option_ms.h"
+#if IS_ENABLED(CONFIG_USB_UAS)
+#include "uas-detect.h"
+#endif
+
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
@@ -459,14 +463,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
-static void adjust_quirks(struct us_data *us)
+void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
{
char *p;
- u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
- u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ u16 vid = le16_to_cpu(udev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(udev->descriptor.idProduct);
unsigned f = 0;
unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
- US_FL_FIX_CAPACITY |
+ US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
@@ -537,14 +541,18 @@ static void adjust_quirks(struct us_data *us)
case 's':
f |= US_FL_SINGLE_LUN;
break;
+ case 'u':
+ f |= US_FL_IGNORE_UAS;
+ break;
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
/* Ignore unrecognized flag characters */
}
}
- us->fflags = (us->fflags & ~mask) | f;
+ *fflags = (*fflags & ~mask) | f;
}
+EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
/* Get the unusual_devs entries and the string descriptors */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
@@ -564,7 +572,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
idesc->bInterfaceProtocol :
unusual_dev->useTransport;
us->fflags = id->driver_info;
- adjust_quirks(us);
+ usb_stor_adjust_quirks(us->pusb_dev, &us->fflags);
if (us->fflags & US_FL_IGNORE_DEVICE) {
dev_info(pdev, "device ignored\n");
@@ -1035,6 +1043,12 @@ static int storage_probe(struct usb_interface *intf,
int result;
int size;
+ /* If uas is enabled and this device can do uas then ignore it. */
+#if IS_ENABLED(CONFIG_USB_UAS)
+ if (uas_use_uas_driver(intf, id))
+ return -ENXIO;
+#endif
+
/*
* If the device isn't standard (is handled by a subdriver
* module) then don't accept it.
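
Exporting the quirk parsing as usb_stor_adjust_quirks() lets the uas side honour the same "quirks=" module parameter, and the new 'u' letter maps to US_FL_IGNORE_UAS. Assuming the usual VID:PID:letters syntax of that parameter, something like usb-storage.quirks=0bc2:3312:u (IDs purely illustrative) would make a probe-time check such as the sketch below skip UAS for that device and leave it to usb-storage:

	#include <linux/usb.h>
	#include <linux/usb_usual.h>
	#include "usb.h"

	/* Sketch mirroring the uas-detect.h flow; the function name is hypothetical. */
	static int example_device_ignores_uas(struct usb_interface *intf,
					      const struct usb_device_id *id)
	{
		unsigned long flags = id->driver_info;

		/* fold in both unusual_uas.h flags and the quirks= module parameter */
		usb_stor_adjust_quirks(interface_to_usbdev(intf), &flags);

		return (flags & US_FL_IGNORE_UAS) != 0;
	}
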
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 75f70f04f37b..307e339a9478 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -201,4 +201,7 @@ extern int usb_stor_probe1(struct us_data **pus,
extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
+extern void usb_stor_adjust_quirks(struct usb_device *dev,
+ unsigned long *fflags);
+
#endif
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 3b959e83b28e..0677139c6065 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -284,7 +284,7 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
struct device *dev = wusbhc->dev;
struct wusb_dev *wusb_dev;
struct wusb_port *port;
- unsigned idx, devnum;
+ unsigned idx;
mutex_lock(&wusbhc->mutex);
@@ -312,8 +312,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
goto error_unlock;
}
- devnum = idx + 2;
-
/* Make sure we are using no crypto on that "virtual port" */
wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 368360f9a93a..252c7bd9218a 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -75,8 +75,6 @@ void __wa_destroy(struct wahc *wa)
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
- usb_kill_urb(wa->buf_in_urb);
- usb_put_urb(wa->buf_in_urb);
}
kfree(wa->dti_buf);
wa_nep_destroy(wa);
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index a2ef84b8397e..f2a8d29e17b9 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -125,7 +125,8 @@ struct wa_rpipe {
enum wa_dti_state {
WA_DTI_TRANSFER_RESULT_PENDING,
- WA_DTI_ISOC_PACKET_STATUS_PENDING
+ WA_DTI_ISOC_PACKET_STATUS_PENDING,
+ WA_DTI_BUF_IN_DATA_PENDING
};
enum wa_quirks {
@@ -134,8 +135,20 @@ enum wa_quirks {
* requests to be concatenated and not sent as separate packets.
*/
WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+ /*
+ * The Alereon HWA can be instructed to not send transfer notifications
+ * as an optimization.
+ */
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
};
+enum wa_vendor_specific_requests {
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
+ WA_REQ_ALEREON_FEATURE_SET = 0x01,
+ WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
+};
+
+#define WA_MAX_BUF_IN_URBS 4
/**
* Instance of a HWA Host Controller
*
@@ -206,7 +219,9 @@ struct wahc {
u32 dti_isoc_xfer_in_progress;
u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
- struct urb *buf_in_urb; /* URB for reading data in */
+ /* URBs for reading data in */
+ struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+ int active_buf_in_urbs; /* number of buf_in_urbs active. */
struct edc dti_edc; /* DTI error density counter */
void *dti_buf;
size_t dti_buf_size;
@@ -234,6 +249,7 @@ struct wahc {
extern int wa_create(struct wahc *wa, struct usb_interface *iface,
kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
+extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
@@ -275,6 +291,8 @@ static inline void wa_rpipe_init(struct wahc *wa)
static inline void wa_init(struct wahc *wa)
{
+ int index;
+
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
@@ -288,6 +306,10 @@ static inline void wa_init(struct wahc *wa)
INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+ usb_init_urb(&(wa->buf_in_urbs[index]));
+ wa->active_buf_in_urbs = 0;
}
/**
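
The new WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS quirk and the vendor request values above imply a vendor-specific control transfer to toggle the feature. The hunks shown here only declare the constants, so the following is just a sketch of how such a request could be issued; the recipient bits and the wValue/wIndex placement are assumptions, not taken from this series:

	#include <linux/usb.h>

	/* Hypothetical helper; feature is WA_REQ_ALEREON_FEATURE_SET or _CLEAR. */
	static int example_alereon_set_feature(struct usb_device *udev, u16 feature)
	{
		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				       WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
				       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				       feature, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	}
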
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 6ca80a4efc1b..6d6da127f6de 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -298,7 +298,7 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
break;
}
itr += hdr->bLength;
- itr_size -= hdr->bDescriptorType;
+ itr_size -= hdr->bLength;
}
out:
return epcd;
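
The one-line change above fixes the descriptor walk in rpipe_epc_find(): the remaining size must shrink by bLength, the same amount the iterator advances, rather than by bDescriptorType, otherwise the byte accounting drifts and the loop can over- or under-run the blob. A minimal, self-contained sketch of the corrected pattern for scanning concatenated USB descriptors (helper name hypothetical):

	#include <linux/types.h>
	#include <linux/usb/ch9.h>

	static void *example_find_descriptor(void *buf, size_t size, u8 wanted_type)
	{
		u8 *itr = buf;
		size_t itr_size = size;

		while (itr_size >= sizeof(struct usb_descriptor_header)) {
			struct usb_descriptor_header *hdr = (void *)itr;

			if (hdr->bLength < sizeof(*hdr) || hdr->bLength > itr_size)
				break;			/* malformed blob */
			if (hdr->bDescriptorType == wanted_type)
				return hdr;
			itr += hdr->bLength;		/* advance by bLength ... */
			itr_size -= hdr->bLength;	/* ... and shrink by the same amount */
		}
		return NULL;
	}
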
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 3cd96e936d77..c8e2a47d62a7 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -167,6 +167,8 @@ struct wa_xfer {
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
struct wa_seg *seg, int curr_iso_frame);
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status);
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
@@ -367,13 +369,13 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
break;
case WA_SEG_ERROR:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
xfer, wa_xfer_id(xfer), seg->index, seg->result,
seg->result);
goto out;
case WA_SEG_ABORTED:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zu(0x%08zX)\n",
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
xfer, wa_xfer_id(xfer), seg->index, seg->result,
seg->result);
goto out;
@@ -390,6 +392,24 @@ out:
}
/*
+ * Mark the given segment as done. Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+ struct wa_seg *seg, enum wa_seg_status status)
+{
+ seg->status = status;
+ xfer->segs_done++;
+
+ /* check for done. */
+ return __wa_xfer_is_done(xfer);
+}
+
+/*
* Search for a transfer list ID on the HCD's URB list
*
* For 32 bit architectures, we use the pointer itself; for 64 bits, a
@@ -416,12 +436,51 @@ out:
struct wa_xfer_abort_buffer {
struct urb urb;
+ struct wahc *wa;
struct wa_xfer_abort cmd;
};
static void __wa_xfer_abort_cb(struct urb *urb)
{
struct wa_xfer_abort_buffer *b = urb->context;
+ struct wahc *wa = b->wa;
+
+ /*
+ * If the abort request URB failed, then the HWA did not get the abort
+ * command. Forcibly clean up the xfer without waiting for a Transfer
+ * Result from the HWA.
+ */
+ if (urb->status < 0) {
+ struct wa_xfer *xfer;
+ struct device *dev = &wa->usb_iface->dev;
+
+ xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
+ dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
+ __func__, urb->status);
+ if (xfer) {
+ unsigned long flags;
+ int done;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
+ __func__, xfer, wa_xfer_id(xfer));
+ spin_lock_irqsave(&xfer->lock, flags);
+ /* mark all segs as aborted. */
+ wa_complete_remaining_xfer_segs(xfer, 0,
+ WA_SEG_ABORTED);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ } else {
+ dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
+ __func__, le32_to_cpu(b->cmd.dwTransferID));
+ }
+ }
+
+ wa_put(wa); /* taken in __wa_xfer_abort */
usb_put_urb(&b->urb);
}
@@ -449,6 +508,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
b->cmd.bRequestType = WA_XFER_ABORT;
b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
+ b->wa = wa_get(xfer->wa);
usb_init_urb(&b->urb);
usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
@@ -462,6 +522,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
error_submit:
+ wa_put(xfer->wa);
if (printk_ratelimit())
dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
xfer, result);
@@ -733,6 +794,8 @@ static void wa_seg_dto_cb(struct urb *urb)
seg->isoc_frame_offset + seg->isoc_frame_index);
/* resubmit the URB with the next isoc frame. */
+ /* take a ref on resubmit. */
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
@@ -760,9 +823,13 @@ static void wa_seg_dto_cb(struct urb *urb)
goto error_default;
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
return;
error_dto_submit:
+ /* taken on resubmit attempt. */
+ wa_xfer_put(xfer);
error_default:
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
@@ -772,12 +839,10 @@ error_default:
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (holding_dto) {
@@ -788,7 +853,8 @@ error_default:
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
-
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -842,12 +908,11 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
}
if (seg->status != WA_SEG_ERROR) {
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -855,6 +920,8 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -919,18 +986,18 @@ static void wa_seg_tr_cb(struct urb *urb)
}
usb_unlink_urb(seg->isoc_pack_desc_urb);
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -940,7 +1007,7 @@ static void wa_seg_tr_cb(struct urb *urb)
*/
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
const unsigned int bytes_transferred,
- const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
+ const unsigned int bytes_to_transfer, int *out_num_sgs)
{
struct scatterlist *out_sg;
unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
@@ -1094,14 +1161,13 @@ static int __wa_populate_dto_urb(struct wa_xfer *xfer,
*/
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
- int result, cnt, iso_frame_offset;
+ int result, cnt, isoc_frame_offset = 0;
size_t alloc_size = sizeof(*xfer->seg[0])
- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
struct usb_device *usb_dev = xfer->wa->usb_dev;
const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
struct wa_seg *seg;
size_t buf_itr, buf_size, buf_itr_size;
- int isoc_frame_offset = 0;
result = -ENOMEM;
xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -1109,7 +1175,6 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
goto error_segs_kzalloc;
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
- iso_frame_offset = 0;
for (cnt = 0; cnt < xfer->segs; cnt++) {
size_t iso_pkt_descr_size = 0;
int seg_isoc_frame_count = 0, seg_isoc_size = 0;
@@ -1318,30 +1383,41 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
/* default to done unless we encounter a multi-frame isoc segment. */
*dto_done = 1;
+ /*
+ * Take a ref for each segment urb so the xfer cannot disappear until
+ * all of the callbacks run.
+ */
+ wa_xfer_get(xfer);
/* submit the transfer request. */
+ seg->status = WA_SEG_SUBMITTED;
result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
__func__, xfer, seg->index, result);
- goto error_seg_submit;
+ wa_xfer_put(xfer);
+ goto error_tr_submit;
}
/* submit the isoc packet descriptor if present. */
if (seg->isoc_pack_desc_urb) {
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
seg->isoc_frame_index = 0;
if (result < 0) {
pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_iso_pack_desc_submit;
}
}
/* submit the out data if this is an out request. */
if (seg->dto_urb) {
struct wahc *wa = xfer->wa;
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_dto_submit;
}
/*
@@ -1353,7 +1429,6 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
&& (seg->isoc_frame_count > 1))
*dto_done = 0;
}
- seg->status = WA_SEG_SUBMITTED;
rpipe_avail_dec(rpipe);
return 0;
@@ -1361,7 +1436,7 @@ error_dto_submit:
usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
usb_unlink_urb(&seg->tr_urb);
-error_seg_submit:
+error_tr_submit:
seg->status = WA_SEG_ERROR;
seg->result = result;
*dto_done = 1;
@@ -1393,6 +1468,12 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
+ /*
+ * Get a reference to the xfer in case the callbacks for the
+ * URBs submitted by __wa_seg_submit attempt to complete
+ * the xfer before this function completes.
+ */
+ wa_xfer_get(xfer);
result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
/* release the dto resource if this RPIPE is done with it. */
if (dto_done)
@@ -1401,13 +1482,23 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
xfer, wa_xfer_id(xfer), seg->index,
atomic_read(&rpipe->segs_available), result);
if (unlikely(result < 0)) {
+ int done;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
__wa_xfer_abort(xfer);
+ /*
+ * This seg was marked as submitted when it was put on
+ * the RPIPE seg_list. Mark it done.
+ */
xfer->segs_done++;
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
spin_lock_irqsave(&rpipe->seg_lock, flags);
}
+ wa_xfer_put(xfer);
}
/*
* Mark this RPIPE as waiting if dto was not acquired, there are
@@ -1592,12 +1683,19 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
}
+ /*
+ * Get a xfer reference since __wa_xfer_submit starts asynchronous
+ * operations that may try to complete the xfer before this function
+ * exits.
+ */
+ wa_xfer_get(xfer);
result = __wa_xfer_submit(xfer);
if (result < 0) {
dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
return 0;
/*
@@ -1623,6 +1721,7 @@ error_xfer_submit:
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
+ wa_xfer_put(xfer);
/* return success since the completion routine will run. */
return 0;
}
@@ -1832,20 +1931,20 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
/* check if it is safe to unlink. */
spin_lock_irqsave(&wa->xfer_list_lock, flags);
result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ if ((result == 0) && urb->hcpriv) {
+ /*
+ * Get a xfer ref to prevent a race with wa_xfer_giveback
+ * cleaning up the xfer while we are working with it.
+ */
+ wa_xfer_get(urb->hcpriv);
+ }
spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
if (result)
return result;
xfer = urb->hcpriv;
- if (xfer == NULL) {
- /*
- * Nothing setup yet enqueue will see urb->status !=
- * -EINPROGRESS (by hcd layer) and bail out with
- * error, no need to do completion
- */
- BUG_ON(urb->status == -EINPROGRESS);
- goto out;
- }
+ if (xfer == NULL)
+ return -ENOENT;
spin_lock_irqsave(&xfer->lock, flags);
pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
@@ -1856,6 +1955,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
result = -ENOENT;
goto out_unlock;
}
+ /*
+ * Check for done to avoid racing with wa_xfer_giveback and completing
+ * twice.
+ */
+ if (__wa_xfer_is_done(xfer)) {
+ pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+ xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1865,6 +1974,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+ /*
+ * grab the rpipe->seg_lock here to prevent racing with
+ * __wa_xfer_delayed_run.
+ */
+ spin_lock(&rpipe->seg_lock);
for (cnt = 0; cnt < xfer->segs; cnt++) {
seg = xfer->seg[cnt];
pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
@@ -1885,16 +1999,24 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
*/
seg->status = WA_SEG_ABORTED;
seg->result = -ENOENT;
- spin_lock_irqsave(&rpipe->seg_lock, flags2);
list_del(&seg->list_node);
xfer->segs_done++;
- spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
break;
case WA_SEG_DONE:
case WA_SEG_ERROR:
case WA_SEG_ABORTED:
break;
/*
+ * The buf_in data for a segment in the
+ * WA_SEG_DTI_PENDING state is actively being read.
+ * Let wa_buf_in_cb handle it since it will be called
+ * and will increment xfer->segs_done. Cleaning up
+ * here could cause wa_buf_in_cb to access the xfer
+ * after it has been completed/freed.
+ */
+ case WA_SEG_DTI_PENDING:
+ break;
+ /*
* In the states below, the HWA device already knows
* about the transfer. If an abort request was sent,
* allow the HWA to process it and wait for the
@@ -1903,7 +2025,6 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
*/
case WA_SEG_SUBMITTED:
case WA_SEG_PENDING:
- case WA_SEG_DTI_PENDING:
/*
* Check if the abort was successfully sent. This could
* be false if the HWA has been removed but we haven't
@@ -1917,6 +2038,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
break;
}
}
+ spin_unlock(&rpipe->seg_lock);
xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -1924,11 +2046,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
return result;
out_unlock:
spin_unlock_irqrestore(&xfer->lock, flags);
-out:
+ wa_xfer_put(xfer);
return result;
dequeue_delayed:
@@ -1937,6 +2060,7 @@ dequeue_delayed:
xfer->result = urb->status;
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_giveback(xfer);
+ wa_xfer_put(xfer);
usb_put_urb(urb); /* we got a ref in enqueue() */
return 0;
}
@@ -1996,15 +2120,17 @@ static int wa_xfer_status_to_errno(u8 status)
* no other segment transfer results will be returned from the device.
* Mark the remaining submitted or pending xfers as completed so that
* the xfer will complete cleanly.
+ *
+ * xfer->lock must be held
+ *
*/
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- struct wa_seg *incoming_seg, enum wa_seg_status status)
+ int starting_index, enum wa_seg_status status)
{
int index;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
- for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
- index++) {
+ for (index = starting_index; index < xfer->segs_submitted; index++) {
struct wa_seg *current_seg = xfer->seg[index];
BUG_ON(current_seg == NULL);
@@ -2033,73 +2159,110 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
}
}
-/* Populate the wa->buf_in_urb based on the current isoc transfer state. */
-static void __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
- struct wa_seg *seg, int curr_iso_frame)
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+ struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
{
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+ int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+ int next_frame_contiguous;
+ struct usb_iso_packet_descriptor *iso_frame;
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+ /*
+ * If the current frame's data is contiguous with the next frame's data
+ * and its actual_length is a multiple of the DTI endpoint max packet size,
+ * combine the current frame with the next frame in a single URB. This
+ * reduces the number of URBs that must be submitted in that case.
+ */
+ seg_index = seg->isoc_frame_index;
+ do {
+ next_frame_contiguous = 0;
+
+ iso_frame = &iso_frame_desc[urb_frame_index];
+ total_len += iso_frame->actual_length;
+ ++urb_frame_index;
+ ++seg_index;
+
+ if (seg_index < seg->isoc_frame_count) {
+ struct usb_iso_packet_descriptor *next_iso_frame;
+
+ next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+ if ((iso_frame->offset + iso_frame->actual_length) ==
+ next_iso_frame->offset)
+ next_frame_contiguous = 1;
+ }
+ } while (next_frame_contiguous
+ && ((iso_frame->actual_length % dti_packet_size) == 0));
/* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
- wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
- xfer->urb->iso_frame_desc[curr_iso_frame].offset;
- wa->buf_in_urb->transfer_buffer_length =
- xfer->urb->iso_frame_desc[curr_iso_frame].length;
- wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- wa->buf_in_urb->context = seg;
+ buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ iso_frame_desc[urb_start_frame].offset;
+ buf_in_urb->transfer_buffer_length = total_len;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ buf_in_urb->context = seg;
+
+ /* return the number of frames included in this URB. */
+ return seg_index - seg->isoc_frame_index;
}
-/* Populate the wa->buf_in_urb based on the current transfer state. */
-static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer,
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
unsigned int seg_idx, unsigned int bytes_transferred)
{
int result = 0;
struct wa_seg *seg = xfer->seg[seg_idx];
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
/* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->num_mapped_sgs = 0;
if (xfer->is_dma) {
- wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ (seg_idx * xfer->seg_size);
- wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
} else {
/* do buffer or SG processing. */
- wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
if (xfer->urb->transfer_buffer) {
- wa->buf_in_urb->transfer_buffer =
+ buf_in_urb->transfer_buffer =
xfer->urb->transfer_buffer
+ (seg_idx * xfer->seg_size);
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
} else {
/* allocate an SG list to store seg_size bytes
and copy the subset of the xfer->urb->sg
that matches the buffer subset we are
about to read. */
- wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+ buf_in_urb->sg = wa_xfer_create_subset_sg(
xfer->urb->sg,
seg_idx * xfer->seg_size,
bytes_transferred,
- &(wa->buf_in_urb->num_sgs));
+ &(buf_in_urb->num_sgs));
- if (!(wa->buf_in_urb->sg)) {
- wa->buf_in_urb->num_sgs = 0;
+ if (!(buf_in_urb->sg)) {
+ buf_in_urb->num_sgs = 0;
result = -ENOMEM;
}
- wa->buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->transfer_buffer = NULL;
}
}
- wa->buf_in_urb->transfer_buffer_length = bytes_transferred;
- wa->buf_in_urb->context = seg;
+ buf_in_urb->transfer_buffer_length = bytes_transferred;
+ buf_in_urb->context = seg;
return result;
}
@@ -2124,6 +2287,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
u8 usb_status;
unsigned rpipe_ready = 0;
unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+ struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
spin_lock_irqsave(&xfer->lock, flags);
seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -2147,7 +2311,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
}
if (usb_status & 0x80) {
seg->result = wa_xfer_status_to_errno(usb_status);
- dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+ dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
xfer, xfer->id, seg->index, usb_status);
seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
WA_SEG_ABORTED : WA_SEG_ERROR;
@@ -2162,7 +2326,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
* transfers with data or below for no data, the xfer will complete.
*/
if (xfer_result->bTransferSegment & 0x80)
- wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+ WA_SEG_DONE);
if (usb_pipeisoc(xfer->urb->pipe)
&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
/* set up WA state to read the isoc packet status next. */
@@ -2173,20 +2338,21 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
&& (bytes_transferred > 0)) {
/* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
- result = wa_populate_buf_in_urb(wa, xfer, seg_idx,
+ result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
bytes_transferred);
if (result < 0)
goto error_buf_in_populate;
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
- if (result < 0)
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
goto error_submit_buf_in;
+ }
} else {
/* OUT data phase or no data, complete it -- */
- seg->status = WA_SEG_DONE;
seg->result = bytes_transferred;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -2205,15 +2371,15 @@ error_submit_buf_in:
dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
xfer, seg_idx, result);
seg->result = result;
- kfree(wa->buf_in_urb->sg);
- wa->buf_in_urb->sg = NULL;
+ kfree(buf_in_urb->sg);
+ buf_in_urb->sg = NULL;
error_buf_in_populate:
__wa_xfer_abort(xfer);
seg->status = WA_SEG_ERROR;
error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- wa_complete_remaining_xfer_segs(xfer, seg, seg->status);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
done = __wa_xfer_is_done(xfer);
/*
* queue work item to clear STALL for control endpoints.
@@ -2315,16 +2481,16 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
struct usb_iso_packet_descriptor *iso_frame_desc =
xfer->urb->iso_frame_desc;
- const int urb_frame_index =
+ const int xfer_frame_index =
seg->isoc_frame_offset + seg_index;
- iso_frame_desc[urb_frame_index].status =
+ iso_frame_desc[xfer_frame_index].status =
wa_xfer_status_to_errno(
le16_to_cpu(status_array[seg_index].PacketStatus));
- iso_frame_desc[urb_frame_index].actual_length =
+ iso_frame_desc[xfer_frame_index].actual_length =
le16_to_cpu(status_array[seg_index].PacketLength);
/* track the number of frames successfully transferred. */
- if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
/* save the starting frame index for buf_in_urb. */
if (!data_frame_count)
first_frame_index = seg_index;
@@ -2333,30 +2499,64 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
}
if (xfer->is_inbound && data_frame_count) {
- int result;
+ int result, total_frames_read = 0, urb_index = 0;
+ struct urb *buf_in_urb;
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+
+ /* start with the first frame with data. */
seg->isoc_frame_index = first_frame_index;
- /* submit a read URB for the first frame with data. */
- __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
- seg->isoc_frame_index + seg->isoc_frame_offset);
+ /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+ do {
+ int urb_frame_index, urb_frame_count;
+ struct usb_iso_packet_descriptor *iso_frame_desc;
+
+ buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+ buf_in_urb, xfer, seg);
+ /* advance frame index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ total_frames_read += urb_frame_count;
+
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+ /* skip 0-byte frames. */
+ urb_frame_index =
+ seg->isoc_frame_offset + seg->isoc_frame_index;
+ iso_frame_desc =
+ &(xfer->urb->iso_frame_desc[urb_frame_index]);
+ while ((seg->isoc_frame_index <
+ seg->isoc_frame_count) &&
+ (iso_frame_desc->actual_length == 0)) {
+ ++(seg->isoc_frame_index);
+ ++iso_frame_desc;
+ }
+ ++urb_index;
+
+ } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+ && (seg->isoc_frame_index <
+ seg->isoc_frame_count));
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
if (result < 0) {
+ --(wa->active_buf_in_urbs);
dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
result);
wa_reset_all(wa);
- } else if (data_frame_count > 1)
- /* If we need to read multiple frames, set DTI busy. */
+ } else if (data_frame_count > total_frames_read)
+ /* If we need to read more frames, set DTI busy. */
dti_busy = 1;
} else {
/* OUT transfer or no more IN data, complete it -- */
- seg->status = WA_SEG_DONE;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (dti_busy)
+ wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+ else
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
@@ -2388,8 +2588,9 @@ static void wa_buf_in_cb(struct urb *urb)
struct wahc *wa;
struct device *dev;
struct wa_rpipe *rpipe;
- unsigned rpipe_ready = 0, seg_index, isoc_data_frame_count = 0;
+ unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
unsigned long flags;
+ int resubmit_dti = 0, active_buf_in_urbs;
u8 done = 0;
/* free the sg if it was used. */
@@ -2399,19 +2600,20 @@ static void wa_buf_in_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
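+ /* account for this completed buf_in URB; the segment can only be marked done once active_buf_in_urbs drops to 0. */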
+ --(wa->active_buf_in_urbs);
+ active_buf_in_urbs = wa->active_buf_in_urbs;
if (usb_pipeisoc(xfer->urb->pipe)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int seg_index;
+
/*
- * Find the next isoc frame with data. Bail out after
- * isoc_data_frame_count > 1 since there is no need to walk
- * the entire frame array. We just need to know if
- * isoc_data_frame_count is 0, 1, or >1.
+ * Find the next isoc frame with data and count how many
+ * frames with data remain.
*/
- seg_index = seg->isoc_frame_index + 1;
- while ((seg_index < seg->isoc_frame_count)
- && (isoc_data_frame_count <= 1)) {
- struct usb_iso_packet_descriptor *iso_frame_desc =
- xfer->urb->iso_frame_desc;
+ seg_index = seg->isoc_frame_index;
+ while (seg_index < seg->isoc_frame_count) {
const int urb_frame_index =
seg->isoc_frame_offset + seg_index;
@@ -2432,24 +2634,39 @@ static void wa_buf_in_cb(struct urb *urb)
seg->result += urb->actual_length;
if (isoc_data_frame_count > 0) {
- int result;
- /* submit a read URB for the first frame with data. */
- __wa_populate_buf_in_urb_isoc(wa, xfer, seg,
- seg->isoc_frame_index + seg->isoc_frame_offset);
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ int result, urb_frame_count;
+
+ /* submit a read URB for the next frame with data. */
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+ xfer, seg);
+ /* advance index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
if (result < 0) {
+ --(wa->active_buf_in_urbs);
dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
result);
wa_reset_all(wa);
}
- } else {
+ /*
+ * If we are in this callback and
+ * isoc_data_frame_count > 0, it means that the dti_urb
+ * submission was delayed in wa_dti_cb. Once
+ * we submit the last buf_in_urb, we can submit the
+ * delayed dti_urb.
+ */
+ resubmit_dti = (isoc_data_frame_count ==
+ urb_frame_count);
+ } else if (active_buf_in_urbs == 0) {
rpipe = xfer->ep->hcpriv;
- seg->status = WA_SEG_DONE;
- dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
- xfer, seg->index, seg->result);
- xfer->segs_done++;
+ dev_dbg(dev,
+ "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ seg->result);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -2461,37 +2678,44 @@ static void wa_buf_in_cb(struct urb *urb)
case -ENOENT: /* as it was done by the one who unlinked us */
break;
default: /* Other errors ... */
+ /*
+ * Error on data buf read. Only resubmit DTI if it hasn't
+ * already been done by previously hitting this error or by a
+ * successful completion of the previous buf_in_urb.
+ */
+ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: data in error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
- done = __wa_xfer_is_done(xfer);
+ if (active_buf_in_urbs == 0)
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ else
+ __wa_xfer_abort(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
- /*
- * If we are in this callback and isoc_data_frame_count > 0, it means
- * that the dti_urb submission was delayed in wa_dti_cb. Once
- * isoc_data_frame_count gets to 1, we can submit the deferred URB
- * since the last buf_in_urb was just submitted.
- */
- if (isoc_data_frame_count == 1) {
- int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+
+ if (resubmit_dti) {
+ int result;
+
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
result);
@@ -2561,11 +2785,15 @@ static void wa_dti_cb(struct urb *urb)
xfer_result->hdr.bNotifyType);
break;
}
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
/* taken care of already */
+ dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+ __func__, xfer_id,
+ xfer_result->bTransferSegment & 0x7f);
break;
- xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ }
xfer = wa_xfer_get_by_id(wa, xfer_id);
if (xfer == NULL) {
/* FIXME: transaction not found. */
@@ -2614,6 +2842,54 @@ out:
}
/*
+ * Initialize the DTI URB for reading transfer result notifications and also
+ * the buffer-in URBs, for reading buffers. Then we just submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+ struct device *dev = &wa->usb_iface->dev;
+ int result = -ENOMEM, index;
+
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL) {
+ dev_err(dev, "Can't allocate DTI URB\n");
+ goto error_dti_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
+
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+ usb_fill_bulk_urb(
+ &(wa->buf_in_urbs[index]), wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev,
+ 0x80 | dti_epd->bEndpointAddress),
+ NULL, 0, wa_buf_in_cb, wa);
+ }
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+ result);
+ goto error_dti_urb_submit;
+ }
+out:
+ return 0;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
+/*
* Transfer complete notification
*
* Called from the notif.c code. We get a notification on EP2 saying
@@ -2627,15 +2903,10 @@ out:
* Follow up in wa_dti_cb(), as that's where the whole state
* machine starts.
*
- * So here we just initialize the DTI URB for reading transfer result
- * notifications and also the buffer-in URB, for reading buffers. Then
- * we just submit the DTI URB.
- *
* @wa shall be referenced
*/
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
- int result;
struct device *dev = &wa->usb_iface->dev;
struct wa_notif_xfer *notif_xfer;
const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
@@ -2649,45 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
goto error;
}
- if (wa->dti_urb != NULL) /* DTI URB already started */
- goto out;
- wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL) {
- dev_err(dev, "Can't allocate DTI URB\n");
- goto error_dti_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->dti_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- wa->dti_buf, wa->dti_buf_size,
- wa_dti_cb, wa);
+ /* attempt to start the DTI ep processing. */
+ if (wa_dti_start(wa) < 0)
+ goto error;
- wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->buf_in_urb == NULL) {
- dev_err(dev, "Can't allocate BUF-IN URB\n");
- goto error_buf_in_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->buf_in_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- NULL, 0, wa_buf_in_cb, wa);
- result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
- result);
- goto error_dti_urb_submit;
- }
-out:
return;
-error_dti_urb_submit:
- usb_put_urb(wa->buf_in_urb);
- wa->buf_in_urb = NULL;
-error_buf_in_urb_alloc:
- usb_put_urb(wa->dti_urb);
- wa->dti_urb = NULL;
-error_dti_urb_alloc:
error:
wa_reset_all(wa);
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2319d206f630..7ba042498857 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -872,9 +872,13 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
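+ /* igate serializes err_trigger access with vfio_pci_set_err_trigger(), which runs under igate via the SET_IRQS ioctl. */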
+ mutex_lock(&vdev->igate);
+
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
+ mutex_unlock(&vdev->igate);
+
vfio_device_put(device);
return PCI_ERS_RESULT_CAN_RECOVER;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 641bc87bdb96..9dd49c9839ac 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -482,15 +482,19 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
for (i = 0; i < nvec; i++)
vdev->msix[i].entry = i;
- ret = pci_enable_msix(pdev, vdev->msix, nvec);
- if (ret) {
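+ /* pci_enable_msix_range() returns the allocated vector count or a negative errno; fewer than nvec means a partial allocation, so roll it back. */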
+ ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msix(pdev);
kfree(vdev->msix);
kfree(vdev->ctx);
return ret;
}
} else {
- ret = pci_enable_msi_block(pdev, nvec);
- if (ret) {
+ ret = pci_enable_msi_range(pdev, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msi(pdev);
kfree(vdev->ctx);
return ret;
}
@@ -749,54 +753,37 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned count, uint32_t flags, void *data)
{
int32_t fd = *(int32_t *)data;
- struct pci_dev *pdev = vdev->pdev;
if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
return -EINVAL;
- /*
- * device_lock synchronizes setting and checking of
- * err_trigger. The vfio_pci_aer_err_detected() is also
- * called with device_lock held.
- */
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
-
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
- device_unlock(&pdev->dev);
return 0;
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t trigger = *(uint8_t *)data;
- device_lock(&pdev->dev);
if (trigger && vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
- device_unlock(&pdev->dev);
return 0;
}
/* Handle SET_DATA_EVENTFD */
-
if (fd == -1) {
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = NULL;
- device_unlock(&pdev->dev);
return 0;
} else if (fd >= 0) {
struct eventfd_ctx *efdctx;
efdctx = eventfd_ctx_fdget(fd);
if (IS_ERR(efdctx))
return PTR_ERR(efdctx);
- device_lock(&pdev->dev);
if (vdev->err_trigger)
eventfd_ctx_put(vdev->err_trigger);
vdev->err_trigger = efdctx;
- device_unlock(&pdev->dev);
return 0;
} else
return -EINVAL;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 1eab4ace0671..21271d8df023 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -22,6 +22,7 @@
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
@@ -45,9 +46,7 @@ static struct vfio {
struct idr group_idr;
struct mutex group_lock;
struct cdev group_cdev;
- struct device *dev;
- dev_t devt;
- struct cdev cdev;
+ dev_t group_devt;
wait_queue_head_t release_q;
} vfio;
@@ -142,8 +141,7 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
*/
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- /* index 0 is used by /dev/vfio/vfio */
- return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
+ return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}
static void vfio_free_group_minor(int minor)
@@ -243,7 +241,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
}
}
- dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
+ dev = device_create(vfio.class, NULL,
+ MKDEV(MAJOR(vfio.group_devt), minor),
group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
@@ -268,7 +267,7 @@ static void vfio_group_release(struct kref *kref)
WARN_ON(!list_empty(&group->device_list));
- device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
+ device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
list_del(&group->vfio_next);
vfio_free_group_minor(group->minor);
vfio_group_unlock_and_free(group);
@@ -1419,12 +1418,17 @@ EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
- if (mode && (MINOR(dev->devt) == 0))
- *mode = S_IRUGO | S_IWUGO;
-
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
+static struct miscdevice vfio_dev = {
+ .minor = VFIO_MINOR,
+ .name = "vfio",
+ .fops = &vfio_fops,
+ .nodename = "vfio/vfio",
+ .mode = S_IRUGO | S_IWUGO,
+};
+
static int __init vfio_init(void)
{
int ret;
@@ -1436,6 +1440,13 @@ static int __init vfio_init(void)
INIT_LIST_HEAD(&vfio.iommu_drivers_list);
init_waitqueue_head(&vfio.release_q);
+ ret = misc_register(&vfio_dev);
+ if (ret) {
+ pr_err("vfio: misc device register failed\n");
+ return ret;
+ }
+
+ /* /dev/vfio/$GROUP */
vfio.class = class_create(THIS_MODULE, "vfio");
if (IS_ERR(vfio.class)) {
ret = PTR_ERR(vfio.class);
@@ -1444,27 +1455,14 @@ static int __init vfio_init(void)
vfio.class->devnode = vfio_devnode;
- ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
- if (ret)
- goto err_base_chrdev;
-
- cdev_init(&vfio.cdev, &vfio_fops);
- ret = cdev_add(&vfio.cdev, vfio.devt, 1);
+ ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
if (ret)
- goto err_base_cdev;
+ goto err_alloc_chrdev;
- vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
- if (IS_ERR(vfio.dev)) {
- ret = PTR_ERR(vfio.dev);
- goto err_base_dev;
- }
-
- /* /dev/vfio/$GROUP */
cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev,
- MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
+ ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
if (ret)
- goto err_groups_cdev;
+ goto err_cdev_add;
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
@@ -1478,16 +1476,13 @@ static int __init vfio_init(void)
return 0;
-err_groups_cdev:
- device_destroy(vfio.class, vfio.devt);
-err_base_dev:
- cdev_del(&vfio.cdev);
-err_base_cdev:
- unregister_chrdev_region(vfio.devt, MINORMASK);
-err_base_chrdev:
+err_cdev_add:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
+err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
err_class:
+ misc_deregister(&vfio_dev);
return ret;
}
@@ -1497,11 +1492,10 @@ static void __exit vfio_cleanup(void)
idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
- device_destroy(vfio.class, vfio.devt);
- cdev_del(&vfio.cdev);
- unregister_chrdev_region(vfio.devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
class_destroy(vfio.class);
vfio.class = NULL;
+ misc_deregister(&vfio_dev);
}
module_init(vfio_init);
@@ -1511,3 +1505,5 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS_MISCDEV(VFIO_MINOR);
+MODULE_ALIAS("devname:vfio/vfio");
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
* enforcing the limit based on the max that the guest can map.
*/
down_write(&current->mm->mmap_sem);
- npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
locked = current->mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
down_write(&current->mm->mmap_sem);
current->mm->locked_vm -= (container->tbl->it_size <<
- IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
up_write(&current->mm->mmap_sem);
}
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
if (info.argsz < minsz)
return -EINVAL;
- info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
- info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+ info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+ info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
info.flags = 0;
if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
- if ((param.size & ~IOMMU_PAGE_MASK) ||
- (param.vaddr & ~IOMMU_PAGE_MASK))
+ if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+ (param.vaddr & ~IOMMU_PAGE_MASK_4K))
return -EINVAL;
/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (ret)
return ret;
- for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+ for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
ret = iommu_put_tce_user_mode(tbl,
- (param.iova >> IOMMU_PAGE_SHIFT) + i,
+ (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
tce);
if (ret)
break;
- tce += IOMMU_PAGE_SIZE;
+ tce += IOMMU_PAGE_SIZE_4K;
}
if (ret)
iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT, i);
+ param.iova >> IOMMU_PAGE_SHIFT_4K, i);
iommu_flush_tce(tbl);
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
- if (param.size & ~IOMMU_PAGE_MASK)
+ if (param.size & ~IOMMU_PAGE_MASK_4K)
return -EINVAL;
ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.size >> IOMMU_PAGE_SHIFT_4K);
if (ret)
return ret;
ret = iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.iova >> IOMMU_PAGE_SHIFT_4K,
+ param.size >> IOMMU_PAGE_SHIFT_4K);
iommu_flush_tce(tbl);
return ret;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f83c8a..54af4e933695 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 831eb4fd197d..e1e22e0f01e8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
};
struct vhost_net_ubuf_ref {
- struct kref kref;
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
vhost_net_zcopy_mask |= 0x1 << vq;
}
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
- struct vhost_net_ubuf_ref *ubufs;
-
- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
- wake_up(&ubufs->wait);
-}
-
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
+ atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ int r = atomic_sub_return(1, &ubufs->refcount);
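+ /* a result of 0 means the object is released; wake any waiter in vhost_net_ubuf_put_and_wait(). */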
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
- int cnt = atomic_read(&ubufs->kref.refcount);
+ int cnt;
+
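+ /* pairs with synchronize_rcu_bh() in vhost_net_release(): the device cannot be freed until this callback finishes. */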
+ rcu_read_lock_bh();
/* set len to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
+ cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
- * in this case, the refcount after decrement will eventually reach 1
- * so here it is 2.
+ * in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after each 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of times).
*/
- if (cnt <= 2 || !(cnt % 16))
+ if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
+
+ rcu_read_unlock_bh();
}
/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
+ atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
} else {
msg.msg_control = NULL;
@@ -502,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -529,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -584,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -683,7 +704,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r, i;
+ int i;
if (!n)
return -ENOMEM;
@@ -706,12 +727,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
- r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
- if (r < 0) {
- kfree(n);
- kfree(vqs);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
@@ -785,7 +801,7 @@ static void vhost_net_flush(struct vhost_net *n)
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -805,6 +821,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
fput(tx_sock->file);
if (rx_sock)
fput(rx_sock->file);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu_bh();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f175629513ed..e48d4a672580 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
}
se_sess = tv_nexus->tvn_se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) {
pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -889,7 +889,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
cmd->tvc_lun, cmd->tvc_exp_data_len,
cmd->tvc_task_attr, cmd->tvc_data_direction,
TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- sg_bidi_ptr, sg_no_bidi);
+ sg_bidi_ptr, sg_no_bidi, NULL, 0);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break;
}
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ if (unlikely(v_req.lun[0] != 1)) {
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
+ }
+
/* Extract the tpgt */
target = v_req.lun[1];
tpg = ACCESS_ONCE(vs_tpg[target]);
@@ -1417,18 +1423,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
tcm_vhost_init_inflight(vs, NULL);
- if (r < 0)
- goto err_init;
-
f->private_data = vs;
return 0;
-err_init:
- kfree(vqs);
err_vqs:
vhost_scsi_free(vs);
err_vs:
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 339eae85859a..c2a54fbf7f99 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -104,7 +104,6 @@ static int vhost_test_open(struct inode *inode, struct file *f)
struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- int r;
if (!n)
return -ENOMEM;
@@ -117,12 +116,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
- if (r < 0) {
- kfree(vqs);
- kfree(n);
- return r;
- }
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 69068e0d8f31..78987e481bc6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -290,7 +290,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
-long vhost_dev_init(struct vhost_dev *dev,
+void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs)
{
struct vhost_virtqueue *vq;
@@ -319,8 +319,6 @@ long vhost_dev_init(struct vhost_dev *dev,
vhost_poll_init(&vq->poll, vq->handle_kick,
POLLIN, dev);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4465ed5f316d..35eeb2a1bada 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -127,7 +127,7 @@ struct vhost_dev {
struct task_struct *worker;
};
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4f2e1b35eb38..b4b209ce8029 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -27,12 +27,6 @@ config VGASTATE
tristate
default n
-config VIDEO_OUTPUT_CONTROL
- tristate "Lowlevel video output switch controls"
- help
- This framework adds support for low-level control of the video
- output switch.
-
config VIDEOMODE_HELPERS
bool
@@ -312,7 +306,8 @@ config FB_PM2_FIFO_DISCONNECT
config FB_ARMCLCD
tristate "ARM PrimeCell PL110 support"
- depends on FB && ARM && ARM_AMBA
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on FB && ARM_AMBA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -363,7 +358,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -801,18 +796,9 @@ config FB_HGA
As this card technology is at least 25 years old,
most people will answer N here.
-config FB_SGIVW
- tristate "SGI Visual Workstation framebuffer support"
- depends on FB && X86_VISWS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- SGI Visual Workstation support for framebuffer graphics.
-
config FB_GBE
bool "SGI Graphics Backend frame buffer support"
- depends on (FB = y) && (SGI_IP32 || X86_VISWS)
+ depends on (FB = y) && SGI_IP32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -979,6 +965,22 @@ config FB_PVR2
(<file:drivers/video/pvr2fb.c>). Please see the file
<file:Documentation/fb/pvr2fb.txt>.
+config FB_OPENCORES
+ tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This enables support for the OpenCores VGA/LCD core.
+
+ The OpenCores VGA/LCD core is typically used together with
+ softcore CPUs (e.g. OpenRISC or Microblaze) or hard processor
+ systems (e.g. Altera socfpga or Xilinx Zynq) on FPGAs.
+
+ The source code and specification for the core are available at
+ <http://opencores.org/project,vga_lcd>
+
config FB_S1D13XXX
tristate "Epson S1D13XXX framebuffer support"
depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index e8bae8dd4804..1be26fe10592 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -75,7 +75,6 @@ obj-$(CONFIG_FB_CG14) += cg14.o sbuslib.o
obj-$(CONFIG_FB_P9100) += p9100.o sbuslib.o
obj-$(CONFIG_FB_TCX) += tcx.o sbuslib.o
obj-$(CONFIG_FB_LEO) += leo.o sbuslib.o
-obj-$(CONFIG_FB_SGIVW) += sgivwfb.o
obj-$(CONFIG_FB_ACORN) += acornfb.o
obj-$(CONFIG_FB_ATARI) += atafb.o c2p_iplan2.o atafb_mfb.o \
atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
@@ -150,6 +149,7 @@ obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
+obj-$(CONFIG_FB_OPENCORES) += ocfb.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
@@ -171,8 +171,6 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
-#video output switch sysfs driver
-obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index d611f1a1ac53..7e8ddf00ccc2 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -589,7 +589,6 @@ static void asiliantfb_remove(struct pci_dev *dp)
fb_dealloc_cmap(&p->cmap);
iounmap(p->screen_base);
release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0));
- pci_set_drvdata(dp, NULL);
framebuffer_release(p);
}
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 12ca031877d4..52108be69e77 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -357,11 +357,13 @@ static int default_lcd_on = 1;
static bool mtrr = true;
#endif
+#ifdef CONFIG_FB_ATY128_BACKLIGHT
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight = 1;
#else
static int backlight = 0;
#endif
+#endif
/* PLL constants */
struct aty128_constants {
@@ -1671,7 +1673,9 @@ static int aty128fb_setup(char *options)
default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
continue;
} else if (!strncmp(this_opt, "backlight:", 10)) {
+#ifdef CONFIG_FB_ATY128_BACKLIGHT
backlight = simple_strtoul(this_opt+10, NULL, 0);
+#endif
continue;
}
#ifdef CONFIG_MTRR
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 00076ecfe9b8..8ea42b8d9bc8 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -110,8 +110,8 @@ static int hp680bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = HP680_MAX_INTENSITY;
- bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
- &hp680bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, "hp680-bl", &pdev->dev,
+ NULL, &hp680bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
@@ -131,8 +131,6 @@ static int hp680bl_remove(struct platform_device *pdev)
bd->props.power = 0;
hp680bl_send_intensity(bd);
- backlight_device_unregister(bd);
-
return 0;
}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 3ccb89340f22..6ce96b4a8796 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -115,9 +115,10 @@ static int jornada_bl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = BL_MAX_BRIGHT;
- bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
- &jornada_bl_ops, &props);
+ bd = devm_backlight_device_register(&pdev->dev, S1D_DEVICENAME,
+ &pdev->dev, NULL, &jornada_bl_ops,
+ &props);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
dev_err(&pdev->dev, "failed to register device, err=%x\n", ret);
@@ -139,18 +140,8 @@ static int jornada_bl_probe(struct platform_device *pdev)
return 0;
}
-static int jornada_bl_remove(struct platform_device *pdev)
-{
- struct backlight_device *bd = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bd);
-
- return 0;
-}
-
static struct platform_driver jornada_bl_driver = {
.probe = jornada_bl_probe,
- .remove = jornada_bl_remove,
.driver = {
.name = "jornada_bl",
},
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index b061413f1a65..da3876c9b3ae 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -100,7 +100,8 @@ static int jornada_lcd_probe(struct platform_device *pdev)
struct lcd_device *lcd_device;
int ret;
- lcd_device = lcd_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_lcd_props);
+ lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME,
+ &pdev->dev, NULL, &jornada_lcd_props);
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
@@ -119,18 +120,8 @@ static int jornada_lcd_probe(struct platform_device *pdev)
return 0;
}
-static int jornada_lcd_remove(struct platform_device *pdev)
-{
- struct lcd_device *lcd_device = platform_get_drvdata(pdev);
-
- lcd_device_unregister(lcd_device);
-
- return 0;
-}
-
static struct platform_driver jornada_lcd_driver = {
.probe = jornada_lcd_probe,
- .remove = jornada_lcd_remove,
.driver = {
.name = "jornada_lcd",
},
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 7592cc25c963..84a110a719cb 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
static unsigned long kb3886bl_flags;
#define KB3886BL_SUSPENDED 0x01
-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
+static struct dmi_system_id kb3886bl_device_table[] __initdata = {
{
.ident = "Sahara Touch-iT",
.matches = {
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index b5fc13bc24e7..63e763828e0e 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -223,8 +223,8 @@ static int l4f00242t03_probe(struct spi_device *spi)
return PTR_ERR(priv->core_reg);
}
- priv->ld = lcd_device_register("l4f00242t03",
- &spi->dev, priv, &l4f_ops);
+ priv->ld = devm_lcd_device_register(&spi->dev, "l4f00242t03", &spi->dev,
+ priv, &l4f_ops);
if (IS_ERR(priv->ld))
return PTR_ERR(priv->ld);
@@ -243,8 +243,6 @@ static int l4f00242t03_remove(struct spi_device *spi)
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
- lcd_device_unregister(priv->ld);
-
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 93cf15efc717..7de847df224f 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -228,7 +228,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
rc = device_register(&new_ld->dev);
if (rc) {
- kfree(new_ld);
+ put_device(&new_ld->dev);
return ERR_PTR(rc);
}
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index cae80d555e84..2ca3a040007b 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -125,7 +125,7 @@ static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
return false;
}
- return (addr >= start && addr <= end);
+ return addr >= start && addr <= end;
}
static int lp8557_bl_off(struct lp855x *lp)
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e49905d495dc..daba34dc46d4 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -63,13 +63,13 @@ static struct lp8788_bl_config default_bl_config = {
static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
{
- return (mode == LP8788_BL_COMB_PWM_BASED);
+ return mode == LP8788_BL_COMB_PWM_BASED;
}
static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
{
- return (mode == LP8788_BL_REGISTER_ONLY ||
- mode == LP8788_BL_COMB_REGISTER_BASED);
+ return mode == LP8788_BL_REGISTER_ONLY ||
+ mode == LP8788_BL_COMB_REGISTER_BASED;
}
static int lp8788_backlight_configure(struct lp8788_bl *bl)
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index ac11a4650c19..a0dcd88ac74f 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -146,8 +146,8 @@ static int omapbl_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = OMAPBL_MAX_INTENSITY;
- dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
- &props);
+ dev = devm_backlight_device_register(&pdev->dev, "omap-bl", &pdev->dev,
+ bl, &omapbl_ops, &props);
if (IS_ERR(dev))
return PTR_ERR(dev);
@@ -170,20 +170,10 @@ static int omapbl_probe(struct platform_device *pdev)
return 0;
}
-static int omapbl_remove(struct platform_device *pdev)
-{
- struct backlight_device *dev = platform_get_drvdata(pdev);
-
- backlight_device_unregister(dev);
-
- return 0;
-}
-
static SIMPLE_DEV_PM_OPS(omapbl_pm_ops, omapbl_suspend, omapbl_resume);
static struct platform_driver omapbl_driver = {
.probe = omapbl_probe,
- .remove = omapbl_remove,
.driver = {
.name = "omap-bl",
.pm = &omapbl_pm_ops,
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index fdbb6ee5027c..f5a5202dd79d 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -118,8 +118,9 @@ static int ot200_backlight_probe(struct platform_device *pdev)
props.brightness = 100;
props.type = BACKLIGHT_RAW;
- bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, data,
- &ot200_backlight_ops, &props);
+ bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
+ &pdev->dev, data, &ot200_backlight_ops,
+ &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
retval = PTR_ERR(bl);
@@ -137,10 +138,6 @@ error_devm_kzalloc:
static int ot200_backlight_remove(struct platform_device *pdev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
-
- backlight_device_unregister(bl);
-
/* on module unload set brightness to 100% */
cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index fb80d68f4d33..b75201ff46f6 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -241,7 +241,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb = devm_kzalloc(&pdev->dev, sizeof(*pb), GFP_KERNEL);
if (!pb) {
- dev_err(&pdev->dev, "no memory for state\n");
ret = -ENOMEM;
goto err_alloc;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index b8db9338cacd..3ad676558c80 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -105,8 +105,9 @@ static int tosa_bl_probe(struct i2c_client *client,
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 512 - 1;
- data->bl = backlight_device_register("tosa-bl", &client->dev, data,
- &bl_ops, &props);
+ data->bl = devm_backlight_device_register(&client->dev, "tosa-bl",
+ &client->dev, data, &bl_ops,
+ &props);
if (IS_ERR(data->bl)) {
ret = PTR_ERR(data->bl);
goto err_reg;
@@ -128,9 +129,7 @@ static int tosa_bl_remove(struct i2c_client *client)
{
struct tosa_bl_data *data = i2c_get_clientdata(client);
- backlight_device_unregister(data->bl);
data->bl = NULL;
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index be5d636764bf..f08d641ccd01 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -206,8 +206,8 @@ static int tosa_lcd_probe(struct spi_device *spi)
tosa_lcd_tg_on(data);
- data->lcd = lcd_device_register("tosa-lcd", &spi->dev, data,
- &tosa_lcd_ops);
+ data->lcd = devm_lcd_device_register(&spi->dev, "tosa-lcd", &spi->dev,
+ data, &tosa_lcd_ops);
if (IS_ERR(data->lcd)) {
ret = PTR_ERR(data->lcd);
@@ -226,8 +226,6 @@ static int tosa_lcd_remove(struct spi_device *spi)
{
struct tosa_lcd_data *data = spi_get_drvdata(spi);
- lcd_device_unregister(data->lcd);
-
if (data->i2c)
i2c_unregister_device(data->i2c);
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 846caab75a46..fe1cd0148e13 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -8,7 +8,8 @@ config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
- (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
+ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
+ !ARM64
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index cd8a8027f8ae..4e39291ac8b4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3547,8 +3547,10 @@ static void fbcon_exit(void)
"no"));
for (j = first_fb_vc; j <= last_fb_vc; j++) {
- if (con2fb_map[j] == i)
+ if (con2fb_map[j] == i) {
mapped = 1;
+ break;
+ }
}
if (mapped) {
@@ -3561,6 +3563,7 @@ static void fbcon_exit(void)
fbcon_del_cursor_timer(info);
kfree(ops->cursor_src);
+ kfree(ops->cursor_state.mask);
kfree(info->fbcon_par);
info->fbcon_par = NULL;
}
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 4ad24f2c6472..cecd3de01c24 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -488,7 +488,7 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
return 0;
}
-#ifdef CONFIG_FONTS
+#ifdef CONFIG_FONT_SUPPORT
static struct sti_cooked_font *
sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
{
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 010d19105ebc..7309ac704e26 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1577,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
- int i;
+ int i, ret;
/* check all firmware fbs and kick off if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
@@ -1599,22 +1599,29 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
printk(KERN_INFO "fb: conflicting fb hw usage "
"%s vs %s - removing generic driver\n",
name, registered_fb[i]->fix.id);
- do_unregister_framebuffer(registered_fb[i]);
+ ret = do_unregister_framebuffer(registered_fb[i]);
+ if (ret)
+ return ret;
}
}
+
+ return 0;
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, ret;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
- fb_is_primary_device(fb_info));
+ ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+ fb_info->fix.id,
+ fb_is_primary_device(fb_info));
+ if (ret)
+ return ret;
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1739,12 +1746,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(unlink_framebuffer);
-void remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
+ int ret;
+
mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
+ ret = do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
+
+ return ret;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
@@ -1930,6 +1941,9 @@ int fb_get_options(const char *name, char **option)
options = opt + name_len + 1;
}
}
+ /* No match, pass global option */
+ if (!options && option && fb_mode_option)
+ options = kstrdup(fb_mode_option, GFP_KERNEL);
if (options && !strncmp(options, "off", 3))
retval = 1;
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 4c7cb368a9dc..3ec65a878ac8 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -45,10 +45,6 @@ struct gbefb_par {
#define GBE_BASE 0x16000000 /* SGI O2 */
#endif
-#ifdef CONFIG_X86_VISWS
-#define GBE_BASE 0xd0000000 /* SGI Visual Workstation */
-#endif
-
/* macro for fastest write-though access to the framebuffer */
#ifdef CONFIG_MIPS
#ifdef CONFIG_CPU_R10000
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 130708f96430..e23392ec5af3 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/fb.h>
#include <linux/pci.h>
+#include <linux/efi.h>
#include <linux/hyperv.h>
@@ -212,6 +213,7 @@ struct synthvid_msg {
struct hvfb_par {
struct fb_info *info;
+ struct resource mem;
bool fb_ready; /* fb device is ready */
struct completion wait;
u32 synthvid_version;
@@ -460,13 +462,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
goto error;
}
- if (par->synthvid_version == SYNTHVID_VERSION_WIN7) {
+ if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
screen_depth = SYNTHVID_DEPTH_WIN7;
- screen_fb_size = SYNTHVID_FB_SIZE_WIN7;
- } else {
+ else
screen_depth = SYNTHVID_DEPTH_WIN8;
- screen_fb_size = SYNTHVID_FB_SIZE_WIN8;
- }
+
+ screen_fb_size = hdev->channel->offermsg.offer.
+ mmio_megabytes * 1024 * 1024;
return 0;
@@ -627,26 +629,46 @@ static void hvfb_get_option(struct fb_info *info)
/* Get framebuffer memory from Hyper-V video pci space */
static int hvfb_getmem(struct fb_info *info)
{
- struct pci_dev *pdev;
- ulong fb_phys;
+ struct hvfb_par *par = info->par;
+ struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
+ int gen2vm = efi_enabled(EFI_BOOT);
+ int ret;
- pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ par->mem.name = KBUILD_MODNAME;
+ par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
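+	/*
+	 * Generation 2 VMs (detected here via EFI boot) expose no emulated
+	 * PCI video device, so the framebuffer range is allocated from the
+	 * hyperv_mmio resource pool instead of being taken from a PCI BAR.
+	 */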
+ if (gen2vm) {
+ ret = allocate_resource(&hyperv_mmio, &par->mem,
+ screen_fb_size,
+ 0, -1,
+ screen_fb_size,
+ NULL, NULL);
+ if (ret != 0) {
+ pr_err("Unable to allocate framebuffer memory\n");
+ return -ENODEV;
+ }
+ } else {
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
- if (!pdev) {
- pr_err("Unable to find PCI Hyper-V video\n");
- return -ENODEV;
- }
+ if (!pdev) {
+ pr_err("Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size)
- goto err1;
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ pci_resource_len(pdev, 0) < screen_fb_size)
+ goto err1;
- fb_phys = pci_resource_end(pdev, 0) - screen_fb_size + 1;
- if (!request_mem_region(fb_phys, screen_fb_size, KBUILD_MODNAME))
- goto err1;
+ par->mem.end = pci_resource_end(pdev, 0);
+ par->mem.start = par->mem.end - screen_fb_size + 1;
+ ret = request_resource(&pdev->resource[0], &par->mem);
+ if (ret != 0) {
+ pr_err("Unable to request framebuffer memory\n");
+ goto err1;
+ }
+ }
- fb_virt = ioremap(fb_phys, screen_fb_size);
+ fb_virt = ioremap(par->mem.start, screen_fb_size);
if (!fb_virt)
goto err2;
@@ -654,30 +676,44 @@ static int hvfb_getmem(struct fb_info *info)
if (!info->apertures)
goto err3;
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
- info->fix.smem_start = fb_phys;
+ if (gen2vm) {
+ info->apertures->ranges[0].base = screen_info.lfb_base;
+ info->apertures->ranges[0].size = screen_info.lfb_size;
+ remove_conflicting_framebuffers(info->apertures,
+ KBUILD_MODNAME, false);
+ } else {
+ info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+ }
+
+ info->fix.smem_start = par->mem.start;
info->fix.smem_len = screen_fb_size;
info->screen_base = fb_virt;
info->screen_size = screen_fb_size;
- pci_dev_put(pdev);
+ if (!gen2vm)
+ pci_dev_put(pdev);
+
return 0;
err3:
iounmap(fb_virt);
err2:
- release_mem_region(fb_phys, screen_fb_size);
+ release_resource(&par->mem);
err1:
- pci_dev_put(pdev);
+ if (!gen2vm)
+ pci_dev_put(pdev);
+
return -ENOMEM;
}
/* Release the framebuffer */
static void hvfb_putmem(struct fb_info *info)
{
+ struct hvfb_par *par = info->par;
+
iounmap(info->screen_base);
- release_mem_region(info->fix.smem_start, screen_fb_size);
+ release_resource(&par->mem);
}
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 038192ac7369..bb674e431741 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2011,9 +2011,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
struct fb_info *info;
struct i810fb_par *par = NULL;
struct fb_videomode mode;
- int i, err = -1, vfreq, hfreq, pixclock;
-
- i = 0;
+ int err = -1, vfreq, hfreq, pixclock;
info = framebuffer_alloc(sizeof(struct i810fb_par), &dev->dev);
if (!info)
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 39ac49e0682c..0037104d66ac 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -54,7 +54,7 @@ config LOGO_PARISC_CLUT224
config LOGO_SGI_CLUT224
bool "224-color SGI Linux logo"
- depends on SGI_IP22 || SGI_IP27 || SGI_IP32 || X86_VISWS
+ depends on SGI_IP22 || SGI_IP27 || SGI_IP32
default y
config LOGO_SUN_CLUT224
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 080c35b34bbb..940cd196eef5 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -17,10 +17,6 @@
#include <asm/setup.h>
#endif
-#ifdef CONFIG_MIPS
-#include <asm/bootinfo.h>
-#endif
-
static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
@@ -85,7 +81,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
logo = &logo_parisc_clut224;
#endif
#ifdef CONFIG_LOGO_SGI_CLUT224
- /* SGI Linux logo on MIPS/MIPS64 and VISWS */
+ /* SGI Linux logo on MIPS/MIPS64 */
logo = &logo_sgi_clut224;
#endif
#ifdef CONFIG_LOGO_SUN_CLUT224
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c
index 84de2632857a..b563b920f159 100644
--- a/drivers/video/mmp/core.c
+++ b/drivers/video/mmp/core.c
@@ -30,7 +30,7 @@ static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
{
if (path && overlay_id < path->overlay_num)
return &path->overlays[overlay_id];
- return 0;
+ return NULL;
}
static int path_check_status(struct mmp_path *path)
@@ -173,7 +173,7 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
+ sizeof(struct mmp_overlay) * info->overlay_num;
path = kzalloc(size, GFP_KERNEL);
if (!path)
- goto failed;
+ return NULL;
/* path set */
mutex_init(&path->access_ok);
@@ -219,11 +219,6 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
mutex_unlock(&disp_lock);
return path;
-
-failed:
- kfree(path);
- mutex_unlock(&disp_lock);
- return NULL;
}
EXPORT_SYMBOL_GPL(mmp_register_path);
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 804f874d32d3..142e860fb527 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1263,7 +1263,7 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
fbi->screen_base = dma_alloc_writecombine(fbi->device,
mem_len,
- &addr, GFP_DMA);
+ &addr, GFP_DMA | GFP_KERNEL);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 27197a8048c0..accf48a2cce4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -49,6 +49,7 @@
#include <linux/fb.h>
#include <linux/regulator/consumer.h>
#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
#include <video/videomode.h>
#define REG_SET 4
@@ -297,7 +298,7 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
}
break;
default:
- pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel);
+ pr_err("Unsupported colour depth: %u\n", var->bits_per_pixel);
return -EINVAL;
}
@@ -426,7 +427,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
ctrl |= CTRL_SET_WORD_LENGTH(3);
switch (host->ld_intf_width) {
case STMLCDIF_8BIT:
- dev_dbg(&host->pdev->dev,
+ dev_err(&host->pdev->dev,
"Unsupported LCD bus width mapping\n");
return -EINVAL;
case STMLCDIF_16BIT:
@@ -439,7 +440,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
break;
default:
- dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+ dev_err(&host->pdev->dev, "Unhandled color depth of %u\n",
fb_info->var.bits_per_pixel);
return -EINVAL;
}
@@ -589,7 +590,8 @@ static struct fb_ops mxsfb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int mxsfb_restore_mode(struct mxsfb_info *host)
+static int mxsfb_restore_mode(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
struct fb_info *fb_info = &host->fb_info;
unsigned line_count;
@@ -597,7 +599,6 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
unsigned long pa, fbsize;
int bits_per_pixel, ofs;
u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
- struct fb_videomode vmode;
/* Only restore the mode when the controller is running */
ctrl = readl(host->base + LCDC_CTRL);
@@ -611,8 +612,8 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
transfer_count = readl(host->base + host->devdata->transfer_count);
- vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
- vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+ vmode->xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+ vmode->yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
switch (CTRL_GET_WORD_LENGTH(ctrl)) {
case 0:
@@ -628,40 +629,39 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
fb_info->var.bits_per_pixel = bits_per_pixel;
- vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
- vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
- vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
- vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
- vmode.left_margin - vmode.xres;
- vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+ vmode->pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+ vmode->hsync_len = get_hsync_pulse_width(host, vdctrl2);
+ vmode->left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode->hsync_len;
+ vmode->right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) -
+ vmode->hsync_len - vmode->left_margin - vmode->xres;
+ vmode->vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
period = readl(host->base + LCDC_VDCTRL1);
- vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
- vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+ vmode->upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode->vsync_len;
+ vmode->lower_margin = period - vmode->vsync_len -
+ vmode->upper_margin - vmode->yres;
- vmode.vmode = FB_VMODE_NONINTERLACED;
+ vmode->vmode = FB_VMODE_NONINTERLACED;
- vmode.sync = 0;
+ vmode->sync = 0;
if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
- vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+ vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
- vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+ vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
pr_debug("Reconstructed video mode:\n");
pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
- vmode.xres, vmode.yres,
- vmode.hsync_len, vmode.left_margin, vmode.right_margin,
- vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
- pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
-
- fb_add_videomode(&vmode, &fb_info->modelist);
+ vmode->xres, vmode->yres, vmode->hsync_len, vmode->left_margin,
+ vmode->right_margin, vmode->vsync_len, vmode->upper_margin,
+ vmode->lower_margin);
+ pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode->pixclock));
host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
- fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+ fb_info->fix.line_length = vmode->xres * (bits_per_pixel >> 3);
pa = readl(host->base + host->devdata->cur_buf);
- fbsize = fb_info->fix.line_length * vmode.yres;
+ fbsize = fb_info->fix.line_length * vmode->yres;
if (pa < fb_info->fix.smem_start)
return -EINVAL;
if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
@@ -681,18 +681,17 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
return 0;
}
-static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
+static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
struct device *dev = &host->pdev->dev;
struct device_node *np = host->pdev->dev.of_node;
struct device_node *display_np;
- struct device_node *timings_np;
- struct display_timings *timings;
+ struct videomode vm;
u32 width;
- int i;
- int ret = 0;
+ int ret;
display_np = of_parse_phandle(np, "display", 0);
if (!display_np) {
@@ -732,54 +731,35 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
goto put_display_node;
}
- timings = of_get_display_timings(display_np);
- if (!timings) {
- dev_err(dev, "failed to get display timings\n");
- ret = -ENOENT;
+ ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ dev_err(dev, "failed to get videomode from DT\n");
goto put_display_node;
}
- timings_np = of_find_node_by_name(display_np,
- "display-timings");
- if (!timings_np) {
- dev_err(dev, "failed to find display-timings node\n");
- ret = -ENOENT;
+ ret = fb_videomode_from_videomode(&vm, vmode);
+ if (ret < 0)
goto put_display_node;
- }
- for (i = 0; i < of_get_child_count(timings_np); i++) {
- struct videomode vm;
- struct fb_videomode fb_vm;
-
- ret = videomode_from_timings(timings, &vm, i);
- if (ret < 0)
- goto put_timings_node;
- ret = fb_videomode_from_videomode(&vm, &fb_vm);
- if (ret < 0)
- goto put_timings_node;
-
- if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
- host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
- fb_add_videomode(&fb_vm, &fb_info->modelist);
- }
+ if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
+ host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
-put_timings_node:
- of_node_put(timings_np);
put_display_node:
of_node_put(display_np);
return ret;
}
-static int mxsfb_init_fbinfo(struct mxsfb_info *host)
+static int mxsfb_init_fbinfo(struct mxsfb_info *host,
+ struct fb_videomode *vmode)
{
+ int ret;
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
dma_addr_t fb_phys;
void *fb_virt;
unsigned fb_size;
- int ret;
fb_info->fbops = &mxsfb_ops;
fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
@@ -789,7 +769,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host)
fb_info->fix.visual = FB_VISUAL_TRUECOLOR,
fb_info->fix.accel = FB_ACCEL_NONE;
- ret = mxsfb_init_fbinfo_dt(host);
+ ret = mxsfb_init_fbinfo_dt(host, vmode);
if (ret)
return ret;
@@ -810,7 +790,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host)
fb_info->screen_base = fb_virt;
fb_info->screen_size = fb_info->fix.smem_len = fb_size;
- if (mxsfb_restore_mode(host))
+ if (mxsfb_restore_mode(host, vmode))
memset(fb_virt, 0, fb_size);
return 0;
@@ -850,7 +830,7 @@ static int mxsfb_probe(struct platform_device *pdev)
struct resource *res;
struct mxsfb_info *host;
struct fb_info *fb_info;
- struct fb_modelist *modelist;
+ struct fb_videomode *mode;
int ret;
if (of_id)
@@ -862,6 +842,11 @@ static int mxsfb_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mode = devm_kzalloc(&pdev->dev, sizeof(struct fb_videomode),
+ GFP_KERNEL);
+ if (mode == NULL)
+ return -ENOMEM;
+
host = to_imxfb_host(fb_info);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -893,15 +878,11 @@ static int mxsfb_probe(struct platform_device *pdev)
goto fb_release;
}
- INIT_LIST_HEAD(&fb_info->modelist);
-
- ret = mxsfb_init_fbinfo(host);
+ ret = mxsfb_init_fbinfo(host, mode);
if (ret != 0)
goto fb_release;
- modelist = list_first_entry(&fb_info->modelist,
- struct fb_modelist, list);
- fb_videomode_to_var(&fb_info->var, &modelist->mode);
+ fb_videomode_to_var(&fb_info->var, mode);
/* init the color fields */
mxsfb_check_var(&fb_info->var, fb_info);
@@ -927,7 +908,6 @@ static int mxsfb_probe(struct platform_device *pdev)
fb_destroy:
if (host->enabled)
clk_disable_unprepare(host->clk);
- fb_destroy_modelist(&fb_info->modelist);
fb_release:
framebuffer_release(fb_info);
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index ff228713425e..def041204676 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1461,7 +1461,6 @@ static void nvidiafb_remove(struct pci_dev *pd)
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
- pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
diff --git a/drivers/video/ocfb.c b/drivers/video/ocfb.c
new file mode 100644
index 000000000000..7f9dc9bec309
--- /dev/null
+++ b/drivers/video/ocfb.c
@@ -0,0 +1,440 @@
+/*
+ * OpenCores VGA/LCD 2.0 core frame buffer driver
+ *
+ * Copyright (C) 2013 Stefan Kristiansson, stefan.kristiansson@saunalahti.fi
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* OCFB register defines */
+#define OCFB_CTRL 0x000
+#define OCFB_STAT 0x004
+#define OCFB_HTIM 0x008
+#define OCFB_VTIM 0x00c
+#define OCFB_HVLEN 0x010
+#define OCFB_VBARA 0x014
+#define OCFB_PALETTE 0x800
+
+#define OCFB_CTRL_VEN 0x00000001 /* Video Enable */
+#define OCFB_CTRL_HIE 0x00000002 /* HSync Interrupt Enable */
+#define OCFB_CTRL_PC 0x00000800 /* 8-bit Pseudo Color Enable */
+#define OCFB_CTRL_CD8 0x00000000 /* Color Depth 8 */
+#define OCFB_CTRL_CD16 0x00000200 /* Color Depth 16 */
+#define OCFB_CTRL_CD24 0x00000400 /* Color Depth 24 */
+#define OCFB_CTRL_CD32 0x00000600 /* Color Depth 32 */
+#define OCFB_CTRL_VBL1 0x00000000 /* Burst Length 1 */
+#define OCFB_CTRL_VBL2 0x00000080 /* Burst Length 2 */
+#define OCFB_CTRL_VBL4 0x00000100 /* Burst Length 4 */
+#define OCFB_CTRL_VBL8 0x00000180 /* Burst Length 8 */
+
+#define PALETTE_SIZE 256
+
+#define OCFB_NAME "OC VGA/LCD"
+
+static char *mode_option;
+
+static const struct fb_videomode default_mode = {
+ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
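+	/* field order: name, refresh, xres, yres, pixclock (ps), left/right/
+	 * upper/lower margins, hsync/vsync lengths, sync flags, vmode */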
+ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
+ 0, FB_VMODE_NONINTERLACED
+};
+
+struct ocfb_dev {
+ struct fb_info info;
+ void __iomem *regs;
+	/* flag indicating whether the regs are accessed as little-endian */
+ int little_endian;
+ /* Physical and virtual addresses of framebuffer */
+ phys_addr_t fb_phys;
+ void __iomem *fb_virt;
+ u32 pseudo_palette[PALETTE_SIZE];
+};
+
+#ifndef MODULE
+static int __init ocfb_setup(char *options)
+{
+ char *curr_opt;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((curr_opt = strsep(&options, ",")) != NULL) {
+ if (!*curr_opt)
+ continue;
+ mode_option = curr_opt;
+ }
+
+ return 0;
+}
+#endif
+
+static inline u32 ocfb_readreg(struct ocfb_dev *fbdev, loff_t offset)
+{
+ if (fbdev->little_endian)
+ return ioread32(fbdev->regs + offset);
+ else
+ return ioread32be(fbdev->regs + offset);
+}
+
+static void ocfb_writereg(struct ocfb_dev *fbdev, loff_t offset, u32 data)
+{
+ if (fbdev->little_endian)
+ iowrite32(data, fbdev->regs + offset);
+ else
+ iowrite32be(data, fbdev->regs + offset);
+}
+
+static int ocfb_setupfb(struct ocfb_dev *fbdev)
+{
+ unsigned long bpp_config;
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+ struct device *dev = fbdev->info.device;
+ u32 hlen;
+ u32 vlen;
+
+ /* Disable display */
+ ocfb_writereg(fbdev, OCFB_CTRL, 0);
+
+ /* Register framebuffer address */
+ fbdev->little_endian = 0;
+ ocfb_writereg(fbdev, OCFB_VBARA, fbdev->fb_phys);
+
+	/* Detect endianness */
+ if (ocfb_readreg(fbdev, OCFB_VBARA) != fbdev->fb_phys) {
+ fbdev->little_endian = 1;
+ ocfb_writereg(fbdev, OCFB_VBARA, fbdev->fb_phys);
+ }
+
+ /* Horizontal timings */
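+	/* top byte: hsync length - 1, next byte: horizontal front porch
+	 * (right_margin) - 1, low bits: visible width (xres) - 1 */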
+ ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
+ (var->right_margin - 1) << 16 | (var->xres - 1));
+
+ /* Vertical timings */
+ ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
+ (var->lower_margin - 1) << 16 | (var->yres - 1));
+
+ /* Total length of frame */
+ hlen = var->left_margin + var->right_margin + var->hsync_len +
+ var->xres;
+
+ vlen = var->upper_margin + var->lower_margin + var->vsync_len +
+ var->yres;
+
+ ocfb_writereg(fbdev, OCFB_HVLEN, (hlen - 1) << 16 | (vlen - 1));
+
+ bpp_config = OCFB_CTRL_CD8;
+ switch (var->bits_per_pixel) {
+ case 8:
+ if (!var->grayscale)
+ bpp_config |= OCFB_CTRL_PC; /* enable palette */
+ break;
+
+ case 16:
+ bpp_config |= OCFB_CTRL_CD16;
+ break;
+
+ case 24:
+ bpp_config |= OCFB_CTRL_CD24;
+ break;
+
+ case 32:
+ bpp_config |= OCFB_CTRL_CD32;
+ break;
+
+ default:
+ dev_err(dev, "no bpp specified\n");
+ break;
+ }
+
+ /* maximum (8) VBL (video memory burst length) */
+ bpp_config |= OCFB_CTRL_VBL8;
+
+ /* Enable output */
+ ocfb_writereg(fbdev, OCFB_CTRL, (OCFB_CTRL_VEN | bpp_config));
+
+ return 0;
+}
+
+static int ocfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct ocfb_dev *fbdev = (struct ocfb_dev *)info->par;
+ u32 color;
+
+ if (regno >= info->cmap.len) {
+ dev_err(info->device, "regno >= cmap.len\n");
+ return 1;
+ }
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
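+		/* 77/256, 151/256 and 28/256 approximate those weights in 8-bit fixed point */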
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+ transp >>= (16 - info->var.transp.length);
+
+ if (info->var.bits_per_pixel == 8 && !info->var.grayscale) {
+ regno <<= 2;
+ color = (red << 16) | (green << 8) | blue;
+ ocfb_writereg(fbdev, OCFB_PALETTE + regno, color);
+ } else {
+ ((u32 *)(info->pseudo_palette))[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ }
+
+ return 0;
+}
+
+static int ocfb_init_fix(struct ocfb_dev *fbdev)
+{
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+ struct fb_fix_screeninfo *fix = &fbdev->info.fix;
+
+ strcpy(fix->id, OCFB_NAME);
+
+ fix->line_length = var->xres * var->bits_per_pixel/8;
+ fix->smem_len = fix->line_length * var->yres;
+ fix->type = FB_TYPE_PACKED_PIXELS;
+
+ if (var->bits_per_pixel == 8 && !var->grayscale)
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ else
+ fix->visual = FB_VISUAL_TRUECOLOR;
+
+ return 0;
+}
+
+static int ocfb_init_var(struct ocfb_dev *fbdev)
+{
+ struct fb_var_screeninfo *var = &fbdev->info.var;
+
+ var->accel_flags = FB_ACCEL_NONE;
+ var->activate = FB_ACTIVATE_NOW;
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 8:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+
+ case 16:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ break;
+
+ case 24:
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+
+ case 32:
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ break;
+ }
+
+ return 0;
+}
+
+static struct fb_ops ocfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = ocfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int ocfb_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ocfb_dev *fbdev;
+ struct resource *res;
+ int fbsize;
+
+ fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, fbdev);
+
+ fbdev->info.fbops = &ocfb_ops;
+ fbdev->info.device = &pdev->dev;
+ fbdev->info.par = fbdev;
+
+ /* Video mode setup */
+ if (!fb_find_mode(&fbdev->info.var, &fbdev->info, mode_option,
+ NULL, 0, &default_mode, 16)) {
+ dev_err(&pdev->dev, "No valid video modes found\n");
+ return -EINVAL;
+ }
+ ocfb_init_var(fbdev);
+ ocfb_init_fix(fbdev);
+
+ /* Request I/O resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "I/O resource request failed\n");
+ return -ENXIO;
+ }
+ res->flags &= ~IORESOURCE_CACHEABLE;
+ fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbdev->regs))
+ return PTR_ERR(fbdev->regs);
+
+ /* Allocate framebuffer memory */
+ fbsize = fbdev->info.fix.smem_len;
+ fbdev->fb_virt = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbsize),
+ &fbdev->fb_phys, GFP_KERNEL);
+ if (!fbdev->fb_virt) {
+ dev_err(&pdev->dev,
+ "Frame buffer memory allocation failed\n");
+ return -ENOMEM;
+ }
+ fbdev->info.fix.smem_start = fbdev->fb_phys;
+ fbdev->info.screen_base = fbdev->fb_virt;
+ fbdev->info.pseudo_palette = fbdev->pseudo_palette;
+
+ /* Clear framebuffer */
+ memset_io(fbdev->fb_virt, 0, fbsize);
+
+ /* Setup and enable the framebuffer */
+ ocfb_setupfb(fbdev);
+
+ if (fbdev->little_endian)
+ fbdev->info.flags |= FBINFO_FOREIGN_ENDIAN;
+
+ /* Allocate color map */
+ ret = fb_alloc_cmap(&fbdev->info.cmap, PALETTE_SIZE, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Color map allocation failed\n");
+ goto err_dma_free;
+ }
+
+ /* Register framebuffer */
+ ret = register_framebuffer(&fbdev->info);
+ if (ret) {
+ dev_err(&pdev->dev, "Framebuffer registration failed\n");
+ goto err_dealloc_cmap;
+ }
+
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbdev->info.cmap);
+
+err_dma_free:
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbsize), fbdev->fb_virt,
+ fbdev->fb_phys);
+
+ return ret;
+}
+
+static int ocfb_remove(struct platform_device *pdev)
+{
+ struct ocfb_dev *fbdev = platform_get_drvdata(pdev);
+
+ unregister_framebuffer(&fbdev->info);
+ fb_dealloc_cmap(&fbdev->info.cmap);
+ dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbdev->info.fix.smem_len),
+ fbdev->fb_virt, fbdev->fb_phys);
+
+ /* Disable display */
+ ocfb_writereg(fbdev, OCFB_CTRL, 0);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct of_device_id ocfb_match[] = {
+ { .compatible = "opencores,ocfb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocfb_match);
+
+static struct platform_driver ocfb_driver = {
+ .probe = ocfb_probe,
+ .remove = ocfb_remove,
+ .driver = {
+ .name = "ocfb_fb",
+ .of_match_table = ocfb_match,
+ }
+};
+
+/*
+ * Init and exit routines
+ */
+static int __init ocfb_init(void)
+{
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("ocfb", &option))
+ return -ENODEV;
+ ocfb_setup(option);
+#endif
+ return platform_driver_register(&ocfb_driver);
+}
+
+static void __exit ocfb_exit(void)
+{
+ platform_driver_unregister(&ocfb_driver);
+}
+
+module_init(ocfb_init);
+module_exit(ocfb_exit);
+
+MODULE_AUTHOR("Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>");
+MODULE_DESCRIPTION("OpenCores VGA/LCD 2.0 frame buffer driver");
+MODULE_LICENSE("GPL v2");
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Video mode ('<xres>x<yres>[-<bpp>][@refresh]')");
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
index d94f35dbd536..8e97d06921ff 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
@@ -346,28 +346,22 @@ static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
static int acx565akm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
int level;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- mutex_lock(&ddata->mutex);
-
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
dev->props.power == FB_BLANK_UNBLANK)
level = dev->props.brightness;
else
level = 0;
- r = 0;
if (ddata->has_bc)
acx565akm_set_brightness(ddata, level);
else
- r = -ENODEV;
-
- mutex_unlock(&ddata->mutex);
+ return -ENODEV;
- return r;
+ return 0;
}
static int acx565akm_bl_get_intensity(struct backlight_device *dev)
@@ -390,9 +384,33 @@ static int acx565akm_bl_get_intensity(struct backlight_device *dev)
return 0;
}
+static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r;
+
+ mutex_lock(&ddata->mutex);
+ r = acx565akm_bl_update_status(dev);
+ mutex_unlock(&ddata->mutex);
+
+ return r;
+}
+
+static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r;
+
+ mutex_lock(&ddata->mutex);
+ r = acx565akm_bl_get_intensity(dev);
+ mutex_unlock(&ddata->mutex);
+
+ return r;
+}
+
static const struct backlight_ops acx565akm_bl_ops = {
- .get_brightness = acx565akm_bl_get_intensity,
- .update_status = acx565akm_bl_update_status,
+ .get_brightness = acx565akm_bl_get_intensity_locked,
+ .update_status = acx565akm_bl_update_status_locked,
};
/*--------------------Auto Brightness control via Sysfs---------------------*/
@@ -526,8 +544,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
struct omap_dss_device *in = ddata->in;
int r;
- mutex_lock(&ddata->mutex);
-
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
in->ops.sdi->set_timings(in, &ddata->videomode);
@@ -568,8 +584,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
set_display_state(ddata, 1);
set_cabc_mode(ddata, ddata->cabc_mode);
- mutex_unlock(&ddata->mutex);
-
return acx565akm_bl_update_status(ddata->bl_dev);
}
@@ -616,7 +630,9 @@ static int acx565akm_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
+ mutex_lock(&ddata->mutex);
r = acx565akm_panel_power_on(dssdev);
+ mutex_unlock(&ddata->mutex);
if (r)
return r;
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index 60758dbefd79..0a0b084ce65d 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -149,6 +149,9 @@ static void apply_init_priv(void)
op = &dss_data.ovl_priv_data_array[i];
+ op->info.color_mode = OMAP_DSS_COLOR_RGB16;
+ op->info.rotation_type = OMAP_DSS_ROT_DMA;
+
op->info.global_alpha = 255;
switch (i) {
@@ -629,7 +632,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
struct mgr_priv_data *mp;
int r;
- DSSDBG("writing ovl %d regs", ovl->id);
+ DSSDBG("writing ovl %d regs\n", ovl->id);
if (!op->enabled || !op->info_dirty)
return;
@@ -664,7 +667,7 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct mgr_priv_data *mp;
- DSSDBG("writing ovl %d regs extra", ovl->id);
+ DSSDBG("writing ovl %d regs extra\n", ovl->id);
if (!op->extra_info_dirty)
return;
@@ -687,7 +690,7 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
struct mgr_priv_data *mp = get_mgr_priv(mgr);
struct omap_overlay *ovl;
- DSSDBG("writing mgr %d regs", mgr->id);
+ DSSDBG("writing mgr %d regs\n", mgr->id);
if (!mp->enabled)
return;
@@ -713,7 +716,7 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
- DSSDBG("writing mgr %d regs extra", mgr->id);
+ DSSDBG("writing mgr %d regs extra\n", mgr->id);
if (!mp->extra_info_dirty)
return;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 4ec59ca72e5d..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -90,6 +90,8 @@ struct dispc_features {
/* revert to the OMAP4 mechanism of DISPC Smart Standby operation */
bool mstandby_workaround:1;
+
+ bool set_max_preload:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -1200,7 +1202,17 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
+
+ /*
+	 * Configure the preload to the pipeline's high threshold; if the high
+	 * threshold is too large for the preload field, set the preload to the
+	 * maximum value the preload register can hold
+ */
+ if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
+ plane != OMAP_DSS_WB)
+ dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
}
+EXPORT_SYMBOL(dispc_ovl_set_fifo_threshold);
void dispc_enable_fifomerge(bool enable)
{
@@ -1259,6 +1271,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
*fifo_high = total_fifo_size - buf_unit;
}
}
+EXPORT_SYMBOL(dispc_ovl_compute_fifo_thresholds);
static void dispc_ovl_set_fir(enum omap_plane plane,
int hinc, int vinc,
@@ -1988,7 +2001,8 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *t, u16 pos_x,
- u16 width, u16 height, u16 out_width, u16 out_height)
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ bool five_taps)
{
const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
@@ -2008,6 +2022,10 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
if (blank <= limits[i])
return -EINVAL;
+ /* FIXME add checks for 3-tap filter once the limitations are known */
+ if (!five_taps)
+ return 0;
+
/*
* Pixel data should be prepared before visible display point starts.
 * So, at least DS-2 lines must have already been fetched by DISPC
@@ -2142,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2181,24 +2199,32 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
- in_width, in_height, out_width, out_height, color_mode);
-
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
- pos_x, in_width, in_height, out_width,
- out_height);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
+ *five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
if (in_height > out_height &&
in_height < out_height * 2)
*five_taps = false;
- if (!*five_taps)
+again:
+ if (*five_taps)
+ *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ in_width, in_height, out_width,
+ out_height, color_mode);
+ else
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
+ error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ pos_x, in_width, in_height, out_width,
+ out_height, *five_taps);
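+		/*
+		 * Five-tap scaling needs extra line-fetch time; if the horizontal
+		 * timing check fails while five taps are selected, drop back to
+		 * three taps and recompute the core clock.
+		 */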
+ if (error && *five_taps) {
+ *five_taps = false;
+ goto again;
+ }
+
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
!*core_clk || *core_clk > dispc_core_clk_rate());
@@ -2215,7 +2241,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, width,
- height, out_width, out_height)){
+ height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
@@ -2242,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2261,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2440,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
@@ -3211,6 +3237,8 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CONTROL3);
DUMPREG(DISPC_CONFIG3);
}
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
@@ -3285,6 +3313,8 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
if (dss_has_feature(FEAT_PRELOAD))
DUMPREG(i, DISPC_OVL_PRELOAD);
+ if (dss_has_feature(FEAT_MFLAG))
+ DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
}
#undef DISPC_REG
@@ -3520,6 +3550,7 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
@@ -3539,6 +3570,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
@@ -3558,6 +3590,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
.no_framedone_tv = true,
+ .set_max_preload = false,
};
static const struct dispc_features omap44xx_dispc_feats __initconst = {
@@ -3577,6 +3610,7 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
.gfx_fifo_workaround = true,
+ .set_max_preload = true,
};
static const struct dispc_features omap54xx_dispc_feats __initconst = {
@@ -3597,6 +3631,7 @@ static const struct dispc_features omap54xx_dispc_feats __initconst = {
.num_fifos = 5,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
+ .set_max_preload = true,
};
static int __init dispc_init_features(struct platform_device *pdev)
@@ -3691,7 +3726,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_irq_safe(&pdev->dev);
r = dispc_runtime_get();
if (r)
@@ -3734,6 +3768,8 @@ static int dispc_runtime_suspend(struct device *dev)
static int dispc_runtime_resume(struct device *dev)
{
+ _omap_dispc_initial_config();
+
dispc_restore_context();
return 0;
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index de4863d21ab7..78edb449c763 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -40,6 +40,7 @@
#define DISPC_CONTROL3 0x0848
#define DISPC_CONFIG3 0x084C
#define DISPC_MSTANDBY_CTRL 0x0858
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
@@ -100,6 +101,8 @@
DISPC_FIR_COEF_V2_OFFSET(n, i))
#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
DISPC_PRELOAD_OFFSET(n))
+#define DISPC_OVL_MFLAG_THRESHOLD(n) (DISPC_OVL_BASE(n) + \
+ DISPC_MFLAG_THRESHOLD_OFFSET(n))
/* DISPC up/downsampling FIR filter coefficient structure */
struct dispc_coef {
@@ -894,4 +897,21 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
return 0;
}
}
+
+static inline u16 DISPC_MFLAG_THRESHOLD_OFFSET(enum omap_plane plane)
+{
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ return 0x0860;
+ case OMAP_DSS_VIDEO1:
+ return 0x0864;
+ case OMAP_DSS_VIDEO2:
+ return 0x0868;
+ case OMAP_DSS_VIDEO3:
+ return 0x086c;
+ default:
+ BUG();
+ return 0;
+ }
+}
#endif
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/omap2/dss/display-sysfs.c
index 21d7f77df702..f7b5f9561041 100644
--- a/drivers/video/omap2/dss/display-sysfs.c
+++ b/drivers/video/omap2/dss/display-sysfs.c
@@ -277,7 +277,7 @@ static ssize_t display_wss_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(name, S_IRUGO, display_name_show, NULL);
+static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL);
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
@@ -292,7 +292,7 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
display_wss_show, display_wss_store);
static const struct attribute *display_sysfs_attrs[] = {
- &dev_attr_name.attr,
+ &dev_attr_display_name.attr,
&dev_attr_enabled.attr,
&dev_attr_tear_elim.attr,
&dev_attr_timings.attr,
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index bd48cde53561..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -184,12 +184,11 @@ static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
dpi_calc_hsdiv_cb, ctx);
}
-static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
- ctx->dss_cinfo.fck = fck;
- ctx->dss_cinfo.fck_div = fckd;
+ ctx->fck = fck;
return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
@@ -237,7 +236,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
ctx->pck_min = 0;
ctx->pck_max = pck + 1000 * i * i * i;
- ok = dss_div_calc(ctx->pck_min, dpi_calc_dss_cb, ctx);
+ ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx);
if (ok)
return ok;
}
@@ -286,13 +285,13 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
if (!ok)
return -EINVAL;
- r = dss_set_clock_div(&ctx.dss_cinfo);
+ r = dss_set_fck_rate(ctx.fck);
if (r)
return r;
dpi.mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.dss_cinfo.fck;
+ *fck = ctx.fck;
*lck_div = ctx.dispc_cinfo.lck_div;
*pck_div = ctx.dispc_cinfo.pck_div;
@@ -495,7 +494,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (!ok)
return -EINVAL;
- fck = ctx.dss_cinfo.fck;
+ fck = ctx.fck;
}
lck_div = ctx.dispc_cinfo.lck_div;
@@ -551,7 +550,8 @@ static int dpi_init_regulator(void)
vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 6056b27cf73c..a820c37e323e 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -47,63 +47,73 @@
#define DSI_CATCH_MISSING_TE
-struct dsi_reg { u16 idx; };
+struct dsi_reg { u16 module; u16 idx; };
-#define DSI_REG(idx) ((const struct dsi_reg) { idx })
+#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
-#define DSI_SZ_REGS SZ_1K
/* DSI Protocol Engine */
-#define DSI_REVISION DSI_REG(0x0000)
-#define DSI_SYSCONFIG DSI_REG(0x0010)
-#define DSI_SYSSTATUS DSI_REG(0x0014)
-#define DSI_IRQSTATUS DSI_REG(0x0018)
-#define DSI_IRQENABLE DSI_REG(0x001C)
-#define DSI_CTRL DSI_REG(0x0040)
-#define DSI_GNQ DSI_REG(0x0044)
-#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
-#define DSI_CLK_CTRL DSI_REG(0x0054)
-#define DSI_TIMING1 DSI_REG(0x0058)
-#define DSI_TIMING2 DSI_REG(0x005C)
-#define DSI_VM_TIMING1 DSI_REG(0x0060)
-#define DSI_VM_TIMING2 DSI_REG(0x0064)
-#define DSI_VM_TIMING3 DSI_REG(0x0068)
-#define DSI_CLK_TIMING DSI_REG(0x006C)
-#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
-#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
-#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
-#define DSI_VM_TIMING4 DSI_REG(0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
-#define DSI_VM_TIMING5 DSI_REG(0x0088)
-#define DSI_VM_TIMING6 DSI_REG(0x008C)
-#define DSI_VM_TIMING7 DSI_REG(0x0090)
-#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
-#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
-#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
+#define DSI_PROTO 0
+#define DSI_PROTO_SZ 0x200
+
+#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
/* DSIPHY_SCP */
-#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
-#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
-#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
-#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
-#define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
+#define DSI_PHY 1
+#define DSI_PHY_OFFSET 0x200
+#define DSI_PHY_SZ 0x40
+
+#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
/* DSI_PLL_CTRL_SCP */
-#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
-#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
-#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
-#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
-#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
+#define DSI_PLL 2
+#define DSI_PLL_OFFSET 0x300
+#define DSI_PLL_SZ 0x20
+
+#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
#define REG_GET(dsidev, idx, start, end) \
FLD_GET(dsi_read_reg(dsidev, idx), start, end)
@@ -277,7 +287,9 @@ struct dsi_clk_calc_ctx {
struct dsi_data {
struct platform_device *pdev;
- void __iomem *base;
+ void __iomem *proto_base;
+ void __iomem *phy_base;
+ void __iomem *pll_base;
int module_id;
@@ -297,7 +309,8 @@ struct dsi_data {
struct {
enum dsi_vc_source source;
struct omap_dss_device *dssdev;
- enum fifo_size fifo_size;
+ enum fifo_size tx_fifo_size;
+ enum fifo_size rx_fifo_size;
int vc_id;
} vc[4];
@@ -413,16 +426,32 @@ static inline void dsi_write_reg(struct platform_device *dsidev,
const struct dsi_reg idx, u32 val)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ void __iomem *base;
+
+	switch (idx.module) {
+ case DSI_PROTO: base = dsi->proto_base; break;
+ case DSI_PHY: base = dsi->phy_base; break;
+ case DSI_PLL: base = dsi->pll_base; break;
+ default: return;
+ }
- __raw_writel(val, dsi->base + idx.idx);
+ __raw_writel(val, base + idx.idx);
}
static inline u32 dsi_read_reg(struct platform_device *dsidev,
const struct dsi_reg idx)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ void __iomem *base;
- return __raw_readl(dsi->base + idx.idx);
+	switch (idx.module) {
+ case DSI_PROTO: base = dsi->proto_base; break;
+ case DSI_PHY: base = dsi->phy_base; break;
+ case DSI_PLL: base = dsi->pll_base; break;
+ default: return 0;
+ }
+
+ return __raw_readl(base + idx.idx);
}
static void dsi_bus_lock(struct omap_dss_device *dssdev)
@@ -1129,7 +1158,8 @@ static int dsi_regulator_init(struct platform_device *dsidev)
vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "VCXIO");
if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
@@ -2427,14 +2457,14 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
int add = 0;
int i;
- dsi->vc[0].fifo_size = size1;
- dsi->vc[1].fifo_size = size2;
- dsi->vc[2].fifo_size = size3;
- dsi->vc[3].fifo_size = size4;
+ dsi->vc[0].tx_fifo_size = size1;
+ dsi->vc[1].tx_fifo_size = size2;
+ dsi->vc[2].tx_fifo_size = size3;
+ dsi->vc[3].tx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi->vc[i].fifo_size;
+ int size = dsi->vc[i].tx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -2460,14 +2490,14 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
int add = 0;
int i;
- dsi->vc[0].fifo_size = size1;
- dsi->vc[1].fifo_size = size2;
- dsi->vc[2].fifo_size = size3;
- dsi->vc[3].fifo_size = size4;
+ dsi->vc[0].rx_fifo_size = size1;
+ dsi->vc[1].rx_fifo_size = size2;
+ dsi->vc[2].rx_fifo_size = size3;
+ dsi->vc[3].rx_fifo_size = size4;
for (i = 0; i < 4; i++) {
u8 v;
- int size = dsi->vc[i].fifo_size;
+ int size = dsi->vc[i].rx_fifo_size;
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
@@ -2920,7 +2950,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
DSSDBG("dsi_vc_send_long, %d bytes\n", len);
/* len + header */
- if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
+ if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
@@ -5345,8 +5375,9 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
{
u32 rev;
int r, i;
- struct resource *dsi_mem;
struct dsi_data *dsi;
+ struct resource *res;
+ struct resource temp_res;
dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -5376,16 +5407,64 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dsi->te_timer.function = dsi_te_timeout;
dsi->te_timer.data = 0;
#endif
- dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
- if (!dsi_mem) {
- DSSERR("can't get IORESOURCE_MEM DSI\n");
- return -EINVAL;
+
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start;
+ temp_res.end = temp_res.start + DSI_PROTO_SZ - 1;
+ res = &temp_res;
+ }
+
+ dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+ if (!dsi->proto_base) {
+ DSSERR("can't ioremap DSI protocol engine\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + DSI_PHY_OFFSET;
+ temp_res.end = temp_res.start + DSI_PHY_SZ - 1;
+ res = &temp_res;
+ }
+
+ dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+	if (!dsi->phy_base) {
+ DSSERR("can't ioremap DSI PHY\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
+ if (!res) {
+ res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DSSERR("can't get IORESOURCE_MEM DSI\n");
+ return -EINVAL;
+ }
+
+ temp_res.start = res->start + DSI_PLL_OFFSET;
+ temp_res.end = temp_res.start + DSI_PLL_SZ - 1;
+ res = &temp_res;
}
- dsi->base = devm_ioremap(&dsidev->dev, dsi_mem->start,
- resource_size(dsi_mem));
- if (!dsi->base) {
- DSSERR("can't ioremap DSI\n");
+ dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+	if (!dsi->pll_base) {
+ DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd01608e67e2..9a145da35ad3 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -67,7 +67,7 @@ static void dss_runtime_put(void);
struct dss_features {
u8 fck_div_max;
u8 dss_fck_multiplier;
- const char *clk_name;
+ const char *parent_clk_name;
int (*dpi_select_source)(enum omap_channel channel);
};
@@ -75,13 +75,12 @@ static struct {
struct platform_device *pdev;
void __iomem *base;
- struct clk *dpll4_m4_ck;
+ struct clk *parent_clk;
struct clk *dss_clk;
unsigned long dss_clk_rate;
unsigned long cache_req_pck;
unsigned long cache_prate;
- struct dss_clock_info cache_dss_cinfo;
struct dispc_clock_info cache_dispc_cinfo;
enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
@@ -265,8 +264,6 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
void dss_dump_clocks(struct seq_file *s)
{
- unsigned long dpll4_ck_rate;
- unsigned long dpll4_m4_ck_rate;
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
@@ -279,21 +276,9 @@ void dss_dump_clocks(struct seq_file *s)
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss.dss_clk);
- if (dss.dpll4_m4_ck) {
- dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
-
- seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
-
- seq_printf(s, "%s (%s) = %lu / %lu * %d = %lu\n",
- fclk_name, fclk_real_name, dpll4_ck_rate,
- dpll4_ck_rate / dpll4_m4_ck_rate,
- dss.feat->dss_fck_multiplier, fclk_rate);
- } else {
- seq_printf(s, "%s (%s) = %lu\n",
- fclk_name, fclk_real_name,
- fclk_rate);
- }
+ seq_printf(s, "%s (%s) = %lu\n",
+ fclk_name, fclk_real_name,
+ fclk_rate);
dss_runtime_put();
}
@@ -451,30 +436,8 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
}
}
-/* calculate clock rates using dividers in cinfo */
-int dss_calc_clock_rates(struct dss_clock_info *cinfo)
-{
- if (dss.dpll4_m4_ck) {
- unsigned long prate;
-
- if (cinfo->fck_div > dss.feat->fck_div_max ||
- cinfo->fck_div == 0)
- return -EINVAL;
-
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
-
- cinfo->fck = prate / cinfo->fck_div *
- dss.feat->dss_fck_multiplier;
- } else {
- if (cinfo->fck_div != 0)
- return -EINVAL;
- cinfo->fck = clk_get_rate(dss.dss_clk);
- }
-
- return 0;
-}
-
-bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
+bool dss_div_calc(unsigned long pck, unsigned long fck_min,
+ dss_div_calc_func func, void *data)
{
int fckd, fckd_start, fckd_stop;
unsigned long fck;
@@ -483,22 +446,24 @@ bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
unsigned long prate;
unsigned m;
- if (dss.dpll4_m4_ck == NULL) {
- /*
- * TODO: dss1_fclk can be changed on OMAP2, but the available
- * dividers are not continuous. We just use the pre-set rate for
- * now.
- */
- fck = clk_get_rate(dss.dss_clk);
- fckd = 1;
- return func(fckd, fck, data);
+ fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ if (dss.parent_clk == NULL) {
+ unsigned pckd;
+
+ pckd = fck_hw_max / pck;
+
+ fck = pck * pckd;
+
+ fck = clk_round_rate(dss.dss_clk, fck);
+
+ return func(fck, data);
}
- fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
fckd_hw_max = dss.feat->fck_div_max;
m = dss.feat->dss_fck_multiplier;
- prate = dss_get_dpll4_rate();
+ prate = clk_get_rate(dss.parent_clk);
fck_min = fck_min ? fck_min : 1;
@@ -508,50 +473,32 @@ bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data)
for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
fck = prate / fckd * m;
- if (func(fckd, fck, data))
+ if (func(fck, data))
return true;
}
return false;
}
-int dss_set_clock_div(struct dss_clock_info *cinfo)
+int dss_set_fck_rate(unsigned long rate)
{
- if (dss.dpll4_m4_ck) {
- unsigned long prate;
- int r;
+ int r;
- prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- DSSDBG("dpll4_m4 = %ld\n", prate);
+ DSSDBG("set fck to %lu\n", rate);
- r = clk_set_rate(dss.dpll4_m4_ck,
- DIV_ROUND_UP(prate, cinfo->fck_div));
- if (r)
- return r;
- } else {
- if (cinfo->fck_div != 0)
- return -EINVAL;
- }
+ r = clk_set_rate(dss.dss_clk, rate);
+ if (r)
+ return r;
dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
- WARN_ONCE(dss.dss_clk_rate != cinfo->fck,
+ WARN_ONCE(dss.dss_clk_rate != rate,
"clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
- cinfo->fck);
-
- DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
+ rate);
return 0;
}
-unsigned long dss_get_dpll4_rate(void)
-{
- if (dss.dpll4_m4_ck)
- return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- else
- return 0;
-}
-
unsigned long dss_get_dispc_clk_rate(void)
{
return dss.dss_clk_rate;
@@ -560,27 +507,23 @@ unsigned long dss_get_dispc_clk_rate(void)
static int dss_setup_default_clock(void)
{
unsigned long max_dss_fck, prate;
+ unsigned long fck;
unsigned fck_div;
- struct dss_clock_info dss_cinfo = { 0 };
int r;
- if (dss.dpll4_m4_ck == NULL)
- return 0;
-
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- prate = dss_get_dpll4_rate();
-
- fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
- max_dss_fck);
-
- dss_cinfo.fck_div = fck_div;
+ if (dss.parent_clk == NULL) {
+ fck = clk_round_rate(dss.dss_clk, max_dss_fck);
+ } else {
+ prate = clk_get_rate(dss.parent_clk);
- r = dss_calc_clock_rates(&dss_cinfo);
- if (r)
- return r;
+ fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ max_dss_fck);
+ fck = prate / fck_div * dss.feat->dss_fck_multiplier;
+ }
- r = dss_set_clock_div(&dss_cinfo);
+ r = dss_set_fck_rate(fck);
if (r)
return r;
@@ -706,25 +649,25 @@ static int dss_get_clocks(void)
dss.dss_clk = clk;
- if (dss.feat->clk_name) {
- clk = clk_get(NULL, dss.feat->clk_name);
+ if (dss.feat->parent_clk_name) {
+ clk = clk_get(NULL, dss.feat->parent_clk_name);
if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->clk_name);
+ DSSERR("Failed to get %s\n", dss.feat->parent_clk_name);
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
- dss.dpll4_m4_ck = clk;
+ dss.parent_clk = clk;
return 0;
}
static void dss_put_clocks(void)
{
- if (dss.dpll4_m4_ck)
- clk_put(dss.dpll4_m4_ck);
+ if (dss.parent_clk)
+ clk_put(dss.parent_clk);
}
static int dss_runtime_get(void)
@@ -761,37 +704,41 @@ void dss_debug_dump_clocks(struct seq_file *s)
#endif
static const struct dss_features omap24xx_dss_feats __initconst = {
- .fck_div_max = 16,
+ /*
+ * fck div max is really 16, but the divider range has gaps. The range
+ * from 1 to 6 has no gaps, so let's use that as a max.
+ */
+ .fck_div_max = 6,
.dss_fck_multiplier = 2,
- .clk_name = NULL,
+ .parent_clk_name = "core_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap34xx_dss_feats __initconst = {
.fck_div_max = 16,
.dss_fck_multiplier = 2,
- .clk_name = "dpll4_m4_ck",
+ .parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap3630_dss_feats __initconst = {
.fck_div_max = 32,
.dss_fck_multiplier = 1,
- .clk_name = "dpll4_m4_ck",
+ .parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
};
static const struct dss_features omap44xx_dss_feats __initconst = {
.fck_div_max = 32,
.dss_fck_multiplier = 1,
- .clk_name = "dpll_per_m5x2_ck",
+ .parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap4,
};
static const struct dss_features omap54xx_dss_feats __initconst = {
.fck_div_max = 64,
.dss_fck_multiplier = 1,
- .clk_name = "dpll_per_h12x2_ck",
+ .parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap5,
};
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index f538e867c0f8..057f24c8a332 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -100,14 +100,6 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dss_clock_info {
- /* rates that we get with dividers below */
- unsigned long fck;
-
- /* dividers */
- u16 fck_div;
-};
-
struct dispc_clock_info {
/* rates that we get with dividers below */
unsigned long lck;
@@ -250,12 +242,11 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
-unsigned long dss_get_dpll4_rate(void);
-int dss_calc_clock_rates(struct dss_clock_info *cinfo);
-int dss_set_clock_div(struct dss_clock_info *cinfo);
+int dss_set_fck_rate(unsigned long rate);
-typedef bool (*dss_div_calc_func)(int fckd, unsigned long fck, void *data);
-bool dss_div_calc(unsigned long fck_min, dss_div_calc_func func, void *data);
+typedef bool (*dss_div_calc_func)(unsigned long fck, void *data);
+bool dss_div_calc(unsigned long pck, unsigned long fck_min,
+ dss_div_calc_func func, void *data);
/* SDI */
int sdi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index f8fd6dbacabc..7f8969191dc6 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -613,6 +613,7 @@ static const enum dss_feat_id omap5_dss_feat_list[] = {
FEAT_DSI_PLL_SELFREQDCO,
FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
+ FEAT_MFLAG,
};
/* OMAP2 DSS Features */
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 10b0556e1352..e3ef3b714896 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -64,6 +64,7 @@ enum dss_feat_id {
FEAT_DSI_PLL_SELFREQDCO,
FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
+ FEAT_MFLAG,
};
/* DSS register field id */
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
index b0493768a5d7..e25681ff5a70 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -41,14 +41,14 @@
#define HDMI_WP_VIDEO_SIZE 0x60
#define HDMI_WP_VIDEO_TIMING_H 0x68
#define HDMI_WP_VIDEO_TIMING_V 0x6C
-#define HDMI_WP_WP_CLK 0x70
+#define HDMI_WP_CLK 0x70
#define HDMI_WP_AUDIO_CFG 0x80
#define HDMI_WP_AUDIO_CFG2 0x84
#define HDMI_WP_AUDIO_CTRL 0x88
#define HDMI_WP_AUDIO_DATA 0x8C
/* HDMI WP IRQ flags */
-
+#define HDMI_IRQ_CORE (1 << 0)
#define HDMI_IRQ_OCP_TIMEOUT (1 << 4)
#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW (1 << 8)
#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW (1 << 9)
@@ -378,15 +378,15 @@ static inline u32 hdmi_read_reg(void __iomem *base_addr, const u16 idx)
FLD_GET(hdmi_read_reg(base, idx), start, end)
static inline int hdmi_wait_for_bit_change(void __iomem *base_addr,
- const u16 idx, int b2, int b1, u32 val)
+ const u32 idx, int b2, int b1, u32 val)
{
- u32 t = 0;
- while (val != REG_GET(base_addr, idx, b2, b1)) {
- udelay(1);
+ u32 t = 0, v;
+ while (val != (v = REG_GET(base_addr, idx, b2, b1))) {
if (t++ > 10000)
- return !val;
+ return v;
+ udelay(1);
}
- return val;
+ return v;
}
/* HDMI wrapper funcs */
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/omap2/dss/hdmi4.c
index e14009614338..4a74538f9ea5 100644
--- a/drivers/video/omap2/dss/hdmi4.c
+++ b/drivers/video/omap2/dss/hdmi4.c
@@ -95,7 +95,8 @@ static int hdmi_init_regulator(void)
reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
if (IS_ERR(reg)) {
- DSSERR("can't get VDDA_HDMI_DAC regulator\n");
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_HDMI_DAC regulator\n");
return PTR_ERR(reg);
}
@@ -148,8 +149,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- dss_mgr_disable(mgr);
-
p = &hdmi.cfg.timings;
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
@@ -158,8 +157,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
- hdmi_wp_video_stop(&hdmi.wp);
-
/* config the PLL and PHY hdmi_set_pll_pwrfirst */
r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
if (r) {
@@ -218,14 +215,12 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct hdmi_cm cm;
+ struct omap_dss_device *out = &hdmi.output;
- cm = hdmi_get_code(timings);
- if (cm.code == -1)
+ if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
return -EINVAL;
return 0;
-
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
@@ -244,8 +239,17 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
hdmi.cfg = *t;
dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ } else {
+ hdmi.cfg.timings = *timings;
+ hdmi.cfg.cm.code = 0;
+ hdmi.cfg.cm.mode = HDMI_DVI;
+
+ dispc_set_tv_pclk(timings->pixel_clock * 1000);
}
+ DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ?
+ "DVI" : "HDMI", hdmi.cfg.cm.code);
+
mutex_unlock(&hdmi.lock);
}
diff --git a/drivers/video/omap2/dss/hdmi4_core.c b/drivers/video/omap2/dss/hdmi4_core.c
index 5dd5e5489b41..2eb04dcf807c 100644
--- a/drivers/video/omap2/dss/hdmi4_core.c
+++ b/drivers/video/omap2/dss/hdmi4_core.c
@@ -19,6 +19,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define DSS_SUBSYS_NAME "HDMICORE"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -125,12 +127,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
/* HDMI_CORE_DDC_STATUS_BUS_LOW */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) {
- pr_err("I2C Bus Low?\n");
+ DSSERR("I2C Bus Low?\n");
return -EIO;
}
/* HDMI_CORE_DDC_STATUS_NO_ACK */
if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) {
- pr_err("I2C No Ack\n");
+ DSSERR("I2C No Ack\n");
return -EIO;
}
@@ -161,7 +163,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
checksum += pedid[i];
if (checksum != 0) {
- pr_err("E-EDID checksum failed!!\n");
+ DSSERR("E-EDID checksum failed!!\n");
return -EIO;
}
@@ -199,7 +201,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
struct hdmi_core_infoframe_avi *avi_cfg,
struct hdmi_core_packet_enable_repeat *repeat_cfg)
{
- pr_debug("Enter hdmi_core_init\n");
+ DSSDBG("Enter hdmi_core_init\n");
/* video core */
video_cfg->ip_bus_width = HDMI_INPUT_8BIT;
@@ -241,19 +243,19 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_powerdown_disable\n");
+ DSSDBG("Enter hdmi_core_powerdown_disable\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
}
static void hdmi_core_swreset_release(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_swreset_release\n");
+ DSSDBG("Enter hdmi_core_swreset_release\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0);
}
static void hdmi_core_swreset_assert(struct hdmi_core_data *core)
{
- pr_debug("Enter hdmi_core_swreset_assert\n");
+ DSSDBG("Enter hdmi_core_swreset_assert\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0);
}
@@ -1004,7 +1006,7 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_core");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
if (!res) {
DSSDBG("can't get CORE mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/omap2/dss/hdmi_common.c
index 5586aaad9d63..0614922902dd 100644
--- a/drivers/video/omap2/dss/hdmi_common.c
+++ b/drivers/video/omap2/dss/hdmi_common.c
@@ -13,6 +13,8 @@
* map it to corresponding CEA or VESA index.
*/
+#define DSS_SUBSYS_NAME "HDMI"
+
#include <linux/kernel.h>
#include <linux/err.h>
#include <video/omapdss.h>
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/omap2/dss/hdmi_phy.c
index 45acb997ac00..dd376ce8da01 100644
--- a/drivers/video/omap2/dss/hdmi_phy.c
+++ b/drivers/video/omap2/dss/hdmi_phy.c
@@ -124,7 +124,7 @@ int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_txphy");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
if (!res) {
DSSDBG("can't get PHY mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/omap2/dss/hdmi_pll.c
index d3e6e78c0082..5fc71215c303 100644
--- a/drivers/video/omap2/dss/hdmi_pll.c
+++ b/drivers/video/omap2/dss/hdmi_pll.c
@@ -8,6 +8,8 @@
* the Free Software Foundation.
*/
+#define DSS_SUBSYS_NAME "HDMIPLL"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -127,24 +129,24 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
/* wait for bit change */
if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
0, 0, 1) != 1) {
- pr_err("PLL GO bit not set\n");
+ DSSERR("PLL GO bit not set\n");
return -ETIMEDOUT;
}
/* Wait till the lock bit is set in PLL status */
if (hdmi_wait_for_bit_change(pll->base,
PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- pr_err("cannot lock PLL\n");
- pr_err("CFG1 0x%x\n",
+ DSSERR("cannot lock PLL\n");
+ DSSERR("CFG1 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG1));
- pr_err("CFG2 0x%x\n",
+ DSSERR("CFG2 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG2));
- pr_err("CFG4 0x%x\n",
+ DSSERR("CFG4 0x%x\n",
hdmi_read_reg(pll->base, PLLCTRL_CFG4));
return -ETIMEDOUT;
}
- pr_debug("PLL locked!\n");
+ DSSDBG("PLL locked!\n");
return 0;
}
@@ -157,7 +159,7 @@ static int hdmi_pll_reset(struct hdmi_pll_data *pll)
/* READ 0x0 reset is in progress */
if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
!= 1) {
- pr_err("Failed to sysreset PLL\n");
+ DSSERR("Failed to sysreset PLL\n");
return -ETIMEDOUT;
}
@@ -200,7 +202,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_pllctrl");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
if (!res) {
DSSDBG("can't get PLL mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/omap2/dss/hdmi_wp.c
index 8151d8969a6e..cd620c6e43a0 100644
--- a/drivers/video/omap2/dss/hdmi_wp.c
+++ b/drivers/video/omap2/dss/hdmi_wp.c
@@ -8,6 +8,8 @@
* the Free Software Foundation.
*/
+#define DSS_SUBSYS_NAME "HDMIWP"
+
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -34,7 +36,7 @@ void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s)
DUMPREG(HDMI_WP_VIDEO_SIZE);
DUMPREG(HDMI_WP_VIDEO_TIMING_H);
DUMPREG(HDMI_WP_VIDEO_TIMING_V);
- DUMPREG(HDMI_WP_WP_CLK);
+ DUMPREG(HDMI_WP_CLK);
DUMPREG(HDMI_WP_AUDIO_CFG);
DUMPREG(HDMI_WP_AUDIO_CFG2);
DUMPREG(HDMI_WP_AUDIO_CTRL);
@@ -76,7 +78,7 @@ int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val)
/* Status of the power control of HDMI PHY */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val)
!= val) {
- pr_err("Failed to set PHY power mode to %d\n", val);
+ DSSERR("Failed to set PHY power mode to %d\n", val);
return -ETIMEDOUT;
}
@@ -92,7 +94,7 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val)
/* wait till PHY_PWR_STATUS is set */
if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val)
!= val) {
- pr_err("Failed to set PLL_PWR_STATUS\n");
+ DSSERR("Failed to set PLL_PWR_STATUS\n");
return -ETIMEDOUT;
}
@@ -129,7 +131,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
{
u32 r;
bool vsync_pol, hsync_pol;
- pr_debug("Enter hdmi_wp_video_config_interface\n");
+ DSSDBG("Enter hdmi_wp_video_config_interface\n");
vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
@@ -148,7 +150,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
u32 timing_h = 0;
u32 timing_v = 0;
- pr_debug("Enter hdmi_wp_video_config_timing\n");
+ DSSDBG("Enter hdmi_wp_video_config_timing\n");
timing_h |= FLD_VAL(timings->hbp, 31, 20);
timing_h |= FLD_VAL(timings->hfp, 19, 8);
@@ -164,7 +166,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct omap_video_timings *timings, struct hdmi_config *param)
{
- pr_debug("Enter hdmi_wp_video_init_format\n");
+ DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
video_fmt->y_res = param->timings.y_res;
@@ -241,7 +243,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
struct resource *res;
struct resource temp_res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi_wp");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wp");
if (!res) {
DSSDBG("can't get WP mem resource by name\n");
/*
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index eccde322c28a..2f7cee985cdd 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -113,11 +113,6 @@ void dss_uninit_overlays(struct platform_device *pdev)
int dss_ovl_simple_check(struct omap_overlay *ovl,
const struct omap_overlay_info *info)
{
- if (info->paddr == 0) {
- DSSERR("check_overlay: paddr cannot be 0\n");
- return -EINVAL;
- }
-
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
if (info->out_width != 0 && info->width != info->out_width) {
DSSERR("check_overlay: overlay %d doesn't support "
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ccc569ae7cca..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -63,19 +63,18 @@ static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return true;
}
-static bool dpi_calc_dss_cb(int fckd, unsigned long fck, void *data)
+static bool dpi_calc_dss_cb(unsigned long fck, void *data)
{
struct sdi_clk_calc_ctx *ctx = data;
- ctx->dss_cinfo.fck = fck;
- ctx->dss_cinfo.fck_div = fckd;
+ ctx->fck = fck;
return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
static int sdi_calc_clock_div(unsigned long pclk,
- struct dss_clock_info *dss_cinfo,
+ unsigned long *fck,
struct dispc_clock_info *dispc_cinfo)
{
int i;
@@ -98,9 +97,9 @@ static int sdi_calc_clock_div(unsigned long pclk,
ctx.pck_min = 0;
ctx.pck_max = pclk + 1000 * i * i * i;
- ok = dss_div_calc(ctx.pck_min, dpi_calc_dss_cb, &ctx);
+ ok = dss_div_calc(pclk, ctx.pck_min, dpi_calc_dss_cb, &ctx);
if (ok) {
- *dss_cinfo = ctx.dss_cinfo;
+ *fck = ctx.fck;
*dispc_cinfo = ctx.dispc_cinfo;
return 0;
}
@@ -128,7 +127,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
struct omap_video_timings *t = &sdi.timings;
- struct dss_clock_info dss_cinfo;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
int r;
@@ -150,13 +149,13 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- r = sdi_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
+ r = sdi_calc_clock_div(t->pixel_clock * 1000, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi.mgr_config.clock_info = dispc_cinfo;
- pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
if (pck != t->pixel_clock) {
DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
@@ -169,7 +168,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
dss_mgr_set_timings(out->manager, t);
- r = dss_set_clock_div(&dss_cinfo);
+ r = dss_set_fck_rate(fck);
if (r)
goto err_set_dss_clock_div;
@@ -265,7 +264,8 @@ static int sdi_init_regulator(void)
vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
- DSSERR("can't get VDDS_SDI regulator\n");
+ if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_SDI regulator\n");
return PTR_ERR(vdds_sdi);
}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 5f88ac47b7fa..2cd7f7e42105 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -639,7 +639,8 @@ static int venc_init_regulator(void)
vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
if (IS_ERR(vdda_dac)) {
- DSSERR("can't get VDDA_DAC regulator\n");
+ if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_DAC regulator\n");
return PTR_ERR(vdda_dac);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 27d6905683f3..fcb9e932d00c 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1833,6 +1833,16 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
if (fbdev == NULL)
return;
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+ int j;
+
+ for (j = 0; j < ofbi->num_overlays; j++) {
+ struct omap_overlay *ovl = ofbi->overlays[j];
+ ovl->disable(ovl);
+ }
+ }
+
for (i = 0; i < fbdev->num_fbs; i++)
unregister_framebuffer(fbdev->fbs[i]);
@@ -2557,6 +2567,15 @@ static int omapfb_probe(struct platform_device *pdev)
goto cleanup;
}
+ if (def_display) {
+ u16 w, h;
+
+ def_display->driver->get_resolution(def_display, &w, &h);
+
+ dev_info(fbdev->dev, "using display '%s' mode %dx%d\n",
+ def_display->name, w, h);
+ }
+
return 0;
cleanup:
diff --git a/drivers/video/output.c b/drivers/video/output.c
deleted file mode 100644
index 1446c49fe6af..000000000000
--- a/drivers/video/output.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * output.c - Display Output Switch driver
- *
- * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/video_output.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/ctype.h>
-
-
-MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret_size = 0;
- struct output_device *od = to_output_device(dev);
- if (od->props)
- ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
- return ret_size;
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- const char *buf,size_t count)
-{
- char *endp;
- struct output_device *od = to_output_device(dev);
- int request_state = simple_strtoul(buf,&endp,0);
- size_t size = endp - buf;
-
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
-
- if (od->props) {
- od->request_state = request_state;
- od->props->set_state(od);
- }
- return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static void video_output_release(struct device *dev)
-{
- struct output_device *od = to_output_device(dev);
- kfree(od);
-}
-
-static struct attribute *video_output_attrs[] = {
- &dev_attr_state.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(video_output);
-
-static struct class video_output_class = {
- .name = "video_output",
- .dev_release = video_output_release,
- .dev_groups = video_output_groups,
-};
-
-struct output_device *video_output_register(const char *name,
- struct device *dev,
- void *devdata,
- struct output_properties *op)
-{
- struct output_device *new_dev;
- int ret_code = 0;
-
- new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
- if (!new_dev) {
- ret_code = -ENOMEM;
- goto error_return;
- }
- new_dev->props = op;
- new_dev->dev.class = &video_output_class;
- new_dev->dev.parent = dev;
- dev_set_name(&new_dev->dev, "%s", name);
- dev_set_drvdata(&new_dev->dev, devdata);
- ret_code = device_register(&new_dev->dev);
- if (ret_code) {
- kfree(new_dev);
- goto error_return;
- }
- return new_dev;
-
-error_return:
- return ERR_PTR(ret_code);
-}
-EXPORT_SYMBOL(video_output_register);
-
-void video_output_unregister(struct output_device *dev)
-{
- if (!dev)
- return;
- device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL(video_output_unregister);
-
-static void __exit video_output_class_exit(void)
-{
- class_unregister(&video_output_class);
-}
-
-static int __init video_output_class_init(void)
-{
- return class_register(&video_output_class);
-}
-
-postcore_initcall(video_output_class_init);
-module_exit(video_output_class_exit);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index a5514acd2ac6..8a8d7f060784 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2128,7 +2128,6 @@ static void rivafb_remove(struct pci_dev *pd)
pci_release_regions(pd);
kfree(info->pixmap.addr);
framebuffer_release(info);
- pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
deleted file mode 100644
index bc74d0408998..000000000000
--- a/drivers/video/sgivwfb.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
- * linux/drivers/video/sgivwfb.c -- SGI DBE frame buffer device
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Jeffrey Newquist, newquist@engr.sgi.som
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/visws/sgivw.h>
-
-#define INCLUDE_TIMING_TABLE_DATA
-#define DBE_REG_BASE par->regs
-#include <video/sgivw.h>
-
-struct sgivw_par {
- struct asregs *regs;
- u32 cmap_fifo;
- u_long timing_num;
-};
-
-#define FLATPANEL_SGI_1600SW 5
-
-/*
- * RAM we reserve for the frame buffer. This defines the maximum screen
- * size
- *
- * The default can be overridden if the driver is compiled as a module
- */
-
-static int ypan = 0;
-static int ywrap = 0;
-
-static int flatpanel_id = -1;
-
-static struct fb_fix_screeninfo sgivwfb_fix = {
- .id = "SGI Vis WS FB",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .mmio_start = DBE_REG_PHYS,
- .mmio_len = DBE_REG_SIZE,
- .accel = FB_ACCEL_NONE,
- .line_length = 640,
-};
-
-static struct fb_var_screeninfo sgivwfb_var = {
- /* 640x480, 8 bpp */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .pixclock = 20000,
- .left_margin = 64,
- .right_margin = 64,
- .upper_margin = 32,
- .lower_margin = 32,
- .hsync_len = 64,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-static struct fb_var_screeninfo sgivwfb_var1600sw = {
- /* 1600x1024, 8 bpp */
- .xres = 1600,
- .yres = 1024,
- .xres_virtual = 1600,
- .yres_virtual = 1024,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .pixclock = 9353,
- .left_margin = 20,
- .right_margin = 30,
- .upper_margin = 37,
- .lower_margin = 3,
- .hsync_len = 20,
- .vsync_len = 3,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-/*
- * Interface used by the world
- */
-int sgivwfb_init(void);
-
-static int sgivwfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
-static int sgivwfb_set_par(struct fb_info *info);
-static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green,
- u_int blue, u_int transp,
- struct fb_info *info);
-static int sgivwfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma);
-
-static struct fb_ops sgivwfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = sgivwfb_check_var,
- .fb_set_par = sgivwfb_set_par,
- .fb_setcolreg = sgivwfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
- .fb_mmap = sgivwfb_mmap,
-};
-
-/*
- * Internal routines
- */
-static unsigned long bytes_per_pixel(int bpp)
-{
- switch (bpp) {
- case 8:
- return 1;
- case 16:
- return 2;
- case 32:
- return 4;
- default:
- printk(KERN_INFO "sgivwfb: unsupported bpp %d\n", bpp);
- return 0;
- }
-}
-
-static unsigned long get_line_length(int xres_virtual, int bpp)
-{
- return (xres_virtual * bytes_per_pixel(bpp));
-}
-
-/*
- * Function: dbe_TurnOffDma
- * Parameters: (None)
- * Description: This should turn off the monitor and dbe. This is used
- * when switching between the serial console and the graphics
- * console.
- */
-
-static void dbe_TurnOffDma(struct sgivw_par *par)
-{
- unsigned int readVal;
- int i;
-
- // Check to see if things are already turned off:
- // 1) Check to see if dbe is not using the internal dotclock.
- // 2) Check to see if the xy counter in dbe is already off.
-
- DBE_GETREG(ctrlstat, readVal);
- if (GET_DBE_FIELD(CTRLSTAT, PCLKSEL, readVal) < 2)
- return;
-
- DBE_GETREG(vt_xy, readVal);
- if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1)
- return;
-
- // Otherwise, turn off dbe
-
- DBE_GETREG(ovr_control, readVal);
- SET_DBE_FIELD(OVR_CONTROL, OVR_DMA_ENABLE, readVal, 0);
- DBE_SETREG(ovr_control, readVal);
- udelay(1000);
- DBE_GETREG(frm_control, readVal);
- SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, readVal, 0);
- DBE_SETREG(frm_control, readVal);
- udelay(1000);
- DBE_GETREG(did_control, readVal);
- SET_DBE_FIELD(DID_CONTROL, DID_DMA_ENABLE, readVal, 0);
- DBE_SETREG(did_control, readVal);
- udelay(1000);
-
- // XXX HACK:
- //
- // This was necessary for GBE--we had to wait through two
- // vertical retrace periods before the pixel DMA was
- // turned off for sure. I've left this in for now, in
- // case dbe needs it.
-
- for (i = 0; i < 10000; i++) {
- DBE_GETREG(frm_inhwctrl, readVal);
- if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) ==
- 0)
- udelay(10);
- else {
- DBE_GETREG(ovr_inhwctrl, readVal);
- if (GET_DBE_FIELD
- (OVR_INHWCTRL, OVR_DMA_ENABLE, readVal) == 0)
- udelay(10);
- else {
- DBE_GETREG(did_inhwctrl, readVal);
- if (GET_DBE_FIELD
- (DID_INHWCTRL, DID_DMA_ENABLE,
- readVal) == 0)
- udelay(10);
- else
- break;
- }
- }
- }
-}
-
-/*
- * Set the User Defined Part of the Display. Again if par use it to get
- * real video mode.
- */
-static int sgivwfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct sgivw_par *par = (struct sgivw_par *)info->par;
- struct dbe_timing_info *timing;
- u_long line_length;
- u_long min_mode;
- int req_dot;
- int test_mode;
-
- /*
- * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
- * as FB_VMODE_SMOOTH_XPAN is only used internally
- */
-
- if (var->vmode & FB_VMODE_CONUPDATE) {
- var->vmode |= FB_VMODE_YWRAP;
- var->xoffset = info->var.xoffset;
- var->yoffset = info->var.yoffset;
- }
-
- /* XXX FIXME - forcing var's */
- var->xoffset = 0;
- var->yoffset = 0;
-
- /* Limit bpp to 8, 16, and 32 */
- if (var->bits_per_pixel <= 8)
- var->bits_per_pixel = 8;
- else if (var->bits_per_pixel <= 16)
- var->bits_per_pixel = 16;
- else if (var->bits_per_pixel <= 32)
- var->bits_per_pixel = 32;
- else
- return -EINVAL;
-
- var->grayscale = 0; /* No grayscale for now */
-
- /* determine valid resolution and timing */
- for (min_mode = 0; min_mode < ARRAY_SIZE(dbeVTimings); min_mode++) {
- if (dbeVTimings[min_mode].width >= var->xres &&
- dbeVTimings[min_mode].height >= var->yres)
- break;
- }
-
- if (min_mode == ARRAY_SIZE(dbeVTimings))
- return -EINVAL; /* Resolution to high */
-
- /* XXX FIXME - should try to pick best refresh rate */
- /* for now, pick closest dot-clock within 3MHz */
- req_dot = PICOS2KHZ(var->pixclock);
- printk(KERN_INFO "sgivwfb: requested pixclock=%d ps (%d KHz)\n",
- var->pixclock, req_dot);
- test_mode = min_mode;
- while (dbeVTimings[min_mode].width == dbeVTimings[test_mode].width) {
- if (dbeVTimings[test_mode].cfreq + 3000 > req_dot)
- break;
- test_mode++;
- }
- if (dbeVTimings[min_mode].width != dbeVTimings[test_mode].width)
- test_mode--;
- min_mode = test_mode;
- timing = &dbeVTimings[min_mode];
- printk(KERN_INFO "sgivwfb: granted dot-clock=%d KHz\n", timing->cfreq);
-
- /* Adjust virtual resolution, if necessary */
- if (var->xres > var->xres_virtual || (!ywrap && !ypan))
- var->xres_virtual = var->xres;
- if (var->yres > var->yres_virtual || (!ywrap && !ypan))
- var->yres_virtual = var->yres;
-
- /*
- * Memory limit
- */
- line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
- if (line_length * var->yres_virtual > sgivwfb_mem_size)
- return -ENOMEM; /* Virtual resolution to high */
-
- info->fix.line_length = line_length;
-
- switch (var->bits_per_pixel) {
- case 8:
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 16: /* RGBA 5551 */
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 6;
- var->green.length = 5;
- var->blue.offset = 1;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 32: /* RGB 8888 */
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 16;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- break;
- }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
-
- /* set video timing information */
- var->pixclock = KHZ2PICOS(timing->cfreq);
- var->left_margin = timing->htotal - timing->hsync_end;
- var->right_margin = timing->hsync_start - timing->width;
- var->upper_margin = timing->vtotal - timing->vsync_end;
- var->lower_margin = timing->vsync_start - timing->height;
- var->hsync_len = timing->hsync_end - timing->hsync_start;
- var->vsync_len = timing->vsync_end - timing->vsync_start;
-
- /* Ouch. This breaks the rules but timing_num is only important if you
- * change a video mode */
- par->timing_num = min_mode;
-
- printk(KERN_INFO "sgivwfb: new video mode xres=%d yres=%d bpp=%d\n",
- var->xres, var->yres, var->bits_per_pixel);
- printk(KERN_INFO " vxres=%d vyres=%d\n", var->xres_virtual,
- var->yres_virtual);
- return 0;
-}
-
-/*
- * Setup flatpanel related registers.
- */
-static void sgivwfb_setup_flatpanel(struct sgivw_par *par, struct dbe_timing_info *currentTiming)
-{
- int fp_wid, fp_hgt, fp_vbs, fp_vbe;
- u32 outputVal = 0;
-
- SET_DBE_FIELD(VT_FLAGS, HDRV_INVERT, outputVal,
- (currentTiming->flags & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1);
- SET_DBE_FIELD(VT_FLAGS, VDRV_INVERT, outputVal,
- (currentTiming->flags & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1);
- DBE_SETREG(vt_flags, outputVal);
-
- /* Turn on the flat panel */
- switch (flatpanel_id) {
- case FLATPANEL_SGI_1600SW:
- fp_wid = 1600;
- fp_hgt = 1024;
- fp_vbs = 0;
- fp_vbe = 1600;
- currentTiming->pll_m = 4;
- currentTiming->pll_n = 1;
- currentTiming->pll_p = 0;
- break;
- default:
- fp_wid = fp_hgt = fp_vbs = fp_vbe = 0xfff;
- }
-
- outputVal = 0;
- SET_DBE_FIELD(FP_DE, FP_DE_ON, outputVal, fp_vbs);
- SET_DBE_FIELD(FP_DE, FP_DE_OFF, outputVal, fp_vbe);
- DBE_SETREG(fp_de, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(FP_HDRV, FP_HDRV_OFF, outputVal, fp_wid);
- DBE_SETREG(fp_hdrv, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(FP_VDRV, FP_VDRV_ON, outputVal, 1);
- SET_DBE_FIELD(FP_VDRV, FP_VDRV_OFF, outputVal, fp_hgt + 1);
- DBE_SETREG(fp_vdrv, outputVal);
-}
-
-/*
- * Set the hardware according to 'par'.
- */
-static int sgivwfb_set_par(struct fb_info *info)
-{
- struct sgivw_par *par = info->par;
- int i, j, htmp, temp;
- u32 readVal, outputVal;
- int wholeTilesX, maxPixelsPerTileX;
- int frmWrite1, frmWrite2, frmWrite3b;
- struct dbe_timing_info *currentTiming; /* Current Video Timing */
- int xpmax, ypmax; // Monitor resolution
- int bytesPerPixel; // Bytes per pixel
-
- currentTiming = &dbeVTimings[par->timing_num];
- bytesPerPixel = bytes_per_pixel(info->var.bits_per_pixel);
- xpmax = currentTiming->width;
- ypmax = currentTiming->height;
-
- /* dbe_InitGraphicsBase(); */
- /* Turn on dotclock PLL */
- DBE_SETREG(ctrlstat, 0x20000000);
-
- dbe_TurnOffDma(par);
-
- /* dbe_CalculateScreenParams(); */
- maxPixelsPerTileX = 512 / bytesPerPixel;
- wholeTilesX = xpmax / maxPixelsPerTileX;
- if (wholeTilesX * maxPixelsPerTileX < xpmax)
- wholeTilesX++;
-
- printk(KERN_DEBUG "sgivwfb: pixPerTile=%d wholeTilesX=%d\n",
- maxPixelsPerTileX, wholeTilesX);
-
- /* dbe_InitGammaMap(); */
- udelay(10);
-
- for (i = 0; i < 256; i++) {
- DBE_ISETREG(gmap, i, (i << 24) | (i << 16) | (i << 8));
- }
-
- /* dbe_TurnOn(); */
- DBE_GETREG(vt_xy, readVal);
- if (GET_DBE_FIELD(VT_XY, VT_FREEZE, readVal) == 1) {
- DBE_SETREG(vt_xy, 0x00000000);
- udelay(1);
- } else
- dbe_TurnOffDma(par);
-
- /* dbe_Initdbe(); */
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 100; j++) {
- DBE_GETREG(cm_fifo, readVal);
- if (readVal != 0x00000000)
- break;
- else
- udelay(10);
- }
-
- // DBE_ISETREG(cmap, i, 0x00000000);
- DBE_ISETREG(cmap, i, (i << 8) | (i << 16) | (i << 24));
- }
-
- /* dbe_InitFramebuffer(); */
- frmWrite1 = 0;
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_WIDTH_TILE, frmWrite1,
- wholeTilesX);
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_RHS, frmWrite1, 0);
-
- switch (bytesPerPixel) {
- case 1:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_8);
- break;
- case 2:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_16);
- break;
- case 4:
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, frmWrite1,
- DBE_FRM_DEPTH_32);
- break;
- }
-
- frmWrite2 = 0;
- SET_DBE_FIELD(FRM_SIZE_PIXEL, FB_HEIGHT_PIX, frmWrite2, ypmax);
-
- // Tell dbe about the framebuffer location and type
- // XXX What format is the FRM_TILE_PTR?? 64K aligned address?
- frmWrite3b = 0;
- SET_DBE_FIELD(FRM_CONTROL, FRM_TILE_PTR, frmWrite3b,
- sgivwfb_mem_phys >> 9);
- SET_DBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, frmWrite3b, 1);
- SET_DBE_FIELD(FRM_CONTROL, FRM_LINEAR, frmWrite3b, 1);
-
- /* Initialize DIDs */
-
- outputVal = 0;
- switch (bytesPerPixel) {
- case 1:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_I8);
- break;
- case 2:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGBA5);
- break;
- case 4:
- SET_DBE_FIELD(WID, TYP, outputVal, DBE_CMODE_RGB8);
- break;
- }
- SET_DBE_FIELD(WID, BUF, outputVal, DBE_BMODE_BOTH);
-
- for (i = 0; i < 32; i++) {
- DBE_ISETREG(mode_regs, i, outputVal);
- }
-
- /* dbe_InitTiming(); */
- DBE_SETREG(vt_intr01, 0xffffffff);
- DBE_SETREG(vt_intr23, 0xffffffff);
-
- DBE_GETREG(dotclock, readVal);
- DBE_SETREG(dotclock, readVal & 0xffff);
-
- DBE_SETREG(vt_xymax, 0x00000000);
- outputVal = 0;
- SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_ON, outputVal,
- currentTiming->vsync_start);
- SET_DBE_FIELD(VT_VSYNC, VT_VSYNC_OFF, outputVal,
- currentTiming->vsync_end);
- DBE_SETREG(vt_vsync, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_ON, outputVal,
- currentTiming->hsync_start);
- SET_DBE_FIELD(VT_HSYNC, VT_HSYNC_OFF, outputVal,
- currentTiming->hsync_end);
- DBE_SETREG(vt_hsync, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_ON, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VBLANK, VT_VBLANK_OFF, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vblank, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_ON, outputVal,
- currentTiming->hblank_start);
- SET_DBE_FIELD(VT_HBLANK, VT_HBLANK_OFF, outputVal,
- currentTiming->hblank_end - 3);
- DBE_SETREG(vt_hblank, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_ON, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VCMAP, VT_VCMAP_OFF, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vcmap, outputVal);
- outputVal = 0;
- SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_ON, outputVal,
- currentTiming->hblank_start);
- SET_DBE_FIELD(VT_HCMAP, VT_HCMAP_OFF, outputVal,
- currentTiming->hblank_end - 3);
- DBE_SETREG(vt_hcmap, outputVal);
-
- if (flatpanel_id != -1)
- sgivwfb_setup_flatpanel(par, currentTiming);
-
- outputVal = 0;
- temp = currentTiming->vblank_start - currentTiming->vblank_end - 1;
- if (temp > 0)
- temp = -temp;
-
- SET_DBE_FIELD(DID_START_XY, DID_STARTY, outputVal, (u32) temp);
- if (currentTiming->hblank_end >= 20)
- SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal,
- currentTiming->hblank_end - 20);
- else
- SET_DBE_FIELD(DID_START_XY, DID_STARTX, outputVal,
- currentTiming->htotal - (20 -
- currentTiming->
- hblank_end));
- DBE_SETREG(did_start_xy, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTY, outputVal,
- (u32) (temp + 1));
- if (currentTiming->hblank_end >= DBE_CRS_MAGIC)
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal,
- currentTiming->hblank_end - DBE_CRS_MAGIC);
- else
- SET_DBE_FIELD(CRS_START_XY, CRS_STARTX, outputVal,
- currentTiming->htotal - (DBE_CRS_MAGIC -
- currentTiming->
- hblank_end));
- DBE_SETREG(crs_start_xy, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(VC_START_XY, VC_STARTY, outputVal, (u32) temp);
- SET_DBE_FIELD(VC_START_XY, VC_STARTX, outputVal,
- currentTiming->hblank_end - 4);
- DBE_SETREG(vc_start_xy, outputVal);
-
- DBE_SETREG(frm_size_tile, frmWrite1);
- DBE_SETREG(frm_size_pixel, frmWrite2);
-
- outputVal = 0;
- SET_DBE_FIELD(DOTCLK, M, outputVal, currentTiming->pll_m - 1);
- SET_DBE_FIELD(DOTCLK, N, outputVal, currentTiming->pll_n - 1);
- SET_DBE_FIELD(DOTCLK, P, outputVal, currentTiming->pll_p);
- SET_DBE_FIELD(DOTCLK, RUN, outputVal, 1);
- DBE_SETREG(dotclock, outputVal);
-
- udelay(11 * 1000);
-
- DBE_SETREG(vt_vpixen, 0xffffff);
- DBE_SETREG(vt_hpixen, 0xffffff);
-
- outputVal = 0;
- SET_DBE_FIELD(VT_XYMAX, VT_MAXX, outputVal, currentTiming->htotal);
- SET_DBE_FIELD(VT_XYMAX, VT_MAXY, outputVal, currentTiming->vtotal);
- DBE_SETREG(vt_xymax, outputVal);
-
- outputVal = frmWrite1;
- SET_DBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, outputVal, 1);
- DBE_SETREG(frm_size_tile, outputVal);
- DBE_SETREG(frm_size_tile, frmWrite1);
-
- outputVal = 0;
- SET_DBE_FIELD(OVR_WIDTH_TILE, OVR_FIFO_RESET, outputVal, 1);
- DBE_SETREG(ovr_width_tile, outputVal);
- DBE_SETREG(ovr_width_tile, 0);
-
- DBE_SETREG(frm_control, frmWrite3b);
- DBE_SETREG(did_control, 0);
-
- // Wait for dbe to take frame settings
- for (i = 0; i < 100000; i++) {
- DBE_GETREG(frm_inhwctrl, readVal);
- if (GET_DBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, readVal) !=
- 0)
- break;
- else
- udelay(1);
- }
-
- if (i == 100000)
- printk(KERN_INFO
- "sgivwfb: timeout waiting for frame DMA enable.\n");
-
- outputVal = 0;
- htmp = currentTiming->hblank_end - 19;
- if (htmp < 0)
- htmp += currentTiming->htotal; /* allow blank to wrap around */
- SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_ON, outputVal, htmp);
- SET_DBE_FIELD(VT_HPIXEN, VT_HPIXEN_OFF, outputVal,
- ((htmp + currentTiming->width -
- 2) % currentTiming->htotal));
- DBE_SETREG(vt_hpixen, outputVal);
-
- outputVal = 0;
- SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_OFF, outputVal,
- currentTiming->vblank_start);
- SET_DBE_FIELD(VT_VPIXEN, VT_VPIXEN_ON, outputVal,
- currentTiming->vblank_end);
- DBE_SETREG(vt_vpixen, outputVal);
-
- // Turn off mouse cursor
- par->regs->crs_ctl = 0;
-
- // XXX What's this section for??
- DBE_GETREG(ctrlstat, readVal);
- readVal &= 0x02000000;
-
- if (readVal != 0) {
- DBE_SETREG(ctrlstat, 0x30000000);
- }
- return 0;
-}
-
-/*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-
-static int sgivwfb_setcolreg(u_int regno, u_int red, u_int green,
- u_int blue, u_int transp,
- struct fb_info *info)
-{
- struct sgivw_par *par = (struct sgivw_par *) info->par;
-
- if (regno > 255)
- return 1;
- red >>= 8;
- green >>= 8;
- blue >>= 8;
-
- /* wait for the color map FIFO to have a free entry */
- while (par->cmap_fifo == 0)
- par->cmap_fifo = par->regs->cm_fifo;
-
- par->regs->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
- par->cmap_fifo--; /* assume FIFO is filling up */
- return 0;
-}
-
-static int sgivwfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- int r;
-
- pgprot_val(vma->vm_page_prot) =
- pgprot_val(vma->vm_page_prot) | _PAGE_PCD;
-
- r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size);
-
- printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n",
- sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start);
-
- return r;
-}
-
-int __init sgivwfb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!strncmp(this_opt, "monitor:", 8)) {
- if (!strncmp(this_opt + 8, "crt", 3))
- flatpanel_id = -1;
- else if (!strncmp(this_opt + 8, "1600sw", 6))
- flatpanel_id = FLATPANEL_SGI_1600SW;
- }
- }
- return 0;
-}
-
-/*
- * Initialisation
- */
-static int sgivwfb_probe(struct platform_device *dev)
-{
- struct sgivw_par *par;
- struct fb_info *info;
- char *monitor;
-
- info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
- if (!info)
- return -ENOMEM;
- par = info->par;
-
- if (!request_mem_region(DBE_REG_PHYS, DBE_REG_SIZE, "sgivwfb")) {
- printk(KERN_ERR "sgivwfb: couldn't reserve mmio region\n");
- framebuffer_release(info);
- return -EBUSY;
- }
-
- par->regs = (struct asregs *) ioremap_nocache(DBE_REG_PHYS, DBE_REG_SIZE);
- if (!par->regs) {
- printk(KERN_ERR "sgivwfb: couldn't ioremap registers\n");
- goto fail_ioremap_regs;
- }
-
- mtrr_add(sgivwfb_mem_phys, sgivwfb_mem_size, MTRR_TYPE_WRCOMB, 1);
-
- sgivwfb_fix.smem_start = sgivwfb_mem_phys;
- sgivwfb_fix.smem_len = sgivwfb_mem_size;
- sgivwfb_fix.ywrapstep = ywrap;
- sgivwfb_fix.ypanstep = ypan;
-
- info->fix = sgivwfb_fix;
-
- switch (flatpanel_id) {
- case FLATPANEL_SGI_1600SW:
- info->var = sgivwfb_var1600sw;
- monitor = "SGI 1600SW flatpanel";
- break;
- default:
- info->var = sgivwfb_var;
- monitor = "CRT";
- }
-
- printk(KERN_INFO "sgivwfb: %s monitor selected\n", monitor);
-
- info->fbops = &sgivwfb_ops;
- info->pseudo_palette = (void *) (par + 1);
- info->flags = FBINFO_DEFAULT;
-
- info->screen_base = ioremap_nocache((unsigned long) sgivwfb_mem_phys, sgivwfb_mem_size);
- if (!info->screen_base) {
- printk(KERN_ERR "sgivwfb: couldn't ioremap screen_base\n");
- goto fail_ioremap_fbmem;
- }
-
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
- goto fail_color_map;
-
- if (register_framebuffer(info) < 0) {
- printk(KERN_ERR "sgivwfb: couldn't register framebuffer\n");
- goto fail_register_framebuffer;
- }
-
- platform_set_drvdata(dev, info);
-
- fb_info(info, "SGI DBE frame buffer device, using %ldK of video memory at %#lx\n",
- sgivwfb_mem_size >> 10, sgivwfb_mem_phys);
- return 0;
-
-fail_register_framebuffer:
- fb_dealloc_cmap(&info->cmap);
-fail_color_map:
- iounmap((char *) info->screen_base);
-fail_ioremap_fbmem:
- iounmap(par->regs);
-fail_ioremap_regs:
- release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE);
- framebuffer_release(info);
- return -ENXIO;
-}
-
-static int sgivwfb_remove(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
-
- if (info) {
- struct sgivw_par *par = info->par;
-
- unregister_framebuffer(info);
- dbe_TurnOffDma(par);
- iounmap(par->regs);
- iounmap(info->screen_base);
- release_mem_region(DBE_REG_PHYS, DBE_REG_SIZE);
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- return 0;
-}
-
-static struct platform_driver sgivwfb_driver = {
- .probe = sgivwfb_probe,
- .remove = sgivwfb_remove,
- .driver = {
- .name = "sgivwfb",
- },
-};
-
-static struct platform_device *sgivwfb_device;
-
-int __init sgivwfb_init(void)
-{
- int ret;
-
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("sgivwfb", &option))
- return -ENODEV;
- sgivwfb_setup(option);
-#endif
- ret = platform_driver_register(&sgivwfb_driver);
- if (!ret) {
- sgivwfb_device = platform_device_alloc("sgivwfb", 0);
- if (sgivwfb_device) {
- ret = platform_device_add(sgivwfb_device);
- } else
- ret = -ENOMEM;
- if (ret) {
- platform_driver_unregister(&sgivwfb_driver);
- platform_device_put(sgivwfb_device);
- }
- }
- return ret;
-}
-
-module_init(sgivwfb_init);
-
-#ifdef MODULE
-MODULE_LICENSE("GPL");
-
-static void __exit sgivwfb_exit(void)
-{
- platform_device_unregister(sgivwfb_device);
- platform_driver_unregister(&sgivwfb_driver);
-}
-
-module_exit(sgivwfb_exit);
-
-#endif /* MODULE */
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index ab85ad6c25ec..2bcc84ac18c7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -1227,7 +1227,7 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
/* Free the MERAM cache. */
if (ch->cache) {
sh_mobile_meram_cache_free(priv->meram_dev, ch->cache);
- ch->cache = 0;
+ ch->cache = NULL;
}
}
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index f28674fea909..07c7df9ee77b 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -32,12 +32,6 @@
#include <video/tgafb.h>
-#ifdef CONFIG_PCI
-#define TGA_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define TGA_BUS_PCI(dev) 0
-#endif
-
#ifdef CONFIG_TC
#define TGA_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
@@ -236,7 +230,7 @@ tgafb_set_par(struct fb_info *info)
};
struct tga_par *par = (struct tga_par *) info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
u32 htimings, vtimings, pll_freq;
u8 tga_type;
@@ -519,7 +513,7 @@ tgafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct tga_par *par = (struct tga_par *) info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
if (regno > 255)
@@ -1472,7 +1466,7 @@ static void
tgafb_init_fix(struct fb_info *info)
{
struct tga_par *par = (struct tga_par *)info->par;
- int tga_bus_pci = TGA_BUS_PCI(par->dev);
+ int tga_bus_pci = dev_is_pci(par->dev);
int tga_bus_tc = TGA_BUS_TC(par->dev);
u8 tga_type = par->tga_type;
const char *tga_type_name = NULL;
@@ -1496,10 +1490,9 @@ tgafb_init_fix(struct fb_info *info)
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E3";
break;
- default:
- tga_type_name = "Unknown";
- break;
}
+ if (!tga_type_name)
+ tga_type_name = "Unknown";
strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
@@ -1560,7 +1553,7 @@ static int tgafb_register(struct device *dev)
const struct fb_videomode *modedb_tga = NULL;
resource_size_t bar0_start = 0, bar0_len = 0;
const char *mode_option_tga = NULL;
- int tga_bus_pci = TGA_BUS_PCI(dev);
+ int tga_bus_pci = dev_is_pci(dev);
int tga_bus_tc = TGA_BUS_TC(dev);
unsigned int modedbsize_tga = 0;
void __iomem *mem_base;
@@ -1690,7 +1683,7 @@ static int tgafb_register(struct device *dev)
static void tgafb_unregister(struct device *dev)
{
resource_size_t bar0_start = 0, bar0_len = 0;
- int tga_bus_pci = TGA_BUS_PCI(dev);
+ int tga_bus_pci = dev_is_pci(dev);
int tga_bus_tc = TGA_BUS_TC(dev);
struct fb_info *info = NULL;
struct tga_par *par;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 025f14e30eed..77b890e4d296 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1624,7 +1624,7 @@ static int dlfb_usb_probe(struct usb_interface *interface,
}
if (pixel_limit) {
- pr_warn("DL chip limit of %d overriden"
+ pr_warn("DL chip limit of %d overridden"
" by module param to %d\n",
dev->sku_pixel_limit, pixel_limit);
dev->sku_pixel_limit = pixel_limit;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 256fba7f4641..1f38445014c1 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -190,7 +190,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
uvfb_tasks[seq] = task;
mutex_unlock(&uvfb_lock);
- err = cn_netlink_send(m, 0, GFP_KERNEL);
+ err = cn_netlink_send(m, 0, 0, GFP_KERNEL);
if (err == -ESRCH) {
/*
* Try to start the userspace helper if sending
@@ -204,7 +204,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
"helper is installed and executable\n");
} else {
v86d_started = 1;
- err = cn_netlink_send(m, 0, gfp_any());
+ err = cn_netlink_send(m, 0, 0, gfp_any());
if (err == -ENOBUFS)
err = 0;
}
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 09a136633f35..048a66640b03 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -383,7 +383,6 @@ static void vmlfb_disable_mmio(struct vml_par *par)
static void vmlfb_release_devices(struct vml_par *par)
{
if (atomic_dec_and_test(&par->refcount)) {
- pci_set_drvdata(par->vdc, NULL);
pci_disable_device(par->gpu);
pci_disable_device(par->vdc);
}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cd005c227a23..901014bbc821 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -35,6 +35,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenfb_info {
unsigned char *fb;
@@ -692,13 +693,16 @@ static DEFINE_XENBUS_DRIVER(xenfb, ,
static int __init xenfb_init(void)
{
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenfb_driver);
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 5c4a95b516cf..25ebe8eecdb7 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
@@ -258,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len))
return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
else if (diff < 0)
leak_balloon(vb, -diff);
update_balloon_size(vb);
+
+ /*
+ * For large balloon changes, we could spend a lot of time
+ * and always have work to do. Be nice if preempt disabled.
+ */
+ cond_resched();
}
return 0;
}
@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb)
/*
* Prime this virtqueue with one buffer so the hypervisor can
- * use it to signal us later.
+ * use it to signal us later (it can't be broken yet!).
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
@@ -369,7 +373,7 @@ static const struct address_space_operations virtio_balloon_aops;
* This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage
*/
-int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index a37c69941d30..101db3faf5d4 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -333,10 +333,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
- /* pci_enable_msix returns positive if we can't get this many. */
- err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
- if (err > 0)
- err = -ENOSPC;
+ err = pci_enable_msix_exact(vp_dev->pci_dev,
+ vp_dev->msix_entries, nvectors);
if (err)
goto error;
vp_dev->msix_enabled = 1;
@@ -742,7 +740,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
out_set_drvdata:
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
pci_release_regions(pci_dev);
@@ -760,7 +757,6 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
vp_del_vqs(&vp_dev->vdev);
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 28b5338fff71..1e443629f76d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
+#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
@@ -203,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq,
BUG_ON(data == NULL);
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
#ifdef DEBUG
{
ktime_t now = ktime_get();
@@ -309,7 +315,7 @@ add_head:
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
@@ -347,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
@@ -369,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 7b07135ab26e..c0227f9418eb 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -762,7 +762,8 @@ static int vlynq_remove(struct platform_device *pdev)
device_unregister(&dev->dev);
iounmap(dev->local);
- release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
+ release_mem_region(dev->regs_start,
+ dev->regs_end - dev->regs_start + 1);
kfree(dev);
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..bfb2d3f06738 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -869,14 +869,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
spin_lock(&image->lock);
- /* The following code handles VME address alignment problem
- * in order to assure the maximal data width cycle.
- * We cannot use memcpy_xxx directly here because it
- * may cut data transfer in 8-bits cycles, thus making
- * D16 cycle impossible.
- * From the other hand, the bridge itself assures that
- * maximal configured data cycle is used and splits it
- * automatically for non-aligned addresses.
+ /* The following code handles VME address alignment. We cannot use
+ * memcpy_xxx here because it may cut data transfers into 8-bit
+ * cycles when D16 or D32 cycles are required on the VME bus.
+ * On the other hand, the bridge itself assures that the maximum data
+ * cycle configured for the transfer is used and splits it
+ * automatically for non-aligned addresses, so we don't want the
+ * overhead of needlessly forcing small transfers for the entire cycle.
*/
if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
@@ -884,7 +883,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -896,9 +895,9 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_fromio(buf + done, addr + done, (unsigned int)count);
- done += count32;
+ while (done < count32) {
+ *(u32 *)(buf + done) = ioread32(addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
@@ -930,7 +929,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
spin_lock(&image->lock);
/* Here we apply for the same strategy we do in master_read
- * function in order to assure D16 cycle when required.
+ * function in order to assure the correct cycles.
*/
if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
@@ -938,7 +937,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
@@ -950,9 +949,9 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_toio(addr + done, buf + done, count32);
- done += count32;
+ while (done < count32) {
+ iowrite32(*(u32 *)(buf + done), addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..06990c6a1a69 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1276,8 +1276,8 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
spin_lock(&image->lock);
/* The following code handles VME address alignment. We cannot use
- * memcpy_xxx directly here because it may cut small data transfers in
- * to 8-bit cycles, thus making D16 cycle impossible.
+ * memcpy_xxx here because it may cut data transfers into 8-bit
+ * cycles when D16 or D32 cycles are required on the VME bus.
* On the other hand, the bridge itself assures that the maximum data
* cycle configured for the transfer is used and splits it
* automatically for non-aligned addresses, so we don't want the
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1301,9 +1301,9 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_fromio(buf + done, addr + done, count32);
- done += count32;
+ while (done < count32) {
+ *(u32 *)(buf + done) = ioread32(addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
@@ -1363,7 +1363,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
spin_lock(&image->lock);
/* Here we apply for the same strategy we do in master_read
- * function in order to assure D16 cycle when required.
+ * function in order to assure the correct cycles.
*/
if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
@@ -1383,9 +1383,9 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
}
count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_toio(addr + done, buf + done, count32);
- done += count32;
+ while (done < count32) {
+ iowrite32(*(u32 *)(buf + done), addr + done);
+ done += 4;
}
if ((count - done) & 0x2) {
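Both bridge drivers above now push the bulk of each transfer through explicit ioread32()/iowrite32() loops instead of memcpy_fromio()/memcpy_toio(), so the cycle size used on the VME bus stays under the driver's control. A standalone, userspace-style sketch of that alignment strategy, with plain memory reads standing in for the io accessors (names and the test harness are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-ins for ioread8()/ioread16()/ioread32() in this sketch */
static uint8_t  bus_read8(const uint8_t *p)  { return *p; }
static uint16_t bus_read16(const uint8_t *p) { uint16_t v; memcpy(&v, p, 2); return v; }
static uint32_t bus_read32(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }

static size_t aligned_read(uint8_t *buf, const uint8_t *addr, size_t count)
{
	size_t done = 0, end32;
	uint16_t h;
	uint32_t w;

	if ((uintptr_t)addr & 0x1) {		/* one D8 cycle to reach 2-byte alignment */
		buf[done] = bus_read8(addr);
		if (++done == count)
			return done;
	}
	if ((uintptr_t)(addr + done) & 0x2) {	/* one D16 cycle to reach 4-byte alignment */
		if (count - done < 2) {
			buf[done] = bus_read8(addr + done);
			return done + 1;
		}
		h = bus_read16(addr + done);
		memcpy(buf + done, &h, 2);
		done += 2;
	}
	end32 = done + ((count - done) & ~(size_t)0x3);
	while (done < end32) {			/* bulk of the transfer as D32 cycles */
		w = bus_read32(addr + done);
		memcpy(buf + done, &w, 4);
		done += 4;
	}
	if ((count - done) & 0x2) {		/* trailing half-word */
		h = bus_read16(addr + done);
		memcpy(buf + done, &h, 2);
		done += 2;
	}
	if ((count - done) & 0x1) {		/* trailing byte */
		buf[done] = bus_read8(addr + done);
		done++;
	}
	return done;
}

int main(void)
{
	uint8_t src[41], dst[41];
	size_t i, n;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (uint8_t)i;
	n = aligned_read(dst, src + 1, sizeof(src) - 1);	/* deliberately misaligned start */
	printf("copied %zu bytes: %s\n", n,
	       memcmp(dst, src + 1, n) ? "MISMATCH" : "ok");
	return 0;
}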
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index efc7f075fcbe..1708b2300c7a 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -36,13 +36,12 @@ config W1_MASTER_DS2482
config W1_MASTER_MXC
tristate "Freescale MXC 1-wire busmaster"
- depends on W1 && ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Say Y here to enable MXC 1-wire host
config W1_MASTER_DS1WM
tristate "Maxim DS1WM 1-wire busmaster"
- depends on W1
help
Say Y here to enable the DS1WM 1-wire driver, such as that
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 4f7e1d770f81..7404ad3062b7 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,5 +1,5 @@
/*
- * dscore.c
+ * ds2490.c USB to one wire bridge
*
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
@@ -28,6 +28,10 @@
#include "../w1_int.h"
#include "../w1.h"
+/* USB Standard */
+/* USB Control request vendor type */
+#define VENDOR 0x40
+
/* COMMAND TYPE CODES */
#define CONTROL_CMD 0x00
#define COMM_CMD 0x01
@@ -107,6 +111,8 @@
#define ST_HALT 0x10 /* DS2490 is currently halted */
#define ST_IDLE 0x20 /* DS2490 is currently idle */
#define ST_EPOF 0x80
+/* Status transfer size, 16 bytes of status, 16 bytes of result flags */
+#define ST_SIZE 0x20
/* Result Register flags */
#define RR_DETECT 0xA5 /* New device detected */
@@ -198,7 +204,7 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- CONTROL_CMD, 0x40, value, index, NULL, 0, 1000);
+ CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n",
value, index, err);
@@ -213,7 +219,7 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- MODE_CMD, 0x40, value, index, NULL, 0, 1000);
+ MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n",
value, index, err);
@@ -228,7 +234,7 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
int err;
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
- COMM_CMD, 0x40, value, index, NULL, 0, 1000);
+ COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
if (err < 0) {
printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n",
value, index, err);
@@ -246,7 +252,8 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
memset(st, 0, sizeof(*st));
count = 0;
- err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);
+ err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
+ dev->ep[EP_STATUS]), buf, size, &count, 100);
if (err < 0) {
printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
return err;
@@ -353,7 +360,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
buf, size, &count, 1000);
if (err < 0) {
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
int count;
printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
@@ -398,7 +405,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
{
struct ds_status st;
int count = 0, err = 0;
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
do {
err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -450,10 +457,11 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
{
- u8 buf[0x20];
+ u8 buf[ST_SIZE];
int err, count = 0;
do {
+ st->status = 0;
err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
#if 0
if (err >= 0) {
@@ -464,7 +472,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
printk("\n");
}
#endif
- } while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100);
+ } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
if (err >= 16 && st->status & ST_EPOF) {
printk(KERN_INFO "Resetting device after ST_EPOF.\n");
@@ -690,37 +698,106 @@ static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
return !(err == len);
}
-#if 0
-
-static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search)
+static void ds9490r_search(void *data, struct w1_master *master,
+ u8 search_type, w1_slave_found_callback callback)
{
+ /* When starting with an existing id, the first id returned will
+ * be that device (assuming it is still on the bus).
+ *
+ * If the number of devices found is less than or equal to the
+ * search_limit, that number of IDs will be returned. If there are
+ * more, search_limit IDs will be returned followed by a non-zero
+ * discrepancy value.
+ */
+ struct ds_device *dev = data;
int err;
u16 value, index;
struct ds_status st;
+ u8 st_buf[ST_SIZE];
+ int search_limit;
+ int found = 0;
+ int i;
- memset(buf, 0, sizeof(buf));
+ /* DS18b20 spec, 13.16 ms per device, 75 per second, sleep for
+ * discovering 8 devices (1 bulk transfer and 1/2 FIFO size) at a time.
+ */
+ const unsigned long jtime = msecs_to_jiffies(1000*8/75);
+ /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
+ * packet size.
+ */
+ u64 buf[2*64/8];
- err = ds_send_data(ds_dev, (unsigned char *)&init, 8);
- if (err)
- return err;
+ mutex_lock(&master->bus_mutex);
- ds_wait_status(ds_dev, &st);
+ /* address to start searching at */
+ if (ds_send_data(dev, (u8 *)&master->search_id, 8) < 0)
+ goto search_out;
+ master->search_id = 0;
- value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS;
- index = (conditional_search ? 0xEC : 0xF0) | (id_number << 8);
- err = ds_send_control(ds_dev, value, index);
- if (err)
- return err;
+ value = COMM_SEARCH_ACCESS | COMM_IM | COMM_RST | COMM_SM | COMM_F |
+ COMM_RTS;
+ search_limit = master->max_slave_count;
+ if (search_limit > 255)
+ search_limit = 0;
+ index = search_type | (search_limit << 8);
+ if (ds_send_control(dev, value, index) < 0)
+ goto search_out;
- ds_wait_status(ds_dev, &st);
+ do {
+ schedule_timeout(jtime);
- err = ds_recv_data(ds_dev, (unsigned char *)buf, 8*id_number);
- if (err < 0)
- return err;
+ if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
+ sizeof(st)) {
+ break;
+ }
- return err/8;
+ if (st.data_in_buffer_status) {
+ /* Bulk in can receive partial ids, but when it does
+ * they fail crc and will be discarded anyway.
+ * That has only been seen when status in buffer
+ * is 0 and bulk is read anyway, so don't read
+ * bulk without first checking if status says there
+ * is data to read.
+ */
+ err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
+ if (err < 0)
+ break;
+ for (i = 0; i < err/8; ++i) {
+ ++found;
+ if (found <= search_limit)
+ callback(master, buf[i]);
+ /* can't know if there will be a discrepancy
+ * value after it until the next id arrives */
+ if (found == search_limit)
+ master->search_id = buf[i];
+ }
+ }
+
+ if (test_bit(W1_ABORT_SEARCH, &master->flags))
+ break;
+ } while (!(st.status & (ST_IDLE | ST_HALT)));
+
+ /* only continue the search if some weren't found */
+ if (found <= search_limit) {
+ master->search_id = 0;
+ } else if (!test_bit(W1_WARN_MAX_COUNT, &master->flags)) {
+ /* Only max_slave_count will be scanned in a search,
+ * but it will start where it left off on the next search
+ * until all ids are identified and then it will start
+ * over. A continued search will report the previous
+ * last id as the first id (provided it is still on the
+ * bus).
+ */
+ dev_info(&dev->udev->dev, "%s: max_slave_count %d reached, "
+ "will continue next search.\n", __func__,
+ master->max_slave_count);
+ set_bit(W1_WARN_MAX_COUNT, &master->flags);
+ }
+search_out:
+ mutex_unlock(&master->bus_mutex);
}
+#if 0
static int ds_match_access(struct ds_device *dev, u64 init)
{
int err;
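A small standalone sketch of the numbers the new search path above works with: the poll interval implied by the "75 ids per second, 8 per poll" comment, and the way search_type and the per-search limit are packed into the control request index. 0xF0 is the standard SEARCH ROM command (as in the removed ds_search()), and 64 matches the new w1 core default later in this patch:

#include <stdio.h>

int main(void)
{
	/* ~13.3 ms per id on the bus => ~75 ids/s; up to 8 ids are read per poll */
	int poll_ms = 1000 * 8 / 75;

	int max_slave_count = 64;		/* new w1 core default */
	int search_limit = max_slave_count > 255 ? 0 : max_slave_count;	/* 0 = unlimited */
	unsigned int search_type = 0xF0;	/* SEARCH ROM (W1_SEARCH) */
	unsigned int req_index = search_type | ((unsigned int)search_limit << 8);

	printf("poll every %d ms, control request index = 0x%04x\n", poll_ms, req_index);
	return 0;
}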
@@ -894,6 +971,7 @@ static int ds_w1_init(struct ds_device *dev)
dev->master.write_block = &ds9490r_write_block;
dev->master.reset_bus = &ds9490r_reset;
dev->master.set_pullup = &ds9490r_set_pullup;
+ dev->master.search = &ds9490r_search;
return w1_add_master_device(&dev->master);
}
@@ -910,15 +988,13 @@ static int ds_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_desc;
struct ds_device *dev;
- int i, err;
+ int i, err, alt;
- dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
if (!dev) {
printk(KERN_INFO "Failed to allocate new DS9490R structure.\n");
return -ENOMEM;
}
- dev->spu_sleep = 0;
- dev->spu_bit = 0;
dev->udev = usb_get_dev(udev);
if (!dev->udev) {
err = -ENOMEM;
@@ -928,20 +1004,25 @@ static int ds_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3);
+ err = usb_reset_configuration(dev->udev);
if (err) {
- printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n",
- intf->altsetting[0].desc.bInterfaceNumber, err);
+ dev_err(&dev->udev->dev,
+ "Failed to reset configuration: err=%d.\n", err);
goto err_out_clear;
}
- err = usb_reset_configuration(dev->udev);
+ /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
+ alt = 3;
+ err = usb_set_interface(dev->udev,
+ intf->altsetting[alt].desc.bInterfaceNumber, alt);
if (err) {
- printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err);
+ dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
+ "for %d interface: err=%d.\n", alt,
+ intf->altsetting[alt].desc.bInterfaceNumber, err);
goto err_out_clear;
}
- iface_desc = &intf->altsetting[0];
+ iface_desc = &intf->altsetting[alt];
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints);
err = -EINVAL;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 1e5d94c5afc9..67b067a3e2ab 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -10,24 +10,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "../w1.h"
#include "../w1_int.h"
-#include "../w1_log.h"
/* According to the mx27 Datasheet the reset procedure should take up to about
* 1350us. We set the timeout to 500*100us = 50ms for sure */
@@ -36,13 +28,13 @@
/*
* MXC W1 Register offsets
*/
-#define MXC_W1_CONTROL 0x00
-#define MXC_W1_TIME_DIVIDER 0x02
-#define MXC_W1_RESET 0x04
-#define MXC_W1_COMMAND 0x06
-#define MXC_W1_TXRX 0x08
-#define MXC_W1_INTERRUPT 0x0A
-#define MXC_W1_INTERRUPT_EN 0x0C
+#define MXC_W1_CONTROL 0x00
+# define MXC_W1_CONTROL_RDST BIT(3)
+# define MXC_W1_CONTROL_WR(x) BIT(5 - (x))
+# define MXC_W1_CONTROL_PST BIT(6)
+# define MXC_W1_CONTROL_RPP BIT(7)
+#define MXC_W1_TIME_DIVIDER 0x02
+#define MXC_W1_RESET 0x04
struct mxc_w1_device {
void __iomem *regs;
@@ -61,12 +53,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
unsigned int timeout_cnt = 0;
struct mxc_w1_device *dev = data;
- __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL));
+ writeb(MXC_W1_CONTROL_RPP, (dev->regs + MXC_W1_CONTROL));
while (1) {
- reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL);
+ reg_val = readb(dev->regs + MXC_W1_CONTROL);
- if (((reg_val >> 7) & 0x1) == 0 ||
+ if (!(reg_val & MXC_W1_CONTROL_RPP) ||
timeout_cnt > MXC_W1_RESET_TIMEOUT)
break;
else
@@ -74,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
udelay(100);
}
- return (reg_val >> 7) & 0x1;
+ return !!(reg_val & MXC_W1_CONTROL_PST);
}
/*
@@ -90,16 +82,16 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
* datasheet.
*/
- __raw_writeb((1 << (5 - bit)), ctrl_addr);
+ writeb(MXC_W1_CONTROL_WR(bit), ctrl_addr);
while (timeout_cnt--) {
- if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1))
+ if (!(readb(ctrl_addr) & MXC_W1_CONTROL_WR(bit)))
break;
udelay(1);
}
- return ((__raw_readb(ctrl_addr)) >> 3) & 0x1;
+ return !!(readb(ctrl_addr) & MXC_W1_CONTROL_RDST);
}
static int mxc_w1_probe(struct platform_device *pdev)
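The new control-register names encode what the old magic shifts did: judging from the replaced (1 << (5 - bit)) and (val >> 3) & 0x1 expressions, bit 5 triggers a write-0 cycle, bit 4 a write-1/read cycle, and bit 3 (RDST) carries the sampled level. A trivial sketch of that encoding:

#include <stdio.h>

#define BIT(nr)			(1u << (nr))
#define MXC_W1_CONTROL_RDST	BIT(3)
#define MXC_W1_CONTROL_WR(x)	BIT(5 - (x))

int main(void)
{
	printf("write-0 trigger : 0x%02x\n", MXC_W1_CONTROL_WR(0));	/* 0x20 */
	printf("write-1/read    : 0x%02x\n", MXC_W1_CONTROL_WR(1));	/* 0x10 */
	printf("read-data sample: 0x%02x\n", MXC_W1_CONTROL_RDST);	/* 0x08 */
	return 0;
}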
@@ -139,7 +131,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (err)
return err;
- __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
+ writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
@@ -177,6 +169,7 @@ MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
static struct platform_driver mxc_w1_driver = {
.driver = {
.name = "mxc_w1",
+ .owner = THIS_MODULE,
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index e36b18b2817b..1d111e56c8c8 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -18,10 +18,31 @@
#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/delay.h>
#include "../w1.h"
#include "../w1_int.h"
+static u8 w1_gpio_set_pullup(void *data, int delay)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ if (delay) {
+ pdata->pullup_duration = delay;
+ } else {
+ if (pdata->pullup_duration) {
+ gpio_direction_output(pdata->pin, 1);
+
+ msleep(pdata->pullup_duration);
+
+ gpio_direction_input(pdata->pin);
+ }
+ pdata->pullup_duration = 0;
+ }
+
+ return 0;
+}
+
static void w1_gpio_write_bit_dir(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
@@ -68,11 +89,22 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
pdata->is_open_drain = 1;
gpio = of_get_gpio(np, 0);
- if (gpio < 0)
+ if (gpio < 0) {
+ if (gpio != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to parse gpio property for data pin (%d)\n",
+ gpio);
+
return gpio;
+ }
pdata->pin = gpio;
- pdata->ext_pullup_enable_pin = of_get_gpio(np, 1);
+ gpio = of_get_gpio(np, 1);
+ if (gpio == -EPROBE_DEFER)
+ return gpio;
+ /* ignore other errors as the pullup gpio is optional */
+ pdata->ext_pullup_enable_pin = gpio;
+
pdev->dev.platform_data = pdata;
return 0;
@@ -86,10 +118,8 @@ static int w1_gpio_probe(struct platform_device *pdev)
if (of_have_populated_dt()) {
err = w1_gpio_probe_dt(pdev);
- if (err < 0) {
- dev_err(&pdev->dev, "Failed to parse DT\n");
+ if (err < 0)
return err;
- }
}
pdata = dev_get_platdata(&pdev->dev);
@@ -132,6 +162,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
} else {
gpio_direction_input(pdata->pin);
master->write_bit = w1_gpio_write_bit_dir;
+ master->set_pullup = w1_gpio_set_pullup;
}
err = w1_add_master_device(master);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 5e6a3c9e510b..1cdce80b6abf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -72,7 +72,6 @@ config W1_SLAVE_DS2433_CRC
config W1_SLAVE_DS2760
tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
- depends on W1
help
If you enable this you will have the DS2760 battery monitor
chip support.
@@ -85,7 +84,6 @@ config W1_SLAVE_DS2760
config W1_SLAVE_DS2780
tristate "Dallas 2780 battery monitor chip"
- depends on W1
help
If you enable this you will have the DS2780 battery monitor
chip support.
@@ -98,7 +96,6 @@ config W1_SLAVE_DS2780
config W1_SLAVE_DS2781
tristate "Dallas 2781 battery monitor chip"
- depends on W1
help
If you enable this you will have the DS2781 battery monitor
chip support.
@@ -111,7 +108,6 @@ config W1_SLAVE_DS2781
config W1_SLAVE_DS28E04
tristate "4096-Bit Addressable 1-Wire EEPROM with PIO (DS28E04-100)"
- depends on W1
select CRC16
help
If you enable this you will have the DS28E04-100
@@ -124,7 +120,6 @@ config W1_SLAVE_DS28E04
config W1_SLAVE_BQ27000
tristate "BQ27000 slave support"
- depends on W1
help
Say Y here if you want to use a hdq
bq27000 slave support.
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 8b5ff33f72cf..1f11a20a8ab9 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include "../w1.h"
@@ -58,6 +59,19 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+static int w1_therm_add_slave(struct w1_slave *sl)
+{
+ sl->family_data = kzalloc(9, GFP_KERNEL);
+ if (!sl->family_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static void w1_therm_remove_slave(struct w1_slave *sl)
+{
+ kfree(sl->family_data);
+ sl->family_data = NULL;
+}
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
@@ -71,6 +85,8 @@ static struct attribute *w1_therm_attrs[] = {
ATTRIBUTE_GROUPS(w1_therm);
static struct w1_family_ops w1_therm_fops = {
+ .add_slave = w1_therm_add_slave,
+ .remove_slave = w1_therm_remove_slave,
.groups = w1_therm_groups,
};
@@ -253,12 +269,13 @@ static ssize_t w1_slave_show(struct device *device,
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
crc, (verdict) ? "YES" : "NO");
if (verdict)
- memcpy(sl->rom, rom, sizeof(sl->rom));
+ memcpy(sl->family_data, rom, sizeof(rom));
else
dev_warn(device, "Read failed CRC check\n");
for (i = 0; i < 9; ++i)
- c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
+ ((u8 *)sl->family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
w1_convert_temp(rom, sl->family->fid));
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 66efa96c4603..b96f61b15dc6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -46,18 +46,29 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
static int w1_timeout = 10;
-int w1_max_slave_count = 10;
+int w1_max_slave_count = 64;
int w1_max_slave_ttl = 10;
module_param_named(timeout, w1_timeout, int, 0);
+MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
+/* A search stops when w1_max_slave_count devices have been found in that
+ * search. The next search will start over and detect the same set of devices
+ * on a static 1-wire bus. Memory is not allocated based on this number, just
+ * on the number of devices known to the kernel. Having a high number does not
+ * consume additional resources. As a special case, if there is only one
+ * device on the network and w1_max_slave_count is set to 1, the device id can
+ * be read directly skipping the normal slower search process.
+ */
module_param_named(max_slave_count, w1_max_slave_count, int, 0);
+MODULE_PARM_DESC(max_slave_count,
+ "maximum number of slaves detected in a search");
module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
+MODULE_PARM_DESC(slave_ttl,
+ "Number of searches not seeing a slave before it will be removed");
DEFINE_MUTEX(w1_mlock);
LIST_HEAD(w1_masters);
-static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
-
static int w1_master_match(struct device *dev, struct device_driver *drv)
{
return 1;
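Since the attribute is made writable further down (W1_MASTER_ATTR_RW), the per-search limit described above can also be raised at run time from userspace. A hedged sketch; the sysfs path follows the usual w1_bus_master naming and may differ on a given system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* conventional location of the first bus master; adjust as needed */
	const char *path =
		"/sys/bus/w1/devices/w1_bus_master1/w1_master_max_slave_count";
	const char *val = "128\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}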
@@ -81,19 +92,10 @@ static void w1_slave_release(struct device *dev)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
- dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name);
-
- while (atomic_read(&sl->refcnt)) {
- dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n",
- sl->name, atomic_read(&sl->refcnt));
- if (msleep_interruptible(1000))
- flush_signals(current);
- }
+ dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl);
w1_family_put(sl->family);
sl->master->slave_count--;
-
- complete(&sl->released);
}
static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -243,7 +245,9 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
mutex_lock(&md->mutex);
md->search_count = tmp;
mutex_unlock(&md->mutex);
- wake_up_process(md->thread);
+ /* Only wake if it is going to be searching. */
+ if (tmp)
+ wake_up_process(md->thread);
return count;
}
@@ -277,7 +281,6 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
mutex_lock(&md->mutex);
md->enable_pullup = tmp;
mutex_unlock(&md->mutex);
- wake_up_process(md->thread);
return count;
}
@@ -314,6 +317,24 @@ static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct devic
return count;
}
+static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int tmp;
+ struct w1_master *md = dev_to_w1_master(dev);
+
+ if (kstrtoint(buf, 0, &tmp) == -EINVAL || tmp < 1)
+ return -EINVAL;
+
+ mutex_lock(&md->mutex);
+ md->max_slave_count = tmp;
+ /* allow the warning again each time max_slave_count is updated */
+ clear_bit(W1_WARN_MAX_COUNT, &md->flags);
+ mutex_unlock(&md->mutex);
+
+ return count;
+}
+
static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_master *md = dev_to_w1_master(dev);
@@ -352,23 +373,20 @@ static ssize_t w1_master_attribute_show_slaves(struct device *dev,
{
struct w1_master *md = dev_to_w1_master(dev);
int c = PAGE_SIZE;
+ struct list_head *ent, *n;
+ struct w1_slave *sl = NULL;
- mutex_lock(&md->mutex);
-
- if (md->slave_count == 0)
- c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
- else {
- struct list_head *ent, *n;
- struct w1_slave *sl;
+ mutex_lock(&md->list_mutex);
- list_for_each_safe(ent, n, &md->slist) {
- sl = list_entry(ent, struct w1_slave, w1_slave_entry);
+ list_for_each_safe(ent, n, &md->slist) {
+ sl = list_entry(ent, struct w1_slave, w1_slave_entry);
- c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
- }
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
}
+ if (!sl)
+ c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
- mutex_unlock(&md->mutex);
+ mutex_unlock(&md->list_mutex);
return PAGE_SIZE - c;
}
@@ -422,19 +440,22 @@ static int w1_atoreg_num(struct device *dev, const char *buf, size_t count,
}
/* Searches the slaves in the w1_master and returns a pointer or NULL.
- * Note: must hold the mutex
+ * Note: must not hold list_mutex
*/
-static struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
struct w1_reg_num *rn)
{
struct w1_slave *sl;
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
if (sl->reg_num.family == rn->family &&
sl->reg_num.id == rn->id &&
sl->reg_num.crc == rn->crc) {
+ mutex_unlock(&dev->list_mutex);
return sl;
}
}
+ mutex_unlock(&dev->list_mutex);
return NULL;
}
@@ -491,7 +512,10 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
mutex_lock(&md->mutex);
sl = w1_slave_search_device(md, &rn);
if (sl) {
- w1_slave_detach(sl);
+ result = w1_slave_detach(sl);
+ /* refcnt 0 means it was detached in the call */
+ if (result == 0)
+ result = count;
} else {
dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family,
(unsigned long long)rn.id);
@@ -516,7 +540,7 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
static W1_MASTER_ATTR_RO(name, S_IRUGO);
static W1_MASTER_ATTR_RO(slaves, S_IRUGO);
static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
-static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO);
+static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
@@ -686,12 +710,14 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
dev_set_uevent_suppress(&sl->dev, false);
kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
+ mutex_lock(&sl->master->list_mutex);
list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
+ mutex_unlock(&sl->master->list_mutex);
return 0;
}
-static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
{
struct w1_slave *sl;
struct w1_family *f;
@@ -713,8 +739,8 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
memset(&msg, 0, sizeof(msg));
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
- atomic_set(&sl->refcnt, 0);
- init_completion(&sl->released);
+ atomic_set(&sl->refcnt, 1);
+ atomic_inc(&sl->master->refcnt);
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
@@ -754,23 +780,48 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
return 0;
}
-void w1_slave_detach(struct w1_slave *sl)
+int w1_unref_slave(struct w1_slave *sl)
{
- struct w1_netlink_msg msg;
-
- dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl);
-
- list_del(&sl->w1_slave_entry);
-
- memset(&msg, 0, sizeof(msg));
- memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
- msg.type = W1_SLAVE_REMOVE;
- w1_netlink_send(sl->master, &msg);
-
- device_unregister(&sl->dev);
+ struct w1_master *dev = sl->master;
+ int refcnt;
+ mutex_lock(&dev->list_mutex);
+ refcnt = atomic_sub_return(1, &sl->refcnt);
+ if (refcnt == 0) {
+ struct w1_netlink_msg msg;
+
+ dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__,
+ sl->name, sl);
+
+ list_del(&sl->w1_slave_entry);
+
+ memset(&msg, 0, sizeof(msg));
+ memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
+ msg.type = W1_SLAVE_REMOVE;
+ w1_netlink_send(sl->master, &msg);
+
+ device_unregister(&sl->dev);
+ #ifdef DEBUG
+ memset(sl, 0, sizeof(*sl));
+ #endif
+ kfree(sl);
+ }
+ atomic_dec(&dev->refcnt);
+ mutex_unlock(&dev->list_mutex);
+ return refcnt;
+}
- wait_for_completion(&sl->released);
- kfree(sl);
+int w1_slave_detach(struct w1_slave *sl)
+{
+ /* Only detach a slave once as it decreases the refcnt each time. */
+ int destroy_now;
+ mutex_lock(&sl->master->list_mutex);
+ destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags);
+ set_bit(W1_SLAVE_DETACH, &sl->flags);
+ mutex_unlock(&sl->master->list_mutex);
+
+ if (destroy_now)
+ destroy_now = !w1_unref_slave(sl);
+ return destroy_now ? 0 : -EBUSY;
}
struct w1_master *w1_search_master_id(u32 id)
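The new teardown path boils down to: set the detach flag exactly once, let that first caller drop the reference the flag owned, and free when the count reaches zero. A toy userspace rendering of the same pattern using C11 atomics (the real code additionally holds list_mutex, unlinks the slave and sends the netlink notification, all omitted here; -1 stands in for -EBUSY):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct slave {
	atomic_int refcnt;		/* starts at 1: the "attached" reference */
	atomic_bool detach_flag;	/* plays the role of W1_SLAVE_DETACH */
};

/* drop one reference; free and return 0 when the last one goes away */
static int slave_unref(struct slave *sl)
{
	int left = atomic_fetch_sub(&sl->refcnt, 1) - 1;

	if (left == 0) {
		printf("last reference gone, freeing\n");
		free(sl);
	}
	return left;
}

/* only the first caller to set the flag may drop the attach reference */
static int slave_detach(struct slave *sl)
{
	bool first = !atomic_exchange(&sl->detach_flag, true);

	if (first)
		return slave_unref(sl) ? -1 : 0;	/* 0: destroyed in this call */
	return -1;					/* someone else already detached it */
}

int main(void)
{
	struct slave *sl = malloc(sizeof(*sl));

	if (!sl)
		return 1;
	atomic_init(&sl->refcnt, 1);
	atomic_init(&sl->detach_flag, false);

	atomic_fetch_add(&sl->refcnt, 1);		/* a temporary user takes a reference */
	printf("detach -> %d\n", slave_detach(sl));	/* -1: the user still holds a ref */
	printf("unref  -> %d\n", slave_unref(sl));	/* 0: last reference, freed here */
	return 0;
}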
@@ -799,7 +850,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
mutex_lock(&w1_mlock);
list_for_each_entry(dev, &w1_masters, w1_master_entry) {
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
if (sl->reg_num.family == id->family &&
sl->reg_num.id == id->id &&
@@ -810,7 +861,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
break;
}
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->list_mutex);
if (found)
break;
@@ -830,6 +881,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"for family %02x.\n", dev->name, f->fid);
mutex_lock(&dev->mutex);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
/* If it is a new family, slaves with the default
* family driver and are that family will be
@@ -841,14 +893,19 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
(!attach && sl->family->fid == f->fid)) {
struct w1_reg_num rn;
+ mutex_unlock(&dev->list_mutex);
memcpy(&rn, &sl->reg_num, sizeof(rn));
- w1_slave_detach(sl);
-
- w1_attach_slave_device(dev, &rn);
+ /* If it was already in use let the automatic
+ * scan pick it up again later.
+ */
+ if (!w1_slave_detach(sl))
+ w1_attach_slave_device(dev, &rn);
+ mutex_lock(&dev->list_mutex);
}
}
dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
"has been finished.\n", dev->name);
+ mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
}
mutex_unlock(&w1_mlock);
@@ -876,7 +933,12 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
}
/**
- * Performs a ROM Search & registers any devices found.
+ * w1_search() - Performs a ROM Search & registers any devices found.
+ * @dev: The master device to search
+ * @search_type: W1_SEARCH to search all devices, or W1_ALARM_SEARCH
+ * to return only devices in the alarmed state
+ * @cb: Function to call when a device is found
+ *
* The 1-wire search is a simple binary tree search.
* For each bit of the address, we read two bits and write one bit.
* The bit written will put to sleep all devices that don't match that bit.
@@ -886,8 +948,6 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
*
* See "Application note 187 1-wire search algorithm" at www.maxim-ic.com
*
- * @dev The master device to search
- * @cb Function to call when a device is found
*/
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
{
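The kernel-doc above describes the classic binary-tree ROM search. A self-contained userspace model of the same walk, with the bus and its wired-AND triplet replies simulated in software (the device ids are made up, and the resume-from-search_id and max_slave_count handling added by this patch are left out for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NDEV 3
static const uint64_t roms[NDEV] = {
	0x3100000000abc128ULL, 0x5200000000def228ULL, 0x9300000001234510ULL,
};
static bool awake[NDEV];

static void bus_reset(void)
{
	for (int i = 0; i < NDEV; i++)
		awake[i] = true;
}

/*
 * Model of the triplet: read the wired-AND of everyone's bit, then of its
 * complement, then write a direction bit that puts non-matching devices to
 * sleep.  Return bit0 = id bit, bit1 = complement bit, bit2 = direction taken.
 */
static unsigned int triplet(unsigned int bdir, int bit)
{
	bool id = true, comp = true;

	for (int i = 0; i < NDEV; i++) {
		if (!awake[i])
			continue;
		bool b = (roms[i] >> bit) & 1;
		id &= b;
		comp &= !b;
	}
	if (id && comp)
		return 0x3;		/* nobody answered */
	if (id != comp)
		bdir = id;		/* all remaining devices agree on this bit */
	/* else: discrepancy, keep the caller's chosen direction */

	for (int i = 0; i < NDEV; i++)
		if (awake[i] && (((roms[i] >> bit) & 1) != bdir))
			awake[i] = false;

	return (bdir << 2) | ((unsigned int)comp << 1) | (unsigned int)id;
}

int main(void)
{
	uint64_t last_rn = 0, rn = 0;
	int desc_bit = 64, last_zero = -1;
	bool last_device = false;

	while (!last_device) {
		unsigned int ret = 0;

		last_rn = rn;
		rn = 0;
		bus_reset();

		for (int i = 0; i < 64; i++) {
			unsigned int search_bit;

			if (i == desc_bit)
				search_bit = 1;	/* took the 0 path here last time */
			else if (i > desc_bit)
				search_bit = 0;	/* first visit to this branch */
			else
				search_bit = (last_rn >> i) & 1;	/* replay previous path */

			ret = triplet(search_bit, i);
			if ((ret & 0x3) == 0x3)
				break;		/* no device responded */
			if (ret == 0)
				last_zero = i;	/* discrepancy where the 0 path was taken */
			rn |= (uint64_t)(ret >> 2) << i;
		}

		if ((ret & 0x3) == 0x3)
			break;
		if (desc_bit == last_zero || last_zero < 0)
			last_device = true;	/* no unexplored 1-branches remain */
		desc_bit = last_zero;
		printf("found %016llx\n", (unsigned long long)rn);
	}
	return 0;
}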
@@ -898,7 +958,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
u8 triplet_ret = 0;
search_bit = 0;
- rn = last_rn = 0;
+ rn = dev->search_id;
+ last_rn = 0;
last_device = 0;
last_zero = -1;
@@ -945,7 +1006,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
else
search_bit = ((last_rn >> i) & 0x1);
- /** Read two bits and write one bit */
+ /* Read two bits and write one bit */
triplet_ret = w1_triplet(dev, search_bit);
/* quit if no device responded */
@@ -960,8 +1021,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
tmp64 = (triplet_ret >> 2);
rn |= (tmp64 << i);
- /* ensure we're called from kthread and not by netlink callback */
- if (!dev->priv && kthread_should_stop()) {
+ if (test_bit(W1_ABORT_SEARCH, &dev->flags)) {
mutex_unlock(&dev->bus_mutex);
dev_dbg(&dev->dev, "Abort w1_search\n");
return;
@@ -970,11 +1030,30 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
mutex_unlock(&dev->bus_mutex);
if ( (triplet_ret & 0x03) != 0x03 ) {
- if ( (desc_bit == last_zero) || (last_zero < 0))
+ if ((desc_bit == last_zero) || (last_zero < 0)) {
last_device = 1;
+ dev->search_id = 0;
+ } else {
+ dev->search_id = rn;
+ }
desc_bit = last_zero;
cb(dev, rn);
}
+
+ if (!last_device && slave_count == dev->max_slave_count &&
+ !test_bit(W1_WARN_MAX_COUNT, &dev->flags)) {
+ /* Only max_slave_count will be scanned in a search,
+ * but it will start where it left off on the next search
+ * until all ids are identified and then it will start
+ * over. A continued search will report the previous
+ * last id as the first id (provided it is still on the
+ * bus).
+ */
+ dev_info(&dev->dev, "%s: max_slave_count %d reached, "
+ "will continue next search.\n", __func__,
+ dev->max_slave_count);
+ set_bit(W1_WARN_MAX_COUNT, &dev->flags);
+ }
}
}
@@ -983,17 +1062,24 @@ void w1_search_process_cb(struct w1_master *dev, u8 search_type,
{
struct w1_slave *sl, *sln;
+ mutex_lock(&dev->list_mutex);
list_for_each_entry(sl, &dev->slist, w1_slave_entry)
clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
+ mutex_unlock(&dev->list_mutex);
w1_search_devices(dev, search_type, cb);
+ mutex_lock(&dev->list_mutex);
list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
- if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl)
+ if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) {
+ mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
+ mutex_lock(&dev->list_mutex);
+ }
else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
sl->ttl = dev->slave_ttl;
}
+ mutex_unlock(&dev->list_mutex);
if (dev->search_count > 0)
dev->search_count--;
@@ -1004,6 +1090,32 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
w1_search_process_cb(dev, search_type, w1_slave_found);
}
+/**
+ * w1_process_callbacks() - execute each dev->async_list callback entry
+ * @dev: w1_master device
+ *
+ * Return: 1 if there were commands executed, 0 otherwise
+ */
+int w1_process_callbacks(struct w1_master *dev)
+{
+ int ret = 0;
+ struct w1_async_cmd *async_cmd, *async_n;
+
+ /* The list can be added to in another thread, loop until it is empty */
+ while (!list_empty(&dev->async_list)) {
+ list_for_each_entry_safe(async_cmd, async_n, &dev->async_list,
+ async_entry) {
+ /* drop the lock; if it is a search it can take a long
+ * time */
+ mutex_unlock(&dev->list_mutex);
+ async_cmd->cb(dev, async_cmd);
+ ret = 1;
+ mutex_lock(&dev->list_mutex);
+ }
+ }
+ return ret;
+}
+
int w1_process(void *data)
{
struct w1_master *dev = (struct w1_master *) data;
@@ -1011,23 +1123,46 @@ int w1_process(void *data)
* time can be calculated in jiffies once.
*/
const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000);
+ /* remainder if it woke up early */
+ unsigned long jremain = 0;
- while (!kthread_should_stop()) {
- if (dev->search_count) {
+ for (;;) {
+
+ if (!jremain && dev->search_count) {
mutex_lock(&dev->mutex);
w1_search_process(dev, W1_SEARCH);
mutex_unlock(&dev->mutex);
}
+ mutex_lock(&dev->list_mutex);
+ /* Note, w1_process_callbacks drops the lock while processing,
+ * but locks it again before returning.
+ */
+ if (!w1_process_callbacks(dev) && jremain) {
+ /* a wake up is either to stop the thread, process
+ * callbacks, or search; it wasn't to process callbacks, so
+ * schedule a search.
+ */
+ jremain = 1;
+ }
+
try_to_freeze();
__set_current_state(TASK_INTERRUPTIBLE);
+ /* hold list_mutex until after setting TASK_INTERRUPTIBLE to prevent
+ * losing the wakeup signal when an async_cmd is added.
+ */
+ mutex_unlock(&dev->list_mutex);
+
if (kthread_should_stop())
break;
/* Only sleep when the search is active. */
- if (dev->search_count)
- schedule_timeout(jtime);
+ if (dev->search_count) {
+ if (!jremain)
+ jremain = jtime;
+ jremain = schedule_timeout(jremain);
+ }
else
schedule();
}
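The jremain handling above has a rough userspace analogue: when a sleep is cut short by a wakeup that turns out not to be the periodic trigger, go back to sleep for the remaining time instead of restarting the full period (purely illustrative, not driver code):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <time.h>

/* sleep out a full period even if interruptions keep cutting it short */
static void sleep_full_period(time_t seconds)
{
	struct timespec req = { .tv_sec = seconds }, rem;

	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;	/* woken early: sleep again for the remainder */
}

int main(void)
{
	sleep_full_period(10);
	puts("periodic bus search would run here");
	return 0;
}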
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index ca8081a101d6..734dab7fc687 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -22,6 +22,13 @@
#ifndef __W1_H
#define __W1_H
+/**
+ * struct w1_reg_num - broken out slave device id
+ *
+ * @family: identifies the type of device
+ * @id: along with family is the unique device id
+ * @crc: checksum of the other bytes
+ */
struct w1_reg_num
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -58,7 +65,24 @@ struct w1_reg_num
#define W1_RESUME_CMD 0xA5
#define W1_SLAVE_ACTIVE 0
+#define W1_SLAVE_DETACH 1
+/**
+ * struct w1_slave - holds a single slave device on the bus
+ *
+ * @owner: Points to the one wire "wire" kernel module.
+ * @name: Device id in ascii.
+ * @w1_slave_entry: data for the linked list
+ * @reg_num: the slave id in binary
+ * @refcnt: reference count, delete when 0
+ * @flags: bit flags for W1_SLAVE_ACTIVE W1_SLAVE_DETACH
+ * @ttl: decremented each search this slave isn't found, detach at 0
+ * @master: bus which this slave is on
+ * @family: module for device family type
+ * @family_data: pointer for use by the family module
+ * @dev: kernel device identifier
+ *
+ */
struct w1_slave
{
struct module *owner;
@@ -66,7 +90,6 @@ struct w1_slave
struct list_head w1_slave_entry;
struct w1_reg_num reg_num;
atomic_t refcnt;
- u8 rom[9];
int ttl;
unsigned long flags;
@@ -74,99 +97,146 @@ struct w1_slave
struct w1_family *family;
void *family_data;
struct device dev;
- struct completion released;
};
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
/**
+ * struct w1_bus_master - operations available on a bus master
+ *
+ * @data: the first parameter in all the functions below
+ *
+ * @read_bit: Sample the line level @return the level read (0 or 1)
+ *
+ * @write_bit: Sets the line level
+ *
+ * @touch_bit: the lowest-level function for devices that really support the
+ * 1-wire protocol.
+ * touch_bit(0) = write-0 cycle
+ * touch_bit(1) = write-1 / read cycle
+ * @return the bit read (0 or 1)
+ *
+ * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls.
+ * @return the byte read
+ *
+ * @write_byte: Writes a byte. Same as 8 touch_bit(x) calls.
+ *
+ * @read_block: Same as a series of read_byte() calls
+ * @return the number of bytes read
+ *
+ * @write_block: Same as a series of write_byte() calls
+ *
+ * @triplet: Combines two reads and a smart write for ROM searches
+ * @return bit0=Id bit1=comp_id bit2=dir_taken
+ *
+ * @reset_bus: long write-0 with a read for the presence pulse detection
+ * @return -1=Error, 0=Device present, 1=No device present
+ *
+ * @set_pullup: Put out a strong pull-up pulse of the specified duration.
+ * @return -1=Error, 0=completed
+ *
+ * @search: Really nice hardware can handle the different types of ROM search
+ * w1_master* is passed to the slave found callback.
+ * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
+ *
* Note: read_bit and write_bit are very low level functions and should only
* be used with hardware that doesn't really support 1-wire operations,
* like a parallel/serial port.
* Either define read_bit and write_bit OR define, at minimum, touch_bit and
* reset_bus.
+ *
*/
struct w1_bus_master
{
- /** the first parameter in all the functions below */
void *data;
- /**
- * Sample the line level
- * @return the level read (0 or 1)
- */
u8 (*read_bit)(void *);
- /** Sets the line level */
void (*write_bit)(void *, u8);
- /**
- * touch_bit is the lowest-level function for devices that really
- * support the 1-wire protocol.
- * touch_bit(0) = write-0 cycle
- * touch_bit(1) = write-1 / read cycle
- * @return the bit read (0 or 1)
- */
u8 (*touch_bit)(void *, u8);
- /**
- * Reads a bytes. Same as 8 touch_bit(1) calls.
- * @return the byte read
- */
u8 (*read_byte)(void *);
- /**
- * Writes a byte. Same as 8 touch_bit(x) calls.
- */
void (*write_byte)(void *, u8);
- /**
- * Same as a series of read_byte() calls
- * @return the number of bytes read
- */
u8 (*read_block)(void *, u8 *, int);
- /** Same as a series of write_byte() calls */
void (*write_block)(void *, const u8 *, int);
- /**
- * Combines two reads and a smart write for ROM searches
- * @return bit0=Id bit1=comp_id bit2=dir_taken
- */
u8 (*triplet)(void *, u8);
- /**
- * long write-0 with a read for the presence pulse detection
- * @return -1=Error, 0=Device present, 1=No device present
- */
u8 (*reset_bus)(void *);
- /**
- * Put out a strong pull-up pulse of the specified duration.
- * @return -1=Error, 0=completed
- */
u8 (*set_pullup)(void *, int);
- /** Really nice hardware can handles the different types of ROM search
- * w1_master* is passed to the slave found callback.
- */
void (*search)(void *, struct w1_master *,
u8, w1_slave_found_callback);
};
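For illustration, a minimal bit-banging master that only provides read_bit and write_bit (per the note above, that pairing is enough for hardware with no real 1-wire support), registered with w1_add_master_device(). The my_w1 state and my_gpio_* helpers are placeholders, not part of the w1 API:

static u8 my_w1_read_bit(void *data)
{
        struct my_w1 *hw = data;              /* hypothetical driver state */

        return my_gpio_get(hw) ? 1 : 0;       /* sample the line level */
}

static void my_w1_write_bit(void *data, u8 bit)
{
        struct my_w1 *hw = data;

        my_gpio_set(hw, bit);                 /* drive the line */
}

static struct w1_bus_master my_w1_master = {
        .read_bit  = my_w1_read_bit,
        .write_bit = my_w1_write_bit,
};

/* at probe time */
my_w1_master.data = hw;
err = w1_add_master_device(&my_w1_master);

/* at remove time */
w1_remove_master_device(&my_w1_master);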
+/**
+ * enum w1_master_flags - bitfields used in w1_master.flags
+ * @W1_ABORT_SEARCH: abort searching early on shutdown
+ * @W1_WARN_MAX_COUNT: limit warning when the maximum count is reached
+ */
+enum w1_master_flags {
+ W1_ABORT_SEARCH = 0,
+ W1_WARN_MAX_COUNT = 1,
+};
+
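A sketch of how these flags are used: the removal path in this series sets W1_ABORT_SEARCH before stopping the kthread, and a long-running search can poll it to bail out early. The test_bit placement inside the search loop is an assumption here; only the set_bit before kthread_stop appears in this patch.

/* teardown side: ask any in-progress search to give up, then stop the thread */
set_bit(W1_ABORT_SEARCH, &dev->flags);
kthread_stop(dev->thread);

/* inside the search loop (hypothetical placement): stop iterating promptly */
if (test_bit(W1_ABORT_SEARCH, &dev->flags))
        break;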
+/**
+ * struct w1_master - one per bus master
+ * @w1_master_entry: master linked list
+ * @owner: module owner
+ * @name: dynamically allocated bus name
+ * @list_mutex: protect slist and async_list
+ * @slist: linked list of slaves
+ * @async_list: linked list of netlink commands to execute
+ * @max_slave_count: maximum number of slaves to search for at a time
+ * @slave_count: current number of slaves known
+ * @attempts: number of searches run
+ * @slave_ttl: number of searches before a slave is timed out
+ * @initialized: prevent init/removal race conditions
+ * @id: w1 bus number
+ * @search_count: number of automatic searches to run, -1 unlimited
+ * @search_id: allows continuing a search
+ * @refcnt: reference count
+ * @priv: private data storage
+ * @priv_size: size allocated
+ * @enable_pullup: allows a strong pullup
+ * @pullup_duration: time for the next strong pullup
+ * @flags: one of w1_master_flags
+ * @thread: thread for bus search and netlink commands
+ * @mutex: protect most of w1_master
+ * @bus_mutex: protect concurrent bus access
+ * @driver: sysfs driver
+ * @dev: sysfs device
+ * @bus_master: io operations available
+ * @seq: sequence number used for netlink broadcasts
+ * @portid: destination for the current netlink command
+ */
struct w1_master
{
struct list_head w1_master_entry;
struct module *owner;
unsigned char name[W1_MAXNAMELEN];
+ /* list_mutex protects just slist and async_list so slaves can be
+ * searched for and async commands added while the master has
+ * w1_master.mutex locked and is operating on the bus.
+ * lock order w1_mlock, w1_master.mutex, w1_master.list_mutex
+ */
+ struct mutex list_mutex;
struct list_head slist;
+ struct list_head async_list;
int max_slave_count, slave_count;
unsigned long attempts;
int slave_ttl;
int initialized;
u32 id;
int search_count;
+ /* id to start searching on, to continue a search or 0 to restart */
+ u64 search_id;
atomic_t refcnt;
@@ -178,6 +248,8 @@ struct w1_master
/** 5V strong pullup duration in milliseconds, zero disabled. */
int pullup_duration;
+ long flags;
+
struct task_struct *thread;
struct mutex mutex;
struct mutex bus_mutex;
@@ -188,16 +260,41 @@ struct w1_master
struct w1_bus_master *bus_master;
u32 seq;
+ /* port id to send netlink responses to. The value is temporarily
+ * stored here while processing a message, set after locking the
+ * mutex, zero before unlocking the mutex.
+ */
+ u32 portid;
+};
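As the list_mutex comment above spells out, list_mutex guards only slist and async_list and nests inside w1_master.mutex (lock order w1_mlock, mutex, list_mutex). A sketch of iterating the slave list while honouring that order, mirroring the removal path later in this series; the loop body is a placeholder:

struct w1_slave *sl;

mutex_lock(&dev->mutex);        /* serialize against bus operations */
mutex_lock(&dev->list_mutex);   /* then take the list lock */
list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
        /* inspect sl; do not touch the bus while list_mutex is held */
}
mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);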
+
+/**
+ * struct w1_async_cmd - execute callback from the w1_process kthread
+ * @async_entry: link entry
+ * @cb: callback function; must list_del its async_entry and free the
+ * containing structure before returning
+ *
+ * When inserted into the w1_master async_list, w1_process will execute
+ * the callback. Embed this into the structure with the command details.
+ */
+struct w1_async_cmd {
+ struct list_head async_entry;
+ void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
};
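Queueing side, as a sketch: embed the struct, point cb at the handler, then add it to async_list under list_mutex and wake the master kthread, mirroring what the netlink code in this series does. struct my_w1_cmd and my_w1_cmd_cb are the hypothetical names from the earlier callback sketch:

struct my_w1_cmd *cmd;

cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
        return -ENOMEM;
cmd->async.cb = my_w1_cmd_cb;

mutex_lock(&dev->list_mutex);
list_add_tail(&cmd->async.async_entry, &dev->async_list);
wake_up_process(dev->thread);
mutex_unlock(&dev->list_mutex);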
int w1_create_master_attributes(struct w1_master *);
void w1_destroy_master_attributes(struct w1_master *master);
void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
+/* call w1_unref_slave to release the reference counts w1_search_slave added */
struct w1_slave *w1_search_slave(struct w1_reg_num *id);
+/* decrements the reference on sl->master and sl, and cleans up if zero;
+ * returns the reference count after it has been decremented */
+int w1_unref_slave(struct w1_slave *sl);
void w1_slave_found(struct w1_master *dev, u64 rn);
void w1_search_process_cb(struct w1_master *dev, u8 search_type,
w1_slave_found_callback cb);
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+ struct w1_reg_num *rn);
struct w1_master *w1_search_master_id(u32 id);
/* Disconnect and reconnect devices in the given family. Used for finding
@@ -206,7 +303,9 @@ struct w1_master *w1_search_master_id(u32 id);
* has just been registered, to 0 when it has been unregistered.
*/
void w1_reconnect_slaves(struct w1_family *f, int attach);
-void w1_slave_detach(struct w1_slave *sl);
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
+/* 0 success, otherwise EBUSY */
+int w1_slave_detach(struct w1_slave *sl);
u8 w1_triplet(struct w1_master *dev, int bdir);
void w1_write_8(struct w1_master *, u8);
@@ -242,6 +341,7 @@ extern int w1_max_slave_ttl;
extern struct list_head w1_masters;
extern struct mutex w1_mlock;
+extern int w1_process_callbacks(struct w1_master *dev);
extern int w1_process(void *);
#endif /* __KERNEL__ */
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index e9309778ee72..3bff6b37b472 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -31,6 +31,10 @@
DEFINE_SPINLOCK(w1_flock);
static LIST_HEAD(w1_families);
+/**
+ * w1_register_family() - register a device family driver
+ * @newf: family to register
+ */
int w1_register_family(struct w1_family *newf)
{
struct list_head *ent, *n;
@@ -59,6 +63,10 @@ int w1_register_family(struct w1_family *newf)
return ret;
}
+/**
+ * w1_unregister_family() - unregister a device family driver
+ * @fent: family to unregister
+ */
void w1_unregister_family(struct w1_family *fent)
{
struct list_head *ent, *n;
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 4ad0e81b6404..26ca1343055b 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -48,6 +48,12 @@
struct w1_slave;
+/**
+ * struct w1_family_ops - operations for a family type
+ * @add_slave: add a slave device of this family
+ * @remove_slave: remove a slave device of this family
+ * @groups: sysfs attribute groups
+ */
struct w1_family_ops
{
int (* add_slave)(struct w1_slave *);
@@ -55,6 +61,13 @@ struct w1_family_ops
const struct attribute_group **groups;
};
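A hedged sketch of how a family driver ties these together: fill in a w1_family with its 8-bit id and ops, register it at module init and unregister at exit. The 0x28 family id and the my_* helper names are placeholders:

static int my_add_slave(struct w1_slave *sl)
{
        /* create sysfs files, allocate sl->family_data, etc. */
        return 0;
}

static void my_remove_slave(struct w1_slave *sl)
{
        /* undo whatever my_add_slave() set up */
}

static struct w1_family_ops my_fops = {
        .add_slave    = my_add_slave,
        .remove_slave = my_remove_slave,
};

static struct w1_family my_family = {
        .fid  = 0x28,           /* placeholder family id */
        .fops = &my_fops,
};

static int __init my_init(void)
{
        return w1_register_family(&my_family);
}

static void __exit my_exit(void)
{
        w1_unregister_family(&my_family);
}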
+/**
+ * struct w1_family - reference counted family structure.
+ * @family_entry: family linked list
+ * @fid: 8 bit family identifier
+ * @fops: operations for this family
+ * @refcnt: reference counter
+ */
struct w1_family
{
struct list_head family_entry;
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 5a98649f6abc..9b084db739c7 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -75,8 +75,10 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
atomic_set(&dev->refcnt, 2);
INIT_LIST_HEAD(&dev->slist);
+ INIT_LIST_HEAD(&dev->async_list);
mutex_init(&dev->mutex);
mutex_init(&dev->bus_mutex);
+ mutex_init(&dev->list_mutex);
memcpy(&dev->dev, device, sizeof(struct device));
dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
@@ -103,6 +105,10 @@ static void w1_free_dev(struct w1_master *dev)
device_unregister(&dev->dev);
}
+/**
+ * w1_add_master_device() - registers a new master device
+ * @master: master bus device to register
+ */
int w1_add_master_device(struct w1_bus_master *master)
{
struct w1_master *dev, *entry;
@@ -117,18 +123,6 @@ int w1_add_master_device(struct w1_bus_master *master)
printk(KERN_ERR "w1_add_master_device: invalid function set\n");
return(-EINVAL);
}
- /* While it would be electrically possible to make a device that
- * generated a strong pullup in bit bang mode, only hardware that
- * controls 1-wire time frames are even expected to support a strong
- * pullup. w1_io.c would need to support calling set_pullup before
- * the last write_bit operation of a w1_write_8 which it currently
- * doesn't.
- */
- if (!master->write_byte && !master->touch_bit && master->set_pullup) {
- printk(KERN_ERR "w1_add_master_device: set_pullup requires "
- "write_byte or touch_bit, disabling\n");
- master->set_pullup = NULL;
- }
/* Lock until the device is added (or not) to w1_masters. */
mutex_lock(&w1_mlock);
@@ -184,6 +178,7 @@ int w1_add_master_device(struct w1_bus_master *master)
#if 0 /* Thread cleanup code, not required currently. */
err_out_kill_thread:
+ set_bit(W1_ABORT_SEARCH, &dev->flags);
kthread_stop(dev->thread);
#endif
err_out_rm_attr:
@@ -199,16 +194,22 @@ void __w1_remove_master_device(struct w1_master *dev)
struct w1_netlink_msg msg;
struct w1_slave *sl, *sln;
- kthread_stop(dev->thread);
-
mutex_lock(&w1_mlock);
list_del(&dev->w1_master_entry);
mutex_unlock(&w1_mlock);
+ set_bit(W1_ABORT_SEARCH, &dev->flags);
+ kthread_stop(dev->thread);
+
mutex_lock(&dev->mutex);
- list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry)
+ mutex_lock(&dev->list_mutex);
+ list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
+ mutex_unlock(&dev->list_mutex);
w1_slave_detach(sl);
+ mutex_lock(&dev->list_mutex);
+ }
w1_destroy_master_attributes(dev);
+ mutex_unlock(&dev->list_mutex);
mutex_unlock(&dev->mutex);
atomic_dec(&dev->refcnt);
@@ -218,7 +219,9 @@ void __w1_remove_master_device(struct w1_master *dev)
if (msleep_interruptible(1000))
flush_signals(current);
+ w1_process_callbacks(dev);
}
+ w1_process_callbacks(dev);
memset(&msg, 0, sizeof(msg));
msg.id.mst.id = dev->id;
@@ -228,6 +231,10 @@ void __w1_remove_master_device(struct w1_master *dev)
w1_free_dev(dev);
}
+/**
+ * w1_remove_master_device() - unregister a master device
+ * @bm: master bus device to remove
+ */
void w1_remove_master_device(struct w1_bus_master *bm)
{
struct w1_master *dev, *found = NULL;
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index e10acc237733..282092421cc9 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -62,7 +62,9 @@ static void w1_write_bit(struct w1_master *dev, int bit);
static u8 w1_read_bit(struct w1_master *dev);
/**
- * Generates a write-0 or write-1 cycle and samples the level.
+ * w1_touch_bit() - Generates a write-0 or write-1 cycle and samples the level.
+ * @dev: the master device
+ * @bit: 0 - write a 0, 1 - write a 1 and read the level
*/
static u8 w1_touch_bit(struct w1_master *dev, int bit)
{
@@ -77,7 +79,10 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
}
/**
- * Generates a write-0 or write-1 cycle.
+ * w1_write_bit() - Generates a write-0 or write-1 cycle.
+ * @dev: the master device
+ * @bit: bit to write
+ *
* Only call if dev->bus_master->touch_bit is NULL
*/
static void w1_write_bit(struct w1_master *dev, int bit)
@@ -102,11 +107,12 @@ static void w1_write_bit(struct w1_master *dev, int bit)
}
/**
+ * w1_pre_write() - pre-write operations
+ * @dev: the master device
+ *
* Pre-write operation, currently only supporting strong pullups.
* Program the hardware for a strong pullup, if one has been requested and
* the hardware supports it.
- *
- * @param dev the master device
*/
static void w1_pre_write(struct w1_master *dev)
{
@@ -118,11 +124,12 @@ static void w1_pre_write(struct w1_master *dev)
}
/**
+ * w1_post_write() - post-write options
+ * @dev: the master device
+ *
* Post-write operation, currently only supporting strong pullups.
* If a strong pullup was requested, clear it if the hardware supports
* them, or execute the delay otherwise, in either case clear the request.
- *
- * @param dev the master device
*/
static void w1_post_write(struct w1_master *dev)
{
@@ -136,10 +143,9 @@ static void w1_post_write(struct w1_master *dev)
}
/**
- * Writes 8 bits.
- *
- * @param dev the master device
- * @param byte the byte to write
+ * w1_write_8() - Writes 8 bits.
+ * @dev: the master device
+ * @byte: the byte to write
*/
void w1_write_8(struct w1_master *dev, u8 byte)
{
@@ -161,7 +167,9 @@ EXPORT_SYMBOL_GPL(w1_write_8);
/**
- * Generates a write-1 cycle and samples the level.
+ * w1_read_bit() - Generates a write-1 cycle and samples the level.
+ * @dev: the master device
+ *
* Only call if dev->bus_master->touch_bit is NULL
*/
static u8 w1_read_bit(struct w1_master *dev)
@@ -185,16 +193,17 @@ static u8 w1_read_bit(struct w1_master *dev)
}
/**
- * Does a triplet - used for searching ROM addresses.
+ * w1_triplet() - Does a triplet - used for searching ROM addresses.
+ * @dev: the master device
+ * @bdir: the bit to write if both id_bit and comp_bit are 0
+ *
* Return bits:
* bit 0 = id_bit
* bit 1 = comp_bit
* bit 2 = dir_taken
* If both bits 0 & 1 are set, the search should be restarted.
*
- * @param dev the master device
- * @param bdir the bit to write if both id_bit and comp_bit are 0
- * @return bit fields - see above
+ * Return: bit fields - see above
*/
u8 w1_triplet(struct w1_master *dev, int bdir)
{
@@ -226,10 +235,10 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
}
/**
- * Reads 8 bits.
+ * w1_read_8() - Reads 8 bits.
+ * @dev: the master device
*
- * @param dev the master device
- * @return the byte read
+ * Return: the byte read
*/
u8 w1_read_8(struct w1_master *dev)
{
@@ -247,11 +256,10 @@ u8 w1_read_8(struct w1_master *dev)
EXPORT_SYMBOL_GPL(w1_read_8);
/**
- * Writes a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the data to write
- * @param len the number of bytes to write
+ * w1_write_block() - Writes a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the data to write
+ * @len: the number of bytes to write
*/
void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
{
@@ -269,11 +277,10 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_write_block);
/**
- * Touches a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the data to write
- * @param len the number of bytes to write
+ * w1_touch_block() - Touches a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the data to write
+ * @len: the number of bytes to write
*/
void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
{
@@ -294,12 +301,11 @@ void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_touch_block);
/**
- * Reads a series of bytes.
- *
- * @param dev the master device
- * @param buf pointer to the buffer to fill
- * @param len the number of bytes to read
- * @return the number of bytes read
+ * w1_read_block() - Reads a series of bytes.
+ * @dev: the master device
+ * @buf: pointer to the buffer to fill
+ * @len: the number of bytes to read
+ * Return: the number of bytes read
*/
u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
{
@@ -319,10 +325,9 @@ u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
EXPORT_SYMBOL_GPL(w1_read_block);
/**
- * Issues a reset bus sequence.
- *
- * @param dev The bus master pointer
- * @return 0=Device present, 1=No device present or error
+ * w1_reset_bus() - Issues a reset bus sequence.
+ * @dev: the master device
+ * Return: 0=Device present, 1=No device present or error
*/
int w1_reset_bus(struct w1_master *dev)
{
@@ -383,12 +388,15 @@ void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_cal
}
/**
+ * w1_reset_select_slave() - reset and select a slave
+ * @sl: the slave to select
+ *
* Resets the bus and then selects the slave by sending either a skip rom
- * or a rom match.
+ * or a rom match. A skip rom is issued if there is only one device
+ * registered on the bus.
* The w1 master lock must be held.
*
- * @param sl the slave to select
- * @return 0=success, anything else=error
+ * Return: 0=success, anything else=error
*/
int w1_reset_select_slave(struct w1_slave *sl)
{
@@ -409,6 +417,9 @@ int w1_reset_select_slave(struct w1_slave *sl)
EXPORT_SYMBOL_GPL(w1_reset_select_slave);
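Putting these helpers together, a sketch of a typical multi-command exchange with one slave. The 0x44 and 0xBE command bytes are placeholders, and bus_mutex is used as the master lock, following how w1_process_cb in this series brackets its bus work:

mutex_lock(&sl->master->bus_mutex);

if (w1_reset_select_slave(sl) == 0) {
        w1_write_8(sl->master, 0x44);         /* placeholder command byte */

        /* later: address the same slave again without a full match ROM */
        if (w1_reset_resume_command(sl->master) == 0)
                w1_write_8(sl->master, 0xBE); /* placeholder command byte */
}

mutex_unlock(&sl->master->bus_mutex);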
/**
+ * w1_reset_resume_command() - resume instead of another match ROM
+ * @dev: the master device
+ *
* When the workflow with a slave amongst many requires several
* successive commands a reset between each, this function is similar
* to doing a reset then a match ROM for the last matched ROM. The
@@ -420,8 +431,6 @@ EXPORT_SYMBOL_GPL(w1_reset_select_slave);
* doesn't work of course, but the resume command is the next best thing.
*
* The w1 master lock must be held.
- *
- * @param dev the master device
*/
int w1_reset_resume_command(struct w1_master *dev)
{
@@ -435,6 +444,10 @@ int w1_reset_resume_command(struct w1_master *dev)
EXPORT_SYMBOL_GPL(w1_reset_resume_command);
/**
+ * w1_next_pullup() - register for a strong pullup
+ * @dev: the master device
+ * @delay: time in milliseconds
+ *
* Put out a strong pull-up of the specified duration after the next write
* operation. Not all hardware supports strong pullups. Hardware that
* doesn't support strong pullups will sleep for the given time after the
@@ -442,8 +455,7 @@ EXPORT_SYMBOL_GPL(w1_reset_resume_command);
* the next write, specifying zero will clear a previous request.
* The w1 master lock must be held.
*
- * @param delay time in milliseconds
- * @return 0=success, anything else=error
+ * Return: 0=success, anything else=error
*/
void w1_next_pullup(struct w1_master *dev, int delay)
{
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 40788c925d1c..5234964fe001 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -45,7 +45,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
memcpy(w, msg, sizeof(struct w1_netlink_msg));
- cn_netlink_send(m, 0, GFP_KERNEL);
+ cn_netlink_send(m, dev->portid, 0, GFP_KERNEL);
}
static void w1_send_slave(struct w1_master *dev, u64 rn)
@@ -54,53 +54,95 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
int avail;
-
- /* update kernel slave list */
- w1_slave_found(dev, rn);
+ u64 *data;
avail = dev->priv_size - cmd->len;
- if (avail > 8) {
- u64 *data = (void *)(cmd + 1) + cmd->len;
+ if (avail < 8) {
+ msg->ack++;
+ cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
- *data = rn;
- cmd->len += 8;
- hdr->len += 8;
- msg->len += 8;
- return;
+ msg->len = sizeof(struct w1_netlink_msg) +
+ sizeof(struct w1_netlink_cmd);
+ hdr->len = sizeof(struct w1_netlink_cmd);
+ cmd->len = 0;
}
- msg->ack++;
- cn_netlink_send(msg, 0, GFP_KERNEL);
+ data = (void *)(cmd + 1) + cmd->len;
- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
- hdr->len = sizeof(struct w1_netlink_cmd);
- cmd->len = 0;
+ *data = rn;
+ cmd->len += 8;
+ hdr->len += 8;
+ msg->len += 8;
}
-static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
- unsigned int avail)
+static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
- struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
- struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
- int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH;
+ /* update kernel slave list */
+ w1_slave_found(dev, rn);
- dev->priv = msg;
- dev->priv_size = avail;
+ w1_send_slave(dev, rn);
+}
+
+/* Get the current slave list, or search (with or without alarm) */
+static int w1_get_slaves(struct w1_master *dev,
+ struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
+ struct w1_netlink_cmd *req_cmd)
+{
+ struct cn_msg *msg;
+ struct w1_netlink_msg *hdr;
+ struct w1_netlink_cmd *cmd;
+ struct w1_slave *sl;
+
+ msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = req_msg->id;
+ msg->seq = req_msg->seq;
+ msg->ack = 0;
+ msg->len = sizeof(struct w1_netlink_msg) +
+ sizeof(struct w1_netlink_cmd);
+
+ hdr = (struct w1_netlink_msg *)(msg + 1);
+ cmd = (struct w1_netlink_cmd *)(hdr + 1);
+
+ hdr->type = W1_MASTER_CMD;
+ hdr->id = req_hdr->id;
+ hdr->len = sizeof(struct w1_netlink_cmd);
+
+ cmd->cmd = req_cmd->cmd;
+ cmd->len = 0;
- w1_search_process_cb(dev, search_type, w1_send_slave);
+ dev->priv = msg;
+ dev->priv_size = PAGE_SIZE - msg->len - sizeof(struct cn_msg);
+
+ if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
+ __u64 rn;
+ mutex_lock(&dev->list_mutex);
+ list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
+ memcpy(&rn, &sl->reg_num, sizeof(rn));
+ w1_send_slave(dev, rn);
+ }
+ mutex_unlock(&dev->list_mutex);
+ } else {
+ w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ?
+ W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
+ }
msg->ack = 0;
- cn_netlink_send(msg, 0, GFP_KERNEL);
+ cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
dev->priv = NULL;
dev->priv_size = 0;
+ kfree(msg);
+
return 0;
}
static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
- struct w1_netlink_cmd *cmd)
+ struct w1_netlink_cmd *cmd, u32 portid)
{
void *data;
struct w1_netlink_msg *h;
@@ -131,7 +173,7 @@ static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
memcpy(c->data, cmd->data, c->len);
- err = cn_netlink_send(cm, 0, GFP_KERNEL);
+ err = cn_netlink_send(cm, portid, 0, GFP_KERNEL);
kfree(data);
@@ -146,11 +188,11 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
switch (cmd->cmd) {
case W1_CMD_TOUCH:
w1_touch_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd);
+ w1_send_read_reply(msg, hdr, cmd, dev->portid);
break;
case W1_CMD_READ:
w1_read_block(dev, cmd->data, cmd->len);
- w1_send_read_reply(msg, hdr, cmd);
+ w1_send_read_reply(msg, hdr, cmd, dev->portid);
break;
case W1_CMD_WRITE:
w1_write_block(dev, cmd->data, cmd->len);
@@ -163,38 +205,57 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
return err;
}
-static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg,
- struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd)
+static int w1_process_command_addremove(struct w1_master *dev,
+ struct cn_msg *msg, struct w1_netlink_msg *hdr,
+ struct w1_netlink_cmd *cmd)
{
- int err = -EINVAL;
- struct cn_msg *msg;
- struct w1_netlink_msg *hdr;
- struct w1_netlink_cmd *cmd;
+ struct w1_slave *sl;
+ int err = 0;
+ struct w1_reg_num *id;
- msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (cmd->len != 8)
+ return -EINVAL;
- msg->id = req_msg->id;
- msg->seq = req_msg->seq;
- msg->ack = 0;
- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+ id = (struct w1_reg_num *)cmd->data;
- hdr = (struct w1_netlink_msg *)(msg + 1);
- cmd = (struct w1_netlink_cmd *)(hdr + 1);
+ sl = w1_slave_search_device(dev, id);
+ switch (cmd->cmd) {
+ case W1_CMD_SLAVE_ADD:
+ if (sl)
+ err = -EINVAL;
+ else
+ err = w1_attach_slave_device(dev, id);
+ break;
+ case W1_CMD_SLAVE_REMOVE:
+ if (sl)
+ w1_slave_detach(sl);
+ else
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
- hdr->type = W1_MASTER_CMD;
- hdr->id = req_hdr->id;
- hdr->len = sizeof(struct w1_netlink_cmd);
+ return err;
+}
- cmd->cmd = req_cmd->cmd;
- cmd->len = 0;
+static int w1_process_command_master(struct w1_master *dev,
+ struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
+ struct w1_netlink_cmd *req_cmd)
+{
+ int err = -EINVAL;
- switch (cmd->cmd) {
+ /* drop bus_mutex for search (does its own locking), and add/remove
+ * which doesn't use the bus
+ */
+ switch (req_cmd->cmd) {
case W1_CMD_SEARCH:
case W1_CMD_ALARM_SEARCH:
- err = w1_process_search_command(dev, msg,
- PAGE_SIZE - msg->len - sizeof(struct cn_msg));
+ case W1_CMD_LIST_SLAVES:
+ mutex_unlock(&dev->bus_mutex);
+ err = w1_get_slaves(dev, req_msg, req_hdr, req_cmd);
+ mutex_lock(&dev->bus_mutex);
break;
case W1_CMD_READ:
case W1_CMD_WRITE:
@@ -204,12 +265,20 @@ static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_m
case W1_CMD_RESET:
err = w1_reset_bus(dev);
break;
+ case W1_CMD_SLAVE_ADD:
+ case W1_CMD_SLAVE_REMOVE:
+ mutex_unlock(&dev->bus_mutex);
+ mutex_lock(&dev->mutex);
+ err = w1_process_command_addremove(dev, req_msg, req_hdr,
+ req_cmd);
+ mutex_unlock(&dev->mutex);
+ mutex_lock(&dev->bus_mutex);
+ break;
default:
err = -EINVAL;
break;
}
- kfree(msg);
return err;
}
@@ -223,7 +292,8 @@ static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
return w1_process_command_io(sl->master, msg, hdr, cmd);
}
-static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd)
+static int w1_process_command_root(struct cn_msg *msg,
+ struct w1_netlink_msg *mcmd, u32 portid)
{
struct w1_master *m;
struct cn_msg *cn;
@@ -256,7 +326,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
mutex_lock(&w1_mlock);
list_for_each_entry(m, &w1_masters, w1_master_entry) {
if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
- cn_netlink_send(cn, 0, GFP_KERNEL);
+ cn_netlink_send(cn, portid, 0, GFP_KERNEL);
cn->ack++;
cn->len = sizeof(struct w1_netlink_msg);
w->len = 0;
@@ -269,7 +339,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
id++;
}
cn->ack = 0;
- cn_netlink_send(cn, 0, GFP_KERNEL);
+ cn_netlink_send(cn, portid, 0, GFP_KERNEL);
mutex_unlock(&w1_mlock);
kfree(cn);
@@ -277,7 +347,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
}
static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
- struct w1_netlink_cmd *rcmd, int error)
+ struct w1_netlink_cmd *rcmd, int portid, int error)
{
struct cn_msg *cmsg;
struct w1_netlink_msg *msg;
@@ -304,35 +374,147 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
cmsg->len += sizeof(*cmd);
}
- error = cn_netlink_send(cmsg, 0, GFP_KERNEL);
+ error = cn_netlink_send(cmsg, portid, 0, GFP_KERNEL);
kfree(cmsg);
return error;
}
+/* Bundle together a reference count, the full message, and broken out
+ * commands to be executed on each w1 master kthread in one memory allocation.
+ */
+struct w1_cb_block {
+ atomic_t refcnt;
+ u32 portid; /* Sending process port ID */
+ struct cn_msg msg;
+ /* cn_msg data */
+ /* one or more variable length struct w1_cb_node */
+};
+struct w1_cb_node {
+ struct w1_async_cmd async;
+ /* pointers within w1_cb_block and msg data */
+ struct w1_cb_block *block;
+ struct w1_netlink_msg *m;
+ struct w1_slave *sl;
+ struct w1_master *dev;
+};
+
+static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
+{
+ struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
+ async);
+ u16 mlen = node->m->len;
+ u8 *cmd_data = node->m->data;
+ int err = 0;
+ struct w1_slave *sl = node->sl;
+ struct w1_netlink_cmd *cmd = NULL;
+
+ mutex_lock(&dev->bus_mutex);
+ dev->portid = node->block->portid;
+ if (sl && w1_reset_select_slave(sl))
+ err = -ENODEV;
+
+ while (mlen && !err) {
+ cmd = (struct w1_netlink_cmd *)cmd_data;
+
+ if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
+ err = -E2BIG;
+ break;
+ }
+
+ if (sl)
+ err = w1_process_command_slave(sl, &node->block->msg,
+ node->m, cmd);
+ else
+ err = w1_process_command_master(dev, &node->block->msg,
+ node->m, cmd);
+
+ w1_netlink_send_error(&node->block->msg, node->m, cmd,
+ node->block->portid, err);
+ err = 0;
+
+ cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
+ mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
+ }
+
+ if (!cmd || err)
+ w1_netlink_send_error(&node->block->msg, node->m, cmd,
+ node->block->portid, err);
+
+ if (sl)
+ w1_unref_slave(sl);
+ else
+ atomic_dec(&dev->refcnt);
+ dev->portid = 0;
+ mutex_unlock(&dev->bus_mutex);
+
+ mutex_lock(&dev->list_mutex);
+ list_del(&async_cmd->async_entry);
+ mutex_unlock(&dev->list_mutex);
+
+ if (atomic_sub_return(1, &node->block->refcnt) == 0)
+ kfree(node->block);
+}
+
static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
- struct w1_netlink_cmd *cmd;
struct w1_slave *sl;
struct w1_master *dev;
+ u16 msg_len;
int err = 0;
+ struct w1_cb_block *block = NULL;
+ struct w1_cb_node *node = NULL;
+ int node_count = 0;
+
+ /* Count the number of master or slave commands there are to allocate
+ * space for one cb_node each.
+ */
+ msg_len = msg->len;
+ while (msg_len && !err) {
+ if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
+ err = -E2BIG;
+ break;
+ }
+
+ if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD)
+ ++node_count;
- while (msg->len && !err) {
+ msg_len -= sizeof(struct w1_netlink_msg) + m->len;
+ m = (struct w1_netlink_msg *)(((u8 *)m) +
+ sizeof(struct w1_netlink_msg) + m->len);
+ }
+ m = (struct w1_netlink_msg *)(msg + 1);
+ if (node_count) {
+ /* msg->len doesn't include itself */
+ long size = sizeof(struct w1_cb_block) + msg->len +
+ node_count*sizeof(struct w1_cb_node);
+ block = kmalloc(size, GFP_KERNEL);
+ if (!block) {
+ w1_netlink_send_error(msg, m, NULL, nsp->portid,
+ -ENOMEM);
+ return;
+ }
+ atomic_set(&block->refcnt, 1);
+ block->portid = nsp->portid;
+ memcpy(&block->msg, msg, sizeof(*msg) + msg->len);
+ node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
+ }
+
+ msg_len = msg->len;
+ while (msg_len && !err) {
struct w1_reg_num id;
u16 mlen = m->len;
- u8 *cmd_data = m->data;
dev = NULL;
sl = NULL;
- cmd = NULL;
memcpy(&id, m->id.id, sizeof(id));
#if 0
printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
__func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
#endif
- if (m->len + sizeof(struct w1_netlink_msg) > msg->len) {
+ if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
err = -E2BIG;
break;
}
@@ -344,7 +526,7 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (sl)
dev = sl->master;
} else {
- err = w1_process_command_root(msg, m);
+ err = w1_process_command_root(msg, m, nsp->portid);
goto out_cont;
}
@@ -357,41 +539,24 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (!mlen)
goto out_cont;
- mutex_lock(&dev->mutex);
-
- if (sl && w1_reset_select_slave(sl)) {
- err = -ENODEV;
- goto out_up;
- }
+ atomic_inc(&block->refcnt);
+ node->async.cb = w1_process_cb;
+ node->block = block;
+ node->m = (struct w1_netlink_msg *)((u8 *)&block->msg +
+ (size_t)((u8 *)m - (u8 *)msg));
+ node->sl = sl;
+ node->dev = dev;
- while (mlen) {
- cmd = (struct w1_netlink_cmd *)cmd_data;
+ mutex_lock(&dev->list_mutex);
+ list_add_tail(&node->async.async_entry, &dev->async_list);
+ wake_up_process(dev->thread);
+ mutex_unlock(&dev->list_mutex);
+ ++node;
- if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
- err = -E2BIG;
- break;
- }
-
- if (sl)
- err = w1_process_command_slave(sl, msg, m, cmd);
- else
- err = w1_process_command_master(dev, msg, m, cmd);
-
- w1_netlink_send_error(msg, m, cmd, err);
- err = 0;
-
- cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
- mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
- }
-out_up:
- atomic_dec(&dev->refcnt);
- if (sl)
- atomic_dec(&sl->refcnt);
- mutex_unlock(&dev->mutex);
out_cont:
- if (!cmd || err)
- w1_netlink_send_error(msg, m, cmd, err);
- msg->len -= sizeof(struct w1_netlink_msg) + m->len;
+ if (err)
+ w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
+ msg_len -= sizeof(struct w1_netlink_msg) + m->len;
m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
/*
@@ -400,6 +565,8 @@ out_cont:
if (err == -ENODEV)
err = 0;
}
+ if (block && atomic_sub_return(1, &block->refcnt) == 0)
+ kfree(block);
}
int w1_init_netlink(void)
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index b0922dc29658..1e9504e67650 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -27,6 +27,18 @@
#include "w1.h"
+/**
+ * enum w1_netlink_message_types - message type
+ *
+ * @W1_SLAVE_ADD: notification that a slave device was added
+ * @W1_SLAVE_REMOVE: notification that a slave device was removed
+ * @W1_MASTER_ADD: notification that a new bus master was added
+ * @W1_MASTER_REMOVE: notification that a bus master was removed
+ * @W1_MASTER_CMD: initiate operations on a specific master
+ * @W1_SLAVE_CMD: sends reset, selects the slave, then does a read/write/touch
+ * operation
+ * @W1_LIST_MASTERS: used to determine the bus master identifiers
+ */
enum w1_netlink_message_types {
W1_SLAVE_ADD = 0,
W1_SLAVE_REMOVE,
@@ -52,6 +64,22 @@ struct w1_netlink_msg
__u8 data[0];
};
+/**
+ * enum w1_commands - commands available for master or slave operations
+ * @W1_CMD_READ: read len bytes
+ * @W1_CMD_WRITE: write len bytes
+ * @W1_CMD_SEARCH: initiate a standard search, returns only the slave
+ * devices found during that search
+ * @W1_CMD_ALARM_SEARCH: search for devices that are currently alarming
+ * @W1_CMD_TOUCH: Touches a series of bytes.
+ * @W1_CMD_RESET: sends a bus reset on the given master
+ * @W1_CMD_SLAVE_ADD: adds a slave to the given master,
+ * 8 byte slave id at data[0]
+ * @W1_CMD_SLAVE_REMOVE: removes a slave from the given master,
+ * 8 byte slave id at data[0]
+ * @W1_CMD_LIST_SLAVES: list of slaves registered on this master
+ * @W1_CMD_MAX: number of available commands
+ */
enum w1_commands {
W1_CMD_READ = 0,
W1_CMD_WRITE,
@@ -59,7 +87,10 @@ enum w1_commands {
W1_CMD_ALARM_SEARCH,
W1_CMD_TOUCH,
W1_CMD_RESET,
- W1_CMD_MAX,
+ W1_CMD_SLAVE_ADD,
+ W1_CMD_SLAVE_REMOVE,
+ W1_CMD_LIST_SLAVES,
+ W1_CMD_MAX
};
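For reference, a sketch of how one of these commands is laid out when it reaches w1_cn_callback(): a struct cn_msg is immediately followed by a w1_netlink_msg, itself followed by one or more w1_netlink_cmd entries, each len field covering only its own payload. Only fields already used in this series (type, id, len, data, cmd) are assumed:

/* msg is the received struct cn_msg *, as in w1_cn_callback() */
struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);   /* after cn_msg */
struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(m + 1);   /* after the msg header */
u8 *payload = cmd->data;                                         /* cmd->len bytes follow */

/* a W1_CMD_SLAVE_ADD/REMOVE payload is the 8-byte slave id at data[0] */
struct w1_reg_num *id = (struct w1_reg_num *)cmd->data;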
struct w1_netlink_cmd
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5be6e919f785..0c6048d5c9a3 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -87,6 +87,14 @@ config DA9055_WATCHDOG
This driver can also be built as a module. If so, the module
will be called da9055_wdt.
+config GPIO_WATCHDOG
+ tristate "Watchdog device controlled through GPIO-line"
+ depends on OF_GPIO
+ select WATCHDOG_CORE
+ help
+ If you say yes here you get support for a watchdog device
+ controlled through a GPIO line.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -103,13 +111,22 @@ config WM8350_WATCHDOG
Support for the watchdog in the WM8350 AudioPlus PMIC. When
the watchdog triggers the system will be reset.
+config XILINX_WATCHDOG
+ tristate "Xilinx Watchdog timer"
+ select WATCHDOG_CORE
+ help
+ Watchdog driver for the xps_timebase_wdt ip core.
+
+ To compile this driver as a module, choose M here: the
+ module will be called of_xilinx_wdt.
+
# ALPHA Architecture
# ARM Architecture
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
- depends on ARM && ARM_AMBA
+ depends on (ARM || ARM64) && ARM_AMBA
select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -188,6 +205,7 @@ config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
depends on HAVE_S3C2410_WATCHDOG
select WATCHDOG_CORE
+ select MFD_SYSCON if ARCH_EXYNOS5
help
Watchdog timer block in the Samsung SoCs. This will reboot
the system when the timer expires with the watchdog enabled.
@@ -214,10 +232,10 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
- depends on ARM && HAVE_CLK
+ depends on HAS_IOMEM
help
Say Y here if to include support for the Synopsys DesignWare
- watchdog timer found in many ARM chips.
+ watchdog timer found in many chips.
To compile this driver as a module, choose M here: the
module will be called dw_wdt.
@@ -270,10 +288,11 @@ config IOP_WATCHDOG
config DAVINCI_WATCHDOG
tristate "DaVinci watchdog"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
- in the DaVinci DM644x/DM646x processors.
+ in the DaVinci DM644x/DM646x or Keystone processors.
To compile this driver as a module, choose M here: the
module will be called davinci_wdt.
@@ -282,7 +301,7 @@ config DAVINCI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
- depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE
+ depends on ARCH_ORION5X || ARCH_KIRKWOOD || ARCH_DOVE || MACH_DOVE
select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
@@ -411,6 +430,17 @@ config SIRFSOC_WATCHDOG
Support for CSR SiRFprimaII and SiRFatlasVI watchdog. When
the watchdog triggers the system will be reset.
+config TEGRA_WATCHDOG
+ tristate "Tegra watchdog"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ embedded in NVIDIA Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -523,7 +553,7 @@ config GEODE_WDT
config SC520_WDT
tristate "AMD Elan SC520 processor Watchdog"
- depends on X86
+ depends on MELAN
help
This is the driver for the hardware watchdog built in to the
AMD "Elan" SC520 microcomputer commonly used in embedded systems.
@@ -883,13 +913,22 @@ config VIA_WDT
Most people will say N.
config W83627HF_WDT
- tristate "W83627HF/W83627DHG Watchdog Timer"
+ tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
depends on X86
select WATCHDOG_CORE
---help---
- This is the driver for the hardware watchdog on the W83627HF chipset
- as used in Advantech PC-9578 and Tyan S2721-533 motherboards
- (and likely others). The driver also supports the W83627DHG chip.
+ This is the driver for the hardware watchdog on the following
+ Super I/O chips.
+ W83627DHG/DHG-P/EHF/EHG/F/G/HF/S/SF/THF/UHG/UG
+ W83637HF
+ W83667HG/HG-B
+ W83687THF
+ W83697HF
+ W83697UG
+ NCT6775
+ NCT6776
+ NCT6779
+
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -1004,18 +1043,6 @@ config M54xx_WATCHDOG
# MicroBlaze Architecture
-config XILINX_WATCHDOG
- tristate "Xilinx Watchdog timer"
- depends on MICROBLAZE
- ---help---
- Watchdog driver for the xps_timebase_wdt ip core.
-
- IMPORTANT: The xps_timebase_wdt parent must have the property
- "clock-frequency" at device tree.
-
- To compile this driver as a module, choose M here: the
- module will be called of_xilinx_wdt.
-
# MIPS Architecture
config ATH79_WDT
@@ -1139,6 +1166,28 @@ config BCM2835_WDT
To compile this driver as a loadable module, choose M here.
The module will be called bcm2835_wdt.
+config BCM_KONA_WDT
+ tristate "BCM Kona Watchdog"
+ depends on ARCH_BCM_MOBILE
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog timer on the Broadcom BCM281xx family,
+ which includes the BCM11130, BCM11140, BCM11351, BCM28145 and
+ BCM28155 variants.
+
+ Say 'Y' or 'M' here to enable the driver. The module will be called
+ bcm_kona_wdt.
+
+config BCM_KONA_WDT_DEBUG
+ bool "DEBUGFS support for BCM Kona Watchdog"
+ depends on BCM_KONA_WDT
+ help
+ If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
+ access to the driver's internal data structures as well as watchdog
+ timer hardware registers.
+
+ If in doubt, say 'N'.
+
config LANTIQ_WDT
tristate "Lantiq SoC watchdog"
depends on LANTIQ
@@ -1171,6 +1220,7 @@ config MPC5200_WDT
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"
depends on PPC_8xx || PPC_83xx || PPC_86xx
+ select WATCHDOG_CORE
help
This driver is for a SoC level watchdog that exists on some
Freescale PowerPC processors. So far this driver supports:
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 91bd95a64baf..1b5f3d5efad5 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -57,6 +57,8 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
+obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
+obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -171,6 +173,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
+obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 5cf1621def9c..5614416f1032 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -239,7 +239,7 @@ static struct miscdevice acq_miscdev = {
* Init & exit routines
*/
-static int acq_probe(struct platform_device *dev)
+static int __init acq_probe(struct platform_device *dev)
{
int ret;
@@ -291,7 +291,6 @@ static void acq_shutdown(struct platform_device *dev)
}
static struct platform_driver acquirewdt_driver = {
- .probe = acq_probe,
.remove = acq_remove,
.shutdown = acq_shutdown,
.driver = {
@@ -306,20 +305,18 @@ static int __init acq_init(void)
pr_info("WDT driver for Acquire single board computer initialising\n");
- err = platform_driver_register(&acquirewdt_driver);
- if (err)
- return err;
-
acq_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(acq_platform_device)) {
- err = PTR_ERR(acq_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(acq_platform_device))
+ return PTR_ERR(acq_platform_device);
+
+ err = platform_driver_probe(&acquirewdt_driver, acq_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&acquirewdt_driver);
+unreg_platform_device:
+ platform_device_unregister(acq_platform_device);
return err;
}
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index a8961addc59c..7796db7fa6e1 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -238,7 +238,7 @@ static struct miscdevice advwdt_miscdev = {
* Init & exit routines
*/
-static int advwdt_probe(struct platform_device *dev)
+static int __init advwdt_probe(struct platform_device *dev)
{
int ret;
@@ -299,7 +299,6 @@ static void advwdt_shutdown(struct platform_device *dev)
}
static struct platform_driver advwdt_driver = {
- .probe = advwdt_probe,
.remove = advwdt_remove,
.shutdown = advwdt_shutdown,
.driver = {
@@ -314,21 +313,19 @@ static int __init advwdt_init(void)
pr_info("WDT driver for Advantech single board computer initialising\n");
- err = platform_driver_register(&advwdt_driver);
- if (err)
- return err;
-
advwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(advwdt_platform_device)) {
- err = PTR_ERR(advwdt_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(advwdt_platform_device))
+ return PTR_ERR(advwdt_platform_device);
+
+ err = platform_driver_probe(&advwdt_driver, advwdt_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&advwdt_driver);
+unreg_platform_device:
+ platform_device_unregister(advwdt_platform_device);
return err;
}
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fbb7b94cabfd..3a17fbd39f8a 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
+static const struct pci_device_id ali_pci_tbl[] __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 12f0b762b528..996b2f7d330e 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -414,7 +414,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
+static const struct pci_device_id alim7101_pci_tbl[] __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 3a996576343a..ae6c287a49cb 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index afe7d17e6776..25b5c67d3af9 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -323,10 +323,8 @@ static int __init at32_wdt_probe(struct platform_device *pdev)
wdt = devm_kzalloc(&pdev->dev, sizeof(struct wdt_at32ap700x),
GFP_KERNEL);
- if (!wdt) {
- dev_dbg(&pdev->dev, "no memory for wdt structure\n");
+ if (!wdt)
return -ENOMEM;
- }
wdt->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!wdt->regs) {
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index be37dde4f864..489729b26298 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -19,11 +19,13 @@
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/jiffies.h>
@@ -31,22 +33,33 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
-#define wdt_read(field) \
- __raw_readl(at91wdt_private.base + field)
-#define wdt_write(field, val) \
- __raw_writel((val), at91wdt_private.base + field)
+#define wdt_read(wdt, field) \
+ __raw_readl((wdt)->base + (field))
+#define wdt_write(wtd, field, val) \
+ __raw_writel((val), (wdt)->base + (field))
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
*/
-#define ms_to_ticks(t) (((t << 8) / 1000) - 1)
-#define ticks_to_ms(t) (((t + 1) * 1000) >> 8)
+#define ticks_to_hz_rounddown(t) ((((t) + 1) * HZ) >> 8)
+#define ticks_to_hz_roundup(t) (((((t) + 1) * HZ) + 255) >> 8)
+#define ticks_to_secs(t) (((t) + 1) >> 8)
+#define secs_to_ticks(s) ((s) ? (((s) << 8) - 1) : 0)
+
+#define WDT_MR_RESET 0x3FFF2FFF
+
+/* Watchdog max counter value in ticks */
+#define WDT_COUNTER_MAX_TICKS 0xFFF
+
+/* Watchdog max delta/value in secs */
+#define WDT_COUNTER_MAX_SECS ticks_to_secs(WDT_COUNTER_MAX_TICKS)
/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2
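A quick sanity check of these macros against the 256 Hz / 12-bit counter described above (a sketch, not driver code; BUILD_BUG_ON is used only to state the expected values): the largest counter value corresponds to 16 seconds and the conversions round-trip at that boundary.

BUILD_BUG_ON(ticks_to_secs(WDT_COUNTER_MAX_TICKS) != 16); /* (0xFFF + 1) >> 8 = 4096 / 256 */
BUILD_BUG_ON(secs_to_ticks(16) != WDT_COUNTER_MAX_TICKS); /* (16 << 8) - 1 = 4095 = 0xFFF  */
BUILD_BUG_ON(secs_to_ticks(0) != 0);                      /* zero is special-cased         */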
@@ -66,23 +79,40 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static struct watchdog_device at91_wdt_dev;
-static void at91_ping(unsigned long data);
-
-static struct {
+#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
+struct at91wdt {
+ struct watchdog_device wdd;
void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
struct timer_list timer; /* The timer that pings the watchdog */
-} at91wdt_private;
+ u32 mr;
+ u32 mr_mask;
+ unsigned long heartbeat; /* WDT heartbeat in jiffies */
+ bool nowayout;
+ unsigned int irq;
+};
/* ......................................................................... */
+static irqreturn_t wdt_interrupt(int irq, void *dev_id)
+{
+ struct at91wdt *wdt = (struct at91wdt *)dev_id;
+
+ if (wdt_read(wdt, AT91_WDT_SR)) {
+ pr_crit("at91sam9 WDT software reset\n");
+ emergency_restart();
+ pr_crit("Reboot didn't ?????\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Reload the watchdog timer. (ie, pat the watchdog)
*/
-static inline void at91_wdt_reset(void)
+static inline void at91_wdt_reset(struct at91wdt *wdt)
{
- wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -90,26 +120,21 @@ static inline void at91_wdt_reset(void)
*/
static void at91_ping(unsigned long data)
{
- if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
- (!watchdog_active(&at91_wdt_dev))) {
- at91_wdt_reset();
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
- } else
+ struct at91wdt *wdt = (struct at91wdt *)data;
+ if (time_before(jiffies, wdt->next_heartbeat) ||
+ !watchdog_active(&wdt->wdd)) {
+ at91_wdt_reset(wdt);
+ mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+ } else {
pr_crit("I will reset your machine !\n");
-}
-
-static int at91_wdt_ping(struct watchdog_device *wdd)
-{
- /* calculate when the next userspace timeout will be */
- at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
- return 0;
+ }
}
static int at91_wdt_start(struct watchdog_device *wdd)
{
- /* calculate the next userspace timeout and modify the timer */
- at91_wdt_ping(wdd);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ struct at91wdt *wdt = to_wdt(wdd);
+ /* calculate when the next userspace timeout will be */
+ wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
return 0;
}
@@ -122,39 +147,104 @@ static int at91_wdt_stop(struct watchdog_device *wdd)
static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
{
wdd->timeout = new_timeout;
- return 0;
+ return at91_wdt_start(wdd);
}
-/*
- * Set the watchdog time interval in 1/256Hz (write-once)
- * Counter is 12 bit.
- */
-static int at91_wdt_settimeout(unsigned int timeout)
+static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
{
- unsigned int reg;
- unsigned int mr;
-
- /* Check if disabled */
- mr = wdt_read(AT91_WDT_MR);
- if (mr & AT91_WDT_WDDIS) {
- pr_err("sorry, watchdog is disabled\n");
- return -EIO;
+ u32 tmp;
+ u32 delta;
+ u32 value;
+ int err;
+ u32 mask = wdt->mr_mask;
+ unsigned long min_heartbeat = 1;
+ unsigned long max_heartbeat;
+ struct device *dev = &pdev->dev;
+
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ if ((tmp & mask) != (wdt->mr & mask)) {
+ if (tmp == WDT_MR_RESET) {
+ wdt_write(wdt, AT91_WDT_MR, wdt->mr);
+ tmp = wdt_read(wdt, AT91_WDT_MR);
+ }
+ }
+
+ if (tmp & AT91_WDT_WDDIS) {
+ if (wdt->mr & AT91_WDT_WDDIS)
+ return 0;
+ dev_err(dev, "watchdog is disabled\n");
+ return -EINVAL;
+ }
+
+ value = tmp & AT91_WDT_WDV;
+ delta = (tmp & AT91_WDT_WDD) >> 16;
+
+ if (delta < value)
+ min_heartbeat = ticks_to_hz_roundup(value - delta);
+
+ max_heartbeat = ticks_to_hz_rounddown(value);
+ if (!max_heartbeat) {
+ dev_err(dev,
+ "heartbeat is too small for the system to handle it correctly\n");
+ return -EINVAL;
}
/*
- * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
- *
- * Since WDV is a 12-bit counter, the maximum period is
- * 4096 / 256 = 16 seconds.
+ * Try to reset the watchdog counter 4 or 2 times more often than
+ * actually requested, to avoid spurious watchdog reset.
+ * If this is not possible because of the min_heartbeat value, reset
+ * it at the min_heartbeat period.
*/
- reg = AT91_WDT_WDRSTEN /* causes watchdog reset */
- /* | AT91_WDT_WDRPROC causes processor reset only */
- | AT91_WDT_WDDBGHLT /* disabled in debug mode */
- | AT91_WDT_WDD /* restart at any time */
- | (timeout & AT91_WDT_WDV); /* timer value */
- wdt_write(AT91_WDT_MR, reg);
+ if ((max_heartbeat / 4) >= min_heartbeat)
+ wdt->heartbeat = max_heartbeat / 4;
+ else if ((max_heartbeat / 2) >= min_heartbeat)
+ wdt->heartbeat = max_heartbeat / 2;
+ else
+ wdt->heartbeat = min_heartbeat;
+
+ if (max_heartbeat < min_heartbeat + 4)
+ dev_warn(dev,
+ "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+
+ if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+ err = request_irq(wdt->irq, wdt_interrupt,
+ IRQF_SHARED | IRQF_IRQPOLL,
+ pdev->name, wdt);
+ if (err)
+ return err;
+ }
+
+ if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
+ dev_warn(dev,
+ "watchdog already configured differently (mr = %x expecting %x)\n",
+ tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
+
+ setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+
+ /*
+ * Use min_heartbeat the first time to avoid spurious watchdog reset:
+ * we don't know for how long the watchdog counter is running, and
+ * - resetting it right now might trigger a watchdog fault reset
+ * - waiting for heartbeat time might lead to a watchdog timeout
+ * reset
+ */
+ mod_timer(&wdt->timer, jiffies + min_heartbeat);
+
+ /* Try to set timeout from device tree first */
+ if (watchdog_init_timeout(&wdt->wdd, 0, dev))
+ watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
+ watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
+ err = watchdog_register_device(&wdt->wdd);
+ if (err)
+ goto out_stop_timer;
+
+ wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
return 0;
+
+out_stop_timer:
+ del_timer(&wdt->timer);
+ return err;
}
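To make the heartbeat selection above concrete, a worked example with assumed numbers (HZ = 100, a 2-second watchdog value of 512 ticks, and a delta window at least as large as the value); the snippet only restates the arithmetic and is not part of the driver:

/* value = 512 ticks (2 s), delta >= value, HZ = 100 */
max_heartbeat = ticks_to_hz_rounddown(512);   /* ((512 + 1) * 100) >> 8 = 200 jiffies = 2 s */
min_heartbeat = 1;                            /* delta >= value, so no lower bound applies  */
/* max_heartbeat / 4 = 50 jiffies >= min_heartbeat, so the timer pings every ~0.5 s */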
/* ......................................................................... */
@@ -169,61 +259,123 @@ static const struct watchdog_ops at91_wdt_ops = {
.owner = THIS_MODULE,
.start = at91_wdt_start,
.stop = at91_wdt_stop,
- .ping = at91_wdt_ping,
.set_timeout = at91_wdt_set_timeout,
};
-static struct watchdog_device at91_wdt_dev = {
- .info = &at91_wdt_info,
- .ops = &at91_wdt_ops,
- .timeout = WDT_HEARTBEAT,
- .min_timeout = 1,
- .max_timeout = 0xFFFF,
-};
+#if defined(CONFIG_OF)
+static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ u32 min = 0;
+ u32 max = WDT_COUNTER_MAX_SECS;
+ const char *tmp;
+
+ /* Get the interrupts property */
+ wdt->irq = irq_of_parse_and_map(np, 0);
+ if (!wdt->irq)
+ dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
+
+ if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
+ &max)) {
+ if (!max || max > WDT_COUNTER_MAX_SECS)
+ max = WDT_COUNTER_MAX_SECS;
+
+ if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
+ 0, &min)) {
+ if (min >= max)
+ min = max - 1;
+ }
+ }
+
+ min = secs_to_ticks(min);
+ max = secs_to_ticks(max);
+
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->mr = 0;
+ if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+ !strcmp(tmp, "software")) {
+ wdt->mr |= AT91_WDT_WDFIEN;
+ wdt->mr_mask &= ~AT91_WDT_WDRPROC;
+ } else {
+ wdt->mr |= AT91_WDT_WDRSTEN;
+ }
+
+ if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
+ !strcmp(tmp, "proc"))
+ wdt->mr |= AT91_WDT_WDRPROC;
+
+ if (of_property_read_bool(np, "atmel,disable")) {
+ wdt->mr |= AT91_WDT_WDDIS;
+ wdt->mr_mask &= AT91_WDT_WDDIS;
+ }
+
+ if (of_property_read_bool(np, "atmel,idle-halt"))
+ wdt->mr |= AT91_WDT_WDIDLEHLT;
+
+ if (of_property_read_bool(np, "atmel,dbg-halt"))
+ wdt->mr |= AT91_WDT_WDDBGHLT;
+
+ wdt->mr |= max | ((max - min) << 16);
+
+ return 0;
+}
+#else
+static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+ return 0;
+}
+#endif
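A worked example of how the DT heartbeat bounds are folded into the mode register by of_at91wdt_init(). It assumes the 256 Hz watchdog clock noted in the comment removed earlier in this file's diff, approximates secs_to_ticks() with a plain multiply, and assumes WDV sits in the low 12 bits with WDD in bits 16-27, as the << 16 above suggests.

#include <stdio.h>

#define TICKS_PER_SEC 256U      /* SLOW_CLOCK / 128, per the removed comment */

int main(void)
{
        unsigned int min = 2 * TICKS_PER_SEC;           /* atmel,min-heartbeat-sec = 2 */
        unsigned int max = 8 * TICKS_PER_SEC;           /* atmel,max-heartbeat-sec = 8 */
        unsigned int mr  = max | ((max - min) << 16);   /* same expression as the driver */

        printf("WDV = %u ticks, WDD = %u ticks, mr = %#x\n",
               mr & 0xfff, (mr >> 16) & 0xfff, mr);
        return 0;
}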
static int __init at91wdt_probe(struct platform_device *pdev)
{
struct resource *r;
- int res;
+ int err;
+ struct at91wdt *wdt;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
- at91wdt_private.base = ioremap(r->start, resource_size(r));
- if (!at91wdt_private.base) {
- dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
return -ENOMEM;
- }
- at91_wdt_dev.parent = &pdev->dev;
- watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
- watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+ wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
+ AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
+ wdt->mr_mask = 0x3FFFFFFF;
+ wdt->nowayout = nowayout;
+ wdt->wdd.parent = &pdev->dev;
+ wdt->wdd.info = &at91_wdt_info;
+ wdt->wdd.ops = &at91_wdt_ops;
+ wdt->wdd.timeout = WDT_HEARTBEAT;
+ wdt->wdd.min_timeout = 1;
+ wdt->wdd.max_timeout = 0xFFFF;
- /* Set watchdog */
- res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
- if (res)
- return res;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ if (pdev->dev.of_node) {
+ err = of_at91wdt_init(pdev->dev.of_node, wdt);
+ if (err)
+ return err;
+ }
- res = watchdog_register_device(&at91_wdt_dev);
- if (res)
- return res;
+ err = at91_wdt_init(pdev, wdt);
+ if (err)
+ return err;
- at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
- setup_timer(&at91wdt_private.timer, at91_ping, 0);
- mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+ platform_set_drvdata(pdev, wdt);
pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
- at91_wdt_dev.timeout, nowayout);
+ wdt->wdd.timeout, wdt->nowayout);
return 0;
}
static int __exit at91wdt_remove(struct platform_device *pdev)
{
- watchdog_unregister_device(&at91_wdt_dev);
+ struct at91wdt *wdt = platform_get_drvdata(pdev);
+ watchdog_unregister_device(&wdt->wdd);
pr_warn("I quit now, hardware will probably reboot!\n");
- del_timer(&at91wdt_private.timer);
+ del_timer(&wdt->timer);
return 0;
}
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 9fa1f69dac13..399c3fddecf6 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -22,7 +22,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index cafa973c43be..8df450c090a9 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -114,10 +114,8 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
int err;
wdt = devm_kzalloc(dev, sizeof(struct bcm2835_wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(dev, "Failed to allocate memory for watchdog device");
+ if (!wdt)
return -ENOMEM;
- }
platform_set_drvdata(pdev, wdt);
spin_lock_init(&wdt->lock);
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b4021a2b459b..b61fcc535979 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -16,7 +16,6 @@
#include <linux/bcm47xx_wdt.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 4eb188b87f8e..5a8e879a430a 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -15,7 +15,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -45,7 +44,6 @@
static struct {
void __iomem *regs;
struct timer_list timer;
- int default_ticks;
unsigned long inuse;
atomic_t ticks;
} bcm63xx_wdt_device;
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
new file mode 100644
index 000000000000..9c248099f4a2
--- /dev/null
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define SECWDOG_CTRL_REG 0x00000000
+#define SECWDOG_COUNT_REG 0x00000004
+
+#define SECWDOG_RESERVED_MASK 0x1dffffff
+#define SECWDOG_WD_LOAD_FLAG 0x10000000
+#define SECWDOG_EN_MASK 0x08000000
+#define SECWDOG_SRSTEN_MASK 0x04000000
+#define SECWDOG_RES_MASK 0x00f00000
+#define SECWDOG_COUNT_MASK 0x000fffff
+
+#define SECWDOG_MAX_COUNT SECWDOG_COUNT_MASK
+#define SECWDOG_CLKS_SHIFT 20
+#define SECWDOG_MAX_RES 15
+#define SECWDOG_DEFAULT_RESOLUTION 4
+#define SECWDOG_MAX_TRY 1000
+
+#define SECS_TO_TICKS(x, w) ((x) << (w)->resolution)
+#define TICKS_TO_SECS(x, w) ((x) >> (w)->resolution)
+
+#define BCM_KONA_WDT_NAME "bcm_kona_wdt"
+
+struct bcm_kona_wdt {
+ void __iomem *base;
+ /*
+ * One watchdog tick is 1/(2^resolution) seconds. Resolution can take
+ * the values 0-15, meaning one tick can be 1s to 30.52us. Our default
+ * resolution of 4 means one tick is 62.5ms.
+ *
+ * The watchdog counter is 20 bits. Depending on resolution, the maximum
+ * counter value of 0xfffff expires after about 12 days (resolution 0)
+ * down to only 32s (resolution 15). The default resolution of 4 gives
+ * us a maximum of about 18 hours and 12 minutes before the watchdog
+ * times out.
+ */
+ int resolution;
+ spinlock_t lock;
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+ unsigned long busy_count;
+ struct dentry *debugfs;
+#endif
+};
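A standalone sketch of the tick arithmetic described in the comment above, mirroring SECS_TO_TICKS()/TICKS_TO_SECS() at the default resolution of 4; the 120 s timeout is illustrative.

#include <stdio.h>

int main(void)
{
        unsigned int resolution = 4;                    /* driver default */
        unsigned int max_ticks  = 0xfffff;              /* 20-bit counter */
        unsigned int secs       = 120;

        unsigned int ticks = secs << resolution;        /* SECS_TO_TICKS(120) = 1920 */
        unsigned int back  = ticks >> resolution;       /* TICKS_TO_SECS -> 120 */
        unsigned int max_s = max_ticks >> resolution;   /* 65535 s, ~18.2 hours */

        printf("%u s -> %u ticks -> %u s, max timeout %u s\n",
               secs, ticks, back, max_s);
        return 0;
}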
+
+static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
+{
+ uint32_t val;
+ unsigned count = 0;
+
+ /*
+ * If the WD_LOAD_FLAG is set, the watchdog counter field is being
+ * updated in hardware. Once the hardware has finished updating the
+ * counter, the flag is cleared.
+ */
+ do {
+ if (unlikely(count > 1))
+ udelay(5);
+ val = readl_relaxed(wdt->base + offset);
+ count++;
+ } while ((val & SECWDOG_WD_LOAD_FLAG) && count < SECWDOG_MAX_TRY);
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+ /* Remember the maximum number of iterations due to WD_LOAD_FLAG */
+ if (count > wdt->busy_count)
+ wdt->busy_count = count;
+#endif
+
+ /* This is the only place we return a negative value. */
+ if (val & SECWDOG_WD_LOAD_FLAG)
+ return -ETIMEDOUT;
+
+ /* We always mask out reserved bits. */
+ val &= SECWDOG_RESERVED_MASK;
+
+ return val;
+}
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+
+static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
+{
+ int ctl_val, cur_val, ret;
+ unsigned long flags;
+ struct bcm_kona_wdt *wdt = s->private;
+
+ if (!wdt)
+ return seq_puts(s, "No device pointer\n");
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+ cur_val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ if (ctl_val < 0 || cur_val < 0) {
+ ret = seq_puts(s, "Error accessing hardware\n");
+ } else {
+ int ctl, cur, ctl_sec, cur_sec, res;
+
+ ctl = ctl_val & SECWDOG_COUNT_MASK;
+ res = (ctl_val & SECWDOG_RES_MASK) >> SECWDOG_CLKS_SHIFT;
+ cur = cur_val & SECWDOG_COUNT_MASK;
+ ctl_sec = TICKS_TO_SECS(ctl, wdt);
+ cur_sec = TICKS_TO_SECS(cur, wdt);
+ ret = seq_printf(s, "Resolution: %d / %d\n"
+ "Control: %d s / %d (%#x) ticks\n"
+ "Current: %d s / %d (%#x) ticks\n"
+ "Busy count: %lu\n", res,
+ wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
+ cur, cur, wdt->busy_count);
+ }
+
+ return ret;
+}
+
+static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bcm_kona_wdt_dbg_show, inode->i_private);
+}
+
+static const struct file_operations bcm_kona_dbg_operations = {
+ .open = bcm_kona_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev)
+{
+ struct dentry *dir;
+ struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (!wdt)
+ return;
+
+ wdt->debugfs = NULL;
+
+ dir = debugfs_create_dir(BCM_KONA_WDT_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+
+ if (debugfs_create_file("info", S_IFREG | S_IRUGO, dir, wdt,
+ &bcm_kona_dbg_operations))
+ wdt->debugfs = dir;
+ else
+ debugfs_remove_recursive(dir);
+}
+
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev)
+{
+ struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt && wdt->debugfs) {
+ debugfs_remove_recursive(wdt->debugfs);
+ wdt->debugfs = NULL;
+ }
+}
+
+#else
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev) {}
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_BCM_KONA_WDT_DEBUG */
+
+static int bcm_kona_wdt_ctrl_reg_modify(struct bcm_kona_wdt *wdt,
+ unsigned mask, unsigned newval)
+{
+ int val;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+
+ val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+ if (val < 0) {
+ ret = val;
+ } else {
+ val &= ~mask;
+ val |= newval;
+ writel_relaxed(val, wdt->base + SECWDOG_CTRL_REG);
+ }
+
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ return ret;
+}
+
+static int bcm_kona_wdt_set_resolution_reg(struct bcm_kona_wdt *wdt)
+{
+ if (wdt->resolution > SECWDOG_MAX_RES)
+ return -EINVAL;
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_RES_MASK,
+ wdt->resolution << SECWDOG_CLKS_SHIFT);
+}
+
+static int bcm_kona_wdt_set_timeout_reg(struct watchdog_device *wdog,
+ unsigned watchdog_flags)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_COUNT_MASK,
+ SECS_TO_TICKS(wdog->timeout, wdt) |
+ watchdog_flags);
+}
+
+static int bcm_kona_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int t)
+{
+ wdog->timeout = t;
+ return 0;
+}
+
+static unsigned int bcm_kona_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+ int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ if (val < 0)
+ return val;
+
+ return TICKS_TO_SECS(val & SECWDOG_COUNT_MASK, wdt);
+}
+
+static int bcm_kona_wdt_start(struct watchdog_device *wdog)
+{
+ return bcm_kona_wdt_set_timeout_reg(wdog,
+ SECWDOG_EN_MASK | SECWDOG_SRSTEN_MASK);
+}
+
+static int bcm_kona_wdt_stop(struct watchdog_device *wdog)
+{
+ struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+ return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_EN_MASK |
+ SECWDOG_SRSTEN_MASK, 0);
+}
+
+static struct watchdog_ops bcm_kona_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = bcm_kona_wdt_start,
+ .stop = bcm_kona_wdt_stop,
+ .set_timeout = bcm_kona_wdt_set_timeout,
+ .get_timeleft = bcm_kona_wdt_get_timeleft,
+};
+
+static struct watchdog_info bcm_kona_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .identity = "Broadcom Kona Watchdog Timer",
+};
+
+static struct watchdog_device bcm_kona_wdt_wdd = {
+ .info = &bcm_kona_wdt_info,
+ .ops = &bcm_kona_wdt_ops,
+ .min_timeout = 1,
+ .max_timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+ .timeout = SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+};
+
+static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
+{
+ bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
+}
+
+static int bcm_kona_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_kona_wdt *wdt;
+ struct resource *res;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdt->base))
+ return -ENODEV;
+
+ wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
+ ret = bcm_kona_wdt_set_resolution_reg(wdt);
+ if (ret) {
+ dev_err(dev, "Failed to set resolution (error: %d)", ret);
+ return ret;
+ }
+
+ spin_lock_init(&wdt->lock);
+ platform_set_drvdata(pdev, wdt);
+ watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+
+ ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
+ if (ret) {
+ dev_err(dev, "Failed set watchdog timeout");
+ return ret;
+ }
+
+ ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device");
+ return ret;
+ }
+
+ bcm_kona_wdt_debug_init(pdev);
+ dev_dbg(dev, "Broadcom Kona Watchdog Timer");
+
+ return 0;
+}
+
+static int bcm_kona_wdt_remove(struct platform_device *pdev)
+{
+ bcm_kona_wdt_debug_exit(pdev);
+ bcm_kona_wdt_shutdown(pdev);
+ watchdog_unregister_device(&bcm_kona_wdt_wdd);
+ dev_dbg(&pdev->dev, "Watchdog driver disabled");
+
+ return 0;
+}
+
+static const struct of_device_id bcm_kona_wdt_of_match[] = {
+ { .compatible = "brcm,kona-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_wdt_of_match);
+
+static struct platform_driver bcm_kona_wdt_driver = {
+ .driver = {
+ .name = BCM_KONA_WDT_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_kona_wdt_of_match,
+ },
+ .probe = bcm_kona_wdt_probe,
+ .remove = bcm_kona_wdt_remove,
+ .shutdown = bcm_kona_wdt_shutdown,
+};
+
+module_platform_driver(bcm_kona_wdt_driver);
+
+MODULE_ALIAS("platform:" BCM_KONA_WDT_NAME);
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index f1b8d555080e..a8dbceb32914 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -138,14 +138,6 @@ static void __booke_wdt_enable(void *data)
val &= ~WDTP_MASK;
val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
-#ifdef CONFIG_PPC_BOOK3E_64
- /*
- * Crit ints are currently broken on PPC64 Book-E, so
- * just disable them for now.
- */
- val &= ~TCR_WIE;
-#endif
-
mtspr(SPRN_TCR, val);
}
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index f7ae49edb518..6d03e8e30f8b 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 213225edd059..e55ed702209f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/major.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index f09c54e9686f..2e9589652e1e 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -185,7 +185,6 @@ static int da9052_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
if (!driver_data) {
- dev_err(da9052->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 575f37a965a4..495089d8dbfe 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -151,10 +151,8 @@ static int da9055_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
- if (!driver_data) {
- dev_err(da9055->dev, "Failed to allocate watchdog device\n");
+ if (!driver_data)
return -ENOMEM;
- }
driver_data->da9055 = da9055;
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index dd625cca1ae5..d09ad2254b57 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -3,7 +3,7 @@
*
* Watchdog driver for DaVinci DM644x/DM646x processors
*
- * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2006-2013 Texas Instruments.
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
@@ -15,18 +15,11 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/err.h>
#define MODULE_NAME "DAVINCI-WDT: "
@@ -61,142 +54,103 @@
#define WDKEY_SEQ0 (0xa5c6 << 16)
#define WDKEY_SEQ1 (0xda7e << 16)
-static int heartbeat = DEFAULT_HEARTBEAT;
+static int heartbeat;
-static DEFINE_SPINLOCK(io_lock);
-static unsigned long wdt_status;
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-#define WDT_REGION_INITED 2
-#define WDT_DEVICE_INITED 3
-
-static void __iomem *wdt_base;
-struct clk *wdt_clk;
-
-static void wdt_service(void)
-{
- spin_lock(&io_lock);
-
- /* put watchdog in service state */
- iowrite32(WDKEY_SEQ0, wdt_base + WDTCR);
- /* put watchdog in active state */
- iowrite32(WDKEY_SEQ1, wdt_base + WDTCR);
-
- spin_unlock(&io_lock);
-}
+/*
+ * struct to hold data for each WDT device
+ * @base - base I/O address of the WDT device
+ * @clk - source clock of the WDT
+ * @wdd - watchdog device as registered with the watchdog core
+ */
+struct davinci_wdt_device {
+ void __iomem *base;
+ struct clk *clk;
+ struct watchdog_device wdd;
+};
-static void wdt_enable(void)
+static int davinci_wdt_start(struct watchdog_device *wdd)
{
u32 tgcr;
u32 timer_margin;
unsigned long wdt_freq;
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- wdt_freq = clk_get_rate(wdt_clk);
-
- spin_lock(&io_lock);
+ wdt_freq = clk_get_rate(davinci_wdt->clk);
/* disable, internal clock source */
- iowrite32(0, wdt_base + TCR);
+ iowrite32(0, davinci_wdt->base + TCR);
/* reset timer, set mode to 64-bit watchdog, and unreset */
- iowrite32(0, wdt_base + TGCR);
+ iowrite32(0, davinci_wdt->base + TGCR);
tgcr = TIMMODE_64BIT_WDOG | TIM12RS_UNRESET | TIM34RS_UNRESET;
- iowrite32(tgcr, wdt_base + TGCR);
+ iowrite32(tgcr, davinci_wdt->base + TGCR);
/* clear counter regs */
- iowrite32(0, wdt_base + TIM12);
- iowrite32(0, wdt_base + TIM34);
+ iowrite32(0, davinci_wdt->base + TIM12);
+ iowrite32(0, davinci_wdt->base + TIM34);
/* set timeout period */
- timer_margin = (((u64)heartbeat * wdt_freq) & 0xffffffff);
- iowrite32(timer_margin, wdt_base + PRD12);
- timer_margin = (((u64)heartbeat * wdt_freq) >> 32);
- iowrite32(timer_margin, wdt_base + PRD34);
+ timer_margin = (((u64)wdd->timeout * wdt_freq) & 0xffffffff);
+ iowrite32(timer_margin, davinci_wdt->base + PRD12);
+ timer_margin = (((u64)wdd->timeout * wdt_freq) >> 32);
+ iowrite32(timer_margin, davinci_wdt->base + PRD34);
/* enable run continuously */
- iowrite32(ENAMODE12_PERIODIC, wdt_base + TCR);
+ iowrite32(ENAMODE12_PERIODIC, davinci_wdt->base + TCR);
/* Once the WDT is in the pre-active state, writes to
* TIM12, TIM34, PRD12, PRD34, TCR, TGCR and WDTCR are
* write protected (except for the WDKEY field)
*/
/* put watchdog in pre-active state */
- iowrite32(WDKEY_SEQ0 | WDEN, wdt_base + WDTCR);
+ iowrite32(WDKEY_SEQ0 | WDEN, davinci_wdt->base + WDTCR);
/* put watchdog in active state */
- iowrite32(WDKEY_SEQ1 | WDEN, wdt_base + WDTCR);
-
- spin_unlock(&io_lock);
+ iowrite32(WDKEY_SEQ1 | WDEN, davinci_wdt->base + WDTCR);
+ return 0;
}
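A minimal sketch of how the period written to PRD12/PRD34 above is derived; the 24 MHz clock and 600 s timeout are assumed values for illustration, not taken from the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t wdt_freq = 24000000;           /* assumed input clock, Hz */
        uint64_t timeout  = 600;                /* seconds */
        uint64_t period   = timeout * wdt_freq; /* total watchdog ticks */

        uint32_t prd12 = (uint32_t)(period & 0xffffffff);   /* low word  */
        uint32_t prd34 = (uint32_t)(period >> 32);          /* high word */

        printf("period = %llu ticks -> PRD12 = %#x, PRD34 = %#x\n",
               (unsigned long long)period,
               (unsigned int)prd12, (unsigned int)prd34);
        return 0;
}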
-static int davinci_wdt_open(struct inode *inode, struct file *file)
+static int davinci_wdt_ping(struct watchdog_device *wdd)
{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- wdt_enable();
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- return nonseekable_open(inode, file);
+ /* put watchdog in service state */
+ iowrite32(WDKEY_SEQ0, davinci_wdt->base + WDTCR);
+ /* put watchdog in active state */
+ iowrite32(WDKEY_SEQ1, davinci_wdt->base + WDTCR);
+ return 0;
}
-static ssize_t
-davinci_wdt_write(struct file *file, const char *data, size_t len,
- loff_t *ppos)
+static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
{
- if (len)
- wdt_service();
+ u64 timer_counter;
+ unsigned long freq;
+ u32 val;
+ struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
- return len;
-}
+ /* if timeout has occurred then return 0 */
+ val = ioread32(davinci_wdt->base + WDTCR);
+ if (val & WDFLAG)
+ return 0;
-static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING,
- .identity = "DaVinci Watchdog",
-};
+ freq = clk_get_rate(davinci_wdt->clk);
-static long davinci_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret = -ENOTTY;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_service();
- ret = 0;
- break;
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
- }
- return ret;
-}
+ if (!freq)
+ return 0;
-static int davinci_wdt_release(struct inode *inode, struct file *file)
-{
- wdt_service();
- clear_bit(WDT_IN_USE, &wdt_status);
+ timer_counter = ioread32(davinci_wdt->base + TIM12);
+ timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
- return 0;
+ do_div(timer_counter, freq);
+
+ return wdd->timeout - timer_counter;
}
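The reverse calculation used by davinci_wdt_get_timeleft() above, sketched with assumed register and clock values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t freq  = 24000000;              /* assumed clock, Hz */
        uint32_t tim12 = 0x9000, tim34 = 0x2;   /* assumed counter snapshot */
        unsigned int timeout = 600;             /* seconds */

        uint64_t counter = ((uint64_t)tim34 << 32) | tim12;
        unsigned int elapsed = (unsigned int)(counter / freq);

        printf("elapsed %u s, time left %u s\n", elapsed, timeout - elapsed);
        return 0;
}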
-static const struct file_operations davinci_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = davinci_wdt_write,
- .unlocked_ioctl = davinci_wdt_ioctl,
- .open = davinci_wdt_open,
- .release = davinci_wdt_release,
+static const struct watchdog_info davinci_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "DaVinci/Keystone Watchdog",
};
-static struct miscdevice davinci_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &davinci_wdt_fops,
+static const struct watchdog_ops davinci_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = davinci_wdt_start,
+ .stop = davinci_wdt_ping,
+ .ping = davinci_wdt_ping,
+ .get_timeleft = davinci_wdt_get_timeleft,
};
static int davinci_wdt_probe(struct platform_device *pdev)
@@ -204,37 +158,53 @@ static int davinci_wdt_probe(struct platform_device *pdev)
int ret = 0;
struct device *dev = &pdev->dev;
struct resource *wdt_mem;
+ struct watchdog_device *wdd;
+ struct davinci_wdt_device *davinci_wdt;
+
+ davinci_wdt = devm_kzalloc(dev, sizeof(*davinci_wdt), GFP_KERNEL);
+ if (!davinci_wdt)
+ return -ENOMEM;
- wdt_clk = devm_clk_get(dev, NULL);
- if (WARN_ON(IS_ERR(wdt_clk)))
- return PTR_ERR(wdt_clk);
+ davinci_wdt->clk = devm_clk_get(dev, NULL);
+ if (WARN_ON(IS_ERR(davinci_wdt->clk)))
+ return PTR_ERR(davinci_wdt->clk);
- clk_prepare_enable(wdt_clk);
+ clk_prepare_enable(davinci_wdt->clk);
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
+ platform_set_drvdata(pdev, davinci_wdt);
- dev_info(dev, "heartbeat %d sec\n", heartbeat);
+ wdd = &davinci_wdt->wdd;
+ wdd->info = &davinci_wdt_info;
+ wdd->ops = &davinci_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_timeout = MAX_HEARTBEAT;
+ wdd->timeout = DEFAULT_HEARTBEAT;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ dev_info(dev, "heartbeat %d sec\n", wdd->timeout);
+
+ watchdog_set_drvdata(wdd, davinci_wdt);
+ watchdog_set_nowayout(wdd, 1);
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt_base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(wdt_base))
- return PTR_ERR(wdt_base);
+ davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(davinci_wdt->base))
+ return PTR_ERR(davinci_wdt->base);
- ret = misc_register(&davinci_wdt_miscdev);
- if (ret < 0) {
- dev_err(dev, "cannot register misc device\n");
- } else {
- set_bit(WDT_DEVICE_INITED, &wdt_status);
- }
+ ret = watchdog_register_device(wdd);
+ if (ret < 0)
+ dev_err(dev, "cannot register watchdog device\n");
return ret;
}
static int davinci_wdt_remove(struct platform_device *pdev)
{
- misc_deregister(&davinci_wdt_miscdev);
- clk_disable_unprepare(wdt_clk);
+ struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&davinci_wdt->wdd);
+ clk_disable_unprepare(davinci_wdt->clk);
return 0;
}
@@ -247,7 +217,7 @@ MODULE_DEVICE_TABLE(of, davinci_wdt_of_match);
static struct platform_driver platform_wdt_driver = {
.driver = {
- .name = "watchdog",
+ .name = "davinci-wdt",
.owner = THIS_MODULE,
.of_match_table = davinci_wdt_of_match,
},
@@ -267,4 +237,4 @@ MODULE_PARM_DESC(heartbeat,
__MODULE_STRING(DEFAULT_HEARTBEAT));
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:watchdog");
+MODULE_ALIAS("platform:davinci-wdt");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index a46f5c7ee7ff..ee4f86ba83ec 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -8,7 +8,7 @@
* 2 of the License, or (at your option) any later version.
*
* This file implements a driver for the Synopsys DesignWare watchdog device
- * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * in many subsystems. The watchdog has 16 different timeout periods
* and these are a function of the input clock frequency.
*
* The DesignWare watchdog cannot be stopped once it has been started so we
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index d1d07f2f69df..5f54e1e5819a 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -118,16 +118,9 @@ static int ep93xx_wdt_probe(struct platform_device *pdev)
int err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- mmio_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!mmio_base)
- return -ENXIO;
+ mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
if (timeout < 1 || timeout > 3600) {
timeout = WDT_TIMEOUT;
@@ -172,9 +165,9 @@ static struct platform_driver ep93xx_wdt_driver = {
module_platform_driver(ep93xx_wdt_driver);
-MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>,"
- "Alessandro Zummo <a.zummo@towertech.it>,"
- "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>");
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
MODULE_DESCRIPTION("EP93xx Watchdog");
MODULE_LICENSE("GPL");
MODULE_VERSION(WDT_VERSION);
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 4a6ae84b42bc..4c43e3fa8bd2 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -215,7 +215,7 @@ static struct miscdevice geodewdt_miscdev = {
.fops = &geodewdt_fops,
};
-static int geodewdt_probe(struct platform_device *dev)
+static int __init geodewdt_probe(struct platform_device *dev)
{
int ret;
@@ -255,7 +255,6 @@ static void geodewdt_shutdown(struct platform_device *dev)
}
static struct platform_driver geodewdt_driver = {
- .probe = geodewdt_probe,
.remove = geodewdt_remove,
.shutdown = geodewdt_shutdown,
.driver = {
@@ -268,20 +267,18 @@ static int __init geodewdt_init(void)
{
int ret;
- ret = platform_driver_register(&geodewdt_driver);
- if (ret)
- return ret;
-
geodewdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(geodewdt_platform_device)) {
- ret = PTR_ERR(geodewdt_platform_device);
+ if (IS_ERR(geodewdt_platform_device))
+ return PTR_ERR(geodewdt_platform_device);
+
+ ret = platform_driver_probe(&geodewdt_driver, geodewdt_probe);
+ if (ret)
goto err;
- }
return 0;
err:
- platform_driver_unregister(&geodewdt_driver);
+ platform_device_unregister(geodewdt_platform_device);
return ret;
}
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
new file mode 100644
index 000000000000..220a9e07cfd5
--- /dev/null
+++ b/drivers/watchdog/gpio_wdt.c
@@ -0,0 +1,254 @@
+/*
+ * Driver for watchdog device controlled through GPIO-line
+ *
+ * Author: 2013, Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define SOFT_TIMEOUT_MIN 1
+#define SOFT_TIMEOUT_DEF 60
+#define SOFT_TIMEOUT_MAX 0xffff
+
+enum {
+ HW_ALGO_TOGGLE,
+ HW_ALGO_LEVEL,
+};
+
+struct gpio_wdt_priv {
+ int gpio;
+ bool active_low;
+ bool state;
+ unsigned int hw_algo;
+ unsigned int hw_margin;
+ unsigned long last_jiffies;
+ struct notifier_block notifier;
+ struct timer_list timer;
+ struct watchdog_device wdd;
+};
+
+static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
+{
+ gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+
+ /* Put GPIO back to tristate */
+ if (priv->hw_algo == HW_ALGO_TOGGLE)
+ gpio_direction_input(priv->gpio);
+}
+
+static int gpio_wdt_start(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ priv->state = priv->active_low;
+ gpio_direction_output(priv->gpio, priv->state);
+ priv->last_jiffies = jiffies;
+ mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+
+ return 0;
+}
+
+static int gpio_wdt_stop(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ mod_timer(&priv->timer, 0);
+ gpio_wdt_disable(priv);
+
+ return 0;
+}
+
+static int gpio_wdt_ping(struct watchdog_device *wdd)
+{
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ priv->last_jiffies = jiffies;
+
+ return 0;
+}
+
+static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ wdd->timeout = t;
+
+ return gpio_wdt_ping(wdd);
+}
+
+static void gpio_wdt_hwping(unsigned long data)
+{
+ struct watchdog_device *wdd = (struct watchdog_device *)data;
+ struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ if (time_after(jiffies, priv->last_jiffies +
+ msecs_to_jiffies(wdd->timeout * 1000))) {
+ dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+ return;
+ }
+
+ /* Restart timer */
+ mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+ switch (priv->hw_algo) {
+ case HW_ALGO_TOGGLE:
+ /* Toggle output pin */
+ priv->state = !priv->state;
+ gpio_set_value_cansleep(priv->gpio, priv->state);
+ break;
+ case HW_ALGO_LEVEL:
+ /* Pulse */
+ gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+ udelay(1);
+ gpio_set_value_cansleep(priv->gpio, priv->active_low);
+ break;
+ }
+}
+
+static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct gpio_wdt_priv *priv = container_of(nb, struct gpio_wdt_priv,
+ notifier);
+
+ mod_timer(&priv->timer, 0);
+
+ switch (code) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ gpio_wdt_disable(priv);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const struct watchdog_info gpio_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .identity = "GPIO Watchdog",
+};
+
+static const struct watchdog_ops gpio_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gpio_wdt_start,
+ .stop = gpio_wdt_stop,
+ .ping = gpio_wdt_ping,
+ .set_timeout = gpio_wdt_set_timeout,
+};
+
+static int gpio_wdt_probe(struct platform_device *pdev)
+{
+ struct gpio_wdt_priv *priv;
+ enum of_gpio_flags flags;
+ unsigned int hw_margin;
+ unsigned long f = 0;
+ const char *algo;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+ if (!gpio_is_valid(priv->gpio))
+ return priv->gpio;
+
+ priv->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
+ if (ret)
+ return ret;
+ if (!strncmp(algo, "toggle", 6)) {
+ priv->hw_algo = HW_ALGO_TOGGLE;
+ f = GPIOF_IN;
+ } else if (!strncmp(algo, "level", 5)) {
+ priv->hw_algo = HW_ALGO_LEVEL;
+ f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, priv->gpio, f,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "hw_margin_ms", &hw_margin);
+ if (ret)
+ return ret;
+ /* Disallow values lower than 2 and higher than 65535 ms */
+ if (hw_margin < 2 || hw_margin > 65535)
+ return -EINVAL;
+
+ /* Use safe value (1/2 of real timeout) */
+ priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
+
+ watchdog_set_drvdata(&priv->wdd, priv);
+
+ priv->wdd.info = &gpio_wdt_ident;
+ priv->wdd.ops = &gpio_wdt_ops;
+ priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
+ priv->wdd.max_timeout = SOFT_TIMEOUT_MAX;
+
+ if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
+ priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+
+ setup_timer(&priv->timer, gpio_wdt_hwping, (unsigned long)&priv->wdd);
+
+ ret = watchdog_register_device(&priv->wdd);
+ if (ret)
+ return ret;
+
+ priv->notifier.notifier_call = gpio_wdt_notify_sys;
+ ret = register_reboot_notifier(&priv->notifier);
+ if (ret)
+ watchdog_unregister_device(&priv->wdd);
+
+ return ret;
+}
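A worked example of the hw_margin_ms handling above: the DT value is range-checked, halved, and converted to jiffies so the GPIO line is serviced twice per hardware window. HZ = 100 and the local msecs_to_jiffies_example() helper are assumptions for illustration only.

#include <stdio.h>

#define HZ 100                                  /* assumed tick rate */

static unsigned long msecs_to_jiffies_example(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;   /* round up to whole jiffies */
}

int main(void)
{
        unsigned int hw_margin_ms = 1000;       /* from the "hw_margin_ms" DT property */

        if (hw_margin_ms < 2 || hw_margin_ms > 65535)
                return 1;                       /* rejected, as in the probe above */

        unsigned long ping = msecs_to_jiffies_example(hw_margin_ms / 2);
        printf("hardware window %u ms -> GPIO serviced every %lu jiffies (%u ms)\n",
               hw_margin_ms, ping, hw_margin_ms / 2);
        return 0;
}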
+
+static int gpio_wdt_remove(struct platform_device *pdev)
+{
+ struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
+
+ del_timer_sync(&priv->timer);
+ unregister_reboot_notifier(&priv->notifier);
+ watchdog_unregister_device(&priv->wdd);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_wdt_dt_ids[] = {
+ { .compatible = "linux,wdt-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_wdt_dt_ids);
+
+static struct platform_driver gpio_wdt_driver = {
+ .driver = {
+ .name = "gpio-wdt",
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_wdt_dt_ids,
+ },
+ .probe = gpio_wdt_probe,
+ .remove = gpio_wdt_remove,
+};
+module_platform_driver(gpio_wdt_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("GPIO Watchdog");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 45b979d9dd13..75d2243b94f5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
@@ -39,7 +38,7 @@
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.3.2"
+#define HPWDT_VERSION "1.3.3"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
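A quick arithmetic check of the conversion macros above, assuming a 30 s timeout; one hardware tick is 128 ms and the counter is 16 bits wide.

#include <stdio.h>

int main(void)
{
        unsigned int secs  = 30;
        unsigned int ticks = secs * 1000 / 128;         /* SECS_TO_TICKS(30) = 234 */
        unsigned int max_s = 65535 * 128 / 1000;        /* HPWDT_MAX_TIMER = 8388 s */

        printf("%u s -> %u ticks, max timeout %u s\n", secs, ticks, max_s);
        return 0;
}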
@@ -55,7 +54,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
+static const struct pci_device_id hpwdt_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
@@ -501,8 +500,13 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
"but unable to determine source.\n");
}
}
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
+ panic("An NMI occurred. Depending on your system the reason "
+ "for the NMI is logged in any one of the following "
+ "resources:\n"
+ "1. Integrated Management Log (IML)\n"
+ "2. OA Syslog\n"
+ "3. OA Forward Progress Log\n"
+ "4. iLO Event Log");
out:
return NMI_DONE;
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index a72fe9361ddf..d7befd58b391 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -36,7 +36,6 @@
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
@@ -334,7 +333,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
+static const struct pci_device_id esb_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 04f8af65acfd..0e6c0333f775 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -347,15 +347,15 @@ static const struct watchdog_info ident = {
static const struct watchdog_ops iTCO_wdt_ops = {
.owner = THIS_MODULE,
.start = iTCO_wdt_start,
- .stop = iTCO_wdt_stop,
- .ping = iTCO_wdt_ping,
+ .stop = iTCO_wdt_stop,
+ .ping = iTCO_wdt_ping,
.set_timeout = iTCO_wdt_set_timeout,
.get_timeleft = iTCO_wdt_get_timeleft,
};
static struct watchdog_device iTCO_wdt_watchdog_dev = {
.info = &ident,
- .ops = &iTCO_wdt_ops,
+ .ops = &iTCO_wdt_ops,
};
/*
@@ -485,7 +485,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
iTCO_wdt_watchdog_dev.bootstatus = 0;
iTCO_wdt_watchdog_dev.timeout = WATCHDOG_TIMEOUT;
watchdog_set_nowayout(&iTCO_wdt_watchdog_dev, nowayout);
- iTCO_wdt_watchdog_dev.parent = dev->dev.parent;
+ iTCO_wdt_watchdog_dev.parent = &dev->dev;
/* Make sure the watchdog is not running */
iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 7ae36690c449..4247c498ee78 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -277,7 +277,7 @@ static struct miscdevice ibwdt_miscdev = {
* Init & exit routines
*/
-static int ibwdt_probe(struct platform_device *dev)
+static int __init ibwdt_probe(struct platform_device *dev)
{
int res;
@@ -336,7 +336,6 @@ static void ibwdt_shutdown(struct platform_device *dev)
}
static struct platform_driver ibwdt_driver = {
- .probe = ibwdt_probe,
.remove = ibwdt_remove,
.shutdown = ibwdt_shutdown,
.driver = {
@@ -351,21 +350,19 @@ static int __init ibwdt_init(void)
pr_info("WDT driver for IB700 single board computer initialising\n");
- err = platform_driver_register(&ibwdt_driver);
- if (err)
- return err;
-
ibwdt_platform_device = platform_device_register_simple(DRV_NAME,
-1, NULL, 0);
- if (IS_ERR(ibwdt_platform_device)) {
- err = PTR_ERR(ibwdt_platform_device);
- goto unreg_platform_driver;
- }
+ if (IS_ERR(ibwdt_platform_device))
+ return PTR_ERR(ibwdt_platform_device);
+
+ err = platform_driver_probe(&ibwdt_driver, ibwdt_probe);
+ if (err)
+ goto unreg_platform_device;
return 0;
-unreg_platform_driver:
- platform_driver_unregister(&ibwdt_driver);
+unreg_platform_device:
+ platform_device_unregister(ibwdt_platform_device);
return err;
}
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index db0a34460e57..366b0474f278 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -360,7 +360,7 @@ struct ibmasr_id {
int type;
};
-static struct ibmasr_id __initdata ibmasr_id_table[] = {
+static struct ibmasr_id ibmasr_id_table[] __initdata = {
{ "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ },
{ "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL },
{ "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER },
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index b4786bccc42c..dd51d9539b33 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -2,6 +2,7 @@
* Watchdog driver for IMX2 and later processors
*
* Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* some parts adapted by similar drivers from Darius Augulis and Vladimir
* Zapolskiy, additional improvements by Wim Van Sebroeck.
@@ -40,6 +41,7 @@
#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */
#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST (1 << 0) /* -> Watchdog timer Suspend */
#define IMX2_WDT_WSR 0x02 /* Service Register */
#define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */
@@ -87,6 +89,8 @@ static inline void imx2_wdt_setup(void)
{
u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+ /* Suspend timer in low power mode, write once-only */
+ val |= IMX2_WDT_WCR_WDZST;
/* Strip the old watchdog Time-Out value */
val &= ~IMX2_WDT_WCR_WT;
/* Generate reset if WDOG times out */
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 1b5c25a47b87..5d20cdd30efe 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -41,24 +41,15 @@ MODULE_PARM_DESC(nowayout,
static void indydog_start(void)
{
- u32 mc_ctrl0;
-
spin_lock(&indydog_lock);
- mc_ctrl0 = sgimc->cpuctrl0;
- mc_ctrl0 = sgimc->cpuctrl0 | SGIMC_CCTRL0_WDOG;
- sgimc->cpuctrl0 = mc_ctrl0;
+ sgimc->cpuctrl0 |= SGIMC_CCTRL0_WDOG;
spin_unlock(&indydog_lock);
}
static void indydog_stop(void)
{
- u32 mc_ctrl0;
-
spin_lock(&indydog_lock);
-
- mc_ctrl0 = sgimc->cpuctrl0;
- mc_ctrl0 &= ~SGIMC_CCTRL0_WDOG;
- sgimc->cpuctrl0 = mc_ctrl0;
+ sgimc->cpuctrl0 &= ~SGIMC_CCTRL0_WDOG;
spin_unlock(&indydog_lock);
pr_info("Stopped watchdog timer\n");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index e13e65e996aa..0caab6241eb7 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -211,7 +211,6 @@ static int intel_scu_set_heartbeat(u32 t)
int ipc_ret;
int retry_count;
u32 soft_value;
- u32 hw_pre_value;
u32 hw_value;
watchdog_device.timer_set = t;
@@ -273,8 +272,7 @@ static int intel_scu_set_heartbeat(u32 t)
watchdog_device.timer_load_count_addr);
/* read count value before starting timer */
- hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
- hw_pre_value = hw_pre_value & 0xFFFF0000;
+ ioread32(watchdog_device.timer_load_count_addr);
/* Start the timer */
iowrite32(0x00000003, watchdog_device.timer_control_addr);
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index e2bba68ae71e..0b93739c0106 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -54,6 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
+#define DEFAULT_NOCIR 0
#define DEFAULT_EXCLUSIVE 1
#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
@@ -136,11 +137,13 @@
#define WDTS_LOCKED 3
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
+#define WDTS_USE_CIR 6
static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static int nogameport = DEFAULT_NOGAMEPORT;
+static int nocir = DEFAULT_NOCIR;
static int exclusive = DEFAULT_EXCLUSIVE;
static int timeout = DEFAULT_TIMEOUT;
static int testmode = DEFAULT_TESTMODE;
@@ -149,6 +152,9 @@ static bool nowayout = DEFAULT_NOWAYOUT;
module_param(nogameport, int, 0);
MODULE_PARM_DESC(nogameport, "Forbid the activation of game port, default="
__MODULE_STRING(DEFAULT_NOGAMEPORT));
+module_param(nocir, int, 0);
+MODULE_PARM_DESC(nocir, "Forbid the use of Consumer IR interrupts to reset timer, default="
+ __MODULE_STRING(DEFAULT_NOCIR));
module_param(exclusive, int, 0);
MODULE_PARM_DESC(exclusive, "Watchdog exclusive device open, default="
__MODULE_STRING(DEFAULT_EXCLUSIVE));
@@ -258,9 +264,17 @@ static void wdt_keepalive(void)
{
if (test_bit(WDTS_USE_GP, &wdt_status))
inb(base);
- else
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
/* The timer reloads with around 5 msec delay */
outb(0x55, CIR_DR(base));
+ else {
+ if (superio_enter())
+ return;
+
+ superio_select(GPIO);
+ wdt_update_timeout();
+ superio_exit();
+ }
set_bit(WDTS_KEEPALIVE, &wdt_status);
}
@@ -273,7 +287,7 @@ static int wdt_start(void)
superio_select(GPIO);
if (test_bit(WDTS_USE_GP, &wdt_status))
superio_outb(WDT_GAMEPORT, WDTCTRL);
- else
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
superio_outb(WDT_CIRINT, WDTCTRL);
wdt_update_timeout();
@@ -660,7 +674,7 @@ static int __init it87_wdt_init(void)
}
/* If we don't have Gameport support, try to get CIR support */
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (!nocir && !test_bit(WDTS_USE_GP, &wdt_status)) {
if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) {
if (gp_rreq_fail)
pr_err("I/O Address 0x%04x and 0x%04x already in use\n",
@@ -682,6 +696,7 @@ static int __init it87_wdt_init(void)
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
}
+ set_bit(WDTS_USE_CIR, &wdt_status);
}
if (timeout < 1 || timeout > max_units * 60) {
@@ -707,7 +722,7 @@ static int __init it87_wdt_init(void)
}
/* Initialize CIR to use it as keepalive source */
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (test_bit(WDTS_USE_CIR, &wdt_status)) {
outb(0x00, CIR_RCR(base));
outb(0xc0, CIR_TCR1(base));
outb(0x5c, CIR_TCR2(base));
@@ -717,9 +732,9 @@ static int __init it87_wdt_init(void)
outb(0x09, CIR_IER(base));
}
- pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d exclusive=%d nogameport=%d)\n",
+ pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d exclusive=%d nogameport=%d nocir=%d)\n",
chip_type, chip_rev, timeout,
- nowayout, testmode, exclusive, nogameport);
+ nowayout, testmode, exclusive, nogameport, nocir);
superio_exit();
return 0;
@@ -727,8 +742,10 @@ static int __init it87_wdt_init(void)
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
- release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
- if (!test_bit(WDTS_USE_GP, &wdt_status)) {
+ if (test_bit(WDTS_USE_GP, &wdt_status))
+ release_region(base, 1);
+ else if (test_bit(WDTS_USE_CIR, &wdt_status)) {
+ release_region(base, 8);
superio_select(CIR);
superio_outb(ciract, ACTREG);
}
@@ -754,7 +771,7 @@ static void __exit it87_wdt_exit(void)
if (test_bit(WDTS_USE_GP, &wdt_status)) {
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
- } else {
+ } else if (test_bit(WDTS_USE_CIR, &wdt_status)) {
superio_select(CIR);
superio_outb(ciract, ACTREG);
}
@@ -763,7 +780,11 @@ static void __exit it87_wdt_exit(void)
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
- release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
+
+ if (test_bit(WDTS_USE_GP, &wdt_status))
+ release_region(base, 1);
+ else if (test_bit(WDTS_USE_CIR, &wdt_status))
+ release_region(base, 8);
}
module_init(it87_wdt_init);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 3aa50cfa335f..91e45ca589e6 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index bdb3f4a5b27c..0e9cc6f5a919 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -20,7 +20,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 4166e4d116a8..4aa3a8a876fe 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -19,6 +19,8 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
+#include <asm/system_misc.h>
+
#define REG_COUNT 0x4
#define REG_MODE 0x8
#define REG_ENABLE 0xC
@@ -29,8 +31,17 @@ struct moxart_wdt_dev {
unsigned int clock_frequency;
};
+static struct moxart_wdt_dev *moxart_restart_ctx;
+
static int heartbeat;
+static void moxart_wdt_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ writel(1, moxart_restart_ctx->base + REG_COUNT);
+ writel(0x5ab9, moxart_restart_ctx->base + REG_MODE);
+ writel(0x03, moxart_restart_ctx->base + REG_ENABLE);
+}
+
static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
@@ -125,6 +136,9 @@ static int moxart_wdt_probe(struct platform_device *pdev)
if (err)
return err;
+ moxart_restart_ctx = moxart_wdt;
+ arm_pm_restart = moxart_wdt_restart;
+
dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
moxart_wdt->dev.timeout, nowayout);
@@ -135,6 +149,7 @@ static int moxart_wdt_remove(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
+ arm_pm_restart = NULL;
moxart_wdt_stop(&moxart_wdt->dev);
watchdog_unregister_device(&moxart_wdt->dev);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index d82152077fd9..7831955cd9e1 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -73,9 +73,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
* to 0
*/
static int prescale = 1;
-static unsigned int timeout_sec;
-static unsigned long wdt_is_open;
static DEFINE_SPINLOCK(wdt_spinlock);
static void mpc8xxx_wdt_keepalive(void)
@@ -87,39 +85,23 @@ static void mpc8xxx_wdt_keepalive(void)
spin_unlock(&wdt_spinlock);
}
+static struct watchdog_device mpc8xxx_wdt_dev;
static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);
+static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
+ (unsigned long)&mpc8xxx_wdt_dev);
static void mpc8xxx_wdt_timer_ping(unsigned long arg)
{
+ struct watchdog_device *w = (struct watchdog_device *)arg;
+
mpc8xxx_wdt_keepalive();
/* We're pinging it twice faster than needed, just to be sure. */
- mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
-}
-
-static void mpc8xxx_wdt_pr_warn(const char *msg)
-{
- pr_crit("%s, expect the %s soon!\n", msg,
- reset ? "reset" : "machine check exception");
+ mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
}
-static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count)
- mpc8xxx_wdt_keepalive();
- return count;
-}
-
-static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_start(struct watchdog_device *w)
{
u32 tmp = SWCRR_SWEN;
- if (test_and_set_bit(0, &wdt_is_open))
- return -EBUSY;
-
- /* Once we start the watchdog we can't stop it */
- if (nowayout)
- __module_get(THIS_MODULE);
/* Good, fire up the show */
if (prescale)
@@ -133,59 +115,37 @@ static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
del_timer_sync(&wdt_timer);
- return nonseekable_open(inode, file);
+ return 0;
}
-static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_ping(struct watchdog_device *w)
{
- if (!nowayout)
- mpc8xxx_wdt_timer_ping(0);
- else
- mpc8xxx_wdt_pr_warn("watchdog closed");
- clear_bit(0, &wdt_is_open);
+ mpc8xxx_wdt_keepalive();
return 0;
}
-static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int mpc8xxx_wdt_stop(struct watchdog_device *w)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = "MPC8xxx",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- mpc8xxx_wdt_keepalive();
- return 0;
- case WDIOC_GETTIMEOUT:
- return put_user(timeout_sec, p);
- default:
- return -ENOTTY;
- }
+ mod_timer(&wdt_timer, jiffies);
+ return 0;
}
-static const struct file_operations mpc8xxx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = mpc8xxx_wdt_write,
- .unlocked_ioctl = mpc8xxx_wdt_ioctl,
- .open = mpc8xxx_wdt_open,
- .release = mpc8xxx_wdt_release,
+static struct watchdog_info mpc8xxx_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = "MPC8xxx",
};
-static struct miscdevice mpc8xxx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mpc8xxx_wdt_fops,
+static struct watchdog_ops mpc8xxx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = mpc8xxx_wdt_start,
+ .ping = mpc8xxx_wdt_ping,
+ .stop = mpc8xxx_wdt_stop,
+};
+
+static struct watchdog_device mpc8xxx_wdt_dev = {
+ .info = &mpc8xxx_wdt_info,
+ .ops = &mpc8xxx_wdt_ops,
};
static const struct of_device_id mpc8xxx_wdt_match[];
@@ -197,6 +157,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
const struct mpc8xxx_wdt_type *wdt_type;
u32 freq = fsl_get_sys_freq();
bool enabled;
+ unsigned int timeout_sec;
match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
if (!match)
@@ -223,6 +184,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
else
timeout_sec = timeout / freq;
+ mpc8xxx_wdt_dev.timeout = timeout_sec;
#ifdef MODULE
ret = mpc8xxx_wdt_init_late();
if (ret)
@@ -238,7 +200,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
* userspace handles it.
*/
if (enabled)
- mpc8xxx_wdt_timer_ping(0);
+ mod_timer(&wdt_timer, jiffies);
return 0;
err_unmap:
iounmap(wd_base);
@@ -248,9 +210,10 @@ err_unmap:
static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
{
- mpc8xxx_wdt_pr_warn("watchdog removed");
+ pr_crit("Watchdog removed, expect the %s soon!\n",
+ reset ? "reset" : "machine check exception");
del_timer_sync(&wdt_timer);
- misc_deregister(&mpc8xxx_wdt_miscdev);
+ watchdog_unregister_device(&mpc8xxx_wdt_dev);
iounmap(wd_base);
return 0;
@@ -274,6 +237,7 @@ static const struct of_device_id mpc8xxx_wdt_match[] = {
.compatible = "fsl,mpc823-wdt",
.data = &(struct mpc8xxx_wdt_type) {
.prescaler = 0x800,
+ .hw_enabled = true,
},
},
{},
@@ -302,10 +266,11 @@ static int mpc8xxx_wdt_init_late(void)
if (!wd_base)
return -ENODEV;
- ret = misc_register(&mpc8xxx_wdt_miscdev);
+ watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
+
+ ret = watchdog_register_device(&mpc8xxx_wdt_dev);
if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ pr_err("cannot register watchdog device (err=%d)\n", ret);
return ret;
}
return 0;
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index edb31ffd7927..ff27c4ac96e4 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -40,7 +40,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index a0d893b0930e..7135803ca1a3 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -12,7 +12,6 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/kernel.h>
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 231e5b9d5c8e..0b9ec61e1313 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
+static const struct pci_device_id tco_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index fb57103c8ebc..57ccae8327ff 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -1,6 +1,7 @@
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
+ * (C) Copyright 2013 - 2014 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*
* This program is free software; you can redistribute it and/or
@@ -9,18 +10,13 @@
* 2 of the License, or (at your option) any later version.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -43,102 +39,103 @@
#define XWT_TIMER_FAILED 0xFFFFFFFF
#define WATCHDOG_NAME "Xilinx Watchdog"
-#define PFX WATCHDOG_NAME ": "
struct xwdt_device {
- struct resource res;
void __iomem *base;
- u32 nowayout;
u32 wdt_interval;
- u32 boot_status;
+ spinlock_t spinlock;
+ struct watchdog_device xilinx_wdt_wdd;
};
-static struct xwdt_device xdev;
-
-static u32 timeout;
-static u32 control_status_reg;
-static u8 expect_close;
-static u8 no_timeout;
-static unsigned long driver_open;
-
-static DEFINE_SPINLOCK(spinlock);
-
-static void xwdt_start(void)
+static int xilinx_wdt_start(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
/* Clean previous status and enable the watchdog timer */
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK),
- xdev.base + XWT_TWCSR0_OFFSET);
+ xdev->base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(XWT_CSRX_EWDT2_MASK, xdev->base + XWT_TWCSR1_OFFSET);
- iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET);
+ spin_unlock(&xdev->spinlock);
- spin_unlock(&spinlock);
+ return 0;
}
-static void xwdt_stop(void)
+static int xilinx_wdt_stop(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK),
- xdev.base + XWT_TWCSR0_OFFSET);
+ xdev->base + XWT_TWCSR0_OFFSET);
- iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET);
+ iowrite32(0, xdev->base + XWT_TWCSR1_OFFSET);
- spin_unlock(&spinlock);
+ spin_unlock(&xdev->spinlock);
pr_info("Stopped!\n");
+
+ return 0;
}
-static void xwdt_keepalive(void)
+static int xilinx_wdt_keepalive(struct watchdog_device *wdd)
{
- spin_lock(&spinlock);
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
- control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
- iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET);
+ spin_lock(&xdev->spinlock);
- spin_unlock(&spinlock);
-}
+ control_status_reg = ioread32(xdev->base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_TWCSR0_OFFSET);
-static void xwdt_get_status(int *status)
-{
- int new_status;
+ spin_unlock(&xdev->spinlock);
- spin_lock(&spinlock);
+ return 0;
+}
- control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
- new_status = ((control_status_reg &
- (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0);
- spin_unlock(&spinlock);
+static const struct watchdog_info xilinx_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = WATCHDOG_NAME,
+};
- *status = 0;
- if (new_status & 1)
- *status |= WDIOF_CARDRESET;
-}
+static const struct watchdog_ops xilinx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wdt_start,
+ .stop = xilinx_wdt_stop,
+ .ping = xilinx_wdt_keepalive,
+};
-static u32 xwdt_selftest(void)
+static u32 xwdt_selftest(struct xwdt_device *xdev)
{
int i;
u32 timer_value1;
u32 timer_value2;
- spin_lock(&spinlock);
+ spin_lock(&xdev->spinlock);
- timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET);
- timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value1 = ioread32(xdev->base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev->base + XWT_TBR_OFFSET);
for (i = 0;
((i <= XWT_MAX_SELFTEST_LOOP_COUNT) &&
(timer_value2 == timer_value1)); i++) {
- timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev->base + XWT_TBR_OFFSET);
}
- spin_unlock(&spinlock);
+ spin_unlock(&xdev->spinlock);
if (timer_value2 != timer_value1)
return ~XWT_TIMER_FAILED;
@@ -146,238 +143,83 @@ static u32 xwdt_selftest(void)
return XWT_TIMER_FAILED;
}
-static int xwdt_open(struct inode *inode, struct file *file)
-{
- /* Only one process can handle the wdt at a time */
- if (test_and_set_bit(0, &driver_open))
- return -EBUSY;
-
- /* Make sure that the module are always loaded...*/
- if (xdev.nowayout)
- __module_get(THIS_MODULE);
-
- xwdt_start();
- pr_info("Started...\n");
-
- return nonseekable_open(inode, file);
-}
-
-static int xwdt_release(struct inode *inode, struct file *file)
-{
- if (expect_close == 42) {
- xwdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- xwdt_keepalive();
- }
-
- clear_bit(0, &driver_open);
- expect_close = 0;
- return 0;
-}
-
-/*
- * xwdt_write:
- * @file: file handle to the watchdog
- * @buf: buffer to write (unused as data does not matter here
- * @count: count of bytes
- * @ppos: pointer to the position to write. No seeks allowed
- *
- * A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we don't define content meaning.
- */
-static ssize_t xwdt_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!xdev.nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- xwdt_keepalive();
- }
- return len;
-}
-
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE |
- WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = WATCHDOG_NAME,
-};
-
-/*
- * xwdt_ioctl:
- * @file: file handle to the device
- * @cmd: watchdog command
- * @arg: argument pointer
- *
- * The watchdog API defines a common set of functions for all watchdogs
- * according to their available features.
- */
-static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int status;
-
- union {
- struct watchdog_info __user *ident;
- int __user *i;
- } uarg;
-
- uarg.i = (int __user *)arg;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(uarg.ident, &ident,
- sizeof(ident)) ? -EFAULT : 0;
-
- case WDIOC_GETBOOTSTATUS:
- return put_user(xdev.boot_status, uarg.i);
-
- case WDIOC_GETSTATUS:
- xwdt_get_status(&status);
- return put_user(status, uarg.i);
-
- case WDIOC_KEEPALIVE:
- xwdt_keepalive();
- return 0;
-
- case WDIOC_GETTIMEOUT:
- if (no_timeout)
- return -ENOTTY;
- else
- return put_user(timeout, uarg.i);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations xwdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = xwdt_write,
- .open = xwdt_open,
- .release = xwdt_release,
- .unlocked_ioctl = xwdt_ioctl,
-};
-
-static struct miscdevice xwdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &xwdt_fops,
-};
-
static int xwdt_probe(struct platform_device *pdev)
{
int rc;
- u32 *tmptr;
- u32 *pfreq;
-
- no_timeout = 0;
-
- pfreq = (u32 *)of_get_property(pdev->dev.of_node,
- "clock-frequency", NULL);
-
- if (pfreq == NULL) {
- pr_warn("The watchdog clock frequency cannot be obtained!\n");
- no_timeout = 1;
- }
-
- rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res);
- if (rc) {
- pr_warn("invalid address!\n");
- return rc;
- }
-
- tmptr = (u32 *)of_get_property(pdev->dev.of_node,
- "xlnx,wdt-interval", NULL);
- if (tmptr == NULL) {
- pr_warn("Parameter \"xlnx,wdt-interval\" not found in device tree!\n");
- no_timeout = 1;
- } else {
- xdev.wdt_interval = *tmptr;
- }
-
- tmptr = (u32 *)of_get_property(pdev->dev.of_node,
- "xlnx,wdt-enable-once", NULL);
- if (tmptr == NULL) {
- pr_warn("Parameter \"xlnx,wdt-enable-once\" not found in device tree!\n");
- xdev.nowayout = WATCHDOG_NOWAYOUT;
- }
-
-/*
- * Twice of the 2^wdt_interval / freq because the first wdt overflow is
- * ignored (interrupt), reset is only generated at second wdt overflow
- */
- if (!no_timeout)
- timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq);
-
- if (!request_mem_region(xdev.res.start,
- xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) {
- rc = -ENXIO;
- pr_err("memory request failure!\n");
- goto err_out;
- }
-
- xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1);
- if (xdev.base == NULL) {
- rc = -ENOMEM;
- pr_err("ioremap failure!\n");
- goto release_mem;
- }
-
- rc = xwdt_selftest();
+ u32 pfreq = 0, enable_once = 0;
+ struct resource *res;
+ struct xwdt_device *xdev;
+ struct watchdog_device *xilinx_wdt_wdd;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+ xilinx_wdt_wdd->info = &xilinx_wdt_ident;
+ xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
+ xilinx_wdt_wdd->parent = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->base))
+ return PTR_ERR(xdev->base);
+
+ rc = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &pfreq);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "The watchdog clock frequency cannot be obtained\n");
+
+ rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-interval",
+ &xdev->wdt_interval);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-interval\" not found\n");
+
+ rc = of_property_read_u32(pdev->dev.of_node, "xlnx,wdt-enable-once",
+ &enable_once);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-enable-once\" not found\n");
+
+ watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+
+ /*
+ * Twice of the 2^wdt_interval / freq because the first wdt overflow is
+ * ignored (interrupt), reset is only generated at second wdt overflow
+ */
+ if (pfreq && xdev->wdt_interval)
+ xilinx_wdt_wdd->timeout = 2 * ((1 << xdev->wdt_interval) /
+ pfreq);
+
+ spin_lock_init(&xdev->spinlock);
+ watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
+
+ rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
- pr_err("SelfTest routine error!\n");
- goto unmap_io;
+ dev_err(&pdev->dev, "SelfTest routine error\n");
+ return rc;
}
- xwdt_get_status(&xdev.boot_status);
-
- rc = misc_register(&xwdt_miscdev);
+ rc = watchdog_register_device(xilinx_wdt_wdd);
if (rc) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- xwdt_miscdev.minor, rc);
- goto unmap_io;
+ dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
+ return rc;
}
- if (no_timeout)
- pr_info("driver loaded (timeout=? sec, nowayout=%d)\n",
- xdev.nowayout);
- else
- pr_info("driver loaded (timeout=%d sec, nowayout=%d)\n",
- timeout, xdev.nowayout);
+ dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
+ xdev->base, xilinx_wdt_wdd->timeout);
- expect_close = 0;
- clear_bit(0, &driver_open);
+ platform_set_drvdata(pdev, xdev);
return 0;
-
-unmap_io:
- iounmap(xdev.base);
-release_mem:
- release_mem_region(xdev.res.start, resource_size(&xdev.res));
-err_out:
- return rc;
}
-static int xwdt_remove(struct platform_device *dev)
+static int xwdt_remove(struct platform_device *pdev)
{
- misc_deregister(&xwdt_miscdev);
- iounmap(xdev.base);
- release_mem_region(xdev.res.start, resource_size(&xdev.res));
+ struct xwdt_device *xdev = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
return 0;
}
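
The timeout computed in the new xwdt_probe() above follows the in-code comment: the timebase must overflow twice before the core resets, so the effective period is 2 * 2^wdt_interval clock cycles. As a worked example with assumed device-tree values (not taken from any particular board), clock-frequency = 66666666 and xlnx,wdt-interval = 30:

	timeout = 2 * ((1 << 30) / 66666666)
	        = 2 * (1073741824 / 66666666)
	        = 2 * 16 = 32 seconds        (u32 integer division)

If either property is missing, pfreq or wdt_interval stays 0, the calculation is skipped and xilinx_wdt_wdd->timeout is left at 0, which the probe then reports in its dev_info() line.
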
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 09cf0135e8ac..3691b157516a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -34,7 +34,6 @@
#include <linux/mm.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
@@ -58,7 +57,6 @@ struct omap_wdt_dev {
void __iomem *base; /* physical */
struct device *dev;
bool omap_wdt_users;
- struct resource *mem;
int wdt_trgr_pattern;
struct mutex lock; /* to avoid races with PM */
};
@@ -207,7 +205,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
{
struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct watchdog_device *omap_wdt;
- struct resource *res, *mem;
+ struct resource *res;
struct omap_wdt_dev *wdev;
u32 rs;
int ret;
@@ -216,29 +214,20 @@ static int omap_wdt_probe(struct platform_device *pdev)
if (!omap_wdt)
return -ENOMEM;
- /* reserve static register mappings */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- mem = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name);
- if (!mem)
- return -EBUSY;
-
wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
wdev->omap_wdt_users = false;
- wdev->mem = mem;
wdev->dev = &pdev->dev;
wdev->wdt_trgr_pattern = 0x1234;
mutex_init(&wdev->lock);
- wdev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!wdev->base)
- return -ENOMEM;
+ /* reserve static register mappings */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdev->base))
+ return PTR_ERR(wdev->base);
omap_wdt->info = &omap_wdt_info;
omap_wdt->ops = &omap_wdt_ops;
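
Several conversions in this series (omap above, of_xilinx and sp805 elsewhere in the diff) replace the open-coded request_mem_region()/ioremap() pair with devm_ioremap_resource(), which claims the region, maps it and undoes both automatically when the device is unbound, so the matching error paths and remove() cleanup disappear. A minimal sketch of the idiom with a hypothetical driver name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Handles a NULL resource and prints its own error message. */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Use readl()/writel() on base; no iounmap()/release on remove(). */
	return 0;
}
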
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index f7722a424676..498163497c1c 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 5211d56b3681..9f15dd9435d1 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -512,9 +512,8 @@ static int __init pc87413_init(void)
return -EBUSY;
ret = register_reboot_notifier(&pc87413_notifier);
- if (ret != 0) {
+ if (ret != 0)
pr_err("cannot register reboot notifier (err=%d)\n", ret);
- }
ret = misc_register(&pc87413_miscdev);
if (ret != 0) {
@@ -575,8 +574,8 @@ static void __exit pc87413_exit(void)
module_init(pc87413_init);
module_exit(pc87413_exit);
-MODULE_AUTHOR("Sven Anders <anders@anduras.de>, "
- "Marcus Junker <junker@anduras.de>,");
+MODULE_AUTHOR("Sven Anders <anders@anduras.de>");
+MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index b4864f254b48..c0d07eef2640 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -801,7 +801,7 @@ static void pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
+static const struct pci_device_id pcipcwd_pci_tbl[] = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index e562e0476016..1a11aedc4fe8 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -645,10 +645,8 @@ static int usb_pcwd_probe(struct usb_interface *interface,
/* allocate memory for our device and initialize it */
usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
- if (usb_pcwd == NULL) {
- pr_err("Out of memory\n");
+ if (usb_pcwd == NULL)
goto error;
- }
usb_pcwd_device = usb_pcwd;
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 5bec20f5dc2d..15fb316e9437 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 082d06262959..29cf4dcbc59c 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
index f53615dc633d..a7a0695971e4 100644
--- a/drivers/watchdog/retu_wdt.c
+++ b/drivers/watchdog/retu_wdt.c
@@ -16,7 +16,6 @@
* GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 3dd8ed28adc8..cfed0fe264dc 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/of.h>
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 7d8fd041ee25..7c6ccd071baf 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/watchdog.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -40,6 +39,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
@@ -60,6 +61,16 @@
#define CONFIG_S3C2410_WATCHDOG_ATBOOT (0)
#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15)
+#define EXYNOS5_RST_STAT_REG_OFFSET 0x0404
+#define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408
+#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
+#define QUIRK_HAS_PMU_CONFIG (1 << 0)
+#define QUIRK_HAS_RST_STAT (1 << 1)
+
+/* These quirks require that we have a PMU register map */
+#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
+ QUIRK_HAS_RST_STAT)
+
static bool nowayout = WATCHDOG_NOWAYOUT;
static int tmr_margin;
static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT;
@@ -83,6 +94,30 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
"0 to reboot (default 0)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
+/**
+ * struct s3c2410_wdt_variant - Per-variant config data
+ *
+ * @disable_reg: Offset in pmureg for the register that disables the watchdog
+ * timer reset functionality.
+ * @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
+ * timer reset functionality.
+ * @mask_bit: Bit number for the watchdog timer in the disable register and the
+ * mask reset register.
+ * @rst_stat_reg: Offset in pmureg for the register that has the reset status.
+ * @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
+ * reset.
+ * @quirks: A bitfield of quirks.
+ */
+
+struct s3c2410_wdt_variant {
+ int disable_reg;
+ int mask_reset_reg;
+ int mask_bit;
+ int rst_stat_reg;
+ int rst_stat_bit;
+ u32 quirks;
+};
+
struct s3c2410_wdt {
struct device *dev;
struct clk *clock;
@@ -93,7 +128,53 @@ struct s3c2410_wdt {
unsigned long wtdat_save;
struct watchdog_device wdt_device;
struct notifier_block freq_transition;
+ struct s3c2410_wdt_variant *drv_data;
+ struct regmap *pmureg;
+};
+
+static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
+ .quirks = 0
+};
+
+#ifdef CONFIG_OF
+static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
+ .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+ .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+ .mask_bit = 20,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 20,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+ .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+ .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+ .mask_bit = 0,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 9,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt",
+ .data = &drv_data_s3c2410 },
+ { .compatible = "samsung,exynos5250-wdt",
+ .data = &drv_data_exynos5250 },
+ { .compatible = "samsung,exynos5420-wdt",
+ .data = &drv_data_exynos5420 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#endif
+
+static const struct platform_device_id s3c2410_wdt_ids[] = {
+ {
+ .name = "s3c2410-wdt",
+ .driver_data = (unsigned long)&drv_data_s3c2410,
+ },
+ {}
};
+MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
/* watchdog control routines */
@@ -110,6 +191,35 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
return container_of(nb, struct s3c2410_wdt, freq_transition);
}
+static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+ int ret;
+ u32 mask_val = 1 << wdt->drv_data->mask_bit;
+ u32 val = 0;
+
+ /* No need to do anything if no PMU CONFIG needed */
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
+ return 0;
+
+ if (mask)
+ val = mask_val;
+
+ ret = regmap_update_bits(wdt->pmureg,
+ wdt->drv_data->disable_reg,
+ mask_val, val);
+ if (ret < 0)
+ goto error;
+
+ ret = regmap_update_bits(wdt->pmureg,
+ wdt->drv_data->mask_reset_reg,
+ mask_val, val);
+ error:
+ if (ret < 0)
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+ return ret;
+}
+
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -188,7 +298,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
if (timeout < 1)
return -EINVAL;
- freq /= 128;
+ freq = DIV_ROUND_UP(freq, 128);
count = timeout * freq;
DBG("%s: count=%d, timeout=%d, freq=%lu\n",
@@ -200,21 +310,18 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
*/
if (count >= 0x10000) {
- for (divisor = 1; divisor <= 0x100; divisor++) {
- if ((count / divisor) < 0x10000)
- break;
- }
+ divisor = DIV_ROUND_UP(count, 0xffff);
- if ((count / divisor) >= 0x10000) {
+ if (divisor > 0x100) {
dev_err(wdt->dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
- __func__, timeout, divisor, count, count/divisor);
+ __func__, timeout, divisor, count, DIV_ROUND_UP(count, divisor));
- count /= divisor;
+ count = DIV_ROUND_UP(count, divisor);
wdt->count = count;
/* update the pre-scaler */
@@ -264,7 +371,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
return IRQ_HANDLED;
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
@@ -331,6 +438,37 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
}
#endif
+static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
+{
+ unsigned int rst_stat;
+ int ret;
+
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+ return 0;
+
+ ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
+ if (ret)
+ dev_warn(wdt->dev, "Couldn't get RST_STAT register\n");
+ else if (rst_stat & BIT(wdt->drv_data->rst_stat_bit))
+ return WDIOF_CARDRESET;
+
+ return 0;
+}
+
+/* s3c2410_get_wdt_driver_data */
+static inline struct s3c2410_wdt_variant *
+get_wdt_drv_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c2410_wdt_match, pdev->dev.of_node);
+ return (struct s3c2410_wdt_variant *)match->data;
+ } else {
+ return (struct s3c2410_wdt_variant *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+}
+
static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev;
@@ -353,6 +491,16 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
wdt->wdt_device = s3c2410_wdd;
+ wdt->drv_data = get_wdt_drv_data(pdev);
+ if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
+ wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(wdt->pmureg)) {
+ dev_err(dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(wdt->pmureg);
+ }
+ }
+
wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (wdt_irq == NULL) {
dev_err(dev, "no irq resource specified\n");
@@ -377,7 +525,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
goto err;
}
- clk_prepare_enable(wdt->clock);
+ ret = clk_prepare_enable(wdt->clock);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
@@ -415,12 +567,18 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&wdt->wdt_device, nowayout);
+ wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+
ret = watchdog_register_device(&wdt->wdt_device);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
goto err_cpufreq;
}
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ if (ret < 0)
+ goto err_unregister;
+
if (tmr_atboot && started == 0) {
dev_info(dev, "starting watchdog timer\n");
s3c2410wdt_start(&wdt->wdt_device);
@@ -445,12 +603,14 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return 0;
+ err_unregister:
+ watchdog_unregister_device(&wdt->wdt_device);
+
err_cpufreq:
s3c2410wdt_cpufreq_deregister(wdt);
err_clk:
clk_disable_unprepare(wdt->clock);
- wdt->clock = NULL;
err:
return ret;
@@ -458,14 +618,18 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
static int s3c2410wdt_remove(struct platform_device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ if (ret < 0)
+ return ret;
+
watchdog_unregister_device(&wdt->wdt_device);
s3c2410wdt_cpufreq_deregister(wdt);
clk_disable_unprepare(wdt->clock);
- wdt->clock = NULL;
return 0;
}
@@ -474,6 +638,8 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
{
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
+ s3c2410wdt_mask_and_disable_reset(wdt, true);
+
s3c2410wdt_stop(&wdt->wdt_device);
}
@@ -481,12 +647,17 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
static int s3c2410wdt_suspend(struct device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
/* Save watchdog state, and turn it off. */
wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ if (ret < 0)
+ return ret;
+
/* Note that WTCNT doesn't need to be saved. */
s3c2410wdt_stop(&wdt->wdt_device);
@@ -495,6 +666,7 @@ static int s3c2410wdt_suspend(struct device *dev)
static int s3c2410wdt_resume(struct device *dev)
{
+ int ret;
struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
/* Restore watchdog state. */
@@ -502,6 +674,10 @@ static int s3c2410wdt_resume(struct device *dev)
writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
+ ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ if (ret < 0)
+ return ret;
+
dev_info(dev, "watchdog %sabled\n",
(wdt->wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
@@ -512,18 +688,11 @@ static int s3c2410wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
s3c2410wdt_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id s3c2410_wdt_match[] = {
- { .compatible = "samsung,s3c2410-wdt" },
- {},
-};
-MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
-#endif
-
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
.remove = s3c2410wdt_remove,
.shutdown = s3c2410wdt_shutdown,
+ .id_table = s3c2410_wdt_ids,
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
@@ -538,4 +707,3 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-wdt");
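
The DIV_ROUND_UP changes in s3c2410wdt_set_heartbeat() above round the prescaled clock and the divisor up instead of truncating, so the programmed count can no longer undershoot the requested timeout and the old trial-division loop is gone. A worked example with an assumed 66 MHz PCLK (not a specific board) and a 15 second timeout:

	freq    = DIV_ROUND_UP(66000000, 128)   = 515625
	count   = 15 * 515625                   = 7734375  (>= 0x10000, divisor needed)
	divisor = DIV_ROUND_UP(7734375, 0xffff) = 119      (<= 0x100, timeout accepted)
	count   = DIV_ROUND_UP(7734375, 119)    = 64995    (fits the 16-bit count register)

Only the rounding direction changed; the hardware is still programmed from count and divisor as before.
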
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index f353e18b1a82..1cfd3f6a13d5 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -158,12 +158,11 @@ static void wdt_timer_ping(unsigned long data)
static void wdt_config(int writeval)
{
- __u16 dummy;
unsigned long flags;
/* buy some time (ping) */
spin_lock_irqsave(&wdt_spinlock, flags);
- dummy = readw(wdtmrctl); /* ensure write synchronization */
+ readw(wdtmrctl); /* ensure write synchronization */
writew(0xAAAA, wdtmrctl);
writew(0x5555, wdtmrctl);
/* unlock WDT = make WDT configuration register writable one time */
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index af3528f84d65..d04d02b41c32 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -293,8 +293,6 @@ static int sh_wdt_probe(struct platform_device *pdev)
static int sh_wdt_remove(struct platform_device *pdev)
{
- struct sh_wdt *wdt = platform_get_drvdata(pdev);
-
watchdog_unregister_device(&sh_wdt_dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index ced3edc95957..702d07870808 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -212,7 +212,7 @@ static struct platform_driver sirfsoc_wdt_driver = {
.name = "sirfsoc-wdt",
.owner = THIS_MODULE,
.pm = &sirfsoc_wdt_pm_ops,
- .of_match_table = of_match_ptr(sirfsoc_wdt_of_match),
+ .of_match_table = sirfsoc_wdt_of_match,
},
.probe = sirfsoc_wdt_probe,
.remove = sirfsoc_wdt_remove,
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index c04a1aa158e2..0dc5e323d59d 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int soft_noboot = 0;
+static int soft_noboot;
module_param(soft_noboot, int, 0);
MODULE_PARM_DESC(soft_noboot,
"Softdog action, set to 1 to ignore reboots, 0 to reboot (default=0)");
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index ce63a1bbf395..5cca9cddb87d 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -303,7 +303,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
+static const struct pci_device_id sp5100_tco_pci_tbl[] = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 3f786ce0a6f2..47629d268e0a 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -16,7 +16,6 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
@@ -209,27 +208,15 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
struct sp805_wdt *wdt;
int ret = 0;
- if (!devm_request_mem_region(&adev->dev, adev->res.start,
- resource_size(&adev->res), "sp805_wdt")) {
- dev_warn(&adev->dev, "Failed to get memory region resource\n");
- ret = -ENOENT;
- goto err;
- }
-
wdt = devm_kzalloc(&adev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt) {
- dev_warn(&adev->dev, "Kzalloc failed\n");
ret = -ENOMEM;
goto err;
}
- wdt->base = devm_ioremap(&adev->dev, adev->res.start,
- resource_size(&adev->res));
- if (!wdt->base) {
- ret = -ENOMEM;
- dev_warn(&adev->dev, "ioremap fail\n");
- goto err;
- }
+ wdt->base = devm_ioremap_resource(&adev->dev, &adev->res);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
wdt->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(wdt->clk)) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index bb64ae3f47da..3804d5e9baea 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -9,7 +9,6 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 76332d893e12..cd00a7836cdc 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -205,7 +205,7 @@ static void sunxi_wdt_shutdown(struct platform_device *pdev)
}
static const struct of_device_id sunxi_wdt_dt_ids[] = {
- { .compatible = "allwinner,sun4i-wdt" },
+ { .compatible = "allwinner,sun4i-a10-wdt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
new file mode 100644
index 000000000000..750e2a26cb12
--- /dev/null
+++ b/drivers/watchdog/tegra_wdt.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/* minimum and maximum watchdog trigger timeout, in seconds */
+#define MIN_WDT_TIMEOUT 1
+#define MAX_WDT_TIMEOUT 255
+
+/*
+ * Base of the WDT registers, from the timer base address. There are
+ * actually 5 watchdogs that can be configured (by pairing with an available
+ * timer), at bases 0x100 + (WDT ID) * 0x20, where WDT ID is 0 through 4.
+ * This driver only configures the first watchdog (WDT ID 0).
+ */
+#define WDT_BASE 0x100
+#define WDT_ID 0
+
+/*
+ * Register base of the timer that's selected for pairing with the watchdog.
+ * This driver arbitrarily uses timer 5, which is currently unused by
+ * other drivers (in particular, the Tegra clocksource driver). If this
+ * needs to change, take care that the new timer is not used by the
+ * clocksource driver.
+ */
+#define WDT_TIMER_BASE 0x60
+#define WDT_TIMER_ID 5
+
+/* WDT registers */
+#define WDT_CFG 0x0
+#define WDT_CFG_PERIOD_SHIFT 4
+#define WDT_CFG_PERIOD_MASK 0xff
+#define WDT_CFG_INT_EN (1 << 12)
+#define WDT_CFG_PMC2CAR_RST_EN (1 << 15)
+#define WDT_STS 0x4
+#define WDT_STS_COUNT_SHIFT 4
+#define WDT_STS_COUNT_MASK 0xff
+#define WDT_STS_EXP_SHIFT 12
+#define WDT_STS_EXP_MASK 0x3
+#define WDT_CMD 0x8
+#define WDT_CMD_START_COUNTER (1 << 0)
+#define WDT_CMD_DISABLE_COUNTER (1 << 1)
+#define WDT_UNLOCK (0xc)
+#define WDT_UNLOCK_PATTERN (0xc45a << 0)
+
+/* Timer registers */
+#define TIMER_PTV 0x0
+#define TIMER_EN (1 << 31)
+#define TIMER_PERIODIC (1 << 30)
+
+struct tegra_wdt {
+ struct watchdog_device wdd;
+ void __iomem *wdt_regs;
+ void __iomem *tmr_regs;
+};
+
+#define WDT_HEARTBEAT 120
+static int heartbeat = WDT_HEARTBEAT;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeats in seconds. (default = "
+ __MODULE_STRING(WDT_HEARTBEAT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int tegra_wdt_start(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 val;
+
+ /*
+ * This thing has a fixed 1MHz clock. Normally, we would set the
+ * period to 1 second by writing 1000000ul, but the watchdog system
+ * reset actually occurs on the 4th expiration of this counter,
+ * so we set the period to 1/4 of this amount.
+ */
+ val = 1000000ul / 4;
+ val |= (TIMER_EN | TIMER_PERIODIC);
+ writel(val, wdt->tmr_regs + TIMER_PTV);
+
+ /*
+ * Set number of periods and start counter.
+ *
+ * Interrupt handler is not required for user space
+ * WDT accesses, since the caller is responsible to ping the
+ * WDT to reset the counter before expiration, through ioctls.
+ */
+ val = WDT_TIMER_ID |
+ (wdd->timeout << WDT_CFG_PERIOD_SHIFT) |
+ WDT_CFG_PMC2CAR_RST_EN;
+ writel(val, wdt->wdt_regs + WDT_CFG);
+
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);
+
+ return 0;
+}
+
+static int tegra_wdt_stop(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDT_UNLOCK_PATTERN, wdt->wdt_regs + WDT_UNLOCK);
+ writel(WDT_CMD_DISABLE_COUNTER, wdt->wdt_regs + WDT_CMD);
+ writel(0, wdt->tmr_regs + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_wdt_ping(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);
+
+ return 0;
+}
+
+static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ if (watchdog_active(wdd))
+ return tegra_wdt_start(wdd);
+
+ return 0;
+}
+
+static unsigned int tegra_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 val;
+ int count;
+ int exp;
+
+ val = readl(wdt->wdt_regs + WDT_STS);
+
+ /* Current countdown (from timeout) */
+ count = (val >> WDT_STS_COUNT_SHIFT) & WDT_STS_COUNT_MASK;
+
+ /* Number of expirations (we are waiting for the 4th expiration) */
+ exp = (val >> WDT_STS_EXP_SHIFT) & WDT_STS_EXP_MASK;
+
+ /*
+ * The entire thing is divided by 4 because we are ticking down 4 times
+ * faster due to needing to wait for the 4th expiration.
+ */
+ return (((3 - exp) * wdd->timeout) + count) / 4;
+}
+
+static const struct watchdog_info tegra_wdt_info = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 0,
+ .identity = "Tegra Watchdog",
+};
+
+static struct watchdog_ops tegra_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = tegra_wdt_start,
+ .stop = tegra_wdt_stop,
+ .ping = tegra_wdt_ping,
+ .set_timeout = tegra_wdt_set_timeout,
+ .get_timeleft = tegra_wdt_get_timeleft,
+};
+
+static int tegra_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ struct tegra_wdt *wdt;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ /* This is the timer base. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /*
+ * Allocate our watchdog driver data, which has the
+ * struct watchdog_device nested within it.
+ */
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ /* Initialize struct tegra_wdt. */
+ wdt->wdt_regs = regs + WDT_BASE;
+ wdt->tmr_regs = regs + WDT_TIMER_BASE;
+
+ /* Initialize struct watchdog_device. */
+ wdd = &wdt->wdd;
+ wdd->timeout = heartbeat;
+ wdd->info = &tegra_wdt_info;
+ wdd->ops = &tegra_wdt_ops;
+ wdd->min_timeout = MIN_WDT_TIMEOUT;
+ wdd->max_timeout = MAX_WDT_TIMEOUT;
+
+ watchdog_set_drvdata(wdd, wdt);
+
+ watchdog_set_nowayout(wdd, nowayout);
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register watchdog device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ dev_info(&pdev->dev,
+ "initialized (heartbeat = %d sec, nowayout = %d)\n",
+ heartbeat, nowayout);
+
+ return 0;
+}
+
+static int tegra_wdt_remove(struct platform_device *pdev)
+{
+ struct tegra_wdt *wdt = platform_get_drvdata(pdev);
+
+ tegra_wdt_stop(&wdt->wdd);
+
+ watchdog_unregister_device(&wdt->wdd);
+
+ dev_info(&pdev->dev, "removed wdt\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_wdt_runtime_suspend(struct device *dev)
+{
+ struct tegra_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ tegra_wdt_stop(&wdt->wdd);
+
+ return 0;
+}
+
+static int tegra_wdt_runtime_resume(struct device *dev)
+{
+ struct tegra_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ tegra_wdt_start(&wdt->wdd);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id tegra_wdt_of_match[] = {
+ { .compatible = "nvidia,tegra30-timer", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
+
+static const struct dev_pm_ops tegra_wdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
+ tegra_wdt_runtime_resume)
+};
+
+static struct platform_driver tegra_wdt_driver = {
+ .probe = tegra_wdt_probe,
+ .remove = tegra_wdt_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tegra-wdt",
+ .pm = &tegra_wdt_pm_ops,
+ .of_match_table = tegra_wdt_of_match,
+ },
+};
+module_platform_driver(tegra_wdt_driver);
+
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_DESCRIPTION("Tegra Watchdog Driver");
+MODULE_LICENSE("GPL v2");
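
In the new tegra_wdt driver above, the paired timer is programmed with a 0.25 s period (1000000 µs / 4 on the fixed 1 MHz clock) because the SoC only asserts the reset on the fourth watchdog expiration; tegra_wdt_get_timeleft() undoes that scaling in software. A worked example of the formula with assumed status-register readings (illustrative values, not measurements), for wdd->timeout = 120:

	just started:        exp = 0, count = 120  ->  ((3 - 0) * 120 + 120) / 4 = 120 s
	after two expiries:  exp = 2, count = 60   ->  ((3 - 2) * 120 + 60)  / 4 = 45 s

Each count tick is one 0.25 s timer period, hence the final division by four.
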
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 09d4831aa61f..afa9d6ef353a 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -61,7 +61,7 @@ struct ts72xx_wdt {
struct platform_device *pdev;
};
-struct platform_device *ts72xx_wdt_pdev;
+static struct platform_device *ts72xx_wdt_pdev;
/*
* TS-72xx Watchdog supports following timeouts (value written
@@ -394,10 +394,8 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
int error = 0;
wdt = devm_kzalloc(&pdev->dev, sizeof(struct ts72xx_wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!wdt)
return -ENOMEM;
- }
r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->control_reg = devm_ioremap_resource(&pdev->dev, r1);
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 1a68f760cf86..d2cd9f0bcb9a 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -239,7 +239,7 @@ static void wdt_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+static const struct pci_device_id wdt_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index e24b21082874..b1da0c18fd1a 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -44,10 +44,13 @@
#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
-/* You must set this - there is no sane way to probe for this board. */
-static int wdt_io = 0x2E;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
+static int wdt_io;
+static int cr_wdt_timeout; /* WDT timeout register */
+static int cr_wdt_control; /* WDT control register */
+
+enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
+ w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
+ w83667hg_b, nct6775, nct6776, nct6779 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
@@ -72,6 +75,29 @@ MODULE_PARM_DESC(nowayout,
#define W83627HF_LD_WDT 0x08
+#define W83627HF_ID 0x52
+#define W83627S_ID 0x59
+#define W83697HF_ID 0x60
+#define W83697UG_ID 0x68
+#define W83637HF_ID 0x70
+#define W83627THF_ID 0x82
+#define W83687THF_ID 0x85
+#define W83627EHF_ID 0x88
+#define W83627DHG_ID 0xa0
+#define W83627UHG_ID 0xa2
+#define W83667HG_ID 0xa5
+#define W83627DHG_P_ID 0xb0
+#define W83667HG_B_ID 0xb3
+#define NCT6775_ID 0xb4
+#define NCT6776_ID 0xc3
+#define NCT6779_ID 0xc5
+
+#define W83627HF_WDT_TIMEOUT 0xf6
+#define W83697HF_WDT_TIMEOUT 0xf4
+
+#define W83627HF_WDT_CONTROL 0xf5
+#define W83697HF_WDT_CONTROL 0xf3
+
static void superio_outb(int reg, int val)
{
outb(reg, WDT_EFER);
@@ -106,10 +132,7 @@ static void superio_exit(void)
release_region(wdt_io, 2);
}
-/* tyan motherboards seem to set F5 to 0x4C ?
- * So explicitly init to appropriate value. */
-
-static int w83627hf_init(struct watchdog_device *wdog)
+static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
{
int ret;
unsigned char t;
@@ -119,35 +142,83 @@ static int w83627hf_init(struct watchdog_device *wdog)
return ret;
superio_select(W83627HF_LD_WDT);
- t = superio_inb(0x20); /* check chip version */
- if (t == 0x82) { /* W83627THF */
- t = (superio_inb(0x2b) & 0xf7);
- superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */
- } else if (t == 0x88 || t == 0xa0) { /* W83627EHF / W83627DHG */
- t = superio_inb(0x2d);
- superio_outb(0x2d, t & ~0x01); /* set GPIO5 to WDT0 */
- }
/* set CR30 bit 0 to activate GPIO2 */
t = superio_inb(0x30);
if (!(t & 0x01))
superio_outb(0x30, t | 0x01);
- t = superio_inb(0xF6);
+ switch (chip) {
+ case w83627hf:
+ case w83627s:
+ t = superio_inb(0x2B) & ~0x10;
+ superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
+ break;
+ case w83697hf:
+ /* Set pin 119 to WDTO# mode (= CR29, WDT0) */
+ t = superio_inb(0x29) & ~0x60;
+ t |= 0x20;
+ superio_outb(0x29, t);
+ break;
+ case w83697ug:
+ /* Set pin 118 to WDTO# mode */
+ t = superio_inb(0x2b) & ~0x04;
+ superio_outb(0x2b, t);
+ break;
+ case w83627thf:
+ t = (superio_inb(0x2B) & ~0x08) | 0x04;
+ superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
+ break;
+ case w83627dhg:
+ case w83627dhg_p:
+ t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
+ superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
+ t = superio_inb(cr_wdt_control);
+ t |= 0x02; /* enable the WDTO# output low pulse
+ * to the KBRST# pin */
+ superio_outb(cr_wdt_control, t);
+ break;
+ case w83637hf:
+ break;
+ case w83687thf:
+ t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
+ superio_outb(0x2C, t);
+ break;
+ case w83627ehf:
+ case w83627uhg:
+ case w83667hg:
+ case w83667hg_b:
+ case nct6775:
+ case nct6776:
+ case nct6779:
+ /*
+ * These chips have a fixed WDTO# output pin (W83627UHG),
+ * or support more than one WDTO# output pin.
+ * Don't touch its configuration, and hope the BIOS
+ * does the right thing.
+ */
+ t = superio_inb(cr_wdt_control);
+ t |= 0x02; /* enable the WDTO# output low pulse
+ * to the KBRST# pin */
+ superio_outb(cr_wdt_control, t);
+ break;
+ default:
+ break;
+ }
+
+ t = superio_inb(cr_wdt_timeout);
if (t != 0) {
pr_info("Watchdog already running. Resetting timeout to %d sec\n",
wdog->timeout);
- superio_outb(0xF6, wdog->timeout);
+ superio_outb(cr_wdt_timeout, wdog->timeout);
}
/* set second mode & disable keyboard turning off watchdog */
- t = superio_inb(0xF5) & ~0x0C;
- /* enable the WDTO# output low pulse to the KBRST# pin */
- t |= 0x02;
- superio_outb(0xF5, t);
+ t = superio_inb(cr_wdt_control) & ~0x0C;
+ superio_outb(cr_wdt_control, t);
- /* disable keyboard & mouse turning off watchdog */
- t = superio_inb(0xF7) & ~0xC0;
+ /* reset trigger, disable keyboard & mouse turning off watchdog */
+ t = superio_inb(0xF7) & ~0xD0;
superio_outb(0xF7, t);
superio_exit();
@@ -164,7 +235,7 @@ static int wdt_set_time(unsigned int timeout)
return ret;
superio_select(W83627HF_LD_WDT);
- superio_outb(0xF6, timeout);
+ superio_outb(cr_wdt_timeout, timeout);
superio_exit();
return 0;
@@ -197,7 +268,7 @@ static unsigned int wdt_get_time(struct watchdog_device *wdog)
return 0;
superio_select(W83627HF_LD_WDT);
- timeleft = superio_inb(0xF6);
+ timeleft = superio_inb(cr_wdt_timeout);
superio_exit();
return timeleft;
@@ -249,16 +320,123 @@ static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
+static int wdt_find(int addr)
+{
+ u8 val;
+ int ret;
+
+ cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
+ cr_wdt_control = W83627HF_WDT_CONTROL;
+
+ ret = superio_enter();
+ if (ret)
+ return ret;
+ superio_select(W83627HF_LD_WDT);
+ val = superio_inb(0x20);
+ switch (val) {
+ case W83627HF_ID:
+ ret = w83627hf;
+ break;
+ case W83627S_ID:
+ ret = w83627s;
+ break;
+ case W83697HF_ID:
+ ret = w83697hf;
+ cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+ cr_wdt_control = W83697HF_WDT_CONTROL;
+ break;
+ case W83697UG_ID:
+ ret = w83697ug;
+ cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+ cr_wdt_control = W83697HF_WDT_CONTROL;
+ break;
+ case W83637HF_ID:
+ ret = w83637hf;
+ break;
+ case W83627THF_ID:
+ ret = w83627thf;
+ break;
+ case W83687THF_ID:
+ ret = w83687thf;
+ break;
+ case W83627EHF_ID:
+ ret = w83627ehf;
+ break;
+ case W83627DHG_ID:
+ ret = w83627dhg;
+ break;
+ case W83627DHG_P_ID:
+ ret = w83627dhg_p;
+ break;
+ case W83627UHG_ID:
+ ret = w83627uhg;
+ break;
+ case W83667HG_ID:
+ ret = w83667hg;
+ break;
+ case W83667HG_B_ID:
+ ret = w83667hg_b;
+ break;
+ case NCT6775_ID:
+ ret = nct6775;
+ break;
+ case NCT6776_ID:
+ ret = nct6776;
+ break;
+ case NCT6779_ID:
+ ret = nct6779;
+ break;
+ case 0xff:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENODEV;
+ pr_err("Unsupported chip ID: 0x%02x\n", val);
+ break;
+ }
+ superio_exit();
+ return ret;
+}
+
static int __init wdt_init(void)
{
int ret;
+ int chip;
+ const char * const chip_name[] = {
+ "W83627HF",
+ "W83627S",
+ "W83697HF",
+ "W83697UG",
+ "W83637HF",
+ "W83627THF",
+ "W83687THF",
+ "W83627EHF",
+ "W83627DHG",
+ "W83627UHG",
+ "W83667HG",
+ "W83667DHG-P",
+ "W83667HG-B",
+ "NCT6775",
+ "NCT6776",
+ "NCT6779",
+ };
+
+ wdt_io = 0x2e;
+ chip = wdt_find(0x2e);
+ if (chip < 0) {
+ wdt_io = 0x4e;
+ chip = wdt_find(0x4e);
+ if (chip < 0)
+ return chip;
+ }
- pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n");
+ pr_info("WDT driver for %s Super I/O chip initialising\n",
+ chip_name[chip]);
watchdog_init_timeout(&wdt_dev, timeout, NULL);
watchdog_set_nowayout(&wdt_dev, nowayout);
- ret = w83627hf_init(&wdt_dev);
+ ret = w83627hf_init(&wdt_dev, chip);
if (ret) {
pr_err("failed to initialize watchdog (err=%d)\n", ret);
return ret;
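
The new wdt_find() above lets the w83627hf driver identify the Super I/O chip itself, so wdt_init() now probes the two conventional config ports (0x2e, then 0x4e) instead of relying on the old wdt_io module parameter. The superio_enter()/superio_inb()/superio_exit() helpers it builds on use the usual Winbond index/data handshake; a rough sketch under that assumption, with the magic values taken from Winbond datasheets rather than from this hunk:

#include <linux/io.h>

/* base is the config port (0x2e or 0x4e); the data port is base + 1. */
static void superio_probe_sketch(unsigned int base)
{
	outb(0x87, base);	/* unlock key, written twice ... */
	outb(0x87, base);	/* ... enters extended function mode */

	outb(0x07, base);	/* index the logical-device select register */
	outb(0x08, base + 1);	/* select LD 8, the watchdog (W83627HF_LD_WDT) */

	outb(0x20, base);	/* index the chip-ID register read by wdt_find() */
	/* inb(base + 1) now returns the ID compared against W83627HF_ID etc. */

	outb(0xaa, base);	/* leave extended function mode */
}
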
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index aaf2995d37f4..e9ea856b8ff2 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -402,7 +402,7 @@ static int __init wdt_init(void)
if (!found) {
pr_err("No W83697HF/HG could be found\n");
- ret = -EIO;
+ ret = -ENODEV;
goto out;
}
@@ -455,6 +455,6 @@ module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, "
- "Samuel Tardieu <sam@rfc1149.net>");
+MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
+MODULE_AUTHOR("Samuel Tardieu <sam@rfc1149.net>");
MODULE_DESCRIPTION("w83697hf/hg WDT driver");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 461336c4519f..cec9b559647d 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -78,7 +78,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
watchdog_check_min_max_timeout(wdd);
/* try to get the timeout module parameter first */
- if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+ if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
wdd->timeout = timeout_parm;
return ret;
}
@@ -89,7 +89,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
if (dev == NULL || dev->of_node == NULL)
return ret;
of_property_read_u32(dev->of_node, "timeout-sec", &t);
- if (!watchdog_timeout_invalid(wdd, t))
+ if (!watchdog_timeout_invalid(wdd, t) && t)
wdd->timeout = t;
else
ret = -EINVAL;
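
The watchdog_core change above makes watchdog_init_timeout() treat a zero module parameter or a zero "timeout-sec" property as "not specified" rather than as a valid value, so a driver's built-in default is no longer clobbered by 0. A minimal caller sketch (hypothetical driver; the helper and its signature are the ones used by the drivers in this series):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

static unsigned int timeout;		/* 0 = keep the driver default */
module_param(timeout, uint, 0);

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->min_timeout = 1;
	wdd->max_timeout = 255;
	wdd->timeout     = 30;		/* driver default, in seconds */

	/*
	 * Tries the module parameter first, then "timeout-sec" from the
	 * device tree; zero or out-of-range values now leave the default.
	 */
	watchdog_init_timeout(wdd, timeout, &pdev->dev);

	return 0;
}
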
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 7355ddd0b207..ebbb183be618 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -139,9 +139,8 @@ static const struct watchdog_info ident = {
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- unsigned int new_margin;
int __user *int_arg = (int __user *)arg;
- int ret = -ENOTTY;
+ int new_margin, ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index ee89ba4dea63..48b2c058b009 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -49,7 +49,6 @@
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/io.h>
@@ -720,7 +719,7 @@ static void wdtpci_remove_one(struct pci_dev *pdev)
}
-static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
+static const struct pci_device_id wdtpci_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index e243bd01c774..2fa17e746ff6 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -204,7 +204,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
GFP_KERNEL);
if (!driver_data) {
- dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index c794ea182140..38fb36e1c592 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -3,7 +3,6 @@ menu "Xen driver support"
config XEN_BALLOON
bool "Xen memory balloon driver"
- depends on !ARM
default y
help
The balloon driver allows the Xen domain to request more memory from
@@ -222,7 +221,7 @@ config XEN_ACPI_PROCESSOR
To do that the driver parses the Power Management data and uploads
said information to the Xen hypervisor. Then the Xen hypervisor can
- select the proper Cx and Pxx states. It also registers itslef as the
+ select the proper Cx and Pxx states. It also registers itself as the
SMM so that other drivers (such as ACPI cpufreq scaling driver) will
not load.
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 14fe79d8634a..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,8 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o events.o balloon.o manage.o
+obj-y += grant-table.o features.o balloon.o manage.o
+obj-y += events/
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -15,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4c02e2b94103..61a6ac8fa8fc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -157,13 +157,6 @@ static struct page *balloon_retrieve(bool prefer_highmem)
return page;
}
-static struct page *balloon_first_page(void)
-{
- if (list_empty(&ballooned_pages))
- return NULL;
- return list_entry(ballooned_pages.next, struct page, lru);
-}
-
static struct page *balloon_next_page(struct page *page)
{
struct list_head *next = page->lru.next;
@@ -328,7 +321,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- page = balloon_first_page();
+ page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
for (i = 0; i < nr_pages; i++) {
if (!page) {
nr_pages = i;
@@ -406,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ scrub_page(page);
- pfn = page_to_pfn(page);
- frame_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = page_to_pfn(page);
+ }
- scrub_page(page);
+ /*
+ * Ensure that ballooned highmem pages don't have kmaps.
+ *
+ * Do this before changing the p2m as kmap_flush_unused()
+ * reads PTEs to obtain pages (and hence needs the original
+ * p2m entry).
+ */
+ kmap_flush_unused();
+
+ /* Update direct mapping, invalidate P2M, and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = frame_list[i];
+ frame_list[i] = pfn_to_mfn(pfn);
+ page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
@@ -436,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
#endif
- balloon_append(pfn_to_page(pfn));
+ balloon_append(page);
}
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
diff --git a/drivers/xen/dbgp.c b/drivers/xen/dbgp.c
index f3ccc80a455f..8145a59fd9f6 100644
--- a/drivers/xen/dbgp.c
+++ b/drivers/xen/dbgp.c
@@ -19,7 +19,7 @@ static int xen_dbgp_op(struct usb_hcd *hcd, int op)
dbgp.op = op;
#ifdef CONFIG_PCI
- if (ctrlr->bus == &pci_bus_type) {
+ if (dev_is_pci(ctrlr)) {
const struct pci_dev *pdev = to_pci_dev(ctrlr);
dbgp.u.pci.seg = pci_domain_nr(pdev->bus);
diff --git a/drivers/xen/events/Makefile b/drivers/xen/events/Makefile
new file mode 100644
index 000000000000..62be55cd981d
--- /dev/null
+++ b/drivers/xen/events/Makefile
@@ -0,0 +1,5 @@
+obj-y += events.o
+
+events-y += events_base.o
+events-y += events_2l.o
+events-y += events_fifo.o
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
new file mode 100644
index 000000000000..5db43fc100a4
--- /dev/null
+++ b/drivers/xen/events/events_2l.c
@@ -0,0 +1,365 @@
+/*
+ * Xen event channels (2-level ABI)
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "events_internal.h"
+
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in a evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD],
+ cpu_evtchn_mask);
+
+static unsigned evtchn_2l_max_channels(void)
+{
+ return EVTCHN_2L_NR_CHANNELS;
+}
+
+static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+{
+ clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
+ set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+}
+
+static void evtchn_2l_clear_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_clear_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static void evtchn_2l_set_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static bool evtchn_2l_is_pending(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+}
+
+static bool evtchn_2l_test_and_set_mask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+static void evtchn_2l_mask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
+}
+
+static void evtchn_2l_unmask(unsigned port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ unsigned int cpu = get_cpu();
+ int do_hypercall = 0, evtchn_pending = 0;
+
+ BUG_ON(!irqs_disabled());
+
+ if (unlikely((cpu != cpu_from_evtchn(port))))
+ do_hypercall = 1;
+ else {
+ /*
+ * Need to clear the mask before checking pending to
+ * avoid a race with an event becoming pending.
+ *
+ * EVTCHNOP_unmask will only trigger an upcall if the
+ * mask bit was set, so if a hypercall is needed
+ * remask the event.
+ */
+ sync_clear_bit(port, BM(&s->evtchn_mask[0]));
+ evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
+
+ if (unlikely(evtchn_pending && xen_hvm_domain())) {
+ sync_set_bit(port, BM(&s->evtchn_mask[0]));
+ do_hypercall = 1;
+ }
+ }
+
+ /* Slow path (hypercall) if this is a non-local port or if this is
+ * an hvm domain and an event is pending (hvm domains don't have
+ * their own implementation of irq_enable). */
+ if (do_hypercall) {
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ } else {
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+ /*
+ * The following is basically the equivalent of
+ * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
+ * the interrupt edge' if the channel is masked.
+ */
+ if (evtchn_pending &&
+ !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+ BM(&vcpu_info->evtchn_pending_sel)))
+ vcpu_info->evtchn_upcall_pending = 1;
+ }
+
+ put_cpu();
+}
+
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
+
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+ struct shared_info *sh,
+ unsigned int idx)
+{
+ return sh->evtchn_pending[idx] &
+ per_cpu(cpu_evtchn_mask, cpu)[idx] &
+ ~sh->evtchn_mask[idx];
+}
+
+/*
+ * Search the CPU's pending events bitmasks. For each one found, map
+ * the event number to an irq, and feed it into do_IRQ() for handling.
+ *
+ * Xen uses a two-level bitmap to speed searching. The first level is
+ * a bitset of words which contain pending event bits. The second
+ * level is a bitset of pending events themselves.
+ */
+static void evtchn_2l_handle_events(unsigned cpu)
+{
+ int irq;
+ xen_ulong_t pending_words;
+ xen_ulong_t pending_bits;
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+ int i;
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+
+ /* Timer interrupt has highest priority. */
+ irq = irq_from_virq(cpu, VIRQ_TIMER);
+ if (irq != -1) {
+ unsigned int evtchn = evtchn_from_irq(irq);
+ word_idx = evtchn / BITS_PER_LONG;
+ bit_idx = evtchn % BITS_PER_LONG;
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+ generic_handle_irq(irq);
+ }
+
+ /*
+ * Master flag must be cleared /before/ clearing
+ * selector flag. xchg_xen_ulong must contain an
+ * appropriate barrier.
+ */
+ pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
+ xen_ulong_t words;
+
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = EVTCHN_FIRST_BIT(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ /*
+ * We scan the starting word in two parts.
+ *
+ * 1st time: start in the middle, scanning the
+ * upper bits.
+ *
+ * 2nd time: scan the whole word (not just the
+ * parts skipped in the first pass) -- if an
+ * event in the previously scanned bits is
+ * pending again it would just be scanned on
+ * the next loop anyway.
+ */
+ if (word_idx == start_word_idx) {
+ if (i == 0)
+ bit_idx = start_bit_idx;
+ }
+
+ do {
+ xen_ulong_t bits;
+ int port;
+
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = EVTCHN_FIRST_BIT(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
+ irq = get_evtchn_to_irq(port);
+
+ if (irq != -1)
+ generic_handle_irq(irq);
+
+ bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
+
+ /* Next caller starts at last processed + 1 */
+ __this_cpu_write(current_word_idx,
+ bit_idx ? word_idx :
+ (word_idx+1) % BITS_PER_EVTCHN_WORD);
+ __this_cpu_write(current_bit_idx, bit_idx);
+ } while (bit_idx != 0);
+
+ /* Scan start_l1i twice; all others once. */
+ if ((word_idx != start_word_idx) || (i != 0))
+ pending_words &= ~(1UL << word_idx);
+
+ word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
+ }
+}
+
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+{
+ struct shared_info *sh = HYPERVISOR_shared_info;
+ int cpu = smp_processor_id();
+ xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+ int i;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(debug_lock);
+ struct vcpu_info *v;
+
+ spin_lock_irqsave(&debug_lock, flags);
+
+ printk("\nvcpu %d\n ", cpu);
+
+ for_each_online_cpu(i) {
+ int pending;
+ v = per_cpu(xen_vcpu, i);
+ pending = (get_irq_regs() && i == cpu)
+ ? xen_irqs_disabled(get_irq_regs())
+ : v->evtchn_upcall_mask;
+ printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
+ pending, v->evtchn_upcall_pending,
+ (int)(sizeof(v->evtchn_pending_sel)*2),
+ v->evtchn_pending_sel);
+ }
+ v = per_cpu(xen_vcpu, cpu);
+
+ printk("\npending:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)sizeof(sh->evtchn_pending[0])*2,
+ sh->evtchn_pending[i],
+ i % 8 == 0 ? "\n " : " ");
+ printk("\nglobal mask:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nglobally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocal cpu%d mask:\n ", cpu);
+ for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+ printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
+ cpu_evtchn[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+ xen_ulong_t pending = sh->evtchn_pending[i]
+ & ~sh->evtchn_mask[i]
+ & cpu_evtchn[i];
+ printk("%0*"PRI_xen_ulong"%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ pending, i % 8 == 0 ? "\n " : " ");
+ }
+
+ printk("\npending list:\n");
+ for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
+ printk(" %d: event %d -> irq %d%s%s%s\n",
+ cpu_from_evtchn(i), i,
+ get_evtchn_to_irq(i),
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
+ ? "" : " l2-clear",
+ !sync_test_bit(i, BM(sh->evtchn_mask))
+ ? "" : " globally-masked",
+ sync_test_bit(i, BM(cpu_evtchn))
+ ? "" : " locally-masked");
+ }
+ }
+
+ spin_unlock_irqrestore(&debug_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static const struct evtchn_ops evtchn_ops_2l = {
+ .max_channels = evtchn_2l_max_channels,
+ .nr_channels = evtchn_2l_max_channels,
+ .bind_to_cpu = evtchn_2l_bind_to_cpu,
+ .clear_pending = evtchn_2l_clear_pending,
+ .set_pending = evtchn_2l_set_pending,
+ .is_pending = evtchn_2l_is_pending,
+ .test_and_set_mask = evtchn_2l_test_and_set_mask,
+ .mask = evtchn_2l_mask,
+ .unmask = evtchn_2l_unmask,
+ .handle_events = evtchn_2l_handle_events,
+};
+
+void __init xen_evtchn_2l_init(void)
+{
+ pr_info("Using 2-level ABI\n");
+ evtchn_ops = &evtchn_ops_2l;
+}
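
For reference, the scan in evtchn_2l_handle_events() above recovers a port number from the selector-word index and the bit index as word_idx * BITS_PER_EVTCHN_WORD + bit_idx. The following standalone userspace sketch shows the same two-level walk in isolation; the selector value, the sample pending words and the native word size are illustrative assumptions, and none of the kernel helpers (active_evtchns(), EVTCHN_FIRST_BIT()) are used:

#include <stdio.h>

#define BITS_PER_WORD (sizeof(unsigned long) * 8)

int main(void)
{
	/* Hypothetical sample data: word 0 has bits 0 and 3 pending,
	 * word 1 has bit 2 pending (port 64 + 2 on a 64-bit build). */
	unsigned long pending[2] = { 0x9UL, 0x4UL };
	unsigned long sel = 0x3UL;	/* first level: both words non-empty */

	for (unsigned int w = 0; w < 2; w++) {
		if (!(sel & (1UL << w)))
			continue;	/* no pending events in this word */
		for (unsigned int b = 0; b < BITS_PER_WORD; b++)
			if (pending[w] & (1UL << b))
				printf("port %lu pending\n",
				       (unsigned long)(w * BITS_PER_WORD + b));
	}
	return 0;
}
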
diff --git a/drivers/xen/events.c b/drivers/xen/events/events_base.c
index 4035e833ea26..c3458f58de90 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events/events_base.c
@@ -59,6 +59,10 @@
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>
+#include "events_internal.h"
+
+const struct evtchn_ops *evtchn_ops;
+
/*
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
@@ -73,71 +77,15 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
-/* Interrupt types. */
-enum xen_irq_type {
- IRQT_UNBOUND = 0,
- IRQT_PIRQ,
- IRQT_VIRQ,
- IRQT_IPI,
- IRQT_EVTCHN
-};
-
-/*
- * Packed IRQ information:
- * type - enum xen_irq_type
- * event channel - irq->event channel mapping
- * cpu - cpu this event channel is bound to
- * index - type-specific information:
- * PIRQ - physical IRQ, GSI, flags, and owner domain
- * VIRQ - virq number
- * IPI - IPI vector
- * EVTCHN -
- */
-struct irq_info {
- struct list_head list;
- int refcnt;
- enum xen_irq_type type; /* type */
- unsigned irq;
- unsigned short evtchn; /* event channel */
- unsigned short cpu; /* cpu bound */
-
- union {
- unsigned short virq;
- enum ipi_vector ipi;
- struct {
- unsigned short pirq;
- unsigned short gsi;
- unsigned char flags;
- uint16_t domid;
- } pirq;
- } u;
-};
-#define PIRQ_NEEDS_EOI (1 << 0)
-#define PIRQ_SHAREABLE (1 << 1)
-
-static int *evtchn_to_irq;
+int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);
-/*
- * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
- * careful to only use bitops which allow for this (e.g
- * test_bit/find_first_bit and friends but not __ffs) and to pass
- * BITS_PER_EVTCHN_WORD as the bitmask length.
- */
-#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
-/*
- * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
- * array. Primarily to avoid long lines (hence the terse name).
- */
-#define BM(x) (unsigned long *)(x)
-/* Find the first set bit in a evtchn mask */
-#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
-
-static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
- cpu_evtchn_mask);
+#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
+#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -148,19 +96,75 @@ static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);
+static void clear_evtchn_to_irq_row(unsigned row)
+{
+ unsigned col;
+
+ for (col = 0; col < EVTCHN_PER_ROW; col++)
+ evtchn_to_irq[row][col] = -1;
+}
+
+static void clear_evtchn_to_irq_all(void)
+{
+ unsigned row;
+
+ for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
+ if (evtchn_to_irq[row] == NULL)
+ continue;
+ clear_evtchn_to_irq_row(row);
+ }
+}
+
+static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+{
+ unsigned row;
+ unsigned col;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -EINVAL;
+
+ row = EVTCHN_ROW(evtchn);
+ col = EVTCHN_COL(evtchn);
+
+ if (evtchn_to_irq[row] == NULL) {
+ /* Unallocated irq entries return -1 anyway */
+ if (irq == -1)
+ return 0;
+
+ evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
+ if (evtchn_to_irq[row] == NULL)
+ return -ENOMEM;
+
+ clear_evtchn_to_irq_row(row);
+ }
+
+ evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ return 0;
+}
+
+int get_evtchn_to_irq(unsigned evtchn)
+{
+ if (evtchn >= xen_evtchn_max_channels())
+ return -1;
+ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+ return -1;
+ return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+}
+
/* Get info for IRQ */
-static struct irq_info *info_for_irq(unsigned irq)
+struct irq_info *info_for_irq(unsigned irq)
{
return irq_get_handler_data(irq);
}
/* Constructors for packed IRQ information. */
-static void xen_irq_info_common_init(struct irq_info *info,
+static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
enum xen_irq_type type,
- unsigned short evtchn,
+ unsigned evtchn,
unsigned short cpu)
{
+ int ret;
BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
@@ -169,68 +173,78 @@ static void xen_irq_info_common_init(struct irq_info *info,
info->evtchn = evtchn;
info->cpu = cpu;
- evtchn_to_irq[evtchn] = irq;
+ ret = set_evtchn_to_irq(evtchn, irq);
+ if (ret < 0)
+ return ret;
irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+
+ return xen_evtchn_port_setup(info);
}
-static void xen_irq_info_evtchn_init(unsigned irq,
- unsigned short evtchn)
+static int xen_irq_info_evtchn_setup(unsigned irq,
+ unsigned evtchn)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
+ return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static void xen_irq_info_ipi_init(unsigned cpu,
+static int xen_irq_info_ipi_setup(unsigned cpu,
unsigned irq,
- unsigned short evtchn,
+ unsigned evtchn,
enum ipi_vector ipi)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
-
info->u.ipi = ipi;
per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}
-static void xen_irq_info_virq_init(unsigned cpu,
+static int xen_irq_info_virq_setup(unsigned cpu,
unsigned irq,
- unsigned short evtchn,
- unsigned short virq)
+ unsigned evtchn,
+ unsigned virq)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
-
info->u.virq = virq;
per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}
-static void xen_irq_info_pirq_init(unsigned irq,
- unsigned short evtchn,
- unsigned short pirq,
- unsigned short gsi,
+static int xen_irq_info_pirq_setup(unsigned irq,
+ unsigned evtchn,
+ unsigned pirq,
+ unsigned gsi,
uint16_t domid,
unsigned char flags)
{
struct irq_info *info = info_for_irq(irq);
- xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
-
info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi;
info->u.pirq.domid = domid;
info->u.pirq.flags = flags;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+}
+
+static void xen_irq_info_cleanup(struct irq_info *info)
+{
+ set_evtchn_to_irq(info->evtchn, -1);
+ info->evtchn = 0;
}
/*
* Accessors for packed IRQ information.
*/
-static unsigned int evtchn_from_irq(unsigned irq)
+unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
return 0;
@@ -240,10 +254,15 @@ static unsigned int evtchn_from_irq(unsigned irq)
unsigned irq_from_evtchn(unsigned int evtchn)
{
- return evtchn_to_irq[evtchn];
+ return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
+int irq_from_virq(unsigned int cpu, unsigned int virq)
+{
+ return per_cpu(virq_to_irq, cpu)[virq];
+}
+
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -279,14 +298,14 @@ static enum xen_irq_type type_from_irq(unsigned irq)
return info_for_irq(irq)->type;
}
-static unsigned cpu_from_irq(unsigned irq)
+unsigned cpu_from_irq(unsigned irq)
{
return info_for_irq(irq)->cpu;
}
-static unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
unsigned ret = 0;
if (irq != -1)
@@ -310,67 +329,28 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static inline xen_ulong_t active_evtchns(unsigned int cpu,
- struct shared_info *sh,
- unsigned int idx)
-{
- return sh->evtchn_pending[idx] &
- per_cpu(cpu_evtchn_mask, cpu)[idx] &
- ~sh->evtchn_mask[idx];
-}
-
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
- int irq = evtchn_to_irq[chn];
+ int irq = get_evtchn_to_irq(chn);
+ struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
+ cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
#endif
+ xen_evtchn_port_bind_to_cpu(info, cpu);
- clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
- set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
-
- info_for_irq(irq)->cpu = cpu;
-}
-
-static void init_evtchn_cpu_bindings(void)
-{
- int i;
-#ifdef CONFIG_SMP
- struct irq_info *info;
-
- /* By default all event channels notify CPU#0. */
- list_for_each_entry(info, &xen_irq_list_head, list) {
- struct irq_desc *desc = irq_to_desc(info->irq);
- cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
- }
-#endif
-
- for_each_possible_cpu(i)
- memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
-}
-
-static inline void clear_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_clear_bit(port, BM(&s->evtchn_pending[0]));
+ info->cpu = cpu;
}
-static inline void set_evtchn(int port)
+static void xen_evtchn_mask_all(void)
{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, BM(&s->evtchn_pending[0]));
-}
+ unsigned int evtchn;
-static inline int test_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+ mask_evtchn(evtchn);
}
-
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
* @irq: irq of event channel to send event to
@@ -388,71 +368,12 @@ void notify_remote_via_irq(int irq)
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
-static void mask_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- sync_set_bit(port, BM(&s->evtchn_mask[0]));
-}
-
-static void unmask_evtchn(int port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- unsigned int cpu = get_cpu();
- int do_hypercall = 0, evtchn_pending = 0;
-
- BUG_ON(!irqs_disabled());
-
- if (unlikely((cpu != cpu_from_evtchn(port))))
- do_hypercall = 1;
- else {
- /*
- * Need to clear the mask before checking pending to
- * avoid a race with an event becoming pending.
- *
- * EVTCHNOP_unmask will only trigger an upcall if the
- * mask bit was set, so if a hypercall is needed
- * remask the event.
- */
- sync_clear_bit(port, BM(&s->evtchn_mask[0]));
- evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
-
- if (unlikely(evtchn_pending && xen_hvm_domain())) {
- sync_set_bit(port, BM(&s->evtchn_mask[0]));
- do_hypercall = 1;
- }
- }
-
- /* Slow path (hypercall) if this is a non-local port or if this is
- * an hvm domain and an event is pending (hvm domains don't have
- * their own implementation of irq_enable). */
- if (do_hypercall) {
- struct evtchn_unmask unmask = { .port = port };
- (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
- } else {
- struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-
- /*
- * The following is basically the equivalent of
- * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
- * the interrupt edge' if the channel is masked.
- */
- if (evtchn_pending &&
- !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
- BM(&vcpu_info->evtchn_pending_sel)))
- vcpu_info->evtchn_upcall_pending = 1;
- }
-
- put_cpu();
-}
-
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
#ifdef CONFIG_SMP
- struct irq_desc *desc = irq_to_desc(irq);
-
/* By default all event channels notify CPU#0. */
- cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+ cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
#endif
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -538,6 +459,18 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq);
}
+static void xen_evtchn_close(unsigned int port)
+{
+ struct evtchn_close close;
+
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+
+ /* Closed ports are implicitly re-bound to VCPU0. */
+ bind_evtchn_to_cpu(port, 0);
+}
+
static void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
@@ -554,13 +487,6 @@ static void pirq_query_unmask(int irq)
info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
-static bool probing_irq(int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc && desc->action == NULL;
-}
-
static void eoi_pirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -602,15 +528,20 @@ static unsigned int __startup_pirq(unsigned int irq)
BIND_PIRQ__WILL_SHARE : 0;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
if (rc != 0) {
- if (!probing_irq(irq))
- pr_info("Failed to obtain physical IRQ %d\n", irq);
+ pr_warn("Failed to obtain physical IRQ %d\n", irq);
return 0;
}
evtchn = bind_pirq.port;
pirq_query_unmask(irq);
- evtchn_to_irq[evtchn] = irq;
+ rc = set_evtchn_to_irq(evtchn, irq);
+ if (rc != 0) {
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
+ irq, rc);
+ xen_evtchn_close(evtchn);
+ return 0;
+ }
bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
@@ -628,10 +559,9 @@ static unsigned int startup_pirq(struct irq_data *data)
static void shutdown_pirq(struct irq_data *data)
{
- struct evtchn_close close;
unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
- int evtchn = evtchn_from_irq(irq);
+ unsigned evtchn = evtchn_from_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
@@ -639,14 +569,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
mask_evtchn(evtchn);
-
- close.port = evtchn;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- info->evtchn = 0;
+ xen_evtchn_close(evtchn);
+ xen_irq_info_cleanup(info);
}
static void enable_pirq(struct irq_data *data)
@@ -675,6 +599,41 @@ int xen_irq_from_gsi(unsigned gsi)
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+static void __unbind_from_irq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
+
+ if (info->refcnt > 0) {
+ info->refcnt--;
+ if (info->refcnt != 0)
+ return;
+ }
+
+ if (VALID_EVTCHN(evtchn)) {
+ unsigned int cpu = cpu_from_irq(irq);
+
+ xen_evtchn_close(evtchn);
+
+ switch (type_from_irq(irq)) {
+ case IRQT_VIRQ:
+ per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+ break;
+ case IRQT_IPI:
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+ break;
+ default:
+ break;
+ }
+
+ xen_irq_info_cleanup(info);
+ }
+
+ BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
+
+ xen_free_irq(irq);
+}
+
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
@@ -690,6 +649,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
{
int irq = -1;
struct physdev_irq irq_op;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
@@ -717,8 +677,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
goto out;
}
- xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
+ ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
pirq_query_unmask(irq);
/* We try to use the handler with the appropriate semantic for the
@@ -778,7 +743,9 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name);
- xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
+ ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
+ if (ret < 0)
+ goto error_irq;
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -786,25 +753,20 @@ out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
+ __unbind_from_irq(irq);
mutex_unlock(&irq_mapping_update_lock);
- xen_free_irq(irq);
return ret;
}
#endif
int xen_destroy_irq(int irq)
{
- struct irq_desc *desc;
struct physdev_unmap_pirq unmap_irq;
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
mutex_lock(&irq_mapping_update_lock);
- desc = irq_to_desc(irq);
- if (!desc)
- goto out;
-
if (xen_initial_domain()) {
unmap_irq.pirq = info->u.pirq.pirq;
unmap_irq.domid = info->u.pirq.domid;
@@ -857,13 +819,18 @@ int xen_pirq_from_irq(unsigned irq)
return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
+ int ret;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -ENOMEM;
mutex_lock(&irq_mapping_update_lock);
- irq = evtchn_to_irq[evtchn];
+ irq = get_evtchn_to_irq(evtchn);
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
@@ -873,7 +840,14 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_edge_irq, "event");
- xen_irq_info_evtchn_init(irq, evtchn);
+ ret = xen_irq_info_evtchn_setup(irq, evtchn);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -890,6 +864,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
@@ -909,8 +884,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
-
+ ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
bind_evtchn_to_cpu(evtchn, cpu);
} else {
struct irq_info *info = info_for_irq(irq);
@@ -943,7 +922,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
int port, rc = -ENOENT;
memset(&status, 0, sizeof(status));
- for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ for (port = 0; port < xen_evtchn_max_channels(); port++) {
status.dom = DOMID_SELF;
status.port = port;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
@@ -959,6 +938,19 @@ static int find_virq(unsigned int virq, unsigned int cpu)
return rc;
}
+/**
+ * xen_evtchn_nr_channels - number of usable event channel ports
+ *
+ * This may be less than the maximum supported by the current
+ * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
+ * supported.
+ */
+unsigned xen_evtchn_nr_channels(void)
+{
+ return evtchn_ops->nr_channels();
+}
+EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
+
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
@@ -989,7 +981,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
evtchn = ret;
}
- xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+ ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ if (ret < 0) {
+ __unbind_from_irq(irq);
+ irq = ret;
+ goto out;
+ }
bind_evtchn_to_cpu(evtchn, cpu);
} else {
@@ -1005,50 +1002,8 @@ out:
static void unbind_from_irq(unsigned int irq)
{
- struct evtchn_close close;
- int evtchn = evtchn_from_irq(irq);
- struct irq_info *info = irq_get_handler_data(irq);
-
- if (WARN_ON(!info))
- return;
-
mutex_lock(&irq_mapping_update_lock);
-
- if (info->refcnt > 0) {
- info->refcnt--;
- if (info->refcnt != 0)
- goto done;
- }
-
- if (VALID_EVTCHN(evtchn)) {
- close.port = evtchn;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-
- switch (type_from_irq(irq)) {
- case IRQT_VIRQ:
- per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
- [virq_from_irq(irq)] = -1;
- break;
- case IRQT_IPI:
- per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
- [ipi_from_irq(irq)] = -1;
- break;
- default:
- break;
- }
-
- /* Closed ports are implicitly re-bound to VCPU0. */
- bind_evtchn_to_cpu(evtchn, 0);
-
- evtchn_to_irq[evtchn] = -1;
- }
-
- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
- xen_free_irq(irq);
-
- done:
+ __unbind_from_irq(irq);
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1148,9 +1103,26 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+/**
+ * xen_set_irq_priority() - set an event channel priority.
+ * @irq: irq bound to an event channel.
+ * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
+ */
+int xen_set_irq_priority(unsigned irq, unsigned priority)
+{
+ struct evtchn_set_priority set_priority;
+
+ set_priority.port = evtchn_from_irq(irq);
+ set_priority.priority = priority;
+
+ return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
+ &set_priority);
+}
+EXPORT_SYMBOL_GPL(xen_set_irq_priority);
+
int evtchn_make_refcounted(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info;
if (irq == -1)
@@ -1175,12 +1147,12 @@ int evtchn_get(unsigned int evtchn)
struct irq_info *info;
int err = -ENOENT;
- if (evtchn >= NR_EVENT_CHANNELS)
+ if (evtchn >= xen_evtchn_max_channels())
return -EINVAL;
mutex_lock(&irq_mapping_update_lock);
- irq = evtchn_to_irq[evtchn];
+ irq = get_evtchn_to_irq(evtchn);
if (irq == -1)
goto done;
@@ -1204,7 +1176,7 @@ EXPORT_SYMBOL_GPL(evtchn_get);
void evtchn_put(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = get_evtchn_to_irq(evtchn);
if (WARN_ON(irq == -1))
return;
unbind_from_irq(irq);
@@ -1228,222 +1200,21 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
notify_remote_via_irq(irq);
}
-irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
-{
- struct shared_info *sh = HYPERVISOR_shared_info;
- int cpu = smp_processor_id();
- xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
- int i;
- unsigned long flags;
- static DEFINE_SPINLOCK(debug_lock);
- struct vcpu_info *v;
-
- spin_lock_irqsave(&debug_lock, flags);
-
- printk("\nvcpu %d\n ", cpu);
-
- for_each_online_cpu(i) {
- int pending;
- v = per_cpu(xen_vcpu, i);
- pending = (get_irq_regs() && i == cpu)
- ? xen_irqs_disabled(get_irq_regs())
- : v->evtchn_upcall_mask;
- printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
- pending, v->evtchn_upcall_pending,
- (int)(sizeof(v->evtchn_pending_sel)*2),
- v->evtchn_pending_sel);
- }
- v = per_cpu(xen_vcpu, cpu);
-
- printk("\npending:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)sizeof(sh->evtchn_pending[0])*2,
- sh->evtchn_pending[i],
- i % 8 == 0 ? "\n " : " ");
- printk("\nglobal mask:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nglobally unmasked:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nlocal cpu%d mask:\n ", cpu);
- for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
- printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
- cpu_evtchn[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nlocally unmasked:\n ");
- for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
- xen_ulong_t pending = sh->evtchn_pending[i]
- & ~sh->evtchn_mask[i]
- & cpu_evtchn[i];
- printk("%0*"PRI_xen_ulong"%s",
- (int)(sizeof(sh->evtchn_mask[0])*2),
- pending, i % 8 == 0 ? "\n " : " ");
- }
-
- printk("\npending list:\n");
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (sync_test_bit(i, BM(sh->evtchn_pending))) {
- int word_idx = i / BITS_PER_EVTCHN_WORD;
- printk(" %d: event %d -> irq %d%s%s%s\n",
- cpu_from_evtchn(i), i,
- evtchn_to_irq[i],
- sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
- ? "" : " l2-clear",
- !sync_test_bit(i, BM(sh->evtchn_mask))
- ? "" : " globally-masked",
- sync_test_bit(i, BM(cpu_evtchn))
- ? "" : " locally-masked");
- }
- }
-
- spin_unlock_irqrestore(&debug_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
-static DEFINE_PER_CPU(unsigned int, current_word_idx);
-static DEFINE_PER_CPU(unsigned int, current_bit_idx);
-
-/*
- * Mask out the i least significant bits of w
- */
-#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
-/*
- * Search the CPUs pending events bitmasks. For each one found, map
- * the event number to an irq, and feed it into do_IRQ() for
- * handling.
- *
- * Xen uses a two-level bitmap to speed searching. The first level is
- * a bitset of words which contain pending event bits. The second
- * level is a bitset of pending events themselves.
- */
static void __xen_evtchn_do_upcall(void)
{
- int start_word_idx, start_bit_idx;
- int word_idx, bit_idx;
- int i, irq;
- int cpu = get_cpu();
- struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ int cpu = get_cpu();
unsigned count;
do {
- xen_ulong_t pending_words;
- xen_ulong_t pending_bits;
- struct irq_desc *desc;
-
vcpu_info->evtchn_upcall_pending = 0;
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
- /*
- * Master flag must be cleared /before/ clearing
- * selector flag. xchg_xen_ulong must contain an
- * appropriate barrier.
- */
- if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
- int evtchn = evtchn_from_irq(irq);
- word_idx = evtchn / BITS_PER_LONG;
- pending_bits = evtchn % BITS_PER_LONG;
- if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
- }
-
- pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
-
- start_word_idx = __this_cpu_read(current_word_idx);
- start_bit_idx = __this_cpu_read(current_bit_idx);
-
- word_idx = start_word_idx;
-
- for (i = 0; pending_words != 0; i++) {
- xen_ulong_t words;
-
- words = MASK_LSBS(pending_words, word_idx);
-
- /*
- * If we masked out all events, wrap to beginning.
- */
- if (words == 0) {
- word_idx = 0;
- bit_idx = 0;
- continue;
- }
- word_idx = EVTCHN_FIRST_BIT(words);
-
- pending_bits = active_evtchns(cpu, s, word_idx);
- bit_idx = 0; /* usually scan entire word from start */
- /*
- * We scan the starting word in two parts.
- *
- * 1st time: start in the middle, scanning the
- * upper bits.
- *
- * 2nd time: scan the whole word (not just the
- * parts skipped in the first pass) -- if an
- * event in the previously scanned bits is
- * pending again it would just be scanned on
- * the next loop anyway.
- */
- if (word_idx == start_word_idx) {
- if (i == 0)
- bit_idx = start_bit_idx;
- }
-
- do {
- xen_ulong_t bits;
- int port;
-
- bits = MASK_LSBS(pending_bits, bit_idx);
-
- /* If we masked out all events, move on. */
- if (bits == 0)
- break;
-
- bit_idx = EVTCHN_FIRST_BIT(bits);
-
- /* Process port. */
- port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
- irq = evtchn_to_irq[port];
-
- if (irq != -1) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
-
- bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
-
- /* Next caller starts at last processed + 1 */
- __this_cpu_write(current_word_idx,
- bit_idx ? word_idx :
- (word_idx+1) % BITS_PER_EVTCHN_WORD);
- __this_cpu_write(current_bit_idx, bit_idx);
- } while (bit_idx != 0);
-
- /* Scan start_l1i twice; all others once. */
- if ((word_idx != start_word_idx) || (i != 0))
- pending_words &= ~(1UL << word_idx);
-
- word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
- }
+ xen_evtchn_handle_events(cpu);
BUG_ON(!irqs_disabled());
@@ -1464,6 +1235,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
#ifdef CONFIG_X86
exit_idle();
#endif
+ inc_irq_stat(irq_hv_callback_count);
__xen_evtchn_do_upcall();
@@ -1492,12 +1264,12 @@ void rebind_evtchn_irq(int evtchn, int irq)
mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
- BUG_ON(evtchn_to_irq[evtchn] != -1);
+ BUG_ON(get_evtchn_to_irq(evtchn) != -1);
/* Expect irq to have been bound before,
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- xen_irq_info_evtchn_init(irq, evtchn);
+ (void)xen_irq_info_evtchn_setup(irq, evtchn);
mutex_unlock(&irq_mapping_update_lock);
@@ -1511,7 +1283,6 @@ void rebind_evtchn_irq(int evtchn, int irq)
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
- struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
int masked;
@@ -1534,7 +1305,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
* Mask the event while changing the VCPU binding to prevent
* it being delivered on an unexpected VCPU.
*/
- masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+ masked = test_and_set_mask(evtchn);
/*
* If this fails, it usually just indicates that we're dealing with a
@@ -1553,27 +1324,31 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
- unsigned tcpu = cpumask_first(dest);
+ unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
return rebind_irq_to_cpu(data->irq, tcpu);
}
-int resend_irq_on_evtchn(unsigned int irq)
+static int retrigger_evtchn(int evtchn)
{
- int masked, evtchn = evtchn_from_irq(irq);
- struct shared_info *s = HYPERVISOR_shared_info;
+ int masked;
if (!VALID_EVTCHN(evtchn))
- return 1;
+ return 0;
- masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
- sync_set_bit(evtchn, BM(s->evtchn_pending));
+ masked = test_and_set_mask(evtchn);
+ set_evtchn(evtchn);
if (!masked)
unmask_evtchn(evtchn);
return 1;
}
+int resend_irq_on_evtchn(unsigned int irq)
+{
+ return retrigger_evtchn(evtchn_from_irq(irq));
+}
+
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
@@ -1608,21 +1383,7 @@ static void mask_ack_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
- struct shared_info *sh = HYPERVISOR_shared_info;
- int ret = 0;
-
- if (VALID_EVTCHN(evtchn)) {
- int masked;
-
- masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
- sync_set_bit(evtchn, BM(sh->evtchn_pending));
- if (!masked)
- unmask_evtchn(evtchn);
- ret = 1;
- }
-
- return ret;
+ return retrigger_evtchn(evtchn_from_irq(data->irq));
}
static void restore_pirqs(void)
@@ -1683,7 +1444,7 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1707,7 +1468,7 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
+ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1784,21 +1545,18 @@ EXPORT_SYMBOL_GPL(xen_test_irq_shared);
void xen_irq_resume(void)
{
- unsigned int cpu, evtchn;
+ unsigned int cpu;
struct irq_info *info;
- init_evtchn_cpu_bindings();
-
/* New event-channel space is not 'live' yet. */
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- mask_evtchn(evtchn);
+ xen_evtchn_mask_all();
+ xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
list_for_each_entry(info, &xen_irq_list_head, list)
info->evtchn = 0; /* zap event-channel binding */
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- evtchn_to_irq[evtchn] = -1;
+ clear_evtchn_to_irq_all();
for_each_possible_cpu(cpu) {
restore_cpu_virqs(cpu);
@@ -1889,27 +1647,40 @@ void xen_callback_vector(void)
void xen_callback_vector(void) {}
#endif
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "xen."
+
+static bool fifo_events = true;
+module_param(fifo_events, bool, 0);
+
void __init xen_init_IRQ(void)
{
- int i;
+ int ret = -EINVAL;
- evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
- GFP_KERNEL);
- BUG_ON(!evtchn_to_irq);
- for (i = 0; i < NR_EVENT_CHANNELS; i++)
- evtchn_to_irq[i] = -1;
+ if (fifo_events)
+ ret = xen_evtchn_fifo_init();
+ if (ret < 0)
+ xen_evtchn_2l_init();
- init_evtchn_cpu_bindings();
+ evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
+ sizeof(*evtchn_to_irq), GFP_KERNEL);
+ BUG_ON(!evtchn_to_irq);
/* No event channels are 'live' right now. */
- for (i = 0; i < NR_EVENT_CHANNELS; i++)
- mask_evtchn(i);
+ xen_evtchn_mask_all();
pirq_needs_eoi = pirq_needs_eoi_flag;
#ifdef CONFIG_X86
- if (xen_hvm_domain()) {
+ if (xen_pv_domain()) {
+ irq_ctx_init(smp_processor_id());
+ if (xen_initial_domain())
+ pci_xen_initial_domain();
+ }
+ if (xen_feature(XENFEAT_hvm_callback_vector))
xen_callback_vector();
+
+ if (xen_hvm_domain()) {
native_init_IRQ();
/* pci_xen_hvm_init must be called after native_init_IRQ so that
* __acpi_register_gsi can point at the right function */
@@ -1918,13 +1689,10 @@ void __init xen_init_IRQ(void)
int rc;
struct physdev_pirq_eoi_gmfn eoi_gmfn;
- irq_ctx_init(smp_processor_id());
- if (xen_initial_domain())
- pci_xen_initial_domain();
-
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
+ /* TODO: No PVH support for PIRQ EOI */
if (rc != 0) {
free_page((unsigned long) pirq_eoi_map);
pirq_eoi_map = NULL;
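
set_evtchn_to_irq() and get_evtchn_to_irq() above replace the old flat evtchn_to_irq[] array with a two-level table whose rows are page-sized and allocated on first use, so memory scales with the ports actually bound rather than with xen_evtchn_max_channels(). A self-contained userspace sketch of the same row/column arithmetic follows; the page size, port limit and use of malloc() are illustrative assumptions (the kernel allocates rows with get_zeroed_page() under irq_mapping_update_lock):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define PER_ROW		(PAGE_SIZE / sizeof(int))	/* ports per row */
#define MAX_PORTS	(1 << 17)			/* illustrative limit */
#define NROWS		(MAX_PORTS / PER_ROW)

static int *rows[NROWS];	/* first level: one pointer per page-sized row */

static int set_port_to_irq(unsigned int port, int irq)
{
	unsigned int r = port / PER_ROW, c = port % PER_ROW;

	if (!rows[r]) {
		if (irq == -1)
			return 0;	/* unallocated rows already read as -1 */
		rows[r] = malloc(PAGE_SIZE);
		if (!rows[r])
			return -1;
		for (unsigned int i = 0; i < PER_ROW; i++)
			rows[r][i] = -1;
	}
	rows[r][c] = irq;
	return 0;
}

static int get_port_to_irq(unsigned int port)
{
	unsigned int r = port / PER_ROW;

	return rows[r] ? rows[r][port % PER_ROW] : -1;
}

int main(void)
{
	set_port_to_irq(5000, 42);
	printf("%d %d\n", get_port_to_irq(5000), get_port_to_irq(7));	/* 42 -1 */
	return 0;
}
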
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
new file mode 100644
index 000000000000..96109a9972b6
--- /dev/null
+++ b/drivers/xen/events/events_fifo.c
@@ -0,0 +1,424 @@
+/*
+ * Xen event channels (FIFO-based ABI)
+ *
+ * Copyright (C) 2013 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * Or, when distributed separately from the Linux kernel or
+ * incorporated into other software packages, subject to the following
+ * license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
+
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "events_internal.h"
+
+#define EVENT_WORDS_PER_PAGE (PAGE_SIZE / sizeof(event_word_t))
+#define MAX_EVENT_ARRAY_PAGES (EVTCHN_FIFO_NR_CHANNELS / EVENT_WORDS_PER_PAGE)
+
+struct evtchn_fifo_queue {
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+
+static DEFINE_PER_CPU(struct evtchn_fifo_control_block *, cpu_control_block);
+static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
+static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
+static unsigned event_array_pages __read_mostly;
+
+#define BM(w) ((unsigned long *)(w))
+
+static inline event_word_t *event_word_from_port(unsigned port)
+{
+ unsigned i = port / EVENT_WORDS_PER_PAGE;
+
+ return event_array[i] + port % EVENT_WORDS_PER_PAGE;
+}
+
+static unsigned evtchn_fifo_max_channels(void)
+{
+ return EVTCHN_FIFO_NR_CHANNELS;
+}
+
+static unsigned evtchn_fifo_nr_channels(void)
+{
+ return event_array_pages * EVENT_WORDS_PER_PAGE;
+}
+
+static void free_unused_array_pages(void)
+{
+ unsigned i;
+
+ for (i = event_array_pages; i < MAX_EVENT_ARRAY_PAGES; i++) {
+ if (!event_array[i])
+ break;
+ free_page((unsigned long)event_array[i]);
+ event_array[i] = NULL;
+ }
+}
+
+static void init_array_page(event_word_t *array_page)
+{
+ unsigned i;
+
+ for (i = 0; i < EVENT_WORDS_PER_PAGE; i++)
+ array_page[i] = 1 << EVTCHN_FIFO_MASKED;
+}
+
+static int evtchn_fifo_setup(struct irq_info *info)
+{
+ unsigned port = info->evtchn;
+ unsigned new_array_pages;
+ int ret;
+
+ new_array_pages = port / EVENT_WORDS_PER_PAGE + 1;
+
+ if (new_array_pages > MAX_EVENT_ARRAY_PAGES)
+ return -EINVAL;
+
+ while (event_array_pages < new_array_pages) {
+ void *array_page;
+ struct evtchn_expand_array expand_array;
+
+ /* Might already have a page if we've resumed. */
+ array_page = event_array[event_array_pages];
+ if (!array_page) {
+ array_page = (void *)__get_free_page(GFP_KERNEL);
+ if (array_page == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ event_array[event_array_pages] = array_page;
+ }
+
+ /* Mask all events in this page before adding it. */
+ init_array_page(array_page);
+
+ expand_array.array_gfn = virt_to_mfn(array_page);
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
+ if (ret < 0)
+ goto error;
+
+ event_array_pages++;
+ }
+ return 0;
+
+ error:
+ if (event_array_pages == 0)
+ panic("xen: unable to expand event array with initial page (%d)\n", ret);
+ else
+ pr_err("unable to expand event array (%d)\n", ret);
+ free_unused_array_pages();
+ return ret;
+}
+
+static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
+{
+ /* no-op */
+}
+
+static void evtchn_fifo_clear_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static void evtchn_fifo_set_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static bool evtchn_fifo_is_pending(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+}
+
+static bool evtchn_fifo_test_and_set_mask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+}
+
+static void evtchn_fifo_mask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+}
+
+/*
+ * Clear MASKED, spinning if BUSY is set.
+ */
+static void clear_masked(volatile event_word_t *word)
+{
+ event_word_t new, old, w;
+
+ w = *word;
+
+ do {
+ old = w & ~(1 << EVTCHN_FIFO_BUSY);
+ new = old & ~(1 << EVTCHN_FIFO_MASKED);
+ w = sync_cmpxchg(word, old, new);
+ } while (w != old);
+}
+
+static void evtchn_fifo_unmask(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+
+ BUG_ON(!irqs_disabled());
+
+ clear_masked(word);
+ if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ }
+}
+
+static uint32_t clear_linked(volatile event_word_t *word)
+{
+ event_word_t new, old, w;
+
+ w = *word;
+
+ do {
+ old = w;
+ new = (w & ~((1 << EVTCHN_FIFO_LINKED)
+ | EVTCHN_FIFO_LINK_MASK));
+ } while ((w = sync_cmpxchg(word, old, new)) != old);
+
+ return w & EVTCHN_FIFO_LINK_MASK;
+}
+
+static void handle_irq_for_port(unsigned port)
+{
+ int irq;
+
+ irq = get_evtchn_to_irq(port);
+ if (irq != -1)
+ generic_handle_irq(irq);
+}
+
+static void consume_one_event(unsigned cpu,
+ struct evtchn_fifo_control_block *control_block,
+ unsigned priority, uint32_t *ready)
+{
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
+ uint32_t head;
+ unsigned port;
+ event_word_t *word;
+
+ head = q->head[priority];
+
+ /*
+ * Reached the tail last time? Read the new HEAD from the
+ * control block.
+ */
+ if (head == 0) {
+ rmb(); /* Ensure word is up-to-date before reading head. */
+ head = control_block->head[priority];
+ }
+
+ port = head;
+ word = event_word_from_port(port);
+ head = clear_linked(word);
+
+ /*
+ * If the link is non-zero, there are more events in the
+ * queue, otherwise the queue is empty.
+ *
+ * If the queue is empty, clear this priority from our local
+ * copy of the ready word.
+ */
+ if (head == 0)
+ clear_bit(priority, BM(ready));
+
+ if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
+ && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+ handle_irq_for_port(port);
+
+ q->head[priority] = head;
+}
+
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+ struct evtchn_fifo_control_block *control_block;
+ uint32_t ready;
+ unsigned q;
+
+ control_block = per_cpu(cpu_control_block, cpu);
+
+ ready = xchg(&control_block->ready, 0);
+
+ while (ready) {
+ q = find_first_bit(BM(&ready), EVTCHN_FIFO_MAX_QUEUES);
+ consume_one_event(cpu, control_block, q, &ready);
+ ready |= xchg(&control_block->ready, 0);
+ }
+}
+
+static void evtchn_fifo_resume(void)
+{
+ unsigned cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *control_block = per_cpu(cpu_control_block, cpu);
+ struct evtchn_init_control init_control;
+ int ret;
+
+ if (!control_block)
+ continue;
+
+ /*
+ * If this CPU is offline, take the opportunity to
+ * free the control block while it is not being
+ * used.
+ */
+ if (!cpu_online(cpu)) {
+ free_page((unsigned long)control_block);
+ per_cpu(cpu_control_block, cpu) = NULL;
+ continue;
+ }
+
+ init_control.control_gfn = virt_to_mfn(control_block);
+ init_control.offset = 0;
+ init_control.vcpu = cpu;
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
+ &init_control);
+ if (ret < 0)
+ BUG();
+ }
+
+ /*
+ * The event array starts out as empty again and is extended
+ * as normal when events are bound. The existing pages will
+ * be reused.
+ */
+ event_array_pages = 0;
+}
+
+static const struct evtchn_ops evtchn_ops_fifo = {
+ .max_channels = evtchn_fifo_max_channels,
+ .nr_channels = evtchn_fifo_nr_channels,
+ .setup = evtchn_fifo_setup,
+ .bind_to_cpu = evtchn_fifo_bind_to_cpu,
+ .clear_pending = evtchn_fifo_clear_pending,
+ .set_pending = evtchn_fifo_set_pending,
+ .is_pending = evtchn_fifo_is_pending,
+ .test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ .mask = evtchn_fifo_mask,
+ .unmask = evtchn_fifo_unmask,
+ .handle_events = evtchn_fifo_handle_events,
+ .resume = evtchn_fifo_resume,
+};
+
+static int evtchn_fifo_init_control_block(unsigned cpu)
+{
+ struct page *control_block = NULL;
+ struct evtchn_init_control init_control;
+ int ret = -ENOMEM;
+
+ control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (control_block == NULL)
+ goto error;
+
+ init_control.control_gfn = virt_to_mfn(page_address(control_block));
+ init_control.offset = 0;
+ init_control.vcpu = cpu;
+
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
+ if (ret < 0)
+ goto error;
+
+ per_cpu(cpu_control_block, cpu) = page_address(control_block);
+
+ return 0;
+
+ error:
+ __free_page(control_block);
+ return ret;
+}
+
+static int evtchn_fifo_cpu_notification(struct notifier_block *self,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (long)hcpu;
+ int ret = 0;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!per_cpu(cpu_control_block, cpu))
+ ret = evtchn_fifo_init_control_block(cpu);
+ break;
+ default:
+ break;
+ }
+ return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block evtchn_fifo_cpu_notifier = {
+ .notifier_call = evtchn_fifo_cpu_notification,
+};
+
+int __init xen_evtchn_fifo_init(void)
+{
+ int cpu = get_cpu();
+ int ret;
+
+ ret = evtchn_fifo_init_control_block(cpu);
+ if (ret < 0)
+ goto out;
+
+ pr_info("Using FIFO-based ABI\n");
+
+ evtchn_ops = &evtchn_ops_fifo;
+
+ register_cpu_notifier(&evtchn_fifo_cpu_notifier);
+out:
+ put_cpu();
+ return ret;
+}
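
clear_linked() above clears the LINKED flag and the link field of an event word in a single compare-and-swap retry loop, returning the old link so the queue walk can move on to the next port. A standalone userspace illustration of that pattern follows; the bit positions are hypothetical stand-ins for EVTCHN_FIFO_LINKED and EVTCHN_FIFO_LINK_MASK, and GCC's __sync_val_compare_and_swap() stands in for the kernel's sync_cmpxchg():

#include <stdint.h>
#include <stdio.h>

#define LINK_MASK	0x1ffffu	/* hypothetical: low bits hold the next port */
#define LINKED_BIT	(1u << 17)	/* hypothetical: "word is on a queue" flag */

static uint32_t clear_linked(volatile uint32_t *word)
{
	uint32_t new, old, w = *word;

	do {
		old = w;
		new = w & ~(LINKED_BIT | LINK_MASK);
		w = __sync_val_compare_and_swap(word, old, new);
	} while (w != old);	/* retry if another CPU changed the word */

	return w & LINK_MASK;	/* old link, i.e. the next port in the queue */
}

int main(void)
{
	uint32_t word = LINKED_BIT | 66;	/* linked, next port is 66 */
	uint32_t next = clear_linked(&word);

	printf("next=%u word=0x%x\n", next, word);
	return 0;
}
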
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
new file mode 100644
index 000000000000..677f41a0fff9
--- /dev/null
+++ b/drivers/xen/events/events_internal.h
@@ -0,0 +1,150 @@
+/*
+ * Xen Event Channels (internal header)
+ *
+ * Copyright (C) 2013 Citrix Systems R&D Ltd.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later. See the file COPYING for more details.
+ */
+#ifndef __EVENTS_INTERNAL_H__
+#define __EVENTS_INTERNAL_H__
+
+/* Interrupt types. */
+enum xen_irq_type {
+ IRQT_UNBOUND = 0,
+ IRQT_PIRQ,
+ IRQT_VIRQ,
+ IRQT_IPI,
+ IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
+ * guest, or GSI (real passthrough IRQ) of the device.
+ * VIRQ - virq number
+ * IPI - IPI vector
+ * EVTCHN -
+ */
+struct irq_info {
+ struct list_head list;
+ int refcnt;
+ enum xen_irq_type type; /* type */
+ unsigned irq;
+ unsigned int evtchn; /* event channel */
+ unsigned short cpu; /* cpu bound */
+
+ union {
+ unsigned short virq;
+ enum ipi_vector ipi;
+ struct {
+ unsigned short pirq;
+ unsigned short gsi;
+ unsigned char vector;
+ unsigned char flags;
+ uint16_t domid;
+ } pirq;
+ } u;
+};
+
+#define PIRQ_NEEDS_EOI (1 << 0)
+#define PIRQ_SHAREABLE (1 << 1)
+
+struct evtchn_ops {
+ unsigned (*max_channels)(void);
+ unsigned (*nr_channels)(void);
+
+ int (*setup)(struct irq_info *info);
+ void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+
+ void (*clear_pending)(unsigned port);
+ void (*set_pending)(unsigned port);
+ bool (*is_pending)(unsigned port);
+ bool (*test_and_set_mask)(unsigned port);
+ void (*mask)(unsigned port);
+ void (*unmask)(unsigned port);
+
+ void (*handle_events)(unsigned cpu);
+ void (*resume)(void);
+};
+
+extern const struct evtchn_ops *evtchn_ops;
+
+extern int **evtchn_to_irq;
+int get_evtchn_to_irq(unsigned int evtchn);
+
+struct irq_info *info_for_irq(unsigned irq);
+unsigned cpu_from_irq(unsigned irq);
+unsigned cpu_from_evtchn(unsigned int evtchn);
+
+static inline unsigned xen_evtchn_max_channels(void)
+{
+ return evtchn_ops->max_channels();
+}
+
+/*
+ * Do any ABI specific setup for a bound event channel before it can
+ * be unmasked and used.
+ */
+static inline int xen_evtchn_port_setup(struct irq_info *info)
+{
+ if (evtchn_ops->setup)
+ return evtchn_ops->setup(info);
+ return 0;
+}
+
+static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
+ unsigned cpu)
+{
+ evtchn_ops->bind_to_cpu(info, cpu);
+}
+
+static inline void clear_evtchn(unsigned port)
+{
+ evtchn_ops->clear_pending(port);
+}
+
+static inline void set_evtchn(unsigned port)
+{
+ evtchn_ops->set_pending(port);
+}
+
+static inline bool test_evtchn(unsigned port)
+{
+ return evtchn_ops->is_pending(port);
+}
+
+static inline bool test_and_set_mask(unsigned port)
+{
+ return evtchn_ops->test_and_set_mask(port);
+}
+
+static inline void mask_evtchn(unsigned port)
+{
+ evtchn_ops->mask(port);
+}
+
+static inline void unmask_evtchn(unsigned port)
+{
+ evtchn_ops->unmask(port);
+}
+
+static inline void xen_evtchn_handle_events(unsigned cpu)
+{
+ evtchn_ops->handle_events(cpu);
+}
+
+static inline void xen_evtchn_resume(void)
+{
+ if (evtchn_ops->resume)
+ evtchn_ops->resume();
+}
+
+void xen_evtchn_2l_init(void);
+int xen_evtchn_fifo_init(void);
+
+#endif /* #ifndef __EVENTS_INTERNAL_H__ */
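
The inline wrappers above give ABI-agnostic callers a single way to mask, clear and unmask a port regardless of whether the 2-level or FIFO implementation is active. A minimal sketch of how they might be combined, assuming a port already bound through the usual event-channel paths; demo_quiesce_port is a hypothetical helper name.

        static void demo_quiesce_port(unsigned port)
        {
                if (port >= xen_evtchn_max_channels())
                        return;                 /* not valid for the active ABI */

                mask_evtchn(port);              /* dispatches to evtchn_ops->mask */
                clear_evtchn(port);             /* drop any pending event */
                unmask_evtchn(port);
        }
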
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 5de2063e16d3..00f40f051d95 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -417,7 +417,7 @@ static long evtchn_ioctl(struct file *file,
break;
rc = -EINVAL;
- if (unbind.port >= NR_EVENT_CHANNELS)
+ if (unbind.port >= xen_evtchn_nr_channels())
break;
rc = -ENOTCONN;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index e41c79c986ea..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -846,7 +846,7 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
- use_ptemod = xen_pv_domain();
+ use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
err = misc_register(&gntdev_miscdev);
if (err != 0) {
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index aa846a48f400..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -62,12 +62,10 @@
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
-static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
-unsigned long xen_hvm_resume_frames;
-EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
+struct grant_frames xen_auto_xlat_grant_frames;
static union {
struct grant_entry_v1 *v1;
@@ -827,6 +825,11 @@ static unsigned int __max_nr_grant_frames(void)
unsigned int gnttab_max_grant_frames(void)
{
unsigned int xen_max = __max_nr_grant_frames();
+ static unsigned int boot_max_nr_grant_frames;
+
+ /* First time, initialize it properly. */
+ if (!boot_max_nr_grant_frames)
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
if (xen_max > boot_max_nr_grant_frames)
return boot_max_nr_grant_frames;
@@ -834,6 +837,51 @@ unsigned int gnttab_max_grant_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
+{
+ xen_pfn_t *pfn;
+ unsigned int max_nr_gframes = __max_nr_grant_frames();
+ unsigned int i;
+ void *vaddr;
+
+ if (xen_auto_xlat_grant_frames.count)
+ return -EINVAL;
+
+ vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
+ if (vaddr == NULL) {
+ pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+ &addr);
+ return -ENOMEM;
+ }
+ pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
+ if (!pfn) {
+ xen_unmap(vaddr);
+ return -ENOMEM;
+ }
+ for (i = 0; i < max_nr_gframes; i++)
+ pfn[i] = PFN_DOWN(addr) + i;
+
+ xen_auto_xlat_grant_frames.vaddr = vaddr;
+ xen_auto_xlat_grant_frames.pfn = pfn;
+ xen_auto_xlat_grant_frames.count = max_nr_gframes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
+
+void gnttab_free_auto_xlat_frames(void)
+{
+ if (!xen_auto_xlat_grant_frames.count)
+ return;
+ kfree(xen_auto_xlat_grant_frames.pfn);
+ xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+
+ xen_auto_xlat_grant_frames.pfn = NULL;
+ xen_auto_xlat_grant_frames.count = 0;
+ xen_auto_xlat_grant_frames.vaddr = NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
+
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
@@ -1060,10 +1108,11 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
unsigned int nr_gframes = end_idx + 1;
int rc;
- if (xen_hvm_domain()) {
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
struct xen_add_to_physmap xatp;
unsigned int i = end_idx;
rc = 0;
+ BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
/*
* Loop backwards, so that the first hypercall has the largest
* index, ensuring that the table will grow only once.
@@ -1072,7 +1121,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
xatp.domid = DOMID_SELF;
xatp.idx = i;
xatp.space = XENMAPSPACE_grant_table;
- xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
+ xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
if (rc != 0) {
pr_warn("grant table add_to_physmap failed, err=%d\n",
@@ -1135,10 +1184,8 @@ static void gnttab_request_version(void)
int rc;
struct gnttab_set_version gsv;
- if (xen_hvm_domain())
- gsv.version = 1;
- else
- gsv.version = 2;
+ gsv.version = 1;
+
rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
if (rc == 0 && gsv.version == 2) {
grant_table_version = 2;
@@ -1169,22 +1216,15 @@ static int gnttab_setup(void)
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
- if (xen_pv_domain())
- return gnttab_map(0, nr_grant_frames - 1);
-
- if (gnttab_shared.addr == NULL) {
- gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
- PAGE_SIZE * max_nr_gframes);
+ if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
+ gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
if (gnttab_shared.addr == NULL) {
- pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
- xen_hvm_resume_frames);
+ pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
+ (unsigned long)xen_auto_xlat_grant_frames.vaddr);
return -ENOMEM;
}
}
-
- gnttab_map(0, nr_grant_frames - 1);
-
- return 0;
+ return gnttab_map(0, nr_grant_frames - 1);
}
int gnttab_resume(void)
@@ -1227,13 +1267,12 @@ int gnttab_init(void)
gnttab_request_version();
nr_grant_frames = 1;
- boot_max_nr_grant_frames = __max_nr_grant_frames();
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
BUG_ON(grefs_per_grant_frame == 0);
- max_nr_glist_frames = (boot_max_nr_grant_frames *
+ max_nr_glist_frames = (gnttab_max_grant_frames() *
grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1286,5 +1325,6 @@ static int __gnttab_init(void)
return gnttab_init();
}
-
-core_initcall(__gnttab_init);
+/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
+ * beforehand to initialize xen_auto_xlat_grant_frames. */
+core_initcall_sync(__gnttab_init);
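
For auto-translated guests, the expected sequence is to register a physical range large enough for the maximum number of grant frames with gnttab_setup_auto_xlat_frames() before gnttab_init() runs, and to release it again if initialisation fails; the platform-pci.c change below follows this pattern. A minimal sketch of that sequence, with demo_region_base standing in for an address obtained elsewhere (e.g. from an MMIO region).

        static int demo_grant_init(phys_addr_t demo_region_base)
        {
                int ret;

                ret = gnttab_setup_auto_xlat_frames(demo_region_base);
                if (ret)
                        return ret;

                ret = gnttab_init();
                if (ret)
                        gnttab_free_auto_xlat_frames();

                return ret;
        }
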
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 188825122aae..dd9c249ea311 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,7 +26,9 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
+#endif
static bool __read_mostly pci_seg_supported = true;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2f3528e93cb9..a1361c312c06 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -108,6 +108,7 @@ static int platform_pci_init(struct pci_dev *pdev,
long ioaddr;
long mmio_addr, mmio_len;
unsigned int max_nr_gframes;
+ unsigned long grant_frames;
if (!xen_domain())
return -ENODEV;
@@ -154,13 +155,17 @@ static int platform_pci_init(struct pci_dev *pdev,
}
max_nr_gframes = gnttab_max_grant_frames();
- xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
- ret = gnttab_init();
+ grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+ ret = gnttab_setup_auto_xlat_frames(grant_frames);
if (ret)
goto out;
+ ret = gnttab_init();
+ if (ret)
+ goto grant_out;
xenbus_probe(NULL);
return 0;
-
+grant_out:
+ gnttab_free_auto_xlat_frames();
out:
pci_release_region(pdev, 0);
mem_out:
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1eac0731c349..ebd8f218a788 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -75,14 +75,32 @@ static unsigned long xen_io_tlb_nslabs;
static u64 start_dma_addr;
+/*
+ * Both of these functions should avoid PFN_PHYS because phys_addr_t
+ * can be 32-bit when dma_addr_t is 64-bit, leading to a loss of
+ * information if the shift is done before casting to 64-bit.
+ */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
- return phys_to_machine(XPADDR(paddr)).maddr;
+ unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
+ dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+
+ dma |= paddr & ~PAGE_MASK;
+
+ return dma;
}
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
- return machine_to_phys(XMADDR(baddr)).paddr;
+ unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+ dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
+ phys_addr_t paddr = dma;
+
+ BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
+
+ paddr |= baddr & ~PAGE_MASK;
+
+ return paddr;
}
static inline dma_addr_t xen_virt_to_bus(void *address)
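
The comment above is about integer width rather than anything Xen-specific: if unsigned long (and phys_addr_t) is 32-bit while dma_addr_t is 64-bit, a PFN_PHYS-style shift is evaluated in 32 bits, so the high bits of the bus address are lost before the widening assignment. A minimal sketch of the difference, purely illustrative and assuming those type widths; the demo_* names are hypothetical.

        static dma_addr_t demo_shift_then_widen(unsigned long mfn)
        {
                return mfn << PAGE_SHIFT;               /* wraps in 32 bits */
        }

        static dma_addr_t demo_widen_then_shift(unsigned long mfn)
        {
                return (dma_addr_t)mfn << PAGE_SHIFT;   /* keeps the high bits */
        }

With 4 KiB pages and mfn 0x123456, the first returns 0x23456000 while the second returns the intended 0x123456000, which is why the new helpers cast the frame number to dma_addr_t before shifting.
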
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
index 8dae6c13063a..3e62ee4b3b66 100644
--- a/drivers/xen/xen-acpi-cpuhotplug.c
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -24,10 +24,7 @@
#include <linux/cpu.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
-
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -269,7 +266,8 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!is_processor_present(handle))
break;
- if (!acpi_bus_get_device(handle, &device))
+ acpi_bus_get_device(handle, &device);
+ if (acpi_device_enumerated(device))
break;
result = acpi_bus_scan(handle);
@@ -277,8 +275,9 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
pr_err(PREFIX "Unable to add the device\n");
break;
}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ device = NULL;
+ acpi_bus_get_device(handle, &device);
+ if (!acpi_device_enumerated(device)) {
pr_err(PREFIX "Missing device object\n");
break;
}
@@ -314,7 +313,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
goto out;
}
- (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
out:
acpi_scan_lock_release();
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 9083f1e474f8..34e40b733f9a 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -22,7 +22,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -169,7 +168,7 @@ static int acpi_memory_get_device(acpi_handle handle,
acpi_scan_lock_acquire();
acpi_bus_get_device(handle, &device);
- if (device)
+ if (acpi_device_enumerated(device))
goto end;
/*
@@ -182,8 +181,9 @@ static int acpi_memory_get_device(acpi_handle handle,
result = -EINVAL;
goto out;
}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ device = NULL;
+ acpi_bus_get_device(handle, &device);
+ if (!acpi_device_enumerated(device)) {
pr_warn(PREFIX "Missing device object\n");
result = -EINVAL;
goto out;
@@ -285,7 +285,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
return;
}
- (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+ (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
return;
}
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index 59708fdd068b..f83b754505f8 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -18,11 +18,10 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <asm/xen/hypercall.h>
+#include <linux/acpi.h>
#include <xen/interface/version.h>
#include <xen/xen-ops.h>
+#include <asm/xen/hypercall.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -78,27 +77,14 @@ static int acpi_pad_pur(acpi_handle handle)
return num;
}
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
- uint32_t idle_nums)
-{
- union acpi_object params[3] = {
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_INTEGER,},
- {.type = ACPI_TYPE_BUFFER,},
- };
- struct acpi_object_list arg_list = {3, params};
-
- params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
- params[1].integer.value = stat;
- params[2].buffer.length = 4;
- params[2].buffer.pointer = (void *)&idle_nums;
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
static void acpi_pad_handle_notify(acpi_handle handle)
{
int idle_nums;
+ struct acpi_buffer param = {
+ .length = 4,
+ .pointer = (void *)&idle_nums,
+ };
+
mutex_lock(&xen_cpu_lock);
idle_nums = acpi_pad_pur(handle);
@@ -110,7 +96,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
?: xen_acpi_pad_idle_cpus_num();
if (idle_nums >= 0)
- acpi_pad_ost(handle, 0, idle_nums);
+ acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY,
+ 0, &param);
mutex_unlock(&xen_cpu_lock);
}
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 13bc6c31c060..7231859119f1 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -28,10 +28,8 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/syscore_ops.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <acpi/processor.h>
-
#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 21e18c18c7a1..745ad79c1d8e 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -175,6 +175,7 @@ static void frontswap_selfshrink(void)
#endif /* CONFIG_FRONTSWAP */
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+#define PAGES2MB(pages) ((pages) >> (20 - PAGE_SHIFT))
/*
* Use current balloon size, the goal (vm_committed_as), and hysteresis
@@ -525,6 +526,7 @@ EXPORT_SYMBOL(register_xen_selfballooning);
int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
{
bool enable = false;
+ unsigned long reserve_pages;
if (!xen_domain())
return -ENODEV;
@@ -549,6 +551,26 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
if (!enable)
return -ENODEV;
+ /*
+ * Give selfballoon_reserved_mb a default value (10% of total RAM pages)
+ * to make selfballoon not so aggressive.
+ *
+ * There are mainly two reasons:
+ * 1) The original goal_page didn't consider some pages used by kernel
+ * space, like slab pages and memory used by device drivers.
+ *
+ * 2) The balloon driver may not give back memory to the guest OS fast
+ * enough when the workload suddenly acquires a lot of physical memory.
+ *
+ * In both cases, the guest OS will suffer from memory pressure and
+ * OOM killer may be triggered.
+ * By reserving an extra 10% of total RAM pages, we can keep the system
+ * much more reliable and responsive in some cases.
+ */
+ if (!selfballoon_reserved_mb) {
+ reserve_pages = totalram_pages / 10;
+ selfballoon_reserved_mb = PAGES2MB(reserve_pages);
+ }
schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
return 0;
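
As a rough worked example of the default above (all numbers assumed, not from the patch): with 4 KiB pages PAGES2MB() shifts right by 20 - 12 = 8, so a guest with 4 GiB of RAM reserves about 409 MB.

        unsigned long pages = 1048576;                  /* 4 GiB of 4 KiB pages */
        unsigned long reserve_pages = pages / 10;       /* 104857 pages */
        unsigned long reserve_mb = reserve_pages >> 8;  /* PAGES2MB(): ~409 MB */
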
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index ec097d6f964d..01d59e66565d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -45,6 +45,7 @@
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
+#include <xen/features.h>
#include "xenbus_probe.h"
@@ -743,7 +744,7 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
void __init xenbus_ring_ops_init(void)
{
- if (xen_pv_domain())
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
ring_ops = &ring_ops_pv;
else
ring_ops = &ring_ops_hvm;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 129bf84c19ec..cb385c10d2b1 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -496,7 +496,7 @@ subsys_initcall(xenbus_probe_frontend_init);
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_devices())
return -ENODEV;
ready_to_wait_for_devices = 1;
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}